summaryrefslogtreecommitdiffstats
path: root/src/spdk/dpdk/drivers
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
commit483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
treee5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/spdk/dpdk/drivers
parentInitial commit. (diff)
downloadceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.tar.xz
ceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.zip
Adding upstream version 14.2.21.upstream/14.2.21upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers')
-rw-r--r--src/spdk/dpdk/drivers/Makefile25
-rw-r--r--src/spdk/dpdk/drivers/baseband/Makefile14
-rw-r--r--src/spdk/dpdk/drivers/baseband/null/Makefile25
-rw-r--r--src/spdk/dpdk/drivers/baseband/null/bbdev_null.c356
-rw-r--r--src/spdk/dpdk/drivers/baseband/null/rte_pmd_bbdev_null_version.map3
-rw-r--r--src/spdk/dpdk/drivers/baseband/turbo_sw/Makefile42
-rw-r--r--src/spdk/dpdk/drivers/baseband/turbo_sw/bbdev_turbo_software.c1307
-rw-r--r--src/spdk/dpdk/drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map3
-rw-r--r--src/spdk/dpdk/drivers/bus/Makefile15
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/Makefile52
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman.c581
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c611
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/fman/netcfg_layer.c160
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/fman/of.c587
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.c361
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.h541
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_driver.c290
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_priv.h92
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_alloc.c71
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.c103
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.h28
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/qbman/process.c298
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.c2755
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.h913
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_driver.c362
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_priv.h278
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/dpaa_bus.c646
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/include/compat.h384
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_bits.h39
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_list.h75
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_rbtree.h117
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/include/fman.h425
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/include/fsl_bman.h342
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman.h154
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman_crc64.h230
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/include/fsl_qman.h2057
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/include/fsl_usd.h78
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/include/netcfg.h63
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/include/of.h159
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/include/process.h77
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/meson.build29
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/rte_bus_dpaa_version.map104
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_bus.h192
-rw-r--r--src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_logs.h98
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/Makefile48
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/fslmc_bus.c528
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/fslmc_logs.h41
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.c789
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.h54
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/dpbp.c346
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/dpci.c440
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/dpcon.c331
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/dpdmai.c429
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/dpio.c470
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/dpmng.c84
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp.h95
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h98
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci.h220
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci_cmd.h132
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon.h89
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h75
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai.h189
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h107
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio.h132
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio_cmd.h115
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng.h54
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h35
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_cmd.h175
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_sys.h63
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/mc/mc_sys.c84
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/meson.build27
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c123
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c187
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c527
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h52
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h379
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/qbman/include/compat.h102
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h132
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h30
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h1186
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c66
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.c1425
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.h144
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys.h380
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys_decl.h53
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/rte_bus_fslmc_version.map118
-rw-r--r--src/spdk/dpdk/drivers/bus/fslmc/rte_fslmc.h216
-rw-r--r--src/spdk/dpdk/drivers/bus/ifpga/Makefile32
-rw-r--r--src/spdk/dpdk/drivers/bus/ifpga/ifpga_bus.c467
-rw-r--r--src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.c88
-rw-r--r--src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.h18
-rw-r--r--src/spdk/dpdk/drivers/bus/ifpga/ifpga_logs.h31
-rw-r--r--src/spdk/dpdk/drivers/bus/ifpga/meson.build8
-rw-r--r--src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga.h149
-rw-r--r--src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga_version.map10
-rw-r--r--src/spdk/dpdk/drivers/bus/meson.build7
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/Makefile38
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/bsd/Makefile4
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/bsd/pci.c651
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/linux/Makefile8
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/linux/pci.c926
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/linux/pci_init.h88
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/linux/pci_uio.c562
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/linux/pci_vfio.c794
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/meson.build19
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/pci_common.c443
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/pci_common_uio.c206
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/private.h169
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/rte_bus_pci.h315
-rw-r--r--src/spdk/dpdk/drivers/bus/pci/rte_bus_pci_version.map18
-rw-r--r--src/spdk/dpdk/drivers/bus/vdev/Makefile30
-rw-r--r--src/spdk/dpdk/drivers/bus/vdev/meson.build7
-rw-r--r--src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev.h162
-rw-r--r--src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev_version.map18
-rw-r--r--src/spdk/dpdk/drivers/bus/vdev/vdev.c549
-rw-r--r--src/spdk/dpdk/drivers/bus/vdev/vdev_logs.h16
-rw-r--r--src/spdk/dpdk/drivers/bus/vmbus/Makefile36
-rw-r--r--src/spdk/dpdk/drivers/bus/vmbus/linux/Makefile3
-rw-r--r--src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_bus.c355
-rw-r--r--src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_uio.c398
-rw-r--r--src/spdk/dpdk/drivers/bus/vmbus/meson.build18
-rw-r--r--src/spdk/dpdk/drivers/bus/vmbus/private.h132
-rw-r--r--src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus.h407
-rw-r--r--src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus_version.map29
-rw-r--r--src/spdk/dpdk/drivers/bus/vmbus/rte_vmbus_reg.h344
-rw-r--r--src/spdk/dpdk/drivers/bus/vmbus/vmbus_bufring.c244
-rw-r--r--src/spdk/dpdk/drivers/bus/vmbus/vmbus_channel.c405
-rw-r--r--src/spdk/dpdk/drivers/bus/vmbus/vmbus_common.c286
-rw-r--r--src/spdk/dpdk/drivers/bus/vmbus/vmbus_common_uio.c232
-rw-r--r--src/spdk/dpdk/drivers/common/Makefile11
-rw-r--r--src/spdk/dpdk/drivers/common/meson.build7
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx/Makefile24
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx/meson.build5
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.c249
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.h37
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx/rte_common_octeontx_version.map7
-rw-r--r--src/spdk/dpdk/drivers/common/qat/Makefile66
-rw-r--r--src/spdk/dpdk/drivers/common/qat/meson.build14
-rw-r--r--src/spdk/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h136
-rw-r--r--src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw.h318
-rw-r--r--src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_comp.h482
-rw-r--r--src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_la.h361
-rw-r--r--src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_hw.h386
-rw-r--r--src/spdk/dpdk/drivers/common/qat/qat_common.c123
-rw-r--r--src/spdk/dpdk/drivers/common/qat/qat_common.h79
-rw-r--r--src/spdk/dpdk/drivers/common/qat/qat_device.c279
-rw-r--r--src/spdk/dpdk/drivers/common/qat/qat_device.h102
-rw-r--r--src/spdk/dpdk/drivers/common/qat/qat_logs.c38
-rw-r--r--src/spdk/dpdk/drivers/common/qat/qat_logs.h34
-rw-r--r--src/spdk/dpdk/drivers/common/qat/qat_qp.c642
-rw-r--r--src/spdk/dpdk/drivers/common/qat/qat_qp.h111
-rw-r--r--src/spdk/dpdk/drivers/compress/Makefile10
-rw-r--r--src/spdk/dpdk/drivers/compress/isal/Makefile31
-rw-r--r--src/spdk/dpdk/drivers/compress/isal/isal_compress_pmd.c694
-rw-r--r--src/spdk/dpdk/drivers/compress/isal/isal_compress_pmd_ops.c351
-rw-r--r--src/spdk/dpdk/drivers/compress/isal/isal_compress_pmd_private.h57
-rw-r--r--src/spdk/dpdk/drivers/compress/isal/meson.build14
-rw-r--r--src/spdk/dpdk/drivers/compress/isal/rte_pmd_isal_version.map3
-rw-r--r--src/spdk/dpdk/drivers/compress/meson.build8
-rw-r--r--src/spdk/dpdk/drivers/compress/octeontx/Makefile30
-rw-r--r--src/spdk/dpdk/drivers/compress/octeontx/include/zip_regs.h711
-rw-r--r--src/spdk/dpdk/drivers/compress/octeontx/meson.build9
-rw-r--r--src/spdk/dpdk/drivers/compress/octeontx/otx_zip.c180
-rw-r--r--src/spdk/dpdk/drivers/compress/octeontx/otx_zip.h277
-rw-r--r--src/spdk/dpdk/drivers/compress/octeontx/otx_zip_pmd.c658
-rw-r--r--src/spdk/dpdk/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map3
-rw-r--r--src/spdk/dpdk/drivers/compress/qat/meson.build18
-rw-r--r--src/spdk/dpdk/drivers/compress/qat/qat_comp.c393
-rw-r--r--src/spdk/dpdk/drivers/compress/qat/qat_comp.h65
-rw-r--r--src/spdk/dpdk/drivers/compress/qat/qat_comp_pmd.c429
-rw-r--r--src/spdk/dpdk/drivers/compress/qat/qat_comp_pmd.h39
-rw-r--r--src/spdk/dpdk/drivers/compress/qat/rte_pmd_qat_version.map3
-rw-r--r--src/spdk/dpdk/drivers/compress/zlib/Makefile29
-rw-r--r--src/spdk/dpdk/drivers/compress/zlib/meson.build14
-rw-r--r--src/spdk/dpdk/drivers/compress/zlib/rte_pmd_zlib_version.map3
-rw-r--r--src/spdk/dpdk/drivers/compress/zlib/zlib_pmd.c436
-rw-r--r--src/spdk/dpdk/drivers/compress/zlib/zlib_pmd_ops.c307
-rw-r--r--src/spdk/dpdk/drivers/compress/zlib/zlib_pmd_private.h71
-rw-r--r--src/spdk/dpdk/drivers/crypto/Makefile25
-rw-r--r--src/spdk/dpdk/drivers/crypto/aesni_gcm/Makefile32
-rw-r--r--src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h116
-rw-r--r--src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c585
-rw-r--r--src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c333
-rw-r--r--src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h110
-rw-r--r--src/spdk/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map3
-rw-r--r--src/spdk/dpdk/drivers/crypto/aesni_mb/Makefile29
-rw-r--r--src/spdk/dpdk/drivers/crypto/aesni_mb/aesni_mb_ops.h221
-rw-r--r--src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c1046
-rw-r--r--src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c621
-rw-r--r--src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h254
-rw-r--r--src/spdk/dpdk/drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map3
-rw-r--r--src/spdk/dpdk/drivers/crypto/armv8/Makefile40
-rw-r--r--src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd.c851
-rw-r--r--src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_ops.c348
-rw-r--r--src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_private.h197
-rw-r--r--src/spdk/dpdk/drivers/crypto/armv8/rte_pmd_armv8_version.map3
-rw-r--r--src/spdk/dpdk/drivers/crypto/ccp/Makefile35
-rw-r--r--src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.c2951
-rw-r--r--src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.h388
-rw-r--r--src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.c810
-rw-r--r--src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.h495
-rw-r--r--src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.c236
-rw-r--r--src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.h27
-rw-r--r--src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_ops.c833
-rw-r--r--src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_private.h107
-rw-r--r--src/spdk/dpdk/drivers/crypto/ccp/meson.build21
-rw-r--r--src/spdk/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c397
-rw-r--r--src/spdk/dpdk/drivers/crypto/ccp/rte_pmd_ccp_version.map4
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/Makefile55
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c2931
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h42
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h449
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/compat.h126
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc.h2568
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/algo.h646
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/common.h98
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h1521
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta.h921
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h313
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h218
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h174
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h189
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h302
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h369
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h412
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h163
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h570
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h699
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h790
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h175
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h42
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h152
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/dpseci.c643
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h399
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h190
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/meson.build14
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map4
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa_sec/Makefile42
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c2419
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.h452
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_log.h43
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa_sec/meson.build13
-rw-r--r--src/spdk/dpdk/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map4
-rw-r--r--src/spdk/dpdk/drivers/crypto/kasumi/Makefile38
-rw-r--r--src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd.c629
-rw-r--r--src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c319
-rw-r--r--src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_private.h75
-rw-r--r--src/spdk/dpdk/drivers/crypto/kasumi/rte_pmd_kasumi_version.map3
-rw-r--r--src/spdk/dpdk/drivers/crypto/meson.build9
-rw-r--r--src/spdk/dpdk/drivers/crypto/mvsam/Makefile42
-rw-r--r--src/spdk/dpdk/drivers/crypto/mvsam/meson.build21
-rw-r--r--src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_compat.h23
-rw-r--r--src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd.c937
-rw-r--r--src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c722
-rw-r--r--src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_private.h95
-rw-r--r--src/spdk/dpdk/drivers/crypto/mvsam/rte_pmd_mvsam_version.map3
-rw-r--r--src/spdk/dpdk/drivers/crypto/null/Makefile30
-rw-r--r--src/spdk/dpdk/drivers/crypto/null/meson.build6
-rw-r--r--src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd.c253
-rw-r--r--src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_ops.c331
-rw-r--r--src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_private.h52
-rw-r--r--src/spdk/dpdk/drivers/crypto/null/rte_pmd_null_crypto_version.map3
-rw-r--r--src/spdk/dpdk/drivers/crypto/openssl/Makefile29
-rw-r--r--src/spdk/dpdk/drivers/crypto/openssl/compat.h108
-rw-r--r--src/spdk/dpdk/drivers/crypto/openssl/meson.build11
-rw-r--r--src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c2194
-rw-r--r--src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c1264
-rw-r--r--src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_private.h185
-rw-r--r--src/spdk/dpdk/drivers/crypto/openssl/rte_pmd_openssl_version.map3
-rw-r--r--src/spdk/dpdk/drivers/crypto/qat/README7
-rw-r--r--src/spdk/dpdk/drivers/crypto/qat/meson.build18
-rw-r--r--src/spdk/dpdk/drivers/crypto/qat/qat_sym.c569
-rw-r--r--src/spdk/dpdk/drivers/crypto/qat/qat_sym.h174
-rw-r--r--src/spdk/dpdk/drivers/crypto/qat/qat_sym_capabilities.h557
-rw-r--r--src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.c331
-rw-r--r--src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.h41
-rw-r--r--src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.c1725
-rw-r--r--src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.h145
-rw-r--r--src/spdk/dpdk/drivers/crypto/scheduler/Makefile37
-rw-r--r--src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.c584
-rw-r--r--src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.h284
-rw-r--r--src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h56
-rw-r--r--src/spdk/dpdk/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map21
-rw-r--r--src/spdk/dpdk/drivers/crypto/scheduler/scheduler_failover.c220
-rw-r--r--src/spdk/dpdk/drivers/crypto/scheduler/scheduler_multicore.c413
-rw-r--r--src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pkt_size_distr.c420
-rw-r--r--src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd.c572
-rw-r--r--src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c545
-rw-r--r--src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_private.h116
-rw-r--r--src/spdk/dpdk/drivers/crypto/scheduler/scheduler_roundrobin.c212
-rw-r--r--src/spdk/dpdk/drivers/crypto/snow3g/Makefile38
-rw-r--r--src/spdk/dpdk/drivers/crypto/snow3g/rte_pmd_snow3g_version.map3
-rw-r--r--src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd.c625
-rw-r--r--src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c321
-rw-r--r--src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_private.h77
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/Makefile35
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/meson.build8
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map3
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_algs.h28
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_capabilities.h51
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.c1505
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.h64
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/virtio_logs.h91
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.c462
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.h253
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/virtio_ring.h137
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/virtio_rxtx.c527
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/virtqueue.c43
-rw-r--r--src/spdk/dpdk/drivers/crypto/virtio/virtqueue.h171
-rw-r--r--src/spdk/dpdk/drivers/crypto/zuc/Makefile38
-rw-r--r--src/spdk/dpdk/drivers/crypto/zuc/rte_pmd_zuc_version.map3
-rw-r--r--src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd.c548
-rw-r--r--src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_ops.c322
-rw-r--r--src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_private.h77
-rw-r--r--src/spdk/dpdk/drivers/event/Makefile18
-rw-r--r--src/spdk/dpdk/drivers/event/dpaa/Makefile38
-rw-r--r--src/spdk/dpdk/drivers/event/dpaa/dpaa_eventdev.c655
-rw-r--r--src/spdk/dpdk/drivers/event/dpaa/dpaa_eventdev.h81
-rw-r--r--src/spdk/dpdk/drivers/event/dpaa/meson.build10
-rw-r--r--src/spdk/dpdk/drivers/event/dpaa/rte_pmd_dpaa_event_version.map4
-rw-r--r--src/spdk/dpdk/drivers/event/dpaa2/Makefile41
-rw-r--r--src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c830
-rw-r--r--src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.h84
-rw-r--r--src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev_logs.h39
-rw-r--r--src/spdk/dpdk/drivers/event/dpaa2/dpaa2_hw_dpcon.c113
-rw-r--r--src/spdk/dpdk/drivers/event/dpaa2/meson.build11
-rw-r--r--src/spdk/dpdk/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map3
-rw-r--r--src/spdk/dpdk/drivers/event/meson.build7
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/Makefile54
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/meson.build14
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/rte_pmd_octeontx_event_version.map3
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/ssovf_evdev.c763
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/ssovf_evdev.h186
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/ssovf_evdev_selftest.c1523
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/ssovf_probe.c290
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/ssovf_worker.c263
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/ssovf_worker.h137
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/timvf_evdev.c405
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/timvf_evdev.h225
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/timvf_probe.c148
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/timvf_worker.c199
-rw-r--r--src/spdk/dpdk/drivers/event/octeontx/timvf_worker.h443
-rw-r--r--src/spdk/dpdk/drivers/event/opdl/Makefile39
-rw-r--r--src/spdk/dpdk/drivers/event/opdl/opdl_evdev.c766
-rw-r--r--src/spdk/dpdk/drivers/event/opdl/opdl_evdev.h314
-rw-r--r--src/spdk/dpdk/drivers/event/opdl/opdl_evdev_init.c943
-rw-r--r--src/spdk/dpdk/drivers/event/opdl/opdl_evdev_xstats.c180
-rw-r--r--src/spdk/dpdk/drivers/event/opdl/opdl_log.h21
-rw-r--r--src/spdk/dpdk/drivers/event/opdl/opdl_ring.c1272
-rw-r--r--src/spdk/dpdk/drivers/event/opdl/opdl_ring.h614
-rw-r--r--src/spdk/dpdk/drivers/event/opdl/opdl_test.c1057
-rw-r--r--src/spdk/dpdk/drivers/event/opdl/rte_pmd_evdev_opdl_version.map3
-rw-r--r--src/spdk/dpdk/drivers/event/skeleton/Makefile26
-rw-r--r--src/spdk/dpdk/drivers/event/skeleton/meson.build5
-rw-r--r--src/spdk/dpdk/drivers/event/skeleton/rte_pmd_skeleton_event_version.map4
-rw-r--r--src/spdk/dpdk/drivers/event/skeleton/skeleton_eventdev.c477
-rw-r--r--src/spdk/dpdk/drivers/event/skeleton/skeleton_eventdev.h41
-rw-r--r--src/spdk/dpdk/drivers/event/sw/Makefile40
-rw-r--r--src/spdk/dpdk/drivers/event/sw/event_ring.h153
-rw-r--r--src/spdk/dpdk/drivers/event/sw/iq_chunk.h196
-rw-r--r--src/spdk/dpdk/drivers/event/sw/meson.build11
-rw-r--r--src/spdk/dpdk/drivers/event/sw/rte_pmd_sw_event_version.map3
-rw-r--r--src/spdk/dpdk/drivers/event/sw/sw_evdev.c1082
-rw-r--r--src/spdk/dpdk/drivers/event/sw/sw_evdev.h292
-rw-r--r--src/spdk/dpdk/drivers/event/sw/sw_evdev_log.h23
-rw-r--r--src/spdk/dpdk/drivers/event/sw/sw_evdev_scheduler.c560
-rw-r--r--src/spdk/dpdk/drivers/event/sw/sw_evdev_selftest.c3324
-rw-r--r--src/spdk/dpdk/drivers/event/sw/sw_evdev_worker.c186
-rw-r--r--src/spdk/dpdk/drivers/event/sw/sw_evdev_xstats.c652
-rw-r--r--src/spdk/dpdk/drivers/mempool/Makefile17
-rw-r--r--src/spdk/dpdk/drivers/mempool/bucket/Makefile27
-rw-r--r--src/spdk/dpdk/drivers/mempool/bucket/meson.build9
-rw-r--r--src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket.c628
-rw-r--r--src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket_version.map4
-rw-r--r--src/spdk/dpdk/drivers/mempool/dpaa/Makefile35
-rw-r--r--src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.c337
-rw-r--r--src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.h63
-rw-r--r--src/spdk/dpdk/drivers/mempool/dpaa/meson.build12
-rw-r--r--src/spdk/dpdk/drivers/mempool/dpaa/rte_mempool_dpaa_version.map8
-rw-r--r--src/spdk/dpdk/drivers/mempool/dpaa2/Makefile36
-rw-r--r--src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.c450
-rw-r--r--src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.h67
-rw-r--r--src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h38
-rw-r--r--src/spdk/dpdk/drivers/mempool/dpaa2/meson.build12
-rw-r--r--src/spdk/dpdk/drivers/mempool/dpaa2/rte_dpaa2_mempool.h53
-rw-r--r--src/spdk/dpdk/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map17
-rw-r--r--src/spdk/dpdk/drivers/mempool/meson.build7
-rw-r--r--src/spdk/dpdk/drivers/mempool/octeontx/Makefile40
-rw-r--r--src/spdk/dpdk/drivers/mempool/octeontx/meson.build8
-rw-r--r--src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c806
-rw-r--r--src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.h114
-rw-r--r--src/spdk/dpdk/drivers/mempool/octeontx/octeontx_pool_logs.h22
-rw-r--r--src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx.c202
-rw-r--r--src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx_version.map3
-rw-r--r--src/spdk/dpdk/drivers/mempool/ring/Makefile21
-rw-r--r--src/spdk/dpdk/drivers/mempool/ring/meson.build4
-rw-r--r--src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring.c136
-rw-r--r--src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring_version.map4
-rw-r--r--src/spdk/dpdk/drivers/mempool/stack/Makefile24
-rw-r--r--src/spdk/dpdk/drivers/mempool/stack/meson.build4
-rw-r--r--src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack.c118
-rw-r--r--src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack_version.map4
-rw-r--r--src/spdk/dpdk/drivers/meson.build144
-rw-r--r--src/spdk/dpdk/drivers/net/Makefile73
-rw-r--r--src/spdk/dpdk/drivers/net/af_packet/Makefile29
-rw-r--r--src/spdk/dpdk/drivers/net/af_packet/meson.build7
-rw-r--r--src/spdk/dpdk/drivers/net/af_packet/rte_eth_af_packet.c1025
-rw-r--r--src/spdk/dpdk/drivers/net/af_packet/rte_pmd_af_packet_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/ark/Makefile41
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_ddm.c122
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_ddm.h148
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_ethdev.c1023
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.c643
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.h36
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.c438
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.h30
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_ext.h90
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_global.h133
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_logs.h90
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_mpu.c152
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_mpu.h125
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_pktchkr.c449
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_pktchkr.h88
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_pktdir.c56
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_pktdir.h41
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_pktgen.c471
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_pktgen.h79
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_rqp.c68
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_rqp.h57
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_udm.c197
-rw-r--r--src/spdk/dpdk/drivers/net/ark/ark_udm.h163
-rw-r--r--src/spdk/dpdk/drivers/net/ark/meson.build13
-rw-r--r--src/spdk/dpdk/drivers/net/ark/rte_pmd_ark_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/avf/Makefile54
-rw-r--r--src/spdk/dpdk/drivers/net/avf/avf.h216
-rw-r--r--src/spdk/dpdk/drivers/net/avf/avf_ethdev.c1446
-rw-r--r--src/spdk/dpdk/drivers/net/avf/avf_log.h44
-rw-r--r--src/spdk/dpdk/drivers/net/avf/avf_rxtx.c1962
-rw-r--r--src/spdk/dpdk/drivers/net/avf/avf_rxtx.h264
-rw-r--r--src/spdk/dpdk/drivers/net/avf/avf_rxtx_vec_common.h210
-rw-r--r--src/spdk/dpdk/drivers/net/avf/avf_rxtx_vec_sse.c656
-rw-r--r--src/spdk/dpdk/drivers/net/avf/avf_vchnl.c812
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/README19
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/avf_adminq.c1010
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/avf_adminq.h166
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/avf_adminq_cmd.h2842
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/avf_alloc.h65
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/avf_common.c1845
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/avf_devids.h43
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/avf_hmc.h245
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/avf_lan_hmc.h200
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/avf_osdep.h187
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/avf_prototype.h206
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/avf_register.h346
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/avf_status.h108
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/avf_type.h2024
-rw-r--r--src/spdk/dpdk/drivers/net/avf/base/virtchnl.h787
-rw-r--r--src/spdk/dpdk/drivers/net/avf/rte_pmd_avf_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/avp/Makefile31
-rw-r--r--src/spdk/dpdk/drivers/net/avp/avp_ethdev.c2279
-rw-r--r--src/spdk/dpdk/drivers/net/avp/avp_logs.h30
-rw-r--r--src/spdk/dpdk/drivers/net/avp/meson.build5
-rw-r--r--src/spdk/dpdk/drivers/net/avp/rte_avp_common.h382
-rw-r--r--src/spdk/dpdk/drivers/net/avp/rte_avp_fifo.h118
-rw-r--r--src/spdk/dpdk/drivers/net/avp/rte_pmd_avp_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/Makefile35
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/axgbe_common.h1710
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/axgbe_dev.c1103
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.c770
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.h586
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/axgbe_i2c.c331
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/axgbe_logs.h26
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/axgbe_mdio.c1066
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/axgbe_phy.h192
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/axgbe_phy_impl.c2191
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.c674
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.h186
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c93
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/meson.build19
-rw-r--r--src/spdk/dpdk/drivers/net/axgbe/rte_pmd_axgbe_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/Makefile35
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/bnx2x.c11785
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/bnx2x.h2013
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c746
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.h80
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/bnx2x_logs.h54
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.c483
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.h80
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.c1584
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.h609
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c677
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h334
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/ecore_fw_defs.h401
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/ecore_hsi.h6328
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/ecore_init.h817
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/ecore_init_ops.h863
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/ecore_mfw_req.h185
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/ecore_reg.h3642
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.c5428
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.h1766
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/elink.c13096
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/elink.h582
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/meson.build14
-rw-r--r--src/spdk/dpdk/drivers/net/bnx2x/rte_pmd_bnx2x_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/Makefile51
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt.h352
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.c162
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.h92
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_ethdev.c3552
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.c195
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.h137
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_flow.c1171
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.c3947
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.h177
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.c127
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.h26
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_nvm_defs.h70
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.c501
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.h76
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.c471
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.h61
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.c805
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.h113
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.c406
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.h31
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.c144
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.h47
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.c442
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.h77
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_util.c18
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_util.h11
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.c240
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.h65
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/hsi_struct_def_dpdk.h28211
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/meson.build20
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.c811
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.h326
-rw-r--r--src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt_version.map22
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/Makefile38
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/meson.build12
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/rte_eth_bond.h351
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c1614
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h334
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad_private.h300
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.c259
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.h113
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_api.c867
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_args.c306
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_flow.c228
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c3624
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_private.h324
-rw-r--r--src/spdk/dpdk/drivers/net/bonding/rte_pmd_bond_version.map55
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/Makefile58
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/base/adapter.h829
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/base/common.h541
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/base/t4_chip_type.h60
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c5544
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.h144
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/base/t4_msg.h541
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/base/t4_pci_id_tbl.h136
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs.h954
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs_values.h146
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/base/t4_tcb.h26
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/base/t4fw_interface.h2350
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.c880
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.h15
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.c193
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.h31
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/cxgbe.h68
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/cxgbe_compat.h256
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c1239
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.c1252
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.h235
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.c845
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.h42
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/cxgbe_main.c1903
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ofld.h89
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/cxgbe_pfvf.h45
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c201
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_main.c295
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/meson.build14
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/rte_pmd_cxgbe_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/cxgbe/sge.c2739
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa/Makefile45
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.c1500
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.h186
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.c960
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.h275
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa/meson.build14
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa.h36
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa_version.map12
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/Makefile44
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c333
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h251
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c2061
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h134
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h41
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c842
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/mc/dpkg.c74
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/mc/dpni.c1943
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h203
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h1135
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h605
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_net.h454
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/meson.build18
-rw-r--r--src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map12
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/Makefile80
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/README65
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.c1525
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.h100
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_82540.c717
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.c1268
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.h91
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_82542.c590
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.c1553
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.h56
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.c2030
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.h65
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.c3782
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.h522
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_api.c1382
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_api.h167
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_defines.h1514
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_hw.h1049
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.c1033
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.h110
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.c6125
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.h339
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.c2249
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.h95
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.c576
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.h95
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.c791
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.h105
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.c1385
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.h98
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.c83
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.h198
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.c4260
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.h341
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_regs.h695
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.c589
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.h295
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/base/meson.build37
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/e1000_ethdev.h517
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/e1000_logs.c26
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/e1000_logs.h50
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/em_ethdev.c1829
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/em_rxtx.c1999
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/igb_ethdev.c5692
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/igb_flow.c1911
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/igb_pf.c512
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/igb_regs.h194
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/igb_rxtx.c2952
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/meson.build17
-rw-r--r--src/spdk/dpdk/drivers/net/e1000/rte_pmd_e1000_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/ena/Makefile63
-rw-r--r--src/spdk/dpdk/drivers/net/ena/base/ena_com.c2770
-rw-r--r--src/spdk/dpdk/drivers/net/ena/base/ena_com.h1054
-rw-r--r--src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_admin_defs.h1412
-rw-r--r--src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_common_defs.h50
-rw-r--r--src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h960
-rw-r--r--src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_gen_info.h35
-rw-r--r--src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_includes.h37
-rw-r--r--src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_regs_defs.h171
-rw-r--r--src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.c526
-rw-r--r--src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.h169
-rw-r--r--src/spdk/dpdk/drivers/net/ena/base/ena_plat.h57
-rw-r--r--src/spdk/dpdk/drivers/net/ena/base/ena_plat_dpdk.h286
-rw-r--r--src/spdk/dpdk/drivers/net/ena/ena_ethdev.c2303
-rw-r--r--src/spdk/dpdk/drivers/net/ena/ena_ethdev.h211
-rw-r--r--src/spdk/dpdk/drivers/net/ena/ena_logs.h68
-rw-r--r--src/spdk/dpdk/drivers/net/ena/ena_platform.h59
-rw-r--r--src/spdk/dpdk/drivers/net/ena/meson.build11
-rw-r--r--src/spdk/dpdk/drivers/net/ena/rte_pmd_ena_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/enic/Makefile42
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/cq_desc.h97
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/cq_enet_desc.h244
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/rq_enet_desc.h46
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_cq.c77
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_cq.h77
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_dev.c1096
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_dev.h190
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_devcmd.h1125
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_enet.h66
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_intr.c48
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_intr.h96
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_nic.h60
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_resource.h67
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_rq.c147
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_rq.h143
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_rss.c23
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_rss.h32
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_stats.h56
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_wq.c175
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/vnic_wq.h164
-rw-r--r--src/spdk/dpdk/drivers/net/enic/base/wq_enet_desc.h84
-rw-r--r--src/spdk/dpdk/drivers/net/enic/enic.h336
-rw-r--r--src/spdk/dpdk/drivers/net/enic/enic_clsf.c491
-rw-r--r--src/spdk/dpdk/drivers/net/enic/enic_compat.h115
-rw-r--r--src/spdk/dpdk/drivers/net/enic/enic_ethdev.c1057
-rw-r--r--src/spdk/dpdk/drivers/net/enic/enic_flow.c1573
-rw-r--r--src/spdk/dpdk/drivers/net/enic/enic_main.c1772
-rw-r--r--src/spdk/dpdk/drivers/net/enic/enic_res.c300
-rw-r--r--src/spdk/dpdk/drivers/net/enic/enic_res.h74
-rw-r--r--src/spdk/dpdk/drivers/net/enic/enic_rxtx.c914
-rw-r--r--src/spdk/dpdk/drivers/net/enic/meson.build19
-rw-r--r--src/spdk/dpdk/drivers/net/enic/rte_pmd_enic_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/failsafe/Makefile46
-rw-r--r--src/spdk/dpdk/drivers/net/failsafe/failsafe.c362
-rw-r--r--src/spdk/dpdk/drivers/net/failsafe/failsafe_args.c521
-rw-r--r--src/spdk/dpdk/drivers/net/failsafe/failsafe_eal.c167
-rw-r--r--src/spdk/dpdk/drivers/net/failsafe/failsafe_ether.c518
-rw-r--r--src/spdk/dpdk/drivers/net/failsafe/failsafe_flow.c238
-rw-r--r--src/spdk/dpdk/drivers/net/failsafe/failsafe_intr.c536
-rw-r--r--src/spdk/dpdk/drivers/net/failsafe/failsafe_ops.c1041
-rw-r--r--src/spdk/dpdk/drivers/net/failsafe/failsafe_private.h486
-rw-r--r--src/spdk/dpdk/drivers/net/failsafe/failsafe_rxtx.c157
-rw-r--r--src/spdk/dpdk/drivers/net/failsafe/meson.build23
-rw-r--r--src/spdk/dpdk/drivers/net/failsafe/rte_pmd_failsafe_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/Makefile79
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.c363
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.h64
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.c579
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.h52
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.c2254
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.h326
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_osdep.h174
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.c2128
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.h187
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.c916
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.h194
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_type.h883
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.c675
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.h92
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/base/meson.build27
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/fm10k.h353
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/fm10k_ethdev.c3294
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/fm10k_logs.h46
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx.c696
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c882
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/meson.build16
-rw-r--r--src/spdk/dpdk/drivers/net/fm10k/rte_pmd_fm10k_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/Makefile114
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/README59
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.c1163
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.h166
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h2822
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_alloc.h65
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_common.c7814
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.c1381
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.h223
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_devids.h82
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.c175
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.h61
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.c369
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.h245
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c1406
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.h200
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_nvm.c1714
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_osdep.h243
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_prototype.h642
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_register.h5368
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_status.h108
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/i40e_type.h2024
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/meson.build28
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/base/virtchnl.h772
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.c12530
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.h1398
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_ethdev_vf.c2758
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_fdir.c2182
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_flow.c4976
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_logs.h43
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_pf.c1450
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_pf.h40
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_regs.h968
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.c3241
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.h803
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c645
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c792
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h218
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c597
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c626
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_tm.c971
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/i40e_vf_representor.c531
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/meson.build50
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.c3192
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.h1064
-rw-r--r--src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e_version.map67
-rw-r--r--src/spdk/dpdk/drivers/net/ifc/Makefile35
-rw-r--r--src/spdk/dpdk/drivers/net/ifc/base/ifcvf.c298
-rw-r--r--src/spdk/dpdk/drivers/net/ifc/base/ifcvf.h154
-rw-r--r--src/spdk/dpdk/drivers/net/ifc/base/ifcvf_osdep.h52
-rw-r--r--src/spdk/dpdk/drivers/net/ifc/ifcvf_vdpa.c793
-rw-r--r--src/spdk/dpdk/drivers/net/ifc/meson.build8
-rw-r--r--src/spdk/dpdk/drivers/net/ifc/rte_pmd_ifc_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/Makefile112
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/README62
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.c1440
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.h53
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c2640
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.h64
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.c1708
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.h225
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.c5445
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.h199
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.c733
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.h174
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.c372
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.h99
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.c610
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.h153
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.c257
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.h41
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.c769
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.h165
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h172
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c2713
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.h218
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_type.h4390
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c784
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.h145
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.c1063
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.h67
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.c4663
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.h124
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/base/meson.build32
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_82599_bypass.c285
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.c386
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.h39
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_api.h271
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_defines.h131
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c8595
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h782
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_fdir.c1649
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_flow.c3463
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c728
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.h117
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_logs.h44
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_pf.c844
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_regs.h347
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c5746
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h302
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h293
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c522
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c750
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_tm.c1031
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/ixgbe_vf_representor.c231
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/meson.build35
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c1244
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.h724
-rw-r--r--src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe_version.map64
-rw-r--r--src/spdk/dpdk/drivers/net/kni/Makefile33
-rw-r--r--src/spdk/dpdk/drivers/net/kni/meson.build8
-rw-r--r--src/spdk/dpdk/drivers/net/kni/rte_eth_kni.c497
-rw-r--r--src/spdk/dpdk/drivers/net/kni/rte_pmd_kni_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/Makefile32
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_reg.h165
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.c513
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.h63
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/base/lio_hw_defs.h239
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.c246
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.h102
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.c2154
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.h176
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/lio_logs.h58
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.c1806
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.h740
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/lio_struct.h661
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/meson.build8
-rw-r--r--src/spdk/dpdk/drivers/net/liquidio/rte_pmd_liquidio_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/meson.build35
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/Makefile128
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4.c1013
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4.h153
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_ethdev.c883
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.c1617
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.h60
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.c279
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.h89
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_intr.c406
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.c1181
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.h122
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_prm.h162
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_rxq.c936
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.c1394
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.h227
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_txq.c374
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.c189
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.h99
-rw-r--r--src/spdk/dpdk/drivers/net/mlx4/rte_pmd_mlx4_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/Makefile422
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5.c1690
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5.h418
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_defs.h136
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_ethdev.c1372
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.c3848
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_glue.c395
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_glue.h129
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_mac.c232
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.c1186
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.h120
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_nl.c916
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_nl_flow.c1248
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_prm.h364
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_rss.c229
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_rxmode.c122
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_rxq.c2191
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.c2373
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.h856
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c316
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.h119
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h1017
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h969
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_socket.c308
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_stats.c494
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_trigger.c404
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_txq.c903
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.h163
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/mlx5_vlan.c180
-rw-r--r--src/spdk/dpdk/drivers/net/mlx5/rte_pmd_mlx5_version.map3
-rw-r--r--src/spdk/dpdk/drivers/net/mvpp2/Makefile42
-rw-r--r--src/spdk/dpdk/drivers/net/mvpp2/meson.build25
-rw-r--r--src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.c2761
-rw-r--r--src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.h109
-rw-r--r--src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.c2779
-rw-r--r--src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.c894
-rw-r--r--src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.h107
-rw-r--r--src/spdk/dpdk/drivers/net/mvpp2/rte_pmd_mvpp2_version.map3
-rw-r--r--src/spdk/dpdk/drivers/net/netvsc/Makefile23
-rw-r--r--src/spdk/dpdk/drivers/net/netvsc/hn_ethdev.c761
-rw-r--r--src/spdk/dpdk/drivers/net/netvsc/hn_logs.h36
-rw-r--r--src/spdk/dpdk/drivers/net/netvsc/hn_nvs.c546
-rw-r--r--src/spdk/dpdk/drivers/net/netvsc/hn_nvs.h229
-rw-r--r--src/spdk/dpdk/drivers/net/netvsc/hn_rndis.c1099
-rw-r--r--src/spdk/dpdk/drivers/net/netvsc/hn_rndis.h32
-rw-r--r--src/spdk/dpdk/drivers/net/netvsc/hn_rxtx.c1334
-rw-r--r--src/spdk/dpdk/drivers/net/netvsc/hn_var.h158
-rw-r--r--src/spdk/dpdk/drivers/net/netvsc/meson.build10
-rw-r--r--src/spdk/dpdk/drivers/net/netvsc/ndis.h378
-rw-r--r--src/spdk/dpdk/drivers/net/netvsc/rndis.h414
-rw-r--r--src/spdk/dpdk/drivers/net/netvsc/rte_pmd_netvsc_version.map5
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/Makefile43
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/meson.build16
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfp_net.c3301
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfp_net_ctrl.h352
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfp_net_logs.h64
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfp_net_pmd.h472
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h722
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h35
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h592
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h40
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h26
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h781
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c845
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c858
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.c49
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.h19
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.c199
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.h85
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.c154
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.h21
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c424
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.c235
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.h86
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.c427
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h304
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c109
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_eth.c665
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.c266
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.h52
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c327
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.h61
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_target.h579
-rw-r--r--src/spdk/dpdk/drivers/net/nfp/rte_pmd_nfp_version.map3
-rw-r--r--src/spdk/dpdk/drivers/net/null/Makefile54
-rw-r--r--src/spdk/dpdk/drivers/net/null/meson.build5
-rw-r--r--src/spdk/dpdk/drivers/net/null/rte_eth_null.c707
-rw-r--r--src/spdk/dpdk/drivers/net/null/rte_pmd_null_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/Makefile56
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/base/meson.build21
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.c245
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.h122
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/base/octeontx_io.h128
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pki_var.h209
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.c142
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.h525
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c590
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.h69
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/meson.build15
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.c1321
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.h92
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/octeontx_logs.h33
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.c99
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.h109
-rw-r--r--src/spdk/dpdk/drivers/net/octeontx/rte_pmd_octeontx_version.map11
-rw-r--r--src/spdk/dpdk/drivers/net/pcap/Makefile34
-rw-r--r--src/spdk/dpdk/drivers/net/pcap/meson.build12
-rw-r--r--src/spdk/dpdk/drivers/net/pcap/rte_eth_pcap.c1138
-rw-r--r--src/spdk/dpdk/drivers/net/pcap/rte_pmd_pcap_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/qede/Makefile110
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/bcm_osal.c310
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/bcm_osal.h456
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/common_hsi.h1642
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore.h970
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_attn_values.h13285
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_chain.h810
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.c2227
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.h203
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_cxt_api.h38
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.c1535
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.h62
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx_api.h240
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_dev.c5715
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_dev_api.h704
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_reg_addr.h50
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_values.h31
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_common.h2467
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_debug_tools.h1112
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_eth.h2414
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_func.h136
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_tool.h405
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_hw.c1032
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_hw.h260
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_hw_defs.h58
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c2047
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.h492
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.c621
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.h110
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_int.c2683
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_int.h260
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_int_api.h346
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_iov_api.h753
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_iro.h203
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_iro_values.h115
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_l2.c2304
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_l2.h165
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_l2_api.h463
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.c4029
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.h569
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_mcp_api.h1215
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_mng_tlv.c1540
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_proto_if.h109
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_rt_defs.h538
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_sp_api.h64
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.c660
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.h163
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_spq.c1061
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_spq.h313
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.c4923
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.h297
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_status.h29
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_utils.h35
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_vf.c1920
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_vf.h330
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_vf_api.h178
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/ecore_vfpf_if.h703
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/eth_common.h684
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/mcp_public.h1924
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/nvm_cfg.h1983
-rw-r--r--src/spdk/dpdk/drivers/net/qede/base/reg_addr.h1216
-rw-r--r--src/spdk/dpdk/drivers/net/qede/qede_ethdev.c3452
-rw-r--r--src/spdk/dpdk/drivers/net/qede/qede_ethdev.h259
-rw-r--r--src/spdk/dpdk/drivers/net/qede/qede_fdir.c470
-rw-r--r--src/spdk/dpdk/drivers/net/qede/qede_if.h189
-rw-r--r--src/spdk/dpdk/drivers/net/qede/qede_logs.h76
-rw-r--r--src/spdk/dpdk/drivers/net/qede/qede_main.c784
-rw-r--r--src/spdk/dpdk/drivers/net/qede/qede_rxtx.c2108
-rw-r--r--src/spdk/dpdk/drivers/net/qede/qede_rxtx.h276
-rw-r--r--src/spdk/dpdk/drivers/net/qede/rte_pmd_qede_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/ring/Makefile31
-rw-r--r--src/spdk/dpdk/drivers/net/ring/meson.build6
-rw-r--r--src/spdk/dpdk/drivers/net/ring/rte_eth_ring.c693
-rw-r--r--src/spdk/dpdk/drivers/net/ring/rte_eth_ring.h57
-rw-r--r--src/spdk/dpdk/drivers/net/ring/rte_pmd_ring_version.map14
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/Makefile132
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/README16
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_ev.c1449
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_filter.c1750
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_image.c885
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_impl.h1233
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_intr.c176
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_mac.c1048
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_mcdi.c325
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_nic.c2463
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_nvram.c2388
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_phy.c691
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_rx.c1230
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_signed_image_layout.h62
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_tlv_layout.h1011
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_tx.c778
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/ef10_vpd.c450
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx.h3064
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_bootcfg.c586
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_check.h367
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_crc32.c98
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_ev.c1455
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_filter.c1554
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_hash.c304
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_impl.h1267
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_intr.c565
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_lic.c1703
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_mac.c950
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.c2367
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.h395
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_mon.c237
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_nic.c1072
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_nvram.c1054
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_phy.c547
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_phy_ids.h27
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_port.c230
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_regs.h3846
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_regs_ef10.h727
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi.h18144
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi_aoe.h2914
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_regs_pci.h2332
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_rx.c1661
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_sram.c309
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_tunnel.c469
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_tx.c1136
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_types.h1634
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/efx_vpd.c998
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/hunt_impl.h50
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/hunt_nic.c231
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.c560
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.h50
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/medford2_impl.h35
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/medford2_nic.c162
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/medford_impl.h35
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/medford_nic.c160
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/meson.build76
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/siena_flash.h204
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/siena_impl.h427
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/siena_mac.c471
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/siena_mcdi.c243
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/siena_nic.c795
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/siena_nvram.c720
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/siena_phy.c782
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/siena_sram.c154
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/base/siena_vpd.c601
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/efsys.h773
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/meson.build61
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/rte_pmd_sfc_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc.c1103
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc.h355
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_debug.h36
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_dp.c79
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_dp.h106
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_dp_rx.h238
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_dp_tx.h187
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_ef10.h119
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c723
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx.c675
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx_ev.h175
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_ef10_tx.c667
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_ethdev.c2102
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_ev.c921
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_ev.h107
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_filter.c129
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_filter.h50
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_flow.c2504
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_flow.h70
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_intr.c323
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.c123
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.h85
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_log.h102
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_mcdi.c310
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_port.c554
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_rx.c1597
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_rx.h166
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_tso.c180
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_tweak.h45
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_tx.c1064
-rw-r--r--src/spdk/dpdk/drivers/net/sfc/sfc_tx.h148
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/Makefile53
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/conn.c332
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/conn.h49
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/firmware.cli21
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/hash_func.h359
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/hash_func_arm64.h261
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/meson.build18
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/parser.c703
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/parser.h68
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.c594
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.h65
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_action.c389
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c5259
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_internals.h910
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_link.c98
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_mempool.c103
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_pipeline.c966
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_swq.c114
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tap.c118
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_thread.c2929
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tm.c3412
-rw-r--r--src/spdk/dpdk/drivers/net/softnic/rte_pmd_softnic_version.map13
-rw-r--r--src/spdk/dpdk/drivers/net/szedata2/Makefile32
-rw-r--r--src/spdk/dpdk/drivers/net/szedata2/meson.build7
-rw-r--r--src/spdk/dpdk/drivers/net/szedata2/rte_eth_szedata2.c1934
-rw-r--r--src/spdk/dpdk/drivers/net/szedata2/rte_eth_szedata2.h87
-rw-r--r--src/spdk/dpdk/drivers/net/szedata2/rte_pmd_szedata2_version.map3
-rw-r--r--src/spdk/dpdk/drivers/net/szedata2/szedata2_logs.h22
-rw-r--r--src/spdk/dpdk/drivers/net/tap/Makefile99
-rw-r--r--src/spdk/dpdk/drivers/net/tap/rte_eth_tap.c2140
-rw-r--r--src/spdk/dpdk/drivers/net/tap/rte_eth_tap.h99
-rw-r--r--src/spdk/dpdk/drivers/net/tap/rte_pmd_tap_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/tap/tap_bpf.h117
-rw-r--r--src/spdk/dpdk/drivers/net/tap/tap_bpf_api.c190
-rw-r--r--src/spdk/dpdk/drivers/net/tap/tap_bpf_insns.h1696
-rw-r--r--src/spdk/dpdk/drivers/net/tap/tap_bpf_program.c224
-rw-r--r--src/spdk/dpdk/drivers/net/tap/tap_flow.c2191
-rw-r--r--src/spdk/dpdk/drivers/net/tap/tap_flow.h68
-rw-r--r--src/spdk/dpdk/drivers/net/tap/tap_intr.c110
-rw-r--r--src/spdk/dpdk/drivers/net/tap/tap_log.h10
-rw-r--r--src/spdk/dpdk/drivers/net/tap/tap_netlink.c340
-rw-r--r--src/spdk/dpdk/drivers/net/tap/tap_netlink.h42
-rw-r--r--src/spdk/dpdk/drivers/net/tap/tap_rss.h40
-rw-r--r--src/spdk/dpdk/drivers/net/tap/tap_tcmsgs.c296
-rw-r--r--src/spdk/dpdk/drivers/net/tap/tap_tcmsgs.h37
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/Makefile43
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/base/meson.build15
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/base/nicvf_bsvf.c44
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/base/nicvf_bsvf.h48
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw.c918
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw.h218
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h1200
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/base/nicvf_mbox.c432
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/base/nicvf_mbox.h216
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/base/nicvf_plat.h81
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/meson.build20
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/nicvf_ethdev.c2280
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/nicvf_ethdev.h134
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/nicvf_logs.h44
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/nicvf_rxtx.c669
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/nicvf_rxtx.h118
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/nicvf_struct.h116
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/nicvf_svf.c50
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/nicvf_svf.h38
-rw-r--r--src/spdk/dpdk/drivers/net/thunderx/rte_pmd_thunderx_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/vdev_netvsc/Makefile32
-rw-r--r--src/spdk/dpdk/drivers/net/vdev_netvsc/rte_pmd_vdev_netvsc_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c835
-rw-r--r--src/spdk/dpdk/drivers/net/vhost/Makefile33
-rw-r--r--src/spdk/dpdk/drivers/net/vhost/meson.build8
-rw-r--r--src/spdk/dpdk/drivers/net/vhost/rte_eth_vhost.c1466
-rw-r--r--src/spdk/dpdk/drivers/net/vhost/rte_eth_vhost.h59
-rw-r--r--src/spdk/dpdk/drivers/net/vhost/rte_pmd_vhost_version.map13
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/Makefile48
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/meson.build27
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/rte_pmd_virtio_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_ethdev.c2219
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_ethdev.h101
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_logs.h36
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_pci.c722
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_pci.h342
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_ring.h134
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_rxtx.c1555
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_rxtx.h63
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple.c57
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple.h58
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple_neon.c208
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple_sse.c194
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost.h94
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel.c362
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.c116
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.h39
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_user.c504
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c606
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.h56
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtio_user_ethdev.c687
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtqueue.c104
-rw-r--r--src/spdk/dpdk/drivers/net/virtio/virtqueue.h371
-rw-r--r--src/spdk/dpdk/drivers/net/vmxnet3/Makefile56
-rw-r--r--src/spdk/dpdk/drivers/net/vmxnet3/base/README47
-rw-r--r--src/spdk/dpdk/drivers/net/vmxnet3/base/upt1_defs.h94
-rw-r--r--src/spdk/dpdk/drivers/net/vmxnet3/base/vmware_pack_begin.h3
-rw-r--r--src/spdk/dpdk/drivers/net/vmxnet3/base/vmware_pack_end.h3
-rw-r--r--src/spdk/dpdk/drivers/net/vmxnet3/base/vmxnet3_defs.h821
-rw-r--r--src/spdk/dpdk/drivers/net/vmxnet3/base/vmxnet3_osdep.h19
-rw-r--r--src/spdk/dpdk/drivers/net/vmxnet3/rte_pmd_vmxnet3_version.map4
-rw-r--r--src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c1331
-rw-r--r--src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h181
-rw-r--r--src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h40
-rw-r--r--src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ring.h156
-rw-r--r--src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c1345
-rw-r--r--src/spdk/dpdk/drivers/raw/Makefile14
-rw-r--r--src/spdk/dpdk/drivers/raw/dpaa2_cmdif/Makefile36
-rw-r--r--src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c297
-rw-r--r--src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h46
-rw-r--r--src/spdk/dpdk/drivers/raw/dpaa2_cmdif/meson.build10
-rw-r--r--src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h35
-rw-r--r--src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map4
-rw-r--r--src/spdk/dpdk/drivers/raw/dpaa2_qdma/Makefile37
-rw-r--r--src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c1001
-rw-r--r--src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.h150
-rw-r--r--src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h46
-rw-r--r--src/spdk/dpdk/drivers/raw/dpaa2_qdma/meson.build10
-rw-r--r--src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h286
-rw-r--r--src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map20
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/Makefile36
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/Makefile26
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/README31
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.c294
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.h28
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_compat.h58
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_defines.h1663
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c821
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h11
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c253
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h168
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme.c734
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c301
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c381
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c715
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c352
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_hw.h127
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_port.c388
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c144
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/meson.build34
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_debug.c99
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_debug.h19
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.c381
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.h253
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c145
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h279
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_osdep.h79
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h75
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h45
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/ifpga_rawdev.c617
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/ifpga_rawdev.h37
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/meson.build15
-rw-r--r--src/spdk/dpdk/drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map4
-rw-r--r--src/spdk/dpdk/drivers/raw/meson.build7
-rw-r--r--src/spdk/dpdk/drivers/raw/skeleton_rawdev/Makefile28
-rw-r--r--src/spdk/dpdk/drivers/raw/skeleton_rawdev/meson.build6
-rw-r--r--src/spdk/dpdk/drivers/raw/skeleton_rawdev/rte_pmd_skeleton_rawdev_version.map4
-rw-r--r--src/spdk/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev.c765
-rw-r--r--src/spdk/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev.h136
-rw-r--r--src/spdk/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c462
1339 files changed, 800375 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/Makefile b/src/spdk/dpdk/drivers/Makefile
new file mode 100644
index 00000000..75660765
--- /dev/null
+++ b/src/spdk/dpdk/drivers/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2015 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-y += common
+DIRS-y += bus
+DIRS-y += mempool
+DEPDIRS-mempool := common bus
+DIRS-y += net
+DEPDIRS-net := common bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += baseband
+DEPDIRS-baseband := common bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
+DEPDIRS-crypto := common bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += common/qat
+DEPDIRS-common/qat := bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += compress
+DEPDIRS-compress := bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event
+DEPDIRS-event := common bus mempool net
+DIRS-$(CONFIG_RTE_LIBRTE_RAWDEV) += raw
+DEPDIRS-raw := common bus mempool net event
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/baseband/Makefile b/src/spdk/dpdk/drivers/baseband/Makefile
new file mode 100644
index 00000000..4ec83b0a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/baseband/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+core-libs := librte_eal librte_mbuf librte_mempool librte_ring
+core-libs += librte_bbdev librte_kvargs librte_cfgfile
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL) += null
+DEPDIRS-null = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW) += turbo_sw
+DEPDIRS-turbo_sw = $(core-libs)
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/baseband/null/Makefile b/src/spdk/dpdk/drivers/baseband/null/Makefile
new file mode 100644
index 00000000..f885a97b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/baseband/null/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+# library name
+LIB = librte_pmd_bbdev_null.a
+
+# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -lrte_kvargs
+LDLIBS += -lrte_bbdev
+LDLIBS += -lrte_bus_vdev
+
+# versioning export map
+EXPORT_MAP := rte_pmd_bbdev_null_version.map
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL) += bbdev_null.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/baseband/null/bbdev_null.c b/src/spdk/dpdk/drivers/baseband/null/bbdev_null.c
new file mode 100644
index 00000000..2f251510
--- /dev/null
+++ b/src/spdk/dpdk/drivers/baseband/null/bbdev_null.c
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_kvargs.h>
+
+#include <rte_bbdev.h>
+#include <rte_bbdev_pmd.h>
+
+#define DRIVER_NAME baseband_null
+
+/* NULL BBDev logging ID */
+static int bbdev_null_logtype;
+
+/* Helper macro for logging */
+#define rte_bbdev_log(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, bbdev_null_logtype, fmt "\n", ##__VA_ARGS__)
+
+#define rte_bbdev_log_debug(fmt, ...) \
+ rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
+ ##__VA_ARGS__)
+
+/* Initialisation params structure that can be used by null BBDEV driver */
+struct bbdev_null_params {
+ int socket_id; /*< Null BBDEV socket */
+ uint16_t queues_num; /*< Null BBDEV queues number */
+};
+
+/* Accecptable params for null BBDEV devices */
+#define BBDEV_NULL_MAX_NB_QUEUES_ARG "max_nb_queues"
+#define BBDEV_NULL_SOCKET_ID_ARG "socket_id"
+
+static const char * const bbdev_null_valid_params[] = {
+ BBDEV_NULL_MAX_NB_QUEUES_ARG,
+ BBDEV_NULL_SOCKET_ID_ARG
+};
+
+/* private data structure */
+struct bbdev_private {
+ unsigned int max_nb_queues; /**< Max number of queues */
+};
+
+/* queue */
+struct bbdev_queue {
+ struct rte_ring *processed_pkts; /* Ring for processed packets */
+} __rte_cache_aligned;
+
+/* Get device info */
+static void
+info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
+{
+ struct bbdev_private *internals = dev->data->dev_private;
+
+ static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
+ RTE_BBDEV_END_OF_CAPABILITIES_LIST(),
+ };
+
+ static struct rte_bbdev_queue_conf default_queue_conf = {
+ .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
+ };
+
+ default_queue_conf.socket = dev->data->socket_id;
+
+ dev_info->driver_name = RTE_STR(DRIVER_NAME);
+ dev_info->max_num_queues = internals->max_nb_queues;
+ dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
+ dev_info->hardware_accelerated = false;
+ dev_info->max_dl_queue_priority = 0;
+ dev_info->max_ul_queue_priority = 0;
+ dev_info->default_queue_conf = default_queue_conf;
+ dev_info->capabilities = bbdev_capabilities;
+ dev_info->cpu_flag_reqs = NULL;
+ dev_info->min_alignment = 0;
+
+ rte_bbdev_log_debug("got device info from %u", dev->data->dev_id);
+}
+
+/* Release queue */
+static int
+q_release(struct rte_bbdev *dev, uint16_t q_id)
+{
+ struct bbdev_queue *q = dev->data->queues[q_id].queue_private;
+
+ if (q != NULL) {
+ rte_ring_free(q->processed_pkts);
+ rte_free(q);
+ dev->data->queues[q_id].queue_private = NULL;
+ }
+
+ rte_bbdev_log_debug("released device queue %u:%u",
+ dev->data->dev_id, q_id);
+ return 0;
+}
+
+/* Setup a queue */
+static int
+q_setup(struct rte_bbdev *dev, uint16_t q_id,
+ const struct rte_bbdev_queue_conf *queue_conf)
+{
+ struct bbdev_queue *q;
+ char ring_name[RTE_RING_NAMESIZE];
+ snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME) "%u:%u",
+ dev->data->dev_id, q_id);
+
+ /* Allocate the queue data structure. */
+ q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q == NULL) {
+ rte_bbdev_log(ERR, "Failed to allocate queue memory");
+ return -ENOMEM;
+ }
+
+ q->processed_pkts = rte_ring_create(ring_name, queue_conf->queue_size,
+ queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (q->processed_pkts == NULL) {
+ rte_bbdev_log(ERR, "Failed to create ring");
+ goto free_q;
+ }
+
+ dev->data->queues[q_id].queue_private = q;
+ rte_bbdev_log_debug("setup device queue %s", ring_name);
+ return 0;
+
+free_q:
+ rte_free(q);
+ return -EFAULT;
+}
+
+static const struct rte_bbdev_ops pmd_ops = {
+ .info_get = info_get,
+ .queue_setup = q_setup,
+ .queue_release = q_release
+};
+
+/* Enqueue decode burst */
+static uint16_t
+enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
+{
+ struct bbdev_queue *q = q_data->queue_private;
+ uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+
+ q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
+ q_data->queue_stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+/* Enqueue encode burst */
+static uint16_t
+enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
+{
+ struct bbdev_queue *q = q_data->queue_private;
+ uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+
+ q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
+ q_data->queue_stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+/* Dequeue decode burst */
+static uint16_t
+dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
+{
+ struct bbdev_queue *q = q_data->queue_private;
+ uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ q_data->queue_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/* Dequeue encode burst */
+static uint16_t
+dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
+{
+ struct bbdev_queue *q = q_data->queue_private;
+ uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ q_data->queue_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/* Parse 16bit integer from string argument */
+static inline int
+parse_u16_arg(const char *key, const char *value, void *extra_args)
+{
+ uint16_t *u16 = extra_args;
+ unsigned int long result;
+
+ if ((value == NULL) || (extra_args == NULL))
+ return -EINVAL;
+ errno = 0;
+ result = strtoul(value, NULL, 0);
+ if ((result >= (1 << 16)) || (errno != 0)) {
+ rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);
+ return -ERANGE;
+ }
+ *u16 = (uint16_t)result;
+ return 0;
+}
+
+/* Parse parameters used to create device */
+static int
+parse_bbdev_null_params(struct bbdev_null_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args, bbdev_null_valid_params);
+ if (kvlist == NULL)
+ return -EFAULT;
+
+ ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[0],
+ &parse_u16_arg, &params->queues_num);
+ if (ret < 0)
+ goto exit;
+
+ ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[1],
+ &parse_u16_arg, &params->socket_id);
+ if (ret < 0)
+ goto exit;
+
+ if (params->socket_id >= RTE_MAX_NUMA_NODES) {
+ rte_bbdev_log(ERR, "Invalid socket, must be < %u",
+ RTE_MAX_NUMA_NODES);
+ goto exit;
+ }
+ }
+
+exit:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+/* Create device */
+static int
+null_bbdev_create(struct rte_vdev_device *vdev,
+ struct bbdev_null_params *init_params)
+{
+ struct rte_bbdev *bbdev;
+ const char *name = rte_vdev_device_name(vdev);
+
+ bbdev = rte_bbdev_allocate(name);
+ if (bbdev == NULL)
+ return -ENODEV;
+
+ bbdev->data->dev_private = rte_zmalloc_socket(name,
+ sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
+ init_params->socket_id);
+ if (bbdev->data->dev_private == NULL) {
+ rte_bbdev_release(bbdev);
+ return -ENOMEM;
+ }
+
+ bbdev->dev_ops = &pmd_ops;
+ bbdev->device = &vdev->device;
+ bbdev->data->socket_id = init_params->socket_id;
+ bbdev->intr_handle = NULL;
+
+ /* register rx/tx burst functions for data path */
+ bbdev->dequeue_enc_ops = dequeue_enc_ops;
+ bbdev->dequeue_dec_ops = dequeue_dec_ops;
+ bbdev->enqueue_enc_ops = enqueue_enc_ops;
+ bbdev->enqueue_dec_ops = enqueue_dec_ops;
+ ((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
+ init_params->queues_num;
+
+ return 0;
+}
+
+/* Initialise device */
+static int
+null_bbdev_probe(struct rte_vdev_device *vdev)
+{
+ struct bbdev_null_params init_params = {
+ rte_socket_id(),
+ RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
+ };
+ const char *name;
+ const char *input_args;
+
+ if (vdev == NULL)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+ parse_bbdev_null_params(&init_params, input_args);
+
+ rte_bbdev_log_debug("Init %s on NUMA node %d with max queues: %d",
+ name, init_params.socket_id, init_params.queues_num);
+
+ return null_bbdev_create(vdev, &init_params);
+}
+
+/* Uninitialise device */
+static int
+null_bbdev_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_bbdev *bbdev;
+ const char *name;
+
+ if (vdev == NULL)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ bbdev = rte_bbdev_get_named_dev(name);
+ if (bbdev == NULL)
+ return -EINVAL;
+
+ rte_free(bbdev->data->dev_private);
+
+ return rte_bbdev_release(bbdev);
+}
+
+static struct rte_vdev_driver bbdev_null_pmd_drv = {
+ .probe = null_bbdev_probe,
+ .remove = null_bbdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_null_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
+ BBDEV_NULL_MAX_NB_QUEUES_ARG"=<int> "
+ BBDEV_NULL_SOCKET_ID_ARG"=<int>");
+RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, bbdev_null);
+
+RTE_INIT(null_bbdev_init_log)
+{
+ bbdev_null_logtype = rte_log_register("pmd.bb.null");
+ if (bbdev_null_logtype >= 0)
+ rte_log_set_level(bbdev_null_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/baseband/null/rte_pmd_bbdev_null_version.map b/src/spdk/dpdk/drivers/baseband/null/rte_pmd_bbdev_null_version.map
new file mode 100644
index 00000000..58b94270
--- /dev/null
+++ b/src/spdk/dpdk/drivers/baseband/null/rte_pmd_bbdev_null_version.map
@@ -0,0 +1,3 @@
+DPDK_18.02 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/baseband/turbo_sw/Makefile b/src/spdk/dpdk/drivers/baseband/turbo_sw/Makefile
new file mode 100644
index 00000000..79eb5547
--- /dev/null
+++ b/src/spdk/dpdk/drivers/baseband/turbo_sw/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(FLEXRAN_SDK),)
+$(error "Please define FLEXRAN_SDK environment variable")
+endif
+
+# library name
+LIB = librte_pmd_bbdev_turbo_sw.a
+
+# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -lrte_kvargs
+LDLIBS += -lrte_bbdev
+LDLIBS += -lrte_bus_vdev
+
+# versioning export map
+EXPORT_MAP := rte_pmd_bbdev_turbo_sw_version.map
+
+# external library dependencies
+CFLAGS += -I$(FLEXRAN_SDK)/lib_common
+CFLAGS += -I$(FLEXRAN_SDK)/lib_turbo
+CFLAGS += -I$(FLEXRAN_SDK)/lib_crc
+CFLAGS += -I$(FLEXRAN_SDK)/lib_rate_matching
+
+LDLIBS += -L$(FLEXRAN_SDK)/lib_crc -lcrc
+LDLIBS += -L$(FLEXRAN_SDK)/lib_turbo -lturbo
+LDLIBS += -L$(FLEXRAN_SDK)/lib_rate_matching -lrate_matching
+LDLIBS += -L$(FLEXRAN_SDK)/lib_common -lcommon
+LDLIBS += -lstdc++ -lirc -limf -lipps
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW) += bbdev_turbo_software.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/baseband/turbo_sw/bbdev_turbo_software.c b/src/spdk/dpdk/drivers/baseband/turbo_sw/bbdev_turbo_software.c
new file mode 100644
index 00000000..8ceb2769
--- /dev/null
+++ b/src/spdk/dpdk/drivers/baseband/turbo_sw/bbdev_turbo_software.c
@@ -0,0 +1,1307 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_kvargs.h>
+#include <rte_cycles.h>
+
+#include <rte_bbdev.h>
+#include <rte_bbdev_pmd.h>
+
+#include <phy_turbo.h>
+#include <phy_crc.h>
+#include <phy_rate_match.h>
+#include <divide.h>
+
+#define DRIVER_NAME baseband_turbo_sw
+
+/* Turbo SW PMD logging ID */
+static int bbdev_turbo_sw_logtype;
+
+/* Helper macro for logging */
+#define rte_bbdev_log(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, bbdev_turbo_sw_logtype, fmt "\n", \
+ ##__VA_ARGS__)
+
+#define rte_bbdev_log_debug(fmt, ...) \
+ rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
+ ##__VA_ARGS__)
+
+#define DEINT_INPUT_BUF_SIZE (((RTE_BBDEV_MAX_CB_SIZE >> 3) + 1) * 48)
+#define DEINT_OUTPUT_BUF_SIZE (DEINT_INPUT_BUF_SIZE * 6)
+#define ADAPTER_OUTPUT_BUF_SIZE ((RTE_BBDEV_MAX_CB_SIZE + 4) * 48)
+
+/* private data structure */
+struct bbdev_private {
+ unsigned int max_nb_queues; /**< Max number of queues */
+};
+
+/* Initialisation params structure that can be used by Turbo SW driver */
+struct turbo_sw_params {
+ int socket_id; /*< Turbo SW device socket */
+ uint16_t queues_num; /*< Turbo SW device queues number */
+};
+
+/* Accecptable params for Turbo SW devices */
+#define TURBO_SW_MAX_NB_QUEUES_ARG "max_nb_queues"
+#define TURBO_SW_SOCKET_ID_ARG "socket_id"
+
+static const char * const turbo_sw_valid_params[] = {
+ TURBO_SW_MAX_NB_QUEUES_ARG,
+ TURBO_SW_SOCKET_ID_ARG
+};
+
+/* queue */
+struct turbo_sw_queue {
+ /* Ring for processed (encoded/decoded) operations which are ready to
+ * be dequeued.
+ */
+ struct rte_ring *processed_pkts;
+ /* Stores input for turbo encoder (used when CRC attachment is
+ * performed
+ */
+ uint8_t *enc_in;
+ /* Stores output from turbo encoder */
+ uint8_t *enc_out;
+ /* Alpha gamma buf for bblib_turbo_decoder() function */
+ int8_t *ag;
+ /* Temp buf for bblib_turbo_decoder() function */
+ uint16_t *code_block;
+ /* Input buf for bblib_rate_dematching_lte() function */
+ uint8_t *deint_input;
+ /* Output buf for bblib_rate_dematching_lte() function */
+ uint8_t *deint_output;
+ /* Output buf for bblib_turbodec_adapter_lte() function */
+ uint8_t *adapter_output;
+ /* Operation type of this queue */
+ enum rte_bbdev_op_type type;
+} __rte_cache_aligned;
+
+/* Calculate index based on Table 5.1.3-3 from TS34.212 */
+static inline int32_t
+compute_idx(uint16_t k)
+{
+ int32_t result = 0;
+
+ if (k < RTE_BBDEV_MIN_CB_SIZE || k > RTE_BBDEV_MAX_CB_SIZE)
+ return -1;
+
+ if (k > 2048) {
+ if ((k - 2048) % 64 != 0)
+ result = -1;
+
+ result = 124 + (k - 2048) / 64;
+ } else if (k <= 512) {
+ if ((k - 40) % 8 != 0)
+ result = -1;
+
+ result = (k - 40) / 8 + 1;
+ } else if (k <= 1024) {
+ if ((k - 512) % 16 != 0)
+ result = -1;
+
+ result = 60 + (k - 512) / 16;
+ } else { /* 1024 < k <= 2048 */
+ if ((k - 1024) % 32 != 0)
+ result = -1;
+
+ result = 92 + (k - 1024) / 32;
+ }
+
+ return result;
+}
+
+/* Read flag value 0/1 from bitmap */
+static inline bool
+check_bit(uint32_t bitmap, uint32_t bitmask)
+{
+ return bitmap & bitmask;
+}
+
+/* Get device info */
+static void
+info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
+{
+ struct bbdev_private *internals = dev->data->dev_private;
+
+ static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
+ {
+ .type = RTE_BBDEV_OP_TURBO_DEC,
+ .cap.turbo_dec = {
+ .capability_flags =
+ RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
+ RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN |
+ RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
+ RTE_BBDEV_TURBO_CRC_TYPE_24B |
+ RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP |
+ RTE_BBDEV_TURBO_EARLY_TERMINATION,
+ .max_llr_modulus = 16,
+ .num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
+ .num_buffers_hard_out =
+ RTE_BBDEV_MAX_CODE_BLOCKS,
+ .num_buffers_soft_out = 0,
+ }
+ },
+ {
+ .type = RTE_BBDEV_OP_TURBO_ENC,
+ .cap.turbo_enc = {
+ .capability_flags =
+ RTE_BBDEV_TURBO_CRC_24B_ATTACH |
+ RTE_BBDEV_TURBO_CRC_24A_ATTACH |
+ RTE_BBDEV_TURBO_RATE_MATCH |
+ RTE_BBDEV_TURBO_RV_INDEX_BYPASS,
+ .num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
+ .num_buffers_dst = RTE_BBDEV_MAX_CODE_BLOCKS,
+ }
+ },
+ RTE_BBDEV_END_OF_CAPABILITIES_LIST()
+ };
+
+ static struct rte_bbdev_queue_conf default_queue_conf = {
+ .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
+ };
+
+ static const enum rte_cpu_flag_t cpu_flag = RTE_CPUFLAG_SSE4_2;
+
+ default_queue_conf.socket = dev->data->socket_id;
+
+ dev_info->driver_name = RTE_STR(DRIVER_NAME);
+ dev_info->max_num_queues = internals->max_nb_queues;
+ dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
+ dev_info->hardware_accelerated = false;
+ dev_info->max_dl_queue_priority = 0;
+ dev_info->max_ul_queue_priority = 0;
+ dev_info->default_queue_conf = default_queue_conf;
+ dev_info->capabilities = bbdev_capabilities;
+ dev_info->cpu_flag_reqs = &cpu_flag;
+ dev_info->min_alignment = 64;
+
+ rte_bbdev_log_debug("got device info from %u\n", dev->data->dev_id);
+}
+
+/* Release queue */
+static int
+q_release(struct rte_bbdev *dev, uint16_t q_id)
+{
+ struct turbo_sw_queue *q = dev->data->queues[q_id].queue_private;
+
+ if (q != NULL) {
+ rte_ring_free(q->processed_pkts);
+ rte_free(q->enc_out);
+ rte_free(q->enc_in);
+ rte_free(q->ag);
+ rte_free(q->code_block);
+ rte_free(q->deint_input);
+ rte_free(q->deint_output);
+ rte_free(q->adapter_output);
+ rte_free(q);
+ dev->data->queues[q_id].queue_private = NULL;
+ }
+
+ rte_bbdev_log_debug("released device queue %u:%u",
+ dev->data->dev_id, q_id);
+ return 0;
+}
+
+/* Setup a queue */
+static int
+q_setup(struct rte_bbdev *dev, uint16_t q_id,
+ const struct rte_bbdev_queue_conf *queue_conf)
+{
+ int ret;
+ struct turbo_sw_queue *q;
+ char name[RTE_RING_NAMESIZE];
+
+ /* Allocate the queue data structure. */
+ q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q == NULL) {
+ rte_bbdev_log(ERR, "Failed to allocate queue memory");
+ return -ENOMEM;
+ }
+
+ /* Allocate memory for encoder output. */
+ ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_enc_o%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ return -ENAMETOOLONG;
+ }
+ q->enc_out = rte_zmalloc_socket(name,
+ ((RTE_BBDEV_MAX_TB_SIZE >> 3) + 3) *
+ sizeof(*q->enc_out) * 3,
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->enc_out == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ goto free_q;
+ }
+
+ /* Allocate memory for rate matching output. */
+ ret = snprintf(name, RTE_RING_NAMESIZE,
+ RTE_STR(DRIVER_NAME)"_enc_i%u:%u", dev->data->dev_id,
+ q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ return -ENAMETOOLONG;
+ }
+ q->enc_in = rte_zmalloc_socket(name,
+ (RTE_BBDEV_MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->enc_in == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ goto free_q;
+ }
+
+ /* Allocate memory for Aplha Gamma temp buffer. */
+ ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_ag%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ return -ENAMETOOLONG;
+ }
+ q->ag = rte_zmalloc_socket(name,
+ RTE_BBDEV_MAX_CB_SIZE * 10 * sizeof(*q->ag),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->ag == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ goto free_q;
+ }
+
+ /* Allocate memory for code block temp buffer. */
+ ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_cb%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ return -ENAMETOOLONG;
+ }
+ q->code_block = rte_zmalloc_socket(name,
+ RTE_BBDEV_MAX_CB_SIZE * sizeof(*q->code_block),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->code_block == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ goto free_q;
+ }
+
+ /* Allocate memory for Deinterleaver input. */
+ ret = snprintf(name, RTE_RING_NAMESIZE,
+ RTE_STR(DRIVER_NAME)"_de_i%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ return -ENAMETOOLONG;
+ }
+ q->deint_input = rte_zmalloc_socket(name,
+ DEINT_INPUT_BUF_SIZE * sizeof(*q->deint_input),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->deint_input == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ goto free_q;
+ }
+
+ /* Allocate memory for Deinterleaver output. */
+ ret = snprintf(name, RTE_RING_NAMESIZE,
+ RTE_STR(DRIVER_NAME)"_de_o%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ return -ENAMETOOLONG;
+ }
+ q->deint_output = rte_zmalloc_socket(NULL,
+ DEINT_OUTPUT_BUF_SIZE * sizeof(*q->deint_output),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->deint_output == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ goto free_q;
+ }
+
+ /* Allocate memory for Adapter output. */
+ ret = snprintf(name, RTE_RING_NAMESIZE,
+ RTE_STR(DRIVER_NAME)"_ada_o%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ return -ENAMETOOLONG;
+ }
+ q->adapter_output = rte_zmalloc_socket(NULL,
+ ADAPTER_OUTPUT_BUF_SIZE * sizeof(*q->adapter_output),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->adapter_output == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ goto free_q;
+ }
+
+ /* Create ring for packets awaiting to be dequeued. */
+ ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ return -ENAMETOOLONG;
+ }
+ q->processed_pkts = rte_ring_create(name, queue_conf->queue_size,
+ queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (q->processed_pkts == NULL) {
+ rte_bbdev_log(ERR, "Failed to create ring for %s", name);
+ goto free_q;
+ }
+
+ q->type = queue_conf->op_type;
+
+ dev->data->queues[q_id].queue_private = q;
+ rte_bbdev_log_debug("setup device queue %s", name);
+ return 0;
+
+free_q:
+ rte_ring_free(q->processed_pkts);
+ rte_free(q->enc_out);
+ rte_free(q->enc_in);
+ rte_free(q->ag);
+ rte_free(q->code_block);
+ rte_free(q->deint_input);
+ rte_free(q->deint_output);
+ rte_free(q->adapter_output);
+ rte_free(q);
+ return -EFAULT;
+}
+
+static const struct rte_bbdev_ops pmd_ops = {
+ .info_get = info_get,
+ .queue_setup = q_setup,
+ .queue_release = q_release
+};
+
+/* Checks if the encoder input buffer is correct.
+ * Returns 0 if it's valid, -1 otherwise.
+ */
+static inline int
+is_enc_input_valid(const uint16_t k, const int32_t k_idx,
+ const uint16_t in_length)
+{
+ if (k_idx < 0) {
+ rte_bbdev_log(ERR, "K Index is invalid");
+ return -1;
+ }
+
+ if (in_length - (k >> 3) < 0) {
+ rte_bbdev_log(ERR,
+ "Mismatch between input length (%u bytes) and K (%u bits)",
+ in_length, k);
+ return -1;
+ }
+
+ if (k > RTE_BBDEV_MAX_CB_SIZE) {
+ rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
+ k, RTE_BBDEV_MAX_CB_SIZE);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Checks if the decoder input buffer is correct.
+ * Returns 0 if it's valid, -1 otherwise.
+ */
+static inline int
+is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length)
+{
+ if (k_idx < 0) {
+ rte_bbdev_log(ERR, "K index is invalid");
+ return -1;
+ }
+
+ if (in_length - kw < 0) {
+ rte_bbdev_log(ERR,
+ "Mismatch between input length (%u) and kw (%u)",
+ in_length, kw);
+ return -1;
+ }
+
+ if (kw > RTE_BBDEV_MAX_KW) {
+ rte_bbdev_log(ERR, "Input length (%u) is too big, max: %d",
+ kw, RTE_BBDEV_MAX_KW);
+ return -1;
+ }
+
+ return 0;
+}
+
+static inline void
+process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
+ uint8_t r, uint8_t c, uint16_t k, uint16_t ncb,
+ uint32_t e, struct rte_mbuf *m_in, struct rte_mbuf *m_out,
+ uint16_t in_offset, uint16_t out_offset, uint16_t total_left,
+ struct rte_bbdev_stats *q_stats)
+{
+ int ret;
+ int16_t k_idx;
+ uint16_t m;
+ uint8_t *in, *out0, *out1, *out2, *tmp_out, *rm_out;
+ uint64_t first_3_bytes = 0;
+ struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
+ struct bblib_crc_request crc_req;
+ struct bblib_crc_response crc_resp;
+ struct bblib_turbo_encoder_request turbo_req;
+ struct bblib_turbo_encoder_response turbo_resp;
+ struct bblib_rate_match_dl_request rm_req;
+ struct bblib_rate_match_dl_response rm_resp;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ uint64_t start_time;
+#else
+ RTE_SET_USED(q_stats);
+#endif
+
+ k_idx = compute_idx(k);
+ in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);
+
+ /* CRC24A (for TB) */
+ if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH) &&
+ (enc->code_block_mode == 1)) {
+ ret = is_enc_input_valid(k - 24, k_idx, total_left);
+ if (ret != 0) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+ crc_req.data = in;
+ crc_req.len = k - 24;
+ /* Check if there is a room for CRC bits if not use
+ * the temporary buffer.
+ */
+ if (rte_pktmbuf_append(m_in, 3) == NULL) {
+ rte_memcpy(q->enc_in, in, (k - 24) >> 3);
+ in = q->enc_in;
+ } else {
+ /* Store 3 first bytes of next CB as they will be
+ * overwritten by CRC bytes. If it is the last CB then
+ * there is no point to store 3 next bytes and this
+ * if..else branch will be omitted.
+ */
+ first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
+ }
+
+ crc_resp.data = in;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+ bblib_lte_crc24a_gen(&crc_req, &crc_resp);
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->offload_time += rte_rdtsc_precise() - start_time;
+#endif
+ } else if (enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) {
+ /* CRC24B */
+ ret = is_enc_input_valid(k - 24, k_idx, total_left);
+ if (ret != 0) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+ crc_req.data = in;
+ crc_req.len = k - 24;
+ /* Check if there is a room for CRC bits if this is the last
+ * CB in TB. If not use temporary buffer.
+ */
+ if ((c - r == 1) && (rte_pktmbuf_append(m_in, 3) == NULL)) {
+ rte_memcpy(q->enc_in, in, (k - 24) >> 3);
+ in = q->enc_in;
+ } else if (c - r > 1) {
+ /* Store 3 first bytes of next CB as they will be
+ * overwritten by CRC bytes. If it is the last CB then
+ * there is no point to store 3 next bytes and this
+ * if..else branch will be omitted.
+ */
+ first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
+ }
+
+ crc_resp.data = in;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+ bblib_lte_crc24b_gen(&crc_req, &crc_resp);
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->offload_time += rte_rdtsc_precise() - start_time;
+#endif
+ } else {
+ ret = is_enc_input_valid(k, k_idx, total_left);
+ if (ret != 0) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+ }
+
+ /* Turbo encoder */
+
+ /* Each bit layer output from turbo encoder is (k+4) bits long, i.e.
+ * input length + 4 tail bits. That's (k/8) + 1 bytes after rounding up.
+ * So dst_data's length should be 3*(k/8) + 3 bytes.
+ * In Rate-matching bypass case outputs pointers passed to encoder
+ * (out0, out1 and out2) can directly point to addresses of output from
+ * turbo_enc entity.
+ */
+ if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
+ out0 = q->enc_out;
+ out1 = RTE_PTR_ADD(out0, (k >> 3) + 1);
+ out2 = RTE_PTR_ADD(out1, (k >> 3) + 1);
+ } else {
+ out0 = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3) * 3 + 2);
+ if (out0 == NULL) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR,
+ "Too little space in output mbuf");
+ return;
+ }
+ enc->output.length += (k >> 3) * 3 + 2;
+ /* rte_bbdev_op_data.offset can be different than the
+ * offset of the appended bytes
+ */
+ out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
+ out1 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
+ out_offset + (k >> 3) + 1);
+ out2 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
+ out_offset + 2 * ((k >> 3) + 1));
+ }
+
+ turbo_req.case_id = k_idx;
+ turbo_req.input_win = in;
+ turbo_req.length = k >> 3;
+ turbo_resp.output_win_0 = out0;
+ turbo_resp.output_win_1 = out1;
+ turbo_resp.output_win_2 = out2;
+
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+
+ if (bblib_turbo_encoder(&turbo_req, &turbo_resp) != 0) {
+ op->status |= 1 << RTE_BBDEV_DRV_ERROR;
+ rte_bbdev_log(ERR, "Turbo Encoder failed");
+ return;
+ }
+
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->offload_time += rte_rdtsc_precise() - start_time;
+#endif
+
+ /* Restore 3 first bytes of next CB if they were overwritten by CRC*/
+ if (first_3_bytes != 0)
+ *((uint64_t *)&in[(k - 32) >> 3]) = first_3_bytes;
+
+ /* Rate-matching */
+ if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
+ uint8_t mask_id;
+ /* Integer round up division by 8 */
+ uint16_t out_len = (e + 7) >> 3;
+ /* The mask array is indexed using E%8. E is an even number so
+ * there are only 4 possible values.
+ */
+ const uint8_t mask_out[] = {0xFF, 0xC0, 0xF0, 0xFC};
+
+ /* get output data starting address */
+ rm_out = (uint8_t *)rte_pktmbuf_append(m_out, out_len);
+ if (rm_out == NULL) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR,
+ "Too little space in output mbuf");
+ return;
+ }
+ /* rte_bbdev_op_data.offset can be different than the offset
+ * of the appended bytes
+ */
+ rm_out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
+
+ /* index of current code block */
+ rm_req.r = r;
+ /* total number of code block */
+ rm_req.C = c;
+ /* For DL - 1, UL - 0 */
+ rm_req.direction = 1;
+ /* According to 3ggp 36.212 Spec 5.1.4.1.2 section Nsoft, KMIMO
+ * and MDL_HARQ are used for Ncb calculation. As Ncb is already
+ * known we can adjust those parameters
+ */
+ rm_req.Nsoft = ncb * rm_req.C;
+ rm_req.KMIMO = 1;
+ rm_req.MDL_HARQ = 1;
+ /* According to 3ggp 36.212 Spec 5.1.4.1.2 section Nl, Qm and G
+ * are used for E calculation. As E is already known we can
+ * adjust those parameters
+ */
+ rm_req.NL = e;
+ rm_req.Qm = 1;
+ rm_req.G = rm_req.NL * rm_req.Qm * rm_req.C;
+
+ rm_req.rvidx = enc->rv_index;
+ rm_req.Kidx = k_idx - 1;
+ rm_req.nLen = k + 4;
+ rm_req.tin0 = out0;
+ rm_req.tin1 = out1;
+ rm_req.tin2 = out2;
+ rm_resp.output = rm_out;
+ rm_resp.OutputLen = out_len;
+ if (enc->op_flags & RTE_BBDEV_TURBO_RV_INDEX_BYPASS)
+ rm_req.bypass_rvidx = 1;
+ else
+ rm_req.bypass_rvidx = 0;
+
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+
+ if (bblib_rate_match_dl(&rm_req, &rm_resp) != 0) {
+ op->status |= 1 << RTE_BBDEV_DRV_ERROR;
+ rte_bbdev_log(ERR, "Rate matching failed");
+ return;
+ }
+
+ /* SW fills an entire last byte even if E%8 != 0. Clear the
+ * superfluous data bits for consistency with HW device.
+ */
+ mask_id = (e & 7) >> 1;
+ rm_out[out_len - 1] &= mask_out[mask_id];
+
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->offload_time += rte_rdtsc_precise() - start_time;
+#endif
+
+ enc->output.length += rm_resp.OutputLen;
+ } else {
+ /* Rate matching is bypassed */
+
+ /* Completing last byte of out0 (where 4 tail bits are stored)
+ * by moving first 4 bits from out1
+ */
+ tmp_out = (uint8_t *) --out1;
+ *tmp_out = *tmp_out | ((*(tmp_out + 1) & 0xF0) >> 4);
+ tmp_out++;
+ /* Shifting out1 data by 4 bits to the left */
+ for (m = 0; m < k >> 3; ++m) {
+ uint8_t *first = tmp_out;
+ uint8_t second = *(tmp_out + 1);
+ *first = (*first << 4) | ((second & 0xF0) >> 4);
+ tmp_out++;
+ }
+ /* Shifting out2 data by 8 bits to the left */
+ for (m = 0; m < (k >> 3) + 1; ++m) {
+ *tmp_out = *(tmp_out + 1);
+ tmp_out++;
+ }
+ *tmp_out = 0;
+ }
+}
+
+static inline void
+enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
+ struct rte_bbdev_stats *queue_stats)
+{
+ uint8_t c, r, crc24_bits = 0;
+ uint16_t k, ncb;
+ uint32_t e;
+ struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
+ uint16_t in_offset = enc->input.offset;
+ uint16_t out_offset = enc->output.offset;
+ struct rte_mbuf *m_in = enc->input.data;
+ struct rte_mbuf *m_out = enc->output.data;
+ uint16_t total_left = enc->input.length;
+
+ /* Clear op status */
+ op->status = 0;
+
+ if (total_left > RTE_BBDEV_MAX_TB_SIZE >> 3) {
+ rte_bbdev_log(ERR, "TB size (%u) is too big, max: %d",
+ total_left, RTE_BBDEV_MAX_TB_SIZE);
+ op->status = 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+
+ if (m_in == NULL || m_out == NULL) {
+ rte_bbdev_log(ERR, "Invalid mbuf pointer");
+ op->status = 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+
+ if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) ||
+ (enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH))
+ crc24_bits = 24;
+
+ if (enc->code_block_mode == 0) { /* For Transport Block mode */
+ c = enc->tb_params.c;
+ r = enc->tb_params.r;
+ } else {/* For Code Block mode */
+ c = 1;
+ r = 0;
+ }
+
+ while (total_left > 0 && r < c) {
+ if (enc->code_block_mode == 0) {
+ k = (r < enc->tb_params.c_neg) ?
+ enc->tb_params.k_neg : enc->tb_params.k_pos;
+ ncb = (r < enc->tb_params.c_neg) ?
+ enc->tb_params.ncb_neg : enc->tb_params.ncb_pos;
+ e = (r < enc->tb_params.cab) ?
+ enc->tb_params.ea : enc->tb_params.eb;
+ } else {
+ k = enc->cb_params.k;
+ ncb = enc->cb_params.ncb;
+ e = enc->cb_params.e;
+ }
+
+ process_enc_cb(q, op, r, c, k, ncb, e, m_in,
+ m_out, in_offset, out_offset, total_left,
+ queue_stats);
+ /* Update total_left */
+ total_left -= (k - crc24_bits) >> 3;
+ /* Update offsets for next CBs (if exist) */
+ in_offset += (k - crc24_bits) >> 3;
+ if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH)
+ out_offset += e >> 3;
+ else
+ out_offset += (k >> 3) * 3 + 2;
+ r++;
+ }
+
+ /* check if all input data was processed */
+ if (total_left != 0) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR,
+ "Mismatch between mbuf length and included CBs sizes");
+ }
+}
+
+static inline uint16_t
+enqueue_enc_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_enc_op **ops,
+ uint16_t nb_ops, struct rte_bbdev_stats *queue_stats)
+{
+ uint16_t i;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ queue_stats->offload_time = 0;
+#endif
+
+ for (i = 0; i < nb_ops; ++i)
+ enqueue_enc_one_op(q, ops[i], queue_stats);
+
+ return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
+ NULL);
+}
+
+/* Remove the padding bytes from a cyclic buffer.
+ * The input buffer is a data stream wk as described in 3GPP TS 36.212 section
+ * 5.1.4.1.2 starting from w0 and with length Ncb bytes.
+ * The output buffer is a data stream wk with pruned padding bytes. It's length
+ * is 3*D bytes and the order of non-padding bytes is preserved.
+ */
+static inline void
+remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k,
+ uint16_t ncb)
+{
+ uint32_t in_idx, out_idx, c_idx;
+ const uint32_t d = k + 4;
+ const uint32_t kw = (ncb / 3);
+ const uint32_t nd = kw - d;
+ const uint32_t r_subblock = kw / RTE_BBDEV_C_SUBBLOCK;
+ /* Inter-column permutation pattern */
+ const uint32_t P[RTE_BBDEV_C_SUBBLOCK] = {0, 16, 8, 24, 4, 20, 12, 28,
+ 2, 18, 10, 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13,
+ 29, 3, 19, 11, 27, 7, 23, 15, 31};
+ in_idx = 0;
+ out_idx = 0;
+
+ /* The padding bytes are at the first Nd positions in the first row. */
+ for (c_idx = 0; in_idx < kw; in_idx += r_subblock, ++c_idx) {
+ if (P[c_idx] < nd) {
+ rte_memcpy(&out[out_idx], &in[in_idx + 1],
+ r_subblock - 1);
+ out_idx += r_subblock - 1;
+ } else {
+ rte_memcpy(&out[out_idx], &in[in_idx], r_subblock);
+ out_idx += r_subblock;
+ }
+ }
+
+ /* First and second parity bits sub-blocks are interlaced. */
+ for (c_idx = 0; in_idx < ncb - 2 * r_subblock;
+ in_idx += 2 * r_subblock, ++c_idx) {
+ uint32_t second_block_c_idx = P[c_idx];
+ uint32_t third_block_c_idx = P[c_idx] + 1;
+
+ if (second_block_c_idx < nd && third_block_c_idx < nd) {
+ rte_memcpy(&out[out_idx], &in[in_idx + 2],
+ 2 * r_subblock - 2);
+ out_idx += 2 * r_subblock - 2;
+ } else if (second_block_c_idx >= nd &&
+ third_block_c_idx >= nd) {
+ rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock);
+ out_idx += 2 * r_subblock;
+ } else if (second_block_c_idx < nd) {
+ out[out_idx++] = in[in_idx];
+ rte_memcpy(&out[out_idx], &in[in_idx + 2],
+ 2 * r_subblock - 2);
+ out_idx += 2 * r_subblock - 2;
+ } else {
+ rte_memcpy(&out[out_idx], &in[in_idx + 1],
+ 2 * r_subblock - 1);
+ out_idx += 2 * r_subblock - 1;
+ }
+ }
+
+ /* Last interlaced row is different - its last byte is the only padding
+ * byte. We can have from 4 up to 28 padding bytes (Nd) per sub-block.
+ * After interlacing the 1st and 2nd parity sub-blocks we can have 0, 1
+ * or 2 padding bytes each time we make a step of 2 * R_SUBBLOCK bytes
+ * (moving to another column). 2nd parity sub-block uses the same
+ * inter-column permutation pattern as the systematic and 1st parity
+ * sub-blocks but it adds '1' to the resulting index and calculates the
+ * modulus of the result and Kw. Last column is mapped to itself (id 31)
+ * so the first byte taken from the 2nd parity sub-block will be the
+ * 32nd (31+1) byte, then 64th etc. (step is C_SUBBLOCK == 32) and the
+ * last byte will be the first byte from the sub-block:
+ * (32 + 32 * (R_SUBBLOCK-1)) % Kw == Kw % Kw == 0. Nd can't be smaller
+ * than 4 so we know that bytes with ids 0, 1, 2 and 3 must be the
+ * padding bytes. The bytes from the 1st parity sub-block are the bytes
+ * from the 31st column - Nd can't be greater than 28 so we are sure
+ * that there are no padding bytes in 31st column.
+ */
+ rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock - 1);
+}
+
+static inline void
+move_padding_bytes(const uint8_t *in, uint8_t *out, uint16_t k,
+ uint16_t ncb)
+{
+ uint16_t d = k + 4;
+ uint16_t kpi = ncb / 3;
+ uint16_t nd = kpi - d;
+
+ rte_memcpy(&out[nd], in, d);
+ rte_memcpy(&out[nd + kpi + 64], &in[kpi], d);
+ rte_memcpy(&out[(nd - 1) + 2 * (kpi + 64)], &in[2 * kpi], d);
+}
+
+static inline void
+process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
+ uint8_t c, uint16_t k, uint16_t kw, struct rte_mbuf *m_in,
+ struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset,
+ bool check_crc_24b, uint16_t crc24_overlap, uint16_t total_left)
+{
+ int ret;
+ int32_t k_idx;
+ int32_t iter_cnt;
+ uint8_t *in, *out, *adapter_input;
+ int32_t ncb, ncb_without_null;
+ struct bblib_turbo_adapter_ul_response adapter_resp;
+ struct bblib_turbo_adapter_ul_request adapter_req;
+ struct bblib_turbo_decoder_request turbo_req;
+ struct bblib_turbo_decoder_response turbo_resp;
+ struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
+
+ k_idx = compute_idx(k);
+
+ ret = is_dec_input_valid(k_idx, kw, total_left);
+ if (ret != 0) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+
+ in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);
+ ncb = kw;
+ ncb_without_null = (k + 4) * 3;
+
+ if (check_bit(dec->op_flags, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE)) {
+ struct bblib_deinterleave_ul_request deint_req;
+ struct bblib_deinterleave_ul_response deint_resp;
+
+ /* SW decoder accepts only a circular buffer without NULL bytes
+ * so the input needs to be converted.
+ */
+ remove_nulls_from_circular_buf(in, q->deint_input, k, ncb);
+
+ deint_req.pharqbuffer = q->deint_input;
+ deint_req.ncb = ncb_without_null;
+ deint_resp.pinteleavebuffer = q->deint_output;
+ bblib_deinterleave_ul(&deint_req, &deint_resp);
+ } else
+ move_padding_bytes(in, q->deint_output, k, ncb);
+
+ adapter_input = q->deint_output;
+
+ if (dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN)
+ adapter_req.isinverted = 1;
+ else if (dec->op_flags & RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN)
+ adapter_req.isinverted = 0;
+ else {
+ op->status |= 1 << RTE_BBDEV_DRV_ERROR;
+ rte_bbdev_log(ERR, "LLR format wasn't specified");
+ return;
+ }
+
+ adapter_req.ncb = ncb_without_null;
+ adapter_req.pinteleavebuffer = adapter_input;
+ adapter_resp.pharqout = q->adapter_output;
+ bblib_turbo_adapter_ul(&adapter_req, &adapter_resp);
+
+ out = (uint8_t *)rte_pktmbuf_append(m_out, ((k - crc24_overlap) >> 3));
+ if (out == NULL) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR, "Too little space in output mbuf");
+ return;
+ }
+ /* rte_bbdev_op_data.offset can be different than the offset of the
+ * appended bytes
+ */
+ out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
+ if (check_crc_24b)
+ turbo_req.c = c + 1;
+ else
+ turbo_req.c = c;
+ turbo_req.input = (int8_t *)q->adapter_output;
+ turbo_req.k = k;
+ turbo_req.k_idx = k_idx;
+ turbo_req.max_iter_num = dec->iter_max;
+ turbo_req.early_term_disable = !check_bit(dec->op_flags,
+ RTE_BBDEV_TURBO_EARLY_TERMINATION);
+ turbo_resp.ag_buf = q->ag;
+ turbo_resp.cb_buf = q->code_block;
+ turbo_resp.output = out;
+ iter_cnt = bblib_turbo_decoder(&turbo_req, &turbo_resp);
+ dec->hard_output.length += (k >> 3);
+
+ if (iter_cnt > 0) {
+ /* Temporary solution for returned iter_count from SDK */
+ iter_cnt = (iter_cnt - 1) / 2;
+ dec->iter_count = RTE_MAX(iter_cnt, dec->iter_count);
+ } else {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR, "Turbo Decoder failed");
+ return;
+ }
+}
+
+static inline void
+enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
+{
+ uint8_t c, r = 0;
+ uint16_t kw, k = 0;
+ uint16_t crc24_overlap = 0;
+ struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
+ struct rte_mbuf *m_in = dec->input.data;
+ struct rte_mbuf *m_out = dec->hard_output.data;
+ uint16_t in_offset = dec->input.offset;
+ uint16_t total_left = dec->input.length;
+ uint16_t out_offset = dec->hard_output.offset;
+
+ /* Clear op status */
+ op->status = 0;
+
+ if (m_in == NULL || m_out == NULL) {
+ rte_bbdev_log(ERR, "Invalid mbuf pointer");
+ op->status = 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+
+ if (dec->code_block_mode == 0) { /* For Transport Block mode */
+ c = dec->tb_params.c;
+ } else { /* For Code Block mode */
+ k = dec->cb_params.k;
+ c = 1;
+ }
+
+ if ((c > 1) && !check_bit(dec->op_flags,
+ RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP))
+ crc24_overlap = 24;
+
+ while (total_left > 0) {
+ if (dec->code_block_mode == 0)
+ k = (r < dec->tb_params.c_neg) ?
+ dec->tb_params.k_neg : dec->tb_params.k_pos;
+
+ /* Calculates circular buffer size (Kw).
+ * According to 3gpp 36.212 section 5.1.4.2
+ * Kw = 3 * Kpi,
+ * where:
+ * Kpi = nCol * nRow
+ * where nCol is 32 and nRow can be calculated from:
+ * D =< nCol * nRow
+ * where D is the size of each output from turbo encoder block
+ * (k + 4).
+ */
+ kw = RTE_ALIGN_CEIL(k + 4, RTE_BBDEV_C_SUBBLOCK) * 3;
+
+ process_dec_cb(q, op, c, k, kw, m_in, m_out, in_offset,
+ out_offset, check_bit(dec->op_flags,
+ RTE_BBDEV_TURBO_CRC_TYPE_24B), crc24_overlap,
+ total_left);
+ /* To keep CRC24 attached to end of Code block, use
+ * RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP flag as it
+ * removed by default once verified.
+ */
+
+ /* Update total_left */
+ total_left -= kw;
+ /* Update offsets for next CBs (if exist) */
+ in_offset += kw;
+ out_offset += ((k - crc24_overlap) >> 3);
+ r++;
+ }
+ if (total_left != 0) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR,
+ "Mismatch between mbuf length and included Circular buffer sizes");
+ }
+}
+
+static inline uint16_t
+enqueue_dec_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_dec_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; ++i)
+ enqueue_dec_one_op(q, ops[i]);
+
+ return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
+ NULL);
+}
+
+/* Enqueue burst */
+static uint16_t
+enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
+{
+ void *queue = q_data->queue_private;
+ struct turbo_sw_queue *q = queue;
+ uint16_t nb_enqueued = 0;
+
+ nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops, &q_data->queue_stats);
+
+ q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
+ q_data->queue_stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+/* Enqueue burst */
+static uint16_t
+enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
+{
+ void *queue = q_data->queue_private;
+ struct turbo_sw_queue *q = queue;
+ uint16_t nb_enqueued = 0;
+
+ nb_enqueued = enqueue_dec_all_ops(q, ops, nb_ops);
+
+ q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
+ q_data->queue_stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+/* Dequeue decode burst */
+static uint16_t
+dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
+{
+ struct turbo_sw_queue *q = q_data->queue_private;
+ uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ q_data->queue_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/* Dequeue encode burst */
+static uint16_t
+dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
+{
+ struct turbo_sw_queue *q = q_data->queue_private;
+ uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ q_data->queue_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/* Parse 16bit integer from string argument */
+static inline int
+parse_u16_arg(const char *key, const char *value, void *extra_args)
+{
+ uint16_t *u16 = extra_args;
+ unsigned int long result;
+
+ if ((value == NULL) || (extra_args == NULL))
+ return -EINVAL;
+ errno = 0;
+ result = strtoul(value, NULL, 0);
+ if ((result >= (1 << 16)) || (errno != 0)) {
+ rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);
+ return -ERANGE;
+ }
+ *u16 = (uint16_t)result;
+ return 0;
+}
+
+/* Parse parameters used to create device */
+static int
+parse_turbo_sw_params(struct turbo_sw_params *params, const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args, turbo_sw_valid_params);
+ if (kvlist == NULL)
+ return -EFAULT;
+
+ ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[0],
+ &parse_u16_arg, &params->queues_num);
+ if (ret < 0)
+ goto exit;
+
+ ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[1],
+ &parse_u16_arg, &params->socket_id);
+ if (ret < 0)
+ goto exit;
+
+ if (params->socket_id >= RTE_MAX_NUMA_NODES) {
+ rte_bbdev_log(ERR, "Invalid socket, must be < %u",
+ RTE_MAX_NUMA_NODES);
+ goto exit;
+ }
+ }
+
+exit:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+/* Create device */
+static int
+turbo_sw_bbdev_create(struct rte_vdev_device *vdev,
+ struct turbo_sw_params *init_params)
+{
+ struct rte_bbdev *bbdev;
+ const char *name = rte_vdev_device_name(vdev);
+
+ bbdev = rte_bbdev_allocate(name);
+ if (bbdev == NULL)
+ return -ENODEV;
+
+ bbdev->data->dev_private = rte_zmalloc_socket(name,
+ sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
+ init_params->socket_id);
+ if (bbdev->data->dev_private == NULL) {
+ rte_bbdev_release(bbdev);
+ return -ENOMEM;
+ }
+
+ bbdev->dev_ops = &pmd_ops;
+ bbdev->device = &vdev->device;
+ bbdev->data->socket_id = init_params->socket_id;
+ bbdev->intr_handle = NULL;
+
+ /* register rx/tx burst functions for data path */
+ bbdev->dequeue_enc_ops = dequeue_enc_ops;
+ bbdev->dequeue_dec_ops = dequeue_dec_ops;
+ bbdev->enqueue_enc_ops = enqueue_enc_ops;
+ bbdev->enqueue_dec_ops = enqueue_dec_ops;
+ ((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
+ init_params->queues_num;
+
+ return 0;
+}
+
+/* Initialise device */
+static int
+turbo_sw_bbdev_probe(struct rte_vdev_device *vdev)
+{
+ struct turbo_sw_params init_params = {
+ rte_socket_id(),
+ RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
+ };
+ const char *name;
+ const char *input_args;
+
+ if (vdev == NULL)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+ parse_turbo_sw_params(&init_params, input_args);
+
+ rte_bbdev_log_debug(
+ "Initialising %s on NUMA node %d with max queues: %d\n",
+ name, init_params.socket_id, init_params.queues_num);
+
+ return turbo_sw_bbdev_create(vdev, &init_params);
+}
+
+/* Uninitialise device */
+static int
+turbo_sw_bbdev_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_bbdev *bbdev;
+ const char *name;
+
+ if (vdev == NULL)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ bbdev = rte_bbdev_get_named_dev(name);
+ if (bbdev == NULL)
+ return -EINVAL;
+
+ rte_free(bbdev->data->dev_private);
+
+ return rte_bbdev_release(bbdev);
+}
+
+static struct rte_vdev_driver bbdev_turbo_sw_pmd_drv = {
+ .probe = turbo_sw_bbdev_probe,
+ .remove = turbo_sw_bbdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_turbo_sw_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
+ TURBO_SW_MAX_NB_QUEUES_ARG"=<int> "
+ TURBO_SW_SOCKET_ID_ARG"=<int>");
+RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, turbo_sw);
+
+RTE_INIT(turbo_sw_bbdev_init_log)
+{
+ bbdev_turbo_sw_logtype = rte_log_register("pmd.bb.turbo_sw");
+ if (bbdev_turbo_sw_logtype >= 0)
+ rte_log_set_level(bbdev_turbo_sw_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map b/src/spdk/dpdk/drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map
new file mode 100644
index 00000000..58b94270
--- /dev/null
+++ b/src/spdk/dpdk/drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map
@@ -0,0 +1,3 @@
+DPDK_18.02 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/bus/Makefile b/src/spdk/dpdk/drivers/bus/Makefile
new file mode 100644
index 00000000..cea3b55e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += dpaa
+ifeq ($(CONFIG_RTE_EAL_VFIO),y)
+DIRS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga
+DIRS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci
+DIRS-$(CONFIG_RTE_LIBRTE_VDEV_BUS) += vdev
+DIRS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/Makefile b/src/spdk/dpdk/drivers/bus/dpaa/Makefile
new file mode 100644
index 00000000..bffaa9d9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/Makefile
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+RTE_BUS_DPAA=$(RTE_SDK)/drivers/bus/dpaa
+
+#
+# library name
+#
+LIB = librte_bus_dpaa.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -Wno-cast-qual
+CFLAGS += -D _GNU_SOURCE
+CFLAGS += -I$(RTE_BUS_DPAA)/
+CFLAGS += -I$(RTE_BUS_DPAA)/include
+CFLAGS += -I$(RTE_BUS_DPAA)/base/qbman
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+
+# versioning export map
+EXPORT_MAP := rte_bus_dpaa_version.map
+
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
+ dpaa_bus.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
+ base/fman/fman.c \
+ base/fman/fman_hw.c \
+ base/fman/of.c \
+ base/fman/netcfg_layer.c \
+ base/qbman/process.c \
+ base/qbman/bman.c \
+ base/qbman/bman_driver.c \
+ base/qbman/qman.c \
+ base/qbman/qman_driver.c \
+ base/qbman/dpaa_alloc.c \
+ base/qbman/dpaa_sys.c
+
+# Link Pthread
+LDLIBS += -lpthread
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman.c b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman.c
new file mode 100644
index 00000000..bdb70042
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman.c
@@ -0,0 +1,581 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <ifaddrs.h>
+
+/* This header declares the driver interface we implement */
+#include <fman.h>
+#include <of.h>
+#include <rte_dpaa_logs.h>
+
+#define QMI_PORT_REGS_OFFSET 0x400
+
+/* CCSR map address to access ccsr based register */
+void *fman_ccsr_map;
+/* fman version info */
+u16 fman_ip_rev;
+static int get_once;
+u32 fman_dealloc_bufs_mask_hi;
+u32 fman_dealloc_bufs_mask_lo;
+
+int fman_ccsr_map_fd = -1;
+static COMPAT_LIST_HEAD(__ifs);
+
+/* This is the (const) global variable that callers have read-only access to.
+ * Internally, we have read-write access directly to __ifs.
+ */
+const struct list_head *fman_if_list = &__ifs;
+
+static void
+if_destructor(struct __fman_if *__if)
+{
+ struct fman_if_bpool *bp, *tmpbp;
+
+ if (!__if)
+ return;
+
+ if (__if->__if.mac_type == fman_offline)
+ goto cleanup;
+
+ list_for_each_entry_safe(bp, tmpbp, &__if->__if.bpool_list, node) {
+ list_del(&bp->node);
+ free(bp);
+ }
+cleanup:
+ free(__if);
+}
+
+static int
+fman_get_ip_rev(const struct device_node *fman_node)
+{
+ const uint32_t *fman_addr;
+ uint64_t phys_addr;
+ uint64_t regs_size;
+ uint32_t ip_rev_1;
+ int _errno;
+
+ fman_addr = of_get_address(fman_node, 0, &regs_size, NULL);
+ if (!fman_addr) {
+ pr_err("of_get_address cannot return fman address\n");
+ return -EINVAL;
+ }
+ phys_addr = of_translate_address(fman_node, fman_addr);
+ if (!phys_addr) {
+ pr_err("of_translate_address failed\n");
+ return -EINVAL;
+ }
+ fman_ccsr_map = mmap(NULL, regs_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fman_ccsr_map_fd, phys_addr);
+ if (fman_ccsr_map == MAP_FAILED) {
+ pr_err("Can not map FMan ccsr base");
+ return -EINVAL;
+ }
+
+ ip_rev_1 = in_be32(fman_ccsr_map + FMAN_IP_REV_1);
+ fman_ip_rev = (ip_rev_1 & FMAN_IP_REV_1_MAJOR_MASK) >>
+ FMAN_IP_REV_1_MAJOR_SHIFT;
+
+ _errno = munmap(fman_ccsr_map, regs_size);
+ if (_errno)
+ pr_err("munmap() of FMan ccsr failed");
+
+ return 0;
+}
+
+static int
+fman_get_mac_index(uint64_t regs_addr_host, uint8_t *mac_idx)
+{
+ int ret = 0;
+
+ /*
+ * MAC1 : E_0000h
+ * MAC2 : E_2000h
+ * MAC3 : E_4000h
+ * MAC4 : E_6000h
+ * MAC5 : E_8000h
+ * MAC6 : E_A000h
+ * MAC7 : E_C000h
+ * MAC8 : E_E000h
+ * MAC9 : F_0000h
+ * MAC10: F_2000h
+ */
+ switch (regs_addr_host) {
+ case 0xE0000:
+ *mac_idx = 1;
+ break;
+ case 0xE2000:
+ *mac_idx = 2;
+ break;
+ case 0xE4000:
+ *mac_idx = 3;
+ break;
+ case 0xE6000:
+ *mac_idx = 4;
+ break;
+ case 0xE8000:
+ *mac_idx = 5;
+ break;
+ case 0xEA000:
+ *mac_idx = 6;
+ break;
+ case 0xEC000:
+ *mac_idx = 7;
+ break;
+ case 0xEE000:
+ *mac_idx = 8;
+ break;
+ case 0xF0000:
+ *mac_idx = 9;
+ break;
+ case 0xF2000:
+ *mac_idx = 10;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int
+fman_if_init(const struct device_node *dpa_node)
+{
+ const char *rprop, *mprop;
+ uint64_t phys_addr;
+ struct __fman_if *__if;
+ struct fman_if_bpool *bpool;
+
+ const phandle *mac_phandle, *ports_phandle, *pools_phandle;
+ const phandle *tx_channel_id = NULL, *mac_addr, *cell_idx;
+ const phandle *rx_phandle, *tx_phandle;
+ uint64_t tx_phandle_host[4] = {0};
+ uint64_t rx_phandle_host[4] = {0};
+ uint64_t regs_addr_host = 0;
+ uint64_t cell_idx_host = 0;
+
+ const struct device_node *mac_node = NULL, *tx_node;
+ const struct device_node *pool_node, *fman_node, *rx_node;
+ const uint32_t *regs_addr = NULL;
+ const char *mname, *fname;
+ const char *dname = dpa_node->full_name;
+ size_t lenp;
+ int _errno;
+ const char *char_prop;
+ uint32_t na;
+
+ if (of_device_is_available(dpa_node) == false)
+ return 0;
+
+ rprop = "fsl,qman-frame-queues-rx";
+ mprop = "fsl,fman-mac";
+
+ /* Allocate an object for this network interface */
+ __if = malloc(sizeof(*__if));
+ if (!__if) {
+ FMAN_ERR(-ENOMEM, "malloc(%zu)\n", sizeof(*__if));
+ goto err;
+ }
+ memset(__if, 0, sizeof(*__if));
+ INIT_LIST_HEAD(&__if->__if.bpool_list);
+ strncpy(__if->node_path, dpa_node->full_name, PATH_MAX - 1);
+ __if->node_path[PATH_MAX - 1] = '\0';
+
+ /* Obtain the MAC node used by this interface except macless */
+ mac_phandle = of_get_property(dpa_node, mprop, &lenp);
+ if (!mac_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no %s\n", dname, mprop);
+ goto err;
+ }
+ assert(lenp == sizeof(phandle));
+ mac_node = of_find_node_by_phandle(*mac_phandle);
+ if (!mac_node) {
+ FMAN_ERR(-ENXIO, "%s: bad 'fsl,fman-mac\n", dname);
+ goto err;
+ }
+ mname = mac_node->full_name;
+
+ /* Map the CCSR regs for the MAC node */
+ regs_addr = of_get_address(mac_node, 0, &__if->regs_size, NULL);
+ if (!regs_addr) {
+ FMAN_ERR(-EINVAL, "of_get_address(%s)\n", mname);
+ goto err;
+ }
+ phys_addr = of_translate_address(mac_node, regs_addr);
+ if (!phys_addr) {
+ FMAN_ERR(-EINVAL, "of_translate_address(%s, %p)\n",
+ mname, regs_addr);
+ goto err;
+ }
+ __if->ccsr_map = mmap(NULL, __if->regs_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ fman_ccsr_map_fd, phys_addr);
+ if (__if->ccsr_map == MAP_FAILED) {
+ FMAN_ERR(-errno, "mmap(0x%"PRIx64")\n", phys_addr);
+ goto err;
+ }
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues). Convert to host byte order */
+ regs_addr_host = of_read_number(regs_addr, na);
+
+
+ /* Get the index of the Fman this i/f belongs to */
+ fman_node = of_get_parent(mac_node);
+ na = of_n_addr_cells(mac_node);
+ if (!fman_node) {
+ FMAN_ERR(-ENXIO, "of_get_parent(%s)\n", mname);
+ goto err;
+ }
+ fname = fman_node->full_name;
+ cell_idx = of_get_property(fman_node, "cell-index", &lenp);
+ if (!cell_idx) {
+ FMAN_ERR(-ENXIO, "%s: no cell-index)\n", fname);
+ goto err;
+ }
+ assert(lenp == sizeof(*cell_idx));
+ cell_idx_host = of_read_number(cell_idx, lenp / sizeof(phandle));
+ __if->__if.fman_idx = cell_idx_host;
+ if (!get_once) {
+ _errno = fman_get_ip_rev(fman_node);
+ if (_errno) {
+ FMAN_ERR(-ENXIO, "%s: ip_rev is not available\n",
+ fname);
+ goto err;
+ }
+ }
+
+ if (fman_ip_rev >= FMAN_V3) {
+ /*
+ * Set A2V, OVOM, EBD bits in contextA to allow external
+ * buffer deallocation by fman.
+ */
+ fman_dealloc_bufs_mask_hi = FMAN_V3_CONTEXTA_EN_A2V |
+ FMAN_V3_CONTEXTA_EN_OVOM;
+ fman_dealloc_bufs_mask_lo = FMAN_V3_CONTEXTA_EN_EBD;
+ } else {
+ fman_dealloc_bufs_mask_hi = 0;
+ fman_dealloc_bufs_mask_lo = 0;
+ }
+ /* Is the MAC node 1G, 10G? */
+ __if->__if.is_memac = 0;
+
+ if (of_device_is_compatible(mac_node, "fsl,fman-1g-mac"))
+ __if->__if.mac_type = fman_mac_1g;
+ else if (of_device_is_compatible(mac_node, "fsl,fman-10g-mac"))
+ __if->__if.mac_type = fman_mac_10g;
+ else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
+ __if->__if.is_memac = 1;
+ char_prop = of_get_property(mac_node, "phy-connection-type",
+ NULL);
+ if (!char_prop) {
+ printf("memac: unknown MII type assuming 1G\n");
+ /* Right now forcing memac to 1g in case of error*/
+ __if->__if.mac_type = fman_mac_1g;
+ } else {
+ if (strstr(char_prop, "sgmii"))
+ __if->__if.mac_type = fman_mac_1g;
+ else if (strstr(char_prop, "rgmii")) {
+ __if->__if.mac_type = fman_mac_1g;
+ __if->__if.is_rgmii = 1;
+ } else if (strstr(char_prop, "xgmii"))
+ __if->__if.mac_type = fman_mac_10g;
+ }
+ } else {
+ FMAN_ERR(-EINVAL, "%s: unknown MAC type\n", mname);
+ goto err;
+ }
+
+ /*
+ * For MAC ports, we cannot rely on cell-index. In
+ * T2080, two of the 10G ports on single FMAN have same
+ * duplicate cell-indexes as the other two 10G ports on
+ * same FMAN. Hence, we now rely upon addresses of the
+ * ports from device tree to deduce the index.
+ */
+
+ _errno = fman_get_mac_index(regs_addr_host, &__if->__if.mac_idx);
+ if (_errno) {
+ FMAN_ERR(-EINVAL, "Invalid register address: %" PRIx64,
+ regs_addr_host);
+ goto err;
+ }
+
+ /* Extract the MAC address for private and shared interfaces */
+ mac_addr = of_get_property(mac_node, "local-mac-address",
+ &lenp);
+ if (!mac_addr) {
+ FMAN_ERR(-EINVAL, "%s: no local-mac-address\n",
+ mname);
+ goto err;
+ }
+ memcpy(&__if->__if.mac_addr, mac_addr, ETHER_ADDR_LEN);
+
+ /* Extract the Tx port (it's the second of the two port handles)
+ * and get its channel ID
+ */
+ ports_phandle = of_get_property(mac_node, "fsl,port-handles",
+ &lenp);
+ if (!ports_phandle)
+ ports_phandle = of_get_property(mac_node, "fsl,fman-ports",
+ &lenp);
+ if (!ports_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,port-handles\n",
+ mname);
+ goto err;
+ }
+ assert(lenp == (2 * sizeof(phandle)));
+ tx_node = of_find_node_by_phandle(ports_phandle[1]);
+ if (!tx_node) {
+ FMAN_ERR(-ENXIO, "%s: bad fsl,port-handle[1]\n", mname);
+ goto err;
+ }
+ /* Extract the channel ID (from tx-port-handle) */
+ tx_channel_id = of_get_property(tx_node, "fsl,qman-channel-id",
+ &lenp);
+ if (!tx_channel_id) {
+ FMAN_ERR(-EINVAL, "%s: no fsl-qman-channel-id\n",
+ tx_node->full_name);
+ goto err;
+ }
+
+ rx_node = of_find_node_by_phandle(ports_phandle[0]);
+ if (!rx_node) {
+ FMAN_ERR(-ENXIO, "%s: bad fsl,port-handle[0]\n", mname);
+ goto err;
+ }
+ regs_addr = of_get_address(rx_node, 0, &__if->regs_size, NULL);
+ if (!regs_addr) {
+ FMAN_ERR(-EINVAL, "of_get_address(%s)\n", mname);
+ goto err;
+ }
+ phys_addr = of_translate_address(rx_node, regs_addr);
+ if (!phys_addr) {
+ FMAN_ERR(-EINVAL, "of_translate_address(%s, %p)\n",
+ mname, regs_addr);
+ goto err;
+ }
+ __if->bmi_map = mmap(NULL, __if->regs_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ fman_ccsr_map_fd, phys_addr);
+ if (__if->bmi_map == MAP_FAILED) {
+ FMAN_ERR(-errno, "mmap(0x%"PRIx64")\n", phys_addr);
+ goto err;
+ }
+
+ /* No channel ID for MAC-less */
+ assert(lenp == sizeof(*tx_channel_id));
+ na = of_n_addr_cells(mac_node);
+ __if->__if.tx_channel_id = of_read_number(tx_channel_id, na);
+
+ /* Extract the Rx FQIDs. (Note, the device representation is silly,
+ * there are "counts" that must always be 1.)
+ */
+ rx_phandle = of_get_property(dpa_node, rprop, &lenp);
+ if (!rx_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,qman-frame-queues-rx\n", dname);
+ goto err;
+ }
+
+ assert(lenp == (4 * sizeof(phandle)));
+
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues). Convert to host byte order */
+ rx_phandle_host[0] = of_read_number(&rx_phandle[0], na);
+ rx_phandle_host[1] = of_read_number(&rx_phandle[1], na);
+ rx_phandle_host[2] = of_read_number(&rx_phandle[2], na);
+ rx_phandle_host[3] = of_read_number(&rx_phandle[3], na);
+
+ assert((rx_phandle_host[1] == 1) && (rx_phandle_host[3] == 1));
+ __if->__if.fqid_rx_err = rx_phandle_host[0];
+ __if->__if.fqid_rx_def = rx_phandle_host[2];
+
+ /* Extract the Tx FQIDs */
+ tx_phandle = of_get_property(dpa_node,
+ "fsl,qman-frame-queues-tx", &lenp);
+ if (!tx_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,qman-frame-queues-tx\n", dname);
+ goto err;
+ }
+
+ assert(lenp == (4 * sizeof(phandle)));
+ /*TODO: Fix for other cases also */
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues). Convert to host byte order */
+ tx_phandle_host[0] = of_read_number(&tx_phandle[0], na);
+ tx_phandle_host[1] = of_read_number(&tx_phandle[1], na);
+ tx_phandle_host[2] = of_read_number(&tx_phandle[2], na);
+ tx_phandle_host[3] = of_read_number(&tx_phandle[3], na);
+ assert((tx_phandle_host[1] == 1) && (tx_phandle_host[3] == 1));
+ __if->__if.fqid_tx_err = tx_phandle_host[0];
+ __if->__if.fqid_tx_confirm = tx_phandle_host[2];
+
+ /* Obtain the buffer pool nodes used by this interface */
+ pools_phandle = of_get_property(dpa_node, "fsl,bman-buffer-pools",
+ &lenp);
+ if (!pools_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,bman-buffer-pools\n", dname);
+ goto err;
+ }
+ /* For each pool, parse the corresponding node and add a pool object
+ * to the interface's "bpool_list"
+ */
+ assert(lenp && !(lenp % sizeof(phandle)));
+ while (lenp) {
+ size_t proplen;
+ const phandle *prop;
+ uint64_t bpid_host = 0;
+ uint64_t bpool_host[6] = {0};
+ const char *pname;
+ /* Allocate an object for the pool */
+ bpool = malloc(sizeof(*bpool));
+ if (!bpool) {
+ FMAN_ERR(-ENOMEM, "malloc(%zu)\n", sizeof(*bpool));
+ goto err;
+ }
+ /* Find the pool node */
+ pool_node = of_find_node_by_phandle(*pools_phandle);
+ if (!pool_node) {
+ FMAN_ERR(-ENXIO, "%s: bad fsl,bman-buffer-pools\n",
+ dname);
+ free(bpool);
+ goto err;
+ }
+ pname = pool_node->full_name;
+ /* Extract the BPID property */
+ prop = of_get_property(pool_node, "fsl,bpid", &proplen);
+ if (!prop) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,bpid\n", pname);
+ free(bpool);
+ goto err;
+ }
+ assert(proplen == sizeof(*prop));
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues).
+ * Convert to host byte-order
+ */
+ bpid_host = of_read_number(prop, na);
+ bpool->bpid = bpid_host;
+ /* Extract the cfg property (count/size/addr). "fsl,bpool-cfg"
+ * indicates for the Bman driver to seed the pool.
+ * "fsl,bpool-ethernet-cfg" is used by the network driver. The
+ * two are mutually exclusive, so check for either of them.
+ */
+ prop = of_get_property(pool_node, "fsl,bpool-cfg",
+ &proplen);
+ if (!prop)
+ prop = of_get_property(pool_node,
+ "fsl,bpool-ethernet-cfg",
+ &proplen);
+ if (!prop) {
+ /* It's OK for there to be no bpool-cfg */
+ bpool->count = bpool->size = bpool->addr = 0;
+ } else {
+ assert(proplen == (6 * sizeof(*prop)));
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues).
+ * Convert to host byte order
+ */
+ bpool_host[0] = of_read_number(&prop[0], na);
+ bpool_host[1] = of_read_number(&prop[1], na);
+ bpool_host[2] = of_read_number(&prop[2], na);
+ bpool_host[3] = of_read_number(&prop[3], na);
+ bpool_host[4] = of_read_number(&prop[4], na);
+ bpool_host[5] = of_read_number(&prop[5], na);
+
+ bpool->count = ((uint64_t)bpool_host[0] << 32) |
+ bpool_host[1];
+ bpool->size = ((uint64_t)bpool_host[2] << 32) |
+ bpool_host[3];
+ bpool->addr = ((uint64_t)bpool_host[4] << 32) |
+ bpool_host[5];
+ }
+ /* Parsing of the pool is complete, add it to the interface
+ * list.
+ */
+ list_add_tail(&bpool->node, &__if->__if.bpool_list);
+ lenp -= sizeof(phandle);
+ pools_phandle++;
+ }
+
+ /* Parsing of the network interface is complete, add it to the list */
+ DPAA_BUS_LOG(DEBUG, "Found %s, Tx Channel = %x, FMAN = %x,"
+ "Port ID = %x",
+ dname, __if->__if.tx_channel_id, __if->__if.fman_idx,
+ __if->__if.mac_idx);
+
+ list_add_tail(&__if->__if.node, &__ifs);
+ return 0;
+err:
+ if_destructor(__if);
+ return _errno;
+}
+
+int
+fman_init(void)
+{
+ const struct device_node *dpa_node;
+ int _errno;
+
+ /* If multiple dependencies try to initialise the Fman driver, don't
+ * panic.
+ */
+ if (fman_ccsr_map_fd != -1)
+ return 0;
+
+ fman_ccsr_map_fd = open(FMAN_DEVICE_PATH, O_RDWR);
+ if (unlikely(fman_ccsr_map_fd < 0)) {
+ DPAA_BUS_LOG(ERR, "Unable to open (/dev/mem)");
+ return fman_ccsr_map_fd;
+ }
+
+ for_each_compatible_node(dpa_node, NULL, "fsl,dpa-ethernet-init") {
+ _errno = fman_if_init(dpa_node);
+ if (_errno) {
+ FMAN_ERR(_errno, "if_init(%s)\n", dpa_node->full_name);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ fman_finish();
+ return _errno;
+}
+
+void
+fman_finish(void)
+{
+ struct __fman_if *__if, *tmpif;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ list_for_each_entry_safe(__if, tmpif, &__ifs, __if.node) {
+ int _errno;
+
+ /* disable Rx and Tx */
+ if ((__if->__if.mac_type == fman_mac_1g) &&
+ (!__if->__if.is_memac))
+ out_be32(__if->ccsr_map + 0x100,
+ in_be32(__if->ccsr_map + 0x100) & ~(u32)0x5);
+ else
+ out_be32(__if->ccsr_map + 8,
+ in_be32(__if->ccsr_map + 8) & ~(u32)3);
+ /* release the mapping */
+ _errno = munmap(__if->ccsr_map, __if->regs_size);
+ if (unlikely(_errno < 0))
+ fprintf(stderr, "%s:%d:%s(): munmap() = %d (%s)\n",
+ __FILE__, __LINE__, __func__,
+ -errno, strerror(errno));
+ printf("Tearing down %s\n", __if->node_path);
+ list_del(&__if->__if.node);
+ free(__if);
+ }
+
+ close(fman_ccsr_map_fd);
+ fman_ccsr_map_fd = -1;
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c
new file mode 100644
index 00000000..4ebbc3d3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -0,0 +1,611 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <ifaddrs.h>
+#include <fman.h>
+/* This header declares things about Fman hardware itself (the format of status
+ * words and an inline implementation of CRC64). We include it only in order to
+ * instantiate the one global variable it depends on.
+ */
+#include <fsl_fman.h>
+#include <fsl_fman_crc64.h>
+#include <fsl_bman.h>
+
+#define FMAN_SP_SG_DISABLE 0x80000000
+#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16
+
+/* Instantiate the global variable that the inline CRC64 implementation (in
+ * <fsl_fman.h>) depends on.
+ */
+DECLARE_FMAN_CRC64_TABLE();
+
+#define ETH_ADDR_TO_UINT64(eth_addr) \
+ (uint64_t)(((uint64_t)(eth_addr)[0] << 40) | \
+ ((uint64_t)(eth_addr)[1] << 32) | \
+ ((uint64_t)(eth_addr)[2] << 24) | \
+ ((uint64_t)(eth_addr)[3] << 16) | \
+ ((uint64_t)(eth_addr)[4] << 8) | \
+ ((uint64_t)(eth_addr)[5]))
+
+void
+fman_if_set_mcast_filter_table(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *hashtable_ctrl;
+ uint32_t i;
+
+ hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+ for (i = 0; i < 64; i++)
+ out_be32(hashtable_ctrl, i|HASH_CTRL_MCAST_EN);
+}
+
+void
+fman_if_reset_mcast_filter_table(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *hashtable_ctrl;
+ uint32_t i;
+
+ hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+ for (i = 0; i < 64; i++)
+ out_be32(hashtable_ctrl, i & ~HASH_CTRL_MCAST_EN);
+}
+
+static
+uint32_t get_mac_hash_code(uint64_t eth_addr)
+{
+ uint64_t mask1, mask2;
+ uint32_t xorVal = 0;
+ uint8_t i, j;
+
+ for (i = 0; i < 6; i++) {
+ mask1 = eth_addr & (uint64_t)0x01;
+ eth_addr >>= 1;
+
+ for (j = 0; j < 7; j++) {
+ mask2 = eth_addr & (uint64_t)0x01;
+ mask1 ^= mask2;
+ eth_addr >>= 1;
+ }
+
+ xorVal |= (mask1 << (5 - i));
+ }
+
+ return xorVal;
+}
+
+int
+fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth)
+{
+ uint64_t eth_addr;
+ void *hashtable_ctrl;
+ uint32_t hash;
+
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ eth_addr = ETH_ADDR_TO_UINT64(eth);
+
+ if (!(eth_addr & GROUP_ADDRESS))
+ return -1;
+
+ hash = get_mac_hash_code(eth_addr) & HASH_CTRL_ADDR_MASK;
+ hash = hash | HASH_CTRL_MCAST_EN;
+
+ hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+ out_be32(hashtable_ctrl, hash);
+
+ return 0;
+}
+
+int
+fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *mac_reg =
+ &((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_l;
+ u32 val = in_be32(mac_reg);
+
+ eth[0] = (val & 0x000000ff) >> 0;
+ eth[1] = (val & 0x0000ff00) >> 8;
+ eth[2] = (val & 0x00ff0000) >> 16;
+ eth[3] = (val & 0xff000000) >> 24;
+
+ mac_reg = &((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_u;
+ val = in_be32(mac_reg);
+
+ eth[4] = (val & 0x000000ff) >> 0;
+ eth[5] = (val & 0x0000ff00) >> 8;
+
+ return 0;
+}
+
+void
+fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ void *reg;
+
+ if (addr_num) {
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_l;
+ out_be32(reg, 0x0);
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_u;
+ out_be32(reg, 0x0);
+ } else {
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_l;
+ out_be32(reg, 0x0);
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_u;
+ out_be32(reg, 0x0);
+ }
+}
+
+int
+fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+
+ void *reg;
+ u32 val;
+
+ memcpy(&m->__if.mac_addr, eth, ETHER_ADDR_LEN);
+
+ if (addr_num)
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_l;
+ else
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_l;
+
+ val = (m->__if.mac_addr.addr_bytes[0] |
+ (m->__if.mac_addr.addr_bytes[1] << 8) |
+ (m->__if.mac_addr.addr_bytes[2] << 16) |
+ (m->__if.mac_addr.addr_bytes[3] << 24));
+ out_be32(reg, val);
+
+ if (addr_num)
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_u;
+ else
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_u;
+
+ val = ((m->__if.mac_addr.addr_bytes[4] << 0) |
+ (m->__if.mac_addr.addr_bytes[5] << 8));
+ out_be32(reg, val);
+
+ return 0;
+}
+
+void
+fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ u32 value = 0;
+ void *cmdcfg;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Set Rx Ignore Pause Frames */
+ cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
+ if (enable)
+ value = in_be32(cmdcfg) | CMD_CFG_PAUSE_IGNORE;
+ else
+ value = in_be32(cmdcfg) & ~CMD_CFG_PAUSE_IGNORE;
+
+ out_be32(cmdcfg, value);
+}
+
+void
+fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ unsigned int *maxfrm;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Set Max frame length */
+ maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
+ out_be32(maxfrm, (MAXFRM_RX_MASK & max_frame_len));
+}
+
+void
+fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ struct memac_regs *regs = m->ccsr_map;
+
+ /* read recved packet count */
+ stats->ipackets = ((u64)in_be32(&regs->rfrm_u)) << 32 |
+ in_be32(&regs->rfrm_l);
+ stats->ibytes = ((u64)in_be32(&regs->roct_u)) << 32 |
+ in_be32(&regs->roct_l);
+ stats->ierrors = ((u64)in_be32(&regs->rerr_u)) << 32 |
+ in_be32(&regs->rerr_l);
+
+ /* read xmited packet count */
+ stats->opackets = ((u64)in_be32(&regs->tfrm_u)) << 32 |
+ in_be32(&regs->tfrm_l);
+ stats->obytes = ((u64)in_be32(&regs->toct_u)) << 32 |
+ in_be32(&regs->toct_l);
+ stats->oerrors = ((u64)in_be32(&regs->terr_u)) << 32 |
+ in_be32(&regs->terr_l);
+}
+
+void
+fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ struct memac_regs *regs = m->ccsr_map;
+ int i;
+ uint64_t base_offset = offsetof(struct memac_regs, reoct_l);
+
+ for (i = 0; i < n; i++)
+ value[i] = ((u64)in_be32((char *)regs
+ + base_offset + 8 * i + 4)) << 32 |
+ ((u64)in_be32((char *)regs
+ + base_offset + 8 * i));
+}
+
+void
+fman_if_stats_reset(struct fman_if *p)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ struct memac_regs *regs = m->ccsr_map;
+ uint32_t tmp;
+
+ tmp = in_be32(&regs->statn_config);
+
+ tmp |= STATS_CFG_CLR;
+
+ out_be32(&regs->statn_config, tmp);
+
+ while (in_be32(&regs->statn_config) & STATS_CFG_CLR)
+ ;
+}
+
+void
+fman_if_promiscuous_enable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *cmdcfg;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Enable Rx promiscuous mode */
+ cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) | CMD_CFG_PROMIS_EN);
+}
+
+void
+fman_if_promiscuous_disable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *cmdcfg;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Disable Rx promiscuous mode */
+ cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) & (~CMD_CFG_PROMIS_EN));
+}
+
+void
+fman_if_enable_rx(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* enable Rx and Tx */
+ out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) | 3);
+}
+
+void
+fman_if_disable_rx(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* only disable Rx, not Tx */
+ out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) & ~(u32)2);
+}
+
+void
+fman_if_loopback_enable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Enable loopback mode */
+ if ((__if->__if.is_memac) && (__if->__if.is_rgmii)) {
+ unsigned int *ifmode =
+ &((struct memac_regs *)__if->ccsr_map)->if_mode;
+ out_be32(ifmode, in_be32(ifmode) | IF_MODE_RLP);
+ } else{
+ unsigned int *cmdcfg =
+ &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) | CMD_CFG_LOOPBACK_EN);
+ }
+}
+
+void
+fman_if_loopback_disable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+ /* Disable loopback mode */
+ if ((__if->__if.is_memac) && (__if->__if.is_rgmii)) {
+ unsigned int *ifmode =
+ &((struct memac_regs *)__if->ccsr_map)->if_mode;
+ out_be32(ifmode, in_be32(ifmode) & ~IF_MODE_RLP);
+ } else {
+ unsigned int *cmdcfg =
+ &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) & ~CMD_CFG_LOOPBACK_EN);
+ }
+}
+
+void
+fman_if_set_bp(struct fman_if *fm_if, unsigned num __always_unused,
+ int bpid, size_t bufsize)
+{
+ u32 fmbm_ebmpi;
+ u32 ebmpi_val_ace = 0xc0000000;
+ u32 ebmpi_mask = 0xffc00000;
+
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_ebmpi =
+ in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ebmpi[0]);
+ fmbm_ebmpi = ebmpi_val_ace | (fmbm_ebmpi & ebmpi_mask) | (bpid << 16) |
+ (bufsize);
+
+ out_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ebmpi[0],
+ fmbm_ebmpi);
+}
+
+int
+fman_if_get_fc_threshold(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_mpd;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
+ return in_be32(fmbm_mpd);
+}
+
+int
+fman_if_set_fc_threshold(struct fman_if *fm_if, u32 high_water,
+ u32 low_water, u32 bpid)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_mpd;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
+ out_be32(fmbm_mpd, FMAN_ENABLE_BPOOL_DEPLETION);
+ return bm_pool_set_hw_threshold(bpid, low_water, high_water);
+
+}
+
+int
+fman_if_get_fc_quanta(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ return in_be32(&((struct memac_regs *)__if->ccsr_map)->pause_quanta[0]);
+}
+
+int
+fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ out_be32(&((struct memac_regs *)__if->ccsr_map)->pause_quanta[0],
+ pause_quanta);
+ return 0;
+}
+
+int
+fman_if_get_fdoff(struct fman_if *fm_if)
+{
+ u32 fmbm_rebm;
+ int fdoff;
+
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
+
+ fdoff = (fmbm_rebm >> FMAN_SP_EXT_BUF_MARG_START_SHIFT) & 0x1ff;
+
+ return fdoff;
+}
+
+void
+fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ unsigned int *fmbm_refqid =
+ &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_refqid;
+ out_be32(fmbm_refqid, err_fqid);
+}
+
+int
+fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ int val = 0;
+ int iceof_mask = 0x001f0000;
+ int icsz_mask = 0x0000001f;
+ int iciof_mask = 0x00000f00;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ unsigned int *fmbm_ricp =
+ &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
+ val = in_be32(fmbm_ricp);
+
+ icp->iceof = (val & iceof_mask) >> 12;
+ icp->iciof = (val & iciof_mask) >> 4;
+ icp->icsz = (val & icsz_mask) << 4;
+
+ return 0;
+}
+
+int
+fman_if_set_ic_params(struct fman_if *fm_if,
+ const struct fman_if_ic_params *icp)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ int val = 0;
+ int iceof_mask = 0x001f0000;
+ int icsz_mask = 0x0000001f;
+ int iciof_mask = 0x00000f00;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ val |= (icp->iceof << 12) & iceof_mask;
+ val |= (icp->iciof << 4) & iciof_mask;
+ val |= (icp->icsz >> 4) & icsz_mask;
+
+ unsigned int *fmbm_ricp =
+ &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
+ out_be32(fmbm_ricp, val);
+
+ return 0;
+}
+
+void
+fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_rebm;
+ int val = 0;
+ int fmbm_mask = 0x01ff0000;
+
+ val = fd_offset << FMAN_SP_EXT_BUF_MARG_START_SHIFT;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
+
+ out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
+}
+
+void
+fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *reg_maxfrm;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
+
+ out_be32(reg_maxfrm, (in_be32(reg_maxfrm) & 0xFFFF0000) | max_frm);
+}
+
+uint16_t
+fman_if_get_maxfrm(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *reg_maxfrm;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
+
+ return (in_be32(reg_maxfrm) | 0x0000FFFF);
+}
+
+/* MSB in fmbm_rebm register
+ * 0 - If BMI cannot store the frame in a single buffer it may select a buffer
+ * of smaller size and store the frame in scatter gather (S/G) buffers
+ * 1 - Scatter gather format is not enabled for frame storage. If BMI cannot
+ * store the frame in a single buffer, the frame is discarded.
+ */
+
+int
+fman_if_get_sg_enable(struct fman_if *fm_if)
+{
+ u32 fmbm_rebm;
+
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
+
+ return (fmbm_rebm & FMAN_SP_SG_DISABLE) ? 0 : 1;
+}
+
+void
+fman_if_set_sg(struct fman_if *fm_if, int enable)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_rebm;
+ int val;
+ int fmbm_mask = FMAN_SP_SG_DISABLE;
+
+ if (enable)
+ val = 0;
+ else
+ val = FMAN_SP_SG_DISABLE;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
+
+ out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
+}
+
+void
+fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmqm_pndn;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmqm_pndn = &((struct fman_port_qmi_regs *)__if->qmi_map)->fmqm_pndn;
+
+ out_be32(fmqm_pndn, nia);
+}
+
+void
+fman_if_discard_rx_errors(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_rfsdm, *fmbm_rfsem;
+
+ fmbm_rfsem = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsem;
+ out_be32(fmbm_rfsem, 0);
+
+ /* Configure the discard mask to discard the error packets which have
+ * DMA errors, Frame size error, Header error etc. The mask 0x010CE3F0
+ * is to configured discard all the errors which come in the FD[STATUS]
+ */
+ fmbm_rfsdm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsdm;
+ out_be32(fmbm_rfsdm, 0x010CE3F0);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/fman/netcfg_layer.c b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/netcfg_layer.c
new file mode 100644
index 00000000..031c6f1a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/netcfg_layer.c
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+#include <inttypes.h>
+#include <of.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <error.h>
+#include <net/if_arp.h>
+#include <assert.h>
+#include <unistd.h>
+
+#include <rte_malloc.h>
+
+#include <rte_dpaa_logs.h>
+#include <netcfg.h>
+
+/* This data structure contaings all configurations information
+ * related to usages of DPA devices.
+ */
+struct netcfg_info *netcfg;
+/* fd to open a socket for making ioctl request to disable/enable shared
+ * interfaces.
+ */
+static int skfd = -1;
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+void
+dump_netcfg(struct netcfg_info *cfg_ptr)
+{
+ int i;
+
+ printf(".......... DPAA Configuration ..........\n\n");
+
+ /* Network interfaces */
+ printf("Network interfaces: %d\n", cfg_ptr->num_ethports);
+ for (i = 0; i < cfg_ptr->num_ethports; i++) {
+ struct fman_if_bpool *bpool;
+ struct fm_eth_port_cfg *p_cfg = &cfg_ptr->port_cfg[i];
+ struct fman_if *__if = p_cfg->fman_if;
+
+ printf("\n+ Fman %d, MAC %d (%s);\n",
+ __if->fman_idx, __if->mac_idx,
+ (__if->mac_type == fman_mac_1g) ? "1G" : "10G");
+
+ printf("\tmac_addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ (&__if->mac_addr)->addr_bytes[0],
+ (&__if->mac_addr)->addr_bytes[1],
+ (&__if->mac_addr)->addr_bytes[2],
+ (&__if->mac_addr)->addr_bytes[3],
+ (&__if->mac_addr)->addr_bytes[4],
+ (&__if->mac_addr)->addr_bytes[5]);
+
+ printf("\ttx_channel_id: 0x%02x\n",
+ __if->tx_channel_id);
+
+ printf("\tfqid_rx_def: 0x%x\n", p_cfg->rx_def);
+ printf("\tfqid_rx_err: 0x%x\n", __if->fqid_rx_err);
+
+ printf("\tfqid_tx_err: 0x%x\n", __if->fqid_tx_err);
+ printf("\tfqid_tx_confirm: 0x%x\n", __if->fqid_tx_confirm);
+ fman_if_for_each_bpool(bpool, __if)
+ printf("\tbuffer pool: (bpid=%d, count=%"PRId64
+ " size=%"PRId64", addr=0x%"PRIx64")\n",
+ bpool->bpid, bpool->count, bpool->size,
+ bpool->addr);
+ }
+}
+#endif /* RTE_LIBRTE_DPAA_DEBUG_DRIVER */
+
+struct netcfg_info *
+netcfg_acquire(void)
+{
+ struct fman_if *__if;
+ int _errno, idx = 0;
+ uint8_t num_ports = 0;
+ uint8_t num_cfg_ports = 0;
+ size_t size;
+
+ /* Extract dpa configuration from fman driver and FMC configuration
+ * for command-line interfaces.
+ */
+
+ /* Open a basic socket to enable/disable shared
+ * interfaces.
+ */
+ skfd = socket(AF_PACKET, SOCK_RAW, 0);
+ if (unlikely(skfd < 0)) {
+ error(0, errno, "%s(): open(SOCK_RAW)", __func__);
+ return NULL;
+ }
+
+ /* Initialise the Fman driver */
+ _errno = fman_init();
+ if (_errno) {
+ DPAA_BUS_LOG(ERR, "FMAN driver init failed (%d)", errno);
+ close(skfd);
+ skfd = -1;
+ return NULL;
+ }
+
+ /* Number of MAC ports */
+ list_for_each_entry(__if, fman_if_list, node)
+ num_ports++;
+
+ if (!num_ports) {
+ DPAA_BUS_LOG(ERR, "FMAN ports not available");
+ return NULL;
+ }
+ /* Allocate space for all enabled mac ports */
+ size = sizeof(*netcfg) +
+ (num_ports * sizeof(struct fm_eth_port_cfg));
+
+ netcfg = calloc(1, size);
+ if (unlikely(netcfg == NULL)) {
+ DPAA_BUS_LOG(ERR, "Unable to allocat mem for netcfg");
+ goto error;
+ }
+
+ netcfg->num_ethports = num_ports;
+
+ list_for_each_entry(__if, fman_if_list, node) {
+ struct fm_eth_port_cfg *cfg = &netcfg->port_cfg[idx];
+ /* Hook in the fman driver interface */
+ cfg->fman_if = __if;
+ cfg->rx_def = __if->fqid_rx_def;
+ num_cfg_ports++;
+ idx++;
+ }
+
+ if (!num_cfg_ports) {
+ DPAA_BUS_LOG(ERR, "No FMAN ports found");
+ goto error;
+ } else if (num_ports != num_cfg_ports)
+ netcfg->num_ethports = num_cfg_ports;
+
+ return netcfg;
+
+error:
+ if (netcfg) {
+ free(netcfg);
+ netcfg = NULL;
+ }
+
+ return NULL;
+}
+
+void
+netcfg_release(struct netcfg_info *cfg_ptr)
+{
+ free(cfg_ptr);
+ /* Close socket for shared interfaces */
+ if (skfd >= 0) {
+ close(skfd);
+ skfd = -1;
+ }
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/fman/of.c b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/of.c
new file mode 100644
index 00000000..a7f3174e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/of.c
@@ -0,0 +1,587 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <of.h>
+#include <rte_dpaa_logs.h>
+
+static int alive;
+static struct dt_dir root_dir;
+static const char *base_dir;
+static COMPAT_LIST_HEAD(linear);
+
+static int
+of_open_dir(const char *relative_path, struct dirent ***d)
+{
+ int ret;
+ char full_path[PATH_MAX];
+
+ snprintf(full_path, PATH_MAX, "%s/%s", base_dir, relative_path);
+ ret = scandir(full_path, d, 0, versionsort);
+ if (ret < 0)
+ DPAA_BUS_LOG(ERR, "Failed to open directory %s",
+ full_path);
+ return ret;
+}
+
+static void
+of_close_dir(struct dirent **d, int num)
+{
+ while (num--)
+ free(d[num]);
+ free(d);
+}
+
+static int
+of_open_file(const char *relative_path)
+{
+ int ret;
+ char full_path[PATH_MAX];
+
+ snprintf(full_path, PATH_MAX, "%s/%s", base_dir, relative_path);
+ ret = open(full_path, O_RDONLY);
+ if (ret < 0)
+ DPAA_BUS_LOG(ERR, "Failed to open directory %s",
+ full_path);
+ return ret;
+}
+
+static void
+process_file(struct dirent *dent, struct dt_dir *parent)
+{
+ int fd;
+ struct dt_file *f = malloc(sizeof(*f));
+
+ if (!f) {
+ DPAA_BUS_LOG(DEBUG, "Unable to allocate memory for file node");
+ return;
+ }
+ f->node.is_file = 1;
+ snprintf(f->node.node.name, NAME_MAX, "%s", dent->d_name);
+ snprintf(f->node.node.full_name, PATH_MAX, "%s/%s",
+ parent->node.node.full_name, dent->d_name);
+ f->parent = parent;
+ fd = of_open_file(f->node.node.full_name);
+ if (fd < 0) {
+ DPAA_BUS_LOG(DEBUG, "Unable to open file node");
+ free(f);
+ return;
+ }
+ f->len = read(fd, f->buf, OF_FILE_BUF_MAX);
+ close(fd);
+ if (f->len < 0) {
+ DPAA_BUS_LOG(DEBUG, "Unable to read file node");
+ free(f);
+ return;
+ }
+ list_add_tail(&f->node.list, &parent->files);
+}
+
+static const struct dt_dir *
+node2dir(const struct device_node *n)
+{
+ struct dt_node *dn = container_of((struct device_node *)n,
+ struct dt_node, node);
+ const struct dt_dir *d = container_of(dn, struct dt_dir, node);
+
+ assert(!dn->is_file);
+ return d;
+}
+
+/* process_dir() calls iterate_dir(), but the latter will also call the former
+ * when recursing into sub-directories, so a predeclaration is needed.
+ */
+static int process_dir(const char *relative_path, struct dt_dir *dt);
+
+static int
+iterate_dir(struct dirent **d, int num, struct dt_dir *dt)
+{
+ int loop;
+ /* Iterate the directory contents */
+ for (loop = 0; loop < num; loop++) {
+ struct dt_dir *subdir;
+ int ret;
+ /* Ignore dot files of all types (especially "..") */
+ if (d[loop]->d_name[0] == '.')
+ continue;
+ switch (d[loop]->d_type) {
+ case DT_REG:
+ process_file(d[loop], dt);
+ break;
+ case DT_DIR:
+ subdir = malloc(sizeof(*subdir));
+ if (!subdir) {
+ perror("malloc");
+ return -ENOMEM;
+ }
+ snprintf(subdir->node.node.name, NAME_MAX, "%s",
+ d[loop]->d_name);
+ snprintf(subdir->node.node.full_name, PATH_MAX,
+ "%s/%s", dt->node.node.full_name,
+ d[loop]->d_name);
+ subdir->parent = dt;
+ ret = process_dir(subdir->node.node.full_name, subdir);
+ if (ret)
+ return ret;
+ list_add_tail(&subdir->node.list, &dt->subdirs);
+ break;
+ default:
+ DPAA_BUS_LOG(DEBUG, "Ignoring invalid dt entry %s/%s",
+ dt->node.node.full_name, d[loop]->d_name);
+ }
+ }
+ return 0;
+}
+
+static int
+process_dir(const char *relative_path, struct dt_dir *dt)
+{
+ struct dirent **d;
+ int ret, num;
+
+ dt->node.is_file = 0;
+ INIT_LIST_HEAD(&dt->subdirs);
+ INIT_LIST_HEAD(&dt->files);
+ ret = of_open_dir(relative_path, &d);
+ if (ret < 0)
+ return ret;
+ num = ret;
+ ret = iterate_dir(d, num, dt);
+ of_close_dir(d, num);
+ return (ret < 0) ? ret : 0;
+}
+
+static void
+linear_dir(struct dt_dir *d)
+{
+ struct dt_file *f;
+ struct dt_dir *dd;
+
+ d->compatible = NULL;
+ d->status = NULL;
+ d->lphandle = NULL;
+ d->a_cells = NULL;
+ d->s_cells = NULL;
+ d->reg = NULL;
+ list_for_each_entry(f, &d->files, node.list) {
+ if (!strcmp(f->node.node.name, "compatible")) {
+ if (d->compatible)
+ DPAA_BUS_LOG(DEBUG, "Duplicate compatible in"
+ " %s", d->node.node.full_name);
+ d->compatible = f;
+ } else if (!strcmp(f->node.node.name, "status")) {
+ if (d->status)
+ DPAA_BUS_LOG(DEBUG, "Duplicate status in %s",
+ d->node.node.full_name);
+ d->status = f;
+ } else if (!strcmp(f->node.node.name, "linux,phandle")) {
+ if (d->lphandle)
+ DPAA_BUS_LOG(DEBUG, "Duplicate lphandle in %s",
+ d->node.node.full_name);
+ d->lphandle = f;
+ } else if (!strcmp(f->node.node.name, "phandle")) {
+ if (d->lphandle)
+ DPAA_BUS_LOG(DEBUG, "Duplicate lphandle in %s",
+ d->node.node.full_name);
+ d->lphandle = f;
+ } else if (!strcmp(f->node.node.name, "#address-cells")) {
+ if (d->a_cells)
+ DPAA_BUS_LOG(DEBUG, "Duplicate a_cells in %s",
+ d->node.node.full_name);
+ d->a_cells = f;
+ } else if (!strcmp(f->node.node.name, "#size-cells")) {
+ if (d->s_cells)
+ DPAA_BUS_LOG(DEBUG, "Duplicate s_cells in %s",
+ d->node.node.full_name);
+ d->s_cells = f;
+ } else if (!strcmp(f->node.node.name, "reg")) {
+ if (d->reg)
+ DPAA_BUS_LOG(DEBUG, "Duplicate reg in %s",
+ d->node.node.full_name);
+ d->reg = f;
+ }
+ }
+
+ list_for_each_entry(dd, &d->subdirs, node.list) {
+ list_add_tail(&dd->linear, &linear);
+ linear_dir(dd);
+ }
+}
+
+int
+of_init_path(const char *dt_path)
+{
+ int ret;
+
+ base_dir = dt_path;
+
+ /* This needs to be singleton initialization */
+ DPAA_BUS_HWWARN(alive, "Double-init of device-tree driver!");
+
+ /* Prepare root node (the remaining fields are set in process_dir()) */
+ root_dir.node.node.name[0] = '\0';
+ root_dir.node.node.full_name[0] = '\0';
+ INIT_LIST_HEAD(&root_dir.node.list);
+ root_dir.parent = NULL;
+
+ /* Kick things off... */
+ ret = process_dir("", &root_dir);
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "Unable to parse device tree");
+ return ret;
+ }
+
+ /* Now make a flat, linear list of directories */
+ linear_dir(&root_dir);
+ alive = 1;
+ return 0;
+}
+
+static void
+destroy_dir(struct dt_dir *d)
+{
+ struct dt_file *f, *tmpf;
+ struct dt_dir *dd, *tmpd;
+
+ list_for_each_entry_safe(f, tmpf, &d->files, node.list) {
+ list_del(&f->node.list);
+ free(f);
+ }
+ list_for_each_entry_safe(dd, tmpd, &d->subdirs, node.list) {
+ destroy_dir(dd);
+ list_del(&dd->node.list);
+ free(dd);
+ }
+}
+
+void
+of_finish(void)
+{
+ DPAA_BUS_HWWARN(!alive, "Double-finish of device-tree driver!");
+
+ destroy_dir(&root_dir);
+ INIT_LIST_HEAD(&linear);
+ alive = 0;
+}
+
+static const struct dt_dir *
+next_linear(const struct dt_dir *f)
+{
+ if (f->linear.next == &linear)
+ return NULL;
+ return list_entry(f->linear.next, struct dt_dir, linear);
+}
+
+static int
+check_compatible(const struct dt_file *f, const char *compatible)
+{
+ const char *c = (char *)f->buf;
+ unsigned int len, remains = f->len;
+
+ while (remains) {
+ len = strlen(c);
+ if (!strcmp(c, compatible))
+ return 1;
+
+ if (remains < len + 1)
+ break;
+
+ c += (len + 1);
+ remains -= (len + 1);
+ }
+ return 0;
+}
+
+const struct device_node *
+of_find_compatible_node(const struct device_node *from,
+ const char *type __always_unused,
+ const char *compatible)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ if (list_empty(&linear))
+ return NULL;
+ if (!from)
+ d = list_entry(linear.next, struct dt_dir, linear);
+ else
+ d = node2dir(from);
+ for (d = next_linear(d); d && (!d->compatible ||
+ !check_compatible(d->compatible,
+ compatible));
+ d = next_linear(d))
+ ;
+ if (d)
+ return &d->node.node;
+ return NULL;
+}
+
+const void *
+of_get_property(const struct device_node *from, const char *name,
+ size_t *lenp)
+{
+ const struct dt_dir *d;
+ const struct dt_file *f;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ d = node2dir(from);
+ list_for_each_entry(f, &d->files, node.list)
+ if (!strcmp(f->node.node.name, name)) {
+ if (lenp)
+ *lenp = f->len;
+ return f->buf;
+ }
+ return NULL;
+}
+
+bool
+of_device_is_available(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ d = node2dir(dev_node);
+ if (!d->status)
+ return true;
+ if (!strcmp((char *)d->status->buf, "okay"))
+ return true;
+ if (!strcmp((char *)d->status->buf, "ok"))
+ return true;
+ return false;
+}
+
+const struct device_node *
+of_find_node_by_phandle(phandle ph)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ list_for_each_entry(d, &linear, linear)
+ if (d->lphandle && (d->lphandle->len == 4) &&
+ !memcmp(d->lphandle->buf, &ph, 4))
+ return &d->node.node;
+ return NULL;
+}
+
+const struct device_node *
+of_get_parent(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ if (!dev_node)
+ return NULL;
+ d = node2dir(dev_node);
+ if (!d->parent)
+ return NULL;
+ return &d->parent->node.node;
+}
+
+const struct device_node *
+of_get_next_child(const struct device_node *dev_node,
+ const struct device_node *prev)
+{
+ const struct dt_dir *p, *c;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ if (!dev_node)
+ return NULL;
+ p = node2dir(dev_node);
+ if (prev) {
+ c = node2dir(prev);
+ DPAA_BUS_HWWARN((c->parent != p), "Parent/child mismatch");
+ if (c->parent != p)
+ return NULL;
+ if (c->node.list.next == &p->subdirs)
+ /* prev was the last child */
+ return NULL;
+ c = list_entry(c->node.list.next, struct dt_dir, node.list);
+ return &c->node.node;
+ }
+ /* Return first child */
+ if (list_empty(&p->subdirs))
+ return NULL;
+ c = list_entry(p->subdirs.next, struct dt_dir, node.list);
+ return &c->node.node;
+}
+
+uint32_t
+of_n_addr_cells(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised");
+ if (!dev_node)
+ return OF_DEFAULT_NA;
+ d = node2dir(dev_node);
+ while ((d = d->parent))
+ if (d->a_cells) {
+ unsigned char *buf =
+ (unsigned char *)&d->a_cells->buf[0];
+ assert(d->a_cells->len == 4);
+ return ((uint32_t)buf[0] << 24) |
+ ((uint32_t)buf[1] << 16) |
+ ((uint32_t)buf[2] << 8) |
+ (uint32_t)buf[3];
+ }
+ return OF_DEFAULT_NA;
+}
+
+uint32_t
+of_n_size_cells(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ if (!dev_node)
+ return OF_DEFAULT_NA;
+ d = node2dir(dev_node);
+ while ((d = d->parent))
+ if (d->s_cells) {
+ unsigned char *buf =
+ (unsigned char *)&d->s_cells->buf[0];
+ assert(d->s_cells->len == 4);
+ return ((uint32_t)buf[0] << 24) |
+ ((uint32_t)buf[1] << 16) |
+ ((uint32_t)buf[2] << 8) |
+ (uint32_t)buf[3];
+ }
+ return OF_DEFAULT_NS;
+}
+
+const uint32_t *
+of_get_address(const struct device_node *dev_node, size_t idx,
+ uint64_t *size, uint32_t *flags __rte_unused)
+{
+ const struct dt_dir *d;
+ const unsigned char *buf;
+ uint32_t na = of_n_addr_cells(dev_node);
+ uint32_t ns = of_n_size_cells(dev_node);
+
+ if (!dev_node)
+ d = &root_dir;
+ else
+ d = node2dir(dev_node);
+ if (!d->reg)
+ return NULL;
+ assert(d->reg->len % ((na + ns) * 4) == 0);
+ assert(d->reg->len / ((na + ns) * 4) > (unsigned int) idx);
+ buf = (const unsigned char *)&d->reg->buf[0];
+ buf += (na + ns) * idx * 4;
+ if (size)
+ for (*size = 0; ns > 0; ns--, na++)
+ *size = (*size << 32) +
+ (((uint32_t)buf[4 * na] << 24) |
+ ((uint32_t)buf[4 * na + 1] << 16) |
+ ((uint32_t)buf[4 * na + 2] << 8) |
+ (uint32_t)buf[4 * na + 3]);
+ return (const uint32_t *)buf;
+}
+
+uint64_t
+of_translate_address(const struct device_node *dev_node,
+ const uint32_t *addr)
+{
+ uint64_t phys_addr, tmp_addr;
+ const struct device_node *parent;
+ const uint32_t *ranges;
+ size_t rlen;
+ uint32_t na, pna;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ assert(dev_node != NULL);
+
+ na = of_n_addr_cells(dev_node);
+ phys_addr = of_read_number(addr, na);
+
+ dev_node = of_get_parent(dev_node);
+ if (!dev_node)
+ return 0;
+ else if (node2dir(dev_node) == &root_dir)
+ return phys_addr;
+
+ do {
+ pna = of_n_addr_cells(dev_node);
+ parent = of_get_parent(dev_node);
+ if (!parent)
+ return 0;
+
+ ranges = of_get_property(dev_node, "ranges", &rlen);
+ /* "ranges" property is missing. Translation breaks */
+ if (!ranges)
+ return 0;
+ /* "ranges" property is empty. Do 1:1 translation */
+ else if (rlen == 0)
+ continue;
+ else
+ tmp_addr = of_read_number(ranges + na, pna);
+
+ na = pna;
+ dev_node = parent;
+ phys_addr += tmp_addr;
+ } while (node2dir(parent) != &root_dir);
+
+ return phys_addr;
+}
+
+bool
+of_device_is_compatible(const struct device_node *dev_node,
+ const char *compatible)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ if (!dev_node)
+ d = &root_dir;
+ else
+ d = node2dir(dev_node);
+ if (d->compatible && check_compatible(d->compatible, compatible))
+ return true;
+ return false;
+}
+
+static const void *of_get_mac_addr(const struct device_node *np,
+ const char *name)
+{
+ return of_get_property(np, name, NULL);
+}
+
+/**
+ * Search the device tree for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain to "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address. If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the device tree, but were not set by U-Boot. For example, the
+ * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
+ * addresses. Some older U-Boots only initialized 'local-mac-address'. In
+ * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
+ * but is all zeros.
+ */
+const void *of_get_mac_address(const struct device_node *np)
+{
+ const void *addr;
+
+ addr = of_get_mac_addr(np, "mac-address");
+ if (addr)
+ return addr;
+
+ addr = of_get_mac_addr(np, "local-mac-address");
+ if (addr)
+ return addr;
+
+ return of_get_mac_addr(np, "address");
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.c
new file mode 100644
index 00000000..8a629073
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.c
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include "bman.h"
+#include <rte_branch_prediction.h>
+
+/* Compilation constants */
+#define RCR_THRESH 2 /* reread h/w CI when running out of space */
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+
+struct bman_portal {
+ struct bm_portal p;
+ /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
+ struct bman_depletion *pools;
+ int thresh_set;
+ unsigned long irq_sources;
+ u32 slowpoll; /* only used when interrupts are off */
+ /* When the cpu-affine portal is activated, this is non-NULL */
+ const struct bm_portal_config *config;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static RTE_DEFINE_PER_LCORE(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+ return &RTE_PER_LCORE(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool, it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of
+ * the pool are operating via different portals.
+ */
+struct bman_pool {
+ struct bman_pool_params params;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ atomic_t in_use;
+#endif
+};
+
+static inline
+struct bman_portal *bman_create_portal(struct bman_portal *portal,
+ const struct bm_portal_config *c)
+{
+ struct bm_portal *p;
+ const struct bman_depletion *pools = &c->mask;
+ int ret;
+ u8 bpid = 0;
+
+ p = &portal->p;
+ /*
+ * prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference...
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+ pr_err("Bman RCR initialisation failed\n");
+ return NULL;
+ }
+ if (bm_mc_init(p)) {
+ pr_err("Bman MC initialisation failed\n");
+ goto fail_mc;
+ }
+ portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
+ if (!portal->pools)
+ goto fail_pools;
+ portal->pools[0] = *pools;
+ bman_depletion_init(portal->pools + 1);
+ while (bpid < bman_pool_max) {
+ /*
+ * Default to all BPIDs disabled, we enable as required at
+ * run-time.
+ */
+ bm_isr_bscn_mask(p, bpid, 0);
+ bpid++;
+ }
+ portal->slowpoll = 0;
+ /* Write-to-clear any stale interrupt status bits */
+ bm_isr_disable_write(p, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_isr_enable_write(p, portal->irq_sources);
+ bm_isr_status_clear(p, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, NULL, 0, portal->irqname,
+ portal)) {
+ pr_err("request_irq() failed\n");
+ goto fail_irq;
+ }
+
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(p);
+ if (ret) {
+ pr_err("Bman RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = c;
+
+ bm_isr_disable_write(p, 0);
+ bm_isr_uninhibit(p);
+ return portal;
+fail_rcr_empty:
+ free_irq(c->irq, portal);
+fail_irq:
+ kfree(portal->pools);
+fail_pools:
+ bm_mc_finish(p);
+fail_mc:
+ bm_rcr_finish(p);
+ return NULL;
+}
+
+struct bman_portal *
+bman_create_affine_portal(const struct bm_portal_config *c)
+{
+ struct bman_portal *portal = get_affine_portal();
+
+ /*This function is called from the context which is already affine to
+ *CPU or in other words this in non-migratable to other CPUs.
+ */
+ portal = bman_create_portal(portal, c);
+ if (portal) {
+ spin_lock(&affine_mask_lock);
+ CPU_SET(c->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ }
+ return portal;
+}
+
+static inline
+void bman_destroy_portal(struct bman_portal *bm)
+{
+ const struct bm_portal_config *pcfg;
+
+ pcfg = bm->config;
+ bm_rcr_cce_update(&bm->p);
+ bm_rcr_cce_update(&bm->p);
+
+ free_irq(pcfg->irq, bm);
+
+ kfree(bm->pools);
+ bm_mc_finish(&bm->p);
+ bm_rcr_finish(&bm->p);
+ bm->config = NULL;
+}
+
+const struct
+bm_portal_config *bman_destroy_affine_portal(void)
+{
+ struct bman_portal *bm = get_affine_portal();
+ const struct bm_portal_config *pcfg;
+
+ pcfg = bm->config;
+ bman_destroy_portal(bm);
+ spin_lock(&affine_mask_lock);
+ CPU_CLR(pcfg->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ return pcfg;
+}
+
+int
+bman_get_portal_index(void)
+{
+ struct bman_portal *p = get_affine_portal();
+ return p->config->index;
+}
+
+static const u32 zero_thresholds[4] = {0, 0, 0, 0};
+
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
+
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
+ int ret = bman_alloc_bpid(&bpid);
+
+ if (ret)
+ return NULL;
+ } else {
+ if (params->bpid >= bman_pool_max)
+ return NULL;
+ bpid = params->bpid;
+ }
+ if (params->flags & BMAN_POOL_FLAG_THRESH) {
+ int ret = bm_pool_set(bpid, params->thresholds);
+
+ if (ret)
+ goto err;
+ }
+
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
+ pool->params = *params;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ atomic_set(&pool->in_use, 1);
+#endif
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ pool->params.bpid = bpid;
+
+ return pool;
+err:
+ if (params->flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(bpid, zero_thresholds);
+
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(bpid);
+ kfree(pool);
+
+ return NULL;
+}
+
+void bman_free_pool(struct bman_pool *pool)
+{
+ if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(pool->params.bpid, zero_thresholds);
+ if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(pool->params.bpid);
+ kfree(pool);
+}
+
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
+{
+ return &pool->params;
+}
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
+
+#define BMAN_BUF_MASK 0x0000fffffffffffful
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+ u32 flags __maybe_unused)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ u32 i = num - 1;
+ u8 avail;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
+ return -EINVAL;
+#endif
+
+ p = get_affine_portal();
+ avail = bm_rcr_get_avail(&p->p);
+ if (avail < 2)
+ update_rcr_ci(p, avail);
+ r = bm_rcr_start(&p->p);
+ if (unlikely(!r))
+ return -EBUSY;
+
+ /*
+ * we can copy all but the first entry, as this can trigger badness
+ * with the valid-bit
+ */
+ r->bufs[0].opaque =
+ cpu_to_be64(((u64)pool->params.bpid << 48) |
+ (bufs[0].opaque & BMAN_BUF_MASK));
+ if (i) {
+ for (i = 1; i < num; i++)
+ r->bufs[i].opaque =
+ cpu_to_be64(bufs[i].opaque & BMAN_BUF_MASK);
+ }
+
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+ return 0;
+}
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+ u32 flags __maybe_unused)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ struct bm_mc_result *mcr;
+ int ret, i;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
+ return -EINVAL;
+#endif
+
+ mcc = bm_mc_start(&p->p);
+ mcc->acquire.bpid = pool->params.bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs) {
+ for (i = 0; i < num; i++)
+ bufs[i].opaque =
+ be64_to_cpu(mcr->acquire.bufs[i].opaque);
+ }
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
+
+int bman_query_pools(struct bm_pool_state *state)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_result *mcr;
+
+ bm_mc_start(&p->p);
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) ==
+ BM_MCR_VERB_CMD_QUERY);
+ *state = mcr->query;
+ state->as.state.state[0] = be32_to_cpu(state->as.state.state[0]);
+ state->as.state.state[1] = be32_to_cpu(state->as.state.state[1]);
+ state->ds.state.state[0] = be32_to_cpu(state->ds.state.state[0]);
+ state->ds.state.state[1] = be32_to_cpu(state->ds.state.state[1]);
+ return 0;
+}
+
+u32 bman_query_free_buffers(struct bman_pool *pool)
+{
+ return bm_pool_free_buffers(pool->params.bpid);
+}
+
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
+{
+ u32 bpid;
+
+ bpid = bman_get_params(pool)->bpid;
+
+ return bm_pool_set(bpid, thresholds);
+}
+
+int bman_shutdown_pool(u32 bpid)
+{
+ struct bman_portal *p = get_affine_portal();
+ return bm_shutdown_pool(&p->p, bpid);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.h
new file mode 100644
index 00000000..21a6bee7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.h
@@ -0,0 +1,541 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __BMAN_H
+#define __BMAN_H
+
+#include "bman_priv.h"
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x3000
+#define BM_REG_RCR_CI_CINH 0x3100
+#define BM_REG_RCR_ITR 0x3200
+#define BM_REG_CFG 0x3300
+#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
+#define BM_REG_ISR 0x3e00
+#define BM_REG_IIR 0x3ec0
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrade performance. Hence the use of the __raw_*() interfaces, which
+ * simply ensure that the compiler treats the portal registers as volatile (ie.
+ * non-coherent).
+ */
+
+/* Cache-inhibited register access. */
+#define __bm_in(bm, o) be32_to_cpu(__raw_readl((bm)->ci + (o)))
+#define __bm_out(bm, o, val) __raw_writel(cpu_to_be32(val), \
+ (bm)->ci + (o))
+#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg)
+#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->ce + (o))
+#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->ce + (o))
+#define __bm_cl_in(bm, o) be32_to_cpu(__raw_readl((bm)->ce + (o)))
+#define __bm_cl_out(bm, o, val) \
+ do { \
+ u32 *__tmpclout = (bm)->ce + (o); \
+ __raw_writel(cpu_to_be32(val), __tmpclout); \
+ dcbf(__tmpclout); \
+ } while (0)
+#define __bm_cl_invalidate(bm, o) dccivac((bm)->ce + (o))
+#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
+#define bm_cl_invalidate(reg)\
+ __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues.
+ */
+static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
+
+/* Portal modes.
+ * Enum types;
+ * pmode == production mode
+ * cmode == consumption mode,
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate;
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
+};
+
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
+};
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ struct bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
+};
+
+struct bm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+ struct bm_portal_config config;
+} ____cacheline_aligned;
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define RCR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void RCR_INC(struct bm_rcr *rcr)
+{
+ /* NB: this is odd-looking, but experiments show that it generates
+ * fast code with essentially no branching overheads. We increment to
+ * the next RCR pointer and handle overflow and 'vbit'.
+ */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+ rcr->cursor = RCR_CARRYCLEAR(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
+
+static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ __maybe_unused enum bm_rcr_cmode cmode)
+{
+ /* This use of 'register', as well as all other occurrences, is because
+ * it has been observed to generate much faster code with gcc than is
+ * otherwise the case.
+ */
+ register struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.ce + BM_CL_RCR;
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+
+ pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
+ rcr->available = BM_RCR_SIZE - 1
+ - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(RCR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void bm_rcr_finish(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!rcr->busy);
+#endif
+ if (pi != RCR_PTR2IDX(rcr->cursor))
+ pr_crit("losing uncommitted RCR entries\n");
+ if (ci != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!rcr->busy);
+#endif
+ if (!rcr->available)
+ return NULL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 1;
+#endif
+ dcbz_64(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ rcr->busy = 0;
+#endif
+}
+
+static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
+ struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode != bm_rcr_pvb);
+#endif
+ if (rcr->available == 1)
+ return NULL;
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcr->cursor);
+ RCR_INC(rcr);
+ rcr->available--;
+ dcbz_64(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pci);
+#endif
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ hwsync();
+ bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
+#endif
+ bm_cl_invalidate(RCR_PI);
+ bm_cl_touch_rw(RCR_PI);
+}
+
+static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
+#endif
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ lwsync();
+ bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+#endif
+ lwsync();
+ rcursor = rcr->cursor;
+ rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcursor);
+ RCR_INC(rcr);
+ rcr->available--;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+#endif
+}
+
+static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cci);
+#endif
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+#endif
+ bm_cl_touch_ro(RCR_CI);
+}
+
+static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+#endif
+ rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(RCR_CI);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->ithresh;
+}
+
+static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ rcr->ithresh = ithresh;
+ bm_out(RCR_ITR, ithresh);
+}
+
+static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->available;
+}
+
+static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+/* --- Management command API --- */
+
+static inline int bm_mc_init(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + BM_CL_CR;
+ mc->rr = portal->addr.ce + BM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+ BM_MCC_VERB_VBIT) ? 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_idle;
+#endif
+ return 0;
+}
+
+static inline void bm_mc_finish(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_idle);
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_idle);
+ mc->state = mc_user;
+#endif
+ dcbz_64(mc->cr);
+ return mc->cr;
+}
+
+static inline void bm_mc_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_user);
+ mc->state = mc_idle;
+#endif
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_user);
+#endif
+ lwsync();
+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+ dcbf(mc->cr);
+ dcbit_ro(rr);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_hw;
+#endif
+}
+
+static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_hw);
+#endif
+ /* The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering.
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dcbit_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
+
+#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
+#define SCN_BIT(bpid) (0x80000000 >> (bpid & 31))
+static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
+ int enable)
+{
+ u32 val;
+
+ DPAA_ASSERT(bpid < bman_pool_max);
+ /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
+ val = __bm_in(&portal->addr, SCN_REG(bpid));
+ if (enable)
+ val |= SCN_BIT(bpid);
+ else
+ val &= ~SCN_BIT(bpid);
+ __bm_out(&portal->addr, SCN_REG(bpid), val);
+}
+
+static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
+{
+#if defined(RTE_ARCH_ARM64)
+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 6));
+#else
+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
+#endif
+}
+
+static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
+ u32 val)
+{
+#if defined(RTE_ARCH_ARM64)
+ __bm_out(&portal->addr, BM_REG_ISR + (n << 6), val);
+#else
+ __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
+#endif
+}
+
+/* Buffer Pool Cleanup */
+static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
+{
+ struct bm_mc_command *bm_cmd;
+ struct bm_mc_result *bm_res;
+
+ int aq_count = 0;
+ bool stop = false;
+
+ while (!stop) {
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(p);
+ bm_cmd->acquire.bpid = bpid;
+ bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ while (!(bm_res = bm_mc_result(p)))
+ cpu_relax();
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ /* Pool is empty */
+ stop = true;
+ } else
+ ++aq_count;
+ };
+ return 0;
+}
+
+#endif /* __BMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_driver.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_driver.c
new file mode 100644
index 00000000..b14b5905
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <rte_branch_prediction.h>
+
+#include <fsl_usd.h>
+#include <process.h>
+#include "bman_priv.h"
+#include <sys/ioctl.h>
+
+/*
+ * Global variables of the max portal/pool number this bman version supported
+ */
+static u16 bman_ip_rev;
+u16 bman_pool_max;
+static void *bman_ccsr_map;
+
+/*****************/
+/* Portal driver */
+/*****************/
+
+static __thread int fd = -1;
+static __thread struct bm_portal_config pcfg;
+static __thread struct dpaa_ioctl_portal_map map = {
+ .type = dpaa_portal_bman
+};
+
+static int fsl_bman_portal_init(uint32_t idx, int is_shared)
+{
+ cpu_set_t cpuset;
+ struct bman_portal *portal;
+ int loop, ret;
+ struct dpaa_ioctl_irq_map irq_map;
+
+ /* Verify the thread's cpu-affinity */
+ ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+ &cpuset);
+ if (ret) {
+ error(0, ret, "pthread_getaffinity_np()");
+ return ret;
+ }
+ pcfg.cpu = -1;
+ for (loop = 0; loop < CPU_SETSIZE; loop++)
+ if (CPU_ISSET(loop, &cpuset)) {
+ if (pcfg.cpu != -1) {
+ pr_err("Thread is not affine to 1 cpu");
+ return -EINVAL;
+ }
+ pcfg.cpu = loop;
+ }
+ if (pcfg.cpu == -1) {
+ pr_err("Bug in getaffinity handling!");
+ return -EINVAL;
+ }
+ /* Allocate and map a bman portal */
+ map.index = idx;
+ ret = process_portal_map(&map);
+ if (ret) {
+ error(0, ret, "process_portal_map()");
+ return ret;
+ }
+ /* Make the portal's cache-[enabled|inhibited] regions */
+ pcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
+ pcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
+ pcfg.is_shared = is_shared;
+ pcfg.index = map.index;
+ bman_depletion_fill(&pcfg.mask);
+
+ fd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (fd == -1) {
+ pr_err("BMan irq init failed");
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+ /* Use the IRQ FD as a unique IRQ number */
+ pcfg.irq = fd;
+
+ portal = bman_create_affine_portal(&pcfg);
+ if (!portal) {
+ pr_err("Bman portal initialisation failed (%d)",
+ pcfg.cpu);
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+
+ /* Set the IRQ number */
+ irq_map.type = dpaa_portal_bman;
+ irq_map.portal_cinh = map.addr.cinh;
+ process_portal_irq_map(fd, &irq_map);
+ return 0;
+}
+
+static int fsl_bman_portal_finish(void)
+{
+ __maybe_unused const struct bm_portal_config *cfg;
+ int ret;
+
+ process_portal_irq_unmap(fd);
+
+ cfg = bman_destroy_affine_portal();
+ DPAA_BUG_ON(cfg != &pcfg);
+ ret = process_portal_unmap(&map.addr);
+ if (ret)
+ error(0, ret, "process_portal_unmap()");
+ return ret;
+}
+
+int bman_thread_init(void)
+{
+ /* Convert from contiguous/virtual cpu numbering to real cpu when
+ * calling into the code that is dependent on the device naming.
+ */
+ return fsl_bman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
+}
+
+int bman_thread_finish(void)
+{
+ return fsl_bman_portal_finish();
+}
+
+void bman_thread_irq(void)
+{
+ qbman_invoke_irq(pcfg.irq);
+ /* Now we need to uninhibit interrupts. This is the only code outside
+ * the regular portal driver that manipulates any portal register, so
+ * rather than breaking that encapsulation I am simply hard-coding the
+ * offset to the inhibit register here.
+ */
+ out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
+}
+
+int bman_init_ccsr(const struct device_node *node)
+{
+ static int ccsr_map_fd;
+ uint64_t phys_addr;
+ const uint32_t *bman_addr;
+ uint64_t regs_size;
+
+ bman_addr = of_get_address(node, 0, &regs_size, NULL);
+ if (!bman_addr) {
+ pr_err("of_get_address cannot return BMan address");
+ return -EINVAL;
+ }
+ phys_addr = of_translate_address(node, bman_addr);
+ if (!phys_addr) {
+ pr_err("of_translate_address failed");
+ return -EINVAL;
+ }
+
+ ccsr_map_fd = open(BMAN_CCSR_MAP, O_RDWR);
+ if (unlikely(ccsr_map_fd < 0)) {
+ pr_err("Can not open /dev/mem for BMan CCSR map");
+ return ccsr_map_fd;
+ }
+
+ bman_ccsr_map = mmap(NULL, regs_size, PROT_READ |
+ PROT_WRITE, MAP_SHARED, ccsr_map_fd, phys_addr);
+ if (bman_ccsr_map == MAP_FAILED) {
+ pr_err("Can not map BMan CCSR base Bman: "
+ "0x%x Phys: 0x%" PRIx64 " size 0x%" PRIu64,
+ *bman_addr, phys_addr, regs_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bman_global_init(void)
+{
+ const struct device_node *dt_node;
+ static int done;
+
+ if (done)
+ return -EBUSY;
+ /* Use the device-tree to determine IP revision until something better
+ * is devised.
+ */
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,bman-portal");
+ if (!dt_node) {
+ pr_err("No bman portals available for any CPU\n");
+ return -ENODEV;
+ }
+ if (of_device_is_compatible(dt_node, "fsl,bman-portal-1.0") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-1.0.0")) {
+ bman_ip_rev = BMAN_REV10;
+ bman_pool_max = 64;
+ } else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.0") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.0.8")) {
+ bman_ip_rev = BMAN_REV20;
+ bman_pool_max = 8;
+ } else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.0") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.1") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.2") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.3")) {
+ bman_ip_rev = BMAN_REV21;
+ bman_pool_max = 64;
+ } else {
+ pr_warn("unknown BMan version in portal node,default "
+ "to rev1.0");
+ bman_ip_rev = BMAN_REV10;
+ bman_pool_max = 64;
+ }
+
+ if (!bman_ip_rev) {
+ pr_err("Unknown bman portal version\n");
+ return -ENODEV;
+ }
+ {
+ const struct device_node *dn = of_find_compatible_node(NULL,
+ NULL, "fsl,bman");
+ if (!dn)
+ pr_err("No bman device node available");
+
+ if (bman_init_ccsr(dn))
+ pr_err("BMan CCSR map failed.");
+ }
+
+ done = 1;
+ return 0;
+}
+
+#define BMAN_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
+u32 bm_pool_free_buffers(u32 bpid)
+{
+ return in_be32(bman_ccsr_map + BMAN_POOL_CONTENT(bpid));
+}
+
+static u32 __generate_thresh(u32 val, int roundup)
+{
+ u32 e = 0; /* co-efficient, exponent */
+ int oddbit = 0;
+
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ DPAA_ASSERT(e < 0x10);
+ return (val | (e << 8));
+}
+
+#define POOL_SWDET(n) (0x0000 + ((n) * 0x04))
+#define POOL_HWDET(n) (0x0100 + ((n) * 0x04))
+#define POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
+#define POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
+int bm_pool_set(u32 bpid, const u32 *thresholds)
+{
+ if (!bman_ccsr_map)
+ return -ENODEV;
+ if (bpid >= bman_pool_max)
+ return -EINVAL;
+ out_be32(bman_ccsr_map + POOL_SWDET(bpid),
+ __generate_thresh(thresholds[0], 0));
+ out_be32(bman_ccsr_map + POOL_SWDXT(bpid),
+ __generate_thresh(thresholds[1], 1));
+ out_be32(bman_ccsr_map + POOL_HWDET(bpid),
+ __generate_thresh(thresholds[2], 0));
+ out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
+ __generate_thresh(thresholds[3], 1));
+ return 0;
+}
+
+#define BMAN_LOW_DEFAULT_THRESH 0x40
+#define BMAN_HIGH_DEFAULT_THRESH 0x80
+int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
+ const u32 high_thresh)
+{
+ if (!bman_ccsr_map)
+ return -ENODEV;
+ if (bpid >= bman_pool_max)
+ return -EINVAL;
+ if (low_thresh && high_thresh) {
+ out_be32(bman_ccsr_map + POOL_HWDET(bpid),
+ __generate_thresh(low_thresh, 0));
+ out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
+ __generate_thresh(high_thresh, 1));
+ } else {
+ out_be32(bman_ccsr_map + POOL_HWDET(bpid),
+ __generate_thresh(BMAN_LOW_DEFAULT_THRESH, 0));
+ out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
+ __generate_thresh(BMAN_HIGH_DEFAULT_THRESH, 1));
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_priv.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_priv.h
new file mode 100644
index 00000000..5a3e330d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_priv.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __BMAN_PRIV_H
+#define __BMAN_PRIV_H
+
+#include "dpaa_sys.h"
+#include <fsl_bman.h>
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+
+#define BMAN_PORTAL_IRQ_PATH "/dev/fsl-usdpaa-irq"
+#define BMAN_CCSR_MAP "/dev/mem"
+
+/* This mask contains all the "irqsource" bits visible to API users */
+#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
+
+/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
+ * the disable register" rather than "disable the ability to write".
+ */
+#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
+#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
+#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
+#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
+#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
+#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
+#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
+
+/*
+ * Global variables of the max portal/pool number this bman version supported
+ */
+extern u16 bman_pool_max;
+
+/* used by CCSR and portal interrupt code */
+enum bm_isr_reg {
+ bm_isr_status = 0,
+ bm_isr_enable = 1,
+ bm_isr_disable = 2,
+ bm_isr_inhibit = 3
+};
+
+struct bm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ /* This is used for any "core-affine" portals, ie. default portals
+ * associated to the corresponding cpu. -1 implies that there is no
+ * core affinity configured.
+ */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /* the unique index of this portal */
+ u32 index;
+ /* Is this portal shared? (If so, it has coarser locking and demuxes
+ * processing on behalf of other CPUs.).
+ */
+ int is_shared;
+ /* These are the buffer pool IDs that may be used via this portal. */
+ struct bman_depletion mask;
+
+};
+
+int bman_init_ccsr(const struct device_node *node);
+
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config);
+const struct bm_portal_config *bman_destroy_affine_portal(void);
+
+/* Set depletion thresholds associated with a buffer pool. Requires that the
+ * operating system have access to Bman CCSR (ie. compiled in support and
+ * run-time access courtesy of the device-tree).
+ */
+int bm_pool_set(u32 bpid, const u32 *thresholds);
+
+/* Read the free buffer count for a given buffer */
+u32 bm_pool_free_buffers(u32 bpid);
+
+#endif /* __BMAN_PRIV_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_alloc.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_alloc.c
new file mode 100644
index 00000000..a05803c2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_alloc.c
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2009-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include "dpaa_sys.h"
+#include <process.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_bpid, result, count, align, partial);
+}
+
+void bman_release_bpid_range(u32 bpid, u32 count)
+{
+ process_release(dpaa_id_bpid, bpid, count);
+}
+
+int bman_reserve_bpid_range(u32 bpid, u32 count)
+{
+ return process_reserve(dpaa_id_bpid, bpid, count);
+}
+
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_fqid, result, count, align, partial);
+}
+
+void qman_release_fqid_range(u32 fqid, u32 count)
+{
+ process_release(dpaa_id_fqid, fqid, count);
+}
+
+int qman_reserve_fqid_range(u32 fqid, unsigned int count)
+{
+ return process_reserve(dpaa_id_fqid, fqid, count);
+}
+
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_qpool, result, count, align, partial);
+}
+
+void qman_release_pool_range(u32 pool, u32 count)
+{
+ process_release(dpaa_id_qpool, pool, count);
+}
+
+int qman_reserve_pool_range(u32 pool, u32 count)
+{
+ return process_reserve(dpaa_id_qpool, pool, count);
+}
+
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_cgrid, result, count, align, partial);
+}
+
+void qman_release_cgrid_range(u32 cgrid, u32 count)
+{
+ process_release(dpaa_id_cgrid, cgrid, count);
+}
+
+int qman_reserve_cgrid_range(u32 cgrid, u32 count)
+{
+ return process_reserve(dpaa_id_cgrid, cgrid, count);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.c
new file mode 100644
index 00000000..9d6bfd40
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.c
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <process.h>
+#include "dpaa_sys.h"
+
+struct process_interrupt {
+ int irq;
+ irqreturn_t (*isr)(int irq, void *arg);
+ unsigned long flags;
+ const char *name;
+ void *arg;
+ struct list_head node;
+};
+
+static COMPAT_LIST_HEAD(process_irq_list);
+static pthread_mutex_t process_irq_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static void process_interrupt_install(struct process_interrupt *irq)
+{
+ int ret;
+ /* Add the irq to the end of the list */
+ ret = pthread_mutex_lock(&process_irq_lock);
+ assert(!ret);
+ list_add_tail(&irq->node, &process_irq_list);
+ ret = pthread_mutex_unlock(&process_irq_lock);
+ assert(!ret);
+}
+
+static void process_interrupt_remove(struct process_interrupt *irq)
+{
+ int ret;
+
+ ret = pthread_mutex_lock(&process_irq_lock);
+ assert(!ret);
+ list_del(&irq->node);
+ ret = pthread_mutex_unlock(&process_irq_lock);
+ assert(!ret);
+}
+
+static struct process_interrupt *process_interrupt_find(int irq_num)
+{
+ int ret;
+ struct process_interrupt *i = NULL;
+
+ ret = pthread_mutex_lock(&process_irq_lock);
+ assert(!ret);
+ list_for_each_entry(i, &process_irq_list, node) {
+ if (i->irq == irq_num)
+ goto done;
+ }
+done:
+ ret = pthread_mutex_unlock(&process_irq_lock);
+ assert(!ret);
+ return i;
+}
+
+/* This is the interface from the platform-agnostic driver code to (de)register
+ * interrupt handlers. We simply create/destroy corresponding structs.
+ */
+int qbman_request_irq(int irq, irqreturn_t (*isr)(int irq, void *arg),
+ unsigned long flags, const char *name,
+ void *arg __maybe_unused)
+{
+ struct process_interrupt *irq_node =
+ kmalloc(sizeof(*irq_node), GFP_KERNEL);
+
+ if (!irq_node)
+ return -ENOMEM;
+ irq_node->irq = irq;
+ irq_node->isr = isr;
+ irq_node->flags = flags;
+ irq_node->name = name;
+ irq_node->arg = arg;
+ process_interrupt_install(irq_node);
+ return 0;
+}
+
+int qbman_free_irq(int irq, __maybe_unused void *arg)
+{
+ struct process_interrupt *irq_node = process_interrupt_find(irq);
+
+ if (!irq_node)
+ return -EINVAL;
+ process_interrupt_remove(irq_node);
+ kfree(irq_node);
+ return 0;
+}
+
+/* This is the interface from the platform-specific driver code to obtain
+ * interrupt handlers that have been registered.
+ */
+void qbman_invoke_irq(int irq)
+{
+ struct process_interrupt *irq_node = process_interrupt_find(irq);
+
+ if (irq_node)
+ irq_node->isr(irq, irq_node->arg);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.h
new file mode 100644
index 00000000..034991ba
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <of.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+#define DPAA_ASSERT(x) RTE_ASSERT(x)
+
+/* This is the interface from the platform-agnostic driver code to (de)register
+ * interrupt handlers. We simply create/destroy corresponding structs.
+ */
+int qbman_request_irq(int irq, irqreturn_t (*isr)(int irq, void *arg),
+ unsigned long flags, const char *name, void *arg);
+int qbman_free_irq(int irq, void *arg);
+
+void qbman_invoke_irq(int irq);
+
+#endif /* __DPAA_SYS_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/process.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/process.c
new file mode 100644
index 00000000..2c23c98d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/process.c
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2011-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+#include <assert.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+
+#include "process.h"
+
+#include <fsl_usd.h>
+
+/* As higher-level drivers will be built on top of this (dma_mem, qbman, ...),
+ * it's preferable that the process driver itself not provide any exported API.
+ * As such, combined with the fact that none of these operations are
+ * performance critical, it is justified to use lazy initialisation, so that's
+ * what the lock is for.
+ */
+static int fd = -1;
+static pthread_mutex_t fd_init_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static int check_fd(void)
+{
+ int ret;
+
+ if (fd >= 0)
+ return 0;
+ ret = pthread_mutex_lock(&fd_init_lock);
+ assert(!ret);
+ /* check again with the lock held */
+ if (fd < 0)
+ fd = open(PROCESS_PATH, O_RDWR);
+ ret = pthread_mutex_unlock(&fd_init_lock);
+ assert(!ret);
+ return (fd >= 0) ? 0 : -ENODEV;
+}
+
+#define DPAA_IOCTL_MAGIC 'u'
+struct dpaa_ioctl_id_alloc {
+ uint32_t base; /* Return value, the start of the allocated range */
+ enum dpaa_id_type id_type; /* what kind of resource(s) to allocate */
+ uint32_t num; /* how many IDs to allocate (and return value) */
+ uint32_t align; /* must be a power of 2, 0 is treated like 1 */
+ int partial; /* whether to allow less than 'num' */
+};
+
+struct dpaa_ioctl_id_release {
+ /* Input; */
+ enum dpaa_id_type id_type;
+ uint32_t base;
+ uint32_t num;
+};
+
+struct dpaa_ioctl_id_reserve {
+ enum dpaa_id_type id_type;
+ uint32_t base;
+ uint32_t num;
+};
+
+#define DPAA_IOCTL_ID_ALLOC \
+ _IOWR(DPAA_IOCTL_MAGIC, 0x01, struct dpaa_ioctl_id_alloc)
+#define DPAA_IOCTL_ID_RELEASE \
+ _IOW(DPAA_IOCTL_MAGIC, 0x02, struct dpaa_ioctl_id_release)
+#define DPAA_IOCTL_ID_RESERVE \
+ _IOW(DPAA_IOCTL_MAGIC, 0x0A, struct dpaa_ioctl_id_reserve)
+
+int process_alloc(enum dpaa_id_type id_type, uint32_t *base, uint32_t num,
+ uint32_t align, int partial)
+{
+ struct dpaa_ioctl_id_alloc id = {
+ .id_type = id_type,
+ .num = num,
+ .align = align,
+ .partial = partial
+ };
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+ ret = ioctl(fd, DPAA_IOCTL_ID_ALLOC, &id);
+ if (ret)
+ return ret;
+ for (ret = 0; ret < (int)id.num; ret++)
+ base[ret] = id.base + ret;
+ return id.num;
+}
+
+void process_release(enum dpaa_id_type id_type, uint32_t base, uint32_t num)
+{
+ struct dpaa_ioctl_id_release id = {
+ .id_type = id_type,
+ .base = base,
+ .num = num
+ };
+ int ret = check_fd();
+
+ if (ret) {
+ fprintf(stderr, "Process FD failure\n");
+ return;
+ }
+ ret = ioctl(fd, DPAA_IOCTL_ID_RELEASE, &id);
+ if (ret)
+ fprintf(stderr, "Process FD ioctl failure type %d base 0x%x num %d\n",
+ id_type, base, num);
+}
+
+int process_reserve(enum dpaa_id_type id_type, uint32_t base, uint32_t num)
+{
+ struct dpaa_ioctl_id_reserve id = {
+ .id_type = id_type,
+ .base = base,
+ .num = num
+ };
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+ return ioctl(fd, DPAA_IOCTL_ID_RESERVE, &id);
+}
+
+/***************************************/
+/* Mapping and using QMan/BMan portals */
+/***************************************/
+
+#define DPAA_IOCTL_PORTAL_MAP \
+ _IOWR(DPAA_IOCTL_MAGIC, 0x07, struct dpaa_ioctl_portal_map)
+#define DPAA_IOCTL_PORTAL_UNMAP \
+ _IOW(DPAA_IOCTL_MAGIC, 0x08, struct dpaa_portal_map)
+
+int process_portal_map(struct dpaa_ioctl_portal_map *params)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_PORTAL_MAP, params);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_PORTAL_MAP)");
+ return ret;
+ }
+ return 0;
+}
+
+int process_portal_unmap(struct dpaa_portal_map *map)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_PORTAL_UNMAP, map);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_PORTAL_UNMAP)");
+ return ret;
+ }
+ return 0;
+}
+
+#define DPAA_IOCTL_PORTAL_IRQ_MAP \
+ _IOW(DPAA_IOCTL_MAGIC, 0x09, struct dpaa_ioctl_irq_map)
+
+int process_portal_irq_map(int ifd, struct dpaa_ioctl_irq_map *map)
+{
+ map->fd = fd;
+ return ioctl(ifd, DPAA_IOCTL_PORTAL_IRQ_MAP, map);
+}
+
+int process_portal_irq_unmap(int ifd)
+{
+ return close(ifd);
+}
+
+struct dpaa_ioctl_raw_portal {
+ /* inputs */
+ enum dpaa_portal_type type; /* Type of portal to allocate */
+
+ uint8_t enable_stash; /* set to non zero to turn on stashing */
+ /* Stashing attributes for the portal */
+ uint32_t cpu;
+ uint32_t cache;
+ uint32_t window;
+ /* Specifies the stash request queue this portal should use */
+ uint8_t sdest;
+
+ /* Specifes a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+ * for don't care. The portal index will be populated by the
+ * driver when the ioctl() successfully completes.
+ */
+ uint32_t index;
+
+ /* outputs */
+ uint64_t cinh;
+ uint64_t cena;
+};
+
+#define DPAA_IOCTL_ALLOC_RAW_PORTAL \
+ _IOWR(DPAA_IOCTL_MAGIC, 0x0C, struct dpaa_ioctl_raw_portal)
+
+#define DPAA_IOCTL_FREE_RAW_PORTAL \
+ _IOR(DPAA_IOCTL_MAGIC, 0x0D, struct dpaa_ioctl_raw_portal)
+
+static int process_portal_allocate(struct dpaa_ioctl_raw_portal *portal)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_ALLOC_RAW_PORTAL, portal);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_ALLOC_RAW_PORTAL)");
+ return ret;
+ }
+ return 0;
+}
+
+static int process_portal_free(struct dpaa_ioctl_raw_portal *portal)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_FREE_RAW_PORTAL, portal);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_FREE_RAW_PORTAL)");
+ return ret;
+ }
+ return 0;
+}
+
+int qman_allocate_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+ int ret;
+
+ input.type = dpaa_portal_qman;
+ input.index = portal->index;
+ input.enable_stash = portal->enable_stash;
+ input.cpu = portal->cpu;
+ input.cache = portal->cache;
+ input.window = portal->window;
+ input.sdest = portal->sdest;
+
+ ret = process_portal_allocate(&input);
+ if (ret)
+ return ret;
+ portal->index = input.index;
+ portal->cinh = input.cinh;
+ portal->cena = input.cena;
+ return 0;
+}
+
+int qman_free_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+
+ input.type = dpaa_portal_qman;
+ input.index = portal->index;
+ input.cinh = portal->cinh;
+ input.cena = portal->cena;
+
+ return process_portal_free(&input);
+}
+
+int bman_allocate_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+ int ret;
+
+ input.type = dpaa_portal_bman;
+ input.index = portal->index;
+ input.enable_stash = 0;
+
+ ret = process_portal_allocate(&input);
+ if (ret)
+ return ret;
+ portal->index = input.index;
+ portal->cinh = input.cinh;
+ portal->cena = input.cena;
+ return 0;
+}
+
+int bman_free_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+
+ input.type = dpaa_portal_bman;
+ input.index = portal->index;
+ input.cinh = portal->cinh;
+ input.cena = portal->cena;
+
+ return process_portal_free(&input);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.c
new file mode 100644
index 00000000..7c17027f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.c
@@ -0,0 +1,2755 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include "qman.h"
+#include <rte_branch_prediction.h>
+#include <rte_dpaa_bus.h>
+#include <rte_eventdev.h>
+#include <rte_byteorder.h>
+
+/* Compilation constants */
+#define DQRR_MAXFILL 15
+#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
+#define IRQNAME "QMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
+/* maximum number of DQRR entries to process in qman_poll() */
+#define FSL_QMAN_POLL_LIMIT 8
+
+/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
+ * inter-processor locking only. Note, FQLOCK() is always called either under a
+ * local_irq_save() or from interrupt context - hence there's no need for irq
+ * protection (and indeed, attempting to nest irq-protection doesn't work, as
+ * the "irq en/disable" machinery isn't recursive...).
+ */
+#define FQLOCK(fq) \
+ do { \
+ struct qman_fq *__fq478 = (fq); \
+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+ spin_lock(&__fq478->fqlock); \
+ } while (0)
+#define FQUNLOCK(fq) \
+ do { \
+ struct qman_fq *__fq478 = (fq); \
+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+ spin_unlock(&__fq478->fqlock); \
+ } while (0)
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+ dpaa_set_bits(mask, &fq->flags);
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+ dpaa_clear_bits(mask, &fq->flags);
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+ return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+ return !(fq->flags & mask);
+}
+
+struct qman_portal {
+ struct qm_portal p;
+ /* PORTAL_BITS_*** - dynamic, strictly internal */
+ unsigned long bits;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ u32 use_eqcr_ci_stashing;
+ u32 slowpoll; /* only used when interrupts are off */
+ /* only 1 volatile dequeue at a time */
+ struct qman_fq *vdqcr_owned;
+ u32 sdqcr;
+ int dqrr_disable_ref;
+ /* A portal-specific handler for DCP ERNs. If this is NULL, the global
+ * handler is called instead.
+ */
+ qman_cb_dc_ern cb_dc_ern;
+ /* When the cpu-affine portal is activated, this is non-NULL */
+ const struct qm_portal_config *config;
+ struct dpa_rbtree retire_table;
+ char irqname[MAX_IRQNAME];
+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+ struct qman_cgrs *cgrs;
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+ spinlock_t cgr_lock;
+ /* track if memory was allocated by the driver */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* Keep a shadow copy of the DQRR on LE systems as the SW needs to
+ * do byte swaps of DQRR read only memory. First entry must be aligned
+ * to 2 ** 10 to ensure DQRR index calculations based shadow copy
+ * address (6 bits for address shift + 4 bits for the DQRR size).
+ */
+ struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
+ __attribute__((aligned(1024)));
+#endif
+};
+
+/* Global handler for DCP ERNs. Used when the portal receiving the message does
+ * not have a portal-specific handler.
+ */
+static qman_cb_dc_ern cb_dc_ern;
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+ return &RTE_PER_LCORE(qman_affine_portal);
+}
+
+/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
+ * retirement notifications (the fact they are sometimes h/w-consumed means that
+ * contextB isn't always a s/w demux - and as we can't know which case it is
+ * when looking at the notification, we have to use the slow lookup for all of
+ * them). NB, it's possible to have multiple FQ objects refer to the same FQID
+ * (though at most one of them should be the consumer), so this table isn't for
+ * all FQs - FQs are added when retirement commands are issued, and removed when
+ * they complete, which also massively reduces the size of this table.
+ */
+IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * to the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+ int ret = fqtree_push(&p->retire_table, fq);
+
+ if (ret)
+ pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
+ return ret;
+}
+
+static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+ fqtree_del(&p->retire_table, fq);
+}
+
+static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
+{
+ return fqtree_find(&p->retire_table, fqid);
+}
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+static void **qman_fq_lookup_table;
+static size_t qman_fq_lookup_table_size;
+
+int qman_setup_fq_lookup_table(size_t num_entries)
+{
+ num_entries++;
+ /* Allocate 1 more entry since the first entry is not used */
+ qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
+ if (!qman_fq_lookup_table) {
+ pr_err("QMan: Could not allocate fq lookup table\n");
+ return -ENOMEM;
+ }
+ memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
+ qman_fq_lookup_table_size = num_entries;
+ pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
+ qman_fq_lookup_table,
+ (unsigned long)qman_fq_lookup_table_size);
+ return 0;
+}
+
+/* global structure that maintains fq object mapping */
+static DEFINE_SPINLOCK(fq_hash_table_lock);
+
+static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
+{
+ u32 i;
+
+ spin_lock(&fq_hash_table_lock);
+ /* Can't use index zero because this has special meaning
+ * in context_b field.
+ */
+ for (i = 1; i < qman_fq_lookup_table_size; i++) {
+ if (qman_fq_lookup_table[i] == NULL) {
+ *entry = i;
+ qman_fq_lookup_table[i] = fq;
+ spin_unlock(&fq_hash_table_lock);
+ return 0;
+ }
+ }
+ spin_unlock(&fq_hash_table_lock);
+ return -ENOMEM;
+}
+
+static void clear_fq_table_entry(u32 entry)
+{
+ spin_lock(&fq_hash_table_lock);
+ DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
+ qman_fq_lookup_table[entry] = NULL;
+ spin_unlock(&fq_hash_table_lock);
+}
+
+static inline struct qman_fq *get_fq_table_entry(u32 entry)
+{
+ DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
+ return qman_fq_lookup_table[entry];
+}
+#endif
+
+static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
+{
+ /* Byteswap the FQD to HW format */
+ fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
+ fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
+ fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
+ fqd->context_b = cpu_to_be32(fqd->context_b);
+ fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
+ fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
+}
+
+static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
+{
+ /* Byteswap the FQD to CPU format */
+ fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
+ fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
+ fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
+ fqd->context_b = be32_to_cpu(fqd->context_b);
+ fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
+}
+
+static inline void cpu_to_hw_fd(struct qm_fd *fd)
+{
+ fd->addr = cpu_to_be40(fd->addr);
+ fd->status = cpu_to_be32(fd->status);
+ fd->opaque = cpu_to_be32(fd->opaque);
+}
+
+static inline void hw_fd_to_cpu(struct qm_fd *fd)
+{
+ fd->addr = be40_to_cpu(fd->addr);
+ fd->status = be32_to_cpu(fd->status);
+ fd->opaque = be32_to_cpu(fd->opaque);
+}
+
+/* In the case that slow- and fast-path handling are both done by qman_poll()
+ * (ie. because there is no interrupt handling), we ought to balance how often
+ * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
+ * sources, so we call the fast poll 'n' times before calling the slow poll
+ * once. The idle decrementer constant is used when the last slow-poll detected
+ * no work to do, and the busy decrementer constant when the last slow-poll had
+ * work to do.
+ */
+#define SLOW_POLL_IDLE 1000
+#define SLOW_POLL_BUSY 10
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit);
+
+/* Portal interrupt handler */
+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
+{
+ struct qman_portal *p = ptr;
+ /*
+ * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
+ * it could race against a Query Congestion State command also given
+ * as part of the handling of this interrupt source. We mustn't
+ * clear it a second time in this top-level function.
+ */
+ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
+ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
+ u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
+ /* DQRR-handling if it's interrupt-driven */
+ if (is & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
+ /* Handling of anything else that's interrupt-driven */
+ clear |= __poll_portal_slow(p, is);
+ qm_isr_status_clear(&p->p, clear);
+ return IRQ_HANDLED;
+}
+
+/* This inner version is used privately by qman_create_affine_portal(), as well
+ * as by the exported qman_stop_dequeues().
+ */
+static inline void qman_stop_dequeues_ex(struct qman_portal *p)
+{
+ if (!(p->dqrr_disable_ref++))
+ qm_dqrr_set_maxfill(&p->p, 0);
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+ const struct qm_mr_entry *msg;
+loop:
+ msg = qm_mr_current(p);
+ if (!msg) {
+ /*
+ * if MR was full and h/w had other FQRNI entries to produce, we
+ * need to allow it time to produce those entries once the
+ * existing entries are consumed. A worst-case situation
+ * (fully-loaded system) means h/w sequencers may have to do 3-4
+ * other things before servicing the portal's MR pump, each of
+ * which (if slow) may take ~50 qman cycles (which is ~200
+ * processor cycles). So rounding up and then multiplying this
+ * worst-case estimate by a factor of 10, just to be
+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+ * one entry at a time, so h/w has an opportunity to produce new
+ * entries well before the ring has been fully consumed, so
+ * we're being *really* paranoid here.
+ */
+ u64 now, then = mfatb();
+
+ do {
+ now = mfatb();
+ } while ((then + 10000) > now);
+ msg = qm_mr_current(p);
+ if (!msg)
+ return 0;
+ }
+ if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ /* We aren't draining anything but FQRNIs */
+ pr_err("Found verb 0x%x in MR\n", msg->ern.verb);
+ return -1;
+ }
+ qm_mr_next(p);
+ qm_mr_cci_consume(p, 1);
+ goto loop;
+}
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+ enum qm_eqcr_pmode pmode,
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
+{
+ /* This use of 'register', as well as all other occurrences, is because
+ * it has been observed to generate much faster code with gcc than is
+ * otherwise the case.
+ */
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u32 cfg;
+ u8 pi;
+
+ eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(EQCR_CI);
+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ eqcr->cursor = eqcr->ring + pi;
+ eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+ QM_EQCR_VERB_VBIT : 0;
+ eqcr->available = QM_EQCR_SIZE - 1 -
+ qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+ eqcr->ithresh = qm_in(EQCR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+ eqcr->pmode = pmode;
+#endif
+ cfg = (qm_in(CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+ qm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 pi, ci;
+ u32 cfg;
+
+ /*
+ * Disable EQCI stashing because the QMan only
+ * presents the value it previously stashed to
+ * maintain coherency. Setting the stash threshold
+ * to 1 then 0 ensures that QMan has resyncronized
+ * its internal copy so that the portal is clean
+ * when it is reinitialized in the future
+ */
+ cfg = (qm_in(CFG) & 0x0fffffff) |
+ (1 << 28); /* QCSP_CFG: EST */
+ qm_out(CFG, cfg);
+ cfg &= 0x0fffffff; /* stash threshold = 0 */
+ qm_out(CFG, cfg);
+
+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+ /* Refresh EQCR CI cache value */
+ qm_cl_invalidate(EQCR_CI);
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!eqcr->busy);
+#endif
+ if (pi != EQCR_PTR2IDX(eqcr->cursor))
+ pr_crit("losing uncommitted EQCR entries\n");
+ if (ci != eqcr->ci)
+ pr_crit("missing existing EQCR completions\n");
+ if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
+ pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+ __maybe_unused const struct qm_portal_config *config,
+ enum qm_dqrr_dmode dmode,
+ __maybe_unused enum qm_dqrr_pmode pmode,
+ enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u32 cfg;
+
+ /* Make sure the DQRR will be idle when we enable */
+ qm_out(DQRR_SDQCR, 0);
+ qm_out(DQRR_VDQCR, 0);
+ qm_out(DQRR_PDQCR, 0);
+ dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+ dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+ QM_DQRR_VERB_VBIT : 0;
+ dqrr->ithresh = qm_in(DQRR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ dqrr->dmode = dmode;
+ dqrr->pmode = pmode;
+ dqrr->cmode = cmode;
+#endif
+ /* Invalidate every ring entry before beginning */
+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+ dccivac(qm_cl(dqrr->ring, cfg));
+ cfg = (qm_in(CFG) & 0xff000f00) |
+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+ ((dmode & 1) << 18) | /* DP */
+ ((cmode & 3) << 16) | /* DCM */
+ 0xa0 | /* RE+SE */
+ (0 ? 0x40 : 0) | /* Ignore RP */
+ (0 ? 0x10 : 0); /* Ignore SP */
+ qm_out(CFG, cfg);
+ qm_dqrr_set_maxfill(portal, max_fill);
+ return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if ((dqrr->cmode != qm_dqrr_cdc) &&
+ (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
+ pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline int qm_mr_init(struct qm_portal *portal,
+ __maybe_unused enum qm_mr_pmode pmode,
+ enum qm_mr_cmode cmode)
+{
+ register struct qm_mr *mr = &portal->mr;
+ u32 cfg;
+
+ mr->ring = portal->addr.ce + QM_CL_MR;
+ mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
+ mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
+ mr->cursor = mr->ring + mr->ci;
+ mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+ mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
+ mr->ithresh = qm_in(MR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mr->pmode = pmode;
+ mr->cmode = cmode;
+#endif
+ cfg = (qm_in(CFG) & 0xfffff0ff) |
+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
+ qm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+#endif
+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+ if (!mr->pi)
+ mr->vbit ^= QM_MR_VERB_VBIT;
+ mr->fill++;
+ res = MR_INC(res);
+ }
+ dcbit_ro(res);
+}
+
+static inline
+struct qman_portal *qman_create_portal(
+ struct qman_portal *portal,
+ const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qm_portal *p;
+ char buf[16];
+ int ret;
+ u32 isdr;
+
+ p = &portal->p;
+
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY)
+ portal->use_eqcr_ci_stashing = 3;
+ else
+ portal->use_eqcr_ci_stashing =
+ ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+
+ /*
+ * prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ /*
+ * If CI-stashing is used, the current defaults use a threshold of 3,
+ * and stash with high-than-DQRR priority.
+ */
+ if (qm_eqcr_init(p, qm_eqcr_pvb,
+ portal->use_eqcr_ci_stashing, 1)) {
+ pr_err("Qman EQCR initialisation failed\n");
+ goto fail_eqcr;
+ }
+ if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ pr_err("Qman DQRR initialisation failed\n");
+ goto fail_dqrr;
+ }
+ if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+ pr_err("Qman MR initialisation failed\n");
+ goto fail_mr;
+ }
+ if (qm_mc_init(p)) {
+ pr_err("Qman MC initialisation failed\n");
+ goto fail_mc;
+ }
+
+ /* static interrupt-gating controls */
+ qm_dqrr_set_ithresh(p, 0);
+ qm_mr_set_ithresh(p, 0);
+ qm_isr_set_iperiod(p, 0);
+ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+ if (!portal->cgrs)
+ goto fail_cgrs;
+ /* initial snapshot is no-depletion */
+ qman_cgrs_init(&portal->cgrs[1]);
+ if (cgrs)
+ portal->cgrs[0] = *cgrs;
+ else
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+ spin_lock_init(&portal->cgr_lock);
+ portal->bits = 0;
+ portal->slowpoll = 0;
+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+ portal->dqrr_disable_ref = 0;
+ portal->cb_dc_ern = NULL;
+ sprintf(buf, "qportal-%d", c->channel);
+ dpa_rbtree_init(&portal->retire_table);
+ isdr = 0xffffffff;
+ qm_isr_disable_write(p, isdr);
+ portal->irq_sources = 0;
+ qm_isr_enable_write(p, portal->irq_sources);
+ qm_isr_status_clear(p, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname,
+ portal)) {
+ pr_err("request_irq() failed\n");
+ goto fail_irq;
+ }
+
+ /* Need EQCR to be empty before continuing */
+ isdr &= ~QM_PIRQ_EQCI;
+ qm_isr_disable_write(p, isdr);
+ ret = qm_eqcr_get_fill(p);
+ if (ret) {
+ pr_err("Qman EQCR unclean\n");
+ goto fail_eqcr_empty;
+ }
+ isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+ qm_isr_disable_write(p, isdr);
+ if (qm_dqrr_current(p)) {
+ pr_err("Qman DQRR unclean\n");
+ qm_dqrr_cdc_consume_n(p, 0xffff);
+ }
+ if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+ /* special handling, drain just in case it's a few FQRNIs */
+ if (drain_mr_fqrni(p))
+ goto fail_dqrr_mr_empty;
+ }
+ /* Success */
+ portal->config = c;
+ qm_isr_disable_write(p, 0);
+ qm_isr_uninhibit(p);
+ /* Write a sane SDQCR */
+ qm_dqrr_sdqcr_set(p, portal->sdqcr);
+ return portal;
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+ free_irq(c->irq, portal);
+fail_irq:
+ kfree(portal->cgrs);
+ spin_lock_destroy(&portal->cgr_lock);
+fail_cgrs:
+ qm_mc_finish(p);
+fail_mc:
+ qm_mr_finish(p);
+fail_mr:
+ qm_dqrr_finish(p);
+fail_dqrr:
+ qm_eqcr_finish(p);
+fail_eqcr:
+ return NULL;
+}
+
+#define MAX_GLOBAL_PORTALS 8
+static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
+static rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
+
+static struct qman_portal *
+qman_alloc_global_portal(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
+ if (rte_atomic16_test_and_set(&global_portals_used[i]))
+ return &global_portals[i];
+ }
+ pr_err("No portal available (%x)\n", MAX_GLOBAL_PORTALS);
+
+ return NULL;
+}
+
+static int
+qman_free_global_portal(struct qman_portal *portal)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
+ if (&global_portals[i] == portal) {
+ rte_atomic16_clear(&global_portals_used[i]);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs,
+ int alloc)
+{
+ struct qman_portal *res;
+ struct qman_portal *portal;
+
+ if (alloc)
+ portal = qman_alloc_global_portal();
+ else
+ portal = get_affine_portal();
+
+ /* A criteria for calling this function (from qman_driver.c) is that
+ * we're already affine to the cpu and won't schedule onto another cpu.
+ */
+
+ res = qman_create_portal(portal, c, cgrs);
+ if (res) {
+ spin_lock(&affine_mask_lock);
+ CPU_SET(c->cpu, &affine_mask);
+ affine_channels[c->cpu] =
+ c->channel;
+ spin_unlock(&affine_mask_lock);
+ }
+ return res;
+}
+
+static inline
+void qman_destroy_portal(struct qman_portal *qm)
+{
+ const struct qm_portal_config *pcfg;
+
+ /* Stop dequeues on the portal */
+ qm_dqrr_sdqcr_set(&qm->p, 0);
+
+ /*
+ * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+ * something related to QM_PIRQ_EQCI, this may need fixing.
+ * Also, due to the prefetching model used for CI updates in the enqueue
+ * path, this update will only invalidate the CI cacheline *after*
+ * working on it, so we need to call this twice to ensure a full update
+ * irrespective of where the enqueue processing was at when the teardown
+ * began.
+ */
+ qm_eqcr_cce_update(&qm->p);
+ qm_eqcr_cce_update(&qm->p);
+ pcfg = qm->config;
+
+ free_irq(pcfg->irq, qm);
+
+ kfree(qm->cgrs);
+ qm_mc_finish(&qm->p);
+ qm_mr_finish(&qm->p);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
+ qm->config = NULL;
+
+ spin_lock_destroy(&qm->cgr_lock);
+}
+
+const struct qm_portal_config *
+qman_destroy_affine_portal(struct qman_portal *qp)
+{
+ /* We don't want to redirect if we're a slave, use "raw" */
+ struct qman_portal *qm;
+ const struct qm_portal_config *pcfg;
+ int cpu;
+
+ if (qp == NULL)
+ qm = get_affine_portal();
+ else
+ qm = qp;
+ pcfg = qm->config;
+ cpu = pcfg->cpu;
+
+ qman_destroy_portal(qm);
+
+ spin_lock(&affine_mask_lock);
+ CPU_CLR(cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+
+ qman_free_global_portal(qm);
+
+ return pcfg;
+}
+
+int qman_get_portal_index(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ return p->config->index;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_mr_entry *msg, u8 verb)
+{
+ FQLOCK(fq);
+ switch (verb) {
+ case QM_MR_VERB_FQRL:
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+ fq_clear(fq, QMAN_FQ_STATE_ORL);
+ table_del_fq(p, fq);
+ break;
+ case QM_MR_VERB_FQRN:
+ DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
+ (fq->state == qman_fq_state_sched));
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ else
+ table_del_fq(p, fq);
+ fq->state = qman_fq_state_retired;
+ break;
+ case QM_MR_VERB_FQPN:
+ DPAA_ASSERT(fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+ fq->state = qman_fq_state_parked;
+ }
+ FQUNLOCK(fq);
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+ const struct qm_mr_entry *msg;
+ struct qm_mr_entry swapped_msg;
+
+ if (is & QM_PIRQ_CSCI) {
+ struct qman_cgrs rr, c;
+ struct qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+ spin_lock(&p->cgr_lock);
+ /*
+ * The CSCI bit must be cleared _before_ issuing the
+ * Query Congestion State command, to ensure that a long
+ * CGR State Change callback cannot miss an intervening
+ * state change.
+ */
+ qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ /* mask out the ones I'm not interested in */
+ qman_cgrs_and(&rr, (const struct qman_cgrs *)
+ &mcr->querycongestion.state, &p->cgrs[0]);
+ /* check previous snapshot for delta, enter/exit congestion */
+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+ /* update snapshot */
+ qman_cgrs_cp(&p->cgrs[1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+ spin_unlock(&p->cgr_lock);
+ }
+
+ if (is & QM_PIRQ_EQRI) {
+ qm_eqcr_cce_update(&p->p);
+ qm_eqcr_set_ithresh(&p->p, 0);
+ wake_up(&affine_queue);
+ }
+
+ if (is & QM_PIRQ_MRI) {
+ struct qman_fq *fq;
+ u8 verb, num = 0;
+mr_loop:
+ qm_mr_pvb_update(&p->p);
+ msg = qm_mr_current(&p->p);
+ if (!msg)
+ goto mr_done;
+ swapped_msg = *msg;
+ hw_fd_to_cpu(&swapped_msg.ern.fd);
+ verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
+ /* The message is a software ERN iff the 0x20 bit is set */
+ if (verb & 0x20) {
+ switch (verb) {
+ case QM_MR_VERB_FQRNI:
+ /* nada, we drop FQRNIs on the floor */
+ break;
+ case QM_MR_VERB_FQRN:
+ case QM_MR_VERB_FQRL:
+ /* Lookup in the retirement table */
+ fq = table_find_fq(p,
+ be32_to_cpu(msg->fq.fqid));
+ DPAA_BUG_ON(!fq);
+ fq_state_change(p, fq, &swapped_msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, &swapped_msg);
+ break;
+ case QM_MR_VERB_FQPN:
+ /* Parked */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(
+ be32_to_cpu(msg->fq.contextB));
+#else
+ fq = (void *)(uintptr_t)
+ be32_to_cpu(msg->fq.contextB);
+#endif
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, &swapped_msg);
+ break;
+ case QM_MR_VERB_DC_ERN:
+ /* DCP ERN */
+ if (p->cb_dc_ern)
+ p->cb_dc_ern(p, msg);
+ else if (cb_dc_ern)
+ cb_dc_ern(p, msg);
+ else {
+ static int warn_once;
+
+ if (!warn_once) {
+ pr_crit("Leaking DCP ERNs!\n");
+ warn_once = 1;
+ }
+ }
+ break;
+ default:
+ pr_crit("Invalid MR verb 0x%02x\n", verb);
+ }
+ } else {
+ /* Its a software ERN */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
+#else
+ fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
+#endif
+ fq->cb.ern(p, fq, &swapped_msg);
+ }
+ num++;
+ qm_mr_next(&p->p);
+ goto mr_loop;
+mr_done:
+ qm_mr_cci_consume(&p->p, num);
+ }
+ /*
+ * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
+ * processing. If that interrupt source has meanwhile been re-asserted,
+ * we mustn't clear it here (or in the top-level interrupt handler).
+ */
+ return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+ p->vdqcr_owned = NULL;
+ FQLOCK(fq);
+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe. Because;
+ *
+ * (i) this clearing can only occur after qman_set_vdq() has set the
+ * vdqcr_owned field (which it does before setting VDQCR), and
+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
+ * done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ * with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_set_vdq() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ struct qm_dqrr_entry *shadow;
+#endif
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (unlikely(!dq))
+ break;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* If running on an LE system the fields of the
+ * dequeue entry must be swapper. Because the
+ * QMan HW will ignore writes the DQRR entry is
+ * copied and the index stored within the copy
+ */
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ *shadow = *dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->contextB = be32_to_cpu(shadow->contextB);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+
+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+ /*
+ * VDQCR: don't trust context_b as the FQ may have
+ * been configured for h/w consumption and we're
+ * draining it post-retirement.
+ */
+ fq = p->vdqcr_owned;
+ /*
+ * We only set QMAN_FQ_STATE_NE when retiring, so we
+ * only need to check for clearing it when doing
+ * volatile dequeues. It's one less thing to check
+ * in the critical path (SDQCR).
+ */
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+ /*
+ * This is duplicated from the SDQCR code, but we
+ * have stuff to do before *and* after this callback,
+ * and we don't want multiple if()s in the critical
+ * path (SDQCR).
+ */
+ res = fq->cb.dqrr(p, fq, dq);
+ if (res == qman_cb_dqrr_stop)
+ break;
+ /* Check for VDQCR completion */
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+ } else {
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(dq->contextB);
+#else
+ fq = (void *)(uintptr_t)dq->contextB;
+#endif
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr(p, fq, dq);
+ /*
+ * The callback can request that we exit without
+ * consuming this entry nor advancing;
+ */
+ if (res == qman_cb_dqrr_stop)
+ break;
+ }
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ /* just means "skip it, I'll consume it myself later on" */
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+ return limit;
+}
+
+u16 qman_affine_channel(int cpu)
+{
+ if (cpu < 0) {
+ struct qman_portal *portal = get_affine_portal();
+
+ cpu = portal->config->cpu;
+ }
+ DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
+ return affine_channels[cpu];
+}
+
+unsigned int qman_portal_poll_rx(unsigned int poll_limit,
+ void **bufs,
+ struct qman_portal *p)
+{
+ struct qm_portal *portal = &p->p;
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE];
+ struct qman_fq *fq;
+ unsigned int limit = 0, rx_number = 0;
+ uint32_t consume = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ if (!dqrr->fill)
+ break;
+
+ dq[rx_number] = dqrr->cursor;
+ dqrr->cursor = DQRR_CARRYCLEAR(dqrr->cursor + 1);
+ /* Prefetch the next DQRR entry */
+ rte_prefetch0(dqrr->cursor);
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* If running on an LE system the fields of the
+ * dequeue entry must be swapper. Because the
+ * QMan HW will ignore writes the DQRR entry is
+ * copied and the index stored within the copy
+ */
+ shadow[rx_number] =
+ &p->shadow_dqrr[DQRR_PTR2IDX(dq[rx_number])];
+ shadow[rx_number]->fd.opaque_addr =
+ dq[rx_number]->fd.opaque_addr;
+ shadow[rx_number]->fd.addr =
+ be40_to_cpu(dq[rx_number]->fd.addr);
+ shadow[rx_number]->fd.opaque =
+ be32_to_cpu(dq[rx_number]->fd.opaque);
+#else
+ shadow[rx_number] = dq[rx_number];
+#endif
+
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = qman_fq_lookup_table[be32_to_cpu(dq[rx_number]->contextB)];
+#else
+ fq = (void *)be32_to_cpu(dq[rx_number]->contextB);
+#endif
+ if (fq->cb.dqrr_prepare)
+ fq->cb.dqrr_prepare(shadow[rx_number],
+ &bufs[rx_number]);
+
+ consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number])));
+ rx_number++;
+ --dqrr->fill;
+ } while (++limit < poll_limit);
+
+ if (rx_number)
+ fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number);
+
+ /* Consume all the DQRR enries together */
+ qm_out(DQRR_DCAP, (1 << 8) | consume);
+
+ return rx_number;
+}
+
+u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
+ void **bufs)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+ struct qman_portal *p = get_affine_portal();
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ struct qm_dqrr_entry *shadow;
+#endif
+ unsigned int rx_number = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ break;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ /*
+ * If running on an LE system the fields of the
+ * dequeue entry must be swapper. Because the
+ * QMan HW will ignore writes the DQRR entry is
+ * copied and the index stored within the copy
+ */
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ *shadow = *dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->contextB = be32_to_cpu(shadow->contextB);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(dq->contextB);
+#else
+ fq = (void *)(uintptr_t)dq->contextB;
+#endif
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq,
+ dq, &bufs[rx_number]);
+ rx_number++;
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit);
+
+ return limit;
+}
+
+struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
+{
+ struct qman_portal *p = get_affine_portal();
+ const struct qm_dqrr_entry *dq;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ struct qm_dqrr_entry *shadow;
+#endif
+
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ return NULL;
+
+ if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
+ /* Invalid DQRR - put the portal and consume the DQRR.
+ * Return NULL to user as no packet is seen.
+ */
+ qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
+ return NULL;
+ }
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ *shadow = *dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->contextB = be32_to_cpu(shadow->contextB);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+
+ return (struct qm_dqrr_entry *)dq;
+}
+
+void qman_dqrr_consume(struct qman_fq *fq,
+ struct qm_dqrr_entry *dq)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
+ qm_dqrr_next(&p->p);
+}
+
+int qman_poll_dqrr(unsigned int limit)
+{
+ struct qman_portal *p = get_affine_portal();
+ int ret;
+
+ ret = __poll_portal_fast(p, limit);
+ return ret;
+}
+
+void qman_poll(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ if ((~p->irq_sources) & QM_PIRQ_SLOW) {
+ if (!(p->slowpoll--)) {
+ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
+ u32 active = __poll_portal_slow(p, is);
+
+ if (active) {
+ qm_isr_status_clear(&p->p, active);
+ p->slowpoll = SLOW_POLL_BUSY;
+ } else
+ p->slowpoll = SLOW_POLL_IDLE;
+ }
+ }
+ if ((~p->irq_sources) & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
+}
+
+void qman_stop_dequeues(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qman_stop_dequeues_ex(p);
+}
+
+void qman_start_dequeues(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ DPAA_ASSERT(p->dqrr_disable_ref > 0);
+ if (!(--p->dqrr_disable_ref))
+ qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
+}
+
+void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
+{
+ struct qman_portal *p = qp ? qp : get_affine_portal();
+
+ pools &= p->config->pools;
+ p->sdqcr |= pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+}
+
+void qman_static_dequeue_del(u32 pools, struct qman_portal *qp)
+{
+ struct qman_portal *p = qp ? qp : get_affine_portal();
+
+ pools &= p->config->pools;
+ p->sdqcr &= ~pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+}
+
+u32 qman_static_dequeue_get(struct qman_portal *qp)
+{
+ struct qman_portal *p = qp ? qp : get_affine_portal();
+ return p->sdqcr;
+}
+
+void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
+}
+
+void qman_dca_index(u8 index, int park_request)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qm_dqrr_cdc_consume_1(&p->p, index, park_request);
+}
+
+/* Frame queue API */
+static const char *mcr_result_str(u8 result)
+{
+ switch (result) {
+ case QM_MCR_RESULT_NULL:
+ return "QM_MCR_RESULT_NULL";
+ case QM_MCR_RESULT_OK:
+ return "QM_MCR_RESULT_OK";
+ case QM_MCR_RESULT_ERR_FQID:
+ return "QM_MCR_RESULT_ERR_FQID";
+ case QM_MCR_RESULT_ERR_FQSTATE:
+ return "QM_MCR_RESULT_ERR_FQSTATE";
+ case QM_MCR_RESULT_ERR_NOTEMPTY:
+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
+ case QM_MCR_RESULT_PENDING:
+ return "QM_MCR_RESULT_PENDING";
+ case QM_MCR_RESULT_ERR_BADCOMMAND:
+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
+ }
+ return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+ struct qm_fqd fqd;
+ struct qm_mcr_queryfq_np np;
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+ int ret = qman_alloc_fqid(&fqid);
+
+ if (ret)
+ return ret;
+ }
+ spin_lock_init(&fq->fqlock);
+ fq->fqid = fqid;
+ fq->fqid_le = cpu_to_be32(fqid);
+ fq->flags = flags;
+ fq->state = qman_fq_state_oos;
+ fq->cgr_groupid = 0;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
+ pr_info("Find empty table entry failed\n");
+ return -ENOMEM;
+ }
+#endif
+ if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
+ return 0;
+ /* Everything else is AS_IS support */
+ p = get_affine_portal();
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
+ goto err;
+ }
+ fqd = mcr->queryfq.fqd;
+ hw_fqd_to_cpu(&fqd);
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
+ goto err;
+ }
+ np = mcr->queryfq_np;
+ /* Phew, have queryfq and queryfq_np results, stitch together
+ * the FQ object from those.
+ */
+ fq->cgr_groupid = fqd.cgid;
+ switch (np.state & QM_MCR_NP_STATE_MASK) {
+ case QM_MCR_NP_STATE_OOS:
+ break;
+ case QM_MCR_NP_STATE_RETIRED:
+ fq->state = qman_fq_state_retired;
+ if (np.frm_cnt)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ break;
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ fq->state = qman_fq_state_sched;
+ if (np.state & QM_MCR_NP_STATE_R)
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ break;
+ case QM_MCR_NP_STATE_PARKED:
+ fq->state = qman_fq_state_parked;
+ break;
+ default:
+ DPAA_ASSERT(NULL == "invalid FQ state");
+ }
+ if (fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq->state |= QMAN_FQ_STATE_CGR_EN;
+ return 0;
+err:
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
+ qman_release_fqid(fqid);
+ return -EIO;
+}
+
+void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
+{
+ /*
+ * We don't need to lock the FQ as it is a pre-condition that the FQ be
+ * quiesced. Instead, run some checks.
+ */
+ switch (fq->state) {
+ case qman_fq_state_parked:
+ DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
+ /* Fallthrough */
+ case qman_fq_state_oos:
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+ qman_release_fqid(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ clear_fq_table_entry(fq->key);
+#endif
+ return;
+ default:
+ break;
+ }
+ DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
+}
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+ return fq->fqid;
+}
+
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
+{
+ if (state)
+ *state = fq->state;
+ if (flags)
+ *flags = fq->flags;
+}
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+ if ((fq->state != qman_fq_state_oos) &&
+ (fq->state != qman_fq_state_parked))
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+ /* And can't be set at the same time as TDTHRESH */
+ if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ return -EINVAL;
+ }
+ /* Issue an INITFQ_[PARKED|SCHED] management command */
+ p = get_affine_portal();
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ ((fq->state != qman_fq_state_oos) &&
+ (fq->state != qman_fq_state_parked)))) {
+ FQUNLOCK(fq);
+ return -EBUSY;
+ }
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initfq = *opts;
+ mcc->initfq.fqid = cpu_to_be32(fq->fqid);
+ mcc->initfq.count = 0;
+ /*
+ * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
+ * demux pointer. Otherwise, the caller-provided value is allowed to
+ * stand, don't overwrite it.
+ */
+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+ dma_addr_t phys_fq;
+
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ mcc->initfq.fqd.context_b = fq->key;
+#else
+ mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
+#endif
+ /*
+ * and the physical address - NB, if the user wasn't trying to
+ * set CONTEXTA, clear the stashing settings.
+ */
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ memset(&mcc->initfq.fqd.context_a, 0,
+ sizeof(mcc->initfq.fqd.context_a));
+ } else {
+ phys_fq = rte_mem_virt2iova(fq);
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+ }
+ }
+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+ mcc->initfq.fqd.dest.channel = p->config->channel;
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ mcc->initfq.fqd.dest.wq = 4;
+ }
+ }
+ mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
+ cpu_to_hw_fqd(&mcc->initfq.fqd);
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ FQUNLOCK(fq);
+ return -EIO;
+ }
+ if (opts) {
+ if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+ if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ else
+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+ }
+ if (opts->we_mask & QM_INITFQ_WE_CGID)
+ fq->cgr_groupid = opts->fqd.cgid;
+ }
+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ qman_fq_state_sched : qman_fq_state_parked;
+ FQUNLOCK(fq);
+ return 0;
+}
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int ret = 0;
+ u8 res;
+
+ if (fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ /* Issue a ALTERFQ_SCHED management command */
+ p = get_affine_portal();
+
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state != qman_fq_state_parked))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_sched;
+out:
+ FQUNLOCK(fq);
+
+ return ret;
+}
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int rval;
+ u8 res;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_sched))
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state == qman_fq_state_retired) ||
+ (fq->state == qman_fq_state_oos))) {
+ rval = -EBUSY;
+ goto out;
+ }
+ rval = table_push_fq(p, fq);
+ if (rval)
+ goto out;
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result;
+ /*
+ * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
+ * friendly, otherwise the caller doesn't necessarily have a fully
+ * "retired" FQ on return even if the retirement was immediate. However
+ * this does mean some code duplication between here and
+ * fq_state_change().
+ */
+ if (likely(res == QM_MCR_RESULT_OK)) {
+ rval = 0;
+ /* Process 'fq' right away, we'll ignore FQRNI */
+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ else
+ table_del_fq(p, fq);
+ if (flags)
+ *flags = fq->flags;
+ fq->state = qman_fq_state_retired;
+ if (fq->cb.fqs) {
+ /*
+ * Another issue with supporting "immediate" retirement
+ * is that we're forced to drop FQRNIs, because by the
+ * time they're seen it may already be "too late" (the
+ * fq may have been OOS'd and free()'d already). But if
+ * the upper layer wants a callback whether it's
+ * immediate or not, we have to fake a "MR" entry to
+ * look like an FQRNI...
+ */
+ struct qm_mr_entry msg;
+
+ msg.ern.verb = QM_MR_VERB_FQRNI;
+ msg.fq.fqs = mcr->alterfq.fqs;
+ msg.fq.fqid = fq->fqid;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ msg.fq.contextB = fq->key;
+#else
+ msg.fq.contextB = (u32)(uintptr_t)fq;
+#endif
+ fq->cb.fqs(p, fq, &msg);
+ }
+ } else if (res == QM_MCR_RESULT_PENDING) {
+ rval = 1;
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ } else {
+ rval = -EIO;
+ table_del_fq(p, fq);
+ }
+out:
+ FQUNLOCK(fq);
+ return rval;
+}
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int ret = 0;
+ u8 res;
+
+ if (fq->state != qman_fq_state_retired)
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
+ (fq->state != qman_fq_state_retired))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_oos;
+out:
+ FQUNLOCK(fq);
+ return ret;
+}
+
+int qman_fq_flow_control(struct qman_fq *fq, int xon)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int ret = 0;
+ u8 res;
+ u8 myverb;
+
+ if ((fq->state == qman_fq_state_oos) ||
+ (fq->state == qman_fq_state_retired) ||
+ (fq->state == qman_fq_state_parked))
+ return -EINVAL;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ /* Issue a ALTER_FQXON or ALTER_FQXOFF management command */
+ p = get_affine_portal();
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state == qman_fq_state_parked) ||
+ (fq->state == qman_fq_state_oos) ||
+ (fq->state == qman_fq_state_retired))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ mcc->alterfq.count = 0;
+ myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
+
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+out:
+ FQUNLOCK(fq);
+ return ret;
+}
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *fqd = mcr->queryfq.fqd;
+ hw_fqd_to_cpu(fqd);
+ if (res != QM_MCR_RESULT_OK)
+ return -EIO;
+ return 0;
+}
+
+int qman_query_fq_has_pkts(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ int ret = 0;
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ ret = !!mcr->queryfq_np.frm_cnt;
+ return ret;
+}
+
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ *np = mcr->queryfq_np;
+ np->fqd_link = be24_to_cpu(np->fqd_link);
+ np->odp_seq = be16_to_cpu(np->odp_seq);
+ np->orp_nesn = be16_to_cpu(np->orp_nesn);
+ np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
+ np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
+ np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
+ np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
+ np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
+ np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
+ np->ics_surp = be16_to_cpu(np->ics_surp);
+ np->byte_cnt = be32_to_cpu(np->byte_cnt);
+ np->frm_cnt = be24_to_cpu(np->frm_cnt);
+ np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
+ np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
+ np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
+ np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
+ np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
+ }
+ if (res == QM_MCR_RESULT_ERR_FQID)
+ return -ERANGE;
+ else if (res != QM_MCR_RESULT_OK)
+ return -EIO;
+ return 0;
+}
+
+int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *frm_cnt = be24_to_cpu(mcr->queryfq_np.frm_cnt);
+ else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+ return -ERANGE;
+ else if (mcr->result != QM_MCR_RESULT_OK)
+ return -EIO;
+ return 0;
+}
+
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res, myverb;
+
+ myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
+ QM_MCR_VERB_QUERYWQ;
+ mcc = qm_mc_start(&p->p);
+ mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ int i, array_len;
+
+ wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
+ array_len = ARRAY_SIZE(mcr->querywq.wq_len);
+ for (i = 0; i < array_len; i++)
+ wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
+ }
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+ struct qm_mcr_cgrtestwrite *result)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->cgrtestwrite.cgid = cgr->cgrid;
+ mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
+ mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
+ qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *result = mcr->cgrtestwrite;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 res;
+ unsigned int i;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->querycgr.cgid = cgr->cgrid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *cgrd = mcr->querycgr;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ cgrd->cgr.wr_parm_g.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_g.word);
+ cgrd->cgr.wr_parm_y.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_y.word);
+ cgrd->cgr.wr_parm_r.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_r.word);
+ cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
+ cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
+ for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
+ cgrd->cscn_targ_swp[i] =
+ be32_to_cpu(cgrd->cscn_targ_swp[i]);
+ return 0;
+}
+
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
+{
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 res;
+ unsigned int i;
+
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCC_VERB_QUERYCONGESTION);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *congestion = mcr->querycongestion;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
+ congestion->state.state[i] =
+ be32_to_cpu(congestion->state.state[i]);
+ return 0;
+}
+
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
+{
+ struct qman_portal *p = get_affine_portal();
+ uint32_t vdqcr;
+ int ret = -EBUSY;
+
+ vdqcr = vdqcr_flags;
+ vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_retired)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+
+ if (!p->vdqcr_owned) {
+ FQLOCK(fq);
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto escape;
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ p->vdqcr_owned = fq;
+ ret = 0;
+ }
+escape:
+ if (!ret)
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+
+out:
+ return ret;
+}
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
+ u32 vdqcr)
+{
+ struct qman_portal *p;
+ int ret = -EBUSY;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_retired))
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+
+ p = get_affine_portal();
+
+ if (!p->vdqcr_owned) {
+ FQLOCK(fq);
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto escape;
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ p->vdqcr_owned = fq;
+ ret = 0;
+ }
+escape:
+ if (ret)
+ return ret;
+
+ /* VDQCR is set */
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ return 0;
+}
+
+static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+ if (avail)
+ qm_eqcr_cce_prefetch(&p->p);
+ else
+ qm_eqcr_cce_update(&p->p);
+}
+
+int qman_eqcr_is_empty(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ u8 avail;
+
+ update_eqcr_ci(p, 0);
+ avail = qm_eqcr_get_fill(&p->p);
+ return (avail == 0);
+}
+
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
+{
+ if (affine) {
+ struct qman_portal *p = get_affine_portal();
+
+ p->cb_dc_ern = handler;
+ } else
+ cb_dc_ern = handler;
+}
+
+static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ u8 avail;
+
+ if (p->use_eqcr_ci_stashing) {
+ /*
+ * The stashing case is easy, only update if we need to in
+ * order to try and liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&p->p);
+ } else {
+ /*
+ * The non-stashing case is harder, need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&p->p);
+ if (avail < 2)
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
+ }
+
+ if (unlikely(!eq))
+ return NULL;
+
+ if (flags & QMAN_ENQUEUE_FLAG_DCA)
+ eq->dca = QM_EQCR_DCA_ENABLE |
+ ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
+ QM_EQCR_DCA_PARK : 0) |
+ ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
+ eq->fqid = cpu_to_be32(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ eq->tag = cpu_to_be32(fq->key);
+#else
+ eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
+#endif
+ eq->fd = *fd;
+ cpu_to_hw_fd(&eq->fd);
+ return eq;
+}
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_eqcr_entry *eq;
+
+ eq = try_p_eq_start(p, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ return 0;
+}
+
+int qman_enqueue_multi(struct qman_fq *fq,
+ const struct qm_fd *fd, u32 *flags,
+ int frames_to_send)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_portal *portal = &p->p;
+
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
+
+ u8 i = 0, diff, old_ci, sent = 0;
+
+ /* Update the available entries if no entry is free */
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ /* try to send as many frames as possible */
+ while (eqcr->available && frames_to_send--) {
+ eq->fqid = fq->fqid_le;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ eq->tag = cpu_to_be32(fq->key);
+#else
+ eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
+#endif
+ eq->fd.opaque_addr = fd->opaque_addr;
+ eq->fd.addr = cpu_to_be40(fd->addr);
+ eq->fd.status = cpu_to_be32(fd->status);
+ eq->fd.opaque = cpu_to_be32(fd->opaque);
+ if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
+ eq->dca = QM_EQCR_DCA_ENABLE |
+ ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
+ }
+ i++;
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ eqcr->available--;
+ sent++;
+ fd++;
+ }
+ lwsync();
+
+ /* In order for flushes to complete faster, all lines are recorded in
+ * 32 bit word.
+ */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ eq->__dont_write_directly__verb =
+ QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
+ prev_eq = eq;
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ if (unlikely((prev_eq + 1) != eq))
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+ }
+
+ /* We need to flush all the lines but without load/store operations
+ * between them
+ */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ dcbf(eq);
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ }
+ /* Update cursor for the next call */
+ eqcr->cursor = eq;
+ return sent;
+}
+
+int
+qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
+ int frames_to_send)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_portal *portal = &p->p;
+
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
+
+ u8 i, diff, old_ci, sent = 0;
+
+ /* Update the available entries if no entry is free */
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ /* try to send as many frames as possible */
+ while (eqcr->available && frames_to_send--) {
+ eq->fqid = fq[sent]->fqid_le;
+ eq->fd.opaque_addr = fd->opaque_addr;
+ eq->fd.addr = cpu_to_be40(fd->addr);
+ eq->fd.status = cpu_to_be32(fd->status);
+ eq->fd.opaque = cpu_to_be32(fd->opaque);
+
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ eqcr->available--;
+ sent++;
+ fd++;
+ }
+ lwsync();
+
+ /* In order for flushes to complete faster, all lines are recorded in
+ * 32 bit word.
+ */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ eq->__dont_write_directly__verb =
+ QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
+ prev_eq = eq;
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ if (unlikely((prev_eq + 1) != eq))
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+ }
+
+ /* We need to flush all the lines but without load/store operations
+ * between them
+ */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ dcbf(eq);
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ }
+ /* Update cursor for the next call */
+ eqcr->cursor = eq;
+ return sent;
+}
+
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_eqcr_entry *eq;
+
+ eq = try_p_eq_start(p, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Process ORP-specifics here */
+ if (flags & QMAN_ENQUEUE_FLAG_NLIS)
+ orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
+ else {
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
+ if (flags & QMAN_ENQUEUE_FLAG_NESN)
+ orp_seqnum |= QM_EQCR_SEQNUM_NESN;
+ else
+ /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
+ }
+ eq->seqnum = cpu_to_be16(orp_seqnum);
+ eq->orp = cpu_to_be32(orp->fqid);
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
+ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
+ 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+
+ return 0;
+}
+
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+ u8 verb = QM_MCC_VERB_MODIFYCGR;
+
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initcgr = *opts;
+ mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
+ mcc->initcgr.cgr.wr_parm_g.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
+ mcc->initcgr.cgr.wr_parm_y.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
+ mcc->initcgr.cgr.wr_parm_r.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
+ mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
+ mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
+
+ mcc->initcgr.cgid = cgr->cgrid;
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ verb = QM_MCC_VERB_INITCGR;
+ qm_mc_commit(&p->p, verb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+ res = mcr->result;
+ return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
+}
+
+#define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
+ QM_CHANNEL_SWPORTAL0))
+#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
+#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret;
+ struct qman_portal *p;
+
+ /* We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= __CGR_NUM)
+ return -EINVAL;
+
+ p = get_affine_portal();
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ cgr->chan = p->config->channel;
+ spin_lock(&p->cgr_lock);
+
+ /* if no opts specified, just add it to the list */
+ if (!opts)
+ goto add_list;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ goto release_lock;
+ if (opts)
+ local_opts = *opts;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+ else
+ /* Overwrite TARG */
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_MASK(p);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
+ else
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ goto release_lock;
+add_list:
+ list_add(&cgr->node, &p->cgr_cbs);
+
+ /* Determine if newly added object requires its callback to be called */
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* we can't go back, so proceed and return success, but screen
+ * and wail to the log file.
+ */
+ pr_crit("CGR HW state partially modified\n");
+ ret = 0;
+ goto release_lock;
+ }
+ if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
+ cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+release_lock:
+ spin_unlock(&p->cgr_lock);
+ return ret;
+}
+
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcc_initcgr local_opts;
+ struct qm_mcr_querycgr cgr_state;
+ int ret;
+
+ if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
+ pr_warn("QMan version doesn't support CSCN => DCP portal\n");
+ return -EINVAL;
+ }
+ /* We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= __CGR_NUM)
+ return -EINVAL;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ return ret;
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ if (opts)
+ local_opts = *opts;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
+ QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_DCP_MASK(dcp_portal);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+ &local_opts);
+ else
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+
+ return ret;
+}
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = get_affine_portal();
+
+ if (cgr->chan != p->config->channel) {
+ pr_crit("Attempting to delete cgr from different portal than"
+ " it was create: create 0x%x, delete 0x%x\n",
+ cgr->chan, p->config->channel);
+ ret = -EINVAL;
+ goto put_portal;
+ }
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ spin_lock(&p->cgr_lock);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list,
+ * update CSCN_TARG accordingly
+ */
+ list_for_each_entry(i, &p->cgr_cbs, node)
+ if ((i->cgrid == cgr->cgrid) && i->cb)
+ goto release_lock;
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ goto release_lock;
+ }
+ /* Overwrite TARG */
+ local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+ ~(TARG_MASK(p));
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+ spin_unlock(&p->cgr_lock);
+put_portal:
+ return ret;
+}
+
+int qman_shutdown_fq(u32 fqid)
+{
+ struct qman_portal *p;
+ struct qm_portal *low_p;
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ u8 state;
+ int orl_empty, fq_empty, drain = 0;
+ u32 result;
+ u32 channel, wq;
+ u16 dest_wq;
+
+ p = get_affine_portal();
+ low_p = &p->p;
+
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(low_p);
+ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+ return 0; /* Already OOS, no need to do anymore checks */
+
+ /* Query which channel the FQ is using */
+ mcc = qm_mc_start(low_p);
+ mcc->queryfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+
+ /* Need to store these since the MCR gets reused */
+ dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
+ channel = dest_wq & 0x7;
+ wq = dest_wq >> 3;
+
+ switch (state) {
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ case QM_MCR_NP_STATE_PARKED:
+ orl_empty = 0;
+ mcc = qm_mc_start(low_p);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_RETIRE);
+ result = mcr->result; /* Make a copy as we reuse MCR below */
+
+ if (result == QM_MCR_RESULT_PENDING) {
+ /* Need to wait for the FQRN in the message ring, which
+ * will only occur once the FQ has been drained. In
+ * order for the FQ to drain the portal needs to be set
+ * to dequeue from the channel the FQ is scheduled on
+ */
+ const struct qm_mr_entry *msg;
+ const struct qm_dqrr_entry *dqrr = NULL;
+ int found_fqrn = 0;
+ __maybe_unused u16 dequeue_wq = 0;
+
+ /* Flag that we need to drain FQ */
+ drain = 1;
+
+ if (channel >= qm_channel_pool1 &&
+ channel < (u16)(qm_channel_pool1 + 15)) {
+ /* Pool channel, enable the bit in the portal */
+ dequeue_wq = (channel -
+ qm_channel_pool1 + 1) << 4 | wq;
+ } else if (channel < qm_channel_pool1) {
+ /* Dedicated channel */
+ dequeue_wq = wq;
+ } else {
+ pr_info("Cannot recover FQ 0x%x,"
+ " it is scheduled on channel 0x%x",
+ fqid, channel);
+ return -EBUSY;
+ }
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ qm_dqrr_sdqcr_set(low_p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ qm_dqrr_sdqcr_set(low_p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
+ while (!found_fqrn) {
+ /* Keep draining DQRR while checking the MR*/
+ qm_dqrr_pvb_update(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ while (dqrr) {
+ qm_dqrr_cdc_consume_1ptr(
+ low_p, dqrr, 0);
+ qm_dqrr_pvb_update(low_p);
+ qm_dqrr_next(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ }
+ /* Process message ring too */
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ while (msg) {
+ if ((msg->ern.verb &
+ QM_MR_VERB_TYPE_MASK)
+ == QM_MR_VERB_FQRN)
+ found_fqrn = 1;
+ qm_mr_next(low_p);
+ qm_mr_cci_consume_to_current(low_p);
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ }
+ cpu_relax();
+ }
+ }
+ if (result != QM_MCR_RESULT_OK &&
+ result != QM_MCR_RESULT_PENDING) {
+ /* error */
+ pr_err("qman_retire_fq failed on FQ 0x%x,"
+ " result=0x%x\n", fqid, result);
+ return -1;
+ }
+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+ /* ORL had no entries, no need to wait until the
+ * ERNs come in.
+ */
+ orl_empty = 1;
+ }
+ /* Retirement succeeded, check to see if FQ needs
+ * to be drained.
+ */
+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+ /* FQ is Not Empty, drain using volatile DQ commands */
+ fq_empty = 0;
+ do {
+ const struct qm_dqrr_entry *dqrr = NULL;
+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+ qm_dqrr_vdqcr_set(low_p, vdqcr);
+
+ /* Wait for a dequeue to occur */
+ while (dqrr == NULL) {
+ qm_dqrr_pvb_update(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ if (!dqrr)
+ cpu_relax();
+ }
+ /* Process the dequeues, making sure to
+ * empty the ring completely.
+ */
+ while (dqrr) {
+ if (dqrr->fqid == fqid &&
+ dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_empty = 1;
+ qm_dqrr_cdc_consume_1ptr(low_p,
+ dqrr, 0);
+ qm_dqrr_pvb_update(low_p);
+ qm_dqrr_next(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ }
+ } while (fq_empty == 0);
+ }
+ qm_dqrr_sdqcr_set(low_p, 0);
+
+ /* Wait for the ORL to have been completely drained */
+ while (orl_empty == 0) {
+ const struct qm_mr_entry *msg;
+
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ while (msg) {
+ if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
+ QM_MR_VERB_FQRL)
+ orl_empty = 1;
+ qm_mr_next(low_p);
+ qm_mr_cci_consume_to_current(low_p);
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ }
+ cpu_relax();
+ }
+ mcc = qm_mc_start(low_p);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err(
+ "OOS after drain Failed on FQID 0x%x, result 0x%x\n",
+ fqid, mcr->result);
+ return -1;
+ }
+ return 0;
+
+ case QM_MCR_NP_STATE_RETIRED:
+ /* Send OOS Command */
+ mcc = qm_mc_start(low_p);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result) {
+ pr_err("OOS Failed on FQID 0x%x\n", fqid);
+ return -1;
+ }
+ return 0;
+
+ }
+ return -1;
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.h
new file mode 100644
index 00000000..4346d865
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.h
@@ -0,0 +1,913 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include "qman_priv.h"
+
+/***************************/
+/* Portal register assists */
+/***************************/
+#define QM_REG_EQCR_PI_CINH 0x3000
+#define QM_REG_EQCR_CI_CINH 0x3040
+#define QM_REG_EQCR_ITR 0x3080
+#define QM_REG_DQRR_PI_CINH 0x3100
+#define QM_REG_DQRR_CI_CINH 0x3140
+#define QM_REG_DQRR_ITR 0x3180
+#define QM_REG_DQRR_DCAP 0x31C0
+#define QM_REG_DQRR_SDQCR 0x3200
+#define QM_REG_DQRR_VDQCR 0x3240
+#define QM_REG_DQRR_PDQCR 0x3280
+#define QM_REG_MR_PI_CINH 0x3300
+#define QM_REG_MR_CI_CINH 0x3340
+#define QM_REG_MR_ITR 0x3380
+#define QM_REG_CFG 0x3500
+#define QM_REG_ISR 0x3600
+#define QM_REG_IIR 0x36C0
+#define QM_REG_ITPR 0x3740
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3040
+#define QM_CL_DQRR_PI_CENA 0x3100
+#define QM_CL_DQRR_CI_CENA 0x3140
+#define QM_CL_MR_PI_CENA 0x3300
+#define QM_CL_MR_CI_CENA 0x3340
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrade performance. Hence the use of the __raw_*() interfaces, which
+ * simply ensure that the compiler treats the portal registers as volatile (ie.
+ * non-coherent).
+ */
+
+/* Cache-inhibited register access. */
+#define __qm_in(qm, o) be32_to_cpu(__raw_readl((qm)->ci + (o)))
+#define __qm_out(qm, o, val) __raw_writel((cpu_to_be32(val)), \
+ (qm)->ci + (o))
+#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg)
+#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->ce + (o))
+#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->ce + (o))
+#define __qm_cl_in(qm, o) be32_to_cpu(__raw_readl((qm)->ce + (o)))
+#define __qm_cl_out(qm, o, val) \
+ do { \
+ u32 *__tmpclout = (qm)->ce + (o); \
+ __raw_writel(cpu_to_be32(val), __tmpclout); \
+ dcbf(__tmpclout); \
+ } while (0)
+#define __qm_cl_invalidate(qm, o) dccivac((qm)->ce + (o))
+#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val)
+#define qm_cl_invalidate(reg)\
+ __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA)
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues.
+ */
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
+
+/* Portal modes.
+ * Enum types;
+ * pmode == production mode
+ * cmode == consumption mode,
+ * dmode == h/w dequeue mode.
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate;
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
+ qm_eqcr_pvb = 2 /* valid-bit */
+};
+
+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
+ qm_dqrr_dpull = 1 /* PDQCR */
+};
+
+enum qm_dqrr_pmode { /* s/w-only */
+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
+ qm_dqrr_pvb /* reads valid-bit */
+};
+
+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
+};
+
+enum qm_mr_pmode { /* s/w-only */
+ qm_mr_pci, /* reads MR_PI_CINH */
+ qm_mr_pce, /* reads MR_PI_CENA */
+ qm_mr_pvb /* reads valid-bit */
+};
+
+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
+ qm_mr_cci = 0, /* CI index, cache-inhibited */
+ qm_mr_cce = 1 /* CI index, cache-enabled */
+};
+
+/* ------------------------- */
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE 8
+#define QM_DQRR_SIZE 16
+#define QM_MR_SIZE 8
+
+struct qm_eqcr {
+ struct qm_eqcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ u32 busy;
+ enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+ struct qm_dqrr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum qm_dqrr_dmode dmode;
+ enum qm_dqrr_pmode pmode;
+ enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+ const struct qm_mr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum qm_mr_pmode pmode;
+ enum qm_mr_cmode cmode;
+#endif
+};
+
+struct qm_mc {
+ struct qm_mc_command *cr;
+ struct qm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum {
+ /* Can be _mc_start()ed */
+ qman_mc_idle,
+ /* Can be _mc_commit()ed or _mc_abort()ed */
+ qman_mc_user,
+ /* Can only be _mc_retry()ed */
+ qman_mc_hw
+ } state;
+#endif
+};
+
+#define QM_PORTAL_ALIGNMENT ____cacheline_aligned
+
+struct qm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct qm_portal {
+ struct qm_addr addr;
+ struct qm_eqcr eqcr;
+ struct qm_dqrr dqrr;
+ struct qm_mr mr;
+ struct qm_mc mc;
+} QM_PORTAL_ALIGNMENT;
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define EQCR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
+
+extern dma_addr_t rte_mem_virt2iova(const void *addr);
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void EQCR_INC(struct qm_eqcr *eqcr)
+{
+ /* NB: this is odd-looking, but experiments show that it generates fast
+ * code with essentially no branching overheads. We increment to the
+ * next EQCR pointer and handle overflow and 'vbit'.
+ */
+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+ eqcr->cursor = EQCR_CARRYCLEAR(partial);
+ if (partial != eqcr->cursor)
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!eqcr->busy);
+#endif
+ if (!eqcr->available)
+ return NULL;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 1;
+#endif
+
+ return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!eqcr->busy);
+#endif
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 1;
+#endif
+ return eqcr->cursor;
+}
+
+static inline void qm_eqcr_abort(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(eqcr->busy);
+ eqcr->busy = 0;
+#endif
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
+ struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(eqcr->busy);
+ DPAA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
+#endif
+ if (eqcr->available == 1)
+ return NULL;
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ dcbf(eqcr->cursor);
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ return eqcr->cursor;
+}
+
+#define EQCR_COMMIT_CHECKS(eqcr) \
+do { \
+ DPAA_ASSERT(eqcr->busy); \
+ DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \
+ DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \
+} while (0)
+
+static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pci);
+#endif
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ dcbf(eqcr->cursor);
+ hwsync();
+ qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+#endif
+ qm_cl_invalidate(EQCR_PI);
+ qm_cl_touch_rw(EQCR_PI);
+}
+
+static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+#endif
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ dcbf(eqcr->cursor);
+ lwsync();
+ qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eqcursor;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+#endif
+ lwsync();
+ eqcursor = eqcr->cursor;
+ eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ dcbf(eqcursor);
+ EQCR_INC(eqcr);
+ eqcr->available--;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+#endif
+}
+
+static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ qm_cl_touch_ro(EQCR_CI);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(EQCR_CI);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->ithresh;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ eqcr->ithresh = ithresh;
+ qm_out(EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+#define DQRR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6)))
+
+static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
+}
+
+static inline struct qm_dqrr_entry *DQRR_INC(
+ const struct qm_dqrr_entry *e)
+{
+ return DQRR_CARRYCLEAR(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+ qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+ struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (!dqrr->fill)
+ return NULL;
+ return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_cursor(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ return DQRR_PTR2IDX(dqrr->cursor);
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->fill);
+ dqrr->cursor = DQRR_INC(dqrr->cursor);
+ return --dqrr->fill;
+}
+
+static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 diff, old_pi = dqrr->pi;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pci);
+#endif
+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+ dqrr->fill += diff;
+ return diff;
+}
+
+static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+#endif
+ qm_cl_invalidate(DQRR_PI);
+ qm_cl_touch_ro(DQRR_PI);
+}
+
+static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 diff, old_pi = dqrr->pi;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+#endif
+ dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+ dqrr->fill += diff;
+ return diff;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#endif
+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+ if (!dqrr->pi)
+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+ dqrr->fill++;
+ }
+}
+
+static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+#endif
+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+ qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+#endif
+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+ qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+#endif
+ qm_cl_invalidate(DQRR_CI);
+ qm_cl_touch_rw(DQRR_CI);
+}
+
+static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+#endif
+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+ qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+#endif
+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+ qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
+ int park)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ ((park ? 1 : 0) << 6) | /* PK */
+ idx); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+ const struct qm_dqrr_entry *dq,
+ int park)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 idx = DQRR_PTR2IDX(dq);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
+ idx); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+ ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+}
+
+static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+}
+
+static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ qm_cl_invalidate(DQRR_CI);
+ qm_cl_touch_ro(DQRR_CI);
+}
+
+static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
+}
+
+static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+#endif
+ return dqrr->ci;
+}
+
+static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+#endif
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ (1 << 6) | /* PK */
+ (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_park_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+#endif
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ (1 << 6) | /* PK */
+ DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+ qm_out(DQRR_SDQCR, sdqcr);
+}
+
+static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_SDQCR);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+ qm_out(DQRR_VDQCR, vdqcr);
+}
+
+static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_VDQCR);
+}
+
+static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ return dqrr->ithresh;
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(DQRR_ITR, ithresh);
+}
+
+static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal)
+{
+ return (qm_in(CFG) & 0x00f00000) >> 20;
+}
+
+/* -------------- */
+/* --- MR API --- */
+
+#define MR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6)))
+
+static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1);
+}
+
+static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e)
+{
+ return MR_CARRYCLEAR(e + 1);
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ if (mr->ci != MR_PTR2IDX(mr->cursor))
+ pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ if (!mr->fill)
+ return NULL;
+ return mr->cursor;
+}
+
+static inline u8 qm_mr_next(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->fill);
+ mr->cursor = MR_INC(mr->cursor);
+ return --mr->fill;
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+#endif
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+#endif
+ mr->ci = MR_PTR2IDX(mr->cursor);
+ qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(MR_ITR, ithresh);
+}
+
+/* ------------------------------ */
+/* --- Management command API --- */
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + QM_CL_CR;
+ mc->rr = portal->addr.ce + QM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+ QM_MCC_VERB_VBIT) ? 0 : 1;
+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = qman_mc_idle;
+#endif
+ return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+ if (mc->state != qman_mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+ mc->state = qman_mc_user;
+#endif
+ dcbz_64(mc->cr);
+ return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_mc *mc = &portal->mc;
+ struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_user);
+#endif
+ lwsync();
+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+ dcbf(mc->cr);
+ dcbit_ro(rr);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = qman_mc_hw;
+#endif
+}
+
+static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+ struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_hw);
+#endif
+ /* The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering.
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dcbit_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = qman_mc_idle;
+#endif
+ return rr;
+}
+
+/* Portal interrupt register API */
+static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod)
+{
+ qm_out(ITPR, iperiod);
+}
+
+static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
+{
+#if defined(RTE_ARCH_ARM64)
+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 6));
+#else
+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 2));
+#endif
+}
+
+static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
+ u32 val)
+{
+#if defined(RTE_ARCH_ARM64)
+ __qm_out(&portal->addr, QM_REG_ISR + (n << 6), val);
+#else
+ __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val);
+#endif
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_driver.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_driver.c
new file mode 100644
index 00000000..f6ecd6b2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_driver.c
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <fsl_usd.h>
+#include <process.h>
+#include "qman_priv.h"
+#include <sys/ioctl.h>
+#include <rte_branch_prediction.h>
+
+/* Global variable containing revision id (even on non-control plane systems
+ * where CCSR isn't available).
+ */
+u16 qman_ip_rev;
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+u16 qm_channel_pme = QMAN_CHANNEL_PME;
+
+/* Ccsr map address to access ccsrbased register */
+static void *qman_ccsr_map;
+/* The qman clock frequency */
+static u32 qman_clk;
+
+static __thread int qmfd = -1;
+static __thread struct qm_portal_config qpcfg;
+static __thread struct dpaa_ioctl_portal_map map = {
+ .type = dpaa_portal_qman
+};
+
+static int fsl_qman_portal_init(uint32_t index, int is_shared)
+{
+ cpu_set_t cpuset;
+ struct qman_portal *portal;
+ int loop, ret;
+ struct dpaa_ioctl_irq_map irq_map;
+
+ /* Verify the thread's cpu-affinity */
+ ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+ &cpuset);
+ if (ret) {
+ error(0, ret, "pthread_getaffinity_np()");
+ return ret;
+ }
+ qpcfg.cpu = -1;
+ for (loop = 0; loop < CPU_SETSIZE; loop++)
+ if (CPU_ISSET(loop, &cpuset)) {
+ if (qpcfg.cpu != -1) {
+ pr_err("Thread is not affine to 1 cpu\n");
+ return -EINVAL;
+ }
+ qpcfg.cpu = loop;
+ }
+ if (qpcfg.cpu == -1) {
+ pr_err("Bug in getaffinity handling!\n");
+ return -EINVAL;
+ }
+
+ /* Allocate and map a qman portal */
+ map.index = index;
+ ret = process_portal_map(&map);
+ if (ret) {
+ error(0, ret, "process_portal_map()");
+ return ret;
+ }
+ qpcfg.channel = map.channel;
+ qpcfg.pools = map.pools;
+ qpcfg.index = map.index;
+
+ /* Make the portal's cache-[enabled|inhibited] regions */
+ qpcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
+ qpcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
+
+ qmfd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (qmfd == -1) {
+ pr_err("QMan irq init failed\n");
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+
+ qpcfg.is_shared = is_shared;
+ qpcfg.node = NULL;
+ qpcfg.irq = qmfd;
+
+ portal = qman_create_affine_portal(&qpcfg, NULL, 0);
+ if (!portal) {
+ pr_err("Qman portal initialisation failed (%d)\n",
+ qpcfg.cpu);
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+
+ irq_map.type = dpaa_portal_qman;
+ irq_map.portal_cinh = map.addr.cinh;
+ process_portal_irq_map(qmfd, &irq_map);
+ return 0;
+}
+
+static int fsl_qman_portal_finish(void)
+{
+ __maybe_unused const struct qm_portal_config *cfg;
+ int ret;
+
+ process_portal_irq_unmap(qmfd);
+
+ cfg = qman_destroy_affine_portal(NULL);
+ DPAA_BUG_ON(cfg != &qpcfg);
+ ret = process_portal_unmap(&map.addr);
+ if (ret)
+ error(0, ret, "process_portal_unmap()");
+ return ret;
+}
+
+int qman_thread_init(void)
+{
+ /* Convert from contiguous/virtual cpu numbering to real cpu when
+ * calling into the code that is dependent on the device naming.
+ */
+ return fsl_qman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
+}
+
+int qman_thread_finish(void)
+{
+ return fsl_qman_portal_finish();
+}
+
+void qman_thread_irq(void)
+{
+ qbman_invoke_irq(qpcfg.irq);
+
+ /* Now we need to uninhibit interrupts. This is the only code outside
+ * the regular portal driver that manipulates any portal register, so
+ * rather than breaking that encapsulation I am simply hard-coding the
+ * offset to the inhibit register here.
+ */
+ out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
+}
+
+struct qman_portal *fsl_qman_portal_create(void)
+{
+ cpu_set_t cpuset;
+ struct qman_portal *res;
+
+ struct qm_portal_config *q_pcfg;
+ int loop, ret;
+ struct dpaa_ioctl_irq_map irq_map;
+ struct dpaa_ioctl_portal_map q_map = {0};
+ int q_fd;
+
+ q_pcfg = kzalloc((sizeof(struct qm_portal_config)), 0);
+ if (!q_pcfg) {
+ error(0, -1, "q_pcfg kzalloc failed");
+ return NULL;
+ }
+
+ /* Verify the thread's cpu-affinity */
+ ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+ &cpuset);
+ if (ret) {
+ error(0, ret, "pthread_getaffinity_np()");
+ kfree(q_pcfg);
+ return NULL;
+ }
+
+ q_pcfg->cpu = -1;
+ for (loop = 0; loop < CPU_SETSIZE; loop++)
+ if (CPU_ISSET(loop, &cpuset)) {
+ if (q_pcfg->cpu != -1) {
+ pr_err("Thread is not affine to 1 cpu\n");
+ kfree(q_pcfg);
+ return NULL;
+ }
+ q_pcfg->cpu = loop;
+ }
+ if (q_pcfg->cpu == -1) {
+ pr_err("Bug in getaffinity handling!\n");
+ kfree(q_pcfg);
+ return NULL;
+ }
+
+ /* Allocate and map a qman portal */
+ q_map.type = dpaa_portal_qman;
+ q_map.index = QBMAN_ANY_PORTAL_IDX;
+ ret = process_portal_map(&q_map);
+ if (ret) {
+ error(0, ret, "process_portal_map()");
+ kfree(q_pcfg);
+ return NULL;
+ }
+ q_pcfg->channel = q_map.channel;
+ q_pcfg->pools = q_map.pools;
+ q_pcfg->index = q_map.index;
+
+ /* Make the portal's cache-[enabled|inhibited] regions */
+ q_pcfg->addr_virt[DPAA_PORTAL_CE] = q_map.addr.cena;
+ q_pcfg->addr_virt[DPAA_PORTAL_CI] = q_map.addr.cinh;
+
+ q_fd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (q_fd == -1) {
+ pr_err("QMan irq init failed\n");
+ goto err1;
+ }
+
+ q_pcfg->irq = q_fd;
+
+ res = qman_create_affine_portal(q_pcfg, NULL, true);
+ if (!res) {
+ pr_err("Qman portal initialisation failed (%d)\n",
+ q_pcfg->cpu);
+ goto err2;
+ }
+
+ irq_map.type = dpaa_portal_qman;
+ irq_map.portal_cinh = q_map.addr.cinh;
+ process_portal_irq_map(q_fd, &irq_map);
+
+ return res;
+err2:
+ close(q_fd);
+err1:
+ process_portal_unmap(&q_map.addr);
+ kfree(q_pcfg);
+ return NULL;
+}
+
+int fsl_qman_portal_destroy(struct qman_portal *qp)
+{
+ const struct qm_portal_config *cfg;
+ struct dpaa_portal_map addr;
+ int ret;
+
+ cfg = qman_destroy_affine_portal(qp);
+ kfree(qp);
+
+ process_portal_irq_unmap(cfg->irq);
+
+ addr.cena = cfg->addr_virt[DPAA_PORTAL_CE];
+ addr.cinh = cfg->addr_virt[DPAA_PORTAL_CI];
+
+ ret = process_portal_unmap(&addr);
+ if (ret)
+ pr_err("process_portal_unmap() (%d)\n", ret);
+
+ kfree((void *)cfg);
+
+ return ret;
+}
+
+int qman_global_init(void)
+{
+ const struct device_node *dt_node;
+ size_t lenp;
+ const u32 *chanid;
+ static int ccsr_map_fd;
+ const uint32_t *qman_addr;
+ uint64_t phys_addr;
+ uint64_t regs_size;
+ const u32 *clk;
+
+ static int done;
+
+ if (done)
+ return -EBUSY;
+
+ /* Use the device-tree to determine IP revision until something better
+ * is devised.
+ */
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman-portal");
+ if (!dt_node) {
+ pr_err("No qman portals available for any CPU\n");
+ return -ENODEV;
+ }
+ if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-1.0.0"))
+ pr_err("QMan rev1.0 on P4080 rev1 is not supported!\n");
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.1") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-1.1.0"))
+ qman_ip_rev = QMAN_REV11;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.2") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-1.2.0"))
+ qman_ip_rev = QMAN_REV12;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-2.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-2.0.0"))
+ qman_ip_rev = QMAN_REV20;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.1"))
+ qman_ip_rev = QMAN_REV30;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.1") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.2") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.3"))
+ qman_ip_rev = QMAN_REV31;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.1"))
+ qman_ip_rev = QMAN_REV32;
+ else
+ qman_ip_rev = QMAN_REV11;
+
+ if (!qman_ip_rev) {
+ pr_err("Unknown qman portal version\n");
+ return -ENODEV;
+ }
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
+ qm_channel_pme = QMAN_CHANNEL_PME_REV3;
+ }
+
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,pool-channel-range");
+ if (!dt_node) {
+ pr_err("No qman pool channel range available\n");
+ return -ENODEV;
+ }
+ chanid = of_get_property(dt_node, "fsl,pool-channel-range", &lenp);
+ if (!chanid) {
+ pr_err("Can not get pool-channel-range property\n");
+ return -EINVAL;
+ }
+
+ /* get ccsr base */
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman");
+ if (!dt_node) {
+ pr_err("No qman device node available\n");
+ return -ENODEV;
+ }
+ qman_addr = of_get_address(dt_node, 0, &regs_size, NULL);
+ if (!qman_addr) {
+ pr_err("of_get_address cannot return qman address\n");
+ return -EINVAL;
+ }
+ phys_addr = of_translate_address(dt_node, qman_addr);
+ if (!phys_addr) {
+ pr_err("of_translate_address failed\n");
+ return -EINVAL;
+ }
+
+ ccsr_map_fd = open("/dev/mem", O_RDWR);
+ if (unlikely(ccsr_map_fd < 0)) {
+ pr_err("Can not open /dev/mem for qman ccsr map\n");
+ return ccsr_map_fd;
+ }
+
+ qman_ccsr_map = mmap(NULL, regs_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, ccsr_map_fd, phys_addr);
+ if (qman_ccsr_map == MAP_FAILED) {
+ pr_err("Can not map qman ccsr base\n");
+ return -EINVAL;
+ }
+
+ clk = of_get_property(dt_node, "clock-frequency", NULL);
+ if (!clk)
+ pr_warn("Can't find Qman clock frequency\n");
+ else
+ qman_clk = be32_to_cpu(*clk);
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ return qman_setup_fq_lookup_table(CONFIG_FSL_QMAN_FQ_LOOKUP_MAX);
+#endif
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_priv.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_priv.h
new file mode 100644
index 00000000..02f6301f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_priv.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __QMAN_PRIV_H
+#define __QMAN_PRIV_H
+
+#include "dpaa_sys.h"
+#include <fsl_qman.h>
+
+/* Congestion Groups */
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. Is also used as a *mask* for congestion groups, eg. so we ignore
+ * those that don't concern us. We harness the structure and accessor details
+ * already used in the management command to query congestion groups.
+ */
+struct qman_cgrs {
+ struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+ memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
+{
+ return QM_MCR_QUERYCONGESTION(&c->q, num);
+}
+
+static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
+{
+ c->q.state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
+}
+
+static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
+{
+ c->q.state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
+}
+
+static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
+{
+ while ((++num < (int)__CGR_NUM) && !qman_cgrs_get(c, num))
+ ;
+ return num;
+}
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+ const struct qman_cgrs *src)
+{
+ memcpy(dest, src, sizeof(*dest));
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+ const struct qman_cgrs *a,
+ const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) & *(_b++);
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+ const struct qman_cgrs *a,
+ const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) ^ *(_b++);
+}
+
+/* used by CCSR and portal interrupt code */
+enum qm_isr_reg {
+ qm_isr_status = 0,
+ qm_isr_enable = 1,
+ qm_isr_disable = 2,
+ qm_isr_inhibit = 3
+};
+
+struct qm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ struct device_node *node;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ /* If the caller enables DQRR stashing (and thus wishes to operate the
+ * portal from only one cpu), this is the logical CPU that the portal
+ * will stash to. Whether stashing is enabled or not, this setting is
+ * also used for any "core-affine" portals, ie. default portals
+ * associated to the corresponding cpu. -1 implies that there is no
+ * core affinity configured.
+ */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /* the unique index of this portal */
+ u32 index;
+ /* Is this portal shared? (If so, it has coarser locking and demuxes
+ * processing on behalf of other CPUs.).
+ */
+ int is_shared;
+ /* The portal's dedicated channel id, use this value for initialising
+ * frame queues to target this portal when scheduled.
+ */
+ u16 channel;
+ /* A mask of which pool channels this portal has dequeue access to
+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask).
+ */
+ u32 pools;
+
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+#define QMAN_REV32 0x0302
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+int qm_set_wpm(int wpm);
+int qm_get_wpm(int *wpm);
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs,
+ int alloc);
+const struct qm_portal_config *
+qman_destroy_affine_portal(struct qman_portal *q);
+
+struct qm_portal_config *qm_get_unused_portal(void);
+struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
+
+void qm_put_unused_portal(struct qm_portal_config *pcfg);
+void qm_set_liodns(struct qm_portal_config *pcfg);
+
+/* This CGR feature is supported by h/w and required by unit-tests and the
+ * debugfs hooks, so is implemented in the driver. However it allows an explicit
+ * corruption of h/w fields by s/w that are usually incorruptible (because the
+ * counters are usually maintained entirely within h/w). As such, we declare
+ * this API internally.
+ */
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+ struct qm_mcr_cgrtestwrite *result);
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+/* If the fq object pointer is greater than the size of context_b field,
+ * than a lookup table is required.
+ */
+int qman_setup_fq_lookup_table(size_t num_entries);
+#endif
+
+/* QMan s/w corenet portal, low-level i/face */
+
+/*
+ * For Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SOURCE == SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS 0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
+#define QM_SDQCR_COUNT_EXACT1 0x0
+#define QM_SDQCR_COUNT_UPTO3 0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_SDQCR_TYPE_MASK 0x03000000
+#define QM_SDQCR_TYPE_NULL 0x0
+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_SDQCR_TYPE_ACTIVE 0x03000000
+#define QM_SDQCR_TOKEN_MASK 0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
+
+#define QM_VDQCR_FQID_MASK 0x00ffffff
+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
+
+#define QM_EQCR_VERB_VBIT 0x80
+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
+#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */
+#define QM_EQCR_VERB_COLOUR_GREEN 0x00
+#define QM_EQCR_VERB_COLOUR_YELLOW 0x08
+#define QM_EQCR_VERB_COLOUR_RED 0x10
+#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18
+#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */
+#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */
+#define QM_EQCR_DCA_ENABLE 0x80
+#define QM_EQCR_DCA_PARK 0x40
+#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */
+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
+#define QM_EQCR_FQID_NULL 0 /* eg. for an ORP seqnum hole */
+
+#define QM_MCC_VERB_VBIT 0x80
+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED 0x40
+#define QM_MCC_VERB_INITFQ_SCHED 0x41
+#define QM_MCC_VERB_QUERYFQ 0x44
+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ 0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
+#define QM_MCC_VERB_INITCGR 0x50
+#define QM_MCC_VERB_MODIFYCGR 0x51
+#define QM_MCC_VERB_CGRTESTWRITE 0x52
+#define QM_MCC_VERB_QUERYCGR 0x58
+#define QM_MCC_VERB_QUERYCONGESTION 0x59
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL 0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK 0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
+ * the disable register" rather than "disable the ability to write".
+ */
+#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
+#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
+#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
+#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
+#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
+#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
+/* TODO: unfortunate name-clash here, reword? */
+#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
+#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
+
+#define QMAN_PORTAL_IRQ_PATH "/dev/fsl-usdpaa-irq"
+
+#endif /* _QMAN_PRIV_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/dpaa_bus.c b/src/spdk/dpdk/drivers/bus/dpaa/dpaa_bus.c
new file mode 100644
index 00000000..16fabd1b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/dpaa_bus.c
@@ -0,0 +1,646 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+/* System headers */
+#include <stdio.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_bus.h>
+#include <rte_mbuf_pool_ops.h>
+
+#include <rte_dpaa_bus.h>
+#include <rte_dpaa_logs.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
+int dpaa_logtype_bus;
+int dpaa_logtype_mempool;
+int dpaa_logtype_pmd;
+int dpaa_logtype_eventdev;
+
+struct rte_dpaa_bus rte_dpaa_bus;
+struct netcfg_info *dpaa_netcfg;
+
+/* define a variable to hold the portal_key, once created.*/
+static pthread_key_t dpaa_portal_key;
+
+unsigned int dpaa_svr_family;
+
+#define FSL_DPAA_BUS_NAME dpaa_bus
+
+RTE_DEFINE_PER_LCORE(bool, dpaa_io);
+RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
+
+static int
+compare_dpaa_devices(struct rte_dpaa_device *dev1,
+ struct rte_dpaa_device *dev2)
+{
+ int comp = 0;
+
+ /* Segragating ETH from SEC devices */
+ if (dev1->device_type > dev2->device_type)
+ comp = 1;
+ else if (dev1->device_type < dev2->device_type)
+ comp = -1;
+ else
+ comp = 0;
+
+ if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
+ return comp;
+
+ if (dev1->id.fman_id > dev2->id.fman_id) {
+ comp = 1;
+ } else if (dev1->id.fman_id < dev2->id.fman_id) {
+ comp = -1;
+ } else {
+ /* FMAN ids match, check for mac_id */
+ if (dev1->id.mac_id > dev2->id.mac_id)
+ comp = 1;
+ else if (dev1->id.mac_id < dev2->id.mac_id)
+ comp = -1;
+ else
+ comp = 0;
+ }
+
+ return comp;
+}
+
+static inline void
+dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
+{
+ int comp, inserted = 0;
+ struct rte_dpaa_device *dev = NULL;
+ struct rte_dpaa_device *tdev = NULL;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
+ comp = compare_dpaa_devices(newdev, dev);
+ if (comp < 0) {
+ TAILQ_INSERT_BEFORE(dev, newdev, next);
+ inserted = 1;
+ break;
+ }
+ }
+
+ if (!inserted)
+ TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
+}
+
+/*
+ * Reads the SEC device from DTS
+ * Returns -1 if SEC devices not available, 0 otherwise
+ */
+static inline int
+dpaa_sec_available(void)
+{
+ const struct device_node *caam_node;
+
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ return 0;
+ }
+
+ return -1;
+}
+
+static void dpaa_clean_device_list(void);
+
+static struct rte_devargs *
+dpaa_devargs_lookup(struct rte_dpaa_device *dev)
+{
+ struct rte_devargs *devargs;
+ char dev_name[32];
+
+ RTE_EAL_DEVARGS_FOREACH("dpaa_bus", devargs) {
+ devargs->bus->parse(devargs->name, &dev_name);
+ if (strcmp(dev_name, dev->device.name) == 0) {
+ DPAA_BUS_INFO("**Devargs matched %s", dev_name);
+ return devargs;
+ }
+ }
+ return NULL;
+}
+
+static int
+dpaa_create_device_list(void)
+{
+ int i;
+ int ret;
+ struct rte_dpaa_device *dev;
+ struct fm_eth_port_cfg *cfg;
+ struct fman_if *fman_intf;
+
+ /* Creating Ethernet Devices */
+ for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
+ dev = calloc(1, sizeof(struct rte_dpaa_device));
+ if (!dev) {
+ DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cfg = &dpaa_netcfg->port_cfg[i];
+ fman_intf = cfg->fman_if;
+
+ /* Device identifiers */
+ dev->id.fman_id = fman_intf->fman_idx + 1;
+ dev->id.mac_id = fman_intf->mac_idx;
+ dev->device_type = FSL_DPAA_ETH;
+ dev->id.dev_id = i;
+
+ /* Create device name */
+ memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
+ sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
+ fman_intf->mac_idx);
+ DPAA_BUS_LOG(INFO, "%s netdev added", dev->name);
+ dev->device.name = dev->name;
+ dev->device.devargs = dpaa_devargs_lookup(dev);
+
+ dpaa_add_to_device_list(dev);
+ }
+
+ rte_dpaa_bus.device_count = i;
+
+ /* Unlike case of ETH, RTE_LIBRTE_DPAA_MAX_CRYPTODEV SEC devices are
+ * constantly created only if "sec" property is found in the device
+ * tree. Logically there is no limit for number of devices (QI
+ * interfaces) that can be created.
+ */
+
+ if (dpaa_sec_available()) {
+ DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
+ return 0;
+ }
+
+ /* Creating SEC Devices */
+ for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
+ dev = calloc(1, sizeof(struct rte_dpaa_device));
+ if (!dev) {
+ DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
+ ret = -1;
+ goto cleanup;
+ }
+
+ dev->device_type = FSL_DPAA_CRYPTO;
+ dev->id.dev_id = rte_dpaa_bus.device_count + i;
+
+ /* Even though RTE_CRYPTODEV_NAME_MAX_LEN is valid length of
+ * crypto PMD, using RTE_ETH_NAME_MAX_LEN as that is the size
+ * allocated for dev->name/
+ */
+ memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
+ sprintf(dev->name, "dpaa-sec%d", i);
+ DPAA_BUS_LOG(INFO, "%s cryptodev added", dev->name);
+ dev->device.name = dev->name;
+ dev->device.devargs = dpaa_devargs_lookup(dev);
+
+ dpaa_add_to_device_list(dev);
+ }
+
+ rte_dpaa_bus.device_count += i;
+
+ return 0;
+
+cleanup:
+ dpaa_clean_device_list();
+ return ret;
+}
+
+static void
+dpaa_clean_device_list(void)
+{
+ struct rte_dpaa_device *dev = NULL;
+ struct rte_dpaa_device *tdev = NULL;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
+ TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ }
+}
+
+int rte_dpaa_portal_init(void *arg)
+{
+ cpu_set_t cpuset;
+ pthread_t id;
+ uint32_t cpu = rte_lcore_id();
+ int ret;
+ struct dpaa_portal *dpaa_io_portal;
+
+ BUS_INIT_FUNC_TRACE();
+
+ if ((size_t)arg == 1 || cpu == LCORE_ID_ANY)
+ cpu = rte_get_master_lcore();
+ /* if the core id is not supported */
+ else
+ if (cpu >= RTE_MAX_LCORE)
+ return -1;
+
+ /* Set CPU affinity for this thread */
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ id = pthread_self();
+ ret = pthread_setaffinity_np(id, sizeof(cpu_set_t), &cpuset);
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "pthread_setaffinity_np failed on "
+ "core :%d with ret: %d", cpu, ret);
+ return ret;
+ }
+
+ /* Initialise bman thread portals */
+ ret = bman_thread_init();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "bman_thread_init failed on "
+ "core %d with ret: %d", cpu, ret);
+ return ret;
+ }
+
+ DPAA_BUS_LOG(DEBUG, "BMAN thread initialized");
+
+ /* Initialise qman thread portals */
+ ret = qman_thread_init();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "bman_thread_init failed on "
+ "core %d with ret: %d", cpu, ret);
+ bman_thread_finish();
+ return ret;
+ }
+
+ DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
+
+ dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
+ RTE_CACHE_LINE_SIZE);
+ if (!dpaa_io_portal) {
+ DPAA_BUS_LOG(ERR, "Unable to allocate memory");
+ bman_thread_finish();
+ qman_thread_finish();
+ return -ENOMEM;
+ }
+
+ dpaa_io_portal->qman_idx = qman_get_portal_index();
+ dpaa_io_portal->bman_idx = bman_get_portal_index();
+ dpaa_io_portal->tid = syscall(SYS_gettid);
+
+ ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "pthread_setspecific failed on "
+ "core %d with ret: %d", cpu, ret);
+ dpaa_portal_finish(NULL);
+
+ return ret;
+ }
+
+ RTE_PER_LCORE(dpaa_io) = true;
+
+ DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
+
+ return 0;
+}
+
+int
+rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
+{
+ /* Affine above created portal with channel*/
+ u32 sdqcr;
+ struct qman_portal *qp;
+ int ret;
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init(arg);
+ if (ret < 0) {
+ DPAA_BUS_LOG(ERR, "portal initialization failure");
+ return ret;
+ }
+ }
+
+ /* Initialise qman specific portals */
+ qp = fsl_qman_portal_create();
+ if (!qp) {
+ DPAA_BUS_LOG(ERR, "Unable to alloc fq portal");
+ return -1;
+ }
+ fq->qp = qp;
+ sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
+ qman_static_dequeue_add(sdqcr, qp);
+
+ return 0;
+}
+
+int rte_dpaa_portal_fq_close(struct qman_fq *fq)
+{
+ return fsl_qman_portal_destroy(fq->qp);
+}
+
+void
+dpaa_portal_finish(void *arg)
+{
+ struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;
+
+ if (!dpaa_io_portal) {
+ DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
+ return;
+ }
+
+ bman_thread_finish();
+ qman_thread_finish();
+
+ pthread_setspecific(dpaa_portal_key, NULL);
+
+ rte_free(dpaa_io_portal);
+ dpaa_io_portal = NULL;
+
+ RTE_PER_LCORE(dpaa_io) = false;
+}
+
+static int
+rte_dpaa_bus_parse(const char *name, void *out_name)
+{
+ int i, j;
+ int max_fman = 2, max_macs = 16;
+ char *sep = strchr(name, ':');
+
+ if (strncmp(name, RTE_STR(FSL_DPAA_BUS_NAME),
+ strlen(RTE_STR(FSL_DPAA_BUS_NAME)))) {
+ return -EINVAL;
+ }
+
+ if (!sep) {
+ DPAA_BUS_ERR("Incorrect device name observed");
+ return -EINVAL;
+ }
+
+ sep = (char *) (sep + 1);
+
+ for (i = 0; i < max_fman; i++) {
+ for (j = 0; j < max_macs; j++) {
+ char fm_name[16];
+ snprintf(fm_name, 16, "fm%d-mac%d", i, j);
+ if (strcmp(fm_name, sep) == 0) {
+ if (out_name)
+ strcpy(out_name, sep);
+ return 0;
+ }
+ }
+ }
+
+ for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
+ char sec_name[16];
+
+ snprintf(sec_name, 16, "dpaa-sec%d", i);
+ if (strcmp(sec_name, sep) == 0) {
+ if (out_name)
+ strcpy(out_name, sep);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
+#define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"
+
+static int
+rte_dpaa_bus_scan(void)
+{
+ int ret;
+
+ BUS_INIT_FUNC_TRACE();
+
+ if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
+ (access(DPAA_DEV_PATH2, F_OK) != 0)) {
+ RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
+ return 0;
+ }
+
+ /* Load the device-tree driver */
+ ret = of_init();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
+ return -1;
+ }
+
+ /* Get the interface configurations from device-tree */
+ dpaa_netcfg = netcfg_acquire();
+ if (!dpaa_netcfg) {
+ DPAA_BUS_LOG(ERR, "netcfg_acquire failed");
+ return -EINVAL;
+ }
+
+ RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");
+
+ if (!dpaa_netcfg->num_ethports) {
+ DPAA_BUS_LOG(INFO, "no network interfaces available");
+ /* This is not an error */
+ return 0;
+ }
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+ dump_netcfg(dpaa_netcfg);
+#endif
+
+ DPAA_BUS_LOG(DEBUG, "Number of ethernet devices = %d",
+ dpaa_netcfg->num_ethports);
+ ret = dpaa_create_device_list();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
+ return ret;
+ }
+
+ /* create the key, supplying a function that'll be invoked
+ * when a portal affined thread will be deleted.
+ */
+ ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
+ if (ret) {
+ DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
+ dpaa_clean_device_list();
+ return ret;
+ }
+
+ return 0;
+}
+
+/* register a dpaa bus based dpaa driver */
+void
+rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
+{
+ RTE_VERIFY(driver);
+
+ BUS_INIT_FUNC_TRACE();
+
+ TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
+ /* Update Bus references */
+ driver->dpaa_bus = &rte_dpaa_bus;
+}
+
+/* un-register a dpaa bus based dpaa driver */
+void
+rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
+{
+ struct rte_dpaa_bus *dpaa_bus;
+
+ BUS_INIT_FUNC_TRACE();
+
+ dpaa_bus = driver->dpaa_bus;
+
+ TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
+ /* Update Bus references */
+ driver->dpaa_bus = NULL;
+}
+
+static int
+rte_dpaa_device_match(struct rte_dpaa_driver *drv,
+ struct rte_dpaa_device *dev)
+{
+ if (!drv || !dev) {
+ DPAA_BUS_DEBUG("Invalid drv or dev received.");
+ return -1;
+ }
+
+ if (drv->drv_type == dev->device_type)
+ return 0;
+
+ return -1;
+}
+
+static int
+rte_dpaa_bus_probe(void)
+{
+ int ret = -1;
+ struct rte_dpaa_device *dev;
+ struct rte_dpaa_driver *drv;
+ FILE *svr_file = NULL;
+ unsigned int svr_ver;
+ int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST;
+
+ svr_file = fopen(DPAA_SOC_ID_FILE, "r");
+ if (svr_file) {
+ if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
+ dpaa_svr_family = svr_ver & SVR_MASK;
+ fclose(svr_file);
+ }
+
+ /* For each registered driver, and device, call the driver->probe */
+ TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
+ TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
+ ret = rte_dpaa_device_match(drv, dev);
+ if (ret)
+ continue;
+
+ if (!drv->probe ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy == RTE_DEV_BLACKLISTED))
+ continue;
+
+ if (probe_all ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy ==
+ RTE_DEV_WHITELISTED)) {
+ ret = drv->probe(drv, dev);
+ if (ret)
+ DPAA_BUS_ERR("Unable to probe.\n");
+ }
+ break;
+ }
+ }
+
+ /* Register DPAA mempool ops only if any DPAA device has
+ * been detected.
+ */
+ if (!TAILQ_EMPTY(&rte_dpaa_bus.device_list))
+ rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);
+
+ return 0;
+}
+
+static struct rte_device *
+rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ struct rte_dpaa_device *dev;
+
+ TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
+ if (start && &dev->device == start) {
+ start = NULL; /* starting point found */
+ continue;
+ }
+
+ if (cmp(&dev->device, data) == 0)
+ return &dev->device;
+ }
+
+ return NULL;
+}
+
+/*
+ * Get iommu class of DPAA2 devices on the bus.
+ */
+static enum rte_iova_mode
+rte_dpaa_get_iommu_class(void)
+{
+ if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
+ (access(DPAA_DEV_PATH2, F_OK) != 0)) {
+ return RTE_IOVA_DC;
+ }
+ return RTE_IOVA_PA;
+}
+
+struct rte_dpaa_bus rte_dpaa_bus = {
+ .bus = {
+ .scan = rte_dpaa_bus_scan,
+ .probe = rte_dpaa_bus_probe,
+ .parse = rte_dpaa_bus_parse,
+ .find_device = rte_dpaa_find_device,
+ .get_iommu_class = rte_dpaa_get_iommu_class,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
+ .device_count = 0,
+};
+
+RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
+
+RTE_INIT(dpaa_init_log)
+{
+ dpaa_logtype_bus = rte_log_register("bus.dpaa");
+ if (dpaa_logtype_bus >= 0)
+ rte_log_set_level(dpaa_logtype_bus, RTE_LOG_NOTICE);
+
+ dpaa_logtype_mempool = rte_log_register("mempool.dpaa");
+ if (dpaa_logtype_mempool >= 0)
+ rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);
+
+ dpaa_logtype_pmd = rte_log_register("pmd.net.dpaa");
+ if (dpaa_logtype_pmd >= 0)
+ rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);
+
+ dpaa_logtype_eventdev = rte_log_register("pmd.event.dpaa");
+ if (dpaa_logtype_eventdev >= 0)
+ rte_log_set_level(dpaa_logtype_eventdev, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/compat.h b/src/spdk/dpdk/drivers/bus/dpaa/include/compat.h
new file mode 100644
index 00000000..92241d23
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/compat.h
@@ -0,0 +1,384 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __COMPAT_H
+#define __COMPAT_H
+
+#include <sched.h>
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdint.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <pthread.h>
+#include <linux/types.h>
+#include <stdbool.h>
+#include <ctype.h>
+#include <malloc.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <limits.h>
+#include <assert.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include <error.h>
+#include <rte_byteorder.h>
+#include <rte_atomic.h>
+#include <rte_spinlock.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+
+/* The following definitions are primarily to allow the single-source driver
+ * interfaces to be included by arbitrary program code. Ie. for interfaces that
+ * are also available in kernel-space, these definitions provide compatibility
+ * with certain attributes and types used in those interfaces.
+ */
+
+/* Required compiler attributes */
+#ifndef __maybe_unused
+#define __maybe_unused __rte_unused
+#endif
+#ifndef __always_unused
+#define __always_unused __rte_unused
+#endif
+#ifndef __packed
+#define __packed __rte_packed
+#endif
+#define noinline __attribute__((noinline))
+
+#define L1_CACHE_BYTES 64
+#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+#define __stringify_1(x) #x
+#define __stringify(x) __stringify_1(x)
+
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#endif
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/* Debugging */
+#define prflush(fmt, args...) \
+ do { \
+ printf(fmt, ##args); \
+ fflush(stdout); \
+ } while (0)
+
+#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
+#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
+#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
+#define pr_info(fmt, args...) prflush(fmt, ##args)
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
+#ifdef pr_debug
+#undef pr_debug
+#endif
+#define pr_debug(fmt, args...) printf(fmt, ##args)
+#else
+#define pr_debug(fmt, args...) {}
+#endif
+
+#define DPAA_BUG_ON(x) RTE_ASSERT(x)
+
+/* Required types */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef uint64_t dma_addr_t;
+typedef cpu_set_t cpumask_t;
+typedef uint32_t phandle;
+typedef uint32_t gfp_t;
+typedef uint32_t irqreturn_t;
+
+#define IRQ_HANDLED 0
+#define request_irq qbman_request_irq
+#define free_irq qbman_free_irq
+
+#define __iomem
+#define GFP_KERNEL 0
+#define __raw_readb(p) (*(const volatile unsigned char *)(p))
+#define __raw_readl(p) (*(const volatile unsigned int *)(p))
+#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
+
+/* to be used as an upper-limit only */
+#define NR_CPUS 64
+
+/* Waitqueue stuff */
+typedef struct { } wait_queue_head_t;
+#define DECLARE_WAIT_QUEUE_HEAD(x) int dummy_##x __always_unused
+#define wake_up(x) do { } while (0)
+
+/* I/O operations */
+static inline u32 in_be32(volatile void *__p)
+{
+ volatile u32 *p = __p;
+ return rte_be_to_cpu_32(*p);
+}
+
+static inline void out_be32(volatile void *__p, u32 val)
+{
+ volatile u32 *p = __p;
+ *p = rte_cpu_to_be_32(val);
+}
+
+#define hwsync() rte_rmb()
+#define lwsync() rte_wmb()
+
+#define dcbt_ro(p) __builtin_prefetch(p, 0)
+#define dcbt_rw(p) __builtin_prefetch(p, 1)
+
+#if defined(RTE_ARCH_ARM64)
+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
+#define dcbz_64(p) dcbz(p)
+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }
+
+#define dcbit_ro(p) \
+ do { \
+ dccivac(p); \
+ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); \
+ } while (0)
+
+#elif defined(RTE_ARCH_ARM)
+#define dcbz(p) memset((p), 0, 32)
+#define dcbz_64(p) memset((p), 0, 64)
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define dcbit_ro(p) RTE_SET_USED(p)
+
+#else
+#define dcbz(p) RTE_SET_USED(p)
+#define dcbz_64(p) dcbz(p)
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define dcbit_ro(p) RTE_SET_USED(p)
+#endif
+
+#define barrier() { asm volatile ("" : : : "memory"); }
+#define cpu_relax barrier
+
+#if defined(RTE_ARCH_ARM64)
+static inline uint64_t mfatb(void)
+{
+ uint64_t ret, ret_new, timeout = 200;
+
+ asm volatile ("mrs %0, cntvct_el0" : "=r" (ret));
+ asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
+ while (ret != ret_new && timeout--) {
+ ret = ret_new;
+ asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
+ }
+ DPAA_BUG_ON(!timeout && (ret != ret_new));
+ return ret * 64;
+}
+#else
+
+#define mfatb rte_rdtsc
+
+#endif
+
+/* Spin for a few cycles without bothering the bus */
+static inline void cpu_spin(int cycles)
+{
+ uint64_t now = mfatb();
+
+ while (mfatb() < (now + cycles))
+ ;
+}
+
+/* Qman/Bman API inlines and macros; */
+#ifdef lower_32_bits
+#undef lower_32_bits
+#endif
+#define lower_32_bits(x) ((u32)(x))
+
+#ifdef upper_32_bits
+#undef upper_32_bits
+#endif
+#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
+
+/*
+ * Swap bytes of a 48-bit value.
+ */
+static inline uint64_t
+__bswap_48(uint64_t x)
+{
+ return ((x & 0x0000000000ffULL) << 40) |
+ ((x & 0x00000000ff00ULL) << 24) |
+ ((x & 0x000000ff0000ULL) << 8) |
+ ((x & 0x0000ff000000ULL) >> 8) |
+ ((x & 0x00ff00000000ULL) >> 24) |
+ ((x & 0xff0000000000ULL) >> 40);
+}
+
+/*
+ * Swap bytes of a 40-bit value.
+ */
+static inline uint64_t
+__bswap_40(uint64_t x)
+{
+ return ((x & 0x00000000ffULL) << 32) |
+ ((x & 0x000000ff00ULL) << 16) |
+ ((x & 0x0000ff0000ULL)) |
+ ((x & 0x00ff000000ULL) >> 16) |
+ ((x & 0xff00000000ULL) >> 32);
+}
+
+/*
+ * Swap bytes of a 24-bit value.
+ */
+static inline uint32_t
+__bswap_24(uint32_t x)
+{
+ return ((x & 0x0000ffULL) << 16) |
+ ((x & 0x00ff00ULL)) |
+ ((x & 0xff0000ULL) >> 16);
+}
+
+#define be64_to_cpu(x) rte_be_to_cpu_64(x)
+#define be32_to_cpu(x) rte_be_to_cpu_32(x)
+#define be16_to_cpu(x) rte_be_to_cpu_16(x)
+
+#define cpu_to_be64(x) rte_cpu_to_be_64(x)
+#define cpu_to_be32(x) rte_cpu_to_be_32(x)
+#define cpu_to_be16(x) rte_cpu_to_be_16(x)
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define cpu_to_be48(x) __bswap_48(x)
+#define be48_to_cpu(x) __bswap_48(x)
+
+#define cpu_to_be40(x) __bswap_40(x)
+#define be40_to_cpu(x) __bswap_40(x)
+
+#define cpu_to_be24(x) __bswap_24(x)
+#define be24_to_cpu(x) __bswap_24(x)
+
+#else /* RTE_BIG_ENDIAN */
+
+#define cpu_to_be48(x) (x)
+#define be48_to_cpu(x) (x)
+
+#define cpu_to_be40(x) (x)
+#define be40_to_cpu(x) (x)
+
+#define cpu_to_be24(x) (x)
+#define be24_to_cpu(x) (x)
+
+#endif /* RTE_BIG_ENDIAN */
+
+/* When copying aligned words or shorts, try to avoid memcpy() */
+/* memcpy() stuff - when you know alignments in advance */
+#define CONFIG_TRY_BETTER_MEMCPY
+
+#ifdef CONFIG_TRY_BETTER_MEMCPY
+static inline void copy_words(void *dest, const void *src, size_t sz)
+{
+ u32 *__dest = dest;
+ const u32 *__src = src;
+ size_t __sz = sz >> 2;
+
+ DPAA_BUG_ON((unsigned long)dest & 0x3);
+ DPAA_BUG_ON((unsigned long)src & 0x3);
+ DPAA_BUG_ON(sz & 0x3);
+ while (__sz--)
+ *(__dest++) = *(__src++);
+}
+
+static inline void copy_shorts(void *dest, const void *src, size_t sz)
+{
+ u16 *__dest = dest;
+ const u16 *__src = src;
+ size_t __sz = sz >> 1;
+
+ DPAA_BUG_ON((unsigned long)dest & 0x1);
+ DPAA_BUG_ON((unsigned long)src & 0x1);
+ DPAA_BUG_ON(sz & 0x1);
+ while (__sz--)
+ *(__dest++) = *(__src++);
+}
+
+static inline void copy_bytes(void *dest, const void *src, size_t sz)
+{
+ u8 *__dest = dest;
+ const u8 *__src = src;
+
+ while (sz--)
+ *(__dest++) = *(__src++);
+}
+#else
+#define copy_words memcpy
+#define copy_shorts memcpy
+#define copy_bytes memcpy
+#endif
+
+/* Allocator stuff */
+#define kmalloc(sz, t) malloc(sz)
+#define vmalloc(sz) malloc(sz)
+#define kfree(p) { if (p) free(p); }
+static inline void *kzalloc(size_t sz, gfp_t __foo __rte_unused)
+{
+ void *ptr = malloc(sz);
+
+ if (ptr)
+ memset(ptr, 0, sz);
+ return ptr;
+}
+
+static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
+{
+ void *p;
+
+ if (posix_memalign(&p, 4096, 4096))
+ return 0;
+ memset(p, 0, 4096);
+ return (unsigned long)p;
+}
+
+/* Spinlock stuff */
+#define spinlock_t rte_spinlock_t
+#define __SPIN_LOCK_UNLOCKED(x) RTE_SPINLOCK_INITIALIZER
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+#define spin_lock_init(x) rte_spinlock_init(x)
+#define spin_lock_destroy(x)
+#define spin_lock(x) rte_spinlock_lock(x)
+#define spin_unlock(x) rte_spinlock_unlock(x)
+#define spin_lock_irq(x) spin_lock(x)
+#define spin_unlock_irq(x) spin_unlock(x)
+#define spin_lock_irqsave(x, f) spin_lock_irq(x)
+#define spin_unlock_irqrestore(x, f) spin_unlock_irq(x)
+
+#define atomic_t rte_atomic32_t
+#define atomic_read(v) rte_atomic32_read(v)
+#define atomic_set(v, i) rte_atomic32_set(v, i)
+
+#define atomic_inc(v) rte_atomic32_add(v, 1)
+#define atomic_dec(v) rte_atomic32_sub(v, 1)
+
+#define atomic_inc_and_test(v) rte_atomic32_inc_and_test(v)
+#define atomic_dec_and_test(v) rte_atomic32_dec_and_test(v)
+
+#define atomic_inc_return(v) rte_atomic32_add_return(v, 1)
+#define atomic_dec_return(v) rte_atomic32_sub_return(v, 1)
+#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)
+
+#include <dpaa_list.h>
+#include <dpaa_bits.h>
+
+#endif /* __COMPAT_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_bits.h b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_bits.h
new file mode 100644
index 00000000..9bc14d0c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_bits.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA_BITS_H
+#define __DPAA_BITS_H
+
+/* Bitfield stuff. */
+#define BITS_PER_ULONG (sizeof(unsigned long) << 3)
+#define SHIFT_PER_ULONG (((1 << 5) == BITS_PER_ULONG) ? 5 : 6)
+#define BITS_MASK(idx) (1UL << ((idx) & (BITS_PER_ULONG - 1)))
+#define BITS_IDX(idx) ((idx) >> SHIFT_PER_ULONG)
+
+static inline void dpaa_set_bits(unsigned long mask,
+ volatile unsigned long *p)
+{
+ *p |= mask;
+}
+
+static inline void dpaa_set_bit(int idx, volatile unsigned long *bits)
+{
+ dpaa_set_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
+}
+
+static inline void dpaa_clear_bits(unsigned long mask,
+ volatile unsigned long *p)
+{
+ *p &= ~mask;
+}
+
+static inline void dpaa_clear_bit(int idx,
+ volatile unsigned long *bits)
+{
+ dpaa_clear_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
+}
+
+#endif /* __DPAA_BITS_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_list.h b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_list.h
new file mode 100644
index 00000000..e9457598
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_list.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA_LIST_H
+#define __DPAA_LIST_H
+
+/****************/
+/* Linked-lists */
+/****************/
+
+struct list_head {
+ struct list_head *prev;
+ struct list_head *next;
+};
+
+#define COMPAT_LIST_HEAD(n) \
+struct list_head n = { \
+ .prev = &n, \
+ .next = &n \
+}
+
+#define INIT_LIST_HEAD(p) \
+do { \
+ struct list_head *__p298 = (p); \
+ __p298->next = __p298; \
+ __p298->prev = __p298->next; \
+} while (0)
+#define list_entry(node, type, member) \
+ (type *)((void *)node - offsetof(type, member))
+#define list_empty(p) \
+({ \
+ const struct list_head *__p298 = (p); \
+ ((__p298->next == __p298) && (__p298->prev == __p298)); \
+})
+#define list_add(p, l) \
+do { \
+ struct list_head *__p298 = (p); \
+ struct list_head *__l298 = (l); \
+ __p298->next = __l298->next; \
+ __p298->prev = __l298; \
+ __l298->next->prev = __p298; \
+ __l298->next = __p298; \
+} while (0)
+#define list_add_tail(p, l) \
+do { \
+ struct list_head *__p298 = (p); \
+ struct list_head *__l298 = (l); \
+ __p298->prev = __l298->prev; \
+ __p298->next = __l298; \
+ __l298->prev->next = __p298; \
+ __l298->prev = __p298; \
+} while (0)
+#define list_for_each(i, l) \
+ for (i = (l)->next; i != (l); i = i->next)
+#define list_for_each_safe(i, j, l) \
+ for (i = (l)->next, j = i->next; i != (l); \
+ i = j, j = i->next)
+#define list_for_each_entry(i, l, name) \
+ for (i = list_entry((l)->next, typeof(*i), name); &i->name != (l); \
+ i = list_entry(i->name.next, typeof(*i), name))
+#define list_for_each_entry_safe(i, j, l, name) \
+ for (i = list_entry((l)->next, typeof(*i), name), \
+ j = list_entry(i->name.next, typeof(*j), name); \
+ &i->name != (l); \
+ i = j, j = list_entry(j->name.next, typeof(*j), name))
+#define list_del(i) \
+do { \
+ (i)->next->prev = (i)->prev; \
+ (i)->prev->next = (i)->next; \
+} while (0)
+
+#endif /* __DPAA_LIST_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_rbtree.h b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_rbtree.h
new file mode 100644
index 00000000..6c237e70
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_rbtree.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA_RBTREE_H
+#define __DPAA_RBTREE_H
+
+#include <rte_common.h>
+/************/
+/* RB-trees */
+/************/
+
+/* Linux has a good RB-tree implementation, that we can't use (GPL). It also has
+ * a flat/hooked-in interface that virtually requires license-contamination in
+ * order to write a caller-compatible implementation. Instead, I've created an
+ * RB-tree encapsulation on top of linux's primitives (it does some of the work
+ * the client logic would normally do), and this gives us something we can
+ * reimplement on LWE. Unfortunately there's no good+free RB-tree
+ * implementations out there that are license-compatible and "flat" (ie. no
+ * dynamic allocation). I did find a malloc-based one that I could convert, but
+ * that will be a task for later on. For now, LWE's RB-tree is implemented using
+ * an ordered linked-list.
+ *
+ * Note, the only linux-esque type is "struct rb_node", because it's used
+ * statically in the exported header, so it can't be opaque. Our version doesn't
+ * include a "rb_parent_color" field because we're doing linked-list instead of
+ * a true rb-tree.
+ */
+
+struct rb_node {
+ struct rb_node *prev, *next;
+};
+
+struct dpa_rbtree {
+ struct rb_node *head, *tail;
+};
+
+#define DPAA_RBTREE { NULL, NULL }
+static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
+{
+ tree->head = tree->tail = NULL;
+}
+
+#define QMAN_NODE2OBJ(ptr, type, node_field) \
+ (type *)((char *)ptr - offsetof(type, node_field))
+
+#define IMPLEMENT_DPAA_RBTREE(name, type, node_field, val_field) \
+static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
+{ \
+ struct rb_node *node = tree->head; \
+ if (!node) { \
+ tree->head = tree->tail = &obj->node_field; \
+ obj->node_field.prev = obj->node_field.next = NULL; \
+ return 0; \
+ } \
+ while (node) { \
+ type *item = QMAN_NODE2OBJ(node, type, node_field); \
+ if (obj->val_field == item->val_field) \
+ return -EBUSY; \
+ if (obj->val_field < item->val_field) { \
+ if (tree->head == node) \
+ tree->head = &obj->node_field; \
+ else \
+ node->prev->next = &obj->node_field; \
+ obj->node_field.prev = node->prev; \
+ obj->node_field.next = node; \
+ node->prev = &obj->node_field; \
+ return 0; \
+ } \
+ node = node->next; \
+ } \
+ obj->node_field.prev = tree->tail; \
+ obj->node_field.next = NULL; \
+ tree->tail->next = &obj->node_field; \
+ tree->tail = &obj->node_field; \
+ return 0; \
+} \
+static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
+{ \
+ if (tree->head == &obj->node_field) { \
+ if (tree->tail == &obj->node_field) \
+ /* Only item in the list */ \
+ tree->head = tree->tail = NULL; \
+ else { \
+ /* Is the head, next != NULL */ \
+ tree->head = tree->head->next; \
+ tree->head->prev = NULL; \
+ } \
+ } else { \
+ if (tree->tail == &obj->node_field) { \
+ /* Is the tail, prev != NULL */ \
+ tree->tail = tree->tail->prev; \
+ tree->tail->next = NULL; \
+ } else { \
+ /* Is neither the head nor the tail */ \
+ obj->node_field.prev->next = obj->node_field.next; \
+ obj->node_field.next->prev = obj->node_field.prev; \
+ } \
+ } \
+} \
+static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
+{ \
+ struct rb_node *node = tree->head; \
+ while (node) { \
+ type *item = QMAN_NODE2OBJ(node, type, node_field); \
+ if (val == item->val_field) \
+ return item; \
+ if (val < item->val_field) \
+ return NULL; \
+ node = node->next; \
+ } \
+ return NULL; \
+}
+
+#endif /* __DPAA_RBTREE_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fman.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fman.h
new file mode 100644
index 00000000..15bf73a4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fman.h
@@ -0,0 +1,425 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2012 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __FMAN_H
+#define __FMAN_H
+
+#include <stdbool.h>
+#include <net/if.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+
+#include <compat.h>
+
+#ifndef FMAN_DEVICE_PATH
+#define FMAN_DEVICE_PATH "/dev/mem"
+#endif
+
+#define MEMAC_NUM_OF_PADDRS 7 /* Num of additional exact match MAC adr regs */
+
+/* Control and Configuration Register (COMMAND_CONFIG) for MEMAC */
+#define CMD_CFG_LOOPBACK_EN 0x00000400
+/**< 21 XGMII/GMII loopback enable */
+#define CMD_CFG_PROMIS_EN 0x00000010
+/**< 27 Promiscuous operation enable */
+#define CMD_CFG_PAUSE_IGNORE 0x00000100
+/**< 23 Ignore Pause frame quanta */
+
+/* Statistics Configuration Register (STATN_CONFIG) */
+#define STATS_CFG_CLR 0x00000004
+/**< 29 Reset all counters */
+#define STATS_CFG_CLR_ON_RD 0x00000002
+/**< 30 Clear on read */
+#define STATS_CFG_SATURATE 0x00000001
+/**< 31 Saturate at the maximum val */
+
+/**< Max receive frame length mask */
+#define MAXFRM_SIZE_MEMAC 0x00007fe0
+#define MAXFRM_RX_MASK 0x0000ffff
+
+/**< Interface Mode Register Register for MEMAC */
+#define IF_MODE_RLP 0x00000820
+
+/**< Pool Limits */
+#define FMAN_PORT_MAX_EXT_POOLS_NUM 8
+#define FMAN_PORT_OBS_EXT_POOLS_NUM 2
+
+#define FMAN_PORT_CG_MAP_NUM 8
+#define FMAN_PORT_PRS_RESULT_WORDS_NUM 8
+#define FMAN_PORT_BMI_FIFO_UNITS 0x100
+#define FMAN_PORT_IC_OFFSET_UNITS 0x10
+
+#define FMAN_ENABLE_BPOOL_DEPLETION 0xF00000F0
+
+#define HASH_CTRL_MCAST_EN 0x00000100
+#define GROUP_ADDRESS 0x0000010000000000LL
+#define HASH_CTRL_ADDR_MASK 0x0000003F
+
+/* Pre definitions of FMAN interface and Bpool structures */
+struct __fman_if;
+struct fman_if_bpool;
+/* Lists of fman interfaces and bpools */
+TAILQ_HEAD(rte_fman_if_list, __fman_if);
+
+/* Represents the different flavour of network interface */
+enum fman_mac_type {
+ fman_offline = 0,
+ fman_mac_1g,
+ fman_mac_10g,
+};
+
+struct mac_addr {
+ uint32_t mac_addr_l; /**< Lower 32 bits of 48-bit MAC address */
+ uint32_t mac_addr_u; /**< Upper 16 bits of 48-bit MAC address */
+};
+
+struct memac_regs {
+ /* General Control and Status */
+ uint32_t res0000[2];
+ uint32_t command_config; /**< 0x008 Ctrl and cfg */
+ struct mac_addr mac_addr0; /**< 0x00C-0x010 MAC_ADDR_0...1 */
+ uint32_t maxfrm; /**< 0x014 Max frame length */
+ uint32_t res0018[5];
+ uint32_t hashtable_ctrl; /**< 0x02C Hash table control */
+ uint32_t res0030[4];
+ uint32_t ievent; /**< 0x040 Interrupt event */
+ uint32_t tx_ipg_length;
+ /**< 0x044 Transmitter inter-packet-gap */
+ uint32_t res0048;
+ uint32_t imask; /**< 0x04C Interrupt mask */
+ uint32_t res0050;
+ uint32_t pause_quanta[4]; /**< 0x054 Pause quanta */
+ uint32_t pause_thresh[4]; /**< 0x064 Pause quanta threshold */
+ uint32_t rx_pause_status; /**< 0x074 Receive pause status */
+ uint32_t res0078[2];
+ struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS];
+ /**< 0x80-0x0B4 mac padr */
+ uint32_t lpwake_timer;
+ /**< 0x0B8 Low Power Wakeup Timer */
+ uint32_t sleep_timer;
+ /**< 0x0BC Transmit EEE Low Power Timer */
+ uint32_t res00c0[8];
+ uint32_t statn_config;
+ /**< 0x0E0 Statistics configuration */
+ uint32_t res00e4[7];
+ /* Rx Statistics Counter */
+ uint32_t reoct_l; /**<Rx Eth Octets Counter */
+ uint32_t reoct_u;
+ uint32_t roct_l; /**<Rx Octet Counters */
+ uint32_t roct_u;
+ uint32_t raln_l; /**<Rx Alignment Error Counter */
+ uint32_t raln_u;
+ uint32_t rxpf_l; /**<Rx valid Pause Frame */
+ uint32_t rxpf_u;
+ uint32_t rfrm_l; /**<Rx Frame counter */
+ uint32_t rfrm_u;
+ uint32_t rfcs_l; /**<Rx frame check seq error */
+ uint32_t rfcs_u;
+ uint32_t rvlan_l; /**<Rx Vlan Frame Counter */
+ uint32_t rvlan_u;
+ uint32_t rerr_l; /**<Rx Frame error */
+ uint32_t rerr_u;
+ uint32_t ruca_l; /**<Rx Unicast */
+ uint32_t ruca_u;
+ uint32_t rmca_l; /**<Rx Multicast */
+ uint32_t rmca_u;
+ uint32_t rbca_l; /**<Rx Broadcast */
+ uint32_t rbca_u;
+ uint32_t rdrp_l; /**<Rx Dropper Packet */
+ uint32_t rdrp_u;
+ uint32_t rpkt_l; /**<Rx packet */
+ uint32_t rpkt_u;
+ uint32_t rund_l; /**<Rx undersized packets */
+ uint32_t rund_u;
+ uint32_t r64_l; /**<Rx 64 byte */
+ uint32_t r64_u;
+ uint32_t r127_l;
+ uint32_t r127_u;
+ uint32_t r255_l;
+ uint32_t r255_u;
+ uint32_t r511_l;
+ uint32_t r511_u;
+ uint32_t r1023_l;
+ uint32_t r1023_u;
+ uint32_t r1518_l;
+ uint32_t r1518_u;
+ uint32_t r1519x_l;
+ uint32_t r1519x_u;
+ uint32_t rovr_l; /**<Rx oversized but good */
+ uint32_t rovr_u;
+ uint32_t rjbr_l; /**<Rx oversized with bad csum */
+ uint32_t rjbr_u;
+ uint32_t rfrg_l; /**<Rx fragment Packet */
+ uint32_t rfrg_u;
+ uint32_t rcnp_l; /**<Rx control packets (0x8808 */
+ uint32_t rcnp_u;
+ uint32_t rdrntp_l; /**<Rx dropped due to FIFO overflow */
+ uint32_t rdrntp_u;
+ uint32_t res01d0[12];
+ /* Tx Statistics Counter */
+ uint32_t teoct_l; /**<Tx eth octets */
+ uint32_t teoct_u;
+ uint32_t toct_l; /**<Tx Octets */
+ uint32_t toct_u;
+ uint32_t res0210[2];
+ uint32_t txpf_l; /**<Tx valid pause frame */
+ uint32_t txpf_u;
+ uint32_t tfrm_l; /**<Tx frame counter */
+ uint32_t tfrm_u;
+ uint32_t tfcs_l; /**<Tx FCS error */
+ uint32_t tfcs_u;
+ uint32_t tvlan_l; /**<Tx Vlan Frame */
+ uint32_t tvlan_u;
+ uint32_t terr_l; /**<Tx frame error */
+ uint32_t terr_u;
+ uint32_t tuca_l; /**<Tx Unicast */
+ uint32_t tuca_u;
+ uint32_t tmca_l; /**<Tx Multicast */
+ uint32_t tmca_u;
+ uint32_t tbca_l; /**<Tx Broadcast */
+ uint32_t tbca_u;
+ uint32_t res0258[2];
+ uint32_t tpkt_l; /**<Tx Packet */
+ uint32_t tpkt_u;
+ uint32_t tund_l; /**<Tx Undersized */
+ uint32_t tund_u;
+ uint32_t t64_l;
+ uint32_t t64_u;
+ uint32_t t127_l;
+ uint32_t t127_u;
+ uint32_t t255_l;
+ uint32_t t255_u;
+ uint32_t t511_l;
+ uint32_t t511_u;
+ uint32_t t1023_l;
+ uint32_t t1023_u;
+ uint32_t t1518_l;
+ uint32_t t1518_u;
+ uint32_t t1519x_l;
+ uint32_t t1519x_u;
+ uint32_t res02a8[6];
+ uint32_t tcnp_l; /**<Tx Control Packet type - 0x8808 */
+ uint32_t tcnp_u;
+ uint32_t res02c8[14];
+ /* Line Interface Control */
+ uint32_t if_mode; /**< 0x300 Interface Mode Control */
+ uint32_t if_status; /**< 0x304 Interface Status */
+ uint32_t res0308[14];
+ /* HiGig/2 */
+ uint32_t hg_config; /**< 0x340 Control and cfg */
+ uint32_t res0344[3];
+ uint32_t hg_pause_quanta; /**< 0x350 Pause quanta */
+ uint32_t res0354[3];
+ uint32_t hg_pause_thresh; /**< 0x360 Pause quanta threshold */
+ uint32_t res0364[3];
+ uint32_t hgrx_pause_status; /**< 0x370 Receive pause status */
+ uint32_t hg_fifos_status; /**< 0x374 fifos status */
+ uint32_t rhm; /**< 0x378 rx messages counter */
+ uint32_t thm; /**< 0x37C tx messages counter */
+};
+
+struct rx_bmi_regs {
+ uint32_t fmbm_rcfg; /**< Rx Configuration */
+ uint32_t fmbm_rst; /**< Rx Status */
+ uint32_t fmbm_rda; /**< Rx DMA attributes*/
+ uint32_t fmbm_rfp; /**< Rx FIFO Parameters*/
+ uint32_t fmbm_rfed; /**< Rx Frame End Data*/
+ uint32_t fmbm_ricp; /**< Rx Internal Context Parameters*/
+ uint32_t fmbm_rim; /**< Rx Internal Buffer Margins*/
+ uint32_t fmbm_rebm; /**< Rx External Buffer Margins*/
+ uint32_t fmbm_rfne; /**< Rx Frame Next Engine*/
+ uint32_t fmbm_rfca; /**< Rx Frame Command Attributes.*/
+ uint32_t fmbm_rfpne; /**< Rx Frame Parser Next Engine*/
+ uint32_t fmbm_rpso; /**< Rx Parse Start Offset*/
+ uint32_t fmbm_rpp; /**< Rx Policer Profile */
+ uint32_t fmbm_rccb; /**< Rx Coarse Classification Base */
+ uint32_t fmbm_reth; /**< Rx Excessive Threshold */
+ uint32_t reserved003c[1]; /**< (0x03C 0x03F) */
+ uint32_t fmbm_rprai[FMAN_PORT_PRS_RESULT_WORDS_NUM];
+ /**< Rx Parse Results Array Init*/
+ uint32_t fmbm_rfqid; /**< Rx Frame Queue ID*/
+ uint32_t fmbm_refqid; /**< Rx Error Frame Queue ID*/
+ uint32_t fmbm_rfsdm; /**< Rx Frame Status Discard Mask*/
+ uint32_t fmbm_rfsem; /**< Rx Frame Status Error Mask*/
+ uint32_t fmbm_rfene; /**< Rx Frame Enqueue Next Engine */
+ uint32_t reserved0074[0x2]; /**< (0x074-0x07C) */
+ uint32_t fmbm_rcmne;
+ /**< Rx Frame Continuous Mode Next Engine */
+ uint32_t reserved0080[0x20];/**< (0x080 0x0FF) */
+ uint32_t fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
+ /**< Buffer Manager pool Information-*/
+ uint32_t fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];
+ /**< Allocate Counter-*/
+ uint32_t reserved0130[8];
+ /**< 0x130/0x140 - 0x15F reserved -*/
+ uint32_t fmbm_rcgm[FMAN_PORT_CG_MAP_NUM];
+ /**< Congestion Group Map*/
+ uint32_t fmbm_mpd; /**< BM Pool Depletion */
+ uint32_t reserved0184[0x1F]; /**< (0x184 0x1FF) */
+ uint32_t fmbm_rstc; /**< Rx Statistics Counters*/
+ uint32_t fmbm_rfrc; /**< Rx Frame Counter*/
+ uint32_t fmbm_rfbc; /**< Rx Bad Frames Counter*/
+ uint32_t fmbm_rlfc; /**< Rx Large Frames Counter*/
+ uint32_t fmbm_rffc; /**< Rx Filter Frames Counter*/
+ uint32_t fmbm_rfdc; /**< Rx Frame Discard Counter*/
+ uint32_t fmbm_rfldec; /**< Rx Frames List DMA Error Counter*/
+ uint32_t fmbm_rodc; /**< Rx Out of Buffers Discard nntr*/
+ uint32_t fmbm_rbdc; /**< Rx Buffers Deallocate Counter*/
+ uint32_t reserved0224[0x17]; /**< (0x224 0x27F) */
+ uint32_t fmbm_rpc; /**< Rx Performance Counters*/
+ uint32_t fmbm_rpcp; /**< Rx Performance Count Parameters*/
+ uint32_t fmbm_rccn; /**< Rx Cycle Counter*/
+ uint32_t fmbm_rtuc; /**< Rx Tasks Utilization Counter*/
+ uint32_t fmbm_rrquc;
+ /**< Rx Receive Queue Utilization cntr*/
+ uint32_t fmbm_rduc; /**< Rx DMA Utilization Counter*/
+ uint32_t fmbm_rfuc; /**< Rx FIFO Utilization Counter*/
+ uint32_t fmbm_rpac; /**< Rx Pause Activation Counter*/
+ uint32_t reserved02a0[0x18]; /**< (0x2A0 0x2FF) */
+ uint32_t fmbm_rdbg; /**< Rx Debug-*/
+};
+
+struct fman_port_qmi_regs {
+ uint32_t fmqm_pnc; /**< PortID n Configuration Register */
+ uint32_t fmqm_pns; /**< PortID n Status Register */
+ uint32_t fmqm_pnts; /**< PortID n Task Status Register */
+ uint32_t reserved00c[4]; /**< 0xn00C - 0xn01B */
+ uint32_t fmqm_pnen; /**< PortID n Enqueue NIA Register */
+ uint32_t fmqm_pnetfc; /**< PortID n Enq Total Frame Counter */
+ uint32_t reserved024[2]; /**< 0xn024 - 0x02B */
+ uint32_t fmqm_pndn; /**< PortID n Dequeue NIA Register */
+ uint32_t fmqm_pndc; /**< PortID n Dequeue Config Register */
+ uint32_t fmqm_pndtfc; /**< PortID n Dequeue tot Frame cntr */
+ uint32_t fmqm_pndfdc; /**< PortID n Dequeue FQID Dflt Cntr */
+ uint32_t fmqm_pndcc; /**< PortID n Dequeue Confirm Counter */
+};
+
+/* This struct exports parameters about an Fman network interface, determined
+ * from the device-tree.
+ */
+struct fman_if {
+ /* Which Fman this interface belongs to */
+ uint8_t fman_idx;
+ /* The type/speed of the interface */
+ enum fman_mac_type mac_type;
+ /* Boolean, set when mac type is memac */
+ uint8_t is_memac;
+ /* Boolean, set when PHY is RGMII */
+ uint8_t is_rgmii;
+ /* The index of this MAC (within the Fman it belongs to) */
+ uint8_t mac_idx;
+ /* The MAC address */
+ struct ether_addr mac_addr;
+ /* The Qman channel to schedule Tx FQs to */
+ u16 tx_channel_id;
+ /* The hard-coded FQIDs for this interface. Note: this doesn't cover
+ * the PCD nor the "Rx default" FQIDs, which are configured via FMC
+ * and its XML-based configuration.
+ */
+ uint32_t fqid_rx_def;
+ uint32_t fqid_rx_err;
+ uint32_t fqid_tx_err;
+ uint32_t fqid_tx_confirm;
+
+ struct list_head bpool_list;
+ /* The node for linking this interface into "fman_if_list" */
+ struct list_head node;
+};
+
+/* This struct exposes parameters for buffer pools, extracted from the network
+ * interface settings in the device tree.
+ */
+struct fman_if_bpool {
+ uint32_t bpid;
+ uint64_t count;
+ uint64_t size;
+ uint64_t addr;
+ /* The node for linking this bpool into fman_if::bpool_list */
+ struct list_head node;
+};
+
+/* Internal Context transfer params - FMBM_RICP*/
+struct fman_if_ic_params {
+ /*IC offset in the packet buffer */
+ uint16_t iceof;
+ /*IC internal offset */
+ uint16_t iciof;
+ /*IC size to copy */
+ uint16_t icsz;
+};
+
+/* The exported "struct fman_if" type contains the subset of fields we want
+ * exposed. This struct is embedded in a larger "struct __fman_if" which
+ * contains the extra bits we *don't* want exposed.
+ */
+struct __fman_if {
+ struct fman_if __if;
+ char node_path[PATH_MAX];
+ uint64_t regs_size;
+ void *ccsr_map;
+ void *bmi_map;
+ void *qmi_map;
+ struct list_head node;
+};
+
+/* And this is the base list node that the interfaces are added to. (See
+ * fman_if_enable_all_rx() below for an example of its use.)
+ */
+extern const struct list_head *fman_if_list;
+
+extern int fman_ccsr_map_fd;
+
+/* To iterate the "bpool_list" for an interface. Eg;
+ * struct fman_if *p = get_ptr_to_some_interface();
+ * struct fman_if_bpool *bp;
+ * printf("Interface uses following BPIDs;\n");
+ * fman_if_for_each_bpool(bp, p) {
+ * printf(" %d\n", bp->bpid);
+ * [...]
+ * }
+ */
+#define fman_if_for_each_bpool(bp, __if) \
+ list_for_each_entry(bp, &(__if)->bpool_list, node)
+
+#define FMAN_ERR(rc, fmt, args...) \
+ do { \
+ _errno = (rc); \
+ DPAA_BUS_LOG(ERR, fmt "(%d)", ##args, errno); \
+ } while (0)
+
+#define FMAN_IP_REV_1 0xC30C4
+#define FMAN_IP_REV_1_MAJOR_MASK 0x0000FF00
+#define FMAN_IP_REV_1_MAJOR_SHIFT 8
+#define FMAN_V3 0x06
+#define FMAN_V3_CONTEXTA_EN_A2V 0x10000000
+#define FMAN_V3_CONTEXTA_EN_OVOM 0x02000000
+#define FMAN_V3_CONTEXTA_EN_EBD 0x80000000
+#define FMAN_CONTEXTA_DIS_CHECKSUM 0x7ull
+#define FMAN_CONTEXTA_SET_OPCODE11 0x2000000b00000000
+extern u16 fman_ip_rev;
+extern u32 fman_dealloc_bufs_mask_hi;
+extern u32 fman_dealloc_bufs_mask_lo;
+
+/**
+ * Initialize the FMAN driver
+ *
+ * @args void
+ * @return
+ * 0 for success; error OTHERWISE
+ */
+int fman_init(void);
+
+/**
+ * Teardown the FMAN driver
+ *
+ * @args void
+ * @return void
+ */
+void fman_finish(void);
+
+#endif /* __FMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_bman.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_bman.h
new file mode 100644
index 00000000..0c74aba4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_bman.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_BMAN_H
+#define __FSL_BMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* This wrapper represents a bit-array for the depletion state of the 64 Bman
+ * buffer pools.
+ */
+struct bman_depletion {
+ u32 state[2];
+};
+
+static inline void bman_depletion_init(struct bman_depletion *c)
+{
+ c->state[0] = c->state[1] = 0;
+}
+
+static inline void bman_depletion_fill(struct bman_depletion *c)
+{
+ c->state[0] = c->state[1] = ~0;
+}
+
+/* --- Bman data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct bm_rcr_entry; /* RCR (Release Command Ring) entries */
+struct bm_mc_command; /* MC (Management Command) command */
+struct bm_mc_result; /* MC result */
+
+/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer
+ * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
+ * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used.
+ */
+struct bm_buffer {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1;
+ u8 bpid;
+ u16 hi; /* High 16-bits of 48-bit address */
+ u32 lo; /* Low 32-bits of 48-bit address */
+#else
+ u32 lo;
+ u16 hi;
+ u8 bpid;
+ u8 __reserved;
+#endif
+ };
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u64 __notaddress:16;
+ u64 addr:48;
+#else
+ u64 addr:48;
+ u64 __notaddress:16;
+#endif
+ };
+ u64 opaque;
+ };
+} __attribute__((aligned(8)));
+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
+{
+ return buf->addr;
+}
+
+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
+{
+ return (dma_addr_t)buf->addr;
+}
+
+#define bm_buffer_set64(buf, v) \
+ do { \
+ struct bm_buffer *__buf931 = (buf); \
+ __buf931->hi = upper_32_bits(v); \
+ __buf931->lo = lower_32_bits(v); \
+ } while (0)
+
+/* See 1.5.3.5.4: "Release Command" */
+struct bm_rcr_entry {
+ union {
+ struct {
+ u8 __dont_write_directly__verb;
+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+ u8 __reserved1[62];
+ };
+ struct bm_buffer bufs[8];
+ };
+} __packed;
+#define BM_RCR_VERB_VBIT 0x80
+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
+
+/* See 1.5.3.1: "Acquire Command" */
+/* See 1.5.3.2: "Query Command" */
+struct bm_mcc_acquire {
+ u8 bpid;
+ u8 __reserved1[62];
+} __packed;
+struct bm_mcc_query {
+ u8 __reserved2[63];
+} __packed;
+struct bm_mc_command {
+ u8 __dont_write_directly__verb;
+ union {
+ struct bm_mcc_acquire acquire;
+ struct bm_mcc_query query;
+ };
+} __packed;
+#define BM_MCC_VERB_VBIT 0x80
+#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE 0x10
+#define BM_MCC_VERB_CMD_QUERY 0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
+
+/* See 1.5.3.3: "Acquire Response" */
+/* See 1.5.3.4: "Query Response" */
+struct bm_pool_state {
+ u8 __reserved1[32];
+ /* "availability state" and "depletion state" */
+ struct {
+ u8 __reserved1[8];
+ /* Access using bman_depletion_***() */
+ struct bman_depletion state;
+ } as, ds;
+};
+
+struct bm_mc_result {
+ union {
+ struct {
+ u8 verb;
+ u8 __reserved1[63];
+ };
+ union {
+ struct {
+ u8 __reserved1;
+ u8 bpid;
+ u8 __reserved2[62];
+ };
+ struct bm_buffer bufs[8];
+ } acquire;
+ struct bm_pool_state query;
+ };
+} __packed;
+#define BM_MCR_VERB_VBIT 0x80
+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
+#define BM_MCR_VERB_CMD_ERR_ECC 0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+
+/* Portal and Buffer Pools */
+/* Represents a managed portal */
+struct bman_portal;
+
+/* This object type represents Bman buffer pools. */
+struct bman_pool;
+
+/* This struct specifies parameters for a bman_pool object. */
+struct bman_pool_params {
+ /* index of the buffer pool to encapsulate (0-63), ignored if
+ * BMAN_POOL_FLAG_DYNAMIC_BPID is set.
+ */
+ u32 bpid;
+ /* bit-mask of BMAN_POOL_FLAG_*** options */
+ u32 flags;
+ /* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
+ * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
+ * when run in the control plane (which controls Bman CCSR). This array
+ * matches the definition of bm_pool_set().
+ */
+ u32 thresholds[4];
+};
+
+/* Flags to bman_new_pool() */
+#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */
+#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */
+#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */
+#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */
+
+/* Flags to bman_release() */
+#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */
+
+
+/**
+ * bman_get_portal_index - get portal configuration index
+ */
+int bman_get_portal_index(void);
+
+/**
+ * bman_rcr_is_empty - Determine if portal's RCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * releases for the local portal have been processed by Bman but can't use the
+ * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
+ * The function forces tracking of RCR consumption (which normally doesn't
+ * happen until release processing needs to find space to put new release
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int bman_rcr_is_empty(void);
+
+/**
+ * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
+ * @result: is set by the API to the base BPID of the allocated range
+ * @count: the number of BPIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count BPIDs
+ *
+ * Returns the number of buffer pools allocated, or a negative error code. If
+ * @partial is non zero, the allocation request may return a smaller range of
+ * BPs than requested (though alignment will be as requested). If @partial is
+ * zero, the return value will either be 'count' or negative.
+ */
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int bman_alloc_bpid(u32 *result)
+{
+ int ret = bman_alloc_bpid_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * bman_release_bpid_range - Release the specified range of buffer pool IDs
+ * @bpid: the base BPID of the range to deallocate
+ * @count: the number of BPIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of BPIDs
+ * that it can subsequently allocate from.
+ */
+void bman_release_bpid_range(u32 bpid, unsigned int count);
+static inline void bman_release_bpid(u32 bpid)
+{
+ bman_release_bpid_range(bpid, 1);
+}
+
+int bman_reserve_bpid_range(u32 bpid, unsigned int count);
+static inline int bman_reserve_bpid(u32 bpid)
+{
+ return bman_reserve_bpid_range(bpid, 1);
+}
+
+void bman_seed_bpid_range(u32 bpid, unsigned int count);
+
+int bman_shutdown_pool(u32 bpid);
+
+/**
+ * bman_new_pool - Allocates a Buffer Pool object
+ * @params: parameters specifying the buffer pool ID and behaviour
+ *
+ * Creates a pool object for the given @params. A portal and the depletion
+ * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
+ * is set. NB, the fields from @params are copied into the new pool object, so
+ * the structure provided by the caller can be released or reused after the
+ * function returns.
+ */
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
+
+/**
+ * bman_free_pool - Deallocates a Buffer Pool object
+ * @pool: the pool object to release
+ */
+void bman_free_pool(struct bman_pool *pool);
+
+/**
+ * bman_get_params - Returns a pool object's parameters.
+ * @pool: the pool object
+ *
+ * The returned pointer refers to state within the pool object so must not be
+ * modified and can no longer be read once the pool object is destroyed.
+ */
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
+
+/**
+ * bman_release - Release buffer(s) to the buffer pool
+ * @pool: the buffer pool object to release to
+ * @bufs: an array of buffers to release
+ * @num: the number of buffers in @bufs (1-8)
+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
+ *
+ */
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+ u32 flags);
+
+/**
+ * bman_acquire - Acquire buffer(s) from a buffer pool
+ * @pool: the buffer pool object to acquire from
+ * @bufs: array for storing the acquired buffers
+ * @num: the number of buffers desired (@bufs is at least this big)
+ *
+ * Issues an "Acquire" command via the portal's management command interface.
+ * The return value will be the number of buffers obtained from the pool, or a
+ * negative error code if a h/w error or pool starvation was encountered.
+ */
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+ u32 flags);
+
+/**
+ * bman_query_pools - Query all buffer pool states
+ * @state: storage for the queried availability and depletion states
+ */
+int bman_query_pools(struct bm_pool_state *state);
+
+/**
+ * bman_query_free_buffers - Query how many free buffers are in buffer pool
+ * @pool: the buffer pool object to query
+ *
+ * Return the number of the free buffers
+ */
+u32 bman_query_free_buffers(struct bman_pool *pool);
+
+/**
+ * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
+ * @pool: the buffer pool object to which the thresholds will be set
+ * @thresholds: the new thresholds
+ */
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
+
+/**
+ * bm_pool_set_hw_threshold - Change the buffer pool's thresholds
+ * @pool: Pool id
+ * @low_thresh: low threshold
+ * @high_thresh: high threshold
+ */
+int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
+ const u32 high_thresh);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_BMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman.h
new file mode 100644
index 00000000..1d1ce867
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __FSL_FMAN_H
+#define __FSL_FMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Status field in FD is updated on Rx side by FMAN with following information.
+ * Refer to field description in FM BG.
+ */
+struct fm_status_t {
+ unsigned int reserved0:3;
+ unsigned int dcl4c:1; /* Don't Check L4 Checksum */
+ unsigned int reserved1:1;
+ unsigned int ufd:1; /* Unsupported Format */
+ unsigned int lge:1; /* Length Error */
+ unsigned int dme:1; /* DMA Error */
+
+ unsigned int reserved2:4;
+ unsigned int fpe:1; /* Frame physical Error */
+ unsigned int fse:1; /* Frame Size Error */
+ unsigned int dis:1; /* Discard by Classification */
+ unsigned int reserved3:1;
+
+ unsigned int eof:1; /* Key Extraction goes out of frame */
+ unsigned int nss:1; /* No Scheme selected */
+ unsigned int kso:1; /* Key Size Overflow */
+ unsigned int reserved4:1;
+ unsigned int fcl:2; /* Frame Color */
+ unsigned int ipp:1; /* Illegal Policer Profile Selected */
+ unsigned int flm:1; /* Frame Length Mismatch */
+ unsigned int pte:1; /* Parser Timeout */
+ unsigned int isp:1; /* Invalid Soft Parser Instruction */
+ unsigned int phe:1; /* Header Error during parsing */
+ unsigned int frdr:1; /* Frame Dropped by disabled port */
+ unsigned int reserved5:4;
+} __attribute__ ((__packed__));
+
+/* Set MAC address for a particular interface */
+int fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num);
+
+/* Remove a MAC address for a particular interface */
+void fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num);
+
+/* Get the FMAN statistics */
+void fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats);
+
+/* Reset the FMAN statistics */
+void fman_if_stats_reset(struct fman_if *p);
+
+/* Get all of the FMAN statistics */
+void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
+
+/* Set ignore pause option for a specific interface */
+void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
+
+/* Set max frame length */
+void fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len);
+
+/* Enable/disable Rx promiscuous mode on specified interface */
+void fman_if_promiscuous_enable(struct fman_if *p);
+void fman_if_promiscuous_disable(struct fman_if *p);
+
+/* Enable/disable Rx on specific interfaces */
+void fman_if_enable_rx(struct fman_if *p);
+void fman_if_disable_rx(struct fman_if *p);
+
+/* Enable/disable loopback on specific interfaces */
+void fman_if_loopback_enable(struct fman_if *p);
+void fman_if_loopback_disable(struct fman_if *p);
+
+/* Set buffer pool on specific interface */
+void fman_if_set_bp(struct fman_if *fm_if, unsigned int num, int bpid,
+ size_t bufsize);
+
+/* Get Flow Control threshold parameters on specific interface */
+int fman_if_get_fc_threshold(struct fman_if *fm_if);
+
+/* Enable and Set Flow Control threshold parameters on specific interface */
+int fman_if_set_fc_threshold(struct fman_if *fm_if,
+ u32 high_water, u32 low_water, u32 bpid);
+
+/* Get Flow Control pause quanta on specific interface */
+int fman_if_get_fc_quanta(struct fman_if *fm_if);
+
+/* Set Flow Control pause quanta on specific interface */
+int fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta);
+
+/* Set default error fqid on specific interface */
+void fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid);
+
+/* Get IC transfer params */
+int fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp);
+
+/* Set IC transfer params */
+int fman_if_set_ic_params(struct fman_if *fm_if,
+ const struct fman_if_ic_params *icp);
+
+/* Get interface fd->offset value */
+int fman_if_get_fdoff(struct fman_if *fm_if);
+
+/* Set interface fd->offset value */
+void fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset);
+
+/* Get interface SG enable status value */
+int fman_if_get_sg_enable(struct fman_if *fm_if);
+
+/* Set interface SG support mode */
+void fman_if_set_sg(struct fman_if *fm_if, int enable);
+
+/* Get interface Max Frame length (MTU) */
+uint16_t fman_if_get_maxfrm(struct fman_if *fm_if);
+
+/* Set interface Max Frame length (MTU) */
+void fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm);
+
+/* Set interface next invoked action for dequeue operation */
+void fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia);
+
+/* discard error packets on rx */
+void fman_if_discard_rx_errors(struct fman_if *fm_if);
+
+void fman_if_set_mcast_filter_table(struct fman_if *p);
+
+void fman_if_reset_mcast_filter_table(struct fman_if *p);
+
+int fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth);
+
+int fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth);
+
+
+/* Enable/disable Rx on all interfaces */
+static inline void fman_if_enable_all_rx(void)
+{
+ struct fman_if *__if;
+
+ list_for_each_entry(__if, fman_if_list, node)
+ fman_if_enable_rx(__if);
+}
+
+static inline void fman_if_disable_all_rx(void)
+{
+ struct fman_if *__if;
+
+ list_for_each_entry(__if, fman_if_list, node)
+ fman_if_disable_rx(__if);
+}
+#endif /* __FSL_FMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman_crc64.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman_crc64.h
new file mode 100644
index 00000000..bf162f3a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman_crc64.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_FMAN_CRC64_H
+#define __FSL_FMAN_CRC64_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This following definitions provide a software implementation of the CRC64
+ * algorithm implemented within Fman.
+ *
+ * The following example shows how to compute a CRC64 hash value based on
+ * SRC_IP, DST_IP and ESP_SPI values
+ *
+ * #define compute_hash(saddr,daddr,spi) \
+ * do { \
+ * uint64_t result; \
+ * result = fman_crc64_init(); \
+ * result = fman_crc64_compute_32bit(saddr, result); \
+ * result = fman_crc64_compute_32bit(daddr, result); \
+ * result = fman_crc64_compute_32bit(spi, result); \
+ * return (uint32_t) result & RC_HASH_MASK; \
+ * } while (0);
+ *
+ * If hashing over a different number of fields (or of different types) is
+ * required, this can be implemented using the following primitives.
+ */
+
+/* The following table provides the constants used by the Fman CRC64
+ * implementation. The table is instantiated within the DPAA fman driver.
+ * However if the application is not going to be linked against the DPAA fman
+ * driver but will use this Fman CRC64 implementation, then it will need to
+ * instantiate this table by using the DECLARE_FMAN_CRC64_TABLE() macro.
+ */
+struct fman_crc64_t {
+ uint64_t initial;
+ uint64_t table[1 << 8];
+};
+extern struct fman_crc64_t FMAN_CRC64_ECMA_182;
+#define DECLARE_FMAN_CRC64_TABLE() \
+struct fman_crc64_t FMAN_CRC64_ECMA_182 = { \
+ 0xFFFFFFFFFFFFFFFFULL, \
+ { \
+ 0x0000000000000000ULL, 0xb32e4cbe03a75f6fULL, \
+ 0xf4843657a840a05bULL, 0x47aa7ae9abe7ff34ULL, \
+ 0x7bd0c384ff8f5e33ULL, 0xc8fe8f3afc28015cULL, \
+ 0x8f54f5d357cffe68ULL, 0x3c7ab96d5468a107ULL, \
+ 0xf7a18709ff1ebc66ULL, 0x448fcbb7fcb9e309ULL, \
+ 0x0325b15e575e1c3dULL, 0xb00bfde054f94352ULL, \
+ 0x8c71448d0091e255ULL, 0x3f5f08330336bd3aULL, \
+ 0x78f572daa8d1420eULL, 0xcbdb3e64ab761d61ULL, \
+ 0x7d9ba13851336649ULL, 0xceb5ed8652943926ULL, \
+ 0x891f976ff973c612ULL, 0x3a31dbd1fad4997dULL, \
+ 0x064b62bcaebc387aULL, 0xb5652e02ad1b6715ULL, \
+ 0xf2cf54eb06fc9821ULL, 0x41e11855055bc74eULL, \
+ 0x8a3a2631ae2dda2fULL, 0x39146a8fad8a8540ULL, \
+ 0x7ebe1066066d7a74ULL, 0xcd905cd805ca251bULL, \
+ 0xf1eae5b551a2841cULL, 0x42c4a90b5205db73ULL, \
+ 0x056ed3e2f9e22447ULL, 0xb6409f5cfa457b28ULL, \
+ 0xfb374270a266cc92ULL, 0x48190ecea1c193fdULL, \
+ 0x0fb374270a266cc9ULL, 0xbc9d3899098133a6ULL, \
+ 0x80e781f45de992a1ULL, 0x33c9cd4a5e4ecdceULL, \
+ 0x7463b7a3f5a932faULL, 0xc74dfb1df60e6d95ULL, \
+ 0x0c96c5795d7870f4ULL, 0xbfb889c75edf2f9bULL, \
+ 0xf812f32ef538d0afULL, 0x4b3cbf90f69f8fc0ULL, \
+ 0x774606fda2f72ec7ULL, 0xc4684a43a15071a8ULL, \
+ 0x83c230aa0ab78e9cULL, 0x30ec7c140910d1f3ULL, \
+ 0x86ace348f355aadbULL, 0x3582aff6f0f2f5b4ULL, \
+ 0x7228d51f5b150a80ULL, 0xc10699a158b255efULL, \
+ 0xfd7c20cc0cdaf4e8ULL, 0x4e526c720f7dab87ULL, \
+ 0x09f8169ba49a54b3ULL, 0xbad65a25a73d0bdcULL, \
+ 0x710d64410c4b16bdULL, 0xc22328ff0fec49d2ULL, \
+ 0x85895216a40bb6e6ULL, 0x36a71ea8a7ace989ULL, \
+ 0x0adda7c5f3c4488eULL, 0xb9f3eb7bf06317e1ULL, \
+ 0xfe5991925b84e8d5ULL, 0x4d77dd2c5823b7baULL, \
+ 0x64b62bcaebc387a1ULL, 0xd7986774e864d8ceULL, \
+ 0x90321d9d438327faULL, 0x231c512340247895ULL, \
+ 0x1f66e84e144cd992ULL, 0xac48a4f017eb86fdULL, \
+ 0xebe2de19bc0c79c9ULL, 0x58cc92a7bfab26a6ULL, \
+ 0x9317acc314dd3bc7ULL, 0x2039e07d177a64a8ULL, \
+ 0x67939a94bc9d9b9cULL, 0xd4bdd62abf3ac4f3ULL, \
+ 0xe8c76f47eb5265f4ULL, 0x5be923f9e8f53a9bULL, \
+ 0x1c4359104312c5afULL, 0xaf6d15ae40b59ac0ULL, \
+ 0x192d8af2baf0e1e8ULL, 0xaa03c64cb957be87ULL, \
+ 0xeda9bca512b041b3ULL, 0x5e87f01b11171edcULL, \
+ 0x62fd4976457fbfdbULL, 0xd1d305c846d8e0b4ULL, \
+ 0x96797f21ed3f1f80ULL, 0x2557339fee9840efULL, \
+ 0xee8c0dfb45ee5d8eULL, 0x5da24145464902e1ULL, \
+ 0x1a083bacedaefdd5ULL, 0xa9267712ee09a2baULL, \
+ 0x955cce7fba6103bdULL, 0x267282c1b9c65cd2ULL, \
+ 0x61d8f8281221a3e6ULL, 0xd2f6b4961186fc89ULL, \
+ 0x9f8169ba49a54b33ULL, 0x2caf25044a02145cULL, \
+ 0x6b055fede1e5eb68ULL, 0xd82b1353e242b407ULL, \
+ 0xe451aa3eb62a1500ULL, 0x577fe680b58d4a6fULL, \
+ 0x10d59c691e6ab55bULL, 0xa3fbd0d71dcdea34ULL, \
+ 0x6820eeb3b6bbf755ULL, 0xdb0ea20db51ca83aULL, \
+ 0x9ca4d8e41efb570eULL, 0x2f8a945a1d5c0861ULL, \
+ 0x13f02d374934a966ULL, 0xa0de61894a93f609ULL, \
+ 0xe7741b60e174093dULL, 0x545a57dee2d35652ULL, \
+ 0xe21ac88218962d7aULL, 0x5134843c1b317215ULL, \
+ 0x169efed5b0d68d21ULL, 0xa5b0b26bb371d24eULL, \
+ 0x99ca0b06e7197349ULL, 0x2ae447b8e4be2c26ULL, \
+ 0x6d4e3d514f59d312ULL, 0xde6071ef4cfe8c7dULL, \
+ 0x15bb4f8be788911cULL, 0xa6950335e42fce73ULL, \
+ 0xe13f79dc4fc83147ULL, 0x521135624c6f6e28ULL, \
+ 0x6e6b8c0f1807cf2fULL, 0xdd45c0b11ba09040ULL, \
+ 0x9aefba58b0476f74ULL, 0x29c1f6e6b3e0301bULL, \
+ 0xc96c5795d7870f42ULL, 0x7a421b2bd420502dULL, \
+ 0x3de861c27fc7af19ULL, 0x8ec62d7c7c60f076ULL, \
+ 0xb2bc941128085171ULL, 0x0192d8af2baf0e1eULL, \
+ 0x4638a2468048f12aULL, 0xf516eef883efae45ULL, \
+ 0x3ecdd09c2899b324ULL, 0x8de39c222b3eec4bULL, \
+ 0xca49e6cb80d9137fULL, 0x7967aa75837e4c10ULL, \
+ 0x451d1318d716ed17ULL, 0xf6335fa6d4b1b278ULL, \
+ 0xb199254f7f564d4cULL, 0x02b769f17cf11223ULL, \
+ 0xb4f7f6ad86b4690bULL, 0x07d9ba1385133664ULL, \
+ 0x4073c0fa2ef4c950ULL, 0xf35d8c442d53963fULL, \
+ 0xcf273529793b3738ULL, 0x7c0979977a9c6857ULL, \
+ 0x3ba3037ed17b9763ULL, 0x888d4fc0d2dcc80cULL, \
+ 0x435671a479aad56dULL, 0xf0783d1a7a0d8a02ULL, \
+ 0xb7d247f3d1ea7536ULL, 0x04fc0b4dd24d2a59ULL, \
+ 0x3886b22086258b5eULL, 0x8ba8fe9e8582d431ULL, \
+ 0xcc0284772e652b05ULL, 0x7f2cc8c92dc2746aULL, \
+ 0x325b15e575e1c3d0ULL, 0x8175595b76469cbfULL, \
+ 0xc6df23b2dda1638bULL, 0x75f16f0cde063ce4ULL, \
+ 0x498bd6618a6e9de3ULL, 0xfaa59adf89c9c28cULL, \
+ 0xbd0fe036222e3db8ULL, 0x0e21ac88218962d7ULL, \
+ 0xc5fa92ec8aff7fb6ULL, 0x76d4de52895820d9ULL, \
+ 0x317ea4bb22bfdfedULL, 0x8250e80521188082ULL, \
+ 0xbe2a516875702185ULL, 0x0d041dd676d77eeaULL, \
+ 0x4aae673fdd3081deULL, 0xf9802b81de97deb1ULL, \
+ 0x4fc0b4dd24d2a599ULL, 0xfceef8632775faf6ULL, \
+ 0xbb44828a8c9205c2ULL, 0x086ace348f355aadULL, \
+ 0x34107759db5dfbaaULL, 0x873e3be7d8faa4c5ULL, \
+ 0xc094410e731d5bf1ULL, 0x73ba0db070ba049eULL, \
+ 0xb86133d4dbcc19ffULL, 0x0b4f7f6ad86b4690ULL, \
+ 0x4ce50583738cb9a4ULL, 0xffcb493d702be6cbULL, \
+ 0xc3b1f050244347ccULL, 0x709fbcee27e418a3ULL, \
+ 0x3735c6078c03e797ULL, 0x841b8ab98fa4b8f8ULL, \
+ 0xadda7c5f3c4488e3ULL, 0x1ef430e13fe3d78cULL, \
+ 0x595e4a08940428b8ULL, 0xea7006b697a377d7ULL, \
+ 0xd60abfdbc3cbd6d0ULL, 0x6524f365c06c89bfULL, \
+ 0x228e898c6b8b768bULL, 0x91a0c532682c29e4ULL, \
+ 0x5a7bfb56c35a3485ULL, 0xe955b7e8c0fd6beaULL, \
+ 0xaeffcd016b1a94deULL, 0x1dd181bf68bdcbb1ULL, \
+ 0x21ab38d23cd56ab6ULL, 0x9285746c3f7235d9ULL, \
+ 0xd52f0e859495caedULL, 0x6601423b97329582ULL, \
+ 0xd041dd676d77eeaaULL, 0x636f91d96ed0b1c5ULL, \
+ 0x24c5eb30c5374ef1ULL, 0x97eba78ec690119eULL, \
+ 0xab911ee392f8b099ULL, 0x18bf525d915feff6ULL, \
+ 0x5f1528b43ab810c2ULL, 0xec3b640a391f4fadULL, \
+ 0x27e05a6e926952ccULL, 0x94ce16d091ce0da3ULL, \
+ 0xd3646c393a29f297ULL, 0x604a2087398eadf8ULL, \
+ 0x5c3099ea6de60cffULL, 0xef1ed5546e415390ULL, \
+ 0xa8b4afbdc5a6aca4ULL, 0x1b9ae303c601f3cbULL, \
+ 0x56ed3e2f9e224471ULL, 0xe5c372919d851b1eULL, \
+ 0xa26908783662e42aULL, 0x114744c635c5bb45ULL, \
+ 0x2d3dfdab61ad1a42ULL, 0x9e13b115620a452dULL, \
+ 0xd9b9cbfcc9edba19ULL, 0x6a978742ca4ae576ULL, \
+ 0xa14cb926613cf817ULL, 0x1262f598629ba778ULL, \
+ 0x55c88f71c97c584cULL, 0xe6e6c3cfcadb0723ULL, \
+ 0xda9c7aa29eb3a624ULL, 0x69b2361c9d14f94bULL, \
+ 0x2e184cf536f3067fULL, 0x9d36004b35545910ULL, \
+ 0x2b769f17cf112238ULL, 0x9858d3a9ccb67d57ULL, \
+ 0xdff2a94067518263ULL, 0x6cdce5fe64f6dd0cULL, \
+ 0x50a65c93309e7c0bULL, 0xe388102d33392364ULL, \
+ 0xa4226ac498dedc50ULL, 0x170c267a9b79833fULL, \
+ 0xdcd7181e300f9e5eULL, 0x6ff954a033a8c131ULL, \
+ 0x28532e49984f3e05ULL, 0x9b7d62f79be8616aULL, \
+ 0xa707db9acf80c06dULL, 0x14299724cc279f02ULL, \
+ 0x5383edcd67c06036ULL, 0xe0ada17364673f59ULL} \
+}
+
+/*
+ * Return the initial CRC seed. Use the value returned from this API as the
+ * "crc" parameter to the first call to add data.
+ */
+static inline uint64_t fman_crc64_init(void)
+{
+ return FMAN_CRC64_ECMA_182.initial;
+}
+
+/* Updates the CRC with arbitrary data */
+static inline uint64_t fman_crc64_update(uint64_t crc,
+ void *data, unsigned int len)
+{
+ uint8_t *p = data;
+ while (len--)
+ crc = FMAN_CRC64_ECMA_182.table[(crc ^ *(p++)) & 0xff] ^
+ (crc >> 8);
+ return crc;
+}
+
+/* Shorthands for updating the CRC with 8/16/32 bits of data.
+ * IMPORTANT NOTE: the typed "data" arguments should not be mistaken for
+ * host-endian numerical values, the assumption is that these values contain
+ * big-endian (ie. network byte order) data.
+ */
+static inline uint64_t fman_crc64_compute_32bit(uint32_t data, uint64_t crc)
+{
+ return fman_crc64_update(crc, &data, sizeof(data));
+}
+static inline uint64_t fman_crc64_compute_16bit(uint16_t data, uint64_t crc)
+{
+ return fman_crc64_update(crc, &data, sizeof(data));
+}
+static inline uint64_t fman_crc64_compute_8bit(uint8_t data, uint64_t crc)
+{
+ return fman_crc64_update(crc, &data, sizeof(data));
+}
+
+/*
+ * Finalise the CRC (using 2's complement)
+ */
+static inline uint64_t fman_crc64_finish(uint64_t seed)
+{
+ return ~seed;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_FMAN_CRC64_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_qman.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_qman.h
new file mode 100644
index 00000000..b18cf037
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_qman.h
@@ -0,0 +1,2057 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_QMAN_H
+#define __FSL_QMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <dpaa_rbtree.h>
+#include <rte_eventdev.h>
+
+/* FQ lookups (turn this on for 64bit user-space) */
+#if (__WORDSIZE == 64)
+#define CONFIG_FSL_QMAN_FQ_LOOKUP
+/* if FQ lookups are supported, this controls the number of initialised,
+ * s/w-consumed FQs that can be supported at any one time.
+ */
+#define CONFIG_FSL_QMAN_FQ_LOOKUP_MAX (32 * 1024)
+#endif
+
+/* Last updated for v00.800 of the BG */
+
+/* Hardware constants */
+#define QM_CHANNEL_SWPORTAL0 0
+#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_CAAM 0x80
+#define QMAN_CHANNEL_PME 0xa0
+#define QMAN_CHANNEL_POOL1_REV3 0x401
+#define QMAN_CHANNEL_CAAM_REV3 0x840
+#define QMAN_CHANNEL_PME_REV3 0x860
+extern u16 qm_channel_pool1;
+extern u16 qm_channel_caam;
+extern u16 qm_channel_pme;
+enum qm_dc_portal {
+ qm_dc_portal_fman0 = 0,
+ qm_dc_portal_fman1 = 1,
+ qm_dc_portal_caam = 2,
+ qm_dc_portal_pme = 3
+};
+
+/* Portal processing (interrupt) sources */
+#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
+#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
+#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
+#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
+#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
+#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
+/*
+ * This mask contains all the interrupt sources that need handling except DQRI,
+ * ie. that if present should trigger slow-path processing.
+ */
+#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
+ QM_PIRQ_MRI | QM_PIRQ_CCSCI)
+
+/* For qman_static_dequeue_*** APIs */
+#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
+/* for n in [1,15] */
+#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
+/* for conversion from n of qm_channel */
+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
+{
+ return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
+}
+
+/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
+#define QM_VDQCR_EXACT 0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
+
+/* --- QMan data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */
+struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */
+struct qm_mr_entry; /* MR (Message Ring) entries */
+struct qm_mc_command; /* MC (Management Command) command */
+struct qm_mc_result; /* MC result */
+
+#define QM_FD_FORMAT_SG 0x4
+#define QM_FD_FORMAT_LONG 0x2
+#define QM_FD_FORMAT_COMPOUND 0x1
+enum qm_fd_format {
+ /*
+ * 'contig' implies a contiguous buffer, whereas 'sg' implies a
+ * scatter-gather table. 'big' implies a 29-bit length with no offset
+ * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
+ * implies a s/g-like table, where each entry itself represents a frame
+ * (contiguous or scatter-gather) and the 29-bit "length" is
+ * interpreted purely for congestion calculations, ie. a "congestion
+ * weight".
+ */
+ qm_fd_contig = 0,
+ qm_fd_contig_big = QM_FD_FORMAT_LONG,
+ qm_fd_sg = QM_FD_FORMAT_SG,
+ qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
+ qm_fd_compound = QM_FD_FORMAT_COMPOUND
+};
+
+/* Capitalised versions are un-typed but can be used in static expressions */
+#define QM_FD_CONTIG 0
+#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
+#define QM_FD_SG QM_FD_FORMAT_SG
+#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
+#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND
+
+/* "Frame Descriptor (FD)" */
+struct qm_fd {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 dd:2; /* dynamic debug */
+ u8 liodn_offset:6;
+ u8 bpid:8; /* Buffer Pool ID */
+ u8 eliodn_offset:4;
+ u8 __reserved:4;
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#else
+ u8 liodn_offset:6;
+ u8 dd:2; /* dynamic debug */
+ u8 bpid:8; /* Buffer Pool ID */
+ u8 __reserved:4;
+ u8 eliodn_offset:4;
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#endif
+ };
+ struct {
+ u64 __notaddress:24;
+ /* More efficient address accessor */
+ u64 addr:40;
+ };
+ u64 opaque_addr;
+ };
+ /* The 'format' field indicates the interpretation of the remaining 29
+ * bits of the 32-bit word. For packing reasons, it is duplicated in the
+ * other union elements. Note, union'd structs are difficult to use with
+ * static initialisation under gcc, in which case use the "opaque" form
+ * with one of the macros.
+ */
+ union {
+ /* For easier/faster copying of this part of the fd (eg. from a
+ * DQRR entry to an EQCR entry) copy 'opaque'
+ */
+ u32 opaque;
+ /* If 'format' is _contig or _sg, 20b length and 9b offset */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format format:3;
+ u16 offset:9;
+ u32 length20:20;
+#else
+ u32 length20:20;
+ u16 offset:9;
+ enum qm_fd_format format:3;
+#endif
+ };
+ /* If 'format' is _contig_big or _sg_big, 29b length */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format _format1:3;
+ u32 length29:29;
+#else
+ u32 length29:29;
+ enum qm_fd_format _format1:3;
+#endif
+ };
+ /* If 'format' is _compound, 29b "congestion weight" */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format _format2:3;
+ u32 cong_weight:29;
+#else
+ u32 cong_weight:29;
+ enum qm_fd_format _format2:3;
+#endif
+ };
+ };
+ union {
+ u32 cmd;
+ u32 status;
+ };
+} __attribute__((aligned(8)));
+#define QM_FD_DD_NULL 0x00
+#define QM_FD_PID_MASK 0x3f
+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
+{
+ return fd->addr;
+}
+
+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+{
+ return (dma_addr_t)fd->addr;
+}
+
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_fd_addr_set64(fd, v) \
+ do { \
+ struct qm_fd *__fd931 = (fd); \
+ __fd931->addr = v; \
+ } while (0)
+
+/* Scatter/Gather table entry */
+struct qm_sg_entry {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1[3];
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#else
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u8 __reserved1[3];
+#endif
+ };
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u64 __notaddress:24;
+ u64 addr:40;
+#else
+ u64 addr:40;
+ u64 __notaddress:24;
+#endif
+ };
+ u64 opaque;
+ };
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 extension:1; /* Extension bit */
+ u32 final:1; /* Final bit */
+ u32 length:30;
+#else
+ u32 length:30;
+ u32 final:1; /* Final bit */
+ u32 extension:1; /* Extension bit */
+#endif
+ };
+ u32 val;
+ };
+ u8 __reserved2;
+ u8 bpid;
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved3:3;
+ u16 offset:13;
+#else
+ u16 offset:13;
+ u16 __reserved3:3;
+#endif
+ };
+ u16 val_off;
+ };
+} __packed;
+static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
+{
+ return sg->addr;
+}
+
+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+{
+ return (dma_addr_t)sg->addr;
+}
+
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_sg_entry_set64(sg, v) \
+ do { \
+ struct qm_sg_entry *__sg931 = (sg); \
+ __sg931->addr = v; \
+ } while (0)
+
+/* See 1.5.8.1: "Enqueue Command" */
+struct __rte_aligned(8) qm_eqcr_entry {
+ u8 __dont_write_directly__verb;
+ u8 dca;
+ u16 seqnum;
+ u32 orp; /* 24-bit */
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd; /* this has alignment 8 */
+ u8 __reserved3[32];
+} __packed;
+
+
+/* "Frame Dequeue Response" */
+struct __rte_aligned(8) qm_dqrr_entry {
+ u8 verb;
+ u8 stat;
+ u16 seqnum; /* 15-bit */
+ u8 tok;
+ u8 __reserved2[3];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ struct qm_fd fd; /* this has alignment 8 */
+ u8 __reserved4[32];
+};
+
+#define QM_DQRR_VERB_VBIT 0x80
+#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
+#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
+#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
+#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
+#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
+#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
+
+
+/* "ERN Message Response" */
+/* "FQ State Change Notification" */
+struct qm_mr_entry {
+ union {
+ struct {
+ u8 verb;
+ u8 dca;
+ u16 seqnum;
+ u8 rc; /* Rejection Code */
+ u32 orp:24;
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) ern;
+ struct {
+ u8 verb;
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
+ u8 __reserved1:4;
+ enum qm_dc_portal portal:2;
+#else
+ enum qm_dc_portal portal:3;
+ u8 __reserved1:3;
+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
+#endif
+ u16 __reserved2;
+ u8 rc; /* Rejection Code */
+ u32 __reserved3:24;
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) dcern;
+ struct {
+ u8 verb;
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[6];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ u8 __reserved2[16];
+ } __packed __rte_aligned(8) fq; /* FQRN/FQRNI/FQRL/FQPN */
+ };
+ u8 __reserved2[32];
+} __packed __rte_aligned(8);
+#define QM_MR_VERB_VBIT 0x80
+/*
+ * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
+ * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
+ * from the other MR types by noting if the 0x20 bit is unset.
+ */
+#define QM_MR_VERB_TYPE_MASK 0x27
+#define QM_MR_VERB_DC_ERN 0x20
+#define QM_MR_VERB_FQRN 0x21
+#define QM_MR_VERB_FQRNI 0x22
+#define QM_MR_VERB_FQRL 0x23
+#define QM_MR_VERB_FQPN 0x24
+#define QM_MR_RC_MASK 0xf0 /* contains one of; */
+#define QM_MR_RC_CGR_TAILDROP 0x00
+#define QM_MR_RC_WRED 0x10
+#define QM_MR_RC_ERROR 0x20
+#define QM_MR_RC_ORPWINDOW_EARLY 0x30
+#define QM_MR_RC_ORPWINDOW_LATE 0x40
+#define QM_MR_RC_FQ_TAILDROP 0x50
+#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
+#define QM_MR_RC_ORP_ZERO 0x70
+#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+#define QM_MR_DCERN_COLOUR_GREEN 0x00
+#define QM_MR_DCERN_COLOUR_YELLOW 0x01
+#define QM_MR_DCERN_COLOUR_RED 0x02
+#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
+/*
+ * An identical structure of FQD fields is present in the "Init FQ" command and
+ * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
+ * latter has two inlines to assist with converting to/from the mant+exp
+ * representation.
+ */
+struct qm_fqd_stashing {
+ /* See QM_STASHING_EXCL_<...> */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 exclusive;
+ u8 __reserved1:2;
+ /* Numbers of cachelines */
+ u8 annotation_cl:2;
+ u8 data_cl:2;
+ u8 context_cl:2;
+#else
+ u8 context_cl:2;
+ u8 data_cl:2;
+ u8 annotation_cl:2;
+ u8 __reserved1:2;
+ u8 exclusive;
+#endif
+} __packed;
+struct qm_fqd_taildrop {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved1:3;
+ u16 mant:8;
+ u16 exp:5;
+#else
+ u16 exp:5;
+ u16 mant:8;
+ u16 __reserved1:3;
+#endif
+} __packed;
+struct qm_fqd_oac {
+ /* "Overhead Accounting Control", see QM_OAC_<...> */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 oac:2; /* "Overhead Accounting Control" */
+ u8 __reserved1:6;
+#else
+ u8 __reserved1:6;
+ u8 oac:2; /* "Overhead Accounting Control" */
+#endif
+ /* Two's-complement value (-128 to +127) */
+ signed char oal; /* "Overhead Accounting Length" */
+} __packed;
+struct qm_fqd {
+ union {
+ u8 orpc;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1:2;
+ u8 orprws:3;
+ u8 oa:1;
+ u8 olws:2;
+#else
+ u8 olws:2;
+ u8 oa:1;
+ u8 orprws:3;
+ u8 __reserved1:2;
+#endif
+ } __packed;
+ };
+ u8 cgid;
+ u16 fq_ctrl; /* See QM_FQCTRL_<...> */
+ union {
+ u16 dest_wq;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 channel:13; /* qm_channel */
+ u16 wq:3;
+#else
+ u16 wq:3;
+ u16 channel:13; /* qm_channel */
+#endif
+ } __packed dest;
+ };
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved2:1;
+ u16 ics_cred:15;
+#else
+ u16 __reserved2:1;
+ u16 ics_cred:15;
+#endif
+ /*
+ * For "Initialize Frame Queue" commands, the write-enable mask
+ * determines whether 'td' or 'oac_init' is observed. For query
+ * commands, this field is always 'td', and 'oac_query' (below) reflects
+ * the Overhead ACcounting values.
+ */
+ union {
+ uint16_t opaque_td;
+ struct qm_fqd_taildrop td;
+ struct qm_fqd_oac oac_init;
+ };
+ u32 context_b;
+ union {
+ /* Treat it as 64-bit opaque */
+ u64 opaque;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 hi;
+ u32 lo;
+#else
+ u32 lo;
+ u32 hi;
+#endif
+ };
+ /* Treat it as s/w portal stashing config */
+ /* see "FQD Context_A field used for [...]" */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ struct qm_fqd_stashing stashing;
+ /*
+ * 48-bit address of FQ context to
+ * stash, must be cacheline-aligned
+ */
+ u16 context_hi;
+ u32 context_lo;
+#else
+ u32 context_lo;
+ u16 context_hi;
+ struct qm_fqd_stashing stashing;
+#endif
+ } __packed;
+ } context_a;
+ struct qm_fqd_oac oac_query;
+} __packed;
+/* 64-bit converters for context_hi/lo */
+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
+{
+ return ((u64)fqd->context_a.context_hi << 32) |
+ (u64)fqd->context_a.context_lo;
+}
+
+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
+{
+ return (dma_addr_t)qm_fqd_stashing_get64(fqd);
+}
+
+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
+{
+ return ((u64)fqd->context_a.hi << 32) |
+ (u64)fqd->context_a.lo;
+}
+
+static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.context_hi = upper_32_bits(addr);
+ fqd->context_a.context_lo = lower_32_bits(addr);
+}
+
+static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.hi = upper_32_bits(addr);
+ fqd->context_a.lo = lower_32_bits(addr);
+}
+
+/* convert a threshold value into mant+exp representation */
+static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
+ int roundup)
+{
+ u32 e = 0;
+ int oddbit = 0;
+
+ if (val > 0xe0000000)
+ return -ERANGE;
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ td->exp = e;
+ td->mant = val;
+ return 0;
+}
+
+/* and the other direction */
+static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
+{
+ return (u32)td->mant << td->exp;
+}
+
+
+/* See "Frame Queue Descriptor (FQD)" */
+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
+#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
+#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
+#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
+#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
+#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
+#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
+#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
+#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
+#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
+#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
+#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
+
+/* See "FQD Context_A field used for [...] */
+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
+#define QM_STASHING_EXCL_ANNOTATION 0x04
+#define QM_STASHING_EXCL_DATA 0x02
+#define QM_STASHING_EXCL_CTX 0x01
+
+/* See "Intra Class Scheduling" */
+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
+#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
+#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
+
+/*
+ * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
+ * and associated commands/responses. The WRED parameters are calculated from
+ * these fields as follows;
+ * MaxTH = MA * (2 ^ Mn)
+ * Slope = SA / (2 ^ Sn)
+ * MaxP = 4 * (Pn + 1)
+ */
+struct qm_cgr_wr_parm {
+ union {
+ u32 word;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 MA:8;
+ u32 Mn:5;
+ u32 SA:7; /* must be between 64-127 */
+ u32 Sn:6;
+ u32 Pn:6;
+#else
+ u32 Pn:6;
+ u32 Sn:6;
+ u32 SA:7; /* must be between 64-127 */
+ u32 Mn:5;
+ u32 MA:8;
+#endif
+ } __packed;
+ };
+} __packed;
+/*
+ * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
+ * management commands, this is padded to a 16-bit structure field, so that's
+ * how we represent it here. The congestion state threshold is calculated from
+ * these fields as follows;
+ * CS threshold = TA * (2 ^ Tn)
+ */
+struct qm_cgr_cs_thres {
+ union {
+ u16 hword;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved:3;
+ u16 TA:8;
+ u16 Tn:5;
+#else
+ u16 Tn:5;
+ u16 TA:8;
+ u16 __reserved:3;
+#endif
+ } __packed;
+ };
+} __packed;
+/*
+ * This identical structure of CGR fields is present in the "Init/Modify CGR"
+ * commands and the "Query CGR" result. It's suctioned out here into its own
+ * struct.
+ */
+struct __qm_mc_cgr {
+ struct qm_cgr_wr_parm wr_parm_g;
+ struct qm_cgr_wr_parm wr_parm_y;
+ struct qm_cgr_wr_parm wr_parm_r;
+ u8 wr_en_g; /* boolean, use QM_CGR_EN */
+ u8 wr_en_y; /* boolean, use QM_CGR_EN */
+ u8 wr_en_r; /* boolean, use QM_CGR_EN */
+ u8 cscn_en; /* boolean, use QM_CGR_EN */
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+#else
+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
+#endif
+ };
+ u32 cscn_targ; /* use QM_CGR_TARG_* */
+ };
+ u8 cstd_en; /* boolean, use QM_CGR_EN */
+ u8 cs; /* boolean, only used in query response */
+ union {
+ struct qm_cgr_cs_thres cs_thres;
+ /* use qm_cgr_cs_thres_set64() */
+ u16 __cs_thres;
+ };
+ u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
+} __packed;
+#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
+#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
+#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
+#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
+#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */
+/* Convert CGR thresholds to/from "cs_thres" format */
+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
+{
+ return (u64)th->TA << th->Tn;
+}
+
+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
+ int roundup)
+{
+ u32 e = 0;
+ int oddbit = 0;
+
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ th->Tn = e;
+ th->TA = val;
+ return 0;
+}
+
+/* See 1.5.8.5.1: "Initialize FQ" */
+/* See 1.5.8.5.2: "Query FQ" */
+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
+/* See 1.5.8.5.4: "Alter FQ State Commands " */
+/* See 1.5.8.6.1: "Initialize/Modify CGR" */
+/* See 1.5.8.6.2: "CGR Test Write" */
+/* See 1.5.8.6.3: "Query CGR" */
+/* See 1.5.8.6.4: "Query Congestion Group State" */
+struct qm_mcc_initfq {
+ u8 __reserved1;
+ u16 we_mask; /* Write Enable Mask */
+ u32 fqid; /* 24-bit */
+ u16 count; /* Initialises 'count+1' FQDs */
+ struct qm_fqd fqd; /* the FQD fields go here */
+ u8 __reserved3[30];
+} __packed;
+struct qm_mcc_queryfq {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+struct qm_mcc_queryfq_np {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+struct qm_mcc_alterfq {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2;
+ u8 count; /* number of consecutive FQID */
+ u8 __reserved3[10];
+ u32 context_b; /* frame queue context b */
+ u8 __reserved4[40];
+} __packed;
+struct qm_mcc_initcgr {
+ u8 __reserved1;
+ u16 we_mask; /* Write Enable Mask */
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[2];
+ u8 cgid;
+ u8 __reserved4[32];
+} __packed;
+struct qm_mcc_cgrtestwrite {
+ u8 __reserved1[2];
+ u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u8 __reserved2[23];
+ u8 cgid;
+ u8 __reserved3[32];
+} __packed;
+struct qm_mcc_querycgr {
+ u8 __reserved1[30];
+ u8 cgid;
+ u8 __reserved2[32];
+} __packed;
+struct qm_mcc_querycongestion {
+ u8 __reserved[63];
+} __packed;
+struct qm_mcc_querywq {
+ u8 __reserved;
+ /* select channel if verb != QUERYWQ_DEDICATED */
+ union {
+ u16 channel_wq; /* ignores wq (3 lsbits) */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 id:13; /* qm_channel */
+ u16 __reserved1:3;
+#else
+ u16 __reserved1:3;
+ u16 id:13; /* qm_channel */
+#endif
+ } __packed channel;
+ };
+ u8 __reserved2[60];
+} __packed;
+
+struct qm_mc_command {
+ u8 __dont_write_directly__verb;
+ union {
+ struct qm_mcc_initfq initfq;
+ struct qm_mcc_queryfq queryfq;
+ struct qm_mcc_queryfq_np queryfq_np;
+ struct qm_mcc_alterfq alterfq;
+ struct qm_mcc_initcgr initcgr;
+ struct qm_mcc_cgrtestwrite cgrtestwrite;
+ struct qm_mcc_querycgr querycgr;
+ struct qm_mcc_querycongestion querycongestion;
+ struct qm_mcc_querywq querywq;
+ };
+} __packed;
+
+/* INITFQ-specific flags */
+#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
+#define QM_INITFQ_WE_OAC 0x0100
+#define QM_INITFQ_WE_ORPC 0x0080
+#define QM_INITFQ_WE_CGID 0x0040
+#define QM_INITFQ_WE_FQCTRL 0x0020
+#define QM_INITFQ_WE_DESTWQ 0x0010
+#define QM_INITFQ_WE_ICSCRED 0x0008
+#define QM_INITFQ_WE_TDTHRESH 0x0004
+#define QM_INITFQ_WE_CONTEXTB 0x0002
+#define QM_INITFQ_WE_CONTEXTA 0x0001
+/* INITCGR/MODIFYCGR-specific flags */
+#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
+#define QM_CGR_WE_WR_PARM_G 0x0400
+#define QM_CGR_WE_WR_PARM_Y 0x0200
+#define QM_CGR_WE_WR_PARM_R 0x0100
+#define QM_CGR_WE_WR_EN_G 0x0080
+#define QM_CGR_WE_WR_EN_Y 0x0040
+#define QM_CGR_WE_WR_EN_R 0x0020
+#define QM_CGR_WE_CSCN_EN 0x0010
+#define QM_CGR_WE_CSCN_TARG 0x0008
+#define QM_CGR_WE_CSTD_EN 0x0004
+#define QM_CGR_WE_CS_THRES 0x0002
+#define QM_CGR_WE_MODE 0x0001
+
+struct qm_mcr_initfq {
+ u8 __reserved1[62];
+} __packed;
+struct qm_mcr_queryfq {
+ u8 __reserved1[8];
+ struct qm_fqd fqd; /* the FQD fields are here */
+ u8 __reserved2[30];
+} __packed;
+struct qm_mcr_queryfq_np {
+ u8 __reserved1;
+ u8 state; /* QM_MCR_NP_STATE_*** */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved2;
+ u32 fqd_link:24;
+ u16 __reserved3:2;
+ u16 odp_seq:14;
+ u16 __reserved4:2;
+ u16 orp_nesn:14;
+ u16 __reserved5:1;
+ u16 orp_ea_hseq:15;
+ u16 __reserved6:1;
+ u16 orp_ea_tseq:15;
+ u8 __reserved7;
+ u32 orp_ea_hptr:24;
+ u8 __reserved8;
+ u32 orp_ea_tptr:24;
+ u8 __reserved9;
+ u32 pfdr_hptr:24;
+ u8 __reserved10;
+ u32 pfdr_tptr:24;
+ u8 __reserved11[5];
+ u8 __reserved12:7;
+ u8 is:1;
+ u16 ics_surp;
+ u32 byte_cnt;
+ u8 __reserved13;
+ u32 frm_cnt:24;
+ u32 __reserved14;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved15;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+#else
+ u8 __reserved2;
+ u32 fqd_link:24;
+
+ u16 odp_seq:14;
+ u16 __reserved3:2;
+
+ u16 orp_nesn:14;
+ u16 __reserved4:2;
+
+ u16 orp_ea_hseq:15;
+ u16 __reserved5:1;
+
+ u16 orp_ea_tseq:15;
+ u16 __reserved6:1;
+
+ u8 __reserved7;
+ u32 orp_ea_hptr:24;
+
+ u8 __reserved8;
+ u32 orp_ea_tptr:24;
+
+ u8 __reserved9;
+ u32 pfdr_hptr:24;
+
+ u8 __reserved10;
+ u32 pfdr_tptr:24;
+
+ u8 __reserved11[5];
+ u8 is:1;
+ u8 __reserved12:7;
+ u16 ics_surp;
+ u32 byte_cnt;
+ u8 __reserved13;
+ u32 frm_cnt:24;
+ u32 __reserved14;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved15;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+#endif
+} __packed;
+
+struct qm_mcr_alterfq {
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[61];
+} __packed;
+struct qm_mcr_initcgr {
+ u8 __reserved1[62];
+} __packed;
+struct qm_mcr_cgrtestwrite {
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[3];
+ u32 __reserved3:24;
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 __reserved4:24;
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ u16 lgt; /* Last Group Tick */
+ u16 wr_prob_g;
+ u16 wr_prob_y;
+ u16 wr_prob_r;
+ u8 __reserved5[8];
+} __packed;
+struct qm_mcr_querycgr {
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[3];
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved3:24;
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+#else
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 __reserved3:24;
+#endif
+ };
+ u64 i_bcnt;
+ };
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved4:24;
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+#else
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 __reserved4:24;
+#endif
+ };
+ u64 a_bcnt;
+ };
+ union {
+ u32 cscn_targ_swp[4];
+ u8 __reserved5[16];
+ };
+} __packed;
+
+struct __qm_mcr_querycongestion {
+ u32 state[8];
+};
+
+struct qm_mcr_querycongestion {
+ u8 __reserved[30];
+ /* Access this struct using QM_MCR_QUERYCONGESTION() */
+ struct __qm_mcr_querycongestion state;
+} __packed;
+struct qm_mcr_querywq {
+ union {
+ u16 channel_wq; /* ignores wq (3 lsbits) */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 id:13; /* qm_channel */
+ u16 __reserved:3;
+#else
+ u16 __reserved:3;
+ u16 id:13; /* qm_channel */
+#endif
+ } __packed channel;
+ };
+ u8 __reserved[28];
+ u32 wq_len[8];
+} __packed;
+
+struct qm_mc_result {
+ u8 verb;
+ u8 result;
+ union {
+ struct qm_mcr_initfq initfq;
+ struct qm_mcr_queryfq queryfq;
+ struct qm_mcr_queryfq_np queryfq_np;
+ struct qm_mcr_alterfq alterfq;
+ struct qm_mcr_initcgr initcgr;
+ struct qm_mcr_cgrtestwrite cgrtestwrite;
+ struct qm_mcr_querycgr querycgr;
+ struct qm_mcr_querycongestion querycongestion;
+ struct qm_mcr_querywq querywq;
+ };
+} __packed;
+
+#define QM_MCR_VERB_RRID 0x80
+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL 0x00
+#define QM_MCR_RESULT_OK 0xf0
+#define QM_MCR_RESULT_ERR_FQID 0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
+#define QM_MCR_RESULT_PENDING 0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
+#define QM_MCR_NP_STATE_FE 0x10
+#define QM_MCR_NP_STATE_R 0x08
+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS 0x00
+#define QM_MCR_NP_STATE_RETIRED 0x01
+#define QM_MCR_NP_STATE_TEN_SCHED 0x02
+#define QM_MCR_NP_STATE_TRU_SCHED 0x03
+#define QM_MCR_NP_STATE_PARKED 0x04
+#define QM_MCR_NP_STATE_ACTIVE 0x05
+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+/* This extracts the state for congestion group 'n' from a query response.
+ * Eg.
+ * u8 cgr = [...];
+ * struct qm_mc_result *res = [...];
+ * printf("congestion group %d congestion state: %d\n", cgr,
+ * QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
+ */
+#define __CGR_WORD(num) (num >> 5)
+#define __CGR_SHIFT(num) (num & 0x1f)
+#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
+static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
+ u8 cgr)
+{
+ return p->state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
+}
+
+ /* Portal and Frame Queues */
+/* Represents a managed portal */
+struct qman_portal;
+
+/*
+ * This object type represents QMan frame queue descriptors (FQD), it is
+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
+ * defined further down.
+ */
+struct qman_fq;
+
+/*
+ * This object type represents a QMan congestion group, it is defined further
+ * down.
+ */
+struct qman_cgr;
+
+/*
+ * This enum, and the callback type that returns it, are used when handling
+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
+ * portal object (for handling dequeues that do not demux because context_b is
+ * NULL), the return value *MUST* be qman_cb_dqrr_consume.
+ */
+enum qman_cb_dqrr_result {
+ /* DQRR entry can be consumed */
+ qman_cb_dqrr_consume,
+ /* Like _consume, but requests parking - FQ must be held-active */
+ qman_cb_dqrr_park,
+ /* Does not consume, for DCA mode only. This allows out-of-order
+ * consumes by explicit calls to qman_dca() and/or the use of implicit
+ * DCA via EQCR entries.
+ */
+ qman_cb_dqrr_defer,
+ /*
+ * Stop processing without consuming this ring entry. Exits the current
+ * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
+ * an interrupt handler, the callback would typically call
+ * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
+ * otherwise the interrupt will reassert immediately.
+ */
+ qman_cb_dqrr_stop,
+ /* Like qman_cb_dqrr_stop, but consumes the current entry. */
+ qman_cb_dqrr_consume_stop
+};
+
+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr);
+
+typedef enum qman_cb_dqrr_result (*qman_dpdk_cb_dqrr)(void *event,
+ struct qman_portal *qm,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bd);
+
+/* This callback type is used when handling buffers in dpdk pull mode */
+typedef void (*qman_dpdk_pull_cb_dqrr)(struct qman_fq **fq,
+ struct qm_dqrr_entry **dqrr,
+ void **bufs,
+ int num_bufs);
+
+typedef void (*qman_dpdk_cb_prepare)(struct qm_dqrr_entry *dq, void **bufs);
+
+/*
+ * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
+ * are always consumed after the callback returns.
+ */
+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
+ const struct qm_mr_entry *msg);
+
+/* This callback type is used when handling DCP ERNs */
+typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
+ const struct qm_mr_entry *msg);
+/*
+ * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
+ * held-active + held-suspended are just "sched". Things like "retired" will not
+ * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
+ * then, to indicate it's completing and to gate attempts to retry the retire
+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
+ * index rather than the FQ that ring entry corresponds to), so repeated park
+ * commands are allowed (if you're silly enough to try) but won't change FQ
+ * state, and the resulting park notifications move FQs from "sched" to
+ * "parked".
+ */
+enum qman_fq_state {
+ qman_fq_state_oos,
+ qman_fq_state_parked,
+ qman_fq_state_sched,
+ qman_fq_state_retired
+};
+
+
+/*
+ * Frame queue objects (struct qman_fq) are stored within memory passed to
+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
+ * they should;
+ *
+ * (a) extend the qman_fq structure with their state; eg.
+ *
+ * // myfq is allocated and driver_fq callbacks filled in;
+ * struct my_fq {
+ * struct qman_fq base;
+ * int an_extra_field;
+ * [ ... add other fields to be associated with each FQ ...]
+ * } *myfq = some_my_fq_allocator();
+ * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
+ *
+ * // in a dequeue callback, access extra fields from 'fq' via a cast;
+ * struct my_fq *myfq = (struct my_fq *)fq;
+ * do_something_with(myfq->an_extra_field);
+ * [...]
+ *
+ * (b) when and if configuring the FQ for context stashing, specify how ever
+ * many cachelines are required to stash 'struct my_fq', to accelerate not
+ * only the QMan driver but the callback as well.
+ */
+
+struct qman_fq_cb {
+ union { /* for dequeued frames */
+ qman_dpdk_cb_dqrr dqrr_dpdk_cb;
+ qman_dpdk_pull_cb_dqrr dqrr_dpdk_pull_cb;
+ qman_cb_dqrr dqrr;
+ };
+ qman_dpdk_cb_prepare dqrr_prepare;
+ qman_cb_mr ern; /* for s/w ERNs */
+ qman_cb_mr fqs; /* frame-queue state changes*/
+};
+
+struct qman_fq {
+ /* Caller of qman_create_fq() provides these demux callbacks */
+ struct qman_fq_cb cb;
+
+ u32 fqid_le;
+ u16 ch_id;
+ u8 cgr_groupid;
+ u8 is_static;
+
+ /* DPDK Interface */
+ void *dpaa_intf;
+
+ struct rte_event ev;
+ /* affined portal in case of static queue */
+ struct qman_portal *qp;
+
+ volatile unsigned long flags;
+
+ enum qman_fq_state state;
+ u32 fqid;
+ spinlock_t fqlock;
+
+ struct rb_node node;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ u32 key;
+#endif
+};
+
+/*
+ * This callback type is used when handling congestion group entry/exit.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
+ */
+typedef void (*qman_cb_cgr)(struct qman_portal *qm,
+ struct qman_cgr *cgr, int congested);
+
+struct qman_cgr {
+ /* Set these prior to qman_create_cgr() */
+ u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
+ qman_cb_cgr cb;
+ /* These are private to the driver */
+ u16 chan; /* portal channel this object is created on */
+ struct list_head node;
+};
+
+/* Flags to qman_create_fq() */
+#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
+#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
+#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
+#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */
+#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */
+#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
+
+/* Flags to qman_destroy_fq() */
+#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */
+
+/* Flags from qman_fq_state() */
+#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
+#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
+#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
+#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
+#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
+#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
+
+/* Flags to qman_init_fq() */
+#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
+#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
+
+/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
+ * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
+ * any change here should be audited in PME.)
+ */
+#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */
+#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */
+#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */
+#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \
+ (((u32)(p) << 2) & 0x00000f00)
+#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */
+#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008
+#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010
+#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
+/* For the ORP-specific qman_enqueue_orp() variant;
+ * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
+ * of a frame.
+ */
+#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000
+/* - this flag performs no enqueue but fills in an ORP sequence number that
+ * would otherwise block it (eg. if a frame has been dropped).
+ */
+#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000
+/* - this flag performs no enqueue but advances NESN to the given sequence
+ * number.
+ */
+#define QMAN_ENQUEUE_FLAG_NESN 0x04000000
+
+/* Flags to qman_modify_cgr() */
+#define QMAN_CGR_FLAG_USE_INIT 0x00000001
+#define QMAN_CGR_MODE_FRAME 0x00000001
+
+/**
+ * qman_get_portal_index - get portal configuration index
+ */
+int qman_get_portal_index(void);
+
+u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
+ void **bufs);
+
+/**
+ * qman_affine_channel - return the channel ID of an portal
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
+ * bug to call this function for any value of @cpu (other than -1) that is not a
+ * member of the cpu mask.
+ */
+u16 qman_affine_channel(int cpu);
+
+unsigned int qman_portal_poll_rx(unsigned int poll_limit,
+ void **bufs, struct qman_portal *q);
+
+/**
+ * qman_set_vdq - Issue a volatile dequeue command
+ * @fq: Frame Queue on which the volatile dequeue command is issued
+ * @num: Number of Frames requested for volatile dequeue
+ * @vdqcr_flags: QM_VDQCR_EXACT flag to for VDQCR command
+ *
+ * This function will issue a volatile dequeue command to the QMAN.
+ */
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);
+
+/**
+ * qman_dequeue - Get the DQRR entry after volatile dequeue command
+ * @fq: Frame Queue on which the volatile dequeue command is issued
+ *
+ * This function will return the DQRR entry after a volatile dequeue command
+ * is issued. It will keep returning NULL until there is no packet available on
+ * the DQRR.
+ */
+struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
+
+/**
+ * qman_dqrr_consume - Consume the DQRR entriy after volatile dequeue
+ * @fq: Frame Queue on which the volatile dequeue command is issued
+ * @dq: DQRR entry to consume. This is the one which is provided by the
+ * 'qbman_dequeue' command.
+ *
+ * This will consume the DQRR enrey and make it available for next volatile
+ * dequeue.
+ */
+void qman_dqrr_consume(struct qman_fq *fq,
+ struct qm_dqrr_entry *dq);
+
+/**
+ * qman_poll_dqrr - process DQRR (fast-path) entries
+ * @limit: the maximum number of DQRR entries to process
+ *
+ * Use of this function requires that DQRR processing not be interrupt-driven.
+ * Ie. the value returned by qman_irqsource_get() should not include
+ * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
+ * this function will return -EINVAL, otherwise the return value is >=0 and
+ * represents the number of DQRR entries processed.
+ */
+int qman_poll_dqrr(unsigned int limit);
+
+/**
+ * qman_poll
+ *
+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
+ * affine portal. There are two classes of portal processing in question;
+ * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
+ * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
+ * thresholds, congestion state changes, etc). This function does whatever
+ * processing is not triggered by interrupts.
+ *
+ * Note, if DQRR and some slow-path processing are poll-driven (rather than
+ * interrupt-driven) then this function uses a heuristic to determine how often
+ * to run slow-path processing - as slow-path processing introduces at least a
+ * minimum latency each time it is run, whereas fast-path (DQRR) processing is
+ * close to zero-cost if there is no work to be done.
+ */
+void qman_poll(void);
+
+/**
+ * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
+ *
+ * Disables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_stop_dequeues(void);
+
+/**
+ * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
+ *
+ * Enables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_start_dequeues(void);
+
+/**
+ * qman_static_dequeue_add - Add pool channels to the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Adds a set of pool channels to the portal's static dequeue command register
+ * (SDQCR). The requested pools are limited to those the portal has dequeue
+ * access to.
+ */
+void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);
+
+/**
+ * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Removes a set of pool channels from the portal's static dequeue command
+ * register (SDQCR). The requested pools are limited to those the portal has
+ * dequeue access to.
+ */
+void qman_static_dequeue_del(u32 pools, struct qman_portal *qp);
+
+/**
+ * qman_static_dequeue_get - return the portal's current SDQCR
+ *
+ * Returns the portal's current static dequeue command register (SDQCR). The
+ * entire register is returned, so if only the currently-enabled pool channels
+ * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
+ */
+u32 qman_static_dequeue_get(struct qman_portal *qp);
+
+/**
+ * qman_dca - Perform a Discrete Consumption Acknowledgment
+ * @dq: the DQRR entry to be consumed
+ * @park_request: indicates whether the held-active @fq should be parked
+ *
+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
+ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
+ * does not take a 'portal' argument but implies the core affine portal from the
+ * cpu that is currently executing the function. For reasons of locking, this
+ * function must be called from the same CPU as that which processed the DQRR
+ * entry in the first place.
+ */
+void qman_dca(const struct qm_dqrr_entry *dq, int park_request);
+
+/**
+ * qman_dca_index - Perform a Discrete Consumption Acknowledgment
+ * @index: the DQRR index to be consumed
+ * @park_request: indicates whether the held-active @fq should be parked
+ *
+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
+ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
+ * does not take a 'portal' argument but implies the core affine portal from the
+ * cpu that is currently executing the function. For reasons of locking, this
+ * function must be called from the same CPU as that which processed the DQRR
+ * entry in the first place.
+ */
+void qman_dca_index(u8 index, int park_request);
+
+/**
+ * qman_eqcr_is_empty - Determine if portal's EQCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * enqueues for the local portal have been processed by Qman but can't use the
+ * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
+ * The function forces tracking of EQCR consumption (which normally doesn't
+ * happen until enqueue processing needs to find space to put new enqueue
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int qman_eqcr_is_empty(void);
+
+/**
+ * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
+ * @handler: callback for processing DCP ERNs
+ * @affine: whether this handler is specific to the locally affine portal
+ *
+ * If a hardware block's interface to Qman (ie. its direct-connect portal, or
+ * DCP) is configured not to receive enqueue rejections, then any enqueues
+ * through that DCP that are rejected will be sent to a given software portal.
+ * If @affine is non-zero, then this handler will only be used for DCP ERNs
+ * received on the portal affine to the current CPU. If multiple CPUs share a
+ * portal and they all call this function, they will be setting the handler for
+ * the same portal! If @affine is zero, then this handler will be global to all
+ * portals handled by this instance of the driver. Only those portals that do
+ * not have their own affine handler will use the global handler.
+ */
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
+
+ /* FQ management */
+ /* ------------- */
+/**
+ * qman_create_fq - Allocates a FQ
+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
+ * @fq: memory for storing the 'fq', with callbacks filled in
+ *
+ * Creates a frame queue object for the given @fqid, unless the
+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
+ * dynamically allocated (or the function fails if none are available). Once
+ * created, the caller should not touch the memory at 'fq' except as extended to
+ * adjacent memory for user-defined fields (see the definition of "struct
+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
+ * pre-existing frame-queues that aren't to be otherwise interfered with, it
+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
+ * causes the driver to honour any contextB modifications requested in the
+ * qm_init_fq() API, as this indicates the frame queue will be consumed by a
+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
+ * software portals, the contextB field is controlled by the driver and can't be
+ * modified by the caller. If the AS_IS flag is specified, management commands
+ * will be used on portal @p to query state for frame queue @fqid and construct
+ * a frame queue object based on that, rather than assuming/requiring that it be
+ * Out of Service.
+ */
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
+
+/**
+ * qman_destroy_fq - Deallocates a FQ
+ * @fq: the frame queue object to release
+ * @flags: bit-mask of QMAN_FQ_FREE_*** options
+ *
+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
+ * not deallocated but the caller regains ownership, to do with as desired. The
+ * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
+ * is specified, in which case it may also be in the 'parked' state.
+ */
+void qman_destroy_fq(struct qman_fq *fq, u32 flags);
+
+/**
+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
+ * @fq: the frame queue object to query
+ */
+u32 qman_fq_fqid(struct qman_fq *fq);
+
+/**
+ * qman_fq_state - Queries the state of a FQ object
+ * @fq: the frame queue object to query
+ * @state: pointer to state enum to return the FQ scheduling state
+ * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
+ *
+ * Queries the state of the FQ object, without performing any h/w commands.
+ * This captures the state, as seen by the driver, at the time the function
+ * executes.
+ */
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
+
+/**
+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
+ * @fq: the frame queue object to modify, must be 'parked' or new.
+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
+ * @opts: the FQ-modification settings, as defined in the low-level API
+ *
+ * The @opts parameter comes from the low-level portal API. Select
+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
+ * rather than parked. NB, @opts can be NULL.
+ *
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver;
+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
+ * affects one frame queue: @fq).
+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
+ * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
+ * initialised to a value used by the driver for demux.
+ * - if context_b is initialised for demux, so is context_a in case stashing
+ * is requested (see item 4).
+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
+ * objects.)
+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
+ * 'dest::channel' field will be overwritten to match the portal used to issue
+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
+ * isn't set, the destination channel/workqueue fields and the write-enable bit
+ * are left as-is.
+ * 4. if the driver overwrites context_a/b for demux, then if
+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
+ * context_a.address fields and will leave the stashing fields provided by the
+ * user alone, otherwise it will zero out the context_a.stashing fields.
+ */
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
+
+/**
+ * qman_schedule_fq - Schedules a FQ
+ * @fq: the frame queue object to schedule, must be 'parked'
+ *
+ * Schedules the frame queue, which must be Parked, which takes it to
+ * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
+ */
+int qman_schedule_fq(struct qman_fq *fq);
+
+/**
+ * qman_retire_fq - Retires a FQ
+ * @fq: the frame queue object to retire
+ * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
+ *
+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
+ * the retirement was started asynchronously, otherwise it returns negative for
+ * failure. When this function returns zero, @flags is set to indicate whether
+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
+ * FQRN message shows up on the portal's message ring.
+ *
+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
+ * Active state), the completion will be via the message ring as a FQRN - but
+ * the corresponding callback may occur before this function returns!! Ie. the
+ * caller should be prepared to accept the callback as the function is called,
+ * not only once it has returned.
+ */
+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
+
+/**
+ * qman_oos_fq - Puts a FQ "out of service"
+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
+ *
+ * The frame queue must be retired and empty, and if any order restoration list
+ * was released as ERNs at the time of retirement, they must all be consumed.
+ */
+int qman_oos_fq(struct qman_fq *fq);
+
+/**
+ * qman_fq_flow_control - Set the XON/XOFF state of a FQ
+ * @fq: the frame queue object to be set to XON/XOFF state, must not be 'oos',
+ * or 'retired' or 'parked' state
+ * @xon: boolean to set fq in XON or XOFF state
+ *
+ * The frame should be in Tentatively Scheduled state or Truly Schedule sate,
+ * otherwise the IFSI interrupt will be asserted.
+ */
+int qman_fq_flow_control(struct qman_fq *fq, int xon);
+
+/**
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/**
+ * qman_query_fq_has_pkts - Queries non-programmable FQD fields and returns '1'
+ * if packets are in the frame queue. If there are no packets on frame
+ * queue '0' is returned.
+ * @fq: the frame queue object to be queried
+ */
+int qman_query_fq_has_pkts(struct qman_fq *fq);
+
+/**
+ * qman_query_fq_np - Queries non-programmable FQD fields
+ * @fq: the frame queue object to be queried
+ * @np: storage for the queried FQD fields
+ */
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
+
+/**
+ * qman_query_fq_frmcnt - Queries fq frame count
+ * @fq: the frame queue object to be queried
+ * @frm_cnt: number of frames in the queue
+ */
+int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
+
+/**
+ * qman_query_wq - Queries work queue lengths
+ * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
+ * to this software portal. Otherwise, query length of WQs in a
+ * channel specified in wq.
+ * @wq: storage for the queried WQs lengths. Also specified the channel to
+ * to query if query_dedicated is zero.
+ */
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
+
+/**
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (ie. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
+ * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
+ * "flags" retrieved from qman_fq_state().
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
+
+/**
+ * qman_enqueue - Enqueue a frame to a frame queue
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ *
+ * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
+ * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
+ * field is ignored. The return value is non-zero on error, such as ring full
+ * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
+ * specified), etc. If the ring is full and FLAG_WAIT is specified, this
+ * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
+ * interrupt will assert when Qman consumes the EQCR entry (subject to "status
+ * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
+ * perform an implied "discrete consumption acknowledgment" on the dequeue
+ * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
+ * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
+ * this implicit DCA can delay the release of a "held active" frame queue
+ * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
+ * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
+ * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
+ * acknowledgment should "park request" the "held active" frame queue. Ie.
+ * when the portal eventually releases that frame queue, it will be left in the
+ * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
+ * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
+ * is requested, and the FQ is a member of a congestion group, then this
+ * function returns -EAGAIN if the congestion group is currently congested.
+ * Note, this does not eliminate ERNs, as the async interface means we can be
+ * sending enqueue commands to an un-congested FQ that becomes congested before
+ * the enqueue commands are processed, but it does minimise needless thrashing
+ * of an already busy hardware resource by throttling many of the to-be-dropped
+ * enqueues "at the source".
+ */
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
+
+int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
+ int frames_to_send);
+
+/**
+ * qman_enqueue_multi_fq - Enqueue multiple frames to their respective frame
+ * queues.
+ * @fq[]: Array of frame queue objects to enqueue to
+ * @fd: pointer to first descriptor of frame to be enqueued
+ * @frames_to_send: number of frames to be sent.
+ *
+ * This API is similar to qman_enqueue_multi(), but it takes fd which needs
+ * to be processed by different frame queues.
+ */
+int
+qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
+ int frames_to_send);
+
+typedef int (*qman_cb_precommit) (void *arg);
+
+/**
+ * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ * @orp: the frame queue object used as an order restoration point.
+ * @orp_seqnum: the sequence number of this frame in the order restoration path
+ *
+ * Similar to qman_enqueue(), but with the addition of an Order Restoration
+ * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
+ * enqueue operation to employ order restoration. Each frame queue object acts
+ * as an Order Definition Point (ODP) by providing each frame dequeued from it
+ * with an incrementing sequence number, this value is generally ignored unless
+ * that sequence of dequeued frames will need order restoration later. Each
+ * frame queue object also encapsulates an Order Restoration Point (ORP), which
+ * is a re-assembly context for re-ordering frames relative to their sequence
+ * numbers as they are enqueued. The ORP does not have to be within the frame
+ * queue that receives the enqueued frame, in fact it is usually the frame
+ * queue from which the frames were originally dequeued. For the purposes of
+ * order restoration, multiple frames (or "fragments") can be enqueued for a
+ * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
+ * enqueues except the final fragment of a given sequence number. Ordering
+ * between sequence numbers is guaranteed, even if fragments of different
+ * sequence numbers are interlaced with one another. Fragments of the same
+ * sequence number will retain the order in which they are enqueued. If no
+ * enqueue is to performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
+ * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
+ * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
+ * sequence number should become the ORP's "Next Expected Sequence Number".
+ *
+ * Side note: a frame queue object can be used purely as an ORP, without
+ * carrying any frames at all. Care should be taken not to deallocate a frame
+ * queue object that is being actively used as an ORP, as a future allocation
+ * of the frame queue object may start using the internal ORP before the
+ * previous use has finished.
+ */
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum);
+
+/**
+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
+ * @result: is set by the API to the base FQID of the allocated range
+ * @count: the number of FQIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count FQIDs
+ *
+ * Returns the number of frame queues allocated, or a negative error code. If
+ * @partial is non zero, the allocation request may return a smaller range of
+ * FQs than requested (though alignment will be as requested). If @partial is
+ * zero, the return value will either be 'count' or negative.
+ */
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_fqid(u32 *result)
+{
+ int ret = qman_alloc_fqid_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_fqid_range - Release the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to deallocate
+ * @count: the number of FQIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of FQIDs
+ * that it can subsequently allocate from.
+ */
+void qman_release_fqid_range(u32 fqid, unsigned int count);
+static inline void qman_release_fqid(u32 fqid)
+{
+ qman_release_fqid_range(fqid, 1);
+}
+
+void qman_seed_fqid_range(u32 fqid, unsigned int count);
+
+int qman_shutdown_fq(u32 fqid);
+
+/**
+ * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to deallocate
+ * @count: the number of FQIDs in the range
+ */
+int qman_reserve_fqid_range(u32 fqid, unsigned int count);
+static inline int qman_reserve_fqid(u32 fqid)
+{
+ return qman_reserve_fqid_range(fqid, 1);
+}
+
+/* Pool-channel management */
+/**
+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
+ * @result: is set by the API to the base pool-channel ID of the allocated range
+ * @count: the number of pool-channel IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of pool-channel IDs allocated, or a negative error code.
+ * If @partial is non zero, the allocation request may return a smaller range of
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_pool(u32 *result)
+{
+ int ret = qman_alloc_pool_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_pool_range - Release the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to deallocate
+ * @count: the number of pool-channel IDs in the range
+ */
+void qman_release_pool_range(u32 id, unsigned int count);
+static inline void qman_release_pool(u32 id)
+{
+ qman_release_pool_range(id, 1);
+}
+
+/**
+ * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to reserve
+ * @count: the number of pool-channel IDs in the range
+ */
+int qman_reserve_pool_range(u32 id, unsigned int count);
+static inline int qman_reserve_pool(u32 id)
+{
+ return qman_reserve_pool_range(id, 1);
+}
+
+void qman_seed_pool_range(u32 id, unsigned int count);
+
+ /* CGR management */
+ /* -------------- */
+/**
+ * qman_create_cgr - Register a congestion group object
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: optional state of CGR settings
+ *
+ * Registers this object to receiving congestion entry/exit callbacks on the
+ * portal affine to the cpu portal on which this API is executed. If opts is
+ * NULL then only the callback (cgr->cb) function is registered. If @flags
+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
+ * any unspecified parameters) will be used rather than a modify hw hardware
+ * (which only modifies the specified parameters).
+ */
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @dcp_portal: the DCP portal to which the cgr object is registered.
+ * @opts: optional state of CGR settings
+ *
+ */
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+ struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_delete_cgr - Deregisters a congestion group object
+ * @cgr: the 'cgr' object to deregister
+ *
+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+ * is executed. This must be excuted on the same affine portal on which it was
+ * created.
+ */
+int qman_delete_cgr(struct qman_cgr *cgr);
+
+/**
+ * qman_modify_cgr - Modify CGR fields
+ * @cgr: the 'cgr' object to modify
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: the CGR-modification settings
+ *
+ * The @opts parameter comes from the low-level portal API, and can be NULL.
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver, in particular the 'cgrid' field is ignored (this operation
+ * only affects the given CGR object). If @flags contains
+ * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
+ * unspecified parameters) will be used rather than a modify hw hardware (which
+ * only modifies the specified parameters).
+ */
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_query_cgr - Queries CGR fields
+ * @cgr: the 'cgr' object to query
+ * @result: storage for the queried congestion group record
+ */
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
+
+/**
+ * qman_query_congestion - Queries the state of all congestion groups
+ * @congestion: storage for the queried state of all congestion groups
+ */
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
+
+/**
+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
+ * @result: is set by the API to the base CGR ID of the allocated range
+ * @count: the number of CGR IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of CGR IDs allocated, or a negative error code.
+ * If @partial is non zero, the allocation request may return a smaller range of
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_cgrid(u32 *result)
+{
+ int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_cgrid_range - Release the specified range of CGR IDs
+ * @id: the base CGR ID of the range to deallocate
+ * @count: the number of CGR IDs in the range
+ */
+void qman_release_cgrid_range(u32 id, unsigned int count);
+static inline void qman_release_cgrid(u32 id)
+{
+ qman_release_cgrid_range(id, 1);
+}
+
+/**
+ * qman_reserve_cgrid_range - Reserve the specified range of CGR ID
+ * @id: the base CGR ID of the range to reserve
+ * @count: the number of CGR IDs in the range
+ */
+int qman_reserve_cgrid_range(u32 id, unsigned int count);
+static inline int qman_reserve_cgrid(u32 id)
+{
+ return qman_reserve_cgrid_range(id, 1);
+}
+
+void qman_seed_cgrid_range(u32 id, unsigned int count);
+
+ /* Helpers */
+ /* ------- */
+/**
+ * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
+ * @fqid: the FQID that will be initialised by other s/w
+ *
+ * In many situations, a FQID is provided for communication between s/w
+ * entities, and whilst the consumer is responsible for initialising and
+ * scheduling the FQ, the producer(s) generally create a wrapper FQ object using
+ * and only call qman_enqueue() (no FQ initialisation, scheduling, etc). Ie;
+ * qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
+ * However, data can not be enqueued to the FQ until it is initialised out of
+ * the OOS state - this function polls for that condition. It is particularly
+ * useful for users of IPC functions - each endpoint's Rx FQ is the other
+ * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
+ * and then use this API on the (NO_MODIFY) Tx FQ object in order to
+ * synchronise. The function returns zero for success, +1 if the FQ is still in
+ * the OOS state, or negative if there was an error.
+ */
+static inline int qman_poll_fq_for_init(struct qman_fq *fq)
+{
+ struct qm_mcr_queryfq_np np;
+ int err;
+
+ err = qman_query_fq_np(fq, &np);
+ if (err)
+ return err;
+ if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
+ return 1;
+ return 0;
+}
+
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define cpu_to_hw_sg(x)
+#define hw_sg_to_cpu(x)
+#else
+#define cpu_to_hw_sg(x) __cpu_to_hw_sg(x)
+#define hw_sg_to_cpu(x) __hw_sg_to_cpu(x)
+
+static inline void __cpu_to_hw_sg(struct qm_sg_entry *sgentry)
+{
+ sgentry->opaque = cpu_to_be64(sgentry->opaque);
+ sgentry->val = cpu_to_be32(sgentry->val);
+ sgentry->val_off = cpu_to_be16(sgentry->val_off);
+}
+
+static inline void __hw_sg_to_cpu(struct qm_sg_entry *sgentry)
+{
+ sgentry->opaque = be64_to_cpu(sgentry->opaque);
+ sgentry->val = be32_to_cpu(sgentry->val);
+ sgentry->val_off = be16_to_cpu(sgentry->val_off);
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_QMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_usd.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_usd.h
new file mode 100644
index 00000000..e1836175
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_usd.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __FSL_USD_H
+#define __FSL_USD_H
+
+#include <compat.h>
+#include <fsl_qman.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Thread-entry/exit hooks; */
+int qman_thread_init(void);
+int bman_thread_init(void);
+int qman_thread_finish(void);
+int bman_thread_finish(void);
+
+#define QBMAN_ANY_PORTAL_IDX 0xffffffff
+
+/* Obtain and free raw (unitialized) portals */
+
+struct dpaa_raw_portal {
+ /* inputs */
+
+ /* set to non zero to turn on stashing */
+ uint8_t enable_stash;
+ /* Stashing attributes for the portal */
+ uint32_t cpu;
+ uint32_t cache;
+ uint32_t window;
+
+ /* Specifies the stash request queue this portal should use */
+ uint8_t sdest;
+
+ /* Specifes a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+ * for don't care. The portal index will be populated by the
+ * driver when the ioctl() successfully completes.
+ */
+ uint32_t index;
+
+ /* outputs */
+ uint64_t cinh;
+ uint64_t cena;
+};
+
+int qman_allocate_raw_portal(struct dpaa_raw_portal *portal);
+int qman_free_raw_portal(struct dpaa_raw_portal *portal);
+
+int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);
+int bman_free_raw_portal(struct dpaa_raw_portal *portal);
+
+/* Post-process interrupts. NB, the kernel IRQ handler disables the interrupt
+ * line before notifying us, and this post-processing re-enables it once
+ * processing is complete. As such, it is essential to call this before going
+ * into another blocking read/select/poll.
+ */
+void qman_thread_irq(void);
+void bman_thread_irq(void);
+
+/* Global setup */
+int qman_global_init(void);
+int bman_global_init(void);
+
+/* Direct portal create and destroy */
+struct qman_portal *fsl_qman_portal_create(void);
+int fsl_qman_portal_destroy(struct qman_portal *qp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_USD_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/netcfg.h b/src/spdk/dpdk/drivers/bus/dpaa/include/netcfg.h
new file mode 100644
index 00000000..7818de68
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/netcfg.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2012 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __NETCFG_H
+#define __NETCFG_H
+
+#include <fman.h>
+#include <argp.h>
+
+/* Configuration information related to a specific ethernet port */
+struct fm_eth_port_cfg {
+ /**< A list of PCD FQ ranges, obtained from FMC configuration */
+ struct list_head *list;
+ /**< The "Rx default" FQID, obtained from FMC configuration */
+ uint32_t rx_def;
+ /**< Other interface details are in the fman driver interface */
+ struct fman_if *fman_if;
+};
+
+struct netcfg_info {
+ uint8_t num_ethports;
+ /**< Number of ports */
+ struct fm_eth_port_cfg port_cfg[0];
+ /**< Variable structure array of size num_ethports */
+};
+
+struct interface_info {
+ char *name;
+ struct ether_addr mac_addr;
+ struct ether_addr peer_mac;
+ int mac_present;
+ int fman_enabled_mac_interface;
+};
+
+struct netcfg_interface {
+ uint8_t numof_netcfg_interface;
+ uint8_t numof_fman_enabled_macless;
+ struct interface_info interface_info[0];
+};
+
+/* pcd_file: FMC netpcd XML ("policy") file, that contains PCD information.
+ * cfg_file: FMC config XML file
+ * Returns the configuration information in newly allocated memory.
+ */
+struct netcfg_info *netcfg_acquire(void);
+
+/* cfg_ptr: configuration information pointer.
+ * Frees the resources allocated by the configuration layer.
+ */
+void netcfg_release(struct netcfg_info *cfg_ptr);
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+/* cfg_ptr: configuration information pointer.
+ * This function dumps configuration data to stdout.
+ */
+void dump_netcfg(struct netcfg_info *cfg_ptr);
+#endif
+
+#endif /* __NETCFG_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/of.h b/src/spdk/dpdk/drivers/bus/dpaa/include/of.h
new file mode 100644
index 00000000..7ea7608f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/of.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __OF_H
+#define __OF_H
+
+#include <compat.h>
+
+#ifndef OF_INIT_DEFAULT_PATH
+#define OF_INIT_DEFAULT_PATH "/proc/device-tree"
+#endif
+
+#define OF_DEFAULT_NA 1
+#define OF_DEFAULT_NS 1
+
+#define OF_FILE_BUF_MAX 256
+
+/**
+ * Layout of Device Tree:
+ * dt_dir
+ * |- dt_dir
+ * | |- dt_dir
+ * | | |- dt_dir
+ * | | | |- dt_file
+ * | | | ``- dt_file
+ * | | ``- dt_file
+ * | `-dt_file`
+ * ``- dt_file
+ *
+ * +------------------+
+ * |dt_dir |
+ * |+----------------+|
+ * ||dt_node ||
+ * ||+--------------+||
+ * |||device_node |||
+ * ||+--------------+||
+ * || list_dt_nodes ||
+ * |+----------------+|
+ * | list of subdir |
+ * | list of files |
+ * +------------------+
+ */
+
+/**
+ * Device description on of a device node in device tree.
+ */
+struct device_node {
+ char name[NAME_MAX];
+ char full_name[PATH_MAX];
+};
+
+/**
+ * List of device nodes available in a device tree layout
+ */
+struct dt_node {
+ struct device_node node; /**< Property of node */
+ int is_file; /**< FALSE==dir, TRUE==file */
+ struct list_head list; /**< Nodes within a parent subdir */
+};
+
+/**
+ * Types we use to represent directories and files
+ */
+struct dt_file;
+struct dt_dir {
+ struct dt_node node;
+ struct list_head subdirs;
+ struct list_head files;
+ struct list_head linear;
+ struct dt_dir *parent;
+ struct dt_file *compatible;
+ struct dt_file *status;
+ struct dt_file *lphandle;
+ struct dt_file *a_cells;
+ struct dt_file *s_cells;
+ struct dt_file *reg;
+};
+
+struct dt_file {
+ struct dt_node node;
+ struct dt_dir *parent;
+ ssize_t len;
+ uint64_t buf[OF_FILE_BUF_MAX >> 3];
+};
+
+const struct device_node *of_find_compatible_node(
+ const struct device_node *from,
+ const char *type __always_unused,
+ const char *compatible)
+ __attribute__((nonnull(3)));
+
+#define for_each_compatible_node(dev_node, type, compatible) \
+ for (dev_node = of_find_compatible_node(NULL, type, compatible); \
+ dev_node != NULL; \
+ dev_node = of_find_compatible_node(dev_node, type, compatible))
+
+const void *of_get_property(const struct device_node *from, const char *name,
+ size_t *lenp) __attribute__((nonnull(2)));
+bool of_device_is_available(const struct device_node *dev_node);
+
+const struct device_node *of_find_node_by_phandle(phandle ph);
+
+const struct device_node *of_get_parent(const struct device_node *dev_node);
+
+const struct device_node *of_get_next_child(const struct device_node *dev_node,
+ const struct device_node *prev);
+
+const void *of_get_mac_address(const struct device_node *np);
+
+#define for_each_child_node(parent, child) \
+ for (child = of_get_next_child(parent, NULL); child != NULL; \
+ child = of_get_next_child(parent, child))
+
+uint32_t of_n_addr_cells(const struct device_node *dev_node);
+uint32_t of_n_size_cells(const struct device_node *dev_node);
+
+const uint32_t *of_get_address(const struct device_node *dev_node, size_t idx,
+ uint64_t *size, uint32_t *flags);
+
+uint64_t of_translate_address(const struct device_node *dev_node,
+ const u32 *addr) __attribute__((nonnull));
+
+bool of_device_is_compatible(const struct device_node *dev_node,
+ const char *compatible);
+
+/* of_init() must be called prior to initialisation or use of any driver
+ * subsystem that is device-tree-dependent. Eg. Qman/Bman, config layers, etc.
+ * The path should usually be "/proc/device-tree".
+ */
+int of_init_path(const char *dt_path);
+
+/* of_finish() allows a controlled tear-down of the device-tree layer, eg. if a
+ * full reload is desired without a process exit.
+ */
+void of_finish(void);
+
+/* Use of this wrapper is recommended. */
+static inline int of_init(void)
+{
+ return of_init_path(OF_INIT_DEFAULT_PATH);
+}
+
+/* Read a numeric property according to its size and return it as a 64-bit
+ * value.
+ */
+static inline uint64_t of_read_number(const __be32 *cell, int size)
+{
+ uint64_t r = 0;
+
+ while (size--)
+ r = (r << 32) | be32toh(*(cell++));
+ return r;
+}
+
+#endif /* __OF_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/process.h b/src/spdk/dpdk/drivers/bus/dpaa/include/process.h
new file mode 100644
index 00000000..d9ec94ee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/process.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __PROCESS_H
+#define __PROCESS_H
+
+#include <compat.h>
+
+/* The process device underlies process-wide user/kernel interactions, such as
+ * mapping dma_mem memory and providing accompanying ioctl()s. (This isn't used
+ * for portals, which use one UIO device each.).
+ */
+#define PROCESS_PATH "/dev/fsl-usdpaa"
+
+/* Allocation of resource IDs uses a generic interface. This enum is used to
+ * distinguish between the type of underlying object being manipulated.
+ */
+enum dpaa_id_type {
+ dpaa_id_fqid,
+ dpaa_id_bpid,
+ dpaa_id_qpool,
+ dpaa_id_cgrid,
+ dpaa_id_max /* <-- not a valid type, represents the number of types */
+};
+
+int process_alloc(enum dpaa_id_type id_type, uint32_t *base, uint32_t num,
+ uint32_t align, int partial);
+void process_release(enum dpaa_id_type id_type, uint32_t base, uint32_t num);
+
+int process_reserve(enum dpaa_id_type id_type, uint32_t base, uint32_t num);
+
+/* Mapping and using QMan/BMan portals */
+enum dpaa_portal_type {
+ dpaa_portal_qman,
+ dpaa_portal_bman,
+};
+
+struct dpaa_portal_map {
+ void *cinh;
+ void *cena;
+};
+
+struct dpaa_ioctl_portal_map {
+ /* Input parameter, is a qman or bman portal required. */
+ enum dpaa_portal_type type;
+ /* Specifes a specific portal index to map or 0xffffffff
+ * for don't care.
+ */
+ uint32_t index;
+
+ /* Return value if the map succeeds, this gives the mapped
+ * cache-inhibited (cinh) and cache-enabled (cena) addresses.
+ */
+ struct dpaa_portal_map addr;
+
+ /* Qman-specific return values */
+ u16 channel;
+ uint32_t pools;
+};
+
+int process_portal_map(struct dpaa_ioctl_portal_map *params);
+int process_portal_unmap(struct dpaa_portal_map *map);
+
+struct dpaa_ioctl_irq_map {
+ enum dpaa_portal_type type; /* Type of portal to map */
+ int fd; /* File descriptor that contains the portal */
+ void *portal_cinh; /* Cache inhibited area to identify the portal */
+};
+
+int process_portal_irq_map(int fd, struct dpaa_ioctl_irq_map *irq);
+int process_portal_irq_unmap(int fd);
+
+#endif /* __PROCESS_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/meson.build b/src/spdk/dpdk/drivers/bus/dpaa/meson.build
new file mode 100644
index 00000000..d10b62c0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/meson.build
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['eventdev']
+sources = files('base/fman/fman.c',
+ 'base/fman/fman_hw.c',
+ 'base/fman/netcfg_layer.c',
+ 'base/fman/of.c',
+ 'base/qbman/bman.c',
+ 'base/qbman/bman_driver.c',
+ 'base/qbman/dpaa_alloc.c',
+ 'base/qbman/dpaa_sys.c',
+ 'base/qbman/process.c',
+ 'base/qbman/qman.c',
+ 'base/qbman/qman_driver.c',
+ 'dpaa_bus.c')
+
+allow_experimental_apis = true
+
+if cc.has_argument('-Wno-cast-qual')
+ cflags += '-Wno-cast-qual'
+endif
+
+includes += include_directories('include', 'base/qbman')
+cflags += ['-D_GNU_SOURCE']
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/rte_bus_dpaa_version.map b/src/spdk/dpdk/drivers/bus/dpaa/rte_bus_dpaa_version.map
new file mode 100644
index 00000000..7d6d6243
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/rte_bus_dpaa_version.map
@@ -0,0 +1,104 @@
+DPDK_17.11 {
+ global:
+
+ bman_acquire;
+ bman_free_pool;
+ bman_get_params;
+ bman_global_init;
+ bman_new_pool;
+ bman_query_free_buffers;
+ bman_release;
+ dpaa_logtype_mempool;
+ dpaa_logtype_pmd;
+ dpaa_netcfg;
+ fman_ccsr_map_fd;
+ fman_dealloc_bufs_mask_hi;
+ fman_dealloc_bufs_mask_lo;
+ fman_if_add_mac_addr;
+ fman_if_clear_mac_addr;
+ fman_if_disable_rx;
+ fman_if_enable_rx;
+ fman_if_discard_rx_errors;
+ fman_if_get_fc_threshold;
+ fman_if_get_fc_quanta;
+ fman_if_get_fdoff;
+ fman_if_loopback_disable;
+ fman_if_loopback_enable;
+ fman_if_promiscuous_disable;
+ fman_if_promiscuous_enable;
+ fman_if_reset_mcast_filter_table;
+ fman_if_set_bp;
+ fman_if_set_fc_threshold;
+ fman_if_set_fc_quanta;
+ fman_if_set_fdoff;
+ fman_if_set_ic_params;
+ fman_if_set_maxfrm;
+ fman_if_set_mcast_filter_table;
+ fman_if_stats_get;
+ fman_if_stats_get_all;
+ fman_if_stats_reset;
+ fman_ip_rev;
+ netcfg_acquire;
+ netcfg_release;
+ of_find_compatible_node;
+ of_get_property;
+ qm_channel_caam;
+ qman_create_fq;
+ qman_dequeue;
+ qman_dqrr_consume;
+ qman_enqueue;
+ qman_enqueue_multi;
+ qman_fq_fqid;
+ qman_fq_state;
+ qman_global_init;
+ qman_init_fq;
+ qman_poll_dqrr;
+ qman_query_fq_np;
+ qman_set_vdq;
+ qman_reserve_fqid_range;
+ qman_volatile_dequeue;
+ rte_dpaa_driver_register;
+ rte_dpaa_driver_unregister;
+ rte_dpaa_mem_ptov;
+ rte_dpaa_portal_init;
+
+ local: *;
+};
+
+DPDK_18.02 {
+ global:
+
+ dpaa_logtype_eventdev;
+ dpaa_svr_family;
+ per_lcore_dpaa_io;
+ per_lcore_held_bufs;
+ qm_channel_pool1;
+ qman_alloc_cgrid_range;
+ qman_alloc_pool_range;
+ qman_create_cgr;
+ qman_dca_index;
+ qman_delete_cgr;
+ qman_enqueue_multi_fq;
+ qman_modify_cgr;
+ qman_oos_fq;
+ qman_portal_dequeue;
+ qman_portal_poll_rx;
+ qman_query_fq_frm_cnt;
+ qman_release_cgrid_range;
+ qman_retire_fq;
+ qman_static_dequeue_add;
+ rte_dpaa_portal_fq_close;
+ rte_dpaa_portal_fq_init;
+
+ local: *;
+} DPDK_17.11;
+
+DPDK_18.08 {
+ global:
+
+ fman_if_get_sg_enable;
+ fman_if_set_sg;
+ of_get_mac_address;
+
+ local: *;
+} DPDK_18.02;
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_bus.h b/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_bus.h
new file mode 100644
index 00000000..15dc6a4a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+#ifndef __RTE_DPAA_BUS_H__
+#define __RTE_DPAA_BUS_H__
+
+#include <rte_bus.h>
+#include <rte_mempool.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
+#define DPAA_MEMPOOL_OPS_NAME "dpaa"
+
+#define DEV_TO_DPAA_DEVICE(ptr) \
+ container_of(ptr, struct rte_dpaa_device, device)
+
+/* DPAA SoC identifier; If this is not available, it can be concluded
+ * that board is non-DPAA. Single slot is currently supported.
+ */
+#define DPAA_SOC_ID_FILE "/sys/devices/soc0/soc_id"
+
+#define SVR_LS1043A_FAMILY 0x87920000
+#define SVR_LS1046A_FAMILY 0x87070000
+#define SVR_MASK 0xffff0000
+
+extern unsigned int dpaa_svr_family;
+
+extern RTE_DEFINE_PER_LCORE(bool, dpaa_io);
+
+struct rte_dpaa_device;
+struct rte_dpaa_driver;
+
+/* DPAA Device and Driver lists for DPAA bus */
+TAILQ_HEAD(rte_dpaa_device_list, rte_dpaa_device);
+TAILQ_HEAD(rte_dpaa_driver_list, rte_dpaa_driver);
+
+/* Configuration variables exported from DPAA bus */
+extern struct netcfg_info *dpaa_netcfg;
+
+enum rte_dpaa_type {
+ FSL_DPAA_ETH = 1,
+ FSL_DPAA_CRYPTO,
+};
+
+struct rte_dpaa_bus {
+ struct rte_bus bus;
+ struct rte_dpaa_device_list device_list;
+ struct rte_dpaa_driver_list driver_list;
+ int device_count;
+};
+
+struct dpaa_device_id {
+ uint8_t fman_id; /**< Fman interface ID, for ETH type device */
+ uint8_t mac_id; /**< Fman MAC interface ID, for ETH type device */
+ uint16_t dev_id; /**< Device Identifier from DPDK */
+};
+
+struct rte_dpaa_device {
+ TAILQ_ENTRY(rte_dpaa_device) next;
+ struct rte_device device;
+ union {
+ struct rte_eth_dev *eth_dev;
+ struct rte_cryptodev *crypto_dev;
+ };
+ struct rte_dpaa_driver *driver;
+ struct dpaa_device_id id;
+ enum rte_dpaa_type device_type; /**< Ethernet or crypto type device */
+ char name[RTE_ETH_NAME_MAX_LEN];
+};
+
+typedef int (*rte_dpaa_probe_t)(struct rte_dpaa_driver *dpaa_drv,
+ struct rte_dpaa_device *dpaa_dev);
+typedef int (*rte_dpaa_remove_t)(struct rte_dpaa_device *dpaa_dev);
+
+struct rte_dpaa_driver {
+ TAILQ_ENTRY(rte_dpaa_driver) next;
+ struct rte_driver driver;
+ struct rte_dpaa_bus *dpaa_bus;
+ enum rte_dpaa_type drv_type;
+ rte_dpaa_probe_t probe;
+ rte_dpaa_remove_t remove;
+};
+
+struct dpaa_portal {
+ uint32_t bman_idx; /**< BMAN Portal ID*/
+ uint32_t qman_idx; /**< QMAN Portal ID*/
+ uint64_t tid;/**< Parent Thread id for this portal */
+};
+
+/* Various structures representing contiguous memory maps */
+struct dpaa_memseg {
+ TAILQ_ENTRY(dpaa_memseg) next;
+ char *vaddr;
+ rte_iova_t iova;
+ size_t len;
+};
+
+TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg);
+extern struct dpaa_memseg_list rte_dpaa_memsegs;
+
+/* Either iterate over the list of internal memseg references or fallback to
+ * EAL memseg based iova2virt.
+ */
+static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
+{
+ struct dpaa_memseg *ms;
+
+ /* Check if the address is already part of the memseg list internally
+ * maintained by the dpaa driver.
+ */
+ TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
+ if (paddr >= ms->iova && paddr <
+ ms->iova + ms->len)
+ return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
+ }
+
+ /* If not, Fallback to full memseg list searching */
+ return rte_mem_iova2virt(paddr);
+}
+
+/**
+ * Register a DPAA driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa_driver structure describing the driver
+ * to be registered.
+ */
+void rte_dpaa_driver_register(struct rte_dpaa_driver *driver);
+
+/**
+ * Unregister a DPAA driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver);
+
+/**
+ * Initialize a DPAA portal
+ *
+ * @param arg
+ * Per thread ID
+ *
+ * @return
+ * 0 in case of success, error otherwise
+ */
+int rte_dpaa_portal_init(void *arg);
+
+int rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq);
+
+int rte_dpaa_portal_fq_close(struct qman_fq *fq);
+
+/**
+ * Cleanup a DPAA Portal
+ */
+void dpaa_portal_finish(void *arg);
+
+/** Helper for DPAA device registration from driver (eth, crypto) instance */
+#define RTE_PMD_REGISTER_DPAA(nm, dpaa_drv) \
+RTE_INIT(dpaainitfn_ ##nm) \
+{\
+ (dpaa_drv).driver.name = RTE_STR(nm);\
+ rte_dpaa_driver_register(&dpaa_drv); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+/* Create storage for dqrr entries per lcore */
+#define DPAA_PORTAL_DEQUEUE_DEPTH 16
+struct dpaa_portal_dqrr {
+ void *mbuf[DPAA_PORTAL_DEQUEUE_DEPTH];
+ uint64_t dqrr_held;
+ uint8_t dqrr_size;
+};
+
+RTE_DECLARE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
+
+#define DPAA_PER_LCORE_DQRR_SIZE RTE_PER_LCORE(held_bufs).dqrr_size
+#define DPAA_PER_LCORE_DQRR_HELD RTE_PER_LCORE(held_bufs).dqrr_held
+#define DPAA_PER_LCORE_DQRR_MBUF(i) RTE_PER_LCORE(held_bufs).mbuf[i]
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DPAA_BUS_H__ */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_logs.h b/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_logs.h
new file mode 100644
index 00000000..e4143543
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_logs.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef _DPAA_LOGS_H_
+#define _DPAA_LOGS_H_
+
+#include <rte_log.h>
+
+extern int dpaa_logtype_bus;
+extern int dpaa_logtype_mempool;
+extern int dpaa_logtype_pmd;
+extern int dpaa_logtype_eventdev;
+
+#define DPAA_BUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_bus, "dpaa: " fmt "\n", ##args)
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
+#define DPAA_BUS_HWWARN(cond, fmt, args...) \
+ do {\
+ if (cond) \
+ DPAA_BUS_LOG(DEBUG, "WARN: " fmt, ##args); \
+ } while (0)
+#else
+#define DPAA_BUS_HWWARN(cond, fmt, args...) do { } while (0)
+#endif
+
+#define DPAA_BUS_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa_logtype_bus, "dpaa: %s(): " fmt "\n", \
+ __func__, ##args)
+
+#define BUS_INIT_FUNC_TRACE() DPAA_BUS_DEBUG(" >>")
+
+#define DPAA_BUS_INFO(fmt, args...) \
+ DPAA_BUS_LOG(INFO, fmt, ## args)
+#define DPAA_BUS_ERR(fmt, args...) \
+ DPAA_BUS_LOG(ERR, fmt, ## args)
+#define DPAA_BUS_WARN(fmt, args...) \
+ DPAA_BUS_LOG(WARNING, fmt, ## args)
+
+/* Mempool related logs */
+
+#define DPAA_MEMPOOL_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_mempool, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define MEMPOOL_INIT_FUNC_TRACE() DPAA_MEMPOOL_LOG(DEBUG, " >>")
+
+#define DPAA_MEMPOOL_DPDEBUG(fmt, args...) \
+ RTE_LOG_DP(DEBUG, PMD, fmt, ## args)
+#define DPAA_MEMPOOL_DEBUG(fmt, args...) \
+ DPAA_MEMPOOL_LOG(DEBUG, fmt, ## args)
+#define DPAA_MEMPOOL_ERR(fmt, args...) \
+ DPAA_MEMPOOL_LOG(ERR, fmt, ## args)
+#define DPAA_MEMPOOL_INFO(fmt, args...) \
+ DPAA_MEMPOOL_LOG(INFO, fmt, ## args)
+#define DPAA_MEMPOOL_WARN(fmt, args...) \
+ DPAA_MEMPOOL_LOG(WARNING, fmt, ## args)
+
+/* PMD related logs */
+
+#define DPAA_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_pmd, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA_PMD_LOG(DEBUG, " >>")
+
+#define DPAA_PMD_DEBUG(fmt, args...) \
+ DPAA_PMD_LOG(DEBUG, fmt, ## args)
+#define DPAA_PMD_ERR(fmt, args...) \
+ DPAA_PMD_LOG(ERR, fmt, ## args)
+#define DPAA_PMD_INFO(fmt, args...) \
+ DPAA_PMD_LOG(INFO, fmt, ## args)
+#define DPAA_PMD_WARN(fmt, args...) \
+ DPAA_PMD_LOG(WARNING, fmt, ## args)
+
+#define DPAA_EVENTDEV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_eventdev, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define EVENTDEV_INIT_FUNC_TRACE() DPAA_EVENTDEV_LOG(DEBUG, " >>")
+
+#define DPAA_EVENTDEV_DEBUG(fmt, args...) \
+ DPAA_EVENTDEV_LOG(DEBUG, fmt, ## args)
+#define DPAA_EVENTDEV_ERR(fmt, args...) \
+ DPAA_EVENTDEV_LOG(ERR, fmt, ## args)
+#define DPAA_EVENTDEV_INFO(fmt, args...) \
+ DPAA_EVENTDEV_LOG(INFO, fmt, ## args)
+#define DPAA_EVENTDEV_WARN(fmt, args...) \
+ DPAA_EVENTDEV_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#endif /* _DPAA_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/Makefile b/src/spdk/dpdk/drivers/bus/fslmc/Makefile
new file mode 100644
index 00000000..515d0f53
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/Makefile
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_bus_fslmc.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev
+
+# versioning export map
+EXPORT_MAP := rte_bus_fslmc_version.map
+
+# library version
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
+ qbman/qbman_portal.c \
+ qbman/qbman_debug.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
+ mc/dpmng.c \
+ mc/dpbp.c \
+ mc/dpio.c \
+ mc/mc_sys.c \
+ mc/dpcon.c \
+ mc/dpci.c \
+ mc/dpdmai.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpio.c
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpbp.c
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpci.c
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc_vfio.c
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc_bus.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/fslmc_bus.c b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_bus.c
new file mode 100644
index 00000000..d2900edc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_bus.c
@@ -0,0 +1,528 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <string.h>
+#include <dirent.h>
+#include <stdbool.h>
+
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_devargs.h>
+#include <rte_memcpy.h>
+#include <rte_ethdev_driver.h>
+
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
+#include "fslmc_logs.h"
+
+int dpaa2_logtype_bus;
+
+#define VFIO_IOMMU_GROUP_PATH "/sys/kernel/iommu_groups"
+#define FSLMC_BUS_NAME fslmc
+
+struct rte_fslmc_bus rte_fslmc_bus;
+uint8_t dpaa2_virt_mode;
+
+uint32_t
+rte_fslmc_get_device_count(enum rte_dpaa2_dev_type device_type)
+{
+ if (device_type > DPAA2_DEVTYPE_MAX)
+ return 0;
+ return rte_fslmc_bus.device_count[device_type];
+}
+
+RTE_DEFINE_PER_LCORE(struct dpaa2_portal_dqrr, dpaa2_held_bufs);
+
+static void
+cleanup_fslmc_device_list(void)
+{
+ struct rte_dpaa2_device *dev;
+ struct rte_dpaa2_device *t_dev;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, t_dev) {
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ }
+}
+
+static int
+compare_dpaa2_devname(struct rte_dpaa2_device *dev1,
+ struct rte_dpaa2_device *dev2)
+{
+ int comp;
+
+ if (dev1->dev_type > dev2->dev_type) {
+ comp = 1;
+ } else if (dev1->dev_type < dev2->dev_type) {
+ comp = -1;
+ } else {
+ /* Check the ID as types match */
+ if (dev1->object_id > dev2->object_id)
+ comp = 1;
+ else if (dev1->object_id < dev2->object_id)
+ comp = -1;
+ else
+ comp = 0; /* Duplicate device name */
+ }
+
+ return comp;
+}
+
+static void
+insert_in_device_list(struct rte_dpaa2_device *newdev)
+{
+ int comp, inserted = 0;
+ struct rte_dpaa2_device *dev = NULL;
+ struct rte_dpaa2_device *tdev = NULL;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, tdev) {
+ comp = compare_dpaa2_devname(newdev, dev);
+ if (comp < 0) {
+ TAILQ_INSERT_BEFORE(dev, newdev, next);
+ inserted = 1;
+ break;
+ }
+ }
+
+ if (!inserted)
+ TAILQ_INSERT_TAIL(&rte_fslmc_bus.device_list, newdev, next);
+}
+
+static struct rte_devargs *
+fslmc_devargs_lookup(struct rte_dpaa2_device *dev)
+{
+ struct rte_devargs *devargs;
+ char dev_name[32];
+
+ RTE_EAL_DEVARGS_FOREACH("fslmc", devargs) {
+ devargs->bus->parse(devargs->name, &dev_name);
+ if (strcmp(dev_name, dev->device.name) == 0) {
+ DPAA2_BUS_INFO("**Devargs matched %s", dev_name);
+ return devargs;
+ }
+ }
+ return NULL;
+}
+
+static void
+dump_device_list(void)
+{
+ struct rte_dpaa2_device *dev;
+ uint32_t global_log_level;
+ int local_log_level;
+
+ /* Only if the log level has been set to Debugging, print list */
+ global_log_level = rte_log_get_global_level();
+ local_log_level = rte_log_get_level(dpaa2_logtype_bus);
+ if (global_log_level == RTE_LOG_DEBUG ||
+ local_log_level == RTE_LOG_DEBUG) {
+ DPAA2_BUS_LOG(DEBUG, "List of devices scanned on bus:");
+ TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
+ DPAA2_BUS_LOG(DEBUG, "\t\t%s", dev->device.name);
+ }
+ }
+}
+
+static int
+scan_one_fslmc_device(char *dev_name)
+{
+ char *dup_dev_name, *t_ptr;
+ struct rte_dpaa2_device *dev;
+
+ if (!dev_name)
+ return -1;
+
+ /* Ignore the Container name itself */
+ if (!strncmp("dprc", dev_name, 4))
+ return 0;
+
+ /* Creating a temporary copy to perform cut-parse over string */
+ dup_dev_name = strdup(dev_name);
+ if (!dup_dev_name) {
+ DPAA2_BUS_ERR("Unable to allocate device name memory");
+ return -ENOMEM;
+ }
+
+ /* For all other devices, we allocate rte_dpaa2_device.
+ * For those devices where there is no driver, probe would release
+ * the memory associated with the rte_dpaa2_device after necessary
+ * initialization.
+ */
+ dev = calloc(1, sizeof(struct rte_dpaa2_device));
+ if (!dev) {
+ DPAA2_BUS_ERR("Unable to allocate device object");
+ free(dup_dev_name);
+ return -ENOMEM;
+ }
+
+ /* Parse the device name and ID */
+ t_ptr = strtok(dup_dev_name, ".");
+ if (!t_ptr) {
+ DPAA2_BUS_ERR("Incorrect device name observed");
+ goto cleanup;
+ }
+ if (!strncmp("dpni", t_ptr, 4))
+ dev->dev_type = DPAA2_ETH;
+ else if (!strncmp("dpseci", t_ptr, 6))
+ dev->dev_type = DPAA2_CRYPTO;
+ else if (!strncmp("dpcon", t_ptr, 5))
+ dev->dev_type = DPAA2_CON;
+ else if (!strncmp("dpbp", t_ptr, 4))
+ dev->dev_type = DPAA2_BPOOL;
+ else if (!strncmp("dpio", t_ptr, 4))
+ dev->dev_type = DPAA2_IO;
+ else if (!strncmp("dpci", t_ptr, 4))
+ dev->dev_type = DPAA2_CI;
+ else if (!strncmp("dpmcp", t_ptr, 5))
+ dev->dev_type = DPAA2_MPORTAL;
+ else if (!strncmp("dpdmai", t_ptr, 6))
+ dev->dev_type = DPAA2_QDMA;
+ else
+ dev->dev_type = DPAA2_UNKNOWN;
+
+ /* Update the device found into the device_count table */
+ rte_fslmc_bus.device_count[dev->dev_type]++;
+
+ t_ptr = strtok(NULL, ".");
+ if (!t_ptr) {
+ DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr);
+ goto cleanup;
+ }
+
+ sscanf(t_ptr, "%hu", &dev->object_id);
+ dev->device.name = strdup(dev_name);
+ if (!dev->device.name) {
+ DPAA2_BUS_ERR("Unable to clone device name. Out of memory");
+ goto cleanup;
+ }
+ dev->device.devargs = fslmc_devargs_lookup(dev);
+
+ /* Add device in the fslmc device list */
+ insert_in_device_list(dev);
+
+ /* Don't need the duplicated device filesystem entry anymore */
+ if (dup_dev_name)
+ free(dup_dev_name);
+
+ return 0;
+cleanup:
+ if (dup_dev_name)
+ free(dup_dev_name);
+ if (dev)
+ free(dev);
+ return -1;
+}
+
+static int
+rte_fslmc_parse(const char *name, void *addr)
+{
+ uint16_t dev_id;
+ char *t_ptr;
+ char *sep = strchr(name, ':');
+
+ if (strncmp(name, RTE_STR(FSLMC_BUS_NAME),
+ strlen(RTE_STR(FSLMC_BUS_NAME)))) {
+ return -EINVAL;
+ }
+
+ if (!sep) {
+ DPAA2_BUS_ERR("Incorrect device name observed");
+ return -EINVAL;
+ }
+
+ t_ptr = (char *)(sep + 1);
+
+ if (strncmp("dpni", t_ptr, 4) &&
+ strncmp("dpseci", t_ptr, 6) &&
+ strncmp("dpcon", t_ptr, 5) &&
+ strncmp("dpbp", t_ptr, 4) &&
+ strncmp("dpio", t_ptr, 4) &&
+ strncmp("dpci", t_ptr, 4) &&
+ strncmp("dpmcp", t_ptr, 5) &&
+ strncmp("dpdmai", t_ptr, 6)) {
+ DPAA2_BUS_ERR("Unknown or unsupported device");
+ return -EINVAL;
+ }
+
+ t_ptr = strchr(name, '.');
+ if (!t_ptr) {
+ DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr);
+ return -EINVAL;
+ }
+
+ t_ptr = (char *)(t_ptr + 1);
+ if (sscanf(t_ptr, "%hu", &dev_id) <= 0) {
+ DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr);
+ return -EINVAL;
+ }
+
+ if (addr)
+ strcpy(addr, (char *)(sep + 1));
+ return 0;
+}
+
+static int
+rte_fslmc_scan(void)
+{
+ int ret;
+ int device_count = 0;
+ char fslmc_dirpath[PATH_MAX];
+ DIR *dir;
+ struct dirent *entry;
+ static int process_once;
+ int groupid;
+
+ if (process_once) {
+ DPAA2_BUS_DEBUG("Fslmc bus already scanned. Not rescanning");
+ return 0;
+ }
+ process_once = 1;
+
+ ret = fslmc_get_container_group(&groupid);
+ if (ret != 0)
+ goto scan_fail;
+
+ /* Scan devices on the group */
+ sprintf(fslmc_dirpath, "%s/%d/devices", VFIO_IOMMU_GROUP_PATH,
+ groupid);
+ dir = opendir(fslmc_dirpath);
+ if (!dir) {
+ DPAA2_BUS_ERR("Unable to open VFIO group directory");
+ goto scan_fail;
+ }
+
+ while ((entry = readdir(dir)) != NULL) {
+ if (entry->d_name[0] == '.' || entry->d_type != DT_LNK)
+ continue;
+
+ ret = scan_one_fslmc_device(entry->d_name);
+ if (ret != 0) {
+ /* Error in parsing directory - exit gracefully */
+ goto scan_fail_cleanup;
+ }
+ device_count += 1;
+ }
+
+ closedir(dir);
+
+ DPAA2_BUS_INFO("FSLMC Bus scan completed");
+ /* If debugging is enabled, device list is dumped to log output */
+ dump_device_list();
+
+ return 0;
+
+scan_fail_cleanup:
+ closedir(dir);
+
+ /* Remove all devices in the list */
+ cleanup_fslmc_device_list();
+scan_fail:
+ DPAA2_BUS_DEBUG("FSLMC Bus Not Available. Skipping");
+ /* Irrespective of failure, scan only return success */
+ return 0;
+}
+
+static int
+rte_fslmc_match(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ if (dpaa2_drv->drv_type == dpaa2_dev->dev_type)
+ return 0;
+
+ return 1;
+}
+
+static int
+rte_fslmc_probe(void)
+{
+ int ret = 0;
+ int probe_all;
+
+ struct rte_dpaa2_device *dev;
+ struct rte_dpaa2_driver *drv;
+
+ if (TAILQ_EMPTY(&rte_fslmc_bus.device_list))
+ return 0;
+
+ ret = fslmc_vfio_setup_group();
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to setup VFIO %d", ret);
+ return 0;
+ }
+
+ /* Map existing segments as well as, in case of hotpluggable memory,
+ * install callback handler.
+ */
+ ret = rte_fslmc_vfio_dmamap();
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to DMA map existing VAs: (%d)", ret);
+ /* Not continuing ahead */
+ DPAA2_BUS_ERR("FSLMC VFIO Mapping failed");
+ return 0;
+ }
+
+ ret = fslmc_vfio_process_group();
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to setup devices %d", ret);
+ return 0;
+ }
+
+ probe_all = rte_fslmc_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST;
+
+ TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
+ TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) {
+ ret = rte_fslmc_match(drv, dev);
+ if (ret)
+ continue;
+
+ if (!drv->probe)
+ continue;
+
+ if (dev->device.devargs &&
+ dev->device.devargs->policy == RTE_DEV_BLACKLISTED) {
+ DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping",
+ dev->device.name);
+ continue;
+ }
+
+ if (probe_all ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy ==
+ RTE_DEV_WHITELISTED)) {
+ ret = drv->probe(drv, dev);
+ if (ret)
+ DPAA2_BUS_ERR("Unable to probe");
+ }
+ break;
+ }
+ }
+
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ dpaa2_virt_mode = 1;
+
+ return 0;
+}
+
+static struct rte_device *
+rte_fslmc_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ const struct rte_dpaa2_device *dstart;
+ struct rte_dpaa2_device *dev;
+
+ if (start != NULL) {
+ dstart = RTE_DEV_TO_FSLMC_CONST(start);
+ dev = TAILQ_NEXT(dstart, next);
+ } else {
+ dev = TAILQ_FIRST(&rte_fslmc_bus.device_list);
+ }
+ while (dev != NULL) {
+ if (cmp(&dev->device, data) == 0)
+ return &dev->device;
+ dev = TAILQ_NEXT(dev, next);
+ }
+
+ return NULL;
+}
+
+/*register a fslmc bus based dpaa2 driver */
+void
+rte_fslmc_driver_register(struct rte_dpaa2_driver *driver)
+{
+ RTE_VERIFY(driver);
+
+ TAILQ_INSERT_TAIL(&rte_fslmc_bus.driver_list, driver, next);
+ /* Update Bus references */
+ driver->fslmc_bus = &rte_fslmc_bus;
+}
+
+/*un-register a fslmc bus based dpaa2 driver */
+void
+rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
+{
+ struct rte_fslmc_bus *fslmc_bus;
+
+ fslmc_bus = driver->fslmc_bus;
+
+ TAILQ_REMOVE(&fslmc_bus->driver_list, driver, next);
+ /* Update Bus references */
+ driver->fslmc_bus = NULL;
+}
+
+/*
+ * All device has iova as va
+ */
+static inline int
+fslmc_all_device_support_iova(void)
+{
+ int ret = 0;
+ struct rte_dpaa2_device *dev;
+ struct rte_dpaa2_driver *drv;
+
+ TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
+ TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) {
+ ret = rte_fslmc_match(drv, dev);
+ if (ret)
+ continue;
+ /* if the driver is not supporting IOVA */
+ if (!(drv->drv_flags & RTE_DPAA2_DRV_IOVA_AS_VA))
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/*
+ * Get iommu class of DPAA2 devices on the bus.
+ */
+static enum rte_iova_mode
+rte_dpaa2_get_iommu_class(void)
+{
+ bool is_vfio_noiommu_enabled = 1;
+ bool has_iova_va;
+
+ if (TAILQ_EMPTY(&rte_fslmc_bus.device_list))
+ return RTE_IOVA_DC;
+
+ /* check if all devices on the bus support Virtual addressing or not */
+ has_iova_va = fslmc_all_device_support_iova();
+
+#ifdef VFIO_PRESENT
+ is_vfio_noiommu_enabled = rte_vfio_noiommu_is_enabled() == true ?
+ true : false;
+#endif
+
+ if (has_iova_va && !is_vfio_noiommu_enabled)
+ return RTE_IOVA_VA;
+
+ return RTE_IOVA_PA;
+}
+
+struct rte_fslmc_bus rte_fslmc_bus = {
+ .bus = {
+ .scan = rte_fslmc_scan,
+ .probe = rte_fslmc_probe,
+ .parse = rte_fslmc_parse,
+ .find_device = rte_fslmc_find_device,
+ .get_iommu_class = rte_dpaa2_get_iommu_class,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.driver_list),
+ .device_count = {0},
+};
+
+RTE_REGISTER_BUS(FSLMC_BUS_NAME, rte_fslmc_bus.bus);
+
+RTE_INIT(fslmc_init_log)
+{
+ /* Bus level logs */
+ dpaa2_logtype_bus = rte_log_register("bus.fslmc");
+ if (dpaa2_logtype_bus >= 0)
+ rte_log_set_level(dpaa2_logtype_bus, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/fslmc_logs.h b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_logs.h
new file mode 100644
index 00000000..dd74cb7d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_logs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _FSLMC_LOGS_H_
+#define _FSLMC_LOGS_H_
+
+extern int dpaa2_logtype_bus;
+
+#define DPAA2_BUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_bus, "fslmc: " fmt "\n", \
+ ##args)
+
+/* Debug logs are with Function names */
+#define DPAA2_BUS_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_bus, "fslmc: %s(): " fmt "\n", \
+ __func__, ##args)
+
+#define BUS_INIT_FUNC_TRACE() DPAA2_BUS_DEBUG(" >>")
+
+#define DPAA2_BUS_INFO(fmt, args...) \
+ DPAA2_BUS_LOG(INFO, fmt, ## args)
+#define DPAA2_BUS_ERR(fmt, args...) \
+ DPAA2_BUS_LOG(ERR, fmt, ## args)
+#define DPAA2_BUS_WARN(fmt, args...) \
+ DPAA2_BUS_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_BUS_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_BUS_DP_DEBUG(fmt, args...) \
+ DPAA2_BUS_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_BUS_DP_INFO(fmt, args...) \
+ DPAA2_BUS_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_BUS_DP_WARN(fmt, args...) \
+ DPAA2_BUS_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _FSLMC_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.c b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.c
new file mode 100644
index 00000000..4c2cd2a8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.c
@@ -0,0 +1,789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/vfs.h>
+#include <libgen.h>
+#include <dirent.h>
+#include <sys/eventfd.h>
+
+#include <eal_filesystem.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_bus.h>
+#include <rte_eal_memconfig.h>
+
+#include "rte_fslmc.h"
+#include "fslmc_vfio.h"
+#include "fslmc_logs.h"
+#include <mc/fsl_dpmng.h>
+
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+/** Pathname of FSL-MC devices directory. */
+#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"
+
+#define FSLMC_CONTAINER_MAX_LEN 8 /**< Of the format dprc.XX */
+
+/* Number of VFIO containers & groups with in */
+static struct fslmc_vfio_group vfio_group;
+static struct fslmc_vfio_container vfio_container;
+static int container_device_fd;
+static char *g_container;
+static uint32_t *msi_intr_vaddr;
+void *(*rte_mcp_ptr_list);
+
+static struct rte_dpaa2_object_list dpaa2_obj_list =
+ TAILQ_HEAD_INITIALIZER(dpaa2_obj_list);
+
+/*register a fslmc bus based dpaa2 driver */
+void
+rte_fslmc_object_register(struct rte_dpaa2_object *object)
+{
+ RTE_VERIFY(object);
+
+ TAILQ_INSERT_TAIL(&dpaa2_obj_list, object, next);
+}
+
+int
+fslmc_get_container_group(int *groupid)
+{
+ int ret;
+ char *container;
+
+ if (!g_container) {
+ container = getenv("DPRC");
+ if (container == NULL) {
+ DPAA2_BUS_DEBUG("DPAA2: DPRC not available");
+ return -EINVAL;
+ }
+
+ if (strlen(container) >= FSLMC_CONTAINER_MAX_LEN) {
+ DPAA2_BUS_ERR("Invalid container name: %s", container);
+ return -1;
+ }
+
+ g_container = strdup(container);
+ if (!g_container) {
+ DPAA2_BUS_ERR("Mem alloc failure; Container name");
+ return -ENOMEM;
+ }
+ }
+
+ /* get group number */
+ ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES,
+ g_container, groupid);
+ if (ret <= 0) {
+ DPAA2_BUS_ERR("Unable to find %s IOMMU group", g_container);
+ return -1;
+ }
+
+ DPAA2_BUS_DEBUG("Container: %s has VFIO iommu group id = %d",
+ g_container, *groupid);
+
+ return 0;
+}
+
+static int
+vfio_connect_container(void)
+{
+ int fd, ret;
+
+ if (vfio_container.used) {
+ DPAA2_BUS_DEBUG("No container available");
+ return -1;
+ }
+
+ /* Try connecting to vfio container if already created */
+ if (!ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER,
+ &vfio_container.fd)) {
+ DPAA2_BUS_DEBUG(
+ "Container pre-exists with FD[0x%x] for this group",
+ vfio_container.fd);
+ vfio_group.container = &vfio_container;
+ return 0;
+ }
+
+ /* Opens main vfio file descriptor which represents the "container" */
+ fd = rte_vfio_get_container_fd();
+ if (fd < 0) {
+ DPAA2_BUS_ERR("Failed to open VFIO container");
+ return -errno;
+ }
+
+ /* Check whether support for SMMU type IOMMU present or not */
+ if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
+ /* Connect group to container */
+ ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd);
+ if (ret) {
+ DPAA2_BUS_ERR("Failed to setup group container");
+ close(fd);
+ return -errno;
+ }
+
+ ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
+ if (ret) {
+ DPAA2_BUS_ERR("Failed to setup VFIO iommu");
+ close(fd);
+ return -errno;
+ }
+ } else {
+ DPAA2_BUS_ERR("No supported IOMMU available");
+ close(fd);
+ return -EINVAL;
+ }
+
+ vfio_container.used = 1;
+ vfio_container.fd = fd;
+ vfio_container.group = &vfio_group;
+ vfio_group.container = &vfio_container;
+
+ return 0;
+}
+
+static int vfio_map_irq_region(struct fslmc_vfio_group *group)
+{
+ int ret;
+ unsigned long *vaddr = NULL;
+ struct vfio_iommu_type1_dma_map map = {
+ .argsz = sizeof(map),
+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+ .vaddr = 0x6030000,
+ .iova = 0x6030000,
+ .size = 0x1000,
+ };
+
+ vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
+ PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
+ if (vaddr == MAP_FAILED) {
+ DPAA2_BUS_ERR("Unable to map region (errno = %d)", errno);
+ return -errno;
+ }
+
+ msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64);
+ map.vaddr = (unsigned long)vaddr;
+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map);
+ if (ret == 0)
+ return 0;
+
+ DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", errno);
+ return -errno;
+}
+
+static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
+static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
+
+static void
+fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
+ void *arg __rte_unused)
+{
+ struct rte_memseg_list *msl;
+ struct rte_memseg *ms;
+ size_t cur_len = 0, map_len = 0;
+ uint64_t virt_addr;
+ rte_iova_t iova_addr;
+ int ret;
+
+ msl = rte_mem_virt2memseg_list(addr);
+
+ while (cur_len < len) {
+ const void *va = RTE_PTR_ADD(addr, cur_len);
+
+ ms = rte_mem_virt2memseg(va, msl);
+ iova_addr = ms->iova;
+ virt_addr = ms->addr_64;
+ map_len = ms->len;
+
+ DPAA2_BUS_DEBUG("Request for %s, va=%p, "
+ "virt_addr=0x%" PRIx64 ", "
+ "iova=0x%" PRIx64 ", map_len=%zu",
+ type == RTE_MEM_EVENT_ALLOC ?
+ "alloc" : "dealloc",
+ va, virt_addr, iova_addr, map_len);
+
+ if (type == RTE_MEM_EVENT_ALLOC)
+ ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
+ else
+ ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len);
+
+ if (ret != 0) {
+ DPAA2_BUS_ERR("DMA Mapping/Unmapping failed. "
+ "Map=%d, addr=%p, len=%zu, err:(%d)",
+ type, va, map_len, ret);
+ return;
+ }
+
+ cur_len += map_len;
+ }
+
+ if (type == RTE_MEM_EVENT_ALLOC)
+ DPAA2_BUS_DEBUG("Total Mapped: addr=%p, len=%zu",
+ addr, len);
+ else
+ DPAA2_BUS_DEBUG("Total Unmapped: addr=%p, len=%zu",
+ addr, len);
+}
+
+static int
+fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len)
+{
+ struct fslmc_vfio_group *group;
+ struct vfio_iommu_type1_dma_map dma_map = {
+ .argsz = sizeof(struct vfio_iommu_type1_dma_map),
+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+ };
+ int ret;
+
+ dma_map.size = len;
+ dma_map.vaddr = vaddr;
+
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ dma_map.iova = iovaddr;
+#else
+ dma_map.iova = dma_map.vaddr;
+#endif
+
+ /* SET DMA MAP for IOMMU */
+ group = &vfio_group;
+
+ if (!group->container) {
+ DPAA2_BUS_ERR("Container is not connected ");
+ return -1;
+ }
+
+ DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"",
+ (uint64_t)dma_map.vaddr, (uint64_t)dma_map.size);
+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+ if (ret) {
+ DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)",
+ errno);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
+{
+ struct fslmc_vfio_group *group;
+ struct vfio_iommu_type1_dma_unmap dma_unmap = {
+ .argsz = sizeof(struct vfio_iommu_type1_dma_unmap),
+ .flags = 0,
+ };
+ int ret;
+
+ dma_unmap.size = len;
+ dma_unmap.iova = vaddr;
+
+ /* SET DMA MAP for IOMMU */
+ group = &vfio_group;
+
+ if (!group->container) {
+ DPAA2_BUS_ERR("Container is not connected ");
+ return -1;
+ }
+
+ DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"",
+ (uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size);
+ ret = ioctl(group->container->fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
+ if (ret) {
+ DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)",
+ errno);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg)
+{
+ int *n_segs = arg;
+ int ret;
+
+ ret = fslmc_map_dma(ms->addr_64, ms->iova, ms->len);
+ if (ret)
+ DPAA2_BUS_ERR("Unable to VFIO map (addr=%p, len=%zu)",
+ ms->addr, ms->len);
+ else
+ (*n_segs)++;
+
+ return ret;
+}
+
+int rte_fslmc_vfio_dmamap(void)
+{
+ int i = 0, ret;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
+
+ /* Lock before parsing and registering callback to memory subsystem */
+ rte_rwlock_read_lock(mem_lock);
+
+ if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
+ rte_rwlock_read_unlock(mem_lock);
+ return -1;
+ }
+
+ ret = rte_mem_event_callback_register("fslmc_memevent_clb",
+ fslmc_memevent_cb, NULL);
+ if (ret && rte_errno == ENOTSUP)
+ DPAA2_BUS_DEBUG("Memory event callbacks not supported");
+ else if (ret)
+ DPAA2_BUS_DEBUG("Unable to install memory handler");
+ else
+ DPAA2_BUS_DEBUG("Installed memory callback handler");
+
+ DPAA2_BUS_DEBUG("Total %d segments found.", i);
+
+ /* TODO - This is a W.A. as VFIO currently does not add the mapping of
+ * the interrupt region to SMMU. This should be removed once the
+ * support is added in the Kernel.
+ */
+ vfio_map_irq_region(&vfio_group);
+
+ /* Existing segments have been mapped and memory callback for hotplug
+ * has been installed.
+ */
+ rte_rwlock_read_unlock(mem_lock);
+
+ return 0;
+}
+
+static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
+{
+ intptr_t v_addr = (intptr_t)MAP_FAILED;
+ int32_t ret, mc_fd;
+
+ struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
+
+ /* getting the mcp object's fd*/
+ mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
+ if (mc_fd < 0) {
+ DPAA2_BUS_ERR("Error in VFIO get dev %s fd from group %d",
+ mcp_obj, group->fd);
+ return v_addr;
+ }
+
+ /* getting device info*/
+ ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info);
+ if (ret < 0) {
+ DPAA2_BUS_ERR("Error in VFIO getting DEVICE_INFO");
+ goto MC_FAILURE;
+ }
+
+ /* getting device region info*/
+ ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
+ if (ret < 0) {
+ DPAA2_BUS_ERR("Error in VFIO getting REGION_INFO");
+ goto MC_FAILURE;
+ }
+
+ v_addr = (size_t)mmap(NULL, reg_info.size,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ mc_fd, reg_info.offset);
+
+MC_FAILURE:
+ close(mc_fd);
+
+ return v_addr;
+}
+
+#define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
+
+int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index)
+{
+ int len, ret;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int *fd_ptr;
+
+ len = sizeof(irq_set_buf);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->count = 1;
+ irq_set->flags =
+ VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = index;
+ irq_set->start = 0;
+ fd_ptr = (int *)&irq_set->data;
+ *fd_ptr = intr_handle->fd;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret) {
+ DPAA2_BUS_ERR("Error:dpaa2 SET IRQs fd=%d, err = %d(%s)",
+ intr_handle->fd, errno, strerror(errno));
+ return ret;
+ }
+
+ return ret;
+}
+
+int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index)
+{
+ struct vfio_irq_set *irq_set;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ int len, ret;
+
+ len = sizeof(struct vfio_irq_set);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = index;
+ irq_set->start = 0;
+ irq_set->count = 0;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret)
+ DPAA2_BUS_ERR(
+ "Error disabling dpaa2 interrupts for fd %d",
+ intr_handle->fd);
+
+ return ret;
+}
+
+/* set up interrupt support (but not enable interrupts) */
+int
+rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
+ int vfio_dev_fd,
+ int num_irqs)
+{
+ int i, ret;
+
+ /* start from MSI-X interrupt type */
+ for (i = 0; i < num_irqs; i++) {
+ struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
+ int fd = -1;
+
+ irq_info.index = i;
+
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
+ if (ret < 0) {
+ DPAA2_BUS_ERR("Cannot get IRQ(%d) info, error %i (%s)",
+ i, errno, strerror(errno));
+ return -1;
+ }
+
+ /* if this vector cannot be used with eventfd,
+ * fail if we explicitly
+ * specified interrupt type, otherwise continue
+ */
+ if ((irq_info.flags & VFIO_IRQ_INFO_EVENTFD) == 0)
+ continue;
+
+ /* set up an eventfd for interrupts */
+ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (fd < 0) {
+ DPAA2_BUS_ERR("Cannot set up eventfd, error %i (%s)",
+ errno, strerror(errno));
+ return -1;
+ }
+
+ intr_handle->fd = fd;
+ intr_handle->type = RTE_INTR_HANDLE_VFIO_MSI;
+ intr_handle->vfio_dev_fd = vfio_dev_fd;
+
+ return 0;
+ }
+
+ /* if we're here, we haven't found a suitable interrupt vector */
+ return -1;
+}
+
+/*
+ * fslmc_process_iodevices for processing only IO (ETH, CRYPTO, and possibly
+ * EVENT) devices.
+ */
+static int
+fslmc_process_iodevices(struct rte_dpaa2_device *dev)
+{
+ int dev_fd;
+ struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
+ struct rte_dpaa2_object *object = NULL;
+
+ dev_fd = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD,
+ dev->device.name);
+ if (dev_fd <= 0) {
+ DPAA2_BUS_ERR("Unable to obtain device FD for device:%s",
+ dev->device.name);
+ return -1;
+ }
+
+ if (ioctl(dev_fd, VFIO_DEVICE_GET_INFO, &device_info)) {
+ DPAA2_BUS_ERR("Unable to obtain information for device:%s",
+ dev->device.name);
+ return -1;
+ }
+
+ switch (dev->dev_type) {
+ case DPAA2_ETH:
+ rte_dpaa2_vfio_setup_intr(&dev->intr_handle, dev_fd,
+ device_info.num_irqs);
+ break;
+ case DPAA2_CON:
+ case DPAA2_IO:
+ case DPAA2_CI:
+ case DPAA2_BPOOL:
+ TAILQ_FOREACH(object, &dpaa2_obj_list, next) {
+ if (dev->dev_type == object->dev_type)
+ object->create(dev_fd, &device_info,
+ dev->object_id);
+ else
+ continue;
+ }
+ break;
+ default:
+ break;
+ }
+
+ DPAA2_BUS_LOG(DEBUG, "Device (%s) abstracted from VFIO",
+ dev->device.name);
+ return 0;
+}
+
+static int
+fslmc_process_mcp(struct rte_dpaa2_device *dev)
+{
+ int ret;
+ intptr_t v_addr;
+ char *dev_name = NULL;
+ struct fsl_mc_io dpmng = {0};
+ struct mc_version mc_ver_info = {0};
+
+ rte_mcp_ptr_list = malloc(sizeof(void *) * 1);
+ if (!rte_mcp_ptr_list) {
+ DPAA2_BUS_ERR("Unable to allocate MC portal memory");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ dev_name = strdup(dev->device.name);
+ if (!dev_name) {
+ DPAA2_BUS_ERR("Unable to allocate MC device name memory");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ v_addr = vfio_map_mcp_obj(&vfio_group, dev_name);
+ if (v_addr == (intptr_t)MAP_FAILED) {
+ DPAA2_BUS_ERR("Error mapping region (errno = %d)", errno);
+ ret = -1;
+ goto cleanup;
+ }
+
+ /* check the MC version compatibility */
+ dpmng.regs = (void *)v_addr;
+ if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info)) {
+ DPAA2_BUS_ERR("Unable to obtain MC version");
+ ret = -1;
+ goto cleanup;
+ }
+
+ if ((mc_ver_info.major != MC_VER_MAJOR) ||
+ (mc_ver_info.minor < MC_VER_MINOR)) {
+ DPAA2_BUS_ERR("DPAA2 MC version not compatible!"
+ " Expected %d.%d.x, Detected %d.%d.%d",
+ MC_VER_MAJOR, MC_VER_MINOR,
+ mc_ver_info.major, mc_ver_info.minor,
+ mc_ver_info.revision);
+ ret = -1;
+ goto cleanup;
+ }
+ rte_mcp_ptr_list[0] = (void *)v_addr;
+
+ free(dev_name);
+ return 0;
+
+cleanup:
+ if (dev_name)
+ free(dev_name);
+
+ if (rte_mcp_ptr_list) {
+ free(rte_mcp_ptr_list);
+ rte_mcp_ptr_list = NULL;
+ }
+
+ return ret;
+}
+
+int
+fslmc_vfio_process_group(void)
+{
+ int ret;
+ int found_mportal = 0;
+ struct rte_dpaa2_device *dev, *dev_temp;
+
+ /* Search the MCP as that should be initialized first. */
+ TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
+ if (dev->dev_type == DPAA2_MPORTAL) {
+ ret = fslmc_process_mcp(dev);
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to map MC Portal");
+ return -1;
+ }
+ if (!found_mportal)
+ found_mportal = 1;
+
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ /* Ideally there is only a single dpmcp, but in case
+ * multiple exists, looping on remaining devices.
+ */
+ }
+ }
+
+ /* Cannot continue if there is not even a single mportal */
+ if (!found_mportal) {
+ DPAA2_BUS_ERR("No MC Portal device found. Not continuing");
+ return -1;
+ }
+
+ TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
+ switch (dev->dev_type) {
+ case DPAA2_ETH:
+ case DPAA2_CRYPTO:
+ case DPAA2_QDMA:
+ ret = fslmc_process_iodevices(dev);
+ if (ret) {
+ DPAA2_BUS_DEBUG("Dev (%s) init failed",
+ dev->device.name);
+ return ret;
+ }
+ break;
+ case DPAA2_CON:
+ case DPAA2_IO:
+ case DPAA2_CI:
+ case DPAA2_BPOOL:
+ /* Call the object creation routine and remove the
+ * device entry from device list
+ */
+ ret = fslmc_process_iodevices(dev);
+ if (ret) {
+ DPAA2_BUS_DEBUG("Dev (%s) init failed",
+ dev->device.name);
+ return -1;
+ }
+
+ /* This device is not required to be in the DPDK
+ * exposed device list.
+ */
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ break;
+ case DPAA2_UNKNOWN:
+ default:
+ /* Unknown - ignore */
+ DPAA2_BUS_DEBUG("Found unknown device (%s)",
+ dev->device.name);
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ }
+ }
+
+ return 0;
+}
+
+int
+fslmc_vfio_setup_group(void)
+{
+ int groupid;
+ int ret;
+ struct vfio_group_status status = { .argsz = sizeof(status) };
+
+ /* if already done once */
+ if (container_device_fd)
+ return 0;
+
+ ret = fslmc_get_container_group(&groupid);
+ if (ret)
+ return ret;
+
+ /* In case this group was already opened, continue without any
+ * processing.
+ */
+ if (vfio_group.groupid == groupid) {
+ DPAA2_BUS_ERR("groupid already exists %d", groupid);
+ return 0;
+ }
+
+ /* Get the actual group fd */
+ ret = rte_vfio_get_group_fd(groupid);
+ if (ret < 0)
+ return ret;
+ vfio_group.fd = ret;
+
+ /* Check group viability */
+ ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_STATUS, &status);
+ if (ret) {
+ DPAA2_BUS_ERR("VFIO error getting group status");
+ close(vfio_group.fd);
+ rte_vfio_clear_group(vfio_group.fd);
+ return ret;
+ }
+
+ if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
+ DPAA2_BUS_ERR("VFIO group not viable");
+ close(vfio_group.fd);
+ rte_vfio_clear_group(vfio_group.fd);
+ return -EPERM;
+ }
+ /* Since Group is VIABLE, Store the groupid */
+ vfio_group.groupid = groupid;
+
+ /* check if group does not have a container yet */
+ if (!(status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
+ /* Now connect this IOMMU group to given container */
+ ret = vfio_connect_container();
+ if (ret) {
+ DPAA2_BUS_ERR(
+ "Error connecting container with groupid %d",
+ groupid);
+ close(vfio_group.fd);
+ rte_vfio_clear_group(vfio_group.fd);
+ return ret;
+ }
+ }
+
+ /* Get Device information */
+ ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, g_container);
+ if (ret < 0) {
+ DPAA2_BUS_ERR("Error getting device %s fd from group %d",
+ g_container, vfio_group.groupid);
+ close(vfio_group.fd);
+ rte_vfio_clear_group(vfio_group.fd);
+ return ret;
+ }
+ container_device_fd = ret;
+ DPAA2_BUS_DEBUG("VFIO Container FD is [0x%X]",
+ container_device_fd);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.h b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.h
new file mode 100644
index 00000000..9e2c4fee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _FSLMC_VFIO_H_
+#define _FSLMC_VFIO_H_
+
+#include <rte_vfio.h>
+
+#define DPAA2_MC_DPNI_DEVID 7
+#define DPAA2_MC_DPSECI_DEVID 3
+#define DPAA2_MC_DPCON_DEVID 5
+#define DPAA2_MC_DPIO_DEVID 9
+#define DPAA2_MC_DPBP_DEVID 10
+#define DPAA2_MC_DPCI_DEVID 11
+
+typedef struct fslmc_vfio_device {
+ int fd; /* fslmc root container device ?? */
+ int index; /*index of child object */
+ struct fslmc_vfio_device *child; /* Child object */
+} fslmc_vfio_device;
+
+typedef struct fslmc_vfio_group {
+ int fd; /* /dev/vfio/"groupid" */
+ int groupid;
+ struct fslmc_vfio_container *container;
+ int object_index;
+ struct fslmc_vfio_device *vfio_device;
+} fslmc_vfio_group;
+
+typedef struct fslmc_vfio_container {
+ int fd; /* /dev/vfio/vfio */
+ int used;
+ int index; /* index in group list */
+ struct fslmc_vfio_group *group;
+} fslmc_vfio_container;
+
+int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index);
+int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index);
+
+int rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
+ int vfio_dev_fd,
+ int num_irqs);
+
+int fslmc_vfio_setup_group(void);
+int fslmc_vfio_process_group(void);
+char *fslmc_get_container(void);
+int fslmc_get_container_group(int *gropuid);
+int rte_fslmc_vfio_dmamap(void);
+
+#endif /* _FSLMC_VFIO_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/dpbp.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpbp.c
new file mode 100644
index 00000000..0215d22d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpbp.c
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpbp.h>
+#include <fsl_dpbp_cmd.h>
+
+/**
+ * dpbp_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpbp_id: DPBP unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpbp_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpbp_id,
+ uint16_t *token)
+{
+ struct dpbp_cmd_open *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
+ cmd_flags, 0);
+ cmd_params = (struct dpbp_cmd_open *)cmd.params;
+ cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+
+/**
+ * dpbp_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpbp_create() - Create the DPBP object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id; use in subsequent API calls
+ *
+ * Create the DPBP object, allocate required resources and
+ * perform required initialization.
+ *
+ * This function accepts an authentication token of a parent
+ * container that this object should be assigned to and returns
+ * an object id. This object_id will be used in all subsequent calls to
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpbp_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ (void)(cfg); /* unused */
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
+ cmd_flags, dprc_token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpbp_destroy() - Destroy the DPBP object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @obj_id: ID of DPBP object
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpbp_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t obj_id)
+{
+ struct dpbp_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
+ cmd_flags, dprc_token);
+
+ cmd_params = (struct dpbp_cmd_destroy *)cmd.params;
+ cmd_params->object_id = cpu_to_le32(obj_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpbp_enable() - Enable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpbp_disable() - Disable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpbp_is_enabled() - Check if the DPBP is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpbp_rsp_is_enabled *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpbp_rsp_is_enabled *)cmd.params;
+ *en = rsp_params->enabled & DPBP_ENABLE;
+
+ return 0;
+}
+
+/**
+ * dpbp_reset() - Reset the DPBP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpbp_attr *attr)
+{
+ struct dpbp_rsp_get_attributes *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
+ attr->bpid = le16_to_cpu(rsp_params->bpid);
+ attr->id = le32_to_cpu(rsp_params->id);
+
+ return 0;
+}
+
+/**
+ * dpbp_get_api_version - Get Data Path Buffer Pool API version
+ * @mc_io: Pointer to Mc portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of Buffer Pool API
+ * @minor_ver: Minor version of Buffer Pool API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct dpbp_rsp_get_api_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpbp_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpbp_get_num_free_bufs() - Get number of free buffers in the buffer pool
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @num_free_bufs: Number of free buffers
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+
+int dpbp_get_num_free_bufs(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint32_t *num_free_bufs)
+{
+ struct dpbp_rsp_get_num_free_bufs *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_FREE_BUFFERS_NUM,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpbp_rsp_get_num_free_bufs *)cmd.params;
+ *num_free_bufs = le32_to_cpu(rsp_params->num_free_bufs);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/dpci.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpci.c
new file mode 100644
index 00000000..ff366bfa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpci.c
@@ -0,0 +1,440 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpci.h>
+#include <fsl_dpci_cmd.h>
+
+/**
+ * dpci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpci_id: DPCI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpci_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpci_id,
+ uint16_t *token)
+{
+ struct dpci_cmd_open *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpci_cmd_open *)cmd.params;
+ cmd_params->dpci_id = cpu_to_le32(dpci_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpci_create() - Create the DPCI object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPCI object, allocate required resources and perform required
+ * initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpci_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct dpci_cmd_create *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpci_cmd_create *)cmd.params;
+ cmd_params->num_of_priorities = cfg->num_of_priorities;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpci_destroy() - Destroy the DPCI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct dpci_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpci_cmd_destroy *)cmd.params;
+ cmd_params->dpci_id = cpu_to_le32(object_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpci_enable() - Enable the DPCI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpci_disable() - Disable the DPCI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpci_is_enabled() - Check if the DPCI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpci_rsp_is_enabled *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_IS_ENABLED, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_is_enabled *)cmd.params;
+ *en = dpci_get_field(rsp_params->en, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpci_reset() - Reset the DPCI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpci_attr *attr)
+{
+ struct dpci_rsp_get_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->num_of_priorities = rsp_params->num_of_priorities;
+
+ return 0;
+}
+
+int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpci_rx_queue_cfg *cfg)
+{
+ struct dpci_cmd_set_rx_queue *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpci_cmd_set_rx_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->priority = priority;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpci_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpci_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPCI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_rx_queue_attr *attr)
+{
+ struct dpci_cmd_get_queue *cmd_params;
+ struct dpci_rsp_get_rx_queue *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpci_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_get_rx_queue *)cmd.params;
+ attr->user_ctx = le64_to_cpu(rsp_params->user_ctx);
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ attr->dest_cfg.priority = rsp_params->dest_priority;
+ attr->dest_cfg.dest_type = dpci_get_field(rsp_params->dest_type,
+ DEST_TYPE);
+
+ return 0;
+}
+
+/**
+ * dpci_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities of the peer DPCI object
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_tx_queue_attr *attr)
+{
+ struct dpci_cmd_get_queue *cmd_params;
+ struct dpci_rsp_get_tx_queue *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpci_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+
+ return 0;
+}
+
+/**
+ * dpci_get_api_version() - Get communication interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path communication interface API
+ * @minor_ver: Minor version of data path communication interface API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct dpci_rsp_get_api_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpci_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/dpcon.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpcon.c
new file mode 100644
index 00000000..3f6e04b9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpcon.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpcon.h>
+#include <fsl_dpcon_cmd.h>
+
+/**
+ * dpcon_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpcon_id: DPCON unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpcon_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpcon_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ struct dpcon_cmd_open *dpcon_cmd;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ dpcon_cmd = (struct dpcon_cmd_open *)cmd.params;
+ dpcon_cmd->dpcon_id = cpu_to_le32(dpcon_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpcon_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpcon_create() - Create the DPCON object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id; use in subsequent API calls
+ *
+ * Create the DPCON object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * This function accepts an authentication token of a parent
+ * container that this object should be assigned to and returns
+ * an object id. This object_id will be used in all subsequent calls to
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpcon_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct dpcon_cmd_create *dpcon_cmd;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ dpcon_cmd = (struct dpcon_cmd_create *)cmd.params;
+ dpcon_cmd->num_priorities = cfg->num_priorities;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpcon_destroy() - Destroy the DPCON object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @obj_id: ID of DPCON object
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpcon_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t obj_id)
+{
+ struct dpcon_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpcon_cmd_destroy *)cmd.params;
+ cmd_params->object_id = cpu_to_le32(obj_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpcon_enable() - Enable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpcon_disable() - Disable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpcon_is_enabled() - Check if the DPCON is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpcon_rsp_is_enabled *dpcon_rsp;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ dpcon_rsp = (struct dpcon_rsp_is_enabled *)cmd.params;
+ *en = dpcon_rsp->enabled & DPCON_ENABLE;
+
+ return 0;
+}
+
+/**
+ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpcon_get_attributes() - Retrieve DPCON attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpcon_attr *attr)
+{
+ struct dpcon_rsp_get_attr *dpcon_rsp;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ dpcon_rsp = (struct dpcon_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(dpcon_rsp->id);
+ attr->qbman_ch_id = le16_to_cpu(dpcon_rsp->qbman_ch_id);
+ attr->num_priorities = dpcon_rsp->num_priorities;
+
+ return 0;
+}
+
+/**
+ * dpcon_get_api_version - Get Data Path Concentrator API version
+ * @mc_io: Pointer to MC portal's DPCON object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of DPCON API
+ * @minor_ver: Minor version of DPCON API
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct dpcon_rsp_get_api_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpcon_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/dpdmai.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpdmai.c
new file mode 100644
index 00000000..528889df
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpdmai.c
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpdmai.h>
+#include <fsl_dpdmai_cmd.h>
+
+/**
+ * dpdmai_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpdmai_id: DPDMAI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpdmai_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpdmai_id,
+ uint16_t *token)
+{
+ struct dpdmai_cmd_open *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpdmai_cmd_open *)cmd.params;
+ cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpdmai_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_create() - Create the DPDMAI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPDMAI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpdmai_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct dpdmai_cmd_create *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpdmai_cmd_create *)cmd.params;
+ cmd_params->priorities[0] = cfg->priorities[0];
+ cmd_params->priorities[1] = cfg->priorities[1];
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpdmai_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct dpdmai_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpdmai_cmd_destroy *)cmd.params;
+ cmd_params->dpdmai_id = cpu_to_le32(object_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpdmai_rsp_is_enabled *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_is_enabled *)cmd.params;
+ *en = dpdmai_get_field(rsp_params->en, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpdmai_attr *attr)
+{
+ struct dpdmai_rsp_get_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->num_of_priorities = rsp_params->num_of_priorities;
+
+ return 0;
+}
+
+/**
+ * dpdmai_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation; use
+ * DPDMAI_ALL_QUEUES to configure all Rx queues
+ * identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpdmai_rx_queue_cfg *cfg)
+{
+ struct dpdmai_cmd_set_rx_queue *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmai_cmd_set_rx_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->priority = priority;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpdmai_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_rx_queue_attr *attr)
+{
+ struct dpdmai_cmd_get_queue *cmd_params;
+ struct dpdmai_rsp_get_rx_queue *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_rx_queue *)cmd.params;
+ attr->user_ctx = le64_to_cpu(rsp_params->user_ctx);
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ attr->dest_cfg.priority = le32_to_cpu(rsp_params->dest_priority);
+ attr->dest_cfg.dest_type = dpdmai_get_field(rsp_params->dest_type,
+ DEST_TYPE);
+
+ return 0;
+}
+
+/**
+ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_tx_queue_attr *attr)
+{
+ struct dpdmai_cmd_get_queue *cmd_params;
+ struct dpdmai_rsp_get_tx_queue *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/dpio.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpio.c
new file mode 100644
index 00000000..966277cc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpio.c
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpio.h>
+#include <fsl_dpio_cmd.h>
+
+/**
+ * dpio_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpio_id: DPIO unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpio_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and any MC portals
+ * assigned to the parent container; this token must be used in
+ * all subsequent commands for this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpio_id,
+ uint16_t *token)
+{
+ struct dpio_cmd_open *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpio_cmd_open *)cmd.params;
+ cmd_params->dpio_id = cpu_to_le32(dpio_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpio_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_create() - Create the DPIO object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPIO object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpio_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct dpio_cmd_create *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpio_cmd_create *)cmd.params;
+ cmd_params->num_priorities = cfg->num_priorities;
+ dpio_set_field(cmd_params->channel_mode,
+ CHANNEL_MODE,
+ cfg->channel_mode);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpio_destroy() - Destroy the DPIO object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct dpio_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+
+ /* set object id to destroy */
+ cmd_params = (struct dpio_cmd_destroy *)cmd.params;
+ cmd_params->dpio_id = cpu_to_le32(object_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_is_enabled() - Check if the DPIO is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpio_rsp_is_enabled *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpio_rsp_is_enabled *)cmd.params;
+ *en = dpio_get_field(rsp_params->en, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpio_reset() - Reset the DPIO, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpio_attr *attr)
+{
+ struct dpio_rsp_get_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpio_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->qbman_portal_id = le16_to_cpu(rsp_params->qbman_portal_id);
+ attr->num_priorities = rsp_params->num_priorities;
+ attr->qbman_portal_ce_offset =
+ le64_to_cpu(rsp_params->qbman_portal_ce_offset);
+ attr->qbman_portal_ci_offset =
+ le64_to_cpu(rsp_params->qbman_portal_ci_offset);
+ attr->qbman_version = le32_to_cpu(rsp_params->qbman_version);
+ attr->clk = le32_to_cpu(rsp_params->clk);
+ attr->channel_mode = dpio_get_field(rsp_params->channel_mode,
+ ATTR_CHANNEL_MODE);
+
+ return 0;
+}
+
+/**
+ * dpio_set_stashing_destination() - Set the stashing destination.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @sdest: Stashing destination value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t sdest)
+{
+ struct dpio_stashing_dest *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpio_stashing_dest *)cmd.params;
+ cmd_params->sdest = sdest;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_get_stashing_destination() - Get the stashing destination..
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @sdest: Returns the stashing destination value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t *sdest)
+{
+ struct dpio_stashing_dest *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpio_stashing_dest *)cmd.params;
+ *sdest = rsp_params->sdest;
+
+ return 0;
+}
+
+/**
+ * dpio_add_static_dequeue_channel() - Add a static dequeue channel.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @dpcon_id: DPCON object ID
+ * @channel_index: Returned channel index to be used in qbman API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id,
+ uint8_t *channel_index)
+{
+ struct dpio_rsp_add_static_dequeue_channel *rsp_params;
+ struct dpio_cmd_static_dequeue_channel *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpio_cmd_static_dequeue_channel *)cmd.params;
+ cmd_params->dpcon_id = cpu_to_le32(dpcon_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpio_rsp_add_static_dequeue_channel *)cmd.params;
+ *channel_index = rsp_params->channel_index;
+
+ return 0;
+}
+
+/**
+ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @dpcon_id: DPCON object ID
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id)
+{
+ struct dpio_cmd_static_dequeue_channel *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpio_cmd_static_dequeue_channel *)cmd.params;
+ cmd_params->dpcon_id = cpu_to_le32(dpcon_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_get_api_version() - Get Data Path I/O API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path i/o API
+ * @minor_ver: Minor version of data path i/o API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct dpio_rsp_get_api_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpio_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/dpmng.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpmng.c
new file mode 100644
index 00000000..27708087
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpmng.c
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpmng.h>
+#include <fsl_dpmng_cmd.h>
+
+/**
+ * mc_get_version() - Retrieves the Management Complex firmware
+ * version information
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_ver_info: Returned version information structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int mc_get_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_version *mc_ver_info)
+{
+ struct mc_command cmd = { 0 };
+ struct dpmng_rsp_get_version *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
+ cmd_flags,
+ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
+ mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
+ mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
+ mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
+
+ return 0;
+}
+
+/**
+ * mc_get_soc_version() - Retrieves the Management Complex firmware
+ * version information
+ * @mc_io Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_platform_info: Returned version information structure. The structure
+ * contains the values of SVR and PVR registers.
+ * Please consult platform specific reference manual
+ * for detailed information.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int mc_get_soc_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_soc_version *mc_platform_info)
+{
+ struct dpmng_rsp_get_soc_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_SOC_VERSION,
+ cmd_flags,
+ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmng_rsp_get_soc_version *)cmd.params;
+ mc_platform_info->svr = le32_to_cpu(rsp_params->svr);
+ mc_platform_info->pvr = le32_to_cpu(rsp_params->pvr);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp.h
new file mode 100644
index 00000000..11183626
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef __FSL_DPBP_H
+#define __FSL_DPBP_H
+
+/*
+ * Data Path Buffer Pool API
+ * Contains initialization APIs and runtime control APIs for DPBP
+ */
+
+struct fsl_mc_io;
+
+int dpbp_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpbp_id,
+ uint16_t *token);
+
+int dpbp_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpbp_cfg - Structure representing DPBP configuration
+ * @options: place holder
+ */
+struct dpbp_cfg {
+ uint32_t options;
+};
+
+int dpbp_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpbp_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpbp_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t obj_id);
+
+int dpbp_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpbp_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpbp_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpbp_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpbp_attr - Structure representing DPBP attributes
+ * @id: DPBP object ID
+ * @bpid: Hardware buffer pool ID; should be used as an argument in
+ * acquire/release operations on buffers
+ */
+struct dpbp_attr {
+ int id;
+ uint16_t bpid;
+};
+
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpbp_attr *attr);
+
+/**
+ * DPBP notifications options
+ */
+
+/**
+ * BPSCN write will attempt to allocate into a cache (coherent write)
+ */
+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+int dpbp_get_num_free_bufs(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint32_t *num_free_bufs);
+
+#endif /* __FSL_DPBP_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
new file mode 100644
index 00000000..18402ced
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef _FSL_DPBP_CMD_H
+#define _FSL_DPBP_CMD_H
+
+/* DPBP Version */
+#define DPBP_VER_MAJOR 3
+#define DPBP_VER_MINOR 3
+
+/* Command versioning */
+#define DPBP_CMD_BASE_VERSION 1
+#define DPBP_CMD_ID_OFFSET 4
+
+#define DPBP_CMD(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
+#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
+#define DPBP_CMDID_CREATE DPBP_CMD(0x904)
+#define DPBP_CMDID_DESTROY DPBP_CMD(0x984)
+#define DPBP_CMDID_GET_API_VERSION DPBP_CMD(0xa04)
+
+#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
+#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
+#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
+#define DPBP_CMDID_RESET DPBP_CMD(0x005)
+#define DPBP_CMDID_IS_ENABLED DPBP_CMD(0x006)
+
+#define DPBP_CMDID_SET_IRQ_ENABLE DPBP_CMD(0x012)
+#define DPBP_CMDID_GET_IRQ_ENABLE DPBP_CMD(0x013)
+#define DPBP_CMDID_SET_IRQ_MASK DPBP_CMD(0x014)
+#define DPBP_CMDID_GET_IRQ_MASK DPBP_CMD(0x015)
+#define DPBP_CMDID_GET_IRQ_STATUS DPBP_CMD(0x016)
+#define DPBP_CMDID_CLEAR_IRQ_STATUS DPBP_CMD(0x017)
+
+#define DPBP_CMDID_SET_NOTIFICATIONS DPBP_CMD(0x1b0)
+#define DPBP_CMDID_GET_NOTIFICATIONS DPBP_CMD(0x1b1)
+
+#define DPBP_CMDID_GET_FREE_BUFFERS_NUM DPBP_CMD(0x1b2)
+
+#pragma pack(push, 1)
+struct dpbp_cmd_open {
+ uint32_t dpbp_id;
+};
+
+struct dpbp_cmd_destroy {
+ uint32_t object_id;
+};
+
+#define DPBP_ENABLE 0x1
+
+struct dpbp_rsp_is_enabled {
+ uint8_t enabled;
+};
+
+struct dpbp_rsp_get_attributes {
+ uint16_t pad;
+ uint16_t bpid;
+ uint32_t id;
+};
+
+struct dpbp_cmd_set_notifications {
+ uint32_t depletion_entry;
+ uint32_t depletion_exit;
+ uint32_t surplus_entry;
+ uint32_t surplus_exit;
+ uint16_t options;
+ uint16_t pad[3];
+ uint64_t message_ctx;
+ uint64_t message_iova;
+};
+
+struct dpbp_rsp_get_notifications {
+ uint32_t depletion_entry;
+ uint32_t depletion_exit;
+ uint32_t surplus_entry;
+ uint32_t surplus_exit;
+ uint16_t options;
+ uint16_t pad[3];
+ uint64_t message_ctx;
+ uint64_t message_iova;
+};
+
+struct dpbp_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+struct dpbp_rsp_get_num_free_bufs {
+ uint32_t num_free_bufs;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPBP_CMD_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci.h
new file mode 100644
index 00000000..f69ed3f3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#ifndef __FSL_DPCI_H
+#define __FSL_DPCI_H
+
+/* Data Path Communication Interface API
+ * Contains initialization APIs and runtime control APIs for DPCI
+ */
+
+struct fsl_mc_io;
+
+/** General DPCI macros */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPCI object
+ */
+#define DPCI_PRIO_NUM 2
+
+/**
+ * Indicates an invalid frame queue
+ */
+#define DPCI_FQID_NOT_VALID (uint32_t)(-1)
+
+/**
+ * All queues considered; see dpci_set_rx_queue()
+ */
+#define DPCI_ALL_QUEUES (uint8_t)(-1)
+
+int dpci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpci_id,
+ uint16_t *token);
+
+int dpci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * Enable the Order Restoration support
+ */
+#define DPCI_OPT_HAS_OPR 0x000040
+
+/**
+ * Order Point Records are shared for the entire DPCI
+ */
+#define DPCI_OPT_OPR_SHARED 0x000080
+
+/**
+ * struct dpci_cfg - Structure representing DPCI configuration
+ * @options: Any combination of the following options:
+ * DPCI_OPT_HAS_OPR
+ * DPCI_OPT_OPR_SHARED
+ * @num_of_priorities: Number of receive priorities (queues) for the DPCI;
+ * note, that the number of transmit priorities (queues)
+ * is determined by the number of receive priorities of
+ * the peer DPCI object
+ */
+struct dpci_cfg {
+ uint32_t options;
+ uint8_t num_of_priorities;
+};
+
+int dpci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpci_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+int dpci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpci_attr - Structure representing DPCI attributes
+ * @id: DPCI object ID
+ * @num_of_priorities: Number of receive priorities
+ */
+struct dpci_attr {
+ int id;
+ uint8_t num_of_priorities;
+};
+
+int dpci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpci_attr *attr);
+
+/**
+ * enum dpci_dest - DPCI destination types
+ * @DPCI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is
+ * expected to dequeue from the queue based on polling or
+ * other user-defined method
+ * @DPCI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected
+ * to dequeue from the queue only after notification is
+ * received
+ * @DPCI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified
+ * DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpci_dest {
+ DPCI_DEST_NONE = 0,
+ DPCI_DEST_DPIO = 1,
+ DPCI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpci_dest_cfg - Structure representing DPCI destination configuration
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid
+ * values are 0-1 or 0-7, depending on the number of priorities
+ * in that channel; not relevant for 'DPCI_DEST_NONE' option
+ */
+struct dpci_dest_cfg {
+ enum dpci_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/** DPCI queue modification options */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPCI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPCI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * struct dpci_rx_queue_cfg - Structure representing RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPCI_QUEUE_OPT_<X>' flags
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPCI_QUEUE_OPT_USER_CTX' is contained in
+ * 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPCI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpci_rx_queue_cfg {
+ uint32_t options;
+ uint64_t user_ctx;
+ struct dpci_dest_cfg dest_cfg;
+};
+
+int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpci_rx_queue_cfg *cfg);
+
+/**
+ * struct dpci_rx_queue_attr - Structure representing Rx queue attributes
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpci_rx_queue_attr {
+ uint64_t user_ctx;
+ struct dpci_dest_cfg dest_cfg;
+ uint32_t fqid;
+};
+
+int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_rx_queue_attr *attr);
+
+/**
+ * struct dpci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to peer DPCI;
+ * returns 'DPCI_FQID_NOT_VALID' if a no peer is connected or if
+ * the selected priority exceeds the number of priorities of the
+ * peer DPCI object
+ */
+struct dpci_tx_queue_attr {
+ uint32_t fqid;
+};
+
+int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_tx_queue_attr *attr);
+
+int dpci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+#endif /* __FSL_DPCI_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
new file mode 100644
index 00000000..634248ac
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#ifndef _FSL_DPCI_CMD_H
+#define _FSL_DPCI_CMD_H
+
+/* DPCI Version */
+#define DPCI_VER_MAJOR 3
+#define DPCI_VER_MINOR 3
+
+#define DPCI_CMD_BASE_VERSION 1
+#define DPCI_CMD_BASE_VERSION_V2 2
+#define DPCI_CMD_ID_OFFSET 4
+
+#define DPCI_CMD_V1(id) ((id << DPCI_CMD_ID_OFFSET) | DPCI_CMD_BASE_VERSION)
+#define DPCI_CMD_V2(id) ((id << DPCI_CMD_ID_OFFSET) | DPCI_CMD_BASE_VERSION_V2)
+
+/* Command IDs */
+#define DPCI_CMDID_CLOSE DPCI_CMD_V1(0x800)
+#define DPCI_CMDID_OPEN DPCI_CMD_V1(0x807)
+#define DPCI_CMDID_CREATE DPCI_CMD_V2(0x907)
+#define DPCI_CMDID_DESTROY DPCI_CMD_V1(0x987)
+#define DPCI_CMDID_GET_API_VERSION DPCI_CMD_V1(0xa07)
+
+#define DPCI_CMDID_ENABLE DPCI_CMD_V1(0x002)
+#define DPCI_CMDID_DISABLE DPCI_CMD_V1(0x003)
+#define DPCI_CMDID_GET_ATTR DPCI_CMD_V1(0x004)
+#define DPCI_CMDID_RESET DPCI_CMD_V1(0x005)
+#define DPCI_CMDID_IS_ENABLED DPCI_CMD_V1(0x006)
+
+#define DPCI_CMDID_SET_RX_QUEUE DPCI_CMD_V1(0x0e0)
+#define DPCI_CMDID_GET_LINK_STATE DPCI_CMD_V1(0x0e1)
+#define DPCI_CMDID_GET_PEER_ATTR DPCI_CMD_V1(0x0e2)
+#define DPCI_CMDID_GET_RX_QUEUE DPCI_CMD_V1(0x0e3)
+#define DPCI_CMDID_GET_TX_QUEUE DPCI_CMD_V1(0x0e4)
+
+/* Macros for accessing command fields smaller than 1byte */
+#define DPCI_MASK(field) \
+ GENMASK(DPCI_##field##_SHIFT + DPCI_##field##_SIZE - 1, \
+ DPCI_##field##_SHIFT)
+#define dpci_set_field(var, field, val) \
+ ((var) |= (((val) << DPCI_##field##_SHIFT) & DPCI_MASK(field)))
+#define dpci_get_field(var, field) \
+ (((var) & DPCI_MASK(field)) >> DPCI_##field##_SHIFT)
+
+#pragma pack(push, 1)
+struct dpci_cmd_open {
+ uint32_t dpci_id;
+};
+
+struct dpci_cmd_create {
+ uint8_t num_of_priorities;
+ uint8_t pad[15];
+ uint32_t options;
+};
+
+struct dpci_cmd_destroy {
+ uint32_t dpci_id;
+};
+
+#define DPCI_ENABLE_SHIFT 0
+#define DPCI_ENABLE_SIZE 1
+
+struct dpci_rsp_is_enabled {
+ /* only the LSB bit */
+ uint8_t en;
+};
+
+struct dpci_rsp_get_attr {
+ uint32_t id;
+ uint16_t pad;
+ uint8_t num_of_priorities;
+};
+
+struct dpci_rsp_get_peer_attr {
+ uint32_t id;
+ uint32_t pad;
+ uint8_t num_of_priorities;
+};
+
+#define DPCI_UP_SHIFT 0
+#define DPCI_UP_SIZE 1
+
+struct dpci_rsp_get_link_state {
+ /* only the LSB bit */
+ uint8_t up;
+};
+
+#define DPCI_DEST_TYPE_SHIFT 0
+#define DPCI_DEST_TYPE_SIZE 4
+
+struct dpci_cmd_set_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t priority;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad;
+ uint64_t user_ctx;
+ uint32_t options;
+};
+
+struct dpci_cmd_get_queue {
+ uint8_t pad[5];
+ uint8_t priority;
+};
+
+struct dpci_rsp_get_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t pad;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad1;
+ uint64_t user_ctx;
+ uint32_t fqid;
+};
+
+struct dpci_rsp_get_tx_queue {
+ uint32_t pad;
+ uint32_t fqid;
+};
+
+struct dpci_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPCI_CMD_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon.h
new file mode 100644
index 00000000..36dd5f3c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#ifndef __FSL_DPCON_H
+#define __FSL_DPCON_H
+
+/* Data Path Concentrator API
+ * Contains initialization APIs and runtime control APIs for DPCON
+ */
+
+struct fsl_mc_io;
+
+/** General DPCON macros */
+
+/**
+ * Use it to disable notifications; see dpcon_set_notification()
+ */
+#define DPCON_INVALID_DPIO_ID (int)(-1)
+
+int dpcon_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpcon_id,
+ uint16_t *token);
+
+int dpcon_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpcon_cfg - Structure representing DPCON configuration
+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
+ */
+struct dpcon_cfg {
+ uint8_t num_priorities;
+};
+
+int dpcon_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpcon_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpcon_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t obj_id);
+
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpcon_attr - Structure representing DPCON attributes
+ * @id: DPCON object ID
+ * @qbman_ch_id: Channel ID to be used by dequeue operation
+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
+ */
+struct dpcon_attr {
+ int id;
+ uint16_t qbman_ch_id;
+ uint8_t num_priorities;
+};
+
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpcon_attr *attr);
+
+int dpcon_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+#endif /* __FSL_DPCON_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
new file mode 100644
index 00000000..1641e320
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef _FSL_DPCON_CMD_H
+#define _FSL_DPCON_CMD_H
+
+/* DPCON Version */
+#define DPCON_VER_MAJOR 3
+#define DPCON_VER_MINOR 3
+
+
+/* Command versioning */
+#define DPCON_CMD_BASE_VERSION 1
+#define DPCON_CMD_ID_OFFSET 4
+
+#define DPCON_CMD(id) ((id << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
+#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
+#define DPCON_CMDID_CREATE DPCON_CMD(0x908)
+#define DPCON_CMDID_DESTROY DPCON_CMD(0x988)
+#define DPCON_CMDID_GET_API_VERSION DPCON_CMD(0xa08)
+
+#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
+#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
+#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004)
+#define DPCON_CMDID_RESET DPCON_CMD(0x005)
+#define DPCON_CMDID_IS_ENABLED DPCON_CMD(0x006)
+
+#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100)
+
+#pragma pack(push, 1)
+struct dpcon_cmd_open {
+ uint32_t dpcon_id;
+};
+
+struct dpcon_cmd_create {
+ uint8_t num_priorities;
+};
+
+struct dpcon_cmd_destroy {
+ uint32_t object_id;
+};
+
+#define DPCON_ENABLE 1
+
+struct dpcon_rsp_is_enabled {
+ uint8_t enabled;
+};
+
+struct dpcon_rsp_get_attr {
+ uint32_t id;
+ uint16_t qbman_ch_id;
+ uint8_t num_priorities;
+ uint8_t pad;
+};
+
+struct dpcon_cmd_set_notification {
+ uint32_t dpio_id;
+ uint8_t priority;
+ uint8_t pad[3];
+ uint64_t user_ctx;
+};
+
+struct dpcon_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPCON_CMD_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai.h
new file mode 100644
index 00000000..03e46ec1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __FSL_DPDMAI_H
+#define __FSL_DPDMAI_H
+
+struct fsl_mc_io;
+
+/* Data Path DMA Interface API
+ * Contains initialization APIs and runtime control APIs for DPDMAI
+ */
+
+/* General DPDMAI macros */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPDMAI object
+ */
+#define DPDMAI_PRIO_NUM 2
+
+/**
+ * All queues considered; see dpdmai_set_rx_queue()
+ */
+#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
+
+int dpdmai_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpdmai_id,
+ uint16_t *token);
+
+int dpdmai_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpdmai_cfg - Structure representing DPDMAI configuration
+ * @priorities: Priorities for the DMA hardware processing; valid priorities are
+ * configured with values 1-8; the entry following last valid entry
+ * should be configured with 0
+ */
+struct dpdmai_cfg {
+ uint8_t priorities[DPDMAI_PRIO_NUM];
+};
+
+int dpdmai_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpdmai_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpdmai_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+int dpdmai_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpdmai_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpdmai_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpdmai_attr - Structure representing DPDMAI attributes
+ * @id: DPDMAI object ID
+ * @num_of_priorities: number of priorities
+ */
+struct dpdmai_attr {
+ int id;
+ uint8_t num_of_priorities;
+};
+
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpdmai_attr *attr);
+
+/**
+ * enum dpdmai_dest - DPDMAI destination types
+ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to dequeue
+ * from the queue based on polling or other user-defined method
+ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpdmai_dest {
+ DPDMAI_DEST_NONE = 0,
+ DPDMAI_DEST_DPIO = 1,
+ DPDMAI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPDMAI_DEST_NONE' option
+ */
+struct dpdmai_dest_cfg {
+ enum dpdmai_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/* DPDMAI queue modification options */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPDMAI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpdmai_rx_queue_cfg {
+ uint32_t options;
+ uint64_t user_ctx;
+ struct dpdmai_dest_cfg dest_cfg;
+
+};
+
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpdmai_rx_queue_cfg *cfg);
+
+/**
+ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpdmai_rx_queue_attr {
+ uint64_t user_ctx;
+ struct dpdmai_dest_cfg dest_cfg;
+ uint32_t fqid;
+};
+
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_rx_queue_attr *attr);
+
+/**
+ * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to DMA hardware
+ */
+
+struct dpdmai_tx_queue_attr {
+ uint32_t fqid;
+};
+
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_tx_queue_attr *attr);
+
+#endif /* __FSL_DPDMAI_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
new file mode 100644
index 00000000..618e19ea
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _FSL_DPDMAI_CMD_H
+#define _FSL_DPDMAI_CMD_H
+
+/* DPDMAI Version */
+#define DPDMAI_VER_MAJOR 3
+#define DPDMAI_VER_MINOR 2
+
+/* Command versioning */
+#define DPDMAI_CMD_BASE_VERSION 1
+#define DPDMAI_CMD_ID_OFFSET 4
+
+#define DPDMAI_CMD(id) ((id << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPDMAI_CMDID_CLOSE DPDMAI_CMD(0x800)
+#define DPDMAI_CMDID_OPEN DPDMAI_CMD(0x80E)
+#define DPDMAI_CMDID_CREATE DPDMAI_CMD(0x90E)
+#define DPDMAI_CMDID_DESTROY DPDMAI_CMD(0x98E)
+#define DPDMAI_CMDID_GET_API_VERSION DPDMAI_CMD(0xa0E)
+
+#define DPDMAI_CMDID_ENABLE DPDMAI_CMD(0x002)
+#define DPDMAI_CMDID_DISABLE DPDMAI_CMD(0x003)
+#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMD(0x004)
+#define DPDMAI_CMDID_RESET DPDMAI_CMD(0x005)
+#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMD(0x006)
+
+#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMD(0x1A0)
+#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMD(0x1A1)
+#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMD(0x1A2)
+
+/* Macros for accessing command fields smaller than 1byte */
+#define DPDMAI_MASK(field) \
+ GENMASK(DPDMAI_##field##_SHIFT + DPDMAI_##field##_SIZE - 1, \
+ DPDMAI_##field##_SHIFT)
+#define dpdmai_set_field(var, field, val) \
+ ((var) |= (((val) << DPDMAI_##field##_SHIFT) & DPDMAI_MASK(field)))
+#define dpdmai_get_field(var, field) \
+ (((var) & DPDMAI_MASK(field)) >> DPDMAI_##field##_SHIFT)
+
+#pragma pack(push, 1)
+struct dpdmai_cmd_open {
+ uint32_t dpdmai_id;
+};
+
+struct dpdmai_cmd_create {
+ uint8_t pad;
+ uint8_t priorities[2];
+};
+
+struct dpdmai_cmd_destroy {
+ uint32_t dpdmai_id;
+};
+
+#define DPDMAI_ENABLE_SHIFT 0
+#define DPDMAI_ENABLE_SIZE 1
+
+struct dpdmai_rsp_is_enabled {
+ /* only the LSB bit */
+ uint8_t en;
+};
+
+struct dpdmai_rsp_get_attr {
+ uint32_t id;
+ uint8_t num_of_priorities;
+};
+
+#define DPDMAI_DEST_TYPE_SHIFT 0
+#define DPDMAI_DEST_TYPE_SIZE 4
+
+struct dpdmai_cmd_set_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t priority;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad;
+ uint64_t user_ctx;
+ uint32_t options;
+};
+
+struct dpdmai_cmd_get_queue {
+ uint8_t pad[5];
+ uint8_t priority;
+};
+
+struct dpdmai_rsp_get_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t pad1;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad2;
+ uint64_t user_ctx;
+ uint32_t fqid;
+};
+
+struct dpdmai_rsp_get_tx_queue {
+ uint64_t pad;
+ uint32_t fqid;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPDMAI_CMD_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio.h
new file mode 100644
index 00000000..3158f531
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef __FSL_DPIO_H
+#define __FSL_DPIO_H
+
+/* Data Path I/O Portal API
+ * Contains initialization APIs and runtime control APIs for DPIO
+ */
+
+struct fsl_mc_io;
+
+int dpio_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpio_id,
+ uint16_t *token);
+
+int dpio_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * enum dpio_channel_mode - DPIO notification channel mode
+ * @DPIO_NO_CHANNEL: No support for notification channel
+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
+ * dedicated channel in the DPIO; user should point the queue's
+ * destination in the relevant interface to this DPIO
+ */
+enum dpio_channel_mode {
+ DPIO_NO_CHANNEL = 0,
+ DPIO_LOCAL_CHANNEL = 1,
+};
+
+/**
+ * struct dpio_cfg - Structure representing DPIO configuration
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ */
+struct dpio_cfg {
+ enum dpio_channel_mode channel_mode;
+ uint8_t num_priorities;
+};
+
+
+int dpio_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpio_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpio_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+int dpio_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpio_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpio_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpio_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t sdest);
+
+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t *sdest);
+
+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id,
+ uint8_t *channel_index);
+
+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id);
+
+/**
+ * struct dpio_attr - Structure representing DPIO attributes
+ * @id: DPIO object ID
+ * @qbman_portal_ce_offset: Offset of the software portal cache-enabled area
+ * @qbman_portal_ci_offset: Offset of the software portal
+ * cache-inhibited area
+ * @qbman_portal_id: Software portal ID
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification
+ * channel (1-8); relevant only if
+ * 'channel_mode = DPIO_LOCAL_CHANNEL'
+ * @qbman_version: QBMAN version
+ */
+struct dpio_attr {
+ int id;
+ uint64_t qbman_portal_ce_offset;
+ uint64_t qbman_portal_ci_offset;
+ uint16_t qbman_portal_id;
+ enum dpio_channel_mode channel_mode;
+ uint8_t num_priorities;
+ uint32_t qbman_version;
+ uint32_t clk;
+};
+
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpio_attr *attr);
+
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+#endif /* __FSL_DPIO_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
new file mode 100644
index 00000000..16a9bc41
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef _FSL_DPIO_CMD_H
+#define _FSL_DPIO_CMD_H
+
+/* DPIO Version */
+#define DPIO_VER_MAJOR 4
+#define DPIO_VER_MINOR 2
+
+#define DPIO_CMD_BASE_VERSION 1
+#define DPIO_CMD_ID_OFFSET 4
+
+#define DPIO_CMD(id) (((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPIO_CMDID_CLOSE DPIO_CMD(0x800)
+#define DPIO_CMDID_OPEN DPIO_CMD(0x803)
+#define DPIO_CMDID_CREATE DPIO_CMD(0x903)
+#define DPIO_CMDID_DESTROY DPIO_CMD(0x983)
+#define DPIO_CMDID_GET_API_VERSION DPIO_CMD(0xa03)
+
+#define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
+#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
+#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
+#define DPIO_CMDID_RESET DPIO_CMD(0x005)
+#define DPIO_CMDID_IS_ENABLED DPIO_CMD(0x006)
+
+#define DPIO_CMDID_SET_IRQ_ENABLE DPIO_CMD(0x012)
+#define DPIO_CMDID_GET_IRQ_ENABLE DPIO_CMD(0x013)
+#define DPIO_CMDID_SET_IRQ_MASK DPIO_CMD(0x014)
+#define DPIO_CMDID_GET_IRQ_MASK DPIO_CMD(0x015)
+#define DPIO_CMDID_GET_IRQ_STATUS DPIO_CMD(0x016)
+#define DPIO_CMDID_CLEAR_IRQ_STATUS DPIO_CMD(0x017)
+
+#define DPIO_CMDID_SET_STASHING_DEST DPIO_CMD(0x120)
+#define DPIO_CMDID_GET_STASHING_DEST DPIO_CMD(0x121)
+#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL DPIO_CMD(0x122)
+#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL DPIO_CMD(0x123)
+
+/* Macros for accessing command fields smaller than 1byte */
+#define DPIO_MASK(field) \
+ GENMASK(DPIO_##field##_SHIFT + DPIO_##field##_SIZE - 1, \
+ DPIO_##field##_SHIFT)
+#define dpio_set_field(var, field, val) \
+ ((var) |= (((val) << DPIO_##field##_SHIFT) & DPIO_MASK(field)))
+#define dpio_get_field(var, field) \
+ (((var) & DPIO_MASK(field)) >> DPIO_##field##_SHIFT)
+
+#pragma pack(push, 1)
+struct dpio_cmd_open {
+ uint32_t dpio_id;
+};
+
+#define DPIO_CHANNEL_MODE_SHIFT 0
+#define DPIO_CHANNEL_MODE_SIZE 2
+
+struct dpio_cmd_create {
+ uint16_t pad1;
+ /* from LSB: channel_mode:2 */
+ uint8_t channel_mode;
+ uint8_t pad2;
+ uint8_t num_priorities;
+};
+
+struct dpio_cmd_destroy {
+ uint32_t dpio_id;
+};
+
+#define DPIO_ENABLE_SHIFT 0
+#define DPIO_ENABLE_SIZE 1
+
+struct dpio_rsp_is_enabled {
+ /* only the LSB */
+ uint8_t en;
+};
+
+#define DPIO_ATTR_CHANNEL_MODE_SHIFT 0
+#define DPIO_ATTR_CHANNEL_MODE_SIZE 4
+
+struct dpio_rsp_get_attr {
+ uint32_t id;
+ uint16_t qbman_portal_id;
+ uint8_t num_priorities;
+ /* from LSB: channel_mode:4 */
+ uint8_t channel_mode;
+ uint64_t qbman_portal_ce_offset;
+ uint64_t qbman_portal_ci_offset;
+ uint32_t qbman_version;
+ uint32_t pad;
+ uint32_t clk;
+};
+
+struct dpio_stashing_dest {
+ uint8_t sdest;
+};
+
+struct dpio_cmd_static_dequeue_channel {
+ uint32_t dpcon_id;
+};
+
+struct dpio_rsp_add_static_dequeue_channel {
+ uint8_t channel_index;
+};
+
+struct dpio_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPIO_CMD_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng.h
new file mode 100644
index 00000000..afaf9b71
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+#ifndef __FSL_DPMNG_H
+#define __FSL_DPMNG_H
+
+/*
+ * Management Complex General API
+ * Contains general API for the Management Complex firmware
+ */
+
+struct fsl_mc_io;
+
+/**
+ * Management Complex firmware version information
+ */
+#define MC_VER_MAJOR 10
+#define MC_VER_MINOR 3
+
+/**
+ * struct mc_version
+ * @major: Major version number: incremented on API compatibility changes
+ * @minor: Minor version number: incremented on API additions (that are
+ * backward compatible); reset when major version is incremented
+ * @revision: Internal revision number: incremented on implementation changes
+ * and/or bug fixes that have no impact on API
+ */
+struct mc_version {
+ uint32_t major;
+ uint32_t minor;
+ uint32_t revision;
+};
+
+int mc_get_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_version *mc_ver_info);
+
+/**
+ * struct mc_platform
+ * @svr: System version (content of platform SVR register)
+ * @pvr: Processor version (content of platform PVR register)
+ */
+struct mc_soc_version {
+ uint32_t svr;
+ uint32_t pvr;
+};
+
+int mc_get_soc_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_soc_version *mc_platform_info);
+#endif /* __FSL_DPMNG_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
new file mode 100644
index 00000000..ac380be1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __FSL_DPMNG_CMD_H
+#define __FSL_DPMNG_CMD_H
+
+/* Command versioning */
+#define DPMNG_CMD_BASE_VERSION 1
+#define DPMNG_CMD_ID_OFFSET 4
+
+#define DPMNG_CMD(id) ((id << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
+#define DPMNG_CMDID_GET_SOC_VERSION DPMNG_CMD(0x832)
+
+#pragma pack(push, 1)
+struct dpmng_rsp_get_version {
+ uint32_t revision;
+ uint32_t version_major;
+ uint32_t version_minor;
+};
+
+struct dpmng_rsp_get_soc_version {
+ uint32_t svr;
+ uint32_t pvr;
+};
+
+#pragma pack(pop)
+
+#endif /* __FSL_DPMNG_CMD_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_cmd.h
new file mode 100644
index 00000000..ac919610
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_cmd.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef __FSL_MC_CMD_H
+#define __FSL_MC_CMD_H
+
+#include <rte_byteorder.h>
+#include <stdint.h>
+
+#define MC_CMD_NUM_OF_PARAMS 7
+
+#define phys_addr_t uint64_t
+
+#define u64 uint64_t
+#define u32 uint32_t
+#define u16 uint16_t
+#define u8 uint8_t
+
+#define cpu_to_le64 rte_cpu_to_le_64
+#define cpu_to_le32 rte_cpu_to_le_32
+#define cpu_to_le16 rte_cpu_to_le_16
+
+#define le64_to_cpu rte_le_to_cpu_64
+#define le32_to_cpu rte_le_to_cpu_32
+#define le16_to_cpu rte_le_to_cpu_16
+
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+struct mc_cmd_header {
+ union {
+ struct {
+ uint8_t src_id;
+ uint8_t flags_hw;
+ uint8_t status;
+ uint8_t flags_sw;
+ uint16_t token;
+ uint16_t cmd_id;
+ };
+ uint32_t word[2];
+ };
+};
+
+struct mc_command {
+ uint64_t header;
+ uint64_t params[MC_CMD_NUM_OF_PARAMS];
+};
+
+struct mc_rsp_create {
+ uint32_t object_id;
+};
+
+enum mc_cmd_status {
+ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
+ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
+ MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */
+ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */
+ MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */
+ MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */
+ MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */
+ MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */
+ MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */
+ MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */
+ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */
+ MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */
+};
+
+/*
+ * MC command flags
+ */
+
+/* High priority flag */
+#define MC_CMD_FLAG_PRI 0x80
+/* Command completion flag */
+#define MC_CMD_FLAG_INTR_DIS 0x01
+
+#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00
+
+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
+
+static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ uint64_t header = 0;
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
+
+ hdr->cmd_id = cpu_to_le16(cmd_id);
+ hdr->token = cpu_to_le16(token);
+ hdr->status = MC_CMD_STATUS_READY;
+ hdr->word[0] |= cpu_to_le32(cmd_flags & MC_CMD_HDR_FLAGS_MASK);
+
+ return header;
+}
+
+static inline uint16_t mc_cmd_hdr_read_token(struct mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ uint16_t token = le16_to_cpu(hdr->token);
+
+ return token;
+}
+
+static inline uint32_t mc_cmd_read_object_id(struct mc_command *cmd)
+{
+ struct mc_rsp_create *rsp_params;
+
+ rsp_params = (struct mc_rsp_create *)cmd->params;
+ return le32_to_cpu(rsp_params->object_id);
+}
+
+static inline enum mc_cmd_status mc_cmd_read_status(struct mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ uint8_t status = hdr->status;
+
+ return (enum mc_cmd_status)status;
+}
+
+/**
+ * mc_write_command - writes a command to a Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @cmd: pointer to a filled command
+ */
+static inline void mc_write_command(struct mc_command __iomem *portal,
+ struct mc_command *cmd)
+{
+ struct mc_cmd_header *cmd_header = (struct mc_cmd_header *)&cmd->header;
+ char *header = (char *)&portal->header;
+ int i;
+
+ /* copy command parameters into the portal */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ iowrite64(cmd->params[i], &portal->params[i]);
+
+ /* submit the command by writing the header */
+ iowrite32(le32_to_cpu(cmd_header->word[1]), (((uint32_t *)header) + 1));
+ iowrite32(le32_to_cpu(cmd_header->word[0]), (uint32_t *)header);
+}
+
+/**
+ * mc_read_response - reads the response for the last MC command from a
+ * Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @resp: pointer to command response buffer
+ *
+ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
+ */
+static inline enum mc_cmd_status mc_read_response(
+ struct mc_command __iomem *portal,
+ struct mc_command *resp)
+{
+ int i;
+ enum mc_cmd_status status;
+
+ /* Copy command response header from MC portal: */
+ resp->header = ioread64(&portal->header);
+ status = mc_cmd_read_status(resp);
+ if (status != MC_CMD_STATUS_OK)
+ return status;
+
+ /* Copy command response data from MC portal: */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ resp->params[i] = ioread64(&portal->params[i]);
+
+ return status;
+}
+
+#endif /* __FSL_MC_CMD_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_sys.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_sys.h
new file mode 100644
index 00000000..d0c7b39f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_sys.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+#ifndef _FSL_MC_SYS_H
+#define _FSL_MC_SYS_H
+
+#ifdef __linux_driver__
+
+#include <linux/errno.h>
+#include <asm/io.h>
+#include <linux/slab.h>
+
+struct fsl_mc_io {
+ void *regs;
+};
+
+#ifndef ENOTSUP
+#define ENOTSUP 95
+#endif
+
+#define ioread64(_p) readq(_p)
+#define iowrite64(_v, _p) writeq(_v, _p)
+
+#else /* __linux_driver__ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <errno.h>
+#include <sys/uio.h>
+#include <linux/byteorder/little_endian.h>
+
+#ifndef dmb
+#define dmb() {__asm__ __volatile__("" : : : "memory"); }
+#endif
+#define __iormb() dmb()
+#define __iowmb() dmb()
+#define __arch_getq(a) (*(volatile uint64_t *)(a))
+#define __arch_putq(v, a) (*(volatile uint64_t *)(a) = (v))
+#define __arch_putq32(v, a) (*(volatile uint32_t *)(a) = (v))
+#define readq(c) \
+ ({ uint64_t __v = __arch_getq(c); __iormb(); __v; })
+#define writeq(v, c) \
+ ({ uint64_t __v = v; __iowmb(); __arch_putq(__v, c); __v; })
+#define writeq32(v, c) \
+ ({ uint32_t __v = v; __iowmb(); __arch_putq32(__v, c); __v; })
+#define ioread64(_p) readq(_p)
+#define iowrite64(_v, _p) writeq(_v, _p)
+#define iowrite32(_v, _p) writeq32(_v, _p)
+#define __iomem
+
+/*GPP is supposed to use MC commands with low priority*/
+#define CMD_PRI_LOW 0 /*!< Low Priority command indication */
+
+struct fsl_mc_io {
+ void *regs;
+};
+
+#endif /* __linux_driver__ */
+
+#endif /* _FSL_MC_SYS_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/mc_sys.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/mc_sys.c
new file mode 100644
index 00000000..efafdc31
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/mc_sys.c
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+
+#include <rte_spinlock.h>
+
+/** User space framework uses MC Portal in shared mode. Following change
+ * introduces lock in MC FLIB
+ */
+
+/**
+ * A static spinlock initializer.
+ */
+static rte_spinlock_t mc_portal_lock = RTE_SPINLOCK_INITIALIZER;
+
+static int mc_status_to_error(enum mc_cmd_status status)
+{
+ switch (status) {
+ case MC_CMD_STATUS_OK:
+ return 0;
+ case MC_CMD_STATUS_AUTH_ERR:
+ return -EACCES; /* Token error */
+ case MC_CMD_STATUS_NO_PRIVILEGE:
+ return -EPERM; /* Permission denied */
+ case MC_CMD_STATUS_DMA_ERR:
+ return -EIO; /* Input/Output error */
+ case MC_CMD_STATUS_CONFIG_ERR:
+ return -EINVAL; /* Device not configured */
+ case MC_CMD_STATUS_TIMEOUT:
+ return -ETIMEDOUT; /* Operation timed out */
+ case MC_CMD_STATUS_NO_RESOURCE:
+ return -ENAVAIL; /* Resource temporarily unavailable */
+ case MC_CMD_STATUS_NO_MEMORY:
+ return -ENOMEM; /* Cannot allocate memory */
+ case MC_CMD_STATUS_BUSY:
+ return -EBUSY; /* Device busy */
+ case MC_CMD_STATUS_UNSUPPORTED_OP:
+ return -ENOTSUP; /* Operation not supported by device */
+ case MC_CMD_STATUS_INVALID_STATE:
+ return -ENODEV; /* Invalid device state */
+ default:
+ break;
+ }
+
+ /* Not expected to reach here */
+ return -EINVAL;
+}
+
+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
+{
+ enum mc_cmd_status status;
+ uint64_t response;
+
+ if (!mc_io || !mc_io->regs)
+ return -EACCES;
+
+ /* --- Call lock function here in case portal is shared --- */
+ rte_spinlock_lock(&mc_portal_lock);
+
+ mc_write_command(mc_io->regs, cmd);
+
+ /* Spin until status changes */
+ do {
+ response = ioread64(mc_io->regs);
+ status = mc_cmd_read_status((struct mc_command *)&response);
+
+ /* --- Call wait function here to prevent blocking ---
+ * Change the loop condition accordingly to exit on timeout.
+ */
+ } while (status == MC_CMD_STATUS_READY);
+
+ /* Read the response back into the command buffer */
+ mc_read_response(mc_io->regs, cmd);
+
+ /* --- Call unlock function here in case portal is shared --- */
+ rte_spinlock_unlock(&mc_portal_lock);
+
+ return mc_status_to_error(status);
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/meson.build b/src/spdk/dpdk/drivers/bus/fslmc/meson.build
new file mode 100644
index 00000000..22a56a6f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['eventdev', 'kvargs']
+sources = files('fslmc_bus.c',
+ 'fslmc_vfio.c',
+ 'mc/dpbp.c',
+ 'mc/dpci.c',
+ 'mc/dpcon.c',
+ 'mc/dpdmai.c',
+ 'mc/dpio.c',
+ 'mc/dpmng.c',
+ 'mc/mc_sys.c',
+ 'portal/dpaa2_hw_dpbp.c',
+ 'portal/dpaa2_hw_dpci.c',
+ 'portal/dpaa2_hw_dpio.c',
+ 'qbman/qbman_portal.c',
+ 'qbman/qbman_debug.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('mc', 'qbman/include', 'portal')
+cflags += ['-D_GNU_SOURCE']
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
new file mode 100644
index 00000000..39c5adf9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_mbuf_pool_ops.h>
+
+#include <fslmc_logs.h>
+#include <rte_fslmc.h>
+#include <mc/fsl_dpbp.h>
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+TAILQ_HEAD(dpbp_dev_list, dpaa2_dpbp_dev);
+static struct dpbp_dev_list dpbp_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpbp_dev_list); /*!< DPBP device list */
+
+static int
+dpaa2_create_dpbp_device(int vdev_fd __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dpbp_id)
+{
+ struct dpaa2_dpbp_dev *dpbp_node;
+ int ret;
+ static int register_once;
+
+ /* Allocate DPAA2 dpbp handle */
+ dpbp_node = rte_malloc(NULL, sizeof(struct dpaa2_dpbp_dev), 0);
+ if (!dpbp_node) {
+ DPAA2_BUS_ERR("Memory allocation failed for DPBP Device");
+ return -1;
+ }
+
+ /* Open the dpbp object */
+ dpbp_node->dpbp.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpbp_open(&dpbp_node->dpbp,
+ CMD_PRI_LOW, dpbp_id, &dpbp_node->token);
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to open buffer pool object: err(%d)",
+ ret);
+ rte_free(dpbp_node);
+ return -1;
+ }
+
+ /* Clean the device first */
+ ret = dpbp_reset(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to reset buffer pool device. err(%d)",
+ ret);
+ dpbp_close(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
+ rte_free(dpbp_node);
+ return -1;
+ }
+
+ dpbp_node->dpbp_id = dpbp_id;
+ rte_atomic16_init(&dpbp_node->in_use);
+
+ TAILQ_INSERT_TAIL(&dpbp_dev_list, dpbp_node, next);
+
+ if (!register_once) {
+ rte_mbuf_set_platform_mempool_ops(DPAA2_MEMPOOL_OPS_NAME);
+ register_once = 1;
+ }
+
+ return 0;
+}
+
+struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void)
+{
+ struct dpaa2_dpbp_dev *dpbp_dev = NULL;
+
+ /* Get DPBP dev handle from list using index */
+ TAILQ_FOREACH(dpbp_dev, &dpbp_dev_list, next) {
+ if (dpbp_dev && rte_atomic16_test_and_set(&dpbp_dev->in_use))
+ break;
+ }
+
+ return dpbp_dev;
+}
+
+void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp)
+{
+ struct dpaa2_dpbp_dev *dpbp_dev = NULL;
+
+ /* Match DPBP handle and mark it free */
+ TAILQ_FOREACH(dpbp_dev, &dpbp_dev_list, next) {
+ if (dpbp_dev == dpbp) {
+ rte_atomic16_dec(&dpbp_dev->in_use);
+ return;
+ }
+ }
+}
+
+int dpaa2_dpbp_supported(void)
+{
+ if (TAILQ_EMPTY(&dpbp_dev_list))
+ return -1;
+ return 0;
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dpbp_obj = {
+ .dev_type = DPAA2_BPOOL,
+ .create = dpaa2_create_dpbp_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpbp, rte_dpaa2_dpbp_obj);
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
new file mode 100644
index 00000000..5ad0374d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev_driver.h>
+
+#include <fslmc_logs.h>
+#include <rte_fslmc.h>
+#include <mc/fsl_dpci.h>
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+TAILQ_HEAD(dpci_dev_list, dpaa2_dpci_dev);
+static struct dpci_dev_list dpci_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpci_dev_list); /*!< DPCI device list */
+
+static int
+rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dpci_id)
+{
+ struct dpaa2_dpci_dev *dpci_node;
+ struct dpci_attr attr;
+ struct dpci_rx_queue_cfg rx_queue_cfg;
+ struct dpci_rx_queue_attr rx_attr;
+ struct dpci_tx_queue_attr tx_attr;
+ int ret, i;
+
+ /* Allocate DPAA2 dpci handle */
+ dpci_node = rte_malloc(NULL, sizeof(struct dpaa2_dpci_dev), 0);
+ if (!dpci_node) {
+ DPAA2_BUS_ERR("Memory allocation failed for DPCI Device");
+ return -ENOMEM;
+ }
+
+ /* Open the dpci object */
+ dpci_node->dpci.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpci_open(&dpci_node->dpci,
+ CMD_PRI_LOW, dpci_id, &dpci_node->token);
+ if (ret) {
+ DPAA2_BUS_ERR("Resource alloc failure with err code: %d", ret);
+ goto err;
+ }
+
+ /* Get the device attributes */
+ ret = dpci_get_attributes(&dpci_node->dpci,
+ CMD_PRI_LOW, dpci_node->token, &attr);
+ if (ret != 0) {
+ DPAA2_BUS_ERR("Reading device failed with err code: %d", ret);
+ goto err;
+ }
+
+ for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+ struct dpaa2_queue *rxq;
+
+ memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg));
+ ret = dpci_set_rx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token,
+ i, &rx_queue_cfg);
+ if (ret) {
+ DPAA2_BUS_ERR("Setting Rx queue failed with err code: %d",
+ ret);
+ goto err;
+ }
+
+ /* Allocate DQ storage for the DPCI Rx queues */
+ rxq = &(dpci_node->rx_queue[i]);
+ rxq->q_storage = rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->q_storage) {
+ DPAA2_BUS_ERR("q_storage allocation failed\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+ ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+ if (ret) {
+ DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed\n");
+ goto err;
+ }
+ }
+
+ /* Enable the device */
+ ret = dpci_enable(&dpci_node->dpci,
+ CMD_PRI_LOW, dpci_node->token);
+ if (ret != 0) {
+ DPAA2_BUS_ERR("Enabling device failed with err code: %d", ret);
+ goto err;
+ }
+
+ for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+ /* Get the Rx FQID's */
+ ret = dpci_get_rx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token, i,
+ &rx_attr);
+ if (ret != 0) {
+ DPAA2_BUS_ERR("Rx queue fetch failed with err code: %d",
+ ret);
+ goto err;
+ }
+ dpci_node->rx_queue[i].fqid = rx_attr.fqid;
+
+ ret = dpci_get_tx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token, i,
+ &tx_attr);
+ if (ret != 0) {
+ DPAA2_BUS_ERR("Reading device failed with err code: %d",
+ ret);
+ goto err;
+ }
+ dpci_node->tx_queue[i].fqid = tx_attr.fqid;
+ }
+
+ dpci_node->dpci_id = dpci_id;
+ rte_atomic16_init(&dpci_node->in_use);
+
+ TAILQ_INSERT_TAIL(&dpci_dev_list, dpci_node, next);
+
+ return 0;
+
+err:
+ for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+ struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]);
+
+ if (rxq->q_storage) {
+ dpaa2_free_dq_storage(rxq->q_storage);
+ rte_free(rxq->q_storage);
+ }
+ }
+ rte_free(dpci_node);
+
+ return ret;
+}
+
+struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void)
+{
+ struct dpaa2_dpci_dev *dpci_dev = NULL;
+
+ /* Get DPCI dev handle from list using index */
+ TAILQ_FOREACH(dpci_dev, &dpci_dev_list, next) {
+ if (dpci_dev && rte_atomic16_test_and_set(&dpci_dev->in_use))
+ break;
+ }
+
+ return dpci_dev;
+}
+
+void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci)
+{
+ struct dpaa2_dpci_dev *dpci_dev = NULL;
+
+ /* Match DPCI handle and mark it free */
+ TAILQ_FOREACH(dpci_dev, &dpci_dev_list, next) {
+ if (dpci_dev == dpci) {
+ rte_atomic16_dec(&dpci_dev->in_use);
+ return;
+ }
+ }
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dpci_obj = {
+ .dev_type = DPAA2_CI,
+ .create = rte_dpaa2_create_dpci_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpci, rte_dpaa2_dpci_obj);
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
new file mode 100644
index 00000000..99f70be1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -0,0 +1,527 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/epoll.h>
+#include<sys/eventfd.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+
+#include <fslmc_logs.h>
+#include <rte_fslmc.h>
+#include "dpaa2_hw_pvt.h"
+#include "dpaa2_hw_dpio.h"
+#include <mc/fsl_dpmng.h>
+
+#define NUM_HOST_CPUS RTE_MAX_LCORE
+
+struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
+RTE_DEFINE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
+
+struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
+
+TAILQ_HEAD(dpio_dev_list, dpaa2_dpio_dev);
+static struct dpio_dev_list dpio_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpio_dev_list); /*!< DPIO device list */
+static uint32_t io_space_count;
+
+/* Variable to store DPAA2 platform type */
+uint32_t dpaa2_svr_family;
+
+/*Stashing Macros default for LS208x*/
+static int dpaa2_core_cluster_base = 0x04;
+static int dpaa2_cluster_sz = 2;
+
+/* For LS208X platform There are four clusters with following mapping:
+ * Cluster 1 (ID = x04) : CPU0, CPU1;
+ * Cluster 2 (ID = x05) : CPU2, CPU3;
+ * Cluster 3 (ID = x06) : CPU4, CPU5;
+ * Cluster 4 (ID = x07) : CPU6, CPU7;
+ */
+/* For LS108X platform There are two clusters with following mapping:
+ * Cluster 1 (ID = x02) : CPU0, CPU1, CPU2, CPU3;
+ * Cluster 2 (ID = x03) : CPU4, CPU5, CPU6, CPU7;
+ */
+/* For LX2160 platform There are four clusters with following mapping:
+ * Cluster 1 (ID = x00) : CPU0, CPU1;
+ * Cluster 2 (ID = x01) : CPU2, CPU3;
+ * Cluster 3 (ID = x02) : CPU4, CPU5;
+ * Cluster 4 (ID = x03) : CPU6, CPU7;
+ * Cluster 1 (ID = x04) : CPU8, CPU9;
+ * Cluster 2 (ID = x05) : CPU10, CP11;
+ * Cluster 3 (ID = x06) : CPU12, CPU13;
+ * Cluster 4 (ID = x07) : CPU14, CPU15;
+ */
+
+static int
+dpaa2_core_cluster_sdest(int cpu_id)
+{
+ int x = cpu_id / dpaa2_cluster_sz;
+
+ return dpaa2_core_cluster_base + x;
+}
+
+#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
+static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
+{
+#define STRING_LEN 28
+#define COMMAND_LEN 50
+ uint32_t cpu_mask = 1;
+ int ret;
+ size_t len = 0;
+ char *temp = NULL, *token = NULL;
+ char string[STRING_LEN], command[COMMAND_LEN];
+ FILE *file;
+
+ snprintf(string, STRING_LEN, "dpio.%d", dpio_id);
+ file = fopen("/proc/interrupts", "r");
+ if (!file) {
+ DPAA2_BUS_WARN("Failed to open /proc/interrupts file");
+ return;
+ }
+ while (getline(&temp, &len, file) != -1) {
+ if ((strstr(temp, string)) != NULL) {
+ token = strtok(temp, ":");
+ break;
+ }
+ }
+
+ if (!token) {
+ DPAA2_BUS_WARN("Failed to get interrupt id for dpio.%d",
+ dpio_id);
+ if (temp)
+ free(temp);
+ fclose(file);
+ return;
+ }
+
+ cpu_mask = cpu_mask << rte_lcore_id();
+ snprintf(command, COMMAND_LEN, "echo %X > /proc/irq/%s/smp_affinity",
+ cpu_mask, token);
+ ret = system(command);
+ if (ret < 0)
+ DPAA2_BUS_WARN(
+ "Failed to affine interrupts on respective core");
+ else
+ DPAA2_BUS_DEBUG(" %s command is executed", command);
+
+ free(temp);
+ fclose(file);
+}
+
+static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
+{
+ struct epoll_event epoll_ev;
+ int eventfd, dpio_epoll_fd, ret;
+ int threshold = 0x3, timeout = 0xFF;
+
+ dpio_epoll_fd = epoll_create(1);
+ ret = rte_dpaa2_intr_enable(&dpio_dev->intr_handle, 0);
+ if (ret) {
+ DPAA2_BUS_ERR("Interrupt registeration failed");
+ return -1;
+ }
+
+ if (getenv("DPAA2_PORTAL_INTR_THRESHOLD"))
+ threshold = atoi(getenv("DPAA2_PORTAL_INTR_THRESHOLD"));
+
+ if (getenv("DPAA2_PORTAL_INTR_TIMEOUT"))
+ sscanf(getenv("DPAA2_PORTAL_INTR_TIMEOUT"), "%x", &timeout);
+
+ qbman_swp_interrupt_set_trigger(dpio_dev->sw_portal,
+ QBMAN_SWP_INTERRUPT_DQRI);
+ qbman_swp_interrupt_clear_status(dpio_dev->sw_portal, 0xffffffff);
+ qbman_swp_interrupt_set_inhibit(dpio_dev->sw_portal, 0);
+ qbman_swp_dqrr_thrshld_write(dpio_dev->sw_portal, threshold);
+ qbman_swp_intr_timeout_write(dpio_dev->sw_portal, timeout);
+
+ eventfd = dpio_dev->intr_handle.fd;
+ epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
+ epoll_ev.data.fd = eventfd;
+
+ ret = epoll_ctl(dpio_epoll_fd, EPOLL_CTL_ADD, eventfd, &epoll_ev);
+ if (ret < 0) {
+ DPAA2_BUS_ERR("epoll_ctl failed");
+ return -1;
+ }
+ dpio_dev->epoll_fd = dpio_epoll_fd;
+
+ dpaa2_affine_dpio_intr_to_respective_core(dpio_dev->hw_id);
+
+ return 0;
+}
+#endif
+
+static int
+configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
+{
+ struct qbman_swp_desc p_des;
+ struct dpio_attr attr;
+
+ dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
+ if (!dpio_dev->dpio) {
+ DPAA2_BUS_ERR("Memory allocation failure");
+ return -1;
+ }
+
+ dpio_dev->dpio->regs = dpio_dev->mc_portal;
+ if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
+ &dpio_dev->token)) {
+ DPAA2_BUS_ERR("Failed to allocate IO space");
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
+ DPAA2_BUS_ERR("Failed to reset dpio");
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
+ DPAA2_BUS_ERR("Failed to Enable dpio");
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
+ dpio_dev->token, &attr)) {
+ DPAA2_BUS_ERR("DPIO Get attribute failed");
+ dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ /* Configure & setup SW portal */
+ p_des.block = NULL;
+ p_des.idx = attr.qbman_portal_id;
+ p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
+ p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
+ p_des.irq = -1;
+ p_des.qman_version = attr.qbman_version;
+
+ dpio_dev->sw_portal = qbman_swp_init(&p_des);
+ if (dpio_dev->sw_portal == NULL) {
+ DPAA2_BUS_ERR("QBMan SW Portal Init failed");
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
+{
+ int sdest, ret;
+
+ /* Set the Stashing Destination */
+ if (cpu_id < 0) {
+ cpu_id = rte_get_master_lcore();
+ if (cpu_id < 0) {
+ DPAA2_BUS_ERR("Getting CPU Index failed");
+ return -1;
+ }
+ }
+ /* Set the STASH Destination depending on Current CPU ID.
+ * Valid values of SDEST are 4,5,6,7. Where,
+ */
+
+ sdest = dpaa2_core_cluster_sdest(cpu_id);
+ DPAA2_BUS_DEBUG("Portal= %d CPU= %u SDEST= %d",
+ dpio_dev->index, cpu_id, sdest);
+
+ ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW,
+ dpio_dev->token, sdest);
+ if (ret) {
+ DPAA2_BUS_ERR("%d ERROR in SDEST", ret);
+ return -1;
+ }
+
+#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
+ if (dpaa2_dpio_intr_init(dpio_dev)) {
+ DPAA2_BUS_ERR("Interrupt registration failed for dpio");
+ return -1;
+ }
+#endif
+
+ return 0;
+}
+
+struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id)
+{
+ struct dpaa2_dpio_dev *dpio_dev = NULL;
+ int ret;
+
+ /* Get DPIO dev handle from list using index */
+ TAILQ_FOREACH(dpio_dev, &dpio_dev_list, next) {
+ if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count))
+ break;
+ }
+ if (!dpio_dev)
+ return NULL;
+
+ DPAA2_BUS_DEBUG("New Portal %p (%d) affined thread - %lu",
+ dpio_dev, dpio_dev->index, syscall(SYS_gettid));
+
+ ret = dpaa2_configure_stashing(dpio_dev, cpu_id);
+ if (ret)
+ DPAA2_BUS_ERR("dpaa2_configure_stashing failed");
+
+ return dpio_dev;
+}
+
+int
+dpaa2_affine_qbman_swp(void)
+{
+ unsigned int lcore_id = rte_lcore_id();
+ uint64_t tid = syscall(SYS_gettid);
+
+ if (lcore_id == LCORE_ID_ANY)
+ lcore_id = rte_get_master_lcore();
+ /* if the core id is not supported */
+ else if (lcore_id >= RTE_MAX_LCORE)
+ return -1;
+
+ if (dpaa2_io_portal[lcore_id].dpio_dev) {
+ DPAA2_BUS_DP_INFO("DPAA Portal=%p (%d) is being shared"
+ " between thread %" PRIu64 " and current "
+ "%" PRIu64 "\n",
+ dpaa2_io_portal[lcore_id].dpio_dev,
+ dpaa2_io_portal[lcore_id].dpio_dev->index,
+ dpaa2_io_portal[lcore_id].net_tid,
+ tid);
+ RTE_PER_LCORE(_dpaa2_io).dpio_dev
+ = dpaa2_io_portal[lcore_id].dpio_dev;
+ rte_atomic16_inc(&dpaa2_io_portal
+ [lcore_id].dpio_dev->ref_count);
+ dpaa2_io_portal[lcore_id].net_tid = tid;
+
+ DPAA2_BUS_DP_DEBUG("Old Portal=%p (%d) affined thread - "
+ "%" PRIu64 "\n",
+ dpaa2_io_portal[lcore_id].dpio_dev,
+ dpaa2_io_portal[lcore_id].dpio_dev->index,
+ tid);
+ return 0;
+ }
+
+ /* Populate the dpaa2_io_portal structure */
+ dpaa2_io_portal[lcore_id].dpio_dev = dpaa2_get_qbman_swp(lcore_id);
+
+ if (dpaa2_io_portal[lcore_id].dpio_dev) {
+ RTE_PER_LCORE(_dpaa2_io).dpio_dev
+ = dpaa2_io_portal[lcore_id].dpio_dev;
+ dpaa2_io_portal[lcore_id].net_tid = tid;
+
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int
+dpaa2_affine_qbman_ethrx_swp(void)
+{
+ unsigned int lcore_id = rte_lcore_id();
+ uint64_t tid = syscall(SYS_gettid);
+
+ if (lcore_id == LCORE_ID_ANY)
+ lcore_id = rte_get_master_lcore();
+ /* if the core id is not supported */
+ else if (lcore_id >= RTE_MAX_LCORE)
+ return -1;
+
+ if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
+ DPAA2_BUS_DP_INFO(
+ "DPAA Portal=%p (%d) is being shared between thread"
+ " %" PRIu64 " and current %" PRIu64 "\n",
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
+ dpaa2_io_portal[lcore_id].sec_tid,
+ tid);
+ RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+ = dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
+ rte_atomic16_inc(&dpaa2_io_portal
+ [lcore_id].ethrx_dpio_dev->ref_count);
+ dpaa2_io_portal[lcore_id].sec_tid = tid;
+
+ DPAA2_BUS_DP_DEBUG(
+ "Old Portal=%p (%d) affined thread"
+ " - %" PRIu64 "\n",
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
+ tid);
+ return 0;
+ }
+
+ /* Populate the dpaa2_io_portal structure */
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev =
+ dpaa2_get_qbman_swp(lcore_id);
+
+ if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
+ RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+ = dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
+ dpaa2_io_portal[lcore_id].sec_tid = tid;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+static int
+dpaa2_create_dpio_device(int vdev_fd,
+ struct vfio_device_info *obj_info,
+ int object_id)
+{
+ struct dpaa2_dpio_dev *dpio_dev;
+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
+
+ if (obj_info->num_regions < NUM_DPIO_REGIONS) {
+ DPAA2_BUS_ERR("Not sufficient number of DPIO regions");
+ return -1;
+ }
+
+ dpio_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpio_dev),
+ RTE_CACHE_LINE_SIZE);
+ if (!dpio_dev) {
+ DPAA2_BUS_ERR("Memory allocation failed for DPIO Device");
+ return -1;
+ }
+
+ dpio_dev->dpio = NULL;
+ dpio_dev->hw_id = object_id;
+ rte_atomic16_init(&dpio_dev->ref_count);
+ /* Using single portal for all devices */
+ dpio_dev->mc_portal = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+
+ reg_info.index = 0;
+ if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ DPAA2_BUS_ERR("vfio: error getting region info");
+ rte_free(dpio_dev);
+ return -1;
+ }
+
+ dpio_dev->ce_size = reg_info.size;
+ dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ vdev_fd, reg_info.offset);
+
+ reg_info.index = 1;
+ if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ DPAA2_BUS_ERR("vfio: error getting region info");
+ rte_free(dpio_dev);
+ return -1;
+ }
+
+ dpio_dev->ci_size = reg_info.size;
+ dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ vdev_fd, reg_info.offset);
+
+ if (configure_dpio_qbman_swp(dpio_dev)) {
+ DPAA2_BUS_ERR(
+ "Fail to configure the dpio qbman portal for %d",
+ dpio_dev->hw_id);
+ rte_free(dpio_dev);
+ return -1;
+ }
+
+ io_space_count++;
+ dpio_dev->index = io_space_count;
+
+ if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) {
+ DPAA2_BUS_ERR("Fail to setup interrupt for %d",
+ dpio_dev->hw_id);
+ rte_free(dpio_dev);
+ }
+
+ /* find the SoC type for the first time */
+ if (!dpaa2_svr_family) {
+ struct mc_soc_version mc_plat_info = {0};
+
+ if (mc_get_soc_version(dpio_dev->dpio,
+ CMD_PRI_LOW, &mc_plat_info)) {
+ DPAA2_BUS_ERR("Unable to get SoC version information");
+ } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LS1080A) {
+ dpaa2_core_cluster_base = 0x02;
+ dpaa2_cluster_sz = 4;
+ DPAA2_BUS_DEBUG("LS108x (A53) Platform Detected");
+ } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LX2160A) {
+ dpaa2_core_cluster_base = 0x00;
+ dpaa2_cluster_sz = 2;
+ DPAA2_BUS_DEBUG("LX2160 Platform Detected");
+ }
+ dpaa2_svr_family = (mc_plat_info.svr & 0xffff0000);
+ }
+
+ TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
+
+ return 0;
+}
+
+void
+dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
+{
+ int i = 0;
+
+ for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
+ if (q_storage->dq_storage[i])
+ rte_free(q_storage->dq_storage[i]);
+ }
+}
+
+int
+dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
+{
+ int i = 0;
+
+ for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
+ q_storage->dq_storage[i] = rte_malloc(NULL,
+ DPAA2_DQRR_RING_SIZE * sizeof(struct qbman_result),
+ RTE_CACHE_LINE_SIZE);
+ if (!q_storage->dq_storage[i])
+ goto fail;
+ }
+ return 0;
+fail:
+ while (--i >= 0)
+ rte_free(q_storage->dq_storage[i]);
+
+ return -1;
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dpio_obj = {
+ .dev_type = DPAA2_IO,
+ .create = dpaa2_create_dpio_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpio, rte_dpaa2_dpio_obj);
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
new file mode 100644
index 00000000..d593eea7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _DPAA2_HW_DPIO_H_
+#define _DPAA2_HW_DPIO_H_
+
+#include <mc/fsl_dpio.h>
+#include <mc/fsl_mc_sys.h>
+
+struct dpaa2_io_portal_t {
+ struct dpaa2_dpio_dev *dpio_dev;
+ struct dpaa2_dpio_dev *ethrx_dpio_dev;
+ uint64_t net_tid;
+ uint64_t sec_tid;
+ void *eventdev;
+};
+
+/*! Global per thread DPIO portal */
+RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
+
+#define DPAA2_PER_LCORE_DPIO RTE_PER_LCORE(_dpaa2_io).dpio_dev
+#define DPAA2_PER_LCORE_PORTAL DPAA2_PER_LCORE_DPIO->sw_portal
+
+#define DPAA2_PER_LCORE_ETHRX_DPIO RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+#define DPAA2_PER_LCORE_ETHRX_PORTAL DPAA2_PER_LCORE_ETHRX_DPIO->sw_portal
+
+/* Variable to store DPAA2 platform type */
+extern uint32_t dpaa2_svr_family;
+
+extern struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
+
+struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id);
+
+/* Affine a DPIO portal to current processing thread */
+int dpaa2_affine_qbman_swp(void);
+
+/* Affine additional DPIO portal to current crypto processing thread */
+int dpaa2_affine_qbman_ethrx_swp(void);
+
+/* allocate memory for FQ - dq storage */
+int
+dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage);
+
+/* free memory for FQ- dq storage */
+void
+dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage);
+
+#endif /* _DPAA2_HW_DPIO_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
new file mode 100644
index 00000000..82075936
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _DPAA2_HW_PVT_H_
+#define _DPAA2_HW_PVT_H_
+
+#include <rte_eventdev.h>
+
+#include <mc/fsl_mc_sys.h>
+#include <fsl_qbman_portal.h>
+
+#ifndef false
+#define false 0
+#endif
+#ifndef true
+#define true 1
+#endif
+#define lower_32_bits(x) ((uint32_t)(x))
+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
+
+#define SVR_LS1080A 0x87030000
+#define SVR_LS2080A 0x87010000
+#define SVR_LS2088A 0x87090000
+#define SVR_LX2160A 0x87360000
+
+#ifndef VLAN_TAG_SIZE
+#define VLAN_TAG_SIZE 4 /** < Vlan Header Length */
+#endif
+
+#define MAX_TX_RING_SLOTS 8
+ /** <Maximum number of slots available in TX ring*/
+
+#define DPAA2_DQRR_RING_SIZE 16
+ /** <Maximum number of slots available in RX ring*/
+
+#define MC_PORTAL_INDEX 0
+#define NUM_DPIO_REGIONS 2
+#define NUM_DQS_PER_QUEUE 2
+
+/* Maximum release/acquire from QBMAN */
+#define DPAA2_MBUF_MAX_ACQ_REL 7
+
+#define DPAA2_MEMPOOL_OPS_NAME "dpaa2"
+
+#define MAX_BPID 256
+#define DPAA2_MBUF_HW_ANNOTATION 64
+#define DPAA2_FD_PTA_SIZE 0
+
+#if (DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
+#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
+#endif
+
+/* we will re-use the HEADROOM for annotation in RX */
+#define DPAA2_HW_BUF_RESERVE 0
+#define DPAA2_PACKET_LAYOUT_ALIGN 64 /*changing from 256 */
+
+#define DPAA2_DPCI_MAX_QUEUES 2
+
+struct dpaa2_dpio_dev {
+ TAILQ_ENTRY(dpaa2_dpio_dev) next;
+ /**< Pointer to Next device instance */
+ uint16_t index; /**< Index of a instance in the list */
+ rte_atomic16_t ref_count;
+ /**< How many thread contexts are sharing this.*/
+ struct fsl_mc_io *dpio; /** handle to DPIO portal object */
+ uint16_t token;
+ struct qbman_swp *sw_portal; /** SW portal object */
+ const struct qbman_result *dqrr[4];
+ /**< DQRR Entry for this SW portal */
+ void *mc_portal; /**< MC Portal for configuring this device */
+ uintptr_t qbman_portal_ce_paddr;
+ /**< Physical address of Cache Enabled Area */
+ uintptr_t ce_size; /**< Size of the CE region */
+ uintptr_t qbman_portal_ci_paddr;
+ /**< Physical address of Cache Inhibit Area */
+ uintptr_t ci_size; /**< Size of the CI region */
+ struct rte_intr_handle intr_handle; /* Interrupt related info */
+ int32_t epoll_fd; /**< File descriptor created for interrupt polling */
+ int32_t hw_id; /**< An unique ID of this DPIO device instance */
+};
+
+struct dpaa2_dpbp_dev {
+ TAILQ_ENTRY(dpaa2_dpbp_dev) next;
+ /**< Pointer to Next device instance */
+ struct fsl_mc_io dpbp; /** handle to DPBP portal object */
+ uint16_t token;
+ rte_atomic16_t in_use;
+ uint32_t dpbp_id; /*HW ID for DPBP object */
+};
+
+struct queue_storage_info_t {
+ struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
+ struct qbman_result *active_dqs;
+ uint8_t active_dpio_id;
+ uint8_t toggle;
+ uint8_t last_num_pkts;
+};
+
+struct dpaa2_queue;
+
+typedef void (dpaa2_queue_cb_dqrr_t)(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev);
+
+struct dpaa2_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ void *dev;
+ int32_t eventfd; /*!< Event Fd of this queue */
+ uint32_t fqid; /*!< Unique ID of this queue */
+ uint8_t tc_index; /*!< traffic class identifier */
+ uint16_t flow_id; /*!< To be used by DPAA2 frmework */
+ uint64_t rx_pkts;
+ uint64_t tx_pkts;
+ uint64_t err_pkts;
+ union {
+ struct queue_storage_info_t *q_storage;
+ struct qbman_result *cscn;
+ };
+ struct rte_event ev;
+ dpaa2_queue_cb_dqrr_t *cb;
+};
+
+struct swp_active_dqs {
+ struct qbman_result *global_active_dqs;
+ uint64_t reserved[7];
+};
+
+#define NUM_MAX_SWP 64
+
+extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
+
+struct dpaa2_dpci_dev {
+ TAILQ_ENTRY(dpaa2_dpci_dev) next;
+ /**< Pointer to Next device instance */
+ struct fsl_mc_io dpci; /** handle to DPCI portal object */
+ uint16_t token;
+ rte_atomic16_t in_use;
+ uint32_t dpci_id; /*HW ID for DPCI object */
+ struct dpaa2_queue rx_queue[DPAA2_DPCI_MAX_QUEUES];
+ struct dpaa2_queue tx_queue[DPAA2_DPCI_MAX_QUEUES];
+};
+
+/*! Global MCP list */
+extern void *(*rte_mcp_ptr_list);
+
+/* Refer to Table 7-3 in SEC BG */
+struct qbman_fle {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t length;
+ /* FMT must be 00, MSB is final bit */
+ uint32_t fin_bpid_offset;
+ uint32_t frc;
+ uint32_t reserved[3]; /* Not used currently */
+};
+
+struct qbman_sge {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t length;
+ uint32_t fin_bpid_offset;
+};
+
+/* There are three types of frames: Single, Scatter Gather and Frame Lists */
+enum qbman_fd_format {
+ qbman_fd_single = 0,
+ qbman_fd_list,
+ qbman_fd_sg
+};
+/*Macros to define operations on FD*/
+#define DPAA2_SET_FD_ADDR(fd, addr) do { \
+ (fd)->simple.addr_lo = lower_32_bits((size_t)(addr)); \
+ (fd)->simple.addr_hi = upper_32_bits((uint64_t)(addr)); \
+} while (0)
+#define DPAA2_SET_FD_LEN(fd, length) ((fd)->simple.len = length)
+#define DPAA2_SET_FD_BPID(fd, bpid) ((fd)->simple.bpid_offset |= bpid)
+#define DPAA2_SET_ONLY_FD_BPID(fd, bpid) \
+ ((fd)->simple.bpid_offset = bpid)
+#define DPAA2_SET_FD_IVP(fd) (((fd)->simple.bpid_offset |= 0x00004000))
+#define DPAA2_SET_FD_OFFSET(fd, offset) \
+ (((fd)->simple.bpid_offset |= (uint32_t)(offset) << 16))
+#define DPAA2_SET_FD_INTERNAL_JD(fd, len) \
+ ((fd)->simple.frc = (0x80000000 | (len)))
+#define DPAA2_GET_FD_FRC_PARSE_SUM(fd) \
+ ((uint16_t)(((fd)->simple.frc & 0xffff0000) >> 16))
+#define DPAA2_SET_FD_FRC(fd, _frc) ((fd)->simple.frc = _frc)
+#define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0)
+
+#define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= (asal << 16))
+#define DPAA2_SET_FD_FLC(fd, addr) do { \
+ (fd)->simple.flc_lo = lower_32_bits((size_t)(addr)); \
+ (fd)->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \
+} while (0)
+#define DPAA2_SET_FLE_INTERNAL_JD(fle, len) ((fle)->frc = (0x80000000 | (len)))
+#define DPAA2_GET_FLE_ADDR(fle) \
+ (size_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo)
+#define DPAA2_SET_FLE_ADDR(fle, addr) do { \
+ (fle)->addr_lo = lower_32_bits((size_t)addr); \
+ (fle)->addr_hi = upper_32_bits((uint64_t)addr); \
+} while (0)
+#define DPAA2_GET_FLE_CTXT(fle) \
+ ((((uint64_t)((fle)->reserved[1])) << 32) + (fle)->reserved[0])
+#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
+ (fle)->reserved[0] = lower_32_bits((size_t)addr); \
+ (fle)->reserved[1] = upper_32_bits((uint64_t)addr); \
+} while (0)
+#define DPAA2_SET_FLE_OFFSET(fle, offset) \
+ ((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
+#define DPAA2_SET_FLE_LEN(fle, len) ((fle)->length = len)
+#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (size_t)bpid)
+#define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff)
+#define DPAA2_SET_FLE_FIN(fle) ((fle)->fin_bpid_offset |= 1 << 31)
+#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000))
+#define DPAA2_SET_FLE_BMT(fle) (((fle)->fin_bpid_offset |= 0x00008000))
+#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
+ ((fd)->simple.bpid_offset |= (uint32_t)1 << 28)
+#define DPAA2_GET_FD_ADDR(fd) \
+(((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))
+
+#define DPAA2_GET_FD_LEN(fd) ((fd)->simple.len)
+#define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF))
+#define DPAA2_GET_FD_IVP(fd) (((fd)->simple.bpid_offset & 0x00004000) >> 14)
+#define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16)
+#define DPAA2_GET_FD_FRC(fd) ((fd)->simple.frc)
+#define DPAA2_GET_FD_FLC(fd) \
+ (((uint64_t)((fd)->simple.flc_hi) << 32) + (fd)->simple.flc_lo)
+#define DPAA2_GET_FD_ERR(fd) ((fd)->simple.bpid_offset & 0x000000FF)
+#define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16)
+#define DPAA2_SET_FLE_SG_EXT(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 29)
+#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
+ (((fle)->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)
+
+#define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \
+ ((struct rte_mbuf *)((size_t)(buf) - (meta_data_size)))
+
+#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)
+
+#define DPAA2_FD_SET_FORMAT(fd, format) do { \
+ (fd)->simple.bpid_offset &= 0xCFFFFFFF; \
+ (fd)->simple.bpid_offset |= (uint32_t)format << 28; \
+} while (0)
+#define DPAA2_FD_GET_FORMAT(fd) (((fd)->simple.bpid_offset >> 28) & 0x3)
+
+#define DPAA2_SG_SET_FINAL(sg, fin) do { \
+ (sg)->fin_bpid_offset &= 0x7FFFFFFF; \
+ (sg)->fin_bpid_offset |= (uint32_t)fin << 31; \
+} while (0)
+#define DPAA2_SG_IS_FINAL(sg) (!!((sg)->fin_bpid_offset >> 31))
+/* Only Enqueue Error responses will be
+ * pushed on FQID_ERR of Enqueue FQ
+ */
+#define DPAA2_EQ_RESP_ERR_FQ 0
+/* All Enqueue responses will be pushed on address
+ * set with qbman_eq_desc_set_response
+ */
+#define DPAA2_EQ_RESP_ALWAYS 1
+
+/* Various structures representing contiguous memory maps */
+struct dpaa2_memseg {
+ TAILQ_ENTRY(dpaa2_memseg) next;
+ char *vaddr;
+ rte_iova_t iova;
+ size_t len;
+};
+
+TAILQ_HEAD(dpaa2_memseg_list, dpaa2_memseg);
+extern struct dpaa2_memseg_list rte_dpaa2_memsegs;
+
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+extern uint8_t dpaa2_virt_mode;
+static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused));
+/* todo - this is costly, need to write a fast coversion routine */
+static void *dpaa2_mem_ptov(phys_addr_t paddr)
+{
+ struct dpaa2_memseg *ms;
+
+ if (dpaa2_virt_mode)
+ return (void *)(size_t)paddr;
+
+ /* Check if the address is already part of the memseg list internally
+ * maintained by the dpaa2 driver.
+ */
+ TAILQ_FOREACH(ms, &rte_dpaa2_memsegs, next) {
+ if (paddr >= ms->iova && paddr <
+ ms->iova + ms->len)
+ return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
+ }
+
+ /* If not, Fallback to full memseg list searching */
+ return rte_mem_iova2virt(paddr);
+}
+
+static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
+static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
+{
+ const struct rte_memseg *memseg;
+
+ if (dpaa2_virt_mode)
+ return vaddr;
+
+ memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
+ if (memseg)
+ return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr);
+ return (size_t)NULL;
+}
+
+/**
+ * When we are using Physical addresses as IO Virtual Addresses,
+ * Need to call conversion routines dpaa2_mem_vtop & dpaa2_mem_ptov
+ * wherever required.
+ * These routines are called with help of below MACRO's
+ */
+
+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova)
+
+/**
+ * macro to convert Virtual address to IOVA
+ */
+#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((size_t)(_vaddr))
+
+/**
+ * macro to convert IOVA to Virtual address
+ */
+#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((size_t)(_iova))
+
+/**
+ * macro to convert modify the memory containing IOVA to Virtual address
+ */
+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
+ {_mem = (_type)(dpaa2_mem_ptov((size_t)(_mem))); }
+
+#else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+
+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
+#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr)
+#define DPAA2_IOVA_TO_VADDR(_iova) (_iova)
+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)
+
+#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+
+static inline
+int check_swp_active_dqs(uint16_t dpio_index)
+{
+ if (rte_global_active_dqs_list[dpio_index].global_active_dqs != NULL)
+ return 1;
+ return 0;
+}
+
+static inline
+void clear_swp_active_dqs(uint16_t dpio_index)
+{
+ rte_global_active_dqs_list[dpio_index].global_active_dqs = NULL;
+}
+
+static inline
+struct qbman_result *get_swp_active_dqs(uint16_t dpio_index)
+{
+ return rte_global_active_dqs_list[dpio_index].global_active_dqs;
+}
+
+static inline
+void set_swp_active_dqs(uint16_t dpio_index, struct qbman_result *dqs)
+{
+ rte_global_active_dqs_list[dpio_index].global_active_dqs = dqs;
+}
+struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
+void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
+int dpaa2_dpbp_supported(void);
+
+struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void);
+void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/compat.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/compat.h
new file mode 100644
index 00000000..7be8f54c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/compat.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2008-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef HEADER_COMPAT_H
+#define HEADER_COMPAT_H
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdint.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <malloc.h>
+#include <unistd.h>
+#include <error.h>
+#include <linux/types.h>
+#include <rte_atomic.h>
+
+/* The following definitions are primarily to allow the single-source driver
+ * interfaces to be included by arbitrary program code. Ie. for interfaces that
+ * are also available in kernel-space, these definitions provide compatibility
+ * with certain attributes and types used in those interfaces.
+ */
+
+/* Required compiler attributes */
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
+/* Required types */
+typedef uint64_t dma_addr_t;
+
+/* Debugging */
+#define prflush(fmt, args...) \
+ do { \
+ printf(fmt, ##args); \
+ fflush(stdout); \
+ } while (0)
+#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
+#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
+#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
+#define pr_info(fmt, args...) prflush(fmt, ##args)
+
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_BUS
+
+/* Trace the 3 different classes of read/write access to QBMan. #undef as
+ * required.
+ */
+#define QBMAN_CCSR_TRACE
+#define QBMAN_CINH_TRACE
+#define QBMAN_CENA_TRACE
+
+#define QBMAN_CHECKING
+
+#ifdef pr_debug
+#undef pr_debug
+#endif
+#define pr_debug(fmt, args...) printf(fmt, ##args)
+#define QBMAN_BUG_ON(c) \
+do { \
+ static int warned_##__LINE__; \
+ if ((c) && !warned_##__LINE__) { \
+ pr_warn("(%s:%d)\n", __FILE__, __LINE__); \
+ warned_##__LINE__ = 1; \
+ } \
+} while (0)
+#else
+#define QBMAN_BUG_ON(c) {}
+#define pr_debug(fmt, args...) {}
+#endif
+
+/* Other miscellaneous interfaces our APIs depend on; */
+
+#define lower_32_bits(x) ((uint32_t)(x))
+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
+
+
+#define __iomem
+
+#define __raw_readb(p) (*(const volatile unsigned char *)(p))
+#define __raw_readl(p) (*(const volatile unsigned int *)(p))
+#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
+
+#define atomic_t rte_atomic32_t
+#define atomic_read(v) rte_atomic32_read(v)
+#define atomic_set(v, i) rte_atomic32_set(v, i)
+
+#define atomic_inc(v) rte_atomic32_add(v, 1)
+#define atomic_dec(v) rte_atomic32_sub(v, 1)
+
+#define atomic_inc_and_test(v) rte_atomic32_inc_and_test(v)
+#define atomic_dec_and_test(v) rte_atomic32_dec_and_test(v)
+
+#define atomic_inc_return(v) rte_atomic32_add_return(v, 1)
+#define atomic_dec_return(v) rte_atomic32_sub_return(v, 1)
+#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)
+
+#endif /* HEADER_COMPAT_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
new file mode 100644
index 00000000..bb60a98f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ *
+ */
+#ifndef _FSL_QBMAN_BASE_H
+#define _FSL_QBMAN_BASE_H
+
+/**
+ * DOC: QBMan basic structures
+ *
+ * The QBMan block descriptor, software portal descriptor and Frame descriptor
+ * are defined here.
+ *
+ */
+
+/**
+ * struct qbman_block_desc - qbman block descriptor structure
+ * @ccsr_reg_bar: CCSR register map.
+ * @irq_rerr: Recoverable error interrupt line.
+ * @irq_nrerr: Non-recoverable error interrupt line
+ *
+ * Descriptor for a QBMan instance on the SoC. On partitions/targets that do not
+ * control this QBMan instance, these values may simply be place-holders. The
+ * idea is simply that we be able to distinguish between them, eg. so that SWP
+ * descriptors can identify which QBMan instance they belong to.
+ */
+struct qbman_block_desc {
+ void *ccsr_reg_bar;
+ int irq_rerr;
+ int irq_nrerr;
+};
+
+enum qbman_eqcr_mode {
+ qman_eqcr_vb_ring = 2, /* Valid bit, with eqcr in ring mode */
+ qman_eqcr_vb_array, /* Valid bit, with eqcr in array mode */
+};
+
+/**
+ * struct qbman_swp_desc - qbman software portal descriptor structure
+ * @block: The QBMan instance.
+ * @cena_bar: Cache-enabled portal register map.
+ * @cinh_bar: Cache-inhibited portal register map.
+ * @irq: -1 if unused (or unassigned)
+ * @idx: SWPs within a QBMan are indexed. -1 if opaque to the user.
+ * @qman_version: the qman version.
+ * @eqcr_mode: Select the eqcr mode, currently only valid bit ring mode and
+ * valid bit array mode are supported.
+ *
+ * Descriptor for a QBMan software portal, expressed in terms that make sense to
+ * the user context. Ie. on MC, this information is likely to be true-physical,
+ * and instantiated statically at compile-time. On GPP, this information is
+ * likely to be obtained via "discovery" over a partition's "MC bus"
+ * (ie. in response to a MC portal command), and would take into account any
+ * virtualisation of the GPP user's address space and/or interrupt numbering.
+ */
+struct qbman_swp_desc {
+ const struct qbman_block_desc *block;
+ uint8_t *cena_bar;
+ uint8_t *cinh_bar;
+ int irq;
+ int idx;
+ uint32_t qman_version;
+ enum qbman_eqcr_mode eqcr_mode;
+};
+
+/* Driver object for managing a QBMan portal */
+struct qbman_swp;
+
+/**
+ * struct qbman_fd - basci structure for qbman frame descriptor
+ * @words: for easier/faster copying the whole FD structure.
+ * @addr_lo: the lower 32 bits of the address in FD.
+ * @addr_hi: the upper 32 bits of the address in FD.
+ * @len: the length field in FD.
+ * @bpid_offset: represent the bpid and offset fields in FD. offset in
+ * the MS 16 bits, BPID in the LS 16 bits.
+ * @frc: frame context
+ * @ctrl: the 32bit control bits including dd, sc,... va, err.
+ * @flc_lo: the lower 32bit of flow context.
+ * @flc_hi: the upper 32bits of flow context.
+ *
+ * Place-holder for FDs, we represent it via the simplest form that we need for
+ * now. Different overlays may be needed to support different options, etc. (It
+ * is impractical to define One True Struct, because the resulting encoding
+ * routines (lots of read-modify-writes) would be worst-case performance whether
+ * or not circumstances required them.)
+ *
+ * Note, as with all data-structures exchanged between software and hardware (be
+ * they located in the portal register map or DMA'd to and from main-memory),
+ * the driver ensures that the caller of the driver API sees the data-structures
+ * in host-endianness. "struct qbman_fd" is no exception. The 32-bit words
+ * contained within this structure are represented in host-endianness, even if
+ * hardware always treats them as little-endian. As such, if any of these fields
+ * are interpreted in a binary (rather than numerical) fashion by hardware
+ * blocks (eg. accelerators), then the user should be careful. We illustrate
+ * with an example;
+ *
+ * Suppose the desired behaviour of an accelerator is controlled by the "frc"
+ * field of the FDs that are sent to it. Suppose also that the behaviour desired
+ * by the user corresponds to an "frc" value which is expressed as the literal
+ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit
+ * value in which 0xfe is the first byte and 0xba is the last byte, and as
+ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If
+ * the software is little-endian also, this can simply be achieved by setting
+ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set
+ * frc=0xfeedabba! The best away of avoiding trouble with this sort of thing is
+ * to treat the 32-bit words as numerical values, in which the offset of a field
+ * from the beginning of the first byte (as required or generated by hardware)
+ * is numerically encoded by a left-shift (ie. by raising the field to a
+ * corresponding power of 2). Ie. in the current example, software could set
+ * "frc" in the following way, and it would work correctly on both little-endian
+ * and big-endian operation;
+ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24);
+ */
+struct qbman_fd {
+ union {
+ uint32_t words[8];
+ struct qbman_fd_simple {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t len;
+ uint32_t bpid_offset;
+ uint32_t frc;
+ uint32_t ctrl;
+ uint32_t flc_lo;
+ uint32_t flc_hi;
+ } simple;
+ };
+};
+
+#endif /* !_FSL_QBMAN_BASE_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
new file mode 100644
index 00000000..072ad551
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
@@ -0,0 +1,30 @@
+/* Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+struct qbman_swp;
+
+struct qbman_fq_query_np_rslt {
+uint8_t verb;
+ uint8_t rslt;
+ uint8_t st1;
+ uint8_t st2;
+ uint8_t reserved[2];
+ uint16_t od1_sfdr;
+ uint16_t od2_sfdr;
+ uint16_t od3_sfdr;
+ uint16_t ra1_sfdr;
+ uint16_t ra2_sfdr;
+ uint32_t pfdr_hptr;
+ uint32_t pfdr_tptr;
+ uint32_t frm_cnt;
+ uint32_t byte_cnt;
+ uint16_t ics_surp;
+ uint8_t is;
+ uint8_t reserved2[29];
+};
+
+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
+ struct qbman_fq_query_np_rslt *r);
+uint32_t qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
+uint32_t qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
new file mode 100644
index 00000000..3e63db3a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -0,0 +1,1186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ *
+ */
+#ifndef _FSL_QBMAN_PORTAL_H
+#define _FSL_QBMAN_PORTAL_H
+
+#include <fsl_qbman_base.h>
+
+/**
+ * DOC - QBMan portal APIs to implement the following functions:
+ * - Initialize and destroy Software portal object.
+ * - Read and write Software portal interrupt registers.
+ * - Enqueue, including setting the enqueue descriptor, and issuing enqueue
+ * command etc.
+ * - Dequeue, including setting the dequeue descriptor, issuing dequeue command,
+ * parsing the dequeue response in DQRR and memeory, parsing the state change
+ * notifications etc.
+ * - Release, including setting the release descriptor, and issuing the buffer
+ * release command.
+ * - Acquire, acquire the buffer from the given buffer pool.
+ * - FQ management.
+ * - Channel management, enable/disable CDAN with or without context.
+ */
+
+/**
+ * qbman_swp_init() - Create a functional object representing the given
+ * QBMan portal descriptor.
+ * @d: the given qbman swp descriptor
+ *
+ * Return qbman_swp portal object for success, NULL if the object cannot
+ * be created.
+ */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
+
+/**
+ * qbman_swp_finish() - Create and destroy a functional object representing
+ * the given QBMan portal descriptor.
+ * @p: the qbman_swp object to be destroyed.
+ *
+ */
+void qbman_swp_finish(struct qbman_swp *p);
+
+/**
+ * qbman_swp_get_desc() - Get the descriptor of the given portal object.
+ * @p: the given portal object.
+ *
+ * Return the descriptor for this portal.
+ */
+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
+
+ /**************/
+ /* Interrupts */
+ /**************/
+
+/* EQCR ring interrupt */
+#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001)
+/* Enqueue command dispatched interrupt */
+#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002)
+/* DQRR non-empty interrupt */
+#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004)
+/* RCR ring interrupt */
+#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008)
+/* Release command dispatched interrupt */
+#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010)
+/* Volatile dequeue command interrupt */
+#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)
+
+/**
+ * qbman_swp_interrupt_get_vanish() - Get the data in software portal
+ * interrupt status disable register.
+ * @p: the given software portal object.
+ *
+ * Return the settings in SWP_ISDR register.
+ */
+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);
+
+/**
+ * qbman_swp_interrupt_set_vanish() - Set the data in software portal
+ * interrupt status disable register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_IDSR register.
+ */
+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_interrupt_read_status() - Get the data in software portal
+ * interrupt status register.
+ * @p: the given software portal object.
+ *
+ * Return the settings in SWP_ISR register.
+ */
+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
+
+/**
+ * qbman_swp_interrupt_clear_status() - Set the data in software portal
+ * interrupt status register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_ISR register.
+ */
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_dqrr_thrshld_read_status() - Get the data in software portal
+ * DQRR interrupt threshold register.
+ * @p: the given software portal object.
+ */
+uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p);
+
+/**
+ * qbman_swp_dqrr_thrshld_write() - Set the data in software portal
+ * DQRR interrupt threshold register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_DQRR_ITR register.
+ */
+void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_intr_timeout_read_status() - Get the data in software portal
+ * Interrupt Time-Out period register.
+ * @p: the given software portal object.
+ */
+uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p);
+
+/**
+ * qbman_swp_intr_timeout_write() - Set the data in software portal
+ * Interrupt Time-Out period register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_ITPR register.
+ */
+void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_interrupt_get_trigger() - Get the data in software portal
+ * interrupt enable register.
+ * @p: the given software portal object.
+ *
+ * Return the settings in SWP_IER register.
+ */
+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
+
+/**
+ * qbman_swp_interrupt_set_trigger() - Set the data in software portal
+ * interrupt enable register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_IER register.
+ */
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_interrupt_get_inhibit() - Get the data in software portal
+ * interrupt inhibit register.
+ * @p: the given software portal object.
+ *
+ * Return the settings in SWP_IIR register.
+ */
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
+
+/**
+ * qbman_swp_interrupt_set_inhibit() - Set the data in software portal
+ * interrupt inhibit register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_IIR register.
+ */
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
+
+ /************/
+ /* Dequeues */
+ /************/
+
+/**
+ * struct qbman_result - structure for qbman dequeue response and/or
+ * notification.
+ * @donot_manipulate_directly: the 16 32bit data to represent the whole
+ * possible qbman dequeue result.
+ */
+struct qbman_result {
+ union {
+ struct common {
+ uint8_t verb;
+ uint8_t reserved[63];
+ } common;
+ struct dq {
+ uint8_t verb;
+ uint8_t stat;
+ __le16 seqnum;
+ __le16 oprid;
+ uint8_t reserved;
+ uint8_t tok;
+ __le32 fqid;
+ uint32_t reserved2;
+ __le32 fq_byte_cnt;
+ __le32 fq_frm_cnt;
+ __le64 fqd_ctx;
+ uint8_t fd[32];
+ } dq;
+ struct scn {
+ uint8_t verb;
+ uint8_t stat;
+ uint8_t state;
+ uint8_t reserved;
+ __le32 rid_tok;
+ __le64 ctx;
+ } scn;
+ };
+};
+
+/* TODO:
+ *A DQRI interrupt can be generated when there are dequeue results on the
+ * portal's DQRR (this mechanism does not deal with "pull" dequeues to
+ * user-supplied 'storage' addresses). There are two parameters to this
+ * interrupt source, one is a threshold and the other is a timeout. The
+ * interrupt will fire if either the fill-level of the ring exceeds 'thresh', or
+ * if the ring has been non-empty for been longer than 'timeout' nanoseconds.
+ * For timeout, an approximation to the desired nanosecond-granularity value is
+ * made, so there are get and set APIs to allow the user to see what actual
+ * timeout is set (compared to the timeout that was requested).
+ */
+int qbman_swp_dequeue_thresh(struct qbman_swp *s, unsigned int thresh);
+int qbman_swp_dequeue_set_timeout(struct qbman_swp *s, unsigned int timeout);
+int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout);
+
+/* ------------------- */
+/* Push-mode dequeuing */
+/* ------------------- */
+
+/* The user of a portal can enable and disable push-mode dequeuing of up to 16
+ * channels independently. It does not specify this toggling by channel IDs, but
+ * rather by specifying the index (from 0 to 15) that has been mapped to the
+ * desired channel.
+ */
+
+/**
+ * qbman_swp_push_get() - Get the push dequeue setup.
+ * @s: the software portal object.
+ * @channel_idx: the channel index to query.
+ * @enabled: returned boolean to show whether the push dequeue is enabled for
+ * the given channel.
+ */
+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);
+
+/**
+ * qbman_swp_push_set() - Enable or disable push dequeue.
+ * @s: the software portal object.
+ * @channel_idx: the channel index..
+ * @enable: enable or disable push dequeue.
+ *
+ * The user of a portal can enable and disable push-mode dequeuing of up to 16
+ * channels independently. It does not specify this toggling by channel IDs, but
+ * rather by specifying the index (from 0 to 15) that has been mapped to the
+ * desired channel.
+ */
+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
+
+/* ------------------- */
+/* Pull-mode dequeuing */
+/* ------------------- */
+
+/**
+ * struct qbman_pull_desc - the structure for pull dequeue descriptor
+ */
+struct qbman_pull_desc {
+ union {
+ uint32_t donot_manipulate_directly[16];
+ struct pull {
+ uint8_t verb;
+ uint8_t numf;
+ uint8_t tok;
+ uint8_t reserved;
+ uint32_t dq_src;
+ uint64_t rsp_addr;
+ uint64_t rsp_addr_virt;
+ uint8_t padding[40];
+ } pull;
+ };
+};
+
+enum qbman_pull_type_e {
+ /* dequeue with priority precedence, respect intra-class scheduling */
+ qbman_pull_type_prio = 1,
+ /* dequeue with active FQ precedence, respect ICS */
+ qbman_pull_type_active,
+ /* dequeue with active FQ precedence, no ICS */
+ qbman_pull_type_active_noics
+};
+
+/**
+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the pull dequeue descriptor to be cleared.
+ */
+void qbman_pull_desc_clear(struct qbman_pull_desc *d);
+
+/**
+ * qbman_pull_desc_set_storage()- Set the pull dequeue storage
+ * @d: the pull dequeue descriptor to be set.
+ * @storage: the pointer of the memory to store the dequeue result.
+ * @storage_phys: the physical address of the storage memory.
+ * @stash: to indicate whether write allocate is enabled.
+ *
+ * If not called, or if called with 'storage' as NULL, the result pull dequeues
+ * will produce results to DQRR. If 'storage' is non-NULL, then results are
+ * produced to the given memory location (using the physical/DMA address which
+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
+ * those writes to main-memory express a cache-warming attribute.
+ */
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct qbman_result *storage,
+ uint64_t storage_phys,
+ int stash);
+/**
+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
+ * @d: the pull dequeue descriptor to be set.
+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive.
+ */
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
+ uint8_t numframes);
+/**
+ * qbman_pull_desc_set_token() - Set dequeue token for pull command
+ * @d: the dequeue descriptor
+ * @token: the token to be set
+ *
+ * token is the value that shows up in the dequeue response that can be used to
+ * detect when the results have been published. The easiest technique is to zero
+ * result "storage" before issuing a dequeue, and use any non-zero 'token' value
+ */
+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);
+
+/* Exactly one of the following descriptor "actions" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - pull dequeue from the given frame queue (FQ)
+ * - pull dequeue from any FQ in the given work queue (WQ)
+ * - pull dequeue from any FQ in any WQ in the given channel
+ */
+/**
+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues.
+ * @fqid: the frame queue index of the given FQ.
+ */
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);
+
+/**
+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.
+ * @wqid: composed of channel id and wqid within the channel.
+ * @dct: the dequeue command type.
+ */
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
+ enum qbman_pull_type_e dct);
+
+/* qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
+ * dequeues.
+ * @chid: the channel id to be dequeued.
+ * @dct: the dequeue command type.
+ */
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
+ enum qbman_pull_type_e dct);
+
+/**
+ * qbman_swp_pull() - Issue the pull dequeue command
+ * @s: the software portal object.
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls.
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);
+
+/* -------------------------------- */
+/* Polling DQRR for dequeue results */
+/* -------------------------------- */
+
+/**
+ * qbman_swp_dqrr_next() - Get an valid DQRR entry.
+ * @s: the software portal object.
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *p);
+
+/**
+ * qbman_swp_prefetch_dqrr_next() - prefetch the next DQRR entry.
+ * @s: the software portal object.
+ */
+void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s);
+
+/**
+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
+ * qbman_swp_dqrr_next().
+ * @s: the software portal object.
+ * @dq: the DQRR entry to be consumed.
+ */
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);
+
+/**
+ * qbman_swp_dqrr_idx_consume() - Given the DQRR index consume the DQRR entry
+ * @s: the software portal object.
+ * @dqrr_index: the DQRR index entry to be consumed.
+ */
+void qbman_swp_dqrr_idx_consume(struct qbman_swp *s, uint8_t dqrr_index);
+
+/**
+ * qbman_get_dqrr_idx() - Get dqrr index from the given dqrr
+ * @dqrr: the given dqrr object.
+ *
+ * Return dqrr index.
+ */
+uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr);
+
+/**
+ * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the
+ * given portal
+ * @s: the given portal.
+ * @idx: the dqrr index.
+ *
+ * Return dqrr entry object.
+ */
+struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
+
+/* ------------------------------------------------- */
+/* Polling user-provided storage for dequeue results */
+/* ------------------------------------------------- */
+
+/**
+ * qbman_result_has_new_result() - Check and get the dequeue response from the
+ * dq storage memory set in pull dequeue command
+ * @s: the software portal object.
+ * @dq: the dequeue result read from the memory.
+ *
+ * Only used for user-provided storage of dequeue results, not DQRR. For
+ * efficiency purposes, the driver will perform any required endianness
+ * conversion to ensure that the user's dequeue result storage is in host-endian
+ * format (whether or not that is the same as the little-endian format that
+ * hardware DMA'd to the user's storage). As such, once the user has called
+ * qbman_result_has_new_result() and been returned a valid dequeue result,
+ * they should not call it again on the same memory location (except of course
+ * if another dequeue command has been executed to produce a new result to that
+ * location).
+ *
+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
+ * dequeue result.
+ */
+int qbman_result_has_new_result(struct qbman_swp *s,
+ struct qbman_result *dq);
+
+/**
+ * qbman_check_command_complete() - Check if the previous issued dq commnd
+ * is completed and results are available in memory.
+ * @s: the software portal object.
+ * @dq: the dequeue result read from the memory.
+ *
+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
+ * dequeue result.
+ */
+int qbman_check_command_complete(struct qbman_result *dq);
+
+int qbman_check_new_result(struct qbman_result *dq);
+
+/* -------------------------------------------------------- */
+/* Parsing dequeue entries (DQRR and user-provided storage) */
+/* -------------------------------------------------------- */
+
+/**
+ * qbman_result_is_DQ() - check the dequeue result is a dequeue response or not
+ * @dq: the dequeue result to be checked.
+ *
+ * DQRR entries may contain non-dequeue results, ie. notifications
+ */
+int qbman_result_is_DQ(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_SCN() - Check the dequeue result is notification or not
+ * @dq: the dequeue result to be checked.
+ *
+ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change
+ * notifications" of one type or another. Some APIs apply to all of them, of the
+ * form qbman_result_SCN_***().
+ */
+static inline int qbman_result_is_SCN(const struct qbman_result *dq)
+{
+ return !qbman_result_is_DQ(dq);
+}
+
+/* Recognise different notification types, only required if the user allows for
+ * these to occur, and cares about them when they do.
+ */
+
+/**
+ * qbman_result_is_FQDAN() - Check for FQ Data Availability
+ * @dq: the qbman_result object.
+ *
+ * Return 1 if this is FQDAN.
+ */
+int qbman_result_is_FQDAN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_CDAN() - Check for Channel Data Availability
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is CDAN.
+ */
+int qbman_result_is_CDAN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_CSCN() - Check for Congestion State Change
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is CSCN.
+ */
+int qbman_result_is_CSCN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_BPSCN() - Check for Buffer Pool State Change.
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is BPSCN.
+ */
+int qbman_result_is_BPSCN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_CGCU() - Check for Congestion Group Count Update.
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is CGCU.
+ */
+int qbman_result_is_CGCU(const struct qbman_result *dq);
+
+/* Frame queue state change notifications; (FQDAN in theory counts too as it
+ * leaves a FQ parked, but it is primarily a data availability notification)
+ */
+
+/**
+ * qbman_result_is_FQRN() - Check for FQ Retirement Notification.
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is FQRN.
+ */
+int qbman_result_is_FQRN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_FQRNI() - Check for FQ Retirement Immediate
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is FQRNI.
+ */
+int qbman_result_is_FQRNI(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_FQPN() - Check for FQ Park Notification
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is FQPN.
+ */
+int qbman_result_is_FQPN(const struct qbman_result *dq);
+
+/* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)
+ */
+/* FQ empty */
+#define QBMAN_DQ_STAT_FQEMPTY 0x80
+/* FQ held active */
+#define QBMAN_DQ_STAT_HELDACTIVE 0x40
+/* FQ force eligible */
+#define QBMAN_DQ_STAT_FORCEELIGIBLE 0x20
+/* Valid frame */
+#define QBMAN_DQ_STAT_VALIDFRAME 0x10
+/* FQ ODP enable */
+#define QBMAN_DQ_STAT_ODPVALID 0x04
+/* Volatile dequeue */
+#define QBMAN_DQ_STAT_VOLATILE 0x02
+/* volatile dequeue command is expired */
+#define QBMAN_DQ_STAT_EXPIRED 0x01
+
+#define QBMAN_EQCR_DCA_IDXMASK 0x0f
+#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)
+
+/**
+ * qbman_result_DQ_flags() - Get the STAT field of dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the state field.
+ */
+uint8_t qbman_result_DQ_flags(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_is_pull() - Check whether the dq response is from a pull
+ * command.
+ * @dq: the dequeue result.
+ *
+ * Return 1 for volatile(pull) dequeue, 0 for static dequeue.
+ */
+static inline int qbman_result_DQ_is_pull(const struct qbman_result *dq)
+{
+ return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VOLATILE);
+}
+
+/**
+ * qbman_result_DQ_is_pull_complete() - Check whether the pull command is
+ * completed.
+ * @dq: the dequeue result.
+ *
+ * Return boolean.
+ */
+static inline int qbman_result_DQ_is_pull_complete(
+ const struct qbman_result *dq)
+{
+ return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_EXPIRED);
+}
+
+/**
+ * qbman_result_DQ_seqnum() - Get the seqnum field in dequeue response
+ * seqnum is valid only if VALIDFRAME flag is TRUE
+ * @dq: the dequeue result.
+ *
+ * Return seqnum.
+ */
+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_odpid() - Get the seqnum field in dequeue response
+ * odpid is valid only if ODPVAILD flag is TRUE.
+ * @dq: the dequeue result.
+ *
+ * Return odpid.
+ */
+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_fqid() - Get the fqid in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return fqid.
+ */
+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_byte_count() - Get the byte count in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the byte count remaining in the FQ.
+ */
+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_frame_count - Get the frame count in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the frame count remaining in the FQ.
+ */
+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the frame queue context.
+ */
+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_fd() - Get the frame descriptor in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the frame descriptor.
+ */
+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);
+
+/* State-change notifications (FQDAN/CDAN/CSCN/...). */
+
+/**
+ * qbman_result_SCN_state() - Get the state field in State-change notification
+ * @scn: the state change notification.
+ *
+ * Return the state in the notifiation.
+ */
+uint8_t qbman_result_SCN_state(const struct qbman_result *scn);
+
+/**
+ * qbman_result_SCN_rid() - Get the resource id from the notification
+ * @scn: the state change notification.
+ *
+ * Return the resource id.
+ */
+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn);
+
+/**
+ * qbman_result_SCN_ctx() - get the context from the notification
+ * @scn: the state change notification.
+ *
+ * Return the context.
+ */
+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);
+
+/* Type-specific "resource IDs". Mainly for illustration purposes, though it
+ * also gives the appropriate type widths.
+ */
+/* Get the FQID from the FQDAN */
+#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)
+/* Get the FQID from the FQRN */
+#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)
+/* Get the FQID from the FQRNI */
+#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)
+/* Get the FQID from the FQPN */
+#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)
+/* Get the channel ID from the CDAN */
+#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
+/* Get the CGID from the CSCN */
+#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
+
+/**
+ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
+ * @scn: the state change notification.
+ *
+ * Return the buffer pool id.
+ */
+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn);
+
+/**
+ * qbman_result_bpscn_has_free_bufs() - Check whether there are free
+ * buffers in the pool from BPSCN.
+ * @scn: the state change notification.
+ *
+ * Return the number of free buffers.
+ */
+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn);
+
+/**
+ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
+ * buffer pool is depleted.
+ * @scn: the state change notification.
+ *
+ * Return the status of buffer pool depletion.
+ */
+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn);
+
+/**
+ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
+ * pool is surplus or not.
+ * @scn: the state change notification.
+ *
+ * Return the status of buffer pool surplus.
+ */
+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn);
+
+/**
+ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
+ * @scn: the state change notification.
+ *
+ * Return the BPSCN context.
+ */
+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);
+
+/* Parsing CGCU */
+/**
+ * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid
+ * @scn: the state change notification.
+ *
+ * Return the CGCU resource id.
+ */
+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn);
+
+/**
+ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
+ * @scn: the state change notification.
+ *
+ * Return instantaneous count in the CGCU notification.
+ */
+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
+
+ /************/
+ /* Enqueues */
+ /************/
+
+/* struct qbman_eq_desc - structure of enqueue descriptor */
+struct qbman_eq_desc {
+ union {
+ uint32_t donot_manipulate_directly[8];
+ struct eq {
+ uint8_t verb;
+ uint8_t dca;
+ uint16_t seqnum;
+ uint16_t orpid;
+ uint16_t reserved1;
+ uint32_t tgtid;
+ uint32_t tag;
+ uint16_t qdbin;
+ uint8_t qpri;
+ uint8_t reserved[3];
+ uint8_t wae;
+ uint8_t rspid;
+ uint64_t rsp_addr;
+ } eq;
+ };
+};
+
+/**
+ * struct qbman_eq_response - structure of enqueue response
+ * @donot_manipulate_directly: the 16 32bit data to represent the whole
+ * enqueue response.
+ */
+struct qbman_eq_response {
+ uint32_t donot_manipulate_directly[16];
+};
+
+/**
+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the given enqueue descriptor.
+ */
+void qbman_eq_desc_clear(struct qbman_eq_desc *d);
+
+/* Exactly one of the following descriptor "actions" should be set. (Calling
+ * any one of these will replace the effect of any prior call to one of these.)
+ * - enqueue without order-restoration
+ * - enqueue with order-restoration
+ * - fill a hole in the order-restoration sequence, without any enqueue
+ * - advance NESN (Next Expected Sequence Number), without any enqueue
+ * 'respond_success' indicates whether an enqueue response should be DMA'd
+ * after success (otherwise a response is DMA'd only after failure).
+ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
+ * be enqueued.
+ */
+
+/**
+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
+ * @d: the enqueue descriptor.
+ * @response_success: 1 = enqueue with response always; 0 = enqueue with
+ * rejections returned on a FQ.
+ */
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
+/**
+ * qbman_eq_desc_set_orp() - Set order-resotration in the enqueue descriptor
+ * @d: the enqueue descriptor.
+ * @response_success: 1 = enqueue with response always; 0 = enqueue with
+ * rejections returned on a FQ.
+ * @opr_id: the order point record id.
+ * @seqnum: the order restoration sequence number.
+ * @incomplete: indiates whether this is the last fragments using the same
+ * sequeue number.
+ */
+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
+ uint16_t opr_id, uint16_t seqnum, int incomplete);
+
+/**
+ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
+ * without any enqueue
+ * @d: the enqueue descriptor.
+ * @opr_id: the order point record id.
+ * @seqnum: the order restoration sequence number.
+ */
+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum);
+
+/**
+ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
+ * without any enqueue
+ * @d: the enqueue descriptor.
+ * @opr_id: the order point record id.
+ * @seqnum: the order restoration sequence number.
+ */
+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum);
+/**
+ * qbman_eq_desc_set_response() - Set the enqueue response info.
+ * @d: the enqueue descriptor
+ * @storage_phys: the physical address of the enqueue response in memory.
+ * @stash: indicate that the write allocation enabled or not.
+ *
+ * In the case where an enqueue response is DMA'd, this determines where that
+ * response should go. (The physical/DMA address is given for hardware's
+ * benefit, but software should interpret it as a "struct qbman_eq_response"
+ * data structure.) 'stash' controls whether or not the write to main-memory
+ * expresses a cache-warming attribute.
+ */
+void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
+ uint64_t storage_phys,
+ int stash);
+
+/**
+ * qbman_eq_desc_set_token() - Set token for the enqueue command
+ * @d: the enqueue descriptor
+ * @token: the token to be set.
+ *
+ * token is the value that shows up in an enqueue response that can be used to
+ * detect when the results have been published. The easiest technique is to zero
+ * result "storage" before issuing an enqueue, and use any non-zero 'token'
+ * value.
+ */
+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
+
+/**
+ * Exactly one of the following descriptor "targets" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - enqueue to a frame queue
+ * - enqueue to a queuing destination
+ * Note, that none of these will have any affect if the "action" type has been
+ * set to "orp_hole" or "orp_nesn".
+ */
+/**
+ * qbman_eq_desc_set_fq() - Set Frame Queue id for the enqueue command
+ * @d: the enqueue descriptor
+ * @fqid: the id of the frame queue to be enqueued.
+ */
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
+
+/**
+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command.
+ * @d: the enqueue descriptor
+ * @qdid: the id of the queuing destination to be enqueued.
+ * @qd_bin: the queuing destination bin
+ * @qd_prio: the queuing destination priority.
+ */
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
+ uint16_t qd_bin, uint8_t qd_prio);
+
+/**
+ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
+ * @d: the enqueue descriptor
+ * @enable: boolean to enable/disable EQDI
+ *
+ * Determines whether or not the portal's EQDI interrupt source should be
+ * asserted after the enqueue command is completed.
+ */
+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
+
+/**
+ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
+ * @d: the enqueue descriptor.
+ * @enable: enabled/disable DCA mode.
+ * @dqrr_idx: DCAP_CI, the DCAP consumer index.
+ * @park: determine the whether park the FQ or not
+ *
+ * Determines whether or not a portal DQRR entry should be consumed once the
+ * enqueue command is completed. (And if so, and the DQRR entry corresponds to a
+ * held-active (order-preserving) FQ, whether the FQ should be parked instead of
+ * being rescheduled.)
+ */
+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
+ uint8_t dqrr_idx, int park);
+
+/**
+ * qbman_swp_enqueue() - Issue an enqueue command.
+ * @s: the software portal used for enqueue.
+ * @d: the enqueue descriptor.
+ * @fd: the frame descriptor to be enqueued.
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd);
+/**
+ * qbman_swp_enqueue_multiple() - Enqueue multiple frames with same
+ eq descriptor
+ * @s: the software portal used for enqueue.
+ * @d: the enqueue descriptor.
+ * @fd: the frame descriptor to be enqueued.
+ * @num_frames: the number of the frames to be enqueued.
+ *
+ * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+/**
+ * qbman_swp_enqueue_multiple_desc() - Enqueue multiple frames with
+ * individual eq descriptor.
+ * @s: the software portal used for enqueue.
+ * @d: the enqueue descriptor.
+ * @fd: the frame descriptor to be enqueued.
+ * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
+ * @num_frames: the number of the frames to be enqueued.
+ *
+ * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames);
+
+/* TODO:
+ * qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
+ * @s: the software portal.
+ * @thresh: the threshold to trigger the EQRI interrupt.
+ *
+ * An EQRI interrupt can be generated when the fill-level of EQCR falls below
+ * the 'thresh' value set here. Setting thresh==0 (the default) disables.
+ */
+int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
+
+ /*******************/
+ /* Buffer releases */
+ /*******************/
+/**
+ * struct qbman_release_desc - The structure for buffer release descriptor
+ * @donot_manipulate_directly: the 32bit data to represent the whole
+ * possible settings of qbman release descriptor.
+ */
+struct qbman_release_desc {
+ union {
+ uint32_t donot_manipulate_directly[16];
+ struct br {
+ uint8_t verb;
+ uint8_t reserved;
+ uint16_t bpid;
+ uint32_t reserved2;
+ uint64_t buf[7];
+ } br;
+ };
+};
+
+/**
+ * qbman_release_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the qbman release descriptor.
+ */
+void qbman_release_desc_clear(struct qbman_release_desc *d);
+
+/**
+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
+ * @d: the qbman release descriptor.
+ */
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid);
+
+/**
+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
+ * interrupt source should be asserted after the release command is completed.
+ * @d: the qbman release descriptor.
+ */
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
+
+/**
+ * qbman_swp_release() - Issue a buffer release command.
+ * @s: the software portal object.
+ * @d: the release descriptor.
+ * @buffers: a pointer pointing to the buffer address to be released.
+ * @num_buffers: number of buffers to be released, must be less than 8.
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+ const uint64_t *buffers, unsigned int num_buffers);
+
+/* TODO:
+ * qbman_swp_release_thresh() - Set threshold for RCRI interrupt
+ * @s: the software portal.
+ * @thresh: the threshold.
+ * An RCRI interrupt can be generated when the fill-level of RCR falls below
+ * the 'thresh' value set here. Setting thresh==0 (the default) disables.
+ */
+int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);
+
+ /*******************/
+ /* Buffer acquires */
+ /*******************/
+/**
+ * qbman_swp_acquire() - Issue a buffer acquire command.
+ * @s: the software portal object.
+ * @bpid: the buffer pool index.
+ * @buffers: a pointer pointing to the acquired buffer address|es.
+ * @num_buffers: number of buffers to be acquired, must be less than 8.
+ *
+ * Return 0 for success, or negative error code if the acquire command
+ * fails.
+ */
+int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
+ unsigned int num_buffers);
+
+ /*****************/
+ /* FQ management */
+ /*****************/
+/**
+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
+ * @s: the software portal object.
+ * @fqid: the index of frame queue to be scheduled.
+ *
+ * There are a couple of different ways that a FQ can end up parked state,
+ * This schedules it.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);
+
+/**
+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
+ * @s: the software portal object.
+ * @fqid: the index of frame queue to be forced.
+ *
+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
+ * and thus be available for selection by any channel-dequeuing behaviour (push
+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
+ * empty at the time this happens, the resulting dq_entry will have no FD.
+ * (qbman_result_DQ_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
+
+/**
+ * These functions change the FQ flow-control stuff between XON/XOFF. (The
+ * default is XON.) This setting doesn't affect enqueues to the FQ, just
+ * dequeues. XOFF FQs will remain in the tenatively-scheduled state, even when
+ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
+ * changed to XOFF after it had already become truly-scheduled to a channel, and
+ * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
+ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
+ * return NULL.)
+ */
+/**
+ * qbman_swp_fq_xon() - XON the frame queue.
+ * @s: the software portal object.
+ * @fqid: the index of frame queue.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
+/**
+ * qbman_swp_fq_xoff() - XOFF the frame queue.
+ * @s: the software portal object.
+ * @fqid: the index of frame queue.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
+
+ /**********************/
+ /* Channel management */
+ /**********************/
+
+/**
+ * If the user has been allocated a channel object that is going to generate
+ * CDANs to another channel, then these functions will be necessary.
+ * CDAN-enabled channels only generate a single CDAN notification, after which
+ * it they need to be reenabled before they'll generate another. (The idea is
+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
+ * reenable step.) Each function generates a distinct command to hardware, so a
+ * combination function is provided if the user wishes to modify the "context"
+ * (which shows up in each CDAN message) each time they reenable, as a single
+ * command to hardware.
+ */
+
+/**
+ * qbman_swp_CDAN_set_context() - Set CDAN context
+ * @s: the software portal object.
+ * @channelid: the channel index.
+ * @ctx: the context to be set in CDAN.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
+ uint64_t ctx);
+
+/**
+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
+ * @s: the software portal object.
+ * @channelid: the index of the channel to generate CDAN.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);
+
+/**
+ * qbman_swp_CDAN_disable() - disable CDAN for the channel.
+ * @s: the software portal object.
+ * @channelid: the index of the channel to generate CDAN.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
+
+/**
+ * qbman_swp_CDAN_set_context_enable() - Set CDAN contest and enable CDAN
+ * @s: the software portal object.
+ * @channelid: the index of the channel to generate CDAN.
+ * @ctx: the context set in CDAN.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
+ uint64_t ctx);
+#endif /* !_FSL_QBMAN_PORTAL_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c
new file mode 100644
index 00000000..591673ab
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c
@@ -0,0 +1,66 @@
+/* Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "compat.h"
+#include <fsl_qbman_debug.h>
+#include "qbman_portal.h"
+
+/* QBMan portal management command code */
+#define QBMAN_BP_QUERY 0x32
+#define QBMAN_FQ_QUERY 0x44
+#define QBMAN_FQ_QUERY_NP 0x45
+#define QBMAN_WQ_QUERY 0x47
+#define QBMAN_CGR_QUERY 0x51
+#define QBMAN_WRED_QUERY 0x54
+#define QBMAN_CGR_STAT_QUERY 0x55
+#define QBMAN_CGR_STAT_QUERY_CLR 0x56
+
+struct qbman_fq_query_desc {
+ uint8_t verb;
+ uint8_t reserved[3];
+ uint32_t fqid;
+ uint8_t reserved2[57];
+};
+
+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
+ struct qbman_fq_query_np_rslt *r)
+{
+ struct qbman_fq_query_desc *p;
+
+ p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ p->fqid = fqid;
+ *r = *(struct qbman_fq_query_np_rslt *)qbman_swp_mc_complete(s, p,
+ QBMAN_FQ_QUERY_NP);
+ if (!r) {
+ pr_err("qbman: Query FQID %d NP fields failed, no response\n",
+ fqid);
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
+
+ /* Determine success or failure */
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
+ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
+ fqid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+uint32_t qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
+{
+ return (r->frm_cnt & 0x00FFFFFF);
+}
+
+uint32_t qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
+{
+ return r->byte_cnt;
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.c b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.c
new file mode 100644
index 00000000..07145005
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -0,0 +1,1425 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ *
+ */
+
+#include "qbman_portal.h"
+
+/* QBMan portal management command codes */
+#define QBMAN_MC_ACQUIRE 0x30
+#define QBMAN_WQCHAN_CONFIGURE 0x46
+
+/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQCR_PI 0x800
+#define QBMAN_CINH_SWP_EQCR_CI 0x840
+#define QBMAN_CINH_SWP_EQAR 0x8c0
+#define QBMAN_CINH_SWP_DQPI 0xa00
+#define QBMAN_CINH_SWP_DCAP 0xac0
+#define QBMAN_CINH_SWP_SDQCR 0xb00
+#define QBMAN_CINH_SWP_RAR 0xcc0
+#define QBMAN_CINH_SWP_ISR 0xe00
+#define QBMAN_CINH_SWP_IER 0xe40
+#define QBMAN_CINH_SWP_ISDR 0xe80
+#define QBMAN_CINH_SWP_IIR 0xec0
+#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
+#define QBMAN_CINH_SWP_ITPR 0xf40
+
+/* CENA register offsets */
+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_CR 0x600
+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
+#define QBMAN_CENA_SWP_VDQCR 0x780
+#define QBMAN_CENA_SWP_EQCR_CI 0x840
+
+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
+
+/* QBMan FQ management command codes */
+#define QBMAN_FQ_SCHEDULE 0x48
+#define QBMAN_FQ_FORCE 0x49
+#define QBMAN_FQ_XON 0x4d
+#define QBMAN_FQ_XOFF 0x4e
+
+/*******************************/
+/* Pre-defined attribute codes */
+/*******************************/
+
+#define QBMAN_RESPONSE_VERB_MASK 0x7f
+
+/*************************/
+/* SDQCR attribute codes */
+/*************************/
+#define QB_SDQCR_FC_SHIFT 29
+#define QB_SDQCR_FC_MASK 0x1
+#define QB_SDQCR_DCT_SHIFT 24
+#define QB_SDQCR_DCT_MASK 0x3
+#define QB_SDQCR_TOK_SHIFT 16
+#define QB_SDQCR_TOK_MASK 0xff
+#define QB_SDQCR_SRC_SHIFT 0
+#define QB_SDQCR_SRC_MASK 0xffff
+
+/* opaque token for static dequeues */
+#define QMAN_SDQCR_TOKEN 0xbb
+
+enum qbman_sdqcr_dct {
+ qbman_sdqcr_dct_null = 0,
+ qbman_sdqcr_dct_prio_ics,
+ qbman_sdqcr_dct_active_ics,
+ qbman_sdqcr_dct_active
+};
+
+enum qbman_sdqcr_fc {
+ qbman_sdqcr_fc_one = 0,
+ qbman_sdqcr_fc_up_to_3 = 1
+};
+
+/* We need to keep track of which SWP triggered a pull command
+ * so keep an array of portal IDs and use the token field to
+ * be able to find the proper portal
+ */
+#define MAX_QBMAN_PORTALS 64
+static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
+
+/*********************************/
+/* Portal constructor/destructor */
+/*********************************/
+
+/* Software portals should always be in the power-on state when we initialise,
+ * due to the CCSR-based portal reset functionality that MC has.
+ *
+ * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
+ * valid-bits, so we need to support a workaround where we don't trust
+ * valid-bits when detecting new entries until any stale ring entries have been
+ * overwritten at least once. The idea is that we read PI for the first few
+ * entries, then switch to valid-bit after that. The trick is to clear the
+ * bug-work-around boolean once the PI wraps around the ring for the first time.
+ *
+ * Note: this still carries a slight additional cost once the decrementer hits
+ * zero.
+ */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
+{
+ int ret;
+ uint32_t eqcr_pi;
+ struct qbman_swp *p = malloc(sizeof(*p));
+
+ if (!p)
+ return NULL;
+ p->desc = *d;
+#ifdef QBMAN_CHECKING
+ p->mc.check = swp_mc_can_start;
+#endif
+ p->mc.valid_bit = QB_VALID_BIT;
+ p->sdq = 0;
+ p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
+ p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
+ p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+
+ atomic_set(&p->vdq.busy, 1);
+ p->vdq.valid_bit = QB_VALID_BIT;
+ p->dqrr.next_idx = 0;
+ p->dqrr.valid_bit = QB_VALID_BIT;
+ if ((p->desc.qman_version & 0xFFFF0000) < QMAN_REV_4100) {
+ p->dqrr.dqrr_size = 4;
+ p->dqrr.reset_bug = 1;
+ } else {
+ p->dqrr.dqrr_size = 8;
+ p->dqrr.reset_bug = 0;
+ }
+
+ ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
+ if (ret) {
+ free(p);
+ pr_err("qbman_swp_sys_init() failed %d\n", ret);
+ return NULL;
+ }
+ /* SDQCR needs to be initialized to 0 when no channels are
+ * being dequeued from or else the QMan HW will indicate an
+ * error. The values that were calculated above will be
+ * applied when dequeues from a specific channel are enabled.
+ */
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
+ eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
+ p->eqcr.pi = eqcr_pi & 0xF;
+ p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
+ p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
+ p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
+ p->eqcr.ci, p->eqcr.pi);
+
+ portal_idx_map[p->desc.idx] = p;
+ return p;
+}
+
+void qbman_swp_finish(struct qbman_swp *p)
+{
+#ifdef QBMAN_CHECKING
+ QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
+#endif
+ qbman_swp_sys_finish(&p->sys);
+ portal_idx_map[p->desc.idx] = NULL;
+ free(p);
+}
+
+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
+{
+ return &p->desc;
+}
+
+/**************/
+/* Interrupts */
+/**************/
+
+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
+}
+
+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
+}
+
+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
+}
+
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
+}
+
+uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
+}
+
+void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
+}
+
+uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
+}
+
+void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
+}
+
+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
+}
+
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
+}
+
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
+}
+
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
+}
+
+/***********************/
+/* Management commands */
+/***********************/
+
+/*
+ * Internal code common to all types of management commands.
+ */
+
+void *qbman_swp_mc_start(struct qbman_swp *p)
+{
+ void *ret;
+#ifdef QBMAN_CHECKING
+ QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
+#endif
+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
+#ifdef QBMAN_CHECKING
+ if (!ret)
+ p->mc.check = swp_mc_can_submit;
+#endif
+ return ret;
+}
+
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
+{
+ uint8_t *v = cmd;
+#ifdef QBMAN_CHECKING
+ QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
+#endif
+ /* TBD: "|=" is going to hurt performance. Need to move as many fields
+ * out of word zero, and for those that remain, the "OR" needs to occur
+ * at the caller side. This debug check helps to catch cases where the
+ * caller wants to OR but has forgotten to do so.
+ */
+ QBMAN_BUG_ON((*v & cmd_verb) != *v);
+ *v = cmd_verb | p->mc.valid_bit;
+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
+#ifdef QBMAN_CHECKING
+ p->mc.check = swp_mc_can_poll;
+#endif
+}
+
+void *qbman_swp_mc_result(struct qbman_swp *p)
+{
+ uint32_t *ret, verb;
+#ifdef QBMAN_CHECKING
+ QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
+#endif
+ qbman_cena_invalidate_prefetch(&p->sys,
+ QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ /* Remove the valid-bit - command completed if the rest is non-zero */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+#ifdef QBMAN_CHECKING
+ p->mc.check = swp_mc_can_start;
+#endif
+ p->mc.valid_bit ^= QB_VALID_BIT;
+ return ret;
+}
+
+/***********/
+/* Enqueue */
+/***********/
+
+#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
+enum qb_enqueue_commands {
+ enqueue_empty = 0,
+ enqueue_response_always = 1,
+ enqueue_rejects_to_fq = 2
+};
+
+#define QB_ENQUEUE_CMD_EC_OPTION_MASK 0x3
+#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
+#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
+#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
+#define QB_ENQUEUE_CMD_DCA_PK_SHIFT 6
+#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
+#define QB_ENQUEUE_CMD_NLIS_SHIFT 14
+#define QB_ENQUEUE_CMD_IS_NESN_SHIFT 15
+
+void qbman_eq_desc_clear(struct qbman_eq_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
+{
+ d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
+ if (respond_success)
+ d->eq.verb |= enqueue_response_always;
+ else
+ d->eq.verb |= enqueue_rejects_to_fq;
+}
+
+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
+ uint16_t opr_id, uint16_t seqnum, int incomplete)
+{
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
+ if (respond_success)
+ d->eq.verb |= enqueue_response_always;
+ else
+ d->eq.verb |= enqueue_rejects_to_fq;
+
+ d->eq.orpid = opr_id;
+ d->eq.seqnum = seqnum;
+ if (incomplete)
+ d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
+ else
+ d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
+}
+
+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum)
+{
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
+ d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
+ d->eq.orpid = opr_id;
+ d->eq.seqnum = seqnum;
+ d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
+ d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
+}
+
+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum)
+{
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
+ d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
+ d->eq.orpid = opr_id;
+ d->eq.seqnum = seqnum;
+ d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
+ d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
+}
+
+void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
+ dma_addr_t storage_phys,
+ int stash)
+{
+ d->eq.rsp_addr = storage_phys;
+ d->eq.wae = stash;
+}
+
+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
+{
+ d->eq.rspid = token;
+}
+
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
+{
+ d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
+ d->eq.tgtid = fqid;
+}
+
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
+ uint16_t qd_bin, uint8_t qd_prio)
+{
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
+ d->eq.tgtid = qdid;
+ d->eq.qdbin = qd_bin;
+ d->eq.qpri = qd_prio;
+}
+
+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
+{
+ if (enable)
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
+ else
+ d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
+}
+
+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
+ uint8_t dqrr_idx, int park)
+{
+ if (enable) {
+ d->eq.dca = dqrr_idx;
+ if (park)
+ d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
+ else
+ d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
+ d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
+ } else {
+ d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
+ }
+}
+
+#define EQAR_IDX(eqar) ((eqar) & 0x7)
+#define EQAR_VB(eqar) ((eqar) & 0x80)
+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+
+static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
+
+ pr_debug("EQAR=%08x\n", eqar);
+ if (!EQAR_SUCCESS(eqar))
+ return -EBUSY;
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], fd, sizeof(*fd));
+ /* Set the verb byte, have to substitute in the valid-bit */
+ lwsync();
+ p[0] = cl[0] | EQAR_VB(eqar);
+ qbman_cena_write_complete_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ return 0;
+}
+
+static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci;
+ uint8_t diff;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
+ eqcr_ci, s->eqcr.ci);
+ s->eqcr.available += diff;
+ if (!diff)
+ return -EBUSY;
+ }
+
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], fd, sizeof(*fd));
+ lwsync();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ qbman_cena_write_complete_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
+ s->eqcr.pi++;
+ s->eqcr.pi &= 0xF;
+ s->eqcr.available--;
+ if (!(s->eqcr.pi & 7))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+
+ return 0;
+}
+
+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ if (s->sys.eqcr_mode == qman_eqcr_vb_array)
+ return qbman_swp_enqueue_array_mode(s, d, fd);
+ else /* Use ring mode by default */
+ return qbman_swp_enqueue_ring_mode(s, d, fd);
+}
+
+int qbman_swp_enqueue_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci, eqcr_pi;
+ uint8_t diff;
+ int i, num_enqueued = 0;
+ uint64_t addr_cena;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
+ eqcr_ci, s->eqcr.ci);
+ s->eqcr.available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ }
+
+ lwsync();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ if (!(eqcr_pi & 7))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ /* Flush all the cacheline without load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ addr_cena = (size_t)s->sys.addr_cena;
+ for (i = 0; i < num_enqueued; i++) {
+ dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ }
+ s->eqcr.pi = eqcr_pi;
+
+ return num_enqueued;
+}
+
+int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi;
+ uint8_t diff;
+ int i, num_enqueued = 0;
+ uint64_t addr_cena;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
+ eqcr_ci, s->eqcr.ci);
+ s->eqcr.available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ cl = qb_cl(&d[i]);
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ }
+
+ lwsync();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ cl = qb_cl(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ if (!(eqcr_pi & 7))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ /* Flush all the cacheline without load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ addr_cena = (size_t)s->sys.addr_cena;
+ for (i = 0; i < num_enqueued; i++) {
+ dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ }
+ s->eqcr.pi = eqcr_pi;
+
+ return num_enqueued;
+}
+
+/*************************/
+/* Static (push) dequeue */
+/*************************/
+
+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
+{
+ uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+
+ QBMAN_BUG_ON(channel_idx > 15);
+ *enabled = src | (1 << channel_idx);
+}
+
+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
+{
+ uint16_t dqsrc;
+
+ QBMAN_BUG_ON(channel_idx > 15);
+ if (enable)
+ s->sdq |= 1 << channel_idx;
+ else
+ s->sdq &= ~(1 << channel_idx);
+
+ /* Read make the complete src map. If no channels are enabled
+ * the SDQCR must be 0 or else QMan will assert errors
+ */
+ dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+ if (dqsrc != 0)
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
+ else
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
+}
+
+/***************************/
+/* Volatile (pull) dequeue */
+/***************************/
+
+/* These should be const, eventually */
+#define QB_VDQCR_VERB_DCT_SHIFT 0
+#define QB_VDQCR_VERB_DT_SHIFT 2
+#define QB_VDQCR_VERB_RLS_SHIFT 4
+#define QB_VDQCR_VERB_WAE_SHIFT 5
+
+enum qb_pull_dt_e {
+ qb_pull_dt_channel,
+ qb_pull_dt_workqueue,
+ qb_pull_dt_framequeue
+};
+
+void qbman_pull_desc_clear(struct qbman_pull_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct qbman_result *storage,
+ dma_addr_t storage_phys,
+ int stash)
+{
+ d->pull.rsp_addr_virt = (size_t)storage;
+
+ if (!storage) {
+ d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
+ return;
+ }
+ d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
+ if (stash)
+ d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
+ else
+ d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
+
+ d->pull.rsp_addr = storage_phys;
+}
+
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
+{
+ d->pull.numf = numframes - 1;
+}
+
+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
+{
+ d->pull.tok = token;
+}
+
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
+{
+ d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
+ d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
+ d->pull.dq_src = fqid;
+}
+
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
+ enum qbman_pull_type_e dct)
+{
+ d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
+ d->pull.dq_src = wqid;
+}
+
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
+ enum qbman_pull_type_e dct)
+{
+ d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
+ d->pull.dq_src = chid;
+}
+
+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ uint32_t *p;
+ uint32_t *cl = qb_cl(d);
+
+ if (!atomic_dec_and_test(&s->vdq.busy)) {
+ atomic_inc(&s->vdq.busy);
+ return -EBUSY;
+ }
+
+ d->pull.tok = s->sys.idx + 1;
+ s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
+ p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
+ memcpy(&p[1], &cl[1], 12);
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ lwsync();
+ p[0] = cl[0] | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
+
+ return 0;
+}
+
+/****************/
+/* Polling DQRR */
+/****************/
+
+#define QMAN_DQRR_PI_MASK 0xf
+
+#define QBMAN_RESULT_DQ 0x60
+#define QBMAN_RESULT_FQRN 0x21
+#define QBMAN_RESULT_FQRNI 0x22
+#define QBMAN_RESULT_FQPN 0x24
+#define QBMAN_RESULT_FQDAN 0x25
+#define QBMAN_RESULT_CDAN 0x26
+#define QBMAN_RESULT_CSCN_MEM 0x27
+#define QBMAN_RESULT_CGCU 0x28
+#define QBMAN_RESULT_BPSCN 0x29
+#define QBMAN_RESULT_CSCN_WQ 0x2a
+
+#include <rte_prefetch.h>
+
+void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
+{
+ const struct qbman_result *p;
+
+ p = qbman_cena_read_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ rte_prefetch0(p);
+}
+
+/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+ uint32_t verb;
+ uint32_t response_verb;
+ uint32_t flags;
+ const struct qbman_result *p;
+
+ /* Before using valid-bit to detect if something is there, we have to
+ * handle the case of the DQRR reset bug...
+ */
+ if (unlikely(s->dqrr.reset_bug)) {
+ /* We pick up new entries by cache-inhibited producer index,
+ * which means that a non-coherent mapping would require us to
+ * invalidate and read *only* once that PI has indicated that
+ * there's an entry here. The first trip around the DQRR ring
+ * will be much less efficient than all subsequent trips around
+ * it...
+ */
+ uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
+ QMAN_DQRR_PI_MASK;
+
+ /* there are new entries if pi != next_idx */
+ if (pi == s->dqrr.next_idx)
+ return NULL;
+
+ /* if next_idx is/was the last ring index, and 'pi' is
+ * different, we can disable the workaround as all the ring
+ * entries have now been DMA'd to so valid-bit checking is
+ * repaired. Note: this logic needs to be based on next_idx
+ * (which increments one at a time), rather than on pi (which
+ * can burst and wrap-around between our snapshots of it).
+ */
+ QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
+ pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
+ s->dqrr.next_idx, pi);
+ s->dqrr.reset_bug = 0;
+ }
+ qbman_cena_invalidate_prefetch(&s->sys,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ }
+ p = qbman_cena_read_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ verb = p->dq.verb;
+
+ /* If the valid-bit isn't of the expected polarity, nothing there. Note,
+ * in the DQRR reset bug workaround, we shouldn't need to skip these
+ * check, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
+ return NULL;
+
+ /* There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
+ s->dqrr.next_idx = 0;
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+ }
+ /* If this is the final response to a volatile dequeue command
+ * indicate that the vdq is no longer busy
+ */
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & QBMAN_DQ_STAT_VOLATILE) &&
+ (flags & QBMAN_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.busy);
+
+ return p;
+}
+
+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
+void qbman_swp_dqrr_consume(struct qbman_swp *s,
+ const struct qbman_result *dq)
+{
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
+}
+
+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
+void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
+ uint8_t dqrr_index)
+{
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
+}
+
+/*********************************/
+/* Polling user-provided storage */
+/*********************************/
+int qbman_result_has_new_result(struct qbman_swp *s,
+ struct qbman_result *dq)
+{
+ if (dq->dq.tok == 0)
+ return 0;
+
+ /*
+ * Set token to be 0 so we will detect change back to 1
+ * next time the looping is traversed. Const is cast away here
+ * as we want users to treat the dequeue responses as read only.
+ */
+ ((struct qbman_result *)dq)->dq.tok = 0;
+
+ /*
+ * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
+ * fact "VDQCR" shows busy doesn't mean that we hold the result that
+ * makes it available. Eg. we may be looking at our 10th dequeue result,
+ * having released VDQCR after the 1st result and it is now busy due to
+ * some other command!
+ */
+ if (s->vdq.storage == dq) {
+ s->vdq.storage = NULL;
+ atomic_inc(&s->vdq.busy);
+ }
+
+ return 1;
+}
+
+int qbman_check_new_result(struct qbman_result *dq)
+{
+ if (dq->dq.tok == 0)
+ return 0;
+
+ /*
+ * Set token to be 0 so we will detect change back to 1
+ * next time the looping is traversed. Const is cast away here
+ * as we want users to treat the dequeue responses as read only.
+ */
+ ((struct qbman_result *)dq)->dq.tok = 0;
+
+ return 1;
+}
+
+int qbman_check_command_complete(struct qbman_result *dq)
+{
+ struct qbman_swp *s;
+
+ if (dq->dq.tok == 0)
+ return 0;
+
+ s = portal_idx_map[dq->dq.tok - 1];
+ /*
+ * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
+ * fact "VDQCR" shows busy doesn't mean that we hold the result that
+ * makes it available. Eg. we may be looking at our 10th dequeue result,
+ * having released VDQCR after the 1st result and it is now busy due to
+ * some other command!
+ */
+ if (s->vdq.storage == dq) {
+ s->vdq.storage = NULL;
+ atomic_inc(&s->vdq.busy);
+ }
+
+ return 1;
+}
+
+/********************************/
+/* Categorising qbman results */
+/********************************/
+
+static inline int __qbman_result_is_x(const struct qbman_result *dq,
+ uint8_t x)
+{
+ uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
+
+ return (response_verb == x);
+}
+
+int qbman_result_is_DQ(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
+}
+
+int qbman_result_is_FQDAN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
+}
+
+int qbman_result_is_CDAN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
+}
+
+int qbman_result_is_CSCN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
+ __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
+}
+
+int qbman_result_is_BPSCN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
+}
+
+int qbman_result_is_CGCU(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
+}
+
+int qbman_result_is_FQRN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
+}
+
+int qbman_result_is_FQRNI(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
+}
+
+int qbman_result_is_FQPN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
+}
+
+/*********************************/
+/* Parsing frame dequeue results */
+/*********************************/
+
+/* These APIs assume qbman_result_is_DQ() is TRUE */
+
+uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
+{
+ return dq->dq.stat;
+}
+
+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
+{
+ return dq->dq.seqnum;
+}
+
+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
+{
+ return dq->dq.oprid;
+}
+
+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
+{
+ return dq->dq.fqid;
+}
+
+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
+{
+ return dq->dq.fq_byte_cnt;
+}
+
+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
+{
+ return dq->dq.fq_frm_cnt;
+}
+
+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
+{
+ return dq->dq.fqd_ctx;
+}
+
+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
+{
+ return (const struct qbman_fd *)&dq->dq.fd[0];
+}
+
+/**************************************/
+/* Parsing state-change notifications */
+/**************************************/
+uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
+{
+ return scn->scn.state;
+}
+
+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
+{
+ return scn->scn.rid_tok;
+}
+
+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
+{
+ return scn->scn.ctx;
+}
+
+/*****************/
+/* Parsing BPSCN */
+/*****************/
+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
+{
+ return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
+}
+
+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
+{
+ return !(int)(qbman_result_SCN_state(scn) & 0x1);
+}
+
+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
+{
+ return (int)(qbman_result_SCN_state(scn) & 0x2);
+}
+
+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
+{
+ return (int)(qbman_result_SCN_state(scn) & 0x4);
+}
+
+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
+{
+ return qbman_result_SCN_ctx(scn);
+}
+
+/*****************/
+/* Parsing CGCU */
+/*****************/
+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
+{
+ return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
+}
+
+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
+{
+ return qbman_result_SCN_ctx(scn);
+}
+
+/******************/
+/* Buffer release */
+/******************/
+#define QB_BR_RC_VALID_SHIFT 5
+#define QB_BR_RCDI_SHIFT 6
+
+void qbman_release_desc_clear(struct qbman_release_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+ d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
+}
+
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
+{
+ d->br.bpid = bpid;
+}
+
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
+{
+ if (enable)
+ d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
+ else
+ d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
+}
+
+#define RAR_IDX(rar) ((rar) & 0x7)
+#define RAR_VB(rar) ((rar) & 0x80)
+#define RAR_SUCCESS(rar) ((rar) & 0x100)
+
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+ const uint64_t *buffers, unsigned int num_buffers)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
+
+ pr_debug("RAR=%08x\n", rar);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+
+ QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
+
+ /* Start the release command */
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+
+ /* Copy the caller's buffer pointers to the command */
+ u64_to_le32_copy(&p[2], buffers, num_buffers);
+
+ /* Set the verb byte, have to substitute in the valid-bit and the number
+ * of buffers.
+ */
+ lwsync();
+ p[0] = cl[0] | RAR_VB(rar) | num_buffers;
+ qbman_cena_write_complete_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+
+ return 0;
+}
+
+/*******************/
+/* Buffer acquires */
+/*******************/
+struct qbman_acquire_desc {
+ uint8_t verb;
+ uint8_t reserved;
+ uint16_t bpid;
+ uint8_t num;
+ uint8_t reserved2[59];
+};
+
+struct qbman_acquire_rslt {
+ uint8_t verb;
+ uint8_t rslt;
+ uint16_t reserved;
+ uint8_t num;
+ uint8_t reserved2[3];
+ uint64_t buf[7];
+};
+
+int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
+ unsigned int num_buffers)
+{
+ struct qbman_acquire_desc *p;
+ struct qbman_acquire_rslt *r;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ p->bpid = bpid;
+ p->num = num_buffers;
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
+ if (unlikely(!r)) {
+ pr_err("qbman: acquire from BPID %d failed, no response\n",
+ bpid);
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
+ bpid, r->rslt);
+ return -EIO;
+ }
+
+ QBMAN_BUG_ON(r->num > num_buffers);
+
+ /* Copy the acquired buffers to the caller's array */
+ u64_from_le32_copy(buffers, &r->buf[0], r->num);
+
+ return (int)r->num;
+}
+
+/*****************/
+/* FQ management */
+/*****************/
+struct qbman_alt_fq_state_desc {
+ uint8_t verb;
+ uint8_t reserved[3];
+ uint32_t fqid;
+ uint8_t reserved2[56];
+};
+
+struct qbman_alt_fq_state_rslt {
+ uint8_t verb;
+ uint8_t rslt;
+ uint8_t reserved[62];
+};
+
+#define ALT_FQ_FQID_MASK 0x00FFFFFF
+
+static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
+ uint8_t alt_fq_verb)
+{
+ struct qbman_alt_fq_state_desc *p;
+ struct qbman_alt_fq_state_rslt *r;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ p->fqid = fqid & ALT_FQ_FQID_MASK;
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, alt_fq_verb);
+ if (unlikely(!r)) {
+ pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
+ alt_fq_verb);
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
+ fqid, alt_fq_verb, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
+}
+
+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
+}
+
+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
+}
+
+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
+}
+
+/**********************/
+/* Channel management */
+/**********************/
+
+struct qbman_cdan_ctrl_desc {
+ uint8_t verb;
+ uint8_t reserved;
+ uint16_t ch;
+ uint8_t we;
+ uint8_t ctrl;
+ uint16_t reserved2;
+ uint64_t cdan_ctx;
+ uint8_t reserved3[48];
+
+};
+
+struct qbman_cdan_ctrl_rslt {
+ uint8_t verb;
+ uint8_t rslt;
+ uint16_t ch;
+ uint8_t reserved[60];
+};
+
+/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
+ * would be irresponsible to expose it.
+ */
+#define CODE_CDAN_WE_EN 0x1
+#define CODE_CDAN_WE_CTX 0x4
+
+static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
+ uint8_t we_mask, uint8_t cdan_en,
+ uint64_t ctx)
+{
+ struct qbman_cdan_ctrl_desc *p;
+ struct qbman_cdan_ctrl_rslt *r;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ p->ch = channelid;
+ p->we = we_mask;
+ if (cdan_en)
+ p->ctrl = 1;
+ else
+ p->ctrl = 0;
+ p->cdan_ctx = ctx;
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
+ if (unlikely(!r)) {
+ pr_err("qbman: wqchan config failed, no response\n");
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
+ != QBMAN_WQCHAN_CONFIGURE);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("CDAN cQID %d failed: code = 0x%02x\n",
+ channelid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
+ uint64_t ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_CTX,
+ 0, ctx);
+}
+
+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 1, 0);
+}
+
+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 0, 0);
+}
+
+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
+ uint64_t ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
+ 1, ctx);
+}
+
+uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
+{
+ return QBMAN_IDX_FROM_DQRR(dqrr);
+}
+
+struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
+{
+ struct qbman_result *dq;
+
+ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
+ return dq;
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.h
new file mode 100644
index 00000000..dbea22a1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ *
+ */
+
+#include "qbman_sys.h"
+#include <fsl_qbman_portal.h>
+
+#define QMAN_REV_4000 0x04000000
+#define QMAN_REV_4100 0x04010000
+#define QMAN_REV_4101 0x04010001
+
+/* All QBMan command and result structures use this "valid bit" encoding */
+#define QB_VALID_BIT ((uint32_t)0x80)
+
+/* Management command result codes */
+#define QBMAN_MC_RSLT_OK 0xf0
+
+/* QBMan DQRR size is set at runtime in qbman_portal.c */
+
+#define QBMAN_EQCR_SIZE 8
+
+static inline uint8_t qm_cyc_diff(uint8_t ringsize, uint8_t first,
+ uint8_t last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return (2 * ringsize) + last - first;
+}
+
+/* --------------------- */
+/* portal data structure */
+/* --------------------- */
+
+struct qbman_swp {
+ struct qbman_swp_desc desc;
+ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it
+ * needs in here.
+ */
+ struct qbman_swp_sys sys;
+ /* Management commands */
+ struct {
+#ifdef QBMAN_CHECKING
+ enum swp_mc_check {
+ swp_mc_can_start, /* call __qbman_swp_mc_start() */
+ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */
+ swp_mc_can_poll, /* call __qbman_swp_mc_result() */
+ } check;
+#endif
+ uint32_t valid_bit; /* 0x00 or 0x80 */
+ } mc;
+ /* Push dequeues */
+ uint32_t sdq;
+ /* Volatile dequeues */
+ struct {
+ /* VDQCR supports a "1 deep pipeline", meaning that if you know
+ * the last-submitted command is already executing in the
+ * hardware (as evidenced by at least 1 valid dequeue result),
+ * you can write another dequeue command to the register, the
+ * hardware will start executing it as soon as the
+ * already-executing command terminates. (This minimises latency
+ * and stalls.) With that in mind, this "busy" variable refers
+ * to whether or not a command can be submitted, not whether or
+ * not a previously-submitted command is still executing. In
+ * other words, once proof is seen that the previously-submitted
+ * command is executing, "vdq" is no longer "busy".
+ */
+ atomic_t busy;
+ uint32_t valid_bit; /* 0x00 or 0x80 */
+ /* We need to determine when vdq is no longer busy. This depends
+ * on whether the "busy" (last-submitted) dequeue command is
+ * targeting DQRR or main-memory, and detected is based on the
+ * presence of the dequeue command's "token" showing up in
+ * dequeue entries in DQRR or main-memory (respectively).
+ */
+ struct qbman_result *storage; /* NULL if DQRR */
+ } vdq;
+ /* DQRR */
+ struct {
+ uint32_t next_idx;
+ uint32_t valid_bit;
+ uint8_t dqrr_size;
+ int reset_bug;
+ } dqrr;
+ struct {
+ uint32_t pi;
+ uint32_t pi_vb;
+ uint32_t ci;
+ int available;
+ } eqcr;
+};
+
+/* -------------------------- */
+/* portal management commands */
+/* -------------------------- */
+
+/* Different management commands all use this common base layer of code to issue
+ * commands and poll for results. The first function returns a pointer to where
+ * the caller should fill in their MC command (though they should ignore the
+ * verb byte), the second function commits merges in the caller-supplied command
+ * verb (which should not include the valid-bit) and submits the command to
+ * hardware, and the third function checks for a completed response (returns
+ * non-NULL if only if the response is complete).
+ */
+void *qbman_swp_mc_start(struct qbman_swp *p);
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb);
+void *qbman_swp_mc_result(struct qbman_swp *p);
+
+/* Wraps up submit + poll-for-result */
+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
+ uint8_t cmd_verb)
+{
+ int loopvar = 1000;
+
+ qbman_swp_mc_submit(swp, cmd, cmd_verb);
+ do {
+ cmd = qbman_swp_mc_result(swp);
+ } while (!cmd && loopvar--);
+ QBMAN_BUG_ON(!loopvar);
+
+ return cmd;
+}
+
+/* ---------------------- */
+/* Descriptors/cachelines */
+/* ---------------------- */
+
+/* To avoid needless dynamic allocation, the driver API often gives the caller
+ * a "descriptor" type that the caller can instantiate however they like.
+ * Ultimately though, it is just a cacheline of binary storage (or something
+ * smaller when it is known that the descriptor doesn't need all 64 bytes) for
+ * holding pre-formatted pieces of hardware commands. The performance-critical
+ * code can then copy these descriptors directly into hardware command
+ * registers more efficiently than trying to construct/format commands
+ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in
+ * order for the compiler to know its size, but the internal details are not
+ * exposed. The following macro is used within the driver for converting *any*
+ * descriptor pointer to a usable array pointer. The use of a macro (instead of
+ * an inline) is necessary to work with different descriptor types and to work
+ * correctly with const and non-const inputs (and similarly-qualified outputs).
+ */
+#define qb_cl(d) (&(d)->donot_manipulate_directly[0])
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys.h
new file mode 100644
index 00000000..2bd33ea5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys.h
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ *
+ */
+/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the
+ * driver. They are only included via qbman_private.h, which is itself a
+ * platform-independent file and is included by all the other driver source.
+ *
+ * qbman_sys_decl.h is included prior to all other declarations and logic, and
+ * it exists to provide compatibility with any linux interfaces our
+ * single-source driver code is dependent on (eg. kmalloc). Ie. this file
+ * provides linux compatibility.
+ *
+ * This qbman_sys.h header, on the other hand, is included *after* any common
+ * and platform-neutral declarations and logic in qbman_private.h, and exists to
+ * implement any platform-specific logic of the qbman driver itself. Ie. it is
+ * *not* to provide linux compatibility.
+ */
+
+#include "qbman_sys_decl.h"
+
+#define CENA_WRITE_ENABLE 0
+#define CINH_WRITE_ENABLE 1
+
+/* Debugging assists */
+static inline void __hexdump(unsigned long start, unsigned long end,
+ unsigned long p, size_t sz, const unsigned char *c)
+{
+ while (start < end) {
+ unsigned int pos = 0;
+ char buf[64];
+ int nl = 0;
+
+ pos += sprintf(buf + pos, "%08lx: ", start);
+ do {
+ if ((start < p) || (start >= (p + sz)))
+ pos += sprintf(buf + pos, "..");
+ else
+ pos += sprintf(buf + pos, "%02x", *(c++));
+ if (!(++start & 15)) {
+ buf[pos++] = '\n';
+ nl = 1;
+ } else {
+ nl = 0;
+ if (!(start & 1))
+ buf[pos++] = ' ';
+ if (!(start & 3))
+ buf[pos++] = ' ';
+ }
+ } while (start & 15);
+ if (!nl)
+ buf[pos++] = '\n';
+ buf[pos] = '\0';
+ pr_info("%s", buf);
+ }
+}
+
+static inline void hexdump(const void *ptr, size_t sz)
+{
+ unsigned long p = (unsigned long)ptr;
+ unsigned long start = p & ~15;
+ unsigned long end = (p + sz + 15) & ~15;
+ const unsigned char *c = ptr;
+
+ __hexdump(start, end, p, sz, c);
+}
+
+/* Currently, the CENA support code expects each 32-bit word to be written in
+ * host order, and these are converted to hardware (little-endian) order on
+ * command submission. However, 64-bit quantities are must be written (and read)
+ * as two 32-bit words with the least-significant word first, irrespective of
+ * host endianness.
+ */
+static inline void u64_to_le32_copy(void *d, const uint64_t *s,
+ unsigned int cnt)
+{
+ uint32_t *dd = d;
+ const uint32_t *ss = (const uint32_t *)s;
+
+ while (cnt--) {
+ /* TBD: the toolchain was choking on the use of 64-bit types up
+ * until recently so this works entirely with 32-bit variables.
+ * When 64-bit types become usable again, investigate better
+ * ways of doing this.
+ */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ *(dd++) = ss[1];
+ *(dd++) = ss[0];
+ ss += 2;
+#else
+ *(dd++) = *(ss++);
+ *(dd++) = *(ss++);
+#endif
+ }
+}
+
+static inline void u64_from_le32_copy(uint64_t *d, const void *s,
+ unsigned int cnt)
+{
+ const uint32_t *ss = s;
+ uint32_t *dd = (uint32_t *)d;
+
+ while (cnt--) {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ dd[1] = *(ss++);
+ dd[0] = *(ss++);
+ dd += 2;
+#else
+ *(dd++) = *(ss++);
+ *(dd++) = *(ss++);
+#endif
+ }
+}
+
+ /******************/
+ /* Portal access */
+ /******************/
+struct qbman_swp_sys {
+ /* On GPP, the sys support for qbman_swp is here. The CENA region isi
+ * not an mmap() of the real portal registers, but an allocated
+ * place-holder, because the actual writes/reads to/from the portal are
+ * marshalled from these allocated areas using QBMan's "MC access
+ * registers". CINH accesses are atomic so there's no need for a
+ * place-holder.
+ */
+ uint8_t *cena;
+ uint8_t __iomem *addr_cena;
+ uint8_t __iomem *addr_cinh;
+ uint32_t idx;
+ enum qbman_eqcr_mode eqcr_mode;
+};
+
+/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal
+ * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH)
+ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index
+ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal)
+ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE)
+ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete)
+ */
+
+static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset,
+ uint32_t val)
+{
+ __raw_writel(val, s->addr_cinh + offset);
+#ifdef QBMAN_CINH_TRACE
+ pr_info("qbman_cinh_write(%p:%d:0x%03x) 0x%08x\n",
+ s->addr_cinh, s->idx, offset, val);
+#endif
+}
+
+static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset)
+{
+ uint32_t reg = __raw_readl(s->addr_cinh + offset);
+#ifdef QBMAN_CINH_TRACE
+ pr_info("qbman_cinh_read(%p:%d:0x%03x) 0x%08x\n",
+ s->addr_cinh, s->idx, offset, reg);
+#endif
+ return reg;
+}
+
+static inline void *qbman_cena_write_start(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ void *shadow = s->cena + offset;
+
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_start(%p:%d:0x%03x) %p\n",
+ s->addr_cena, s->idx, offset, shadow);
+#endif
+ QBMAN_BUG_ON(offset & 63);
+ dcbz(shadow);
+ return shadow;
+}
+
+static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n",
+ s->addr_cena, s->idx, offset);
+#endif
+ QBMAN_BUG_ON(offset & 63);
+#ifdef RTE_ARCH_64
+ return (s->addr_cena + offset);
+#else
+ return (s->addr_cinh + offset);
+#endif
+}
+
+static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
+ uint32_t offset, void *cmd)
+{
+ const uint32_t *shadow = cmd;
+ int loop;
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_complete(%p:%d:0x%03x) %p\n",
+ s->addr_cena, s->idx, offset, shadow);
+ hexdump(cmd, 64);
+#endif
+#ifdef RTE_ARCH_64
+ for (loop = 15; loop >= 1; loop--)
+ __raw_writel(shadow[loop], s->addr_cena +
+ offset + loop * 4);
+ lwsync();
+ __raw_writel(shadow[0], s->addr_cena + offset);
+#else
+ for (loop = 15; loop >= 1; loop--)
+ __raw_writel(shadow[loop], s->addr_cinh +
+ offset + loop * 4);
+ lwsync();
+ __raw_writel(shadow[0], s->addr_cinh + offset);
+#endif
+ dcbf(s->addr_cena + offset);
+}
+
+static inline void qbman_cena_write_complete_wo_shadow(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_complete(%p:%d:0x%03x)\n",
+ s->addr_cena, s->idx, offset);
+#endif
+ dcbf(s->addr_cena + offset);
+}
+
+static inline uint32_t qbman_cena_read_reg(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ return __raw_readl(s->addr_cena + offset);
+}
+
+static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)
+{
+ uint32_t *shadow = (uint32_t *)(s->cena + offset);
+ unsigned int loop;
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n",
+ s->addr_cena, s->idx, offset, shadow);
+#endif
+
+#ifdef RTE_ARCH_64
+ for (loop = 0; loop < 16; loop++)
+ shadow[loop] = __raw_readl(s->addr_cena + offset
+ + loop * 4);
+#else
+ for (loop = 0; loop < 16; loop++)
+ shadow[loop] = __raw_readl(s->addr_cinh + offset
+ + loop * 4);
+#endif
+#ifdef QBMAN_CENA_TRACE
+ hexdump(shadow, 64);
+#endif
+ return shadow;
+}
+
+static inline void *qbman_cena_read_wo_shadow(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_read(%p:%d:0x%03x)\n",
+ s->addr_cena, s->idx, offset);
+#endif
+ return s->addr_cena + offset;
+}
+
+static inline void qbman_cena_invalidate(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ dccivac(s->addr_cena + offset);
+}
+
+static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ dccivac(s->addr_cena + offset);
+ prefetch_for_load(s->addr_cena + offset);
+}
+
+static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ prefetch_for_load(s->addr_cena + offset);
+}
+
+ /******************/
+ /* Portal support */
+ /******************/
+
+/* The SWP_CFG portal register is special, in that it is used by the
+ * platform-specific code rather than the platform-independent code in
+ * qbman_portal.c. So use of it is declared locally here.
+ */
+#define QBMAN_CINH_SWP_CFG 0xd00
+#define QBMAN_CINH_SWP_CFG 0xd00
+#define SWP_CFG_DQRR_MF_SHIFT 20
+#define SWP_CFG_EST_SHIFT 16
+#define SWP_CFG_WN_SHIFT 14
+#define SWP_CFG_RPM_SHIFT 12
+#define SWP_CFG_DCM_SHIFT 10
+#define SWP_CFG_EPM_SHIFT 8
+#define SWP_CFG_SD_SHIFT 5
+#define SWP_CFG_SP_SHIFT 4
+#define SWP_CFG_SE_SHIFT 3
+#define SWP_CFG_DP_SHIFT 2
+#define SWP_CFG_DE_SHIFT 1
+#define SWP_CFG_EP_SHIFT 0
+
+static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
+ uint8_t est, uint8_t rpm, uint8_t dcm,
+ uint8_t epm, int sd, int sp, int se,
+ int dp, int de, int ep)
+{
+ uint32_t reg;
+
+ reg = (max_fill << SWP_CFG_DQRR_MF_SHIFT |
+ est << SWP_CFG_EST_SHIFT |
+ wn << SWP_CFG_WN_SHIFT |
+ rpm << SWP_CFG_RPM_SHIFT |
+ dcm << SWP_CFG_DCM_SHIFT |
+ epm << SWP_CFG_EPM_SHIFT |
+ sd << SWP_CFG_SD_SHIFT |
+ sp << SWP_CFG_SP_SHIFT |
+ se << SWP_CFG_SE_SHIFT |
+ dp << SWP_CFG_DP_SHIFT |
+ de << SWP_CFG_DE_SHIFT |
+ ep << SWP_CFG_EP_SHIFT);
+
+ return reg;
+}
+
+static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
+ const struct qbman_swp_desc *d,
+ uint8_t dqrr_size)
+{
+ uint32_t reg;
+#ifdef RTE_ARCH_64
+ uint8_t wn = CENA_WRITE_ENABLE;
+#else
+ uint8_t wn = CINH_WRITE_ENABLE;
+#endif
+
+ s->addr_cena = d->cena_bar;
+ s->addr_cinh = d->cinh_bar;
+ s->idx = (uint32_t)d->idx;
+ s->cena = malloc(4096);
+ if (!s->cena) {
+ pr_err("Could not allocate page for cena shadow\n");
+ return -1;
+ }
+ s->eqcr_mode = d->eqcr_mode;
+ QBMAN_BUG_ON(d->idx < 0);
+#ifdef QBMAN_CHECKING
+ /* We should never be asked to initialise for a portal that isn't in
+ * the power-on state. (Ie. don't forget to reset portals when they are
+ * decommissioned!)
+ */
+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
+ QBMAN_BUG_ON(reg);
+#endif
+ if (s->eqcr_mode == qman_eqcr_vb_array)
+ reg = qbman_set_swp_cfg(dqrr_size, wn, 0, 3, 2, 3, 1, 1, 1, 1,
+ 1, 1);
+ else
+ reg = qbman_set_swp_cfg(dqrr_size, wn, 1, 3, 2, 2, 1, 1, 1, 1,
+ 1, 1);
+ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
+ if (!reg) {
+ pr_err("The portal %d is not enabled!\n", s->idx);
+ free(s->cena);
+ return -1;
+ }
+ return 0;
+}
+
+static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
+{
+ free(s->cena);
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys_decl.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys_decl.h
new file mode 100644
index 00000000..fa6977fe
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys_decl.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ *
+ */
+#include <compat.h>
+#include <fsl_qbman_base.h>
+
+/* Sanity check */
+#if (__BYTE_ORDER__ != __ORDER_BIG_ENDIAN__) && \
+ (__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__)
+#error "Unknown endianness!"
+#endif
+
+ /****************/
+ /* arch assists */
+ /****************/
+#if defined(RTE_ARCH_ARM64)
+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
+#define lwsync() { asm volatile("dmb st" : : : "memory"); }
+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
+#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }
+static inline void prefetch_for_load(void *p)
+{
+ asm volatile("prfm pldl1keep, [%0, #0]" : : "r" (p));
+}
+
+static inline void prefetch_for_store(void *p)
+{
+ asm volatile("prfm pstl1keep, [%0, #0]" : : "r" (p));
+}
+#elif defined(RTE_ARCH_ARM)
+#define dcbz(p) memset(p, 0, 64)
+#define lwsync() { asm volatile("dmb st" : : : "memory"); }
+#define dcbf(p) RTE_SET_USED(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define prefetch_for_load(p) { asm volatile ("pld [%0]" : : "r" (p)); }
+#define prefetch_for_store(p) { asm volatile ("pld [%0]" : : "r" (p)); }
+
+#else
+#define dcbz(p) RTE_SET_USED(p)
+#define lwsync()
+#define dcbf(p) RTE_SET_USED(p)
+#define dccivac(p) RTE_SET_USED(p)
+static inline void prefetch_for_load(void *p)
+{
+ RTE_SET_USED(p);
+}
+static inline void prefetch_for_store(void *p)
+{
+ RTE_SET_USED(p);
+}
+#endif
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/rte_bus_fslmc_version.map b/src/spdk/dpdk/drivers/bus/fslmc/rte_bus_fslmc_version.map
new file mode 100644
index 00000000..fe45a113
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -0,0 +1,118 @@
+DPDK_17.05 {
+ global:
+
+ dpaa2_affine_qbman_swp;
+ dpaa2_alloc_dpbp_dev;
+ dpaa2_alloc_dq_storage;
+ dpaa2_free_dpbp_dev;
+ dpaa2_free_dq_storage;
+ dpbp_disable;
+ dpbp_enable;
+ dpbp_get_attributes;
+ dpbp_get_num_free_bufs;
+ dpbp_open;
+ dpbp_reset;
+ dpio_close;
+ dpio_disable;
+ dpio_enable;
+ dpio_get_attributes;
+ dpio_open;
+ dpio_reset;
+ dpio_set_stashing_destination;
+ mc_send_command;
+ per_lcore__dpaa2_io;
+ qbman_check_command_complete;
+ qbman_eq_desc_clear;
+ qbman_eq_desc_set_fq;
+ qbman_eq_desc_set_no_orp;
+ qbman_eq_desc_set_qd;
+ qbman_eq_desc_set_response;
+ qbman_pull_desc_clear;
+ qbman_pull_desc_set_fq;
+ qbman_pull_desc_set_numframes;
+ qbman_pull_desc_set_storage;
+ qbman_release_desc_clear;
+ qbman_release_desc_set_bpid;
+ qbman_result_DQ_fd;
+ qbman_result_DQ_flags;
+ qbman_result_has_new_result;
+ qbman_swp_acquire;
+ qbman_swp_pull;
+ qbman_swp_release;
+ rte_fslmc_driver_register;
+ rte_fslmc_driver_unregister;
+ rte_fslmc_vfio_dmamap;
+ rte_mcp_ptr_list;
+
+ local: *;
+};
+
+DPDK_17.08 {
+ global:
+
+ dpaa2_io_portal;
+ dpaa2_get_qbman_swp;
+ dpci_set_rx_queue;
+ dpcon_open;
+ dpcon_get_attributes;
+ dpio_add_static_dequeue_channel;
+ dpio_remove_static_dequeue_channel;
+ mc_get_soc_version;
+ mc_get_version;
+ qbman_check_new_result;
+ qbman_eq_desc_set_dca;
+ qbman_get_dqrr_from_idx;
+ qbman_get_dqrr_idx;
+ qbman_result_DQ_fqd_ctx;
+ qbman_result_SCN_state;
+ qbman_swp_dqrr_consume;
+ qbman_swp_dqrr_next;
+ qbman_swp_enqueue_multiple;
+ qbman_swp_enqueue_multiple_desc;
+ qbman_swp_interrupt_clear_status;
+ qbman_swp_push_set;
+ rte_dpaa2_alloc_dpci_dev;
+ rte_fslmc_object_register;
+ rte_global_active_dqs_list;
+
+} DPDK_17.05;
+
+DPDK_17.11 {
+ global:
+
+ dpaa2_dpbp_supported;
+ rte_dpaa2_dev_type;
+ rte_dpaa2_intr_disable;
+ rte_dpaa2_intr_enable;
+
+} DPDK_17.08;
+
+DPDK_18.02 {
+ global:
+
+ dpaa2_svr_family;
+ dpaa2_virt_mode;
+ per_lcore_dpaa2_held_bufs;
+ qbman_fq_query_state;
+ qbman_fq_state_frame_count;
+ qbman_swp_dqrr_idx_consume;
+ qbman_swp_prefetch_dqrr_next;
+ rte_fslmc_get_device_count;
+
+} DPDK_17.11;
+
+DPDK_18.05 {
+ global:
+
+ dpaa2_affine_qbman_ethrx_swp;
+ dpdmai_close;
+ dpdmai_disable;
+ dpdmai_enable;
+ dpdmai_get_attributes;
+ dpdmai_get_rx_queue;
+ dpdmai_get_tx_queue;
+ dpdmai_open;
+ dpdmai_set_rx_queue;
+ rte_dpaa2_free_dpci_dev;
+
+} DPDK_18.02;
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/rte_fslmc.h b/src/spdk/dpdk/drivers/bus/fslmc/rte_fslmc.h
new file mode 100644
index 00000000..cea5b78f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/rte_fslmc.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _RTE_FSLMC_H_
+#define _RTE_FSLMC_H_
+
+/**
+ * @file
+ *
+ * RTE FSLMC Bus Interface
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <linux/vfio.h>
+
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_dev.h>
+#include <rte_bus.h>
+#include <rte_tailq.h>
+#include <rte_devargs.h>
+
+#include <fslmc_vfio.h>
+
+#define FSLMC_OBJECT_MAX_LEN 32 /**< Length of each device on bus */
+
+
+/** Device driver supports link state interrupt */
+#define RTE_DPAA2_DRV_INTR_LSC 0x0008
+
+/** Device driver supports IOVA as VA */
+#define RTE_DPAA2_DRV_IOVA_AS_VA 0X0040
+
+struct rte_dpaa2_driver;
+
+/* DPAA2 Device and Driver lists for FSLMC bus */
+TAILQ_HEAD(rte_fslmc_device_list, rte_dpaa2_device);
+TAILQ_HEAD(rte_fslmc_driver_list, rte_dpaa2_driver);
+
+#define RTE_DEV_TO_FSLMC_CONST(ptr) \
+ container_of(ptr, const struct rte_dpaa2_device, device)
+
+extern struct rte_fslmc_bus rte_fslmc_bus;
+
+enum rte_dpaa2_dev_type {
+ /* Devices backed by DPDK driver */
+ DPAA2_ETH, /**< DPNI type device*/
+ DPAA2_CRYPTO, /**< DPSECI type device */
+ DPAA2_CON, /**< DPCONC type device */
+ /* Devices not backed by a DPDK driver: DPIO, DPBP, DPCI, DPMCP */
+ DPAA2_BPOOL, /**< DPBP type device */
+ DPAA2_IO, /**< DPIO type device */
+ DPAA2_CI, /**< DPCI type device */
+ DPAA2_MPORTAL, /**< DPMCP type device */
+ DPAA2_QDMA, /**< DPDMAI type device */
+ /* Unknown device placeholder */
+ DPAA2_UNKNOWN,
+ DPAA2_DEVTYPE_MAX,
+};
+
+TAILQ_HEAD(rte_dpaa2_object_list, rte_dpaa2_object);
+
+typedef int (*rte_dpaa2_obj_create_t)(int vdev_fd,
+ struct vfio_device_info *obj_info,
+ int object_id);
+
+/**
+ * A structure describing a DPAA2 object.
+ */
+struct rte_dpaa2_object {
+ TAILQ_ENTRY(rte_dpaa2_object) next; /**< Next in list. */
+ const char *name; /**< Name of Object. */
+ enum rte_dpaa2_dev_type dev_type; /**< Type of device */
+ rte_dpaa2_obj_create_t create;
+};
+
+/**
+ * A structure describing a DPAA2 device.
+ */
+struct rte_dpaa2_device {
+ TAILQ_ENTRY(rte_dpaa2_device) next; /**< Next probed DPAA2 device. */
+ struct rte_device device; /**< Inherit core device */
+ union {
+ struct rte_eth_dev *eth_dev; /**< ethernet device */
+ struct rte_cryptodev *cryptodev; /**< Crypto Device */
+ struct rte_rawdev *rawdev; /**< Raw Device */
+ };
+ enum rte_dpaa2_dev_type dev_type; /**< Device Type */
+ uint16_t object_id; /**< DPAA2 Object ID */
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_dpaa2_driver *driver; /**< Associated driver */
+ char name[FSLMC_OBJECT_MAX_LEN]; /**< DPAA2 Object name*/
+};
+
+typedef int (*rte_dpaa2_probe_t)(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev);
+typedef int (*rte_dpaa2_remove_t)(struct rte_dpaa2_device *dpaa2_dev);
+
+/**
+ * A structure describing a DPAA2 driver.
+ */
+struct rte_dpaa2_driver {
+ TAILQ_ENTRY(rte_dpaa2_driver) next; /**< Next in list. */
+ struct rte_driver driver; /**< Inherit core driver. */
+ struct rte_fslmc_bus *fslmc_bus; /**< FSLMC bus reference */
+ uint32_t drv_flags; /**< Flags for controlling device.*/
+ enum rte_dpaa2_dev_type drv_type; /**< Driver Type */
+ rte_dpaa2_probe_t probe;
+ rte_dpaa2_remove_t remove;
+};
+
+/*
+ * FSLMC bus
+ */
+struct rte_fslmc_bus {
+ struct rte_bus bus; /**< Generic Bus object */
+ struct rte_fslmc_device_list device_list;
+ /**< FSLMC DPAA2 Device list */
+ struct rte_fslmc_driver_list driver_list;
+ /**< FSLMC DPAA2 Driver list */
+ int device_count[DPAA2_DEVTYPE_MAX];
+ /**< Count of all devices scanned */
+};
+
+#define DPAA2_PORTAL_DEQUEUE_DEPTH 32
+
+/* Create storage for dqrr entries per lcore */
+struct dpaa2_portal_dqrr {
+ struct rte_mbuf *mbuf[DPAA2_PORTAL_DEQUEUE_DEPTH];
+ uint64_t dqrr_held;
+ uint8_t dqrr_size;
+};
+
+RTE_DECLARE_PER_LCORE(struct dpaa2_portal_dqrr, dpaa2_held_bufs);
+
+#define DPAA2_PER_LCORE_DQRR_SIZE \
+ RTE_PER_LCORE(dpaa2_held_bufs).dqrr_size
+#define DPAA2_PER_LCORE_DQRR_HELD \
+ RTE_PER_LCORE(dpaa2_held_bufs).dqrr_held
+#define DPAA2_PER_LCORE_DQRR_MBUF(i) \
+ RTE_PER_LCORE(dpaa2_held_bufs).mbuf[i]
+
+/**
+ * Register a DPAA2 driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa2_driver structure describing the driver
+ * to be registered.
+ */
+void rte_fslmc_driver_register(struct rte_dpaa2_driver *driver);
+
+/**
+ * Unregister a DPAA2 driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa2_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver);
+
+/** Helper for DPAA2 device registration from driver (eth, crypto) instance */
+#define RTE_PMD_REGISTER_DPAA2(nm, dpaa2_drv) \
+RTE_INIT(dpaa2initfn_ ##nm) \
+{\
+ (dpaa2_drv).driver.name = RTE_STR(nm);\
+ rte_fslmc_driver_register(&dpaa2_drv); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+/**
+ * Register a DPAA2 MC Object driver.
+ *
+ * @param mc_object
+ * A pointer to a rte_dpaa_object structure describing the mc object
+ * to be registered.
+ */
+void rte_fslmc_object_register(struct rte_dpaa2_object *object);
+
+/**
+ * Count of a particular type of DPAA2 device scanned on the bus.
+ *
+ * @param dev_type
+ * Type of device as rte_dpaa2_dev_type enumerator
+ * @return
+ * >=0 for count; 0 indicates either no device of the said type scanned or
+ * invalid device type.
+ */
+uint32_t rte_fslmc_get_device_count(enum rte_dpaa2_dev_type device_type);
+
+/** Helper for DPAA2 object registration */
+#define RTE_PMD_REGISTER_DPAA2_OBJECT(nm, dpaa2_obj) \
+RTE_INIT(dpaa2objinitfn_ ##nm) \
+{\
+ (dpaa2_obj).name = RTE_STR(nm);\
+ rte_fslmc_object_register(&dpaa2_obj); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_FSLMC_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/Makefile b/src/spdk/dpdk/drivers/bus/ifpga/Makefile
new file mode 100644
index 00000000..3ff3bdb8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_bus_ifpga.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_kvargs
+
+# versioning export map
+EXPORT_MAP := rte_bus_ifpga_version.map
+
+# library version
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_bus.c
+SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_common.c
+
+#
+# Export include files
+#
+SYMLINK-$(CONFIG_RTE_LIBRTE_IFPGA_BUS)-include += rte_bus_ifpga.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/ifpga_bus.c b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_bus.c
new file mode 100644
index 00000000..b324872e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_bus.c
@@ -0,0 +1,467 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <rte_errno.h>
+#include <rte_bus.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+#include <rte_alarm.h>
+#include <rte_string_fns.h>
+
+#include "rte_rawdev.h"
+#include "rte_rawdev_pmd.h"
+#include "rte_bus_ifpga.h"
+#include "ifpga_logs.h"
+#include "ifpga_common.h"
+
+int ifpga_bus_logtype;
+
+/* Forward declaration to access Intel FPGA bus
+ * on which iFPGA devices are connected
+ */
+static struct rte_bus rte_ifpga_bus;
+
+static struct ifpga_afu_dev_list ifpga_afu_dev_list =
+ TAILQ_HEAD_INITIALIZER(ifpga_afu_dev_list);
+static struct ifpga_afu_drv_list ifpga_afu_drv_list =
+ TAILQ_HEAD_INITIALIZER(ifpga_afu_drv_list);
+
+
+/* register a ifpga bus based driver */
+void rte_ifpga_driver_register(struct rte_afu_driver *driver)
+{
+ RTE_VERIFY(driver);
+
+ TAILQ_INSERT_TAIL(&ifpga_afu_drv_list, driver, next);
+}
+
+/* un-register a fpga bus based driver */
+void rte_ifpga_driver_unregister(struct rte_afu_driver *driver)
+{
+ TAILQ_REMOVE(&ifpga_afu_drv_list, driver, next);
+}
+
+static struct rte_afu_device *
+ifpga_find_afu_dev(const struct rte_rawdev *rdev,
+ const struct rte_afu_id *afu_id)
+{
+ struct rte_afu_device *afu_dev = NULL;
+
+ TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+ if (afu_dev &&
+ afu_dev->rawdev == rdev &&
+ !ifpga_afu_id_cmp(&afu_dev->id, afu_id))
+ return afu_dev;
+ }
+ return NULL;
+}
+
+static const char * const valid_args[] = {
+#define IFPGA_ARG_NAME "ifpga"
+ IFPGA_ARG_NAME,
+#define IFPGA_ARG_PORT "port"
+ IFPGA_ARG_PORT,
+#define IFPGA_AFU_BTS "afu_bts"
+ IFPGA_AFU_BTS,
+ NULL
+};
+
+/*
+ * Scan the content of the FPGA bus, and the devices in the devices
+ * list
+ */
+static struct rte_afu_device *
+ifpga_scan_one(struct rte_rawdev *rawdev,
+ struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist = NULL;
+ struct rte_afu_device *afu_dev = NULL;
+ struct rte_afu_pr_conf afu_pr_conf;
+ int ret = 0;
+ char *path = NULL;
+
+ memset(&afu_pr_conf, 0, sizeof(struct rte_afu_pr_conf));
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (!kvlist) {
+ IFPGA_BUS_ERR("error when parsing param");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_PORT) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_ARG_PORT,
+ &rte_ifpga_get_integer32_arg, &afu_pr_conf.afu_id.port) < 0) {
+ IFPGA_BUS_ERR("error to parse %s",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+ } else {
+ IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_AFU_BTS) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_AFU_BTS,
+ &rte_ifpga_get_string_arg, &path) < 0) {
+ IFPGA_BUS_ERR("Failed to parse %s",
+ IFPGA_AFU_BTS);
+ goto end;
+ }
+ } else {
+ IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_AFU_BTS);
+ goto end;
+ }
+
+ afu_pr_conf.afu_id.uuid.uuid_low = 0;
+ afu_pr_conf.afu_id.uuid.uuid_high = 0;
+ afu_pr_conf.pr_enable = path?1:0;
+
+ if (ifpga_find_afu_dev(rawdev, &afu_pr_conf.afu_id))
+ goto end;
+
+ afu_dev = calloc(1, sizeof(*afu_dev));
+ if (!afu_dev)
+ goto end;
+
+ afu_dev->device.devargs = devargs;
+ afu_dev->device.numa_node = SOCKET_ID_ANY;
+ afu_dev->device.name = devargs->name;
+ afu_dev->rawdev = rawdev;
+ afu_dev->id.uuid.uuid_low = 0;
+ afu_dev->id.uuid.uuid_high = 0;
+ afu_dev->id.port = afu_pr_conf.afu_id.port;
+
+ if (rawdev->dev_ops && rawdev->dev_ops->dev_info_get)
+ rawdev->dev_ops->dev_info_get(rawdev, afu_dev);
+
+ if (rawdev->dev_ops &&
+ rawdev->dev_ops->dev_start &&
+ rawdev->dev_ops->dev_start(rawdev))
+ goto end;
+
+ strlcpy(afu_pr_conf.bs_path, path, sizeof(afu_pr_conf.bs_path));
+ if (rawdev->dev_ops &&
+ rawdev->dev_ops->firmware_load &&
+ rawdev->dev_ops->firmware_load(rawdev,
+ &afu_pr_conf)){
+ IFPGA_BUS_ERR("firmware load error %d\n", ret);
+ goto end;
+ }
+ afu_dev->id.uuid.uuid_low = afu_pr_conf.afu_id.uuid.uuid_low;
+ afu_dev->id.uuid.uuid_high = afu_pr_conf.afu_id.uuid.uuid_high;
+
+ rte_kvargs_free(kvlist);
+ free(path);
+ return afu_dev;
+
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (path)
+ free(path);
+ if (afu_dev)
+ free(afu_dev);
+
+ return NULL;
+}
+
+/*
+ * Scan the content of the FPGA bus, and the devices in the devices
+ * list
+ */
+static int
+ifpga_scan(void)
+{
+ struct rte_devargs *devargs;
+ struct rte_kvargs *kvlist = NULL;
+ struct rte_rawdev *rawdev = NULL;
+ char *name = NULL;
+ char name1[RTE_RAWDEV_NAME_MAX_LEN];
+ struct rte_afu_device *afu_dev = NULL;
+
+ /* for FPGA devices we scan the devargs_list populated via cmdline */
+ RTE_EAL_DEVARGS_FOREACH(IFPGA_ARG_NAME, devargs) {
+ if (devargs->bus != &rte_ifpga_bus)
+ continue;
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (!kvlist) {
+ IFPGA_BUS_ERR("error when parsing param");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_NAME) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_ARG_NAME,
+ &rte_ifpga_get_string_arg, &name) < 0) {
+ IFPGA_BUS_ERR("error to parse %s",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+ } else {
+ IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+
+ memset(name1, 0, sizeof(name1));
+ snprintf(name1, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", name);
+
+ rawdev = rte_rawdev_pmd_get_named_dev(name1);
+ if (!rawdev)
+ goto end;
+
+ afu_dev = ifpga_scan_one(rawdev, devargs);
+ if (afu_dev != NULL)
+ TAILQ_INSERT_TAIL(&ifpga_afu_dev_list, afu_dev, next);
+ }
+
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (name)
+ free(name);
+
+ return 0;
+}
+
+/*
+ * Match the AFU Driver and AFU Device using the ID Table
+ */
+static int
+rte_afu_match(const struct rte_afu_driver *afu_drv,
+ const struct rte_afu_device *afu_dev)
+{
+ const struct rte_afu_uuid *id_table;
+
+ for (id_table = afu_drv->id_table;
+ ((id_table->uuid_low != 0) && (id_table->uuid_high != 0));
+ id_table++) {
+ /* check if device's identifiers match the driver's ones */
+ if ((id_table->uuid_low != afu_dev->id.uuid.uuid_low) ||
+ id_table->uuid_high !=
+ afu_dev->id.uuid.uuid_high)
+ continue;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+ifpga_probe_one_driver(struct rte_afu_driver *drv,
+ struct rte_afu_device *afu_dev)
+{
+ int ret;
+
+ if (!rte_afu_match(drv, afu_dev))
+ /* Match of device and driver failed */
+ return 1;
+
+ /* reference driver structure */
+ afu_dev->driver = drv;
+ afu_dev->device.driver = &drv->driver;
+
+ /* call the driver probe() function */
+ ret = drv->probe(afu_dev);
+ if (ret) {
+ afu_dev->driver = NULL;
+ afu_dev->device.driver = NULL;
+ }
+
+ return ret;
+}
+
+static int
+ifpga_probe_all_drivers(struct rte_afu_device *afu_dev)
+{
+ struct rte_afu_driver *drv = NULL;
+ int ret = 0;
+
+ if (afu_dev == NULL)
+ return -1;
+
+ /* Check if a driver is already loaded */
+ if (afu_dev->driver != NULL)
+ return 0;
+
+ TAILQ_FOREACH(drv, &ifpga_afu_drv_list, next) {
+ if (ifpga_probe_one_driver(drv, afu_dev)) {
+ ret = -1;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * Scan the content of the Intel FPGA bus, and call the probe() function for
+ * all registered drivers that have a matching entry in its id_table
+ * for discovered devices.
+ */
+static int
+ifpga_probe(void)
+{
+ struct rte_afu_device *afu_dev = NULL;
+ int ret = 0;
+
+ TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+ if (afu_dev->device.driver)
+ continue;
+
+ ret = ifpga_probe_all_drivers(afu_dev);
+ if (ret < 0)
+ IFPGA_BUS_ERR("failed to initialize %s device\n",
+ rte_ifpga_device_name(afu_dev));
+ }
+
+ return ret;
+}
+
+static int
+ifpga_plug(struct rte_device *dev)
+{
+ return ifpga_probe_all_drivers(RTE_DEV_TO_AFU(dev));
+}
+
+static int
+ifpga_remove_driver(struct rte_afu_device *afu_dev)
+{
+ const char *name;
+ const struct rte_afu_driver *driver;
+
+ name = rte_ifpga_device_name(afu_dev);
+ if (!afu_dev->device.driver) {
+ IFPGA_BUS_DEBUG("no driver attach to device %s\n", name);
+ return 1;
+ }
+
+ driver = RTE_DRV_TO_AFU_CONST(afu_dev->device.driver);
+ return driver->remove(afu_dev);
+}
+
+static int
+ifpga_unplug(struct rte_device *dev)
+{
+ struct rte_afu_device *afu_dev = NULL;
+ struct rte_devargs *devargs = NULL;
+ int ret;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ afu_dev = RTE_DEV_TO_AFU(dev);
+ if (!afu_dev)
+ return -ENOENT;
+
+ devargs = dev->devargs;
+
+ ret = ifpga_remove_driver(afu_dev);
+ if (ret)
+ return ret;
+
+ TAILQ_REMOVE(&ifpga_afu_dev_list, afu_dev, next);
+
+ rte_devargs_remove(devargs->bus->name, devargs->name);
+ free(afu_dev);
+ return 0;
+
+}
+
+static struct rte_device *
+ifpga_find_device(const struct rte_device *start,
+ rte_dev_cmp_t cmp, const void *data)
+{
+ struct rte_afu_device *afu_dev;
+
+ TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+ if (start && &afu_dev->device == start) {
+ start = NULL;
+ continue;
+ }
+ if (cmp(&afu_dev->device, data) == 0)
+ return &afu_dev->device;
+ }
+
+ return NULL;
+}
+static int
+ifpga_parse(const char *name, void *addr)
+{
+ int *out = addr;
+ struct rte_rawdev *rawdev = NULL;
+ char rawdev_name[RTE_RAWDEV_NAME_MAX_LEN];
+ char *c1 = NULL;
+ char *c2 = NULL;
+ int port = IFPGA_BUS_DEV_PORT_MAX;
+ char str_port[8];
+ int str_port_len = 0;
+ int ret;
+
+ memset(str_port, 0, 8);
+ c1 = strchr(name, '|');
+ if (c1 != NULL) {
+ str_port_len = c1 - name;
+ c2 = c1 + 1;
+ }
+
+ if (str_port_len < 8 &&
+ str_port_len > 0) {
+ memcpy(str_port, name, str_port_len);
+ ret = sscanf(str_port, "%d", &port);
+ if (ret == -1)
+ return 0;
+ }
+
+ memset(rawdev_name, 0, sizeof(rawdev_name));
+ snprintf(rawdev_name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", c2);
+ rawdev = rte_rawdev_pmd_get_named_dev(rawdev_name);
+
+ if ((port < IFPGA_BUS_DEV_PORT_MAX) &&
+ rawdev &&
+ (addr != NULL))
+ *out = port;
+
+ if ((port < IFPGA_BUS_DEV_PORT_MAX) &&
+ rawdev)
+ return 0;
+ else
+ return 1;
+}
+
+static struct rte_bus rte_ifpga_bus = {
+ .scan = ifpga_scan,
+ .probe = ifpga_probe,
+ .find_device = ifpga_find_device,
+ .plug = ifpga_plug,
+ .unplug = ifpga_unplug,
+ .parse = ifpga_parse,
+};
+
+RTE_REGISTER_BUS(IFPGA_BUS_NAME, rte_ifpga_bus);
+
+RTE_INIT(ifpga_init_log)
+{
+ ifpga_bus_logtype = rte_log_register("bus.ifpga");
+ if (ifpga_bus_logtype >= 0)
+ rte_log_set_level(ifpga_bus_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.c b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.c
new file mode 100644
index 00000000..78e2eaee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.c
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <rte_errno.h>
+#include <rte_bus.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+#include <rte_alarm.h>
+
+#include "rte_bus_ifpga.h"
+#include "ifpga_logs.h"
+#include "ifpga_common.h"
+
+int rte_ifpga_get_string_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(char **)extra_args = strdup(value);
+
+ if (!*(char **)extra_args)
+ return -ENOMEM;
+
+ return 0;
+}
+int rte_ifpga_get_integer32_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(int *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+int ifpga_get_integer64_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(uint64_t *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+int ifpga_get_unsigned_long(const char *str, int base)
+{
+ unsigned long num;
+ char *end = NULL;
+
+ errno = 0;
+
+ num = strtoul(str, &end, base);
+ if ((str[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
+ return -1;
+
+ return num;
+}
+
+int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
+ const struct rte_afu_id *afu_id1)
+{
+ if ((afu_id0->uuid.uuid_low == afu_id1->uuid.uuid_low) &&
+ (afu_id0->uuid.uuid_high == afu_id1->uuid.uuid_high) &&
+ (afu_id0->port == afu_id1->port)) {
+ return 0;
+ } else
+ return 1;
+}
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.h b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.h
new file mode 100644
index 00000000..f9254b9d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_COMMON_H_
+#define _IFPGA_COMMON_H_
+
+int rte_ifpga_get_string_arg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+int rte_ifpga_get_integer32_arg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+int ifpga_get_integer64_arg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+int ifpga_get_unsigned_long(const char *str, int base);
+int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
+ const struct rte_afu_id *afu_id1);
+
+#endif /* _IFPGA_COMMON_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/ifpga_logs.h b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_logs.h
new file mode 100644
index 00000000..873e0a4f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_logs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_LOGS_H_
+#define _IFPGA_LOGS_H_
+
+#include <rte_log.h>
+
+extern int ifpga_bus_logtype;
+
+#define IFPGA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define IFPGA_BUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define IFPGA_BUS_FUNC_TRACE() IFPGA_BUS_LOG(DEBUG, ">>")
+
+#define IFPGA_BUS_DEBUG(fmt, args...) \
+ IFPGA_BUS_LOG(DEBUG, fmt, ## args)
+#define IFPGA_BUS_INFO(fmt, args...) \
+ IFPGA_BUS_LOG(INFO, fmt, ## args)
+#define IFPGA_BUS_ERR(fmt, args...) \
+ IFPGA_BUS_LOG(ERR, fmt, ## args)
+#define IFPGA_BUS_WARN(fmt, args...) \
+ IFPGA_BUS_LOG(WARNING, fmt, ## args)
+
+#endif /* _IFPGA_BUS_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/meson.build b/src/spdk/dpdk/drivers/bus/ifpga/meson.build
new file mode 100644
index 00000000..c9b08c86
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2018 Intel Corporation
+
+deps += ['pci', 'kvargs', 'rawdev']
+install_headers('rte_bus_ifpga.h')
+sources = files('ifpga_common.c', 'ifpga_bus.c')
+
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga.h b/src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga.h
new file mode 100644
index 00000000..51d5ae0d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _RTE_BUS_IFPGA_H_
+#define _RTE_BUS_IFPGA_H_
+
+/**
+ * @file
+ *
+ * RTE Intel FPGA Bus Interface
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_bus.h>
+#include <rte_pci.h>
+
+/** Name of Intel FPGA Bus */
+#define IFPGA_BUS_NAME ifpga
+
+/* Forward declarations */
+struct rte_afu_device;
+struct rte_afu_driver;
+
+/** Double linked list of Intel FPGA AFU device. */
+TAILQ_HEAD(ifpga_afu_dev_list, rte_afu_device);
+/** Double linked list of Intel FPGA AFU device drivers. */
+TAILQ_HEAD(ifpga_afu_drv_list, rte_afu_driver);
+
+#define IFPGA_BUS_BITSTREAM_PATH_MAX_LEN 256
+
+struct rte_afu_uuid {
+ uint64_t uuid_low;
+ uint64_t uuid_high;
+} __attribute__ ((packed));
+
+#define IFPGA_BUS_DEV_PORT_MAX 4
+
+/**
+ * A structure describing an ID for a AFU driver. Each driver provides a
+ * table of these IDs for each device that it supports.
+ */
+struct rte_afu_id {
+ struct rte_afu_uuid uuid;
+ int port; /**< port number */
+} __attribute__ ((packed));
+
+/**
+ * A structure PR (Partial Reconfiguration) configuration AFU driver.
+ */
+
+struct rte_afu_pr_conf {
+ struct rte_afu_id afu_id;
+ int pr_enable;
+ char bs_path[IFPGA_BUS_BITSTREAM_PATH_MAX_LEN];
+};
+
+#define AFU_PRI_STR_SIZE (PCI_PRI_STR_SIZE + 8)
+
+/**
+ * A structure describing a AFU device.
+ */
+struct rte_afu_device {
+ TAILQ_ENTRY(rte_afu_device) next; /**< Next in device list. */
+ struct rte_device device; /**< Inherit core device */
+ struct rte_rawdev *rawdev; /**< Point Rawdev */
+ struct rte_afu_id id; /**< AFU id within FPGA. */
+ uint32_t num_region; /**< number of regions found */
+ struct rte_mem_resource mem_resource[PCI_MAX_RESOURCE];
+ /**< AFU Memory Resource */
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_afu_driver *driver; /**< Associated driver */
+ char path[IFPGA_BUS_BITSTREAM_PATH_MAX_LEN];
+} __attribute__ ((packed));
+
+/**
+ * @internal
+ * Helper macro for drivers that need to convert to struct rte_afu_device.
+ */
+#define RTE_DEV_TO_AFU(ptr) \
+ container_of(ptr, struct rte_afu_device, device)
+
+#define RTE_DRV_TO_AFU_CONST(ptr) \
+ container_of(ptr, const struct rte_afu_driver, driver)
+
+/**
+ * Initialization function for the driver called during FPGA BUS probing.
+ */
+typedef int (afu_probe_t)(struct rte_afu_device *);
+
+/**
+ * Uninitialization function for the driver called during hotplugging.
+ */
+typedef int (afu_remove_t)(struct rte_afu_device *);
+
+/**
+ * A structure describing a AFU device.
+ */
+struct rte_afu_driver {
+ TAILQ_ENTRY(rte_afu_driver) next; /**< Next afu driver. */
+ struct rte_driver driver; /**< Inherit core driver. */
+ afu_probe_t *probe; /**< Device Probe function. */
+ afu_remove_t *remove; /**< Device Remove function. */
+ const struct rte_afu_uuid *id_table; /**< AFU uuid within FPGA. */
+};
+
+static inline const char *
+rte_ifpga_device_name(const struct rte_afu_device *afu)
+{
+ if (afu && afu->device.name)
+ return afu->device.name;
+ return NULL;
+}
+
+/**
+ * Register a ifpga afu device driver.
+ *
+ * @param driver
+ * A pointer to a rte_afu_driver structure describing the driver
+ * to be registered.
+ */
+void rte_ifpga_driver_register(struct rte_afu_driver *driver);
+
+/**
+ * Unregister a ifpga afu device driver.
+ *
+ * @param driver
+ * A pointer to a rte_afu_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_ifpga_driver_unregister(struct rte_afu_driver *driver);
+
+#define RTE_PMD_REGISTER_AFU(nm, afudrv)\
+static const char *afudrvinit_ ## nm ## _alias;\
+RTE_INIT(afudrvinitfn_ ##afudrv)\
+{\
+ (afudrv).driver.name = RTE_STR(nm);\
+ (afudrv).driver.alias = afudrvinit_ ## nm ## _alias;\
+ rte_ifpga_driver_register(&afudrv);\
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+#define RTE_PMD_REGISTER_AFU_ALIAS(nm, alias)\
+static const char *afudrvinit_ ## nm ## _alias = RTE_STR(alias)
+
+#endif /* _RTE_BUS_IFPGA_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga_version.map b/src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga_version.map
new file mode 100644
index 00000000..a0279797
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga_version.map
@@ -0,0 +1,10 @@
+DPDK_18.05 {
+ global:
+
+ rte_ifpga_get_integer32_arg;
+ rte_ifpga_get_string_arg;
+ rte_ifpga_driver_register;
+ rte_ifpga_driver_unregister;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/bus/meson.build b/src/spdk/dpdk/drivers/bus/meson.build
new file mode 100644
index 00000000..80de2d91
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['dpaa', 'fslmc', 'ifpga', 'pci', 'vdev', 'vmbus']
+std_deps = ['eal']
+config_flag_fmt = 'RTE_LIBRTE_@0@_BUS'
+driver_name_fmt = 'rte_bus_@0@'
diff --git a/src/spdk/dpdk/drivers/bus/pci/Makefile b/src/spdk/dpdk/drivers/bus/pci/Makefile
new file mode 100644
index 00000000..cf373068
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 6WIND S.A.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_bus_pci.a
+LIBABIVER := 1
+EXPORT_MAP := rte_bus_pci_version.map
+
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),)
+SYSTEM := linux
+endif
+ifneq ($(CONFIG_RTE_EXEC_ENV_BSDAPP),)
+SYSTEM := bsd
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/pci/$(SYSTEM)
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal
+
+# memseg walk is not part of stable API yet
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_pci
+
+include $(RTE_SDK)/drivers/bus/pci/$(SYSTEM)/Makefile
+SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) := $(addprefix $(SYSTEM)/,$(SRCS))
+SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci_common_uio.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PCI_BUS)-include += rte_bus_pci.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/bus/pci/bsd/Makefile b/src/spdk/dpdk/drivers/bus/pci/bsd/Makefile
new file mode 100644
index 00000000..c1b54c05
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/bsd/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 6WIND S.A.
+
+SRCS += pci.c
diff --git a/src/spdk/dpdk/drivers/bus/pci/bsd/pci.c b/src/spdk/dpdk/drivers/bus/pci/bsd/pci.c
new file mode 100644
index 00000000..655b34b7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/bsd/pci.c
@@ -0,0 +1,651 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <dirent.h>
+#include <limits.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <sys/pciio.h>
+#include <dev/pci/pcireg.h>
+
+#if defined(RTE_ARCH_X86)
+#include <machine/cpufunc.h>
+#endif
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_launch.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+#include <rte_debug.h>
+#include <rte_devargs.h>
+
+#include "eal_filesystem.h"
+#include "private.h"
+
+/**
+ * @file
+ * PCI probing under BSD
+ *
+ * This code is used to simulate a PCI probe by parsing information in
+ * sysfs. Moreover, when a registered driver matches a device, the
+ * kernel driver currently using it is unloaded and replaced by
+ * igb_uio module, which is a very minimal userland driver for Intel
+ * network card, only providing access to PCI BAR to applications, and
+ * enabling bus master.
+ */
+
+extern struct rte_pci_bus rte_pci_bus;
+
+/* Map pci device */
+int
+rte_pci_map_device(struct rte_pci_device *dev)
+{
+ int ret = -1;
+
+ /* try mapping the NIC resources */
+ switch (dev->kdrv) {
+ case RTE_KDRV_NIC_UIO:
+ /* map resources for devices that use uio */
+ ret = pci_uio_map_resource(dev);
+ break;
+ default:
+ RTE_LOG(DEBUG, EAL,
+ " Not managed by a supported kernel driver, skipped\n");
+ ret = 1;
+ break;
+ }
+
+ return ret;
+}
+
+/* Unmap pci device */
+void
+rte_pci_unmap_device(struct rte_pci_device *dev)
+{
+ /* try unmapping the NIC resources */
+ switch (dev->kdrv) {
+ case RTE_KDRV_NIC_UIO:
+ /* unmap resources for devices that use uio */
+ pci_uio_unmap_resource(dev);
+ break;
+ default:
+ RTE_LOG(DEBUG, EAL,
+ " Not managed by a supported kernel driver, skipped\n");
+ break;
+ }
+}
+
+void
+pci_uio_free_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource *uio_res)
+{
+ rte_free(uio_res);
+
+ if (dev->intr_handle.fd) {
+ close(dev->intr_handle.fd);
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ }
+}
+
+int
+pci_uio_alloc_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource **uio_res)
+{
+ char devname[PATH_MAX]; /* contains the /dev/uioX */
+ struct rte_pci_addr *loc;
+
+ loc = &dev->addr;
+
+ snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
+ dev->addr.bus, dev->addr.devid, dev->addr.function);
+
+ if (access(devname, O_RDWR) < 0) {
+ RTE_LOG(WARNING, EAL, " "PCI_PRI_FMT" not managed by UIO driver, "
+ "skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
+ return 1;
+ }
+
+ /* save fd if in primary process */
+ dev->intr_handle.fd = open(devname, O_RDWR);
+ if (dev->intr_handle.fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ devname, strerror(errno));
+ goto error;
+ }
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
+
+ /* allocate the mapping details for secondary processes*/
+ *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
+ if (*uio_res == NULL) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot store uio mmap details\n", __func__);
+ goto error;
+ }
+
+ snprintf((*uio_res)->path, sizeof((*uio_res)->path), "%s", devname);
+ memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));
+
+ return 0;
+
+error:
+ pci_uio_free_resource(dev, *uio_res);
+ return -1;
+}
+
+int
+pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
+ struct mapped_pci_resource *uio_res, int map_idx)
+{
+ int fd;
+ char *devname;
+ void *mapaddr;
+ uint64_t offset;
+ uint64_t pagesz;
+ struct pci_map *maps;
+
+ maps = uio_res->maps;
+ devname = uio_res->path;
+ pagesz = sysconf(_SC_PAGESIZE);
+
+ /* allocate memory to keep path */
+ maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
+ if (maps[map_idx].path == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ /*
+ * open resource file, to mmap it
+ */
+ fd = open(devname, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ devname, strerror(errno));
+ goto error;
+ }
+
+ /* if matching map is found, then use it */
+ offset = res_idx * pagesz;
+ mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
+ (size_t)dev->mem_resource[res_idx].len, 0);
+ close(fd);
+ if (mapaddr == MAP_FAILED)
+ goto error;
+
+ maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
+ maps[map_idx].size = dev->mem_resource[res_idx].len;
+ maps[map_idx].addr = mapaddr;
+ maps[map_idx].offset = offset;
+ strcpy(maps[map_idx].path, devname);
+ dev->mem_resource[res_idx].addr = mapaddr;
+
+ return 0;
+
+error:
+ rte_free(maps[map_idx].path);
+ return -1;
+}
+
+static int
+pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
+{
+ struct rte_pci_device *dev;
+ struct pci_bar_io bar;
+ unsigned i, max;
+
+ dev = malloc(sizeof(*dev));
+ if (dev == NULL) {
+ return -1;
+ }
+
+ memset(dev, 0, sizeof(*dev));
+ dev->addr.domain = conf->pc_sel.pc_domain;
+ dev->addr.bus = conf->pc_sel.pc_bus;
+ dev->addr.devid = conf->pc_sel.pc_dev;
+ dev->addr.function = conf->pc_sel.pc_func;
+
+ /* get vendor id */
+ dev->id.vendor_id = conf->pc_vendor;
+
+ /* get device id */
+ dev->id.device_id = conf->pc_device;
+
+ /* get subsystem_vendor id */
+ dev->id.subsystem_vendor_id = conf->pc_subvendor;
+
+ /* get subsystem_device id */
+ dev->id.subsystem_device_id = conf->pc_subdevice;
+
+ /* get class id */
+ dev->id.class_id = (conf->pc_class << 16) |
+ (conf->pc_subclass << 8) |
+ (conf->pc_progif);
+
+ /* TODO: get max_vfs */
+ dev->max_vfs = 0;
+
+ /* FreeBSD has no NUMA support (yet) */
+ dev->device.numa_node = 0;
+
+ pci_name_set(dev);
+
+ /* FreeBSD has only one pass through driver */
+ dev->kdrv = RTE_KDRV_NIC_UIO;
+
+ /* parse resources */
+ switch (conf->pc_hdr & PCIM_HDRTYPE) {
+ case PCIM_HDRTYPE_NORMAL:
+ max = PCIR_MAX_BAR_0;
+ break;
+ case PCIM_HDRTYPE_BRIDGE:
+ max = PCIR_MAX_BAR_1;
+ break;
+ case PCIM_HDRTYPE_CARDBUS:
+ max = PCIR_MAX_BAR_2;
+ break;
+ default:
+ goto skipdev;
+ }
+
+ for (i = 0; i <= max; i++) {
+ bar.pbi_sel = conf->pc_sel;
+ bar.pbi_reg = PCIR_BAR(i);
+ if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
+ continue;
+
+ dev->mem_resource[i].len = bar.pbi_length;
+ if (PCI_BAR_IO(bar.pbi_base)) {
+ dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf));
+ continue;
+ }
+ dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
+ }
+
+ /* device is valid, add in list (sorted) */
+ if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
+ rte_pci_add_device(dev);
+ }
+ else {
+ struct rte_pci_device *dev2 = NULL;
+ int ret;
+
+ TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
+ ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
+ if (ret > 0)
+ continue;
+ else if (ret < 0) {
+ rte_pci_insert_device(dev2, dev);
+ } else { /* already registered */
+ dev2->kdrv = dev->kdrv;
+ dev2->max_vfs = dev->max_vfs;
+ pci_name_set(dev2);
+ memmove(dev2->mem_resource,
+ dev->mem_resource,
+ sizeof(dev->mem_resource));
+ free(dev);
+ }
+ return 0;
+ }
+ rte_pci_add_device(dev);
+ }
+
+ return 0;
+
+skipdev:
+ free(dev);
+ return 0;
+}
+
+/*
+ * Scan the content of the PCI bus, and add the devices in the devices
+ * list. Call pci_scan_one() for each pci entry found.
+ */
+int
+rte_pci_scan(void)
+{
+ int fd;
+ unsigned dev_count = 0;
+ struct pci_conf matches[16];
+ struct pci_conf_io conf_io = {
+ .pat_buf_len = 0,
+ .num_patterns = 0,
+ .patterns = NULL,
+ .match_buf_len = sizeof(matches),
+ .matches = &matches[0],
+ };
+
+ /* for debug purposes, PCI can be disabled */
+ if (!rte_eal_has_pci())
+ return 0;
+
+ fd = open("/dev/pci", O_RDONLY);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
+ goto error;
+ }
+
+ do {
+ unsigned i;
+ if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
+ __func__, strerror(errno));
+ goto error;
+ }
+
+ for (i = 0; i < conf_io.num_matches; i++)
+ if (pci_scan_one(fd, &matches[i]) < 0)
+ goto error;
+
+ dev_count += conf_io.num_matches;
+ } while(conf_io.status == PCI_GETCONF_MORE_DEVS);
+
+ close(fd);
+
+ RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count);
+ return 0;
+
+error:
+ if (fd >= 0)
+ close(fd);
+ return -1;
+}
+
+/*
+ * Get iommu class of PCI devices on the bus.
+ */
+enum rte_iova_mode
+rte_pci_get_iommu_class(void)
+{
+ /* Supports only RTE_KDRV_NIC_UIO */
+ return RTE_IOVA_PA;
+}
+
+int
+pci_update_device(const struct rte_pci_addr *addr)
+{
+ int fd;
+ struct pci_conf matches[2];
+ struct pci_match_conf match = {
+ .pc_sel = {
+ .pc_domain = addr->domain,
+ .pc_bus = addr->bus,
+ .pc_dev = addr->devid,
+ .pc_func = addr->function,
+ },
+ };
+ struct pci_conf_io conf_io = {
+ .pat_buf_len = 0,
+ .num_patterns = 1,
+ .patterns = &match,
+ .match_buf_len = sizeof(matches),
+ .matches = &matches[0],
+ };
+
+ fd = open("/dev/pci", O_RDONLY);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
+ goto error;
+ }
+
+ if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
+ __func__, strerror(errno));
+ goto error;
+ }
+
+ if (conf_io.num_matches != 1)
+ goto error;
+
+ if (pci_scan_one(fd, &matches[0]) < 0)
+ goto error;
+
+ close(fd);
+
+ return 0;
+
+error:
+ if (fd >= 0)
+ close(fd);
+ return -1;
+}
+
+/* Read PCI config space. */
+int rte_pci_read_config(const struct rte_pci_device *dev,
+ void *buf, size_t len, off_t offset)
+{
+ int fd = -1;
+ int size;
+ struct pci_io pi = {
+ .pi_sel = {
+ .pc_domain = dev->addr.domain,
+ .pc_bus = dev->addr.bus,
+ .pc_dev = dev->addr.devid,
+ .pc_func = dev->addr.function,
+ },
+ .pi_reg = offset,
+ };
+
+ fd = open("/dev/pci", O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
+ goto error;
+ }
+
+ while (len > 0) {
+ size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
+ pi.pi_width = size;
+
+ if (ioctl(fd, PCIOCREAD, &pi) < 0)
+ goto error;
+ memcpy(buf, &pi.pi_data, size);
+
+ buf = (char *)buf + size;
+ pi.pi_reg += size;
+ len -= size;
+ }
+ close(fd);
+
+ return 0;
+
+ error:
+ if (fd >= 0)
+ close(fd);
+ return -1;
+}
+
+/* Write PCI config space. */
+int rte_pci_write_config(const struct rte_pci_device *dev,
+ const void *buf, size_t len, off_t offset)
+{
+ int fd = -1;
+
+ struct pci_io pi = {
+ .pi_sel = {
+ .pc_domain = dev->addr.domain,
+ .pc_bus = dev->addr.bus,
+ .pc_dev = dev->addr.devid,
+ .pc_func = dev->addr.function,
+ },
+ .pi_reg = offset,
+ .pi_data = *(const uint32_t *)buf,
+ .pi_width = len,
+ };
+
+ if (len == 3 || len > sizeof(pi.pi_data)) {
+ RTE_LOG(ERR, EAL, "%s(): invalid pci read length\n", __func__);
+ goto error;
+ }
+
+ memcpy(&pi.pi_data, buf, len);
+
+ fd = open("/dev/pci", O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
+ goto error;
+ }
+
+ if (ioctl(fd, PCIOCWRITE, &pi) < 0)
+ goto error;
+
+ close(fd);
+ return 0;
+
+ error:
+ if (fd >= 0)
+ close(fd);
+ return -1;
+}
+
+int
+rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p)
+{
+ int ret;
+
+ switch (dev->kdrv) {
+#if defined(RTE_ARCH_X86)
+ case RTE_KDRV_NIC_UIO:
+ if ((uintptr_t) dev->mem_resource[bar].addr <= UINT16_MAX) {
+ p->base = (uintptr_t)dev->mem_resource[bar].addr;
+ ret = 0;
+ } else
+ ret = -1;
+ break;
+#endif
+ default:
+ ret = -1;
+ break;
+ }
+
+ if (!ret)
+ p->dev = dev;
+
+ return ret;
+}
+
+static void
+pci_uio_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset)
+{
+#if defined(RTE_ARCH_X86)
+ uint8_t *d;
+ int size;
+ unsigned short reg = p->base + offset;
+
+ for (d = data; len > 0; d += size, reg += size, len -= size) {
+ if (len >= 4) {
+ size = 4;
+ *(uint32_t *)d = inl(reg);
+ } else if (len >= 2) {
+ size = 2;
+ *(uint16_t *)d = inw(reg);
+ } else {
+ size = 1;
+ *d = inb(reg);
+ }
+ }
+#else
+ RTE_SET_USED(p);
+ RTE_SET_USED(data);
+ RTE_SET_USED(len);
+ RTE_SET_USED(offset);
+#endif
+}
+
+void
+rte_pci_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset)
+{
+ switch (p->dev->kdrv) {
+ case RTE_KDRV_NIC_UIO:
+ pci_uio_ioport_read(p, data, len, offset);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+pci_uio_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset)
+{
+#if defined(RTE_ARCH_X86)
+ const uint8_t *s;
+ int size;
+ unsigned short reg = p->base + offset;
+
+ for (s = data; len > 0; s += size, reg += size, len -= size) {
+ if (len >= 4) {
+ size = 4;
+ outl(reg, *(const uint32_t *)s);
+ } else if (len >= 2) {
+ size = 2;
+ outw(reg, *(const uint16_t *)s);
+ } else {
+ size = 1;
+ outb(reg, *s);
+ }
+ }
+#else
+ RTE_SET_USED(p);
+ RTE_SET_USED(data);
+ RTE_SET_USED(len);
+ RTE_SET_USED(offset);
+#endif
+}
+
+void
+rte_pci_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset)
+{
+ switch (p->dev->kdrv) {
+ case RTE_KDRV_NIC_UIO:
+ pci_uio_ioport_write(p, data, len, offset);
+ break;
+ default:
+ break;
+ }
+}
+
+int
+rte_pci_ioport_unmap(struct rte_pci_ioport *p)
+{
+ int ret;
+
+ switch (p->dev->kdrv) {
+#if defined(RTE_ARCH_X86)
+ case RTE_KDRV_NIC_UIO:
+ ret = 0;
+ break;
+#endif
+ default:
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/bus/pci/linux/Makefile b/src/spdk/dpdk/drivers/bus/pci/linux/Makefile
new file mode 100644
index 00000000..96ea1d54
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/linux/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 6WIND S.A.
+
+SRCS += pci.c
+SRCS += pci_uio.c
+SRCS += pci_vfio.c
+
+CFLAGS += -D_GNU_SOURCE
diff --git a/src/spdk/dpdk/drivers/bus/pci/linux/pci.c b/src/spdk/dpdk/drivers/bus/pci/linux/pci.c
new file mode 100644
index 00000000..daf087d3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/linux/pci.c
@@ -0,0 +1,926 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <string.h>
+#include <dirent.h>
+
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_devargs.h>
+#include <rte_memcpy.h>
+#include <rte_vfio.h>
+
+#include "eal_filesystem.h"
+
+#include "private.h"
+#include "pci_init.h"
+
+/**
+ * @file
+ * PCI probing under linux
+ *
+ * This code is used to simulate a PCI probe by parsing information in sysfs.
+ * When a registered device matches a driver, it is then initialized with
+ * IGB_UIO driver (or doesn't initialize, if the device wasn't bound to it).
+ */
+
+extern struct rte_pci_bus rte_pci_bus;
+
+static int
+pci_get_kernel_driver_by_path(const char *filename, char *dri_name,
+ size_t len)
+{
+ int count;
+ char path[PATH_MAX];
+ char *name;
+
+ if (!filename || !dri_name)
+ return -1;
+
+ count = readlink(filename, path, PATH_MAX);
+ if (count >= PATH_MAX)
+ return -1;
+
+ /* For device does not have a driver */
+ if (count < 0)
+ return 1;
+
+ path[count] = '\0';
+
+ name = strrchr(path, '/');
+ if (name) {
+ strlcpy(dri_name, name + 1, len);
+ return 0;
+ }
+
+ return -1;
+}
+
+/* Map pci device */
+int
+rte_pci_map_device(struct rte_pci_device *dev)
+{
+ int ret = -1;
+
+ /* try mapping the NIC resources using VFIO if it exists */
+ switch (dev->kdrv) {
+ case RTE_KDRV_VFIO:
+#ifdef VFIO_PRESENT
+ if (pci_vfio_is_enabled())
+ ret = pci_vfio_map_resource(dev);
+#endif
+ break;
+ case RTE_KDRV_IGB_UIO:
+ case RTE_KDRV_UIO_GENERIC:
+ if (rte_eal_using_phys_addrs()) {
+ /* map resources for devices that use uio */
+ ret = pci_uio_map_resource(dev);
+ }
+ break;
+ default:
+ RTE_LOG(DEBUG, EAL,
+ " Not managed by a supported kernel driver, skipped\n");
+ ret = 1;
+ break;
+ }
+
+ return ret;
+}
+
+/* Unmap pci device */
+void
+rte_pci_unmap_device(struct rte_pci_device *dev)
+{
+ /* try unmapping the NIC resources using VFIO if it exists */
+ switch (dev->kdrv) {
+ case RTE_KDRV_VFIO:
+#ifdef VFIO_PRESENT
+ if (pci_vfio_is_enabled())
+ pci_vfio_unmap_resource(dev);
+#endif
+ break;
+ case RTE_KDRV_IGB_UIO:
+ case RTE_KDRV_UIO_GENERIC:
+ /* unmap resources for devices that use uio */
+ pci_uio_unmap_resource(dev);
+ break;
+ default:
+ RTE_LOG(DEBUG, EAL,
+ " Not managed by a supported kernel driver, skipped\n");
+ break;
+ }
+}
+
+static int
+find_max_end_va(const struct rte_memseg_list *msl, void *arg)
+{
+ size_t sz = msl->memseg_arr.len * msl->page_sz;
+ void *end_va = RTE_PTR_ADD(msl->base_va, sz);
+ void **max_va = arg;
+
+ if (*max_va < end_va)
+ *max_va = end_va;
+ return 0;
+}
+
+void *
+pci_find_max_end_va(void)
+{
+ void *va = NULL;
+
+ rte_memseg_list_walk(find_max_end_va, &va);
+ return va;
+}
+
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+int
+pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+ uint64_t *end_addr, uint64_t *flags)
+{
+ union pci_resource_info {
+ struct {
+ char *phys_addr;
+ char *end_addr;
+ char *flags;
+ };
+ char *ptrs[PCI_RESOURCE_FMT_NVAL];
+ } res_info;
+
+ if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3) {
+ RTE_LOG(ERR, EAL,
+ "%s(): bad resource format\n", __func__);
+ return -1;
+ }
+ errno = 0;
+ *phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+ *end_addr = strtoull(res_info.end_addr, NULL, 16);
+ *flags = strtoull(res_info.flags, NULL, 16);
+ if (errno != 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): bad resource format\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* parse the "resource" sysfs file */
+static int
+pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ int i;
+ uint64_t phys_addr, end_addr, flags;
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot open sysfs resource\n");
+ return -1;
+ }
+
+ for (i = 0; i<PCI_MAX_RESOURCE; i++) {
+
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot read resource\n", __func__);
+ goto error;
+ }
+ if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr,
+ &end_addr, &flags) < 0)
+ goto error;
+
+ if (flags & IORESOURCE_MEM) {
+ dev->mem_resource[i].phys_addr = phys_addr;
+ dev->mem_resource[i].len = end_addr - phys_addr + 1;
+ /* not mapped for now */
+ dev->mem_resource[i].addr = NULL;
+ }
+ }
+ fclose(f);
+ return 0;
+
+error:
+ fclose(f);
+ return -1;
+}
+
+/* Scan one pci sysfs entry, and fill the devices list from it. */
+static int
+pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
+{
+ char filename[PATH_MAX];
+ unsigned long tmp;
+ struct rte_pci_device *dev;
+ char driver[PATH_MAX];
+ int ret;
+
+ dev = malloc(sizeof(*dev));
+ if (dev == NULL)
+ return -1;
+
+ memset(dev, 0, sizeof(*dev));
+ dev->addr = *addr;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ dev->id.vendor_id = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ dev->id.device_id = (uint16_t)tmp;
+
+ /* get subsystem_vendor id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+ dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ dev->id.subsystem_vendor_id = (uint16_t)tmp;
+
+ /* get subsystem_device id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_device",
+ dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ dev->id.subsystem_device_id = (uint16_t)tmp;
+
+ /* get class_id */
+ snprintf(filename, sizeof(filename), "%s/class",
+ dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ /* the least 24 bits are valid: class, subclass, program interface */
+ dev->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+ /* get max_vfs */
+ dev->max_vfs = 0;
+ snprintf(filename, sizeof(filename), "%s/max_vfs", dirname);
+ if (!access(filename, F_OK) &&
+ eal_parse_sysfs_value(filename, &tmp) == 0)
+ dev->max_vfs = (uint16_t)tmp;
+ else {
+ /* for non igb_uio driver, need kernel version >= 3.8 */
+ snprintf(filename, sizeof(filename),
+ "%s/sriov_numvfs", dirname);
+ if (!access(filename, F_OK) &&
+ eal_parse_sysfs_value(filename, &tmp) == 0)
+ dev->max_vfs = (uint16_t)tmp;
+ }
+
+ /* get numa node, default to 0 if not present */
+ snprintf(filename, sizeof(filename), "%s/numa_node",
+ dirname);
+
+ if (access(filename, F_OK) != -1) {
+ if (eal_parse_sysfs_value(filename, &tmp) == 0)
+ dev->device.numa_node = tmp;
+ else
+ dev->device.numa_node = -1;
+ } else {
+ dev->device.numa_node = 0;
+ }
+
+ pci_name_set(dev);
+
+ /* parse resources */
+ snprintf(filename, sizeof(filename), "%s/resource", dirname);
+ if (pci_parse_sysfs_resource(filename, dev) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__);
+ free(dev);
+ return -1;
+ }
+
+ /* parse driver */
+ snprintf(filename, sizeof(filename), "%s/driver", dirname);
+ ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver));
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Fail to get kernel driver\n");
+ free(dev);
+ return -1;
+ }
+
+ if (!ret) {
+ if (!strcmp(driver, "vfio-pci"))
+ dev->kdrv = RTE_KDRV_VFIO;
+ else if (!strcmp(driver, "igb_uio"))
+ dev->kdrv = RTE_KDRV_IGB_UIO;
+ else if (!strcmp(driver, "uio_pci_generic"))
+ dev->kdrv = RTE_KDRV_UIO_GENERIC;
+ else
+ dev->kdrv = RTE_KDRV_UNKNOWN;
+ } else
+ dev->kdrv = RTE_KDRV_NONE;
+
+ /* device is valid, add in list (sorted) */
+ if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
+ rte_pci_add_device(dev);
+ } else {
+ struct rte_pci_device *dev2;
+ int ret;
+
+ TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
+ ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
+ if (ret > 0)
+ continue;
+
+ if (ret < 0) {
+ rte_pci_insert_device(dev2, dev);
+ } else { /* already registered */
+ if (dev2->driver == NULL) {
+ dev2->kdrv = dev->kdrv;
+ dev2->max_vfs = dev->max_vfs;
+ pci_name_set(dev2);
+ memmove(dev2->mem_resource,
+ dev->mem_resource,
+ sizeof(dev->mem_resource));
+ } else {
+ /**
+ * If device is plugged and driver is
+ * probed already, we don't need to do
+ * anything here. (This happens when we
+ * call rte_eal_hotplug_add)
+ */
+ if (dev2->kdrv != dev->kdrv ||
+ dev2->max_vfs != dev->max_vfs)
+ /*
+ * This should not happens.
+ * But it is still possible if
+ * we unbind a device from
+ * vfio or uio before hotplug
+ * remove and rebind it with
+ * a different configure.
+ * So we just print out the
+ * error as an alarm.
+ */
+ RTE_LOG(ERR, EAL, "Unexpected device scan at %s!\n",
+ filename);
+ }
+ free(dev);
+ }
+ return 0;
+ }
+
+ rte_pci_add_device(dev);
+ }
+
+ return 0;
+}
+
+int
+pci_update_device(const struct rte_pci_addr *addr)
+{
+ char filename[PATH_MAX];
+
+ snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT,
+ rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
+ addr->function);
+
+ return pci_scan_one(filename, addr);
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+static int
+parse_pci_addr_format(const char *buf, int bufsize, struct rte_pci_addr *addr)
+{
+ /* first split on ':' */
+ union splitaddr {
+ struct {
+ char *domain;
+ char *bus;
+ char *devid;
+ char *function;
+ };
+ char *str[PCI_FMT_NVAL]; /* last element-separator is "." not ":" */
+ } splitaddr;
+
+ char *buf_copy = strndup(buf, bufsize);
+ if (buf_copy == NULL)
+ return -1;
+
+ if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+ != PCI_FMT_NVAL - 1)
+ goto error;
+ /* final split is on '.' between devid and function */
+ splitaddr.function = strchr(splitaddr.devid,'.');
+ if (splitaddr.function == NULL)
+ goto error;
+ *splitaddr.function++ = '\0';
+
+ /* now convert to int values */
+ errno = 0;
+ addr->domain = strtoul(splitaddr.domain, NULL, 16);
+ addr->bus = strtoul(splitaddr.bus, NULL, 16);
+ addr->devid = strtoul(splitaddr.devid, NULL, 16);
+ addr->function = strtoul(splitaddr.function, NULL, 10);
+ if (errno != 0)
+ goto error;
+
+ free(buf_copy); /* free the copy made with strdup */
+ return 0;
+error:
+ free(buf_copy);
+ return -1;
+}
+
+/*
+ * Scan the content of the PCI bus, and the devices in the devices
+ * list
+ */
+int
+rte_pci_scan(void)
+{
+ struct dirent *e;
+ DIR *dir;
+ char dirname[PATH_MAX];
+ struct rte_pci_addr addr;
+
+ /* for debug purposes, PCI can be disabled */
+ if (!rte_eal_has_pci())
+ return 0;
+
+#ifdef VFIO_PRESENT
+ if (!pci_vfio_is_enabled())
+ RTE_LOG(DEBUG, EAL, "VFIO PCI modules not loaded\n");
+#endif
+
+ dir = opendir(rte_pci_get_sysfs_path());
+ if (dir == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n",
+ __func__, strerror(errno));
+ return -1;
+ }
+
+ while ((e = readdir(dir)) != NULL) {
+ if (e->d_name[0] == '.')
+ continue;
+
+ if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &addr) != 0)
+ continue;
+
+ snprintf(dirname, sizeof(dirname), "%s/%s",
+ rte_pci_get_sysfs_path(), e->d_name);
+
+ if (pci_scan_one(dirname, &addr) < 0)
+ goto error;
+ }
+ closedir(dir);
+ return 0;
+
+error:
+ closedir(dir);
+ return -1;
+}
+
+/*
+ * Is pci device bound to any kdrv
+ */
+static inline int
+pci_one_device_is_bound(void)
+{
+ struct rte_pci_device *dev = NULL;
+ int ret = 0;
+
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ if (dev->kdrv == RTE_KDRV_UNKNOWN ||
+ dev->kdrv == RTE_KDRV_NONE) {
+ continue;
+ } else {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * Any one of the device bound to uio
+ */
+static inline int
+pci_one_device_bound_uio(void)
+{
+ struct rte_pci_device *dev = NULL;
+ struct rte_devargs *devargs;
+ int need_check;
+
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ devargs = dev->device.devargs;
+
+ need_check = 0;
+ switch (rte_pci_bus.bus.conf.scan_mode) {
+ case RTE_BUS_SCAN_WHITELIST:
+ if (devargs && devargs->policy == RTE_DEV_WHITELISTED)
+ need_check = 1;
+ break;
+ case RTE_BUS_SCAN_UNDEFINED:
+ case RTE_BUS_SCAN_BLACKLIST:
+ if (devargs == NULL ||
+ devargs->policy != RTE_DEV_BLACKLISTED)
+ need_check = 1;
+ break;
+ }
+
+ if (!need_check)
+ continue;
+
+ if (dev->kdrv == RTE_KDRV_IGB_UIO ||
+ dev->kdrv == RTE_KDRV_UIO_GENERIC) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Any one of the device has iova as va
+ */
+static inline int
+pci_one_device_has_iova_va(void)
+{
+ struct rte_pci_device *dev = NULL;
+ struct rte_pci_driver *drv = NULL;
+
+ FOREACH_DRIVER_ON_PCIBUS(drv) {
+ if (drv && drv->drv_flags & RTE_PCI_DRV_IOVA_AS_VA) {
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ if (dev->kdrv == RTE_KDRV_VFIO &&
+ rte_pci_match(drv, dev))
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+#if defined(RTE_ARCH_X86)
+static bool
+pci_one_device_iommu_support_va(struct rte_pci_device *dev)
+{
+#define VTD_CAP_MGAW_SHIFT 16
+#define VTD_CAP_MGAW_MASK (0x3fULL << VTD_CAP_MGAW_SHIFT)
+#define X86_VA_WIDTH 47 /* From Documentation/x86/x86_64/mm.txt */
+ struct rte_pci_addr *addr = &dev->addr;
+ char filename[PATH_MAX];
+ FILE *fp;
+ uint64_t mgaw, vtd_cap_reg = 0;
+
+ snprintf(filename, sizeof(filename),
+ "%s/" PCI_PRI_FMT "/iommu/intel-iommu/cap",
+ rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
+ addr->function);
+ if (access(filename, F_OK) == -1) {
+ /* We don't have an Intel IOMMU, assume VA supported*/
+ return true;
+ }
+
+ /* We have an intel IOMMU */
+ fp = fopen(filename, "r");
+ if (fp == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): can't open %s\n", __func__, filename);
+ return false;
+ }
+
+ if (fscanf(fp, "%" PRIx64, &vtd_cap_reg) != 1) {
+ RTE_LOG(ERR, EAL, "%s(): can't read %s\n", __func__, filename);
+ fclose(fp);
+ return false;
+ }
+
+ fclose(fp);
+
+ mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;
+ if (mgaw < X86_VA_WIDTH)
+ return false;
+
+ return true;
+}
+#elif defined(RTE_ARCH_PPC_64)
+static bool
+pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
+{
+ return false;
+}
+#else
+static bool
+pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
+{
+ return true;
+}
+#endif
+
+/*
+ * All devices IOMMUs support VA as IOVA
+ */
+static bool
+pci_devices_iommu_support_va(void)
+{
+ struct rte_pci_device *dev = NULL;
+ struct rte_pci_driver *drv = NULL;
+
+ FOREACH_DRIVER_ON_PCIBUS(drv) {
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ if (!rte_pci_match(drv, dev))
+ continue;
+ if (!pci_one_device_iommu_support_va(dev))
+ return false;
+ }
+ }
+ return true;
+}
+
+/*
+ * Get iommu class of PCI devices on the bus.
+ */
+enum rte_iova_mode
+rte_pci_get_iommu_class(void)
+{
+ bool is_bound;
+ bool is_vfio_noiommu_enabled = true;
+ bool has_iova_va;
+ bool is_bound_uio;
+ bool iommu_no_va;
+
+ is_bound = pci_one_device_is_bound();
+ if (!is_bound)
+ return RTE_IOVA_DC;
+
+ has_iova_va = pci_one_device_has_iova_va();
+ is_bound_uio = pci_one_device_bound_uio();
+ iommu_no_va = !pci_devices_iommu_support_va();
+#ifdef VFIO_PRESENT
+ is_vfio_noiommu_enabled = rte_vfio_noiommu_is_enabled() == true ?
+ true : false;
+#endif
+
+ if (has_iova_va && !is_bound_uio && !is_vfio_noiommu_enabled &&
+ !iommu_no_va)
+ return RTE_IOVA_VA;
+
+ if (has_iova_va) {
+ RTE_LOG(WARNING, EAL, "Some devices want iova as va but pa will be used because.. ");
+ if (is_vfio_noiommu_enabled)
+ RTE_LOG(WARNING, EAL, "vfio-noiommu mode configured\n");
+ if (is_bound_uio)
+ RTE_LOG(WARNING, EAL, "few device bound to UIO\n");
+ if (iommu_no_va)
+ RTE_LOG(WARNING, EAL, "IOMMU does not support IOVA as VA\n");
+ }
+
+ return RTE_IOVA_PA;
+}
+
+/* Read PCI config space. */
+int rte_pci_read_config(const struct rte_pci_device *device,
+ void *buf, size_t len, off_t offset)
+{
+ const struct rte_intr_handle *intr_handle = &device->intr_handle;
+
+ switch (intr_handle->type) {
+ case RTE_INTR_HANDLE_UIO:
+ case RTE_INTR_HANDLE_UIO_INTX:
+ return pci_uio_read_config(intr_handle, buf, len, offset);
+
+#ifdef VFIO_PRESENT
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ return pci_vfio_read_config(intr_handle, buf, len, offset);
+#endif
+ default:
+ RTE_LOG(ERR, EAL,
+ "Unknown handle type of fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+}
+
+/* Write PCI config space. */
+int rte_pci_write_config(const struct rte_pci_device *device,
+ const void *buf, size_t len, off_t offset)
+{
+ const struct rte_intr_handle *intr_handle = &device->intr_handle;
+
+ switch (intr_handle->type) {
+ case RTE_INTR_HANDLE_UIO:
+ case RTE_INTR_HANDLE_UIO_INTX:
+ return pci_uio_write_config(intr_handle, buf, len, offset);
+
+#ifdef VFIO_PRESENT
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ return pci_vfio_write_config(intr_handle, buf, len, offset);
+#endif
+ default:
+ RTE_LOG(ERR, EAL,
+ "Unknown handle type of fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+}
+
+#if defined(RTE_ARCH_X86)
+static int
+pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused,
+ struct rte_pci_ioport *p)
+{
+ uint16_t start, end;
+ FILE *fp;
+ char *line = NULL;
+ char pci_id[16];
+ int found = 0;
+ size_t linesz;
+
+ snprintf(pci_id, sizeof(pci_id), PCI_PRI_FMT,
+ dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+
+ fp = fopen("/proc/ioports", "r");
+ if (fp == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): can't open ioports\n", __func__);
+ return -1;
+ }
+
+ while (getdelim(&line, &linesz, '\n', fp) > 0) {
+ char *ptr = line;
+ char *left;
+ int n;
+
+ n = strcspn(ptr, ":");
+ ptr[n] = 0;
+ left = &ptr[n + 1];
+
+ while (*left && isspace(*left))
+ left++;
+
+ if (!strncmp(left, pci_id, strlen(pci_id))) {
+ found = 1;
+
+ while (*ptr && isspace(*ptr))
+ ptr++;
+
+ sscanf(ptr, "%04hx-%04hx", &start, &end);
+
+ break;
+ }
+ }
+
+ free(line);
+ fclose(fp);
+
+ if (!found)
+ return -1;
+
+ p->base = start;
+ RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%x\n", start);
+
+ return 0;
+}
+#endif
+
+int
+rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p)
+{
+ int ret = -1;
+
+ switch (dev->kdrv) {
+#ifdef VFIO_PRESENT
+ case RTE_KDRV_VFIO:
+ if (pci_vfio_is_enabled())
+ ret = pci_vfio_ioport_map(dev, bar, p);
+ break;
+#endif
+ case RTE_KDRV_IGB_UIO:
+ ret = pci_uio_ioport_map(dev, bar, p);
+ break;
+ case RTE_KDRV_UIO_GENERIC:
+#if defined(RTE_ARCH_X86)
+ ret = pci_ioport_map(dev, bar, p);
+#else
+ ret = pci_uio_ioport_map(dev, bar, p);
+#endif
+ break;
+ case RTE_KDRV_NONE:
+#if defined(RTE_ARCH_X86)
+ ret = pci_ioport_map(dev, bar, p);
+#endif
+ break;
+ default:
+ break;
+ }
+
+ if (!ret)
+ p->dev = dev;
+
+ return ret;
+}
+
+void
+rte_pci_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset)
+{
+ switch (p->dev->kdrv) {
+#ifdef VFIO_PRESENT
+ case RTE_KDRV_VFIO:
+ pci_vfio_ioport_read(p, data, len, offset);
+ break;
+#endif
+ case RTE_KDRV_IGB_UIO:
+ pci_uio_ioport_read(p, data, len, offset);
+ break;
+ case RTE_KDRV_UIO_GENERIC:
+ pci_uio_ioport_read(p, data, len, offset);
+ break;
+ case RTE_KDRV_NONE:
+#if defined(RTE_ARCH_X86)
+ pci_uio_ioport_read(p, data, len, offset);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
+void
+rte_pci_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset)
+{
+ switch (p->dev->kdrv) {
+#ifdef VFIO_PRESENT
+ case RTE_KDRV_VFIO:
+ pci_vfio_ioport_write(p, data, len, offset);
+ break;
+#endif
+ case RTE_KDRV_IGB_UIO:
+ pci_uio_ioport_write(p, data, len, offset);
+ break;
+ case RTE_KDRV_UIO_GENERIC:
+ pci_uio_ioport_write(p, data, len, offset);
+ break;
+ case RTE_KDRV_NONE:
+#if defined(RTE_ARCH_X86)
+ pci_uio_ioport_write(p, data, len, offset);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
+int
+rte_pci_ioport_unmap(struct rte_pci_ioport *p)
+{
+ int ret = -1;
+
+ switch (p->dev->kdrv) {
+#ifdef VFIO_PRESENT
+ case RTE_KDRV_VFIO:
+ if (pci_vfio_is_enabled())
+ ret = pci_vfio_ioport_unmap(p);
+ break;
+#endif
+ case RTE_KDRV_IGB_UIO:
+ ret = pci_uio_ioport_unmap(p);
+ break;
+ case RTE_KDRV_UIO_GENERIC:
+#if defined(RTE_ARCH_X86)
+ ret = 0;
+#else
+ ret = pci_uio_ioport_unmap(p);
+#endif
+ break;
+ case RTE_KDRV_NONE:
+#if defined(RTE_ARCH_X86)
+ ret = 0;
+#endif
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/bus/pci/linux/pci_init.h b/src/spdk/dpdk/drivers/bus/pci/linux/pci_init.h
new file mode 100644
index 00000000..c2e603a3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/linux/pci_init.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef EAL_PCI_INIT_H_
+#define EAL_PCI_INIT_H_
+
+#include <rte_vfio.h>
+
+/** IO resource type: */
+#define IORESOURCE_IO 0x00000100
+#define IORESOURCE_MEM 0x00000200
+
+/*
+ * Helper function to map PCI resources right after hugepages in virtual memory
+ */
+extern void *pci_map_addr;
+void *pci_find_max_end_va(void);
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+int pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+ uint64_t *end_addr, uint64_t *flags);
+
+int pci_uio_alloc_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource **uio_res);
+void pci_uio_free_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource *uio_res);
+int pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
+ struct mapped_pci_resource *uio_res, int map_idx);
+
+int pci_uio_read_config(const struct rte_intr_handle *intr_handle,
+ void *buf, size_t len, off_t offs);
+int pci_uio_write_config(const struct rte_intr_handle *intr_handle,
+ const void *buf, size_t len, off_t offs);
+
+int pci_uio_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p);
+void pci_uio_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset);
+void pci_uio_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset);
+int pci_uio_ioport_unmap(struct rte_pci_ioport *p);
+
+#ifdef VFIO_PRESENT
+
+#ifdef PCI_MSIX_TABLE_BIR
+#define RTE_PCI_MSIX_TABLE_BIR PCI_MSIX_TABLE_BIR
+#else
+#define RTE_PCI_MSIX_TABLE_BIR 0x7
+#endif
+
+#ifdef PCI_MSIX_TABLE_OFFSET
+#define RTE_PCI_MSIX_TABLE_OFFSET PCI_MSIX_TABLE_OFFSET
+#else
+#define RTE_PCI_MSIX_TABLE_OFFSET 0xfffffff8
+#endif
+
+#ifdef PCI_MSIX_FLAGS_QSIZE
+#define RTE_PCI_MSIX_FLAGS_QSIZE PCI_MSIX_FLAGS_QSIZE
+#else
+#define RTE_PCI_MSIX_FLAGS_QSIZE 0x07ff
+#endif
+
+/* access config space */
+int pci_vfio_read_config(const struct rte_intr_handle *intr_handle,
+ void *buf, size_t len, off_t offs);
+int pci_vfio_write_config(const struct rte_intr_handle *intr_handle,
+ const void *buf, size_t len, off_t offs);
+
+int pci_vfio_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p);
+void pci_vfio_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset);
+void pci_vfio_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset);
+int pci_vfio_ioport_unmap(struct rte_pci_ioport *p);
+
+/* map/unmap VFIO resource prototype */
+int pci_vfio_map_resource(struct rte_pci_device *dev);
+int pci_vfio_unmap_resource(struct rte_pci_device *dev);
+
+int pci_vfio_is_enabled(void);
+
+#endif
+
+#endif /* EAL_PCI_INIT_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/pci/linux/pci_uio.c b/src/spdk/dpdk/drivers/bus/pci/linux/pci_uio.c
new file mode 100644
index 00000000..a7c14421
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/linux/pci_uio.c
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/sysmacros.h>
+#include <linux/pci_regs.h>
+
+#if defined(RTE_ARCH_X86)
+#include <sys/io.h>
+#endif
+
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_eal_memconfig.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include "eal_filesystem.h"
+#include "pci_init.h"
+
+void *pci_map_addr = NULL;
+
+#define OFF_MAX ((uint64_t)(off_t)-1)
+
+int
+pci_uio_read_config(const struct rte_intr_handle *intr_handle,
+ void *buf, size_t len, off_t offset)
+{
+ return pread(intr_handle->uio_cfg_fd, buf, len, offset);
+}
+
+int
+pci_uio_write_config(const struct rte_intr_handle *intr_handle,
+ const void *buf, size_t len, off_t offset)
+{
+ return pwrite(intr_handle->uio_cfg_fd, buf, len, offset);
+}
+
+static int
+pci_uio_set_bus_master(int dev_fd)
+{
+ uint16_t reg;
+ int ret;
+
+ ret = pread(dev_fd, &reg, sizeof(reg), PCI_COMMAND);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL,
+ "Cannot read command from PCI config space!\n");
+ return -1;
+ }
+
+ /* return if bus mastering is already on */
+ if (reg & PCI_COMMAND_MASTER)
+ return 0;
+
+ reg |= PCI_COMMAND_MASTER;
+
+ ret = pwrite(dev_fd, &reg, sizeof(reg), PCI_COMMAND);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL,
+ "Cannot write command to PCI config space!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+pci_mknod_uio_dev(const char *sysfs_uio_path, unsigned uio_num)
+{
+ FILE *f;
+ char filename[PATH_MAX];
+ int ret;
+ unsigned major, minor;
+ dev_t dev;
+
+ /* get the name of the sysfs file that contains the major and minor
+ * of the uio device and read its content */
+ snprintf(filename, sizeof(filename), "%s/dev", sysfs_uio_path);
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): cannot open sysfs to get major:minor\n",
+ __func__);
+ return -1;
+ }
+
+ ret = fscanf(f, "%u:%u", &major, &minor);
+ if (ret != 2) {
+ RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs to get major:minor\n",
+ __func__);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+
+ /* create the char device "mknod /dev/uioX c major minor" */
+ snprintf(filename, sizeof(filename), "/dev/uio%u", uio_num);
+ dev = makedev(major, minor);
+ ret = mknod(filename, S_IFCHR | S_IRUSR | S_IWUSR, dev);
+ if (ret != 0) {
+ RTE_LOG(ERR, EAL, "%s(): mknod() failed %s\n",
+ __func__, strerror(errno));
+ return -1;
+ }
+
+ return ret;
+}
+
+/*
+ * Return the uioX char device used for a pci device. On success, return
+ * the UIO number and fill dstbuf string with the path of the device in
+ * sysfs. On error, return a negative value. In this case dstbuf is
+ * invalid.
+ */
+static int
+pci_get_uio_dev(struct rte_pci_device *dev, char *dstbuf,
+ unsigned int buflen, int create)
+{
+ struct rte_pci_addr *loc = &dev->addr;
+ int uio_num = -1;
+ struct dirent *e;
+ DIR *dir;
+ char dirname[PATH_MAX];
+
+ /* depending on kernel version, uio can be located in uio/uioX
+ * or uio:uioX */
+
+ snprintf(dirname, sizeof(dirname),
+ "%s/" PCI_PRI_FMT "/uio", rte_pci_get_sysfs_path(),
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ dir = opendir(dirname);
+ if (dir == NULL) {
+ /* retry with the parent directory */
+ snprintf(dirname, sizeof(dirname),
+ "%s/" PCI_PRI_FMT, rte_pci_get_sysfs_path(),
+ loc->domain, loc->bus, loc->devid, loc->function);
+ dir = opendir(dirname);
+
+ if (dir == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot opendir %s\n", dirname);
+ return -1;
+ }
+ }
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+ /* format could be uio%d ...*/
+ int shortprefix_len = sizeof("uio") - 1;
+ /* ... or uio:uio%d */
+ int longprefix_len = sizeof("uio:uio") - 1;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", 3) != 0)
+ continue;
+
+ /* first try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+ snprintf(dstbuf, buflen, "%s/uio%u", dirname, uio_num);
+ break;
+ }
+
+ /* then try uio:uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+ snprintf(dstbuf, buflen, "%s/uio:uio%u", dirname, uio_num);
+ break;
+ }
+ }
+ closedir(dir);
+
+ /* No uio resource found */
+ if (e == NULL)
+ return -1;
+
+ /* create uio device if we've been asked to */
+ if (rte_eal_create_uio_dev() && create &&
+ pci_mknod_uio_dev(dstbuf, uio_num) < 0)
+ RTE_LOG(WARNING, EAL, "Cannot create /dev/uio%u\n", uio_num);
+
+ return uio_num;
+}
+
+void
+pci_uio_free_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource *uio_res)
+{
+ rte_free(uio_res);
+
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+ if (dev->intr_handle.fd >= 0) {
+ close(dev->intr_handle.fd);
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ }
+}
+
+int
+pci_uio_alloc_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource **uio_res)
+{
+ char dirname[PATH_MAX];
+ char cfgname[PATH_MAX];
+ char devname[PATH_MAX]; /* contains the /dev/uioX */
+ int uio_num;
+ struct rte_pci_addr *loc;
+
+ loc = &dev->addr;
+
+ /* find uio resource */
+ uio_num = pci_get_uio_dev(dev, dirname, sizeof(dirname), 1);
+ if (uio_num < 0) {
+ RTE_LOG(WARNING, EAL, " "PCI_PRI_FMT" not managed by UIO driver, "
+ "skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
+ return 1;
+ }
+ snprintf(devname, sizeof(devname), "/dev/uio%u", uio_num);
+
+ /* save fd if in primary process */
+ dev->intr_handle.fd = open(devname, O_RDWR);
+ if (dev->intr_handle.fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ devname, strerror(errno));
+ goto error;
+ }
+
+ snprintf(cfgname, sizeof(cfgname),
+ "/sys/class/uio/uio%u/device/config", uio_num);
+ dev->intr_handle.uio_cfg_fd = open(cfgname, O_RDWR);
+ if (dev->intr_handle.uio_cfg_fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ cfgname, strerror(errno));
+ goto error;
+ }
+
+ if (dev->kdrv == RTE_KDRV_IGB_UIO)
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
+ else {
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO_INTX;
+
+ /* set bus master that is not done by uio_pci_generic */
+ if (pci_uio_set_bus_master(dev->intr_handle.uio_cfg_fd)) {
+ RTE_LOG(ERR, EAL, "Cannot set up bus mastering!\n");
+ goto error;
+ }
+ }
+
+ /* allocate the mapping details for secondary processes*/
+ *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
+ if (*uio_res == NULL) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot store uio mmap details\n", __func__);
+ goto error;
+ }
+
+ snprintf((*uio_res)->path, sizeof((*uio_res)->path), "%s", devname);
+ memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));
+
+ return 0;
+
+error:
+ pci_uio_free_resource(dev, *uio_res);
+ return -1;
+}
+
+int
+pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
+ struct mapped_pci_resource *uio_res, int map_idx)
+{
+ int fd = -1;
+ char devname[PATH_MAX];
+ void *mapaddr;
+ struct rte_pci_addr *loc;
+ struct pci_map *maps;
+ int wc_activate = 0;
+
+ if (dev->driver != NULL)
+ wc_activate = dev->driver->drv_flags & RTE_PCI_DRV_WC_ACTIVATE;
+
+ loc = &dev->addr;
+ maps = uio_res->maps;
+
+ /* allocate memory to keep path */
+ maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
+ if (maps[map_idx].path == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ /*
+ * open resource file, to mmap it
+ */
+ if (wc_activate) {
+ /* update devname for mmap */
+ snprintf(devname, sizeof(devname),
+ "%s/" PCI_PRI_FMT "/resource%d_wc",
+ rte_pci_get_sysfs_path(),
+ loc->domain, loc->bus, loc->devid,
+ loc->function, res_idx);
+
+ if (access(devname, R_OK|W_OK) != -1) {
+ fd = open(devname, O_RDWR);
+ if (fd < 0)
+ RTE_LOG(INFO, EAL, "%s cannot be mapped. "
+ "Fall-back to non prefetchable mode.\n",
+ devname);
+ }
+ }
+
+ if (!wc_activate || fd < 0) {
+ snprintf(devname, sizeof(devname),
+ "%s/" PCI_PRI_FMT "/resource%d",
+ rte_pci_get_sysfs_path(),
+ loc->domain, loc->bus, loc->devid,
+ loc->function, res_idx);
+
+ /* then try to map resource file */
+ fd = open(devname, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ devname, strerror(errno));
+ goto error;
+ }
+ }
+
+ /* try mapping somewhere close to the end of hugepages */
+ if (pci_map_addr == NULL)
+ pci_map_addr = pci_find_max_end_va();
+
+ mapaddr = pci_map_resource(pci_map_addr, fd, 0,
+ (size_t)dev->mem_resource[res_idx].len, 0);
+ close(fd);
+ if (mapaddr == MAP_FAILED)
+ goto error;
+
+ pci_map_addr = RTE_PTR_ADD(mapaddr,
+ (size_t)dev->mem_resource[res_idx].len);
+
+ maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
+ maps[map_idx].size = dev->mem_resource[res_idx].len;
+ maps[map_idx].addr = mapaddr;
+ maps[map_idx].offset = 0;
+ strcpy(maps[map_idx].path, devname);
+ dev->mem_resource[res_idx].addr = mapaddr;
+
+ return 0;
+
+error:
+ rte_free(maps[map_idx].path);
+ return -1;
+}
+
+#if defined(RTE_ARCH_X86)
+int
+pci_uio_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p)
+{
+ char dirname[PATH_MAX];
+ char filename[PATH_MAX];
+ int uio_num;
+ unsigned long start;
+
+ uio_num = pci_get_uio_dev(dev, dirname, sizeof(dirname), 0);
+ if (uio_num < 0)
+ return -1;
+
+ /* get portio start */
+ snprintf(filename, sizeof(filename),
+ "%s/portio/port%d/start", dirname, bar);
+ if (eal_parse_sysfs_value(filename, &start) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): cannot parse portio start\n",
+ __func__);
+ return -1;
+ }
+ /* ensure we don't get anything funny here, read/write will cast to
+ * uin16_t */
+ if (start > UINT16_MAX)
+ return -1;
+
+ /* FIXME only for primary process ? */
+ if (dev->intr_handle.type == RTE_INTR_HANDLE_UNKNOWN) {
+
+ snprintf(filename, sizeof(filename), "/dev/uio%u", uio_num);
+ dev->intr_handle.fd = open(filename, O_RDWR);
+ if (dev->intr_handle.fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ filename, strerror(errno));
+ return -1;
+ }
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
+ }
+
+ RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%lx\n", start);
+
+ p->base = start;
+ p->len = 0;
+ return 0;
+}
+#else
+int
+pci_uio_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ char filename[PATH_MAX];
+ uint64_t phys_addr, end_addr, flags;
+ int fd, i;
+ void *addr;
+
+ /* open and read addresses of the corresponding resource in sysfs */
+ snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT "/resource",
+ rte_pci_get_sysfs_path(), dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot open sysfs resource: %s\n",
+ strerror(errno));
+ return -1;
+ }
+ for (i = 0; i < bar + 1; i++) {
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot read sysfs resource\n");
+ goto error;
+ }
+ }
+ if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr,
+ &end_addr, &flags) < 0)
+ goto error;
+ if ((flags & IORESOURCE_IO) == 0) {
+ RTE_LOG(ERR, EAL, "BAR %d is not an IO resource\n", bar);
+ goto error;
+ }
+ snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT "/resource%d",
+ rte_pci_get_sysfs_path(), dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function, bar);
+
+ /* mmap the pci resource */
+ fd = open(filename, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
+ strerror(errno));
+ goto error;
+ }
+ addr = mmap(NULL, end_addr + 1, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ close(fd);
+ if (addr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "Cannot mmap IO port resource: %s\n",
+ strerror(errno));
+ goto error;
+ }
+
+ /* strangely, the base address is mmap addr + phys_addr */
+ p->base = (uintptr_t)addr + phys_addr;
+ p->len = end_addr + 1;
+ RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%"PRIx64"\n", p->base);
+ fclose(f);
+
+ return 0;
+
+error:
+ fclose(f);
+ return -1;
+}
+#endif
+
+void
+pci_uio_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset)
+{
+ uint8_t *d;
+ int size;
+ uintptr_t reg = p->base + offset;
+
+ for (d = data; len > 0; d += size, reg += size, len -= size) {
+ if (len >= 4) {
+ size = 4;
+#if defined(RTE_ARCH_X86)
+ *(uint32_t *)d = inl(reg);
+#else
+ *(uint32_t *)d = *(volatile uint32_t *)reg;
+#endif
+ } else if (len >= 2) {
+ size = 2;
+#if defined(RTE_ARCH_X86)
+ *(uint16_t *)d = inw(reg);
+#else
+ *(uint16_t *)d = *(volatile uint16_t *)reg;
+#endif
+ } else {
+ size = 1;
+#if defined(RTE_ARCH_X86)
+ *d = inb(reg);
+#else
+ *d = *(volatile uint8_t *)reg;
+#endif
+ }
+ }
+}
+
+void
+pci_uio_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset)
+{
+ const uint8_t *s;
+ int size;
+ uintptr_t reg = p->base + offset;
+
+ for (s = data; len > 0; s += size, reg += size, len -= size) {
+ if (len >= 4) {
+ size = 4;
+#if defined(RTE_ARCH_X86)
+ outl_p(*(const uint32_t *)s, reg);
+#else
+ *(volatile uint32_t *)reg = *(const uint32_t *)s;
+#endif
+ } else if (len >= 2) {
+ size = 2;
+#if defined(RTE_ARCH_X86)
+ outw_p(*(const uint16_t *)s, reg);
+#else
+ *(volatile uint16_t *)reg = *(const uint16_t *)s;
+#endif
+ } else {
+ size = 1;
+#if defined(RTE_ARCH_X86)
+ outb_p(*s, reg);
+#else
+ *(volatile uint8_t *)reg = *s;
+#endif
+ }
+ }
+}
+
+int
+pci_uio_ioport_unmap(struct rte_pci_ioport *p)
+{
+#if defined(RTE_ARCH_X86)
+ RTE_SET_USED(p);
+ /* FIXME close intr fd ? */
+ return 0;
+#else
+ return munmap((void *)(uintptr_t)p->base, p->len);
+#endif
+}
diff --git a/src/spdk/dpdk/drivers/bus/pci/linux/pci_vfio.c b/src/spdk/dpdk/drivers/bus/pci/linux/pci_vfio.c
new file mode 100644
index 00000000..686386d6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/linux/pci_vfio.c
@@ -0,0 +1,794 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <string.h>
+#include <fcntl.h>
+#include <linux/pci_regs.h>
+#include <sys/eventfd.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <stdbool.h>
+
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_vfio.h>
+
+#include "eal_filesystem.h"
+
+#include "pci_init.h"
+#include "private.h"
+
+/**
+ * @file
+ * PCI probing under linux (VFIO version)
+ *
+ * This code tries to determine if the PCI device is bound to VFIO driver,
+ * and initialize it (map BARs, set up interrupts) if that's the case.
+ *
+ * This file is only compiled if CONFIG_RTE_EAL_VFIO is set to "y".
+ */
+
+#ifdef VFIO_PRESENT
+
+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+static struct rte_tailq_elem rte_vfio_tailq = {
+ .name = "VFIO_RESOURCE_LIST",
+};
+EAL_REGISTER_TAILQ(rte_vfio_tailq)
+
+int
+pci_vfio_read_config(const struct rte_intr_handle *intr_handle,
+ void *buf, size_t len, off_t offs)
+{
+ return pread64(intr_handle->vfio_dev_fd, buf, len,
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
+}
+
+int
+pci_vfio_write_config(const struct rte_intr_handle *intr_handle,
+ const void *buf, size_t len, off_t offs)
+{
+ return pwrite64(intr_handle->vfio_dev_fd, buf, len,
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
+}
+
+/* get PCI BAR number where MSI-X interrupts are */
+static int
+pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table)
+{
+ int ret;
+ uint32_t reg;
+ uint16_t flags;
+ uint8_t cap_id, cap_offset;
+
+ /* read PCI capability pointer from config space */
+ ret = pread64(fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ PCI_CAPABILITY_LIST);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
+ "config space!\n");
+ return -1;
+ }
+
+ /* we need first byte */
+ cap_offset = reg & 0xFF;
+
+ while (cap_offset) {
+
+ /* read PCI capability ID */
+ ret = pread64(fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ cap_offset);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read capability ID from PCI "
+ "config space!\n");
+ return -1;
+ }
+
+ /* we need first byte */
+ cap_id = reg & 0xFF;
+
+ /* if we haven't reached MSI-X, check next capability */
+ if (cap_id != PCI_CAP_ID_MSIX) {
+ ret = pread64(fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ cap_offset);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
+ "config space!\n");
+ return -1;
+ }
+
+ /* we need second byte */
+ cap_offset = (reg & 0xFF00) >> 8;
+
+ continue;
+ }
+ /* else, read table offset */
+ else {
+ /* table offset resides in the next 4 bytes */
+ ret = pread64(fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ cap_offset + 4);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read table offset from PCI config "
+ "space!\n");
+ return -1;
+ }
+
+ ret = pread64(fd, &flags, sizeof(flags),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ cap_offset + 2);
+ if (ret != sizeof(flags)) {
+ RTE_LOG(ERR, EAL, "Cannot read table flags from PCI config "
+ "space!\n");
+ return -1;
+ }
+
+ msix_table->bar_index = reg & RTE_PCI_MSIX_TABLE_BIR;
+ msix_table->offset = reg & RTE_PCI_MSIX_TABLE_OFFSET;
+ msix_table->size =
+ 16 * (1 + (flags & RTE_PCI_MSIX_FLAGS_QSIZE));
+
+ return 0;
+ }
+ }
+ return 0;
+}
+
+/* set PCI bus mastering */
+static int
+pci_vfio_set_bus_master(int dev_fd, bool op)
+{
+ uint16_t reg;
+ int ret;
+
+ ret = pread64(dev_fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ PCI_COMMAND);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read command from PCI config space!\n");
+ return -1;
+ }
+
+ if (op)
+ /* set the master bit */
+ reg |= PCI_COMMAND_MASTER;
+ else
+ reg &= ~(PCI_COMMAND_MASTER);
+
+ ret = pwrite64(dev_fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ PCI_COMMAND);
+
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot write command to PCI config space!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* set up interrupt support (but not enable interrupts) */
+static int
+pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
+{
+ int i, ret, intr_idx;
+ enum rte_intr_mode intr_mode;
+
+ /* default to invalid index */
+ intr_idx = VFIO_PCI_NUM_IRQS;
+
+ /* Get default / configured intr_mode */
+ intr_mode = rte_eal_vfio_intr_mode();
+
+ /* get interrupt type from internal config (MSI-X by default, can be
+ * overridden from the command line
+ */
+ switch (intr_mode) {
+ case RTE_INTR_MODE_MSIX:
+ intr_idx = VFIO_PCI_MSIX_IRQ_INDEX;
+ break;
+ case RTE_INTR_MODE_MSI:
+ intr_idx = VFIO_PCI_MSI_IRQ_INDEX;
+ break;
+ case RTE_INTR_MODE_LEGACY:
+ intr_idx = VFIO_PCI_INTX_IRQ_INDEX;
+ break;
+ /* don't do anything if we want to automatically determine interrupt type */
+ case RTE_INTR_MODE_NONE:
+ break;
+ default:
+ RTE_LOG(ERR, EAL, " unknown default interrupt type!\n");
+ return -1;
+ }
+
+ /* start from MSI-X interrupt type */
+ for (i = VFIO_PCI_MSIX_IRQ_INDEX; i >= 0; i--) {
+ struct vfio_irq_info irq = { .argsz = sizeof(irq) };
+ int fd = -1;
+
+ /* skip interrupt modes we don't want */
+ if (intr_mode != RTE_INTR_MODE_NONE &&
+ i != intr_idx)
+ continue;
+
+ irq.index = i;
+
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, " cannot get IRQ info, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+
+ /* if this vector cannot be used with eventfd, fail if we explicitly
+ * specified interrupt type, otherwise continue */
+ if ((irq.flags & VFIO_IRQ_INFO_EVENTFD) == 0) {
+ if (intr_mode != RTE_INTR_MODE_NONE) {
+ RTE_LOG(ERR, EAL,
+ " interrupt vector does not support eventfd!\n");
+ return -1;
+ } else
+ continue;
+ }
+
+ /* set up an eventfd for interrupts */
+ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, " cannot set up eventfd, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+
+ dev->intr_handle.fd = fd;
+ dev->intr_handle.vfio_dev_fd = vfio_dev_fd;
+
+ switch (i) {
+ case VFIO_PCI_MSIX_IRQ_INDEX:
+ intr_mode = RTE_INTR_MODE_MSIX;
+ dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
+ break;
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ intr_mode = RTE_INTR_MODE_MSI;
+ dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSI;
+ break;
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ intr_mode = RTE_INTR_MODE_LEGACY;
+ dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_LEGACY;
+ break;
+ default:
+ RTE_LOG(ERR, EAL, " unknown interrupt type!\n");
+ return -1;
+ }
+
+ return 0;
+ }
+
+ /* if we're here, we haven't found a suitable interrupt vector */
+ return -1;
+}
+
+static int
+pci_vfio_is_ioport_bar(int vfio_dev_fd, int bar_index)
+{
+ uint32_t ioport_bar;
+ int ret;
+
+ ret = pread64(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX)
+ + PCI_BASE_ADDRESS_0 + bar_index*4);
+ if (ret != sizeof(ioport_bar)) {
+ RTE_LOG(ERR, EAL, "Cannot read command (%x) from config space!\n",
+ PCI_BASE_ADDRESS_0 + bar_index*4);
+ return -1;
+ }
+
+ return (ioport_bar & PCI_BASE_ADDRESS_SPACE_IO) != 0;
+}
+
+static int
+pci_rte_vfio_setup_device(struct rte_pci_device *dev, int vfio_dev_fd)
+{
+ if (pci_vfio_setup_interrupts(dev, vfio_dev_fd) != 0) {
+ RTE_LOG(ERR, EAL, "Error setting up interrupts!\n");
+ return -1;
+ }
+
+ /* set bus mastering for the device */
+ if (pci_vfio_set_bus_master(vfio_dev_fd, true)) {
+ RTE_LOG(ERR, EAL, "Cannot set up bus mastering!\n");
+ return -1;
+ }
+
+ /*
+ * Reset the device. If the device is not capable of resetting,
+ * then it updates errno as EINVAL.
+ */
+ if (ioctl(vfio_dev_fd, VFIO_DEVICE_RESET) && errno != EINVAL) {
+ RTE_LOG(ERR, EAL, "Unable to reset device! Error: %d (%s)\n",
+ errno, strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
+ int bar_index, int additional_flags)
+{
+ struct memreg {
+ unsigned long offset, size;
+ } memreg[2] = {};
+ void *bar_addr;
+ struct pci_msix_table *msix_table = &vfio_res->msix_table;
+ struct pci_map *bar = &vfio_res->maps[bar_index];
+
+ if (bar->size == 0)
+ /* Skip this BAR */
+ return 0;
+
+ if (msix_table->bar_index == bar_index) {
+ /*
+ * VFIO will not let us map the MSI-X table,
+ * but we can map around it.
+ */
+ uint32_t table_start = msix_table->offset;
+ uint32_t table_end = table_start + msix_table->size;
+ table_end = (table_end + ~PAGE_MASK) & PAGE_MASK;
+ table_start &= PAGE_MASK;
+
+ if (table_start == 0 && table_end >= bar->size) {
+ /* Cannot map this BAR */
+ RTE_LOG(DEBUG, EAL, "Skipping BAR%d\n", bar_index);
+ bar->size = 0;
+ bar->addr = 0;
+ return 0;
+ }
+
+ memreg[0].offset = bar->offset;
+ memreg[0].size = table_start;
+ memreg[1].offset = bar->offset + table_end;
+ memreg[1].size = bar->size - table_end;
+
+ RTE_LOG(DEBUG, EAL,
+ "Trying to map BAR%d that contains the MSI-X "
+ "table. Trying offsets: "
+ "0x%04lx:0x%04lx, 0x%04lx:0x%04lx\n", bar_index,
+ memreg[0].offset, memreg[0].size,
+ memreg[1].offset, memreg[1].size);
+ } else {
+ memreg[0].offset = bar->offset;
+ memreg[0].size = bar->size;
+ }
+
+ /* reserve the address using an inaccessible mapping */
+ bar_addr = mmap(bar->addr, bar->size, 0, MAP_PRIVATE |
+ MAP_ANONYMOUS | additional_flags, -1, 0);
+ if (bar_addr != MAP_FAILED) {
+ void *map_addr = NULL;
+ if (memreg[0].size) {
+ /* actual map of first part */
+ map_addr = pci_map_resource(bar_addr, vfio_dev_fd,
+ memreg[0].offset,
+ memreg[0].size,
+ MAP_FIXED);
+ }
+
+ /* if there's a second part, try to map it */
+ if (map_addr != MAP_FAILED
+ && memreg[1].offset && memreg[1].size) {
+ void *second_addr = RTE_PTR_ADD(bar_addr,
+ memreg[1].offset -
+ (uintptr_t)bar->offset);
+ map_addr = pci_map_resource(second_addr,
+ vfio_dev_fd,
+ memreg[1].offset,
+ memreg[1].size,
+ MAP_FIXED);
+ }
+
+ if (map_addr == MAP_FAILED || !map_addr) {
+ munmap(bar_addr, bar->size);
+ bar_addr = MAP_FAILED;
+ RTE_LOG(ERR, EAL, "Failed to map pci BAR%d\n",
+ bar_index);
+ return -1;
+ }
+ } else {
+ RTE_LOG(ERR, EAL,
+ "Failed to create inaccessible mapping for BAR%d\n",
+ bar_index);
+ return -1;
+ }
+
+ bar->addr = bar_addr;
+ return 0;
+}
+
+static int
+pci_vfio_map_resource_primary(struct rte_pci_device *dev)
+{
+ struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
+ char pci_addr[PATH_MAX] = {0};
+ int vfio_dev_fd;
+ struct rte_pci_addr *loc = &dev->addr;
+ int i, ret;
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct mapped_pci_res_list *vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+
+ struct pci_map *maps;
+
+ dev->intr_handle.fd = -1;
+
+ /* store PCI address string */
+ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ ret = rte_vfio_setup_device(rte_pci_get_sysfs_path(), pci_addr,
+ &vfio_dev_fd, &device_info);
+ if (ret)
+ return ret;
+
+ /* allocate vfio_res and get region info */
+ vfio_res = rte_zmalloc("VFIO_RES", sizeof(*vfio_res), 0);
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot store uio mmap details\n", __func__);
+ goto err_vfio_dev_fd;
+ }
+ memcpy(&vfio_res->pci_addr, &dev->addr, sizeof(vfio_res->pci_addr));
+
+ /* get number of registers (up to BAR5) */
+ vfio_res->nb_maps = RTE_MIN((int) device_info.num_regions,
+ VFIO_PCI_BAR5_REGION_INDEX + 1);
+
+ /* map BARs */
+ maps = vfio_res->maps;
+
+ vfio_res->msix_table.bar_index = -1;
+ /* get MSI-X BAR, if any (we have to know where it is because we can't
+ * easily mmap it when using VFIO)
+ */
+ ret = pci_vfio_get_msix_bar(vfio_dev_fd, &vfio_res->msix_table);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, " %s cannot get MSI-X BAR number!\n",
+ pci_addr);
+ goto err_vfio_dev_fd;
+ }
+
+ for (i = 0; i < (int) vfio_res->nb_maps; i++) {
+ struct vfio_region_info reg = { .argsz = sizeof(reg) };
+ void *bar_addr;
+
+ reg.index = i;
+
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " %s cannot get device region info "
+ "error %i (%s)\n", pci_addr, errno, strerror(errno));
+ goto err_vfio_res;
+ }
+
+ /* chk for io port region */
+ ret = pci_vfio_is_ioport_bar(vfio_dev_fd, i);
+ if (ret < 0)
+ goto err_vfio_res;
+ else if (ret) {
+ RTE_LOG(INFO, EAL, "Ignore mapping IO port bar(%d)\n",
+ i);
+ continue;
+ }
+
+ /* skip non-mmapable BARs */
+ if ((reg.flags & VFIO_REGION_INFO_FLAG_MMAP) == 0)
+ continue;
+
+ /* try mapping somewhere close to the end of hugepages */
+ if (pci_map_addr == NULL)
+ pci_map_addr = pci_find_max_end_va();
+
+ bar_addr = pci_map_addr;
+ pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg.size);
+
+ maps[i].addr = bar_addr;
+ maps[i].offset = reg.offset;
+ maps[i].size = reg.size;
+ maps[i].path = NULL; /* vfio doesn't have per-resource paths */
+
+ ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, " %s mapping BAR%i failed: %s\n",
+ pci_addr, i, strerror(errno));
+ goto err_vfio_res;
+ }
+
+ dev->mem_resource[i].addr = maps[i].addr;
+ }
+
+ if (pci_rte_vfio_setup_device(dev, vfio_dev_fd) < 0) {
+ RTE_LOG(ERR, EAL, " %s setup device failed\n", pci_addr);
+ goto err_vfio_res;
+ }
+
+ TAILQ_INSERT_TAIL(vfio_res_list, vfio_res, next);
+
+ return 0;
+err_vfio_res:
+ rte_free(vfio_res);
+err_vfio_dev_fd:
+ close(vfio_dev_fd);
+ return -1;
+}
+
+static int
+pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
+{
+ struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
+ char pci_addr[PATH_MAX] = {0};
+ int vfio_dev_fd;
+ struct rte_pci_addr *loc = &dev->addr;
+ int i, ret;
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct mapped_pci_res_list *vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+
+ struct pci_map *maps;
+
+ dev->intr_handle.fd = -1;
+
+ /* store PCI address string */
+ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ ret = rte_vfio_setup_device(rte_pci_get_sysfs_path(), pci_addr,
+ &vfio_dev_fd, &device_info);
+ if (ret)
+ return ret;
+
+ /* if we're in a secondary process, just find our tailq entry */
+ TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
+ if (rte_pci_addr_cmp(&vfio_res->pci_addr,
+ &dev->addr))
+ continue;
+ break;
+ }
+ /* if we haven't found our tailq entry, something's wrong */
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
+ pci_addr);
+ goto err_vfio_dev_fd;
+ }
+
+ /* map BARs */
+ maps = vfio_res->maps;
+
+ for (i = 0; i < (int) vfio_res->nb_maps; i++) {
+ ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, MAP_FIXED);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, " %s mapping BAR%i failed: %s\n",
+ pci_addr, i, strerror(errno));
+ goto err_vfio_dev_fd;
+ }
+
+ dev->mem_resource[i].addr = maps[i].addr;
+ }
+
+ /* we need save vfio_dev_fd, so it can be used during release */
+ dev->intr_handle.vfio_dev_fd = vfio_dev_fd;
+
+ return 0;
+err_vfio_dev_fd:
+ close(vfio_dev_fd);
+ return -1;
+}
+
+/*
+ * map the PCI resources of a PCI device in virtual memory (VFIO version).
+ * primary and secondary processes follow almost exactly the same path
+ */
+int
+pci_vfio_map_resource(struct rte_pci_device *dev)
+{
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ return pci_vfio_map_resource_primary(dev);
+ else
+ return pci_vfio_map_resource_secondary(dev);
+}
+
+static struct mapped_pci_resource *
+find_and_unmap_vfio_resource(struct mapped_pci_res_list *vfio_res_list,
+ struct rte_pci_device *dev,
+ const char *pci_addr)
+{
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct pci_map *maps;
+ int i;
+
+ /* Get vfio_res */
+ TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
+ if (rte_pci_addr_cmp(&vfio_res->pci_addr, &dev->addr))
+ continue;
+ break;
+ }
+
+ if (vfio_res == NULL)
+ return vfio_res;
+
+ RTE_LOG(INFO, EAL, "Releasing pci mapped resource for %s\n",
+ pci_addr);
+
+ maps = vfio_res->maps;
+ for (i = 0; i < (int) vfio_res->nb_maps; i++) {
+
+ /*
+ * We do not need to be aware of MSI-X table BAR mappings as
+ * when mapping. Just using current maps array is enough
+ */
+ if (maps[i].addr) {
+ RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n",
+ pci_addr, maps[i].addr);
+ pci_unmap_resource(maps[i].addr, maps[i].size);
+ }
+ }
+
+ return vfio_res;
+}
+
+static int
+pci_vfio_unmap_resource_primary(struct rte_pci_device *dev)
+{
+ char pci_addr[PATH_MAX] = {0};
+ struct rte_pci_addr *loc = &dev->addr;
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct mapped_pci_res_list *vfio_res_list;
+ int ret;
+
+ /* store PCI address string */
+ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ if (close(dev->intr_handle.fd) < 0) {
+ RTE_LOG(INFO, EAL, "Error when closing eventfd file descriptor for %s\n",
+ pci_addr);
+ return -1;
+ }
+
+ if (pci_vfio_set_bus_master(dev->intr_handle.vfio_dev_fd, false)) {
+ RTE_LOG(ERR, EAL, " %s cannot unset bus mastering for PCI device!\n",
+ pci_addr);
+ return -1;
+ }
+
+ ret = rte_vfio_release_device(rte_pci_get_sysfs_path(), pci_addr,
+ dev->intr_handle.vfio_dev_fd);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot release device\n", __func__);
+ return ret;
+ }
+
+ vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+ vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr);
+
+ /* if we haven't found our tailq entry, something's wrong */
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
+ pci_addr);
+ return -1;
+ }
+
+ TAILQ_REMOVE(vfio_res_list, vfio_res, next);
+
+ return 0;
+}
+
+static int
+pci_vfio_unmap_resource_secondary(struct rte_pci_device *dev)
+{
+ char pci_addr[PATH_MAX] = {0};
+ struct rte_pci_addr *loc = &dev->addr;
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct mapped_pci_res_list *vfio_res_list;
+ int ret;
+
+ /* store PCI address string */
+ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ ret = rte_vfio_release_device(rte_pci_get_sysfs_path(), pci_addr,
+ dev->intr_handle.vfio_dev_fd);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot release device\n", __func__);
+ return ret;
+ }
+
+ vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+ vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr);
+
+ /* if we haven't found our tailq entry, something's wrong */
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
+ pci_addr);
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+pci_vfio_unmap_resource(struct rte_pci_device *dev)
+{
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ return pci_vfio_unmap_resource_primary(dev);
+ else
+ return pci_vfio_unmap_resource_secondary(dev);
+}
+
+int
+pci_vfio_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p)
+{
+ if (bar < VFIO_PCI_BAR0_REGION_INDEX ||
+ bar > VFIO_PCI_BAR5_REGION_INDEX) {
+ RTE_LOG(ERR, EAL, "invalid bar (%d)!\n", bar);
+ return -1;
+ }
+
+ p->dev = dev;
+ p->base = VFIO_GET_REGION_ADDR(bar);
+ return 0;
+}
+
+void
+pci_vfio_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset)
+{
+ const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;
+
+ if (pread64(intr_handle->vfio_dev_fd, data,
+ len, p->base + offset) <= 0)
+ RTE_LOG(ERR, EAL,
+ "Can't read from PCI bar (%" PRIu64 ") : offset (%x)\n",
+ VFIO_GET_REGION_IDX(p->base), (int)offset);
+}
+
+void
+pci_vfio_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset)
+{
+ const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;
+
+ if (pwrite64(intr_handle->vfio_dev_fd, data,
+ len, p->base + offset) <= 0)
+ RTE_LOG(ERR, EAL,
+ "Can't write to PCI bar (%" PRIu64 ") : offset (%x)\n",
+ VFIO_GET_REGION_IDX(p->base), (int)offset);
+}
+
+int
+pci_vfio_ioport_unmap(struct rte_pci_ioport *p)
+{
+ RTE_SET_USED(p);
+ return -1;
+}
+
+int
+pci_vfio_is_enabled(void)
+{
+ return rte_vfio_is_enabled("vfio_pci");
+}
+#endif
diff --git a/src/spdk/dpdk/drivers/bus/pci/meson.build b/src/spdk/dpdk/drivers/bus/pci/meson.build
new file mode 100644
index 00000000..72939e59
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+deps += ['pci']
+install_headers('rte_bus_pci.h')
+sources = files('pci_common.c', 'pci_common_uio.c')
+if host_machine.system() == 'linux'
+ sources += files('linux/pci.c',
+ 'linux/pci_uio.c',
+ 'linux/pci_vfio.c')
+ includes += include_directories('linux')
+ cflags += ['-D_GNU_SOURCE']
+else
+ sources += files('bsd/pci.c')
+ includes += include_directories('bsd')
+endif
+
+# memseg walk is not part of stable API yet
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/bus/pci/pci_common.c b/src/spdk/dpdk/drivers/bus/pci/pci_common.c
new file mode 100644
index 00000000..7736b3f9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/pci_common.c
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright 2013-2014 6WIND S.A.
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+
+#include <rte_errno.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_string_fns.h>
+#include <rte_common.h>
+#include <rte_devargs.h>
+
+#include "private.h"
+
+
+extern struct rte_pci_bus rte_pci_bus;
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+
+const char *rte_pci_get_sysfs_path(void)
+{
+ const char *path = NULL;
+
+ path = getenv("SYSFS_PCI_DEVICES");
+ if (path == NULL)
+ return SYSFS_PCI_DEVICES;
+
+ return path;
+}
+
+static struct rte_devargs *pci_devargs_lookup(struct rte_pci_device *dev)
+{
+ struct rte_devargs *devargs;
+ struct rte_pci_addr addr;
+
+ RTE_EAL_DEVARGS_FOREACH("pci", devargs) {
+ devargs->bus->parse(devargs->name, &addr);
+ if (!rte_pci_addr_cmp(&dev->addr, &addr))
+ return devargs;
+ }
+ return NULL;
+}
+
+void
+pci_name_set(struct rte_pci_device *dev)
+{
+ struct rte_devargs *devargs;
+
+ /* Each device has its internal, canonical name set. */
+ rte_pci_device_name(&dev->addr,
+ dev->name, sizeof(dev->name));
+ devargs = pci_devargs_lookup(dev);
+ dev->device.devargs = devargs;
+ /* In blacklist mode, if the device is not blacklisted, no
+ * rte_devargs exists for it.
+ */
+ if (devargs != NULL)
+ /* If an rte_devargs exists, the generic rte_device uses the
+ * given name as its name.
+ */
+ dev->device.name = dev->device.devargs->name;
+ else
+ /* Otherwise, it uses the internal, canonical form. */
+ dev->device.name = dev->name;
+}
+
+/*
+ * Match the PCI Driver and Device using the ID Table
+ */
+int
+rte_pci_match(const struct rte_pci_driver *pci_drv,
+ const struct rte_pci_device *pci_dev)
+{
+ const struct rte_pci_id *id_table;
+
+ for (id_table = pci_drv->id_table; id_table->vendor_id != 0;
+ id_table++) {
+ /* check if device's identifiers match the driver's ones */
+ if (id_table->vendor_id != pci_dev->id.vendor_id &&
+ id_table->vendor_id != PCI_ANY_ID)
+ continue;
+ if (id_table->device_id != pci_dev->id.device_id &&
+ id_table->device_id != PCI_ANY_ID)
+ continue;
+ if (id_table->subsystem_vendor_id !=
+ pci_dev->id.subsystem_vendor_id &&
+ id_table->subsystem_vendor_id != PCI_ANY_ID)
+ continue;
+ if (id_table->subsystem_device_id !=
+ pci_dev->id.subsystem_device_id &&
+ id_table->subsystem_device_id != PCI_ANY_ID)
+ continue;
+ if (id_table->class_id != pci_dev->id.class_id &&
+ id_table->class_id != RTE_CLASS_ANY_ID)
+ continue;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * If vendor/device ID match, call the probe() function of the
+ * driver.
+ */
+static int
+rte_pci_probe_one_driver(struct rte_pci_driver *dr,
+ struct rte_pci_device *dev)
+{
+ int ret;
+ struct rte_pci_addr *loc;
+
+ if ((dr == NULL) || (dev == NULL))
+ return -EINVAL;
+
+ loc = &dev->addr;
+
+ /* The device is not blacklisted; Check if driver supports it */
+ if (!rte_pci_match(dr, dev))
+ /* Match of device and driver failed */
+ return 1;
+
+ RTE_LOG(INFO, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
+ loc->domain, loc->bus, loc->devid, loc->function,
+ dev->device.numa_node);
+
+ /* no initialization when blacklisted, return without error */
+ if (dev->device.devargs != NULL &&
+ dev->device.devargs->policy ==
+ RTE_DEV_BLACKLISTED) {
+ RTE_LOG(INFO, EAL, " Device is blacklisted, not"
+ " initializing\n");
+ return 1;
+ }
+
+ if (dev->device.numa_node < 0) {
+ RTE_LOG(WARNING, EAL, " Invalid NUMA socket, default to 0\n");
+ dev->device.numa_node = 0;
+ }
+
+ RTE_LOG(INFO, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
+ dev->id.device_id, dr->driver.name);
+
+ /*
+ * reference driver structure
+ * This needs to be before rte_pci_map_device(), as it enables to use
+ * driver flags for adjusting configuration.
+ */
+ dev->driver = dr;
+ dev->device.driver = &dr->driver;
+
+ if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
+ /* map resources for devices that use igb_uio */
+ ret = rte_pci_map_device(dev);
+ if (ret != 0) {
+ dev->driver = NULL;
+ dev->device.driver = NULL;
+ return ret;
+ }
+ }
+
+ /* call the driver probe() function */
+ ret = dr->probe(dr, dev);
+ if (ret) {
+ dev->driver = NULL;
+ dev->device.driver = NULL;
+ if ((dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) &&
+ /* Don't unmap if device is unsupported and
+ * driver needs mapped resources.
+ */
+ !(ret > 0 &&
+ (dr->drv_flags & RTE_PCI_DRV_KEEP_MAPPED_RES)))
+ rte_pci_unmap_device(dev);
+ }
+
+ return ret;
+}
+
+/*
+ * If vendor/device ID match, call the remove() function of the
+ * driver.
+ */
+static int
+rte_pci_detach_dev(struct rte_pci_device *dev)
+{
+ struct rte_pci_addr *loc;
+ struct rte_pci_driver *dr;
+ int ret = 0;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ dr = dev->driver;
+ loc = &dev->addr;
+
+ RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
+ loc->domain, loc->bus, loc->devid,
+ loc->function, dev->device.numa_node);
+
+ RTE_LOG(DEBUG, EAL, " remove driver: %x:%x %s\n", dev->id.vendor_id,
+ dev->id.device_id, dr->driver.name);
+
+ if (dr->remove) {
+ ret = dr->remove(dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* clear driver structure */
+ dev->driver = NULL;
+
+ if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)
+ /* unmap resources for devices that use igb_uio */
+ rte_pci_unmap_device(dev);
+
+ return 0;
+}
+
+/*
+ * If vendor/device ID match, call the probe() function of all
+ * registered driver for the given device. Return -1 if initialization
+ * failed, return 1 if no driver is found for this device.
+ */
+static int
+pci_probe_all_drivers(struct rte_pci_device *dev)
+{
+ struct rte_pci_driver *dr = NULL;
+ int rc = 0;
+
+ if (dev == NULL)
+ return -1;
+
+ /* Check if a driver is already loaded */
+ if (dev->driver != NULL)
+ return 0;
+
+ FOREACH_DRIVER_ON_PCIBUS(dr) {
+ rc = rte_pci_probe_one_driver(dr, dev);
+ if (rc < 0)
+ /* negative value is an error */
+ return -1;
+ if (rc > 0)
+ /* positive value means driver doesn't support it */
+ continue;
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Scan the content of the PCI bus, and call the probe() function for
+ * all registered drivers that have a matching entry in its id_table
+ * for discovered devices.
+ */
+int
+rte_pci_probe(void)
+{
+ struct rte_pci_device *dev = NULL;
+ size_t probed = 0, failed = 0;
+ struct rte_devargs *devargs;
+ int probe_all = 0;
+ int ret = 0;
+
+ if (rte_pci_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST)
+ probe_all = 1;
+
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ probed++;
+
+ devargs = dev->device.devargs;
+ /* probe all or only whitelisted devices */
+ if (probe_all)
+ ret = pci_probe_all_drivers(dev);
+ else if (devargs != NULL &&
+ devargs->policy == RTE_DEV_WHITELISTED)
+ ret = pci_probe_all_drivers(dev);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Requested device " PCI_PRI_FMT
+ " cannot be used\n", dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ rte_errno = errno;
+ failed++;
+ ret = 0;
+ }
+ }
+
+ return (probed && probed == failed) ? -1 : 0;
+}
+
+/* dump one device */
+static int
+pci_dump_one_device(FILE *f, struct rte_pci_device *dev)
+{
+ int i;
+
+ fprintf(f, PCI_PRI_FMT, dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ fprintf(f, " - vendor:%x device:%x\n", dev->id.vendor_id,
+ dev->id.device_id);
+
+ for (i = 0; i != sizeof(dev->mem_resource) /
+ sizeof(dev->mem_resource[0]); i++) {
+ fprintf(f, " %16.16"PRIx64" %16.16"PRIx64"\n",
+ dev->mem_resource[i].phys_addr,
+ dev->mem_resource[i].len);
+ }
+ return 0;
+}
+
+/* dump devices on the bus */
+void
+rte_pci_dump(FILE *f)
+{
+ struct rte_pci_device *dev = NULL;
+
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ pci_dump_one_device(f, dev);
+ }
+}
+
+static int
+pci_parse(const char *name, void *addr)
+{
+ struct rte_pci_addr *out = addr;
+ struct rte_pci_addr pci_addr;
+ bool parse;
+
+ parse = (rte_pci_addr_parse(name, &pci_addr) == 0);
+ if (parse && addr != NULL)
+ *out = pci_addr;
+ return parse == false;
+}
+
+/* register a driver */
+void
+rte_pci_register(struct rte_pci_driver *driver)
+{
+ TAILQ_INSERT_TAIL(&rte_pci_bus.driver_list, driver, next);
+ driver->bus = &rte_pci_bus;
+}
+
+/* unregister a driver */
+void
+rte_pci_unregister(struct rte_pci_driver *driver)
+{
+ TAILQ_REMOVE(&rte_pci_bus.driver_list, driver, next);
+ driver->bus = NULL;
+}
+
+/* Add a device to PCI bus */
+void
+rte_pci_add_device(struct rte_pci_device *pci_dev)
+{
+ TAILQ_INSERT_TAIL(&rte_pci_bus.device_list, pci_dev, next);
+}
+
+/* Insert a device into a predefined position in PCI bus */
+void
+rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
+ struct rte_pci_device *new_pci_dev)
+{
+ TAILQ_INSERT_BEFORE(exist_pci_dev, new_pci_dev, next);
+}
+
+/* Remove a device from PCI bus */
+static void
+rte_pci_remove_device(struct rte_pci_device *pci_dev)
+{
+ TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next);
+}
+
+static struct rte_device *
+pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ const struct rte_pci_device *pstart;
+ struct rte_pci_device *pdev;
+
+ if (start != NULL) {
+ pstart = RTE_DEV_TO_PCI_CONST(start);
+ pdev = TAILQ_NEXT(pstart, next);
+ } else {
+ pdev = TAILQ_FIRST(&rte_pci_bus.device_list);
+ }
+ while (pdev != NULL) {
+ if (cmp(&pdev->device, data) == 0)
+ return &pdev->device;
+ pdev = TAILQ_NEXT(pdev, next);
+ }
+ return NULL;
+}
+
+static int
+pci_plug(struct rte_device *dev)
+{
+ return pci_probe_all_drivers(RTE_DEV_TO_PCI(dev));
+}
+
+static int
+pci_unplug(struct rte_device *dev)
+{
+ struct rte_pci_device *pdev;
+ int ret;
+
+ pdev = RTE_DEV_TO_PCI(dev);
+ ret = rte_pci_detach_dev(pdev);
+ if (ret == 0) {
+ rte_pci_remove_device(pdev);
+ free(pdev);
+ }
+ return ret;
+}
+
+struct rte_pci_bus rte_pci_bus = {
+ .bus = {
+ .scan = rte_pci_scan,
+ .probe = rte_pci_probe,
+ .find_device = pci_find_device,
+ .plug = pci_plug,
+ .unplug = pci_unplug,
+ .parse = pci_parse,
+ .get_iommu_class = rte_pci_get_iommu_class,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
+};
+
+RTE_REGISTER_BUS(pci, rte_pci_bus.bus);
diff --git a/src/spdk/dpdk/drivers/bus/pci/pci_common_uio.c b/src/spdk/dpdk/drivers/bus/pci/pci_common_uio.c
new file mode 100644
index 00000000..c37fcacf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/pci_common_uio.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#include <rte_eal.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include "private.h"
+
+static struct rte_tailq_elem rte_uio_tailq = {
+ .name = "UIO_RESOURCE_LIST",
+};
+EAL_REGISTER_TAILQ(rte_uio_tailq)
+
+static int
+pci_uio_map_secondary(struct rte_pci_device *dev)
+{
+ int fd, i, j;
+ struct mapped_pci_resource *uio_res;
+ struct mapped_pci_res_list *uio_res_list =
+ RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+
+ /* skip this element if it doesn't match our PCI address */
+ if (rte_pci_addr_cmp(&uio_res->pci_addr, &dev->addr))
+ continue;
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ /*
+ * open devname, to mmap it
+ */
+ fd = open(uio_res->maps[i].path, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ uio_res->maps[i].path, strerror(errno));
+ return -1;
+ }
+
+ void *mapaddr = pci_map_resource(uio_res->maps[i].addr,
+ fd, (off_t)uio_res->maps[i].offset,
+ (size_t)uio_res->maps[i].size, 0);
+ /* fd is not needed in slave process, close it */
+ close(fd);
+ if (mapaddr != uio_res->maps[i].addr) {
+ RTE_LOG(ERR, EAL,
+ "Cannot mmap device resource file %s to address: %p\n",
+ uio_res->maps[i].path,
+ uio_res->maps[i].addr);
+ if (mapaddr != MAP_FAILED) {
+ /* unmap addrs correctly mapped */
+ for (j = 0; j < i; j++)
+ pci_unmap_resource(
+ uio_res->maps[j].addr,
+ (size_t)uio_res->maps[j].size);
+ /* unmap addr wrongly mapped */
+ pci_unmap_resource(mapaddr,
+ (size_t)uio_res->maps[i].size);
+ }
+ return -1;
+ }
+ dev->mem_resource[i].addr = mapaddr;
+ }
+ return 0;
+ }
+
+ RTE_LOG(ERR, EAL, "Cannot find resource for device\n");
+ return 1;
+}
+
+/* map the PCI resource of a PCI device in virtual memory */
+int
+pci_uio_map_resource(struct rte_pci_device *dev)
+{
+ int i, map_idx = 0, ret;
+ uint64_t phaddr;
+ struct mapped_pci_resource *uio_res = NULL;
+ struct mapped_pci_res_list *uio_res_list =
+ RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.uio_cfg_fd = -1;
+
+ /* secondary processes - use already recorded details */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return pci_uio_map_secondary(dev);
+
+ /* allocate uio resource */
+ ret = pci_uio_alloc_resource(dev, &uio_res);
+ if (ret)
+ return ret;
+
+ /* Map all BARs */
+ for (i = 0; i != PCI_MAX_RESOURCE; i++) {
+ /* skip empty BAR */
+ phaddr = dev->mem_resource[i].phys_addr;
+ if (phaddr == 0)
+ continue;
+
+ ret = pci_uio_map_resource_by_index(dev, i,
+ uio_res, map_idx);
+ if (ret)
+ goto error;
+
+ map_idx++;
+ }
+
+ uio_res->nb_maps = map_idx;
+
+ TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);
+
+ return 0;
+error:
+ for (i = 0; i < map_idx; i++) {
+ pci_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ rte_free(uio_res->maps[i].path);
+ }
+ pci_uio_free_resource(dev, uio_res);
+ return -1;
+}
+
+static void
+pci_uio_unmap(struct mapped_pci_resource *uio_res)
+{
+ int i;
+
+ if (uio_res == NULL)
+ return;
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ pci_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(uio_res->maps[i].path);
+ }
+}
+
+static struct mapped_pci_resource *
+pci_uio_find_resource(struct rte_pci_device *dev)
+{
+ struct mapped_pci_resource *uio_res;
+ struct mapped_pci_res_list *uio_res_list =
+ RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+
+ if (dev == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+
+ /* skip this element if it doesn't match our PCI address */
+ if (!rte_pci_addr_cmp(&uio_res->pci_addr, &dev->addr))
+ return uio_res;
+ }
+ return NULL;
+}
+
+/* unmap the PCI resource of a PCI device in virtual memory */
+void
+pci_uio_unmap_resource(struct rte_pci_device *dev)
+{
+ struct mapped_pci_resource *uio_res;
+ struct mapped_pci_res_list *uio_res_list =
+ RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+
+ if (dev == NULL)
+ return;
+
+ /* find an entry for the device */
+ uio_res = pci_uio_find_resource(dev);
+ if (uio_res == NULL)
+ return;
+
+ /* secondary processes - just free maps */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return pci_uio_unmap(uio_res);
+
+ TAILQ_REMOVE(uio_res_list, uio_res, next);
+
+ /* unmap all resources */
+ pci_uio_unmap(uio_res);
+
+ /* free uio resource */
+ rte_free(uio_res);
+
+ /* close fd if in primary process */
+ close(dev->intr_handle.fd);
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+}
diff --git a/src/spdk/dpdk/drivers/bus/pci/private.h b/src/spdk/dpdk/drivers/bus/pci/private.h
new file mode 100644
index 00000000..8ddd03e1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/private.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 6WIND S.A.
+ */
+
+#ifndef _PCI_PRIVATE_H_
+#define _PCI_PRIVATE_H_
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+struct rte_pci_driver;
+struct rte_pci_device;
+
+/**
+ * Probe the PCI bus
+ *
+ * @return
+ * - 0 on success.
+ * - !0 on error.
+ */
+int
+rte_pci_probe(void);
+
+/**
+ * Scan the content of the PCI bus, and the devices in the devices
+ * list
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_pci_scan(void);
+
+/**
+ * Find the name of a PCI device.
+ */
+void
+pci_name_set(struct rte_pci_device *dev);
+
+/**
+ * Add a PCI device to the PCI Bus (append to PCI Device list). This function
+ * also updates the bus references of the PCI Device (and the generic device
+ * object embedded within.
+ *
+ * @param pci_dev
+ * PCI device to add
+ * @return void
+ */
+void rte_pci_add_device(struct rte_pci_device *pci_dev);
+
+/**
+ * Insert a PCI device in the PCI Bus at a particular location in the device
+ * list. It also updates the PCI Bus reference of the new devices to be
+ * inserted.
+ *
+ * @param exist_pci_dev
+ * Existing PCI device in PCI Bus
+ * @param new_pci_dev
+ * PCI device to be added before exist_pci_dev
+ * @return void
+ */
+void rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
+ struct rte_pci_device *new_pci_dev);
+
+/**
+ * Update a pci device object by asking the kernel for the latest information.
+ *
+ * This function is private to EAL.
+ *
+ * @param addr
+ * The PCI Bus-Device-Function address to look for
+ * @return
+ * - 0 on success.
+ * - negative on error.
+ */
+int pci_update_device(const struct rte_pci_addr *addr);
+
+/**
+ * Map the PCI resource of a PCI device in virtual memory
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int pci_uio_map_resource(struct rte_pci_device *dev);
+
+/**
+ * Unmap the PCI resource of a PCI device
+ *
+ * This function is private to EAL.
+ */
+void pci_uio_unmap_resource(struct rte_pci_device *dev);
+
+/**
+ * Allocate uio resource for PCI device
+ *
+ * This function is private to EAL.
+ *
+ * @param dev
+ * PCI device to allocate uio resource
+ * @param uio_res
+ * Pointer to uio resource.
+ * If the function returns 0, the pointer will be filled.
+ * @return
+ * 0 on success, negative on error
+ */
+int pci_uio_alloc_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource **uio_res);
+
+/**
+ * Free uio resource for PCI device
+ *
+ * This function is private to EAL.
+ *
+ * @param dev
+ * PCI device to free uio resource
+ * @param uio_res
+ * Pointer to uio resource.
+ */
+void pci_uio_free_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource *uio_res);
+
+/**
+ * Map device memory to uio resource
+ *
+ * This function is private to EAL.
+ *
+ * @param dev
+ * PCI device that has memory information.
+ * @param res_idx
+ * Memory resource index of the PCI device.
+ * @param uio_res
+ * uio resource that will keep mapping information.
+ * @param map_idx
+ * Mapping information index of the uio resource.
+ * @return
+ * 0 on success, negative on error
+ */
+int pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
+ struct mapped_pci_resource *uio_res, int map_idx);
+
+/*
+ * Match the PCI Driver and Device using the ID Table
+ *
+ * @param pci_drv
+ * PCI driver from which ID table would be extracted
+ * @param pci_dev
+ * PCI device to match against the driver
+ * @return
+ * 1 for successful match
+ * 0 for unsuccessful match
+ */
+int
+rte_pci_match(const struct rte_pci_driver *pci_drv,
+ const struct rte_pci_device *pci_dev);
+
+/**
+ * Get iommu class of PCI devices on the bus.
+ * And return their preferred iova mapping mode.
+ *
+ * @return
+ * - enum rte_iova_mode.
+ */
+enum rte_iova_mode
+rte_pci_get_iommu_class(void);
+
+#endif /* _PCI_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/pci/rte_bus_pci.h b/src/spdk/dpdk/drivers/bus/pci/rte_bus_pci.h
new file mode 100644
index 00000000..0d1955ff
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/rte_bus_pci.h
@@ -0,0 +1,315 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation.
+ * Copyright 2013-2014 6WIND S.A.
+ */
+
+#ifndef _RTE_BUS_PCI_H_
+#define _RTE_BUS_PCI_H_
+
+/**
+ * @file
+ *
+ * RTE PCI Bus Interface
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_dev.h>
+#include <rte_bus.h>
+#include <rte_pci.h>
+
+/** Pathname of PCI devices directory. */
+const char *rte_pci_get_sysfs_path(void);
+
+/* Forward declarations */
+struct rte_pci_device;
+struct rte_pci_driver;
+
+/** List of PCI devices */
+TAILQ_HEAD(rte_pci_device_list, rte_pci_device);
+/** List of PCI drivers */
+TAILQ_HEAD(rte_pci_driver_list, rte_pci_driver);
+
+/* PCI Bus iterators */
+#define FOREACH_DEVICE_ON_PCIBUS(p) \
+ TAILQ_FOREACH(p, &(rte_pci_bus.device_list), next)
+
+#define FOREACH_DRIVER_ON_PCIBUS(p) \
+ TAILQ_FOREACH(p, &(rte_pci_bus.driver_list), next)
+
+struct rte_devargs;
+
+/**
+ * A structure describing a PCI device.
+ */
+struct rte_pci_device {
+ TAILQ_ENTRY(rte_pci_device) next; /**< Next probed PCI device. */
+ struct rte_device device; /**< Inherit core device */
+ struct rte_pci_addr addr; /**< PCI location. */
+ struct rte_pci_id id; /**< PCI ID. */
+ struct rte_mem_resource mem_resource[PCI_MAX_RESOURCE];
+ /**< PCI Memory Resource */
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_pci_driver *driver; /**< Associated driver */
+ uint16_t max_vfs; /**< sriov enable if not zero */
+ enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
+ char name[PCI_PRI_STR_SIZE+1]; /**< PCI location (ASCII) */
+};
+
+/**
+ * @internal
+ * Helper macro for drivers that need to convert to struct rte_pci_device.
+ */
+#define RTE_DEV_TO_PCI(ptr) container_of(ptr, struct rte_pci_device, device)
+
+#define RTE_DEV_TO_PCI_CONST(ptr) \
+ container_of(ptr, const struct rte_pci_device, device)
+
+#define RTE_ETH_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
+
+/** Any PCI device identifier (vendor, device, ...) */
+#define PCI_ANY_ID (0xffff)
+#define RTE_CLASS_ANY_ID (0xffffff)
+
+#ifdef __cplusplus
+/** C++ macro used to help building up tables of device IDs */
+#define RTE_PCI_DEVICE(vend, dev) \
+ RTE_CLASS_ANY_ID, \
+ (vend), \
+ (dev), \
+ PCI_ANY_ID, \
+ PCI_ANY_ID
+#else
+/** Macro used to help building up tables of device IDs */
+#define RTE_PCI_DEVICE(vend, dev) \
+ .class_id = RTE_CLASS_ANY_ID, \
+ .vendor_id = (vend), \
+ .device_id = (dev), \
+ .subsystem_vendor_id = PCI_ANY_ID, \
+ .subsystem_device_id = PCI_ANY_ID
+#endif
+
+/**
+ * Initialisation function for the driver called during PCI probing.
+ */
+typedef int (pci_probe_t)(struct rte_pci_driver *, struct rte_pci_device *);
+
+/**
+ * Uninitialisation function for the driver called during hotplugging.
+ */
+typedef int (pci_remove_t)(struct rte_pci_device *);
+
+/**
+ * A structure describing a PCI driver.
+ */
+struct rte_pci_driver {
+ TAILQ_ENTRY(rte_pci_driver) next; /**< Next in list. */
+ struct rte_driver driver; /**< Inherit core driver. */
+ struct rte_pci_bus *bus; /**< PCI bus reference. */
+ pci_probe_t *probe; /**< Device Probe function. */
+ pci_remove_t *remove; /**< Device Remove function. */
+ const struct rte_pci_id *id_table; /**< ID table, NULL terminated. */
+ uint32_t drv_flags; /**< Flags contolling handling of device. */
+};
+
+/**
+ * Structure describing the PCI bus
+ */
+struct rte_pci_bus {
+ struct rte_bus bus; /**< Inherit the generic class */
+ struct rte_pci_device_list device_list; /**< List of PCI devices */
+ struct rte_pci_driver_list driver_list; /**< List of PCI drivers */
+};
+
+/** Device needs PCI BAR mapping (done with either IGB_UIO or VFIO) */
+#define RTE_PCI_DRV_NEED_MAPPING 0x0001
+/** Device needs PCI BAR mapping with enabled write combining (wc) */
+#define RTE_PCI_DRV_WC_ACTIVATE 0x0002
+/** Device driver supports link state interrupt */
+#define RTE_PCI_DRV_INTR_LSC 0x0008
+/** Device driver supports device removal interrupt */
+#define RTE_PCI_DRV_INTR_RMV 0x0010
+/** Device driver needs to keep mapped resources if unsupported dev detected */
+#define RTE_PCI_DRV_KEEP_MAPPED_RES 0x0020
+/** Device driver supports IOVA as VA */
+#define RTE_PCI_DRV_IOVA_AS_VA 0X0040
+
+/**
+ * Map the PCI device resources in user space virtual memory address
+ *
+ * Note that driver should not call this function when flag
+ * RTE_PCI_DRV_NEED_MAPPING is set, as EAL will do that for
+ * you when it's on.
+ *
+ * @param dev
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ *
+ * @return
+ * 0 on success, negative on error and positive if no driver
+ * is found for the device.
+ */
+int rte_pci_map_device(struct rte_pci_device *dev);
+
+/**
+ * Unmap this device
+ *
+ * @param dev
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ */
+void rte_pci_unmap_device(struct rte_pci_device *dev);
+
+/**
+ * Dump the content of the PCI bus.
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_pci_dump(FILE *f);
+
+/**
+ * Register a PCI driver.
+ *
+ * @param driver
+ * A pointer to a rte_pci_driver structure describing the driver
+ * to be registered.
+ */
+void rte_pci_register(struct rte_pci_driver *driver);
+
+/** Helper for PCI device registration from driver (eth, crypto) instance */
+#define RTE_PMD_REGISTER_PCI(nm, pci_drv) \
+RTE_INIT(pciinitfn_ ##nm) \
+{\
+ (pci_drv).driver.name = RTE_STR(nm);\
+ rte_pci_register(&pci_drv); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+/**
+ * Unregister a PCI driver.
+ *
+ * @param driver
+ * A pointer to a rte_pci_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_pci_unregister(struct rte_pci_driver *driver);
+
+/**
+ * Read PCI config space.
+ *
+ * @param device
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ * @param buf
+ * A data buffer where the bytes should be read into
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into PCI config space
+ */
+int rte_pci_read_config(const struct rte_pci_device *device,
+ void *buf, size_t len, off_t offset);
+
+/**
+ * Write PCI config space.
+ *
+ * @param device
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ * @param buf
+ * A data buffer containing the bytes should be written
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into PCI config space
+ */
+int rte_pci_write_config(const struct rte_pci_device *device,
+ const void *buf, size_t len, off_t offset);
+
+/**
+ * A structure used to access io resources for a pci device.
+ * rte_pci_ioport is arch, os, driver specific, and should not be used outside
+ * of pci ioport api.
+ */
+struct rte_pci_ioport {
+ struct rte_pci_device *dev;
+ uint64_t base;
+ uint64_t len; /* only filled for memory mapped ports */
+};
+
+/**
+ * Initialize a rte_pci_ioport object for a pci device io resource.
+ *
+ * This object is then used to gain access to those io resources (see below).
+ *
+ * @param dev
+ * A pointer to a rte_pci_device structure describing the device
+ * to use.
+ * @param bar
+ * Index of the io pci resource we want to access.
+ * @param p
+ * The rte_pci_ioport object to be initialized.
+ * @return
+ * 0 on success, negative on error.
+ */
+int rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p);
+
+/**
+ * Release any resources used in a rte_pci_ioport object.
+ *
+ * @param p
+ * The rte_pci_ioport object to be uninitialized.
+ * @return
+ * 0 on success, negative on error.
+ */
+int rte_pci_ioport_unmap(struct rte_pci_ioport *p);
+
+/**
+ * Read from a io pci resource.
+ *
+ * @param p
+ * The rte_pci_ioport object from which we want to read.
+ * @param data
+ * A data buffer where the bytes should be read into
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into the pci io resource.
+ */
+void rte_pci_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset);
+
+/**
+ * Write to a io pci resource.
+ *
+ * @param p
+ * The rte_pci_ioport object to which we want to write.
+ * @param data
+ * A data buffer where the bytes should be read into
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into the pci io resource.
+ */
+void rte_pci_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BUS_PCI_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/pci/rte_bus_pci_version.map b/src/spdk/dpdk/drivers/bus/pci/rte_bus_pci_version.map
new file mode 100644
index 00000000..27e9c4f1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/rte_bus_pci_version.map
@@ -0,0 +1,18 @@
+DPDK_17.11 {
+ global:
+
+ rte_pci_dump;
+ rte_pci_get_sysfs_path;
+ rte_pci_ioport_map;
+ rte_pci_ioport_read;
+ rte_pci_ioport_unmap;
+ rte_pci_ioport_write;
+ rte_pci_map_device;
+ rte_pci_read_config;
+ rte_pci_register;
+ rte_pci_unmap_device;
+ rte_pci_unregister;
+ rte_pci_write_config;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/bus/vdev/Makefile b/src/spdk/dpdk/drivers/bus/vdev/Makefile
new file mode 100644
index 00000000..bd0bb895
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vdev/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_bus_vdev.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# versioning export map
+EXPORT_MAP := rte_bus_vdev_version.map
+
+# library version
+LIBABIVER := 1
+
+SRCS-y += vdev.c
+
+LDLIBS += -lrte_eal
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_bus_vdev.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/bus/vdev/meson.build b/src/spdk/dpdk/drivers/bus/vdev/meson.build
new file mode 100644
index 00000000..2ee648b4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vdev/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('vdev.c')
+install_headers('rte_bus_vdev.h')
+
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev.h b/src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev.h
new file mode 100644
index 00000000..9ae3eaae
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
+ */
+
+#ifndef RTE_VDEV_H
+#define RTE_VDEV_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/queue.h>
+#include <rte_dev.h>
+#include <rte_devargs.h>
+
+struct rte_vdev_device {
+ TAILQ_ENTRY(rte_vdev_device) next; /**< Next attached vdev */
+ struct rte_device device; /**< Inherit core device */
+};
+
+/**
+ * @internal
+ * Helper macro for drivers that need to convert to struct rte_vdev_device.
+ */
+#define RTE_DEV_TO_VDEV(ptr) \
+ container_of(ptr, struct rte_vdev_device, device)
+
+#define RTE_DEV_TO_VDEV_CONST(ptr) \
+ container_of(ptr, const struct rte_vdev_device, device)
+
+static inline const char *
+rte_vdev_device_name(const struct rte_vdev_device *dev)
+{
+ if (dev && dev->device.name)
+ return dev->device.name;
+ return NULL;
+}
+
+static inline const char *
+rte_vdev_device_args(const struct rte_vdev_device *dev)
+{
+ if (dev && dev->device.devargs)
+ return dev->device.devargs->args;
+ return "";
+}
+
+/** Double linked list of virtual device drivers. */
+TAILQ_HEAD(vdev_driver_list, rte_vdev_driver);
+
+/**
+ * Probe function called for each virtual device driver once.
+ */
+typedef int (rte_vdev_probe_t)(struct rte_vdev_device *dev);
+
+/**
+ * Remove function called for each virtual device driver once.
+ */
+typedef int (rte_vdev_remove_t)(struct rte_vdev_device *dev);
+
+/**
+ * A virtual device driver abstraction.
+ */
+struct rte_vdev_driver {
+ TAILQ_ENTRY(rte_vdev_driver) next; /**< Next in list. */
+ struct rte_driver driver; /**< Inherited general driver. */
+ rte_vdev_probe_t *probe; /**< Virtual device probe function. */
+ rte_vdev_remove_t *remove; /**< Virtual device remove function. */
+};
+
+/**
+ * Register a virtual device driver.
+ *
+ * @param driver
+ * A pointer to a rte_vdev_driver structure describing the driver
+ * to be registered.
+ */
+void rte_vdev_register(struct rte_vdev_driver *driver);
+
+/**
+ * Unregister a virtual device driver.
+ *
+ * @param driver
+ * A pointer to a rte_vdev_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_vdev_unregister(struct rte_vdev_driver *driver);
+
+#define RTE_PMD_REGISTER_VDEV(nm, vdrv)\
+static const char *vdrvinit_ ## nm ## _alias;\
+RTE_INIT(vdrvinitfn_ ##vdrv)\
+{\
+ (vdrv).driver.name = RTE_STR(nm);\
+ (vdrv).driver.alias = vdrvinit_ ## nm ## _alias;\
+ rte_vdev_register(&vdrv);\
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+#define RTE_PMD_REGISTER_ALIAS(nm, alias)\
+static const char *vdrvinit_ ## nm ## _alias = RTE_STR(alias)
+
+typedef void (*rte_vdev_scan_callback)(void *user_arg);
+
+/**
+ * Add a callback to be called on vdev scan
+ * before reading the devargs list.
+ *
+ * This function cannot be called in a scan callback
+ * because of deadlock.
+ *
+ * @param callback
+ * The function to be called which can update the devargs list.
+ * @param user_arg
+ * An opaque pointer passed to callback.
+ * @return
+ * 0 on success, negative on error
+ */
+int
+rte_vdev_add_custom_scan(rte_vdev_scan_callback callback, void *user_arg);
+
+/**
+ * Remove a registered scan callback.
+ *
+ * This function cannot be called in a scan callback
+ * because of deadlock.
+ *
+ * @param callback
+ * The registered function to be removed.
+ * @param user_arg
+ * The associated opaque pointer or (void*)-1 for any.
+ * @return
+ * 0 on success
+ */
+int
+rte_vdev_remove_custom_scan(rte_vdev_scan_callback callback, void *user_arg);
+
+/**
+ * Initialize a driver specified by name.
+ *
+ * @param name
+ * The pointer to a driver name to be initialized.
+ * @param args
+ * The pointer to arguments used by driver initialization.
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_vdev_init(const char *name, const char *args);
+
+/**
+ * Uninitalize a driver specified by name.
+ *
+ * @param name
+ * The pointer to a driver name to be initialized.
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_vdev_uninit(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev_version.map b/src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev_version.map
new file mode 100644
index 00000000..590cf9b4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev_version.map
@@ -0,0 +1,18 @@
+DPDK_17.11 {
+ global:
+
+ rte_vdev_init;
+ rte_vdev_register;
+ rte_vdev_uninit;
+ rte_vdev_unregister;
+
+ local: *;
+};
+
+DPDK_18.02 {
+ global:
+
+ rte_vdev_add_custom_scan;
+ rte_vdev_remove_custom_scan;
+
+} DPDK_17.11;
diff --git a/src/spdk/dpdk/drivers/bus/vdev/vdev.c b/src/spdk/dpdk/drivers/bus/vdev/vdev.c
new file mode 100644
index 00000000..1cdffd43
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vdev/vdev.c
@@ -0,0 +1,549 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_bus.h>
+#include <rte_common.h>
+#include <rte_devargs.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+
+#include "rte_bus_vdev.h"
+#include "vdev_logs.h"
+
+#define VDEV_MP_KEY "bus_vdev_mp"
+
+int vdev_logtype_bus;
+
+/* Forward declare to access virtual bus name */
+static struct rte_bus rte_vdev_bus;
+
+/** Double linked list of virtual device drivers. */
+TAILQ_HEAD(vdev_device_list, rte_vdev_device);
+
+static struct vdev_device_list vdev_device_list =
+ TAILQ_HEAD_INITIALIZER(vdev_device_list);
+/* The lock needs to be recursive because a vdev can manage another vdev. */
+static rte_spinlock_recursive_t vdev_device_list_lock =
+ RTE_SPINLOCK_RECURSIVE_INITIALIZER;
+
+struct vdev_driver_list vdev_driver_list =
+ TAILQ_HEAD_INITIALIZER(vdev_driver_list);
+
+struct vdev_custom_scan {
+ TAILQ_ENTRY(vdev_custom_scan) next;
+ rte_vdev_scan_callback callback;
+ void *user_arg;
+};
+TAILQ_HEAD(vdev_custom_scans, vdev_custom_scan);
+static struct vdev_custom_scans vdev_custom_scans =
+ TAILQ_HEAD_INITIALIZER(vdev_custom_scans);
+static rte_spinlock_t vdev_custom_scan_lock = RTE_SPINLOCK_INITIALIZER;
+
+/* register a driver */
+void
+rte_vdev_register(struct rte_vdev_driver *driver)
+{
+ TAILQ_INSERT_TAIL(&vdev_driver_list, driver, next);
+}
+
+/* unregister a driver */
+void
+rte_vdev_unregister(struct rte_vdev_driver *driver)
+{
+ TAILQ_REMOVE(&vdev_driver_list, driver, next);
+}
+
+int
+rte_vdev_add_custom_scan(rte_vdev_scan_callback callback, void *user_arg)
+{
+ struct vdev_custom_scan *custom_scan;
+
+ rte_spinlock_lock(&vdev_custom_scan_lock);
+
+ /* check if already registered */
+ TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
+ if (custom_scan->callback == callback &&
+ custom_scan->user_arg == user_arg)
+ break;
+ }
+
+ if (custom_scan == NULL) {
+ custom_scan = malloc(sizeof(struct vdev_custom_scan));
+ if (custom_scan != NULL) {
+ custom_scan->callback = callback;
+ custom_scan->user_arg = user_arg;
+ TAILQ_INSERT_TAIL(&vdev_custom_scans, custom_scan, next);
+ }
+ }
+
+ rte_spinlock_unlock(&vdev_custom_scan_lock);
+
+ return (custom_scan == NULL) ? -1 : 0;
+}
+
+int
+rte_vdev_remove_custom_scan(rte_vdev_scan_callback callback, void *user_arg)
+{
+ struct vdev_custom_scan *custom_scan, *tmp_scan;
+
+ rte_spinlock_lock(&vdev_custom_scan_lock);
+ TAILQ_FOREACH_SAFE(custom_scan, &vdev_custom_scans, next, tmp_scan) {
+ if (custom_scan->callback != callback ||
+ (custom_scan->user_arg != (void *)-1 &&
+ custom_scan->user_arg != user_arg))
+ continue;
+ TAILQ_REMOVE(&vdev_custom_scans, custom_scan, next);
+ free(custom_scan);
+ }
+ rte_spinlock_unlock(&vdev_custom_scan_lock);
+
+ return 0;
+}
+
+static int
+vdev_parse(const char *name, void *addr)
+{
+ struct rte_vdev_driver **out = addr;
+ struct rte_vdev_driver *driver = NULL;
+
+ TAILQ_FOREACH(driver, &vdev_driver_list, next) {
+ if (strncmp(driver->driver.name, name,
+ strlen(driver->driver.name)) == 0)
+ break;
+ if (driver->driver.alias &&
+ strncmp(driver->driver.alias, name,
+ strlen(driver->driver.alias)) == 0)
+ break;
+ }
+ if (driver != NULL &&
+ addr != NULL)
+ *out = driver;
+ return driver == NULL;
+}
+
+static int
+vdev_probe_all_drivers(struct rte_vdev_device *dev)
+{
+ const char *name;
+ struct rte_vdev_driver *driver;
+ int ret;
+
+ name = rte_vdev_device_name(dev);
+
+ VDEV_LOG(DEBUG, "Search driver %s to probe device %s", name,
+ rte_vdev_device_name(dev));
+
+ if (vdev_parse(name, &driver))
+ return -1;
+ dev->device.driver = &driver->driver;
+ ret = driver->probe(dev);
+ if (ret)
+ dev->device.driver = NULL;
+ return ret;
+}
+
+/* The caller shall be responsible for thread-safe */
+static struct rte_vdev_device *
+find_vdev(const char *name)
+{
+ struct rte_vdev_device *dev;
+
+ if (!name)
+ return NULL;
+
+ TAILQ_FOREACH(dev, &vdev_device_list, next) {
+ const char *devname = rte_vdev_device_name(dev);
+
+ if (!strcmp(devname, name))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static struct rte_devargs *
+alloc_devargs(const char *name, const char *args)
+{
+ struct rte_devargs *devargs;
+ int ret;
+
+ devargs = calloc(1, sizeof(*devargs));
+ if (!devargs)
+ return NULL;
+
+ devargs->bus = &rte_vdev_bus;
+ if (args)
+ devargs->args = strdup(args);
+ else
+ devargs->args = strdup("");
+
+ ret = snprintf(devargs->name, sizeof(devargs->name), "%s", name);
+ if (ret < 0 || ret >= (int)sizeof(devargs->name)) {
+ free(devargs->args);
+ free(devargs);
+ return NULL;
+ }
+
+ return devargs;
+}
+
+static int
+insert_vdev(const char *name, const char *args, struct rte_vdev_device **p_dev)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ devargs = alloc_devargs(name, args);
+ if (!devargs)
+ return -ENOMEM;
+
+ dev = calloc(1, sizeof(*dev));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dev->device.devargs = devargs;
+ dev->device.numa_node = SOCKET_ID_ANY;
+ dev->device.name = devargs->name;
+
+ if (find_vdev(name)) {
+ ret = -EEXIST;
+ goto fail;
+ }
+
+ TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
+ rte_devargs_insert(devargs);
+
+ if (p_dev)
+ *p_dev = dev;
+
+ return 0;
+fail:
+ free(devargs->args);
+ free(devargs);
+ free(dev);
+ return ret;
+}
+
+int
+rte_vdev_init(const char *name, const char *args)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ int ret;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+ ret = insert_vdev(name, args, &dev);
+ if (ret == 0) {
+ ret = vdev_probe_all_drivers(dev);
+ if (ret) {
+ if (ret > 0)
+ VDEV_LOG(ERR, "no driver found for %s", name);
+ /* If fails, remove it from vdev list */
+ devargs = dev->device.devargs;
+ TAILQ_REMOVE(&vdev_device_list, dev, next);
+ rte_devargs_remove(devargs->bus->name, devargs->name);
+ free(dev);
+ }
+ }
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ return ret;
+}
+
+static int
+vdev_remove_driver(struct rte_vdev_device *dev)
+{
+ const char *name = rte_vdev_device_name(dev);
+ const struct rte_vdev_driver *driver;
+
+ if (!dev->device.driver) {
+ VDEV_LOG(DEBUG, "no driver attach to device %s", name);
+ return 1;
+ }
+
+ driver = container_of(dev->device.driver, const struct rte_vdev_driver,
+ driver);
+ return driver->remove(dev);
+}
+
+int
+rte_vdev_uninit(const char *name)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+
+ dev = find_vdev(name);
+ if (!dev) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = vdev_remove_driver(dev);
+ if (ret)
+ goto unlock;
+
+ TAILQ_REMOVE(&vdev_device_list, dev, next);
+ devargs = dev->device.devargs;
+ rte_devargs_remove(devargs->bus->name, devargs->name);
+ free(dev);
+
+unlock:
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ return ret;
+}
+
+struct vdev_param {
+#define VDEV_SCAN_REQ 1
+#define VDEV_SCAN_ONE 2
+#define VDEV_SCAN_REP 3
+ int type;
+ int num;
+ char name[RTE_DEV_NAME_MAX_LEN];
+};
+
+static int vdev_plug(struct rte_device *dev);
+
+/**
+ * This function works as the action for both primary and secondary process
+ * for static vdev discovery when a secondary process is booting.
+ *
+ * step 1, secondary process sends a sync request to ask for vdev in primary;
+ * step 2, primary process receives the request, and send vdevs one by one;
+ * step 3, primary process sends back reply, which indicates how many vdevs
+ * are sent.
+ */
+static int
+vdev_action(const struct rte_mp_msg *mp_msg, const void *peer)
+{
+ struct rte_vdev_device *dev;
+ struct rte_mp_msg mp_resp;
+ struct vdev_param *ou = (struct vdev_param *)&mp_resp.param;
+ const struct vdev_param *in = (const struct vdev_param *)mp_msg->param;
+ const char *devname;
+ int num;
+
+ strlcpy(mp_resp.name, VDEV_MP_KEY, sizeof(mp_resp.name));
+ mp_resp.len_param = sizeof(*ou);
+ mp_resp.num_fds = 0;
+
+ switch (in->type) {
+ case VDEV_SCAN_REQ:
+ ou->type = VDEV_SCAN_ONE;
+ ou->num = 1;
+ num = 0;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+ TAILQ_FOREACH(dev, &vdev_device_list, next) {
+ devname = rte_vdev_device_name(dev);
+ if (strlen(devname) == 0) {
+ VDEV_LOG(INFO, "vdev with no name is not sent");
+ continue;
+ }
+ VDEV_LOG(INFO, "send vdev, %s", devname);
+ strlcpy(ou->name, devname, RTE_DEV_NAME_MAX_LEN);
+ if (rte_mp_sendmsg(&mp_resp) < 0)
+ VDEV_LOG(ERR, "send vdev, %s, failed, %s",
+ devname, strerror(rte_errno));
+ num++;
+ }
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+
+ ou->type = VDEV_SCAN_REP;
+ ou->num = num;
+ if (rte_mp_reply(&mp_resp, peer) < 0)
+ VDEV_LOG(ERR, "Failed to reply a scan request");
+ break;
+ case VDEV_SCAN_ONE:
+ VDEV_LOG(INFO, "receive vdev, %s", in->name);
+ if (insert_vdev(in->name, NULL, NULL) < 0)
+ VDEV_LOG(ERR, "failed to add vdev, %s", in->name);
+ break;
+ default:
+ VDEV_LOG(ERR, "vdev cannot recognize this message");
+ }
+
+ return 0;
+}
+
+static int
+vdev_scan(void)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ struct vdev_custom_scan *custom_scan;
+
+ if (rte_mp_action_register(VDEV_MP_KEY, vdev_action) < 0 &&
+ rte_errno != EEXIST) {
+ VDEV_LOG(ERR, "Failed to add vdev mp action");
+ return -1;
+ }
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ struct rte_mp_msg mp_req, *mp_rep;
+ struct rte_mp_reply mp_reply;
+ struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
+ struct vdev_param *req = (struct vdev_param *)mp_req.param;
+ struct vdev_param *resp;
+
+ strlcpy(mp_req.name, VDEV_MP_KEY, sizeof(mp_req.name));
+ mp_req.len_param = sizeof(*req);
+ mp_req.num_fds = 0;
+ req->type = VDEV_SCAN_REQ;
+ if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
+ mp_reply.nb_received == 1) {
+ mp_rep = &mp_reply.msgs[0];
+ resp = (struct vdev_param *)mp_rep->param;
+ VDEV_LOG(INFO, "Received %d vdevs", resp->num);
+ free(mp_reply.msgs);
+ } else
+ VDEV_LOG(ERR, "Failed to request vdev from primary");
+
+ /* Fall through to allow private vdevs in secondary process */
+ }
+
+ /* call custom scan callbacks if any */
+ rte_spinlock_lock(&vdev_custom_scan_lock);
+ TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
+ if (custom_scan->callback != NULL)
+ /*
+ * the callback should update devargs list
+ * by calling rte_devargs_insert() with
+ * devargs.bus = rte_bus_find_by_name("vdev");
+ * devargs.type = RTE_DEVTYPE_VIRTUAL;
+ * devargs.policy = RTE_DEV_WHITELISTED;
+ */
+ custom_scan->callback(custom_scan->user_arg);
+ }
+ rte_spinlock_unlock(&vdev_custom_scan_lock);
+
+ /* for virtual devices we scan the devargs_list populated via cmdline */
+ RTE_EAL_DEVARGS_FOREACH("vdev", devargs) {
+
+ dev = calloc(1, sizeof(*dev));
+ if (!dev)
+ return -1;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+
+ if (find_vdev(devargs->name)) {
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ free(dev);
+ continue;
+ }
+
+ dev->device.devargs = devargs;
+ dev->device.numa_node = SOCKET_ID_ANY;
+ dev->device.name = devargs->name;
+
+ TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
+
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ }
+
+ return 0;
+}
+
+static int
+vdev_probe(void)
+{
+ struct rte_vdev_device *dev;
+ int ret = 0;
+
+ /* call the init function for each virtual device */
+ TAILQ_FOREACH(dev, &vdev_device_list, next) {
+ /* we don't use the vdev lock here, as it's only used in DPDK
+ * initialization; and we don't want to hold such a lock when
+ * we call each driver probe.
+ */
+
+ if (dev->device.driver)
+ continue;
+
+ if (vdev_probe_all_drivers(dev)) {
+ VDEV_LOG(ERR, "failed to initialize %s device",
+ rte_vdev_device_name(dev));
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
+static struct rte_device *
+vdev_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ const struct rte_vdev_device *vstart;
+ struct rte_vdev_device *dev;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+ if (start != NULL) {
+ vstart = RTE_DEV_TO_VDEV_CONST(start);
+ dev = TAILQ_NEXT(vstart, next);
+ } else {
+ dev = TAILQ_FIRST(&vdev_device_list);
+ }
+ while (dev != NULL) {
+ if (cmp(&dev->device, data) == 0)
+ break;
+ dev = TAILQ_NEXT(dev, next);
+ }
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+
+ return dev ? &dev->device : NULL;
+}
+
+static int
+vdev_plug(struct rte_device *dev)
+{
+ return vdev_probe_all_drivers(RTE_DEV_TO_VDEV(dev));
+}
+
+static int
+vdev_unplug(struct rte_device *dev)
+{
+ return rte_vdev_uninit(dev->name);
+}
+
+static struct rte_bus rte_vdev_bus = {
+ .scan = vdev_scan,
+ .probe = vdev_probe,
+ .find_device = vdev_find_device,
+ .plug = vdev_plug,
+ .unplug = vdev_unplug,
+ .parse = vdev_parse,
+};
+
+RTE_REGISTER_BUS(vdev, rte_vdev_bus);
+
+RTE_INIT(vdev_init_log)
+{
+ vdev_logtype_bus = rte_log_register("bus.vdev");
+ if (vdev_logtype_bus >= 0)
+ rte_log_set_level(vdev_logtype_bus, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/bus/vdev/vdev_logs.h b/src/spdk/dpdk/drivers/bus/vdev/vdev_logs.h
new file mode 100644
index 00000000..87593741
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vdev/vdev_logs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _VDEV_LOGS_H_
+#define _VDEV_LOGS_H_
+
+#include <rte_log.h>
+
+extern int vdev_logtype_bus;
+
+#define VDEV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, vdev_logtype_bus, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#endif /* _VDEV_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/Makefile b/src/spdk/dpdk/drivers/bus/vmbus/Makefile
new file mode 100644
index 00000000..deee9dd1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_bus_vmbus.a
+LIBABIVER := 1
+EXPORT_MAP := rte_bus_vmbus_version.map
+
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),)
+SYSTEM := linux
+endif
+ifneq ($(CONFIG_RTE_EXEC_ENV_BSDAPP),)
+$(error "VMBUS not implemented for BSD yet")
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/vmbus/$(SYSTEM)
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev
+
+include $(RTE_SDK)/drivers/bus/vmbus/$(SYSTEM)/Makefile
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) := $(addprefix $(SYSTEM)/,$(SRCS))
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_channel.c vmbus_bufring.c
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_common_uio.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_VMBUS)-include += rte_bus_vmbus.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_VMBUS)-include += rte_vmbus_reg.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/linux/Makefile b/src/spdk/dpdk/drivers/bus/vmbus/linux/Makefile
new file mode 100644
index 00000000..ef0d30b2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/linux/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+SRCS += vmbus_bus.c vmbus_uio.c
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_bus.c b/src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_bus.c
new file mode 100644
index 00000000..52d6a3c0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_bus.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+
+#include <rte_eal.h>
+#include <rte_uuid.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_devargs.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_bus_vmbus.h>
+
+#include "eal_filesystem.h"
+#include "private.h"
+
+/** Pathname of VMBUS devices directory. */
+#define SYSFS_VMBUS_DEVICES "/sys/bus/vmbus/devices"
+
+extern struct rte_vmbus_bus rte_vmbus_bus;
+
+/* Read sysfs file to get UUID */
+static int
+parse_sysfs_uuid(const char *filename, rte_uuid_t uu)
+{
+ char buf[BUFSIZ];
+ char *cp, *in = buf;
+ FILE *f;
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ VMBUS_LOG(ERR, "cannot open sysfs value %s: %s",
+ filename, strerror(errno));
+ return -1;
+ }
+
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ VMBUS_LOG(ERR, "cannot read sysfs value %s",
+ filename);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+
+ cp = strchr(buf, '\n');
+ if (cp)
+ *cp = '\0';
+
+ /* strip { } notation */
+ if (buf[0] == '{') {
+ in = buf + 1;
+ cp = strchr(in, '}');
+ if (cp)
+ *cp = '\0';
+ }
+
+ if (rte_uuid_parse(in, uu) < 0) {
+ VMBUS_LOG(ERR, "%s %s not a valid UUID",
+ filename, buf);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+get_sysfs_string(const char *filename, char *buf, size_t buflen)
+{
+ char *cp;
+ FILE *f;
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ VMBUS_LOG(ERR, "cannot open sysfs value %s:%s",
+ filename, strerror(errno));
+ return -1;
+ }
+
+ if (fgets(buf, buflen, f) == NULL) {
+ VMBUS_LOG(ERR, "cannot read sysfs value %s",
+ filename);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+
+ /* remove trailing newline */
+ cp = memchr(buf, '\n', buflen);
+ if (cp)
+ *cp = '\0';
+
+ return 0;
+}
+
+static int
+vmbus_get_uio_dev(const struct rte_vmbus_device *dev,
+ char *dstbuf, size_t buflen)
+{
+ char dirname[PATH_MAX];
+ unsigned int uio_num;
+ struct dirent *e;
+ DIR *dir;
+
+ /* Assume recent kernel where uio is in uio/uioX */
+ snprintf(dirname, sizeof(dirname),
+ SYSFS_VMBUS_DEVICES "/%s/uio", dev->device.name);
+
+ dir = opendir(dirname);
+ if (dir == NULL)
+ return -1; /* Not a UIO device */
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+ const int prefix_len = 3;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", prefix_len) != 0)
+ continue;
+
+ /* try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + prefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + prefix_len)) {
+ snprintf(dstbuf, buflen, "%s/uio%u", dirname, uio_num);
+ break;
+ }
+ }
+ closedir(dir);
+
+ if (e == NULL)
+ return -1;
+
+ return uio_num;
+}
+
+/* Check map names with kernel names */
+static const char *map_names[VMBUS_MAX_RESOURCE] = {
+ [HV_TXRX_RING_MAP] = "txrx_rings",
+ [HV_INT_PAGE_MAP] = "int_page",
+ [HV_MON_PAGE_MAP] = "monitor_page",
+ [HV_RECV_BUF_MAP] = "recv:",
+ [HV_SEND_BUF_MAP] = "send:",
+};
+
+
+/* map the resources of a vmbus device in virtual memory */
+int
+rte_vmbus_map_device(struct rte_vmbus_device *dev)
+{
+ char uioname[PATH_MAX], filename[PATH_MAX];
+ char dirname[PATH_MAX], mapname[64];
+ int i;
+
+ dev->uio_num = vmbus_get_uio_dev(dev, uioname, sizeof(uioname));
+ if (dev->uio_num < 0) {
+ VMBUS_LOG(DEBUG, "Not managed by UIO driver, skipped");
+ return 1;
+ }
+
+ /* Extract resource value */
+ for (i = 0; i < VMBUS_MAX_RESOURCE; i++) {
+ struct rte_mem_resource *res = &dev->resource[i];
+ unsigned long len, gpad = 0;
+ char *cp;
+
+ snprintf(dirname, sizeof(dirname),
+ "%s/maps/map%d", uioname, i);
+
+ snprintf(filename, sizeof(filename),
+ "%s/name", dirname);
+
+ if (get_sysfs_string(filename, mapname, sizeof(mapname)) < 0) {
+ VMBUS_LOG(ERR, "could not read %s", filename);
+ return -1;
+ }
+
+ if (strncmp(map_names[i], mapname, strlen(map_names[i])) != 0) {
+ VMBUS_LOG(ERR,
+ "unexpected resource %s (expected %s)",
+ mapname, map_names[i]);
+ return -1;
+ }
+
+ snprintf(filename, sizeof(filename),
+ "%s/size", dirname);
+ if (eal_parse_sysfs_value(filename, &len) < 0) {
+ VMBUS_LOG(ERR,
+ "could not read %s", filename);
+ return -1;
+ }
+ res->len = len;
+
+ /* both send and receive buffers have gpad in name */
+ cp = memchr(mapname, ':', sizeof(mapname));
+ if (cp)
+ gpad = strtoul(cp+1, NULL, 0);
+
+ /* put the GPAD value in physical address */
+ res->phys_addr = gpad;
+ }
+
+ return vmbus_uio_map_resource(dev);
+}
+
+void
+rte_vmbus_unmap_device(struct rte_vmbus_device *dev)
+{
+ vmbus_uio_unmap_resource(dev);
+}
+
+/* Scan one vmbus sysfs entry, and fill the devices list from it. */
+static int
+vmbus_scan_one(const char *name)
+{
+ struct rte_vmbus_device *dev, *dev2;
+ char filename[PATH_MAX];
+ char dirname[PATH_MAX];
+ unsigned long tmp;
+
+ dev = calloc(1, sizeof(*dev));
+ if (dev == NULL)
+ return -1;
+
+ dev->device.name = strdup(name);
+ if (!dev->device.name)
+ goto error;
+
+ /* sysfs base directory
+ * /sys/bus/vmbus/devices/7a08391f-f5a0-4ac0-9802-d13fd964f8df
+ * or on older kernel
+ * /sys/bus/vmbus/devices/vmbus_1
+ */
+ snprintf(dirname, sizeof(dirname), "%s/%s",
+ SYSFS_VMBUS_DEVICES, name);
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device_id", dirname);
+ if (parse_sysfs_uuid(filename, dev->device_id) < 0)
+ goto error;
+
+ /* get device class */
+ snprintf(filename, sizeof(filename), "%s/class_id", dirname);
+ if (parse_sysfs_uuid(filename, dev->class_id) < 0)
+ goto error;
+
+ /* get relid */
+ snprintf(filename, sizeof(filename), "%s/id", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0)
+ goto error;
+ dev->relid = tmp;
+
+ /* get monitor id */
+ snprintf(filename, sizeof(filename), "%s/monitor_id", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0)
+ goto error;
+ dev->monitor_id = tmp;
+
+ /* get numa node (if present) */
+ snprintf(filename, sizeof(filename), "%s/numa_node",
+ dirname);
+
+ if (access(filename, R_OK) == 0) {
+ if (eal_parse_sysfs_value(filename, &tmp) < 0)
+ goto error;
+ dev->device.numa_node = tmp;
+ } else {
+ /* if no NUMA support, set default to 0 */
+ dev->device.numa_node = SOCKET_ID_ANY;
+ }
+
+ /* device is valid, add in list (sorted) */
+ VMBUS_LOG(DEBUG, "Adding vmbus device %s", name);
+
+ TAILQ_FOREACH(dev2, &rte_vmbus_bus.device_list, next) {
+ int ret;
+
+ ret = rte_uuid_compare(dev->device_id, dev2->device_id);
+ if (ret > 0)
+ continue;
+
+ if (ret < 0) {
+ vmbus_insert_device(dev2, dev);
+ } else { /* already registered */
+ VMBUS_LOG(NOTICE,
+ "%s already registered", name);
+ free(dev);
+ }
+ return 0;
+ }
+
+ vmbus_add_device(dev);
+ return 0;
+error:
+ VMBUS_LOG(DEBUG, "failed");
+
+ free(dev);
+ return -1;
+}
+
+/*
+ * Scan the content of the vmbus, and the devices in the devices list
+ */
+int
+rte_vmbus_scan(void)
+{
+ struct dirent *e;
+ DIR *dir;
+
+ dir = opendir(SYSFS_VMBUS_DEVICES);
+ if (dir == NULL) {
+ if (errno == ENOENT)
+ return 0;
+
+ VMBUS_LOG(ERR, "opendir %s failed: %s",
+ SYSFS_VMBUS_DEVICES, strerror(errno));
+ return -1;
+ }
+
+ while ((e = readdir(dir)) != NULL) {
+ if (e->d_name[0] == '.')
+ continue;
+
+ if (vmbus_scan_one(e->d_name) < 0)
+ goto error;
+ }
+ closedir(dir);
+ return 0;
+
+error:
+ closedir(dir);
+ return -1;
+}
+
+void rte_vmbus_irq_mask(struct rte_vmbus_device *device)
+{
+ vmbus_uio_irq_control(device, 1);
+}
+
+void rte_vmbus_irq_unmask(struct rte_vmbus_device *device)
+{
+ vmbus_uio_irq_control(device, 0);
+}
+
+int rte_vmbus_irq_read(struct rte_vmbus_device *device)
+{
+ return vmbus_uio_irq_read(device);
+}
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_uio.c b/src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_uio.c
new file mode 100644
index 00000000..856c6d66
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_uio.c
@@ -0,0 +1,398 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_memory.h>
+#include <rte_eal_memconfig.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_bus_vmbus.h>
+#include <rte_string_fns.h>
+
+#include "private.h"
+
+/** Pathname of VMBUS devices directory. */
+#define SYSFS_VMBUS_DEVICES "/sys/bus/vmbus/devices"
+
+static void *vmbus_map_addr;
+
+/* Control interrupts */
+void vmbus_uio_irq_control(struct rte_vmbus_device *dev, int32_t onoff)
+{
+ if (write(dev->intr_handle.fd, &onoff, sizeof(onoff)) < 0) {
+ VMBUS_LOG(ERR, "cannot write to %d:%s",
+ dev->intr_handle.fd, strerror(errno));
+ }
+}
+
+int vmbus_uio_irq_read(struct rte_vmbus_device *dev)
+{
+ int32_t count;
+ int cc;
+
+ cc = read(dev->intr_handle.fd, &count, sizeof(count));
+ if (cc < (int)sizeof(count)) {
+ if (cc < 0) {
+ VMBUS_LOG(ERR, "IRQ read failed %s",
+ strerror(errno));
+ return -errno;
+ }
+ VMBUS_LOG(ERR, "can't read IRQ count");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+void
+vmbus_uio_free_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource *uio_res)
+{
+ rte_free(uio_res);
+
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+
+ if (dev->intr_handle.fd >= 0) {
+ close(dev->intr_handle.fd);
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ }
+}
+
+int
+vmbus_uio_alloc_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource **uio_res)
+{
+ char devname[PATH_MAX]; /* contains the /dev/uioX */
+
+ /* save fd if in primary process */
+ snprintf(devname, sizeof(devname), "/dev/uio%u", dev->uio_num);
+ dev->intr_handle.fd = open(devname, O_RDWR);
+ if (dev->intr_handle.fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ devname, strerror(errno));
+ goto error;
+ }
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO_INTX;
+
+ /* allocate the mapping details for secondary processes*/
+ *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
+ if (*uio_res == NULL) {
+ VMBUS_LOG(ERR, "cannot store uio mmap details");
+ goto error;
+ }
+
+ strlcpy((*uio_res)->path, devname, PATH_MAX);
+ rte_uuid_copy((*uio_res)->id, dev->device_id);
+
+ return 0;
+
+error:
+ vmbus_uio_free_resource(dev, *uio_res);
+ return -1;
+}
+
+static int
+find_max_end_va(const struct rte_memseg_list *msl, void *arg)
+{
+ size_t sz = msl->memseg_arr.len * msl->page_sz;
+ void *end_va = RTE_PTR_ADD(msl->base_va, sz);
+ void **max_va = arg;
+
+ if (*max_va < end_va)
+ *max_va = end_va;
+ return 0;
+}
+
+/*
+ * TODO: this should be part of memseg api.
+ * code is duplicated from PCI.
+ */
+static void *
+vmbus_find_max_end_va(void)
+{
+ void *va = NULL;
+
+ rte_memseg_list_walk(find_max_end_va, &va);
+ return va;
+}
+
+int
+vmbus_uio_map_resource_by_index(struct rte_vmbus_device *dev, int idx,
+ struct mapped_vmbus_resource *uio_res,
+ int flags)
+{
+ size_t size = dev->resource[idx].len;
+ struct vmbus_map *maps = uio_res->maps;
+ void *mapaddr;
+ off_t offset;
+ int fd;
+
+ /* devname for mmap */
+ fd = open(uio_res->path, O_RDWR);
+ if (fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ uio_res->path, strerror(errno));
+ return -1;
+ }
+
+ /* try mapping somewhere close to the end of hugepages */
+ if (vmbus_map_addr == NULL)
+ vmbus_map_addr = vmbus_find_max_end_va();
+
+ /* offset is special in uio it indicates which resource */
+ offset = idx * PAGE_SIZE;
+
+ mapaddr = vmbus_map_resource(vmbus_map_addr, fd, offset, size, flags);
+ close(fd);
+
+ if (mapaddr == MAP_FAILED)
+ return -1;
+
+ dev->resource[idx].addr = mapaddr;
+ vmbus_map_addr = RTE_PTR_ADD(mapaddr, size);
+
+ /* Record result of sucessful mapping for use by secondary */
+ maps[idx].addr = mapaddr;
+ maps[idx].size = size;
+
+ return 0;
+}
+
+static int vmbus_uio_map_primary(struct vmbus_channel *chan,
+ void **ring_buf, uint32_t *ring_size)
+{
+ struct mapped_vmbus_resource *uio_res;
+
+ uio_res = vmbus_uio_find_resource(chan->device);
+ if (!uio_res) {
+ VMBUS_LOG(ERR, "can not find resources!");
+ return -ENOMEM;
+ }
+
+ if (uio_res->nb_maps < VMBUS_MAX_RESOURCE) {
+ VMBUS_LOG(ERR, "VMBUS: only %u resources found!",
+ uio_res->nb_maps);
+ return -EINVAL;
+ }
+
+ *ring_size = uio_res->maps[HV_TXRX_RING_MAP].size / 2;
+ *ring_buf = uio_res->maps[HV_TXRX_RING_MAP].addr;
+ return 0;
+}
+
+static int vmbus_uio_map_subchan(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan,
+ void **ring_buf, uint32_t *ring_size)
+{
+ char ring_path[PATH_MAX];
+ size_t file_size;
+ struct stat sb;
+ int fd;
+
+ snprintf(ring_path, sizeof(ring_path),
+ "%s/%s/channels/%u/ring",
+ SYSFS_VMBUS_DEVICES, dev->device.name,
+ chan->relid);
+
+ fd = open(ring_path, O_RDWR);
+ if (fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ ring_path, strerror(errno));
+ return -errno;
+ }
+
+ if (fstat(fd, &sb) < 0) {
+ VMBUS_LOG(ERR, "Cannot state %s: %s",
+ ring_path, strerror(errno));
+ close(fd);
+ return -errno;
+ }
+ file_size = sb.st_size;
+
+ if (file_size == 0 || (file_size & (PAGE_SIZE - 1))) {
+ VMBUS_LOG(ERR, "incorrect size %s: %zu",
+ ring_path, file_size);
+
+ close(fd);
+ return -EINVAL;
+ }
+
+ *ring_size = file_size / 2;
+ *ring_buf = vmbus_map_resource(vmbus_map_addr, fd,
+ 0, sb.st_size, 0);
+ close(fd);
+
+ if (ring_buf == MAP_FAILED)
+ return -EIO;
+
+ vmbus_map_addr = RTE_PTR_ADD(ring_buf, file_size);
+ return 0;
+}
+
+int vmbus_uio_map_rings(struct vmbus_channel *chan)
+{
+ const struct rte_vmbus_device *dev = chan->device;
+ uint32_t ring_size;
+ void *ring_buf;
+ int ret;
+
+ /* Primary channel */
+ if (chan->subchannel_id == 0)
+ ret = vmbus_uio_map_primary(chan, &ring_buf, &ring_size);
+ else
+ ret = vmbus_uio_map_subchan(dev, chan, &ring_buf, &ring_size);
+
+ if (ret)
+ return ret;
+
+ vmbus_br_setup(&chan->txbr, ring_buf, ring_size);
+ vmbus_br_setup(&chan->rxbr, (char *)ring_buf + ring_size, ring_size);
+ return 0;
+}
+
+static int vmbus_uio_sysfs_read(const char *dir, const char *name,
+ unsigned long *val, unsigned long max_range)
+{
+ char path[PATH_MAX];
+ FILE *f;
+ int ret;
+
+ snprintf(path, sizeof(path), "%s/%s", dir, name);
+ f = fopen(path, "r");
+ if (!f) {
+ VMBUS_LOG(ERR, "can't open %s:%s",
+ path, strerror(errno));
+ return -errno;
+ }
+
+ if (fscanf(f, "%lu", val) != 1)
+ ret = -EIO;
+ else if (*val > max_range)
+ ret = -ERANGE;
+ else
+ ret = 0;
+ fclose(f);
+
+ return ret;
+}
+
+static bool vmbus_uio_ring_present(const struct rte_vmbus_device *dev,
+ uint32_t relid)
+{
+ char ring_path[PATH_MAX];
+
+ /* Check if kernel has subchannel sysfs files */
+ snprintf(ring_path, sizeof(ring_path),
+ "%s/%s/channels/%u/ring",
+ SYSFS_VMBUS_DEVICES, dev->device.name, relid);
+
+ return access(ring_path, R_OK|W_OK) == 0;
+}
+
+bool vmbus_uio_subchannels_supported(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan)
+{
+ return vmbus_uio_ring_present(dev, chan->relid);
+}
+
+static bool vmbus_isnew_subchannel(struct vmbus_channel *primary,
+ unsigned long id)
+{
+ const struct vmbus_channel *c;
+
+ STAILQ_FOREACH(c, &primary->subchannel_list, next) {
+ if (c->relid == id)
+ return false;
+ }
+ return true;
+}
+
+int vmbus_uio_get_subchan(struct vmbus_channel *primary,
+ struct vmbus_channel **subchan)
+{
+ const struct rte_vmbus_device *dev = primary->device;
+ char chan_path[PATH_MAX], subchan_path[PATH_MAX];
+ struct dirent *ent;
+ DIR *chan_dir;
+
+ snprintf(chan_path, sizeof(chan_path),
+ "%s/%s/channels",
+ SYSFS_VMBUS_DEVICES, dev->device.name);
+
+ chan_dir = opendir(chan_path);
+ if (!chan_dir) {
+ VMBUS_LOG(ERR, "cannot open %s: %s",
+ chan_path, strerror(errno));
+ return -errno;
+ }
+
+ while ((ent = readdir(chan_dir))) {
+ unsigned long relid, subid, monid;
+ char *endp;
+ int err;
+
+ if (ent->d_name[0] == '.')
+ continue;
+
+ errno = 0;
+ relid = strtoul(ent->d_name, &endp, 0);
+ if (*endp || errno != 0 || relid > UINT16_MAX) {
+ VMBUS_LOG(NOTICE, "not a valid channel relid: %s",
+ ent->d_name);
+ continue;
+ }
+
+ snprintf(subchan_path, sizeof(subchan_path), "%s/%lu",
+ chan_path, relid);
+ err = vmbus_uio_sysfs_read(subchan_path, "subchannel_id",
+ &subid, UINT16_MAX);
+ if (err) {
+ VMBUS_LOG(NOTICE, "invalid subchannel id %lu",
+ subid);
+ closedir(chan_dir);
+ return err;
+ }
+
+ if (subid == 0)
+ continue; /* skip primary channel */
+
+ if (!vmbus_isnew_subchannel(primary, relid))
+ continue;
+
+ if (!vmbus_uio_ring_present(dev, relid))
+ continue; /* Ring may not be ready yet */
+
+ err = vmbus_uio_sysfs_read(subchan_path, "monitor_id",
+ &monid, UINT8_MAX);
+ if (err) {
+ VMBUS_LOG(NOTICE, "invalid monitor id %lu",
+ monid);
+ return err;
+ }
+
+ err = vmbus_chan_create(dev, relid, subid, monid, subchan);
+ if (err) {
+ VMBUS_LOG(NOTICE, "subchannel setup failed");
+ return err;
+ }
+ break;
+ }
+ closedir(chan_dir);
+
+ return (ent == NULL) ? -ENOENT : 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/meson.build b/src/spdk/dpdk/drivers/bus/vmbus/meson.build
new file mode 100644
index 00000000..18daabec
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+allow_experimental_apis = true
+
+install_headers('rte_bus_vmbus.h','rte_vmbus_reg.h')
+
+sources = files('vmbus_common.c',
+ 'vmbus_channel.c',
+ 'vmbus_bufring.c',
+ 'vmbus_common_uio.c')
+
+if host_machine.system() == 'linux'
+ sources += files('linux/vmbus_bus.c',
+ 'linux/vmbus_uio.c')
+ includes += include_directories('linux')
+else
+ build = false
+endif
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/private.h b/src/spdk/dpdk/drivers/bus/vmbus/private.h
new file mode 100644
index 00000000..9964fc42
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/private.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#ifndef _VMBUS_PRIVATE_H_
+#define _VMBUS_PRIVATE_H_
+
+#include <stdbool.h>
+#include <sys/uio.h>
+#include <rte_log.h>
+#include <rte_vmbus_reg.h>
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+extern int vmbus_logtype_bus;
+#define VMBUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, vmbus_logtype_bus, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+struct vmbus_br {
+ struct vmbus_bufring *vbr;
+ uint32_t dsize;
+ uint32_t windex; /* next available location */
+};
+
+#define UIO_NAME_MAX 64
+
+struct vmbus_map {
+ void *addr; /* user mmap of resource */
+ uint64_t size; /* length */
+};
+
+/*
+ * For multi-process we need to reproduce all vmbus mappings in secondary
+ * processes, so save them in a tailq.
+ */
+struct mapped_vmbus_resource {
+ TAILQ_ENTRY(mapped_vmbus_resource) next;
+
+ rte_uuid_t id;
+ int nb_maps;
+ struct vmbus_map maps[VMBUS_MAX_RESOURCE];
+ char path[PATH_MAX];
+};
+
+TAILQ_HEAD(mapped_vmbus_res_list, mapped_vmbus_resource);
+
+#define HV_MON_TRIG_LEN 32
+#define HV_MON_TRIG_MAX 4
+
+struct vmbus_channel {
+ STAILQ_HEAD(, vmbus_channel) subchannel_list;
+ STAILQ_ENTRY(vmbus_channel) next;
+ const struct rte_vmbus_device *device;
+
+ struct vmbus_br rxbr;
+ struct vmbus_br txbr;
+
+ uint16_t relid;
+ uint16_t subchannel_id;
+ uint8_t monitor_id;
+};
+
+#define VMBUS_MAX_CHANNELS 64
+
+int vmbus_chan_create(const struct rte_vmbus_device *device,
+ uint16_t relid, uint16_t subid, uint8_t monitor_id,
+ struct vmbus_channel **new_chan);
+
+void vmbus_add_device(struct rte_vmbus_device *vmbus_dev);
+void vmbus_insert_device(struct rte_vmbus_device *exist_vmbus_dev,
+ struct rte_vmbus_device *new_vmbus_dev);
+void vmbus_remove_device(struct rte_vmbus_device *vmbus_device);
+
+void vmbus_uio_irq_control(struct rte_vmbus_device *dev, int32_t onoff);
+int vmbus_uio_irq_read(struct rte_vmbus_device *dev);
+
+int vmbus_uio_map_resource(struct rte_vmbus_device *dev);
+void vmbus_uio_unmap_resource(struct rte_vmbus_device *dev);
+
+int vmbus_uio_alloc_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource **uio_res);
+void vmbus_uio_free_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource *uio_res);
+
+struct mapped_vmbus_resource *
+vmbus_uio_find_resource(const struct rte_vmbus_device *dev);
+int vmbus_uio_map_resource_by_index(struct rte_vmbus_device *dev, int res_idx,
+ struct mapped_vmbus_resource *uio_res,
+ int flags);
+
+void *vmbus_map_resource(void *requested_addr, int fd, off_t offset,
+ size_t size, int additional_flags);
+void vmbus_unmap_resource(void *requested_addr, size_t size);
+
+bool vmbus_uio_subchannels_supported(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan);
+int vmbus_uio_get_subchan(struct vmbus_channel *primary,
+ struct vmbus_channel **subchan);
+int vmbus_uio_map_rings(struct vmbus_channel *chan);
+
+void vmbus_br_setup(struct vmbus_br *br, void *buf, unsigned int blen);
+
+/* Amount of space available for write */
+static inline uint32_t
+vmbus_br_availwrite(const struct vmbus_br *br, uint32_t windex)
+{
+ uint32_t rindex = br->vbr->rindex;
+
+ if (windex >= rindex)
+ return br->dsize - (windex - rindex);
+ else
+ return rindex - windex;
+}
+
+static inline uint32_t
+vmbus_br_availread(const struct vmbus_br *br)
+{
+ return br->dsize - vmbus_br_availwrite(br, br->vbr->windex);
+}
+
+int vmbus_txbr_write(struct vmbus_br *tbr, const struct iovec iov[], int iovlen,
+ bool *need_sig);
+
+int vmbus_rxbr_peek(const struct vmbus_br *rbr, void *data, size_t dlen);
+
+int vmbus_rxbr_read(struct vmbus_br *rbr, void *data, size_t dlen, size_t hlen);
+
+#endif /* _VMBUS_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus.h b/src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus.h
new file mode 100644
index 00000000..4a2c1f6f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus.h
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#ifndef _VMBUS_H_
+#define _VMBUS_H_
+
+/**
+ * @file
+ *
+ * VMBUS Interface
+ */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_compat.h>
+#include <rte_uuid.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_dev.h>
+#include <rte_vmbus_reg.h>
+
+/* Forward declarations */
+struct rte_vmbus_device;
+struct rte_vmbus_driver;
+struct rte_vmbus_bus;
+struct vmbus_channel;
+struct vmbus_mon_page;
+
+TAILQ_HEAD(rte_vmbus_device_list, rte_vmbus_device);
+TAILQ_HEAD(rte_vmbus_driver_list, rte_vmbus_driver);
+
+/* VMBus iterators */
+#define FOREACH_DEVICE_ON_VMBUS(p) \
+ TAILQ_FOREACH(p, &(rte_vmbus_bus.device_list), next)
+
+#define FOREACH_DRIVER_ON_VMBUS(p) \
+ TAILQ_FOREACH(p, &(rte_vmbus_bus.driver_list), next)
+
+/** Maximum number of VMBUS resources. */
+enum hv_uio_map {
+ HV_TXRX_RING_MAP = 0,
+ HV_INT_PAGE_MAP,
+ HV_MON_PAGE_MAP,
+ HV_RECV_BUF_MAP,
+ HV_SEND_BUF_MAP
+};
+#define VMBUS_MAX_RESOURCE 5
+
+/**
+ * A structure describing a VMBUS device.
+ */
+struct rte_vmbus_device {
+ TAILQ_ENTRY(rte_vmbus_device) next; /**< Next probed VMBUS device */
+ const struct rte_vmbus_driver *driver; /**< Associated driver */
+ struct rte_device device; /**< Inherit core device */
+ rte_uuid_t device_id; /**< VMBUS device id */
+ rte_uuid_t class_id; /**< VMBUS device type */
+ uint32_t relid; /**< id for primary */
+ uint8_t monitor_id; /**< monitor page */
+ int uio_num; /**< UIO device number */
+ uint32_t *int_page; /**< VMBUS interrupt page */
+ struct vmbus_channel *primary; /**< VMBUS primary channel */
+ struct vmbus_mon_page *monitor_page; /**< VMBUS monitor page */
+
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_mem_resource resource[VMBUS_MAX_RESOURCE];
+};
+
+/**
+ * Initialization function for the driver called during VMBUS probing.
+ */
+typedef int (vmbus_probe_t)(struct rte_vmbus_driver *,
+ struct rte_vmbus_device *);
+
+/**
+ * Initialization function for the driver called during hot plugging.
+ */
+typedef int (vmbus_remove_t)(struct rte_vmbus_device *);
+
+/**
+ * A structure describing a VMBUS driver.
+ */
+struct rte_vmbus_driver {
+ TAILQ_ENTRY(rte_vmbus_driver) next; /**< Next in list. */
+ struct rte_driver driver;
+ struct rte_vmbus_bus *bus; /**< VM bus reference. */
+ vmbus_probe_t *probe; /**< Device Probe function. */
+ vmbus_remove_t *remove; /**< Device Remove function. */
+
+ const rte_uuid_t *id_table; /**< ID table. */
+};
+
+
+/**
+ * Structure describing the VM bus
+ */
+struct rte_vmbus_bus {
+ struct rte_bus bus; /**< Inherit the generic class */
+ struct rte_vmbus_device_list device_list; /**< List of devices */
+ struct rte_vmbus_driver_list driver_list; /**< List of drivers */
+};
+
+/**
+ * Scan the content of the VMBUS bus, and the devices in the devices
+ * list
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_vmbus_scan(void);
+
+/**
+ * Probe the VMBUS bus
+ *
+ * @return
+ * - 0 on success.
+ * - !0 on error.
+ */
+int rte_vmbus_probe(void);
+
+/**
+ * Map the VMBUS device resources in user space virtual memory address
+ *
+ * @param dev
+ * A pointer to a rte_vmbus_device structure describing the device
+ * to use
+ *
+ * @return
+ * 0 on success, negative on error and positive if no driver
+ * is found for the device.
+ */
+int rte_vmbus_map_device(struct rte_vmbus_device *dev);
+
+/**
+ * Unmap this device
+ *
+ * @param dev
+ * A pointer to a rte_vmbus_device structure describing the device
+ * to use
+ */
+void rte_vmbus_unmap_device(struct rte_vmbus_device *dev);
+
+/**
+ * Get connection to primary VMBUS channel
+ *
+ * @param device
+ * A pointer to a rte_vmbus_device structure describing the device
+ * @param chan
+ * A pointer to a VMBUS channel pointer that will be filled.
+ * @return
+ * - 0 Success; channel opened.
+ * - -ENOMEM: Not enough memory available.
+ * - -EINVAL: Regions could not be mapped.
+ */
+int rte_vmbus_chan_open(struct rte_vmbus_device *device,
+ struct vmbus_channel **chan);
+
+/**
+ * Free connection to VMBUS channel
+ *
+ * @param chan
+ * VMBUS channel
+ */
+void rte_vmbus_chan_close(struct vmbus_channel *chan);
+
+/**
+ * Gets the maximum number of channels supported on device
+ *
+ * @param device
+ * A pointer to a rte_vmbus_device structure describing the device
+ * @return
+ * Number of channels available.
+ */
+int rte_vmbus_max_channels(const struct rte_vmbus_device *device);
+
+/**
+ * Get a connection to new secondary vmbus channel
+ *
+ * @param primary
+ * A pointer to primary VMBUS channel
+ * @param chan
+ * A pointer to a secondary VMBUS channel pointer that will be filled.
+ * @return
+ * - 0 Success; channel opened.
+ * - -ENOMEM: Not enough memory available.
+ * - -EINVAL: Regions could not be mapped.
+ */
+int rte_vmbus_subchan_open(struct vmbus_channel *primary,
+ struct vmbus_channel **new_chan);
+
+/**
+ * Disable IRQ for device
+ *
+ * @param device
+ * VMBUS device
+ */
+void rte_vmbus_irq_mask(struct rte_vmbus_device *device);
+
+/**
+ * Enable IRQ for device
+ *
+ * @param device
+ * VMBUS device
+ */
+void rte_vmbus_irq_unmask(struct rte_vmbus_device *device);
+
+/**
+ * Read (and wait) for IRQ
+ *
+ * @param device
+ * VMBUS device
+ */
+int rte_vmbus_irq_read(struct rte_vmbus_device *device);
+
+/**
+ * Test if channel is empty
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @return
+ * Return true if no data present in incoming ring.
+ */
+bool rte_vmbus_chan_rx_empty(const struct vmbus_channel *channel);
+
+/**
+ * Send the specified buffer on the given channel
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param type
+ * Type of packet that is being send e.g. negotiate, time
+ * packet etc.
+ * @param data
+ * Pointer to the buffer to send
+ * @param dlen
+ * Number of bytes of data to send
+ * @param xact
+ * Identifier of the request
+ * @param flags
+ * Message type inband, rxbuf, gpa
+ * @param need_sig
+ * Is host signal tx is required (optional)
+ *
+ * Sends data in buffer directly to hyper-v via the vmbus
+ */
+int rte_vmbus_chan_send(struct vmbus_channel *channel, uint16_t type,
+ void *data, uint32_t dlen,
+ uint64_t xact, uint32_t flags, bool *need_sig);
+
+/**
+ * Explicitly signal host that data is available
+ *
+ * @param
+ * Pointer to vmbus_channel structure.
+ *
+ * Used when batching multiple sends and only signaling host
+ * after the last send.
+ */
+void rte_vmbus_chan_signal_tx(const struct vmbus_channel *channel);
+
+/* Structure for scatter/gather I/O */
+struct iova_list {
+ rte_iova_t addr;
+ uint32_t len;
+};
+#define MAX_PAGE_BUFFER_COUNT 32
+
+/**
+ * Send a scattered buffer on the given channel
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param type
+ * Type of packet that is being send e.g. negotiate, time
+ * packet etc.
+ * @param gpa
+ * Array of buffers to send
+ * @param gpacnt
+ * Number of elements in iov
+ * @param data
+ * Pointer to the buffer additional data to send
+ * @param dlen
+ * Maximum size of what the the buffer will hold
+ * @param xact
+ * Identifier of the request
+ * @param flags
+ * Message type inband, rxbuf, gpa
+ * @param need_sig
+ * Is host signal tx is required (optional)
+ *
+ * Sends data in buffer directly to hyper-v via the vmbus
+ */
+int rte_vmbus_chan_send_sglist(struct vmbus_channel *channel,
+ struct vmbus_gpa gpa[], uint32_t gpacnt,
+ void *data, uint32_t dlen,
+ uint64_t xact, bool *need_sig);
+/**
+ * Receive response to request on the given channel
+ * skips the channel header.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param data
+ * Pointer to the buffer you want to receive the data into.
+ * @param len
+ * Pointer to size of receive buffer (in/out)
+ * @param
+ * Pointer to received transaction_id
+ * @return
+ * On success, returns 0
+ * On failure, returns negative errno.
+ */
+int rte_vmbus_chan_recv(struct vmbus_channel *chan,
+ void *data, uint32_t *len,
+ uint64_t *request_id);
+
+/**
+ * Receive response to request on the given channel
+ * includes the channel header.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param data
+ * Pointer to the buffer you want to receive the data into.
+ * @param len
+ * Pointer to size of receive buffer (in/out)
+ * @return
+ * On success, returns number of bytes read.
+ * On failure, returns negative errno.
+ */
+int rte_vmbus_chan_recv_raw(struct vmbus_channel *chan,
+ void *data, uint32_t *len);
+
+/**
+ * Notify host of bytes read (after recv_raw)
+ * Signals host if required.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param bytes_read
+ * Number of bytes read since last signal
+ */
+void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read);
+
+/**
+ * Determine sub channel index of the given channel
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @return
+ * Sub channel index (0 for primary)
+ */
+uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan);
+
+/**
+ * Register a VMBUS driver.
+ *
+ * @param driver
+ * A pointer to a rte_vmbus_driver structure describing the driver
+ * to be registered.
+ */
+void rte_vmbus_register(struct rte_vmbus_driver *driver);
+
+/**
+ * For debug dump contents of ring buffer.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ */
+void rte_vmbus_chan_dump(FILE *f, const struct vmbus_channel *chan);
+
+/**
+ * Unregister a VMBUS driver.
+ *
+ * @param driver
+ * A pointer to a rte_vmbus_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_vmbus_unregister(struct rte_vmbus_driver *driver);
+
+/** Helper for VMBUS device registration from driver instance */
+#define RTE_PMD_REGISTER_VMBUS(nm, vmbus_drv) \
+ RTE_INIT(vmbusinitfn_ ##nm); \
+ static void vmbusinitfn_ ##nm(void) \
+ { \
+ (vmbus_drv).driver.name = RTE_STR(nm); \
+ rte_vmbus_register(&vmbus_drv); \
+ } \
+ RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _VMBUS_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus_version.map b/src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus_version.map
new file mode 100644
index 00000000..dabb9203
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus_version.map
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+DPDK_18.08 {
+ global:
+
+ rte_vmbus_chan_close;
+ rte_vmbus_chan_open;
+ rte_vmbus_chan_recv;
+ rte_vmbus_chan_recv_raw;
+ rte_vmbus_chan_rx_empty;
+ rte_vmbus_chan_send;
+ rte_vmbus_chan_send_sglist;
+ rte_vmbus_chan_signal_read;
+ rte_vmbus_chan_signal_tx;
+ rte_vmbus_irq_mask;
+ rte_vmbus_irq_read;
+ rte_vmbus_irq_unmask;
+ rte_vmbus_map_device;
+ rte_vmbus_max_channels;
+ rte_vmbus_probe;
+ rte_vmbus_register;
+ rte_vmbus_scan;
+ rte_vmbus_sub_channel_index;
+ rte_vmbus_subchan_open;
+ rte_vmbus_unmap_device;
+ rte_vmbus_unregister;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/rte_vmbus_reg.h b/src/spdk/dpdk/drivers/bus/vmbus/rte_vmbus_reg.h
new file mode 100644
index 00000000..f5a0693d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/rte_vmbus_reg.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#ifndef _VMBUS_REG_H_
+#define _VMBUS_REG_H_
+
+/*
+ * Hyper-V SynIC message format.
+ */
+#define VMBUS_MSG_DSIZE_MAX 240
+#define VMBUS_MSG_SIZE 256
+
+struct vmbus_message {
+ uint32_t type; /* HYPERV_MSGTYPE_ */
+ uint8_t dsize; /* data size */
+ uint8_t flags; /* VMBUS_MSGFLAG_ */
+ uint16_t rsvd;
+ uint64_t id;
+ uint8_t data[VMBUS_MSG_DSIZE_MAX];
+} __rte_packed;
+
+#define VMBUS_MSGFLAG_PENDING 0x01
+
+/*
+ * Hyper-V Monitor Notification Facility
+ */
+
+struct vmbus_mon_trig {
+ uint32_t pending;
+ uint32_t armed;
+} __rte_packed;
+
+#define VMBUS_MONTRIGS_MAX 4
+#define VMBUS_MONTRIG_LEN 32
+
+/*
+ * Hyper-V Monitor Notification Facility
+ */
+struct hyperv_mon_param {
+ uint32_t connid;
+ uint16_t evtflag_ofs;
+ uint16_t rsvd;
+} __rte_packed;
+
+struct vmbus_mon_page {
+ uint32_t state;
+ uint32_t rsvd1;
+
+ struct vmbus_mon_trig trigs[VMBUS_MONTRIGS_MAX];
+ uint8_t rsvd2[536];
+
+ uint16_t lat[VMBUS_MONTRIGS_MAX][VMBUS_MONTRIG_LEN];
+ uint8_t rsvd3[256];
+
+ struct hyperv_mon_param
+ param[VMBUS_MONTRIGS_MAX][VMBUS_MONTRIG_LEN];
+ uint8_t rsvd4[1984];
+} __rte_packed;
+
+/*
+ * Buffer ring
+ */
+
+struct vmbus_bufring {
+ volatile uint32_t windex;
+ volatile uint32_t rindex;
+
+ /*
+ * Interrupt mask {0,1}
+ *
+ * For TX bufring, host set this to 1, when it is processing
+ * the TX bufring, so that we can safely skip the TX event
+ * notification to host.
+ *
+ * For RX bufring, once this is set to 1 by us, host will not
+ * further dispatch interrupts to us, even if there are data
+ * pending on the RX bufring. This effectively disables the
+ * interrupt of the channel to which this RX bufring is attached.
+ */
+ volatile uint32_t imask;
+
+ /*
+ * Win8 uses some of the reserved bits to implement
+ * interrupt driven flow management. On the send side
+ * we can request that the receiver interrupt the sender
+ * when the ring transitions from being full to being able
+ * to handle a message of size "pending_send_sz".
+ *
+ * Add necessary state for this enhancement.
+ */
+ volatile uint32_t pending_send;
+ uint32_t reserved1[12];
+
+ union {
+ struct {
+ uint32_t feat_pending_send_sz:1;
+ };
+ uint32_t value;
+ } feature_bits;
+
+ /* Pad it to PAGE_SIZE so that data starts on page boundary */
+ uint8_t reserved2[4028];
+
+ /*
+ * Ring data starts here + RingDataStartOffset
+ * !!! DO NOT place any fields below this !!!
+ */
+ uint8_t data[0];
+} __rte_packed;
+
+/*
+ * Channel packets
+ */
+
+/* Channel packet flags */
+#define VMBUS_CHANPKT_TYPE_INBAND 0x0006
+#define VMBUS_CHANPKT_TYPE_RXBUF 0x0007
+#define VMBUS_CHANPKT_TYPE_GPA 0x0009
+#define VMBUS_CHANPKT_TYPE_COMP 0x000b
+
+#define VMBUS_CHANPKT_FLAG_NONE 0
+#define VMBUS_CHANPKT_FLAG_RC 0x0001 /* report completion */
+
+#define VMBUS_CHANPKT_SIZE_SHIFT 3
+#define VMBUS_CHANPKT_SIZE_ALIGN (1 << VMBUS_CHANPKT_SIZE_SHIFT)
+#define VMBUS_CHANPKT_HLEN_MIN \
+ (sizeof(struct vmbus_chanpkt_hdr) >> VMBUS_CHANPKT_SIZE_SHIFT)
+
+static inline uint32_t
+vmbus_chanpkt_getlen(uint16_t pktlen)
+{
+ return (uint32_t)pktlen << VMBUS_CHANPKT_SIZE_SHIFT;
+}
+
+/*
+ * GPA stuffs.
+ */
+struct vmbus_gpa_range {
+ uint32_t len;
+ uint32_t ofs;
+ uint64_t page[0];
+} __rte_packed;
+
+/* This is actually vmbus_gpa_range.gpa_page[1] */
+struct vmbus_gpa {
+ uint32_t len;
+ uint32_t ofs;
+ uint64_t page;
+} __rte_packed;
+
+struct vmbus_chanpkt_hdr {
+ uint16_t type; /* VMBUS_CHANPKT_TYPE_ */
+ uint16_t hlen; /* header len, in 8 bytes */
+ uint16_t tlen; /* total len, in 8 bytes */
+ uint16_t flags; /* VMBUS_CHANPKT_FLAG_ */
+ uint64_t xactid;
+} __rte_packed;
+
+static inline uint32_t
+vmbus_chanpkt_datalen(const struct vmbus_chanpkt_hdr *pkt)
+{
+ return vmbus_chanpkt_getlen(pkt->tlen)
+ - vmbus_chanpkt_getlen(pkt->hlen);
+}
+
+struct vmbus_chanpkt {
+ struct vmbus_chanpkt_hdr hdr;
+} __rte_packed;
+
+struct vmbus_rxbuf_desc {
+ uint32_t len;
+ uint32_t ofs;
+} __rte_packed;
+
+struct vmbus_chanpkt_rxbuf {
+ struct vmbus_chanpkt_hdr hdr;
+ uint16_t rxbuf_id;
+ uint16_t rsvd;
+ uint32_t rxbuf_cnt;
+ struct vmbus_rxbuf_desc rxbuf[];
+} __rte_packed;
+
+struct vmbus_chanpkt_sglist {
+ struct vmbus_chanpkt_hdr hdr;
+ uint32_t rsvd;
+ uint32_t gpa_cnt;
+ struct vmbus_gpa gpa[];
+} __rte_packed;
+
+/*
+ * Channel messages
+ * - Embedded in vmbus_message.msg_data, e.g. response and notification.
+ * - Embedded in hypercall_postmsg_in.hc_data, e.g. request.
+ */
+
+#define VMBUS_CHANMSG_TYPE_CHOFFER 1 /* NOTE */
+#define VMBUS_CHANMSG_TYPE_CHRESCIND 2 /* NOTE */
+#define VMBUS_CHANMSG_TYPE_CHREQUEST 3 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CHOFFER_DONE 4 /* NOTE */
+#define VMBUS_CHANMSG_TYPE_CHOPEN 5 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CHOPEN_RESP 6 /* RESP */
+#define VMBUS_CHANMSG_TYPE_CHCLOSE 7 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_CONN 8 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_SUBCONN 9 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_CONNRESP 10 /* RESP */
+#define VMBUS_CHANMSG_TYPE_GPADL_DISCONN 11 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_DISCONNRESP 12 /* RESP */
+#define VMBUS_CHANMSG_TYPE_CHFREE 13 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CONNECT 14 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CONNECT_RESP 15 /* RESP */
+#define VMBUS_CHANMSG_TYPE_DISCONNECT 16 /* REQ */
+#define VMBUS_CHANMSG_TYPE_MAX 22
+
+struct vmbus_chanmsg_hdr {
+ uint32_t type; /* VMBUS_CHANMSG_TYPE_ */
+ uint32_t rsvd;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CONNECT */
+struct vmbus_chanmsg_connect {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t ver;
+ uint32_t rsvd;
+ uint64_t evtflags;
+ uint64_t mnf1;
+ uint64_t mnf2;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CONNECT_RESP */
+struct vmbus_chanmsg_connect_resp {
+ struct vmbus_chanmsg_hdr hdr;
+ uint8_t done;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHREQUEST */
+struct vmbus_chanmsg_chrequest {
+ struct vmbus_chanmsg_hdr hdr;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_DISCONNECT */
+struct vmbus_chanmsg_disconnect {
+ struct vmbus_chanmsg_hdr hdr;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHOPEN */
+struct vmbus_chanmsg_chopen {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t openid;
+ uint32_t gpadl;
+ uint32_t vcpuid;
+ uint32_t txbr_pgcnt;
+#define VMBUS_CHANMSG_CHOPEN_UDATA_SIZE 120
+ uint8_t udata[VMBUS_CHANMSG_CHOPEN_UDATA_SIZE];
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHOPEN_RESP */
+struct vmbus_chanmsg_chopen_resp {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t openid;
+ uint32_t status;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_GPADL_CONN */
+struct vmbus_chanmsg_gpadl_conn {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t gpadl;
+ uint16_t range_len;
+ uint16_t range_cnt;
+ struct vmbus_gpa_range range;
+} __rte_packed;
+
+#define VMBUS_CHANMSG_GPADL_CONN_PGMAX 26
+
+/* VMBUS_CHANMSG_TYPE_GPADL_SUBCONN */
+struct vmbus_chanmsg_gpadl_subconn {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t msgno;
+ uint32_t gpadl;
+ uint64_t gpa_page[];
+} __rte_packed;
+
+#define VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX 28
+
+/* VMBUS_CHANMSG_TYPE_GPADL_CONNRESP */
+struct vmbus_chanmsg_gpadl_connresp {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t gpadl;
+ uint32_t status;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHCLOSE */
+struct vmbus_chanmsg_chclose {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_GPADL_DISCONN */
+struct vmbus_chanmsg_gpadl_disconn {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t gpadl;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHFREE */
+struct vmbus_chanmsg_chfree {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHRESCIND */
+struct vmbus_chanmsg_chrescind {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHOFFER */
+struct vmbus_chanmsg_choffer {
+ struct vmbus_chanmsg_hdr hdr;
+ rte_uuid_t chtype;
+ rte_uuid_t chinst;
+ uint64_t chlat; /* unit: 100ns */
+ uint32_t chrev;
+ uint32_t svrctx_sz;
+ uint16_t chflags;
+ uint16_t mmio_sz; /* unit: MB */
+ uint8_t udata[120];
+ uint16_t subidx;
+ uint16_t rsvd;
+ uint32_t chanid;
+ uint8_t montrig;
+ uint8_t flags1; /* VMBUS_CHOFFER_FLAG1_ */
+ uint16_t flags2;
+ uint32_t connid;
+} __rte_packed;
+
+#define VMBUS_CHOFFER_FLAG1_HASMNF 0x01
+
+#endif /* !_VMBUS_REG_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/vmbus_bufring.c b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_bufring.c
new file mode 100644
index 00000000..c8800160
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_bufring.c
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2009-2012,2016 Microsoft Corp.
+ * Copyright (c) 2012 NetApp Inc.
+ * Copyright (c) 2012 Citrix Inc.
+ * All rights reserved.
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/uio.h>
+
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus.h>
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_pause.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+/* Increase bufring index by inc with wraparound */
+static inline uint32_t vmbus_br_idxinc(uint32_t idx, uint32_t inc, uint32_t sz)
+{
+ idx += inc;
+ if (idx >= sz)
+ idx -= sz;
+
+ return idx;
+}
+
+void vmbus_br_setup(struct vmbus_br *br, void *buf, unsigned int blen)
+{
+ br->vbr = buf;
+ br->windex = br->vbr->windex;
+ br->dsize = blen - sizeof(struct vmbus_bufring);
+}
+
+/*
+ * When we write to the ring buffer, check if the host needs to be
+ * signaled.
+ *
+ * The contract:
+ * - The host guarantees that while it is draining the TX bufring,
+ * it will set the br_imask to indicate it does not need to be
+ * interrupted when new data are added.
+ * - The host guarantees that it will completely drain the TX bufring
+ * before exiting the read loop. Further, once the TX bufring is
+ * empty, it will clear the br_imask and re-check to see if new
+ * data have arrived.
+ */
+static inline bool
+vmbus_txbr_need_signal(const struct vmbus_br *tbr, uint32_t old_windex)
+{
+ rte_smp_mb();
+ if (tbr->vbr->imask)
+ return false;
+
+ rte_smp_rmb();
+
+ /*
+ * This is the only case we need to signal when the
+ * ring transitions from being empty to non-empty.
+ */
+ return old_windex == tbr->vbr->rindex;
+}
+
+static inline uint32_t
+vmbus_txbr_copyto(const struct vmbus_br *tbr, uint32_t windex,
+ const void *src0, uint32_t cplen)
+{
+ uint8_t *br_data = tbr->vbr->data;
+ uint32_t br_dsize = tbr->dsize;
+ const uint8_t *src = src0;
+
+ /* XXX use double mapping like Linux kernel? */
+ if (cplen > br_dsize - windex) {
+ uint32_t fraglen = br_dsize - windex;
+
+ /* Wrap-around detected */
+ memcpy(br_data + windex, src, fraglen);
+ memcpy(br_data, src + fraglen, cplen - fraglen);
+ } else {
+ memcpy(br_data + windex, src, cplen);
+ }
+
+ return vmbus_br_idxinc(windex, cplen, br_dsize);
+}
+
+/*
+ * Write scattered channel packet to TX bufring.
+ *
+ * The offset of this channel packet is written as a 64bits value
+ * immediately after this channel packet.
+ *
+ * The write goes through three stages:
+ * 1. Reserve space in ring buffer for the new data.
+ * Writer atomically moves priv_write_index.
+ * 2. Copy the new data into the ring.
+ * 3. Update the tail of the ring (visible to host) that indicates
+ * next read location. Writer updates write_index
+ */
+int
+vmbus_txbr_write(struct vmbus_br *tbr, const struct iovec iov[], int iovlen,
+ bool *need_sig)
+{
+ struct vmbus_bufring *vbr = tbr->vbr;
+ uint32_t ring_size = tbr->dsize;
+ uint32_t old_windex, next_windex, windex, total;
+ uint64_t save_windex;
+ int i;
+
+ total = 0;
+ for (i = 0; i < iovlen; i++)
+ total += iov[i].iov_len;
+ total += sizeof(save_windex);
+
+ /* Reserve space in ring */
+ do {
+ uint32_t avail;
+
+ /* Get current free location */
+ old_windex = tbr->windex;
+
+ /* Prevent compiler reordering this with calculation */
+ rte_compiler_barrier();
+
+ avail = vmbus_br_availwrite(tbr, old_windex);
+
+ /* If not enough space in ring, then tell caller. */
+ if (avail <= total)
+ return -EAGAIN;
+
+ next_windex = vmbus_br_idxinc(old_windex, total, ring_size);
+
+ /* Atomic update of next write_index for other threads */
+ } while (!rte_atomic32_cmpset(&tbr->windex, old_windex, next_windex));
+
+ /* Space from old..new is now reserved */
+ windex = old_windex;
+ for (i = 0; i < iovlen; i++) {
+ windex = vmbus_txbr_copyto(tbr, windex,
+ iov[i].iov_base, iov[i].iov_len);
+ }
+
+ /* Set the offset of the current channel packet. */
+ save_windex = ((uint64_t)old_windex) << 32;
+ windex = vmbus_txbr_copyto(tbr, windex, &save_windex,
+ sizeof(save_windex));
+
+ /* The region reserved should match region used */
+ RTE_ASSERT(windex == next_windex);
+
+ /* Ensure that data is available before updating host index */
+ rte_smp_wmb();
+
+ /* Checkin for our reservation. wait for our turn to update host */
+ while (!rte_atomic32_cmpset(&vbr->windex, old_windex, next_windex))
+ rte_pause();
+
+ /* If host had read all data before this, then need to signal */
+ *need_sig |= vmbus_txbr_need_signal(tbr, old_windex);
+ return 0;
+}
+
+static inline uint32_t
+vmbus_rxbr_copyfrom(const struct vmbus_br *rbr, uint32_t rindex,
+ void *dst0, size_t cplen)
+{
+ const uint8_t *br_data = rbr->vbr->data;
+ uint32_t br_dsize = rbr->dsize;
+ uint8_t *dst = dst0;
+
+ if (cplen > br_dsize - rindex) {
+ uint32_t fraglen = br_dsize - rindex;
+
+ /* Wrap-around detected. */
+ memcpy(dst, br_data + rindex, fraglen);
+ memcpy(dst + fraglen, br_data, cplen - fraglen);
+ } else {
+ memcpy(dst, br_data + rindex, cplen);
+ }
+
+ return vmbus_br_idxinc(rindex, cplen, br_dsize);
+}
+
+/* Copy data from receive ring but don't change index */
+int
+vmbus_rxbr_peek(const struct vmbus_br *rbr, void *data, size_t dlen)
+{
+ uint32_t avail;
+
+ /*
+ * The requested data and the 64bits channel packet
+ * offset should be there at least.
+ */
+ avail = vmbus_br_availread(rbr);
+ if (avail < dlen + sizeof(uint64_t))
+ return -EAGAIN;
+
+ vmbus_rxbr_copyfrom(rbr, rbr->vbr->rindex, data, dlen);
+ return 0;
+}
+
+/*
+ * Copy data from receive ring and change index
+ * NOTE:
+ * We assume (dlen + skip) == sizeof(channel packet).
+ */
+int
+vmbus_rxbr_read(struct vmbus_br *rbr, void *data, size_t dlen, size_t skip)
+{
+ struct vmbus_bufring *vbr = rbr->vbr;
+ uint32_t br_dsize = rbr->dsize;
+ uint32_t rindex;
+
+ if (vmbus_br_availread(rbr) < dlen + skip + sizeof(uint64_t))
+ return -EAGAIN;
+
+ /* Record where host was when we started read (for debug) */
+ rbr->windex = rbr->vbr->windex;
+
+ /*
+ * Copy channel packet from RX bufring.
+ */
+ rindex = vmbus_br_idxinc(rbr->vbr->rindex, skip, br_dsize);
+ rindex = vmbus_rxbr_copyfrom(rbr, rindex, data, dlen);
+
+ /*
+ * Discard this channel packet's 64bits offset, which is useless to us.
+ */
+ rindex = vmbus_br_idxinc(rindex, sizeof(uint64_t), br_dsize);
+
+ /* Update the read index _after_ the channel packet is fetched. */
+ rte_compiler_barrier();
+
+ vbr->rindex = rindex;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/vmbus_channel.c b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_channel.c
new file mode 100644
index 00000000..cc5f3e83
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_channel.c
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/uio.h>
+
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus.h>
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+static inline void
+vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
+{
+ /* Use GCC builtin which atomic does atomic OR operation */
+ __sync_or_and_fetch(addr, mask);
+}
+
+static inline void
+vmbus_send_interrupt(const struct rte_vmbus_device *dev, uint32_t relid)
+{
+ uint32_t *int_addr;
+ uint32_t int_mask;
+
+ int_addr = dev->int_page + relid / 32;
+ int_mask = 1u << (relid % 32);
+
+ vmbus_sync_set_bit(int_addr, int_mask);
+}
+
+static inline void
+vmbus_set_monitor(const struct rte_vmbus_device *dev, uint32_t monitor_id)
+{
+ uint32_t *monitor_addr, monitor_mask;
+ unsigned int trigger_index;
+
+ trigger_index = monitor_id / HV_MON_TRIG_LEN;
+ monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN);
+
+ monitor_addr = &dev->monitor_page->trigs[trigger_index].pending;
+ vmbus_sync_set_bit(monitor_addr, monitor_mask);
+}
+
+static void
+vmbus_set_event(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan)
+{
+ vmbus_send_interrupt(dev, chan->relid);
+ vmbus_set_monitor(dev, chan->monitor_id);
+}
+
+/*
+ * Notify host that there are data pending on our TX bufring.
+ *
+ * Since this in userspace, rely on the monitor page.
+ * Can't do a hypercall from userspace.
+ */
+void
+rte_vmbus_chan_signal_tx(const struct vmbus_channel *chan)
+{
+ const struct rte_vmbus_device *dev = chan->device;
+ const struct vmbus_br *tbr = &chan->txbr;
+
+ /* Make sure all updates are done before signaling host */
+ rte_smp_wmb();
+
+ /* If host is ignoring interrupts? */
+ if (tbr->vbr->imask)
+ return;
+
+ vmbus_set_event(dev, chan);
+}
+
+
+/* Do a simple send directly using transmit ring. */
+int rte_vmbus_chan_send(struct vmbus_channel *chan, uint16_t type,
+ void *data, uint32_t dlen,
+ uint64_t xactid, uint32_t flags, bool *need_sig)
+{
+ struct vmbus_chanpkt pkt;
+ unsigned int pktlen, pad_pktlen;
+ const uint32_t hlen = sizeof(pkt);
+ bool send_evt = false;
+ uint64_t pad = 0;
+ struct iovec iov[3];
+ int error;
+
+ pktlen = hlen + dlen;
+ pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t));
+
+ pkt.hdr.type = type;
+ pkt.hdr.flags = flags;
+ pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.xactid = xactid;
+
+ iov[0].iov_base = &pkt;
+ iov[0].iov_len = hlen;
+ iov[1].iov_base = data;
+ iov[1].iov_len = dlen;
+ iov[2].iov_base = &pad;
+ iov[2].iov_len = pad_pktlen - pktlen;
+
+ error = vmbus_txbr_write(&chan->txbr, iov, 3, &send_evt);
+
+ /*
+ * caller sets need_sig to non-NULL if it will handle
+ * signaling if required later.
+ * if need_sig is NULL, signal now if needed.
+ */
+ if (need_sig)
+ *need_sig |= send_evt;
+ else if (error == 0 && send_evt)
+ rte_vmbus_chan_signal_tx(chan);
+ return error;
+}
+
+/* Do a scatter/gather send where the descriptor points to data. */
+int rte_vmbus_chan_send_sglist(struct vmbus_channel *chan,
+ struct vmbus_gpa sg[], uint32_t sglen,
+ void *data, uint32_t dlen,
+ uint64_t xactid, bool *need_sig)
+{
+ struct vmbus_chanpkt_sglist pkt;
+ unsigned int pktlen, pad_pktlen, hlen;
+ bool send_evt = false;
+ struct iovec iov[4];
+ uint64_t pad = 0;
+ int error;
+
+ hlen = offsetof(struct vmbus_chanpkt_sglist, gpa[sglen]);
+ pktlen = hlen + dlen;
+ pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t));
+
+ pkt.hdr.type = VMBUS_CHANPKT_TYPE_GPA;
+ pkt.hdr.flags = VMBUS_CHANPKT_FLAG_RC;
+ pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.xactid = xactid;
+ pkt.rsvd = 0;
+ pkt.gpa_cnt = sglen;
+
+ iov[0].iov_base = &pkt;
+ iov[0].iov_len = sizeof(pkt);
+ iov[1].iov_base = sg;
+ iov[1].iov_len = sizeof(struct vmbus_gpa) * sglen;
+ iov[2].iov_base = data;
+ iov[2].iov_len = dlen;
+ iov[3].iov_base = &pad;
+ iov[3].iov_len = pad_pktlen - pktlen;
+
+ error = vmbus_txbr_write(&chan->txbr, iov, 4, &send_evt);
+
+ /* if caller is batching, just propagate the status */
+ if (need_sig)
+ *need_sig |= send_evt;
+ else if (error == 0 && send_evt)
+ rte_vmbus_chan_signal_tx(chan);
+ return error;
+}
+
+bool rte_vmbus_chan_rx_empty(const struct vmbus_channel *channel)
+{
+ const struct vmbus_br *br = &channel->rxbr;
+
+ return br->vbr->rindex == br->vbr->windex;
+}
+
+/* Signal host after reading N bytes */
+void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read)
+{
+ struct vmbus_br *rbr = &chan->rxbr;
+ uint32_t write_sz, pending_sz;
+
+ /* No need for signaling on older versions */
+ if (!rbr->vbr->feature_bits.feat_pending_send_sz)
+ return;
+
+ /* Make sure reading of pending happens after new read index */
+ rte_mb();
+
+ pending_sz = rbr->vbr->pending_send;
+ if (!pending_sz)
+ return;
+
+ rte_smp_rmb();
+ write_sz = vmbus_br_availwrite(rbr, rbr->vbr->windex);
+
+ /* If there was space before then host was not blocked */
+ if (write_sz - bytes_read > pending_sz)
+ return;
+
+ /* If pending write will not fit */
+ if (write_sz <= pending_sz)
+ return;
+
+ vmbus_set_event(chan->device, chan);
+}
+
+int rte_vmbus_chan_recv(struct vmbus_channel *chan, void *data, uint32_t *len,
+ uint64_t *request_id)
+{
+ struct vmbus_chanpkt_hdr pkt;
+ uint32_t dlen, hlen, bufferlen = *len;
+ int error;
+
+ *len = 0;
+
+ error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt));
+ if (error)
+ return error;
+
+ if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) {
+ VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen);
+ /* XXX this channel is dead actually. */
+ return -EIO;
+ }
+
+ if (unlikely(pkt.hlen > pkt.tlen)) {
+ VMBUS_LOG(ERR, "VMBUS recv,invalid hlen %u and tlen %u",
+ pkt.hlen, pkt.tlen);
+ return -EIO;
+ }
+
+ /* Length are in quad words */
+ hlen = pkt.hlen << VMBUS_CHANPKT_SIZE_SHIFT;
+ dlen = (pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT) - hlen;
+ *len = dlen;
+
+ /* If caller buffer is not large enough */
+ if (unlikely(dlen > bufferlen))
+ return -ENOBUFS;
+
+ if (request_id)
+ *request_id = pkt.xactid;
+
+ /* Read data and skip packet header */
+ error = vmbus_rxbr_read(&chan->rxbr, data, dlen, hlen);
+ if (error)
+ return error;
+
+ rte_vmbus_chan_signal_read(chan, dlen + hlen + sizeof(uint64_t));
+ return 0;
+}
+
+/* TODO: replace this with inplace ring buffer (no copy) */
+int rte_vmbus_chan_recv_raw(struct vmbus_channel *chan,
+ void *data, uint32_t *len)
+{
+ struct vmbus_chanpkt_hdr pkt;
+ uint32_t dlen, bufferlen = *len;
+ int error;
+
+ error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt));
+ if (error)
+ return error;
+
+ if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) {
+ VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen);
+ /* XXX this channel is dead actually. */
+ return -EIO;
+ }
+
+ if (unlikely(pkt.hlen > pkt.tlen)) {
+ VMBUS_LOG(ERR, "VMBUS recv,invalid hlen %u and tlen %u",
+ pkt.hlen, pkt.tlen);
+ return -EIO;
+ }
+
+ /* Length are in quad words */
+ dlen = pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT;
+ *len = dlen;
+
+ /* If caller buffer is not large enough */
+ if (unlikely(dlen > bufferlen))
+ return -ENOBUFS;
+
+ /* Read data and skip packet header */
+ error = vmbus_rxbr_read(&chan->rxbr, data, dlen, 0);
+ if (error)
+ return error;
+
+ /* Return the number of bytes read */
+ return dlen + sizeof(uint64_t);
+}
+
+int vmbus_chan_create(const struct rte_vmbus_device *device,
+ uint16_t relid, uint16_t subid, uint8_t monitor_id,
+ struct vmbus_channel **new_chan)
+{
+ struct vmbus_channel *chan;
+ int err;
+
+ chan = rte_zmalloc_socket("VMBUS", sizeof(*chan), RTE_CACHE_LINE_SIZE,
+ device->device.numa_node);
+ if (!chan)
+ return -ENOMEM;
+
+ STAILQ_INIT(&chan->subchannel_list);
+ chan->device = device;
+ chan->subchannel_id = subid;
+ chan->relid = relid;
+ chan->monitor_id = monitor_id;
+ *new_chan = chan;
+
+ err = vmbus_uio_map_rings(chan);
+ if (err) {
+ rte_free(chan);
+ return err;
+ }
+
+ return 0;
+}
+
+/* Setup the primary channel */
+int rte_vmbus_chan_open(struct rte_vmbus_device *device,
+ struct vmbus_channel **new_chan)
+{
+ int err;
+
+ err = vmbus_chan_create(device, device->relid, 0,
+ device->monitor_id, new_chan);
+ if (!err)
+ device->primary = *new_chan;
+
+ return err;
+}
+
+int rte_vmbus_max_channels(const struct rte_vmbus_device *device)
+{
+ if (vmbus_uio_subchannels_supported(device, device->primary))
+ return VMBUS_MAX_CHANNELS;
+ else
+ return 1;
+}
+
+/* Setup secondary channel */
+int rte_vmbus_subchan_open(struct vmbus_channel *primary,
+ struct vmbus_channel **new_chan)
+{
+ struct vmbus_channel *chan;
+ int err;
+
+ err = vmbus_uio_get_subchan(primary, &chan);
+ if (err)
+ return err;
+
+ STAILQ_INSERT_TAIL(&primary->subchannel_list, chan, next);
+ *new_chan = chan;
+ return 0;
+}
+
+uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan)
+{
+ return chan->subchannel_id;
+}
+
+void rte_vmbus_chan_close(struct vmbus_channel *chan)
+{
+ const struct rte_vmbus_device *device = chan->device;
+ struct vmbus_channel *primary = device->primary;
+
+ if (chan != primary)
+ STAILQ_REMOVE(&primary->subchannel_list, chan,
+ vmbus_channel, next);
+
+ rte_free(chan);
+}
+
+static void vmbus_dump_ring(FILE *f, const char *id, const struct vmbus_br *br)
+{
+ const struct vmbus_bufring *vbr = br->vbr;
+ struct vmbus_chanpkt_hdr pkt;
+
+ fprintf(f, "%s windex=%u rindex=%u mask=%u pending=%u feature=%#x\n",
+ id, vbr->windex, vbr->rindex, vbr->imask,
+ vbr->pending_send, vbr->feature_bits.value);
+ fprintf(f, " size=%u avail write=%u read=%u\n",
+ br->dsize, vmbus_br_availwrite(br, vbr->windex),
+ vmbus_br_availread(br));
+
+ if (vmbus_rxbr_peek(br, &pkt, sizeof(pkt)) == 0)
+ fprintf(f, " pkt type %#x len %u flags %#x xactid %#"PRIx64"\n",
+ pkt.type,
+ pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT,
+ pkt.flags, pkt.xactid);
+}
+
+void rte_vmbus_chan_dump(FILE *f, const struct vmbus_channel *chan)
+{
+ fprintf(f, "channel[%u] relid=%u monitor=%u\n",
+ chan->subchannel_id, chan->relid, chan->monitor_id);
+ vmbus_dump_ring(f, "rxbr", &chan->rxbr);
+ vmbus_dump_ring(f, "txbr", &chan->txbr);
+}
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/vmbus_common.c b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_common.c
new file mode 100644
index 00000000..c7165ad5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_common.c
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_devargs.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+int vmbus_logtype_bus;
+extern struct rte_vmbus_bus rte_vmbus_bus;
+
+/* map a particular resource from a file */
+void *
+vmbus_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
+ int flags)
+{
+ void *mapaddr;
+
+ /* Map the memory resource of device */
+ mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | flags, fd, offset);
+ if (mapaddr == MAP_FAILED) {
+ VMBUS_LOG(ERR,
+ "mmap(%d, %p, %zu, %ld) failed: %s",
+ fd, requested_addr, size, (long)offset,
+ strerror(errno));
+ }
+ return mapaddr;
+}
+
+/* unmap a particular resource */
+void
+vmbus_unmap_resource(void *requested_addr, size_t size)
+{
+ if (requested_addr == NULL)
+ return;
+
+ /* Unmap the VMBUS memory resource of device */
+ if (munmap(requested_addr, size)) {
+ VMBUS_LOG(ERR, "munmap(%p, 0x%lx) failed: %s",
+ requested_addr, (unsigned long)size,
+ strerror(errno));
+ } else
+ VMBUS_LOG(DEBUG, " VMBUS memory unmapped at %p",
+ requested_addr);
+}
+
+/**
+ * Match the VMBUS driver and device using UUID table
+ *
+ * @param drv
+ * VMBUS driver from which ID table would be extracted
+ * @param pci_dev
+ * VMBUS device to match against the driver
+ * @return
+ * true for successful match
+ * false for unsuccessful match
+ */
+static bool
+vmbus_match(const struct rte_vmbus_driver *dr,
+ const struct rte_vmbus_device *dev)
+{
+ const rte_uuid_t *id_table;
+
+ for (id_table = dr->id_table; !rte_uuid_is_null(*id_table); ++id_table) {
+ if (rte_uuid_compare(*id_table, dev->class_id) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * If device ID match, call the devinit() function of the driver.
+ */
+static int
+vmbus_probe_one_driver(struct rte_vmbus_driver *dr,
+ struct rte_vmbus_device *dev)
+{
+ char guid[RTE_UUID_STRLEN];
+ int ret;
+
+ if (!vmbus_match(dr, dev))
+ return 1; /* not supported */
+
+ rte_uuid_unparse(dev->device_id, guid, sizeof(guid));
+ VMBUS_LOG(INFO, "VMBUS device %s on NUMA socket %i",
+ guid, dev->device.numa_node);
+
+ /* TODO add blacklisted */
+
+ /* map resources for device */
+ ret = rte_vmbus_map_device(dev);
+ if (ret != 0)
+ return ret;
+
+ /* reference driver structure */
+ dev->driver = dr;
+ dev->device.driver = &dr->driver;
+
+ if (dev->device.numa_node < 0) {
+ VMBUS_LOG(WARNING, " Invalid NUMA socket, default to 0");
+ dev->device.numa_node = 0;
+ }
+
+ /* call the driver probe() function */
+ VMBUS_LOG(INFO, " probe driver: %s", dr->driver.name);
+ ret = dr->probe(dr, dev);
+ if (ret) {
+ dev->driver = NULL;
+ rte_vmbus_unmap_device(dev);
+ }
+
+ return ret;
+}
+
+/*
+ * IF device class GUID mathces, call the probe function of
+ * registere drivers for the vmbus device.
+ * Return -1 if initialization failed,
+ * and 1 if no driver found for this device.
+ */
+static int
+vmbus_probe_all_drivers(struct rte_vmbus_device *dev)
+{
+ struct rte_vmbus_driver *dr;
+ int rc;
+
+ /* Check if a driver is already loaded */
+ if (dev->driver != NULL) {
+ VMBUS_LOG(DEBUG, "VMBUS driver already loaded");
+ return 0;
+ }
+
+ FOREACH_DRIVER_ON_VMBUS(dr) {
+ rc = vmbus_probe_one_driver(dr, dev);
+ if (rc < 0) /* negative is an error */
+ return -1;
+
+ if (rc > 0) /* positive driver doesn't support it */
+ continue;
+
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Scan the vmbus, and call the devinit() function for
+ * all registered drivers that have a matching entry in its id_table
+ * for discovered devices.
+ */
+int
+rte_vmbus_probe(void)
+{
+ struct rte_vmbus_device *dev;
+ size_t probed = 0, failed = 0;
+ char ubuf[RTE_UUID_STRLEN];
+
+ FOREACH_DEVICE_ON_VMBUS(dev) {
+ probed++;
+
+ rte_uuid_unparse(dev->device_id, ubuf, sizeof(ubuf));
+
+ /* TODO: add whitelist/blacklist */
+
+ if (vmbus_probe_all_drivers(dev) < 0) {
+ VMBUS_LOG(NOTICE,
+ "Requested device %s cannot be used", ubuf);
+ rte_errno = errno;
+ failed++;
+ }
+ }
+
+ return (probed && probed == failed) ? -1 : 0;
+}
+
+static int
+vmbus_parse(const char *name, void *addr)
+{
+ rte_uuid_t guid;
+ int ret;
+
+ ret = rte_uuid_parse(name, guid);
+ if (ret == 0 && addr)
+ memcpy(addr, &guid, sizeof(guid));
+
+ return ret;
+}
+
+/* register vmbus driver */
+void
+rte_vmbus_register(struct rte_vmbus_driver *driver)
+{
+ VMBUS_LOG(DEBUG,
+ "Registered driver %s", driver->driver.name);
+
+ TAILQ_INSERT_TAIL(&rte_vmbus_bus.driver_list, driver, next);
+ driver->bus = &rte_vmbus_bus;
+}
+
+/* unregister vmbus driver */
+void
+rte_vmbus_unregister(struct rte_vmbus_driver *driver)
+{
+ TAILQ_REMOVE(&rte_vmbus_bus.driver_list, driver, next);
+ driver->bus = NULL;
+}
+
+/* Add a device to VMBUS bus */
+void
+vmbus_add_device(struct rte_vmbus_device *vmbus_dev)
+{
+ TAILQ_INSERT_TAIL(&rte_vmbus_bus.device_list, vmbus_dev, next);
+}
+
+/* Insert a device into a predefined position in VMBUS bus */
+void
+vmbus_insert_device(struct rte_vmbus_device *exist_vmbus_dev,
+ struct rte_vmbus_device *new_vmbus_dev)
+{
+ TAILQ_INSERT_BEFORE(exist_vmbus_dev, new_vmbus_dev, next);
+}
+
+/* Remove a device from VMBUS bus */
+void
+vmbus_remove_device(struct rte_vmbus_device *vmbus_dev)
+{
+ TAILQ_REMOVE(&rte_vmbus_bus.device_list, vmbus_dev, next);
+}
+
+/* VMBUS doesn't support hotplug */
+static struct rte_device *
+vmbus_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ struct rte_vmbus_device *dev;
+
+ FOREACH_DEVICE_ON_VMBUS(dev) {
+ if (start && &dev->device == start) {
+ start = NULL;
+ continue;
+ }
+ if (cmp(&dev->device, data) == 0)
+ return &dev->device;
+ }
+
+ return NULL;
+}
+
+
+struct rte_vmbus_bus rte_vmbus_bus = {
+ .bus = {
+ .scan = rte_vmbus_scan,
+ .probe = rte_vmbus_probe,
+ .find_device = vmbus_find_device,
+ .parse = vmbus_parse,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_vmbus_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_vmbus_bus.driver_list),
+};
+
+RTE_REGISTER_BUS(vmbus, rte_vmbus_bus.bus);
+
+RTE_INIT(vmbus_init_log)
+{
+ vmbus_logtype_bus = rte_log_register("bus.vmbus");
+ if (vmbus_logtype_bus >= 0)
+ rte_log_set_level(vmbus_logtype_bus, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/vmbus_common_uio.c b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_common_uio.c
new file mode 100644
index 00000000..5ddd36ab
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_common_uio.c
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+static struct rte_tailq_elem vmbus_tailq = {
+ .name = "VMBUS_RESOURCE_LIST",
+};
+EAL_REGISTER_TAILQ(vmbus_tailq)
+
+static int
+vmbus_uio_map_secondary(struct rte_vmbus_device *dev)
+{
+ int fd, i;
+ struct mapped_vmbus_resource *uio_res;
+ struct mapped_vmbus_res_list *uio_res_list
+ = RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+
+ /* skip this element if it doesn't match our UUID */
+ if (rte_uuid_compare(uio_res->id, dev->device_id) != 0)
+ continue;
+
+ /* open /dev/uioX */
+ fd = open(uio_res->path, O_RDWR);
+ if (fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ uio_res->path, strerror(errno));
+ return -1;
+ }
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ void *mapaddr;
+
+ mapaddr = vmbus_map_resource(uio_res->maps[i].addr,
+ fd, 0,
+ uio_res->maps[i].size, 0);
+
+ if (mapaddr == uio_res->maps[i].addr)
+ continue;
+
+ VMBUS_LOG(ERR,
+ "Cannot mmap device resource file %s to address: %p",
+ uio_res->path, uio_res->maps[i].addr);
+
+ if (mapaddr != MAP_FAILED)
+ /* unmap addr wrongly mapped */
+ vmbus_unmap_resource(mapaddr,
+ (size_t)uio_res->maps[i].size);
+
+ /* unmap addrs correctly mapped */
+ while (--i >= 0)
+ vmbus_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+
+ close(fd);
+ return -1;
+ }
+
+ /* fd is not needed in slave process, close it */
+ close(fd);
+ return 0;
+ }
+
+ VMBUS_LOG(ERR, "Cannot find resource for device");
+ return 1;
+}
+
+static int
+vmbus_uio_map_primary(struct rte_vmbus_device *dev)
+{
+ int i, ret;
+ struct mapped_vmbus_resource *uio_res = NULL;
+ struct mapped_vmbus_res_list *uio_res_list =
+ RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ /* allocate uio resource */
+ ret = vmbus_uio_alloc_resource(dev, &uio_res);
+ if (ret)
+ return ret;
+
+ /* Map the resources */
+ for (i = 0; i < VMBUS_MAX_RESOURCE; i++) {
+ /* skip empty BAR */
+ if (dev->resource[i].len == 0)
+ continue;
+
+ ret = vmbus_uio_map_resource_by_index(dev, i, uio_res, 0);
+ if (ret)
+ goto error;
+ }
+
+ uio_res->nb_maps = i;
+
+ TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);
+
+ return 0;
+error:
+ while (--i >= 0) {
+ vmbus_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ }
+ vmbus_uio_free_resource(dev, uio_res);
+ return -1;
+}
+
+
+struct mapped_vmbus_resource *
+vmbus_uio_find_resource(const struct rte_vmbus_device *dev)
+{
+ struct mapped_vmbus_resource *uio_res;
+ struct mapped_vmbus_res_list *uio_res_list =
+ RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ if (dev == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+ /* skip this element if it doesn't match our VMBUS address */
+ if (rte_uuid_compare(uio_res->id, dev->device_id) == 0)
+ return uio_res;
+ }
+ return NULL;
+}
+
+/* map the VMBUS resource of a VMBUS device in virtual memory */
+int
+vmbus_uio_map_resource(struct rte_vmbus_device *dev)
+{
+ struct mapped_vmbus_resource *uio_res;
+ int ret;
+
+ /* TODO: handle rescind */
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.uio_cfg_fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+
+ /* secondary processes - use already recorded details */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ ret = vmbus_uio_map_secondary(dev);
+ else
+ ret = vmbus_uio_map_primary(dev);
+
+ if (ret != 0)
+ return ret;
+
+ uio_res = vmbus_uio_find_resource(dev);
+ if (!uio_res) {
+ VMBUS_LOG(ERR, "can not find resources!");
+ return -EIO;
+ }
+
+ if (uio_res->nb_maps <= HV_MON_PAGE_MAP) {
+ VMBUS_LOG(ERR, "VMBUS: only %u resources found!",
+ uio_res->nb_maps);
+ return -EINVAL;
+ }
+
+ dev->int_page = (uint32_t *)((char *)uio_res->maps[HV_INT_PAGE_MAP].addr
+ + (PAGE_SIZE >> 1));
+ dev->monitor_page = uio_res->maps[HV_MON_PAGE_MAP].addr;
+ return 0;
+}
+
+static void
+vmbus_uio_unmap(struct mapped_vmbus_resource *uio_res)
+{
+ int i;
+
+ if (uio_res == NULL)
+ return;
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ vmbus_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ }
+}
+
+/* unmap the VMBUS resource of a VMBUS device in virtual memory */
+void
+vmbus_uio_unmap_resource(struct rte_vmbus_device *dev)
+{
+ struct mapped_vmbus_resource *uio_res;
+ struct mapped_vmbus_res_list *uio_res_list =
+ RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ if (dev == NULL)
+ return;
+
+ /* find an entry for the device */
+ uio_res = vmbus_uio_find_resource(dev);
+ if (uio_res == NULL)
+ return;
+
+ /* secondary processes - just free maps */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return vmbus_uio_unmap(uio_res);
+
+ TAILQ_REMOVE(uio_res_list, uio_res, next);
+
+ /* unmap all resources */
+ vmbus_uio_unmap(uio_res);
+
+ /* free uio resource */
+ rte_free(uio_res);
+
+ /* close fd if in primary process */
+ close(dev->intr_handle.fd);
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+}
diff --git a/src/spdk/dpdk/drivers/common/Makefile b/src/spdk/dpdk/drivers/common/Makefile
new file mode 100644
index 00000000..0fd22376
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF)$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL),yy)
+DIRS-y += octeontx
+endif
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/common/meson.build b/src/spdk/dpdk/drivers/common/meson.build
new file mode 100644
index 00000000..d7b7d8cf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+std_deps = ['eal']
+drivers = ['octeontx', 'qat']
+config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
+driver_name_fmt = 'rte_common_@0@'
diff --git a/src/spdk/dpdk/drivers/common/octeontx/Makefile b/src/spdk/dpdk/drivers/common/octeontx/Makefile
new file mode 100644
index 00000000..dfdb9f19
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_common_octeontx.a
+
+CFLAGS += $(WERROR_FLAGS)
+EXPORT_MAP := rte_common_octeontx_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y += octeontx_mbox.c
+
+LDLIBS += -lrte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/common/octeontx/meson.build b/src/spdk/dpdk/drivers/common/octeontx/meson.build
new file mode 100644
index 00000000..203d1ef4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+sources = files('octeontx_mbox.c')
diff --git a/src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.c b/src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.c
new file mode 100644
index 00000000..880f8a40
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.c
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_spinlock.h>
+
+#include "octeontx_mbox.h"
+
+/* Mbox operation timeout in seconds */
+#define MBOX_WAIT_TIME_SEC 3
+#define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)
+
+/* Mbox channel state */
+enum {
+ MBOX_CHAN_STATE_REQ = 1,
+ MBOX_CHAN_STATE_RES = 0,
+};
+
+/* Response messages */
+enum {
+ MBOX_RET_SUCCESS,
+ MBOX_RET_INVALID,
+ MBOX_RET_INTERNAL_ERR,
+};
+
+struct mbox {
+ int init_once;
+ uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
+ uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
+ uint16_t tag_own; /* Last tag which was written to own channel */
+ rte_spinlock_t lock;
+};
+
+static struct mbox octeontx_mbox;
+
+/*
+ * Structure used for mbox synchronization
+ * This structure sits at the begin of Mbox RAM and used as main
+ * synchronization point for channel communication
+ */
+struct mbox_ram_hdr {
+ union {
+ uint64_t u64;
+ struct {
+ uint8_t chan_state : 1;
+ uint8_t coproc : 7;
+ uint8_t msg;
+ uint8_t vfid;
+ uint8_t res_code;
+ uint16_t tag;
+ uint16_t len;
+ };
+ };
+};
+
+int octeontx_logtype_mbox;
+
+RTE_INIT(otx_init_log)
+{
+ octeontx_logtype_mbox = rte_log_register("pmd.octeontx.mbox");
+ if (octeontx_logtype_mbox >= 0)
+ rte_log_set_level(octeontx_logtype_mbox, RTE_LOG_NOTICE);
+}
+
+static inline void
+mbox_msgcpy(volatile uint8_t *d, volatile const uint8_t *s, uint16_t size)
+{
+ uint16_t i;
+
+ for (i = 0; i < size; i++)
+ d[i] = s[i];
+}
+
+static inline void
+mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
+ const void *txmsg, uint16_t txsize)
+{
+ struct mbox_ram_hdr old_hdr;
+ struct mbox_ram_hdr new_hdr = { {0} };
+ uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
+ uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
+
+ /*
+ * Initialize the channel with the tag left by last send.
+ * On success full mbox send complete, PF increments the tag by one.
+ * The sender can validate integrity of PF message with this scheme
+ */
+ old_hdr.u64 = rte_read64(ram_mbox_hdr);
+ m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */
+
+ /* Copy msg body */
+ if (txmsg)
+ mbox_msgcpy(ram_mbox_msg, txmsg, txsize);
+
+ /* Prepare new hdr */
+ new_hdr.chan_state = MBOX_CHAN_STATE_REQ;
+ new_hdr.coproc = hdr->coproc;
+ new_hdr.msg = hdr->msg;
+ new_hdr.vfid = hdr->vfid;
+ new_hdr.tag = m->tag_own;
+ new_hdr.len = txsize;
+
+ /* Write the msg header */
+ rte_write64(new_hdr.u64, ram_mbox_hdr);
+ rte_smp_wmb();
+ /* Notify PF about the new msg - write to MBOX reg generates PF IRQ */
+ rte_write64(0, m->reg);
+}
+
+static inline int
+mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
+ void *rxmsg, uint16_t rxsize)
+{
+ int res = 0, wait;
+ uint16_t len;
+ struct mbox_ram_hdr rx_hdr;
+ uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
+ uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
+
+ /* Wait for response */
+ wait = MBOX_WAIT_TIME_SEC * 1000 * 10;
+ while (wait > 0) {
+ rte_delay_us(100);
+ rx_hdr.u64 = rte_read64(ram_mbox_hdr);
+ if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES)
+ break;
+ --wait;
+ }
+
+ hdr->res_code = rx_hdr.res_code;
+ m->tag_own++;
+
+ /* Timeout */
+ if (wait <= 0) {
+ res = -ETIMEDOUT;
+ goto error;
+ }
+
+ /* Tag mismatch */
+ if (m->tag_own != rx_hdr.tag) {
+ res = -EINVAL;
+ goto error;
+ }
+
+ /* PF nacked the msg */
+ if (rx_hdr.res_code != MBOX_RET_SUCCESS) {
+ res = -EBADMSG;
+ goto error;
+ }
+
+ len = RTE_MIN(rx_hdr.len, rxsize);
+ if (rxmsg)
+ mbox_msgcpy(rxmsg, ram_mbox_msg, len);
+
+ return len;
+
+error:
+ mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
+ m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
+ hdr->res_code);
+ return res;
+}
+
+static inline int
+mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
+ uint16_t txsize, void *rxmsg, uint16_t rxsize)
+{
+ int res = -EINVAL;
+
+ if (m->init_once == 0 || hdr == NULL ||
+ txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
+ mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
+ m->init_once, hdr, txsize, rxsize);
+ return res;
+ }
+
+ rte_spinlock_lock(&m->lock);
+
+ mbox_send_request(m, hdr, txmsg, txsize);
+ res = mbox_wait_response(m, hdr, rxmsg, rxsize);
+
+ rte_spinlock_unlock(&m->lock);
+ return res;
+}
+
+int
+octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ if (m->init_once)
+ return -EALREADY;
+
+ if (ram_mbox_base == NULL) {
+ mbox_log_err("Invalid ram_mbox_base=%p", ram_mbox_base);
+ return -EINVAL;
+ }
+
+ m->ram_mbox_base = ram_mbox_base;
+
+ if (m->reg != NULL) {
+ rte_spinlock_init(&m->lock);
+ m->init_once = 1;
+ }
+
+ return 0;
+}
+
+int
+octeontx_mbox_set_reg(uint8_t *reg)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ if (m->init_once)
+ return -EALREADY;
+
+ if (reg == NULL) {
+ mbox_log_err("Invalid reg=%p", reg);
+ return -EINVAL;
+ }
+
+ m->reg = reg;
+
+ if (m->ram_mbox_base != NULL) {
+ rte_spinlock_init(&m->lock);
+ m->init_once = 1;
+ }
+
+ return 0;
+}
+
+int
+octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
+ uint16_t txlen, void *rxdata, uint16_t rxlen)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EINVAL;
+
+ return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
+}
diff --git a/src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.h b/src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.h
new file mode 100644
index 00000000..43fbda28
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_MBOX_H__
+#define __OCTEONTX_MBOX_H__
+
+#include <rte_common.h>
+#include <rte_spinlock.h>
+
+#define SSOW_BAR4_LEN (64 * 1024)
+#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
+
+#define MBOX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, octeontx_logtype_mbox,\
+ "%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
+
+#define mbox_log_info(fmt, ...) MBOX_LOG(INFO, fmt, ##__VA_ARGS__)
+#define mbox_log_dbg(fmt, ...) MBOX_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define mbox_log_err(fmt, ...) MBOX_LOG(ERR, fmt, ##__VA_ARGS__)
+#define mbox_func_trace mbox_log_dbg
+
+extern int octeontx_logtype_mbox;
+
+struct octeontx_mbox_hdr {
+ uint16_t vfid; /* VF index or pf resource index local to the domain */
+ uint8_t coproc; /* Coprocessor id */
+ uint8_t msg; /* Message id */
+ uint8_t res_code; /* Functional layer response code */
+};
+
+int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base);
+int octeontx_mbox_set_reg(uint8_t *reg);
+int octeontx_mbox_send(struct octeontx_mbox_hdr *hdr,
+ void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
+
+#endif /* __OCTEONTX_MBOX_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx/rte_common_octeontx_version.map b/src/spdk/dpdk/drivers/common/octeontx/rte_common_octeontx_version.map
new file mode 100644
index 00000000..f04b3b7f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx/rte_common_octeontx_version.map
@@ -0,0 +1,7 @@
+DPDK_18.05 {
+ global:
+
+ octeontx_mbox_set_ram_mbox_base;
+ octeontx_mbox_set_reg;
+ octeontx_mbox_send;
+};
diff --git a/src/spdk/dpdk/drivers/common/qat/Makefile b/src/spdk/dpdk/drivers/common/qat/Makefile
new file mode 100644
index 00000000..c68a032a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/Makefile
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# build directories
+QAT_CRYPTO_DIR := $(RTE_SDK)/drivers/crypto/qat
+QAT_COMPRESS_DIR := $(RTE_SDK)/drivers/compress/qat
+VPATH=$(QAT_CRYPTO_DIR):$(QAT_COMPRESS_DIR)
+
+# external library include paths
+CFLAGS += -I$(SRCDIR)/qat_adf
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -I$(QAT_CRYPTO_DIR)
+CFLAGS += -I$(QAT_COMPRESS_DIR)
+
+
+ifeq ($(CONFIG_RTE_LIBRTE_COMPRESSDEV),y)
+ CFLAGS += -DALLOW_EXPERIMENTAL_API
+ LDLIBS += -lrte_compressdev
+ SRCS-y += qat_comp.c
+ SRCS-y += qat_comp_pmd.c
+ build_qat = yes
+endif
+
+# library symmetric crypto source files
+ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
+ LDLIBS += -lrte_cryptodev
+ LDLIBS += -lcrypto
+ CFLAGS += -DBUILD_QAT_SYM
+ SRCS-y += qat_sym.c
+ SRCS-y += qat_sym_session.c
+ SRCS-y += qat_sym_pmd.c
+ build_qat = yes
+endif
+endif
+
+ifdef build_qat
+
+ # library name
+ LIB = librte_pmd_qat.a
+
+ # library version
+ LIBABIVER := 1
+ # build flags
+ CFLAGS += $(WERROR_FLAGS)
+ CFLAGS += -O3
+
+ # library common source files
+ SRCS-y += qat_device.c
+ SRCS-y += qat_common.c
+ SRCS-y += qat_logs.c
+ SRCS-y += qat_qp.c
+
+ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+ LDLIBS += -lrte_pci -lrte_bus_pci
+
+ # export include files
+ SYMLINK-y-include +=
+
+ # versioning export map
+ EXPORT_MAP := ../../compress/qat/rte_pmd_qat_version.map
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/common/qat/meson.build b/src/spdk/dpdk/drivers/common/qat/meson.build
new file mode 100644
index 00000000..80b6b25a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+# This does not build a driver, but instead holds common files for
+# the crypto and compression drivers.
+build = false
+qat_deps = ['bus_pci']
+qat_sources = files('qat_common.c',
+ 'qat_qp.c',
+ 'qat_device.c',
+ 'qat_logs.c')
+qat_includes = [include_directories('.', 'qat_adf')]
+qat_ext_deps = []
+qat_cflags = []
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/src/spdk/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h
new file mode 100644
index 00000000..1eef5513
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include <rte_io.h>
+
+/* CSR write macro */
+#define ADF_CSR_WR(csrAddr, csrOffset, val) \
+ rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
+
+/* CSR read macro */
+#define ADF_CSR_RD(csrAddr, csrOffset) \
+ rte_read32((((uint8_t *)csrAddr) + csrOffset))
+
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+#define ADF_RING_EMPTY_SIG_BYTE 0x7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Maximum number of qps on a device for any service type */
+#define ADF_MAX_QPS_ON_ANY_SERVICE 2
+#define ADF_RING_DIR_TX 0
+#define ADF_RING_DIR_RX 1
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring bufer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
+ ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+ SIZE) & ~0x4)
+/* Max outstanding requests */
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+ ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
+#define BUILD_RING_CONFIG(size) \
+ ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+ ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ uint32_t l_base = 0, u_base = 0; \
+ l_base = (uint32_t)(value & 0xFFFFFFFF); \
+ u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
+} while (0)
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, value)
+
+#endif /*ADF_TRANSPORT_ACCESS_MACROS_H */
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw.h b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw.h
new file mode 100644
index 00000000..8f7cb37b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <sys/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+ (((val) & (mask)) << (bitpos))) ; }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+ (((flags) >> (bitpos)) & (mask))
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+ ICP_QAT_FW_COMN_RESP_SERV_NULL,
+ ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+ ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+ ICP_QAT_FW_COMN_REQ_NULL = 0,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+ ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t serv_specif_fields[4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+ uint64_t opaque_data;
+ uint64_t src_data_addr;
+ uint64_t dest_data_addr;
+ uint32_t src_length;
+ uint32_t dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+ uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+ uint8_t resrvd1;
+ uint8_t service_cmd_id;
+ uint8_t service_type;
+ uint8_t hdr_flags;
+ uint16_t serv_specif_flags;
+ uint16_t comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+ uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+ uint8_t xlat_err_code;
+ uint8_t cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+ uint8_t resrvd1;
+ uint8_t service_id;
+ uint8_t response_type;
+ uint8_t hdr_flags;
+ struct icp_qat_fw_comn_error comn_error;
+ uint8_t comn_status;
+ uint8_t cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_hdr;
+ uint64_t opaque_data;
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6
+#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+ ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+ ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+ (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+ (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+ ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+ | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+ QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET_2(next_curr_id, val) \
+ do { \
+ (next_curr_id) = \
+ (((next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ (((val) << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) & \
+ ICP_QAT_FW_COMN_NEXT_ID_MASK)) \
+ } while (0)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET_2(next_curr_id, val) \
+ do { \
+ (next_curr_id) = \
+ (((next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) \
+ } while (0)
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
+#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+#define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
+#define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+ QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_PKE_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+ QAT_COMN_RESP_PKE_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+ QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+ QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLT_WA_APPLIED_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS, \
+ QAT_COMN_RESP_XLT_WA_APPLIED_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
+ QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_COMP_OUTPUT_CORRUPTION -14
+#define ERR_CODE_HW_INCOMPLETE_FILE -15
+#define ERR_CODE_SSM_ERROR -16
+#define ERR_CODE_ENDPOINT_ERROR -17
+#define ERR_CODE_CNV_ERROR -18
+#define ERR_CODE_EMPTY_DYM_BLOCK -19
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_HANDLE -20
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_HMAC_FAILED -21
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_WRAPPING_ALGO -22
+#define ERR_CODE_KPT_DRNG_SEED_NOT_LOAD -23
+
+enum icp_qat_fw_slice {
+ ICP_QAT_FW_SLICE_NULL = 0,
+ ICP_QAT_FW_SLICE_CIPHER = 1,
+ ICP_QAT_FW_SLICE_AUTH = 2,
+ ICP_QAT_FW_SLICE_DRAM_RD = 3,
+ ICP_QAT_FW_SLICE_DRAM_WR = 4,
+ ICP_QAT_FW_SLICE_COMP = 5,
+ ICP_QAT_FW_SLICE_XLAT = 6,
+ ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_comp.h b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
new file mode 100644
index 00000000..81381772
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_FW_COMP_H_
+#define _ICP_QAT_FW_COMP_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_comp_cmd_id {
+ ICP_QAT_FW_COMP_CMD_STATIC = 0,
+ /*!< Static Compress Request */
+
+ ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
+ /*!< Dynamic Compress Request */
+
+ ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
+ /*!< Decompress Request */
+
+ ICP_QAT_FW_COMP_CMD_DELIMITER
+ /**< Delimiter type */
+};
+
+/**< Flag usage */
+
+#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that session is stateless
+ */
+
+#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that session is stateful
+ */
+
+#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that autoselectbest is NOT used
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is NOT used
+ */
+
+#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is NOT used
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing secure RAM from being used as
+ * an intermediate buffer is DISABLED.
+ */
+
+#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing secure RAM from being used as
+ * an intermediate buffer is ENABLED.
+ */
+
+/**< Flag mask & bit position */
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the session type
+ */
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine the session type
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for auto select best
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for auto select best
+ */
+
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for enhanced auto select best
+ */
+
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for enhanced auto select best
+ */
+
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for disabling type zero header write back
+ * when Enhanced autoselect best is enabled. If set firmware does
+ * not return type0 store block header, only copies src to dest.
+ * (if best output is Type0)
+ */
+
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for auto select best
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for flag used to disable secure ram from
+ * being used as an intermediate buffer.
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for disable secure ram for use as an intermediate
+ * buffer.
+ */
+
+#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
+ ret_uncomp, secure_ram) \
+ ((((sesstype)&ICP_QAT_FW_COMP_SESSION_TYPE_MASK) \
+ << ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
+ (((autoselect)&ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) \
+ << ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \
+ (((enhanced_asb)&ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) \
+ << ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \
+ (((ret_uncomp)&ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) \
+ << ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \
+ (((secure_ram)&ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) \
+ << ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
+
+union icp_qat_fw_comp_req_hdr_cd_pars {
+ /**< LWs 2-5 */
+ struct {
+ uint64_t content_desc_addr;
+ /**< Address of the content descriptor */
+
+ uint16_t content_desc_resrvd1;
+ /**< Content descriptor reserved field */
+
+ uint8_t content_desc_params_sz;
+ /**< Size of the content descriptor parameters in quad words.
+ * These parameters describe the session setup configuration
+ * info for the slices that this request relies upon i.e.
+ * the configuration word and cipher key needed by the cipher
+ * slice if there is a request for cipher processing.
+ */
+
+ uint8_t content_desc_hdr_resrvd2;
+ /**< Content descriptor reserved field */
+
+ uint32_t content_desc_resrvd3;
+ /**< Content descriptor reserved field */
+ } s;
+
+ struct {
+ uint32_t comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /* Compression Slice Config Word */
+
+ uint32_t content_desc_resrvd4;
+ /**< Content descriptor reserved field */
+
+ } sl;
+
+};
+
+struct icp_qat_fw_comp_req_params {
+ /**< LW 14 */
+ uint32_t comp_len;
+ /**< Size of input to process in bytes Note: Only EOP requests can be
+ * odd for decompression. IA must set LSB to zero for odd sized
+ * intermediate inputs
+ */
+
+ /**< LW 15 */
+ uint32_t out_buffer_sz;
+ /**< Size of output buffer in bytes */
+
+ /**< LW 16 */
+ uint32_t initial_crc32;
+ /**< CRC of previously processed bytes */
+
+ /**< LW 17 */
+ uint32_t initial_adler;
+ /**< Adler of previously processed bytes */
+
+ /**< LW 18 */
+ uint32_t req_par_flags;
+
+ /**< LW 19 */
+ uint32_t rsrvd;
+};
+
+#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr) \
+ ((((sop)&ICP_QAT_FW_COMP_SOP_MASK) << ICP_QAT_FW_COMP_SOP_BITPOS) | \
+ (((eop)&ICP_QAT_FW_COMP_EOP_MASK) << ICP_QAT_FW_COMP_EOP_BITPOS) | \
+ (((bfinal)&ICP_QAT_FW_COMP_BFINAL_MASK) \
+ << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
+ ((cnv & ICP_QAT_FW_COMP_CNV_MASK) << ICP_QAT_FW_COMP_CNV_BITPOS) | \
+ ((cnvnr & ICP_QAT_FW_COMP_CNV_RECOVERY_MASK) \
+ << ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS))
+
+#define ICP_QAT_FW_COMP_NOT_SOP 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request is NOT Start of Packet
+ */
+
+#define ICP_QAT_FW_COMP_SOP 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request IS Start of Packet
+ */
+
+#define ICP_QAT_FW_COMP_NOT_EOP 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request is NOT Start of Packet
+ */
+
+#define ICP_QAT_FW_COMP_EOP 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request IS End of Packet
+ */
+
+#define ICP_QAT_FW_COMP_NOT_BFINAL 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing to indicate firmware this is not the last block
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing to indicate firmware this is the last block
+ */
+
+#define ICP_QAT_FW_COMP_NO_CNV 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that NO cnv check is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_CNV 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that a cnv check IS to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that NO cnv recovery is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that a cnv recovery is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_SOP_BITPOS 0
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for SOP
+ */
+
+#define ICP_QAT_FW_COMP_SOP_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine SOP
+ */
+
+#define ICP_QAT_FW_COMP_EOP_BITPOS 1
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for EOP
+ */
+
+#define ICP_QAT_FW_COMP_EOP_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine EOP
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the bfinal bit
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the bfinal bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the CNV bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_BITPOS 16
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the CNV bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the CNV Recovery bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS 17
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the CNV Recovery bit
+ */
+
+struct icp_qat_fw_xlt_req_params {
+ /**< LWs 20-21 */
+ uint64_t inter_buff_ptr;
+ /**< This field specifies the physical address of an intermediate
+ * buffer SGL array. The array contains a pair of 64-bit
+ * intermediate buffer pointers to SGL buffer descriptors, one pair
+ * per CPM. Please refer to the CPM1.6 Firmware Interface HLD
+ * specification for more details.
+ */
+};
+
+
+struct icp_qat_fw_comp_cd_hdr {
+ /**< LW 24 */
+ uint16_t ram_bank_flags;
+ /**< Flags to show which ram banks to access */
+
+ uint8_t comp_cfg_offset;
+ /**< Quad word offset from the content descriptor parameters address
+ * to the parameters for the compression processing
+ */
+
+ uint8_t next_curr_id;
+ /**< This field combines the next and current id (each four bits) -
+ * the next id is the most significant nibble.
+ * Next Id: Set to the next slice to pass the compressed data through.
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
+ * anymore slices after compression
+ * Current Id: Initialised with the compression slice type
+ */
+
+ /**< LW 25 */
+ uint32_t resrvd;
+ /**< LWs 26-27 */
+
+ uint64_t comp_state_addr;
+ /**< Pointer to compression state */
+
+ /**< LWs 28-29 */
+ uint64_t ram_banks_addr;
+ /**< Pointer to banks */
+
+};
+
+
+struct icp_qat_fw_xlt_cd_hdr {
+ /**< LW 30 */
+ uint16_t resrvd1;
+ /**< Reserved field and assumed set to 0 */
+
+ uint8_t resrvd2;
+ /**< Reserved field and assumed set to 0 */
+
+ uint8_t next_curr_id;
+ /**< This field combines the next and current id (each four bits) -
+ * the next id is the most significant nibble.
+ * Next Id: Set to the next slice to pass the translated data through.
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
+ * any more slices after compression
+ * Current Id: Initialised with the translation slice type
+ */
+
+ /**< LW 31 */
+ uint32_t resrvd3;
+ /**< Reserved and should be set to zero, needed for quadword
+ * alignment
+ */
+};
+
+struct icp_qat_fw_comp_req {
+ /**< LWs 0-1 */
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ /**< Common request header - for Service Command Id,
+ * use service-specific Compression Command Id.
+ * Service Specific Flags - use Compression Command Flags
+ */
+
+ /**< LWs 2-5 */
+ union icp_qat_fw_comp_req_hdr_cd_pars cd_pars;
+ /**< Compression service-specific content descriptor field which points
+ * either to a content descriptor parameter block or contains the
+ * compression slice config word.
+ */
+
+ /**< LWs 6-13 */
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ /**< Common request middle section */
+
+ /**< LWs 14-19 */
+ struct icp_qat_fw_comp_req_params comp_pars;
+ /**< Compression request Parameters block */
+
+ /**< LWs 20-21 */
+ union {
+ struct icp_qat_fw_xlt_req_params xlt_pars;
+ /**< Translation request Parameters block */
+ uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /**< Reserved if not used for translation */
+
+ } u1;
+
+ /**< LWs 22-23 */
+ union {
+ uint32_t resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /**< Reserved - not used if Batch and Pack is disabled.*/
+
+ uint64_t bnp_res_table_addr;
+ /**< A generic pointer to the unbounded list of
+ * icp_qat_fw_resp_comp_pars members. This pointer is only
+ * used when the Batch and Pack is enabled.
+ */
+ } u3;
+
+ /**< LWs 24-29 */
+ struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
+ /**< Compression request content descriptor control block header */
+
+ /**< LWs 30-31 */
+ union {
+ struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
+ /**< Translation request content descriptor
+ * control block header
+ */
+
+ uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /**< Reserved if not used for translation */
+ } u2;
+};
+
+struct icp_qat_fw_resp_comp_pars {
+ /**< LW 4 */
+ uint32_t input_byte_counter;
+ /**< Input byte counter */
+
+ /**< LW 5 */
+ uint32_t output_byte_counter;
+ /**< Output byte counter */
+
+ /**< LW 6 & 7*/
+ union {
+ uint64_t curr_chksum;
+ struct {
+ /**< LW 6 */
+ uint32_t curr_crc32;
+ /**< LW 7 */
+ uint32_t curr_adler_32;
+ };
+ };
+};
+
+struct icp_qat_fw_comp_resp {
+ /**< LWs 0-1 */
+ struct icp_qat_fw_comn_resp_hdr comn_resp;
+ /**< Common interface response format see icp_qat_fw.h */
+
+ /**< LWs 2-3 */
+ uint64_t opaque_data;
+ /**< Opaque data passed from the request to the response message */
+
+ /**< LWs 4-7 */
+ struct icp_qat_fw_resp_comp_pars comp_resp_pars;
+ /**< Common response params (checksums and byte counts) */
+};
+
+#endif
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_la.h
new file mode 100644
index 00000000..c33bc3fe
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+ ICP_QAT_FW_LA_CMD_CIPHER = 0,
+ ICP_QAT_FW_LA_CMD_AUTH = 1,
+ ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+ ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+ ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+ ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+ ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+ ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+ ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+ ICP_QAT_FW_LA_CMD_MGF1 = 9,
+ ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+ ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+ ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO 2
+#define ICP_QAT_FW_LA_CCM_PROTO 1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK 0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+ cmp_auth, ret_auth, update_state, \
+ ciph_iv, ciphcfg, partial) \
+ (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+ ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+ QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+ ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+ QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+ ((proto & QAT_LA_PROTO_MASK) << \
+ QAT_LA_PROTO_BITPOS) | \
+ ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+ QAT_LA_CMP_AUTH_RES_BITPOS) | \
+ ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+ QAT_LA_RET_AUTH_RES_BITPOS) | \
+ ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+ QAT_LA_UPDATE_STATE_BITPOS) | \
+ ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+ QAT_LA_CIPH_IV_FLD_BITPOS) | \
+ ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+ ((partial & QAT_LA_PARTIAL_MASK) << \
+ QAT_LA_PARTIAL_BITPOS))
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+ QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } sl;
+ } u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+ uint8_t cipher_state_sz;
+ uint8_t cipher_key_sz;
+ uint8_t cipher_cfg_offset;
+ uint8_t next_curr_id;
+ uint8_t cipher_padding_sz;
+ uint8_t resrvd1;
+ uint16_t resrvd2;
+ uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+ uint32_t resrvd1;
+ uint8_t resrvd2;
+ uint8_t hash_flags;
+ uint8_t hash_cfg_offset;
+ uint8_t next_curr_id;
+ uint8_t resrvd3;
+ uint8_t outer_prefix_sz;
+ uint8_t final_sz;
+ uint8_t inner_res_sz;
+ uint8_t resrvd4;
+ uint8_t inner_state1_sz;
+ uint8_t inner_state2_offset;
+ uint8_t inner_state2_sz;
+ uint8_t outer_config_offset;
+ uint8_t outer_state1_sz;
+ uint8_t outer_res_sz;
+ uint8_t outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+ uint8_t cipher_state_sz;
+ uint8_t cipher_key_sz;
+ uint8_t cipher_cfg_offset;
+ uint8_t next_curr_id_cipher;
+ uint8_t cipher_padding_sz;
+ uint8_t hash_flags;
+ uint8_t hash_cfg_offset;
+ uint8_t next_curr_id_auth;
+ uint8_t resrvd1;
+ uint8_t outer_prefix_sz;
+ uint8_t final_sz;
+ uint8_t inner_res_sz;
+ uint8_t resrvd2;
+ uint8_t inner_state1_sz;
+ uint8_t inner_state2_offset;
+ uint8_t inner_state2_sz;
+ uint8_t outer_config_offset;
+ uint8_t outer_state1_sz;
+ uint8_t outer_res_sz;
+ uint8_t outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+ (sizeof(struct icp_qat_fw_la_cipher_req_params_t))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+ uint32_t cipher_offset;
+ uint32_t cipher_length;
+ union {
+ uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ struct {
+ uint64_t cipher_IV_ptr;
+ uint64_t resrvd1;
+ } s;
+ } u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+ uint32_t auth_off;
+ uint32_t auth_len;
+ union {
+ uint64_t auth_partial_st_prefix;
+ uint64_t aad_adr;
+ } u1;
+ uint64_t auth_res_addr;
+ union {
+ uint8_t inner_prefix_sz;
+ uint8_t aad_sz;
+ } u2;
+ uint8_t resrvd1;
+ uint8_t hash_state_sz;
+ uint8_t auth_res_sz;
+} __rte_packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+ union {
+ uint8_t inner_prefix_sz;
+ uint8_t aad_sz;
+ } u2;
+ uint8_t resrvd1;
+ uint16_t resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_resp;
+ uint64_t opaque_data;
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+ ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#endif
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_hw.h b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_hw.h
new file mode 100644
index 00000000..e7961dba
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_hw.h
@@ -0,0 +1,386 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+ ICP_QAT_HW_AE_0 = 0,
+ ICP_QAT_HW_AE_1 = 1,
+ ICP_QAT_HW_AE_2 = 2,
+ ICP_QAT_HW_AE_3 = 3,
+ ICP_QAT_HW_AE_4 = 4,
+ ICP_QAT_HW_AE_5 = 5,
+ ICP_QAT_HW_AE_6 = 6,
+ ICP_QAT_HW_AE_7 = 7,
+ ICP_QAT_HW_AE_8 = 8,
+ ICP_QAT_HW_AE_9 = 9,
+ ICP_QAT_HW_AE_10 = 10,
+ ICP_QAT_HW_AE_11 = 11,
+ ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+ ICP_QAT_HW_QAT_0 = 0,
+ ICP_QAT_HW_QAT_1 = 1,
+ ICP_QAT_HW_QAT_2 = 2,
+ ICP_QAT_HW_QAT_3 = 3,
+ ICP_QAT_HW_QAT_4 = 4,
+ ICP_QAT_HW_QAT_5 = 5,
+ ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+ ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+ ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+ ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+ ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+ ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+ ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+ ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+ ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+ ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+ ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+ ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+ ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+ ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+ ICP_QAT_HW_AUTH_MODE0 = 0,
+ ICP_QAT_HW_AUTH_MODE1 = 1,
+ ICP_QAT_HW_AUTH_MODE2 = 2,
+ ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+ uint32_t config;
+ uint32_t reserved;
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_DISABLE_MASK 0x1
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS 17
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS 16
+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK 0xF
+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS 24
+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK 0xFF
+#define QAT_AUTH_SHA3_HW_PADDING_ENABLE 0
+#define QAT_AUTH_SHA3_HW_PADDING_DISABLE 1
+#define QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT 0
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT 0
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_PROGRAMMABLE 1
+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED 0
+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED 0
+
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+ ((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+ (((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+ (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) \
+ << QAT_AUTH_ALGO_SHA3_BITPOS) | \
+ (((QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT) & \
+ QAT_AUTH_SHA3_PADDING_DISABLE_MASK) \
+ << QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS) | \
+ (((QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT) & \
+ QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK) \
+ << QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS) | \
+ (((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER \
+ ((((QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED) & \
+ QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK) \
+ << QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS) | \
+ (((QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED) & \
+ QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK) \
+ << QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS))
+
+struct icp_qat_hw_auth_counter {
+ uint32_t counter;
+ uint32_t reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+ (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+ struct icp_qat_hw_auth_config auth_config;
+ struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1)))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+ ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+ struct icp_qat_hw_auth_setup inner_setup;
+ uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+ struct icp_qat_hw_auth_setup outer_setup;
+ uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_sha3_512 {
+ struct icp_qat_hw_auth_setup inner_setup;
+ uint8_t state1[ICP_QAT_HW_SHA3_512_STATE1_SZ];
+ struct icp_qat_hw_auth_setup outer_setup;
+};
+
+struct icp_qat_hw_auth_algo_blk {
+ struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+ ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+ ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+ ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+ ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+ ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+ ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+ ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+ ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+ ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+ ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+ ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+ ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+ ICP_QAT_HW_CIPHER_F8_MODE = 3,
+ ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+ ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+ uint32_t val;
+ uint32_t reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+ ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+ ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_auth_op {
+ ICP_QAT_HW_AUTH_VERIFY = 0,
+ ICP_QAT_HW_AUTH_GENERATE = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+ ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+ (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+ ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+ ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+ ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+
+#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
+
+/* These defines describe position of the bit-fields
+ * in the flags byte in B0
+ */
+#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT 6
+#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT 3
+
+#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q) \
+ ((((Adata) > 0 ? 1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \
+ | ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \
+ | ((q) - 1))
+
+#define ICP_QAT_HW_CCM_NQ_CONST 15
+#define ICP_QAT_HW_CCM_AAD_B0_LEN 16
+#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2
+#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \
+ ICP_QAT_HW_CCM_AAD_LEN_INFO)
+#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16
+#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4
+#define ICP_QAT_HW_CCM_NONCE_OFFSET 1
+
+struct icp_qat_hw_cipher_algo_blk {
+ struct icp_qat_hw_cipher_config cipher_config;
+ uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
+} __rte_cache_aligned;
+
+/* ========================================================================= */
+/* COMPRESSION SLICE */
+/* ========================================================================= */
+
+enum icp_qat_hw_compression_direction {
+ ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0,
+ ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1,
+ ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_delayed_match {
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_algo {
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
+ ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
+ ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
+};
+
+
+enum icp_qat_hw_compression_depth {
+ ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0,
+ ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1,
+ ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2,
+ ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3,
+ ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 4
+};
+
+enum icp_qat_hw_compression_file_type {
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5
+};
+
+struct icp_qat_hw_compression_config {
+ uint32_t val;
+ uint32_t reserved;
+};
+
+#define QAT_COMPRESSION_DIR_BITPOS 4
+#define QAT_COMPRESSION_DIR_MASK 0x7
+#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16
+#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1
+#define QAT_COMPRESSION_ALGO_BITPOS 31
+#define QAT_COMPRESSION_ALGO_MASK 0x1
+#define QAT_COMPRESSION_DEPTH_BITPOS 28
+#define QAT_COMPRESSION_DEPTH_MASK 0x7
+#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24
+#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF
+
+#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( \
+ dir, delayed, algo, depth, filetype) \
+ ((((dir) & QAT_COMPRESSION_DIR_MASK) << QAT_COMPRESSION_DIR_BITPOS) | \
+ (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) \
+ << QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \
+ (((algo) & QAT_COMPRESSION_ALGO_MASK) \
+ << QAT_COMPRESSION_ALGO_BITPOS) | \
+ (((depth) & QAT_COMPRESSION_DEPTH_MASK) \
+ << QAT_COMPRESSION_DEPTH_BITPOS) | \
+ (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) \
+ << QAT_COMPRESSION_FILE_TYPE_BITPOS))
+
+#endif
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_common.c b/src/spdk/dpdk/drivers/common/qat/qat_common.c
new file mode 100644
index 00000000..47538669
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_common.c
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "qat_common.h"
+#include "qat_device.h"
+#include "qat_logs.h"
+
+int
+qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset,
+ void *list_in, uint32_t data_len,
+ const uint16_t max_segs)
+{
+ int res = -EINVAL;
+ uint32_t buf_len, nr;
+ struct qat_sgl *list = (struct qat_sgl *)list_in;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ uint8_t *virt_addr[max_segs];
+#endif
+
+ for (nr = buf_len = 0; buf && nr < max_segs; buf = buf->next) {
+ if (offset >= rte_pktmbuf_data_len(buf)) {
+ offset -= rte_pktmbuf_data_len(buf);
+ continue;
+ }
+
+ list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+ list->buffers[nr].resrvd = 0;
+ list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ virt_addr[nr] = rte_pktmbuf_mtod_offset(buf, uint8_t*, offset);
+#endif
+ offset = 0;
+ buf_len += list->buffers[nr].len;
+
+ if (buf_len >= data_len) {
+ list->buffers[nr].len -= buf_len - data_len;
+ res = 0;
+ break;
+ }
+ ++nr;
+ }
+
+ if (unlikely(res != 0)) {
+ if (nr == max_segs) {
+ QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+ max_segs);
+ } else {
+ QAT_DP_LOG(ERR, "Mbuf chain is too short");
+ }
+ } else {
+
+ list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+ for (nr = 0; nr < list->num_bufs; nr++) {
+ QAT_DP_LOG(INFO,
+ "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+ nr, list->buffers[nr].len,
+ list->buffers[nr].addr);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL",
+ virt_addr[nr],
+ list->buffers[nr].len);
+ }
+#endif
+ }
+
+ return res;
+}
+
+void qat_stats_get(struct qat_pci_device *dev,
+ struct qat_common_stats *stats,
+ enum qat_service_type service)
+{
+ int i;
+ struct qat_qp **qp;
+
+ if (stats == NULL || dev == NULL || service >= QAT_SERVICE_INVALID) {
+ QAT_LOG(ERR, "invalid param: stats %p, dev %p, service %d",
+ stats, dev, service);
+ return;
+ }
+
+ qp = dev->qps_in_use[service];
+ for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
+ if (qp[i] == NULL) {
+ QAT_LOG(DEBUG, "Service %d Uninitialised qp %d",
+ service, i);
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->stats.enqueued_count;
+ stats->dequeued_count += qp[i]->stats.dequeued_count;
+ stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
+ }
+}
+
+void qat_stats_reset(struct qat_pci_device *dev,
+ enum qat_service_type service)
+{
+ int i;
+ struct qat_qp **qp;
+
+ if (dev == NULL || service >= QAT_SERVICE_INVALID) {
+ QAT_LOG(ERR, "invalid param: dev %p, service %d",
+ dev, service);
+ return;
+ }
+
+ qp = dev->qps_in_use[service];
+ for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
+ if (qp[i] == NULL) {
+ QAT_LOG(DEBUG, "Service %d Uninitialised qp %d",
+ service, i);
+ continue;
+ }
+ memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
+ }
+
+ QAT_LOG(DEBUG, "QAT: %d stats cleared", service);
+}
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_common.h b/src/spdk/dpdk/drivers/common/qat/qat_common.h
new file mode 100644
index 00000000..d4bef539
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_common.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_COMMON_H_
+#define _QAT_COMMON_H_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+/**< Intel(R) QAT device name for PCI registration */
+#define QAT_PCI_NAME qat
+#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
+
+/* Intel(R) QuickAssist Technology device generation is enumerated
+ * from one according to the generation of the device
+ */
+enum qat_device_gen {
+ QAT_GEN1 = 1,
+ QAT_GEN2
+};
+
+enum qat_service_type {
+ QAT_SERVICE_ASYMMETRIC = 0,
+ QAT_SERVICE_SYMMETRIC,
+ QAT_SERVICE_COMPRESSION,
+ QAT_SERVICE_INVALID
+};
+
+#define QAT_MAX_SERVICES (QAT_SERVICE_INVALID)
+
+/**< Common struct for scatter-gather list operations */
+struct qat_flat_buf {
+ uint32_t len;
+ uint32_t resrvd;
+ uint64_t addr;
+} __rte_packed;
+
+#define qat_sgl_hdr struct { \
+ uint64_t resrvd; \
+ uint32_t num_bufs; \
+ uint32_t num_mapped_bufs; \
+}
+
+__extension__
+struct qat_sgl {
+ qat_sgl_hdr;
+ /* flexible array of flat buffers*/
+ struct qat_flat_buf buffers[0];
+} __rte_packed __rte_cache_aligned;
+
+/** Common, i.e. not service-specific, statistics */
+struct qat_common_stats {
+ uint64_t enqueued_count;
+ /**< Count of all operations enqueued */
+ uint64_t dequeued_count;
+ /**< Count of all operations dequeued */
+
+ uint64_t enqueue_err_count;
+ /**< Total error count on operations enqueued */
+ uint64_t dequeue_err_count;
+ /**< Total error count on operations dequeued */
+};
+
+struct qat_pci_device;
+
+int
+qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset,
+ void *list_in, uint32_t data_len,
+ const uint16_t max_segs);
+void
+qat_stats_get(struct qat_pci_device *dev,
+ struct qat_common_stats *stats,
+ enum qat_service_type service);
+void
+qat_stats_reset(struct qat_pci_device *dev,
+ enum qat_service_type service);
+
+#endif /* _QAT_COMMON_H_ */
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_device.c b/src/spdk/dpdk/drivers/common/qat/qat_device.c
new file mode 100644
index 00000000..f32d7235
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_device.c
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_string_fns.h>
+
+#include "qat_device.h"
+#include "adf_transport_access_macros.h"
+#include "qat_sym_pmd.h"
+
+/* Hardware device information per generation */
+__extension__
+struct qat_gen_hw_data qat_gen_config[] = {
+ [QAT_GEN1] = {
+ .dev_gen = QAT_GEN1,
+ .qp_hw_data = qat_gen1_qps,
+ },
+ [QAT_GEN2] = {
+ .dev_gen = QAT_GEN2,
+ .qp_hw_data = qat_gen1_qps,
+ /* gen2 has same ring layout as gen1 */
+ },
+};
+
+
+static struct qat_pci_device qat_pci_devices[RTE_PMD_QAT_MAX_PCI_DEVICES];
+static int qat_nb_pci_devices;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+
+static const struct rte_pci_id pci_id_qat_map[] = {
+ {
+ RTE_PCI_DEVICE(0x8086, 0x0443),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x37c9),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x19e3),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x6f55),
+ },
+ {.device_id = 0},
+};
+
+
+static struct qat_pci_device *
+qat_pci_get_dev(uint8_t dev_id)
+{
+ return &qat_pci_devices[dev_id];
+}
+
+static struct qat_pci_device *
+qat_pci_get_named_dev(const char *name)
+{
+ struct qat_pci_device *dev;
+ unsigned int i;
+
+ if (name == NULL)
+ return NULL;
+
+ for (i = 0; i < RTE_PMD_QAT_MAX_PCI_DEVICES; i++) {
+ dev = &qat_pci_devices[i];
+
+ if ((dev->attached == QAT_ATTACHED) &&
+ (strcmp(dev->name, name) == 0))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static uint8_t
+qat_pci_find_free_device_index(void)
+{
+ uint8_t dev_id;
+
+ for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES; dev_id++) {
+ if (qat_pci_devices[dev_id].attached == QAT_DETACHED)
+ break;
+ }
+ return dev_id;
+}
+
+struct qat_pci_device *
+qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev)
+{
+ char name[QAT_DEV_NAME_MAX_LEN];
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return qat_pci_get_named_dev(name);
+}
+
+struct qat_pci_device *
+qat_pci_device_allocate(struct rte_pci_device *pci_dev)
+{
+ struct qat_pci_device *qat_dev;
+ uint8_t qat_dev_id;
+ char name[QAT_DEV_NAME_MAX_LEN];
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+ snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
+ if (qat_pci_get_named_dev(name) != NULL) {
+ QAT_LOG(ERR, "QAT device with name %s already allocated!",
+ name);
+ return NULL;
+ }
+
+ qat_dev_id = qat_pci_find_free_device_index();
+ if (qat_dev_id == RTE_PMD_QAT_MAX_PCI_DEVICES) {
+ QAT_LOG(ERR, "Reached maximum number of QAT devices");
+ return NULL;
+ }
+
+ qat_dev = qat_pci_get_dev(qat_dev_id);
+ memset(qat_dev, 0, sizeof(*qat_dev));
+ strlcpy(qat_dev->name, name, QAT_DEV_NAME_MAX_LEN);
+ qat_dev->qat_dev_id = qat_dev_id;
+ qat_dev->pci_dev = pci_dev;
+ switch (qat_dev->pci_dev->id.device_id) {
+ case 0x0443:
+ qat_dev->qat_dev_gen = QAT_GEN1;
+ break;
+ case 0x37c9:
+ case 0x19e3:
+ case 0x6f55:
+ qat_dev->qat_dev_gen = QAT_GEN2;
+ break;
+ default:
+ QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
+ return NULL;
+ }
+
+ rte_spinlock_init(&qat_dev->arb_csr_lock);
+
+ qat_dev->attached = QAT_ATTACHED;
+
+ qat_nb_pci_devices++;
+
+ QAT_LOG(DEBUG, "QAT device %d allocated, name %s, total QATs %d",
+ qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices);
+
+ return qat_dev;
+}
+
+int
+qat_pci_device_release(struct rte_pci_device *pci_dev)
+{
+ struct qat_pci_device *qat_dev;
+ char name[QAT_DEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+ snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
+ qat_dev = qat_pci_get_named_dev(name);
+ if (qat_dev != NULL) {
+
+ /* Check that there are no service devs still on pci device */
+ if (qat_dev->sym_dev != NULL)
+ return -EBUSY;
+
+ qat_dev->attached = QAT_DETACHED;
+ qat_nb_pci_devices--;
+ }
+ QAT_LOG(DEBUG, "QAT device %s released, total QATs %d",
+ name, qat_nb_pci_devices);
+ return 0;
+}
+
+static int
+qat_pci_dev_destroy(struct qat_pci_device *qat_pci_dev,
+ struct rte_pci_device *pci_dev)
+{
+ qat_sym_dev_destroy(qat_pci_dev);
+ qat_comp_dev_destroy(qat_pci_dev);
+ qat_asym_dev_destroy(qat_pci_dev);
+ return qat_pci_device_release(pci_dev);
+}
+
+static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ int ret = 0;
+ struct qat_pci_device *qat_pci_dev;
+
+ QAT_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ qat_pci_dev = qat_pci_device_allocate(pci_dev);
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+
+ ret = qat_sym_dev_create(qat_pci_dev);
+ if (ret != 0)
+ goto error_out;
+
+ ret = qat_comp_dev_create(qat_pci_dev);
+ if (ret != 0)
+ goto error_out;
+
+ ret = qat_asym_dev_create(qat_pci_dev);
+ if (ret != 0)
+ goto error_out;
+
+ return 0;
+
+error_out:
+ qat_pci_dev_destroy(qat_pci_dev, pci_dev);
+ return ret;
+
+}
+
+static int qat_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct qat_pci_device *qat_pci_dev;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ qat_pci_dev = qat_get_qat_dev_from_pci_dev(pci_dev);
+ if (qat_pci_dev == NULL)
+ return 0;
+
+ return qat_pci_dev_destroy(qat_pci_dev, pci_dev);
+}
+
+static struct rte_pci_driver rte_qat_pmd = {
+ .id_table = pci_id_qat_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = qat_pci_probe,
+ .remove = qat_pci_remove
+};
+
+__attribute__((weak)) int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map);
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_device.h b/src/spdk/dpdk/drivers/common/qat/qat_device.h
new file mode 100644
index 00000000..9599fc59
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_device.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_DEVICE_H_
+#define _QAT_DEVICE_H_
+
+#include <rte_bus_pci.h>
+
+#include "qat_common.h"
+#include "qat_logs.h"
+#include "adf_transport_access_macros.h"
+#include "qat_qp.h"
+
+#define QAT_DETACHED (0)
+#define QAT_ATTACHED (1)
+
+#define QAT_DEV_NAME_MAX_LEN 64
+
+/*
+ * This struct holds all the data about a QAT pci device
+ * including data about all services it supports.
+ * It contains
+ * - hw_data
+ * - config data
+ * - runtime data
+ */
+struct qat_sym_dev_private;
+struct qat_comp_dev_private;
+
+struct qat_pci_device {
+
+ /* Data used by all services */
+ char name[QAT_DEV_NAME_MAX_LEN];
+ /**< Name of qat pci device */
+ uint8_t qat_dev_id;
+ /**< Device instance for this qat pci device */
+ struct rte_pci_device *pci_dev;
+ /**< PCI information. */
+ enum qat_device_gen qat_dev_gen;
+ /**< QAT device generation */
+ rte_spinlock_t arb_csr_lock;
+ /**< lock to protect accesses to the arbiter CSR */
+ __extension__
+ uint8_t attached : 1;
+ /**< Flag indicating the device is attached */
+
+ struct qat_qp *qps_in_use[QAT_MAX_SERVICES][ADF_MAX_QPS_ON_ANY_SERVICE];
+ /**< links to qps set up for each service, index same as on API */
+
+ /* Data relating to symmetric crypto service */
+ struct qat_sym_dev_private *sym_dev;
+ /**< link back to cryptodev private data */
+ struct rte_device sym_rte_dev;
+ /**< This represents the crypto subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a crypto-specific name
+ */
+
+ /* Data relating to compression service */
+ struct qat_comp_dev_private *comp_dev;
+ /**< link back to compressdev private data */
+
+ /* Data relating to asymmetric crypto service */
+
+};
+
+struct qat_gen_hw_data {
+ enum qat_device_gen dev_gen;
+ const struct qat_qp_hw_data (*qp_hw_data)[ADF_MAX_QPS_ON_ANY_SERVICE];
+};
+
+extern struct qat_gen_hw_data qat_gen_config[];
+
+struct qat_pci_device *
+qat_pci_device_allocate(struct rte_pci_device *pci_dev);
+
+int
+qat_pci_device_release(struct rte_pci_device *pci_dev);
+
+struct qat_pci_device *
+qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev);
+
+/* declaration needed for weak functions */
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+#endif /* _QAT_DEVICE_H_ */
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_logs.c b/src/spdk/dpdk/drivers/common/qat/qat_logs.c
new file mode 100644
index 00000000..7a861709
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_logs.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_log.h>
+#include <rte_hexdump.h>
+
+#include "qat_logs.h"
+
+int qat_gen_logtype;
+int qat_dp_logtype;
+
+int
+qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+ const void *buf, unsigned int len)
+{
+ if (level > rte_log_get_global_level())
+ return 0;
+ if (level > (uint32_t)(rte_log_get_level(logtype)))
+ return 0;
+
+ rte_hexdump(rte_logs.file == NULL ? stderr : rte_logs.file,
+ title, buf, len);
+ return 0;
+}
+
+RTE_INIT(qat_pci_init_log)
+{
+ /* Non-data-path logging for pci device and all services */
+ qat_gen_logtype = rte_log_register("pmd.qat_general");
+ if (qat_gen_logtype >= 0)
+ rte_log_set_level(qat_gen_logtype, RTE_LOG_NOTICE);
+
+ /* data-path logging for all services */
+ qat_dp_logtype = rte_log_register("pmd.qat_dp");
+ if (qat_dp_logtype >= 0)
+ rte_log_set_level(qat_dp_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_logs.h b/src/spdk/dpdk/drivers/common/qat/qat_logs.h
new file mode 100644
index 00000000..4baea12c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_logs.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_LOGS_H_
+#define _QAT_LOGS_H_
+
+extern int qat_gen_logtype;
+extern int qat_dp_logtype;
+
+#define QAT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, qat_gen_logtype, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define QAT_DP_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, qat_dp_logtype, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define QAT_DP_HEXDUMP_LOG(level, title, buf, len) \
+ qat_hexdump_log(RTE_LOG_ ## level, qat_dp_logtype, title, buf, len)
+
+/**
+ * qat_hexdump_log - Dump out memory in a special hex dump format.
+ *
+ * Dump out the message buffer in a special hex dump output format with
+ * characters printed for each line of 16 hex values. The message will be sent
+ * to the stream defined by rte_logs.file or to stderr in case of rte_logs.file
+ * is undefined.
+ */
+int
+qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+ const void *buf, unsigned int len);
+
+#endif /* _QAT_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_qp.c b/src/spdk/dpdk/drivers/common/qat/qat_qp.c
new file mode 100644
index 00000000..7ca7a45e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_qp.c
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_prefetch.h>
+
+#include "qat_logs.h"
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "qat_sym.h"
+#include "qat_comp.h"
+#include "adf_transport_access_macros.h"
+
+
+#define ADF_MAX_DESC 4096
+#define ADF_MIN_DESC 128
+
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+ (ADF_ARB_REG_SLOT * index), value)
+
+__extension__
+const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
+ [ADF_MAX_QPS_ON_ANY_SERVICE] = {
+ /* queue pairs which provide an asymmetric crypto service */
+ [QAT_SERVICE_ASYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_ASYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 0,
+ .rx_ring_num = 8,
+ .tx_msg_size = 64,
+ .rx_msg_size = 32,
+
+ }, {
+ .service_type = QAT_SERVICE_ASYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 1,
+ .rx_ring_num = 9,
+ .tx_msg_size = 64,
+ .rx_msg_size = 32,
+ }
+ },
+ /* queue pairs which provide a symmetric crypto service */
+ [QAT_SERVICE_SYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_SYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 2,
+ .rx_ring_num = 10,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ },
+ {
+ .service_type = QAT_SERVICE_SYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 3,
+ .rx_ring_num = 11,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }
+ },
+ /* queue pairs which provide a compression service */
+ [QAT_SERVICE_COMPRESSION] = {
+ {
+ .service_type = QAT_SERVICE_COMPRESSION,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 6,
+ .rx_ring_num = 14,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }, {
+ .service_type = QAT_SERVICE_COMPRESSION,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 7,
+ .rx_ring_num = 15,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }
+ }
+};
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes);
+static void qat_queue_delete(struct qat_queue *queue);
+static int qat_queue_create(struct qat_pci_device *qat_dev,
+ struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *queue_size_for_csr);
+static void adf_configure_queues(struct qat_qp *queue);
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock);
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock);
+
+
+int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+ enum qat_service_type service)
+{
+ int i, count;
+
+ for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
+ if (qp_hw_data[i].service_type == service)
+ count++;
+ return count;
+}
+
+static const struct rte_memzone *
+queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
+ int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(queue_name);
+ if (mz != 0) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == SOCKET_ID_ANY) ||
+ (socket_id == mz->socket_id))) {
+ QAT_LOG(DEBUG, "re-use memzone already "
+ "allocated for %s", queue_name);
+ return mz;
+ }
+
+ QAT_LOG(ERR, "Incompatible memzone already "
+ "allocated %s, size %u, socket %d. "
+ "Requested size %u, socket %u",
+ queue_name, (uint32_t)mz->len,
+ mz->socket_id, queue_size, socket_id);
+ return NULL;
+ }
+
+ QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
+ queue_name, queue_size, socket_id);
+ return rte_memzone_reserve_aligned(queue_name, queue_size,
+ socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
+}
+
+int qat_qp_setup(struct qat_pci_device *qat_dev,
+ struct qat_qp **qp_addr,
+ uint16_t queue_pair_id,
+ struct qat_qp_config *qat_qp_conf)
+
+{
+ struct qat_qp *qp;
+ struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+ char op_cookie_pool_name[RTE_RING_NAMESIZE];
+ uint32_t i;
+
+ QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
+ queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);
+
+ if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
+ (qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
+ QAT_LOG(ERR, "Can't create qp for %u descriptors",
+ qat_qp_conf->nb_descriptors);
+ return -EINVAL;
+ }
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ QAT_LOG(ERR, "Could not find VF config space "
+ "(UIO driver attached?).");
+ return -EINVAL;
+ }
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc("qat PMD qp metadata",
+ sizeof(*qp), RTE_CACHE_LINE_SIZE);
+ if (qp == NULL) {
+ QAT_LOG(ERR, "Failed to alloc mem for qp struct");
+ return -ENOMEM;
+ }
+ qp->nb_descriptors = qat_qp_conf->nb_descriptors;
+ qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
+ qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
+ RTE_CACHE_LINE_SIZE);
+ if (qp->op_cookies == NULL) {
+ QAT_LOG(ERR, "Failed to alloc mem for cookie");
+ rte_free(qp);
+ return -ENOMEM;
+ }
+
+ qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
+ qp->inflights16 = 0;
+
+ if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf,
+ ADF_RING_DIR_TX) != 0) {
+ QAT_LOG(ERR, "Tx queue create failed "
+ "queue_pair_id=%u", queue_pair_id);
+ goto create_err;
+ }
+
+ if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,
+ ADF_RING_DIR_RX) != 0) {
+ QAT_LOG(ERR, "Rx queue create failed "
+ "queue_pair_id=%hu", queue_pair_id);
+ qat_queue_delete(&(qp->tx_q));
+ goto create_err;
+ }
+
+ adf_configure_queues(qp);
+ adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
+ &qat_dev->arb_csr_lock);
+
+ snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
+ "%s%d_cookies_%s_qp%hu",
+ pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+ qat_qp_conf->service_str, queue_pair_id);
+
+ QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
+ qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+ if (qp->op_cookie_pool == NULL)
+ qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
+ qp->nb_descriptors,
+ qat_qp_conf->cookie_size, 64, 0,
+ NULL, NULL, NULL, NULL, qat_qp_conf->socket_id,
+ 0);
+ if (!qp->op_cookie_pool) {
+ QAT_LOG(ERR, "QAT PMD Cannot create"
+ " op mempool");
+ goto create_err;
+ }
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+ if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
+ QAT_LOG(ERR, "QAT PMD Cannot get op_cookie");
+ goto create_err;
+ }
+ }
+
+ qp->qat_dev_gen = qat_dev->qat_dev_gen;
+ qp->build_request = qat_qp_conf->build_request;
+ qp->service_type = qat_qp_conf->hw->service_type;
+ qp->qat_dev = qat_dev;
+
+ QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
+ queue_pair_id, op_cookie_pool_name);
+
+ *qp_addr = qp;
+ return 0;
+
+create_err:
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+ rte_free(qp->op_cookies);
+ rte_free(qp);
+ return -EFAULT;
+}
+
+int qat_qp_release(struct qat_qp **qp_addr)
+{
+ struct qat_qp *qp = *qp_addr;
+ uint32_t i;
+
+ if (qp == NULL) {
+ QAT_LOG(DEBUG, "qp already freed");
+ return 0;
+ }
+
+ QAT_LOG(DEBUG, "Free qp on qat_pci device %d",
+ qp->qat_dev->qat_dev_id);
+
+ /* Don't free memory if there are still responses to be processed */
+ if (qp->inflights16 == 0) {
+ qat_queue_delete(&(qp->tx_q));
+ qat_queue_delete(&(qp->rx_q));
+ } else {
+ return -EAGAIN;
+ }
+
+ adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
+ &qp->qat_dev->arb_csr_lock);
+
+ for (i = 0; i < qp->nb_descriptors; i++)
+ rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
+
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+
+ rte_free(qp->op_cookies);
+ rte_free(qp);
+ *qp_addr = NULL;
+ return 0;
+}
+
+
+static void qat_queue_delete(struct qat_queue *queue)
+{
+ const struct rte_memzone *mz;
+ int status = 0;
+
+ if (queue == NULL) {
+ QAT_LOG(DEBUG, "Invalid queue");
+ return;
+ }
+ QAT_LOG(DEBUG, "Free ring %d, memzone: %s",
+ queue->hw_queue_number, queue->memz_name);
+
+ mz = rte_memzone_lookup(queue->memz_name);
+ if (mz != NULL) {
+ /* Write an unused pattern to the queue memory. */
+ memset(queue->base_addr, 0x7F, queue->queue_size);
+ status = rte_memzone_free(mz);
+ if (status != 0)
+ QAT_LOG(ERR, "Error %d on freeing queue %s",
+ status, queue->memz_name);
+ } else {
+ QAT_LOG(DEBUG, "queue %s doesn't exist",
+ queue->memz_name);
+ }
+}
+
+static int
+qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
+ struct qat_qp_config *qp_conf, uint8_t dir)
+{
+ uint64_t queue_base;
+ void *io_addr;
+ const struct rte_memzone *qp_mz;
+ struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+ int ret = 0;
+ uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
+ qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
+ uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size);
+
+ queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
+ queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
+ qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);
+
+ if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+ QAT_LOG(ERR, "Invalid descriptor size %d", desc_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a memzone for the queue - create a unique name.
+ */
+ snprintf(queue->memz_name, sizeof(queue->memz_name),
+ "%s_%d_%s_%s_%d_%d",
+ pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+ qp_conf->service_str, "qp_mem",
+ queue->hw_bundle_number, queue->hw_queue_number);
+ qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
+ qp_conf->socket_id);
+ if (qp_mz == NULL) {
+ QAT_LOG(ERR, "Failed to allocate ring memzone");
+ return -ENOMEM;
+ }
+
+ queue->base_addr = (char *)qp_mz->addr;
+ queue->base_phys_addr = qp_mz->iova;
+ if (qat_qp_check_queue_alignment(queue->base_phys_addr,
+ queue_size_bytes)) {
+ QAT_LOG(ERR, "Invalid alignment on queue create "
+ " 0x%"PRIx64"\n",
+ queue->base_phys_addr);
+ ret = -EFAULT;
+ goto queue_create_err;
+ }
+
+ if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors,
+ &(queue->queue_size)) != 0) {
+ QAT_LOG(ERR, "Invalid num inflights");
+ ret = -EINVAL;
+ goto queue_create_err;
+ }
+
+ queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
+ ADF_BYTES_TO_MSG_SIZE(desc_size));
+ queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;
+
+ if (queue->max_inflights < 2) {
+ QAT_LOG(ERR, "Invalid num inflights");
+ ret = -EINVAL;
+ goto queue_create_err;
+ }
+ queue->head = 0;
+ queue->tail = 0;
+ queue->msg_size = desc_size;
+
+ /*
+ * Write an unused pattern to the queue memory.
+ */
+ memset(queue->base_addr, 0x7F, queue_size_bytes);
+
+ queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
+ queue->queue_size);
+
+ io_addr = pci_dev->mem_resource[0].addr;
+
+ WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_base);
+
+ QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
+ " nb msgs %u, msg_size %u, max_inflights %u modulo mask %u",
+ queue->memz_name,
+ queue->queue_size, queue_size_bytes,
+ qp_conf->nb_descriptors, desc_size,
+ queue->max_inflights, queue->modulo_mask);
+
+ return 0;
+
+queue_create_err:
+ rte_memzone_free(qp_mz);
+ return ret;
+}
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes)
+{
+ if (((queue_size_bytes - 1) & phys_addr) != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *p_queue_size_for_csr)
+{
+ uint8_t i = ADF_MIN_RING_SIZE;
+
+ for (; i <= ADF_MAX_RING_SIZE; i++)
+ if ((msg_size * msg_num) ==
+ (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
+ *p_queue_size_for_csr = i;
+ return 0;
+ }
+ QAT_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
+ return -EINVAL;
+}
+
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ rte_spinlock_lock(lock);
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+ value |= (0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+ rte_spinlock_unlock(lock);
+}
+
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ rte_spinlock_lock(lock);
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+ value &= ~(0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+ rte_spinlock_unlock(lock);
+}
+
+static void adf_configure_queues(struct qat_qp *qp)
+{
+ uint32_t queue_config;
+ struct qat_queue *queue = &qp->tx_q;
+
+ queue_config = BUILD_RING_CONFIG(queue->queue_size);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+
+ queue = &qp->rx_q;
+ queue_config =
+ BUILD_RESP_RING_CONFIG(queue->queue_size,
+ ADF_RING_NEAR_WATERMARK_512,
+ ADF_RING_NEAR_WATERMARK_0);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+}
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
+{
+ return data & modulo_mask;
+}
+
+static inline void
+txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, q->tail);
+ q->nb_pending_requests = 0;
+ q->csr_tail = q->tail;
+}
+
+static inline
+void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+{
+ uint32_t old_head, new_head;
+ uint32_t max_head;
+
+ old_head = q->csr_head;
+ new_head = q->head;
+ max_head = qp->nb_descriptors * q->msg_size;
+
+ /* write out free descriptors */
+ void *cur_desc = (uint8_t *)q->base_addr + old_head;
+
+ if (new_head < old_head) {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
+ memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
+ } else {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
+ }
+ q->nb_processed_responses = 0;
+ q->csr_head = new_head;
+
+ /* write current head to CSR */
+ WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, new_head);
+}
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ register struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ register uint32_t nb_ops_sent = 0;
+ register int ret;
+ uint16_t nb_ops_possible = nb_ops;
+ register uint8_t *base_addr;
+ register uint32_t tail;
+ int overflow;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* read params used a lot in main loop into registers */
+ queue = &(tmp_qp->tx_q);
+ base_addr = (uint8_t *)queue->base_addr;
+ tail = queue->tail;
+
+ /* Find how many can actually fit on the ring */
+ tmp_qp->inflights16 += nb_ops;
+ overflow = tmp_qp->inflights16 - queue->max_inflights;
+ if (overflow > 0) {
+ tmp_qp->inflights16 -= overflow;
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
+ return 0;
+ }
+
+ while (nb_ops_sent != nb_ops_possible) {
+ ret = tmp_qp->build_request(*ops, base_addr + tail,
+ tmp_qp->op_cookies[tail / queue->msg_size],
+ tmp_qp->qat_dev_gen);
+ if (ret != 0) {
+ tmp_qp->stats.enqueue_err_count++;
+ /*
+ * This message cannot be enqueued,
+ * decrease number of ops that wasn't sent
+ */
+ tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ }
+
+ tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
+ ops++;
+ nb_ops_sent++;
+ }
+kick_tail:
+ queue->tail = tail;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ queue->nb_pending_requests += nb_ops_sent;
+ if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
+ queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
+ txq_write_tail(tmp_qp, queue);
+ }
+ return nb_ops_sent;
+}
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ struct qat_queue *rx_queue, *tx_queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ uint32_t head;
+ uint32_t resp_counter = 0;
+ uint8_t *resp_msg;
+
+ rx_queue = &(tmp_qp->rx_q);
+ tx_queue = &(tmp_qp->tx_q);
+ head = rx_queue->head;
+ resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
+
+ while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
+ resp_counter != nb_ops) {
+
+ if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
+ qat_sym_process_response(ops, resp_msg);
+ else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
+ qat_comp_process_response(ops, resp_msg);
+
+ head = adf_modulo(head + rx_queue->msg_size,
+ rx_queue->modulo_mask);
+
+ resp_msg = (uint8_t *)rx_queue->base_addr + head;
+ ops++;
+ resp_counter++;
+ }
+ if (resp_counter > 0) {
+ rx_queue->head = head;
+ tmp_qp->stats.dequeued_count += resp_counter;
+ rx_queue->nb_processed_responses += resp_counter;
+ tmp_qp->inflights16 -= resp_counter;
+
+ if (rx_queue->nb_processed_responses >
+ QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(tmp_qp, rx_queue);
+ }
+ /* also check if tail needs to be advanced */
+ if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
+ tx_queue->tail != tx_queue->csr_tail) {
+ txq_write_tail(tmp_qp, tx_queue);
+ }
+ return resp_counter;
+}
+
+__attribute__((weak)) int
+qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
+{
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_qp.h b/src/spdk/dpdk/drivers/common/qat/qat_qp.h
new file mode 100644
index 00000000..69f8a613
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_qp.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_QP_H_
+#define _QAT_QP_H_
+
+#include "qat_common.h"
+#include "adf_transport_access_macros.h"
+
+struct qat_pci_device;
+
+#define QAT_CSR_HEAD_WRITE_THRESH 32U
+/* number of requests to accumulate before writing head CSR */
+#define QAT_CSR_TAIL_WRITE_THRESH 32U
+/* number of requests to accumulate before writing tail CSR */
+#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
+/* number of inflights below which no tail write coalescing should occur */
+
+typedef int (*build_request_t)(void *op,
+ uint8_t *req, void *op_cookie,
+ enum qat_device_gen qat_dev_gen);
+/**< Build a request from an op. */
+
+/**
+ * Structure with data needed for creation of queue pair.
+ */
+struct qat_qp_hw_data {
+ enum qat_service_type service_type;
+ uint8_t hw_bundle_num;
+ uint8_t tx_ring_num;
+ uint8_t rx_ring_num;
+ uint16_t tx_msg_size;
+ uint16_t rx_msg_size;
+};
+/**
+ * Structure with data needed for creation of queue pair.
+ */
+struct qat_qp_config {
+ const struct qat_qp_hw_data *hw;
+ uint32_t nb_descriptors;
+ uint32_t cookie_size;
+ int socket_id;
+ build_request_t build_request;
+ const char *service_str;
+};
+
+/**
+ * Structure associated with each queue.
+ */
+struct qat_queue {
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+ void *base_addr; /* Base address */
+ rte_iova_t base_phys_addr; /* Queue physical address */
+ uint32_t head; /* Shadow copy of the head */
+ uint32_t tail; /* Shadow copy of the tail */
+ uint32_t modulo_mask;
+ uint32_t msg_size;
+ uint16_t max_inflights;
+ uint32_t queue_size;
+ uint8_t hw_bundle_number;
+ uint8_t hw_queue_number;
+ /* HW queue aka ring offset on bundle */
+ uint32_t csr_head; /* last written head value */
+ uint32_t csr_tail; /* last written tail value */
+ uint16_t nb_processed_responses;
+ /* number of responses processed since last CSR head write */
+ uint16_t nb_pending_requests;
+ /* number of requests pending since last CSR tail write */
+};
+
+struct qat_qp {
+ void *mmap_bar_addr;
+ uint16_t inflights16;
+ struct qat_queue tx_q;
+ struct qat_queue rx_q;
+ struct qat_common_stats stats;
+ struct rte_mempool *op_cookie_pool;
+ void **op_cookies;
+ uint32_t nb_descriptors;
+ enum qat_device_gen qat_dev_gen;
+ build_request_t build_request;
+ enum qat_service_type service_type;
+ struct qat_pci_device *qat_dev;
+ /**< qat device this qp is on */
+} __rte_cache_aligned;
+
+extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+int
+qat_qp_release(struct qat_qp **qp_addr);
+
+int
+qat_qp_setup(struct qat_pci_device *qat_dev,
+ struct qat_qp **qp_addr, uint16_t queue_pair_id,
+ struct qat_qp_config *qat_qp_conf);
+
+int
+qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+ enum qat_service_type service);
+
+/* Needed for weak function*/
+int
+qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused);
+
+#endif /* _QAT_QP_H_ */
diff --git a/src/spdk/dpdk/drivers/compress/Makefile b/src/spdk/dpdk/drivers/compress/Makefile
new file mode 100644
index 00000000..286ea6ee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += octeontx
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/compress/isal/Makefile b/src/spdk/dpdk/drivers/compress/isal/Makefile
new file mode 100644
index 00000000..95904f64
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/isal/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_isal_comp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# external library dependencies
+LDLIBS += -lisal
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_compressdev
+LDLIBS += -lrte_bus_vdev
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_isal_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal_compress_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal_compress_pmd_ops.c
+
+# export include files
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/compress/isal/isal_compress_pmd.c b/src/spdk/dpdk/drivers/compress/isal/isal_compress_pmd.c
new file mode 100644
index 00000000..e943336b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/isal/isal_compress_pmd.c
@@ -0,0 +1,694 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#include <isa-l.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_compressdev_pmd.h>
+
+#include "isal_compress_pmd_private.h"
+
+#define RTE_COMP_ISAL_WINDOW_SIZE 15
+#define RTE_COMP_ISAL_LEVEL_ZERO 0 /* ISA-L Level 0 used for fixed Huffman */
+#define RTE_COMP_ISAL_LEVEL_ONE 1
+#define RTE_COMP_ISAL_LEVEL_TWO 2
+#define RTE_COMP_ISAL_LEVEL_THREE 3 /* Optimised for AVX512 & AVX2 only */
+
+int isal_logtype_driver;
+
+/* Verify and set private xform parameters */
+int
+isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform,
+ const struct rte_comp_xform *xform)
+{
+ if (xform == NULL)
+ return -EINVAL;
+
+ /* Set compression private xform variables */
+ if (xform->type == RTE_COMP_COMPRESS) {
+ /* Set private xform type - COMPRESS/DECOMPRESS */
+ priv_xform->type = RTE_COMP_COMPRESS;
+
+ /* Set private xform algorithm */
+ if (xform->compress.algo != RTE_COMP_ALGO_DEFLATE) {
+ if (xform->compress.algo == RTE_COMP_ALGO_NULL) {
+ ISAL_PMD_LOG(ERR, "By-pass not supported\n");
+ return -ENOTSUP;
+ }
+ ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
+ return -ENOTSUP;
+ }
+ priv_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
+
+ /* Set private xform checksum - raw deflate by default */
+ if (xform->compress.chksum != RTE_COMP_CHECKSUM_NONE) {
+ ISAL_PMD_LOG(ERR, "Checksum not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform window size, 32K supported */
+ if (xform->compress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
+ priv_xform->compress.window_size =
+ RTE_COMP_ISAL_WINDOW_SIZE;
+ else {
+ ISAL_PMD_LOG(ERR, "Window size not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform huffman type */
+ switch (xform->compress.deflate.huffman) {
+ case(RTE_COMP_HUFFMAN_DEFAULT):
+ priv_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_DEFAULT;
+ break;
+ case(RTE_COMP_HUFFMAN_FIXED):
+ priv_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_FIXED;
+ break;
+ case(RTE_COMP_HUFFMAN_DYNAMIC):
+ priv_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_DYNAMIC;
+ break;
+ default:
+ ISAL_PMD_LOG(ERR, "Huffman code not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform level.
+ * Checking compliance with compressdev API, -1 <= level => 9
+ */
+ if (xform->compress.level < RTE_COMP_LEVEL_PMD_DEFAULT ||
+ xform->compress.level > RTE_COMP_LEVEL_MAX) {
+ ISAL_PMD_LOG(ERR, "Compression level out of range\n");
+ return -EINVAL;
+ }
+ /* Check for Compressdev API level 0, No compression
+ * not supported in ISA-L
+ */
+ else if (xform->compress.level == RTE_COMP_LEVEL_NONE) {
+ ISAL_PMD_LOG(ERR, "No Compression not supported\n");
+ return -ENOTSUP;
+ }
+ /* If using fixed huffman code, level must be 0 */
+ else if (priv_xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_FIXED) {
+ ISAL_PMD_LOG(DEBUG, "ISA-L level 0 used due to a"
+ " fixed huffman code\n");
+ priv_xform->compress.level = RTE_COMP_ISAL_LEVEL_ZERO;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL0_DEFAULT;
+ } else {
+ /* Mapping API levels to ISA-L levels 1,2 & 3 */
+ switch (xform->compress.level) {
+ case RTE_COMP_LEVEL_PMD_DEFAULT:
+ /* Default is 1 if not using fixed huffman */
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_ONE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL1_DEFAULT;
+ break;
+ case RTE_COMP_LEVEL_MIN:
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_ONE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL1_DEFAULT;
+ break;
+ case RTE_COMP_ISAL_LEVEL_TWO:
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_TWO;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL2_DEFAULT;
+ break;
+ /* Level 3 or higher requested */
+ default:
+ /* Check for AVX512, to use ISA-L level 3 */
+ if (rte_cpu_get_flag_enabled(
+ RTE_CPUFLAG_AVX512F)) {
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_THREE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL3_DEFAULT;
+ }
+ /* Check for AVX2, to use ISA-L level 3 */
+ else if (rte_cpu_get_flag_enabled(
+ RTE_CPUFLAG_AVX2)) {
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_THREE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL3_DEFAULT;
+ } else {
+ ISAL_PMD_LOG(DEBUG, "Requested ISA-L level"
+ " 3 or above; Level 3 optimized"
+ " for AVX512 & AVX2 only."
+ " level changed to 2.\n");
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_TWO;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL2_DEFAULT;
+ }
+ }
+ }
+ }
+
+ /* Set decompression private xform variables */
+ else if (xform->type == RTE_COMP_DECOMPRESS) {
+
+ /* Set private xform type - COMPRESS/DECOMPRESS */
+ priv_xform->type = RTE_COMP_DECOMPRESS;
+
+ /* Set private xform algorithm */
+ if (xform->decompress.algo != RTE_COMP_ALGO_DEFLATE) {
+ if (xform->decompress.algo == RTE_COMP_ALGO_NULL) {
+ ISAL_PMD_LOG(ERR, "By pass not supported\n");
+ return -ENOTSUP;
+ }
+ ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
+ return -ENOTSUP;
+ }
+ priv_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
+
+ /* Set private xform checksum - raw deflate by default */
+ if (xform->compress.chksum != RTE_COMP_CHECKSUM_NONE) {
+ ISAL_PMD_LOG(ERR, "Checksum not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform window size, 32K supported */
+ if (xform->decompress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
+ priv_xform->decompress.window_size =
+ RTE_COMP_ISAL_WINDOW_SIZE;
+ else {
+ ISAL_PMD_LOG(ERR, "Window size not supported\n");
+ return -ENOTSUP;
+ }
+ }
+ return 0;
+}
+
+/* Compression using chained mbufs for input/output data */
+static int
+chained_mbuf_compression(struct rte_comp_op *op, struct isal_comp_qp *qp)
+{
+ int ret;
+ uint32_t remaining_offset;
+ uint32_t remaining_data = op->src.length;
+ struct rte_mbuf *src = op->m_src;
+ struct rte_mbuf *dst = op->m_dst;
+
+ /* check for source/destination offset passing multiple segments
+ * and point compression stream to input/output buffer.
+ */
+ remaining_offset = op->src.offset;
+ while (remaining_offset >= src->data_len) {
+ remaining_offset -= src->data_len;
+ src = src->next;
+ }
+ qp->stream->avail_in = RTE_MIN(src->data_len - remaining_offset,
+ op->src.length);
+ qp->stream->next_in = rte_pktmbuf_mtod_offset(src, uint8_t *,
+ remaining_offset);
+
+ remaining_offset = op->dst.offset;
+ while (remaining_offset >= dst->data_len) {
+ remaining_offset -= dst->data_len;
+ dst = dst->next;
+ }
+ qp->stream->avail_out = dst->data_len - remaining_offset;
+ qp->stream->next_out = rte_pktmbuf_mtod_offset(dst, uint8_t *,
+ remaining_offset);
+
+ if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination buffer\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ while (qp->stream->internal_state.state != ZSTATE_END) {
+ /* Last segment of data */
+ if (remaining_data <= src->data_len)
+ qp->stream->end_of_stream = 1;
+
+ /* Execute compression operation */
+ ret = isal_deflate(qp->stream);
+
+ remaining_data = op->src.length - qp->stream->total_in;
+
+ if (ret != COMP_OK) {
+ ISAL_PMD_LOG(ERR, "Compression operation failed\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+
+ if (qp->stream->avail_in == 0 &&
+ qp->stream->total_in != op->src.length) {
+ if (src->next != NULL) {
+ src = src->next;
+ qp->stream->next_in =
+ rte_pktmbuf_mtod(src, uint8_t *);
+ qp->stream->avail_in =
+ RTE_MIN(remaining_data, src->data_len);
+ } else {
+ ISAL_PMD_LOG(ERR,
+ "Not enough input buffer segments\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+ }
+
+ if (qp->stream->avail_out == 0 &&
+ qp->stream->internal_state.state != ZSTATE_END) {
+ if (dst->next != NULL) {
+ dst = dst->next;
+ qp->stream->next_out =
+ rte_pktmbuf_mtod(dst, uint8_t *);
+ qp->stream->avail_out = dst->data_len;
+ } else {
+ ISAL_PMD_LOG(ERR,
+ "Not enough output buffer segments\n");
+ op->status =
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Decompression using chained mbufs for input/output data */
+static int
+chained_mbuf_decompression(struct rte_comp_op *op, struct isal_comp_qp *qp)
+{
+ int ret;
+ uint32_t consumed_data, src_remaining_offset, dst_remaining_offset;
+ uint32_t remaining_data = op->src.length;
+ struct rte_mbuf *src = op->m_src;
+ struct rte_mbuf *dst = op->m_dst;
+
+ /* check for offset passing multiple segments
+ * and point decompression state to input/output buffer
+ */
+ src_remaining_offset = op->src.offset;
+ while (src_remaining_offset >= src->data_len) {
+ src_remaining_offset -= src->data_len;
+ src = src->next;
+ }
+ qp->state->avail_in = RTE_MIN(src->data_len - src_remaining_offset,
+ op->src.length);
+ qp->state->next_in = rte_pktmbuf_mtod_offset(src, uint8_t *,
+ src_remaining_offset);
+
+ dst_remaining_offset = op->dst.offset;
+ while (dst_remaining_offset >= dst->data_len) {
+ dst_remaining_offset -= dst->data_len;
+ dst = dst->next;
+ }
+ qp->state->avail_out = dst->data_len - dst_remaining_offset;
+ qp->state->next_out = rte_pktmbuf_mtod_offset(dst, uint8_t *,
+ dst_remaining_offset);
+
+ while (qp->state->block_state != ISAL_BLOCK_FINISH) {
+
+ ret = isal_inflate(qp->state);
+
+ /* Check for first segment, offset needs to be accounted for */
+ if (remaining_data == op->src.length) {
+ consumed_data = src->data_len - qp->state->avail_in -
+ src_remaining_offset;
+ } else
+ consumed_data = src->data_len - qp->state->avail_in;
+
+ op->consumed += consumed_data;
+ remaining_data -= consumed_data;
+
+ if (ret != ISAL_DECOMP_OK) {
+ ISAL_PMD_LOG(ERR, "Decompression operation failed\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+
+ if (qp->state->avail_in == 0
+ && op->consumed != op->src.length) {
+ if (src->next != NULL) {
+ src = src->next;
+ qp->state->next_in =
+ rte_pktmbuf_mtod(src, uint8_t *);
+ qp->state->avail_in =
+ RTE_MIN(remaining_data, src->data_len);
+ }
+ }
+
+ if (qp->state->avail_out == 0 &&
+ qp->state->block_state != ISAL_BLOCK_FINISH) {
+ if (dst->next != NULL) {
+ dst = dst->next;
+ qp->state->next_out =
+ rte_pktmbuf_mtod(dst, uint8_t *);
+ qp->state->avail_out = dst->data_len;
+ } else {
+ ISAL_PMD_LOG(ERR,
+ "Not enough output buffer segments\n");
+ op->status =
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Stateless Compression Function */
+static int
+process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp,
+ struct isal_priv_xform *priv_xform)
+{
+ int ret = 0;
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ /* Required due to init clearing level_buf */
+ uint8_t *temp_level_buf = qp->stream->level_buf;
+
+ /* Initialize compression stream */
+ isal_deflate_stateless_init(qp->stream);
+
+ qp->stream->level_buf = temp_level_buf;
+
+ /* Stateless operation, input will be consumed in one go */
+ qp->stream->flush = NO_FLUSH;
+
+ /* set compression level & intermediate level buffer size */
+ qp->stream->level = priv_xform->compress.level;
+ qp->stream->level_buf_size = priv_xform->level_buffer_size;
+
+ /* Set op huffman code */
+ if (priv_xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_STATIC);
+ else if (priv_xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DEFAULT)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_DEFAULT);
+ /* Dynamically change the huffman code to suit the input data */
+ else if (priv_xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DYNAMIC)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_DEFAULT);
+
+ if (op->m_src->pkt_len < (op->src.length + op->src.offset)) {
+ ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ if (op->dst.offset >= op->m_dst->pkt_len) {
+ ISAL_PMD_LOG(ERR, "Output mbuf(s) not big enough"
+ " for offset provided.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Chained mbufs */
+ if (op->m_src->nb_segs > 1 || op->m_dst->nb_segs > 1) {
+ ret = chained_mbuf_compression(op, qp);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* Linear buffer */
+ qp->stream->end_of_stream = 1; /* All input consumed in one */
+ /* Point compression stream to input buffer */
+ qp->stream->avail_in = op->src.length;
+ qp->stream->next_in = rte_pktmbuf_mtod_offset(op->m_src,
+ uint8_t *, op->src.offset);
+
+ /* Point compression stream to output buffer */
+ qp->stream->avail_out = op->m_dst->data_len - op->dst.offset;
+ qp->stream->next_out = rte_pktmbuf_mtod_offset(op->m_dst,
+ uint8_t *, op->dst.offset);
+
+ if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination"
+ " buffers\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Execute compression operation */
+ ret = isal_deflate_stateless(qp->stream);
+
+ /* Check that output buffer did not run out of space */
+ if (ret == STATELESS_OVERFLOW) {
+ ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return ret;
+ }
+
+ /* Check that input buffer has been fully consumed */
+ if (qp->stream->avail_in != (uint32_t)0) {
+ ISAL_PMD_LOG(ERR, "Input buffer could not be read"
+ " entirely\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ if (ret != COMP_OK) {
+ ISAL_PMD_LOG(ERR, "Compression operation failed\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+ }
+ op->consumed = qp->stream->total_in;
+ op->produced = qp->stream->total_out;
+
+ return ret;
+}
+
+/* Stateless Decompression Function */
+static int
+process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp)
+{
+ int ret = 0;
+
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ /* Initialize decompression state */
+ isal_inflate_init(qp->state);
+
+ if (op->m_src->pkt_len < (op->src.length + op->src.offset)) {
+ ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ if (op->dst.offset >= op->m_dst->pkt_len) {
+ ISAL_PMD_LOG(ERR, "Output mbuf not big enough for "
+ "offset provided.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Chained mbufs */
+ if (op->m_src->nb_segs > 1 || op->m_dst->nb_segs > 1) {
+ ret = chained_mbuf_decompression(op, qp);
+ if (ret != 0)
+ return ret;
+ } else {
+ /* Linear buffer */
+ /* Point decompression state to input buffer */
+ qp->state->avail_in = op->src.length;
+ qp->state->next_in = rte_pktmbuf_mtod_offset(op->m_src,
+ uint8_t *, op->src.offset);
+
+ /* Point decompression state to output buffer */
+ qp->state->avail_out = op->m_dst->data_len - op->dst.offset;
+ qp->state->next_out = rte_pktmbuf_mtod_offset(op->m_dst,
+ uint8_t *, op->dst.offset);
+
+ if (unlikely(!qp->state->next_in || !qp->state->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination"
+ " buffers\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Execute decompression operation */
+ ret = isal_inflate_stateless(qp->state);
+
+ if (ret == ISAL_OUT_OVERFLOW) {
+ ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return ret;
+ }
+
+ /* Check that input buffer has been fully consumed */
+ if (qp->state->avail_in != (uint32_t)0) {
+ ISAL_PMD_LOG(ERR, "Input buffer could not be read"
+ " entirely\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ if (ret != ISAL_DECOMP_OK) {
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+ op->consumed = op->src.length - qp->state->avail_in;
+ }
+ op->produced = qp->state->total_out;
+
+ return ret;
+}
+
+/* Process compression/decompression operation */
+static int
+process_op(struct isal_comp_qp *qp, struct rte_comp_op *op,
+ struct isal_priv_xform *priv_xform)
+{
+ switch (priv_xform->type) {
+ case RTE_COMP_COMPRESS:
+ process_isal_deflate(op, qp, priv_xform);
+ break;
+ case RTE_COMP_DECOMPRESS:
+ process_isal_inflate(op, qp);
+ break;
+ default:
+ ISAL_PMD_LOG(ERR, "Operation Not Supported\n");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+/* Enqueue burst */
+static uint16_t
+isal_comp_pmd_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ struct isal_comp_qp *qp = queue_pair;
+ uint16_t i;
+ int retval;
+ int16_t num_enq = RTE_MIN(qp->num_free_elements, nb_ops);
+
+ for (i = 0; i < num_enq; i++) {
+ if (unlikely(ops[i]->op_type != RTE_COMP_OP_STATELESS)) {
+ ops[i]->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ISAL_PMD_LOG(ERR, "Stateful operation not Supported\n");
+ qp->qp_stats.enqueue_err_count++;
+ continue;
+ }
+ retval = process_op(qp, ops[i], ops[i]->private_xform);
+ if (unlikely(retval < 0) ||
+ ops[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
+ qp->qp_stats.enqueue_err_count++;
+ }
+ }
+
+ retval = rte_ring_enqueue_burst(qp->processed_pkts, (void *)ops,
+ num_enq, NULL);
+ qp->num_free_elements -= retval;
+ qp->qp_stats.enqueued_count += retval;
+
+ return retval;
+}
+
+/* Dequeue burst */
+static uint16_t
+isal_comp_pmd_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ struct isal_comp_qp *qp = queue_pair;
+ uint16_t nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts, (void **)ops,
+ nb_ops, NULL);
+ qp->num_free_elements += nb_dequeued;
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/* Create ISA-L compression device */
+static int
+compdev_isal_create(const char *name, struct rte_vdev_device *vdev,
+ struct rte_compressdev_pmd_init_params *init_params)
+{
+ struct rte_compressdev *dev;
+
+ dev = rte_compressdev_pmd_create(name, &vdev->device,
+ sizeof(struct isal_comp_private), init_params);
+ if (dev == NULL) {
+ ISAL_PMD_LOG(ERR, "failed to create compressdev vdev");
+ return -EFAULT;
+ }
+
+ dev->dev_ops = isal_compress_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = isal_comp_pmd_dequeue_burst;
+ dev->enqueue_burst = isal_comp_pmd_enqueue_burst;
+
+ return 0;
+}
+
+/** Remove compression device */
+static int
+compdev_isal_remove_dev(struct rte_vdev_device *vdev)
+{
+ struct rte_compressdev *compdev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ compdev = rte_compressdev_pmd_get_named_dev(name);
+ if (compdev == NULL)
+ return -ENODEV;
+
+ return rte_compressdev_pmd_destroy(compdev);
+}
+
+/** Initialise ISA-L compression device */
+static int
+compdev_isal_probe(struct rte_vdev_device *dev)
+{
+ struct rte_compressdev_pmd_init_params init_params = {
+ "",
+ rte_socket_id(),
+ };
+ const char *name, *args;
+ int retval;
+
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ args = rte_vdev_device_args(dev);
+
+ retval = rte_compressdev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ ISAL_PMD_LOG(ERR,
+ "Failed to parse initialisation arguments[%s]\n", args);
+ return -EINVAL;
+ }
+
+ return compdev_isal_create(name, dev, &init_params);
+}
+
+static struct rte_vdev_driver compdev_isal_pmd_drv = {
+ .probe = compdev_isal_probe,
+ .remove = compdev_isal_remove_dev,
+};
+
+RTE_PMD_REGISTER_VDEV(COMPDEV_NAME_ISAL_PMD, compdev_isal_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(COMPDEV_NAME_ISAL_PMD,
+ "socket_id=<int>");
+
+RTE_INIT(isal_init_log)
+{
+ isal_logtype_driver = rte_log_register("pmd.compress.isal");
+ if (isal_logtype_driver >= 0)
+ rte_log_set_level(isal_logtype_driver, RTE_LOG_INFO);
+}
diff --git a/src/spdk/dpdk/drivers/compress/isal/isal_compress_pmd_ops.c b/src/spdk/dpdk/drivers/compress/isal/isal_compress_pmd_ops.c
new file mode 100644
index 00000000..41cade87
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/isal/isal_compress_pmd_ops.c
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#include <isa-l.h>
+
+#include <rte_common.h>
+#include <rte_compressdev_pmd.h>
+#include <rte_malloc.h>
+
+#include "isal_compress_pmd_private.h"
+
+static const struct rte_compressdev_capabilities isal_pmd_capabilities[] = {
+ {
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
+ RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC,
+ .window_size = {
+ .min = 15,
+ .max = 15,
+ .increment = 0
+ },
+ },
+ RTE_COMP_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+isal_comp_pmd_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ int ret = 0;
+ unsigned int n;
+ char mp_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ unsigned int elt_size = sizeof(struct isal_priv_xform);
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ n = snprintf(mp_name, sizeof(mp_name), "compdev_%d_xform_mp",
+ dev->data->dev_id);
+ if (n > sizeof(mp_name)) {
+ ISAL_PMD_LOG(ERR,
+ "Unable to create unique name for xform mempool");
+ return -ENOMEM;
+ }
+
+ internals->priv_xform_mp = rte_mempool_lookup(mp_name);
+
+ if (internals->priv_xform_mp != NULL) {
+ if (((internals->priv_xform_mp)->elt_size != elt_size) ||
+ ((internals->priv_xform_mp)->size <
+ config->max_nb_priv_xforms)) {
+
+ ISAL_PMD_LOG(ERR, "%s mempool already exists with different"
+ " initialization parameters", mp_name);
+ internals->priv_xform_mp = NULL;
+ return -ENOMEM;
+ }
+ } else { /* First time configuration */
+ internals->priv_xform_mp = rte_mempool_create(
+ mp_name, /* mempool name */
+ /* number of elements*/
+ config->max_nb_priv_xforms,
+ elt_size, /* element size*/
+ 0, /* Cache size*/
+ 0, /* private data size */
+ NULL, /* obj initialization constructor */
+ NULL, /* obj initialization constructor arg */
+ NULL, /**< obj constructor*/
+ NULL, /* obj constructor arg */
+ config->socket_id, /* socket id */
+ 0); /* flags */
+ }
+
+ if (internals->priv_xform_mp == NULL) {
+ ISAL_PMD_LOG(ERR, "%s mempool allocation failed", mp_name);
+ return -ENOMEM;
+ }
+
+ dev->data->dev_private = internals;
+
+ return ret;
+}
+
+/** Start device */
+static int
+isal_comp_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+isal_comp_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+}
+
+/** Close device */
+static int
+isal_comp_pmd_close(struct rte_compressdev *dev)
+{
+ /* Free private data */
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ rte_mempool_free(internals->priv_xform_mp);
+ return 0;
+}
+
+/** Get device statistics */
+static void
+isal_comp_pmd_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Get device info */
+static void
+isal_comp_pmd_info_get(struct rte_compressdev *dev __rte_unused,
+ struct rte_compressdev_info *dev_info)
+{
+ if (dev_info != NULL) {
+ dev_info->capabilities = isal_pmd_capabilities;
+ dev_info->feature_flags = RTE_COMPDEV_FF_CPU_AVX512 |
+ RTE_COMPDEV_FF_CPU_AVX2 |
+ RTE_COMPDEV_FF_CPU_AVX |
+ RTE_COMPDEV_FF_CPU_SSE;
+ }
+}
+
+/** Reset device statistics */
+static void
+isal_comp_pmd_stats_reset(struct rte_compressdev *dev)
+{
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+/** Release queue pair */
+static int
+isal_comp_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp == NULL)
+ return -EINVAL;
+
+ if (qp->stream != NULL)
+ rte_free(qp->stream);
+
+ if (qp->stream->level_buf != NULL)
+ rte_free(qp->stream->level_buf);
+
+ if (qp->state != NULL)
+ rte_free(qp->state);
+
+ if (qp->processed_pkts != NULL)
+ rte_ring_free(qp->processed_pkts);
+
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+isal_comp_pmd_qp_create_processed_pkts_ring(struct isal_comp_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ISAL_PMD_LOG(DEBUG,
+ "Reusing existing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+
+ ISAL_PMD_LOG(ERR,
+ "Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+isal_comp_pmd_qp_set_unique_name(struct rte_compressdev *dev,
+struct isal_comp_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "isal_compression_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/* Setup a queue pair */
+static int
+isal_comp_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct isal_comp_qp *qp = NULL;
+ int retval;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ isal_comp_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("Isa-l compression PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ ISAL_PMD_LOG(ERR, "Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ /* Initialize memory for compression stream structure */
+ qp->stream = rte_zmalloc_socket("Isa-l compression stream ",
+ sizeof(struct isal_zstream), RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ /* Initialize memory for compression level buffer */
+ qp->stream->level_buf = rte_zmalloc_socket("Isa-l compression lev_buf",
+ ISAL_DEF_LVL3_DEFAULT, RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ /* Initialize memory for decompression state structure */
+ qp->state = rte_zmalloc_socket("Isa-l decompression state",
+ sizeof(struct inflate_state), RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = isal_comp_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ ISAL_PMD_LOG(ERR, "Failed to create unique name for isal "
+ "compression device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = isal_comp_pmd_qp_create_processed_pkts_ring(qp,
+ max_inflight_ops, socket_id);
+ if (qp->processed_pkts == NULL) {
+ ISAL_PMD_LOG(ERR, "Failed to create unique name for isal "
+ "compression device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->num_free_elements = rte_ring_free_count(qp->processed_pkts);
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Set private xform data*/
+static int
+isal_comp_pmd_priv_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform, void **priv_xform)
+{
+ int ret;
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ if (xform == NULL) {
+ ISAL_PMD_LOG(ERR, "Invalid Xform struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(internals->priv_xform_mp, priv_xform)) {
+ ISAL_PMD_LOG(ERR,
+ "Couldn't get object from private xform mempool");
+ return -ENOMEM;
+ }
+
+ ret = isal_comp_set_priv_xform_parameters(*priv_xform, xform);
+ if (ret != 0) {
+ ISAL_PMD_LOG(ERR, "Failed to configure private xform parameters");
+
+ /* Return private xform to mempool */
+ rte_mempool_put(internals->priv_xform_mp, priv_xform);
+ return ret;
+ }
+ return 0;
+}
+
+/** Clear memory of the private xform so it doesn't leave key material behind */
+static int
+isal_comp_pmd_priv_xform_free(struct rte_compressdev *dev, void *priv_xform)
+{
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ /* Zero out the whole structure */
+ if (priv_xform) {
+ memset(priv_xform, 0, sizeof(struct isal_priv_xform));
+ rte_mempool_put(internals->priv_xform_mp, priv_xform);
+ }
+ return 0;
+}
+
+struct rte_compressdev_ops isal_pmd_ops = {
+ .dev_configure = isal_comp_pmd_config,
+ .dev_start = isal_comp_pmd_start,
+ .dev_stop = isal_comp_pmd_stop,
+ .dev_close = isal_comp_pmd_close,
+
+ .stats_get = isal_comp_pmd_stats_get,
+ .stats_reset = isal_comp_pmd_stats_reset,
+
+ .dev_infos_get = isal_comp_pmd_info_get,
+
+ .queue_pair_setup = isal_comp_pmd_qp_setup,
+ .queue_pair_release = isal_comp_pmd_qp_release,
+
+ .private_xform_create = isal_comp_pmd_priv_xform_create,
+ .private_xform_free = isal_comp_pmd_priv_xform_free,
+};
+
+struct rte_compressdev_ops *isal_compress_pmd_ops = &isal_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/compress/isal/isal_compress_pmd_private.h b/src/spdk/dpdk/drivers/compress/isal/isal_compress_pmd_private.h
new file mode 100644
index 00000000..46e9fcfa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/isal/isal_compress_pmd_private.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _ISAL_COMP_PMD_PRIVATE_H_
+#define _ISAL_COMP_PMD_PRIVATE_H_
+
+#define COMPDEV_NAME_ISAL_PMD compress_isal
+/**< ISA-L comp PMD device name */
+
+extern int isal_logtype_driver;
+#define ISAL_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, isal_logtype_driver, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+/* private data structure for each ISA-L compression device */
+struct isal_comp_private {
+ struct rte_mempool *priv_xform_mp;
+};
+
+/** ISA-L queue pair */
+struct isal_comp_qp {
+ /* Queue Pair Identifier */
+ uint16_t id;
+ /* Unique Queue Pair Name */
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ /* Ring for placing process packets */
+ struct rte_ring *processed_pkts;
+ /* Queue pair statistics */
+ struct rte_compressdev_stats qp_stats;
+ /* Compression stream information*/
+ struct isal_zstream *stream;
+ /* Decompression state information*/
+ struct inflate_state *state;
+ /* Number of free elements on ring */
+ uint16_t num_free_elements;
+} __rte_cache_aligned;
+
+/** ISA-L private xform structure */
+struct isal_priv_xform {
+ enum rte_comp_xform_type type;
+ union {
+ struct rte_comp_compress_xform compress;
+ struct rte_comp_decompress_xform decompress;
+ };
+ uint32_t level_buffer_size;
+} __rte_cache_aligned;
+
+/** Set and validate NULL comp private xform parameters */
+extern int
+isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform,
+ const struct rte_comp_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_compressdev_ops *isal_compress_pmd_ops;
+
+#endif /* _ISAL_COMP_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/compress/isal/meson.build b/src/spdk/dpdk/drivers/compress/isal/meson.build
new file mode 100644
index 00000000..94c10fd6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/isal/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 Intel Corporation
+
+dep = dependency('libisal', required: false)
+if not dep.found()
+ build =false
+endif
+
+deps += 'bus_vdev'
+sources = files('isal_compress_pmd.c', 'isal_compress_pmd_ops.c')
+ext_deps += dep
+pkgconfig_extra_libs += '-lisal'
+
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/compress/isal/rte_pmd_isal_version.map b/src/spdk/dpdk/drivers/compress/isal/rte_pmd_isal_version.map
new file mode 100644
index 00000000..de8e412f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/isal/rte_pmd_isal_version.map
@@ -0,0 +1,3 @@
+DPDK_18.05 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/compress/meson.build b/src/spdk/dpdk/drivers/compress/meson.build
new file mode 100644
index 00000000..817ef3be
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+drivers = ['isal', 'octeontx', 'qat', 'zlib']
+
+std_deps = ['compressdev'] # compressdev pulls in all other needed deps
+config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/src/spdk/dpdk/drivers/compress/octeontx/Makefile b/src/spdk/dpdk/drivers/compress/octeontx/Makefile
new file mode 100644
index 00000000..f34424c8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/octeontx/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_octeontx_zip.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -I$(RTE_SDK)/drivers/compress/octeontx/include
+
+# external library include paths
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_compressdev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += otx_zip_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += otx_zip.c
+
+# versioning export map
+EXPORT_MAP := rte_pmd_octeontx_compress_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/compress/octeontx/include/zip_regs.h b/src/spdk/dpdk/drivers/compress/octeontx/include/zip_regs.h
new file mode 100644
index 00000000..1e74db43
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/octeontx/include/zip_regs.h
@@ -0,0 +1,711 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _RTE_OCTEONTX_ZIP_REGS_H_
+#define _RTE_OCTEONTX_ZIP_REGS_H_
+
+
+/**
+ * Enumeration zip_cc
+ *
+ * ZIP compression coding Enumeration
+ * Enumerates ZIP_INST_S[CC].
+ */
+enum {
+ ZIP_CC_DEFAULT = 0,
+ ZIP_CC_DYN_HUFF,
+ ZIP_CC_FIXED_HUFF,
+ ZIP_CC_LZS
+} zip_cc;
+
+/**
+ * Register (NCB) zip_vq#_ena
+ *
+ * ZIP VF Queue Enable Register
+ * If a queue is disabled, ZIP CTL stops fetching instructions from the queue.
+ */
+typedef union {
+ uint64_t u;
+ struct zip_vqx_ena_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1;
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_vqx_ena_s cn; */
+} zip_vqx_ena_t;
+
+/**
+ * Register (NCB) zip_vq#_sbuf_addr
+ *
+ * ZIP VF Queue Starting Buffer Address Registers
+ * These registers set the buffer parameters for the instruction queues.
+ * When quiescent (i.e.
+ * outstanding doorbell count is 0), it is safe to rewrite this register
+ * to effectively reset the
+ * command buffer state machine.
+ * These registers must be programmed after software programs the
+ * corresponding ZIP_QUE()_SBUF_CTL.
+ */
+typedef union {
+ uint64_t u;
+ struct zip_vqx_sbuf_addr_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t ptr : 42;
+ uint64_t off : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t off : 7;
+ uint64_t ptr : 42;
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_vqx_sbuf_addr_s cn; */
+} zip_vqx_sbuf_addr_t;
+
+/**
+ * Register (NCB) zip_que#_doorbell
+ *
+ * ZIP Queue Doorbell Registers
+ * Doorbells for the ZIP instruction queues.
+ */
+typedef union {
+ uint64_t u;
+ struct zip_quex_doorbell_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t dbell_cnt : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t dbell_cnt : 20;
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_quex_doorbell_s cn; */
+} zip_quex_doorbell_t;
+
+/**
+ * Structure zip_nptr_s
+ *
+ * ZIP Instruction Next-Chunk-Buffer Pointer (NPTR) Structure
+ * This structure is used to chain all the ZIP instruction buffers
+ * together. ZIP instruction buffers are managed
+ * (allocated and released) by software.
+ */
+union zip_nptr_s {
+ uint64_t u;
+ struct zip_nptr_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t addr : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_nptr_s_s cn83xx; */
+};
+
+/**
+ * generic ptr address
+ */
+union zip_zptr_addr_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u;
+ /** generic ptr address */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t addr : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/**
+ * generic ptr ctl
+ */
+union zip_zptr_ctl_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u;
+ /** generic ptr ctl */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ uint64_t reserved_112_127 : 16;
+ uint64_t length : 16;
+ uint64_t reserved_67_95 : 29;
+ uint64_t fw : 1;
+ uint64_t nc : 1;
+ uint64_t data_be : 1;
+#else /* Word 1 - Little Endian */
+ uint64_t data_be : 1;
+ uint64_t nc : 1;
+ uint64_t fw : 1;
+ uint64_t reserved_67_95 : 29;
+ uint64_t length : 16;
+ uint64_t reserved_112_127 : 16;
+#endif /* Word 1 - End */
+ } s;
+
+};
+
+/**
+ * Structure zip_inst_s
+ *
+ * ZIP Instruction Structure
+ * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15
+ * within the structure).
+ */
+union zip_inst_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u[16];
+ /** ZIP Instruction Structure */
+ struct zip_inst_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** Done interrupt */
+ uint64_t doneint : 1;
+ /** reserved */
+ uint64_t reserved_56_62 : 7;
+ /** Total output length */
+ uint64_t totaloutputlength : 24;
+ /** reserved */
+ uint64_t reserved_27_31 : 5;
+ /** EXNUM */
+ uint64_t exn : 3;
+ /** HASH IV */
+ uint64_t iv : 1;
+ /** EXBITS */
+ uint64_t exbits : 7;
+ /** Hash more-in-file */
+ uint64_t hmif : 1;
+ /** Hash Algorithm and enable */
+ uint64_t halg : 3;
+ /** Sync flush*/
+ uint64_t sf : 1;
+ /** Compression speed/storage */
+ uint64_t ss : 2;
+ /** Compression coding */
+ uint64_t cc : 2;
+ /** End of input data */
+ uint64_t ef : 1;
+ /** Beginning of file */
+ uint64_t bf : 1;
+ // uint64_t reserved_3_4 : 2;
+ /** Comp/decomp operation */
+ uint64_t op : 2;
+ /** Data sactter */
+ uint64_t ds : 1;
+ /** Data gather */
+ uint64_t dg : 1;
+ /** History gather */
+ uint64_t hg : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t hg : 1;
+ uint64_t dg : 1;
+ uint64_t ds : 1;
+ //uint64_t reserved_3_4 : 2;
+ uint64_t op : 2;
+ uint64_t bf : 1;
+ uint64_t ef : 1;
+ uint64_t cc : 2;
+ uint64_t ss : 2;
+ uint64_t sf : 1;
+ uint64_t halg : 3;
+ uint64_t hmif : 1;
+ uint64_t exbits : 7;
+ uint64_t iv : 1;
+ uint64_t exn : 3;
+ uint64_t reserved_27_31 : 5;
+ uint64_t totaloutputlength : 24;
+ uint64_t reserved_56_62 : 7;
+ uint64_t doneint : 1;
+
+#endif /* Word 0 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** History length */
+ uint64_t historylength : 16;
+ /** reserved */
+ uint64_t reserved_96_111 : 16;
+ /** adler/crc32 checksum*/
+ uint64_t adlercrc32 : 32;
+#else /* Word 1 - Little Endian */
+ uint64_t adlercrc32 : 32;
+ uint64_t reserved_96_111 : 16;
+ uint64_t historylength : 16;
+#endif /* Word 1 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ /** Decompression Context Pointer Address */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#else /* Word 2 - Little Endian */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#endif /* Word 2 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Decompression Context Pointer Control */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#else /* Word 3 - Little Endian */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#endif /* Word 3 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Decompression history pointer address */
+ union zip_zptr_addr_s his_ptr_addr;
+#else /* Word 4 - Little Endian */
+ union zip_zptr_addr_s his_ptr_addr;
+#endif /* Word 4 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Decompression history pointer control */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#else /* Word 5 - Little Endian */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#endif /* Word 5 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Input and compression history pointer address */
+ union zip_zptr_addr_s inp_ptr_addr;
+#else /* Word 6 - Little Endian */
+ union zip_zptr_addr_s inp_ptr_addr;
+#endif /* Word 6 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Input and compression history pointer control */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#else /* Word 7 - Little Endian */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#endif /* Word 7 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Output pointer address */
+ union zip_zptr_addr_s out_ptr_addr;
+#else /* Word 8 - Little Endian */
+ union zip_zptr_addr_s out_ptr_addr;
+#endif /* Word 8 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Output pointer control */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#else /* Word 9 - Little Endian */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#endif /* Word 9 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Result pointer address */
+ union zip_zptr_addr_s res_ptr_addr;
+#else /* Word 10 - Little Endian */
+ union zip_zptr_addr_s res_ptr_addr;
+#endif /* Word 10 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Result pointer control */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#else /* Word 11 - Little Endian */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#endif /* Word 11 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 12 - Big Endian */
+ /** reserved */
+ uint64_t reserved_812_831 : 20;
+ /** SSO guest group */
+ uint64_t ggrp : 10;
+ /** SSO tag type */
+ uint64_t tt : 2;
+ /** SSO tag */
+ uint64_t tag : 32;
+#else /* Word 12 - Little Endian */
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t ggrp : 10;
+ uint64_t reserved_812_831 : 20;
+#endif /* Word 12 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 13 - Big Endian */
+ /** Work queue entry pointer */
+ uint64_t wq_ptr : 64;
+#else /* Word 13 - Little Endian */
+ uint64_t wq_ptr : 64;
+#endif /* Word 13 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** reserved */
+ uint64_t reserved_896_959 : 64;
+#else /* Word 14 - Little Endian */
+ uint64_t reserved_896_959 : 64;
+#endif /* Word 14 - End */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Hash structure pointer */
+ uint64_t hash_ptr : 64;
+#else /* Word 15 - Little Endian */
+ uint64_t hash_ptr : 64;
+#endif /* Word 15 - End */
+ } /** ZIP 88xx Instruction Structure */zip88xx;
+
+ /** ZIP Instruction Structure */
+ struct zip_inst_s_cn83xx {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** Done interrupt */
+ uint64_t doneint : 1;
+ /** reserved */
+ uint64_t reserved_56_62 : 7;
+ /** Total output length */
+ uint64_t totaloutputlength : 24;
+ /** reserved */
+ uint64_t reserved_27_31 : 5;
+ /** EXNUM */
+ uint64_t exn : 3;
+ /** HASH IV */
+ uint64_t iv : 1;
+ /** EXBITS */
+ uint64_t exbits : 7;
+ /** Hash more-in-file */
+ uint64_t hmif : 1;
+ /** Hash Algorithm and enable */
+ uint64_t halg : 3;
+ /** Sync flush*/
+ uint64_t sf : 1;
+ /** Compression speed/storage */
+ uint64_t ss : 2;
+ /** Compression coding */
+ uint64_t cc : 2;
+ /** End of input data */
+ uint64_t ef : 1;
+ /** Beginning of file */
+ uint64_t bf : 1;
+ /** Comp/decomp operation */
+ uint64_t op : 2;
+ /** Data sactter */
+ uint64_t ds : 1;
+ /** Data gather */
+ uint64_t dg : 1;
+ /** History gather */
+ uint64_t hg : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t hg : 1;
+ uint64_t dg : 1;
+ uint64_t ds : 1;
+ uint64_t op : 2;
+ uint64_t bf : 1;
+ uint64_t ef : 1;
+ uint64_t cc : 2;
+ uint64_t ss : 2;
+ uint64_t sf : 1;
+ uint64_t halg : 3;
+ uint64_t hmif : 1;
+ uint64_t exbits : 7;
+ uint64_t iv : 1;
+ uint64_t exn : 3;
+ uint64_t reserved_27_31 : 5;
+ uint64_t totaloutputlength : 24;
+ uint64_t reserved_56_62 : 7;
+ uint64_t doneint : 1;
+#endif /* Word 0 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** History length */
+ uint64_t historylength : 16;
+ /** reserved */
+ uint64_t reserved_96_111 : 16;
+ /** adler/crc32 checksum*/
+ uint64_t adlercrc32 : 32;
+#else /* Word 1 - Little Endian */
+ uint64_t adlercrc32 : 32;
+ uint64_t reserved_96_111 : 16;
+ uint64_t historylength : 16;
+#endif /* Word 1 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ /** Decompression Context Pointer Address */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#else /* Word 2 - Little Endian */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#endif /* Word 2 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 3 - Big Endian */
+ /** Decompression Context Pointer Control */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#else /* Word 3 - Little Endian */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#endif /* Word 3 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 4 - Big Endian */
+ /** Decompression history pointer address */
+ union zip_zptr_addr_s his_ptr_addr;
+#else /* Word 4 - Little Endian */
+ union zip_zptr_addr_s his_ptr_addr;
+#endif /* Word 4 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 5 - Big Endian */
+ /** Decompression history pointer control */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#else /* Word 5 - Little Endian */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#endif /* Word 5 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 6 - Big Endian */
+ /** Input and compression history pointer address */
+ union zip_zptr_addr_s inp_ptr_addr;
+#else /* Word 6 - Little Endian */
+ union zip_zptr_addr_s inp_ptr_addr;
+#endif /* Word 6 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 7 - Big Endian */
+ /** Input and compression history pointer control */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#else /* Word 7 - Little Endian */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#endif /* Word 7 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 8 - Big Endian */
+ /** Output pointer address */
+ union zip_zptr_addr_s out_ptr_addr;
+#else /* Word 8 - Little Endian */
+ union zip_zptr_addr_s out_ptr_addr;
+#endif /* Word 8 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 9 - Big Endian */
+ /** Output pointer control */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#else /* Word 9 - Little Endian */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#endif /* Word 9 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 10 - Big Endian */
+ /** Result pointer address */
+ union zip_zptr_addr_s res_ptr_addr;
+#else /* Word 10 - Little Endian */
+ union zip_zptr_addr_s res_ptr_addr;
+#endif /* Word 10 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 11 - Big Endian */
+ /** Result pointer control */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#else /* Word 11 - Little Endian */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#endif /* Word 11 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 12 - Big Endian */
+ /** reserved */
+ uint64_t reserved_812_831 : 20;
+ /** SSO guest group */
+ uint64_t ggrp : 10;
+ /** SSO tag type */
+ uint64_t tt : 2;
+ /** SSO tag */
+ uint64_t tag : 32;
+#else /* Word 12 - Little Endian */
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t ggrp : 10;
+ uint64_t reserved_812_831 : 20;
+#endif /* Word 12 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 13 - Big Endian */
+ /** Work queue entry pointer */
+ uint64_t wq_ptr : 64;
+#else /* Word 13 - Little Endian */
+ uint64_t wq_ptr : 64;
+#endif /* Word 13 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 14 - Big Endian */
+ /** reserved */
+ uint64_t reserved_896_959 : 64;
+#else /* Word 14 - Little Endian */
+ uint64_t reserved_896_959 : 64;
+#endif /* Word 14 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 15 - Big Endian */
+ /** Hash structure pointer */
+ uint64_t hash_ptr : 64;
+#else /* Word 15 - Little Endian */
+ uint64_t hash_ptr : 64;
+#endif /* Word 15 - End */
+ } /** ZIP 83xx Instruction Structure */s;
+};
+
+/**
+ * Structure zip_zres_s
+ *
+ * ZIP Result Structure
+ * The ZIP coprocessor writes the result structure after it completes the
+ * invocation. The result structure is exactly 24 bytes, and each invocation
+ * of the ZIP coprocessor produces exactly one result structure.
+ */
+union zip_zres_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u[8];
+ /** ZIP Result Structure */
+ struct zip_zres_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** crc32 checksum of uncompressed stream */
+ uint64_t crc32 : 32;
+ /** adler32 checksum of uncompressed stream*/
+ uint64_t adler32 : 32;
+#else /* Word 0 - Little Endian */
+ uint64_t adler32 : 32;
+ uint64_t crc32 : 32;
+#endif /* Word 0 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** Total numer of Bytes produced in output stream */
+ uint64_t totalbyteswritten : 32;
+ /** Total number of bytes processed from the input stream */
+ uint64_t totalbytesread : 32;
+#else /* Word 1 - Little Endian */
+ uint64_t totalbytesread : 32;
+ uint64_t totalbyteswritten : 32;
+#endif /* Word 1 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ /** Total number of compressed input bits
+ * consumed to decompress all blocks in the file
+ */
+ uint64_t totalbitsprocessed : 32;
+ /** Done interrupt*/
+ uint64_t doneint : 1;
+ /** reserved */
+ uint64_t reserved_155_158 : 4;
+ /** EXNUM */
+ uint64_t exn : 3;
+ /** reserved */
+ uint64_t reserved_151 : 1;
+ /** EXBITS */
+ uint64_t exbits : 7;
+ /** reserved */
+ uint64_t reserved_137_143 : 7;
+ /** End of file */
+ uint64_t ef : 1;
+ /** Completion/error code */
+ uint64_t compcode : 8;
+#else /* Word 2 - Little Endian */
+ uint64_t compcode : 8;
+ uint64_t ef : 1;
+ uint64_t reserved_137_143 : 7;
+ uint64_t exbits : 7;
+ uint64_t reserved_151 : 1;
+ uint64_t exn : 3;
+ uint64_t reserved_155_158 : 4;
+ uint64_t doneint : 1;
+ uint64_t totalbitsprocessed : 32;
+#endif /* Word 2 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 3 - Big Endian */
+ /** reserved */
+ uint64_t reserved_253_255 : 3;
+ /** Hash length in bytes */
+ uint64_t hshlen : 61;
+#else /* Word 3 - Little Endian */
+ uint64_t hshlen : 61;
+ uint64_t reserved_253_255 : 3;
+#endif /* Word 3 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 4 - Big Endian */
+ /** Double-word 0 of computed hash */
+ uint64_t hash0 : 64;
+#else /* Word 4 - Little Endian */
+ uint64_t hash0 : 64;
+#endif /* Word 4 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 5 - Big Endian */
+ /** Double-word 1 of computed hash */
+ uint64_t hash1 : 64;
+#else /* Word 5 - Little Endian */
+ uint64_t hash1 : 64;
+#endif /* Word 5 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 6 - Big Endian */
+ /** Double-word 2 of computed hash */
+ uint64_t hash2 : 64;
+#else /* Word 6 - Little Endian */
+ uint64_t hash2 : 64;
+#endif /* Word 6 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 7 - Big Endian */
+ /** Double-word 3 of computed hash */
+ uint64_t hash3 : 64;
+#else /* Word 7 - Little Endian */
+ uint64_t hash3 : 64;
+#endif /* Word 7 - End */
+ } /** ZIP Result Structure */s;
+
+ /* struct zip_zres_s_s cn83xx; */
+};
+
+/**
+ * Structure zip_zptr_s
+ *
+ * ZIP Generic Pointer Structure
+ * This structure is the generic format of pointers in ZIP_INST_S.
+ */
+union zip_zptr_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u[2];
+ /** ZIP Generic Pointer Structure */
+ struct zip_zptr_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** Pointer to Data or scatter-gather list */
+ uint64_t addr : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64;
+#endif /* Word 0 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** reserved */
+ uint64_t reserved_112_127 : 16;
+ /** Length of Data or scatter-gather list*/
+ uint64_t length : 16;
+ /** reserved */
+ uint64_t reserved_67_95 : 29;
+ /** Full-block write */
+ uint64_t fw : 1;
+ /** No cache allocation */
+ uint64_t nc : 1;
+ /** reserved */
+ uint64_t data_be : 1;
+#else /* Word 1 - Little Endian */
+ uint64_t data_be : 1;
+ uint64_t nc : 1;
+ uint64_t fw : 1;
+ uint64_t reserved_67_95 : 29;
+ uint64_t length : 16;
+ uint64_t reserved_112_127 : 16;
+#endif /* Word 1 - End */
+ } /** ZIP Generic Pointer Structure */s;
+};
+
+/**
+ * Enumeration zip_comp_e
+ *
+ * ZIP Completion Enumeration
+ * Enumerates the values of ZIP_ZRES_S[COMPCODE].
+ */
+#define ZIP_COMP_E_NOTDONE (0)
+#define ZIP_COMP_E_SUCCESS (1)
+#define ZIP_COMP_E_DTRUNC (2)
+#define ZIP_COMP_E_DSTOP (3)
+#define ZIP_COMP_E_ITRUNC (4)
+#define ZIP_COMP_E_RBLOCK (5)
+#define ZIP_COMP_E_NLEN (6)
+#define ZIP_COMP_E_BADCODE (7)
+#define ZIP_COMP_E_BADCODE2 (8)
+#define ZIP_COMP_E_ZERO_LEN (9)
+#define ZIP_COMP_E_PARITY (0xa)
+#define ZIP_COMP_E_FATAL (0xb)
+#define ZIP_COMP_E_TIMEOUT (0xc)
+#define ZIP_COMP_E_INSTR_ERR (0xd)
+#define ZIP_COMP_E_HCTX_ERR (0xe)
+#define ZIP_COMP_E_STOP (3)
+
+/**
+ * Enumeration zip_op_e
+ *
+ * ZIP Operation Enumeration
+ * Enumerates ZIP_INST_S[OP].
+ * Internal:
+ */
+#define ZIP_OP_E_DECOMP (0)
+#define ZIP_OP_E_NOCOMP (1)
+#define ZIP_OP_E_COMP (2)
+
+/**
+ * Enumeration zip compression levels
+ *
+ * ZIP Compression Level Enumeration
+ * Enumerates ZIP_INST_S[SS].
+ * Internal:
+ */
+#define ZIP_COMP_E_LEVEL_MAX (0)
+#define ZIP_COMP_E_LEVEL_MED (1)
+#define ZIP_COMP_E_LEVEL_LOW (2)
+#define ZIP_COMP_E_LEVEL_MIN (3)
+
+#endif /* _RTE_ZIP_REGS_H_ */
diff --git a/src/spdk/dpdk/drivers/compress/octeontx/meson.build b/src/spdk/dpdk/drivers/compress/octeontx/meson.build
new file mode 100644
index 00000000..7cd202d0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/octeontx/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+name = 'octeontx_compress'
+sources = files('otx_zip.c', 'otx_zip_pmd.c')
+allow_experimental_apis = true
+includes += include_directories('include')
+deps += ['mempool_octeontx', 'bus_pci']
+ext_deps += dep
diff --git a/src/spdk/dpdk/drivers/compress/octeontx/otx_zip.c b/src/spdk/dpdk/drivers/compress/octeontx/otx_zip.c
new file mode 100644
index 00000000..a9046ff3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/octeontx/otx_zip.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include "otx_zip.h"
+
+uint64_t
+zip_reg_read64(uint8_t *hw_addr, uint64_t offset)
+{
+ uint8_t *base = hw_addr;
+ return *(volatile uint64_t *)(base + offset);
+}
+
+void
+zip_reg_write64(uint8_t *hw_addr, uint64_t offset, uint64_t val)
+{
+ uint8_t *base = hw_addr;
+ *(uint64_t *)(base + offset) = val;
+}
+
+static void
+zip_q_enable(struct zipvf_qp *qp)
+{
+ zip_vqx_ena_t que_ena;
+
+ /*ZIP VFx command queue init*/
+ que_ena.u = 0ull;
+ que_ena.s.ena = 1;
+
+ zip_reg_write64(qp->vf->vbar0, ZIP_VQ_ENA, que_ena.u);
+ rte_wmb();
+}
+
+/* initialize given qp on zip device */
+int
+zipvf_q_init(struct zipvf_qp *qp)
+{
+ zip_vqx_sbuf_addr_t que_sbuf_addr;
+
+ uint64_t size;
+ void *cmdq_addr;
+ uint64_t iova;
+ struct zipvf_cmdq *cmdq = &qp->cmdq;
+ struct zip_vf *vf = qp->vf;
+
+ /* allocate and setup instruction queue */
+ size = ZIP_MAX_CMDQ_SIZE;
+ size = ZIP_ALIGN_ROUNDUP(size, ZIP_CMDQ_ALIGN);
+
+ cmdq_addr = rte_zmalloc(qp->name, size, ZIP_CMDQ_ALIGN);
+ if (cmdq_addr == NULL)
+ return -1;
+
+ cmdq->sw_head = (uint64_t *)cmdq_addr;
+ cmdq->va = (uint8_t *)cmdq_addr;
+ iova = rte_mem_virt2iova(cmdq_addr);
+
+ cmdq->iova = iova;
+
+ que_sbuf_addr.u = 0ull;
+ que_sbuf_addr.s.ptr = (cmdq->iova >> 7);
+ zip_reg_write64(vf->vbar0, ZIP_VQ_SBUF_ADDR, que_sbuf_addr.u);
+
+ zip_q_enable(qp);
+
+ memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
+ rte_spinlock_init(&cmdq->qlock);
+
+ return 0;
+}
+
+int
+zipvf_q_term(struct zipvf_qp *qp)
+{
+ struct zipvf_cmdq *cmdq = &qp->cmdq;
+ zip_vqx_ena_t que_ena;
+ struct zip_vf *vf = qp->vf;
+
+ if (cmdq->va != NULL) {
+ memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
+ rte_free(cmdq->va);
+ }
+
+ /*Disabling the ZIP queue*/
+ que_ena.u = 0ull;
+ zip_reg_write64(vf->vbar0, ZIP_VQ_ENA, que_ena.u);
+
+ return 0;
+}
+
+void
+zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *cmd)
+{
+ zip_quex_doorbell_t dbell;
+ union zip_nptr_s ncp;
+ uint64_t *ncb_ptr;
+ struct zipvf_cmdq *cmdq = &qp->cmdq;
+ void *reg_base = qp->vf->vbar0;
+
+ /*Held queue lock*/
+ rte_spinlock_lock(&(cmdq->qlock));
+
+ /* Check space availability in zip cmd queue */
+ if ((((cmdq->sw_head - (uint64_t *)cmdq->va) * sizeof(uint64_t *)) +
+ ZIP_CMD_SIZE) == (ZIP_MAX_CMDQ_SIZE - ZIP_MAX_NCBP_SIZE)) {
+ /*Last buffer of the command queue*/
+ memcpy((uint8_t *)cmdq->sw_head,
+ (uint8_t *)cmd,
+ sizeof(union zip_inst_s));
+ /* move pointer to next loc in unit of 64-bit word */
+ cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
+
+ /* now, point the "Next-Chunk Buffer Ptr" to sw_head */
+ ncb_ptr = cmdq->sw_head;
+ /* Pointing head again to cmdqueue base*/
+ cmdq->sw_head = (uint64_t *)cmdq->va;
+
+ ncp.u = 0ull;
+ ncp.s.addr = cmdq->iova;
+ *ncb_ptr = ncp.u;
+ } else {
+ /*Enough buffers available in the command queue*/
+ memcpy((uint8_t *)cmdq->sw_head,
+ (uint8_t *)cmd,
+ sizeof(union zip_inst_s));
+ cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
+ }
+
+ rte_wmb();
+
+ /* Ringing ZIP VF doorbell */
+ dbell.u = 0ull;
+ dbell.s.dbell_cnt = 1;
+ zip_reg_write64(reg_base, ZIP_VQ_DOORBELL, dbell.u);
+
+ rte_spinlock_unlock(&(cmdq->qlock));
+}
+
+int
+zipvf_create(struct rte_compressdev *compressdev)
+{
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(compressdev->device);
+ struct zip_vf *zipvf = NULL;
+ char *dev_name = compressdev->data->name;
+ void *vbar0;
+ uint64_t reg;
+
+ if (pdev->mem_resource[0].phys_addr == 0ULL)
+ return -EIO;
+
+ vbar0 = pdev->mem_resource[0].addr;
+ if (!vbar0) {
+ ZIP_PMD_ERR("Failed to map BAR0 of %s", dev_name);
+ return -ENODEV;
+ }
+
+ zipvf = (struct zip_vf *)(compressdev->data->dev_private);
+
+ if (!zipvf)
+ return -ENOMEM;
+
+ zipvf->vbar0 = vbar0;
+ reg = zip_reg_read64(zipvf->vbar0, ZIP_VF_PF_MBOXX(0));
+ /* Storing domain in local to ZIP VF */
+ zipvf->dom_sdom = reg;
+ zipvf->pdev = pdev;
+ zipvf->max_nb_queue_pairs = ZIP_MAX_VF_QUEUE;
+ return 0;
+}
+
+int
+zipvf_destroy(struct rte_compressdev *compressdev)
+{
+ struct zip_vf *vf = (struct zip_vf *)(compressdev->data->dev_private);
+
+ /* Rewriting the domain_id in ZIP_VF_MBOX for app rerun */
+ zip_reg_write64(vf->vbar0, ZIP_VF_PF_MBOXX(0), vf->dom_sdom);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/compress/octeontx/otx_zip.h b/src/spdk/dpdk/drivers/compress/octeontx/otx_zip.h
new file mode 100644
index 00000000..99a38d00
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/octeontx/otx_zip.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _RTE_OCTEONTX_ZIP_VF_H_
+#define _RTE_OCTEONTX_ZIP_VF_H_
+
+#include <unistd.h>
+
+#include <rte_bus_pci.h>
+#include <rte_comp.h>
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+
+#include <zip_regs.h>
+
+int octtx_zip_logtype_driver;
+
+/* ZIP VF Control/Status registers (CSRs): */
+/* VF_BAR0: */
+#define ZIP_VQ_ENA (0x10)
+#define ZIP_VQ_SBUF_ADDR (0x20)
+#define ZIP_VF_PF_MBOXX(x) (0x400 | (x)<<3)
+#define ZIP_VQ_DOORBELL (0x1000)
+
+/**< Vendor ID */
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+/**< PCI device id of ZIP VF */
+#define PCI_DEVICE_ID_OCTEONTX_ZIPVF 0xA037
+
+/* maxmum number of zip vf devices */
+#define ZIP_MAX_VFS 8
+
+/* max size of one chunk */
+#define ZIP_MAX_CHUNK_SIZE 8192
+
+/* each instruction is fixed 128 bytes */
+#define ZIP_CMD_SIZE 128
+
+#define ZIP_CMD_SIZE_WORDS (ZIP_CMD_SIZE >> 3) /* 16 64_bit words */
+
+/* size of next chunk buffer pointer */
+#define ZIP_MAX_NCBP_SIZE 8
+
+/* size of instruction queue in units of instruction size */
+#define ZIP_MAX_NUM_CMDS ((ZIP_MAX_CHUNK_SIZE - ZIP_MAX_NCBP_SIZE) / \
+ ZIP_CMD_SIZE) /* 63 */
+
+/* size of instruct queue in bytes */
+#define ZIP_MAX_CMDQ_SIZE ((ZIP_MAX_NUM_CMDS * ZIP_CMD_SIZE) + \
+ ZIP_MAX_NCBP_SIZE)/* ~8072ull */
+
+#define ZIP_BUF_SIZE 256
+
+#define ZIP_SGPTR_ALIGN 16
+#define ZIP_CMDQ_ALIGN 128
+#define MAX_SG_LEN ((ZIP_BUF_SIZE - ZIP_SGPTR_ALIGN) / sizeof(void *))
+
+/**< ZIP PMD specified queue pairs */
+#define ZIP_MAX_VF_QUEUE 1
+
+#define ZIP_ALIGN_ROUNDUP(x, _align) \
+ ((_align) * (((x) + (_align) - 1) / (_align)))
+
+/**< ZIP PMD device name */
+#define COMPRESSDEV_NAME_ZIP_PMD compress_octeonx
+
+#define ZIP_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, \
+ octtx_zip_logtype_driver, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+#define ZIP_PMD_INFO(fmt, args...) \
+ ZIP_PMD_LOG(INFO, fmt, ## args)
+#define ZIP_PMD_ERR(fmt, args...) \
+ ZIP_PMD_LOG(ERR, fmt, ## args)
+
+/* resources required to process stream */
+enum {
+ RES_BUF = 0,
+ CMD_BUF,
+ HASH_CTX_BUF,
+ DECOMP_CTX_BUF,
+ IN_DATA_BUF,
+ OUT_DATA_BUF,
+ HISTORY_DATA_BUF,
+ MAX_BUFS_PER_STREAM
+} NUM_BUFS_PER_STREAM;
+
+struct zip_stream;
+struct zipvf_qp;
+
+/* Algorithm handler function prototype */
+typedef int (*comp_func_t)(struct rte_comp_op *op,
+ struct zipvf_qp *qp, struct zip_stream *zstrm);
+
+/**
+ * ZIP private stream structure
+ */
+struct zip_stream {
+ union zip_inst_s *inst;
+ /* zip instruction pointer */
+ comp_func_t func;
+ /* function to process comp operation */
+ void *bufs[MAX_BUFS_PER_STREAM];
+} _rte_cache_aligned;
+
+
+/**
+ * ZIP instruction Queue
+ */
+struct zipvf_cmdq {
+ rte_spinlock_t qlock;
+ /* queue lock */
+ uint64_t *sw_head;
+ /* pointer to start of 8-byte word length queue-head */
+ uint8_t *va;
+ /* pointer to instruction queue virtual address */
+ rte_iova_t iova;
+ /* iova addr of cmdq head*/
+};
+
+/**
+ * ZIP device queue structure
+ */
+struct zipvf_qp {
+ struct zipvf_cmdq cmdq;
+ /* Hardware instruction queue structure */
+ struct rte_ring *processed_pkts;
+ /* Ring for placing processed packets */
+ struct rte_compressdev_stats qp_stats;
+ /* Queue pair statistics */
+ uint16_t id;
+ /* Queue Pair Identifier */
+ const char *name;
+ /* Unique Queue Pair Name */
+ struct zip_vf *vf;
+ /* pointer to device, queue belongs to */
+} __rte_cache_aligned;
+
+/**
+ * ZIP VF device structure.
+ */
+struct zip_vf {
+ int vfid;
+ /* vf index */
+ struct rte_pci_device *pdev;
+ /* pci device */
+ void *vbar0;
+ /* CSR base address for underlying BAR0 VF.*/
+ uint64_t dom_sdom;
+ /* Storing mbox domain and subdomain id for app rerun*/
+ uint32_t max_nb_queue_pairs;
+ /* pointer to device qps */
+ struct rte_mempool *zip_mp;
+ /* pointer to pools */
+} __rte_cache_aligned;
+
+
+static inline void
+zipvf_prepare_in_buf(struct zip_stream *zstrm, struct rte_comp_op *op)
+{
+ uint32_t offset, inlen;
+ struct rte_mbuf *m_src;
+ union zip_inst_s *inst = zstrm->inst;
+
+ inlen = op->src.length;
+ offset = op->src.offset;
+ m_src = op->m_src;
+
+ /* Prepare direct input data pointer */
+ inst->s.dg = 0;
+ inst->s.inp_ptr_addr.s.addr =
+ rte_pktmbuf_iova_offset(m_src, offset);
+ inst->s.inp_ptr_ctl.s.length = inlen;
+}
+
+static inline void
+zipvf_prepare_out_buf(struct zip_stream *zstrm, struct rte_comp_op *op)
+{
+ uint32_t offset;
+ struct rte_mbuf *m_dst;
+ union zip_inst_s *inst = zstrm->inst;
+
+ offset = op->dst.offset;
+ m_dst = op->m_dst;
+
+ /* Prepare direct input data pointer */
+ inst->s.ds = 0;
+ inst->s.out_ptr_addr.s.addr =
+ rte_pktmbuf_iova_offset(m_dst, offset);
+ inst->s.totaloutputlength = rte_pktmbuf_pkt_len(m_dst) -
+ op->dst.offset;
+ inst->s.out_ptr_ctl.s.length = inst->s.totaloutputlength;
+}
+
+static inline void
+zipvf_prepare_cmd_stateless(struct rte_comp_op *op, struct zip_stream *zstrm)
+{
+ union zip_inst_s *inst = zstrm->inst;
+
+ /* set flush flag to always 1*/
+ inst->s.ef = 1;
+
+ if (inst->s.op == ZIP_OP_E_DECOMP)
+ inst->s.sf = 1;
+ else
+ inst->s.sf = 0;
+
+ /* Set input checksum */
+ inst->s.adlercrc32 = op->input_chksum;
+
+ /* Prepare gather buffers */
+ zipvf_prepare_in_buf(zstrm, op);
+ zipvf_prepare_out_buf(zstrm, op);
+}
+
+#ifdef ZIP_DBG
+static inline void
+zip_dump_instruction(void *inst)
+{
+ union zip_inst_s *cmd83 = (union zip_inst_s *)inst;
+ printf("####### START ########\n");
+ printf("doneint:%d totaloutputlength:%d\n", cmd83->s.doneint,
+ cmd83->s.totaloutputlength);
+ printf("exnum:%d iv:%d exbits:%d hmif:%d halg:%d\n", cmd83->s.exn,
+ cmd83->s.iv, cmd83->s.exbits, cmd83->s.hmif, cmd83->s.halg);
+ printf("flush:%d speed:%d cc:%d\n", cmd83->s.sf,
+ cmd83->s.ss, cmd83->s.cc);
+ printf("eof:%d bof:%d op:%d dscatter:%d dgather:%d hgather:%d\n",
+ cmd83->s.ef, cmd83->s.bf, cmd83->s.op, cmd83->s.ds,
+ cmd83->s.dg, cmd83->s.hg);
+ printf("historylength:%d adler32:%d\n", cmd83->s.historylength,
+ cmd83->s.adlercrc32);
+ printf("ctx_ptr.addr:0x%"PRIx64"\n", cmd83->s.ctx_ptr_addr.s.addr);
+ printf("ctx_ptr.len:%d\n", cmd83->s.ctx_ptr_ctl.s.length);
+ printf("history_ptr.addr:0x%"PRIx64"\n", cmd83->s.his_ptr_addr.s.addr);
+ printf("history_ptr.len:%d\n", cmd83->s.his_ptr_ctl.s.length);
+ printf("inp_ptr.addr:0x%"PRIx64"\n", cmd83->s.inp_ptr_addr.s.addr);
+ printf("inp_ptr.len:%d\n", cmd83->s.inp_ptr_ctl.s.length);
+ printf("out_ptr.addr:0x%"PRIx64"\n", cmd83->s.out_ptr_addr.s.addr);
+ printf("out_ptr.len:%d\n", cmd83->s.out_ptr_ctl.s.length);
+ printf("result_ptr.len:%d\n", cmd83->s.res_ptr_ctl.s.length);
+ printf("####### END ########\n");
+}
+#endif
+
+int
+zipvf_create(struct rte_compressdev *compressdev);
+
+int
+zipvf_destroy(struct rte_compressdev *compressdev);
+
+int
+zipvf_q_init(struct zipvf_qp *qp);
+
+int
+zipvf_q_term(struct zipvf_qp *qp);
+
+void
+zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *zcmd);
+
+int
+zip_process_op(struct rte_comp_op *op,
+ struct zipvf_qp *qp,
+ struct zip_stream *zstrm);
+
+uint64_t
+zip_reg_read64(uint8_t *hw_addr, uint64_t offset);
+
+void
+zip_reg_write64(uint8_t *hw_addr, uint64_t offset, uint64_t val);
+
+#endif /* _RTE_ZIP_VF_H_ */
diff --git a/src/spdk/dpdk/drivers/compress/octeontx/otx_zip_pmd.c b/src/spdk/dpdk/drivers/compress/octeontx/otx_zip_pmd.c
new file mode 100644
index 00000000..9d13f933
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/octeontx/otx_zip_pmd.c
@@ -0,0 +1,658 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cpuflags.h>
+#include <rte_malloc.h>
+
+#include "otx_zip.h"
+
+static const struct rte_compressdev_capabilities
+ octtx_zip_pmd_capabilities[] = {
+ { .algo = RTE_COMP_ALGO_DEFLATE,
+ /* Deflate */
+ .comp_feature_flags = RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC,
+ /* Non sharable Priv XFORM and Stateless */
+ .window_size = {
+ .min = 1,
+ .max = 14,
+ .increment = 1
+ /* size supported 2^1 to 2^14 */
+ },
+ },
+ RTE_COMP_END_OF_CAPABILITIES_LIST()
+};
+
+/*
+ * Reset session to default state for next set of stateless operation
+ */
+static inline void
+reset_stream(struct zip_stream *z_stream)
+{
+ union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);
+
+ inst->s.bf = 1;
+ inst->s.ef = 0;
+}
+
+int
+zip_process_op(struct rte_comp_op *op,
+ struct zipvf_qp *qp,
+ struct zip_stream *zstrm)
+{
+ union zip_inst_s *inst = zstrm->inst;
+ volatile union zip_zres_s *zresult = NULL;
+
+
+ if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
+ (op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
+ (op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ZIP_PMD_ERR("Segmented packet is not supported\n");
+ return 0;
+ }
+
+ zipvf_prepare_cmd_stateless(op, zstrm);
+
+ zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
+ zresult->s.compcode = 0;
+
+#ifdef ZIP_DBG
+ zip_dump_instruction(inst);
+#endif
+
+ /* Submit zip command */
+ zipvf_push_command(qp, (void *)inst);
+
+ /* Check and Process results in sync mode */
+ do {
+ } while (!zresult->s.compcode);
+
+ if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ } else {
+ /* FATAL error cannot do anything */
+ ZIP_PMD_ERR("operation failed with error code:%d\n",
+ zresult->s.compcode);
+ if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ else
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ }
+
+ ZIP_PMD_INFO("written %d\n", zresult->s.totalbyteswritten);
+
+ /* Update op stats */
+ switch (op->status) {
+ case RTE_COMP_OP_STATUS_SUCCESS:
+ op->consumed = zresult->s.totalbytesread;
+ /* Fall-through */
+ case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+ op->produced = zresult->s.totalbyteswritten;
+ break;
+ default:
+ ZIP_PMD_ERR("stats not updated for status:%d\n",
+ op->status);
+ break;
+ }
+ /* zstream is reset irrespective of result */
+ reset_stream(zstrm);
+
+ zresult->s.compcode = ZIP_COMP_E_NOTDONE;
+ return 0;
+}
+
+/** Parse xform parameters and setup a stream */
+static int
+zip_set_stream_parameters(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ struct zip_stream *z_stream)
+{
+ int ret;
+ union zip_inst_s *inst;
+ struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+ void *res;
+
+ /* Allocate resources required by a stream */
+ ret = rte_mempool_get_bulk(vf->zip_mp,
+ z_stream->bufs, MAX_BUFS_PER_STREAM);
+ if (ret < 0)
+ return -1;
+
+ /* get one command buffer from pool and set up */
+ inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
+ res = z_stream->bufs[RES_BUF];
+
+ memset(inst->u, 0, sizeof(inst->u));
+
+ /* set bf for only first ops of stream */
+ inst->s.bf = 1;
+
+ if (xform->type == RTE_COMP_COMPRESS) {
+ inst->s.op = ZIP_OP_E_COMP;
+
+ switch (xform->compress.deflate.huffman) {
+ case RTE_COMP_HUFFMAN_DEFAULT:
+ inst->s.cc = ZIP_CC_DEFAULT;
+ break;
+ case RTE_COMP_HUFFMAN_FIXED:
+ inst->s.cc = ZIP_CC_FIXED_HUFF;
+ break;
+ case RTE_COMP_HUFFMAN_DYNAMIC:
+ inst->s.cc = ZIP_CC_DYN_HUFF;
+ break;
+ default:
+ ret = -1;
+ goto err;
+ }
+
+ switch (xform->compress.level) {
+ case RTE_COMP_LEVEL_MIN:
+ inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
+ break;
+ case RTE_COMP_LEVEL_MAX:
+ inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
+ break;
+ case RTE_COMP_LEVEL_NONE:
+ ZIP_PMD_ERR("Compression level not supported");
+ ret = -1;
+ goto err;
+ default:
+ /* for any value between min and max , choose
+ * PMD default.
+ */
+ inst->s.ss = ZIP_COMP_E_LEVEL_MED; /** PMD default **/
+ break;
+ }
+ } else if (xform->type == RTE_COMP_DECOMPRESS) {
+ inst->s.op = ZIP_OP_E_DECOMP;
+ /* from HRM,
+ * For DEFLATE decompression, [CC] must be 0x0.
+ * For decompression, [SS] must be 0x0
+ */
+ inst->s.cc = 0;
+ /* Speed bit should not be set for decompression */
+ inst->s.ss = 0;
+ /* decompression context is supported only for STATEFUL
+ * operations. Currently we support STATELESS ONLY so
+ * skip setting of ctx pointer
+ */
+
+ } else {
+ ZIP_PMD_ERR("\nxform type not supported");
+ ret = -1;
+ goto err;
+ }
+
+ inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
+ inst->s.res_ptr_ctl.s.length = 0;
+
+ z_stream->inst = inst;
+ z_stream->func = zip_process_op;
+
+ return 0;
+
+err:
+ rte_mempool_put_bulk(vf->zip_mp,
+ (void *)&(z_stream->bufs[0]),
+ MAX_BUFS_PER_STREAM);
+
+ return ret;
+}
+
+/** Configure device */
+static int
+zip_pmd_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ int nb_streams;
+ char res_pool[RTE_MEMZONE_NAMESIZE];
+ struct zip_vf *vf;
+ struct rte_mempool *zip_buf_mp;
+
+ if (!config || !dev)
+ return -EIO;
+
+ vf = (struct zip_vf *)(dev->data->dev_private);
+
+ /* create pool with maximum numbers of resources
+ * required by streams
+ */
+
+ /* use common pool for non-shareable priv_xform and stream */
+ nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;
+
+ snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
+ dev->data->dev_id);
+
+ /** TBD Should we use the per core object cache for stream resources */
+ zip_buf_mp = rte_mempool_create(
+ res_pool,
+ nb_streams * MAX_BUFS_PER_STREAM,
+ ZIP_BUF_SIZE,
+ 0,
+ 0,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ SOCKET_ID_ANY,
+ 0);
+
+ if (zip_buf_mp == NULL) {
+ ZIP_PMD_ERR(
+ "Failed to create buf mempool octtx_zip_res_pool%u",
+ dev->data->dev_id);
+ return -1;
+ }
+
+ vf->zip_mp = zip_buf_mp;
+
+ return 0;
+}
+
+/** Start device */
+static int
+zip_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+
+}
+
+/** Close device */
+static int
+zip_pmd_close(struct rte_compressdev *dev)
+{
+ if (dev == NULL)
+ return -1;
+
+ struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+ rte_mempool_free(vf->zip_mp);
+
+ return 0;
+}
+
+/** Get device statistics */
+static void
+zip_pmd_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+zip_pmd_stats_reset(struct rte_compressdev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+/** Get device info */
+static void
+zip_pmd_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *dev_info)
+{
+ struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_name = dev->device->driver->name;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = octtx_zip_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
+ }
+}
+
+/** Release queue pair */
+static int
+zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp != NULL) {
+ zipvf_q_term(qp);
+
+ if (qp->processed_pkts)
+ rte_ring_free(qp->processed_pkts);
+
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ZIP_PMD_INFO("Reusing existing ring %s for processed"
+ " packets", qp->name);
+ return r;
+ }
+
+ ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
+ " packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_EXACT_SZ);
+}
+
+/** Setup a queue pair */
+static int
+zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct zipvf_qp *qp = NULL;
+ struct zip_vf *vf;
+ char *name;
+ int ret;
+
+ if (!dev)
+ return -1;
+
+ vf = (struct zip_vf *) (dev->data->dev_private);
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
+ return 0;
+ }
+
+ name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
+ snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+ "zip_pmd_%u_qp_%u",
+ dev->data->dev_id, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket(name, sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->name = name;
+
+ /* Create completion queue upto max_inflight_ops */
+ qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
+ max_inflight_ops, socket_id);
+ if (qp->processed_pkts == NULL)
+ goto qp_setup_cleanup;
+
+ qp->id = qp_id;
+ qp->vf = vf;
+
+ ret = zipvf_q_init(qp);
+ if (ret < 0)
+ goto qp_setup_cleanup;
+
+ dev->data->queue_pairs[qp_id] = qp;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ if (qp->processed_pkts)
+ rte_ring_free(qp->processed_pkts);
+ if (qp)
+ rte_free(qp);
+ return -1;
+}
+
+static int
+zip_pmd_stream_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform, void **stream)
+{
+ int ret;
+ struct zip_stream *strm = NULL;
+
+ strm = rte_malloc(NULL,
+ sizeof(struct zip_stream), 0);
+
+ if (strm == NULL)
+ return (-ENOMEM);
+
+ ret = zip_set_stream_parameters(dev, xform, strm);
+ if (ret < 0) {
+ ZIP_PMD_ERR("failed configure xform parameters");
+ rte_free(strm);
+ return ret;
+ }
+ *stream = strm;
+ return 0;
+}
+
+static int
+zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
+{
+ struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
+ struct zip_stream *z_stream;
+
+ if (stream == NULL)
+ return 0;
+
+ z_stream = (struct zip_stream *)stream;
+
+ /* Free resources back to pool */
+ rte_mempool_put_bulk(vf->zip_mp,
+ (void *)&(z_stream->bufs[0]),
+ MAX_BUFS_PER_STREAM);
+
+ /* Zero out the whole structure */
+ memset(stream, 0, sizeof(struct zip_stream));
+ rte_free(stream);
+
+ return 0;
+}
+
+
+static uint16_t
+zip_pmd_enqueue_burst_sync(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zipvf_qp *qp = queue_pair;
+ struct rte_comp_op *op;
+ struct zip_stream *zstrm;
+ int i, ret = 0;
+ uint16_t enqd = 0;
+
+ for (i = 0; i < nb_ops; i++) {
+ op = ops[i];
+
+ if (op->op_type == RTE_COMP_OP_STATEFUL) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ } else {
+ /* process stateless ops */
+ zstrm = (struct zip_stream *)op->private_xform;
+ if (unlikely(zstrm == NULL))
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ else
+ ret = zstrm->func(op, qp, zstrm);
+ }
+
+ /* Whatever is out of op, put it into completion queue with
+ * its status
+ */
+ if (!ret)
+ ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);
+
+ if (unlikely(ret < 0)) {
+ /* increment count if failed to enqueue op*/
+ qp->qp_stats.enqueue_err_count++;
+ } else {
+ qp->qp_stats.enqueued_count++;
+ enqd++;
+ }
+ }
+ return enqd;
+}
+
+static uint16_t
+zip_pmd_dequeue_burst_sync(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zipvf_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+struct rte_compressdev_ops octtx_zip_pmd_ops = {
+ .dev_configure = zip_pmd_config,
+ .dev_start = zip_pmd_start,
+ .dev_stop = zip_pmd_stop,
+ .dev_close = zip_pmd_close,
+
+ .stats_get = zip_pmd_stats_get,
+ .stats_reset = zip_pmd_stats_reset,
+
+ .dev_infos_get = zip_pmd_info_get,
+
+ .queue_pair_setup = zip_pmd_qp_setup,
+ .queue_pair_release = zip_pmd_qp_release,
+
+ .private_xform_create = zip_pmd_stream_create,
+ .private_xform_free = zip_pmd_stream_free,
+ .stream_create = NULL,
+ .stream_free = NULL
+};
+
+static int
+zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ int ret = 0;
+ char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ struct rte_compressdev *compressdev;
+ struct rte_compressdev_pmd_init_params init_params = {
+ "",
+ rte_socket_id(),
+ };
+
+ ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
+ (unsigned int)pci_dev->id.vendor_id,
+ (unsigned int)pci_dev->id.device_id);
+
+ rte_pci_device_name(&pci_dev->addr, compressdev_name,
+ sizeof(compressdev_name));
+
+ compressdev = rte_compressdev_pmd_create(compressdev_name,
+ &pci_dev->device, sizeof(struct zip_vf), &init_params);
+ if (compressdev == NULL) {
+ ZIP_PMD_ERR("driver %s: create failed", init_params.name);
+ return -ENODEV;
+ }
+
+ /*
+ * create only if proc_type is primary.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* create vf dev with given pmd dev id */
+ ret = zipvf_create(compressdev);
+ if (ret < 0) {
+ ZIP_PMD_ERR("Device creation failed");
+ rte_compressdev_pmd_destroy(compressdev);
+ return ret;
+ }
+ }
+
+ compressdev->dev_ops = &octtx_zip_pmd_ops;
+ /* register rx/tx burst functions for data path */
+ compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
+ compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
+ compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+ return ret;
+}
+
+static int
+zip_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct rte_compressdev *compressdev;
+ char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL) {
+ ZIP_PMD_ERR(" Invalid PCI Device\n");
+ return -EINVAL;
+ }
+ rte_pci_device_name(&pci_dev->addr, compressdev_name,
+ sizeof(compressdev_name));
+
+ compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (zipvf_destroy(compressdev) < 0)
+ return -ENODEV;
+ }
+ return rte_compressdev_pmd_destroy(compressdev);
+}
+
+static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_ZIPVF),
+ },
+ {
+ .device_id = 0
+ },
+};
+
+/**
+ * Structure that represents a PCI driver
+ */
+static struct rte_pci_driver octtx_zip_pmd = {
+ .id_table = pci_id_octtx_zipvf_table,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = zip_pci_probe,
+ .remove = zip_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
+
+RTE_INIT(octtx_zip_init_log);
+
+static void
+octtx_zip_init_log(void)
+{
+ octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
+ if (octtx_zip_logtype_driver >= 0)
+ rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);
+}
diff --git a/src/spdk/dpdk/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map b/src/spdk/dpdk/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map
new file mode 100644
index 00000000..ad6e191e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map
@@ -0,0 +1,3 @@
+DPDK_18.08 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/compress/qat/meson.build b/src/spdk/dpdk/drivers/compress/qat/meson.build
new file mode 100644
index 00000000..9d15076d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/qat/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+
+# Add our sources files to the list
+allow_experimental_apis = true
+qat_sources += files('qat_comp_pmd.c',
+ 'qat_comp.c')
+qat_includes += include_directories('.')
+qat_deps += 'compressdev'
+qat_ext_deps += dep
+
+# build the whole driver
+sources += qat_sources
+cflags += qat_cflags
+deps += qat_deps
+ext_deps += qat_ext_deps
+includes += qat_includes
diff --git a/src/spdk/dpdk/drivers/compress/qat/qat_comp.c b/src/spdk/dpdk/drivers/compress/qat/qat_comp.c
new file mode 100644
index 00000000..38c8a5b8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/qat/qat_comp.c
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_hexdump.h>
+#include <rte_comp.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+#include <rte_memcpy.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include "qat_logs.h"
+#include "qat_comp.h"
+#include "qat_comp_pmd.h"
+
+
+int
+qat_comp_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie,
+ enum qat_device_gen qat_dev_gen __rte_unused)
+{
+ struct rte_comp_op *op = in_op;
+ struct qat_comp_op_cookie *cookie =
+ (struct qat_comp_op_cookie *)op_cookie;
+ struct qat_comp_xform *qat_xform = op->private_xform;
+ const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
+ struct icp_qat_fw_comp_req *comp_req =
+ (struct icp_qat_fw_comp_req *)out_msg;
+
+ if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
+ "operation requests, op (%p) is not a "
+ "stateless operation.", op);
+ return -EINVAL;
+ }
+
+ rte_mov128(out_msg, tmpl);
+ comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+
+ /* common for sgl and flat buffers */
+ comp_req->comp_pars.comp_len = op->src.length;
+ comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
+ op->dst.offset;
+
+ if (op->m_src->next != NULL || op->m_dst->next != NULL) {
+ /* sgl */
+ int ret = 0;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+
+ ret = qat_sgl_fill_array(op->m_src,
+ op->src.offset,
+ &cookie->qat_sgl_src,
+ op->src.length,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
+ return ret;
+ }
+
+ ret = qat_sgl_fill_array(op->m_dst,
+ op->dst.offset,
+ &cookie->qat_sgl_dst,
+ comp_req->comp_pars.out_buffer_sz,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
+ return ret;
+ }
+
+ comp_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ comp_req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_dst_phys_addr;
+ comp_req->comn_mid.src_length = 0;
+ comp_req->comn_mid.dst_length = 0;
+
+ } else {
+ /* flat aka linear buffer */
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_FLAT);
+ comp_req->comn_mid.src_length = op->src.length;
+ comp_req->comn_mid.dst_length =
+ comp_req->comp_pars.out_buffer_sz;
+
+ comp_req->comn_mid.src_data_addr =
+ rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
+ comp_req->comn_mid.dest_data_addr =
+ rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(DEBUG, "Direction: %s",
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
+ "decompression" : "compression");
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
+ sizeof(struct icp_qat_fw_comp_req));
+#endif
+ return 0;
+}
+
+int
+qat_comp_process_response(void **op, uint8_t *resp)
+{
+ struct icp_qat_fw_comp_resp *resp_msg =
+ (struct icp_qat_fw_comp_resp *)resp;
+ struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+ struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
+ (rx_op->private_xform);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(DEBUG, "Direction: %s",
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
+ "decompression" : "compression");
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comp_resp));
+#endif
+
+ if (likely(qat_xform->qat_comp_request_type
+ != QAT_COMP_REQUEST_DECOMPRESS)) {
+ if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
+ resp_msg->comn_resp.hdr_flags)
+ == ICP_QAT_FW_COMP_NO_CNV)) {
+ rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+ rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
+ *op = (void *)rx_op;
+ QAT_DP_LOG(ERR, "QAT has wrong firmware");
+ return 0;
+ }
+ }
+
+ if ((ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(resp_msg->comn_resp.comn_status)
+ | ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(
+ resp_msg->comn_resp.comn_status)) !=
+ ICP_QAT_FW_COMN_STATUS_FLAG_OK) {
+
+ rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+ rx_op->debug_status =
+ *((uint16_t *)(&resp_msg->comn_resp.comn_error));
+ } else {
+ struct qat_comp_xform *qat_xform = rx_op->private_xform;
+ struct icp_qat_fw_resp_comp_pars *comp_resp =
+ (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;
+
+ rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ rx_op->consumed = comp_resp->input_byte_counter;
+ rx_op->produced = comp_resp->output_byte_counter;
+
+ if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
+ if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
+ rx_op->output_chksum = comp_resp->curr_crc32;
+ else if (qat_xform->checksum_type ==
+ RTE_COMP_CHECKSUM_ADLER32)
+ rx_op->output_chksum = comp_resp->curr_adler_32;
+ else
+ rx_op->output_chksum = comp_resp->curr_chksum;
+ }
+ }
+ *op = (void *)rx_op;
+
+ return 0;
+}
+
+unsigned int
+qat_comp_xform_size(void)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
+}
+
+static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_comp_request_type request)
+{
+ if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+ else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ else if (request == QAT_COMP_REQUEST_DECOMPRESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+ header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
+ QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
+}
+
+static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
+ const struct rte_memzone *interm_buff_mz __rte_unused,
+ const struct rte_comp_xform *xform)
+{
+ struct icp_qat_fw_comp_req *comp_req;
+ int comp_level, algo;
+ uint32_t req_par_flags;
+ int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;
+
+ if (unlikely(qat_xform == NULL)) {
+ QAT_LOG(ERR, "Session was not created for this device");
+ return -EINVAL;
+ }
+
+ if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
+ direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
+ req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV,
+ ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
+
+ } else {
+ if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
+ else if (xform->compress.level == 1)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
+ else if (xform->compress.level == 2)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
+ else if (xform->compress.level == 3)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
+ else if (xform->compress.level >= 4 &&
+ xform->compress.level <= 9)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
+ else {
+ QAT_LOG(ERR, "compression level not supported");
+ return -EINVAL;
+ }
+ req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
+ ICP_QAT_FW_COMP_CNV_RECOVERY);
+ }
+
+ switch (xform->compress.algo) {
+ case RTE_COMP_ALGO_DEFLATE:
+ algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
+ break;
+ case RTE_COMP_ALGO_LZS:
+ default:
+ /* RTE_COMP_NULL */
+ QAT_LOG(ERR, "compression algorithm not supported");
+ return -EINVAL;
+ }
+
+ comp_req = &qat_xform->qat_comp_req_tmpl;
+
+ /* Initialize header */
+ qat_comp_create_req_hdr(&comp_req->comn_hdr,
+ qat_xform->qat_comp_request_type);
+
+ comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_STATELESS_SESSION,
+ ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+
+ comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
+ direction,
+ /* In CPM 1.6 only valid mode ! */
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
+ /* Translate level to depth */
+ comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ comp_req->comp_pars.initial_adler = 1;
+ comp_req->comp_pars.initial_crc32 = 0;
+ comp_req->comp_pars.req_par_flags = req_par_flags;
+
+
+ if (qat_xform->qat_comp_request_type ==
+ QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
+ ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_COMP);
+ } else if (qat_xform->qat_comp_request_type ==
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {
+
+ QAT_LOG(ERR, "Dynamic huffman encoding not supported");
+ return -EINVAL;
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
+ sizeof(struct icp_qat_fw_comp_req));
+#endif
+ return 0;
+}
+
+/**
+ * Create driver private_xform data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param xform
+ * xform data from application
+ * @param private_xform
+ * ptr where handle of pmd's private_xform data should be stored
+ * @return
+ * - if successful returns 0
+ * and valid private_xform handle
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if comp device does not support the comp transform.
+ * - Returns -ENOMEM if the private_xform could not be allocated.
+ */
+int
+qat_comp_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform)
+{
+ struct qat_comp_dev_private *qat = dev->data->dev_private;
+
+ if (unlikely(private_xform == NULL)) {
+ QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
+ return -EINVAL;
+ }
+ if (unlikely(qat->xformpool == NULL)) {
+ QAT_LOG(ERR, "QAT device has no private_xform mempool");
+ return -ENOMEM;
+ }
+ if (rte_mempool_get(qat->xformpool, private_xform)) {
+ QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
+ return -ENOMEM;
+ }
+
+ struct qat_comp_xform *qat_xform =
+ (struct qat_comp_xform *)*private_xform;
+
+ if (xform->type == RTE_COMP_COMPRESS) {
+ if (xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DYNAMIC) {
+ QAT_LOG(ERR,
+ "QAT device doesn't support dynamic compression");
+ return -ENOTSUP;
+ }
+
+ if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
+ ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
+ && qat->interm_buff_mz == NULL))
+
+ qat_xform->qat_comp_request_type =
+ QAT_COMP_REQUEST_FIXED_COMP_STATELESS;
+
+
+ } else {
+ qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
+ }
+
+ qat_xform->checksum_type = xform->compress.chksum;
+
+ if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
+ QAT_LOG(ERR, "QAT: Problem with setting compression");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * Free driver private_xform data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param private_xform
+ * handle of pmd's private_xform data
+ * @return
+ * - 0 if successful
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ */
+int
+qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
+ void *private_xform)
+{
+ struct qat_comp_xform *qat_xform =
+ (struct qat_comp_xform *)private_xform;
+
+ if (qat_xform) {
+ memset(qat_xform, 0, qat_comp_xform_size());
+ struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);
+
+ rte_mempool_put(mp, qat_xform);
+ return 0;
+ }
+ return -EINVAL;
+}
diff --git a/src/spdk/dpdk/drivers/compress/qat/qat_comp.h b/src/spdk/dpdk/drivers/compress/qat/qat_comp.h
new file mode 100644
index 00000000..8d315efb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/qat/qat_comp.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_COMP_H_
+#define _QAT_COMP_H_
+
+#ifdef RTE_LIBRTE_COMPRESSDEV
+
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#include "qat_common.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_fw_la.h"
+
+#define ERR_CODE_QAT_COMP_WRONG_FW -99
+
+enum qat_comp_request_type {
+ QAT_COMP_REQUEST_FIXED_COMP_STATELESS,
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS,
+ QAT_COMP_REQUEST_DECOMPRESS,
+ REQ_COMP_END
+};
+
+struct qat_comp_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_comp_op_cookie {
+ struct qat_comp_sgl qat_sgl_src;
+ struct qat_comp_sgl qat_sgl_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+};
+
+struct qat_comp_xform {
+ struct icp_qat_fw_comp_req qat_comp_req_tmpl;
+ enum qat_comp_request_type qat_comp_request_type;
+ enum rte_comp_checksum_type checksum_type;
+};
+
+int
+qat_comp_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+ enum qat_device_gen qat_dev_gen __rte_unused);
+
+int
+qat_comp_process_response(void **op, uint8_t *resp);
+
+
+int
+qat_comp_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform);
+
+int
+qat_comp_private_xform_free(struct rte_compressdev *dev, void *private_xform);
+
+unsigned int
+qat_comp_xform_size(void);
+
+#endif
+#endif
diff --git a/src/spdk/dpdk/drivers/compress/qat/qat_comp_pmd.c b/src/spdk/dpdk/drivers/compress/qat/qat_comp_pmd.c
new file mode 100644
index 00000000..b89975fc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/qat/qat_comp_pmd.c
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include "qat_comp.h"
+#include "qat_comp_pmd.h"
+
+static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
+ {/* COMPRESSION - deflate */
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM |
+ RTE_COMP_FF_CRC32_CHECKSUM |
+ RTE_COMP_FF_ADLER32_CHECKSUM |
+ RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
+ RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
+ .window_size = {.min = 15, .max = 15, .increment = 0} },
+ {RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
+
+static void
+qat_comp_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ struct qat_common_stats qat_stats = {0};
+ struct qat_comp_dev_private *qat_priv;
+
+ if (stats == NULL || dev == NULL) {
+ QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
+ stats->enqueued_count = qat_stats.enqueued_count;
+ stats->dequeued_count = qat_stats.dequeued_count;
+ stats->enqueue_err_count = qat_stats.enqueue_err_count;
+ stats->dequeue_err_count = qat_stats.dequeue_err_count;
+}
+
+static void
+qat_comp_stats_reset(struct rte_compressdev *dev)
+{
+ struct qat_comp_dev_private *qat_priv;
+
+ if (dev == NULL) {
+ QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
+
+}
+
+static int
+qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
+{
+ struct qat_comp_dev_private *qat_private = dev->data->dev_private;
+
+ QAT_LOG(DEBUG, "Release comp qp %u on device %d",
+ queue_pair_id, dev->data->dev_id);
+
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
+ = NULL;
+
+ return qat_qp_release((struct qat_qp **)
+ &(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int
+qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct qat_qp *qp;
+ int ret = 0;
+ uint32_t i;
+ struct qat_qp_config qat_qp_conf;
+
+ struct qat_qp **qp_addr =
+ (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct qat_comp_dev_private *qat_private = dev->data->dev_private;
+ const struct qat_qp_hw_data *comp_hw_qps =
+ qat_gen_config[qat_private->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_COMPRESSION];
+ const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id;
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (*qp_addr != NULL) {
+ ret = qat_comp_qp_release(dev, qp_id);
+ if (ret < 0)
+ return ret;
+ }
+ if (qp_id >= qat_qps_per_service(comp_hw_qps,
+ QAT_SERVICE_COMPRESSION)) {
+ QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
+ return -EINVAL;
+ }
+
+ qat_qp_conf.hw = qp_hw_data;
+ qat_qp_conf.build_request = qat_comp_build_request;
+ qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
+ qat_qp_conf.nb_descriptors = max_inflight_ops;
+ qat_qp_conf.socket_id = socket_id;
+ qat_qp_conf.service_str = "comp";
+
+ ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
+ if (ret != 0)
+ return ret;
+
+ /* store a link to the qp in the qat_pci_device */
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
+ = *qp_addr;
+
+ qp = (struct qat_qp *)*qp_addr;
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+
+ struct qat_comp_op_cookie *cookie =
+ qp->op_cookies[i];
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_dst);
+ }
+
+ return ret;
+}
+
+static struct rte_mempool *
+qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
+ uint32_t num_elements)
+{
+ char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *mp;
+
+ snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
+ "%s_xforms", comp_dev->qat_dev->name);
+
+ QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
+ mp = rte_mempool_lookup(xform_pool_name);
+
+ if (mp != NULL) {
+ QAT_LOG(DEBUG, "xformpool already created");
+ if (mp->size != num_elements) {
+ QAT_LOG(DEBUG, "xformpool wrong size - delete it");
+ rte_mempool_free(mp);
+ mp = NULL;
+ comp_dev->xformpool = NULL;
+ }
+ }
+
+ if (mp == NULL)
+ mp = rte_mempool_create(xform_pool_name,
+ num_elements,
+ qat_comp_xform_size(), 0, 0,
+ NULL, NULL, NULL, NULL, rte_socket_id(),
+ 0);
+ if (mp == NULL) {
+ QAT_LOG(ERR, "Err creating mempool %s w %d elements of size %d",
+ xform_pool_name, num_elements, qat_comp_xform_size());
+ return NULL;
+ }
+
+ return mp;
+}
+
+static void
+_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
+{
+ /* Free private_xform pool */
+ if (comp_dev->xformpool) {
+ /* Free internal mempool for private xforms */
+ rte_mempool_free(comp_dev->xformpool);
+ comp_dev->xformpool = NULL;
+ }
+}
+
+static int
+qat_comp_dev_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+ int ret = 0;
+
+ if (config->max_nb_streams != 0) {
+ QAT_LOG(ERR,
+ "QAT device does not support STATEFUL so max_nb_streams must be 0");
+ return -EINVAL;
+ }
+
+ comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
+ config->max_nb_priv_xforms);
+ if (comp_dev->xformpool == NULL) {
+
+ ret = -ENOMEM;
+ goto error_out;
+ }
+ return 0;
+
+error_out:
+ _qat_comp_dev_config_clear(comp_dev);
+ return ret;
+}
+
+static int
+qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
+{
+
+}
+
+static int
+qat_comp_dev_close(struct rte_compressdev *dev)
+{
+ int i;
+ int ret = 0;
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_comp_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ _qat_comp_dev_config_clear(comp_dev);
+
+ return ret;
+}
+
+
+static void
+qat_comp_dev_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *info)
+{
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+ const struct qat_qp_hw_data *comp_hw_qps =
+ qat_gen_config[comp_dev->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_COMPRESSION];
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ qat_qps_per_service(comp_hw_qps,
+ QAT_SERVICE_COMPRESSION);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = comp_dev->qat_dev_capabilities;
+ }
+}
+
+static uint16_t
+qat_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused,
+ struct rte_comp_op **ops __rte_unused,
+ uint16_t nb_ops __rte_unused)
+{
+ QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version !");
+ return 0;
+}
+
+static struct rte_compressdev_ops compress_qat_dummy_ops = {
+
+ /* Device related operations */
+ .dev_configure = NULL,
+ .dev_start = NULL,
+ .dev_stop = qat_comp_dev_stop,
+ .dev_close = qat_comp_dev_close,
+ .dev_infos_get = NULL,
+
+ .stats_get = NULL,
+ .stats_reset = qat_comp_stats_reset,
+ .queue_pair_setup = NULL,
+ .queue_pair_release = qat_comp_qp_release,
+
+ /* Compression related operations */
+ .private_xform_create = NULL,
+ .private_xform_free = qat_comp_private_xform_free
+};
+
+static uint16_t
+qat_comp_pmd_dequeue_frst_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+
+ if (ret) {
+ if ((*ops)->debug_status ==
+ (uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) {
+ tmp_qp->qat_dev->comp_dev->compressdev->enqueue_burst =
+ qat_comp_pmd_enq_deq_dummy_op_burst;
+ tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
+ qat_comp_pmd_enq_deq_dummy_op_burst;
+
+ tmp_qp->qat_dev->comp_dev->compressdev->dev_ops =
+ &compress_qat_dummy_ops;
+ QAT_LOG(ERR, "QAT PMD detected wrong FW version !");
+
+ } else {
+ tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
+ qat_comp_pmd_dequeue_op_burst;
+ }
+ }
+ return ret;
+}
+
+static struct rte_compressdev_ops compress_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_comp_dev_config,
+ .dev_start = qat_comp_dev_start,
+ .dev_stop = qat_comp_dev_stop,
+ .dev_close = qat_comp_dev_close,
+ .dev_infos_get = qat_comp_dev_info_get,
+
+ .stats_get = qat_comp_stats_get,
+ .stats_reset = qat_comp_stats_reset,
+ .queue_pair_setup = qat_comp_qp_setup,
+ .queue_pair_release = qat_comp_qp_release,
+
+ /* Compression related operations */
+ .private_xform_create = qat_comp_private_xform_create,
+ .private_xform_free = qat_comp_private_xform_free
+};
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
+{
+ if (qat_pci_dev->qat_dev_gen == QAT_GEN1) {
+ QAT_LOG(ERR, "Compression PMD not supported on QAT dh895xcc");
+ return 0;
+ }
+
+ struct rte_compressdev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ };
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ struct rte_compressdev *compressdev;
+ struct qat_comp_dev_private *comp_dev;
+
+ snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "comp");
+ QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);
+
+ compressdev = rte_compressdev_pmd_create(name,
+ &qat_pci_dev->pci_dev->device,
+ sizeof(struct qat_comp_dev_private),
+ &init_params);
+
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ compressdev->dev_ops = &compress_qat_ops;
+
+ compressdev->enqueue_burst = qat_comp_pmd_enqueue_op_burst;
+ compressdev->dequeue_burst = qat_comp_pmd_dequeue_frst_op_burst;
+
+ compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+
+ comp_dev = compressdev->data->dev_private;
+ comp_dev->qat_dev = qat_pci_dev;
+ comp_dev->compressdev = compressdev;
+ qat_pci_dev->comp_dev = comp_dev;
+
+ switch (qat_pci_dev->qat_dev_gen) {
+ case QAT_GEN1:
+ case QAT_GEN2:
+ comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
+ break;
+ default:
+ comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
+ QAT_LOG(DEBUG,
+ "QAT gen %d capabilities unknown, default to GEN1",
+ qat_pci_dev->qat_dev_gen);
+ break;
+ }
+
+ QAT_LOG(DEBUG,
+ "Created QAT COMP device %s as compressdev instance %d",
+ name, compressdev->data->dev_id);
+ return 0;
+}
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct qat_comp_dev_private *comp_dev;
+
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+
+ comp_dev = qat_pci_dev->comp_dev;
+ if (comp_dev == NULL)
+ return 0;
+
+ /* clean up any resources used by the device */
+ qat_comp_dev_close(comp_dev->compressdev);
+
+ rte_compressdev_pmd_destroy(comp_dev->compressdev);
+ qat_pci_dev->comp_dev = NULL;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/compress/qat/qat_comp_pmd.h b/src/spdk/dpdk/drivers/compress/qat/qat_comp_pmd.h
new file mode 100644
index 00000000..9ad2a283
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/qat/qat_comp_pmd.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_COMP_PMD_H_
+#define _QAT_COMP_PMD_H_
+
+#ifdef RTE_LIBRTE_COMPRESSDEV
+
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#include "qat_device.h"
+
+/** private data structure for a QAT compression device.
+ * This QAT device is a device offering only a compression service,
+ * there can be one of these on each qat_pci_device (VF).
+ */
+struct qat_comp_dev_private {
+ struct qat_pci_device *qat_dev;
+ /**< The qat pci device hosting the service */
+ struct rte_compressdev *compressdev;
+ /**< The pointer to this compression device structure */
+ const struct rte_compressdev_capabilities *qat_dev_capabilities;
+ /* QAT device compression capabilities */
+ const struct rte_memzone *interm_buff_mz;
+ /**< The device's memory for intermediate buffers */
+ struct rte_mempool *xformpool;
+ /**< The device's pool for qat_comp_xforms */
+};
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev);
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev);
+
+#endif
+#endif /* _QAT_COMP_PMD_H_ */
diff --git a/src/spdk/dpdk/drivers/compress/qat/rte_pmd_qat_version.map b/src/spdk/dpdk/drivers/compress/qat/rte_pmd_qat_version.map
new file mode 100644
index 00000000..ad6e191e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/qat/rte_pmd_qat_version.map
@@ -0,0 +1,3 @@
+DPDK_18.08 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/compress/zlib/Makefile b/src/spdk/dpdk/drivers/compress/zlib/Makefile
new file mode 100644
index 00000000..5cf8de6f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/zlib/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium Networks
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_zlib.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_zlib_version.map
+
+# external library dependencies
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -lz
+LDLIBS += -lrte_compressdev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/compress/zlib/meson.build b/src/spdk/dpdk/drivers/compress/zlib/meson.build
new file mode 100644
index 00000000..7748de2d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/zlib/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium Networks
+
+dep = dependency('zlib', required: false)
+if not dep.found()
+ build = false
+endif
+
+deps += 'bus_vdev'
+sources = files('zlib_pmd.c', 'zlib_pmd_ops.c')
+ext_deps += dep
+pkgconfig_extra_libs += '-lz'
+
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/compress/zlib/rte_pmd_zlib_version.map b/src/spdk/dpdk/drivers/compress/zlib/rte_pmd_zlib_version.map
new file mode 100644
index 00000000..ad6e191e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/zlib/rte_pmd_zlib_version.map
@@ -0,0 +1,3 @@
+DPDK_18.08 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/compress/zlib/zlib_pmd.c b/src/spdk/dpdk/drivers/compress/zlib/zlib_pmd.c
new file mode 100644
index 00000000..7d6871b1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/zlib/zlib_pmd.c
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+
+#include "zlib_pmd_private.h"
+
+/** Compute next mbuf in the list, assign data buffer and length,
+ * returns 0 if mbuf is NULL
+ */
+#define COMPUTE_BUF(mbuf, data, len) \
+ ((mbuf = mbuf->next) ? \
+ (data = rte_pktmbuf_mtod(mbuf, uint8_t *)), \
+ (len = rte_pktmbuf_data_len(mbuf)) : 0)
+
+static void
+process_zlib_deflate(struct rte_comp_op *op, z_stream *strm)
+{
+ int ret, flush, fin_flush;
+ struct rte_mbuf *mbuf_src = op->m_src;
+ struct rte_mbuf *mbuf_dst = op->m_dst;
+
+ switch (op->flush_flag) {
+ case RTE_COMP_FLUSH_FULL:
+ case RTE_COMP_FLUSH_FINAL:
+ fin_flush = Z_FINISH;
+ break;
+ default:
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ZLIB_PMD_ERR("Invalid flush value\n");
+ }
+
+ if (unlikely(!strm)) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ZLIB_PMD_ERR("Invalid z_stream\n");
+ return;
+ }
+ /* Update z_stream with the inputs provided by application */
+ strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->src.offset);
+
+ strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;
+
+ strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->dst.offset);
+
+ strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;
+
+ /* Set flush value to NO_FLUSH unless it is last mbuf */
+ flush = Z_NO_FLUSH;
+ /* Initialize status to SUCCESS */
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ do {
+ /* Set flush value to Z_FINISH for last block */
+ if ((op->src.length - strm->total_in) <= strm->avail_in) {
+ strm->avail_in = (op->src.length - strm->total_in);
+ flush = fin_flush;
+ }
+ do {
+ ret = deflate(strm, flush);
+ if (unlikely(ret == Z_STREAM_ERROR)) {
+ /* error return, do not process further */
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ goto def_end;
+ }
+ /* Break if Z_STREAM_END is encountered */
+ if (ret == Z_STREAM_END)
+ goto def_end;
+
+ /* Keep looping until input mbuf is consumed.
+ * Exit if destination mbuf gets exhausted.
+ */
+ } while ((strm->avail_out == 0) &&
+ COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));
+
+ if (!strm->avail_out) {
+ /* there is no space for compressed output */
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ break;
+ }
+
+ /* Update source buffer to next mbuf
+ * Exit if input buffers are fully consumed
+ */
+ } while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));
+
+def_end:
+ /* Update op stats */
+ switch (op->status) {
+ case RTE_COMP_OP_STATUS_SUCCESS:
+ op->consumed += strm->total_in;
+ /* Fall-through */
+ case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+ op->produced += strm->total_out;
+ break;
+ default:
+ ZLIB_PMD_ERR("stats not updated for status:%d\n",
+ op->status);
+ }
+
+ deflateReset(strm);
+}
+
+static void
+process_zlib_inflate(struct rte_comp_op *op, z_stream *strm)
+{
+ int ret, flush;
+ struct rte_mbuf *mbuf_src = op->m_src;
+ struct rte_mbuf *mbuf_dst = op->m_dst;
+
+ if (unlikely(!strm)) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ZLIB_PMD_ERR("Invalid z_stream\n");
+ return;
+ }
+ strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->src.offset);
+
+ strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;
+
+ strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->dst.offset);
+
+ strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;
+
+ /** Ignoring flush value provided from application for decompression */
+ flush = Z_NO_FLUSH;
+ /* initialize status to SUCCESS */
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ do {
+ do {
+ ret = inflate(strm, flush);
+
+ switch (ret) {
+ /* Fall-through */
+ case Z_NEED_DICT:
+ ret = Z_DATA_ERROR;
+ /* Fall-through */
+ case Z_DATA_ERROR:
+ /* Fall-through */
+ case Z_MEM_ERROR:
+ /* Fall-through */
+ case Z_STREAM_ERROR:
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ /* Fall-through */
+ case Z_STREAM_END:
+ /* no further computation needed if
+ * Z_STREAM_END is encountered
+ */
+ goto inf_end;
+ default:
+ /* success */
+ break;
+
+ }
+ /* Keep looping until input mbuf is consumed.
+ * Exit if destination mbuf gets exhausted.
+ */
+ } while ((strm->avail_out == 0) &&
+ COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));
+
+ if (!strm->avail_out) {
+ /* there is no more space for decompressed output */
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ break;
+ }
+ /* Read next input buffer to be processed, exit if compressed
+ * blocks are fully read
+ */
+ } while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));
+
+inf_end:
+ /* Update op stats */
+ switch (op->status) {
+ case RTE_COMP_OP_STATUS_SUCCESS:
+ op->consumed += strm->total_in;
+ /* Fall-through */
+ case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+ op->produced += strm->total_out;
+ break;
+ default:
+ ZLIB_PMD_ERR("stats not produced for status:%d\n",
+ op->status);
+ }
+
+ inflateReset(strm);
+}
+
+/** Process comp operation for mbuf */
+static inline int
+process_zlib_op(struct zlib_qp *qp, struct rte_comp_op *op)
+{
+ struct zlib_stream *stream;
+ struct zlib_priv_xform *private_xform;
+
+ if ((op->op_type == RTE_COMP_OP_STATEFUL) ||
+ (op->src.offset > rte_pktmbuf_data_len(op->m_src)) ||
+ (op->dst.offset > rte_pktmbuf_data_len(op->m_dst))) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ZLIB_PMD_ERR("Invalid source or destination buffers or "
+ "invalid Operation requested\n");
+ } else {
+ private_xform = (struct zlib_priv_xform *)op->private_xform;
+ stream = &private_xform->stream;
+ stream->comp(op, &stream->strm);
+ }
+ /* whatever is out of op, put it into completion queue with
+ * its status
+ */
+ return rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
+/** Parse comp xform and set private xform/Stream parameters */
+int
+zlib_set_stream_parameters(const struct rte_comp_xform *xform,
+ struct zlib_stream *stream)
+{
+ int strategy, level, wbits;
+ z_stream *strm = &stream->strm;
+
+ /* allocate deflate state */
+ strm->zalloc = Z_NULL;
+ strm->zfree = Z_NULL;
+ strm->opaque = Z_NULL;
+
+ switch (xform->type) {
+ case RTE_COMP_COMPRESS:
+ stream->comp = process_zlib_deflate;
+ stream->free = deflateEnd;
+ /** Compression window bits */
+ switch (xform->compress.algo) {
+ case RTE_COMP_ALGO_DEFLATE:
+ wbits = -(xform->compress.window_size);
+ break;
+ default:
+ ZLIB_PMD_ERR("Compression algorithm not supported\n");
+ return -1;
+ }
+ /** Compression Level */
+ switch (xform->compress.level) {
+ case RTE_COMP_LEVEL_PMD_DEFAULT:
+ level = Z_DEFAULT_COMPRESSION;
+ break;
+ case RTE_COMP_LEVEL_NONE:
+ level = Z_NO_COMPRESSION;
+ break;
+ case RTE_COMP_LEVEL_MIN:
+ level = Z_BEST_SPEED;
+ break;
+ case RTE_COMP_LEVEL_MAX:
+ level = Z_BEST_COMPRESSION;
+ break;
+ default:
+ level = xform->compress.level;
+ if (level < RTE_COMP_LEVEL_MIN ||
+ level > RTE_COMP_LEVEL_MAX) {
+ ZLIB_PMD_ERR("Compression level %d "
+ "not supported\n",
+ level);
+ return -1;
+ }
+ break;
+ }
+ /** Compression strategy */
+ switch (xform->compress.deflate.huffman) {
+ case RTE_COMP_HUFFMAN_DEFAULT:
+ strategy = Z_DEFAULT_STRATEGY;
+ break;
+ case RTE_COMP_HUFFMAN_FIXED:
+ strategy = Z_FIXED;
+ break;
+ case RTE_COMP_HUFFMAN_DYNAMIC:
+ strategy = Z_DEFAULT_STRATEGY;
+ break;
+ default:
+ ZLIB_PMD_ERR("Compression strategy not supported\n");
+ return -1;
+ }
+ if (deflateInit2(strm, level,
+ Z_DEFLATED, wbits,
+ DEF_MEM_LEVEL, strategy) != Z_OK) {
+ ZLIB_PMD_ERR("Deflate init failed\n");
+ return -1;
+ }
+ break;
+
+ case RTE_COMP_DECOMPRESS:
+ stream->comp = process_zlib_inflate;
+ stream->free = inflateEnd;
+ /** window bits */
+ switch (xform->decompress.algo) {
+ case RTE_COMP_ALGO_DEFLATE:
+ wbits = -(xform->decompress.window_size);
+ break;
+ default:
+ ZLIB_PMD_ERR("Compression algorithm not supported\n");
+ return -1;
+ }
+
+ if (inflateInit2(strm, wbits) != Z_OK) {
+ ZLIB_PMD_ERR("Inflate init failed\n");
+ return -1;
+ }
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+static uint16_t
+zlib_pmd_enqueue_burst(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zlib_qp *qp = queue_pair;
+ int ret;
+ uint16_t i;
+ uint16_t enqd = 0;
+ for (i = 0; i < nb_ops; i++) {
+ ret = process_zlib_op(qp, ops[i]);
+ if (unlikely(ret < 0)) {
+ /* increment count if failed to push to completion
+ * queue
+ */
+ qp->qp_stats.enqueue_err_count++;
+ } else {
+ qp->qp_stats.enqueued_count++;
+ enqd++;
+ }
+ }
+ return enqd;
+}
+
+static uint16_t
+zlib_pmd_dequeue_burst(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zlib_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int
+zlib_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_compressdev_pmd_init_params *init_params)
+{
+ struct rte_compressdev *dev;
+
+ dev = rte_compressdev_pmd_create(name, &vdev->device,
+ sizeof(struct zlib_private), init_params);
+ if (dev == NULL) {
+ ZLIB_PMD_ERR("driver %s: create failed", init_params->name);
+ return -ENODEV;
+ }
+
+ dev->dev_ops = rte_zlib_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = zlib_pmd_dequeue_burst;
+ dev->enqueue_burst = zlib_pmd_enqueue_burst;
+
+ return 0;
+}
+
+static int
+zlib_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_compressdev_pmd_init_params init_params = {
+ "",
+ rte_socket_id()
+ };
+ const char *name;
+ const char *input_args;
+ int retval;
+
+ name = rte_vdev_device_name(vdev);
+
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+
+ retval = rte_compressdev_pmd_parse_input_args(&init_params, input_args);
+ if (retval < 0) {
+ ZLIB_PMD_LOG(ERR,
+ "Failed to parse initialisation arguments[%s]\n",
+ input_args);
+ return -EINVAL;
+ }
+
+ return zlib_create(name, vdev, &init_params);
+}
+
+static int
+zlib_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_compressdev *compressdev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ compressdev = rte_compressdev_pmd_get_named_dev(name);
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ return rte_compressdev_pmd_destroy(compressdev);
+}
+
+static struct rte_vdev_driver zlib_pmd_drv = {
+ .probe = zlib_probe,
+ .remove = zlib_remove
+};
+
+RTE_PMD_REGISTER_VDEV(COMPRESSDEV_NAME_ZLIB_PMD, zlib_pmd_drv);
+RTE_INIT(zlib_init_log);
+
+static void
+zlib_init_log(void)
+{
+ zlib_logtype_driver = rte_log_register("pmd.compress.zlib");
+ if (zlib_logtype_driver >= 0)
+ rte_log_set_level(zlib_logtype_driver, RTE_LOG_INFO);
+}
diff --git a/src/spdk/dpdk/drivers/compress/zlib/zlib_pmd_ops.c b/src/spdk/dpdk/drivers/compress/zlib/zlib_pmd_ops.c
new file mode 100644
index 00000000..0a73aed9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/zlib/zlib_pmd_ops.c
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include "zlib_pmd_private.h"
+
+static const struct rte_compressdev_capabilities zlib_pmd_capabilities[] = {
+ { /* Deflate */
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = (RTE_COMP_FF_NONCOMPRESSED_BLOCKS |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC),
+ .window_size = {
+ .min = 8,
+ .max = 15,
+ .increment = 1
+ },
+ },
+
+ RTE_COMP_END_OF_CAPABILITIES_LIST()
+
+};
+
+/** Configure device */
+static int
+zlib_pmd_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ struct rte_mempool *mp;
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct zlib_private *internals = dev->data->dev_private;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "stream_mp_%u", dev->data->dev_id);
+ mp = internals->mp;
+ if (mp == NULL) {
+ mp = rte_mempool_create(mp_name,
+ config->max_nb_priv_xforms +
+ config->max_nb_streams,
+ sizeof(struct zlib_priv_xform),
+ 0, 0, NULL, NULL, NULL,
+ NULL, config->socket_id,
+ 0);
+ if (mp == NULL) {
+ ZLIB_PMD_ERR("Cannot create private xform pool on "
+ "socket %d\n", config->socket_id);
+ return -ENOMEM;
+ }
+ internals->mp = mp;
+ }
+ return 0;
+}
+
+/** Start device */
+static int
+zlib_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+zlib_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+}
+
+/** Close device */
+static int
+zlib_pmd_close(struct rte_compressdev *dev)
+{
+ struct zlib_private *internals = dev->data->dev_private;
+ rte_mempool_free(internals->mp);
+ internals->mp = NULL;
+ return 0;
+}
+
+/** Get device statistics */
+static void
+zlib_pmd_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+zlib_pmd_stats_reset(struct rte_compressdev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+/** Get device info */
+static void
+zlib_pmd_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *dev_info)
+{
+ if (dev_info != NULL) {
+ dev_info->driver_name = dev->device->name;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = zlib_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+zlib_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp != NULL) {
+ rte_ring_free(qp->processed_pkts);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+zlib_pmd_qp_set_unique_name(struct rte_compressdev *dev,
+ struct zlib_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "zlib_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+zlib_pmd_qp_create_processed_pkts_ring(struct zlib_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r = qp->processed_pkts;
+
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ZLIB_PMD_INFO("Reusing existing ring %s for processed"
+ " packets", qp->name);
+ return r;
+ }
+
+ ZLIB_PMD_ERR("Unable to reuse existing ring %s for processed"
+ " packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_EXACT_SZ);
+}
+
+/** Setup a queue pair */
+static int
+zlib_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct zlib_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ zlib_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("ZLIB PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (zlib_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_pkts = zlib_pmd_qp_create_processed_pkts_ring(qp,
+ max_inflight_ops, socket_id);
+ if (qp->processed_pkts == NULL)
+ goto qp_setup_cleanup;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ if (qp) {
+ rte_free(qp);
+ qp = NULL;
+ }
+ return -1;
+}
+
+/** Configure stream */
+static int
+zlib_pmd_stream_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **zstream)
+{
+ int ret = 0;
+ struct zlib_stream *stream;
+ struct zlib_private *internals = dev->data->dev_private;
+
+ if (xform == NULL) {
+ ZLIB_PMD_ERR("invalid xform struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(internals->mp, zstream)) {
+ ZLIB_PMD_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+ stream = *((struct zlib_stream **)zstream);
+
+ ret = zlib_set_stream_parameters(xform, stream);
+
+ if (ret < 0) {
+ ZLIB_PMD_ERR("failed configure session parameters");
+
+ memset(stream, 0, sizeof(struct zlib_stream));
+ /* Return session to mempool */
+ rte_mempool_put(internals->mp, stream);
+ return ret;
+ }
+
+ return 0;
+}
+
+/** Configure private xform */
+static int
+zlib_pmd_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform)
+{
+ return zlib_pmd_stream_create(dev, xform, private_xform);
+}
+
+/** Clear the memory of stream so it doesn't leave key material behind */
+static int
+zlib_pmd_stream_free(__rte_unused struct rte_compressdev *dev,
+ void *zstream)
+{
+ struct zlib_stream *stream = (struct zlib_stream *)zstream;
+ if (!stream)
+ return -EINVAL;
+
+ stream->free(&stream->strm);
+ /* Zero out the whole structure */
+ memset(stream, 0, sizeof(struct zlib_stream));
+ struct rte_mempool *mp = rte_mempool_from_obj(stream);
+ rte_mempool_put(mp, stream);
+
+ return 0;
+}
+
+/** Clear the memory of stream so it doesn't leave key material behind */
+static int
+zlib_pmd_private_xform_free(struct rte_compressdev *dev,
+ void *private_xform)
+{
+ return zlib_pmd_stream_free(dev, private_xform);
+}
+
+struct rte_compressdev_ops zlib_pmd_ops = {
+ .dev_configure = zlib_pmd_config,
+ .dev_start = zlib_pmd_start,
+ .dev_stop = zlib_pmd_stop,
+ .dev_close = zlib_pmd_close,
+
+ .stats_get = zlib_pmd_stats_get,
+ .stats_reset = zlib_pmd_stats_reset,
+
+ .dev_infos_get = zlib_pmd_info_get,
+
+ .queue_pair_setup = zlib_pmd_qp_setup,
+ .queue_pair_release = zlib_pmd_qp_release,
+
+ .private_xform_create = zlib_pmd_private_xform_create,
+ .private_xform_free = zlib_pmd_private_xform_free,
+
+ .stream_create = NULL,
+ .stream_free = NULL
+};
+
+struct rte_compressdev_ops *rte_zlib_pmd_ops = &zlib_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/compress/zlib/zlib_pmd_private.h b/src/spdk/dpdk/drivers/compress/zlib/zlib_pmd_private.h
new file mode 100644
index 00000000..2c6e83d4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/compress/zlib/zlib_pmd_private.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef _RTE_ZLIB_PMD_PRIVATE_H_
+#define _RTE_ZLIB_PMD_PRIVATE_H_
+
+#include <zlib.h>
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#define COMPRESSDEV_NAME_ZLIB_PMD compress_zlib
+/**< ZLIB PMD device name */
+
+#define DEF_MEM_LEVEL 8
+
+int zlib_logtype_driver;
+#define ZLIB_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, zlib_logtype_driver, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+#define ZLIB_PMD_INFO(fmt, args...) \
+ ZLIB_PMD_LOG(INFO, fmt, ## args)
+#define ZLIB_PMD_ERR(fmt, args...) \
+ ZLIB_PMD_LOG(ERR, fmt, ## args)
+#define ZLIB_PMD_WARN(fmt, args...) \
+ ZLIB_PMD_LOG(WARNING, fmt, ## args)
+
+struct zlib_private {
+ struct rte_mempool *mp;
+};
+
+struct zlib_qp {
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing process packets */
+ struct rte_compressdev_stats qp_stats;
+ /**< Queue pair statistics */
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+} __rte_cache_aligned;
+
+/* Algorithm handler function prototype */
+typedef void (*comp_func_t)(struct rte_comp_op *op, z_stream *strm);
+
+typedef int (*comp_free_t)(z_stream *strm);
+
+/** ZLIB Stream structure */
+struct zlib_stream {
+ z_stream strm;
+ /**< zlib stream structure */
+ comp_func_t comp;
+ /**< Operation (compression/decompression) */
+ comp_free_t free;
+ /**< Free Operation (compression/decompression) */
+} __rte_cache_aligned;
+
+/** ZLIB private xform structure */
+struct zlib_priv_xform {
+ struct zlib_stream stream;
+} __rte_cache_aligned;
+
+int
+zlib_set_stream_parameters(const struct rte_comp_xform *xform,
+ struct zlib_stream *stream);
+
+/** Device specific operations function pointer structure */
+extern struct rte_compressdev_ops *rte_zlib_pmd_ops;
+
+#endif /* _RTE_ZLIB_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/Makefile b/src/spdk/dpdk/drivers/crypto/Makefile
new file mode 100644
index 00000000..c480cbd3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += mvsam
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
+endif
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_gcm/Makefile b/src/spdk/dpdk/drivers/crypto/aesni_gcm/Makefile
new file mode 100644
index 00000000..0a5c1a87
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_gcm/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016-2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_aesni_gcm.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_aesni_gcm_version.map
+
+# external library dependencies
+LDLIBS += -lIPSec_MB
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
new file mode 100644
index 00000000..45061669
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#ifndef _AESNI_GCM_OPS_H_
+#define _AESNI_GCM_OPS_H_
+
+#ifndef LINUX
+#define LINUX
+#endif
+
+#include <intel-ipsec-mb.h>
+
+/** Supported vector modes */
+enum aesni_gcm_vector_mode {
+ RTE_AESNI_GCM_NOT_SUPPORTED = 0,
+ RTE_AESNI_GCM_SSE,
+ RTE_AESNI_GCM_AVX,
+ RTE_AESNI_GCM_AVX2,
+ RTE_AESNI_GCM_VECTOR_NUM
+};
+
+enum aesni_gcm_key {
+ AESNI_GCM_KEY_128,
+ AESNI_GCM_KEY_192,
+ AESNI_GCM_KEY_256,
+ AESNI_GCM_KEY_NUM
+};
+
+
+typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data, uint8_t *out,
+ const uint8_t *in, uint64_t plaintext_len, const uint8_t *iv,
+ const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+typedef void (*aesni_gcm_precomp_t)(const void *key, struct gcm_key_data *gcm_data);
+
+typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
+ const uint8_t *iv,
+ uint8_t const *aad,
+ uint64_t aad_len);
+
+typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
+ uint8_t *out,
+ const uint8_t *in,
+ uint64_t plaintext_len);
+
+typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
+ uint8_t *auth_tag,
+ uint64_t auth_tag_len);
+
+/** GCM library function pointer table */
+struct aesni_gcm_ops {
+ aesni_gcm_t enc; /**< GCM encode function pointer */
+ aesni_gcm_t dec; /**< GCM decode function pointer */
+ aesni_gcm_precomp_t precomp; /**< GCM pre-compute */
+ aesni_gcm_init_t init;
+ aesni_gcm_update_t update_enc;
+ aesni_gcm_update_t update_dec;
+ aesni_gcm_finalize_t finalize;
+};
+
+#define AES_GCM_FN(keylen, arch) \
+aes_gcm_enc_##keylen##_##arch,\
+aes_gcm_dec_##keylen##_##arch,\
+aes_gcm_pre_##keylen##_##arch,\
+aes_gcm_init_##keylen##_##arch,\
+aes_gcm_enc_##keylen##_update_##arch,\
+aes_gcm_dec_##keylen##_update_##arch,\
+aes_gcm_enc_##keylen##_finalize_##arch,
+
+static const struct aesni_gcm_ops gcm_ops[RTE_AESNI_GCM_VECTOR_NUM][AESNI_GCM_KEY_NUM] = {
+ [RTE_AESNI_GCM_NOT_SUPPORTED] = {
+ [AESNI_GCM_KEY_128] = {NULL},
+ [AESNI_GCM_KEY_192] = {NULL},
+ [AESNI_GCM_KEY_256] = {NULL}
+ },
+ [RTE_AESNI_GCM_SSE] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, sse)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, sse)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, sse)
+ }
+ },
+ [RTE_AESNI_GCM_AVX] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, avx_gen2)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, avx_gen2)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, avx_gen2)
+ }
+ },
+ [RTE_AESNI_GCM_AVX2] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, avx_gen4)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, avx_gen4)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, avx_gen4)
+ }
+ }
+};
+#endif /* _AESNI_GCM_OPS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
new file mode 100644
index 00000000..752e0cd6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -0,0 +1,585 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+#include <rte_byteorder.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+static uint8_t cryptodev_driver_id;
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+ struct aesni_gcm_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform;
+ const struct rte_crypto_sym_xform *aead_xform;
+ uint16_t digest_length;
+ uint8_t key_length;
+ uint8_t *key;
+
+ /* AES-GMAC */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xform = xform;
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
+ AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an "
+ "authentication only algorithm");
+ return -ENOTSUP;
+ }
+ /* Set IV parameters */
+ sess->iv.offset = auth_xform->auth.iv.offset;
+ sess->iv.length = auth_xform->auth.iv.length;
+
+ /* Select Crypto operation */
+ if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->op = AESNI_GMAC_OP_GENERATE;
+ else
+ sess->op = AESNI_GMAC_OP_VERIFY;
+
+ key_length = auth_xform->auth.key.length;
+ key = auth_xform->auth.key.data;
+ digest_length = auth_xform->auth.digest_length;
+
+ /* AES-GCM */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ aead_xform = xform;
+
+ if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
+ AESNI_GCM_LOG(ERR, "The only combined operation "
+ "supported is AES GCM");
+ return -ENOTSUP;
+ }
+
+ /* Set IV parameters */
+ sess->iv.offset = aead_xform->aead.iv.offset;
+ sess->iv.length = aead_xform->aead.iv.length;
+
+ /* Select Crypto operation */
+ if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ else
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+
+ key_length = aead_xform->aead.key.length;
+ key = aead_xform->aead.key.data;
+
+ sess->aad_length = aead_xform->aead.aad_length;
+ digest_length = aead_xform->aead.digest_length;
+ } else {
+ AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication");
+ return -ENOTSUP;
+ }
+
+
+ /* IV check */
+ if (sess->iv.length != 16 && sess->iv.length != 12 &&
+ sess->iv.length != 0) {
+ AESNI_GCM_LOG(ERR, "Wrong IV length");
+ return -EINVAL;
+ }
+
+ /* Check key length and calculate GCM pre-compute. */
+ switch (key_length) {
+ case 16:
+ sess->key = AESNI_GCM_KEY_128;
+ break;
+ case 24:
+ sess->key = AESNI_GCM_KEY_192;
+ break;
+ case 32:
+ sess->key = AESNI_GCM_KEY_256;
+ break;
+ default:
+ AESNI_GCM_LOG(ERR, "Invalid key length");
+ return -EINVAL;
+ }
+
+ gcm_ops[sess->key].precomp(key, &sess->gdata_key);
+
+ /* Digest check */
+ if (digest_length != 16 &&
+ digest_length != 12 &&
+ digest_length != 8) {
+ AESNI_GCM_LOG(ERR, "Invalid digest length");
+ return -EINVAL;
+ }
+ sess->digest_length = digest_length;
+
+ return 0;
+}
+
+/** Get gcm session */
+static struct aesni_gcm_session *
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
+{
+ struct aesni_gcm_session *sess = NULL;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(sym_op->session != NULL))
+ sess = (struct aesni_gcm_session *)
+ get_sym_session_private_data(
+ sym_op->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct aesni_gcm_session *)_sess_private_data;
+
+ if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
+ sess, sym_op->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(sym_op->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/**
+ * Process a crypto operation, calling
+ * the GCM API from the multi buffer library.
+ *
+ * @param qp queue pair
+ * @param op symmetric crypto operation
+ * @param session GCM session
+ *
+ * @return
+ *
+ */
+static int
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
+ struct aesni_gcm_session *session)
+{
+ uint8_t *src, *dst;
+ uint8_t *iv_ptr;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct rte_mbuf *m_src = sym_op->m_src;
+ uint32_t offset, data_offset, data_length;
+ uint32_t part_len, total_len, data_len;
+
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
+ session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ offset = sym_op->aead.data.offset;
+ data_offset = offset;
+ data_length = sym_op->aead.data.length;
+ } else {
+ offset = sym_op->auth.data.offset;
+ data_offset = offset;
+ data_length = sym_op->auth.data.length;
+ }
+
+ RTE_ASSERT(m_src != NULL);
+
+ while (offset >= m_src->data_len && data_length != 0) {
+ offset -= m_src->data_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+ }
+
+ data_len = m_src->data_len - offset;
+ part_len = (data_len < data_length) ? data_len :
+ data_length;
+
+ /* Destination buffer is required when segmented source buffer */
+ RTE_ASSERT((part_len == data_length) ||
+ ((part_len != data_length) &&
+ (sym_op->m_dst != NULL)));
+ /* Segmented destination buffer is not supported */
+ RTE_ASSERT((sym_op->m_dst == NULL) ||
+ ((sym_op->m_dst != NULL) &&
+ rte_pktmbuf_is_contiguous(sym_op->m_dst)));
+
+
+ dst = sym_op->m_dst ?
+ rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
+ data_offset) :
+ rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+ data_offset);
+
+ src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
+ /*
+ * GCM working in 12B IV mode => 16B pre-counter block we need
+ * to set BE LSB to 1, driver expects that 16B is allocated
+ */
+ if (session->iv.length == 12) {
+ uint32_t *iv_padd = (uint32_t *)&(iv_ptr[12]);
+ *iv_padd = rte_bswap32(1);
+ }
+
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
+
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ sym_op->aead.aad.data,
+ (uint64_t)session->aad_length);
+
+ qp->ops[session->key].update_enc(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
+ (uint64_t)part_len);
+ total_len = data_length - part_len;
+
+ while (total_len) {
+ dst += part_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ qp->ops[session->key].update_enc(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ sym_op->aead.digest.data,
+ (uint64_t)session->digest_length);
+ } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ uint8_t *auth_tag = qp->temp_digest;
+
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ sym_op->aead.aad.data,
+ (uint64_t)session->aad_length);
+
+ qp->ops[session->key].update_dec(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
+ (uint64_t)part_len);
+ total_len = data_length - part_len;
+
+ while (total_len) {
+ dst += part_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ qp->ops[session->key].update_dec(&session->gdata_key,
+ &qp->gdata_ctx,
+ dst, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ auth_tag,
+ (uint64_t)session->digest_length);
+ } else if (session->op == AESNI_GMAC_OP_GENERATE) {
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ src,
+ (uint64_t)data_length);
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ sym_op->auth.digest.data,
+ (uint64_t)session->digest_length);
+ } else { /* AESNI_GMAC_OP_VERIFY */
+ uint8_t *auth_tag = qp->temp_digest;
+
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ src,
+ (uint64_t)data_length);
+
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ auth_tag,
+ (uint64_t)session->digest_length);
+ }
+
+ return 0;
+}
+
+/**
+ * Process a completed job and return rte_mbuf which job processed
+ *
+ * @param job JOB_AES_HMAC job to process
+ *
+ * @return
+ * - Returns processed mbuf which is trimmed of output digest used in
+ * verification of supplied digest in the case of a HASH_CIPHER operation
+ * - Returns NULL on invalid job
+ */
+static void
+post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
+ struct rte_crypto_op *op,
+ struct aesni_gcm_session *session)
+{
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Verify digest if required */
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
+ session->op == AESNI_GMAC_OP_VERIFY) {
+ uint8_t *digest;
+
+ uint8_t *tag = qp->temp_digest;
+
+ if (session->op == AESNI_GMAC_OP_VERIFY)
+ digest = op->sym->auth.digest.data;
+ else
+ digest = op->sym->aead.digest.data;
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+ rte_hexdump(stdout, "auth tag (orig):",
+ digest, session->digest_length);
+ rte_hexdump(stdout, "auth tag (calc):",
+ tag, session->digest_length);
+#endif
+
+ if (memcmp(tag, digest, session->digest_length) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+}
+
+/**
+ * Process a completed GCM request
+ *
+ * @param qp Queue Pair to process
+ * @param op Crypto operation
+ * @param job JOB_AES_HMAC job
+ *
+ * @return
+ * - Number of processed jobs
+ */
+static void
+handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
+ struct rte_crypto_op *op,
+ struct aesni_gcm_session *sess)
+{
+ post_process_gcm_crypto_op(qp, op, sess);
+
+ /* Free session if a session-less crypto op */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(sess, 0, sizeof(struct aesni_gcm_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+}
+
+static uint16_t
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct aesni_gcm_session *sess;
+ struct aesni_gcm_qp *qp = queue_pair;
+
+ int retval = 0;
+ unsigned int i, nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+
+ for (i = 0; i < nb_dequeued; i++) {
+
+ sess = aesni_gcm_get_session(qp, ops[i]);
+ if (unlikely(sess == NULL)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ qp->qp_stats.dequeue_err_count++;
+ break;
+ }
+
+ retval = process_gcm_crypto_op(qp, ops[i], sess);
+ if (retval < 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ qp->qp_stats.dequeue_err_count++;
+ break;
+ }
+
+ handle_completed_gcm_crypto_op(qp, ops[i], sess);
+ }
+
+ qp->qp_stats.dequeued_count += i;
+
+ return i;
+}
+
+static uint16_t
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct aesni_gcm_qp *qp = queue_pair;
+
+ unsigned int nb_enqueued;
+
+ nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+static int aesni_gcm_remove(struct rte_vdev_device *vdev);
+
+static int
+aesni_gcm_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct aesni_gcm_private *internals;
+ enum aesni_gcm_vector_mode vector_mode;
+
+ /* Check CPU for support for AES instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ AESNI_GCM_LOG(ERR, "AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ AESNI_GCM_LOG(ERR, "driver %s: create failed",
+ init_params->name);
+ return -ENODEV;
+ }
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ vector_mode = RTE_AESNI_GCM_AVX2;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ vector_mode = RTE_AESNI_GCM_AVX;
+ else
+ vector_mode = RTE_AESNI_GCM_SSE;
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_aesni_gcm_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
+ dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ switch (vector_mode) {
+ case RTE_AESNI_GCM_SSE:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ break;
+ case RTE_AESNI_GCM_AVX:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ break;
+ case RTE_AESNI_GCM_AVX2:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+ break;
+ default:
+ break;
+ }
+
+ internals = dev->data->dev_private;
+
+ internals->vector_mode = vector_mode;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
+ imb_get_version_str());
+#else
+ AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
+#endif
+
+ return 0;
+}
+
+static int
+aesni_gcm_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct aesni_gcm_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ return aesni_gcm_create(name, vdev, &init_params);
+}
+
+static int
+aesni_gcm_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver aesni_gcm_pmd_drv = {
+ .probe = aesni_gcm_probe,
+ .remove = aesni_gcm_remove
+};
+
+static struct cryptodev_driver aesni_gcm_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
+ cryptodev_driver_id);
+
+
+RTE_INIT(aesni_gcm_init_log)
+{
+ aesni_gcm_logtype_driver = rte_log_register("pmd.crypto.aesni_gcm");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
new file mode 100644
index 00000000..b6b4dd02
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct aesni_gcm_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = aesni_gcm_pmd_capabilities;
+
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ }
+}
+
+/** Release queue pair */
+static int
+aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** set a unique name for the queue pair based on it's name, dev_id and qp_id */
+static int
+aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct aesni_gcm_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "aesni_gcm_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ AESNI_GCM_LOG(INFO, "Reusing existing ring %s for processed"
+ " packets", qp->name);
+ return r;
+ }
+ AESNI_GCM_LOG(ERR, "Unable to reuse existing ring %s for processed"
+ " packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct aesni_gcm_qp *qp = NULL;
+ struct aesni_gcm_private *internals = dev->data->dev_private;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ aesni_gcm_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];
+
+ qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni gcm session structure */
+static unsigned
+aesni_gcm_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct aesni_gcm_session);
+}
+
+/** Configure a aesni gcm session from a crypto xform chain */
+static int
+aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+ struct aesni_gcm_private *internals = dev->data->dev_private;
+
+ if (unlikely(sess == NULL)) {
+ AESNI_GCM_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ AESNI_GCM_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+ ret = aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
+ sess_private_data, xform);
+ if (ret != 0) {
+ AESNI_GCM_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+aesni_gcm_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
+ .dev_configure = aesni_gcm_pmd_config,
+ .dev_start = aesni_gcm_pmd_start,
+ .dev_stop = aesni_gcm_pmd_stop,
+ .dev_close = aesni_gcm_pmd_close,
+
+ .stats_get = aesni_gcm_pmd_stats_get,
+ .stats_reset = aesni_gcm_pmd_stats_reset,
+
+ .dev_infos_get = aesni_gcm_pmd_info_get,
+
+ .queue_pair_setup = aesni_gcm_pmd_qp_setup,
+ .queue_pair_release = aesni_gcm_pmd_qp_release,
+ .queue_pair_count = aesni_gcm_pmd_qp_count,
+
+ .sym_session_get_size = aesni_gcm_pmd_sym_session_get_size,
+ .sym_session_configure = aesni_gcm_pmd_sym_session_configure,
+ .sym_session_clear = aesni_gcm_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
new file mode 100644
index 00000000..c13a12a5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_
+#define _RTE_AESNI_GCM_PMD_PRIVATE_H_
+
+#include "aesni_gcm_ops.h"
+
+/*
+ * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50,
+ * so if macro is not defined, it means that the version is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
+#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
+/**< AES-NI GCM PMD device name */
+
+/** AES-NI GCM PMD LOGTYPE DRIVER */
+int aesni_gcm_logtype_driver;
+#define AESNI_GCM_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, aesni_gcm_logtype_driver, \
+ "%s() line %u: "fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 16
+
+/** private data structure for each virtual AESNI GCM device */
+struct aesni_gcm_private {
+ enum aesni_gcm_vector_mode vector_mode;
+ /**< Vector mode */
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+struct aesni_gcm_qp {
+ const struct aesni_gcm_ops *ops;
+ /**< Architecture dependent function pointer table of the gcm APIs */
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing process packets */
+ struct gcm_context_data gdata_ctx; /* (16 * 5) + 8 = 88 B */
+ /**< GCM parameters */
+ struct rte_cryptodev_stats qp_stats; /* 8 * 4 = 32 B */
+ /**< Queue pair statistics */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+
+enum aesni_gcm_operation {
+ AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
+ AESNI_GCM_OP_AUTHENTICATED_DECRYPTION,
+ AESNI_GMAC_OP_GENERATE,
+ AESNI_GMAC_OP_VERIFY
+};
+
+/** AESNI GCM private session structure */
+struct aesni_gcm_session {
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+ uint16_t aad_length;
+ /**< AAD length */
+ uint16_t digest_length;
+ /**< Digest length */
+ enum aesni_gcm_operation op;
+ /**< GCM operation type */
+ enum aesni_gcm_key key;
+ /**< GCM key type */
+ struct gcm_key_data gdata_key;
+ /**< GCM parameters */
+};
+
+
+/**
+ * Setup GCM session parameters
+ * @param sess aesni gcm session structure
+ * @param xform crypto transform chain
+ *
+ * @return
+ * - On success returns 0
+ * - On failure returns error code < 0
+ */
+extern int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
+ struct aesni_gcm_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/**
+ * Device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
+
+
+#endif /* _RTE_AESNI_GCM_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map b/src/spdk/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
new file mode 100644
index 00000000..dc4d417b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_mb/Makefile b/src/spdk/dpdk/drivers/crypto/aesni_mb/Makefile
new file mode 100644
index 00000000..806a95eb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_mb/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_aesni_mb.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_aesni_mb_version.map
+
+# external library dependencies
+LDLIBS += -lIPSec_MB
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_mb/aesni_mb_ops.h b/src/spdk/dpdk/drivers/crypto/aesni_mb/aesni_mb_ops.h
new file mode 100644
index 00000000..5a1cba6c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Intel Corporation
+ */
+
+#ifndef _AESNI_MB_OPS_H_
+#define _AESNI_MB_OPS_H_
+
+#ifndef LINUX
+#define LINUX
+#endif
+
+#include <intel-ipsec-mb.h>
+
+enum aesni_mb_vector_mode {
+ RTE_AESNI_MB_NOT_SUPPORTED = 0,
+ RTE_AESNI_MB_SSE,
+ RTE_AESNI_MB_AVX,
+ RTE_AESNI_MB_AVX2,
+ RTE_AESNI_MB_AVX512
+};
+
+typedef void (*md5_one_block_t)(const void *data, void *digest);
+
+typedef void (*sha1_one_block_t)(const void *data, void *digest);
+typedef void (*sha224_one_block_t)(const void *data, void *digest);
+typedef void (*sha256_one_block_t)(const void *data, void *digest);
+typedef void (*sha384_one_block_t)(const void *data, void *digest);
+typedef void (*sha512_one_block_t)(const void *data, void *digest);
+
+typedef void (*aes_keyexp_128_t)
+ (const void *key, void *enc_exp_keys, void *dec_exp_keys);
+typedef void (*aes_keyexp_192_t)
+ (const void *key, void *enc_exp_keys, void *dec_exp_keys);
+typedef void (*aes_keyexp_256_t)
+ (const void *key, void *enc_exp_keys, void *dec_exp_keys);
+typedef void (*aes_xcbc_expand_key_t)
+ (const void *key, void *exp_k1, void *k2, void *k3);
+typedef void (*aes_cmac_sub_key_gen_t)
+ (const void *exp_key, void *k2, void *k3);
+typedef void (*aes_cmac_keyexp_t)
+ (const void *key, void *keyexp);
+
+/** Multi-buffer library function pointer table */
+struct aesni_mb_op_fns {
+ struct {
+ init_mb_mgr_t init_mgr;
+ /**< Initialise scheduler */
+ get_next_job_t get_next;
+ /**< Get next free job structure */
+ submit_job_t submit;
+ /**< Submit job to scheduler */
+ get_completed_job_t get_completed_job;
+ /**< Get completed job */
+ flush_job_t flush_job;
+ /**< flush jobs from manager */
+ } job;
+ /**< multi buffer manager functions */
+
+ struct {
+ struct {
+ md5_one_block_t md5;
+ /**< MD5 one block hash */
+ sha1_one_block_t sha1;
+ /**< SHA1 one block hash */
+ sha224_one_block_t sha224;
+ /**< SHA224 one block hash */
+ sha256_one_block_t sha256;
+ /**< SHA256 one block hash */
+ sha384_one_block_t sha384;
+ /**< SHA384 one block hash */
+ sha512_one_block_t sha512;
+ /**< SHA512 one block hash */
+ } one_block;
+ /**< one block hash functions */
+
+ struct {
+ aes_keyexp_128_t aes128;
+ /**< AES128 key expansions */
+ aes_keyexp_192_t aes192;
+ /**< AES192 key expansions */
+ aes_keyexp_256_t aes256;
+ /**< AES256 key expansions */
+ aes_xcbc_expand_key_t aes_xcbc;
+ /**< AES XCBC key epansions */
+ aes_cmac_sub_key_gen_t aes_cmac_subkey;
+ /**< AES CMAC subkey expansions */
+ aes_cmac_keyexp_t aes_cmac_expkey;
+ /**< AES CMAC key expansions */
+ } keyexp;
+ /**< Key expansion functions */
+ } aux;
+ /**< Auxiliary functions */
+};
+
+
+static const struct aesni_mb_op_fns job_ops[] = {
+ [RTE_AESNI_MB_NOT_SUPPORTED] = {
+ .job = {
+ NULL
+ },
+ .aux = {
+ .one_block = {
+ NULL
+ },
+ .keyexp = {
+ NULL
+ }
+ }
+ },
+ [RTE_AESNI_MB_SSE] = {
+ .job = {
+ init_mb_mgr_sse,
+ get_next_job_sse,
+ submit_job_sse,
+ get_completed_job_sse,
+ flush_job_sse
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_sse,
+ sha1_one_block_sse,
+ sha224_one_block_sse,
+ sha256_one_block_sse,
+ sha384_one_block_sse,
+ sha512_one_block_sse
+ },
+ .keyexp = {
+ aes_keyexp_128_sse,
+ aes_keyexp_192_sse,
+ aes_keyexp_256_sse,
+ aes_xcbc_expand_key_sse,
+ aes_cmac_subkey_gen_sse,
+ aes_keyexp_128_enc_sse
+ }
+ }
+ },
+ [RTE_AESNI_MB_AVX] = {
+ .job = {
+ init_mb_mgr_avx,
+ get_next_job_avx,
+ submit_job_avx,
+ get_completed_job_avx,
+ flush_job_avx
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_avx,
+ sha1_one_block_avx,
+ sha224_one_block_avx,
+ sha256_one_block_avx,
+ sha384_one_block_avx,
+ sha512_one_block_avx
+ },
+ .keyexp = {
+ aes_keyexp_128_avx,
+ aes_keyexp_192_avx,
+ aes_keyexp_256_avx,
+ aes_xcbc_expand_key_avx,
+ aes_cmac_subkey_gen_avx,
+ aes_keyexp_128_enc_avx
+ }
+ }
+ },
+ [RTE_AESNI_MB_AVX2] = {
+ .job = {
+ init_mb_mgr_avx2,
+ get_next_job_avx2,
+ submit_job_avx2,
+ get_completed_job_avx2,
+ flush_job_avx2
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_avx2,
+ sha1_one_block_avx2,
+ sha224_one_block_avx2,
+ sha256_one_block_avx2,
+ sha384_one_block_avx2,
+ sha512_one_block_avx2
+ },
+ .keyexp = {
+ aes_keyexp_128_avx2,
+ aes_keyexp_192_avx2,
+ aes_keyexp_256_avx2,
+ aes_xcbc_expand_key_avx2,
+ aes_cmac_subkey_gen_avx2,
+ aes_keyexp_128_enc_avx2
+ }
+ }
+ },
+ [RTE_AESNI_MB_AVX512] = {
+ .job = {
+ init_mb_mgr_avx512,
+ get_next_job_avx512,
+ submit_job_avx512,
+ get_completed_job_avx512,
+ flush_job_avx512
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_avx512,
+ sha1_one_block_avx512,
+ sha224_one_block_avx512,
+ sha256_one_block_avx512,
+ sha384_one_block_avx512,
+ sha512_one_block_avx512
+ },
+ .keyexp = {
+ aes_keyexp_128_avx512,
+ aes_keyexp_192_avx512,
+ aes_keyexp_256_avx512,
+ aes_xcbc_expand_key_avx512,
+ aes_cmac_subkey_gen_avx512,
+ aes_keyexp_128_enc_avx512
+ }
+ }
+ }
+};
+
+
+#endif /* _AESNI_MB_OPS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
new file mode 100644
index 00000000..03ee88be
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -0,0 +1,1046 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
+ */
+
+#include <intel-ipsec-mb.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+static uint8_t cryptodev_driver_id;
+
+typedef void (*hash_one_block_t)(const void *data, void *digest);
+typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
+
+/**
+ * Calculate the authentication pre-computes
+ *
+ * @param one_block_hash Function pointer to calculate digest on ipad/opad
+ * @param ipad Inner pad output byte array
+ * @param opad Outer pad output byte array
+ * @param hkey Authentication key
+ * @param hkey_len Authentication key length
+ * @param blocksize Block size of selected hash algo
+ */
+static void
+calculate_auth_precomputes(hash_one_block_t one_block_hash,
+ uint8_t *ipad, uint8_t *opad,
+ uint8_t *hkey, uint16_t hkey_len,
+ uint16_t blocksize)
+{
+ unsigned i, length;
+
+ uint8_t ipad_buf[blocksize] __rte_aligned(16);
+ uint8_t opad_buf[blocksize] __rte_aligned(16);
+
+ /* Setup inner and outer pads */
+ memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
+ memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
+
+ /* XOR hash key with inner and outer pads */
+ length = hkey_len > blocksize ? blocksize : hkey_len;
+
+ for (i = 0; i < length; i++) {
+ ipad_buf[i] ^= hkey[i];
+ opad_buf[i] ^= hkey[i];
+ }
+
+ /* Compute partial hashes */
+ (*one_block_hash)(ipad_buf, ipad);
+ (*one_block_hash)(opad_buf, opad);
+
+ /* Clean up stack */
+ memset(ipad_buf, 0, blocksize);
+ memset(opad_buf, 0, blocksize);
+}
+
+/** Get xform chain order */
+static enum aesni_mb_operation
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return AESNI_MB_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return AESNI_MB_OP_CIPHER_ONLY;
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return AESNI_MB_OP_CIPHER_HASH;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return AESNI_MB_OP_HASH_ONLY;
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return AESNI_MB_OP_HASH_CIPHER;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ return AESNI_MB_OP_AEAD_CIPHER_HASH;
+ else
+ return AESNI_MB_OP_AEAD_HASH_CIPHER;
+ }
+ }
+
+ return AESNI_MB_OP_NOT_SUPPORTED;
+}
+
+/** Set session authentication parameters */
+static int
+aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ hash_one_block_t hash_oneblock_fn;
+
+ if (xform == NULL) {
+ sess->auth.algo = NULL_HASH;
+ return 0;
+ }
+
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+ AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
+ return -1;
+ }
+
+ /* Select auth generate/verify */
+ sess->auth.operation = xform->auth.op;
+
+ /* Set Authentication Parameters */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
+ sess->auth.algo = AES_XCBC;
+ (*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
+ sess->auth.xcbc.k1_expanded,
+ sess->auth.xcbc.k2, sess->auth.xcbc.k3);
+ return 0;
+ }
+
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
+ sess->auth.algo = AES_CMAC;
+ (*mb_ops->aux.keyexp.aes_cmac_expkey)(xform->auth.key.data,
+ sess->auth.cmac.expkey);
+
+ (*mb_ops->aux.keyexp.aes_cmac_subkey)(sess->auth.cmac.expkey,
+ sess->auth.cmac.skey1, sess->auth.cmac.skey2);
+ return 0;
+ }
+
+
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ sess->auth.algo = MD5;
+ hash_oneblock_fn = mb_ops->aux.one_block.md5;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ sess->auth.algo = SHA1;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ sess->auth.algo = SHA_224;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ sess->auth.algo = SHA_256;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ sess->auth.algo = SHA_384;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ sess->auth.algo = SHA_512;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha512;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
+ return -ENOTSUP;
+ }
+
+ /* Calculate Authentication precomputes */
+ calculate_auth_precomputes(hash_oneblock_fn,
+ sess->auth.pads.inner, sess->auth.pads.outer,
+ xform->auth.key.data,
+ xform->auth.key.length,
+ get_auth_algo_blocksize(sess->auth.algo));
+
+ return 0;
+}
+
+/** Set session cipher parameters */
+static int
+aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ uint8_t is_aes = 0;
+ uint8_t is_3DES = 0;
+ aes_keyexp_t aes_keyexp_fn;
+
+ if (xform == NULL) {
+ sess->cipher.mode = NULL_CIPHER;
+ return 0;
+ }
+
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
+ return -EINVAL;
+ }
+
+ /* Select cipher direction */
+ switch (xform->cipher.op) {
+ case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+ sess->cipher.direction = ENCRYPT;
+ break;
+ case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+ sess->cipher.direction = DECRYPT;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
+ return -EINVAL;
+ }
+
+ /* Select cipher mode */
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.mode = CBC;
+ is_aes = 1;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ sess->cipher.mode = CNTR;
+ is_aes = 1;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ sess->cipher.mode = DOCSIS_SEC_BPI;
+ is_aes = 1;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ sess->cipher.mode = DES;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ sess->cipher.mode = DOCSIS_DES;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ sess->cipher.mode = DES3;
+ is_3DES = 1;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
+ return -ENOTSUP;
+ }
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->cipher.iv.offset;
+ sess->iv.length = xform->cipher.iv.length;
+
+ /* Check key length and choose key expansion function for AES */
+ if (is_aes) {
+ switch (xform->cipher.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+ break;
+ case AES_192_BYTES:
+ sess->cipher.key_length_in_bytes = AES_192_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
+ break;
+ case AES_256_BYTES:
+ sess->cipher.key_length_in_bytes = AES_256_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+
+ /* Expanded cipher keys */
+ (*aes_keyexp_fn)(xform->cipher.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+
+ } else if (is_3DES) {
+ uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
+ sess->cipher.exp_3des_keys.key[1],
+ sess->cipher.exp_3des_keys.key[2]};
+
+ switch (xform->cipher.key.length) {
+ case 24:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+ des_key_schedule(keys[1], xform->cipher.key.data+8);
+ des_key_schedule(keys[2], xform->cipher.key.data+16);
+
+ /* Initialize keys - 24 bytes: [K1-K2-K3] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
+ break;
+ case 16:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+ des_key_schedule(keys[1], xform->cipher.key.data+8);
+
+ /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+ break;
+ case 8:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+
+ /* Initialize keys - 8 bytes: [K1 = K2 = K3] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ sess->cipher.key_length_in_bytes = 24;
+#else
+ sess->cipher.key_length_in_bytes = 8;
+#endif
+ } else {
+ if (xform->cipher.key.length != 8) {
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+ sess->cipher.key_length_in_bytes = 8;
+
+ des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.encode,
+ xform->cipher.key.data);
+ des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.decode,
+ xform->cipher.key.data);
+ }
+
+ return 0;
+}
+
+static int
+aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ aes_keyexp_t aes_keyexp_fn;
+
+ switch (xform->aead.op) {
+ case RTE_CRYPTO_AEAD_OP_ENCRYPT:
+ sess->cipher.direction = ENCRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
+ break;
+ case RTE_CRYPTO_AEAD_OP_DECRYPT:
+ sess->cipher.direction = DECRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
+ return -EINVAL;
+ }
+
+ switch (xform->aead.algo) {
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ sess->cipher.mode = CCM;
+ sess->auth.algo = AES_CCM;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
+ return -ENOTSUP;
+ }
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->aead.iv.offset;
+ sess->iv.length = xform->aead.iv.length;
+
+ /* Check key length and choose key expansion function for AES */
+
+ switch (xform->aead.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+
+ /* Expanded cipher keys */
+ (*aes_keyexp_fn)(xform->aead.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+
+ return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+ int ret;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ switch (aesni_mb_get_chain_order(xform)) {
+ case AESNI_MB_OP_HASH_CIPHER:
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ sess->auth.digest_len = xform->auth.digest_length;
+ break;
+ case AESNI_MB_OP_CIPHER_HASH:
+ sess->chain_order = CIPHER_HASH;
+ auth_xform = xform->next;
+ cipher_xform = xform;
+ sess->auth.digest_len = xform->auth.digest_length;
+ break;
+ case AESNI_MB_OP_HASH_ONLY:
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = xform;
+ cipher_xform = NULL;
+ sess->auth.digest_len = xform->auth.digest_length;
+ break;
+ case AESNI_MB_OP_CIPHER_ONLY:
+ /*
+ * Multi buffer library operates only at two modes,
+ * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
+ * chain order depends on cipher operation: encryption is always
+ * the first operation and decryption the last one.
+ */
+ if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->chain_order = CIPHER_HASH;
+ else
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = NULL;
+ cipher_xform = xform;
+ break;
+ case AESNI_MB_OP_AEAD_CIPHER_HASH:
+ sess->chain_order = CIPHER_HASH;
+ sess->aead.aad_len = xform->aead.aad_length;
+ sess->auth.digest_len = xform->aead.digest_length;
+ aead_xform = xform;
+ break;
+ case AESNI_MB_OP_AEAD_HASH_CIPHER:
+ sess->chain_order = HASH_CIPHER;
+ sess->aead.aad_len = xform->aead.aad_length;
+ sess->auth.digest_len = xform->aead.digest_length;
+ aead_xform = xform;
+ break;
+ case AESNI_MB_OP_NOT_SUPPORTED:
+ default:
+ AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
+ return -ENOTSUP;
+ }
+
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+
+ ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform);
+ if (ret != 0) {
+ AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
+ return ret;
+ }
+
+ ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess,
+ cipher_xform);
+ if (ret != 0) {
+ AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
+ return ret;
+ }
+
+ if (aead_xform) {
+ ret = aesni_mb_set_session_aead_parameters(mb_ops, sess,
+ aead_xform);
+ if (ret != 0) {
+ AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * burst enqueue, place crypto operations on ingress queue for processing.
+ *
+ * @param __qp Queue Pair to process
+ * @param ops Crypto operations for processing
+ * @param nb_ops Number of crypto operations for processing
+ *
+ * @return
+ * - Number of crypto operations enqueued
+ */
+static uint16_t
+aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct aesni_mb_qp *qp = __qp;
+
+ unsigned int nb_enqueued;
+
+ nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+ (void **)ops, nb_ops, NULL);
+
+ qp->stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+/** Get multi buffer session */
+static inline struct aesni_mb_session *
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
+{
+ struct aesni_mb_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct aesni_mb_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct aesni_mb_session *)_sess_private_data;
+
+ if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
+ sess, op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/**
+ * Process a crypto operation and complete a JOB_AES_HMAC job structure for
+ * submission to the multi buffer library for processing.
+ *
+ * @param qp queue pair
+ * @param job JOB_AES_HMAC structure to fill
+ * @param m mbuf to process
+ *
+ * @return
+ * - Completed JOB_AES_HMAC structure pointer on success
+ * - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
+ */
+#define SPDK_CRYPTO_HACK
+static inline int
+set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
+ struct rte_crypto_op *op, uint8_t *digest_idx)
+{
+#ifdef SPDK_CRYPTO_HACK
+ struct rte_mbuf *m_src = op->sym->m_src;
+ struct rte_mbuf *m_dst = op->sym->m_dst;
+#else
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+#endif
+
+ struct aesni_mb_session *session;
+ uint16_t m_offset = 0;
+
+ session = get_session(qp, op);
+ if (session == NULL) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -1;
+ }
+
+ /* Set crypto operation */
+ job->chain_order = session->chain_order;
+
+ /* Set cipher parameters */
+ job->cipher_direction = session->cipher.direction;
+ job->cipher_mode = session->cipher.mode;
+
+ job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
+
+ if (job->cipher_mode == DES3) {
+ job->aes_enc_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ job->aes_dec_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ } else {
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ }
+
+
+
+
+ /* Set authentication parameters */
+ job->hash_alg = session->auth.algo;
+ if (job->hash_alg == AES_XCBC) {
+ job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
+ job->u.XCBC._k2 = session->auth.xcbc.k2;
+ job->u.XCBC._k3 = session->auth.xcbc.k3;
+ } else if (job->hash_alg == AES_CCM) {
+ job->u.CCM.aad = op->sym->aead.aad.data + 18;
+ job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
+ } else if (job->hash_alg == AES_CMAC) {
+ job->u.CMAC._key_expanded = session->auth.cmac.expkey;
+ job->u.CMAC._skey1 = session->auth.cmac.skey1;
+ job->u.CMAC._skey2 = session->auth.cmac.skey2;
+
+ } else {
+ job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
+ job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
+ }
+
+#ifdef SPDK_CRYPTO_HACK
+ if (!op->sym->m_dst) {
+ m_dst = m_src;
+ }
+ m_offset = op->sym->cipher.data.offset;
+#else
+ /* Mutable crypto operation parameters */
+ if (op->sym->m_dst) {
+ m_src = m_dst = op->sym->m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (odata == NULL) {
+ AESNI_MB_LOG(ERR, "failed to allocate space in destination "
+ "mbuf for source data");
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ } else {
+ m_dst = m_src;
+ if (job->hash_alg == AES_CCM)
+ m_offset = op->sym->aead.data.offset;
+ else
+ m_offset = op->sym->cipher.data.offset;
+ }
+#endif
+
+ /* Set digest output location */
+ if (job->hash_alg != NULL_HASH &&
+ session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ job->auth_tag_output = qp->temp_digests[*digest_idx];
+ *digest_idx = (*digest_idx + 1) % MAX_JOBS;
+ } else {
+ if (job->hash_alg == AES_CCM)
+ job->auth_tag_output = op->sym->aead.digest.data;
+ else
+ job->auth_tag_output = op->sym->auth.digest.data;
+ }
+
+ /*
+ * Multi-buffer library current only support returning a truncated
+ * digest length as specified in the relevant IPsec RFCs
+ */
+ if (job->hash_alg != AES_CCM && job->hash_alg != AES_CMAC)
+ job->auth_tag_output_len_in_bytes =
+ get_truncated_digest_byte_length(job->hash_alg);
+ else
+ job->auth_tag_output_len_in_bytes = session->auth.digest_len;
+
+
+ /* Set IV parameters */
+
+ job->iv_len_in_bytes = session->iv.length;
+
+ /* Data Parameter */
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+
+#ifdef SPDK_CRYPTO_HACK
+ if (!op->sym->m_dst) {
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+ } else {
+ job->dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
+ }
+#else
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+#endif
+
+ if (job->hash_alg == AES_CCM) {
+ job->cipher_start_src_offset_in_bytes =
+ op->sym->aead.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+ job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
+
+ job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset + 1);
+ } else {
+ job->cipher_start_src_offset_in_bytes =
+ op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
+
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
+
+ job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
+ }
+
+ /* Set user data to be crypto operation data struct */
+ job->user_data = op;
+
+ return 0;
+}
+
+static inline void
+verify_digest(struct aesni_mb_qp *qp __rte_unused, JOB_AES_HMAC *job,
+ struct rte_crypto_op *op) {
+ /* Verify digest if required */
+ if (job->hash_alg == AES_CCM) {
+ if (memcmp(job->auth_tag_output, op->sym->aead.digest.data,
+ job->auth_tag_output_len_in_bytes) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
+ job->auth_tag_output_len_in_bytes) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+}
+
+/**
+ * Process a completed job and return rte_mbuf which job processed
+ *
+ * @param qp Queue Pair to process
+ * @param job JOB_AES_HMAC job to process
+ *
+ * @return
+ * - Returns processed crypto operation.
+ * - Returns NULL on invalid job
+ */
+static inline struct rte_crypto_op *
+post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
+{
+ struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+ struct aesni_mb_session *sess = get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+
+ if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
+ switch (job->status) {
+ case STS_COMPLETED:
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (job->hash_alg != NULL_HASH) {
+ if (sess->auth.operation ==
+ RTE_CRYPTO_AUTH_OP_VERIFY)
+ verify_digest(qp, job, op);
+ }
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ }
+
+ /* Free session if a session-less crypto op */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(sess, 0, sizeof(struct aesni_mb_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ return op;
+}
+
+/**
+ * Process a completed JOB_AES_HMAC job and keep processing jobs until
+ * get_completed_job return NULL
+ *
+ * @param qp Queue Pair to process
+ * @param job JOB_AES_HMAC job
+ *
+ * @return
+ * - Number of processed jobs
+ */
+static unsigned
+handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op *op = NULL;
+ unsigned processed_jobs = 0;
+
+ while (job != NULL) {
+ op = post_process_mb_job(qp, job);
+
+ if (op) {
+ ops[processed_jobs++] = op;
+ qp->stats.dequeued_count++;
+ } else {
+ qp->stats.dequeue_err_count++;
+ break;
+ }
+ if (processed_jobs == nb_ops)
+ break;
+
+ job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
+ }
+
+ return processed_jobs;
+}
+
+static inline uint16_t
+flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ int processed_ops = 0;
+
+ /* Flush the remaining jobs */
+ JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);
+
+ if (job)
+ processed_ops += handle_completed_jobs(qp, job,
+ &ops[processed_ops], nb_ops - processed_ops);
+
+ return processed_ops;
+}
+
+static inline JOB_AES_HMAC *
+set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
+{
+ job->chain_order = HASH_CIPHER;
+ job->cipher_mode = NULL_CIPHER;
+ job->hash_alg = NULL_HASH;
+ job->cipher_direction = DECRYPT;
+
+ /* Set user data to be crypto operation data struct */
+ job->user_data = op;
+
+ return job;
+}
+
+static uint16_t
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct aesni_mb_qp *qp = queue_pair;
+
+ struct rte_crypto_op *op;
+ JOB_AES_HMAC *job;
+
+ int retval, processed_jobs = 0;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ uint8_t digest_idx = qp->digest_idx;
+ do {
+ /* Get next operation to process from ingress queue */
+ retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+ if (retval < 0)
+ break;
+
+ /* Get next free mb job struct from mb manager */
+ job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ if (unlikely(job == NULL)) {
+ /* if no free mb job structs we need to flush mb_mgr */
+ processed_jobs += flush_mb_mgr(qp,
+ &ops[processed_jobs],
+ (nb_ops - processed_jobs) - 1);
+
+ job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ }
+
+ retval = set_mb_job_params(job, qp, op, &digest_idx);
+ if (unlikely(retval != 0)) {
+ qp->stats.dequeue_err_count++;
+ set_job_null_op(job, op);
+ }
+
+ /* Submit job to multi-buffer for processing */
+ job = (*qp->op_fns->job.submit)(&qp->mb_mgr);
+
+ /*
+ * If submit returns a processed job then handle it,
+ * before submitting subsequent jobs
+ */
+ if (job)
+ processed_jobs += handle_completed_jobs(qp, job,
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
+
+ } while (processed_jobs < nb_ops);
+
+ qp->digest_idx = digest_idx;
+
+ if (processed_jobs < 1)
+ processed_jobs += flush_mb_mgr(qp,
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
+
+ return processed_jobs;
+}
+
+static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_aesni_mb_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct aesni_mb_private *internals;
+ enum aesni_mb_vector_mode vector_mode;
+
+ /* Check CPU for support for AES instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ AESNI_MB_LOG(ERR, "AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
+ return -ENODEV;
+ }
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ vector_mode = RTE_AESNI_MB_AVX512;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ vector_mode = RTE_AESNI_MB_AVX2;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ vector_mode = RTE_AESNI_MB_AVX;
+ else
+ vector_mode = RTE_AESNI_MB_SSE;
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_aesni_mb_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
+ dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI;
+
+ switch (vector_mode) {
+ case RTE_AESNI_MB_SSE:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ break;
+ case RTE_AESNI_MB_AVX:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ break;
+ case RTE_AESNI_MB_AVX2:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+ break;
+ case RTE_AESNI_MB_AVX512:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+ break;
+ default:
+ break;
+ }
+
+ /* Set vector instructions mode supported */
+ internals = dev->data->dev_private;
+
+ internals->vector_mode = vector_mode;
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
+ imb_get_version_str());
+#else
+ AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
+#endif
+
+ return 0;
+}
+
+static int
+cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct aesni_mb_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name, *args;
+ int retval;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ args = rte_vdev_device_args(vdev);
+
+ retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",
+ args);
+ return -EINVAL;
+ }
+
+ return cryptodev_aesni_mb_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
+ .probe = cryptodev_aesni_mb_probe,
+ .remove = cryptodev_aesni_mb_remove
+};
+
+static struct cryptodev_driver aesni_mb_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
+ cryptodev_aesni_mb_pmd_drv.driver,
+ cryptodev_driver_id);
+
+RTE_INIT(aesni_mb_init_log)
+{
+ aesni_mb_logtype_driver = rte_log_register("pmd.crypto.aesni_mb");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
new file mode 100644
index 00000000..e5e49547
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -0,0 +1,621 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+
+static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 14,
+ .max = 14,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES XCBC HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES DOCSIS BPI */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES DOCSIS BPI */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 16,
+ .increment = 2
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 46,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 7,
+ .max = 13,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* AES CMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+
+/** Get device info */
+static void
+aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct aesni_mb_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = aesni_mb_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ }
+}
+
+/** Release queue pair */
+static int
+aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+ struct rte_ring *r = NULL;
+
+ if (qp != NULL) {
+ r = rte_ring_lookup(qp->name);
+ if (r)
+ rte_ring_free(r);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** set a unique name for the queue pair based on it's name, dev_id and qp_id */
+static int
+aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct aesni_mb_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "aesni_mb_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
+ const char *str, unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+ char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ unsigned int n = snprintf(ring_name, sizeof(ring_name),
+ "%s_%s",
+ qp->name, str);
+
+ if (n >= sizeof(ring_name))
+ return NULL;
+
+ r = rte_ring_lookup(ring_name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
+ ring_name);
+ return r;
+ }
+
+ AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
+ ring_name);
+ return NULL;
+ }
+
+ return rte_ring_create(ring_name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct aesni_mb_qp *qp = NULL;
+ struct aesni_mb_private *internals = dev->data->dev_private;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ aesni_mb_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+
+ qp->op_fns = &job_ops[internals->vector_mode];
+
+ qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
+ "ingress", qp_conf->nb_descriptors, socket_id);
+ if (qp->ingress_queue == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "digest_mp_%u_%u", dev->data->dev_id, qp_id);
+
+ /* Initialise multi-buffer manager */
+ (*qp->op_fns->job.init_mgr)(&qp->mb_mgr);
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni multi-buffer session structure */
+static unsigned
+aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct aesni_mb_session);
+}
+
+/** Configure a aesni multi-buffer session from a crypto xform chain */
+static int
+aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ struct aesni_mb_private *internals = dev->data->dev_private;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ AESNI_MB_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ AESNI_MB_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
+ sess_private_data, xform);
+ if (ret != 0) {
+ AESNI_MB_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct aesni_mb_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops aesni_mb_pmd_ops = {
+ .dev_configure = aesni_mb_pmd_config,
+ .dev_start = aesni_mb_pmd_start,
+ .dev_stop = aesni_mb_pmd_stop,
+ .dev_close = aesni_mb_pmd_close,
+
+ .stats_get = aesni_mb_pmd_stats_get,
+ .stats_reset = aesni_mb_pmd_stats_reset,
+
+ .dev_infos_get = aesni_mb_pmd_info_get,
+
+ .queue_pair_setup = aesni_mb_pmd_qp_setup,
+ .queue_pair_release = aesni_mb_pmd_qp_release,
+ .queue_pair_count = aesni_mb_pmd_qp_count,
+
+ .sym_session_get_size = aesni_mb_pmd_sym_session_get_size,
+ .sym_session_configure = aesni_mb_pmd_sym_session_configure,
+ .sym_session_clear = aesni_mb_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
new file mode 100644
index 00000000..1d7ea852
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2016 Intel Corporation
+ */
+
+#ifndef _RTE_AESNI_MB_PMD_PRIVATE_H_
+#define _RTE_AESNI_MB_PMD_PRIVATE_H_
+
+#include "aesni_mb_ops.h"
+
+/*
+ * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50,
+ * so if macro is not defined, it means that the version is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
+#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
+/**< AES-NI Multi buffer PMD device name */
+
+/** AESNI_MB PMD LOGTYPE DRIVER */
+int aesni_mb_logtype_driver;
+
+#define AESNI_MB_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, aesni_mb_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+
+#define HMAC_IPAD_VALUE (0x36)
+#define HMAC_OPAD_VALUE (0x5C)
+
+/* Maximum length for digest (SHA-512 truncated needs 32 bytes) */
+#define DIGEST_LENGTH_MAX 32
+static const unsigned auth_blocksize[] = {
+ [MD5] = 64,
+ [SHA1] = 64,
+ [SHA_224] = 64,
+ [SHA_256] = 64,
+ [SHA_384] = 128,
+ [SHA_512] = 128,
+ [AES_XCBC] = 16,
+ [AES_CCM] = 16,
+};
+
+/**
+ * Get the blocksize in bytes for a specified authentication algorithm
+ *
+ * @Note: this function will not return a valid value for a non-valid
+ * authentication algorithm
+ */
+static inline unsigned
+get_auth_algo_blocksize(JOB_HASH_ALG algo)
+{
+ return auth_blocksize[algo];
+}
+
+static const unsigned auth_truncated_digest_byte_lengths[] = {
+ [MD5] = 12,
+ [SHA1] = 12,
+ [SHA_224] = 14,
+ [SHA_256] = 16,
+ [SHA_384] = 24,
+ [SHA_512] = 32,
+ [AES_XCBC] = 12,
+ [AES_CMAC] = 16,
+ [AES_CCM] = 8,
+ [NULL_HASH] = 0
+};
+
+/**
+ * Get the IPsec specified truncated length in bytes of the HMAC digest for a
+ * specified authentication algorithm
+ *
+ * @Note: this function will not return a valid value for a non-valid
+ * authentication algorithm
+ */
+static inline unsigned
+get_truncated_digest_byte_length(JOB_HASH_ALG algo)
+{
+ return auth_truncated_digest_byte_lengths[algo];
+}
+
+static const unsigned auth_digest_byte_lengths[] = {
+ [MD5] = 16,
+ [SHA1] = 20,
+ [SHA_224] = 28,
+ [SHA_256] = 32,
+ [SHA_384] = 48,
+ [SHA_512] = 64,
+ [AES_XCBC] = 16,
+ [AES_CMAC] = 16,
+ [NULL_HASH] = 0
+};
+
+/**
+ * Get the output digest size in bytes for a specified authentication algorithm
+ *
+ * @Note: this function will not return a valid value for a non-valid
+ * authentication algorithm
+ */
+static inline unsigned
+get_digest_byte_length(JOB_HASH_ALG algo)
+{
+ return auth_digest_byte_lengths[algo];
+}
+
+enum aesni_mb_operation {
+ AESNI_MB_OP_HASH_CIPHER,
+ AESNI_MB_OP_CIPHER_HASH,
+ AESNI_MB_OP_HASH_ONLY,
+ AESNI_MB_OP_CIPHER_ONLY,
+ AESNI_MB_OP_AEAD_HASH_CIPHER,
+ AESNI_MB_OP_AEAD_CIPHER_HASH,
+ AESNI_MB_OP_NOT_SUPPORTED
+};
+
+/** private data structure for each virtual AESNI device */
+struct aesni_mb_private {
+ enum aesni_mb_vector_mode vector_mode;
+ /**< CPU vector instruction set mode */
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+/** AESNI Multi buffer queue pair */
+struct aesni_mb_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ const struct aesni_mb_op_fns *op_fns;
+ /**< Vector mode dependent pointer table of the multi-buffer APIs */
+ MB_MGR mb_mgr;
+ /**< Multi-buffer instance */
+ struct rte_ring *ingress_queue;
+ /**< Ring for placing operations ready for processing */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+ uint8_t digest_idx;
+ /**< Index of the next slot to be used in temp_digests,
+ * to store the digest for a given operation
+ */
+ uint8_t temp_digests[MAX_JOBS][DIGEST_LENGTH_MAX];
+ /**< Buffers used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+/** AES-NI multi-buffer private session structure */
+struct aesni_mb_session {
+ JOB_CHAIN_ORDER chain_order;
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+
+ /** Cipher Parameters */
+ struct {
+ /** Cipher direction - encrypt / decrypt */
+ JOB_CIPHER_DIRECTION direction;
+ /** Cipher mode - CBC / Counter */
+ JOB_CIPHER_MODE mode;
+
+ uint64_t key_length_in_bytes;
+
+ union {
+ struct {
+ uint32_t encode[60] __rte_aligned(16);
+ /**< encode key */
+ uint32_t decode[60] __rte_aligned(16);
+ /**< decode key */
+ } expanded_aes_keys;
+ struct {
+ const void *ks_ptr[3];
+ uint64_t key[3][16];
+ } exp_3des_keys;
+ };
+ /**< Expanded AES keys - Allocating space to
+ * contain the maximum expanded key size which
+ * is 240 bytes for 256 bit AES, calculate by:
+ * ((key size (bytes)) *
+ * ((number of rounds) + 1))
+ */
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ JOB_HASH_ALG algo; /**< Authentication Algorithm */
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ union {
+ struct {
+ uint8_t inner[128] __rte_aligned(16);
+ /**< inner pad */
+ uint8_t outer[128] __rte_aligned(16);
+ /**< outer pad */
+ } pads;
+ /**< HMAC Authentication pads -
+ * allocating space for the maximum pad
+ * size supported which is 128 bytes for
+ * SHA512
+ */
+
+ struct {
+ uint32_t k1_expanded[44] __rte_aligned(16);
+ /**< k1 (expanded key). */
+ uint8_t k2[16] __rte_aligned(16);
+ /**< k2. */
+ uint8_t k3[16] __rte_aligned(16);
+ /**< k3. */
+ } xcbc;
+
+ struct {
+ uint32_t expkey[60] __rte_aligned(16);
+ /**< k1 (expanded key). */
+ uint32_t skey1[4] __rte_aligned(16);
+ /**< k2. */
+ uint32_t skey2[4] __rte_aligned(16);
+ /**< k3. */
+ } cmac;
+ /**< Expanded XCBC authentication keys */
+ };
+ /** digest size */
+ uint16_t digest_len;
+
+ } auth;
+ struct {
+ /** AAD data length */
+ uint16_t aad_len;
+ } aead;
+} __rte_cache_aligned;
+
+
+/**
+ *
+ */
+extern int
+aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops;
+
+
+
+#endif /* _RTE_AESNI_MB_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map
new file mode 100644
index 00000000..ad607bbe
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map
@@ -0,0 +1,3 @@
+DPDK_2.2 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/armv8/Makefile b/src/spdk/dpdk/drivers/crypto/armv8/Makefile
new file mode 100644
index 00000000..e862af72
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/armv8/Makefile
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(ARMV8_CRYPTO_LIB_PATH),)
+$(error "Please define ARMV8_CRYPTO_LIB_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_armv8.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_armv8_version.map
+
+# external library dependencies
+CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)
+CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)/asm/include
+LDLIBS += -L$(ARMV8_CRYPTO_LIB_PATH) -larmv8_crypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd.c b/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd.c
new file mode 100644
index 00000000..9d15fee5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -0,0 +1,851 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "armv8_crypto_defs.h"
+
+#include "rte_armv8_pmd_private.h"
+
+static uint8_t cryptodev_driver_id;
+
+static int cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev);
+
+/**
+ * Pointers to the supported combined mode crypto functions are stored
+ * in the static tables. Each combined (chained) cryptographic operation
+ * can be described by a set of numbers:
+ * - order: order of operations (cipher, auth) or (auth, cipher)
+ * - direction: encryption or decryption
+ * - calg: cipher algorithm such as AES_CBC, AES_CTR, etc.
+ * - aalg: authentication algorithm such as SHA1, SHA256, etc.
+ * - keyl: cipher key length, for example 128, 192, 256 bits
+ *
+ * In order to quickly acquire each function pointer based on those numbers,
+ * a hierarchy of arrays is maintained. The final level, 3D array is indexed
+ * by the combined mode function parameters only (cipher algorithm,
+ * authentication algorithm and key length).
+ *
+ * This gives 3 memory accesses to obtain a function pointer instead of
+ * traversing the array manually and comparing function parameters on each loop.
+ *
+ * +--+CRYPTO_FUNC
+ * +--+ENC|
+ * +--+CA|
+ * | +--+DEC
+ * ORDER|
+ * | +--+ENC
+ * +--+AC|
+ * +--+DEC
+ *
+ */
+
+/**
+ * 3D array type for ARM Combined Mode crypto functions pointers.
+ * CRYPTO_CIPHER_MAX: max cipher ID number
+ * CRYPTO_AUTH_MAX: max auth ID number
+ * CRYPTO_CIPHER_KEYLEN_MAX: max key length ID number
+ */
+typedef const crypto_func_t
+crypto_func_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_AUTH_MAX][CRYPTO_CIPHER_KEYLEN_MAX];
+
+/* Evaluate to key length definition */
+#define KEYL(keyl) (ARMV8_CRYPTO_CIPHER_KEYLEN_ ## keyl)
+
+/* Local aliases for supported ciphers */
+#define CIPH_AES_CBC RTE_CRYPTO_CIPHER_AES_CBC
+/* Local aliases for supported hashes */
+#define AUTH_SHA1_HMAC RTE_CRYPTO_AUTH_SHA1_HMAC
+#define AUTH_SHA256_HMAC RTE_CRYPTO_AUTH_SHA256_HMAC
+
+/**
+ * Arrays containing pointers to particular cryptographic,
+ * combined mode functions.
+ * crypto_op_ca_encrypt: cipher (encrypt), authenticate
+ * crypto_op_ca_decrypt: cipher (decrypt), authenticate
+ * crypto_op_ac_encrypt: authenticate, cipher (encrypt)
+ * crypto_op_ac_decrypt: authenticate, cipher (decrypt)
+ */
+static const crypto_func_tbl_t
+crypto_op_ca_encrypt = {
+ /* [cipher alg][auth alg][key length] = crypto_function, */
+ [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = aes128cbc_sha1_hmac,
+ [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = aes128cbc_sha256_hmac,
+};
+
+static const crypto_func_tbl_t
+crypto_op_ca_decrypt = {
+ NULL
+};
+
+static const crypto_func_tbl_t
+crypto_op_ac_encrypt = {
+ NULL
+};
+
+static const crypto_func_tbl_t
+crypto_op_ac_decrypt = {
+ /* [cipher alg][auth alg][key length] = crypto_function, */
+ [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = sha1_hmac_aes128cbc_dec,
+ [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = sha256_hmac_aes128cbc_dec,
+};
+
+/**
+ * Arrays containing pointers to particular cryptographic function sets,
+ * covering given cipher operation directions (encrypt, decrypt)
+ * for each order of cipher and authentication pairs.
+ */
+static const crypto_func_tbl_t *
+crypto_cipher_auth[] = {
+ &crypto_op_ca_encrypt,
+ &crypto_op_ca_decrypt,
+ NULL
+};
+
+static const crypto_func_tbl_t *
+crypto_auth_cipher[] = {
+ &crypto_op_ac_encrypt,
+ &crypto_op_ac_decrypt,
+ NULL
+};
+
+/**
+ * Top level array containing pointers to particular cryptographic
+ * function sets, covering given order of chained operations.
+ * crypto_cipher_auth: cipher first, authenticate after
+ * crypto_auth_cipher: authenticate first, cipher after
+ */
+static const crypto_func_tbl_t **
+crypto_chain_order[] = {
+ crypto_cipher_auth,
+ crypto_auth_cipher,
+ NULL
+};
+
+/**
+ * Extract particular combined mode crypto function from the 3D array.
+ */
+#define CRYPTO_GET_ALGO(order, cop, calg, aalg, keyl) \
+({ \
+ crypto_func_tbl_t *func_tbl = \
+ (crypto_chain_order[(order)])[(cop)]; \
+ \
+ ((*func_tbl)[(calg)][(aalg)][KEYL(keyl)]); \
+})
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * 2D array type for ARM key schedule functions pointers.
+ * CRYPTO_CIPHER_MAX: max cipher ID number
+ * CRYPTO_CIPHER_KEYLEN_MAX: max key length ID number
+ */
+typedef const crypto_key_sched_t
+crypto_key_sched_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_CIPHER_KEYLEN_MAX];
+
+static const crypto_key_sched_tbl_t
+crypto_key_sched_encrypt = {
+ /* [cipher alg][key length] = key_expand_func, */
+ [CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_enc,
+};
+
+static const crypto_key_sched_tbl_t
+crypto_key_sched_decrypt = {
+ /* [cipher alg][key length] = key_expand_func, */
+ [CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_dec,
+};
+
+/**
+ * Top level array containing pointers to particular key generation
+ * function sets, covering given operation direction.
+ * crypto_key_sched_encrypt: keys for encryption
+ * crypto_key_sched_decrypt: keys for decryption
+ */
+static const crypto_key_sched_tbl_t *
+crypto_key_sched_dir[] = {
+ &crypto_key_sched_encrypt,
+ &crypto_key_sched_decrypt,
+ NULL
+};
+
+/**
+ * Extract particular combined mode crypto function from the 3D array.
+ */
+#define CRYPTO_GET_KEY_SCHED(cop, calg, keyl) \
+({ \
+ crypto_key_sched_tbl_t *ks_tbl = crypto_key_sched_dir[(cop)]; \
+ \
+ ((*ks_tbl)[(calg)][KEYL(keyl)]); \
+})
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ *------------------------------------------------------------------------------
+ * Session Prepare
+ *------------------------------------------------------------------------------
+ */
+
+/** Get xform chain order */
+static enum armv8_crypto_chain_order
+armv8_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+
+ /*
+ * This driver currently covers only chained operations.
+ * Ignore only cipher or only authentication operations
+ * or chains longer than 2 xform structures.
+ */
+ if (xform->next == NULL || xform->next->next != NULL)
+ return ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ARMV8_CRYPTO_CHAIN_AUTH_CIPHER;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ARMV8_CRYPTO_CHAIN_CIPHER_AUTH;
+ }
+
+ return ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED;
+}
+
+static inline void
+auth_hmac_pad_prepare(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ size_t i;
+
+ /* Generate i_key_pad and o_key_pad */
+ memset(sess->auth.hmac.i_key_pad, 0, sizeof(sess->auth.hmac.i_key_pad));
+ rte_memcpy(sess->auth.hmac.i_key_pad, sess->auth.hmac.key,
+ xform->auth.key.length);
+ memset(sess->auth.hmac.o_key_pad, 0, sizeof(sess->auth.hmac.o_key_pad));
+ rte_memcpy(sess->auth.hmac.o_key_pad, sess->auth.hmac.key,
+ xform->auth.key.length);
+ /*
+ * XOR key with IPAD/OPAD values to obtain i_key_pad
+ * and o_key_pad.
+ * Byte-by-byte operation may seem to be the less efficient
+ * here but in fact it's the opposite.
+ * The result ASM code is likely operate on NEON registers
+ * (load auth key to Qx, load IPAD/OPAD to multiple
+ * elements of Qy, eor 128 bits at once).
+ */
+ for (i = 0; i < SHA_BLOCK_MAX; i++) {
+ sess->auth.hmac.i_key_pad[i] ^= HMAC_IPAD_VALUE;
+ sess->auth.hmac.o_key_pad[i] ^= HMAC_OPAD_VALUE;
+ }
+}
+
+static inline int
+auth_set_prerequisites(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ uint8_t partial[64] = { 0 };
+ int error;
+
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ /*
+ * Generate authentication key, i_key_pad and o_key_pad.
+ */
+ /* Zero memory under key */
+ memset(sess->auth.hmac.key, 0, SHA1_BLOCK_SIZE);
+
+ /*
+ * Now copy the given authentication key to the session
+ * key.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
+
+ /* Prepare HMAC padding: key|pattern */
+ auth_hmac_pad_prepare(sess, xform);
+ /*
+ * Calculate partial hash values for i_key_pad and o_key_pad.
+ * Will be used as initialization state for final HMAC.
+ */
+ error = sha1_block_partial(NULL, sess->auth.hmac.i_key_pad,
+ partial, SHA1_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.i_key_pad, partial, SHA1_BLOCK_SIZE);
+
+ error = sha1_block_partial(NULL, sess->auth.hmac.o_key_pad,
+ partial, SHA1_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.o_key_pad, partial, SHA1_BLOCK_SIZE);
+
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ /*
+ * Generate authentication key, i_key_pad and o_key_pad.
+ */
+ /* Zero memory under key */
+ memset(sess->auth.hmac.key, 0, SHA256_BLOCK_SIZE);
+
+ /*
+ * Now copy the given authentication key to the session
+ * key.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
+
+ /* Prepare HMAC padding: key|pattern */
+ auth_hmac_pad_prepare(sess, xform);
+ /*
+ * Calculate partial hash values for i_key_pad and o_key_pad.
+ * Will be used as initialization state for final HMAC.
+ */
+ error = sha256_block_partial(NULL, sess->auth.hmac.i_key_pad,
+ partial, SHA256_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.i_key_pad, partial, SHA256_BLOCK_SIZE);
+
+ error = sha256_block_partial(NULL, sess->auth.hmac.o_key_pad,
+ partial, SHA256_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.o_key_pad, partial, SHA256_BLOCK_SIZE);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline int
+cipher_set_prerequisites(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ crypto_key_sched_t cipher_key_sched;
+
+ cipher_key_sched = sess->cipher.key_sched;
+ if (likely(cipher_key_sched != NULL)) {
+ /* Set up cipher session key */
+ cipher_key_sched(sess->cipher.key.data, xform->cipher.key.data);
+ }
+
+ return 0;
+}
+
+static int
+armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *cipher_xform,
+ const struct rte_crypto_sym_xform *auth_xform)
+{
+ enum armv8_crypto_chain_order order;
+ enum armv8_crypto_cipher_operation cop;
+ enum rte_crypto_cipher_algorithm calg;
+ enum rte_crypto_auth_algorithm aalg;
+
+ /* Validate and prepare scratch order of combined operations */
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+ order = sess->chain_order;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+ /* Select cipher direction */
+ sess->cipher.direction = cipher_xform->cipher.op;
+ /* Select cipher key */
+ sess->cipher.key.length = cipher_xform->cipher.key.length;
+ /* Set cipher direction */
+ cop = sess->cipher.direction;
+ /* Set cipher algorithm */
+ calg = cipher_xform->cipher.algo;
+
+ /* Select cipher algo */
+ switch (calg) {
+ /* Cover supported cipher algorithms */
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.algo = calg;
+ /* IV len is always 16 bytes (block size) for AES CBC */
+ sess->cipher.iv.length = 16;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+ /* Select auth generate/verify */
+ sess->auth.operation = auth_xform->auth.op;
+
+ /* Select auth algo */
+ switch (auth_xform->auth.algo) {
+ /* Cover supported hash algorithms */
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC: /* Fall through */
+ aalg = auth_xform->auth.algo;
+ sess->auth.mode = ARMV8_CRYPTO_AUTH_AS_HMAC;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ /* Set the digest length */
+ sess->auth.digest_length = auth_xform->auth.digest_length;
+
+ /* Verify supported key lengths and extract proper algorithm */
+ switch (cipher_xform->cipher.key.length << 3) {
+ case 128:
+ sess->crypto_func =
+ CRYPTO_GET_ALGO(order, cop, calg, aalg, 128);
+ sess->cipher.key_sched =
+ CRYPTO_GET_KEY_SCHED(cop, calg, 128);
+ break;
+ case 192:
+ case 256:
+ /* These key lengths are not supported yet */
+ default: /* Fall through */
+ sess->crypto_func = NULL;
+ sess->cipher.key_sched = NULL;
+ return -ENOTSUP;
+ }
+
+ if (unlikely(sess->crypto_func == NULL)) {
+ /*
+ * If we got here that means that there must be a bug
+ * in the algorithms selection above. Nevertheless keep
+ * it here to catch bug immediately and avoid NULL pointer
+ * dereference in OPs processing.
+ */
+ ARMV8_CRYPTO_LOG_ERR(
+ "No appropriate crypto function for given parameters");
+ return -EINVAL;
+ }
+
+ /* Set up cipher session prerequisites */
+ if (cipher_set_prerequisites(sess, cipher_xform) != 0)
+ return -EINVAL;
+
+ /* Set up authentication session prerequisites */
+ if (auth_set_prerequisites(sess, auth_xform) != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+armv8_crypto_set_session_parameters(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ bool is_chained_op;
+ int ret;
+
+ /* Filter out spurious/broken requests */
+ if (xform == NULL)
+ return -EINVAL;
+
+ sess->chain_order = armv8_crypto_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ is_chained_op = true;
+ break;
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ is_chained_op = true;
+ break;
+ default:
+ is_chained_op = false;
+ return -ENOTSUP;
+ }
+
+ /* Set IV offset */
+ sess->cipher.iv.offset = cipher_xform->cipher.iv.offset;
+
+ if (is_chained_op) {
+ ret = armv8_crypto_set_session_chained_parameters(sess,
+ cipher_xform, auth_xform);
+ if (unlikely(ret != 0)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "Invalid/unsupported chained (cipher/auth) parameters");
+ return ret;
+ }
+ } else {
+ ARMV8_CRYPTO_LOG_ERR("Invalid/unsupported operation");
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+/** Provide session for operation */
+static inline struct armv8_crypto_session *
+get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
+{
+ struct armv8_crypto_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ /* get existing session */
+ if (likely(op->sym->session != NULL)) {
+ sess = (struct armv8_crypto_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ }
+ } else {
+ /* provide internal session */
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct armv8_crypto_session *)_sess_private_data;
+
+ if (unlikely(armv8_crypto_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Process Operations
+ *------------------------------------------------------------------------------
+ */
+
+/*----------------------------------------------------------------------------*/
+
+/** Process cipher operation */
+static inline void
+process_armv8_chained_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
+ struct armv8_crypto_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ crypto_func_t crypto_func;
+ crypto_arg_t arg;
+ struct rte_mbuf *m_asrc, *m_adst;
+ uint8_t *csrc, *cdst;
+ uint8_t *adst, *asrc;
+ uint64_t clen, alen;
+ int error;
+
+ clen = op->sym->cipher.data.length;
+ alen = op->sym->auth.data.length;
+
+ csrc = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->cipher.data.offset);
+ cdst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ m_asrc = m_adst = mbuf_dst;
+ break;
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+ m_asrc = mbuf_src;
+ m_adst = mbuf_dst;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+ asrc = rte_pktmbuf_mtod_offset(m_asrc, uint8_t *,
+ op->sym->auth.data.offset);
+
+ switch (sess->auth.mode) {
+ case ARMV8_CRYPTO_AUTH_AS_AUTH:
+ /* Nothing to do here, just verify correct option */
+ break;
+ case ARMV8_CRYPTO_AUTH_AS_HMAC:
+ arg.digest.hmac.key = sess->auth.hmac.key;
+ arg.digest.hmac.i_key_pad = sess->auth.hmac.i_key_pad;
+ arg.digest.hmac.o_key_pad = sess->auth.hmac.o_key_pad;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ adst = op->sym->auth.digest.data;
+ if (adst == NULL) {
+ adst = rte_pktmbuf_mtod_offset(m_adst,
+ uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ }
+ } else {
+ adst = qp->temp_digest;
+ }
+
+ arg.cipher.iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher.iv.offset);
+ arg.cipher.key = sess->cipher.key.data;
+ /* Acquire combined mode function */
+ crypto_func = sess->crypto_func;
+ ARMV8_CRYPTO_ASSERT(crypto_func != NULL);
+ error = crypto_func(csrc, cdst, clen, asrc, adst, alen, &arg);
+ if (error != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ if (memcmp(adst, op->sym->auth.digest.data,
+ sess->auth.digest_length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+ }
+}
+
+/** Process crypto operation for mbuf */
+static inline int
+process_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
+ struct armv8_crypto_session *sess)
+{
+ struct rte_mbuf *msrc, *mdst;
+
+ msrc = op->sym->m_src;
+ mdst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER: /* Fall through */
+ process_armv8_chained_op(qp, op, sess, msrc, mdst);
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+
+ /* Free session if a session-less crypto op */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(sess, 0, sizeof(struct armv8_crypto_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ERROR))
+ return -1;
+
+ return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * PMD Framework
+ *------------------------------------------------------------------------------
+ */
+
+/** Enqueue burst */
+static uint16_t
+armv8_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct armv8_crypto_session *sess;
+ struct armv8_crypto_qp *qp = queue_pair;
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i,
+ NULL);
+ qp->stats.enqueued_count += retval;
+
+ return retval;
+
+enqueue_err:
+ retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i,
+ NULL);
+ if (ops[i] != NULL)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+ qp->stats.enqueue_err_count++;
+ return retval;
+}
+
+/** Dequeue burst */
+static uint16_t
+armv8_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct armv8_crypto_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)ops, nb_ops, NULL);
+ qp->stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/** Create ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct armv8_crypto_private *internals;
+
+ /* Check CPU for support for AES instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ /* Check CPU for support for SHA instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA1) ||
+ !rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA2)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "SHA1/SHA2 instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ /* Check CPU for support for Advance SIMD instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "Advanced SIMD instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ ARMV8_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_armv8_crypto_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = armv8_crypto_pmd_dequeue_burst;
+ dev->enqueue_burst = armv8_crypto_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_NEON |
+ RTE_CRYPTODEV_FF_CPU_ARM_CE;
+
+ /* Set vector instructions mode supported */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+
+ return 0;
+
+init_error:
+ ARMV8_CRYPTO_LOG_ERR(
+ "driver %s: cryptodev_armv8_crypto_create failed",
+ init_params->name);
+
+ cryptodev_armv8_crypto_uninit(vdev);
+ return -EFAULT;
+}
+
+/** Initialise ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct armv8_crypto_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ return cryptodev_armv8_crypto_create(name, vdev, &init_params);
+}
+
+/** Uninitialise ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD,
+ "Closing ARMv8 crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver armv8_crypto_pmd_drv = {
+ .probe = cryptodev_armv8_crypto_init,
+ .remove = cryptodev_armv8_crypto_uninit
+};
+
+static struct cryptodev_driver armv8_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ARMV8_PMD, cryptodev_armv8_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv.driver,
+ cryptodev_driver_id);
diff --git a/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_ops.c
new file mode 100644
index 00000000..ae03117e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "armv8_crypto_defs.h"
+
+#include "rte_armv8_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities
+ armv8_crypto_pmd_capabilities[] = {
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 20,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 32,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+armv8_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+armv8_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+armv8_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+armv8_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+armv8_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+armv8_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+
+/** Get device info */
+static void
+armv8_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct armv8_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = armv8_crypto_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ }
+}
+
+/** Release queue pair */
+static int
+armv8_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+
+ return 0;
+}
+
+/** set a unique name for the queue pair based on it's name, dev_id and qp_id */
+static int
+armv8_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct armv8_crypto_qp *qp)
+{
+ unsigned int n;
+
+ n = snprintf(qp->name, sizeof(qp->name), "armv8_crypto_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+armv8_crypto_pmd_qp_create_processed_ops_ring(struct armv8_crypto_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ARMV8_CRYPTO_LOG_INFO(
+ "Reusing existing ring %s for processed ops",
+ qp->name);
+ return r;
+ }
+
+ ARMV8_CRYPTO_LOG_ERR(
+ "Unable to reuse existing ring %s for processed ops",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+
+/** Setup a queue pair */
+static int
+armv8_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct armv8_crypto_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ armv8_crypto_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("ARMv8 PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (armv8_crypto_pmd_qp_set_unique_name(dev, qp) != 0)
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = armv8_crypto_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure */
+static unsigned
+armv8_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct armv8_crypto_session);
+}
+
+/** Configure the session from a crypto xform chain */
+static int
+armv8_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ ARMV8_CRYPTO_LOG_ERR("invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = armv8_crypto_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ ARMV8_CRYPTO_LOG_ERR("failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+armv8_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct armv8_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops armv8_crypto_pmd_ops = {
+ .dev_configure = armv8_crypto_pmd_config,
+ .dev_start = armv8_crypto_pmd_start,
+ .dev_stop = armv8_crypto_pmd_stop,
+ .dev_close = armv8_crypto_pmd_close,
+
+ .stats_get = armv8_crypto_pmd_stats_get,
+ .stats_reset = armv8_crypto_pmd_stats_reset,
+
+ .dev_infos_get = armv8_crypto_pmd_info_get,
+
+ .queue_pair_setup = armv8_crypto_pmd_qp_setup,
+ .queue_pair_release = armv8_crypto_pmd_qp_release,
+ .queue_pair_count = armv8_crypto_pmd_qp_count,
+
+ .sym_session_get_size = armv8_crypto_pmd_sym_session_get_size,
+ .sym_session_configure = armv8_crypto_pmd_sym_session_configure,
+ .sym_session_clear = armv8_crypto_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops = &armv8_crypto_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_private.h b/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_private.h
new file mode 100644
index 00000000..7feb021d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _RTE_ARMV8_PMD_PRIVATE_H_
+#define _RTE_ARMV8_PMD_PRIVATE_H_
+
+#define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8
+/**< ARMv8 Crypto PMD device name */
+
+#define ARMV8_CRYPTO_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_ARMV8_CRYPTO_DEBUG
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+ __func__, __LINE__, ## args)
+
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+ __func__, __LINE__, ## args)
+
+#define ARMV8_CRYPTO_ASSERT(con) \
+do { \
+ if (!(con)) { \
+ rte_panic("%s(): " \
+ con "condition failed, line %u", __func__); \
+ } \
+} while (0)
+
+#else
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...)
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...)
+#define ARMV8_CRYPTO_ASSERT(con)
+#endif
+
+#define NBBY 8 /* Number of bits in a byte */
+#define BYTE_LENGTH(x) ((x) / NBBY) /* Number of bytes in x (round down) */
+
+/* Maximum length for digest (SHA-256 needs 32 bytes) */
+#define DIGEST_LENGTH_MAX 32
+
+/** ARMv8 operation order mode enumerator */
+enum armv8_crypto_chain_order {
+ ARMV8_CRYPTO_CHAIN_CIPHER_AUTH,
+ ARMV8_CRYPTO_CHAIN_AUTH_CIPHER,
+ ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CHAIN_LIST_END = ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED
+};
+
+/** ARMv8 cipher operation enumerator */
+enum armv8_crypto_cipher_operation {
+ ARMV8_CRYPTO_CIPHER_OP_ENCRYPT = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ ARMV8_CRYPTO_CIPHER_OP_DECRYPT = RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CIPHER_OP_LIST_END = ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED
+};
+
+enum armv8_crypto_cipher_keylen {
+ ARMV8_CRYPTO_CIPHER_KEYLEN_128,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_192,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_256,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END =
+ ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED
+};
+
+/** ARMv8 auth mode enumerator */
+enum armv8_crypto_auth_mode {
+ ARMV8_CRYPTO_AUTH_AS_AUTH,
+ ARMV8_CRYPTO_AUTH_AS_HMAC,
+ ARMV8_CRYPTO_AUTH_AS_CIPHER,
+ ARMV8_CRYPTO_AUTH_NOT_SUPPORTED,
+ ARMV8_CRYPTO_AUTH_LIST_END = ARMV8_CRYPTO_AUTH_NOT_SUPPORTED
+};
+
+#define CRYPTO_ORDER_MAX ARMV8_CRYPTO_CHAIN_LIST_END
+#define CRYPTO_CIPHER_OP_MAX ARMV8_CRYPTO_CIPHER_OP_LIST_END
+#define CRYPTO_CIPHER_KEYLEN_MAX ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END
+#define CRYPTO_CIPHER_MAX RTE_CRYPTO_CIPHER_LIST_END
+#define CRYPTO_AUTH_MAX RTE_CRYPTO_AUTH_LIST_END
+
+#define HMAC_IPAD_VALUE (0x36)
+#define HMAC_OPAD_VALUE (0x5C)
+
+#define SHA256_AUTH_KEY_LENGTH (BYTE_LENGTH(256))
+#define SHA256_BLOCK_SIZE (BYTE_LENGTH(512))
+
+#define SHA1_AUTH_KEY_LENGTH (BYTE_LENGTH(160))
+#define SHA1_BLOCK_SIZE (BYTE_LENGTH(512))
+
+#define SHA_AUTH_KEY_MAX SHA256_AUTH_KEY_LENGTH
+#define SHA_BLOCK_MAX SHA256_BLOCK_SIZE
+
+typedef int (*crypto_func_t)(uint8_t *, uint8_t *, uint64_t,
+ uint8_t *, uint8_t *, uint64_t,
+ crypto_arg_t *);
+
+typedef void (*crypto_key_sched_t)(uint8_t *, const uint8_t *);
+
+/** private data structure for each ARMv8 crypto device */
+struct armv8_crypto_private {
+ unsigned int max_nb_qpairs;
+ /**< Max number of queue pairs */
+};
+
+/** ARMv8 crypto queue pair */
+struct armv8_crypto_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing process packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+/** ARMv8 crypto private session structure */
+struct armv8_crypto_session {
+ enum armv8_crypto_chain_order chain_order;
+ /**< chain order mode */
+ crypto_func_t crypto_func;
+ /**< cryptographic function to use for this session */
+
+ /** Cipher Parameters */
+ struct {
+ enum rte_crypto_cipher_operation direction;
+ /**< cipher operation direction */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< cipher algorithm */
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+
+ struct {
+ uint8_t data[256];
+ /**< key data */
+ size_t length;
+ /**< key length in bytes */
+ } key;
+
+ crypto_key_sched_t key_sched;
+ /**< Key schedule function */
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ enum armv8_crypto_auth_mode mode;
+ /**< auth operation mode */
+
+ union {
+ struct {
+ /* Add data if needed */
+ } auth;
+
+ struct {
+ uint8_t i_key_pad[SHA_BLOCK_MAX]
+ __rte_cache_aligned;
+ /**< inner pad (max supported block length) */
+ uint8_t o_key_pad[SHA_BLOCK_MAX]
+ __rte_cache_aligned;
+ /**< outer pad (max supported block length) */
+ uint8_t key[SHA_BLOCK_MAX];
+ /**< HMAC key (max supported block length)*/
+ } hmac;
+ };
+ uint16_t digest_length;
+ /* Digest length */
+ } auth;
+
+} __rte_cache_aligned;
+
+/** Set and validate ARMv8 crypto session parameters */
+extern int armv8_crypto_set_session_parameters(
+ struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops;
+
+#endif /* _RTE_ARMV8_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/armv8/rte_pmd_armv8_version.map b/src/spdk/dpdk/drivers/crypto/armv8/rte_pmd_armv8_version.map
new file mode 100644
index 00000000..1f84b68a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/armv8/rte_pmd_armv8_version.map
@@ -0,0 +1,3 @@
+DPDK_17.02 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/Makefile b/src/spdk/dpdk/drivers/crypto/ccp/Makefile
new file mode 100644
index 00000000..f51d170f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_ccp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# external library include paths
+LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ccp_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.c b/src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.c
new file mode 100644
index 00000000..19ae9153
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.c
@@ -0,0 +1,2951 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <openssl/sha.h>
+#include <openssl/cmac.h> /*sub key apis*/
+#include <openssl/evp.h> /*sub key apis*/
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/hmac.h>
+
+/* SHA initial context values */
+static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA1_H4, SHA1_H3,
+ SHA1_H2, SHA1_H1,
+ SHA1_H0, 0x0U,
+ 0x0U, 0x0U,
+};
+
+uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA224_H7, SHA224_H6,
+ SHA224_H5, SHA224_H4,
+ SHA224_H3, SHA224_H2,
+ SHA224_H1, SHA224_H0,
+};
+
+uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA256_H7, SHA256_H6,
+ SHA256_H5, SHA256_H4,
+ SHA256_H3, SHA256_H2,
+ SHA256_H1, SHA256_H0,
+};
+
+uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+ SHA384_H7, SHA384_H6,
+ SHA384_H5, SHA384_H4,
+ SHA384_H3, SHA384_H2,
+ SHA384_H1, SHA384_H0,
+};
+
+uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+ SHA512_H7, SHA512_H6,
+ SHA512_H5, SHA512_H4,
+ SHA512_H3, SHA512_H2,
+ SHA512_H1, SHA512_H0,
+};
+
+#if defined(_MSC_VER)
+#define SHA3_CONST(x) x
+#else
+#define SHA3_CONST(x) x##L
+#endif
+
+/** 'Words' here refers to uint64_t */
+#define SHA3_KECCAK_SPONGE_WORDS \
+ (((1600) / 8) / sizeof(uint64_t))
+typedef struct sha3_context_ {
+ uint64_t saved;
+ /**
+ * The portion of the input message that we
+ * didn't consume yet
+ */
+ union {
+ uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
+ /* Keccak's state */
+ uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
+ /**total 200 ctx size**/
+ };
+ unsigned int byteIndex;
+ /**
+ * 0..7--the next byte after the set one
+ * (starts from 0; 0--none are buffered)
+ */
+ unsigned int wordIndex;
+ /**
+ * 0..24--the next word to integrate input
+ * (starts from 0)
+ */
+ unsigned int capacityWords;
+ /**
+ * the double size of the hash output in
+ * words (e.g. 16 for Keccak 512)
+ */
+} sha3_context;
+
+#ifndef SHA3_ROTL64
+#define SHA3_ROTL64(x, y) \
+ (((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
+#endif
+
+static const uint64_t keccakf_rndc[24] = {
+ SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
+ SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
+ SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
+ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
+ SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
+ SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
+ SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
+ SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
+ SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
+ SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
+ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
+ SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
+};
+
+static const unsigned int keccakf_rotc[24] = {
+ 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
+ 18, 39, 61, 20, 44
+};
+
+static const unsigned int keccakf_piln[24] = {
+ 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
+ 14, 22, 9, 6, 1
+};
+
+static enum ccp_cmd_order
+ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
+
+ if (xform == NULL)
+ return res;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return CCP_CMD_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return CCP_CMD_HASH_CIPHER;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return CCP_CMD_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return CCP_CMD_CIPHER_HASH;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ return CCP_CMD_COMBINED;
+ return res;
+}
+
+/* partial hash using openssl */
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA_CTX ctx;
+
+ if (!SHA1_Init(&ctx))
+ return -EFAULT;
+ SHA1_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA224_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA256_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA384_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA512_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static void
+keccakf(uint64_t s[25])
+{
+ int i, j, round;
+ uint64_t t, bc[5];
+#define KECCAK_ROUNDS 24
+
+ for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+ /* Theta */
+ for (i = 0; i < 5; i++)
+ bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
+ s[i + 20];
+
+ for (i = 0; i < 5; i++) {
+ t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
+ for (j = 0; j < 25; j += 5)
+ s[j + i] ^= t;
+ }
+
+ /* Rho Pi */
+ t = s[1];
+ for (i = 0; i < 24; i++) {
+ j = keccakf_piln[i];
+ bc[0] = s[j];
+ s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
+ t = bc[0];
+ }
+
+ /* Chi */
+ for (j = 0; j < 25; j += 5) {
+ for (i = 0; i < 5; i++)
+ bc[i] = s[j + i];
+ for (i = 0; i < 5; i++)
+ s[j + i] ^= (~bc[(i + 1) % 5]) &
+ bc[(i + 2) % 5];
+ }
+
+ /* Iota */
+ s[0] ^= keccakf_rndc[round];
+ }
+}
+
+static void
+sha3_Init224(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init256(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init384(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init512(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
+}
+
+
+/* This is simply the 'update' with the padding block.
+ * The padding block is 0x01 || 0x00* || 0x80. First 0x01 and last 0x80
+ * bytes are always present, but they can be the same byte.
+ */
+static void
+sha3_Update(void *priv, void const *bufIn, size_t len)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+ unsigned int old_tail = (8 - ctx->byteIndex) & 7;
+ size_t words;
+ unsigned int tail;
+ size_t i;
+ const uint8_t *buf = bufIn;
+
+ if (len < old_tail) {
+ while (len--)
+ ctx->saved |= (uint64_t) (*(buf++)) <<
+ ((ctx->byteIndex++) * 8);
+ return;
+ }
+
+ if (old_tail) {
+ len -= old_tail;
+ while (old_tail--)
+ ctx->saved |= (uint64_t) (*(buf++)) <<
+ ((ctx->byteIndex++) * 8);
+
+ ctx->s[ctx->wordIndex] ^= ctx->saved;
+ ctx->byteIndex = 0;
+ ctx->saved = 0;
+ if (++ctx->wordIndex ==
+ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+ keccakf(ctx->s);
+ ctx->wordIndex = 0;
+ }
+ }
+
+ words = len / sizeof(uint64_t);
+ tail = len - words * sizeof(uint64_t);
+
+ for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
+ const uint64_t t = (uint64_t) (buf[0]) |
+ ((uint64_t) (buf[1]) << 8 * 1) |
+ ((uint64_t) (buf[2]) << 8 * 2) |
+ ((uint64_t) (buf[3]) << 8 * 3) |
+ ((uint64_t) (buf[4]) << 8 * 4) |
+ ((uint64_t) (buf[5]) << 8 * 5) |
+ ((uint64_t) (buf[6]) << 8 * 6) |
+ ((uint64_t) (buf[7]) << 8 * 7);
+ ctx->s[ctx->wordIndex] ^= t;
+ if (++ctx->wordIndex ==
+ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+ keccakf(ctx->s);
+ ctx->wordIndex = 0;
+ }
+ }
+
+ while (tail--)
+ ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
+}
+
+int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init224(ctx);
+ sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init256(ctx);
+ sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init384(ctx);
+ sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init512(ctx);
+ sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+static int generate_partial_hash(struct ccp_session *sess)
+{
+
+ uint8_t ipad[sess->auth.block_size];
+ uint8_t opad[sess->auth.block_size];
+ uint8_t *ipad_t, *opad_t;
+ uint32_t *hash_value_be32, hash_temp32[8];
+ uint64_t *hash_value_be64, hash_temp64[8];
+ int i, count;
+ uint8_t *hash_value_sha3;
+
+ opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
+
+ /* considering key size is always equal to block size of algorithm */
+ for (i = 0; i < sess->auth.block_size; i++) {
+ ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
+ opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
+ }
+
+ switch (sess->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ count = SHA1_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ count = SHA256_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_224(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_224(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ count = SHA256_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_256(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_256(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+ if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_384(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_384(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+ if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_512(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_512(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ default:
+ CCP_LOG_ERR("Invalid auth algo");
+ return -1;
+ }
+}
+
+/* prepare temporary keys K1 and K2 */
+static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+{
+ int i;
+ /* Shift block to left, including carry */
+ for (i = 0; i < bl; i++) {
+ k[i] = l[i] << 1;
+ if (i < bl - 1 && l[i + 1] & 0x80)
+ k[i] |= 1;
+ }
+ /* If MSB set fixup with R */
+ if (l[0] & 0x80)
+ k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
+}
+
+/* subkeys K1 and K2 generation for CMAC */
+static int
+generate_cmac_subkeys(struct ccp_session *sess)
+{
+ const EVP_CIPHER *algo;
+ EVP_CIPHER_CTX *ctx;
+ unsigned char *ccp_ctx;
+ size_t i;
+ int dstlen, totlen;
+ unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
+ unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
+ unsigned char k1[AES_BLOCK_SIZE] = {0};
+ unsigned char k2[AES_BLOCK_SIZE] = {0};
+
+ if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
+ algo = EVP_aes_128_cbc();
+ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
+ algo = EVP_aes_192_cbc();
+ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
+ algo = EVP_aes_256_cbc();
+ else {
+ CCP_LOG_ERR("Invalid CMAC type length");
+ return -1;
+ }
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx) {
+ CCP_LOG_ERR("ctx creation failed");
+ return -1;
+ }
+ if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
+ (unsigned char *)zero_iv) <= 0)
+ goto key_generate_err;
+ if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+ goto key_generate_err;
+ if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
+ AES_BLOCK_SIZE) <= 0)
+ goto key_generate_err;
+ if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+ goto key_generate_err;
+
+ memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
+
+ ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
+ prepare_key(k1, dst, AES_BLOCK_SIZE);
+ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
+ *ccp_ctx = k1[i];
+
+ ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
+ (2 * CCP_SB_BYTES) - 1);
+ prepare_key(k2, k1, AES_BLOCK_SIZE);
+ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
+ *ccp_ctx = k2[i];
+
+ EVP_CIPHER_CTX_free(ctx);
+
+ return 0;
+
+key_generate_err:
+ CCP_LOG_ERR("CMAC Init failed");
+ return -1;
+}
+
+/* configure session */
+static int
+ccp_configure_session_cipher(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ size_t i, j, x;
+
+ cipher_xform = &xform->cipher;
+
+ /* set cipher direction */
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+ else
+ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+
+ /* set cipher key */
+ sess->cipher.key_length = cipher_xform->key.length;
+ rte_memcpy(sess->cipher.key, cipher_xform->key.data,
+ cipher_xform->key.length);
+
+ /* set iv parameters */
+ sess->iv.offset = cipher_xform->iv.offset;
+ sess->iv.length = cipher_xform->iv.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
+ sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
+ sess->cipher.engine = CCP_ENGINE_3DES;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo");
+ return -1;
+ }
+
+
+ switch (sess->cipher.engine) {
+ case CCP_ENGINE_AES:
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->cipher.key_length == 32)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid cipher key length");
+ return -1;
+ }
+ for (i = 0; i < sess->cipher.key_length ; i++)
+ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+ sess->cipher.key[i];
+ break;
+ case CCP_ENGINE_3DES:
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.des_type = CCP_DES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.des_type = CCP_DES_TYPE_192;
+ else {
+ CCP_LOG_ERR("Invalid cipher key length");
+ return -1;
+ }
+ for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
+ for (i = 0; i < 8; i++)
+ sess->cipher.key_ccp[(8 + x) - i - 1] =
+ sess->cipher.key[i + x];
+ break;
+ default:
+ CCP_LOG_ERR("Invalid CCP Engine");
+ return -ENOTSUP;
+ }
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+}
+
+static int
+ccp_configure_session_auth(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_auth_xform *auth_xform = NULL;
+ size_t i;
+
+ auth_xform = &xform->auth;
+
+ sess->auth.digest_length = auth_xform->digest_length;
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->auth.op = CCP_AUTH_OP_GENERATE;
+ else
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ if (sess->auth_opt) {
+ sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ MD5_DIGEST_SIZE);
+ sess->auth.key_length = auth_xform->key.length;
+ sess->auth.block_size = MD5_BLOCK_SIZE;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else
+ return -1; /* HMAC MD5 not supported on CCP */
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+ sess->auth.ctx = (void *)ccp_sha1_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ sess->auth.block_size = SHA1_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ sess->auth.block_size = SHA1_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+ sess->auth.ctx = (void *)ccp_sha224_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
+ if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+ sess->auth.ctx = (void *)ccp_sha256_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
+ if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+ sess->auth.ctx = (void *)ccp_sha384_init;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA384_DIGEST_SIZE);
+ sess->auth.block_size = SHA384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA384_DIGEST_SIZE);
+ sess->auth.block_size = SHA384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
+ if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+ sess->auth.ctx = (void *)ccp_sha512_init;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA512_DIGEST_SIZE);
+ sess->auth.block_size = SHA512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA512_DIGEST_SIZE);
+ sess->auth.block_size = SHA512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
+ if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+ sess->auth.engine = CCP_ENGINE_AES;
+ sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+ sess->auth.key_length = auth_xform->key.length;
+ /* padding and hash result */
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = AES_BLOCK_SIZE;
+ sess->auth.block_size = AES_BLOCK_SIZE;
+ if (sess->auth.key_length == 16)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->auth.key_length == 24)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->auth.key_length == 32)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid CMAC key length");
+ return -1;
+ }
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ sess->auth.key_length);
+ for (i = 0; i < sess->auth.key_length; i++)
+ sess->auth.key_ccp[sess->auth.key_length - i - 1] =
+ sess->auth.key[i];
+ if (generate_cmac_subkeys(sess))
+ return -1;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported hash algo");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static int
+ccp_configure_session_aead(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_aead_xform *aead_xform = NULL;
+ size_t i;
+
+ aead_xform = &xform->aead;
+
+ sess->cipher.key_length = aead_xform->key.length;
+ rte_memcpy(sess->cipher.key, aead_xform->key.data,
+ aead_xform->key.length);
+
+ if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+ sess->auth.op = CCP_AUTH_OP_GENERATE;
+ } else {
+ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ }
+ sess->aead_algo = aead_xform->algo;
+ sess->auth.aad_length = aead_xform->aad_length;
+ sess->auth.digest_length = aead_xform->digest_length;
+
+ /* set iv parameters */
+ sess->iv.offset = aead_xform->iv.offset;
+ sess->iv.length = aead_xform->iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->cipher.key_length == 32)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid aead key length");
+ return -1;
+ }
+ for (i = 0; i < sess->cipher.key_length; i++)
+ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+ sess->cipher.key[i];
+ sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
+ sess->auth.engine = CCP_ENGINE_AES;
+ sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = 0;
+ sess->auth.block_size = AES_BLOCK_SIZE;
+ sess->cmd_id = CCP_CMD_COMBINED;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo");
+ return -ENOTSUP;
+ }
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+}
+
+int
+ccp_set_session_parameters(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform,
+ struct ccp_private *internals)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+ int ret = 0;
+
+ sess->auth_opt = internals->auth_opt;
+ sess->cmd_id = ccp_get_cmd_id(xform);
+
+ switch (sess->cmd_id) {
+ case CCP_CMD_CIPHER:
+ cipher_xform = xform;
+ break;
+ case CCP_CMD_AUTH:
+ auth_xform = xform;
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case CCP_CMD_COMBINED:
+ aead_xform = xform;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ return -1;
+ }
+
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+ if (cipher_xform) {
+ ret = ccp_configure_session_cipher(sess, cipher_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported cipher parameters");
+ return ret;
+ }
+ }
+ if (auth_xform) {
+ ret = ccp_configure_session_auth(sess, auth_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported auth parameters");
+ return ret;
+ }
+ }
+ if (aead_xform) {
+ ret = ccp_configure_session_aead(sess, aead_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported aead parameters");
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/* calculate CCP descriptors requirement */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cipher.algo) {
+ case CCP_CIPHER_ALGO_AES_CBC:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ case CCP_CIPHER_ALGO_AES_ECB:
+ count = 1;
+ /**<only op*/
+ break;
+ case CCP_CIPHER_ALGO_AES_CTR:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ case CCP_CIPHER_ALGO_3DES_CBC:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ }
+ return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
+ case CCP_AUTH_ALGO_SHA224:
+ case CCP_AUTH_ALGO_SHA256:
+ case CCP_AUTH_ALGO_SHA384:
+ case CCP_AUTH_ALGO_SHA512:
+ count = 3;
+ /**< op + lsb passthrough cpy to/from*/
+ break;
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ if (session->auth_opt == 0)
+ count = 6;
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ /**
+ * 1. Load PHash1 = H(k ^ ipad); to LSB
+ * 2. generate IHash = H(hash on meassage with PHash1
+ * as init values);
+ * 3. Retrieve IHash 2 slots for 384/512
+ * 4. Load Phash2 = H(k ^ opad); to LSB
+ * 5. generate FHash = H(hash on Ihash with Phash2
+ * as init value);
+ * 6. Retrieve HMAC output from LSB to host memory
+ */
+ if (session->auth_opt == 0)
+ count = 7;
+ break;
+ case CCP_AUTH_ALGO_SHA3_224:
+ case CCP_AUTH_ALGO_SHA3_256:
+ case CCP_AUTH_ALGO_SHA3_384:
+ case CCP_AUTH_ALGO_SHA3_512:
+ count = 1;
+ /**< only op ctx and dst in host memory*/
+ break;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ count = 3;
+ break;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ count = 4;
+ /**
+ * 1. Op to Perform Ihash
+ * 2. Retrieve result from LSB to host memory
+ * 3. Perform final hash
+ */
+ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ count = 4;
+ /**
+ * op
+ * extra descriptor in padding case
+ * (k1/k2(255:128) with iv(127:0))
+ * Retrieve result
+ */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ }
+
+ return count;
+}
+
+static int
+ccp_aead_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->aead_algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ }
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_AES_GCM:
+ count = 5;
+ /**
+ * 1. Passthru iv
+ * 2. Hash AAD
+ * 3. GCTR
+ * 4. Reload passthru
+ * 5. Hash Final tag
+ */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported combined auth ALGO %d",
+ session->auth.algo);
+ }
+ return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ count = ccp_cipher_slot(session);
+ break;
+ case CCP_CMD_AUTH:
+ count = ccp_auth_slot(session);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ case CCP_CMD_HASH_CIPHER:
+ count = ccp_cipher_slot(session);
+ count += ccp_auth_slot(session);
+ break;
+ case CCP_CMD_COMBINED:
+ count = ccp_aead_slot(session);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+
+ }
+
+ return count;
+}
+
+static uint8_t
+algo_select(int sessalgo,
+ const EVP_MD **algo)
+{
+ int res = 0;
+
+ switch (sessalgo) {
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ *algo = EVP_md5();
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ *algo = EVP_sha1();
+ break;
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ *algo = EVP_sha224();
+ break;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ *algo = EVP_sha256();
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ *algo = EVP_sha384();
+ break;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ *algo = EVP_sha512();
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ return res;
+}
+
+static int
+process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
+ __rte_unused uint8_t *iv,
+ EVP_PKEY *pkey,
+ int srclen,
+ EVP_MD_CTX *ctx,
+ const EVP_MD *algo,
+ uint16_t d_len)
+{
+ size_t dstlen;
+ unsigned char temp_dst[64];
+
+ if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
+ goto process_auth_err;
+
+ memcpy(dst, temp_dst, d_len);
+ return 0;
+process_auth_err:
+ CCP_LOG_ERR("Process cpu auth failed");
+ return -EINVAL;
+}
+
+static int cpu_crypto_auth(struct ccp_qp *qp,
+ struct rte_crypto_op *op,
+ struct ccp_session *sess,
+ EVP_MD_CTX *ctx)
+{
+ uint8_t *src, *dst;
+ int srclen, status;
+ struct rte_mbuf *mbuf_src, *mbuf_dst;
+ const EVP_MD *algo = NULL;
+ EVP_PKEY *pkey;
+
+ algo_select(sess->auth.algo, &algo);
+ pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
+ sess->auth.key_length);
+ mbuf_src = op->sym->m_src;
+ mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+ srclen = op->sym->auth.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->auth.data.offset);
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ dst = qp->temp_digest;
+ } else {
+ dst = op->sym->auth.digest.data;
+ if (dst == NULL) {
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ sess->auth.digest_length);
+ }
+ }
+ status = process_cpu_auth_hmac(src, dst, NULL,
+ pkey, srclen,
+ ctx,
+ algo,
+ sess->auth.digest_length);
+ if (status) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return status;
+ }
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(dst, op->sym->auth.digest.data,
+ sess->auth.digest_length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ EVP_PKEY_free(pkey);
+ return 0;
+}
+
+static void
+ccp_perform_passthru(struct ccp_passthru *pst,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_desc *desc;
+ union ccp_function function;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 0;
+ CCP_CMD_EOM(desc) = 0;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_PT_BYTESWAP(&function) = pst->byte_swap;
+ CCP_PT_BITWISE(&function) = pst->bit_mod;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = pst->len;
+
+ if (pst->dir) {
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+ CCP_CMD_DST_HI(desc) = 0;
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+ if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
+ } else {
+
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+ CCP_CMD_SRC_HI(desc) = 0;
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ }
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+}
+
+static int
+ccp_perform_hmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, dest_addr_t;
+ struct ccp_passthru pst;
+ uint64_t auth_msg_bits;
+ void *append_ptr;
+ uint8_t *addr;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ addr = session->auth.pre_compute;
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+ dest_addr_t = dest_addr;
+
+ /** Load PHash1 to LSB*/
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /**sha engine command descriptor for IntermediateHash*/
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ auth_msg_bits = (op->sym->auth.data.length +
+ session->auth.block_size) * 8;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Intermediate Hash value retrieve */
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
+
+ pst.src_addr =
+ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ } else {
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ }
+
+ /** Load PHash2 to LSB*/
+ addr += session->auth.ctx_len;
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /**sha engine command descriptor for FinalHash*/
+ dest_addr_t += session->auth.offset;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = (session->auth.ctx_len -
+ session->auth.offset);
+ auth_msg_bits = (session->auth.block_size +
+ session->auth.ctx_len -
+ session->auth.offset) * 8;
+
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Retrieve hmac output */
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ else
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+
+}
+
+static int
+ccp_perform_sha(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr;
+ struct ccp_passthru pst;
+ void *append_ptr;
+ uint64_t auth_msg_bits;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+
+ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+
+ /** Passthru sha context*/
+
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+ session->auth.ctx);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /**prepare sha command descriptor*/
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ auth_msg_bits = op->sym->auth.data.length * 8;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Hash value retrieve */
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ else
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+
+}
+
+static int
+ccp_perform_sha3_hmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ struct ccp_passthru pst;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint8_t *append_ptr;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+ CCP_LOG_ERR("CCP MBUF append failed\n");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
+ *)session->auth.pre_compute);
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /*desc1 for SHA3-Ihash operation */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
+ CCP_CMD_DST_HI(desc) = 0;
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Intermediate Hash value retrieve */
+ if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
+
+ pst.src_addr =
+ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ } else {
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ }
+
+ /**sha engine command descriptor for FinalHash*/
+ ctx_paddr += CCP_SHA3_CTX_SIZE;
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
+ dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
+ CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
+ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
+ CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
+ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
+ dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
+ CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
+ } else {
+ CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
+ }
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_sha3(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint8_t *ctx_addr, *append_ptr;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, ctx_paddr;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+ CCP_LOG_ERR("CCP MBUF append failed\n");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ ctx_addr = session->auth.sha3_ctx;
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for SHA3 operation */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes_cmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint8_t *src_tb, *append_ptr, *ctx_addr;
+ phys_addr_t src_addr, dest_addr, key_addr;
+ int length, non_align_len;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+ CCP_AES_MODE(&function) = session->auth.um.aes_mode;
+ CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
+
+ if (op->sym->auth.data.length % session->auth.block_size == 0) {
+
+ ctx_addr = session->auth.pre_compute;
+ memset(ctx_addr, 0, AES_BLOCK_SIZE);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for aes-cmac command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail =
+ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+ } else {
+ ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
+ memset(ctx_addr, 0, AES_BLOCK_SIZE);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
+ length *= AES_BLOCK_SIZE;
+ non_align_len = op->sym->auth.data.length - length;
+ /* prepare desc for aes-cmac command */
+ /*Command 1*/
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = length;
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ /*Command 2*/
+ append_ptr = append_ptr + CCP_SB_BYTES;
+ memset(append_ptr, 0, AES_BLOCK_SIZE);
+ src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
+ uint8_t *,
+ op->sym->auth.data.offset +
+ length);
+ rte_memcpy(append_ptr, src_tb, non_align_len);
+ append_ptr[non_align_len] = CMAC_PAD_VALUE;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail =
+ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+ }
+ /* Retrieve result */
+ pst.dest_addr = dest_addr;
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ uint8_t *lsb_buf;
+ struct ccp_passthru pst = {0};
+ struct ccp_desc *desc;
+ phys_addr_t src_addr, dest_addr, key_addr;
+ uint8_t *iv;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ function.raw = 0;
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
+ if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
+ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
+ iv, session->iv.length);
+ pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
+ CCP_AES_SIZE(&function) = 0x1F;
+ } else {
+ lsb_buf =
+ &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+ rte_memcpy(lsb_buf +
+ (CCP_SB_BYTES - session->iv.length),
+ iv, session->iv.length);
+ pst.src_addr = b_info->lsb_buf_phys +
+ (b_info->lsb_buf_idx * CCP_SB_BYTES);
+ b_info->lsb_buf_idx++;
+ }
+
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ }
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (likely(op->sym->m_dst != NULL))
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ else
+ dest_addr = src_addr;
+ key_addr = session->cipher.key_phys;
+
+ /* prepare desc for aes command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_3des(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ unsigned char *lsb_buf;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint8_t *iv;
+ phys_addr_t src_addr, dest_addr, key_addr;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ switch (session->cipher.um.des_mode) {
+ case CCP_DES_MODE_CBC:
+ lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+ b_info->lsb_buf_idx++;
+
+ rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+ iv, session->iv.length);
+
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ break;
+ case CCP_DES_MODE_CFB:
+ case CCP_DES_MODE_ECB:
+ CCP_LOG_ERR("Unsupported DES cipher mode");
+ return -ENOTSUP;
+ }
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (unlikely(op->sym->m_dst != NULL))
+ dest_addr =
+ rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ else
+ dest_addr = src_addr;
+
+ key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for des command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_DES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_DES_MODE(&function) = session->cipher.um.des_mode;
+ CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ if (session->cipher.um.des_mode)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ /* Write the new tail address back to the queue register */
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ /* Turn the queue back on using our cached control register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ uint8_t *iv;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint64_t *temp;
+ phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
+ phys_addr_t digest_dest_addr;
+ int length, non_align_len;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ key_addr = session->cipher.key_phys;
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->aead.data.offset);
+ if (unlikely(op->sym->m_dst != NULL))
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->aead.data.offset);
+ else
+ dest_addr = src_addr;
+ rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len);
+ digest_dest_addr = op->sym->aead.digest.phys_addr;
+ temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE);
+ *temp++ = rte_bswap64(session->auth.aad_length << 3);
+ *temp = rte_bswap64(op->sym->aead.data.length << 3);
+
+ non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE;
+ length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE);
+
+ aad_addr = op->sym->aead.aad.phys_addr;
+
+ /* CMD1 IV Passthru */
+ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv,
+ session->iv.length);
+ pst.src_addr = session->cipher.nonce_phys;
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* CMD2 GHASH-AAD */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = session->auth.aad_length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* CMD3 : GCTR Plain text */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+ if (non_align_len == 0)
+ CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
+ else
+ CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
+
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* CMD4 : PT to copy IV */
+ pst.src_addr = session->cipher.nonce_phys;
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = AES_BLOCK_SIZE;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* CMD5 : GHASH-Final */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ /* Last block (AAD_len || PT_len)*/
+ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
+ CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->cipher.algo) {
+ case CCP_CIPHER_ALGO_AES_CBC:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ case CCP_CIPHER_ALGO_AES_CTR:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ case CCP_CIPHER_ALGO_AES_ECB:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 1;
+ break;
+ case CCP_CIPHER_ALGO_3DES_CBC:
+ result = ccp_perform_3des(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
+ case CCP_AUTH_ALGO_SHA224:
+ case CCP_AUTH_ALGO_SHA256:
+ case CCP_AUTH_ALGO_SHA384:
+ case CCP_AUTH_ALGO_SHA512:
+ result = ccp_perform_sha(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ if (session->auth_opt == 0)
+ result = -1;
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ if (session->auth_opt == 0) {
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 6;
+ }
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ if (session->auth_opt == 0) {
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 7;
+ }
+ break;
+ case CCP_AUTH_ALGO_SHA3_224:
+ case CCP_AUTH_ALGO_SHA3_256:
+ case CCP_AUTH_ALGO_SHA3_384:
+ case CCP_AUTH_ALGO_SHA3_512:
+ result = ccp_perform_sha3(op, cmd_q);
+ b_info->desccnt += 1;
+ break;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ result = ccp_perform_sha3_hmac(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ result = ccp_perform_sha3_hmac(op, cmd_q);
+ b_info->desccnt += 4;
+ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ result = ccp_perform_aes_cmac(op, cmd_q);
+ b_info->desccnt += 4;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ return -ENOTSUP;
+ }
+
+ return result;
+}
+
+static inline int
+ccp_crypto_aead(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_AES_GCM:
+ if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
+ CCP_LOG_ERR("Incorrect chain order");
+ return -1;
+ }
+ result = ccp_perform_aes_gcm(op, cmd_q);
+ b_info->desccnt += 5;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
+
+int
+process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req)
+{
+ int i, result = 0;
+ struct ccp_batch_info *b_info;
+ struct ccp_session *session;
+ EVP_MD_CTX *auth_ctx = NULL;
+
+ if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+ CCP_LOG_ERR("batch info allocation failed");
+ return 0;
+ }
+
+ auth_ctx = EVP_MD_CTX_create();
+ if (unlikely(!auth_ctx)) {
+ CCP_LOG_ERR("Unable to create auth ctx");
+ return 0;
+ }
+ b_info->auth_ctr = 0;
+
+ /* populate batch info necessary for dequeue */
+ b_info->op_idx = 0;
+ b_info->lsb_buf_idx = 0;
+ b_info->desccnt = 0;
+ b_info->cmd_q = cmd_q;
+ b_info->lsb_buf_phys =
+ (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+ rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+
+ b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+ for (i = 0; i < nb_ops; i++) {
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_AUTH:
+ if (session->auth_opt) {
+ b_info->auth_ctr++;
+ result = cpu_crypto_auth(qp, op[i],
+ session, auth_ctx);
+ } else
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ if (result)
+ break;
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ if (session->auth_opt) {
+ result = cpu_crypto_auth(qp, op[i],
+ session, auth_ctx);
+ if (op[i]->status !=
+ RTE_CRYPTO_OP_STATUS_SUCCESS)
+ continue;
+ } else
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+
+ if (result)
+ break;
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_COMBINED:
+ result = ccp_crypto_aead(op[i], cmd_q, b_info);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ result = -1;
+ }
+ if (unlikely(result < 0)) {
+ rte_atomic64_add(&b_info->cmd_q->free_slots,
+ (slots_req - b_info->desccnt));
+ break;
+ }
+ b_info->op[i] = op[i];
+ }
+
+ b_info->opcnt = i;
+ b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+
+ rte_wmb();
+ /* Write the new tail address back to the queue register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+ b_info->tail_offset);
+ /* Turn the queue back on using our cached control register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
+ EVP_MD_CTX_destroy(auth_ctx);
+ return i;
+}
+
+static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
+{
+ struct ccp_session *session;
+ uint8_t *digest_data, *addr;
+ struct rte_mbuf *m_last;
+ int offset, digest_offset;
+ uint8_t digest_le[64];
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ if (session->cmd_id == CCP_CMD_COMBINED) {
+ digest_data = op->sym->aead.digest.data;
+ digest_offset = op->sym->aead.data.offset +
+ op->sym->aead.data.length;
+ } else {
+ digest_data = op->sym->auth.digest.data;
+ digest_offset = op->sym->auth.data.offset +
+ op->sym->auth.data.length;
+ }
+ m_last = rte_pktmbuf_lastseg(op->sym->m_src);
+ addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
+ m_last->data_len - session->auth.ctx_len);
+
+ rte_mb();
+ offset = session->auth.offset;
+
+ if (session->auth.engine == CCP_ENGINE_SHA)
+ if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
+ /* All other algorithms require byte
+ * swap done by host
+ */
+ unsigned int i;
+
+ offset = session->auth.ctx_len -
+ session->auth.offset - 1;
+ for (i = 0; i < session->auth.digest_length; i++)
+ digest_le[i] = addr[offset - i];
+ offset = 0;
+ addr = digest_le;
+ }
+
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (session->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(addr + offset, digest_data,
+ session->auth.digest_length) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ } else {
+ if (unlikely(digest_data == 0))
+ digest_data = rte_pktmbuf_mtod_offset(
+ op->sym->m_dst, uint8_t *,
+ digest_offset);
+ rte_memcpy(digest_data, addr + offset,
+ session->auth.digest_length);
+ }
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(op->sym->m_src,
+ session->auth.ctx_len);
+}
+
+static int
+ccp_prepare_ops(struct ccp_qp *qp,
+ struct rte_crypto_op **op_d,
+ struct ccp_batch_info *b_info,
+ uint16_t nb_ops)
+{
+ int i, min_ops;
+ struct ccp_session *session;
+
+ EVP_MD_CTX *auth_ctx = NULL;
+
+ auth_ctx = EVP_MD_CTX_create();
+ if (unlikely(!auth_ctx)) {
+ CCP_LOG_ERR("Unable to create auth ctx");
+ return 0;
+ }
+ min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+ for (i = 0; i < min_ops; i++) {
+ op_d[i] = b_info->op[b_info->op_idx++];
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op_d[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case CCP_CMD_AUTH:
+ if (session->auth_opt == 0)
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ if (session->auth_opt)
+ cpu_crypto_auth(qp, op_d[i],
+ session, auth_ctx);
+ else
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ if (session->auth_opt)
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ else
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_COMBINED:
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ }
+ }
+
+ EVP_MD_CTX_destroy(auth_ctx);
+ b_info->opcnt -= min_ops;
+ return min_ops;
+}
+
+int
+process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops)
+{
+ struct ccp_batch_info *b_info;
+ uint32_t cur_head_offset;
+
+ if (qp->b_info != NULL) {
+ b_info = qp->b_info;
+ if (unlikely(b_info->op_idx > 0))
+ goto success;
+ } else if (rte_ring_dequeue(qp->processed_pkts,
+ (void **)&b_info))
+ return 0;
+
+ if (b_info->auth_ctr == b_info->opcnt)
+ goto success;
+ cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+ CMD_Q_HEAD_LO_BASE);
+
+ if (b_info->head_offset < b_info->tail_offset) {
+ if ((cur_head_offset >= b_info->head_offset) &&
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ } else {
+ if ((cur_head_offset >= b_info->head_offset) ||
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ }
+
+
+success:
+ nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops);
+ rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+ b_info->desccnt = 0;
+ if (b_info->opcnt > 0) {
+ qp->b_info = b_info;
+ } else {
+ rte_mempool_put(qp->batch_mp, (void *)b_info);
+ qp->b_info = NULL;
+ }
+
+ return nb_ops;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.h b/src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.h
new file mode 100644
index 00000000..882b398a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.h
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_CRYPTO_H_
+#define _CCP_CRYPTO_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+#include "ccp_dev.h"
+
+#define AES_BLOCK_SIZE 16
+#define CMAC_PAD_VALUE 0x80
+#define CTR_NONCE_SIZE 4
+#define CTR_IV_SIZE 8
+#define CCP_SHA3_CTX_SIZE 200
+
+/**Macro helpers for CCP command creation*/
+#define CCP_AES_SIZE(p) ((p)->aes.size)
+#define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt)
+#define CCP_AES_MODE(p) ((p)->aes.mode)
+#define CCP_AES_TYPE(p) ((p)->aes.type)
+#define CCP_DES_ENCRYPT(p) ((p)->des.encrypt)
+#define CCP_DES_MODE(p) ((p)->des.mode)
+#define CCP_DES_TYPE(p) ((p)->des.type)
+#define CCP_SHA_TYPE(p) ((p)->sha.type)
+#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
+#define CCP_PT_BITWISE(p) ((p)->pt.bitwise)
+
+/* HMAC */
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+
+/* MD5 */
+#define MD5_DIGEST_SIZE 16
+#define MD5_BLOCK_SIZE 64
+
+/* SHA */
+#define SHA_COMMON_DIGEST_SIZE 32
+#define SHA1_DIGEST_SIZE 20
+#define SHA1_BLOCK_SIZE 64
+
+#define SHA224_DIGEST_SIZE 28
+#define SHA224_BLOCK_SIZE 64
+#define SHA3_224_BLOCK_SIZE 144
+
+#define SHA256_DIGEST_SIZE 32
+#define SHA256_BLOCK_SIZE 64
+#define SHA3_256_BLOCK_SIZE 136
+
+#define SHA384_DIGEST_SIZE 48
+#define SHA384_BLOCK_SIZE 128
+#define SHA3_384_BLOCK_SIZE 104
+
+#define SHA512_DIGEST_SIZE 64
+#define SHA512_BLOCK_SIZE 128
+#define SHA3_512_BLOCK_SIZE 72
+
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 64
+
+/* SHA LSB intialiazation values */
+
+#define SHA1_H0 0x67452301UL
+#define SHA1_H1 0xefcdab89UL
+#define SHA1_H2 0x98badcfeUL
+#define SHA1_H3 0x10325476UL
+#define SHA1_H4 0xc3d2e1f0UL
+
+#define SHA224_H0 0xc1059ed8UL
+#define SHA224_H1 0x367cd507UL
+#define SHA224_H2 0x3070dd17UL
+#define SHA224_H3 0xf70e5939UL
+#define SHA224_H4 0xffc00b31UL
+#define SHA224_H5 0x68581511UL
+#define SHA224_H6 0x64f98fa7UL
+#define SHA224_H7 0xbefa4fa4UL
+
+#define SHA256_H0 0x6a09e667UL
+#define SHA256_H1 0xbb67ae85UL
+#define SHA256_H2 0x3c6ef372UL
+#define SHA256_H3 0xa54ff53aUL
+#define SHA256_H4 0x510e527fUL
+#define SHA256_H5 0x9b05688cUL
+#define SHA256_H6 0x1f83d9abUL
+#define SHA256_H7 0x5be0cd19UL
+
+#define SHA384_H0 0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1 0x629a292a367cd507ULL
+#define SHA384_H2 0x9159015a3070dd17ULL
+#define SHA384_H3 0x152fecd8f70e5939ULL
+#define SHA384_H4 0x67332667ffc00b31ULL
+#define SHA384_H5 0x8eb44a8768581511ULL
+#define SHA384_H6 0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7 0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0 0x6a09e667f3bcc908ULL
+#define SHA512_H1 0xbb67ae8584caa73bULL
+#define SHA512_H2 0x3c6ef372fe94f82bULL
+#define SHA512_H3 0xa54ff53a5f1d36f1ULL
+#define SHA512_H4 0x510e527fade682d1ULL
+#define SHA512_H5 0x9b05688c2b3e6c1fULL
+#define SHA512_H6 0x1f83d9abfb41bd6bULL
+#define SHA512_H7 0x5be0cd19137e2179ULL
+
+/**
+ * CCP supported AES modes
+ */
+enum ccp_aes_mode {
+ CCP_AES_MODE_ECB = 0,
+ CCP_AES_MODE_CBC,
+ CCP_AES_MODE_OFB,
+ CCP_AES_MODE_CFB,
+ CCP_AES_MODE_CTR,
+ CCP_AES_MODE_CMAC,
+ CCP_AES_MODE_GHASH,
+ CCP_AES_MODE_GCTR,
+ CCP_AES_MODE__LAST,
+};
+
+/**
+ * CCP AES GHASH mode
+ */
+enum ccp_aes_ghash_mode {
+ CCP_AES_MODE_GHASH_AAD = 0,
+ CCP_AES_MODE_GHASH_FINAL
+};
+
+/**
+ * CCP supported AES types
+ */
+enum ccp_aes_type {
+ CCP_AES_TYPE_128 = 0,
+ CCP_AES_TYPE_192,
+ CCP_AES_TYPE_256,
+ CCP_AES_TYPE__LAST,
+};
+
+/***** 3DES engine *****/
+
+/**
+ * CCP supported DES/3DES modes
+ */
+enum ccp_des_mode {
+ CCP_DES_MODE_ECB = 0, /* Not supported */
+ CCP_DES_MODE_CBC,
+ CCP_DES_MODE_CFB,
+};
+
+/**
+ * CCP supported DES types
+ */
+enum ccp_des_type {
+ CCP_DES_TYPE_128 = 0, /* 112 + 16 parity */
+ CCP_DES_TYPE_192, /* 168 + 24 parity */
+ CCP_DES_TYPE__LAST,
+};
+
+/***** SHA engine *****/
+
+/**
+ * ccp_sha_type - type of SHA operation
+ *
+ * @CCP_SHA_TYPE_1: SHA-1 operation
+ * @CCP_SHA_TYPE_224: SHA-224 operation
+ * @CCP_SHA_TYPE_256: SHA-256 operation
+ */
+enum ccp_sha_type {
+ CCP_SHA_TYPE_1 = 1,
+ CCP_SHA_TYPE_224,
+ CCP_SHA_TYPE_256,
+ CCP_SHA_TYPE_384,
+ CCP_SHA_TYPE_512,
+ CCP_SHA_TYPE_RSVD1,
+ CCP_SHA_TYPE_RSVD2,
+ CCP_SHA3_TYPE_224,
+ CCP_SHA3_TYPE_256,
+ CCP_SHA3_TYPE_384,
+ CCP_SHA3_TYPE_512,
+ CCP_SHA_TYPE__LAST,
+};
+
+/**
+ * CCP supported cipher algorithms
+ */
+enum ccp_cipher_algo {
+ CCP_CIPHER_ALGO_AES_CBC = 0,
+ CCP_CIPHER_ALGO_AES_ECB,
+ CCP_CIPHER_ALGO_AES_CTR,
+ CCP_CIPHER_ALGO_AES_GCM,
+ CCP_CIPHER_ALGO_3DES_CBC,
+};
+
+/**
+ * CCP cipher operation type
+ */
+enum ccp_cipher_dir {
+ CCP_CIPHER_DIR_DECRYPT = 0,
+ CCP_CIPHER_DIR_ENCRYPT = 1,
+};
+
+/**
+ * CCP supported hash algorithms
+ */
+enum ccp_hash_algo {
+ CCP_AUTH_ALGO_SHA1 = 0,
+ CCP_AUTH_ALGO_SHA1_HMAC,
+ CCP_AUTH_ALGO_SHA224,
+ CCP_AUTH_ALGO_SHA224_HMAC,
+ CCP_AUTH_ALGO_SHA3_224,
+ CCP_AUTH_ALGO_SHA3_224_HMAC,
+ CCP_AUTH_ALGO_SHA256,
+ CCP_AUTH_ALGO_SHA256_HMAC,
+ CCP_AUTH_ALGO_SHA3_256,
+ CCP_AUTH_ALGO_SHA3_256_HMAC,
+ CCP_AUTH_ALGO_SHA384,
+ CCP_AUTH_ALGO_SHA384_HMAC,
+ CCP_AUTH_ALGO_SHA3_384,
+ CCP_AUTH_ALGO_SHA3_384_HMAC,
+ CCP_AUTH_ALGO_SHA512,
+ CCP_AUTH_ALGO_SHA512_HMAC,
+ CCP_AUTH_ALGO_SHA3_512,
+ CCP_AUTH_ALGO_SHA3_512_HMAC,
+ CCP_AUTH_ALGO_AES_CMAC,
+ CCP_AUTH_ALGO_AES_GCM,
+ CCP_AUTH_ALGO_MD5_HMAC,
+};
+
+/**
+ * CCP hash operation type
+ */
+enum ccp_hash_op {
+ CCP_AUTH_OP_GENERATE = 0,
+ CCP_AUTH_OP_VERIFY = 1,
+};
+
+/* CCP crypto private session structure */
+struct ccp_session {
+ bool auth_opt;
+ enum ccp_cmd_order cmd_id;
+ /**< chain order mode */
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+ struct {
+ enum ccp_cipher_algo algo;
+ enum ccp_engine engine;
+ union {
+ enum ccp_aes_mode aes_mode;
+ enum ccp_des_mode des_mode;
+ } um;
+ union {
+ enum ccp_aes_type aes_type;
+ enum ccp_des_type des_type;
+ } ut;
+ enum ccp_cipher_dir dir;
+ uint64_t key_length;
+ /**< max cipher key size 256 bits */
+ uint8_t key[32];
+ /**ccp key format*/
+ uint8_t key_ccp[32];
+ phys_addr_t key_phys;
+ /**AES-ctr nonce(4) iv(8) ctr*/
+ uint8_t nonce[32];
+ phys_addr_t nonce_phys;
+ } cipher;
+ /**< Cipher Parameters */
+
+ struct {
+ enum ccp_hash_algo algo;
+ enum ccp_engine engine;
+ union {
+ enum ccp_aes_mode aes_mode;
+ } um;
+ union {
+ enum ccp_sha_type sha_type;
+ enum ccp_aes_type aes_type;
+ } ut;
+ enum ccp_hash_op op;
+ uint64_t key_length;
+ /**< max hash key size 144 bytes (struct capabilties) */
+ uint8_t key[144];
+ /**< max be key size of AES is 32*/
+ uint8_t key_ccp[32];
+ phys_addr_t key_phys;
+ uint64_t digest_length;
+ void *ctx;
+ int ctx_len;
+ int offset;
+ int block_size;
+ /**< Buffer to store Software generated precomute values*/
+ /**< For HMAC H(ipad ^ key) and H(opad ^ key) */
+ /**< For CMAC K1 IV and K2 IV*/
+ uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE];
+ /**< SHA3 initial ctx all zeros*/
+ uint8_t sha3_ctx[200];
+ int aad_length;
+ } auth;
+ /**< Authentication Parameters */
+ enum rte_crypto_aead_algorithm aead_algo;
+ /**< AEAD Algorithm */
+
+ uint32_t reserved;
+} __rte_cache_aligned;
+
+extern uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_qp;
+struct ccp_private;
+
+/**
+ * Set and validate CCP crypto session parameters
+ *
+ * @param sess ccp private session
+ * @param xform crypto xform for this session
+ * @return 0 on success otherwise -1
+ */
+int ccp_set_session_parameters(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform,
+ struct ccp_private *internals);
+
+/**
+ * Find count of slots
+ *
+ * @param session CCP private session
+ * @return count of free slots available
+ */
+int ccp_compute_slot_count(struct ccp_session *session);
+
+/**
+ * process crypto ops to be enqueued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param cmd_q CCP cmd queue
+ * @param nb_ops No. of ops to be submitted
+ * @return 0 on success otherwise -1
+ */
+int process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req);
+
+/**
+ * process crypto ops to be dequeued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param nb_ops requested no. of ops
+ * @return 0 on success otherwise -1
+ */
+int process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops);
+
+
+/**
+ * Apis for SHA3 partial hash generation
+ * @param data_in buffer pointer on which phash is applied
+ * @param data_out phash result in ccp be format is written
+ */
+int partial_hash_sha3_224(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_256(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_384(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_512(uint8_t *data_in,
+ uint8_t *data_out);
+
+#endif /* _CCP_CRYPTO_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.c b/src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 00000000..80fe6a45
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,810 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "ccp_dev.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+int
+ccp_dev_start(struct rte_cryptodev *dev)
+{
+ struct ccp_private *priv = dev->data->dev_private;
+
+ priv->last_dev = TAILQ_FIRST(&ccp_list);
+ return 0;
+}
+
+struct ccp_queue *
+ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+{
+ int i, ret = 0;
+ struct ccp_device *dev;
+ struct ccp_private *priv = cdev->data->dev_private;
+
+ dev = TAILQ_NEXT(priv->last_dev, next);
+ if (unlikely(dev == NULL))
+ dev = TAILQ_FIRST(&ccp_list);
+ priv->last_dev = dev;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ for (i = 0; i < dev->cmd_q_count; i++) {
+ dev->qidx++;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ }
+ return NULL;
+}
+
+int
+ccp_read_hwrng(uint32_t *value)
+{
+ struct ccp_device *dev;
+
+ TAILQ_FOREACH(dev, &ccp_list, next) {
+ void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+ while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
+ *value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+ if (*value) {
+ dev->hwrng_retries = 0;
+ return 0;
+ }
+ }
+ dev->hwrng_retries = 0;
+ }
+ return -1;
+}
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+ uint32_t queue_size,
+ int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(queue_name);
+ if (mz != 0) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == SOCKET_ID_ANY) ||
+ (socket_id == mz->socket_id))) {
+ CCP_LOG_INFO("re-use memzone already "
+ "allocated for %s", queue_name);
+ return mz;
+ }
+ CCP_LOG_ERR("Incompatible memzone already "
+ "allocated %s, size %u, socket %d. "
+ "Requested size %u, socket %u",
+ queue_name, (uint32_t)mz->len,
+ mz->socket_id, queue_size, socket_id);
+ return NULL;
+ }
+
+ CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %u",
+ queue_name, queue_size, socket_id);
+
+ return rte_memzone_reserve_aligned(queue_name, queue_size,
+ socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
+}
+
+/* bitmap support apis */
+static inline void
+ccp_set_bit(unsigned long *bitmap, int n)
+{
+ __sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
+}
+
+static inline void
+ccp_clear_bit(unsigned long *bitmap, int n)
+{
+ __sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
+}
+
+static inline uint32_t
+ccp_get_bit(unsigned long *bitmap, int n)
+{
+ return ((bitmap[WORD_OFFSET(n)] & (1 << BIT_OFFSET(n))) != 0);
+}
+
+
+static inline uint32_t
+ccp_ffz(unsigned long word)
+{
+ unsigned long first_zero;
+
+ first_zero = __builtin_ffsl(~word);
+ return first_zero ? (first_zero - 1) :
+ BITS_PER_WORD;
+}
+
+static inline uint32_t
+ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
+{
+ uint32_t i;
+ uint32_t nwords = 0;
+
+ nwords = (limit - 1) / BITS_PER_WORD + 1;
+ for (i = 0; i < nwords; i++) {
+ if (addr[i] == 0UL)
+ return i * BITS_PER_WORD;
+ if (addr[i] < ~(0UL))
+ break;
+ }
+ return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
+}
+
+static void
+ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + WORD_OFFSET(start);
+ const unsigned int size = start + len;
+ int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
+ unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ len -= bits_to_set;
+ bits_to_set = BITS_PER_WORD;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+
+static void
+ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + WORD_OFFSET(start);
+ const unsigned int size = start + len;
+ int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
+ unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ len -= bits_to_clear;
+ bits_to_clear = BITS_PER_WORD;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
+
+
+static unsigned long
+_ccp_find_next_bit(const unsigned long *addr,
+ unsigned long nbits,
+ unsigned long start,
+ unsigned long invert)
+{
+ unsigned long tmp;
+
+ if (!nbits || start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_WORD] ^ invert;
+
+ /* Handle 1st word. */
+ tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
+ start = ccp_round_down(start, BITS_PER_WORD);
+
+ while (!tmp) {
+ start += BITS_PER_WORD;
+ if (start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_WORD] ^ invert;
+ }
+
+ return RTE_MIN(start + (ffs(tmp) - 1), nbits);
+}
+
+static unsigned long
+ccp_find_next_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ return _ccp_find_next_bit(addr, size, offset, 0UL);
+}
+
+static unsigned long
+ccp_find_next_zero_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ return _ccp_find_next_bit(addr, size, offset, ~0UL);
+}
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ */
+static unsigned long
+ccp_bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr)
+{
+ unsigned long index, end, i;
+
+again:
+ index = ccp_find_next_zero_bit(map, size, start);
+
+ end = index + nr;
+ if (end > size)
+ return end;
+ i = ccp_find_next_bit(map, end, index);
+ if (i < end) {
+ start = i + 1;
+ goto again;
+ }
+ return index;
+}
+
+static uint32_t
+ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
+{
+ struct ccp_device *ccp;
+ int start;
+
+ /* First look at the map for the queue */
+ if (cmd_q->lsb >= 0) {
+ start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
+ LSB_SIZE, 0,
+ count);
+ if (start < LSB_SIZE) {
+ ccp_bitmap_set(cmd_q->lsbmap, start, count);
+ return start + cmd_q->lsb * LSB_SIZE;
+ }
+ }
+
+ /* try to get an entry from the shared blocks */
+ ccp = cmd_q->dev;
+
+ rte_spinlock_lock(&ccp->lsb_lock);
+
+ start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
+ MAX_LSB_CNT * LSB_SIZE,
+ 0, count);
+ if (start <= MAX_LSB_CNT * LSB_SIZE) {
+ ccp_bitmap_set(ccp->lsbmap, start, count);
+ rte_spinlock_unlock(&ccp->lsb_lock);
+ return start * LSB_ITEM_SIZE;
+ }
+ CCP_LOG_ERR("NO LSBs available");
+
+ rte_spinlock_unlock(&ccp->lsb_lock);
+
+ return 0;
+}
+
+static void __rte_unused
+ccp_lsb_free(struct ccp_queue *cmd_q,
+ unsigned int start,
+ unsigned int count)
+{
+ int lsbno = start / LSB_SIZE;
+
+ if (!start)
+ return;
+
+ if (cmd_q->lsb == lsbno) {
+ /* An entry from the private LSB */
+ ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+ } else {
+ /* From the shared LSBs */
+ struct ccp_device *ccp = cmd_q->dev;
+
+ rte_spinlock_lock(&ccp->lsb_lock);
+ ccp_bitmap_clear(ccp->lsbmap, start, count);
+ rte_spinlock_unlock(&ccp->lsb_lock);
+ }
+}
+
+static int
+ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+{
+ int q_mask = 1 << cmd_q->id;
+ int weight = 0;
+ int j;
+
+ /* Build a bit mask to know which LSBs
+ * this queue has access to.
+ * Don't bother with segment 0
+ * as it has special
+ * privileges.
+ */
+ cmd_q->lsbmask = 0;
+ status >>= LSB_REGION_WIDTH;
+ for (j = 1; j < MAX_LSB_CNT; j++) {
+ if (status & q_mask)
+ ccp_set_bit(&cmd_q->lsbmask, j);
+
+ status >>= LSB_REGION_WIDTH;
+ }
+
+ for (j = 0; j < MAX_LSB_CNT; j++)
+ if (ccp_get_bit(&cmd_q->lsbmask, j))
+ weight++;
+
+ printf("Queue %d can access %d LSB regions of mask %lu\n",
+ (int)cmd_q->id, weight, cmd_q->lsbmask);
+
+ return weight ? 0 : -EINVAL;
+}
+
+static int
+ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+ int lsb_cnt, int n_lsbs,
+ unsigned long *lsb_pub)
+{
+ unsigned long qlsb = 0;
+ int bitno = 0;
+ int qlsb_wgt = 0;
+ int i, j;
+
+ /* For each queue:
+ * If the count of potential LSBs available to a queue matches the
+ * ordinal given to us in lsb_cnt:
+ * Copy the mask of possible LSBs for this queue into "qlsb";
+ * For each bit in qlsb, see if the corresponding bit in the
+ * aggregation mask is set; if so, we have a match.
+ * If we have a match, clear the bit in the aggregation to
+ * mark it as no longer available.
+ * If there is no match, clear the bit in qlsb and keep looking.
+ */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct ccp_queue *cmd_q = &ccp->cmd_q[i];
+
+ qlsb_wgt = 0;
+ for (j = 0; j < MAX_LSB_CNT; j++)
+ if (ccp_get_bit(&cmd_q->lsbmask, j))
+ qlsb_wgt++;
+
+ if (qlsb_wgt == lsb_cnt) {
+ qlsb = cmd_q->lsbmask;
+
+ bitno = ffs(qlsb) - 1;
+ while (bitno < MAX_LSB_CNT) {
+ if (ccp_get_bit(lsb_pub, bitno)) {
+ /* We found an available LSB
+ * that this queue can access
+ */
+ cmd_q->lsb = bitno;
+ ccp_clear_bit(lsb_pub, bitno);
+ break;
+ }
+ ccp_clear_bit(&qlsb, bitno);
+ bitno = ffs(qlsb) - 1;
+ }
+ if (bitno >= MAX_LSB_CNT)
+ return -EINVAL;
+ n_lsbs--;
+ }
+ }
+ return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared
+ * resources.
+ */
+static int
+ccp_assign_lsbs(struct ccp_device *ccp)
+{
+ unsigned long lsb_pub = 0, qlsb = 0;
+ int n_lsbs = 0;
+ int bitno;
+ int i, lsb_cnt;
+ int rc = 0;
+
+ rte_spinlock_init(&ccp->lsb_lock);
+
+ /* Create an aggregate bitmap to get a total count of available LSBs */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ lsb_pub |= ccp->cmd_q[i].lsbmask;
+
+ for (i = 0; i < MAX_LSB_CNT; i++)
+ if (ccp_get_bit(&lsb_pub, i))
+ n_lsbs++;
+
+ if (n_lsbs >= ccp->cmd_q_count) {
+ /* We have enough LSBS to give every queue a private LSB.
+ * Brute force search to start with the queues that are more
+ * constrained in LSB choice. When an LSB is privately
+ * assigned, it is removed from the public mask.
+ * This is an ugly N squared algorithm with some optimization.
+ */
+ for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+ lsb_cnt++) {
+ rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+ &lsb_pub);
+ if (rc < 0)
+ return -EINVAL;
+ n_lsbs = rc;
+ }
+ }
+
+ rc = 0;
+ /* What's left of the LSBs, according to the public mask, now become
+ * shared. Any zero bits in the lsb_pub mask represent an LSB region
+ * that can't be used as a shared resource, so mark the LSB slots for
+ * them as "in use".
+ */
+ qlsb = lsb_pub;
+ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+ while (bitno < MAX_LSB_CNT) {
+ ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+ ccp_set_bit(&qlsb, bitno);
+ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+ }
+
+ return rc;
+}
+
+static int
+ccp_add_device(struct ccp_device *dev, int type)
+{
+ int i;
+ uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+ uint64_t status;
+ struct ccp_queue *cmd_q;
+ const struct rte_memzone *q_mz;
+ void *vaddr;
+
+ if (dev == NULL)
+ return -1;
+
+ dev->id = ccp_dev_id++;
+ dev->qidx = 0;
+ vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+ if (type == CCP_VERSION_5B) {
+ CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+ CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+ for (i = 0; i < 12; i++) {
+ CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
+ CCP_READ_REG(vaddr, TRNG_OUT_REG));
+ }
+ CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
+ CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
+ CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
+
+ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
+ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
+
+ CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
+ }
+ CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+
+ /* Copy the private LSB mask to the public registers */
+ status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
+ status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
+ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
+ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
+ status = ((uint64_t)status_hi<<30) | ((uint64_t)status_lo);
+
+ dev->cmd_q_count = 0;
+ /* Find available queues */
+ qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+ if (!(qmr & (1 << i)))
+ continue;
+ cmd_q = &dev->cmd_q[dev->cmd_q_count++];
+ cmd_q->dev = dev;
+ cmd_q->id = i;
+ cmd_q->qidx = 0;
+ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+
+ cmd_q->reg_base = (uint8_t *)vaddr +
+ CMD_Q_STATUS_INCR * (i + 1);
+
+ /* CCP queue memory */
+ snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
+ "%s_%d_%s_%d_%s",
+ "ccp_dev",
+ (int)dev->id, "queue",
+ (int)cmd_q->id, "mem");
+ q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
+ cmd_q->qsize, SOCKET_ID_ANY);
+ cmd_q->qbase_addr = (void *)q_mz->addr;
+ cmd_q->qbase_desc = (void *)q_mz->addr;
+ cmd_q->qbase_phys_addr = q_mz->phys_addr;
+
+ cmd_q->qcontrol = 0;
+ /* init control reg to zero */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol);
+
+ /* Disable the interrupts */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
+ CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
+ CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
+
+ /* Clear the interrupts */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
+ ALL_INTERRUPTS);
+
+ /* Configure size of each virtual queue accessible to host */
+ cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
+ cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
+
+ dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+ (uint32_t)dma_addr_lo);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
+ (uint32_t)dma_addr_lo);
+
+ dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
+ cmd_q->qcontrol |= (dma_addr_hi << 16);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol);
+
+ /* create LSB Mask map */
+ if (ccp_find_lsb_regions(cmd_q, status))
+ CCP_LOG_ERR("queue doesn't have lsb regions");
+ cmd_q->lsb = -1;
+
+ rte_atomic64_init(&cmd_q->free_slots);
+ rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
+ /* unused slot barrier b/w H&T */
+ }
+
+ if (ccp_assign_lsbs(dev))
+ CCP_LOG_ERR("Unable to assign lsb region");
+
+ /* pre-allocate LSB slots */
+ for (i = 0; i < dev->cmd_q_count; i++) {
+ dev->cmd_q[i].sb_key =
+ ccp_lsb_alloc(&dev->cmd_q[i], 1);
+ dev->cmd_q[i].sb_iv =
+ ccp_lsb_alloc(&dev->cmd_q[i], 1);
+ dev->cmd_q[i].sb_sha =
+ ccp_lsb_alloc(&dev->cmd_q[i], 2);
+ dev->cmd_q[i].sb_hmac =
+ ccp_lsb_alloc(&dev->cmd_q[i], 2);
+ }
+
+ TAILQ_INSERT_TAIL(&ccp_list, dev, next);
+ return 0;
+}
+
+static void
+ccp_remove_device(struct ccp_device *dev)
+{
+ if (dev == NULL)
+ return;
+
+ TAILQ_REMOVE(&ccp_list, dev, next);
+}
+
+static int
+is_ccp_device(const char *dirname,
+ const struct rte_pci_id *ccp_id,
+ int *type)
+{
+ char filename[PATH_MAX];
+ const struct rte_pci_id *id;
+ uint16_t vendor, device_id;
+ int i;
+ unsigned long tmp;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ return 0;
+ vendor = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ return 0;
+ device_id = (uint16_t)tmp;
+
+ for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+ if (vendor == id->vendor_id &&
+ device_id == id->device_id) {
+ *type = i;
+ return 1; /* Matched device */
+ }
+ }
+ return 0;
+}
+
+static int
+ccp_probe_device(const char *dirname, uint16_t domain,
+ uint8_t bus, uint8_t devid,
+ uint8_t function, int ccp_type)
+{
+ struct ccp_device *ccp_dev = NULL;
+ struct rte_pci_device *pci;
+ char filename[PATH_MAX];
+ unsigned long tmp;
+ int uio_fd = -1, i, uio_num;
+ char uio_devname[PATH_MAX];
+ void *map_addr;
+
+ ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+ RTE_CACHE_LINE_SIZE);
+ if (ccp_dev == NULL)
+ goto fail;
+ pci = &(ccp_dev->pci);
+
+ pci->addr.domain = domain;
+ pci->addr.bus = bus;
+ pci->addr.devid = devid;
+ pci->addr.function = function;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.vendor_id = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.device_id = (uint16_t)tmp;
+
+ /* get subsystem_vendor id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.subsystem_vendor_id = (uint16_t)tmp;
+
+ /* get subsystem_device id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_device",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.subsystem_device_id = (uint16_t)tmp;
+
+ /* get class_id */
+ snprintf(filename, sizeof(filename), "%s/class",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ /* the least 24 bits are valid: class, subclass, program interface */
+ pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+ /* parse resources */
+ snprintf(filename, sizeof(filename), "%s/resource", dirname);
+ if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
+ goto fail;
+
+ uio_num = ccp_find_uio_devname(dirname);
+ if (uio_num < 0) {
+ /*
+ * It may take time for uio device to appear,
+ * wait here and try again
+ */
+ usleep(100000);
+ uio_num = ccp_find_uio_devname(dirname);
+ if (uio_num < 0)
+ goto fail;
+ }
+ snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
+
+ uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
+ if (uio_fd < 0)
+ goto fail;
+ if (flock(uio_fd, LOCK_EX | LOCK_NB))
+ goto fail;
+
+ /* Map the PCI memory resource of device */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+
+ char devname[PATH_MAX];
+ int res_fd;
+
+ if (pci->mem_resource[i].phys_addr == 0)
+ continue;
+ snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
+ res_fd = open(devname, O_RDWR);
+ if (res_fd < 0)
+ goto fail;
+ map_addr = mmap(NULL, pci->mem_resource[i].len,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED, res_fd, 0);
+ if (map_addr == MAP_FAILED)
+ goto fail;
+
+ pci->mem_resource[i].addr = map_addr;
+ }
+
+ /* device is valid, add in list */
+ if (ccp_add_device(ccp_dev, ccp_type)) {
+ ccp_remove_device(ccp_dev);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ CCP_LOG_ERR("CCP Device probe failed");
+ if (uio_fd > 0)
+ close(uio_fd);
+ if (ccp_dev)
+ rte_free(ccp_dev);
+ return -1;
+}
+
+int
+ccp_probe_devices(const struct rte_pci_id *ccp_id)
+{
+ int dev_cnt = 0;
+ int ccp_type = 0;
+ struct dirent *d;
+ DIR *dir;
+ int ret = 0;
+ int module_idx = 0;
+ uint16_t domain;
+ uint8_t bus, devid, function;
+ char dirname[PATH_MAX];
+
+ module_idx = ccp_check_pci_uio_module();
+ if (module_idx < 0)
+ return -1;
+
+ TAILQ_INIT(&ccp_list);
+ dir = opendir(SYSFS_PCI_DEVICES);
+ if (dir == NULL)
+ return -1;
+ while ((d = readdir(dir)) != NULL) {
+ if (d->d_name[0] == '.')
+ continue;
+ if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+ &domain, &bus, &devid, &function) != 0)
+ continue;
+ snprintf(dirname, sizeof(dirname), "%s/%s",
+ SYSFS_PCI_DEVICES, d->d_name);
+ if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+ printf("CCP : Detected CCP device with ID = 0x%x\n",
+ ccp_id[ccp_type].device_id);
+ ret = ccp_probe_device(dirname, domain, bus, devid,
+ function, ccp_type);
+ if (ret == 0)
+ dev_cnt++;
+ }
+ }
+ closedir(dir);
+ return dev_cnt;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.h b/src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.h
new file mode 100644
index 00000000..de3e4bcc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.h
@@ -0,0 +1,495 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_DEV_H_
+#define _CCP_DEV_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+/**< CCP sspecific */
+#define MAX_HW_QUEUES 5
+#define CCP_MAX_TRNG_RETRIES 10
+#define CCP_ALIGN(x, y) ((((x) + (y - 1)) / y) * y)
+
+/**< CCP Register Mappings */
+#define Q_MASK_REG 0x000
+#define TRNG_OUT_REG 0x00c
+
+/* CCP Version 5 Specifics */
+#define CMD_QUEUE_MASK_OFFSET 0x00
+#define CMD_QUEUE_PRIO_OFFSET 0x04
+#define CMD_REQID_CONFIG_OFFSET 0x08
+#define CMD_CMD_TIMEOUT_OFFSET 0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET 0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET 0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET 0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET 0x24
+
+#define CMD_Q_CONTROL_BASE 0x0000
+#define CMD_Q_TAIL_LO_BASE 0x0004
+#define CMD_Q_HEAD_LO_BASE 0x0008
+#define CMD_Q_INT_ENABLE_BASE 0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010
+
+#define CMD_Q_STATUS_BASE 0x0100
+#define CMD_Q_INT_STATUS_BASE 0x0104
+
+#define CMD_CONFIG_0_OFFSET 0x6000
+#define CMD_TRNG_CTL_OFFSET 0x6008
+#define CMD_AES_MASK_OFFSET 0x6010
+#define CMD_CLK_GATE_CTL_OFFSET 0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD_Q_STATUS_INCR 0x1000
+
+/* Bit masks */
+#define CMD_Q_RUN 0x1
+#define CMD_Q_SIZE 0x1F
+#define CMD_Q_SHIFT 3
+#define COMMANDS_PER_QUEUE 2048
+
+#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+ CMD_Q_SIZE)
+#define Q_DESC_SIZE sizeof(struct ccp_desc)
+#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n))
+
+#define INT_COMPLETION 0x1
+#define INT_ERROR 0x2
+#define INT_QUEUE_STOPPED 0x4
+#define ALL_INTERRUPTS (INT_COMPLETION| \
+ INT_ERROR| \
+ INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH 5
+#define MAX_LSB_CNT 8
+
+#define LSB_SIZE 16
+#define LSB_ITEM_SIZE 32
+#define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE)
+#define LSB_ENTRY_NUMBER(LSB_ADDR) (LSB_ADDR / LSB_ITEM_SIZE)
+
+/* General CCP Defines */
+
+#define CCP_SB_BYTES 32
+/* Word 0 */
+#define CCP_CMD_DW0(p) ((p)->dw0)
+#define CCP_CMD_SOC(p) (CCP_CMD_DW0(p).soc)
+#define CCP_CMD_IOC(p) (CCP_CMD_DW0(p).ioc)
+#define CCP_CMD_INIT(p) (CCP_CMD_DW0(p).init)
+#define CCP_CMD_EOM(p) (CCP_CMD_DW0(p).eom)
+#define CCP_CMD_FUNCTION(p) (CCP_CMD_DW0(p).function)
+#define CCP_CMD_ENGINE(p) (CCP_CMD_DW0(p).engine)
+#define CCP_CMD_PROT(p) (CCP_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP_CMD_DW1(p) ((p)->length)
+#define CCP_CMD_LEN(p) (CCP_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP_CMD_DW2(p) ((p)->src_lo)
+#define CCP_CMD_SRC_LO(p) (CCP_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP_CMD_DW3(p) ((p)->dw3)
+#define CCP_CMD_SRC_MEM(p) ((p)->dw3.src_mem)
+#define CCP_CMD_SRC_HI(p) ((p)->dw3.src_hi)
+#define CCP_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id)
+#define CCP_CMD_FIX_SRC(p) ((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP_CMD_DW4(p) ((p)->dw4)
+#define CCP_CMD_DST_LO(p) (CCP_CMD_DW4(p).dst_lo)
+#define CCP_CMD_DW5(p) ((p)->dw5.fields.dst_hi)
+#define CCP_CMD_DST_HI(p) (CCP_CMD_DW5(p))
+#define CCP_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem)
+#define CCP_CMD_FIX_DST(p) ((p)->dw5.fields.fixed)
+#define CCP_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo)
+#define CCP_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP_CMD_DW6(p) ((p)->key_lo)
+#define CCP_CMD_KEY_LO(p) (CCP_CMD_DW6(p))
+#define CCP_CMD_DW7(p) ((p)->dw7)
+#define CCP_CMD_KEY_HI(p) ((p)->dw7.key_hi)
+#define CCP_CMD_KEY_MEM(p) ((p)->dw7.key_mem)
+
+/* bitmap */
+enum {
+ BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+};
+
+#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
+#define BIT_OFFSET(b) ((b) % BITS_PER_WORD)
+
+#define CCP_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define CCP_BITMAP_SIZE(nr) \
+ CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
+
+#define CCP_BITMAP_FIRST_WORD_MASK(start) \
+ (~0UL << ((start) & (BITS_PER_WORD - 1)))
+#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
+ (~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
+
+#define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
+#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
+
+/** CCP registers Write/Read */
+
+static inline void ccp_pci_reg_write(void *base, int offset,
+ uint32_t value)
+{
+ volatile void *reg_addr = ((uint8_t *)base + offset);
+
+ rte_write32((rte_cpu_to_le_32(value)), reg_addr);
+}
+
+static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+{
+ volatile void *reg_addr = ((uint8_t *)base + offset);
+
+ return rte_le_to_cpu_32(rte_read32(reg_addr));
+}
+
+#define CCP_READ_REG(hw_addr, reg_offset) \
+ ccp_pci_reg_read(hw_addr, reg_offset)
+
+#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+ ccp_pci_reg_write(hw_addr, reg_offset, value)
+
+TAILQ_HEAD(ccp_list, ccp_device);
+
+extern struct ccp_list ccp_list;
+
+/**
+ * CCP device version
+ */
+enum ccp_device_version {
+ CCP_VERSION_5A = 0,
+ CCP_VERSION_5B,
+};
+
+/**
+ * A structure describing a CCP command queue.
+ */
+struct ccp_queue {
+ struct ccp_device *dev;
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+
+ rte_atomic64_t free_slots;
+ /**< available free slots updated from enq/deq calls */
+
+ /* Queue identifier */
+ uint64_t id; /**< queue id */
+ uint64_t qidx; /**< queue index */
+ uint64_t qsize; /**< queue size */
+
+ /* Queue address */
+ struct ccp_desc *qbase_desc;
+ void *qbase_addr;
+ phys_addr_t qbase_phys_addr;
+ /**< queue-page registers addr */
+ void *reg_base;
+
+ uint32_t qcontrol;
+ /**< queue ctrl reg */
+
+ int lsb;
+ /**< lsb region assigned to queue */
+ unsigned long lsbmask;
+ /**< lsb regions queue can access */
+ unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
+ /**< all lsb resources which queue is using */
+ uint32_t sb_key;
+ /**< lsb assigned for queue */
+ uint32_t sb_iv;
+ /**< lsb assigned for iv */
+ uint32_t sb_sha;
+ /**< lsb assigned for sha ctx */
+ uint32_t sb_hmac;
+ /**< lsb assigned for hmac ctx */
+} ____cacheline_aligned;
+
+/**
+ * A structure describing a CCP device.
+ */
+struct ccp_device {
+ TAILQ_ENTRY(ccp_device) next;
+ int id;
+ /**< ccp dev id on platform */
+ struct ccp_queue cmd_q[MAX_HW_QUEUES];
+ /**< ccp queue */
+ int cmd_q_count;
+ /**< no. of ccp Queues */
+ struct rte_pci_device pci;
+ /**< ccp pci identifier */
+ unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+ /**< shared lsb mask of ccp */
+ rte_spinlock_t lsb_lock;
+ /**< protection for shared lsb region allocation */
+ int qidx;
+ /**< current queue index */
+ int hwrng_retries;
+ /**< retry counter for CCP TRNG */
+} __rte_cache_aligned;
+
+/**< CCP H/W engine related */
+/**
+ * ccp_engine - CCP operation identifiers
+ *
+ * @CCP_ENGINE_AES: AES operation
+ * @CCP_ENGINE_XTS_AES: 128-bit XTS AES operation
+ * @CCP_ENGINE_3DES: DES/3DES operation
+ * @CCP_ENGINE_SHA: SHA operation
+ * @CCP_ENGINE_RSA: RSA operation
+ * @CCP_ENGINE_PASSTHRU: pass-through operation
+ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
+ * @CCP_ENGINE_ECC: ECC operation
+ */
+enum ccp_engine {
+ CCP_ENGINE_AES = 0,
+ CCP_ENGINE_XTS_AES_128,
+ CCP_ENGINE_3DES,
+ CCP_ENGINE_SHA,
+ CCP_ENGINE_RSA,
+ CCP_ENGINE_PASSTHRU,
+ CCP_ENGINE_ZLIB_DECOMPRESS,
+ CCP_ENGINE_ECC,
+ CCP_ENGINE__LAST,
+};
+
+/* Passthru engine */
+/**
+ * ccp_passthru_bitwise - type of bitwise passthru operation
+ *
+ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
+ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
+ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
+ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
+ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
+ */
+enum ccp_passthru_bitwise {
+ CCP_PASSTHRU_BITWISE_NOOP = 0,
+ CCP_PASSTHRU_BITWISE_AND,
+ CCP_PASSTHRU_BITWISE_OR,
+ CCP_PASSTHRU_BITWISE_XOR,
+ CCP_PASSTHRU_BITWISE_MASK,
+ CCP_PASSTHRU_BITWISE__LAST,
+};
+
+/**
+ * ccp_passthru_byteswap - type of byteswap passthru operation
+ *
+ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
+ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
+ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
+ */
+enum ccp_passthru_byteswap {
+ CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+ CCP_PASSTHRU_BYTESWAP_32BIT,
+ CCP_PASSTHRU_BYTESWAP_256BIT,
+ CCP_PASSTHRU_BYTESWAP__LAST,
+};
+
+/**
+ * CCP passthru
+ */
+struct ccp_passthru {
+ phys_addr_t src_addr;
+ phys_addr_t dest_addr;
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+ int len;
+ int dir;
+};
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t mode:5;
+ uint16_t type:2;
+ } aes;
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t mode:5;
+ uint16_t type:2;
+ } des;
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t rsvd:5;
+ uint16_t type:2;
+ } aes_xts;
+ struct {
+ uint16_t rsvd1:10;
+ uint16_t type:4;
+ uint16_t rsvd2:1;
+ } sha;
+ struct {
+ uint16_t mode:3;
+ uint16_t size:12;
+ } rsa;
+ struct {
+ uint16_t byteswap:2;
+ uint16_t bitwise:3;
+ uint16_t reflect:2;
+ uint16_t rsvd:8;
+ } pt;
+ struct {
+ uint16_t rsvd:13;
+ } zlib;
+ struct {
+ uint16_t size:10;
+ uint16_t type:2;
+ uint16_t mode:3;
+ } ecc;
+ uint16_t raw;
+};
+
+
+/**
+ * descriptor for version 5 CPP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+ uint32_t soc:1;
+ uint32_t ioc:1;
+ uint32_t rsvd1:1;
+ uint32_t init:1;
+ uint32_t eom:1;
+ uint32_t function:15;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t rsvd2:7;
+};
+
+struct dword3 {
+ uint32_t src_hi:16;
+ uint32_t src_mem:2;
+ uint32_t lsb_cxt_id:8;
+ uint32_t rsvd1:5;
+ uint32_t fixed:1;
+};
+
+union dword4 {
+ uint32_t dst_lo; /* NON-SHA */
+ uint32_t sha_len_lo; /* SHA */
+};
+
+union dword5 {
+ struct {
+ uint32_t dst_hi:16;
+ uint32_t dst_mem:2;
+ uint32_t rsvd1:13;
+ uint32_t fixed:1;
+ }
+ fields;
+ uint32_t sha_len_hi;
+};
+
+struct dword7 {
+ uint32_t key_hi:16;
+ uint32_t key_mem:2;
+ uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+ struct dword0 dw0;
+ uint32_t length;
+ uint32_t src_lo;
+ struct dword3 dw3;
+ union dword4 dw4;
+ union dword5 dw5;
+ uint32_t key_lo;
+ struct dword7 dw7;
+};
+
+/**
+ * ccp memory type
+ */
+enum ccp_memtype {
+ CCP_MEMTYPE_SYSTEM = 0,
+ CCP_MEMTYPE_SB,
+ CCP_MEMTYPE_LOCAL,
+ CCP_MEMTYPE_LAST,
+};
+
+/**
+ * cmd id to follow order
+ */
+enum ccp_cmd_order {
+ CCP_CMD_CIPHER = 0,
+ CCP_CMD_AUTH,
+ CCP_CMD_CIPHER_HASH,
+ CCP_CMD_HASH_CIPHER,
+ CCP_CMD_COMBINED,
+ CCP_CMD_NOT_SUPPORTED,
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+ return ((uint64_t)addr) & 0x0ffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+ return ((uint64_t)addr >> 32) & 0x00000ffff;
+}
+
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+/**
+ * allocate a ccp command queue
+ *
+ * @dev rte crypto device
+ * @param slot_req number of required
+ * @return allotted CCP queue on success otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
+/**
+ * read hwrng value
+ *
+ * @param trng_value data pointer to write RNG value
+ * @return 0 on success otherwise -1
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
+#endif /* _CCP_DEV_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.c b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 00000000..59152ca5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "ccp_pci.h"
+
+static const char * const uio_module_names[] = {
+ "igb_uio",
+ "uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+ FILE *fp;
+ int i;
+ char buf[BUFSIZ];
+
+ fp = fopen(PROC_MODULES, "r");
+ if (fp == NULL)
+ return -1;
+ i = 0;
+ while (uio_module_names[i] != NULL) {
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ if (!strncmp(buf, uio_module_names[i],
+ strlen(uio_module_names[i])))
+ return i;
+ }
+ i++;
+ rewind(fp);
+ }
+ printf("Insert igb_uio or uio_pci_generic kernel module(s)");
+ return -1;/* uio not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+ uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+ /* first split on ':' */
+ union splitaddr {
+ struct {
+ char *domain;
+ char *bus;
+ char *devid;
+ char *function;
+ };
+ char *str[PCI_FMT_NVAL];
+ /* last element-separator is "." not ":" */
+ } splitaddr;
+
+ char *buf_copy = strndup(buf, bufsize);
+
+ if (buf_copy == NULL)
+ return -1;
+
+ if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+ != PCI_FMT_NVAL - 1)
+ goto error;
+ /* final split is on '.' between devid and function */
+ splitaddr.function = strchr(splitaddr.devid, '.');
+ if (splitaddr.function == NULL)
+ goto error;
+ *splitaddr.function++ = '\0';
+
+ /* now convert to int values */
+ errno = 0;
+ *domain = (uint8_t)strtoul(splitaddr.domain, NULL, 16);
+ *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+ *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+ *function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+ if (errno != 0)
+ goto error;
+
+ free(buf_copy); /* free the copy made with strdup */
+ return 0;
+error:
+ free(buf_copy);
+ return -1;
+}
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ char *end = NULL;
+
+ f = fopen(filename, "r");
+ if (f == NULL)
+ return -1;
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ fclose(f);
+ return -1;
+ }
+ *val = strtoul(buf, &end, 0);
+ if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO 0x00000100
+#define IORESOURCE_MEM 0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+ uint64_t *end_addr, uint64_t *flags)
+{
+ union pci_resource_info {
+ struct {
+ char *phys_addr;
+ char *end_addr;
+ char *flags;
+ };
+ char *ptrs[PCI_RESOURCE_FMT_NVAL];
+ } res_info;
+
+ if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+ return -1;
+ errno = 0;
+ *phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+ *end_addr = strtoull(res_info.end_addr, NULL, 16);
+ *flags = strtoull(res_info.flags, NULL, 16);
+ if (errno != 0)
+ return -1;
+
+ return 0;
+}
+
+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+ FILE *fp;
+ char buf[BUFSIZ];
+ int i;
+ uint64_t phys_addr, end_addr, flags;
+
+ fp = fopen(filename, "r");
+ if (fp == NULL)
+ return -1;
+
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ if (fgets(buf, sizeof(buf), fp) == NULL)
+ goto error;
+ if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+ &phys_addr, &end_addr, &flags) < 0)
+ goto error;
+
+ if (flags & IORESOURCE_MEM) {
+ dev->mem_resource[i].phys_addr = phys_addr;
+ dev->mem_resource[i].len = end_addr - phys_addr + 1;
+ /* not mapped for now */
+ dev->mem_resource[i].addr = NULL;
+ }
+ }
+ fclose(fp);
+ return 0;
+
+error:
+ fclose(fp);
+ return -1;
+}
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+
+ DIR *dir;
+ struct dirent *e;
+ char dirname_uio[PATH_MAX];
+ unsigned int uio_num;
+ int ret = -1;
+
+ /* depending on kernel version, uio can be located in uio/uioX
+ * or uio:uioX
+ */
+ snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+ dir = opendir(dirname_uio);
+ if (dir == NULL) {
+ /* retry with the parent directory might be different kernel version*/
+ dir = opendir(dirname);
+ if (dir == NULL)
+ return -1;
+ }
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+ /* format could be uio%d ...*/
+ int shortprefix_len = sizeof("uio") - 1;
+ /* ... or uio:uio%d */
+ int longprefix_len = sizeof("uio:uio") - 1;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", 3) != 0)
+ continue;
+
+ /* first try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+ ret = uio_num;
+ break;
+ }
+
+ /* then try uio:uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+ ret = uio_num;
+ break;
+ }
+ }
+ closedir(dir);
+ return ret;
+
+
+}
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.h b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 00000000..7ed3bac4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+ uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+ struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
+
+#endif /* _CCP_PCI_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 00000000..6984913f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,833 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "ccp_pmd_private.h"
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+
+#define CCP_BASE_SYM_CRYPTO_CAPABILITIES \
+ { /* SHA1 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-224 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_224, \
+ .block_size = 144, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-224 HMAC*/ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC, \
+ .block_size = 144, \
+ .key_size = { \
+ .min = 1, \
+ .max = 144, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-256 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_256, \
+ .block_size = 136, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-256-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC, \
+ .block_size = 136, \
+ .key_size = { \
+ .min = 1, \
+ .max = 136, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-384 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_384, \
+ .block_size = 104, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-384-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC, \
+ .block_size = 104, \
+ .key_size = { \
+ .min = 1, \
+ .max = 104, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-512 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_512, \
+ .block_size = 72, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-512-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC, \
+ .block_size = 72, \
+ .key_size = { \
+ .min = 1, \
+ .max = 72, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /*AES-CMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ }, } \
+ }, } \
+ }, \
+ { /* AES ECB */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_ECB, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 65535, \
+ .increment = 1 \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ }, } \
+ }, } \
+ }
+
+#define CCP_EXTRA_SYM_CRYPTO_CAPABILITIES \
+ { /* MD5 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }
+
+static const struct rte_cryptodev_capabilities ccp_crypto_cap[] = {
+ CCP_BASE_SYM_CRYPTO_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities ccp_crypto_cap_complete[] = {
+ CCP_EXTRA_SYM_CRYPTO_CAPABILITIES,
+ CCP_BASE_SYM_CRYPTO_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ return 0;
+}
+
+static int
+ccp_pmd_start(struct rte_cryptodev *dev)
+{
+ return ccp_dev_start(dev);
+}
+
+static void
+ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+
+}
+
+static int
+ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+ccp_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+
+}
+
+static void
+ccp_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+static void
+ccp_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct ccp_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = ccp_crypto_cap;
+ if (internals->auth_opt == 1)
+ dev_info->capabilities = ccp_crypto_cap_complete;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ }
+}
+
+static int
+ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct ccp_qp *qp;
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
+ rte_ring_free(qp->processed_pkts);
+ rte_mempool_free(qp->batch_mp);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct ccp_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "ccp_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n > sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->size >= ring_size) {
+ CCP_LOG_INFO(
+ "Reusing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+ CCP_LOG_INFO(
+ "Unable to reuse ring %s for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct ccp_private *internals = dev->data->dev_private;
+ struct ccp_qp *qp;
+ int retval = 0;
+
+ if (qp_id >= internals->max_nb_qpairs) {
+ CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+ qp_id, internals->max_nb_qpairs);
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ ccp_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ CCP_LOG_ERR("Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ qp->dev = dev;
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = ccp_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ CCP_LOG_ERR("Failed to create unique name for ccp qp");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL) {
+ CCP_LOG_ERR("Failed to create batch info ring");
+ goto qp_setup_cleanup;
+ }
+
+ qp->sess_mp = session_pool;
+
+ /* mempool for batch info */
+ qp->batch_mp = rte_mempool_create(
+ qp->name,
+ qp_conf->nb_descriptors,
+ sizeof(struct ccp_batch_info),
+ RTE_CACHE_LINE_SIZE,
+ 0, NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (qp->batch_mp == NULL)
+ goto qp_setup_cleanup;
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ dev->data->queue_pairs[qp_id] = NULL;
+ if (qp)
+ rte_free(qp);
+ return -1;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+static unsigned
+ccp_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct ccp_session);
+}
+
+static int
+ccp_pmd_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ int ret;
+ void *sess_private_data;
+ struct ccp_private *internals;
+
+ if (unlikely(sess == NULL || xform == NULL)) {
+ CCP_LOG_ERR("Invalid session struct or xform");
+ return -ENOMEM;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CCP_LOG_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+ internals = (struct ccp_private *)dev->data->dev_private;
+ ret = ccp_set_session_parameters(sess_private_data, xform, internals);
+ if (ret != 0) {
+ CCP_LOG_ERR("failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+static void
+ccp_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_mempool_put(sess_mp, sess_priv);
+ memset(sess_priv, 0, sizeof(struct ccp_session));
+ set_sym_session_private_data(sess, index, NULL);
+ }
+}
+
+struct rte_cryptodev_ops ccp_ops = {
+ .dev_configure = ccp_pmd_config,
+ .dev_start = ccp_pmd_start,
+ .dev_stop = ccp_pmd_stop,
+ .dev_close = ccp_pmd_close,
+
+ .stats_get = ccp_pmd_stats_get,
+ .stats_reset = ccp_pmd_stats_reset,
+
+ .dev_infos_get = ccp_pmd_info_get,
+
+ .queue_pair_setup = ccp_pmd_qp_setup,
+ .queue_pair_release = ccp_pmd_qp_release,
+ .queue_pair_count = ccp_pmd_qp_count,
+
+ .sym_session_get_size = ccp_pmd_sym_session_get_size,
+ .sym_session_configure = ccp_pmd_sym_session_configure,
+ .sym_session_clear = ccp_pmd_sym_session_clear,
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_private.h b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 00000000..79752f68
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_cryptodev.h>
+#include "ccp_crypto.h"
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/**< Maximum queue pairs supported by CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS 1
+#define CCP_NB_MAX_DESCRIPTORS 1024
+#define CCP_MAX_BURST 64
+
+#include "ccp_dev.h"
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+ uint8_t crypto_num_dev; /**< Number of working crypto devices */
+ bool auth_opt; /**< Authentication offload option */
+ struct ccp_device *last_dev; /**< Last working crypto device */
+};
+
+/* CCP batch info */
+struct ccp_batch_info {
+ struct rte_crypto_op *op[CCP_MAX_BURST];
+ /**< optable populated at enque time from app*/
+ int op_idx;
+ struct ccp_queue *cmd_q;
+ uint16_t opcnt;
+ /**< no. of crypto ops in batch*/
+ int desccnt;
+ /**< no. of ccp queue descriptors*/
+ uint32_t head_offset;
+ /**< ccp queue head tail offsets time of enqueue*/
+ uint32_t tail_offset;
+ uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
+ phys_addr_t lsb_buf_phys;
+ /**< LSB intermediate buf for passthru */
+ int lsb_buf_idx;
+ uint16_t auth_ctr;
+ /**< auth only ops batch for CPU based auth */
+} __rte_cache_aligned;
+
+/**< CCP crypto queue pair */
+struct ccp_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing process packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_mempool *batch_mp;
+ /**< Session Mempool for batch info */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ struct ccp_batch_info *b_info;
+ /**< Store ops pulled out of queue */
+ struct rte_cryptodev *dev;
+ /**< rte crypto device to which this qp belongs */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+
+/**< device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/meson.build b/src/spdk/dpdk/drivers/crypto/ccp/meson.build
new file mode 100644
index 00000000..e43b0059
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+dep = dependency('libcrypto', required: false)
+if not dep.found()
+ build = false
+endif
+deps += 'bus_vdev'
+deps += 'bus_pci'
+
+sources = files('rte_ccp_pmd.c',
+ 'ccp_crypto.c',
+ 'ccp_dev.c',
+ 'ccp_pci.c',
+ 'ccp_pmd_ops.c')
+
+ext_deps += dep
+pkgconfig_extra_libs += '-lcrypto'
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c b/src/spdk/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c
new file mode 100644
index 00000000..92d8a955
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+
+#include "ccp_crypto.h"
+#include "ccp_dev.h"
+#include "ccp_pmd_private.h"
+
+/**
+ * Global static parameter used to find if CCP device is already initialized.
+ */
+static unsigned int ccp_pmd_init_done;
+uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_pmd_init_params {
+ struct rte_cryptodev_pmd_init_params def_p;
+ bool auth_opt;
+};
+
+#define CCP_CRYPTODEV_PARAM_NAME ("name")
+#define CCP_CRYPTODEV_PARAM_SOCKET_ID ("socket_id")
+#define CCP_CRYPTODEV_PARAM_MAX_NB_QP ("max_nb_queue_pairs")
+#define CCP_CRYPTODEV_PARAM_AUTH_OPT ("ccp_auth_opt")
+
+const char *ccp_pmd_valid_params[] = {
+ CCP_CRYPTODEV_PARAM_NAME,
+ CCP_CRYPTODEV_PARAM_SOCKET_ID,
+ CCP_CRYPTODEV_PARAM_MAX_NB_QP,
+ CCP_CRYPTODEV_PARAM_AUTH_OPT,
+};
+
+/** ccp pmd auth option */
+enum ccp_pmd_auth_opt {
+ CCP_PMD_AUTH_OPT_CCP = 0,
+ CCP_PMD_AUTH_OPT_CPU,
+};
+
+/** parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+ CCP_LOG_ERR("Argument has to be positive.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** parse name argument */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CCP_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+/** parse authentication operation option */
+static int
+parse_auth_opt_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct ccp_pmd_init_params *params = extra_args;
+ int i;
+
+ i = atoi(value);
+ if (i < CCP_PMD_AUTH_OPT_CCP || i > CCP_PMD_AUTH_OPT_CPU) {
+ CCP_LOG_ERR("Invalid ccp pmd auth option. "
+ "0->auth on CCP(default), "
+ "1->auth on CPU\n");
+ return -EINVAL;
+ }
+ params->auth_opt = i;
+ return 0;
+}
+
+static int
+ccp_pmd_parse_input_args(struct ccp_pmd_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ ccp_pmd_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_MAX_NB_QP,
+ &parse_integer_arg,
+ &params->def_p.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_SOCKET_ID,
+ &parse_integer_arg,
+ &params->def_p.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_NAME,
+ &parse_name_arg,
+ &params->def_p);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_AUTH_OPT,
+ &parse_auth_opt_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static struct ccp_session *
+get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
+{
+ struct ccp_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session == NULL))
+ return NULL;
+
+ sess = (struct ccp_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ void *_sess;
+ void *_sess_private_data = NULL;
+ struct ccp_private *internals;
+
+ if (rte_mempool_get(qp->sess_mp, &_sess))
+ return NULL;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct ccp_session *)_sess_private_data;
+
+ internals = (struct ccp_private *)qp->dev->data->dev_private;
+ if (unlikely(ccp_set_session_parameters(sess, op->sym->xform,
+ internals) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ ccp_cryptodev_driver_id,
+ _sess_private_data);
+ }
+
+ return sess;
+}
+
+static uint16_t
+ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ccp_session *sess = NULL;
+ struct ccp_qp *qp = queue_pair;
+ struct ccp_queue *cmd_q;
+ struct rte_cryptodev *dev = qp->dev;
+ uint16_t i, enq_cnt = 0, slots_req = 0;
+
+ if (nb_ops == 0)
+ return 0;
+
+ if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_ccp_session(qp, ops[i]);
+ if (unlikely(sess == NULL) && (i == 0)) {
+ qp->qp_stats.enqueue_err_count++;
+ return 0;
+ } else if (sess == NULL) {
+ nb_ops = i;
+ break;
+ }
+ slots_req += ccp_compute_slot_count(sess);
+ }
+
+ cmd_q = ccp_allot_queue(dev, slots_req);
+ if (unlikely(cmd_q == NULL))
+ return 0;
+
+ enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+ qp->qp_stats.enqueued_count += enq_cnt;
+ return enq_cnt;
+}
+
+static uint16_t
+ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ccp_qp *qp = queue_pair;
+ uint16_t nb_dequeued = 0, i;
+
+ nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
+ /* Free session if a session-less crypto op */
+ for (i = 0; i < nb_dequeued; i++)
+ if (unlikely(ops[i]->sess_type ==
+ RTE_CRYPTO_OP_SESSIONLESS)) {
+ rte_mempool_put(qp->sess_mp,
+ ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id ccp_pci_id[] = {
+ {
+ RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+ },
+ {
+ RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+ },
+ {.device_id = 0},
+};
+
+/** Remove ccp pmd */
+static int
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
+{
+ const char *name;
+
+ ccp_pmd_init_done = 0;
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct ccp_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct ccp_private *internals;
+ uint8_t cryptodev_cnt = 0;
+
+ if (init_params->def_p.name[0] == '\0')
+ snprintf(init_params->def_p.name,
+ sizeof(init_params->def_p.name),
+ "%s", name);
+
+ dev = rte_cryptodev_pmd_create(init_params->def_p.name,
+ &vdev->device,
+ &init_params->def_p);
+ if (dev == NULL) {
+ CCP_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+ if (cryptodev_cnt == 0) {
+ CCP_LOG_ERR("failed to detect CCP crypto device");
+ goto init_error;
+ }
+
+ printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
+ dev->driver_id = ccp_cryptodev_driver_id;
+
+ /* register rx/tx burst functions for data path */
+ dev->dev_ops = ccp_pmd_ops;
+ dev->enqueue_burst = ccp_pmd_enqueue_burst;
+ dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
+ internals->auth_opt = init_params->auth_opt;
+ internals->crypto_num_dev = cryptodev_cnt;
+
+ return 0;
+
+init_error:
+ CCP_LOG_ERR("driver %s: %s() failed",
+ init_params->def_p.name, __func__);
+ cryptodev_ccp_remove(vdev);
+
+ return -EFAULT;
+}
+
+/** Probe ccp pmd */
+static int
+cryptodev_ccp_probe(struct rte_vdev_device *vdev)
+{
+ int rc = 0;
+ const char *name;
+ struct ccp_pmd_init_params init_params = {
+ .def_p = {
+ "",
+ sizeof(struct ccp_private),
+ rte_socket_id(),
+ CCP_PMD_MAX_QUEUE_PAIRS
+ },
+ .auth_opt = CCP_PMD_AUTH_OPT_CCP,
+ };
+ const char *input_args;
+
+ if (ccp_pmd_init_done) {
+ RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
+ return -EFAULT;
+ }
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+ ccp_pmd_parse_input_args(&init_params, input_args);
+ init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.def_p.socket_id);
+ RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
+ init_params.def_p.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
+ ((init_params.auth_opt == 0) ? "CCP" : "CPU"));
+
+ rc = cryptodev_ccp_create(name, vdev, &init_params);
+ if (rc)
+ return rc;
+ ccp_pmd_init_done = 1;
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
+ .probe = cryptodev_ccp_probe,
+ .remove = cryptodev_ccp_remove
+};
+
+static struct cryptodev_driver ccp_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int> "
+ "ccp_auth_opt=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
+ ccp_cryptodev_driver_id);
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/rte_pmd_ccp_version.map b/src/spdk/dpdk/drivers/crypto/ccp/rte_pmd_ccp_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/rte_pmd_ccp_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/Makefile b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/Makefile
new file mode 100644
index 00000000..da3d8f84
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/Makefile
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright 2016 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+$(error "RTE_LIBRTE_SECURITY is required to build RTE_LIBRTE_PMD_DPAA2_SEC")
+endif
+endif
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_sec.a
+
+# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -D _GNU_SOURCE
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -gt 70 && echo 1), 1)
+CFLAGS += -Wno-implicit-fallthrough
+endif
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2/
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa2_sec_version.map
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec_dpseci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += mc/dpseci.c
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
new file mode 100644
index 00000000..2a3c61c6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -0,0 +1,2931 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_cryptodev.h>
+#include <rte_security_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_common.h>
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_dpio.h>
+#include <dpaa2_hw_mempool.h>
+#include <fsl_dpseci.h>
+#include <fsl_mc_sys.h>
+
+#include "dpaa2_sec_priv.h"
+#include "dpaa2_sec_logs.h"
+
+/* Required types */
+typedef uint64_t dma_addr_t;
+
+/* RTA header files */
+#include <hw/desc/ipsec.h>
+#include <hw/desc/algo.h>
+
+/* Minimum job descriptor consists of a oneword job descriptor HEADER and
+ * a pointer to the shared descriptor
+ */
+#define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
+#define FSL_VENDOR_ID 0x1957
+#define FSL_DEVICE_ID 0x410
+#define FSL_SUBSYSTEM_SEC 1
+#define FSL_MC_DPSECI_DEVID 3
+
+#define NO_PREFETCH 0
+/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define FLE_POOL_NUM_BUFS 32000
+#define FLE_POOL_BUF_SIZE 256
+#define FLE_POOL_CACHE_SIZE 512
+#define FLE_SG_MEM_SIZE 2048
+#define SEC_FLC_DHR_OUTBOUND -114
+#define SEC_FLC_DHR_INBOUND 0
+
+enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
+
+static uint8_t cryptodev_driver_id;
+
+int dpaa2_logtype_sec;
+
+static inline int
+build_proto_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct sec_flow_context *flc;
+ struct rte_mbuf *mbuf = sym_op->m_src;
+
+ if (likely(bpid < MAX_BPID))
+ DPAA2_SET_FD_BPID(fd, bpid);
+ else
+ DPAA2_SET_FD_IVP(fd);
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
+ DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
+ DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
+
+ /* save physical address of mbuf */
+ op->sym->aead.digest.phys_addr = mbuf->buf_iova;
+ mbuf->buf_iova = (size_t)op;
+
+ return 0;
+}
+
+static inline int
+build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+ int icv_len = sess->digest_length;
+ uint8_t *old_icv;
+ struct rte_mbuf *mbuf;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ mbuf = sym_op->m_dst;
+ else
+ mbuf = sym_op->m_src;
+
+ /* first FLE entry used to store mbuf and session ctxt */
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "iv-len=%d data_off: 0x%x\n",
+ sym_op->aead.data.offset,
+ sym_op->aead.data.length,
+ sess->digest_length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+ op_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + icv_len + auth_only_len) :
+ sym_op->aead.data.length + auth_only_len;
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
+ auth_only_len);
+ sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;
+
+ mbuf = mbuf->next;
+ /* o/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sge->length -= icv_len;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+ sge->length = icv_len;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ mbuf = sym_op->m_src;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_FIN(ip_fle);
+ ip_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len +
+ icv_len);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+ sge->length = sess->iv.length;
+
+ sge++;
+ if (auth_only_len) {
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+ sge->length = auth_only_len;
+ sge++;
+ }
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->aead.data.offset;
+
+ mbuf = mbuf->next;
+ /* i/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->aead.digest.data, icv_len);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = icv_len;
+ }
+
+ DPAA2_SET_FLE_FIN(sge);
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+ return 0;
+}
+
+static inline int
+build_authenc_gcm_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+ int icv_len = sess->digest_length, retval;
+ uint8_t *old_icv;
+ struct rte_mbuf *dst;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ dst = sym_op->m_dst;
+ else
+ dst = sym_op->m_src;
+
+ /* TODO we are using the first FLE entry to store Mbuf and session ctxt.
+ * Currently we donot know which FLE has the mbuf stored.
+ * So while retreiving we can go back 1 FLE from the FD -ADDR
+ * to get the MBUF Addr from the previous FLE.
+ * We can have a better approach to use the inline Mbuf
+ */
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+ fle = fle + 1;
+ sge = fle + 2;
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge + 2, bpid);
+ DPAA2_SET_FLE_BPID(sge + 3, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ DPAA2_SET_FLE_IVP((sge + 2));
+ DPAA2_SET_FLE_IVP((sge + 3));
+ }
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "iv-len=%d data_off: 0x%x\n",
+ sym_op->aead.data.offset,
+ sym_op->aead.data.length,
+ sess->digest_length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + icv_len + auth_only_len) :
+ sym_op->aead.data.length + auth_only_len;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ dst->data_off - auth_only_len);
+ sge->length = sym_op->aead.data.length + auth_only_len;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+ sess->iv.length + auth_only_len));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ fle++;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(fle);
+ DPAA2_SET_FLE_FIN(fle);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len +
+ sess->digest_length);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+ sge->length = sess->iv.length;
+ sge++;
+ if (auth_only_len) {
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+ sge->length = auth_only_len;
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ sge++;
+ }
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ sym_op->m_src->data_off);
+ sge->length = sym_op->aead.data.length;
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->aead.digest.data,
+ sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+ sess->digest_length +
+ sess->iv.length +
+ auth_only_len));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+
+ return 0;
+}
+
+static inline int
+build_authenc_sg_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sym_op->auth.data.length -
+ sym_op->cipher.data.length;
+ int icv_len = sess->digest_length;
+ uint8_t *old_icv;
+ struct rte_mbuf *mbuf;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ mbuf = sym_op->m_dst;
+ else
+ mbuf = sym_op->m_src;
+
+ /* first FLE entry used to store mbuf and session ctxt */
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG(
+ "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sess->digest_length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+ op_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->cipher.data.length + icv_len) :
+ sym_op->cipher.data.length;
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
+ sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+ mbuf = mbuf->next;
+ /* o/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sge->length -= icv_len;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ sge->length = icv_len;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ mbuf = sym_op->m_src;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_FIN(ip_fle);
+ ip_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->auth.data.length + sess->iv.length) :
+ (sym_op->auth.data.length + sess->iv.length +
+ icv_len);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
+
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+ mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+ mbuf = mbuf->next;
+ /* i/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sge->length -= icv_len;
+
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->auth.digest.data,
+ icv_len);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = icv_len;
+ }
+
+ DPAA2_SET_FLE_FIN(sge);
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+ return 0;
+}
+
+static inline int
+build_authenc_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sym_op->auth.data.length -
+ sym_op->cipher.data.length;
+ int icv_len = sess->digest_length, retval;
+ uint8_t *old_icv;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+ struct rte_mbuf *dst;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ dst = sym_op->m_dst;
+ else
+ dst = sym_op->m_src;
+
+ /* we are using the first FLE entry to store Mbuf.
+ * Currently we donot know which FLE has the mbuf stored.
+ * So while retreiving we can go back 1 FLE from the FD -ADDR
+ * to get the MBUF Addr from the previous FLE.
+ * We can have a better approach to use the inline Mbuf
+ */
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ DPAA2_SEC_ERR("Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+ fle = fle + 1;
+ sge = fle + 2;
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge + 2, bpid);
+ DPAA2_SET_FLE_BPID(sge + 3, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ DPAA2_SET_FLE_IVP((sge + 2));
+ DPAA2_SET_FLE_IVP((sge + 3));
+ }
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG(
+ "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sess->digest_length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->cipher.data.length + icv_len) :
+ sym_op->cipher.data.length;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+ dst->data_off);
+ sge->length = sym_op->cipher.data.length;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
+ sess->iv.length));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ fle++;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(fle);
+ DPAA2_SET_FLE_FIN(fle);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->auth.data.length + sess->iv.length) :
+ (sym_op->auth.data.length + sess->iv.length +
+ sess->digest_length);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
+ sge++;
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+ sym_op->m_src->data_off);
+ sge->length = sym_op->auth.data.length;
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->auth.digest.data,
+ sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
+ sess->digest_length +
+ sess->iv.length));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+ return 0;
+}
+
+static inline int build_auth_sg_fd(
+ dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd,
+ __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *old_digest;
+ struct rte_mbuf *mbuf;
+
+ PMD_INIT_FUNC_TRACE();
+
+ mbuf = sym_op->m_src;
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ /* first FLE entry used to store mbuf and session ctxt */
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+ /* sg FD */
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+
+ /* o/p fle */
+ DPAA2_SET_FLE_ADDR(op_fle,
+ DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ op_fle->length = sess->digest_length;
+
+ /* i/p fle */
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ /* i/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+ /* i/p segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ if (sess->dir == DIR_ENC) {
+ /* Digest calculation case */
+ sge->length -= sess->digest_length;
+ ip_fle->length = sym_op->auth.data.length;
+ } else {
+ /* Digest verification case */
+ sge++;
+ old_digest = (uint8_t *)(sge + 1);
+ rte_memcpy(old_digest, sym_op->auth.digest.data,
+ sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+ sge->length = sess->digest_length;
+ ip_fle->length = sym_op->auth.data.length +
+ sess->digest_length;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(ip_fle);
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+ return 0;
+}
+
+static inline int
+build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *fle, *sge;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *old_digest;
+ int retval;
+
+ PMD_INIT_FUNC_TRACE();
+
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+ /* TODO we are using the first FLE entry to store Mbuf.
+ * Currently we donot know which FLE has the mbuf stored.
+ * So while retreiving we can go back 1 FLE from the FD -ADDR
+ * to get the MBUF Addr from the previous FLE.
+ * We can have a better approach to use the inline Mbuf
+ */
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+ fle = fle + 1;
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ }
+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ fle->length = sess->digest_length;
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ fle++;
+
+ if (sess->dir == DIR_ENC) {
+ DPAA2_SET_FLE_ADDR(fle,
+ DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
+ sym_op->m_src->data_off);
+ DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
+ fle->length = sym_op->auth.data.length;
+ } else {
+ sge = fle + 2;
+ DPAA2_SET_FLE_SG_EXT(fle);
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ } else {
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ }
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+ sym_op->m_src->data_off);
+
+ DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
+ sess->digest_length);
+ sge->length = sym_op->auth.data.length;
+ sge++;
+ old_digest = (uint8_t *)(sge + 1);
+ rte_memcpy(old_digest, sym_op->auth.digest.data,
+ sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+ sge->length = sess->digest_length;
+ fle->length = sym_op->auth.data.length +
+ sess->digest_length;
+ DPAA2_SET_FLE_FIN(sge);
+ }
+ DPAA2_SET_FLE_FIN(fle);
+
+ return 0;
+}
+
+static int
+build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct rte_mbuf *mbuf;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ mbuf = sym_op->m_dst;
+ else
+ mbuf = sym_op->m_src;
+
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (!fle) {
+ DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ /* first FLE entry used to store mbuf and session ctxt */
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ flc = &priv->flc_desc[0].flc;
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
+ " data_off: 0x%x\n",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* o/p fle */
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+ op_fle->length = sym_op->cipher.data.length;
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+
+ /* o/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->cipher.data.offset;
+
+ mbuf = mbuf->next;
+ /* o/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
+
+ /* i/p fle */
+ mbuf = sym_op->m_src;
+ sge++;
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+
+ /* i/p IV */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = sess->iv.length;
+
+ sge++;
+
+ /* i/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+ mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->cipher.data.offset;
+
+ mbuf = mbuf->next;
+ /* i/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(ip_fle);
+
+ /* sg fd */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+ return 0;
+}
+
+static int
+build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *fle, *sge;
+ int retval;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+ struct rte_mbuf *dst;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ dst = sym_op->m_dst;
+ else
+ dst = sym_op->m_src;
+
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+ /* TODO we are using the first FLE entry to store Mbuf.
+ * Currently we donot know which FLE has the mbuf stored.
+ * So while retreiving we can go back 1 FLE from the FD -ADDR
+ * to get the MBUF Addr from the previous FLE.
+ * We can have a better approach to use the inline Mbuf
+ */
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+ fle = fle + 1;
+ sge = fle + 2;
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ }
+
+ flc = &priv->flc_desc[0].flc;
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
+ sess->iv.length);
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
+ " data_off: 0x%x\n",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
+ DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
+ dst->data_off);
+
+ fle->length = sym_op->cipher.data.length + sess->iv.length;
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
+
+ fle++;
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ fle->length = sym_op->cipher.data.length + sess->iv.length;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
+
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+ sym_op->m_src->data_off);
+
+ sge->length = sym_op->cipher.data.length;
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(fle);
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+
+ return 0;
+}
+
+static inline int
+build_sec_fd(struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ int ret = -1;
+ dpaa2_sec_session *sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+ sess = (dpaa2_sec_session *)get_sym_session_private_data(
+ op->sym->session, cryptodev_driver_id);
+ else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+ sess = (dpaa2_sec_session *)get_sec_session_private_data(
+ op->sym->sec_session);
+ else
+ return -1;
+
+ /* Segmented buffer */
+ if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
+ switch (sess->ctxt_type) {
+ case DPAA2_SEC_CIPHER:
+ ret = build_cipher_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AUTH:
+ ret = build_auth_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AEAD:
+ ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_CIPHER_HASH:
+ ret = build_authenc_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_HASH_CIPHER:
+ default:
+ DPAA2_SEC_ERR("error: Unsupported session");
+ }
+ } else {
+ switch (sess->ctxt_type) {
+ case DPAA2_SEC_CIPHER:
+ ret = build_cipher_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AUTH:
+ ret = build_auth_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AEAD:
+ ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_CIPHER_HASH:
+ ret = build_authenc_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_IPSEC:
+ ret = build_proto_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_HASH_CIPHER:
+ default:
+ DPAA2_SEC_ERR("error: Unsupported session");
+ }
+ }
+ return ret;
+}
+
+static uint16_t
+dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ /* Function to transmit the frames to given device and VQ*/
+ uint32_t loop;
+ int32_t ret;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send;
+ struct qbman_eq_desc eqdesc;
+ struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+ struct qbman_swp *swp;
+ uint16_t num_tx = 0;
+ /*todo - need to support multiple buffer pools */
+ uint16_t bpid;
+ struct rte_mempool *mb_pool;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ DPAA2_SEC_ERR("sessionless crypto op not supported");
+ return 0;
+ }
+ /*Prepare enqueue descriptor*/
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+ qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
+
+ if (!DPAA2_PER_LCORE_DPIO) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_SEC_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ while (nb_ops) {
+ frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+ /*Clear the unused FD fields before sending*/
+ memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+ mb_pool = (*ops)->sym->m_src->pool;
+ bpid = mempool_to_bpid(mb_pool);
+ ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
+ if (ret) {
+ DPAA2_SEC_ERR("error: Improper packet contents"
+ " for crypto operation");
+ goto skip_tx;
+ }
+ ops++;
+ }
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
+ &fd_arr[loop],
+ NULL,
+ frames_to_send - loop);
+ }
+
+ num_tx += frames_to_send;
+ nb_ops -= frames_to_send;
+ }
+skip_tx:
+ dpaa2_qp->tx_vq.tx_pkts += num_tx;
+ dpaa2_qp->tx_vq.err_pkts += nb_ops;
+ return num_tx;
+}
+
+static inline struct rte_crypto_op *
+sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
+{
+ struct rte_crypto_op *op;
+ uint16_t len = DPAA2_GET_FD_LEN(fd);
+ uint16_t diff = 0;
+ dpaa2_sec_session *sess_priv;
+
+ struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+
+ op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
+ mbuf->buf_iova = op->sym->aead.digest.phys_addr;
+ op->sym->aead.digest.phys_addr = 0L;
+
+ sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
+ op->sym->sec_session);
+ if (sess_priv->dir == DIR_ENC)
+ mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
+ else
+ mbuf->data_off += SEC_FLC_DHR_INBOUND;
+ diff = len - mbuf->pkt_len;
+ mbuf->pkt_len += diff;
+ mbuf->data_len += diff;
+
+ return op;
+}
+
+static inline struct rte_crypto_op *
+sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
+{
+ struct qbman_fle *fle;
+ struct rte_crypto_op *op;
+ struct ctxt_priv *priv;
+ struct rte_mbuf *dst, *src;
+
+ if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
+ return sec_simple_fd_to_mbuf(fd, driver_id);
+
+ fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+ DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
+ fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+
+ /* we are using the first FLE entry to store Mbuf.
+ * Currently we donot know which FLE has the mbuf stored.
+ * So while retreiving we can go back 1 FLE from the FD -ADDR
+ * to get the MBUF Addr from the previous FLE.
+ * We can have a better approach to use the inline Mbuf
+ */
+
+ if (unlikely(DPAA2_GET_FD_IVP(fd))) {
+ /* TODO complete it. */
+ DPAA2_SEC_ERR("error: non inline buffer");
+ return NULL;
+ }
+ op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
+
+ /* Prefeth op */
+ src = op->sym->m_src;
+ rte_prefetch0(src);
+
+ if (op->sym->m_dst) {
+ dst = op->sym->m_dst;
+ rte_prefetch0(dst);
+ } else
+ dst = src;
+
+ DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
+ " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
+ (void *)dst,
+ dst->buf_addr,
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+
+ /* free the fle memory */
+ if (likely(rte_pktmbuf_is_contiguous(src))) {
+ priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
+ rte_mempool_put(priv->fle_pool, (void *)(fle-1));
+ } else
+ rte_free((void *)(fle-1));
+
+ return op;
+}
+
+static uint16_t
+dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ /* Function is responsible to receive frames for a given device and VQ*/
+ struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+ struct rte_cryptodev *dev =
+ (struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
+ struct qbman_result *dq_storage;
+ uint32_t fqid = dpaa2_qp->rx_vq.fqid;
+ int ret, num_rx = 0;
+ uint8_t is_last = 0, status;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd;
+ struct qbman_pull_desc pulldesc;
+
+ if (!DPAA2_PER_LCORE_DPIO) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_SEC_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+ dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc,
+ (nb_ops > DPAA2_DQRR_RING_SIZE) ?
+ DPAA2_DQRR_RING_SIZE : nb_ops);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
+ 1);
+
+ /*Issue a volatile dequeue command. */
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_SEC_WARN(
+ "SEC VDQ command is not issued : QBMAN busy");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ };
+
+ /* Receive the packets till Last Dequeue entry is found with
+ * respect to the above issues PULL command.
+ */
+ while (!is_last) {
+ /* Check if the previous issued command is completed.
+ * Also seems like the SWP is shared between the Ethernet Driver
+ * and the SEC driver.
+ */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+
+ /* Loop until the dq_storage is updated with
+ * new token by QBMAN
+ */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+ /* Check whether Last Pull command is Expired and
+ * setting Condition for Loop termination
+ */
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ is_last = 1;
+ /* Check for valid frame. */
+ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+ if (unlikely(
+ (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+ DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
+ continue;
+ }
+ }
+
+ fd = qbman_result_DQ_fd(dq_storage);
+ ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);
+
+ if (unlikely(fd->simple.frc)) {
+ /* TODO Parse SEC errors */
+ DPAA2_SEC_ERR("SEC returned Error - %x",
+ fd->simple.frc);
+ ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ } else {
+ ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+
+ num_rx++;
+ dq_storage++;
+ } /* End of Packet Rx loop */
+
+ dpaa2_qp->rx_vq.rx_pkts += num_rx;
+
+ DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+ /*Return the total number of packets received to DPAA2 app*/
+ return num_rx;
+}
+
+/** Release queue pair */
+static int
+dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct dpaa2_sec_qp *qp =
+ (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (qp->rx_vq.q_storage) {
+ dpaa2_free_dq_storage(qp->rx_vq.q_storage);
+ rte_free(qp->rx_vq.q_storage);
+ }
+ rte_free(qp);
+
+ dev->data->queue_pairs[queue_pair_id] = NULL;
+
+ return 0;
+}
+
+/** Setup a queue pair */
+static int
+dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+ __rte_unused int socket_id,
+ __rte_unused struct rte_mempool *session_pool)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct dpaa2_sec_qp *qp;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_rx_queue_cfg cfg;
+ int32_t retcode;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ DPAA2_SEC_INFO("QP already setup");
+ return 0;
+ }
+
+ DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
+ dev, qp_id, qp_conf);
+
+ memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
+
+ qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
+ RTE_CACHE_LINE_SIZE);
+ if (!qp) {
+ DPAA2_SEC_ERR("malloc failed for rx/tx queues");
+ return -1;
+ }
+
+ qp->rx_vq.dev = dev;
+ qp->tx_vq.dev = dev;
+ qp->rx_vq.q_storage = rte_malloc("sec dq storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!qp->rx_vq.q_storage) {
+ DPAA2_SEC_ERR("malloc failed for q_storage");
+ return -1;
+ }
+ memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
+
+ if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
+ DPAA2_SEC_ERR("Unable to allocate dequeue storage");
+ return -1;
+ }
+
+ dev->data->queue_pairs[qp_id] = qp;
+
+ cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
+ cfg.user_ctx = (size_t)(&qp->rx_vq);
+ retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
+ qp_id, &cfg);
+ return retcode;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni gcm session structure */
+static unsigned int
+dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return sizeof(dpaa2_sec_session);
+}
+
+static int
+dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo cipherdata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For SEC CIPHER only one descriptor is required. */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+
+ flc = &priv->flc_desc[0].flc;
+
+ session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL) {
+ DPAA2_SEC_ERR("No Memory for cipher key");
+ rte_free(priv);
+ return -1;
+ }
+ session->cipher_key.length = xform->cipher.key.length;
+
+ memcpy(session->cipher_key.data, xform->cipher.key.data,
+ xform->cipher.key.length);
+ cipherdata.key = (size_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ /* Set IV parameters */
+ session->iv.offset = xform->cipher.iv.offset;
+ session->iv.length = xform->cipher.iv.length;
+
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ case RTE_CRYPTO_CIPHER_NULL:
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ xform->cipher.algo);
+ goto error_out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ xform->cipher.algo);
+ goto error_out;
+ }
+ session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
+ &cipherdata, NULL, session->iv.length,
+ session->dir);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Descriptor build failed");
+ goto error_out;
+ }
+ flc->dhr = 0;
+ flc->bpv0 = 0x1;
+ flc->mode_bits = 0x8000;
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+
+ for (i = 0; i < bufsize; i++)
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_auth_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo authdata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For SEC AUTH three descriptors are required for various stages */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + 3 *
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+
+ session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL) {
+ DPAA2_SEC_ERR("Unable to allocate memory for auth key");
+ rte_free(priv);
+ return -1;
+ }
+ session->auth_key.length = xform->auth.key.length;
+
+ memcpy(session->auth_key.data, xform->auth.key.data,
+ xform->auth.key.length);
+ authdata.key = (size_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ session->digest_length = xform->auth.digest_length;
+
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA1;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_MD5;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA384;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA512;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA224;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
+ xform->auth.algo);
+ goto error_out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ xform->auth.algo);
+ goto error_out;
+ }
+ session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ DIR_ENC : DIR_DEC;
+
+ bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
+ 1, 0, &authdata, !session->dir,
+ session->digest_length);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
+ i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+
+
+ return 0;
+
+error_out:
+ rte_free(session->auth_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_aead_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo aeaddata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Set IV parameters */
+ session->iv.offset = aead_xform->iv.offset;
+ session->iv.length = aead_xform->iv.length;
+ session->ctxt_type = DPAA2_SEC_AEAD;
+
+ /* For SEC AEAD only one descriptor is required */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for aead key");
+ rte_free(priv);
+ return -1;
+ }
+ memcpy(session->aead_key.data, aead_xform->key.data,
+ aead_xform->key.length);
+
+ session->digest_length = aead_xform->digest_length;
+ session->aead_key.length = aead_xform->key.length;
+ ctxt->auth_only_len = aead_xform->aad_length;
+
+ aeaddata.key = (size_t)session->aead_key.data;
+ aeaddata.keylen = session->aead_key.length;
+ aeaddata.key_enc_flags = 0;
+ aeaddata.key_type = RTA_DATA_IMM;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ aeaddata.algtype = OP_ALG_ALGSEL_AES;
+ aeaddata.algmode = OP_ALG_AAI_GCM;
+ session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
+ aead_xform->algo);
+ goto error_out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
+ aead_xform->algo);
+ goto error_out;
+ }
+ session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ priv->flc_desc[0].desc[0] = aeaddata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)priv->flc_desc[0].desc,
+ &priv->flc_desc[0].desc[1], 1);
+
+ if (err < 0) {
+ DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
+ goto error_out;
+ }
+ if (priv->flc_desc[0].desc[1] & 1) {
+ aeaddata.key_type = RTA_DATA_IMM;
+ } else {
+ aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
+ aeaddata.key_type = RTA_DATA_PTR;
+ }
+ priv->flc_desc[0].desc[0] = 0;
+ priv->flc_desc[0].desc[1] = 0;
+
+ if (session->dir == DIR_ENC)
+ bufsize = cnstr_shdsc_gcm_encap(
+ priv->flc_desc[0].desc, 1, 0,
+ &aeaddata, session->iv.length,
+ session->digest_length);
+ else
+ bufsize = cnstr_shdsc_gcm_decap(
+ priv->flc_desc[0].desc, 1, 0,
+ &aeaddata, session->iv.length,
+ session->digest_length);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
+ i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->aead_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+
+static int
+dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo authdata, cipherdata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ struct rte_crypto_auth_xform *auth_xform;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (session->ext_params.aead_ctxt.auth_cipher_text) {
+ cipher_xform = &xform->cipher;
+ auth_xform = &xform->next->auth;
+ session->ctxt_type =
+ (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
+ } else {
+ cipher_xform = &xform->next->cipher;
+ auth_xform = &xform->auth;
+ session->ctxt_type =
+ (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
+ }
+
+ /* Set IV parameters */
+ session->iv.offset = cipher_xform->iv.offset;
+ session->iv.length = cipher_xform->iv.length;
+
+ /* For SEC AEAD only one descriptor is required */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for cipher key");
+ rte_free(priv);
+ return -1;
+ }
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ authdata.key = (size_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ session->digest_length = auth_xform->digest_length;
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA1;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_MD5;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA224;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA384;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA512;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
+ goto error_out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
+ goto error_out;
+ }
+ cipherdata.key = (size_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto error_out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
+ goto error_out;
+ }
+ session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ priv->flc_desc[0].desc[0] = cipherdata.keylen;
+ priv->flc_desc[0].desc[1] = authdata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)priv->flc_desc[0].desc,
+ &priv->flc_desc[0].desc[2], 2);
+
+ if (err < 0) {
+ DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
+ goto error_out;
+ }
+ if (priv->flc_desc[0].desc[2] & 1) {
+ cipherdata.key_type = RTA_DATA_IMM;
+ } else {
+ cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+ cipherdata.key_type = RTA_DATA_PTR;
+ }
+ if (priv->flc_desc[0].desc[2] & (1 << 1)) {
+ authdata.key_type = RTA_DATA_IMM;
+ } else {
+ authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
+ authdata.key_type = RTA_DATA_PTR;
+ }
+ priv->flc_desc[0].desc[0] = 0;
+ priv->flc_desc[0].desc[1] = 0;
+ priv->flc_desc[0].desc[2] = 0;
+
+ if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
+ bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
+ 0, &cipherdata, &authdata,
+ session->iv.length,
+ ctxt->auth_only_len,
+ session->digest_length,
+ session->dir);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
+ } else {
+ DPAA2_SEC_ERR("Hash before cipher not supported");
+ goto error_out;
+ }
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
+ i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->cipher_key.data);
+ rte_free(session->auth_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ dpaa2_sec_session *session = sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(sess == NULL)) {
+ DPAA2_SEC_ERR("Invalid session struct");
+ return -1;
+ }
+
+ /* Default IV length = 0 */
+ session->iv.length = 0;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ session->ctxt_type = DPAA2_SEC_CIPHER;
+ dpaa2_sec_cipher_init(dev, xform, session);
+
+ /* Authentication Only */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ session->ctxt_type = DPAA2_SEC_AUTH;
+ dpaa2_sec_auth_init(dev, xform, session);
+
+ /* Cipher then Authenticate */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ session->ext_params.aead_ctxt.auth_cipher_text = true;
+ dpaa2_sec_aead_chain_init(dev, xform, session);
+
+ /* Authenticate then Cipher */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ session->ext_params.aead_ctxt.auth_cipher_text = false;
+ dpaa2_sec_aead_chain_init(dev, xform, session);
+
+ /* AEAD operation for AES-GCM kind of Algorithms */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ xform->next == NULL) {
+ dpaa2_sec_aead_init(dev, xform, session);
+
+ } else {
+ DPAA2_SEC_ERR("Invalid crypto type");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+ struct rte_crypto_auth_xform *auth_xform;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
+ struct ctxt_priv *priv;
+ struct ipsec_encap_pdb encap_pdb;
+ struct ipsec_decap_pdb decap_pdb;
+ struct alginfo authdata, cipherdata;
+ int bufsize;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ cipher_xform = &conf->crypto_xform->cipher;
+ auth_xform = &conf->crypto_xform->next->auth;
+ } else {
+ auth_xform = &conf->crypto_xform->auth;
+ cipher_xform = &conf->crypto_xform->next->cipher;
+ }
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) +
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No memory for priv CTXT");
+ return -ENOMEM;
+ }
+
+ flc = &priv->flc_desc[0].flc;
+
+ session->ctxt_type = DPAA2_SEC_IPSEC;
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for cipher key");
+ rte_free(priv);
+ return -ENOMEM;
+ }
+
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ authdata.key = (size_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
+ session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
+ goto out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
+ goto out;
+ }
+ cipherdata.key = (size_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_PCL_IPSEC_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ cipherdata.algtype = OP_PCL_IPSEC_NULL;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
+ goto out;
+ }
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ struct ip ip4_hdr;
+
+ flc->dhr = SEC_FLC_DHR_OUTBOUND;
+ ip4_hdr.ip_v = IPVERSION;
+ ip4_hdr.ip_hl = 5;
+ ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
+ ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+ ip4_hdr.ip_id = 0;
+ ip4_hdr.ip_off = 0;
+ ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+ ip4_hdr.ip_p = 0x32;
+ ip4_hdr.ip_sum = 0;
+ ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+ ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+ ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
+ sizeof(struct ip));
+
+ /* For Sec Proto only one descriptor is required. */
+ memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
+ encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+ PDBOPTS_ESP_OIHI_PDB_INL |
+ PDBOPTS_ESP_IVSRC |
+ PDBHMO_ESP_ENCAP_DTTL;
+ encap_pdb.spi = ipsec_xform->spi;
+ encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+ session->dir = DIR_ENC;
+ bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
+ 1, 0, &encap_pdb,
+ (uint8_t *)&ip4_hdr,
+ &cipherdata, &authdata);
+ } else if (ipsec_xform->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ flc->dhr = SEC_FLC_DHR_INBOUND;
+ memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+ decap_pdb.options = sizeof(struct ip) << 16;
+ session->dir = DIR_DEC;
+ bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
+ 1, 0, &decap_pdb, &cipherdata, &authdata);
+ } else
+ goto out;
+
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto out;
+ }
+
+ flc->word1_sdl = (uint8_t)bufsize;
+
+ /* Enable the stashing control bit */
+ DPAA2_SET_FLC_RSC(flc);
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq) | 0x14);
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+
+ /* Set EWS bit i.e. enable write-safe */
+ DPAA2_SET_FLC_EWS(flc);
+ /* Set BS = 1 i.e reuse input buffers as output buffers */
+ DPAA2_SET_FLC_REUSE_BS(flc);
+ /* Set FF = 10; reuse input buffers if they provide sufficient space */
+ DPAA2_SET_FLC_REUSE_FF(flc);
+
+ session->ctxt = priv;
+
+ return 0;
+out:
+ rte_free(session->auth_key.data);
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_security_session_create(void *dev,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ DPAA2_SEC_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ switch (conf->protocol) {
+ case RTE_SECURITY_PROTOCOL_IPSEC:
+ ret = dpaa2_sec_set_ipsec_session(cdev, conf,
+ sess_private_data);
+ break;
+ case RTE_SECURITY_PROTOCOL_MACSEC:
+ return -ENOTSUP;
+ default:
+ return -EINVAL;
+ }
+ if (ret != 0) {
+ DPAA2_SEC_ERR("Failed to configure session parameters");
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sec_session_private_data(sess, sess_private_data);
+
+ return ret;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static int
+dpaa2_sec_security_session_destroy(void *dev __rte_unused,
+ struct rte_security_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ void *sess_priv = get_sec_session_private_data(sess);
+
+ dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_free(s->ctxt);
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(sess, 0, sizeof(dpaa2_sec_session));
+ set_sec_session_private_data(sess, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+ return 0;
+}
+
+static int
+dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ DPAA2_SEC_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ DPAA2_SEC_ERR("Failed to configure session parameters");
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+ dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
+
+ if (sess_priv) {
+ rte_free(s->ctxt);
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(sess, 0, sizeof(dpaa2_sec_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static int
+dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+static int
+dpaa2_sec_dev_start(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_attr attr;
+ struct dpaa2_queue *dpaa2_q;
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ dev->data->queue_pairs;
+ struct dpseci_rx_queue_attr rx_attr;
+ struct dpseci_tx_queue_attr tx_attr;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(&attr, 0, sizeof(struct dpseci_attr));
+
+ ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
+ priv->hw_id);
+ goto get_attr_failure;
+ }
+ ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
+ if (ret) {
+ DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
+ goto get_attr_failure;
+ }
+ for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
+ dpaa2_q = &qp[i]->rx_vq;
+ dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
+ &rx_attr);
+ dpaa2_q->fqid = rx_attr.fqid;
+ DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
+ }
+ for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
+ dpaa2_q = &qp[i]->tx_vq;
+ dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
+ &tx_attr);
+ dpaa2_q->fqid = tx_attr.fqid;
+ DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
+ }
+
+ return 0;
+get_attr_failure:
+ dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
+ return -1;
+}
+
+static void
+dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
+ priv->hw_id);
+ return;
+ }
+
+ ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret < 0) {
+ DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
+ return;
+ }
+}
+
+static int
+dpaa2_sec_dev_close(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Function is reverse of dpaa2_sec_dev_init.
+ * It does the following:
+ * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
+ * 2. Close the DPSECI device
+ * 3. Free the allocated resources.
+ */
+
+ /*Close the device at underlying layer*/
+ ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
+ return -1;
+ }
+
+ /*Free the allocated memory for ethernet private data and dpseci*/
+ priv->hw = NULL;
+ rte_free(dpseci);
+
+ return 0;
+}
+
+static void
+dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = dpaa2_sec_capabilities;
+ /* No limit of number of sessions */
+ info->sym.max_nb_sessions = 0;
+ info->driver_id = cryptodev_driver_id;
+ }
+}
+
+static
+void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_sec_counters counters = {0};
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ dev->data->queue_pairs;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+ if (stats == NULL) {
+ DPAA2_SEC_ERR("Invalid stats ptr NULL");
+ return;
+ }
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ DPAA2_SEC_DEBUG("Uninitialised queue pair");
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
+ stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
+ stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
+ stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
+ }
+
+ ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
+ &counters);
+ if (ret) {
+ DPAA2_SEC_ERR("SEC counters failed");
+ } else {
+ DPAA2_SEC_INFO("dpseci hardware stats:"
+ "\n\tNum of Requests Dequeued = %" PRIu64
+ "\n\tNum of Outbound Encrypt Requests = %" PRIu64
+ "\n\tNum of Inbound Decrypt Requests = %" PRIu64
+ "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
+ "\n\tNum of Outbound Bytes Protected = %" PRIu64
+ "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
+ "\n\tNum of Inbound Bytes Validated = %" PRIu64,
+ counters.dequeued_requests,
+ counters.ob_enc_requests,
+ counters.ib_dec_requests,
+ counters.ob_enc_bytes,
+ counters.ob_prot_bytes,
+ counters.ib_dec_bytes,
+ counters.ib_valid_bytes);
+ }
+}
+
+static
+void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
+{
+ int i;
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ (dev->data->queue_pairs);
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ DPAA2_SEC_DEBUG("Uninitialised queue pair");
+ continue;
+ }
+ qp[i]->tx_vq.rx_pkts = 0;
+ qp[i]->tx_vq.tx_pkts = 0;
+ qp[i]->tx_vq.err_pkts = 0;
+ qp[i]->rx_vq.rx_pkts = 0;
+ qp[i]->rx_vq.tx_pkts = 0;
+ qp[i]->rx_vq.err_pkts = 0;
+ }
+}
+
+static struct rte_cryptodev_ops crypto_ops = {
+ .dev_configure = dpaa2_sec_dev_configure,
+ .dev_start = dpaa2_sec_dev_start,
+ .dev_stop = dpaa2_sec_dev_stop,
+ .dev_close = dpaa2_sec_dev_close,
+ .dev_infos_get = dpaa2_sec_dev_infos_get,
+ .stats_get = dpaa2_sec_stats_get,
+ .stats_reset = dpaa2_sec_stats_reset,
+ .queue_pair_setup = dpaa2_sec_queue_pair_setup,
+ .queue_pair_release = dpaa2_sec_queue_pair_release,
+ .queue_pair_count = dpaa2_sec_queue_pair_count,
+ .sym_session_get_size = dpaa2_sec_sym_session_get_size,
+ .sym_session_configure = dpaa2_sec_sym_session_configure,
+ .sym_session_clear = dpaa2_sec_sym_session_clear,
+};
+
+static const struct rte_security_capability *
+dpaa2_sec_capabilities_get(void *device __rte_unused)
+{
+ return dpaa2_sec_security_cap;
+}
+
+struct rte_security_ops dpaa2_sec_security_ops = {
+ .session_create = dpaa2_sec_security_session_create,
+ .session_update = NULL,
+ .session_stats_get = NULL,
+ .session_destroy = dpaa2_sec_security_session_destroy,
+ .set_pkt_metadata = NULL,
+ .capabilities_get = dpaa2_sec_capabilities_get
+};
+
+static int
+dpaa2_sec_uninit(const struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+ rte_free(dev->security_ctx);
+
+ rte_mempool_free(internals->fle_pool);
+
+ DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
+ dev->data->name, rte_socket_id());
+
+ return 0;
+}
+
+static int
+dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
+{
+ struct dpaa2_sec_dev_private *internals;
+ struct rte_device *dev = cryptodev->device;
+ struct rte_dpaa2_device *dpaa2_dev;
+ struct rte_security_ctx *security_instance;
+ struct fsl_mc_io *dpseci;
+ uint16_t token;
+ struct dpseci_attr attr;
+ int retcode, hw_id;
+ char str[20];
+
+ PMD_INIT_FUNC_TRACE();
+ dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
+ if (dpaa2_dev == NULL) {
+ DPAA2_SEC_ERR("DPAA2 SEC device not found");
+ return -1;
+ }
+ hw_id = dpaa2_dev->object_id;
+
+ cryptodev->driver_id = cryptodev_driver_id;
+ cryptodev->dev_ops = &crypto_ops;
+
+ cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
+ cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_SECURITY |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ internals = cryptodev->data->dev_private;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DPAA2_SEC_DEBUG("Device already init by primary process");
+ return 0;
+ }
+
+ /* Initialize security_ctx only for primary process*/
+ security_instance = rte_malloc("rte_security_instances_ops",
+ sizeof(struct rte_security_ctx), 0);
+ if (security_instance == NULL)
+ return -ENOMEM;
+ security_instance->device = (void *)cryptodev;
+ security_instance->ops = &dpaa2_sec_security_ops;
+ security_instance->sess_cnt = 0;
+ cryptodev->security_ctx = security_instance;
+
+ /*Open the rte device via MC and save the handle for further use*/
+ dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
+ sizeof(struct fsl_mc_io), 0);
+ if (!dpseci) {
+ DPAA2_SEC_ERR(
+ "Error in allocating the memory for dpsec object");
+ return -1;
+ }
+ dpseci->regs = rte_mcp_ptr_list[0];
+
+ retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
+ if (retcode != 0) {
+ DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
+ retcode);
+ goto init_error;
+ }
+ retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
+ if (retcode != 0) {
+ DPAA2_SEC_ERR(
+ "Cannot get dpsec device attributed: Error = %x",
+ retcode);
+ goto init_error;
+ }
+ sprintf(cryptodev->data->name, "dpsec-%u", hw_id);
+
+ internals->max_nb_queue_pairs = attr.num_tx_queues;
+ cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
+ internals->hw = dpseci;
+ internals->token = token;
+
+ sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
+ internals->fle_pool = rte_mempool_create((const char *)str,
+ FLE_POOL_NUM_BUFS,
+ FLE_POOL_BUF_SIZE,
+ FLE_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->fle_pool) {
+ DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
+ goto init_error;
+ }
+
+ DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
+ return 0;
+
+init_error:
+ DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
+
+ /* dpaa2_sec_uninit(crypto_dev_name); */
+ return -EFAULT;
+}
+
+static int
+cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ int retval;
+
+ sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);
+
+ cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
+ if (cryptodev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ cryptodev->data->dev_private = rte_zmalloc_socket(
+ "cryptodev private structure",
+ sizeof(struct dpaa2_sec_dev_private),
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (cryptodev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ dpaa2_dev->cryptodev = cryptodev;
+ cryptodev->device = &dpaa2_dev->device;
+ cryptodev->device->driver = &dpaa2_drv->driver;
+
+ /* init user callbacks */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ /* Invoke PMD device initialization function */
+ retval = dpaa2_sec_dev_init(cryptodev);
+ if (retval == 0)
+ return 0;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ cryptodev->attached = RTE_CRYPTODEV_DETACHED;
+
+ return -ENXIO;
+}
+
+static int
+cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ int ret;
+
+ cryptodev = dpaa2_dev->cryptodev;
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ ret = dpaa2_sec_uninit(cryptodev);
+ if (ret)
+ return ret;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
+ .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
+ .drv_type = DPAA2_CRYPTO,
+ .driver = {
+ .name = "DPAA2 SEC PMD"
+ },
+ .probe = cryptodev_dpaa2_sec_probe,
+ .remove = cryptodev_dpaa2_sec_remove,
+};
+
+static struct cryptodev_driver dpaa2_sec_crypto_drv;
+
+RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
+ rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
+
+RTE_INIT(dpaa2_sec_init_log)
+{
+ /* Bus level logs */
+ dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
+ if (dpaa2_logtype_sec >= 0)
+ rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
new file mode 100644
index 00000000..8a990442
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _DPAA2_SEC_LOGS_H_
+#define _DPAA2_SEC_LOGS_H_
+
+extern int dpaa2_logtype_sec;
+
+#define DPAA2_SEC_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_sec, "dpaa2_sec: " \
+ fmt "\n", ##args)
+
+#define DPAA2_SEC_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_sec, "dpaa2_sec: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA2_SEC_DEBUG(">>")
+
+#define DPAA2_SEC_INFO(fmt, args...) \
+ DPAA2_SEC_LOG(INFO, fmt, ## args)
+#define DPAA2_SEC_ERR(fmt, args...) \
+ DPAA2_SEC_LOG(ERR, fmt, ## args)
+#define DPAA2_SEC_WARN(fmt, args...) \
+ DPAA2_SEC_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_SEC_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_SEC_DP_DEBUG(fmt, args...) \
+ DPAA2_SEC_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_SEC_DP_INFO(fmt, args...) \
+ DPAA2_SEC_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_SEC_DP_WARN(fmt, args...) \
+ DPAA2_SEC_DP_LOG(WARNING, fmt, ## args)
+
+
+#endif /* _DPAA2_SEC_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
new file mode 100644
index 00000000..d015be1e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+#define _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+
+#define CRYPTODEV_NAME_DPAA2_SEC_PMD crypto_dpaa2_sec
+/**< NXP DPAA2 - SEC PMD device name */
+
+#define MAX_QUEUES 64
+#define MAX_DESC_SIZE 64
+/** private data structure for each DPAA2_SEC device */
+struct dpaa2_sec_dev_private {
+ void *mc_portal; /**< MC Portal for configuring this device */
+ void *hw; /**< Hardware handle for this device.Used by NADK framework */
+ struct rte_mempool *fle_pool; /* per device memory pool for FLE */
+ int32_t hw_id; /**< An unique ID of this device instance */
+ int32_t vfio_fd; /**< File descriptor received via VFIO */
+ uint16_t token; /**< Token required by DPxxx objects */
+ unsigned int max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+struct dpaa2_sec_qp {
+ struct dpaa2_queue rx_vq;
+ struct dpaa2_queue tx_vq;
+};
+
+enum shr_desc_type {
+ DESC_UPDATE,
+ DESC_FINAL,
+ DESC_INITFINAL,
+};
+
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+#define DPAA2_SET_FLC_EWS(flc) (flc->word1_bits23_16 |= 0x1)
+#define DPAA2_SET_FLC_RSC(flc) (flc->word1_bits31_24 |= 0x1)
+#define DPAA2_SET_FLC_REUSE_BS(flc) (flc->mode_bits |= 0x8000)
+#define DPAA2_SET_FLC_REUSE_FF(flc) (flc->mode_bits |= 0x2000)
+
+/* SEC Flow Context Descriptor */
+struct sec_flow_context {
+ /* word 0 */
+ uint16_t word0_sdid; /* 11-0 SDID */
+ uint16_t word0_res; /* 31-12 reserved */
+
+ /* word 1 */
+ uint8_t word1_sdl; /* 5-0 SDL */
+ /* 7-6 reserved */
+
+ uint8_t word1_bits_15_8; /* 11-8 CRID */
+ /* 14-12 reserved */
+ /* 15 CRJD */
+
+ uint8_t word1_bits23_16; /* 16 EWS */
+ /* 17 DAC */
+ /* 18,19,20 ? */
+ /* 23-21 reserved */
+
+ uint8_t word1_bits31_24; /* 24 RSC */
+ /* 25 RBMT */
+ /* 31-26 reserved */
+
+ /* word 2 RFLC[31-0] */
+ uint32_t word2_rflc_31_0;
+
+ /* word 3 RFLC[63-32] */
+ uint32_t word3_rflc_63_32;
+
+ /* word 4 */
+ uint16_t word4_iicid; /* 15-0 IICID */
+ uint16_t word4_oicid; /* 31-16 OICID */
+
+ /* word 5 */
+ uint32_t word5_ofqid:24; /* 23-0 OFQID */
+ uint32_t word5_31_24:8;
+ /* 24 OSC */
+ /* 25 OBMT */
+ /* 29-26 reserved */
+ /* 31-30 ICR */
+
+ /* word 6 */
+ uint32_t word6_oflc_31_0;
+
+ /* word 7 */
+ uint32_t word7_oflc_63_32;
+
+ /* Word 8-15 storage profiles */
+ uint16_t dl; /**< DataLength(correction) */
+ uint16_t reserved; /**< reserved */
+ uint16_t dhr; /**< DataHeadRoom(correction) */
+ uint16_t mode_bits; /**< mode bits */
+ uint16_t bpv0; /**< buffer pool0 valid */
+ uint16_t bpid0; /**< Bypass Memory Translation */
+ uint16_t bpv1; /**< buffer pool1 valid */
+ uint16_t bpid1; /**< Bypass Memory Translation */
+ uint64_t word_12_15[2]; /**< word 12-15 are reserved */
+};
+
+struct sec_flc_desc {
+ struct sec_flow_context flc;
+ uint32_t desc[MAX_DESC_SIZE];
+};
+
+struct ctxt_priv {
+ struct rte_mempool *fle_pool; /* per device memory pool for FLE */
+ struct sec_flc_desc flc_desc[0];
+};
+
+enum dpaa2_sec_op_type {
+ DPAA2_SEC_NONE, /*!< No Cipher operations*/
+ DPAA2_SEC_CIPHER,/*!< CIPHER operations */
+ DPAA2_SEC_AUTH, /*!< Authentication Operations */
+ DPAA2_SEC_AEAD, /*!< AEAD (AES-GCM/CCM) type operations */
+ DPAA2_SEC_CIPHER_HASH, /*!< Authenticated Encryption with
+ * associated data
+ */
+ DPAA2_SEC_HASH_CIPHER, /*!< Encryption with Authenticated
+ * associated data
+ */
+ DPAA2_SEC_IPSEC, /*!< IPSEC protocol operations*/
+ DPAA2_SEC_PDCP, /*!< PDCP protocol operations*/
+ DPAA2_SEC_PKC, /*!< Public Key Cryptographic Operations */
+ DPAA2_SEC_MAX
+};
+
+struct dpaa2_sec_aead_ctxt {
+ uint16_t auth_only_len; /*!< Length of data for Auth only */
+ uint8_t auth_cipher_text; /**< Authenticate/cipher ordering */
+};
+
+typedef struct dpaa2_sec_session_entry {
+ void *ctxt;
+ uint8_t ctxt_type;
+ uint8_t dir; /*!< Operation Direction */
+ enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+ enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+ enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+ union {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } aead_key;
+ struct {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } cipher_key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } auth_key;
+ };
+ };
+ struct {
+ uint16_t length; /**< IV length in bytes */
+ uint16_t offset; /**< IV offset in bytes */
+ } iv;
+ uint16_t digest_length;
+ uint8_t status;
+ union {
+ struct dpaa2_sec_aead_ctxt aead_ctxt;
+ } ext_params;
+} dpaa2_sec_session;
+
+static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 16,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 20,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 28,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 32,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 48,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 240,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_security_capability dpaa2_sec_security_cap[] = {
+ { /* IPsec Lookaside Protocol offload ESP Transport Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa2_sec_capabilities
+ },
+ { /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa2_sec_capabilities
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+};
+
+/**
+ * Checksum
+ *
+ * @param buffer calculate chksum for buffer
+ * @param len buffer length
+ *
+ * @return checksum value in host cpu order
+ */
+static inline uint16_t
+calc_chksum(void *buffer, int len)
+{
+ uint16_t *buf = (uint16_t *)buffer;
+ uint32_t sum = 0;
+ uint16_t result;
+
+ for (sum = 0; len > 1; len -= 2)
+ sum += *buf++;
+
+ if (len == 1)
+ sum += *(unsigned char *)buf;
+
+ sum = (sum >> 16) + (sum & 0xFFFF);
+ sum += (sum >> 16);
+ result = ~sum;
+
+ return result;
+}
+
+#endif /* _RTE_DPAA2_SEC_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/compat.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/compat.h
new file mode 100644
index 00000000..ce946ccb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/compat.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_COMPAT_H__
+#define __RTA_COMPAT_H__
+
+#include <stdint.h>
+#include <errno.h>
+
+#ifdef __GLIBC__
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#ifndef __BYTE_ORDER__
+#error "Undefined endianness"
+#endif
+
+#else
+#error Environment not supported!
+#endif
+
+#ifndef __always_inline
+#define __always_inline __rte_always_inline
+#endif
+
+#ifndef __always_unused
+#define __always_unused __attribute__((unused))
+#endif
+
+#ifndef __maybe_unused
+#define __maybe_unused __attribute__((unused))
+#endif
+
+#if defined(__GLIBC__) && !defined(pr_debug)
+#if !defined(SUPPRESS_PRINTS) && defined(RTA_DEBUG)
+#define pr_debug(fmt, ...) \
+ RTE_LOG(DEBUG, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_debug(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_debug */
+
+#if defined(__GLIBC__) && !defined(pr_err)
+#if !defined(SUPPRESS_PRINTS)
+#define pr_err(fmt, ...) \
+ RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_err(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_err */
+
+#if defined(__GLIBC__) && !defined(pr_warn)
+#if !defined(SUPPRESS_PRINTS)
+#define pr_warn(fmt, ...) \
+ RTE_LOG(WARNING, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_warn(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_warn */
+
+/**
+ * ARRAY_SIZE - returns the number of elements in an array
+ * @x: array
+ */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef ALIGN
+#define ALIGN(x, a) (((x) + ((__typeof__(x))(a) - 1)) & \
+ ~((__typeof__(x))(a) - 1))
+#endif
+
+#ifndef BIT
+#define BIT(nr) (1UL << (nr))
+#endif
+
+#ifndef upper_32_bits
+/**
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
+ */
+#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
+#endif
+
+#ifndef lower_32_bits
+/**
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
+ */
+#define lower_32_bits(n) ((uint32_t)(n))
+#endif
+
+/* Use Linux naming convention */
+#ifdef __GLIBC__
+ #define swab16(x) rte_bswap16(x)
+ #define swab32(x) rte_bswap32(x)
+ #define swab64(x) rte_bswap64(x)
+ /* Define cpu_to_be32 macro if not defined in the build environment */
+ #if !defined(cpu_to_be32)
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define cpu_to_be32(x) (x)
+ #else
+ #define cpu_to_be32(x) swab32(x)
+ #endif
+ #endif
+ /* Define cpu_to_le32 macro if not defined in the build environment */
+ #if !defined(cpu_to_le32)
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define cpu_to_le32(x) swab32(x)
+ #else
+ #define cpu_to_le32(x) (x)
+ #endif
+ #endif
+#endif
+
+#endif /* __RTA_COMPAT_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc.h
new file mode 100644
index 00000000..e9255832
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc.h
@@ -0,0 +1,2568 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+/*
+ * SEC descriptor composition header.
+ * Definitions to support SEC descriptor instruction generation
+ */
+
+#ifndef __RTA_DESC_H__
+#define __RTA_DESC_H__
+
+/* hw/compat.h is not delivered in kernel */
+#ifndef __KERNEL__
+#include "hw/compat.h"
+#endif
+
+/* Max size of any SEC descriptor in 32-bit words, inclusive of header */
+#define MAX_CAAM_DESCSIZE 64
+
+#define CAAM_CMD_SZ sizeof(uint32_t)
+#define CAAM_PTR_SZ sizeof(dma_addr_t)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+
+/* Block size of any entity covered/uncovered with a KEK/TKEK */
+#define KEK_BLOCKSIZE 16
+
+/*
+ * Supported descriptor command types as they show up
+ * inside a descriptor command word.
+ */
+#define CMD_SHIFT 27
+#define CMD_MASK (0x1f << CMD_SHIFT)
+
+#define CMD_KEY (0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
+#define CMD_LOAD (0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
+#define CMD_MOVEDW (0x06 << CMD_SHIFT)
+#define CMD_MOVEB (0x07 << CMD_SHIFT)
+#define CMD_STORE (0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
+#define CMD_MOVE (0x0f << CMD_SHIFT)
+#define CMD_OPERATION ((uint32_t)(0x10 << CMD_SHIFT))
+#define CMD_SIGNATURE ((uint32_t)(0x12 << CMD_SHIFT))
+#define CMD_JUMP ((uint32_t)(0x14 << CMD_SHIFT))
+#define CMD_MATH ((uint32_t)(0x15 << CMD_SHIFT))
+#define CMD_DESC_HDR ((uint32_t)(0x16 << CMD_SHIFT))
+#define CMD_SHARED_DESC_HDR ((uint32_t)(0x17 << CMD_SHIFT))
+#define CMD_MATHI ((uint32_t)(0x1d << CMD_SHIFT))
+#define CMD_SEQ_IN_PTR ((uint32_t)(0x1e << CMD_SHIFT))
+#define CMD_SEQ_OUT_PTR ((uint32_t)(0x1f << CMD_SHIFT))
+
+/* General-purpose class selector for all commands */
+#define CLASS_SHIFT 25
+#define CLASS_MASK (0x03 << CLASS_SHIFT)
+
+#define CLASS_NONE (0x00 << CLASS_SHIFT)
+#define CLASS_1 (0x01 << CLASS_SHIFT)
+#define CLASS_2 (0x02 << CLASS_SHIFT)
+#define CLASS_BOTH (0x03 << CLASS_SHIFT)
+
+/* ICV Check bits for Algo Operation command */
+#define ICV_CHECK_DISABLE 0
+#define ICV_CHECK_ENABLE 1
+
+/* Encap Mode check bits for Algo Operation command */
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+/*
+ * Descriptor header command constructs
+ * Covers shared, job, and trusted descriptor headers
+ */
+
+/*
+ * Extended Job Descriptor Header
+ */
+#define HDR_EXT BIT(24)
+
+/*
+ * Read input frame as soon as possible (SHR HDR)
+ */
+#define HDR_RIF BIT(25)
+
+/*
+ * Require SEQ LIODN to be the Same (JOB HDR)
+ */
+#define HDR_RSLS BIT(25)
+
+/*
+ * Do Not Run - marks a descriptor not executable if there was
+ * a preceding error somewhere
+ */
+#define HDR_DNR BIT(24)
+
+/*
+ * ONE - should always be set. Combination of ONE (always
+ * set) and ZRO (always clear) forms an endianness sanity check
+ */
+#define HDR_ONE BIT(23)
+#define HDR_ZRO BIT(15)
+
+/* Start Index or SharedDesc Length */
+#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
+
+/* If shared descriptor header, 6-bit length */
+#define HDR_DESCLEN_SHR_MASK 0x3f
+
+/* If non-shared header, 7-bit length */
+#define HDR_DESCLEN_MASK 0x7f
+
+/* This is a TrustedDesc (if not SharedDesc) */
+#define HDR_TRUSTED BIT(14)
+
+/* Make into TrustedDesc (if not SharedDesc) */
+#define HDR_MAKE_TRUSTED BIT(13)
+
+/* Clear Input FiFO (if SharedDesc) */
+#define HDR_CLEAR_IFIFO BIT(13)
+
+/* Save context if self-shared (if SharedDesc) */
+#define HDR_SAVECTX BIT(12)
+
+/* Next item points to SharedDesc */
+#define HDR_SHARED BIT(12)
+
+/*
+ * Reverse Execution Order - execute JobDesc first, then
+ * execute SharedDesc (normally SharedDesc goes first).
+ */
+#define HDR_REVERSE BIT(11)
+
+/* Propagate DNR property to SharedDesc */
+#define HDR_PROP_DNR BIT(11)
+
+/* DECO Select Valid */
+#define HDR_EXT_DSEL_VALID BIT(7)
+
+/* Fake trusted descriptor */
+#define HDR_EXT_FTD BIT(8)
+
+/* JobDesc/SharedDesc share property */
+#define HDR_SD_SHARE_SHIFT 8
+#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_JD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
+
+#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
+
+/* JobDesc/SharedDesc descriptor length */
+#define HDR_JD_LENGTH_MASK 0x7f
+#define HDR_SD_LENGTH_MASK 0x3f
+
+/*
+ * KEY/SEQ_KEY Command Constructs
+ */
+
+/* Key Destination Class: 01 = Class 1, 02 - Class 2 */
+#define KEY_DEST_CLASS_SHIFT 25
+#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
+#define KEY_DEST_CLASS1 (1 << KEY_DEST_CLASS_SHIFT)
+#define KEY_DEST_CLASS2 (2 << KEY_DEST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define KEY_SGF BIT(24)
+#define KEY_VLF BIT(24)
+
+/* Immediate - Key follows command in the descriptor */
+#define KEY_IMM BIT(23)
+
+/*
+ * Already in Input Data FIFO - the Input Data Sequence is not read, since it is
+ * already in the Input Data FIFO.
+ */
+#define KEY_AIDF BIT(23)
+
+/*
+ * Encrypted - Key is encrypted either with the KEK, or
+ * with the TDKEK if this descriptor is trusted
+ */
+#define KEY_ENC BIT(22)
+
+/*
+ * No Write Back - Do not allow key to be FIFO STOREd
+ */
+#define KEY_NWB BIT(21)
+
+/*
+ * Enhanced Encryption of Key
+ */
+#define KEY_EKT BIT(20)
+
+/*
+ * Encrypted with Trusted Key
+ */
+#define KEY_TK BIT(15)
+
+/*
+ * Plaintext Store
+ */
+#define KEY_PTS BIT(14)
+
+/*
+ * KDEST - Key Destination: 0 - class key register,
+ * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split key
+ */
+#define KEY_DEST_SHIFT 16
+#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
+
+#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
+
+/* Length in bytes */
+#define KEY_LENGTH_MASK 0x000003ff
+
+/*
+ * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
+ */
+
+/*
+ * Load/Store Destination: 0 = class independent CCB,
+ * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
+ */
+#define LDST_CLASS_SHIFT 25
+#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define LDST_SGF BIT(24)
+#define LDST_VLF BIT(24)
+
+/* Immediate - Key follows this command in descriptor */
+#define LDST_IMM_MASK 1
+#define LDST_IMM_SHIFT 23
+#define LDST_IMM BIT(23)
+
+/* SRC/DST - Destination for LOAD, Source for STORE */
+#define LDST_SRCDST_SHIFT 16
+#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_INFIFO (0x7c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_OUTFIFO (0x7e << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_WORD_MODE_REG (0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_JQCTRL (0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_KEYSZ_REG (0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_JQDAR (0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_STAT (0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_DCHKSM (0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PID (0x04 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_GTR (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS_CTX (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STR (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB (0x41 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SZL (0x70 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SZM (0x71 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_L (0x72 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_M (0x73 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_SZL (0x74 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_SZM (0x75 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IFNSR (0x76 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_OFNSR (0x77 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_ALTSOURCE (0x78 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination */
+#define LDST_OFFSET_SHIFT 8
+#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
+
+/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
+/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
+#define LDOFF_CHG_SHARE_SHIFT 0
+#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO BIT(2)
+#define LDOFF_DISABLE_AUTO_NFIFO BIT(3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
+#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT 6
+#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes */
+#define LDST_LEN_SHIFT 0
+#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
+
+/* Special Length definitions when dst=deco-ctrl */
+#define LDLEN_ENABLE_OSL_COUNT BIT(7)
+#define LDLEN_RST_CHA_OFIFO_PTR BIT(6)
+#define LDLEN_RST_OFIFO BIT(5)
+#define LDLEN_SET_OFIFO_OFF_VALID BIT(4)
+#define LDLEN_SET_OFIFO_OFF_RSVD BIT(3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
+#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+
+/* CCB Clear Written Register bits */
+#define CLRW_CLR_C1MODE BIT(0)
+#define CLRW_CLR_C1DATAS BIT(2)
+#define CLRW_CLR_C1ICV BIT(3)
+#define CLRW_CLR_C1CTX BIT(5)
+#define CLRW_CLR_C1KEY BIT(6)
+#define CLRW_CLR_PK_A BIT(12)
+#define CLRW_CLR_PK_B BIT(13)
+#define CLRW_CLR_PK_N BIT(14)
+#define CLRW_CLR_PK_E BIT(15)
+#define CLRW_CLR_C2MODE BIT(16)
+#define CLRW_CLR_C2KEYS BIT(17)
+#define CLRW_CLR_C2DATAS BIT(18)
+#define CLRW_CLR_C2CTX BIT(21)
+#define CLRW_CLR_C2KEY BIT(22)
+#define CLRW_RESET_CLS2_DONE BIT(26) /* era 4 */
+#define CLRW_RESET_CLS1_DONE BIT(27) /* era 4 */
+#define CLRW_RESET_CLS2_CHA BIT(28) /* era 4 */
+#define CLRW_RESET_CLS1_CHA BIT(29) /* era 4 */
+#define CLRW_RESET_OFIFO BIT(30) /* era 3 */
+#define CLRW_RESET_IFIFO_DFIFO BIT(31) /* era 3 */
+
+/* CHA Control Register bits */
+#define CCTRL_RESET_CHA_ALL BIT(0)
+#define CCTRL_RESET_CHA_AESA BIT(1)
+#define CCTRL_RESET_CHA_DESA BIT(2)
+#define CCTRL_RESET_CHA_AFHA BIT(3)
+#define CCTRL_RESET_CHA_KFHA BIT(4)
+#define CCTRL_RESET_CHA_SF8A BIT(5)
+#define CCTRL_RESET_CHA_PKHA BIT(6)
+#define CCTRL_RESET_CHA_MDHA BIT(7)
+#define CCTRL_RESET_CHA_CRCA BIT(8)
+#define CCTRL_RESET_CHA_RNG BIT(9)
+#define CCTRL_RESET_CHA_SF9A BIT(10)
+#define CCTRL_RESET_CHA_ZUCE BIT(11)
+#define CCTRL_RESET_CHA_ZUCA BIT(12)
+#define CCTRL_UNLOAD_PK_A0 BIT(16)
+#define CCTRL_UNLOAD_PK_A1 BIT(17)
+#define CCTRL_UNLOAD_PK_A2 BIT(18)
+#define CCTRL_UNLOAD_PK_A3 BIT(19)
+#define CCTRL_UNLOAD_PK_B0 BIT(20)
+#define CCTRL_UNLOAD_PK_B1 BIT(21)
+#define CCTRL_UNLOAD_PK_B2 BIT(22)
+#define CCTRL_UNLOAD_PK_B3 BIT(23)
+#define CCTRL_UNLOAD_PK_N BIT(24)
+#define CCTRL_UNLOAD_PK_A BIT(26)
+#define CCTRL_UNLOAD_PK_B BIT(27)
+#define CCTRL_UNLOAD_SBOX BIT(28)
+
+/* IRQ Control Register (CxCIRQ) bits */
+#define CIRQ_ADI BIT(1)
+#define CIRQ_DDI BIT(2)
+#define CIRQ_RCDI BIT(3)
+#define CIRQ_KDI BIT(4)
+#define CIRQ_S8DI BIT(5)
+#define CIRQ_PDI BIT(6)
+#define CIRQ_MDI BIT(7)
+#define CIRQ_CDI BIT(8)
+#define CIRQ_RNDI BIT(9)
+#define CIRQ_S9DI BIT(10)
+#define CIRQ_ZEDI BIT(11) /* valid for Era 5 or higher */
+#define CIRQ_ZADI BIT(12) /* valid for Era 5 or higher */
+#define CIRQ_AEI BIT(17)
+#define CIRQ_DEI BIT(18)
+#define CIRQ_RCEI BIT(19)
+#define CIRQ_KEI BIT(20)
+#define CIRQ_S8EI BIT(21)
+#define CIRQ_PEI BIT(22)
+#define CIRQ_MEI BIT(23)
+#define CIRQ_CEI BIT(24)
+#define CIRQ_RNEI BIT(25)
+#define CIRQ_S9EI BIT(26)
+#define CIRQ_ZEEI BIT(27) /* valid for Era 5 or higher */
+#define CIRQ_ZAEI BIT(28) /* valid for Era 5 or higher */
+
+/*
+ * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
+ * Command Constructs
+ */
+
+/*
+ * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
+ * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
+ * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
+ */
+#define FIFOLD_CLASS_SHIFT 25
+#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT 25
+#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_BOTH (0x03 << FIFOST_CLASS_SHIFT)
+
+/*
+ * Scatter-Gather Table/Variable Length Field
+ * If set for FIFO_LOAD, refers to a SG table. Within
+ * SEQ_FIFO_LOAD, is variable input sequence
+ */
+#define FIFOLDST_SGF_SHIFT 24
+#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF BIT(24)
+#define FIFOLDST_VLF BIT(24)
+
+/*
+ * Immediate - Data follows command in descriptor
+ * AIDF - Already in Input Data FIFO
+ */
+#define FIFOLD_IMM_SHIFT 23
+#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_AIDF_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM BIT(23)
+#define FIFOLD_AIDF BIT(23)
+
+#define FIFOST_IMM_SHIFT 23
+#define FIFOST_IMM_MASK (1 << FIFOST_IMM_SHIFT)
+#define FIFOST_IMM BIT(23)
+
+/* Continue - Not the last FIFO store to come */
+#define FIFOST_CONT_SHIFT 23
+#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
+#define FIFOST_CONT BIT(23)
+
+/*
+ * Extended Length - use 32-bit extended length that
+ * follows the pointer field. Illegal with IMM set
+ */
+#define FIFOLDST_EXT_SHIFT 22
+#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT BIT(22)
+
+/* Input data type.*/
+#define FIFOLD_TYPE_SHIFT 16
+#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
+
+/* PK types */
+#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
+
+/* Other types. Need to OR in last/flush bits as desired */
+#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
+
+/* Last/Flush bits for use with "other" types above */
+#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOINFOFIFO (0x0f << FIFOLD_TYPE_SHIFT)
+
+#define FIFOLDST_LEN_MASK 0xffff
+#define FIFOLDST_EXT_LEN_MASK 0xffffffff
+
+/* Output data types */
+#define FIFOST_TYPE_SHIFT 16
+#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA2 (0x31 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
+
+/*
+ * OPERATION Command Constructs
+ */
+
+/* Operation type selectors - OP TYPE */
+#define OP_TYPE_SHIFT 24
+#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
+
+#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
+
+/* ProtocolID selectors - PROTID */
+#define OP_PCLID_SHIFT 16
+#define OP_PCLID_MASK (0xff << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
+#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12_PRF (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
+#define OP_PCLID_DIFFIEHELLMAN (0x17 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSAENCRYPT (0x18 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSADECRYPT (0x19 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
+#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10 (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_IPSEC_NEW (0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_DCRC (0x31 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_RLC_PDU (0x32 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_RLC_SDU (0x33 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_USER (0x42 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_CTRL (0x43 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_CTRL_MIXED (0x44 << OP_PCLID_SHIFT)
+
+/*
+ * ProtocolInfo selectors
+ */
+#define OP_PCLINFO_MASK 0xffff
+
+/* for OP_PCLID_IPSEC */
+#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
+
+#define OP_PCL_IPSEC_DES_IV64 0x0100
+#define OP_PCL_IPSEC_DES 0x0200
+#define OP_PCL_IPSEC_3DES 0x0300
+#define OP_PCL_IPSEC_NULL 0x0B00
+#define OP_PCL_IPSEC_AES_CBC 0x0c00
+#define OP_PCL_IPSEC_AES_CTR 0x0d00
+#define OP_PCL_IPSEC_AES_XTS 0x1600
+#define OP_PCL_IPSEC_AES_CCM8 0x0e00
+#define OP_PCL_IPSEC_AES_CCM12 0x0f00
+#define OP_PCL_IPSEC_AES_CCM16 0x1000
+#define OP_PCL_IPSEC_AES_GCM8 0x1200
+#define OP_PCL_IPSEC_AES_GCM12 0x1300
+#define OP_PCL_IPSEC_AES_GCM16 0x1400
+#define OP_PCL_IPSEC_AES_NULL_WITH_GMAC 0x1500
+
+#define OP_PCL_IPSEC_HMAC_NULL 0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
+#define OP_PCL_IPSEC_AES_CMAC_96 0x0008
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
+
+/* For SRTP - OP_PCLID_SRTP */
+#define OP_PCL_SRTP_CIPHER_MASK 0xff00
+#define OP_PCL_SRTP_AUTH_MASK 0x00ff
+
+#define OP_PCL_SRTP_AES_CTR 0x0d00
+
+#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
+
+/* For SSL 3.0 - OP_PCLID_SSL30 */
+#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
+#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
+#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
+
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_1 0x009C
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_1 0x009D
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_2 0x009E
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_2 0x009F
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_3 0x00A0
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_3 0x00A1
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_4 0x00A2
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_4 0x00A3
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_5 0x00A4
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_5 0x00A5
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_6 0x00A6
+
+#define OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384 0x00A7
+#define OP_PCL_TLS_PSK_AES_128_GCM_SHA256 0x00A8
+#define OP_PCL_TLS_PSK_AES_256_GCM_SHA384 0x00A9
+#define OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256 0x00AA
+#define OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384 0x00AB
+#define OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256 0x00AC
+#define OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384 0x00AD
+#define OP_PCL_TLS_PSK_AES_128_CBC_SHA256 0x00AE
+#define OP_PCL_TLS_PSK_AES_256_CBC_SHA384 0x00AF
+#define OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256 0x00B2
+#define OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384 0x00B3
+#define OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256 0x00B6
+#define OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384 0x00B7
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
+
+#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
+#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
+#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
+#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
+#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
+#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
+#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
+#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_SSL30_RC4_128_MD5 0x0024
+#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
+#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_SSL30_RC4_40_MD5 0x002b
+#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
+#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_SSL30_RC4_128_SHA 0x0020
+#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
+#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
+#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
+#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
+#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
+#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
+#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
+#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
+#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_SSL30_RC4_40_SHA 0x0028
+
+/* For TLS 1.0 - OP_PCLID_TLS10 */
+#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
+
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256 0xC023
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384 0xC024
+#define OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256 0xC025
+#define OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384 0xC026
+#define OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256 0xC027
+#define OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384 0xC028
+#define OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256 0xC029
+#define OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384 0xC02A
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256 0xC02B
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384 0xC02C
+#define OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256 0xC02D
+#define OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384 0xC02E
+#define OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256 0xC02F
+#define OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384 0xC030
+#define OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256 0xC031
+#define OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384 0xC032
+#define OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA 0xC033
+#define OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA 0xC034
+#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA 0xC035
+#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA 0xC036
+#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256 0xC037
+#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384 0xC038
+
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS10_RC4_128_MD5 0x0024
+#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS10_RC4_40_MD5 0x002b
+#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS10_RC4_128_SHA 0x0020
+#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS10_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
+
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA160 0xff90
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA384 0xff93
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA224 0xff94
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA512 0xff95
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA256 0xff96
+#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE 0xfffe
+#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF 0xffff
+
+/* For TLS 1.1 - OP_PCLID_TLS11 */
+#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS11_RC4_128_MD5 0x0024
+#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS11_RC4_40_MD5 0x002b
+#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS11_RC4_128_SHA 0x0020
+#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS11_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
+
+
+/* For TLS 1.2 - OP_PCLID_TLS12 */
+#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS12_RC4_128_MD5 0x0024
+#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS12_RC4_40_MD5 0x002b
+#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS12_RC4_128_SHA 0x0020
+#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS12_RC4_40_SHA 0x0028
+
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
+
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
+
+/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
+
+/* For DTLS - OP_PCLID_DTLS */
+
+#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
+#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
+#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
+
+#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
+#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
+
+
+#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
+#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
+#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
+#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
+#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
+#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
+#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
+
+/* 802.16 WiMAX protinfos */
+#define OP_PCL_WIMAX_OFDM 0x0201
+#define OP_PCL_WIMAX_OFDMA 0x0231
+
+/* 802.11 WiFi protinfos */
+#define OP_PCL_WIFI 0xac04
+
+/* MacSec protinfos */
+#define OP_PCL_MACSEC 0x0001
+
+/* 3G DCRC protinfos */
+#define OP_PCL_3G_DCRC_CRC7 0x0710
+#define OP_PCL_3G_DCRC_CRC11 0x0B10
+
+/* 3G RLC protinfos */
+#define OP_PCL_3G_RLC_NULL 0x0000
+#define OP_PCL_3G_RLC_KASUMI 0x0001
+#define OP_PCL_3G_RLC_SNOW 0x0002
+
+/* LTE protinfos */
+#define OP_PCL_LTE_NULL 0x0000
+#define OP_PCL_LTE_SNOW 0x0001
+#define OP_PCL_LTE_AES 0x0002
+#define OP_PCL_LTE_ZUC 0x0003
+
+/* LTE mixed protinfos */
+#define OP_PCL_LTE_MIXED_AUTH_SHIFT 0
+#define OP_PCL_LTE_MIXED_AUTH_MASK (3 << OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_SHIFT 8
+#define OP_PCL_LTE_MIXED_ENC_MASK (3 < OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_NULL (OP_PCL_LTE_NULL << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_SNOW (OP_PCL_LTE_SNOW << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_AES (OP_PCL_LTE_AES << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_ZUC (OP_PCL_LTE_ZUC << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_NULL (OP_PCL_LTE_NULL << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_SNOW (OP_PCL_LTE_SNOW << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_AES (OP_PCL_LTE_AES << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_ZUC (OP_PCL_LTE_ZUC << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_DSA_MSG BIT(10)
+#define OP_PCL_PKPROT_HASH_SHIFT 7
+#define OP_PCL_PKPROT_HASH_MASK (7 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_MD5 (0 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA1 (1 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA224 (2 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA256 (3 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA384 (4 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA512 (5 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_EKT_Z BIT(6)
+#define OP_PCL_PKPROT_DECRYPT_Z BIT(5)
+#define OP_PCL_PKPROT_EKT_PRI BIT(4)
+#define OP_PCL_PKPROT_TEST BIT(3)
+#define OP_PCL_PKPROT_DECRYPT_PRI BIT(2)
+#define OP_PCL_PKPROT_ECC BIT(1)
+#define OP_PCL_PKPROT_F2M BIT(0)
+
+/* Blob protinfos */
+#define OP_PCL_BLOB_TKEK_SHIFT 9
+#define OP_PCL_BLOB_TKEK BIT(9)
+#define OP_PCL_BLOB_EKT_SHIFT 8
+#define OP_PCL_BLOB_EKT BIT(8)
+#define OP_PCL_BLOB_REG_SHIFT 4
+#define OP_PCL_BLOB_REG_MASK (0xF << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_MEMORY (0x0 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_KEY1 (0x1 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_KEY2 (0x3 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_AFHA_SBOX (0x5 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_SPLIT (0x7 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_PKE (0x9 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_SEC_MEM_SHIFT 3
+#define OP_PCL_BLOB_SEC_MEM BIT(3)
+#define OP_PCL_BLOB_BLACK BIT(2)
+#define OP_PCL_BLOB_FORMAT_SHIFT 0
+#define OP_PCL_BLOB_FORMAT_MASK 0x3
+#define OP_PCL_BLOB_FORMAT_NORMAL 0
+#define OP_PCL_BLOB_FORMAT_MASTER_VER 2
+#define OP_PCL_BLOB_FORMAT_TEST 3
+
+/* IKE / IKEv2 protinfos */
+#define OP_PCL_IKE_HMAC_MD5 0x0100
+#define OP_PCL_IKE_HMAC_SHA1 0x0200
+#define OP_PCL_IKE_HMAC_AES128_CBC 0x0400
+#define OP_PCL_IKE_HMAC_SHA256 0x0500
+#define OP_PCL_IKE_HMAC_SHA384 0x0600
+#define OP_PCL_IKE_HMAC_SHA512 0x0700
+#define OP_PCL_IKE_HMAC_AES128_CMAC 0x0800
+
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_TEST BIT(3)
+#define OP_PCL_PKPROT_DECRYPT BIT(2)
+#define OP_PCL_PKPROT_ECC BIT(1)
+#define OP_PCL_PKPROT_F2M BIT(0)
+
+/* RSA Protinfo */
+#define OP_PCL_RSAPROT_OP_MASK 3
+#define OP_PCL_RSAPROT_OP_ENC_F_IN 0
+#define OP_PCL_RSAPROT_OP_ENC_F_OUT 1
+#define OP_PCL_RSAPROT_OP_DEC_ND 0
+#define OP_PCL_RSAPROT_OP_DEC_PQD 1
+#define OP_PCL_RSAPROT_OP_DEC_PQDPDQC 2
+#define OP_PCL_RSAPROT_FFF_SHIFT 4
+#define OP_PCL_RSAPROT_FFF_MASK (7 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_RED (0 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_ENC (1 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_TK_ENC (5 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_EKT (3 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_TK_EKT (7 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_PPP_SHIFT 8
+#define OP_PCL_RSAPROT_PPP_MASK (7 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_RED (0 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_ENC (1 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_TK_ENC (5 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_EKT (3 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_TK_EKT (7 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_FMT_PKCSV15 BIT(12)
+
+/* Derived Key Protocol (DKP) Protinfo */
+#define OP_PCL_DKP_SRC_SHIFT 14
+#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_DST_SHIFT 12
+#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_KEY_SHIFT 0
+#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
+
+/* For non-protocol/alg-only op commands */
+#define OP_ALG_TYPE_SHIFT 24
+#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS1 (0x2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2 (0x4 << OP_ALG_TYPE_SHIFT)
+
+#define OP_ALG_ALGSEL_SHIFT 16
+#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_AES (0x10 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA256 (0x43 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA384 (0x44 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA512 (0x45 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_RNG (0x50 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F8 (0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ZUCE (0xB0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ZUCA (0xC0 << OP_ALG_ALGSEL_SHIFT)
+
+#define OP_ALG_AAI_SHIFT 4
+#define OP_ALG_AAI_MASK (0x3ff << OP_ALG_AAI_SHIFT)
+
+/* block cipher AAI set */
+#define OP_ALG_AESA_MODE_MASK (0xF0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD128 (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD8 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD16 (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD24 (0x03 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD32 (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD40 (0x05 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD48 (0x06 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD56 (0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD64 (0x08 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD72 (0x09 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD80 (0x0a << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD88 (0x0b << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD96 (0x0c << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD104 (0x0d << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD112 (0x0e << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD120 (0x0f << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_ECB (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CFB (0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_OFB (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XTS (0x50 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CMAC (0x60 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XCBC_MAC (0x70 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CCM (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GCM (0x90 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_XCBCMAC (0xa0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_XCBCMAC (0xb0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_CMAC (0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_CMAC_LTE (0xd0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_CMAC (0xe0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CHECKODD (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DK (0x100 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_C2K (0x200 << OP_ALG_AAI_SHIFT)
+
+/* randomizer AAI set */
+#define OP_ALG_RNG_MODE_MASK (0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_SHIFT OP_ALG_AAI_SHIFT
+#define OP_ALG_AAI_RNG4_SH_MASK (0x03 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
+
+/* hmac/smac AAI set */
+#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_SMAC (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC_PRECOMP (0x04 << OP_ALG_AAI_SHIFT)
+
+/* CRC AAI set*/
+#define OP_ALG_CRC_POLY_MASK (0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_802 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_3385 (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CUST_POLY (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DIS (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOS (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOC (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_IVZ (0x80 << OP_ALG_AAI_SHIFT)
+
+/* Kasumi/SNOW/ZUC AAI set */
+#define OP_ALG_AAI_F8 (0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_F9 (0xc8 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
+
+#define OP_ALG_AS_SHIFT 2
+#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INIT (1 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_FINALIZE (2 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INITFINAL (3 << OP_ALG_AS_SHIFT)
+
+#define OP_ALG_ICV_SHIFT 1
+#define OP_ALG_ICV_MASK (1 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_OFF 0
+#define OP_ALG_ICV_ON BIT(1)
+
+#define OP_ALG_DIR_SHIFT 0
+#define OP_ALG_DIR_MASK 1
+#define OP_ALG_DECRYPT 0
+#define OP_ALG_ENCRYPT BIT(0)
+
+/* PKHA algorithm type set */
+#define OP_ALG_PK 0x00800000
+#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
+
+/* PKHA mode clear memory functions */
+#define OP_ALG_PKMODE_A_RAM BIT(19)
+#define OP_ALG_PKMODE_B_RAM BIT(18)
+#define OP_ALG_PKMODE_E_RAM BIT(17)
+#define OP_ALG_PKMODE_N_RAM BIT(16)
+#define OP_ALG_PKMODE_CLEARMEM BIT(0)
+
+/* PKHA mode clear memory functions */
+#define OP_ALG_PKMODE_CLEARMEM_ALL (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_ABE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_ABN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AB (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AEN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_A (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BEN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_B (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_EN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_E (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_N (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_N_RAM)
+
+/* PKHA mode modular-arithmetic functions */
+#define OP_ALG_PKMODE_MOD_IN_MONTY BIT(19)
+#define OP_ALG_PKMODE_MOD_OUT_MONTY BIT(18)
+#define OP_ALG_PKMODE_MOD_F2M BIT(17)
+#define OP_ALG_PKMODE_MOD_R2_IN BIT(16)
+#define OP_ALG_PKMODE_PRJECTV BIT(11)
+#define OP_ALG_PKMODE_TIME_EQ BIT(10)
+
+#define OP_ALG_PKMODE_OUT_B 0x000
+#define OP_ALG_PKMODE_OUT_A 0x100
+
+/*
+ * PKHA mode modular-arithmetic integer functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_MOD_ADD 0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
+#define OP_ALG_PKMODE_MOD_MULT 0x005
+#define OP_ALG_PKMODE_MOD_MULT_IM (0x005 | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_MOD_MULT_IM_OM (0x005 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY)
+#define OP_ALG_PKMODE_MOD_EXPO 0x006
+#define OP_ALG_PKMODE_MOD_EXPO_TEQ (0x006 | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_MOD_EXPO_IM (0x006 | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_MOD_EXPO_IM_TEQ (0x006 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_MOD_REDUCT 0x007
+#define OP_ALG_PKMODE_MOD_INV 0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
+#define OP_ALG_PKMODE_MOD_GCD 0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
+#define OP_ALG_PKMODE_MOD_SML_EXP 0x016
+
+/*
+ * PKHA mode modular-arithmetic F2m functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_F2M_ADD (0x002 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_MUL (0x005 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_MUL_IM (0x005 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_F2M_MUL_IM_OM (0x005 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY)
+#define OP_ALG_PKMODE_F2M_EXP (0x006 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_EXP_TEQ (0x006 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_F2M_AMODN (0x007 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_INV (0x008 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_R2 (0x00c | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_GCD (0x00e | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_SML_EXP (0x016 | OP_ALG_PKMODE_MOD_F2M)
+
+/*
+ * PKHA mode ECC Integer arithmetic functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_ECC_MOD_ADD 0x009
+#define OP_ALG_PKMODE_ECC_MOD_ADD_IM_OM_PROJ \
+ (0x009 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_DBL 0x00a
+#define OP_ALG_PKMODE_ECC_MOD_DBL_IM_OM_PROJ \
+ (0x00a | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_MUL 0x00b
+#define OP_ALG_PKMODE_ECC_MOD_MUL_TEQ (0x00b | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2 (0x00b | OP_ALG_PKMODE_MOD_R2_IN)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV \
+ | OP_ALG_PKMODE_TIME_EQ)
+
+/*
+ * PKHA mode ECC F2m arithmetic functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_ECC_F2M_ADD (0x009 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_ADD_IM_OM_PROJ \
+ (0x009 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_DBL (0x00a | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_DBL_IM_OM_PROJ \
+ (0x00a | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_MUL (0x00b | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2 \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV \
+ | OP_ALG_PKMODE_TIME_EQ)
+
+/* PKHA mode copy-memory functions */
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
+#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT 10
+#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
+#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
+#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+/* PKHA mode copy-memory functions - amount based on N SIZE */
+#define OP_ALG_PKMODE_COPY_NSZ 0x10
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A_B (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_NSZ_A_N (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_NSZ_B_A (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_NSZ_B_N (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_NSZ_N_A (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_N_B (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_N_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_E)
+
+/* PKHA mode copy-memory functions - amount based on SRC SIZE */
+#define OP_ALG_PKMODE_COPY_SSZ 0x11
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A_B (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_SSZ_A_N (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_SSZ_B_A (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_SSZ_B_N (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_SSZ_N_A (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_N_B (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_N_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_E)
+
+/*
+ * SEQ_IN_PTR Command Constructs
+ */
+
+/* Release Buffers */
+#define SQIN_RBS BIT(26)
+
+/* Sequence pointer is really a descriptor */
+#define SQIN_INL BIT(25)
+
+/* Sequence pointer is a scatter-gather table */
+#define SQIN_SGF BIT(24)
+
+/* Appends to a previous pointer */
+#define SQIN_PRE BIT(23)
+
+/* Use extended length following pointer */
+#define SQIN_EXT BIT(22)
+
+/* Restore sequence with pointer/length */
+#define SQIN_RTO BIT(21)
+
+/* Replace job descriptor */
+#define SQIN_RJD BIT(20)
+
+/* Sequence Out Pointer - start a new input sequence using output sequence */
+#define SQIN_SOP BIT(19)
+
+#define SQIN_LEN_SHIFT 0
+#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
+
+/*
+ * SEQ_OUT_PTR Command Constructs
+ */
+
+/* Sequence pointer is a scatter-gather table */
+#define SQOUT_SGF BIT(24)
+
+/* Appends to a previous pointer */
+#define SQOUT_PRE BIT(23)
+
+/* Restore sequence with pointer/length */
+#define SQOUT_RTO BIT(21)
+
+/*
+ * Ignore length field, add current output frame length back to SOL register.
+ * Reset tracking length of bytes written to output frame.
+ * Must be used together with SQOUT_RTO.
+ */
+#define SQOUT_RST BIT(20)
+
+/* Allow "write safe" transactions for this Output Sequence */
+#define SQOUT_EWS BIT(19)
+
+/* Use extended length following pointer */
+#define SQOUT_EXT BIT(22)
+
+#define SQOUT_LEN_SHIFT 0
+#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
+
+/*
+ * SIGNATURE Command Constructs
+ */
+
+/* TYPE field is all that's relevant */
+#define SIGN_TYPE_SHIFT 16
+#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
+
+#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
+
+/*
+ * MOVE Command Constructs
+ */
+
+#define MOVE_AUX_SHIFT 25
+#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT 24
+#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP BIT(24)
+
+#define MOVE_SRC_SHIFT 20
+#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_NO_NFIFO (0x0a << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT 16
+#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_INFIFO (0x0a << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
+#define MOVE_DEST_ALTSOURCE (0x0f << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT 8
+#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT 0
+#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT 0
+#define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
+#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
+
+#define MOVELEN_SIZE_SHIFT 6
+#define MOVELEN_SIZE_MASK (0x3 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_WORD (0x01 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_BYTE (0x02 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_DWORD (0x03 << MOVELEN_SIZE_SHIFT)
+
+/*
+ * MATH Command Constructs
+ */
+
+#define MATH_IFB_SHIFT 26
+#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
+#define MATH_IFB BIT(26)
+
+#define MATH_NFU_SHIFT 25
+#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
+#define MATH_NFU BIT(25)
+
+/* STL for MATH, SSEL for MATHI */
+#define MATH_STL_SHIFT 24
+#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
+#define MATH_STL BIT(24)
+
+#define MATH_SSEL_SHIFT 24
+#define MATH_SSEL_MASK (1 << MATH_SSEL_SHIFT)
+#define MATH_SSEL BIT(24)
+
+#define MATH_SWP_SHIFT 0
+#define MATH_SWP_MASK (1 << MATH_SWP_SHIFT)
+#define MATH_SWP BIT(0)
+
+/* Function selectors */
+#define MATH_FUN_SHIFT 20
+#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT) /* ZBYT is for MATH */
+#define MATH_FUN_FBYT (0x0a << MATH_FUN_SHIFT) /* FBYT is for MATHI */
+#define MATH_FUN_BSWAP (0x0b << MATH_FUN_SHIFT)
+
+/* Source 0 selectors */
+#define MATH_SRC0_SHIFT 16
+#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_DPOVRD (0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ONE (0x0f << MATH_SRC0_SHIFT)
+
+/* Source 1 selectors */
+#define MATH_SRC1_SHIFT 12
+#define MATHI_SRC1_SHIFT 16
+#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_VARSEQINLEN (0x08 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_VARSEQOUTLEN (0x09 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
+#define MATH_SRC1_JOBSOURCE (0x0d << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
+
+/* Destination selectors */
+#define MATH_DEST_SHIFT 8
+#define MATHI_DEST_SHIFT 12
+#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
+
+/* MATHI Immediate value */
+#define MATHI_IMM_SHIFT 4
+#define MATHI_IMM_MASK (0xff << MATHI_IMM_SHIFT)
+
+/* Length selectors */
+#define MATH_LEN_SHIFT 0
+#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE 0x01
+#define MATH_LEN_2BYTE 0x02
+#define MATH_LEN_4BYTE 0x04
+#define MATH_LEN_8BYTE 0x08
+
+/*
+ * JUMP Command Constructs
+ */
+
+#define JUMP_CLASS_SHIFT 25
+#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_NONE 0
+#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT)
+
+#define JUMP_JSL_SHIFT 24
+#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL BIT(24)
+
+#define JUMP_TYPE_SHIFT 20
+#define JUMP_TYPE_MASK (0x0f << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL_INC (0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_GOSUB (0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL_DEC (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL (0x04 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_RETURN (0x06 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT (0x08 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER (0x0c << JUMP_TYPE_SHIFT)
+
+#define JUMP_TEST_SHIFT 16
+#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
+
+/* Condition codes. JSL bit is factored in */
+#define JUMP_COND_SHIFT 8
+#define JUMP_COND_MASK ((0xff << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_PK_0 BIT(15)
+#define JUMP_COND_PK_GCD_1 BIT(14)
+#define JUMP_COND_PK_PRIME BIT(13)
+#define JUMP_COND_MATH_N BIT(11)
+#define JUMP_COND_MATH_Z BIT(10)
+#define JUMP_COND_MATH_C BIT(9)
+#define JUMP_COND_MATH_NV BIT(8)
+
+#define JUMP_COND_JQP (BIT(15) | JUMP_JSL)
+#define JUMP_COND_SHRD (BIT(14) | JUMP_JSL)
+#define JUMP_COND_SELF (BIT(13) | JUMP_JSL)
+#define JUMP_COND_CALM (BIT(12) | JUMP_JSL)
+#define JUMP_COND_NIP (BIT(11) | JUMP_JSL)
+#define JUMP_COND_NIFP (BIT(10) | JUMP_JSL)
+#define JUMP_COND_NOP (BIT(9) | JUMP_JSL)
+#define JUMP_COND_NCP (BIT(8) | JUMP_JSL)
+
+/* Source / destination selectors */
+#define JUMP_SRC_DST_SHIFT 12
+#define JUMP_SRC_DST_MASK (0x0f << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH0 (0x00 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH1 (0x01 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH2 (0x02 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH3 (0x03 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_DPOVRD (0x07 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_SEQINLEN (0x08 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_SEQOUTLEN (0x09 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_VARSEQINLEN (0x0a << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_VARSEQOUTLEN (0x0b << JUMP_SRC_DST_SHIFT)
+
+#define JUMP_OFFSET_SHIFT 0
+#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
+
+/*
+ * NFIFO ENTRY
+ * Data Constructs
+ *
+ */
+#define NFIFOENTRY_DEST_SHIFT 30
+#define NFIFOENTRY_DEST_MASK ((uint32_t)(3 << NFIFOENTRY_DEST_SHIFT))
+#define NFIFOENTRY_DEST_DECO (0 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS1 (1 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS2 ((uint32_t)(2 << NFIFOENTRY_DEST_SHIFT))
+#define NFIFOENTRY_DEST_BOTH ((uint32_t)(3 << NFIFOENTRY_DEST_SHIFT))
+
+#define NFIFOENTRY_LC2_SHIFT 29
+#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2 BIT(29)
+
+#define NFIFOENTRY_LC1_SHIFT 28
+#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1 BIT(28)
+
+#define NFIFOENTRY_FC2_SHIFT 27
+#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2 BIT(27)
+
+#define NFIFOENTRY_FC1_SHIFT 26
+#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1 BIT(26)
+
+#define NFIFOENTRY_STYPE_SHIFT 24
+#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_DFIFO (0 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_OFIFO (1 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_PAD (2 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_SNOOP (3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_ALTSOURCE ((0 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+#define NFIFOENTRY_STYPE_OFIFO_SYNC ((1 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+#define NFIFOENTRY_STYPE_SNOOP_ALT ((3 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+
+#define NFIFOENTRY_DTYPE_SHIFT 20
+#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_BND_SHIFT 19
+#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND BIT(19)
+
+#define NFIFOENTRY_PTYPE_SHIFT 16
+#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_OC_SHIFT 15
+#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC BIT(15)
+
+#define NFIFOENTRY_PR_SHIFT 15
+#define NFIFOENTRY_PR_MASK (1 << NFIFOENTRY_PR_SHIFT)
+#define NFIFOENTRY_PR BIT(15)
+
+#define NFIFOENTRY_AST_SHIFT 14
+#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_AST_SHIFT)
+#define NFIFOENTRY_AST BIT(14)
+
+#define NFIFOENTRY_BM_SHIFT 11
+#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM BIT(11)
+
+#define NFIFOENTRY_PS_SHIFT 10
+#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS BIT(10)
+
+#define NFIFOENTRY_DLEN_SHIFT 0
+#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT)
+
+#define NFIFOENTRY_PLEN_SHIFT 0
+#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
+
+/* Append Load Immediate Command */
+#define FD_CMD_APPEND_LOAD_IMMEDIATE BIT(31)
+
+/* Set SEQ LIODN equal to the Non-SEQ LIODN for the job */
+#define FD_CMD_SET_SEQ_LIODN_EQUAL_NONSEQ_LIODN BIT(30)
+
+/* Frame Descriptor Command for Replacement Job Descriptor */
+#define FD_CMD_REPLACE_JOB_DESC BIT(29)
+
+#endif /* __RTA_DESC_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/algo.h
new file mode 100644
index 00000000..91f3e067
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -0,0 +1,646 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __DESC_ALGO_H__
+#define __DESC_ALGO_H__
+
+#include "hw/rta.h"
+#include "common.h"
+
+/**
+ * DOC: Algorithms - Shared Descriptor Constructors
+ *
+ * Shared descriptors for algorithms (i.e. not for protocols).
+ */
+
+/**
+ * cnstr_shdsc_snow_f8 - SNOW/f8 (UEA2) as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * @dir: Cipher direction (DIR_ENC/DIR_DEC)
+ * @count: UEA2 count value (32 bits)
+ * @bearer: UEA2 bearer ID (5 bits)
+ * @direction: UEA2 direction (1 bit)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_snow_f8(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t dir,
+ uint32_t count, uint8_t bearer, uint8_t direction)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t ct = count;
+ uint8_t br = bearer;
+ uint8_t dr = direction;
+ uint32_t context[2] = {ct, (br << 27) | (dr << 26)};
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab32(context[0]);
+ context[1] = swab32(context[1]);
+ }
+
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8, OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT1, 0, 8, IMMED | COPY);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_snow_f9 - SNOW/f9 (UIA2) as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * @dir: cipher direction (DIR_ENC/DIR_DEC)
+ * @count: UEA2 count value (32 bits)
+ * @fresh: UEA2 fresh value ID (32 bits)
+ * @direction: UEA2 direction (1 bit)
+ * @datalen: size of data
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_snow_f9(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t dir, uint32_t count,
+ uint32_t fresh, uint8_t direction, uint32_t datalen)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint64_t ct = count;
+ uint64_t fr = fresh;
+ uint64_t dr = direction;
+ uint64_t context[2];
+
+ context[0] = (ct << 32) | (dr << 26);
+ context[1] = fr << 32;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab64(context[0]);
+ context[1] = swab64(context[1]);
+ }
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT2, 0, 16, IMMED | COPY);
+ SEQFIFOLOAD(p, BIT_DATA, datalen, CLASS2 | LAST2);
+ /* Save lower half of MAC out into a 32-bit sequence */
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_blkcipher - block cipher transformation
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ * Valid modes for:
+ * AES: OP_ALG_AAI_* {CBC, CTR}
+ * DES, 3DES: OP_ALG_AAI_CBC
+ * @iv: IV data; if NULL, "ivlen" bytes from the input frame will be read as IV
+ * @ivlen: IV length
+ * @dir: DIR_ENC/DIR_DEC
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t *iv,
+ uint32_t ivlen, uint8_t dir)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t iv_off = 0;
+ const bool need_dk = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+ (cipherdata->algmode == OP_ALG_AAI_CBC);
+ LABEL(keyjmp);
+ LABEL(skipdk);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pskipdk);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (need_dk) {
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+
+ pskipdk = JUMP(p, skipdk, LOCAL_JUMP, ALL_TRUE, 0);
+ }
+ SET_LABEL(p, keyjmp);
+
+ if (need_dk) {
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
+ OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE, dir);
+ SET_LABEL(p, skipdk);
+ } else {
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ }
+
+ if (cipherdata->algmode == OP_ALG_AAI_CTR)
+ iv_off = 16;
+
+ if (iv)
+ /* IV load, convert size */
+ LOAD(p, (uintptr_t)iv, CONTEXT1, iv_off, ivlen, IMMED | COPY);
+ else
+ /* IV is present first before the actual message */
+ SEQLOAD(p, CONTEXT1, iv_off, ivlen, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+
+ /* Insert sequence load/store with VLF */
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ if (need_dk)
+ PATCH_JUMP(p, pskipdk, skipdk);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_hmac - HMAC shared
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions;
+ * message digest algorithm: OP_ALG_ALGSEL_MD5/ SHA1-512.
+ * @do_icv: 0 if ICV checking is not desired, any other value if ICV checking
+ * is needed for all the packets processed by this shared descriptor
+ * @trunc_len: Length of the truncated ICV to be written in the output buffer, 0
+ * if no truncation is needed
+ *
+ * Note: There's no support for keys longer than the block size of the
+ * underlying hash function, according to the selected algorithm.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_hmac(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t do_icv,
+ uint8_t trunc_len)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint8_t storelen, opicv, dir;
+ LABEL(keyjmp);
+ LABEL(jmpprecomp);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pjmpprecomp);
+
+ /* Compute fixed-size store based on alg selection */
+ switch (authdata->algtype) {
+ case OP_ALG_ALGSEL_MD5:
+ storelen = 16;
+ break;
+ case OP_ALG_ALGSEL_SHA1:
+ storelen = 20;
+ break;
+ case OP_ALG_ALGSEL_SHA224:
+ storelen = 28;
+ break;
+ case OP_ALG_ALGSEL_SHA256:
+ storelen = 32;
+ break;
+ case OP_ALG_ALGSEL_SHA384:
+ storelen = 48;
+ break;
+ case OP_ALG_ALGSEL_SHA512:
+ storelen = 64;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ trunc_len = trunc_len && (trunc_len < storelen) ? trunc_len : storelen;
+
+ opicv = do_icv ? ICV_CHECK_ENABLE : ICV_CHECK_DISABLE;
+ dir = do_icv ? DIR_DEC : DIR_ENC;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Do operation */
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC,
+ OP_ALG_AS_INITFINAL, opicv, dir);
+
+ pjmpprecomp = JUMP(p, jmpprecomp, LOCAL_JUMP, ALL_TRUE, 0);
+ SET_LABEL(p, keyjmp);
+
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL, opicv, dir);
+
+ SET_LABEL(p, jmpprecomp);
+
+ /* compute sequences */
+ if (opicv == ICV_CHECK_ENABLE)
+ MATHB(p, SEQINSZ, SUB, trunc_len, VSEQINSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+
+ /* Do load (variable length) */
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+
+ if (opicv == ICV_CHECK_ENABLE)
+ SEQFIFOLOAD(p, ICV2, trunc_len, LAST2);
+ else
+ SEQSTORE(p, CONTEXT2, 0, trunc_len, 0);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pjmpprecomp, jmpprecomp);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_kasumi_f8 - KASUMI F8 (Confidentiality) as a shared descriptor
+ * (ETSI "Document 1: f8 and f9 specification")
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * @dir: cipher direction (DIR_ENC/DIR_DEC)
+ * @count: count value (32 bits)
+ * @bearer: bearer ID (5 bits)
+ * @direction: direction (1 bit)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_kasumi_f8(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t dir,
+ uint32_t count, uint8_t bearer, uint8_t direction)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint64_t ct = count;
+ uint64_t br = bearer;
+ uint64_t dr = direction;
+ uint32_t context[2] = { ct, (br << 27) | (dr << 26) };
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab32(context[0]);
+ context[1] = swab32(context[1]);
+ }
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_KASUMI, OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT1, 0, 8, IMMED | COPY);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_kasumi_f9 - KASUMI F9 (Integrity) as a shared descriptor
+ * (ETSI "Document 1: f8 and f9 specification")
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * @dir: cipher direction (DIR_ENC/DIR_DEC)
+ * @count: count value (32 bits)
+ * @fresh: fresh value ID (32 bits)
+ * @direction: direction (1 bit)
+ * @datalen: size of data
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_kasumi_f9(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t dir,
+ uint32_t count, uint32_t fresh, uint8_t direction,
+ uint32_t datalen)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint16_t ctx_offset = 16;
+ uint32_t context[6] = {count, direction << 26, fresh, 0, 0, 0};
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab32(context[0]);
+ context[1] = swab32(context[1]);
+ context[2] = swab32(context[2]);
+ }
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_KASUMI, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT1, 0, 24, IMMED | COPY);
+ SEQFIFOLOAD(p, BIT_DATA, datalen, CLASS1 | LAST1);
+ /* Save output MAC of DWORD 2 into a 32-bit sequence */
+ SEQSTORE(p, CONTEXT1, ctx_offset, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_gcm_encap - AES-GCM encap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with
+ * OP_ALG_AAI_GCM.
+ * @ivlen: Initialization vector length
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_encap(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata,
+ uint32_t ivlen, uint32_t icvsize)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ LABEL(zeroassocjump2);
+ LABEL(zeroassocjump1);
+ LABEL(zeropayloadjump);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pzeroassocjump2);
+ REFERENCE(pzeroassocjump1);
+ REFERENCE(pzeropayloadjump);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ SET_LABEL(p, keyjmp);
+
+ /* class 1 operation */
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+
+ MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+
+ /* if assoclen + cryptlen is ZERO, skip to ICV write */
+ MATHB(p, SEQINSZ, SUB, ivlen, VSEQOUTSZ, 4, IMMED2);
+ pzeroassocjump2 = JUMP(p, zeroassocjump2, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+ pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* skip assoc data */
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+
+ /* cryptlen = seqinlen - assoclen */
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+ MATH_Z);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+ SET_LABEL(p, zeroassocjump1);
+
+ MATHB(p, SEQINSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ /* write encrypted data */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ /* read payload data */
+ SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | LAST1);
+
+ /* jump the zero-payload commands */
+ JUMP(p, 4, LOCAL_JUMP, ALL_TRUE, 0);
+
+ /* zero-payload commands */
+ SET_LABEL(p, zeropayloadjump);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | LAST1);
+
+ JUMP(p, 2, LOCAL_JUMP, ALL_TRUE, 0);
+
+ /* There is no input data */
+ SET_LABEL(p, zeroassocjump2);
+
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1 | LAST1);
+
+ /* write ICV */
+ SEQSTORE(p, CONTEXT1, 0, icvsize, 0);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pzeroassocjump2, zeroassocjump2);
+ PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+ PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_gcm_decap - AES-GCM decap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with
+ * OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_decap(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata,
+ uint32_t ivlen, uint32_t icvsize)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ LABEL(zeroassocjump1);
+ LABEL(zeropayloadjump);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pzeroassocjump1);
+ REFERENCE(pzeropayloadjump);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ SET_LABEL(p, keyjmp);
+
+ /* class 1 operation */
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_ENABLE, DIR_DEC);
+
+ MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+ pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* skip assoc data */
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+
+ SET_LABEL(p, zeroassocjump1);
+
+ /* cryptlen = seqoutlen - assoclen */
+ MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+ MATH_Z);
+
+ MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQOUTSZ, 4, 0);
+
+ /* store encrypted data */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ /* read payload data */
+ SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | FLUSH1);
+
+ /* zero-payload command */
+ SET_LABEL(p, zeropayloadjump);
+
+ /* read ICV */
+ SEQFIFOLOAD(p, ICV1, icvsize, CLASS1 | LAST1);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+ PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_crc(uint32_t *descbuf, bool swap)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_CRC,
+ OP_ALG_AAI_802 | OP_ALG_AAI_DOC,
+ OP_ALG_AS_FINALIZE, 0, DIR_ENC);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+#endif /* __DESC_ALGO_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/common.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/common.h
new file mode 100644
index 00000000..98425d8b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/common.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __DESC_COMMON_H__
+#define __DESC_COMMON_H__
+
+#include "hw/rta.h"
+
+/**
+ * DOC: Shared Descriptor Constructors - shared structures
+ *
+ * Data structures shared between algorithm, protocol implementations.
+ */
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ * functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @key: address where algorithm key resides; virtual address if key_type is
+ * RTA_DATA_IMM, physical (bus) address if key_type is RTA_DATA_PTR or
+ * RTA_DATA_IMM_DMA.
+ * @key_enc_flags: key encryption flags; see encrypt_flags parameter of KEY
+ * command for valid values.
+ * @key_type: enum rta_data_type
+ * @algmode: algorithm mode selector; for valid values, see documentation of the
+ * functions where it is used.
+ */
+struct alginfo {
+ uint32_t algtype;
+ uint32_t keylen;
+ uint64_t key;
+ uint32_t key_enc_flags;
+ enum rta_data_type key_type;
+ uint16_t algmode;
+};
+
+#define INLINE_KEY(alginfo) inline_flags(alginfo->key_type)
+
+/**
+ * rta_inline_query() - Provide indications on which data items can be inlined
+ * and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ * excluding the data items to be inlined (or corresponding
+ * pointer if an item is not inlined). Each cnstr_* function that
+ * generates descriptors should have a define mentioning
+ * corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ * together with the shared descriptor.
+ * @data_len: Array of lengths of the data items trying to be inlined
+ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
+ * otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
+ * check @inl_mask for details.
+ */
+static inline int
+rta_inline_query(unsigned int sd_base_len,
+ unsigned int jd_len,
+ unsigned int *data_len,
+ uint32_t *inl_mask,
+ unsigned int count)
+{
+ int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+ unsigned int i;
+
+ *inl_mask = 0;
+ for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+ if (rem_bytes - (int)(data_len[i] +
+ (count - i - 1) * CAAM_PTR_SZ) >= 0) {
+ rem_bytes -= data_len[i];
+ *inl_mask |= (1 << i);
+ } else {
+ rem_bytes -= CAAM_PTR_SZ;
+ }
+ }
+
+ return (rem_bytes >= 0) ? 0 : -1;
+}
+
+/**
+ * struct protcmd - Container for Protocol Operation Command fields
+ * @optype: command type
+ * @protid: protocol Identifier
+ * @protinfo: protocol Information
+ */
+struct protcmd {
+ uint32_t optype;
+ uint32_t protid;
+ uint16_t protinfo;
+};
+
+#endif /* __DESC_COMMON_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
new file mode 100644
index 00000000..35cc02a6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
@@ -0,0 +1,1521 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __DESC_IPSEC_H__
+#define __DESC_IPSEC_H__
+
+#include "hw/rta.h"
+#include "common.h"
+
+/**
+ * DOC: IPsec Shared Descriptor Constructors
+ *
+ * Shared descriptors for IPsec protocol.
+ */
+
+/* General IPSec ESP encap / decap PDB options */
+
+/**
+ * PDBOPTS_ESP_ESN - Extended sequence included
+ */
+#define PDBOPTS_ESP_ESN 0x10
+
+/**
+ * PDBOPTS_ESP_IPVSN - Process IPv6 header
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_IPVSN 0x02
+
+/**
+ * PDBOPTS_ESP_TUNNEL - Tunnel mode next-header byte
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_TUNNEL 0x01
+
+/* IPSec ESP Encap PDB options */
+
+/**
+ * PDBOPTS_ESP_UPDATE_CSUM - Update ip header checksum
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_UPDATE_CSUM 0x80
+
+/**
+ * PDBOPTS_ESP_DIFFSERV - Copy TOS/TC from inner iphdr
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_DIFFSERV 0x40
+
+/**
+ * PDBOPTS_ESP_IVSRC - IV comes from internal random gen
+ */
+#define PDBOPTS_ESP_IVSRC 0x20
+
+/**
+ * PDBOPTS_ESP_IPHDRSRC - IP header comes from PDB
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_IPHDRSRC 0x08
+
+/**
+ * PDBOPTS_ESP_INCIPHDR - Prepend IP header to output frame
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_INCIPHDR 0x04
+
+/**
+ * PDBOPTS_ESP_OIHI_MASK - Mask for Outer IP Header Included
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_MASK 0x0c
+
+/**
+ * PDBOPTS_ESP_OIHI_PDB_INL - Prepend IP header to output frame from PDB (where
+ * it is inlined).
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_PDB_INL 0x0c
+
+/**
+ * PDBOPTS_ESP_OIHI_PDB_REF - Prepend IP header to output frame from PDB
+ * (referenced by pointer).
+ *
+ * Vlid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_PDB_REF 0x08
+
+/**
+ * PDBOPTS_ESP_OIHI_IF - Prepend IP header to output frame from input frame
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_IF 0x04
+
+/**
+ * PDBOPTS_ESP_NAT - Enable RFC 3948 UDP-encapsulated-ESP
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_NAT 0x02
+
+/**
+ * PDBOPTS_ESP_NUC - Enable NAT UDP Checksum
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_NUC 0x01
+
+/* IPSec ESP Decap PDB options */
+
+/**
+ * PDBOPTS_ESP_ARS_MASK - antireplay window mask
+ */
+#define PDBOPTS_ESP_ARS_MASK 0xc0
+
+/**
+ * PDBOPTS_ESP_ARSNONE - No antireplay window
+ */
+#define PDBOPTS_ESP_ARSNONE 0x00
+
+/**
+ * PDBOPTS_ESP_ARS64 - 64-entry antireplay window
+ */
+#define PDBOPTS_ESP_ARS64 0xc0
+
+/**
+ * PDBOPTS_ESP_ARS128 - 128-entry antireplay window
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_ARS128 0x80
+
+/**
+ * PDBOPTS_ESP_ARS32 - 32-entry antireplay window
+ */
+#define PDBOPTS_ESP_ARS32 0x40
+
+/**
+ * PDBOPTS_ESP_VERIFY_CSUM - Validate ip header checksum
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_VERIFY_CSUM 0x20
+
+/**
+ * PDBOPTS_ESP_TECN - Implement RRFC6040 ECN tunneling from outer header to
+ * inner header.
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_TECN 0x20
+
+/**
+ * PDBOPTS_ESP_OUTFMT - Output only decapsulation
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_OUTFMT 0x08
+
+/**
+ * PDBOPTS_ESP_AOFL - Adjust out frame len
+ *
+ * Valid only for IPsec legacy mode and for SEC >= 5.3.
+ */
+#define PDBOPTS_ESP_AOFL 0x04
+
+/**
+ * PDBOPTS_ESP_ETU - EtherType Update
+ *
+ * Add corresponding ethertype (0x0800 for IPv4, 0x86dd for IPv6) in the output
+ * frame.
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_ETU 0x01
+
+#define PDBHMO_ESP_DECAP_SHIFT 28
+#define PDBHMO_ESP_ENCAP_SHIFT 28
+#define PDBNH_ESP_ENCAP_SHIFT 16
+#define PDBNH_ESP_ENCAP_MASK (0xff << PDBNH_ESP_ENCAP_SHIFT)
+#define PDBHDRLEN_ESP_DECAP_SHIFT 16
+#define PDBHDRLEN_MASK (0x0fff << PDBHDRLEN_ESP_DECAP_SHIFT)
+#define PDB_NH_OFFSET_SHIFT 8
+#define PDB_NH_OFFSET_MASK (0xff << PDB_NH_OFFSET_SHIFT)
+
+/**
+ * PDBHMO_ESP_DECAP_DTTL - IPsec ESP decrement TTL (IPv4) / Hop limit (IPv6)
+ * HMO option.
+ */
+#define PDBHMO_ESP_DECAP_DTTL (0x02 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_ENCAP_DTTL - IPsec ESP increment TTL (IPv4) / Hop limit (IPv6)
+ * HMO option.
+ */
+#define PDBHMO_ESP_ENCAP_DTTL (0x02 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DIFFSERV - (Decap) DiffServ Copy - Copy the IPv4 TOS or IPv6
+ * Traffic Class byte from the outer IP header to the
+ * inner IP header.
+ */
+#define PDBHMO_ESP_DIFFSERV (0x01 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_SNR - (Encap) - Sequence Number Rollover control
+ *
+ * Configures behaviour in case of SN / ESN rollover:
+ * error if SNR = 1, rollover allowed if SNR = 0.
+ * Valid only for IPsec new mode.
+ */
+#define PDBHMO_ESP_SNR (0x01 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DFBIT - (Encap) Copy DF bit - if an IPv4 tunnel mode outer IP
+ * header is coming from the PDB, copy the DF bit from the
+ * inner IP header to the outer IP header.
+ */
+#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DFV - (Decap) - DF bit value
+ *
+ * If ODF = 1, DF bit in output frame is replaced by DFV.
+ * Valid only from SEC Era 5 onwards.
+ */
+#define PDBHMO_ESP_DFV (0x04 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_ODF - (Decap) Override DF bit in IPv4 header of decapsulated
+ * output frame.
+ *
+ * If ODF = 1, DF is replaced with the value of DFV bit.
+ * Valid only from SEC Era 5 onwards.
+ */
+#define PDBHMO_ESP_ODF (0x08 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * struct ipsec_encap_cbc - PDB part for IPsec CBC encapsulation
+ * @iv: 16-byte array initialization vector
+ */
+struct ipsec_encap_cbc {
+ uint8_t iv[16];
+};
+
+
+/**
+ * struct ipsec_encap_ctr - PDB part for IPsec CTR encapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ctr {
+ uint8_t ctr_nonce[4];
+ uint32_t ctr_initial;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_ccm - PDB part for IPsec CCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ccm {
+ uint8_t salt[4];
+ uint32_t ccm_opt;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_gcm - PDB part for IPsec GCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @rsvd: reserved, do not use
+ * @iv: initialization vector
+ */
+struct ipsec_encap_gcm {
+ uint8_t salt[4];
+ uint32_t rsvd;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_pdb - PDB for IPsec encapsulation
+ * @options: MSB-LSB description (both for legacy and new modes)
+ * hmo (header manipulation options) - 4b
+ * reserved - 4b
+ * next header (legacy) / reserved (new) - 8b
+ * next header offset (legacy) / AOIPHO (actual outer IP header offset) - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @spi: IPsec SPI (Security Parameters Index)
+ * @ip_hdr_len: optional IP Header length (in bytes)
+ * reserved - 16b
+ * Opt. IP Hdr Len - 16b
+ * @ip_hdr: optional IP Header content (only for IPsec legacy mode)
+ */
+struct ipsec_encap_pdb {
+ uint32_t options;
+ uint32_t seq_num_ext_hi;
+ uint32_t seq_num;
+ union {
+ struct ipsec_encap_cbc cbc;
+ struct ipsec_encap_ctr ctr;
+ struct ipsec_encap_ccm ccm;
+ struct ipsec_encap_gcm gcm;
+ };
+ uint32_t spi;
+ uint32_t ip_hdr_len;
+ uint8_t ip_hdr[0];
+};
+
+static inline unsigned int
+__rta_copy_ipsec_encap_pdb(struct program *program,
+ struct ipsec_encap_pdb *pdb,
+ uint32_t algtype)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out32(program, pdb->options);
+ __rta_out32(program, pdb->seq_num_ext_hi);
+ __rta_out32(program, pdb->seq_num);
+
+ switch (algtype & OP_PCL_IPSEC_CIPHER_MASK) {
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_NULL:
+ rta_copy_data(program, pdb->cbc.iv, sizeof(pdb->cbc.iv));
+ break;
+
+ case OP_PCL_IPSEC_AES_CTR:
+ rta_copy_data(program, pdb->ctr.ctr_nonce,
+ sizeof(pdb->ctr.ctr_nonce));
+ __rta_out32(program, pdb->ctr.ctr_initial);
+ __rta_out64(program, true, pdb->ctr.iv);
+ break;
+
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ rta_copy_data(program, pdb->ccm.salt, sizeof(pdb->ccm.salt));
+ __rta_out32(program, pdb->ccm.ccm_opt);
+ __rta_out64(program, true, pdb->ccm.iv);
+ break;
+
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ rta_copy_data(program, pdb->gcm.salt, sizeof(pdb->gcm.salt));
+ __rta_out32(program, pdb->gcm.rsvd);
+ __rta_out64(program, true, pdb->gcm.iv);
+ break;
+ }
+
+ __rta_out32(program, pdb->spi);
+ __rta_out32(program, pdb->ip_hdr_len);
+
+ return start_pc;
+}
+
+/**
+ * struct ipsec_decap_cbc - PDB part for IPsec CBC decapsulation
+ * @rsvd: reserved, do not use
+ */
+struct ipsec_decap_cbc {
+ uint32_t rsvd[2];
+};
+
+/**
+ * struct ipsec_decap_ctr - PDB part for IPsec CTR decapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ */
+struct ipsec_decap_ctr {
+ uint8_t ctr_nonce[4];
+ uint32_t ctr_initial;
+};
+
+/**
+ * struct ipsec_decap_ccm - PDB part for IPsec CCM decapsulation
+ * @salt: 3-byte salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ */
+struct ipsec_decap_ccm {
+ uint8_t salt[4];
+ uint32_t ccm_opt;
+};
+
+/**
+ * struct ipsec_decap_gcm - PDB part for IPsec GCN decapsulation
+ * @salt: 4-byte salt
+ * @rsvd: reserved, do not use
+ */
+struct ipsec_decap_gcm {
+ uint8_t salt[4];
+ uint32_t rsvd;
+};
+
+/**
+ * struct ipsec_decap_pdb - PDB for IPsec decapsulation
+ * @options: MSB-LSB description (both for legacy and new modes)
+ * hmo (header manipulation options) - 4b
+ * IP header length - 12b
+ * next header offset (legacy) / AOIPHO (actual outer IP header offset) - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @anti_replay: Anti-replay window; size depends on ARS (option flags);
+ * format must be Big Endian, irrespective of platform
+ */
+struct ipsec_decap_pdb {
+ uint32_t options;
+ union {
+ struct ipsec_decap_cbc cbc;
+ struct ipsec_decap_ctr ctr;
+ struct ipsec_decap_ccm ccm;
+ struct ipsec_decap_gcm gcm;
+ };
+ uint32_t seq_num_ext_hi;
+ uint32_t seq_num;
+ uint32_t anti_replay[4];
+};
+
+static inline unsigned int
+__rta_copy_ipsec_decap_pdb(struct program *program,
+ struct ipsec_decap_pdb *pdb,
+ uint32_t algtype)
+{
+ unsigned int start_pc = program->current_pc;
+ unsigned int i, ars;
+
+ __rta_out32(program, pdb->options);
+
+ switch (algtype & OP_PCL_IPSEC_CIPHER_MASK) {
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_NULL:
+ __rta_out32(program, pdb->cbc.rsvd[0]);
+ __rta_out32(program, pdb->cbc.rsvd[1]);
+ break;
+
+ case OP_PCL_IPSEC_AES_CTR:
+ rta_copy_data(program, pdb->ctr.ctr_nonce,
+ sizeof(pdb->ctr.ctr_nonce));
+ __rta_out32(program, pdb->ctr.ctr_initial);
+ break;
+
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ rta_copy_data(program, pdb->ccm.salt, sizeof(pdb->ccm.salt));
+ __rta_out32(program, pdb->ccm.ccm_opt);
+ break;
+
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ rta_copy_data(program, pdb->gcm.salt, sizeof(pdb->gcm.salt));
+ __rta_out32(program, pdb->gcm.rsvd);
+ break;
+ }
+
+ __rta_out32(program, pdb->seq_num_ext_hi);
+ __rta_out32(program, pdb->seq_num);
+
+ switch (pdb->options & PDBOPTS_ESP_ARS_MASK) {
+ case PDBOPTS_ESP_ARS128:
+ ars = 4;
+ break;
+ case PDBOPTS_ESP_ARS64:
+ ars = 2;
+ break;
+ case PDBOPTS_ESP_ARS32:
+ ars = 1;
+ break;
+ case PDBOPTS_ESP_ARSNONE:
+ default:
+ ars = 0;
+ break;
+ }
+
+ for (i = 0; i < ars; i++)
+ __rta_out_be32(program, pdb->anti_replay[i]);
+
+ return start_pc;
+}
+
+/**
+ * enum ipsec_icv_size - Type selectors for icv size in IPsec protocol
+ * @IPSEC_ICV_MD5_SIZE: full-length MD5 ICV
+ * @IPSEC_ICV_MD5_TRUNC_SIZE: truncated MD5 ICV
+ */
+enum ipsec_icv_size {
+ IPSEC_ICV_MD5_SIZE = 16,
+ IPSEC_ICV_MD5_TRUNC_SIZE = 12
+};
+
+/*
+ * IPSec ESP Datapath Protocol Override Register (DPOVRD)
+ */
+
+#define IPSEC_DECO_DPOVRD_USE 0x80
+
+struct ipsec_deco_dpovrd {
+ uint8_t ovrd_ecn;
+ uint8_t ip_hdr_len;
+ uint8_t nh_offset;
+ union {
+ uint8_t next_header; /* next header if encap */
+ uint8_t rsvd; /* reserved if decap */
+ };
+};
+
+struct ipsec_new_encap_deco_dpovrd {
+#define IPSEC_NEW_ENCAP_DECO_DPOVRD_USE 0x8000
+ uint16_t ovrd_ip_hdr_len; /* OVRD + outer IP header material
+ * length
+ */
+#define IPSEC_NEW_ENCAP_OIMIF 0x80
+ uint8_t oimif_aoipho; /* OIMIF + actual outer IP header
+ * offset
+ */
+ uint8_t rsvd;
+};
+
+struct ipsec_new_decap_deco_dpovrd {
+ uint8_t ovrd;
+ uint8_t aoipho_hi; /* upper nibble of actual outer IP
+ * header
+ */
+ uint16_t aoipho_lo_ip_hdr_len; /* lower nibble of actual outer IP
+ * header + outer IP header material
+ */
+};
+
+static inline void
+__gen_auth_key(struct program *program, struct alginfo *authdata)
+{
+ uint32_t dkp_protid;
+
+ switch (authdata->algtype & OP_PCL_IPSEC_AUTH_MASK) {
+ case OP_PCL_IPSEC_HMAC_MD5_96:
+ case OP_PCL_IPSEC_HMAC_MD5_128:
+ dkp_protid = OP_PCLID_DKP_MD5;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA1_96:
+ case OP_PCL_IPSEC_HMAC_SHA1_160:
+ dkp_protid = OP_PCLID_DKP_SHA1;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_256_128:
+ dkp_protid = OP_PCLID_DKP_SHA256;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_384_192:
+ dkp_protid = OP_PCLID_DKP_SHA384;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_512_256:
+ dkp_protid = OP_PCLID_DKP_SHA512;
+ break;
+ default:
+ KEY(program, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ return;
+ }
+
+ if (authdata->key_type == RTA_DATA_PTR)
+ DKP_PROTOCOL(program, dkp_protid, OP_PCL_DKP_SRC_PTR,
+ OP_PCL_DKP_DST_PTR, (uint16_t)authdata->keylen,
+ authdata->key, authdata->key_type);
+ else
+ DKP_PROTOCOL(program, dkp_protid, OP_PCL_DKP_SRC_IMM,
+ OP_PCL_DKP_DST_IMM, (uint16_t)authdata->keylen,
+ authdata->key, authdata->key_type);
+}
+
+/**
+ * cnstr_shdsc_ipsec_encap - IPSec ESP encapsulation protocol-level shared
+ * descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for a details of the encapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions
+ * If an authentication key is required by the protocol:
+ * -For SEC Eras 1-5, an MDHA split key must be provided;
+ * Note that the size of the split key itself must be specified.
+ * -For SEC Eras 6+, a "normal" key must be provided; DKP (Derived
+ * Key Protocol) will be used to compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
+ struct ipsec_encap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+ COPY_DATA(p, pdb->ip_hdr, pdb->ip_hdr_len);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, BOTH|SHRD);
+ if (authdata->keylen) {
+ if (rta_sec_era < RTA_SEC_ERA_6)
+ KEY(p, MDHA_SPLIT_KEY, authdata->key_enc_flags,
+ authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ else
+ __gen_auth_key(p, authdata);
+ }
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_ipsec_decap - IPSec ESP decapsulation protocol-level shared
+ * descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the decapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions
+ * If an authentication key is required by the protocol:
+ * -For SEC Eras 1-5, an MDHA split key must be provided;
+ * Note that the size of the split key itself must be specified.
+ * -For SEC Eras 6+, a "normal" key must be provided; DKP (Derived
+ * Key Protocol) will be used to compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_decap(uint32_t *descbuf, bool ps, bool swap,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, BOTH|SHRD);
+ if (authdata->keylen) {
+ if (rta_sec_era < RTA_SEC_ERA_6)
+ KEY(p, MDHA_SPLIT_KEY, authdata->key_enc_flags,
+ authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ else
+ __gen_auth_key(p, authdata);
+ }
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_ipsec_encap_des_aes_xcbc - IPSec DES-CBC/3DES-CBC and
+ * AES-XCBC-MAC-96 ESP encapsulation shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for a details of the encapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_PCL_IPSEC_DES, OP_PCL_IPSEC_3DES.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm value: OP_PCL_IPSEC_AES_XCBC_MAC_96.
+ *
+ * Supported only for platforms with 32-bit address pointers and SEC ERA 4 or
+ * higher. The tunnel/transport mode of the IPsec ESP is supported only if the
+ * Outer/Transport IP Header is present in the encapsulation output packet.
+ * The descriptor performs DES-CBC/3DES-CBC & HMAC-MD5-96 and then rereads
+ * the input packet to do the AES-XCBC-MAC-96 calculation and to overwrite
+ * the MD5 ICV.
+ * The descriptor uses all the benefits of the built-in protocol by computing
+ * the IPsec ESP with a hardware supported algorithms combination
+ * (DES-CBC/3DES-CBC & HMAC-MD5-96). The HMAC-MD5 authentication algorithm
+ * was chosen in order to speed up the computational time for this intermediate
+ * step.
+ * Warning: The user must allocate at least 32 bytes for the authentication key
+ * (in order to use it also with HMAC-MD5-96),even when using a shorter key
+ * for the AES-XCBC-MAC-96.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_encap_des_aes_xcbc(uint32_t *descbuf,
+ struct ipsec_encap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(hdr);
+ LABEL(shd_ptr);
+ LABEL(keyjump);
+ LABEL(outptr);
+ LABEL(swapped_seqin_fields);
+ LABEL(swapped_seqin_ptr);
+ REFERENCE(phdr);
+ REFERENCE(pkeyjump);
+ REFERENCE(move_outlen);
+ REFERENCE(move_seqout_ptr);
+ REFERENCE(swapped_seqin_ptr_jump);
+ REFERENCE(write_swapped_seqin_ptr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+ COPY_DATA(p, pdb->ip_hdr, pdb->ip_hdr_len);
+ SET_LABEL(p, hdr);
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF);
+ /*
+ * Hard-coded KEY arguments. The descriptor uses all the benefits of
+ * the built-in protocol by computing the IPsec ESP with a hardware
+ * supported algorithms combination (DES-CBC/3DES-CBC & HMAC-MD5-96).
+ * The HMAC-MD5 authentication algorithm was chosen with
+ * the keys options from below in order to speed up the computational
+ * time for this intermediate step.
+ * Warning: The user must allocate at least 32 bytes for
+ * the authentication key (in order to use it also with HMAC-MD5-96),
+ * even when using a shorter key for the AES-XCBC-MAC-96.
+ */
+ KEY(p, MDHA_SPLIT_KEY, 0, authdata->key, 32, INLINE_KEY(authdata));
+ SET_LABEL(p, keyjump);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ IMMED);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL, OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | OP_PCL_IPSEC_HMAC_MD5_96));
+ /* Swap SEQINPTR to SEQOUTPTR. */
+ move_seqout_ptr = MOVE(p, DESCBUF, 0, MATH1, 0, 16, WAITCOMP | IMMED);
+ MATHB(p, MATH1, AND, ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR), MATH1,
+ 8, IFB | IMMED2);
+/*
+ * TODO: RTA currently doesn't support creating a LOAD command
+ * with another command as IMM.
+ * To be changed when proper support is added in RTA.
+ */
+ LOAD(p, 0xa00000e5, MATH3, 4, 4, IMMED);
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ write_swapped_seqin_ptr = MOVE(p, MATH1, 0, DESCBUF, 0, 20, WAITCOMP |
+ IMMED);
+ swapped_seqin_ptr_jump = JUMP(p, swapped_seqin_ptr, LOCAL_JUMP,
+ ALL_TRUE, 0);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ 0);
+ SEQOUTPTR(p, 0, 65535, RTO);
+ move_outlen = MOVE(p, DESCBUF, 0, MATH0, 4, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH0, SUB,
+ (uint64_t)(pdb->ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE),
+ VSEQINSZ, 4, IMMED2);
+ MATHB(p, MATH0, SUB, IPSEC_ICV_MD5_TRUNC_SIZE, VSEQOUTSZ, 4, IMMED2);
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_XCBC_MAC,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+ SEQFIFOLOAD(p, SKIP, pdb->ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | FLUSH1 | LAST1);
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+ SEQSTORE(p, CONTEXT1, 0, IPSEC_ICV_MD5_TRUNC_SIZE, 0);
+/*
+ * TODO: RTA currently doesn't support adding labels in or after Job Descriptor.
+ * To be changed when proper support is added in RTA.
+ */
+ /* Label the Shared Descriptor Pointer */
+ SET_LABEL(p, shd_ptr);
+ shd_ptr += 1;
+ /* Label the Output Pointer */
+ SET_LABEL(p, outptr);
+ outptr += 3;
+ /* Label the first word after JD */
+ SET_LABEL(p, swapped_seqin_fields);
+ swapped_seqin_fields += 8;
+ /* Label the second word after JD */
+ SET_LABEL(p, swapped_seqin_ptr);
+ swapped_seqin_ptr += 9;
+
+ PATCH_HDR(p, phdr, hdr);
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ PATCH_JUMP(p, swapped_seqin_ptr_jump, swapped_seqin_ptr);
+ PATCH_MOVE(p, move_outlen, outptr);
+ PATCH_MOVE(p, move_seqout_ptr, shd_ptr);
+ PATCH_MOVE(p, write_swapped_seqin_ptr, swapped_seqin_fields);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_ipsec_decap_des_aes_xcbc - IPSec DES-CBC/3DES-CBC and
+ * AES-XCBC-MAC-96 ESP decapsulation shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for a details of the encapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_PCL_IPSEC_DES, OP_PCL_IPSEC_3DES.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm value: OP_PCL_IPSEC_AES_XCBC_MAC_96.
+ *
+ * Supported only for platforms with 32-bit address pointers and SEC ERA 4 or
+ * higher. The tunnel/transport mode of the IPsec ESP is supported only if the
+ * Outer/Transport IP Header is present in the decapsulation input packet.
+ * The descriptor computes the AES-XCBC-MAC-96 to check if the received ICV
+ * is correct, rereads the input packet to compute the MD5 ICV, overwrites
+ * the XCBC ICV, and then sends the modified input packet to the
+ * DES-CBC/3DES-CBC & HMAC-MD5-96 IPsec.
+ * The descriptor uses all the benefits of the built-in protocol by computing
+ * the IPsec ESP with a hardware supported algorithms combination
+ * (DES-CBC/3DES-CBC & HMAC-MD5-96). The HMAC-MD5 authentication algorithm
+ * was chosen in order to speed up the computational time for this intermediate
+ * step.
+ * Warning: The user must allocate at least 32 bytes for the authentication key
+ * (in order to use it also with HMAC-MD5-96),even when using a shorter key
+ * for the AES-XCBC-MAC-96.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t ip_hdr_len = (pdb->options & PDBHDRLEN_MASK) >>
+ PDBHDRLEN_ESP_DECAP_SHIFT;
+
+ LABEL(hdr);
+ LABEL(jump_cmd);
+ LABEL(keyjump);
+ LABEL(outlen);
+ LABEL(seqin_ptr);
+ LABEL(seqout_ptr);
+ LABEL(swapped_seqout_fields);
+ LABEL(swapped_seqout_ptr);
+ REFERENCE(seqout_ptr_jump);
+ REFERENCE(phdr);
+ REFERENCE(pkeyjump);
+ REFERENCE(move_jump);
+ REFERENCE(move_jump_back);
+ REFERENCE(move_seqin_ptr);
+ REFERENCE(swapped_seqout_ptr_jump);
+ REFERENCE(write_swapped_seqout_ptr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF);
+ /*
+ * Hard-coded KEY arguments. The descriptor uses all the benefits of
+ * the built-in protocol by computing the IPsec ESP with a hardware
+ * supported algorithms combination (DES-CBC/3DES-CBC & HMAC-MD5-96).
+ * The HMAC-MD5 authentication algorithm was chosen with
+ * the keys options from bellow in order to speed up the computational
+ * time for this intermediate step.
+ * Warning: The user must allocate at least 32 bytes for
+ * the authentication key (in order to use it also with HMAC-MD5-96),
+ * even when using a shorter key for the AES-XCBC-MAC-96.
+ */
+ KEY(p, MDHA_SPLIT_KEY, 0, authdata->key, 32, INLINE_KEY(authdata));
+ SET_LABEL(p, keyjump);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ 0);
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ MATHB(p, SEQINSZ, SUB,
+ (uint64_t)(ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE), MATH0, 4,
+ IMMED2);
+ MATHB(p, MATH0, SUB, ZERO, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_MD5, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_XCBC_MAC,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_ENABLE, DIR_DEC);
+ SEQFIFOLOAD(p, SKIP, ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | FLUSH1);
+ SEQFIFOLOAD(p, ICV1, IPSEC_ICV_MD5_TRUNC_SIZE, FLUSH1 | LAST1);
+ /* Swap SEQOUTPTR to SEQINPTR. */
+ move_seqin_ptr = MOVE(p, DESCBUF, 0, MATH1, 0, 16, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR, MATH1, 8,
+ IFB | IMMED2);
+/*
+ * TODO: RTA currently doesn't support creating a LOAD command
+ * with another command as IMM.
+ * To be changed when proper support is added in RTA.
+ */
+ LOAD(p, 0xA00000e1, MATH3, 4, 4, IMMED);
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ write_swapped_seqout_ptr = MOVE(p, MATH1, 0, DESCBUF, 0, 20, WAITCOMP |
+ IMMED);
+ swapped_seqout_ptr_jump = JUMP(p, swapped_seqout_ptr, LOCAL_JUMP,
+ ALL_TRUE, 0);
+/*
+ * TODO: To be changed when proper support is added in RTA (can't load
+ * a command that is also written by RTA).
+ * Change when proper RTA support is added.
+ */
+ SET_LABEL(p, jump_cmd);
+ WORD(p, 0xA00000f3);
+ SEQINPTR(p, 0, 65535, RTO);
+ MATHB(p, MATH0, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH0, ADD, ip_hdr_len, VSEQOUTSZ, 4, IMMED2);
+ move_jump = MOVE(p, DESCBUF, 0, OFIFO, 0, 8, WAITCOMP | IMMED);
+ move_jump_back = MOVE(p, OFIFO, 0, DESCBUF, 0, 8, IMMED);
+ SEQFIFOLOAD(p, SKIP, ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+ SEQSTORE(p, CONTEXT2, 0, IPSEC_ICV_MD5_TRUNC_SIZE, 0);
+ seqout_ptr_jump = JUMP(p, seqout_ptr, LOCAL_JUMP, ALL_TRUE, CALM);
+
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_CLR_C2MODE |
+ CLRW_CLR_C2DATAS | CLRW_CLR_C2CTX | CLRW_RESET_CLS1_CHA, CLRW, 0,
+ 4, 0);
+ SEQINPTR(p, 0, 65535, RTO);
+ MATHB(p, MATH0, ADD,
+ (uint64_t)(ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE), SEQINSZ, 4,
+ IMMED2);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | OP_PCL_IPSEC_HMAC_MD5_96));
+/*
+ * TODO: RTA currently doesn't support adding labels in or after Job Descriptor.
+ * To be changed when proper support is added in RTA.
+ */
+ /* Label the SEQ OUT PTR */
+ SET_LABEL(p, seqout_ptr);
+ seqout_ptr += 2;
+ /* Label the Output Length */
+ SET_LABEL(p, outlen);
+ outlen += 4;
+ /* Label the SEQ IN PTR */
+ SET_LABEL(p, seqin_ptr);
+ seqin_ptr += 5;
+ /* Label the first word after JD */
+ SET_LABEL(p, swapped_seqout_fields);
+ swapped_seqout_fields += 8;
+ /* Label the second word after JD */
+ SET_LABEL(p, swapped_seqout_ptr);
+ swapped_seqout_ptr += 9;
+
+ PATCH_HDR(p, phdr, hdr);
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ PATCH_JUMP(p, seqout_ptr_jump, seqout_ptr);
+ PATCH_JUMP(p, swapped_seqout_ptr_jump, swapped_seqout_ptr);
+ PATCH_MOVE(p, move_jump, jump_cmd);
+ PATCH_MOVE(p, move_jump_back, seqin_ptr);
+ PATCH_MOVE(p, move_seqin_ptr, outlen);
+ PATCH_MOVE(p, write_swapped_seqout_ptr, swapped_seqout_fields);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * IPSEC_NEW_ENC_BASE_DESC_LEN - IPsec new mode encap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether Outer IP Header and/or keys can be inlined or
+ * not. To be used as first parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_ENC_BASE_DESC_LEN (5 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_encap_pdb))
+
+/**
+ * IPSEC_NEW_NULL_ENC_BASE_DESC_LEN - IPsec new mode encap shared descriptor
+ * length for the case of
+ * NULL encryption / authentication
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether Outer IP Header and/or key can be inlined or
+ * not. To be used as first parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_NULL_ENC_BASE_DESC_LEN (4 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_encap_pdb))
+
+/**
+ * cnstr_shdsc_ipsec_new_encap - IPSec new mode ESP encapsulation
+ * protocol-level shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the encapsulation PDB.
+ * @opt_ip_hdr: pointer to Optional IP Header
+ * -if OIHI = PDBOPTS_ESP_OIHI_PDB_INL, opt_ip_hdr points to the buffer to
+ * be inlined in the PDB. Number of bytes (buffer size) copied is provided
+ * in pdb->ip_hdr_len.
+ * -if OIHI = PDBOPTS_ESP_OIHI_PDB_REF, opt_ip_hdr points to the address of
+ * the Optional IP Header. The address will be inlined in the PDB verbatim.
+ * -for other values of OIHI options field, opt_ip_hdr is not used.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions.
+ * If an authentication key is required by the protocol, a "normal"
+ * key must be provided; DKP (Derived Key Protocol) will be used to
+ * compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
+ bool swap,
+ struct ipsec_encap_pdb *pdb,
+ uint8_t *opt_ip_hdr,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ if (rta_sec_era < RTA_SEC_ERA_8) {
+ pr_err("IPsec new mode encap: available only for Era %d or above\n",
+ USER_SEC_ERA(RTA_SEC_ERA_8));
+ return -ENOTSUP;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+
+ switch (pdb->options & PDBOPTS_ESP_OIHI_MASK) {
+ case PDBOPTS_ESP_OIHI_PDB_INL:
+ COPY_DATA(p, opt_ip_hdr, pdb->ip_hdr_len);
+ break;
+ case PDBOPTS_ESP_OIHI_PDB_REF:
+ if (ps)
+ COPY_DATA(p, opt_ip_hdr, 8);
+ else
+ COPY_DATA(p, opt_ip_hdr, 4);
+ break;
+ default:
+ break;
+ }
+ SET_LABEL(p, hdr);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ if (authdata->keylen)
+ __gen_auth_key(p, authdata);
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_IPSEC_NEW,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * IPSEC_NEW_DEC_BASE_DESC_LEN - IPsec new mode decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether keys can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_DEC_BASE_DESC_LEN (5 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_decap_pdb))
+
+/**
+ * IPSEC_NEW_NULL_DEC_BASE_DESC_LEN - IPsec new mode decap shared descriptor
+ * length for the case of
+ * NULL decryption / authentication
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_NULL_DEC_BASE_DESC_LEN (4 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_decap_pdb))
+
+/**
+ * cnstr_shdsc_ipsec_new_decap - IPSec new mode ESP decapsulation protocol-level
+ * shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the decapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values 0 one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions.
+ * If an authentication key is required by the protocol, a "normal"
+ * key must be provided; DKP (Derived Key Protocol) will be used to
+ * compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
+ bool swap,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ if (rta_sec_era < RTA_SEC_ERA_8) {
+ pr_err("IPsec new mode decap: available only for Era %d or above\n",
+ USER_SEC_ERA(RTA_SEC_ERA_8));
+ return -ENOTSUP;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ if (authdata->keylen)
+ __gen_auth_key(p, authdata);
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_IPSEC_NEW,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * IPSEC_AUTH_VAR_BASE_DESC_LEN - IPsec encap/decap shared descriptor length
+ * for the case of variable-length authentication
+ * only data.
+ * Note: Only for SoCs with SEC_ERA >= 3.
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether keys can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_VAR_BASE_DESC_LEN (27 * CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN - IPsec AES decap shared descriptor
+ * length for variable-length authentication only
+ * data.
+ * Note: Only for SoCs with SEC_ERA >= 3.
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN \
+ (IPSEC_AUTH_VAR_BASE_DESC_LEN + CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_BASE_DESC_LEN - IPsec encap/decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_BASE_DESC_LEN (19 * CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_AES_DEC_BASE_DESC_LEN - IPsec AES decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_AES_DEC_BASE_DESC_LEN (IPSEC_AUTH_BASE_DESC_LEN + \
+ CAAM_CMD_SZ)
+
+/**
+ * cnstr_shdsc_authenc - authenc-like descriptor
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @cipherdata: pointer to block cipher transform definitions.
+ * Valid algorithm values one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ * Valid modes for:
+ * AES: OP_ALG_AAI_* {CBC, CTR}
+ * DES, 3DES: OP_ALG_AAI_CBC
+ * @authdata: pointer to authentication transform definitions.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_* {MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512}
+ * Note: The key for authentication is supposed to be given as plain text.
+ * Note: There's no support for keys longer than the block size of the
+ * underlying hash function, according to the selected algorithm.
+ *
+ * @ivlen: length of the IV to be read from the input frame, before any data
+ * to be processed
+ * @auth_only_len: length of the data to be authenticated-only (commonly IP
+ * header, IV, Sequence number and SPI)
+ * Note: Extended Sequence Number processing is NOT supported
+ *
+ * @trunc_len: the length of the ICV to be written to the output frame. If 0,
+ * then the corresponding length of the digest, according to the
+ * selected algorithm shall be used.
+ * @dir: Protocol direction, encapsulation or decapsulation (DIR_ENC/DIR_DEC)
+ *
+ * Note: Here's how the input frame needs to be formatted so that the processing
+ * will be done correctly:
+ * For encapsulation:
+ * Input:
+ * +----+----------------+---------------------------------------------+
+ * | IV | Auth-only data | Padded data to be authenticated & Encrypted |
+ * +----+----------------+---------------------------------------------+
+ * Output:
+ * +--------------------------------------+
+ * | Authenticated & Encrypted data | ICV |
+ * +--------------------------------+-----+
+
+ * For decapsulation:
+ * Input:
+ * +----+----------------+--------------------------------+-----+
+ * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
+ * +----+----------------+--------------------------------+-----+
+ * Output:
+ * +----+--------------------------+
+ * | Decrypted & authenticated data |
+ * +----+--------------------------+
+ *
+ * Note: This descriptor can use per-packet commands, encoded as below in the
+ * DPOVRD register:
+ * 32 24 16 0
+ * +------+---------------------+
+ * | 0x80 | 0x00| auth_only_len |
+ * +------+---------------------+
+ *
+ * This mechanism is available only for SoCs having SEC ERA >= 3. In other
+ * words, this will not work for P4080TO2
+ *
+ * Note: The descriptor does not add any kind of padding to the input data,
+ * so the upper layer needs to ensure that the data is padded properly,
+ * according to the selected cipher. Failure to do so will result in
+ * the descriptor failing with a data-size error.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ uint16_t ivlen, uint16_t auth_only_len,
+ uint8_t trunc_len, uint8_t dir)
+{
+ struct program prg;
+ struct program *p = &prg;
+ const bool need_dk = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+ (cipherdata->algmode == OP_ALG_AAI_CBC);
+
+ LABEL(skip_patch_len);
+ LABEL(keyjmp);
+ LABEL(skipkeys);
+ LABEL(aonly_len_offset);
+ REFERENCE(pskip_patch_len);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pskipkeys);
+ REFERENCE(read_len);
+ REFERENCE(write_len);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ /*
+ * Since we currently assume that key length is equal to hash digest
+ * size, it's ok to truncate keylen value.
+ */
+ trunc_len = trunc_len && (trunc_len < authdata->keylen) ?
+ trunc_len : (uint8_t)authdata->keylen;
+
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ /*
+ * M0 will contain the value provided by the user when creating
+ * the shared descriptor. If the user provided an override in
+ * DPOVRD, then M0 will contain that value
+ */
+ MATHB(p, MATH0, ADD, auth_only_len, MATH0, 4, IMMED2);
+
+ if (rta_sec_era >= RTA_SEC_ERA_3) {
+ /*
+ * Check if the user wants to override the auth-only len
+ */
+ MATHB(p, DPOVRD, ADD, 0x80000000, MATH2, 4, IMMED2);
+
+ /*
+ * No need to patch the length of the auth-only data read if
+ * the user did not override it
+ */
+ pskip_patch_len = JUMP(p, skip_patch_len, LOCAL_JUMP, ALL_TRUE,
+ MATH_N);
+
+ /* Get auth-only len in M0 */
+ MATHB(p, MATH2, AND, 0xFFFF, MATH0, 4, IMMED2);
+
+ /*
+ * Since M0 is used in calculations, don't mangle it, copy
+ * its content to M1 and use this for patching.
+ */
+ MATHB(p, MATH0, ADD, MATH1, MATH1, 4, 0);
+
+ read_len = MOVE(p, DESCBUF, 0, MATH1, 0, 6, WAITCOMP | IMMED);
+ write_len = MOVE(p, MATH1, 0, DESCBUF, 0, 8, WAITCOMP | IMMED);
+
+ SET_LABEL(p, skip_patch_len);
+ }
+ /*
+ * MATH0 contains the value in DPOVRD w/o the MSB, or the initial
+ * value, as provided by the user at descriptor creation time
+ */
+ if (dir == DIR_ENC)
+ MATHB(p, MATH0, ADD, ivlen, MATH0, 4, IMMED2);
+ else
+ MATHB(p, MATH0, ADD, ivlen + trunc_len, MATH0, 4, IMMED2);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ /* Do operation */
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC,
+ OP_ALG_AS_INITFINAL,
+ dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ dir);
+
+ if (need_dk)
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ pskipkeys = JUMP(p, skipkeys, LOCAL_JUMP, ALL_TRUE, 0);
+
+ SET_LABEL(p, keyjmp);
+
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL,
+ dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ dir);
+
+ if (need_dk) {
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
+ OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE, dir);
+ SET_LABEL(p, skipkeys);
+ } else {
+ SET_LABEL(p, skipkeys);
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ }
+
+ /*
+ * Prepare the length of the data to be both encrypted/decrypted
+ * and authenticated/checked
+ */
+ MATHB(p, SEQINSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ MATHB(p, VSEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* Prepare for writing the output frame */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ SET_LABEL(p, aonly_len_offset);
+
+ /* Read IV */
+ if (cipherdata->algmode == OP_ALG_AAI_CTR)
+ SEQLOAD(p, CONTEXT1, 16, ivlen, 0);
+ else
+ SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+
+ /*
+ * Read data needed only for authentication. This is overwritten above
+ * if the user requested it.
+ */
+ SEQFIFOLOAD(p, MSG2, auth_only_len, 0);
+
+ if (dir == DIR_ENC) {
+ /*
+ * Read input plaintext, encrypt and authenticate & write to
+ * output
+ */
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+
+ /* Finally, write the ICV */
+ SEQSTORE(p, CONTEXT2, 0, trunc_len, 0);
+ } else {
+ /*
+ * Read input ciphertext, decrypt and authenticate & write to
+ * output
+ */
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+
+ /* Read the ICV to check */
+ SEQFIFOLOAD(p, ICV2, trunc_len, LAST2);
+ }
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pskipkeys, skipkeys);
+ PATCH_JUMP(p, pskipkeys, skipkeys);
+
+ if (rta_sec_era >= RTA_SEC_ERA_3) {
+ PATCH_JUMP(p, pskip_patch_len, skip_patch_len);
+ PATCH_MOVE(p, read_len, aonly_len_offset);
+ PATCH_MOVE(p, write_len, aonly_len_offset);
+ }
+
+ return PROGRAM_FINALIZE(p);
+}
+
+#endif /* __DESC_IPSEC_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta.h
new file mode 100644
index 00000000..c4bbad0b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta.h
@@ -0,0 +1,921 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_RTA_H__
+#define __RTA_RTA_H__
+
+#include "rta/sec_run_time_asm.h"
+#include "rta/fifo_load_store_cmd.h"
+#include "rta/header_cmd.h"
+#include "rta/jump_cmd.h"
+#include "rta/key_cmd.h"
+#include "rta/load_cmd.h"
+#include "rta/math_cmd.h"
+#include "rta/move_cmd.h"
+#include "rta/nfifo_cmd.h"
+#include "rta/operation_cmd.h"
+#include "rta/protocol_cmd.h"
+#include "rta/seq_in_out_ptr_cmd.h"
+#include "rta/signature_cmd.h"
+#include "rta/store_cmd.h"
+
+/**
+ * DOC: About
+ *
+ * RTA (Runtime Assembler) Library is an easy and flexible runtime method for
+ * writing SEC descriptors. It implements a thin abstraction layer above
+ * SEC commands set; the resulting code is compact and similar to a
+ * descriptor sequence.
+ *
+ * RTA library improves comprehension of the SEC code, adds flexibility for
+ * writing complex descriptors and keeps the code lightweight. Should be used
+ * by whom needs to encode descriptors at runtime, with comprehensible flow
+ * control in descriptor.
+ */
+
+/**
+ * DOC: Usage
+ *
+ * RTA is used in kernel space by the SEC / CAAM (Cryptographic Acceleration and
+ * Assurance Module) kernel module (drivers/crypto/caam) and SEC / CAAM QI
+ * kernel module (Freescale QorIQ SDK).
+ *
+ * RTA is used in user space by USDPAA - User Space DataPath Acceleration
+ * Architecture (Freescale QorIQ SDK).
+ */
+
+/**
+ * DOC: Descriptor Buffer Management Routines
+ *
+ * Contains details of RTA descriptor buffer management and SEC Era
+ * management routines.
+ */
+
+/**
+ * PROGRAM_CNTXT_INIT - must be called before any descriptor run-time assembly
+ * call type field carry info i.e. whether descriptor is
+ * shared or job descriptor.
+ * @program: pointer to struct program
+ * @buffer: input buffer where the descriptor will be placed (uint32_t *)
+ * @offset: offset in input buffer from where the data will be written
+ * (unsigned int)
+ */
+#define PROGRAM_CNTXT_INIT(program, buffer, offset) \
+ rta_program_cntxt_init(program, buffer, offset)
+
+/**
+ * PROGRAM_FINALIZE - must be called to mark completion of RTA call.
+ * @program: pointer to struct program
+ *
+ * Return: total size of the descriptor in words or negative number on error.
+ */
+#define PROGRAM_FINALIZE(program) rta_program_finalize(program)
+
+/**
+ * PROGRAM_SET_36BIT_ADDR - must be called to set pointer size to 36 bits
+ * @program: pointer to struct program
+ *
+ * Return: current size of the descriptor in words (unsigned int).
+ */
+#define PROGRAM_SET_36BIT_ADDR(program) rta_program_set_36bit_addr(program)
+
+/**
+ * PROGRAM_SET_BSWAP - must be called to enable byte swapping
+ * @program: pointer to struct program
+ *
+ * Byte swapping on a 4-byte boundary will be performed at the end - when
+ * calling PROGRAM_FINALIZE().
+ *
+ * Return: current size of the descriptor in words (unsigned int).
+ */
+#define PROGRAM_SET_BSWAP(program) rta_program_set_bswap(program)
+
+/**
+ * WORD - must be called to insert in descriptor buffer a 32bit value
+ * @program: pointer to struct program
+ * @val: input value to be written in descriptor buffer (uint32_t)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define WORD(program, val) rta_word(program, val)
+
+/**
+ * DWORD - must be called to insert in descriptor buffer a 64bit value
+ * @program: pointer to struct program
+ * @val: input value to be written in descriptor buffer (uint64_t)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define DWORD(program, val) rta_dword(program, val)
+
+/**
+ * COPY_DATA - must be called to insert in descriptor buffer data larger than
+ * 64bits.
+ * @program: pointer to struct program
+ * @data: input data to be written in descriptor buffer (uint8_t *)
+ * @len: length of input data (unsigned int)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define COPY_DATA(program, data, len) rta_copy_data(program, (data), (len))
+
+/**
+ * DESC_LEN - determines job / shared descriptor buffer length (in words)
+ * @buffer: descriptor buffer (uint32_t *)
+ *
+ * Return: descriptor buffer length in words (unsigned int).
+ */
+#define DESC_LEN(buffer) rta_desc_len(buffer)
+
+/**
+ * DESC_BYTES - determines job / shared descriptor buffer length (in bytes)
+ * @buffer: descriptor buffer (uint32_t *)
+ *
+ * Return: descriptor buffer length in bytes (unsigned int).
+ */
+#define DESC_BYTES(buffer) rta_desc_bytes(buffer)
+
+/*
+ * SEC HW block revision.
+ *
+ * This *must not be confused with SEC version*:
+ * - SEC HW block revision format is "v"
+ * - SEC revision format is "x.y"
+ */
+extern enum rta_sec_era rta_sec_era;
+
+/**
+ * rta_set_sec_era - Set SEC Era HW block revision for which the RTA library
+ * will generate the descriptors.
+ * @era: SEC Era (enum rta_sec_era)
+ *
+ * Return: 0 if the ERA was set successfully, -1 otherwise (int)
+ *
+ * Warning 1: Must be called *only once*, *before* using any other RTA API
+ * routine.
+ *
+ * Warning 2: *Not thread safe*.
+ */
+static inline int
+rta_set_sec_era(enum rta_sec_era era)
+{
+ if (era > MAX_SEC_ERA) {
+ rta_sec_era = DEFAULT_SEC_ERA;
+ pr_err("Unsupported SEC ERA. Defaulting to ERA %d\n",
+ DEFAULT_SEC_ERA + 1);
+ return -1;
+ }
+
+ rta_sec_era = era;
+ return 0;
+}
+
+/**
+ * rta_get_sec_era - Get SEC Era HW block revision for which the RTA library
+ * will generate the descriptors.
+ *
+ * Return: SEC Era (unsigned int).
+ */
+static inline unsigned int
+rta_get_sec_era(void)
+{
+ return rta_sec_era;
+}
+
+/**
+ * DOC: SEC Commands Routines
+ *
+ * Contains details of RTA wrapper routines over SEC engine commands.
+ */
+
+/**
+ * SHR_HDR - Configures Shared Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the shared
+ * descriptor should start (@c unsigned int).
+ * @flags: operational flags: RIF, DNR, CIF, SC, PD
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SHR_HDR(program, share, start_idx, flags) \
+ rta_shr_header(program, share, start_idx, flags)
+
+/**
+ * JOB_HDR - Configures JOB Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the job
+ * descriptor should start (unsigned int). In case SHR bit is present
+ * in flags, this will be the shared descriptor length.
+ * @share_desc: pointer to shared descriptor, in case SHR bit is set (uint64_t)
+ * @flags: operational flags: RSMS, DNR, TD, MTD, REO, SHR
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JOB_HDR(program, share, start_idx, share_desc, flags) \
+ rta_job_header(program, share, start_idx, share_desc, flags, 0)
+
+/**
+ * JOB_HDR_EXT - Configures JOB Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the job
+ * descriptor should start (unsigned int). In case SHR bit is present
+ * in flags, this will be the shared descriptor length.
+ * @share_desc: pointer to shared descriptor, in case SHR bit is set (uint64_t)
+ * @flags: operational flags: RSMS, DNR, TD, MTD, REO, SHR
+ * @ext_flags: extended header flags: DSV (DECO Select Valid), DECO Id (limited
+ * by DSEL_MASK).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JOB_HDR_EXT(program, share, start_idx, share_desc, flags, ext_flags) \
+ rta_job_header(program, share, start_idx, share_desc, flags | EXT, \
+ ext_flags)
+
+/**
+ * MOVE - Configures MOVE and MOVE_LEN commands
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVE(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVE, src, src_offset, dst, dst_offset, length, opt)
+
+/**
+ * MOVEB - Configures MOVEB command
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Identical with MOVE command if byte swapping not enabled; else - when src/dst
+ * is descriptor buffer or MATH registers, data type is byte array when MOVE
+ * data type is 4-byte array and vice versa.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVEB(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVEB, src, src_offset, dst, dst_offset, length, \
+ opt)
+
+/**
+ * MOVEDW - Configures MOVEDW command
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Identical with MOVE command, with the following differences: data type is
+ * 8-byte array; word swapping is performed when SEC is programmed in little
+ * endian mode.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVEDW(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVEDW, src, src_offset, dst, dst_offset, length, \
+ opt)
+
+/**
+ * FIFOLOAD - Configures FIFOLOAD command to load message data, PKHA data, IV,
+ * ICV, AAD and bit length message data into Input Data FIFO.
+ * @program: pointer to struct program
+ * @data: input data type to store: PKHA registers, IFIFO, MSG1, MSG2,
+ * MSGOUTSNOOP, MSGINSNOOP, IV1, IV2, AAD1, ICV1, ICV2, BIT_DATA, SKIP.
+ * @src: pointer or actual data in case of immediate load; IMMED, COPY and DCOPY
+ * flags indicate action taken (inline imm data, inline ptr, inline from
+ * ptr).
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF, IMMED, EXT, CLASS1, CLASS2, BOTH, FLUSH1,
+ * LAST1, LAST2, COPY, DCOPY.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define FIFOLOAD(program, data, src, length, flags) \
+ rta_fifo_load(program, data, src, length, flags)
+
+/**
+ * SEQFIFOLOAD - Configures SEQ FIFOLOAD command to load message data, PKHA
+ * data, IV, ICV, AAD and bit length message data into Input Data
+ * FIFO.
+ * @program: pointer to struct program
+ * @data: input data type to store: PKHA registers, IFIFO, MSG1, MSG2,
+ * MSGOUTSNOOP, MSGINSNOOP, IV1, IV2, AAD1, ICV1, ICV2, BIT_DATA, SKIP.
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: VLF, CLASS1, CLASS2, BOTH, FLUSH1, LAST1, LAST2,
+ * AIDF.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQFIFOLOAD(program, data, length, flags) \
+ rta_fifo_load(program, data, NONE, length, flags|SEQ)
+
+/**
+ * FIFOSTORE - Configures FIFOSTORE command, to move data from Output Data FIFO
+ * to external memory via DMA.
+ * @program: pointer to struct program
+ * @data: output data type to store: PKHA registers, IFIFO, OFIFO, RNG,
+ * RNGOFIFO, AFHA_SBOX, MDHA_SPLIT_KEY, MSG, KEY1, KEY2, SKIP.
+ * @encrypt_flags: store data encryption mode: EKT, TK
+ * @dst: pointer to store location (uint64_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF, CONT, EXT, CLASS1, CLASS2, BOTH
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define FIFOSTORE(program, data, encrypt_flags, dst, length, flags) \
+ rta_fifo_store(program, data, encrypt_flags, dst, length, flags)
+
+/**
+ * SEQFIFOSTORE - Configures SEQ FIFOSTORE command, to move data from Output
+ * Data FIFO to external memory via DMA.
+ * @program: pointer to struct program
+ * @data: output data type to store: PKHA registers, IFIFO, OFIFO, RNG,
+ * RNGOFIFO, AFHA_SBOX, MDHA_SPLIT_KEY, MSG, KEY1, KEY2, METADATA, SKIP.
+ * @encrypt_flags: store data encryption mode: EKT, TK
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: VLF, CONT, EXT, CLASS1, CLASS2, BOTH
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQFIFOSTORE(program, data, encrypt_flags, length, flags) \
+ rta_fifo_store(program, data, encrypt_flags, 0, length, flags|SEQ)
+
+/**
+ * KEY - Configures KEY and SEQ KEY commands
+ * @program: pointer to struct program
+ * @key_dst: key store location: KEY1, KEY2, PKE, AFHA_SBOX, MDHA_SPLIT_KEY
+ * @encrypt_flags: key encryption mode: ENC, EKT, TK, NWB, PTS
+ * @src: pointer or actual data in case of immediate load (uint64_t); IMMED,
+ * COPY and DCOPY flags indicate action taken (inline imm data,
+ * inline ptr, inline from ptr).
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: for KEY: SGF, IMMED, COPY, DCOPY; for SEQKEY: SEQ,
+ * VLF, AIDF.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define KEY(program, key_dst, encrypt_flags, src, length, flags) \
+ rta_key(program, key_dst, encrypt_flags, src, length, flags)
+
+/**
+ * SEQINPTR - Configures SEQ IN PTR command
+ * @program: pointer to struct program
+ * @src: starting address for Input Sequence (uint64_t)
+ * @length: number of bytes in (or to be added to) Input Sequence (uint32_t)
+ * @flags: operational flags: RBS, INL, SGF, PRE, EXT, RTO, RJD, SOP (when PRE,
+ * RTO or SOP are set, @src parameter must be 0).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQINPTR(program, src, length, flags) \
+ rta_seq_in_ptr(program, src, length, flags)
+
+/**
+ * SEQOUTPTR - Configures SEQ OUT PTR command
+ * @program: pointer to struct program
+ * @dst: starting address for Output Sequence (uint64_t)
+ * @length: number of bytes in (or to be added to) Output Sequence (uint32_t)
+ * @flags: operational flags: SGF, PRE, EXT, RTO, RST, EWS (when PRE or RTO are
+ * set, @dst parameter must be 0).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQOUTPTR(program, dst, length, flags) \
+ rta_seq_out_ptr(program, dst, length, flags)
+
+/**
+ * ALG_OPERATION - Configures ALGORITHM OPERATION command
+ * @program: pointer to struct program
+ * @cipher_alg: algorithm to be used
+ * @aai: Additional Algorithm Information; contains mode information that is
+ * associated with the algorithm (check desc.h for specific values).
+ * @algo_state: algorithm state; defines the state of the algorithm that is
+ * being executed (check desc.h file for specific values).
+ * @icv_check: ICV checking; selects whether the algorithm should check
+ * calculated ICV with known ICV: ICV_CHECK_ENABLE,
+ * ICV_CHECK_DISABLE.
+ * @enc: selects between encryption and decryption: DIR_ENC, DIR_DEC
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define ALG_OPERATION(program, cipher_alg, aai, algo_state, icv_check, enc) \
+ rta_operation(program, cipher_alg, aai, algo_state, icv_check, enc)
+
+/**
+ * PROTOCOL - Configures PROTOCOL OPERATION command
+ * @program: pointer to struct program
+ * @optype: operation type: OP_TYPE_UNI_PROTOCOL / OP_TYPE_DECAP_PROTOCOL /
+ * OP_TYPE_ENCAP_PROTOCOL.
+ * @protid: protocol identifier value (check desc.h file for specific values)
+ * @protoinfo: protocol dependent value (check desc.h file for specific values)
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define PROTOCOL(program, optype, protid, protoinfo) \
+ rta_proto_operation(program, optype, protid, protoinfo)
+
+/**
+ * DKP_PROTOCOL - Configures DKP (Derived Key Protocol) PROTOCOL command
+ * @program: pointer to struct program
+ * @protid: protocol identifier value - one of the following:
+ * OP_PCLID_DKP_{MD5 | SHA1 | SHA224 | SHA256 | SHA384 | SHA512}
+ * @key_src: How the initial ("negotiated") key is provided to the DKP protocol.
+ * Valid values - one of OP_PCL_DKP_SRC_{IMM, SEQ, PTR, SGF}. Not all
+ * (key_src,key_dst) combinations are allowed.
+ * @key_dst: How the derived ("split") key is returned by the DKP protocol.
+ * Valid values - one of OP_PCL_DKP_DST_{IMM, SEQ, PTR, SGF}. Not all
+ * (key_src,key_dst) combinations are allowed.
+ * @keylen: length of the initial key, in bytes (uint16_t)
+ * @key: address where algorithm key resides; virtual address if key_type is
+ * RTA_DATA_IMM, physical (bus) address if key_type is RTA_DATA_PTR or
+ * RTA_DATA_IMM_DMA.
+ * @key_type: enum rta_data_type
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define DKP_PROTOCOL(program, protid, key_src, key_dst, keylen, key, key_type) \
+ rta_dkp_proto(program, protid, key_src, key_dst, keylen, key, key_type)
+
+/**
+ * PKHA_OPERATION - Configures PKHA OPERATION command
+ * @program: pointer to struct program
+ * @op_pkha: PKHA operation; indicates the modular arithmetic function to
+ * execute (check desc.h file for specific values).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define PKHA_OPERATION(program, op_pkha) rta_pkha_operation(program, op_pkha)
+
+/**
+ * JUMP - Configures JUMP command
+ * @program: pointer to struct program
+ * @addr: local offset for local jumps or address pointer for non-local jumps;
+ * IMM or PTR macros must be used to indicate type.
+ * @jump_type: type of action taken by jump (enum rta_jump_type)
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: operational flags - DONE1, DONE2, BOTH; various
+ * sharing and wait conditions (JSL = 1) - NIFP, NIP, NOP, NCP, CALM,
+ * SELF, SHARED, JQP; Math and PKHA status conditions (JSL = 0) - Z, N,
+ * NV, C, PK0, PK1, PKP.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP(program, addr, jump_type, test_type, cond) \
+ rta_jump(program, addr, jump_type, test_type, cond, NONE)
+
+/**
+ * JUMP_INC - Configures JUMP_INC command
+ * @program: pointer to struct program
+ * @addr: local offset; IMM or PTR macros must be used to indicate type
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: Math status conditions (JSL = 0): Z, N, NV, C
+ * @src_dst: register to increment / decrement: MATH0-MATH3, DPOVRD, SEQINSZ,
+ * SEQOUTSZ, VSEQINSZ, VSEQOUTSZ.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP_INC(program, addr, test_type, cond, src_dst) \
+ rta_jump(program, addr, LOCAL_JUMP_INC, test_type, cond, src_dst)
+
+/**
+ * JUMP_DEC - Configures JUMP_DEC command
+ * @program: pointer to struct program
+ * @addr: local offset; IMM or PTR macros must be used to indicate type
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: Math status conditions (JSL = 0): Z, N, NV, C
+ * @src_dst: register to increment / decrement: MATH0-MATH3, DPOVRD, SEQINSZ,
+ * SEQOUTSZ, VSEQINSZ, VSEQOUTSZ.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP_DEC(program, addr, test_type, cond, src_dst) \
+ rta_jump(program, addr, LOCAL_JUMP_DEC, test_type, cond, src_dst)
+
+/**
+ * LOAD - Configures LOAD command to load data registers from descriptor or from
+ * a memory location.
+ * @program: pointer to struct program
+ * @addr: immediate value or pointer to the data to be loaded; IMMED, COPY and
+ * DCOPY flags indicate action taken (inline imm data, inline ptr, inline
+ * from ptr).
+ * @dst: destination register (uint64_t)
+ * @offset: start point to write data in destination register (uint32_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: VLF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define LOAD(program, addr, dst, offset, length, flags) \
+ rta_load(program, addr, dst, offset, length, flags)
+
+/**
+ * SEQLOAD - Configures SEQ LOAD command to load data registers from descriptor
+ * or from a memory location.
+ * @program: pointer to struct program
+ * @dst: destination register (uint64_t)
+ * @offset: start point to write data in destination register (uint32_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQLOAD(program, dst, offset, length, flags) \
+ rta_load(program, NONE, dst, offset, length, flags|SEQ)
+
+/**
+ * STORE - Configures STORE command to read data from registers and write them
+ * to a memory location.
+ * @program: pointer to struct program
+ * @src: immediate value or source register for data to be stored: KEY1SZ,
+ * KEY2SZ, DJQDA, MODE1, MODE2, DJQCTRL, DATA1SZ, DATA2SZ, DSTAT, ICV1SZ,
+ * ICV2SZ, DPID, CCTRL, ICTRL, CLRW, CSTAT, MATH0-MATH3, PKHA registers,
+ * CONTEXT1, CONTEXT2, DESCBUF, JOBDESCBUF, SHAREDESCBUF. In case of
+ * immediate value, IMMED, COPY and DCOPY flags indicate action taken
+ * (inline imm data, inline ptr, inline from ptr).
+ * @offset: start point for reading from source register (uint16_t)
+ * @dst: pointer to store location (uint64_t)
+ * @length: number of bytes to store (uint32_t)
+ * @flags: operational flags: VLF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define STORE(program, src, offset, dst, length, flags) \
+ rta_store(program, src, offset, dst, length, flags)
+
+/**
+ * SEQSTORE - Configures SEQ STORE command to read data from registers and write
+ * them to a memory location.
+ * @program: pointer to struct program
+ * @src: immediate value or source register for data to be stored: KEY1SZ,
+ * KEY2SZ, DJQDA, MODE1, MODE2, DJQCTRL, DATA1SZ, DATA2SZ, DSTAT, ICV1SZ,
+ * ICV2SZ, DPID, CCTRL, ICTRL, CLRW, CSTAT, MATH0-MATH3, PKHA registers,
+ * CONTEXT1, CONTEXT2, DESCBUF, JOBDESCBUF, SHAREDESCBUF. In case of
+ * immediate value, IMMED, COPY and DCOPY flags indicate action taken
+ * (inline imm data, inline ptr, inline from ptr).
+ * @offset: start point for reading from source register (uint16_t)
+ * @length: number of bytes to store (uint32_t)
+ * @flags: operational flags: SGF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQSTORE(program, src, offset, length, flags) \
+ rta_store(program, src, offset, NONE, length, flags|SEQ)
+
+/**
+ * MATHB - Configures MATHB command to perform binary operations
+ * @program: pointer to struct program
+ * @operand1: first operand: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE, NONE, Immediate value. IMMED must be used to
+ * indicate immediate value.
+ * @operator: function to be performed: ADD, ADDC, SUB, SUBB, OR, AND, XOR,
+ * LSHIFT, RSHIFT, SHLD.
+ * @operand2: second operand: MATH0-MATH3, DPOVRD, VSEQINSZ, VSEQOUTSZ, ABD,
+ * OFIFO, JOBSRC, ZERO, ONE, Immediate value. IMMED2 must be used to
+ * indicate immediate value.
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int).
+ * @opt: operational flags: IFB, NFU, STL, SWP, IMMED, IMMED2
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHB(program, operand1, operator, operand2, result, length, opt) \
+ rta_math(program, operand1, MATH_FUN_##operator, operand2, result, \
+ length, opt)
+
+/**
+ * MATHI - Configures MATHI command to perform binary operations
+ * @program: pointer to struct program
+ * @operand: if !SSEL: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE.
+ * if SSEL: MATH0-MATH3, DPOVRD, VSEQINSZ, VSEQOUTSZ, ABD, OFIFO,
+ * JOBSRC, ZERO, ONE.
+ * @operator: function to be performed: ADD, ADDC, SUB, SUBB, OR, AND, XOR,
+ * LSHIFT, RSHIFT, FBYT (for !SSEL only).
+ * @imm: Immediate value (uint8_t). IMMED must be used to indicate immediate
+ * value.
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int). @imm is left-extended with zeros if needed.
+ * @opt: operational flags: NFU, SSEL, SWP, IMMED
+ *
+ * If !SSEL, @operand <@operator> @imm -> @result
+ * If SSEL, @imm <@operator> @operand -> @result
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHI(program, operand, operator, imm, result, length, opt) \
+ rta_mathi(program, operand, MATH_FUN_##operator, imm, result, length, \
+ opt)
+
+/**
+ * MATHU - Configures MATHU command to perform unary operations
+ * @program: pointer to struct program
+ * @operand1: operand: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE, NONE, Immediate value. IMMED must be used to
+ * indicate immediate value.
+ * @operator: function to be performed: ZBYT, BSWAP
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int).
+ * @opt: operational flags: NFU, STL, SWP, IMMED
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHU(program, operand1, operator, result, length, opt) \
+ rta_math(program, operand1, MATH_FUN_##operator, NONE, result, length, \
+ opt)
+
+/**
+ * SIGNATURE - Configures SIGNATURE command
+ * @program: pointer to struct program
+ * @sign_type: signature type: SIGN_TYPE_FINAL, SIGN_TYPE_FINAL_RESTORE,
+ * SIGN_TYPE_FINAL_NONZERO, SIGN_TYPE_IMM_2, SIGN_TYPE_IMM_3,
+ * SIGN_TYPE_IMM_4.
+ *
+ * After SIGNATURE command, DWORD or WORD must be used to insert signature in
+ * descriptor buffer.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SIGNATURE(program, sign_type) rta_signature(program, sign_type)
+
+/**
+ * NFIFOADD - Configures NFIFO command, a shortcut of RTA Load command to write
+ * to iNfo FIFO.
+ * @program: pointer to struct program
+ * @src: source for the input data in Alignment Block:IFIFO, OFIFO, PAD,
+ * MSGOUTSNOOP, ALTSOURCE, OFIFO_SYNC, MSGOUTSNOOP_ALT.
+ * @data: type of data that is going through the Input Data FIFO: MSG, MSG1,
+ * MSG2, IV1, IV2, ICV1, ICV2, SAD1, AAD1, AAD2, AFHA_SBOX, SKIP,
+ * PKHA registers, AB1, AB2, ABD.
+ * @length: length of the data copied in FIFO registers (uint32_t)
+ * @flags: select options between:
+ * -operational flags: LAST1, LAST2, FLUSH1, FLUSH2, OC, BP
+ * -when PAD is selected as source: BM, PR, PS
+ * -padding type: <em>PAD_ZERO, PAD_NONZERO, PAD_INCREMENT, PAD_RANDOM,
+ * PAD_ZERO_N1, PAD_NONZERO_0, PAD_N1, PAD_NONZERO_N
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define NFIFOADD(program, src, data, length, flags) \
+ rta_nfifo_load(program, src, data, length, flags)
+
+/**
+ * DOC: Self Referential Code Management Routines
+ *
+ * Contains details of RTA self referential code routines.
+ */
+
+/**
+ * REFERENCE - initialize a variable used for storing an index inside a
+ * descriptor buffer.
+ * @ref: reference to a descriptor buffer's index where an update is required
+ * with a value that will be known latter in the program flow.
+ */
+#define REFERENCE(ref) int ref = -1
+
+/**
+ * LABEL - initialize a variable used for storing an index inside a descriptor
+ * buffer.
+ * @label: label stores the value with what should be updated the REFERENCE line
+ * in the descriptor buffer.
+ */
+#define LABEL(label) unsigned int label = 0
+
+/**
+ * SET_LABEL - set a LABEL value
+ * @program: pointer to struct program
+ * @label: value that will be inserted in a line previously written in the
+ * descriptor buffer.
+ */
+#define SET_LABEL(program, label) (label = rta_set_label(program))
+
+/**
+ * PATCH_JUMP - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For JUMP command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_JUMP(program, line, new_ref) rta_patch_jmp(program, line, new_ref)
+
+/**
+ * PATCH_MOVE - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For MOVE command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_MOVE(program, line, new_ref) \
+ rta_patch_move(program, line, new_ref)
+
+/**
+ * PATCH_LOAD - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For LOAD command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_LOAD(program, line, new_ref) \
+ rta_patch_load(program, line, new_ref)
+
+/**
+ * PATCH_STORE - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For STORE command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_STORE(program, line, new_ref) \
+ rta_patch_store(program, line, new_ref)
+
+/**
+ * PATCH_HDR - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For HEADER command, the value represents the start index field.
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_HDR(program, line, new_ref) \
+ rta_patch_header(program, line, new_ref)
+
+/**
+ * PATCH_RAW - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @mask: mask to be used for applying the new value (unsigned int). The mask
+ * selects which bits from the provided @new_val are taken into
+ * consideration when overwriting the existing value.
+ * @new_val: updated value that will be masked using the provided mask value
+ * and inserted in descriptor buffer at the specified line.
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_RAW(program, line, mask, new_val) \
+ rta_patch_raw(program, line, mask, new_val)
+
+#endif /* __RTA_RTA_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
new file mode 100644
index 00000000..8c807aaa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_FIFO_LOAD_STORE_CMD_H__
+#define __RTA_FIFO_LOAD_STORE_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t fifo_load_table[][2] = {
+/*1*/ { PKA0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A0 },
+ { PKA1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A1 },
+ { PKA2, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A2 },
+ { PKA3, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A3 },
+ { PKB0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B0 },
+ { PKB1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B1 },
+ { PKB2, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B2 },
+ { PKB3, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B3 },
+ { PKA, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A },
+ { PKB, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B },
+ { PKN, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_N },
+ { SKIP, FIFOLD_CLASS_SKIP },
+ { MSG1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG },
+ { MSG2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG },
+ { MSGOUTSNOOP, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG1OUT2 },
+ { MSGINSNOOP, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG },
+ { IV1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV },
+ { IV2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_IV },
+ { AAD1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_AAD },
+ { ICV1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_ICV },
+ { ICV2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV },
+ { BIT_DATA, FIFOLD_TYPE_BITDATA },
+/*23*/ { IFIFO, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_NOINFOFIFO }
+};
+
+/*
+ * Allowed FIFO_LOAD input data types for each SEC Era.
+ * Values represent the number of entries from fifo_load_table[] that are
+ * supported.
+ */
+static const unsigned int fifo_load_table_sz[] = {22, 22, 23, 23,
+ 23, 23, 23, 23};
+
+static inline int
+rta_fifo_load(struct program *program, uint32_t src,
+ uint64_t loc, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint32_t ext_length = 0, val = 0;
+ int ret = -EINVAL;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ /* write command type field */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_FIFO_LOAD;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_FIFO_LOAD;
+ }
+
+ /* Parameters checking */
+ if (is_seq_cmd) {
+ if ((flags & IMMED) || (flags & SGF)) {
+ pr_err("SEQ FIFO LOAD: Invalid command\n");
+ goto err;
+ }
+ if ((rta_sec_era <= RTA_SEC_ERA_5) && (flags & AIDF)) {
+ pr_err("SEQ FIFO LOAD: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & VLF) && ((flags & EXT) || (length >> 16))) {
+ pr_err("SEQ FIFO LOAD: Invalid usage of VLF\n");
+ goto err;
+ }
+ } else {
+ if (src == SKIP) {
+ pr_err("FIFO LOAD: Invalid src\n");
+ goto err;
+ }
+ if ((flags & AIDF) || (flags & VLF)) {
+ pr_err("FIFO LOAD: Invalid command\n");
+ goto err;
+ }
+ if ((flags & IMMED) && (flags & SGF)) {
+ pr_err("FIFO LOAD: Invalid usage of SGF and IMM\n");
+ goto err;
+ }
+ if ((flags & IMMED) && ((flags & EXT) || (length >> 16))) {
+ pr_err("FIFO LOAD: Invalid usage of EXT and IMM\n");
+ goto err;
+ }
+ }
+
+ /* write input data type field */
+ ret = __rta_map_opcode(src, fifo_load_table,
+ fifo_load_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("FIFO LOAD: Source value is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= val;
+
+ if (flags & CLASS1)
+ opcode |= FIFOLD_CLASS_CLASS1;
+ if (flags & CLASS2)
+ opcode |= FIFOLD_CLASS_CLASS2;
+ if (flags & BOTH)
+ opcode |= FIFOLD_CLASS_BOTH;
+
+ /* write fields: SGF|VLF, IMM, [LC1, LC2, F1] */
+ if (flags & FLUSH1)
+ opcode |= FIFOLD_TYPE_FLUSH1;
+ if (flags & LAST1)
+ opcode |= FIFOLD_TYPE_LAST1;
+ if (flags & LAST2)
+ opcode |= FIFOLD_TYPE_LAST2;
+ if (!is_seq_cmd) {
+ if (flags & SGF)
+ opcode |= FIFOLDST_SGF;
+ if (flags & IMMED)
+ opcode |= FIFOLD_IMM;
+ } else {
+ if (flags & VLF)
+ opcode |= FIFOLDST_VLF;
+ if (flags & AIDF)
+ opcode |= FIFOLD_AIDF;
+ }
+
+ /*
+ * Verify if extended length is required. In case of BITDATA, calculate
+ * number of full bytes and additional valid bits.
+ */
+ if ((flags & EXT) || (length >> 16)) {
+ opcode |= FIFOLDST_EXT;
+ if (src == BIT_DATA) {
+ ext_length = (length / 8);
+ length = (length % 8);
+ } else {
+ ext_length = length;
+ length = 0;
+ }
+ }
+ opcode |= (uint16_t) length;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (flags & IMMED)
+ __rta_inline_data(program, loc, flags & __COPY_MASK, length);
+ else if (!is_seq_cmd)
+ __rta_out64(program, program->ps, loc);
+
+ /* write extended length field */
+ if (opcode & FIFOLDST_EXT)
+ __rta_out32(program, ext_length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static const uint32_t fifo_store_table[][2] = {
+/*1*/ { PKA0, FIFOST_TYPE_PKHA_A0 },
+ { PKA1, FIFOST_TYPE_PKHA_A1 },
+ { PKA2, FIFOST_TYPE_PKHA_A2 },
+ { PKA3, FIFOST_TYPE_PKHA_A3 },
+ { PKB0, FIFOST_TYPE_PKHA_B0 },
+ { PKB1, FIFOST_TYPE_PKHA_B1 },
+ { PKB2, FIFOST_TYPE_PKHA_B2 },
+ { PKB3, FIFOST_TYPE_PKHA_B3 },
+ { PKA, FIFOST_TYPE_PKHA_A },
+ { PKB, FIFOST_TYPE_PKHA_B },
+ { PKN, FIFOST_TYPE_PKHA_N },
+ { PKE, FIFOST_TYPE_PKHA_E_JKEK },
+ { RNG, FIFOST_TYPE_RNGSTORE },
+ { RNGOFIFO, FIFOST_TYPE_RNGFIFO },
+ { AFHA_SBOX, FIFOST_TYPE_AF_SBOX_JKEK },
+ { MDHA_SPLIT_KEY, FIFOST_CLASS_CLASS2KEY | FIFOST_TYPE_SPLIT_KEK },
+ { MSG, FIFOST_TYPE_MESSAGE_DATA },
+ { KEY1, FIFOST_CLASS_CLASS1KEY | FIFOST_TYPE_KEY_KEK },
+ { KEY2, FIFOST_CLASS_CLASS2KEY | FIFOST_TYPE_KEY_KEK },
+ { OFIFO, FIFOST_TYPE_OUTFIFO_KEK},
+ { SKIP, FIFOST_TYPE_SKIP },
+/*22*/ { METADATA, FIFOST_TYPE_METADATA},
+ { MSG_CKSUM, FIFOST_TYPE_MESSAGE_DATA2 }
+};
+
+/*
+ * Allowed FIFO_STORE output data types for each SEC Era.
+ * Values represent the number of entries from fifo_store_table[] that are
+ * supported.
+ */
+static const unsigned int fifo_store_table_sz[] = {21, 21, 21, 21,
+ 22, 22, 22, 23};
+
+static inline int
+rta_fifo_store(struct program *program, uint32_t src,
+ uint32_t encrypt_flags, uint64_t dst,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ /* write command type field */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_FIFO_STORE;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_FIFO_STORE;
+ }
+
+ /* Parameter checking */
+ if (is_seq_cmd) {
+ if ((flags & VLF) && ((length >> 16) || (flags & EXT))) {
+ pr_err("SEQ FIFO STORE: Invalid usage of VLF\n");
+ goto err;
+ }
+ if (dst) {
+ pr_err("SEQ FIFO STORE: Invalid command\n");
+ goto err;
+ }
+ if ((src == METADATA) && (flags & (CONT | EXT))) {
+ pr_err("SEQ FIFO STORE: Invalid flags\n");
+ goto err;
+ }
+ } else {
+ if (((src == RNGOFIFO) && ((dst) || (flags & EXT))) ||
+ (src == METADATA)) {
+ pr_err("FIFO STORE: Invalid destination\n");
+ goto err;
+ }
+ }
+ if ((rta_sec_era == RTA_SEC_ERA_7) && (src == AFHA_SBOX)) {
+ pr_err("FIFO STORE: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write output data type field */
+ ret = __rta_map_opcode(src, fifo_store_table,
+ fifo_store_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("FIFO STORE: Source type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= val;
+
+ if (encrypt_flags & TK)
+ opcode |= (0x1 << FIFOST_TYPE_SHIFT);
+ if (encrypt_flags & EKT) {
+ if (rta_sec_era == RTA_SEC_ERA_1) {
+ pr_err("FIFO STORE: AES-CCM source types not supported\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ opcode |= (0x10 << FIFOST_TYPE_SHIFT);
+ opcode &= (uint32_t)~(0x20 << FIFOST_TYPE_SHIFT);
+ }
+
+ /* write flags fields */
+ if (flags & CONT)
+ opcode |= FIFOST_CONT;
+ if ((flags & VLF) && (is_seq_cmd))
+ opcode |= FIFOLDST_VLF;
+ if ((flags & SGF) && (!is_seq_cmd))
+ opcode |= FIFOLDST_SGF;
+ if (flags & CLASS1)
+ opcode |= FIFOST_CLASS_CLASS1KEY;
+ if (flags & CLASS2)
+ opcode |= FIFOST_CLASS_CLASS2KEY;
+ if (flags & BOTH)
+ opcode |= FIFOST_CLASS_BOTH;
+
+ /* Verify if extended length is required */
+ if ((length >> 16) || (flags & EXT))
+ opcode |= FIFOLDST_EXT;
+ else
+ opcode |= (uint16_t) length;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer field */
+ if ((!is_seq_cmd) && (dst))
+ __rta_out64(program, program->ps, dst);
+
+ /* write extended length field */
+ if (opcode & FIFOLDST_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_FIFO_LOAD_STORE_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
new file mode 100644
index 00000000..0c7ea938
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_HEADER_CMD_H__
+#define __RTA_HEADER_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed job header flags for each SEC Era. */
+static const uint32_t job_header_flags[] = {
+ DNR | TD | MTD | SHR | REO,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | EXT
+};
+
+/* Allowed shared header flags for each SEC Era. */
+static const uint32_t shr_header_flags[] = {
+ DNR | SC | PD,
+ DNR | SC | PD | CIF,
+ DNR | SC | PD | CIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF
+};
+
+static inline int
+rta_shr_header(struct program *program,
+ enum rta_share_type share,
+ unsigned int start_idx,
+ uint32_t flags)
+{
+ uint32_t opcode = CMD_SHARED_DESC_HDR;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & ~shr_header_flags[rta_sec_era]) {
+ pr_err("SHR_DESC: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (share) {
+ case SHR_ALWAYS:
+ opcode |= HDR_SHARE_ALWAYS;
+ break;
+ case SHR_SERIAL:
+ opcode |= HDR_SHARE_SERIAL;
+ break;
+ case SHR_NEVER:
+ /*
+ * opcode |= HDR_SHARE_NEVER;
+ * HDR_SHARE_NEVER is 0
+ */
+ break;
+ case SHR_WAIT:
+ opcode |= HDR_SHARE_WAIT;
+ break;
+ default:
+ pr_err("SHR_DESC: SHARE VALUE is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= HDR_ONE;
+ opcode |= (start_idx << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK;
+
+ if (flags & DNR)
+ opcode |= HDR_DNR;
+ if (flags & CIF)
+ opcode |= HDR_CLEAR_IFIFO;
+ if (flags & SC)
+ opcode |= HDR_SAVECTX;
+ if (flags & PD)
+ opcode |= HDR_PROP_DNR;
+ if (flags & RIF)
+ opcode |= HDR_RIF;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (program->current_instruction == 1)
+ program->shrhdr = program->buffer;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+static inline int
+rta_job_header(struct program *program,
+ enum rta_share_type share,
+ unsigned int start_idx,
+ uint64_t shr_desc, uint32_t flags,
+ uint32_t ext_flags)
+{
+ uint32_t opcode = CMD_DESC_HDR;
+ uint32_t hdr_ext = 0;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & ~job_header_flags[rta_sec_era]) {
+ pr_err("JOB_DESC: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (share) {
+ case SHR_ALWAYS:
+ opcode |= HDR_SHARE_ALWAYS;
+ break;
+ case SHR_SERIAL:
+ opcode |= HDR_SHARE_SERIAL;
+ break;
+ case SHR_NEVER:
+ /*
+ * opcode |= HDR_SHARE_NEVER;
+ * HDR_SHARE_NEVER is 0
+ */
+ break;
+ case SHR_WAIT:
+ opcode |= HDR_SHARE_WAIT;
+ break;
+ case SHR_DEFER:
+ opcode |= HDR_SHARE_DEFER;
+ break;
+ default:
+ pr_err("JOB_DESC: SHARE VALUE is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((flags & TD) && (flags & REO)) {
+ pr_err("JOB_DESC: REO flag not supported for trusted descriptors. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((rta_sec_era < RTA_SEC_ERA_7) && (flags & MTD) && !(flags & TD)) {
+ pr_err("JOB_DESC: Trying to MTD a descriptor that is not a TD. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((flags & EXT) && !(flags & SHR) && (start_idx < 2)) {
+ pr_err("JOB_DESC: Start index must be >= 2 in case of no SHR and EXT. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= HDR_ONE;
+ opcode |= ((start_idx << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK);
+
+ if (flags & EXT) {
+ opcode |= HDR_EXT;
+
+ if (ext_flags & DSV) {
+ hdr_ext |= HDR_EXT_DSEL_VALID;
+ hdr_ext |= ext_flags & DSEL_MASK;
+ }
+
+ if (ext_flags & FTD) {
+ if (rta_sec_era <= RTA_SEC_ERA_5) {
+ pr_err("JOB_DESC: Fake trusted descriptor not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ hdr_ext |= HDR_EXT_FTD;
+ }
+ }
+ if (flags & RSMS)
+ opcode |= HDR_RSLS;
+ if (flags & DNR)
+ opcode |= HDR_DNR;
+ if (flags & TD)
+ opcode |= HDR_TRUSTED;
+ if (flags & MTD)
+ opcode |= HDR_MAKE_TRUSTED;
+ if (flags & REO)
+ opcode |= HDR_REVERSE;
+ if (flags & SHR)
+ opcode |= HDR_SHARED;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (program->current_instruction == 1) {
+ program->jobhdr = program->buffer;
+
+ if (opcode & HDR_SHARED)
+ __rta_out64(program, program->ps, shr_desc);
+ }
+
+ if (flags & EXT)
+ __rta_out32(program, hdr_ext);
+
+ /* Note: descriptor length is set in program_finalize routine */
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+#endif /* __RTA_HEADER_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
new file mode 100644
index 00000000..546d22e9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_JUMP_CMD_H__
+#define __RTA_JUMP_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t jump_test_cond[][2] = {
+ { NIFP, JUMP_COND_NIFP },
+ { NIP, JUMP_COND_NIP },
+ { NOP, JUMP_COND_NOP },
+ { NCP, JUMP_COND_NCP },
+ { CALM, JUMP_COND_CALM },
+ { SELF, JUMP_COND_SELF },
+ { SHRD, JUMP_COND_SHRD },
+ { JQP, JUMP_COND_JQP },
+ { MATH_Z, JUMP_COND_MATH_Z },
+ { MATH_N, JUMP_COND_MATH_N },
+ { MATH_NV, JUMP_COND_MATH_NV },
+ { MATH_C, JUMP_COND_MATH_C },
+ { PK_0, JUMP_COND_PK_0 },
+ { PK_GCD_1, JUMP_COND_PK_GCD_1 },
+ { PK_PRIME, JUMP_COND_PK_PRIME },
+ { CLASS1, JUMP_CLASS_CLASS1 },
+ { CLASS2, JUMP_CLASS_CLASS2 },
+ { BOTH, JUMP_CLASS_BOTH }
+};
+
+static const uint32_t jump_test_math_cond[][2] = {
+ { MATH_Z, JUMP_COND_MATH_Z },
+ { MATH_N, JUMP_COND_MATH_N },
+ { MATH_NV, JUMP_COND_MATH_NV },
+ { MATH_C, JUMP_COND_MATH_C }
+};
+
+static const uint32_t jump_src_dst[][2] = {
+ { MATH0, JUMP_SRC_DST_MATH0 },
+ { MATH1, JUMP_SRC_DST_MATH1 },
+ { MATH2, JUMP_SRC_DST_MATH2 },
+ { MATH3, JUMP_SRC_DST_MATH3 },
+ { DPOVRD, JUMP_SRC_DST_DPOVRD },
+ { SEQINSZ, JUMP_SRC_DST_SEQINLEN },
+ { SEQOUTSZ, JUMP_SRC_DST_SEQOUTLEN },
+ { VSEQINSZ, JUMP_SRC_DST_VARSEQINLEN },
+ { VSEQOUTSZ, JUMP_SRC_DST_VARSEQOUTLEN }
+};
+
+static inline int
+rta_jump(struct program *program, uint64_t address,
+ enum rta_jump_type jump_type,
+ enum rta_jump_cond test_type,
+ uint32_t test_condition, uint32_t src_dst)
+{
+ uint32_t opcode = CMD_JUMP;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ if (((jump_type == GOSUB) || (jump_type == RETURN)) &&
+ (rta_sec_era < RTA_SEC_ERA_4)) {
+ pr_err("JUMP: Jump type not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ if (((jump_type == LOCAL_JUMP_INC) || (jump_type == LOCAL_JUMP_DEC)) &&
+ (rta_sec_era <= RTA_SEC_ERA_5)) {
+ pr_err("JUMP_INCDEC: Jump type not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (jump_type) {
+ case (LOCAL_JUMP):
+ /*
+ * opcode |= JUMP_TYPE_LOCAL;
+ * JUMP_TYPE_LOCAL is 0
+ */
+ break;
+ case (HALT):
+ opcode |= JUMP_TYPE_HALT;
+ break;
+ case (HALT_STATUS):
+ opcode |= JUMP_TYPE_HALT_USER;
+ break;
+ case (FAR_JUMP):
+ opcode |= JUMP_TYPE_NONLOCAL;
+ break;
+ case (GOSUB):
+ opcode |= JUMP_TYPE_GOSUB;
+ break;
+ case (RETURN):
+ opcode |= JUMP_TYPE_RETURN;
+ break;
+ case (LOCAL_JUMP_INC):
+ opcode |= JUMP_TYPE_LOCAL_INC;
+ break;
+ case (LOCAL_JUMP_DEC):
+ opcode |= JUMP_TYPE_LOCAL_DEC;
+ break;
+ default:
+ pr_err("JUMP: Invalid jump type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ switch (test_type) {
+ case (ALL_TRUE):
+ /*
+ * opcode |= JUMP_TEST_ALL;
+ * JUMP_TEST_ALL is 0
+ */
+ break;
+ case (ALL_FALSE):
+ opcode |= JUMP_TEST_INVALL;
+ break;
+ case (ANY_TRUE):
+ opcode |= JUMP_TEST_ANY;
+ break;
+ case (ANY_FALSE):
+ opcode |= JUMP_TEST_INVANY;
+ break;
+ default:
+ pr_err("JUMP: test type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ /* write test condition field */
+ if ((jump_type != LOCAL_JUMP_INC) && (jump_type != LOCAL_JUMP_DEC)) {
+ __rta_map_flags(test_condition, jump_test_cond,
+ ARRAY_SIZE(jump_test_cond), &opcode);
+ } else {
+ uint32_t val = 0;
+
+ ret = __rta_map_opcode(src_dst, jump_src_dst,
+ ARRAY_SIZE(jump_src_dst), &val);
+ if (ret < 0) {
+ pr_err("JUMP_INCDEC: SRC_DST not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ __rta_map_flags(test_condition, jump_test_math_cond,
+ ARRAY_SIZE(jump_test_math_cond), &opcode);
+ }
+
+ /* write local offset field for local jumps and user-defined halt */
+ if ((jump_type == LOCAL_JUMP) || (jump_type == LOCAL_JUMP_INC) ||
+ (jump_type == LOCAL_JUMP_DEC) || (jump_type == GOSUB) ||
+ (jump_type == HALT_STATUS))
+ opcode |= (uint32_t)(address & JUMP_OFFSET_MASK);
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (jump_type == FAR_JUMP)
+ __rta_out64(program, program->ps, address);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_JUMP_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
new file mode 100644
index 00000000..1ec21234
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_KEY_CMD_H__
+#define __RTA_KEY_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed encryption flags for each SEC Era */
+static const uint32_t key_enc_flags[] = {
+ ENC,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK | PTS,
+ ENC | NWB | EKT | TK | PTS
+};
+
+static inline int
+rta_key(struct program *program, uint32_t key_dst,
+ uint32_t encrypt_flags, uint64_t src, uint32_t length,
+ uint32_t flags)
+{
+ uint32_t opcode = 0;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ if (encrypt_flags & ~key_enc_flags[rta_sec_era]) {
+ pr_err("KEY: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write cmd type */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_KEY;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_KEY;
+ }
+
+ /* check parameters */
+ if (is_seq_cmd) {
+ if ((flags & IMMED) || (flags & SGF)) {
+ pr_err("SEQKEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if ((rta_sec_era <= RTA_SEC_ERA_5) &&
+ ((flags & VLF) || (flags & AIDF))) {
+ pr_err("SEQKEY: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ } else {
+ if ((flags & AIDF) || (flags & VLF)) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if ((flags & SGF) && (flags & IMMED)) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+ if ((encrypt_flags & PTS) &&
+ ((encrypt_flags & ENC) || (encrypt_flags & NWB) ||
+ (key_dst == PKE))) {
+ pr_err("KEY: Invalid flag / destination. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (key_dst == AFHA_SBOX) {
+ if (rta_sec_era == RTA_SEC_ERA_7) {
+ pr_err("KEY: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ if (flags & IMMED) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /*
+ * Sbox data loaded into the ARC-4 processor must be exactly
+ * 258 bytes long, or else a data sequence error is generated.
+ */
+ if (length != 258) {
+ pr_err("KEY: Invalid length. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+ /* write key destination and class fields */
+ switch (key_dst) {
+ case (KEY1):
+ opcode |= KEY_DEST_CLASS1;
+ break;
+ case (KEY2):
+ opcode |= KEY_DEST_CLASS2;
+ break;
+ case (PKE):
+ opcode |= KEY_DEST_CLASS1 | KEY_DEST_PKHA_E;
+ break;
+ case (AFHA_SBOX):
+ opcode |= KEY_DEST_CLASS1 | KEY_DEST_AFHA_SBOX;
+ break;
+ case (MDHA_SPLIT_KEY):
+ opcode |= KEY_DEST_CLASS2 | KEY_DEST_MDHA_SPLIT;
+ break;
+ default:
+ pr_err("KEY: Invalid destination. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ /* write key length */
+ length &= KEY_LENGTH_MASK;
+ opcode |= length;
+
+ /* write key command specific flags */
+ if (encrypt_flags & ENC) {
+ /* Encrypted (black) keys must be padded to 8 bytes (CCM) or
+ * 16 bytes (ECB) depending on EKT bit. AES-CCM encrypted keys
+ * (EKT = 1) have 6-byte nonce and 6-byte MAC after padding.
+ */
+ opcode |= KEY_ENC;
+ if (encrypt_flags & EKT) {
+ opcode |= KEY_EKT;
+ length = ALIGN(length, 8);
+ length += 12;
+ } else {
+ length = ALIGN(length, 16);
+ }
+ if (encrypt_flags & TK)
+ opcode |= KEY_TK;
+ }
+ if (encrypt_flags & NWB)
+ opcode |= KEY_NWB;
+ if (encrypt_flags & PTS)
+ opcode |= KEY_PTS;
+
+ /* write general command flags */
+ if (!is_seq_cmd) {
+ if (flags & IMMED)
+ opcode |= KEY_IMM;
+ if (flags & SGF)
+ opcode |= KEY_SGF;
+ } else {
+ if (flags & AIDF)
+ opcode |= KEY_AIDF;
+ if (flags & VLF)
+ opcode |= KEY_VLF;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+ else
+ __rta_out64(program, program->ps, src);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+#endif /* __RTA_KEY_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
new file mode 100644
index 00000000..f3b0dcfc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
@@ -0,0 +1,302 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_LOAD_CMD_H__
+#define __RTA_LOAD_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed length and offset masks for each SEC Era in case DST = DCTRL */
+static const uint32_t load_len_mask_allowed[] = {
+ 0x000000ee,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe
+};
+
+static const uint32_t load_off_mask_allowed[] = {
+ 0x0000000f,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff
+};
+
+#define IMM_MUST 0
+#define IMM_CAN 1
+#define IMM_NO 2
+#define IMM_DSNM 3 /* it doesn't matter the src type */
+
+enum e_lenoff {
+ LENOF_03,
+ LENOF_4,
+ LENOF_48,
+ LENOF_448,
+ LENOF_18,
+ LENOF_32,
+ LENOF_24,
+ LENOF_16,
+ LENOF_8,
+ LENOF_128,
+ LENOF_256,
+ DSNM /* it doesn't matter the length/offset values */
+};
+
+struct load_map {
+ uint32_t dst;
+ uint32_t dst_opcode;
+ enum e_lenoff len_off;
+ uint8_t imm_src;
+
+};
+
+static const struct load_map load_dst[] = {
+/*1*/ { KEY1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_KEYSZ_REG,
+ LENOF_4, IMM_MUST },
+ { KEY2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_KEYSZ_REG,
+ LENOF_4, IMM_MUST },
+ { DATA1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DATASZ_REG,
+ LENOF_448, IMM_MUST },
+ { DATA2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_DATASZ_REG,
+ LENOF_448, IMM_MUST },
+ { ICV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ICVSZ_REG,
+ LENOF_4, IMM_MUST },
+ { ICV2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_ICVSZ_REG,
+ LENOF_4, IMM_MUST },
+ { CCTRL, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_CHACTRL,
+ LENOF_4, IMM_MUST },
+ { DCTRL, LDST_CLASS_DECO | LDST_IMM | LDST_SRCDST_WORD_DECOCTRL,
+ DSNM, IMM_DSNM },
+ { ICTRL, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_IRQCTRL,
+ LENOF_4, IMM_MUST },
+ { DPOVRD, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_PCLOVRD,
+ LENOF_4, IMM_MUST },
+ { CLRW, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_CLRW,
+ LENOF_4, IMM_MUST },
+ { AAD1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DECO_AAD_SZ,
+ LENOF_4, IMM_MUST },
+ { IV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_CLASS1_IV_SZ,
+ LENOF_4, IMM_MUST },
+ { ALTDS1, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ALTDS_CLASS1,
+ LENOF_448, IMM_MUST },
+ { PKASZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_A_SZ,
+ LENOF_4, IMM_MUST, },
+ { PKBSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_B_SZ,
+ LENOF_4, IMM_MUST },
+ { PKNSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_N_SZ,
+ LENOF_4, IMM_MUST },
+ { PKESZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_E_SZ,
+ LENOF_4, IMM_MUST },
+ { NFIFO, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO,
+ LENOF_48, IMM_MUST },
+ { IFIFO, LDST_SRCDST_BYTE_INFIFO, LENOF_18, IMM_MUST },
+ { OFIFO, LDST_SRCDST_BYTE_OUTFIFO, LENOF_18, IMM_MUST },
+ { MATH0, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH0,
+ LENOF_32, IMM_CAN },
+ { MATH1, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH1,
+ LENOF_24, IMM_CAN },
+ { MATH2, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH2,
+ LENOF_16, IMM_CAN },
+ { MATH3, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3,
+ LENOF_8, IMM_CAN },
+ { CONTEXT1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT,
+ LENOF_128, IMM_CAN },
+ { CONTEXT2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT,
+ LENOF_128, IMM_CAN },
+ { KEY1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_KEY,
+ LENOF_32, IMM_CAN },
+ { KEY2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY,
+ LENOF_32, IMM_CAN },
+ { DESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF,
+ LENOF_256, IMM_NO },
+ { DPID, LDST_CLASS_DECO | LDST_SRCDST_WORD_PID,
+ LENOF_448, IMM_MUST },
+/*32*/ { IDFNS, LDST_SRCDST_WORD_IFNSR, LENOF_18, IMM_MUST },
+ { ODFNS, LDST_SRCDST_WORD_OFNSR, LENOF_18, IMM_MUST },
+ { ALTSOURCE, LDST_SRCDST_BYTE_ALTSOURCE, LENOF_18, IMM_MUST },
+/*35*/ { NFIFO_SZL, LDST_SRCDST_WORD_INFO_FIFO_SZL, LENOF_48, IMM_MUST },
+ { NFIFO_SZM, LDST_SRCDST_WORD_INFO_FIFO_SZM, LENOF_03, IMM_MUST },
+ { NFIFO_L, LDST_SRCDST_WORD_INFO_FIFO_L, LENOF_48, IMM_MUST },
+ { NFIFO_M, LDST_SRCDST_WORD_INFO_FIFO_M, LENOF_03, IMM_MUST },
+ { SZL, LDST_SRCDST_WORD_SZL, LENOF_48, IMM_MUST },
+/*40*/ { SZM, LDST_SRCDST_WORD_SZM, LENOF_03, IMM_MUST }
+};
+
+/*
+ * Allowed LOAD destinations for each SEC Era.
+ * Values represent the number of entries from load_dst[] that are supported.
+ */
+static const unsigned int load_dst_sz[] = { 31, 34, 34, 40, 40, 40, 40, 40 };
+
+static inline int
+load_check_len_offset(int pos, uint32_t length, uint32_t offset)
+{
+ if ((load_dst[pos].dst == DCTRL) &&
+ ((length & ~load_len_mask_allowed[rta_sec_era]) ||
+ (offset & ~load_off_mask_allowed[rta_sec_era])))
+ goto err;
+
+ switch (load_dst[pos].len_off) {
+ case (LENOF_03):
+ if ((length > 3) || (offset))
+ goto err;
+ break;
+ case (LENOF_4):
+ if ((length != 4) || (offset != 0))
+ goto err;
+ break;
+ case (LENOF_48):
+ if (!(((length == 4) && (offset == 0)) ||
+ ((length == 8) && (offset == 0))))
+ goto err;
+ break;
+ case (LENOF_448):
+ if (!(((length == 4) && (offset == 0)) ||
+ ((length == 4) && (offset == 4)) ||
+ ((length == 8) && (offset == 0))))
+ goto err;
+ break;
+ case (LENOF_18):
+ if ((length < 1) || (length > 8) || (offset != 0))
+ goto err;
+ break;
+ case (LENOF_32):
+ if ((length > 32) || (offset > 32) || ((offset + length) > 32))
+ goto err;
+ break;
+ case (LENOF_24):
+ if ((length > 24) || (offset > 24) || ((offset + length) > 24))
+ goto err;
+ break;
+ case (LENOF_16):
+ if ((length > 16) || (offset > 16) || ((offset + length) > 16))
+ goto err;
+ break;
+ case (LENOF_8):
+ if ((length > 8) || (offset > 8) || ((offset + length) > 8))
+ goto err;
+ break;
+ case (LENOF_128):
+ if ((length > 128) || (offset > 128) ||
+ ((offset + length) > 128))
+ goto err;
+ break;
+ case (LENOF_256):
+ if ((length < 1) || (length > 256) || ((length + offset) > 256))
+ goto err;
+ break;
+ case (DSNM):
+ break;
+ default:
+ goto err;
+ }
+
+ return 0;
+err:
+ return -EINVAL;
+}
+
+static inline int
+rta_load(struct program *program, uint64_t src, uint64_t dst,
+ uint32_t offset, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ int pos = -1, ret = -EINVAL;
+ unsigned int start_pc = program->current_pc, i;
+
+ if (flags & SEQ)
+ opcode = CMD_SEQ_LOAD;
+ else
+ opcode = CMD_LOAD;
+
+ if ((length & 0xffffff00) || (offset & 0xffffff00)) {
+ pr_err("LOAD: Bad length/offset passed. Should be 8 bits\n");
+ goto err;
+ }
+
+ if (flags & SGF)
+ opcode |= LDST_SGF;
+ if (flags & VLF)
+ opcode |= LDST_VLF;
+
+ /* check load destination, length and offset and source type */
+ for (i = 0; i < load_dst_sz[rta_sec_era]; i++)
+ if (dst == load_dst[i].dst) {
+ pos = (int)i;
+ break;
+ }
+ if (-1 == pos) {
+ pr_err("LOAD: Invalid dst. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if (flags & IMMED) {
+ if (load_dst[pos].imm_src == IMM_NO) {
+ pr_err("LOAD: Invalid source type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= LDST_IMM;
+ } else if (load_dst[pos].imm_src == IMM_MUST) {
+ pr_err("LOAD IMM: Invalid source type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ ret = load_check_len_offset(pos, length, offset);
+ if (ret < 0) {
+ pr_err("LOAD: Invalid length/offset. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= load_dst[pos].dst_opcode;
+
+ /* DESC BUFFER: length / offset values are specified in 4-byte words */
+ if (dst == DESCBUF) {
+ opcode |= (length >> 2);
+ opcode |= ((offset >> 2) << LDST_OFFSET_SHIFT);
+ } else {
+ opcode |= length;
+ opcode |= (offset << LDST_OFFSET_SHIFT);
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* DECO CONTROL: skip writing pointer of imm data */
+ if (dst == DCTRL)
+ return (int)start_pc;
+
+ /*
+ * For data copy, 3 possible ways to specify how to copy data:
+ * - IMMED & !COPY: copy data directly from src( max 8 bytes)
+ * - IMMED & COPY: copy data imm from the location specified by user
+ * - !IMMED and is not SEQ cmd: copy the address
+ */
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+ else if (!(flags & SEQ))
+ __rta_out64(program, program->ps, src);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_LOAD_CMD_H__*/
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
new file mode 100644
index 00000000..5b28cbab
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
@@ -0,0 +1,369 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_MATH_CMD_H__
+#define __RTA_MATH_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t math_op1[][2] = {
+/*1*/ { MATH0, MATH_SRC0_REG0 },
+ { MATH1, MATH_SRC0_REG1 },
+ { MATH2, MATH_SRC0_REG2 },
+ { MATH3, MATH_SRC0_REG3 },
+ { SEQINSZ, MATH_SRC0_SEQINLEN },
+ { SEQOUTSZ, MATH_SRC0_SEQOUTLEN },
+ { VSEQINSZ, MATH_SRC0_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_SRC0_VARSEQOUTLEN },
+ { ZERO, MATH_SRC0_ZERO },
+/*10*/ { NONE, 0 }, /* dummy value */
+ { DPOVRD, MATH_SRC0_DPOVRD },
+ { ONE, MATH_SRC0_ONE }
+};
+
+/*
+ * Allowed MATH op1 sources for each SEC Era.
+ * Values represent the number of entries from math_op1[] that are supported.
+ */
+static const unsigned int math_op1_sz[] = {10, 10, 12, 12, 12, 12, 12, 12};
+
+static const uint32_t math_op2[][2] = {
+/*1*/ { MATH0, MATH_SRC1_REG0 },
+ { MATH1, MATH_SRC1_REG1 },
+ { MATH2, MATH_SRC1_REG2 },
+ { MATH3, MATH_SRC1_REG3 },
+ { ABD, MATH_SRC1_INFIFO },
+ { OFIFO, MATH_SRC1_OUTFIFO },
+ { ONE, MATH_SRC1_ONE },
+/*8*/ { NONE, 0 }, /* dummy value */
+ { JOBSRC, MATH_SRC1_JOBSOURCE },
+ { DPOVRD, MATH_SRC1_DPOVRD },
+ { VSEQINSZ, MATH_SRC1_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_SRC1_VARSEQOUTLEN },
+/*13*/ { ZERO, MATH_SRC1_ZERO }
+};
+
+/*
+ * Allowed MATH op2 sources for each SEC Era.
+ * Values represent the number of entries from math_op2[] that are supported.
+ */
+static const unsigned int math_op2_sz[] = {8, 9, 13, 13, 13, 13, 13, 13};
+
+static const uint32_t math_result[][2] = {
+/*1*/ { MATH0, MATH_DEST_REG0 },
+ { MATH1, MATH_DEST_REG1 },
+ { MATH2, MATH_DEST_REG2 },
+ { MATH3, MATH_DEST_REG3 },
+ { SEQINSZ, MATH_DEST_SEQINLEN },
+ { SEQOUTSZ, MATH_DEST_SEQOUTLEN },
+ { VSEQINSZ, MATH_DEST_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_DEST_VARSEQOUTLEN },
+/*9*/ { NONE, MATH_DEST_NONE },
+ { DPOVRD, MATH_DEST_DPOVRD }
+};
+
+/*
+ * Allowed MATH result destinations for each SEC Era.
+ * Values represent the number of entries from math_result[] that are
+ * supported.
+ */
+static const unsigned int math_result_sz[] = {9, 9, 10, 10, 10, 10, 10, 10};
+
+static inline int
+rta_math(struct program *program, uint64_t operand1,
+ uint32_t op, uint64_t operand2, uint32_t result,
+ int length, uint32_t options)
+{
+ uint32_t opcode = CMD_MATH;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (((op == MATH_FUN_BSWAP) && (rta_sec_era < RTA_SEC_ERA_4)) ||
+ ((op == MATH_FUN_ZBYT) && (rta_sec_era < RTA_SEC_ERA_2))) {
+ pr_err("MATH: operation not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if (options & SWP) {
+ if (rta_sec_era < RTA_SEC_ERA_7) {
+ pr_err("MATH: operation not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if ((options & IFB) ||
+ (!(options & IMMED) && !(options & IMMED2)) ||
+ ((options & IMMED) && (options & IMMED2))) {
+ pr_err("MATH: SWP - invalid configuration. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+ /*
+ * SHLD operation is different from others and we
+ * assume that we can have _NONE as first operand
+ * or _SEQINSZ as second operand
+ */
+ if ((op != MATH_FUN_SHLD) && ((operand1 == NONE) ||
+ (operand2 == SEQINSZ))) {
+ pr_err("MATH: Invalid operand. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ /*
+ * We first check if it is unary operation. In that
+ * case second operand must be _NONE
+ */
+ if (((op == MATH_FUN_ZBYT) || (op == MATH_FUN_BSWAP)) &&
+ (operand2 != NONE)) {
+ pr_err("MATH: Invalid operand2. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ /* Write first operand field */
+ if (options & IMMED) {
+ opcode |= MATH_SRC0_IMM;
+ } else {
+ ret = __rta_map_opcode((uint32_t)operand1, math_op1,
+ math_op1_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATH: operand1 not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* Write second operand field */
+ if (options & IMMED2) {
+ opcode |= MATH_SRC1_IMM;
+ } else {
+ ret = __rta_map_opcode((uint32_t)operand2, math_op2,
+ math_op2_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATH: operand2 not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* Write result field */
+ ret = __rta_map_opcode(result, math_result, math_result_sz[rta_sec_era],
+ &val);
+ if (ret < 0) {
+ pr_err("MATH: result not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /*
+ * as we encode operations with their "real" values, we do not
+ * to translate but we do need to validate the value
+ */
+ switch (op) {
+ /*Binary operators */
+ case (MATH_FUN_ADD):
+ case (MATH_FUN_ADDC):
+ case (MATH_FUN_SUB):
+ case (MATH_FUN_SUBB):
+ case (MATH_FUN_OR):
+ case (MATH_FUN_AND):
+ case (MATH_FUN_XOR):
+ case (MATH_FUN_LSHIFT):
+ case (MATH_FUN_RSHIFT):
+ case (MATH_FUN_SHLD):
+ /* Unary operators */
+ case (MATH_FUN_ZBYT):
+ case (MATH_FUN_BSWAP):
+ opcode |= op;
+ break;
+ default:
+ pr_err("MATH: operator is not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ opcode |= (options & ~(IMMED | IMMED2));
+
+ /* Verify length */
+ switch (length) {
+ case (1):
+ opcode |= MATH_LEN_1BYTE;
+ break;
+ case (2):
+ opcode |= MATH_LEN_2BYTE;
+ break;
+ case (4):
+ opcode |= MATH_LEN_4BYTE;
+ break;
+ case (8):
+ opcode |= MATH_LEN_8BYTE;
+ break;
+ default:
+ pr_err("MATH: length is not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* Write immediate value */
+ if ((options & IMMED) && !(options & IMMED2)) {
+ __rta_out64(program, (length > 4) && !(options & IFB),
+ operand1);
+ } else if ((options & IMMED2) && !(options & IMMED)) {
+ __rta_out64(program, (length > 4) && !(options & IFB),
+ operand2);
+ } else if ((options & IMMED) && (options & IMMED2)) {
+ __rta_out32(program, lower_32_bits(operand1));
+ __rta_out32(program, lower_32_bits(operand2));
+ }
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static inline int
+rta_mathi(struct program *program, uint64_t operand,
+ uint32_t op, uint8_t imm, uint32_t result,
+ int length, uint32_t options)
+{
+ uint32_t opcode = CMD_MATHI;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (rta_sec_era < RTA_SEC_ERA_6) {
+ pr_err("MATHI: Command not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if (((op == MATH_FUN_FBYT) && (options & SSEL))) {
+ pr_err("MATHI: Illegal combination - FBYT and SSEL. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if ((options & SWP) && (rta_sec_era < RTA_SEC_ERA_7)) {
+ pr_err("MATHI: SWP not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /* Write first operand field */
+ if (!(options & SSEL))
+ ret = __rta_map_opcode((uint32_t)operand, math_op1,
+ math_op1_sz[rta_sec_era], &val);
+ else
+ ret = __rta_map_opcode((uint32_t)operand, math_op2,
+ math_op2_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATHI: operand not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (!(options & SSEL))
+ opcode |= val;
+ else
+ opcode |= (val << (MATHI_SRC1_SHIFT - MATH_SRC1_SHIFT));
+
+ /* Write second operand field */
+ opcode |= (imm << MATHI_IMM_SHIFT);
+
+ /* Write result field */
+ ret = __rta_map_opcode(result, math_result, math_result_sz[rta_sec_era],
+ &val);
+ if (ret < 0) {
+ pr_err("MATHI: result not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= (val << (MATHI_DEST_SHIFT - MATH_DEST_SHIFT));
+
+ /*
+ * as we encode operations with their "real" values, we do not have to
+ * translate but we do need to validate the value
+ */
+ switch (op) {
+ case (MATH_FUN_ADD):
+ case (MATH_FUN_ADDC):
+ case (MATH_FUN_SUB):
+ case (MATH_FUN_SUBB):
+ case (MATH_FUN_OR):
+ case (MATH_FUN_AND):
+ case (MATH_FUN_XOR):
+ case (MATH_FUN_LSHIFT):
+ case (MATH_FUN_RSHIFT):
+ case (MATH_FUN_FBYT):
+ opcode |= op;
+ break;
+ default:
+ pr_err("MATHI: operator not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ opcode |= options;
+
+ /* Verify length */
+ switch (length) {
+ case (1):
+ opcode |= MATH_LEN_1BYTE;
+ break;
+ case (2):
+ opcode |= MATH_LEN_2BYTE;
+ break;
+ case (4):
+ opcode |= MATH_LEN_4BYTE;
+ break;
+ case (8):
+ opcode |= MATH_LEN_8BYTE;
+ break;
+ default:
+ pr_err("MATHI: length %d not supported. SEC PC: %d; Instr: %d\n",
+ length, program->current_pc,
+ program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_MATH_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
new file mode 100644
index 00000000..a7ff7c67
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_MOVE_CMD_H__
+#define __RTA_MOVE_CMD_H__
+
+#define MOVE_SET_AUX_SRC 0x01
+#define MOVE_SET_AUX_DST 0x02
+#define MOVE_SET_AUX_LS 0x03
+#define MOVE_SET_LEN_16b 0x04
+
+#define MOVE_SET_AUX_MATH 0x10
+#define MOVE_SET_AUX_MATH_SRC (MOVE_SET_AUX_SRC | MOVE_SET_AUX_MATH)
+#define MOVE_SET_AUX_MATH_DST (MOVE_SET_AUX_DST | MOVE_SET_AUX_MATH)
+
+#define MASK_16b 0xFF
+
+/* MOVE command type */
+#define __MOVE 1
+#define __MOVEB 2
+#define __MOVEDW 3
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t move_src_table[][2] = {
+/*1*/ { CONTEXT1, MOVE_SRC_CLASS1CTX },
+ { CONTEXT2, MOVE_SRC_CLASS2CTX },
+ { OFIFO, MOVE_SRC_OUTFIFO },
+ { DESCBUF, MOVE_SRC_DESCBUF },
+ { MATH0, MOVE_SRC_MATH0 },
+ { MATH1, MOVE_SRC_MATH1 },
+ { MATH2, MOVE_SRC_MATH2 },
+ { MATH3, MOVE_SRC_MATH3 },
+/*9*/ { IFIFOABD, MOVE_SRC_INFIFO },
+ { IFIFOAB1, MOVE_SRC_INFIFO_CL | MOVE_AUX_LS },
+ { IFIFOAB2, MOVE_SRC_INFIFO_CL },
+/*12*/ { ABD, MOVE_SRC_INFIFO_NO_NFIFO },
+ { AB1, MOVE_SRC_INFIFO_NO_NFIFO | MOVE_AUX_LS },
+ { AB2, MOVE_SRC_INFIFO_NO_NFIFO | MOVE_AUX_MS }
+};
+
+/* Allowed MOVE / MOVE_LEN sources for each SEC Era.
+ * Values represent the number of entries from move_src_table[] that are
+ * supported.
+ */
+static const unsigned int move_src_table_sz[] = {9, 11, 14, 14, 14, 14, 14, 14};
+
+static const uint32_t move_dst_table[][2] = {
+/*1*/ { CONTEXT1, MOVE_DEST_CLASS1CTX },
+ { CONTEXT2, MOVE_DEST_CLASS2CTX },
+ { OFIFO, MOVE_DEST_OUTFIFO },
+ { DESCBUF, MOVE_DEST_DESCBUF },
+ { MATH0, MOVE_DEST_MATH0 },
+ { MATH1, MOVE_DEST_MATH1 },
+ { MATH2, MOVE_DEST_MATH2 },
+ { MATH3, MOVE_DEST_MATH3 },
+ { IFIFOAB1, MOVE_DEST_CLASS1INFIFO },
+ { IFIFOAB2, MOVE_DEST_CLASS2INFIFO },
+ { PKA, MOVE_DEST_PK_A },
+ { KEY1, MOVE_DEST_CLASS1KEY },
+ { KEY2, MOVE_DEST_CLASS2KEY },
+/*14*/ { IFIFO, MOVE_DEST_INFIFO },
+/*15*/ { ALTSOURCE, MOVE_DEST_ALTSOURCE}
+};
+
+/* Allowed MOVE / MOVE_LEN destinations for each SEC Era.
+ * Values represent the number of entries from move_dst_table[] that are
+ * supported.
+ */
+static const
+unsigned int move_dst_table_sz[] = {13, 14, 14, 15, 15, 15, 15, 15};
+
+static inline int
+set_move_offset(struct program *program __maybe_unused,
+ uint64_t src, uint16_t src_offset,
+ uint64_t dst, uint16_t dst_offset,
+ uint16_t *offset, uint16_t *opt);
+
+static inline int
+math_offset(uint16_t offset);
+
+static inline int
+rta_move(struct program *program, int cmd_type, uint64_t src,
+ uint16_t src_offset, uint64_t dst,
+ uint16_t dst_offset, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint16_t offset = 0, opt = 0;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ bool is_move_len_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ if ((rta_sec_era < RTA_SEC_ERA_7) && (cmd_type != __MOVE)) {
+ pr_err("MOVE: MOVEB / MOVEDW not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /* write command type */
+ if (cmd_type == __MOVEB) {
+ opcode = CMD_MOVEB;
+ } else if (cmd_type == __MOVEDW) {
+ opcode = CMD_MOVEDW;
+ } else if (!(flags & IMMED)) {
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ pr_err("MOVE: MOVE_LEN not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if ((length != MATH0) && (length != MATH1) &&
+ (length != MATH2) && (length != MATH3)) {
+ pr_err("MOVE: MOVE_LEN length must be MATH[0-3]. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ opcode = CMD_MOVE_LEN;
+ is_move_len_cmd = true;
+ } else {
+ opcode = CMD_MOVE;
+ }
+
+ /* write offset first, to check for invalid combinations or incorrect
+ * offset values sooner; decide which offset should be here
+ * (src or dst)
+ */
+ ret = set_move_offset(program, src, src_offset, dst, dst_offset,
+ &offset, &opt);
+ if (ret < 0)
+ goto err;
+
+ opcode |= (offset << MOVE_OFFSET_SHIFT) & MOVE_OFFSET_MASK;
+
+ /* set AUX field if required */
+ if (opt == MOVE_SET_AUX_SRC) {
+ opcode |= ((src_offset / 16) << MOVE_AUX_SHIFT) & MOVE_AUX_MASK;
+ } else if (opt == MOVE_SET_AUX_DST) {
+ opcode |= ((dst_offset / 16) << MOVE_AUX_SHIFT) & MOVE_AUX_MASK;
+ } else if (opt == MOVE_SET_AUX_LS) {
+ opcode |= MOVE_AUX_LS;
+ } else if (opt & MOVE_SET_AUX_MATH) {
+ if (opt & MOVE_SET_AUX_SRC)
+ offset = src_offset;
+ else
+ offset = dst_offset;
+
+ if (rta_sec_era < RTA_SEC_ERA_6) {
+ if (offset)
+ pr_debug("MOVE: Offset not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era),
+ program->current_pc,
+ program->current_instruction);
+ /* nothing to do for offset = 0 */
+ } else {
+ ret = math_offset(offset);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid offset in MATH register. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ opcode |= (uint32_t)ret;
+ }
+ }
+
+ /* write source field */
+ ret = __rta_map_opcode((uint32_t)src, move_src_table,
+ move_src_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid SRC. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write destination field */
+ ret = __rta_map_opcode((uint32_t)dst, move_dst_table,
+ move_dst_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write flags */
+ if (flags & (FLUSH1 | FLUSH2))
+ opcode |= MOVE_AUX_MS;
+ if (flags & (LAST2 | LAST1))
+ opcode |= MOVE_AUX_LS;
+ if (flags & WAITCOMP)
+ opcode |= MOVE_WAITCOMP;
+
+ if (!is_move_len_cmd) {
+ /* write length */
+ if (opt == MOVE_SET_LEN_16b)
+ opcode |= (length & (MOVE_OFFSET_MASK | MOVE_LEN_MASK));
+ else
+ opcode |= (length & MOVE_LEN_MASK);
+ } else {
+ /* write mrsel */
+ switch (length) {
+ case (MATH0):
+ /*
+ * opcode |= MOVELEN_MRSEL_MATH0;
+ * MOVELEN_MRSEL_MATH0 is 0
+ */
+ break;
+ case (MATH1):
+ opcode |= MOVELEN_MRSEL_MATH1;
+ break;
+ case (MATH2):
+ opcode |= MOVELEN_MRSEL_MATH2;
+ break;
+ case (MATH3):
+ opcode |= MOVELEN_MRSEL_MATH3;
+ break;
+ }
+
+ /* write size */
+ if (rta_sec_era >= RTA_SEC_ERA_7) {
+ if (flags & SIZE_WORD)
+ opcode |= MOVELEN_SIZE_WORD;
+ else if (flags & SIZE_BYTE)
+ opcode |= MOVELEN_SIZE_BYTE;
+ else if (flags & SIZE_DWORD)
+ opcode |= MOVELEN_SIZE_DWORD;
+ }
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static inline int
+set_move_offset(struct program *program __maybe_unused,
+ uint64_t src, uint16_t src_offset,
+ uint64_t dst, uint16_t dst_offset,
+ uint16_t *offset, uint16_t *opt)
+{
+ switch (src) {
+ case (CONTEXT1):
+ case (CONTEXT2):
+ if (dst == DESCBUF) {
+ *opt = MOVE_SET_AUX_SRC;
+ *offset = dst_offset;
+ } else if ((dst == KEY1) || (dst == KEY2)) {
+ if ((src_offset) && (dst_offset)) {
+ pr_err("MOVE: Bad offset. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if (dst_offset) {
+ *opt = MOVE_SET_AUX_LS;
+ *offset = dst_offset;
+ } else {
+ *offset = src_offset;
+ }
+ } else {
+ if ((dst == MATH0) || (dst == MATH1) ||
+ (dst == MATH2) || (dst == MATH3)) {
+ *opt = MOVE_SET_AUX_MATH_DST;
+ } else if (((dst == OFIFO) || (dst == ALTSOURCE)) &&
+ (src_offset % 4)) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ *offset = src_offset;
+ }
+ break;
+
+ case (OFIFO):
+ if (dst == OFIFO) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if (((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA)) &&
+ (src_offset || dst_offset)) {
+ pr_err("MOVE: Offset should be zero. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = dst_offset;
+ break;
+
+ case (DESCBUF):
+ if ((dst == CONTEXT1) || (dst == CONTEXT2)) {
+ *opt = MOVE_SET_AUX_DST;
+ } else if ((dst == MATH0) || (dst == MATH1) ||
+ (dst == MATH2) || (dst == MATH3)) {
+ *opt = MOVE_SET_AUX_MATH_DST;
+ } else if (dst == DESCBUF) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ } else if (((dst == OFIFO) || (dst == ALTSOURCE)) &&
+ (src_offset % 4)) {
+ pr_err("MOVE: Invalid offset alignment. SEC PC: %d; Instr %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ *offset = src_offset;
+ break;
+
+ case (MATH0):
+ case (MATH1):
+ case (MATH2):
+ case (MATH3):
+ if ((dst == OFIFO) || (dst == ALTSOURCE)) {
+ if (src_offset % 4) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = src_offset;
+ } else if ((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA)) {
+ *offset = src_offset;
+ } else {
+ *offset = dst_offset;
+
+ /*
+ * This condition is basically the negation of:
+ * dst in { CONTEXT[1-2], MATH[0-3] }
+ */
+ if ((dst != KEY1) && (dst != KEY2))
+ *opt = MOVE_SET_AUX_MATH_SRC;
+ }
+ break;
+
+ case (IFIFOABD):
+ case (IFIFOAB1):
+ case (IFIFOAB2):
+ case (ABD):
+ case (AB1):
+ case (AB2):
+ if ((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA) || (dst == ALTSOURCE)) {
+ pr_err("MOVE: Bad DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ } else {
+ if (dst == OFIFO) {
+ *opt = MOVE_SET_LEN_16b;
+ } else {
+ if (dst_offset % 4) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = dst_offset;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+ err:
+ return -EINVAL;
+}
+
+static inline int
+math_offset(uint16_t offset)
+{
+ switch (offset) {
+ case 0:
+ return 0;
+ case 4:
+ return MOVE_AUX_LS;
+ case 6:
+ return MOVE_AUX_MS;
+ case 7:
+ return MOVE_AUX_LS | MOVE_AUX_MS;
+ }
+
+ return -EINVAL;
+}
+
+#endif /* __RTA_MOVE_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
new file mode 100644
index 00000000..94f775e2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_NFIFO_CMD_H__
+#define __RTA_NFIFO_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t nfifo_src[][2] = {
+/*1*/ { IFIFO, NFIFOENTRY_STYPE_DFIFO },
+ { OFIFO, NFIFOENTRY_STYPE_OFIFO },
+ { PAD, NFIFOENTRY_STYPE_PAD },
+/*4*/ { MSGOUTSNOOP, NFIFOENTRY_STYPE_SNOOP | NFIFOENTRY_DEST_BOTH },
+/*5*/ { ALTSOURCE, NFIFOENTRY_STYPE_ALTSOURCE },
+ { OFIFO_SYNC, NFIFOENTRY_STYPE_OFIFO_SYNC },
+/*7*/ { MSGOUTSNOOP_ALT, NFIFOENTRY_STYPE_SNOOP_ALT | NFIFOENTRY_DEST_BOTH }
+};
+
+/*
+ * Allowed NFIFO LOAD sources for each SEC Era.
+ * Values represent the number of entries from nfifo_src[] that are supported.
+ */
+static const unsigned int nfifo_src_sz[] = {4, 5, 5, 5, 5, 5, 5, 7};
+
+static const uint32_t nfifo_data[][2] = {
+ { MSG, NFIFOENTRY_DTYPE_MSG },
+ { MSG1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_MSG },
+ { MSG2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_MSG },
+ { IV1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_IV },
+ { IV2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_IV },
+ { ICV1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_ICV },
+ { ICV2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_ICV },
+ { SAD1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_SAD },
+ { AAD1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_AAD },
+ { AAD2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_AAD },
+ { AFHA_SBOX, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_SBOX },
+ { SKIP, NFIFOENTRY_DTYPE_SKIP },
+ { PKE, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_E },
+ { PKN, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_N },
+ { PKA, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A },
+ { PKA0, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A0 },
+ { PKA1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A1 },
+ { PKA2, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A2 },
+ { PKA3, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A3 },
+ { PKB, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B },
+ { PKB0, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B0 },
+ { PKB1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B1 },
+ { PKB2, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B2 },
+ { PKB3, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B3 },
+ { AB1, NFIFOENTRY_DEST_CLASS1 },
+ { AB2, NFIFOENTRY_DEST_CLASS2 },
+ { ABD, NFIFOENTRY_DEST_DECO }
+};
+
+static const uint32_t nfifo_flags[][2] = {
+/*1*/ { LAST1, NFIFOENTRY_LC1 },
+ { LAST2, NFIFOENTRY_LC2 },
+ { FLUSH1, NFIFOENTRY_FC1 },
+ { BP, NFIFOENTRY_BND },
+ { PAD_ZERO, NFIFOENTRY_PTYPE_ZEROS },
+ { PAD_NONZERO, NFIFOENTRY_PTYPE_RND_NOZEROS },
+ { PAD_INCREMENT, NFIFOENTRY_PTYPE_INCREMENT },
+ { PAD_RANDOM, NFIFOENTRY_PTYPE_RND },
+ { PAD_ZERO_N1, NFIFOENTRY_PTYPE_ZEROS_NZ },
+ { PAD_NONZERO_0, NFIFOENTRY_PTYPE_RND_NZ_LZ },
+ { PAD_N1, NFIFOENTRY_PTYPE_N },
+/*12*/ { PAD_NONZERO_N, NFIFOENTRY_PTYPE_RND_NZ_N },
+ { FLUSH2, NFIFOENTRY_FC2 },
+ { OC, NFIFOENTRY_OC }
+};
+
+/*
+ * Allowed NFIFO LOAD flags for each SEC Era.
+ * Values represent the number of entries from nfifo_flags[] that are supported.
+ */
+static const unsigned int nfifo_flags_sz[] = {12, 14, 14, 14, 14, 14, 14, 14};
+
+static const uint32_t nfifo_pad_flags[][2] = {
+ { BM, NFIFOENTRY_BM },
+ { PS, NFIFOENTRY_PS },
+ { PR, NFIFOENTRY_PR }
+};
+
+/*
+ * Allowed NFIFO LOAD pad flags for each SEC Era.
+ * Values represent the number of entries from nfifo_pad_flags[] that are
+ * supported.
+ */
+static const unsigned int nfifo_pad_flags_sz[] = {2, 2, 2, 2, 3, 3, 3, 3};
+
+static inline int
+rta_nfifo_load(struct program *program, uint32_t src,
+ uint32_t data, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0, val;
+ int ret = -EINVAL;
+ uint32_t load_cmd = CMD_LOAD | LDST_IMM | LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO;
+ unsigned int start_pc = program->current_pc;
+
+ if ((data == AFHA_SBOX) && (rta_sec_era == RTA_SEC_ERA_7)) {
+ pr_err("NFIFO: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write source field */
+ ret = __rta_map_opcode(src, nfifo_src, nfifo_src_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("NFIFO: Invalid SRC. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write type field */
+ ret = __rta_map_opcode(data, nfifo_data, ARRAY_SIZE(nfifo_data), &val);
+ if (ret < 0) {
+ pr_err("NFIFO: Invalid data. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write DL field */
+ if (!(flags & EXT)) {
+ opcode |= length & NFIFOENTRY_DLEN_MASK;
+ load_cmd |= 4;
+ } else {
+ load_cmd |= 8;
+ }
+
+ /* write flags */
+ __rta_map_flags(flags, nfifo_flags, nfifo_flags_sz[rta_sec_era],
+ &opcode);
+
+ /* in case of padding, check the destination */
+ if (src == PAD)
+ __rta_map_flags(flags, nfifo_pad_flags,
+ nfifo_pad_flags_sz[rta_sec_era], &opcode);
+
+ /* write LOAD command first */
+ __rta_out32(program, load_cmd);
+ __rta_out32(program, opcode);
+
+ if (flags & EXT)
+ __rta_out32(program, length & NFIFOENTRY_DLEN_MASK);
+
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_NFIFO_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
new file mode 100644
index 00000000..b85760e5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_OPERATION_CMD_H__
+#define __RTA_OPERATION_CMD_H__
+
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 70000)
+#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
+#endif
+
+extern enum rta_sec_era rta_sec_era;
+
+static inline int
+__rta_alg_aai_aes(uint16_t aai)
+{
+ uint16_t aes_mode = aai & OP_ALG_AESA_MODE_MASK;
+
+ if (aai & OP_ALG_AAI_C2K) {
+ if (rta_sec_era < RTA_SEC_ERA_5)
+ return -1;
+ if ((aes_mode != OP_ALG_AAI_CCM) &&
+ (aes_mode != OP_ALG_AAI_GCM))
+ return -EINVAL;
+ }
+
+ switch (aes_mode) {
+ case OP_ALG_AAI_CBC_CMAC:
+ case OP_ALG_AAI_CTR_CMAC_LTE:
+ case OP_ALG_AAI_CTR_CMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_CTR:
+ case OP_ALG_AAI_CBC:
+ case OP_ALG_AAI_ECB:
+ case OP_ALG_AAI_OFB:
+ case OP_ALG_AAI_CFB:
+ case OP_ALG_AAI_XTS:
+ case OP_ALG_AAI_CMAC:
+ case OP_ALG_AAI_XCBC_MAC:
+ case OP_ALG_AAI_CCM:
+ case OP_ALG_AAI_GCM:
+ case OP_ALG_AAI_CBC_XCBCMAC:
+ case OP_ALG_AAI_CTR_XCBCMAC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_des(uint16_t aai)
+{
+ uint16_t aai_code = (uint16_t)(aai & ~OP_ALG_AAI_CHECKODD);
+
+ switch (aai_code) {
+ case OP_ALG_AAI_CBC:
+ case OP_ALG_AAI_ECB:
+ case OP_ALG_AAI_CFB:
+ case OP_ALG_AAI_OFB:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_md5(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_HMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_SMAC:
+ case OP_ALG_AAI_HASH:
+ case OP_ALG_AAI_HMAC_PRECOMP:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_sha(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_HMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_HASH:
+ case OP_ALG_AAI_HMAC_PRECOMP:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_rng(uint16_t aai)
+{
+ uint16_t rng_mode = aai & OP_ALG_RNG_MODE_MASK;
+ uint16_t rng_sh = aai & OP_ALG_AAI_RNG4_SH_MASK;
+
+ switch (rng_mode) {
+ case OP_ALG_AAI_RNG:
+ case OP_ALG_AAI_RNG_NZB:
+ case OP_ALG_AAI_RNG_OBP:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* State Handle bits are valid only for SEC Era >= 5 */
+ if ((rta_sec_era < RTA_SEC_ERA_5) && rng_sh)
+ return -EINVAL;
+
+ /* PS, AI, SK bits are also valid only for SEC Era >= 5 */
+ if ((rta_sec_era < RTA_SEC_ERA_5) && (aai &
+ (OP_ALG_AAI_RNG4_PS | OP_ALG_AAI_RNG4_AI | OP_ALG_AAI_RNG4_SK)))
+ return -EINVAL;
+
+ switch (rng_sh) {
+ case OP_ALG_AAI_RNG4_SH_0:
+ case OP_ALG_AAI_RNG4_SH_1:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_crc(uint16_t aai)
+{
+ uint16_t aai_code = aai & OP_ALG_CRC_POLY_MASK;
+
+ switch (aai_code) {
+ case OP_ALG_AAI_802:
+ case OP_ALG_AAI_3385:
+ case OP_ALG_AAI_CUST_POLY:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_kasumi(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_GSM:
+ case OP_ALG_AAI_EDGE:
+ case OP_ALG_AAI_F8:
+ case OP_ALG_AAI_F9:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_snow_f9(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F9)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_snow_f8(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F8)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_zuce(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F8)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_zuca(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F9)
+ return 0;
+
+ return -EINVAL;
+}
+
+struct alg_aai_map {
+ uint32_t chipher_algo;
+ int (*aai_func)(uint16_t);
+ uint32_t class;
+};
+
+static const struct alg_aai_map alg_table[] = {
+/*1*/ { OP_ALG_ALGSEL_AES, __rta_alg_aai_aes, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_DES, __rta_alg_aai_des, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_3DES, __rta_alg_aai_des, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_MD5, __rta_alg_aai_md5, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA1, __rta_alg_aai_md5, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA224, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA256, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA384, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA512, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_RNG, __rta_alg_aai_rng, OP_TYPE_CLASS1_ALG },
+/*11*/ { OP_ALG_ALGSEL_CRC, __rta_alg_aai_crc, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_ARC4, NULL, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_SNOW_F8, __rta_alg_aai_snow_f8, OP_TYPE_CLASS1_ALG },
+/*14*/ { OP_ALG_ALGSEL_KASUMI, __rta_alg_aai_kasumi, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_SNOW_F9, __rta_alg_aai_snow_f9, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_ZUCE, __rta_alg_aai_zuce, OP_TYPE_CLASS1_ALG },
+/*17*/ { OP_ALG_ALGSEL_ZUCA, __rta_alg_aai_zuca, OP_TYPE_CLASS2_ALG }
+};
+
+/*
+ * Allowed OPERATION algorithms for each SEC Era.
+ * Values represent the number of entries from alg_table[] that are supported.
+ */
+static const unsigned int alg_table_sz[] = {14, 15, 15, 15, 17, 17, 11, 17};
+
+static inline int
+rta_operation(struct program *program, uint32_t cipher_algo,
+ uint16_t aai, uint8_t algo_state,
+ int icv_checking, int enc)
+{
+ uint32_t opcode = CMD_OPERATION;
+ unsigned int i, found = 0;
+ unsigned int start_pc = program->current_pc;
+ int ret;
+
+ for (i = 0; i < alg_table_sz[rta_sec_era]; i++) {
+ if (alg_table[i].chipher_algo == cipher_algo) {
+ opcode |= cipher_algo | alg_table[i].class;
+ /* nothing else to verify */
+ if (alg_table[i].aai_func == NULL) {
+ found = 1;
+ break;
+ }
+
+ aai &= OP_ALG_AAI_MASK;
+
+ ret = (*alg_table[i].aai_func)(aai);
+ if (ret < 0) {
+ pr_err("OPERATION: Bad AAI Type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= aai;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ pr_err("OPERATION: Invalid Command. SEC Program Line: %d\n",
+ program->current_pc);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (algo_state) {
+ case OP_ALG_AS_UPDATE:
+ case OP_ALG_AS_INIT:
+ case OP_ALG_AS_FINALIZE:
+ case OP_ALG_AS_INITFINAL:
+ opcode |= algo_state;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (icv_checking) {
+ case ICV_CHECK_DISABLE:
+ /*
+ * opcode |= OP_ALG_ICV_OFF;
+ * OP_ALG_ICV_OFF is 0
+ */
+ break;
+ case ICV_CHECK_ENABLE:
+ opcode |= OP_ALG_ICV_ON;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (enc) {
+ case DIR_DEC:
+ /*
+ * opcode |= OP_ALG_DECRYPT;
+ * OP_ALG_DECRYPT is 0
+ */
+ break;
+ case DIR_ENC:
+ opcode |= OP_ALG_ENCRYPT;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ return ret;
+}
+
+/*
+ * OPERATION PKHA routines
+ */
+static inline int
+__rta_pkha_clearmem(uint32_t pkha_op)
+{
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_CLEARMEM_ALL):
+ case (OP_ALG_PKMODE_CLEARMEM_ABE):
+ case (OP_ALG_PKMODE_CLEARMEM_ABN):
+ case (OP_ALG_PKMODE_CLEARMEM_AB):
+ case (OP_ALG_PKMODE_CLEARMEM_AEN):
+ case (OP_ALG_PKMODE_CLEARMEM_AE):
+ case (OP_ALG_PKMODE_CLEARMEM_AN):
+ case (OP_ALG_PKMODE_CLEARMEM_A):
+ case (OP_ALG_PKMODE_CLEARMEM_BEN):
+ case (OP_ALG_PKMODE_CLEARMEM_BE):
+ case (OP_ALG_PKMODE_CLEARMEM_BN):
+ case (OP_ALG_PKMODE_CLEARMEM_B):
+ case (OP_ALG_PKMODE_CLEARMEM_EN):
+ case (OP_ALG_PKMODE_CLEARMEM_N):
+ case (OP_ALG_PKMODE_CLEARMEM_E):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_pkha_mod_arithmetic(uint32_t pkha_op)
+{
+ pkha_op &= (uint32_t)~OP_ALG_PKMODE_OUT_A;
+
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_MOD_ADD):
+ case (OP_ALG_PKMODE_MOD_SUB_AB):
+ case (OP_ALG_PKMODE_MOD_SUB_BA):
+ case (OP_ALG_PKMODE_MOD_MULT):
+ case (OP_ALG_PKMODE_MOD_MULT_IM):
+ case (OP_ALG_PKMODE_MOD_MULT_IM_OM):
+ case (OP_ALG_PKMODE_MOD_EXPO):
+ case (OP_ALG_PKMODE_MOD_EXPO_TEQ):
+ case (OP_ALG_PKMODE_MOD_EXPO_IM):
+ case (OP_ALG_PKMODE_MOD_EXPO_IM_TEQ):
+ case (OP_ALG_PKMODE_MOD_REDUCT):
+ case (OP_ALG_PKMODE_MOD_INV):
+ case (OP_ALG_PKMODE_MOD_MONT_CNST):
+ case (OP_ALG_PKMODE_MOD_CRT_CNST):
+ case (OP_ALG_PKMODE_MOD_GCD):
+ case (OP_ALG_PKMODE_MOD_PRIMALITY):
+ case (OP_ALG_PKMODE_MOD_SML_EXP):
+ case (OP_ALG_PKMODE_F2M_ADD):
+ case (OP_ALG_PKMODE_F2M_MUL):
+ case (OP_ALG_PKMODE_F2M_MUL_IM):
+ case (OP_ALG_PKMODE_F2M_MUL_IM_OM):
+ case (OP_ALG_PKMODE_F2M_EXP):
+ case (OP_ALG_PKMODE_F2M_EXP_TEQ):
+ case (OP_ALG_PKMODE_F2M_AMODN):
+ case (OP_ALG_PKMODE_F2M_INV):
+ case (OP_ALG_PKMODE_F2M_R2):
+ case (OP_ALG_PKMODE_F2M_GCD):
+ case (OP_ALG_PKMODE_F2M_SML_EXP):
+ case (OP_ALG_PKMODE_ECC_F2M_ADD):
+ case (OP_ALG_PKMODE_ECC_F2M_ADD_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_DBL):
+ case (OP_ALG_PKMODE_ECC_F2M_DBL_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_TEQ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_TEQ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ_TEQ):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_pkha_copymem(uint32_t pkha_op)
+{
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A_E):
+ case (OP_ALG_PKMODE_COPY_NSZ_A_N):
+ case (OP_ALG_PKMODE_COPY_NSZ_B_E):
+ case (OP_ALG_PKMODE_COPY_NSZ_B_N):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_A):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_B):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_A_N):
+ case (OP_ALG_PKMODE_COPY_SSZ_B_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_B_N):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_A):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_B):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_E):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+rta_pkha_operation(struct program *program, uint32_t op_pkha)
+{
+ uint32_t opcode = CMD_OPERATION | OP_TYPE_PK | OP_ALG_PK;
+ uint32_t pkha_func;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ pkha_func = op_pkha & OP_ALG_PK_FUN_MASK;
+
+ switch (pkha_func) {
+ case (OP_ALG_PKMODE_CLEARMEM):
+ ret = __rta_pkha_clearmem(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ case (OP_ALG_PKMODE_MOD_ADD):
+ case (OP_ALG_PKMODE_MOD_SUB_AB):
+ case (OP_ALG_PKMODE_MOD_SUB_BA):
+ case (OP_ALG_PKMODE_MOD_MULT):
+ case (OP_ALG_PKMODE_MOD_EXPO):
+ case (OP_ALG_PKMODE_MOD_REDUCT):
+ case (OP_ALG_PKMODE_MOD_INV):
+ case (OP_ALG_PKMODE_MOD_MONT_CNST):
+ case (OP_ALG_PKMODE_MOD_CRT_CNST):
+ case (OP_ALG_PKMODE_MOD_GCD):
+ case (OP_ALG_PKMODE_MOD_PRIMALITY):
+ case (OP_ALG_PKMODE_MOD_SML_EXP):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL):
+ ret = __rta_pkha_mod_arithmetic(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ case (OP_ALG_PKMODE_COPY_NSZ):
+ case (OP_ALG_PKMODE_COPY_SSZ):
+ ret = __rta_pkha_copymem(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ goto err;
+ }
+
+ opcode |= op_pkha;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_OPERATION_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
new file mode 100644
index 00000000..d9a5b0e5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
@@ -0,0 +1,699 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_PROTOCOL_CMD_H__
+#define __RTA_PROTOCOL_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static inline int
+__rta_ssl_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_SSL30_RC4_40_MD5_2:
+ case OP_PCL_SSL30_RC4_128_MD5_2:
+ case OP_PCL_SSL30_RC4_128_SHA_5:
+ case OP_PCL_SSL30_RC4_40_MD5_3:
+ case OP_PCL_SSL30_RC4_128_MD5_3:
+ case OP_PCL_SSL30_RC4_128_SHA:
+ case OP_PCL_SSL30_RC4_128_MD5:
+ case OP_PCL_SSL30_RC4_40_SHA:
+ case OP_PCL_SSL30_RC4_40_MD5:
+ case OP_PCL_SSL30_RC4_128_SHA_2:
+ case OP_PCL_SSL30_RC4_128_SHA_3:
+ case OP_PCL_SSL30_RC4_128_SHA_4:
+ case OP_PCL_SSL30_RC4_128_SHA_6:
+ case OP_PCL_SSL30_RC4_128_SHA_7:
+ case OP_PCL_SSL30_RC4_128_SHA_8:
+ case OP_PCL_SSL30_RC4_128_SHA_9:
+ case OP_PCL_SSL30_RC4_128_SHA_10:
+ case OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA:
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+ /* fall through if not Era 7 */
+ case OP_PCL_SSL30_DES40_CBC_SHA:
+ case OP_PCL_SSL30_DES_CBC_SHA_2:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_5:
+ case OP_PCL_SSL30_DES40_CBC_SHA_2:
+ case OP_PCL_SSL30_DES_CBC_SHA_3:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_6:
+ case OP_PCL_SSL30_DES40_CBC_SHA_3:
+ case OP_PCL_SSL30_DES_CBC_SHA_4:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_7:
+ case OP_PCL_SSL30_DES40_CBC_SHA_4:
+ case OP_PCL_SSL30_DES_CBC_SHA_5:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_8:
+ case OP_PCL_SSL30_DES40_CBC_SHA_5:
+ case OP_PCL_SSL30_DES_CBC_SHA_6:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_9:
+ case OP_PCL_SSL30_DES40_CBC_SHA_6:
+ case OP_PCL_SSL30_DES_CBC_SHA_7:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_10:
+ case OP_PCL_SSL30_DES_CBC_SHA:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA:
+ case OP_PCL_SSL30_DES_CBC_MD5:
+ case OP_PCL_SSL30_3DES_EDE_CBC_MD5:
+ case OP_PCL_SSL30_DES40_CBC_SHA_7:
+ case OP_PCL_SSL30_DES40_CBC_MD5:
+ case OP_PCL_SSL30_AES_128_CBC_SHA:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_2:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_3:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_4:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_5:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_6:
+ case OP_PCL_SSL30_AES_256_CBC_SHA:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_2:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_3:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_4:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_5:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_6:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_2:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_3:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_4:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_5:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_2:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_3:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_4:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_5:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_6:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_6:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_2:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_7:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_7:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_3:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_8:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_8:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_4:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_9:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_9:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_1:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_1:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_2:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_2:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_3:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_3:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_4:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_4:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_5:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_5:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_6:
+ case OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_PSK_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_11:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_10:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_10:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_12:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_11:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_11:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_12:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_13:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_12:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_14:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_13:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_13:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_15:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_14:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_14:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_16:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_17:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_18:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_15:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_16:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_17:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_15:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_16:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_17:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_TLS12_3DES_EDE_CBC_MD5:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA160:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA224:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA256:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA384:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA512:
+ case OP_PCL_TLS12_AES_128_CBC_SHA160:
+ case OP_PCL_TLS12_AES_128_CBC_SHA224:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256:
+ case OP_PCL_TLS12_AES_128_CBC_SHA384:
+ case OP_PCL_TLS12_AES_128_CBC_SHA512:
+ case OP_PCL_TLS12_AES_192_CBC_SHA160:
+ case OP_PCL_TLS12_AES_192_CBC_SHA224:
+ case OP_PCL_TLS12_AES_192_CBC_SHA256:
+ case OP_PCL_TLS12_AES_192_CBC_SHA512:
+ case OP_PCL_TLS12_AES_256_CBC_SHA160:
+ case OP_PCL_TLS12_AES_256_CBC_SHA224:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256:
+ case OP_PCL_TLS12_AES_256_CBC_SHA384:
+ case OP_PCL_TLS12_AES_256_CBC_SHA512:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA160:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA384:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA224:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA512:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA256:
+ case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE:
+ case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_ike_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_IKE_HMAC_MD5:
+ case OP_PCL_IKE_HMAC_SHA1:
+ case OP_PCL_IKE_HMAC_AES128_CBC:
+ case OP_PCL_IKE_HMAC_SHA256:
+ case OP_PCL_IKE_HMAC_SHA384:
+ case OP_PCL_IKE_HMAC_SHA512:
+ case OP_PCL_IKE_HMAC_AES128_CMAC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_ipsec_proto(uint16_t protoinfo)
+{
+ uint16_t proto_cls1 = protoinfo & OP_PCL_IPSEC_CIPHER_MASK;
+ uint16_t proto_cls2 = protoinfo & OP_PCL_IPSEC_AUTH_MASK;
+
+ switch (proto_cls1) {
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ /* CCM, GCM, GMAC require PROTINFO[7:0] = 0 */
+ if (proto_cls2 == OP_PCL_IPSEC_HMAC_NULL)
+ return 0;
+ return -EINVAL;
+ case OP_PCL_IPSEC_NULL:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_AES_CTR:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (proto_cls2) {
+ case OP_PCL_IPSEC_HMAC_NULL:
+ case OP_PCL_IPSEC_HMAC_MD5_96:
+ case OP_PCL_IPSEC_HMAC_SHA1_96:
+ case OP_PCL_IPSEC_AES_XCBC_MAC_96:
+ case OP_PCL_IPSEC_HMAC_MD5_128:
+ case OP_PCL_IPSEC_HMAC_SHA1_160:
+ case OP_PCL_IPSEC_AES_CMAC_96:
+ case OP_PCL_IPSEC_HMAC_SHA2_256_128:
+ case OP_PCL_IPSEC_HMAC_SHA2_384_192:
+ case OP_PCL_IPSEC_HMAC_SHA2_512_256:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_srtp_proto(uint16_t protoinfo)
+{
+ uint16_t proto_cls1 = protoinfo & OP_PCL_SRTP_CIPHER_MASK;
+ uint16_t proto_cls2 = protoinfo & OP_PCL_SRTP_AUTH_MASK;
+
+ switch (proto_cls1) {
+ case OP_PCL_SRTP_AES_CTR:
+ switch (proto_cls2) {
+ case OP_PCL_SRTP_HMAC_SHA1_160:
+ return 0;
+ }
+ /* no break */
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_macsec_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_MACSEC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_wifi_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_WIFI:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_wimax_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_WIMAX_OFDM:
+ case OP_PCL_WIMAX_OFDMA:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Allowed blob proto flags for each SEC Era */
+static const uint32_t proto_blob_flags[] = {
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM
+};
+
+static inline int
+__rta_blob_proto(uint16_t protoinfo)
+{
+ if (protoinfo & ~proto_blob_flags[rta_sec_era])
+ return -EINVAL;
+
+ switch (protoinfo & OP_PCL_BLOB_FORMAT_MASK) {
+ case OP_PCL_BLOB_FORMAT_NORMAL:
+ case OP_PCL_BLOB_FORMAT_MASTER_VER:
+ case OP_PCL_BLOB_FORMAT_TEST:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_BLOB_REG_MASK) {
+ case OP_PCL_BLOB_AFHA_SBOX:
+ if (rta_sec_era < RTA_SEC_ERA_3)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_BLOB_REG_MEMORY:
+ case OP_PCL_BLOB_REG_KEY1:
+ case OP_PCL_BLOB_REG_KEY2:
+ case OP_PCL_BLOB_REG_SPLIT:
+ case OP_PCL_BLOB_REG_PKE:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_dlc_proto(uint16_t protoinfo)
+{
+ if ((rta_sec_era < RTA_SEC_ERA_2) &&
+ (protoinfo & (OP_PCL_PKPROT_DSA_MSG | OP_PCL_PKPROT_HASH_MASK |
+ OP_PCL_PKPROT_EKT_Z | OP_PCL_PKPROT_DECRYPT_Z |
+ OP_PCL_PKPROT_DECRYPT_PRI)))
+ return -EINVAL;
+
+ switch (protoinfo & OP_PCL_PKPROT_HASH_MASK) {
+ case OP_PCL_PKPROT_HASH_MD5:
+ case OP_PCL_PKPROT_HASH_SHA1:
+ case OP_PCL_PKPROT_HASH_SHA224:
+ case OP_PCL_PKPROT_HASH_SHA256:
+ case OP_PCL_PKPROT_HASH_SHA384:
+ case OP_PCL_PKPROT_HASH_SHA512:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+__rta_rsa_enc_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_RSAPROT_OP_MASK) {
+ case OP_PCL_RSAPROT_OP_ENC_F_IN:
+ if ((protoinfo & OP_PCL_RSAPROT_FFF_MASK) !=
+ OP_PCL_RSAPROT_FFF_RED)
+ return -EINVAL;
+ break;
+ case OP_PCL_RSAPROT_OP_ENC_F_OUT:
+ switch (protoinfo & OP_PCL_RSAPROT_FFF_MASK) {
+ case OP_PCL_RSAPROT_FFF_RED:
+ case OP_PCL_RSAPROT_FFF_ENC:
+ case OP_PCL_RSAPROT_FFF_EKT:
+ case OP_PCL_RSAPROT_FFF_TK_ENC:
+ case OP_PCL_RSAPROT_FFF_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+__rta_rsa_dec_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_RSAPROT_OP_MASK) {
+ case OP_PCL_RSAPROT_OP_DEC_ND:
+ case OP_PCL_RSAPROT_OP_DEC_PQD:
+ case OP_PCL_RSAPROT_OP_DEC_PQDPDQC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_RSAPROT_PPP_MASK) {
+ case OP_PCL_RSAPROT_PPP_RED:
+ case OP_PCL_RSAPROT_PPP_ENC:
+ case OP_PCL_RSAPROT_PPP_EKT:
+ case OP_PCL_RSAPROT_PPP_TK_ENC:
+ case OP_PCL_RSAPROT_PPP_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (protoinfo & OP_PCL_RSAPROT_FMT_PKCSV15)
+ switch (protoinfo & OP_PCL_RSAPROT_FFF_MASK) {
+ case OP_PCL_RSAPROT_FFF_RED:
+ case OP_PCL_RSAPROT_FFF_ENC:
+ case OP_PCL_RSAPROT_FFF_EKT:
+ case OP_PCL_RSAPROT_FFF_TK_ENC:
+ case OP_PCL_RSAPROT_FFF_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * DKP Protocol - Restrictions on key (SRC,DST) combinations
+ * For e.g. key_in_out[0][0] = 1 means (SRC=IMM,DST=IMM) combination is allowed
+ */
+static const uint8_t key_in_out[4][4] = { {1, 0, 0, 0},
+ {1, 1, 1, 1},
+ {1, 0, 1, 0},
+ {1, 0, 0, 1} };
+
+static inline int
+__rta_dkp_proto(uint16_t protoinfo)
+{
+ int key_src = (protoinfo & OP_PCL_DKP_SRC_MASK) >> OP_PCL_DKP_SRC_SHIFT;
+ int key_dst = (protoinfo & OP_PCL_DKP_DST_MASK) >> OP_PCL_DKP_DST_SHIFT;
+
+ if (!key_in_out[key_src][key_dst]) {
+ pr_err("PROTO_DESC: Invalid DKP key (SRC,DST)\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static inline int
+__rta_3g_dcrc_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_3G_DCRC_CRC7:
+ case OP_PCL_3G_DCRC_CRC11:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_3g_rlc_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_3G_RLC_NULL:
+ case OP_PCL_3G_RLC_KASUMI:
+ case OP_PCL_3G_RLC_SNOW:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_lte_pdcp_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_LTE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5)
+ break;
+ case OP_PCL_LTE_NULL:
+ case OP_PCL_LTE_SNOW:
+ case OP_PCL_LTE_AES:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_lte_pdcp_mixed_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_LTE_MIXED_AUTH_MASK) {
+ case OP_PCL_LTE_MIXED_AUTH_NULL:
+ case OP_PCL_LTE_MIXED_AUTH_SNOW:
+ case OP_PCL_LTE_MIXED_AUTH_AES:
+ case OP_PCL_LTE_MIXED_AUTH_ZUC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_LTE_MIXED_ENC_MASK) {
+ case OP_PCL_LTE_MIXED_ENC_NULL:
+ case OP_PCL_LTE_MIXED_ENC_SNOW:
+ case OP_PCL_LTE_MIXED_ENC_AES:
+ case OP_PCL_LTE_MIXED_ENC_ZUC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+struct proto_map {
+ uint32_t optype;
+ uint32_t protid;
+ int (*protoinfo_func)(uint16_t);
+};
+
+static const struct proto_map proto_table[] = {
+/*1*/ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_SSL30_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS10_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS11_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS12_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DTLS10_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV1_PRF, __rta_ike_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV2_PRF, __rta_ike_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_PUBLICKEYPAIR, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DSASIGN, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DSAVERIFY, __rta_dlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC, __rta_ipsec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_SRTP, __rta_srtp_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_SSL30, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS10, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS11, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS12, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DTLS10, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_MACSEC, __rta_macsec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIFI, __rta_wifi_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIMAX, __rta_wimax_proto},
+/*21*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_BLOB, __rta_blob_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DIFFIEHELLMAN, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_RSAENCRYPT, __rta_rsa_enc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_RSADECRYPT, __rta_rsa_dec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_DCRC, __rta_3g_dcrc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_RLC_PDU, __rta_3g_rlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_RLC_SDU, __rta_3g_rlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_USER, __rta_lte_pdcp_proto},
+/*29*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_CTRL, __rta_lte_pdcp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_MD5, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA1, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA224, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA256, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA384, __rta_dkp_proto},
+/*35*/ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA512, __rta_dkp_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_PUBLICKEYPAIR, __rta_dlc_proto},
+/*37*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DSASIGN, __rta_dlc_proto},
+/*38*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ __rta_lte_pdcp_mixed_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC_NEW, __rta_ipsec_proto},
+};
+
+/*
+ * Allowed OPERATION protocols for each SEC Era.
+ * Values represent the number of entries from proto_table[] that are supported.
+ */
+static const unsigned int proto_table_sz[] = {21, 29, 29, 29, 29, 35, 37, 39};
+
+static inline int
+rta_proto_operation(struct program *program, uint32_t optype,
+ uint32_t protid, uint16_t protoinfo)
+{
+ uint32_t opcode = CMD_OPERATION;
+ unsigned int i, found = 0;
+ uint32_t optype_tmp = optype;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ for (i = 0; i < proto_table_sz[rta_sec_era]; i++) {
+ /* clear last bit in optype to match also decap proto */
+ optype_tmp &= (uint32_t)~(1 << OP_TYPE_SHIFT);
+ if (optype_tmp == proto_table[i].optype) {
+ if (proto_table[i].protid == protid) {
+ /* nothing else to verify */
+ if (proto_table[i].protoinfo_func == NULL) {
+ found = 1;
+ break;
+ }
+ /* check protoinfo */
+ ret = (*proto_table[i].protoinfo_func)
+ (protoinfo);
+ if (ret < 0) {
+ pr_err("PROTO_DESC: Bad PROTO Type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ found = 1;
+ break;
+ }
+ }
+ }
+ if (!found) {
+ pr_err("PROTO_DESC: Operation Type Mismatch. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ __rta_out32(program, opcode | optype | protid | protoinfo);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static inline int
+rta_dkp_proto(struct program *program, uint32_t protid,
+ uint16_t key_src, uint16_t key_dst,
+ uint16_t keylen, uint64_t key,
+ enum rta_data_type key_type)
+{
+ unsigned int start_pc = program->current_pc;
+ unsigned int in_words = 0, out_words = 0;
+ int ret;
+
+ key_src &= OP_PCL_DKP_SRC_MASK;
+ key_dst &= OP_PCL_DKP_DST_MASK;
+ keylen &= OP_PCL_DKP_KEY_MASK;
+
+ ret = rta_proto_operation(program, OP_TYPE_UNI_PROTOCOL, protid,
+ key_src | key_dst | keylen);
+ if (ret < 0)
+ return ret;
+
+ if ((key_src == OP_PCL_DKP_SRC_PTR) ||
+ (key_src == OP_PCL_DKP_SRC_SGF)) {
+ __rta_out64(program, program->ps, key);
+ in_words = program->ps ? 2 : 1;
+ } else if (key_src == OP_PCL_DKP_SRC_IMM) {
+ __rta_inline_data(program, key, inline_flags(key_type), keylen);
+ in_words = (unsigned int)((keylen + 3) / 4);
+ }
+
+ if ((key_dst == OP_PCL_DKP_DST_PTR) ||
+ (key_dst == OP_PCL_DKP_DST_SGF)) {
+ out_words = in_words;
+ } else if (key_dst == OP_PCL_DKP_DST_IMM) {
+ out_words = split_key_len(protid) / 4;
+ }
+
+ if (out_words < in_words) {
+ pr_err("PROTO_DESC: DKP doesn't currently support a smaller descriptor\n");
+ program->first_error_pc = start_pc;
+ return -EINVAL;
+ }
+
+ /* If needed, reserve space in resulting descriptor for derived key */
+ program->current_pc += (out_words - in_words);
+
+ return (int)start_pc;
+}
+
+#endif /* __RTA_PROTOCOL_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
new file mode 100644
index 00000000..6e666108
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
@@ -0,0 +1,790 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_SEC_RUN_TIME_ASM_H__
+#define __RTA_SEC_RUN_TIME_ASM_H__
+
+#include "hw/desc.h"
+
+/* hw/compat.h is not delivered in kernel */
+#ifndef __KERNEL__
+#include "hw/compat.h"
+#endif
+
+/**
+ * enum rta_sec_era - SEC HW block revisions supported by the RTA library
+ * @RTA_SEC_ERA_1: SEC Era 1
+ * @RTA_SEC_ERA_2: SEC Era 2
+ * @RTA_SEC_ERA_3: SEC Era 3
+ * @RTA_SEC_ERA_4: SEC Era 4
+ * @RTA_SEC_ERA_5: SEC Era 5
+ * @RTA_SEC_ERA_6: SEC Era 6
+ * @RTA_SEC_ERA_7: SEC Era 7
+ * @RTA_SEC_ERA_8: SEC Era 8
+ * @MAX_SEC_ERA: maximum SEC HW block revision supported by RTA library
+ */
+enum rta_sec_era {
+ RTA_SEC_ERA_1,
+ RTA_SEC_ERA_2,
+ RTA_SEC_ERA_3,
+ RTA_SEC_ERA_4,
+ RTA_SEC_ERA_5,
+ RTA_SEC_ERA_6,
+ RTA_SEC_ERA_7,
+ RTA_SEC_ERA_8,
+ MAX_SEC_ERA = RTA_SEC_ERA_8
+};
+
+/**
+ * DEFAULT_SEC_ERA - the default value for the SEC era in case the user provides
+ * an unsupported value.
+ */
+#define DEFAULT_SEC_ERA MAX_SEC_ERA
+
+/**
+ * USER_SEC_ERA - translates the SEC Era from internal to user representation.
+ * @sec_era: SEC Era in internal (library) representation
+ */
+#define USER_SEC_ERA(sec_era) (sec_era + 1)
+
+/**
+ * INTL_SEC_ERA - translates the SEC Era from user representation to internal.
+ * @sec_era: SEC Era in user representation
+ */
+#define INTL_SEC_ERA(sec_era) (sec_era - 1)
+
+/**
+ * enum rta_jump_type - Types of action taken by JUMP command
+ * @LOCAL_JUMP: conditional jump to an offset within the descriptor buffer
+ * @FAR_JUMP: conditional jump to a location outside the descriptor buffer,
+ * indicated by the POINTER field after the JUMP command.
+ * @HALT: conditional halt - stop the execution of the current descriptor and
+ * writes PKHA / Math condition bits as status / error code.
+ * @HALT_STATUS: conditional halt with user-specified status - stop the
+ * execution of the current descriptor and writes the value of
+ * "LOCAL OFFSET" JUMP field as status / error code.
+ * @GOSUB: conditional subroutine call - similar to @LOCAL_JUMP, but also saves
+ * return address in the Return Address register; subroutine calls
+ * cannot be nested.
+ * @RETURN: conditional subroutine return - similar to @LOCAL_JUMP, but the
+ * offset is taken from the Return Address register.
+ * @LOCAL_JUMP_INC: similar to @LOCAL_JUMP, but increment the register specified
+ * in "SRC_DST" JUMP field before evaluating the jump
+ * condition.
+ * @LOCAL_JUMP_DEC: similar to @LOCAL_JUMP, but decrement the register specified
+ * in "SRC_DST" JUMP field before evaluating the jump
+ * condition.
+ */
+enum rta_jump_type {
+ LOCAL_JUMP,
+ FAR_JUMP,
+ HALT,
+ HALT_STATUS,
+ GOSUB,
+ RETURN,
+ LOCAL_JUMP_INC,
+ LOCAL_JUMP_DEC
+};
+
+/**
+ * enum rta_jump_cond - How test conditions are evaluated by JUMP command
+ * @ALL_TRUE: perform action if ALL selected conditions are true
+ * @ALL_FALSE: perform action if ALL selected conditions are false
+ * @ANY_TRUE: perform action if ANY of the selected conditions is true
+ * @ANY_FALSE: perform action if ANY of the selected conditions is false
+ */
+enum rta_jump_cond {
+ ALL_TRUE,
+ ALL_FALSE,
+ ANY_TRUE,
+ ANY_FALSE
+};
+
+/**
+ * enum rta_share_type - Types of sharing for JOB_HDR and SHR_HDR commands
+ * @SHR_NEVER: nothing is shared; descriptors can execute in parallel (i.e. no
+ * dependencies are allowed between them).
+ * @SHR_WAIT: shared descriptor and keys are shared once the descriptor sets
+ * "OK to share" in DECO Control Register (DCTRL).
+ * @SHR_SERIAL: shared descriptor and keys are shared once the descriptor has
+ * completed.
+ * @SHR_ALWAYS: shared descriptor is shared anytime after the descriptor is
+ * loaded.
+ * @SHR_DEFER: valid only for JOB_HDR; sharing type is the one specified
+ * in the shared descriptor associated with the job descriptor.
+ */
+enum rta_share_type {
+ SHR_NEVER,
+ SHR_WAIT,
+ SHR_SERIAL,
+ SHR_ALWAYS,
+ SHR_DEFER
+};
+
+/**
+ * enum rta_data_type - Indicates how is the data provided and how to include it
+ * in the descriptor.
+ * @RTA_DATA_PTR: Data is in memory and accessed by reference; data address is a
+ * physical (bus) address.
+ * @RTA_DATA_IMM: Data is inlined in descriptor and accessed as immediate data;
+ * data address is a virtual address.
+ * @RTA_DATA_IMM_DMA: (AIOP only) Data is inlined in descriptor and accessed as
+ * immediate data; data address is a physical (bus) address
+ * in external memory and CDMA is programmed to transfer the
+ * data into descriptor buffer being built in Workspace Area.
+ */
+enum rta_data_type {
+ RTA_DATA_PTR = 1,
+ RTA_DATA_IMM,
+ RTA_DATA_IMM_DMA
+};
+
+/* Registers definitions */
+enum rta_regs {
+ /* CCB Registers */
+ CONTEXT1 = 1,
+ CONTEXT2,
+ KEY1,
+ KEY2,
+ KEY1SZ,
+ KEY2SZ,
+ ICV1SZ,
+ ICV2SZ,
+ DATA1SZ,
+ DATA2SZ,
+ ALTDS1,
+ IV1SZ,
+ AAD1SZ,
+ MODE1,
+ MODE2,
+ CCTRL,
+ DCTRL,
+ ICTRL,
+ CLRW,
+ CSTAT,
+ IFIFO,
+ NFIFO,
+ OFIFO,
+ PKASZ,
+ PKBSZ,
+ PKNSZ,
+ PKESZ,
+ /* DECO Registers */
+ MATH0,
+ MATH1,
+ MATH2,
+ MATH3,
+ DESCBUF,
+ JOBDESCBUF,
+ SHAREDESCBUF,
+ DPOVRD,
+ DJQDA,
+ DSTAT,
+ DPID,
+ DJQCTRL,
+ ALTSOURCE,
+ SEQINSZ,
+ SEQOUTSZ,
+ VSEQINSZ,
+ VSEQOUTSZ,
+ /* PKHA Registers */
+ PKA,
+ PKN,
+ PKA0,
+ PKA1,
+ PKA2,
+ PKA3,
+ PKB,
+ PKB0,
+ PKB1,
+ PKB2,
+ PKB3,
+ PKE,
+ /* Pseudo registers */
+ AB1,
+ AB2,
+ ABD,
+ IFIFOABD,
+ IFIFOAB1,
+ IFIFOAB2,
+ AFHA_SBOX,
+ MDHA_SPLIT_KEY,
+ JOBSRC,
+ ZERO,
+ ONE,
+ AAD1,
+ IV1,
+ IV2,
+ MSG1,
+ MSG2,
+ MSG,
+ MSG_CKSUM,
+ MSGOUTSNOOP,
+ MSGINSNOOP,
+ ICV1,
+ ICV2,
+ SKIP,
+ NONE,
+ RNGOFIFO,
+ RNG,
+ IDFNS,
+ ODFNS,
+ NFIFOSZ,
+ SZ,
+ PAD,
+ SAD1,
+ AAD2,
+ BIT_DATA,
+ NFIFO_SZL,
+ NFIFO_SZM,
+ NFIFO_L,
+ NFIFO_M,
+ SZL,
+ SZM,
+ JOBDESCBUF_EFF,
+ SHAREDESCBUF_EFF,
+ METADATA,
+ GTR,
+ STR,
+ OFIFO_SYNC,
+ MSGOUTSNOOP_ALT
+};
+
+/* Command flags */
+#define FLUSH1 BIT(0)
+#define LAST1 BIT(1)
+#define LAST2 BIT(2)
+#define IMMED BIT(3)
+#define SGF BIT(4)
+#define VLF BIT(5)
+#define EXT BIT(6)
+#define CONT BIT(7)
+#define SEQ BIT(8)
+#define AIDF BIT(9)
+#define FLUSH2 BIT(10)
+#define CLASS1 BIT(11)
+#define CLASS2 BIT(12)
+#define BOTH BIT(13)
+
+/**
+ * DCOPY - (AIOP only) command param is pointer to external memory
+ *
+ * CDMA must be used to transfer the key via DMA into Workspace Area.
+ * Valid only in combination with IMMED flag.
+ */
+#define DCOPY BIT(30)
+
+#define COPY BIT(31) /* command param is pointer (not immediate)
+ * valid only in combination when IMMED
+ */
+
+#define __COPY_MASK (COPY | DCOPY)
+
+/* SEQ IN/OUT PTR Command specific flags */
+#define RBS BIT(16)
+#define INL BIT(17)
+#define PRE BIT(18)
+#define RTO BIT(19)
+#define RJD BIT(20)
+#define SOP BIT(21)
+#define RST BIT(22)
+#define EWS BIT(23)
+
+#define ENC BIT(14) /* Encrypted Key */
+#define EKT BIT(15) /* AES CCM Encryption (default is
+ * AES ECB Encryption)
+ */
+#define TK BIT(16) /* Trusted Descriptor Key (default is
+ * Job Descriptor Key)
+ */
+#define NWB BIT(17) /* No Write Back Key */
+#define PTS BIT(18) /* Plaintext Store */
+
+/* HEADER Command specific flags */
+#define RIF BIT(16)
+#define DNR BIT(17)
+#define CIF BIT(18)
+#define PD BIT(19)
+#define RSMS BIT(20)
+#define TD BIT(21)
+#define MTD BIT(22)
+#define REO BIT(23)
+#define SHR BIT(24)
+#define SC BIT(25)
+/* Extended HEADER specific flags */
+#define DSV BIT(7)
+#define DSEL_MASK 0x00000007 /* DECO Select */
+#define FTD BIT(8)
+
+/* JUMP Command specific flags */
+#define NIFP BIT(20)
+#define NIP BIT(21)
+#define NOP BIT(22)
+#define NCP BIT(23)
+#define CALM BIT(24)
+
+#define MATH_Z BIT(25)
+#define MATH_N BIT(26)
+#define MATH_NV BIT(27)
+#define MATH_C BIT(28)
+#define PK_0 BIT(29)
+#define PK_GCD_1 BIT(30)
+#define PK_PRIME BIT(31)
+#define SELF BIT(0)
+#define SHRD BIT(1)
+#define JQP BIT(2)
+
+/* NFIFOADD specific flags */
+#define PAD_ZERO BIT(16)
+#define PAD_NONZERO BIT(17)
+#define PAD_INCREMENT BIT(18)
+#define PAD_RANDOM BIT(19)
+#define PAD_ZERO_N1 BIT(20)
+#define PAD_NONZERO_0 BIT(21)
+#define PAD_N1 BIT(23)
+#define PAD_NONZERO_N BIT(24)
+#define OC BIT(25)
+#define BM BIT(26)
+#define PR BIT(27)
+#define PS BIT(28)
+#define BP BIT(29)
+
+/* MOVE Command specific flags */
+#define WAITCOMP BIT(16)
+#define SIZE_WORD BIT(17)
+#define SIZE_BYTE BIT(18)
+#define SIZE_DWORD BIT(19)
+
+/* MATH command specific flags */
+#define IFB MATH_IFB
+#define NFU MATH_NFU
+#define STL MATH_STL
+#define SSEL MATH_SSEL
+#define SWP MATH_SWP
+#define IMMED2 BIT(31)
+
+/**
+ * struct program - descriptor buffer management structure
+ * @current_pc: current offset in descriptor
+ * @current_instruction: current instruction in descriptor
+ * @first_error_pc: offset of the first error in descriptor
+ * @start_pc: start offset in descriptor buffer
+ * @buffer: buffer carrying descriptor
+ * @shrhdr: shared descriptor header
+ * @jobhdr: job descriptor header
+ * @ps: pointer fields size; if ps is true, pointers will be 36bits in
+ * length; if ps is false, pointers will be 32bits in length
+ * @bswap: if true, perform byte swap on a 4-byte boundary
+ */
+struct program {
+ unsigned int current_pc;
+ unsigned int current_instruction;
+ unsigned int first_error_pc;
+ unsigned int start_pc;
+ uint32_t *buffer;
+ uint32_t *shrhdr;
+ uint32_t *jobhdr;
+ bool ps;
+ bool bswap;
+};
+
+static inline void
+rta_program_cntxt_init(struct program *program,
+ uint32_t *buffer, unsigned int offset)
+{
+ program->current_pc = 0;
+ program->current_instruction = 0;
+ program->first_error_pc = 0;
+ program->start_pc = offset;
+ program->buffer = buffer;
+ program->shrhdr = NULL;
+ program->jobhdr = NULL;
+ program->ps = false;
+ program->bswap = false;
+}
+
+static inline int
+rta_program_finalize(struct program *program)
+{
+ /* Descriptor is usually not allowed to go beyond 64 words size */
+ if (program->current_pc > MAX_CAAM_DESCSIZE)
+ pr_warn("Descriptor Size exceeded max limit of 64 words\n");
+
+ /* Descriptor is erroneous */
+ if (program->first_error_pc) {
+ pr_err("Descriptor creation error\n");
+ return -EINVAL;
+ }
+
+ /* Update descriptor length in shared and job descriptor headers */
+ if (program->shrhdr != NULL)
+ *program->shrhdr |= program->bswap ?
+ swab32(program->current_pc) :
+ program->current_pc;
+ else if (program->jobhdr != NULL)
+ *program->jobhdr |= program->bswap ?
+ swab32(program->current_pc) :
+ program->current_pc;
+
+ return (int)program->current_pc;
+}
+
+static inline unsigned int
+rta_program_set_36bit_addr(struct program *program)
+{
+ program->ps = true;
+ return program->current_pc;
+}
+
+static inline unsigned int
+rta_program_set_bswap(struct program *program)
+{
+ program->bswap = true;
+ return program->current_pc;
+}
+
+static inline void
+__rta_out32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = program->bswap ?
+ swab32(val) : val;
+ program->current_pc++;
+}
+
+static inline void
+__rta_out_be32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = cpu_to_be32(val);
+ program->current_pc++;
+}
+
+static inline void
+__rta_out_le32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = cpu_to_le32(val);
+ program->current_pc++;
+}
+
+static inline void
+__rta_out64(struct program *program, bool is_ext, uint64_t val)
+{
+ if (is_ext) {
+ /*
+ * Since we are guaranteed only a 4-byte alignment in the
+ * descriptor buffer, we have to do 2 x 32-bit (word) writes.
+ * For the order of the 2 words to be correct, we need to
+ * take into account the endianness of the CPU.
+ */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ __rta_out32(program, program->bswap ? lower_32_bits(val) :
+ upper_32_bits(val));
+
+ __rta_out32(program, program->bswap ? upper_32_bits(val) :
+ lower_32_bits(val));
+#else
+ __rta_out32(program, program->bswap ? upper_32_bits(val) :
+ lower_32_bits(val));
+
+ __rta_out32(program, program->bswap ? lower_32_bits(val) :
+ upper_32_bits(val));
+#endif
+ } else {
+ __rta_out32(program, lower_32_bits(val));
+ }
+}
+
+static inline unsigned int
+rta_word(struct program *program, uint32_t val)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out32(program, val);
+
+ return start_pc;
+}
+
+static inline unsigned int
+rta_dword(struct program *program, uint64_t val)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out64(program, true, val);
+
+ return start_pc;
+}
+
+static inline uint32_t
+inline_flags(enum rta_data_type data_type)
+{
+ switch (data_type) {
+ case RTA_DATA_PTR:
+ return 0;
+ case RTA_DATA_IMM:
+ return IMMED | COPY;
+ case RTA_DATA_IMM_DMA:
+ return IMMED | DCOPY;
+ default:
+ /* warn and default to RTA_DATA_PTR */
+ pr_warn("RTA: defaulting to RTA_DATA_PTR parameter type\n");
+ return 0;
+ }
+}
+
+static inline unsigned int
+rta_copy_data(struct program *program, uint8_t *data, unsigned int length)
+{
+ unsigned int i;
+ unsigned int start_pc = program->current_pc;
+ uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];
+
+ for (i = 0; i < length; i++)
+ *tmp++ = data[i];
+ program->current_pc += (length + 3) / 4;
+
+ return start_pc;
+}
+
+#if defined(__EWL__) && defined(AIOP)
+static inline void
+__rta_dma_data(void *ws_dst, uint64_t ext_address, uint16_t size)
+{ cdma_read(ws_dst, ext_address, size); }
+#else
+static inline void
+__rta_dma_data(void *ws_dst __maybe_unused,
+ uint64_t ext_address __maybe_unused,
+ uint16_t size __maybe_unused)
+{ pr_warn("RTA: DCOPY not supported, DMA will be skipped\n"); }
+#endif /* defined(__EWL__) && defined(AIOP) */
+
+static inline void
+__rta_inline_data(struct program *program, uint64_t data,
+ uint32_t copy_data, uint32_t length)
+{
+ if (!copy_data) {
+ __rta_out64(program, length > 4, data);
+ } else if (copy_data & COPY) {
+ uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];
+ uint32_t i;
+
+ for (i = 0; i < length; i++)
+ *tmp++ = ((uint8_t *)(uintptr_t)data)[i];
+ program->current_pc += ((length + 3) / 4);
+ } else if (copy_data & DCOPY) {
+ __rta_dma_data(&program->buffer[program->current_pc], data,
+ (uint16_t)length);
+ program->current_pc += ((length + 3) / 4);
+ }
+}
+
+static inline unsigned int
+rta_desc_len(uint32_t *buffer)
+{
+ if ((*buffer & CMD_MASK) == CMD_DESC_HDR)
+ return *buffer & HDR_DESCLEN_MASK;
+ else
+ return *buffer & HDR_DESCLEN_SHR_MASK;
+}
+
+static inline unsigned int
+rta_desc_bytes(uint32_t *buffer)
+{
+ return (unsigned int)(rta_desc_len(buffer) * CAAM_CMD_SZ);
+}
+
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* or
+ * OP_PCLID_DKP_* - MD5, SHA1, SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline uint32_t
+split_key_len(uint32_t hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const uint8_t mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ uint32_t idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (uint32_t)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline uint32_t
+split_key_pad_len(uint32_t hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
+static inline unsigned int
+rta_set_label(struct program *program)
+{
+ return program->current_pc + program->start_pc;
+}
+
+static inline int
+rta_patch_move(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~MOVE_OFFSET_MASK;
+ opcode |= (new_ref << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_jmp(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~JUMP_OFFSET_MASK;
+ opcode |= (new_ref - (line + program->start_pc)) & JUMP_OFFSET_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_header(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~HDR_START_IDX_MASK;
+ opcode |= (new_ref << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_load(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = (bswap ? swab32(program->buffer[line]) :
+ program->buffer[line]) & (uint32_t)~LDST_OFFSET_MASK;
+
+ if (opcode & (LDST_SRCDST_WORD_DESCBUF | LDST_CLASS_DECO))
+ opcode |= (new_ref << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
+ else
+ opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
+ LDST_OFFSET_MASK;
+
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_store(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~LDST_OFFSET_MASK;
+
+ switch (opcode & LDST_SRCDST_MASK) {
+ case LDST_SRCDST_WORD_DESCBUF:
+ case LDST_SRCDST_WORD_DESCBUF_JOB:
+ case LDST_SRCDST_WORD_DESCBUF_SHARED:
+ case LDST_SRCDST_WORD_DESCBUF_JOB_WE:
+ case LDST_SRCDST_WORD_DESCBUF_SHARED_WE:
+ opcode |= ((new_ref) << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
+ break;
+ default:
+ opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
+ LDST_OFFSET_MASK;
+ }
+
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_raw(struct program *program, int line, unsigned int mask,
+ unsigned int new_val)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~mask;
+ opcode |= new_val & mask;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+__rta_map_opcode(uint32_t name, const uint32_t (*map_table)[2],
+ unsigned int num_of_entries, uint32_t *val)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_of_entries; i++)
+ if (map_table[i][0] == name) {
+ *val = map_table[i][1];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline void
+__rta_map_flags(uint32_t flags, const uint32_t (*flags_table)[2],
+ unsigned int num_of_entries, uint32_t *opcode)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_of_entries; i++) {
+ if (flags_table[i][0] & flags)
+ *opcode |= flags_table[i][1];
+ }
+}
+
+#endif /* __RTA_SEC_RUN_TIME_ASM_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
new file mode 100644
index 00000000..ceb6a871
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_SEQ_IN_OUT_PTR_CMD_H__
+#define __RTA_SEQ_IN_OUT_PTR_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed SEQ IN PTR flags for each SEC Era. */
+static const uint32_t seq_in_ptr_flags[] = {
+ RBS | INL | SGF | PRE | EXT | RTO,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP
+};
+
+/* Allowed SEQ OUT PTR flags for each SEC Era. */
+static const uint32_t seq_out_ptr_flags[] = {
+ SGF | PRE | EXT,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS
+};
+
+static inline int
+rta_seq_in_ptr(struct program *program, uint64_t src,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = CMD_SEQ_IN_PTR;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ /* Parameters checking */
+ if ((flags & RTO) && (flags & PRE)) {
+ pr_err("SEQ IN PTR: Invalid usage of RTO and PRE flags\n");
+ goto err;
+ }
+ if (flags & ~seq_in_ptr_flags[rta_sec_era]) {
+ pr_err("SEQ IN PTR: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & INL) && (flags & RJD)) {
+ pr_err("SEQ IN PTR: Invalid usage of INL and RJD flags\n");
+ goto err;
+ }
+ if ((src) && (flags & (SOP | RTO | PRE))) {
+ pr_err("SEQ IN PTR: Invalid usage of RTO or PRE flag\n");
+ goto err;
+ }
+ if ((flags & SOP) && (flags & (RBS | PRE | RTO | EXT))) {
+ pr_err("SEQ IN PTR: Invalid usage of SOP and (RBS or PRE or RTO or EXT) flags\n");
+ goto err;
+ }
+
+ /* write flag fields */
+ if (flags & RBS)
+ opcode |= SQIN_RBS;
+ if (flags & INL)
+ opcode |= SQIN_INL;
+ if (flags & SGF)
+ opcode |= SQIN_SGF;
+ if (flags & PRE)
+ opcode |= SQIN_PRE;
+ if (flags & RTO)
+ opcode |= SQIN_RTO;
+ if (flags & RJD)
+ opcode |= SQIN_RJD;
+ if (flags & SOP)
+ opcode |= SQIN_SOP;
+ if ((length >> 16) || (flags & EXT)) {
+ if (flags & SOP) {
+ pr_err("SEQ IN PTR: Invalid usage of SOP and EXT flags\n");
+ goto err;
+ }
+
+ opcode |= SQIN_EXT;
+ } else {
+ opcode |= length & SQIN_LEN_MASK;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (!(opcode & (SQIN_PRE | SQIN_RTO | SQIN_SOP)))
+ __rta_out64(program, program->ps, src);
+
+ /* write extended length field */
+ if (opcode & SQIN_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static inline int
+rta_seq_out_ptr(struct program *program, uint64_t dst,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = CMD_SEQ_OUT_PTR;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ /* Parameters checking */
+ if (flags & ~seq_out_ptr_flags[rta_sec_era]) {
+ pr_err("SEQ OUT PTR: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & RTO) && (flags & PRE)) {
+ pr_err("SEQ OUT PTR: Invalid usage of RTO and PRE flags\n");
+ goto err;
+ }
+ if ((dst) && (flags & (RTO | PRE))) {
+ pr_err("SEQ OUT PTR: Invalid usage of RTO or PRE flag\n");
+ goto err;
+ }
+ if ((flags & RST) && !(flags & RTO)) {
+ pr_err("SEQ OUT PTR: RST flag must be used with RTO flag\n");
+ goto err;
+ }
+
+ /* write flag fields */
+ if (flags & SGF)
+ opcode |= SQOUT_SGF;
+ if (flags & PRE)
+ opcode |= SQOUT_PRE;
+ if (flags & RTO)
+ opcode |= SQOUT_RTO;
+ if (flags & RST)
+ opcode |= SQOUT_RST;
+ if (flags & EWS)
+ opcode |= SQOUT_EWS;
+ if ((length >> 16) || (flags & EXT))
+ opcode |= SQOUT_EXT;
+ else
+ opcode |= length & SQOUT_LEN_MASK;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (!(opcode & (SQOUT_PRE | SQOUT_RTO)))
+ __rta_out64(program, program->ps, dst);
+
+ /* write extended length field */
+ if (opcode & SQOUT_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_SEQ_IN_OUT_PTR_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
new file mode 100644
index 00000000..4f694ac2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_SIGNATURE_CMD_H__
+#define __RTA_SIGNATURE_CMD_H__
+
+static inline int
+rta_signature(struct program *program, uint32_t sign_type)
+{
+ uint32_t opcode = CMD_SIGNATURE;
+ unsigned int start_pc = program->current_pc;
+
+ switch (sign_type) {
+ case (SIGN_TYPE_FINAL):
+ case (SIGN_TYPE_FINAL_RESTORE):
+ case (SIGN_TYPE_FINAL_NONZERO):
+ case (SIGN_TYPE_IMM_2):
+ case (SIGN_TYPE_IMM_3):
+ case (SIGN_TYPE_IMM_4):
+ opcode |= sign_type;
+ break;
+ default:
+ pr_err("SIGNATURE Command: Invalid type selection\n");
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+#endif /* __RTA_SIGNATURE_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
new file mode 100644
index 00000000..8b58e544
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_STORE_CMD_H__
+#define __RTA_STORE_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t store_src_table[][2] = {
+/*1*/ { KEY1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_KEYSZ_REG },
+ { KEY2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_KEYSZ_REG },
+ { DJQDA, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_JQDAR },
+ { MODE1, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_MODE_REG },
+ { MODE2, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_MODE_REG },
+ { DJQCTRL, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_JQCTRL },
+ { DATA1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DATASZ_REG },
+ { DATA2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_DATASZ_REG },
+ { DSTAT, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_STAT },
+ { ICV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ICVSZ_REG },
+ { ICV2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_ICVSZ_REG },
+ { DPID, LDST_CLASS_DECO | LDST_SRCDST_WORD_PID },
+ { CCTRL, LDST_SRCDST_WORD_CHACTRL },
+ { ICTRL, LDST_SRCDST_WORD_IRQCTRL },
+ { CLRW, LDST_SRCDST_WORD_CLRW },
+ { MATH0, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH0 },
+ { CSTAT, LDST_SRCDST_WORD_STAT },
+ { MATH1, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH1 },
+ { MATH2, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH2 },
+ { AAD1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DECO_AAD_SZ },
+ { MATH3, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3 },
+ { IV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_CLASS1_IV_SZ },
+ { PKASZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_A_SZ },
+ { PKBSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_B_SZ },
+ { PKESZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_E_SZ },
+ { PKNSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_N_SZ },
+ { CONTEXT1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT },
+ { CONTEXT2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT },
+ { DESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF },
+/*30*/ { JOBDESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF_JOB },
+ { SHAREDESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF_SHARED },
+/*32*/ { JOBDESCBUF_EFF, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DESCBUF_JOB_WE },
+ { SHAREDESCBUF_EFF, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DESCBUF_SHARED_WE },
+/*34*/ { GTR, LDST_CLASS_DECO | LDST_SRCDST_WORD_GTR },
+ { STR, LDST_CLASS_DECO | LDST_SRCDST_WORD_STR }
+};
+
+/*
+ * Allowed STORE sources for each SEC ERA.
+ * Values represent the number of entries from source_src_table[] that are
+ * supported.
+ */
+static const unsigned int store_src_table_sz[] = {29, 31, 33, 33,
+ 33, 33, 35, 35};
+
+static inline int
+rta_store(struct program *program, uint64_t src,
+ uint16_t offset, uint64_t dst, uint32_t length,
+ uint32_t flags)
+{
+ uint32_t opcode = 0, val;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & SEQ)
+ opcode = CMD_SEQ_STORE;
+ else
+ opcode = CMD_STORE;
+
+ /* parameters check */
+ if ((flags & IMMED) && (flags & SGF)) {
+ pr_err("STORE: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ if ((flags & IMMED) && (offset != 0)) {
+ pr_err("STORE: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if ((flags & SEQ) && ((src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) ||
+ (src == SHAREDESCBUF_EFF))) {
+ pr_err("STORE: Invalid SRC type. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (flags & IMMED)
+ opcode |= LDST_IMM;
+
+ if ((flags & SGF) || (flags & VLF))
+ opcode |= LDST_VLF;
+
+ /*
+ * source for data to be stored can be specified as:
+ * - register location; set in src field[9-15];
+ * - if IMMED flag is set, data is set in value field [0-31];
+ * user can give this value as actual value or pointer to data
+ */
+ if (!(flags & IMMED)) {
+ ret = __rta_map_opcode((uint32_t)src, store_src_table,
+ store_src_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("STORE: Invalid source. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* DESC BUFFER: length / offset values are specified in 4-byte words */
+ if ((src == DESCBUF) || (src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) || (src == SHAREDESCBUF_EFF)) {
+ opcode |= (length >> 2);
+ opcode |= (uint32_t)((offset >> 2) << LDST_OFFSET_SHIFT);
+ } else {
+ opcode |= length;
+ opcode |= (uint32_t)(offset << LDST_OFFSET_SHIFT);
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if ((src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) || (src == SHAREDESCBUF_EFF))
+ return (int)start_pc;
+
+ /* for STORE, a pointer to where the data will be stored if needed */
+ if (!(flags & SEQ))
+ __rta_out64(program, program->ps, dst);
+
+ /* for IMMED data, place the data here */
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_STORE_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/dpseci.c b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/dpseci.c
new file mode 100644
index 00000000..de8ca970
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -0,0 +1,643 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpseci.h>
+#include <fsl_dpseci_cmd.h>
+
+/**
+ * dpseci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpseci_id: DPSECI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpseci_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpseci_id,
+ uint16_t *token)
+{
+ struct dpseci_cmd_open *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpseci_cmd_open *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpseci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_create() - Create the DPSECI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPSECI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpseci_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct dpseci_cmd_create *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpseci_cmd_create *)cmd.params;
+ for (i = 0; i < DPSECI_PRIO_NUM; i++)
+ cmd_params->priorities[i] = cfg->priorities[i];
+ cmd_params->num_tx_queues = cfg->num_tx_queues;
+ cmd_params->num_rx_queues = cfg->num_rx_queues;
+ cmd_params->options = cfg->options;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpseci_destroy() - Destroy the DPSECI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpseci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct dpseci_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(object_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpseci_rsp_is_enabled *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
+ *en = dpseci_get_field(rsp_params->en, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_attributes() - Retrieve DPSECI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_attr *attr)
+{
+ struct dpseci_rsp_get_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->options = rsp_params->options;
+ attr->num_tx_queues = rsp_params->num_tx_queues;
+ attr->num_rx_queues = rsp_params->num_rx_queues;
+
+ return 0;
+}
+
+/**
+ * dpseci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of
+ * priorities configured at DPSECI creation; use
+ * DPSECI_ALL_QUEUES to configure all Rx queues identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ const struct dpseci_rx_queue_cfg *cfg)
+{
+ struct dpseci_cmd_set_rx_queue *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_set_rx_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->queue = queue;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpseci_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->order_preservation_en,
+ ORDER_PRESERVATION,
+ cfg->order_preservation_en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of
+ * priorities configured at DPSECI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_rx_queue_attr *attr)
+{
+ struct dpseci_rsp_get_rx_queue *rsp_params;
+ struct dpseci_cmd_get_queue *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_get_queue *)cmd.params;
+ cmd_params->queue = queue;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_get_rx_queue *)cmd.params;
+ attr->user_ctx = le64_to_cpu(rsp_params->user_ctx);
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ attr->dest_cfg.priority = rsp_params->dest_priority;
+ attr->dest_cfg.dest_type =
+ dpseci_get_field(rsp_params->dest_type,
+ DEST_TYPE);
+ attr->order_preservation_en =
+ dpseci_get_field(rsp_params->order_preservation_en,
+ ORDER_PRESERVATION);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of
+ * priorities configured at DPSECI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_tx_queue_attr *attr)
+{
+ struct dpseci_rsp_get_tx_queue *rsp_params;
+ struct dpseci_cmd_get_queue *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_get_queue *)cmd.params;
+ cmd_params->queue = queue;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->priority = rsp_params->priority;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned SEC attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_attr *attr)
+{
+ struct dpseci_rsp_get_sec_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
+ attr->ip_id = le16_to_cpu(rsp_params->ip_id);
+ attr->major_rev = rsp_params->major_rev;
+ attr->minor_rev = rsp_params->minor_rev;
+ attr->era = rsp_params->era;
+ attr->deco_num = rsp_params->deco_num;
+ attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
+ attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
+ attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
+ attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
+ attr->crc_acc_num = rsp_params->crc_acc_num;
+ attr->pk_acc_num = rsp_params->pk_acc_num;
+ attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
+ attr->rng_acc_num = rsp_params->rng_acc_num;
+ attr->md_acc_num = rsp_params->md_acc_num;
+ attr->arc4_acc_num = rsp_params->arc4_acc_num;
+ attr->des_acc_num = rsp_params->des_acc_num;
+ attr->aes_acc_num = rsp_params->aes_acc_num;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @counters: Returned SEC counters
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_counters *counters)
+{
+ struct dpseci_rsp_get_sec_counters *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
+ counters->dequeued_requests =
+ le64_to_cpu(rsp_params->dequeued_requests);
+ counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
+ counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
+ counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
+ counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
+ counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
+ counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_api_version() - Get Data Path SEC Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path sec API
+ * @minor_ver: Minor version of data path sec API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct dpseci_rsp_get_api_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+int dpseci_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct dpseci_cmd_set_congestion_notification *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+
+ cmd_params =
+ (struct dpseci_cmd_set_congestion_notification *)cmd.params;
+ cmd_params->dest_id = cfg->dest_cfg.dest_id;
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->message_ctx = cfg->message_ctx;
+ cmd_params->message_iova = cfg->message_iova;
+ cmd_params->notification_mode = cfg->notification_mode;
+ cmd_params->threshold_entry = cfg->threshold_entry;
+ cmd_params->threshold_exit = cfg->threshold_exit;
+ dpseci_set_field(cmd_params->type_units,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->type_units,
+ CG_UNITS,
+ cfg->units);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpseci_get_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct dpseci_cmd_set_congestion_notification *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params =
+ (struct dpseci_cmd_set_congestion_notification *)cmd.params;
+
+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ cfg->dest_cfg.priority = rsp_params->dest_priority;
+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+ cfg->units = dpseci_get_field(rsp_params->type_units, CG_UNITS);
+ cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->type_units,
+ DEST_TYPE);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
new file mode 100644
index 00000000..12ac005a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef __FSL_DPSECI_H
+#define __FSL_DPSECI_H
+
+/* Data Path SEC Interface API
+ * Contains initialization APIs and runtime control APIs for DPSECI
+ */
+
+struct fsl_mc_io;
+
+/**
+ * General DPSECI macros
+ */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPSECI object
+ */
+#define DPSECI_PRIO_NUM 8
+
+/**
+ * All queues considered; see dpseci_set_rx_queue()
+ */
+#define DPSECI_ALL_QUEUES (uint8_t)(-1)
+
+int dpseci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpseci_id,
+ uint16_t *token);
+
+int dpseci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * Enable the Congestion Group support
+ */
+#define DPSECI_OPT_HAS_CG 0x000020
+
+/**
+ * struct dpseci_cfg - Structure representing DPSECI configuration
+ * @options: Any combination of the following options:
+ * DPSECI_OPT_HAS_CG
+ * DPSECI_OPT_HAS_OPR
+ * DPSECI_OPT_OPR_SHARED
+ * @num_tx_queues: num of queues towards the SEC
+ * @num_rx_queues: num of queues back from the SEC
+ * @priorities: Priorities for the SEC hardware processing;
+ * each place in the array is the priority of the tx queue
+ * towards the SEC,
+ * valid priorities are configured with values 1-8;
+ */
+struct dpseci_cfg {
+ uint32_t options;
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint8_t priorities[DPSECI_PRIO_NUM];
+};
+
+int dpseci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpseci_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpseci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+int dpseci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpseci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpseci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpseci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpseci_attr - Structure representing DPSECI attributes
+ * @id: DPSECI object ID
+ * @num_tx_queues: number of queues towards the SEC
+ * @num_rx_queues: number of queues back from the SEC
+ * @options: Any combination of the following options:
+ * DPSECI_OPT_HAS_CG
+ * DPSECI_OPT_HAS_OPR
+ * DPSECI_OPT_OPR_SHARED
+ */
+struct dpseci_attr {
+ int id;
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint32_t options;
+};
+
+int dpseci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_attr *attr);
+
+/**
+ * enum dpseci_dest - DPSECI destination types
+ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to
+ * dequeue from the queue based on polling or other user-defined
+ * method
+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON
+ * object; user is expected to dequeue from the DPCON channel
+ */
+enum dpseci_dest {
+ DPSECI_DEST_NONE = 0,
+ DPSECI_DEST_DPIO = 1,
+ DPSECI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPSECI_DEST_NONE' option
+ */
+struct dpseci_dest_cfg {
+ enum dpseci_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/**
+ * DPSECI queue modification options
+ */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPSECI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Select to modify the queue's order preservation
+ */
+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
+
+/**
+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
+ * @order_preservation_en: order preservation configuration for the rx queue
+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpseci_rx_queue_cfg {
+ uint32_t options;
+ int order_preservation_en;
+ uint64_t user_ctx;
+ struct dpseci_dest_cfg dest_cfg;
+};
+
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ const struct dpseci_rx_queue_cfg *cfg);
+
+/**
+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @order_preservation_en: Status of the order preservation configuration
+ * on the queue
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpseci_rx_queue_attr {
+ uint64_t user_ctx;
+ int order_preservation_en;
+ struct dpseci_dest_cfg dest_cfg;
+ uint32_t fqid;
+};
+
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_rx_queue_attr *attr);
+
+/**
+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
+ * @priority: SEC hardware processing priority for the queue
+ */
+struct dpseci_tx_queue_attr {
+ uint32_t fqid;
+ uint8_t priority;
+};
+
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_tx_queue_attr *attr);
+
+/**
+ * struct dpseci_sec_attr - Structure representing attributes of the SEC
+ * hardware accelerator
+ * @ip_id: ID for SEC.
+ * @major_rev: Major revision number for SEC.
+ * @minor_rev: Minor revision number for SEC.
+ * @era: SEC Era.
+ * @deco_num: The number of copies of the DECO that are implemented
+ * in this version of SEC.
+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented
+ * in this version of SEC.
+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented
+ * in this version of SEC.
+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
+ * implemented in this version of SEC.
+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
+ * implemented in this version of SEC.
+ * @crc_acc_num: The number of copies of the CRC module that are
+ * implemented in this version of SEC.
+ * @pk_acc_num: The number of copies of the Public Key module that are
+ * implemented in this version of SEC.
+ * @kasumi_acc_num: The number of copies of the Kasumi module that are
+ * implemented in this version of SEC.
+ * @rng_acc_num: The number of copies of the Random Number Generator that
+ * are implemented in this version of SEC.
+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that
+ * are implemented in this version of SEC.
+ * @arc4_acc_num: The number of copies of the ARC4 module that are
+ * implemented in this version of SEC.
+ * @des_acc_num: The number of copies of the DES module that are
+ * implemented in this version of SEC.
+ * @aes_acc_num: The number of copies of the AES module that are
+ * implemented in this version of SEC.
+ **/
+
+struct dpseci_sec_attr {
+ uint16_t ip_id;
+ uint8_t major_rev;
+ uint8_t minor_rev;
+ uint8_t era;
+ uint8_t deco_num;
+ uint8_t zuc_auth_acc_num;
+ uint8_t zuc_enc_acc_num;
+ uint8_t snow_f8_acc_num;
+ uint8_t snow_f9_acc_num;
+ uint8_t crc_acc_num;
+ uint8_t pk_acc_num;
+ uint8_t kasumi_acc_num;
+ uint8_t rng_acc_num;
+ uint8_t md_acc_num;
+ uint8_t arc4_acc_num;
+ uint8_t des_acc_num;
+ uint8_t aes_acc_num;
+};
+
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_attr *attr);
+
+/**
+ * struct dpseci_sec_counters - Structure representing global SEC counters and
+ * not per dpseci counters
+ * @dequeued_requests: Number of Requests Dequeued
+ * @ob_enc_requests: Number of Outbound Encrypt Requests
+ * @ib_dec_requests: Number of Inbound Decrypt Requests
+ * @ob_enc_bytes: Number of Outbound Bytes Encrypted
+ * @ob_prot_bytes: Number of Outbound Bytes Protected
+ * @ib_dec_bytes: Number of Inbound Bytes Decrypted
+ * @ib_valid_bytes: Number of Inbound Bytes Validated
+ */
+struct dpseci_sec_counters {
+ uint64_t dequeued_requests;
+ uint64_t ob_enc_requests;
+ uint64_t ib_dec_requests;
+ uint64_t ob_enc_bytes;
+ uint64_t ob_prot_bytes;
+ uint64_t ib_dec_bytes;
+ uint64_t ib_valid_bytes;
+};
+
+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_counters *counters);
+
+int dpseci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+/**
+ * enum dpseci_congestion_unit - DPSECI congestion units
+ * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpseci_congestion_unit {
+ DPSECI_CONGESTION_UNIT_BYTES = 0,
+ DPSECI_CONGESTION_UNIT_FRAMES
+};
+
+/**
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
+ */
+#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
+ * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
+ * (if enabled)
+ */
+#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
+
+/**
+ * struct dpseci_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: units type
+ * @threshold_entry: above this threshold we enter a congestion state.
+ * set it to '0' to disable it
+ * @threshold_exit: below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned;
+ * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
+ * values
+ */
+struct dpseci_congestion_notification_cfg {
+ enum dpseci_congestion_unit units;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+ uint64_t message_ctx;
+ uint64_t message_iova;
+ struct dpseci_dest_cfg dest_cfg;
+ uint16_t notification_mode;
+};
+
+int dpseci_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpseci_congestion_notification_cfg *cfg);
+
+int dpseci_get_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_congestion_notification_cfg *cfg);
+
+#endif /* __FSL_DPSECI_H */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
new file mode 100644
index 00000000..26cef0f7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef _FSL_DPSECI_CMD_H
+#define _FSL_DPSECI_CMD_H
+
+/* DPSECI Version */
+#define DPSECI_VER_MAJOR 5
+#define DPSECI_VER_MINOR 1
+
+/* Command versioning */
+#define DPSECI_CMD_BASE_VERSION 1
+#define DPSECI_CMD_BASE_VERSION_V2 2
+#define DPSECI_CMD_ID_OFFSET 4
+
+#define DPSECI_CMD_V1(id) \
+ ((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION)
+#define DPSECI_CMD_V2(id) \
+ ((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION_V2)
+
+/* Command IDs */
+#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
+#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
+#define DPSECI_CMDID_CREATE DPSECI_CMD_V2(0x909)
+#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
+#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
+
+#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
+#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
+#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
+#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005)
+#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
+
+#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
+#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
+#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
+#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V1(0x198)
+#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
+
+#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
+#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
+
+/* Macros for accessing command fields smaller than 1byte */
+#define DPSECI_MASK(field) \
+ GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
+ DPSECI_##field##_SHIFT)
+#define dpseci_set_field(var, field, val) \
+ ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
+#define dpseci_get_field(var, field) \
+ (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
+
+#pragma pack(push, 1)
+struct dpseci_cmd_open {
+ uint32_t dpseci_id;
+};
+
+struct dpseci_cmd_create {
+ uint8_t priorities[8];
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint8_t pad[6];
+ uint32_t options;
+};
+
+struct dpseci_cmd_destroy {
+ uint32_t dpseci_id;
+};
+
+#define DPSECI_ENABLE_SHIFT 0
+#define DPSECI_ENABLE_SIZE 1
+
+struct dpseci_rsp_is_enabled {
+ /* only the first LSB */
+ uint8_t en;
+};
+
+struct dpseci_rsp_get_attr {
+ uint32_t id;
+ uint32_t pad;
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint8_t pad1[6];
+ uint32_t options;
+};
+
+#define DPSECI_DEST_TYPE_SHIFT 0
+#define DPSECI_DEST_TYPE_SIZE 4
+
+#define DPSECI_ORDER_PRESERVATION_SHIFT 0
+#define DPSECI_ORDER_PRESERVATION_SIZE 1
+
+struct dpseci_cmd_set_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t queue;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad;
+ uint64_t user_ctx;
+ uint32_t options;
+ /* only the LSB */
+ uint8_t order_preservation_en;
+};
+
+struct dpseci_cmd_get_queue {
+ uint8_t pad[5];
+ uint8_t queue;
+};
+
+struct dpseci_rsp_get_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t pad1;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad2;
+ uint64_t user_ctx;
+ uint32_t fqid;
+ /* only the LSB */
+ uint8_t order_preservation_en;
+
+};
+
+struct dpseci_rsp_get_tx_queue {
+ uint32_t pad;
+ uint32_t fqid;
+ uint8_t priority;
+};
+
+struct dpseci_rsp_get_sec_attr {
+ uint16_t ip_id;
+ uint8_t major_rev;
+ uint8_t minor_rev;
+ uint8_t era;
+ uint8_t pad1[3];
+ uint8_t deco_num;
+ uint8_t zuc_auth_acc_num;
+ uint8_t zuc_enc_acc_num;
+ uint8_t pad2;
+ uint8_t snow_f8_acc_num;
+ uint8_t snow_f9_acc_num;
+ uint8_t crc_acc_num;
+ uint8_t pad3;
+ uint8_t pk_acc_num;
+ uint8_t kasumi_acc_num;
+ uint8_t rng_acc_num;
+ uint8_t pad4;
+ uint8_t md_acc_num;
+ uint8_t arc4_acc_num;
+ uint8_t des_acc_num;
+ uint8_t aes_acc_num;
+};
+
+struct dpseci_rsp_get_sec_counters {
+ uint64_t dequeued_requests;
+ uint64_t ob_enc_requests;
+ uint64_t ib_dec_requests;
+ uint64_t ob_enc_bytes;
+ uint64_t ob_prot_bytes;
+ uint64_t ib_dec_bytes;
+ uint64_t ib_valid_bytes;
+};
+
+struct dpseci_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+#define DPSECI_DEST_TYPE_SHIFT 0
+#define DPSECI_DEST_TYPE_SIZE 4
+#define DPSECI_CG_UNITS_SHIFT 4
+#define DPSECI_CG_UNITS_SIZE 2
+
+struct dpseci_cmd_set_congestion_notification {
+ uint32_t dest_id;
+ uint16_t notification_mode;
+ uint8_t dest_priority;
+ /* from LSB: dest_type: 4 units:2 */
+ uint8_t type_units;
+ uint64_t message_iova;
+ uint64_t message_ctx;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPSECI_CMD_H */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/meson.build b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/meson.build
new file mode 100644
index 00000000..01afc587
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['security', 'mempool_dpaa2']
+sources = files('dpaa2_sec_dpseci.c',
+ 'mc/dpseci.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('mc', 'hw')
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa_sec/Makefile b/src/spdk/dpdk/drivers/crypto/dpaa_sec/Makefile
new file mode 100644
index 00000000..9be44704
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa_sec/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa_sec.a
+
+# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -D _GNU_SOURCE
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa_sec/
+#sharing the hw flib headers from dpaa2_sec pmd
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa_sec_version.map
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec.c
+
+# library dependencies
+
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c b/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c
new file mode 100644
index 00000000..f571050b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -0,0 +1,2419 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sched.h>
+#include <net/if.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_security_driver.h>
+#include <rte_cycles.h>
+#include <rte_dev.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <of.h>
+
+/* RTA header files */
+#include <hw/desc/common.h>
+#include <hw/desc/algo.h>
+#include <hw/desc/ipsec.h>
+
+#include <rte_dpaa_bus.h>
+#include <dpaa_sec.h>
+#include <dpaa_sec_log.h>
+
+enum rta_sec_era rta_sec_era;
+
+int dpaa_logtype_sec;
+
+static uint8_t cryptodev_driver_id;
+
+static __thread struct rte_crypto_op **dpaa_sec_ops;
+static __thread int dpaa_sec_op_nb;
+
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+
+static inline void
+dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
+{
+ if (!ctx->fd_status) {
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ /* report op status to sym->op and then free the ctx memeory */
+ rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+}
+
+static inline struct dpaa_sec_op_ctx *
+dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
+{
+ struct dpaa_sec_op_ctx *ctx;
+ int retval;
+
+ retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
+ if (!ctx || retval) {
+ DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
+ return NULL;
+ }
+ /*
+ * Clear SG memory. There are 16 SG entries of 16 Bytes each.
+ * one call to dcbz_64() clear 64 bytes, hence calling it 4 times
+ * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
+ * each packet, memset is costlier than dcbz_64().
+ */
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
+
+ ctx->ctx_pool = ses->ctx_pool;
+ ctx->vtop_offset = (size_t) ctx
+ - rte_mempool_virt2iova(ctx);
+
+ return ctx;
+}
+
+static inline rte_iova_t
+dpaa_mem_vtop(void *vaddr)
+{
+ const struct rte_memseg *ms;
+
+ ms = rte_mem_virt2memseg(vaddr, NULL);
+ if (ms)
+ return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
+ return (size_t)NULL;
+}
+
+static inline void *
+dpaa_mem_ptov(rte_iova_t paddr)
+{
+ return rte_mem_iova2virt(paddr);
+}
+
+static void
+ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
+ fq->fqid, msg->ern.rc, msg->ern.seqnum);
+}
+
+/* initialize the queue with dest chan as caam chan so that
+ * all the packets in this queue could be dispatched into caam
+ */
+static int
+dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
+ uint32_t fqid_out)
+{
+ struct qm_mcc_initfq fq_opts;
+ uint32_t flags;
+ int ret = -1;
+
+ /* Clear FQ options */
+ memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
+
+ flags = QMAN_INITFQ_FLAG_SCHED;
+ fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
+ QM_INITFQ_WE_CONTEXTB;
+
+ qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
+ fq_opts.fqd.context_b = fqid_out;
+ fq_opts.fqd.dest.channel = qm_channel_caam;
+ fq_opts.fqd.dest.wq = 0;
+
+ fq_in->cb.ern = ern_sec_fq_handler;
+
+ DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
+
+ ret = qman_init_fq(fq_in, flags, &fq_opts);
+ if (unlikely(ret != 0))
+ DPAA_SEC_ERR("qman_init_fq failed %d", ret);
+
+ return ret;
+}
+
+/* something is put into in_fq and caam put the crypto result into out_fq */
+static enum qman_cb_dqrr_result
+dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
+ struct qman_fq *fq __always_unused,
+ const struct qm_dqrr_entry *dqrr)
+{
+ const struct qm_fd *fd;
+ struct dpaa_sec_job *job;
+ struct dpaa_sec_op_ctx *ctx;
+
+ if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
+ return qman_cb_dqrr_defer;
+
+ if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
+ return qman_cb_dqrr_consume;
+
+ fd = &dqrr->fd;
+ /* sg is embedded in an op ctx,
+ * sg[0] is for output
+ * sg[1] for input
+ */
+ job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+ ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+ ctx->fd_status = fd->status;
+ if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ struct qm_sg_entry *sg_out;
+ uint32_t len;
+
+ sg_out = &job->sg[0];
+ hw_sg_to_cpu(sg_out);
+ len = sg_out->length;
+ ctx->op->sym->m_src->pkt_len = len;
+ ctx->op->sym->m_src->data_len = len;
+ }
+ dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
+ dpaa_sec_op_ending(ctx);
+
+ return qman_cb_dqrr_consume;
+}
+
+/* caam result is put into this queue */
+static int
+dpaa_sec_init_tx(struct qman_fq *fq)
+{
+ int ret;
+ struct qm_mcc_initfq opts;
+ uint32_t flags;
+
+ flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
+ QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ ret = qman_create_fq(0, flags, fq);
+ if (unlikely(ret)) {
+ DPAA_SEC_ERR("qman_create_fq failed");
+ return ret;
+ }
+
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
+
+ /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
+
+ fq->cb.dqrr = dqrr_out_fq_cb_rx;
+ fq->cb.ern = ern_sec_fq_handler;
+
+ ret = qman_init_fq(fq, 0, &opts);
+ if (unlikely(ret)) {
+ DPAA_SEC_ERR("unable to init caam source fq!");
+ return ret;
+ }
+
+ return ret;
+}
+
+static inline int is_cipher_only(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int is_auth_only(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int is_aead(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg == 0) &&
+ (ses->auth_alg == 0) &&
+ (ses->aead_alg != 0));
+}
+
+static inline int is_auth_cipher(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
+ (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
+}
+
+static inline int is_proto_ipsec(dpaa_sec_session *ses)
+{
+ return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
+}
+
+static inline int is_encode(dpaa_sec_session *ses)
+{
+ return ses->dir == DIR_ENC;
+}
+
+static inline int is_decode(dpaa_sec_session *ses)
+{
+ return ses->dir == DIR_DEC;
+}
+
+static inline void
+caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
+{
+ switch (ses->auth_alg) {
+ case RTE_CRYPTO_AUTH_NULL:
+ ses->digest_length = 0;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ default:
+ DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
+ }
+}
+
+static inline void
+caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
+{
+ switch (ses->cipher_alg) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
+ alginfo_c->algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
+ alginfo_c->algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
+ alginfo_c->algmode = OP_ALG_AAI_CTR;
+ break;
+ default:
+ DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
+ }
+}
+
+static inline void
+caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
+{
+ switch (ses->aead_alg) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ alginfo->algtype = OP_ALG_ALGSEL_AES;
+ alginfo->algmode = OP_ALG_AAI_GCM;
+ break;
+ default:
+ DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
+ }
+}
+
+
+/* prepare command block of the session */
+static int
+dpaa_sec_prep_cdb(dpaa_sec_session *ses)
+{
+ struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
+ int32_t shared_desc_len = 0;
+ struct sec_cdb *cdb = &ses->cdb;
+ int err;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ int swap = false;
+#else
+ int swap = true;
+#endif
+
+ memset(cdb, 0, sizeof(struct sec_cdb));
+
+ if (is_cipher_only(ses)) {
+ caam_cipher_alg(ses, &alginfo_c);
+ if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported cipher alg");
+ return -ENOTSUP;
+ }
+
+ alginfo_c.key = (size_t)ses->cipher_key.data;
+ alginfo_c.keylen = ses->cipher_key.length;
+ alginfo_c.key_enc_flags = 0;
+ alginfo_c.key_type = RTA_DATA_IMM;
+
+ shared_desc_len = cnstr_shdsc_blkcipher(
+ cdb->sh_desc, true,
+ swap, &alginfo_c,
+ NULL,
+ ses->iv.length,
+ ses->dir);
+ } else if (is_auth_only(ses)) {
+ caam_auth_alg(ses, &alginfo_a);
+ if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported auth alg");
+ return -ENOTSUP;
+ }
+
+ alginfo_a.key = (size_t)ses->auth_key.data;
+ alginfo_a.keylen = ses->auth_key.length;
+ alginfo_a.key_enc_flags = 0;
+ alginfo_a.key_type = RTA_DATA_IMM;
+
+ shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
+ swap, &alginfo_a,
+ !ses->dir,
+ ses->digest_length);
+ } else if (is_aead(ses)) {
+ caam_aead_alg(ses, &alginfo);
+ if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported aead alg");
+ return -ENOTSUP;
+ }
+ alginfo.key = (size_t)ses->aead_key.data;
+ alginfo.keylen = ses->aead_key.length;
+ alginfo.key_enc_flags = 0;
+ alginfo.key_type = RTA_DATA_IMM;
+
+ if (ses->dir == DIR_ENC)
+ shared_desc_len = cnstr_shdsc_gcm_encap(
+ cdb->sh_desc, true, swap,
+ &alginfo,
+ ses->iv.length,
+ ses->digest_length);
+ else
+ shared_desc_len = cnstr_shdsc_gcm_decap(
+ cdb->sh_desc, true, swap,
+ &alginfo,
+ ses->iv.length,
+ ses->digest_length);
+ } else {
+ caam_cipher_alg(ses, &alginfo_c);
+ if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported cipher alg");
+ return -ENOTSUP;
+ }
+
+ alginfo_c.key = (size_t)ses->cipher_key.data;
+ alginfo_c.keylen = ses->cipher_key.length;
+ alginfo_c.key_enc_flags = 0;
+ alginfo_c.key_type = RTA_DATA_IMM;
+
+ caam_auth_alg(ses, &alginfo_a);
+ if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported auth alg");
+ return -ENOTSUP;
+ }
+
+ alginfo_a.key = (size_t)ses->auth_key.data;
+ alginfo_a.keylen = ses->auth_key.length;
+ alginfo_a.key_enc_flags = 0;
+ alginfo_a.key_type = RTA_DATA_IMM;
+
+ cdb->sh_desc[0] = alginfo_c.keylen;
+ cdb->sh_desc[1] = alginfo_a.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)cdb->sh_desc,
+ &cdb->sh_desc[2], 2);
+
+ if (err < 0) {
+ DPAA_SEC_ERR("Crypto: Incorrect key lengths");
+ return err;
+ }
+ if (cdb->sh_desc[2] & 1)
+ alginfo_c.key_type = RTA_DATA_IMM;
+ else {
+ alginfo_c.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)alginfo_c.key);
+ alginfo_c.key_type = RTA_DATA_PTR;
+ }
+ if (cdb->sh_desc[2] & (1<<1))
+ alginfo_a.key_type = RTA_DATA_IMM;
+ else {
+ alginfo_a.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)alginfo_a.key);
+ alginfo_a.key_type = RTA_DATA_PTR;
+ }
+ cdb->sh_desc[0] = 0;
+ cdb->sh_desc[1] = 0;
+ cdb->sh_desc[2] = 0;
+ if (is_proto_ipsec(ses)) {
+ if (ses->dir == DIR_ENC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_encap(
+ cdb->sh_desc,
+ true, swap, &ses->encap_pdb,
+ (uint8_t *)&ses->ip4_hdr,
+ &alginfo_c, &alginfo_a);
+ } else if (ses->dir == DIR_DEC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_decap(
+ cdb->sh_desc,
+ true, swap, &ses->decap_pdb,
+ &alginfo_c, &alginfo_a);
+ }
+ } else {
+ /* Auth_only_len is set as 0 here and it will be
+ * overwritten in fd for each packet.
+ */
+ shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+ true, swap, &alginfo_c, &alginfo_a,
+ ses->iv.length, 0,
+ ses->digest_length, ses->dir);
+ }
+ }
+
+ if (shared_desc_len < 0) {
+ DPAA_SEC_ERR("error in preparing command block");
+ return shared_desc_len;
+ }
+
+ cdb->sh_hdr.hi.field.idlen = shared_desc_len;
+ cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
+ cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
+
+ return 0;
+}
+
+/* qp is lockless, should be accessed by only one thread */
+static int
+dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
+{
+ struct qman_fq *fq;
+ unsigned int pkts = 0;
+ int num_rx_bufs, ret;
+ struct qm_dqrr_entry *dq;
+ uint32_t vdqcr_flags = 0;
+
+ fq = &qp->outq;
+ /*
+ * Until request for four buffers, we provide exact number of buffers.
+ * Otherwise we do not set the QM_VDQCR_EXACT flag.
+ * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
+ * requested, so we request two less in this case.
+ */
+ if (nb_ops < 4) {
+ vdqcr_flags = QM_VDQCR_EXACT;
+ num_rx_bufs = nb_ops;
+ } else {
+ num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+ (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
+ }
+ ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
+ if (ret)
+ return 0;
+
+ do {
+ const struct qm_fd *fd;
+ struct dpaa_sec_job *job;
+ struct dpaa_sec_op_ctx *ctx;
+ struct rte_crypto_op *op;
+
+ dq = qman_dequeue(fq);
+ if (!dq)
+ continue;
+
+ fd = &dq->fd;
+ /* sg is embedded in an op ctx,
+ * sg[0] is for output
+ * sg[1] for input
+ */
+ job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+ ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+ ctx->fd_status = fd->status;
+ op = ctx->op;
+ if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ struct qm_sg_entry *sg_out;
+ uint32_t len;
+
+ sg_out = &job->sg[0];
+ hw_sg_to_cpu(sg_out);
+ len = sg_out->length;
+ op->sym->m_src->pkt_len = len;
+ op->sym->m_src->data_len = len;
+ }
+ if (!ctx->fd_status) {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ ops[pkts++] = op;
+
+ /* report op status to sym->op and then free the ctx memeory */
+ rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+ qman_dqrr_consume(fq, dq);
+ } while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+ return pkts;
+}
+
+static inline struct dpaa_sec_job *
+build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct rte_mbuf *mbuf = sym->m_src;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ phys_addr_t start_addr;
+ uint8_t *old_digest, extra_segs;
+
+ if (is_decode(ses))
+ extra_segs = 3;
+ else
+ extra_segs = 2;
+
+ if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
+ DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+ old_digest = ctx->digest;
+
+ /* output */
+ out_sg = &cf->sg[0];
+ qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
+ out_sg->length = ses->digest_length;
+ cpu_to_hw_sg(out_sg);
+
+ /* input */
+ in_sg = &cf->sg[1];
+ /* need to extend the input to a compound frame */
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ in_sg->length = sym->auth.data.length;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
+
+ /* 1st seg */
+ sg = in_sg + 1;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->auth.data.offset;
+ sg->offset = sym->auth.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ if (is_decode(ses)) {
+ /* Digest verification case */
+ cpu_to_hw_sg(sg);
+ sg++;
+ rte_memcpy(old_digest, sym->auth.digest.data,
+ ses->digest_length);
+ start_addr = dpaa_mem_vtop(old_digest);
+ qm_sg_entry_set64(sg, start_addr);
+ sg->length = ses->digest_length;
+ in_sg->length += ses->digest_length;
+ } else {
+ /* Digest calculation case */
+ sg->length -= ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ cpu_to_hw_sg(in_sg);
+
+ return cf;
+}
+
+/**
+ * packet looks like:
+ * |<----data_len------->|
+ * |ip_header|ah_header|icv|payload|
+ * ^
+ * |
+ * mbuf->pkt.data
+ */
+static inline struct dpaa_sec_job *
+build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct rte_mbuf *mbuf = sym->m_src;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ rte_iova_t start_addr;
+ uint8_t *old_digest;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+ old_digest = ctx->digest;
+
+ start_addr = rte_pktmbuf_iova(mbuf);
+ /* output */
+ sg = &cf->sg[0];
+ qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+ sg->length = ses->digest_length;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ sg = &cf->sg[1];
+ if (is_decode(ses)) {
+ /* need to extend the input to a compound frame */
+ sg->extension = 1;
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+ sg->length = sym->auth.data.length + ses->digest_length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ sg = &cf->sg[2];
+ /* hash result or digest, save digest first */
+ rte_memcpy(old_digest, sym->auth.digest.data,
+ ses->digest_length);
+ qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ cpu_to_hw_sg(sg);
+
+ /* let's check digest by hw */
+ start_addr = dpaa_mem_vtop(old_digest);
+ sg++;
+ qm_sg_entry_set64(sg, start_addr);
+ sg->length = ses->digest_length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ } else {
+ qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ }
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 3;
+ }
+
+ if (req_segs > MAX_SG_ENTRIES) {
+ DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ /* output */
+ out_sg = &cf->sg[0];
+ out_sg->extension = 1;
+ out_sg->length = sym->cipher.data.length;
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
+ cpu_to_hw_sg(out_sg);
+
+ /* 1st seg */
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->cipher.data.offset;
+ sg->offset = sym->cipher.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ mbuf = sym->m_src;
+ in_sg = &cf->sg[1];
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ in_sg->length = sym->cipher.data.length + ses->iv.length;
+
+ sg++;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(in_sg);
+
+ /* IV */
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* 1st seg */
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->cipher.data.offset;
+ sg->offset = sym->cipher.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ src_start_addr = rte_pktmbuf_iova(sym->m_src);
+
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
+
+ /* output */
+ sg = &cf->sg[0];
+ qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
+ sg->length = sym->cipher.data.length + ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ sg = &cf->sg[1];
+
+ /* need to extend the input to a compound frame */
+ sg->extension = 1;
+ sg->final = 1;
+ sg->length = sym->cipher.data.length + ses->iv.length;
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+ cpu_to_hw_sg(sg);
+
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
+ sg->length = sym->cipher.data.length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 4;
+ }
+
+ if (ses->auth_only_len)
+ req_segs++;
+
+ if (req_segs > MAX_SG_ENTRIES) {
+ DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ rte_prefetch0(cf->sg);
+
+ /* output */
+ out_sg = &cf->sg[0];
+ out_sg->extension = 1;
+ if (is_encode(ses))
+ out_sg->length = sym->aead.data.length + ses->auth_only_len
+ + ses->digest_length;
+ else
+ out_sg->length = sym->aead.data.length + ses->auth_only_len;
+
+ /* output sg entries */
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(out_sg);
+
+ /* 1st seg */
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->aead.data.offset +
+ ses->auth_only_len;
+ sg->offset = sym->aead.data.offset - ses->auth_only_len;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->length -= ses->digest_length;
+
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ mbuf = sym->m_src;
+ in_sg = &cf->sg[1];
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ if (is_encode(ses))
+ in_sg->length = ses->iv.length + sym->aead.data.length
+ + ses->auth_only_len;
+ else
+ in_sg->length = ses->iv.length + sym->aead.data.length
+ + ses->auth_only_len + ses->digest_length;
+
+ /* input sg entries */
+ sg++;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(in_sg);
+
+ /* 1st seg IV */
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* 2nd seg auth only */
+ if (ses->auth_only_len) {
+ sg++;
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
+ sg->length = ses->auth_only_len;
+ cpu_to_hw_sg(sg);
+ }
+
+ /* 3rd seg */
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->aead.data.offset;
+ sg->offset = sym->aead.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ if (is_decode(ses)) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ memcpy(ctx->digest, sym->aead.digest.data,
+ ses->digest_length);
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ uint32_t length = 0;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
+
+ if (sym->m_dst)
+ dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
+ else
+ dst_start_addr = src_start_addr;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ /* input */
+ rte_prefetch0(cf->sg);
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ if (is_encode(ses)) {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ if (ses->auth_only_len) {
+ qm_sg_entry_set64(sg,
+ dpaa_mem_vtop(sym->aead.aad.data));
+ sg->length = ses->auth_only_len;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+ sg++;
+ }
+ qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
+ sg->length = sym->aead.data.length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ } else {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ if (ses->auth_only_len) {
+ qm_sg_entry_set64(sg,
+ dpaa_mem_vtop(sym->aead.aad.data));
+ sg->length = ses->auth_only_len;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+ sg++;
+ }
+ qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
+ sg->length = sym->aead.data.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ memcpy(ctx->digest, sym->aead.digest.data,
+ ses->digest_length);
+ sg++;
+
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ }
+ /* input compound frame */
+ cf->sg[1].length = length;
+ cf->sg[1].extension = 1;
+ cf->sg[1].final = 1;
+ cpu_to_hw_sg(&cf->sg[1]);
+
+ /* output */
+ sg++;
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(sg,
+ dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
+ sg->length = sym->aead.data.length + ses->auth_only_len;
+ length = sg->length;
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
+ sg->length = ses->digest_length;
+ length += sg->length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* output compound frame */
+ cf->sg[0].length = length;
+ cf->sg[0].extension = 1;
+ cpu_to_hw_sg(&cf->sg[0]);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 4;
+ }
+
+ if (req_segs > MAX_SG_ENTRIES) {
+ DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ rte_prefetch0(cf->sg);
+
+ /* output */
+ out_sg = &cf->sg[0];
+ out_sg->extension = 1;
+ if (is_encode(ses))
+ out_sg->length = sym->auth.data.length + ses->digest_length;
+ else
+ out_sg->length = sym->auth.data.length;
+
+ /* output sg entries */
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(out_sg);
+
+ /* 1st seg */
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->auth.data.offset;
+ sg->offset = sym->auth.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->length -= ses->digest_length;
+
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ mbuf = sym->m_src;
+ in_sg = &cf->sg[1];
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ if (is_encode(ses))
+ in_sg->length = ses->iv.length + sym->auth.data.length;
+ else
+ in_sg->length = ses->iv.length + sym->auth.data.length
+ + ses->digest_length;
+
+ /* input sg entries */
+ sg++;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(in_sg);
+
+ /* 1st seg IV */
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* 2nd seg */
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->auth.data.offset;
+ sg->offset = sym->auth.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ sg->length -= ses->digest_length;
+ if (is_decode(ses)) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint32_t length = 0;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
+ if (sym->m_dst)
+ dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
+ else
+ dst_start_addr = src_start_addr;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ /* input */
+ rte_prefetch0(cf->sg);
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ if (is_encode(ses)) {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ } else {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+
+ qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ sg++;
+
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ }
+ /* input compound frame */
+ cf->sg[1].length = length;
+ cf->sg[1].extension = 1;
+ cf->sg[1].final = 1;
+ cpu_to_hw_sg(&cf->sg[1]);
+
+ /* output */
+ sg++;
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
+ sg->length = sym->cipher.data.length;
+ length = sg->length;
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+ sg->length = ses->digest_length;
+ length += sg->length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* output compound frame */
+ cf->sg[0].length = length;
+ cf->sg[0].extension = 1;
+ cpu_to_hw_sg(&cf->sg[0]);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ phys_addr_t src_start_addr, dst_start_addr;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+ cf = &ctx->job;
+ ctx->op = op;
+
+ src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
+
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
+
+ /* input */
+ sg = &cf->sg[1];
+ qm_sg_entry_set64(sg, src_start_addr);
+ sg->length = sym->m_src->pkt_len;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
+ /* output */
+ sg = &cf->sg[0];
+ qm_sg_entry_set64(sg, dst_start_addr);
+ sg->length = sym->m_src->buf_len - sym->m_src->data_off;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static uint16_t
+dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ /* Function to transmit the frames to given device and queuepair */
+ uint32_t loop;
+ struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
+ uint16_t num_tx = 0;
+ struct qm_fd fds[DPAA_SEC_BURST], *fd;
+ uint32_t frames_to_send;
+ struct rte_crypto_op *op;
+ struct dpaa_sec_job *cf;
+ dpaa_sec_session *ses;
+ uint32_t auth_only_len;
+ struct qman_fq *inq[DPAA_SEC_BURST];
+
+ while (nb_ops) {
+ frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
+ DPAA_SEC_BURST : nb_ops;
+ for (loop = 0; loop < frames_to_send; loop++) {
+ op = *(ops++);
+ switch (op->sess_type) {
+ case RTE_CRYPTO_OP_WITH_SESSION:
+ ses = (dpaa_sec_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ break;
+ case RTE_CRYPTO_OP_SECURITY_SESSION:
+ ses = (dpaa_sec_session *)
+ get_sec_session_private_data(
+ op->sym->sec_session);
+ break;
+ default:
+ DPAA_SEC_DP_ERR(
+ "sessionless crypto op not supported");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ if (unlikely(!ses->qp || ses->qp != qp)) {
+ DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
+ ses->qp, qp);
+ if (dpaa_sec_attach_sess_q(qp, ses)) {
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ }
+
+ auth_only_len = op->sym->auth.data.length -
+ op->sym->cipher.data.length;
+ if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+ if (is_auth_only(ses)) {
+ cf = build_auth_only(op, ses);
+ } else if (is_cipher_only(ses)) {
+ cf = build_cipher_only(op, ses);
+ } else if (is_aead(ses)) {
+ cf = build_cipher_auth_gcm(op, ses);
+ auth_only_len = ses->auth_only_len;
+ } else if (is_auth_cipher(ses)) {
+ cf = build_cipher_auth(op, ses);
+ } else if (is_proto_ipsec(ses)) {
+ cf = build_proto(op, ses);
+ } else {
+ DPAA_SEC_DP_ERR("not supported ops");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ } else {
+ if (is_auth_only(ses)) {
+ cf = build_auth_only_sg(op, ses);
+ } else if (is_cipher_only(ses)) {
+ cf = build_cipher_only_sg(op, ses);
+ } else if (is_aead(ses)) {
+ cf = build_cipher_auth_gcm_sg(op, ses);
+ auth_only_len = ses->auth_only_len;
+ } else if (is_auth_cipher(ses)) {
+ cf = build_cipher_auth_sg(op, ses);
+ } else {
+ DPAA_SEC_DP_ERR("not supported ops");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ }
+ if (unlikely(!cf)) {
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+
+ fd = &fds[loop];
+ inq[loop] = ses->inq;
+ fd->opaque_addr = 0;
+ fd->cmd = 0;
+ qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
+ fd->_format1 = qm_fd_compound;
+ fd->length29 = 2 * sizeof(struct qm_sg_entry);
+ /* Auth_only_len is set as 0 in descriptor and it is
+ * overwritten here in the fd.cmd which will update
+ * the DPOVRD reg.
+ */
+ if (auth_only_len)
+ fd->cmd = 0x80000000 | auth_only_len;
+
+ }
+send_pkts:
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+ frames_to_send - loop);
+ }
+ nb_ops -= frames_to_send;
+ num_tx += frames_to_send;
+ }
+
+ dpaa_qp->tx_pkts += num_tx;
+ dpaa_qp->tx_errs += nb_ops - num_tx;
+
+ return num_tx;
+}
+
+static uint16_t
+dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t num_rx;
+ struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
+
+ num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
+
+ dpaa_qp->rx_pkts += num_rx;
+ dpaa_qp->rx_errs += nb_ops - num_rx;
+
+ DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+
+ return num_rx;
+}
+
+/** Release queue pair */
+static int
+dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
+ uint16_t qp_id)
+{
+ struct dpaa_sec_dev_private *internals;
+ struct dpaa_sec_qp *qp = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+
+ DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
+
+ internals = dev->data->dev_private;
+ if (qp_id >= internals->max_nb_queue_pairs) {
+ DPAA_SEC_ERR("Max supported qpid %d",
+ internals->max_nb_queue_pairs);
+ return -EINVAL;
+ }
+
+ qp = &internals->qps[qp_id];
+ qp->internals = NULL;
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+/** Setup a queue pair */
+static int
+dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+ __rte_unused int socket_id,
+ __rte_unused struct rte_mempool *session_pool)
+{
+ struct dpaa_sec_dev_private *internals;
+ struct dpaa_sec_qp *qp = NULL;
+
+ DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
+
+ internals = dev->data->dev_private;
+ if (qp_id >= internals->max_nb_queue_pairs) {
+ DPAA_SEC_ERR("Max supported qpid %d",
+ internals->max_nb_queue_pairs);
+ return -EINVAL;
+ }
+
+ qp = &internals->qps[qp_id];
+ qp->internals = internals;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ return 0;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of session structure */
+static unsigned int
+dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return sizeof(dpaa_sec_session);
+}
+
+static int
+dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ dpaa_sec_session *session)
+{
+ session->cipher_alg = xform->cipher.algo;
+ session->iv.length = xform->cipher.iv.length;
+ session->iv.offset = xform->cipher.iv.offset;
+ session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
+ DPAA_SEC_ERR("No Memory for cipher key");
+ return -ENOMEM;
+ }
+ session->cipher_key.length = xform->cipher.key.length;
+
+ memcpy(session->cipher_key.data, xform->cipher.key.data,
+ xform->cipher.key.length);
+ session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ dpaa_sec_session *session)
+{
+ session->auth_alg = xform->auth.algo;
+ session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
+ DPAA_SEC_ERR("No Memory for auth key");
+ return -ENOMEM;
+ }
+ session->auth_key.length = xform->auth.key.length;
+ session->digest_length = xform->auth.digest_length;
+
+ memcpy(session->auth_key.data, xform->auth.key.data,
+ xform->auth.key.length);
+ session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ dpaa_sec_session *session)
+{
+ session->aead_alg = xform->aead.algo;
+ session->iv.length = xform->aead.iv.length;
+ session->iv.offset = xform->aead.iv.offset;
+ session->auth_only_len = xform->aead.aad_length;
+ session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
+ DPAA_SEC_ERR("No Memory for aead key\n");
+ return -ENOMEM;
+ }
+ session->aead_key.length = xform->aead.key.length;
+ session->digest_length = xform->aead.digest_length;
+
+ memcpy(session->aead_key.data, xform->aead.key.data,
+ xform->aead.key.length);
+ session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static struct qman_fq *
+dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
+{
+ unsigned int i;
+
+ for (i = 0; i < qi->max_nb_sessions; i++) {
+ if (qi->inq_attach[i] == 0) {
+ qi->inq_attach[i] = 1;
+ return &qi->inq[i];
+ }
+ }
+ DPAA_SEC_WARN("All ses session in use %x", qi->max_nb_sessions);
+
+ return NULL;
+}
+
+static int
+dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
+{
+ unsigned int i;
+
+ for (i = 0; i < qi->max_nb_sessions; i++) {
+ if (&qi->inq[i] == fq) {
+ qman_retire_fq(fq, NULL);
+ qman_oos_fq(fq);
+ qi->inq_attach[i] = 0;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
+{
+ int ret;
+
+ sess->qp = qp;
+ ret = dpaa_sec_prep_cdb(sess);
+ if (ret) {
+ DPAA_SEC_ERR("Unable to prepare sec cdb");
+ return -1;
+ }
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_SEC_ERR("Failure in affining portal");
+ return ret;
+ }
+ }
+ ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
+ qman_fq_fqid(&qp->outq));
+ if (ret)
+ DPAA_SEC_ERR("Unable to init sec queue");
+
+ return ret;
+}
+
+static int
+dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+ dpaa_sec_session *session = sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(sess == NULL)) {
+ DPAA_SEC_ERR("invalid session struct");
+ return -EINVAL;
+ }
+
+ /* Default IV length = 0 */
+ session->iv.length = 0;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ dpaa_sec_cipher_init(dev, xform, session);
+
+ /* Authentication Only */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+ dpaa_sec_auth_init(dev, xform, session);
+
+ /* Cipher then Authenticate */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ dpaa_sec_cipher_init(dev, xform, session);
+ dpaa_sec_auth_init(dev, xform->next, session);
+ } else {
+ DPAA_SEC_ERR("Not supported: Auth then Cipher");
+ return -EINVAL;
+ }
+
+ /* Authenticate then Cipher */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ dpaa_sec_auth_init(dev, xform, session);
+ dpaa_sec_cipher_init(dev, xform->next, session);
+ } else {
+ DPAA_SEC_ERR("Not supported: Auth then Cipher");
+ return -EINVAL;
+ }
+
+ /* AEAD operation for AES-GCM kind of Algorithms */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ xform->next == NULL) {
+ dpaa_sec_aead_init(dev, xform, session);
+
+ } else {
+ DPAA_SEC_ERR("Invalid crypto type");
+ return -EINVAL;
+ }
+ session->ctx_pool = internals->ctx_pool;
+ session->inq = dpaa_sec_attach_rxq(internals);
+ if (session->inq == NULL) {
+ DPAA_SEC_ERR("unable to attach sec queue");
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ rte_free(session->cipher_key.data);
+ rte_free(session->auth_key.data);
+ memset(session, 0, sizeof(dpaa_sec_session));
+
+ return -EINVAL;
+}
+
+static int
+dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ DPAA_SEC_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ DPAA_SEC_ERR("failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct dpaa_sec_dev_private *qi = dev->data->dev_private;
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ PMD_INIT_FUNC_TRACE();
+
+ dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ if (s->inq)
+ dpaa_sec_detach_rxq(qi, s->inq);
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(s, 0, sizeof(dpaa_sec_session));
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static int
+dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+ struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+ struct rte_crypto_auth_xform *auth_xform;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ dpaa_sec_session *session = (dpaa_sec_session *)sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ cipher_xform = &conf->crypto_xform->cipher;
+ auth_xform = &conf->crypto_xform->next->auth;
+ } else {
+ auth_xform = &conf->crypto_xform->auth;
+ cipher_xform = &conf->crypto_xform->next->cipher;
+ }
+ session->proto_alg = conf->protocol;
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA_SEC_ERR("No Memory for cipher key");
+ return -ENOMEM;
+ }
+
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA_SEC_ERR("No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
+ goto out;
+ default:
+ DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
+ goto out;
+ }
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto out;
+ default:
+ DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
+ goto out;
+ }
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
+ sizeof(session->ip4_hdr));
+ session->ip4_hdr.ip_v = IPVERSION;
+ session->ip4_hdr.ip_hl = 5;
+ session->ip4_hdr.ip_len = rte_cpu_to_be_16(
+ sizeof(session->ip4_hdr));
+ session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+ session->ip4_hdr.ip_id = 0;
+ session->ip4_hdr.ip_off = 0;
+ session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+ session->ip4_hdr.ip_p = (ipsec_xform->proto ==
+ RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
+ : IPPROTO_AH;
+ session->ip4_hdr.ip_sum = 0;
+ session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+ session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+ session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
+ (void *)&session->ip4_hdr,
+ sizeof(struct ip));
+
+ session->encap_pdb.options =
+ (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+ PDBOPTS_ESP_OIHI_PDB_INL |
+ PDBOPTS_ESP_IVSRC |
+ PDBHMO_ESP_ENCAP_DTTL;
+ session->encap_pdb.spi = ipsec_xform->spi;
+ session->encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+ session->dir = DIR_ENC;
+ } else if (ipsec_xform->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+ session->decap_pdb.options = sizeof(struct ip) << 16;
+ session->dir = DIR_DEC;
+ } else
+ goto out;
+ session->ctx_pool = internals->ctx_pool;
+ session->inq = dpaa_sec_attach_rxq(internals);
+ if (session->inq == NULL) {
+ DPAA_SEC_ERR("unable to attach sec queue");
+ goto out;
+ }
+
+
+ return 0;
+out:
+ rte_free(session->auth_key.data);
+ rte_free(session->cipher_key.data);
+ memset(session, 0, sizeof(dpaa_sec_session));
+ return -1;
+}
+
+static int
+dpaa_sec_security_session_create(void *dev,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ DPAA_SEC_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ switch (conf->protocol) {
+ case RTE_SECURITY_PROTOCOL_IPSEC:
+ ret = dpaa_sec_set_ipsec_session(cdev, conf,
+ sess_private_data);
+ break;
+ case RTE_SECURITY_PROTOCOL_MACSEC:
+ return -ENOTSUP;
+ default:
+ return -EINVAL;
+ }
+ if (ret != 0) {
+ DPAA_SEC_ERR("failed to configure session parameters");
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sec_session_private_data(sess, sess_private_data);
+
+ return ret;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static int
+dpaa_sec_security_session_destroy(void *dev __rte_unused,
+ struct rte_security_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ void *sess_priv = get_sec_session_private_data(sess);
+
+ dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(sess, 0, sizeof(dpaa_sec_session));
+ set_sec_session_private_data(sess, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+ return 0;
+}
+
+
+static int
+dpaa_sec_dev_configure(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+
+ char str[20];
+ struct dpaa_sec_dev_private *internals;
+
+ PMD_INIT_FUNC_TRACE();
+
+ internals = dev->data->dev_private;
+ sprintf(str, "ctx_pool_%d", dev->data->dev_id);
+ if (!internals->ctx_pool) {
+ internals->ctx_pool = rte_mempool_create((const char *)str,
+ CTX_POOL_NUM_BUFS,
+ CTX_POOL_BUF_SIZE,
+ CTX_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->ctx_pool) {
+ DPAA_SEC_ERR("%s create failed\n", str);
+ return -ENOMEM;
+ }
+ } else
+ DPAA_SEC_INFO("mempool already created for dev_id : %d",
+ dev->data->dev_id);
+
+ return 0;
+}
+
+static int
+dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+static void
+dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+dpaa_sec_dev_close(struct rte_cryptodev *dev)
+{
+ struct dpaa_sec_dev_private *internals;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev == NULL)
+ return -ENOMEM;
+
+ internals = dev->data->dev_private;
+ rte_mempool_free(internals->ctx_pool);
+ internals->ctx_pool = NULL;
+
+ return 0;
+}
+
+static void
+dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = dpaa_sec_capabilities;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->driver_id = cryptodev_driver_id;
+ }
+}
+
+static struct rte_cryptodev_ops crypto_ops = {
+ .dev_configure = dpaa_sec_dev_configure,
+ .dev_start = dpaa_sec_dev_start,
+ .dev_stop = dpaa_sec_dev_stop,
+ .dev_close = dpaa_sec_dev_close,
+ .dev_infos_get = dpaa_sec_dev_infos_get,
+ .queue_pair_setup = dpaa_sec_queue_pair_setup,
+ .queue_pair_release = dpaa_sec_queue_pair_release,
+ .queue_pair_count = dpaa_sec_queue_pair_count,
+ .sym_session_get_size = dpaa_sec_sym_session_get_size,
+ .sym_session_configure = dpaa_sec_sym_session_configure,
+ .sym_session_clear = dpaa_sec_sym_session_clear
+};
+
+static const struct rte_security_capability *
+dpaa_sec_capabilities_get(void *device __rte_unused)
+{
+ return dpaa_sec_security_cap;
+}
+
+struct rte_security_ops dpaa_sec_security_ops = {
+ .session_create = dpaa_sec_security_session_create,
+ .session_update = NULL,
+ .session_stats_get = NULL,
+ .session_destroy = dpaa_sec_security_session_destroy,
+ .set_pkt_metadata = NULL,
+ .capabilities_get = dpaa_sec_capabilities_get
+};
+
+static int
+dpaa_sec_uninit(struct rte_cryptodev *dev)
+{
+ struct dpaa_sec_dev_private *internals;
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ internals = dev->data->dev_private;
+ rte_free(dev->security_ctx);
+
+ /* In case close has been called, internals->ctx_pool would be NULL */
+ rte_mempool_free(internals->ctx_pool);
+ rte_free(internals);
+
+ DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
+ dev->data->name, rte_socket_id());
+
+ return 0;
+}
+
+static int
+dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
+{
+ struct dpaa_sec_dev_private *internals;
+ struct rte_security_ctx *security_instance;
+ struct dpaa_sec_qp *qp;
+ uint32_t i, flags;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ cryptodev->driver_id = cryptodev_driver_id;
+ cryptodev->dev_ops = &crypto_ops;
+
+ cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
+ cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_SECURITY |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ internals = cryptodev->data->dev_private;
+ internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
+ internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DPAA_SEC_WARN("Device already init by primary process");
+ return 0;
+ }
+
+ /* Initialize security_ctx only for primary process*/
+ security_instance = rte_malloc("rte_security_instances_ops",
+ sizeof(struct rte_security_ctx), 0);
+ if (security_instance == NULL)
+ return -ENOMEM;
+ security_instance->device = (void *)cryptodev;
+ security_instance->ops = &dpaa_sec_security_ops;
+ security_instance->sess_cnt = 0;
+ cryptodev->security_ctx = security_instance;
+
+ for (i = 0; i < internals->max_nb_queue_pairs; i++) {
+ /* init qman fq for queue pair */
+ qp = &internals->qps[i];
+ ret = dpaa_sec_init_tx(&qp->outq);
+ if (ret) {
+ DPAA_SEC_ERR("config tx of queue pair %d", i);
+ goto init_error;
+ }
+ }
+
+ flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
+ QMAN_FQ_FLAG_TO_DCPORTAL;
+ for (i = 0; i < internals->max_nb_sessions; i++) {
+ /* create rx qman fq for sessions*/
+ ret = qman_create_fq(0, flags, &internals->inq[i]);
+ if (unlikely(ret != 0)) {
+ DPAA_SEC_ERR("sec qman_create_fq failed");
+ goto init_error;
+ }
+ }
+
+ RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
+ return 0;
+
+init_error:
+ DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
+
+ dpaa_sec_uninit(cryptodev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
+ struct rte_dpaa_device *dpaa_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ int retval;
+
+ sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);
+
+ cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
+ if (cryptodev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ cryptodev->data->dev_private = rte_zmalloc_socket(
+ "cryptodev private structure",
+ sizeof(struct dpaa_sec_dev_private),
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (cryptodev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ dpaa_dev->crypto_dev = cryptodev;
+ cryptodev->device = &dpaa_dev->device;
+ cryptodev->device->driver = &dpaa_drv->driver;
+
+ /* init user callbacks */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ /* if sec device version is not configured */
+ if (!rta_get_sec_era()) {
+ const struct device_node *caam_node;
+
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ const uint32_t *prop = of_get_property(caam_node,
+ "fsl,sec-era",
+ NULL);
+ if (prop) {
+ rta_set_sec_era(
+ INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
+ break;
+ }
+ }
+ }
+
+ /* Invoke PMD device initialization function */
+ retval = dpaa_sec_dev_init(cryptodev);
+ if (retval == 0)
+ return 0;
+
+ /* In case of error, cleanup is done */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ rte_cryptodev_pmd_release_device(cryptodev);
+
+ return -ENXIO;
+}
+
+static int
+cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ int ret;
+
+ cryptodev = dpaa_dev->crypto_dev;
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ ret = dpaa_sec_uninit(cryptodev);
+ if (ret)
+ return ret;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_dpaa_driver rte_dpaa_sec_driver = {
+ .drv_type = FSL_DPAA_CRYPTO,
+ .driver = {
+ .name = "DPAA SEC PMD"
+ },
+ .probe = cryptodev_dpaa_sec_probe,
+ .remove = cryptodev_dpaa_sec_remove,
+};
+
+static struct cryptodev_driver dpaa_sec_crypto_drv;
+
+RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
+ cryptodev_driver_id);
+
+RTE_INIT(dpaa_sec_init_log)
+{
+ dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
+ if (dpaa_logtype_sec >= 0)
+ rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.h b/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.h
new file mode 100644
index 00000000..ac6c00a6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -0,0 +1,452 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _DPAA_SEC_H_
+#define _DPAA_SEC_H_
+
+#define CRYPTODEV_NAME_DPAA_SEC_PMD crypto_dpaa_sec
+/**< NXP DPAA - SEC PMD device name */
+
+#define NUM_POOL_CHANNELS 4
+#define DPAA_SEC_BURST 7
+#define DPAA_SEC_ALG_UNSUPPORT (-1)
+#define TDES_CBC_IV_LEN 8
+#define AES_CBC_IV_LEN 16
+#define AES_CTR_IV_LEN 16
+#define AES_GCM_IV_LEN 12
+
+/* Minimum job descriptor consists of a oneword job descriptor HEADER and
+ * a pointer to the shared descriptor.
+ */
+#define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
+/* CTX_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define CTX_POOL_NUM_BUFS 32000
+#define CTX_POOL_BUF_SIZE sizeof(struct dpaa_sec_op_ctx)
+#define CTX_POOL_CACHE_SIZE 512
+#define RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS 2048
+
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+enum dpaa_sec_op_type {
+ DPAA_SEC_NONE, /*!< No Cipher operations*/
+ DPAA_SEC_CIPHER,/*!< CIPHER operations */
+ DPAA_SEC_AUTH, /*!< Authentication Operations */
+ DPAA_SEC_AEAD, /*!< Authenticated Encryption with associated data */
+ DPAA_SEC_IPSEC, /*!< IPSEC protocol operations*/
+ DPAA_SEC_PDCP, /*!< PDCP protocol operations*/
+ DPAA_SEC_PKC, /*!< Public Key Cryptographic Operations */
+ DPAA_SEC_MAX
+};
+
+
+#define DPAA_SEC_MAX_DESC_SIZE 64
+/* code or cmd block to caam */
+struct sec_cdb {
+ struct {
+ union {
+ uint32_t word;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint16_t rsvd63_48;
+ unsigned int rsvd47_39:9;
+ unsigned int idlen:7;
+#else
+ unsigned int idlen:7;
+ unsigned int rsvd47_39:9;
+ uint16_t rsvd63_48;
+#endif
+ } field;
+ } __packed hi;
+
+ union {
+ uint32_t word;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ unsigned int rsvd31_30:2;
+ unsigned int fsgt:1;
+ unsigned int lng:1;
+ unsigned int offset:2;
+ unsigned int abs:1;
+ unsigned int add_buf:1;
+ uint8_t pool_id;
+ uint16_t pool_buffer_size;
+#else
+ uint16_t pool_buffer_size;
+ uint8_t pool_id;
+ unsigned int add_buf:1;
+ unsigned int abs:1;
+ unsigned int offset:2;
+ unsigned int lng:1;
+ unsigned int fsgt:1;
+ unsigned int rsvd31_30:2;
+#endif
+ } field;
+ } __packed lo;
+ } __packed sh_hdr;
+
+ uint32_t sh_desc[DPAA_SEC_MAX_DESC_SIZE];
+};
+
+typedef struct dpaa_sec_session_entry {
+ uint8_t dir; /*!< Operation Direction */
+ enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+ enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+ enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+ enum rte_security_session_protocol proto_alg; /*!< Security Algorithm*/
+ union {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } aead_key;
+ struct {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } cipher_key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } auth_key;
+ };
+ };
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv; /**< Initialisation vector parameters */
+ uint16_t auth_only_len; /*!< Length of data for Auth only */
+ uint32_t digest_length;
+ struct ipsec_encap_pdb encap_pdb;
+ struct ip ip4_hdr;
+ struct ipsec_decap_pdb decap_pdb;
+ struct dpaa_sec_qp *qp;
+ struct qman_fq *inq;
+ struct sec_cdb cdb; /**< cmd block associated with qp */
+ struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
+} dpaa_sec_session;
+
+struct dpaa_sec_qp {
+ struct dpaa_sec_dev_private *internals;
+ struct qman_fq outq;
+ int rx_pkts;
+ int rx_errs;
+ int tx_pkts;
+ int tx_errs;
+};
+
+#define RTE_DPAA_MAX_NB_SEC_QPS 8
+#define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
+
+/* internal sec queue interface */
+struct dpaa_sec_dev_private {
+ void *sec_hw;
+ struct rte_mempool *ctx_pool; /* per dev mempool for dpaa_sec_op_ctx */
+ struct dpaa_sec_qp qps[RTE_DPAA_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+ struct qman_fq inq[RTE_DPAA_MAX_RX_QUEUE];
+ unsigned char inq_attach[RTE_DPAA_MAX_RX_QUEUE];
+ unsigned int max_nb_queue_pairs;
+ unsigned int max_nb_sessions;
+};
+
+#define MAX_SG_ENTRIES 16
+#define SG_CACHELINE_0 0
+#define SG_CACHELINE_1 4
+#define SG_CACHELINE_2 8
+#define SG_CACHELINE_3 12
+struct dpaa_sec_job {
+ /* sg[0] output, sg[1] input, others are possible sub frames */
+ struct qm_sg_entry sg[MAX_SG_ENTRIES];
+};
+
+#define DPAA_MAX_NB_MAX_DIGEST 32
+struct dpaa_sec_op_ctx {
+ struct dpaa_sec_job job;
+ struct rte_crypto_op *op;
+ struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
+ uint32_t fd_status;
+ int64_t vtop_offset;
+ uint8_t digest[DPAA_MAX_NB_MAX_DIGEST];
+};
+
+static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 16,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 20,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 28,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 32,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 48,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 240,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_security_capability dpaa_sec_security_cap[] = {
+ { /* IPsec Lookaside Protocol offload ESP Transport Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa_sec_capabilities
+ },
+ { /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa_sec_capabilities
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+};
+
+/**
+ * Checksum
+ *
+ * @param buffer calculate chksum for buffer
+ * @param len buffer length
+ *
+ * @return checksum value in host cpu order
+ */
+static inline uint16_t
+calc_chksum(void *buffer, int len)
+{
+ uint16_t *buf = (uint16_t *)buffer;
+ uint32_t sum = 0;
+ uint16_t result;
+
+ for (sum = 0; len > 1; len -= 2)
+ sum += *buf++;
+
+ if (len == 1)
+ sum += *(unsigned char *)buf;
+
+ sum = (sum >> 16) + (sum & 0xFFFF);
+ sum += (sum >> 16);
+ result = ~sum;
+
+ return result;
+}
+
+#endif /* _DPAA_SEC_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_log.h b/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_log.h
new file mode 100644
index 00000000..fb895a8b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_log.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#ifndef _DPAA_SEC_LOG_H_
+#define _DPAA_SEC_LOG_H_
+
+extern int dpaa_logtype_sec;
+
+#define DPAA_SEC_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_sec, "dpaa_sec: " \
+ fmt "\n", ##args)
+
+#define DPAA_SEC_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa_logtype_sec, "dpaa_sec: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA_SEC_DEBUG(" >>")
+
+#define DPAA_SEC_INFO(fmt, args...) \
+ DPAA_SEC_LOG(INFO, fmt, ## args)
+#define DPAA_SEC_ERR(fmt, args...) \
+ DPAA_SEC_LOG(ERR, fmt, ## args)
+#define DPAA_SEC_WARN(fmt, args...) \
+ DPAA_SEC_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA_SEC_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA_SEC_DP_DEBUG(fmt, args...) \
+ DPAA_SEC_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA_SEC_DP_INFO(fmt, args...) \
+ DPAA_SEC_DP_LOG(INFO, fmt, ## args)
+#define DPAA_SEC_DP_WARN(fmt, args...) \
+ DPAA_SEC_DP_LOG(WARNING, fmt, ## args)
+#define DPAA_SEC_DP_ERR(fmt, args...) \
+ DPAA_SEC_DP_LOG(ERR, fmt, ## args)
+
+#endif /* _DPAA_SEC_LOG_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa_sec/meson.build b/src/spdk/dpdk/drivers/crypto/dpaa_sec/meson.build
new file mode 100644
index 00000000..8a570984
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa_sec/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_dpaa', 'security']
+sources = files('dpaa_sec.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('../dpaa2_sec/')
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map b/src/spdk/dpdk/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map
new file mode 100644
index 00000000..a70bd197
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map
@@ -0,0 +1,4 @@
+DPDK_17.11 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/kasumi/Makefile b/src/spdk/dpdk/drivers/crypto/kasumi/Makefile
new file mode 100644
index 00000000..cafe9498
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/kasumi/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_KASUMI_PATH),)
+$(error "Please define LIBSSO_KASUMI_PATH environment variable")
+endif
+endif
+
+# library name
+LIB = librte_pmd_kasumi.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_kasumi_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)/include
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)/build
+LDLIBS += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd.c b/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd.c
new file mode 100644
index 00000000..239a1cf4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_kasumi_pmd_private.h"
+
+#define KASUMI_KEY_LENGTH 16
+#define KASUMI_IV_LENGTH 8
+#define KASUMI_MAX_BURST 4
+#define BYTE_LEN 8
+
+static uint8_t cryptodev_driver_id;
+
+/** Get xform chain order. */
+static enum kasumi_operation
+kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return KASUMI_OP_NOT_SUPPORTED;
+
+ if (xform->next)
+ if (xform->next->next != NULL)
+ return KASUMI_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return KASUMI_OP_ONLY_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return KASUMI_OP_AUTH_CIPHER;
+ else
+ return KASUMI_OP_NOT_SUPPORTED;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return KASUMI_OP_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return KASUMI_OP_CIPHER_AUTH;
+ else
+ return KASUMI_OP_NOT_SUPPORTED;
+ }
+
+ return KASUMI_OP_NOT_SUPPORTED;
+}
+
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+kasumi_set_session_parameters(struct kasumi_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ enum kasumi_operation mode;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ mode = kasumi_get_mode(xform);
+
+ switch (mode) {
+ case KASUMI_OP_CIPHER_AUTH:
+ auth_xform = xform->next;
+ /* Fall-through */
+ case KASUMI_OP_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case KASUMI_OP_AUTH_CIPHER:
+ cipher_xform = xform->next;
+ /* Fall-through */
+ case KASUMI_OP_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case KASUMI_OP_NOT_SUPPORTED:
+ default:
+ KASUMI_LOG(ERR, "Unsupported operation chain order parameter");
+ return -ENOTSUP;
+ }
+
+ if (cipher_xform) {
+ /* Only KASUMI F8 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
+ KASUMI_LOG(ERR, "Unsupported cipher algorithm ");
+ return -ENOTSUP;
+ }
+
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+ if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
+ KASUMI_LOG(ERR, "Wrong IV length");
+ return -EINVAL;
+ }
+
+ /* Initialize key */
+ sso_kasumi_init_f8_key_sched(cipher_xform->cipher.key.data,
+ &sess->pKeySched_cipher);
+ }
+
+ if (auth_xform) {
+ /* Only KASUMI F9 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
+ KASUMI_LOG(ERR, "Unsupported authentication");
+ return -ENOTSUP;
+ }
+
+ if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
+ KASUMI_LOG(ERR, "Wrong digest length");
+ return -EINVAL;
+ }
+
+ sess->auth_op = auth_xform->auth.op;
+
+ /* Initialize key */
+ sso_kasumi_init_f9_key_sched(auth_xform->auth.key.data,
+ &sess->pKeySched_hash);
+ }
+
+
+ sess->op = mode;
+
+ return 0;
+}
+
+/** Get KASUMI session. */
+static struct kasumi_session *
+kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
+{
+ struct kasumi_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct kasumi_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct kasumi_session *)_sess_private_data;
+
+ if (unlikely(kasumi_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/** Encrypt/decrypt mbufs with same cipher key. */
+static uint8_t
+process_kasumi_cipher_op(struct rte_crypto_op **ops,
+ struct kasumi_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src[num_ops], *dst[num_ops];
+ uint8_t *iv_ptr;
+ uint64_t iv[num_ops];
+ uint32_t num_bytes[num_ops];
+
+ for (i = 0; i < num_ops; i++) {
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst ?
+ rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3) :
+ rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->cipher_iv_offset);
+ iv[i] = *((uint64_t *)(iv_ptr));
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ processed_ops++;
+ }
+
+ if (processed_ops != 0)
+ sso_kasumi_f8_n_buffer(&session->pKeySched_cipher, iv,
+ src, dst, num_bytes, processed_ops);
+
+ return processed_ops;
+}
+
+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
+ struct kasumi_session *session)
+{
+ uint8_t *src, *dst;
+ uint8_t *iv_ptr;
+ uint64_t iv;
+ uint32_t length_in_bits, offset_in_bits;
+
+ offset_in_bits = op->sym->cipher.data.offset;
+ src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+ if (op->sym->m_dst == NULL) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG(ERR, "bit-level in-place not supported");
+ return 0;
+ }
+ dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->cipher_iv_offset);
+ iv = *((uint64_t *)(iv_ptr));
+ length_in_bits = op->sym->cipher.data.length;
+
+ sso_kasumi_f8_1_buffer_bit(&session->pKeySched_cipher, iv,
+ src, dst, length_in_bits, offset_in_bits);
+
+ return 1;
+}
+
+/** Generate/verify hash from mbufs with same hash key. */
+static int
+process_kasumi_hash_op(struct kasumi_qp *qp, struct rte_crypto_op **ops,
+ struct kasumi_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src, *dst;
+ uint32_t length_in_bits;
+ uint32_t num_bytes;
+
+ for (i = 0; i < num_ops; i++) {
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG(ERR, "Invalid Offset");
+ break;
+ }
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->auth.data.offset >> 3);
+ /* Direction from next bit after end of message */
+ num_bytes = length_in_bits >> 3;
+
+ if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ dst = qp->temp_digest;
+ sso_kasumi_f9_1_buffer(&session->pKeySched_hash, src,
+ num_bytes, dst);
+
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ KASUMI_DIGEST_LENGTH) != 0)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ dst = ops[i]->sym->auth.digest.data;
+
+ sso_kasumi_f9_1_buffer(&session->pKeySched_hash, src,
+ num_bytes, dst);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which shares the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
+ struct kasumi_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
+{
+ unsigned i;
+ unsigned enqueued_ops, processed_ops;
+
+ switch (session->op) {
+ case KASUMI_OP_ONLY_CIPHER:
+ processed_ops = process_kasumi_cipher_op(ops,
+ session, num_ops);
+ break;
+ case KASUMI_OP_ONLY_AUTH:
+ processed_ops = process_kasumi_hash_op(qp, ops, session,
+ num_ops);
+ break;
+ case KASUMI_OP_CIPHER_AUTH:
+ processed_ops = process_kasumi_cipher_op(ops, session,
+ num_ops);
+ process_kasumi_hash_op(qp, ops, session, processed_ops);
+ break;
+ case KASUMI_OP_AUTH_CIPHER:
+ processed_ops = process_kasumi_hash_op(qp, ops, session,
+ num_ops);
+ process_kasumi_cipher_op(ops, session, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(session, 0, sizeof(struct kasumi_session));
+ memset(ops[i]->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, session);
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)ops, processed_ops, NULL);
+ qp->qp_stats.enqueued_count += enqueued_ops;
+ *accumulated_enqueued_ops += enqueued_ops;
+
+ return enqueued_ops;
+}
+
+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
+ struct kasumi_qp *qp, uint16_t *accumulated_enqueued_ops)
+{
+ unsigned enqueued_op, processed_op;
+
+ switch (session->op) {
+ case KASUMI_OP_ONLY_CIPHER:
+ processed_op = process_kasumi_cipher_op_bit(op,
+ session);
+ break;
+ case KASUMI_OP_ONLY_AUTH:
+ processed_op = process_kasumi_hash_op(qp, &op, session, 1);
+ break;
+ case KASUMI_OP_CIPHER_AUTH:
+ processed_op = process_kasumi_cipher_op_bit(op, session);
+ if (processed_op == 1)
+ process_kasumi_hash_op(qp, &op, session, 1);
+ break;
+ case KASUMI_OP_AUTH_CIPHER:
+ processed_op = process_kasumi_hash_op(qp, &op, session, 1);
+ if (processed_op == 1)
+ process_kasumi_cipher_op_bit(op, session);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_op = 0;
+ }
+
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Free session if a session-less crypto op. */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(op->sym->session, 0, sizeof(struct kasumi_session));
+ rte_cryptodev_sym_session_free(op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op,
+ processed_op, NULL);
+ qp->qp_stats.enqueued_count += enqueued_op;
+ *accumulated_enqueued_ops += enqueued_op;
+
+ return enqueued_op;
+}
+
+static uint16_t
+kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[nb_ops];
+ struct rte_crypto_op *curr_c_op;
+
+ struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
+ struct kasumi_qp *qp = queue_pair;
+ unsigned i;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+
+ for (i = 0; i < nb_ops; i++) {
+ curr_c_op = ops[i];
+
+#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
+ if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src) ||
+ (curr_c_op->sym->m_dst != NULL &&
+ !rte_pktmbuf_is_contiguous(
+ curr_c_op->sym->m_dst))) {
+ KASUMI_LOG(ERR, "PMD supports only contiguous mbufs, "
+ "op (%p) provides noncontiguous mbuf as "
+ "source/destination buffer.", curr_c_op);
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+#endif
+
+ /* Set status as enqueued (not processed yet) by default. */
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ curr_sess = kasumi_get_session(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL ||
+ curr_sess->op == KASUMI_OP_NOT_SUPPORTED)) {
+ curr_c_op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ break;
+ }
+
+ /* If length/offset is at bit-level, process this buffer alone. */
+ if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((ops[i]->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ /* Process the ops of the previous session. */
+ if (prev_sess != NULL) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+
+ processed_ops = process_op_bit(curr_c_op, curr_sess,
+ qp, &enqueued_ops);
+ if (processed_ops != 1)
+ break;
+
+ continue;
+ }
+
+ /* Batch ops that share the same session. */
+ if (prev_sess == NULL) {
+ prev_sess = curr_sess;
+ c_ops[burst_size++] = curr_c_op;
+ } else if (curr_sess == prev_sess) {
+ c_ops[burst_size++] = curr_c_op;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == KASUMI_MAX_BURST) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+ } else {
+ /*
+ * Different session, process the ops
+ * of the previous session.
+ */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = curr_sess;
+
+ c_ops[burst_size++] = curr_c_op;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last session. */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ }
+
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+ return enqueued_ops;
+}
+
+static uint16_t
+kasumi_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+ struct kasumi_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)c_ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_kasumi_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_kasumi_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct kasumi_private *internals;
+ uint64_t cpu_flags = 0;
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ KASUMI_LOG(ERR, "failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ cpu_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ else
+ cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_kasumi_pmd_ops;
+
+ /* Register RX/TX burst functions for data path. */
+ dev->dequeue_burst = kasumi_pmd_dequeue_burst;
+ dev->enqueue_burst = kasumi_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ cpu_flags;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+
+ return 0;
+init_error:
+ KASUMI_LOG(ERR, "driver %s: failed",
+ init_params->name);
+
+ cryptodev_kasumi_remove(vdev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct kasumi_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ return cryptodev_kasumi_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_kasumi_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
+ .probe = cryptodev_kasumi_probe,
+ .remove = cryptodev_kasumi_remove
+};
+
+static struct cryptodev_driver kasumi_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv,
+ cryptodev_kasumi_pmd_drv.driver, cryptodev_driver_id);
+
+RTE_INIT(kasumi_init_log)
+{
+ kasumi_logtype_driver = rte_log_register("pmd.crypto.kasumi");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
new file mode 100644
index 00000000..9e4bf1b5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_kasumi_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = {
+ { /* KASUMI (F9) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F8) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+kasumi_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+kasumi_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+kasumi_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+kasumi_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+kasumi_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+kasumi_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+kasumi_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct kasumi_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = kasumi_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+kasumi_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp != NULL) {
+ rte_ring_free(qp->processed_ops);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+kasumi_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct kasumi_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "kasumi_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) == ring_size) {
+ KASUMI_LOG(INFO, "Reusing existing ring %s"
+ " for processed packets",
+ qp->name);
+ return r;
+ }
+
+ KASUMI_LOG(ERR, "Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+kasumi_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct kasumi_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ kasumi_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("KASUMI PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (kasumi_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = kasumi_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+kasumi_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the KASUMI session structure */
+static unsigned
+kasumi_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct kasumi_session);
+}
+
+/** Configure a KASUMI session from a crypto xform chain */
+static int
+kasumi_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ KASUMI_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ KASUMI_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = kasumi_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ KASUMI_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+kasumi_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct kasumi_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops kasumi_pmd_ops = {
+ .dev_configure = kasumi_pmd_config,
+ .dev_start = kasumi_pmd_start,
+ .dev_stop = kasumi_pmd_stop,
+ .dev_close = kasumi_pmd_close,
+
+ .stats_get = kasumi_pmd_stats_get,
+ .stats_reset = kasumi_pmd_stats_reset,
+
+ .dev_infos_get = kasumi_pmd_info_get,
+
+ .queue_pair_setup = kasumi_pmd_qp_setup,
+ .queue_pair_release = kasumi_pmd_qp_release,
+ .queue_pair_count = kasumi_pmd_qp_count,
+
+ .sym_session_get_size = kasumi_pmd_sym_session_get_size,
+ .sym_session_configure = kasumi_pmd_sym_session_configure,
+ .sym_session_clear = kasumi_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
new file mode 100644
index 00000000..488777ca
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#ifndef _RTE_KASUMI_PMD_PRIVATE_H_
+#define _RTE_KASUMI_PMD_PRIVATE_H_
+
+#include <sso_kasumi.h>
+
+#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
+/**< KASUMI PMD device name */
+
+/** KASUMI PMD LOGTYPE DRIVER */
+int kasumi_logtype_driver;
+
+#define KASUMI_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, kasumi_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+#define KASUMI_DIGEST_LENGTH 4
+
+/** private data structure for each virtual KASUMI device */
+struct kasumi_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+/** KASUMI buffer queue pair */
+struct kasumi_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+enum kasumi_operation {
+ KASUMI_OP_ONLY_CIPHER,
+ KASUMI_OP_ONLY_AUTH,
+ KASUMI_OP_CIPHER_AUTH,
+ KASUMI_OP_AUTH_CIPHER,
+ KASUMI_OP_NOT_SUPPORTED
+};
+
+/** KASUMI private session structure */
+struct kasumi_session {
+ /* Keys have to be 16-byte aligned */
+ sso_kasumi_key_sched_t pKeySched_cipher;
+ sso_kasumi_key_sched_t pKeySched_hash;
+ enum kasumi_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ uint16_t cipher_iv_offset;
+} __rte_cache_aligned;
+
+
+int
+kasumi_set_session_parameters(struct kasumi_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+struct rte_cryptodev_ops *rte_kasumi_pmd_ops;
+
+#endif /* _RTE_KASUMI_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/kasumi/rte_pmd_kasumi_version.map b/src/spdk/dpdk/drivers/crypto/kasumi/rte_pmd_kasumi_version.map
new file mode 100644
index 00000000..8ffeca93
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/kasumi/rte_pmd_kasumi_version.map
@@ -0,0 +1,3 @@
+DPDK_16.07 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/meson.build b/src/spdk/dpdk/drivers/crypto/meson.build
new file mode 100644
index 00000000..d64ca418
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['ccp', 'dpaa_sec', 'dpaa2_sec', 'mvsam',
+ 'null', 'openssl', 'qat', 'virtio']
+
+std_deps = ['cryptodev'] # cryptodev pulls in all other needed deps
+config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/Makefile b/src/spdk/dpdk/drivers/crypto/mvsam/Makefile
new file mode 100644
index 00000000..c3dc72c1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Marvell International Ltd.
+# Copyright(c) 2017 Semihalf.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_mvsam_crypto.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_mvsam_version.map
+
+# external library dependencies
+LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/meson.build b/src/spdk/dpdk/drivers/crypto/mvsam/meson.build
new file mode 100644
index 00000000..3c8ea3cf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+
+path = get_option('lib_musdk_dir')
+lib_dir = path + '/lib'
+inc_dir = path + '/include'
+
+lib = cc.find_library('libmusdk', dirs: [lib_dir], required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+ includes += include_directories(inc_dir)
+ cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC']
+endif
+
+sources = files('rte_mrvl_pmd.c', 'rte_mrvl_pmd_ops.c')
+
+deps += ['bus_vdev']
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_compat.h b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_compat.h
new file mode 100644
index 00000000..4ab28d39
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_compat.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_MRVL_COMPAT_H_
+#define _RTE_MRVL_COMPAT_H_
+
+/* Unluckily, container_of is defined by both DPDK and MUSDK,
+ * we'll declare only one version.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+#include "env/mv_autogen_comp_flags.h"
+#include "drivers/mv_sam.h"
+#include "drivers/mv_sam_cio.h"
+#include "drivers/mv_sam_session.h"
+
+#endif /* _RTE_MRVL_COMPAT_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd.c b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd.c
new file mode 100644
index 00000000..73eff757
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd.c
@@ -0,0 +1,937 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_mrvl_pmd_private.h"
+
+#define MRVL_MUSDK_DMA_MEMSIZE 41943040
+
+#define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
+#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048
+
+static uint8_t cryptodev_driver_id;
+
+struct mrvl_pmd_init_params {
+ struct rte_cryptodev_pmd_init_params common;
+ uint32_t max_nb_sessions;
+};
+
+const char *mrvl_pmd_valid_params[] = {
+ RTE_CRYPTODEV_PMD_NAME_ARG,
+ RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
+ MRVL_PMD_MAX_NB_SESS_ARG
+};
+
+/**
+ * Flag if particular crypto algorithm is supported by PMD/MUSDK.
+ *
+ * The idea is to have Not Supported value as default (0).
+ * This way we need only to define proper map sizes,
+ * non-initialized entries will be by default not supported.
+ */
+enum algo_supported {
+ ALGO_NOT_SUPPORTED = 0,
+ ALGO_SUPPORTED = 1,
+};
+
+/** Map elements for cipher mapping.*/
+struct cipher_params_mapping {
+ enum algo_supported supported; /**< On/Off switch */
+ enum sam_cipher_alg cipher_alg; /**< Cipher algorithm */
+ enum sam_cipher_mode cipher_mode; /**< Cipher mode */
+ unsigned int max_key_len; /**< Maximum key length (in bytes)*/
+}
+/* We want to squeeze in multiple maps into the cache line. */
+__rte_aligned(32);
+
+/** Map elements for auth mapping.*/
+struct auth_params_mapping {
+ enum algo_supported supported; /**< On/off switch */
+ enum sam_auth_alg auth_alg; /**< Auth algorithm */
+}
+/* We want to squeeze in multiple maps into the cache line. */
+__rte_aligned(32);
+
+/**
+ * Map of supported cipher algorithms.
+ */
+static const
+struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
+ [RTE_CRYPTO_CIPHER_3DES_CBC] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_3DES,
+ .cipher_mode = SAM_CIPHER_CBC,
+ .max_key_len = BITS2BYTES(192) },
+ [RTE_CRYPTO_CIPHER_3DES_CTR] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_3DES,
+ .cipher_mode = SAM_CIPHER_CTR,
+ .max_key_len = BITS2BYTES(192) },
+ [RTE_CRYPTO_CIPHER_3DES_ECB] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_3DES,
+ .cipher_mode = SAM_CIPHER_ECB,
+ .max_key_len = BITS2BYTES(192) },
+ [RTE_CRYPTO_CIPHER_AES_CBC] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_CBC,
+ .max_key_len = BITS2BYTES(256) },
+ [RTE_CRYPTO_CIPHER_AES_CTR] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_CTR,
+ .max_key_len = BITS2BYTES(256) },
+};
+
+/**
+ * Map of supported auth algorithms.
+ */
+static const
+struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
+ [RTE_CRYPTO_AUTH_MD5_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_MD5 },
+ [RTE_CRYPTO_AUTH_MD5] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_MD5 },
+ [RTE_CRYPTO_AUTH_SHA1_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA1 },
+ [RTE_CRYPTO_AUTH_SHA1] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA1 },
+ [RTE_CRYPTO_AUTH_SHA224] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_224 },
+ [RTE_CRYPTO_AUTH_SHA256_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_256 },
+ [RTE_CRYPTO_AUTH_SHA256] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_256 },
+ [RTE_CRYPTO_AUTH_SHA384_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_384 },
+ [RTE_CRYPTO_AUTH_SHA384] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_384 },
+ [RTE_CRYPTO_AUTH_SHA512_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_512 },
+ [RTE_CRYPTO_AUTH_SHA512] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_512 },
+ [RTE_CRYPTO_AUTH_AES_GMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_AES_GMAC },
+};
+
+/**
+ * Map of supported aead algorithms.
+ */
+static const
+struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
+ [RTE_CRYPTO_AEAD_AES_GCM] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_GCM,
+ .max_key_len = BITS2BYTES(256) },
+};
+
+/*
+ *-----------------------------------------------------------------------------
+ * Forward declarations.
+ *-----------------------------------------------------------------------------
+ */
+static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
+
+/*
+ *-----------------------------------------------------------------------------
+ * Session Preparation.
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Get xform chain order.
+ *
+ * @param xform Pointer to configuration structure chain for crypto operations.
+ * @returns Order of crypto operations.
+ */
+static enum mrvl_crypto_chain_order
+mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ /* Currently, Marvell supports max 2 operations in chain */
+ if (xform->next != NULL && xform->next->next != NULL)
+ return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
+
+ if (xform->next != NULL) {
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
+ (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
+ return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
+
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+ (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
+ return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
+ } else {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ return MRVL_CRYPTO_CHAIN_COMBINED;
+ }
+ return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
+}
+
+/**
+ * Set session parameters for cipher part.
+ *
+ * @param sess Crypto session pointer.
+ * @param cipher_xform Pointer to configuration structure for cipher operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *cipher_xform)
+{
+ /* Make sure we've got proper struct */
+ if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid */
+ if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) ||
+ (cipher_map[cipher_xform->cipher.algo].supported
+ != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+
+ sess->sam_sess_params.dir =
+ (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.cipher_alg =
+ cipher_map[cipher_xform->cipher.algo].cipher_alg;
+ sess->sam_sess_params.cipher_mode =
+ cipher_map[cipher_xform->cipher.algo].cipher_mode;
+
+ /* Assume IV will be passed together with data. */
+ sess->sam_sess_params.cipher_iv = NULL;
+
+ /* Get max key length. */
+ if (cipher_xform->cipher.key.length >
+ cipher_map[cipher_xform->cipher.algo].max_key_len) {
+ MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
+ sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
+
+ return 0;
+}
+
+/**
+ * Set session parameters for authentication part.
+ *
+ * @param sess Crypto session pointer.
+ * @param auth_xform Pointer to configuration structure for auth operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *auth_xform)
+{
+ /* Make sure we've got proper struct */
+ if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid */
+ if ((auth_xform->auth.algo > RTE_DIM(auth_map)) ||
+ (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.dir =
+ (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.auth_alg =
+ auth_map[auth_xform->auth.algo].auth_alg;
+ sess->sam_sess_params.u.basic.auth_icv_len =
+ auth_xform->auth.digest_length;
+ /* auth_key must be NULL if auth algorithm does not use HMAC */
+ sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
+ auth_xform->auth.key.data : NULL;
+ sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
+
+ return 0;
+}
+
+/**
+ * Set session parameters for aead part.
+ *
+ * @param sess Crypto session pointer.
+ * @param aead_xform Pointer to configuration structure for aead operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *aead_xform)
+{
+ /* Make sure we've got proper struct */
+ if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid */
+ if ((aead_xform->aead.algo > RTE_DIM(aead_map)) ||
+ (aead_map[aead_xform->aead.algo].supported
+ != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.dir =
+ (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.cipher_alg =
+ aead_map[aead_xform->aead.algo].cipher_alg;
+ sess->sam_sess_params.cipher_mode =
+ aead_map[aead_xform->aead.algo].cipher_mode;
+
+ /* Assume IV will be passed together with data. */
+ sess->sam_sess_params.cipher_iv = NULL;
+
+ /* Get max key length. */
+ if (aead_xform->aead.key.length >
+ aead_map[aead_xform->aead.algo].max_key_len) {
+ MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
+ sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
+
+ if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
+ sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
+
+ sess->sam_sess_params.u.basic.auth_icv_len =
+ aead_xform->aead.digest_length;
+
+ sess->sam_sess_params.u.basic.auth_aad_len =
+ aead_xform->aead.aad_length;
+
+ return 0;
+}
+
+/**
+ * Parse crypto transform chain and setup session parameters.
+ *
+ * @param dev Pointer to crypto device
+ * @param sess Poiner to crypto session
+ * @param xform Pointer to configuration structure chain for crypto operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+
+ /* Filter out spurious/broken requests */
+ if (xform == NULL)
+ return -EINVAL;
+
+ sess->chain_order = mrvl_crypto_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
+ cipher_xform = xform;
+ break;
+ case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
+ auth_xform = xform;
+ break;
+ case MRVL_CRYPTO_CHAIN_COMBINED:
+ aead_xform = xform;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((cipher_xform != NULL) &&
+ (mrvl_crypto_set_cipher_session_parameters(
+ sess, cipher_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
+ return -EINVAL;
+ }
+
+ if ((auth_xform != NULL) &&
+ (mrvl_crypto_set_auth_session_parameters(
+ sess, auth_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
+ return -EINVAL;
+ }
+
+ if ((aead_xform != NULL) &&
+ (mrvl_crypto_set_aead_session_parameters(
+ sess, aead_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * Process Operations
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Prepare a single request.
+ *
+ * This function basically translates DPDK crypto request into one
+ * understandable by MUDSK's SAM. If this is a first request in a session,
+ * it starts the session.
+ *
+ * @param request Pointer to pre-allocated && reset request buffer [Out].
+ * @param src_bd Pointer to pre-allocated source descriptor [Out].
+ * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
+ * @param op Pointer to DPDK crypto operation struct [In].
+ */
+static inline int
+mrvl_request_prepare(struct sam_cio_op_params *request,
+ struct sam_buf_info *src_bd,
+ struct sam_buf_info *dst_bd,
+ struct rte_crypto_op *op)
+{
+ struct mrvl_crypto_session *sess;
+ struct rte_mbuf *dst_mbuf;
+ uint8_t *digest;
+
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
+ "oriented requests, op (%p) is sessionless.",
+ op);
+ return -EINVAL;
+ }
+
+ sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
+ op->sym->session, cryptodev_driver_id);
+ if (unlikely(sess == NULL)) {
+ MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
+ return -EINVAL;
+ }
+
+ /*
+ * If application delivered us null dst buffer, it means it expects
+ * us to deliver the result in src buffer.
+ */
+ dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ request->sa = sess->sam_sess;
+ request->cookie = op;
+
+ /* Single buffers only, sorry. */
+ request->num_bufs = 1;
+ request->src = src_bd;
+ src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
+ src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
+ src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
+
+ /* Empty source. */
+ if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
+ /* EIP does not support 0 length buffers. */
+ MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
+ return -1;
+ }
+
+ /* Empty destination. */
+ if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
+ /* Make dst buffer fit at least source data. */
+ if (rte_pktmbuf_append(dst_mbuf,
+ rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
+ MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
+ return -1;
+ }
+ }
+
+ request->dst = dst_bd;
+ dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
+ dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
+
+ /*
+ * We can use all available space in dst_mbuf,
+ * not only what's used currently.
+ */
+ dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
+
+ if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
+ request->cipher_len = op->sym->aead.data.length;
+ request->cipher_offset = op->sym->aead.data.offset;
+ request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher_iv_offset);
+
+ request->auth_aad = op->sym->aead.aad.data;
+ request->auth_offset = request->cipher_offset;
+ request->auth_len = request->cipher_len;
+ } else {
+ request->cipher_len = op->sym->cipher.data.length;
+ request->cipher_offset = op->sym->cipher.data.offset;
+ request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher_iv_offset);
+
+ request->auth_offset = op->sym->auth.data.offset;
+ request->auth_len = op->sym->auth.data.length;
+ }
+
+ digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
+ op->sym->aead.digest.data : op->sym->auth.digest.data;
+ if (digest == NULL) {
+ /* No auth - no worry. */
+ return 0;
+ }
+
+ request->auth_icv_offset = request->auth_offset + request->auth_len;
+
+ /*
+ * EIP supports only scenarios where ICV(digest buffer) is placed at
+ * auth_icv_offset. Any other placement means risking errors.
+ */
+ if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
+ /*
+ * This should be the most common case anyway,
+ * EIP will overwrite DST buffer at auth_icv_offset.
+ */
+ if (rte_pktmbuf_mtod_offset(
+ dst_mbuf, uint8_t *,
+ request->auth_icv_offset) == digest) {
+ return 0;
+ }
+ } else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
+ /*
+ * EIP will look for digest at auth_icv_offset
+ * offset in SRC buffer.
+ */
+ if (rte_pktmbuf_mtod_offset(
+ op->sym->m_src, uint8_t *,
+ request->auth_icv_offset) == digest) {
+ return 0;
+ }
+ }
+
+ /*
+ * If we landed here it means that digest pointer is
+ * at different than expected place.
+ */
+ return -1;
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * PMD Framework handlers
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Enqueue burst.
+ *
+ * @param queue_pair Pointer to queue pair.
+ * @param ops Pointer to ops requests array.
+ * @param nb_ops Number of elements in ops requests array.
+ * @returns Number of elements consumed from ops.
+ */
+static uint16_t
+mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t iter_ops = 0;
+ uint16_t to_enq = 0;
+ uint16_t consumed = 0;
+ int ret;
+ struct sam_cio_op_params requests[nb_ops];
+ /*
+ * DPDK uses single fragment buffers, so we can KISS descriptors.
+ * SAM does not store bd pointers, so on-stack scope will be enough.
+ */
+ struct sam_buf_info src_bd[nb_ops];
+ struct sam_buf_info dst_bd[nb_ops];
+ struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
+
+ if (nb_ops == 0)
+ return 0;
+
+ /* Prepare the burst. */
+ memset(&requests, 0, sizeof(requests));
+
+ /* Iterate through */
+ for (; iter_ops < nb_ops; ++iter_ops) {
+ if (mrvl_request_prepare(&requests[iter_ops],
+ &src_bd[iter_ops],
+ &dst_bd[iter_ops],
+ ops[iter_ops]) < 0) {
+ MRVL_CRYPTO_LOG_ERR(
+ "Error while parameters preparation!");
+ qp->stats.enqueue_err_count++;
+ ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+ /*
+ * Number of handled ops is increased
+ * (even if the result of handling is error).
+ */
+ ++consumed;
+ break;
+ }
+
+ ops[iter_ops]->status =
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ /* Increase the number of ops to enqueue. */
+ ++to_enq;
+ } /* for (; iter_ops < nb_ops;... */
+
+ if (to_enq > 0) {
+ /* Send the burst */
+ ret = sam_cio_enq(qp->cio, requests, &to_enq);
+ consumed += to_enq;
+ if (ret < 0) {
+ /*
+ * Trust SAM that in this case returned value will be at
+ * some point correct (now it is returned unmodified).
+ */
+ qp->stats.enqueue_err_count += to_enq;
+ for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
+ ops[iter_ops]->status =
+ RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ }
+
+ qp->stats.enqueued_count += to_enq;
+ return consumed;
+}
+
+/**
+ * Dequeue burst.
+ *
+ * @param queue_pair Pointer to queue pair.
+ * @param ops Pointer to ops requests array.
+ * @param nb_ops Number of elements in ops requests array.
+ * @returns Number of elements dequeued.
+ */
+static uint16_t
+mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ int ret;
+ struct mrvl_crypto_qp *qp = queue_pair;
+ struct sam_cio *cio = qp->cio;
+ struct sam_cio_op_result results[nb_ops];
+ uint16_t i;
+
+ ret = sam_cio_deq(cio, results, &nb_ops);
+ if (ret < 0) {
+ /* Count all dequeued as error. */
+ qp->stats.dequeue_err_count += nb_ops;
+
+ /* But act as they were dequeued anyway*/
+ qp->stats.dequeued_count += nb_ops;
+
+ return 0;
+ }
+
+ /* Unpack and check results. */
+ for (i = 0; i < nb_ops; ++i) {
+ ops[i] = results[i].cookie;
+
+ switch (results[i].status) {
+ case SAM_CIO_OK:
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case SAM_CIO_ERR_ICV:
+ MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ break;
+ default:
+ MRVL_CRYPTO_LOG_DBG(
+ "CIO returned Error: %d", results[i].status);
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+ }
+
+ qp->stats.dequeued_count += nb_ops;
+ return nb_ops;
+}
+
+/**
+ * Create a new crypto device.
+ *
+ * @param name Driver name.
+ * @param vdev Pointer to device structure.
+ * @param init_params Pointer to initialization parameters.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct mrvl_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct mrvl_crypto_private *internals;
+ struct sam_init_params sam_params;
+ int ret;
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device,
+ &init_params->common);
+ if (dev == NULL) {
+ MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_mrvl_crypto_pmd_ops;
+
+ /* Register rx/tx burst functions for data path. */
+ dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
+ dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED;
+
+ /* Set vector instructions mode supported */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ /*
+ * ret == -EEXIST is correct, it means DMA
+ * has been already initialized.
+ */
+ ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
+ if (ret < 0) {
+ if (ret != -EEXIST)
+ return ret;
+
+ MRVL_CRYPTO_LOG_INFO(
+ "DMA memory has been already initialized by a different driver.");
+ }
+
+ sam_params.max_num_sessions = internals->max_nb_sessions;
+
+ return sam_init(&sam_params);
+
+init_error:
+ MRVL_CRYPTO_LOG_ERR(
+ "driver %s: %s failed", init_params->common.name, __func__);
+
+ cryptodev_mrvl_crypto_uninit(vdev);
+ return -EFAULT;
+}
+
+/** Parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+ MRVL_CRYPTO_LOG_ERR("Argument has to be positive.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Parse name */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+static int
+mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ mrvl_pmd_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ /* Common VDEV parameters */
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+ &parse_integer_arg,
+ &params->common.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
+ &parse_integer_arg,
+ &params->common.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_NAME_ARG,
+ &parse_name_arg,
+ &params->common);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ MRVL_PMD_MAX_NB_SESS_ARG,
+ &parse_integer_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+/**
+ * Initialize the crypto device.
+ *
+ * @param vdev Pointer to device structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
+{
+ struct mrvl_pmd_init_params init_params = {
+ .common = {
+ .name = "",
+ .private_data_size =
+ sizeof(struct mrvl_crypto_private),
+ .max_nb_queue_pairs =
+ sam_get_num_inst() * SAM_HW_RING_NUM,
+ .socket_id = rte_socket_id()
+ },
+ .max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
+ };
+
+ const char *name, *args;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ args = rte_vdev_device_args(vdev);
+
+ ret = mrvl_pmd_parse_input_args(&init_params, args);
+ if (ret) {
+ RTE_LOG(ERR, PMD,
+ "Failed to parse initialisation arguments[%s]\n",
+ args);
+ return -EINVAL;
+ }
+
+ return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
+}
+
+/**
+ * Uninitialize the crypto device
+ *
+ * @param vdev Pointer to device structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name = rte_vdev_device_name(vdev);
+
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD,
+ "Closing Marvell crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ sam_deinit();
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+/**
+ * Basic driver handlers for use in the constructor.
+ */
+static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
+ .probe = cryptodev_mrvl_crypto_init,
+ .remove = cryptodev_mrvl_crypto_uninit
+};
+
+static struct cryptodev_driver mrvl_crypto_drv;
+
+/* Register the driver in constructor. */
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
+ cryptodev_driver_id);
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
new file mode 100644
index 00000000..c045562c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
@@ -0,0 +1,722 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_mrvl_pmd_private.h"
+
+/**
+ * Capabilities list to be used in reporting to DPDK.
+ */
+static const struct rte_cryptodev_capabilities
+ mrvl_crypto_pmd_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* MD5 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 12,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 65532,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/**
+ * Configure device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param config Pointer to configuration structure.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/**
+ * Start device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/**
+ * Stop device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static void
+mrvl_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/**
+ * Get device statistics (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param stats Pointer to statistics structure [out].
+ */
+static void
+mrvl_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/**
+ * Reset device statistics (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ */
+static void
+mrvl_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+/**
+ * Get device info (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param dev_info Pointer to the device info structure [out].
+ */
+static void
+mrvl_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct mrvl_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = mrvl_crypto_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/**
+ * Release queue pair (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param qp_id ID of Queue Pair to release.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct mrvl_crypto_qp *qp =
+ (struct mrvl_crypto_qp *)dev->data->queue_pairs[qp_id];
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ sam_cio_flush(qp->cio);
+ sam_cio_deinit(qp->cio);
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+
+ return 0;
+}
+
+/**
+ * Close device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_close(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++)
+ mrvl_crypto_pmd_qp_release(dev, qp_id);
+
+ return 0;
+}
+
+/**
+ * Setup a queue pair (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param qp_id ID of the Queue Pair.
+ * @param qp_conf Queue pair configuration (nb of descriptors).
+ * @param socket_id NUMA socket to allocate memory on.
+ * @returns 0 upon success, negative value otherwise.
+ */
+static int
+mrvl_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct mrvl_crypto_qp *qp = NULL;
+ char match[RTE_CRYPTODEV_NAME_MAX_LEN];
+ unsigned int n;
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("MRVL Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ /* Free old qp prior setup if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ mrvl_crypto_pmd_qp_release(dev, qp_id);
+
+ do { /* Error handling block */
+
+ /*
+ * This extra check is necessary due to a bug in
+ * crypto library.
+ */
+ int num = sam_get_num_inst();
+ if (num == 0) {
+ MRVL_CRYPTO_LOG_ERR("No crypto engines detected.\n");
+ return -1;
+ }
+
+ /*
+ * In case two crypto engines are enabled qps will
+ * be evenly spread among them. Even and odd qps will
+ * be handled by cio-0 and cio-1 respectively. qp-cio mapping
+ * will look as follows:
+ *
+ * qp: 0 1 2 3
+ * cio-x:y: cio-0:0, cio-1:0, cio-0:1, cio-1:1
+ *
+ * qp: 4 5 6 7
+ * cio-x:y: cio-0:2, cio-1:2, cio-0:3, cio-1:3
+ *
+ * In case just one engine is enabled mapping will look as
+ * follows:
+ * qp: 0 1 2 3
+ * cio-x:y: cio-0:0, cio-0:1, cio-0:2, cio-0:3
+ */
+ n = snprintf(match, sizeof(match), "cio-%u:%u",
+ qp_id % num, qp_id / num);
+
+ if (n >= sizeof(match))
+ break;
+
+ qp->cio_params.match = match;
+ qp->cio_params.size = qp_conf->nb_descriptors;
+
+ if (sam_cio_init(&qp->cio_params, &qp->cio) < 0)
+ break;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ dev->data->queue_pairs[qp_id] = qp;
+ return 0;
+ } while (0);
+
+ rte_free(qp);
+ return -1;
+}
+
+/** Return the number of allocated queue pairs (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns Number of allocated queue pairs.
+ */
+static uint32_t
+mrvl_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure [Unused].
+ * @returns Size of Marvell crypto session.
+ */
+static unsigned
+mrvl_crypto_pmd_sym_session_get_size(__rte_unused struct rte_cryptodev *dev)
+{
+ return sizeof(struct mrvl_crypto_session);
+}
+
+/** Configure the session from a crypto xform chain (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param xform Pointer to the crytpo configuration structure.
+ * @param sess Pointer to the empty session structure.
+ * @returns 0 upon success, negative value otherwise.
+ */
+static int
+mrvl_crypto_pmd_sym_session_configure(__rte_unused struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mp)
+{
+ struct mrvl_crypto_session *mrvl_sess;
+ void *sess_private_data;
+ int ret;
+
+ if (sess == NULL) {
+ MRVL_CRYPTO_LOG_ERR("Invalid session struct.");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mp, &sess_private_data)) {
+ CDEV_LOG_ERR("Couldn't get object from session mempool.");
+ return -ENOMEM;
+ }
+
+ ret = mrvl_crypto_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ MRVL_CRYPTO_LOG_ERR("Failed to configure session parameters.");
+
+ /* Return session to mempool */
+ rte_mempool_put(mp, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
+
+ mrvl_sess = (struct mrvl_crypto_session *)sess_private_data;
+ if (sam_session_create(&mrvl_sess->sam_sess_params,
+ &mrvl_sess->sam_sess) < 0) {
+ MRVL_CRYPTO_LOG_DBG("Failed to create session!");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * Clear the memory of session so it doesn't leave key material behind.
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static void
+mrvl_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ struct mrvl_crypto_session *mrvl_sess =
+ (struct mrvl_crypto_session *)sess_priv;
+
+ if (mrvl_sess->sam_sess &&
+ sam_session_destroy(mrvl_sess->sam_sess) < 0) {
+ MRVL_CRYPTO_LOG_INFO("Error while destroying session!");
+ }
+
+ memset(sess, 0, sizeof(struct mrvl_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+/**
+ * PMD handlers for crypto ops.
+ */
+static struct rte_cryptodev_ops mrvl_crypto_pmd_ops = {
+ .dev_configure = mrvl_crypto_pmd_config,
+ .dev_start = mrvl_crypto_pmd_start,
+ .dev_stop = mrvl_crypto_pmd_stop,
+ .dev_close = mrvl_crypto_pmd_close,
+
+ .dev_infos_get = mrvl_crypto_pmd_info_get,
+
+ .stats_get = mrvl_crypto_pmd_stats_get,
+ .stats_reset = mrvl_crypto_pmd_stats_reset,
+
+ .queue_pair_setup = mrvl_crypto_pmd_qp_setup,
+ .queue_pair_release = mrvl_crypto_pmd_qp_release,
+ .queue_pair_count = mrvl_crypto_pmd_qp_count,
+
+ .sym_session_get_size = mrvl_crypto_pmd_sym_session_get_size,
+ .sym_session_configure = mrvl_crypto_pmd_sym_session_configure,
+ .sym_session_clear = mrvl_crypto_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops = &mrvl_crypto_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_private.h b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
new file mode 100644
index 00000000..c16d95b4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_MRVL_PMD_PRIVATE_H_
+#define _RTE_MRVL_PMD_PRIVATE_H_
+
+#include "rte_mrvl_compat.h"
+
+#define CRYPTODEV_NAME_MRVL_PMD crypto_mvsam
+/**< Marvell PMD device name */
+
+#define MRVL_CRYPTO_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG
+#define MRVL_CRYPTO_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+ __func__, __LINE__, ## args)
+
+#define MRVL_CRYPTO_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+ __func__, __LINE__, ## args)
+
+#else
+#define MRVL_CRYPTO_LOG_INFO(fmt, args...)
+#define MRVL_CRYPTO_LOG_DBG(fmt, args...)
+#endif
+
+/**
+ * Handy bits->bytes conversion macro.
+ */
+#define BITS2BYTES(x) ((x) >> 3)
+
+/** The operation order mode enumerator. */
+enum mrvl_crypto_chain_order {
+ MRVL_CRYPTO_CHAIN_CIPHER_ONLY,
+ MRVL_CRYPTO_CHAIN_AUTH_ONLY,
+ MRVL_CRYPTO_CHAIN_CIPHER_AUTH,
+ MRVL_CRYPTO_CHAIN_AUTH_CIPHER,
+ MRVL_CRYPTO_CHAIN_COMBINED,
+ MRVL_CRYPTO_CHAIN_NOT_SUPPORTED,
+};
+
+/** Private data structure for each crypto device. */
+struct mrvl_crypto_private {
+ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+ unsigned int max_nb_sessions; /**< Max number of sessions */
+};
+
+/** MRVL crypto queue pair structure. */
+struct mrvl_crypto_qp {
+ /** SAM CIO (MUSDK Queue Pair equivalent).*/
+ struct sam_cio *cio;
+
+ /** Session Mempool. */
+ struct rte_mempool *sess_mp;
+
+ /** Queue pair statistics. */
+ struct rte_cryptodev_stats stats;
+
+ /** CIO initialization parameters.*/
+ struct sam_cio_params cio_params;
+} __rte_cache_aligned;
+
+/** MRVL crypto private session structure. */
+struct mrvl_crypto_session {
+ /** Crypto operations chain order. */
+ enum mrvl_crypto_chain_order chain_order;
+
+ /** Session initialization parameters. */
+ struct sam_session_params sam_sess_params;
+
+ /** SAM session pointer. */
+ struct sam_sa *sam_sess;
+
+ /** Cipher IV offset. */
+ uint16_t cipher_iv_offset;
+} __rte_cache_aligned;
+
+/** Set and validate MRVL crypto session parameters */
+extern int
+mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops;
+
+#endif /* _RTE_MRVL_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/rte_pmd_mvsam_version.map b/src/spdk/dpdk/drivers/crypto/mvsam/rte_pmd_mvsam_version.map
new file mode 100644
index 00000000..a7530317
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/rte_pmd_mvsam_version.map
@@ -0,0 +1,3 @@
+DPDK_17.11 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/null/Makefile b/src/spdk/dpdk/drivers/crypto/null/Makefile
new file mode 100644
index 00000000..9e6400c1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/null/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+
+# library name
+LIB = librte_pmd_null_crypto.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_null_crypto_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null_crypto_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null_crypto_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/null/meson.build b/src/spdk/dpdk/drivers/crypto/null/meson.build
new file mode 100644
index 00000000..502336da
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/null/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+deps += 'bus_vdev'
+name = 'null_crypto'
+sources = files('null_crypto_pmd.c', 'null_crypto_pmd_ops.c')
diff --git a/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd.c b/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd.c
new file mode 100644
index 00000000..6e29a21a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd.c
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+
+#include "null_crypto_pmd_private.h"
+
+static uint8_t cryptodev_driver_id;
+
+/** verify and set session parameters */
+int
+null_crypto_set_session_parameters(
+ struct null_crypto_session *sess __rte_unused,
+ const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL) {
+ return -EINVAL;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ /* Authentication Only */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ /* Authentication then Cipher */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
+ xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next == NULL) {
+ /* Cipher Only */
+ if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ /* Cipher then Authentication */
+ if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL &&
+ xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+/** Process crypto operation for mbuf */
+static int
+process_op(const struct null_crypto_qp *qp, struct rte_crypto_op *op,
+ struct null_crypto_session *sess __rte_unused)
+{
+ /* set status as successful by default */
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Free session if a session-less crypto op. */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(op->sym->session, 0,
+ sizeof(struct null_crypto_session));
+ rte_cryptodev_sym_session_free(op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ /*
+ * if crypto session and operation are valid just enqueue the packet
+ * in the processed ring
+ */
+ return rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
+static struct null_crypto_session *
+get_session(struct null_crypto_qp *qp, struct rte_crypto_op *op)
+{
+ struct null_crypto_session *sess = NULL;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(sym_op->session != NULL))
+ sess = (struct null_crypto_session *)
+ get_sym_session_private_data(
+ sym_op->session, cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct null_crypto_session *)_sess_private_data;
+
+ if (unlikely(null_crypto_set_session_parameters(sess,
+ sym_op->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ return sess;
+}
+
+/** Enqueue burst */
+static uint16_t
+null_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct null_crypto_session *sess;
+ struct null_crypto_qp *qp = queue_pair;
+
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ qp->qp_stats.enqueued_count += i;
+ return i;
+
+enqueue_err:
+ if (ops[i])
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+ qp->qp_stats.enqueue_err_count++;
+ return i;
+}
+
+/** Dequeue burst */
+static uint16_t
+null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct null_crypto_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/** Create crypto device */
+static int
+cryptodev_null_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct null_crypto_private *internals;
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ NULL_LOG(ERR, "failed to create cryptodev vdev");
+ return -EFAULT;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = null_crypto_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = null_crypto_pmd_dequeue_burst;
+ dev->enqueue_burst = null_crypto_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+
+ return 0;
+}
+
+/** Initialise null crypto device */
+static int
+cryptodev_null_probe(struct rte_vdev_device *dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct null_crypto_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name, *args;
+ int retval;
+
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ args = rte_vdev_device_args(dev);
+
+ retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ NULL_LOG(ERR,
+ "Failed to parse initialisation arguments[%s]",
+ args);
+ return -EINVAL;
+ }
+
+ return cryptodev_null_create(name, dev, &init_params);
+}
+
+static int
+cryptodev_null_remove_dev(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_null_pmd_drv = {
+ .probe = cryptodev_null_probe,
+ .remove = cryptodev_null_remove_dev,
+};
+
+static struct cryptodev_driver null_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv.driver,
+ cryptodev_driver_id);
+
+RTE_INIT(null_init_log)
+{
+ null_logtype_driver = rte_log_register("pmd.crypto.null");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_ops.c
new file mode 100644
index 00000000..bb2b6e14
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "null_crypto_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities null_crypto_pmd_capabilities[] = {
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, },
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+null_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+null_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+null_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+null_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Get device statistics */
+static void
+null_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+null_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+null_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct null_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = null_crypto_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+null_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** set a unique name for the queue pair based on it's name, dev_id and qp_id */
+static int
+null_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct null_crypto_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "null_crypto_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ NULL_LOG(INFO,
+ "Reusing existing ring %s for "
+ " processed packets", qp->name);
+ return r;
+ }
+
+ NULL_LOG(INFO,
+ "Unable to reuse existing ring %s for "
+ " processed packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct null_crypto_private *internals = dev->data->dev_private;
+ struct null_crypto_qp *qp;
+ int retval;
+
+ if (qp_id >= internals->max_nb_qpairs) {
+ NULL_LOG(ERR, "Invalid qp_id %u, greater than maximum "
+ "number of queue pairs supported (%u).",
+ qp_id, internals->max_nb_qpairs);
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ null_crypto_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("Null Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ NULL_LOG(ERR, "Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = null_crypto_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ NULL_LOG(ERR, "Failed to create unique name for null "
+ "crypto device");
+
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = null_crypto_pmd_qp_create_processed_pkts_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL) {
+ NULL_LOG(ERR, "Failed to create unique name for null "
+ "crypto device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+null_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the NULL crypto session structure */
+static unsigned
+null_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct null_crypto_session);
+}
+
+/** Configure a null crypto session from a crypto xform chain */
+static int
+null_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mp)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ NULL_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mp, &sess_private_data)) {
+ NULL_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = null_crypto_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ NULL_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mp, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+null_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct null_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops pmd_ops = {
+ .dev_configure = null_crypto_pmd_config,
+ .dev_start = null_crypto_pmd_start,
+ .dev_stop = null_crypto_pmd_stop,
+ .dev_close = null_crypto_pmd_close,
+
+ .stats_get = null_crypto_pmd_stats_get,
+ .stats_reset = null_crypto_pmd_stats_reset,
+
+ .dev_infos_get = null_crypto_pmd_info_get,
+
+ .queue_pair_setup = null_crypto_pmd_qp_setup,
+ .queue_pair_release = null_crypto_pmd_qp_release,
+ .queue_pair_count = null_crypto_pmd_qp_count,
+
+ .sym_session_get_size = null_crypto_pmd_sym_session_get_size,
+ .sym_session_configure = null_crypto_pmd_sym_session_configure,
+ .sym_session_clear = null_crypto_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *null_crypto_pmd_ops = &pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_private.h b/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_private.h
new file mode 100644
index 00000000..d5905afd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_private.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#ifndef _NULL_CRYPTO_PMD_PRIVATE_H_
+#define _NULL_CRYPTO_PMD_PRIVATE_H_
+
+#define CRYPTODEV_NAME_NULL_PMD crypto_null
+/**< Null crypto PMD device name */
+
+int null_logtype_driver;
+
+#define NULL_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, null_logtype_driver, \
+ "%s() line %u: "fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+
+/** private data structure for each NULL crypto device */
+struct null_crypto_private {
+ unsigned max_nb_qpairs; /**< Max number of queue pairs */
+};
+
+/** NULL crypto queue pair */
+struct null_crypto_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing process packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+/** NULL crypto private session structure */
+struct null_crypto_session {
+ uint32_t reserved;
+} __rte_cache_aligned;
+
+/** Set and validate NULL crypto session parameters */
+extern int
+null_crypto_set_session_parameters(struct null_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *null_crypto_pmd_ops;
+
+#endif /* _NULL_CRYPTO_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/null/rte_pmd_null_crypto_version.map b/src/spdk/dpdk/drivers/crypto/null/rte_pmd_null_crypto_version.map
new file mode 100644
index 00000000..dc4d417b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/null/rte_pmd_null_crypto_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/Makefile b/src/spdk/dpdk/drivers/crypto/openssl/Makefile
new file mode 100644
index 00000000..8fe086b9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_openssl.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_openssl_version.map
+
+# external library dependencies
+LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/compat.h b/src/spdk/dpdk/drivers/crypto/openssl/compat.h
new file mode 100644
index 00000000..45f9a33d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/compat.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef __RTA_COMPAT_H__
+#define __RTA_COMPAT_H__
+
+#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
+
+#define set_rsa_params(rsa, p, q, ret) \
+ do {rsa->p = p; rsa->q = q; ret = 0; } while (0)
+
+#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \
+ do { \
+ rsa->dmp1 = dmp1; \
+ rsa->dmq1 = dmq1; \
+ rsa->iqmp = iqmp; \
+ ret = 0; \
+ } while (0)
+
+#define set_rsa_keys(rsa, n, e, d, ret) \
+ do { \
+ rsa->n = n; rsa->e = e; rsa->d = d; ret = 0; \
+ } while (0)
+
+#define set_dh_params(dh, p, g, ret) \
+ do { \
+ dh->p = p; \
+ dh->q = NULL; \
+ dh->g = g; \
+ ret = 0; \
+ } while (0)
+
+#define set_dh_priv_key(dh, priv_key, ret) \
+ do { dh->priv_key = priv_key; ret = 0; } while (0)
+
+#define set_dsa_params(dsa, p, q, g, ret) \
+ do { dsa->p = p; dsa->q = q; dsa->g = g; ret = 0; } while (0)
+
+#define get_dh_pub_key(dh, pub_key) \
+ (pub_key = dh->pub_key)
+
+#define get_dh_priv_key(dh, priv_key) \
+ (priv_key = dh->priv_key)
+
+#define set_dsa_sign(sign, r, s) \
+ do { sign->r = r; sign->s = s; } while (0)
+
+#define get_dsa_sign(sign, r, s) \
+ do { r = sign->r; s = sign->s; } while (0)
+
+#define set_dsa_keys(dsa, pub, priv, ret) \
+ do { dsa->pub_key = pub; dsa->priv_key = priv; ret = 0; } while (0)
+
+#define set_dsa_pub_key(dsa, pub_key) \
+ (dsa->pub_key = pub_key)
+
+#define get_dsa_priv_key(dsa, priv_key) \
+ (priv_key = dsa->priv_key)
+
+#else
+
+#define set_rsa_params(rsa, p, q, ret) \
+ (ret = !RSA_set0_factors(rsa, p, q))
+
+#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \
+ (ret = !RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp))
+
+/* n, e must be non-null, d can be NULL */
+#define set_rsa_keys(rsa, n, e, d, ret) \
+ (ret = !RSA_set0_key(rsa, n, e, d))
+
+#define set_dh_params(dh, p, g, ret) \
+ (ret = !DH_set0_pqg(dh, p, NULL, g))
+
+#define set_dh_priv_key(dh, priv_key, ret) \
+ (ret = !DH_set0_key(dh, NULL, priv_key))
+
+#define get_dh_pub_key(dh, pub_key) \
+ (DH_get0_key(dh_key, &pub_key, NULL))
+
+#define get_dh_priv_key(dh, priv_key) \
+ (DH_get0_key(dh_key, NULL, &priv_key))
+
+#define set_dsa_params(dsa, p, q, g, ret) \
+ (ret = !DSA_set0_pqg(dsa, p, q, g))
+
+#define set_dsa_priv_key(dsa, priv_key) \
+ (DSA_set0_key(dsa, NULL, priv_key))
+
+#define set_dsa_sign(sign, r, s) \
+ (DSA_SIG_set0(sign, r, s))
+
+#define get_dsa_sign(sign, r, s) \
+ (DSA_SIG_get0(sign, &r, &s))
+
+#define set_dsa_keys(dsa, pub, priv, ret) \
+ (ret = !DSA_set0_key(dsa, pub, priv))
+
+#define set_dsa_pub_key(dsa, pub_key) \
+ (DSA_set0_key(dsa, pub_key, NULL))
+
+#define get_dsa_priv_key(dsa, priv_key) \
+ (DSA_get0_key(dsa, NULL, &priv_key))
+
+#endif /* version < 10100000 */
+
+#endif /* __RTA_COMPAT_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/meson.build b/src/spdk/dpdk/drivers/crypto/openssl/meson.build
new file mode 100644
index 00000000..c2a0dd8b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+dep = dependency('libcrypto', required: false)
+if not dep.found()
+ build = false
+endif
+deps += 'bus_vdev'
+sources = files('rte_openssl_pmd.c', 'rte_openssl_pmd_ops.c')
+ext_deps += dep
+pkgconfig_extra_libs += '-lcrypto'
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c
new file mode 100644
index 00000000..7d263aba
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -0,0 +1,2194 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include <openssl/hmac.h>
+#include <openssl/evp.h>
+
+#include "rte_openssl_pmd_private.h"
+#include "compat.h"
+
+#define DES_BLOCK_SIZE 8
+
+static uint8_t cryptodev_driver_id;
+
+#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
+static HMAC_CTX *HMAC_CTX_new(void)
+{
+ HMAC_CTX *ctx = OPENSSL_malloc(sizeof(*ctx));
+
+ if (ctx != NULL)
+ HMAC_CTX_init(ctx);
+ return ctx;
+}
+
+static void HMAC_CTX_free(HMAC_CTX *ctx)
+{
+ if (ctx != NULL) {
+ HMAC_CTX_cleanup(ctx);
+ OPENSSL_free(ctx);
+ }
+}
+#endif
+
+static int cryptodev_openssl_remove(struct rte_vdev_device *vdev);
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * Increment counter by 1
+ * Counter is 64 bit array, big-endian
+ */
+static void
+ctr_inc(uint8_t *ctr)
+{
+ uint64_t *ctr64 = (uint64_t *)ctr;
+
+ *ctr64 = __builtin_bswap64(*ctr64);
+ (*ctr64)++;
+ *ctr64 = __builtin_bswap64(*ctr64);
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Session Prepare
+ *------------------------------------------------------------------------------
+ */
+
+/** Get xform chain order */
+static enum openssl_chain_order
+openssl_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ enum openssl_chain_order res = OPENSSL_CHAIN_NOT_SUPPORTED;
+
+ if (xform != NULL) {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ res = OPENSSL_CHAIN_ONLY_AUTH;
+ else if (xform->next->type ==
+ RTE_CRYPTO_SYM_XFORM_CIPHER)
+ res = OPENSSL_CHAIN_AUTH_CIPHER;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ res = OPENSSL_CHAIN_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ res = OPENSSL_CHAIN_CIPHER_AUTH;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ res = OPENSSL_CHAIN_COMBINED;
+ }
+
+ return res;
+}
+
+/** Get session cipher key from input cipher key */
+static void
+get_cipher_key(uint8_t *input_key, int keylen, uint8_t *session_key)
+{
+ memcpy(session_key, input_key, keylen);
+}
+
+/** Get key ede 24 bytes standard from input key */
+static int
+get_cipher_key_ede(uint8_t *key, int keylen, uint8_t *key_ede)
+{
+ int res = 0;
+
+ /* Initialize keys - 24 bytes: [key1-key2-key3] */
+ switch (keylen) {
+ case 24:
+ memcpy(key_ede, key, 24);
+ break;
+ case 16:
+ /* K3 = K1 */
+ memcpy(key_ede, key, 16);
+ memcpy(key_ede + 16, key, 8);
+ break;
+ case 8:
+ /* K1 = K2 = K3 (DES compatibility) */
+ memcpy(key_ede, key, 8);
+ memcpy(key_ede + 8, key, 8);
+ memcpy(key_ede + 16, key, 8);
+ break;
+ default:
+ OPENSSL_LOG(ERR, "Unsupported key size");
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/** Get adequate openssl function for input cipher algorithm */
+static uint8_t
+get_cipher_algo(enum rte_crypto_cipher_algorithm sess_algo, size_t keylen,
+ const EVP_CIPHER **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sess_algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ switch (keylen) {
+ case 8:
+ *algo = EVP_des_cbc();
+ break;
+ case 16:
+ *algo = EVP_des_ede_cbc();
+ break;
+ case 24:
+ *algo = EVP_des_ede3_cbc();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_cbc();
+ break;
+ case 24:
+ *algo = EVP_aes_192_cbc();
+ break;
+ case 32:
+ *algo = EVP_aes_256_cbc();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_ctr();
+ break;
+ case 24:
+ *algo = EVP_aes_192_ctr();
+ break;
+ case 32:
+ *algo = EVP_aes_256_ctr();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/** Get adequate openssl function for input auth algorithm */
+static uint8_t
+get_auth_algo(enum rte_crypto_auth_algorithm sessalgo,
+ const EVP_MD **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sessalgo) {
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ *algo = EVP_md5();
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ *algo = EVP_sha1();
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ *algo = EVP_sha224();
+ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ *algo = EVP_sha256();
+ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ *algo = EVP_sha384();
+ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ *algo = EVP_sha512();
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/** Get adequate openssl function for input cipher algorithm */
+static uint8_t
+get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen,
+ const EVP_CIPHER **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sess_algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_gcm();
+ break;
+ case 24:
+ *algo = EVP_aes_192_gcm();
+ break;
+ case 32:
+ *algo = EVP_aes_256_gcm();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_ccm();
+ break;
+ case 24:
+ *algo = EVP_aes_192_ccm();
+ break;
+ case 32:
+ *algo = EVP_aes_256_ccm();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/* Set session AEAD encryption parameters */
+static int
+openssl_set_sess_aead_enc_param(struct openssl_session *sess,
+ enum rte_crypto_aead_algorithm algo,
+ uint8_t tag_len, uint8_t *key)
+{
+ int iv_type = 0;
+ unsigned int do_ccm;
+
+ sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ /* Select AEAD algo */
+ switch (algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ iv_type = EVP_CTRL_GCM_SET_IVLEN;
+ if (tag_len != 16)
+ return -EINVAL;
+ do_ccm = 0;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ iv_type = EVP_CTRL_CCM_SET_IVLEN;
+ /* Digest size can be 4, 6, 8, 10, 12, 14 or 16 bytes */
+ if (tag_len < 4 || tag_len > 16 || (tag_len & 1) == 1)
+ return -EINVAL;
+ do_ccm = 1;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_aead_algo(algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(key, sess->cipher.key.length, sess->cipher.key.data);
+
+ sess->chain_order = OPENSSL_CHAIN_COMBINED;
+
+ if (EVP_EncryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo,
+ NULL, NULL, NULL) <= 0)
+ return -EINVAL;
+
+ if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type, sess->iv.length,
+ NULL) <= 0)
+ return -EINVAL;
+
+ if (do_ccm)
+ EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG,
+ tag_len, NULL);
+
+ if (EVP_EncryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Set session AEAD decryption parameters */
+static int
+openssl_set_sess_aead_dec_param(struct openssl_session *sess,
+ enum rte_crypto_aead_algorithm algo,
+ uint8_t tag_len, uint8_t *key)
+{
+ int iv_type = 0;
+ unsigned int do_ccm = 0;
+
+ sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
+
+ /* Select AEAD algo */
+ switch (algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ iv_type = EVP_CTRL_GCM_SET_IVLEN;
+ if (tag_len != 16)
+ return -EINVAL;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ iv_type = EVP_CTRL_CCM_SET_IVLEN;
+ /* Digest size can be 4, 6, 8, 10, 12, 14 or 16 bytes */
+ if (tag_len < 4 || tag_len > 16 || (tag_len & 1) == 1)
+ return -EINVAL;
+ do_ccm = 1;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_aead_algo(algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(key, sess->cipher.key.length, sess->cipher.key.data);
+
+ sess->chain_order = OPENSSL_CHAIN_COMBINED;
+
+ if (EVP_DecryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo,
+ NULL, NULL, NULL) <= 0)
+ return -EINVAL;
+
+ if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type,
+ sess->iv.length, NULL) <= 0)
+ return -EINVAL;
+
+ if (do_ccm)
+ EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG,
+ tag_len, NULL);
+
+ if (EVP_DecryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/** Set session cipher parameters */
+static int
+openssl_set_session_cipher_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select cipher direction */
+ sess->cipher.direction = xform->cipher.op;
+ /* Select cipher key */
+ sess->cipher.key.length = xform->cipher.key.length;
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->cipher.iv.offset;
+ sess->iv.length = xform->cipher.iv.length;
+
+ /* Select cipher algo */
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->cipher.algo = xform->cipher.algo;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_cipher_algo(sess->cipher.algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (EVP_EncryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ } else if (sess->cipher.direction ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if (EVP_DecryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ }
+
+ break;
+
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ sess->cipher.mode = OPENSSL_CIPHER_DES3CTR;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_cipher_key_ede(xform->cipher.key.data,
+ sess->cipher.key.length,
+ sess->cipher.key.data) != 0)
+ return -EINVAL;
+ break;
+
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ sess->cipher.algo = xform->cipher.algo;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+ sess->cipher.evp_algo = EVP_des_cbc();
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (EVP_EncryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ } else if (sess->cipher.direction ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if (EVP_DecryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ }
+
+ break;
+
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ sess->cipher.algo = xform->cipher.algo;
+ sess->chain_order = OPENSSL_CHAIN_CIPHER_BPI;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+ sess->cipher.evp_algo = EVP_des_cbc();
+
+ sess->cipher.bpi_ctx = EVP_CIPHER_CTX_new();
+ /* IV will be ECB encrypted whether direction is encrypt or decrypt */
+ if (EVP_EncryptInit_ex(sess->cipher.bpi_ctx, EVP_des_ecb(),
+ NULL, xform->cipher.key.data, 0) != 1)
+ return -EINVAL;
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (EVP_EncryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ } else if (sess->cipher.direction ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if (EVP_DecryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ }
+
+ break;
+ default:
+ sess->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+/* Set session auth parameters */
+static int
+openssl_set_session_auth_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select auth generate/verify */
+ sess->auth.operation = xform->auth.op;
+ sess->auth.algo = xform->auth.algo;
+
+ sess->auth.digest_length = xform->auth.digest_length;
+
+ /* Select auth algo */
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ /*
+ * OpenSSL requires GMAC to be a GCM operation
+ * with no cipher data length
+ */
+ sess->cipher.key.length = xform->auth.key.length;
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->auth.iv.offset;
+ sess->iv.length = xform->auth.iv.length;
+
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE)
+ return openssl_set_sess_aead_enc_param(sess,
+ RTE_CRYPTO_AEAD_AES_GCM,
+ xform->auth.digest_length,
+ xform->auth.key.data);
+ else
+ return openssl_set_sess_aead_dec_param(sess,
+ RTE_CRYPTO_AEAD_AES_GCM,
+ xform->auth.digest_length,
+ xform->auth.key.data);
+ break;
+
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_SHA512:
+ sess->auth.mode = OPENSSL_AUTH_AS_AUTH;
+ if (get_auth_algo(xform->auth.algo,
+ &sess->auth.auth.evp_algo) != 0)
+ return -EINVAL;
+ sess->auth.auth.ctx = EVP_MD_CTX_create();
+ break;
+
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ sess->auth.mode = OPENSSL_AUTH_AS_HMAC;
+ sess->auth.hmac.ctx = HMAC_CTX_new();
+ if (get_auth_algo(xform->auth.algo,
+ &sess->auth.hmac.evp_algo) != 0)
+ return -EINVAL;
+
+ if (HMAC_Init_ex(sess->auth.hmac.ctx,
+ xform->auth.key.data,
+ xform->auth.key.length,
+ sess->auth.hmac.evp_algo, NULL) != 1)
+ return -EINVAL;
+ break;
+
+ default:
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+/* Set session AEAD parameters */
+static int
+openssl_set_session_aead_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select cipher key */
+ sess->cipher.key.length = xform->aead.key.length;
+
+ /* Set IV parameters */
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
+ /*
+ * For AES-CCM, the actual IV is placed
+ * one byte after the start of the IV field,
+ * according to the API.
+ */
+ sess->iv.offset = xform->aead.iv.offset + 1;
+ else
+ sess->iv.offset = xform->aead.iv.offset;
+
+ sess->iv.length = xform->aead.iv.length;
+
+ sess->auth.aad_length = xform->aead.aad_length;
+ sess->auth.digest_length = xform->aead.digest_length;
+
+ sess->aead_algo = xform->aead.algo;
+ /* Select cipher direction */
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ return openssl_set_sess_aead_enc_param(sess, xform->aead.algo,
+ xform->aead.digest_length, xform->aead.key.data);
+ else
+ return openssl_set_sess_aead_dec_param(sess, xform->aead.algo,
+ xform->aead.digest_length, xform->aead.key.data);
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+openssl_set_session_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+ int ret;
+
+ sess->chain_order = openssl_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case OPENSSL_CHAIN_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case OPENSSL_CHAIN_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case OPENSSL_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case OPENSSL_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case OPENSSL_CHAIN_COMBINED:
+ aead_xform = xform;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+
+ /* cipher_xform must be check before auth_xform */
+ if (cipher_xform) {
+ ret = openssl_set_session_cipher_parameters(
+ sess, cipher_xform);
+ if (ret != 0) {
+ OPENSSL_LOG(ERR,
+ "Invalid/unsupported cipher parameters");
+ return ret;
+ }
+ }
+
+ if (auth_xform) {
+ ret = openssl_set_session_auth_parameters(sess, auth_xform);
+ if (ret != 0) {
+ OPENSSL_LOG(ERR,
+ "Invalid/unsupported auth parameters");
+ return ret;
+ }
+ }
+
+ if (aead_xform) {
+ ret = openssl_set_session_aead_parameters(sess, aead_xform);
+ if (ret != 0) {
+ OPENSSL_LOG(ERR,
+ "Invalid/unsupported AEAD parameters");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/** Reset private session parameters */
+void
+openssl_reset_session(struct openssl_session *sess)
+{
+ EVP_CIPHER_CTX_free(sess->cipher.ctx);
+
+ if (sess->chain_order == OPENSSL_CHAIN_CIPHER_BPI)
+ EVP_CIPHER_CTX_free(sess->cipher.bpi_ctx);
+
+ switch (sess->auth.mode) {
+ case OPENSSL_AUTH_AS_AUTH:
+ EVP_MD_CTX_destroy(sess->auth.auth.ctx);
+ break;
+ case OPENSSL_AUTH_AS_HMAC:
+ EVP_PKEY_free(sess->auth.hmac.pkey);
+ HMAC_CTX_free(sess->auth.hmac.ctx);
+ break;
+ default:
+ break;
+ }
+}
+
+/** Provide session for operation */
+static void *
+get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
+{
+ struct openssl_session *sess = NULL;
+ struct openssl_asym_session *asym_sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ /* get existing session */
+ if (likely(op->sym->session != NULL))
+ sess = (struct openssl_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ if (likely(op->asym->session != NULL))
+ asym_sess = (struct openssl_asym_session *)
+ get_asym_session_private_data(
+ op->asym->session,
+ cryptodev_driver_id);
+ if (asym_sess == NULL)
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return asym_sess;
+ }
+ } else {
+ /* sessionless asymmetric not supported */
+ if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
+ return NULL;
+
+ /* provide internal session */
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct openssl_session *)_sess_private_data;
+
+ if (unlikely(openssl_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (sess == NULL)
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Process Operations
+ *------------------------------------------------------------------------------
+ */
+static inline int
+process_openssl_encryption_update(struct rte_mbuf *mbuf_src, int offset,
+ uint8_t **dst, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ struct rte_mbuf *m;
+ int dstlen;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ return -1;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, srclen) <= 0)
+ return -1;
+ *dst += l;
+ return 0;
+ }
+
+ if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+
+ *dst += dstlen;
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+ *dst += dstlen;
+ n -= l;
+ }
+
+ return 0;
+}
+
+static inline int
+process_openssl_decryption_update(struct rte_mbuf *mbuf_src, int offset,
+ uint8_t **dst, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ struct rte_mbuf *m;
+ int dstlen;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ return -1;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, srclen) <= 0)
+ return -1;
+ *dst += l;
+ return 0;
+ }
+
+ if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+
+ *dst += dstlen;
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+ *dst += dstlen;
+ n -= l;
+ }
+
+ return 0;
+}
+
+/** Process standard openssl cipher encryption */
+static int
+process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ int totlen;
+
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_cipher_encrypt_err;
+
+ EVP_CIPHER_CTX_set_padding(ctx, 0);
+
+ if (process_openssl_encryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_cipher_encrypt_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst, &totlen) <= 0)
+ goto process_cipher_encrypt_err;
+
+ return 0;
+
+process_cipher_encrypt_err:
+ OPENSSL_LOG(ERR, "Process openssl cipher encrypt failed");
+ return -EINVAL;
+}
+
+/** Process standard openssl cipher encryption */
+static int
+process_openssl_cipher_bpi_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int srclen,
+ EVP_CIPHER_CTX *ctx)
+{
+ uint8_t i;
+ uint8_t encrypted_iv[DES_BLOCK_SIZE];
+ int encrypted_ivlen;
+
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen,
+ iv, DES_BLOCK_SIZE) <= 0)
+ goto process_cipher_encrypt_err;
+
+ for (i = 0; i < srclen; i++)
+ *(dst + i) = *(src + i) ^ (encrypted_iv[i]);
+
+ return 0;
+
+process_cipher_encrypt_err:
+ OPENSSL_LOG(ERR, "Process openssl cipher bpi encrypt failed");
+ return -EINVAL;
+}
+/** Process standard openssl cipher decryption */
+static int
+process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ int totlen;
+
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_cipher_decrypt_err;
+
+ EVP_CIPHER_CTX_set_padding(ctx, 0);
+
+ if (process_openssl_decryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_cipher_decrypt_err;
+
+ if (EVP_DecryptFinal_ex(ctx, dst, &totlen) <= 0)
+ goto process_cipher_decrypt_err;
+ return 0;
+
+process_cipher_decrypt_err:
+ OPENSSL_LOG(ERR, "Process openssl cipher decrypt failed");
+ return -EINVAL;
+}
+
+/** Process cipher des 3 ctr encryption, decryption algorithm */
+static int
+process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ int offset, uint8_t *iv, uint8_t *key, int srclen,
+ EVP_CIPHER_CTX *ctx)
+{
+ uint8_t ebuf[8], ctr[8];
+ int unused, n;
+ struct rte_mbuf *m;
+ uint8_t *src;
+ int l;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ goto process_cipher_des3ctr_err;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+ l = rte_pktmbuf_data_len(m) - offset;
+
+ /* We use 3DES encryption also for decryption.
+ * IV is not important for 3DES ecb
+ */
+ if (EVP_EncryptInit_ex(ctx, EVP_des_ede3_ecb(), NULL, key, NULL) <= 0)
+ goto process_cipher_des3ctr_err;
+
+ memcpy(ctr, iv, 8);
+
+ for (n = 0; n < srclen; n++) {
+ if (n % 8 == 0) {
+ if (EVP_EncryptUpdate(ctx,
+ (unsigned char *)&ebuf, &unused,
+ (const unsigned char *)&ctr, 8) <= 0)
+ goto process_cipher_des3ctr_err;
+ ctr_inc(ctr);
+ }
+ dst[n] = *(src++) ^ ebuf[n % 8];
+
+ l--;
+ if (!l) {
+ m = m->next;
+ if (m) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m);
+ }
+ }
+ }
+
+ return 0;
+
+process_cipher_des3ctr_err:
+ OPENSSL_LOG(ERR, "Process openssl cipher des 3 ede ctr failed");
+ return -EINVAL;
+}
+
+/** Process AES-GCM encrypt algorithm */
+static int
+process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx)
+{
+ int len = 0, unused = 0;
+ uint8_t empty[] = {};
+
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (aadlen > 0)
+ if (EVP_EncryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (srclen > 0)
+ if (process_openssl_encryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_auth_encryption_gcm_err;
+
+ /* Workaround open ssl bug in version less then 1.0.1f */
+ if (EVP_EncryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, 16, tag) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ return 0;
+
+process_auth_encryption_gcm_err:
+ OPENSSL_LOG(ERR, "Process openssl auth encryption gcm failed");
+ return -EINVAL;
+}
+
+/** Process AES-CCM encrypt algorithm */
+static int
+process_openssl_auth_encryption_ccm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, uint8_t taglen, EVP_CIPHER_CTX *ctx)
+{
+ int len = 0;
+
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (EVP_EncryptUpdate(ctx, NULL, &len, NULL, srclen) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (aadlen > 0)
+ /*
+ * For AES-CCM, the actual AAD is placed
+ * 18 bytes after the start of the AAD field,
+ * according to the API.
+ */
+ if (EVP_EncryptUpdate(ctx, NULL, &len, aad + 18, aadlen) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (srclen > 0)
+ if (process_openssl_encryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_auth_encryption_ccm_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_GET_TAG, taglen, tag) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ return 0;
+
+process_auth_encryption_ccm_err:
+ OPENSSL_LOG(ERR, "Process openssl auth encryption ccm failed");
+ return -EINVAL;
+}
+
+/** Process AES-GCM decrypt algorithm */
+static int
+process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx)
+{
+ int len = 0, unused = 0;
+ uint8_t empty[] = {};
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (aadlen > 0)
+ if (EVP_DecryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (srclen > 0)
+ if (process_openssl_decryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_auth_decryption_gcm_err;
+
+ /* Workaround open ssl bug in version less then 1.0.1f */
+ if (EVP_DecryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_DecryptFinal_ex(ctx, dst, &len) <= 0)
+ return -EFAULT;
+
+ return 0;
+
+process_auth_decryption_gcm_err:
+ OPENSSL_LOG(ERR, "Process openssl auth decryption gcm failed");
+ return -EINVAL;
+}
+
+/** Process AES-CCM decrypt algorithm */
+static int
+process_openssl_auth_decryption_ccm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, uint8_t tag_len,
+ EVP_CIPHER_CTX *ctx)
+{
+ int len = 0;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_TAG, tag_len, tag) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (EVP_DecryptUpdate(ctx, NULL, &len, NULL, srclen) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (aadlen > 0)
+ /*
+ * For AES-CCM, the actual AAD is placed
+ * 18 bytes after the start of the AAD field,
+ * according to the API.
+ */
+ if (EVP_DecryptUpdate(ctx, NULL, &len, aad + 18, aadlen) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (srclen > 0)
+ if (process_openssl_decryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ return -EFAULT;
+
+ return 0;
+
+process_auth_decryption_ccm_err:
+ OPENSSL_LOG(ERR, "Process openssl auth decryption ccm failed");
+ return -EINVAL;
+}
+
+/** Process standard openssl auth algorithms */
+static int
+process_openssl_auth(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
+ __rte_unused uint8_t *iv, __rte_unused EVP_PKEY * pkey,
+ int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
+{
+ size_t dstlen;
+ struct rte_mbuf *m;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ goto process_auth_err;
+
+ if (EVP_DigestInit_ex(ctx, algo, NULL) <= 0)
+ goto process_auth_err;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_DigestUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+ goto process_auth_final;
+ }
+
+ if (EVP_DigestUpdate(ctx, (char *)src, l) <= 0)
+ goto process_auth_err;
+
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_DigestUpdate(ctx, (char *)src, l) <= 0)
+ goto process_auth_err;
+ n -= l;
+ }
+
+process_auth_final:
+ if (EVP_DigestFinal_ex(ctx, dst, (unsigned int *)&dstlen) <= 0)
+ goto process_auth_err;
+ return 0;
+
+process_auth_err:
+ OPENSSL_LOG(ERR, "Process openssl auth failed");
+ return -EINVAL;
+}
+
+/** Process standard openssl auth algorithms with hmac */
+static int
+process_openssl_auth_hmac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
+ int srclen, HMAC_CTX *ctx)
+{
+ unsigned int dstlen;
+ struct rte_mbuf *m;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ goto process_auth_err;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (HMAC_Update(ctx, (unsigned char *)src, srclen) != 1)
+ goto process_auth_err;
+ goto process_auth_final;
+ }
+
+ if (HMAC_Update(ctx, (unsigned char *)src, l) != 1)
+ goto process_auth_err;
+
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (HMAC_Update(ctx, (unsigned char *)src, l) != 1)
+ goto process_auth_err;
+ n -= l;
+ }
+
+process_auth_final:
+ if (HMAC_Final(ctx, dst, &dstlen) != 1)
+ goto process_auth_err;
+
+ if (unlikely(HMAC_Init_ex(ctx, NULL, 0, NULL, NULL) != 1))
+ goto process_auth_err;
+
+ return 0;
+
+process_auth_err:
+ OPENSSL_LOG(ERR, "Process openssl auth failed");
+ return -EINVAL;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/** Process auth/cipher combined operation */
+static void
+process_openssl_combined_op
+ (struct rte_crypto_op *op, struct openssl_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ /* cipher */
+ uint8_t *dst = NULL, *iv, *tag, *aad;
+ int srclen, aadlen, status = -1;
+ uint32_t offset;
+ uint8_t taglen;
+
+ /*
+ * Segmented destination buffer is not supported for
+ * encryption/decryption
+ */
+ if (!rte_pktmbuf_is_contiguous(mbuf_dst)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ srclen = 0;
+ offset = op->sym->auth.data.offset;
+ aadlen = op->sym->auth.data.length;
+ aad = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->auth.data.offset);
+ tag = op->sym->auth.digest.data;
+ if (tag == NULL)
+ tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ offset + aadlen);
+ } else {
+ srclen = op->sym->aead.data.length;
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->aead.data.offset);
+ offset = op->sym->aead.data.offset;
+ aad = op->sym->aead.aad.data;
+ aadlen = sess->auth.aad_length;
+ tag = op->sym->aead.digest.data;
+ if (tag == NULL)
+ tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ offset + srclen);
+ }
+
+ taglen = sess->auth.digest_length;
+
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC ||
+ sess->aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
+ status = process_openssl_auth_encryption_gcm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, sess->cipher.ctx);
+ else
+ status = process_openssl_auth_encryption_ccm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, taglen, sess->cipher.ctx);
+
+ } else {
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC ||
+ sess->aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
+ status = process_openssl_auth_decryption_gcm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, sess->cipher.ctx);
+ else
+ status = process_openssl_auth_decryption_ccm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, taglen, sess->cipher.ctx);
+ }
+
+ if (status != 0) {
+ if (status == (-EFAULT) &&
+ sess->auth.operation ==
+ RTE_CRYPTO_AUTH_OP_VERIFY)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+}
+
+/** Process cipher operation */
+static void
+process_openssl_cipher_op
+ (struct rte_crypto_op *op, struct openssl_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *dst, *iv;
+ int srclen, status;
+
+ /*
+ * Segmented destination buffer is not supported for
+ * encryption/decryption
+ */
+ if (!rte_pktmbuf_is_contiguous(mbuf_dst)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ srclen = op->sym->cipher.data.length;
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ if (sess->cipher.mode == OPENSSL_CIPHER_LIB)
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ status = process_openssl_cipher_encrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ srclen, sess->cipher.ctx);
+ else
+ status = process_openssl_cipher_decrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ srclen, sess->cipher.ctx);
+ else
+ status = process_openssl_cipher_des3ctr(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx);
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+/** Process cipher operation */
+static void
+process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
+ struct openssl_session *sess, struct rte_mbuf *mbuf_src,
+ struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *src, *dst, *iv;
+ uint8_t block_size, last_block_len;
+ int srclen, status = 0;
+
+ srclen = op->sym->cipher.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->cipher.data.offset);
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ block_size = DES_BLOCK_SIZE;
+
+ last_block_len = srclen % block_size;
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ /* Encrypt only with ECB mode XOR IV */
+ if (srclen < block_size) {
+ status = process_openssl_cipher_bpi_encrypt(src, dst,
+ iv, srclen,
+ sess->cipher.bpi_ctx);
+ } else {
+ srclen -= last_block_len;
+ /* Encrypt with the block aligned stream with CBC mode */
+ status = process_openssl_cipher_encrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ srclen, sess->cipher.ctx);
+ if (last_block_len) {
+ /* Point at last block */
+ dst += srclen;
+ /*
+ * IV is the last encrypted block from
+ * the previous operation
+ */
+ iv = dst - block_size;
+ src += srclen;
+ srclen = last_block_len;
+ /* Encrypt the last frame with ECB mode */
+ status |= process_openssl_cipher_bpi_encrypt(src,
+ dst, iv,
+ srclen, sess->cipher.bpi_ctx);
+ }
+ }
+ } else {
+ /* Decrypt only with ECB mode (encrypt, as it is same operation) */
+ if (srclen < block_size) {
+ status = process_openssl_cipher_bpi_encrypt(src, dst,
+ iv,
+ srclen,
+ sess->cipher.bpi_ctx);
+ } else {
+ if (last_block_len) {
+ /* Point at last block */
+ dst += srclen - last_block_len;
+ src += srclen - last_block_len;
+ /*
+ * IV is the last full block
+ */
+ iv = src - block_size;
+ /*
+ * Decrypt the last frame with ECB mode
+ * (encrypt, as it is the same operation)
+ */
+ status = process_openssl_cipher_bpi_encrypt(src,
+ dst, iv,
+ last_block_len, sess->cipher.bpi_ctx);
+ /* Prepare parameters for CBC mode op */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+ dst += last_block_len - srclen;
+ srclen -= last_block_len;
+ }
+
+ /* Decrypt with CBC mode */
+ status |= process_openssl_cipher_decrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ srclen, sess->cipher.ctx);
+ }
+ }
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+/** Process auth operation */
+static void
+process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
+ struct openssl_session *sess, struct rte_mbuf *mbuf_src,
+ struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *dst;
+ int srclen, status;
+
+ srclen = op->sym->auth.data.length;
+
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
+ dst = qp->temp_digest;
+ else {
+ dst = op->sym->auth.digest.data;
+ if (dst == NULL)
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ }
+
+ switch (sess->auth.mode) {
+ case OPENSSL_AUTH_AS_AUTH:
+ status = process_openssl_auth(mbuf_src, dst,
+ op->sym->auth.data.offset, NULL, NULL, srclen,
+ sess->auth.auth.ctx, sess->auth.auth.evp_algo);
+ break;
+ case OPENSSL_AUTH_AS_HMAC:
+ status = process_openssl_auth_hmac(mbuf_src, dst,
+ op->sym->auth.data.offset, srclen,
+ sess->auth.hmac.ctx);
+ break;
+ default:
+ status = -1;
+ break;
+ }
+
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ if (memcmp(dst, op->sym->auth.digest.data,
+ sess->auth.digest_length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+ }
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+/* process dsa sign operation */
+static int
+process_openssl_dsa_sign_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_dsa_op_param *op = &cop->asym->dsa;
+ DSA *dsa = sess->u.s.dsa;
+ DSA_SIG *sign = NULL;
+
+ sign = DSA_do_sign(op->message.data,
+ op->message.length,
+ dsa);
+
+ if (sign == NULL) {
+ OPENSSL_LOG(ERR, "%s:%d\n", __func__, __LINE__);
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ } else {
+ const BIGNUM *r = NULL, *s = NULL;
+ get_dsa_sign(sign, r, s);
+
+ op->r.length = BN_bn2bin(r, op->r.data);
+ op->s.length = BN_bn2bin(s, op->s.data);
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+
+ DSA_SIG_free(sign);
+
+ return 0;
+}
+
+/* process dsa verify operation */
+static int
+process_openssl_dsa_verify_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_dsa_op_param *op = &cop->asym->dsa;
+ DSA *dsa = sess->u.s.dsa;
+ int ret;
+ DSA_SIG *sign = DSA_SIG_new();
+ BIGNUM *r = NULL, *s = NULL;
+ BIGNUM *pub_key = NULL;
+
+ if (sign == NULL) {
+ OPENSSL_LOG(ERR, " %s:%d\n", __func__, __LINE__);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+
+ r = BN_bin2bn(op->r.data,
+ op->r.length,
+ r);
+ s = BN_bin2bn(op->s.data,
+ op->s.length,
+ s);
+ pub_key = BN_bin2bn(op->y.data,
+ op->y.length,
+ pub_key);
+ if (!r || !s || !pub_key) {
+ if (r)
+ BN_free(r);
+ if (s)
+ BN_free(s);
+ if (pub_key)
+ BN_free(pub_key);
+
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ set_dsa_sign(sign, r, s);
+ set_dsa_pub_key(dsa, pub_key);
+
+ ret = DSA_do_verify(op->message.data,
+ op->message.length,
+ sign,
+ dsa);
+
+ if (ret != 1)
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ DSA_SIG_free(sign);
+
+ return 0;
+}
+
+/* process dh operation */
+static int
+process_openssl_dh_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_dh_op_param *op = &cop->asym->dh;
+ DH *dh_key = sess->u.dh.dh_key;
+ BIGNUM *priv_key = NULL;
+ int ret = 0;
+
+ if (sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE)) {
+ /* compute shared secret using peer public key
+ * and current private key
+ * shared secret = peer_key ^ priv_key mod p
+ */
+ BIGNUM *peer_key = NULL;
+
+ /* copy private key and peer key and compute shared secret */
+ peer_key = BN_bin2bn(op->pub_key.data,
+ op->pub_key.length,
+ peer_key);
+ if (peer_key == NULL) {
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ priv_key = BN_bin2bn(op->priv_key.data,
+ op->priv_key.length,
+ priv_key);
+ if (priv_key == NULL) {
+ BN_free(peer_key);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ set_dh_priv_key(dh_key, priv_key, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR, "Failed to set private key\n");
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ BN_free(peer_key);
+ BN_free(priv_key);
+ return 0;
+ }
+
+ ret = DH_compute_key(
+ op->shared_secret.data,
+ peer_key, dh_key);
+ if (ret < 0) {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ BN_free(peer_key);
+ /* priv key is already loaded into dh,
+ * let's not free that directly here.
+ * DH_free() will auto free it later.
+ */
+ return 0;
+ }
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->shared_secret.length = ret;
+ BN_free(peer_key);
+ return 0;
+ }
+
+ /*
+ * other options are public and private key generations.
+ *
+ * if user provides private key,
+ * then first set DH with user provided private key
+ */
+ if ((sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)) &&
+ !(sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE))) {
+ /* generate public key using user-provided private key
+ * pub_key = g ^ priv_key mod p
+ */
+
+ /* load private key into DH */
+ priv_key = BN_bin2bn(op->priv_key.data,
+ op->priv_key.length,
+ priv_key);
+ if (priv_key == NULL) {
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ set_dh_priv_key(dh_key, priv_key, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR, "Failed to set private key\n");
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ BN_free(priv_key);
+ return 0;
+ }
+ }
+
+ /* generate public and private key pair.
+ *
+ * if private key already set, generates only public key.
+ *
+ * if private key is not already set, then set it to random value
+ * and update internal private key.
+ */
+ if (!DH_generate_key(dh_key)) {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return 0;
+ }
+
+ if (sess->u.dh.key_op & (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)) {
+ const BIGNUM *pub_key = NULL;
+
+ OPENSSL_LOG(DEBUG, "%s:%d update public key\n",
+ __func__, __LINE__);
+
+ /* get the generated keys */
+ get_dh_pub_key(dh_key, pub_key);
+
+ /* output public key */
+ op->pub_key.length = BN_bn2bin(pub_key,
+ op->pub_key.data);
+ }
+
+ if (sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE)) {
+ const BIGNUM *priv_key = NULL;
+
+ OPENSSL_LOG(DEBUG, "%s:%d updated priv key\n",
+ __func__, __LINE__);
+
+ /* get the generated keys */
+ get_dh_priv_key(dh_key, priv_key);
+
+ /* provide generated private key back to user */
+ op->priv_key.length = BN_bn2bin(priv_key,
+ op->priv_key.data);
+ }
+
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ return 0;
+}
+
+/* process modinv operation */
+static int
+process_openssl_modinv_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_asym_op *op = cop->asym;
+ BIGNUM *base = BN_CTX_get(sess->u.m.ctx);
+ BIGNUM *res = BN_CTX_get(sess->u.m.ctx);
+
+ if (unlikely(base == NULL || res == NULL)) {
+ if (base)
+ BN_free(base);
+ if (res)
+ BN_free(res);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+
+ base = BN_bin2bn((const unsigned char *)op->modinv.base.data,
+ op->modinv.base.length, base);
+
+ if (BN_mod_inverse(res, base, sess->u.m.modulus, sess->u.m.ctx)) {
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->modinv.base.length = BN_bn2bin(res, op->modinv.base.data);
+ } else {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ return 0;
+}
+
+/* process modexp operation */
+static int
+process_openssl_modexp_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_asym_op *op = cop->asym;
+ BIGNUM *base = BN_CTX_get(sess->u.e.ctx);
+ BIGNUM *res = BN_CTX_get(sess->u.e.ctx);
+
+ if (unlikely(base == NULL || res == NULL)) {
+ if (base)
+ BN_free(base);
+ if (res)
+ BN_free(res);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+
+ base = BN_bin2bn((const unsigned char *)op->modinv.base.data,
+ op->modinv.base.length, base);
+
+ if (BN_mod_exp(res, base, sess->u.e.exp,
+ sess->u.e.mod, sess->u.e.ctx)) {
+ op->modinv.base.length = BN_bn2bin(res, op->modinv.base.data);
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ return 0;
+}
+
+/* process rsa operations */
+static int
+process_openssl_rsa_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ int ret = 0;
+ struct rte_crypto_asym_op *op = cop->asym;
+ RSA *rsa = sess->u.r.rsa;
+ uint32_t pad = (op->rsa.pad);
+
+ switch (pad) {
+ case RTE_CRYPTO_RSA_PKCS1_V1_5_BT0:
+ case RTE_CRYPTO_RSA_PKCS1_V1_5_BT1:
+ case RTE_CRYPTO_RSA_PKCS1_V1_5_BT2:
+ pad = RSA_PKCS1_PADDING;
+ break;
+ case RTE_CRYPTO_RSA_PADDING_NONE:
+ pad = RSA_NO_PADDING;
+ break;
+ default:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ OPENSSL_LOG(ERR,
+ "rsa pad type not supported %d\n", pad);
+ return 0;
+ }
+
+ switch (op->rsa.op_type) {
+ case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+ ret = RSA_public_encrypt(op->rsa.message.length,
+ op->rsa.message.data,
+ op->rsa.message.data,
+ rsa,
+ pad);
+
+ if (ret > 0)
+ op->rsa.message.length = ret;
+ OPENSSL_LOG(DEBUG,
+ "length of encrypted text %d\n", ret);
+ break;
+
+ case RTE_CRYPTO_ASYM_OP_DECRYPT:
+ ret = RSA_private_decrypt(op->rsa.message.length,
+ op->rsa.message.data,
+ op->rsa.message.data,
+ rsa,
+ pad);
+ if (ret > 0)
+ op->rsa.message.length = ret;
+ break;
+
+ case RTE_CRYPTO_ASYM_OP_SIGN:
+ ret = RSA_private_encrypt(op->rsa.message.length,
+ op->rsa.message.data,
+ op->rsa.sign.data,
+ rsa,
+ pad);
+ if (ret > 0)
+ op->rsa.sign.length = ret;
+ break;
+
+ case RTE_CRYPTO_ASYM_OP_VERIFY:
+ ret = RSA_public_decrypt(op->rsa.sign.length,
+ op->rsa.sign.data,
+ op->rsa.sign.data,
+ rsa,
+ pad);
+
+ OPENSSL_LOG(DEBUG,
+ "Length of public_decrypt %d "
+ "length of message %zd\n",
+ ret, op->rsa.message.length);
+
+ if (memcmp(op->rsa.sign.data, op->rsa.message.data,
+ op->rsa.message.length)) {
+ OPENSSL_LOG(ERR,
+ "RSA sign Verification failed");
+ return -1;
+ }
+ break;
+
+ default:
+ /* allow ops with invalid args to be pushed to
+ * completion queue
+ */
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+
+ if (ret < 0)
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+ return 0;
+}
+
+static int
+process_asym_op(struct openssl_qp *qp, struct rte_crypto_op *op,
+ struct openssl_asym_session *sess)
+{
+ int retval = 0;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ retval = process_openssl_rsa_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ retval = process_openssl_modexp_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODINV:
+ retval = process_openssl_modinv_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DH:
+ retval = process_openssl_dh_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DSA:
+ if (op->asym->dsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN)
+ retval = process_openssl_dsa_sign_op(op, sess);
+ else if (op->asym->dsa.op_type ==
+ RTE_CRYPTO_ASYM_OP_VERIFY)
+ retval =
+ process_openssl_dsa_verify_op(op, sess);
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+ if (!retval) {
+ /* op processed so push to completion queue as processed */
+ retval = rte_ring_enqueue(qp->processed_ops, (void *)op);
+ if (retval)
+ /* return error if failed to put in completion queue */
+ retval = -1;
+ }
+
+ return retval;
+}
+
+/** Process crypto operation for mbuf */
+static int
+process_op(struct openssl_qp *qp, struct rte_crypto_op *op,
+ struct openssl_session *sess)
+{
+ struct rte_mbuf *msrc, *mdst;
+ int retval;
+
+ msrc = op->sym->m_src;
+ mdst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (sess->chain_order) {
+ case OPENSSL_CHAIN_ONLY_CIPHER:
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_ONLY_AUTH:
+ process_openssl_auth_op(qp, op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_CIPHER_AUTH:
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ process_openssl_auth_op(qp, op, sess, mdst, mdst);
+ break;
+ case OPENSSL_CHAIN_AUTH_CIPHER:
+ process_openssl_auth_op(qp, op, sess, msrc, mdst);
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_COMBINED:
+ process_openssl_combined_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_CIPHER_BPI:
+ process_openssl_docsis_bpi_op(op, sess, msrc, mdst);
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+
+ /* Free session if a session-less crypto op */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ openssl_reset_session(sess);
+ memset(sess, 0, sizeof(struct openssl_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (op->status != RTE_CRYPTO_OP_STATUS_ERROR)
+ retval = rte_ring_enqueue(qp->processed_ops, (void *)op);
+ else
+ retval = -1;
+
+ return retval;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * PMD Framework
+ *------------------------------------------------------------------------------
+ */
+
+/** Enqueue burst */
+static uint16_t
+openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ void *sess;
+ struct openssl_qp *qp = queue_pair;
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ if (ops[i]->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ retval = process_op(qp, ops[i],
+ (struct openssl_session *) sess);
+ else
+ retval = process_asym_op(qp, ops[i],
+ (struct openssl_asym_session *) sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ qp->stats.enqueued_count += i;
+ return i;
+
+enqueue_err:
+ qp->stats.enqueue_err_count++;
+ return i;
+}
+
+/** Dequeue burst */
+static uint16_t
+openssl_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct openssl_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)ops, nb_ops, NULL);
+ qp->stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/** Create OPENSSL crypto device */
+static int
+cryptodev_openssl_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct openssl_private *internals;
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ OPENSSL_LOG(ERR, "failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_openssl_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = openssl_pmd_dequeue_burst;
+ dev->enqueue_burst = openssl_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
+
+ /* Set vector instructions mode supported */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+
+ return 0;
+
+init_error:
+ OPENSSL_LOG(ERR, "driver %s: create failed",
+ init_params->name);
+
+ cryptodev_openssl_remove(vdev);
+ return -EFAULT;
+}
+
+/** Initialise OPENSSL crypto device */
+static int
+cryptodev_openssl_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct openssl_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ return cryptodev_openssl_create(name, vdev, &init_params);
+}
+
+/** Uninitialise OPENSSL crypto device */
+static int
+cryptodev_openssl_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_openssl_pmd_drv = {
+ .probe = cryptodev_openssl_probe,
+ .remove = cryptodev_openssl_remove
+};
+
+static struct cryptodev_driver openssl_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_OPENSSL_PMD,
+ cryptodev_openssl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv,
+ cryptodev_openssl_pmd_drv.driver, cryptodev_driver_id);
+
+RTE_INIT(openssl_init_log)
+{
+ openssl_logtype_driver = rte_log_register("pmd.crypto.openssl");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c
new file mode 100644
index 00000000..de228439
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -0,0 +1,1264 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_openssl_pmd_private.h"
+#include "compat.h"
+
+
+static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* MD5 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ },
+ }, }
+ }, }
+ },
+ { /* AES CCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 16,
+ .increment = 2
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 7,
+ .max = 13,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES DOCSIS BPI */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* RSA */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
+ .op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+ (1 << RTE_CRYPTO_ASYM_OP_VERIFY) |
+ (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |
+ (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),
+ {
+ .modlen = {
+ /* min length is based on openssl rsa keygen */
+ .min = 30,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* modexp */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
+ .op_types = 0,
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* modinv */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODINV,
+ .op_types = 0,
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* dh */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_DH,
+ .op_types =
+ ((1<<RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE) |
+ (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE |
+ (1 <<
+ RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE))),
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* dsa */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_DSA,
+ .op_types =
+ ((1<<RTE_CRYPTO_ASYM_OP_SIGN) |
+ (1 << RTE_CRYPTO_ASYM_OP_VERIFY)),
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+openssl_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+openssl_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+openssl_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+openssl_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+openssl_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct openssl_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+openssl_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct openssl_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+
+/** Get device info */
+static void
+openssl_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct openssl_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = openssl_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ }
+}
+
+/** Release queue pair */
+static int
+openssl_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** set a unique name for the queue pair based on it's name, dev_id and qp_id */
+static int
+openssl_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct openssl_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "openssl_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ OPENSSL_LOG(INFO,
+ "Reusing existing ring %s for processed ops",
+ qp->name);
+ return r;
+ }
+
+ OPENSSL_LOG(ERR,
+ "Unable to reuse existing ring %s for processed ops",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+
+/** Setup a queue pair */
+static int
+openssl_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct openssl_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ openssl_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("OPENSSL PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (openssl_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = openssl_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+openssl_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the symmetric session structure */
+static unsigned
+openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct openssl_session);
+}
+
+/** Returns the size of the asymmetric session structure */
+static unsigned
+openssl_pmd_asym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct openssl_asym_session);
+}
+
+/** Configure the session from a crypto xform chain */
+static int
+openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ OPENSSL_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ OPENSSL_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = openssl_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ OPENSSL_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+static int openssl_set_asym_session_parameters(
+ struct openssl_asym_session *asym_session,
+ struct rte_crypto_asym_xform *xform)
+{
+ int ret = 0;
+
+ if ((xform->xform_type != RTE_CRYPTO_ASYM_XFORM_DH) &&
+ (xform->next != NULL)) {
+ OPENSSL_LOG(ERR, "chained xfrms are not supported on %s",
+ rte_crypto_asym_xform_strings[xform->xform_type]);
+ return -1;
+ }
+
+ switch (xform->xform_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ {
+ BIGNUM *n = NULL;
+ BIGNUM *e = NULL;
+ BIGNUM *d = NULL;
+ BIGNUM *p = NULL, *q = NULL, *dmp1 = NULL;
+ BIGNUM *iqmp = NULL, *dmq1 = NULL;
+
+ /* copy xfrm data into rsa struct */
+ n = BN_bin2bn((const unsigned char *)xform->rsa.n.data,
+ xform->rsa.n.length, n);
+ e = BN_bin2bn((const unsigned char *)xform->rsa.e.data,
+ xform->rsa.e.length, e);
+
+ if (!n || !e)
+ goto err_rsa;
+
+ RSA *rsa = RSA_new();
+ if (rsa == NULL)
+ goto err_rsa;
+
+ if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_EXP) {
+ d = BN_bin2bn(
+ (const unsigned char *)xform->rsa.d.data,
+ xform->rsa.d.length,
+ d);
+ if (!d) {
+ RSA_free(rsa);
+ goto err_rsa;
+ }
+ } else {
+ p = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.p.data,
+ xform->rsa.qt.p.length,
+ p);
+ q = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.q.data,
+ xform->rsa.qt.q.length,
+ q);
+ dmp1 = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.dP.data,
+ xform->rsa.qt.dP.length,
+ dmp1);
+ dmq1 = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.dQ.data,
+ xform->rsa.qt.dQ.length,
+ dmq1);
+ iqmp = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.qInv.data,
+ xform->rsa.qt.qInv.length,
+ iqmp);
+
+ if (!p || !q || !dmp1 || !dmq1 || !iqmp) {
+ RSA_free(rsa);
+ goto err_rsa;
+ }
+ set_rsa_params(rsa, p, q, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR,
+ "failed to set rsa params\n");
+ RSA_free(rsa);
+ goto err_rsa;
+ }
+ set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR,
+ "failed to set crt params\n");
+ RSA_free(rsa);
+ /*
+ * set already populated params to NULL
+ * as its freed by call to RSA_free
+ */
+ p = q = NULL;
+ goto err_rsa;
+ }
+ }
+
+ set_rsa_keys(rsa, n, e, d, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR, "Failed to load rsa keys\n");
+ RSA_free(rsa);
+ return -1;
+ }
+ asym_session->u.r.rsa = rsa;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_RSA;
+ break;
+err_rsa:
+ if (n)
+ BN_free(n);
+ if (e)
+ BN_free(e);
+ if (d)
+ BN_free(d);
+ if (p)
+ BN_free(p);
+ if (q)
+ BN_free(q);
+ if (dmp1)
+ BN_free(dmp1);
+ if (dmq1)
+ BN_free(dmq1);
+ if (iqmp)
+ BN_free(iqmp);
+
+ return -1;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ {
+ struct rte_crypto_modex_xform *xfrm = &(xform->modex);
+
+ BN_CTX *ctx = BN_CTX_new();
+ if (ctx == NULL) {
+ OPENSSL_LOG(ERR,
+ " failed to allocate resources\n");
+ return -1;
+ }
+ BN_CTX_start(ctx);
+ BIGNUM *mod = BN_CTX_get(ctx);
+ BIGNUM *exp = BN_CTX_get(ctx);
+ if (mod == NULL || exp == NULL) {
+ BN_CTX_end(ctx);
+ BN_CTX_free(ctx);
+ return -1;
+ }
+
+ mod = BN_bin2bn((const unsigned char *)
+ xfrm->modulus.data,
+ xfrm->modulus.length, mod);
+ exp = BN_bin2bn((const unsigned char *)
+ xfrm->exponent.data,
+ xfrm->exponent.length, exp);
+ asym_session->u.e.ctx = ctx;
+ asym_session->u.e.mod = mod;
+ asym_session->u.e.exp = exp;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
+ break;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_MODINV:
+ {
+ struct rte_crypto_modinv_xform *xfrm = &(xform->modinv);
+
+ BN_CTX *ctx = BN_CTX_new();
+ if (ctx == NULL) {
+ OPENSSL_LOG(ERR,
+ " failed to allocate resources\n");
+ return -1;
+ }
+ BN_CTX_start(ctx);
+ BIGNUM *mod = BN_CTX_get(ctx);
+ if (mod == NULL) {
+ BN_CTX_end(ctx);
+ BN_CTX_free(ctx);
+ return -1;
+ }
+
+ mod = BN_bin2bn((const unsigned char *)
+ xfrm->modulus.data,
+ xfrm->modulus.length,
+ mod);
+ asym_session->u.m.ctx = ctx;
+ asym_session->u.m.modulus = mod;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_MODINV;
+ break;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_DH:
+ {
+ BIGNUM *p = NULL;
+ BIGNUM *g = NULL;
+
+ p = BN_bin2bn((const unsigned char *)
+ xform->dh.p.data,
+ xform->dh.p.length,
+ p);
+ g = BN_bin2bn((const unsigned char *)
+ xform->dh.g.data,
+ xform->dh.g.length,
+ g);
+ if (!p || !g)
+ goto err_dh;
+
+ DH *dh = DH_new();
+ if (dh == NULL) {
+ OPENSSL_LOG(ERR,
+ "failed to allocate resources\n");
+ goto err_dh;
+ }
+ set_dh_params(dh, p, g, ret);
+ if (ret) {
+ DH_free(dh);
+ goto err_dh;
+ }
+
+ /*
+ * setup xfrom for
+ * public key generate, or
+ * DH Priv key generate, or both
+ * public and private key generate
+ */
+ asym_session->u.dh.key_op = (1 << xform->dh.type);
+
+ if (xform->dh.type ==
+ RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE) {
+ /* check if next is pubkey */
+ if ((xform->next != NULL) &&
+ (xform->next->xform_type ==
+ RTE_CRYPTO_ASYM_XFORM_DH) &&
+ (xform->next->dh.type ==
+ RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)
+ ) {
+ /*
+ * setup op as pub/priv key
+ * pair generationi
+ */
+ asym_session->u.dh.key_op |=
+ (1 <<
+ RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE);
+ }
+ }
+ asym_session->u.dh.dh_key = dh;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DH;
+ break;
+
+err_dh:
+ OPENSSL_LOG(ERR, " failed to set dh params\n");
+ if (p)
+ BN_free(p);
+ if (g)
+ BN_free(g);
+ return -1;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_DSA:
+ {
+ BIGNUM *p = NULL, *g = NULL;
+ BIGNUM *q = NULL, *priv_key = NULL;
+ BIGNUM *pub_key = BN_new();
+ BN_zero(pub_key);
+
+ p = BN_bin2bn((const unsigned char *)
+ xform->dsa.p.data,
+ xform->dsa.p.length,
+ p);
+
+ g = BN_bin2bn((const unsigned char *)
+ xform->dsa.g.data,
+ xform->dsa.g.length,
+ g);
+
+ q = BN_bin2bn((const unsigned char *)
+ xform->dsa.q.data,
+ xform->dsa.q.length,
+ q);
+ if (!p || !q || !g)
+ goto err_dsa;
+
+ priv_key = BN_bin2bn((const unsigned char *)
+ xform->dsa.x.data,
+ xform->dsa.x.length,
+ priv_key);
+ if (priv_key == NULL)
+ goto err_dsa;
+
+ DSA *dsa = DSA_new();
+ if (dsa == NULL) {
+ OPENSSL_LOG(ERR,
+ " failed to allocate resources\n");
+ goto err_dsa;
+ }
+
+ set_dsa_params(dsa, p, q, g, ret);
+ if (ret) {
+ DSA_free(dsa);
+ OPENSSL_LOG(ERR, "Failed to dsa params\n");
+ goto err_dsa;
+ }
+
+ /*
+ * openssl 1.1.0 mandate that public key can't be
+ * NULL in very first call. so set a dummy pub key.
+ * to keep consistency, lets follow same approach for
+ * both versions
+ */
+ /* just set dummy public for very 1st call */
+ set_dsa_keys(dsa, pub_key, priv_key, ret);
+ if (ret) {
+ DSA_free(dsa);
+ OPENSSL_LOG(ERR, "Failed to set keys\n");
+ return -1;
+ }
+ asym_session->u.s.dsa = dsa;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DSA;
+ break;
+
+err_dsa:
+ if (p)
+ BN_free(p);
+ if (q)
+ BN_free(q);
+ if (g)
+ BN_free(g);
+ if (priv_key)
+ BN_free(priv_key);
+ if (pub_key)
+ BN_free(pub_key);
+ return -1;
+ }
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+/** Configure the session from a crypto xform chain */
+static int
+openssl_pmd_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_asym_xform *xform,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *asym_sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ OPENSSL_LOG(ERR, "invalid asymmetric session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &asym_sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = openssl_set_asym_session_parameters(asym_sess_private_data,
+ xform);
+ if (ret != 0) {
+ OPENSSL_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, asym_sess_private_data);
+ return ret;
+ }
+
+ set_asym_session_private_data(sess, dev->driver_id,
+ asym_sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+openssl_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ openssl_reset_session(sess_priv);
+ memset(sess_priv, 0, sizeof(struct openssl_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static void openssl_reset_asym_session(struct openssl_asym_session *sess)
+{
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ if (sess->u.r.rsa)
+ RSA_free(sess->u.r.rsa);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ if (sess->u.e.ctx) {
+ BN_CTX_end(sess->u.e.ctx);
+ BN_CTX_free(sess->u.e.ctx);
+ }
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODINV:
+ if (sess->u.m.ctx) {
+ BN_CTX_end(sess->u.m.ctx);
+ BN_CTX_free(sess->u.m.ctx);
+ }
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DH:
+ if (sess->u.dh.dh_key)
+ DH_free(sess->u.dh.dh_key);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DSA:
+ if (sess->u.s.dsa)
+ DSA_free(sess->u.s.dsa);
+ break;
+ default:
+ break;
+ }
+}
+
+/** Clear the memory of asymmetric session
+ * so it doesn't leave key material behind
+ */
+static void
+openssl_pmd_asym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_asym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_asym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ openssl_reset_asym_session(sess_priv);
+ memset(sess_priv, 0, sizeof(struct openssl_asym_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_asym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops openssl_pmd_ops = {
+ .dev_configure = openssl_pmd_config,
+ .dev_start = openssl_pmd_start,
+ .dev_stop = openssl_pmd_stop,
+ .dev_close = openssl_pmd_close,
+
+ .stats_get = openssl_pmd_stats_get,
+ .stats_reset = openssl_pmd_stats_reset,
+
+ .dev_infos_get = openssl_pmd_info_get,
+
+ .queue_pair_setup = openssl_pmd_qp_setup,
+ .queue_pair_release = openssl_pmd_qp_release,
+ .queue_pair_count = openssl_pmd_qp_count,
+
+ .sym_session_get_size = openssl_pmd_sym_session_get_size,
+ .asym_session_get_size = openssl_pmd_asym_session_get_size,
+ .sym_session_configure = openssl_pmd_sym_session_configure,
+ .asym_session_configure = openssl_pmd_asym_session_configure,
+ .sym_session_clear = openssl_pmd_sym_session_clear,
+ .asym_session_clear = openssl_pmd_asym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_openssl_pmd_ops = &openssl_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_private.h b/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_private.h
new file mode 100644
index 00000000..a8f2c848
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#ifndef _OPENSSL_PMD_PRIVATE_H_
+#define _OPENSSL_PMD_PRIVATE_H_
+
+#include <openssl/evp.h>
+#include <openssl/hmac.h>
+#include <openssl/des.h>
+#include <openssl/rsa.h>
+#include <openssl/dh.h>
+#include <openssl/dsa.h>
+
+#define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl
+/**< Open SSL Crypto PMD device name */
+
+/** OPENSSL PMD LOGTYPE DRIVER */
+int openssl_logtype_driver;
+#define OPENSSL_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, openssl_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+/* Maximum length for digest (SHA-512 needs 64 bytes) */
+#define DIGEST_LENGTH_MAX 64
+
+/** OPENSSL operation order mode enumerator */
+enum openssl_chain_order {
+ OPENSSL_CHAIN_ONLY_CIPHER,
+ OPENSSL_CHAIN_ONLY_AUTH,
+ OPENSSL_CHAIN_CIPHER_BPI,
+ OPENSSL_CHAIN_CIPHER_AUTH,
+ OPENSSL_CHAIN_AUTH_CIPHER,
+ OPENSSL_CHAIN_COMBINED,
+ OPENSSL_CHAIN_NOT_SUPPORTED
+};
+
+/** OPENSSL cipher mode enumerator */
+enum openssl_cipher_mode {
+ OPENSSL_CIPHER_LIB,
+ OPENSSL_CIPHER_DES3CTR,
+};
+
+/** OPENSSL auth mode enumerator */
+enum openssl_auth_mode {
+ OPENSSL_AUTH_AS_AUTH,
+ OPENSSL_AUTH_AS_HMAC,
+};
+
+/** private data structure for each OPENSSL crypto device */
+struct openssl_private {
+ unsigned int max_nb_qpairs;
+ /**< Max number of queue pairs */
+};
+
+/** OPENSSL crypto queue pair */
+struct openssl_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing process packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+/** OPENSSL crypto private session structure */
+struct openssl_session {
+ enum openssl_chain_order chain_order;
+ /**< chain order mode */
+
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+
+ enum rte_crypto_aead_algorithm aead_algo;
+ /**< AEAD algorithm */
+
+ /** Cipher Parameters */
+ struct {
+ enum rte_crypto_cipher_operation direction;
+ /**< cipher operation direction */
+ enum openssl_cipher_mode mode;
+ /**< cipher operation mode */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< cipher algorithm */
+
+ struct {
+ uint8_t data[32];
+ /**< key data */
+ size_t length;
+ /**< key length in bytes */
+ } key;
+
+ const EVP_CIPHER *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_CIPHER_CTX *ctx;
+ /**< pointer to EVP context structure */
+ EVP_CIPHER_CTX *bpi_ctx;
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ enum openssl_auth_mode mode;
+ /**< auth operation mode */
+ enum rte_crypto_auth_algorithm algo;
+ /**< cipher algorithm */
+
+ union {
+ struct {
+ const EVP_MD *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_MD_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } auth;
+
+ struct {
+ EVP_PKEY *pkey;
+ /**< pointer to EVP key */
+ const EVP_MD *evp_algo;
+ /**< pointer to EVP algorithm function */
+ HMAC_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } hmac;
+ };
+
+ uint16_t aad_length;
+ /**< AAD length */
+ uint16_t digest_length;
+ /**< digest length */
+ } auth;
+
+} __rte_cache_aligned;
+
+/** OPENSSL crypto private asymmetric session structure */
+struct openssl_asym_session {
+ enum rte_crypto_asym_xform_type xfrm_type;
+ union {
+ struct rsa {
+ RSA *rsa;
+ } r;
+ struct exp {
+ BIGNUM *exp;
+ BIGNUM *mod;
+ BN_CTX *ctx;
+ } e;
+ struct mod {
+ BIGNUM *modulus;
+ BN_CTX *ctx;
+ } m;
+ struct dh {
+ DH *dh_key;
+ uint32_t key_op;
+ } dh;
+ struct {
+ DSA *dsa;
+ } s;
+ } u;
+} __rte_cache_aligned;
+/** Set and validate OPENSSL crypto session parameters */
+extern int
+openssl_set_session_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** Reset OPENSSL crypto session parameters */
+extern void
+openssl_reset_session(struct openssl_session *sess);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_openssl_pmd_ops;
+
+#endif /* _OPENSSL_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/rte_pmd_openssl_version.map b/src/spdk/dpdk/drivers/crypto/openssl/rte_pmd_openssl_version.map
new file mode 100644
index 00000000..cc5829e3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/rte_pmd_openssl_version.map
@@ -0,0 +1,3 @@
+DPDK_16.11 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/qat/README b/src/spdk/dpdk/drivers/crypto/qat/README
new file mode 100644
index 00000000..444ae605
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/README
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2018 Intel Corporation
+
+Makefile for crypto QAT PMD is in common/qat directory.
+The build for the QAT driver is done from there as only one library is built for the
+whole QAT pci device and that library includes all the services (crypto, compression)
+which are enabled on the device.
diff --git a/src/spdk/dpdk/drivers/crypto/qat/meson.build b/src/spdk/dpdk/drivers/crypto/qat/meson.build
new file mode 100644
index 00000000..9cc98d2c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+# this does not build the QAT driver, instead that is done in the compression
+# driver which comes later. Here we just add our sources files to the list
+build = false
+dep = dependency('libcrypto', required: false)
+qat_includes += include_directories('.')
+qat_deps += 'cryptodev'
+if dep.found()
+ # Add our sources files to the list
+ qat_sources += files('qat_sym_pmd.c',
+ 'qat_sym.c',
+ 'qat_sym_session.c')
+ qat_ext_deps += dep
+ pkgconfig_extra_libs += '-lcrypto'
+ qat_cflags += '-DBUILD_QAT_SYM'
+endif
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym.c b/src/spdk/dpdk/drivers/crypto/qat/qat_sym.c
new file mode 100644
index 00000000..10cdf2e1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym.c
@@ -0,0 +1,569 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <openssl/evp.h>
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_crypto_sym.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+
+#include "qat_sym.h"
+
+/** Decrypt a single partial block
+ * Depends on openssl libcrypto
+ * Uses ECB+XOR to do CFB encryption, same result, more performant
+ */
+static inline int
+bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
+
+ /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_decrypt_err;
+
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
+
+ return 0;
+
+cipher_decrypt_err:
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+ return -EINVAL;
+}
+
+
+static inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
+
+ if (last_block_len &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+
+ /* Decrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = last_block - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_decrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+ dst, last_block_len);
+#endif
+ }
+
+ return sym_op->cipher.data.length - last_block_len;
+}
+
+static inline void
+set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op,
+ struct icp_qat_fw_la_bulk_req *qat_req)
+{
+ /* copy IV into request if it fits */
+ if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset),
+ iv_length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ rte_crypto_op_ctophys_offset(op,
+ iv_offset);
+ }
+}
+
+/** Set IV for CCM is special case, 0th byte is set to q-1
+ * where q is padding of nonce in 16 byte block
+ */
+static inline void
+set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
+{
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+}
+
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, enum qat_device_gen qat_dev_gen)
+{
+ int ret = 0;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ register struct icp_qat_fw_la_bulk_req *qat_req;
+ uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
+ uint32_t cipher_len = 0, cipher_ofs = 0;
+ uint32_t auth_len = 0, auth_ofs = 0;
+ uint32_t min_ofs = 0;
+ uint64_t src_buf_start = 0, dst_buf_start = 0;
+ uint8_t do_sgl = 0;
+ struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+ struct qat_sym_op_cookie *cookie =
+ (struct qat_sym_op_cookie *)op_cookie;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
+ return -EINVAL;
+ }
+
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
+ " requests, op (%p) is sessionless.", op);
+ return -EINVAL;
+ }
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ op->sym->session, cryptodev_qat_driver_id);
+
+ if (unlikely(ctx == NULL)) {
+ QAT_DP_LOG(ERR, "Session was not created for this device");
+ return -EINVAL;
+ }
+
+ if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
+ QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
+ qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+ cipher_param = (void *)&qat_req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+ if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ /* AES-GCM or AES-CCM */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+ && ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+ do_aead = 1;
+ } else {
+ do_auth = 1;
+ do_cipher = 1;
+ }
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ do_auth = 1;
+ do_cipher = 0;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ do_auth = 0;
+ do_cipher = 1;
+ }
+
+ if (do_cipher) {
+
+ if (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+
+ if (unlikely(
+ (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+ (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
+ "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ cipher_len = op->sym->cipher.data.length >> 3;
+ cipher_ofs = op->sym->cipher.data.offset >> 3;
+
+ } else if (ctx->bpi_ctx) {
+ /* DOCSIS - only send complete blocks to device
+ * Process any partial block using CFB mode.
+ * Even if 0 complete blocks, still send this to device
+ * to get into rx queue for post-process and dequeuing
+ */
+ cipher_len = qat_bpicipher_preprocess(ctx, op);
+ cipher_ofs = op->sym->cipher.data.offset;
+ } else {
+ cipher_len = op->sym->cipher.data.length;
+ cipher_ofs = op->sym->cipher.data.offset;
+ }
+
+ set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+ min_ofs = cipher_ofs;
+ }
+
+ if (do_auth) {
+
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+ if (unlikely(
+ (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+ (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
+ "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ auth_ofs = op->sym->auth.data.offset >> 3;
+ auth_len = op->sym->auth.data.length >> 3;
+
+ auth_param->u1.aad_adr =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->auth_iv.offset);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /* AES-GMAC */
+ set_cipher_iv(ctx->auth_iv.length,
+ ctx->auth_iv.offset,
+ cipher_param, op, qat_req);
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+
+ auth_param->u1.aad_adr = 0;
+ auth_param->u2.aad_sz = 0;
+
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->auth_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+
+ }
+ } else {
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+
+ }
+ min_ofs = auth_ofs;
+
+ if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
+ auth_param->auth_res_addr =
+ op->sym->auth.digest.phys_addr;
+
+ }
+
+ if (do_aead) {
+ /*
+ * This address may used for setting AAD physical pointer
+ * into IV offset from op
+ */
+ rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
+ if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->cipher_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ }
+ set_cipher_iv(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
+
+ /* In case of AES-CCM this may point to user selected
+ * memory or iv offset in cypto_op
+ */
+ uint8_t *aad_data = op->sym->aead.aad.data;
+ /* This is true AAD length, it not includes 18 bytes of
+ * preceding data
+ */
+ uint8_t aad_ccm_real_len = 0;
+ uint8_t aad_len_field_sz = 0;
+ uint32_t msg_len_be =
+ rte_bswap32(op->sym->aead.data.length);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+ /*
+ * aad_len not greater than 18, so no actual aad
+ * data, then use IV after op for B0 block
+ */
+ aad_data = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->cipher_iv.offset);
+ aad_phys_addr_aead =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->cipher_iv.offset);
+ }
+
+ uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
+ ctx->cipher_iv.length;
+
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+ aad_len_field_sz,
+ ctx->digest_length, q);
+
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET +
+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
+ = rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx],
+ 0, pad_len);
+ }
+
+ }
+
+ set_cipher_iv_ccm(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, q,
+ aad_len_field_sz);
+
+ }
+
+ cipher_len = op->sym->aead.data.length;
+ cipher_ofs = op->sym->aead.data.offset;
+ auth_len = op->sym->aead.data.length;
+ auth_ofs = op->sym->aead.data.offset;
+
+ auth_param->u1.aad_adr = aad_phys_addr_aead;
+ auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
+ min_ofs = op->sym->aead.data.offset;
+ }
+
+ if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+ do_sgl = 1;
+
+ /* adjust for chain case */
+ if (do_cipher && do_auth)
+ min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+
+ if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
+ min_ofs = 0;
+
+ if (unlikely(op->sym->m_dst != NULL)) {
+ /* Out-of-place operation (OOP)
+ * Don't align DMA start. DMA the minimum data-set
+ * so as not to overwrite data in dest buffer
+ */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
+ dst_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
+
+ } else {
+ /* In-place operation
+ * Start DMA at nearest aligned address below min_ofs
+ */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
+ & QAT_64_BTYE_ALIGN_MASK;
+
+ if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
+ rte_pktmbuf_headroom(op->sym->m_src))
+ > src_buf_start)) {
+ /* alignment has pushed addr ahead of start of mbuf
+ * so revert and take the performance hit
+ */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src,
+ min_ofs);
+ }
+ dst_buf_start = src_buf_start;
+ }
+
+ if (do_cipher || do_aead) {
+ cipher_param->cipher_offset =
+ (uint32_t)rte_pktmbuf_iova_offset(
+ op->sym->m_src, cipher_ofs) - src_buf_start;
+ cipher_param->cipher_length = cipher_len;
+ } else {
+ cipher_param->cipher_offset = 0;
+ cipher_param->cipher_length = 0;
+ }
+
+ if (do_auth || do_aead) {
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
+ op->sym->m_src, auth_ofs) - src_buf_start;
+ auth_param->auth_len = auth_len;
+ } else {
+ auth_param->auth_off = 0;
+ auth_param->auth_len = 0;
+ }
+
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ > (auth_param->auth_off + auth_param->auth_len) ?
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ : (auth_param->auth_off + auth_param->auth_len);
+
+ if (do_sgl) {
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ ret = qat_sgl_fill_array(op->sym->m_src,
+ (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
+ &cookie->qat_sgl_src,
+ qat_req->comn_mid.src_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ return ret;
+ }
+
+ if (likely(op->sym->m_dst == NULL))
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ else {
+ ret = qat_sgl_fill_array(op->sym->m_dst,
+ (int64_t)(dst_buf_start -
+ rte_pktmbuf_iova(op->sym->m_dst)),
+ &cookie->qat_sgl_dst,
+ qat_req->comn_mid.dst_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
+ return ret;
+ }
+
+ qat_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ qat_req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_dst_phys_addr;
+ }
+ } else {
+ qat_req->comn_mid.src_data_addr = src_buf_start;
+ qat_req->comn_mid.dest_data_addr = dst_buf_start;
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (do_cipher) {
+ uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->cipher_iv.offset);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
+ ctx->cipher_iv.length);
+ }
+
+ if (do_auth) {
+ if (ctx->auth_iv.length) {
+ uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->auth_iv.offset);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
+ ctx->auth_iv.length);
+ }
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
+ ctx->digest_length);
+ }
+
+ if (do_aead) {
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
+ ctx->digest_length);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
+ ctx->aad_len);
+ }
+#endif
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym.h b/src/spdk/dpdk/drivers/crypto/qat/qat_sym.h
new file mode 100644
index 00000000..bc6426c3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_H_
+#define _QAT_SYM_H_
+
+#include <rte_cryptodev_pmd.h>
+
+#ifdef BUILD_QAT_SYM
+#include <openssl/evp.h>
+
+#include "qat_common.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+#include "qat_logs.h"
+
+#define BYTE_LENGTH 8
+/* bpi is only used for partial blocks of DES and AES
+ * so AES block len can be assumed as max len for iv, src and dst
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER 16
+
+struct qat_sym_session;
+
+struct qat_sym_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_op_cookie {
+ struct qat_sym_sgl qat_sgl_src;
+ struct qat_sym_sgl qat_sgl_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+};
+
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, enum qat_device_gen qat_dev_gen);
+
+
+/** Encrypt a single partial block
+ * Depends on openssl libcrypto
+ * Uses ECB+XOR to do CFB encryption, same result, more performant
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
+
+ /* ECB method: encrypt the IV, then XOR this with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_encrypt_err;
+
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
+
+ return 0;
+
+cipher_encrypt_err:
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+ return -EINVAL;
+}
+
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
+
+ if (last_block_len > 0 &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+ /* Encrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset;
+
+ last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = dst - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG,
+ "BPI: dst before post-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_encrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG,
+ "BPI: dst after post-process:",
+ dst, last_block_len);
+#endif
+ }
+ return sym_op->cipher.data.length - last_block_len;
+}
+
+static inline void
+qat_sym_process_response(void **op, uint8_t *resp)
+{
+
+ struct icp_qat_fw_comn_resp *resp_msg =
+ (struct icp_qat_fw_comn_resp *)resp;
+ struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+ if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status)) {
+
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ struct qat_sym_session *sess = (struct qat_sym_session *)
+ get_sym_session_private_data(
+ rx_op->sym->session,
+ cryptodev_qat_driver_id);
+
+
+ if (sess->bpi_ctx)
+ qat_bpicipher_postprocess(sess, rx_op);
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ *op = (void *)rx_op;
+}
+#else
+
+static inline void
+qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
+{
+}
+#endif
+#endif /* _QAT_SYM_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym_capabilities.h b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_capabilities.h
new file mode 100644
index 00000000..eea08bc7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_capabilities.h
@@ -0,0 +1,557 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_CAPABILITIES_H_
+#define _QAT_SYM_CAPABILITIES_H_
+
+#define QAT_BASE_GEN1_SYM_CAPABILITIES \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 20, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 28, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 32, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 48, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* MD5 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 16, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES XCBC MAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CCM */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_CCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 16, \
+ .increment = 2 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 224, \
+ .increment = 1 \
+ }, \
+ .iv_size = { \
+ .min = 7, \
+ .max = 13, \
+ .increment = 1 \
+ }, \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 8, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 240, \
+ .increment = 1 \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 12, \
+ .increment = 0 \
+ }, \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GMAC (AUTH) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 8, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 12, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* SNOW 3G (UIA2) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES DOCSIS BPI */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* SNOW 3G (UEA2) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* NULL (AUTH) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_NULL, \
+ .block_size = 1, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .iv_size = { 0 } \
+ }, }, \
+ }, }, \
+ }, \
+ { /* NULL (CIPHER) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_NULL, \
+ .block_size = 1, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ } \
+ }, }, \
+ }, } \
+ }, \
+ { /* KASUMI (F8) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* KASUMI (F9) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 8, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* DES DOCSISBPI */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,\
+ .block_size = 8, \
+ .key_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \
+ { /* ZUC (EEA3) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* ZUC (EIA3) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#endif /* _QAT_SYM_CAPABILITIES_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.c b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.c
new file mode 100644
index 00000000..96f442e8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_pci.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_logs.h"
+#include "qat_sym.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+
+uint8_t cryptodev_qat_driver_id;
+
+static const struct rte_cryptodev_capabilities qat_gen1_sym_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities qat_gen2_sym_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ QAT_EXTRA_GEN2_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int qat_sym_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+
+static int qat_sym_dev_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+static int qat_sym_dev_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static void qat_sym_dev_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return;
+}
+
+static int qat_sym_dev_close(struct rte_cryptodev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_sym_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qat_sym_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ const struct qat_qp_hw_data *sym_hw_qps =
+ qat_gen_config[internals->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_SYMMETRIC];
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = internals->qat_dev_capabilities;
+ info->driver_id = cryptodev_qat_driver_id;
+ /* No limit of number of sessions */
+ info->sym.max_nb_sessions = 0;
+ }
+}
+
+static void qat_sym_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct qat_common_stats qat_stats = {0};
+ struct qat_sym_dev_private *qat_priv;
+
+ if (stats == NULL || dev == NULL) {
+ QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_SYMMETRIC);
+ stats->enqueued_count = qat_stats.enqueued_count;
+ stats->dequeued_count = qat_stats.dequeued_count;
+ stats->enqueue_err_count = qat_stats.enqueue_err_count;
+ stats->dequeue_err_count = qat_stats.dequeue_err_count;
+}
+
+static void qat_sym_stats_reset(struct rte_cryptodev *dev)
+{
+ struct qat_sym_dev_private *qat_priv;
+
+ if (dev == NULL) {
+ QAT_LOG(ERR, "invalid cryptodev ptr %p", dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_SYMMETRIC);
+
+}
+
+static int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+
+ QAT_LOG(DEBUG, "Release sym qp %u on device %d",
+ queue_pair_id, dev->data->dev_id);
+
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][queue_pair_id]
+ = NULL;
+
+ return qat_qp_release((struct qat_qp **)
+ &(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool __rte_unused)
+{
+ struct qat_qp *qp;
+ int ret = 0;
+ uint32_t i;
+ struct qat_qp_config qat_qp_conf;
+
+ struct qat_qp **qp_addr =
+ (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+ const struct qat_qp_hw_data *sym_hw_qps =
+ qat_gen_config[qat_private->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_SYMMETRIC];
+ const struct qat_qp_hw_data *qp_hw_data = sym_hw_qps + qp_id;
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (*qp_addr != NULL) {
+ ret = qat_sym_qp_release(dev, qp_id);
+ if (ret < 0)
+ return ret;
+ }
+ if (qp_id >= qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC)) {
+ QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
+ return -EINVAL;
+ }
+
+ qat_qp_conf.hw = qp_hw_data;
+ qat_qp_conf.build_request = qat_sym_build_request;
+ qat_qp_conf.cookie_size = sizeof(struct qat_sym_op_cookie);
+ qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
+ qat_qp_conf.socket_id = socket_id;
+ qat_qp_conf.service_str = "sym";
+
+ ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
+ if (ret != 0)
+ return ret;
+
+ /* store a link to the qp in the qat_pci_device */
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][qp_id]
+ = *qp_addr;
+
+ qp = (struct qat_qp *)*qp_addr;
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+
+ struct qat_sym_op_cookie *cookie =
+ qp->op_cookies[i];
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_dst);
+ }
+
+ return ret;
+}
+
+static struct rte_cryptodev_ops crypto_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_sym_dev_config,
+ .dev_start = qat_sym_dev_start,
+ .dev_stop = qat_sym_dev_stop,
+ .dev_close = qat_sym_dev_close,
+ .dev_infos_get = qat_sym_dev_info_get,
+
+ .stats_get = qat_sym_stats_get,
+ .stats_reset = qat_sym_stats_reset,
+ .queue_pair_setup = qat_sym_qp_setup,
+ .queue_pair_release = qat_sym_qp_release,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .sym_session_get_size = qat_sym_session_get_private_size,
+ .sym_session_configure = qat_sym_session_configure,
+ .sym_session_clear = qat_sym_session_clear
+};
+
+static uint16_t
+qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+ .name = qat_sym_drv_name,
+ .alias = qat_sym_drv_name
+};
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ .private_data_size = sizeof(struct qat_sym_dev_private)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *cryptodev;
+ struct qat_sym_dev_private *internals;
+
+ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "sym");
+ QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+ /* Populate subset device to use in cryptodev device creation */
+ qat_pci_dev->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+ qat_pci_dev->sym_rte_dev.numa_node =
+ qat_pci_dev->pci_dev->device.numa_node;
+ qat_pci_dev->sym_rte_dev.devargs = NULL;
+
+ cryptodev = rte_cryptodev_pmd_create(name,
+ &(qat_pci_dev->sym_rte_dev), &init_params);
+
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ qat_pci_dev->sym_rte_dev.name = cryptodev->data->name;
+ cryptodev->driver_id = cryptodev_qat_driver_id;
+ cryptodev->dev_ops = &crypto_qat_ops;
+
+ cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ internals = cryptodev->data->dev_private;
+ internals->qat_dev = qat_pci_dev;
+ qat_pci_dev->sym_dev = internals;
+
+ internals->sym_dev_id = cryptodev->data->dev_id;
+ switch (qat_pci_dev->qat_dev_gen) {
+ case QAT_GEN1:
+ internals->qat_dev_capabilities = qat_gen1_sym_capabilities;
+ break;
+ case QAT_GEN2:
+ internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
+ break;
+ default:
+ internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
+ QAT_LOG(DEBUG,
+ "QAT gen %d capabilities unknown, default to GEN2",
+ qat_pci_dev->qat_dev_gen);
+ break;
+ }
+
+ QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+ cryptodev->data->name, internals->sym_dev_id);
+ return 0;
+}
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+ if (qat_pci_dev->sym_dev == NULL)
+ return 0;
+
+ /* free crypto device */
+ cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->sym_dev_id);
+ rte_cryptodev_pmd_destroy(cryptodev);
+ qat_pci_dev->sym_rte_dev.name = NULL;
+ qat_pci_dev->sym_dev = NULL;
+
+ return 0;
+}
+
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+ cryptodev_qat_sym_driver,
+ cryptodev_qat_driver_id);
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.h b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.h
new file mode 100644
index 00000000..d3432854
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_PMD_H_
+#define _QAT_SYM_PMD_H_
+
+#ifdef BUILD_QAT_SYM
+
+#include <rte_cryptodev.h>
+
+#include "qat_sym_capabilities.h"
+#include "qat_device.h"
+
+/**< Intel(R) QAT Symmetric Crypto PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
+
+extern uint8_t cryptodev_qat_driver_id;
+
+/** private data structure for a QAT device.
+ * This QAT device is a device offering only symmetric crypto service,
+ * there can be one of these on each qat_pci_device (VF),
+ * in future there may also be private data structures for other services.
+ */
+struct qat_sym_dev_private {
+ struct qat_pci_device *qat_dev;
+ /**< The qat pci device hosting the service */
+ uint8_t sym_dev_id;
+ /**< Device instance for this rte_cryptodev */
+ const struct rte_cryptodev_capabilities *qat_dev_capabilities;
+ /* QAT device symmetric crypto capabilities */
+};
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev);
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
+
+#endif
+#endif /* _QAT_SYM_PMD_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.c b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.c
new file mode 100644
index 00000000..1d58220a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.c
@@ -0,0 +1,1725 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <openssl/sha.h> /* Needed to calculate pre-compute values */
+#include <openssl/aes.h> /* Needed to calculate pre-compute values */
+#include <openssl/md5.h> /* Needed to calculate pre-compute values */
+#include <openssl/evp.h> /* Needed for bpi runt block processing */
+
+#include <rte_memcpy.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_crypto_sym.h>
+
+#include "qat_logs.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+
+/** Frees a context previously created
+ * Depends on openssl libcrypto
+ */
+static void
+bpi_cipher_ctx_free(void *bpi_ctx)
+{
+ if (bpi_ctx != NULL)
+ EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
+}
+
+/** Creates a context in either AES or DES in ECB mode
+ * Depends on openssl libcrypto
+ */
+static int
+bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
+ enum rte_crypto_cipher_operation direction __rte_unused,
+ uint8_t *key, void **ctx)
+{
+ const EVP_CIPHER *algo = NULL;
+ int ret;
+ *ctx = EVP_CIPHER_CTX_new();
+
+ if (*ctx == NULL) {
+ ret = -ENOMEM;
+ goto ctx_init_err;
+ }
+
+ if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
+ algo = EVP_des_ecb();
+ else
+ algo = EVP_aes_128_ecb();
+
+ /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
+ if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
+ ret = -EINVAL;
+ goto ctx_init_err;
+ }
+
+ return 0;
+
+ctx_init_err:
+ if (*ctx != NULL)
+ EVP_CIPHER_CTX_free(*ctx);
+ return ret;
+}
+
+static int
+qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
+ struct qat_sym_dev_private *internals)
+{
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ continue;
+
+ if (capability->sym.cipher.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+static int
+qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
+ struct qat_sym_dev_private *internals)
+{
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ continue;
+
+ if (capability->sym.auth.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+void
+qat_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+ struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
+
+ if (sess_priv) {
+ if (s->bpi_ctx)
+ bpi_cipher_ctx_free(s->bpi_ctx);
+ memset(s, 0, qat_sym_session_get_private_size(dev));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static int
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_AUTH;
+
+ /* AEAD */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ /* AES-GCM and AES-CCM works with different direction
+ * GCM first encrypts and generate hash where AES-CCM
+ * first generate hash and encrypts. Similar relation
+ * applies to decryption.
+ */
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ else
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ }
+
+ if (xform->next == NULL)
+ return -1;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+
+ return -1;
+}
+
+static struct rte_crypto_auth_xform *
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_cipher_xform *
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+int
+qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ int ret;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = qat_get_cipher_xform(xform);
+
+ session->cipher_iv.offset = cipher_xform->iv.offset;
+ session->cipher_iv.length = cipher_xform->iv.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ if (qat_sym_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ if (qat_sym_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid KASUMI cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ if (qat_sym_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ if (qat_sym_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ if (qat_sym_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ ret = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
+ QAT_LOG(ERR, "failed to create DES BPI ctx");
+ goto error_out;
+ }
+ if (qat_sym_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ ret = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
+ QAT_LOG(ERR, "failed to create AES BPI ctx");
+ goto error_out;
+ }
+ if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ if (!qat_is_cipher_alg_supported(
+ cipher_xform->algo, internals)) {
+ QAT_LOG(ERR, "%s not supported on this device",
+ rte_crypto_cipher_algorithm_strings
+ [cipher_xform->algo]);
+ ret = -ENOTSUP;
+ goto error_out;
+ }
+ if (qat_sym_validate_zuc_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid ZUC cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ ret = -ENOTSUP;
+ goto error_out;
+ default:
+ QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ else
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ cipher_xform->key.data,
+ cipher_xform->key.length)) {
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ return 0;
+
+error_out:
+ if (session->bpi_ctx) {
+ bpi_cipher_ctx_free(session->bpi_ctx);
+ session->bpi_ctx = NULL;
+ }
+ return ret;
+}
+
+int
+qat_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ QAT_LOG(ERR,
+ "Crypto QAT PMD: failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+int
+qat_sym_session_set_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private)
+{
+ struct qat_sym_session *session = session_private;
+ int ret;
+ int qat_cmd_id;
+
+ /* Set context descriptor physical address */
+ session->cd_paddr = rte_mempool_virt2iova(session) +
+ offsetof(struct qat_sym_session, cd);
+
+ session->min_qat_dev_gen = QAT_GEN1;
+
+ /* Get requested QAT command id */
+ qat_cmd_id = qat_get_cmd_id(xform);
+ if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
+ QAT_LOG(ERR, "Unsupported xform chain requested");
+ return -ENOTSUP;
+ }
+ session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+ switch (session->qat_cmd) {
+ case ICP_QAT_FW_LA_CMD_CIPHER:
+ ret = qat_sym_session_configure_cipher(dev, xform, session);
+ if (ret < 0)
+ return ret;
+ break;
+ case ICP_QAT_FW_LA_CMD_AUTH:
+ ret = qat_sym_session_configure_auth(dev, xform, session);
+ if (ret < 0)
+ return ret;
+ break;
+ case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_sym_session_configure_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_sym_session_configure_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_sym_session_configure_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_sym_session_configure_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_sym_session_configure_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_sym_session_configure_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
+ case ICP_QAT_FW_LA_CMD_TRNG_TEST:
+ case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_MGF1:
+ case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_DELIMITER:
+ QAT_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ return -ENOTSUP;
+ default:
+ QAT_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+int
+qat_sym_session_configure_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ uint8_t *key_data = auth_xform->key.data;
+ uint8_t key_length = auth_xform->key.length;
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ if (qat_sym_validate_aes_key(auth_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+
+ break;
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
+ QAT_LOG(ERR, "%s not supported on this device",
+ rte_crypto_auth_algorithm_strings
+ [auth_xform->algo]);
+ return -ENOTSUP;
+ }
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
+ auth_xform->algo);
+ return -ENOTSUP;
+ default:
+ QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ return -EINVAL;
+ }
+
+ session->auth_iv.offset = auth_xform->iv.offset;
+ session->auth_iv.length = auth_xform->iv.length;
+
+ if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ /*
+ * It needs to create cipher desc content first,
+ * then authentication
+ */
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+ } else {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ /*
+ * It needs to create authentication desc content first,
+ * then cipher
+ */
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+ }
+ /* Restore to authentication only only */
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
+ } else {
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+ }
+
+ session->digest_length = auth_xform->digest_length;
+ return 0;
+}
+
+int
+qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ enum rte_crypto_auth_operation crypto_operation;
+
+ /*
+ * Store AEAD IV parameters as cipher IV,
+ * to avoid unnecessary memory usage
+ */
+ session->cipher_iv.offset = xform->aead.iv.offset;
+ session->cipher_iv.length = xform->aead.iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ if (qat_sym_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ if (qat_sym_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
+ break;
+ default:
+ QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
+ aead_xform->algo);
+ return -EINVAL;
+ }
+
+ if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
+ (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ /*
+ * It needs to create cipher desc content first,
+ * then authentication
+ */
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ crypto_operation))
+ return -EINVAL;
+ } else {
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ /*
+ * It needs to create authentication desc content first,
+ * then cipher
+ */
+
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ crypto_operation))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
+ }
+
+ session->digest_length = aead_xform->digest_length;
+ return 0;
+}
+
+unsigned int qat_sym_session_get_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
+}
+
+/* returns block size in bytes per cipher algo */
+int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
+{
+ switch (qat_cipher_alg) {
+ case ICP_QAT_HW_CIPHER_ALGO_DES:
+ return ICP_QAT_HW_DES_BLK_SZ;
+ case ICP_QAT_HW_CIPHER_ALGO_3DES:
+ return ICP_QAT_HW_3DES_BLK_SZ;
+ case ICP_QAT_HW_CIPHER_ALGO_AES128:
+ case ICP_QAT_HW_CIPHER_ALGO_AES192:
+ case ICP_QAT_HW_CIPHER_ALGO_AES256:
+ return ICP_QAT_HW_AES_BLK_SZ;
+ default:
+ QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
+/*
+ * Returns size in bytes per hash algo for state1 size field in cd_ctrl
+ * This is digest size rounded up to nearest quadword
+ */
+static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum state1 size in this case */
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ default:
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
+/* returns digest size in bytes per hash algo */
+static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return ICP_QAT_HW_SHA1_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return ICP_QAT_HW_SHA224_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return ICP_QAT_HW_SHA256_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return ICP_QAT_HW_SHA384_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return ICP_QAT_HW_SHA512_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return ICP_QAT_HW_MD5_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum digest size in this case */
+ return ICP_QAT_HW_SHA512_STATE1_SZ;
+ default:
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
+/* returns block size in byes per hash algo */
+static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return SHA_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return SHA256_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return SHA256_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return SHA512_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return SHA512_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ return 16;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return MD5_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum block size in this case */
+ return SHA512_CBLOCK;
+ default:
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA_CTX ctx;
+
+ if (!SHA1_Init(&ctx))
+ return -EFAULT;
+ SHA1_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA224_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA256_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA384_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA512_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
+{
+ MD5_CTX ctx;
+
+ if (!MD5_Init(&ctx))
+ return -EFAULT;
+ MD5_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
+
+ return 0;
+}
+
+static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+ uint8_t *data_in,
+ uint8_t *data_out)
+{
+ int digest_size;
+ uint8_t digest[qat_hash_get_digest_size(
+ ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ uint32_t *hash_state_out_be32;
+ uint64_t *hash_state_out_be64;
+ int i;
+
+ digest_size = qat_hash_get_digest_size(hash_alg);
+ if (digest_size <= 0)
+ return -EFAULT;
+
+ hash_state_out_be32 = (uint32_t *)data_out;
+ hash_state_out_be64 = (uint64_t *)data_out;
+
+ switch (hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (partial_hash_sha1(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ if (partial_hash_sha224(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (partial_hash_sha256(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ if (partial_hash_sha384(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
+ *hash_state_out_be64 =
+ rte_bswap64(*(((uint64_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (partial_hash_sha512(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
+ *hash_state_out_be64 =
+ rte_bswap64(*(((uint64_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ if (partial_hash_md5(data_in, data_out))
+ return -EFAULT;
+ break;
+ default:
+ QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+#define HASH_XCBC_PRECOMP_KEY_NUM 3
+
+static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
+ const uint8_t *auth_key,
+ uint16_t auth_keylen,
+ uint8_t *p_state_buf,
+ uint16_t *p_state_len)
+{
+ int block_size;
+ uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ int i;
+
+ if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
+ static uint8_t qat_aes_xcbc_key_seed[
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ };
+
+ uint8_t *in = NULL;
+ uint8_t *out = p_state_buf;
+ int x;
+ AES_KEY enc_key;
+
+ in = rte_zmalloc("working mem for key",
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+ if (in == NULL) {
+ QAT_LOG(ERR, "Failed to alloc memory");
+ return -ENOMEM;
+ }
+
+ rte_memcpy(in, qat_aes_xcbc_key_seed,
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
+ if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
+ &enc_key) != 0) {
+ rte_free(in -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
+ memset(out -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
+ 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ return -EFAULT;
+ }
+ AES_encrypt(in, out, &enc_key);
+ in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ }
+ *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+ rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
+ return 0;
+ } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
+ (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ uint8_t *in = NULL;
+ uint8_t *out = p_state_buf;
+ AES_KEY enc_key;
+
+ memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
+ ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ);
+ in = rte_zmalloc("working mem for key",
+ ICP_QAT_HW_GALOIS_H_SZ, 16);
+ if (in == NULL) {
+ QAT_LOG(ERR, "Failed to alloc memory");
+ return -ENOMEM;
+ }
+
+ memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
+ if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
+ &enc_key) != 0) {
+ return -EFAULT;
+ }
+ AES_encrypt(in, out, &enc_key);
+ *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
+ ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ;
+ rte_free(in);
+ return 0;
+ }
+
+ block_size = qat_hash_get_block_size(hash_alg);
+ if (block_size <= 0)
+ return -EFAULT;
+ /* init ipad and opad from key and xor with fixed values */
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+
+ if (auth_keylen > (unsigned int)block_size) {
+ QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
+ return -EFAULT;
+ }
+ rte_memcpy(ipad, auth_key, auth_keylen);
+ rte_memcpy(opad, auth_key, auth_keylen);
+
+ for (i = 0; i < block_size; i++) {
+ uint8_t *ipad_ptr = ipad + i;
+ uint8_t *opad_ptr = opad + i;
+ *ipad_ptr ^= HMAC_IPAD_VALUE;
+ *opad_ptr ^= HMAC_OPAD_VALUE;
+ }
+
+ /* do partial hash of ipad and copy to state1 */
+ if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ QAT_LOG(ERR, "ipad precompute failed");
+ return -EFAULT;
+ }
+
+ /*
+ * State len is a multiple of 8, so may be larger than the digest.
+ * Put the partial hash of opad state_len bytes after state1
+ */
+ *p_state_len = qat_hash_get_state1_size(hash_alg);
+ if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ QAT_LOG(ERR, "opad precompute failed");
+ return -EFAULT;
+ }
+
+ /* don't leave data lying around */
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ return 0;
+}
+
+static void
+qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_sym_proto_flag proto_flags)
+{
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+ header->comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+ QAT_COMN_PTR_TYPE_FLAT);
+ ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_PARTIAL_NONE);
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+
+ switch (proto_flags) {
+ case QAT_CRYPTO_PROTO_FLAG_NONE:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_CCM:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CCM_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_GCM:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_SNOW_3G_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_ZUC:
+ ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_ZUC_3G_PROTO);
+ break;
+ }
+
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_UPDATE_STATE);
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+}
+
+/*
+ * Snow3G and ZUC should never use this function
+ * and set its protocol flag in both cipher and auth part of content
+ * descriptor building function
+ */
+static enum qat_sym_proto_flag
+qat_get_crypto_proto_flag(uint16_t flags)
+{
+ int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
+ enum qat_sym_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
+
+ switch (proto) {
+ case ICP_QAT_FW_LA_GCM_PROTO:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+ break;
+ case ICP_QAT_FW_LA_CCM_PROTO:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+ break;
+ }
+
+ return qat_proto_flag;
+}
+
+int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
+ uint8_t *cipherkey,
+ uint32_t cipherkeylen)
+{
+ struct icp_qat_hw_cipher_algo_blk *cipher;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+ enum icp_qat_hw_cipher_convert key_convert;
+ enum qat_sym_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
+ uint32_t total_key_size;
+ uint16_t cipher_offset, cd_size;
+ uint32_t wordIndex = 0;
+ uint32_t *temp_key = NULL;
+
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ QAT_LOG(ERR, "Invalid param, must be a cipher command.");
+ return -EFAULT;
+ }
+
+ if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+ /*
+ * CTR Streaming ciphers are a special case. Decrypt = encrypt
+ * Overriding default values previously set
+ */
+ cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
+ || cdesc->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+ else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ else
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
+ if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+ total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
+ cipher_cd_ctrl->cipher_state_sz =
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
+ cipher_cd_ctrl->cipher_padding_sz =
+ (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
+ total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
+ total_key_size = ICP_QAT_HW_DES_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ } else if (cdesc->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+ total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+ cipher_cd_ctrl->cipher_state_sz =
+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ cdesc->min_qat_dev_gen = QAT_GEN2;
+ } else {
+ total_key_size = cipherkeylen;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ }
+ cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
+ cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
+ cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+
+ header->service_cmd_id = cdesc->qat_cmd;
+ qat_sym_session_init_common_hdr(header, qat_proto_flag);
+
+ cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
+ cipher->cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
+ cdesc->qat_cipher_alg, key_convert,
+ cdesc->qat_dir);
+
+ if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
+ sizeof(struct icp_qat_hw_cipher_config)
+ + cipherkeylen);
+ memcpy(cipher->key, cipherkey, cipherkeylen);
+ memcpy(temp_key, cipherkey, cipherkeylen);
+
+ /* XOR Key with KASUMI F8 key modifier at 4 bytes level */
+ for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
+ wordIndex++)
+ temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
+
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ cipherkeylen + cipherkeylen;
+ } else {
+ memcpy(cipher->key, cipherkey, cipherkeylen);
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ cipherkeylen;
+ }
+
+ if (total_key_size > cipherkeylen) {
+ uint32_t padding_size = total_key_size-cipherkeylen;
+ if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
+ /* K3 not provided so use K1 = K3*/
+ memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
+ } else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
+ /* K2 and K3 not provided so use K1 = K2 = K3*/
+ memcpy(cdesc->cd_cur_ptr, cipherkey,
+ cipherkeylen);
+ memcpy(cdesc->cd_cur_ptr+cipherkeylen,
+ cipherkey, cipherkeylen);
+ } else
+ memset(cdesc->cd_cur_ptr, 0, padding_size);
+
+ cdesc->cd_cur_ptr += padding_size;
+ }
+ cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
+ cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
+
+ return 0;
+}
+
+int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
+ uint8_t *authkey,
+ uint32_t authkeylen,
+ uint32_t aad_length,
+ uint32_t digestsize,
+ unsigned int operation)
+{
+ struct icp_qat_hw_auth_setup *hash;
+ struct icp_qat_hw_cipher_algo_blk *cipherconfig;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+ struct icp_qat_fw_la_auth_req_params *auth_param =
+ (struct icp_qat_fw_la_auth_req_params *)
+ ((char *)&req_tmpl->serv_specif_rqpars +
+ sizeof(struct icp_qat_fw_la_cipher_req_params));
+ uint16_t state1_size = 0, state2_size = 0;
+ uint16_t hash_offset, cd_size;
+ uint32_t *aad_len = NULL;
+ uint32_t wordIndex = 0;
+ uint32_t *pTempKey;
+ enum qat_sym_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
+
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ QAT_LOG(ERR, "Invalid param, must be a hash command.");
+ return -EFAULT;
+ }
+
+ if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CMP_AUTH_RES);
+ cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
+ } else {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
+ }
+
+ /*
+ * Setup the inner hash config
+ */
+ hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
+ hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
+ hash->auth_config.reserved = 0;
+ hash->auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+ cdesc->qat_hash_alg, digestsize);
+
+ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
+ hash->auth_counter.counter = 0;
+ else
+ hash->auth_counter.counter = rte_bswap32(
+ qat_hash_get_block_size(cdesc->qat_hash_alg));
+
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
+
+ /*
+ * cd_cur_ptr now points at the state1 information.
+ */
+ switch (cdesc->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ QAT_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ QAT_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ QAT_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ QAT_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ QAT_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
+ authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size)) {
+ QAT_LOG(ERR, "(XCBC)precompute failed");
+ return -EFAULT;
+ }
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+ state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
+ if (qat_sym_do_precomputes(cdesc->qat_hash_alg,
+ authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size)) {
+ QAT_LOG(ERR, "(GCM)precompute failed");
+ return -EFAULT;
+ }
+ /*
+ * Write (the length of AAD) into bytes 16-19 of state2
+ * in big-endian format. This field is 8 bytes
+ */
+ auth_param->u2.aad_sz =
+ RTE_ALIGN_CEIL(aad_length, 16);
+ auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
+
+ aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
+ ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+ ICP_QAT_HW_GALOIS_H_SZ);
+ *aad_len = rte_bswap32(aad_length);
+ cdesc->aad_len = aad_length;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
+ state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
+
+ cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
+ (cdesc->cd_cur_ptr + state1_size + state2_size);
+ cipherconfig->cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT,
+ ICP_QAT_HW_CIPHER_ENCRYPT);
+ memcpy(cipherconfig->key, authkey, authkeylen);
+ memset(cipherconfig->key + authkeylen,
+ 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
+ auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ hash->auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
+ cdesc->qat_hash_alg, digestsize);
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
+ state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
+ + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
+
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ cdesc->cd_cur_ptr += state1_size + state2_size
+ + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+ auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+ cdesc->min_qat_dev_gen = QAT_GEN2;
+
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
+ authkey, authkeylen, cdesc->cd_cur_ptr,
+ &state1_size)) {
+ QAT_LOG(ERR, "(MD5)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_NULL);
+ state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
+ state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
+ ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
+
+ if (aad_length > 0) {
+ aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ auth_param->u2.aad_sz =
+ RTE_ALIGN_CEIL(aad_length,
+ ICP_QAT_HW_CCM_AAD_ALIGNMENT);
+ } else {
+ auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
+ }
+ cdesc->aad_len = aad_length;
+ hash->auth_counter.counter = 0;
+
+ hash_cd_ctrl->outer_prefix_sz = digestsize;
+ auth_param->hash_state_sz = digestsize;
+
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
+ state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
+ pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
+ + authkeylen);
+ /*
+ * The Inner Hash Initial State2 block must contain IK
+ * (Initialisation Key), followed by IK XOR-ed with KM
+ * (Key Modifier): IK||(IK^KM).
+ */
+ /* write the auth key */
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ /* initialise temp key with auth key */
+ memcpy(pTempKey, authkey, authkeylen);
+ /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
+ for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
+ pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
+ break;
+ default:
+ QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
+ return -EFAULT;
+ }
+
+ /* Request template setup */
+ qat_sym_session_init_common_hdr(header, qat_proto_flag);
+ header->service_cmd_id = cdesc->qat_cmd;
+
+ /* Auth CD config setup */
+ hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
+ hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+ hash_cd_ctrl->inner_res_sz = digestsize;
+ hash_cd_ctrl->final_sz = digestsize;
+ hash_cd_ctrl->inner_state1_sz = state1_size;
+ auth_param->auth_res_sz = digestsize;
+
+ hash_cd_ctrl->inner_state2_sz = state2_size;
+ hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+ ((sizeof(struct icp_qat_hw_auth_setup) +
+ RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
+ >> 3);
+
+ cdesc->cd_cur_ptr += state1_size + state2_size;
+ cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
+
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
+
+ return 0;
+}
+
+int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_AES_128_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ case ICP_QAT_HW_AES_192_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+ break;
+ case ICP_QAT_HW_AES_256_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_sym_validate_aes_docsisbpi_key(int key_len,
+ enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_AES_128_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_KASUMI_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_DES_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case QAT_3DES_KEY_SZ_OPT1:
+ case QAT_3DES_KEY_SZ_OPT2:
+ case QAT_3DES_KEY_SZ_OPT3:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.h b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.h
new file mode 100644
index 00000000..e8f51e5b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _QAT_SYM_SESSION_H_
+#define _QAT_SYM_SESSION_H_
+
+#include <rte_crypto.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_common.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+/*
+ * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
+ * Integrity Key (IK)
+ */
+#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
+
+#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
+
+/* 3DES key sizes */
+#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */
+#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
+#define QAT_3DES_KEY_SZ_OPT3 8 /* K1=K2=K3 */
+
+
+#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_NO_CONVERT, \
+ ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
+
+enum qat_sym_proto_flag {
+ QAT_CRYPTO_PROTO_FLAG_NONE = 0,
+ QAT_CRYPTO_PROTO_FLAG_CCM = 1,
+ QAT_CRYPTO_PROTO_FLAG_GCM = 2,
+ QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3,
+ QAT_CRYPTO_PROTO_FLAG_ZUC = 4
+};
+
+/* Common content descriptor */
+struct qat_sym_cd {
+ struct icp_qat_hw_cipher_algo_blk cipher;
+ struct icp_qat_hw_auth_algo_blk hash;
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_session {
+ enum icp_qat_fw_la_cmd_id qat_cmd;
+ enum icp_qat_hw_cipher_algo qat_cipher_alg;
+ enum icp_qat_hw_cipher_dir qat_dir;
+ enum icp_qat_hw_cipher_mode qat_mode;
+ enum icp_qat_hw_auth_algo qat_hash_alg;
+ enum icp_qat_hw_auth_op auth_op;
+ void *bpi_ctx;
+ struct qat_sym_cd cd;
+ uint8_t *cd_cur_ptr;
+ phys_addr_t cd_paddr;
+ struct icp_qat_fw_la_bulk_req fw_req;
+ uint8_t aad_len;
+ struct qat_crypto_instance *inst;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } cipher_iv;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } auth_iv;
+ uint16_t digest_length;
+ rte_spinlock_t lock; /* protects this struct */
+ enum qat_device_gen min_qat_dev_gen;
+};
+
+int
+qat_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool);
+
+int
+qat_sym_session_set_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
+
+int
+qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_configure_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cd,
+ uint8_t *enckey,
+ uint32_t enckeylen);
+
+int
+qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
+ uint8_t *authkey,
+ uint32_t authkeylen,
+ uint32_t aad_length,
+ uint32_t digestsize,
+ unsigned int operation);
+
+void
+qat_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *session);
+
+unsigned int
+qat_sym_session_get_private_size(struct rte_cryptodev *dev);
+
+void
+qat_sym_sesssion_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_sym_proto_flag proto_flags);
+int
+qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_aes_docsisbpi_key(int key_len,
+ enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg);
+int
+qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+
+#endif /* _QAT_SYM_SESSION_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/Makefile b/src/spdk/dpdk/drivers/crypto/scheduler/Makefile
new file mode 100644
index 00000000..a9514e33
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/Makefile
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_crypto_scheduler.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev -lrte_kvargs -lrte_reorder
+LDLIBS += -lrte_bus_vdev
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_crypto_scheduler_version.map
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_cryptodev_scheduler_operations.h
+SYMLINK-y-include += rte_cryptodev_scheduler.h
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += rte_cryptodev_scheduler.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_roundrobin.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pkt_size_distr.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_failover.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_multicore.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
new file mode 100644
index 00000000..6e4919c4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -0,0 +1,584 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+#include <rte_reorder.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler.h"
+#include "scheduler_pmd_private.h"
+
+int scheduler_logtype_driver;
+
+/** update the scheduler pmd's capability with attaching device's
+ * capability.
+ * For each device to be attached, the scheduler's capability should be
+ * the common capability set of all slaves
+ **/
+static uint32_t
+sync_caps(struct rte_cryptodev_capabilities *caps,
+ uint32_t nb_caps,
+ const struct rte_cryptodev_capabilities *slave_caps)
+{
+ uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
+ uint32_t i;
+
+ while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
+ nb_slave_caps++;
+
+ if (nb_caps == 0) {
+ rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
+ return nb_slave_caps;
+ }
+
+ for (i = 0; i < sync_nb_caps; i++) {
+ struct rte_cryptodev_capabilities *cap = &caps[i];
+ uint32_t j;
+
+ for (j = 0; j < nb_slave_caps; j++) {
+ const struct rte_cryptodev_capabilities *s_cap =
+ &slave_caps[j];
+
+ if (s_cap->op != cap->op || s_cap->sym.xform_type !=
+ cap->sym.xform_type)
+ continue;
+
+ if (s_cap->sym.xform_type ==
+ RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (s_cap->sym.auth.algo !=
+ cap->sym.auth.algo)
+ continue;
+
+ cap->sym.auth.digest_size.min =
+ s_cap->sym.auth.digest_size.min <
+ cap->sym.auth.digest_size.min ?
+ s_cap->sym.auth.digest_size.min :
+ cap->sym.auth.digest_size.min;
+ cap->sym.auth.digest_size.max =
+ s_cap->sym.auth.digest_size.max <
+ cap->sym.auth.digest_size.max ?
+ s_cap->sym.auth.digest_size.max :
+ cap->sym.auth.digest_size.max;
+
+ }
+
+ if (s_cap->sym.xform_type ==
+ RTE_CRYPTO_SYM_XFORM_CIPHER)
+ if (s_cap->sym.cipher.algo !=
+ cap->sym.cipher.algo)
+ continue;
+
+ /* no common cap found */
+ break;
+ }
+
+ if (j < nb_slave_caps)
+ continue;
+
+ /* remove a uncommon cap from the array */
+ for (j = i; j < sync_nb_caps - 1; j++)
+ rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));
+
+ memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
+ sync_nb_caps--;
+ }
+
+ return sync_nb_caps;
+}
+
+static int
+update_scheduler_capability(struct scheduler_ctx *sched_ctx)
+{
+ struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
+ uint32_t nb_caps = 0, i;
+
+ if (sched_ctx->capabilities) {
+ rte_free(sched_ctx->capabilities);
+ sched_ctx->capabilities = NULL;
+ }
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+
+ nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
+ if (nb_caps == 0)
+ return -1;
+ }
+
+ sched_ctx->capabilities = rte_zmalloc_socket(NULL,
+ sizeof(struct rte_cryptodev_capabilities) *
+ (nb_caps + 1), 0, SOCKET_ID_ANY);
+ if (!sched_ctx->capabilities)
+ return -ENOMEM;
+
+ rte_memcpy(sched_ctx->capabilities, tmp_caps,
+ sizeof(struct rte_cryptodev_capabilities) * nb_caps);
+
+ return 0;
+}
+
+static void
+update_scheduler_feature_flag(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ dev->feature_flags = 0;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+
+ dev->feature_flags |= dev_info.feature_flags;
+ }
+}
+
+static void
+update_max_nb_qp(struct scheduler_ctx *sched_ctx)
+{
+ uint32_t i;
+ uint32_t max_nb_qp;
+
+ if (!sched_ctx->nb_slaves)
+ return;
+
+ max_nb_qp = sched_ctx->nb_slaves ? UINT32_MAX : 0;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+ max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
+ dev_info.max_nb_queue_pairs : max_nb_qp;
+ }
+
+ sched_ctx->max_nb_queue_pairs = max_nb_qp;
+}
+
+/** Attach a device to the scheduler. */
+int
+rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ struct scheduler_slave *slave;
+ struct rte_cryptodev_info dev_info;
+ uint32_t i;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CR_SCHED_LOG(ERR, "Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+ if (sched_ctx->nb_slaves >=
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
+ CR_SCHED_LOG(ERR, "Too many slaves attached");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++)
+ if (sched_ctx->slaves[i].dev_id == slave_id) {
+ CR_SCHED_LOG(ERR, "Slave already added");
+ return -ENOTSUP;
+ }
+
+ slave = &sched_ctx->slaves[sched_ctx->nb_slaves];
+
+ rte_cryptodev_info_get(slave_id, &dev_info);
+
+ slave->dev_id = slave_id;
+ slave->driver_id = dev_info.driver_id;
+ sched_ctx->nb_slaves++;
+
+ if (update_scheduler_capability(sched_ctx) < 0) {
+ slave->dev_id = 0;
+ slave->driver_id = 0;
+ sched_ctx->nb_slaves--;
+
+ CR_SCHED_LOG(ERR, "capabilities update failed");
+ return -ENOTSUP;
+ }
+
+ update_scheduler_feature_flag(dev);
+
+ update_max_nb_qp(sched_ctx);
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ uint32_t i, slave_pos;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CR_SCHED_LOG(ERR, "Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
+ if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
+ break;
+ if (slave_pos == sched_ctx->nb_slaves) {
+ CR_SCHED_LOG(ERR, "Cannot find slave");
+ return -ENOTSUP;
+ }
+
+ if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to detach slave");
+ return -ENOTSUP;
+ }
+
+ for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
+ memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
+ sizeof(struct scheduler_slave));
+ }
+ memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
+ sizeof(struct scheduler_slave));
+ sched_ctx->nb_slaves--;
+
+ if (update_scheduler_capability(sched_ctx) < 0) {
+ CR_SCHED_LOG(ERR, "capabilities update failed");
+ return -ENOTSUP;
+ }
+
+ update_scheduler_feature_flag(dev);
+
+ update_max_nb_qp(sched_ctx);
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
+ enum rte_cryptodev_scheduler_mode mode)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CR_SCHED_LOG(ERR, "Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ if (mode == sched_ctx->mode)
+ return 0;
+
+ switch (mode) {
+ case CDEV_SCHED_MODE_ROUNDROBIN:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ roundrobin_scheduler) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
+ return -1;
+ }
+ break;
+ case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ pkt_size_based_distr_scheduler) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
+ return -1;
+ }
+ break;
+ case CDEV_SCHED_MODE_FAILOVER:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ failover_scheduler) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
+ return -1;
+ }
+ break;
+ case CDEV_SCHED_MODE_MULTICORE:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ multicore_scheduler) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
+ return -1;
+ }
+ break;
+ default:
+ CR_SCHED_LOG(ERR, "Not yet supported");
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+enum rte_cryptodev_scheduler_mode
+rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ return sched_ctx->mode;
+}
+
+int
+rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
+ uint32_t enable_reorder)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CR_SCHED_LOG(ERR, "Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ sched_ctx->reordering_enabled = enable_reorder;
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ return (int)sched_ctx->reordering_enabled;
+}
+
+int
+rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
+ struct rte_cryptodev_scheduler *scheduler) {
+
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CR_SCHED_LOG(ERR, "Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
+ "%u bytes.", scheduler->name,
+ RTE_CRYPTODEV_NAME_MAX_LEN);
+ return -EINVAL;
+ }
+ snprintf(sched_ctx->name, sizeof(sched_ctx->name), "%s",
+ scheduler->name);
+
+ if (strlen(scheduler->description) >
+ RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
+ CR_SCHED_LOG(ERR, "Invalid description %s, should be less than "
+ "%u bytes.", scheduler->description,
+ RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
+ return -EINVAL;
+ }
+ snprintf(sched_ctx->description, sizeof(sched_ctx->description), "%s",
+ scheduler->description);
+
+ /* load scheduler instance operations functions */
+ sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
+ sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
+ sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
+ sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
+ sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
+ sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
+ sched_ctx->ops.option_set = scheduler->ops->option_set;
+ sched_ctx->ops.option_get = scheduler->ops->option_get;
+
+ if (sched_ctx->private_ctx) {
+ rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
+
+ if (sched_ctx->ops.create_private_ctx) {
+ int ret = (*sched_ctx->ops.create_private_ctx)(dev);
+
+ if (ret < 0) {
+ CR_SCHED_LOG(ERR, "Unable to create scheduler private "
+ "context");
+ return ret;
+ }
+ }
+
+ sched_ctx->mode = scheduler->mode;
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ uint32_t nb_slaves = 0;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ nb_slaves = sched_ctx->nb_slaves;
+
+ if (slaves && nb_slaves) {
+ uint32_t i;
+
+ for (i = 0; i < nb_slaves; i++)
+ slaves[i] = sched_ctx->slaves[i].dev_id;
+ }
+
+ return (int)nb_slaves;
+}
+
+int
+rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
+ option_type >= CDEV_SCHED_OPTION_COUNT) {
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
+ return -EINVAL;
+ }
+
+ if (!option) {
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_started) {
+ CR_SCHED_LOG(ERR, "Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);
+
+ return (*sched_ctx->ops.option_set)(dev, option_type, option);
+}
+
+int
+rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (!option) {
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
+ return -EINVAL;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);
+
+ return (*sched_ctx->ops.option_get)(dev, option_type, option);
+}
+
+RTE_INIT(scheduler_init_log)
+{
+ scheduler_logtype_driver = rte_log_register("pmd.crypto.scheduler");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
new file mode 100644
index 00000000..3faea409
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _RTE_CRYPTO_SCHEDULER_H
+#define _RTE_CRYPTO_SCHEDULER_H
+
+/**
+ * @file rte_cryptodev_scheduler.h
+ *
+ * RTE Cryptodev Scheduler Device
+ *
+ * The RTE Cryptodev Scheduler Device allows the aggregation of multiple (slave)
+ * Cryptodevs into a single logical crypto device, and the scheduling the
+ * crypto operations to the slaves based on the mode of the specified mode of
+ * operation specified and supported. This implementation supports 3 modes of
+ * operation: round robin, packet-size based, and fail-over.
+ */
+
+#include <stdint.h>
+#include "rte_cryptodev_scheduler_operations.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Maximum number of bonded devices per device */
+#ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES (8)
+#endif
+
+/** Maximum number of multi-core worker cores */
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (RTE_MAX_LCORE - 1)
+
+/** Round-robin scheduling mode string */
+#define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin
+/** Packet-size based distribution scheduling mode string */
+#define SCHEDULER_MODE_NAME_PKT_SIZE_DISTR packet-size-distr
+/** Fail-over scheduling mode string */
+#define SCHEDULER_MODE_NAME_FAIL_OVER fail-over
+/** multi-core scheduling mode string */
+#define SCHEDULER_MODE_NAME_MULTI_CORE multi-core
+
+/**
+ * Crypto scheduler PMD operation modes
+ */
+enum rte_cryptodev_scheduler_mode {
+ CDEV_SCHED_MODE_NOT_SET = 0,
+ /** User defined mode */
+ CDEV_SCHED_MODE_USERDEFINED,
+ /** Round-robin mode */
+ CDEV_SCHED_MODE_ROUNDROBIN,
+ /** Packet-size based distribution mode */
+ CDEV_SCHED_MODE_PKT_SIZE_DISTR,
+ /** Fail-over mode */
+ CDEV_SCHED_MODE_FAILOVER,
+ /** multi-core mode */
+ CDEV_SCHED_MODE_MULTICORE,
+
+ CDEV_SCHED_MODE_COUNT /**< number of modes */
+};
+
+#define RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN (64)
+#define RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN (256)
+
+/**
+ * Crypto scheduler option types
+ */
+enum rte_cryptodev_schedule_option_type {
+ CDEV_SCHED_OPTION_NOT_SET = 0,
+ CDEV_SCHED_OPTION_THRESHOLD,
+
+ CDEV_SCHED_OPTION_COUNT
+};
+
+/**
+ * Threshold option structure
+ */
+#define RTE_CRYPTODEV_SCHEDULER_PARAM_THRES "threshold"
+struct rte_cryptodev_scheduler_threshold_option {
+ uint32_t threshold; /**< Threshold for packet-size mode */
+};
+
+struct rte_cryptodev_scheduler;
+
+/**
+ * Load a user defined scheduler
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param scheduler
+ * Pointer to the user defined scheduler
+ *
+ * @return
+ * - 0 if the scheduler is successfully loaded
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ * - -EINVAL if input values are invalid.
+ */
+int
+rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
+ struct rte_cryptodev_scheduler *scheduler);
+
+/**
+ * Attach a crypto device to the scheduler
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param slave_id
+ * Crypto device ID to be attached
+ *
+ * @return
+ * - 0 if the slave is attached.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ * - -ENOMEM if the scheduler's slave list is full.
+ */
+int
+rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id);
+
+/**
+ * Detach a crypto device from the scheduler
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param slave_id
+ * Crypto device ID to be detached
+ *
+ * @return
+ * - 0 if the slave is detached.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id);
+
+
+/**
+ * Set the scheduling mode
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param mode
+ * The scheduling mode
+ *
+ * @return
+ * - 0 if the mode is set.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
+ enum rte_cryptodev_scheduler_mode mode);
+
+/**
+ * Get the current scheduling mode
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ *
+ * @return mode
+ * - non-negative enumerate value: the scheduling mode
+ * - -ENOTSUP if the operation is not supported.
+ */
+enum rte_cryptodev_scheduler_mode
+rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id);
+
+/**
+ * Set the crypto ops reordering feature on/off
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param enable_reorder
+ * Set the crypto op reordering feature
+ * - 0: disable reordering
+ * - 1: enable reordering
+ *
+ * @return
+ * - 0 if the ordering is set.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
+ uint32_t enable_reorder);
+
+/**
+ * Get the current crypto ops reordering feature
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ *
+ * @return
+ * - 0 if reordering is disabled
+ * - 1 if reordering is enabled
+ * - -ENOTSUP if the operation is not supported.
+ */
+int
+rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id);
+
+/**
+ * Get the attached slaves' count and/or ID
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param slaves
+ * If successful, the function will write back all slaves' device IDs to it.
+ * This parameter will either be an uint8_t array of
+ * RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES elements or NULL.
+ *
+ * @return
+ * - non-negative number: the number of slaves attached
+ * - -ENOTSUP if the operation is not supported.
+ */
+int
+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves);
+
+/**
+ * Set the mode specific option
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param option_type
+ * The option type enumerate
+ * @param option
+ * The specific mode's option structure
+ *
+ * @return
+ * - 0 if successful
+ * - negative integer if otherwise.
+ */
+int
+rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option);
+
+/**
+ * Set the mode specific option
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param option_type
+ * The option type enumerate
+ * @param option
+ * If successful, the function will write back the current
+ *
+ * @return
+ * - 0 if successful
+ * - negative integer if otherwise.
+ */
+int
+rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option);
+
+typedef uint16_t (*rte_cryptodev_scheduler_burst_enqueue_t)(void *qp_ctx,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+typedef uint16_t (*rte_cryptodev_scheduler_burst_dequeue_t)(void *qp_ctx,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+/** The data structure associated with each mode of scheduler. */
+struct rte_cryptodev_scheduler {
+ const char *name; /**< Scheduler name */
+ const char *description; /**< Scheduler description */
+ enum rte_cryptodev_scheduler_mode mode; /**< Scheduling mode */
+
+ /** Pointer to scheduler operation structure */
+ struct rte_cryptodev_scheduler_ops *ops;
+};
+
+/** Round-robin mode scheduler */
+extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
+/** Packet-size based distribution mode scheduler */
+extern struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler;
+/** Fail-over mode scheduler */
+extern struct rte_cryptodev_scheduler *failover_scheduler;
+/** multi-core mode scheduler */
+extern struct rte_cryptodev_scheduler *multicore_scheduler;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_CRYPTO_SCHEDULER_H */
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h b/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
new file mode 100644
index 00000000..c4369589
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _RTE_CRYPTO_SCHEDULER_OPERATIONS_H
+#define _RTE_CRYPTO_SCHEDULER_OPERATIONS_H
+
+#include <rte_cryptodev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int (*rte_cryptodev_scheduler_slave_attach_t)(
+ struct rte_cryptodev *dev, uint8_t slave_id);
+typedef int (*rte_cryptodev_scheduler_slave_detach_t)(
+ struct rte_cryptodev *dev, uint8_t slave_id);
+
+typedef int (*rte_cryptodev_scheduler_start_t)(struct rte_cryptodev *dev);
+typedef int (*rte_cryptodev_scheduler_stop_t)(struct rte_cryptodev *dev);
+
+typedef int (*rte_cryptodev_scheduler_config_queue_pair)(
+ struct rte_cryptodev *dev, uint16_t qp_id);
+
+typedef int (*rte_cryptodev_scheduler_create_private_ctx)(
+ struct rte_cryptodev *dev);
+
+typedef int (*rte_cryptodev_scheduler_config_option_set)(
+ struct rte_cryptodev *dev,
+ uint32_t option_type,
+ void *option);
+
+typedef int (*rte_cryptodev_scheduler_config_option_get)(
+ struct rte_cryptodev *dev,
+ uint32_t option_type,
+ void *option);
+
+struct rte_cryptodev_scheduler_ops {
+ rte_cryptodev_scheduler_slave_attach_t slave_attach;
+ rte_cryptodev_scheduler_slave_attach_t slave_detach;
+
+ rte_cryptodev_scheduler_start_t scheduler_start;
+ rte_cryptodev_scheduler_stop_t scheduler_stop;
+
+ rte_cryptodev_scheduler_config_queue_pair config_queue_pair;
+
+ rte_cryptodev_scheduler_create_private_ctx create_private_ctx;
+
+ rte_cryptodev_scheduler_config_option_set option_set;
+ rte_cryptodev_scheduler_config_option_get option_get;
+};
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_CRYPTO_SCHEDULER_OPERATIONS_H */
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map b/src/spdk/dpdk/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
new file mode 100644
index 00000000..5c43127c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
@@ -0,0 +1,21 @@
+DPDK_17.02 {
+ global:
+
+ rte_cryptodev_scheduler_load_user_scheduler;
+ rte_cryptodev_scheduler_slave_attach;
+ rte_cryptodev_scheduler_slave_detach;
+ rte_cryptodev_scheduler_ordering_set;
+ rte_cryptodev_scheduler_ordering_get;
+
+};
+
+DPDK_17.05 {
+ global:
+
+ rte_cryptodev_scheduler_mode_get;
+ rte_cryptodev_scheduler_mode_set;
+ rte_cryptodev_scheduler_option_get;
+ rte_cryptodev_scheduler_option_set;
+ rte_cryptodev_scheduler_slaves_get;
+
+} DPDK_17.02;
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_failover.c b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_failover.c
new file mode 100644
index 00000000..ddfb5b81
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_failover.c
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define PRIMARY_SLAVE_IDX 0
+#define SECONDARY_SLAVE_IDX 1
+#define NB_FAILOVER_SLAVES 2
+#define SLAVE_SWITCH_MASK (0x01)
+
+struct fo_scheduler_qp_ctx {
+ struct scheduler_slave primary_slave;
+ struct scheduler_slave secondary_slave;
+
+ uint8_t deq_idx;
+};
+
+static __rte_always_inline uint16_t
+failover_slave_enqueue(struct scheduler_slave *slave,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ uint16_t i, processed_ops;
+
+ for (i = 0; i < nb_ops && i < 4; i++)
+ rte_prefetch0(ops[i]->sym->session);
+
+ processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops += processed_ops;
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct fo_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ uint16_t enqueued_ops;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
+ ops, nb_ops);
+
+ if (enqueued_ops < nb_ops)
+ enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
+ &ops[enqueued_ops],
+ nb_ops - enqueued_ops);
+
+ return enqueued_ops;
+}
+
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct fo_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_slave *slaves[NB_FAILOVER_SLAVES] = {
+ &qp_ctx->primary_slave, &qp_ctx->secondary_slave};
+ struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
+ uint16_t nb_deq_ops = 0, nb_deq_ops2 = 0;
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops -= nb_deq_ops;
+ }
+
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_SWITCH_MASK;
+
+ if (nb_deq_ops == nb_ops)
+ return nb_deq_ops;
+
+ slave = slaves[qp_ctx->deq_idx];
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops2 = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops);
+ slave->nb_inflight_cops -= nb_deq_ops2;
+ }
+
+ return nb_deq_ops + nb_deq_ops2;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+
+ schedule_dequeue(qp, ops, nb_ops);
+
+ return scheduler_order_drain(order_ring, ops, nb_ops);
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint16_t i;
+
+ if (sched_ctx->nb_slaves < 2) {
+ CR_SCHED_LOG(ERR, "Number of slaves shall no less than 2");
+ return -ENOMEM;
+ }
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = schedule_enqueue_ordering;
+ dev->dequeue_burst = schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = schedule_enqueue;
+ dev->dequeue_burst = schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct fo_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)
+ dev->data->queue_pairs[i])->private_qp_ctx;
+
+ rte_memcpy(&qp_ctx->primary_slave,
+ &sched_ctx->slaves[PRIMARY_SLAVE_IDX],
+ sizeof(struct scheduler_slave));
+ rte_memcpy(&qp_ctx->secondary_slave,
+ &sched_ctx->slaves[SECONDARY_SLAVE_IDX],
+ sizeof(struct scheduler_slave));
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct fo_scheduler_qp_ctx *fo_qp_ctx;
+
+ fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
+ rte_socket_id());
+ if (!fo_qp_ctx) {
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ qp_ctx->private_qp_ctx = (void *)fo_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ NULL, /* option_set */
+ NULL /*option_get */
+};
+
+struct rte_cryptodev_scheduler fo_scheduler = {
+ .name = "failover-scheduler",
+ .description = "scheduler which enqueues to the primary slave, "
+ "and only then enqueues to the secondary slave "
+ "upon failing on enqueuing to primary",
+ .mode = CDEV_SCHED_MODE_FAILOVER,
+ .ops = &scheduler_fo_ops
+};
+
+struct rte_cryptodev_scheduler *failover_scheduler = &fo_scheduler;
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_multicore.c b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_multicore.c
new file mode 100644
index 00000000..d410e69d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_multicore.c
@@ -0,0 +1,413 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+#include <unistd.h>
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define MC_SCHED_ENQ_RING_NAME_PREFIX "MCS_ENQR_"
+#define MC_SCHED_DEQ_RING_NAME_PREFIX "MCS_DEQR_"
+
+#define MC_SCHED_BUFFER_SIZE 32
+
+#define CRYPTO_OP_STATUS_BIT_COMPLETE 0x80
+
+/** multi-core scheduler context */
+struct mc_scheduler_ctx {
+ uint32_t num_workers; /**< Number of workers polling */
+ uint32_t stop_signal;
+
+ struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
+ struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
+};
+
+struct mc_scheduler_qp_ctx {
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ uint32_t nb_slaves;
+
+ uint32_t last_enq_worker_idx;
+ uint32_t last_deq_worker_idx;
+
+ struct mc_scheduler_ctx *mc_private_ctx;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
+ uint32_t worker_idx = mc_qp_ctx->last_enq_worker_idx;
+ uint16_t i, processed_ops = 0;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
+ struct rte_ring *enq_ring = mc_ctx->sched_enq_ring[worker_idx];
+ uint16_t nb_queue_ops = rte_ring_enqueue_burst(enq_ring,
+ (void *)(&ops[processed_ops]), nb_ops, NULL);
+
+ nb_ops -= nb_queue_ops;
+ processed_ops += nb_queue_ops;
+
+ if (++worker_idx == mc_ctx->num_workers)
+ worker_idx = 0;
+ }
+ mc_qp_ctx->last_enq_worker_idx = worker_idx;
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
+ uint32_t worker_idx = mc_qp_ctx->last_deq_worker_idx;
+ uint16_t i, processed_ops = 0;
+
+ for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
+ struct rte_ring *deq_ring = mc_ctx->sched_deq_ring[worker_idx];
+ uint16_t nb_deq_ops = rte_ring_dequeue_burst(deq_ring,
+ (void *)(&ops[processed_ops]), nb_ops, NULL);
+
+ nb_ops -= nb_deq_ops;
+ processed_ops += nb_deq_ops;
+ if (++worker_idx == mc_ctx->num_workers)
+ worker_idx = 0;
+ }
+
+ mc_qp_ctx->last_deq_worker_idx = worker_idx;
+
+ return processed_ops;
+
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring = ((struct scheduler_qp_ctx *)qp)->order_ring;
+ struct rte_crypto_op *op;
+ uint32_t nb_objs = rte_ring_count(order_ring);
+ uint32_t nb_ops_to_deq = 0;
+ uint32_t nb_ops_deqd = 0;
+
+ if (nb_objs > nb_ops)
+ nb_objs = nb_ops;
+
+ while (nb_ops_to_deq < nb_objs) {
+ SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+
+ if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
+ break;
+
+ op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
+ nb_ops_to_deq++;
+ }
+
+ if (nb_ops_to_deq) {
+ nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
+ (void **)ops, nb_ops_to_deq, NULL);
+ }
+
+ return nb_ops_deqd;
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+mc_scheduler_worker(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ struct rte_ring *enq_ring;
+ struct rte_ring *deq_ring;
+ uint32_t core_id = rte_lcore_id();
+ int i, worker_idx = -1;
+ struct scheduler_slave *slave;
+ struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
+ struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
+ uint16_t processed_ops;
+ uint16_t pending_enq_ops = 0;
+ uint16_t pending_enq_ops_idx = 0;
+ uint16_t pending_deq_ops = 0;
+ uint16_t pending_deq_ops_idx = 0;
+ uint16_t inflight_ops = 0;
+ const uint8_t reordering_enabled = sched_ctx->reordering_enabled;
+
+ for (i = 0; i < (int)sched_ctx->nb_wc; i++) {
+ if (sched_ctx->wc_pool[i] == core_id) {
+ worker_idx = i;
+ break;
+ }
+ }
+ if (worker_idx == -1) {
+ CR_SCHED_LOG(ERR, "worker on core %u:cannot find worker index!",
+ core_id);
+ return -1;
+ }
+
+ slave = &sched_ctx->slaves[worker_idx];
+ enq_ring = mc_ctx->sched_enq_ring[worker_idx];
+ deq_ring = mc_ctx->sched_deq_ring[worker_idx];
+
+ while (!mc_ctx->stop_signal) {
+ if (pending_enq_ops) {
+ processed_ops =
+ rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, &enq_ops[pending_enq_ops_idx],
+ pending_enq_ops);
+ pending_enq_ops -= processed_ops;
+ pending_enq_ops_idx += processed_ops;
+ inflight_ops += processed_ops;
+ } else {
+ processed_ops = rte_ring_dequeue_burst(enq_ring, (void *)enq_ops,
+ MC_SCHED_BUFFER_SIZE, NULL);
+ if (processed_ops) {
+ pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
+ slave->dev_id, slave->qp_id,
+ enq_ops, processed_ops);
+ pending_enq_ops = processed_ops - pending_enq_ops_idx;
+ inflight_ops += pending_enq_ops_idx;
+ }
+ }
+
+ if (pending_deq_ops) {
+ processed_ops = rte_ring_enqueue_burst(
+ deq_ring, (void *)&deq_ops[pending_deq_ops_idx],
+ pending_deq_ops, NULL);
+ pending_deq_ops -= processed_ops;
+ pending_deq_ops_idx += processed_ops;
+ } else if (inflight_ops) {
+ processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
+ if (processed_ops) {
+ inflight_ops -= processed_ops;
+ if (reordering_enabled) {
+ uint16_t j;
+
+ for (j = 0; j < processed_ops; j++) {
+ deq_ops[j]->status |=
+ CRYPTO_OP_STATUS_BIT_COMPLETE;
+ }
+ } else {
+ pending_deq_ops_idx = rte_ring_enqueue_burst(
+ deq_ring, (void *)deq_ops, processed_ops,
+ NULL);
+ pending_deq_ops = processed_ops -
+ pending_deq_ops_idx;
+ }
+ }
+ }
+
+ rte_pause();
+ }
+
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ uint16_t i;
+
+ mc_ctx->stop_signal = 0;
+
+ for (i = 0; i < sched_ctx->nb_wc; i++)
+ rte_eal_remote_launch(
+ (lcore_function_t *)mc_scheduler_worker, dev,
+ sched_ctx->wc_pool[i]);
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ qp_ctx->private_qp_ctx;
+ uint32_t j;
+
+ memset(mc_qp_ctx->slaves, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
+ sizeof(struct scheduler_slave));
+ for (j = 0; j < sched_ctx->nb_slaves; j++) {
+ mc_qp_ctx->slaves[j].dev_id =
+ sched_ctx->slaves[j].dev_id;
+ mc_qp_ctx->slaves[j].qp_id = i;
+ }
+
+ mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+
+ mc_qp_ctx->last_enq_worker_idx = 0;
+ mc_qp_ctx->last_deq_worker_idx = 0;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ uint16_t i;
+
+ mc_ctx->stop_signal = 1;
+
+ for (i = 0; i < sched_ctx->nb_wc; i++)
+ rte_eal_wait_lcore(sched_ctx->wc_pool[i]);
+
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct mc_scheduler_qp_ctx *mc_qp_ctx;
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+
+ mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
+ rte_socket_id());
+ if (!mc_qp_ctx) {
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ mc_qp_ctx->mc_private_ctx = mc_ctx;
+ qp_ctx->private_qp_ctx = (void *)mc_qp_ctx;
+
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = NULL;
+ uint16_t i;
+
+ if (sched_ctx->private_ctx) {
+ rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
+
+ mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
+ rte_socket_id());
+ if (!mc_ctx) {
+ CR_SCHED_LOG(ERR, "failed allocate memory");
+ return -ENOMEM;
+ }
+
+ mc_ctx->num_workers = sched_ctx->nb_wc;
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ char r_name[16];
+
+ snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
+ mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
+ if (!mc_ctx->sched_enq_ring[i]) {
+ mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
+ PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_enq_ring[i]) {
+ CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
+ }
+ snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
+ mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
+ if (!mc_ctx->sched_deq_ring[i]) {
+ mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
+ PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_deq_ring[i]) {
+ CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
+ }
+ }
+
+ sched_ctx->private_ctx = (void *)mc_ctx;
+
+ return 0;
+
+exit:
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ rte_ring_free(mc_ctx->sched_enq_ring[i]);
+ rte_ring_free(mc_ctx->sched_deq_ring[i]);
+ }
+ rte_free(mc_ctx);
+
+ return -1;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ NULL, /* option_set */
+ NULL /* option_get */
+};
+
+struct rte_cryptodev_scheduler mc_scheduler = {
+ .name = "multicore-scheduler",
+ .description = "scheduler which will run burst across multiple cpu cores",
+ .mode = CDEV_SCHED_MODE_MULTICORE,
+ .ops = &scheduler_mc_ops
+};
+
+struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
new file mode 100644
index 00000000..74129b66
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -0,0 +1,420 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define DEF_PKT_SIZE_THRESHOLD (0xffffff80)
+#define SLAVE_IDX_SWITCH_MASK (0x01)
+#define PRIMARY_SLAVE_IDX 0
+#define SECONDARY_SLAVE_IDX 1
+#define NB_PKT_SIZE_SLAVES 2
+
+/** pkt size based scheduler context */
+struct psd_scheduler_ctx {
+ uint32_t threshold;
+};
+
+/** pkt size based scheduler queue pair context */
+struct psd_scheduler_qp_ctx {
+ struct scheduler_slave primary_slave;
+ struct scheduler_slave secondary_slave;
+ uint32_t threshold;
+ uint8_t deq_idx;
+} __rte_cache_aligned;
+
+/** scheduling operation variables' wrapping */
+struct psd_schedule_op {
+ uint8_t slave_idx;
+ uint16_t pos;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
+ struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
+ uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
+ psd_qp_ctx->primary_slave.nb_inflight_cops,
+ psd_qp_ctx->secondary_slave.nb_inflight_cops
+ };
+ struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
+ {PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
+ };
+ struct psd_schedule_op *p_enq_op;
+ uint16_t i, processed_ops_pri = 0, processed_ops_sec = 0;
+ uint32_t job_len;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ for (i = 0; i < nb_ops && i < 4; i++) {
+ rte_prefetch0(ops[i]->sym);
+ rte_prefetch0(ops[i]->sym->session);
+ }
+
+ for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
+ rte_prefetch0(ops[i + 4]->sym);
+ rte_prefetch0(ops[i + 4]->sym->session);
+ rte_prefetch0(ops[i + 5]->sym);
+ rte_prefetch0(ops[i + 5]->sym->session);
+ rte_prefetch0(ops[i + 6]->sym);
+ rte_prefetch0(ops[i + 6]->sym->session);
+ rte_prefetch0(ops[i + 7]->sym);
+ rte_prefetch0(ops[i + 7]->sym->session);
+
+ /* job_len is initialized as cipher data length, once
+ * it is 0, equals to auth data length
+ */
+ job_len = ops[i]->sym->cipher.data.length;
+ job_len += (ops[i]->sym->cipher.data.length == 0) *
+ ops[i]->sym->auth.data.length;
+ /* decide the target op based on the job length */
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ /* stop schedule cops before the queue is full, this shall
+ * prevent the failed enqueue
+ */
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+ p_enq_op->pos++;
+
+ job_len = ops[i+1]->sym->cipher.data.length;
+ job_len += (ops[i+1]->sym->cipher.data.length == 0) *
+ ops[i+1]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
+ p_enq_op->pos++;
+
+ job_len = ops[i+2]->sym->cipher.data.length;
+ job_len += (ops[i+2]->sym->cipher.data.length == 0) *
+ ops[i+2]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
+ p_enq_op->pos++;
+
+ job_len = ops[i+3]->sym->cipher.data.length;
+ job_len += (ops[i+3]->sym->cipher.data.length == 0) *
+ ops[i+3]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
+ p_enq_op->pos++;
+ }
+
+ for (; i < nb_ops; i++) {
+ job_len = ops[i]->sym->cipher.data.length;
+ job_len += (ops[i]->sym->cipher.data.length == 0) *
+ ops[i]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+ p_enq_op->pos++;
+ }
+
+ processed_ops_pri = rte_cryptodev_enqueue_burst(
+ psd_qp_ctx->primary_slave.dev_id,
+ psd_qp_ctx->primary_slave.qp_id,
+ sched_ops[PRIMARY_SLAVE_IDX],
+ enq_ops[PRIMARY_SLAVE_IDX].pos);
+ /* enqueue shall not fail as the slave queue is monitored */
+ RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);
+
+ psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
+
+ processed_ops_sec = rte_cryptodev_enqueue_burst(
+ psd_qp_ctx->secondary_slave.dev_id,
+ psd_qp_ctx->secondary_slave.qp_id,
+ sched_ops[SECONDARY_SLAVE_IDX],
+ enq_ops[SECONDARY_SLAVE_IDX].pos);
+ RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);
+
+ psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
+
+ return processed_ops_pri + processed_ops_sec;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct psd_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_slave *slaves[NB_PKT_SIZE_SLAVES] = {
+ &qp_ctx->primary_slave, &qp_ctx->secondary_slave};
+ struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
+ uint16_t nb_deq_ops_pri = 0, nb_deq_ops_sec = 0;
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops_pri = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops -= nb_deq_ops_pri;
+ }
+
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_IDX_SWITCH_MASK;
+
+ if (nb_deq_ops_pri == nb_ops)
+ return nb_deq_ops_pri;
+
+ slave = slaves[qp_ctx->deq_idx];
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops_sec = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, &ops[nb_deq_ops_pri],
+ nb_ops - nb_deq_ops_pri);
+ slave->nb_inflight_cops -= nb_deq_ops_sec;
+
+ if (!slave->nb_inflight_cops)
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) &
+ SLAVE_IDX_SWITCH_MASK;
+ }
+
+ return nb_deq_ops_pri + nb_deq_ops_sec;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+
+ schedule_dequeue(qp, ops, nb_ops);
+
+ return scheduler_order_drain(order_ring, ops, nb_ops);
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct psd_scheduler_ctx *psd_ctx = sched_ctx->private_ctx;
+ uint16_t i;
+
+ /* for packet size based scheduler, nb_slaves have to >= 2 */
+ if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
+ CR_SCHED_LOG(ERR, "not enough slaves to start");
+ return -1;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct psd_scheduler_qp_ctx *ps_qp_ctx =
+ qp_ctx->private_qp_ctx;
+
+ ps_qp_ctx->primary_slave.dev_id =
+ sched_ctx->slaves[PRIMARY_SLAVE_IDX].dev_id;
+ ps_qp_ctx->primary_slave.qp_id = i;
+ ps_qp_ctx->primary_slave.nb_inflight_cops = 0;
+
+ ps_qp_ctx->secondary_slave.dev_id =
+ sched_ctx->slaves[SECONDARY_SLAVE_IDX].dev_id;
+ ps_qp_ctx->secondary_slave.qp_id = i;
+ ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;
+
+ ps_qp_ctx->threshold = psd_ctx->threshold;
+ }
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(struct rte_cryptodev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct psd_scheduler_qp_ctx *ps_qp_ctx = qp_ctx->private_qp_ctx;
+
+ if (ps_qp_ctx->primary_slave.nb_inflight_cops +
+ ps_qp_ctx->secondary_slave.nb_inflight_cops) {
+ CR_SCHED_LOG(ERR, "Some crypto ops left in slave queue");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct psd_scheduler_qp_ctx *ps_qp_ctx;
+
+ ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0,
+ rte_socket_id());
+ if (!ps_qp_ctx) {
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ qp_ctx->private_qp_ctx = (void *)ps_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct psd_scheduler_ctx *psd_ctx;
+
+ if (sched_ctx->private_ctx) {
+ rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
+
+ psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
+ rte_socket_id());
+ if (!psd_ctx) {
+ CR_SCHED_LOG(ERR, "failed allocate memory");
+ return -ENOMEM;
+ }
+
+ psd_ctx->threshold = DEF_PKT_SIZE_THRESHOLD;
+
+ sched_ctx->private_ctx = (void *)psd_ctx;
+
+ return 0;
+}
+static int
+scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type,
+ void *option)
+{
+ struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
+ dev->data->dev_private)->private_ctx;
+ uint32_t threshold;
+
+ if ((enum rte_cryptodev_schedule_option_type)option_type !=
+ CDEV_SCHED_OPTION_THRESHOLD) {
+ CR_SCHED_LOG(ERR, "Option not supported");
+ return -EINVAL;
+ }
+
+ threshold = ((struct rte_cryptodev_scheduler_threshold_option *)
+ option)->threshold;
+ if (!rte_is_power_of_2(threshold)) {
+ CR_SCHED_LOG(ERR, "Threshold is not power of 2");
+ return -EINVAL;
+ }
+
+ psd_ctx->threshold = ~(threshold - 1);
+
+ return 0;
+}
+
+static int
+scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
+ void *option)
+{
+ struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
+ dev->data->dev_private)->private_ctx;
+ struct rte_cryptodev_scheduler_threshold_option *threshold_option;
+
+ if ((enum rte_cryptodev_schedule_option_type)option_type !=
+ CDEV_SCHED_OPTION_THRESHOLD) {
+ CR_SCHED_LOG(ERR, "Option not supported");
+ return -EINVAL;
+ }
+
+ threshold_option = option;
+ threshold_option->threshold = (~psd_ctx->threshold) + 1;
+
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ scheduler_option_set,
+ scheduler_option_get
+};
+
+struct rte_cryptodev_scheduler psd_scheduler = {
+ .name = "packet-size-based-scheduler",
+ .description = "scheduler which will distribute crypto op "
+ "burst based on the packet size",
+ .mode = CDEV_SCHED_MODE_PKT_SIZE_DISTR,
+ .ops = &scheduler_ps_ops
+};
+
+struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler = &psd_scheduler;
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd.c b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd.c
new file mode 100644
index 00000000..a9221a94
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd.c
@@ -0,0 +1,572 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+#include <rte_reorder.h>
+#include <rte_string_fns.h>
+
+#include "rte_cryptodev_scheduler.h"
+#include "scheduler_pmd_private.h"
+
+uint8_t cryptodev_driver_id;
+
+struct scheduler_init_params {
+ struct rte_cryptodev_pmd_init_params def_p;
+ uint32_t nb_slaves;
+ enum rte_cryptodev_scheduler_mode mode;
+ char mode_param_str[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
+ uint32_t enable_ordering;
+ uint16_t wc_pool[RTE_MAX_LCORE];
+ uint16_t nb_wc;
+ char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
+ [RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
+};
+
+#define RTE_CRYPTODEV_VDEV_NAME ("name")
+#define RTE_CRYPTODEV_VDEV_SLAVE ("slave")
+#define RTE_CRYPTODEV_VDEV_MODE ("mode")
+#define RTE_CRYPTODEV_VDEV_MODE_PARAM ("mode_param")
+#define RTE_CRYPTODEV_VDEV_ORDERING ("ordering")
+#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
+#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
+#define RTE_CRYPTODEV_VDEV_COREMASK ("coremask")
+#define RTE_CRYPTODEV_VDEV_CORELIST ("corelist")
+
+const char *scheduler_valid_params[] = {
+ RTE_CRYPTODEV_VDEV_NAME,
+ RTE_CRYPTODEV_VDEV_SLAVE,
+ RTE_CRYPTODEV_VDEV_MODE,
+ RTE_CRYPTODEV_VDEV_MODE_PARAM,
+ RTE_CRYPTODEV_VDEV_ORDERING,
+ RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+ RTE_CRYPTODEV_VDEV_SOCKET_ID,
+ RTE_CRYPTODEV_VDEV_COREMASK,
+ RTE_CRYPTODEV_VDEV_CORELIST
+};
+
+struct scheduler_parse_map {
+ const char *name;
+ uint32_t val;
+};
+
+const struct scheduler_parse_map scheduler_mode_map[] = {
+ {RTE_STR(SCHEDULER_MODE_NAME_ROUND_ROBIN),
+ CDEV_SCHED_MODE_ROUNDROBIN},
+ {RTE_STR(SCHEDULER_MODE_NAME_PKT_SIZE_DISTR),
+ CDEV_SCHED_MODE_PKT_SIZE_DISTR},
+ {RTE_STR(SCHEDULER_MODE_NAME_FAIL_OVER),
+ CDEV_SCHED_MODE_FAILOVER},
+ {RTE_STR(SCHEDULER_MODE_NAME_MULTI_CORE),
+ CDEV_SCHED_MODE_MULTICORE}
+};
+
+const struct scheduler_parse_map scheduler_ordering_map[] = {
+ {"enable", 1},
+ {"disable", 0}
+};
+
+#define CDEV_SCHED_MODE_PARAM_SEP_CHAR ':'
+
+static int
+cryptodev_scheduler_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct scheduler_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct scheduler_ctx *sched_ctx;
+ uint32_t i;
+ int ret;
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device,
+ &init_params->def_p);
+ if (dev == NULL) {
+ CR_SCHED_LOG(ERR, "driver %s: failed to create cryptodev vdev",
+ name);
+ return -EFAULT;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_crypto_scheduler_pmd_ops;
+
+ sched_ctx = dev->data->dev_private;
+ sched_ctx->max_nb_queue_pairs =
+ init_params->def_p.max_nb_queue_pairs;
+
+ if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
+ uint16_t i;
+
+ sched_ctx->nb_wc = init_params->nb_wc;
+
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ sched_ctx->wc_pool[i] = init_params->wc_pool[i];
+ CR_SCHED_LOG(INFO, " Worker core[%u]=%u added",
+ i, sched_ctx->wc_pool[i]);
+ }
+ }
+
+ if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
+ init_params->mode < CDEV_SCHED_MODE_COUNT) {
+ union {
+ struct rte_cryptodev_scheduler_threshold_option
+ threshold_option;
+ } option;
+ enum rte_cryptodev_schedule_option_type option_type;
+ char param_name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0};
+ char param_val[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0};
+ char *s, *end;
+
+ ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
+ init_params->mode);
+ if (ret < 0) {
+ rte_cryptodev_pmd_release_device(dev);
+ return ret;
+ }
+
+ for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) {
+ if (scheduler_mode_map[i].val != sched_ctx->mode)
+ continue;
+
+ CR_SCHED_LOG(INFO, " Scheduling mode = %s",
+ scheduler_mode_map[i].name);
+ break;
+ }
+
+ if (strlen(init_params->mode_param_str) > 0) {
+ s = strchr(init_params->mode_param_str,
+ CDEV_SCHED_MODE_PARAM_SEP_CHAR);
+ if (s == NULL) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ strlcpy(param_name, init_params->mode_param_str,
+ s - init_params->mode_param_str + 1);
+ s++;
+ strlcpy(param_val, s,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+ switch (init_params->mode) {
+ case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
+ if (strcmp(param_name,
+ RTE_CRYPTODEV_SCHEDULER_PARAM_THRES)
+ != 0) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+ option_type = CDEV_SCHED_OPTION_THRESHOLD;
+
+ option.threshold_option.threshold =
+ strtoul(param_val, &end, 0);
+ break;
+ default:
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ if (sched_ctx->ops.option_set(dev, option_type,
+ (void *)&option) < 0) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ RTE_LOG(INFO, PMD, " Sched mode param (%s = %s)\n",
+ param_name, param_val);
+ }
+ }
+
+ sched_ctx->reordering_enabled = init_params->enable_ordering;
+
+ for (i = 0; i < RTE_DIM(scheduler_ordering_map); i++) {
+ if (scheduler_ordering_map[i].val !=
+ sched_ctx->reordering_enabled)
+ continue;
+
+ CR_SCHED_LOG(INFO, " Packet ordering = %s",
+ scheduler_ordering_map[i].name);
+
+ break;
+ }
+
+ for (i = 0; i < init_params->nb_slaves; i++) {
+ sched_ctx->init_slave_names[sched_ctx->nb_init_slaves] =
+ rte_zmalloc_socket(
+ NULL,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN, 0,
+ SOCKET_ID_ANY);
+
+ if (!sched_ctx->init_slave_names[
+ sched_ctx->nb_init_slaves]) {
+ CR_SCHED_LOG(ERR, "driver %s: Insufficient memory",
+ name);
+ return -ENOMEM;
+ }
+
+ strncpy(sched_ctx->init_slave_names[
+ sched_ctx->nb_init_slaves],
+ init_params->slave_names[i],
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
+
+ sched_ctx->nb_init_slaves++;
+ }
+
+ /*
+ * Initialize capabilities structure as an empty structure,
+ * in case device information is requested when no slaves are attached
+ */
+ sched_ctx->capabilities = rte_zmalloc_socket(NULL,
+ sizeof(struct rte_cryptodev_capabilities),
+ 0, SOCKET_ID_ANY);
+
+ if (!sched_ctx->capabilities) {
+ CR_SCHED_LOG(ERR, "Not enough memory for capability "
+ "information");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ struct rte_cryptodev *dev;
+ struct scheduler_ctx *sched_ctx;
+
+ if (vdev == NULL)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(vdev);
+ dev = rte_cryptodev_pmd_get_named_dev(name);
+ if (dev == NULL)
+ return -EINVAL;
+
+ sched_ctx = dev->data->dev_private;
+
+ if (sched_ctx->nb_slaves) {
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++)
+ rte_cryptodev_scheduler_slave_detach(dev->data->dev_id,
+ sched_ctx->slaves[i].dev_id);
+ }
+
+ return rte_cryptodev_pmd_destroy(dev);
+}
+
+/** Parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+ CR_SCHED_LOG(ERR, "Argument has to be positive.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Parse integer from hexadecimal integer argument */
+static int
+parse_coremask_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int i, j, val;
+ uint16_t idx = 0;
+ char c;
+ struct scheduler_init_params *params = extra_args;
+
+ params->nb_wc = 0;
+
+ if (value == NULL)
+ return -1;
+ /* Remove all blank characters ahead and after .
+ * Remove 0x/0X if exists.
+ */
+ while (isblank(*value))
+ value++;
+ if (value[0] == '0' && ((value[1] == 'x') || (value[1] == 'X')))
+ value += 2;
+ i = strlen(value);
+ while ((i > 0) && isblank(value[i - 1]))
+ i--;
+
+ if (i == 0)
+ return -1;
+
+ for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
+ c = value[i];
+ if (isxdigit(c) == 0) {
+ /* invalid characters */
+ return -1;
+ }
+ if (isdigit(c))
+ val = c - '0';
+ else if (isupper(c))
+ val = c - 'A' + 10;
+ else
+ val = c - 'a' + 10;
+
+ for (j = 0; j < 4 && idx < RTE_MAX_LCORE; j++, idx++) {
+ if ((1 << j) & val)
+ params->wc_pool[params->nb_wc++] = idx;
+ }
+ }
+
+ return 0;
+}
+
+/** Parse integer from list of integers argument */
+static int
+parse_corelist_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *params = extra_args;
+
+ params->nb_wc = 0;
+
+ const char *token = value;
+
+ while (isdigit(token[0])) {
+ char *rval;
+ unsigned int core = strtoul(token, &rval, 10);
+
+ if (core >= RTE_MAX_LCORE) {
+ CR_SCHED_LOG(ERR, "Invalid worker core %u, should be smaller "
+ "than %u.", core, RTE_MAX_LCORE);
+ }
+ params->wc_pool[params->nb_wc++] = (uint16_t)core;
+ token = (const char *)rval;
+ if (token[0] == '\0')
+ break;
+ token++;
+ }
+
+ return 0;
+}
+
+/** Parse name */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
+ "%u bytes.", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+/** Parse slave */
+static int
+parse_slave_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+
+ if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
+ CR_SCHED_LOG(ERR, "Too many slaves.");
+ return -ENOMEM;
+ }
+
+ strncpy(param->slave_names[param->nb_slaves++], value,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
+
+ return 0;
+}
+
+static int
+parse_mode_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) {
+ if (strcmp(value, scheduler_mode_map[i].name) == 0) {
+ param->mode = (enum rte_cryptodev_scheduler_mode)
+ scheduler_mode_map[i].val;
+
+ break;
+ }
+ }
+
+ if (i == RTE_DIM(scheduler_mode_map)) {
+ CR_SCHED_LOG(ERR, "Unrecognized input.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+parse_mode_param_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+
+ strlcpy(param->mode_param_str, value,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+ return 0;
+}
+
+static int
+parse_ordering_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(scheduler_ordering_map); i++) {
+ if (strcmp(value, scheduler_ordering_map[i].name) == 0) {
+ param->enable_ordering =
+ scheduler_ordering_map[i].val;
+ break;
+ }
+ }
+
+ if (i == RTE_DIM(scheduler_ordering_map)) {
+ CR_SCHED_LOG(ERR, "Unrecognized input.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_parse_init_params(struct scheduler_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ scheduler_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+ &parse_integer_arg,
+ &params->def_p.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
+ &parse_integer_arg,
+ &params->def_p.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_COREMASK,
+ &parse_coremask_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_CORELIST,
+ &parse_corelist_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
+ &parse_name_arg,
+ &params->def_p);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SLAVE,
+ &parse_slave_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_MODE,
+ &parse_mode_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_MODE_PARAM,
+ &parse_mode_param_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_ORDERING,
+ &parse_ordering_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
+{
+ struct scheduler_init_params init_params = {
+ .def_p = {
+ "",
+ sizeof(struct scheduler_ctx),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ },
+ .nb_slaves = 0,
+ .mode = CDEV_SCHED_MODE_NOT_SET,
+ .enable_ordering = 0,
+ .slave_names = { {0} }
+ };
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ scheduler_parse_init_params(&init_params,
+ rte_vdev_device_args(vdev));
+
+
+ return cryptodev_scheduler_create(name,
+ vdev,
+ &init_params);
+}
+
+static struct rte_vdev_driver cryptodev_scheduler_pmd_drv = {
+ .probe = cryptodev_scheduler_probe,
+ .remove = cryptodev_scheduler_remove
+};
+
+static struct cryptodev_driver scheduler_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,
+ cryptodev_scheduler_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int> "
+ "slave=<name>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
+ cryptodev_scheduler_pmd_drv.driver,
+ cryptodev_driver_id);
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c
new file mode 100644
index 00000000..778071ca
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -0,0 +1,545 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_reorder.h>
+
+#include "scheduler_pmd_private.h"
+
+/** attaching the slaves predefined by scheduler's EAL options */
+static int
+scheduler_attach_init_slave(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint8_t scheduler_id = dev->data->dev_id;
+ int i;
+
+ for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
+ const char *dev_name = sched_ctx->init_slave_names[i];
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_named_dev(dev_name);
+ int status;
+
+ if (!slave_dev) {
+ CR_SCHED_LOG(ERR, "Failed to locate slave dev %s",
+ dev_name);
+ return -EINVAL;
+ }
+
+ status = rte_cryptodev_scheduler_slave_attach(
+ scheduler_id, slave_dev->data->dev_id);
+
+ if (status < 0) {
+ CR_SCHED_LOG(ERR, "Failed to attach slave cryptodev %u",
+ slave_dev->data->dev_id);
+ return status;
+ }
+
+ CR_SCHED_LOG(INFO, "Scheduler %s attached slave %s",
+ dev->data->name,
+ sched_ctx->init_slave_names[i]);
+
+ rte_free(sched_ctx->init_slave_names[i]);
+ sched_ctx->init_slave_names[i] = NULL;
+
+ sched_ctx->nb_init_slaves -= 1;
+ }
+
+ return 0;
+}
+/** Configure device */
+static int
+scheduler_pmd_config(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+ /* although scheduler_attach_init_slave presents multiple times,
+ * there will be only 1 meaningful execution.
+ */
+ ret = scheduler_attach_init_slave(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+ ret = rte_cryptodev_configure(slave_dev_id, config);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int
+update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+
+ if (sched_ctx->reordering_enabled) {
+ char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ uint32_t buff_size = rte_align32pow2(
+ sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);
+
+ if (qp_ctx->order_ring) {
+ rte_ring_free(qp_ctx->order_ring);
+ qp_ctx->order_ring = NULL;
+ }
+
+ if (!buff_size)
+ return 0;
+
+ if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
+ dev->data->dev_id, qp_id) < 0) {
+ CR_SCHED_LOG(ERR, "failed to create unique reorder buffer"
+ "name");
+ return -ENOMEM;
+ }
+
+ qp_ctx->order_ring = rte_ring_create(order_ring_name,
+ buff_size, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (!qp_ctx->order_ring) {
+ CR_SCHED_LOG(ERR, "failed to create order ring");
+ return -ENOMEM;
+ }
+ } else {
+ if (qp_ctx->order_ring) {
+ rte_ring_free(qp_ctx->order_ring);
+ qp_ctx->order_ring = NULL;
+ }
+ }
+
+ return 0;
+}
+
+/** Start device */
+static int
+scheduler_pmd_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+ if (dev->data->dev_started)
+ return 0;
+
+ /* although scheduler_attach_init_slave presents multiple times,
+ * there will be only 1 meaningful execution.
+ */
+ ret = scheduler_attach_init_slave(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = update_order_ring(dev, i);
+ if (ret < 0) {
+ CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
+ return ret;
+ }
+ }
+
+ if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
+ CR_SCHED_LOG(ERR, "Scheduler mode is not set");
+ return -1;
+ }
+
+ if (!sched_ctx->nb_slaves) {
+ CR_SCHED_LOG(ERR, "No slave in the scheduler");
+ return -1;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+ if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to attach slave");
+ return -ENOTSUP;
+ }
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);
+
+ if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
+ CR_SCHED_LOG(ERR, "Scheduler start failed");
+ return -1;
+ }
+
+ /* start all slaves */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
+ if (ret < 0) {
+ CR_SCHED_LOG(ERR, "Failed to start slave dev %u",
+ slave_dev_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/** Stop device */
+static void
+scheduler_pmd_stop(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ if (!dev->data->dev_started)
+ return;
+
+ /* stop all slaves first */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ (*slave_dev->dev_ops->dev_stop)(slave_dev);
+ }
+
+ if (*sched_ctx->ops.scheduler_stop)
+ (*sched_ctx->ops.scheduler_stop)(dev);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+ if (*sched_ctx->ops.slave_detach)
+ (*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
+ }
+}
+
+/** Close device */
+static int
+scheduler_pmd_close(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+ /* the dev should be stopped before being closed */
+ if (dev->data->dev_started)
+ return -EBUSY;
+
+ /* close all slaves first */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+
+ if (qp_ctx->order_ring) {
+ rte_ring_free(qp_ctx->order_ring);
+ qp_ctx->order_ring = NULL;
+ }
+
+ if (qp_ctx->private_qp_ctx) {
+ rte_free(qp_ctx->private_qp_ctx);
+ qp_ctx->private_qp_ctx = NULL;
+ }
+ }
+
+ if (sched_ctx->private_ctx) {
+ rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
+
+ if (sched_ctx->capabilities) {
+ rte_free(sched_ctx->capabilities);
+ sched_ctx->capabilities = NULL;
+ }
+
+ return 0;
+}
+
+/** Get device statistics */
+static void
+scheduler_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+ struct rte_cryptodev_stats slave_stats = {0};
+
+ (*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);
+
+ stats->enqueued_count += slave_stats.enqueued_count;
+ stats->dequeued_count += slave_stats.dequeued_count;
+
+ stats->enqueue_err_count += slave_stats.enqueue_err_count;
+ stats->dequeue_err_count += slave_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ (*slave_dev->dev_ops->stats_reset)(slave_dev);
+ }
+}
+
+/** Get device info */
+static void
+scheduler_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t max_nb_sess = 0;
+ uint16_t headroom_sz = 0;
+ uint16_t tailroom_sz = 0;
+ uint32_t i;
+
+ if (!dev_info)
+ return;
+
+ /* although scheduler_attach_init_slave presents multiple times,
+ * there will be only 1 meaningful execution.
+ */
+ scheduler_attach_init_slave(dev);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev_info slave_info;
+
+ rte_cryptodev_info_get(slave_dev_id, &slave_info);
+ uint32_t dev_max_sess = slave_info.sym.max_nb_sessions;
+ if (dev_max_sess != 0) {
+ if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
+ max_nb_sess = slave_info.sym.max_nb_sessions;
+ }
+
+ /* Get the max headroom requirement among slave PMDs */
+ headroom_sz = slave_info.min_mbuf_headroom_req >
+ headroom_sz ?
+ slave_info.min_mbuf_headroom_req :
+ headroom_sz;
+
+ /* Get the max tailroom requirement among slave PMDs */
+ tailroom_sz = slave_info.min_mbuf_tailroom_req >
+ tailroom_sz ?
+ slave_info.min_mbuf_tailroom_req :
+ tailroom_sz;
+ }
+
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = sched_ctx->capabilities;
+ dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
+ dev_info->min_mbuf_headroom_req = headroom_sz;
+ dev_info->min_mbuf_tailroom_req = tailroom_sz;
+ dev_info->sym.max_nb_sessions = max_nb_sess;
+}
+
+/** Release queue pair */
+static int
+scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+
+ if (!qp_ctx)
+ return 0;
+
+ if (qp_ctx->order_ring)
+ rte_ring_free(qp_ctx->order_ring);
+ if (qp_ctx->private_qp_ctx)
+ rte_free(qp_ctx->private_qp_ctx);
+
+ rte_free(qp_ctx);
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+/** Setup a queue pair */
+static int
+scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
+ struct rte_mempool *session_pool)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct scheduler_qp_ctx *qp_ctx;
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ uint32_t i;
+ int ret;
+
+ if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "CRYTO_SCHE PMD %u QP %u",
+ dev->data->dev_id, qp_id) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
+ return -EFAULT;
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ scheduler_pmd_qp_release(dev, qp_id);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_id = sched_ctx->slaves[i].dev_id;
+
+ /*
+ * All slaves will share the same session mempool
+ * for session-less operations, so the objects
+ * must be big enough for all the drivers used.
+ */
+ ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
+ qp_conf, socket_id, session_pool);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Allocate the queue pair data structure. */
+ qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (qp_ctx == NULL)
+ return -ENOMEM;
+
+ /* The actual available object number = nb_descriptors - 1 */
+ qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;
+
+ dev->data->queue_pairs[qp_id] = qp_ctx;
+
+ /* although scheduler_attach_init_slave presents multiple times,
+ * there will be only 1 meaningful execution.
+ */
+ ret = scheduler_attach_init_slave(dev);
+ if (ret < 0) {
+ CR_SCHED_LOG(ERR, "Failed to attach slave");
+ scheduler_pmd_qp_release(dev, qp_id);
+ return ret;
+ }
+
+ if (*sched_ctx->ops.config_queue_pair) {
+ if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
+ CR_SCHED_LOG(ERR, "Unable to configure queue pair");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+scheduler_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+static uint32_t
+scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint8_t i = 0;
+ uint32_t max_priv_sess_size = 0;
+
+ /* Check what is the maximum private session size for all slaves */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];
+ uint32_t priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
+
+ if (max_priv_sess_size < priv_sess_size)
+ max_priv_sess_size = priv_sess_size;
+ }
+
+ return max_priv_sess_size;
+}
+
+static int
+scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct scheduler_slave *slave = &sched_ctx->slaves[i];
+
+ ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
+ xform, mempool);
+ if (ret < 0) {
+ CR_SCHED_LOG(ERR, "unable to config sym session");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ /* Clear private data of slaves */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct scheduler_slave *slave = &sched_ctx->slaves[i];
+
+ rte_cryptodev_sym_session_clear(slave->dev_id, sess);
+ }
+}
+
+struct rte_cryptodev_ops scheduler_pmd_ops = {
+ .dev_configure = scheduler_pmd_config,
+ .dev_start = scheduler_pmd_start,
+ .dev_stop = scheduler_pmd_stop,
+ .dev_close = scheduler_pmd_close,
+
+ .stats_get = scheduler_pmd_stats_get,
+ .stats_reset = scheduler_pmd_stats_reset,
+
+ .dev_infos_get = scheduler_pmd_info_get,
+
+ .queue_pair_setup = scheduler_pmd_qp_setup,
+ .queue_pair_release = scheduler_pmd_qp_release,
+ .queue_pair_count = scheduler_pmd_qp_count,
+
+ .sym_session_get_size = scheduler_pmd_sym_session_get_size,
+ .sym_session_configure = scheduler_pmd_sym_session_configure,
+ .sym_session_clear = scheduler_pmd_sym_session_clear,
+};
+
+struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_private.h b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_private.h
new file mode 100644
index 00000000..d5e602a2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _SCHEDULER_PMD_PRIVATE_H
+#define _SCHEDULER_PMD_PRIVATE_H
+
+#include "rte_cryptodev_scheduler.h"
+
+#define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler
+/**< Scheduler Crypto PMD device name */
+
+#define PER_SLAVE_BUFF_SIZE (256)
+
+extern int scheduler_logtype_driver;
+
+#define CR_SCHED_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, scheduler_logtype_driver, \
+ "%s() line %u: "fmt "\n", __func__, __LINE__, ##args)
+
+struct scheduler_slave {
+ uint8_t dev_id;
+ uint16_t qp_id;
+ uint32_t nb_inflight_cops;
+
+ uint8_t driver_id;
+};
+
+struct scheduler_ctx {
+ void *private_ctx;
+ /**< private scheduler context pointer */
+
+ struct rte_cryptodev_capabilities *capabilities;
+ uint32_t nb_capabilities;
+
+ uint32_t max_nb_queue_pairs;
+
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ uint32_t nb_slaves;
+
+ enum rte_cryptodev_scheduler_mode mode;
+
+ struct rte_cryptodev_scheduler_ops ops;
+
+ uint8_t reordering_enabled;
+
+ char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
+ char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
+ uint16_t wc_pool[RTE_MAX_LCORE];
+ uint16_t nb_wc;
+
+ char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ int nb_init_slaves;
+} __rte_cache_aligned;
+
+struct scheduler_qp_ctx {
+ void *private_qp_ctx;
+
+ uint32_t max_nb_objs;
+
+ struct rte_ring *order_ring;
+ uint32_t seqn;
+} __rte_cache_aligned;
+
+
+extern uint8_t cryptodev_driver_id;
+
+static __rte_always_inline uint16_t
+get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
+{
+ uint32_t count = rte_ring_free_count(order_ring);
+
+ return count > nb_ops ? nb_ops : count;
+}
+
+static __rte_always_inline void
+scheduler_order_insert(struct rte_ring *order_ring,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
+}
+
+#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do { \
+ struct rte_crypto_op **ring = (void *)&order_ring[1]; \
+ op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
+} while (0)
+
+static __rte_always_inline uint16_t
+scheduler_order_drain(struct rte_ring *order_ring,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op *op;
+ uint32_t nb_objs = rte_ring_count(order_ring);
+ uint32_t nb_ops_to_deq = 0;
+ uint32_t nb_ops_deqd = 0;
+
+ if (nb_objs > nb_ops)
+ nb_objs = nb_ops;
+
+ while (nb_ops_to_deq < nb_objs) {
+ SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ break;
+ nb_ops_to_deq++;
+ }
+
+ if (nb_ops_to_deq)
+ nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
+ (void **)ops, nb_ops_to_deq, NULL);
+
+ return nb_ops_deqd;
+}
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
+
+#endif /* _SCHEDULER_PMD_PRIVATE_H */
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_roundrobin.c b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_roundrobin.c
new file mode 100644
index 00000000..c7082a64
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+struct rr_scheduler_qp_ctx {
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ uint32_t nb_slaves;
+
+ uint32_t last_enq_slave_idx;
+ uint32_t last_deq_slave_idx;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rr_scheduler_qp_ctx *rr_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
+ struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
+ uint16_t i, processed_ops;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ for (i = 0; i < nb_ops && i < 4; i++)
+ rte_prefetch0(ops[i]->sym->session);
+
+ processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+
+ slave->nb_inflight_cops += processed_ops;
+
+ rr_qp_ctx->last_enq_slave_idx += 1;
+ rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rr_scheduler_qp_ctx *rr_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_slave *slave;
+ uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
+ uint16_t nb_deq_ops;
+
+ if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
+ do {
+ last_slave_idx += 1;
+
+ if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
+ last_slave_idx = 0;
+ /* looped back, means no inflight cops in the queue */
+ if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
+ return 0;
+ } while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
+ == 0);
+ }
+
+ slave = &rr_qp_ctx->slaves[last_slave_idx];
+
+ nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+
+ last_slave_idx += 1;
+ last_slave_idx %= rr_qp_ctx->nb_slaves;
+
+ rr_qp_ctx->last_deq_slave_idx = last_slave_idx;
+
+ slave->nb_inflight_cops -= nb_deq_ops;
+
+ return nb_deq_ops;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+
+ schedule_dequeue(qp, ops, nb_ops);
+
+ return scheduler_order_drain(order_ring, ops, nb_ops);
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint16_t i;
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct rr_scheduler_qp_ctx *rr_qp_ctx =
+ qp_ctx->private_qp_ctx;
+ uint32_t j;
+
+ memset(rr_qp_ctx->slaves, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
+ sizeof(struct scheduler_slave));
+ for (j = 0; j < sched_ctx->nb_slaves; j++) {
+ rr_qp_ctx->slaves[j].dev_id =
+ sched_ctx->slaves[j].dev_id;
+ rr_qp_ctx->slaves[j].qp_id = i;
+ }
+
+ rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+
+ rr_qp_ctx->last_enq_slave_idx = 0;
+ rr_qp_ctx->last_deq_slave_idx = 0;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct rr_scheduler_qp_ctx *rr_qp_ctx;
+
+ rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
+ rte_socket_id());
+ if (!rr_qp_ctx) {
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ NULL, /* option_set */
+ NULL /* option_get */
+};
+
+struct rte_cryptodev_scheduler scheduler = {
+ .name = "roundrobin-scheduler",
+ .description = "scheduler which will round robin burst across "
+ "slave crypto devices",
+ .mode = CDEV_SCHED_MODE_ROUNDROBIN,
+ .ops = &scheduler_rr_ops
+};
+
+struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;
diff --git a/src/spdk/dpdk/drivers/crypto/snow3g/Makefile b/src/spdk/dpdk/drivers/crypto/snow3g/Makefile
new file mode 100644
index 00000000..ee5027d0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/snow3g/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_SNOW3G_PATH),)
+$(error "Please define LIBSSO_SNOW3G_PATH environment variable")
+endif
+endif
+
+# library name
+LIB = librte_pmd_snow3g.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_snow3g_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/include
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/build
+LDLIBS += -L$(LIBSSO_SNOW3G_PATH)/build -lsso_snow3g
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/snow3g/rte_pmd_snow3g_version.map b/src/spdk/dpdk/drivers/crypto/snow3g/rte_pmd_snow3g_version.map
new file mode 100644
index 00000000..dc4d417b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/snow3g/rte_pmd_snow3g_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd.c b/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd.c
new file mode 100644
index 00000000..a17536b7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -0,0 +1,625 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_snow3g_pmd_private.h"
+
+#define SNOW3G_IV_LENGTH 16
+#define SNOW3G_MAX_BURST 8
+#define BYTE_LEN 8
+
+static uint8_t cryptodev_driver_id;
+
+/** Get xform chain order. */
+static enum snow3g_operation
+snow3g_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return SNOW3G_OP_NOT_SUPPORTED;
+
+ if (xform->next)
+ if (xform->next->next != NULL)
+ return SNOW3G_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return SNOW3G_OP_ONLY_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return SNOW3G_OP_AUTH_CIPHER;
+ else
+ return SNOW3G_OP_NOT_SUPPORTED;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return SNOW3G_OP_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return SNOW3G_OP_CIPHER_AUTH;
+ else
+ return SNOW3G_OP_NOT_SUPPORTED;
+ }
+
+ return SNOW3G_OP_NOT_SUPPORTED;
+}
+
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+snow3g_set_session_parameters(struct snow3g_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ enum snow3g_operation mode;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ mode = snow3g_get_mode(xform);
+
+ switch (mode) {
+ case SNOW3G_OP_CIPHER_AUTH:
+ auth_xform = xform->next;
+
+ /* Fall-through */
+ case SNOW3G_OP_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case SNOW3G_OP_AUTH_CIPHER:
+ cipher_xform = xform->next;
+ /* Fall-through */
+ case SNOW3G_OP_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case SNOW3G_OP_NOT_SUPPORTED:
+ default:
+ SNOW3G_LOG(ERR, "Unsupported operation chain order parameter");
+ return -ENOTSUP;
+ }
+
+ if (cipher_xform) {
+ /* Only SNOW 3G UEA2 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
+ return -ENOTSUP;
+
+ if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
+ SNOW3G_LOG(ERR, "Wrong IV length");
+ return -EINVAL;
+ }
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+
+ /* Initialize key */
+ sso_snow3g_init_key_sched(cipher_xform->cipher.key.data,
+ &sess->pKeySched_cipher);
+ }
+
+ if (auth_xform) {
+ /* Only SNOW 3G UIA2 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
+ return -ENOTSUP;
+
+ if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
+ SNOW3G_LOG(ERR, "Wrong digest length");
+ return -EINVAL;
+ }
+
+ sess->auth_op = auth_xform->auth.op;
+
+ if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
+ SNOW3G_LOG(ERR, "Wrong IV length");
+ return -EINVAL;
+ }
+ sess->auth_iv_offset = auth_xform->auth.iv.offset;
+
+ /* Initialize key */
+ sso_snow3g_init_key_sched(auth_xform->auth.key.data,
+ &sess->pKeySched_hash);
+ }
+
+
+ sess->op = mode;
+
+ return 0;
+}
+
+/** Get SNOW 3G session. */
+static struct snow3g_session *
+snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
+{
+ struct snow3g_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct snow3g_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct snow3g_session *)_sess_private_data;
+
+ if (unlikely(snow3g_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+
+ return sess;
+}
+
+/** Encrypt/decrypt mbufs with same cipher key. */
+static uint8_t
+process_snow3g_cipher_op(struct rte_crypto_op **ops,
+ struct snow3g_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src[SNOW3G_MAX_BURST], *dst[SNOW3G_MAX_BURST];
+ uint8_t *iv[SNOW3G_MAX_BURST];
+ uint32_t num_bytes[SNOW3G_MAX_BURST];
+
+ for (i = 0; i < num_ops; i++) {
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst ?
+ rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3) :
+ rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->cipher_iv_offset);
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ processed_ops++;
+ }
+
+ sso_snow3g_f8_n_buffer(&session->pKeySched_cipher, iv, src, dst,
+ num_bytes, processed_ops);
+
+ return processed_ops;
+}
+
+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
+ struct snow3g_session *session)
+{
+ uint8_t *src, *dst;
+ uint8_t *iv;
+ uint32_t length_in_bits, offset_in_bits;
+
+ offset_in_bits = op->sym->cipher.data.offset;
+ src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+ if (op->sym->m_dst == NULL) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG(ERR, "bit-level in-place not supported\n");
+ return 0;
+ }
+ dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->cipher_iv_offset);
+ length_in_bits = op->sym->cipher.data.length;
+
+ sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, iv,
+ src, dst, length_in_bits, offset_in_bits);
+
+ return 1;
+}
+
+/** Generate/verify hash from mbufs with same hash key. */
+static int
+process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
+ struct snow3g_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src, *dst;
+ uint32_t length_in_bits;
+ uint8_t *iv;
+
+ for (i = 0; i < num_ops; i++) {
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG(ERR, "Offset");
+ break;
+ }
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->auth.data.offset >> 3);
+ iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->auth_iv_offset);
+
+ if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ dst = qp->temp_digest;
+
+ sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
+ iv, src,
+ length_in_bits, dst);
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ SNOW3G_DIGEST_LENGTH) != 0)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ dst = ops[i]->sym->auth.digest.data;
+
+ sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
+ iv, src,
+ length_in_bits, dst);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which shares the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
+ struct snow3g_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
+{
+ unsigned i;
+ unsigned enqueued_ops, processed_ops;
+
+#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
+ for (i = 0; i < num_ops; i++) {
+ if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+ (ops[i]->sym->m_dst != NULL &&
+ !rte_pktmbuf_is_contiguous(
+ ops[i]->sym->m_dst))) {
+ SNOW3G_LOG(ERR, "PMD supports only contiguous mbufs, "
+ "op (%p) provides noncontiguous mbuf as "
+ "source/destination buffer.\n", ops[i]);
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return 0;
+ }
+ }
+#endif
+
+ switch (session->op) {
+ case SNOW3G_OP_ONLY_CIPHER:
+ processed_ops = process_snow3g_cipher_op(ops,
+ session, num_ops);
+ break;
+ case SNOW3G_OP_ONLY_AUTH:
+ processed_ops = process_snow3g_hash_op(qp, ops, session,
+ num_ops);
+ break;
+ case SNOW3G_OP_CIPHER_AUTH:
+ processed_ops = process_snow3g_cipher_op(ops, session,
+ num_ops);
+ process_snow3g_hash_op(qp, ops, session, processed_ops);
+ break;
+ case SNOW3G_OP_AUTH_CIPHER:
+ processed_ops = process_snow3g_hash_op(qp, ops, session,
+ num_ops);
+ process_snow3g_cipher_op(ops, session, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(session, 0, sizeof(struct snow3g_session));
+ memset(ops[i]->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, session);
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)ops, processed_ops, NULL);
+ qp->qp_stats.enqueued_count += enqueued_ops;
+ *accumulated_enqueued_ops += enqueued_ops;
+
+ return enqueued_ops;
+}
+
+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
+ struct snow3g_qp *qp, uint16_t *accumulated_enqueued_ops)
+{
+ unsigned enqueued_op, processed_op;
+
+ switch (session->op) {
+ case SNOW3G_OP_ONLY_CIPHER:
+ processed_op = process_snow3g_cipher_op_bit(op,
+ session);
+ break;
+ case SNOW3G_OP_ONLY_AUTH:
+ processed_op = process_snow3g_hash_op(qp, &op, session, 1);
+ break;
+ case SNOW3G_OP_CIPHER_AUTH:
+ processed_op = process_snow3g_cipher_op_bit(op, session);
+ if (processed_op == 1)
+ process_snow3g_hash_op(qp, &op, session, 1);
+ break;
+ case SNOW3G_OP_AUTH_CIPHER:
+ processed_op = process_snow3g_hash_op(qp, &op, session, 1);
+ if (processed_op == 1)
+ process_snow3g_cipher_op_bit(op, session);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_op = 0;
+ }
+
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Free session if a session-less crypto op. */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(op->sym->session, 0, sizeof(struct snow3g_session));
+ rte_cryptodev_sym_session_free(op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ enqueued_op = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)&op, processed_op, NULL);
+ qp->qp_stats.enqueued_count += enqueued_op;
+ *accumulated_enqueued_ops += enqueued_op;
+
+ return enqueued_op;
+}
+
+static uint16_t
+snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
+ struct rte_crypto_op *curr_c_op;
+
+ struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
+ struct snow3g_qp *qp = queue_pair;
+ unsigned i;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+
+ for (i = 0; i < nb_ops; i++) {
+ curr_c_op = ops[i];
+
+ /* Set status as enqueued (not processed yet) by default. */
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ curr_sess = snow3g_get_session(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL ||
+ curr_sess->op == SNOW3G_OP_NOT_SUPPORTED)) {
+ curr_c_op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ break;
+ }
+
+ /* If length/offset is at bit-level, process this buffer alone. */
+ if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((curr_c_op->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ /* Process the ops of the previous session. */
+ if (prev_sess != NULL) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+
+ processed_ops = process_op_bit(curr_c_op, curr_sess,
+ qp, &enqueued_ops);
+ if (processed_ops != 1)
+ break;
+
+ continue;
+ }
+
+ /* Batch ops that share the same session. */
+ if (prev_sess == NULL) {
+ prev_sess = curr_sess;
+ c_ops[burst_size++] = curr_c_op;
+ } else if (curr_sess == prev_sess) {
+ c_ops[burst_size++] = curr_c_op;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == SNOW3G_MAX_BURST) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+ } else {
+ /*
+ * Different session, process the ops
+ * of the previous session.
+ */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = curr_sess;
+
+ c_ops[burst_size++] = curr_c_op;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last session. */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ }
+
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+ return enqueued_ops;
+}
+
+static uint16_t
+snow3g_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+ struct snow3g_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)c_ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_snow3g_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_snow3g_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct snow3g_private *internals;
+ uint64_t cpu_flags = RTE_CRYPTODEV_FF_CPU_SSE;
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ SNOW3G_LOG(ERR, "failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_snow3g_pmd_ops;
+
+ /* Register RX/TX burst functions for data path. */
+ dev->dequeue_burst = snow3g_pmd_dequeue_burst;
+ dev->enqueue_burst = snow3g_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ cpu_flags;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+
+ return 0;
+init_error:
+ SNOW3G_LOG(ERR, "driver %s: cryptodev_snow3g_create failed",
+ init_params->name);
+
+ cryptodev_snow3g_remove(vdev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct snow3g_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ return cryptodev_snow3g_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_snow3g_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
+ .probe = cryptodev_snow3g_probe,
+ .remove = cryptodev_snow3g_remove
+};
+
+static struct cryptodev_driver snow3g_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv,
+ cryptodev_snow3g_pmd_drv.driver, cryptodev_driver_id);
+
+RTE_INIT(snow3g_init_log)
+{
+ snow3g_logtype_driver = rte_log_register("pmd.crypto.snow3g");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
new file mode 100644
index 00000000..cfbc9522
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_snow3g_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities snow3g_pmd_capabilities[] = {
+ { /* SNOW 3G (UIA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UEA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+snow3g_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+snow3g_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+snow3g_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+snow3g_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+snow3g_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct snow3g_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+snow3g_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct snow3g_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+snow3g_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct snow3g_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = snow3g_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+snow3g_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+snow3g_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct snow3g_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "snow3g_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ SNOW3G_LOG(INFO, "Reusing existing ring %s"
+ " for processed packets",
+ qp->name);
+ return r;
+ }
+
+ SNOW3G_LOG(ERR, "Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+snow3g_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct snow3g_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ snow3g_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("SNOW 3G PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (snow3g_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = snow3g_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+snow3g_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the SNOW 3G session structure */
+static unsigned
+snow3g_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct snow3g_session);
+}
+
+/** Configure a SNOW 3G session from a crypto xform chain */
+static int
+snow3g_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ SNOW3G_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ SNOW3G_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = snow3g_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ SNOW3G_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+snow3g_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct snow3g_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops snow3g_pmd_ops = {
+ .dev_configure = snow3g_pmd_config,
+ .dev_start = snow3g_pmd_start,
+ .dev_stop = snow3g_pmd_stop,
+ .dev_close = snow3g_pmd_close,
+
+ .stats_get = snow3g_pmd_stats_get,
+ .stats_reset = snow3g_pmd_stats_reset,
+
+ .dev_infos_get = snow3g_pmd_info_get,
+
+ .queue_pair_setup = snow3g_pmd_qp_setup,
+ .queue_pair_release = snow3g_pmd_qp_release,
+ .queue_pair_count = snow3g_pmd_qp_count,
+
+ .sym_session_get_size = snow3g_pmd_sym_session_get_size,
+ .sym_session_configure = snow3g_pmd_sym_session_configure,
+ .sym_session_clear = snow3g_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
new file mode 100644
index 00000000..b7807b62
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#ifndef _RTE_SNOW3G_PMD_PRIVATE_H_
+#define _RTE_SNOW3G_PMD_PRIVATE_H_
+
+#include <sso_snow3g.h>
+
+#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
+/**< SNOW 3G PMD device name */
+
+/** SNOW 3G PMD LOGTYPE DRIVER */
+int snow3g_logtype_driver;
+
+#define SNOW3G_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, snow3g_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+#define SNOW3G_DIGEST_LENGTH 4
+
+/** private data structure for each virtual SNOW 3G device */
+struct snow3g_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+/** SNOW 3G buffer queue pair */
+struct snow3g_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+enum snow3g_operation {
+ SNOW3G_OP_ONLY_CIPHER,
+ SNOW3G_OP_ONLY_AUTH,
+ SNOW3G_OP_CIPHER_AUTH,
+ SNOW3G_OP_AUTH_CIPHER,
+ SNOW3G_OP_NOT_SUPPORTED
+};
+
+/** SNOW 3G private session structure */
+struct snow3g_session {
+ enum snow3g_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ sso_snow3g_key_schedule_t pKeySched_cipher;
+ sso_snow3g_key_schedule_t pKeySched_hash;
+ uint16_t cipher_iv_offset;
+ uint16_t auth_iv_offset;
+} __rte_cache_aligned;
+
+
+extern int
+snow3g_set_session_parameters(struct snow3g_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_snow3g_pmd_ops;
+
+
+
+#endif /* _RTE_SNOW3G_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/Makefile b/src/spdk/dpdk/drivers/crypto/virtio/Makefile
new file mode 100644
index 00000000..be7b828f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_virtio_crypto.a
+
+#
+# include virtio_crypto.h
+#
+CFLAGS += -I$(RTE_SDK)/lib/librte_vhost
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_virtio_crypto_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtqueue.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_cryptodev.c
+
+# this lib depends upon:
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/meson.build b/src/spdk/dpdk/drivers/crypto/virtio/meson.build
new file mode 100644
index 00000000..b15b3f9f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+includes += include_directories('../../../lib/librte_vhost')
+deps += 'bus_pci'
+name = 'virtio_crypto'
+sources = files('virtio_cryptodev.c', 'virtio_pci.c',
+ 'virtio_rxtx.c', 'virtqueue.c')
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map b/src/spdk/dpdk/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map
new file mode 100644
index 00000000..de8e412f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map
@@ -0,0 +1,3 @@
+DPDK_18.05 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_algs.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_algs.h
new file mode 100644
index 00000000..4c44af37
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_algs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_ALGS_H_
+#define _VIRTIO_CRYPTO_ALGS_H_
+
+#include <rte_memory.h>
+
+#include "virtio_crypto.h"
+
+struct virtio_crypto_session {
+ uint64_t session_id;
+
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } iv;
+
+ struct {
+ uint32_t length;
+ phys_addr_t phys_addr;
+ } aad;
+
+ struct virtio_crypto_op_ctrl_req ctrl;
+};
+
+#endif /* _VIRTIO_CRYPTO_ALGS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_capabilities.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_capabilities.h
new file mode 100644
index 00000000..03c30dee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_capabilities.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_CAPABILITIES_H_
+#define _VIRTIO_CRYPTO_CAPABILITIES_H_
+
+#define VIRTIO_SYM_CAPABILITIES \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 20, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#endif /* _VIRTIO_CRYPTO_CAPABILITIES_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.c b/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.c
new file mode 100644
index 00000000..568b5a40
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.c
@@ -0,0 +1,1505 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_eal.h>
+
+#include "virtio_cryptodev.h"
+#include "virtqueue.h"
+#include "virtio_crypto_algs.h"
+#include "virtio_crypto_capabilities.h"
+
+int virtio_crypto_logtype_init;
+int virtio_crypto_logtype_session;
+int virtio_crypto_logtype_rx;
+int virtio_crypto_logtype_tx;
+int virtio_crypto_logtype_driver;
+
+static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config);
+static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
+static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info);
+static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats);
+static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
+static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id,
+ struct rte_mempool *session_pool);
+static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
+static unsigned int virtio_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev);
+static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess);
+static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_mempool *mp);
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
+ { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
+ VIRTIO_CRYPTO_PCI_DEVICEID) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
+ VIRTIO_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+uint8_t cryptodev_virtio_driver_id;
+
+#define NUM_ENTRY_SYM_CREATE_SESSION 4
+
+static int
+virtio_crypto_send_command(struct virtqueue *vq,
+ struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
+ uint8_t *auth_key, struct virtio_crypto_session *session)
+{
+ uint8_t idx = 0;
+ uint8_t needed = 1;
+ uint32_t head = 0;
+ uint32_t len_cipher_key = 0;
+ uint32_t len_auth_key = 0;
+ uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+ uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
+ uint32_t len_total = 0;
+ uint32_t input_offset = 0;
+ void *virt_addr_started = NULL;
+ phys_addr_t phys_addr_started;
+ struct vring_desc *desc;
+ uint32_t desc_offset;
+ struct virtio_crypto_session_input *input;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (session == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
+ return -EINVAL;
+ }
+ /* cipher only is supported, it is available if auth_key is NULL */
+ if (!cipher_key) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
+ return -EINVAL;
+ }
+
+ head = vq->vq_desc_head_idx;
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
+ head, vq);
+
+ if (vq->vq_free_cnt < needed) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry");
+ return -ENOSPC;
+ }
+
+ /* calculate the length of cipher key */
+ if (cipher_key) {
+ switch (ctrl->u.sym_create_session.op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ len_cipher_key
+ = ctrl->u.sym_create_session.u.cipher
+ .para.keylen;
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ len_cipher_key
+ = ctrl->u.sym_create_session.u.chain
+ .para.cipher_param.keylen;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
+ return -EINVAL;
+ }
+ }
+
+ /* calculate the length of auth key */
+ if (auth_key) {
+ len_auth_key =
+ ctrl->u.sym_create_session.u.chain.para.u.mac_param
+ .auth_key_len;
+ }
+
+ /*
+ * malloc memory to store indirect vring_desc entries, including
+ * ctrl request, cipher key, auth key, session input and desc vring
+ */
+ desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
+ + len_session_input;
+ virt_addr_started = rte_malloc(NULL,
+ desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
+ * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+ if (virt_addr_started == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
+ return -ENOSPC;
+ }
+ phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
+
+ /* address to store indirect vring desc entries */
+ desc = (struct vring_desc *)
+ ((uint8_t *)virt_addr_started + desc_offset);
+
+ /* ctrl req part */
+ memcpy(virt_addr_started, ctrl, len_ctrl_req);
+ desc[idx].addr = phys_addr_started;
+ desc[idx].len = len_ctrl_req;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_ctrl_req;
+ input_offset += len_ctrl_req;
+
+ /* cipher key part */
+ if (len_cipher_key > 0) {
+ memcpy((uint8_t *)virt_addr_started + len_total,
+ cipher_key, len_cipher_key);
+
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_cipher_key;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_cipher_key;
+ input_offset += len_cipher_key;
+ }
+
+ /* auth key part */
+ if (len_auth_key > 0) {
+ memcpy((uint8_t *)virt_addr_started + len_total,
+ auth_key, len_auth_key);
+
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_auth_key;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_auth_key;
+ input_offset += len_auth_key;
+ }
+
+ /* input part */
+ input = (struct virtio_crypto_session_input *)
+ ((uint8_t *)virt_addr_started + input_offset);
+ input->status = VIRTIO_CRYPTO_ERR;
+ input->session_id = ~0ULL;
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_session_input;
+ desc[idx].flags = VRING_DESC_F_WRITE;
+ idx++;
+
+ /* use a single desc entry */
+ vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
+ vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+ vq->vq_free_cnt--;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
+ vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
+ rte_rmb();
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ rte_rmb();
+ usleep(100);
+ }
+
+ while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+
+ while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
+ "vq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ /* get the result */
+ if (input->status != VIRTIO_CRYPTO_OK) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! "
+ "status=%u, session_id=%" PRIu64 "",
+ input->status, input->session_id);
+ rte_free(virt_addr_started);
+ ret = -1;
+ } else {
+ session->session_id = input->session_id;
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
+ "session_id=%" PRIu64 "", input->session_id);
+ rte_free(virt_addr_started);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+void
+virtio_crypto_queue_release(struct virtqueue *vq)
+{
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (vq) {
+ hw = vq->hw;
+ /* Select and deactivate the queue */
+ VTPCI_OPS(hw)->del_queue(hw, vq);
+
+ rte_memzone_free(vq->mz);
+ rte_mempool_free(vq->mpool);
+ rte_free(vq);
+ }
+}
+
+#define MPOOL_MAX_NAME_SZ 32
+
+int
+virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+ int queue_type,
+ uint16_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ int socket_id,
+ struct virtqueue **pvq)
+{
+ char vq_name[VIRTQUEUE_MAX_NAME_SZ];
+ char mpool_name[MPOOL_MAX_NAME_SZ];
+ const struct rte_memzone *mz;
+ unsigned int vq_size, size;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = NULL;
+ uint32_t i = 0;
+ uint32_t j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
+
+ /*
+ * Read the virtqueue size from the Queue Size field
+ * Always power of 2 and if 0 virtqueue does not exist
+ */
+ vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
+ if (vq_size == 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
+ return -EINVAL;
+ }
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
+
+ if (!rte_is_power_of_2(vq_size)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not powerof 2");
+ return -EINVAL;
+ }
+
+ if (queue_type == VTCRYPTO_DATAQ) {
+ snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
+ dev->data->dev_id, vtpci_queue_idx);
+ snprintf(mpool_name, sizeof(mpool_name),
+ "dev%d_dataqueue%d_mpool",
+ dev->data->dev_id, vtpci_queue_idx);
+ } else if (queue_type == VTCRYPTO_CTRLQ) {
+ snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
+ dev->data->dev_id);
+ snprintf(mpool_name, sizeof(mpool_name),
+ "dev%d_controlqueue_mpool",
+ dev->data->dev_id);
+ }
+ size = RTE_ALIGN_CEIL(sizeof(*vq) +
+ vq_size * sizeof(struct vq_desc_extra),
+ RTE_CACHE_LINE_SIZE);
+ vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (vq == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
+ return -ENOMEM;
+ }
+
+ if (queue_type == VTCRYPTO_DATAQ) {
+ /* pre-allocate a mempool and use it in the data plane to
+ * improve performance
+ */
+ vq->mpool = rte_mempool_lookup(mpool_name);
+ if (vq->mpool == NULL)
+ vq->mpool = rte_mempool_create(mpool_name,
+ vq_size,
+ sizeof(struct virtio_crypto_op_cookie),
+ RTE_CACHE_LINE_SIZE, 0,
+ NULL, NULL, NULL, NULL, socket_id,
+ 0);
+ if (!vq->mpool) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
+ "Cannot create mempool");
+ goto mpool_create_err;
+ }
+ for (i = 0; i < vq_size; i++) {
+ vq->vq_descx[i].cookie =
+ rte_zmalloc("crypto PMD op cookie pointer",
+ sizeof(struct virtio_crypto_op_cookie),
+ RTE_CACHE_LINE_SIZE);
+ if (vq->vq_descx[i].cookie == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
+ "alloc mem for cookie");
+ goto cookie_alloc_err;
+ }
+ }
+ }
+
+ vq->hw = hw;
+ vq->dev_id = dev->data->dev_id;
+ vq->vq_queue_index = vtpci_queue_idx;
+ vq->vq_nentries = vq_size;
+
+ /*
+ * Using part of the vring entries is permitted, but the maximum
+ * is vq_size
+ */
+ if (nb_desc == 0 || nb_desc > vq_size)
+ nb_desc = vq_size;
+ vq->vq_free_cnt = nb_desc;
+
+ /*
+ * Reserve a memzone for vring elements
+ */
+ size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
+ (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
+ size, vq->vq_ring_size);
+
+ mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+ socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
+ if (mz == NULL) {
+ if (rte_errno == EEXIST)
+ mz = rte_memzone_lookup(vq_name);
+ if (mz == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
+ goto mz_reserve_err;
+ }
+ }
+
+ /*
+ * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
+ * and only accepts 32 bit page frame number.
+ * Check if the allocated physical memory exceeds 16TB.
+ */
+ if ((mz->phys_addr + vq->vq_ring_size - 1)
+ >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
+ "above 16TB!");
+ goto vring_addr_err;
+ }
+
+ memset(mz->addr, 0, sizeof(mz->len));
+ vq->mz = mz;
+ vq->vq_ring_mem = mz->phys_addr;
+ vq->vq_ring_virt_mem = mz->addr;
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
+ (uint64_t)mz->phys_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
+ (uint64_t)(uintptr_t)mz->addr);
+
+ *pvq = vq;
+
+ return 0;
+
+vring_addr_err:
+ rte_memzone_free(mz);
+mz_reserve_err:
+cookie_alloc_err:
+ rte_mempool_free(vq->mpool);
+ if (i != 0) {
+ for (j = 0; j < i; j++)
+ rte_free(vq->vq_descx[j].cookie);
+ }
+mpool_create_err:
+ rte_free(vq);
+ return -ENOMEM;
+}
+
+static int
+virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
+{
+ int ret;
+ struct virtqueue *vq;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ /* if virtio device has started, do not touch the virtqueues */
+ if (dev->data->dev_started)
+ return 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
+ 0, SOCKET_ID_ANY, &vq);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
+ return ret;
+ }
+
+ hw->cvq = vq;
+
+ return 0;
+}
+
+static void
+virtio_crypto_free_queues(struct rte_cryptodev *dev)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* control queue release */
+ virtio_crypto_queue_release(hw->cvq);
+
+ /* data queue release */
+ for (i = 0; i < hw->max_dataqueues; i++)
+ virtio_crypto_queue_release(dev->data->queue_pairs[i]);
+}
+
+static int
+virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
+{
+ return 0;
+}
+
+/*
+ * dev_ops for virtio, bare necessities for basic operation
+ */
+static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
+ /* Device related operations */
+ .dev_configure = virtio_crypto_dev_configure,
+ .dev_start = virtio_crypto_dev_start,
+ .dev_stop = virtio_crypto_dev_stop,
+ .dev_close = virtio_crypto_dev_close,
+ .dev_infos_get = virtio_crypto_dev_info_get,
+
+ .stats_get = virtio_crypto_dev_stats_get,
+ .stats_reset = virtio_crypto_dev_stats_reset,
+
+ .queue_pair_setup = virtio_crypto_qp_setup,
+ .queue_pair_release = virtio_crypto_qp_release,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .sym_session_get_size = virtio_crypto_sym_get_session_private_size,
+ .sym_session_configure = virtio_crypto_sym_configure_session,
+ .sym_session_clear = virtio_crypto_sym_clear_session
+};
+
+static void
+virtio_crypto_update_stats(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (stats == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
+ return;
+ }
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ const struct virtqueue *data_queue
+ = dev->data->queue_pairs[i];
+ if (data_queue == NULL)
+ continue;
+
+ stats->enqueued_count += data_queue->packets_sent_total;
+ stats->enqueue_err_count += data_queue->packets_sent_failed;
+
+ stats->dequeued_count += data_queue->packets_received_total;
+ stats->dequeue_err_count
+ += data_queue->packets_received_failed;
+ }
+}
+
+static void
+virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ virtio_crypto_update_stats(dev, stats);
+}
+
+static void
+virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ struct virtqueue *data_queue = dev->data->queue_pairs[i];
+ if (data_queue == NULL)
+ continue;
+
+ data_queue->packets_sent_total = 0;
+ data_queue->packets_sent_failed = 0;
+
+ data_queue->packets_received_total = 0;
+ data_queue->packets_received_failed = 0;
+ }
+}
+
+static int
+virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id,
+ struct rte_mempool *session_pool __rte_unused)
+{
+ int ret;
+ struct virtqueue *vq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* if virtio dev is started, do not touch the virtqueues */
+ if (dev->data->dev_started)
+ return 0;
+
+ ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
+ qp_conf->nb_descriptors, socket_id, &vq);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "virtio crypto data queue initialization failed\n");
+ return ret;
+ }
+
+ dev->data->queue_pairs[queue_pair_id] = vq;
+
+ return 0;
+}
+
+static int
+virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct virtqueue *vq
+ = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (vq == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
+ return 0;
+ }
+
+ virtio_crypto_queue_release(vq);
+ return 0;
+}
+
+static int
+virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
+{
+ uint64_t host_features;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Prepare guest_features: feature that driver wants to support */
+ VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
+ req_features);
+
+ /* Read device(host) feature bits */
+ host_features = VTPCI_OPS(hw)->get_features(hw);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
+ host_features);
+
+ /*
+ * Negotiate features: Subset of device feature bits are written back
+ * guest feature bits.
+ */
+ hw->guest_features = req_features;
+ hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
+ host_features);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
+ hw->guest_features);
+
+ if (hw->modern) {
+ if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "VIRTIO_F_VERSION_1 features is not enabled.");
+ return -1;
+ }
+ vtpci_cryptodev_set_status(hw,
+ VIRTIO_CONFIG_STATUS_FEATURES_OK);
+ if (!(vtpci_cryptodev_get_status(hw) &
+ VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
+ "status!");
+ return -1;
+ }
+ }
+
+ hw->req_guest_features = req_features;
+
+ return 0;
+}
+
+/* reset device and renegotiate features if needed */
+static int
+virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
+ uint64_t req_features)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+ struct virtio_crypto_config local_config;
+ struct virtio_crypto_config *config = &local_config;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Reset the device although not necessary at startup */
+ vtpci_cryptodev_reset(hw);
+
+ /* Tell the host we've noticed this device. */
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+
+ /* Tell the host we've known how to drive the device. */
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+ if (virtio_negotiate_features(hw, req_features) < 0)
+ return -1;
+
+ /* Get status of the device */
+ vtpci_read_cryptodev_config(hw,
+ offsetof(struct virtio_crypto_config, status),
+ &config->status, sizeof(config->status));
+ if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
+ "not ready");
+ return -1;
+ }
+
+ /* Get number of data queues */
+ vtpci_read_cryptodev_config(hw,
+ offsetof(struct virtio_crypto_config, max_dataqueues),
+ &config->max_dataqueues,
+ sizeof(config->max_dataqueues));
+ hw->max_dataqueues = config->max_dataqueues;
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
+ hw->max_dataqueues);
+
+ return 0;
+}
+
+/*
+ * This function is based on probe() function
+ * It returns 0 on success.
+ */
+static int
+crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *cryptodev;
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
+ init_params);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ cryptodev->driver_id = cryptodev_virtio_driver_id;
+ cryptodev->dev_ops = &virtio_crypto_dev_ops;
+
+ cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ hw = cryptodev->data->dev_private;
+ hw->dev_id = cryptodev->data->dev_id;
+ hw->virtio_dev_capabilities = virtio_capabilities;
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
+ cryptodev->data->dev_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ /* pci device init */
+ if (vtpci_cryptodev_init(pci_dev, hw))
+ return -1;
+
+ if (virtio_crypto_init_device(cryptodev,
+ VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ return -EPERM;
+
+ if (cryptodev->data->dev_started) {
+ virtio_crypto_dev_stop(cryptodev);
+ virtio_crypto_dev_close(cryptodev);
+ }
+
+ cryptodev->dev_ops = NULL;
+ cryptodev->enqueue_burst = NULL;
+ cryptodev->dequeue_burst = NULL;
+
+ /* release control queue */
+ virtio_crypto_queue_release(hw->cvq);
+
+ rte_free(cryptodev->data);
+ cryptodev->data = NULL;
+
+ VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
+
+ return 0;
+}
+
+static int
+virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (virtio_crypto_init_device(cryptodev,
+ VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+ return -1;
+
+ /* setup control queue
+ * [0, 1, ... ,(config->max_dataqueues - 1)] are data queues
+ * config->max_dataqueues is the control queue
+ */
+ if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
+ return -1;
+ }
+ virtio_crypto_ctrlq_start(cryptodev);
+
+ return 0;
+}
+
+static void
+virtio_crypto_dev_stop(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
+
+ vtpci_cryptodev_reset(hw);
+
+ virtio_crypto_dev_free_mbufs(dev);
+ virtio_crypto_free_queues(dev);
+
+ dev->data->dev_started = 0;
+}
+
+static int
+virtio_crypto_dev_start(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ if (dev->data->dev_started)
+ return 0;
+
+ /* Do final configuration before queue engine starts */
+ virtio_crypto_dataq_start(dev);
+ vtpci_cryptodev_reinit_complete(hw);
+
+ dev->data->dev_started = 1;
+
+ return 0;
+}
+
+static void
+virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
+{
+ uint32_t i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
+ "and unused buf", i);
+ VIRTQUEUE_DUMP((struct virtqueue *)
+ dev->data->queue_pairs[i]);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
+ i, dev->data->queue_pairs[i]);
+
+ virtqueue_detatch_unused(dev->data->queue_pairs[i]);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
+ "unused buf", i);
+ VIRTQUEUE_DUMP(
+ (struct virtqueue *)dev->data->queue_pairs[i]);
+ }
+}
+
+static unsigned int
+virtio_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
+}
+
+static int
+virtio_crypto_check_sym_session_paras(
+ struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(dev == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
+ return -1;
+ }
+ if (unlikely(dev->data == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
+ return -1;
+ }
+ hw = dev->data->dev_private;
+ if (unlikely(hw == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
+ return -1;
+ }
+ if (unlikely(hw->cvq == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_check_sym_clear_session_paras(
+ struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (sess == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
+ return -1;
+ }
+
+ return virtio_crypto_check_sym_session_paras(dev);
+}
+
+#define NUM_ENTRY_SYM_CLEAR_SESSION 2
+
+static void
+virtio_crypto_sym_clear_session(
+ struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct virtio_crypto_hw *hw;
+ struct virtqueue *vq;
+ struct virtio_crypto_session *session;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct vring_desc *desc;
+ uint8_t *status;
+ uint8_t needed = 1;
+ uint32_t head;
+ uint8_t *malloc_virt_addr;
+ uint64_t malloc_phys_addr;
+ uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
+ uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+ uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
+ return;
+
+ hw = dev->data->dev_private;
+ vq = hw->cvq;
+ session = (struct virtio_crypto_session *)get_sym_session_private_data(
+ sess, cryptodev_virtio_driver_id);
+ if (session == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
+ return;
+ }
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
+ "vq = %p", vq->vq_desc_head_idx, vq);
+
+ if (vq->vq_free_cnt < needed) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "vq->vq_free_cnt = %d is less than %d, "
+ "not enough", vq->vq_free_cnt, needed);
+ return;
+ }
+
+ /*
+ * malloc memory to store information of ctrl request op,
+ * returned status and desc vring
+ */
+ malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
+ + NUM_ENTRY_SYM_CLEAR_SESSION
+ * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+ if (malloc_virt_addr == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
+ return;
+ }
+ malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
+
+ /* assign ctrl request op part */
+ ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
+ ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
+ /* default data virtqueue is 0 */
+ ctrl->header.queue_id = 0;
+ ctrl->u.destroy_session.session_id = session->session_id;
+
+ /* status part */
+ status = &(((struct virtio_crypto_inhdr *)
+ ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
+ *status = VIRTIO_CRYPTO_ERR;
+
+ /* indirect desc vring part */
+ desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
+ + desc_offset);
+
+ /* ctrl request part */
+ desc[0].addr = malloc_phys_addr;
+ desc[0].len = len_op_ctrl_req;
+ desc[0].flags = VRING_DESC_F_NEXT;
+ desc[0].next = 1;
+
+ /* status part */
+ desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
+ desc[1].len = len_inhdr;
+ desc[1].flags = VRING_DESC_F_WRITE;
+
+ /* use only a single desc entry */
+ head = vq->vq_desc_head_idx;
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+ vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
+ vq->vq_ring.desc[head].len
+ = NUM_ENTRY_SYM_CLEAR_SESSION
+ * sizeof(struct vring_desc);
+ vq->vq_free_cnt -= needed;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
+ vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
+ rte_rmb();
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ rte_rmb();
+ usleep(100);
+ }
+
+ while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+ while (vq->vq_ring.desc[desc_idx].flags
+ & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ if (*status != VIRTIO_CRYPTO_OK) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
+ "status=%"PRIu32", session_id=%"PRIu64"",
+ *status, session->session_id);
+ rte_free(malloc_virt_addr);
+ return;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
+ "vq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
+ session->session_id);
+
+ memset(session, 0, sizeof(struct virtio_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(session);
+ set_sym_session_private_data(sess, cryptodev_virtio_driver_id, NULL);
+ rte_mempool_put(sess_mp, session);
+ rte_free(malloc_virt_addr);
+}
+
+static struct rte_crypto_cipher_xform *
+virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_auth_xform *
+virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+/** Get xform chain order */
+static int
+virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return -1;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next == NULL)
+ return VIRTIO_CRYPTO_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL)
+ return VIRTIO_CRYPTO_CMD_AUTH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
+
+ return -1;
+}
+
+static int
+virtio_crypto_sym_pad_cipher_param(
+ struct virtio_crypto_cipher_session_para *para,
+ struct rte_crypto_cipher_xform *cipher_xform)
+{
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
+ "Cipher alg %u", cipher_xform->algo);
+ return -1;
+ }
+
+ para->keylen = cipher_xform->key.length;
+ switch (cipher_xform->op) {
+ case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+ para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
+ break;
+ case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+ para->op = VIRTIO_CRYPTO_OP_DECRYPT;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
+ "parameter");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_pad_auth_param(
+ struct virtio_crypto_op_ctrl_req *ctrl,
+ struct rte_crypto_auth_xform *auth_xform)
+{
+ uint32_t *algo;
+ struct virtio_crypto_alg_chain_session_para *para =
+ &(ctrl->u.sym_create_session.u.chain.para);
+
+ switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
+ case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
+ algo = &(para->u.hash_param.algo);
+ break;
+ case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
+ algo = &(para->u.mac_param.algo);
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
+ "specified",
+ ctrl->u.sym_create_session.u.chain.para.hash_mode);
+ return -1;
+ }
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_pad_op_ctrl_req(
+ struct virtio_crypto_op_ctrl_req *ctrl,
+ struct rte_crypto_sym_xform *xform, bool is_chainned,
+ uint8_t **cipher_key_data, uint8_t **auth_key_data,
+ struct virtio_crypto_session *session)
+{
+ int ret;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = virtio_crypto_get_cipher_xform(xform);
+ if (cipher_xform) {
+ if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "cipher IV size cannot be longer than %u",
+ VIRTIO_CRYPTO_MAX_IV_SIZE);
+ return -1;
+ }
+ if (is_chainned)
+ ret = virtio_crypto_sym_pad_cipher_param(
+ &ctrl->u.sym_create_session.u.chain.para
+ .cipher_param, cipher_xform);
+ else
+ ret = virtio_crypto_sym_pad_cipher_param(
+ &ctrl->u.sym_create_session.u.cipher.para,
+ cipher_xform);
+
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "pad cipher parameter failed");
+ return -1;
+ }
+
+ *cipher_key_data = cipher_xform->key.data;
+
+ session->iv.offset = cipher_xform->iv.offset;
+ session->iv.length = cipher_xform->iv.length;
+ }
+
+ /* Get auth xform from crypto xform chain */
+ auth_xform = virtio_crypto_get_auth_xform(xform);
+ if (auth_xform) {
+ /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
+ struct virtio_crypto_alg_chain_session_para *para =
+ &(ctrl->u.sym_create_session.u.chain.para);
+ if (auth_xform->key.length) {
+ para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
+ para->u.mac_param.auth_key_len =
+ (uint32_t)auth_xform->key.length;
+ para->u.mac_param.hash_result_len =
+ auth_xform->digest_length;
+
+ *auth_key_data = auth_xform->key.data;
+ } else {
+ para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
+ para->u.hash_param.hash_result_len =
+ auth_xform->digest_length;
+ }
+
+ ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
+ "failed");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_check_sym_configure_session_paras(
+ struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sym_sess,
+ struct rte_mempool *mempool)
+{
+ if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
+ unlikely(mempool == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
+ return -1;
+ }
+
+ if (virtio_crypto_check_sym_session_paras(dev) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_configure_session(
+ struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ int ret;
+ struct virtio_crypto_session crypto_sess;
+ void *session_private = &crypto_sess;
+ struct virtio_crypto_session *session;
+ struct virtio_crypto_op_ctrl_req *ctrl_req;
+ enum virtio_crypto_cmd_id cmd_id;
+ uint8_t *cipher_key_data = NULL;
+ uint8_t *auth_key_data = NULL;
+ struct virtio_crypto_hw *hw;
+ struct virtqueue *control_vq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
+ sess, mempool);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
+ return ret;
+ }
+
+ if (rte_mempool_get(mempool, &session_private)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ session = (struct virtio_crypto_session *)session_private;
+ memset(session, 0, sizeof(struct virtio_crypto_session));
+ ctrl_req = &session->ctrl;
+ ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
+ /* FIXME: support multiqueue */
+ ctrl_req->header.queue_id = 0;
+
+ hw = dev->data->dev_private;
+ control_vq = hw->cvq;
+
+ cmd_id = virtio_crypto_get_chain_order(xform);
+ if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
+ ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+ = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
+ if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
+ ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+ = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
+
+ switch (cmd_id) {
+ case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
+ case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
+ ctrl_req->u.sym_create_session.op_type
+ = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+ ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
+ xform, true, &cipher_key_data, &auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "padding sym op ctrl req failed");
+ goto error_out;
+ }
+ ret = virtio_crypto_send_command(control_vq, ctrl_req,
+ cipher_key_data, auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "create session failed: %d", ret);
+ goto error_out;
+ }
+ break;
+ case VIRTIO_CRYPTO_CMD_CIPHER:
+ ctrl_req->u.sym_create_session.op_type
+ = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+ ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
+ false, &cipher_key_data, &auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "padding sym op ctrl req failed");
+ goto error_out;
+ }
+ ret = virtio_crypto_send_command(control_vq, ctrl_req,
+ cipher_key_data, NULL, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "create session failed: %d", ret);
+ goto error_out;
+ }
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Unsupported operation chain order parameter");
+ goto error_out;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ session_private);
+
+ return 0;
+
+error_out:
+ return -1;
+}
+
+static void
+virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (info != NULL) {
+ info->driver_id = cryptodev_virtio_driver_id;
+ info->feature_flags = dev->feature_flags;
+ info->max_nb_queue_pairs = hw->max_dataqueues;
+ /* No limit of number of sessions */
+ info->sym.max_nb_sessions = 0;
+ info->capabilities = hw->virtio_dev_capabilities;
+ }
+}
+
+static int
+crypto_virtio_pci_probe(
+ struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = rte_socket_id(),
+ .private_data_size = sizeof(struct virtio_crypto_hw)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return crypto_virtio_create(name, pci_dev, &init_params);
+}
+
+static int
+crypto_virtio_pci_remove(
+ struct rte_pci_device *pci_dev __rte_unused)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, cryptodev_name,
+ sizeof(cryptodev_name));
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return virtio_crypto_dev_uninit(cryptodev);
+}
+
+static struct rte_pci_driver rte_virtio_crypto_driver = {
+ .id_table = pci_id_virtio_crypto_map,
+ .drv_flags = 0,
+ .probe = crypto_virtio_pci_probe,
+ .remove = crypto_virtio_pci_remove
+};
+
+static struct cryptodev_driver virtio_crypto_drv;
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
+ rte_virtio_crypto_driver.driver,
+ cryptodev_virtio_driver_id);
+
+RTE_INIT(virtio_crypto_init_log)
+{
+ virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
+ if (virtio_crypto_logtype_init >= 0)
+ rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_session =
+ rte_log_register("pmd.crypto.virtio.session");
+ if (virtio_crypto_logtype_session >= 0)
+ rte_log_set_level(virtio_crypto_logtype_session,
+ RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
+ if (virtio_crypto_logtype_rx >= 0)
+ rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
+ if (virtio_crypto_logtype_tx >= 0)
+ rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_driver =
+ rte_log_register("pmd.crypto.virtio.driver");
+ if (virtio_crypto_logtype_driver >= 0)
+ rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.h
new file mode 100644
index 00000000..0fd7b722
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTODEV_H_
+#define _VIRTIO_CRYPTODEV_H_
+
+#include "virtio_crypto.h"
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+
+/* Features desired/implemented by this driver. */
+#define VIRTIO_CRYPTO_PMD_GUEST_FEATURES (1ULL << VIRTIO_F_VERSION_1)
+
+#define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio
+
+#define NUM_ENTRY_VIRTIO_CRYPTO_OP 7
+
+#define VIRTIO_CRYPTO_MAX_IV_SIZE 16
+
+extern uint8_t cryptodev_virtio_driver_id;
+
+enum virtio_crypto_cmd_id {
+ VIRTIO_CRYPTO_CMD_CIPHER = 0,
+ VIRTIO_CRYPTO_CMD_AUTH = 1,
+ VIRTIO_CRYPTO_CMD_CIPHER_HASH = 2,
+ VIRTIO_CRYPTO_CMD_HASH_CIPHER = 3
+};
+
+struct virtio_crypto_op_cookie {
+ struct virtio_crypto_op_data_req data_req;
+ struct virtio_crypto_inhdr inhdr;
+ struct vring_desc desc[NUM_ENTRY_VIRTIO_CRYPTO_OP];
+ uint8_t iv[VIRTIO_CRYPTO_MAX_IV_SIZE];
+};
+
+/*
+ * Control queue function prototype
+ */
+void virtio_crypto_ctrlq_start(struct rte_cryptodev *dev);
+
+/*
+ * Data queue function prototype
+ */
+void virtio_crypto_dataq_start(struct rte_cryptodev *dev);
+
+int virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+ int queue_type,
+ uint16_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ int socket_id,
+ struct virtqueue **pvq);
+
+void virtio_crypto_queue_release(struct virtqueue *vq);
+
+uint16_t virtio_crypto_pkt_tx_burst(void *tx_queue,
+ struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t virtio_crypto_pkt_rx_burst(void *tx_queue,
+ struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts);
+
+#endif /* _VIRTIO_CRYPTODEV_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_logs.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_logs.h
new file mode 100644
index 00000000..26a286cf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_logs.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_LOGS_H_
+#define _VIRTIO_LOGS_H_
+
+#include <rte_log.h>
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+ "PMD: %s(): " fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+extern int virtio_crypto_logtype_init;
+
+#define VIRTIO_CRYPTO_INIT_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_init, \
+ "INIT: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_session;
+
+#define VIRTIO_CRYPTO_SESSION_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_session, \
+ "SESSION: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_rx;
+
+#define VIRTIO_CRYPTO_RX_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_rx, \
+ "RX: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_RX_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_RX_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_RX_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_tx;
+
+#define VIRTIO_CRYPTO_TX_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_tx, \
+ "TX: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_TX_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_TX_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_TX_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_driver;
+
+#define VIRTIO_CRYPTO_DRV_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_driver, \
+ "DRIVER: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(ERR, fmt, ## args)
+
+#endif /* _VIRTIO_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.c b/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.c
new file mode 100644
index 00000000..832c465b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.c
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ #include <dirent.h>
+ #include <fcntl.h>
+#endif
+
+#include <rte_io.h>
+#include <rte_bus.h>
+
+#include "virtio_pci.h"
+#include "virtqueue.h"
+
+/*
+ * Following macros are derived from linux/pci_regs.h, however,
+ * we can't simply include that header here, as there is no such
+ * file for non-Linux platform.
+ */
+#define PCI_CAPABILITY_LIST 0x34
+#define PCI_CAP_ID_VNDR 0x09
+#define PCI_CAP_ID_MSIX 0x11
+
+/*
+ * The remaining space is defined by each driver as the per-driver
+ * configuration space.
+ */
+#define VIRTIO_PCI_CONFIG(hw) \
+ (((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)
+
+struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];
+
+static inline int
+check_vq_phys_addr_ok(struct virtqueue *vq)
+{
+ /* Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
+ * and only accepts 32 bit page frame number.
+ * Check if the allocated physical memory exceeds 16TB.
+ */
+ if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
+ (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!");
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline void
+io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
+{
+ rte_write32(val & ((1ULL << 32) - 1), lo);
+ rte_write32(val >> 32, hi);
+}
+
+static void
+modern_read_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ int i;
+ uint8_t *p;
+ uint8_t old_gen, new_gen;
+
+ do {
+ old_gen = rte_read8(&hw->common_cfg->config_generation);
+
+ p = dst;
+ for (i = 0; i < length; i++)
+ *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);
+
+ new_gen = rte_read8(&hw->common_cfg->config_generation);
+ } while (old_gen != new_gen);
+}
+
+static void
+modern_write_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ int i;
+ const uint8_t *p = src;
+
+ for (i = 0; i < length; i++)
+ rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
+}
+
+static uint64_t
+modern_get_features(struct virtio_crypto_hw *hw)
+{
+ uint32_t features_lo, features_hi;
+
+ rte_write32(0, &hw->common_cfg->device_feature_select);
+ features_lo = rte_read32(&hw->common_cfg->device_feature);
+
+ rte_write32(1, &hw->common_cfg->device_feature_select);
+ features_hi = rte_read32(&hw->common_cfg->device_feature);
+
+ return ((uint64_t)features_hi << 32) | features_lo;
+}
+
+static void
+modern_set_features(struct virtio_crypto_hw *hw, uint64_t features)
+{
+ rte_write32(0, &hw->common_cfg->guest_feature_select);
+ rte_write32(features & ((1ULL << 32) - 1),
+ &hw->common_cfg->guest_feature);
+
+ rte_write32(1, &hw->common_cfg->guest_feature_select);
+ rte_write32(features >> 32,
+ &hw->common_cfg->guest_feature);
+}
+
+static uint8_t
+modern_get_status(struct virtio_crypto_hw *hw)
+{
+ return rte_read8(&hw->common_cfg->device_status);
+}
+
+static void
+modern_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
+ rte_write8(status, &hw->common_cfg->device_status);
+}
+
+static void
+modern_reset(struct virtio_crypto_hw *hw)
+{
+ modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ modern_get_status(hw);
+}
+
+static uint8_t
+modern_get_isr(struct virtio_crypto_hw *hw)
+{
+ return rte_read8(hw->isr);
+}
+
+static uint16_t
+modern_set_config_irq(struct virtio_crypto_hw *hw, uint16_t vec)
+{
+ rte_write16(vec, &hw->common_cfg->msix_config);
+ return rte_read16(&hw->common_cfg->msix_config);
+}
+
+static uint16_t
+modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq,
+ uint16_t vec)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vec, &hw->common_cfg->queue_msix_vector);
+ return rte_read16(&hw->common_cfg->queue_msix_vector);
+}
+
+static uint16_t
+modern_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id)
+{
+ rte_write16(queue_id, &hw->common_cfg->queue_select);
+ return rte_read16(&hw->common_cfg->queue_size);
+}
+
+static int
+modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+ uint64_t desc_addr, avail_addr, used_addr;
+ uint16_t notify_off;
+
+ if (!check_vq_phys_addr_ok(vq))
+ return -1;
+
+ desc_addr = vq->vq_ring_mem;
+ avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
+ ring[vq->vq_nentries]),
+ VIRTIO_PCI_VRING_ALIGN);
+
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
+ vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
+ notify_off * hw->notify_off_multiplier);
+
+ rte_write16(1, &hw->common_cfg->queue_enable);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq->vq_queue_index);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64, desc_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t aval_addr: %" PRIx64, avail_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64, used_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)",
+ vq->notify_addr, notify_off);
+
+ return 0;
+}
+
+static void
+modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ rte_write16(0, &hw->common_cfg->queue_enable);
+}
+
+static void
+modern_notify_queue(struct virtio_crypto_hw *hw __rte_unused,
+ struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, vq->notify_addr);
+}
+
+const struct virtio_pci_ops virtio_crypto_modern_ops = {
+ .read_dev_cfg = modern_read_dev_config,
+ .write_dev_cfg = modern_write_dev_config,
+ .reset = modern_reset,
+ .get_status = modern_get_status,
+ .set_status = modern_set_status,
+ .get_features = modern_get_features,
+ .set_features = modern_set_features,
+ .get_isr = modern_get_isr,
+ .set_config_irq = modern_set_config_irq,
+ .set_queue_irq = modern_set_queue_irq,
+ .get_queue_num = modern_get_queue_num,
+ .setup_queue = modern_setup_queue,
+ .del_queue = modern_del_queue,
+ .notify_queue = modern_notify_queue,
+};
+
+void
+vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
+}
+
+void
+vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
+}
+
+uint64_t
+vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
+ uint64_t host_features)
+{
+ uint64_t features;
+
+ /*
+ * Limit negotiated features to what the driver, virtqueue, and
+ * host all support.
+ */
+ features = host_features & hw->guest_features;
+ VTPCI_OPS(hw)->set_features(hw, features);
+
+ return features;
+}
+
+void
+vtpci_cryptodev_reset(struct virtio_crypto_hw *hw)
+{
+ VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ /* flush status write */
+ VTPCI_OPS(hw)->get_status(hw);
+}
+
+void
+vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw)
+{
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+}
+
+void
+vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
+ if (status != VIRTIO_CONFIG_STATUS_RESET)
+ status |= VTPCI_OPS(hw)->get_status(hw);
+
+ VTPCI_OPS(hw)->set_status(hw, status);
+}
+
+uint8_t
+vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_status(hw);
+}
+
+uint8_t
+vtpci_cryptodev_isr(struct virtio_crypto_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_isr(hw);
+}
+
+static void *
+get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
+{
+ uint8_t bar = cap->bar;
+ uint32_t length = cap->length;
+ uint32_t offset = cap->offset;
+ uint8_t *base;
+
+ if (bar >= PCI_MAX_RESOURCE) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar);
+ return NULL;
+ }
+
+ if (offset + length < offset) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows",
+ offset, length);
+ return NULL;
+ }
+
+ if (offset + length > dev->mem_resource[bar].len) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "invalid cap: overflows bar space: %u > %" PRIu64,
+ offset + length, dev->mem_resource[bar].len);
+ return NULL;
+ }
+
+ base = dev->mem_resource[bar].addr;
+ if (base == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar);
+ return NULL;
+ }
+
+ return base + offset;
+}
+
+#define PCI_MSIX_ENABLE 0x8000
+
+static int
+virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
+{
+ uint8_t pos;
+ struct virtio_pci_cap cap;
+ int ret;
+
+ if (rte_pci_map_device(dev)) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!");
+ return -1;
+ }
+
+ ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("failed to read pci capability list");
+ return -1;
+ }
+
+ while (pos) {
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "failed to read pci cap at pos: %x", pos);
+ break;
+ }
+
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
+ /* Transitional devices would also have this capability,
+ * that's why we also check if msix is enabled.
+ * 1st byte is cap ID; 2nd byte is the position of next
+ * cap; next two bytes are the flags.
+ */
+ uint16_t flags = ((uint16_t *)&cap)[1];
+
+ if (flags & PCI_MSIX_ENABLE)
+ hw->use_msix = VIRTIO_MSIX_ENABLED;
+ else
+ hw->use_msix = VIRTIO_MSIX_DISABLED;
+ }
+
+ if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG(
+ "[%2x] skipping non VNDR cap id: %02x",
+ pos, cap.cap_vndr);
+ goto next;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG(
+ "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
+ pos, cap.cfg_type, cap.bar, cap.offset, cap.length);
+
+ switch (cap.cfg_type) {
+ case VIRTIO_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_NOTIFY_CFG:
+ rte_pci_read_config(dev, &hw->notify_off_multiplier,
+ 4, pos + sizeof(cap));
+ hw->notify_base = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_DEVICE_CFG:
+ hw->dev_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_ISR_CFG:
+ hw->isr = get_cfg_addr(dev, &cap);
+ break;
+ }
+
+next:
+ pos = cap.cap_next;
+ }
+
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->dev_cfg == NULL || hw->isr == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found.");
+ return -1;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device.");
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw->common_cfg);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw->dev_cfg);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw->isr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u",
+ hw->notify_base, hw->notify_off_multiplier);
+
+ return 0;
+}
+
+/*
+ * Return -1:
+ * if there is error mapping with VFIO/UIO.
+ * if port map error when driver type is KDRV_NONE.
+ * if whitelisted but driver type is KDRV_UNKNOWN.
+ * Return 1 if kernel driver is managing the device.
+ * Return 0 on success.
+ */
+int
+vtpci_cryptodev_init(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
+{
+ /*
+ * Try if we can succeed reading virtio pci caps, which exists
+ * only on modern pci device. If failed, we fallback to legacy
+ * virtio handling.
+ */
+ if (virtio_read_caps(dev, hw) == 0) {
+ VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected.");
+ virtio_hw_internal[hw->dev_id].vtpci_ops =
+ &virtio_crypto_modern_ops;
+ hw->modern = 1;
+ return 0;
+ }
+
+ /*
+ * virtio crypto conforms to virtio 1.0 and doesn't support
+ * legacy mode
+ */
+ return -1;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.h
new file mode 100644
index 00000000..604ec366
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_PCI_H_
+#define _VIRTIO_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+
+#include "virtio_crypto.h"
+
+struct virtqueue;
+
+/* VirtIO PCI vendor/device ID. */
+#define VIRTIO_CRYPTO_PCI_VENDORID 0x1AF4
+#define VIRTIO_CRYPTO_PCI_DEVICEID 0x1054
+
+/* VirtIO ABI version, this must match exactly. */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+/*
+ * VirtIO Header, located in BAR 0.
+ */
+#define VIRTIO_PCI_HOST_FEATURES 0 /* host's supported features (32bit, RO)*/
+#define VIRTIO_PCI_GUEST_FEATURES 4 /* guest's supported features (32, RW) */
+#define VIRTIO_PCI_QUEUE_PFN 8 /* physical address of VQ (32, RW) */
+#define VIRTIO_PCI_QUEUE_NUM 12 /* number of ring entries (16, RO) */
+#define VIRTIO_PCI_QUEUE_SEL 14 /* current VQ selection (16, RW) */
+#define VIRTIO_PCI_QUEUE_NOTIFY 16 /* notify host regarding VQ (16, RW) */
+#define VIRTIO_PCI_STATUS 18 /* device status register (8, RW) */
+#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading
+ * also clears the register (8, RO)
+ */
+/* Only if MSIX is enabled: */
+
+/* configuration change vector (16, RW) */
+#define VIRTIO_MSI_CONFIG_VECTOR 20
+/* vector for selected VQ notifications */
+#define VIRTIO_MSI_QUEUE_VECTOR 22
+
+/* The bit of the ISR which indicates a device has an interrupt. */
+#define VIRTIO_PCI_ISR_INTR 0x1
+/* The bit of the ISR which indicates a device configuration change. */
+#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* Vector value used to disable MSI for queue. */
+#define VIRTIO_MSI_NO_VECTOR 0xFFFF
+
+/* Status byte for guest to report progress. */
+#define VIRTIO_CONFIG_STATUS_RESET 0x00
+#define VIRTIO_CONFIG_STATUS_ACK 0x01
+#define VIRTIO_CONFIG_STATUS_DRIVER 0x02
+#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
+#define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x08
+#define VIRTIO_CONFIG_STATUS_FAILED 0x80
+
+/*
+ * Each virtqueue indirect descriptor list must be physically contiguous.
+ * To allow us to malloc(9) each list individually, limit the number
+ * supported to what will fit in one page. With 4KB pages, this is a limit
+ * of 256 descriptors. If there is ever a need for more, we can switch to
+ * contigmalloc(9) for the larger allocations, similar to what
+ * bus_dmamem_alloc(9) does.
+ *
+ * Note the sizeof(struct vring_desc) is 16 bytes.
+ */
+#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
+
+/* Do we get callbacks when the ring is completely used, even if we've
+ * suppressed them?
+ */
+#define VIRTIO_F_NOTIFY_ON_EMPTY 24
+
+/* Can the device handle any descriptor layout? */
+#define VIRTIO_F_ANY_LAYOUT 27
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+#define VIRTIO_F_VERSION_1 32
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field.
+ */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field.
+ */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+/* Common configuration */
+#define VIRTIO_PCI_CAP_COMMON_CFG 1
+/* Notifications */
+#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
+/* ISR Status */
+#define VIRTIO_PCI_CAP_ISR_CFG 3
+/* Device specific configuration */
+#define VIRTIO_PCI_CAP_DEVICE_CFG 4
+/* PCI configuration access */
+#define VIRTIO_PCI_CAP_PCI_CFG 5
+
+/* This is the PCI capability header: */
+struct virtio_pci_cap {
+ uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ uint8_t cap_next; /* Generic PCI field: next ptr. */
+ uint8_t cap_len; /* Generic PCI field: capability length */
+ uint8_t cfg_type; /* Identifies the structure. */
+ uint8_t bar; /* Where to find it. */
+ uint8_t padding[3]; /* Pad to full dword. */
+ uint32_t offset; /* Offset within bar. */
+ uint32_t length; /* Length of the structure, in bytes. */
+};
+
+struct virtio_pci_notify_cap {
+ struct virtio_pci_cap cap;
+ uint32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
+struct virtio_pci_common_cfg {
+ /* About the whole device. */
+ uint32_t device_feature_select; /* read-write */
+ uint32_t device_feature; /* read-only */
+ uint32_t guest_feature_select; /* read-write */
+ uint32_t guest_feature; /* read-write */
+ uint16_t msix_config; /* read-write */
+ uint16_t num_queues; /* read-only */
+ uint8_t device_status; /* read-write */
+ uint8_t config_generation; /* read-only */
+
+ /* About a specific virtqueue. */
+ uint16_t queue_select; /* read-write */
+ uint16_t queue_size; /* read-write, power of 2. */
+ uint16_t queue_msix_vector; /* read-write */
+ uint16_t queue_enable; /* read-write */
+ uint16_t queue_notify_off; /* read-only */
+ uint32_t queue_desc_lo; /* read-write */
+ uint32_t queue_desc_hi; /* read-write */
+ uint32_t queue_avail_lo; /* read-write */
+ uint32_t queue_avail_hi; /* read-write */
+ uint32_t queue_used_lo; /* read-write */
+ uint32_t queue_used_hi; /* read-write */
+};
+
+struct virtio_crypto_hw;
+
+struct virtio_pci_ops {
+ void (*read_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int len);
+ void (*write_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int len);
+ void (*reset)(struct virtio_crypto_hw *hw);
+
+ uint8_t (*get_status)(struct virtio_crypto_hw *hw);
+ void (*set_status)(struct virtio_crypto_hw *hw, uint8_t status);
+
+ uint64_t (*get_features)(struct virtio_crypto_hw *hw);
+ void (*set_features)(struct virtio_crypto_hw *hw, uint64_t features);
+
+ uint8_t (*get_isr)(struct virtio_crypto_hw *hw);
+
+ uint16_t (*set_config_irq)(struct virtio_crypto_hw *hw, uint16_t vec);
+
+ uint16_t (*set_queue_irq)(struct virtio_crypto_hw *hw,
+ struct virtqueue *vq, uint16_t vec);
+
+ uint16_t (*get_queue_num)(struct virtio_crypto_hw *hw,
+ uint16_t queue_id);
+ int (*setup_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+ void (*del_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+ void (*notify_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+};
+
+struct virtio_crypto_hw {
+ /* control queue */
+ struct virtqueue *cvq;
+ uint16_t dev_id;
+ uint16_t max_dataqueues;
+ uint64_t req_guest_features;
+ uint64_t guest_features;
+ uint8_t use_msix;
+ uint8_t modern;
+ uint32_t notify_off_multiplier;
+ uint8_t *isr;
+ uint16_t *notify_base;
+ struct virtio_pci_common_cfg *common_cfg;
+ struct virtio_crypto_config *dev_cfg;
+ const struct rte_cryptodev_capabilities *virtio_dev_capabilities;
+};
+
+/*
+ * While virtio_crypto_hw is stored in shared memory, this structure stores
+ * some infos that may vary in the multiple process model locally.
+ * For example, the vtpci_ops pointer.
+ */
+struct virtio_hw_internal {
+ const struct virtio_pci_ops *vtpci_ops;
+ struct rte_pci_ioport io;
+};
+
+#define VTPCI_OPS(hw) (virtio_hw_internal[(hw)->dev_id].vtpci_ops)
+#define VTPCI_IO(hw) (&virtio_hw_internal[(hw)->dev_id].io)
+
+extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];
+
+/*
+ * How many bits to shift physical queue address written to QUEUE_PFN.
+ * 12 is historical, and due to x86 page size.
+ */
+#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
+
+/* The alignment to use between consumer and producer parts of vring. */
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+enum virtio_msix_status {
+ VIRTIO_MSIX_NONE = 0,
+ VIRTIO_MSIX_DISABLED = 1,
+ VIRTIO_MSIX_ENABLED = 2
+};
+
+static inline int
+vtpci_with_feature(struct virtio_crypto_hw *hw, uint64_t bit)
+{
+ return (hw->guest_features & (1ULL << bit)) != 0;
+}
+
+/*
+ * Function declaration from virtio_pci.c
+ */
+int vtpci_cryptodev_init(struct rte_pci_device *dev,
+ struct virtio_crypto_hw *hw);
+void vtpci_cryptodev_reset(struct virtio_crypto_hw *hw);
+
+void vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw);
+
+uint8_t vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw);
+void vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status);
+
+uint64_t vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
+ uint64_t host_features);
+
+void vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length);
+
+void vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length);
+
+uint8_t vtpci_cryptodev_isr(struct virtio_crypto_hw *hw);
+
+#endif /* _VIRTIO_PCI_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_ring.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_ring.h
new file mode 100644
index 00000000..ee306745
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_ring.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_RING_H_
+#define _VIRTIO_RING_H_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me
+ * when you add a buffer. It's unreliable, so it's simply an
+ * optimization. Guest will still kick if it's out of buffers.
+ */
+#define VRING_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't
+ * interrupt me when you consume a buffer. It's unreliable, so it's
+ * simply an optimization.
+ */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* VirtIO ring descriptors: 16 bytes.
+ * These can chain together via "next".
+ */
+struct vring_desc {
+ uint64_t addr; /* Address (guest-physical). */
+ uint32_t len; /* Length. */
+ uint16_t flags; /* The flags as indicated above. */
+ uint16_t next; /* We chain unused descriptors via this. */
+};
+
+struct vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[0];
+};
+
+/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */
+struct vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was written to. */
+ uint32_t len;
+};
+
+struct vring_used {
+ uint16_t flags;
+ volatile uint16_t idx;
+ struct vring_used_elem ring[0];
+};
+
+struct vring {
+ unsigned int num;
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+};
+
+/* The standard layout for the ring is a continuous chunk of memory which
+ * looks like this. We assume num is a power of 2.
+ *
+ * struct vring {
+ * // The actual descriptors (16 bytes each)
+ * struct vring_desc desc[num];
+ *
+ * // A ring of available descriptor heads with free-running index.
+ * __u16 avail_flags;
+ * __u16 avail_idx;
+ * __u16 available[num];
+ * __u16 used_event_idx;
+ *
+ * // Padding to the next align boundary.
+ * char pad[];
+ *
+ * // A ring of used descriptor heads with free-running index.
+ * __u16 used_flags;
+ * __u16 used_idx;
+ * struct vring_used_elem used[num];
+ * __u16 avail_event_idx;
+ * };
+ *
+ * NOTE: for VirtIO PCI, align is 4096.
+ */
+
+/*
+ * We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility.
+ */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
+static inline size_t
+vring_size(unsigned int num, unsigned long align)
+{
+ size_t size;
+
+ size = num * sizeof(struct vring_desc);
+ size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
+ size = RTE_ALIGN_CEIL(size, align);
+ size += sizeof(struct vring_used) +
+ (num * sizeof(struct vring_used_elem));
+ return size;
+}
+
+static inline void
+vring_init(struct vring *vr, unsigned int num, uint8_t *p,
+ unsigned long align)
+{
+ vr->num = num;
+ vr->desc = (struct vring_desc *) p;
+ vr->avail = (struct vring_avail *) (p +
+ num * sizeof(struct vring_desc));
+ vr->used = (void *)
+ RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
+}
+
+/*
+ * The following is used with VIRTIO_RING_F_EVENT_IDX.
+ * Assuming a given event_idx value from the other size, if we have
+ * just incremented index from old to new_idx, should we trigger an
+ * event?
+ */
+static inline int
+vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+ return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
+
+#endif /* _VIRTIO_RING_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_rxtx.c b/src/spdk/dpdk/drivers/crypto/virtio/virtio_rxtx.c
new file mode 100644
index 00000000..e32a1ecd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_rxtx.c
@@ -0,0 +1,527 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+#include <rte_cryptodev_pmd.h>
+
+#include "virtqueue.h"
+#include "virtio_cryptodev.h"
+#include "virtio_crypto_algs.h"
+
+static void
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+ struct vring_desc *dp, *dp_tail;
+ struct vq_desc_extra *dxp;
+ uint16_t desc_idx_last = desc_idx;
+
+ dp = &vq->vq_ring.desc[desc_idx];
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+ if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+ while (dp->flags & VRING_DESC_F_NEXT) {
+ desc_idx_last = dp->next;
+ dp = &vq->vq_ring.desc[dp->next];
+ }
+ }
+ dxp->ndescs = 0;
+
+ /*
+ * We must append the existing free chain, if any, to the end of
+ * newly freed chain. If the virtqueue was completely used, then
+ * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
+ */
+ if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+ vq->vq_desc_head_idx = desc_idx;
+ } else {
+ dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+ dp_tail->next = desc_idx;
+ }
+
+ vq->vq_desc_tail_idx = desc_idx_last;
+ dp->next = VQ_RING_DESC_CHAIN_END;
+}
+
+static uint16_t
+virtqueue_dequeue_burst_rx(struct virtqueue *vq,
+ struct rte_crypto_op **rx_pkts, uint16_t num)
+{
+ struct vring_used_elem *uep;
+ struct rte_crypto_op *cop;
+ uint16_t used_idx, desc_idx;
+ uint16_t i;
+ struct virtio_crypto_inhdr *inhdr;
+ struct virtio_crypto_op_cookie *op_cookie;
+
+ /* Caller does the check */
+ for (i = 0; i < num ; i++) {
+ used_idx = (uint16_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t)uep->id;
+ cop = (struct rte_crypto_op *)
+ vq->vq_descx[desc_idx].crypto_op;
+ if (unlikely(cop == NULL)) {
+ VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no "
+ "mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+
+ op_cookie = (struct virtio_crypto_op_cookie *)
+ vq->vq_descx[desc_idx].cookie;
+ inhdr = &(op_cookie->inhdr);
+ switch (inhdr->status) {
+ case VIRTIO_CRYPTO_OK:
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case VIRTIO_CRYPTO_ERR:
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_NOTSUPP:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ vq->packets_received_failed++;
+ break;
+ default:
+ break;
+ }
+
+ vq->packets_received_total++;
+
+ rx_pkts[i] = cop;
+ rte_mempool_put(vq->mpool, op_cookie);
+
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
+ vq->vq_descx[desc_idx].crypto_op = NULL;
+ }
+
+ return i;
+}
+
+static int
+virtqueue_crypto_sym_pkt_header_arrange(
+ struct rte_crypto_op *cop,
+ struct virtio_crypto_op_data_req *data,
+ struct virtio_crypto_session *session)
+{
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct virtio_crypto_op_data_req *req_data = data;
+ struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
+ struct virtio_crypto_sym_create_session_req *sym_sess_req =
+ &ctrl->u.sym_create_session;
+ struct virtio_crypto_alg_chain_session_para *chain_para =
+ &sym_sess_req->u.chain.para;
+ struct virtio_crypto_cipher_session_para *cipher_para;
+
+ req_data->header.session_id = session->session_id;
+
+ switch (sym_sess_req->op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+
+ cipher_para = &sym_sess_req->u.cipher.para;
+ if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+ else
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+ req_data->u.sym_req.u.cipher.para.iv_len
+ = session->iv.length;
+
+ req_data->u.sym_req.u.cipher.para.src_data_len =
+ (sym_op->cipher.data.length +
+ sym_op->cipher.data.offset);
+ req_data->u.sym_req.u.cipher.para.dst_data_len =
+ req_data->u.sym_req.u.cipher.para.src_data_len;
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ req_data->u.sym_req.op_type =
+ VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+ cipher_para = &chain_para->cipher_param;
+ if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+ else
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+ req_data->u.sym_req.u.chain.para.iv_len = session->iv.length;
+ req_data->u.sym_req.u.chain.para.aad_len = session->aad.length;
+
+ req_data->u.sym_req.u.chain.para.src_data_len =
+ (sym_op->cipher.data.length +
+ sym_op->cipher.data.offset);
+ req_data->u.sym_req.u.chain.para.dst_data_len =
+ req_data->u.sym_req.u.chain.para.src_data_len;
+ req_data->u.sym_req.u.chain.para.cipher_start_src_offset =
+ sym_op->cipher.data.offset;
+ req_data->u.sym_req.u.chain.para.len_to_cipher =
+ sym_op->cipher.data.length;
+ req_data->u.sym_req.u.chain.para.hash_start_src_offset =
+ sym_op->auth.data.offset;
+ req_data->u.sym_req.u.chain.para.len_to_hash =
+ sym_op->auth.data.length;
+ req_data->u.sym_req.u.chain.para.aad_len =
+ chain_para->aad_len;
+
+ if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+ req_data->u.sym_req.u.chain.para.hash_result_len =
+ chain_para->u.hash_param.hash_result_len;
+ if (chain_para->hash_mode ==
+ VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+ req_data->u.sym_req.u.chain.para.hash_result_len =
+ chain_para->u.mac_param.hash_result_len;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtqueue_crypto_sym_enqueue_xmit(
+ struct virtqueue *txvq,
+ struct rte_crypto_op *cop)
+{
+ uint16_t idx = 0;
+ uint16_t num_entry;
+ uint16_t needed = 1;
+ uint16_t head_idx;
+ struct vq_desc_extra *dxp;
+ struct vring_desc *start_dp;
+ struct vring_desc *desc;
+ uint64_t indirect_op_data_req_phys_addr;
+ uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
+ uint32_t indirect_vring_addr_offset = req_data_len +
+ sizeof(struct virtio_crypto_inhdr);
+ uint32_t indirect_iv_addr_offset = indirect_vring_addr_offset +
+ sizeof(struct vring_desc) * NUM_ENTRY_VIRTIO_CRYPTO_OP;
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct virtio_crypto_session *session =
+ (struct virtio_crypto_session *)get_sym_session_private_data(
+ cop->sym->session, cryptodev_virtio_driver_id);
+ struct virtio_crypto_op_data_req *op_data_req;
+ uint32_t hash_result_len = 0;
+ struct virtio_crypto_op_cookie *crypto_op_cookie;
+ struct virtio_crypto_alg_chain_session_para *para;
+
+ if (unlikely(sym_op->m_src->nb_segs != 1))
+ return -EMSGSIZE;
+ if (unlikely(txvq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(txvq->vq_free_cnt < needed))
+ return -EMSGSIZE;
+ head_idx = txvq->vq_desc_head_idx;
+ if (unlikely(head_idx >= txvq->vq_nentries))
+ return -EFAULT;
+ if (unlikely(session == NULL))
+ return -EFAULT;
+
+ dxp = &txvq->vq_descx[head_idx];
+
+ if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
+ VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie");
+ return -EFAULT;
+ }
+ crypto_op_cookie = dxp->cookie;
+ indirect_op_data_req_phys_addr =
+ rte_mempool_virt2iova(crypto_op_cookie);
+ op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;
+
+ if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req, session))
+ return -EFAULT;
+
+ /* status is initialized to VIRTIO_CRYPTO_ERR */
+ ((struct virtio_crypto_inhdr *)
+ ((uint8_t *)op_data_req + req_data_len))->status =
+ VIRTIO_CRYPTO_ERR;
+
+ /* point to indirect vring entry */
+ desc = (struct vring_desc *)
+ ((uint8_t *)op_data_req + indirect_vring_addr_offset);
+ for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
+ desc[idx].next = idx + 1;
+ desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;
+
+ idx = 0;
+
+ /* indirect vring: first part, virtio_crypto_op_data_req */
+ desc[idx].addr = indirect_op_data_req_phys_addr;
+ desc[idx].len = req_data_len;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+
+ /* indirect vring: iv of cipher */
+ if (session->iv.length) {
+ if (cop->phys_addr)
+ desc[idx].addr = cop->phys_addr + session->iv.offset;
+ else {
+ rte_memcpy(crypto_op_cookie->iv,
+ rte_crypto_op_ctod_offset(cop,
+ uint8_t *, session->iv.offset),
+ session->iv.length);
+ desc[idx].addr = indirect_op_data_req_phys_addr +
+ indirect_iv_addr_offset;
+ }
+
+ desc[idx].len = session->iv.length;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: additional auth data */
+ if (session->aad.length) {
+ desc[idx].addr = session->aad.phys_addr;
+ desc[idx].len = session->aad.length;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: src data */
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+
+ /* indirect vring: dst data */
+ if (sym_op->m_dst) {
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_dst, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ } else {
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ }
+ desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+
+ /* indirect vring: digest result */
+ para = &(session->ctrl.u.sym_create_session.u.chain.para);
+ if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+ hash_result_len = para->u.hash_param.hash_result_len;
+ if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+ hash_result_len = para->u.mac_param.hash_result_len;
+ if (hash_result_len > 0) {
+ desc[idx].addr = sym_op->auth.digest.phys_addr;
+ desc[idx].len = hash_result_len;
+ desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: last part, status returned */
+ desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
+ desc[idx].len = sizeof(struct virtio_crypto_inhdr);
+ desc[idx++].flags = VRING_DESC_F_WRITE;
+
+ num_entry = idx;
+
+ /* save the infos to use when receiving packets */
+ dxp->crypto_op = (void *)cop;
+ dxp->ndescs = needed;
+
+ /* use a single buffer */
+ start_dp = txvq->vq_ring.desc;
+ start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
+ indirect_vring_addr_offset;
+ start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
+ start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;
+
+ idx = start_dp[head_idx].next;
+ txvq->vq_desc_head_idx = idx;
+ if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ txvq->vq_desc_tail_idx = idx;
+ txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
+ vq_update_avail_ring(txvq, head_idx);
+
+ return 0;
+}
+
+static int
+virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
+ struct rte_crypto_op *cop)
+{
+ int ret;
+
+ switch (cop->type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
+ break;
+ default:
+ VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
+ cop->type);
+ ret = -EFAULT;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+virtio_crypto_vring_start(struct virtqueue *vq)
+{
+ struct virtio_crypto_hw *hw = vq->hw;
+ int i, size = vq->vq_nentries;
+ struct vring *vr = &vq->vq_ring;
+ uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+
+ /* Chain all the descriptors in the ring with an END */
+ for (i = 0; i < size - 1; i++)
+ vr->desc[i].next = (uint16_t)(i + 1);
+ vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
+
+ /*
+ * Disable device(host) interrupting guest
+ */
+ virtqueue_disable_intr(vq);
+
+ /*
+ * Set guest physical address of the virtqueue
+ * in VIRTIO_PCI_QUEUE_PFN config register of device
+ * to share with the backend
+ */
+ if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ if (hw->cvq) {
+ virtio_crypto_vring_start(hw->cvq);
+ VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
+ }
+}
+
+void
+virtio_crypto_dataq_start(struct rte_cryptodev *dev)
+{
+ /*
+ * Start data vrings
+ * - Setup vring structure for data queues
+ */
+ uint16_t i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Start data vring. */
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ virtio_crypto_vring_start(dev->data->queue_pairs[i]);
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);
+ }
+}
+
+/* vring size of data queue is 1024 */
+#define VIRTIO_MBUF_BURST_SZ 1024
+
+uint16_t
+virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *txvq = tx_queue;
+ uint16_t nb_used, num, nb_rx;
+
+ nb_used = VIRTQUEUE_NUSED(txvq);
+
+ virtio_rmb();
+
+ num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
+ num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)
+ ? num : VIRTIO_MBUF_BURST_SZ);
+
+ if (num == 0)
+ return 0;
+
+ nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);
+ VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, num);
+
+ return nb_rx;
+}
+
+uint16_t
+virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *txvq;
+ uint16_t nb_tx;
+ int error;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+ if (unlikely(tx_queue == NULL)) {
+ VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL");
+ return 0;
+ }
+ txvq = tx_queue;
+
+ VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
+ /* nb_segs is always 1 at virtio crypto situation */
+ int need = txm->nb_segs - txvq->vq_free_cnt;
+
+ /*
+ * Positive value indicates it hasn't enough space in vring
+ * descriptors
+ */
+ if (unlikely(need > 0)) {
+ /*
+ * try it again because the receive process may be
+ * free some space
+ */
+ need = txm->nb_segs - txvq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
+ "descriptors to transmit");
+ break;
+ }
+ }
+
+ txvq->packets_sent_total++;
+
+ /* Enqueue Packet buffers */
+ error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);
+ if (unlikely(error)) {
+ if (error == ENOSPC)
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue Free count = 0");
+ else if (error == EMSGSIZE)
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue Free count < 1");
+ else
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue error: %d", error);
+ txvq->packets_sent_failed++;
+ break;
+ }
+ }
+
+ if (likely(nb_tx)) {
+ vq_update_avail_idx(txvq);
+
+ if (unlikely(virtqueue_kick_prepare(txvq))) {
+ virtqueue_notify(txvq);
+ VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit");
+ }
+ }
+
+ return nb_tx;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.c b/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.c
new file mode 100644
index 00000000..fd8be581
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.c
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_crypto.h>
+#include <rte_malloc.h>
+
+#include "virtqueue.h"
+
+void
+virtqueue_disable_intr(struct virtqueue *vq)
+{
+ /*
+ * Set VRING_AVAIL_F_NO_INTERRUPT to hint host
+ * not to interrupt when it consumes packets
+ * Note: this is only considered a hint to the host
+ */
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+void
+virtqueue_detatch_unused(struct virtqueue *vq)
+{
+ struct rte_crypto_op *cop = NULL;
+
+ int idx;
+
+ if (vq != NULL)
+ for (idx = 0; idx < vq->vq_nentries; idx++) {
+ cop = vq->vq_descx[idx].crypto_op;
+ if (cop) {
+ if (cop->sym->m_src)
+ rte_pktmbuf_free(cop->sym->m_src);
+ if (cop->sym->m_dst)
+ rte_pktmbuf_free(cop->sym->m_dst);
+ rte_crypto_op_free(cop);
+ vq->vq_descx[idx].crypto_op = NULL;
+ }
+ }
+}
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.h b/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.h
new file mode 100644
index 00000000..bf10c657
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTQUEUE_H_
+#define _VIRTQUEUE_H_
+
+#include <stdint.h>
+
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mempool.h>
+
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+#include "virtio_logs.h"
+#include "virtio_crypto.h"
+
+struct rte_mbuf;
+
+/*
+ * Per virtio_config.h in Linux.
+ * For virtio_pci on SMP, we don't need to order with respect to MMIO
+ * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * sufficient.
+ *
+ */
+#define virtio_mb() rte_smp_mb()
+#define virtio_rmb() rte_smp_rmb()
+#define virtio_wmb() rte_smp_wmb()
+
+#define VIRTQUEUE_MAX_NAME_SZ 32
+
+enum { VTCRYPTO_DATAQ = 0, VTCRYPTO_CTRLQ = 1 };
+
+/**
+ * The maximum virtqueue size is 2^15. Use that value as the end of
+ * descriptor chain terminator since it will never be a valid index
+ * in the descriptor table. This is used to verify we are correctly
+ * handling vq_free_cnt.
+ */
+#define VQ_RING_DESC_CHAIN_END 32768
+
+struct vq_desc_extra {
+ void *crypto_op;
+ void *cookie;
+ uint16_t ndescs;
+};
+
+struct virtqueue {
+ /**< virtio_crypto_hw structure pointer. */
+ struct virtio_crypto_hw *hw;
+ /**< mem zone to populate RX ring. */
+ const struct rte_memzone *mz;
+ /**< memzone to populate hdr and request. */
+ struct rte_mempool *mpool;
+ uint8_t dev_id; /**< Device identifier. */
+ uint16_t vq_queue_index; /**< PCI queue index */
+
+ void *vq_ring_virt_mem; /**< linear address of vring*/
+ unsigned int vq_ring_size;
+ phys_addr_t vq_ring_mem; /**< physical address of vring */
+
+ struct vring vq_ring; /**< vring keeping desc, used and avail */
+ uint16_t vq_free_cnt; /**< num of desc available */
+ uint16_t vq_nentries; /**< vring desc numbers */
+
+ /**
+ * Head of the free chain in the descriptor table. If
+ * there are no free descriptors, this will be set to
+ * VQ_RING_DESC_CHAIN_END.
+ */
+ uint16_t vq_desc_head_idx;
+ uint16_t vq_desc_tail_idx;
+ /**
+ * Last consumed descriptor in the used table,
+ * trails vq_ring.used->idx.
+ */
+ uint16_t vq_used_cons_idx;
+ uint16_t vq_avail_idx;
+
+ /* Statistics */
+ uint64_t packets_sent_total;
+ uint64_t packets_sent_failed;
+ uint64_t packets_received_total;
+ uint64_t packets_received_failed;
+
+ uint16_t *notify_addr;
+
+ struct vq_desc_extra vq_descx[0];
+};
+
+/**
+ * Tell the backend not to interrupt us.
+ */
+void virtqueue_disable_intr(struct virtqueue *vq);
+
+/**
+ * Get all mbufs to be freed.
+ */
+void virtqueue_detatch_unused(struct virtqueue *vq);
+
+static inline int
+virtqueue_full(const struct virtqueue *vq)
+{
+ return vq->vq_free_cnt == 0;
+}
+
+#define VIRTQUEUE_NUSED(vq) \
+ ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+
+static inline void
+vq_update_avail_idx(struct virtqueue *vq)
+{
+ virtio_wmb();
+ vq->vq_ring.avail->idx = vq->vq_avail_idx;
+}
+
+static inline void
+vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
+{
+ uint16_t avail_idx;
+ /*
+ * Place the head of the descriptor chain into the next slot and make
+ * it usable to the host. The chain is made available now rather than
+ * deferring to virtqueue_notify() in the hopes that if the host is
+ * currently running on another CPU, we can keep it processing the new
+ * descriptor.
+ */
+ avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
+ if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
+ vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+ vq->vq_avail_idx++;
+}
+
+static inline int
+virtqueue_kick_prepare(struct virtqueue *vq)
+{
+ return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+}
+
+static inline void
+virtqueue_notify(struct virtqueue *vq)
+{
+ /*
+ * Ensure updated avail->idx is visible to host.
+ * For virtio on IA, the notificaiton is through io port operation
+ * which is a serialization instruction itself.
+ */
+ VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
+}
+
+/**
+ * Dump virtqueue internal structures, for debug purpose only.
+ */
+#define VIRTQUEUE_DUMP(vq) do { \
+ uint16_t used_idx, nused; \
+ used_idx = (vq)->vq_ring.used->idx; \
+ nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+ VIRTIO_CRYPTO_INIT_LOG_DBG(\
+ "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
+ " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
+ " avail.flags=0x%x; used.flags=0x%x", \
+ (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
+ (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
+ (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
+ (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+} while (0)
+
+#endif /* _VIRTQUEUE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/zuc/Makefile b/src/spdk/dpdk/drivers/crypto/zuc/Makefile
new file mode 100644
index 00000000..68d84eeb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/zuc/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_ZUC_PATH),)
+$(error "Please define LIBSSO_ZUC_PATH environment variable")
+endif
+endif
+
+# library name
+LIB = librte_pmd_zuc.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_zuc_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBSSO_ZUC_PATH)
+CFLAGS += -I$(LIBSSO_ZUC_PATH)/include
+CFLAGS += -I$(LIBSSO_ZUC_PATH)/build
+LDLIBS += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/zuc/rte_pmd_zuc_version.map b/src/spdk/dpdk/drivers/crypto/zuc/rte_pmd_zuc_version.map
new file mode 100644
index 00000000..cc5829e3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/zuc/rte_pmd_zuc_version.map
@@ -0,0 +1,3 @@
+DPDK_16.11 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd.c b/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd.c
new file mode 100644
index 00000000..313f4590
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -0,0 +1,548 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_zuc_pmd_private.h"
+#define ZUC_MAX_BURST 4
+#define BYTE_LEN 8
+
+static uint8_t cryptodev_driver_id;
+
+/** Get xform chain order. */
+static enum zuc_operation
+zuc_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return ZUC_OP_NOT_SUPPORTED;
+
+ if (xform->next)
+ if (xform->next->next != NULL)
+ return ZUC_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return ZUC_OP_ONLY_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ZUC_OP_AUTH_CIPHER;
+ else
+ return ZUC_OP_NOT_SUPPORTED;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return ZUC_OP_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ZUC_OP_CIPHER_AUTH;
+ else
+ return ZUC_OP_NOT_SUPPORTED;
+ }
+
+ return ZUC_OP_NOT_SUPPORTED;
+}
+
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+zuc_set_session_parameters(struct zuc_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ enum zuc_operation mode;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ mode = zuc_get_mode(xform);
+
+ switch (mode) {
+ case ZUC_OP_CIPHER_AUTH:
+ auth_xform = xform->next;
+
+ /* Fall-through */
+ case ZUC_OP_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case ZUC_OP_AUTH_CIPHER:
+ cipher_xform = xform->next;
+ /* Fall-through */
+ case ZUC_OP_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case ZUC_OP_NOT_SUPPORTED:
+ default:
+ ZUC_LOG(ERR, "Unsupported operation chain order parameter");
+ return -ENOTSUP;
+ }
+
+ if (cipher_xform) {
+ /* Only ZUC EEA3 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
+ return -ENOTSUP;
+
+ if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
+ ZUC_LOG(ERR, "Wrong IV length");
+ return -EINVAL;
+ }
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+
+ /* Copy the key */
+ memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
+ ZUC_IV_KEY_LENGTH);
+ }
+
+ if (auth_xform) {
+ /* Only ZUC EIA3 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
+ return -ENOTSUP;
+
+ if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
+ ZUC_LOG(ERR, "Wrong digest length");
+ return -EINVAL;
+ }
+
+ sess->auth_op = auth_xform->auth.op;
+
+ if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
+ ZUC_LOG(ERR, "Wrong IV length");
+ return -EINVAL;
+ }
+ sess->auth_iv_offset = auth_xform->auth.iv.offset;
+
+ /* Copy the key */
+ memcpy(sess->pKey_hash, auth_xform->auth.key.data,
+ ZUC_IV_KEY_LENGTH);
+ }
+
+
+ sess->op = mode;
+
+ return 0;
+}
+
+/** Get ZUC session. */
+static struct zuc_session *
+zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
+{
+ struct zuc_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct zuc_session *)get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct zuc_session *)_sess_private_data;
+
+ if (unlikely(zuc_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+
+ return sess;
+}
+
+/** Encrypt/decrypt mbufs. */
+static uint8_t
+process_zuc_cipher_op(struct rte_crypto_op **ops,
+ struct zuc_session **sessions,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src[ZUC_MAX_BURST], *dst[ZUC_MAX_BURST];
+ uint8_t *iv[ZUC_MAX_BURST];
+ uint32_t num_bytes[ZUC_MAX_BURST];
+ uint8_t *cipher_keys[ZUC_MAX_BURST];
+ struct zuc_session *sess;
+
+ for (i = 0; i < num_ops; i++) {
+ if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((ops[i]->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG(ERR, "Data Length or offset");
+ break;
+ }
+
+ sess = sessions[i];
+
+#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
+ if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+ (ops[i]->sym->m_dst != NULL &&
+ !rte_pktmbuf_is_contiguous(
+ ops[i]->sym->m_dst))) {
+ ZUC_LOG(ERR, "PMD supports only contiguous mbufs, "
+ "op (%p) provides noncontiguous mbuf as "
+ "source/destination buffer.\n", ops[i]);
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+#endif
+
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst ?
+ rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3) :
+ rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ sess->cipher_iv_offset);
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ cipher_keys[i] = sess->pKey_cipher;
+
+ processed_ops++;
+ }
+
+ sso_zuc_eea3_n_buffer(cipher_keys, iv, src, dst,
+ num_bytes, processed_ops);
+
+ return processed_ops;
+}
+
+/** Generate/verify hash from mbufs. */
+static int
+process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
+ struct zuc_session **sessions,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src;
+ uint32_t *dst;
+ uint32_t length_in_bits;
+ uint8_t *iv;
+ struct zuc_session *sess;
+
+ for (i = 0; i < num_ops; i++) {
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG(ERR, "Offset");
+ break;
+ }
+
+ sess = sessions[i];
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->auth.data.offset >> 3);
+ iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ sess->auth_iv_offset);
+
+ if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ dst = (uint32_t *)qp->temp_digest;
+
+ sso_zuc_eia3_1_buffer(sess->pKey_hash,
+ iv, src,
+ length_in_bits, dst);
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ ZUC_DIGEST_LENGTH) != 0)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ dst = (uint32_t *)ops[i]->sym->auth.digest.data;
+
+ sso_zuc_eia3_1_buffer(sess->pKey_hash,
+ iv, src,
+ length_in_bits, dst);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which shares the same operation type. */
+static int
+process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type,
+ struct zuc_session **sessions,
+ struct zuc_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
+{
+ unsigned i;
+ unsigned enqueued_ops, processed_ops;
+
+ switch (op_type) {
+ case ZUC_OP_ONLY_CIPHER:
+ processed_ops = process_zuc_cipher_op(ops,
+ sessions, num_ops);
+ break;
+ case ZUC_OP_ONLY_AUTH:
+ processed_ops = process_zuc_hash_op(qp, ops, sessions,
+ num_ops);
+ break;
+ case ZUC_OP_CIPHER_AUTH:
+ processed_ops = process_zuc_cipher_op(ops, sessions,
+ num_ops);
+ process_zuc_hash_op(qp, ops, sessions, processed_ops);
+ break;
+ case ZUC_OP_AUTH_CIPHER:
+ processed_ops = process_zuc_hash_op(qp, ops, sessions,
+ num_ops);
+ process_zuc_cipher_op(ops, sessions, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(sessions[i], 0, sizeof(struct zuc_session));
+ memset(ops[i]->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sessions[i]);
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)ops, processed_ops, NULL);
+ qp->qp_stats.enqueued_count += enqueued_ops;
+ *accumulated_enqueued_ops += enqueued_ops;
+
+ return enqueued_ops;
+}
+
+static uint16_t
+zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[ZUC_MAX_BURST];
+ struct rte_crypto_op *curr_c_op;
+
+ struct zuc_session *curr_sess;
+ struct zuc_session *sessions[ZUC_MAX_BURST];
+ enum zuc_operation prev_zuc_op = ZUC_OP_NOT_SUPPORTED;
+ enum zuc_operation curr_zuc_op;
+ struct zuc_qp *qp = queue_pair;
+ unsigned i;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+
+ for (i = 0; i < nb_ops; i++) {
+ curr_c_op = ops[i];
+
+ curr_sess = zuc_get_session(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL)) {
+ curr_c_op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ break;
+ }
+
+ curr_zuc_op = curr_sess->op;
+
+ /*
+ * Batch ops that share the same operation type
+ * (cipher only, auth only...).
+ */
+ if (burst_size == 0) {
+ prev_zuc_op = curr_zuc_op;
+ c_ops[0] = curr_c_op;
+ sessions[0] = curr_sess;
+ burst_size++;
+ } else if (curr_zuc_op == prev_zuc_op) {
+ c_ops[burst_size] = curr_c_op;
+ sessions[burst_size] = curr_sess;
+ burst_size++;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == ZUC_MAX_BURST) {
+ processed_ops = process_ops(c_ops, curr_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ }
+ } else {
+ /*
+ * Different operation type, process the ops
+ * of the previous type.
+ */
+ processed_ops = process_ops(c_ops, prev_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_zuc_op = curr_zuc_op;
+
+ c_ops[0] = curr_c_op;
+ sessions[0] = curr_sess;
+ burst_size++;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last operation type. */
+ processed_ops = process_ops(c_ops, prev_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
+ }
+
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+ return enqueued_ops;
+}
+
+static uint16_t
+zuc_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+ struct zuc_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)c_ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_zuc_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_zuc_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct zuc_private *internals;
+ uint64_t cpu_flags = RTE_CRYPTODEV_FF_CPU_SSE;
+
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ ZUC_LOG(ERR, "failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_zuc_pmd_ops;
+
+ /* Register RX/TX burst functions for data path. */
+ dev->dequeue_burst = zuc_pmd_dequeue_burst;
+ dev->enqueue_burst = zuc_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ cpu_flags;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+
+ return 0;
+init_error:
+ ZUC_LOG(ERR, "driver %s: failed",
+ init_params->name);
+
+ cryptodev_zuc_remove(vdev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_zuc_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct zuc_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ return cryptodev_zuc_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_zuc_remove(struct rte_vdev_device *vdev)
+{
+
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_zuc_pmd_drv = {
+ .probe = cryptodev_zuc_probe,
+ .remove = cryptodev_zuc_remove
+};
+
+static struct cryptodev_driver zuc_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv.driver,
+ cryptodev_driver_id);
+
+RTE_INIT(zuc_init_log)
+{
+ zuc_logtype_driver = rte_log_register("pmd.crypto.zuc");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_ops.c
new file mode 100644
index 00000000..6da39654
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_ops.c
@@ -0,0 +1,322 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_zuc_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities zuc_pmd_capabilities[] = {
+ { /* ZUC (EIA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EEA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+zuc_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+zuc_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+zuc_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+zuc_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+zuc_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zuc_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+zuc_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zuc_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+zuc_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct zuc_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = zuc_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+zuc_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+zuc_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct zuc_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "zuc_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ZUC_LOG(INFO, "Reusing existing ring %s"
+ " for processed packets",
+ qp->name);
+ return r;
+ }
+
+ ZUC_LOG(ERR, "Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+zuc_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct zuc_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ zuc_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("ZUC PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (zuc_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = zuc_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+zuc_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the ZUC session structure */
+static unsigned
+zuc_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct zuc_session);
+}
+
+/** Configure a ZUC session from a crypto xform chain */
+static int
+zuc_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ ZUC_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ ZUC_LOG(ERR,
+ "Couldn't get object from session mempool");
+
+ return -ENOMEM;
+ }
+
+ ret = zuc_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ ZUC_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+zuc_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct zuc_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops zuc_pmd_ops = {
+ .dev_configure = zuc_pmd_config,
+ .dev_start = zuc_pmd_start,
+ .dev_stop = zuc_pmd_stop,
+ .dev_close = zuc_pmd_close,
+
+ .stats_get = zuc_pmd_stats_get,
+ .stats_reset = zuc_pmd_stats_reset,
+
+ .dev_infos_get = zuc_pmd_info_get,
+
+ .queue_pair_setup = zuc_pmd_qp_setup,
+ .queue_pair_release = zuc_pmd_qp_release,
+ .queue_pair_count = zuc_pmd_qp_count,
+
+ .sym_session_get_size = zuc_pmd_sym_session_get_size,
+ .sym_session_configure = zuc_pmd_sym_session_configure,
+ .sym_session_clear = zuc_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_zuc_pmd_ops = &zuc_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_private.h b/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_private.h
new file mode 100644
index 00000000..5e5906dd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_private.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#ifndef _RTE_ZUC_PMD_PRIVATE_H_
+#define _RTE_ZUC_PMD_PRIVATE_H_
+
+#include <sso_zuc.h>
+
+#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
+/**< KASUMI PMD device name */
+
+/** ZUC PMD LOGTYPE DRIVER */
+int zuc_logtype_driver;
+#define ZUC_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, zuc_logtype_driver, \
+ "%s()... line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+#define ZUC_IV_KEY_LENGTH 16
+#define ZUC_DIGEST_LENGTH 4
+
+/** private data structure for each virtual ZUC device */
+struct zuc_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+/** ZUC buffer queue pair */
+struct zuc_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ uint8_t temp_digest[ZUC_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+enum zuc_operation {
+ ZUC_OP_ONLY_CIPHER,
+ ZUC_OP_ONLY_AUTH,
+ ZUC_OP_CIPHER_AUTH,
+ ZUC_OP_AUTH_CIPHER,
+ ZUC_OP_NOT_SUPPORTED
+};
+
+/** ZUC private session structure */
+struct zuc_session {
+ enum zuc_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
+ uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
+ uint16_t cipher_iv_offset;
+ uint16_t auth_iv_offset;
+} __rte_cache_aligned;
+
+
+extern int
+zuc_set_session_parameters(struct zuc_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_zuc_pmd_ops;
+
+
+
+#endif /* _RTE_ZUC_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/event/Makefile b/src/spdk/dpdk/drivers/event/Makefile
new file mode 100644
index 00000000..f301d8dc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/event/dpaa/Makefile b/src/spdk/dpdk/drivers/event/dpaa/Makefile
new file mode 100644
index 00000000..ddd85522
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+RTE_SDK_DPAA=$(RTE_SDK)/drivers/net/dpaa
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa_event.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -I$(RTE_SDK_DPAA)/
+CFLAGS += -I$(RTE_SDK_DPAA)/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal/include
+
+EXPORT_MAP := rte_pmd_dpaa_event_version.map
+
+LIBABIVER := 1
+
+# Interfaces with DPDK
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV) += dpaa_eventdev.c
+
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_eventdev -lrte_pmd_dpaa -lrte_bus_vdev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/event/dpaa/dpaa_eventdev.c b/src/spdk/dpdk/drivers/event/dpaa/dpaa_eventdev.c
new file mode 100644
index 00000000..5443ef56
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa/dpaa_eventdev.c
@@ -0,0 +1,655 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/epoll.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_ethdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_dpaa_bus.h>
+#include <rte_dpaa_logs.h>
+#include <rte_cycles.h>
+
+#include <dpaa_ethdev.h>
+#include "dpaa_eventdev.h"
+#include <dpaa_mempool.h>
+
+/*
+ * Clarifications
+ * Evendev = Virtual Instance for SoC
+ * Eventport = Portal Instance
+ * Eventqueue = Channel Instance
+ * 1 Eventdev can have N Eventqueue
+ */
+
+static int
+dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ uint64_t cycles_per_second;
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ cycles_per_second = rte_get_timer_hz();
+ *timeout_ticks = ns * (cycles_per_second / NS_PER_S);
+
+ return 0;
+}
+
+static void
+dpaa_eventq_portal_add(u16 ch_id)
+{
+ uint32_t sdqcr;
+
+ sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
+ qman_static_dequeue_add(sdqcr, NULL);
+}
+
+static uint16_t
+dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ uint16_t i;
+ struct rte_mbuf *mbuf;
+
+ RTE_SET_USED(port);
+ /*Release all the contexts saved previously*/
+ for (i = 0; i < nb_events; i++) {
+ switch (ev[i].op) {
+ case RTE_EVENT_OP_RELEASE:
+ qman_dca_index(ev[i].impl_opaque, 0);
+ mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
+ mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+ DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
+ DPAA_PER_LCORE_DQRR_SIZE--;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return nb_events;
+}
+
+static uint16_t
+dpaa_event_enqueue(void *port, const struct rte_event *ev)
+{
+ return dpaa_event_enqueue_burst(port, ev, 1);
+}
+
+static uint16_t
+dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ int ret;
+ u16 ch_id;
+ void *buffers[8];
+ u32 num_frames, i;
+ uint64_t wait_time, cur_ticks, start_ticks;
+ struct dpaa_port *portal = (struct dpaa_port *)port;
+ struct rte_mbuf *mbuf;
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ /* Affine current thread context to a qman portal */
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_EVENTDEV_ERR("Unable to initialize portal");
+ return ret;
+ }
+ }
+
+ if (unlikely(!portal->is_port_linked)) {
+ /*
+ * Affine event queue for current thread context
+ * to a qman portal.
+ */
+ for (i = 0; i < portal->num_linked_evq; i++) {
+ ch_id = portal->evq_info[i].ch_id;
+ dpaa_eventq_portal_add(ch_id);
+ }
+ portal->is_port_linked = true;
+ }
+
+ /* Check if there are atomic contexts to be released */
+ i = 0;
+ while (DPAA_PER_LCORE_DQRR_SIZE) {
+ if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
+ qman_dca_index(i, 0);
+ mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
+ mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+ DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
+ DPAA_PER_LCORE_DQRR_SIZE--;
+ }
+ i++;
+ }
+ DPAA_PER_LCORE_DQRR_HELD = 0;
+
+ if (portal->timeout == DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID)
+ wait_time = timeout_ticks;
+ else
+ wait_time = portal->timeout;
+
+ /* Lets dequeue the frames */
+ start_ticks = rte_get_timer_cycles();
+ wait_time += start_ticks;
+ do {
+ num_frames = qman_portal_dequeue(ev, nb_events, buffers);
+ if (num_frames != 0)
+ break;
+ cur_ticks = rte_get_timer_cycles();
+ } while (cur_ticks < wait_time);
+
+ return num_frames;
+}
+
+static uint16_t
+dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+ return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
+}
+
+static void
+dpaa_event_dev_info_get(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ dev_info->driver_name = "event_dpaa";
+ dev_info->min_dequeue_timeout_ns =
+ DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_dequeue_timeout_ns =
+ DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
+ dev_info->dequeue_timeout_ns =
+ DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_event_queues =
+ DPAA_EVENT_MAX_QUEUES;
+ dev_info->max_event_queue_flows =
+ DPAA_EVENT_MAX_QUEUE_FLOWS;
+ dev_info->max_event_queue_priority_levels =
+ DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
+ dev_info->max_event_priority_levels =
+ DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
+ dev_info->max_event_ports =
+ DPAA_EVENT_MAX_EVENT_PORT;
+ dev_info->max_event_port_dequeue_depth =
+ DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ dev_info->max_event_port_enqueue_depth =
+ DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+ /*
+ * TODO: Need to find out that how to fetch this info
+ * from kernel or somewhere else.
+ */
+ dev_info->max_num_events =
+ DPAA_EVENT_MAX_NUM_EVENTS;
+ dev_info->event_dev_cap =
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+ RTE_EVENT_DEV_CAP_BURST_MODE |
+ RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+}
+
+static int
+dpaa_event_dev_configure(const struct rte_eventdev *dev)
+{
+ struct dpaa_eventdev *priv = dev->data->dev_private;
+ struct rte_event_dev_config *conf = &dev->data->dev_conf;
+ int ret, i;
+ uint32_t *ch_id;
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+ priv->nb_events_limit = conf->nb_events_limit;
+ priv->nb_event_queues = conf->nb_event_queues;
+ priv->nb_event_ports = conf->nb_event_ports;
+ priv->nb_event_queue_flows = conf->nb_event_queue_flows;
+ priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
+ priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
+ priv->event_dev_cfg = conf->event_dev_cfg;
+
+ /* Check dequeue timeout method is per dequeue or global */
+ if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+ /*
+ * Use timeout value as given in dequeue operation.
+ * So invalidating this timetout value.
+ */
+ priv->dequeue_timeout_ns = 0;
+ }
+
+ ch_id = rte_malloc("dpaa-channels",
+ sizeof(uint32_t) * priv->nb_event_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (ch_id == NULL) {
+ EVENTDEV_DRV_ERR("Fail to allocate memory for dpaa channels\n");
+ return -ENOMEM;
+ }
+ /* Create requested event queues within the given event device */
+ ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
+ if (ret < 0) {
+ EVENTDEV_DRV_ERR("Failed to create internal channel\n");
+ rte_free(ch_id);
+ return ret;
+ }
+ for (i = 0; i < priv->nb_event_queues; i++)
+ priv->evq_info[i].ch_id = (u16)ch_id[i];
+
+ /* Lets prepare event ports */
+ memset(&priv->ports[0], 0,
+ sizeof(struct dpaa_port) * priv->nb_event_ports);
+ if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+ for (i = 0; i < priv->nb_event_ports; i++) {
+ priv->ports[i].timeout =
+ DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID;
+ }
+ } else if (priv->dequeue_timeout_ns == 0) {
+ for (i = 0; i < priv->nb_event_ports; i++) {
+ dpaa_event_dequeue_timeout_ticks(NULL,
+ DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS,
+ &priv->ports[i].timeout);
+ }
+ } else {
+ for (i = 0; i < priv->nb_event_ports; i++) {
+ dpaa_event_dequeue_timeout_ticks(NULL,
+ priv->dequeue_timeout_ns,
+ &priv->ports[i].timeout);
+ }
+ }
+ /*
+ * TODO: Currently portals are affined with threads. Maximum threads
+ * can be created equals to number of lcore.
+ */
+ rte_free(ch_id);
+ EVENTDEV_DRV_LOG("Configured eventdev devid=%d", dev->data->dev_id);
+
+ return 0;
+}
+
+static int
+dpaa_event_dev_start(struct rte_eventdev *dev)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa_event_dev_stop(struct rte_eventdev *dev)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+ RTE_SET_USED(dev);
+}
+
+static int
+dpaa_event_dev_close(struct rte_eventdev *dev)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
+ queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+}
+
+static int
+dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct dpaa_eventdev *priv = dev->data->dev_private;
+ struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ switch (queue_conf->schedule_type) {
+ case RTE_SCHED_TYPE_PARALLEL:
+ case RTE_SCHED_TYPE_ATOMIC:
+ break;
+ case RTE_SCHED_TYPE_ORDERED:
+ EVENTDEV_DRV_ERR("Schedule type is not supported.");
+ return -1;
+ }
+ evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+ evq_info->event_queue_id = queue_id;
+
+ return 0;
+}
+
+static void
+dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static void
+dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(port_id);
+
+ port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
+ port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+}
+
+static int
+dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct dpaa_eventdev *eventdev = dev->data->dev_private;
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(port_conf);
+ dev->data->ports[port_id] = &eventdev->ports[port_id];
+
+ return 0;
+}
+
+static void
+dpaa_event_port_release(void *port)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(port);
+}
+
+static int
+dpaa_event_port_link(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct dpaa_eventdev *priv = dev->data->dev_private;
+ struct dpaa_port *event_port = (struct dpaa_port *)port;
+ struct dpaa_eventq *event_queue;
+ uint8_t eventq_id;
+ int i;
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(priorities);
+
+ /* First check that input configuration are valid */
+ for (i = 0; i < nb_links; i++) {
+ eventq_id = queues[i];
+ event_queue = &priv->evq_info[eventq_id];
+ if ((event_queue->event_queue_cfg
+ & RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
+ && (event_queue->event_port)) {
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < nb_links; i++) {
+ eventq_id = queues[i];
+ event_queue = &priv->evq_info[eventq_id];
+ event_port->evq_info[i].event_queue_id = eventq_id;
+ event_port->evq_info[i].ch_id = event_queue->ch_id;
+ event_queue->event_port = port;
+ }
+
+ event_port->num_linked_evq = event_port->num_linked_evq + i;
+
+ return (int)i;
+}
+
+static int
+dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_links)
+{
+ int i;
+ uint8_t eventq_id;
+ struct dpaa_eventq *event_queue;
+ struct dpaa_eventdev *priv = dev->data->dev_private;
+ struct dpaa_port *event_port = (struct dpaa_port *)port;
+
+ if (!event_port->num_linked_evq)
+ return nb_links;
+
+ for (i = 0; i < nb_links; i++) {
+ eventq_id = queues[i];
+ event_port->evq_info[eventq_id].event_queue_id = -1;
+ event_port->evq_info[eventq_id].ch_id = 0;
+ event_queue = &priv->evq_info[eventq_id];
+ event_queue->event_port = NULL;
+ }
+
+ event_port->num_linked_evq = event_port->num_linked_evq - i;
+
+ return (int)i;
+}
+
+static int
+dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ const char *ethdev_driver = eth_dev->device->driver->name;
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ if (!strcmp(ethdev_driver, "net_dpaa"))
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
+ else
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+
+ return 0;
+}
+
+static int
+dpaa_event_eth_rx_adapter_queue_add(
+ const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct dpaa_eventdev *eventdev = dev->data->dev_private;
+ uint8_t ev_qid = queue_conf->ev.queue_id;
+ u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
+ struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
+ int ret, i;
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
+ ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
+ queue_conf);
+ if (ret) {
+ EVENTDEV_DRV_ERR(
+ "Event Queue attach failed:%d\n", ret);
+ goto detach_configured_queues;
+ }
+ }
+ return 0;
+ }
+
+ ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
+ if (ret)
+ EVENTDEV_DRV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
+ return ret;
+
+detach_configured_queues:
+
+ for (i = (i - 1); i >= 0 ; i--)
+ dpaa_eth_eventq_detach(eth_dev, i);
+
+ return ret;
+}
+
+static int
+dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id)
+{
+ int ret, i;
+ struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
+ ret = dpaa_eth_eventq_detach(eth_dev, i);
+ if (ret)
+ EVENTDEV_DRV_ERR(
+ "Event Queue detach failed:%d\n", ret);
+ }
+
+ return 0;
+ }
+
+ ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
+ if (ret)
+ EVENTDEV_DRV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
+ return ret;
+}
+
+static int
+dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+static int
+dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+static struct rte_eventdev_ops dpaa_eventdev_ops = {
+ .dev_infos_get = dpaa_event_dev_info_get,
+ .dev_configure = dpaa_event_dev_configure,
+ .dev_start = dpaa_event_dev_start,
+ .dev_stop = dpaa_event_dev_stop,
+ .dev_close = dpaa_event_dev_close,
+ .queue_def_conf = dpaa_event_queue_def_conf,
+ .queue_setup = dpaa_event_queue_setup,
+ .queue_release = dpaa_event_queue_release,
+ .port_def_conf = dpaa_event_port_default_conf_get,
+ .port_setup = dpaa_event_port_setup,
+ .port_release = dpaa_event_port_release,
+ .port_link = dpaa_event_port_link,
+ .port_unlink = dpaa_event_port_unlink,
+ .timeout_ticks = dpaa_event_dequeue_timeout_ticks,
+ .eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
+ .eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
+ .eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
+ .eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
+ .eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
+};
+
+static int
+dpaa_event_dev_create(const char *name)
+{
+ struct rte_eventdev *eventdev;
+ struct dpaa_eventdev *priv;
+
+ eventdev = rte_event_pmd_vdev_init(name,
+ sizeof(struct dpaa_eventdev),
+ rte_socket_id());
+ if (eventdev == NULL) {
+ EVENTDEV_DRV_ERR("Failed to create eventdev vdev %s", name);
+ goto fail;
+ }
+
+ eventdev->dev_ops = &dpaa_eventdev_ops;
+ eventdev->enqueue = dpaa_event_enqueue;
+ eventdev->enqueue_burst = dpaa_event_enqueue_burst;
+ eventdev->dequeue = dpaa_event_dequeue;
+ eventdev->dequeue_burst = dpaa_event_dequeue_burst;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ priv = eventdev->data->dev_private;
+ priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;
+
+ return 0;
+fail:
+ return -EFAULT;
+}
+
+static int
+dpaa_event_dev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ EVENTDEV_DRV_LOG("Initializing %s", name);
+
+ return dpaa_event_dev_create(name);
+}
+
+static int
+dpaa_event_dev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ EVENTDEV_DRV_LOG("Closing %s", name);
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
+ .probe = dpaa_event_dev_probe,
+ .remove = dpaa_event_dev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
diff --git a/src/spdk/dpdk/drivers/event/dpaa/dpaa_eventdev.h b/src/spdk/dpdk/drivers/event/dpaa/dpaa_eventdev.h
new file mode 100644
index 00000000..583e46ca
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa/dpaa_eventdev.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef __DPAA_EVENTDEV_H__
+#define __DPAA_EVENTDEV_H__
+
+#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_atomic.h>
+#include <rte_per_lcore.h>
+
+#define EVENTDEV_NAME_DPAA_PMD event_dpaa1
+
+#define EVENTDEV_DRV_LOG(fmt, args...) \
+ DPAA_EVENTDEV_INFO(fmt, ## args)
+#define EVENTDEV_DRV_FUNC_TRACE() \
+ DPAA_EVENTDEV_DEBUG("%s() Called:\n", __func__)
+#define EVENTDEV_DRV_ERR(fmt, args...) \
+ DPAA_EVENTDEV_ERR("%s(): " fmt "\n", __func__, ## args)
+
+#define DPAA_EVENT_MAX_PORTS 8
+#define DPAA_EVENT_MAX_QUEUES 16
+#define DPAA_EVENT_MIN_DEQUEUE_TIMEOUT 1
+#define DPAA_EVENT_MAX_DEQUEUE_TIMEOUT (UINT32_MAX - 1)
+#define DPAA_EVENT_MAX_QUEUE_FLOWS 2048
+#define DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS 8
+#define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS 0
+#define DPAA_EVENT_MAX_EVENT_PORT RTE_MIN(RTE_MAX_LCORE, INT8_MAX)
+#define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH 8
+#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS 100UL
+#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID ((uint64_t)-1)
+#define DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH 1
+#define DPAA_EVENT_MAX_NUM_EVENTS (INT32_MAX - 1)
+
+#define DPAA_EVENT_DEV_CAP \
+do { \
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED | \
+ RTE_EVENT_DEV_CAP_BURST_MODE; \
+} while (0)
+
+#define DPAA_EVENT_QUEUE_ATOMIC_FLOWS 0
+#define DPAA_EVENT_QUEUE_ORDER_SEQUENCES 2048
+
+#define RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP \
+ (RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT | \
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ | \
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID)
+
+struct dpaa_eventq {
+ /* Channel Id */
+ uint16_t ch_id;
+ /* Configuration provided by the user */
+ uint32_t event_queue_cfg;
+ uint32_t event_queue_id;
+ /* Event port */
+ void *event_port;
+};
+
+struct dpaa_port {
+ struct dpaa_eventq evq_info[DPAA_EVENT_MAX_QUEUES];
+ uint8_t num_linked_evq;
+ uint8_t is_port_linked;
+ uint64_t timeout;
+};
+
+struct dpaa_eventdev {
+ struct dpaa_eventq evq_info[DPAA_EVENT_MAX_QUEUES];
+ struct dpaa_port ports[DPAA_EVENT_MAX_PORTS];
+ uint32_t dequeue_timeout_ns;
+ uint32_t nb_events_limit;
+ uint8_t max_event_queues;
+ uint8_t nb_event_queues;
+ uint8_t nb_event_ports;
+ uint8_t resvd;
+ uint32_t nb_event_queue_flows;
+ uint32_t nb_event_port_dequeue_depth;
+ uint32_t nb_event_port_enqueue_depth;
+ uint32_t event_dev_cfg;
+};
+#endif /* __DPAA_EVENTDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/event/dpaa/meson.build b/src/spdk/dpdk/drivers/event/dpaa/meson.build
new file mode 100644
index 00000000..0914f858
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+deps += ['pmd_dpaa']
+sources = files('dpaa_eventdev.c')
+
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/event/dpaa/rte_pmd_dpaa_event_version.map b/src/spdk/dpdk/drivers/event/dpaa/rte_pmd_dpaa_event_version.map
new file mode 100644
index 00000000..179140fb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa/rte_pmd_dpaa_event_version.map
@@ -0,0 +1,4 @@
+DPDK_18.02 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/Makefile b/src/spdk/dpdk/drivers/event/dpaa2/Makefile
new file mode 100644
index 00000000..5e1a6320
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/Makefile
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_event.a
+
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+LDLIBS += -lrte_eal -lrte_eventdev
+LDLIBS += -lrte_bus_fslmc -lrte_mempool_dpaa2 -lrte_pmd_dpaa2
+LDLIBS += -lrte_bus_vdev
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa2_event_version.map
+
+LIBABIVER := 1
+
+# depends on fslmc bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_hw_dpcon.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c
new file mode 100644
index 00000000..ea1e5cc6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -0,0 +1,830 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/epoll.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_fslmc.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_event_eth_rx_adapter.h>
+
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+#include <dpaa2_hw_dpio.h>
+#include <dpaa2_ethdev.h>
+#include "dpaa2_eventdev.h"
+#include "dpaa2_eventdev_logs.h"
+#include <portal/dpaa2_hw_pvt.h>
+#include <mc/fsl_dpci.h>
+
+/* Clarifications
+ * Evendev = SoC Instance
+ * Eventport = DPIO Instance
+ * Eventqueue = DPCON Instance
+ * 1 Eventdev can have N Eventqueue
+ * Soft Event Flow is DPCI Instance
+ */
+
+/* Dynamic logging identified for mempool */
+int dpaa2_logtype_event;
+
+static uint16_t
+dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct rte_eventdev *ev_dev =
+ ((struct dpaa2_io_portal_t *)port)->eventdev;
+ struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
+ uint32_t queue_id = ev[0].queue_id;
+ struct evq_info_t *evq_info = &priv->evq_info[queue_id];
+ uint32_t fqid;
+ struct qbman_swp *swp;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t loop, frames_to_send;
+ struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+ uint16_t num_tx = 0;
+ int ret;
+
+ RTE_SET_USED(port);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_EVENTDEV_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ while (nb_events) {
+ frames_to_send = (nb_events >> 3) ?
+ MAX_TX_RING_SLOTS : nb_events;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+ const struct rte_event *event = &ev[num_tx + loop];
+
+ if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
+ fqid = evq_info->dpci->rx_queue[
+ DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
+ else
+ fqid = evq_info->dpci->rx_queue[
+ DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc[loop]);
+ qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
+ qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
+
+ if (event->mbuf->seqn) {
+ uint8_t dqrr_index = event->mbuf->seqn - 1;
+
+ qbman_eq_desc_set_dca(&eqdesc[loop], 1,
+ dqrr_index, 0);
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_HELD &=
+ ~(1 << dqrr_index);
+ }
+
+ memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+
+ /*
+ * todo - need to align with hw context data
+ * to avoid copy
+ */
+ struct rte_event *ev_temp = rte_malloc(NULL,
+ sizeof(struct rte_event), 0);
+
+ if (!ev_temp) {
+ if (!loop)
+ return num_tx;
+ frames_to_send = loop;
+ DPAA2_EVENTDEV_ERR(
+ "Unable to allocate event object");
+ goto send_partial;
+ }
+ rte_memcpy(ev_temp, event, sizeof(struct rte_event));
+ DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
+ DPAA2_SET_FD_LEN((&fd_arr[loop]),
+ sizeof(struct rte_event));
+ }
+send_partial:
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qbman_swp_enqueue_multiple_desc(swp,
+ &eqdesc[loop], &fd_arr[loop],
+ frames_to_send - loop);
+ }
+ num_tx += frames_to_send;
+ nb_events -= frames_to_send;
+ }
+
+ return num_tx;
+}
+
+static uint16_t
+dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
+{
+ return dpaa2_eventdev_enqueue_burst(port, ev, 1);
+}
+
+static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
+{
+ struct epoll_event epoll_ev;
+
+ qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
+ QBMAN_SWP_INTERRUPT_DQRI);
+
+ epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
+ &epoll_ev, 1, timeout_ticks);
+}
+
+static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ struct rte_event *ev_temp =
+ (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
+
+ RTE_SET_USED(rxq);
+
+ rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
+ rte_free(ev_temp);
+
+ qbman_swp_dqrr_consume(swp, dq);
+}
+
+static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ struct rte_event *ev_temp =
+ (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
+ uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
+
+ RTE_SET_USED(swp);
+ RTE_SET_USED(rxq);
+
+ rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
+ rte_free(ev_temp);
+ ev->mbuf->seqn = dqrr_index + 1;
+ DPAA2_PER_LCORE_DQRR_SIZE++;
+ DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+}
+
+static uint16_t
+dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ const struct qbman_result *dq;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd;
+ struct dpaa2_queue *rxq;
+ int num_pkts = 0, ret, i = 0;
+
+ RTE_SET_USED(port);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_EVENTDEV_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ /* Check if there are atomic contexts to be released */
+ while (DPAA2_PER_LCORE_DQRR_SIZE) {
+ if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
+ qbman_swp_dqrr_idx_consume(swp, i);
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
+ DPAA2_INVALID_MBUF_SEQN;
+ }
+ i++;
+ }
+ DPAA2_PER_LCORE_DQRR_HELD = 0;
+
+ do {
+ dq = qbman_swp_dqrr_next(swp);
+ if (!dq) {
+ if (!num_pkts && timeout_ticks) {
+ dpaa2_eventdev_dequeue_wait(timeout_ticks);
+ timeout_ticks = 0;
+ continue;
+ }
+ return num_pkts;
+ }
+ qbman_swp_prefetch_dqrr_next(swp);
+
+ fd = qbman_result_DQ_fd(dq);
+ rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
+ if (rxq) {
+ rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
+ } else {
+ qbman_swp_dqrr_consume(swp, dq);
+ DPAA2_EVENTDEV_ERR("Null Return VQ received");
+ return 0;
+ }
+
+ num_pkts++;
+ } while (num_pkts < nb_events);
+
+ return num_pkts;
+}
+
+static uint16_t
+dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks)
+{
+ return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
+}
+
+static void
+dpaa2_eventdev_info_get(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ memset(dev_info, 0, sizeof(struct rte_event_dev_info));
+ dev_info->min_dequeue_timeout_ns =
+ DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_dequeue_timeout_ns =
+ DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
+ dev_info->dequeue_timeout_ns =
+ DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_event_queues = priv->max_event_queues;
+ dev_info->max_event_queue_flows =
+ DPAA2_EVENT_MAX_QUEUE_FLOWS;
+ dev_info->max_event_queue_priority_levels =
+ DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
+ dev_info->max_event_priority_levels =
+ DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
+ dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
+ dev_info->max_event_port_dequeue_depth =
+ DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ dev_info->max_event_port_enqueue_depth =
+ DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+ dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+ RTE_EVENT_DEV_CAP_BURST_MODE|
+ RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+ RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+
+}
+
+static int
+dpaa2_eventdev_configure(const struct rte_eventdev *dev)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct rte_event_dev_config *conf = &dev->data->dev_conf;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+ priv->nb_event_queues = conf->nb_event_queues;
+ priv->nb_event_ports = conf->nb_event_ports;
+ priv->nb_event_queue_flows = conf->nb_event_queue_flows;
+ priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
+ priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
+ priv->event_dev_cfg = conf->event_dev_cfg;
+
+ DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
+ dev->data->dev_id);
+ return 0;
+}
+
+static int
+dpaa2_eventdev_start(struct rte_eventdev *dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_stop(struct rte_eventdev *dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+}
+
+static int
+dpaa2_eventdev_close(struct rte_eventdev *dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+ RTE_SET_USED(queue_conf);
+
+ queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
+ queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
+ RTE_SCHED_TYPE_PARALLEL;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static void
+dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static int
+dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct evq_info_t *evq_info =
+ &priv->evq_info[queue_id];
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(port_id);
+ RTE_SET_USED(port_conf);
+
+ port_conf->new_event_threshold =
+ DPAA2_EVENT_MAX_NUM_EVENTS;
+ port_conf->dequeue_depth =
+ DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ port_conf->enqueue_depth =
+ DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+ port_conf->disable_implicit_release = 0;
+}
+
+static void
+dpaa2_eventdev_port_release(void *port)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(port);
+}
+
+static int
+dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(port_conf);
+
+ if (!dpaa2_io_portal[port_id].dpio_dev) {
+ dpaa2_io_portal[port_id].dpio_dev =
+ dpaa2_get_qbman_swp(port_id);
+ rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
+ if (!dpaa2_io_portal[port_id].dpio_dev)
+ return -1;
+ }
+
+ dpaa2_io_portal[port_id].eventdev = dev;
+ dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
+ return 0;
+}
+
+static int
+dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct dpaa2_io_portal_t *dpaa2_portal = port;
+ struct evq_info_t *evq_info;
+ int i;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ for (i = 0; i < nb_unlinks; i++) {
+ evq_info = &priv->evq_info[queues[i]];
+ qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
+ evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
+ 0, dpaa2_portal->dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ }
+
+ return (int)nb_unlinks;
+}
+
+static int
+dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct dpaa2_io_portal_t *dpaa2_portal = port;
+ struct evq_info_t *evq_info;
+ uint8_t channel_index;
+ int ret, i, n;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ for (i = 0; i < nb_links; i++) {
+ evq_info = &priv->evq_info[queues[i]];
+
+ ret = dpio_add_static_dequeue_channel(
+ dpaa2_portal->dpio_dev->dpio,
+ CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
+ evq_info->dpcon->dpcon_id, &channel_index);
+ if (ret < 0) {
+ DPAA2_EVENTDEV_ERR(
+ "Static dequeue config failed: err(%d)", ret);
+ goto err;
+ }
+
+ qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
+ channel_index, 1);
+ evq_info->dpcon->channel_index = channel_index;
+ }
+
+ RTE_SET_USED(priorities);
+
+ return (int)nb_links;
+err:
+ for (n = 0; n < i; n++) {
+ evq_info = &priv->evq_info[queues[n]];
+ qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
+ evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
+ 0, dpaa2_portal->dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ }
+ return ret;
+}
+
+static int
+dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ uint32_t scale = 1;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ *timeout_ticks = ns * scale;
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(f);
+}
+
+static int
+dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ const char *ethdev_driver = eth_dev->device->driver->name;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ if (!strcmp(ethdev_driver, "net_dpaa2"))
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
+ else
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = queue_conf->ev.queue_id;
+ uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ ret = dpaa2_eth_eventq_attach(eth_dev, i,
+ dpcon_id, queue_conf);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "Event queue attach failed: err(%d)", ret);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ for (i = (i - 1); i >= 0 ; i--)
+ dpaa2_eth_eventq_detach(eth_dev, i);
+
+ return ret;
+}
+
+static int
+dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = queue_conf->ev.queue_id;
+ uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_eth_queue_add_all(dev,
+ eth_dev, queue_conf);
+
+ ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
+ dpcon_id, queue_conf);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "Event queue attach failed: err(%d)", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ ret = dpaa2_eth_eventq_detach(eth_dev, i);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "Event queue detach failed: err(%d)", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id)
+{
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);
+
+ ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "Event queue detach failed: err(%d)", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+static struct rte_eventdev_ops dpaa2_eventdev_ops = {
+ .dev_infos_get = dpaa2_eventdev_info_get,
+ .dev_configure = dpaa2_eventdev_configure,
+ .dev_start = dpaa2_eventdev_start,
+ .dev_stop = dpaa2_eventdev_stop,
+ .dev_close = dpaa2_eventdev_close,
+ .queue_def_conf = dpaa2_eventdev_queue_def_conf,
+ .queue_setup = dpaa2_eventdev_queue_setup,
+ .queue_release = dpaa2_eventdev_queue_release,
+ .port_def_conf = dpaa2_eventdev_port_def_conf,
+ .port_setup = dpaa2_eventdev_port_setup,
+ .port_release = dpaa2_eventdev_port_release,
+ .port_link = dpaa2_eventdev_port_link,
+ .port_unlink = dpaa2_eventdev_port_unlink,
+ .timeout_ticks = dpaa2_eventdev_timeout_ticks,
+ .dump = dpaa2_eventdev_dump,
+ .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
+ .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
+ .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
+ .eth_rx_adapter_start = dpaa2_eventdev_eth_start,
+ .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
+};
+
+static int
+dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
+ struct dpaa2_dpcon_dev *dpcon_dev)
+{
+ struct dpci_rx_queue_cfg rx_queue_cfg;
+ int ret, i;
+
+ /*Do settings to get the frame on a DPCON object*/
+ rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
+ DPCI_QUEUE_OPT_USER_CTX;
+ rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
+ rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
+ rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;
+
+ dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
+ dpaa2_eventdev_process_parallel;
+ dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
+ dpaa2_eventdev_process_atomic;
+
+ for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
+ rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
+ ret = dpci_set_rx_queue(&dpci_dev->dpci,
+ CMD_PRI_LOW,
+ dpci_dev->token, i,
+ &rx_queue_cfg);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "DPCI Rx queue setup failed: err(%d)",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int
+dpaa2_eventdev_create(const char *name)
+{
+ struct rte_eventdev *eventdev;
+ struct dpaa2_eventdev *priv;
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+ struct dpaa2_dpci_dev *dpci_dev = NULL;
+ int ret;
+
+ eventdev = rte_event_pmd_vdev_init(name,
+ sizeof(struct dpaa2_eventdev),
+ rte_socket_id());
+ if (eventdev == NULL) {
+ DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
+ goto fail;
+ }
+
+ eventdev->dev_ops = &dpaa2_eventdev_ops;
+ eventdev->enqueue = dpaa2_eventdev_enqueue;
+ eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->dequeue = dpaa2_eventdev_dequeue;
+ eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ priv = eventdev->data->dev_private;
+ priv->max_event_queues = 0;
+
+ do {
+ dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
+ if (!dpcon_dev)
+ break;
+ priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;
+
+ dpci_dev = rte_dpaa2_alloc_dpci_dev();
+ if (!dpci_dev) {
+ rte_dpaa2_free_dpcon_dev(dpcon_dev);
+ break;
+ }
+ priv->evq_info[priv->max_event_queues].dpci = dpci_dev;
+
+ ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "DPCI setup failed: err(%d)", ret);
+ return ret;
+ }
+ priv->max_event_queues++;
+ } while (dpcon_dev && dpci_dev);
+
+ return 0;
+fail:
+ return -EFAULT;
+}
+
+static int
+dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ DPAA2_EVENTDEV_INFO("Initializing %s", name);
+ return dpaa2_eventdev_create(name);
+}
+
+static int
+dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ DPAA2_EVENTDEV_INFO("Closing %s", name);
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
+ .probe = dpaa2_eventdev_probe,
+ .remove = dpaa2_eventdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
+
+RTE_INIT(dpaa2_eventdev_init_log)
+{
+ dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
+ if (dpaa2_logtype_event >= 0)
+ rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.h b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.h
new file mode 100644
index 00000000..229f66af
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -0,0 +1,84 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA2_EVENTDEV_H__
+#define __DPAA2_EVENTDEV_H__
+
+#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_atomic.h>
+#include <mc/fsl_dpcon.h>
+#include <mc/fsl_mc_sys.h>
+
+#define EVENTDEV_NAME_DPAA2_PMD event_dpaa2
+
+#define DPAA2_EVENT_DEFAULT_DPCI_PRIO 0
+
+#define DPAA2_EVENT_MAX_QUEUES 16
+#define DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT 1
+#define DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT (UINT32_MAX - 1)
+#define DPAA2_EVENT_MAX_QUEUE_FLOWS 2048
+#define DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS 8
+#define DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS 0
+#define DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH 8
+#define DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH 8
+#define DPAA2_EVENT_MAX_NUM_EVENTS (INT32_MAX - 1)
+
+#define DPAA2_EVENT_QUEUE_ATOMIC_FLOWS 2048
+#define DPAA2_EVENT_QUEUE_ORDER_SEQUENCES 2048
+
+enum {
+ DPAA2_EVENT_DPCI_PARALLEL_QUEUE,
+ DPAA2_EVENT_DPCI_ATOMIC_QUEUE,
+ DPAA2_EVENT_DPCI_MAX_QUEUES
+};
+
+#define RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP \
+ (RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT | \
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ | \
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID)
+/**< Ethernet Rx adapter cap to return If the packet transfers from
+ * the ethdev to eventdev with DPAA2 devices.
+ */
+
+struct dpaa2_dpcon_dev {
+ TAILQ_ENTRY(dpaa2_dpcon_dev) next;
+ struct fsl_mc_io dpcon;
+ uint16_t token;
+ rte_atomic16_t in_use;
+ uint32_t dpcon_id;
+ uint16_t qbman_ch_id;
+ uint8_t num_priorities;
+ uint8_t channel_index;
+};
+
+struct evq_info_t {
+ /* DPcon device */
+ struct dpaa2_dpcon_dev *dpcon;
+ /* Attached DPCI device */
+ struct dpaa2_dpci_dev *dpci;
+ /* Configuration provided by the user */
+ uint32_t event_queue_cfg;
+};
+
+struct dpaa2_eventdev {
+ struct evq_info_t evq_info[DPAA2_EVENT_MAX_QUEUES];
+ uint32_t dequeue_timeout_ns;
+ uint8_t max_event_queues;
+ uint8_t nb_event_queues;
+ uint8_t nb_event_ports;
+ uint8_t resvd_1;
+ uint32_t nb_event_queue_flows;
+ uint32_t nb_event_port_dequeue_depth;
+ uint32_t nb_event_port_enqueue_depth;
+ uint32_t event_dev_cfg;
+};
+
+struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void);
+void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon);
+
+#endif /* __DPAA2_EVENTDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev_logs.h
new file mode 100644
index 00000000..a2c2060c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev_logs.h
@@ -0,0 +1,39 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _DPAA2_EVENTDEV_LOGS_H_
+#define _DPAA2_EVENTDEV_LOGS_H_
+
+extern int dpaa2_logtype_event;
+
+#define DPAA2_EVENTDEV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_event, "dpaa2_event: " \
+ fmt "\n", ##args)
+
+#define DPAA2_EVENTDEV_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_event, "dpaa2_event: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define EVENTDEV_INIT_FUNC_TRACE() DPAA2_EVENTDEV_DEBUG(" >>")
+
+#define DPAA2_EVENTDEV_INFO(fmt, args...) \
+ DPAA2_EVENTDEV_LOG(INFO, fmt, ## args)
+#define DPAA2_EVENTDEV_ERR(fmt, args...) \
+ DPAA2_EVENTDEV_LOG(ERR, fmt, ## args)
+#define DPAA2_EVENTDEV_WARN(fmt, args...) \
+ DPAA2_EVENTDEV_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_EVENTDEV_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_EVENTDEV_DP_DEBUG(fmt, args...) \
+ DPAA2_EVENTDEV_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_EVENTDEV_DP_INFO(fmt, args...) \
+ DPAA2_EVENTDEV_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_EVENTDEV_DP_WARN(fmt, args...) \
+ DPAA2_EVENTDEV_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _DPAA2_EVENTDEV_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_hw_dpcon.c b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_hw_dpcon.c
new file mode 100644
index 00000000..d64e588a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_hw_dpcon.c
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev_driver.h>
+
+#include <rte_fslmc.h>
+#include <mc/fsl_dpcon.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include "dpaa2_eventdev.h"
+#include "dpaa2_eventdev_logs.h"
+
+TAILQ_HEAD(dpcon_dev_list, dpaa2_dpcon_dev);
+static struct dpcon_dev_list dpcon_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpcon_dev_list); /*!< DPCON device list */
+
+static int
+rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dpcon_id)
+{
+ struct dpaa2_dpcon_dev *dpcon_node;
+ struct dpcon_attr attr;
+ int ret;
+
+ /* Allocate DPAA2 dpcon handle */
+ dpcon_node = rte_malloc(NULL, sizeof(struct dpaa2_dpcon_dev), 0);
+ if (!dpcon_node) {
+ DPAA2_EVENTDEV_ERR(
+ "Memory allocation failed for dpcon device");
+ return -1;
+ }
+
+ /* Open the dpcon object */
+ dpcon_node->dpcon.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpcon_open(&dpcon_node->dpcon,
+ CMD_PRI_LOW, dpcon_id, &dpcon_node->token);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR("Unable to open dpcon device: err(%d)",
+ ret);
+ rte_free(dpcon_node);
+ return -1;
+ }
+
+ /* Get the device attributes */
+ ret = dpcon_get_attributes(&dpcon_node->dpcon,
+ CMD_PRI_LOW, dpcon_node->token, &attr);
+ if (ret != 0) {
+ DPAA2_EVENTDEV_ERR("dpcon attribute fetch failed: err(%d)",
+ ret);
+ rte_free(dpcon_node);
+ return -1;
+ }
+
+ /* Updating device specific private information*/
+ dpcon_node->qbman_ch_id = attr.qbman_ch_id;
+ dpcon_node->num_priorities = attr.num_priorities;
+ dpcon_node->dpcon_id = dpcon_id;
+ rte_atomic16_init(&dpcon_node->in_use);
+
+ TAILQ_INSERT_TAIL(&dpcon_dev_list, dpcon_node, next);
+
+ return 0;
+}
+
+struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void)
+{
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+
+ /* Get DPCON dev handle from list using index */
+ TAILQ_FOREACH(dpcon_dev, &dpcon_dev_list, next) {
+ if (dpcon_dev && rte_atomic16_test_and_set(&dpcon_dev->in_use))
+ break;
+ }
+
+ return dpcon_dev;
+}
+
+void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon)
+{
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+
+ /* Match DPCON handle and mark it free */
+ TAILQ_FOREACH(dpcon_dev, &dpcon_dev_list, next) {
+ if (dpcon_dev == dpcon) {
+ rte_atomic16_dec(&dpcon_dev->in_use);
+ return;
+ }
+ }
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dpcon_obj = {
+ .dev_type = DPAA2_CON,
+ .create = rte_dpaa2_create_dpcon_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpcon, rte_dpaa2_dpcon_obj);
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/meson.build b/src/spdk/dpdk/drivers/event/dpaa2/meson.build
new file mode 100644
index 00000000..de7a4615
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+deps += ['bus_vdev', 'pmd_dpaa2']
+sources = files('dpaa2_hw_dpcon.c',
+ 'dpaa2_eventdev.c')
+
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map b/src/spdk/dpdk/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map
new file mode 100644
index 00000000..1c0b7559
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map
@@ -0,0 +1,3 @@
+DPDK_17.08 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/event/meson.build b/src/spdk/dpdk/drivers/event/meson.build
new file mode 100644
index 00000000..e9511993
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['dpaa', 'dpaa2', 'octeontx', 'skeleton', 'sw']
+std_deps = ['eventdev', 'kvargs']
+config_flag_fmt = 'RTE_LIBRTE_@0@_EVENTDEV_PMD'
+driver_name_fmt = 'rte_pmd_@0@_event'
diff --git a/src/spdk/dpdk/drivers/event/octeontx/Makefile b/src/spdk/dpdk/drivers/event/octeontx/Makefile
new file mode 100644
index 00000000..90ad2217
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/Makefile
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_octeontx_ssovf.a
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
+CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+LDLIBS += -lrte_eal -lrte_eventdev -lrte_common_octeontx -lrte_pmd_octeontx
+LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_mbuf -lrte_kvargs
+LDLIBS += -lrte_bus_vdev
+
+EXPORT_MAP := rte_pmd_octeontx_event_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_worker.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev_selftest.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_probe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_worker.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_probe.c
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_ssovf_worker.o += -fno-prefetch-loop-arrays
+CFLAGS_timvf_worker.o += -fno-prefetch-loop-arrays
+
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_ssovf_worker.o += -Ofast
+CFLAGS_timvf_worker.o += -Ofast
+else
+CFLAGS_ssovf_worker.o += -O3 -ffast-math
+CFLAGS_timvf_worker.o += -O3 -ffast-math
+endif
+
+else
+CFLAGS_ssovf_worker.o += -Ofast
+CFLAGS_timvf_worker.o += -Ofast
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/event/octeontx/meson.build b/src/spdk/dpdk/drivers/event/octeontx/meson.build
new file mode 100644
index 00000000..04185533
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+sources = files('ssovf_worker.c',
+ 'ssovf_evdev.c',
+ 'ssovf_evdev_selftest.c',
+ 'ssovf_probe.c',
+ 'timvf_worker.c',
+ 'timvf_evdev.c',
+ 'timvf_probe.c'
+)
+
+allow_experimental_apis = true
+deps += ['common_octeontx', 'mempool_octeontx', 'bus_vdev', 'pmd_octeontx']
diff --git a/src/spdk/dpdk/drivers/event/octeontx/rte_pmd_octeontx_event_version.map b/src/spdk/dpdk/drivers/event/octeontx/rte_pmd_octeontx_event_version.map
new file mode 100644
index 00000000..5352e7e3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/rte_pmd_octeontx_event_version.map
@@ -0,0 +1,3 @@
+DPDK_17.05 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/event/octeontx/ssovf_evdev.c b/src/spdk/dpdk/drivers/event/octeontx/ssovf_evdev.c
new file mode 100644
index 00000000..16a3a04b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/ssovf_evdev.c
@@ -0,0 +1,763 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_ethdev_driver.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_kvargs.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_vdev.h>
+
+#include "ssovf_evdev.h"
+#include "timvf_evdev.h"
+
+int otx_logtype_ssovf;
+static uint8_t timvf_enable_stats;
+
+RTE_INIT(otx_ssovf_init_log)
+{
+ otx_logtype_ssovf = rte_log_register("pmd.event.octeontx");
+ if (otx_logtype_ssovf >= 0)
+ rte_log_set_level(otx_logtype_ssovf, RTE_LOG_NOTICE);
+}
+
+/* SSOPF Mailbox messages */
+
+struct ssovf_mbox_dev_info {
+ uint64_t min_deq_timeout_ns;
+ uint64_t max_deq_timeout_ns;
+ uint32_t max_num_events;
+};
+
+static int
+ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ uint16_t len = sizeof(struct ssovf_mbox_dev_info);
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_GET_DEV_INFO;
+ hdr.vfid = 0;
+
+ memset(info, 0, len);
+ return octeontx_mbox_send(&hdr, NULL, 0, info, len);
+}
+
+struct ssovf_mbox_getwork_wait {
+ uint64_t wait_ns;
+};
+
+static int
+ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ struct ssovf_mbox_getwork_wait tmo_set;
+ uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
+ int ret;
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_SET_GETWORK_WAIT;
+ hdr.vfid = 0;
+
+ tmo_set.wait_ns = timeout_ns;
+ ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0);
+ if (ret)
+ ssovf_log_err("Failed to set getwork timeout(%d)", ret);
+
+ return ret;
+}
+
+struct ssovf_mbox_grp_pri {
+ uint8_t wgt_left; /* Read only */
+ uint8_t weight;
+ uint8_t affinity;
+ uint8_t priority;
+};
+
+static int
+ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ struct ssovf_mbox_grp_pri grp;
+ uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
+ int ret;
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_GRP_SET_PRIORITY;
+ hdr.vfid = queue;
+
+ grp.weight = 0xff;
+ grp.affinity = 0xff;
+ grp.priority = prio / 32; /* Normalize to 0 to 7 */
+
+ ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0);
+ if (ret)
+ ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);
+
+ return ret;
+}
+
+struct ssovf_mbox_convert_ns_getworks_iter {
+ uint64_t wait_ns;
+ uint32_t getwork_iter;/* Get_work iterations for the given wait_ns */
+};
+
+static int
+ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
+ uint16_t len = sizeof(ns2iter);
+ int ret;
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
+ hdr.vfid = 0;
+
+ memset(&ns2iter, 0, len);
+ ns2iter.wait_ns = ns;
+ ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
+ if (ret < 0 || (ret != len)) {
+ ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
+ return -EIO;
+ }
+
+ *tmo_ticks = ns2iter.getwork_iter;
+ return 0;
+}
+
+static void
+ssovf_fastpath_fns_set(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ dev->enqueue = ssows_enq;
+ dev->enqueue_burst = ssows_enq_burst;
+ dev->enqueue_new_burst = ssows_enq_new_burst;
+ dev->enqueue_forward_burst = ssows_enq_fwd_burst;
+ dev->dequeue = ssows_deq;
+ dev->dequeue_burst = ssows_deq_burst;
+
+ if (edev->is_timeout_deq) {
+ dev->dequeue = ssows_deq_timeout;
+ dev->dequeue_burst = ssows_deq_timeout_burst;
+ }
+}
+
+static void
+ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
+ dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
+ dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
+ dev_info->max_event_queues = edev->max_event_queues;
+ dev_info->max_event_queue_flows = (1ULL << 20);
+ dev_info->max_event_queue_priority_levels = 8;
+ dev_info->max_event_priority_levels = 1;
+ dev_info->max_event_ports = edev->max_event_ports;
+ dev_info->max_event_port_dequeue_depth = 1;
+ dev_info->max_event_port_enqueue_depth = 1;
+ dev_info->max_num_events = edev->max_num_events;
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+ RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES|
+ RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+ RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+
+}
+
+static int
+ssovf_configure(const struct rte_eventdev *dev)
+{
+ struct rte_event_dev_config *conf = &dev->data->dev_conf;
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ uint64_t deq_tmo_ns;
+
+ ssovf_func_trace();
+ deq_tmo_ns = conf->dequeue_timeout_ns;
+ if (deq_tmo_ns == 0)
+ deq_tmo_ns = edev->min_deq_timeout_ns;
+
+ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+ edev->is_timeout_deq = 1;
+ deq_tmo_ns = edev->min_deq_timeout_ns;
+ }
+ edev->nb_event_queues = conf->nb_event_queues;
+ edev->nb_event_ports = conf->nb_event_ports;
+
+ return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
+}
+
+static void
+ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ queue_conf->nb_atomic_flows = (1ULL << 20);
+ queue_conf->nb_atomic_order_sequences = (1ULL << 20);
+ queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static void
+ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static int
+ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ RTE_SET_USED(dev);
+ ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);
+
+ return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
+}
+
+static void
+ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ RTE_SET_USED(port_id);
+ port_conf->new_event_threshold = edev->max_num_events;
+ port_conf->dequeue_depth = 1;
+ port_conf->enqueue_depth = 1;
+ port_conf->disable_implicit_release = 0;
+}
+
+static void
+ssovf_port_release(void *port)
+{
+ rte_free(port);
+}
+
+static int
+ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct ssows *ws;
+ uint32_t reg_off;
+ uint8_t q;
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ ssovf_func_trace("port=%d", port_id);
+ RTE_SET_USED(port_conf);
+
+ /* Free memory prior to re-allocation if needed */
+ if (dev->data->ports[port_id] != NULL) {
+ ssovf_port_release(dev->data->ports[port_id]);
+ dev->data->ports[port_id] = NULL;
+ }
+
+ /* Allocate event port memory */
+ ws = rte_zmalloc_socket("eventdev ssows",
+ sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
+ dev->data->socket_id);
+ if (ws == NULL) {
+ ssovf_log_err("Failed to alloc memory for port=%d", port_id);
+ return -ENOMEM;
+ }
+
+ ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
+ if (ws->base == NULL) {
+ rte_free(ws);
+ ssovf_log_err("Failed to get hws base addr port=%d", port_id);
+ return -EINVAL;
+ }
+
+ reg_off = SSOW_VHWS_OP_GET_WORK0;
+ reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
+ reg_off |= 1 << 16; /* Wait */
+ ws->getwork = ws->base + reg_off;
+ ws->port = port_id;
+
+ for (q = 0; q < edev->nb_event_queues; q++) {
+ ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
+ if (ws->grps[q] == NULL) {
+ rte_free(ws);
+ ssovf_log_err("Failed to get grp%d base addr", q);
+ return -EINVAL;
+ }
+ }
+
+ dev->data->ports[port_id] = ws;
+ ssovf_log_dbg("port=%d ws=%p", port_id, ws);
+ return 0;
+}
+
+static int
+ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
+ const uint8_t priorities[], uint16_t nb_links)
+{
+ uint16_t link;
+ uint64_t val;
+ struct ssows *ws = port;
+
+ ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
+ RTE_SET_USED(dev);
+ RTE_SET_USED(priorities);
+
+ for (link = 0; link < nb_links; link++) {
+ val = queues[link];
+ val |= (1ULL << 24); /* Set membership */
+ ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
+ }
+ return (int)nb_links;
+}
+
+static int
+ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
+ uint16_t nb_unlinks)
+{
+ uint16_t unlink;
+ uint64_t val;
+ struct ssows *ws = port;
+
+ ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_unlinks);
+ RTE_SET_USED(dev);
+
+ for (unlink = 0; unlink < nb_unlinks; unlink++) {
+ val = queues[unlink];
+ val &= ~(1ULL << 24); /* Clear membership */
+ ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
+ }
+ return (int)nb_unlinks;
+}
+
+static int
+ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
+{
+ RTE_SET_USED(dev);
+
+ return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
+}
+
+static void
+ssows_dump(struct ssows *ws, FILE *f)
+{
+ uint8_t *base = ws->base;
+ uint64_t val;
+
+ fprintf(f, "\t---------------port%d---------------\n", ws->port);
+ val = ssovf_read64(base + SSOW_VHWS_TAG);
+ fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
+ (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
+ (int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
+ (int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
+ (int)(val >> 63) & 0x1);
+
+ val = ssovf_read64(base + SSOW_VHWS_WQP);
+ fprintf(f, "\twqp=0x%"PRIx64"\n", val);
+
+ val = ssovf_read64(base + SSOW_VHWS_LINKS);
+ fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
+ (int)(val & 0x3ff), (int)(val >> 10) & 0x1,
+ (int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
+ (int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);
+
+ val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
+ fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
+ (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
+ (int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
+ (int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
+ (int)(val >> 63) & 0x1);
+
+ val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
+ fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
+}
+
+static int
+ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+ int ret;
+ RTE_SET_USED(dev);
+
+ ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+ if (ret)
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+ else
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;
+
+ return 0;
+}
+
+static int
+ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ int ret = 0;
+ const struct octeontx_nic *nic = eth_dev->data->dev_private;
+ pki_mod_qos_t pki_qos;
+ RTE_SET_USED(dev);
+
+ ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+ if (ret)
+ return -EINVAL;
+
+ if (rx_queue_id >= 0)
+ return -EINVAL;
+
+ if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
+ return -ENOTSUP;
+
+ memset(&pki_qos, 0, sizeof(pki_mod_qos_t));
+
+ pki_qos.port_type = 0;
+ pki_qos.index = 0;
+ pki_qos.mmask.f_tag_type = 1;
+ pki_qos.mmask.f_port_add = 1;
+ pki_qos.mmask.f_grp_ok = 1;
+ pki_qos.mmask.f_grp_bad = 1;
+ pki_qos.mmask.f_grptag_ok = 1;
+ pki_qos.mmask.f_grptag_bad = 1;
+
+ pki_qos.tag_type = queue_conf->ev.sched_type;
+ pki_qos.qos_entry.port_add = 0;
+ pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
+ pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
+ pki_qos.qos_entry.grptag_bad = 0;
+ pki_qos.qos_entry.grptag_ok = 0;
+
+ ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
+ if (ret < 0)
+ ssovf_log_err("failed to modify QOS, port=%d, q=%d",
+ nic->port_id, queue_conf->ev.queue_id);
+
+ return ret;
+}
+
+static int
+ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
+{
+ int ret = 0;
+ const struct octeontx_nic *nic = eth_dev->data->dev_private;
+ pki_del_qos_t pki_qos;
+ RTE_SET_USED(dev);
+ RTE_SET_USED(rx_queue_id);
+
+ ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+ if (ret)
+ return -EINVAL;
+
+ pki_qos.port_type = 0;
+ pki_qos.index = 0;
+ memset(&pki_qos, 0, sizeof(pki_del_qos_t));
+ ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
+ if (ret < 0)
+ ssovf_log_err("Failed to delete QOS port=%d, q=%d",
+ nic->port_id, queue_conf->ev.queue_id);
+ return ret;
+}
+
+static int
+ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+
+static int
+ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+static void
+ssovf_dump(struct rte_eventdev *dev, FILE *f)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ uint8_t port;
+
+ /* Dump SSOWVF debug registers */
+ for (port = 0; port < edev->nb_event_ports; port++)
+ ssows_dump(dev->data->ports[port], f);
+}
+
+static int
+ssovf_start(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ struct ssows *ws;
+ uint8_t *base;
+ uint8_t i;
+
+ ssovf_func_trace();
+ for (i = 0; i < edev->nb_event_ports; i++) {
+ ws = dev->data->ports[i];
+ ssows_reset(ws);
+ ws->swtag_req = 0;
+ }
+
+ for (i = 0; i < edev->nb_event_queues; i++) {
+ /* Consume all the events through HWS0 */
+ ssows_flush_events(dev->data->ports[0], i, NULL, NULL);
+
+ base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base += SSO_VHGRP_QCTL;
+ ssovf_write64(1, base); /* Enable SSO group */
+ }
+
+ ssovf_fastpath_fns_set(dev);
+ return 0;
+}
+
+static void
+ssows_handle_event(void *arg, struct rte_event event)
+{
+ struct rte_eventdev *dev = arg;
+
+ if (dev->dev_ops->dev_stop_flush != NULL)
+ dev->dev_ops->dev_stop_flush(dev->data->dev_id, event,
+ dev->data->dev_stop_flush_arg);
+}
+
+static void
+ssovf_stop(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ struct ssows *ws;
+ uint8_t *base;
+ uint8_t i;
+
+ ssovf_func_trace();
+ for (i = 0; i < edev->nb_event_ports; i++) {
+ ws = dev->data->ports[i];
+ ssows_reset(ws);
+ ws->swtag_req = 0;
+ }
+
+ for (i = 0; i < edev->nb_event_queues; i++) {
+ /* Consume all the events through HWS0 */
+ ssows_flush_events(dev->data->ports[0], i,
+ ssows_handle_event, dev);
+
+ base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base += SSO_VHGRP_QCTL;
+ ssovf_write64(0, base); /* Disable SSO group */
+ }
+}
+
+static int
+ssovf_close(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint8_t i;
+
+ for (i = 0; i < edev->nb_event_queues; i++)
+ all_queues[i] = i;
+
+ for (i = 0; i < edev->nb_event_ports; i++)
+ ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
+ edev->nb_event_queues);
+ return 0;
+}
+
+static int
+ssovf_selftest(const char *key __rte_unused, const char *value,
+ void *opaque)
+{
+ int *flag = opaque;
+ *flag = !!atoi(value);
+ return 0;
+}
+
+static int
+ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
+ uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
+{
+ return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
+ timvf_enable_stats);
+}
+
+/* Initialize and register event driver with DPDK Application */
+static struct rte_eventdev_ops ssovf_ops = {
+ .dev_infos_get = ssovf_info_get,
+ .dev_configure = ssovf_configure,
+ .queue_def_conf = ssovf_queue_def_conf,
+ .queue_setup = ssovf_queue_setup,
+ .queue_release = ssovf_queue_release,
+ .port_def_conf = ssovf_port_def_conf,
+ .port_setup = ssovf_port_setup,
+ .port_release = ssovf_port_release,
+ .port_link = ssovf_port_link,
+ .port_unlink = ssovf_port_unlink,
+ .timeout_ticks = ssovf_timeout_ticks,
+
+ .eth_rx_adapter_caps_get = ssovf_eth_rx_adapter_caps_get,
+ .eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
+ .eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
+ .eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
+ .eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
+
+ .timer_adapter_caps_get = ssovf_timvf_caps_get,
+
+ .dev_selftest = test_eventdev_octeontx,
+
+ .dump = ssovf_dump,
+ .dev_start = ssovf_start,
+ .dev_stop = ssovf_stop,
+ .dev_close = ssovf_close
+};
+
+static int
+ssovf_vdev_probe(struct rte_vdev_device *vdev)
+{
+ struct ssovf_info oinfo;
+ struct ssovf_mbox_dev_info info;
+ struct ssovf_evdev *edev;
+ struct rte_eventdev *eventdev;
+ static int ssovf_init_once;
+ const char *name;
+ const char *params;
+ int ret;
+ int selftest = 0;
+
+ static const char *const args[] = {
+ SSOVF_SELFTEST_ARG,
+ TIMVF_ENABLE_STATS_ARG,
+ NULL
+ };
+
+ name = rte_vdev_device_name(vdev);
+ /* More than one instance is not supported */
+ if (ssovf_init_once) {
+ ssovf_log_err("Request to create >1 %s instance", name);
+ return -EINVAL;
+ }
+
+ params = rte_vdev_device_args(vdev);
+ if (params != NULL && params[0] != '\0') {
+ struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+ if (!kvlist) {
+ ssovf_log_info(
+ "Ignoring unsupported params supplied '%s'",
+ name);
+ } else {
+ int ret = rte_kvargs_process(kvlist,
+ SSOVF_SELFTEST_ARG,
+ ssovf_selftest, &selftest);
+ if (ret != 0) {
+ ssovf_log_err("%s: Error in selftest", name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ ret = rte_kvargs_process(kvlist,
+ TIMVF_ENABLE_STATS_ARG,
+ ssovf_selftest, &timvf_enable_stats);
+ if (ret != 0) {
+ ssovf_log_err("%s: Error in timvf stats", name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+ }
+
+ rte_kvargs_free(kvlist);
+ }
+
+ eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
+ rte_socket_id());
+ if (eventdev == NULL) {
+ ssovf_log_err("Failed to create eventdev vdev %s", name);
+ return -ENOMEM;
+ }
+ eventdev->dev_ops = &ssovf_ops;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ ssovf_fastpath_fns_set(eventdev);
+ return 0;
+ }
+
+ ret = ssovf_info(&oinfo);
+ if (ret) {
+ ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
+ goto error;
+ }
+
+ edev = ssovf_pmd_priv(eventdev);
+ edev->max_event_ports = oinfo.total_ssowvfs;
+ edev->max_event_queues = oinfo.total_ssovfs;
+ edev->is_timeout_deq = 0;
+
+ ret = ssovf_mbox_dev_info(&info);
+ if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
+ ssovf_log_err("Failed to get mbox devinfo %d", ret);
+ goto error;
+ }
+
+ edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
+ edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
+ edev->max_num_events = info.max_num_events;
+ ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d",
+ info.min_deq_timeout_ns, info.max_deq_timeout_ns,
+ info.max_num_events);
+
+ if (!edev->max_event_ports || !edev->max_event_queues) {
+ ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
+ edev->max_event_queues, edev->max_event_ports);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
+ name, oinfo.domain, edev->max_event_queues,
+ edev->max_event_ports);
+
+ ssovf_init_once = 1;
+ if (selftest)
+ test_eventdev_octeontx();
+ return 0;
+
+error:
+ rte_event_pmd_vdev_uninit(name);
+ return ret;
+}
+
+static int
+ssovf_vdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ ssovf_log_info("Closing %s", name);
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_ssovf_pmd = {
+ .probe = ssovf_vdev_probe,
+ .remove = ssovf_vdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);
diff --git a/src/spdk/dpdk/drivers/event/octeontx/ssovf_evdev.h b/src/spdk/dpdk/drivers/event/octeontx/ssovf_evdev.h
new file mode 100644
index 00000000..18293e96
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/ssovf_evdev.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __SSOVF_EVDEV_H__
+#define __SSOVF_EVDEV_H__
+
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_io.h>
+
+#include <octeontx_mbox.h>
+#include <octeontx_ethdev.h>
+
+#define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
+
+#define SSOVF_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, otx_logtype_ssovf, \
+ "[%s] %s() " fmt "\n", \
+ RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
+
+#define ssovf_log_info(fmt, ...) SSOVF_LOG(INFO, fmt, ##__VA_ARGS__)
+#define ssovf_log_dbg(fmt, ...) SSOVF_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define ssovf_log_err(fmt, ...) SSOVF_LOG(ERR, fmt, ##__VA_ARGS__)
+#define ssovf_func_trace ssovf_log_dbg
+#define ssovf_log_selftest ssovf_log_info
+
+#define SSO_MAX_VHGRP (64)
+#define SSO_MAX_VHWS (32)
+
+/* SSO VF register offsets */
+#define SSO_VHGRP_QCTL (0x010ULL)
+#define SSO_VHGRP_INT (0x100ULL)
+#define SSO_VHGRP_INT_W1S (0x108ULL)
+#define SSO_VHGRP_INT_ENA_W1S (0x110ULL)
+#define SSO_VHGRP_INT_ENA_W1C (0x118ULL)
+#define SSO_VHGRP_INT_THR (0x140ULL)
+#define SSO_VHGRP_INT_CNT (0x180ULL)
+#define SSO_VHGRP_XAQ_CNT (0x1B0ULL)
+#define SSO_VHGRP_AQ_CNT (0x1C0ULL)
+#define SSO_VHGRP_AQ_THR (0x1E0ULL)
+
+/* BAR2 */
+#define SSO_VHGRP_OP_ADD_WORK0 (0x00ULL)
+#define SSO_VHGRP_OP_ADD_WORK1 (0x08ULL)
+
+/* SSOW VF register offsets (BAR0) */
+#define SSOW_VHWS_GRPMSK_CHGX(x) (0x080ULL | ((x) << 3))
+#define SSOW_VHWS_TAG (0x300ULL)
+#define SSOW_VHWS_WQP (0x308ULL)
+#define SSOW_VHWS_LINKS (0x310ULL)
+#define SSOW_VHWS_PENDTAG (0x340ULL)
+#define SSOW_VHWS_PENDWQP (0x348ULL)
+#define SSOW_VHWS_SWTP (0x400ULL)
+#define SSOW_VHWS_OP_ALLOC_WE (0x410ULL)
+#define SSOW_VHWS_OP_UPD_WQP_GRP0 (0x440ULL)
+#define SSOW_VHWS_OP_UPD_WQP_GRP1 (0x448ULL)
+#define SSOW_VHWS_OP_SWTAG_UNTAG (0x490ULL)
+#define SSOW_VHWS_OP_SWTAG_CLR (0x820ULL)
+#define SSOW_VHWS_OP_DESCHED (0x860ULL)
+#define SSOW_VHWS_OP_DESCHED_NOSCH (0x870ULL)
+#define SSOW_VHWS_OP_SWTAG_DESCHED (0x8C0ULL)
+#define SSOW_VHWS_OP_SWTAG_NOSCHED (0x8D0ULL)
+#define SSOW_VHWS_OP_SWTP_SET (0xC20ULL)
+#define SSOW_VHWS_OP_SWTAG_NORM (0xC80ULL)
+#define SSOW_VHWS_OP_SWTAG_FULL0 (0xCA0UL)
+#define SSOW_VHWS_OP_SWTAG_FULL1 (0xCA8ULL)
+#define SSOW_VHWS_OP_CLR_NSCHED (0x10000ULL)
+#define SSOW_VHWS_OP_GET_WORK0 (0x80000ULL)
+#define SSOW_VHWS_OP_GET_WORK1 (0x80008ULL)
+
+/* Mailbox message constants */
+#define SSO_COPROC 0x2
+
+#define SSO_GETDOMAINCFG 0x1
+#define SSO_IDENTIFY 0x2
+#define SSO_GET_DEV_INFO 0x3
+#define SSO_GET_GETWORK_WAIT 0x4
+#define SSO_SET_GETWORK_WAIT 0x5
+#define SSO_CONVERT_NS_GETWORK_ITER 0x6
+#define SSO_GRP_GET_PRIORITY 0x7
+#define SSO_GRP_SET_PRIORITY 0x8
+
+#define SSOVF_SELFTEST_ARG ("selftest")
+
+/*
+ * In Cavium OcteonTX SoC, all accesses to the device registers are
+ * implictly strongly ordered. So, The relaxed version of IO operation is
+ * safe to use with out any IO memory barriers.
+ */
+#define ssovf_read64 rte_read64_relaxed
+#define ssovf_write64 rte_write64_relaxed
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define ssovf_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define ssovf_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1]]" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+#else /* Un optimized functions for building on non arm64 arch */
+
+#define ssovf_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64(addr); \
+ val1 = rte_read64(((uint8_t *)addr) + 8); \
+} while (0)
+
+#define ssovf_store_pair(val0, val1, addr) \
+do { \
+ rte_write64(val0, addr); \
+ rte_write64(val1, (((uint8_t *)addr) + 8)); \
+} while (0)
+#endif
+
+struct ssovf_info {
+ uint16_t domain; /* Domain id */
+ uint8_t total_ssovfs; /* Total sso groups available in domain */
+ uint8_t total_ssowvfs;/* Total sso hws available in domain */
+};
+
+enum ssovf_type {
+ OCTEONTX_SSO_GROUP, /* SSO group vf */
+ OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */
+};
+
+struct ssovf_evdev {
+ uint8_t max_event_queues;
+ uint8_t max_event_ports;
+ uint8_t is_timeout_deq;
+ uint8_t nb_event_queues;
+ uint8_t nb_event_ports;
+ uint32_t min_deq_timeout_ns;
+ uint32_t max_deq_timeout_ns;
+ int32_t max_num_events;
+} __rte_cache_aligned;
+
+/* Event port aka HWS */
+struct ssows {
+ uint8_t cur_tt;
+ uint8_t cur_grp;
+ uint8_t swtag_req;
+ uint8_t *base;
+ uint8_t *getwork;
+ uint8_t *grps[SSO_MAX_VHGRP];
+ uint8_t port;
+} __rte_cache_aligned;
+
+static inline struct ssovf_evdev *
+ssovf_pmd_priv(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+extern int otx_logtype_ssovf;
+
+uint16_t ssows_enq(void *port, const struct rte_event *ev);
+uint16_t ssows_enq_burst(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_enq_new_burst(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_enq_fwd_burst(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks);
+uint16_t ssows_deq_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks);
+
+typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev);
+void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
+ ssows_handle_event_t fn, void *arg);
+void ssows_reset(struct ssows *ws);
+int ssovf_info(struct ssovf_info *info);
+void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar);
+int test_eventdev_octeontx(void);
+
+#endif /* __SSOVF_EVDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/event/octeontx/ssovf_evdev_selftest.c b/src/spdk/dpdk/drivers/event/octeontx/ssovf_evdev_selftest.c
new file mode 100644
index 00000000..239362fc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/ssovf_evdev_selftest.c
@@ -0,0 +1,1523 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_random.h>
+#include <rte_bus_vdev.h>
+#include <rte_test.h>
+
+#include "ssovf_evdev.h"
+
+#define NUM_PACKETS (1 << 18)
+#define MAX_EVENTS (16 * 1024)
+
+#define OCTEONTX_TEST_RUN(setup, teardown, test) \
+ octeontx_test_run(setup, teardown, test, #test)
+
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+static int evdev;
+static struct rte_mempool *eventdev_test_mempool;
+
+struct event_attr {
+ uint32_t flow_id;
+ uint8_t event_type;
+ uint8_t sub_event_type;
+ uint8_t sched_type;
+ uint8_t queue;
+ uint8_t port;
+};
+
+static uint32_t seqn_list_index;
+static int seqn_list[NUM_PACKETS];
+
+static inline void
+seqn_list_init(void)
+{
+ RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
+ memset(seqn_list, 0, sizeof(seqn_list));
+ seqn_list_index = 0;
+}
+
+static inline int
+seqn_list_update(int val)
+{
+ if (seqn_list_index >= NUM_PACKETS)
+ return -1;
+
+ seqn_list[seqn_list_index++] = val;
+ rte_smp_wmb();
+ return 0;
+}
+
+static inline int
+seqn_list_check(int limit)
+{
+ int i;
+
+ for (i = 0; i < limit; i++) {
+ if (seqn_list[i] != i) {
+ ssovf_log_dbg("Seqn mismatch %d %d", seqn_list[i], i);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+struct test_core_param {
+ rte_atomic32_t *total_events;
+ uint64_t dequeue_tmo_ticks;
+ uint8_t port;
+ uint8_t sched_type;
+};
+
+static int
+testsuite_setup(void)
+{
+ const char *eventdev_name = "event_octeontx";
+
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ ssovf_log_dbg("%d: Eventdev %s not found - creating.",
+ __LINE__, eventdev_name);
+ if (rte_vdev_init(eventdev_name, NULL) < 0) {
+ ssovf_log_dbg("Error creating eventdev %s",
+ eventdev_name);
+ return -1;
+ }
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ ssovf_log_dbg("Error finding newly created eventdev");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+ rte_event_dev_close(evdev);
+}
+
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+ dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+ dev_conf->nb_event_ports = info->max_event_ports;
+ dev_conf->nb_event_queues = info->max_event_queues;
+ dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+ dev_conf->nb_event_port_dequeue_depth =
+ info->max_event_port_dequeue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_events_limit =
+ info->max_num_events;
+}
+
+enum {
+ TEST_EVENTDEV_SETUP_DEFAULT,
+ TEST_EVENTDEV_SETUP_PRIORITY,
+ TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
+};
+
+static inline int
+_eventdev_setup(int mode)
+{
+ int i, ret;
+ struct rte_event_dev_config dev_conf;
+ struct rte_event_dev_info info;
+ const char *pool_name = "evdev_octeontx_test_pool";
+
+ /* Create and destrory pool for each test case to make it standalone */
+ eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
+ MAX_EVENTS,
+ 0 /*MBUF_CACHE_SIZE*/,
+ 0,
+ 512, /* Use very small mbufs */
+ rte_socket_id());
+ if (!eventdev_test_mempool) {
+ ssovf_log_dbg("ERROR creating mempool");
+ return -1;
+ }
+
+ ret = rte_event_dev_info_get(evdev, &info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+ RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
+ "ERROR max_num_events=%d < max_events=%d",
+ info.max_num_events, MAX_EVENTS);
+
+ devconf_set_default_sane_values(&dev_conf, &info);
+ if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
+ dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
+
+ ret = rte_event_dev_configure(evdev, &dev_conf);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
+ if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
+ if (queue_count > 8) {
+ ssovf_log_dbg(
+ "test expects the unique priority per queue");
+ return -ENOTSUP;
+ }
+
+ /* Configure event queues(0 to n) with
+ * RTE_EVENT_DEV_PRIORITY_HIGHEST to
+ * RTE_EVENT_DEV_PRIORITY_LOWEST
+ */
+ uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
+ queue_count;
+ for (i = 0; i < (int)queue_count; i++) {
+ struct rte_event_queue_conf queue_conf;
+
+ ret = rte_event_queue_default_conf_get(evdev, i,
+ &queue_conf);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
+ i);
+ queue_conf.priority = i * step;
+ ret = rte_event_queue_setup(evdev, i, &queue_conf);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+ i);
+ }
+
+ } else {
+ /* Configure event queues with default priority */
+ for (i = 0; i < (int)queue_count; i++) {
+ ret = rte_event_queue_setup(evdev, i, NULL);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+ i);
+ }
+ }
+ /* Configure event ports */
+ uint32_t port_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+ for (i = 0; i < (int)port_count; i++) {
+ ret = rte_event_port_setup(evdev, i, NULL);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
+ ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
+ RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
+ i);
+ }
+
+ ret = rte_event_dev_start(evdev);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
+
+ return 0;
+}
+
+static inline int
+eventdev_setup(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
+}
+
+static inline int
+eventdev_setup_priority(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
+}
+
+static inline int
+eventdev_setup_dequeue_timeout(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
+}
+
+static inline void
+eventdev_teardown(void)
+{
+ rte_event_dev_stop(evdev);
+ rte_mempool_free(eventdev_test_mempool);
+}
+
+static inline void
+update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
+ uint32_t flow_id, uint8_t event_type,
+ uint8_t sub_event_type, uint8_t sched_type,
+ uint8_t queue, uint8_t port)
+{
+ struct event_attr *attr;
+
+ /* Store the event attributes in mbuf for future reference */
+ attr = rte_pktmbuf_mtod(m, struct event_attr *);
+ attr->flow_id = flow_id;
+ attr->event_type = event_type;
+ attr->sub_event_type = sub_event_type;
+ attr->sched_type = sched_type;
+ attr->queue = queue;
+ attr->port = port;
+
+ ev->flow_id = flow_id;
+ ev->sub_event_type = sub_event_type;
+ ev->event_type = event_type;
+ /* Inject the new event */
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = sched_type;
+ ev->queue_id = queue;
+ ev->mbuf = m;
+}
+
+static inline int
+inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
+ uint8_t sched_type, uint8_t queue, uint8_t port,
+ unsigned int events)
+{
+ struct rte_mbuf *m;
+ unsigned int i;
+
+ for (i = 0; i < events; i++) {
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+ m->seqn = i;
+ update_event_and_validation_attr(m, &ev, flow_id, event_type,
+ sub_event_type, sched_type, queue, port);
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ return 0;
+}
+
+static inline int
+check_excess_events(uint8_t port)
+{
+ int i;
+ uint16_t valid_event;
+ struct rte_event ev;
+
+ /* Check for excess events, try for a few times and exit */
+ for (i = 0; i < 32; i++) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+
+ RTE_TEST_ASSERT_SUCCESS(valid_event,
+ "Unexpected valid event=%d", ev.mbuf->seqn);
+ }
+ return 0;
+}
+
+static inline int
+generate_random_events(const unsigned int total_events)
+{
+ struct rte_event_dev_info info;
+ unsigned int i;
+ int ret;
+
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
+ ret = rte_event_dev_info_get(evdev, &info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+ for (i = 0; i < total_events; i++) {
+ ret = inject_events(
+ rte_rand() % info.max_event_queue_flows /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ rte_rand() % queue_count /* queue */,
+ 0 /* port */,
+ 1 /* events */);
+ if (ret)
+ return -1;
+ }
+ return ret;
+}
+
+
+static inline int
+validate_event(struct rte_event *ev)
+{
+ struct event_attr *attr;
+
+ attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+ RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
+ "flow_id mismatch enq=%d deq =%d",
+ attr->flow_id, ev->flow_id);
+ RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
+ "event_type mismatch enq=%d deq =%d",
+ attr->event_type, ev->event_type);
+ RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
+ "sub_event_type mismatch enq=%d deq =%d",
+ attr->sub_event_type, ev->sub_event_type);
+ RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
+ "sched_type mismatch enq=%d deq =%d",
+ attr->sched_type, ev->sched_type);
+ RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
+ "queue mismatch enq=%d deq =%d",
+ attr->queue, ev->queue_id);
+ return 0;
+}
+
+typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
+ struct rte_event *ev);
+
+static inline int
+consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
+{
+ int ret;
+ uint16_t valid_event;
+ uint32_t events = 0, forward_progress_cnt = 0, index = 0;
+ struct rte_event ev;
+
+ while (1) {
+ if (++forward_progress_cnt > UINT16_MAX) {
+ ssovf_log_dbg("Detected deadlock");
+ return -1;
+ }
+
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ forward_progress_cnt = 0;
+ ret = validate_event(&ev);
+ if (ret)
+ return -1;
+
+ if (fn != NULL) {
+ ret = fn(index, port, &ev);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to validate test specific event");
+ }
+
+ ++index;
+
+ rte_pktmbuf_free(ev.mbuf);
+ if (++events >= total_events)
+ break;
+ }
+
+ return check_excess_events(port);
+}
+
+static int
+validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+ RTE_SET_USED(port);
+ RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
+ index, ev->mbuf->seqn);
+ return 0;
+}
+
+static inline int
+test_simple_enqdeq(uint8_t sched_type)
+{
+ int ret;
+
+ ret = inject_events(0 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type */,
+ sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ MAX_EVENTS);
+ if (ret)
+ return -1;
+
+ return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
+}
+
+static int
+test_simple_enqdeq_ordered(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_simple_enqdeq_atomic(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_simple_enqdeq_parallel(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. On dequeue, using single event port(port 0) verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_single_port_deq(void)
+{
+ int ret;
+
+ ret = generate_random_events(MAX_EVENTS);
+ if (ret)
+ return -1;
+
+ return consume_events(0 /* port */, MAX_EVENTS, NULL);
+}
+
+/*
+ * Inject 0..MAX_EVENTS events over 0..queue_count with modulus
+ * operation
+ *
+ * For example, Inject 32 events over 0..7 queues
+ * enqueue events 0, 8, 16, 24 in queue 0
+ * enqueue events 1, 9, 17, 25 in queue 1
+ * ..
+ * ..
+ * enqueue events 7, 15, 23, 31 in queue 7
+ *
+ * On dequeue, Validate the events comes in 0,8,16,24,1,9,17,25..,7,15,23,31
+ * order from queue0(highest priority) to queue7(lowest_priority)
+ */
+static int
+validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ uint32_t range = MAX_EVENTS / queue_count;
+ uint32_t expected_val = (index % range) * queue_count;
+
+ expected_val += ev->queue_id;
+ RTE_SET_USED(port);
+ RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
+ "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+ ev->mbuf->seqn, index, expected_val, range,
+ queue_count, MAX_EVENTS);
+ return 0;
+}
+
+static int
+test_multi_queue_priority(void)
+{
+ uint8_t queue;
+ struct rte_mbuf *m;
+ int i, max_evts_roundoff;
+
+ /* See validate_queue_priority() comments for priority validate logic */
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ max_evts_roundoff = MAX_EVENTS / queue_count;
+ max_evts_roundoff *= queue_count;
+
+ for (i = 0; i < max_evts_roundoff; i++) {
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+ m->seqn = i;
+ queue = i % queue_count;
+ update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
+ 0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
+ rte_event_enqueue_burst(evdev, 0, &ev, 1);
+ }
+
+ return consume_events(0, max_evts_roundoff, validate_queue_priority);
+}
+
+static int
+worker_multi_port_fn(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ rte_atomic32_t *total_events = param->total_events;
+ int ret;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ ret = validate_event(&ev);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ }
+ return 0;
+}
+
+static inline int
+wait_workers_to_join(int lcore, const rte_atomic32_t *count)
+{
+ uint64_t cycles, print_cycles;
+ RTE_SET_USED(count);
+
+ print_cycles = cycles = rte_get_timer_cycles();
+ while (rte_eal_get_lcore_state(lcore) != FINISHED) {
+ uint64_t new_cycles = rte_get_timer_cycles();
+
+ if (new_cycles - print_cycles > rte_get_timer_hz()) {
+ ssovf_log_dbg("\r%s: events %d", __func__,
+ rte_atomic32_read(count));
+ print_cycles = new_cycles;
+ }
+ if (new_cycles - cycles > rte_get_timer_hz() * 10) {
+ ssovf_log_dbg(
+ "%s: No schedules for seconds, deadlock (%d)",
+ __func__,
+ rte_atomic32_read(count));
+ rte_event_dev_dump(evdev, stdout);
+ cycles = new_cycles;
+ return -1;
+ }
+ }
+ rte_eal_mp_wait_lcore();
+ return 0;
+}
+
+
+static inline int
+launch_workers_and_wait(int (*master_worker)(void *),
+ int (*slave_workers)(void *), uint32_t total_events,
+ uint8_t nb_workers, uint8_t sched_type)
+{
+ uint8_t port = 0;
+ int w_lcore;
+ int ret;
+ struct test_core_param *param;
+ rte_atomic32_t atomic_total_events;
+ uint64_t dequeue_tmo_ticks;
+
+ if (!nb_workers)
+ return 0;
+
+ rte_atomic32_set(&atomic_total_events, total_events);
+ seqn_list_init();
+
+ param = malloc(sizeof(struct test_core_param) * nb_workers);
+ if (!param)
+ return -1;
+
+ ret = rte_event_dequeue_timeout_ticks(evdev,
+ rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
+ if (ret) {
+ free(param);
+ return -1;
+ }
+
+ param[0].total_events = &atomic_total_events;
+ param[0].sched_type = sched_type;
+ param[0].port = 0;
+ param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
+ rte_smp_wmb();
+
+ w_lcore = rte_get_next_lcore(
+ /* start core */ -1,
+ /* skip master */ 1,
+ /* wrap */ 0);
+ rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+
+ for (port = 1; port < nb_workers; port++) {
+ param[port].total_events = &atomic_total_events;
+ param[port].sched_type = sched_type;
+ param[port].port = port;
+ param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
+ rte_smp_wmb();
+ w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
+ rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+ }
+
+ ret = wait_workers_to_join(w_lcore, &atomic_total_events);
+ free(param);
+ return ret;
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. Dequeue the events through multiple ports and verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_multi_port_deq(void)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint32_t nr_ports;
+ int ret;
+
+ ret = generate_random_events(total_events);
+ if (ret)
+ return -1;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
+ nr_ports, rte_lcore_count() - 1);
+ return 0;
+ }
+
+ return launch_workers_and_wait(worker_multi_port_fn,
+ worker_multi_port_fn, total_events,
+ nr_ports, 0xff /* invalid */);
+}
+
+static
+void flush(uint8_t dev_id, struct rte_event event, void *arg)
+{
+ unsigned int *count = arg;
+
+ RTE_SET_USED(dev_id);
+ if (event.event_type == RTE_EVENT_TYPE_CPU)
+ *count = *count + 1;
+
+}
+
+static int
+test_dev_stop_flush(void)
+{
+ unsigned int total_events = MAX_EVENTS, count = 0;
+ int ret;
+
+ ret = generate_random_events(total_events);
+ if (ret)
+ return -1;
+
+ ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
+ if (ret)
+ return -2;
+ rte_event_dev_stop(evdev);
+ ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
+ if (ret)
+ return -3;
+ RTE_TEST_ASSERT_EQUAL(total_events, count,
+ "count mismatch total_events=%d count=%d",
+ total_events, count);
+ return 0;
+}
+
+static int
+validate_queue_to_port_single_link(uint32_t index, uint8_t port,
+ struct rte_event *ev)
+{
+ RTE_SET_USED(index);
+ RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
+ "queue mismatch enq=%d deq =%d",
+ port, ev->queue_id);
+ return 0;
+}
+
+/*
+ * Link queue x to port x and check correctness of link by checking
+ * queue_id == x on dequeue on the specific port x
+ */
+static int
+test_queue_to_port_single_link(void)
+{
+ int i, nr_links, ret;
+
+ uint32_t port_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+
+ /* Unlink all connections that created in eventdev_setup */
+ for (i = 0; i < (int)port_count; i++) {
+ ret = rte_event_port_unlink(evdev, i, NULL, 0);
+ RTE_TEST_ASSERT(ret >= 0,
+ "Failed to unlink all queues port=%d", i);
+ }
+
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
+ nr_links = RTE_MIN(port_count, queue_count);
+ const unsigned int total_events = MAX_EVENTS / nr_links;
+
+ /* Link queue x to port x and inject events to queue x through port x */
+ for (i = 0; i < nr_links; i++) {
+ uint8_t queue = (uint8_t)i;
+
+ ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
+ RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
+
+ ret = inject_events(
+ 0x100 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ queue /* queue */,
+ i /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+ }
+
+ /* Verify the events generated from correct queue */
+ for (i = 0; i < nr_links; i++) {
+ ret = consume_events(i /* port */, total_events,
+ validate_queue_to_port_single_link);
+ if (ret)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
+ struct rte_event *ev)
+{
+ RTE_SET_USED(index);
+ RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
+ "queue mismatch enq=%d deq =%d",
+ port, ev->queue_id);
+ return 0;
+}
+
+/*
+ * Link all even number of queues to port 0 and all odd number of queues to
+ * port 1 and verify the link connection on dequeue
+ */
+static int
+test_queue_to_port_multi_link(void)
+{
+ int ret, port0_events = 0, port1_events = 0;
+ uint8_t queue, port;
+ uint32_t nr_queues = 0;
+ uint32_t nr_ports = 0;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &nr_queues), "Queue count get failed");
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &nr_queues), "Queue count get failed");
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+
+ if (nr_ports < 2) {
+ ssovf_log_dbg("%s: Not enough ports to test ports=%d",
+ __func__, nr_ports);
+ return 0;
+ }
+
+ /* Unlink all connections that created in eventdev_setup */
+ for (port = 0; port < nr_ports; port++) {
+ ret = rte_event_port_unlink(evdev, port, NULL, 0);
+ RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
+ port);
+ }
+
+ const unsigned int total_events = MAX_EVENTS / nr_queues;
+
+ /* Link all even number of queues to port0 and odd numbers to port 1*/
+ for (queue = 0; queue < nr_queues; queue++) {
+ port = queue & 0x1;
+ ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
+ RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
+ queue, port);
+
+ ret = inject_events(
+ 0x100 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ queue /* queue */,
+ port /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+
+ if (port == 0)
+ port0_events += total_events;
+ else
+ port1_events += total_events;
+ }
+
+ ret = consume_events(0 /* port */, port0_events,
+ validate_queue_to_port_multi_link);
+ if (ret)
+ return -1;
+ ret = consume_events(1 /* port */, port1_events,
+ validate_queue_to_port_multi_link);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
+static int
+worker_flow_based_pipeline(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint8_t new_sched_type = param->sched_type;
+ rte_atomic32_t *total_events = param->total_events;
+ uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+ dequeue_tmo_ticks);
+ if (!valid_event)
+ continue;
+
+ /* Events from stage 0 */
+ if (ev.sub_event_type == 0) {
+ /* Move to atomic flow to maintain the ordering */
+ ev.flow_id = 0x2;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type = 1; /* stage 1 */
+ ev.sched_type = new_sched_type;
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ } else if (ev.sub_event_type == 1) { /* Events from stage 1*/
+ if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ssovf_log_dbg("Failed to update seqn_list");
+ return -1;
+ }
+ } else {
+ ssovf_log_dbg("Invalid ev.sub_event_type = %d",
+ ev.sub_event_type);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int
+test_multiport_flow_sched_type_test(uint8_t in_sched_type,
+ uint8_t out_sched_type)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint32_t nr_ports;
+ int ret;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
+ nr_ports, rte_lcore_count() - 1);
+ return 0;
+ }
+
+ /* Injects events with m->seqn=0 to total_events */
+ ret = inject_events(
+ 0x1 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ in_sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+
+ ret = launch_workers_and_wait(worker_flow_based_pipeline,
+ worker_flow_based_pipeline,
+ total_events, nr_ports, out_sched_type);
+ if (ret)
+ return -1;
+
+ if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+ out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ /* Check the events order maintained or not */
+ return seqn_list_check(total_events);
+ }
+ return 0;
+}
+
+
+/* Multi port ordered to atomic transaction */
+static int
+test_multi_port_flow_ordered_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_ordered_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_ordered_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_atomic_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_atomic_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_atomic_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_parallel_to_atomic(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_parallel_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_parallel_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+worker_group_based_pipeline(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint8_t new_sched_type = param->sched_type;
+ rte_atomic32_t *total_events = param->total_events;
+ uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+ dequeue_tmo_ticks);
+ if (!valid_event)
+ continue;
+
+ /* Events from stage 0(group 0) */
+ if (ev.queue_id == 0) {
+ /* Move to atomic flow to maintain the ordering */
+ ev.flow_id = 0x2;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sched_type = new_sched_type;
+ ev.queue_id = 1; /* Stage 1*/
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ } else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
+ if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ssovf_log_dbg("Failed to update seqn_list");
+ return -1;
+ }
+ } else {
+ ssovf_log_dbg("Invalid ev.queue_id = %d", ev.queue_id);
+ return -1;
+ }
+ }
+
+
+ return 0;
+}
+
+static int
+test_multiport_queue_sched_type_test(uint8_t in_sched_type,
+ uint8_t out_sched_type)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint32_t nr_ports;
+ int ret;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ if (queue_count < 2 || !nr_ports) {
+ ssovf_log_dbg("%s: Not enough queues=%d ports=%d or workers=%d",
+ __func__, queue_count, nr_ports,
+ rte_lcore_count() - 1);
+ return 0;
+ }
+
+ /* Injects events with m->seqn=0 to total_events */
+ ret = inject_events(
+ 0x1 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ in_sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+
+ ret = launch_workers_and_wait(worker_group_based_pipeline,
+ worker_group_based_pipeline,
+ total_events, nr_ports, out_sched_type);
+ if (ret)
+ return -1;
+
+ if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+ out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ /* Check the events order maintained or not */
+ return seqn_list_check(total_events);
+ }
+ return 0;
+}
+
+static int
+test_multi_port_queue_ordered_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_ordered_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_ordered_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_atomic_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_atomic_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_atomic_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_parallel_to_atomic(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_parallel_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_parallel_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.sub_event_type == 255) { /* last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type++;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+ return 0;
+}
+
+static int
+launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
+{
+ uint32_t nr_ports;
+ int ret;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
+ nr_ports, rte_lcore_count() - 1);
+ return 0;
+ }
+
+ /* Injects events with m->seqn=0 to total_events */
+ ret = inject_events(
+ 0x1 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
+ 0 /* queue */,
+ 0 /* port */,
+ MAX_EVENTS /* events */);
+ if (ret)
+ return -1;
+
+ return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
+ 0xff /* invalid */);
+}
+
+/* Flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_flow_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_flow_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ uint8_t nr_queues = queue_count;
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.queue_id == nr_queues - 1) { /* last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.queue_id++;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+ return 0;
+}
+
+/* Queue based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_queue_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_queue_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ uint8_t nr_queues = queue_count;
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.queue_id == nr_queues - 1) { /* Last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.queue_id++;
+ ev.sub_event_type = rte_rand() % 256;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+ return 0;
+}
+
+/* Queue and flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_mixed_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_mixed_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_ordered_flow_producer(void *arg)
+{
+ struct test_core_param *param = arg;
+ uint8_t port = param->port;
+ struct rte_mbuf *m;
+ int counter = 0;
+
+ while (counter < NUM_PACKETS) {
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ if (m == NULL)
+ continue;
+
+ m->seqn = counter++;
+
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ ev.flow_id = 0x1; /* Generate a fat flow */
+ ev.sub_event_type = 0;
+ /* Inject the new event */
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+ ev.queue_id = 0;
+ ev.mbuf = m;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+
+ return 0;
+}
+
+static inline int
+test_producer_consumer_ingress_order_test(int (*fn)(void *))
+{
+ uint32_t nr_ports;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (rte_lcore_count() < 3 || nr_ports < 2) {
+ ssovf_log_dbg("### Not enough cores for %s test.", __func__);
+ return 0;
+ }
+
+ launch_workers_and_wait(worker_ordered_flow_producer, fn,
+ NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
+ /* Check the events order maintained or not */
+ return seqn_list_check(NUM_PACKETS);
+}
+
+/* Flow based producer consumer ingress order test */
+static int
+test_flow_producer_consumer_ingress_order_test(void)
+{
+ return test_producer_consumer_ingress_order_test(
+ worker_flow_based_pipeline);
+}
+
+/* Queue based producer consumer ingress order test */
+static int
+test_queue_producer_consumer_ingress_order_test(void)
+{
+ return test_producer_consumer_ingress_order_test(
+ worker_group_based_pipeline);
+}
+
+static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
+ int (*test)(void), const char *name)
+{
+ if (setup() < 0) {
+ ssovf_log_selftest("Error setting up test %s", name);
+ unsupported++;
+ } else {
+ if (test() < 0) {
+ failed++;
+ ssovf_log_selftest("%s Failed", name);
+ } else {
+ passed++;
+ ssovf_log_selftest("%s Passed", name);
+ }
+ }
+
+ total++;
+ tdown();
+}
+
+int
+test_eventdev_octeontx(void)
+{
+ testsuite_setup();
+
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_queue_enq_single_port_deq);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_dev_stop_flush);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_queue_enq_multi_port_deq);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_queue_to_port_single_link);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_queue_to_port_multi_link);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_max_stages_random_sched_type);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_max_stages_random_sched_type);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_mixed_max_stages_random_sched_type);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_flow_producer_consumer_ingress_order_test);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_queue_producer_consumer_ingress_order_test);
+ OCTEONTX_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
+ test_multi_queue_priority);
+ OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
+ test_multi_port_flow_ordered_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
+ test_multi_port_queue_ordered_to_atomic);
+
+ ssovf_log_selftest("Total tests : %d", total);
+ ssovf_log_selftest("Passed : %d", passed);
+ ssovf_log_selftest("Failed : %d", failed);
+ ssovf_log_selftest("Not supported : %d", unsupported);
+
+ testsuite_teardown();
+
+ if (failed)
+ return -1;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/event/octeontx/ssovf_probe.c b/src/spdk/dpdk/drivers/event/octeontx/ssovf_probe.c
new file mode 100644
index 00000000..b3db596d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/ssovf_probe.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+#include "octeontx_mbox.h"
+#include "ssovf_evdev.h"
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B
+#define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF 0xA04D
+
+#define SSO_MAX_VHGRP (64)
+#define SSO_MAX_VHWS (32)
+
+#define SSO_VHGRP_AQ_THR (0x1E0ULL)
+
+struct ssovf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+};
+
+struct ssowvf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+ void *bar4;
+};
+
+struct ssowvf_identify {
+ uint16_t domain;
+ uint16_t vfid;
+};
+
+struct ssodev {
+ uint8_t total_ssovfs;
+ uint8_t total_ssowvfs;
+ struct ssovf_res grp[SSO_MAX_VHGRP];
+ struct ssowvf_res hws[SSO_MAX_VHWS];
+};
+
+static struct ssodev sdev;
+
+/* Interface functions */
+int
+ssovf_info(struct ssovf_info *info)
+{
+ uint8_t i;
+ uint16_t domain;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY || info == NULL)
+ return -EINVAL;
+
+ if (sdev.total_ssovfs == 0 || sdev.total_ssowvfs == 0)
+ return -ENODEV;
+
+ domain = sdev.grp[0].domain;
+ for (i = 0; i < sdev.total_ssovfs; i++) {
+ /* Check vfid's are contiguous and belong to same domain */
+ if (sdev.grp[i].vfid != i ||
+ sdev.grp[i].bar0 == NULL ||
+ sdev.grp[i].domain != domain) {
+ mbox_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
+ i, sdev.grp[i].vfid,
+ domain, sdev.grp[i].domain,
+ sdev.grp[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < sdev.total_ssowvfs; i++) {
+ /* Check vfid's are contiguous and belong to same domain */
+ if (sdev.hws[i].vfid != i ||
+ sdev.hws[i].bar0 == NULL ||
+ sdev.hws[i].domain != domain) {
+ mbox_log_err("HWS error, vfid=%d/%d domain=%d/%d %p",
+ i, sdev.hws[i].vfid,
+ domain, sdev.hws[i].domain,
+ sdev.hws[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ info->domain = domain;
+ info->total_ssovfs = sdev.total_ssovfs;
+ info->total_ssowvfs = sdev.total_ssowvfs;
+ return 0;
+}
+
+void*
+ssovf_bar(enum ssovf_type type, uint8_t id, uint8_t bar)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+ type > OCTEONTX_SSO_HWS)
+ return NULL;
+
+ if (type == OCTEONTX_SSO_GROUP) {
+ if (id >= sdev.total_ssovfs)
+ return NULL;
+ } else {
+ if (id >= sdev.total_ssowvfs)
+ return NULL;
+ }
+
+ if (type == OCTEONTX_SSO_GROUP) {
+ switch (bar) {
+ case 0:
+ return sdev.grp[id].bar0;
+ case 2:
+ return sdev.grp[id].bar2;
+ default:
+ return NULL;
+ }
+ } else {
+ switch (bar) {
+ case 0:
+ return sdev.hws[id].bar0;
+ case 2:
+ return sdev.hws[id].bar2;
+ case 4:
+ return sdev.hws[id].bar4;
+ default:
+ return NULL;
+ }
+ }
+}
+
+/* SSOWVF pcie device aka event port probe */
+
+static int
+ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint16_t vfid;
+ struct ssowvf_res *res;
+ struct ssowvf_identify *id;
+ uint8_t *ram_mbox_base;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL ||
+ pci_dev->mem_resource[4].addr == NULL) {
+ mbox_log_err("Empty bars %p %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr,
+ pci_dev->mem_resource[4].addr);
+ return -ENODEV;
+ }
+
+ if (pci_dev->mem_resource[4].len != SSOW_BAR4_LEN) {
+ mbox_log_err("Bar4 len mismatch %d != %d",
+ SSOW_BAR4_LEN, (int)pci_dev->mem_resource[4].len);
+ return -EINVAL;
+ }
+
+ id = pci_dev->mem_resource[4].addr;
+ vfid = id->vfid;
+ if (vfid >= SSO_MAX_VHWS) {
+ mbox_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS);
+ return -EINVAL;
+ }
+
+ res = &sdev.hws[vfid];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->bar4 = pci_dev->mem_resource[4].addr;
+ res->domain = id->domain;
+
+ sdev.total_ssowvfs++;
+ if (vfid == 0) {
+ ram_mbox_base = ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
+ if (octeontx_mbox_set_ram_mbox_base(ram_mbox_base)) {
+ mbox_log_err("Invalid Failed to set ram mbox base");
+ return -EINVAL;
+ }
+ }
+
+ rte_wmb();
+ mbox_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain,
+ res->vfid, sdev.total_ssowvfs);
+ return 0;
+}
+
+static const struct rte_pci_id pci_ssowvf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_SSOWS_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_ssowvf = {
+ .id_table = pci_ssowvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ssowvf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssowvf, pci_ssowvf);
+
+/* SSOVF pcie device aka event queue probe */
+
+static int
+ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint64_t val;
+ uint16_t vfid;
+ uint8_t *idreg;
+ struct ssovf_res *res;
+ uint8_t *reg;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL) {
+ mbox_log_err("Empty bars %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr);
+ return -ENODEV;
+ }
+ idreg = pci_dev->mem_resource[0].addr;
+ idreg += SSO_VHGRP_AQ_THR;
+ val = rte_read64(idreg);
+
+ /* Write back the default value of aq_thr */
+ rte_write64((1ULL << 33) - 1, idreg);
+ vfid = (val >> 16) & 0xffff;
+ if (vfid >= SSO_MAX_VHGRP) {
+ mbox_log_err("Invalid vfid (%d/%d)", vfid, SSO_MAX_VHGRP);
+ return -EINVAL;
+ }
+
+ res = &sdev.grp[vfid];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->domain = val & 0xffff;
+
+ sdev.total_ssovfs++;
+ if (vfid == 0) {
+ reg = ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
+ reg += SSO_VHGRP_PF_MBOX(1);
+ if (octeontx_mbox_set_reg(reg)) {
+ mbox_log_err("Invalid Failed to set mbox_reg");
+ return -EINVAL;
+ }
+ }
+
+ rte_wmb();
+ mbox_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain,
+ res->vfid, sdev.total_ssovfs);
+ return 0;
+}
+
+static const struct rte_pci_id pci_ssovf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_ssovf = {
+ .id_table = pci_ssovf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ssovf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssovf, pci_ssovf);
diff --git a/src/spdk/dpdk/drivers/event/octeontx/ssovf_worker.c b/src/spdk/dpdk/drivers/event/octeontx/ssovf_worker.c
new file mode 100644
index 00000000..fffa9024
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/ssovf_worker.c
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include "ssovf_worker.h"
+
+static __rte_always_inline void
+ssows_new_event(struct ssows *ws, const struct rte_event *ev)
+{
+ const uint64_t event_ptr = ev->u64;
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t new_tt = ev->sched_type;
+ const uint8_t grp = ev->queue_id;
+
+ ssows_add_work(ws, event_ptr, tag, new_tt, grp);
+}
+
+static __rte_always_inline void
+ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
+{
+ const uint8_t cur_tt = ws->cur_tt;
+ const uint8_t new_tt = ev->sched_type;
+ const uint32_t tag = (uint32_t)ev->event;
+ /*
+ * cur_tt/new_tt SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
+ *
+ * SSO_SYNC_ORDERED norm norm untag
+ * SSO_SYNC_ATOMIC norm norm untag
+ * SSO_SYNC_UNTAGGED full full NOOP
+ */
+ if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
+ if (new_tt != SSO_SYNC_UNTAGGED) {
+ ssows_swtag_full(ws, ev->u64, tag,
+ new_tt, grp);
+ }
+ } else {
+ if (likely(new_tt != SSO_SYNC_UNTAGGED))
+ ssows_swtag_norm(ws, tag, new_tt);
+ else
+ ssows_swtag_untag(ws);
+ }
+ ws->swtag_req = 1;
+}
+
+#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)
+
+static __rte_always_inline void
+ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
+{
+ const uint64_t event_ptr = ev->u64;
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t cur_tt = ws->cur_tt;
+ const uint8_t new_tt = ev->sched_type;
+
+ if (cur_tt == SSO_SYNC_ORDERED) {
+ /* Create unique tag based on custom event type and new grp */
+ uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;
+
+ newtag |= grp << 20;
+ newtag |= tag;
+ ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
+ rte_smp_wmb();
+ ssows_swtag_wait(ws);
+ } else {
+ rte_smp_wmb();
+ }
+ ssows_add_work(ws, event_ptr, tag, new_tt, grp);
+}
+
+static __rte_always_inline void
+ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
+{
+ const uint8_t grp = ev->queue_id;
+
+ /* Group hasn't changed, Use SWTAG to forward the event */
+ if (ws->cur_grp == grp)
+ ssows_fwd_swtag(ws, ev, grp);
+ else
+ /*
+ * Group has been changed for group based work pipelining,
+ * Use deschedule/add_work operation to transfer the event to
+ * new group/core
+ */
+ ssows_fwd_group(ws, ev, grp);
+}
+
+static __rte_always_inline void
+ssows_release_event(struct ssows *ws)
+{
+ if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
+ ssows_swtag_untag(ws);
+}
+
+__rte_always_inline uint16_t __hot
+ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+ struct ssows *ws = port;
+
+ RTE_SET_USED(timeout_ticks);
+
+ if (ws->swtag_req) {
+ ws->swtag_req = 0;
+ ssows_swtag_wait(ws);
+ return 1;
+ } else {
+ return ssows_get_work(ws, ev);
+ }
+}
+
+__rte_always_inline uint16_t __hot
+ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+ struct ssows *ws = port;
+ uint64_t iter;
+ uint16_t ret = 1;
+
+ if (ws->swtag_req) {
+ ws->swtag_req = 0;
+ ssows_swtag_wait(ws);
+ } else {
+ ret = ssows_get_work(ws, ev);
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
+ ret = ssows_get_work(ws, ev);
+ }
+ return ret;
+}
+
+uint16_t __hot
+ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
+ uint64_t timeout_ticks)
+{
+ RTE_SET_USED(nb_events);
+
+ return ssows_deq(port, ev, timeout_ticks);
+}
+
+uint16_t __hot
+ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
+ uint64_t timeout_ticks)
+{
+ RTE_SET_USED(nb_events);
+
+ return ssows_deq_timeout(port, ev, timeout_ticks);
+}
+
+__rte_always_inline uint16_t __hot
+ssows_enq(void *port, const struct rte_event *ev)
+{
+ struct ssows *ws = port;
+ uint16_t ret = 1;
+
+ switch (ev->op) {
+ case RTE_EVENT_OP_NEW:
+ rte_smp_wmb();
+ ssows_new_event(ws, ev);
+ break;
+ case RTE_EVENT_OP_FORWARD:
+ ssows_forward_event(ws, ev);
+ break;
+ case RTE_EVENT_OP_RELEASE:
+ ssows_release_event(ws);
+ break;
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+uint16_t __hot
+ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+{
+ RTE_SET_USED(nb_events);
+ return ssows_enq(port, ev);
+}
+
+uint16_t __hot
+ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+{
+ uint16_t i;
+ struct ssows *ws = port;
+
+ rte_smp_wmb();
+ for (i = 0; i < nb_events; i++)
+ ssows_new_event(ws, &ev[i]);
+
+ return nb_events;
+}
+
+uint16_t __hot
+ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+{
+ struct ssows *ws = port;
+ RTE_SET_USED(nb_events);
+
+ ssows_forward_event(ws, ev);
+
+ return 1;
+}
+
+void
+ssows_flush_events(struct ssows *ws, uint8_t queue_id,
+ ssows_handle_event_t fn, void *arg)
+{
+ uint32_t reg_off;
+ struct rte_event ev;
+ uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
+ uint64_t get_work0, get_work1;
+ uint64_t sched_type_queue;
+ uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
+
+ enable = ssovf_read64(base + SSO_VHGRP_QCTL);
+ if (!enable)
+ return;
+
+ reg_off = SSOW_VHWS_OP_GET_WORK0;
+ reg_off |= 1 << 17; /* Grouped */
+ reg_off |= 1 << 16; /* WAIT */
+ reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
+ while (aq_cnt || cq_ds_cnt) {
+ aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
+ cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
+ /* Extract cq and ds count */
+ cq_ds_cnt &= 0x1FFF1FFF0000;
+
+ ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);
+
+ sched_type_queue = (get_work0 >> 32) & 0xfff;
+ ws->cur_tt = sched_type_queue & 0x3;
+ ws->cur_grp = sched_type_queue >> 2;
+ sched_type_queue = sched_type_queue << 38;
+ ev.event = sched_type_queue | (get_work0 & 0xffffffff);
+ if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
+ ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
+ (ev.event >> 20) & 0x7F);
+ else
+ ev.u64 = get_work1;
+
+ if (fn != NULL && ev.u64 != 0)
+ fn(arg, ev);
+ }
+}
+
+void
+ssows_reset(struct ssows *ws)
+{
+ uint64_t tag;
+ uint64_t pend_tag;
+ uint8_t pend_tt;
+ uint8_t tt;
+
+ tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
+ pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);
+
+ if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
+ pend_tt = (pend_tag >> 32) & 0x3;
+ if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
+ ssows_desched(ws);
+ } else {
+ tt = (tag >> 32) & 0x3;
+ if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
+ ssows_swtag_untag(ws);
+ }
+}
diff --git a/src/spdk/dpdk/drivers/event/octeontx/ssovf_worker.h b/src/spdk/dpdk/drivers/event/octeontx/ssovf_worker.h
new file mode 100644
index 00000000..7c7306b5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/ssovf_worker.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_common.h>
+#include <rte_branch_prediction.h>
+
+#include <octeontx_mbox.h>
+
+#include "ssovf_evdev.h"
+#include "octeontx_rxtx.h"
+
+enum {
+ SSO_SYNC_ORDERED,
+ SSO_SYNC_ATOMIC,
+ SSO_SYNC_UNTAGGED,
+ SSO_SYNC_EMPTY
+};
+
+#ifndef __hot
+#define __hot __attribute__((hot))
+#endif
+
+/* SSO Operations */
+
+static __rte_always_inline struct rte_mbuf *
+ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
+{
+ struct rte_mbuf *mbuf;
+ octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
+
+ /* Get mbuf from wqe */
+ mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
+ OCTTX_PACKET_WQE_SKIP);
+ rte_prefetch_non_temporal(mbuf);
+ mbuf->packet_type =
+ ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
+ mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
+ mbuf->pkt_len = wqe->s.w1.len;
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->nb_segs = 1;
+ mbuf->ol_flags = 0;
+ mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
+ rte_mbuf_refcnt_set(mbuf, 1);
+ return mbuf;
+}
+
+static __rte_always_inline uint16_t
+ssows_get_work(struct ssows *ws, struct rte_event *ev)
+{
+ uint64_t get_work0, get_work1;
+ uint64_t sched_type_queue;
+
+ ssovf_load_pair(get_work0, get_work1, ws->getwork);
+
+ sched_type_queue = (get_work0 >> 32) & 0xfff;
+ ws->cur_tt = sched_type_queue & 0x3;
+ ws->cur_grp = sched_type_queue >> 2;
+ sched_type_queue = sched_type_queue << 38;
+ ev->event = sched_type_queue | (get_work0 & 0xffffffff);
+ if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
+ ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
+ (ev->event >> 20) & 0x7F);
+ } else {
+ ev->u64 = get_work1;
+ }
+
+ return !!get_work1;
+}
+
+static __rte_always_inline void
+ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
+ const uint8_t new_tt, const uint8_t grp)
+{
+ uint64_t add_work0;
+
+ add_work0 = tag | ((uint64_t)(new_tt) << 32);
+ ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
+}
+
+static __rte_always_inline void
+ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
+ const uint8_t new_tt, const uint8_t grp)
+{
+ uint64_t swtag_full0;
+
+ swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
+ ((uint64_t)grp << 34);
+ ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
+ SSOW_VHWS_OP_SWTAG_FULL0));
+}
+
+static __rte_always_inline void
+ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
+{
+ uint64_t val;
+
+ val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
+ ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
+}
+
+static __rte_always_inline void
+ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
+{
+ uint64_t val;
+
+ val = tag | ((uint64_t)(new_tt & 0x3) << 32);
+ ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
+}
+
+static __rte_always_inline void
+ssows_swtag_untag(struct ssows *ws)
+{
+ ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
+ ws->cur_tt = SSO_SYNC_UNTAGGED;
+}
+
+static __rte_always_inline void
+ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
+{
+ ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
+ SSOW_VHWS_OP_UPD_WQP_GRP0));
+}
+
+static __rte_always_inline void
+ssows_desched(struct ssows *ws)
+{
+ ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
+}
+
+static __rte_always_inline void
+ssows_swtag_wait(struct ssows *ws)
+{
+ /* Wait for the SWTAG/SWTAG_FULL operation */
+ while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
+ ;
+}
diff --git a/src/spdk/dpdk/drivers/event/octeontx/timvf_evdev.c b/src/spdk/dpdk/drivers/event/octeontx/timvf_evdev.c
new file mode 100644
index 00000000..abbc9a77
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/timvf_evdev.c
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include "timvf_evdev.h"
+
+int otx_logtype_timvf;
+
+RTE_INIT(otx_timvf_init_log)
+{
+ otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer");
+ if (otx_logtype_timvf >= 0)
+ rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE);
+}
+
+struct __rte_packed timvf_mbox_dev_info {
+ uint64_t ring_active[4];
+ uint64_t clk_freq;
+};
+
+/* Response messages */
+enum {
+ MBOX_RET_SUCCESS,
+ MBOX_RET_INVALID,
+ MBOX_RET_INTERNAL_ERR,
+};
+
+static int
+timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ uint16_t len = sizeof(struct timvf_mbox_dev_info);
+
+ hdr.coproc = TIM_COPROC;
+ hdr.msg = TIM_GET_DEV_INFO;
+ hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */
+
+ memset(info, 0, len);
+ return octeontx_mbox_send(&hdr, NULL, 0, info, len);
+}
+
+static void
+timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer_adapter_info *adptr_info)
+{
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ adptr_info->max_tmo_ns = timr->max_tout;
+ adptr_info->min_resolution_ns = timr->tck_nsec;
+ rte_memcpy(&adptr_info->conf, &adptr->data->conf,
+ sizeof(struct rte_event_timer_adapter_conf));
+}
+
+static int
+timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ uint16_t len = sizeof(struct timvf_ctrl_reg);
+ int ret;
+
+ hdr.coproc = TIM_COPROC;
+ hdr.msg = TIM_SET_RING_INFO;
+ hdr.vfid = ring_id;
+
+ ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
+ if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
+ return -EACCES;
+ return 0;
+}
+
+static int
+timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+
+ hdr.coproc = TIM_COPROC;
+ hdr.msg = TIM_RING_START_CYC_GET;
+ hdr.vfid = ring_id;
+ *now = 0;
+ return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
+}
+
+static int
+optimize_bucket_parameters(struct timvf_ring *timr)
+{
+ uint32_t hbkts;
+ uint32_t lbkts;
+ uint64_t tck_nsec;
+
+ hbkts = rte_align32pow2(timr->nb_bkts);
+ tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10);
+
+ if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
+ hbkts = 0;
+
+ lbkts = rte_align32prevpow2(timr->nb_bkts);
+ tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10);
+
+ if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
+ lbkts = 0;
+
+ if (!hbkts && !lbkts)
+ return 0;
+
+ if (!hbkts) {
+ timr->nb_bkts = lbkts;
+ goto end;
+ } else if (!lbkts) {
+ timr->nb_bkts = hbkts;
+ goto end;
+ }
+
+ timr->nb_bkts = (hbkts - timr->nb_bkts) <
+ (timr->nb_bkts - lbkts) ? hbkts : lbkts;
+end:
+ timr->get_target_bkt = bkt_and;
+ timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout /
+ (timr->nb_bkts - 1)), 10);
+ return 1;
+}
+
+static int
+timvf_ring_start(const struct rte_event_timer_adapter *adptr)
+{
+ int ret;
+ uint8_t use_fpa = 0;
+ uint64_t interval;
+ uintptr_t pool;
+ struct timvf_ctrl_reg rctrl;
+ struct timvf_mbox_dev_info dinfo;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ ret = timvf_mbox_dev_info_get(&dinfo);
+ if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
+ return -EINVAL;
+
+ /* Calculate the interval cycles according to clock source. */
+ switch (timr->clk_src) {
+ case TIM_CLK_SRC_SCLK:
+ interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
+ break;
+ case TIM_CLK_SRC_GPIO:
+ /* GPIO doesn't work on tck_nsec. */
+ interval = 0;
+ break;
+ case TIM_CLK_SRC_GTI:
+ interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
+ break;
+ case TIM_CLK_SRC_PTP:
+ interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
+ break;
+ default:
+ timvf_log_err("Unsupported clock source configured %d",
+ timr->clk_src);
+ return -EINVAL;
+ }
+
+ if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
+ use_fpa = 1;
+
+ /*CTRL0 register.*/
+ rctrl.rctrl0 = interval;
+
+ /*CTRL1 register.*/
+ rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 |
+ 1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
+ 1ull << 47 /* ENA */ |
+ 1ull << 44 /* ENA_LDWB */ |
+ (timr->nb_bkts - 1);
+
+ rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;
+
+ if (use_fpa) {
+ pool = (uintptr_t)((struct rte_mempool *)
+ timr->chunk_pool)->pool_id;
+ ret = octeontx_fpa_bufpool_gaura(pool);
+ if (ret < 0) {
+ timvf_log_dbg("Unable to get gaura id");
+ ret = -ENOMEM;
+ goto error;
+ }
+ timvf_write64((uint64_t)ret,
+ (uint8_t *)timr->vbar0 + TIM_VRING_AURA);
+ } else {
+ rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
+ }
+
+ timvf_write64((uintptr_t)timr->bkt,
+ (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
+ timvf_set_chunk_refill(timr, use_fpa);
+ if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
+ ret = -EACCES;
+ goto error;
+ }
+
+ if (timvf_get_start_cyc(&timr->ring_start_cyc,
+ timr->tim_ring_id) < 0) {
+ ret = -EACCES;
+ goto error;
+ }
+ timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
+ timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
+ timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64""
+ " maxtmo %"PRIu64"\n",
+ timr->nb_bkts, timr->tck_nsec, interval,
+ timr->max_tout);
+
+ return 0;
+error:
+ rte_free(timr->bkt);
+ rte_mempool_free(timr->chunk_pool);
+ return ret;
+}
+
+static int
+timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
+{
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ struct timvf_ctrl_reg rctrl = {0};
+ rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
+ rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
+ rctrl.rctrl1 &= ~(1ull << 47); /* Disable */
+ rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);
+
+ if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
+ return -EACCES;
+ return 0;
+}
+
+static int
+timvf_ring_create(struct rte_event_timer_adapter *adptr)
+{
+ char pool_name[25];
+ int ret;
+ uint64_t nb_timers;
+ struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
+ struct timvf_ring *timr;
+ struct timvf_info tinfo;
+ const char *mempool_ops;
+ unsigned int mp_flags = 0;
+
+ if (timvf_info(&tinfo) < 0)
+ return -ENODEV;
+
+ if (adptr->data->id >= tinfo.total_timvfs)
+ return -ENODEV;
+
+ timr = rte_zmalloc("octeontx_timvf_priv",
+ sizeof(struct timvf_ring), 0);
+ if (timr == NULL)
+ return -ENOMEM;
+
+ adptr->data->adapter_priv = timr;
+ /* Check config parameters. */
+ if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
+ (!rcfg->timer_tick_ns ||
+ rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
+ timvf_log_err("Too low timer ticks");
+ goto cfg_err;
+ }
+
+ timr->clk_src = (int) rcfg->clk_src;
+ timr->tim_ring_id = adptr->data->id;
+ timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
+ timr->max_tout = rcfg->max_tmo_ns;
+ timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
+ timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
+ timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
+ nb_timers = rcfg->nb_timers;
+ timr->get_target_bkt = bkt_mod;
+
+ timr->nb_chunks = nb_timers / nb_chunk_slots;
+
+ /* Try to optimize the bucket parameters. */
+ if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
+ && !rte_is_power_of_2(timr->nb_bkts)) {
+ if (optimize_bucket_parameters(timr)) {
+ timvf_log_info("Optimized configured values");
+ timvf_log_dbg("nb_bkts : %"PRIu32"", timr->nb_bkts);
+ timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec);
+ } else
+ timvf_log_info("Failed to Optimize configured values");
+ }
+
+ if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
+ mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
+ timvf_log_info("Using single producer mode");
+ }
+
+ timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
+ (timr->nb_bkts) * sizeof(struct tim_mem_bucket),
+ 0);
+ if (timr->bkt == NULL)
+ goto mem_err;
+
+ snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d",
+ timr->tim_ring_id);
+ timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
+ timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
+ mp_flags);
+
+ if (!timr->chunk_pool) {
+ rte_free(timr->bkt);
+ timvf_log_err("Unable to create chunkpool.");
+ return -ENOMEM;
+ }
+
+ mempool_ops = rte_mbuf_best_mempool_ops();
+ ret = rte_mempool_set_ops_byname(timr->chunk_pool,
+ mempool_ops, NULL);
+
+ if (ret != 0) {
+ timvf_log_err("Unable to set chunkpool ops.");
+ goto mem_err;
+ }
+
+ ret = rte_mempool_populate_default(timr->chunk_pool);
+ if (ret < 0) {
+ timvf_log_err("Unable to set populate chunkpool.");
+ goto mem_err;
+ }
+ timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
+ timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
+ timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
+ timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
+ timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);
+
+ return 0;
+mem_err:
+ rte_free(timr);
+ return -ENOMEM;
+cfg_err:
+ rte_free(timr);
+ return -EINVAL;
+}
+
+static int
+timvf_ring_free(struct rte_event_timer_adapter *adptr)
+{
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ rte_mempool_free(timr->chunk_pool);
+ rte_free(timr->bkt);
+ rte_free(adptr->data->adapter_priv);
+ return 0;
+}
+
+static int
+timvf_stats_get(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats)
+{
+ struct timvf_ring *timr = adapter->data->adapter_priv;
+ uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
+
+ stats->evtim_exp_count = timr->tim_arm_cnt;
+ stats->ev_enq_count = timr->tim_arm_cnt;
+ stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
+ &timr->fast_div);
+ return 0;
+}
+
+static int
+timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
+{
+ struct timvf_ring *timr = adapter->data->adapter_priv;
+
+ timr->tim_arm_cnt = 0;
+ return 0;
+}
+
+static struct rte_event_timer_adapter_ops timvf_ops = {
+ .init = timvf_ring_create,
+ .uninit = timvf_ring_free,
+ .start = timvf_ring_start,
+ .stop = timvf_ring_stop,
+ .get_info = timvf_ring_info_get,
+};
+
+int
+timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
+ uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
+ uint8_t enable_stats)
+{
+ RTE_SET_USED(dev);
+
+ if (enable_stats) {
+ timvf_ops.stats_get = timvf_stats_get;
+ timvf_ops.stats_reset = timvf_stats_reset;
+ }
+
+ if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT)
+ timvf_ops.arm_burst = enable_stats ?
+ timvf_timer_arm_burst_sp_stats :
+ timvf_timer_arm_burst_sp;
+ else
+ timvf_ops.arm_burst = enable_stats ?
+ timvf_timer_arm_burst_mp_stats :
+ timvf_timer_arm_burst_mp;
+
+ timvf_ops.arm_tmo_tick_burst = enable_stats ?
+ timvf_timer_arm_tmo_brst_stats :
+ timvf_timer_arm_tmo_brst;
+ timvf_ops.cancel_burst = timvf_timer_cancel_burst;
+ *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
+ *ops = &timvf_ops;
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/event/octeontx/timvf_evdev.h b/src/spdk/dpdk/drivers/event/octeontx/timvf_evdev.h
new file mode 100644
index 00000000..0185593f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/timvf_evdev.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __TIMVF_EVDEV_H__
+#define __TIMVF_EVDEV_H__
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_eventdev.h>
+#include <rte_event_timer_adapter.h>
+#include <rte_event_timer_adapter_pmd.h>
+#include <rte_io.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_mempool.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_prefetch.h>
+#include <rte_reciprocal.h>
+
+#include <octeontx_mbox.h>
+#include <octeontx_fpavf.h>
+
+#define timvf_log(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, otx_logtype_timvf, \
+ "[%s] %s() " fmt "\n", \
+ RTE_STR(event_timer_octeontx), __func__, ## args)
+
+#define timvf_log_info(fmt, ...) timvf_log(INFO, fmt, ##__VA_ARGS__)
+#define timvf_log_dbg(fmt, ...) timvf_log(DEBUG, fmt, ##__VA_ARGS__)
+#define timvf_log_err(fmt, ...) timvf_log(ERR, fmt, ##__VA_ARGS__)
+#define timvf_func_trace timvf_log_dbg
+
+#define TIM_COPROC (8)
+#define TIM_GET_DEV_INFO (1)
+#define TIM_GET_RING_INFO (2)
+#define TIM_SET_RING_INFO (3)
+#define TIM_RING_START_CYC_GET (4)
+
+#define TIM_MAX_RINGS (64)
+#define TIM_DEV_PER_NODE (1)
+#define TIM_VF_PER_DEV (64)
+#define TIM_RING_PER_DEV (TIM_VF_PER_DEV)
+#define TIM_RING_NODE_SHIFT (6)
+#define TIM_RING_MASK ((TIM_RING_PER_DEV) - 1)
+#define TIM_RING_INVALID (-1)
+
+#define TIM_MIN_INTERVAL (1E3)
+#define TIM_MAX_INTERVAL ((1ull << 32) - 1)
+#define TIM_MAX_BUCKETS (1ull << 20)
+#define TIM_CHUNK_SIZE (4096)
+#define TIM_MAX_CHUNKS_PER_BUCKET (1ull << 32)
+
+#define TIMVF_MAX_BURST (8)
+
+/* TIM VF Control/Status registers (CSRs): */
+/* VF_BAR0: */
+#define TIM_VF_NRSPERR_INT (0x0)
+#define TIM_VF_NRSPERR_INT_W1S (0x8)
+#define TIM_VF_NRSPERR_ENA_W1C (0x10)
+#define TIM_VF_NRSPERR_ENA_W1S (0x18)
+#define TIM_VRING_FR_RN_CYCLES (0x20)
+#define TIM_VRING_FR_RN_GPIOS (0x28)
+#define TIM_VRING_FR_RN_GTI (0x30)
+#define TIM_VRING_FR_RN_PTP (0x38)
+#define TIM_VRING_CTL0 (0x40)
+#define TIM_VRING_CTL1 (0x50)
+#define TIM_VRING_CTL2 (0x60)
+#define TIM_VRING_BASE (0x100)
+#define TIM_VRING_AURA (0x108)
+#define TIM_VRING_REL (0x110)
+
+#define TIM_CTL1_W0_S_BUCKET 20
+#define TIM_CTL1_W0_M_BUCKET ((1ull << (40 - 20)) - 1)
+
+#define TIM_BUCKET_W1_S_NUM_ENTRIES (0) /*Shift*/
+#define TIM_BUCKET_W1_M_NUM_ENTRIES ((1ull << (32 - 0)) - 1)
+#define TIM_BUCKET_W1_S_SBT (32)
+#define TIM_BUCKET_W1_M_SBT ((1ull << (33 - 32)) - 1)
+#define TIM_BUCKET_W1_S_HBT (33)
+#define TIM_BUCKET_W1_M_HBT ((1ull << (34 - 33)) - 1)
+#define TIM_BUCKET_W1_S_BSK (34)
+#define TIM_BUCKET_W1_M_BSK ((1ull << (35 - 34)) - 1)
+#define TIM_BUCKET_W1_S_LOCK (40)
+#define TIM_BUCKET_W1_M_LOCK ((1ull << (48 - 40)) - 1)
+#define TIM_BUCKET_W1_S_CHUNK_REMAINDER (48)
+#define TIM_BUCKET_W1_M_CHUNK_REMAINDER ((1ull << (64 - 48)) - 1)
+
+#define TIM_BUCKET_SEMA \
+ (TIM_BUCKET_CHUNK_REMAIN)
+
+#define TIM_BUCKET_CHUNK_REMAIN \
+ (TIM_BUCKET_W1_M_CHUNK_REMAINDER << TIM_BUCKET_W1_S_CHUNK_REMAINDER)
+
+#define TIM_BUCKET_LOCK \
+ (TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK)
+
+#define TIM_BUCKET_SEMA_WLOCK \
+ (TIM_BUCKET_CHUNK_REMAIN | (1ull << TIM_BUCKET_W1_S_LOCK))
+
+#define NSEC_PER_SEC 1E9
+#define NSEC2CLK(__ns, __freq) (((__ns) * (__freq)) / NSEC_PER_SEC)
+#define CLK2NSEC(__clk, __freq) (((__clk) * NSEC_PER_SEC) / (__freq))
+
+#define timvf_read64 rte_read64_relaxed
+#define timvf_write64 rte_write64_relaxed
+
+#define TIMVF_ENABLE_STATS_ARG ("timvf_stats")
+
+extern int otx_logtype_timvf;
+static const uint16_t nb_chunk_slots = (TIM_CHUNK_SIZE / 16) - 1;
+
+struct timvf_info {
+ uint16_t domain; /* Domain id */
+ uint8_t total_timvfs; /* Total timvf available in domain */
+};
+
+enum timvf_clk_src {
+ TIM_CLK_SRC_SCLK = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+ TIM_CLK_SRC_GPIO = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
+ TIM_CLK_SRC_GTI = RTE_EVENT_TIMER_ADAPTER_EXT_CLK1,
+ TIM_CLK_SRC_PTP = RTE_EVENT_TIMER_ADAPTER_EXT_CLK2,
+};
+
+/* TIM_MEM_BUCKET */
+struct tim_mem_bucket {
+ uint64_t first_chunk;
+ union {
+ uint64_t w1;
+ struct {
+ uint32_t nb_entry;
+ uint8_t sbt:1;
+ uint8_t hbt:1;
+ uint8_t bsk:1;
+ uint8_t rsvd:5;
+ uint8_t lock;
+ int16_t chunk_remainder;
+ };
+ };
+ uint64_t current_chunk;
+ uint64_t pad;
+} __rte_packed __rte_aligned(8);
+
+struct tim_mem_entry {
+ uint64_t w0;
+ uint64_t wqe;
+} __rte_packed;
+
+struct timvf_ctrl_reg {
+ uint64_t rctrl0;
+ uint64_t rctrl1;
+ uint64_t rctrl2;
+ uint8_t use_pmu;
+} __rte_packed;
+
+struct timvf_ring;
+
+typedef uint32_t (*bkt_id)(const uint32_t bkt_tcks, const uint32_t nb_bkts);
+typedef struct tim_mem_entry * (*refill_chunk)(
+ struct tim_mem_bucket * const bkt,
+ struct timvf_ring * const timr);
+
+struct timvf_ring {
+ bkt_id get_target_bkt;
+ refill_chunk refill_chunk;
+ struct rte_reciprocal_u64 fast_div;
+ uint64_t ring_start_cyc;
+ uint32_t nb_bkts;
+ struct tim_mem_bucket *bkt;
+ void *chunk_pool;
+ uint64_t tck_int;
+ volatile uint64_t tim_arm_cnt;
+ uint64_t tck_nsec;
+ void *vbar0;
+ void *bkt_pos;
+ uint64_t max_tout;
+ uint64_t nb_chunks;
+ enum timvf_clk_src clk_src;
+ uint16_t tim_ring_id;
+} __rte_cache_aligned;
+
+static __rte_always_inline uint32_t
+bkt_mod(const uint32_t rel_bkt, const uint32_t nb_bkts)
+{
+ return rel_bkt % nb_bkts;
+}
+
+static __rte_always_inline uint32_t
+bkt_and(uint32_t rel_bkt, uint32_t nb_bkts)
+{
+ return rel_bkt & (nb_bkts - 1);
+}
+
+int timvf_info(struct timvf_info *tinfo);
+void *timvf_bar(uint8_t id, uint8_t bar);
+int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
+ uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
+ uint8_t enable_stats);
+uint16_t timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_sp_stats(
+ const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_mp_stats(
+ const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers);
+uint16_t timvf_timer_arm_tmo_brst_stats(
+ const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers);
+void timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa);
+
+#endif /* __TIMVF_EVDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/event/octeontx/timvf_probe.c b/src/spdk/dpdk/drivers/event/octeontx/timvf_probe.c
new file mode 100644
index 00000000..08dbd2be
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/timvf_probe.c
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+#include <octeontx_mbox.h>
+
+#include "ssovf_evdev.h"
+#include "timvf_evdev.h"
+
+#ifndef PCI_VENDOR_ID_CAVIUM
+#define PCI_VENDOR_ID_CAVIUM (0x177D)
+#endif
+
+#define PCI_DEVICE_ID_OCTEONTX_TIM_VF (0xA051)
+#define TIM_MAX_RINGS (64)
+
+struct timvf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+ void *bar4;
+};
+
+struct timdev {
+ uint8_t total_timvfs;
+ struct timvf_res rings[TIM_MAX_RINGS];
+};
+
+static struct timdev tdev;
+
+int
+timvf_info(struct timvf_info *tinfo)
+{
+ int i;
+ struct ssovf_info info;
+
+ if (tinfo == NULL)
+ return -EINVAL;
+
+ if (!tdev.total_timvfs)
+ return -ENODEV;
+
+ if (ssovf_info(&info) < 0)
+ return -EINVAL;
+
+ for (i = 0; i < tdev.total_timvfs; i++) {
+ if (info.domain != tdev.rings[i].domain) {
+ timvf_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
+ i, tdev.rings[i].vfid,
+ info.domain, tdev.rings[i].domain,
+ tdev.rings[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ tinfo->total_timvfs = tdev.total_timvfs;
+ tinfo->domain = info.domain;
+ return 0;
+}
+
+void*
+timvf_bar(uint8_t id, uint8_t bar)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return NULL;
+
+ if (id > tdev.total_timvfs)
+ return NULL;
+
+ switch (bar) {
+ case 0:
+ return tdev.rings[id].bar0;
+ case 4:
+ return tdev.rings[id].bar4;
+ default:
+ return NULL;
+ }
+}
+
+static int
+timvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint64_t val;
+ uint16_t vfid;
+ struct timvf_res *res;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[4].addr == NULL) {
+ timvf_log_err("Empty bars %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[4].addr);
+ return -ENODEV;
+ }
+
+ val = rte_read64((uint8_t *)pci_dev->mem_resource[0].addr +
+ 0x100 /* TIM_VRINGX_BASE */);
+ vfid = (val >> 23) & 0xff;
+ if (vfid >= TIM_MAX_RINGS) {
+ timvf_log_err("Invalid vfid(%d/%d)", vfid, TIM_MAX_RINGS);
+ return -EINVAL;
+ }
+
+ res = &tdev.rings[tdev.total_timvfs];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->bar4 = pci_dev->mem_resource[4].addr;
+ res->domain = (val >> 7) & 0xffff;
+ tdev.total_timvfs++;
+ rte_wmb();
+
+ timvf_log_dbg("Domain=%d VFid=%d bar0 %p total_timvfs=%d", res->domain,
+ res->vfid, pci_dev->mem_resource[0].addr,
+ tdev.total_timvfs);
+ return 0;
+}
+
+
+static const struct rte_pci_id pci_timvf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_TIM_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_timvf = {
+ .id_table = pci_timvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = timvf_probe,
+ .remove = NULL,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_timvf, pci_timvf);
diff --git a/src/spdk/dpdk/drivers/event/octeontx/timvf_worker.c b/src/spdk/dpdk/drivers/event/octeontx/timvf_worker.c
new file mode 100644
index 00000000..50790e19
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/timvf_worker.c
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include "timvf_worker.h"
+
+static inline int
+timvf_timer_reg_checks(const struct timvf_ring * const timr,
+ struct rte_event_timer * const tim)
+{
+ if (unlikely(tim->state)) {
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ rte_errno = EALREADY;
+ goto fail;
+ }
+
+ if (unlikely(!tim->timeout_ticks ||
+ tim->timeout_ticks >= timr->nb_bkts)) {
+ tim->state = tim->timeout_ticks ? RTE_EVENT_TIMER_ERROR_TOOLATE
+ : RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ rte_errno = EINVAL;
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static inline void
+timvf_format_event(const struct rte_event_timer * const tim,
+ struct tim_mem_entry * const entry)
+{
+ entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
+ (tim->ev.event & 0xFFFFFFFFF);
+ entry->wqe = tim->ev.u64;
+}
+
+uint16_t
+timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ RTE_SET_USED(adptr);
+ int ret;
+ uint16_t index;
+
+ for (index = 0; index < nb_timers; index++) {
+ if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
+ rte_errno = EALREADY;
+ break;
+ }
+
+ if (tim[index]->state != RTE_EVENT_TIMER_ARMED) {
+ rte_errno = EINVAL;
+ break;
+ }
+ ret = timvf_rem_entry(tim[index]);
+ if (ret) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+ return index;
+}
+
+uint16_t
+timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ int ret;
+ uint16_t index;
+ struct tim_mem_entry entry;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ for (index = 0; index < nb_timers; index++) {
+ if (timvf_timer_reg_checks(timr, tim[index]))
+ break;
+
+ timvf_format_event(tim[index], &entry);
+ ret = timvf_add_entry_sp(timr, tim[index]->timeout_ticks,
+ tim[index], &entry);
+ if (unlikely(ret)) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+
+ return index;
+}
+
+uint16_t
+timvf_timer_arm_burst_sp_stats(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ uint16_t ret;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ ret = timvf_timer_arm_burst_sp(adptr, tim, nb_timers);
+ timr->tim_arm_cnt += ret;
+
+ return ret;
+}
+
+uint16_t
+timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ int ret;
+ uint16_t index;
+ struct tim_mem_entry entry;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ for (index = 0; index < nb_timers; index++) {
+ if (timvf_timer_reg_checks(timr, tim[index]))
+ break;
+ timvf_format_event(tim[index], &entry);
+ ret = timvf_add_entry_mp(timr, tim[index]->timeout_ticks,
+ tim[index], &entry);
+ if (unlikely(ret)) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+
+ return index;
+}
+
+uint16_t
+timvf_timer_arm_burst_mp_stats(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ uint16_t ret;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ ret = timvf_timer_arm_burst_mp(adptr, tim, nb_timers);
+ timr->tim_arm_cnt += ret;
+
+ return ret;
+}
+
+uint16_t
+timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers)
+{
+ int ret;
+ uint16_t set_timers = 0;
+ uint16_t idx;
+ uint16_t arr_idx = 0;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ struct tim_mem_entry entry[TIMVF_MAX_BURST] __rte_cache_aligned;
+
+ if (unlikely(!timeout_tick || timeout_tick >= timr->nb_bkts)) {
+ const enum rte_event_timer_state state = timeout_tick ?
+ RTE_EVENT_TIMER_ERROR_TOOLATE :
+ RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ for (idx = 0; idx < nb_timers; idx++)
+ tim[idx]->state = state;
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ while (arr_idx < nb_timers) {
+ for (idx = 0; idx < TIMVF_MAX_BURST && (arr_idx < nb_timers);
+ idx++, arr_idx++) {
+ timvf_format_event(tim[arr_idx], &entry[idx]);
+ }
+ ret = timvf_add_entry_brst(timr, timeout_tick, &tim[set_timers],
+ entry, idx);
+ set_timers += ret;
+ if (ret != idx)
+ break;
+ }
+
+ return set_timers;
+}
+
+
+uint16_t
+timvf_timer_arm_tmo_brst_stats(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers)
+{
+ uint16_t set_timers;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ set_timers = timvf_timer_arm_tmo_brst(adptr, tim, timeout_tick,
+ nb_timers);
+ timr->tim_arm_cnt += set_timers;
+
+ return set_timers;
+}
+
+void
+timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa)
+{
+ if (use_fpa)
+ timr->refill_chunk = timvf_refill_chunk_fpa;
+ else
+ timr->refill_chunk = timvf_refill_chunk_generic;
+}
diff --git a/src/spdk/dpdk/drivers/event/octeontx/timvf_worker.h b/src/spdk/dpdk/drivers/event/octeontx/timvf_worker.h
new file mode 100644
index 00000000..dede1a4a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx/timvf_worker.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_common.h>
+#include <rte_branch_prediction.h>
+
+#include "timvf_evdev.h"
+
+static inline int16_t
+timr_bkt_fetch_rem(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) &
+ TIM_BUCKET_W1_M_CHUNK_REMAINDER;
+}
+
+static inline int16_t
+timr_bkt_get_rem(struct tim_mem_bucket *bktp)
+{
+ return __atomic_load_n(&bktp->chunk_remainder,
+ __ATOMIC_ACQUIRE);
+}
+
+static inline void
+timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
+{
+ __atomic_store_n(&bktp->chunk_remainder, v,
+ __ATOMIC_RELEASE);
+}
+
+static inline void
+timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
+{
+ __atomic_fetch_sub(&bktp->chunk_remainder, v,
+ __ATOMIC_RELEASE);
+}
+
+static inline uint8_t
+timr_bkt_get_sbt(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT;
+}
+
+static inline uint64_t
+timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
+ return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline uint64_t
+timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
+ return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline uint8_t
+timr_bkt_get_shbt(uint64_t w1)
+{
+ return ((w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT) |
+ ((w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT);
+}
+
+static inline uint8_t
+timr_bkt_get_hbt(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT;
+}
+
+static inline uint8_t
+timr_bkt_get_bsk(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK;
+}
+
+static inline uint64_t
+timr_bkt_clr_bsk(struct tim_mem_bucket *bktp)
+{
+ /*Clear everything except lock. */
+ const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
+ return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline uint64_t
+timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
+{
+ return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
+ __ATOMIC_ACQ_REL);
+}
+
+static inline uint64_t
+timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
+{
+ return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
+ __ATOMIC_RELAXED);
+}
+
+static inline uint64_t
+timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
+ return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline void
+timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
+{
+ __atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
+}
+
+static inline uint32_t
+timr_bkt_get_nent(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) &
+ TIM_BUCKET_W1_M_NUM_ENTRIES;
+}
+
+static inline void
+timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
+{
+ __atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+}
+
+static inline void
+timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
+{
+ __atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+}
+
+static inline uint64_t
+timr_bkt_clr_nent(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
+ TIM_BUCKET_W1_S_NUM_ENTRIES);
+ return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline struct tim_mem_entry *
+timr_clr_bkt(struct timvf_ring * const timr, struct tim_mem_bucket * const bkt)
+{
+ struct tim_mem_entry *chunk;
+ struct tim_mem_entry *pnext;
+ chunk = ((struct tim_mem_entry *)(uintptr_t)bkt->first_chunk);
+ chunk = (struct tim_mem_entry *)(uintptr_t)(chunk + nb_chunk_slots)->w0;
+
+ while (chunk) {
+ pnext = (struct tim_mem_entry *)(uintptr_t)
+ ((chunk + nb_chunk_slots)->w0);
+ rte_mempool_put(timr->chunk_pool, chunk);
+ chunk = pnext;
+ }
+ return (struct tim_mem_entry *)(uintptr_t)bkt->first_chunk;
+}
+
+static inline int
+timvf_rem_entry(struct rte_event_timer *tim)
+{
+ uint64_t lock_sema;
+ struct tim_mem_entry *entry;
+ struct tim_mem_bucket *bkt;
+ if (tim->impl_opaque[1] == 0 ||
+ tim->impl_opaque[0] == 0)
+ return -ENOENT;
+
+ entry = (struct tim_mem_entry *)(uintptr_t)tim->impl_opaque[0];
+ if (entry->wqe != tim->ev.u64) {
+ tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
+ return -ENOENT;
+ }
+ bkt = (struct tim_mem_bucket *)(uintptr_t)tim->impl_opaque[1];
+ lock_sema = timr_bkt_inc_lock(bkt);
+ if (timr_bkt_get_shbt(lock_sema)
+ || !timr_bkt_get_nent(lock_sema)) {
+ timr_bkt_dec_lock(bkt);
+ tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
+ return -ENOENT;
+ }
+
+ entry->w0 = entry->wqe = 0;
+ timr_bkt_dec_lock(bkt);
+
+ tim->state = RTE_EVENT_TIMER_CANCELED;
+ tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
+ return 0;
+}
+
+static inline struct tim_mem_entry *
+timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
+ struct timvf_ring * const timr)
+{
+ struct tim_mem_entry *chunk;
+
+ if (bkt->nb_entry || !bkt->first_chunk) {
+ if (unlikely(rte_mempool_get(timr->chunk_pool,
+ (void **)&chunk))) {
+ return NULL;
+ }
+ if (bkt->nb_entry) {
+ *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
+ bkt->current_chunk) +
+ nb_chunk_slots) =
+ (uintptr_t) chunk;
+ } else {
+ bkt->first_chunk = (uintptr_t) chunk;
+ }
+ } else {
+ chunk = timr_clr_bkt(timr, bkt);
+ bkt->first_chunk = (uintptr_t)chunk;
+ }
+ *(uint64_t *)(chunk + nb_chunk_slots) = 0;
+
+ return chunk;
+}
+
+static inline struct tim_mem_entry *
+timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt,
+ struct timvf_ring * const timr)
+{
+ struct tim_mem_entry *chunk;
+
+ if (unlikely(rte_mempool_get(timr->chunk_pool, (void **)&chunk)))
+ return NULL;
+
+ *(uint64_t *)(chunk + nb_chunk_slots) = 0;
+ if (bkt->nb_entry) {
+ *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
+ bkt->current_chunk) +
+ nb_chunk_slots) =
+ (uintptr_t) chunk;
+ } else {
+ bkt->first_chunk = (uintptr_t) chunk;
+ }
+
+ return chunk;
+}
+
+static inline struct tim_mem_bucket *
+timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt)
+{
+ const uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
+ const uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc,
+ &timr->fast_div) + rel_bkt;
+ const uint32_t tbkt_id = timr->get_target_bkt(bucket,
+ timr->nb_bkts);
+ return &timr->bkt[tbkt_id];
+}
+
+/* Single producer functions. */
+static inline int
+timvf_add_entry_sp(struct timvf_ring * const timr, const uint32_t rel_bkt,
+ struct rte_event_timer * const tim,
+ const struct tim_mem_entry * const pent)
+{
+ int16_t rem;
+ uint64_t lock_sema;
+ struct tim_mem_bucket *bkt;
+ struct tim_mem_entry *chunk;
+
+
+ bkt = timvf_get_target_bucket(timr, rel_bkt);
+__retry:
+ /*Get Bucket sema*/
+ lock_sema = timr_bkt_fetch_sema(bkt);
+ /* Bucket related checks. */
+ if (unlikely(timr_bkt_get_hbt(lock_sema)))
+ goto __retry;
+
+ /* Insert the work. */
+ rem = timr_bkt_fetch_rem(lock_sema);
+
+ if (!rem) {
+ chunk = timr->refill_chunk(bkt, timr);
+ if (unlikely(chunk == NULL)) {
+ timr_bkt_set_rem(bkt, 0);
+ tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ return -ENOMEM;
+ }
+ bkt->current_chunk = (uintptr_t) chunk;
+ timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
+ } else {
+ chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
+ chunk += nb_chunk_slots - rem;
+ }
+ /* Copy work entry. */
+ *chunk = *pent;
+ timr_bkt_inc_nent(bkt);
+
+ tim->impl_opaque[0] = (uintptr_t)chunk;
+ tim->impl_opaque[1] = (uintptr_t)bkt;
+ tim->state = RTE_EVENT_TIMER_ARMED;
+ return 0;
+}
+
+/* Multi producer functions. */
+static inline int
+timvf_add_entry_mp(struct timvf_ring * const timr, const uint32_t rel_bkt,
+ struct rte_event_timer * const tim,
+ const struct tim_mem_entry * const pent)
+{
+ int16_t rem;
+ uint64_t lock_sema;
+ struct tim_mem_bucket *bkt;
+ struct tim_mem_entry *chunk;
+
+__retry:
+ bkt = timvf_get_target_bucket(timr, rel_bkt);
+ /* Bucket related checks. */
+ /*Get Bucket sema*/
+ lock_sema = timr_bkt_fetch_sema_lock(bkt);
+ if (unlikely(timr_bkt_get_shbt(lock_sema))) {
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ rem = timr_bkt_fetch_rem(lock_sema);
+
+ if (rem < 0) {
+ /* goto diff bucket. */
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ } else if (!rem) {
+ /*Only one thread can be here*/
+ chunk = timr->refill_chunk(bkt, timr);
+ if (unlikely(chunk == NULL)) {
+ timr_bkt_set_rem(bkt, 0);
+ timr_bkt_dec_lock(bkt);
+ tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ return -ENOMEM;
+ }
+ bkt->current_chunk = (uintptr_t) chunk;
+ timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
+ } else {
+ chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
+ chunk += nb_chunk_slots - rem;
+ }
+ /* Copy work entry. */
+ *chunk = *pent;
+ timr_bkt_inc_nent(bkt);
+ timr_bkt_dec_lock(bkt);
+
+ tim->impl_opaque[0] = (uintptr_t)chunk;
+ tim->impl_opaque[1] = (uintptr_t)bkt;
+ tim->state = RTE_EVENT_TIMER_ARMED;
+ return 0;
+}
+
+static inline uint16_t
+timvf_cpy_wrk(uint16_t index, uint16_t cpy_lmt,
+ struct tim_mem_entry *chunk,
+ struct rte_event_timer ** const tim,
+ const struct tim_mem_entry * const ents,
+ const struct tim_mem_bucket * const bkt)
+{
+ for (; index < cpy_lmt; index++) {
+ *chunk = *(ents + index);
+ tim[index]->impl_opaque[0] = (uintptr_t)chunk++;
+ tim[index]->impl_opaque[1] = (uintptr_t)bkt;
+ tim[index]->state = RTE_EVENT_TIMER_ARMED;
+ }
+
+ return index;
+}
+
+/* Burst mode functions */
+static inline int
+timvf_add_entry_brst(struct timvf_ring * const timr, const uint16_t rel_bkt,
+ struct rte_event_timer ** const tim,
+ const struct tim_mem_entry *ents,
+ const uint16_t nb_timers)
+{
+ int16_t rem;
+ int16_t crem;
+ uint8_t lock_cnt;
+ uint16_t index = 0;
+ uint16_t chunk_remainder;
+ uint64_t lock_sema;
+ struct tim_mem_bucket *bkt;
+ struct tim_mem_entry *chunk;
+
+__retry:
+ bkt = timvf_get_target_bucket(timr, rel_bkt);
+
+ /* Only one thread beyond this. */
+ lock_sema = timr_bkt_inc_lock(bkt);
+ lock_cnt = (uint8_t)
+ ((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);
+
+ if (lock_cnt) {
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ /* Bucket related checks. */
+ if (unlikely(timr_bkt_get_hbt(lock_sema))) {
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ chunk_remainder = timr_bkt_fetch_rem(lock_sema);
+ rem = chunk_remainder - nb_timers;
+ if (rem < 0) {
+ crem = nb_chunk_slots - chunk_remainder;
+ if (chunk_remainder && crem) {
+ chunk = ((struct tim_mem_entry *)
+ (uintptr_t)bkt->current_chunk) + crem;
+
+ index = timvf_cpy_wrk(index, chunk_remainder,
+ chunk, tim, ents, bkt);
+ timr_bkt_sub_rem(bkt, chunk_remainder);
+ timr_bkt_add_nent(bkt, chunk_remainder);
+ }
+ rem = nb_timers - chunk_remainder;
+ ents = ents + chunk_remainder;
+
+ chunk = timr->refill_chunk(bkt, timr);
+ if (unlikely(chunk == NULL)) {
+ timr_bkt_dec_lock(bkt);
+ rte_errno = ENOMEM;
+ tim[index]->state = RTE_EVENT_TIMER_ERROR;
+ return crem;
+ }
+ *(uint64_t *)(chunk + nb_chunk_slots) = 0;
+ bkt->current_chunk = (uintptr_t) chunk;
+
+ index = timvf_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
+ timr_bkt_set_rem(bkt, nb_chunk_slots - rem);
+ timr_bkt_add_nent(bkt, rem);
+ } else {
+ chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
+ chunk += (nb_chunk_slots - chunk_remainder);
+
+ index = timvf_cpy_wrk(index, nb_timers,
+ chunk, tim, ents, bkt);
+ timr_bkt_sub_rem(bkt, nb_timers);
+ timr_bkt_add_nent(bkt, nb_timers);
+ }
+
+ timr_bkt_dec_lock(bkt);
+ return nb_timers;
+}
diff --git a/src/spdk/dpdk/drivers/event/opdl/Makefile b/src/spdk/dpdk/drivers/event/opdl/Makefile
new file mode 100644
index 00000000..cea8118d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/opdl/Makefile
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_opdl_event.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+# for older GCC versions, allow us to initialize an event using
+# designated initializers.
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -le 50 && echo 1), 1)
+CFLAGS += -Wno-missing-field-initializers
+endif
+endif
+
+LDLIBS += -lrte_eal -lrte_eventdev -lrte_kvargs
+LDLIBS += -lrte_bus_vdev -lrte_mbuf -lrte_mempool
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_evdev_opdl_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_ring.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_evdev_init.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_evdev_xstats.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_test.c
+
+# export include files
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/event/opdl/opdl_evdev.c b/src/spdk/dpdk/drivers/event/opdl/opdl_evdev.c
new file mode 100644
index 00000000..a4f0bc8b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/opdl/opdl_evdev.c
@@ -0,0 +1,766 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <inttypes.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_lcore.h>
+#include <rte_memzone.h>
+#include <rte_kvargs.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+
+#include "opdl_evdev.h"
+#include "opdl_ring.h"
+#include "opdl_log.h"
+
+#define EVENTDEV_NAME_OPDL_PMD event_opdl
+#define NUMA_NODE_ARG "numa_node"
+#define DO_VALIDATION_ARG "do_validation"
+#define DO_TEST_ARG "self_test"
+
+
+static void
+opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
+
+uint16_t
+opdl_event_enqueue_burst(void *port,
+ const struct rte_event ev[],
+ uint16_t num)
+{
+ struct opdl_port *p = port;
+
+ if (unlikely(!p->opdl->data->dev_started))
+ return 0;
+
+
+ /* either rx_enqueue or disclaim*/
+ return p->enq(p, ev, num);
+}
+
+uint16_t
+opdl_event_enqueue(void *port, const struct rte_event *ev)
+{
+ struct opdl_port *p = port;
+
+ if (unlikely(!p->opdl->data->dev_started))
+ return 0;
+
+
+ return p->enq(p, ev, 1);
+}
+
+uint16_t
+opdl_event_dequeue_burst(void *port,
+ struct rte_event *ev,
+ uint16_t num,
+ uint64_t wait)
+{
+ struct opdl_port *p = (void *)port;
+
+ RTE_SET_USED(wait);
+
+ if (unlikely(!p->opdl->data->dev_started))
+ return 0;
+
+ /* This function pointer can point to tx_dequeue or claim*/
+ return p->deq(p, ev, num);
+}
+
+uint16_t
+opdl_event_dequeue(void *port,
+ struct rte_event *ev,
+ uint64_t wait)
+{
+ struct opdl_port *p = (void *)port;
+
+ if (unlikely(!p->opdl->data->dev_started))
+ return 0;
+
+ RTE_SET_USED(wait);
+
+ return p->deq(p, ev, 1);
+}
+
+static int
+opdl_port_link(struct rte_eventdev *dev,
+ void *port,
+ const uint8_t queues[],
+ const uint8_t priorities[],
+ uint16_t num)
+{
+ struct opdl_port *p = port;
+
+ RTE_SET_USED(priorities);
+ RTE_SET_USED(dev);
+
+ if (unlikely(dev->data->dev_started)) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Attempt to link queue (%u) to port %d while device started\n",
+ dev->data->dev_id,
+ queues[0],
+ p->id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ /* Max of 1 queue per port */
+ if (num > 1) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Attempt to link more than one queue (%u) to port %d requested\n",
+ dev->data->dev_id,
+ num,
+ p->id);
+ rte_errno = -EDQUOT;
+ return 0;
+ }
+
+ if (!p->configured) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "port %d not configured, cannot link to %u\n",
+ dev->data->dev_id,
+ p->id,
+ queues[0]);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (p->external_qid != OPDL_INVALID_QID) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "port %d already linked to queue %u, cannot link to %u\n",
+ dev->data->dev_id,
+ p->id,
+ p->external_qid,
+ queues[0]);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ p->external_qid = queues[0];
+
+ return 1;
+}
+
+static int
+opdl_port_unlink(struct rte_eventdev *dev,
+ void *port,
+ uint8_t queues[],
+ uint16_t nb_unlinks)
+{
+ struct opdl_port *p = port;
+
+ RTE_SET_USED(queues);
+ RTE_SET_USED(nb_unlinks);
+
+ if (unlikely(dev->data->dev_started)) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Attempt to unlink queue (%u) to port %d while device started\n",
+ dev->data->dev_id,
+ queues[0],
+ p->id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+ RTE_SET_USED(nb_unlinks);
+
+ /* Port Stuff */
+ p->queue_id = OPDL_INVALID_QID;
+ p->p_type = OPDL_INVALID_PORT;
+ p->external_qid = OPDL_INVALID_QID;
+
+ /* always unlink 0 queue due to statice pipeline */
+ return 0;
+}
+
+static int
+opdl_port_setup(struct rte_eventdev *dev,
+ uint8_t port_id,
+ const struct rte_event_port_conf *conf)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+ struct opdl_port *p = &device->ports[port_id];
+
+ RTE_SET_USED(conf);
+
+ /* Check if port already configured */
+ if (p->configured) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Attempt to setup port %d which is already setup\n",
+ dev->data->dev_id,
+ p->id);
+ return -EDQUOT;
+ }
+
+ *p = (struct opdl_port){0}; /* zero entire structure */
+ p->id = port_id;
+ p->opdl = device;
+ p->queue_id = OPDL_INVALID_QID;
+ p->external_qid = OPDL_INVALID_QID;
+ dev->data->ports[port_id] = p;
+ rte_smp_wmb();
+ p->configured = 1;
+ device->nb_ports++;
+ return 0;
+}
+
+static void
+opdl_port_release(void *port)
+{
+ struct opdl_port *p = (void *)port;
+
+ if (p == NULL ||
+ p->opdl->data->dev_started) {
+ return;
+ }
+
+ p->configured = 0;
+ p->initialized = 0;
+}
+
+static void
+opdl_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(port_id);
+
+ port_conf->new_event_threshold = MAX_OPDL_CONS_Q_DEPTH;
+ port_conf->dequeue_depth = MAX_OPDL_CONS_Q_DEPTH;
+ port_conf->enqueue_depth = MAX_OPDL_CONS_Q_DEPTH;
+}
+
+static int
+opdl_queue_setup(struct rte_eventdev *dev,
+ uint8_t queue_id,
+ const struct rte_event_queue_conf *conf)
+{
+ enum queue_type type;
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ /* Extra sanity check, probably not needed */
+ if (queue_id == OPDL_INVALID_QID) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Invalid queue id %u requested\n",
+ dev->data->dev_id,
+ queue_id);
+ return -EINVAL;
+ }
+
+ if (device->nb_q_md > device->max_queue_nb) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Max number of queues %u exceeded by request %u\n",
+ dev->data->dev_id,
+ device->max_queue_nb,
+ device->nb_q_md);
+ return -EINVAL;
+ }
+
+ if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
+ & conf->event_queue_cfg) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "QUEUE_CFG_ALL_TYPES not supported\n",
+ dev->data->dev_id);
+ return -ENOTSUP;
+ } else if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK
+ & conf->event_queue_cfg) {
+ type = OPDL_Q_TYPE_SINGLE_LINK;
+ } else {
+ switch (conf->schedule_type) {
+ case RTE_SCHED_TYPE_ORDERED:
+ type = OPDL_Q_TYPE_ORDERED;
+ break;
+ case RTE_SCHED_TYPE_ATOMIC:
+ type = OPDL_Q_TYPE_ATOMIC;
+ break;
+ case RTE_SCHED_TYPE_PARALLEL:
+ type = OPDL_Q_TYPE_ORDERED;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Unknown queue type %d requested\n",
+ dev->data->dev_id,
+ conf->event_queue_cfg);
+ return -EINVAL;
+ }
+ }
+ /* Check if queue id has been setup already */
+ uint32_t i;
+ for (i = 0; i < device->nb_q_md; i++) {
+ if (device->q_md[i].ext_id == queue_id) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "queue id %u already setup\n",
+ dev->data->dev_id,
+ queue_id);
+ return -EINVAL;
+ }
+ }
+
+ device->q_md[device->nb_q_md].ext_id = queue_id;
+ device->q_md[device->nb_q_md].type = type;
+ device->q_md[device->nb_q_md].setup = 1;
+ device->nb_q_md++;
+
+ return 1;
+}
+
+static void
+opdl_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ RTE_SET_USED(queue_id);
+
+ if (device->data->dev_started)
+ return;
+
+}
+
+static void
+opdl_queue_def_conf(struct rte_eventdev *dev,
+ uint8_t queue_id,
+ struct rte_event_queue_conf *conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ static const struct rte_event_queue_conf default_conf = {
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1,
+ .event_queue_cfg = 0,
+ .schedule_type = RTE_SCHED_TYPE_ORDERED,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ };
+
+ *conf = default_conf;
+}
+
+
+static int
+opdl_dev_configure(const struct rte_eventdev *dev)
+{
+ struct opdl_evdev *opdl = opdl_pmd_priv(dev);
+ const struct rte_eventdev_data *data = dev->data;
+ const struct rte_event_dev_config *conf = &data->dev_conf;
+
+ opdl->max_queue_nb = conf->nb_event_queues;
+ opdl->max_port_nb = conf->nb_event_ports;
+ opdl->nb_events_limit = conf->nb_events_limit;
+
+ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "DEQUEUE_TIMEOUT not supported\n",
+ dev->data->dev_id);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+static void
+opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
+{
+ RTE_SET_USED(dev);
+
+ static const struct rte_event_dev_info evdev_opdl_info = {
+ .driver_name = OPDL_PMD_NAME,
+ .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
+ .max_event_queue_flows = OPDL_QID_NUM_FIDS,
+ .max_event_queue_priority_levels = OPDL_Q_PRIORITY_MAX,
+ .max_event_priority_levels = OPDL_IQS_MAX,
+ .max_event_ports = OPDL_PORTS_MAX,
+ .max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,
+ .max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
+ .max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
+ .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE,
+ };
+
+ *info = evdev_opdl_info;
+}
+
+static void
+opdl_dump(struct rte_eventdev *dev, FILE *f)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return;
+
+ fprintf(f,
+ "\n\n -- RING STATISTICS --\n");
+ uint32_t i;
+ for (i = 0; i < device->nb_opdls; i++)
+ opdl_ring_dump(device->opdl[i], f);
+
+ fprintf(f,
+ "\n\n -- PORT STATISTICS --\n"
+ "Type Port Index Port Id Queue Id Av. Req Size "
+ "Av. Grant Size Av. Cycles PP"
+ " Empty DEQs Non Empty DEQs Pkts Processed\n");
+
+ for (i = 0; i < device->max_port_nb; i++) {
+ char queue_id[64];
+ char total_cyc[64];
+ const char *p_type;
+
+ uint64_t cne, cpg;
+ struct opdl_port *port = &device->ports[i];
+
+ if (port->initialized) {
+ cne = port->port_stat[claim_non_empty];
+ cpg = port->port_stat[claim_pkts_granted];
+ if (port->p_type == OPDL_REGULAR_PORT)
+ p_type = "REG";
+ else if (port->p_type == OPDL_PURE_RX_PORT)
+ p_type = " RX";
+ else if (port->p_type == OPDL_PURE_TX_PORT)
+ p_type = " TX";
+ else if (port->p_type == OPDL_ASYNC_PORT)
+ p_type = "SYNC";
+ else
+ p_type = "????";
+
+ sprintf(queue_id, "%02u", port->external_qid);
+ if (port->p_type == OPDL_REGULAR_PORT ||
+ port->p_type == OPDL_ASYNC_PORT)
+ sprintf(total_cyc,
+ " %'16"PRIu64"",
+ (cpg != 0 ?
+ port->port_stat[total_cycles] / cpg
+ : 0));
+ else
+ sprintf(total_cyc,
+ " ----");
+ fprintf(f,
+ "%4s %10u %8u %9s %'16"PRIu64" %'16"PRIu64" %s "
+ "%'16"PRIu64" %'16"PRIu64" %'16"PRIu64"\n",
+ p_type,
+ i,
+ port->id,
+ (port->external_qid == OPDL_INVALID_QID ? "---"
+ : queue_id),
+ (cne != 0 ?
+ port->port_stat[claim_pkts_requested] / cne
+ : 0),
+ (cne != 0 ?
+ port->port_stat[claim_pkts_granted] / cne
+ : 0),
+ total_cyc,
+ port->port_stat[claim_empty],
+ port->port_stat[claim_non_empty],
+ port->port_stat[claim_pkts_granted]);
+ }
+ }
+ fprintf(f, "\n");
+}
+
+
+static void
+opdl_stop(struct rte_eventdev *dev)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ opdl_xstats_uninit(dev);
+
+ destroy_queues_and_rings(dev);
+
+
+ device->started = 0;
+
+ rte_smp_wmb();
+}
+
+static int
+opdl_start(struct rte_eventdev *dev)
+{
+ int err = 0;
+
+ if (!err)
+ err = create_queues_and_rings(dev);
+
+
+ if (!err)
+ err = assign_internal_queue_ids(dev);
+
+
+ if (!err)
+ err = initialise_queue_zero_ports(dev);
+
+
+ if (!err)
+ err = initialise_all_other_ports(dev);
+
+
+ if (!err)
+ err = check_queues_linked(dev);
+
+
+ if (!err)
+ err = opdl_add_event_handlers(dev);
+
+
+ if (!err)
+ err = build_all_dependencies(dev);
+
+ if (!err) {
+ opdl_xstats_init(dev);
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
+ "SUCCESS : Created %u total queues (%u ex, %u in),"
+ " %u opdls, %u event_dev ports, %u input ports",
+ opdl_pmd_dev_id(device),
+ device->nb_queues,
+ (device->nb_queues - device->nb_opdls),
+ device->nb_opdls,
+ device->nb_opdls,
+ device->nb_ports,
+ device->queue[0].nb_ports);
+ } else
+ opdl_stop(dev);
+
+ return err;
+}
+
+static int
+opdl_close(struct rte_eventdev *dev)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+ uint32_t i;
+
+ for (i = 0; i < device->max_port_nb; i++) {
+ memset(&device->ports[i],
+ 0,
+ sizeof(struct opdl_port));
+ }
+
+ memset(&device->s_md,
+ 0x0,
+ sizeof(struct opdl_stage_meta_data)*OPDL_PORTS_MAX);
+
+ memset(&device->q_md,
+ 0xFF,
+ sizeof(struct opdl_queue_meta_data)*OPDL_MAX_QUEUES);
+
+
+ memset(device->q_map_ex_to_in,
+ 0,
+ sizeof(uint8_t)*OPDL_INVALID_QID);
+
+ opdl_xstats_uninit(dev);
+
+ device->max_port_nb = 0;
+
+ device->max_queue_nb = 0;
+
+ device->nb_opdls = 0;
+
+ device->nb_queues = 0;
+
+ device->nb_ports = 0;
+
+ device->nb_q_md = 0;
+
+ dev->data->nb_queues = 0;
+
+ dev->data->nb_ports = 0;
+
+
+ return 0;
+}
+
+static int
+assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *socket_id = opaque;
+ *socket_id = atoi(value);
+ if (*socket_id >= RTE_MAX_NUMA_NODES)
+ return -1;
+ return 0;
+}
+
+static int
+set_do_validation(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *do_val = opaque;
+ *do_val = atoi(value);
+ if (*do_val != 0)
+ *do_val = 1;
+
+ return 0;
+}
+static int
+set_do_test(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *do_test = opaque;
+
+ *do_test = atoi(value);
+
+ if (*do_test != 0)
+ *do_test = 1;
+ return 0;
+}
+
+static int
+opdl_probe(struct rte_vdev_device *vdev)
+{
+ static struct rte_eventdev_ops evdev_opdl_ops = {
+ .dev_configure = opdl_dev_configure,
+ .dev_infos_get = opdl_info_get,
+ .dev_close = opdl_close,
+ .dev_start = opdl_start,
+ .dev_stop = opdl_stop,
+ .dump = opdl_dump,
+
+ .queue_def_conf = opdl_queue_def_conf,
+ .queue_setup = opdl_queue_setup,
+ .queue_release = opdl_queue_release,
+ .port_def_conf = opdl_port_def_conf,
+ .port_setup = opdl_port_setup,
+ .port_release = opdl_port_release,
+ .port_link = opdl_port_link,
+ .port_unlink = opdl_port_unlink,
+
+
+ .xstats_get = opdl_xstats_get,
+ .xstats_get_names = opdl_xstats_get_names,
+ .xstats_get_by_name = opdl_xstats_get_by_name,
+ .xstats_reset = opdl_xstats_reset,
+ };
+
+ static const char *const args[] = {
+ NUMA_NODE_ARG,
+ DO_VALIDATION_ARG,
+ DO_TEST_ARG,
+ NULL
+ };
+ const char *name;
+ const char *params;
+ struct rte_eventdev *dev;
+ struct opdl_evdev *opdl;
+ int socket_id = rte_socket_id();
+ int do_validation = 0;
+ int do_test = 0;
+ int str_len;
+ int test_result = 0;
+
+ name = rte_vdev_device_name(vdev);
+ params = rte_vdev_device_args(vdev);
+ if (params != NULL && params[0] != '\0') {
+ struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+ if (!kvlist) {
+ PMD_DRV_LOG(INFO,
+ "Ignoring unsupported parameters when creating device '%s'\n",
+ name);
+ } else {
+ int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
+ assign_numa_node, &socket_id);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "%s: Error parsing numa node parameter",
+ name);
+
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ ret = rte_kvargs_process(kvlist, DO_VALIDATION_ARG,
+ set_do_validation, &do_validation);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "%s: Error parsing do validation parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ ret = rte_kvargs_process(kvlist, DO_TEST_ARG,
+ set_do_test, &do_test);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "%s: Error parsing do test parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ rte_kvargs_free(kvlist);
+ }
+ }
+ dev = rte_event_pmd_vdev_init(name,
+ sizeof(struct opdl_evdev), socket_id);
+
+ if (dev == NULL) {
+ PMD_DRV_LOG(ERR, "eventdev vdev init() failed");
+ return -EFAULT;
+ }
+
+ PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
+ "Success - creating eventdev device %s, numa_node:[%d], do_valdation:[%s]"
+ " , self_test:[%s]\n",
+ dev->data->dev_id,
+ name,
+ socket_id,
+ (do_validation ? "true" : "false"),
+ (do_test ? "true" : "false"));
+
+ dev->dev_ops = &evdev_opdl_ops;
+
+ dev->enqueue = opdl_event_enqueue;
+ dev->enqueue_burst = opdl_event_enqueue_burst;
+ dev->enqueue_new_burst = opdl_event_enqueue_burst;
+ dev->enqueue_forward_burst = opdl_event_enqueue_burst;
+ dev->dequeue = opdl_event_dequeue;
+ dev->dequeue_burst = opdl_event_dequeue_burst;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ opdl = dev->data->dev_private;
+ opdl->data = dev->data;
+ opdl->socket = socket_id;
+ opdl->do_validation = do_validation;
+ opdl->do_test = do_test;
+ str_len = strlen(name);
+ memcpy(opdl->service_name, name, str_len);
+
+ if (do_test == 1)
+ test_result = opdl_selftest();
+
+ return test_result;
+}
+
+static int
+opdl_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ PMD_DRV_LOG(INFO, "Closing eventdev opdl device %s\n", name);
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver evdev_opdl_pmd_drv = {
+ .probe = opdl_probe,
+ .remove = opdl_remove
+};
+
+RTE_INIT(opdl_init_log)
+{
+ opdl_logtype_driver = rte_log_register("pmd.event.opdl.driver");
+ if (opdl_logtype_driver >= 0)
+ rte_log_set_level(opdl_logtype_driver, RTE_LOG_INFO);
+}
+
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OPDL_PMD, evdev_opdl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(event_opdl, NUMA_NODE_ARG "=<int>"
+ DO_VALIDATION_ARG "=<int>" DO_TEST_ARG "=<int>");
diff --git a/src/spdk/dpdk/drivers/event/opdl/opdl_evdev.h b/src/spdk/dpdk/drivers/event/opdl/opdl_evdev.h
new file mode 100644
index 00000000..610b58b3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/opdl/opdl_evdev.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _OPDL_EVDEV_H_
+#define _OPDL_EVDEV_H_
+
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_atomic.h>
+#include "opdl_ring.h"
+
+#define OPDL_QID_NUM_FIDS 1024
+#define OPDL_IQS_MAX 1
+#define OPDL_Q_PRIORITY_MAX 1
+#define OPDL_PORTS_MAX 64
+#define MAX_OPDL_CONS_Q_DEPTH 128
+/* OPDL size */
+#define OPDL_INFLIGHT_EVENTS_TOTAL 4096
+/* allow for lots of over-provisioning */
+#define OPDL_FRAGMENTS_MAX 1
+
+/* report dequeue burst sizes in buckets */
+#define OPDL_DEQ_STAT_BUCKET_SHIFT 2
+/* how many packets pulled from port by sched */
+#define SCHED_DEQUEUE_BURST_SIZE 32
+
+/* size of our history list */
+#define OPDL_PORT_HIST_LIST (MAX_OPDL_PROD_Q_DEPTH)
+
+/* how many data points use for average stats */
+#define NUM_SAMPLES 64
+
+#define EVENTDEV_NAME_OPDL_PMD event_opdl
+#define OPDL_PMD_NAME RTE_STR(event_opdl)
+#define OPDL_PMD_NAME_MAX 64
+
+#define OPDL_INVALID_QID 255
+
+#define OPDL_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)
+
+#define OPDL_NUM_POLL_BUCKETS \
+ (MAX_OPDL_CONS_Q_DEPTH >> OPDL_DEQ_STAT_BUCKET_SHIFT)
+
+enum {
+ QE_FLAG_VALID_SHIFT = 0,
+ QE_FLAG_COMPLETE_SHIFT,
+ QE_FLAG_NOT_EOP_SHIFT,
+ _QE_FLAG_COUNT
+};
+
+enum port_type {
+ OPDL_INVALID_PORT = 0,
+ OPDL_REGULAR_PORT = 1,
+ OPDL_PURE_RX_PORT,
+ OPDL_PURE_TX_PORT,
+ OPDL_ASYNC_PORT
+};
+
+enum queue_type {
+ OPDL_Q_TYPE_INVALID = 0,
+ OPDL_Q_TYPE_SINGLE_LINK = 1,
+ OPDL_Q_TYPE_ATOMIC,
+ OPDL_Q_TYPE_ORDERED
+};
+
+enum queue_pos {
+ OPDL_Q_POS_START = 0,
+ OPDL_Q_POS_MIDDLE,
+ OPDL_Q_POS_END
+};
+
+#define QE_FLAG_VALID (1 << QE_FLAG_VALID_SHIFT) /* for NEW FWD, FRAG */
+#define QE_FLAG_COMPLETE (1 << QE_FLAG_COMPLETE_SHIFT) /* set for FWD, DROP */
+#define QE_FLAG_NOT_EOP (1 << QE_FLAG_NOT_EOP_SHIFT) /* set for FRAG only */
+
+static const uint8_t opdl_qe_flag_map[] = {
+ QE_FLAG_VALID /* NEW Event */,
+ QE_FLAG_VALID | QE_FLAG_COMPLETE /* FWD Event */,
+ QE_FLAG_COMPLETE /* RELEASE Event */,
+
+ /* Values which can be used for future support for partial
+ * events, i.e. where one event comes back to the scheduler
+ * as multiple which need to be tracked together
+ */
+ QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP,
+};
+
+
+enum port_xstat_name {
+ claim_pkts_requested = 0,
+ claim_pkts_granted,
+ claim_non_empty,
+ claim_empty,
+ total_cycles,
+ max_num_port_xstat
+};
+
+#define OPDL_MAX_PORT_XSTAT_NUM (OPDL_PORTS_MAX * max_num_port_xstat)
+
+struct opdl_port;
+
+typedef uint16_t (*opdl_enq_operation)(struct opdl_port *port,
+ const struct rte_event ev[],
+ uint16_t num);
+
+typedef uint16_t (*opdl_deq_operation)(struct opdl_port *port,
+ struct rte_event ev[],
+ uint16_t num);
+
+struct opdl_evdev;
+
+struct opdl_stage_meta_data {
+ uint32_t num_claimed; /* number of entries claimed by this stage */
+ uint32_t burst_sz; /* Port claim burst size */
+};
+
+struct opdl_port {
+
+ /* back pointer */
+ struct opdl_evdev *opdl;
+
+ /* enq handler & stage instance */
+ opdl_enq_operation enq;
+ struct opdl_stage *enq_stage_inst;
+
+ /* deq handler & stage instance */
+ opdl_deq_operation deq;
+ struct opdl_stage *deq_stage_inst;
+
+ /* port id has correctly been set */
+ uint8_t configured;
+
+ /* set when the port is initialized */
+ uint8_t initialized;
+
+ /* A numeric ID for the port */
+ uint8_t id;
+
+ /* Space for claimed entries */
+ struct rte_event *entries[MAX_OPDL_CONS_Q_DEPTH];
+
+ /* RX/REGULAR/TX/ASYNC - determined on position in queue */
+ enum port_type p_type;
+
+ /* if the claim is static atomic type */
+ bool atomic_claim;
+
+ /* Queue linked to this port - internal queue id*/
+ uint8_t queue_id;
+
+ /* Queue linked to this port - external queue id*/
+ uint8_t external_qid;
+
+ /* Next queue linked to this port - external queue id*/
+ uint8_t next_external_qid;
+
+ /* number of instances of this stage */
+ uint32_t num_instance;
+
+ /* instance ID of this stage*/
+ uint32_t instance_id;
+
+ /* track packets in and out of this port */
+ uint64_t port_stat[max_num_port_xstat];
+ uint64_t start_cycles;
+};
+
+struct opdl_queue_meta_data {
+ uint8_t ext_id;
+ enum queue_type type;
+ int8_t setup;
+};
+
+struct opdl_xstats_entry {
+ struct rte_event_dev_xstats_name stat;
+ unsigned int id;
+ uint64_t *value;
+};
+
+struct opdl_queue {
+
+ /* Opdl ring this queue is associated with */
+ uint32_t opdl_id;
+
+ /* type and position have correctly been set */
+ uint8_t configured;
+
+ /* port number and associated ports have been associated */
+ uint8_t initialized;
+
+ /* type of this queue (Atomic, Ordered, Parallel, Direct)*/
+ enum queue_type q_type;
+
+ /* position of queue (START, MIDDLE, END) */
+ enum queue_pos q_pos;
+
+ /* external queue id. It is mapped to the queue position */
+ uint8_t external_qid;
+
+ struct opdl_port *ports[OPDL_PORTS_MAX];
+ uint32_t nb_ports;
+
+ /* priority, reserved for future */
+ uint8_t priority;
+};
+
+
+#define OPDL_TUR_PER_DEV 12
+
+/* PMD needs an extra queue per Opdl */
+#define OPDL_MAX_QUEUES (RTE_EVENT_MAX_QUEUES_PER_DEV - OPDL_TUR_PER_DEV)
+
+
+struct opdl_evdev {
+ struct rte_eventdev_data *data;
+
+ uint8_t started;
+
+ /* Max number of ports and queues*/
+ uint32_t max_port_nb;
+ uint32_t max_queue_nb;
+
+ /* slots in the opdl ring */
+ uint32_t nb_events_limit;
+
+ /*
+ * Array holding all opdl for this device
+ */
+ struct opdl_ring *opdl[OPDL_TUR_PER_DEV];
+ uint32_t nb_opdls;
+
+ struct opdl_queue_meta_data q_md[OPDL_MAX_QUEUES];
+ uint32_t nb_q_md;
+
+ /* Internal queues - one per logical queue */
+ struct opdl_queue
+ queue[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;
+
+ uint32_t nb_queues;
+
+ struct opdl_stage_meta_data s_md[OPDL_PORTS_MAX];
+
+ /* Contains all ports - load balanced and directed */
+ struct opdl_port ports[OPDL_PORTS_MAX] __rte_cache_aligned;
+ uint32_t nb_ports;
+
+ uint8_t q_map_ex_to_in[OPDL_INVALID_QID];
+
+ /* Stats */
+ struct opdl_xstats_entry port_xstat[OPDL_MAX_PORT_XSTAT_NUM];
+
+ char service_name[OPDL_PMD_NAME_MAX];
+ int socket;
+ int do_validation;
+ int do_test;
+};
+
+
+static inline struct opdl_evdev *
+opdl_pmd_priv(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+static inline uint8_t
+opdl_pmd_dev_id(const struct opdl_evdev *opdl)
+{
+ return opdl->data->dev_id;
+}
+
+static inline const struct opdl_evdev *
+opdl_pmd_priv_const(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+uint16_t opdl_event_enqueue(void *port, const struct rte_event *ev);
+uint16_t opdl_event_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t num);
+
+uint16_t opdl_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
+uint16_t opdl_event_dequeue_burst(void *port, struct rte_event *ev,
+ uint16_t num, uint64_t wait);
+void opdl_event_schedule(struct rte_eventdev *dev);
+
+void opdl_xstats_init(struct rte_eventdev *dev);
+int opdl_xstats_uninit(struct rte_eventdev *dev);
+int opdl_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size);
+int opdl_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n);
+uint64_t opdl_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id);
+int opdl_xstats_reset(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids);
+
+int opdl_add_event_handlers(struct rte_eventdev *dev);
+int build_all_dependencies(struct rte_eventdev *dev);
+int check_queues_linked(struct rte_eventdev *dev);
+int create_queues_and_rings(struct rte_eventdev *dev);
+int initialise_all_other_ports(struct rte_eventdev *dev);
+int initialise_queue_zero_ports(struct rte_eventdev *dev);
+int assign_internal_queue_ids(struct rte_eventdev *dev);
+void destroy_queues_and_rings(struct rte_eventdev *dev);
+int opdl_selftest(void);
+
+#endif /* _OPDL_EVDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/event/opdl/opdl_evdev_init.c b/src/spdk/dpdk/drivers/event/opdl/opdl_evdev_init.c
new file mode 100644
index 00000000..582ad698
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/opdl/opdl_evdev_init.c
@@ -0,0 +1,943 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <inttypes.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+#include <rte_memzone.h>
+
+#include "opdl_evdev.h"
+#include "opdl_ring.h"
+#include "opdl_log.h"
+
+
+static __rte_always_inline uint32_t
+enqueue_check(struct opdl_port *p,
+ const struct rte_event ev[],
+ uint16_t num,
+ uint16_t num_events)
+{
+ uint16_t i;
+
+ if (p->opdl->do_validation) {
+
+ for (i = 0; i < num; i++) {
+ if (ev[i].queue_id != p->next_external_qid) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "ERROR - port:[%u] - event wants"
+ " to enq to q_id[%u],"
+ " but should be [%u]",
+ opdl_pmd_dev_id(p->opdl),
+ p->id,
+ ev[i].queue_id,
+ p->next_external_qid);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+ }
+
+ /* Stats */
+ if (p->p_type == OPDL_PURE_RX_PORT ||
+ p->p_type == OPDL_ASYNC_PORT) {
+ /* Stats */
+ if (num_events) {
+ p->port_stat[claim_pkts_requested] += num;
+ p->port_stat[claim_pkts_granted] += num_events;
+ p->port_stat[claim_non_empty]++;
+ p->start_cycles = rte_rdtsc();
+ } else {
+ p->port_stat[claim_empty]++;
+ p->start_cycles = 0;
+ }
+ } else {
+ if (p->start_cycles) {
+ uint64_t end_cycles = rte_rdtsc();
+ p->port_stat[total_cycles] +=
+ end_cycles - p->start_cycles;
+ }
+ }
+ } else {
+ if (num > 0 &&
+ ev[0].queue_id != p->next_external_qid) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+ }
+
+ return num;
+}
+
+static __rte_always_inline void
+update_on_dequeue(struct opdl_port *p,
+ struct rte_event ev[],
+ uint16_t num,
+ uint16_t num_events)
+{
+ if (p->opdl->do_validation) {
+ int16_t i;
+ for (i = 0; i < num; i++)
+ ev[i].queue_id =
+ p->opdl->queue[p->queue_id].external_qid;
+
+ /* Stats */
+ if (num_events) {
+ p->port_stat[claim_pkts_requested] += num;
+ p->port_stat[claim_pkts_granted] += num_events;
+ p->port_stat[claim_non_empty]++;
+ p->start_cycles = rte_rdtsc();
+ } else {
+ p->port_stat[claim_empty]++;
+ p->start_cycles = 0;
+ }
+ } else {
+ if (num > 0)
+ ev[0].queue_id =
+ p->opdl->queue[p->queue_id].external_qid;
+ }
+}
+
+
+/*
+ * Error RX enqueue:
+ *
+ *
+ */
+
+static uint16_t
+opdl_rx_error_enqueue(struct opdl_port *p,
+ const struct rte_event ev[],
+ uint16_t num)
+{
+ RTE_SET_USED(p);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(num);
+
+ rte_errno = -ENOSPC;
+
+ return 0;
+}
+
+/*
+ * RX enqueue:
+ *
+ * This function handles enqueue for a single input stage_inst with
+ * threadsafe disabled or enabled. eg 1 thread using a stage_inst or
+ * multiple threads sharing a stage_inst
+ */
+
+static uint16_t
+opdl_rx_enqueue(struct opdl_port *p,
+ const struct rte_event ev[],
+ uint16_t num)
+{
+ uint16_t enqueued = 0;
+
+ enqueued = opdl_ring_input(opdl_stage_get_opdl_ring(p->enq_stage_inst),
+ ev,
+ num,
+ false);
+ if (!enqueue_check(p, ev, num, enqueued))
+ return 0;
+
+
+ if (enqueued < num)
+ rte_errno = -ENOSPC;
+
+ return enqueued;
+}
+
+/*
+ * Error TX handler
+ *
+ */
+
+static uint16_t
+opdl_tx_error_dequeue(struct opdl_port *p,
+ struct rte_event ev[],
+ uint16_t num)
+{
+ RTE_SET_USED(p);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(num);
+
+ rte_errno = -ENOSPC;
+
+ return 0;
+}
+
+/*
+ * TX single threaded claim
+ *
+ * This function handles dequeue for a single worker stage_inst with
+ * threadsafe disabled. eg 1 thread using an stage_inst
+ */
+
+static uint16_t
+opdl_tx_dequeue_single_thread(struct opdl_port *p,
+ struct rte_event ev[],
+ uint16_t num)
+{
+ uint16_t returned;
+
+ struct opdl_ring *ring;
+
+ ring = opdl_stage_get_opdl_ring(p->deq_stage_inst);
+
+ returned = opdl_ring_copy_to_burst(ring,
+ p->deq_stage_inst,
+ ev,
+ num,
+ false);
+
+ update_on_dequeue(p, ev, num, returned);
+
+ return returned;
+}
+
+/*
+ * TX multi threaded claim
+ *
+ * This function handles dequeue for multiple worker stage_inst with
+ * threadsafe disabled. eg multiple stage_inst each with its own instance
+ */
+
+static uint16_t
+opdl_tx_dequeue_multi_inst(struct opdl_port *p,
+ struct rte_event ev[],
+ uint16_t num)
+{
+ uint32_t num_events = 0;
+
+ num_events = opdl_stage_claim(p->deq_stage_inst,
+ (void *)ev,
+ num,
+ NULL,
+ false,
+ false);
+
+ update_on_dequeue(p, ev, num, num_events);
+
+ return opdl_stage_disclaim(p->deq_stage_inst, num_events, false);
+}
+
+
+/*
+ * Worker thread claim
+ *
+ */
+
+static uint16_t
+opdl_claim(struct opdl_port *p, struct rte_event ev[], uint16_t num)
+{
+ uint32_t num_events = 0;
+
+ if (unlikely(num > MAX_OPDL_CONS_Q_DEPTH)) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Attempt to dequeue num of events larger than port (%d) max",
+ opdl_pmd_dev_id(p->opdl),
+ p->id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+
+ num_events = opdl_stage_claim(p->deq_stage_inst,
+ (void *)ev,
+ num,
+ NULL,
+ false,
+ p->atomic_claim);
+
+
+ update_on_dequeue(p, ev, num, num_events);
+
+ return num_events;
+}
+
+/*
+ * Worker thread disclaim
+ */
+
+static uint16_t
+opdl_disclaim(struct opdl_port *p, const struct rte_event ev[], uint16_t num)
+{
+ uint16_t enqueued = 0;
+
+ uint32_t i = 0;
+
+ for (i = 0; i < num; i++)
+ opdl_ring_cas_slot(p->enq_stage_inst, &ev[i],
+ i, p->atomic_claim);
+
+ enqueued = opdl_stage_disclaim(p->enq_stage_inst,
+ num,
+ false);
+
+ return enqueue_check(p, ev, num, enqueued);
+}
+
+static __rte_always_inline struct opdl_stage *
+stage_for_port(struct opdl_queue *q, unsigned int i)
+{
+ if (q->q_pos == OPDL_Q_POS_START || q->q_pos == OPDL_Q_POS_MIDDLE)
+ return q->ports[i]->enq_stage_inst;
+ else
+ return q->ports[i]->deq_stage_inst;
+}
+
+static int opdl_add_deps(struct opdl_evdev *device,
+ int q_id,
+ int deps_q_id)
+{
+ unsigned int i, j;
+ int status;
+ struct opdl_ring *ring;
+ struct opdl_queue *queue = &device->queue[q_id];
+ struct opdl_queue *queue_deps = &device->queue[deps_q_id];
+ struct opdl_stage *dep_stages[OPDL_PORTS_MAX];
+
+ /* sanity check that all stages are for same opdl ring */
+ for (i = 0; i < queue->nb_ports; i++) {
+ struct opdl_ring *r =
+ opdl_stage_get_opdl_ring(stage_for_port(queue, i));
+ for (j = 0; j < queue_deps->nb_ports; j++) {
+ struct opdl_ring *rj =
+ opdl_stage_get_opdl_ring(
+ stage_for_port(queue_deps, j));
+ if (r != rj) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Stages and dependents"
+ " are not for same opdl ring",
+ opdl_pmd_dev_id(device));
+ uint32_t k;
+ for (k = 0; k < device->nb_opdls; k++) {
+ opdl_ring_dump(device->opdl[k],
+ stdout);
+ }
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Gather all stages instance in deps */
+ for (i = 0; i < queue_deps->nb_ports; i++)
+ dep_stages[i] = stage_for_port(queue_deps, i);
+
+
+ /* Add all deps for each port->stage_inst in this queue */
+ for (i = 0; i < queue->nb_ports; i++) {
+
+ ring = opdl_stage_get_opdl_ring(stage_for_port(queue, i));
+
+ status = opdl_stage_deps_add(ring,
+ stage_for_port(queue, i),
+ queue->ports[i]->num_instance,
+ queue->ports[i]->instance_id,
+ dep_stages,
+ queue_deps->nb_ports);
+ if (status < 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+opdl_add_event_handlers(struct rte_eventdev *dev)
+{
+ int err = 0;
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+ unsigned int i;
+
+ for (i = 0; i < device->max_port_nb; i++) {
+
+ struct opdl_port *port = &device->ports[i];
+
+ if (port->configured) {
+ if (port->p_type == OPDL_PURE_RX_PORT) {
+ port->enq = opdl_rx_enqueue;
+ port->deq = opdl_tx_error_dequeue;
+
+ } else if (port->p_type == OPDL_PURE_TX_PORT) {
+
+ port->enq = opdl_rx_error_enqueue;
+
+ if (port->num_instance == 1)
+ port->deq =
+ opdl_tx_dequeue_single_thread;
+ else
+ port->deq = opdl_tx_dequeue_multi_inst;
+
+ } else if (port->p_type == OPDL_REGULAR_PORT) {
+
+ port->enq = opdl_disclaim;
+ port->deq = opdl_claim;
+
+ } else if (port->p_type == OPDL_ASYNC_PORT) {
+
+ port->enq = opdl_rx_enqueue;
+
+ /* Always single instance */
+ port->deq = opdl_tx_dequeue_single_thread;
+ } else {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "port:[%u] has invalid port type - ",
+ opdl_pmd_dev_id(port->opdl),
+ port->id);
+ err = -EINVAL;
+ break;
+ }
+ port->initialized = 1;
+ }
+ }
+
+ if (!err)
+ fprintf(stdout, "Success - enqueue/dequeue handler(s) added\n");
+ return err;
+}
+
+int
+build_all_dependencies(struct rte_eventdev *dev)
+{
+
+ int err = 0;
+ unsigned int i;
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ uint8_t start_qid = 0;
+
+ for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+ struct opdl_queue *queue = &device->queue[i];
+ if (!queue->initialized)
+ break;
+
+ if (queue->q_pos == OPDL_Q_POS_START) {
+ start_qid = i;
+ continue;
+ }
+
+ if (queue->q_pos == OPDL_Q_POS_MIDDLE) {
+ err = opdl_add_deps(device, i, i-1);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "dependency addition for queue:[%u] - FAILED",
+ dev->data->dev_id,
+ queue->external_qid);
+ break;
+ }
+ }
+
+ if (queue->q_pos == OPDL_Q_POS_END) {
+ /* Add this dependency */
+ err = opdl_add_deps(device, i, i-1);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "dependency addition for queue:[%u] - FAILED",
+ dev->data->dev_id,
+ queue->external_qid);
+ break;
+ }
+ /* Add dependency for rx on tx */
+ err = opdl_add_deps(device, start_qid, i);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "dependency addition for queue:[%u] - FAILED",
+ dev->data->dev_id,
+ queue->external_qid);
+ break;
+ }
+ }
+ }
+
+ if (!err)
+ fprintf(stdout, "Success - dependencies built\n");
+
+ return err;
+}
+int
+check_queues_linked(struct rte_eventdev *dev)
+{
+
+ int err = 0;
+ unsigned int i;
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+ uint32_t nb_iq = 0;
+
+ for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+ struct opdl_queue *queue = &device->queue[i];
+
+ if (!queue->initialized)
+ break;
+
+ if (queue->external_qid == OPDL_INVALID_QID)
+ nb_iq++;
+
+ if (queue->nb_ports == 0) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "queue:[%u] has no associated ports",
+ dev->data->dev_id,
+ i);
+ err = -EINVAL;
+ break;
+ }
+ }
+ if (!err) {
+ if ((i - nb_iq) != device->max_queue_nb) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "%u queues counted but should be %u",
+ dev->data->dev_id,
+ i - nb_iq,
+ device->max_queue_nb);
+ err = -1;
+ }
+
+ }
+ return err;
+}
+
+void
+destroy_queues_and_rings(struct rte_eventdev *dev)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+ uint32_t i;
+
+ for (i = 0; i < device->nb_opdls; i++) {
+ if (device->opdl[i])
+ opdl_ring_free(device->opdl[i]);
+ }
+
+ memset(&device->queue,
+ 0,
+ sizeof(struct opdl_queue)
+ * RTE_EVENT_MAX_QUEUES_PER_DEV);
+}
+
+#define OPDL_ID(d)(d->nb_opdls - 1)
+
+static __rte_always_inline void
+initialise_queue(struct opdl_evdev *device,
+ enum queue_pos pos,
+ int32_t i)
+{
+ struct opdl_queue *queue = &device->queue[device->nb_queues];
+
+ if (i == -1) {
+ queue->q_type = OPDL_Q_TYPE_ORDERED;
+ queue->external_qid = OPDL_INVALID_QID;
+ } else {
+ queue->q_type = device->q_md[i].type;
+ queue->external_qid = device->q_md[i].ext_id;
+ /* Add ex->in for queues setup */
+ device->q_map_ex_to_in[queue->external_qid] = device->nb_queues;
+ }
+ queue->opdl_id = OPDL_ID(device);
+ queue->q_pos = pos;
+ queue->nb_ports = 0;
+ queue->configured = 1;
+
+ device->nb_queues++;
+}
+
+
+static __rte_always_inline int
+create_opdl(struct opdl_evdev *device)
+{
+ int err = 0;
+
+ char name[RTE_MEMZONE_NAMESIZE];
+
+ snprintf(name, RTE_MEMZONE_NAMESIZE,
+ "%s_%u", device->service_name, device->nb_opdls);
+
+ device->opdl[device->nb_opdls] =
+ opdl_ring_create(name,
+ device->nb_events_limit,
+ sizeof(struct rte_event),
+ device->max_port_nb * 2,
+ device->socket);
+
+ if (!device->opdl[device->nb_opdls]) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "opdl ring %u creation - FAILED",
+ opdl_pmd_dev_id(device),
+ device->nb_opdls);
+ err = -EINVAL;
+ } else {
+ device->nb_opdls++;
+ }
+ return err;
+}
+
+static __rte_always_inline int
+create_link_opdl(struct opdl_evdev *device, uint32_t index)
+{
+
+ int err = 0;
+
+ if (device->q_md[index + 1].type !=
+ OPDL_Q_TYPE_SINGLE_LINK) {
+
+ /* async queue with regular
+ * queue following it
+ */
+
+ /* create a new opdl ring */
+ err = create_opdl(device);
+ if (!err) {
+ /* create an initial
+ * dummy queue for new opdl
+ */
+ initialise_queue(device,
+ OPDL_Q_POS_START,
+ -1);
+ } else {
+ err = -EINVAL;
+ }
+ } else {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "queue %u, two consecutive"
+ " SINGLE_LINK queues, not allowed",
+ opdl_pmd_dev_id(device),
+ index);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+int
+create_queues_and_rings(struct rte_eventdev *dev)
+{
+ int err = 0;
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ device->nb_queues = 0;
+
+ if (device->nb_ports != device->max_port_nb) {
+ PMD_DRV_LOG(ERR, "Number ports setup:%u NOT EQUAL to max port"
+ " number:%u for this device",
+ device->nb_ports,
+ device->max_port_nb);
+ err = -1;
+ }
+
+ if (!err) {
+ /* We will have at least one opdl so create it now */
+ err = create_opdl(device);
+ }
+
+ if (!err) {
+
+ /* Create 1st "dummy" queue */
+ initialise_queue(device,
+ OPDL_Q_POS_START,
+ -1);
+
+ uint32_t i;
+ for (i = 0; i < device->nb_q_md; i++) {
+
+ /* Check */
+ if (!device->q_md[i].setup) {
+
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "queue meta data slot %u"
+ " not setup - FAILING",
+ dev->data->dev_id,
+ i);
+ err = -EINVAL;
+ break;
+ } else if (device->q_md[i].type !=
+ OPDL_Q_TYPE_SINGLE_LINK) {
+
+ if (!device->q_md[i + 1].setup) {
+ /* Create a simple ORDERED/ATOMIC
+ * queue at the end
+ */
+ initialise_queue(device,
+ OPDL_Q_POS_END,
+ i);
+
+ } else {
+ /* Create a simple ORDERED/ATOMIC
+ * queue in the middle
+ */
+ initialise_queue(device,
+ OPDL_Q_POS_MIDDLE,
+ i);
+ }
+ } else if (device->q_md[i].type ==
+ OPDL_Q_TYPE_SINGLE_LINK) {
+
+ /* create last queue for this opdl */
+ initialise_queue(device,
+ OPDL_Q_POS_END,
+ i);
+
+ err = create_link_opdl(device, i);
+
+ if (err)
+ break;
+
+
+ }
+ }
+ }
+ if (err)
+ destroy_queues_and_rings(dev);
+
+ return err;
+}
+
+
+int
+initialise_all_other_ports(struct rte_eventdev *dev)
+{
+ int err = 0;
+ struct opdl_stage *stage_inst = NULL;
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ uint32_t i;
+ for (i = 0; i < device->nb_ports; i++) {
+ struct opdl_port *port = &device->ports[i];
+ struct opdl_queue *queue = &device->queue[port->queue_id];
+
+ if (port->queue_id == 0) {
+ continue;
+ } else if (queue->q_type != OPDL_Q_TYPE_SINGLE_LINK) {
+
+ if (queue->q_pos == OPDL_Q_POS_MIDDLE) {
+
+ /* Regular port with claim/disclaim */
+ stage_inst = opdl_stage_add(
+ device->opdl[queue->opdl_id],
+ false,
+ false);
+ port->deq_stage_inst = stage_inst;
+ port->enq_stage_inst = stage_inst;
+
+ if (queue->q_type == OPDL_Q_TYPE_ATOMIC)
+ port->atomic_claim = true;
+ else
+ port->atomic_claim = false;
+
+ port->p_type = OPDL_REGULAR_PORT;
+
+ /* Add the port to the queue array of ports */
+ queue->ports[queue->nb_ports] = port;
+ port->instance_id = queue->nb_ports;
+ queue->nb_ports++;
+ opdl_stage_set_queue_id(stage_inst,
+ port->queue_id);
+
+ } else if (queue->q_pos == OPDL_Q_POS_END) {
+
+ /* tx port */
+ stage_inst = opdl_stage_add(
+ device->opdl[queue->opdl_id],
+ false,
+ false);
+ port->deq_stage_inst = stage_inst;
+ port->enq_stage_inst = NULL;
+ port->p_type = OPDL_PURE_TX_PORT;
+
+ /* Add the port to the queue array of ports */
+ queue->ports[queue->nb_ports] = port;
+ port->instance_id = queue->nb_ports;
+ queue->nb_ports++;
+ } else {
+
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "port %u:, linked incorrectly"
+ " to a q_pos START/INVALID %u",
+ opdl_pmd_dev_id(port->opdl),
+ port->id,
+ queue->q_pos);
+ err = -EINVAL;
+ break;
+ }
+
+ } else if (queue->q_type == OPDL_Q_TYPE_SINGLE_LINK) {
+
+ port->p_type = OPDL_ASYNC_PORT;
+
+ /* -- tx -- */
+ stage_inst = opdl_stage_add(
+ device->opdl[queue->opdl_id],
+ false,
+ false); /* First stage */
+ port->deq_stage_inst = stage_inst;
+
+ /* Add the port to the queue array of ports */
+ queue->ports[queue->nb_ports] = port;
+ port->instance_id = queue->nb_ports;
+ queue->nb_ports++;
+
+ if (queue->nb_ports > 1) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "queue %u:, setup as SINGLE_LINK"
+ " but has more than one port linked",
+ opdl_pmd_dev_id(port->opdl),
+ queue->external_qid);
+ err = -EINVAL;
+ break;
+ }
+
+ /* -- single instance rx for next opdl -- */
+ uint8_t next_qid =
+ device->q_map_ex_to_in[queue->external_qid] + 1;
+ if (next_qid < RTE_EVENT_MAX_QUEUES_PER_DEV &&
+ device->queue[next_qid].configured) {
+
+ /* Remap the queue */
+ queue = &device->queue[next_qid];
+
+ stage_inst = opdl_stage_add(
+ device->opdl[queue->opdl_id],
+ false,
+ true);
+ port->enq_stage_inst = stage_inst;
+
+ /* Add the port to the queue array of ports */
+ queue->ports[queue->nb_ports] = port;
+ port->instance_id = queue->nb_ports;
+ queue->nb_ports++;
+ if (queue->nb_ports > 1) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "dummy queue %u: for "
+ "port %u, "
+ "SINGLE_LINK but has more "
+ "than one port linked",
+ opdl_pmd_dev_id(port->opdl),
+ next_qid,
+ port->id);
+ err = -EINVAL;
+ break;
+ }
+ /* Set this queue to initialized as it is never
+ * referenced by any ports
+ */
+ queue->initialized = 1;
+ }
+ }
+ }
+
+ /* Now that all ports are initialised we need to
+ * setup the last bit of stage md
+ */
+ if (!err) {
+ for (i = 0; i < device->nb_ports; i++) {
+ struct opdl_port *port = &device->ports[i];
+ struct opdl_queue *queue =
+ &device->queue[port->queue_id];
+
+ if (port->configured &&
+ (port->queue_id != OPDL_INVALID_QID)) {
+ if (queue->nb_ports == 0) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "queue:[%u] has no ports"
+ " linked to it",
+ opdl_pmd_dev_id(port->opdl),
+ port->id);
+ err = -EINVAL;
+ break;
+ }
+
+ port->num_instance = queue->nb_ports;
+ port->initialized = 1;
+ queue->initialized = 1;
+ } else {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Port:[%u] not configured invalid"
+ " queue configuration",
+ opdl_pmd_dev_id(port->opdl),
+ port->id);
+ err = -EINVAL;
+ break;
+ }
+ }
+ }
+ return err;
+}
+
+int
+initialise_queue_zero_ports(struct rte_eventdev *dev)
+{
+ int err = 0;
+ uint8_t mt_rx = 0;
+ struct opdl_stage *stage_inst = NULL;
+ struct opdl_queue *queue = NULL;
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ /* Assign queue zero and figure out how many Q0 ports we have */
+ uint32_t i;
+ for (i = 0; i < device->nb_ports; i++) {
+ struct opdl_port *port = &device->ports[i];
+ if (port->queue_id == OPDL_INVALID_QID) {
+ port->queue_id = 0;
+ port->external_qid = OPDL_INVALID_QID;
+ port->p_type = OPDL_PURE_RX_PORT;
+ mt_rx++;
+ }
+ }
+
+ /* Create the stage */
+ stage_inst = opdl_stage_add(device->opdl[0],
+ (mt_rx > 1 ? true : false),
+ true);
+ if (stage_inst) {
+
+ /* Assign the new created input stage to all relevant ports */
+ for (i = 0; i < device->nb_ports; i++) {
+ struct opdl_port *port = &device->ports[i];
+ if (port->queue_id == 0) {
+ queue = &device->queue[port->queue_id];
+ port->enq_stage_inst = stage_inst;
+ port->deq_stage_inst = NULL;
+ port->configured = 1;
+ port->initialized = 1;
+
+ queue->ports[queue->nb_ports] = port;
+ port->instance_id = queue->nb_ports;
+ queue->nb_ports++;
+ }
+ }
+ } else {
+ err = -1;
+ }
+ return err;
+}
+
+int
+assign_internal_queue_ids(struct rte_eventdev *dev)
+{
+ int err = 0;
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+ uint32_t i;
+
+ for (i = 0; i < device->nb_ports; i++) {
+ struct opdl_port *port = &device->ports[i];
+ if (port->external_qid != OPDL_INVALID_QID) {
+ port->queue_id =
+ device->q_map_ex_to_in[port->external_qid];
+
+ /* Now do the external_qid of the next queue */
+ struct opdl_queue *queue =
+ &device->queue[port->queue_id];
+ if (queue->q_pos == OPDL_Q_POS_END)
+ port->next_external_qid =
+ device->queue[port->queue_id + 2].external_qid;
+ else
+ port->next_external_qid =
+ device->queue[port->queue_id + 1].external_qid;
+ }
+ }
+ return err;
+}
diff --git a/src/spdk/dpdk/drivers/event/opdl/opdl_evdev_xstats.c b/src/spdk/dpdk/drivers/event/opdl/opdl_evdev_xstats.c
new file mode 100644
index 00000000..0e6c6bd5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/opdl/opdl_evdev_xstats.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include "opdl_evdev.h"
+#include "opdl_log.h"
+
+static const char * const port_xstat_str[] = {
+
+ "claim_pkts_requested",
+ "claim_pkts_granted",
+ "claim_non_empty",
+ "claim_empty",
+ "total_cycles",
+};
+
+
+void
+opdl_xstats_init(struct rte_eventdev *dev)
+{
+ uint32_t i, j;
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return;
+
+ for (i = 0; i < device->max_port_nb; i++) {
+ struct opdl_port *port = &device->ports[i];
+
+ for (j = 0; j < max_num_port_xstat; j++) {
+ uint32_t index = (i * max_num_port_xstat) + j;
+
+ /* Name */
+ sprintf(device->port_xstat[index].stat.name,
+ "port_%02u_%s",
+ i,
+ port_xstat_str[j]);
+
+ /* ID */
+ device->port_xstat[index].id = index;
+
+ /* Stats ptr */
+ device->port_xstat[index].value = &port->port_stat[j];
+ }
+ }
+}
+
+int
+opdl_xstats_uninit(struct rte_eventdev *dev)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return 0;
+
+ memset(device->port_xstat,
+ 0,
+ sizeof(device->port_xstat));
+
+ return 0;
+}
+
+int
+opdl_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return -ENOTSUP;
+
+ if (mode == RTE_EVENT_DEV_XSTATS_DEVICE ||
+ mode == RTE_EVENT_DEV_XSTATS_QUEUE)
+ return -EINVAL;
+
+ if (queue_port_id >= device->max_port_nb)
+ return -EINVAL;
+
+ if (size < max_num_port_xstat)
+ return max_num_port_xstat;
+
+ uint32_t port_idx = queue_port_id * max_num_port_xstat;
+
+ uint32_t j;
+ for (j = 0; j < max_num_port_xstat; j++) {
+
+ strcpy(xstats_names[j].name,
+ device->port_xstat[j + port_idx].stat.name);
+ ids[j] = device->port_xstat[j + port_idx].id;
+ }
+
+ return max_num_port_xstat;
+}
+
+int
+opdl_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id,
+ const unsigned int ids[],
+ uint64_t values[], unsigned int n)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return -ENOTSUP;
+
+ if (mode == RTE_EVENT_DEV_XSTATS_DEVICE ||
+ mode == RTE_EVENT_DEV_XSTATS_QUEUE)
+ return -EINVAL;
+
+ if (queue_port_id >= device->max_port_nb)
+ return -EINVAL;
+
+ if (n > max_num_port_xstat)
+ return -EINVAL;
+
+ uint32_t p_start = queue_port_id * max_num_port_xstat;
+ uint32_t p_finish = p_start + max_num_port_xstat;
+
+ uint32_t i;
+ for (i = 0; i < n; i++) {
+ if (ids[i] < p_start || ids[i] >= p_finish)
+ return -EINVAL;
+
+ values[i] = *(device->port_xstat[ids[i]].value);
+ }
+
+ return n;
+}
+
+uint64_t
+opdl_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return -ENOTSUP;
+
+ uint32_t max_index = device->max_port_nb * max_num_port_xstat;
+
+ uint32_t i;
+ for (i = 0; i < max_index; i++) {
+
+ if (strncmp(name,
+ device->port_xstat[i].stat.name,
+ RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0) {
+ if (id != NULL)
+ *id = i;
+ if (device->port_xstat[i].value)
+ return *(device->port_xstat[i].value);
+ break;
+ }
+ }
+ return -EINVAL;
+}
+
+int
+opdl_xstats_reset(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id, const uint32_t ids[],
+ uint32_t nb_ids)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return -ENOTSUP;
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(mode);
+ RTE_SET_USED(queue_port_id);
+ RTE_SET_USED(ids);
+ RTE_SET_USED(nb_ids);
+
+ return -ENOTSUP;
+}
diff --git a/src/spdk/dpdk/drivers/event/opdl/opdl_log.h b/src/spdk/dpdk/drivers/event/opdl/opdl_log.h
new file mode 100644
index 00000000..ae5221c1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/opdl/opdl_log.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _OPDL_LOGS_H_
+#define _OPDL_LOGS_H_
+
+#include <rte_log.h>
+
+extern int opdl_logtype_driver;
+
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, opdl_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+
+
+#endif /* _OPDL_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/event/opdl/opdl_ring.c b/src/spdk/dpdk/drivers/event/opdl/opdl_ring.c
new file mode 100644
index 00000000..8aca481c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/opdl/opdl_ring.c
@@ -0,0 +1,1272 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_debug.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal_memconfig.h>
+
+#include "opdl_ring.h"
+#include "opdl_log.h"
+
+#define LIB_NAME "opdl_ring"
+
+#define OPDL_NAME_SIZE 64
+
+
+#define OPDL_EVENT_MASK (0x00000000000FFFFFULL)
+#define OPDL_FLOWID_MASK (0xFFFFF)
+#define OPDL_OPA_MASK (0xFF)
+#define OPDL_OPA_OFFSET (0x38)
+
+int opdl_logtype_driver;
+
+/* Types of dependency between stages */
+enum dep_type {
+ DEP_NONE = 0, /* no dependency */
+ DEP_DIRECT, /* stage has direct dependency */
+ DEP_INDIRECT, /* in-direct dependency through other stage(s) */
+ DEP_SELF, /* stage dependency on itself, used to detect loops */
+};
+
+/* Shared section of stage state.
+ * Care is needed when accessing and the layout is important, especially to
+ * limit the adjacent cache-line HW prefetcher from impacting performance.
+ */
+struct shared_state {
+ /* Last known minimum sequence number of dependencies, used for multi
+ * thread operation
+ */
+ uint32_t available_seq;
+ char _pad1[RTE_CACHE_LINE_SIZE * 3];
+ uint32_t head; /* Head sequence number (for multi thread operation) */
+ char _pad2[RTE_CACHE_LINE_SIZE * 3];
+ struct opdl_stage *stage; /* back pointer */
+ uint32_t tail; /* Tail sequence number */
+ char _pad3[RTE_CACHE_LINE_SIZE * 2];
+} __rte_cache_aligned;
+
+/* A structure to keep track of "unfinished" claims. This is only used for
+ * stages that are threadsafe. Each lcore accesses its own instance of this
+ * structure to record the entries it has claimed. This allows one lcore to make
+ * multiple claims without being blocked by another. When disclaiming it moves
+ * forward the shared tail when the shared tail matches the tail value recorded
+ * here.
+ */
+struct claim_manager {
+ uint32_t num_to_disclaim;
+ uint32_t num_claimed;
+ uint32_t mgr_head;
+ uint32_t mgr_tail;
+ struct {
+ uint32_t head;
+ uint32_t tail;
+ } claims[OPDL_DISCLAIMS_PER_LCORE];
+} __rte_cache_aligned;
+
+/* Context for each stage of opdl_ring.
+ * Calculations on sequence numbers need to be done with other uint32_t values
+ * so that results are modulus 2^32, and not undefined.
+ */
+struct opdl_stage {
+ struct opdl_ring *t; /* back pointer, set at init */
+ uint32_t num_slots; /* Number of slots for entries, set at init */
+ uint32_t index; /* ID for this stage, set at init */
+ bool threadsafe; /* Set to 1 if this stage supports threadsafe use */
+ /* Last known min seq number of dependencies for used for single thread
+ * operation
+ */
+ uint32_t available_seq;
+ uint32_t head; /* Current head for single-thread operation */
+ uint32_t nb_instance; /* Number of instances */
+ uint32_t instance_id; /* ID of this stage instance */
+ uint16_t num_claimed; /* Number of slots claimed */
+ uint16_t num_event; /* Number of events */
+ uint32_t seq; /* sequence number */
+ uint32_t num_deps; /* Number of direct dependencies */
+ /* Keep track of all dependencies, used during init only */
+ enum dep_type *dep_tracking;
+ /* Direct dependencies of this stage */
+ struct shared_state **deps;
+ /* Other stages read this! */
+ struct shared_state shared __rte_cache_aligned;
+ /* For managing disclaims in multi-threaded processing stages */
+ struct claim_manager pending_disclaims[RTE_MAX_LCORE]
+ __rte_cache_aligned;
+ uint32_t shadow_head; /* Shadow head for single-thread operation */
+ uint32_t queue_id; /* ID of Queue which is assigned to this stage */
+ uint32_t pos; /* Atomic scan position */
+} __rte_cache_aligned;
+
+/* Context for opdl_ring */
+struct opdl_ring {
+ char name[OPDL_NAME_SIZE]; /* OPDL queue instance name */
+ int socket; /* NUMA socket that memory is allocated on */
+ uint32_t num_slots; /* Number of slots for entries */
+ uint32_t mask; /* Mask for sequence numbers (num_slots - 1) */
+ uint32_t slot_size; /* Size of each slot in bytes */
+ uint32_t num_stages; /* Number of stages that have been added */
+ uint32_t max_num_stages; /* Max number of stages */
+ /* Stages indexed by ID */
+ struct opdl_stage *stages;
+ /* Memory for storing slot data */
+ uint8_t slots[0] __rte_cache_aligned;
+};
+
+
+/* Return input stage of a opdl_ring */
+static __rte_always_inline struct opdl_stage *
+input_stage(const struct opdl_ring *t)
+{
+ return &t->stages[0];
+}
+
+/* Check if a stage is the input stage */
+static __rte_always_inline bool
+is_input_stage(const struct opdl_stage *s)
+{
+ return s->index == 0;
+}
+
+/* Get slot pointer from sequence number */
+static __rte_always_inline void *
+get_slot(const struct opdl_ring *t, uint32_t n)
+{
+ return (void *)(uintptr_t)&t->slots[(n & t->mask) * t->slot_size];
+}
+
+/* Find how many entries are available for processing */
+static __rte_always_inline uint32_t
+available(const struct opdl_stage *s)
+{
+ if (s->threadsafe == true) {
+ uint32_t n = __atomic_load_n(&s->shared.available_seq,
+ __ATOMIC_ACQUIRE) -
+ __atomic_load_n(&s->shared.head,
+ __ATOMIC_ACQUIRE);
+
+ /* Return 0 if available_seq needs to be updated */
+ return (n <= s->num_slots) ? n : 0;
+ }
+
+ /* Single threaded */
+ return s->available_seq - s->head;
+}
+
+/* Read sequence number of dependencies and find minimum */
+static __rte_always_inline void
+update_available_seq(struct opdl_stage *s)
+{
+ uint32_t i;
+ uint32_t this_tail = s->shared.tail;
+ uint32_t min_seq = __atomic_load_n(&s->deps[0]->tail, __ATOMIC_ACQUIRE);
+ /* Input stage sequence numbers are greater than the sequence numbers of
+ * its dependencies so an offset of t->num_slots is needed when
+ * calculating available slots and also the condition which is used to
+ * determine the dependencies minimum sequence number must be reverted.
+ */
+ uint32_t wrap;
+
+ if (is_input_stage(s)) {
+ wrap = s->num_slots;
+ for (i = 1; i < s->num_deps; i++) {
+ uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
+ __ATOMIC_ACQUIRE);
+ if ((this_tail - seq) > (this_tail - min_seq))
+ min_seq = seq;
+ }
+ } else {
+ wrap = 0;
+ for (i = 1; i < s->num_deps; i++) {
+ uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
+ __ATOMIC_ACQUIRE);
+ if ((seq - this_tail) < (min_seq - this_tail))
+ min_seq = seq;
+ }
+ }
+
+ if (s->threadsafe == false)
+ s->available_seq = min_seq + wrap;
+ else
+ __atomic_store_n(&s->shared.available_seq, min_seq + wrap,
+ __ATOMIC_RELEASE);
+}
+
+/* Wait until the number of available slots reaches number requested */
+static __rte_always_inline void
+wait_for_available(struct opdl_stage *s, uint32_t n)
+{
+ while (available(s) < n) {
+ rte_pause();
+ update_available_seq(s);
+ }
+}
+
+/* Return number of slots to process based on number requested and mode */
+static __rte_always_inline uint32_t
+num_to_process(struct opdl_stage *s, uint32_t n, bool block)
+{
+ /* Don't read tail sequences of dependencies if not needed */
+ if (available(s) >= n)
+ return n;
+
+ update_available_seq(s);
+
+ if (block == false) {
+ uint32_t avail = available(s);
+
+ if (avail == 0) {
+ rte_pause();
+ return 0;
+ }
+ return (avail <= n) ? avail : n;
+ }
+
+ if (unlikely(n > s->num_slots)) {
+ PMD_DRV_LOG(ERR, "%u entries is more than max (%u)",
+ n, s->num_slots);
+ return 0; /* Avoid infinite loop */
+ }
+ /* blocking */
+ wait_for_available(s, n);
+ return n;
+}
+
+/* Copy entries in to slots with wrap-around */
+static __rte_always_inline void
+copy_entries_in(struct opdl_ring *t, uint32_t start, const void *entries,
+ uint32_t num_entries)
+{
+ uint32_t slot_size = t->slot_size;
+ uint32_t slot_index = start & t->mask;
+
+ if (slot_index + num_entries <= t->num_slots) {
+ rte_memcpy(get_slot(t, start), entries,
+ num_entries * slot_size);
+ } else {
+ uint32_t split = t->num_slots - slot_index;
+
+ rte_memcpy(get_slot(t, start), entries, split * slot_size);
+ rte_memcpy(get_slot(t, 0),
+ RTE_PTR_ADD(entries, split * slot_size),
+ (num_entries - split) * slot_size);
+ }
+}
+
+/* Copy entries out from slots with wrap-around */
+static __rte_always_inline void
+copy_entries_out(struct opdl_ring *t, uint32_t start, void *entries,
+ uint32_t num_entries)
+{
+ uint32_t slot_size = t->slot_size;
+ uint32_t slot_index = start & t->mask;
+
+ if (slot_index + num_entries <= t->num_slots) {
+ rte_memcpy(entries, get_slot(t, start),
+ num_entries * slot_size);
+ } else {
+ uint32_t split = t->num_slots - slot_index;
+
+ rte_memcpy(entries, get_slot(t, start), split * slot_size);
+ rte_memcpy(RTE_PTR_ADD(entries, split * slot_size),
+ get_slot(t, 0),
+ (num_entries - split) * slot_size);
+ }
+}
+
+/* Input function optimised for single thread */
+static __rte_always_inline uint32_t
+opdl_ring_input_singlethread(struct opdl_ring *t, const void *entries,
+ uint32_t num_entries, bool block)
+{
+ struct opdl_stage *s = input_stage(t);
+ uint32_t head = s->head;
+
+ num_entries = num_to_process(s, num_entries, block);
+ if (num_entries == 0)
+ return 0;
+
+ copy_entries_in(t, head, entries, num_entries);
+
+ s->head += num_entries;
+ __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+
+ return num_entries;
+}
+
+/* Convert head and tail of claim_manager into valid index */
+static __rte_always_inline uint32_t
+claim_mgr_index(uint32_t n)
+{
+ return n & (OPDL_DISCLAIMS_PER_LCORE - 1);
+}
+
+/* Check if there are available slots in claim_manager */
+static __rte_always_inline bool
+claim_mgr_available(struct claim_manager *mgr)
+{
+ return (mgr->mgr_head < (mgr->mgr_tail + OPDL_DISCLAIMS_PER_LCORE)) ?
+ true : false;
+}
+
+/* Record a new claim. Only use after first checking an entry is available */
+static __rte_always_inline void
+claim_mgr_add(struct claim_manager *mgr, uint32_t tail, uint32_t head)
+{
+ if ((mgr->mgr_head != mgr->mgr_tail) &&
+ (mgr->claims[claim_mgr_index(mgr->mgr_head - 1)].head ==
+ tail)) {
+ /* Combine with previous claim */
+ mgr->claims[claim_mgr_index(mgr->mgr_head - 1)].head = head;
+ } else {
+ mgr->claims[claim_mgr_index(mgr->mgr_head)].head = head;
+ mgr->claims[claim_mgr_index(mgr->mgr_head)].tail = tail;
+ mgr->mgr_head++;
+ }
+
+ mgr->num_claimed += (head - tail);
+}
+
+/* Read the oldest recorded claim */
+static __rte_always_inline bool
+claim_mgr_read(struct claim_manager *mgr, uint32_t *tail, uint32_t *head)
+{
+ if (mgr->mgr_head == mgr->mgr_tail)
+ return false;
+
+ *head = mgr->claims[claim_mgr_index(mgr->mgr_tail)].head;
+ *tail = mgr->claims[claim_mgr_index(mgr->mgr_tail)].tail;
+ return true;
+}
+
+/* Remove the oldest recorded claim. Only use after first reading the entry */
+static __rte_always_inline void
+claim_mgr_remove(struct claim_manager *mgr)
+{
+ mgr->num_claimed -= (mgr->claims[claim_mgr_index(mgr->mgr_tail)].head -
+ mgr->claims[claim_mgr_index(mgr->mgr_tail)].tail);
+ mgr->mgr_tail++;
+}
+
+/* Update tail in the oldest claim. Only use after first reading the entry */
+static __rte_always_inline void
+claim_mgr_move_tail(struct claim_manager *mgr, uint32_t num_entries)
+{
+ mgr->num_claimed -= num_entries;
+ mgr->claims[claim_mgr_index(mgr->mgr_tail)].tail += num_entries;
+}
+
+static __rte_always_inline void
+opdl_stage_disclaim_multithread_n(struct opdl_stage *s,
+ uint32_t num_entries, bool block)
+{
+ struct claim_manager *disclaims = &s->pending_disclaims[rte_lcore_id()];
+ uint32_t head;
+ uint32_t tail;
+
+ while (num_entries) {
+ bool ret = claim_mgr_read(disclaims, &tail, &head);
+
+ if (ret == false)
+ break; /* nothing is claimed */
+ /* There should be no race condition here. If shared.tail
+ * matches, no other core can update it until this one does.
+ */
+ if (__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) ==
+ tail) {
+ if (num_entries >= (head - tail)) {
+ claim_mgr_remove(disclaims);
+ __atomic_store_n(&s->shared.tail, head,
+ __ATOMIC_RELEASE);
+ num_entries -= (head - tail);
+ } else {
+ claim_mgr_move_tail(disclaims, num_entries);
+ __atomic_store_n(&s->shared.tail,
+ num_entries + tail,
+ __ATOMIC_RELEASE);
+ num_entries = 0;
+ }
+ } else if (block == false)
+ break; /* blocked by other thread */
+ /* Keep going until num_entries are disclaimed. */
+ rte_pause();
+ }
+
+ disclaims->num_to_disclaim = num_entries;
+}
+
+/* Move head atomically, returning number of entries available to process and
+ * the original value of head. For non-input stages, the claim is recorded
+ * so that the tail can be updated later by opdl_stage_disclaim().
+ */
+static __rte_always_inline void
+move_head_atomically(struct opdl_stage *s, uint32_t *num_entries,
+ uint32_t *old_head, bool block, bool claim_func)
+{
+ uint32_t orig_num_entries = *num_entries;
+ uint32_t ret;
+ struct claim_manager *disclaims = &s->pending_disclaims[rte_lcore_id()];
+
+ /* Attempt to disclaim any outstanding claims */
+ opdl_stage_disclaim_multithread_n(s, disclaims->num_to_disclaim,
+ false);
+
+ *old_head = __atomic_load_n(&s->shared.head, __ATOMIC_ACQUIRE);
+ while (true) {
+ bool success;
+ /* If called by opdl_ring_input(), claim does not need to be
+ * recorded, as there will be no disclaim.
+ */
+ if (claim_func) {
+ /* Check that the claim can be recorded */
+ ret = claim_mgr_available(disclaims);
+ if (ret == false) {
+ /* exit out if claim can't be recorded */
+ *num_entries = 0;
+ return;
+ }
+ }
+
+ *num_entries = num_to_process(s, orig_num_entries, block);
+ if (*num_entries == 0)
+ return;
+
+ success = __atomic_compare_exchange_n(&s->shared.head, old_head,
+ *old_head + *num_entries,
+ true, /* may fail spuriously */
+ __ATOMIC_RELEASE, /* memory order on success */
+ __ATOMIC_ACQUIRE); /* memory order on fail */
+ if (likely(success))
+ break;
+ rte_pause();
+ }
+
+ if (claim_func)
+ /* Store the claim record */
+ claim_mgr_add(disclaims, *old_head, *old_head + *num_entries);
+}
+
+/* Input function that supports multiple threads */
+static __rte_always_inline uint32_t
+opdl_ring_input_multithread(struct opdl_ring *t, const void *entries,
+ uint32_t num_entries, bool block)
+{
+ struct opdl_stage *s = input_stage(t);
+ uint32_t old_head;
+
+ move_head_atomically(s, &num_entries, &old_head, block, false);
+ if (num_entries == 0)
+ return 0;
+
+ copy_entries_in(t, old_head, entries, num_entries);
+
+ /* If another thread started inputting before this one, but hasn't
+ * finished, we need to wait for it to complete to update the tail.
+ */
+ while (unlikely(__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) !=
+ old_head))
+ rte_pause();
+
+ __atomic_store_n(&s->shared.tail, old_head + num_entries,
+ __ATOMIC_RELEASE);
+
+ return num_entries;
+}
+
+static __rte_always_inline uint32_t
+opdl_first_entry_id(uint32_t start_seq, uint8_t nb_p_lcores,
+ uint8_t this_lcore)
+{
+ return ((nb_p_lcores <= 1) ? 0 :
+ (nb_p_lcores - (start_seq % nb_p_lcores) + this_lcore) %
+ nb_p_lcores);
+}
+
+/* Claim slots to process, optimised for single-thread operation */
+static __rte_always_inline uint32_t
+opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block, bool atomic)
+{
+ uint32_t i = 0, j = 0, offset;
+ uint32_t opa_id = 0;
+ uint32_t flow_id = 0;
+ uint64_t event = 0;
+ void *get_slots;
+ struct rte_event *ev;
+ RTE_SET_USED(seq);
+ struct opdl_ring *t = s->t;
+ uint8_t *entries_offset = (uint8_t *)entries;
+
+ if (!atomic) {
+
+ offset = opdl_first_entry_id(s->seq, s->nb_instance,
+ s->instance_id);
+
+ num_entries = s->nb_instance * num_entries;
+
+ num_entries = num_to_process(s, num_entries, block);
+
+ for (; offset < num_entries; offset += s->nb_instance) {
+ get_slots = get_slot(t, s->head + offset);
+ memcpy(entries_offset, get_slots, t->slot_size);
+ entries_offset += t->slot_size;
+ i++;
+ }
+ } else {
+ num_entries = num_to_process(s, num_entries, block);
+
+ for (j = 0; j < num_entries; j++) {
+ ev = (struct rte_event *)get_slot(t, s->head+j);
+
+ event = __atomic_load_n(&(ev->event),
+ __ATOMIC_ACQUIRE);
+
+ opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
+ flow_id = OPDL_FLOWID_MASK & event;
+
+ if (opa_id >= s->queue_id)
+ continue;
+
+ if ((flow_id % s->nb_instance) == s->instance_id) {
+ memcpy(entries_offset, ev, t->slot_size);
+ entries_offset += t->slot_size;
+ i++;
+ }
+ }
+ }
+ s->shadow_head = s->head;
+ s->head += num_entries;
+ s->num_claimed = num_entries;
+ s->num_event = i;
+ s->pos = 0;
+
+ /* automatically disclaim entries if number of rte_events is zero */
+ if (unlikely(i == 0))
+ opdl_stage_disclaim(s, 0, false);
+
+ return i;
+}
+
+/* Thread-safe version of function to claim slots for processing */
+static __rte_always_inline uint32_t
+opdl_stage_claim_multithread(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block)
+{
+ uint32_t old_head;
+ struct opdl_ring *t = s->t;
+ uint32_t i = 0, offset;
+ uint8_t *entries_offset = (uint8_t *)entries;
+
+ if (seq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid seq PTR");
+ return 0;
+ }
+ offset = opdl_first_entry_id(*seq, s->nb_instance, s->instance_id);
+ num_entries = offset + (s->nb_instance * num_entries);
+
+ move_head_atomically(s, &num_entries, &old_head, block, true);
+
+ for (; offset < num_entries; offset += s->nb_instance) {
+ memcpy(entries_offset, get_slot(t, s->head + offset),
+ t->slot_size);
+ entries_offset += t->slot_size;
+ i++;
+ }
+
+ *seq = old_head;
+
+ return i;
+}
+
+/* Claim and copy slot pointers, optimised for single-thread operation */
+static __rte_always_inline uint32_t
+opdl_stage_claim_copy_singlethread(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block)
+{
+ num_entries = num_to_process(s, num_entries, block);
+ if (num_entries == 0)
+ return 0;
+ copy_entries_out(s->t, s->head, entries, num_entries);
+ if (seq != NULL)
+ *seq = s->head;
+ s->head += num_entries;
+ return num_entries;
+}
+
+/* Thread-safe version of function to claim and copy pointers to slots */
+static __rte_always_inline uint32_t
+opdl_stage_claim_copy_multithread(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block)
+{
+ uint32_t old_head;
+
+ move_head_atomically(s, &num_entries, &old_head, block, true);
+ if (num_entries == 0)
+ return 0;
+ copy_entries_out(s->t, old_head, entries, num_entries);
+ if (seq != NULL)
+ *seq = old_head;
+ return num_entries;
+}
+
+static __rte_always_inline void
+opdl_stage_disclaim_singlethread_n(struct opdl_stage *s,
+ uint32_t num_entries)
+{
+ uint32_t old_tail = s->shared.tail;
+
+ if (unlikely(num_entries > (s->head - old_tail))) {
+ PMD_DRV_LOG(WARNING, "Attempt to disclaim (%u) more than claimed (%u)",
+ num_entries, s->head - old_tail);
+ num_entries = s->head - old_tail;
+ }
+ __atomic_store_n(&s->shared.tail, num_entries + old_tail,
+ __ATOMIC_RELEASE);
+}
+
+uint32_t
+opdl_ring_input(struct opdl_ring *t, const void *entries, uint32_t num_entries,
+ bool block)
+{
+ if (input_stage(t)->threadsafe == false)
+ return opdl_ring_input_singlethread(t, entries, num_entries,
+ block);
+ else
+ return opdl_ring_input_multithread(t, entries, num_entries,
+ block);
+}
+
+uint32_t
+opdl_ring_copy_from_burst(struct opdl_ring *t, struct opdl_stage *s,
+ const void *entries, uint32_t num_entries, bool block)
+{
+ uint32_t head = s->head;
+
+ num_entries = num_to_process(s, num_entries, block);
+
+ if (num_entries == 0)
+ return 0;
+
+ copy_entries_in(t, head, entries, num_entries);
+
+ s->head += num_entries;
+ __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+
+ return num_entries;
+
+}
+
+uint32_t
+opdl_ring_copy_to_burst(struct opdl_ring *t, struct opdl_stage *s,
+ void *entries, uint32_t num_entries, bool block)
+{
+ uint32_t head = s->head;
+
+ num_entries = num_to_process(s, num_entries, block);
+ if (num_entries == 0)
+ return 0;
+
+ copy_entries_out(t, head, entries, num_entries);
+
+ s->head += num_entries;
+ __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+
+ return num_entries;
+}
+
+uint32_t
+opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries)
+{
+ /* return (num_to_process(s, num_entries, false)); */
+
+ if (available(s) >= num_entries)
+ return num_entries;
+
+ update_available_seq(s);
+
+ uint32_t avail = available(s);
+
+ if (avail == 0) {
+ rte_pause();
+ return 0;
+ }
+ return (avail <= num_entries) ? avail : num_entries;
+}
+
+uint32_t
+opdl_stage_claim(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block, bool atomic)
+{
+ if (s->threadsafe == false)
+ return opdl_stage_claim_singlethread(s, entries, num_entries,
+ seq, block, atomic);
+ else
+ return opdl_stage_claim_multithread(s, entries, num_entries,
+ seq, block);
+}
+
+uint32_t
+opdl_stage_claim_copy(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block)
+{
+ if (s->threadsafe == false)
+ return opdl_stage_claim_copy_singlethread(s, entries,
+ num_entries, seq, block);
+ else
+ return opdl_stage_claim_copy_multithread(s, entries,
+ num_entries, seq, block);
+}
+
+void
+opdl_stage_disclaim_n(struct opdl_stage *s, uint32_t num_entries,
+ bool block)
+{
+
+ if (s->threadsafe == false) {
+ opdl_stage_disclaim_singlethread_n(s, s->num_claimed);
+ } else {
+ struct claim_manager *disclaims =
+ &s->pending_disclaims[rte_lcore_id()];
+
+ if (unlikely(num_entries > s->num_slots)) {
+ PMD_DRV_LOG(WARNING, "Attempt to disclaim (%u) more than claimed (%u)",
+ num_entries, disclaims->num_claimed);
+ num_entries = disclaims->num_claimed;
+ }
+
+ num_entries = RTE_MIN(num_entries + disclaims->num_to_disclaim,
+ disclaims->num_claimed);
+ opdl_stage_disclaim_multithread_n(s, num_entries, block);
+ }
+}
+
+int
+opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries, bool block)
+{
+ if (num_entries != s->num_event) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+ if (s->threadsafe == false) {
+ __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ s->seq += s->num_claimed;
+ s->shadow_head = s->head;
+ s->num_claimed = 0;
+ } else {
+ struct claim_manager *disclaims =
+ &s->pending_disclaims[rte_lcore_id()];
+ opdl_stage_disclaim_multithread_n(s, disclaims->num_claimed,
+ block);
+ }
+ return num_entries;
+}
+
+uint32_t
+opdl_ring_available(struct opdl_ring *t)
+{
+ return opdl_stage_available(&t->stages[0]);
+}
+
+uint32_t
+opdl_stage_available(struct opdl_stage *s)
+{
+ update_available_seq(s);
+ return available(s);
+}
+
+void
+opdl_ring_flush(struct opdl_ring *t)
+{
+ struct opdl_stage *s = input_stage(t);
+
+ wait_for_available(s, s->num_slots);
+}
+
+/******************** Non performance sensitive functions ********************/
+
+/* Initial setup of a new stage's context */
+static int
+init_stage(struct opdl_ring *t, struct opdl_stage *s, bool threadsafe,
+ bool is_input)
+{
+ uint32_t available = (is_input) ? t->num_slots : 0;
+
+ s->t = t;
+ s->num_slots = t->num_slots;
+ s->index = t->num_stages;
+ s->threadsafe = threadsafe;
+ s->shared.stage = s;
+
+ /* Alloc memory for deps */
+ s->dep_tracking = rte_zmalloc_socket(LIB_NAME,
+ t->max_num_stages * sizeof(enum dep_type),
+ 0, t->socket);
+ if (s->dep_tracking == NULL)
+ return -ENOMEM;
+
+ s->deps = rte_zmalloc_socket(LIB_NAME,
+ t->max_num_stages * sizeof(struct shared_state *),
+ 0, t->socket);
+ if (s->deps == NULL) {
+ rte_free(s->dep_tracking);
+ return -ENOMEM;
+ }
+
+ s->dep_tracking[s->index] = DEP_SELF;
+
+ if (threadsafe == true)
+ s->shared.available_seq = available;
+ else
+ s->available_seq = available;
+
+ return 0;
+}
+
+/* Add direct or indirect dependencies between stages */
+static int
+add_dep(struct opdl_stage *dependent, const struct opdl_stage *dependency,
+ enum dep_type type)
+{
+ struct opdl_ring *t = dependent->t;
+ uint32_t i;
+
+ /* Add new direct dependency */
+ if ((type == DEP_DIRECT) &&
+ (dependent->dep_tracking[dependency->index] ==
+ DEP_NONE)) {
+ PMD_DRV_LOG(DEBUG, "%s:%u direct dependency on %u",
+ t->name, dependent->index, dependency->index);
+ dependent->dep_tracking[dependency->index] = DEP_DIRECT;
+ }
+
+ /* Add new indirect dependency or change direct to indirect */
+ if ((type == DEP_INDIRECT) &&
+ ((dependent->dep_tracking[dependency->index] ==
+ DEP_NONE) ||
+ (dependent->dep_tracking[dependency->index] ==
+ DEP_DIRECT))) {
+ PMD_DRV_LOG(DEBUG, "%s:%u indirect dependency on %u",
+ t->name, dependent->index, dependency->index);
+ dependent->dep_tracking[dependency->index] = DEP_INDIRECT;
+ }
+
+ /* Shouldn't happen... */
+ if ((dependent->dep_tracking[dependency->index] == DEP_SELF) &&
+ (dependent != input_stage(t))) {
+ PMD_DRV_LOG(ERR, "Loop in dependency graph %s:%u",
+ t->name, dependent->index);
+ return -EINVAL;
+ }
+
+ /* Keep going to dependencies of the dependency, until input stage */
+ if (dependency != input_stage(t))
+ for (i = 0; i < dependency->num_deps; i++) {
+ int ret = add_dep(dependent, dependency->deps[i]->stage,
+ DEP_INDIRECT);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Make list of sequence numbers for direct dependencies only */
+ if (type == DEP_DIRECT)
+ for (i = 0, dependent->num_deps = 0; i < t->num_stages; i++)
+ if (dependent->dep_tracking[i] == DEP_DIRECT) {
+ if ((i == 0) && (dependent->num_deps > 1))
+ rte_panic("%s:%u depends on > input",
+ t->name,
+ dependent->index);
+ dependent->deps[dependent->num_deps++] =
+ &t->stages[i].shared;
+ }
+
+ return 0;
+}
+
+struct opdl_ring *
+opdl_ring_create(const char *name, uint32_t num_slots, uint32_t slot_size,
+ uint32_t max_num_stages, int socket)
+{
+ struct opdl_ring *t;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ int mz_flags = 0;
+ struct opdl_stage *st = NULL;
+ const struct rte_memzone *mz = NULL;
+ size_t alloc_size = RTE_CACHE_LINE_ROUNDUP(sizeof(*t) +
+ (num_slots * slot_size));
+
+ /* Compile time checking */
+ RTE_BUILD_BUG_ON((sizeof(struct shared_state) & RTE_CACHE_LINE_MASK) !=
+ 0);
+ RTE_BUILD_BUG_ON((offsetof(struct opdl_stage, shared) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((offsetof(struct opdl_ring, slots) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON(!rte_is_power_of_2(OPDL_DISCLAIMS_PER_LCORE));
+
+ /* Parameter checking */
+ if (name == NULL) {
+ PMD_DRV_LOG(ERR, "name param is NULL");
+ return NULL;
+ }
+ if (!rte_is_power_of_2(num_slots)) {
+ PMD_DRV_LOG(ERR, "num_slots (%u) for %s is not power of 2",
+ num_slots, name);
+ return NULL;
+ }
+
+ /* Alloc memory for stages */
+ st = rte_zmalloc_socket(LIB_NAME,
+ max_num_stages * sizeof(struct opdl_stage),
+ RTE_CACHE_LINE_SIZE, socket);
+ if (st == NULL)
+ goto exit_fail;
+
+ snprintf(mz_name, sizeof(mz_name), "%s%s", LIB_NAME, name);
+
+ /* Alloc memory for memzone */
+ mz = rte_memzone_reserve(mz_name, alloc_size, socket, mz_flags);
+ if (mz == NULL)
+ goto exit_fail;
+
+ t = mz->addr;
+
+ /* Initialise opdl_ring queue */
+ memset(t, 0, sizeof(*t));
+ snprintf(t->name, sizeof(t->name), "%s", name);
+ t->socket = socket;
+ t->num_slots = num_slots;
+ t->mask = num_slots - 1;
+ t->slot_size = slot_size;
+ t->max_num_stages = max_num_stages;
+ t->stages = st;
+
+ PMD_DRV_LOG(DEBUG, "Created %s at %p (num_slots=%u,socket=%i,slot_size=%u)",
+ t->name, t, num_slots, socket, slot_size);
+
+ return t;
+
+exit_fail:
+ PMD_DRV_LOG(ERR, "Cannot reserve memory");
+ rte_free(st);
+ rte_memzone_free(mz);
+
+ return NULL;
+}
+
+void *
+opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index)
+{
+ return get_slot(t, index);
+}
+
+bool
+opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev,
+ uint32_t index, bool atomic)
+{
+ uint32_t i = 0, offset;
+ struct opdl_ring *t = s->t;
+ struct rte_event *ev_orig = NULL;
+ bool ev_updated = false;
+ uint64_t ev_temp = 0;
+ uint64_t ev_update = 0;
+
+ uint32_t opa_id = 0;
+ uint32_t flow_id = 0;
+ uint64_t event = 0;
+
+ if (index > s->num_event) {
+ PMD_DRV_LOG(ERR, "index is overflow");
+ return ev_updated;
+ }
+
+ ev_temp = ev->event & OPDL_EVENT_MASK;
+
+ if (!atomic) {
+ offset = opdl_first_entry_id(s->seq, s->nb_instance,
+ s->instance_id);
+ offset += index*s->nb_instance;
+ ev_orig = get_slot(t, s->shadow_head+offset);
+ if ((ev_orig->event&OPDL_EVENT_MASK) != ev_temp) {
+ ev_orig->event = ev->event;
+ ev_updated = true;
+ }
+ if (ev_orig->u64 != ev->u64) {
+ ev_orig->u64 = ev->u64;
+ ev_updated = true;
+ }
+
+ } else {
+ for (i = s->pos; i < s->num_claimed; i++) {
+ ev_orig = (struct rte_event *)
+ get_slot(t, s->shadow_head+i);
+
+ event = __atomic_load_n(&(ev_orig->event),
+ __ATOMIC_ACQUIRE);
+
+ opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
+ flow_id = OPDL_FLOWID_MASK & event;
+
+ if (opa_id >= s->queue_id)
+ continue;
+
+ if ((flow_id % s->nb_instance) == s->instance_id) {
+ ev_update = s->queue_id;
+ ev_update = (ev_update << OPDL_OPA_OFFSET)
+ | ev->event;
+
+ s->pos = i + 1;
+
+ if ((event & OPDL_EVENT_MASK) !=
+ ev_temp) {
+ __atomic_store_n(&(ev_orig->event),
+ ev_update,
+ __ATOMIC_RELEASE);
+ ev_updated = true;
+ }
+ if (ev_orig->u64 != ev->u64) {
+ ev_orig->u64 = ev->u64;
+ ev_updated = true;
+ }
+
+ break;
+ }
+ }
+
+ }
+
+ return ev_updated;
+}
+
+int
+opdl_ring_get_socket(const struct opdl_ring *t)
+{
+ return t->socket;
+}
+
+uint32_t
+opdl_ring_get_num_slots(const struct opdl_ring *t)
+{
+ return t->num_slots;
+}
+
+const char *
+opdl_ring_get_name(const struct opdl_ring *t)
+{
+ return t->name;
+}
+
+/* Check dependency list is valid for a given opdl_ring */
+static int
+check_deps(struct opdl_ring *t, struct opdl_stage *deps[],
+ uint32_t num_deps)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_deps; ++i) {
+ if (!deps[i]) {
+ PMD_DRV_LOG(ERR, "deps[%u] is NULL", i);
+ return -EINVAL;
+ }
+ if (t != deps[i]->t) {
+ PMD_DRV_LOG(ERR, "deps[%u] is in opdl_ring %s, not %s",
+ i, deps[i]->t->name, t->name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+struct opdl_stage *
+opdl_stage_add(struct opdl_ring *t, bool threadsafe, bool is_input)
+{
+ struct opdl_stage *s;
+
+ /* Parameter checking */
+ if (!t) {
+ PMD_DRV_LOG(ERR, "opdl_ring is NULL");
+ return NULL;
+ }
+ if (t->num_stages == t->max_num_stages) {
+ PMD_DRV_LOG(ERR, "%s has max number of stages (%u)",
+ t->name, t->max_num_stages);
+ return NULL;
+ }
+
+ s = &t->stages[t->num_stages];
+
+ if (((uintptr_t)&s->shared & RTE_CACHE_LINE_MASK) != 0)
+ PMD_DRV_LOG(WARNING, "Tail seq num (%p) of %s stage not cache aligned",
+ &s->shared, t->name);
+
+ if (init_stage(t, s, threadsafe, is_input) < 0) {
+ PMD_DRV_LOG(ERR, "Cannot reserve memory");
+ return NULL;
+ }
+ t->num_stages++;
+
+ return s;
+}
+
+uint32_t
+opdl_stage_deps_add(struct opdl_ring *t, struct opdl_stage *s,
+ uint32_t nb_instance, uint32_t instance_id,
+ struct opdl_stage *deps[],
+ uint32_t num_deps)
+{
+ uint32_t i;
+ int ret = 0;
+
+ if ((num_deps > 0) && (!deps)) {
+ PMD_DRV_LOG(ERR, "%s stage has NULL dependencies", t->name);
+ return -1;
+ }
+ ret = check_deps(t, deps, num_deps);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < num_deps; i++) {
+ ret = add_dep(s, deps[i], DEP_DIRECT);
+ if (ret < 0)
+ return ret;
+ }
+
+ s->nb_instance = nb_instance;
+ s->instance_id = instance_id;
+
+ return ret;
+}
+
+struct opdl_stage *
+opdl_ring_get_input_stage(const struct opdl_ring *t)
+{
+ return input_stage(t);
+}
+
+int
+opdl_stage_set_deps(struct opdl_stage *s, struct opdl_stage *deps[],
+ uint32_t num_deps)
+{
+ unsigned int i;
+ int ret;
+
+ if ((num_deps == 0) || (!deps)) {
+ PMD_DRV_LOG(ERR, "cannot set NULL dependencies");
+ return -EINVAL;
+ }
+
+ ret = check_deps(s->t, deps, num_deps);
+ if (ret < 0)
+ return ret;
+
+ /* Update deps */
+ for (i = 0; i < num_deps; i++)
+ s->deps[i] = &deps[i]->shared;
+ s->num_deps = num_deps;
+
+ return 0;
+}
+
+struct opdl_ring *
+opdl_stage_get_opdl_ring(const struct opdl_stage *s)
+{
+ return s->t;
+}
+
+void
+opdl_stage_set_queue_id(struct opdl_stage *s,
+ uint32_t queue_id)
+{
+ s->queue_id = queue_id;
+}
+
+void
+opdl_ring_dump(const struct opdl_ring *t, FILE *f)
+{
+ uint32_t i;
+
+ if (t == NULL) {
+ fprintf(f, "NULL OPDL!\n");
+ return;
+ }
+ fprintf(f, "OPDL \"%s\": num_slots=%u; mask=%#x; slot_size=%u; num_stages=%u; socket=%i\n",
+ t->name, t->num_slots, t->mask, t->slot_size,
+ t->num_stages, t->socket);
+ for (i = 0; i < t->num_stages; i++) {
+ uint32_t j;
+ const struct opdl_stage *s = &t->stages[i];
+
+ fprintf(f, " %s[%u]: threadsafe=%s; head=%u; available_seq=%u; tail=%u; deps=%u",
+ t->name, i, (s->threadsafe) ? "true" : "false",
+ (s->threadsafe) ? s->shared.head : s->head,
+ (s->threadsafe) ? s->shared.available_seq :
+ s->available_seq,
+ s->shared.tail, (s->num_deps > 0) ?
+ s->deps[0]->stage->index : 0);
+ for (j = 1; j < s->num_deps; j++)
+ fprintf(f, ",%u", s->deps[j]->stage->index);
+ fprintf(f, "\n");
+ }
+ fflush(f);
+}
+
+void
+opdl_ring_free(struct opdl_ring *t)
+{
+ uint32_t i;
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+
+ if (t == NULL) {
+ PMD_DRV_LOG(DEBUG, "Freeing NULL OPDL Ring!");
+ return;
+ }
+
+ PMD_DRV_LOG(DEBUG, "Freeing %s opdl_ring at %p", t->name, t);
+
+ for (i = 0; i < t->num_stages; ++i) {
+ rte_free(t->stages[i].deps);
+ rte_free(t->stages[i].dep_tracking);
+ }
+
+ rte_free(t->stages);
+
+ snprintf(mz_name, sizeof(mz_name), "%s%s", LIB_NAME, t->name);
+ mz = rte_memzone_lookup(mz_name);
+ if (rte_memzone_free(mz) != 0)
+ PMD_DRV_LOG(ERR, "Cannot free memzone for %s", t->name);
+}
+
+/* search a opdl_ring from its name */
+struct opdl_ring *
+opdl_ring_lookup(const char *name)
+{
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+
+ snprintf(mz_name, sizeof(mz_name), "%s%s", LIB_NAME, name);
+
+ mz = rte_memzone_lookup(mz_name);
+ if (mz == NULL)
+ return NULL;
+
+ return mz->addr;
+}
+
+void
+opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe)
+{
+ s->threadsafe = threadsafe;
+}
diff --git a/src/spdk/dpdk/drivers/event/opdl/opdl_ring.h b/src/spdk/dpdk/drivers/event/opdl/opdl_ring.h
new file mode 100644
index 00000000..751a59db
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/opdl/opdl_ring.h
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _OPDL_H_
+#define _OPDL_H_
+
+/**
+ * @file
+ * The "opdl_ring" is a data structure that contains a fixed number of slots,
+ * with each slot having the same, but configurable, size. Entries are input
+ * into the opdl_ring by copying into available slots. Once in the opdl_ring,
+ * an entry is processed by a number of stages, with the ordering of stage
+ * processing controlled by making stages dependent on one or more other stages.
+ * An entry is not available for a stage to process until it has been processed
+ * by that stages dependencies. Entries are always made available for
+ * processing in the same order that they were input in to the opdl_ring.
+ * Inputting is considered as a stage that depends on all other stages,
+ * and is also a dependency of all stages.
+ *
+ * Inputting and processing in a stage can support multi-threading. Note that
+ * multi-thread processing can also be done by making stages co-operate e.g. two
+ * stages where one processes the even packets and the other processes odd
+ * packets.
+ *
+ * A opdl_ring can be used as the basis for pipeline based applications. Instead
+ * of each stage in a pipeline dequeueing from a ring, processing and enqueueing
+ * to another ring, it can process entries in-place on the ring. If stages do
+ * not depend on each other, they can run in parallel.
+ *
+ * The opdl_ring works with entries of configurable size, these could be
+ * pointers to mbufs, pointers to mbufs with application specific meta-data,
+ * tasks etc.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <rte_eventdev.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef OPDL_DISCLAIMS_PER_LCORE
+/** Multi-threaded processing allows one thread to process multiple batches in a
+ * stage, while another thread is processing a single large batch. This number
+ * controls how many non-contiguous batches one stage can process before being
+ * blocked by the other stage.
+ */
+#define OPDL_DISCLAIMS_PER_LCORE 8
+#endif
+
+/** Opaque handle to a opdl_ring instance */
+struct opdl_ring;
+
+/** Opaque handle to a single stage in a opdl_ring */
+struct opdl_stage;
+
+/**
+ * Create a new instance of a opdl_ring.
+ *
+ * @param name
+ * String containing the name to give the new opdl_ring instance.
+ * @param num_slots
+ * How many slots the opdl_ring contains. Must be a power a 2!
+ * @param slot_size
+ * How many bytes in each slot.
+ * @param max_num_stages
+ * Maximum number of stages.
+ * @param socket
+ * The NUMA socket (or SOCKET_ID_ANY) to allocate the memory used for this
+ * opdl_ring instance.
+ * @param threadsafe
+ * Whether to support multiple threads inputting to the opdl_ring or not.
+ * Enabling this may have a negative impact on performance if only one thread
+ * will be inputting.
+ *
+ * @return
+ * A pointer to a new opdl_ring instance, or NULL on error.
+ */
+struct opdl_ring *
+opdl_ring_create(const char *name, uint32_t num_slots, uint32_t slot_size,
+ uint32_t max_num_stages, int socket);
+
+/**
+ * Get pointer to individual slot in a opdl_ring.
+ *
+ * @param t
+ * The opdl_ring.
+ * @param index
+ * Index of slot. If greater than the number of slots it will be masked to be
+ * within correct range.
+ *
+ * @return
+ * A pointer to that slot.
+ */
+void *
+opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index);
+
+/**
+ * Get NUMA socket used by a opdl_ring.
+ *
+ * @param t
+ * The opdl_ring.
+ *
+ * @return
+ * NUMA socket.
+ */
+int
+opdl_ring_get_socket(const struct opdl_ring *t);
+
+/**
+ * Get number of slots in a opdl_ring.
+ *
+ * @param t
+ * The opdl_ring.
+ *
+ * @return
+ * Number of slots.
+ */
+uint32_t
+opdl_ring_get_num_slots(const struct opdl_ring *t);
+
+/**
+ * Get name of a opdl_ring.
+ *
+ * @param t
+ * The opdl_ring.
+ *
+ * @return
+ * Name string.
+ */
+const char *
+opdl_ring_get_name(const struct opdl_ring *t);
+
+/**
+ * Adds a new processing stage to a specified opdl_ring instance. Adding a stage
+ * while there are entries in the opdl_ring being processed will cause undefined
+ * behaviour.
+ *
+ * @param t
+ * The opdl_ring to add the stage to.
+ * @param deps
+ * An array of pointers to other stages that this stage depends on. The other
+ * stages must be part of the same opdl_ring! Note that input is an implied
+ * dependency. This can be NULL if num_deps is 0.
+ * @param num_deps
+ * The size of the deps array.
+ * @param threadsafe
+ * Whether to support multiple threads processing this stage or not.
+ * Enabling this may have a negative impact on performance if only one thread
+ * will be processing this stage.
+ * @param is_input
+ * Indication to nitialise the stage with all slots available or none
+ *
+ * @return
+ * A pointer to the new stage, or NULL on error.
+ */
+struct opdl_stage *
+opdl_stage_add(struct opdl_ring *t, bool threadsafe, bool is_input);
+
+/**
+ * Returns the input stage of a opdl_ring to be used by other API functions.
+ *
+ * @param t
+ * The opdl_ring.
+ *
+ * @return
+ * A pointer to the input stage.
+ */
+struct opdl_stage *
+opdl_ring_get_input_stage(const struct opdl_ring *t);
+
+/**
+ * Sets the dependencies for a stage (clears all the previous deps!). Changing
+ * dependencies while there are entries in the opdl_ring being processed will
+ * cause undefined behaviour.
+ *
+ * @param s
+ * The stage to set the dependencies for.
+ * @param deps
+ * An array of pointers to other stages that this stage will depends on. The
+ * other stages must be part of the same opdl_ring!
+ * @param num_deps
+ * The size of the deps array. This must be > 0.
+ *
+ * @return
+ * 0 on success, a negative value on error.
+ */
+int
+opdl_stage_set_deps(struct opdl_stage *s, struct opdl_stage *deps[],
+ uint32_t num_deps);
+
+/**
+ * Returns the opdl_ring that a stage belongs to.
+ *
+ * @param s
+ * The stage
+ *
+ * @return
+ * A pointer to the opdl_ring that the stage belongs to.
+ */
+struct opdl_ring *
+opdl_stage_get_opdl_ring(const struct opdl_stage *s);
+
+/**
+ * Inputs a new batch of entries into the opdl_ring. This function is only
+ * threadsafe (with the same opdl_ring parameter) if the threadsafe parameter of
+ * opdl_ring_create() was true. For performance reasons, this function does not
+ * check input parameters.
+ *
+ * @param t
+ * The opdl_ring to input entries in to.
+ * @param entries
+ * An array of entries that will be copied in to the opdl_ring.
+ * @param num_entries
+ * The size of the entries array.
+ * @param block
+ * If this is true, the function blocks until enough slots are available to
+ * input all the requested entries. If false, then the function inputs as
+ * many entries as currently possible.
+ *
+ * @return
+ * The number of entries successfully input.
+ */
+uint32_t
+opdl_ring_input(struct opdl_ring *t, const void *entries, uint32_t num_entries,
+ bool block);
+
+/**
+ * Inputs a new batch of entries into a opdl stage. This function is only
+ * threadsafe (with the same opdl parameter) if the threadsafe parameter of
+ * opdl_create() was true. For performance reasons, this function does not
+ * check input parameters.
+ *
+ * @param t
+ * The opdl ring to input entries in to.
+ * @param s
+ * The stage to copy entries to.
+ * @param entries
+ * An array of entries that will be copied in to the opdl ring.
+ * @param num_entries
+ * The size of the entries array.
+ * @param block
+ * If this is true, the function blocks until enough slots are available to
+ * input all the requested entries. If false, then the function inputs as
+ * many entries as currently possible.
+ *
+ * @return
+ * The number of entries successfully input.
+ */
+uint32_t
+opdl_ring_copy_from_burst(struct opdl_ring *t, struct opdl_stage *s,
+ const void *entries, uint32_t num_entries, bool block);
+
+/**
+ * Copy a batch of entries from the opdl ring. This function is only
+ * threadsafe (with the same opdl parameter) if the threadsafe parameter of
+ * opdl_create() was true. For performance reasons, this function does not
+ * check input parameters.
+ *
+ * @param t
+ * The opdl ring to copy entries from.
+ * @param s
+ * The stage to copy entries from.
+ * @param entries
+ * An array of entries that will be copied from the opdl ring.
+ * @param num_entries
+ * The size of the entries array.
+ * @param block
+ * If this is true, the function blocks until enough slots are available to
+ * input all the requested entries. If false, then the function inputs as
+ * many entries as currently possible.
+ *
+ * @return
+ * The number of entries successfully input.
+ */
+uint32_t
+opdl_ring_copy_to_burst(struct opdl_ring *t, struct opdl_stage *s,
+ void *entries, uint32_t num_entries, bool block);
+
+/**
+ * Before processing a batch of entries, a stage must first claim them to get
+ * access. This function is threadsafe using same opdl_stage parameter if
+ * the stage was created with threadsafe set to true, otherwise it is only
+ * threadsafe with a different opdl_stage per thread. For performance
+ * reasons, this function does not check input parameters.
+ *
+ * @param s
+ * The opdl_ring stage to read entries in.
+ * @param entries
+ * An array of pointers to entries that will be filled in by this function.
+ * @param num_entries
+ * The number of entries to attempt to claim for processing (and the size of
+ * the entries array).
+ * @param seq
+ * If not NULL, this is set to the value of the internal stage sequence number
+ * associated with the first entry returned.
+ * @param block
+ * If this is true, the function blocks until num_entries slots are available
+ * to process. If false, then the function claims as many entries as
+ * currently possible.
+ *
+ * @param atomic
+ * if this is true, the function will return event according to event flow id
+ * @return
+ * The number of pointers to entries filled in to the entries array.
+ */
+uint32_t
+opdl_stage_claim(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block, bool atomic);
+
+uint32_t
+opdl_stage_deps_add(struct opdl_ring *t, struct opdl_stage *s,
+ uint32_t nb_instance, uint32_t instance_id,
+ struct opdl_stage *deps[], uint32_t num_deps);
+
+/**
+ * A function to check how many entries are ready to be claimed.
+ *
+ * @param entries
+ * An array of pointers to entries.
+ * @param num_entries
+ * Number of entries in an array.
+ * @param arg
+ * An opaque pointer to data passed to the claim function.
+ * @param block
+ * When set to true, the function should wait until num_entries are ready to
+ * be processed. Otherwise it should return immediately.
+ *
+ * @return
+ * Number of entries ready to be claimed.
+ */
+typedef uint32_t (opdl_ring_check_entries_t)(void *entries[],
+ uint32_t num_entries, void *arg, bool block);
+
+/**
+ * Before processing a batch of entries, a stage must first claim them to get
+ * access. Each entry is checked by the passed check() function and depending
+ * on block value, it waits until num_entries are ready or returns immediately.
+ * This function is only threadsafe with a different opdl_stage per thread.
+ *
+ * @param s
+ * The opdl_ring stage to read entries in.
+ * @param entries
+ * An array of pointers to entries that will be filled in by this function.
+ * @param num_entries
+ * The number of entries to attempt to claim for processing (and the size of
+ * the entries array).
+ * @param seq
+ * If not NULL, this is set to the value of the internal stage sequence number
+ * associated with the first entry returned.
+ * @param block
+ * If this is true, the function blocks until num_entries ready slots are
+ * available to process. If false, then the function claims as many ready
+ * entries as currently possible.
+ * @param check
+ * Pointer to a function called to check entries.
+ * @param arg
+ * Opaque data passed to check() function.
+ *
+ * @return
+ * The number of pointers to ready entries filled in to the entries array.
+ */
+uint32_t
+opdl_stage_claim_check(struct opdl_stage *s, void **entries,
+ uint32_t num_entries, uint32_t *seq, bool block,
+ opdl_ring_check_entries_t *check, void *arg);
+
+/**
+ * Before processing a batch of entries, a stage must first claim them to get
+ * access. This function is threadsafe using same opdl_stage parameter if
+ * the stage was created with threadsafe set to true, otherwise it is only
+ * threadsafe with a different opdl_stage per thread.
+ *
+ * The difference between this function and opdl_stage_claim() is that this
+ * function copies the entries from the opdl_ring. Note that any changes made to
+ * the copied entries will not be reflected back in to the entries in the
+ * opdl_ring, so this function probably only makes sense if the entries are
+ * pointers to other data. For performance reasons, this function does not check
+ * input parameters.
+ *
+ * @param s
+ * The opdl_ring stage to read entries in.
+ * @param entries
+ * An array of entries that will be filled in by this function.
+ * @param num_entries
+ * The number of entries to attempt to claim for processing (and the size of
+ * the entries array).
+ * @param seq
+ * If not NULL, this is set to the value of the internal stage sequence number
+ * associated with the first entry returned.
+ * @param block
+ * If this is true, the function blocks until num_entries slots are available
+ * to process. If false, then the function claims as many entries as
+ * currently possible.
+ *
+ * @return
+ * The number of entries copied in to the entries array.
+ */
+uint32_t
+opdl_stage_claim_copy(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block);
+
+/**
+ * This function must be called when a stage has finished its processing of
+ * entries, to make them available to any dependent stages. All entries that are
+ * claimed by the calling thread in the stage will be disclaimed. It is possible
+ * to claim multiple batches before disclaiming. For performance reasons, this
+ * function does not check input parameters.
+ *
+ * @param s
+ * The opdl_ring stage in which to disclaim all claimed entries.
+ *
+ * @param block
+ * Entries are always made available to a stage in the same order that they
+ * were input in the stage. If a stage is multithread safe, this may mean that
+ * full disclaiming of a batch of entries can not be considered complete until
+ * all earlier threads in the stage have disclaimed. If this parameter is true
+ * then the function blocks until all entries are fully disclaimed, otherwise
+ * it disclaims as many as currently possible, with non fully disclaimed
+ * batches stored until the next call to a claim or disclaim function for this
+ * stage on this thread.
+ *
+ * If a thread is not going to process any more entries in this stage, it
+ * *must* first call this function with this parameter set to true to ensure
+ * it does not block the entire opdl_ring.
+ *
+ * In a single threaded stage, this parameter has no effect.
+ */
+int
+opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries,
+ bool block);
+
+/**
+ * This function can be called when a stage has finished its processing of
+ * entries, to make them available to any dependent stages. The difference
+ * between this function and opdl_stage_disclaim() is that here only a
+ * portion of entries are disclaimed, not all of them. For performance reasons,
+ * this function does not check input parameters.
+ *
+ * @param s
+ * The opdl_ring stage in which to disclaim entries.
+ *
+ * @param num_entries
+ * The number of entries to disclaim.
+ *
+ * @param block
+ * Entries are always made available to a stage in the same order that they
+ * were input in the stage. If a stage is multithread safe, this may mean that
+ * full disclaiming of a batch of entries can not be considered complete until
+ * all earlier threads in the stage have disclaimed. If this parameter is true
+ * then the function blocks until the specified number of entries has been
+ * disclaimed (or there are no more entries to disclaim). Otherwise it
+ * disclaims as many claims as currently possible and an attempt to disclaim
+ * them is made the next time a claim or disclaim function for this stage on
+ * this thread is called.
+ *
+ * In a single threaded stage, this parameter has no effect.
+ */
+void
+opdl_stage_disclaim_n(struct opdl_stage *s, uint32_t num_entries,
+ bool block);
+
+/**
+ * Check how many entries can be input.
+ *
+ * @param t
+ * The opdl_ring instance to check.
+ *
+ * @return
+ * The number of new entries currently allowed to be input.
+ */
+uint32_t
+opdl_ring_available(struct opdl_ring *t);
+
+/**
+ * Check how many entries can be processed in a stage.
+ *
+ * @param s
+ * The stage to check.
+ *
+ * @return
+ * The number of entries currently available to be processed in this stage.
+ */
+uint32_t
+opdl_stage_available(struct opdl_stage *s);
+
+/**
+ * Check how many entries are available to be processed.
+ *
+ * NOTE : DOES NOT CHANGE ANY STATE WITHIN THE STAGE
+ *
+ * @param s
+ * The stage to check.
+ *
+ * @param num_entries
+ * The number of entries to check for availability.
+ *
+ * @return
+ * The number of entries currently available to be processed in this stage.
+ */
+uint32_t
+opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries);
+
+/**
+ * Create empty stage instance and return the pointer.
+ *
+ * @param t
+ * The pointer of opdl_ring.
+ *
+ * @param threadsafe
+ * enable multiple thread or not.
+ * @return
+ * The pointer of one empty stage instance.
+ */
+struct opdl_stage *
+opdl_stage_create(struct opdl_ring *t, bool threadsafe);
+
+
+/**
+ * Set the internal queue id for each stage instance.
+ *
+ * @param s
+ * The pointer of stage instance.
+ *
+ * @param queue_id
+ * The value of internal queue id.
+ */
+void
+opdl_stage_set_queue_id(struct opdl_stage *s,
+ uint32_t queue_id);
+
+/**
+ * Prints information on opdl_ring instance and all its stages
+ *
+ * @param t
+ * The stage to print info on.
+ * @param f
+ * Where to print the info.
+ */
+void
+opdl_ring_dump(const struct opdl_ring *t, FILE *f);
+
+/**
+ * Blocks until all entries in a opdl_ring have been processed by all stages.
+ *
+ * @param t
+ * The opdl_ring instance to flush.
+ */
+void
+opdl_ring_flush(struct opdl_ring *t);
+
+/**
+ * Deallocates all resources used by a opdl_ring instance
+ *
+ * @param t
+ * The opdl_ring instance to free.
+ */
+void
+opdl_ring_free(struct opdl_ring *t);
+
+/**
+ * Search for a opdl_ring by its name
+ *
+ * @param name
+ * The name of the opdl_ring.
+ * @return
+ * The pointer to the opdl_ring matching the name, or NULL if not found.
+ *
+ */
+struct opdl_ring *
+opdl_ring_lookup(const char *name);
+
+/**
+ * Set a opdl_stage to threadsafe variable.
+ *
+ * @param s
+ * The opdl_stage.
+ * @param threadsafe
+ * Threadsafe value.
+ */
+void
+opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe);
+
+
+/**
+ * Compare the event descriptor with original version in the ring.
+ * if key field event descriptor is changed by application, then
+ * update the slot in the ring otherwise do nothing with it.
+ * the key field is flow_id, prioirty, mbuf, impl_opaque
+ *
+ * @param s
+ * The opdl_stage.
+ * @param ev
+ * pointer of the event descriptor.
+ * @param index
+ * index of the event descriptor.
+ * @param atomic
+ * queue type associate with the stage.
+ * @return
+ * if the evevnt key field is changed compare with previous record.
+ */
+
+bool
+opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev,
+ uint32_t index, bool atomic);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _OPDL_H_ */
diff --git a/src/spdk/dpdk/drivers/event/opdl/opdl_test.c b/src/spdk/dpdk/drivers/event/opdl/opdl_test.c
new file mode 100644
index 00000000..5868ec1b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/opdl/opdl_test.c
@@ -0,0 +1,1057 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_eventdev.h>
+#include <rte_bus_vdev.h>
+#include <rte_pause.h>
+
+#include "opdl_evdev.h"
+#include "opdl_log.h"
+
+
+#define MAX_PORTS 16
+#define MAX_QIDS 16
+#define NUM_PACKETS (1<<18)
+#define NUM_EVENTS 256
+#define BURST_SIZE 32
+
+
+
+static int evdev;
+
+struct test {
+ struct rte_mempool *mbuf_pool;
+ uint8_t port[MAX_PORTS];
+ uint8_t qid[MAX_QIDS];
+ int nb_qids;
+};
+
+static struct rte_mempool *eventdev_func_mempool;
+
+static __rte_always_inline struct rte_mbuf *
+rte_gen_arp(int portid, struct rte_mempool *mp)
+{
+ /*
+ * len = 14 + 46
+ * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
+ */
+ static const uint8_t arp_request[] = {
+ /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
+ 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
+ /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
+ 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
+ /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+ };
+ struct rte_mbuf *m;
+ int pkt_len = sizeof(arp_request) - 1;
+
+ m = rte_pktmbuf_alloc(mp);
+ if (!m)
+ return 0;
+
+ memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
+ arp_request, pkt_len);
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+ rte_pktmbuf_data_len(m) = pkt_len;
+
+ RTE_SET_USED(portid);
+
+ return m;
+}
+
+/* initialization and config */
+static __rte_always_inline int
+init(struct test *t, int nb_queues, int nb_ports)
+{
+ struct rte_event_dev_config config = {
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
+ .nb_event_queue_flows = 1024,
+ .nb_events_limit = 4096,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+ int ret;
+
+ void *temp = t->mbuf_pool; /* save and restore mbuf pool */
+
+ memset(t, 0, sizeof(*t));
+ t->mbuf_pool = temp;
+
+ ret = rte_event_dev_configure(evdev, &config);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR, "%d: Error configuring device\n", __LINE__);
+ return ret;
+};
+
+static __rte_always_inline int
+create_ports(struct test *t, int num_ports)
+{
+ int i;
+ static const struct rte_event_port_conf conf = {
+ .new_event_threshold = 1024,
+ .dequeue_depth = 32,
+ .enqueue_depth = 32,
+ };
+ if (num_ports > MAX_PORTS)
+ return -1;
+
+ for (i = 0; i < num_ports; i++) {
+ if (rte_event_port_setup(evdev, i, &conf) < 0) {
+ PMD_DRV_LOG(ERR, "Error setting up port %d\n", i);
+ return -1;
+ }
+ t->port[i] = i;
+ }
+
+ return 0;
+};
+
+static __rte_always_inline int
+create_queues_type(struct test *t, int num_qids, enum queue_type flags)
+{
+ int i;
+ uint8_t type;
+
+ switch (flags) {
+ case OPDL_Q_TYPE_ORDERED:
+ type = RTE_SCHED_TYPE_ORDERED;
+ break;
+ case OPDL_Q_TYPE_ATOMIC:
+ type = RTE_SCHED_TYPE_ATOMIC;
+ break;
+ default:
+ type = 0;
+ }
+
+ /* Q creation */
+ const struct rte_event_queue_conf conf = {
+ .event_queue_cfg =
+ (flags == OPDL_Q_TYPE_SINGLE_LINK ?
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK : 0),
+ .schedule_type = type,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ for (i = t->nb_qids ; i < t->nb_qids + num_qids; i++) {
+ if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+ PMD_DRV_LOG(ERR, "%d: error creating qid %d\n ",
+ __LINE__, i);
+ return -1;
+ }
+ t->qid[i] = i;
+ }
+
+ t->nb_qids += num_qids;
+
+ if (t->nb_qids > MAX_QIDS)
+ return -1;
+
+ return 0;
+}
+
+
+/* destruction */
+static __rte_always_inline int
+cleanup(struct test *t __rte_unused)
+{
+ rte_event_dev_stop(evdev);
+ rte_event_dev_close(evdev);
+ PMD_DRV_LOG(ERR, "clean up for test done\n");
+ return 0;
+};
+
+static int
+ordered_basic(struct test *t)
+{
+ const uint8_t rx_port = 0;
+ const uint8_t w1_port = 1;
+ const uint8_t w3_port = 3;
+ const uint8_t tx_port = 4;
+ int err;
+ uint32_t i;
+ uint32_t deq_pkts;
+ struct rte_mbuf *mbufs[3];
+
+ const uint32_t MAGIC_SEQN = 1234;
+
+ /* Create instance with 5 ports */
+ if (init(t, 2, tx_port+1) < 0 ||
+ create_ports(t, tx_port+1) < 0 ||
+ create_queues_type(t, 2, OPDL_Q_TYPE_ORDERED)) {
+ PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * CQ mapping to QID
+ * We need three ports, all mapped to the same ordered qid0. Then we'll
+ * take a packet out to each port, re-enqueue in reverse order,
+ * then make sure the reordering has taken place properly when we
+ * dequeue from the tx_port.
+ *
+ * Simplified test setup diagram:
+ *
+ * rx_port w1_port
+ * \ / \
+ * qid0 - w2_port - qid1
+ * \ / \
+ * w3_port tx_port
+ */
+ /* CQ mapping to QID for LB ports (directed mapped on create) */
+ for (i = w1_port; i <= w3_port; i++) {
+ err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n",
+ __LINE__);
+ cleanup(t);
+ return -1;
+ }
+ }
+
+ err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error mapping TX qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+ /* Enqueue 3 packets to the rx port */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
+ if (!mbufs[i]) {
+ PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = mbufs[i];
+ mbufs[i]->seqn = MAGIC_SEQN + i;
+
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
+ __LINE__, i, err);
+ return -1;
+ }
+ }
+
+ /* use extra slot to make logic in loops easier */
+ struct rte_event deq_ev[w3_port + 1];
+
+ uint32_t seq = 0;
+
+ /* Dequeue the 3 packets, one from each worker port */
+ for (i = w1_port; i <= w3_port; i++) {
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
+ &deq_ev[i], 1, 0);
+ if (deq_pkts != 1) {
+ PMD_DRV_LOG(ERR, "%d: Failed to deq\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ seq = deq_ev[i].mbuf->seqn - MAGIC_SEQN;
+
+ if (seq != (i-1)) {
+ PMD_DRV_LOG(ERR, " seq test failed ! eq is %d , "
+ "port number is %u\n", seq, i);
+ return -1;
+ }
+ }
+
+ /* Enqueue each packet in reverse order, flushing after each one */
+ for (i = w3_port; i >= w1_port; i--) {
+
+ deq_ev[i].op = RTE_EVENT_OP_FORWARD;
+ deq_ev[i].queue_id = t->qid[1];
+ err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ /* dequeue from the tx ports, we should get 3 packets */
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
+ 3, 0);
+
+ /* Check to see if we've got all 3 packets */
+ if (deq_pkts != 3) {
+ PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d\n",
+ __LINE__, deq_pkts, tx_port);
+ rte_event_dev_dump(evdev, stdout);
+ return 1;
+ }
+
+ /* Destroy the instance */
+ cleanup(t);
+
+ return 0;
+}
+
+
+static int
+atomic_basic(struct test *t)
+{
+ const uint8_t rx_port = 0;
+ const uint8_t w1_port = 1;
+ const uint8_t w3_port = 3;
+ const uint8_t tx_port = 4;
+ int err;
+ int i;
+ uint32_t deq_pkts;
+ struct rte_mbuf *mbufs[3];
+ const uint32_t MAGIC_SEQN = 1234;
+
+ /* Create instance with 5 ports */
+ if (init(t, 2, tx_port+1) < 0 ||
+ create_ports(t, tx_port+1) < 0 ||
+ create_queues_type(t, 2, OPDL_Q_TYPE_ATOMIC)) {
+ PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+
+ /*
+ * CQ mapping to QID
+ * We need three ports, all mapped to the same ordered qid0. Then we'll
+ * take a packet out to each port, re-enqueue in reverse order,
+ * then make sure the reordering has taken place properly when we
+ * dequeue from the tx_port.
+ *
+ * Simplified test setup diagram:
+ *
+ * rx_port w1_port
+ * \ / \
+ * qid0 - w2_port - qid1
+ * \ / \
+ * w3_port tx_port
+ */
+ /* CQ mapping to QID for Atomic ports (directed mapped on create) */
+ for (i = w1_port; i <= w3_port; i++) {
+ err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n",
+ __LINE__);
+ cleanup(t);
+ return -1;
+ }
+ }
+
+ err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error mapping TX qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /* Enqueue 3 packets to the rx port */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
+ if (!mbufs[i]) {
+ PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.flow_id = 1;
+ ev.mbuf = mbufs[i];
+ mbufs[i]->seqn = MAGIC_SEQN + i;
+
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
+ __LINE__, i, err);
+ return -1;
+ }
+ }
+
+ /* use extra slot to make logic in loops easier */
+ struct rte_event deq_ev[w3_port + 1];
+
+ /* Dequeue the 3 packets, one from each worker port */
+ for (i = w1_port; i <= w3_port; i++) {
+
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
+ deq_ev, 3, 0);
+
+ if (t->port[i] != 2) {
+ if (deq_pkts != 0) {
+ PMD_DRV_LOG(ERR, "%d: deq none zero !\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ } else {
+
+ if (deq_pkts != 3) {
+ PMD_DRV_LOG(ERR, "%d: deq not eqal to 3 %u !\n",
+ __LINE__, deq_pkts);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ int j;
+ for (j = 0; j < 3; j++) {
+ deq_ev[j].op = RTE_EVENT_OP_FORWARD;
+ deq_ev[j].queue_id = t->qid[1];
+ }
+
+ err = rte_event_enqueue_burst(evdev, t->port[i],
+ deq_ev, 3);
+
+ if (err != 3) {
+ PMD_DRV_LOG(ERR, "port %d: Failed to enqueue pkt %u, "
+ "retval = %u\n",
+ t->port[i], 3, err);
+ return -1;
+ }
+
+ }
+
+ }
+
+
+ /* dequeue from the tx ports, we should get 3 packets */
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
+ 3, 0);
+
+ /* Check to see if we've got all 3 packets */
+ if (deq_pkts != 3) {
+ PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d\n",
+ __LINE__, deq_pkts, tx_port);
+ rte_event_dev_dump(evdev, stdout);
+ return 1;
+ }
+
+ cleanup(t);
+
+ return 0;
+}
+static __rte_always_inline int
+check_qid_stats(uint32_t id[], int index)
+{
+
+ if (index == 0) {
+ if (id[0] != 3 || id[1] != 3
+ || id[2] != 3)
+ return -1;
+ } else if (index == 1) {
+ if (id[0] != 5 || id[1] != 5
+ || id[2] != 2)
+ return -1;
+ } else if (index == 2) {
+ if (id[0] != 3 || id[1] != 1
+ || id[2] != 1)
+ return -1;
+ }
+
+ return 0;
+}
+
+
+static int
+check_statistics(void)
+{
+ int num_ports = 3; /* Hard-coded for this app */
+ int i;
+
+ for (i = 0; i < num_ports; i++) {
+ int num_stats, num_stats_returned;
+
+ num_stats = rte_event_dev_xstats_names_get(0,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ i,
+ NULL,
+ NULL,
+ 0);
+ if (num_stats > 0) {
+
+ uint32_t id[num_stats];
+ struct rte_event_dev_xstats_name names[num_stats];
+ uint64_t values[num_stats];
+
+ num_stats_returned = rte_event_dev_xstats_names_get(0,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ i,
+ names,
+ id,
+ num_stats);
+
+ if (num_stats == num_stats_returned) {
+ num_stats_returned = rte_event_dev_xstats_get(0,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ i,
+ id,
+ values,
+ num_stats);
+
+ if (num_stats == num_stats_returned) {
+ int err;
+
+ err = check_qid_stats(id, i);
+
+ if (err)
+ return err;
+
+ } else {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+#define OLD_NUM_PACKETS 3
+#define NEW_NUM_PACKETS 2
+static int
+single_link_w_stats(struct test *t)
+{
+ const uint8_t rx_port = 0;
+ const uint8_t w1_port = 1;
+ const uint8_t tx_port = 2;
+ int err;
+ int i;
+ uint32_t deq_pkts;
+ struct rte_mbuf *mbufs[3];
+ RTE_SET_USED(mbufs);
+
+ /* Create instance with 3 ports */
+ if (init(t, 2, tx_port + 1) < 0 ||
+ create_ports(t, 3) < 0 || /* 0,1,2 */
+ create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
+ create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
+ PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+
+ /*
+ *
+ * Simplified test setup diagram:
+ *
+ * rx_port(0)
+ * \
+ * qid0 - w1_port(1) - qid1
+ * \
+ * tx_port(2)
+ */
+
+ err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n",
+ __LINE__,
+ t->port[1],
+ t->qid[0]);
+ cleanup(t);
+ return -1;
+ }
+
+ err = rte_event_port_link(evdev, t->port[2], &t->qid[1], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n",
+ __LINE__,
+ t->port[2],
+ t->qid[1]);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) != 0) {
+ PMD_DRV_LOG(ERR, "%d: failed to start device\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ /*
+ * Enqueue 3 packets to the rx port
+ */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
+ if (!mbufs[i]) {
+ PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = mbufs[i];
+ mbufs[i]->seqn = 1234 + i;
+
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
+ __LINE__,
+ t->port[rx_port],
+ err);
+ return -1;
+ }
+ }
+
+ /* Dequeue the 3 packets, from SINGLE_LINK worker port */
+ struct rte_event deq_ev[3];
+
+ deq_pkts = rte_event_dequeue_burst(evdev,
+ t->port[w1_port],
+ deq_ev, 3, 0);
+
+ if (deq_pkts != 3) {
+ PMD_DRV_LOG(ERR, "%d: deq not 3 !\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ /* Just enqueue 2 onto new ring */
+ for (i = 0; i < NEW_NUM_PACKETS; i++)
+ deq_ev[i].queue_id = t->qid[1];
+
+ deq_pkts = rte_event_enqueue_burst(evdev,
+ t->port[w1_port],
+ deq_ev,
+ NEW_NUM_PACKETS);
+
+ if (deq_pkts != 2) {
+ PMD_DRV_LOG(ERR, "%d: enq not 2 but %u!\n", __LINE__, deq_pkts);
+ cleanup(t);
+ return -1;
+ }
+
+ /* dequeue from the tx ports, we should get 2 packets */
+ deq_pkts = rte_event_dequeue_burst(evdev,
+ t->port[tx_port],
+ deq_ev,
+ 3,
+ 0);
+
+ /* Check to see if we've got all 2 packets */
+ if (deq_pkts != 2) {
+ PMD_DRV_LOG(ERR, "%d: expected 2 pkts at tx port got %d from port %d\n",
+ __LINE__, deq_pkts, tx_port);
+ cleanup(t);
+ return -1;
+ }
+
+ if (!check_statistics()) {
+ PMD_DRV_LOG(ERR, "xstats check failed");
+ cleanup(t);
+ return -1;
+ }
+
+ cleanup(t);
+
+ return 0;
+}
+
+static int
+single_link(struct test *t)
+{
+ /* const uint8_t rx_port = 0; */
+ /* const uint8_t w1_port = 1; */
+ /* const uint8_t w3_port = 3; */
+ const uint8_t tx_port = 2;
+ int err;
+ struct rte_mbuf *mbufs[3];
+ RTE_SET_USED(mbufs);
+
+ /* Create instance with 5 ports */
+ if (init(t, 2, tx_port+1) < 0 ||
+ create_ports(t, 3) < 0 || /* 0,1,2 */
+ create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
+ create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
+ PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+
+ /*
+ *
+ * Simplified test setup diagram:
+ *
+ * rx_port(0)
+ * \
+ * qid0 - w1_port(1) - qid1
+ * \
+ * tx_port(2)
+ */
+
+ err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ err = rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) == 0) {
+ PMD_DRV_LOG(ERR, "%d: start DIDN'T FAIL with more than 1 "
+ "SINGLE_LINK PORT\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ cleanup(t);
+
+ return 0;
+}
+
+
+static __rte_always_inline void
+populate_event_burst(struct rte_event ev[],
+ uint8_t qid,
+ uint16_t num_events)
+{
+ uint16_t i;
+ for (i = 0; i < num_events; i++) {
+ ev[i].flow_id = 1;
+ ev[i].op = RTE_EVENT_OP_NEW;
+ ev[i].sched_type = RTE_SCHED_TYPE_ORDERED;
+ ev[i].queue_id = qid;
+ ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev[i].sub_event_type = 0;
+ ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev[i].mbuf = (struct rte_mbuf *)0xdead0000;
+ }
+}
+
+#define NUM_QUEUES 3
+#define BATCH_SIZE 32
+
+static int
+qid_basic(struct test *t)
+{
+ int err = 0;
+
+ uint8_t q_id = 0;
+ uint8_t p_id = 0;
+
+ uint32_t num_events;
+ uint32_t i;
+
+ struct rte_event ev[BATCH_SIZE];
+
+ /* Create instance with 4 ports */
+ if (init(t, NUM_QUEUES, NUM_QUEUES+1) < 0 ||
+ create_ports(t, NUM_QUEUES+1) < 0 ||
+ create_queues_type(t, NUM_QUEUES, OPDL_Q_TYPE_ORDERED)) {
+ PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ for (i = 0; i < NUM_QUEUES; i++) {
+ int nb_linked;
+ q_id = i;
+
+ nb_linked = rte_event_port_link(evdev,
+ i+1, /* port = q_id + 1*/
+ &q_id,
+ NULL,
+ 1);
+
+ if (nb_linked != 1) {
+
+ PMD_DRV_LOG(ERR, "%s:%d: error mapping port:%u to queue:%u\n",
+ __FILE__,
+ __LINE__,
+ i + 1,
+ q_id);
+
+ err = -1;
+ break;
+ }
+
+ }
+
+
+ /* Try and link to the same port again */
+ if (!err) {
+ uint8_t t_qid = 0;
+ if (rte_event_port_link(evdev,
+ 1,
+ &t_qid,
+ NULL,
+ 1) > 0) {
+ PMD_DRV_LOG(ERR, "%s:%d: Second call to port link on same port DID NOT fail\n",
+ __FILE__,
+ __LINE__);
+ err = -1;
+ }
+
+ uint32_t test_num_events;
+
+ if (!err) {
+ test_num_events = rte_event_dequeue_burst(evdev,
+ p_id,
+ ev,
+ BATCH_SIZE,
+ 0);
+ if (test_num_events != 0) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing 0 packets from port %u on stopped device\n",
+ __FILE__,
+ __LINE__,
+ p_id);
+ err = -1;
+ }
+ }
+
+ if (!err) {
+ test_num_events = rte_event_enqueue_burst(evdev,
+ p_id,
+ ev,
+ BATCH_SIZE);
+ if (test_num_events != 0) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing 0 packets to port %u on stopped device\n",
+ __FILE__,
+ __LINE__,
+ p_id);
+ err = -1;
+ }
+ }
+ }
+
+
+ /* Start the devicea */
+ if (!err) {
+ if (rte_event_dev_start(evdev) < 0) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error with start call\n",
+ __FILE__,
+ __LINE__);
+ err = -1;
+ }
+ }
+
+
+ /* Check we can't do any more links now that device is started.*/
+ if (!err) {
+ uint8_t t_qid = 0;
+ if (rte_event_port_link(evdev,
+ 1,
+ &t_qid,
+ NULL,
+ 1) > 0) {
+ PMD_DRV_LOG(ERR, "%s:%d: Call to port link on started device DID NOT fail\n",
+ __FILE__,
+ __LINE__);
+ err = -1;
+ }
+ }
+
+ if (!err) {
+
+ q_id = 0;
+
+ populate_event_burst(ev,
+ q_id,
+ BATCH_SIZE);
+
+ num_events = rte_event_enqueue_burst(evdev,
+ p_id,
+ ev,
+ BATCH_SIZE);
+ if (num_events != BATCH_SIZE) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing rx packets\n",
+ __FILE__,
+ __LINE__);
+ err = -1;
+ }
+ }
+
+ if (!err) {
+ while (++p_id < NUM_QUEUES) {
+
+ num_events = rte_event_dequeue_burst(evdev,
+ p_id,
+ ev,
+ BATCH_SIZE,
+ 0);
+
+ if (num_events != BATCH_SIZE) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from port %u\n",
+ __FILE__,
+ __LINE__,
+ p_id);
+ err = -1;
+ break;
+ }
+
+ if (ev[0].queue_id != q_id) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error event portid[%u] q_id:[%u] does not match expected:[%u]\n",
+ __FILE__,
+ __LINE__,
+ p_id,
+ ev[0].queue_id,
+ q_id);
+ err = -1;
+ break;
+ }
+
+ populate_event_burst(ev,
+ ++q_id,
+ BATCH_SIZE);
+
+ num_events = rte_event_enqueue_burst(evdev,
+ p_id,
+ ev,
+ BATCH_SIZE);
+ if (num_events != BATCH_SIZE) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing packets from port:%u to queue:%u\n",
+ __FILE__,
+ __LINE__,
+ p_id,
+ q_id);
+ err = -1;
+ break;
+ }
+ }
+ }
+
+ if (!err) {
+ num_events = rte_event_dequeue_burst(evdev,
+ p_id,
+ ev,
+ BATCH_SIZE,
+ 0);
+ if (num_events != BATCH_SIZE) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from tx port %u\n",
+ __FILE__,
+ __LINE__,
+ p_id);
+ err = -1;
+ }
+ }
+
+ cleanup(t);
+
+ return err;
+}
+
+
+
+int
+opdl_selftest(void)
+{
+ struct test *t = malloc(sizeof(struct test));
+ int ret;
+
+ const char *eventdev_name = "event_opdl0";
+
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+
+ if (evdev < 0) {
+ PMD_DRV_LOG(ERR, "%d: Eventdev %s not found - creating.\n",
+ __LINE__, eventdev_name);
+ /* turn on stats by default */
+ if (rte_vdev_init(eventdev_name, "do_validation=1") < 0) {
+ PMD_DRV_LOG(ERR, "Error creating eventdev\n");
+ free(t);
+ return -1;
+ }
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ PMD_DRV_LOG(ERR, "Error finding newly created eventdev\n");
+ free(t);
+ return -1;
+ }
+ }
+
+ /* Only create mbuf pool once, reuse for each test run */
+ if (!eventdev_func_mempool) {
+ eventdev_func_mempool = rte_pktmbuf_pool_create(
+ "EVENTDEV_SW_SA_MBUF_POOL",
+ (1<<12), /* 4k buffers */
+ 32 /*MBUF_CACHE_SIZE*/,
+ 0,
+ 512, /* use very small mbufs */
+ rte_socket_id());
+ if (!eventdev_func_mempool) {
+ PMD_DRV_LOG(ERR, "ERROR creating mempool\n");
+ free(t);
+ return -1;
+ }
+ }
+ t->mbuf_pool = eventdev_func_mempool;
+
+ PMD_DRV_LOG(ERR, "*** Running Ordered Basic test...\n");
+ ret = ordered_basic(t);
+
+ PMD_DRV_LOG(ERR, "*** Running Atomic Basic test...\n");
+ ret = atomic_basic(t);
+
+
+ PMD_DRV_LOG(ERR, "*** Running QID Basic test...\n");
+ ret = qid_basic(t);
+
+ PMD_DRV_LOG(ERR, "*** Running SINGLE LINK failure test...\n");
+ ret = single_link(t);
+
+ PMD_DRV_LOG(ERR, "*** Running SINGLE LINK w stats test...\n");
+ ret = single_link_w_stats(t);
+
+ /*
+ * Free test instance, free mempool
+ */
+ rte_mempool_free(t->mbuf_pool);
+ free(t);
+
+ if (ret != 0)
+ return ret;
+ return 0;
+
+}
diff --git a/src/spdk/dpdk/drivers/event/opdl/rte_pmd_evdev_opdl_version.map b/src/spdk/dpdk/drivers/event/opdl/rte_pmd_evdev_opdl_version.map
new file mode 100644
index 00000000..58b94270
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/opdl/rte_pmd_evdev_opdl_version.map
@@ -0,0 +1,3 @@
+DPDK_18.02 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/event/skeleton/Makefile b/src/spdk/dpdk/drivers/event/skeleton/Makefile
new file mode 100644
index 00000000..0f7f07ea
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/skeleton/Makefile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_skeleton_event.a
+
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_eventdev
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
+
+EXPORT_MAP := rte_pmd_skeleton_event_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton_eventdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/event/skeleton/meson.build b/src/spdk/dpdk/drivers/event/skeleton/meson.build
new file mode 100644
index 00000000..acfe1565
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/skeleton/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('skeleton_eventdev.c')
+deps += ['bus_pci', 'bus_vdev']
diff --git a/src/spdk/dpdk/drivers/event/skeleton/rte_pmd_skeleton_event_version.map b/src/spdk/dpdk/drivers/event/skeleton/rte_pmd_skeleton_event_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/skeleton/rte_pmd_skeleton_event_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/event/skeleton/skeleton_eventdev.c b/src/spdk/dpdk/drivers/event/skeleton/skeleton_eventdev.c
new file mode 100644
index 00000000..c889220e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/skeleton/skeleton_eventdev.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_lcore.h>
+#include <rte_bus_vdev.h>
+
+#include "skeleton_eventdev.h"
+
+#define EVENTDEV_NAME_SKELETON_PMD event_skeleton
+/**< Skeleton event device PMD name */
+
+static uint16_t
+skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
+{
+ struct skeleton_port *sp = port;
+
+ RTE_SET_USED(sp);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(port);
+
+ return 0;
+}
+
+static uint16_t
+skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct skeleton_port *sp = port;
+
+ RTE_SET_USED(sp);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(port);
+ RTE_SET_USED(nb_events);
+
+ return 0;
+}
+
+static uint16_t
+skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks)
+{
+ struct skeleton_port *sp = port;
+
+ RTE_SET_USED(sp);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(timeout_ticks);
+
+ return 0;
+}
+
+static uint16_t
+skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ struct skeleton_port *sp = port;
+
+ RTE_SET_USED(sp);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(nb_events);
+ RTE_SET_USED(timeout_ticks);
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_info_get(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+
+ dev_info->min_dequeue_timeout_ns = 1;
+ dev_info->max_dequeue_timeout_ns = 10000;
+ dev_info->dequeue_timeout_ns = 25;
+ dev_info->max_event_queues = 64;
+ dev_info->max_event_queue_flows = (1ULL << 20);
+ dev_info->max_event_queue_priority_levels = 8;
+ dev_info->max_event_priority_levels = 8;
+ dev_info->max_event_ports = 32;
+ dev_info->max_event_port_dequeue_depth = 16;
+ dev_info->max_event_port_enqueue_depth = 16;
+ dev_info->max_num_events = (1ULL << 20);
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_BURST_MODE |
+ RTE_EVENT_DEV_CAP_EVENT_QOS;
+}
+
+static int
+skeleton_eventdev_configure(const struct rte_eventdev *dev)
+{
+ struct rte_eventdev_data *data = dev->data;
+ struct rte_event_dev_config *conf = &data->dev_conf;
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(conf);
+ RTE_SET_USED(skel);
+
+ PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
+ return 0;
+}
+
+static int
+skeleton_eventdev_start(struct rte_eventdev *dev)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_stop(struct rte_eventdev *dev)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+}
+
+static int
+skeleton_eventdev_close(struct rte_eventdev *dev)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(queue_id);
+
+ queue_conf->nb_atomic_flows = (1ULL << 20);
+ queue_conf->nb_atomic_order_sequences = (1ULL << 20);
+ queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static void
+skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static int
+skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(queue_conf);
+ RTE_SET_USED(queue_id);
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(port_id);
+
+ port_conf->new_event_threshold = 32 * 1024;
+ port_conf->dequeue_depth = 16;
+ port_conf->enqueue_depth = 16;
+ port_conf->disable_implicit_release = 0;
+}
+
+static void
+skeleton_eventdev_port_release(void *port)
+{
+ struct skeleton_port *sp = port;
+ PMD_DRV_FUNC_TRACE();
+
+ rte_free(sp);
+}
+
+static int
+skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct skeleton_port *sp;
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(port_conf);
+
+ /* Free memory prior to re-allocation if needed */
+ if (dev->data->ports[port_id] != NULL) {
+ PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+ port_id);
+ skeleton_eventdev_port_release(dev->data->ports[port_id]);
+ dev->data->ports[port_id] = NULL;
+ }
+
+ /* Allocate event port memory */
+ sp = rte_zmalloc_socket("eventdev port",
+ sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
+ dev->data->socket_id);
+ if (sp == NULL) {
+ PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
+ return -ENOMEM;
+ }
+
+ sp->port_id = port_id;
+
+ PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);
+
+ dev->data->ports[port_id] = sp;
+ return 0;
+}
+
+static int
+skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct skeleton_port *sp = port;
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(sp);
+ RTE_SET_USED(queues);
+ RTE_SET_USED(priorities);
+
+ /* Linked all the queues */
+ return (int)nb_links;
+}
+
+static int
+skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct skeleton_port *sp = port;
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(sp);
+ RTE_SET_USED(queues);
+
+ /* Unlinked all the queues */
+ return (int)nb_unlinks;
+
+}
+
+static int
+skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+ uint32_t scale = 1;
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ *timeout_ticks = ns * scale;
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(f);
+}
+
+
+/* Initialize and register event driver with DPDK Application */
+static struct rte_eventdev_ops skeleton_eventdev_ops = {
+ .dev_infos_get = skeleton_eventdev_info_get,
+ .dev_configure = skeleton_eventdev_configure,
+ .dev_start = skeleton_eventdev_start,
+ .dev_stop = skeleton_eventdev_stop,
+ .dev_close = skeleton_eventdev_close,
+ .queue_def_conf = skeleton_eventdev_queue_def_conf,
+ .queue_setup = skeleton_eventdev_queue_setup,
+ .queue_release = skeleton_eventdev_queue_release,
+ .port_def_conf = skeleton_eventdev_port_def_conf,
+ .port_setup = skeleton_eventdev_port_setup,
+ .port_release = skeleton_eventdev_port_release,
+ .port_link = skeleton_eventdev_port_link,
+ .port_unlink = skeleton_eventdev_port_unlink,
+ .timeout_ticks = skeleton_eventdev_timeout_ticks,
+ .dump = skeleton_eventdev_dump
+};
+
+static int
+skeleton_eventdev_init(struct rte_eventdev *eventdev)
+{
+ struct rte_pci_device *pci_dev;
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
+ int ret = 0;
+
+ PMD_DRV_FUNC_TRACE();
+
+ eventdev->dev_ops = &skeleton_eventdev_ops;
+ eventdev->enqueue = skeleton_eventdev_enqueue;
+ eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
+ eventdev->dequeue = skeleton_eventdev_dequeue;
+ eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
+
+ skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
+ if (!skel->reg_base) {
+ PMD_DRV_ERR("Failed to map BAR0");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ skel->device_id = pci_dev->id.device_id;
+ skel->vendor_id = pci_dev->id.vendor_id;
+ skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+
+ PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
+ pci_dev->id.vendor_id, pci_dev->id.device_id,
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+
+ PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
+ eventdev->data->dev_id, eventdev->data->socket_id,
+ skel->vendor_id, skel->device_id);
+
+fail:
+ return ret;
+}
+
+/* PCI based event device */
+
+#define EVENTDEV_SKEL_VENDOR_ID 0x177d
+#define EVENTDEV_SKEL_PRODUCT_ID 0x0001
+
+static const struct rte_pci_id pci_id_skeleton_map[] = {
+ {
+ RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
+ EVENTDEV_SKEL_PRODUCT_ID)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static int
+event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_event_pmd_pci_probe(pci_drv, pci_dev,
+ sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
+}
+
+static int
+event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_event_pmd_pci_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
+ .id_table = pci_id_skeleton_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = event_skeleton_pci_probe,
+ .remove = event_skeleton_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);
+
+/* VDEV based event device */
+
+static int
+skeleton_eventdev_create(const char *name, int socket_id)
+{
+ struct rte_eventdev *eventdev;
+
+ eventdev = rte_event_pmd_vdev_init(name,
+ sizeof(struct skeleton_eventdev), socket_id);
+ if (eventdev == NULL) {
+ PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
+ goto fail;
+ }
+
+ eventdev->dev_ops = &skeleton_eventdev_ops;
+ eventdev->enqueue = skeleton_eventdev_enqueue;
+ eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
+ eventdev->dequeue = skeleton_eventdev_dequeue;
+ eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
+
+ return 0;
+fail:
+ return -EFAULT;
+}
+
+static int
+skeleton_eventdev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
+ rte_socket_id());
+ return skeleton_eventdev_create(name, rte_socket_id());
+}
+
+static int
+skeleton_eventdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
+ .probe = skeleton_eventdev_probe,
+ .remove = skeleton_eventdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
diff --git a/src/spdk/dpdk/drivers/event/skeleton/skeleton_eventdev.h b/src/spdk/dpdk/drivers/event/skeleton/skeleton_eventdev.h
new file mode 100644
index 00000000..ba64b8ae
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/skeleton/skeleton_eventdev.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef __SKELETON_EVENTDEV_H__
+#define __SKELETON_EVENTDEV_H__
+
+#include <rte_eventdev_pmd_pci.h>
+#include <rte_eventdev_pmd_vdev.h>
+
+#ifdef RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
+#define PMD_DRV_FUNC_TRACE() do { } while (0)
+#endif
+
+#define PMD_DRV_ERR(fmt, args...) \
+ RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ## args)
+
+struct skeleton_eventdev {
+ uintptr_t reg_base;
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t subsystem_device_id;
+ uint16_t subsystem_vendor_id;
+} __rte_cache_aligned;
+
+struct skeleton_port {
+ uint8_t port_id;
+} __rte_cache_aligned;
+
+static inline struct skeleton_eventdev *
+skeleton_pmd_priv(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+#endif /* __SKELETON_EVENTDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/event/sw/Makefile b/src/spdk/dpdk/drivers/event/sw/Makefile
new file mode 100644
index 00000000..81236a39
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/sw/Makefile
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016-2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_sw_event.a
+
+# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+# for older GCC versions, allow us to initialize an event using
+# designated initializers.
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -le 50 && echo 1), 1)
+CFLAGS += -Wno-missing-field-initializers
+endif
+endif
+LDLIBS += -lrte_eal -lrte_eventdev -lrte_kvargs -lrte_ring
+LDLIBS += -lrte_mempool -lrte_mbuf
+LDLIBS += -lrte_bus_vdev
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_sw_event_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_worker.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_scheduler.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_xstats.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_selftest.c
+
+# export include files
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/event/sw/event_ring.h b/src/spdk/dpdk/drivers/event/sw/event_ring.h
new file mode 100644
index 00000000..02308728
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/sw/event_ring.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+/*
+ * Generic ring structure for passing events from one core to another.
+ *
+ * Used by the software scheduler for the producer and consumer rings for
+ * each port, i.e. for passing events from worker cores to scheduler and
+ * vice-versa. Designed for single-producer, single-consumer use with two
+ * cores working on each ring.
+ */
+
+#ifndef _EVENT_RING_
+#define _EVENT_RING_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+
+#define QE_RING_NAMESIZE 32
+
+struct qe_ring {
+ char name[QE_RING_NAMESIZE] __rte_cache_aligned;
+ uint32_t ring_size; /* size of memory block allocated to the ring */
+ uint32_t mask; /* mask for read/write values == ring_size -1 */
+ uint32_t size; /* actual usable space in the ring */
+ volatile uint32_t write_idx __rte_cache_aligned;
+ volatile uint32_t read_idx __rte_cache_aligned;
+
+ struct rte_event ring[0] __rte_cache_aligned;
+};
+
+static inline struct qe_ring *
+qe_ring_create(const char *name, unsigned int size, unsigned int socket_id)
+{
+ struct qe_ring *retval;
+ const uint32_t ring_size = rte_align32pow2(size + 1);
+ size_t memsize = sizeof(*retval) +
+ (ring_size * sizeof(retval->ring[0]));
+
+ retval = rte_zmalloc_socket(NULL, memsize, 0, socket_id);
+ if (retval == NULL)
+ goto end;
+
+ snprintf(retval->name, sizeof(retval->name), "EVDEV_RG_%s", name);
+ retval->ring_size = ring_size;
+ retval->mask = ring_size - 1;
+ retval->size = size;
+end:
+ return retval;
+}
+
+static inline void
+qe_ring_destroy(struct qe_ring *r)
+{
+ rte_free(r);
+}
+
+static __rte_always_inline unsigned int
+qe_ring_count(const struct qe_ring *r)
+{
+ return r->write_idx - r->read_idx;
+}
+
+static __rte_always_inline unsigned int
+qe_ring_free_count(const struct qe_ring *r)
+{
+ return r->size - qe_ring_count(r);
+}
+
+static __rte_always_inline unsigned int
+qe_ring_enqueue_burst(struct qe_ring *r, const struct rte_event *qes,
+ unsigned int nb_qes, uint16_t *free_count)
+{
+ const uint32_t size = r->size;
+ const uint32_t mask = r->mask;
+ const uint32_t read = r->read_idx;
+ uint32_t write = r->write_idx;
+ const uint32_t space = read + size - write;
+ uint32_t i;
+
+ if (space < nb_qes)
+ nb_qes = space;
+
+ for (i = 0; i < nb_qes; i++, write++)
+ r->ring[write & mask] = qes[i];
+
+ rte_smp_wmb();
+
+ if (nb_qes != 0)
+ r->write_idx = write;
+
+ *free_count = space - nb_qes;
+
+ return nb_qes;
+}
+
+static __rte_always_inline unsigned int
+qe_ring_enqueue_burst_with_ops(struct qe_ring *r, const struct rte_event *qes,
+ unsigned int nb_qes, uint8_t *ops)
+{
+ const uint32_t size = r->size;
+ const uint32_t mask = r->mask;
+ const uint32_t read = r->read_idx;
+ uint32_t write = r->write_idx;
+ const uint32_t space = read + size - write;
+ uint32_t i;
+
+ if (space < nb_qes)
+ nb_qes = space;
+
+ for (i = 0; i < nb_qes; i++, write++) {
+ r->ring[write & mask] = qes[i];
+ r->ring[write & mask].op = ops[i];
+ }
+
+ rte_smp_wmb();
+
+ if (nb_qes != 0)
+ r->write_idx = write;
+
+ return nb_qes;
+}
+
+static __rte_always_inline unsigned int
+qe_ring_dequeue_burst(struct qe_ring *r, struct rte_event *qes,
+ unsigned int nb_qes)
+{
+ const uint32_t mask = r->mask;
+ uint32_t read = r->read_idx;
+ const uint32_t write = r->write_idx;
+ const uint32_t items = write - read;
+ uint32_t i;
+
+ if (items < nb_qes)
+ nb_qes = items;
+
+
+ for (i = 0; i < nb_qes; i++, read++)
+ qes[i] = r->ring[read & mask];
+
+ rte_smp_rmb();
+
+ if (nb_qes != 0)
+ r->read_idx += nb_qes;
+
+ return nb_qes;
+}
+
+#endif
diff --git a/src/spdk/dpdk/drivers/event/sw/iq_chunk.h b/src/spdk/dpdk/drivers/event/sw/iq_chunk.h
new file mode 100644
index 00000000..31d013ea
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/sw/iq_chunk.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _IQ_CHUNK_H_
+#define _IQ_CHUNK_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <rte_eventdev.h>
+
+#define IQ_ROB_NAMESIZE 12
+
+struct sw_queue_chunk {
+ struct rte_event events[SW_EVS_PER_Q_CHUNK];
+ struct sw_queue_chunk *next;
+} __rte_cache_aligned;
+
+static __rte_always_inline bool
+iq_empty(struct sw_iq *iq)
+{
+ return (iq->count == 0);
+}
+
+static __rte_always_inline uint16_t
+iq_count(const struct sw_iq *iq)
+{
+ return iq->count;
+}
+
+static __rte_always_inline struct sw_queue_chunk *
+iq_alloc_chunk(struct sw_evdev *sw)
+{
+ struct sw_queue_chunk *chunk = sw->chunk_list_head;
+ sw->chunk_list_head = chunk->next;
+ chunk->next = NULL;
+ return chunk;
+}
+
+static __rte_always_inline void
+iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
+{
+ chunk->next = sw->chunk_list_head;
+ sw->chunk_list_head = chunk;
+}
+
+static __rte_always_inline void
+iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head)
+{
+ while (head) {
+ struct sw_queue_chunk *next;
+ next = head->next;
+ iq_free_chunk(sw, head);
+ head = next;
+ }
+}
+
+static __rte_always_inline void
+iq_init(struct sw_evdev *sw, struct sw_iq *iq)
+{
+ iq->head = iq_alloc_chunk(sw);
+ iq->tail = iq->head;
+ iq->head_idx = 0;
+ iq->tail_idx = 0;
+ iq->count = 0;
+}
+
+static __rte_always_inline void
+iq_enqueue(struct sw_evdev *sw, struct sw_iq *iq, const struct rte_event *ev)
+{
+ iq->tail->events[iq->tail_idx++] = *ev;
+ iq->count++;
+
+ if (unlikely(iq->tail_idx == SW_EVS_PER_Q_CHUNK)) {
+ /* The number of chunks is defined in relation to the total
+ * number of inflight events and number of IQS such that
+ * allocation will always succeed.
+ */
+ struct sw_queue_chunk *chunk = iq_alloc_chunk(sw);
+ iq->tail->next = chunk;
+ iq->tail = chunk;
+ iq->tail_idx = 0;
+ }
+}
+
+static __rte_always_inline void
+iq_pop(struct sw_evdev *sw, struct sw_iq *iq)
+{
+ iq->head_idx++;
+ iq->count--;
+
+ if (unlikely(iq->head_idx == SW_EVS_PER_Q_CHUNK)) {
+ struct sw_queue_chunk *next = iq->head->next;
+ iq_free_chunk(sw, iq->head);
+ iq->head = next;
+ iq->head_idx = 0;
+ }
+}
+
+static __rte_always_inline const struct rte_event *
+iq_peek(struct sw_iq *iq)
+{
+ return &iq->head->events[iq->head_idx];
+}
+
+/* Note: the caller must ensure that count <= iq_count() */
+static __rte_always_inline uint16_t
+iq_dequeue_burst(struct sw_evdev *sw,
+ struct sw_iq *iq,
+ struct rte_event *ev,
+ uint16_t count)
+{
+ struct sw_queue_chunk *current;
+ uint16_t total, index;
+
+ count = RTE_MIN(count, iq_count(iq));
+
+ current = iq->head;
+ index = iq->head_idx;
+ total = 0;
+
+ /* Loop over the chunks */
+ while (1) {
+ struct sw_queue_chunk *next;
+ for (; index < SW_EVS_PER_Q_CHUNK;) {
+ ev[total++] = current->events[index++];
+
+ if (unlikely(total == count))
+ goto done;
+ }
+
+ /* Move to the next chunk */
+ next = current->next;
+ iq_free_chunk(sw, current);
+ current = next;
+ index = 0;
+ }
+
+done:
+ if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
+ struct sw_queue_chunk *next = current->next;
+ iq_free_chunk(sw, current);
+ iq->head = next;
+ iq->head_idx = 0;
+ } else {
+ iq->head = current;
+ iq->head_idx = index;
+ }
+
+ iq->count -= total;
+
+ return total;
+}
+
+static __rte_always_inline void
+iq_put_back(struct sw_evdev *sw,
+ struct sw_iq *iq,
+ struct rte_event *ev,
+ unsigned int count)
+{
+ /* Put back events that fit in the current head chunk. If necessary,
+ * put back events in a new head chunk. The caller must ensure that
+ * count <= SW_EVS_PER_Q_CHUNK, to ensure that at most one new head is
+ * needed.
+ */
+ uint16_t avail_space = iq->head_idx;
+
+ if (avail_space >= count) {
+ const uint16_t idx = avail_space - count;
+ uint16_t i;
+
+ for (i = 0; i < count; i++)
+ iq->head->events[idx + i] = ev[i];
+
+ iq->head_idx = idx;
+ } else if (avail_space < count) {
+ const uint16_t remaining = count - avail_space;
+ struct sw_queue_chunk *new_head;
+ uint16_t i;
+
+ for (i = 0; i < avail_space; i++)
+ iq->head->events[i] = ev[remaining + i];
+
+ new_head = iq_alloc_chunk(sw);
+ new_head->next = iq->head;
+ iq->head = new_head;
+ iq->head_idx = SW_EVS_PER_Q_CHUNK - remaining;
+
+ for (i = 0; i < remaining; i++)
+ iq->head->events[iq->head_idx + i] = ev[i];
+ }
+
+ iq->count += count;
+}
+
+#endif /* _IQ_CHUNK_H_ */
diff --git a/src/spdk/dpdk/drivers/event/sw/meson.build b/src/spdk/dpdk/drivers/event/sw/meson.build
new file mode 100644
index 00000000..30d22164
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/sw/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+allow_experimental_apis = true
+sources = files('sw_evdev_scheduler.c',
+ 'sw_evdev_selftest.c',
+ 'sw_evdev_worker.c',
+ 'sw_evdev_xstats.c',
+ 'sw_evdev.c'
+)
+deps += ['hash', 'bus_vdev']
diff --git a/src/spdk/dpdk/drivers/event/sw/rte_pmd_sw_event_version.map b/src/spdk/dpdk/drivers/event/sw/rte_pmd_sw_event_version.map
new file mode 100644
index 00000000..5352e7e3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/sw/rte_pmd_sw_event_version.map
@@ -0,0 +1,3 @@
+DPDK_17.05 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/event/sw/sw_evdev.c b/src/spdk/dpdk/drivers/event/sw/sw_evdev.c
new file mode 100644
index 00000000..a6bb9138
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/sw/sw_evdev.c
@@ -0,0 +1,1082 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <inttypes.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_kvargs.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+#include <rte_event_ring.h>
+#include <rte_service_component.h>
+
+#include "sw_evdev.h"
+#include "iq_chunk.h"
+
+#define EVENTDEV_NAME_SW_PMD event_sw
+#define NUMA_NODE_ARG "numa_node"
+#define SCHED_QUANTA_ARG "sched_quanta"
+#define CREDIT_QUANTA_ARG "credit_quanta"
+
+static void
+sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
+
+static int
+sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
+ const uint8_t priorities[], uint16_t num)
+{
+ struct sw_port *p = port;
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ int i;
+
+ RTE_SET_USED(priorities);
+ for (i = 0; i < num; i++) {
+ struct sw_qid *q = &sw->qids[queues[i]];
+ unsigned int j;
+
+ /* check for qid map overflow */
+ if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+
+ if (p->is_directed && p->num_qids_mapped > 0) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+
+ for (j = 0; j < q->cq_num_mapped_cqs; j++) {
+ if (q->cq_map[j] == p->id)
+ break;
+ }
+
+ /* check if port is already linked */
+ if (j < q->cq_num_mapped_cqs)
+ continue;
+
+ if (q->type == SW_SCHED_TYPE_DIRECT) {
+ /* check directed qids only map to one port */
+ if (p->num_qids_mapped > 0) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+ /* check port only takes a directed flow */
+ if (num > 1) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+
+ p->is_directed = 1;
+ p->num_qids_mapped = 1;
+ } else if (q->type == RTE_SCHED_TYPE_ORDERED) {
+ p->num_ordered_qids++;
+ p->num_qids_mapped++;
+ } else if (q->type == RTE_SCHED_TYPE_ATOMIC ||
+ q->type == RTE_SCHED_TYPE_PARALLEL) {
+ p->num_qids_mapped++;
+ }
+
+ q->cq_map[q->cq_num_mapped_cqs] = p->id;
+ rte_smp_wmb();
+ q->cq_num_mapped_cqs++;
+ }
+ return i;
+}
+
+static int
+sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
+ uint16_t nb_unlinks)
+{
+ struct sw_port *p = port;
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ unsigned int i, j;
+
+ int unlinked = 0;
+ for (i = 0; i < nb_unlinks; i++) {
+ struct sw_qid *q = &sw->qids[queues[i]];
+ for (j = 0; j < q->cq_num_mapped_cqs; j++) {
+ if (q->cq_map[j] == p->id) {
+ q->cq_map[j] =
+ q->cq_map[q->cq_num_mapped_cqs - 1];
+ rte_smp_wmb();
+ q->cq_num_mapped_cqs--;
+ unlinked++;
+
+ p->num_qids_mapped--;
+
+ if (q->type == RTE_SCHED_TYPE_ORDERED)
+ p->num_ordered_qids--;
+
+ continue;
+ }
+ }
+ }
+ return unlinked;
+}
+
+static int
+sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *conf)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ struct sw_port *p = &sw->ports[port_id];
+ char buf[RTE_RING_NAMESIZE];
+ unsigned int i;
+
+ struct rte_event_dev_info info;
+ sw_info_get(dev, &info);
+
+ /* detect re-configuring and return credits to instance if needed */
+ if (p->initialized) {
+ /* taking credits from pool is done one quanta at a time, and
+ * credits may be spend (counted in p->inflights) or still
+ * available in the port (p->inflight_credits). We must return
+ * the sum to no leak credits
+ */
+ int possible_inflights = p->inflight_credits + p->inflights;
+ rte_atomic32_sub(&sw->inflights, possible_inflights);
+ }
+
+ *p = (struct sw_port){0}; /* zero entire structure */
+ p->id = port_id;
+ p->sw = sw;
+
+ /* check to see if rings exists - port_setup() can be called multiple
+ * times legally (assuming device is stopped). If ring exists, free it
+ * to so it gets re-created with the correct size
+ */
+ snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
+ port_id, "rx_worker_ring");
+ struct rte_event_ring *existing_ring = rte_event_ring_lookup(buf);
+ if (existing_ring)
+ rte_event_ring_free(existing_ring);
+
+ p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
+ dev->data->socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
+ if (p->rx_worker_ring == NULL) {
+ SW_LOG_ERR("Error creating RX worker ring for port %d\n",
+ port_id);
+ return -1;
+ }
+
+ p->inflight_max = conf->new_event_threshold;
+ p->implicit_release = !conf->disable_implicit_release;
+
+ /* check if ring exists, same as rx_worker above */
+ snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
+ port_id, "cq_worker_ring");
+ existing_ring = rte_event_ring_lookup(buf);
+ if (existing_ring)
+ rte_event_ring_free(existing_ring);
+
+ p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
+ dev->data->socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
+ if (p->cq_worker_ring == NULL) {
+ rte_event_ring_free(p->rx_worker_ring);
+ SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
+ port_id);
+ return -1;
+ }
+ sw->cq_ring_space[port_id] = conf->dequeue_depth;
+
+ /* set hist list contents to empty */
+ for (i = 0; i < SW_PORT_HIST_LIST; i++) {
+ p->hist_list[i].fid = -1;
+ p->hist_list[i].qid = -1;
+ }
+ dev->data->ports[port_id] = p;
+
+ rte_smp_wmb();
+ p->initialized = 1;
+ return 0;
+}
+
+static void
+sw_port_release(void *port)
+{
+ struct sw_port *p = (void *)port;
+ if (p == NULL)
+ return;
+
+ rte_event_ring_free(p->rx_worker_ring);
+ rte_event_ring_free(p->cq_worker_ring);
+ memset(p, 0, sizeof(*p));
+}
+
+static int32_t
+qid_init(struct sw_evdev *sw, unsigned int idx, int type,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ unsigned int i;
+ int dev_id = sw->data->dev_id;
+ int socket_id = sw->data->socket_id;
+ char buf[IQ_ROB_NAMESIZE];
+ struct sw_qid *qid = &sw->qids[idx];
+
+ /* Initialize the FID structures to no pinning (-1), and zero packets */
+ const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
+ for (i = 0; i < RTE_DIM(qid->fids); i++)
+ qid->fids[i] = fid;
+
+ qid->id = idx;
+ qid->type = type;
+ qid->priority = queue_conf->priority;
+
+ if (qid->type == RTE_SCHED_TYPE_ORDERED) {
+ char ring_name[RTE_RING_NAMESIZE];
+ uint32_t window_size;
+
+ /* rte_ring and window_size_mask require require window_size to
+ * be a power-of-2.
+ */
+ window_size = rte_align32pow2(
+ queue_conf->nb_atomic_order_sequences);
+
+ qid->window_size = window_size - 1;
+
+ if (!window_size) {
+ SW_LOG_DBG(
+ "invalid reorder_window_size for ordered queue\n"
+ );
+ goto cleanup;
+ }
+
+ snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i);
+ qid->reorder_buffer = rte_zmalloc_socket(buf,
+ window_size * sizeof(qid->reorder_buffer[0]),
+ 0, socket_id);
+ if (!qid->reorder_buffer) {
+ SW_LOG_DBG("reorder_buffer malloc failed\n");
+ goto cleanup;
+ }
+
+ memset(&qid->reorder_buffer[0],
+ 0,
+ window_size * sizeof(qid->reorder_buffer[0]));
+
+ snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist",
+ dev_id, idx);
+
+ /* lookup the ring, and if it already exists, free it */
+ struct rte_ring *cleanup = rte_ring_lookup(ring_name);
+ if (cleanup)
+ rte_ring_free(cleanup);
+
+ qid->reorder_buffer_freelist = rte_ring_create(ring_name,
+ window_size,
+ socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (!qid->reorder_buffer_freelist) {
+ SW_LOG_DBG("freelist ring create failed");
+ goto cleanup;
+ }
+
+ /* Populate the freelist with reorder buffer entries. Enqueue
+ * 'window_size - 1' entries because the rte_ring holds only
+ * that many.
+ */
+ for (i = 0; i < window_size - 1; i++) {
+ if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
+ &qid->reorder_buffer[i]) < 0)
+ goto cleanup;
+ }
+
+ qid->reorder_buffer_index = 0;
+ qid->cq_next_tx = 0;
+ }
+
+ qid->initialized = 1;
+
+ return 0;
+
+cleanup:
+ if (qid->reorder_buffer) {
+ rte_free(qid->reorder_buffer);
+ qid->reorder_buffer = NULL;
+ }
+
+ if (qid->reorder_buffer_freelist) {
+ rte_ring_free(qid->reorder_buffer_freelist);
+ qid->reorder_buffer_freelist = NULL;
+ }
+
+ return -EINVAL;
+}
+
+static void
+sw_queue_release(struct rte_eventdev *dev, uint8_t id)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ struct sw_qid *qid = &sw->qids[id];
+
+ if (qid->type == RTE_SCHED_TYPE_ORDERED) {
+ rte_free(qid->reorder_buffer);
+ rte_ring_free(qid->reorder_buffer_freelist);
+ }
+ memset(qid, 0, sizeof(*qid));
+}
+
+static int
+sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *conf)
+{
+ int type;
+
+ type = conf->schedule_type;
+
+ if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
+ type = SW_SCHED_TYPE_DIRECT;
+ } else if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
+ & conf->event_queue_cfg) {
+ SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
+ return -ENOTSUP;
+ }
+
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+
+ if (sw->qids[queue_id].initialized)
+ sw_queue_release(dev, queue_id);
+
+ return qid_init(sw, queue_id, type, conf);
+}
+
+static void
+sw_init_qid_iqs(struct sw_evdev *sw)
+{
+ int i, j;
+
+ /* Initialize the IQ memory of all configured qids */
+ for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+ struct sw_qid *qid = &sw->qids[i];
+
+ if (!qid->initialized)
+ continue;
+
+ for (j = 0; j < SW_IQS_MAX; j++)
+ iq_init(sw, &qid->iq[j]);
+ }
+}
+
+static int
+sw_qids_empty(struct sw_evdev *sw)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < sw->qid_count; i++) {
+ for (j = 0; j < SW_IQS_MAX; j++) {
+ if (iq_count(&sw->qids[i].iq[j]))
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int
+sw_ports_empty(struct sw_evdev *sw)
+{
+ unsigned int i;
+
+ for (i = 0; i < sw->port_count; i++) {
+ if ((rte_event_ring_count(sw->ports[i].rx_worker_ring)) ||
+ rte_event_ring_count(sw->ports[i].cq_worker_ring))
+ return 0;
+ }
+
+ return 1;
+}
+
+static void
+sw_drain_ports(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ eventdev_stop_flush_t flush;
+ unsigned int i;
+ uint8_t dev_id;
+ void *arg;
+
+ flush = dev->dev_ops->dev_stop_flush;
+ dev_id = dev->data->dev_id;
+ arg = dev->data->dev_stop_flush_arg;
+
+ for (i = 0; i < sw->port_count; i++) {
+ struct rte_event ev;
+
+ while (rte_event_dequeue_burst(dev_id, i, &ev, 1, 0)) {
+ if (flush)
+ flush(dev_id, ev, arg);
+
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev_id, i, &ev, 1);
+ }
+ }
+}
+
+static void
+sw_drain_queue(struct rte_eventdev *dev, struct sw_iq *iq)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ eventdev_stop_flush_t flush;
+ uint8_t dev_id;
+ void *arg;
+
+ flush = dev->dev_ops->dev_stop_flush;
+ dev_id = dev->data->dev_id;
+ arg = dev->data->dev_stop_flush_arg;
+
+ while (iq_count(iq) > 0) {
+ struct rte_event ev;
+
+ iq_dequeue_burst(sw, iq, &ev, 1);
+
+ if (flush)
+ flush(dev_id, ev, arg);
+ }
+}
+
+static void
+sw_drain_queues(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ unsigned int i, j;
+
+ for (i = 0; i < sw->qid_count; i++) {
+ for (j = 0; j < SW_IQS_MAX; j++)
+ sw_drain_queue(dev, &sw->qids[i].iq[j]);
+ }
+}
+
+static void
+sw_clean_qid_iqs(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ int i, j;
+
+ /* Release the IQ memory of all configured qids */
+ for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+ struct sw_qid *qid = &sw->qids[i];
+
+ for (j = 0; j < SW_IQS_MAX; j++) {
+ if (!qid->iq[j].head)
+ continue;
+ iq_free_chunk_list(sw, qid->iq[j].head);
+ qid->iq[j].head = NULL;
+ }
+ }
+}
+
+static void
+sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ static const struct rte_event_queue_conf default_conf = {
+ .nb_atomic_flows = 4096,
+ .nb_atomic_order_sequences = 1,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ };
+
+ *conf = default_conf;
+}
+
+static void
+sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(port_id);
+
+ port_conf->new_event_threshold = 1024;
+ port_conf->dequeue_depth = 16;
+ port_conf->enqueue_depth = 16;
+ port_conf->disable_implicit_release = 0;
+}
+
+static int
+sw_dev_configure(const struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ const struct rte_eventdev_data *data = dev->data;
+ const struct rte_event_dev_config *conf = &data->dev_conf;
+ int num_chunks, i;
+
+ sw->qid_count = conf->nb_event_queues;
+ sw->port_count = conf->nb_event_ports;
+ sw->nb_events_limit = conf->nb_events_limit;
+ rte_atomic32_set(&sw->inflights, 0);
+
+ /* Number of chunks sized for worst-case spread of events across IQs */
+ num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
+ sw->qid_count*SW_IQS_MAX*2;
+
+ /* If this is a reconfiguration, free the previous IQ allocation. All
+ * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
+ * will be reinitialized in sw_start().
+ */
+ if (sw->chunks)
+ rte_free(sw->chunks);
+
+ sw->chunks = rte_malloc_socket(NULL,
+ sizeof(struct sw_queue_chunk) *
+ num_chunks,
+ 0,
+ sw->data->socket_id);
+ if (!sw->chunks)
+ return -ENOMEM;
+
+ sw->chunk_list_head = NULL;
+ for (i = 0; i < num_chunks; i++)
+ iq_free_chunk(sw, &sw->chunks[i]);
+
+ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct rte_eth_dev;
+
+static int
+sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+ return 0;
+}
+
+static int
+sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
+ uint64_t flags,
+ uint32_t *caps,
+ const struct rte_event_timer_adapter_ops **ops)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(flags);
+ *caps = 0;
+
+ /* Use default SW ops */
+ *ops = NULL;
+
+ return 0;
+}
+
+static int
+sw_crypto_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ uint32_t *caps)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(cdev);
+ *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
+ return 0;
+}
+
+static void
+sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
+{
+ RTE_SET_USED(dev);
+
+ static const struct rte_event_dev_info evdev_sw_info = {
+ .driver_name = SW_PMD_NAME,
+ .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
+ .max_event_queue_flows = SW_QID_NUM_FIDS,
+ .max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
+ .max_event_priority_levels = SW_IQS_MAX,
+ .max_event_ports = SW_PORTS_MAX,
+ .max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
+ .max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
+ .max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
+ .event_dev_cap = (
+ RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_BURST_MODE |
+ RTE_EVENT_DEV_CAP_EVENT_QOS |
+ RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE|
+ RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+ RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE),
+ };
+
+ *info = evdev_sw_info;
+}
+
+static void
+sw_dump(struct rte_eventdev *dev, FILE *f)
+{
+ const struct sw_evdev *sw = sw_pmd_priv(dev);
+
+ static const char * const q_type_strings[] = {
+ "Ordered", "Atomic", "Parallel", "Directed"
+ };
+ uint32_t i;
+ fprintf(f, "EventDev %s: ports %d, qids %d\n", "todo-fix-name",
+ sw->port_count, sw->qid_count);
+
+ fprintf(f, "\trx %"PRIu64"\n\tdrop %"PRIu64"\n\ttx %"PRIu64"\n",
+ sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts);
+ fprintf(f, "\tsched calls: %"PRIu64"\n", sw->sched_called);
+ fprintf(f, "\tsched cq/qid call: %"PRIu64"\n", sw->sched_cq_qid_called);
+ fprintf(f, "\tsched no IQ enq: %"PRIu64"\n", sw->sched_no_iq_enqueues);
+ fprintf(f, "\tsched no CQ enq: %"PRIu64"\n", sw->sched_no_cq_enqueues);
+ uint32_t inflights = rte_atomic32_read(&sw->inflights);
+ uint32_t credits = sw->nb_events_limit - inflights;
+ fprintf(f, "\tinflight %d, credits: %d\n", inflights, credits);
+
+#define COL_RED "\x1b[31m"
+#define COL_RESET "\x1b[0m"
+
+ for (i = 0; i < sw->port_count; i++) {
+ int max, j;
+ const struct sw_port *p = &sw->ports[i];
+ if (!p->initialized) {
+ fprintf(f, " %sPort %d not initialized.%s\n",
+ COL_RED, i, COL_RESET);
+ continue;
+ }
+ fprintf(f, " Port %d %s\n", i,
+ p->is_directed ? " (SingleCons)" : "");
+ fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64
+ "\t%sinflight %d%s\n", sw->ports[i].stats.rx_pkts,
+ sw->ports[i].stats.rx_dropped,
+ sw->ports[i].stats.tx_pkts,
+ (p->inflights == p->inflight_max) ?
+ COL_RED : COL_RESET,
+ sw->ports[i].inflights, COL_RESET);
+
+ fprintf(f, "\tMax New: %u"
+ "\tAvg cycles PP: %"PRIu64"\tCredits: %u\n",
+ sw->ports[i].inflight_max,
+ sw->ports[i].avg_pkt_ticks,
+ sw->ports[i].inflight_credits);
+ fprintf(f, "\tReceive burst distribution:\n");
+ float zp_percent = p->zero_polls * 100.0 / p->total_polls;
+ fprintf(f, zp_percent < 10 ? "\t\t0:%.02f%% " : "\t\t0:%.0f%% ",
+ zp_percent);
+ for (max = (int)RTE_DIM(p->poll_buckets); max-- > 0;)
+ if (p->poll_buckets[max] != 0)
+ break;
+ for (j = 0; j <= max; j++) {
+ if (p->poll_buckets[j] != 0) {
+ float poll_pc = p->poll_buckets[j] * 100.0 /
+ p->total_polls;
+ fprintf(f, "%u-%u:%.02f%% ",
+ ((j << SW_DEQ_STAT_BUCKET_SHIFT) + 1),
+ ((j+1) << SW_DEQ_STAT_BUCKET_SHIFT),
+ poll_pc);
+ }
+ }
+ fprintf(f, "\n");
+
+ if (p->rx_worker_ring) {
+ uint64_t used = rte_event_ring_count(p->rx_worker_ring);
+ uint64_t space = rte_event_ring_free_count(
+ p->rx_worker_ring);
+ const char *col = (space == 0) ? COL_RED : COL_RESET;
+ fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
+ PRIu64 COL_RESET"\n", col, used, space);
+ } else
+ fprintf(f, "\trx ring not initialized.\n");
+
+ if (p->cq_worker_ring) {
+ uint64_t used = rte_event_ring_count(p->cq_worker_ring);
+ uint64_t space = rte_event_ring_free_count(
+ p->cq_worker_ring);
+ const char *col = (space == 0) ? COL_RED : COL_RESET;
+ fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
+ PRIu64 COL_RESET"\n", col, used, space);
+ } else
+ fprintf(f, "\tcq ring not initialized.\n");
+ }
+
+ for (i = 0; i < sw->qid_count; i++) {
+ const struct sw_qid *qid = &sw->qids[i];
+ if (!qid->initialized) {
+ fprintf(f, " %sQueue %d not initialized.%s\n",
+ COL_RED, i, COL_RESET);
+ continue;
+ }
+ int affinities_per_port[SW_PORTS_MAX] = {0};
+ uint32_t inflights = 0;
+
+ fprintf(f, " Queue %d (%s)\n", i, q_type_strings[qid->type]);
+ fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64"\n",
+ qid->stats.rx_pkts, qid->stats.rx_dropped,
+ qid->stats.tx_pkts);
+ if (qid->type == RTE_SCHED_TYPE_ORDERED) {
+ struct rte_ring *rob_buf_free =
+ qid->reorder_buffer_freelist;
+ if (rob_buf_free)
+ fprintf(f, "\tReorder entries in use: %u\n",
+ rte_ring_free_count(rob_buf_free));
+ else
+ fprintf(f,
+ "\tReorder buffer not initialized\n");
+ }
+
+ uint32_t flow;
+ for (flow = 0; flow < RTE_DIM(qid->fids); flow++)
+ if (qid->fids[flow].cq != -1) {
+ affinities_per_port[qid->fids[flow].cq]++;
+ inflights += qid->fids[flow].pcount;
+ }
+
+ uint32_t port;
+ fprintf(f, "\tPer Port Stats:\n");
+ for (port = 0; port < sw->port_count; port++) {
+ fprintf(f, "\t Port %d: Pkts: %"PRIu64, port,
+ qid->to_port[port]);
+ fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
+ }
+
+ uint32_t iq;
+ uint32_t iq_printed = 0;
+ for (iq = 0; iq < SW_IQS_MAX; iq++) {
+ if (!qid->iq[iq].head) {
+ fprintf(f, "\tiq %d is not initialized.\n", iq);
+ iq_printed = 1;
+ continue;
+ }
+ uint32_t used = iq_count(&qid->iq[iq]);
+ const char *col = COL_RESET;
+ if (used > 0) {
+ fprintf(f, "\t%siq %d: Used %d"
+ COL_RESET"\n", col, iq, used);
+ iq_printed = 1;
+ }
+ }
+ if (iq_printed == 0)
+ fprintf(f, "\t-- iqs empty --\n");
+ }
+}
+
+static int
+sw_start(struct rte_eventdev *dev)
+{
+ unsigned int i, j;
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+
+ rte_service_component_runstate_set(sw->service_id, 1);
+
+ /* check a service core is mapped to this service */
+ if (!rte_service_runstate_get(sw->service_id)) {
+ SW_LOG_ERR("Warning: No Service core enabled on service %s\n",
+ sw->service_name);
+ return -ENOENT;
+ }
+
+ /* check all ports are set up */
+ for (i = 0; i < sw->port_count; i++)
+ if (sw->ports[i].rx_worker_ring == NULL) {
+ SW_LOG_ERR("Port %d not configured\n", i);
+ return -ESTALE;
+ }
+
+ /* check all queues are configured and mapped to ports*/
+ for (i = 0; i < sw->qid_count; i++)
+ if (!sw->qids[i].initialized ||
+ sw->qids[i].cq_num_mapped_cqs == 0) {
+ SW_LOG_ERR("Queue %d not configured\n", i);
+ return -ENOLINK;
+ }
+
+ /* build up our prioritized array of qids */
+ /* We don't use qsort here, as if all/multiple entries have the same
+ * priority, the result is non-deterministic. From "man 3 qsort":
+ * "If two members compare as equal, their order in the sorted
+ * array is undefined."
+ */
+ uint32_t qidx = 0;
+ for (j = 0; j <= RTE_EVENT_DEV_PRIORITY_LOWEST; j++) {
+ for (i = 0; i < sw->qid_count; i++) {
+ if (sw->qids[i].priority == j) {
+ sw->qids_prioritized[qidx] = &sw->qids[i];
+ qidx++;
+ }
+ }
+ }
+
+ sw_init_qid_iqs(sw);
+
+ if (sw_xstats_init(sw) < 0)
+ return -EINVAL;
+
+ rte_smp_wmb();
+ sw->started = 1;
+
+ return 0;
+}
+
+static void
+sw_stop(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ int32_t runstate;
+
+ /* Stop the scheduler if it's running */
+ runstate = rte_service_runstate_get(sw->service_id);
+ if (runstate == 1)
+ rte_service_runstate_set(sw->service_id, 0);
+
+ while (rte_service_may_be_active(sw->service_id))
+ rte_pause();
+
+ /* Flush all events out of the device */
+ while (!(sw_qids_empty(sw) && sw_ports_empty(sw))) {
+ sw_event_schedule(dev);
+ sw_drain_ports(dev);
+ sw_drain_queues(dev);
+ }
+
+ sw_clean_qid_iqs(dev);
+ sw_xstats_uninit(sw);
+ sw->started = 0;
+ rte_smp_wmb();
+
+ if (runstate == 1)
+ rte_service_runstate_set(sw->service_id, 1);
+}
+
+static int
+sw_close(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ uint32_t i;
+
+ for (i = 0; i < sw->qid_count; i++)
+ sw_queue_release(dev, i);
+ sw->qid_count = 0;
+
+ for (i = 0; i < sw->port_count; i++)
+ sw_port_release(&sw->ports[i]);
+ sw->port_count = 0;
+
+ memset(&sw->stats, 0, sizeof(sw->stats));
+ sw->sched_called = 0;
+ sw->sched_no_iq_enqueues = 0;
+ sw->sched_no_cq_enqueues = 0;
+ sw->sched_cq_qid_called = 0;
+
+ return 0;
+}
+
+static int
+assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *socket_id = opaque;
+ *socket_id = atoi(value);
+ if (*socket_id >= RTE_MAX_NUMA_NODES)
+ return -1;
+ return 0;
+}
+
+static int
+set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *quanta = opaque;
+ *quanta = atoi(value);
+ if (*quanta < 0 || *quanta >= 4096)
+ return -1;
+ return 0;
+}
+
+static int
+set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *credit = opaque;
+ *credit = atoi(value);
+ if (*credit < 0 || *credit >= 128)
+ return -1;
+ return 0;
+}
+
+
+static int32_t sw_sched_service_func(void *args)
+{
+ struct rte_eventdev *dev = args;
+ sw_event_schedule(dev);
+ return 0;
+}
+
+static int
+sw_probe(struct rte_vdev_device *vdev)
+{
+ static struct rte_eventdev_ops evdev_sw_ops = {
+ .dev_configure = sw_dev_configure,
+ .dev_infos_get = sw_info_get,
+ .dev_close = sw_close,
+ .dev_start = sw_start,
+ .dev_stop = sw_stop,
+ .dump = sw_dump,
+
+ .queue_def_conf = sw_queue_def_conf,
+ .queue_setup = sw_queue_setup,
+ .queue_release = sw_queue_release,
+ .port_def_conf = sw_port_def_conf,
+ .port_setup = sw_port_setup,
+ .port_release = sw_port_release,
+ .port_link = sw_port_link,
+ .port_unlink = sw_port_unlink,
+
+ .eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,
+
+ .timer_adapter_caps_get = sw_timer_adapter_caps_get,
+
+ .crypto_adapter_caps_get = sw_crypto_adapter_caps_get,
+
+ .xstats_get = sw_xstats_get,
+ .xstats_get_names = sw_xstats_get_names,
+ .xstats_get_by_name = sw_xstats_get_by_name,
+ .xstats_reset = sw_xstats_reset,
+
+ .dev_selftest = test_sw_eventdev,
+ };
+
+ static const char *const args[] = {
+ NUMA_NODE_ARG,
+ SCHED_QUANTA_ARG,
+ CREDIT_QUANTA_ARG,
+ NULL
+ };
+ const char *name;
+ const char *params;
+ struct rte_eventdev *dev;
+ struct sw_evdev *sw;
+ int socket_id = rte_socket_id();
+ int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
+ int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
+
+ name = rte_vdev_device_name(vdev);
+ params = rte_vdev_device_args(vdev);
+ if (params != NULL && params[0] != '\0') {
+ struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+ if (!kvlist) {
+ SW_LOG_INFO(
+ "Ignoring unsupported parameters when creating device '%s'\n",
+ name);
+ } else {
+ int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
+ assign_numa_node, &socket_id);
+ if (ret != 0) {
+ SW_LOG_ERR(
+ "%s: Error parsing numa node parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
+ set_sched_quanta, &sched_quanta);
+ if (ret != 0) {
+ SW_LOG_ERR(
+ "%s: Error parsing sched quanta parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
+ set_credit_quanta, &credit_quanta);
+ if (ret != 0) {
+ SW_LOG_ERR(
+ "%s: Error parsing credit quanta parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ rte_kvargs_free(kvlist);
+ }
+ }
+
+ SW_LOG_INFO(
+ "Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
+ name, socket_id, sched_quanta, credit_quanta);
+
+ dev = rte_event_pmd_vdev_init(name,
+ sizeof(struct sw_evdev), socket_id);
+ if (dev == NULL) {
+ SW_LOG_ERR("eventdev vdev init() failed");
+ return -EFAULT;
+ }
+ dev->dev_ops = &evdev_sw_ops;
+ dev->enqueue = sw_event_enqueue;
+ dev->enqueue_burst = sw_event_enqueue_burst;
+ dev->enqueue_new_burst = sw_event_enqueue_burst;
+ dev->enqueue_forward_burst = sw_event_enqueue_burst;
+ dev->dequeue = sw_event_dequeue;
+ dev->dequeue_burst = sw_event_dequeue_burst;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ sw = dev->data->dev_private;
+ sw->data = dev->data;
+
+ /* copy values passed from vdev command line to instance */
+ sw->credit_update_quanta = credit_quanta;
+ sw->sched_quanta = sched_quanta;
+
+ /* register service with EAL */
+ struct rte_service_spec service;
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ snprintf(service.name, sizeof(service.name), "%s_service", name);
+ snprintf(sw->service_name, sizeof(sw->service_name), "%s_service",
+ name);
+ service.socket_id = socket_id;
+ service.callback = sw_sched_service_func;
+ service.callback_userdata = (void *)dev;
+
+ int32_t ret = rte_service_component_register(&service, &sw->service_id);
+ if (ret) {
+ SW_LOG_ERR("service register() failed");
+ return -ENOEXEC;
+ }
+
+ dev->data->service_inited = 1;
+ dev->data->service_id = sw->service_id;
+
+ return 0;
+}
+
+static int
+sw_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ SW_LOG_INFO("Closing eventdev sw device %s\n", name);
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver evdev_sw_pmd_drv = {
+ .probe = sw_probe,
+ .remove = sw_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
+ SCHED_QUANTA_ARG "=<int>" CREDIT_QUANTA_ARG "=<int>");
+
+/* declared extern in header, for access from other .c files */
+int eventdev_sw_log_level;
+
+RTE_INIT(evdev_sw_init_log)
+{
+ eventdev_sw_log_level = rte_log_register("pmd.event.sw");
+ if (eventdev_sw_log_level >= 0)
+ rte_log_set_level(eventdev_sw_log_level, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/event/sw/sw_evdev.h b/src/spdk/dpdk/drivers/event/sw/sw_evdev.h
new file mode 100644
index 00000000..d90b96d4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/sw/sw_evdev.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#ifndef _SW_EVDEV_H_
+#define _SW_EVDEV_H_
+
+#include "sw_evdev_log.h"
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_atomic.h>
+
+#define SW_DEFAULT_CREDIT_QUANTA 32
+#define SW_DEFAULT_SCHED_QUANTA 128
+#define SW_QID_NUM_FIDS 16384
+#define SW_IQS_MAX 4
+#define SW_Q_PRIORITY_MAX 255
+#define SW_PORTS_MAX 64
+#define MAX_SW_CONS_Q_DEPTH 128
+#define SW_INFLIGHT_EVENTS_TOTAL 4096
+/* allow for lots of over-provisioning */
+#define MAX_SW_PROD_Q_DEPTH 4096
+#define SW_FRAGMENTS_MAX 16
+
+/* Should be power-of-two minus one, to leave room for the next pointer */
+#define SW_EVS_PER_Q_CHUNK 255
+#define SW_Q_CHUNK_SIZE ((SW_EVS_PER_Q_CHUNK + 1) * sizeof(struct rte_event))
+
+/* report dequeue burst sizes in buckets */
+#define SW_DEQ_STAT_BUCKET_SHIFT 2
+/* how many packets pulled from port by sched */
+#define SCHED_DEQUEUE_BURST_SIZE 32
+
+#define SW_PORT_HIST_LIST (MAX_SW_PROD_Q_DEPTH) /* size of our history list */
+#define NUM_SAMPLES 64 /* how many data points use for average stats */
+
+#define EVENTDEV_NAME_SW_PMD event_sw
+#define SW_PMD_NAME RTE_STR(event_sw)
+#define SW_PMD_NAME_MAX 64
+
+#define SW_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)
+
+#define SW_NUM_POLL_BUCKETS (MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT)
+
+enum {
+ QE_FLAG_VALID_SHIFT = 0,
+ QE_FLAG_COMPLETE_SHIFT,
+ QE_FLAG_NOT_EOP_SHIFT,
+ _QE_FLAG_COUNT
+};
+
+#define QE_FLAG_VALID (1 << QE_FLAG_VALID_SHIFT) /* for NEW FWD, FRAG */
+#define QE_FLAG_COMPLETE (1 << QE_FLAG_COMPLETE_SHIFT) /* set for FWD, DROP */
+#define QE_FLAG_NOT_EOP (1 << QE_FLAG_NOT_EOP_SHIFT) /* set for FRAG only */
+
+static const uint8_t sw_qe_flag_map[] = {
+ QE_FLAG_VALID /* NEW Event */,
+ QE_FLAG_VALID | QE_FLAG_COMPLETE /* FWD Event */,
+ QE_FLAG_COMPLETE /* RELEASE Event */,
+
+ /* Values which can be used for future support for partial
+ * events, i.e. where one event comes back to the scheduler
+ * as multiple which need to be tracked together
+ */
+ QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP,
+};
+
+/* Records basic event stats at a given point. Used in port and qid structs */
+struct sw_point_stats {
+ uint64_t rx_pkts;
+ uint64_t rx_dropped;
+ uint64_t tx_pkts;
+};
+
+/* structure used to track what port a flow (FID) is pinned to */
+struct sw_fid_t {
+ /* which CQ this FID is currently pinned to */
+ int32_t cq;
+ /* number of packets gone to the CQ with this FID */
+ uint32_t pcount;
+};
+
+struct reorder_buffer_entry {
+ uint16_t num_fragments; /**< Number of packet fragments */
+ uint16_t fragment_index; /**< Points to the oldest valid frag */
+ uint8_t ready; /**< Entry is ready to be reordered */
+ struct rte_event fragments[SW_FRAGMENTS_MAX];
+};
+
+struct sw_iq {
+ struct sw_queue_chunk *head;
+ struct sw_queue_chunk *tail;
+ uint16_t head_idx;
+ uint16_t tail_idx;
+ uint16_t count;
+};
+
+struct sw_qid {
+ /* set when the QID has been initialized */
+ uint8_t initialized;
+ /* The type of this QID */
+ int8_t type;
+ /* Integer ID representing the queue. This is used in history lists,
+ * to identify the stage of processing.
+ */
+ uint32_t id;
+ struct sw_point_stats stats;
+
+ /* Internal priority rings for packets */
+ struct sw_iq iq[SW_IQS_MAX];
+ uint32_t iq_pkt_mask; /* A mask to indicate packets in an IQ */
+ uint64_t iq_pkt_count[SW_IQS_MAX];
+
+ /* Information on what CQs are polling this IQ */
+ uint32_t cq_num_mapped_cqs;
+ uint32_t cq_next_tx; /* cq to write next (non-atomic) packet */
+ uint32_t cq_map[SW_PORTS_MAX];
+ uint64_t to_port[SW_PORTS_MAX];
+
+ /* Track flow ids for atomic load balancing */
+ struct sw_fid_t fids[SW_QID_NUM_FIDS];
+
+ /* Track packet order for reordering when needed */
+ struct reorder_buffer_entry *reorder_buffer; /*< pkts await reorder */
+ struct rte_ring *reorder_buffer_freelist; /* available reorder slots */
+ uint32_t reorder_buffer_index; /* oldest valid reorder buffer entry */
+ uint32_t window_size; /* Used to wrap reorder_buffer_index */
+
+ uint8_t priority;
+};
+
+struct sw_hist_list_entry {
+ int32_t qid;
+ int32_t fid;
+ struct reorder_buffer_entry *rob_entry;
+};
+
+struct sw_evdev;
+
+struct sw_port {
+ /* new enqueue / dequeue API doesn't have an instance pointer, only the
+ * pointer to the port being enqueue/dequeued from
+ */
+ struct sw_evdev *sw;
+
+ /* set when the port is initialized */
+ uint8_t initialized;
+ /* A numeric ID for the port */
+ uint8_t id;
+
+ int16_t is_directed; /** Takes from a single directed QID */
+ /**
+ * For loadbalanced we can optimise pulling packets from
+ * producers if there is no reordering involved
+ */
+ int16_t num_ordered_qids;
+
+ /** Ring and buffer for pulling events from workers for scheduling */
+ struct rte_event_ring *rx_worker_ring __rte_cache_aligned;
+ /** Ring and buffer for pushing packets to workers after scheduling */
+ struct rte_event_ring *cq_worker_ring;
+
+ /* hole */
+
+ /* num releases yet to be completed on this port */
+ uint16_t outstanding_releases __rte_cache_aligned;
+ uint16_t inflight_max; /* app requested max inflights for this port */
+ uint16_t inflight_credits; /* num credits this port has right now */
+ uint8_t implicit_release; /* release events before dequeueing */
+
+ uint16_t last_dequeue_burst_sz; /* how big the burst was */
+ uint64_t last_dequeue_ticks; /* used to track burst processing time */
+ uint64_t avg_pkt_ticks; /* tracks average over NUM_SAMPLES burst */
+ uint64_t total_polls; /* how many polls were counted in stats */
+ uint64_t zero_polls; /* tracks polls returning nothing */
+ uint32_t poll_buckets[SW_NUM_POLL_BUCKETS];
+ /* bucket values in 4s for shorter reporting */
+
+ /* History list structs, containing info on pkts egressed to worker */
+ uint16_t hist_head __rte_cache_aligned;
+ uint16_t hist_tail;
+ uint16_t inflights;
+ struct sw_hist_list_entry hist_list[SW_PORT_HIST_LIST];
+
+ /* track packets in and out of this port */
+ struct sw_point_stats stats;
+
+
+ uint32_t pp_buf_start;
+ uint32_t pp_buf_count;
+ uint16_t cq_buf_count;
+ struct rte_event pp_buf[SCHED_DEQUEUE_BURST_SIZE];
+ struct rte_event cq_buf[MAX_SW_CONS_Q_DEPTH];
+
+ uint8_t num_qids_mapped;
+};
+
+struct sw_evdev {
+ struct rte_eventdev_data *data;
+
+ uint32_t port_count;
+ uint32_t qid_count;
+ uint32_t xstats_count;
+ struct sw_xstats_entry *xstats;
+ uint32_t xstats_count_mode_dev;
+ uint32_t xstats_count_mode_port;
+ uint32_t xstats_count_mode_queue;
+
+ /* Contains all ports - load balanced and directed */
+ struct sw_port ports[SW_PORTS_MAX] __rte_cache_aligned;
+
+ rte_atomic32_t inflights __rte_cache_aligned;
+
+ /*
+ * max events in this instance. Cached here for performance.
+ * (also available in data->conf.nb_events_limit)
+ */
+ uint32_t nb_events_limit;
+
+ /* Internal queues - one per logical queue */
+ struct sw_qid qids[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;
+ struct sw_queue_chunk *chunk_list_head;
+ struct sw_queue_chunk *chunks;
+
+ /* Cache how many packets are in each cq */
+ uint16_t cq_ring_space[SW_PORTS_MAX] __rte_cache_aligned;
+
+ /* Array of pointers to load-balanced QIDs sorted by priority level */
+ struct sw_qid *qids_prioritized[RTE_EVENT_MAX_QUEUES_PER_DEV];
+
+ /* Stats */
+ struct sw_point_stats stats __rte_cache_aligned;
+ uint64_t sched_called;
+ int32_t sched_quanta;
+ uint64_t sched_no_iq_enqueues;
+ uint64_t sched_no_cq_enqueues;
+ uint64_t sched_cq_qid_called;
+
+ uint8_t started;
+ uint32_t credit_update_quanta;
+
+ /* store num stats and offset of the stats for each port */
+ uint16_t xstats_count_per_port[SW_PORTS_MAX];
+ uint16_t xstats_offset_for_port[SW_PORTS_MAX];
+ /* store num stats and offset of the stats for each queue */
+ uint16_t xstats_count_per_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint16_t xstats_offset_for_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
+
+ uint32_t service_id;
+ char service_name[SW_PMD_NAME_MAX];
+};
+
+static inline struct sw_evdev *
+sw_pmd_priv(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+static inline const struct sw_evdev *
+sw_pmd_priv_const(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+uint16_t sw_event_enqueue(void *port, const struct rte_event *ev);
+uint16_t sw_event_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t num);
+
+uint16_t sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
+uint16_t sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
+ uint64_t wait);
+void sw_event_schedule(struct rte_eventdev *dev);
+int sw_xstats_init(struct sw_evdev *dev);
+int sw_xstats_uninit(struct sw_evdev *dev);
+int sw_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size);
+int sw_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n);
+uint64_t sw_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id);
+int sw_xstats_reset(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids);
+
+int test_sw_eventdev(void);
+
+#endif /* _SW_EVDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/event/sw/sw_evdev_log.h b/src/spdk/dpdk/drivers/event/sw/sw_evdev_log.h
new file mode 100644
index 00000000..f76825ab
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/sw/sw_evdev_log.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _SW_EVDEV_LOG_H_
+#define _SW_EVDEV_LOG_H_
+
+extern int eventdev_sw_log_level;
+
+#define SW_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, eventdev_sw_log_level, "%s" fmt "\n", \
+ __func__, ##args)
+
+#define SW_LOG_INFO(fmt, args...) \
+ SW_LOG_IMPL(INFO, fmt, ## args)
+
+#define SW_LOG_DBG(fmt, args...) \
+ SW_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define SW_LOG_ERR(fmt, args...) \
+ SW_LOG_IMPL(ERR, fmt, ## args)
+
+#endif /* _SW_EVDEV_LOG_H_ */
diff --git a/src/spdk/dpdk/drivers/event/sw/sw_evdev_scheduler.c b/src/spdk/dpdk/drivers/event/sw/sw_evdev_scheduler.c
new file mode 100644
index 00000000..e3a41e02
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/sw/sw_evdev_scheduler.c
@@ -0,0 +1,560 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <rte_ring.h>
+#include <rte_hash_crc.h>
+#include <rte_event_ring.h>
+#include "sw_evdev.h"
+#include "iq_chunk.h"
+
+#define SW_IQS_MASK (SW_IQS_MAX-1)
+
+/* Retrieve the highest priority IQ or -1 if no pkts available. Doing the
+ * CLZ twice is faster than caching the value due to data dependencies
+ */
+#define PKT_MASK_TO_IQ(pkts) \
+ (__builtin_ctz(pkts | (1 << SW_IQS_MAX)))
+
+#if SW_IQS_MAX != 4
+#error Misconfigured PRIO_TO_IQ caused by SW_IQS_MAX value change
+#endif
+#define PRIO_TO_IQ(prio) (prio >> 6)
+
+#define MAX_PER_IQ_DEQUEUE 48
+#define FLOWID_MASK (SW_QID_NUM_FIDS-1)
+/* use cheap bit mixing, we only need to lose a few bits */
+#define SW_HASH_FLOWID(f) (((f) ^ (f >> 10)) & FLOWID_MASK)
+
+static inline uint32_t
+sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
+ uint32_t iq_num, unsigned int count)
+{
+ struct rte_event qes[MAX_PER_IQ_DEQUEUE]; /* count <= MAX */
+ struct rte_event blocked_qes[MAX_PER_IQ_DEQUEUE];
+ uint32_t nb_blocked = 0;
+ uint32_t i;
+
+ if (count > MAX_PER_IQ_DEQUEUE)
+ count = MAX_PER_IQ_DEQUEUE;
+
+ /* This is the QID ID. The QID ID is static, hence it can be
+ * used to identify the stage of processing in history lists etc
+ */
+ uint32_t qid_id = qid->id;
+
+ iq_dequeue_burst(sw, &qid->iq[iq_num], qes, count);
+ for (i = 0; i < count; i++) {
+ const struct rte_event *qe = &qes[i];
+ const uint16_t flow_id = SW_HASH_FLOWID(qes[i].flow_id);
+ struct sw_fid_t *fid = &qid->fids[flow_id];
+ int cq = fid->cq;
+
+ if (cq < 0) {
+ uint32_t cq_idx = qid->cq_next_tx++;
+ if (qid->cq_next_tx == qid->cq_num_mapped_cqs)
+ qid->cq_next_tx = 0;
+ cq = qid->cq_map[cq_idx];
+
+ /* find least used */
+ int cq_free_cnt = sw->cq_ring_space[cq];
+ for (cq_idx = 0; cq_idx < qid->cq_num_mapped_cqs;
+ cq_idx++) {
+ int test_cq = qid->cq_map[cq_idx];
+ int test_cq_free = sw->cq_ring_space[test_cq];
+ if (test_cq_free > cq_free_cnt) {
+ cq = test_cq;
+ cq_free_cnt = test_cq_free;
+ }
+ }
+
+ fid->cq = cq; /* this pins early */
+ }
+
+ if (sw->cq_ring_space[cq] == 0 ||
+ sw->ports[cq].inflights == SW_PORT_HIST_LIST) {
+ blocked_qes[nb_blocked++] = *qe;
+ continue;
+ }
+
+ struct sw_port *p = &sw->ports[cq];
+
+ /* at this point we can queue up the packet on the cq_buf */
+ fid->pcount++;
+ p->cq_buf[p->cq_buf_count++] = *qe;
+ p->inflights++;
+ sw->cq_ring_space[cq]--;
+
+ int head = (p->hist_head++ & (SW_PORT_HIST_LIST-1));
+ p->hist_list[head].fid = flow_id;
+ p->hist_list[head].qid = qid_id;
+
+ p->stats.tx_pkts++;
+ qid->stats.tx_pkts++;
+ qid->to_port[cq]++;
+
+ /* if we just filled in the last slot, flush the buffer */
+ if (sw->cq_ring_space[cq] == 0) {
+ struct rte_event_ring *worker = p->cq_worker_ring;
+ rte_event_ring_enqueue_burst(worker, p->cq_buf,
+ p->cq_buf_count,
+ &sw->cq_ring_space[cq]);
+ p->cq_buf_count = 0;
+ }
+ }
+ iq_put_back(sw, &qid->iq[iq_num], blocked_qes, nb_blocked);
+
+ return count - nb_blocked;
+}
+
+static inline uint32_t
+sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
+ uint32_t iq_num, unsigned int count, int keep_order)
+{
+ uint32_t i;
+ uint32_t cq_idx = qid->cq_next_tx;
+
+ /* This is the QID ID. The QID ID is static, hence it can be
+ * used to identify the stage of processing in history lists etc
+ */
+ uint32_t qid_id = qid->id;
+
+ if (count > MAX_PER_IQ_DEQUEUE)
+ count = MAX_PER_IQ_DEQUEUE;
+
+ if (keep_order)
+ /* only schedule as many as we have reorder buffer entries */
+ count = RTE_MIN(count,
+ rte_ring_count(qid->reorder_buffer_freelist));
+
+ for (i = 0; i < count; i++) {
+ const struct rte_event *qe = iq_peek(&qid->iq[iq_num]);
+ uint32_t cq_check_count = 0;
+ uint32_t cq;
+
+ /*
+ * for parallel, just send to next available CQ in round-robin
+ * fashion. So scan for an available CQ. If all CQs are full
+ * just return and move on to next QID
+ */
+ do {
+ if (++cq_check_count > qid->cq_num_mapped_cqs)
+ goto exit;
+ cq = qid->cq_map[cq_idx];
+ if (++cq_idx == qid->cq_num_mapped_cqs)
+ cq_idx = 0;
+ } while (rte_event_ring_free_count(
+ sw->ports[cq].cq_worker_ring) == 0 ||
+ sw->ports[cq].inflights == SW_PORT_HIST_LIST);
+
+ struct sw_port *p = &sw->ports[cq];
+ if (sw->cq_ring_space[cq] == 0 ||
+ p->inflights == SW_PORT_HIST_LIST)
+ break;
+
+ sw->cq_ring_space[cq]--;
+
+ qid->stats.tx_pkts++;
+
+ const int head = (p->hist_head & (SW_PORT_HIST_LIST-1));
+ p->hist_list[head].fid = SW_HASH_FLOWID(qe->flow_id);
+ p->hist_list[head].qid = qid_id;
+
+ if (keep_order)
+ rte_ring_sc_dequeue(qid->reorder_buffer_freelist,
+ (void *)&p->hist_list[head].rob_entry);
+
+ sw->ports[cq].cq_buf[sw->ports[cq].cq_buf_count++] = *qe;
+ iq_pop(sw, &qid->iq[iq_num]);
+
+ rte_compiler_barrier();
+ p->inflights++;
+ p->stats.tx_pkts++;
+ p->hist_head++;
+ }
+exit:
+ qid->cq_next_tx = cq_idx;
+ return i;
+}
+
+static uint32_t
+sw_schedule_dir_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
+ uint32_t iq_num, unsigned int count __rte_unused)
+{
+ uint32_t cq_id = qid->cq_map[0];
+ struct sw_port *port = &sw->ports[cq_id];
+
+ /* get max burst enq size for cq_ring */
+ uint32_t count_free = sw->cq_ring_space[cq_id];
+ if (count_free == 0)
+ return 0;
+
+ /* burst dequeue from the QID IQ ring */
+ struct sw_iq *iq = &qid->iq[iq_num];
+ uint32_t ret = iq_dequeue_burst(sw, iq,
+ &port->cq_buf[port->cq_buf_count], count_free);
+ port->cq_buf_count += ret;
+
+ /* Update QID, Port and Total TX stats */
+ qid->stats.tx_pkts += ret;
+ port->stats.tx_pkts += ret;
+
+ /* Subtract credits from cached value */
+ sw->cq_ring_space[cq_id] -= ret;
+
+ return ret;
+}
+
+static uint32_t
+sw_schedule_qid_to_cq(struct sw_evdev *sw)
+{
+ uint32_t pkts = 0;
+ uint32_t qid_idx;
+
+ sw->sched_cq_qid_called++;
+
+ for (qid_idx = 0; qid_idx < sw->qid_count; qid_idx++) {
+ struct sw_qid *qid = sw->qids_prioritized[qid_idx];
+
+ int type = qid->type;
+ int iq_num = PKT_MASK_TO_IQ(qid->iq_pkt_mask);
+
+ /* zero mapped CQs indicates directed */
+ if (iq_num >= SW_IQS_MAX)
+ continue;
+
+ uint32_t pkts_done = 0;
+ uint32_t count = iq_count(&qid->iq[iq_num]);
+
+ if (count > 0) {
+ if (type == SW_SCHED_TYPE_DIRECT)
+ pkts_done += sw_schedule_dir_to_cq(sw, qid,
+ iq_num, count);
+ else if (type == RTE_SCHED_TYPE_ATOMIC)
+ pkts_done += sw_schedule_atomic_to_cq(sw, qid,
+ iq_num, count);
+ else
+ pkts_done += sw_schedule_parallel_to_cq(sw, qid,
+ iq_num, count,
+ type == RTE_SCHED_TYPE_ORDERED);
+ }
+
+ /* Check if the IQ that was polled is now empty, and unset it
+ * in the IQ mask if its empty.
+ */
+ int all_done = (pkts_done == count);
+
+ qid->iq_pkt_mask &= ~(all_done << (iq_num));
+ pkts += pkts_done;
+ }
+
+ return pkts;
+}
+
+/* This function will perform re-ordering of packets, and injecting into
+ * the appropriate QID IQ. As LB and DIR QIDs are in the same array, but *NOT*
+ * contiguous in that array, this function accepts a "range" of QIDs to scan.
+ */
+static uint16_t
+sw_schedule_reorder(struct sw_evdev *sw, int qid_start, int qid_end)
+{
+ /* Perform egress reordering */
+ struct rte_event *qe;
+ uint32_t pkts_iter = 0;
+
+ for (; qid_start < qid_end; qid_start++) {
+ struct sw_qid *qid = &sw->qids[qid_start];
+ int i, num_entries_in_use;
+
+ if (qid->type != RTE_SCHED_TYPE_ORDERED)
+ continue;
+
+ num_entries_in_use = rte_ring_free_count(
+ qid->reorder_buffer_freelist);
+
+ for (i = 0; i < num_entries_in_use; i++) {
+ struct reorder_buffer_entry *entry;
+ int j;
+
+ entry = &qid->reorder_buffer[qid->reorder_buffer_index];
+
+ if (!entry->ready)
+ break;
+
+ for (j = 0; j < entry->num_fragments; j++) {
+ uint16_t dest_qid;
+ uint16_t dest_iq;
+
+ int idx = entry->fragment_index + j;
+ qe = &entry->fragments[idx];
+
+ dest_qid = qe->queue_id;
+ dest_iq = PRIO_TO_IQ(qe->priority);
+
+ if (dest_qid >= sw->qid_count) {
+ sw->stats.rx_dropped++;
+ continue;
+ }
+
+ pkts_iter++;
+
+ struct sw_qid *q = &sw->qids[dest_qid];
+ struct sw_iq *iq = &q->iq[dest_iq];
+
+ /* we checked for space above, so enqueue must
+ * succeed
+ */
+ iq_enqueue(sw, iq, qe);
+ q->iq_pkt_mask |= (1 << (dest_iq));
+ q->iq_pkt_count[dest_iq]++;
+ q->stats.rx_pkts++;
+ }
+
+ entry->ready = (j != entry->num_fragments);
+ entry->num_fragments -= j;
+ entry->fragment_index += j;
+
+ if (!entry->ready) {
+ entry->fragment_index = 0;
+
+ rte_ring_sp_enqueue(
+ qid->reorder_buffer_freelist,
+ entry);
+
+ qid->reorder_buffer_index++;
+ qid->reorder_buffer_index %= qid->window_size;
+ }
+ }
+ }
+ return pkts_iter;
+}
+
+static __rte_always_inline void
+sw_refill_pp_buf(struct sw_evdev *sw, struct sw_port *port)
+{
+ RTE_SET_USED(sw);
+ struct rte_event_ring *worker = port->rx_worker_ring;
+ port->pp_buf_start = 0;
+ port->pp_buf_count = rte_event_ring_dequeue_burst(worker, port->pp_buf,
+ RTE_DIM(port->pp_buf), NULL);
+}
+
+static __rte_always_inline uint32_t
+__pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
+{
+ static struct reorder_buffer_entry dummy_rob;
+ uint32_t pkts_iter = 0;
+ struct sw_port *port = &sw->ports[port_id];
+
+ /* If shadow ring has 0 pkts, pull from worker ring */
+ if (port->pp_buf_count == 0)
+ sw_refill_pp_buf(sw, port);
+
+ while (port->pp_buf_count) {
+ const struct rte_event *qe = &port->pp_buf[port->pp_buf_start];
+ struct sw_hist_list_entry *hist_entry = NULL;
+ uint8_t flags = qe->op;
+ const uint16_t eop = !(flags & QE_FLAG_NOT_EOP);
+ int needs_reorder = 0;
+ /* if no-reordering, having PARTIAL == NEW */
+ if (!allow_reorder && !eop)
+ flags = QE_FLAG_VALID;
+
+ /*
+ * if we don't have space for this packet in an IQ,
+ * then move on to next queue. Technically, for a
+ * packet that needs reordering, we don't need to check
+ * here, but it simplifies things not to special-case
+ */
+ uint32_t iq_num = PRIO_TO_IQ(qe->priority);
+ struct sw_qid *qid = &sw->qids[qe->queue_id];
+
+ /* now process based on flags. Note that for directed
+ * queues, the enqueue_flush masks off all but the
+ * valid flag. This makes FWD and PARTIAL enqueues just
+ * NEW type, and makes DROPS no-op calls.
+ */
+ if ((flags & QE_FLAG_COMPLETE) && port->inflights > 0) {
+ const uint32_t hist_tail = port->hist_tail &
+ (SW_PORT_HIST_LIST - 1);
+
+ hist_entry = &port->hist_list[hist_tail];
+ const uint32_t hist_qid = hist_entry->qid;
+ const uint32_t hist_fid = hist_entry->fid;
+
+ struct sw_fid_t *fid =
+ &sw->qids[hist_qid].fids[hist_fid];
+ fid->pcount -= eop;
+ if (fid->pcount == 0)
+ fid->cq = -1;
+
+ if (allow_reorder) {
+ /* set reorder ready if an ordered QID */
+ uintptr_t rob_ptr =
+ (uintptr_t)hist_entry->rob_entry;
+ const uintptr_t valid = (rob_ptr != 0);
+ needs_reorder = valid;
+ rob_ptr |=
+ ((valid - 1) & (uintptr_t)&dummy_rob);
+ struct reorder_buffer_entry *tmp_rob_ptr =
+ (struct reorder_buffer_entry *)rob_ptr;
+ tmp_rob_ptr->ready = eop * needs_reorder;
+ }
+
+ port->inflights -= eop;
+ port->hist_tail += eop;
+ }
+ if (flags & QE_FLAG_VALID) {
+ port->stats.rx_pkts++;
+
+ if (allow_reorder && needs_reorder) {
+ struct reorder_buffer_entry *rob_entry =
+ hist_entry->rob_entry;
+
+ hist_entry->rob_entry = NULL;
+ /* Although fragmentation not currently
+ * supported by eventdev API, we support it
+ * here. Open: How do we alert the user that
+ * they've exceeded max frags?
+ */
+ int num_frag = rob_entry->num_fragments;
+ if (num_frag == SW_FRAGMENTS_MAX)
+ sw->stats.rx_dropped++;
+ else {
+ int idx = rob_entry->num_fragments++;
+ rob_entry->fragments[idx] = *qe;
+ }
+ goto end_qe;
+ }
+
+ /* Use the iq_num from above to push the QE
+ * into the qid at the right priority
+ */
+
+ qid->iq_pkt_mask |= (1 << (iq_num));
+ iq_enqueue(sw, &qid->iq[iq_num], qe);
+ qid->iq_pkt_count[iq_num]++;
+ qid->stats.rx_pkts++;
+ pkts_iter++;
+ }
+
+end_qe:
+ port->pp_buf_start++;
+ port->pp_buf_count--;
+ } /* while (avail_qes) */
+
+ return pkts_iter;
+}
+
+static uint32_t
+sw_schedule_pull_port_lb(struct sw_evdev *sw, uint32_t port_id)
+{
+ return __pull_port_lb(sw, port_id, 1);
+}
+
+static uint32_t
+sw_schedule_pull_port_no_reorder(struct sw_evdev *sw, uint32_t port_id)
+{
+ return __pull_port_lb(sw, port_id, 0);
+}
+
+static uint32_t
+sw_schedule_pull_port_dir(struct sw_evdev *sw, uint32_t port_id)
+{
+ uint32_t pkts_iter = 0;
+ struct sw_port *port = &sw->ports[port_id];
+
+ /* If shadow ring has 0 pkts, pull from worker ring */
+ if (port->pp_buf_count == 0)
+ sw_refill_pp_buf(sw, port);
+
+ while (port->pp_buf_count) {
+ const struct rte_event *qe = &port->pp_buf[port->pp_buf_start];
+ uint8_t flags = qe->op;
+
+ if ((flags & QE_FLAG_VALID) == 0)
+ goto end_qe;
+
+ uint32_t iq_num = PRIO_TO_IQ(qe->priority);
+ struct sw_qid *qid = &sw->qids[qe->queue_id];
+ struct sw_iq *iq = &qid->iq[iq_num];
+
+ port->stats.rx_pkts++;
+
+ /* Use the iq_num from above to push the QE
+ * into the qid at the right priority
+ */
+ qid->iq_pkt_mask |= (1 << (iq_num));
+ iq_enqueue(sw, iq, qe);
+ qid->iq_pkt_count[iq_num]++;
+ qid->stats.rx_pkts++;
+ pkts_iter++;
+
+end_qe:
+ port->pp_buf_start++;
+ port->pp_buf_count--;
+ } /* while port->pp_buf_count */
+
+ return pkts_iter;
+}
+
+void
+sw_event_schedule(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ uint32_t in_pkts, out_pkts;
+ uint32_t out_pkts_total = 0, in_pkts_total = 0;
+ int32_t sched_quanta = sw->sched_quanta;
+ uint32_t i;
+
+ sw->sched_called++;
+ if (unlikely(!sw->started))
+ return;
+
+ do {
+ uint32_t in_pkts_this_iteration = 0;
+
+ /* Pull from rx_ring for ports */
+ do {
+ in_pkts = 0;
+ for (i = 0; i < sw->port_count; i++)
+ if (sw->ports[i].is_directed)
+ in_pkts += sw_schedule_pull_port_dir(sw, i);
+ else if (sw->ports[i].num_ordered_qids > 0)
+ in_pkts += sw_schedule_pull_port_lb(sw, i);
+ else
+ in_pkts += sw_schedule_pull_port_no_reorder(sw, i);
+
+ /* QID scan for re-ordered */
+ in_pkts += sw_schedule_reorder(sw, 0,
+ sw->qid_count);
+ in_pkts_this_iteration += in_pkts;
+ } while (in_pkts > 4 &&
+ (int)in_pkts_this_iteration < sched_quanta);
+
+ out_pkts = sw_schedule_qid_to_cq(sw);
+ out_pkts_total += out_pkts;
+ in_pkts_total += in_pkts_this_iteration;
+
+ if (in_pkts == 0 && out_pkts == 0)
+ break;
+ } while ((int)out_pkts_total < sched_quanta);
+
+ sw->stats.tx_pkts += out_pkts_total;
+ sw->stats.rx_pkts += in_pkts_total;
+
+ sw->sched_no_iq_enqueues += (in_pkts_total == 0);
+ sw->sched_no_cq_enqueues += (out_pkts_total == 0);
+
+ /* push all the internal buffered QEs in port->cq_ring to the
+ * worker cores: aka, do the ring transfers batched.
+ */
+ for (i = 0; i < sw->port_count; i++) {
+ struct rte_event_ring *worker = sw->ports[i].cq_worker_ring;
+ rte_event_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
+ sw->ports[i].cq_buf_count,
+ &sw->cq_ring_space[i]);
+ sw->ports[i].cq_buf_count = 0;
+ }
+
+}
diff --git a/src/spdk/dpdk/drivers/event/sw/sw_evdev_selftest.c b/src/spdk/dpdk/drivers/event/sw/sw_evdev_selftest.c
new file mode 100644
index 00000000..c40912db
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/sw/sw_evdev_selftest.c
@@ -0,0 +1,3324 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_eventdev.h>
+#include <rte_pause.h>
+#include <rte_service.h>
+#include <rte_service_component.h>
+#include <rte_bus_vdev.h>
+
+#include "sw_evdev.h"
+
+#define MAX_PORTS 16
+#define MAX_QIDS 16
+#define NUM_PACKETS (1<<18)
+#define DEQUEUE_DEPTH 128
+
+static int evdev;
+
+struct test {
+ struct rte_mempool *mbuf_pool;
+ uint8_t port[MAX_PORTS];
+ uint8_t qid[MAX_QIDS];
+ int nb_qids;
+ uint32_t service_id;
+};
+
+static struct rte_event release_ev;
+
+static inline struct rte_mbuf *
+rte_gen_arp(int portid, struct rte_mempool *mp)
+{
+ /*
+ * len = 14 + 46
+ * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
+ */
+ static const uint8_t arp_request[] = {
+ /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
+ 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
+ /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
+ 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
+ /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+ };
+ struct rte_mbuf *m;
+ int pkt_len = sizeof(arp_request) - 1;
+
+ m = rte_pktmbuf_alloc(mp);
+ if (!m)
+ return 0;
+
+ memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
+ arp_request, pkt_len);
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+ rte_pktmbuf_data_len(m) = pkt_len;
+
+ RTE_SET_USED(portid);
+
+ return m;
+}
+
+static void
+xstats_print(void)
+{
+ const uint32_t XSTATS_MAX = 1024;
+ uint32_t i;
+ uint32_t ids[XSTATS_MAX];
+ uint64_t values[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+ for (i = 0; i < XSTATS_MAX; i++)
+ ids[i] = i;
+
+ /* Device names / values */
+ int ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE, 0,
+ xstats_names, ids, XSTATS_MAX);
+ if (ret < 0) {
+ printf("%d: xstats names get() returned error\n",
+ __LINE__);
+ return;
+ }
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, ret);
+ if (ret > (signed int)XSTATS_MAX)
+ printf("%s %d: more xstats available than space\n",
+ __func__, __LINE__);
+ for (i = 0; (signed int)i < ret; i++) {
+ printf("%d : %s : %"PRIu64"\n",
+ i, xstats_names[i].name, values[i]);
+ }
+
+ /* Port names / values */
+ ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 0,
+ xstats_names, ids, XSTATS_MAX);
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 1,
+ ids, values, ret);
+ if (ret > (signed int)XSTATS_MAX)
+ printf("%s %d: more xstats available than space\n",
+ __func__, __LINE__);
+ for (i = 0; (signed int)i < ret; i++) {
+ printf("%d : %s : %"PRIu64"\n",
+ i, xstats_names[i].name, values[i]);
+ }
+
+ /* Queue names / values */
+ ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+ xstats_names, ids, XSTATS_MAX);
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ 1, ids, values, ret);
+ if (ret > (signed int)XSTATS_MAX)
+ printf("%s %d: more xstats available than space\n",
+ __func__, __LINE__);
+ for (i = 0; (signed int)i < ret; i++) {
+ printf("%d : %s : %"PRIu64"\n",
+ i, xstats_names[i].name, values[i]);
+ }
+}
+
+/* initialization and config */
+static inline int
+init(struct test *t, int nb_queues, int nb_ports)
+{
+ struct rte_event_dev_config config = {
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
+ .nb_event_queue_flows = 1024,
+ .nb_events_limit = 4096,
+ .nb_event_port_dequeue_depth = DEQUEUE_DEPTH,
+ .nb_event_port_enqueue_depth = 128,
+ };
+ int ret;
+
+ void *temp = t->mbuf_pool; /* save and restore mbuf pool */
+
+ memset(t, 0, sizeof(*t));
+ t->mbuf_pool = temp;
+
+ ret = rte_event_dev_configure(evdev, &config);
+ if (ret < 0)
+ printf("%d: Error configuring device\n", __LINE__);
+ return ret;
+};
+
+static inline int
+create_ports(struct test *t, int num_ports)
+{
+ int i;
+ static const struct rte_event_port_conf conf = {
+ .new_event_threshold = 1024,
+ .dequeue_depth = 32,
+ .enqueue_depth = 64,
+ .disable_implicit_release = 0,
+ };
+ if (num_ports > MAX_PORTS)
+ return -1;
+
+ for (i = 0; i < num_ports; i++) {
+ if (rte_event_port_setup(evdev, i, &conf) < 0) {
+ printf("Error setting up port %d\n", i);
+ return -1;
+ }
+ t->port[i] = i;
+ }
+
+ return 0;
+}
+
+static inline int
+create_lb_qids(struct test *t, int num_qids, uint32_t flags)
+{
+ int i;
+
+ /* Q creation */
+ const struct rte_event_queue_conf conf = {
+ .schedule_type = flags,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
+ if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ t->qid[i] = i;
+ }
+ t->nb_qids += num_qids;
+ if (t->nb_qids > MAX_QIDS)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+create_atomic_qids(struct test *t, int num_qids)
+{
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
+}
+
+static inline int
+create_ordered_qids(struct test *t, int num_qids)
+{
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
+}
+
+
+static inline int
+create_unordered_qids(struct test *t, int num_qids)
+{
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
+}
+
+static inline int
+create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
+{
+ int i;
+
+ /* Q creation */
+ static const struct rte_event_queue_conf conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
+ };
+
+ for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
+ if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ t->qid[i] = i;
+
+ if (rte_event_port_link(evdev, ports[i - t->nb_qids],
+ &t->qid[i], NULL, 1) != 1) {
+ printf("%d: error creating link for qid %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ }
+ t->nb_qids += num_qids;
+ if (t->nb_qids > MAX_QIDS)
+ return -1;
+
+ return 0;
+}
+
+/* destruction */
+static inline int
+cleanup(struct test *t __rte_unused)
+{
+ rte_event_dev_stop(evdev);
+ rte_event_dev_close(evdev);
+ return 0;
+};
+
+struct test_event_dev_stats {
+ uint64_t rx_pkts; /**< Total packets received */
+ uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */
+ uint64_t tx_pkts; /**< Total packets transmitted */
+
+ /** Packets received on this port */
+ uint64_t port_rx_pkts[MAX_PORTS];
+ /** Packets dropped on this port */
+ uint64_t port_rx_dropped[MAX_PORTS];
+ /** Packets inflight on this port */
+ uint64_t port_inflight[MAX_PORTS];
+ /** Packets transmitted on this port */
+ uint64_t port_tx_pkts[MAX_PORTS];
+ /** Packets received on this qid */
+ uint64_t qid_rx_pkts[MAX_QIDS];
+ /** Packets dropped on this qid */
+ uint64_t qid_rx_dropped[MAX_QIDS];
+ /** Packets transmitted on this qid */
+ uint64_t qid_tx_pkts[MAX_QIDS];
+};
+
+static inline int
+test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
+{
+ static uint32_t i;
+ static uint32_t total_ids[3]; /* rx, tx and drop */
+ static uint32_t port_rx_pkts_ids[MAX_PORTS];
+ static uint32_t port_rx_dropped_ids[MAX_PORTS];
+ static uint32_t port_inflight_ids[MAX_PORTS];
+ static uint32_t port_tx_pkts_ids[MAX_PORTS];
+ static uint32_t qid_rx_pkts_ids[MAX_QIDS];
+ static uint32_t qid_rx_dropped_ids[MAX_QIDS];
+ static uint32_t qid_tx_pkts_ids[MAX_QIDS];
+
+
+ stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
+ "dev_rx", &total_ids[0]);
+ stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
+ "dev_drop", &total_ids[1]);
+ stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
+ "dev_tx", &total_ids[2]);
+ for (i = 0; i < MAX_PORTS; i++) {
+ char name[32];
+ snprintf(name, sizeof(name), "port_%u_rx", i);
+ stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &port_rx_pkts_ids[i]);
+ snprintf(name, sizeof(name), "port_%u_drop", i);
+ stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &port_rx_dropped_ids[i]);
+ snprintf(name, sizeof(name), "port_%u_inflight", i);
+ stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &port_inflight_ids[i]);
+ snprintf(name, sizeof(name), "port_%u_tx", i);
+ stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &port_tx_pkts_ids[i]);
+ }
+ for (i = 0; i < MAX_QIDS; i++) {
+ char name[32];
+ snprintf(name, sizeof(name), "qid_%u_rx", i);
+ stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &qid_rx_pkts_ids[i]);
+ snprintf(name, sizeof(name), "qid_%u_drop", i);
+ stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &qid_rx_dropped_ids[i]);
+ snprintf(name, sizeof(name), "qid_%u_tx", i);
+ stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &qid_tx_pkts_ids[i]);
+ }
+
+ return 0;
+}
+
+/* run_prio_packet_test
+ * This performs a basic packet priority check on the test instance passed in.
+ * It is factored out of the main priority tests as the same tests must be
+ * performed to ensure prioritization of each type of QID.
+ *
+ * Requirements:
+ * - An initialized test structure, including mempool
+ * - t->port[0] is initialized for both Enq / Deq of packets to the QID
+ * - t->qid[0] is the QID to be tested
+ * - if LB QID, the CQ must be mapped to the QID.
+ */
+static int
+run_prio_packet_test(struct test *t)
+{
+ int err;
+ const uint32_t MAGIC_SEQN[] = {4711, 1234};
+ const uint32_t PRIORITY[] = {
+ RTE_EVENT_DEV_PRIORITY_NORMAL,
+ RTE_EVENT_DEV_PRIORITY_HIGHEST
+ };
+ unsigned int i;
+ for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
+ /* generate pkt and enqueue */
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ arp->seqn = MAGIC_SEQN[i];
+
+ ev = (struct rte_event){
+ .priority = PRIORITY[i],
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .mbuf = arp
+ };
+ err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+ if (err < 0) {
+ printf("%d: error failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ struct test_event_dev_stats stats;
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: error failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.port_rx_pkts[t->port[0]] != 2) {
+ printf("%d: error stats incorrect for directed port\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ struct rte_event ev, ev2;
+ uint32_t deq_pkts;
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: error failed to deq\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
+ printf("%d: first packet out not highest priority\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ rte_pktmbuf_free(ev.mbuf);
+
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: error failed to deq\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
+ printf("%d: second packet out not lower priority\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ rte_pktmbuf_free(ev2.mbuf);
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+test_single_directed_packet(struct test *t)
+{
+ const int rx_enq = 0;
+ const int wrk_enq = 2;
+ int err;
+
+ /* Create instance with 3 directed QIDs going to 3 ports */
+ if (init(t, 3, 3) < 0 ||
+ create_ports(t, 3) < 0 ||
+ create_directed_qids(t, 3, t->port) < 0)
+ return -1;
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** FORWARD ****************/
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = wrk_enq,
+ .mbuf = arp,
+ };
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ const uint32_t MAGIC_SEQN = 4711;
+ arp->seqn = MAGIC_SEQN;
+
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
+ if (err < 0) {
+ printf("%d: error failed to enqueue\n", __LINE__);
+ return -1;
+ }
+
+ /* Run schedule() as dir packets may need to be re-ordered */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ struct test_event_dev_stats stats;
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: error failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.port_rx_pkts[rx_enq] != 1) {
+ printf("%d: error stats incorrect for directed port\n",
+ __LINE__);
+ return -1;
+ }
+
+ uint32_t deq_pkts;
+ deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: error failed to deq\n", __LINE__);
+ return -1;
+ }
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_rx_pkts[wrk_enq] != 0 &&
+ stats.port_rx_pkts[wrk_enq] != 1) {
+ printf("%d: error directed stats post-dequeue\n", __LINE__);
+ return -1;
+ }
+
+ if (ev.mbuf->seqn != MAGIC_SEQN) {
+ printf("%d: error magic sequence number not dequeued\n",
+ __LINE__);
+ return -1;
+ }
+
+ rte_pktmbuf_free(ev.mbuf);
+ cleanup(t);
+ return 0;
+}
+
+static int
+test_directed_forward_credits(struct test *t)
+{
+ uint32_t i;
+ int32_t err;
+
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_directed_qids(t, 1, t->port) < 0)
+ return -1;
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = 0,
+ };
+
+ for (i = 0; i < 1000; i++) {
+ err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
+ if (err < 0) {
+ printf("%d: error failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ uint32_t deq_pkts;
+ deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: error failed to deq\n", __LINE__);
+ return -1;
+ }
+
+ /* re-write event to be a forward, and continue looping it */
+ ev.op = RTE_EVENT_OP_FORWARD;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+
+static int
+test_priority_directed(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_directed_qids(t, 1, t->port) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ return run_prio_packet_test(t);
+}
+
+static int
+test_priority_atomic(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* map the QID */
+ if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping qid to port\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ return run_prio_packet_test(t);
+}
+
+static int
+test_priority_ordered(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_ordered_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* map the QID */
+ if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping qid to port\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ return run_prio_packet_test(t);
+}
+
+static int
+test_priority_unordered(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_unordered_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* map the QID */
+ if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping qid to port\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ return run_prio_packet_test(t);
+}
+
+static int
+burst_packets(struct test *t)
+{
+ /************** CONFIG ****************/
+ uint32_t i;
+ int err;
+ int ret;
+
+ /* Create instance with 2 ports and 2 queues */
+ if (init(t, 2, 2) < 0 ||
+ create_ports(t, 2) < 0 ||
+ create_atomic_qids(t, 2) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
+ if (ret != 1) {
+ printf("%d: error mapping lb qid0\n", __LINE__);
+ return -1;
+ }
+ ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
+ if (ret != 1) {
+ printf("%d: error mapping lb qid1\n", __LINE__);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** FORWARD ****************/
+ const uint32_t rx_port = 0;
+ const uint32_t NUM_PKTS = 2;
+
+ for (i = 0; i < NUM_PKTS; i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: error generating pkt\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = i % 2,
+ .flow_id = i % 3,
+ .mbuf = arp,
+ };
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* Check stats for all NUM_PKTS arrived to sched core */
+ struct test_event_dev_stats stats;
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+ if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
+ printf("%d: Sched core didn't receive all %d pkts\n",
+ __LINE__, NUM_PKTS);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ uint32_t deq_pkts;
+ int p;
+
+ deq_pkts = 0;
+ /******** DEQ QID 1 *******/
+ do {
+ struct rte_event ev;
+ p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
+ deq_pkts += p;
+ rte_pktmbuf_free(ev.mbuf);
+ } while (p);
+
+ if (deq_pkts != NUM_PKTS/2) {
+ printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
+ __LINE__);
+ return -1;
+ }
+
+ /******** DEQ QID 2 *******/
+ deq_pkts = 0;
+ do {
+ struct rte_event ev;
+ p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
+ deq_pkts += p;
+ rte_pktmbuf_free(ev.mbuf);
+ } while (p);
+ if (deq_pkts != NUM_PKTS/2) {
+ printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
+ __LINE__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+abuse_inflights(struct test *t)
+{
+ const int rx_enq = 0;
+ const int wrk_enq = 2;
+ int err;
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /* Enqueue op only */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+
+ /* schedule */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ struct test_event_dev_stats stats;
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.rx_pkts != 0 ||
+ stats.tx_pkts != 0 ||
+ stats.port_inflight[wrk_enq] != 0) {
+ printf("%d: Sched core didn't handle pkt as expected\n",
+ __LINE__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+xstats_tests(struct test *t)
+{
+ const int wrk_enq = 2;
+ int err;
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ const uint32_t XSTATS_MAX = 1024;
+
+ uint32_t i;
+ uint32_t ids[XSTATS_MAX];
+ uint64_t values[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+ for (i = 0; i < XSTATS_MAX; i++)
+ ids[i] = i;
+
+ /* Device names / values */
+ int ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, xstats_names, ids, XSTATS_MAX);
+ if (ret != 6) {
+ printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, ret);
+ if (ret != 6) {
+ printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ /* Port names / values */
+ ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 0,
+ xstats_names, ids, XSTATS_MAX);
+ if (ret != 21) {
+ printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 0,
+ ids, values, ret);
+ if (ret != 21) {
+ printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ /* Queue names / values */
+ ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ 0, xstats_names, ids, XSTATS_MAX);
+ if (ret != 16) {
+ printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ /* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ 1, ids, values, ret);
+ if (ret != -EINVAL) {
+ printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ 0, ids, values, ret);
+ if (ret != 16) {
+ printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ /* enqueue packets to check values */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ ev.queue_id = t->qid[i];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ ev.flow_id = 7;
+ arp->seqn = i;
+
+ int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* Device names / values */
+ int num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE, 0,
+ xstats_names, ids, XSTATS_MAX);
+ if (num_stats < 0)
+ goto fail;
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, num_stats);
+ static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
+ for (i = 0; (signed int)i < ret; i++) {
+ if (expected[i] != values[i]) {
+ printf(
+ "%d Error xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], expected[i]);
+ goto fail;
+ }
+ }
+
+ ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, NULL, 0);
+
+ /* ensure reset statistics are zero-ed */
+ static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, num_stats);
+ for (i = 0; (signed int)i < ret; i++) {
+ if (expected_zero[i] != values[i]) {
+ printf(
+ "%d Error, xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], expected_zero[i]);
+ goto fail;
+ }
+ }
+
+ /* port reset checks */
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 0,
+ xstats_names, ids, XSTATS_MAX);
+ if (num_stats < 0)
+ goto fail;
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
+ 0, ids, values, num_stats);
+
+ static const uint64_t port_expected[] = {
+ 3 /* rx */,
+ 0 /* tx */,
+ 0 /* drop */,
+ 0 /* inflights */,
+ 0 /* avg pkt cycles */,
+ 29 /* credits */,
+ 0 /* rx ring used */,
+ 4096 /* rx ring free */,
+ 0 /* cq ring used */,
+ 32 /* cq ring free */,
+ 0 /* dequeue calls */,
+ /* 10 dequeue burst buckets */
+ 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ };
+ if (ret != RTE_DIM(port_expected)) {
+ printf(
+ "%s %d: wrong number of port stats (%d), expected %zu\n",
+ __func__, __LINE__, ret, RTE_DIM(port_expected));
+ }
+
+ for (i = 0; (signed int)i < ret; i++) {
+ if (port_expected[i] != values[i]) {
+ printf(
+ "%s : %d: Error stat %s is %"PRIu64
+ ", expected %"PRIu64"\n",
+ __func__, __LINE__, xstats_names[i].name,
+ values[i], port_expected[i]);
+ goto fail;
+ }
+ }
+
+ ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
+ 0, NULL, 0);
+
+ /* ensure reset statistics are zero-ed */
+ static const uint64_t port_expected_zero[] = {
+ 0 /* rx */,
+ 0 /* tx */,
+ 0 /* drop */,
+ 0 /* inflights */,
+ 0 /* avg pkt cycles */,
+ 29 /* credits */,
+ 0 /* rx ring used */,
+ 4096 /* rx ring free */,
+ 0 /* cq ring used */,
+ 32 /* cq ring free */,
+ 0 /* dequeue calls */,
+ /* 10 dequeue burst buckets */
+ 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ };
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ 0, ids, values, num_stats);
+ for (i = 0; (signed int)i < ret; i++) {
+ if (port_expected_zero[i] != values[i]) {
+ printf(
+ "%d, Error, xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], port_expected_zero[i]);
+ goto fail;
+ }
+ }
+
+ /* QUEUE STATS TESTS */
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+ xstats_names, ids, XSTATS_MAX);
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
+ 0, ids, values, num_stats);
+ if (ret < 0) {
+ printf("xstats get returned %d\n", ret);
+ goto fail;
+ }
+ if ((unsigned int)ret > XSTATS_MAX)
+ printf("%s %d: more xstats available than space\n",
+ __func__, __LINE__);
+
+ static const uint64_t queue_expected[] = {
+ 3 /* rx */,
+ 3 /* tx */,
+ 0 /* drop */,
+ 3 /* inflights */,
+ 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 3,
+ 0, 0,
+ };
+ for (i = 0; (signed int)i < ret; i++) {
+ if (queue_expected[i] != values[i]) {
+ printf(
+ "%d, Error, xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], queue_expected[i]);
+ goto fail;
+ }
+ }
+
+ /* Reset the queue stats here */
+ ret = rte_event_dev_xstats_reset(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+ NULL,
+ 0);
+
+ /* Verify that the resetable stats are reset, and others are not */
+ static const uint64_t queue_expected_zero[] = {
+ 0 /* rx */,
+ 0 /* tx */,
+ 0 /* drop */,
+ 3 /* inflight */,
+ 0, 0, 0, 0, /* 4 iq used */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 0,
+ 0, 0,
+ };
+
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+ ids, values, num_stats);
+ int fails = 0;
+ for (i = 0; (signed int)i < ret; i++) {
+ if (queue_expected_zero[i] != values[i]) {
+ printf(
+ "%d, Error, xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], queue_expected_zero[i]);
+ fails++;
+ }
+ }
+ if (fails) {
+ printf("%d : %d of values were not as expected above\n",
+ __LINE__, fails);
+ goto fail;
+ }
+
+ cleanup(t);
+ return 0;
+
+fail:
+ rte_event_dev_dump(0, stdout);
+ cleanup(t);
+ return -1;
+}
+
+
+static int
+xstats_id_abuse_tests(struct test *t)
+{
+ int err;
+ const uint32_t XSTATS_MAX = 1024;
+ const uint32_t link_port = 2;
+
+ uint32_t ids[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ goto fail;
+ }
+
+ err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+ /* no test for device, as it ignores the port/q number */
+ int num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ UINT8_MAX-1, xstats_names, ids,
+ XSTATS_MAX);
+ if (num_stats != 0) {
+ printf("%d: expected %d stats, got return %d\n", __LINE__,
+ 0, num_stats);
+ goto fail;
+ }
+
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ UINT8_MAX-1, xstats_names, ids,
+ XSTATS_MAX);
+ if (num_stats != 0) {
+ printf("%d: expected %d stats, got return %d\n", __LINE__,
+ 0, num_stats);
+ goto fail;
+ }
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+port_reconfig_credits(struct test *t)
+{
+ if (init(t, 1, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ uint32_t i;
+ const uint32_t NUM_ITERS = 32;
+ for (i = 0; i < NUM_ITERS; i++) {
+ const struct rte_event_queue_conf conf = {
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+ if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
+ printf("%d: error creating qid\n", __LINE__);
+ return -1;
+ }
+ t->qid[0] = 0;
+
+ static const struct rte_event_port_conf port_conf = {
+ .new_event_threshold = 128,
+ .dequeue_depth = 32,
+ .enqueue_depth = 64,
+ .disable_implicit_release = 0,
+ };
+ if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
+ printf("%d Error setting up port\n", __LINE__);
+ return -1;
+ }
+
+ int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
+ if (links != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+ const uint32_t NPKTS = 1;
+ uint32_t j;
+ for (j = 0; j < NPKTS; j++) {
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ goto fail;
+ }
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ rte_event_dev_dump(0, stdout);
+ goto fail;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ struct rte_event ev[NPKTS];
+ int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
+ NPKTS, 0);
+ if (deq != 1)
+ printf("%d error; no packet dequeued\n", __LINE__);
+
+ /* let cleanup below stop the device on last iter */
+ if (i != NUM_ITERS-1)
+ rte_event_dev_stop(evdev);
+ }
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+port_single_lb_reconfig(struct test *t)
+{
+ if (init(t, 2, 2) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ goto fail;
+ }
+
+ static const struct rte_event_queue_conf conf_lb_atomic = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+ if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
+ printf("%d: error creating qid\n", __LINE__);
+ goto fail;
+ }
+
+ static const struct rte_event_queue_conf conf_single_link = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
+ };
+ if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
+ printf("%d: error creating qid\n", __LINE__);
+ goto fail;
+ }
+
+ struct rte_event_port_conf port_conf = {
+ .new_event_threshold = 128,
+ .dequeue_depth = 32,
+ .enqueue_depth = 64,
+ .disable_implicit_release = 0,
+ };
+ if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
+ printf("%d Error setting up port\n", __LINE__);
+ goto fail;
+ }
+ if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
+ printf("%d Error setting up port\n", __LINE__);
+ goto fail;
+ }
+
+ /* link port to lb queue */
+ uint8_t queue_id = 0;
+ if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
+ printf("%d: error creating link for qid\n", __LINE__);
+ goto fail;
+ }
+
+ int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
+ if (ret != 1) {
+ printf("%d: Error unlinking lb port\n", __LINE__);
+ goto fail;
+ }
+
+ queue_id = 1;
+ if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
+ printf("%d: error creating link for qid\n", __LINE__);
+ goto fail;
+ }
+
+ queue_id = 0;
+ int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+xstats_brute_force(struct test *t)
+{
+ uint32_t i;
+ const uint32_t XSTATS_MAX = 1024;
+ uint32_t ids[XSTATS_MAX];
+ uint64_t values[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+ for (i = 0; i < XSTATS_MAX; i++)
+ ids[i] = i;
+
+ for (i = 0; i < 3; i++) {
+ uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
+ uint32_t j;
+ for (j = 0; j < UINT8_MAX; j++) {
+ rte_event_dev_xstats_names_get(evdev, mode,
+ j, xstats_names, ids, XSTATS_MAX);
+
+ rte_event_dev_xstats_get(evdev, mode, j, ids,
+ values, XSTATS_MAX);
+ }
+ }
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+xstats_id_reset_tests(struct test *t)
+{
+ const int wrk_enq = 2;
+ int err;
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+#define XSTATS_MAX 1024
+ int ret;
+ uint32_t i;
+ uint32_t ids[XSTATS_MAX];
+ uint64_t values[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+ for (i = 0; i < XSTATS_MAX; i++)
+ ids[i] = i;
+
+#define NUM_DEV_STATS 6
+ /* Device names / values */
+ int num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, xstats_names, ids, XSTATS_MAX);
+ if (num_stats != NUM_DEV_STATS) {
+ printf("%d: expected %d stats, got return %d\n", __LINE__,
+ NUM_DEV_STATS, num_stats);
+ goto fail;
+ }
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, num_stats);
+ if (ret != NUM_DEV_STATS) {
+ printf("%d: expected %d stats, got return %d\n", __LINE__,
+ NUM_DEV_STATS, ret);
+ goto fail;
+ }
+
+#define NPKTS 7
+ for (i = 0; i < NPKTS; i++) {
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ goto fail;
+ }
+ ev.queue_id = t->qid[i];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ arp->seqn = i;
+
+ int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ goto fail;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ static const char * const dev_names[] = {
+ "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
+ "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
+ };
+ uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
+ for (i = 0; (int)i < ret; i++) {
+ unsigned int id;
+ uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
+ dev_names[i],
+ &id);
+ if (id != i) {
+ printf("%d: %s id incorrect, expected %d got %d\n",
+ __LINE__, dev_names[i], i, id);
+ goto fail;
+ }
+ if (val != dev_expected[i]) {
+ printf("%d: %s value incorrect, expected %"
+ PRIu64" got %d\n", __LINE__, dev_names[i],
+ dev_expected[i], id);
+ goto fail;
+ }
+ /* reset to zero */
+ int reset_ret = rte_event_dev_xstats_reset(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE, 0,
+ &id,
+ 1);
+ if (reset_ret) {
+ printf("%d: failed to reset successfully\n", __LINE__);
+ goto fail;
+ }
+ dev_expected[i] = 0;
+ /* check value again */
+ val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
+ if (val != dev_expected[i]) {
+ printf("%d: %s value incorrect, expected %"PRIu64
+ " got %"PRIu64"\n", __LINE__, dev_names[i],
+ dev_expected[i], val);
+ goto fail;
+ }
+ };
+
+/* 48 is stat offset from start of the devices whole xstats.
+ * This WILL break every time we add a statistic to a port
+ * or the device, but there is no other way to test
+ */
+#define PORT_OFF 48
+/* num stats for the tested port. CQ size adds more stats to a port */
+#define NUM_PORT_STATS 21
+/* the port to test. */
+#define PORT 2
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, PORT,
+ xstats_names, ids, XSTATS_MAX);
+ if (num_stats != NUM_PORT_STATS) {
+ printf("%d: expected %d stats, got return %d\n",
+ __LINE__, NUM_PORT_STATS, num_stats);
+ goto fail;
+ }
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
+ ids, values, num_stats);
+
+ if (ret != NUM_PORT_STATS) {
+ printf("%d: expected %d stats, got return %d\n",
+ __LINE__, NUM_PORT_STATS, ret);
+ goto fail;
+ }
+ static const char * const port_names[] = {
+ "port_2_rx",
+ "port_2_tx",
+ "port_2_drop",
+ "port_2_inflight",
+ "port_2_avg_pkt_cycles",
+ "port_2_credits",
+ "port_2_rx_ring_used",
+ "port_2_rx_ring_free",
+ "port_2_cq_ring_used",
+ "port_2_cq_ring_free",
+ "port_2_dequeue_calls",
+ "port_2_dequeues_returning_0",
+ "port_2_dequeues_returning_1-4",
+ "port_2_dequeues_returning_5-8",
+ "port_2_dequeues_returning_9-12",
+ "port_2_dequeues_returning_13-16",
+ "port_2_dequeues_returning_17-20",
+ "port_2_dequeues_returning_21-24",
+ "port_2_dequeues_returning_25-28",
+ "port_2_dequeues_returning_29-32",
+ "port_2_dequeues_returning_33-36",
+ };
+ uint64_t port_expected[] = {
+ 0, /* rx */
+ NPKTS, /* tx */
+ 0, /* drop */
+ NPKTS, /* inflight */
+ 0, /* avg pkt cycles */
+ 0, /* credits */
+ 0, /* rx ring used */
+ 4096, /* rx ring free */
+ NPKTS, /* cq ring used */
+ 25, /* cq ring free */
+ 0, /* dequeue zero calls */
+ 0, 0, 0, 0, 0, /* 10 dequeue buckets */
+ 0, 0, 0, 0, 0,
+ };
+ uint64_t port_expected_zero[] = {
+ 0, /* rx */
+ 0, /* tx */
+ 0, /* drop */
+ NPKTS, /* inflight */
+ 0, /* avg pkt cycles */
+ 0, /* credits */
+ 0, /* rx ring used */
+ 4096, /* rx ring free */
+ NPKTS, /* cq ring used */
+ 25, /* cq ring free */
+ 0, /* dequeue zero calls */
+ 0, 0, 0, 0, 0, /* 10 dequeue buckets */
+ 0, 0, 0, 0, 0,
+ };
+ if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
+ RTE_DIM(port_names) != NUM_PORT_STATS) {
+ printf("%d: port array of wrong size\n", __LINE__);
+ goto fail;
+ }
+
+ int failed = 0;
+ for (i = 0; (int)i < ret; i++) {
+ unsigned int id;
+ uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
+ port_names[i],
+ &id);
+ if (id != i + PORT_OFF) {
+ printf("%d: %s id incorrect, expected %d got %d\n",
+ __LINE__, port_names[i], i+PORT_OFF,
+ id);
+ failed = 1;
+ }
+ if (val != port_expected[i]) {
+ printf("%d: %s value incorrect, expected %"PRIu64
+ " got %d\n", __LINE__, port_names[i],
+ port_expected[i], id);
+ failed = 1;
+ }
+ /* reset to zero */
+ int reset_ret = rte_event_dev_xstats_reset(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, PORT,
+ &id,
+ 1);
+ if (reset_ret) {
+ printf("%d: failed to reset successfully\n", __LINE__);
+ failed = 1;
+ }
+ /* check value again */
+ val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
+ if (val != port_expected_zero[i]) {
+ printf("%d: %s value incorrect, expected %"PRIu64
+ " got %"PRIu64"\n", __LINE__, port_names[i],
+ port_expected_zero[i], val);
+ failed = 1;
+ }
+ };
+ if (failed)
+ goto fail;
+
+/* num queue stats */
+#define NUM_Q_STATS 16
+/* queue offset from start of the devices whole xstats.
+ * This will break every time we add a statistic to a device/port/queue
+ */
+#define QUEUE_OFF 90
+ const uint32_t queue = 0;
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE, queue,
+ xstats_names, ids, XSTATS_MAX);
+ if (num_stats != NUM_Q_STATS) {
+ printf("%d: expected %d stats, got return %d\n",
+ __LINE__, NUM_Q_STATS, num_stats);
+ goto fail;
+ }
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
+ queue, ids, values, num_stats);
+ if (ret != NUM_Q_STATS) {
+ printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
+ goto fail;
+ }
+ static const char * const queue_names[] = {
+ "qid_0_rx",
+ "qid_0_tx",
+ "qid_0_drop",
+ "qid_0_inflight",
+ "qid_0_iq_0_used",
+ "qid_0_iq_1_used",
+ "qid_0_iq_2_used",
+ "qid_0_iq_3_used",
+ "qid_0_port_0_pinned_flows",
+ "qid_0_port_0_packets",
+ "qid_0_port_1_pinned_flows",
+ "qid_0_port_1_packets",
+ "qid_0_port_2_pinned_flows",
+ "qid_0_port_2_packets",
+ "qid_0_port_3_pinned_flows",
+ "qid_0_port_3_packets",
+ };
+ uint64_t queue_expected[] = {
+ 7, /* rx */
+ 7, /* tx */
+ 0, /* drop */
+ 7, /* inflight */
+ 0, /* iq 0 used */
+ 0, /* iq 1 used */
+ 0, /* iq 2 used */
+ 0, /* iq 3 used */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 7,
+ 0, 0,
+ };
+ uint64_t queue_expected_zero[] = {
+ 0, /* rx */
+ 0, /* tx */
+ 0, /* drop */
+ 7, /* inflight */
+ 0, /* iq 0 used */
+ 0, /* iq 1 used */
+ 0, /* iq 2 used */
+ 0, /* iq 3 used */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 0,
+ 0, 0,
+ };
+ if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
+ RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
+ RTE_DIM(queue_names) != NUM_Q_STATS) {
+ printf("%d : queue array of wrong size\n", __LINE__);
+ goto fail;
+ }
+
+ failed = 0;
+ for (i = 0; (int)i < ret; i++) {
+ unsigned int id;
+ uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
+ queue_names[i],
+ &id);
+ if (id != i + QUEUE_OFF) {
+ printf("%d: %s id incorrect, expected %d got %d\n",
+ __LINE__, queue_names[i], i+QUEUE_OFF,
+ id);
+ failed = 1;
+ }
+ if (val != queue_expected[i]) {
+ printf("%d: %d: %s value , expected %"PRIu64
+ " got %"PRIu64"\n", i, __LINE__,
+ queue_names[i], queue_expected[i], val);
+ failed = 1;
+ }
+ /* reset to zero */
+ int reset_ret = rte_event_dev_xstats_reset(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ queue, &id, 1);
+ if (reset_ret) {
+ printf("%d: failed to reset successfully\n", __LINE__);
+ failed = 1;
+ }
+ /* check value again */
+ val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
+ 0);
+ if (val != queue_expected_zero[i]) {
+ printf("%d: %s value incorrect, expected %"PRIu64
+ " got %"PRIu64"\n", __LINE__, queue_names[i],
+ queue_expected_zero[i], val);
+ failed = 1;
+ }
+ };
+
+ if (failed)
+ goto fail;
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+ordered_reconfigure(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ const struct rte_event_queue_conf conf = {
+ .schedule_type = RTE_SCHED_TYPE_ORDERED,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
+ printf("%d: error creating qid\n", __LINE__);
+ goto failed;
+ }
+
+ if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
+ printf("%d: error creating qid, for 2nd time\n", __LINE__);
+ goto failed;
+ }
+
+ rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+failed:
+ cleanup(t);
+ return -1;
+}
+
+static int
+qid_priorities(struct test *t)
+{
+ /* Test works by having a CQ with enough empty space for all packets,
+ * and enqueueing 3 packets to 3 QIDs. They must return based on the
+ * priority of the QID, not the ingress order, to pass the test
+ */
+ unsigned int i;
+ /* Create instance with 1 ports, and 3 qids */
+ if (init(t, 3, 1) < 0 ||
+ create_ports(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ for (i = 0; i < 3; i++) {
+ /* Create QID */
+ const struct rte_event_queue_conf conf = {
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
+ /* increase priority (0 == highest), as we go */
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ t->qid[i] = i;
+ }
+ t->nb_qids = i;
+ /* map all QIDs to port */
+ rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /* enqueue 3 packets, setting seqn and QID to check priority */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ ev.queue_id = t->qid[i];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ arp->seqn = i;
+
+ int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* dequeue packets, verify priority was upheld */
+ struct rte_event ev[32];
+ uint32_t deq_pkts =
+ rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
+ if (deq_pkts != 3) {
+ printf("%d: failed to deq packets\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ for (i = 0; i < 3; i++) {
+ if (ev[i].mbuf->seqn != 2-i) {
+ printf(
+ "%d: qid priority test: seqn %d incorrectly prioritized\n",
+ __LINE__, i);
+ }
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+load_balancing(struct test *t)
+{
+ const int rx_enq = 0;
+ int err;
+ uint32_t i;
+
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ for (i = 0; i < 3; i++) {
+ /* map port 1 - 3 inclusive */
+ if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
+ NULL, 1) != 1) {
+ printf("%d: error mapping qid to port %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** FORWARD ****************/
+ /*
+ * Create a set of flows that test the load-balancing operation of the
+ * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
+ * with a new flow, which should be sent to the 3rd mapped CQ
+ */
+ static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
+
+ for (i = 0; i < RTE_DIM(flows); i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .flow_id = flows[i],
+ .mbuf = arp,
+ };
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ struct test_event_dev_stats stats;
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.port_inflight[1] != 4) {
+ printf("%d:%s: port 1 inflight not correct\n", __LINE__,
+ __func__);
+ return -1;
+ }
+ if (stats.port_inflight[2] != 2) {
+ printf("%d:%s: port 2 inflight not correct\n", __LINE__,
+ __func__);
+ return -1;
+ }
+ if (stats.port_inflight[3] != 3) {
+ printf("%d:%s: port 3 inflight not correct\n", __LINE__,
+ __func__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+load_balancing_history(struct test *t)
+{
+ struct test_event_dev_stats stats = {0};
+ const int rx_enq = 0;
+ int err;
+ uint32_t i;
+
+ /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0)
+ return -1;
+
+ /* CQ mapping to QID */
+ if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping port 1 qid\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping port 2 qid\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping port 3 qid\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * Create a set of flows that test the load-balancing operation of the
+ * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
+ * the packet from CQ 0, send in a new set of flows. Ensure that:
+ * 1. The new flow 3 gets into the empty CQ0
+ * 2. packets for existing flow gets added into CQ1
+ * 3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
+ * more outstanding pkts
+ *
+ * This test makes sure that when a flow ends (i.e. all packets
+ * have been completed for that flow), that the flow can be moved
+ * to a different CQ when new packets come in for that flow.
+ */
+ static uint32_t flows1[] = {0, 1, 1, 2};
+
+ for (i = 0; i < RTE_DIM(flows1); i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ struct rte_event ev = {
+ .flow_id = flows1[i],
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .event_type = RTE_EVENT_TYPE_CPU,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .mbuf = arp
+ };
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ arp->hash.rss = flows1[i];
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ /* call the scheduler */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* Dequeue the flow 0 packet from port 1, so that we can then drop */
+ struct rte_event ev;
+ if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
+ printf("%d: failed to dequeue\n", __LINE__);
+ return -1;
+ }
+ if (ev.mbuf->hash.rss != flows1[0]) {
+ printf("%d: unexpected flow received\n", __LINE__);
+ return -1;
+ }
+
+ /* drop the flow 0 packet from port 1 */
+ rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
+
+ /* call the scheduler */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /*
+ * Set up the next set of flows, first a new flow to fill up
+ * CQ 0, so that the next flow 0 packet should go to CQ2
+ */
+ static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
+
+ for (i = 0; i < RTE_DIM(flows2); i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ struct rte_event ev = {
+ .flow_id = flows2[i],
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .event_type = RTE_EVENT_TYPE_CPU,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .mbuf = arp
+ };
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ arp->hash.rss = flows2[i];
+
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ /* schedule */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d:failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * Now check the resulting inflights on each port.
+ */
+ if (stats.port_inflight[1] != 3) {
+ printf("%d:%s: port 1 inflight not correct\n", __LINE__,
+ __func__);
+ printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
+ (unsigned int)stats.port_inflight[1],
+ (unsigned int)stats.port_inflight[2],
+ (unsigned int)stats.port_inflight[3]);
+ return -1;
+ }
+ if (stats.port_inflight[2] != 4) {
+ printf("%d:%s: port 2 inflight not correct\n", __LINE__,
+ __func__);
+ printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
+ (unsigned int)stats.port_inflight[1],
+ (unsigned int)stats.port_inflight[2],
+ (unsigned int)stats.port_inflight[3]);
+ return -1;
+ }
+ if (stats.port_inflight[3] != 2) {
+ printf("%d:%s: port 3 inflight not correct\n", __LINE__,
+ __func__);
+ printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
+ (unsigned int)stats.port_inflight[1],
+ (unsigned int)stats.port_inflight[2],
+ (unsigned int)stats.port_inflight[3]);
+ return -1;
+ }
+
+ for (i = 1; i <= 3; i++) {
+ struct rte_event ev;
+ while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
+ rte_event_enqueue_burst(evdev, i, &release_ev, 1);
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+invalid_qid(struct test *t)
+{
+ struct test_event_dev_stats stats;
+ const int rx_enq = 0;
+ int err;
+ uint32_t i;
+
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ for (i = 0; i < 4; i++) {
+ err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
+ NULL, 1);
+ if (err != 1) {
+ printf("%d: error mapping port 1 qid\n", __LINE__);
+ return -1;
+ }
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * Send in a packet with an invalid qid to the scheduler.
+ * We should see the packed enqueued OK, but the inflights for
+ * that packet should not be incremented, and the rx_dropped
+ * should be incremented.
+ */
+ static uint32_t flows1[] = {20};
+
+ for (i = 0; i < RTE_DIM(flows1); i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0] + flows1[i],
+ .flow_id = i,
+ .mbuf = arp,
+ };
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ /* call the scheduler */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * Now check the resulting inflights on the port, and the rx_dropped.
+ */
+ if (stats.port_inflight[0] != 0) {
+ printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
+ __func__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ if (stats.port_rx_dropped[0] != 1) {
+ printf("%d:%s: port 1 drops\n", __LINE__, __func__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ /* each packet drop should only be counted in one place - port or dev */
+ if (stats.rx_dropped != 0) {
+ printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
+ __func__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+single_packet(struct test *t)
+{
+ const uint32_t MAGIC_SEQN = 7321;
+ struct rte_event ev;
+ struct test_event_dev_stats stats;
+ const int rx_enq = 0;
+ const int wrk_enq = 2;
+ int err;
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** Gen pkt and enqueue ****************/
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev.mbuf = arp;
+ ev.queue_id = 0;
+ ev.flow_id = 3;
+ arp->seqn = MAGIC_SEQN;
+
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.rx_pkts != 1 ||
+ stats.tx_pkts != 1 ||
+ stats.port_inflight[wrk_enq] != 1) {
+ printf("%d: Sched core didn't handle pkt as expected\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ uint32_t deq_pkts;
+
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
+ if (deq_pkts < 1) {
+ printf("%d: Failed to deq\n", __LINE__);
+ return -1;
+ }
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (ev.mbuf->seqn != MAGIC_SEQN) {
+ printf("%d: magic sequence number not dequeued\n", __LINE__);
+ return -1;
+ }
+
+ rte_pktmbuf_free(ev.mbuf);
+ err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[wrk_enq] != 0) {
+ printf("%d: port inflight not correct\n", __LINE__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+inflight_counts(struct test *t)
+{
+ struct rte_event ev;
+ struct test_event_dev_stats stats;
+ const int rx_enq = 0;
+ const int p1 = 1;
+ const int p2 = 2;
+ int err;
+ int i;
+
+ /* Create instance with 4 ports */
+ if (init(t, 2, 3) < 0 ||
+ create_ports(t, 3) < 0 ||
+ create_atomic_qids(t, 2) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+ err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** FORWARD ****************/
+#define QID1_NUM 5
+ for (i = 0; i < QID1_NUM; i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ goto err;
+ }
+
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ goto err;
+ }
+ }
+#define QID2_NUM 3
+ for (i = 0; i < QID2_NUM; i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ goto err;
+ }
+ ev.queue_id = t->qid[1];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ goto err;
+ }
+ }
+
+ /* schedule */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ goto err;
+ }
+
+ if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
+ stats.tx_pkts != QID1_NUM + QID2_NUM) {
+ printf("%d: Sched core didn't handle pkt as expected\n",
+ __LINE__);
+ goto err;
+ }
+
+ if (stats.port_inflight[p1] != QID1_NUM) {
+ printf("%d: %s port 1 inflight not correct\n", __LINE__,
+ __func__);
+ goto err;
+ }
+ if (stats.port_inflight[p2] != QID2_NUM) {
+ printf("%d: %s port 2 inflight not correct\n", __LINE__,
+ __func__);
+ goto err;
+ }
+
+ /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
+ /* port 1 */
+ struct rte_event events[QID1_NUM + QID2_NUM];
+ uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
+ RTE_DIM(events), 0);
+
+ if (deq_pkts != QID1_NUM) {
+ printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
+ goto err;
+ }
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[p1] != QID1_NUM) {
+ printf("%d: port 1 inflight decrement after DEQ != 0\n",
+ __LINE__);
+ goto err;
+ }
+ for (i = 0; i < QID1_NUM; i++) {
+ err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
+ 1);
+ if (err != 1) {
+ printf("%d: %s rte enqueue of inf release failed\n",
+ __LINE__, __func__);
+ goto err;
+ }
+ }
+
+ /*
+ * As the scheduler core decrements inflights, it needs to run to
+ * process packets to act on the drop messages
+ */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[p1] != 0) {
+ printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
+ goto err;
+ }
+
+ /* port2 */
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
+ RTE_DIM(events), 0);
+ if (deq_pkts != QID2_NUM) {
+ printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
+ goto err;
+ }
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[p2] != QID2_NUM) {
+ printf("%d: port 1 inflight decrement after DEQ != 0\n",
+ __LINE__);
+ goto err;
+ }
+ for (i = 0; i < QID2_NUM; i++) {
+ err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
+ 1);
+ if (err != 1) {
+ printf("%d: %s rte enqueue of inf release failed\n",
+ __LINE__, __func__);
+ goto err;
+ }
+ }
+
+ /*
+ * As the scheduler core decrements inflights, it needs to run to
+ * process packets to act on the drop messages
+ */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[p2] != 0) {
+ printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
+ goto err;
+ }
+ cleanup(t);
+ return 0;
+
+err:
+ rte_event_dev_dump(evdev, stdout);
+ cleanup(t);
+ return -1;
+}
+
+static int
+parallel_basic(struct test *t, int check_order)
+{
+ const uint8_t rx_port = 0;
+ const uint8_t w1_port = 1;
+ const uint8_t w3_port = 3;
+ const uint8_t tx_port = 4;
+ int err;
+ int i;
+ uint32_t deq_pkts, j;
+ struct rte_mbuf *mbufs[3];
+ struct rte_mbuf *mbufs_out[3] = { 0 };
+ const uint32_t MAGIC_SEQN = 1234;
+
+ /* Create instance with 4 ports */
+ if (init(t, 2, tx_port + 1) < 0 ||
+ create_ports(t, tx_port + 1) < 0 ||
+ (check_order ? create_ordered_qids(t, 1) :
+ create_unordered_qids(t, 1)) < 0 ||
+ create_directed_qids(t, 1, &tx_port)) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * CQ mapping to QID
+ * We need three ports, all mapped to the same ordered qid0. Then we'll
+ * take a packet out to each port, re-enqueue in reverse order,
+ * then make sure the reordering has taken place properly when we
+ * dequeue from the tx_port.
+ *
+ * Simplified test setup diagram:
+ *
+ * rx_port w1_port
+ * \ / \
+ * qid0 - w2_port - qid1
+ * \ / \
+ * w3_port tx_port
+ */
+ /* CQ mapping to QID for LB ports (directed mapped on create) */
+ for (i = w1_port; i <= w3_port; i++) {
+ err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
+ 1);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /* Enqueue 3 packets to the rx port */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
+ if (!mbufs[i]) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = mbufs[i];
+ mbufs[i]->seqn = MAGIC_SEQN + i;
+
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue pkt %u, retval = %u\n",
+ __LINE__, i, err);
+ return -1;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* use extra slot to make logic in loops easier */
+ struct rte_event deq_ev[w3_port + 1];
+
+ /* Dequeue the 3 packets, one from each worker port */
+ for (i = w1_port; i <= w3_port; i++) {
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
+ &deq_ev[i], 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: Failed to deq\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ }
+
+ /* Enqueue each packet in reverse order, flushing after each one */
+ for (i = w3_port; i >= w1_port; i--) {
+
+ deq_ev[i].op = RTE_EVENT_OP_FORWARD;
+ deq_ev[i].queue_id = t->qid[1];
+ err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* dequeue from the tx ports, we should get 3 packets */
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
+ 3, 0);
+
+ /* Check to see if we've got all 3 packets */
+ if (deq_pkts != 3) {
+ printf("%d: expected 3 pkts at tx port got %d from port %d\n",
+ __LINE__, deq_pkts, tx_port);
+ rte_event_dev_dump(evdev, stdout);
+ return 1;
+ }
+
+ /* Check to see if the sequence numbers are in expected order */
+ if (check_order) {
+ for (j = 0 ; j < deq_pkts ; j++) {
+ if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
+ printf(
+ "%d: Incorrect sequence number(%d) from port %d\n",
+ __LINE__, mbufs_out[j]->seqn, tx_port);
+ return -1;
+ }
+ }
+ }
+
+ /* Destroy the instance */
+ cleanup(t);
+ return 0;
+}
+
+static int
+ordered_basic(struct test *t)
+{
+ return parallel_basic(t, 1);
+}
+
+static int
+unordered_basic(struct test *t)
+{
+ return parallel_basic(t, 0);
+}
+
+static int
+holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
+{
+ const struct rte_event new_ev = {
+ .op = RTE_EVENT_OP_NEW
+ /* all other fields zero */
+ };
+ struct rte_event ev = new_ev;
+ unsigned int rx_port = 0; /* port we get the first flow on */
+ char rx_port_used_stat[64];
+ char rx_port_free_stat[64];
+ char other_port_used_stat[64];
+
+ if (init(t, 1, 2) < 0 ||
+ create_ports(t, 2) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+ int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
+ if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
+ nb_links != 1) {
+ printf("%d: Error links queue to ports\n", __LINE__);
+ goto err;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto err;
+ }
+
+ /* send one packet and see where it goes, port 0 or 1 */
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error doing first enqueue\n", __LINE__);
+ goto err;
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
+ != 1)
+ rx_port = 1;
+
+ snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
+ "port_%u_cq_ring_used", rx_port);
+ snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
+ "port_%u_cq_ring_free", rx_port);
+ snprintf(other_port_used_stat, sizeof(other_port_used_stat),
+ "port_%u_cq_ring_used", rx_port ^ 1);
+ if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
+ != 1) {
+ printf("%d: Error, first event not scheduled\n", __LINE__);
+ goto err;
+ }
+
+ /* now fill up the rx port's queue with one flow to cause HOLB */
+ do {
+ ev = new_ev;
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error with enqueue\n", __LINE__);
+ goto err;
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+ } while (rte_event_dev_xstats_by_name_get(evdev,
+ rx_port_free_stat, NULL) != 0);
+
+ /* one more packet, which needs to stay in IQ - i.e. HOLB */
+ ev = new_ev;
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error with enqueue\n", __LINE__);
+ goto err;
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* check that the other port still has an empty CQ */
+ if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
+ != 0) {
+ printf("%d: Error, second port CQ is not empty\n", __LINE__);
+ goto err;
+ }
+ /* check IQ now has one packet */
+ if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
+ != 1) {
+ printf("%d: Error, QID does not have exactly 1 packet\n",
+ __LINE__);
+ goto err;
+ }
+
+ /* send another flow, which should pass the other IQ entry */
+ ev = new_ev;
+ ev.flow_id = 1;
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error with enqueue\n", __LINE__);
+ goto err;
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
+ != 1) {
+ printf("%d: Error, second flow did not pass out first\n",
+ __LINE__);
+ goto err;
+ }
+
+ if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
+ != 1) {
+ printf("%d: Error, QID does not have exactly 1 packet\n",
+ __LINE__);
+ goto err;
+ }
+ cleanup(t);
+ return 0;
+err:
+ rte_event_dev_dump(evdev, stdout);
+ cleanup(t);
+ return -1;
+}
+
+static void
+flush(uint8_t dev_id __rte_unused, struct rte_event event, void *arg)
+{
+ *((uint8_t *) arg) += (event.u64 == 0xCA11BACC) ? 1 : 0;
+}
+
+static int
+dev_stop_flush(struct test *t) /* test to check we can properly flush events */
+{
+ const struct rte_event new_ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .u64 = 0xCA11BACC,
+ .queue_id = 0
+ };
+ struct rte_event ev = new_ev;
+ uint8_t count = 0;
+ int i;
+
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* Link the queue so *_start() doesn't error out */
+ if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1) {
+ printf("%d: Error linking queue to port\n", __LINE__);
+ goto err;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto err;
+ }
+
+ for (i = 0; i < DEQUEUE_DEPTH + 1; i++) {
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error enqueuing events\n", __LINE__);
+ goto err;
+ }
+ }
+
+ /* Schedule the events from the port to the IQ. At least one event
+ * should be remaining in the queue.
+ */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ if (rte_event_dev_stop_flush_callback_register(evdev, flush, &count)) {
+ printf("%d: Error installing the flush callback\n", __LINE__);
+ goto err;
+ }
+
+ cleanup(t);
+
+ if (count == 0) {
+ printf("%d: Error executing the flush callback\n", __LINE__);
+ goto err;
+ }
+
+ if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
+ printf("%d: Error uninstalling the flush callback\n", __LINE__);
+ goto err;
+ }
+
+ return 0;
+err:
+ rte_event_dev_dump(evdev, stdout);
+ cleanup(t);
+ return -1;
+}
+
+static int
+worker_loopback_worker_fn(void *arg)
+{
+ struct test *t = arg;
+ uint8_t port = t->port[1];
+ int count = 0;
+ int enqd;
+
+ /*
+ * Takes packets from the input port and then loops them back through
+ * the Eventdev. Each packet gets looped through QIDs 0-8, 16 times
+ * so each packet goes through 8*16 = 128 times.
+ */
+ printf("%d: \tWorker function started\n", __LINE__);
+ while (count < NUM_PACKETS) {
+#define BURST_SIZE 32
+ struct rte_event ev[BURST_SIZE];
+ uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
+ BURST_SIZE, 0);
+ if (nb_rx == 0) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ ev[i].queue_id++;
+ if (ev[i].queue_id != 8) {
+ ev[i].op = RTE_EVENT_OP_FORWARD;
+ enqd = rte_event_enqueue_burst(evdev, port,
+ &ev[i], 1);
+ if (enqd != 1) {
+ printf("%d: Can't enqueue FWD!!\n",
+ __LINE__);
+ return -1;
+ }
+ continue;
+ }
+
+ ev[i].queue_id = 0;
+ ev[i].mbuf->udata64++;
+ if (ev[i].mbuf->udata64 != 16) {
+ ev[i].op = RTE_EVENT_OP_FORWARD;
+ enqd = rte_event_enqueue_burst(evdev, port,
+ &ev[i], 1);
+ if (enqd != 1) {
+ printf("%d: Can't enqueue FWD!!\n",
+ __LINE__);
+ return -1;
+ }
+ continue;
+ }
+ /* we have hit 16 iterations through system - drop */
+ rte_pktmbuf_free(ev[i].mbuf);
+ count++;
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
+ if (enqd != 1) {
+ printf("%d drop enqueue failed\n", __LINE__);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+worker_loopback_producer_fn(void *arg)
+{
+ struct test *t = arg;
+ uint8_t port = t->port[0];
+ uint64_t count = 0;
+
+ printf("%d: \tProducer function started\n", __LINE__);
+ while (count < NUM_PACKETS) {
+ struct rte_mbuf *m = 0;
+ do {
+ m = rte_pktmbuf_alloc(t->mbuf_pool);
+ } while (m == NULL);
+
+ m->udata64 = 0;
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .flow_id = (uintptr_t)m & 0xFFFF,
+ .mbuf = m,
+ };
+
+ if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
+ while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
+ 1)
+ rte_pause();
+ }
+
+ count++;
+ }
+
+ return 0;
+}
+
+static int
+worker_loopback(struct test *t, uint8_t disable_implicit_release)
+{
+ /* use a single producer core, and a worker core to see what happens
+ * if the worker loops packets back multiple times
+ */
+ struct test_event_dev_stats stats;
+ uint64_t print_cycles = 0, cycles = 0;
+ uint64_t tx_pkts = 0;
+ int err;
+ int w_lcore, p_lcore;
+
+ if (init(t, 8, 2) < 0 ||
+ create_atomic_qids(t, 8) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* RX with low max events */
+ static struct rte_event_port_conf conf = {
+ .dequeue_depth = 32,
+ .enqueue_depth = 64,
+ };
+ /* beware: this cannot be initialized in the static above as it would
+ * only be initialized once - and this needs to be set for multiple runs
+ */
+ conf.new_event_threshold = 512;
+ conf.disable_implicit_release = disable_implicit_release;
+
+ if (rte_event_port_setup(evdev, 0, &conf) < 0) {
+ printf("Error setting up RX port\n");
+ return -1;
+ }
+ t->port[0] = 0;
+ /* TX with higher max events */
+ conf.new_event_threshold = 4096;
+ if (rte_event_port_setup(evdev, 1, &conf) < 0) {
+ printf("Error setting up TX port\n");
+ return -1;
+ }
+ t->port[1] = 1;
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
+ if (err != 8) { /* should have mapped all queues*/
+ printf("%d: error mapping port 2 to all qids\n", __LINE__);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ p_lcore = rte_get_next_lcore(
+ /* start core */ -1,
+ /* skip master */ 1,
+ /* wrap */ 0);
+ w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
+
+ rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
+ rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
+
+ print_cycles = cycles = rte_get_timer_cycles();
+ while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
+ rte_eal_get_lcore_state(w_lcore) != FINISHED) {
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ uint64_t new_cycles = rte_get_timer_cycles();
+
+ if (new_cycles - print_cycles > rte_get_timer_hz()) {
+ test_event_dev_stats_get(evdev, &stats);
+ printf(
+ "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
+ __LINE__, stats.rx_pkts, stats.tx_pkts);
+
+ print_cycles = new_cycles;
+ }
+ if (new_cycles - cycles > rte_get_timer_hz() * 3) {
+ test_event_dev_stats_get(evdev, &stats);
+ if (stats.tx_pkts == tx_pkts) {
+ rte_event_dev_dump(evdev, stdout);
+ printf("Dumping xstats:\n");
+ xstats_print();
+ printf(
+ "%d: No schedules for seconds, deadlock\n",
+ __LINE__);
+ return -1;
+ }
+ tx_pkts = stats.tx_pkts;
+ cycles = new_cycles;
+ }
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+ /* ensure all completions are flushed */
+
+ rte_eal_mp_wait_lcore();
+
+ cleanup(t);
+ return 0;
+}
+
+static struct rte_mempool *eventdev_func_mempool;
+
+int
+test_sw_eventdev(void)
+{
+ struct test *t;
+ int ret;
+
+ t = malloc(sizeof(struct test));
+ if (t == NULL)
+ return -1;
+ /* manually initialize the op, older gcc's complain on static
+ * initialization of struct elements that are a bitfield.
+ */
+ release_ev.op = RTE_EVENT_OP_RELEASE;
+
+ const char *eventdev_name = "event_sw";
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ printf("%d: Eventdev %s not found - creating.\n",
+ __LINE__, eventdev_name);
+ if (rte_vdev_init(eventdev_name, NULL) < 0) {
+ printf("Error creating eventdev\n");
+ goto test_fail;
+ }
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ printf("Error finding newly created eventdev\n");
+ goto test_fail;
+ }
+ }
+
+ if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
+ printf("Failed to get service ID for software event dev\n");
+ goto test_fail;
+ }
+
+ rte_service_runstate_set(t->service_id, 1);
+ rte_service_set_runstate_mapped_check(t->service_id, 0);
+
+ /* Only create mbuf pool once, reuse for each test run */
+ if (!eventdev_func_mempool) {
+ eventdev_func_mempool = rte_pktmbuf_pool_create(
+ "EVENTDEV_SW_SA_MBUF_POOL",
+ (1<<12), /* 4k buffers */
+ 32 /*MBUF_CACHE_SIZE*/,
+ 0,
+ 512, /* use very small mbufs */
+ rte_socket_id());
+ if (!eventdev_func_mempool) {
+ printf("ERROR creating mempool\n");
+ goto test_fail;
+ }
+ }
+ t->mbuf_pool = eventdev_func_mempool;
+ printf("*** Running Single Directed Packet test...\n");
+ ret = test_single_directed_packet(t);
+ if (ret != 0) {
+ printf("ERROR - Single Directed Packet test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Directed Forward Credit test...\n");
+ ret = test_directed_forward_credits(t);
+ if (ret != 0) {
+ printf("ERROR - Directed Forward Credit test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Single Load Balanced Packet test...\n");
+ ret = single_packet(t);
+ if (ret != 0) {
+ printf("ERROR - Single Packet test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Unordered Basic test...\n");
+ ret = unordered_basic(t);
+ if (ret != 0) {
+ printf("ERROR - Unordered Basic test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Ordered Basic test...\n");
+ ret = ordered_basic(t);
+ if (ret != 0) {
+ printf("ERROR - Ordered Basic test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Burst Packets test...\n");
+ ret = burst_packets(t);
+ if (ret != 0) {
+ printf("ERROR - Burst Packets test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Load Balancing test...\n");
+ ret = load_balancing(t);
+ if (ret != 0) {
+ printf("ERROR - Load Balancing test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Prioritized Directed test...\n");
+ ret = test_priority_directed(t);
+ if (ret != 0) {
+ printf("ERROR - Prioritized Directed test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Prioritized Atomic test...\n");
+ ret = test_priority_atomic(t);
+ if (ret != 0) {
+ printf("ERROR - Prioritized Atomic test FAILED.\n");
+ goto test_fail;
+ }
+
+ printf("*** Running Prioritized Ordered test...\n");
+ ret = test_priority_ordered(t);
+ if (ret != 0) {
+ printf("ERROR - Prioritized Ordered test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Prioritized Unordered test...\n");
+ ret = test_priority_unordered(t);
+ if (ret != 0) {
+ printf("ERROR - Prioritized Unordered test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Invalid QID test...\n");
+ ret = invalid_qid(t);
+ if (ret != 0) {
+ printf("ERROR - Invalid QID test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Load Balancing History test...\n");
+ ret = load_balancing_history(t);
+ if (ret != 0) {
+ printf("ERROR - Load Balancing History test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Inflight Count test...\n");
+ ret = inflight_counts(t);
+ if (ret != 0) {
+ printf("ERROR - Inflight Count test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Abuse Inflights test...\n");
+ ret = abuse_inflights(t);
+ if (ret != 0) {
+ printf("ERROR - Abuse Inflights test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running XStats test...\n");
+ ret = xstats_tests(t);
+ if (ret != 0) {
+ printf("ERROR - XStats test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running XStats ID Reset test...\n");
+ ret = xstats_id_reset_tests(t);
+ if (ret != 0) {
+ printf("ERROR - XStats ID Reset test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running XStats Brute Force test...\n");
+ ret = xstats_brute_force(t);
+ if (ret != 0) {
+ printf("ERROR - XStats Brute Force test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running XStats ID Abuse test...\n");
+ ret = xstats_id_abuse_tests(t);
+ if (ret != 0) {
+ printf("ERROR - XStats ID Abuse test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running QID Priority test...\n");
+ ret = qid_priorities(t);
+ if (ret != 0) {
+ printf("ERROR - QID Priority test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Ordered Reconfigure test...\n");
+ ret = ordered_reconfigure(t);
+ if (ret != 0) {
+ printf("ERROR - Ordered Reconfigure test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Port LB Single Reconfig test...\n");
+ ret = port_single_lb_reconfig(t);
+ if (ret != 0) {
+ printf("ERROR - Port LB Single Reconfig test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Port Reconfig Credits test...\n");
+ ret = port_reconfig_credits(t);
+ if (ret != 0) {
+ printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Head-of-line-blocking test...\n");
+ ret = holb(t);
+ if (ret != 0) {
+ printf("ERROR - Head-of-line-blocking test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Stop Flush test...\n");
+ ret = dev_stop_flush(t);
+ if (ret != 0) {
+ printf("ERROR - Stop Flush test FAILED.\n");
+ goto test_fail;
+ }
+ if (rte_lcore_count() >= 3) {
+ printf("*** Running Worker loopback test...\n");
+ ret = worker_loopback(t, 0);
+ if (ret != 0) {
+ printf("ERROR - Worker loopback test FAILED.\n");
+ return ret;
+ }
+
+ printf("*** Running Worker loopback test (implicit release disabled)...\n");
+ ret = worker_loopback(t, 1);
+ if (ret != 0) {
+ printf("ERROR - Worker loopback test FAILED.\n");
+ goto test_fail;
+ }
+ } else {
+ printf("### Not enough cores for worker loopback tests.\n");
+ printf("### Need at least 3 cores for the tests.\n");
+ }
+
+ /*
+ * Free test instance, leaving mempool initialized, and a pointer to it
+ * in static eventdev_func_mempool, as it is re-used on re-runs
+ */
+ free(t);
+
+ printf("SW Eventdev Selftest Successful.\n");
+ return 0;
+test_fail:
+ free(t);
+ printf("SW Eventdev Selftest Failed.\n");
+ return -1;
+}
diff --git a/src/spdk/dpdk/drivers/event/sw/sw_evdev_worker.c b/src/spdk/dpdk/drivers/event/sw/sw_evdev_worker.c
new file mode 100644
index 00000000..063b919c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/sw/sw_evdev_worker.c
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_event_ring.h>
+
+#include "sw_evdev.h"
+
+#define PORT_ENQUEUE_MAX_BURST_SIZE 64
+
+static inline void
+sw_event_release(struct sw_port *p, uint8_t index)
+{
+ /*
+ * Drops the next outstanding event in our history. Used on dequeue
+ * to clear any history before dequeuing more events.
+ */
+ RTE_SET_USED(index);
+
+ /* create drop message */
+ struct rte_event ev;
+ ev.op = sw_qe_flag_map[RTE_EVENT_OP_RELEASE];
+
+ uint16_t free_count;
+ rte_event_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);
+
+ /* each release returns one credit */
+ p->outstanding_releases--;
+ p->inflight_credits++;
+}
+
+/*
+ * special-case of rte_event_ring enqueue, with overriding the ops member on
+ * the events that get written to the ring.
+ */
+static inline unsigned int
+enqueue_burst_with_ops(struct rte_event_ring *r, const struct rte_event *events,
+ unsigned int n, uint8_t *ops)
+{
+ struct rte_event tmp_evs[PORT_ENQUEUE_MAX_BURST_SIZE];
+ unsigned int i;
+
+ memcpy(tmp_evs, events, n * sizeof(events[0]));
+ for (i = 0; i < n; i++)
+ tmp_evs[i].op = ops[i];
+
+ return rte_event_ring_enqueue_burst(r, tmp_evs, n, NULL);
+}
+
+uint16_t
+sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
+{
+ int32_t i;
+ uint8_t new_ops[PORT_ENQUEUE_MAX_BURST_SIZE];
+ struct sw_port *p = port;
+ struct sw_evdev *sw = (void *)p->sw;
+ uint32_t sw_inflights = rte_atomic32_read(&sw->inflights);
+ uint32_t credit_update_quanta = sw->credit_update_quanta;
+ int new = 0;
+
+ if (num > PORT_ENQUEUE_MAX_BURST_SIZE)
+ num = PORT_ENQUEUE_MAX_BURST_SIZE;
+
+ for (i = 0; i < num; i++)
+ new += (ev[i].op == RTE_EVENT_OP_NEW);
+
+ if (unlikely(new > 0 && p->inflight_max < sw_inflights))
+ return 0;
+
+ if (p->inflight_credits < new) {
+ /* check if event enqueue brings port over max threshold */
+ if (sw_inflights + credit_update_quanta > sw->nb_events_limit)
+ return 0;
+
+ rte_atomic32_add(&sw->inflights, credit_update_quanta);
+ p->inflight_credits += (credit_update_quanta);
+
+ /* If there are fewer inflight credits than new events, limit
+ * the number of enqueued events.
+ */
+ num = (p->inflight_credits < new) ? p->inflight_credits : new;
+ }
+
+ for (i = 0; i < num; i++) {
+ int op = ev[i].op;
+ int outstanding = p->outstanding_releases > 0;
+ const uint8_t invalid_qid = (ev[i].queue_id >= sw->qid_count);
+
+ p->inflight_credits -= (op == RTE_EVENT_OP_NEW);
+ p->inflight_credits += (op == RTE_EVENT_OP_RELEASE) *
+ outstanding;
+
+ new_ops[i] = sw_qe_flag_map[op];
+ new_ops[i] &= ~(invalid_qid << QE_FLAG_VALID_SHIFT);
+
+ /* FWD and RELEASE packets will both resolve to taken (assuming
+ * correct usage of the API), providing very high correct
+ * prediction rate.
+ */
+ if ((new_ops[i] & QE_FLAG_COMPLETE) && outstanding)
+ p->outstanding_releases--;
+
+ /* error case: branch to avoid touching p->stats */
+ if (unlikely(invalid_qid && op != RTE_EVENT_OP_RELEASE)) {
+ p->stats.rx_dropped++;
+ p->inflight_credits++;
+ }
+ }
+
+ /* returns number of events actually enqueued */
+ uint32_t enq = enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
+ new_ops);
+ if (p->outstanding_releases == 0 && p->last_dequeue_burst_sz != 0) {
+ uint64_t burst_ticks = rte_get_timer_cycles() -
+ p->last_dequeue_ticks;
+ uint64_t burst_pkt_ticks =
+ burst_ticks / p->last_dequeue_burst_sz;
+ p->avg_pkt_ticks -= p->avg_pkt_ticks / NUM_SAMPLES;
+ p->avg_pkt_ticks += burst_pkt_ticks / NUM_SAMPLES;
+ p->last_dequeue_ticks = 0;
+ }
+
+ /* Replenish credits if enough releases are performed */
+ if (p->inflight_credits >= credit_update_quanta * 2) {
+ rte_atomic32_sub(&sw->inflights, credit_update_quanta);
+ p->inflight_credits -= credit_update_quanta;
+ }
+
+ return enq;
+}
+
+uint16_t
+sw_event_enqueue(void *port, const struct rte_event *ev)
+{
+ return sw_event_enqueue_burst(port, ev, 1);
+}
+
+uint16_t
+sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
+ uint64_t wait)
+{
+ RTE_SET_USED(wait);
+ struct sw_port *p = (void *)port;
+ struct rte_event_ring *ring = p->cq_worker_ring;
+
+ /* check that all previous dequeues have been released */
+ if (p->implicit_release) {
+ struct sw_evdev *sw = (void *)p->sw;
+ uint32_t credit_update_quanta = sw->credit_update_quanta;
+ uint16_t out_rels = p->outstanding_releases;
+ uint16_t i;
+ for (i = 0; i < out_rels; i++)
+ sw_event_release(p, i);
+
+ /* Replenish credits if enough releases are performed */
+ if (p->inflight_credits >= credit_update_quanta * 2) {
+ rte_atomic32_sub(&sw->inflights, credit_update_quanta);
+ p->inflight_credits -= credit_update_quanta;
+ }
+ }
+
+ /* returns number of events actually dequeued */
+ uint16_t ndeq = rte_event_ring_dequeue_burst(ring, ev, num, NULL);
+ if (unlikely(ndeq == 0)) {
+ p->zero_polls++;
+ p->total_polls++;
+ goto end;
+ }
+
+ p->outstanding_releases += ndeq;
+ p->last_dequeue_burst_sz = ndeq;
+ p->last_dequeue_ticks = rte_get_timer_cycles();
+ p->poll_buckets[(ndeq - 1) >> SW_DEQ_STAT_BUCKET_SHIFT]++;
+ p->total_polls++;
+
+end:
+ return ndeq;
+}
+
+uint16_t
+sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait)
+{
+ return sw_event_dequeue_burst(port, ev, 1, wait);
+}
diff --git a/src/spdk/dpdk/drivers/event/sw/sw_evdev_xstats.c b/src/spdk/dpdk/drivers/event/sw/sw_evdev_xstats.c
new file mode 100644
index 00000000..7a6caa64
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/sw/sw_evdev_xstats.c
@@ -0,0 +1,652 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <rte_event_ring.h>
+#include "sw_evdev.h"
+#include "iq_chunk.h"
+
+enum xstats_type {
+ /* common stats */
+ rx,
+ tx,
+ dropped,
+ inflight,
+ calls,
+ credits,
+ /* device instance specific */
+ no_iq_enq,
+ no_cq_enq,
+ /* port_specific */
+ rx_used,
+ rx_free,
+ tx_used,
+ tx_free,
+ pkt_cycles,
+ poll_return, /* for zero-count and used also for port bucket loop */
+ /* qid_specific */
+ iq_used,
+ /* qid port mapping specific */
+ pinned,
+ pkts, /* note: qid-to-port pkts */
+};
+
+typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
+ uint16_t obj_idx, /* port or queue id */
+ enum xstats_type stat, int extra_arg);
+
+struct sw_xstats_entry {
+ struct rte_event_dev_xstats_name name;
+ xstats_fn fn;
+ uint16_t obj_idx;
+ enum xstats_type stat;
+ enum rte_event_dev_xstats_mode mode;
+ int extra_arg;
+ uint8_t reset_allowed; /* when set, this value can be reset */
+ uint64_t reset_value; /* an offset to be taken away to emulate resets */
+};
+
+static uint64_t
+get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
+ enum xstats_type type, int extra_arg __rte_unused)
+{
+ switch (type) {
+ case rx: return sw->stats.rx_pkts;
+ case tx: return sw->stats.tx_pkts;
+ case dropped: return sw->stats.rx_dropped;
+ case calls: return sw->sched_called;
+ case no_iq_enq: return sw->sched_no_iq_enqueues;
+ case no_cq_enq: return sw->sched_no_cq_enqueues;
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg __rte_unused)
+{
+ const struct sw_port *p = &sw->ports[obj_idx];
+
+ switch (type) {
+ case rx: return p->stats.rx_pkts;
+ case tx: return p->stats.tx_pkts;
+ case dropped: return p->stats.rx_dropped;
+ case inflight: return p->inflights;
+ case pkt_cycles: return p->avg_pkt_ticks;
+ case calls: return p->total_polls;
+ case credits: return p->inflight_credits;
+ case poll_return: return p->zero_polls;
+ case rx_used: return rte_event_ring_count(p->rx_worker_ring);
+ case rx_free: return rte_event_ring_free_count(p->rx_worker_ring);
+ case tx_used: return rte_event_ring_count(p->cq_worker_ring);
+ case tx_free: return rte_event_ring_free_count(p->cq_worker_ring);
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_port_bucket_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg)
+{
+ const struct sw_port *p = &sw->ports[obj_idx];
+
+ switch (type) {
+ case poll_return: return p->poll_buckets[extra_arg];
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_qid_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg __rte_unused)
+{
+ const struct sw_qid *qid = &sw->qids[obj_idx];
+
+ switch (type) {
+ case rx: return qid->stats.rx_pkts;
+ case tx: return qid->stats.tx_pkts;
+ case dropped: return qid->stats.rx_dropped;
+ case inflight:
+ do {
+ uint64_t infl = 0;
+ unsigned int i;
+ for (i = 0; i < RTE_DIM(qid->fids); i++)
+ infl += qid->fids[i].pcount;
+ return infl;
+ } while (0);
+ break;
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_qid_iq_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg)
+{
+ const struct sw_qid *qid = &sw->qids[obj_idx];
+ const int iq_idx = extra_arg;
+
+ switch (type) {
+ case iq_used: return iq_count(&qid->iq[iq_idx]);
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_qid_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg)
+{
+ const struct sw_qid *qid = &sw->qids[obj_idx];
+ uint16_t port = extra_arg;
+
+ switch (type) {
+ case pinned:
+ do {
+ uint64_t pin = 0;
+ unsigned int i;
+ for (i = 0; i < RTE_DIM(qid->fids); i++)
+ if (qid->fids[i].cq == port)
+ pin++;
+ return pin;
+ } while (0);
+ break;
+ case pkts:
+ return qid->to_port[port];
+ default: return -1;
+ }
+}
+
+int
+sw_xstats_init(struct sw_evdev *sw)
+{
+ /*
+ * define the stats names and types. Used to build up the device
+ * xstats array
+ * There are multiple set of stats:
+ * - device-level,
+ * - per-port,
+ * - per-port-dequeue-burst-sizes
+ * - per-qid,
+ * - per-iq
+ * - per-port-per-qid
+ *
+ * For each of these sets, we have three parallel arrays, one for the
+ * names, the other for the stat type parameter to be passed in the fn
+ * call to get that stat. The third array allows resetting or not.
+ * All these arrays must be kept in sync
+ */
+ static const char * const dev_stats[] = { "rx", "tx", "drop",
+ "sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
+ };
+ static const enum xstats_type dev_types[] = { rx, tx, dropped,
+ calls, no_iq_enq, no_cq_enq,
+ };
+ /* all device stats are allowed to be reset */
+
+ static const char * const port_stats[] = {"rx", "tx", "drop",
+ "inflight", "avg_pkt_cycles", "credits",
+ "rx_ring_used", "rx_ring_free",
+ "cq_ring_used", "cq_ring_free",
+ "dequeue_calls", "dequeues_returning_0",
+ };
+ static const enum xstats_type port_types[] = { rx, tx, dropped,
+ inflight, pkt_cycles, credits,
+ rx_used, rx_free, tx_used, tx_free,
+ calls, poll_return,
+ };
+ static const uint8_t port_reset_allowed[] = {1, 1, 1,
+ 0, 1, 0,
+ 0, 0, 0, 0,
+ 1, 1,
+ };
+
+ static const char * const port_bucket_stats[] = {
+ "dequeues_returning" };
+ static const enum xstats_type port_bucket_types[] = { poll_return };
+ /* all bucket dequeues are allowed to be reset, handled in loop below */
+
+ static const char * const qid_stats[] = {"rx", "tx", "drop",
+ "inflight"
+ };
+ static const enum xstats_type qid_types[] = { rx, tx, dropped,
+ inflight
+ };
+ static const uint8_t qid_reset_allowed[] = {1, 1, 1,
+ 0
+ };
+
+ static const char * const qid_iq_stats[] = { "used" };
+ static const enum xstats_type qid_iq_types[] = { iq_used };
+ /* reset allowed */
+
+ static const char * const qid_port_stats[] = { "pinned_flows",
+ "packets"
+ };
+ static const enum xstats_type qid_port_types[] = { pinned, pkts };
+ static const uint8_t qid_port_reset_allowed[] = {0, 1};
+ /* reset allowed */
+ /* ---- end of stat definitions ---- */
+
+ /* check sizes, since a missed comma can lead to strings being
+ * joined by the compiler.
+ */
+ RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_iq_stats) != RTE_DIM(qid_iq_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_port_stats) != RTE_DIM(qid_port_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(port_bucket_stats) !=
+ RTE_DIM(port_bucket_types));
+
+ RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));
+
+ /* other vars */
+ const uint32_t cons_bkt_shift =
+ (MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT);
+ const unsigned int count = RTE_DIM(dev_stats) +
+ sw->port_count * RTE_DIM(port_stats) +
+ sw->port_count * RTE_DIM(port_bucket_stats) *
+ (cons_bkt_shift + 1) +
+ sw->qid_count * RTE_DIM(qid_stats) +
+ sw->qid_count * SW_IQS_MAX * RTE_DIM(qid_iq_stats) +
+ sw->qid_count * sw->port_count *
+ RTE_DIM(qid_port_stats);
+ unsigned int i, port, qid, iq, bkt, stat = 0;
+
+ sw->xstats = rte_zmalloc_socket(NULL, sizeof(sw->xstats[0]) * count, 0,
+ sw->data->socket_id);
+ if (sw->xstats == NULL)
+ return -ENOMEM;
+
+#define sname sw->xstats[stat].name.name
+ for (i = 0; i < RTE_DIM(dev_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_dev_stat,
+ .stat = dev_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_DEVICE,
+ .reset_allowed = 1,
+ };
+ snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
+ }
+ sw->xstats_count_mode_dev = stat;
+
+ for (port = 0; port < sw->port_count; port++) {
+ sw->xstats_offset_for_port[port] = stat;
+
+ uint32_t count_offset = stat;
+
+ for (i = 0; i < RTE_DIM(port_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_port_stat,
+ .obj_idx = port,
+ .stat = port_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_PORT,
+ .reset_allowed = port_reset_allowed[i],
+ };
+ snprintf(sname, sizeof(sname), "port_%u_%s",
+ port, port_stats[i]);
+ }
+
+ for (bkt = 0; bkt < (rte_event_ring_get_capacity(
+ sw->ports[port].cq_worker_ring) >>
+ SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
+ for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_port_bucket_stat,
+ .obj_idx = port,
+ .stat = port_bucket_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_PORT,
+ .extra_arg = bkt,
+ .reset_allowed = 1,
+ };
+ snprintf(sname, sizeof(sname),
+ "port_%u_%s_%u-%u",
+ port, port_bucket_stats[i],
+ (bkt << SW_DEQ_STAT_BUCKET_SHIFT) + 1,
+ (bkt + 1) << SW_DEQ_STAT_BUCKET_SHIFT);
+ stat++;
+ }
+ }
+
+ sw->xstats_count_per_port[port] = stat - count_offset;
+ }
+
+ sw->xstats_count_mode_port = stat - sw->xstats_count_mode_dev;
+
+ for (qid = 0; qid < sw->qid_count; qid++) {
+ uint32_t count_offset = stat;
+ sw->xstats_offset_for_qid[qid] = stat;
+
+ for (i = 0; i < RTE_DIM(qid_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_qid_stat,
+ .obj_idx = qid,
+ .stat = qid_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+ .reset_allowed = qid_reset_allowed[i],
+ };
+ snprintf(sname, sizeof(sname), "qid_%u_%s",
+ qid, qid_stats[i]);
+ }
+ for (iq = 0; iq < SW_IQS_MAX; iq++)
+ for (i = 0; i < RTE_DIM(qid_iq_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_qid_iq_stat,
+ .obj_idx = qid,
+ .stat = qid_iq_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+ .extra_arg = iq,
+ .reset_allowed = 0,
+ };
+ snprintf(sname, sizeof(sname),
+ "qid_%u_iq_%u_%s",
+ qid, iq,
+ qid_iq_stats[i]);
+ }
+
+ for (port = 0; port < sw->port_count; port++)
+ for (i = 0; i < RTE_DIM(qid_port_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_qid_port_stat,
+ .obj_idx = qid,
+ .stat = qid_port_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+ .extra_arg = port,
+ .reset_allowed =
+ qid_port_reset_allowed[i],
+ };
+ snprintf(sname, sizeof(sname),
+ "qid_%u_port_%u_%s",
+ qid, port,
+ qid_port_stats[i]);
+ }
+
+ sw->xstats_count_per_qid[qid] = stat - count_offset;
+ }
+
+ sw->xstats_count_mode_queue = stat -
+ (sw->xstats_count_mode_dev + sw->xstats_count_mode_port);
+#undef sname
+
+ sw->xstats_count = stat;
+
+ return stat;
+}
+
+int
+sw_xstats_uninit(struct sw_evdev *sw)
+{
+ rte_free(sw->xstats);
+ sw->xstats_count = 0;
+ return 0;
+}
+
+int
+sw_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ const struct sw_evdev *sw = sw_pmd_priv_const(dev);
+ unsigned int i;
+ unsigned int xidx = 0;
+ RTE_SET_USED(mode);
+ RTE_SET_USED(queue_port_id);
+
+ uint32_t xstats_mode_count = 0;
+ uint32_t start_offset = 0;
+
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ xstats_mode_count = sw->xstats_count_mode_dev;
+ break;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id >= (signed int)sw->port_count)
+ break;
+ xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
+ start_offset = sw->xstats_offset_for_port[queue_port_id];
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ if (queue_port_id >= (signed int)sw->qid_count)
+ break;
+ xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
+ start_offset = sw->xstats_offset_for_qid[queue_port_id];
+ break;
+ default:
+ SW_LOG_ERR("Invalid mode received in sw_xstats_get_names()\n");
+ return -EINVAL;
+ };
+
+ if (xstats_mode_count > size || !ids || !xstats_names)
+ return xstats_mode_count;
+
+ for (i = 0; i < sw->xstats_count && xidx < size; i++) {
+ if (sw->xstats[i].mode != mode)
+ continue;
+
+ if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
+ queue_port_id != sw->xstats[i].obj_idx)
+ continue;
+
+ xstats_names[xidx] = sw->xstats[i].name;
+ if (ids)
+ ids[xidx] = start_offset + xidx;
+ xidx++;
+ }
+ return xidx;
+}
+
+static int
+sw_xstats_update(struct sw_evdev *sw, enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id, const unsigned int ids[],
+ uint64_t values[], unsigned int n, const uint32_t reset,
+ const uint32_t ret_if_n_lt_nstats)
+{
+ unsigned int i;
+ unsigned int xidx = 0;
+ RTE_SET_USED(mode);
+ RTE_SET_USED(queue_port_id);
+
+ uint32_t xstats_mode_count = 0;
+
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ xstats_mode_count = sw->xstats_count_mode_dev;
+ break;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id >= (signed int)sw->port_count)
+ goto invalid_value;
+ xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ if (queue_port_id >= (signed int)sw->qid_count)
+ goto invalid_value;
+ xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
+ break;
+ default:
+ SW_LOG_ERR("Invalid mode received in sw_xstats_get()\n");
+ goto invalid_value;
+ };
+
+ /* this function can check num stats and return them (xstats_get() style
+ * behaviour) or ignore n for reset() of a single stat style behaviour.
+ */
+ if (ret_if_n_lt_nstats && xstats_mode_count > n)
+ return xstats_mode_count;
+
+ for (i = 0; i < n && xidx < xstats_mode_count; i++) {
+ struct sw_xstats_entry *xs = &sw->xstats[ids[i]];
+ if (ids[i] > sw->xstats_count || xs->mode != mode)
+ continue;
+
+ if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
+ queue_port_id != xs->obj_idx)
+ continue;
+
+ uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
+ - xs->reset_value;
+
+ if (values)
+ values[xidx] = val;
+
+ if (xs->reset_allowed && reset)
+ xs->reset_value = val;
+
+ xidx++;
+ }
+
+ return xidx;
+invalid_value:
+ return -EINVAL;
+}
+
+int
+sw_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ const uint32_t reset = 0;
+ const uint32_t ret_n_lt_stats = 0;
+ return sw_xstats_update(sw, mode, queue_port_id, ids, values, n,
+ reset, ret_n_lt_stats);
+}
+
+uint64_t
+sw_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id)
+{
+ const struct sw_evdev *sw = sw_pmd_priv_const(dev);
+ unsigned int i;
+
+ for (i = 0; i < sw->xstats_count; i++) {
+ struct sw_xstats_entry *xs = &sw->xstats[i];
+ if (strncmp(xs->name.name, name,
+ RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0){
+ if (id != NULL)
+ *id = i;
+ return xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
+ - xs->reset_value;
+ }
+ }
+ if (id != NULL)
+ *id = (uint32_t)-1;
+ return (uint64_t)-1;
+}
+
+static void
+sw_xstats_reset_range(struct sw_evdev *sw, uint32_t start, uint32_t num)
+{
+ uint32_t i;
+ for (i = start; i < start + num; i++) {
+ struct sw_xstats_entry *xs = &sw->xstats[i];
+ if (!xs->reset_allowed)
+ continue;
+
+ uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
+ - xs->reset_value;
+ xs->reset_value = val;
+ }
+}
+
+static int
+sw_xstats_reset_queue(struct sw_evdev *sw, uint8_t queue_id,
+ const uint32_t ids[], uint32_t nb_ids)
+{
+ const uint32_t reset = 1;
+ const uint32_t ret_n_lt_stats = 0;
+ if (ids) {
+ uint32_t nb_reset = sw_xstats_update(sw,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ queue_id, ids, NULL, nb_ids,
+ reset, ret_n_lt_stats);
+ return nb_reset == nb_ids ? 0 : -EINVAL;
+ }
+
+ if (ids == NULL)
+ sw_xstats_reset_range(sw, sw->xstats_offset_for_qid[queue_id],
+ sw->xstats_count_per_qid[queue_id]);
+
+ return 0;
+}
+
+static int
+sw_xstats_reset_port(struct sw_evdev *sw, uint8_t port_id,
+ const uint32_t ids[], uint32_t nb_ids)
+{
+ const uint32_t reset = 1;
+ const uint32_t ret_n_lt_stats = 0;
+ int offset = sw->xstats_offset_for_port[port_id];
+ int nb_stat = sw->xstats_count_per_port[port_id];
+
+ if (ids) {
+ uint32_t nb_reset = sw_xstats_update(sw,
+ RTE_EVENT_DEV_XSTATS_PORT, port_id,
+ ids, NULL, nb_ids,
+ reset, ret_n_lt_stats);
+ return nb_reset == nb_ids ? 0 : -EINVAL;
+ }
+
+ sw_xstats_reset_range(sw, offset, nb_stat);
+ return 0;
+}
+
+static int
+sw_xstats_reset_dev(struct sw_evdev *sw, const uint32_t ids[], uint32_t nb_ids)
+{
+ uint32_t i;
+ if (ids) {
+ for (i = 0; i < nb_ids; i++) {
+ uint32_t id = ids[i];
+ if (id >= sw->xstats_count_mode_dev)
+ return -EINVAL;
+ sw_xstats_reset_range(sw, id, 1);
+ }
+ } else {
+ for (i = 0; i < sw->xstats_count_mode_dev; i++)
+ sw_xstats_reset_range(sw, i, 1);
+ }
+
+ return 0;
+}
+
+int
+sw_xstats_reset(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ uint32_t i, err;
+
+ /* handle -1 for queue_port_id here, looping over all ports/queues */
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ sw_xstats_reset_dev(sw, ids, nb_ids);
+ break;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id == -1) {
+ for (i = 0; i < sw->port_count; i++) {
+ err = sw_xstats_reset_port(sw, i, ids, nb_ids);
+ if (err)
+ return -EINVAL;
+ }
+ } else if (queue_port_id < (int16_t)sw->port_count)
+ sw_xstats_reset_port(sw, queue_port_id, ids, nb_ids);
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ if (queue_port_id == -1) {
+ for (i = 0; i < sw->qid_count; i++) {
+ err = sw_xstats_reset_queue(sw, i, ids, nb_ids);
+ if (err)
+ return -EINVAL;
+ }
+ } else if (queue_port_id < (int16_t)sw->qid_count)
+ sw_xstats_reset_queue(sw, queue_port_id, ids, nb_ids);
+ break;
+ };
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/mempool/Makefile b/src/spdk/dpdk/drivers/mempool/Makefile
new file mode 100644
index 00000000..28c2e836
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_BUCKET) += bucket
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2
+endif
+DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += ring
+DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += stack
+DIRS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/mempool/bucket/Makefile b/src/spdk/dpdk/drivers/mempool/bucket/Makefile
new file mode 100644
index 00000000..7364916b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/bucket/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Copyright (c) 2017-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_bucket.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+
+EXPORT_MAP := rte_mempool_bucket_version.map
+
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_DRIVER_MEMPOOL_BUCKET) += rte_mempool_bucket.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/mempool/bucket/meson.build b/src/spdk/dpdk/drivers/mempool/bucket/meson.build
new file mode 100644
index 00000000..618d7912
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/bucket/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Copyright (c) 2017-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+sources = files('rte_mempool_bucket.c')
diff --git a/src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket.c b/src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket.c
new file mode 100644
index 00000000..78d2b9d0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket.c
@@ -0,0 +1,628 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_errno.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+/*
+ * The general idea of the bucket mempool driver is as follows.
+ * We keep track of physically contiguous groups (buckets) of objects
+ * of a certain size. Every such a group has a counter that is
+ * incremented every time an object from that group is enqueued.
+ * Until the bucket is full, no objects from it are eligible for allocation.
+ * If a request is made to dequeue a multiply of bucket size, it is
+ * satisfied by returning the whole buckets, instead of separate objects.
+ */
+
+
+struct bucket_header {
+ unsigned int lcore_id;
+ uint8_t fill_cnt;
+};
+
+struct bucket_stack {
+ unsigned int top;
+ unsigned int limit;
+ void *objects[];
+};
+
+struct bucket_data {
+ unsigned int header_size;
+ unsigned int total_elt_size;
+ unsigned int obj_per_bucket;
+ unsigned int bucket_stack_thresh;
+ uintptr_t bucket_page_mask;
+ struct rte_ring *shared_bucket_ring;
+ struct bucket_stack *buckets[RTE_MAX_LCORE];
+ /*
+ * Multi-producer single-consumer ring to hold objects that are
+ * returned to the mempool at a different lcore than initially
+ * dequeued
+ */
+ struct rte_ring *adoption_buffer_rings[RTE_MAX_LCORE];
+ struct rte_ring *shared_orphan_ring;
+ struct rte_mempool *pool;
+ unsigned int bucket_mem_size;
+};
+
+static struct bucket_stack *
+bucket_stack_create(const struct rte_mempool *mp, unsigned int n_elts)
+{
+ struct bucket_stack *stack;
+
+ stack = rte_zmalloc_socket("bucket_stack",
+ sizeof(struct bucket_stack) +
+ n_elts * sizeof(void *),
+ RTE_CACHE_LINE_SIZE,
+ mp->socket_id);
+ if (stack == NULL)
+ return NULL;
+ stack->limit = n_elts;
+ stack->top = 0;
+
+ return stack;
+}
+
+static void
+bucket_stack_push(struct bucket_stack *stack, void *obj)
+{
+ RTE_ASSERT(stack->top < stack->limit);
+ stack->objects[stack->top++] = obj;
+}
+
+static void *
+bucket_stack_pop_unsafe(struct bucket_stack *stack)
+{
+ RTE_ASSERT(stack->top > 0);
+ return stack->objects[--stack->top];
+}
+
+static void *
+bucket_stack_pop(struct bucket_stack *stack)
+{
+ if (stack->top == 0)
+ return NULL;
+ return bucket_stack_pop_unsafe(stack);
+}
+
+static int
+bucket_enqueue_single(struct bucket_data *bd, void *obj)
+{
+ int rc = 0;
+ uintptr_t addr = (uintptr_t)obj;
+ struct bucket_header *hdr;
+ unsigned int lcore_id = rte_lcore_id();
+
+ addr &= bd->bucket_page_mask;
+ hdr = (struct bucket_header *)addr;
+
+ if (likely(hdr->lcore_id == lcore_id)) {
+ if (hdr->fill_cnt < bd->obj_per_bucket - 1) {
+ hdr->fill_cnt++;
+ } else {
+ hdr->fill_cnt = 0;
+ /* Stack is big enough to put all buckets */
+ bucket_stack_push(bd->buckets[lcore_id], hdr);
+ }
+ } else if (hdr->lcore_id != LCORE_ID_ANY) {
+ struct rte_ring *adopt_ring =
+ bd->adoption_buffer_rings[hdr->lcore_id];
+
+ rc = rte_ring_enqueue(adopt_ring, obj);
+ /* Ring is big enough to put all objects */
+ RTE_ASSERT(rc == 0);
+ } else if (hdr->fill_cnt < bd->obj_per_bucket - 1) {
+ hdr->fill_cnt++;
+ } else {
+ hdr->fill_cnt = 0;
+ rc = rte_ring_enqueue(bd->shared_bucket_ring, hdr);
+ /* Ring is big enough to put all buckets */
+ RTE_ASSERT(rc == 0);
+ }
+
+ return rc;
+}
+
+static int
+bucket_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ struct bucket_stack *local_stack = bd->buckets[rte_lcore_id()];
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < n; i++) {
+ rc = bucket_enqueue_single(bd, obj_table[i]);
+ RTE_ASSERT(rc == 0);
+ }
+ if (local_stack->top > bd->bucket_stack_thresh) {
+ rte_ring_enqueue_bulk(bd->shared_bucket_ring,
+ &local_stack->objects
+ [bd->bucket_stack_thresh],
+ local_stack->top -
+ bd->bucket_stack_thresh,
+ NULL);
+ local_stack->top = bd->bucket_stack_thresh;
+ }
+ return rc;
+}
+
+static void **
+bucket_fill_obj_table(const struct bucket_data *bd, void **pstart,
+ void **obj_table, unsigned int n)
+{
+ unsigned int i;
+ uint8_t *objptr = *pstart;
+
+ for (objptr += bd->header_size, i = 0; i < n;
+ i++, objptr += bd->total_elt_size)
+ *obj_table++ = objptr;
+ *pstart = objptr;
+ return obj_table;
+}
+
+static int
+bucket_dequeue_orphans(struct bucket_data *bd, void **obj_table,
+ unsigned int n_orphans)
+{
+ unsigned int i;
+ int rc;
+ uint8_t *objptr;
+
+ rc = rte_ring_dequeue_bulk(bd->shared_orphan_ring, obj_table,
+ n_orphans, NULL);
+ if (unlikely(rc != (int)n_orphans)) {
+ struct bucket_header *hdr;
+
+ objptr = bucket_stack_pop(bd->buckets[rte_lcore_id()]);
+ hdr = (struct bucket_header *)objptr;
+
+ if (objptr == NULL) {
+ rc = rte_ring_dequeue(bd->shared_bucket_ring,
+ (void **)&objptr);
+ if (rc != 0) {
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ hdr = (struct bucket_header *)objptr;
+ hdr->lcore_id = rte_lcore_id();
+ }
+ hdr->fill_cnt = 0;
+ bucket_fill_obj_table(bd, (void **)&objptr, obj_table,
+ n_orphans);
+ for (i = n_orphans; i < bd->obj_per_bucket; i++,
+ objptr += bd->total_elt_size) {
+ rc = rte_ring_enqueue(bd->shared_orphan_ring,
+ objptr);
+ if (rc != 0) {
+ RTE_ASSERT(0);
+ rte_errno = -rc;
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+bucket_dequeue_buckets(struct bucket_data *bd, void **obj_table,
+ unsigned int n_buckets)
+{
+ struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+ unsigned int n_buckets_from_stack = RTE_MIN(n_buckets, cur_stack->top);
+ void **obj_table_base = obj_table;
+
+ n_buckets -= n_buckets_from_stack;
+ while (n_buckets_from_stack-- > 0) {
+ void *obj = bucket_stack_pop_unsafe(cur_stack);
+
+ obj_table = bucket_fill_obj_table(bd, &obj, obj_table,
+ bd->obj_per_bucket);
+ }
+ while (n_buckets-- > 0) {
+ struct bucket_header *hdr;
+
+ if (unlikely(rte_ring_dequeue(bd->shared_bucket_ring,
+ (void **)&hdr) != 0)) {
+ /*
+ * Return the already-dequeued buffers
+ * back to the mempool
+ */
+ bucket_enqueue(bd->pool, obj_table_base,
+ obj_table - obj_table_base);
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ hdr->lcore_id = rte_lcore_id();
+ obj_table = bucket_fill_obj_table(bd, (void **)&hdr,
+ obj_table,
+ bd->obj_per_bucket);
+ }
+
+ return 0;
+}
+
+static int
+bucket_adopt_orphans(struct bucket_data *bd)
+{
+ int rc = 0;
+ struct rte_ring *adopt_ring =
+ bd->adoption_buffer_rings[rte_lcore_id()];
+
+ if (unlikely(!rte_ring_empty(adopt_ring))) {
+ void *orphan;
+
+ while (rte_ring_sc_dequeue(adopt_ring, &orphan) == 0) {
+ rc = bucket_enqueue_single(bd, orphan);
+ RTE_ASSERT(rc == 0);
+ }
+ }
+ return rc;
+}
+
+static int
+bucket_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int n_buckets = n / bd->obj_per_bucket;
+ unsigned int n_orphans = n - n_buckets * bd->obj_per_bucket;
+ int rc = 0;
+
+ bucket_adopt_orphans(bd);
+
+ if (unlikely(n_orphans > 0)) {
+ rc = bucket_dequeue_orphans(bd, obj_table +
+ (n_buckets * bd->obj_per_bucket),
+ n_orphans);
+ if (rc != 0)
+ return rc;
+ }
+
+ if (likely(n_buckets > 0)) {
+ rc = bucket_dequeue_buckets(bd, obj_table, n_buckets);
+ if (unlikely(rc != 0) && n_orphans > 0) {
+ rte_ring_enqueue_bulk(bd->shared_orphan_ring,
+ obj_table + (n_buckets *
+ bd->obj_per_bucket),
+ n_orphans, NULL);
+ }
+ }
+
+ return rc;
+}
+
+static int
+bucket_dequeue_contig_blocks(struct rte_mempool *mp, void **first_obj_table,
+ unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ const uint32_t header_size = bd->header_size;
+ struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+ unsigned int n_buckets_from_stack = RTE_MIN(n, cur_stack->top);
+ struct bucket_header *hdr;
+ void **first_objp = first_obj_table;
+
+ bucket_adopt_orphans(bd);
+
+ n -= n_buckets_from_stack;
+ while (n_buckets_from_stack-- > 0) {
+ hdr = bucket_stack_pop_unsafe(cur_stack);
+ *first_objp++ = (uint8_t *)hdr + header_size;
+ }
+ if (n > 0) {
+ if (unlikely(rte_ring_dequeue_bulk(bd->shared_bucket_ring,
+ first_objp, n, NULL) != n)) {
+ /* Return the already dequeued buckets */
+ while (first_objp-- != first_obj_table) {
+ bucket_stack_push(cur_stack,
+ (uint8_t *)*first_objp -
+ header_size);
+ }
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ while (n-- > 0) {
+ hdr = (struct bucket_header *)*first_objp;
+ hdr->lcore_id = rte_lcore_id();
+ *first_objp++ = (uint8_t *)hdr + header_size;
+ }
+ }
+
+ return 0;
+}
+
+static void
+count_underfilled_buckets(struct rte_mempool *mp,
+ void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ __rte_unused unsigned int mem_idx)
+{
+ unsigned int *pcount = opaque;
+ const struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz =
+ (unsigned int)(~bd->bucket_page_mask + 1);
+ uintptr_t align;
+ uint8_t *iter;
+
+ align = (uintptr_t)RTE_PTR_ALIGN_CEIL(memhdr->addr, bucket_page_sz) -
+ (uintptr_t)memhdr->addr;
+
+ for (iter = (uint8_t *)memhdr->addr + align;
+ iter < (uint8_t *)memhdr->addr + memhdr->len;
+ iter += bucket_page_sz) {
+ struct bucket_header *hdr = (struct bucket_header *)iter;
+
+ *pcount += hdr->fill_cnt;
+ }
+}
+
+static unsigned int
+bucket_get_count(const struct rte_mempool *mp)
+{
+ const struct bucket_data *bd = mp->pool_data;
+ unsigned int count =
+ bd->obj_per_bucket * rte_ring_count(bd->shared_bucket_ring) +
+ rte_ring_count(bd->shared_orphan_ring);
+ unsigned int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
+ count += bd->obj_per_bucket * bd->buckets[i]->top +
+ rte_ring_count(bd->adoption_buffer_rings[i]);
+ }
+
+ rte_mempool_mem_iter((struct rte_mempool *)(uintptr_t)mp,
+ count_underfilled_buckets, &count);
+
+ return count;
+}
+
+static int
+bucket_alloc(struct rte_mempool *mp)
+{
+ int rg_flags = 0;
+ int rc = 0;
+ char rg_name[RTE_RING_NAMESIZE];
+ struct bucket_data *bd;
+ unsigned int i;
+ unsigned int bucket_header_size;
+
+ bd = rte_zmalloc_socket("bucket_pool", sizeof(*bd),
+ RTE_CACHE_LINE_SIZE, mp->socket_id);
+ if (bd == NULL) {
+ rc = -ENOMEM;
+ goto no_mem_for_data;
+ }
+ bd->pool = mp;
+ if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ bucket_header_size = sizeof(struct bucket_header);
+ else
+ bucket_header_size = RTE_CACHE_LINE_SIZE;
+ RTE_BUILD_BUG_ON(sizeof(struct bucket_header) > RTE_CACHE_LINE_SIZE);
+ bd->header_size = mp->header_size + bucket_header_size;
+ bd->total_elt_size = mp->header_size + mp->elt_size + mp->trailer_size;
+ bd->bucket_mem_size = RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB * 1024;
+ bd->obj_per_bucket = (bd->bucket_mem_size - bucket_header_size) /
+ bd->total_elt_size;
+ bd->bucket_page_mask = ~(rte_align64pow2(bd->bucket_mem_size) - 1);
+ /* eventually this should be a tunable parameter */
+ bd->bucket_stack_thresh = (mp->size / bd->obj_per_bucket) * 4 / 3;
+
+ if (mp->flags & MEMPOOL_F_SP_PUT)
+ rg_flags |= RING_F_SP_ENQ;
+ if (mp->flags & MEMPOOL_F_SC_GET)
+ rg_flags |= RING_F_SC_DEQ;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
+ bd->buckets[i] =
+ bucket_stack_create(mp, mp->size / bd->obj_per_bucket);
+ if (bd->buckets[i] == NULL) {
+ rc = -ENOMEM;
+ goto no_mem_for_stacks;
+ }
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".a%u", mp->name, i);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto no_mem_for_stacks;
+ }
+ bd->adoption_buffer_rings[i] =
+ rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
+ mp->socket_id,
+ rg_flags | RING_F_SC_DEQ);
+ if (bd->adoption_buffer_rings[i] == NULL) {
+ rc = -rte_errno;
+ goto no_mem_for_stacks;
+ }
+ }
+
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".0", mp->name);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto invalid_shared_orphan_ring;
+ }
+ bd->shared_orphan_ring =
+ rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
+ mp->socket_id, rg_flags);
+ if (bd->shared_orphan_ring == NULL) {
+ rc = -rte_errno;
+ goto cannot_create_shared_orphan_ring;
+ }
+
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".1", mp->name);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto invalid_shared_bucket_ring;
+ }
+ bd->shared_bucket_ring =
+ rte_ring_create(rg_name,
+ rte_align32pow2((mp->size + 1) /
+ bd->obj_per_bucket),
+ mp->socket_id, rg_flags);
+ if (bd->shared_bucket_ring == NULL) {
+ rc = -rte_errno;
+ goto cannot_create_shared_bucket_ring;
+ }
+
+ mp->pool_data = bd;
+
+ return 0;
+
+cannot_create_shared_bucket_ring:
+invalid_shared_bucket_ring:
+ rte_ring_free(bd->shared_orphan_ring);
+cannot_create_shared_orphan_ring:
+invalid_shared_orphan_ring:
+no_mem_for_stacks:
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ rte_free(bd->buckets[i]);
+ rte_ring_free(bd->adoption_buffer_rings[i]);
+ }
+ rte_free(bd);
+no_mem_for_data:
+ rte_errno = -rc;
+ return rc;
+}
+
+static void
+bucket_free(struct rte_mempool *mp)
+{
+ unsigned int i;
+ struct bucket_data *bd = mp->pool_data;
+
+ if (bd == NULL)
+ return;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ rte_free(bd->buckets[i]);
+ rte_ring_free(bd->adoption_buffer_rings[i]);
+ }
+
+ rte_ring_free(bd->shared_orphan_ring);
+ rte_ring_free(bd->shared_bucket_ring);
+
+ rte_free(bd);
+}
+
+static ssize_t
+bucket_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
+ __rte_unused uint32_t pg_shift, size_t *min_total_elt_size,
+ size_t *align)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz;
+
+ if (bd == NULL)
+ return -EINVAL;
+
+ bucket_page_sz = rte_align32pow2(bd->bucket_mem_size);
+ *align = bucket_page_sz;
+ *min_total_elt_size = bucket_page_sz;
+ /*
+ * Each bucket occupies its own block aligned to
+ * bucket_page_sz, so the required amount of memory is
+ * a multiple of bucket_page_sz.
+ * We also need extra space for a bucket header
+ */
+ return ((obj_num + bd->obj_per_bucket - 1) /
+ bd->obj_per_bucket) * bucket_page_sz;
+}
+
+static int
+bucket_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz;
+ unsigned int bucket_header_sz;
+ unsigned int n_objs;
+ uintptr_t align;
+ uint8_t *iter;
+ int rc;
+
+ if (bd == NULL)
+ return -EINVAL;
+
+ bucket_page_sz = rte_align32pow2(bd->bucket_mem_size);
+ align = RTE_PTR_ALIGN_CEIL((uintptr_t)vaddr, bucket_page_sz) -
+ (uintptr_t)vaddr;
+
+ bucket_header_sz = bd->header_size - mp->header_size;
+ if (iova != RTE_BAD_IOVA)
+ iova += align + bucket_header_sz;
+
+ for (iter = (uint8_t *)vaddr + align, n_objs = 0;
+ iter < (uint8_t *)vaddr + len && n_objs < max_objs;
+ iter += bucket_page_sz) {
+ struct bucket_header *hdr = (struct bucket_header *)iter;
+ unsigned int chunk_len = bd->bucket_mem_size;
+
+ if ((size_t)(iter - (uint8_t *)vaddr) + chunk_len > len)
+ chunk_len = len - (iter - (uint8_t *)vaddr);
+ if (chunk_len <= bucket_header_sz)
+ break;
+ chunk_len -= bucket_header_sz;
+
+ hdr->fill_cnt = 0;
+ hdr->lcore_id = LCORE_ID_ANY;
+ rc = rte_mempool_op_populate_default(mp,
+ RTE_MIN(bd->obj_per_bucket,
+ max_objs - n_objs),
+ iter + bucket_header_sz,
+ iova, chunk_len,
+ obj_cb, obj_cb_arg);
+ if (rc < 0)
+ return rc;
+ n_objs += rc;
+ if (iova != RTE_BAD_IOVA)
+ iova += bucket_page_sz;
+ }
+
+ return n_objs;
+}
+
+static int
+bucket_get_info(const struct rte_mempool *mp, struct rte_mempool_info *info)
+{
+ struct bucket_data *bd = mp->pool_data;
+
+ info->contig_block_size = bd->obj_per_bucket;
+ return 0;
+}
+
+
+static const struct rte_mempool_ops ops_bucket = {
+ .name = "bucket",
+ .alloc = bucket_alloc,
+ .free = bucket_free,
+ .enqueue = bucket_enqueue,
+ .dequeue = bucket_dequeue,
+ .get_count = bucket_get_count,
+ .calc_mem_size = bucket_calc_mem_size,
+ .populate = bucket_populate,
+ .get_info = bucket_get_info,
+ .dequeue_contig_blocks = bucket_dequeue_contig_blocks,
+};
+
+
+MEMPOOL_REGISTER_OPS(ops_bucket);
diff --git a/src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket_version.map b/src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa/Makefile b/src/spdk/dpdk/drivers/mempool/dpaa/Makefile
new file mode 100644
index 00000000..da8da1e9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_dpaa.a
+
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -D _GNU_SOURCE
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
+CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
+
+# versioning export map
+EXPORT_MAP := rte_mempool_dpaa_version.map
+
+# Lbrary version
+LIBABIVER := 1
+
+# depends on dpaa bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa_mempool.c
+
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.c b/src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.c
new file mode 100644
index 00000000..10c536bf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.c
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+/* System headers */
+#include <stdio.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+
+#include <dpaa_mempool.h>
+
+/* List of all the memseg information locally maintained in dpaa driver. This
+ * is to optimize the PA_to_VA searches until a better mechanism (algo) is
+ * available.
+ */
+struct dpaa_memseg_list rte_dpaa_memsegs
+ = TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);
+
+struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS];
+
+static int
+dpaa_mbuf_create_pool(struct rte_mempool *mp)
+{
+ struct bman_pool *bp;
+ struct bm_buffer bufs[8];
+ struct dpaa_bp_info *bp_info;
+ uint8_t bpid;
+ int num_bufs = 0, ret = 0;
+ struct bman_pool_params params = {
+ .flags = BMAN_POOL_FLAG_DYNAMIC_BPID
+ };
+
+ MEMPOOL_INIT_FUNC_TRACE();
+
+ bp = bman_new_pool(&params);
+ if (!bp) {
+ DPAA_MEMPOOL_ERR("bman_new_pool() failed");
+ return -ENODEV;
+ }
+ bpid = bman_get_params(bp)->bpid;
+
+ /* Drain the pool of anything already in it. */
+ do {
+ /* Acquire is all-or-nothing, so we drain in 8s,
+ * then in 1s for the remainder.
+ */
+ if (ret != 1)
+ ret = bman_acquire(bp, bufs, 8, 0);
+ if (ret < 8)
+ ret = bman_acquire(bp, bufs, 1, 0);
+ if (ret > 0)
+ num_bufs += ret;
+ } while (ret > 0);
+ if (num_bufs)
+ DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d",
+ num_bufs, bpid);
+
+ rte_dpaa_bpid_info[bpid].mp = mp;
+ rte_dpaa_bpid_info[bpid].bpid = bpid;
+ rte_dpaa_bpid_info[bpid].size = mp->elt_size;
+ rte_dpaa_bpid_info[bpid].bp = bp;
+ rte_dpaa_bpid_info[bpid].meta_data_size =
+ sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
+ rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
+ rte_dpaa_bpid_info[bpid].ptov_off = 0;
+ rte_dpaa_bpid_info[bpid].flags = 0;
+
+ bp_info = rte_malloc(NULL,
+ sizeof(struct dpaa_bp_info),
+ RTE_CACHE_LINE_SIZE);
+ if (!bp_info) {
+ DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info");
+ bman_free_pool(bp);
+ return -ENOMEM;
+ }
+
+ rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
+ sizeof(struct dpaa_bp_info));
+ mp->pool_data = (void *)bp_info;
+
+ DPAA_MEMPOOL_INFO("BMAN pool created for bpid =%d", bpid);
+ return 0;
+}
+
+static void
+dpaa_mbuf_free_pool(struct rte_mempool *mp)
+{
+ struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+ MEMPOOL_INIT_FUNC_TRACE();
+
+ if (bp_info) {
+ bman_free_pool(bp_info->bp);
+ DPAA_MEMPOOL_INFO("BMAN pool freed for bpid =%d",
+ bp_info->bpid);
+ rte_free(mp->pool_data);
+ mp->pool_data = NULL;
+ }
+}
+
+static void
+dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
+{
+ struct bm_buffer buf;
+ int ret;
+
+ DPAA_MEMPOOL_DEBUG("Free 0x%" PRIx64 " to bpid: %d",
+ addr, bp_info->bpid);
+
+ bm_buffer_set64(&buf, addr);
+retry:
+ ret = bman_release(bp_info->bp, &buf, 1, 0);
+ if (ret) {
+ DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
+ cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
+ goto retry;
+ }
+}
+
+static int
+dpaa_mbuf_free_bulk(struct rte_mempool *pool,
+ void *const *obj_table,
+ unsigned int n)
+{
+ struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
+ int ret;
+ unsigned int i = 0;
+
+ DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
+ n, bp_info->bpid);
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
+ ret);
+ return 0;
+ }
+ }
+
+ while (i < n) {
+ uint64_t phy = rte_mempool_virt2iova(obj_table[i]);
+
+ if (unlikely(!bp_info->ptov_off)) {
+ /* buffers are from single mem segment */
+ if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
+ bp_info->ptov_off = (size_t)obj_table[i] - phy;
+ rte_dpaa_bpid_info[bp_info->bpid].ptov_off
+ = bp_info->ptov_off;
+ }
+ }
+
+ dpaa_buf_free(bp_info,
+ (uint64_t)phy + bp_info->meta_data_size);
+ i = i + 1;
+ }
+
+ DPAA_MEMPOOL_DPDEBUG("freed %d buffers in bpid =%d",
+ n, bp_info->bpid);
+
+ return 0;
+}
+
+static int
+dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
+ void **obj_table,
+ unsigned int count)
+{
+ struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
+ struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
+ struct dpaa_bp_info *bp_info;
+ void *bufaddr;
+ int i, ret;
+ unsigned int n = 0;
+
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
+
+ DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
+ count, bp_info->bpid);
+
+ if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
+ DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
+ count);
+ return -1;
+ }
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
+ ret);
+ return -1;
+ }
+ }
+
+ while (n < count) {
+ /* Acquire is all-or-nothing, so we drain in 7s,
+ * then the remainder.
+ */
+ if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
+ ret = bman_acquire(bp_info->bp, bufs,
+ DPAA_MBUF_MAX_ACQ_REL, 0);
+ } else {
+ ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
+ }
+ /* In case of less than requested number of buffers available
+ * in pool, qbman_swp_acquire returns 0
+ */
+ if (ret <= 0) {
+ DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)",
+ ret);
+ /* The API expect the exact number of requested
+ * buffers. Releasing all buffers allocated
+ */
+ dpaa_mbuf_free_bulk(pool, obj_table, n);
+ return -ENOBUFS;
+ }
+ /* assigning mbuf from the acquired objects */
+ for (i = 0; (i < ret) && bufs[i].addr; i++) {
+ /* TODO-errata - objerved that bufs may be null
+ * i.e. first buffer is valid, remaining 6 buffers
+ * may be null.
+ */
+ bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
+ m[n] = (struct rte_mbuf *)((char *)bufaddr
+ - bp_info->meta_data_size);
+ DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
+ (void *)bufaddr, (void *)m[n]);
+ n++;
+ }
+ }
+
+ DPAA_MEMPOOL_DPDEBUG("Allocated %d buffers from bpid=%d",
+ n, bp_info->bpid);
+ return 0;
+}
+
+static unsigned int
+dpaa_mbuf_get_count(const struct rte_mempool *mp)
+{
+ struct dpaa_bp_info *bp_info;
+
+ MEMPOOL_INIT_FUNC_TRACE();
+
+ if (!mp || !mp->pool_data) {
+ DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
+ return 0;
+ }
+
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+ return bman_query_free_buffers(bp_info->bp);
+}
+
+static int
+dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t paddr, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct dpaa_bp_info *bp_info;
+ unsigned int total_elt_sz;
+
+ MEMPOOL_INIT_FUNC_TRACE();
+
+ if (!mp || !mp->pool_data) {
+ DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
+ return 0;
+ }
+
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+ DPAA_MEMPOOL_DEBUG("Req size %" PRIx64 " vs Available %u\n",
+ (uint64_t)len, total_elt_sz * mp->size);
+
+ /* Detect pool area has sufficient space for elements in this memzone */
+ if (len >= total_elt_sz * mp->size)
+ bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;
+ struct dpaa_memseg *ms;
+
+ /* For each memory chunk pinned to the Mempool, a linked list of the
+ * contained memsegs is created for searching when PA to VA
+ * conversion is required.
+ */
+ ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
+ if (!ms) {
+ DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
+ DPAA_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
+ /* If the element is not added, it would only lead to failure
+ * in searching for the element and the logic would Fallback
+ * to traditional DPDK memseg traversal code. So, this is not
+ * a blocking error - but, error would be printed on screen.
+ */
+ return 0;
+ }
+
+ ms->vaddr = vaddr;
+ ms->iova = paddr;
+ ms->len = len;
+ /* Head insertions are generally faster than tail insertions as the
+ * buffers pinned are picked from rear end.
+ */
+ TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+ obj_cb, obj_cb_arg);
+}
+
+struct rte_mempool_ops dpaa_mpool_ops = {
+ .name = DPAA_MEMPOOL_OPS_NAME,
+ .alloc = dpaa_mbuf_create_pool,
+ .free = dpaa_mbuf_free_pool,
+ .enqueue = dpaa_mbuf_free_bulk,
+ .dequeue = dpaa_mbuf_alloc_bulk,
+ .get_count = dpaa_mbuf_get_count,
+ .populate = dpaa_populate,
+};
+
+MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.h b/src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.h
new file mode 100644
index 00000000..092f326c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+#ifndef __DPAA_MEMPOOL_H__
+#define __DPAA_MEMPOOL_H__
+
+/* System headers */
+#include <stdio.h>
+#include <stdbool.h>
+#include <inttypes.h>
+#include <unistd.h>
+
+#include <rte_mempool.h>
+
+#include <rte_dpaa_bus.h>
+#include <rte_dpaa_logs.h>
+
+#include <fsl_usd.h>
+#include <fsl_bman.h>
+
+#define CPU_SPIN_BACKOFF_CYCLES 512
+
+/* total number of bpools on SoC */
+#define DPAA_MAX_BPOOLS 256
+
+/* Maximum release/acquire from BMAN */
+#define DPAA_MBUF_MAX_ACQ_REL 8
+
+/* Buffers are allocated from single mem segment i.e. phys contiguous */
+#define DPAA_MPOOL_SINGLE_SEGMENT 0x01
+
+struct dpaa_bp_info {
+ struct rte_mempool *mp;
+ struct bman_pool *bp;
+ uint32_t bpid;
+ uint32_t size;
+ uint32_t meta_data_size;
+ int32_t dpaa_ops_index;
+ int64_t ptov_off;
+ uint8_t flags;
+};
+
+static inline void *
+DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info, uint64_t addr)
+{
+ if (bp_info->ptov_off)
+ return ((void *) (size_t)(addr + bp_info->ptov_off));
+ return rte_dpaa_mem_ptov(addr);
+}
+
+#define DPAA_MEMPOOL_TO_POOL_INFO(__mp) \
+ ((struct dpaa_bp_info *)__mp->pool_data)
+
+#define DPAA_MEMPOOL_TO_BPID(__mp) \
+ (((struct dpaa_bp_info *)__mp->pool_data)->bpid)
+
+extern struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS];
+
+#define DPAA_BPID_TO_POOL_INFO(__bpid) (&rte_dpaa_bpid_info[__bpid])
+
+#endif
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa/meson.build b/src/spdk/dpdk/drivers/mempool/dpaa/meson.build
new file mode 100644
index 00000000..9163b3db
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_dpaa']
+sources = files('dpaa_mempool.c')
+
+# depends on dpaa bus which uses experimental API
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa/rte_mempool_dpaa_version.map b/src/spdk/dpdk/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
new file mode 100644
index 00000000..60bf50b2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
@@ -0,0 +1,8 @@
+DPDK_17.11 {
+ global:
+
+ rte_dpaa_bpid_info;
+ rte_dpaa_memsegs;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/Makefile b/src/spdk/dpdk/drivers/mempool/dpaa2/Makefile
new file mode 100644
index 00000000..9e4c87d7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_dpaa2.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_mempool_dpaa2_version.map
+
+# Lbrary version
+LIBABIVER := 1
+
+# depends on fslmc bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL)-include := rte_dpaa2_mempool.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
new file mode 100644
index 00000000..7d0435f5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include "rte_dpaa2_mempool.h"
+
+#include <fslmc_logs.h>
+#include <mc/fsl_dpbp.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+#include "dpaa2_hw_mempool.h"
+#include "dpaa2_hw_mempool_logs.h"
+
+struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
+static struct dpaa2_bp_list *h_bp_list;
+
+/* List of all the memseg information locally maintained in dpaa2 driver. This
+ * is to optimize the PA_to_VA searches until a better mechanism (algo) is
+ * available.
+ */
+struct dpaa2_memseg_list rte_dpaa2_memsegs
+ = TAILQ_HEAD_INITIALIZER(rte_dpaa2_memsegs);
+
+/* Dynamic logging identified for mempool */
+int dpaa2_logtype_mempool;
+
+static int
+rte_hw_mbuf_create_pool(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_list *bp_list;
+ struct dpaa2_dpbp_dev *avail_dpbp;
+ struct dpaa2_bp_info *bp_info;
+ struct dpbp_attr dpbp_attr;
+ uint32_t bpid;
+ int ret;
+
+ avail_dpbp = dpaa2_alloc_dpbp_dev();
+
+ if (!avail_dpbp) {
+ DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
+ return -ENOENT;
+ }
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_MEMPOOL_ERR("Failure in affining portal");
+ goto err1;
+ }
+ }
+
+ ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
+ if (ret != 0) {
+ DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
+ ret);
+ goto err1;
+ }
+
+ ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
+ avail_dpbp->token, &dpbp_attr);
+ if (ret != 0) {
+ DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
+ ret);
+ goto err2;
+ }
+
+ bp_info = rte_malloc(NULL,
+ sizeof(struct dpaa2_bp_info),
+ RTE_CACHE_LINE_SIZE);
+ if (!bp_info) {
+ DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
+ ret = -ENOMEM;
+ goto err2;
+ }
+
+ /* Allocate the bp_list which will be added into global_bp_list */
+ bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
+ RTE_CACHE_LINE_SIZE);
+ if (!bp_list) {
+ DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
+ ret = -ENOMEM;
+ goto err3;
+ }
+
+ /* Set parameters of buffer pool list */
+ bp_list->buf_pool.num_bufs = mp->size;
+ bp_list->buf_pool.size = mp->elt_size
+ - sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
+ bp_list->buf_pool.bpid = dpbp_attr.bpid;
+ bp_list->buf_pool.h_bpool_mem = NULL;
+ bp_list->buf_pool.dpbp_node = avail_dpbp;
+ /* Identification for our offloaded pool_data structure */
+ bp_list->dpaa2_ops_index = mp->ops_index;
+ bp_list->next = h_bp_list;
+ bp_list->mp = mp;
+
+ bpid = dpbp_attr.bpid;
+
+ rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
+ + rte_pktmbuf_priv_size(mp);
+ rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
+ rte_dpaa2_bpid_info[bpid].bpid = bpid;
+
+ rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
+ sizeof(struct dpaa2_bp_info));
+ mp->pool_data = (void *)bp_info;
+
+ DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);
+
+ h_bp_list = bp_list;
+ return 0;
+err3:
+ rte_free(bp_info);
+err2:
+ dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
+err1:
+ dpaa2_free_dpbp_dev(avail_dpbp);
+
+ return ret;
+}
+
+static void
+rte_hw_mbuf_free_pool(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_info *bpinfo;
+ struct dpaa2_bp_list *bp;
+ struct dpaa2_dpbp_dev *dpbp_node;
+
+ if (!mp->pool_data) {
+ DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
+ return;
+ }
+
+ bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
+ bp = bpinfo->bp_list;
+ dpbp_node = bp->buf_pool.dpbp_node;
+
+ dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);
+
+ if (h_bp_list == bp) {
+ h_bp_list = h_bp_list->next;
+ } else { /* if it is not the first node */
+ struct dpaa2_bp_list *prev = h_bp_list, *temp;
+ temp = h_bp_list->next;
+ while (temp) {
+ if (temp == bp) {
+ prev->next = temp->next;
+ rte_free(bp);
+ break;
+ }
+ prev = temp;
+ temp = temp->next;
+ }
+ }
+
+ rte_free(mp->pool_data);
+ dpaa2_free_dpbp_dev(dpbp_node);
+}
+
+static void
+rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
+ void * const *obj_table,
+ uint32_t bpid,
+ uint32_t meta_data_size,
+ int count)
+{
+ struct qbman_release_desc releasedesc;
+ struct qbman_swp *swp;
+ int ret;
+ int i, n;
+ uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret != 0) {
+ DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
+ return;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ /* Create a release descriptor required for releasing
+ * buffers into QBMAN
+ */
+ qbman_release_desc_clear(&releasedesc);
+ qbman_release_desc_set_bpid(&releasedesc, bpid);
+
+ n = count % DPAA2_MBUF_MAX_ACQ_REL;
+ if (unlikely(!n))
+ goto aligned;
+
+ /* convert mbuf to buffers for the remainder */
+ for (i = 0; i < n ; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
+ + meta_data_size;
+#else
+ bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
+#endif
+ }
+
+ /* feed them to bman */
+ do {
+ ret = qbman_swp_release(swp, &releasedesc, bufs, n);
+ } while (ret == -EBUSY);
+
+aligned:
+ /* if there are more buffers to free */
+ while (n < count) {
+ /* convert mbuf to buffers */
+ for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ bufs[i] = (uint64_t)
+ rte_mempool_virt2iova(obj_table[n + i])
+ + meta_data_size;
+#else
+ bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
+#endif
+ }
+
+ do {
+ ret = qbman_swp_release(swp, &releasedesc, bufs,
+ DPAA2_MBUF_MAX_ACQ_REL);
+ } while (ret == -EBUSY);
+ n += DPAA2_MBUF_MAX_ACQ_REL;
+ }
+}
+
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+ if (!(bp_info->bp_list)) {
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ return -ENOMEM;
+ }
+
+ return bp_info->bpid;
+}
+
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+ if (!(bp_info->bp_list)) {
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ return NULL;
+ }
+
+ return (struct rte_mbuf *)((uint8_t *)buf_addr -
+ bp_info->meta_data_size);
+}
+
+int
+rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
+ void **obj_table, unsigned int count)
+{
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
+ static int alloc;
+#endif
+ struct qbman_swp *swp;
+ uint16_t bpid;
+ size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+ int i, ret;
+ unsigned int n = 0;
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(pool);
+
+ if (!(bp_info->bp_list)) {
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
+ return -ENOENT;
+ }
+
+ bpid = bp_info->bpid;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret != 0) {
+ DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
+ return ret;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ while (n < count) {
+ /* Acquire is all-or-nothing, so we drain in 7s,
+ * then the remainder.
+ */
+ if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
+ ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
+ DPAA2_MBUF_MAX_ACQ_REL);
+ } else {
+ ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
+ count - n);
+ }
+ /* In case of less than requested number of buffers available
+ * in pool, qbman_swp_acquire returns 0
+ */
+ if (ret <= 0) {
+ DPAA2_MEMPOOL_ERR("Buffer acquire failed with"
+ " err code: %d", ret);
+ /* The API expect the exact number of requested bufs */
+ /* Releasing all buffers allocated */
+ rte_dpaa2_mbuf_release(pool, obj_table, bpid,
+ bp_info->meta_data_size, n);
+ return -ENOBUFS;
+ }
+ /* assigning mbuf from the acquired objects */
+ for (i = 0; (i < ret) && bufs[i]; i++) {
+ DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
+ obj_table[n] = (struct rte_mbuf *)
+ (bufs[i] - bp_info->meta_data_size);
+ DPAA2_MEMPOOL_DP_DEBUG(
+ "Acquired %p address %p from BMAN\n",
+ (void *)bufs[i], (void *)obj_table[n]);
+ n++;
+ }
+ }
+
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
+ alloc += n;
+ DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
+ alloc, count, n);
+#endif
+ return 0;
+}
+
+static int
+rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
+ void * const *obj_table, unsigned int n)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(pool);
+ if (!(bp_info->bp_list)) {
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
+ return -ENOENT;
+ }
+ rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
+ bp_info->meta_data_size, n);
+
+ return 0;
+}
+
+static unsigned int
+rte_hw_mbuf_get_count(const struct rte_mempool *mp)
+{
+ int ret;
+ unsigned int num_of_bufs = 0;
+ struct dpaa2_bp_info *bp_info;
+ struct dpaa2_dpbp_dev *dpbp_node;
+
+ if (!mp || !mp->pool_data) {
+ DPAA2_MEMPOOL_ERR("Invalid mempool provided");
+ return 0;
+ }
+
+ bp_info = (struct dpaa2_bp_info *)mp->pool_data;
+ dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;
+
+ ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
+ dpbp_node->token, &num_of_bufs);
+ if (ret) {
+ DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
+ ret);
+ return 0;
+ }
+
+ DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);
+
+ return num_of_bufs;
+}
+
+static int
+dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t paddr, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct dpaa2_memseg *ms;
+
+ /* For each memory chunk pinned to the Mempool, a linked list of the
+ * contained memsegs is created for searching when PA to VA
+ * conversion is required.
+ */
+ ms = rte_zmalloc(NULL, sizeof(struct dpaa2_memseg), 0);
+ if (!ms) {
+ DPAA2_MEMPOOL_ERR("Unable to allocate internal memory.");
+ DPAA2_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
+ /* If the element is not added, it would only lead to failure
+ * in searching for the element and the logic would Fallback
+ * to traditional DPDK memseg traversal code. So, this is not
+ * a blocking error - but, error would be printed on screen.
+ */
+ return 0;
+ }
+
+ ms->vaddr = vaddr;
+ ms->iova = paddr;
+ ms->len = len;
+ /* Head insertions are generally faster than tail insertions as the
+ * buffers pinned are picked from rear end.
+ */
+ TAILQ_INSERT_HEAD(&rte_dpaa2_memsegs, ms, next);
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+ obj_cb, obj_cb_arg);
+}
+
+struct rte_mempool_ops dpaa2_mpool_ops = {
+ .name = DPAA2_MEMPOOL_OPS_NAME,
+ .alloc = rte_hw_mbuf_create_pool,
+ .free = rte_hw_mbuf_free_pool,
+ .enqueue = rte_hw_mbuf_free_bulk,
+ .dequeue = rte_dpaa2_mbuf_alloc_bulk,
+ .get_count = rte_hw_mbuf_get_count,
+ .populate = dpaa2_populate,
+};
+
+MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
+
+RTE_INIT(dpaa2_mempool_init_log)
+{
+ dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
+ if (dpaa2_logtype_mempool >= 0)
+ rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.h b/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
new file mode 100644
index 00000000..4d346874
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _DPAA2_HW_DPBP_H_
+#define _DPAA2_HW_DPBP_H_
+
+#define DPAA2_MAX_BUF_POOLS 8
+
+#define DPAA2_INVALID_MBUF_SEQN 0
+
+struct buf_pool_cfg {
+ void *addr;
+ /**< The address from where DPAA2 will carve out the buffers */
+ rte_iova_t phys_addr;
+ /**< Physical address of the memory provided in addr */
+ uint32_t num;
+ /**< Number of buffers */
+ uint32_t size;
+ /**< Size including headroom for each buffer */
+ uint16_t align;
+ /**< Buffer alignment (in bytes) */
+ uint16_t bpid;
+ /**< Autogenerated buffer pool ID for internal use */
+};
+
+struct buf_pool {
+ uint32_t size; /**< Size of the Pool */
+ uint32_t num_bufs; /**< Number of buffers in Pool */
+ uint16_t bpid; /**< Pool ID, from pool configuration */
+ uint8_t *h_bpool_mem; /**< Internal context data */
+ struct dpaa2_dpbp_dev *dpbp_node; /**< Hardware context */
+};
+
+/*!
+ * Buffer pool list configuration structure. User need to give DPAA2 the
+ * valid number of 'num_buf_pools'.
+ */
+struct dpaa2_bp_list_cfg {
+ struct buf_pool_cfg buf_pool; /* Configuration of each buffer pool*/
+};
+
+struct dpaa2_bp_list {
+ struct dpaa2_bp_list *next;
+ struct rte_mempool *mp; /**< DPDK RTE EAL pool reference */
+ int32_t dpaa2_ops_index; /**< Index into DPDK Mempool ops table */
+ struct buf_pool buf_pool;
+};
+
+struct dpaa2_bp_info {
+ uint32_t meta_data_size;
+ uint32_t bpid;
+ struct dpaa2_bp_list *bp_list;
+};
+
+#define mempool_to_bpinfo(mp) ((struct dpaa2_bp_info *)(mp)->pool_data)
+#define mempool_to_bpid(mp) ((mempool_to_bpinfo(mp))->bpid)
+
+extern struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
+
+int rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
+ void **obj_table, unsigned int count);
+
+#endif /* _DPAA2_HW_DPBP_H_ */
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h b/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
new file mode 100644
index 00000000..c79b3d1c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
@@ -0,0 +1,38 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef _DPAA2_HW_MEMPOOL_LOGS_H_
+#define _DPAA2_HW_MEMPOOL_LOGS_H_
+
+extern int dpaa2_logtype_mempool;
+
+#define DPAA2_MEMPOOL_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_mempool, \
+ "mempool/dpaa2: " fmt "\n", ##args)
+
+/* Debug logs are with Function names */
+#define DPAA2_MEMPOOL_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_mempool, \
+ "mempool/dpaa2: %s(): " fmt "\n", __func__, ##args)
+
+#define DPAA2_MEMPOOL_INFO(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(INFO, fmt, ## args)
+#define DPAA2_MEMPOOL_ERR(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(ERR, fmt, ## args)
+#define DPAA2_MEMPOOL_WARN(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_MEMPOOL_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_MEMPOOL_DP_DEBUG(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_MEMPOOL_DP_INFO(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_MEMPOOL_DP_WARN(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _DPAA2_HW_MEMPOOL_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/meson.build b/src/spdk/dpdk/drivers/mempool/dpaa2/meson.build
new file mode 100644
index 00000000..90bab606
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_fslmc']
+sources = files('dpaa2_hw_mempool.c')
+
+# depends on fslmc bus which uses experimental API
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/rte_dpaa2_mempool.h b/src/spdk/dpdk/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
new file mode 100644
index 00000000..4a22b7c4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_DPAA2_MEMPOOL_H__
+#define __RTE_DPAA2_MEMPOOL_H__
+
+/**
+ * @file
+ *
+ * NXP specific mempool related functions.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mempool.h>
+
+/**
+ * Get BPID corresponding to the packet pool
+ *
+ * @param mp
+ * memory pool
+ *
+ * @return
+ * BPID of the buffer pool
+ */
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp);
+
+/**
+ * Get MBUF from the corresponding 'buf_addr'
+ *
+ * @param mp
+ * memory pool
+ * @param buf_addr
+ * The 'buf_addr' of the mbuf. This is the start buffer address
+ * of the packet buffer (mbuf).
+ *
+ * @return
+ * - MBUF pointer for success
+ * - NULL in case of error
+ */
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DPAA2_MEMPOOL_H__ */
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map b/src/spdk/dpdk/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
new file mode 100644
index 00000000..b9d996a6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
@@ -0,0 +1,17 @@
+DPDK_17.05 {
+ global:
+
+ rte_dpaa2_bpid_info;
+ rte_dpaa2_mbuf_alloc_bulk;
+ rte_dpaa2_memsegs;
+
+ local: *;
+};
+
+DPDK_18.05 {
+ global:
+
+ rte_dpaa2_mbuf_from_buf_addr;
+ rte_dpaa2_mbuf_pool_bpid;
+
+} DPDK_17.05;
diff --git a/src/spdk/dpdk/drivers/mempool/meson.build b/src/spdk/dpdk/drivers/mempool/meson.build
new file mode 100644
index 00000000..4527d980
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['bucket', 'dpaa', 'dpaa2', 'octeontx', 'ring', 'stack']
+std_deps = ['mempool']
+config_flag_fmt = 'RTE_LIBRTE_@0@_MEMPOOL'
+driver_name_fmt = 'rte_mempool_@0@'
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/Makefile b/src/spdk/dpdk/drivers/mempool/octeontx/Makefile
new file mode 100644
index 00000000..a3e1dce8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/Makefile
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_octeontx.a
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
+EXPORT_MAP := rte_mempool_octeontx_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_fpavf.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += rte_mempool_octeontx.c
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_rte_mempool_octeontx.o += -fno-prefetch-loop-arrays
+
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_rte_mempool_octeontx.o += -Ofast
+else
+CFLAGS_rte_mempool_octeontx.o += -O3 -ffast-math
+endif
+
+else
+CFLAGS_rte_mempool_octeontx.o += -Ofast
+endif
+
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf
+LDLIBS += -lrte_bus_pci -lrte_common_octeontx
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/meson.build b/src/spdk/dpdk/drivers/mempool/octeontx/meson.build
new file mode 100644
index 00000000..3baaf7db
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+sources = files('octeontx_fpavf.c',
+ 'rte_mempool_octeontx.c'
+)
+
+deps += ['mbuf', 'bus_pci', 'common_octeontx']
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c b/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c
new file mode 100644
index 00000000..4cf387e8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -0,0 +1,806 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/mman.h>
+
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_bus_pci.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+#include <rte_mbuf.h>
+
+#include "octeontx_mbox.h"
+#include "octeontx_fpavf.h"
+
+/* FPA Mbox Message */
+#define IDENTIFY 0x0
+
+#define FPA_CONFIGSET 0x1
+#define FPA_CONFIGGET 0x2
+#define FPA_START_COUNT 0x3
+#define FPA_STOP_COUNT 0x4
+#define FPA_ATTACHAURA 0x5
+#define FPA_DETACHAURA 0x6
+#define FPA_SETAURALVL 0x7
+#define FPA_GETAURALVL 0x8
+
+#define FPA_COPROC 0x1
+
+/* fpa mbox struct */
+struct octeontx_mbox_fpa_cfg {
+ int aid;
+ uint64_t pool_cfg;
+ uint64_t pool_stack_base;
+ uint64_t pool_stack_end;
+ uint64_t aura_cfg;
+};
+
+struct __attribute__((__packed__)) gen_req {
+ uint32_t value;
+};
+
+struct __attribute__((__packed__)) idn_req {
+ uint8_t domain_id;
+};
+
+struct __attribute__((__packed__)) gen_resp {
+ uint16_t domain_id;
+ uint16_t vfid;
+};
+
+struct __attribute__((__packed__)) dcfg_resp {
+ uint8_t sso_count;
+ uint8_t ssow_count;
+ uint8_t fpa_count;
+ uint8_t pko_count;
+ uint8_t tim_count;
+ uint8_t net_port_count;
+ uint8_t virt_port_count;
+};
+
+#define FPA_MAX_POOL 32
+#define FPA_PF_PAGE_SZ 4096
+
+#define FPA_LN_SIZE 128
+#define FPA_ROUND_UP(x, size) \
+ ((((unsigned long)(x)) + size-1) & (~(size-1)))
+#define FPA_OBJSZ_2_CACHE_LINE(sz) (((sz) + RTE_CACHE_LINE_MASK) >> 7)
+#define FPA_CACHE_LINE_2_OBJSZ(sz) ((sz) << 7)
+
+#define POOL_ENA (0x1 << 0)
+#define POOL_DIS (0x0 << 0)
+#define POOL_SET_NAT_ALIGN (0x1 << 1)
+#define POOL_DIS_NAT_ALIGN (0x0 << 1)
+#define POOL_STYPE(x) (((x) & 0x1) << 2)
+#define POOL_LTYPE(x) (((x) & 0x3) << 3)
+#define POOL_BUF_OFFSET(x) (((x) & 0x7fffULL) << 16)
+#define POOL_BUF_SIZE(x) (((x) & 0x7ffULL) << 32)
+
+struct fpavf_res {
+ void *pool_stack_base;
+ void *bar0;
+ uint64_t stack_ln_ptr;
+ uint16_t domain_id;
+ uint16_t vf_id; /* gpool_id */
+ uint16_t sz128; /* Block size in cache lines */
+ bool is_inuse;
+};
+
+struct octeontx_fpadev {
+ rte_spinlock_t lock;
+ uint8_t total_gpool_cnt;
+ struct fpavf_res pool[FPA_VF_MAX];
+};
+
+static struct octeontx_fpadev fpadev;
+
+int octeontx_logtype_fpavf;
+int octeontx_logtype_fpavf_mbox;
+
+RTE_INIT(otx_pool_init_log)
+{
+ octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx");
+ if (octeontx_logtype_fpavf >= 0)
+ rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE);
+}
+
+/* lock is taken by caller */
+static int
+octeontx_fpa_gpool_alloc(unsigned int object_size)
+{
+ struct fpavf_res *res = NULL;
+ uint16_t gpool;
+ unsigned int sz128;
+
+ sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);
+
+ for (gpool = 0; gpool < FPA_VF_MAX; gpool++) {
+
+ /* Skip VF that is not mapped Or _inuse */
+ if ((fpadev.pool[gpool].bar0 == NULL) ||
+ (fpadev.pool[gpool].is_inuse == true))
+ continue;
+
+ res = &fpadev.pool[gpool];
+
+ RTE_ASSERT(res->domain_id != (uint16_t)~0);
+ RTE_ASSERT(res->vf_id != (uint16_t)~0);
+ RTE_ASSERT(res->stack_ln_ptr != 0);
+
+ if (res->sz128 == 0) {
+ res->sz128 = sz128;
+
+ fpavf_log_dbg("gpool %d blk_sz %d\n", gpool, sz128);
+ return gpool;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+/* lock is taken by caller */
+static __rte_always_inline uintptr_t
+octeontx_fpa_gpool2handle(uint16_t gpool)
+{
+ struct fpavf_res *res = NULL;
+
+ RTE_ASSERT(gpool < FPA_VF_MAX);
+
+ res = &fpadev.pool[gpool];
+ return (uintptr_t)res->bar0 | gpool;
+}
+
+static __rte_always_inline bool
+octeontx_fpa_handle_valid(uintptr_t handle)
+{
+ struct fpavf_res *res = NULL;
+ uint8_t gpool;
+ int i;
+ bool ret = false;
+
+ if (unlikely(!handle))
+ return ret;
+
+ /* get the gpool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+
+ /* get the bar address */
+ handle &= ~(uint64_t)FPA_GPOOL_MASK;
+ for (i = 0; i < FPA_VF_MAX; i++) {
+ if ((uintptr_t)fpadev.pool[i].bar0 != handle)
+ continue;
+
+ /* validate gpool */
+ if (gpool != i)
+ return false;
+
+ res = &fpadev.pool[i];
+
+ if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 ||
+ res->stack_ln_ptr == 0)
+ ret = false;
+ else
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
+ signed short buf_offset, unsigned int max_buf_count)
+{
+ void *memptr = NULL;
+ rte_iova_t phys_addr;
+ unsigned int memsz;
+ struct fpavf_res *fpa = NULL;
+ uint64_t reg;
+ struct octeontx_mbox_hdr hdr;
+ struct dcfg_resp resp;
+ struct octeontx_mbox_fpa_cfg cfg;
+ int ret = -1;
+
+ fpa = &fpadev.pool[gpool];
+ memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
+ FPA_LN_SIZE;
+
+ /* Round-up to page size */
+ memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ-1);
+ memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE);
+ if (memptr == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Configure stack */
+ fpa->pool_stack_base = memptr;
+ phys_addr = rte_malloc_virt2iova(memptr);
+
+ buf_size /= FPA_LN_SIZE;
+
+ /* POOL setup */
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_CONFIGSET;
+ hdr.vfid = fpa->vf_id;
+ hdr.res_code = 0;
+
+ buf_offset /= FPA_LN_SIZE;
+ reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) |
+ POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
+ POOL_ENA;
+
+ cfg.aid = FPA_AURA_IDX(gpool);
+ cfg.pool_cfg = reg;
+ cfg.pool_stack_base = phys_addr;
+ cfg.pool_stack_end = phys_addr + memsz;
+ cfg.aura_cfg = (1 << 9);
+
+ ret = octeontx_mbox_send(&hdr, &cfg,
+ sizeof(struct octeontx_mbox_fpa_cfg),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64" aura_cfg %" PRIx64 "\n",
+ fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg,
+ cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg);
+
+ /* Now pool is in_use */
+ fpa->is_inuse = true;
+
+err:
+ if (ret < 0)
+ rte_free(memptr);
+
+ return ret;
+}
+
+static int
+octeontx_fpapf_pool_destroy(unsigned int gpool_index)
+{
+ struct octeontx_mbox_hdr hdr;
+ struct dcfg_resp resp;
+ struct octeontx_mbox_fpa_cfg cfg;
+ struct fpavf_res *fpa = NULL;
+ int ret = -1;
+
+ fpa = &fpadev.pool[gpool_index];
+
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_CONFIGSET;
+ hdr.vfid = fpa->vf_id;
+ hdr.res_code = 0;
+
+ /* reset and free the pool */
+ cfg.aid = 0;
+ cfg.pool_cfg = 0;
+ cfg.pool_stack_base = 0;
+ cfg.pool_stack_end = 0;
+ cfg.aura_cfg = 0;
+
+ ret = octeontx_mbox_send(&hdr, &cfg,
+ sizeof(struct octeontx_mbox_fpa_cfg),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ ret = 0;
+err:
+ /* anycase free pool stack memory */
+ rte_free(fpa->pool_stack_base);
+ fpa->pool_stack_base = NULL;
+ return ret;
+}
+
+static int
+octeontx_fpapf_aura_attach(unsigned int gpool_index)
+{
+ struct octeontx_mbox_hdr hdr;
+ struct dcfg_resp resp;
+ struct octeontx_mbox_fpa_cfg cfg;
+ int ret = 0;
+
+ if (gpool_index >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_ATTACHAURA;
+ hdr.vfid = gpool_index;
+ hdr.res_code = 0;
+ memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
+ cfg.aid = FPA_AURA_IDX(gpool_index);
+
+ ret = octeontx_mbox_send(&hdr, &cfg,
+ sizeof(struct octeontx_mbox_fpa_cfg),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ fpavf_log_err("Could not attach fpa ");
+ fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
+ FPA_AURA_IDX(gpool_index), gpool_index, ret,
+ hdr.res_code);
+ ret = -EACCES;
+ goto err;
+ }
+err:
+ return ret;
+}
+
+static int
+octeontx_fpapf_aura_detach(unsigned int gpool_index)
+{
+ struct octeontx_mbox_fpa_cfg cfg = {0};
+ struct octeontx_mbox_hdr hdr = {0};
+ int ret = 0;
+
+ if (gpool_index >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ cfg.aid = FPA_AURA_IDX(gpool_index);
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_DETACHAURA;
+ hdr.vfid = gpool_index;
+ ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
+ if (ret < 0) {
+ fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
+ FPA_AURA_IDX(gpool_index), ret,
+ hdr.res_code);
+ ret = -EINVAL;
+ }
+
+err:
+ return ret;
+}
+
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
+ void *memva, uint16_t gpool)
+{
+ uint64_t va_end;
+
+ if (unlikely(!handle))
+ return -ENODEV;
+
+ va_end = (uintptr_t)memva + memsz;
+ va_end &= ~RTE_CACHE_LINE_MASK;
+
+ /* VHPOOL setup */
+ fpavf_write64((uintptr_t)memva,
+ (void *)((uintptr_t)handle +
+ FPA_VF_VHPOOL_START_ADDR(gpool)));
+ fpavf_write64(va_end,
+ (void *)((uintptr_t)handle +
+ FPA_VF_VHPOOL_END_ADDR(gpool)));
+ return 0;
+}
+
+static int
+octeontx_fpapf_start_count(uint16_t gpool_index)
+{
+ int ret = 0;
+ struct octeontx_mbox_hdr hdr = {0};
+
+ if (gpool_index >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_START_COUNT;
+ hdr.vfid = gpool_index;
+ ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (ret < 0) {
+ fpavf_log_err("Could not start buffer counting for ");
+ fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n",
+ gpool_index, ret, hdr.res_code);
+ ret = -EINVAL;
+ goto err;
+ }
+
+err:
+ return ret;
+}
+
+static __rte_always_inline int
+octeontx_fpavf_free(unsigned int gpool)
+{
+ int ret = 0;
+
+ if (gpool >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Pool is free */
+ fpadev.pool[gpool].is_inuse = false;
+
+err:
+ return ret;
+}
+
+static __rte_always_inline int
+octeontx_gpool_free(uint16_t gpool)
+{
+ if (fpadev.pool[gpool].sz128 != 0) {
+ fpadev.pool[gpool].sz128 = 0;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Return buffer size for a given pool
+ */
+int
+octeontx_fpa_bufpool_block_size(uintptr_t handle)
+{
+ struct fpavf_res *res = NULL;
+ uint8_t gpool;
+
+ if (unlikely(!octeontx_fpa_handle_valid(handle)))
+ return -EINVAL;
+
+ /* get the gpool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+ res = &fpadev.pool[gpool];
+ return FPA_CACHE_LINE_2_OBJSZ(res->sz128);
+}
+
+int
+octeontx_fpa_bufpool_free_count(uintptr_t handle)
+{
+ uint64_t cnt, limit, avail;
+ uint8_t gpool;
+ uint16_t gaura;
+ uintptr_t pool_bar;
+
+ if (unlikely(!octeontx_fpa_handle_valid(handle)))
+ return -EINVAL;
+
+ /* get the gpool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+ /* get the aura */
+ gaura = octeontx_fpa_bufpool_gaura(handle);
+
+ /* Get pool bar address from handle */
+ pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+ cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT(gaura)));
+ limit = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
+
+ avail = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_AVAILABLE(gpool)));
+
+ return RTE_MIN(avail, (limit - cnt));
+}
+
+uintptr_t
+octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
+ unsigned int buf_offset, int node_id)
+{
+ unsigned int gpool;
+ unsigned int gaura;
+ uintptr_t gpool_handle;
+ uintptr_t pool_bar;
+ int res;
+
+ RTE_SET_USED(node_id);
+ RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
+
+ object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
+ if (object_size > FPA_MAX_OBJ_SIZE) {
+ errno = EINVAL;
+ goto error_end;
+ }
+
+ rte_spinlock_lock(&fpadev.lock);
+ res = octeontx_fpa_gpool_alloc(object_size);
+
+ /* Bail if failed */
+ if (unlikely(res < 0)) {
+ errno = res;
+ goto error_unlock;
+ }
+
+ /* get fpavf */
+ gpool = res;
+
+ /* get pool handle */
+ gpool_handle = octeontx_fpa_gpool2handle(gpool);
+ if (!octeontx_fpa_handle_valid(gpool_handle)) {
+ errno = ENOSPC;
+ goto error_gpool_free;
+ }
+
+ /* Get pool bar address from handle */
+ pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+ res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset,
+ object_count);
+ if (res < 0) {
+ errno = res;
+ goto error_gpool_free;
+ }
+
+ /* populate AURA fields */
+ res = octeontx_fpapf_aura_attach(gpool);
+ if (res < 0) {
+ errno = res;
+ goto error_pool_destroy;
+ }
+
+ gaura = FPA_AURA_IDX(gpool);
+
+ /* Release lock */
+ rte_spinlock_unlock(&fpadev.lock);
+
+ /* populate AURA registers */
+ fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT(gaura)));
+ fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
+ fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
+
+ octeontx_fpapf_start_count(gpool);
+
+ return gpool_handle;
+
+error_pool_destroy:
+ octeontx_fpavf_free(gpool);
+ octeontx_fpapf_pool_destroy(gpool);
+error_gpool_free:
+ octeontx_gpool_free(gpool);
+error_unlock:
+ rte_spinlock_unlock(&fpadev.lock);
+error_end:
+ return (uintptr_t)NULL;
+}
+
+/*
+ * Destroy a buffer pool.
+ */
+int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
+{
+ void **node, **curr, *head = NULL;
+ uint64_t sz;
+ uint64_t cnt, avail;
+ uint8_t gpool;
+ uint16_t gaura;
+ uintptr_t pool_bar;
+ int ret;
+
+ RTE_SET_USED(node_id);
+
+ /* Wait for all outstanding writes to be committed */
+ rte_smp_wmb();
+
+ if (unlikely(!octeontx_fpa_handle_valid(handle)))
+ return -EINVAL;
+
+ /* get the pool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+ /* get the aura */
+ gaura = octeontx_fpa_bufpool_gaura(handle);
+
+ /* Get pool bar address from handle */
+ pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+ /* Check for no outstanding buffers */
+ cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT(gaura)));
+ if (cnt) {
+ fpavf_log_dbg("buffer exist in pool cnt %" PRId64 "\n", cnt);
+ return -EBUSY;
+ }
+
+ rte_spinlock_lock(&fpadev.lock);
+
+ avail = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_AVAILABLE(gpool)));
+
+ /* Prepare to empty the entire POOL */
+ fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
+ fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
+
+ /* Empty the pool */
+ /* Invalidate the POOL */
+ octeontx_gpool_free(gpool);
+
+ /* Process all buffers in the pool */
+ while (avail--) {
+
+ /* Yank a buffer from the pool */
+ node = (void *)(uintptr_t)
+ fpavf_read64((void *)
+ (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));
+
+ if (node == NULL) {
+ fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
+ gaura, avail);
+ break;
+ }
+
+ /* Imsert it into an ordered linked list */
+ for (curr = &head; curr[0] != NULL; curr = curr[0]) {
+ if ((uintptr_t)node <= (uintptr_t)curr[0])
+ break;
+ }
+ node[0] = curr[0];
+ curr[0] = node;
+ }
+
+ /* Verify the linked list to be a perfect series */
+ sz = octeontx_fpa_bufpool_block_size(handle) << 7;
+ for (curr = head; curr != NULL && curr[0] != NULL;
+ curr = curr[0]) {
+ if (curr == curr[0] ||
+ ((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
+ fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
+ gpool, curr, curr[0]);
+ }
+ }
+
+ /* Disable pool operation */
+ fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_START_ADDR(gpool)));
+ fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_END_ADDR(gpool)));
+
+ (void)octeontx_fpapf_pool_destroy(gpool);
+
+ /* Deactivate the AURA */
+ fpavf_write64(0, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
+ fpavf_write64(0, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
+
+ ret = octeontx_fpapf_aura_detach(gpool);
+ if (ret) {
+ fpavf_log_err("Failed to dettach gaura %u. error code=%d\n",
+ gpool, ret);
+ }
+
+ /* Free VF */
+ (void)octeontx_fpavf_free(gpool);
+
+ rte_spinlock_unlock(&fpadev.lock);
+ return 0;
+}
+
+static void
+octeontx_fpavf_setup(void)
+{
+ uint8_t i;
+ static bool init_once;
+
+ if (!init_once) {
+ rte_spinlock_init(&fpadev.lock);
+ fpadev.total_gpool_cnt = 0;
+
+ for (i = 0; i < FPA_VF_MAX; i++) {
+
+ fpadev.pool[i].domain_id = ~0;
+ fpadev.pool[i].stack_ln_ptr = 0;
+ fpadev.pool[i].sz128 = 0;
+ fpadev.pool[i].bar0 = NULL;
+ fpadev.pool[i].pool_stack_base = NULL;
+ fpadev.pool[i].is_inuse = false;
+ }
+ init_once = 1;
+ }
+}
+
+static int
+octeontx_fpavf_identify(void *bar0)
+{
+ uint64_t val;
+ uint16_t domain_id;
+ uint16_t vf_id;
+ uint64_t stack_ln_ptr;
+
+ val = fpavf_read64((void *)((uintptr_t)bar0 +
+ FPA_VF_VHAURA_CNT_THRESHOLD(0)));
+
+ domain_id = (val >> 8) & 0xffff;
+ vf_id = (val >> 24) & 0xffff;
+
+ stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
+ FPA_VF_VHPOOL_THRESHOLD(0)));
+ if (vf_id >= FPA_VF_MAX) {
+ fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id);
+ return -1;
+ }
+
+ if (fpadev.pool[vf_id].is_inuse) {
+ fpavf_log_err("vf_id %d is_inuse\n", vf_id);
+ return -1;
+ }
+
+ fpadev.pool[vf_id].domain_id = domain_id;
+ fpadev.pool[vf_id].vf_id = vf_id;
+ fpadev.pool[vf_id].bar0 = bar0;
+ fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr;
+
+ /* SUCCESS */
+ return vf_id;
+}
+
+/* FPAVF pcie device aka mempool probe */
+static int
+fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint8_t *idreg;
+ int res;
+ struct fpavf_res *fpa = NULL;
+
+ RTE_SET_USED(pci_drv);
+ RTE_SET_USED(fpa);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ fpavf_log_err("Empty bars %p ", pci_dev->mem_resource[0].addr);
+ return -ENODEV;
+ }
+ idreg = pci_dev->mem_resource[0].addr;
+
+ octeontx_fpavf_setup();
+
+ res = octeontx_fpavf_identify(idreg);
+ if (res < 0)
+ return -1;
+
+ fpa = &fpadev.pool[res];
+ fpadev.total_gpool_cnt++;
+ rte_wmb();
+
+ fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
+ fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
+ fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);
+
+ return 0;
+}
+
+static const struct rte_pci_id pci_fpavf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_FPA_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_fpavf = {
+ .id_table = pci_fpavf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = fpavf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.h b/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.h
new file mode 100644
index 00000000..b00be137
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_FPAVF_H__
+#define __OCTEONTX_FPAVF_H__
+
+#include <rte_io.h>
+#include "octeontx_pool_logs.h"
+
+/* fpa pool Vendor ID and Device ID */
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_FPA_VF 0xA053
+
+#define FPA_VF_MAX 32
+#define FPA_GPOOL_MASK (FPA_VF_MAX-1)
+#define FPA_GAURA_SHIFT 4
+
+/* FPA VF register offsets */
+#define FPA_VF_INT(x) (0x200ULL | ((x) << 22))
+#define FPA_VF_INT_W1S(x) (0x210ULL | ((x) << 22))
+#define FPA_VF_INT_ENA_W1S(x) (0x220ULL | ((x) << 22))
+#define FPA_VF_INT_ENA_W1C(x) (0x230ULL | ((x) << 22))
+
+#define FPA_VF_VHPOOL_AVAILABLE(vhpool) (0x04150 | ((vhpool)&0x0))
+#define FPA_VF_VHPOOL_THRESHOLD(vhpool) (0x04160 | ((vhpool)&0x0))
+#define FPA_VF_VHPOOL_START_ADDR(vhpool) (0x04200 | ((vhpool)&0x0))
+#define FPA_VF_VHPOOL_END_ADDR(vhpool) (0x04210 | ((vhpool)&0x0))
+
+#define FPA_VF_VHAURA_CNT(vaura) (0x20120 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_CNT_ADD(vaura) (0x20128 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_CNT_LIMIT(vaura) (0x20130 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_CNT_THRESHOLD(vaura) (0x20140 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_OP_ALLOC(vaura) (0x30000 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_OP_FREE(vaura) (0x38000 | ((vaura)&0xf)<<18)
+
+#define FPA_VF_FREE_ADDRS_S(x, y, z) \
+ ((x) | (((y) & 0x1ff) << 3) | ((((z) & 1)) << 14))
+
+#define FPA_AURA_IDX(gpool) (gpool << FPA_GAURA_SHIFT)
+/* FPA VF register offsets from VF_BAR4, size 2 MByte */
+#define FPA_VF_MSIX_VEC_ADDR 0x00000
+#define FPA_VF_MSIX_VEC_CTL 0x00008
+#define FPA_VF_MSIX_PBA 0xF0000
+
+#define FPA_VF0_APERTURE_SHIFT 22
+#define FPA_AURA_SET_SIZE 16
+
+#define FPA_MAX_OBJ_SIZE (128 * 1024)
+#define OCTEONTX_FPAVF_BUF_OFFSET 128
+
+/*
+ * In Cavium OcteonTX SoC, all accesses to the device registers are
+ * implicitly strongly ordered. So, the relaxed version of IO operation is
+ * safe to use with out any IO memory barriers.
+ */
+#define fpavf_read64 rte_read64_relaxed
+#define fpavf_write64 rte_write64_relaxed
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define fpavf_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define fpavf_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1]]" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+#else /* Un optimized functions for building on non arm64 arch */
+
+#define fpavf_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64(addr); \
+ val1 = rte_read64(((uint8_t *)addr) + 8); \
+} while (0)
+
+#define fpavf_store_pair(val0, val1, addr) \
+do { \
+ rte_write64(val0, addr); \
+ rte_write64(val1, (((uint8_t *)addr) + 8)); \
+} while (0)
+#endif
+
+uintptr_t
+octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
+ unsigned int buf_offset, int node);
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
+ void *memva, uint16_t gpool);
+int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
+int
+octeontx_fpa_bufpool_block_size(uintptr_t handle);
+int
+octeontx_fpa_bufpool_free_count(uintptr_t handle);
+
+static __rte_always_inline uint8_t
+octeontx_fpa_bufpool_gpool(uintptr_t handle)
+{
+ return (uint8_t)handle & FPA_GPOOL_MASK;
+}
+
+static __rte_always_inline uint16_t
+octeontx_fpa_bufpool_gaura(uintptr_t handle)
+{
+ return octeontx_fpa_bufpool_gpool(handle) << FPA_GAURA_SHIFT;
+}
+
+#endif /* __OCTEONTX_FPAVF_H__ */
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_pool_logs.h b/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_pool_logs.h
new file mode 100644
index 00000000..7b4e1b38
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_pool_logs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_POOL_LOGS_H__
+#define __OCTEONTX_POOL_LOGS_H__
+
+#include <rte_debug.h>
+
+#define FPAVF_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, octeontx_logtype_fpavf,\
+ "%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
+
+#define fpavf_log_info(fmt, ...) FPAVF_LOG(INFO, fmt, ##__VA_ARGS__)
+#define fpavf_log_dbg(fmt, ...) FPAVF_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define fpavf_log_err(fmt, ...) FPAVF_LOG(ERR, fmt, ##__VA_ARGS__)
+#define fpavf_func_trace fpavf_log_dbg
+
+
+extern int octeontx_logtype_fpavf;
+
+#endif /* __OCTEONTX_POOL_LOGS_H__*/
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx.c b/src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx.c
new file mode 100644
index 00000000..ab94dfe9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+
+#include "octeontx_fpavf.h"
+
+static int
+octeontx_fpavf_alloc(struct rte_mempool *mp)
+{
+ uintptr_t pool;
+ uint32_t memseg_count = mp->size;
+ uint32_t object_size;
+ int rc = 0;
+
+ object_size = mp->elt_size + mp->header_size + mp->trailer_size;
+
+ pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
+ OCTEONTX_FPAVF_BUF_OFFSET,
+ mp->socket_id);
+ rc = octeontx_fpa_bufpool_block_size(pool);
+ if (rc < 0)
+ goto _end;
+
+ if ((uint32_t)rc != object_size)
+ fpavf_log_err("buffer size mismatch: %d instead of %u\n",
+ rc, object_size);
+
+ fpavf_log_info("Pool created %p with .. ", (void *)pool);
+ fpavf_log_info("obj_sz %d, cnt %d\n", object_size, memseg_count);
+
+ /* assign pool handle to mempool */
+ mp->pool_id = (uint64_t)pool;
+
+ return 0;
+
+_end:
+ return rc;
+}
+
+static void
+octeontx_fpavf_free(struct rte_mempool *mp)
+{
+ uintptr_t pool;
+ pool = (uintptr_t)mp->pool_id;
+
+ octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
+}
+
+static __rte_always_inline void *
+octeontx_fpa_bufpool_alloc(uintptr_t handle)
+{
+ return (void *)(uintptr_t)fpavf_read64((void *)(handle +
+ FPA_VF_VHAURA_OP_ALLOC(0)));
+}
+
+static __rte_always_inline void
+octeontx_fpa_bufpool_free(uintptr_t handle, void *buf)
+{
+ uint64_t free_addr = FPA_VF_FREE_ADDRS_S(FPA_VF_VHAURA_OP_FREE(0),
+ 0 /* DWB */, 1 /* FABS */);
+
+ fpavf_write64((uintptr_t)buf, (void *)(uintptr_t)(handle + free_addr));
+}
+
+static int
+octeontx_fpavf_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n)
+{
+ uintptr_t pool;
+ unsigned int index;
+
+ pool = (uintptr_t)mp->pool_id;
+ /* Get pool bar address from handle */
+ pool &= ~(uint64_t)FPA_GPOOL_MASK;
+ for (index = 0; index < n; index++, obj_table++)
+ octeontx_fpa_bufpool_free(pool, *obj_table);
+
+ return 0;
+}
+
+static int
+octeontx_fpavf_dequeue(struct rte_mempool *mp, void **obj_table,
+ unsigned int n)
+{
+ unsigned int index;
+ uintptr_t pool;
+ void *obj;
+
+ pool = (uintptr_t)mp->pool_id;
+ /* Get pool bar address from handle */
+ pool &= ~(uint64_t)FPA_GPOOL_MASK;
+ for (index = 0; index < n; index++, obj_table++) {
+ obj = octeontx_fpa_bufpool_alloc(pool);
+ if (obj == NULL) {
+ /*
+ * Failed to allocate the requested number of objects
+ * from the pool. Current pool implementation requires
+ * completing the entire request or returning error
+ * otherwise.
+ * Free already allocated buffers to the pool.
+ */
+ for (; index > 0; index--) {
+ obj_table--;
+ octeontx_fpa_bufpool_free(pool, *obj_table);
+ }
+ return -ENOMEM;
+ }
+ *obj_table = obj;
+ }
+
+ return 0;
+}
+
+static unsigned int
+octeontx_fpavf_get_count(const struct rte_mempool *mp)
+{
+ uintptr_t pool;
+
+ pool = (uintptr_t)mp->pool_id;
+
+ return octeontx_fpa_bufpool_free_count(pool);
+}
+
+static ssize_t
+octeontx_fpavf_calc_mem_size(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
+{
+ ssize_t mem_size;
+
+ /*
+ * Simply need space for one more object to be able to
+ * fulfil alignment requirements.
+ */
+ mem_size = rte_mempool_op_calc_mem_size_default(mp, obj_num + 1,
+ pg_shift,
+ min_chunk_size, align);
+ if (mem_size >= 0) {
+ /*
+ * Memory area which contains objects must be physically
+ * contiguous.
+ */
+ *min_chunk_size = mem_size;
+ }
+
+ return mem_size;
+}
+
+static int
+octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ size_t total_elt_sz;
+ size_t off;
+ uint8_t gpool;
+ uintptr_t pool_bar;
+ int ret;
+
+ if (iova == RTE_BAD_IOVA)
+ return -EINVAL;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+ /* align object start address to a multiple of total_elt_sz */
+ off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
+
+ if (len < off)
+ return -EINVAL;
+
+ vaddr = (char *)vaddr + off;
+ iova += off;
+ len -= off;
+
+ gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
+ pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;
+
+ ret = octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
+ if (ret < 0)
+ return ret;
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova, len,
+ obj_cb, obj_cb_arg);
+}
+
+static struct rte_mempool_ops octeontx_fpavf_ops = {
+ .name = "octeontx_fpavf",
+ .alloc = octeontx_fpavf_alloc,
+ .free = octeontx_fpavf_free,
+ .enqueue = octeontx_fpavf_enqueue,
+ .dequeue = octeontx_fpavf_dequeue,
+ .get_count = octeontx_fpavf_get_count,
+ .calc_mem_size = octeontx_fpavf_calc_mem_size,
+ .populate = octeontx_fpavf_populate,
+};
+
+MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx_version.map b/src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
new file mode 100644
index 00000000..a7530317
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
@@ -0,0 +1,3 @@
+DPDK_17.11 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/mempool/ring/Makefile b/src/spdk/dpdk/drivers/mempool/ring/Makefile
new file mode 100644
index 00000000..ddab522f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/ring/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_ring.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+
+EXPORT_MAP := rte_mempool_ring_version.map
+
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += rte_mempool_ring.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/mempool/ring/meson.build b/src/spdk/dpdk/drivers/mempool/ring/meson.build
new file mode 100644
index 00000000..a021e908
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/ring/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('rte_mempool_ring.c')
diff --git a/src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring.c b/src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring.c
new file mode 100644
index 00000000..bc123fc5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring.c
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_errno.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+
+static int
+common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ return rte_ring_mp_enqueue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ return rte_ring_sp_enqueue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+ return rte_ring_mc_dequeue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+ return rte_ring_sc_dequeue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static unsigned
+common_ring_get_count(const struct rte_mempool *mp)
+{
+ return rte_ring_count(mp->pool_data);
+}
+
+
+static int
+common_ring_alloc(struct rte_mempool *mp)
+{
+ int rg_flags = 0, ret;
+ char rg_name[RTE_RING_NAMESIZE];
+ struct rte_ring *r;
+
+ ret = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT, mp->name);
+ if (ret < 0 || ret >= (int)sizeof(rg_name)) {
+ rte_errno = ENAMETOOLONG;
+ return -rte_errno;
+ }
+
+ /* ring flags */
+ if (mp->flags & MEMPOOL_F_SP_PUT)
+ rg_flags |= RING_F_SP_ENQ;
+ if (mp->flags & MEMPOOL_F_SC_GET)
+ rg_flags |= RING_F_SC_DEQ;
+
+ /*
+ * Allocate the ring that will be used to store objects.
+ * Ring functions will return appropriate errors if we are
+ * running as a secondary process etc., so no checks made
+ * in this function for that condition.
+ */
+ r = rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
+ mp->socket_id, rg_flags);
+ if (r == NULL)
+ return -rte_errno;
+
+ mp->pool_data = r;
+
+ return 0;
+}
+
+static void
+common_ring_free(struct rte_mempool *mp)
+{
+ rte_ring_free(mp->pool_data);
+}
+
+/*
+ * The following 4 declarations of mempool ops structs address
+ * the need for the backward compatible mempool handlers for
+ * single/multi producers and single/multi consumers as dictated by the
+ * flags provided to the rte_mempool_create function
+ */
+static const struct rte_mempool_ops ops_mp_mc = {
+ .name = "ring_mp_mc",
+ .alloc = common_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = common_ring_mp_enqueue,
+ .dequeue = common_ring_mc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+static const struct rte_mempool_ops ops_sp_sc = {
+ .name = "ring_sp_sc",
+ .alloc = common_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = common_ring_sp_enqueue,
+ .dequeue = common_ring_sc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+static const struct rte_mempool_ops ops_mp_sc = {
+ .name = "ring_mp_sc",
+ .alloc = common_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = common_ring_mp_enqueue,
+ .dequeue = common_ring_sc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+static const struct rte_mempool_ops ops_sp_mc = {
+ .name = "ring_sp_mc",
+ .alloc = common_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = common_ring_sp_enqueue,
+ .dequeue = common_ring_mc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+MEMPOOL_REGISTER_OPS(ops_mp_mc);
+MEMPOOL_REGISTER_OPS(ops_sp_sc);
+MEMPOOL_REGISTER_OPS(ops_mp_sc);
+MEMPOOL_REGISTER_OPS(ops_sp_mc);
diff --git a/src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring_version.map b/src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/mempool/stack/Makefile b/src/spdk/dpdk/drivers/mempool/stack/Makefile
new file mode 100644
index 00000000..0444aeda
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/stack/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_stack.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# Headers
+CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+
+EXPORT_MAP := rte_mempool_stack_version.map
+
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += rte_mempool_stack.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/mempool/stack/meson.build b/src/spdk/dpdk/drivers/mempool/stack/meson.build
new file mode 100644
index 00000000..b75a3bb5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/stack/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('rte_mempool_stack.c')
diff --git a/src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack.c b/src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack.c
new file mode 100644
index 00000000..e6d504af
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack.c
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+struct rte_mempool_stack {
+ rte_spinlock_t sl;
+
+ uint32_t size;
+ uint32_t len;
+ void *objs[];
+};
+
+static int
+stack_alloc(struct rte_mempool *mp)
+{
+ struct rte_mempool_stack *s;
+ unsigned n = mp->size;
+ int size = sizeof(*s) + (n+16)*sizeof(void *);
+
+ /* Allocate our local memory structure */
+ s = rte_zmalloc_socket("mempool-stack",
+ size,
+ RTE_CACHE_LINE_SIZE,
+ mp->socket_id);
+ if (s == NULL) {
+ RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
+ return -ENOMEM;
+ }
+
+ rte_spinlock_init(&s->sl);
+
+ s->size = n;
+ mp->pool_data = s;
+
+ return 0;
+}
+
+static int
+stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ struct rte_mempool_stack *s = mp->pool_data;
+ void **cache_objs;
+ unsigned index;
+
+ rte_spinlock_lock(&s->sl);
+ cache_objs = &s->objs[s->len];
+
+ /* Is there sufficient space in the stack ? */
+ if ((s->len + n) > s->size) {
+ rte_spinlock_unlock(&s->sl);
+ return -ENOBUFS;
+ }
+
+ /* Add elements back into the cache */
+ for (index = 0; index < n; ++index, obj_table++)
+ cache_objs[index] = *obj_table;
+
+ s->len += n;
+
+ rte_spinlock_unlock(&s->sl);
+ return 0;
+}
+
+static int
+stack_dequeue(struct rte_mempool *mp, void **obj_table,
+ unsigned n)
+{
+ struct rte_mempool_stack *s = mp->pool_data;
+ void **cache_objs;
+ unsigned index, len;
+
+ rte_spinlock_lock(&s->sl);
+
+ if (unlikely(n > s->len)) {
+ rte_spinlock_unlock(&s->sl);
+ return -ENOENT;
+ }
+
+ cache_objs = s->objs;
+
+ for (index = 0, len = s->len - 1; index < n;
+ ++index, len--, obj_table++)
+ *obj_table = cache_objs[len];
+
+ s->len -= n;
+ rte_spinlock_unlock(&s->sl);
+ return 0;
+}
+
+static unsigned
+stack_get_count(const struct rte_mempool *mp)
+{
+ struct rte_mempool_stack *s = mp->pool_data;
+
+ return s->len;
+}
+
+static void
+stack_free(struct rte_mempool *mp)
+{
+ rte_free((void *)(mp->pool_data));
+}
+
+static struct rte_mempool_ops ops_stack = {
+ .name = "stack",
+ .alloc = stack_alloc,
+ .free = stack_free,
+ .enqueue = stack_enqueue,
+ .dequeue = stack_dequeue,
+ .get_count = stack_get_count
+};
+
+MEMPOOL_REGISTER_OPS(ops_stack);
diff --git a/src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack_version.map b/src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/meson.build b/src/spdk/dpdk/drivers/meson.build
new file mode 100644
index 00000000..f94e2fe6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/meson.build
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+# Defines the order in which the drivers are buit.
+driver_classes = ['common',
+ 'bus',
+ 'mempool', # depends on common and bus.
+ 'net', # depends on common, bus and mempool.
+ 'crypto', # depends on common, bus and mempool (net in future).
+ 'compress', # depends on common, bus, mempool.
+ 'event', # depends on common, bus, mempool and net.
+ 'raw'] # depends on common, bus, mempool, net and event.
+
+default_cflags = machine_args
+if cc.has_argument('-Wno-format-truncation')
+ default_cflags += '-Wno-format-truncation'
+endif
+foreach class:driver_classes
+ drivers = []
+ std_deps = []
+ config_flag_fmt = '' # format string used to set the value in dpdk_conf
+ driver_name_fmt = '' # format string for driver name, used to name
+ # the library, the dependency and to find the
+ # version file for linking
+
+ subdir(class)
+
+ foreach drv:drivers
+ drv_path = join_paths(class, drv)
+
+ # set up empty variables used for build
+ build = true # set to false to disable, e.g. missing deps
+ name = drv
+ version = 1
+ allow_experimental_apis = false
+ sources = []
+ objs = []
+ cflags = default_cflags
+ includes = [include_directories(drv_path)]
+ # set up internal deps. Drivers can append/override as necessary
+ deps = std_deps
+ # ext_deps: Stores external library dependency got
+ # using dependency() or cc.find_library(). For most cases, we
+ # probably also need to specify the "-l" flags in
+ # pkgconfig_extra_libs variable too, so that it can be reflected
+ # in the pkgconfig output for static builds
+ ext_deps = []
+ pkgconfig_extra_libs = []
+
+ # pull in driver directory which should assign to each of the above
+ subdir(drv_path)
+
+ if build
+ dpdk_conf.set(config_flag_fmt.format(name.to_upper()),1)
+ lib_name = driver_name_fmt.format(name)
+
+ if allow_experimental_apis
+ cflags += '-DALLOW_EXPERIMENTAL_API'
+ endif
+
+ # get dependency objs from strings
+ shared_objs = []
+ static_objs = []
+ foreach d:deps
+ if not is_variable('shared_rte_' + d)
+ error('Missing dependency ' + d +
+ ' for driver ' + lib_name)
+ endif
+ shared_objs += [get_variable('shared_rte_' + d)]
+ static_objs += [get_variable('static_rte_' + d)]
+ endforeach
+ shared_objs += ext_deps
+ static_objs += ext_deps
+ dpdk_extra_ldflags += pkgconfig_extra_libs
+
+ # generate pmdinfo sources by building a temporary
+ # lib and then running pmdinfogen on the contents of
+ # that lib. The final lib reuses the object files and
+ # adds in the new source file.
+ out_filename = lib_name + '.pmd.c'
+ tmp_lib = static_library('tmp_' + lib_name,
+ sources,
+ include_directories: includes,
+ dependencies: static_objs,
+ c_args: cflags)
+ objs += tmp_lib.extract_all_objects()
+ sources = custom_target(out_filename,
+ command: [pmdinfo, tmp_lib.full_path(),
+ '@OUTPUT@', pmdinfogen],
+ output: out_filename,
+ depends: [pmdinfogen, tmp_lib])
+
+ if get_option('per_library_versions')
+ lib_version = '@0@.1'.format(version)
+ so_version = '@0@'.format(version)
+ else
+ pver = meson.project_version().split('.')
+ lib_version = '@0@.@1@'.format(pver.get(0),
+ pver.get(1))
+ so_version = lib_version
+ endif
+
+ # now build the static driver
+ static_lib = static_library(lib_name,
+ sources,
+ objects: objs,
+ include_directories: includes,
+ dependencies: static_objs,
+ c_args: cflags,
+ install: true)
+
+ # now build the shared driver
+ version_map = '@0@/@1@/@2@_version.map'.format(
+ meson.current_source_dir(),
+ drv_path, lib_name)
+ shared_lib = shared_library(lib_name,
+ sources,
+ objects: objs,
+ include_directories: includes,
+ dependencies: shared_objs,
+ c_args: cflags,
+ link_args: '-Wl,--version-script=' + version_map,
+ link_depends: version_map,
+ version: lib_version,
+ soversion: so_version,
+ install: true,
+ install_dir: driver_install_path)
+
+ # create a dependency object and add it to the global dictionary so
+ # testpmd or other built-in apps can find it if necessary
+ shared_dep = declare_dependency(link_with: shared_lib,
+ include_directories: includes,
+ dependencies: shared_objs)
+ static_dep = declare_dependency(link_with: static_lib,
+ include_directories: includes,
+ dependencies: static_objs)
+
+ dpdk_drivers += static_lib
+
+ set_variable('shared_@0@'.format(lib_name), shared_dep)
+ set_variable('static_@0@'.format(lib_name), static_dep)
+ endif # build
+ endforeach
+endforeach
diff --git a/src/spdk/dpdk/drivers/net/Makefile b/src/spdk/dpdk/drivers/net/Makefile
new file mode 100644
index 00000000..664398de
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/Makefile
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2015 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# set in mk/toolchain/xxx/rte.toolchain-compat.mk
+ifeq ($(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD),d)
+ $(warning thunderx pmd is not supported by old compilers)
+endif
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
+DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
+DIRS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf
+DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp
+DIRS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe
+DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding
+DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000
+DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena
+DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe
+DIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k
+DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e
+DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
+DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio
+DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
+DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5
+DIRS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mvpp2
+DIRS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += netvsc
+DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp
+DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null
+DIRS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += pcap
+DIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += ring
+DIRS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap
+DIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += thunderx
+DIRS-$(CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD) += vdev_netvsc
+DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio
+DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3
+
+ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_KNI) += kni
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_SCHED),y)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += softnic
+endif # $(CONFIG_RTE_LIBRTE_SCHED)
+
+ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost
+ifeq ($(CONFIG_RTE_EAL_VFIO),y)
+DIRS-$(CONFIG_RTE_LIBRTE_IFC_PMD) += ifc
+endif
+endif # $(CONFIG_RTE_LIBRTE_VHOST)
+
+ifeq ($(CONFIG_RTE_LIBRTE_MVPP2_PMD),y)
+ifeq ($(CONFIG_RTE_LIBRTE_CFGFILE),n)
+$(error "RTE_LIBRTE_CFGFILE must be enabled in configuration!")
+endif
+endif
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/net/af_packet/Makefile b/src/spdk/dpdk/drivers/net/af_packet/Makefile
new file mode 100644
index 00000000..39a1e0d2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/af_packet/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2014 John W. Linville <linville@redhat.com>
+# Copyright(c) 2010-2014 Intel Corporation.
+# Copyright(c) 2014 6WIND S.A.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_af_packet.a
+
+EXPORT_MAP := rte_pmd_af_packet_version.map
+
+LIBABIVER := 1
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vdev
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += rte_eth_af_packet.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/af_packet/meson.build b/src/spdk/dpdk/drivers/net/af_packet/meson.build
new file mode 100644
index 00000000..92f6a971
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/af_packet/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+sources = files('rte_eth_af_packet.c')
diff --git a/src/spdk/dpdk/drivers/net/af_packet/rte_eth_af_packet.c b/src/spdk/dpdk/drivers/net/af_packet/rte_eth_af_packet.c
new file mode 100644
index 00000000..eb3cce3a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/af_packet/rte_eth_af_packet.c
@@ -0,0 +1,1025 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014 John W. Linville <linville@tuxdriver.com>
+ * Originally based upon librte_pmd_pcap code:
+ * Copyright(c) 2010-2015 Intel Corporation.
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
+ */
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_malloc.h>
+#include <rte_kvargs.h>
+#include <rte_bus_vdev.h>
+
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <arpa/inet.h>
+#include <net/if.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <poll.h>
+
+#define ETH_AF_PACKET_IFACE_ARG "iface"
+#define ETH_AF_PACKET_NUM_Q_ARG "qpairs"
+#define ETH_AF_PACKET_BLOCKSIZE_ARG "blocksz"
+#define ETH_AF_PACKET_FRAMESIZE_ARG "framesz"
+#define ETH_AF_PACKET_FRAMECOUNT_ARG "framecnt"
+#define ETH_AF_PACKET_QDISC_BYPASS_ARG "qdisc_bypass"
+
+#define DFLT_BLOCK_SIZE (1 << 12)
+#define DFLT_FRAME_SIZE (1 << 11)
+#define DFLT_FRAME_COUNT (1 << 9)
+
+#define RTE_PMD_AF_PACKET_MAX_RINGS 16
+
+struct pkt_rx_queue {
+ int sockfd;
+
+ struct iovec *rd;
+ uint8_t *map;
+ unsigned int framecount;
+ unsigned int framenum;
+
+ struct rte_mempool *mb_pool;
+ uint16_t in_port;
+
+ volatile unsigned long rx_pkts;
+ volatile unsigned long err_pkts;
+ volatile unsigned long rx_bytes;
+};
+
+struct pkt_tx_queue {
+ int sockfd;
+ unsigned int frame_data_size;
+
+ struct iovec *rd;
+ uint8_t *map;
+ unsigned int framecount;
+ unsigned int framenum;
+
+ volatile unsigned long tx_pkts;
+ volatile unsigned long err_pkts;
+ volatile unsigned long tx_bytes;
+};
+
+struct pmd_internals {
+ unsigned nb_queues;
+
+ int if_index;
+ char *if_name;
+ struct ether_addr eth_addr;
+
+ struct tpacket_req req;
+
+ struct pkt_rx_queue rx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
+ struct pkt_tx_queue tx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
+};
+
+static const char *valid_arguments[] = {
+ ETH_AF_PACKET_IFACE_ARG,
+ ETH_AF_PACKET_NUM_Q_ARG,
+ ETH_AF_PACKET_BLOCKSIZE_ARG,
+ ETH_AF_PACKET_FRAMESIZE_ARG,
+ ETH_AF_PACKET_FRAMECOUNT_ARG,
+ ETH_AF_PACKET_QDISC_BYPASS_ARG,
+ NULL
+};
+
+static struct rte_eth_link pmd_link = {
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_FIXED,
+};
+
+static int af_packet_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, af_packet_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+static uint16_t
+eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ unsigned i;
+ struct tpacket2_hdr *ppd;
+ struct rte_mbuf *mbuf;
+ uint8_t *pbuf;
+ struct pkt_rx_queue *pkt_q = queue;
+ uint16_t num_rx = 0;
+ unsigned long num_rx_bytes = 0;
+ unsigned int framecount, framenum;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ /*
+ * Reads the given number of packets from the AF_PACKET socket one by
+ * one and copies the packet data into a newly allocated mbuf.
+ */
+ framecount = pkt_q->framecount;
+ framenum = pkt_q->framenum;
+ for (i = 0; i < nb_pkts; i++) {
+ /* point at the next incoming frame */
+ ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
+ if ((ppd->tp_status & TP_STATUS_USER) == 0)
+ break;
+
+ /* allocate the next mbuf */
+ mbuf = rte_pktmbuf_alloc(pkt_q->mb_pool);
+ if (unlikely(mbuf == NULL))
+ break;
+
+ /* packet will fit in the mbuf, go ahead and receive it */
+ rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf) = ppd->tp_snaplen;
+ pbuf = (uint8_t *) ppd + ppd->tp_mac;
+ memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf));
+
+ /* check for vlan info */
+ if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
+ mbuf->vlan_tci = ppd->tp_vlan_tci;
+ mbuf->ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ }
+
+ /* release incoming frame and advance ring buffer */
+ ppd->tp_status = TP_STATUS_KERNEL;
+ if (++framenum >= framecount)
+ framenum = 0;
+ mbuf->port = pkt_q->in_port;
+
+ /* account for the receive frame */
+ bufs[i] = mbuf;
+ num_rx++;
+ num_rx_bytes += mbuf->pkt_len;
+ }
+ pkt_q->framenum = framenum;
+ pkt_q->rx_pkts += num_rx;
+ pkt_q->rx_bytes += num_rx_bytes;
+ return num_rx;
+}
+
+/*
+ * Callback to handle sending packets through a real NIC.
+ */
+static uint16_t
+eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct tpacket2_hdr *ppd;
+ struct rte_mbuf *mbuf;
+ uint8_t *pbuf;
+ unsigned int framecount, framenum;
+ struct pollfd pfd;
+ struct pkt_tx_queue *pkt_q = queue;
+ uint16_t num_tx = 0;
+ unsigned long num_tx_bytes = 0;
+ int i;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ memset(&pfd, 0, sizeof(pfd));
+ pfd.fd = pkt_q->sockfd;
+ pfd.events = POLLOUT;
+ pfd.revents = 0;
+
+ framecount = pkt_q->framecount;
+ framenum = pkt_q->framenum;
+ ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
+ for (i = 0; i < nb_pkts; i++) {
+ mbuf = *bufs++;
+
+ /* drop oversized packets */
+ if (mbuf->pkt_len > pkt_q->frame_data_size) {
+ rte_pktmbuf_free(mbuf);
+ continue;
+ }
+
+ /* insert vlan info if necessary */
+ if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (rte_vlan_insert(&mbuf)) {
+ rte_pktmbuf_free(mbuf);
+ continue;
+ }
+ }
+
+ /* point at the next incoming frame */
+ if ((ppd->tp_status != TP_STATUS_AVAILABLE) &&
+ (poll(&pfd, 1, -1) < 0))
+ break;
+
+ /* copy the tx frame data */
+ pbuf = (uint8_t *) ppd + TPACKET2_HDRLEN -
+ sizeof(struct sockaddr_ll);
+
+ struct rte_mbuf *tmp_mbuf = mbuf;
+ while (tmp_mbuf) {
+ uint16_t data_len = rte_pktmbuf_data_len(tmp_mbuf);
+ memcpy(pbuf, rte_pktmbuf_mtod(tmp_mbuf, void*), data_len);
+ pbuf += data_len;
+ tmp_mbuf = tmp_mbuf->next;
+ }
+
+ ppd->tp_len = mbuf->pkt_len;
+ ppd->tp_snaplen = mbuf->pkt_len;
+
+ /* release incoming frame and advance ring buffer */
+ ppd->tp_status = TP_STATUS_SEND_REQUEST;
+ if (++framenum >= framecount)
+ framenum = 0;
+ ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
+
+ num_tx++;
+ num_tx_bytes += mbuf->pkt_len;
+ rte_pktmbuf_free(mbuf);
+ }
+
+ /* kick-off transmits */
+ if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1) {
+ /* error sending -- no packets transmitted */
+ num_tx = 0;
+ num_tx_bytes = 0;
+ }
+
+ pkt_q->framenum = framenum;
+ pkt_q->tx_pkts += num_tx;
+ pkt_q->err_pkts += i - num_tx;
+ pkt_q->tx_bytes += num_tx_bytes;
+ return i;
+}
+
+static int
+eth_dev_start(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = ETH_LINK_UP;
+ return 0;
+}
+
+/*
+ * This function gets called when the current port gets stopped.
+ */
+static void
+eth_dev_stop(struct rte_eth_dev *dev)
+{
+ unsigned i;
+ int sockfd;
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ for (i = 0; i < internals->nb_queues; i++) {
+ sockfd = internals->rx_queue[i].sockfd;
+ if (sockfd != -1)
+ close(sockfd);
+
+ /* Prevent use after free in case tx fd == rx fd */
+ if (sockfd != internals->tx_queue[i].sockfd) {
+ sockfd = internals->tx_queue[i].sockfd;
+ if (sockfd != -1)
+ close(sockfd);
+ }
+
+ internals->rx_queue[i].sockfd = -1;
+ internals->tx_queue[i].sockfd = -1;
+ }
+
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+}
+
+static int
+eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ dev_info->if_index = internals->if_index;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
+ dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
+ dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
+}
+
+static int
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
+{
+ unsigned i, imax;
+ unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
+ unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
+ const struct pmd_internals *internal = dev->data->dev_private;
+
+ imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
+ internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (i = 0; i < imax; i++) {
+ igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
+ igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
+ rx_total += igb_stats->q_ipackets[i];
+ rx_bytes_total += igb_stats->q_ibytes[i];
+ }
+
+ imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
+ internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (i = 0; i < imax; i++) {
+ igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
+ igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
+ igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
+ tx_total += igb_stats->q_opackets[i];
+ tx_err_total += igb_stats->q_errors[i];
+ tx_bytes_total += igb_stats->q_obytes[i];
+ }
+
+ igb_stats->ipackets = rx_total;
+ igb_stats->ibytes = rx_bytes_total;
+ igb_stats->opackets = tx_total;
+ igb_stats->oerrors = tx_err_total;
+ igb_stats->obytes = tx_bytes_total;
+ return 0;
+}
+
+static void
+eth_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned i;
+ struct pmd_internals *internal = dev->data->dev_private;
+
+ for (i = 0; i < internal->nb_queues; i++) {
+ internal->rx_queue[i].rx_pkts = 0;
+ internal->rx_queue[i].rx_bytes = 0;
+ }
+
+ for (i = 0; i < internal->nb_queues; i++) {
+ internal->tx_queue[i].tx_pkts = 0;
+ internal->tx_queue[i].err_pkts = 0;
+ internal->tx_queue[i].tx_bytes = 0;
+ }
+}
+
+static void
+eth_dev_close(struct rte_eth_dev *dev __rte_unused)
+{
+}
+
+static void
+eth_queue_release(void *q __rte_unused)
+{
+}
+
+static int
+eth_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
+{
+ return 0;
+}
+
+static int
+eth_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id];
+ unsigned int buf_size, data_size;
+
+ pkt_q->mb_pool = mb_pool;
+
+ /* Now get the space available for data in the mbuf */
+ buf_size = rte_pktmbuf_data_room_size(pkt_q->mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+ data_size = internals->req.tp_frame_size;
+ data_size -= TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);
+
+ if (data_size > buf_size) {
+ PMD_LOG(ERR,
+ "%s: %d bytes will not fit in mbuf (%d bytes)",
+ dev->device->name, data_size, buf_size);
+ return -ENOMEM;
+ }
+
+ dev->data->rx_queues[rx_queue_id] = pkt_q;
+ pkt_q->in_port = dev->data->port_id;
+
+ return 0;
+}
+
+static int
+eth_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
+ return 0;
+}
+
+static int
+eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_mtu = mtu };
+ int ret;
+ int s;
+ unsigned int data_size = internals->req.tp_frame_size -
+ TPACKET2_HDRLEN -
+ sizeof(struct sockaddr_ll);
+
+ if (mtu > data_size)
+ return -EINVAL;
+
+ s = socket(PF_INET, SOCK_DGRAM, 0);
+ if (s < 0)
+ return -EINVAL;
+
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", internals->if_name);
+ ret = ioctl(s, SIOCSIFMTU, &ifr);
+ close(s);
+
+ if (ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
+{
+ struct ifreq ifr;
+ int s;
+
+ s = socket(PF_INET, SOCK_DGRAM, 0);
+ if (s < 0)
+ return;
+
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", if_name);
+ if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0)
+ goto out;
+ ifr.ifr_flags &= mask;
+ ifr.ifr_flags |= flags;
+ if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0)
+ goto out;
+out:
+ close(s);
+}
+
+static void
+eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
+}
+
+static void
+eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
+}
+
+static const struct eth_dev_ops ops = {
+ .dev_start = eth_dev_start,
+ .dev_stop = eth_dev_stop,
+ .dev_close = eth_dev_close,
+ .dev_configure = eth_dev_configure,
+ .dev_infos_get = eth_dev_info,
+ .mtu_set = eth_dev_mtu_set,
+ .promiscuous_enable = eth_dev_promiscuous_enable,
+ .promiscuous_disable = eth_dev_promiscuous_disable,
+ .rx_queue_setup = eth_rx_queue_setup,
+ .tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_release = eth_queue_release,
+ .tx_queue_release = eth_queue_release,
+ .link_update = eth_link_update,
+ .stats_get = eth_stats_get,
+ .stats_reset = eth_stats_reset,
+};
+
+/*
+ * Opens an AF_PACKET socket
+ */
+static int
+open_packet_iface(const char *key __rte_unused,
+ const char *value __rte_unused,
+ void *extra_args)
+{
+ int *sockfd = extra_args;
+
+ /* Open an AF_PACKET socket... */
+ *sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+ if (*sockfd == -1) {
+ PMD_LOG(ERR, "Could not open AF_PACKET socket");
+ return -1;
+ }
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_af_packet_drv;
+
+static int
+rte_pmd_init_internals(struct rte_vdev_device *dev,
+ const int sockfd,
+ const unsigned nb_queues,
+ unsigned int blocksize,
+ unsigned int blockcnt,
+ unsigned int framesize,
+ unsigned int framecnt,
+ unsigned int qdisc_bypass,
+ struct pmd_internals **internals,
+ struct rte_eth_dev **eth_dev,
+ struct rte_kvargs *kvlist)
+{
+ const char *name = rte_vdev_device_name(dev);
+ const unsigned int numa_node = dev->device.numa_node;
+ struct rte_eth_dev_data *data = NULL;
+ struct rte_kvargs_pair *pair = NULL;
+ struct ifreq ifr;
+ size_t ifnamelen;
+ unsigned k_idx;
+ struct sockaddr_ll sockaddr;
+ struct tpacket_req *req;
+ struct pkt_rx_queue *rx_queue;
+ struct pkt_tx_queue *tx_queue;
+ int rc, tpver, discard;
+ int qsockfd = -1;
+ unsigned int i, q, rdsize;
+#if defined(PACKET_FANOUT)
+ int fanout_arg;
+#endif
+
+ for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
+ pair = &kvlist->pairs[k_idx];
+ if (strstr(pair->key, ETH_AF_PACKET_IFACE_ARG) != NULL)
+ break;
+ }
+ if (pair == NULL) {
+ PMD_LOG(ERR,
+ "%s: no interface specified for AF_PACKET ethdev",
+ name);
+ return -1;
+ }
+
+ PMD_LOG(INFO,
+ "%s: creating AF_PACKET-backed ethdev on numa socket %u",
+ name, numa_node);
+
+ *internals = rte_zmalloc_socket(name, sizeof(**internals),
+ 0, numa_node);
+ if (*internals == NULL)
+ return -1;
+
+ for (q = 0; q < nb_queues; q++) {
+ (*internals)->rx_queue[q].map = MAP_FAILED;
+ (*internals)->tx_queue[q].map = MAP_FAILED;
+ }
+
+ req = &((*internals)->req);
+
+ req->tp_block_size = blocksize;
+ req->tp_block_nr = blockcnt;
+ req->tp_frame_size = framesize;
+ req->tp_frame_nr = framecnt;
+
+ ifnamelen = strlen(pair->value);
+ if (ifnamelen < sizeof(ifr.ifr_name)) {
+ memcpy(ifr.ifr_name, pair->value, ifnamelen);
+ ifr.ifr_name[ifnamelen] = '\0';
+ } else {
+ PMD_LOG(ERR,
+ "%s: I/F name too long (%s)",
+ name, pair->value);
+ return -1;
+ }
+ if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) {
+ PMD_LOG(ERR,
+ "%s: ioctl failed (SIOCGIFINDEX)",
+ name);
+ return -1;
+ }
+ (*internals)->if_name = strdup(pair->value);
+ if ((*internals)->if_name == NULL)
+ return -1;
+ (*internals)->if_index = ifr.ifr_ifindex;
+
+ if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
+ PMD_LOG(ERR,
+ "%s: ioctl failed (SIOCGIFHWADDR)",
+ name);
+ return -1;
+ }
+ memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN);
+
+ memset(&sockaddr, 0, sizeof(sockaddr));
+ sockaddr.sll_family = AF_PACKET;
+ sockaddr.sll_protocol = htons(ETH_P_ALL);
+ sockaddr.sll_ifindex = (*internals)->if_index;
+
+#if defined(PACKET_FANOUT)
+ fanout_arg = (getpid() ^ (*internals)->if_index) & 0xffff;
+ fanout_arg |= (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG) << 16;
+#if defined(PACKET_FANOUT_FLAG_ROLLOVER)
+ fanout_arg |= PACKET_FANOUT_FLAG_ROLLOVER << 16;
+#endif
+#endif
+
+ for (q = 0; q < nb_queues; q++) {
+ /* Open an AF_PACKET socket for this queue... */
+ qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+ if (qsockfd == -1) {
+ PMD_LOG(ERR,
+ "%s: could not open AF_PACKET socket",
+ name);
+ return -1;
+ }
+
+ tpver = TPACKET_V2;
+ rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION,
+ &tpver, sizeof(tpver));
+ if (rc == -1) {
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_VERSION on AF_PACKET socket for %s",
+ name, pair->value);
+ goto error;
+ }
+
+ discard = 1;
+ rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS,
+ &discard, sizeof(discard));
+ if (rc == -1) {
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_LOSS on AF_PACKET socket for %s",
+ name, pair->value);
+ goto error;
+ }
+
+#if defined(PACKET_QDISC_BYPASS)
+ rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
+ &qdisc_bypass, sizeof(qdisc_bypass));
+ if (rc == -1) {
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_QDISC_BYPASS on AF_PACKET socket for %s",
+ name, pair->value);
+ goto error;
+ }
+#else
+ RTE_SET_USED(qdisc_bypass);
+#endif
+
+ rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
+ if (rc == -1) {
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_RX_RING on AF_PACKET socket for %s",
+ name, pair->value);
+ goto error;
+ }
+
+ rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req));
+ if (rc == -1) {
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_TX_RING on AF_PACKET "
+ "socket for %s", name, pair->value);
+ goto error;
+ }
+
+ rx_queue = &((*internals)->rx_queue[q]);
+ rx_queue->framecount = req->tp_frame_nr;
+
+ rx_queue->map = mmap(NULL, 2 * req->tp_block_size * req->tp_block_nr,
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
+ qsockfd, 0);
+ if (rx_queue->map == MAP_FAILED) {
+ PMD_LOG(ERR,
+ "%s: call to mmap failed on AF_PACKET socket for %s",
+ name, pair->value);
+ goto error;
+ }
+
+ /* rdsize is same for both Tx and Rx */
+ rdsize = req->tp_frame_nr * sizeof(*(rx_queue->rd));
+
+ rx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
+ if (rx_queue->rd == NULL)
+ goto error;
+ for (i = 0; i < req->tp_frame_nr; ++i) {
+ rx_queue->rd[i].iov_base = rx_queue->map + (i * framesize);
+ rx_queue->rd[i].iov_len = req->tp_frame_size;
+ }
+ rx_queue->sockfd = qsockfd;
+
+ tx_queue = &((*internals)->tx_queue[q]);
+ tx_queue->framecount = req->tp_frame_nr;
+ tx_queue->frame_data_size = req->tp_frame_size;
+ tx_queue->frame_data_size -= TPACKET2_HDRLEN -
+ sizeof(struct sockaddr_ll);
+
+ tx_queue->map = rx_queue->map + req->tp_block_size * req->tp_block_nr;
+
+ tx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
+ if (tx_queue->rd == NULL)
+ goto error;
+ for (i = 0; i < req->tp_frame_nr; ++i) {
+ tx_queue->rd[i].iov_base = tx_queue->map + (i * framesize);
+ tx_queue->rd[i].iov_len = req->tp_frame_size;
+ }
+ tx_queue->sockfd = qsockfd;
+
+ rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr));
+ if (rc == -1) {
+ PMD_LOG(ERR,
+ "%s: could not bind AF_PACKET socket to %s",
+ name, pair->value);
+ goto error;
+ }
+
+#if defined(PACKET_FANOUT)
+ rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT,
+ &fanout_arg, sizeof(fanout_arg));
+ if (rc == -1) {
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_FANOUT on AF_PACKET socket "
+ "for %s", name, pair->value);
+ goto error;
+ }
+#endif
+ }
+
+ /* reserve an ethdev entry */
+ *eth_dev = rte_eth_vdev_allocate(dev, 0);
+ if (*eth_dev == NULL)
+ goto error;
+
+ /*
+ * now put it all together
+ * - store queue data in internals,
+ * - store numa_node in eth_dev
+ * - point eth_dev_data to internals
+ * - and point eth_dev structure to new eth_dev_data structure
+ */
+
+ (*internals)->nb_queues = nb_queues;
+
+ data = (*eth_dev)->data;
+ data->dev_private = *internals;
+ data->nb_rx_queues = (uint16_t)nb_queues;
+ data->nb_tx_queues = (uint16_t)nb_queues;
+ data->dev_link = pmd_link;
+ data->mac_addrs = &(*internals)->eth_addr;
+
+ (*eth_dev)->dev_ops = &ops;
+
+ return 0;
+
+error:
+ if (qsockfd != -1)
+ close(qsockfd);
+ for (q = 0; q < nb_queues; q++) {
+ munmap((*internals)->rx_queue[q].map,
+ 2 * req->tp_block_size * req->tp_block_nr);
+
+ rte_free((*internals)->rx_queue[q].rd);
+ rte_free((*internals)->tx_queue[q].rd);
+ if (((*internals)->rx_queue[q].sockfd != 0) &&
+ ((*internals)->rx_queue[q].sockfd != qsockfd))
+ close((*internals)->rx_queue[q].sockfd);
+ }
+ free((*internals)->if_name);
+ rte_free(*internals);
+ return -1;
+}
+
+static int
+rte_eth_from_packet(struct rte_vdev_device *dev,
+ int const *sockfd,
+ struct rte_kvargs *kvlist)
+{
+ const char *name = rte_vdev_device_name(dev);
+ struct pmd_internals *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ struct rte_kvargs_pair *pair = NULL;
+ unsigned k_idx;
+ unsigned int blockcount;
+ unsigned int blocksize = DFLT_BLOCK_SIZE;
+ unsigned int framesize = DFLT_FRAME_SIZE;
+ unsigned int framecount = DFLT_FRAME_COUNT;
+ unsigned int qpairs = 1;
+ unsigned int qdisc_bypass = 1;
+
+ /* do some parameter checking */
+ if (*sockfd < 0)
+ return -1;
+
+ /*
+ * Walk arguments for configurable settings
+ */
+ for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
+ pair = &kvlist->pairs[k_idx];
+ if (strstr(pair->key, ETH_AF_PACKET_NUM_Q_ARG) != NULL) {
+ qpairs = atoi(pair->value);
+ if (qpairs < 1 ||
+ qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) {
+ PMD_LOG(ERR,
+ "%s: invalid qpairs value",
+ name);
+ return -1;
+ }
+ continue;
+ }
+ if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) {
+ blocksize = atoi(pair->value);
+ if (!blocksize) {
+ PMD_LOG(ERR,
+ "%s: invalid blocksize value",
+ name);
+ return -1;
+ }
+ continue;
+ }
+ if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) {
+ framesize = atoi(pair->value);
+ if (!framesize) {
+ PMD_LOG(ERR,
+ "%s: invalid framesize value",
+ name);
+ return -1;
+ }
+ continue;
+ }
+ if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) {
+ framecount = atoi(pair->value);
+ if (!framecount) {
+ PMD_LOG(ERR,
+ "%s: invalid framecount value",
+ name);
+ return -1;
+ }
+ continue;
+ }
+ if (strstr(pair->key, ETH_AF_PACKET_QDISC_BYPASS_ARG) != NULL) {
+ qdisc_bypass = atoi(pair->value);
+ if (qdisc_bypass > 1) {
+ PMD_LOG(ERR,
+ "%s: invalid bypass value",
+ name);
+ return -1;
+ }
+ continue;
+ }
+ }
+
+ if (framesize > blocksize) {
+ PMD_LOG(ERR,
+ "%s: AF_PACKET MMAP frame size exceeds block size!",
+ name);
+ return -1;
+ }
+
+ blockcount = framecount / (blocksize / framesize);
+ if (!blockcount) {
+ PMD_LOG(ERR,
+ "%s: invalid AF_PACKET MMAP parameters", name);
+ return -1;
+ }
+
+ PMD_LOG(INFO, "%s: AF_PACKET MMAP parameters:", name);
+ PMD_LOG(INFO, "%s:\tblock size %d", name, blocksize);
+ PMD_LOG(INFO, "%s:\tblock count %d", name, blockcount);
+ PMD_LOG(INFO, "%s:\tframe size %d", name, framesize);
+ PMD_LOG(INFO, "%s:\tframe count %d", name, framecount);
+
+ if (rte_pmd_init_internals(dev, *sockfd, qpairs,
+ blocksize, blockcount,
+ framesize, framecount,
+ qdisc_bypass,
+ &internals, &eth_dev,
+ kvlist) < 0)
+ return -1;
+
+ eth_dev->rx_pkt_burst = eth_af_packet_rx;
+ eth_dev->tx_pkt_burst = eth_af_packet_tx;
+
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+}
+
+static int
+rte_pmd_af_packet_probe(struct rte_vdev_device *dev)
+{
+ int ret = 0;
+ struct rte_kvargs *kvlist;
+ int sockfd = -1;
+ struct rte_eth_dev *eth_dev;
+ const char *name = rte_vdev_device_name(dev);
+
+ PMD_LOG(INFO, "Initializing pmd_af_packet for %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
+ if (kvlist == NULL) {
+ ret = -1;
+ goto exit;
+ }
+
+ /*
+ * If iface argument is passed we open the NICs and use them for
+ * reading / writing
+ */
+ if (rte_kvargs_count(kvlist, ETH_AF_PACKET_IFACE_ARG) == 1) {
+
+ ret = rte_kvargs_process(kvlist, ETH_AF_PACKET_IFACE_ARG,
+ &open_packet_iface, &sockfd);
+ if (ret < 0)
+ goto exit;
+ }
+
+ if (dev->device.numa_node == SOCKET_ID_ANY)
+ dev->device.numa_node = rte_socket_id();
+
+ ret = rte_eth_from_packet(dev, &sockfd, kvlist);
+ close(sockfd); /* no longer needed */
+
+exit:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
+{
+ struct rte_eth_dev *eth_dev = NULL;
+ struct pmd_internals *internals;
+ unsigned q;
+
+ PMD_LOG(INFO, "Closing AF_PACKET ethdev on numa socket %u",
+ rte_socket_id());
+
+ if (dev == NULL)
+ return -1;
+
+ /* find the ethdev entry */
+ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
+ if (eth_dev == NULL)
+ return -1;
+
+ internals = eth_dev->data->dev_private;
+ for (q = 0; q < internals->nb_queues; q++) {
+ rte_free(internals->rx_queue[q].rd);
+ rte_free(internals->tx_queue[q].rd);
+ }
+ free(internals->if_name);
+
+ rte_free(eth_dev->data->dev_private);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_af_packet_drv = {
+ .probe = rte_pmd_af_packet_probe,
+ .remove = rte_pmd_af_packet_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_af_packet, pmd_af_packet_drv);
+RTE_PMD_REGISTER_ALIAS(net_af_packet, eth_af_packet);
+RTE_PMD_REGISTER_PARAM_STRING(net_af_packet,
+ "iface=<string> "
+ "qpairs=<int> "
+ "blocksz=<int> "
+ "framesz=<int> "
+ "framecnt=<int> "
+ "qdisc_bypass=<0|1>");
+
+RTE_INIT(af_packet_init_log)
+{
+ af_packet_logtype = rte_log_register("pmd.net.packet");
+ if (af_packet_logtype >= 0)
+ rte_log_set_level(af_packet_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/af_packet/rte_pmd_af_packet_version.map b/src/spdk/dpdk/drivers/net/af_packet/rte_pmd_af_packet_version.map
new file mode 100644
index 00000000..ef353984
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/af_packet/rte_pmd_af_packet_version.map
@@ -0,0 +1,4 @@
+DPDK_2.0 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/ark/Makefile b/src/spdk/dpdk/drivers/net/ark/Makefile
new file mode 100644
index 00000000..2e232be8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/Makefile
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2015-2018 Atomic Rules LLC
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ark.a
+
+CFLAGS += -O3 -I./
+CFLAGS += $(WERROR_FLAGS) -Werror
+
+EXPORT_MAP := rte_pmd_ark_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ddm.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_mpu.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktchkr.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktgen.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_rqp.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_udm.c
+
+# this lib depends upon:
+LDLIBS += -lpthread
+ifdef CONFIG_RTE_EXEC_ENV_LINUXAPP
+LDLIBS += -ldl
+endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ddm.c b/src/spdk/dpdk/drivers/net/ark/ark_ddm.c
new file mode 100644
index 00000000..eea388a1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ddm.c
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+
+#include "ark_logs.h"
+#include "ark_ddm.h"
+
+/* ************************************************************************* */
+int
+ark_ddm_verify(struct ark_ddm_t *ddm)
+{
+ if (sizeof(struct ark_ddm_t) != ARK_DDM_EXPECTED_SIZE) {
+ PMD_DRV_LOG(ERR, "ARK: DDM structure looks incorrect %d vs %zd\n",
+ ARK_DDM_EXPECTED_SIZE, sizeof(struct ark_ddm_t));
+ return -1;
+ }
+
+ if (ddm->cfg.const0 != ARK_DDM_CONST) {
+ PMD_DRV_LOG(ERR, "ARK: DDM module not found as expected 0x%08x\n",
+ ddm->cfg.const0);
+ return -1;
+ }
+ return 0;
+}
+
+void
+ark_ddm_start(struct ark_ddm_t *ddm)
+{
+ ddm->cfg.command = 1;
+}
+
+int
+ark_ddm_stop(struct ark_ddm_t *ddm, const int wait)
+{
+ int cnt = 0;
+
+ ddm->cfg.command = 2;
+ while (wait && (ddm->cfg.stop_flushed & 0x01) == 0) {
+ if (cnt++ > 1000)
+ return 1;
+
+ usleep(10);
+ }
+ return 0;
+}
+
+void
+ark_ddm_reset(struct ark_ddm_t *ddm)
+{
+ int status;
+
+ /* reset only works if ddm has stopped properly. */
+ status = ark_ddm_stop(ddm, 1);
+
+ if (status != 0) {
+ PMD_DEBUG_LOG(INFO, "%s stop failed doing forced reset\n",
+ __func__);
+ ddm->cfg.command = 4;
+ usleep(10);
+ }
+ ddm->cfg.command = 3;
+}
+
+void
+ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr, uint32_t interval)
+{
+ ddm->setup.cons_write_index_addr = cons_addr;
+ ddm->setup.write_index_interval = interval / 4; /* 4 ns period */
+}
+
+void
+ark_ddm_stats_reset(struct ark_ddm_t *ddm)
+{
+ ddm->cfg.tlp_stats_clear = 1;
+}
+
+void
+ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg)
+{
+ PMD_FUNC_LOG(DEBUG, "%s Stopped: %d\n", msg,
+ ark_ddm_is_stopped(ddm)
+ );
+}
+
+void
+ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg)
+{
+ struct ark_ddm_stats_t *stats = &ddm->stats;
+
+ PMD_STATS_LOG(INFO, "DDM Stats: %s"
+ ARK_SU64 ARK_SU64 ARK_SU64
+ "\n", msg,
+ "Bytes:", stats->tx_byte_count,
+ "Packets:", stats->tx_pkt_count,
+ "MBufs", stats->tx_mbuf_count);
+}
+
+int
+ark_ddm_is_stopped(struct ark_ddm_t *ddm)
+{
+ return (ddm->cfg.stop_flushed & 0x01) != 0;
+}
+
+uint64_t
+ark_ddm_queue_byte_count(struct ark_ddm_t *ddm)
+{
+ return ddm->queue_stats.byte_count;
+}
+
+uint64_t
+ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm)
+{
+ return ddm->queue_stats.pkt_count;
+}
+
+void
+ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm)
+{
+ ddm->queue_stats.byte_count = 1;
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ddm.h b/src/spdk/dpdk/drivers/net/ark/ark_ddm.h
new file mode 100644
index 00000000..b37d1e09
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ddm.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_DDM_H_
+#define _ARK_DDM_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+
+/* The DDM or Downstream Data Mover is an internal Arkville hardware
+ * module for moving packet from host memory to the TX packet streams.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/* struct defining Tx meta data -- fixed in FPGA -- 16 bytes */
+struct ark_tx_meta {
+ uint64_t physaddr;
+ uint32_t delta_ns;
+ uint16_t data_len; /* of this MBUF */
+#define ARK_DDM_EOP 0x01
+#define ARK_DDM_SOP 0x02
+ uint8_t flags; /* bit 0 indicates last mbuf in chain. */
+ uint8_t reserved[1];
+};
+
+
+/*
+ * DDM core hardware structures
+ * These are overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+#define ARK_DDM_CFG 0x0000
+#define ARK_DDM_CONST 0xfacecafe
+struct ark_ddm_cfg_t {
+ uint32_t r0;
+ volatile uint32_t tlp_stats_clear;
+ uint32_t const0;
+ volatile uint32_t tag_max;
+ volatile uint32_t command;
+ volatile uint32_t stop_flushed;
+};
+
+#define ARK_DDM_STATS 0x0020
+struct ark_ddm_stats_t {
+ volatile uint64_t tx_byte_count;
+ volatile uint64_t tx_pkt_count;
+ volatile uint64_t tx_mbuf_count;
+};
+
+#define ARK_DDM_MRDQ 0x0040
+struct ark_ddm_mrdq_t {
+ volatile uint32_t mrd_q1;
+ volatile uint32_t mrd_q2;
+ volatile uint32_t mrd_q3;
+ volatile uint32_t mrd_q4;
+ volatile uint32_t mrd_full;
+};
+
+#define ARK_DDM_CPLDQ 0x0068
+struct ark_ddm_cpldq_t {
+ volatile uint32_t cpld_q1;
+ volatile uint32_t cpld_q2;
+ volatile uint32_t cpld_q3;
+ volatile uint32_t cpld_q4;
+ volatile uint32_t cpld_full;
+};
+
+#define ARK_DDM_MRD_PS 0x0090
+struct ark_ddm_mrd_ps_t {
+ volatile uint32_t mrd_ps_min;
+ volatile uint32_t mrd_ps_max;
+ volatile uint32_t mrd_full_ps_min;
+ volatile uint32_t mrd_full_ps_max;
+ volatile uint32_t mrd_dw_ps_min;
+ volatile uint32_t mrd_dw_ps_max;
+};
+
+#define ARK_DDM_QUEUE_STATS 0x00a8
+struct ark_ddm_qstats_t {
+ volatile uint64_t byte_count;
+ volatile uint64_t pkt_count;
+ volatile uint64_t mbuf_count;
+};
+
+#define ARK_DDM_CPLD_PS 0x00c0
+struct ark_ddm_cpld_ps_t {
+ volatile uint32_t cpld_ps_min;
+ volatile uint32_t cpld_ps_max;
+ volatile uint32_t cpld_full_ps_min;
+ volatile uint32_t cpld_full_ps_max;
+ volatile uint32_t cpld_dw_ps_min;
+ volatile uint32_t cpld_dw_ps_max;
+};
+
+#define ARK_DDM_SETUP 0x00e0
+struct ark_ddm_setup_t {
+ rte_iova_t cons_write_index_addr;
+ uint32_t write_index_interval; /* 4ns each */
+ volatile uint32_t cons_index;
+};
+
+#define ARK_DDM_EXPECTED_SIZE 256
+#define ARK_DDM_QOFFSET ARK_DDM_EXPECTED_SIZE
+/* Consolidated structure */
+struct ark_ddm_t {
+ struct ark_ddm_cfg_t cfg;
+ uint8_t reserved0[(ARK_DDM_STATS - ARK_DDM_CFG) -
+ sizeof(struct ark_ddm_cfg_t)];
+ struct ark_ddm_stats_t stats;
+ uint8_t reserved1[(ARK_DDM_MRDQ - ARK_DDM_STATS) -
+ sizeof(struct ark_ddm_stats_t)];
+ struct ark_ddm_mrdq_t mrdq;
+ uint8_t reserved2[(ARK_DDM_CPLDQ - ARK_DDM_MRDQ) -
+ sizeof(struct ark_ddm_mrdq_t)];
+ struct ark_ddm_cpldq_t cpldq;
+ uint8_t reserved3[(ARK_DDM_MRD_PS - ARK_DDM_CPLDQ) -
+ sizeof(struct ark_ddm_cpldq_t)];
+ struct ark_ddm_mrd_ps_t mrd_ps;
+ struct ark_ddm_qstats_t queue_stats;
+ struct ark_ddm_cpld_ps_t cpld_ps;
+ uint8_t reserved5[(ARK_DDM_SETUP - ARK_DDM_CPLD_PS) -
+ sizeof(struct ark_ddm_cpld_ps_t)];
+ struct ark_ddm_setup_t setup;
+ uint8_t reserved_p[(ARK_DDM_EXPECTED_SIZE - ARK_DDM_SETUP) -
+ sizeof(struct ark_ddm_setup_t)];
+};
+
+
+/* DDM function prototype */
+int ark_ddm_verify(struct ark_ddm_t *ddm);
+void ark_ddm_start(struct ark_ddm_t *ddm);
+int ark_ddm_stop(struct ark_ddm_t *ddm, const int wait);
+void ark_ddm_reset(struct ark_ddm_t *ddm);
+void ark_ddm_stats_reset(struct ark_ddm_t *ddm);
+void ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr,
+ uint32_t interval);
+void ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg);
+void ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg);
+int ark_ddm_is_stopped(struct ark_ddm_t *ddm);
+uint64_t ark_ddm_queue_byte_count(struct ark_ddm_t *ddm);
+uint64_t ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm);
+void ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev.c b/src/spdk/dpdk/drivers/net/ark/ark_ethdev.c
new file mode 100644
index 00000000..552ca01a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev.c
@@ -0,0 +1,1023 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+#include <sys/stat.h>
+#include <dlfcn.h>
+
+#include <rte_bus_pci.h>
+#include <rte_ethdev_pci.h>
+#include <rte_kvargs.h>
+
+#include "ark_global.h"
+#include "ark_logs.h"
+#include "ark_ethdev_tx.h"
+#include "ark_ethdev_rx.h"
+#include "ark_mpu.h"
+#include "ark_ddm.h"
+#include "ark_udm.h"
+#include "ark_rqp.h"
+#include "ark_pktdir.h"
+#include "ark_pktgen.h"
+#include "ark_pktchkr.h"
+
+/* Internal prototypes */
+static int eth_ark_check_args(struct ark_adapter *ark, const char *params);
+static int eth_ark_dev_init(struct rte_eth_dev *dev);
+static int ark_config_device(struct rte_eth_dev *dev);
+static int eth_ark_dev_uninit(struct rte_eth_dev *eth_dev);
+static int eth_ark_dev_configure(struct rte_eth_dev *dev);
+static int eth_ark_dev_start(struct rte_eth_dev *dev);
+static void eth_ark_dev_stop(struct rte_eth_dev *dev);
+static void eth_ark_dev_close(struct rte_eth_dev *dev);
+static void eth_ark_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int eth_ark_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev);
+static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev);
+static int eth_ark_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev);
+static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+static int eth_ark_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool);
+static void eth_ark_macaddr_remove(struct rte_eth_dev *dev,
+ uint32_t index);
+static int eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size);
+
+/*
+ * The packet generator is a functional block used to generate packet
+ * patterns for testing. It is not intended for nominal use.
+ */
+#define ARK_PKTGEN_ARG "Pkt_gen"
+
+/*
+ * The packet checker is a functional block used to verify packet
+ * patterns for testing. It is not intended for nominal use.
+ */
+#define ARK_PKTCHKR_ARG "Pkt_chkr"
+
+/*
+ * The packet director is used to select the internal ingress and
+ * egress packets paths during testing. It is not intended for
+ * nominal use.
+ */
+#define ARK_PKTDIR_ARG "Pkt_dir"
+
+/* Devinfo configurations */
+#define ARK_RX_MAX_QUEUE (4096 * 4)
+#define ARK_RX_MIN_QUEUE (512)
+#define ARK_RX_MAX_PKT_LEN ((16 * 1024) - 128)
+#define ARK_RX_MIN_BUFSIZE (1024)
+
+#define ARK_TX_MAX_QUEUE (4096 * 4)
+#define ARK_TX_MIN_QUEUE (256)
+
+static const char * const valid_arguments[] = {
+ ARK_PKTGEN_ARG,
+ ARK_PKTCHKR_ARG,
+ ARK_PKTDIR_ARG,
+ NULL
+};
+
+static const struct rte_pci_id pci_id_ark_map[] = {
+ {RTE_PCI_DEVICE(0x1d6c, 0x100d)},
+ {RTE_PCI_DEVICE(0x1d6c, 0x100e)},
+ {.vendor_id = 0, /* sentinel */ },
+};
+
+static int
+eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct ark_adapter));
+
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ ret = eth_ark_dev_init(eth_dev);
+ if (ret)
+ rte_eth_dev_pci_release(eth_dev);
+
+ return ret;
+}
+
+static int
+eth_ark_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ark_dev_uninit);
+}
+
+static struct rte_pci_driver rte_ark_pmd = {
+ .id_table = pci_id_ark_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_ark_pci_probe,
+ .remove = eth_ark_pci_remove,
+};
+
+static const struct eth_dev_ops ark_eth_dev_ops = {
+ .dev_configure = eth_ark_dev_configure,
+ .dev_start = eth_ark_dev_start,
+ .dev_stop = eth_ark_dev_stop,
+ .dev_close = eth_ark_dev_close,
+
+ .dev_infos_get = eth_ark_dev_info_get,
+
+ .rx_queue_setup = eth_ark_dev_rx_queue_setup,
+ .rx_queue_count = eth_ark_dev_rx_queue_count,
+ .tx_queue_setup = eth_ark_tx_queue_setup,
+
+ .link_update = eth_ark_dev_link_update,
+ .dev_set_link_up = eth_ark_dev_set_link_up,
+ .dev_set_link_down = eth_ark_dev_set_link_down,
+
+ .rx_queue_start = eth_ark_rx_start_queue,
+ .rx_queue_stop = eth_ark_rx_stop_queue,
+
+ .tx_queue_start = eth_ark_tx_queue_start,
+ .tx_queue_stop = eth_ark_tx_queue_stop,
+
+ .stats_get = eth_ark_dev_stats_get,
+ .stats_reset = eth_ark_dev_stats_reset,
+
+ .mac_addr_add = eth_ark_macaddr_add,
+ .mac_addr_remove = eth_ark_macaddr_remove,
+ .mac_addr_set = eth_ark_set_default_mac_addr,
+
+ .mtu_set = eth_ark_set_mtu,
+};
+
+static int
+check_for_ext(struct ark_adapter *ark)
+{
+ int found = 0;
+
+ /* Get the env */
+ const char *dllpath = getenv("ARK_EXT_PATH");
+
+ if (dllpath == NULL) {
+ PMD_DEBUG_LOG(DEBUG, "ARK EXT NO dll path specified\n");
+ return 0;
+ }
+ PMD_DRV_LOG(INFO, "ARK EXT found dll path at %s\n", dllpath);
+
+ /* Open and load the .so */
+ ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY);
+ if (ark->d_handle == NULL) {
+ PMD_DRV_LOG(ERR, "Could not load user extension %s\n",
+ dllpath);
+ return -1;
+ }
+ PMD_DRV_LOG(INFO, "SUCCESS: loaded user extension %s\n",
+ dllpath);
+
+ /* Get the entry points */
+ ark->user_ext.dev_init =
+ (void *(*)(struct rte_eth_dev *, void *, int))
+ dlsym(ark->d_handle, "dev_init");
+ PMD_DEBUG_LOG(DEBUG, "device ext init pointer = %p\n",
+ ark->user_ext.dev_init);
+ ark->user_ext.dev_get_port_count =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_get_port_count");
+ ark->user_ext.dev_uninit =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_uninit");
+ ark->user_ext.dev_configure =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_configure");
+ ark->user_ext.dev_start =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_start");
+ ark->user_ext.dev_stop =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_stop");
+ ark->user_ext.dev_close =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_close");
+ ark->user_ext.link_update =
+ (int (*)(struct rte_eth_dev *, int, void *))
+ dlsym(ark->d_handle, "link_update");
+ ark->user_ext.dev_set_link_up =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_set_link_up");
+ ark->user_ext.dev_set_link_down =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_set_link_down");
+ ark->user_ext.stats_get =
+ (int (*)(struct rte_eth_dev *, struct rte_eth_stats *,
+ void *))
+ dlsym(ark->d_handle, "stats_get");
+ ark->user_ext.stats_reset =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "stats_reset");
+ ark->user_ext.mac_addr_add =
+ (void (*)(struct rte_eth_dev *, struct ether_addr *, uint32_t,
+ uint32_t, void *))
+ dlsym(ark->d_handle, "mac_addr_add");
+ ark->user_ext.mac_addr_remove =
+ (void (*)(struct rte_eth_dev *, uint32_t, void *))
+ dlsym(ark->d_handle, "mac_addr_remove");
+ ark->user_ext.mac_addr_set =
+ (void (*)(struct rte_eth_dev *, struct ether_addr *,
+ void *))
+ dlsym(ark->d_handle, "mac_addr_set");
+ ark->user_ext.set_mtu =
+ (int (*)(struct rte_eth_dev *, uint16_t,
+ void *))
+ dlsym(ark->d_handle, "set_mtu");
+
+ return found;
+}
+
+static int
+eth_ark_dev_init(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ struct rte_pci_device *pci_dev;
+ int ret;
+ int port_count = 1;
+ int p;
+
+ ark->eth_dev = dev;
+
+ PMD_FUNC_LOG(DEBUG, "\n");
+
+ /* Check to see if there is an extension that we need to load */
+ ret = check_for_ext(ark);
+ if (ret)
+ return ret;
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ rte_eth_copy_pci_info(dev, pci_dev);
+
+ /* Use dummy function until setup */
+ dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
+ dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;
+
+ ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
+ ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;
+
+ ark->sysctrl.v = (void *)&ark->bar0[ARK_SYSCTRL_BASE];
+ ark->mpurx.v = (void *)&ark->bar0[ARK_MPU_RX_BASE];
+ ark->udm.v = (void *)&ark->bar0[ARK_UDM_BASE];
+ ark->mputx.v = (void *)&ark->bar0[ARK_MPU_TX_BASE];
+ ark->ddm.v = (void *)&ark->bar0[ARK_DDM_BASE];
+ ark->cmac.v = (void *)&ark->bar0[ARK_CMAC_BASE];
+ ark->external.v = (void *)&ark->bar0[ARK_EXTERNAL_BASE];
+ ark->pktdir.v = (void *)&ark->bar0[ARK_PKTDIR_BASE];
+ ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE];
+ ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE];
+
+ ark->rqpacing =
+ (struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
+ ark->started = 0;
+
+ PMD_DEBUG_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n",
+ ark->sysctrl.t32[4],
+ rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
+ PMD_DRV_LOG(INFO, "Arkville HW Commit_ID: %08x\n",
+ rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
+
+ /* If HW sanity test fails, return an error */
+ if (ark->sysctrl.t32[4] != 0xcafef00d) {
+ PMD_DRV_LOG(ERR,
+ "HW Sanity test has failed, expected constant"
+ " 0x%x, read 0x%x (%s)\n",
+ 0xcafef00d,
+ ark->sysctrl.t32[4], __func__);
+ return -1;
+ }
+ if (ark->sysctrl.t32[3] != 0) {
+ if (ark_rqp_lasped(ark->rqpacing)) {
+ PMD_DRV_LOG(ERR, "Arkville Evaluation System - "
+ "Timer has Expired\n");
+ return -1;
+ }
+ PMD_DRV_LOG(WARNING, "Arkville Evaluation System - "
+ "Timer is Running\n");
+ }
+
+ PMD_DRV_LOG(INFO,
+ "HW Sanity test has PASSED, expected constant"
+ " 0x%x, read 0x%x (%s)\n",
+ 0xcafef00d, ark->sysctrl.t32[4], __func__);
+
+ /* We are a single function multi-port device. */
+ ret = ark_config_device(dev);
+ dev->dev_ops = &ark_eth_dev_ops;
+
+ dev->data->mac_addrs = rte_zmalloc("ark", ETHER_ADDR_LEN, 0);
+ if (!dev->data->mac_addrs) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocated memory for storing mac address"
+ );
+ }
+
+ if (ark->user_ext.dev_init) {
+ ark->user_data[dev->data->port_id] =
+ ark->user_ext.dev_init(dev, ark->a_bar, 0);
+ if (!ark->user_data[dev->data->port_id]) {
+ PMD_DRV_LOG(INFO,
+ "Failed to initialize PMD extension!"
+ " continuing without it\n");
+ memset(&ark->user_ext, 0, sizeof(struct ark_user_ext));
+ dlclose(ark->d_handle);
+ }
+ }
+
+ if (pci_dev->device.devargs)
+ ret = eth_ark_check_args(ark, pci_dev->device.devargs->args);
+ else
+ PMD_DRV_LOG(INFO, "No Device args found\n");
+
+ if (ret)
+ goto error;
+ /*
+ * We will create additional devices based on the number of requested
+ * ports
+ */
+ if (ark->user_ext.dev_get_port_count)
+ port_count =
+ ark->user_ext.dev_get_port_count(dev,
+ ark->user_data[dev->data->port_id]);
+ ark->num_ports = port_count;
+
+ for (p = 0; p < port_count; p++) {
+ struct rte_eth_dev *eth_dev;
+ char name[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(name, sizeof(name), "arketh%d",
+ dev->data->port_id + p);
+
+ if (p == 0) {
+ /* First port is already allocated by DPDK */
+ eth_dev = ark->eth_dev;
+ rte_eth_dev_probing_finish(eth_dev);
+ continue;
+ }
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(ERR,
+ "Could not allocate eth_dev for port %d\n",
+ p);
+ goto error;
+ }
+
+ eth_dev->device = &pci_dev->device;
+ eth_dev->data->dev_private = ark;
+ eth_dev->dev_ops = ark->eth_dev->dev_ops;
+ eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst;
+ eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
+ if (!eth_dev->data->mac_addrs) {
+ PMD_DRV_LOG(ERR,
+ "Memory allocation for MAC failed!"
+ " Exiting.\n");
+ goto error;
+ }
+
+ if (ark->user_ext.dev_init) {
+ ark->user_data[eth_dev->data->port_id] =
+ ark->user_ext.dev_init(dev, ark->a_bar, p);
+ }
+
+ rte_eth_dev_probing_finish(eth_dev);
+ }
+
+ return ret;
+
+ error:
+ if (dev->data->mac_addrs)
+ rte_free(dev->data->mac_addrs);
+ return -1;
+}
+
+/*
+ *Initial device configuration when device is opened
+ * setup the DDM, and UDM
+ * Called once per PCIE device
+ */
+static int
+ark_config_device(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ uint16_t num_q, i;
+ struct ark_mpu_t *mpu;
+
+ /*
+ * Make sure that the packet director, generator and checker are in a
+ * known state
+ */
+ ark->start_pg = 0;
+ ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1);
+ if (ark->pg == NULL)
+ return -1;
+ ark_pktgen_reset(ark->pg);
+ ark->pc = ark_pktchkr_init(ark->pktchkr.v, 0, 1);
+ if (ark->pc == NULL)
+ return -1;
+ ark_pktchkr_stop(ark->pc);
+ ark->pd = ark_pktdir_init(ark->pktdir.v);
+ if (ark->pd == NULL)
+ return -1;
+
+ /* Verify HW */
+ if (ark_udm_verify(ark->udm.v))
+ return -1;
+ if (ark_ddm_verify(ark->ddm.v))
+ return -1;
+
+ /* UDM */
+ if (ark_udm_reset(ark->udm.v)) {
+ PMD_DRV_LOG(ERR, "Unable to stop and reset UDM\n");
+ return -1;
+ }
+ /* Keep in reset until the MPU are cleared */
+
+ /* MPU reset */
+ mpu = ark->mpurx.v;
+ num_q = ark_api_num_queues(mpu);
+ ark->rx_queues = num_q;
+ for (i = 0; i < num_q; i++) {
+ ark_mpu_reset(mpu);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+
+ ark_udm_stop(ark->udm.v, 0);
+ ark_udm_configure(ark->udm.v,
+ RTE_PKTMBUF_HEADROOM,
+ RTE_MBUF_DEFAULT_DATAROOM,
+ ARK_RX_WRITE_TIME_NS);
+ ark_udm_stats_reset(ark->udm.v);
+ ark_udm_stop(ark->udm.v, 0);
+
+ /* TX -- DDM */
+ if (ark_ddm_stop(ark->ddm.v, 1))
+ PMD_DRV_LOG(ERR, "Unable to stop DDM\n");
+
+ mpu = ark->mputx.v;
+ num_q = ark_api_num_queues(mpu);
+ ark->tx_queues = num_q;
+ for (i = 0; i < num_q; i++) {
+ ark_mpu_reset(mpu);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+
+ ark_ddm_reset(ark->ddm.v);
+ ark_ddm_stats_reset(ark->ddm.v);
+
+ ark_ddm_stop(ark->ddm.v, 0);
+ ark_rqp_stats_reset(ark->rqpacing);
+
+ return 0;
+}
+
+static int
+eth_ark_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (ark->user_ext.dev_uninit)
+ ark->user_ext.dev_uninit(dev,
+ ark->user_data[dev->data->port_id]);
+
+ ark_pktgen_uninit(ark->pg);
+ ark_pktchkr_uninit(ark->pc);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+ rte_free(dev->data->mac_addrs);
+ return 0;
+}
+
+static int
+eth_ark_dev_configure(struct rte_eth_dev *dev)
+{
+ PMD_FUNC_LOG(DEBUG, "\n");
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ eth_ark_dev_set_link_up(dev);
+ if (ark->user_ext.dev_configure)
+ return ark->user_ext.dev_configure(dev,
+ ark->user_data[dev->data->port_id]);
+ return 0;
+}
+
+static void *
+delay_pg_start(void *arg)
+{
+ struct ark_adapter *ark = (struct ark_adapter *)arg;
+
+ /* This function is used exclusively for regression testing, We
+ * perform a blind sleep here to ensure that the external test
+ * application has time to setup the test before we generate packets
+ */
+ usleep(100000);
+ ark_pktgen_run(ark->pg);
+ return NULL;
+}
+
+static int
+eth_ark_dev_start(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ int i;
+
+ PMD_FUNC_LOG(DEBUG, "\n");
+
+ /* RX Side */
+ /* start UDM */
+ ark_udm_start(ark->udm.v);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_ark_rx_start_queue(dev, i);
+
+ /* TX Side */
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ eth_ark_tx_queue_start(dev, i);
+
+ /* start DDM */
+ ark_ddm_start(ark->ddm.v);
+
+ ark->started = 1;
+ /* set xmit and receive function */
+ dev->rx_pkt_burst = &eth_ark_recv_pkts;
+ dev->tx_pkt_burst = &eth_ark_xmit_pkts;
+
+ if (ark->start_pg)
+ ark_pktchkr_run(ark->pc);
+
+ if (ark->start_pg && (dev->data->port_id == 0)) {
+ pthread_t thread;
+
+ /* Delay packet generatpr start allow the hardware to be ready
+ * This is only used for sanity checking with internal generator
+ */
+ if (pthread_create(&thread, NULL, delay_pg_start, ark)) {
+ PMD_DRV_LOG(ERR, "Could not create pktgen "
+ "starter thread\n");
+ return -1;
+ }
+ }
+
+ if (ark->user_ext.dev_start)
+ ark->user_ext.dev_start(dev,
+ ark->user_data[dev->data->port_id]);
+
+ return 0;
+}
+
+static void
+eth_ark_dev_stop(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ int status;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ struct ark_mpu_t *mpu;
+
+ PMD_FUNC_LOG(DEBUG, "\n");
+
+ if (ark->started == 0)
+ return;
+ ark->started = 0;
+
+ /* Stop the extension first */
+ if (ark->user_ext.dev_stop)
+ ark->user_ext.dev_stop(dev,
+ ark->user_data[dev->data->port_id]);
+
+ /* Stop the packet generator */
+ if (ark->start_pg)
+ ark_pktgen_pause(ark->pg);
+
+ dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
+ dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;
+
+ /* STOP TX Side */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ status = eth_ark_tx_queue_stop(dev, i);
+ if (status != 0) {
+ uint16_t port = dev->data->port_id;
+ PMD_DRV_LOG(ERR,
+ "tx_queue stop anomaly"
+ " port %u, queue %u\n",
+ port, i);
+ }
+ }
+
+ /* Stop DDM */
+ /* Wait up to 0.1 second. each stop is up to 1000 * 10 useconds */
+ for (i = 0; i < 10; i++) {
+ status = ark_ddm_stop(ark->ddm.v, 1);
+ if (status == 0)
+ break;
+ }
+ if (status || i != 0) {
+ PMD_DRV_LOG(ERR, "DDM stop anomaly. status:"
+ " %d iter: %u. (%s)\n",
+ status,
+ i,
+ __func__);
+ ark_ddm_dump(ark->ddm.v, "Stop anomaly");
+
+ mpu = ark->mputx.v;
+ for (i = 0; i < ark->tx_queues; i++) {
+ ark_mpu_dump(mpu, "DDM failure dump", i);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+ }
+
+ /* STOP RX Side */
+ /* Stop UDM multiple tries attempted */
+ for (i = 0; i < 10; i++) {
+ status = ark_udm_stop(ark->udm.v, 1);
+ if (status == 0)
+ break;
+ }
+ if (status || i != 0) {
+ PMD_DRV_LOG(ERR, "UDM stop anomaly. status %d iter: %u. (%s)\n",
+ status, i, __func__);
+ ark_udm_dump(ark->udm.v, "Stop anomaly");
+
+ mpu = ark->mpurx.v;
+ for (i = 0; i < ark->rx_queues; i++) {
+ ark_mpu_dump(mpu, "UDM Stop anomaly", i);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+ }
+
+ ark_udm_dump_stats(ark->udm.v, "Post stop");
+ ark_udm_dump_perf(ark->udm.v, "Post stop");
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_ark_rx_dump_queue(dev, i, __func__);
+
+ /* Stop the packet checker if it is running */
+ if (ark->start_pg) {
+ ark_pktchkr_dump_stats(ark->pc);
+ ark_pktchkr_stop(ark->pc);
+ }
+}
+
+static void
+eth_ark_dev_close(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ uint16_t i;
+
+ if (ark->user_ext.dev_close)
+ ark->user_ext.dev_close(dev,
+ ark->user_data[dev->data->port_id]);
+
+ eth_ark_dev_stop(dev);
+ eth_ark_udm_force_close(dev);
+
+ /*
+ * TODO This should only be called once for the device during shutdown
+ */
+ ark_rqp_dump(ark->rqpacing);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ eth_ark_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = 0;
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ eth_ark_dev_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = 0;
+ }
+}
+
+static void
+eth_ark_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE);
+ struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE);
+ uint16_t ports = ark->num_ports;
+
+ dev_info->max_rx_pktlen = ARK_RX_MAX_PKT_LEN;
+ dev_info->min_rx_bufsize = ARK_RX_MIN_BUFSIZE;
+
+ dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports);
+ dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports);
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ARK_RX_MAX_QUEUE,
+ .nb_min = ARK_RX_MIN_QUEUE,
+ .nb_align = ARK_RX_MIN_QUEUE}; /* power of 2 */
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ARK_TX_MAX_QUEUE,
+ .nb_min = ARK_TX_MIN_QUEUE,
+ .nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
+
+ /* ARK PMD supports all line rates, how do we indicate that here ?? */
+ dev_info->speed_capa = (ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G |
+ ETH_LINK_SPEED_100G);
+}
+
+static int
+eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ PMD_DEBUG_LOG(DEBUG, "link status = %d\n",
+ dev->data->dev_link.link_status);
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.link_update) {
+ return ark->user_ext.link_update
+ (dev, wait_to_complete,
+ ark->user_data[dev->data->port_id]);
+ }
+ return 0;
+}
+
+static int
+eth_ark_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = 1;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.dev_set_link_up)
+ return ark->user_ext.dev_set_link_up(dev,
+ ark->user_data[dev->data->port_id]);
+ return 0;
+}
+
+static int
+eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = 0;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.dev_set_link_down)
+ return ark->user_ext.dev_set_link_down(dev,
+ ark->user_data[dev->data->port_id]);
+ return 0;
+}
+
+static int
+eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ uint16_t i;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ stats->ipackets = 0;
+ stats->ibytes = 0;
+ stats->opackets = 0;
+ stats->obytes = 0;
+ stats->imissed = 0;
+ stats->oerrors = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ eth_tx_queue_stats_get(dev->data->tx_queues[i], stats);
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_rx_queue_stats_get(dev->data->rx_queues[i], stats);
+ if (ark->user_ext.stats_get)
+ return ark->user_ext.stats_get(dev, stats,
+ ark->user_data[dev->data->port_id]);
+ return 0;
+}
+
+static void
+eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ eth_tx_queue_stats_reset(dev->data->tx_queues[i]);
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_rx_queue_stats_reset(dev->data->rx_queues[i]);
+ if (ark->user_ext.stats_reset)
+ ark->user_ext.stats_reset(dev,
+ ark->user_data[dev->data->port_id]);
+}
+
+static int
+eth_ark_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.mac_addr_add) {
+ ark->user_ext.mac_addr_add(dev,
+ mac_addr,
+ index,
+ pool,
+ ark->user_data[dev->data->port_id]);
+ return 0;
+ }
+ return -ENOTSUP;
+}
+
+static void
+eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.mac_addr_remove)
+ ark->user_ext.mac_addr_remove(dev, index,
+ ark->user_data[dev->data->port_id]);
+}
+
+static int
+eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.mac_addr_set) {
+ ark->user_ext.mac_addr_set(dev, mac_addr,
+ ark->user_data[dev->data->port_id]);
+ return 0;
+ }
+ return -ENOTSUP;
+}
+
+static int
+eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.set_mtu)
+ return ark->user_ext.set_mtu(dev, size,
+ ark->user_data[dev->data->port_id]);
+
+ return -ENOTSUP;
+}
+
+static inline int
+process_pktdir_arg(const char *key, const char *value,
+ void *extra_args)
+{
+ PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
+ key, value);
+ struct ark_adapter *ark =
+ (struct ark_adapter *)extra_args;
+
+ ark->pkt_dir_v = strtol(value, NULL, 16);
+ PMD_FUNC_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v);
+ return 0;
+}
+
+static inline int
+process_file_args(const char *key, const char *value, void *extra_args)
+{
+ PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
+ key, value);
+ char *args = (char *)extra_args;
+
+ /* Open the configuration file */
+ FILE *file = fopen(value, "r");
+ char line[ARK_MAX_ARG_LEN];
+ int size = 0;
+ int first = 1;
+
+ if (file == NULL) {
+ PMD_DRV_LOG(ERR, "Unable to open "
+ "config file %s\n", value);
+ return -1;
+ }
+
+ while (fgets(line, sizeof(line), file)) {
+ size += strlen(line);
+ if (size >= ARK_MAX_ARG_LEN) {
+ PMD_DRV_LOG(ERR, "Unable to parse file %s args, "
+ "parameter list is too long\n", value);
+ fclose(file);
+ return -1;
+ }
+ if (first) {
+ strncpy(args, line, ARK_MAX_ARG_LEN);
+ first = 0;
+ } else {
+ strncat(args, line, ARK_MAX_ARG_LEN);
+ }
+ }
+ PMD_FUNC_LOG(DEBUG, "file = %s\n", args);
+ fclose(file);
+ return 0;
+}
+
+static int
+eth_ark_check_args(struct ark_adapter *ark, const char *params)
+{
+ struct rte_kvargs *kvlist;
+ unsigned int k_idx;
+ struct rte_kvargs_pair *pair = NULL;
+ int ret = -1;
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist == NULL)
+ return 0;
+
+ ark->pkt_gen_args[0] = 0;
+ ark->pkt_chkr_args[0] = 0;
+
+ for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
+ pair = &kvlist->pairs[k_idx];
+ PMD_FUNC_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n",
+ pair->key,
+ pair->value);
+ }
+
+ if (rte_kvargs_process(kvlist,
+ ARK_PKTDIR_ARG,
+ &process_pktdir_arg,
+ ark) != 0) {
+ PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG);
+ goto free_kvlist;
+ }
+
+ if (rte_kvargs_process(kvlist,
+ ARK_PKTGEN_ARG,
+ &process_file_args,
+ ark->pkt_gen_args) != 0) {
+ PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG);
+ goto free_kvlist;
+ }
+
+ if (rte_kvargs_process(kvlist,
+ ARK_PKTCHKR_ARG,
+ &process_file_args,
+ ark->pkt_chkr_args) != 0) {
+ PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG);
+ goto free_kvlist;
+ }
+
+ PMD_DRV_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v);
+ /* Setup the packet director */
+ ark_pktdir_setup(ark->pd, ark->pkt_dir_v);
+
+ /* Setup the packet generator */
+ if (ark->pkt_gen_args[0]) {
+ PMD_DRV_LOG(INFO, "Setting up the packet generator\n");
+ ark_pktgen_parse(ark->pkt_gen_args);
+ ark_pktgen_reset(ark->pg);
+ ark_pktgen_setup(ark->pg);
+ ark->start_pg = 1;
+ }
+
+ /* Setup the packet checker */
+ if (ark->pkt_chkr_args[0]) {
+ ark_pktchkr_parse(ark->pkt_chkr_args);
+ ark_pktchkr_setup(ark->pc);
+ }
+
+ ret = 0;
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+RTE_PMD_REGISTER_PCI(net_ark, rte_ark_pmd);
+RTE_PMD_REGISTER_KMOD_DEP(net_ark, "* igb_uio | uio_pci_generic ");
+RTE_PMD_REGISTER_PCI_TABLE(net_ark, pci_id_ark_map);
+RTE_PMD_REGISTER_PARAM_STRING(net_ark,
+ ARK_PKTGEN_ARG "=<filename> "
+ ARK_PKTCHKR_ARG "=<filename> "
+ ARK_PKTDIR_ARG "=<bitmap>");
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.c b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.c
new file mode 100644
index 00000000..16f0d11e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.c
@@ -0,0 +1,643 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+
+#include "ark_ethdev_rx.h"
+#include "ark_global.h"
+#include "ark_logs.h"
+#include "ark_mpu.h"
+#include "ark_udm.h"
+
+#define ARK_RX_META_SIZE 32
+#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)
+#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+
+/* Forward declarations */
+struct ark_rx_queue;
+struct ark_rx_meta;
+
+static void dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi);
+static void ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue);
+static uint32_t eth_ark_rx_jumbo(struct ark_rx_queue *queue,
+ struct ark_rx_meta *meta,
+ struct rte_mbuf *mbuf0,
+ uint32_t cons_index);
+static inline int eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue);
+
+/* ************************************************************************* */
+struct ark_rx_queue {
+ /* array of mbufs to populate */
+ struct rte_mbuf **reserve_q;
+ /* array of physical addresses of the mbuf data pointer */
+ /* This point is a virtual address */
+ rte_iova_t *paddress_q;
+ struct rte_mempool *mb_pool;
+
+ struct ark_udm_t *udm;
+ struct ark_mpu_t *mpu;
+
+ uint32_t queue_size;
+ uint32_t queue_mask;
+
+ uint32_t seed_index; /* step 1 set with empty mbuf */
+ uint32_t cons_index; /* step 3 consumed by driver */
+
+ /* The queue Id is used to identify the HW Q */
+ uint16_t phys_qid;
+
+ /* The queue Index is used within the dpdk device structures */
+ uint16_t queue_index;
+
+ uint32_t pad1;
+
+ /* separate cache line */
+ /* second cache line - fields only used in slow path */
+ MARKER cacheline1 __rte_cache_min_aligned;
+
+ volatile uint32_t prod_index; /* step 2 filled by FPGA */
+} __rte_cache_aligned;
+
+
+/* ************************************************************************* */
+static int
+eth_ark_rx_hw_setup(struct rte_eth_dev *dev,
+ struct ark_rx_queue *queue,
+ uint16_t rx_queue_id __rte_unused, uint16_t rx_queue_idx)
+{
+ rte_iova_t queue_base;
+ rte_iova_t phys_addr_q_base;
+ rte_iova_t phys_addr_prod_index;
+
+ queue_base = rte_malloc_virt2iova(queue);
+ phys_addr_prod_index = queue_base +
+ offsetof(struct ark_rx_queue, prod_index);
+
+ phys_addr_q_base = rte_malloc_virt2iova(queue->paddress_q);
+
+ /* Verify HW */
+ if (ark_mpu_verify(queue->mpu, sizeof(rte_iova_t))) {
+ PMD_DRV_LOG(ERR, "Illegal configuration rx queue\n");
+ return -1;
+ }
+
+ /* Stop and Reset and configure MPU */
+ ark_mpu_configure(queue->mpu, phys_addr_q_base, queue->queue_size, 0);
+
+ ark_udm_write_addr(queue->udm, phys_addr_prod_index);
+
+ /* advance the valid pointer, but don't start until the queue starts */
+ ark_mpu_reset_stats(queue->mpu);
+
+ /* The seed is the producer index for the HW */
+ ark_mpu_set_producer(queue->mpu, queue->seed_index);
+ dev->data->rx_queue_state[rx_queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static inline void
+eth_ark_rx_update_cons_index(struct ark_rx_queue *queue, uint32_t cons_index)
+{
+ queue->cons_index = cons_index;
+ eth_ark_rx_seed_mbufs(queue);
+ ark_mpu_set_producer(queue->mpu, queue->seed_index);
+}
+
+/* ************************************************************************* */
+int
+eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ static int warning1; /* = 0 */
+ struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
+
+ struct ark_rx_queue *queue;
+ uint32_t i;
+ int status;
+
+ /* Future works: divide the Q's evenly with multi-ports */
+ int port = dev->data->port_id;
+ int qidx = port + queue_idx;
+
+ /* We may already be setup, free memory prior to re-allocation */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ eth_ark_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ if (rx_conf != NULL && warning1 == 0) {
+ warning1 = 1;
+ PMD_DRV_LOG(INFO,
+ "Arkville ignores rte_eth_rxconf argument.\n");
+ }
+
+ if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
+ PMD_DRV_LOG(ERR,
+ "Error: DPDK Arkville requires head room > %d bytes (%s)\n",
+ ARK_RX_META_SIZE, __func__);
+ return -1; /* ERROR CODE */
+ }
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_DRV_LOG(ERR,
+ "DPDK Arkville configuration queue size must be power of two %u (%s)\n",
+ nb_desc, __func__);
+ return -1; /* ERROR CODE */
+ }
+
+ /* Allocate queue struct */
+ queue = rte_zmalloc_socket("Ark_rxqueue",
+ sizeof(struct ark_rx_queue),
+ 64,
+ socket_id);
+ if (queue == 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory in %s\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* NOTE zmalloc is used, no need to 0 indexes, etc. */
+ queue->mb_pool = mb_pool;
+ queue->phys_qid = qidx;
+ queue->queue_index = queue_idx;
+ queue->queue_size = nb_desc;
+ queue->queue_mask = nb_desc - 1;
+
+ queue->reserve_q =
+ rte_zmalloc_socket("Ark_rx_queue mbuf",
+ nb_desc * sizeof(struct rte_mbuf *),
+ 64,
+ socket_id);
+ queue->paddress_q =
+ rte_zmalloc_socket("Ark_rx_queue paddr",
+ nb_desc * sizeof(rte_iova_t),
+ 64,
+ socket_id);
+
+ if (queue->reserve_q == 0 || queue->paddress_q == 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate queue memory in %s\n",
+ __func__);
+ rte_free(queue->reserve_q);
+ rte_free(queue->paddress_q);
+ rte_free(queue);
+ return -ENOMEM;
+ }
+
+ dev->data->rx_queues[queue_idx] = queue;
+ queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET);
+ queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET);
+
+ /* populate mbuf reserve */
+ status = eth_ark_rx_seed_mbufs(queue);
+
+ /* MPU Setup */
+ if (status == 0)
+ status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx);
+
+ if (unlikely(status != 0)) {
+ struct rte_mbuf *mbuf;
+
+ PMD_DRV_LOG(ERR, "Failed to initialize RX queue %d %s\n",
+ qidx,
+ __func__);
+ /* Free the mbufs allocated */
+ for (i = 0, mbuf = queue->reserve_q[0];
+ i < nb_desc; ++i, mbuf++) {
+ rte_pktmbuf_free(mbuf);
+ }
+ rte_free(queue->reserve_q);
+ rte_free(queue->paddress_q);
+ rte_free(queue);
+ return -1; /* ERROR CODE */
+ }
+
+ return 0;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_recv_pkts_noop(void *rx_queue __rte_unused,
+ struct rte_mbuf **rx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ark_rx_queue *queue;
+ register uint32_t cons_index, prod_index;
+ uint16_t nb;
+ struct rte_mbuf *mbuf;
+ struct ark_rx_meta *meta;
+
+ queue = (struct ark_rx_queue *)rx_queue;
+ if (unlikely(queue == 0))
+ return 0;
+ if (unlikely(nb_pkts == 0))
+ return 0;
+ prod_index = queue->prod_index;
+ cons_index = queue->cons_index;
+ nb = 0;
+
+ while (prod_index != cons_index) {
+ mbuf = queue->reserve_q[cons_index & queue->queue_mask];
+ /* prefetch mbuf */
+ rte_mbuf_prefetch_part1(mbuf);
+ rte_mbuf_prefetch_part2(mbuf);
+
+ /* META DATA embedded in headroom */
+ meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);
+
+ mbuf->port = meta->port;
+ mbuf->pkt_len = meta->pkt_len;
+ mbuf->data_len = meta->pkt_len;
+ mbuf->timestamp = meta->timestamp;
+ mbuf->udata64 = meta->user_data;
+
+ if (ARK_RX_DEBUG) { /* debug sanity checks */
+ if ((meta->pkt_len > (1024 * 16)) ||
+ (meta->pkt_len == 0)) {
+ PMD_RX_LOG(DEBUG, "RX: Bad Meta Q: %u"
+ " cons: %" PRIU32
+ " prod: %" PRIU32
+ " seed_index %" PRIU32
+ "\n",
+ queue->phys_qid,
+ cons_index,
+ queue->prod_index,
+ queue->seed_index);
+
+
+ PMD_RX_LOG(DEBUG, " : UDM"
+ " prod: %" PRIU32
+ " len: %u\n",
+ queue->udm->rt_cfg.prod_idx,
+ meta->pkt_len);
+ ark_mpu_dump(queue->mpu,
+ " ",
+ queue->phys_qid);
+ dump_mbuf_data(mbuf, 0, 256);
+ /* its FUBAR so fix it */
+ mbuf->pkt_len = 63;
+ meta->pkt_len = 63;
+ }
+ /* seqn is only set under debug */
+ mbuf->seqn = cons_index;
+ }
+
+ if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN))
+ cons_index = eth_ark_rx_jumbo
+ (queue, meta, mbuf, cons_index + 1);
+ else
+ cons_index += 1;
+
+ rx_pkts[nb] = mbuf;
+ nb++;
+ if (nb >= nb_pkts)
+ break;
+ }
+
+ if (unlikely(nb != 0))
+ /* report next free to FPGA */
+ eth_ark_rx_update_cons_index(queue, cons_index);
+
+ return nb;
+}
+
+/* ************************************************************************* */
+static uint32_t
+eth_ark_rx_jumbo(struct ark_rx_queue *queue,
+ struct ark_rx_meta *meta,
+ struct rte_mbuf *mbuf0,
+ uint32_t cons_index)
+{
+ struct rte_mbuf *mbuf_prev;
+ struct rte_mbuf *mbuf;
+
+ uint16_t remaining;
+ uint16_t data_len;
+ uint16_t segments;
+
+ /* first buf populated by called */
+ mbuf_prev = mbuf0;
+ segments = 1;
+ data_len = RTE_MIN(meta->pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
+ remaining = meta->pkt_len - data_len;
+ mbuf0->data_len = data_len;
+
+ /* HW guarantees that the data does not exceed prod_index! */
+ while (remaining != 0) {
+ data_len = RTE_MIN(remaining,
+ RTE_MBUF_DEFAULT_DATAROOM +
+ RTE_PKTMBUF_HEADROOM);
+
+ remaining -= data_len;
+ segments += 1;
+
+ mbuf = queue->reserve_q[cons_index & queue->queue_mask];
+ mbuf_prev->next = mbuf;
+ mbuf_prev = mbuf;
+ mbuf->data_len = data_len;
+ mbuf->data_off = 0;
+ if (ARK_RX_DEBUG)
+ mbuf->seqn = cons_index; /* for debug only */
+
+ cons_index += 1;
+ }
+
+ mbuf0->nb_segs = segments;
+ return cons_index;
+}
+
+/* Drain the internal queue allowing hw to clear out. */
+static void
+eth_ark_rx_queue_drain(struct ark_rx_queue *queue)
+{
+ register uint32_t cons_index;
+ struct rte_mbuf *mbuf;
+
+ cons_index = queue->cons_index;
+
+ /* NOT performance optimized, since this is a one-shot call */
+ while ((cons_index ^ queue->prod_index) & queue->queue_mask) {
+ mbuf = queue->reserve_q[cons_index & queue->queue_mask];
+ rte_pktmbuf_free(mbuf);
+ cons_index++;
+ eth_ark_rx_update_cons_index(queue, cons_index);
+ }
+}
+
+uint32_t
+eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+ return (queue->prod_index - queue->cons_index); /* mod arith */
+}
+
+/* ************************************************************************* */
+int
+eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+ if (queue == 0)
+ return -1;
+
+ dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ ark_mpu_set_producer(queue->mpu, queue->seed_index);
+ ark_mpu_start(queue->mpu);
+
+ ark_udm_queue_enable(queue->udm, 1);
+
+ return 0;
+}
+
+/* ************************************************************************* */
+
+/* Queue can be restarted. data remains
+ */
+int
+eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+ if (queue == 0)
+ return -1;
+
+ ark_udm_queue_enable(queue->udm, 0);
+
+ dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+/* ************************************************************************* */
+static inline int
+eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
+{
+ uint32_t limit = queue->cons_index + queue->queue_size;
+ uint32_t seed_index = queue->seed_index;
+
+ uint32_t count = 0;
+ uint32_t seed_m = queue->seed_index & queue->queue_mask;
+
+ uint32_t nb = limit - seed_index;
+
+ /* Handle wrap around -- remainder is filled on the next call */
+ if (unlikely(seed_m + nb > queue->queue_size))
+ nb = queue->queue_size - seed_m;
+
+ struct rte_mbuf **mbufs = &queue->reserve_q[seed_m];
+ int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb);
+
+ if (unlikely(status != 0))
+ return -1;
+
+ if (ARK_RX_DEBUG) { /* DEBUG */
+ while (count != nb) {
+ struct rte_mbuf *mbuf_init =
+ queue->reserve_q[seed_m + count];
+
+ memset(mbuf_init->buf_addr, -1, 512);
+ *((uint32_t *)mbuf_init->buf_addr) =
+ seed_index + count;
+ *(uint16_t *)RTE_PTR_ADD(mbuf_init->buf_addr, 4) =
+ queue->phys_qid;
+ count++;
+ }
+ count = 0;
+ } /* DEBUG */
+ queue->seed_index += nb;
+
+ /* Duff's device https://en.wikipedia.org/wiki/Duff's_device */
+ switch (nb % 4) {
+ case 0:
+ while (count != nb) {
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_iova;
+ count++;
+ /* FALLTHROUGH */
+ case 3:
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_iova;
+ count++;
+ /* FALLTHROUGH */
+ case 2:
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_iova;
+ count++;
+ /* FALLTHROUGH */
+ case 1:
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_iova;
+ count++;
+ /* FALLTHROUGH */
+
+ } /* while (count != nb) */
+ } /* switch */
+
+ return 0;
+}
+
+void
+eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
+ const char *msg)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+
+ ark_ethdev_rx_dump(msg, queue);
+}
+
+/* ************************************************************************* */
+/* Call on device closed no user API, queue is stopped */
+void
+eth_ark_dev_rx_queue_release(void *vqueue)
+{
+ struct ark_rx_queue *queue;
+ uint32_t i;
+
+ queue = (struct ark_rx_queue *)vqueue;
+ if (queue == 0)
+ return;
+
+ ark_udm_queue_enable(queue->udm, 0);
+ /* Stop the MPU since pointer are going away */
+ ark_mpu_stop(queue->mpu);
+
+ /* Need to clear out mbufs here, dropping packets along the way */
+ eth_ark_rx_queue_drain(queue);
+
+ for (i = 0; i < queue->queue_size; ++i)
+ rte_pktmbuf_free(queue->reserve_q[i]);
+
+ rte_free(queue->reserve_q);
+ rte_free(queue->paddress_q);
+ rte_free(queue);
+}
+
+void
+eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
+{
+ struct ark_rx_queue *queue;
+ struct ark_udm_t *udm;
+
+ queue = vqueue;
+ if (queue == 0)
+ return;
+ udm = queue->udm;
+
+ uint64_t ibytes = ark_udm_bytes(udm);
+ uint64_t ipackets = ark_udm_packets(udm);
+ uint64_t idropped = ark_udm_dropped(queue->udm);
+
+ stats->q_ipackets[queue->queue_index] = ipackets;
+ stats->q_ibytes[queue->queue_index] = ibytes;
+ stats->q_errors[queue->queue_index] = idropped;
+ stats->ipackets += ipackets;
+ stats->ibytes += ibytes;
+ stats->imissed += idropped;
+}
+
+void
+eth_rx_queue_stats_reset(void *vqueue)
+{
+ struct ark_rx_queue *queue;
+
+ queue = vqueue;
+ if (queue == 0)
+ return;
+
+ ark_mpu_reset_stats(queue->mpu);
+ ark_udm_queue_stats_reset(queue->udm);
+}
+
+void
+eth_ark_udm_force_close(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
+ struct ark_rx_queue *queue;
+ uint32_t index;
+ uint16_t i;
+
+ if (!ark_udm_is_flushed(ark->udm.v)) {
+ /* restart the MPUs */
+ PMD_DRV_LOG(ERR, "ARK: %s UDM not flushed\n", __func__);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ queue = (struct ark_rx_queue *)dev->data->rx_queues[i];
+ if (queue == 0)
+ continue;
+
+ ark_mpu_start(queue->mpu);
+ /* Add some buffers */
+ index = 100000 + queue->seed_index;
+ ark_mpu_set_producer(queue->mpu, index);
+ }
+ /* Wait to allow data to pass */
+ usleep(100);
+
+ PMD_DEBUG_LOG(DEBUG, "UDM forced flush attempt, stopped = %d\n",
+ ark_udm_is_flushed(ark->udm.v));
+ }
+ ark_udm_reset(ark->udm.v);
+}
+
+static void
+ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue)
+{
+ if (queue == NULL)
+ return;
+ PMD_DEBUG_LOG(DEBUG, "RX QUEUE %d -- %s", queue->phys_qid, name);
+ PMD_DEBUG_LOG(DEBUG, ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n",
+ "queue_size", queue->queue_size,
+ "seed_index", queue->seed_index,
+ "prod_index", queue->prod_index,
+ "cons_index", queue->cons_index);
+
+ ark_mpu_dump(queue->mpu, name, queue->phys_qid);
+ ark_mpu_dump_setup(queue->mpu, queue->phys_qid);
+ ark_udm_dump(queue->udm, name);
+ ark_udm_dump_setup(queue->udm, queue->phys_qid);
+}
+
+/* Only used in debug.
+ * This function is a raw memory dump of a portion of an mbuf's memory
+ * region. The usual function, rte_pktmbuf_dump() only shows data
+ * with respect to the data_off field. This function show data
+ * anywhere in the mbuf's buffer. This is useful for examining
+ * data in the headroom or tailroom portion of an mbuf.
+ */
+static void
+dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi)
+{
+ uint16_t i, j;
+
+ PMD_DRV_LOG(INFO, " MBUF: %p len %d, off: %d, seq: %" PRIU32 "\n", mbuf,
+ mbuf->pkt_len, mbuf->data_off, mbuf->seqn);
+ for (i = lo; i < hi; i += 16) {
+ uint8_t *dp = RTE_PTR_ADD(mbuf->buf_addr, i);
+
+ PMD_DRV_LOG(INFO, " %6d: ", i);
+ for (j = 0; j < 16; j++)
+ PMD_DRV_LOG(INFO, " %02x", dp[j]);
+
+ PMD_DRV_LOG(INFO, "\n");
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.h b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.h
new file mode 100644
index 00000000..0fdd29b1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_ETHDEV_RX_H_
+#define _ARK_ETHDEV_RX_H_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_ethdev_driver.h>
+
+
+int eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+uint32_t eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+int eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id);
+int eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id);
+uint16_t eth_ark_recv_pkts_noop(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t eth_ark_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+void eth_ark_dev_rx_queue_release(void *rx_queue);
+void eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats);
+void eth_rx_queue_stats_reset(void *vqueue);
+void eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
+ const char *msg);
+void eth_ark_udm_force_close(struct rte_eth_dev *dev);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.c b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.c
new file mode 100644
index 00000000..57188c24
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.c
@@ -0,0 +1,438 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+
+#include "ark_ethdev_tx.h"
+#include "ark_global.h"
+#include "ark_mpu.h"
+#include "ark_ddm.h"
+#include "ark_logs.h"
+
+#define ARK_TX_META_SIZE 32
+#define ARK_TX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_TX_META_SIZE)
+#define ARK_TX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+
+
+/* ************************************************************************* */
+struct ark_tx_queue {
+ struct ark_tx_meta *meta_q;
+ struct rte_mbuf **bufs;
+
+ /* handles for hw objects */
+ struct ark_mpu_t *mpu;
+ struct ark_ddm_t *ddm;
+
+ /* Stats HW tracks bytes and packets, need to count send errors */
+ uint64_t tx_errors;
+
+ uint32_t queue_size;
+ uint32_t queue_mask;
+
+ /* 3 indexes to the paired data rings. */
+ uint32_t prod_index; /* where to put the next one */
+ uint32_t free_index; /* mbuf has been freed */
+
+ /* The queue Id is used to identify the HW Q */
+ uint16_t phys_qid;
+ /* The queue Index within the dpdk device structures */
+ uint16_t queue_index;
+
+ uint32_t pad[1];
+
+ /* second cache line - fields only used in slow path */
+ MARKER cacheline1 __rte_cache_min_aligned;
+ uint32_t cons_index; /* hw is done, can be freed */
+} __rte_cache_aligned;
+
+/* Forward declarations */
+static uint32_t eth_ark_tx_jumbo(struct ark_tx_queue *queue,
+ struct rte_mbuf *mbuf);
+static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
+static void free_completed_tx(struct ark_tx_queue *queue);
+
+static inline void
+ark_tx_hw_queue_stop(struct ark_tx_queue *queue)
+{
+ ark_mpu_stop(queue->mpu);
+}
+
+/* ************************************************************************* */
+static inline void
+eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta,
+ const struct rte_mbuf *mbuf,
+ uint8_t flags)
+{
+ meta->physaddr = rte_mbuf_data_iova(mbuf);
+ meta->delta_ns = 0;
+ meta->data_len = rte_pktmbuf_data_len(mbuf);
+ meta->flags = flags;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_xmit_pkts_noop(void *vtxq __rte_unused,
+ struct rte_mbuf **tx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct ark_tx_queue *queue;
+ struct rte_mbuf *mbuf;
+ struct ark_tx_meta *meta;
+
+ uint32_t idx;
+ uint32_t prod_index_limit;
+ int stat;
+ uint16_t nb;
+
+ queue = (struct ark_tx_queue *)vtxq;
+
+ /* free any packets after the HW is done with them */
+ free_completed_tx(queue);
+
+ prod_index_limit = queue->queue_size + queue->free_index;
+
+ for (nb = 0;
+ (nb < nb_pkts) && (queue->prod_index != prod_index_limit);
+ ++nb) {
+ mbuf = tx_pkts[nb];
+
+ if (ARK_TX_PAD_TO_60) {
+ if (unlikely(rte_pktmbuf_pkt_len(mbuf) < 60)) {
+ /* this packet even if it is small can be split,
+ * be sure to add to the end mbuf
+ */
+ uint16_t to_add =
+ 60 - rte_pktmbuf_pkt_len(mbuf);
+ char *appended =
+ rte_pktmbuf_append(mbuf, to_add);
+
+ if (appended == 0) {
+ /* This packet is in error,
+ * we cannot send it so just
+ * count it and delete it.
+ */
+ queue->tx_errors += 1;
+ rte_pktmbuf_free(mbuf);
+ continue;
+ }
+ memset(appended, 0, to_add);
+ }
+ }
+
+ if (unlikely(mbuf->nb_segs != 1)) {
+ stat = eth_ark_tx_jumbo(queue, mbuf);
+ if (unlikely(stat != 0))
+ break; /* Queue is full */
+ } else {
+ idx = queue->prod_index & queue->queue_mask;
+ queue->bufs[idx] = mbuf;
+ meta = &queue->meta_q[idx];
+ eth_ark_tx_meta_from_mbuf(meta,
+ mbuf,
+ ARK_DDM_SOP |
+ ARK_DDM_EOP);
+ queue->prod_index++;
+ }
+ }
+
+ if (ARK_TX_DEBUG && (nb != nb_pkts)) {
+ PMD_TX_LOG(DEBUG, "TX: Failure to send:"
+ " req: %" PRIU32
+ " sent: %" PRIU32
+ " prod: %" PRIU32
+ " cons: %" PRIU32
+ " free: %" PRIU32 "\n",
+ nb_pkts, nb,
+ queue->prod_index,
+ queue->cons_index,
+ queue->free_index);
+ ark_mpu_dump(queue->mpu,
+ "TX Failure MPU: ",
+ queue->phys_qid);
+ }
+
+ /* let FPGA know producer index. */
+ if (likely(nb != 0))
+ ark_mpu_set_producer(queue->mpu, queue->prod_index);
+
+ return nb;
+}
+
+/* ************************************************************************* */
+static uint32_t
+eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf)
+{
+ struct rte_mbuf *next;
+ struct ark_tx_meta *meta;
+ uint32_t free_queue_space;
+ uint32_t idx;
+ uint8_t flags = ARK_DDM_SOP;
+
+ free_queue_space = queue->queue_mask -
+ (queue->prod_index - queue->free_index);
+ if (unlikely(free_queue_space < mbuf->nb_segs))
+ return -1;
+
+ while (mbuf != NULL) {
+ next = mbuf->next;
+
+ idx = queue->prod_index & queue->queue_mask;
+ queue->bufs[idx] = mbuf;
+ meta = &queue->meta_q[idx];
+
+ flags |= (next == NULL) ? ARK_DDM_EOP : 0;
+ eth_ark_tx_meta_from_mbuf(meta, mbuf, flags);
+ queue->prod_index++;
+
+ flags &= ~ARK_DDM_SOP; /* drop SOP flags */
+ mbuf = next;
+ }
+
+ return 0;
+}
+
+/* ************************************************************************* */
+int
+eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
+ struct ark_tx_queue *queue;
+ int status;
+
+ /* Future: divide the Q's evenly with multi-ports */
+ int port = dev->data->port_id;
+ int qidx = port + queue_idx;
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_DRV_LOG(ERR,
+ "DPDK Arkville configuration queue size"
+ " must be power of two %u (%s)\n",
+ nb_desc, __func__);
+ return -1;
+ }
+
+ /* Allocate queue struct */
+ queue = rte_zmalloc_socket("Ark_txqueue",
+ sizeof(struct ark_tx_queue),
+ 64,
+ socket_id);
+ if (queue == 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate tx "
+ "queue memory in %s\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* we use zmalloc no need to initialize fields */
+ queue->queue_size = nb_desc;
+ queue->queue_mask = nb_desc - 1;
+ queue->phys_qid = qidx;
+ queue->queue_index = queue_idx;
+ dev->data->tx_queues[queue_idx] = queue;
+
+ queue->meta_q =
+ rte_zmalloc_socket("Ark_txqueue meta",
+ nb_desc * sizeof(struct ark_tx_meta),
+ 64,
+ socket_id);
+ queue->bufs =
+ rte_zmalloc_socket("Ark_txqueue bufs",
+ nb_desc * sizeof(struct rte_mbuf *),
+ 64,
+ socket_id);
+
+ if (queue->meta_q == 0 || queue->bufs == 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate "
+ "queue memory in %s\n", __func__);
+ rte_free(queue->meta_q);
+ rte_free(queue->bufs);
+ rte_free(queue);
+ return -ENOMEM;
+ }
+
+ queue->ddm = RTE_PTR_ADD(ark->ddm.v, qidx * ARK_DDM_QOFFSET);
+ queue->mpu = RTE_PTR_ADD(ark->mputx.v, qidx * ARK_MPU_QOFFSET);
+
+ status = eth_ark_tx_hw_queue_config(queue);
+
+ if (unlikely(status != 0)) {
+ rte_free(queue->meta_q);
+ rte_free(queue->bufs);
+ rte_free(queue);
+ return -1; /* ERROR CODE */
+ }
+
+ return 0;
+}
+
+/* ************************************************************************* */
+static int
+eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
+{
+ rte_iova_t queue_base, ring_base, cons_index_addr;
+ uint32_t write_interval_ns;
+
+ /* Verify HW -- MPU */
+ if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))
+ return -1;
+
+ queue_base = rte_malloc_virt2iova(queue);
+ ring_base = rte_malloc_virt2iova(queue->meta_q);
+ cons_index_addr =
+ queue_base + offsetof(struct ark_tx_queue, cons_index);
+
+ ark_mpu_stop(queue->mpu);
+ ark_mpu_reset(queue->mpu);
+
+ /* Stop and Reset and configure MPU */
+ ark_mpu_configure(queue->mpu, ring_base, queue->queue_size, 1);
+
+ /*
+ * Adjust the write interval based on queue size --
+ * increase pcie traffic when low mbuf count
+ * Queue sizes less than 128 are not allowed
+ */
+ switch (queue->queue_size) {
+ case 128:
+ write_interval_ns = 500;
+ break;
+ case 256:
+ write_interval_ns = 500;
+ break;
+ case 512:
+ write_interval_ns = 1000;
+ break;
+ default:
+ write_interval_ns = 2000;
+ break;
+ }
+
+ /* Completion address in UDM */
+ ark_ddm_setup(queue->ddm, cons_index_addr, write_interval_ns);
+
+ return 0;
+}
+
+/* ************************************************************************* */
+void
+eth_ark_tx_queue_release(void *vtx_queue)
+{
+ struct ark_tx_queue *queue;
+
+ queue = (struct ark_tx_queue *)vtx_queue;
+
+ ark_tx_hw_queue_stop(queue);
+
+ queue->cons_index = queue->prod_index;
+ free_completed_tx(queue);
+
+ rte_free(queue->meta_q);
+ rte_free(queue->bufs);
+ rte_free(queue);
+}
+
+/* ************************************************************************* */
+int
+eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_tx_queue *queue;
+ int cnt = 0;
+
+ queue = dev->data->tx_queues[queue_id];
+
+ /* Wait for DDM to send out all packets. */
+ while (queue->cons_index != queue->prod_index) {
+ usleep(100);
+ if (cnt++ > 10000)
+ return -1;
+ }
+
+ ark_mpu_stop(queue->mpu);
+ free_completed_tx(queue);
+
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+int
+eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_tx_queue *queue;
+
+ queue = dev->data->tx_queues[queue_id];
+ if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ ark_mpu_start(queue->mpu);
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/* ************************************************************************* */
+static void
+free_completed_tx(struct ark_tx_queue *queue)
+{
+ struct rte_mbuf *mbuf;
+ struct ark_tx_meta *meta;
+ uint32_t top_index;
+
+ top_index = queue->cons_index; /* read once */
+ while (queue->free_index != top_index) {
+ meta = &queue->meta_q[queue->free_index & queue->queue_mask];
+ mbuf = queue->bufs[queue->free_index & queue->queue_mask];
+
+ if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
+ /* ref count of the mbuf is checked in this call. */
+ rte_pktmbuf_free(mbuf);
+ }
+ queue->free_index++;
+ }
+}
+
+/* ************************************************************************* */
+void
+eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
+{
+ struct ark_tx_queue *queue;
+ struct ark_ddm_t *ddm;
+ uint64_t bytes, pkts;
+
+ queue = vqueue;
+ ddm = queue->ddm;
+
+ bytes = ark_ddm_queue_byte_count(ddm);
+ pkts = ark_ddm_queue_pkt_count(ddm);
+
+ stats->q_opackets[queue->queue_index] = pkts;
+ stats->q_obytes[queue->queue_index] = bytes;
+ stats->opackets += pkts;
+ stats->obytes += bytes;
+ stats->oerrors += queue->tx_errors;
+}
+
+void
+eth_tx_queue_stats_reset(void *vqueue)
+{
+ struct ark_tx_queue *queue;
+ struct ark_ddm_t *ddm;
+
+ queue = vqueue;
+ ddm = queue->ddm;
+
+ ark_ddm_queue_reset_stats(ddm);
+ queue->tx_errors = 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.h b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.h
new file mode 100644
index 00000000..e448ce22
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_ETHDEV_TX_H_
+#define _ARK_ETHDEV_TX_H_
+
+#include <stdint.h>
+
+#include <rte_ethdev_driver.h>
+
+
+uint16_t eth_ark_xmit_pkts_noop(void *vtxq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t eth_ark_xmit_pkts(void *vtxq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void eth_ark_tx_queue_release(void *vtx_queue);
+int eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
+int eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
+void eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats);
+void eth_tx_queue_stats_reset(void *vqueue);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ext.h b/src/spdk/dpdk/drivers/net/ark/ark_ext.h
new file mode 100644
index 00000000..f5af2153
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ext.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_EXT_H_
+#define _ARK_EXT_H_
+
+#include <rte_ethdev_driver.h>
+
+/*
+ * This is the template file for users who which to define a dynamic
+ * extension to the Arkville PMD. User's who create an extension
+ * should include this file and define the necessary and desired
+ * functions.
+ * Only 1 function is required for an extension, dev_init(); all other
+ * functions prototyped in this file are optional.
+ */
+
+/*
+ * Called post PMD init.
+ * The implementation returns its private data that gets passed into
+ * all other functions as user_data
+ * The ARK extension implementation MUST implement this function
+ */
+void *dev_init(struct rte_eth_dev *dev, void *a_bar, int port_id);
+
+/* Called during device shutdown */
+void dev_uninit(struct rte_eth_dev *dev, void *user_data);
+
+/* This call is optional and allows the
+ * extension to specify the number of supported ports.
+ */
+uint8_t dev_get_port_count(struct rte_eth_dev *dev,
+ void *user_data);
+
+/*
+ * The following functions are optional and are directly mapped
+ * from the DPDK PMD ops structure.
+ * Each function if implemented is called after the ARK PMD
+ * implementation executes.
+ */
+
+int dev_configure(struct rte_eth_dev *dev,
+ void *user_data);
+
+int dev_start(struct rte_eth_dev *dev,
+ void *user_data);
+
+void dev_stop(struct rte_eth_dev *dev,
+ void *user_data);
+
+void dev_close(struct rte_eth_dev *dev,
+ void *user_data);
+
+int link_update(struct rte_eth_dev *dev,
+ int wait_to_complete,
+ void *user_data);
+
+int dev_set_link_up(struct rte_eth_dev *dev,
+ void *user_data);
+
+int dev_set_link_down(struct rte_eth_dev *dev,
+ void *user_data);
+
+int stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats,
+ void *user_data);
+
+void stats_reset(struct rte_eth_dev *dev,
+ void *user_data);
+
+void mac_addr_add(struct rte_eth_dev *dev,
+ struct ether_addr *macadr,
+ uint32_t index,
+ uint32_t pool,
+ void *user_data);
+
+void mac_addr_remove(struct rte_eth_dev *dev,
+ uint32_t index,
+ void *user_data);
+
+void mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ void *user_data);
+
+int set_mtu(struct rte_eth_dev *dev,
+ uint16_t size,
+ void *user_data);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_global.h b/src/spdk/dpdk/drivers/net/ark/ark_global.h
new file mode 100644
index 00000000..f820091d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_global.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_GLOBAL_H_
+#define _ARK_GLOBAL_H_
+
+#include <time.h>
+#include <assert.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_version.h>
+
+#include "ark_pktdir.h"
+#include "ark_pktgen.h"
+#include "ark_pktchkr.h"
+
+#define ETH_ARK_ARG_MAXLEN 64
+#define ARK_SYSCTRL_BASE 0x0
+#define ARK_PKTGEN_BASE 0x10000
+#define ARK_MPU_RX_BASE 0x20000
+#define ARK_UDM_BASE 0x30000
+#define ARK_MPU_TX_BASE 0x40000
+#define ARK_DDM_BASE 0x60000
+#define ARK_CMAC_BASE 0x80000
+#define ARK_PKTDIR_BASE 0xa0000
+#define ARK_PKTCHKR_BASE 0x90000
+#define ARK_RCPACING_BASE 0xb0000
+#define ARK_EXTERNAL_BASE 0x100000
+#define ARK_MPU_QOFFSET 0x00100
+#define ARK_MAX_PORTS RTE_MAX_ETHPORTS
+
+#define offset8(n) n
+#define offset16(n) ((n) / 2)
+#define offset32(n) ((n) / 4)
+#define offset64(n) ((n) / 8)
+
+/* Maximum length of arg list in bytes */
+#define ARK_MAX_ARG_LEN 256
+
+/*
+ * Structure to store private data for each PF/VF instance.
+ */
+#define def_ptr(type, name) \
+ union type { \
+ uint64_t *t64; \
+ uint32_t *t32; \
+ uint16_t *t16; \
+ uint8_t *t8; \
+ void *v; \
+ } name
+
+struct ark_user_ext {
+ void *(*dev_init)(struct rte_eth_dev *, void *abar, int port_id);
+ void (*dev_uninit)(struct rte_eth_dev *, void *);
+ int (*dev_get_port_count)(struct rte_eth_dev *, void *);
+ int (*dev_configure)(struct rte_eth_dev *, void *);
+ int (*dev_start)(struct rte_eth_dev *, void *);
+ void (*dev_stop)(struct rte_eth_dev *, void *);
+ void (*dev_close)(struct rte_eth_dev *, void *);
+ int (*link_update)(struct rte_eth_dev *, int wait_to_complete, void *);
+ int (*dev_set_link_up)(struct rte_eth_dev *, void *);
+ int (*dev_set_link_down)(struct rte_eth_dev *, void *);
+ int (*stats_get)(struct rte_eth_dev *, struct rte_eth_stats *, void *);
+ void (*stats_reset)(struct rte_eth_dev *, void *);
+ void (*mac_addr_add)(struct rte_eth_dev *,
+ struct ether_addr *,
+ uint32_t,
+ uint32_t,
+ void *);
+ void (*mac_addr_remove)(struct rte_eth_dev *, uint32_t, void *);
+ void (*mac_addr_set)(struct rte_eth_dev *, struct ether_addr *, void *);
+ int (*set_mtu)(struct rte_eth_dev *, uint16_t, void *);
+};
+
+struct ark_adapter {
+ /* User extension private data */
+ void *user_data[ARK_MAX_PORTS];
+
+ /* Pointers to packet generator and checker */
+ int start_pg;
+ ark_pkt_gen_t pg;
+ ark_pkt_chkr_t pc;
+ ark_pkt_dir_t pd;
+
+ int num_ports;
+
+ /* Packet generator/checker args */
+ char pkt_gen_args[ARK_MAX_ARG_LEN];
+ char pkt_chkr_args[ARK_MAX_ARG_LEN];
+ uint32_t pkt_dir_v;
+
+ /* eth device */
+ struct rte_eth_dev *eth_dev;
+
+ void *d_handle;
+ struct ark_user_ext user_ext;
+
+ /* Our Bar 0 */
+ uint8_t *bar0;
+
+ /* Application Bar */
+ uint8_t *a_bar;
+
+ /* Arkville demo block offsets */
+ def_ptr(sys_ctrl, sysctrl);
+ def_ptr(pkt_gen, pktgen);
+ def_ptr(mpu_rx, mpurx);
+ def_ptr(UDM, udm);
+ def_ptr(mpu_tx, mputx);
+ def_ptr(DDM, ddm);
+ def_ptr(CMAC, cmac);
+ def_ptr(external, external);
+ def_ptr(pkt_dir, pktdir);
+ def_ptr(pkt_chkr, pktchkr);
+
+ int started;
+ uint16_t rx_queues;
+ uint16_t tx_queues;
+
+ struct ark_rqpace_t *rqpacing;
+};
+
+typedef uint32_t *ark_t;
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_logs.h b/src/spdk/dpdk/drivers/net/ark/ark_logs.h
new file mode 100644
index 00000000..b90e9f0a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_logs.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_DEBUG_H_
+#define _ARK_DEBUG_H_
+
+#include <inttypes.h>
+#include <rte_log.h>
+
+
+/* Configuration option to pad TX packets to 60 bytes */
+#ifdef RTE_LIBRTE_ARK_PAD_TX
+#define ARK_TX_PAD_TO_60 1
+#else
+#define ARK_TX_PAD_TO_60 0
+#endif
+
+/* system camel case definition changed to upper case */
+#define PRIU32 PRIu32
+#define PRIU64 PRIu64
+
+/* Format specifiers for string data pairs */
+#define ARK_SU32 "\n\t%-20s %'20" PRIU32
+#define ARK_SU64 "\n\t%-20s %'20" PRIU64
+#define ARK_SU64X "\n\t%-20s %#20" PRIx64
+#define ARK_SPTR "\n\t%-20s %20p"
+
+
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, fmt, ## args)
+
+/* Conditional trace definitions */
+#define ARK_TRACE_ON(level, fmt, ...) \
+ RTE_LOG(level, PMD, fmt, ##__VA_ARGS__)
+
+/* This pattern allows compiler check arguments even if disabled */
+#define ARK_TRACE_OFF(level, fmt, ...) \
+ do {if (0) RTE_LOG(level, PMD, fmt, ##__VA_ARGS__); } \
+ while (0)
+
+
+/* tracing including the function name */
+#define ARK_FUNC_ON(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+
+/* tracing including the function name */
+#define ARK_FUNC_OFF(level, fmt, args...) \
+ do { if (0) RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args); } \
+ while (0)
+
+
+/* Debug macro for tracing full behavior, function tracing and messages*/
+#ifdef RTE_LIBRTE_ARK_DEBUG_TRACE
+#define PMD_FUNC_LOG(level, fmt, ...) ARK_FUNC_ON(level, fmt, ##__VA_ARGS__)
+#define PMD_DEBUG_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define PMD_FUNC_LOG(level, fmt, ...) ARK_FUNC_OFF(level, fmt, ##__VA_ARGS__)
+#define PMD_DEBUG_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+
+/* Debug macro for reporting FPGA statistics */
+#ifdef RTE_LIBRTE_ARK_DEBUG_STATS
+#define PMD_STATS_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define PMD_STATS_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+
+/* Debug macro for RX path */
+#ifdef RTE_LIBRTE_ARK_DEBUG_RX
+#define ARK_RX_DEBUG 1
+#define PMD_RX_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define ARK_RX_DEBUG 0
+#define PMD_RX_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+/* Debug macro for TX path */
+#ifdef RTE_LIBRTE_ARK_DEBUG_TX
+#define ARK_TX_DEBUG 1
+#define PMD_TX_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define ARK_TX_DEBUG 0
+#define PMD_TX_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_mpu.c b/src/spdk/dpdk/drivers/net/ark/ark_mpu.c
new file mode 100644
index 00000000..21f840f3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_mpu.c
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+
+#include "ark_logs.h"
+#include "ark_mpu.h"
+
+uint16_t
+ark_api_num_queues(struct ark_mpu_t *mpu)
+{
+ return mpu->hw.num_queues;
+}
+
+uint16_t
+ark_api_num_queues_per_port(struct ark_mpu_t *mpu, uint16_t ark_ports)
+{
+ return mpu->hw.num_queues / ark_ports;
+}
+
+int
+ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size)
+{
+ uint32_t version;
+
+ version = mpu->id.vernum & 0x0000fF00;
+ if ((mpu->id.idnum != 0x2055504d) ||
+ (mpu->hw.obj_size != obj_size) ||
+ (version != 0x00003100)) {
+ PMD_DRV_LOG(ERR,
+ " MPU module not found as expected %08x"
+ " \"%c%c%c%c %c%c%c%c\"\n",
+ mpu->id.idnum,
+ mpu->id.id[0], mpu->id.id[1],
+ mpu->id.id[2], mpu->id.id[3],
+ mpu->id.ver[0], mpu->id.ver[1],
+ mpu->id.ver[2], mpu->id.ver[3]);
+ PMD_DRV_LOG(ERR,
+ " MPU HW num_queues: %u hw_depth %u,"
+ " obj_size: %u, obj_per_mrr: %u"
+ " Expected size %u\n",
+ mpu->hw.num_queues,
+ mpu->hw.hw_depth,
+ mpu->hw.obj_size,
+ mpu->hw.obj_per_mrr,
+ obj_size);
+ return -1;
+ }
+ return 0;
+}
+
+void
+ark_mpu_stop(struct ark_mpu_t *mpu)
+{
+ mpu->cfg.command = MPU_CMD_STOP;
+}
+
+void
+ark_mpu_start(struct ark_mpu_t *mpu)
+{
+ mpu->cfg.command = MPU_CMD_RUN;
+}
+
+int
+ark_mpu_reset(struct ark_mpu_t *mpu)
+{
+ int cnt = 0;
+
+ mpu->cfg.command = MPU_CMD_RESET;
+
+ while (mpu->cfg.command != MPU_CMD_IDLE) {
+ if (cnt++ > 1000)
+ break;
+ usleep(10);
+ }
+ if (mpu->cfg.command != MPU_CMD_IDLE) {
+ mpu->cfg.command = MPU_CMD_FORCE_RESET;
+ usleep(10);
+ }
+ ark_mpu_reset_stats(mpu);
+ return mpu->cfg.command != MPU_CMD_IDLE;
+}
+
+void
+ark_mpu_reset_stats(struct ark_mpu_t *mpu)
+{
+ mpu->stats.pci_request = 1; /* reset stats */
+}
+
+int
+ark_mpu_configure(struct ark_mpu_t *mpu, rte_iova_t ring, uint32_t ring_size,
+ int is_tx)
+{
+ ark_mpu_reset(mpu);
+
+ if (!rte_is_power_of_2(ring_size)) {
+ PMD_DRV_LOG(ERR, "ARK: Invalid ring size for MPU %d\n",
+ ring_size);
+ return -1;
+ }
+
+ mpu->cfg.ring_base = ring;
+ mpu->cfg.ring_size = ring_size;
+ mpu->cfg.ring_mask = ring_size - 1;
+ mpu->cfg.min_host_move = is_tx ? 1 : mpu->hw.obj_per_mrr;
+ mpu->cfg.min_hw_move = mpu->hw.obj_per_mrr;
+ mpu->cfg.sw_prod_index = 0;
+ mpu->cfg.hw_cons_index = 0;
+ return 0;
+}
+
+void
+ark_mpu_dump(struct ark_mpu_t *mpu, const char *code, uint16_t qid)
+{
+ /* DUMP to see that we have started */
+ PMD_DEBUG_LOG(DEBUG, "MPU: %s Q: %3u sw_prod %u, hw_cons: %u\n",
+ code, qid,
+ mpu->cfg.sw_prod_index, mpu->cfg.hw_cons_index);
+ PMD_DEBUG_LOG(DEBUG, "MPU: %s state: %d count %d, reserved %d"
+ " data 0x%08x_%08x 0x%08x_%08x\n",
+ code,
+ mpu->debug.state, mpu->debug.count,
+ mpu->debug.reserved,
+ mpu->debug.peek[1],
+ mpu->debug.peek[0],
+ mpu->debug.peek[3],
+ mpu->debug.peek[2]
+ );
+ PMD_STATS_LOG(INFO, "MPU: %s Q: %3u"
+ ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64
+ ARK_SU64 ARK_SU64 ARK_SU64 "\n",
+ code, qid,
+ "PCI Request:", mpu->stats.pci_request,
+ "Queue_empty", mpu->stats.q_empty,
+ "Queue_q1", mpu->stats.q_q1,
+ "Queue_q2", mpu->stats.q_q2,
+ "Queue_q3", mpu->stats.q_q3,
+ "Queue_q4", mpu->stats.q_q4,
+ "Queue_full", mpu->stats.q_full
+ );
+}
+
+void
+ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t q_id)
+{
+ PMD_DEBUG_LOG(DEBUG, "MPU Setup Q: %u"
+ ARK_SU64X "\n",
+ q_id,
+ "ring_base", mpu->cfg.ring_base
+ );
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_mpu.h b/src/spdk/dpdk/drivers/net/ark/ark_mpu.h
new file mode 100644
index 00000000..92c3e67c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_mpu.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_MPU_H_
+#define _ARK_MPU_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/* The MPU or Memory Prefetch Unit is an internal Arkville hardware
+ * module for moving data between host memory and the hardware FPGA.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * MPU hardware structures
+ * These are overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+
+#define ARK_MPU_ID 0x00
+struct ark_mpu_id_t {
+ union {
+ char id[4];
+ uint32_t idnum;
+ };
+ union {
+ char ver[4];
+ uint32_t vernum;
+ };
+ uint32_t phys_id;
+ uint32_t mrr_code;
+};
+
+#define ARK_MPU_HW 0x010
+struct ark_mpu_hw_t {
+ uint16_t num_queues;
+ uint16_t reserved;
+ uint32_t hw_depth;
+ uint32_t obj_size;
+ uint32_t obj_per_mrr;
+};
+
+#define ARK_MPU_CFG 0x040
+struct ark_mpu_cfg_t {
+ rte_iova_t ring_base; /* rte_iova_t is a uint64_t */
+ uint32_t ring_size;
+ uint32_t ring_mask;
+ uint32_t min_host_move;
+ uint32_t min_hw_move;
+ volatile uint32_t sw_prod_index;
+ volatile uint32_t hw_cons_index;
+ volatile uint32_t command;
+};
+enum ARK_MPU_COMMAND {
+ MPU_CMD_IDLE = 1,
+ MPU_CMD_RUN = 2,
+ MPU_CMD_STOP = 4,
+ MPU_CMD_RESET = 8,
+ MPU_CMD_FORCE_RESET = 16,
+ MPU_COMMAND_LIMIT = 0xfFFFFFFF
+};
+
+#define ARK_MPU_STATS 0x080
+struct ark_mpu_stats_t {
+ volatile uint64_t pci_request;
+ volatile uint64_t q_empty;
+ volatile uint64_t q_q1;
+ volatile uint64_t q_q2;
+ volatile uint64_t q_q3;
+ volatile uint64_t q_q4;
+ volatile uint64_t q_full;
+};
+
+#define ARK_MPU_DEBUG 0x0C0
+struct ark_mpu_debug_t {
+ volatile uint32_t state;
+ uint32_t reserved;
+ volatile uint32_t count;
+ volatile uint32_t take;
+ volatile uint32_t peek[4];
+};
+
+/* Consolidated structure */
+struct ark_mpu_t {
+ struct ark_mpu_id_t id;
+ uint8_t reserved0[(ARK_MPU_HW - ARK_MPU_ID)
+ - sizeof(struct ark_mpu_id_t)];
+ struct ark_mpu_hw_t hw;
+ uint8_t reserved1[(ARK_MPU_CFG - ARK_MPU_HW) -
+ sizeof(struct ark_mpu_hw_t)];
+ struct ark_mpu_cfg_t cfg;
+ uint8_t reserved2[(ARK_MPU_STATS - ARK_MPU_CFG) -
+ sizeof(struct ark_mpu_cfg_t)];
+ struct ark_mpu_stats_t stats;
+ uint8_t reserved3[(ARK_MPU_DEBUG - ARK_MPU_STATS) -
+ sizeof(struct ark_mpu_stats_t)];
+ struct ark_mpu_debug_t debug;
+};
+
+uint16_t ark_api_num_queues(struct ark_mpu_t *mpu);
+uint16_t ark_api_num_queues_per_port(struct ark_mpu_t *mpu,
+ uint16_t ark_ports);
+int ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size);
+void ark_mpu_stop(struct ark_mpu_t *mpu);
+void ark_mpu_start(struct ark_mpu_t *mpu);
+int ark_mpu_reset(struct ark_mpu_t *mpu);
+int ark_mpu_configure(struct ark_mpu_t *mpu, rte_iova_t ring,
+ uint32_t ring_size, int is_tx);
+
+void ark_mpu_dump(struct ark_mpu_t *mpu, const char *msg, uint16_t idx);
+void ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t qid);
+void ark_mpu_reset_stats(struct ark_mpu_t *mpu);
+
+/* this action is in a performance critical path */
+static inline void
+ark_mpu_set_producer(struct ark_mpu_t *mpu, uint32_t idx)
+{
+ mpu->cfg.sw_prod_index = idx;
+}
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.c b/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.c
new file mode 100644
index 00000000..c21003a0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.c
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <getopt.h>
+#include <sys/time.h>
+#include <locale.h>
+#include <unistd.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ark_pktchkr.h"
+#include "ark_logs.h"
+
+static int set_arg(char *arg, char *val);
+static int ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle);
+
+#define ARK_MAX_STR_LEN 64
+union OPTV {
+ int INT;
+ int BOOL;
+ uint64_t LONG;
+ char STR[ARK_MAX_STR_LEN];
+};
+
+enum OPTYPE {
+ OTINT,
+ OTLONG,
+ OTBOOL,
+ OTSTRING
+};
+
+struct OPTIONS {
+ char opt[ARK_MAX_STR_LEN];
+ enum OPTYPE t;
+ union OPTV v;
+};
+
+static struct OPTIONS toptions[] = {
+ {{"configure"}, OTBOOL, {1} },
+ {{"port"}, OTINT, {0} },
+ {{"mac-dump"}, OTBOOL, {0} },
+ {{"dg-mode"}, OTBOOL, {1} },
+ {{"run"}, OTBOOL, {0} },
+ {{"stop"}, OTBOOL, {0} },
+ {{"dump"}, OTBOOL, {0} },
+ {{"en_resync"}, OTBOOL, {0} },
+ {{"tuser_err_val"}, OTINT, {1} },
+ {{"gen_forever"}, OTBOOL, {0} },
+ {{"en_slaved_start"}, OTBOOL, {0} },
+ {{"vary_length"}, OTBOOL, {0} },
+ {{"incr_payload"}, OTINT, {0} },
+ {{"incr_first_byte"}, OTBOOL, {0} },
+ {{"ins_seq_num"}, OTBOOL, {0} },
+ {{"ins_time_stamp"}, OTBOOL, {1} },
+ {{"ins_udp_hdr"}, OTBOOL, {0} },
+ {{"num_pkts"}, OTLONG, .v.LONG = 10000000000000L},
+ {{"payload_byte"}, OTINT, {0x55} },
+ {{"pkt_spacing"}, OTINT, {60} },
+ {{"pkt_size_min"}, OTINT, {2005} },
+ {{"pkt_size_max"}, OTINT, {1514} },
+ {{"pkt_size_incr"}, OTINT, {1} },
+ {{"eth_type"}, OTINT, {0x0800} },
+ {{"src_mac_addr"}, OTLONG, .v.LONG = 0xdC3cF6425060L},
+ {{"dst_mac_addr"}, OTLONG, .v.LONG = 0x112233445566L},
+ {{"hdr_dW0"}, OTINT, {0x0016e319} },
+ {{"hdr_dW1"}, OTINT, {0x27150004} },
+ {{"hdr_dW2"}, OTINT, {0x76967bda} },
+ {{"hdr_dW3"}, OTINT, {0x08004500} },
+ {{"hdr_dW4"}, OTINT, {0x005276ed} },
+ {{"hdr_dW5"}, OTINT, {0x40004006} },
+ {{"hdr_dW6"}, OTINT, {0x56cfc0a8} },
+ {{"start_offset"}, OTINT, {0} },
+ {{"dst_ip"}, OTSTRING, .v.STR = "169.254.10.240"},
+ {{"dst_port"}, OTINT, {65536} },
+ {{"src_port"}, OTINT, {65536} },
+};
+
+ark_pkt_chkr_t
+ark_pktchkr_init(void *addr, int ord, int l2_mode)
+{
+ struct ark_pkt_chkr_inst *inst =
+ rte_malloc("ark_pkt_chkr_inst",
+ sizeof(struct ark_pkt_chkr_inst), 0);
+ if (inst == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to malloc ark_pkt_chkr_inst.\n");
+ return inst;
+ }
+ inst->sregs = (struct ark_pkt_chkr_stat_regs *)addr;
+ inst->cregs =
+ (struct ark_pkt_chkr_ctl_regs *)(((uint8_t *)addr) + 0x100);
+ inst->ordinal = ord;
+ inst->l2_mode = l2_mode;
+ return inst;
+}
+
+void
+ark_pktchkr_uninit(ark_pkt_chkr_t handle)
+{
+ rte_free(handle);
+}
+
+void
+ark_pktchkr_run(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->sregs->pkt_start_stop = 0;
+ inst->sregs->pkt_start_stop = 0x1;
+}
+
+int
+ark_pktchkr_stopped(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = inst->sregs->pkt_start_stop;
+
+ return (((r >> 16) & 1) == 1);
+}
+
+void
+ark_pktchkr_stop(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ int wait_cycle = 10;
+
+ inst->sregs->pkt_start_stop = 0;
+ while (!ark_pktchkr_stopped(handle) && (wait_cycle > 0)) {
+ usleep(1000);
+ wait_cycle--;
+ PMD_DEBUG_LOG(DEBUG, "Waiting for pktchk %d to stop...\n",
+ inst->ordinal);
+ }
+ PMD_DEBUG_LOG(DEBUG, "Pktchk %d stopped.\n", inst->ordinal);
+}
+
+int
+ark_pktchkr_is_running(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = inst->sregs->pkt_start_stop;
+
+ return ((r & 1) == 1);
+}
+
+static void
+ark_pktchkr_set_pkt_ctrl(ark_pkt_chkr_t handle,
+ uint32_t gen_forever,
+ uint32_t vary_length,
+ uint32_t incr_payload,
+ uint32_t incr_first_byte,
+ uint32_t ins_seq_num,
+ uint32_t ins_udp_hdr,
+ uint32_t en_resync,
+ uint32_t tuser_err_val,
+ uint32_t ins_time_stamp)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = (tuser_err_val << 16) | (en_resync << 0);
+
+ inst->sregs->pkt_ctrl = r;
+ if (!inst->l2_mode)
+ ins_udp_hdr = 0;
+ r = ((gen_forever << 24) |
+ (vary_length << 16) |
+ (incr_payload << 12) |
+ (incr_first_byte << 8) |
+ (ins_time_stamp << 5) |
+ (ins_seq_num << 4) |
+ ins_udp_hdr);
+ inst->cregs->pkt_ctrl = r;
+}
+
+static
+int
+ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = inst->cregs->pkt_ctrl;
+
+ return (((r >> 24) & 1) == 1);
+}
+
+int
+ark_pktchkr_wait_done(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ if (ark_pktchkr_is_gen_forever(handle)) {
+ PMD_DEBUG_LOG(ERR, "Pktchk wait_done will not terminate"
+ " because gen_forever=1\n");
+ return -1;
+ }
+ int wait_cycle = 10;
+
+ while (!ark_pktchkr_stopped(handle) && (wait_cycle > 0)) {
+ usleep(1000);
+ wait_cycle--;
+ PMD_DEBUG_LOG(DEBUG, "Waiting for packet checker %d's"
+ " internal pktgen to finish sending...\n",
+ inst->ordinal);
+ PMD_DEBUG_LOG(DEBUG, "Pktchk %d's pktgen done.\n",
+ inst->ordinal);
+ }
+ return 0;
+}
+
+int
+ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ return inst->cregs->pkts_sent;
+}
+
+void
+ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_payload = b;
+}
+
+void
+ark_pktchkr_set_pkt_size_min(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_size_min = x;
+}
+
+void
+ark_pktchkr_set_pkt_size_max(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_size_max = x;
+}
+
+void
+ark_pktchkr_set_pkt_size_incr(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_size_incr = x;
+}
+
+void
+ark_pktchkr_set_num_pkts(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->num_pkts = x;
+}
+
+void
+ark_pktchkr_set_src_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->src_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->cregs->src_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktchkr_set_dst_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->dst_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->cregs->dst_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktchkr_set_eth_type(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->eth_type = x;
+}
+
+void
+ark_pktchkr_set_hdr_dW(ark_pkt_chkr_t handle, uint32_t *hdr)
+{
+ uint32_t i;
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ for (i = 0; i < 7; i++)
+ inst->cregs->hdr_dw[i] = hdr[i];
+}
+
+void
+ark_pktchkr_dump_stats(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ PMD_STATS_LOG(INFO, "pkts_rcvd = (%'u)\n",
+ inst->sregs->pkts_rcvd);
+ PMD_STATS_LOG(INFO, "bytes_rcvd = (%'" PRIU64 ")\n",
+ inst->sregs->bytes_rcvd);
+ PMD_STATS_LOG(INFO, "pkts_ok = (%'u)\n",
+ inst->sregs->pkts_ok);
+ PMD_STATS_LOG(INFO, "pkts_mismatch = (%'u)\n",
+ inst->sregs->pkts_mismatch);
+ PMD_STATS_LOG(INFO, "pkts_err = (%'u)\n",
+ inst->sregs->pkts_err);
+ PMD_STATS_LOG(INFO, "first_mismatch = (%'u)\n",
+ inst->sregs->first_mismatch);
+ PMD_STATS_LOG(INFO, "resync_events = (%'u)\n",
+ inst->sregs->resync_events);
+ PMD_STATS_LOG(INFO, "pkts_missing = (%'u)\n",
+ inst->sregs->pkts_missing);
+ PMD_STATS_LOG(INFO, "min_latency = (%'u)\n",
+ inst->sregs->min_latency);
+ PMD_STATS_LOG(INFO, "max_latency = (%'u)\n",
+ inst->sregs->max_latency);
+}
+
+static struct OPTIONS *
+options(const char *id)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof(toptions) / sizeof(struct OPTIONS); i++) {
+ if (strcmp(id, toptions[i].opt) == 0)
+ return &toptions[i];
+ }
+ PMD_DRV_LOG(ERR,
+ "pktchkr: Could not find requested option!, option = %s\n",
+ id);
+ return NULL;
+}
+
+static int
+set_arg(char *arg, char *val)
+{
+ struct OPTIONS *o = options(arg);
+
+ if (o) {
+ switch (o->t) {
+ case OTINT:
+ case OTBOOL:
+ o->v.INT = atoi(val);
+ break;
+ case OTLONG:
+ o->v.INT = atoll(val);
+ break;
+ case OTSTRING:
+ snprintf(o->v.STR, ARK_MAX_STR_LEN, "%s", val);
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/******
+ * Arg format = "opt0=v,opt_n=v ..."
+ ******/
+void
+ark_pktchkr_parse(char *args)
+{
+ char *argv, *v;
+ const char toks[] = "=\n\t\v\f \r";
+ argv = strtok(args, toks);
+ v = strtok(NULL, toks);
+ while (argv && v) {
+ set_arg(argv, v);
+ argv = strtok(NULL, toks);
+ v = strtok(NULL, toks);
+ }
+}
+
+static int32_t parse_ipv4_string(char const *ip_address);
+static int32_t
+parse_ipv4_string(char const *ip_address)
+{
+ unsigned int ip[4];
+
+ if (sscanf(ip_address, "%u.%u.%u.%u",
+ &ip[0], &ip[1], &ip[2], &ip[3]) != 4)
+ return 0;
+ return ip[3] + ip[2] * 0x100 + ip[1] * 0x10000ul + ip[0] * 0x1000000ul;
+}
+
+void
+ark_pktchkr_setup(ark_pkt_chkr_t handle)
+{
+ uint32_t hdr[7];
+ int32_t dst_ip = parse_ipv4_string(options("dst_ip")->v.STR);
+
+ if (!options("stop")->v.BOOL && options("configure")->v.BOOL) {
+ ark_pktchkr_set_payload_byte(handle,
+ options("payload_byte")->v.INT);
+ ark_pktchkr_set_src_mac_addr(handle,
+ options("src_mac_addr")->v.INT);
+ ark_pktchkr_set_dst_mac_addr(handle,
+ options("dst_mac_addr")->v.LONG);
+
+ ark_pktchkr_set_eth_type(handle,
+ options("eth_type")->v.INT);
+ if (options("dg-mode")->v.BOOL) {
+ hdr[0] = options("hdr_dW0")->v.INT;
+ hdr[1] = options("hdr_dW1")->v.INT;
+ hdr[2] = options("hdr_dW2")->v.INT;
+ hdr[3] = options("hdr_dW3")->v.INT;
+ hdr[4] = options("hdr_dW4")->v.INT;
+ hdr[5] = options("hdr_dW5")->v.INT;
+ hdr[6] = options("hdr_dW6")->v.INT;
+ } else {
+ hdr[0] = dst_ip;
+ hdr[1] = options("dst_port")->v.INT;
+ hdr[2] = options("src_port")->v.INT;
+ hdr[3] = 0;
+ hdr[4] = 0;
+ hdr[5] = 0;
+ hdr[6] = 0;
+ }
+ ark_pktchkr_set_hdr_dW(handle, hdr);
+ ark_pktchkr_set_num_pkts(handle,
+ options("num_pkts")->v.INT);
+ ark_pktchkr_set_pkt_size_min(handle,
+ options("pkt_size_min")->v.INT);
+ ark_pktchkr_set_pkt_size_max(handle,
+ options("pkt_size_max")->v.INT);
+ ark_pktchkr_set_pkt_size_incr(handle,
+ options("pkt_size_incr")->v.INT);
+ ark_pktchkr_set_pkt_ctrl(handle,
+ options("gen_forever")->v.BOOL,
+ options("vary_length")->v.BOOL,
+ options("incr_payload")->v.BOOL,
+ options("incr_first_byte")->v.BOOL,
+ options("ins_seq_num")->v.INT,
+ options("ins_udp_hdr")->v.BOOL,
+ options("en_resync")->v.BOOL,
+ options("tuser_err_val")->v.INT,
+ options("ins_time_stamp")->v.INT);
+ }
+
+ if (options("stop")->v.BOOL)
+ ark_pktchkr_stop(handle);
+
+ if (options("run")->v.BOOL) {
+ PMD_DEBUG_LOG(DEBUG, "Starting packet checker on port %d\n",
+ options("port")->v.INT);
+ ark_pktchkr_run(handle);
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.h b/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.h
new file mode 100644
index 00000000..a50f428b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_PKTCHKR_H_
+#define _ARK_PKTCHKR_H_
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#define ARK_PKTCHKR_BASE_ADR 0x90000
+
+typedef void *ark_pkt_chkr_t;
+
+/* The packet checker is an internal Arkville hardware module, which
+ * verifies packet streams generated from the corresponding packet
+ * generator. This module is used for Arkville testing.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * This are overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+struct ark_pkt_chkr_stat_regs {
+ uint32_t r0;
+ uint32_t pkt_start_stop;
+ uint32_t pkt_ctrl;
+ uint32_t pkts_rcvd;
+ uint64_t bytes_rcvd;
+ uint32_t pkts_ok;
+ uint32_t pkts_mismatch;
+ uint32_t pkts_err;
+ uint32_t first_mismatch;
+ uint32_t resync_events;
+ uint32_t pkts_missing;
+ uint32_t min_latency;
+ uint32_t max_latency;
+} __attribute__ ((packed));
+
+struct ark_pkt_chkr_ctl_regs {
+ uint32_t pkt_ctrl;
+ uint32_t pkt_payload;
+ uint32_t pkt_size_min;
+ uint32_t pkt_size_max;
+ uint32_t pkt_size_incr;
+ uint32_t num_pkts;
+ uint32_t pkts_sent;
+ uint32_t src_mac_addr_l;
+ uint32_t src_mac_addr_h;
+ uint32_t dst_mac_addr_l;
+ uint32_t dst_mac_addr_h;
+ uint32_t eth_type;
+ uint32_t hdr_dw[7];
+} __attribute__ ((packed));
+
+struct ark_pkt_chkr_inst {
+ struct rte_eth_dev_info *dev_info;
+ volatile struct ark_pkt_chkr_stat_regs *sregs;
+ volatile struct ark_pkt_chkr_ctl_regs *cregs;
+ int l2_mode;
+ int ordinal;
+};
+
+/* packet checker functions */
+ark_pkt_chkr_t ark_pktchkr_init(void *addr, int ord, int l2_mode);
+void ark_pktchkr_uninit(ark_pkt_chkr_t handle);
+void ark_pktchkr_run(ark_pkt_chkr_t handle);
+int ark_pktchkr_stopped(ark_pkt_chkr_t handle);
+void ark_pktchkr_stop(ark_pkt_chkr_t handle);
+int ark_pktchkr_is_running(ark_pkt_chkr_t handle);
+int ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle);
+void ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b);
+void ark_pktchkr_set_pkt_size_min(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_pkt_size_max(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_pkt_size_incr(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_num_pkts(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_src_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr);
+void ark_pktchkr_set_dst_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr);
+void ark_pktchkr_set_eth_type(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_hdr_dW(ark_pkt_chkr_t handle, uint32_t *hdr);
+void ark_pktchkr_parse(char *args);
+void ark_pktchkr_setup(ark_pkt_chkr_t handle);
+void ark_pktchkr_dump_stats(ark_pkt_chkr_t handle);
+int ark_pktchkr_wait_done(ark_pkt_chkr_t handle);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktdir.c b/src/spdk/dpdk/drivers/net/ark/ark_pktdir.c
new file mode 100644
index 00000000..1f2c8182
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_pktdir.c
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ark_pktdir.h"
+#include "ark_global.h"
+#include "ark_logs.h"
+
+
+ark_pkt_dir_t
+ark_pktdir_init(void *base)
+{
+ struct ark_pkt_dir_inst *inst =
+ rte_malloc("ark_pkt_dir_inst",
+ sizeof(struct ark_pkt_dir_inst),
+ 0);
+ if (inst == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to malloc ark_pkt_dir_inst.\n");
+ return inst;
+ }
+ inst->regs = (struct ark_pkt_dir_regs *)base;
+ inst->regs->ctrl = 0x00110110; /* POR state */
+ return inst;
+}
+
+void
+ark_pktdir_uninit(ark_pkt_dir_t handle)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+
+ rte_free(inst);
+}
+
+void
+ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+ inst->regs->ctrl = v;
+}
+
+uint32_t
+ark_pktdir_status(ark_pkt_dir_t handle)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+ return inst->regs->ctrl;
+}
+
+uint32_t
+ark_pktdir_stall_cnt(ark_pkt_dir_t handle)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+ return inst->regs->stall_cnt;
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktdir.h b/src/spdk/dpdk/drivers/net/ark/ark_pktdir.h
new file mode 100644
index 00000000..314e6dea
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_pktdir.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_PKTDIR_H_
+#define _ARK_PKTDIR_H_
+
+#include <stdint.h>
+
+#define ARK_PKTDIR_BASE_ADR 0xa0000
+
+typedef void *ark_pkt_dir_t;
+
+
+/* The packet director is an internal Arkville hardware module for
+ * directing packet data in non-typical flows, such as testing.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * This is an overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+struct ark_pkt_dir_regs {
+ uint32_t ctrl;
+ uint32_t status;
+ uint32_t stall_cnt;
+} __attribute__ ((packed));
+
+struct ark_pkt_dir_inst {
+ volatile struct ark_pkt_dir_regs *regs;
+};
+
+ark_pkt_dir_t ark_pktdir_init(void *base);
+void ark_pktdir_uninit(ark_pkt_dir_t handle);
+void ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v);
+uint32_t ark_pktdir_stall_cnt(ark_pkt_dir_t handle);
+uint32_t ark_pktdir_status(ark_pkt_dir_t handle);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktgen.c b/src/spdk/dpdk/drivers/net/ark/ark_pktgen.c
new file mode 100644
index 00000000..2a2b428e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_pktgen.c
@@ -0,0 +1,471 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <getopt.h>
+#include <sys/time.h>
+#include <locale.h>
+#include <unistd.h>
+
+#include <rte_eal.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ark_pktgen.h"
+#include "ark_logs.h"
+
+#define ARK_MAX_STR_LEN 64
+union OPTV {
+ int INT;
+ int BOOL;
+ uint64_t LONG;
+ char STR[ARK_MAX_STR_LEN];
+};
+
+enum OPTYPE {
+ OTINT,
+ OTLONG,
+ OTBOOL,
+ OTSTRING
+};
+
+struct OPTIONS {
+ char opt[ARK_MAX_STR_LEN];
+ enum OPTYPE t;
+ union OPTV v;
+};
+
+static struct OPTIONS toptions[] = {
+ {{"configure"}, OTBOOL, {1} },
+ {{"dg-mode"}, OTBOOL, {1} },
+ {{"run"}, OTBOOL, {0} },
+ {{"pause"}, OTBOOL, {0} },
+ {{"reset"}, OTBOOL, {0} },
+ {{"dump"}, OTBOOL, {0} },
+ {{"gen_forever"}, OTBOOL, {0} },
+ {{"en_slaved_start"}, OTBOOL, {0} },
+ {{"vary_length"}, OTBOOL, {0} },
+ {{"incr_payload"}, OTBOOL, {0} },
+ {{"incr_first_byte"}, OTBOOL, {0} },
+ {{"ins_seq_num"}, OTBOOL, {0} },
+ {{"ins_time_stamp"}, OTBOOL, {1} },
+ {{"ins_udp_hdr"}, OTBOOL, {0} },
+ {{"num_pkts"}, OTLONG, .v.LONG = 100000000},
+ {{"payload_byte"}, OTINT, {0x55} },
+ {{"pkt_spacing"}, OTINT, {130} },
+ {{"pkt_size_min"}, OTINT, {2006} },
+ {{"pkt_size_max"}, OTINT, {1514} },
+ {{"pkt_size_incr"}, OTINT, {1} },
+ {{"eth_type"}, OTINT, {0x0800} },
+ {{"src_mac_addr"}, OTLONG, .v.LONG = 0xdC3cF6425060L},
+ {{"dst_mac_addr"}, OTLONG, .v.LONG = 0x112233445566L},
+ {{"hdr_dW0"}, OTINT, {0x0016e319} },
+ {{"hdr_dW1"}, OTINT, {0x27150004} },
+ {{"hdr_dW2"}, OTINT, {0x76967bda} },
+ {{"hdr_dW3"}, OTINT, {0x08004500} },
+ {{"hdr_dW4"}, OTINT, {0x005276ed} },
+ {{"hdr_dW5"}, OTINT, {0x40004006} },
+ {{"hdr_dW6"}, OTINT, {0x56cfc0a8} },
+ {{"start_offset"}, OTINT, {0} },
+ {{"bytes_per_cycle"}, OTINT, {10} },
+ {{"shaping"}, OTBOOL, {0} },
+ {{"dst_ip"}, OTSTRING, .v.STR = "169.254.10.240"},
+ {{"dst_port"}, OTINT, {65536} },
+ {{"src_port"}, OTINT, {65536} },
+};
+
+ark_pkt_gen_t
+ark_pktgen_init(void *adr, int ord, int l2_mode)
+{
+ struct ark_pkt_gen_inst *inst =
+ rte_malloc("ark_pkt_gen_inst_pmd",
+ sizeof(struct ark_pkt_gen_inst), 0);
+ if (inst == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to malloc ark_pkt_gen_inst.\n");
+ return inst;
+ }
+ inst->regs = (struct ark_pkt_gen_regs *)adr;
+ inst->ordinal = ord;
+ inst->l2_mode = l2_mode;
+ return inst;
+}
+
+void
+ark_pktgen_uninit(ark_pkt_gen_t handle)
+{
+ rte_free(handle);
+}
+
+void
+ark_pktgen_run(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ inst->regs->pkt_start_stop = 1;
+}
+
+uint32_t
+ark_pktgen_paused(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_start_stop;
+
+ return (((r >> 16) & 1) == 1);
+}
+
+void
+ark_pktgen_pause(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ int cnt = 0;
+
+ inst->regs->pkt_start_stop = 0;
+
+ while (!ark_pktgen_paused(handle)) {
+ usleep(1000);
+ if (cnt++ > 100) {
+ PMD_DRV_LOG(ERR, "Pktgen %d failed to pause.\n",
+ inst->ordinal);
+ break;
+ }
+ }
+ PMD_DEBUG_LOG(DEBUG, "Pktgen %d paused.\n", inst->ordinal);
+}
+
+void
+ark_pktgen_reset(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ if (!ark_pktgen_is_running(handle) &&
+ !ark_pktgen_paused(handle)) {
+ PMD_DEBUG_LOG(DEBUG, "Pktgen %d is not running"
+ " and is not paused. No need to reset.\n",
+ inst->ordinal);
+ return;
+ }
+
+ if (ark_pktgen_is_running(handle) &&
+ !ark_pktgen_paused(handle)) {
+ PMD_DEBUG_LOG(DEBUG,
+ "Pktgen %d is not paused. Pausing first.\n",
+ inst->ordinal);
+ ark_pktgen_pause(handle);
+ }
+
+ PMD_DEBUG_LOG(DEBUG, "Resetting pktgen %d.\n", inst->ordinal);
+ inst->regs->pkt_start_stop = (1 << 8);
+}
+
+uint32_t
+ark_pktgen_tx_done(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_start_stop;
+
+ return (((r >> 24) & 1) == 1);
+}
+
+uint32_t
+ark_pktgen_is_running(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_start_stop;
+
+ return ((r & 1) == 1);
+}
+
+uint32_t
+ark_pktgen_is_gen_forever(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_ctrl;
+
+ return (((r >> 24) & 1) == 1);
+}
+
+void
+ark_pktgen_wait_done(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ int wait_cycle = 10;
+
+ if (ark_pktgen_is_gen_forever(handle))
+ PMD_DRV_LOG(ERR, "Pktgen wait_done will not terminate"
+ " because gen_forever=1\n");
+
+ while (!ark_pktgen_tx_done(handle) && (wait_cycle > 0)) {
+ usleep(1000);
+ wait_cycle--;
+ PMD_DEBUG_LOG(DEBUG,
+ "Waiting for pktgen %d to finish sending...\n",
+ inst->ordinal);
+ }
+ PMD_DEBUG_LOG(DEBUG, "Pktgen %d done.\n", inst->ordinal);
+}
+
+uint32_t
+ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ return inst->regs->pkts_sent;
+}
+
+void
+ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_payload = b;
+}
+
+void
+ark_pktgen_set_pkt_spacing(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_spacing = x;
+}
+
+void
+ark_pktgen_set_pkt_size_min(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_size_min = x;
+}
+
+void
+ark_pktgen_set_pkt_size_max(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_size_max = x;
+}
+
+void
+ark_pktgen_set_pkt_size_incr(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_size_incr = x;
+}
+
+void
+ark_pktgen_set_num_pkts(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->num_pkts = x;
+}
+
+void
+ark_pktgen_set_src_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->src_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->regs->src_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktgen_set_dst_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->dst_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->regs->dst_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktgen_set_eth_type(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->eth_type = x;
+}
+
+void
+ark_pktgen_set_hdr_dW(ark_pkt_gen_t handle, uint32_t *hdr)
+{
+ uint32_t i;
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ for (i = 0; i < 7; i++)
+ inst->regs->hdr_dw[i] = hdr[i];
+}
+
+void
+ark_pktgen_set_start_offset(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ inst->regs->start_offset = x;
+}
+
+static struct OPTIONS *
+options(const char *id)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof(toptions) / sizeof(struct OPTIONS); i++) {
+ if (strcmp(id, toptions[i].opt) == 0)
+ return &toptions[i];
+ }
+
+ PMD_DRV_LOG(ERR,
+ "Pktgen: Could not find requested option!, "
+ "option = %s\n",
+ id
+ );
+ return NULL;
+}
+
+static int pmd_set_arg(char *arg, char *val);
+static int
+pmd_set_arg(char *arg, char *val)
+{
+ struct OPTIONS *o = options(arg);
+
+ if (o) {
+ switch (o->t) {
+ case OTINT:
+ case OTBOOL:
+ o->v.INT = atoi(val);
+ break;
+ case OTLONG:
+ o->v.INT = atoll(val);
+ break;
+ case OTSTRING:
+ snprintf(o->v.STR, ARK_MAX_STR_LEN, "%s", val);
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/******
+ * Arg format = "opt0=v,opt_n=v ..."
+ ******/
+void
+ark_pktgen_parse(char *args)
+{
+ char *argv, *v;
+ const char toks[] = " =\n\t\v\f \r";
+ argv = strtok(args, toks);
+ v = strtok(NULL, toks);
+ while (argv && v) {
+ pmd_set_arg(argv, v);
+ argv = strtok(NULL, toks);
+ v = strtok(NULL, toks);
+ }
+}
+
+static int32_t parse_ipv4_string(char const *ip_address);
+static int32_t
+parse_ipv4_string(char const *ip_address)
+{
+ unsigned int ip[4];
+
+ if (sscanf(ip_address, "%u.%u.%u.%u",
+ &ip[0], &ip[1], &ip[2], &ip[3]) != 4)
+ return 0;
+ return ip[3] + ip[2] * 0x100 + ip[1] * 0x10000ul + ip[0] * 0x1000000ul;
+}
+
+static void
+ark_pktgen_set_pkt_ctrl(ark_pkt_gen_t handle,
+ uint32_t gen_forever,
+ uint32_t en_slaved_start,
+ uint32_t vary_length,
+ uint32_t incr_payload,
+ uint32_t incr_first_byte,
+ uint32_t ins_seq_num,
+ uint32_t ins_udp_hdr,
+ uint32_t ins_time_stamp)
+{
+ uint32_t r;
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ if (!inst->l2_mode)
+ ins_udp_hdr = 0;
+
+ r = ((gen_forever << 24) |
+ (en_slaved_start << 20) |
+ (vary_length << 16) |
+ (incr_payload << 12) |
+ (incr_first_byte << 8) |
+ (ins_time_stamp << 5) |
+ (ins_seq_num << 4) |
+ ins_udp_hdr);
+
+ inst->regs->bytes_per_cycle = options("bytes_per_cycle")->v.INT;
+ if (options("shaping")->v.BOOL)
+ r = r | (1 << 28); /* enable shaping */
+
+ inst->regs->pkt_ctrl = r;
+}
+
+void
+ark_pktgen_setup(ark_pkt_gen_t handle)
+{
+ uint32_t hdr[7];
+ int32_t dst_ip = parse_ipv4_string(options("dst_ip")->v.STR);
+
+ if (!options("pause")->v.BOOL &&
+ (!options("reset")->v.BOOL &&
+ (options("configure")->v.BOOL))) {
+ ark_pktgen_set_payload_byte(handle,
+ options("payload_byte")->v.INT);
+ ark_pktgen_set_src_mac_addr(handle,
+ options("src_mac_addr")->v.INT);
+ ark_pktgen_set_dst_mac_addr(handle,
+ options("dst_mac_addr")->v.LONG);
+ ark_pktgen_set_eth_type(handle,
+ options("eth_type")->v.INT);
+
+ if (options("dg-mode")->v.BOOL) {
+ hdr[0] = options("hdr_dW0")->v.INT;
+ hdr[1] = options("hdr_dW1")->v.INT;
+ hdr[2] = options("hdr_dW2")->v.INT;
+ hdr[3] = options("hdr_dW3")->v.INT;
+ hdr[4] = options("hdr_dW4")->v.INT;
+ hdr[5] = options("hdr_dW5")->v.INT;
+ hdr[6] = options("hdr_dW6")->v.INT;
+ } else {
+ hdr[0] = dst_ip;
+ hdr[1] = options("dst_port")->v.INT;
+ hdr[2] = options("src_port")->v.INT;
+ hdr[3] = 0;
+ hdr[4] = 0;
+ hdr[5] = 0;
+ hdr[6] = 0;
+ }
+ ark_pktgen_set_hdr_dW(handle, hdr);
+ ark_pktgen_set_num_pkts(handle,
+ options("num_pkts")->v.INT);
+ ark_pktgen_set_pkt_size_min(handle,
+ options("pkt_size_min")->v.INT);
+ ark_pktgen_set_pkt_size_max(handle,
+ options("pkt_size_max")->v.INT);
+ ark_pktgen_set_pkt_size_incr(handle,
+ options("pkt_size_incr")->v.INT);
+ ark_pktgen_set_pkt_spacing(handle,
+ options("pkt_spacing")->v.INT);
+ ark_pktgen_set_start_offset(handle,
+ options("start_offset")->v.INT);
+ ark_pktgen_set_pkt_ctrl(handle,
+ options("gen_forever")->v.BOOL,
+ options("en_slaved_start")->v.BOOL,
+ options("vary_length")->v.BOOL,
+ options("incr_payload")->v.BOOL,
+ options("incr_first_byte")->v.BOOL,
+ options("ins_seq_num")->v.INT,
+ options("ins_udp_hdr")->v.BOOL,
+ options("ins_time_stamp")->v.INT);
+ }
+
+ if (options("pause")->v.BOOL)
+ ark_pktgen_pause(handle);
+
+ if (options("reset")->v.BOOL)
+ ark_pktgen_reset(handle);
+ if (options("run")->v.BOOL) {
+ PMD_DEBUG_LOG(DEBUG, "Starting packet generator on port %d\n",
+ options("port")->v.INT);
+ ark_pktgen_run(handle);
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktgen.h b/src/spdk/dpdk/drivers/net/ark/ark_pktgen.h
new file mode 100644
index 00000000..0e5f76aa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_pktgen.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_PKTGEN_H_
+#define _ARK_PKTGEN_H_
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#define ARK_PKTGEN_BASE_ADR 0x10000
+
+typedef void *ark_pkt_gen_t;
+
+/* The packet generator is an internal Arkville hardware module, which
+ * generates known packets for use in integrity and line-rate testing.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * This is an overlay structure to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+struct ark_pkt_gen_regs {
+ uint32_t r0;
+ volatile uint32_t pkt_start_stop;
+ volatile uint32_t pkt_ctrl;
+ uint32_t pkt_payload;
+ uint32_t pkt_spacing;
+ uint32_t pkt_size_min;
+ uint32_t pkt_size_max;
+ uint32_t pkt_size_incr;
+ volatile uint32_t num_pkts;
+ volatile uint32_t pkts_sent;
+ uint32_t src_mac_addr_l;
+ uint32_t src_mac_addr_h;
+ uint32_t dst_mac_addr_l;
+ uint32_t dst_mac_addr_h;
+ uint32_t eth_type;
+ uint32_t hdr_dw[7];
+ uint32_t start_offset;
+ uint32_t bytes_per_cycle;
+} __attribute__ ((packed));
+
+struct ark_pkt_gen_inst {
+ struct rte_eth_dev_info *dev_info;
+ struct ark_pkt_gen_regs *regs;
+ int l2_mode;
+ int ordinal;
+};
+
+/* packet generator functions */
+ark_pkt_gen_t ark_pktgen_init(void *arg, int ord, int l2_mode);
+void ark_pktgen_uninit(ark_pkt_gen_t handle);
+void ark_pktgen_run(ark_pkt_gen_t handle);
+void ark_pktgen_pause(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_paused(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_is_gen_forever(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_is_running(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_tx_done(ark_pkt_gen_t handle);
+void ark_pktgen_reset(ark_pkt_gen_t handle);
+void ark_pktgen_wait_done(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle);
+void ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b);
+void ark_pktgen_set_pkt_spacing(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_pkt_size_min(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_pkt_size_max(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_pkt_size_incr(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_num_pkts(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_src_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr);
+void ark_pktgen_set_dst_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr);
+void ark_pktgen_set_eth_type(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_hdr_dW(ark_pkt_gen_t handle, uint32_t *hdr);
+void ark_pktgen_set_start_offset(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_parse(char *argv);
+void ark_pktgen_setup(ark_pkt_gen_t handle);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_rqp.c b/src/spdk/dpdk/drivers/net/ark/ark_rqp.c
new file mode 100644
index 00000000..bf1af4d6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_rqp.c
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+
+#include "ark_rqp.h"
+#include "ark_logs.h"
+
+/* ************************************************************************* */
+void
+ark_rqp_stats_reset(struct ark_rqpace_t *rqp)
+{
+ rqp->stats_clear = 1;
+ /* POR 992 */
+ /* rqp->cpld_max = 992; */
+ /* POR 64 */
+ /* rqp->cplh_max = 64; */
+}
+
+/* ************************************************************************* */
+void
+ark_rqp_dump(struct ark_rqpace_t *rqp)
+{
+ if (rqp->err_count_other != 0)
+ PMD_DRV_LOG(ERR,
+ "RQP Errors noted: ctrl: %d cplh_hmax %d cpld_max %d"
+ ARK_SU32
+ ARK_SU32 "\n",
+ rqp->ctrl, rqp->cplh_max, rqp->cpld_max,
+ "Error Count", rqp->err_cnt,
+ "Error General", rqp->err_count_other);
+
+ PMD_STATS_LOG(INFO, "RQP Dump: ctrl: %d cplh_hmax %d cpld_max %d"
+ ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n",
+ rqp->ctrl, rqp->cplh_max, rqp->cpld_max,
+ "Error Count", rqp->err_cnt,
+ "Error General", rqp->err_count_other,
+ "stall_pS", rqp->stall_ps,
+ "stall_pS Min", rqp->stall_ps_min,
+ "stall_pS Max", rqp->stall_ps_max,
+ "req_pS", rqp->req_ps,
+ "req_pS Min", rqp->req_ps_min,
+ "req_pS Max", rqp->req_ps_max,
+ "req_dWPS", rqp->req_dw_ps,
+ "req_dWPS Min", rqp->req_dw_ps_min,
+ "req_dWPS Max", rqp->req_dw_ps_max,
+ "cpl_pS", rqp->cpl_ps,
+ "cpl_pS Min", rqp->cpl_ps_min,
+ "cpl_pS Max", rqp->cpl_ps_max,
+ "cpl_dWPS", rqp->cpl_dw_ps,
+ "cpl_dWPS Min", rqp->cpl_dw_ps_min,
+ "cpl_dWPS Max", rqp->cpl_dw_ps_max,
+ "cplh pending", rqp->cplh_pending,
+ "cpld pending", rqp->cpld_pending,
+ "cplh pending max", rqp->cplh_pending_max,
+ "cpld pending max", rqp->cpld_pending_max);
+}
+
+int
+ark_rqp_lasped(struct ark_rqpace_t *rqp)
+{
+ return rqp->lasped;
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_rqp.h b/src/spdk/dpdk/drivers/net/ark/ark_rqp.h
new file mode 100644
index 00000000..6c804606
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_rqp.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_RQP_H_
+#define _ARK_RQP_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/* The RQP or ReQuest Pacer is an internal Arkville hardware module
+ * which limits the PCIE data flow to insure correct operation for the
+ * particular hardware PCIE endpoint.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * RQ Pacing core hardware structure
+ * This is an overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+struct ark_rqpace_t {
+ volatile uint32_t ctrl;
+ volatile uint32_t stats_clear;
+ volatile uint32_t cplh_max;
+ volatile uint32_t cpld_max;
+ volatile uint32_t err_cnt;
+ volatile uint32_t stall_ps;
+ volatile uint32_t stall_ps_min;
+ volatile uint32_t stall_ps_max;
+ volatile uint32_t req_ps;
+ volatile uint32_t req_ps_min;
+ volatile uint32_t req_ps_max;
+ volatile uint32_t req_dw_ps;
+ volatile uint32_t req_dw_ps_min;
+ volatile uint32_t req_dw_ps_max;
+ volatile uint32_t cpl_ps;
+ volatile uint32_t cpl_ps_min;
+ volatile uint32_t cpl_ps_max;
+ volatile uint32_t cpl_dw_ps;
+ volatile uint32_t cpl_dw_ps_min;
+ volatile uint32_t cpl_dw_ps_max;
+ volatile uint32_t cplh_pending;
+ volatile uint32_t cpld_pending;
+ volatile uint32_t cplh_pending_max;
+ volatile uint32_t cpld_pending_max;
+ volatile uint32_t err_count_other;
+ char eval[4];
+ volatile int lasped;
+};
+
+void ark_rqp_dump(struct ark_rqpace_t *rqp);
+void ark_rqp_stats_reset(struct ark_rqpace_t *rqp);
+int ark_rqp_lasped(struct ark_rqpace_t *rqp);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_udm.c b/src/spdk/dpdk/drivers/net/ark/ark_udm.c
new file mode 100644
index 00000000..03f1922c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_udm.c
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+
+#include "ark_logs.h"
+#include "ark_udm.h"
+
+int
+ark_udm_verify(struct ark_udm_t *udm)
+{
+ if (sizeof(struct ark_udm_t) != ARK_UDM_EXPECT_SIZE) {
+ PMD_DRV_LOG(ERR,
+ "ARK: UDM structure looks incorrect %d vs %zd\n",
+ ARK_UDM_EXPECT_SIZE, sizeof(struct ark_udm_t));
+ return -1;
+ }
+
+ if (udm->setup.const0 != ARK_UDM_CONST) {
+ PMD_DRV_LOG(ERR,
+ "ARK: UDM module not found as expected 0x%08x\n",
+ udm->setup.const0);
+ return -1;
+ }
+ return 0;
+}
+
+int
+ark_udm_stop(struct ark_udm_t *udm, const int wait)
+{
+ int cnt = 0;
+
+ udm->cfg.command = 2;
+
+ while (wait && (udm->cfg.stop_flushed & 0x01) == 0) {
+ if (cnt++ > 1000)
+ return 1;
+
+ usleep(10);
+ }
+ return 0;
+}
+
+int
+ark_udm_reset(struct ark_udm_t *udm)
+{
+ int status;
+
+ status = ark_udm_stop(udm, 1);
+ if (status != 0) {
+ PMD_DEBUG_LOG(INFO, "%s stop failed doing forced reset\n",
+ __func__);
+ udm->cfg.command = 4;
+ usleep(10);
+ udm->cfg.command = 3;
+ status = ark_udm_stop(udm, 0);
+ PMD_DEBUG_LOG(INFO, "%s stop status %d post failure"
+ " and forced reset\n",
+ __func__, status);
+ } else {
+ udm->cfg.command = 3;
+ }
+
+ return status;
+}
+
+void
+ark_udm_start(struct ark_udm_t *udm)
+{
+ udm->cfg.command = 1;
+}
+
+void
+ark_udm_stats_reset(struct ark_udm_t *udm)
+{
+ udm->pcibp.pci_clear = 1;
+ udm->tlp_ps.tlp_clear = 1;
+}
+
+void
+ark_udm_configure(struct ark_udm_t *udm,
+ uint32_t headroom,
+ uint32_t dataroom,
+ uint32_t write_interval_ns)
+{
+ /* headroom and data room are in DWords in the UDM */
+ udm->cfg.dataroom = dataroom / 4;
+ udm->cfg.headroom = headroom / 4;
+
+ /* 4 NS period ns */
+ udm->rt_cfg.write_interval = write_interval_ns / 4;
+}
+
+void
+ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr)
+{
+ udm->rt_cfg.hw_prod_addr = addr;
+}
+
+int
+ark_udm_is_flushed(struct ark_udm_t *udm)
+{
+ return (udm->cfg.stop_flushed & 0x01) != 0;
+}
+
+uint64_t
+ark_udm_dropped(struct ark_udm_t *udm)
+{
+ return udm->qstats.q_pkt_drop;
+}
+
+uint64_t
+ark_udm_bytes(struct ark_udm_t *udm)
+{
+ return udm->qstats.q_byte_count;
+}
+
+uint64_t
+ark_udm_packets(struct ark_udm_t *udm)
+{
+ return udm->qstats.q_ff_packet_count;
+}
+
+void
+ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg)
+{
+ PMD_STATS_LOG(INFO, "UDM Stats: %s"
+ ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 "\n",
+ msg,
+ "Pkts Received", udm->stats.rx_packet_count,
+ "Pkts Finalized", udm->stats.rx_sent_packets,
+ "Pkts Dropped", udm->tlp.pkt_drop,
+ "Bytes Count", udm->stats.rx_byte_count,
+ "MBuf Count", udm->stats.rx_mbuf_count);
+}
+
+void
+ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg, uint16_t qid)
+{
+ PMD_STATS_LOG(INFO, "UDM Queue %3u Stats: %s"
+ ARK_SU64 ARK_SU64
+ ARK_SU64 ARK_SU64
+ ARK_SU64 "\n",
+ qid, msg,
+ "Pkts Received", udm->qstats.q_packet_count,
+ "Pkts Finalized", udm->qstats.q_ff_packet_count,
+ "Pkts Dropped", udm->qstats.q_pkt_drop,
+ "Bytes Count", udm->qstats.q_byte_count,
+ "MBuf Count", udm->qstats.q_mbuf_count);
+}
+
+void
+ark_udm_dump(struct ark_udm_t *udm, const char *msg)
+{
+ PMD_DEBUG_LOG(DEBUG, "UDM Dump: %s Stopped: %d\n", msg,
+ udm->cfg.stop_flushed);
+}
+
+void
+ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id)
+{
+ PMD_DEBUG_LOG(DEBUG, "UDM Setup Q: %u"
+ ARK_SU64X ARK_SU32 "\n",
+ q_id,
+ "hw_prod_addr", udm->rt_cfg.hw_prod_addr,
+ "prod_idx", udm->rt_cfg.prod_idx);
+}
+
+void
+ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg)
+{
+ struct ark_udm_pcibp_t *bp = &udm->pcibp;
+
+ PMD_STATS_LOG(INFO, "UDM Performance %s"
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32
+ "\n",
+ msg,
+ "PCI Empty", bp->pci_empty,
+ "PCI Q1", bp->pci_q1,
+ "PCI Q2", bp->pci_q2,
+ "PCI Q3", bp->pci_q3,
+ "PCI Q4", bp->pci_q4,
+ "PCI Full", bp->pci_full);
+}
+
+void
+ark_udm_queue_stats_reset(struct ark_udm_t *udm)
+{
+ udm->qstats.q_byte_count = 1;
+}
+
+void
+ark_udm_queue_enable(struct ark_udm_t *udm, int enable)
+{
+ udm->qstats.q_enable = enable ? 1 : 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_udm.h b/src/spdk/dpdk/drivers/net/ark/ark_udm.h
new file mode 100644
index 00000000..5846c825
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_udm.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_UDM_H_
+#define _ARK_UDM_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/* The UDM or Upstream Data Mover is an internal Arkville hardware
+ * module for moving packet from the RX packet streams to host memory.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/* Meta data structure apssed from FPGA, must match layout in FPGA */
+struct ark_rx_meta {
+ uint64_t timestamp;
+ uint64_t user_data;
+ uint8_t port;
+ uint8_t dst_queue;
+ uint16_t pkt_len;
+};
+
+/*
+ * UDM hardware structures
+ * These are overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+
+#define ARK_RX_WRITE_TIME_NS 2500
+#define ARK_UDM_SETUP 0
+#define ARK_UDM_CONST 0xbACECACE
+struct ark_udm_setup_t {
+ uint32_t r0;
+ uint32_t r4;
+ volatile uint32_t cycle_count;
+ uint32_t const0;
+};
+
+#define ARK_UDM_CFG 0x010
+struct ark_udm_cfg_t {
+ volatile uint32_t stop_flushed; /* RO */
+ volatile uint32_t command;
+ uint32_t dataroom;
+ uint32_t headroom;
+};
+
+typedef enum {
+ ARK_UDM_START = 0x1,
+ ARK_UDM_STOP = 0x2,
+ ARK_UDM_RESET = 0x3
+} ark_udm_commands;
+
+#define ARK_UDM_STATS 0x020
+struct ark_udm_stats_t {
+ volatile uint64_t rx_byte_count;
+ volatile uint64_t rx_packet_count;
+ volatile uint64_t rx_mbuf_count;
+ volatile uint64_t rx_sent_packets;
+};
+
+#define ARK_UDM_PQ 0x040
+struct ark_udm_queue_stats_t {
+ volatile uint64_t q_byte_count;
+ volatile uint64_t q_packet_count; /* includes drops */
+ volatile uint64_t q_mbuf_count;
+ volatile uint64_t q_ff_packet_count;
+ volatile uint64_t q_pkt_drop;
+ uint32_t q_enable;
+};
+
+#define ARK_UDM_TLP 0x0070
+struct ark_udm_tlp_t {
+ volatile uint64_t pkt_drop; /* global */
+ volatile uint32_t tlp_q1;
+ volatile uint32_t tlp_q2;
+ volatile uint32_t tlp_q3;
+ volatile uint32_t tlp_q4;
+ volatile uint32_t tlp_full;
+};
+
+#define ARK_UDM_PCIBP 0x00a0
+struct ark_udm_pcibp_t {
+ volatile uint32_t pci_clear;
+ volatile uint32_t pci_empty;
+ volatile uint32_t pci_q1;
+ volatile uint32_t pci_q2;
+ volatile uint32_t pci_q3;
+ volatile uint32_t pci_q4;
+ volatile uint32_t pci_full;
+};
+
+#define ARK_UDM_TLP_PS 0x00bc
+struct ark_udm_tlp_ps_t {
+ volatile uint32_t tlp_clear;
+ volatile uint32_t tlp_ps_min;
+ volatile uint32_t tlp_ps_max;
+ volatile uint32_t tlp_full_ps_min;
+ volatile uint32_t tlp_full_ps_max;
+ volatile uint32_t tlp_dw_ps_min;
+ volatile uint32_t tlp_dw_ps_max;
+ volatile uint32_t tlp_pldw_ps_min;
+ volatile uint32_t tlp_pldw_ps_max;
+};
+
+#define ARK_UDM_RT_CFG 0x00e0
+struct ark_udm_rt_cfg_t {
+ rte_iova_t hw_prod_addr;
+ uint32_t write_interval; /* 4ns cycles */
+ volatile uint32_t prod_idx; /* RO */
+};
+
+/* Consolidated structure */
+#define ARK_UDM_EXPECT_SIZE (0x00fc + 4)
+#define ARK_UDM_QOFFSET ARK_UDM_EXPECT_SIZE
+struct ark_udm_t {
+ struct ark_udm_setup_t setup;
+ struct ark_udm_cfg_t cfg;
+ struct ark_udm_stats_t stats;
+ struct ark_udm_queue_stats_t qstats;
+ uint8_t reserved1[(ARK_UDM_TLP - ARK_UDM_PQ) -
+ sizeof(struct ark_udm_queue_stats_t)];
+ struct ark_udm_tlp_t tlp;
+ uint8_t reserved2[(ARK_UDM_PCIBP - ARK_UDM_TLP) -
+ sizeof(struct ark_udm_tlp_t)];
+ struct ark_udm_pcibp_t pcibp;
+ struct ark_udm_tlp_ps_t tlp_ps;
+ struct ark_udm_rt_cfg_t rt_cfg;
+ int8_t reserved3[(ARK_UDM_EXPECT_SIZE - ARK_UDM_RT_CFG) -
+ sizeof(struct ark_udm_rt_cfg_t)];
+};
+
+
+int ark_udm_verify(struct ark_udm_t *udm);
+int ark_udm_stop(struct ark_udm_t *udm, int wait);
+void ark_udm_start(struct ark_udm_t *udm);
+int ark_udm_reset(struct ark_udm_t *udm);
+void ark_udm_configure(struct ark_udm_t *udm,
+ uint32_t headroom,
+ uint32_t dataroom,
+ uint32_t write_interval_ns);
+void ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr);
+void ark_udm_stats_reset(struct ark_udm_t *udm);
+void ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg,
+ uint16_t qid);
+void ark_udm_dump(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id);
+int ark_udm_is_flushed(struct ark_udm_t *udm);
+
+/* Per queue data */
+uint64_t ark_udm_dropped(struct ark_udm_t *udm);
+uint64_t ark_udm_bytes(struct ark_udm_t *udm);
+uint64_t ark_udm_packets(struct ark_udm_t *udm);
+
+void ark_udm_queue_stats_reset(struct ark_udm_t *udm);
+void ark_udm_queue_enable(struct ark_udm_t *udm, int enable);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/meson.build b/src/spdk/dpdk/drivers/net/ark/meson.build
new file mode 100644
index 00000000..99151bba
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('ark_ddm.c',
+ 'ark_ethdev.c',
+ 'ark_ethdev_rx.c',
+ 'ark_ethdev_tx.c',
+ 'ark_mpu.c',
+ 'ark_pktchkr.c',
+ 'ark_pktdir.c',
+ 'ark_pktgen.c',
+ 'ark_rqp.c',
+ 'ark_udm.c')
diff --git a/src/spdk/dpdk/drivers/net/ark/rte_pmd_ark_version.map b/src/spdk/dpdk/drivers/net/ark/rte_pmd_ark_version.map
new file mode 100644
index 00000000..1062e042
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/rte_pmd_ark_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+ local: *;
+
+};
diff --git a/src/spdk/dpdk/drivers/net/avf/Makefile b/src/spdk/dpdk/drivers/net/avf/Makefile
new file mode 100644
index 00000000..3f815bbc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/Makefile
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_avf.a
+
+CFLAGS += -O3
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+
+# used to dump HW descriptor for debugging
+# CFLAGS += -DDEBUG_DUMP_DESC
+
+EXPORT_MAP := rte_pmd_avf_version.map
+
+LIBABIVER := 1
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings
+#
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS_BASE_DRIVER =
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+CFLAGS_BASE_DRIVER = -Wno-pointer-to-int-cast
+else
+CFLAGS_BASE_DRIVER = -Wno-pointer-to-int-cast
+
+endif
+OBJS_BASE_DRIVER=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_adminq.c
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_common.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_vchnl.c
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_rxtx.c
+ifeq ($(CONFIG_RTE_ARCH_X86), y)
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_INC_VECTOR) += avf_rxtx_vec_sse.c
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/avf/avf.h b/src/spdk/dpdk/drivers/net/avf/avf.h
new file mode 100644
index 00000000..dcf8d1c7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/avf.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _AVF_ETHDEV_H_
+#define _AVF_ETHDEV_H_
+
+#include <rte_kvargs.h>
+
+#define AVF_AQ_LEN 32
+#define AVF_AQ_BUF_SZ 4096
+#define AVF_RESET_WAIT_CNT 50
+#define AVF_BUF_SIZE_MIN 1024
+#define AVF_FRAME_SIZE_MAX 9728
+#define AVF_QUEUE_BASE_ADDR_UNIT 128
+
+#define AVF_MAX_NUM_QUEUES 16
+
+#define AVF_NUM_MACADDR_MAX 64
+
+#define AVF_DEFAULT_RX_PTHRESH 8
+#define AVF_DEFAULT_RX_HTHRESH 8
+#define AVF_DEFAULT_RX_WTHRESH 0
+
+#define AVF_DEFAULT_RX_FREE_THRESH 32
+
+#define AVF_DEFAULT_TX_PTHRESH 32
+#define AVF_DEFAULT_TX_HTHRESH 0
+#define AVF_DEFAULT_TX_WTHRESH 0
+
+#define AVF_DEFAULT_TX_FREE_THRESH 32
+#define AVF_DEFAULT_TX_RS_THRESH 32
+
+#define AVF_BASIC_OFFLOAD_CAPS ( \
+ VF_BASE_MODE_OFFLOADS | \
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | \
+ VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+
+#define AVF_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV4_SCTP | \
+ ETH_RSS_NONFRAG_IPV4_OTHER)
+
+#define AVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define AVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+/* Default queue interrupt throttling time in microseconds */
+#define AVF_ITR_INDEX_DEFAULT 0
+#define AVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define AVF_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
+
+/* The overhead from MTU to max frame size.
+ * Considering QinQ packet, the VLAN tag needs to be counted twice.
+ */
+#define AVF_VLAN_TAG_SIZE 4
+#define AVF_ETH_OVERHEAD \
+ (ETHER_HDR_LEN + ETHER_CRC_LEN + AVF_VLAN_TAG_SIZE * 2)
+
+struct avf_adapter;
+struct avf_rx_queue;
+struct avf_tx_queue;
+
+/* Structure that defines a VSI, associated with a adapter. */
+struct avf_vsi {
+ struct avf_adapter *adapter; /* Backreference to associated adapter */
+ uint16_t vsi_id;
+ uint16_t nb_qps; /* Number of queue pairs VSI can occupy */
+ uint16_t nb_used_qps; /* Number of queue pairs VSI uses */
+ uint16_t max_macaddrs; /* Maximum number of MAC addresses */
+ uint16_t base_vector;
+ uint16_t msix_intr; /* The MSIX interrupt binds to VSI */
+};
+
+/* TODO: is that correct to assume the max number to be 16 ?*/
+#define AVF_MAX_MSIX_VECTORS 16
+
+/* Structure to store private data specific for VF instance. */
+struct avf_info {
+ uint16_t num_queue_pairs;
+ uint16_t max_pkt_len; /* Maximum packet length */
+ uint16_t mac_num; /* Number of MAC addresses */
+ bool promisc_unicast_enabled;
+ bool promisc_multicast_enabled;
+
+ struct virtchnl_version_info virtchnl_version;
+ struct virtchnl_vf_resource *vf_res; /* VF resource */
+ struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
+
+ volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+ uint32_t cmd_retval; /* return value of the cmd response from PF */
+ uint8_t *aq_resp; /* buffer to store the adminq response from PF */
+
+ /* Event from pf */
+ bool dev_closed;
+ bool link_up;
+ enum virtchnl_link_speed link_speed;
+
+ struct avf_vsi vsi;
+ bool vf_reset;
+ uint64_t flags;
+
+ uint8_t *rss_lut;
+ uint8_t *rss_key;
+ uint16_t nb_msix; /* number of MSI-X interrupts on Rx */
+ uint16_t msix_base; /* msix vector base from */
+ /* queue bitmask for each vector */
+ uint16_t rxq_map[AVF_MAX_MSIX_VECTORS];
+};
+
+#define AVF_MAX_PKT_TYPE 256
+
+/* Structure to store private data for each VF instance. */
+struct avf_adapter {
+ struct avf_hw hw;
+ struct rte_eth_dev *eth_dev;
+ struct avf_info vf;
+
+ bool rx_bulk_alloc_allowed;
+ /* For vector PMD */
+ bool rx_vec_allowed;
+ bool tx_vec_allowed;
+};
+
+/* AVF_DEV_PRIVATE_TO */
+#define AVF_DEV_PRIVATE_TO_ADAPTER(adapter) \
+ ((struct avf_adapter *)adapter)
+#define AVF_DEV_PRIVATE_TO_VF(adapter) \
+ (&((struct avf_adapter *)adapter)->vf)
+#define AVF_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct avf_adapter *)adapter)->hw)
+
+/* AVF_VSI_TO */
+#define AVF_VSI_TO_HW(vsi) \
+ (&(((struct avf_vsi *)vsi)->adapter->hw))
+#define AVF_VSI_TO_VF(vsi) \
+ (&(((struct avf_vsi *)vsi)->adapter->vf))
+#define AVF_VSI_TO_ETH_DEV(vsi) \
+ (((struct avf_vsi *)vsi)->adapter->eth_dev)
+
+static inline void
+avf_init_adminq_parameter(struct avf_hw *hw)
+{
+ hw->aq.num_arq_entries = AVF_AQ_LEN;
+ hw->aq.num_asq_entries = AVF_AQ_LEN;
+ hw->aq.arq_buf_size = AVF_AQ_BUF_SZ;
+ hw->aq.asq_buf_size = AVF_AQ_BUF_SZ;
+}
+
+static inline uint16_t
+avf_calc_itr_interval(int16_t interval)
+{
+ if (interval < 0 || interval > AVF_QUEUE_ITR_INTERVAL_MAX)
+ interval = AVF_QUEUE_ITR_INTERVAL_DEFAULT;
+
+ /* Convert to hardware count, as writing each 1 represents 2 us */
+ return interval / 2;
+}
+
+/* structure used for sending and checking response of virtchnl ops */
+struct avf_cmd_info {
+ enum virtchnl_ops ops;
+ uint8_t *in_args; /* buffer for sending */
+ uint32_t in_args_size; /* buffer size for sending */
+ uint8_t *out_buffer; /* buffer for response */
+ uint32_t out_size; /* buffer size for response */
+};
+
+/* clear current command. Only call in case execute
+ * _atomic_set_cmd successfully.
+ */
+static inline void
+_clear_cmd(struct avf_info *vf)
+{
+ rte_wmb();
+ vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;
+ vf->cmd_retval = VIRTCHNL_STATUS_SUCCESS;
+}
+
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_cmd(struct avf_info *vf, enum virtchnl_ops ops)
+{
+ int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+ if (!ret)
+ PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+ return !ret;
+}
+
+int avf_check_api_version(struct avf_adapter *adapter);
+int avf_get_vf_resource(struct avf_adapter *adapter);
+void avf_handle_virtchnl_msg(struct rte_eth_dev *dev);
+int avf_enable_vlan_strip(struct avf_adapter *adapter);
+int avf_disable_vlan_strip(struct avf_adapter *adapter);
+int avf_switch_queue(struct avf_adapter *adapter, uint16_t qid,
+ bool rx, bool on);
+int avf_enable_queues(struct avf_adapter *adapter);
+int avf_disable_queues(struct avf_adapter *adapter);
+int avf_configure_rss_lut(struct avf_adapter *adapter);
+int avf_configure_rss_key(struct avf_adapter *adapter);
+int avf_configure_queues(struct avf_adapter *adapter);
+int avf_config_irq_map(struct avf_adapter *adapter);
+void avf_add_del_all_mac_addr(struct avf_adapter *adapter, bool add);
+int avf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete);
+int avf_query_stats(struct avf_adapter *adapter,
+ struct virtchnl_eth_stats **pstats);
+int avf_config_promisc(struct avf_adapter *adapter, bool enable_unicast,
+ bool enable_multicast);
+int avf_add_del_eth_addr(struct avf_adapter *adapter,
+ struct ether_addr *addr, bool add);
+int avf_add_del_vlan(struct avf_adapter *adapter, uint16_t vlanid, bool add);
+#endif /* _AVF_ETHDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/avf_ethdev.c b/src/spdk/dpdk/drivers/net/avf/avf_ethdev.c
new file mode 100644
index 00000000..3a2baaf2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/avf_ethdev.c
@@ -0,0 +1,1446 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#include <rte_interrupts.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_dev.h>
+
+#include "avf_log.h"
+#include "base/avf_prototype.h"
+#include "base/avf_adminq_cmd.h"
+#include "base/avf_type.h"
+
+#include "avf.h"
+#include "avf_rxtx.h"
+
+static int avf_dev_configure(struct rte_eth_dev *dev);
+static int avf_dev_start(struct rte_eth_dev *dev);
+static void avf_dev_stop(struct rte_eth_dev *dev);
+static void avf_dev_close(struct rte_eth_dev *dev);
+static void avf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static const uint32_t *avf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static int avf_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void avf_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void avf_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void avf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void avf_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int avf_dev_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ uint32_t index,
+ uint32_t pool);
+static void avf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static int avf_dev_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static int avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int avf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int avf_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int avf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+static int avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int avf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+
+int avf_logtype_init;
+int avf_logtype_driver;
+
+static const struct rte_pci_id pci_id_avf_map[] = {
+ { RTE_PCI_DEVICE(AVF_INTEL_VENDOR_ID, AVF_DEV_ID_ADAPTIVE_VF) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct eth_dev_ops avf_eth_dev_ops = {
+ .dev_configure = avf_dev_configure,
+ .dev_start = avf_dev_start,
+ .dev_stop = avf_dev_stop,
+ .dev_close = avf_dev_close,
+ .dev_infos_get = avf_dev_info_get,
+ .dev_supported_ptypes_get = avf_dev_supported_ptypes_get,
+ .link_update = avf_dev_link_update,
+ .stats_get = avf_dev_stats_get,
+ .promiscuous_enable = avf_dev_promiscuous_enable,
+ .promiscuous_disable = avf_dev_promiscuous_disable,
+ .allmulticast_enable = avf_dev_allmulticast_enable,
+ .allmulticast_disable = avf_dev_allmulticast_disable,
+ .mac_addr_add = avf_dev_add_mac_addr,
+ .mac_addr_remove = avf_dev_del_mac_addr,
+ .vlan_filter_set = avf_dev_vlan_filter_set,
+ .vlan_offload_set = avf_dev_vlan_offload_set,
+ .rx_queue_start = avf_dev_rx_queue_start,
+ .rx_queue_stop = avf_dev_rx_queue_stop,
+ .tx_queue_start = avf_dev_tx_queue_start,
+ .tx_queue_stop = avf_dev_tx_queue_stop,
+ .rx_queue_setup = avf_dev_rx_queue_setup,
+ .rx_queue_release = avf_dev_rx_queue_release,
+ .tx_queue_setup = avf_dev_tx_queue_setup,
+ .tx_queue_release = avf_dev_tx_queue_release,
+ .mac_addr_set = avf_dev_set_default_mac_addr,
+ .reta_update = avf_dev_rss_reta_update,
+ .reta_query = avf_dev_rss_reta_query,
+ .rss_hash_update = avf_dev_rss_hash_update,
+ .rss_hash_conf_get = avf_dev_rss_hash_conf_get,
+ .rxq_info_get = avf_dev_rxq_info_get,
+ .txq_info_get = avf_dev_txq_info_get,
+ .rx_queue_count = avf_dev_rxq_count,
+ .rx_descriptor_status = avf_dev_rx_desc_status,
+ .tx_descriptor_status = avf_dev_tx_desc_status,
+ .mtu_set = avf_dev_mtu_set,
+ .rx_queue_intr_enable = avf_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = avf_dev_rx_queue_intr_disable,
+};
+
+static int
+avf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *ad =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(ad);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+ ad->rx_bulk_alloc_allowed = true;
+#ifdef RTE_LIBRTE_AVF_INC_VECTOR
+ /* Initialize to TRUE. If any of Rx queues doesn't meet the
+ * vector Rx/Tx preconditions, it will be reset.
+ */
+ ad->rx_vec_allowed = true;
+ ad->tx_vec_allowed = true;
+#else
+ ad->rx_vec_allowed = false;
+ ad->tx_vec_allowed = false;
+#endif
+
+ /* Vlan stripping setting */
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ avf_enable_vlan_strip(ad);
+ else
+ avf_disable_vlan_strip(ad);
+ }
+ return 0;
+}
+
+static int
+avf_init_rss(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct rte_eth_rss_conf *rss_conf;
+ uint8_t i, j, nb_q;
+ int ret;
+
+ rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
+ AVF_MAX_NUM_QUEUES);
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
+ PMD_DRV_LOG(DEBUG, "RSS is not supported");
+ return -ENOTSUP;
+ }
+ if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
+ /* set all lut items to default queue */
+ for (i = 0; i < vf->vf_res->rss_lut_size; i++)
+ vf->rss_lut[i] = 0;
+ ret = avf_configure_rss_lut(adapter);
+ return ret;
+ }
+
+ /* In AVF, RSS enablement is set by PF driver. It is not supported
+ * to set based on rss_conf->rss_hf.
+ */
+
+ /* configure RSS key */
+ if (!rss_conf->rss_key) {
+ /* Calculate the default hash key */
+ for (i = 0; i <= vf->vf_res->rss_key_size; i++)
+ vf->rss_key[i] = (uint8_t)rte_rand();
+ } else
+ rte_memcpy(vf->rss_key, rss_conf->rss_key,
+ RTE_MIN(rss_conf->rss_key_len,
+ vf->vf_res->rss_key_size));
+
+ /* init RSS LUT table */
+ for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
+ if (j >= nb_q)
+ j = 0;
+ vf->rss_lut[i] = j;
+ }
+ /* send virtchnnl ops to configure rss*/
+ ret = avf_configure_rss_lut(adapter);
+ if (ret)
+ return ret;
+ ret = avf_configure_rss_key(adapter);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = dev->data;
+ uint16_t buf_size, max_pkt_len, len;
+
+ buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+
+ /* Calculate the maximum packet length allowed */
+ len = rxq->rx_buf_len * AVF_MAX_CHAINED_RX_BUFFERS;
+ max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+ /* Check if the jumbo frame and maximum packet length are set
+ * correctly.
+ */
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (max_pkt_len <= ETHER_MAX_LEN ||
+ max_pkt_len > AVF_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, "
+ "as jumbo frame is enabled",
+ (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)AVF_FRAME_SIZE_MAX);
+ return -EINVAL;
+ }
+ } else {
+ if (max_pkt_len < ETHER_MIN_LEN ||
+ max_pkt_len > ETHER_MAX_LEN) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, "
+ "as jumbo frame is disabled",
+ (uint32_t)ETHER_MIN_LEN,
+ (uint32_t)ETHER_MAX_LEN);
+ return -EINVAL;
+ }
+ }
+
+ rxq->max_pkt_len = max_pkt_len;
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ (rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE) > buf_size) {
+ dev_data->scattered_rx = 1;
+ }
+ AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ AVF_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+avf_init_queues(struct rte_eth_dev *dev)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct avf_rx_queue **rxq =
+ (struct avf_rx_queue **)dev->data->rx_queues;
+ struct avf_tx_queue **txq =
+ (struct avf_tx_queue **)dev->data->tx_queues;
+ int i, ret = AVF_SUCCESS;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!rxq[i] || !rxq[i]->q_set)
+ continue;
+ ret = avf_init_rxq(dev, rxq[i]);
+ if (ret != AVF_SUCCESS)
+ break;
+ }
+ /* set rx/tx function to vector/scatter/single-segment
+ * according to parameters
+ */
+ avf_set_rx_function(dev);
+ avf_set_tx_function(dev);
+
+ return ret;
+}
+
+static int avf_config_rx_queues_irqs(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ uint16_t interval, i;
+ int vec;
+
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
+ if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (!intr_handle->intr_vec) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
+ dev->data->nb_rx_queues);
+ return -1;
+ }
+ }
+
+ if (!dev->data->dev_conf.intr_conf.rxq ||
+ !rte_intr_dp_is_en(intr_handle)) {
+ /* Rx interrupt disabled, Map interrupt only for writeback */
+ vf->nb_msix = 1;
+ if (vf->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+ /* If WB_ON_ITR supports, enable it */
+ vf->msix_base = AVF_RX_VEC_START;
+ AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(vf->msix_base - 1),
+ AVFINT_DYN_CTLN1_ITR_INDX_MASK |
+ AVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
+ } else {
+ /* If no WB_ON_ITR offload flags, need to set
+ * interrupt for descriptor write back.
+ */
+ vf->msix_base = AVF_MISC_VEC_ID;
+
+ /* set ITR to max */
+ interval = avf_calc_itr_interval(
+ AVF_QUEUE_ITR_INTERVAL_MAX);
+ AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
+ AVFINT_DYN_CTL01_INTENA_MASK |
+ (AVF_ITR_INDEX_DEFAULT <<
+ AVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+ (interval <<
+ AVFINT_DYN_CTL01_INTERVAL_SHIFT));
+ }
+ AVF_WRITE_FLUSH(hw);
+ /* map all queues to the same interrupt */
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ vf->rxq_map[vf->msix_base] |= 1 << i;
+ } else {
+ if (!rte_intr_allow_others(intr_handle)) {
+ vf->nb_msix = 1;
+ vf->msix_base = AVF_MISC_VEC_ID;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vf->rxq_map[vf->msix_base] |= 1 << i;
+ intr_handle->intr_vec[i] = AVF_MISC_VEC_ID;
+ }
+ PMD_DRV_LOG(DEBUG,
+ "vector %u are mapping to all Rx queues",
+ vf->msix_base);
+ } else {
+ /* If Rx interrupt is reuquired, and we can use
+ * multi interrupts, then the vec is from 1
+ */
+ vf->nb_msix = RTE_MIN(vf->vf_res->max_vectors,
+ intr_handle->nb_efd);
+ vf->msix_base = AVF_RX_VEC_START;
+ vec = AVF_RX_VEC_START;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vf->rxq_map[vec] |= 1 << i;
+ intr_handle->intr_vec[i] = vec++;
+ if (vec >= vf->nb_msix)
+ vec = AVF_RX_VEC_START;
+ }
+ PMD_DRV_LOG(DEBUG,
+ "%u vectors are mapping to %u Rx queues",
+ vf->nb_msix, dev->data->nb_rx_queues);
+ }
+ }
+
+ if (avf_config_irq_map(adapter)) {
+ PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+avf_start_queues(struct rte_eth_dev *dev)
+{
+ struct avf_rx_queue *rxq;
+ struct avf_tx_queue *txq;
+ int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq->tx_deferred_start)
+ continue;
+ if (avf_dev_tx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ return -1;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq->rx_deferred_start)
+ continue;
+ if (avf_dev_rx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+avf_dev_start(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->adapter_stopped = 0;
+
+ vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
+
+ if (avf_init_queues(dev) != 0) {
+ PMD_DRV_LOG(ERR, "failed to do Queue init");
+ return -1;
+ }
+
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ if (avf_init_rss(adapter) != 0) {
+ PMD_DRV_LOG(ERR, "configure rss failed");
+ goto err_rss;
+ }
+ }
+
+ if (avf_configure_queues(adapter) != 0) {
+ PMD_DRV_LOG(ERR, "configure queues failed");
+ goto err_queue;
+ }
+
+ if (avf_config_rx_queues_irqs(dev, intr_handle) != 0) {
+ PMD_DRV_LOG(ERR, "configure irq failed");
+ goto err_queue;
+ }
+ /* re-enable intr again, because efd assign may change */
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ rte_intr_disable(intr_handle);
+ rte_intr_enable(intr_handle);
+ }
+
+ /* Set all mac addrs */
+ avf_add_del_all_mac_addr(adapter, TRUE);
+
+ if (avf_start_queues(dev) != 0) {
+ PMD_DRV_LOG(ERR, "enable queues failed");
+ goto err_mac;
+ }
+
+ return 0;
+
+err_mac:
+ avf_add_del_all_mac_addr(adapter, FALSE);
+err_queue:
+err_rss:
+ return -1;
+}
+
+static void
+avf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->adapter_stopped == 1)
+ return;
+
+ avf_stop_queues(dev);
+
+ /* Disable the interrupt for Rx */
+ rte_intr_efd_disable(intr_handle);
+ /* Rx interrupt vector mapping free */
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+
+ /* remove all mac addrs */
+ avf_add_del_all_mac_addr(adapter, FALSE);
+ hw->adapter_stopped = 1;
+}
+
+static void
+avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ memset(dev_info, 0, sizeof(*dev_info));
+ dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
+ dev_info->hash_key_size = vf->vf_res->rss_key_size;
+ dev_info->reta_size = vf->vf_res->rss_lut_size;
+ dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
+ dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = AVF_MAX_RING_DESC,
+ .nb_min = AVF_MIN_RING_DESC,
+ .nb_align = AVF_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = AVF_MAX_RING_DESC,
+ .nb_min = AVF_MIN_RING_DESC,
+ .nb_align = AVF_ALIGN_RING_DESC,
+ };
+}
+
+static const uint32_t *
+avf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+ return ptypes;
+}
+
+int
+avf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct rte_eth_link new_link;
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ /* Only read status info stored in VF, and the info is updated
+ * when receive LINK_CHANGE evnet from PF by Virtchnnl.
+ */
+ switch (vf->link_speed) {
+ case VIRTCHNL_LINK_SPEED_100MB:
+ new_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case VIRTCHNL_LINK_SPEED_1GB:
+ new_link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case VIRTCHNL_LINK_SPEED_10GB:
+ new_link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case VIRTCHNL_LINK_SPEED_20GB:
+ new_link.link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case VIRTCHNL_LINK_SPEED_25GB:
+ new_link.link_speed = ETH_SPEED_NUM_25G;
+ break;
+ case VIRTCHNL_LINK_SPEED_40GB:
+ new_link.link_speed = ETH_SPEED_NUM_40G;
+ break;
+ default:
+ new_link.link_speed = ETH_SPEED_NUM_NONE;
+ break;
+ }
+
+ new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = vf->link_up ? ETH_LINK_UP :
+ ETH_LINK_DOWN;
+ new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+
+ if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
+ *(uint64_t *)&dev->data->dev_link,
+ *(uint64_t *)&new_link) == 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+avf_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ int ret;
+
+ if (vf->promisc_unicast_enabled)
+ return;
+
+ ret = avf_config_promisc(adapter, TRUE, vf->promisc_multicast_enabled);
+ if (!ret)
+ vf->promisc_unicast_enabled = TRUE;
+}
+
+static void
+avf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ int ret;
+
+ if (!vf->promisc_unicast_enabled)
+ return;
+
+ ret = avf_config_promisc(adapter, FALSE, vf->promisc_multicast_enabled);
+ if (!ret)
+ vf->promisc_unicast_enabled = FALSE;
+}
+
+static void
+avf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ int ret;
+
+ if (vf->promisc_multicast_enabled)
+ return;
+
+ ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, TRUE);
+ if (!ret)
+ vf->promisc_multicast_enabled = TRUE;
+}
+
+static void
+avf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ int ret;
+
+ if (!vf->promisc_multicast_enabled)
+ return;
+
+ ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, FALSE);
+ if (!ret)
+ vf->promisc_multicast_enabled = FALSE;
+}
+
+static int
+avf_dev_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ int err;
+
+ if (is_zero_ether_addr(addr)) {
+ PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
+ return -EINVAL;
+ }
+
+ err = avf_add_del_eth_addr(adapter, addr, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to add MAC address");
+ return -EIO;
+ }
+
+ vf->mac_num++;
+
+ return 0;
+}
+
+static void
+avf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct ether_addr *addr;
+ int err;
+
+ addr = &dev->data->mac_addrs[index];
+
+ err = avf_add_del_eth_addr(adapter, addr, FALSE);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to delete MAC address");
+
+ vf->mac_num--;
+}
+
+static int
+avf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ int err;
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ return -ENOTSUP;
+
+ err = avf_add_del_vlan(adapter, vlan_id, on);
+ if (err)
+ return -EIO;
+ return 0;
+}
+
+static int
+avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ int err;
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ return -ENOTSUP;
+
+ /* Vlan stripping setting */
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ err = avf_enable_vlan_strip(adapter);
+ else
+ err = avf_disable_vlan_strip(adapter);
+
+ if (err)
+ return -EIO;
+ }
+ return 0;
+}
+
+static int
+avf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ uint8_t *lut;
+ uint16_t i, idx, shift;
+ int ret;
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ return -ENOTSUP;
+
+ if (reta_size != vf->vf_res->rss_lut_size) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number of hardware can "
+ "support (%d)", reta_size, vf->vf_res->rss_lut_size);
+ return -EINVAL;
+ }
+
+ lut = rte_zmalloc("rss_lut", reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+ /* store the old lut table temporarily */
+ rte_memcpy(lut, vf->rss_lut, reta_size);
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ lut[i] = reta_conf[idx].reta[shift];
+ }
+
+ rte_memcpy(vf->rss_lut, lut, reta_size);
+ /* send virtchnnl ops to configure rss*/
+ ret = avf_configure_rss_lut(adapter);
+ if (ret) /* revert back */
+ rte_memcpy(vf->rss_lut, lut, reta_size);
+ rte_free(lut);
+
+ return ret;
+}
+
+static int
+avf_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ uint16_t i, idx, shift;
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ return -ENOTSUP;
+
+ if (reta_size != vf->vf_res->rss_lut_size) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number of hardware can "
+ "support (%d)", reta_size, vf->vf_res->rss_lut_size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = vf->rss_lut[i];
+ }
+
+ return 0;
+}
+
+static int
+avf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ return -ENOTSUP;
+
+ /* HENA setting, it is enabled by default, no change */
+ if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
+ PMD_DRV_LOG(DEBUG, "No key to be configured");
+ return 0;
+ } else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) {
+ PMD_DRV_LOG(ERR, "The size of hash key configured "
+ "(%d) doesn't match the size of hardware can "
+ "support (%d)", rss_conf->rss_key_len,
+ vf->vf_res->rss_key_size);
+ return -EINVAL;
+ }
+
+ rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);
+
+ return avf_configure_rss_key(adapter);
+}
+
+static int
+avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ return -ENOTSUP;
+
+ /* Just set it to default value now. */
+ rss_conf->rss_hf = AVF_RSS_OFFLOAD_ALL;
+
+ if (!rss_conf->rss_key)
+ return 0;
+
+ rss_conf->rss_key_len = vf->vf_res->rss_key_size;
+ rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);
+
+ return 0;
+}
+
+static int
+avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint32_t frame_size = mtu + AVF_ETH_OVERHEAD;
+ int ret = 0;
+
+ if (mtu < ETHER_MIN_MTU || frame_size > AVF_FRAME_SIZE_MAX)
+ return -EINVAL;
+
+ /* mtu setting is forbidden if port is start */
+ if (dev->data->dev_started) {
+ PMD_DRV_LOG(ERR, "port must be stopped before configuration");
+ return -EBUSY;
+ }
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return ret;
+}
+
+static int
+avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct ether_addr *perm_addr, *old_addr;
+ int ret;
+
+ old_addr = (struct ether_addr *)hw->mac.addr;
+ perm_addr = (struct ether_addr *)hw->mac.perm_addr;
+
+ if (is_same_ether_addr(mac_addr, old_addr))
+ return 0;
+
+ /* If the MAC address is configured by host, skip the setting */
+ if (is_valid_assigned_ether_addr(perm_addr))
+ return -EPERM;
+
+ ret = avf_add_del_eth_addr(adapter, old_addr, FALSE);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
+ " %02X:%02X:%02X:%02X:%02X:%02X",
+ old_addr->addr_bytes[0],
+ old_addr->addr_bytes[1],
+ old_addr->addr_bytes[2],
+ old_addr->addr_bytes[3],
+ old_addr->addr_bytes[4],
+ old_addr->addr_bytes[5]);
+
+ ret = avf_add_del_eth_addr(adapter, mac_addr, TRUE);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Fail to add new MAC:"
+ " %02X:%02X:%02X:%02X:%02X:%02X",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5]);
+
+ if (ret)
+ return -EIO;
+
+ ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
+ return 0;
+}
+
+static int
+avf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct virtchnl_eth_stats *pstats = NULL;
+ int ret;
+
+ ret = avf_query_stats(adapter, &pstats);
+ if (ret == 0) {
+ stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
+ pstats->rx_broadcast;
+ stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
+ pstats->tx_unicast;
+ stats->imissed = pstats->rx_discards;
+ stats->oerrors = pstats->tx_errors + pstats->tx_discards;
+ stats->ibytes = pstats->rx_bytes;
+ stats->obytes = pstats->tx_bytes;
+ } else {
+ PMD_DRV_LOG(ERR, "Get statistics failed");
+ }
+ return -EIO;
+}
+
+static int
+avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ uint16_t msix_intr;
+
+ msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
+ if (msix_intr == AVF_MISC_VEC_ID) {
+ PMD_DRV_LOG(INFO, "MISC is also enabled for control");
+ AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
+ AVFINT_DYN_CTL01_INTENA_MASK |
+ AVFINT_DYN_CTL01_ITR_INDX_MASK);
+ } else {
+ AVF_WRITE_REG(hw,
+ AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START),
+ AVFINT_DYN_CTLN1_INTENA_MASK |
+ AVFINT_DYN_CTLN1_ITR_INDX_MASK);
+ }
+
+ AVF_WRITE_FLUSH(hw);
+
+ rte_intr_enable(&pci_dev->intr_handle);
+
+ return 0;
+}
+
+static int
+avf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t msix_intr;
+
+ msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
+ if (msix_intr == AVF_MISC_VEC_ID) {
+ PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
+ return -EIO;
+ }
+
+ AVF_WRITE_REG(hw,
+ AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START),
+ 0);
+
+ AVF_WRITE_FLUSH(hw);
+ return 0;
+}
+
+static int
+avf_check_vf_reset_done(struct avf_hw *hw)
+{
+ int i, reset;
+
+ for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
+ reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
+ AVFGEN_RSTAT_VFR_STATE_MASK;
+ reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
+ if (reset == VIRTCHNL_VFR_VFACTIVE ||
+ reset == VIRTCHNL_VFR_COMPLETED)
+ break;
+ rte_delay_ms(20);
+ }
+
+ if (i >= AVF_RESET_WAIT_CNT)
+ return -1;
+
+ return 0;
+}
+
+static int
+avf_init_vf(struct rte_eth_dev *dev)
+{
+ int i, err, bufsz;
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ err = avf_set_mac_type(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
+ goto err;
+ }
+
+ err = avf_check_vf_reset_done(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "VF is still resetting");
+ goto err;
+ }
+
+ avf_init_adminq_parameter(hw);
+ err = avf_init_adminq(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
+ goto err;
+ }
+
+ vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
+ if (!vf->aq_resp) {
+ PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
+ goto err_aq;
+ }
+ if (avf_check_api_version(adapter) != 0) {
+ PMD_INIT_LOG(ERR, "check_api version failed");
+ goto err_api;
+ }
+
+ bufsz = sizeof(struct virtchnl_vf_resource) +
+ (AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
+ vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
+ if (!vf->vf_res) {
+ PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
+ goto err_api;
+ }
+ if (avf_get_vf_resource(adapter) != 0) {
+ PMD_INIT_LOG(ERR, "avf_get_vf_config failed");
+ goto err_alloc;
+ }
+ /* Allocate memort for RSS info */
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vf->rss_key = rte_zmalloc("rss_key",
+ vf->vf_res->rss_key_size, 0);
+ if (!vf->rss_key) {
+ PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
+ goto err_rss;
+ }
+ vf->rss_lut = rte_zmalloc("rss_lut",
+ vf->vf_res->rss_lut_size, 0);
+ if (!vf->rss_lut) {
+ PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
+ goto err_rss;
+ }
+ }
+ return 0;
+err_rss:
+ rte_free(vf->rss_key);
+ rte_free(vf->rss_lut);
+err_alloc:
+ rte_free(vf->vf_res);
+ vf->vsi_res = NULL;
+err_api:
+ rte_free(vf->aq_resp);
+err_aq:
+ avf_shutdown_adminq(hw);
+err:
+ return -1;
+}
+
+/* Enable default admin queue interrupt setting */
+static inline void
+avf_enable_irq0(struct avf_hw *hw)
+{
+ /* Enable admin queue interrupt trigger */
+ AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);
+
+ AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
+ AVFINT_DYN_CTL01_ITR_INDX_MASK);
+
+ AVF_WRITE_FLUSH(hw);
+}
+
+static inline void
+avf_disable_irq0(struct avf_hw *hw)
+{
+ /* Disable all interrupt types */
+ AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, 0);
+ AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
+ AVFINT_DYN_CTL01_ITR_INDX_MASK);
+ AVF_WRITE_FLUSH(hw);
+}
+
+static void
+avf_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ avf_disable_irq0(hw);
+
+ avf_handle_virtchnl_msg(dev);
+
+done:
+ avf_enable_irq0(hw);
+}
+
+static int
+avf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* assign ops func pointer */
+ eth_dev->dev_ops = &avf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &avf_recv_pkts;
+ eth_dev->tx_pkt_burst = &avf_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &avf_prep_pkts;
+
+ /* For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check if we need a different RX
+ * and TX function.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ avf_set_rx_function(eth_dev);
+ avf_set_tx_function(eth_dev);
+ return 0;
+ }
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->bus.bus_id = pci_dev->addr.bus;
+ hw->bus.device = pci_dev->addr.devid;
+ hw->bus.func = pci_dev->addr.function;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+ adapter->eth_dev = eth_dev;
+
+ if (avf_init_vf(eth_dev) != 0) {
+ PMD_INIT_LOG(ERR, "Init vf failed");
+ return -1;
+ }
+
+ /* copy mac addr */
+ eth_dev->data->mac_addrs = rte_zmalloc(
+ "avf_mac",
+ ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
+ 0);
+ if (!eth_dev->data->mac_addrs) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
+ " store MAC addresses",
+ ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
+ return -ENOMEM;
+ }
+ /* If the MAC address is not configured by host,
+ * generate a random one.
+ */
+ if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
+ eth_random_addr(hw->mac.addr);
+ ether_addr_copy((struct ether_addr *)hw->mac.addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ /* register callback func to eal lib */
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ avf_dev_interrupt_handler,
+ (void *)eth_dev);
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(&pci_dev->intr_handle);
+
+ /* configure and enable device interrupt */
+ avf_enable_irq0(hw);
+
+ return 0;
+}
+
+static void
+avf_dev_close(struct rte_eth_dev *dev)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ avf_dev_stop(dev);
+ avf_shutdown_adminq(hw);
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+
+ /* unregister callback func from eal lib */
+ rte_intr_callback_unregister(intr_handle,
+ avf_dev_interrupt_handler, dev);
+ avf_disable_irq0(hw);
+}
+
+static int
+avf_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+ if (hw->adapter_stopped == 0)
+ avf_dev_close(dev);
+
+ rte_free(vf->vf_res);
+ vf->vsi_res = NULL;
+ vf->vf_res = NULL;
+
+ rte_free(vf->aq_resp);
+ vf->aq_resp = NULL;
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ if (vf->rss_lut) {
+ rte_free(vf->rss_lut);
+ vf->rss_lut = NULL;
+ }
+ if (vf->rss_key) {
+ rte_free(vf->rss_key);
+ vf->rss_key = NULL;
+ }
+
+ return 0;
+}
+
+static int eth_avf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct avf_adapter), avf_dev_init);
+}
+
+static int eth_avf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, avf_dev_uninit);
+}
+
+/* Adaptive virtual function driver struct */
+static struct rte_pci_driver rte_avf_pmd = {
+ .id_table = pci_id_avf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = eth_avf_pci_probe,
+ .remove = eth_avf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");
+RTE_INIT(avf_init_log)
+{
+ avf_logtype_init = rte_log_register("pmd.net.avf.init");
+ if (avf_logtype_init >= 0)
+ rte_log_set_level(avf_logtype_init, RTE_LOG_NOTICE);
+ avf_logtype_driver = rte_log_register("pmd.net.avf.driver");
+ if (avf_logtype_driver >= 0)
+ rte_log_set_level(avf_logtype_driver, RTE_LOG_NOTICE);
+}
+
+/* memory func for base code */
+enum avf_status_code
+avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
+ struct avf_dma_mem *mem,
+ u64 size,
+ u32 alignment)
+{
+ const struct rte_memzone *mz = NULL;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+
+ if (!mem)
+ return AVF_ERR_PARAM;
+
+ snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
+ if (!mz)
+ return AVF_ERR_NO_MEMORY;
+
+ mem->size = size;
+ mem->va = mz->addr;
+ mem->pa = mz->phys_addr;
+ mem->zone = (const void *)mz;
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s allocated with physical address: %"PRIu64,
+ mz->name, mem->pa);
+
+ return AVF_SUCCESS;
+}
+
+enum avf_status_code
+avf_free_dma_mem_d(__rte_unused struct avf_hw *hw,
+ struct avf_dma_mem *mem)
+{
+ if (!mem)
+ return AVF_ERR_PARAM;
+
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s to be freed with physical address: %"PRIu64,
+ ((const struct rte_memzone *)mem->zone)->name, mem->pa);
+ rte_memzone_free((const struct rte_memzone *)mem->zone);
+ mem->zone = NULL;
+ mem->va = NULL;
+ mem->pa = (u64)0;
+
+ return AVF_SUCCESS;
+}
+
+enum avf_status_code
+avf_allocate_virt_mem_d(__rte_unused struct avf_hw *hw,
+ struct avf_virt_mem *mem,
+ u32 size)
+{
+ if (!mem)
+ return AVF_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = rte_zmalloc("avf", size, 0);
+
+ if (mem->va)
+ return AVF_SUCCESS;
+ else
+ return AVF_ERR_NO_MEMORY;
+}
+
+enum avf_status_code
+avf_free_virt_mem_d(__rte_unused struct avf_hw *hw,
+ struct avf_virt_mem *mem)
+{
+ if (!mem)
+ return AVF_ERR_PARAM;
+
+ rte_free(mem->va);
+ mem->va = NULL;
+
+ return AVF_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/avf/avf_log.h b/src/spdk/dpdk/drivers/net/avf/avf_log.h
new file mode 100644
index 00000000..8d574d3f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/avf_log.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _AVF_LOG_H_
+#define _AVF_LOG_H_
+
+extern int avf_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, avf_logtype_init, "%s(): " fmt "\n", \
+ __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+extern int avf_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, avf_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_AVF_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_AVF_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_AVF_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#endif /* _AVF_LOG_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/avf_rxtx.c b/src/spdk/dpdk/drivers/net/avf/avf_rxtx.c
new file mode 100644
index 00000000..e03a136f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/avf_rxtx.c
@@ -0,0 +1,1962 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_udp.h>
+#include <rte_ip.h>
+#include <rte_net.h>
+
+#include "avf_log.h"
+#include "base/avf_prototype.h"
+#include "base/avf_type.h"
+#include "avf.h"
+#include "avf_rxtx.h"
+
+static inline int
+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
+{
+ /* The following constraints must be satisfied:
+ * thresh < rxq->nb_rx_desc
+ */
+ if (thresh >= nb_desc) {
+ PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
+ thresh, nb_desc);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int
+check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
+ uint16_t tx_free_thresh)
+{
+ /* TX descriptors will have their RS bit set after tx_rs_thresh
+ * descriptors have been used. The TX descriptor ring will be cleaned
+ * after tx_free_thresh descriptors are used or if the number of
+ * descriptors required to transmit a packet is greater than the
+ * number of free TX descriptors.
+ *
+ * The following constraints must be satisfied:
+ * - tx_rs_thresh must be less than the size of the ring minus 2.
+ * - tx_free_thresh must be less than the size of the ring minus 3.
+ * - tx_rs_thresh must be less than or equal to tx_free_thresh.
+ * - tx_rs_thresh must be a divisor of the ring size.
+ *
+ * One descriptor in the TX ring is used as a sentinel to avoid a H/W
+ * race condition, hence the maximum threshold constraints. When set
+ * to zero use default values.
+ */
+ if (tx_rs_thresh >= (nb_desc - 2)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
+ "number of TX descriptors (%u) minus 2",
+ tx_rs_thresh, nb_desc);
+ return -EINVAL;
+ }
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
+ "number of TX descriptors (%u) minus 3.",
+ tx_free_thresh, nb_desc);
+ return -EINVAL;
+ }
+ if (tx_rs_thresh > tx_free_thresh) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
+ "equal to tx_free_thresh (%u).",
+ tx_rs_thresh, tx_free_thresh);
+ return -EINVAL;
+ }
+ if ((nb_desc % tx_rs_thresh) != 0) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
+ "number of TX descriptors (%u).",
+ tx_rs_thresh, nb_desc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef RTE_LIBRTE_AVF_INC_VECTOR
+static inline bool
+check_rx_vec_allow(struct avf_rx_queue *rxq)
+{
+ if (rxq->rx_free_thresh >= AVF_VPMD_RX_MAX_BURST &&
+ rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
+ PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
+ return TRUE;
+ }
+
+ PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
+ return FALSE;
+}
+
+static inline bool
+check_tx_vec_allow(struct avf_tx_queue *txq)
+{
+ if (!(txq->offloads & AVF_NO_VECTOR_FLAGS) &&
+ txq->rs_thresh >= AVF_VPMD_TX_MAX_BURST &&
+ txq->rs_thresh <= AVF_VPMD_TX_MAX_FREE_BUF) {
+ PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
+ return TRUE;
+ }
+ PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
+ return FALSE;
+}
+#endif
+
+static inline bool
+check_rx_bulk_allow(struct avf_rx_queue *rxq)
+{
+ int ret = TRUE;
+
+ if (!(rxq->rx_free_thresh >= AVF_RX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "AVF_RX_MAX_BURST=%d",
+ rxq->rx_free_thresh, AVF_RX_MAX_BURST);
+ ret = FALSE;
+ } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "rxq->rx_free_thresh=%d",
+ rxq->nb_rx_desc, rxq->rx_free_thresh);
+ ret = FALSE;
+ }
+ return ret;
+}
+
+static inline void
+reset_rx_queue(struct avf_rx_queue *rxq)
+{
+ uint16_t len, i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc + AVF_RX_MAX_BURST;
+
+ for (i = 0; i < len * sizeof(union avf_rx_desc); i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+ for (i = 0; i < AVF_RX_MAX_BURST; i++)
+ rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+ /* for rx bulk */
+ rxq->rx_nb_avail = 0;
+ rxq->rx_next_avail = 0;
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+}
+
+static inline void
+reset_tx_queue(struct avf_tx_queue *txq)
+{
+ struct avf_tx_entry *txe;
+ uint16_t i, prev, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ txe = txq->sw_ring;
+ size = sizeof(struct avf_tx_desc) * txq->nb_tx_desc;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->tx_ring)[i] = 0;
+
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->tx_ring[i].cmd_type_offset_bsz =
+ rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE);
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_tail = 0;
+ txq->nb_used = 0;
+
+ txq->last_desc_cleaned = txq->nb_tx_desc - 1;
+ txq->nb_free = txq->nb_tx_desc - 1;
+
+ txq->next_dd = txq->rs_thresh - 1;
+ txq->next_rs = txq->rs_thresh - 1;
+}
+
+static int
+alloc_rxq_mbufs(struct avf_rx_queue *rxq)
+{
+ volatile union avf_rx_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &rxq->rx_ring[i];
+ rxd->read.pkt_addr = dma_addr;
+ rxd->read.hdr_addr = 0;
+#ifndef RTE_LIBRTE_AVF_16BYTE_RX_DESC
+ rxd->read.rsvd1 = 0;
+ rxd->read.rsvd2 = 0;
+#endif
+
+ rxq->sw_ring[i] = mbuf;
+ }
+
+ return 0;
+}
+
+static inline void
+release_rxq_mbufs(struct avf_rx_queue *rxq)
+{
+ struct rte_mbuf *mbuf;
+ uint16_t i;
+
+ if (!rxq->sw_ring)
+ return;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i]) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ rxq->sw_ring[i] = NULL;
+ }
+ }
+
+ /* for rx bulk */
+ if (rxq->rx_nb_avail == 0)
+ return;
+ for (i = 0; i < rxq->rx_nb_avail; i++) {
+ struct rte_mbuf *mbuf;
+
+ mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
+ rte_pktmbuf_free_seg(mbuf);
+ }
+ rxq->rx_nb_avail = 0;
+}
+
+static inline void
+release_txq_mbufs(struct avf_tx_queue *txq)
+{
+ uint16_t i;
+
+ if (!txq || !txq->sw_ring) {
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+}
+
+static const struct avf_rxq_ops def_rxq_ops = {
+ .release_mbufs = release_rxq_mbufs,
+};
+
+static const struct avf_txq_ops def_txq_ops = {
+ .release_mbufs = release_txq_mbufs,
+};
+
+int
+avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_adapter *ad =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_rx_queue *rxq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t len, i;
+ uint16_t rx_free_thresh;
+ uint16_t base, bsf, tc_mapping;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
+ nb_desc > AVF_MAX_RING_DESC ||
+ nb_desc < AVF_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
+ "invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Check free threshold */
+ rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+ AVF_DEFAULT_RX_FREE_THRESH :
+ rx_conf->rx_free_thresh;
+ if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
+ return -EINVAL;
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ avf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("avf rxq",
+ sizeof(struct avf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for "
+ "rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq->mp = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->crc_len = 0; /* crc stripping by default */
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->rx_hdr_len = 0;
+
+ len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+ rxq->rx_buf_len = RTE_ALIGN(len, (1 << AVF_RXQ_CTX_DBUFF_SHIFT));
+
+ /* Allocate the software ring. */
+ len = nb_desc + AVF_RX_MAX_BURST;
+ rxq->sw_ring =
+ rte_zmalloc_socket("avf rx sw ring",
+ sizeof(struct rte_mbuf *) * len,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+
+ /* Allocate the maximun number of RX ring hardware descriptor with
+ * a liitle more to support bulk allocate.
+ */
+ len = AVF_MAX_RING_DESC + AVF_RX_MAX_BURST;
+ ring_size = RTE_ALIGN(len * sizeof(union avf_rx_desc),
+ AVF_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ ring_size, AVF_RING_BASE_ALIGN,
+ socket_id);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+ /* Zero all the descriptors in the ring. */
+ memset(mz->addr, 0, ring_size);
+ rxq->rx_ring_phys_addr = mz->iova;
+ rxq->rx_ring = (union avf_rx_desc *)mz->addr;
+
+ rxq->mz = mz;
+ reset_rx_queue(rxq);
+ rxq->q_set = TRUE;
+ dev->data->rx_queues[queue_idx] = rxq;
+ rxq->qrx_tail = hw->hw_addr + AVF_QRX_TAIL1(rxq->queue_id);
+ rxq->ops = &def_rxq_ops;
+
+ if (check_rx_bulk_allow(rxq) == TRUE) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "not satisfied, Scattered Rx is requested "
+ "on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ ad->rx_bulk_alloc_allowed = false;
+ }
+
+#ifdef RTE_LIBRTE_AVF_INC_VECTOR
+ if (check_rx_vec_allow(rxq) == FALSE)
+ ad->rx_vec_allowed = false;
+#endif
+ return 0;
+}
+
+int
+avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_adapter *ad =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_tx_queue *txq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t tx_rs_thresh, tx_free_thresh;
+ uint16_t i, base, bsf, tc_mapping;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
+ nb_desc > AVF_MAX_RING_DESC ||
+ nb_desc < AVF_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
+ "invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+ tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+ check_tx_thresh(nb_desc, tx_rs_thresh, tx_rs_thresh);
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ avf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("avf txq",
+ sizeof(struct avf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for "
+ "tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->rs_thresh = tx_rs_thresh;
+ txq->free_thresh = tx_free_thresh;
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->offloads = offloads;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+ /* Allocate software ring */
+ txq->sw_ring =
+ rte_zmalloc_socket("avf tx sw ring",
+ sizeof(struct avf_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
+ rte_free(txq);
+ return -ENOMEM;
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct avf_tx_desc) * AVF_MAX_RING_DESC;
+ ring_size = RTE_ALIGN(ring_size, AVF_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ ring_size, AVF_RING_BASE_ALIGN,
+ socket_id);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->tx_ring = (struct avf_tx_desc *)mz->addr;
+
+ txq->mz = mz;
+ reset_tx_queue(txq);
+ txq->q_set = TRUE;
+ dev->data->tx_queues[queue_idx] = txq;
+ txq->qtx_tail = hw->hw_addr + AVF_QTX_TAIL1(queue_idx);
+ txq->ops = &def_txq_ops;
+
+#ifdef RTE_LIBRTE_AVF_INC_VECTOR
+ if (check_tx_vec_allow(txq) == FALSE)
+ ad->tx_vec_allowed = false;
+#endif
+
+ return 0;
+}
+
+int
+avf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_rx_queue *rxq;
+ int err = 0;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (rx_queue_id >= dev->data->nb_rx_queues)
+ return -EINVAL;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = alloc_rxq_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
+
+ rte_wmb();
+
+ /* Init the RX tail register. */
+ AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ AVF_WRITE_FLUSH(hw);
+
+ /* Ready to switch the queue on */
+ err = avf_switch_queue(adapter, rx_queue_id, TRUE, TRUE);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ else
+ dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+
+ return err;
+}
+
+int
+avf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_tx_queue *txq;
+ int err = 0;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -EINVAL;
+
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /* Init the RX tail register. */
+ AVF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ AVF_WRITE_FLUSH(hw);
+
+ /* Ready to switch the queue on */
+ err = avf_switch_queue(adapter, tx_queue_id, FALSE, TRUE);
+
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ else
+ dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+
+ return err;
+}
+
+int
+avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_rx_queue *rxq;
+ int err;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (rx_queue_id >= dev->data->nb_rx_queues)
+ return -EINVAL;
+
+ err = avf_switch_queue(adapter, rx_queue_id, TRUE, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxq->ops->release_mbufs(rxq);
+ reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+int
+avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_tx_queue *txq;
+ int err;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -EINVAL;
+
+ err = avf_switch_queue(adapter, tx_queue_id, FALSE, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
+ }
+
+ txq = dev->data->tx_queues[tx_queue_id];
+ txq->ops->release_mbufs(txq);
+ reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+void
+avf_dev_rx_queue_release(void *rxq)
+{
+ struct avf_rx_queue *q = (struct avf_rx_queue *)rxq;
+
+ if (!q)
+ return;
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(q);
+}
+
+void
+avf_dev_tx_queue_release(void *txq)
+{
+ struct avf_tx_queue *q = (struct avf_tx_queue *)txq;
+
+ if (!q)
+ return;
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(q);
+}
+
+void
+avf_stop_queues(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_rx_queue *rxq;
+ struct avf_tx_queue *txq;
+ int ret, i;
+
+ /* Stop All queues */
+ ret = avf_disable_queues(adapter);
+ if (ret)
+ PMD_DRV_LOG(WARNING, "Fail to stop queues");
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+ txq->ops->release_mbufs(txq);
+ reset_tx_queue(txq);
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq)
+ continue;
+ rxq->ops->release_mbufs(rxq);
+ reset_rx_queue(rxq);
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+}
+
+static inline void
+avf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union avf_rx_desc *rxdp)
+{
+ if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ (1 << AVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+ mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->vlan_tci =
+ rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
+ } else {
+ mb->vlan_tci = 0;
+ }
+}
+
+/* Translate the rx descriptor status and error fields to pkt flags */
+static inline uint64_t
+avf_rxd_to_pkt_flags(uint64_t qword)
+{
+ uint64_t flags;
+ uint64_t error_bits = (qword >> AVF_RXD_QW1_ERROR_SHIFT);
+
+#define AVF_RX_ERR_BITS 0x3f
+
+ /* Check if RSS_HASH */
+ flags = (((qword >> AVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+ AVF_RX_DESC_FLTSTAT_RSS_HASH) ==
+ AVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
+
+ if (likely((error_bits & AVF_RX_ERR_BITS) == 0)) {
+ flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ return flags;
+ }
+
+ if (unlikely(error_bits & (1 << AVF_RX_DESC_ERROR_IPE_SHIFT)))
+ flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (unlikely(error_bits & (1 << AVF_RX_DESC_ERROR_L4E_SHIFT)))
+ flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ /* TODO: Oversize error bit is not processed here */
+
+ return flags;
+}
+
+/* implement recv_pkts */
+uint16_t
+avf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ volatile union avf_rx_desc *rx_ring;
+ volatile union avf_rx_desc *rxdp;
+ struct avf_rx_queue *rxq;
+ union avf_rx_desc rxd;
+ struct rte_mbuf *rxe;
+ struct rte_eth_dev *dev;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ uint16_t nb_rx;
+ uint32_t rx_status;
+ uint64_t qword1;
+ uint16_t rx_packet_len;
+ uint16_t rx_id, nb_hold;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+ static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ /* [2] - [21] reserved */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ /* All others reserved */
+ };
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+
+ while (nb_rx < nb_pkts) {
+ rxdp = &rx_ring[rx_id];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
+ AVF_RXD_QW1_STATUS_SHIFT;
+
+ /* Check the DD bit first */
+ if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+ AVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb)) {
+ dev = &rte_eth_devices[rxq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", rxq->port_id, rxq->queue_id);
+ break;
+ }
+
+ rxd = *rxdp;
+ nb_hold++;
+ rxe = rxq->sw_ring[rx_id];
+ rx_id++;
+ if (unlikely(rx_id == rxq->nb_rx_desc))
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+
+ /* When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+ }
+ rxm = rxe;
+ rxe = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+
+ rx_packet_len = ((qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+ AVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = rx_packet_len;
+ rxm->data_len = rx_packet_len;
+ rxm->port = rxq->port_id;
+ rxm->ol_flags = 0;
+ avf_rxd_to_vlan_tci(rxm, &rxd);
+ pkt_flags = avf_rxd_to_pkt_flags(qword1);
+ rxm->packet_type =
+ ptype_tbl[(uint8_t)((qword1 &
+ AVF_RXD_QW1_PTYPE_MASK) >> AVF_RXD_QW1_PTYPE_SHIFT)];
+
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ rxm->hash.rss =
+ rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+
+ rxm->ol_flags |= pkt_flags;
+
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /* If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the receive tail register of queue.
+ * Update that register with the value of the last processed RX
+ * descriptor minus 1.
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ rxq->port_id, rxq->queue_id,
+ rx_id, nb_hold, nb_rx);
+ rx_id = (uint16_t)((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ AVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+
+ return nb_rx;
+}
+
+/* implement recv_scattered_pkts */
+uint16_t
+avf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avf_rx_queue *rxq = rx_queue;
+ union avf_rx_desc rxd;
+ struct rte_mbuf *rxe;
+ struct rte_mbuf *first_seg = rxq->pkt_first_seg;
+ struct rte_mbuf *last_seg = rxq->pkt_last_seg;
+ struct rte_mbuf *nmb, *rxm;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ struct rte_eth_dev *dev;
+ uint32_t rx_status;
+ uint64_t qword1;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+
+ volatile union avf_rx_desc *rx_ring = rxq->rx_ring;
+ volatile union avf_rx_desc *rxdp;
+ static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ /* [2] - [21] reserved */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ /* All others reserved */
+ };
+
+ while (nb_rx < nb_pkts) {
+ rxdp = &rx_ring[rx_id];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
+ AVF_RXD_QW1_STATUS_SHIFT;
+
+ /* Check the DD bit */
+ if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+ AVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb)) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", rxq->port_id, rxq->queue_id);
+ dev = &rte_eth_devices[rxq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ rxd = *rxdp;
+ nb_hold++;
+ rxe = rxq->sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+
+ /* When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+ }
+
+ rxm = rxe;
+ rxe = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+
+ /* Set data buffer address and data length of the mbuf */
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+ rx_packet_len = (qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+ AVF_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rxm->data_len = rx_packet_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /* If this is the first buffer of the received packet, set the
+ * pointer to the first mbuf of the packet and initialize its
+ * context. Otherwise, update the total length and the number
+ * of segments of the current scattered packet, and update the
+ * pointer to the last mbuf of the current packet.
+ */
+ if (!first_seg) {
+ first_seg = rxm;
+ first_seg->nb_segs = 1;
+ first_seg->pkt_len = rx_packet_len;
+ } else {
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len +
+ rx_packet_len);
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+
+ /* If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (!(rx_status & (1 << AVF_RX_DESC_STATUS_EOF_SHIFT))) {
+ last_seg = rxm;
+ continue;
+ }
+
+ /* This is the last buffer of the received packet. If the CRC
+ * is not stripped by the hardware:
+ * - Subtract the CRC length from the total packet length.
+ * - If the last buffer only contains the whole CRC or a part
+ * of it, free the mbuf associated to the last buffer. If part
+ * of the CRC is also contained in the previous mbuf, subtract
+ * the length of that CRC part from the data length of the
+ * previous mbuf.
+ */
+ rxm->next = NULL;
+ if (unlikely(rxq->crc_len > 0)) {
+ first_seg->pkt_len -= ETHER_CRC_LEN;
+ if (rx_packet_len <= ETHER_CRC_LEN) {
+ rte_pktmbuf_free_seg(rxm);
+ first_seg->nb_segs--;
+ last_seg->data_len =
+ (uint16_t)(last_seg->data_len -
+ (ETHER_CRC_LEN - rx_packet_len));
+ last_seg->next = NULL;
+ } else
+ rxm->data_len = (uint16_t)(rx_packet_len -
+ ETHER_CRC_LEN);
+ }
+
+ first_seg->port = rxq->port_id;
+ first_seg->ol_flags = 0;
+ avf_rxd_to_vlan_tci(first_seg, &rxd);
+ pkt_flags = avf_rxd_to_pkt_flags(qword1);
+ first_seg->packet_type =
+ ptype_tbl[(uint8_t)((qword1 &
+ AVF_RXD_QW1_PTYPE_MASK) >> AVF_RXD_QW1_PTYPE_SHIFT)];
+
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ first_seg->hash.rss =
+ rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+
+ first_seg->ol_flags |= pkt_flags;
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+ first_seg->data_off));
+ rx_pkts[nb_rx++] = first_seg;
+ first_seg = NULL;
+ }
+
+ /* Record index of the next RX descriptor to probe. */
+ rxq->rx_tail = rx_id;
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ /* If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register. Update the RDT with the value of the last processed RX
+ * descriptor minus 1, to guarantee that the RDT register is never
+ * equal to the RDH register, which creates a "full" ring situtation
+ * from the hardware point of view.
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ rxq->port_id, rxq->queue_id,
+ rx_id, nb_hold, nb_rx);
+ rx_id = (uint16_t)(rx_id == 0 ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ AVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+
+ return nb_rx;
+}
+
+#define AVF_LOOK_AHEAD 8
+static inline int
+avf_rx_scan_hw_ring(struct avf_rx_queue *rxq)
+{
+ volatile union avf_rx_desc *rxdp;
+ struct rte_mbuf **rxep;
+ struct rte_mbuf *mb;
+ uint16_t pkt_len;
+ uint64_t qword1;
+ uint32_t rx_status;
+ int32_t s[AVF_LOOK_AHEAD], nb_dd;
+ int32_t i, j, nb_rx = 0;
+ uint64_t pkt_flags;
+ static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ /* [2] - [21] reserved */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ /* All others reserved */
+ };
+
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxep = &rxq->sw_ring[rxq->rx_tail];
+
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
+ AVF_RXD_QW1_STATUS_SHIFT;
+
+ /* Make sure there is at least 1 packet to receive */
+ if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* Scan LOOK_AHEAD descriptors at a time to determine which
+ * descriptors reference packets that are ready to be received.
+ */
+ for (i = 0; i < AVF_RX_MAX_BURST; i += AVF_LOOK_AHEAD,
+ rxdp += AVF_LOOK_AHEAD, rxep += AVF_LOOK_AHEAD) {
+ /* Read desc statuses backwards to avoid race condition */
+ for (j = AVF_LOOK_AHEAD - 1; j >= 0; j--) {
+ qword1 = rte_le_to_cpu_64(
+ rxdp[j].wb.qword1.status_error_len);
+ s[j] = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
+ AVF_RXD_QW1_STATUS_SHIFT;
+ }
+
+ rte_smp_rmb();
+
+ /* Compute how many status bits were set */
+ for (j = 0, nb_dd = 0; j < AVF_LOOK_AHEAD; j++)
+ nb_dd += s[j] & (1 << AVF_RX_DESC_STATUS_DD_SHIFT);
+
+ nb_rx += nb_dd;
+
+ /* Translate descriptor info to mbuf parameters */
+ for (j = 0; j < nb_dd; j++) {
+ AVF_DUMP_RX_DESC(rxq, &rxdp[j],
+ rxq->rx_tail + i * AVF_LOOK_AHEAD + j);
+
+ mb = rxep[j];
+ qword1 = rte_le_to_cpu_64
+ (rxdp[j].wb.qword1.status_error_len);
+ pkt_len = ((qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+ AVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->ol_flags = 0;
+ avf_rxd_to_vlan_tci(mb, &rxdp[j]);
+ pkt_flags = avf_rxd_to_pkt_flags(qword1);
+ mb->packet_type =
+ ptype_tbl[(uint8_t)((qword1 &
+ AVF_RXD_QW1_PTYPE_MASK) >>
+ AVF_RXD_QW1_PTYPE_SHIFT)];
+
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ mb->hash.rss = rte_le_to_cpu_32(
+ rxdp[j].wb.qword0.hi_dword.rss);
+
+ mb->ol_flags |= pkt_flags;
+ }
+
+ for (j = 0; j < AVF_LOOK_AHEAD; j++)
+ rxq->rx_stage[i + j] = rxep[j];
+
+ if (nb_dd != AVF_LOOK_AHEAD)
+ break;
+ }
+
+ /* Clear software ring entries */
+ for (i = 0; i < nb_rx; i++)
+ rxq->sw_ring[rxq->rx_tail + i] = NULL;
+
+ return nb_rx;
+}
+
+static inline uint16_t
+avf_rx_fill_from_stage(struct avf_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t i;
+ struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+
+ nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+ for (i = 0; i < nb_pkts; i++)
+ rx_pkts[i] = stage[i];
+
+ rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+ rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
+
+ return nb_pkts;
+}
+
+static inline int
+avf_rx_alloc_bufs(struct avf_rx_queue *rxq)
+{
+ volatile union avf_rx_desc *rxdp;
+ struct rte_mbuf **rxep;
+ struct rte_mbuf *mb;
+ uint16_t alloc_idx, i;
+ uint64_t dma_addr;
+ int diag;
+
+ /* Allocate buffers in bulk */
+ alloc_idx = (uint16_t)(rxq->rx_free_trigger -
+ (rxq->rx_free_thresh - 1));
+ rxep = &rxq->sw_ring[alloc_idx];
+ diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
+ rxq->rx_free_thresh);
+ if (unlikely(diag != 0)) {
+ PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
+ return -ENOMEM;
+ }
+
+ rxdp = &rxq->rx_ring[alloc_idx];
+ for (i = 0; i < rxq->rx_free_thresh; i++) {
+ if (likely(i < (rxq->rx_free_thresh - 1)))
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxep[i + 1]);
+
+ mb = rxep[i];
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->next = NULL;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
+ rxdp[i].read.hdr_addr = 0;
+ rxdp[i].read.pkt_addr = dma_addr;
+ }
+
+ /* Update rx tail register */
+ rte_wmb();
+ AVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
+
+ rxq->rx_free_trigger =
+ (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
+ if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+
+ return 0;
+}
+
+static inline uint16_t
+rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct avf_rx_queue *rxq = (struct avf_rx_queue *)rx_queue;
+ struct rte_eth_dev *dev;
+ uint16_t nb_rx = 0;
+
+ if (!nb_pkts)
+ return 0;
+
+ if (rxq->rx_nb_avail)
+ return avf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ nb_rx = (uint16_t)avf_rx_scan_hw_ring(rxq);
+ rxq->rx_next_avail = 0;
+ rxq->rx_nb_avail = nb_rx;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+
+ if (rxq->rx_tail > rxq->rx_free_trigger) {
+ if (avf_rx_alloc_bufs(rxq) != 0) {
+ uint16_t i, j;
+
+ /* TODO: count rx_mbuf_alloc_failed here */
+
+ rxq->rx_nb_avail = 0;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
+ for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
+ rxq->sw_ring[j] = rxq->rx_stage[i];
+
+ return 0;
+ }
+ }
+
+ if (rxq->rx_tail >= rxq->nb_rx_desc)
+ rxq->rx_tail = 0;
+
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
+ rxq->port_id, rxq->queue_id,
+ rxq->rx_tail, nb_rx);
+
+ if (rxq->rx_nb_avail)
+ return avf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ return 0;
+}
+
+static uint16_t
+avf_recv_pkts_bulk_alloc(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_rx = 0, n, count;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ if (likely(nb_pkts <= AVF_RX_MAX_BURST))
+ return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+ while (nb_pkts) {
+ n = RTE_MIN(nb_pkts, AVF_RX_MAX_BURST);
+ count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+ nb_rx = (uint16_t)(nb_rx + count);
+ nb_pkts = (uint16_t)(nb_pkts - count);
+ if (count < n)
+ break;
+ }
+
+ return nb_rx;
+}
+
+static inline int
+avf_xmit_cleanup(struct avf_tx_queue *txq)
+{
+ struct avf_tx_entry *sw_ring = txq->sw_ring;
+ uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+ uint16_t nb_tx_desc = txq->nb_tx_desc;
+ uint16_t desc_to_clean_to;
+ uint16_t nb_tx_to_clean;
+
+ volatile struct avf_tx_desc *txd = txq->tx_ring;
+
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
+ if (desc_to_clean_to >= nb_tx_desc)
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+ if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(AVF_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE)) {
+ PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
+ "(port=%d queue=%d)", desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+ return -1;
+ }
+
+ if (last_desc_cleaned > desc_to_clean_to)
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
+ else
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
+
+ txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
+
+ txq->last_desc_cleaned = desc_to_clean_to;
+ txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
+
+ return 0;
+}
+
+/* Check if the context descriptor is needed for TX offloading */
+static inline uint16_t
+avf_calc_context_desc(uint64_t flags)
+{
+ static uint64_t mask = PKT_TX_TCP_SEG;
+
+ return (flags & mask) ? 1 : 0;
+}
+
+static inline void
+avf_txd_enable_checksum(uint64_t ol_flags,
+ uint32_t *td_cmd,
+ uint32_t *td_offset,
+ union avf_tx_offload tx_offload)
+{
+ /* Set MACLEN */
+ *td_offset |= (tx_offload.l2_len >> 1) <<
+ AVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* Enable L3 checksum offloads */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ *td_offset |= (tx_offload.l3_len >> 2) <<
+ AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (ol_flags & PKT_TX_IPV4) {
+ *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV4;
+ *td_offset |= (tx_offload.l3_len >> 2) <<
+ AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (ol_flags & PKT_TX_IPV6) {
+ *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV6;
+ *td_offset |= (tx_offload.l3_len >> 2) <<
+ AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (tx_offload.l4_len >> 2) <<
+ AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ return;
+ }
+
+ /* Enable L4 checksum offloads */
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
+ AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
+ AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case PKT_TX_UDP_CKSUM:
+ *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
+ AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
+
+/* set TSO context descriptor
+ * support IP -> L4 and IP -> IP -> L4
+ */
+static inline uint64_t
+avf_set_tso_ctx(struct rte_mbuf *mbuf, union avf_tx_offload tx_offload)
+{
+ uint64_t ctx_desc = 0;
+ uint32_t cd_cmd, hdr_len, cd_tso_len;
+
+ if (!tx_offload.l4_len) {
+ PMD_TX_LOG(DEBUG, "L4 length set to 0");
+ return ctx_desc;
+ }
+
+ /* in case of non tunneling packet, the outer_l2_len and
+ * outer_l3_len must be 0.
+ */
+ hdr_len = tx_offload.l2_len +
+ tx_offload.l3_len +
+ tx_offload.l4_len;
+
+ cd_cmd = AVF_TX_CTX_DESC_TSO;
+ cd_tso_len = mbuf->pkt_len - hdr_len;
+ ctx_desc |= ((uint64_t)cd_cmd << AVF_TXD_CTX_QW1_CMD_SHIFT) |
+ ((uint64_t)cd_tso_len << AVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((uint64_t)mbuf->tso_segsz << AVF_TXD_CTX_QW1_MSS_SHIFT);
+
+ return ctx_desc;
+}
+
+/* Construct the tx flags */
+static inline uint64_t
+avf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
+ uint32_t td_tag)
+{
+ return rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DATA |
+ ((uint64_t)td_cmd << AVF_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)td_offset <<
+ AVF_TXD_QW1_OFFSET_SHIFT) |
+ ((uint64_t)size <<
+ AVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((uint64_t)td_tag <<
+ AVF_TXD_QW1_L2TAG1_SHIFT));
+}
+
+/* TX function */
+uint16_t
+avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ volatile struct avf_tx_desc *txd;
+ volatile struct avf_tx_desc *txr;
+ struct avf_tx_queue *txq;
+ struct avf_tx_entry *sw_ring;
+ struct avf_tx_entry *txe, *txn;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint16_t tx_id;
+ uint16_t nb_tx;
+ uint32_t td_cmd;
+ uint32_t td_offset;
+ uint32_t td_tag;
+ uint64_t ol_flags;
+ uint16_t nb_used;
+ uint16_t nb_ctx;
+ uint16_t tx_last;
+ uint16_t slen;
+ uint64_t buf_dma_addr;
+ union avf_tx_offload tx_offload = {0};
+
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ /* Check if the descriptor ring needs to be cleaned. */
+ if (txq->nb_free < txq->free_thresh)
+ avf_xmit_cleanup(txq);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ td_cmd = 0;
+ td_tag = 0;
+ td_offset = 0;
+
+ tx_pkt = *tx_pkts++;
+ RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+ ol_flags = tx_pkt->ol_flags;
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+
+ /* Calculate the number of context descriptors needed. */
+ nb_ctx = avf_calc_context_desc(ol_flags);
+
+ /* The number of descriptors that must be allocated for
+ * a packet equals to the number of the segments of that
+ * packet plus 1 context descriptor if needed.
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+ tx_last = (uint16_t)(tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
+ " tx_first=%u tx_last=%u",
+ txq->port_id, txq->queue_id, tx_id, tx_last);
+
+ if (nb_used > txq->nb_free) {
+ if (avf_xmit_cleanup(txq)) {
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ if (unlikely(nb_used > txq->rs_thresh)) {
+ while (nb_used > txq->nb_free) {
+ if (avf_xmit_cleanup(txq)) {
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ }
+ }
+ }
+
+ /* Descriptor based VLAN insertion */
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ td_cmd |= AVF_TX_DESC_CMD_IL2TAG1;
+ td_tag = tx_pkt->vlan_tci;
+ }
+
+ /* According to datasheet, the bit2 is reserved and must be
+ * set to 1.
+ */
+ td_cmd |= 0x04;
+
+ /* Enable checksum offloading */
+ if (ol_flags & AVF_TX_CKSUM_OFFLOAD_MASK)
+ avf_txd_enable_checksum(ol_flags, &td_cmd,
+ &td_offset, tx_offload);
+
+ if (nb_ctx) {
+ /* Setup TX context descriptor if required */
+ volatile struct avf_tx_context_desc *ctx_txd =
+ (volatile struct avf_tx_context_desc *)
+ &txr[tx_id];
+ uint16_t cd_l2tag2 = 0;
+ uint64_t cd_type_cmd_tso_mss =
+ AVF_TX_DESC_DTYPE_CONTEXT;
+
+ txn = &sw_ring[txe->next_id];
+ RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+ if (txe->mbuf) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ /* TSO enabled */
+ if (ol_flags & PKT_TX_TCP_SEG)
+ cd_type_cmd_tso_mss |=
+ avf_set_tso_ctx(tx_pkt, tx_offload);
+
+ AVF_DUMP_TX_DESC(txq, ctx_txd, tx_id);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+
+ if (txe->mbuf)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /* Setup TX Descriptor */
+ slen = m_seg->data_len;
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+ txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+ txd->cmd_type_offset_bsz = avf_build_ctob(td_cmd,
+ td_offset,
+ slen,
+ td_tag);
+
+ AVF_DUMP_TX_DESC(txq, txd, tx_id);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg);
+
+ /* The last packet data descriptor needs End Of Packet (EOP) */
+ td_cmd |= AVF_TX_DESC_CMD_EOP;
+ txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
+ txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+
+ if (txq->nb_used >= txq->rs_thresh) {
+ PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
+ "%4u (port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
+
+ td_cmd |= AVF_TX_DESC_CMD_RS;
+
+ /* Update txq RS bit counters */
+ txq->nb_used = 0;
+ }
+
+ txd->cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)td_cmd) <<
+ AVF_TXD_QW1_CMD_SHIFT);
+ AVF_DUMP_TX_DESC(txq, txd, tx_id);
+ }
+
+end_of_tx:
+ rte_wmb();
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ txq->port_id, txq->queue_id, tx_id, nb_tx);
+
+ AVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
+ txq->tx_tail = tx_id;
+
+ return nb_tx;
+}
+
+static uint16_t
+avf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct avf_tx_queue *txq = (struct avf_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+ ret = avf_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
+/* TX prep functions */
+uint16_t
+avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /* Check condition for nb_segs > AVF_TX_MAX_MTU_SEG. */
+ if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (m->nb_segs > AVF_TX_MAX_MTU_SEG) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+ } else if ((m->tso_segsz < AVF_MIN_TSO_MSS) ||
+ (m->tso_segsz > AVF_MAX_TSO_MSS)) {
+ /* MSS outside the range are considered malicious */
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (ol_flags & AVF_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/* choose rx function*/
+void
+avf_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_rx_queue *rxq;
+ int i;
+
+ if (adapter->rx_vec_allowed) {
+ if (dev->data->scattered_rx) {
+ PMD_DRV_LOG(DEBUG, "Using Vector Scattered Rx callback"
+ " (port=%d).", dev->data->port_id);
+ dev->rx_pkt_burst = avf_recv_scattered_pkts_vec;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using Vector Rx callback"
+ " (port=%d).", dev->data->port_id);
+ dev->rx_pkt_burst = avf_recv_pkts_vec;
+ }
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq)
+ continue;
+ avf_rxq_vec_setup(rxq);
+ }
+ } else if (dev->data->scattered_rx) {
+ PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = avf_recv_scattered_pkts;
+ } else if (adapter->rx_bulk_alloc_allowed) {
+ PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = avf_recv_pkts_bulk_alloc;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = avf_recv_pkts;
+ }
+}
+
+/* choose tx function*/
+void
+avf_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_tx_queue *txq;
+ int i;
+
+ if (adapter->tx_vec_allowed) {
+ PMD_DRV_LOG(DEBUG, "Using Vector Tx callback (port=%d).",
+ dev->data->port_id);
+ dev->tx_pkt_burst = avf_xmit_pkts_vec;
+ dev->tx_pkt_prepare = NULL;
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+ avf_txq_vec_setup(txq);
+ }
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
+ dev->data->port_id);
+ dev->tx_pkt_burst = avf_xmit_pkts;
+ dev->tx_pkt_prepare = avf_prep_pkts;
+ }
+}
+
+void
+avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct avf_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mp;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = TRUE;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct avf_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_free_thresh = txq->free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->rs_thresh;
+ qinfo->conf.offloads = txq->offloads;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+/* Get the number of used descriptors of a rx queue */
+uint32_t
+avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+#define AVF_RXQ_SCAN_INTERVAL 4
+ volatile union avf_rx_desc *rxdp;
+ struct avf_rx_queue *rxq;
+ uint16_t desc = 0;
+
+ rxq = dev->data->rx_queues[queue_id];
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ while ((desc < rxq->nb_rx_desc) &&
+ ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ AVF_RXD_QW1_STATUS_MASK) >> AVF_RXD_QW1_STATUS_SHIFT) &
+ (1 << AVF_RX_DESC_STATUS_DD_SHIFT)) {
+ /* Check the DD bit of a rx descriptor of each 4 in a group,
+ * to avoid checking too frequently and downgrading performance
+ * too much.
+ */
+ desc += AVF_RXQ_SCAN_INTERVAL;
+ rxdp += AVF_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
+
+int
+avf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
+{
+ struct avf_rx_queue *rxq = rx_queue;
+ volatile uint64_t *status;
+ uint64_t mask;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+ mask = rte_le_to_cpu_64((1ULL << AVF_RX_DESC_STATUS_DD_SHIFT)
+ << AVF_RXD_QW1_STATUS_SHIFT);
+ if (*status & mask)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+avf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
+{
+ struct avf_tx_queue *txq = tx_queue;
+ volatile uint64_t *status;
+ uint64_t mask, expect;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
+ txq->rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ mask = rte_le_to_cpu_64(AVF_TXD_QW1_DTYPE_MASK);
+ expect = rte_cpu_to_le_64(
+ AVF_TX_DESC_DTYPE_DESC_DONE << AVF_TXD_QW1_DTYPE_SHIFT);
+ if ((*status & mask) == expect)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+uint16_t __attribute__((weak))
+avf_recv_pkts_vec(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+avf_recv_scattered_pkts_vec(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+avf_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+avf_rxq_vec_setup(__rte_unused struct avf_rx_queue *rxq)
+{
+ return -1;
+}
+
+int __attribute__((weak))
+avf_txq_vec_setup(__rte_unused struct avf_tx_queue *txq)
+{
+ return -1;
+}
diff --git a/src/spdk/dpdk/drivers/net/avf/avf_rxtx.h b/src/spdk/dpdk/drivers/net/avf/avf_rxtx.h
new file mode 100644
index 00000000..297d0776
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/avf_rxtx.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _AVF_RXTX_H_
+#define _AVF_RXTX_H_
+
+/* In QLEN must be whole number of 32 descriptors. */
+#define AVF_ALIGN_RING_DESC 32
+#define AVF_MIN_RING_DESC 64
+#define AVF_MAX_RING_DESC 4096
+#define AVF_DMA_MEM_ALIGN 4096
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define AVF_RING_BASE_ALIGN 128
+
+/* used for Rx Bulk Allocate */
+#define AVF_RX_MAX_BURST 32
+
+/* used for Vector PMD */
+#define AVF_VPMD_RX_MAX_BURST 32
+#define AVF_VPMD_TX_MAX_BURST 32
+#define AVF_VPMD_DESCS_PER_LOOP 4
+#define AVF_VPMD_TX_MAX_FREE_BUF 64
+
+#define AVF_NO_VECTOR_FLAGS ( \
+ DEV_TX_OFFLOAD_MULTI_SEGS | \
+ DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_SCTP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM)
+
+#define DEFAULT_TX_RS_THRESH 32
+#define DEFAULT_TX_FREE_THRESH 32
+
+#define AVF_MIN_TSO_MSS 256
+#define AVF_MAX_TSO_MSS 9668
+#define AVF_TSO_MAX_SEG UINT8_MAX
+#define AVF_TX_MAX_MTU_SEG 8
+
+#define AVF_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define AVF_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define AVF_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ AVF_TX_OFFLOAD_MASK)
+
+/* HW desc structure, both 16-byte and 32-byte types are supported */
+#ifdef RTE_LIBRTE_AVF_16BYTE_RX_DESC
+#define avf_rx_desc avf_16byte_rx_desc
+#else
+#define avf_rx_desc avf_32byte_rx_desc
+#endif
+
+struct avf_rxq_ops {
+ void (*release_mbufs)(struct avf_rx_queue *rxq);
+};
+
+struct avf_txq_ops {
+ void (*release_mbufs)(struct avf_tx_queue *txq);
+};
+
+/* Structure associated with each Rx queue. */
+struct avf_rx_queue {
+ struct rte_mempool *mp; /* mbuf pool to populate Rx ring */
+ const struct rte_memzone *mz; /* memzone for Rx ring */
+ volatile union avf_rx_desc *rx_ring; /* Rx ring virtual address */
+ uint64_t rx_ring_phys_addr; /* Rx ring DMA address */
+ struct rte_mbuf **sw_ring; /* address of SW ring */
+ uint16_t nb_rx_desc; /* ring length */
+ uint16_t rx_tail; /* current value of tail */
+ volatile uint8_t *qrx_tail; /* register address of tail */
+ uint16_t rx_free_thresh; /* max free RX desc to hold */
+ uint16_t nb_rx_hold; /* number of held free RX desc */
+ struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+ struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
+ struct rte_mbuf fake_mbuf; /* dummy mbuf */
+
+ /* used for VPMD */
+ uint16_t rxrearm_nb; /* number of remaining to be re-armed */
+ uint16_t rxrearm_start; /* the idx we start the re-arming from */
+ uint64_t mbuf_initializer; /* value to init mbufs */
+
+ /* for rx bulk */
+ uint16_t rx_nb_avail; /* number of staged packets ready */
+ uint16_t rx_next_avail; /* index of next staged packets */
+ uint16_t rx_free_trigger; /* triggers rx buffer allocation */
+ struct rte_mbuf *rx_stage[AVF_RX_MAX_BURST * 2]; /* store mbuf */
+
+ uint16_t port_id; /* device port ID */
+ uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+ uint16_t queue_id; /* Rx queue index */
+ uint16_t rx_buf_len; /* The packet buffer size */
+ uint16_t rx_hdr_len; /* The header buffer size */
+ uint16_t max_pkt_len; /* Maximum packet length */
+
+ bool q_set; /* if rx queue has been configured */
+ bool rx_deferred_start; /* don't start this queue in dev start */
+ const struct avf_rxq_ops *ops;
+};
+
+struct avf_tx_entry {
+ struct rte_mbuf *mbuf;
+ uint16_t next_id;
+ uint16_t last_id;
+};
+
+/* Structure associated with each TX queue. */
+struct avf_tx_queue {
+ const struct rte_memzone *mz; /* memzone for Tx ring */
+ volatile struct avf_tx_desc *tx_ring; /* Tx ring virtual address */
+ uint64_t tx_ring_phys_addr; /* Tx ring DMA address */
+ struct avf_tx_entry *sw_ring; /* address array of SW ring */
+ uint16_t nb_tx_desc; /* ring length */
+ uint16_t tx_tail; /* current value of tail */
+ volatile uint8_t *qtx_tail; /* register address of tail */
+ /* number of used desc since RS bit set */
+ uint16_t nb_used;
+ uint16_t nb_free;
+ uint16_t last_desc_cleaned; /* last desc have been cleaned*/
+ uint16_t free_thresh;
+ uint16_t rs_thresh;
+
+ uint16_t port_id;
+ uint16_t queue_id;
+ uint64_t offloads;
+ uint16_t next_dd; /* next to set RS, for VPMD */
+ uint16_t next_rs; /* next to check DD, for VPMD */
+
+ bool q_set; /* if rx queue has been configured */
+ bool tx_deferred_start; /* don't start this queue in dev start */
+ const struct avf_txq_ops *ops;
+};
+
+/* Offload features */
+union avf_tx_offload {
+ uint64_t data;
+ struct {
+ uint64_t l2_len:7; /* L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /* L3 (IP) Header Length. */
+ uint64_t l4_len:8; /* L4 Header Length. */
+ uint64_t tso_segsz:16; /* TCP TSO segment size */
+ /* uint64_t unused : 24; */
+ };
+};
+
+int avf_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+
+int avf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void avf_dev_rx_queue_release(void *rxq);
+
+int avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+int avf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void avf_dev_tx_queue_release(void *txq);
+void avf_stop_queues(struct rte_eth_dev *dev);
+uint16_t avf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t avf_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t avf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+void avf_set_rx_function(struct rte_eth_dev *dev);
+void avf_set_tx_function(struct rte_eth_dev *dev);
+void avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+void avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+uint32_t avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id);
+int avf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
+int avf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
+
+uint16_t avf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t avf_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t avf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int avf_rxq_vec_setup(struct avf_rx_queue *rxq);
+int avf_txq_vec_setup(struct avf_tx_queue *txq);
+
+static inline
+void avf_dump_rx_descriptor(struct avf_rx_queue *rxq,
+ const void *desc,
+ uint16_t rx_id)
+{
+#ifdef RTE_LIBRTE_AVF_16BYTE_RX_DESC
+ const union avf_16byte_rx_desc *rx_desc = desc;
+
+ printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
+ rxq->queue_id, rx_id, rx_desc->read.pkt_addr,
+ rx_desc->read.hdr_addr);
+#else
+ const union avf_32byte_rx_desc *rx_desc = desc;
+
+ printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64
+ " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", rxq->queue_id,
+ rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
+ rx_desc->read.rsvd1, rx_desc->read.rsvd2);
+#endif
+}
+
+/* All the descriptors are 16 bytes, so just use one of them
+ * to print the qwords
+ */
+static inline
+void avf_dump_tx_descriptor(const struct avf_tx_queue *txq,
+ const void *desc, uint16_t tx_id)
+{
+ char *name;
+ const struct avf_tx_desc *tx_desc = desc;
+ enum avf_tx_desc_dtype_value type;
+
+ type = (enum avf_tx_desc_dtype_value)rte_le_to_cpu_64(
+ tx_desc->cmd_type_offset_bsz &
+ rte_cpu_to_le_64(AVF_TXD_QW1_DTYPE_MASK));
+ switch (type) {
+ case AVF_TX_DESC_DTYPE_DATA:
+ name = "Tx_data_desc";
+ break;
+ case AVF_TX_DESC_DTYPE_CONTEXT:
+ name = "Tx_context_desc";
+ break;
+ default:
+ name = "unknown_desc";
+ break;
+ }
+
+ printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
+ txq->queue_id, name, tx_id, tx_desc->buffer_addr,
+ tx_desc->cmd_type_offset_bsz);
+}
+
+#ifdef DEBUG_DUMP_DESC
+#define AVF_DUMP_RX_DESC(rxq, desc, rx_id) \
+ avf_dump_rx_descriptor(rxq, desc, rx_id)
+#define AVF_DUMP_TX_DESC(txq, desc, tx_id) \
+ avf_dump_tx_descriptor(txq, desc, tx_id)
+#else
+#define AVF_DUMP_RX_DESC(rxq, desc, rx_id) do { } while (0)
+#define AVF_DUMP_TX_DESC(txq, desc, tx_id) do { } while (0)
+#endif
+
+#endif /* _AVF_RXTX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/avf_rxtx_vec_common.h b/src/spdk/dpdk/drivers/net/avf/avf_rxtx_vec_common.h
new file mode 100644
index 00000000..8057b968
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/avf_rxtx_vec_common.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _AVF_RXTX_VEC_COMMON_H_
+#define _AVF_RXTX_VEC_COMMON_H_
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "avf.h"
+#include "avf_rxtx.h"
+
+static inline uint16_t
+reassemble_packets(struct avf_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[AVF_VPMD_RX_MAX_BURST];
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned int pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len) {
+ end->data_len -= rxq->crc_len;
+ } else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ start->nb_segs--;
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ }
+ pkts[pkt_idx++] = start;
+ start = NULL;
+ end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
+static __rte_always_inline int
+avf_tx_free_bufs(struct avf_tx_queue *txq)
+{
+ struct avf_tx_entry *txep;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[AVF_VPMD_TX_MAX_FREE_BUF];
+
+ /* check DD bits on threshold descriptor */
+ if ((txq->tx_ring[txq->next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(AVF_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ n = txq->rs_thresh;
+
+ /* first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->next_dd - (n - 1)];
+ m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool)) {
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free,
+ nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
+ txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
+ if (txq->next_dd >= txq->nb_tx_desc)
+ txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+
+ return txq->rs_thresh;
+}
+
+static __rte_always_inline void
+tx_backlog_entry(struct avf_tx_entry *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+static inline void
+_avf_rx_queue_release_mbufs_vec(struct avf_rx_queue *rxq)
+{
+ const unsigned int mask = rxq->nb_rx_desc - 1;
+ unsigned int i;
+
+ if (!rxq->sw_ring || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ if (rxq->rxrearm_nb == 0) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i])
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ }
+ } else {
+ for (i = rxq->rx_tail;
+ i != rxq->rxrearm_start;
+ i = (i + 1) & mask) {
+ if (rxq->sw_ring[i])
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ }
+ }
+
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static inline void
+_avf_tx_queue_release_mbufs_vec(struct avf_tx_queue *txq)
+{
+ unsigned i;
+ const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
+
+ if (!txq->sw_ring || txq->nb_free == max_desc)
+ return;
+
+ i = txq->next_dd - txq->rs_thresh + 1;
+ if (txq->tx_tail < i) {
+ for (; i < txq->nb_tx_desc; i++) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ i = 0;
+ }
+}
+
+static inline int
+avf_rxq_vec_setup_default(struct avf_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
+#endif
diff --git a/src/spdk/dpdk/drivers/net/avf/avf_rxtx_vec_sse.c b/src/spdk/dpdk/drivers/net/avf/avf_rxtx_vec_sse.c
new file mode 100644
index 00000000..8275100f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/avf_rxtx_vec_sse.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "base/avf_prototype.h"
+#include "base/avf_type.h"
+#include "avf.h"
+#include "avf_rxtx.h"
+#include "avf_rxtx_vec_common.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+avf_rxq_rearm(struct avf_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+
+ volatile union avf_rx_desc *rxdp;
+ struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ __m128i dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp, (void *)rxp,
+ rxq->rx_free_thresh) < 0) {
+ if (rxq->rxrearm_nb + rxq->rx_free_thresh >= rxq->nb_rx_desc) {
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < AVF_VPMD_DESCS_PER_LOOP; i++) {
+ rxp[i] = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < rxq->rx_free_thresh; i += 2, rxp += 2) {
+ __m128i vaddr0, vaddr1;
+
+ mb0 = rxp[0];
+ mb1 = rxp[1];
+
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += rxq->rx_free_thresh;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= rxq->rx_free_thresh;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "rearm_start=%u rearm_nb=%u",
+ rxq->port_id, rxq->queue_id,
+ rx_id, rxq->rxrearm_start, rxq->rxrearm_nb);
+
+ /* Update the tail pointer on the NIC */
+ AVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+static inline void
+desc_to_olflags_v(struct avf_rx_queue *rxq, __m128i descs[4],
+ struct rte_mbuf **rx_pkts)
+{
+ const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+ __m128i rearm0, rearm1, rearm2, rearm3;
+
+ __m128i vlan0, vlan1, rss, l3_l4e;
+
+ /* mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication.
+ */
+ const __m128i rss_vlan_msk = _mm_set_epi32(
+ 0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
+
+ const __m128i cksum_mask = _mm_set_epi32(
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD);
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ 0, 0, 0, 0);
+
+ const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
+ 0, 0, PKT_RX_FDIR, 0);
+
+ const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ /* shift right 1 bit to make sure it not exceed 255 */
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+
+ vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
+ vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
+ vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);
+
+ vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
+ vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
+
+ rss = _mm_srli_epi32(vlan1, 11);
+ rss = _mm_shuffle_epi8(rss_flags, rss);
+
+ l3_l4e = _mm_srli_epi32(vlan1, 22);
+ l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
+ /* then we shift left 1 bit */
+ l3_l4e = _mm_slli_epi32(l3_l4e, 1);
+ /* we need to mask out the reduntant bits */
+ l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
+
+ vlan0 = _mm_or_si128(vlan0, rss);
+ vlan0 = _mm_or_si128(vlan0, l3_l4e);
+
+ /* At this point, we have the 4 sets of flags in the low 16-bits
+ * of each 32-bit value in vlan0.
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single 16-byte write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write.
+ */
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
+ rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);
+
+ /* write the rearm data and the olflags in one write */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+ _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+}
+
+#define PKTLEN_SHIFT 10
+
+static inline void
+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
+ __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
+ static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ /* [2] - [21] reserved */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ /* All others reserved */
+ };
+
+ ptype0 = _mm_srli_epi64(ptype0, 30);
+ ptype1 = _mm_srli_epi64(ptype1, 30);
+
+ rx_pkts[0]->packet_type = type_table[_mm_extract_epi8(ptype0, 0)];
+ rx_pkts[1]->packet_type = type_table[_mm_extract_epi8(ptype0, 8)];
+ rx_pkts[2]->packet_type = type_table[_mm_extract_epi8(ptype1, 0)];
+ rx_pkts[3]->packet_type = type_table[_mm_extract_epi8(ptype1, 8)];
+}
+
+/* Notice:
+ * - nb_pkts < AVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > AVF_VPMD_RX_MAX_BURST, only scan AVF_VPMD_RX_MAX_BURST
+ * numbers of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct avf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union avf_rx_desc *rxdp;
+ struct rte_mbuf **sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ __m128i shuf_msk;
+
+ __m128i crc_adjust = _mm_set_epi16(
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+ /* compile-time check the above crc_adjust layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi16
+ * call above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ __m128i dd_check, eop_check;
+
+ /* nb_pkts shall be less equal than AVF_VPMD_RX_MAX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, AVF_VPMD_RX_MAX_BURST);
+
+ /* nb_pkts has to be floor-aligned to AVF_VPMD_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, AVF_VPMD_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch0(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > rxq->rx_free_thresh)
+ avf_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, 0xFF, 0xFF /* pkt_type set as unknown */
+ );
+ /* Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += AVF_VPMD_DESCS_PER_LOOP,
+ rxdp += AVF_VPMD_DESCS_PER_LOOP) {
+ __m128i descs[AVF_VPMD_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
+
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
+ mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+ rte_compiler_barrier();
+
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
+ mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
+#endif
+
+ descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ rte_compiler_barrier();
+ /* B.1 load 2 mbuf point */
+ descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ rte_compiler_barrier();
+ descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+#if defined(RTE_ARCH_X86_64)
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);
+#endif
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+ const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT);
+ const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT);
+
+ /* merge the now-aligned packet length fields back in */
+ descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
+ descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+ /* C.1 4=>2 status err info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+ sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+ desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
+ pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
+ const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT);
+ const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT);
+
+ /* merge the now-aligned packet length fields back in */
+ descs[1] = _mm_blend_epi16(descs[1], len1, 0x80);
+ descs[0] = _mm_blend_epi16(descs[0], len0, 0x80);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+ /* C.2 get 4 pkts status err value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128(
+ (void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128(
+ (void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 remove crc */
+ pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+ pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+ /* the staterr values are not in order, as the count
+ * count of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += AVF_VPMD_DESCS_PER_LOOP;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128(
+ (void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+ desc_to_ptype_v(descs, &rx_pkts[pos]);
+ /* C.4 calc avaialbe number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != AVF_VPMD_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+/* Notice:
+ * - nb_pkts < AVF_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > AVF_VPMD_RX_MAX_BURST, only scan AVF_VPMD_RX_MAX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+avf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+/* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < AVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > VPMD_RX_MAX_BURST, only scan AVF_VPMD_RX_MAX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+avf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avf_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[AVF_VPMD_RX_MAX_BURST] = {0};
+ unsigned int i = 0;
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (!rxq->pkt_first_seg &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ if (!rxq->pkt_first_seg) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct avf_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw =
+ (AVF_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << AVF_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len <<
+ AVF_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ __m128i descriptor = _mm_set_epi64x(high_qw,
+ pkt->buf_iova + pkt->data_off);
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+avf_vtx(volatile struct avf_tx_desc *txdp, struct rte_mbuf **pkt,
+ uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+avf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avf_tx_queue *txq = (struct avf_tx_queue *)tx_queue;
+ volatile struct avf_tx_desc *txdp;
+ struct avf_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = AVF_TX_DESC_CMD_EOP | 0x04; /* bit 2 must be set */
+ uint64_t rs = AVF_TX_DESC_CMD_RS | flags;
+ int i;
+
+ /* cross rx_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);
+
+ if (txq->nb_free < txq->free_thresh)
+ avf_tx_free_bufs(txq);
+
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+ nb_commit = nb_pkts;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+ /* avoid reach the end of ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ avf_vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->next_rs) {
+ txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)AVF_TX_DESC_CMD_RS) <<
+ AVF_TXD_QW1_CMD_SHIFT);
+ txq->next_rs =
+ (uint16_t)(txq->next_rs + txq->rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_pkts=%u",
+ txq->port_id, txq->queue_id, tx_id, nb_pkts);
+
+ AVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+void __attribute__((cold))
+avf_rx_queue_release_mbufs_sse(struct avf_rx_queue *rxq)
+{
+ _avf_rx_queue_release_mbufs_vec(rxq);
+}
+
+static void __attribute__((cold))
+avf_tx_queue_release_mbufs_sse(struct avf_tx_queue *txq)
+{
+ _avf_tx_queue_release_mbufs_vec(txq);
+}
+
+static const struct avf_rxq_ops sse_vec_rxq_ops = {
+ .release_mbufs = avf_rx_queue_release_mbufs_sse,
+};
+
+static const struct avf_txq_ops sse_vec_txq_ops = {
+ .release_mbufs = avf_tx_queue_release_mbufs_sse,
+};
+
+int __attribute__((cold))
+avf_txq_vec_setup(struct avf_tx_queue *txq)
+{
+ txq->ops = &sse_vec_txq_ops;
+ return 0;
+}
+
+int __attribute__((cold))
+avf_rxq_vec_setup(struct avf_rx_queue *rxq)
+{
+ rxq->ops = &sse_vec_rxq_ops;
+ return avf_rxq_vec_setup_default(rxq);
+}
diff --git a/src/spdk/dpdk/drivers/net/avf/avf_vchnl.c b/src/spdk/dpdk/drivers/net/avf/avf_vchnl.c
new file mode 100644
index 00000000..fa71014e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/avf_vchnl.c
@@ -0,0 +1,812 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_dev.h>
+
+#include "avf_log.h"
+#include "base/avf_prototype.h"
+#include "base/avf_adminq_cmd.h"
+#include "base/avf_type.h"
+
+#include "avf.h"
+#include "avf_rxtx.h"
+
+#define MAX_TRY_TIMES 200
+#define ASQ_DELAY_MS 10
+
+/* Read data in admin queue to get msg from pf driver */
+static enum avf_status_code
+avf_read_msg_from_pf(struct avf_adapter *adapter, uint16_t buf_len,
+ uint8_t *buf)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_arq_event_info event;
+ enum virtchnl_ops opcode;
+ int ret;
+
+ event.buf_len = buf_len;
+ event.msg_buf = buf;
+ ret = avf_clean_arq_element(hw, &event, NULL);
+ /* Can't read any msg from adminQ */
+ if (ret) {
+ PMD_DRV_LOG(DEBUG, "Can't read msg from AQ");
+ return ret;
+ }
+
+ opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
+ vf->cmd_retval = (enum virtchnl_status_code)rte_le_to_cpu_32(
+ event.desc.cookie_low);
+
+ PMD_DRV_LOG(DEBUG, "AQ from pf carries opcode %u, retval %d",
+ opcode, vf->cmd_retval);
+
+ if (opcode != vf->pend_cmd)
+ PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
+ vf->pend_cmd, opcode);
+
+ return AVF_SUCCESS;
+}
+
+static int
+avf_execute_vf_cmd(struct avf_adapter *adapter, struct avf_cmd_info *args)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_arq_event_info event_info;
+ enum avf_status_code ret;
+ int err = 0;
+ int i = 0;
+
+ if (_atomic_set_cmd(vf, args->ops))
+ return -1;
+
+ ret = avf_aq_send_msg_to_pf(hw, args->ops, AVF_SUCCESS,
+ args->in_args, args->in_args_size, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
+ _clear_cmd(vf);
+ return err;
+ }
+
+ switch (args->ops) {
+ case VIRTCHNL_OP_RESET_VF:
+ /*no need to wait for response */
+ _clear_cmd(vf);
+ break;
+ case VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ /* for init virtchnl ops, need to poll the response */
+ do {
+ ret = avf_read_msg_from_pf(adapter, args->out_size,
+ args->out_buffer);
+ if (ret == AVF_SUCCESS)
+ break;
+ rte_delay_ms(ASQ_DELAY_MS);
+ } while (i++ < MAX_TRY_TIMES);
+ if (i >= MAX_TRY_TIMES ||
+ vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+ err = -1;
+ PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+ " for cmd %d", vf->cmd_retval, args->ops);
+ }
+ _clear_cmd(vf);
+ break;
+
+ default:
+ /* For other virtchnl ops in running time,
+ * wait for the cmd done flag.
+ */
+ do {
+ if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN)
+ break;
+ rte_delay_ms(ASQ_DELAY_MS);
+ /* If don't read msg or read sys event, continue */
+ } while (i++ < MAX_TRY_TIMES);
+ /* If there's no response is received, clear command */
+ if (i >= MAX_TRY_TIMES ||
+ vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+ err = -1;
+ PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+ " for cmd %d", vf->cmd_retval, args->ops);
+ _clear_cmd(vf);
+ }
+ break;
+ }
+
+ return err;
+}
+
+static void
+avf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
+ uint16_t msglen)
+{
+ struct virtchnl_pf_event *pf_msg =
+ (struct virtchnl_pf_event *)msg;
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ if (msglen < sizeof(struct virtchnl_pf_event)) {
+ PMD_DRV_LOG(DEBUG, "Error event");
+ return;
+ }
+ switch (pf_msg->event) {
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ break;
+ case VIRTCHNL_EVENT_LINK_CHANGE:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
+ vf->link_up = pf_msg->event_data.link_event.link_status;
+ vf->link_speed = pf_msg->event_data.link_event.link_speed;
+ avf_dev_link_update(dev, 0);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ break;
+ case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
+ break;
+ default:
+ PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
+ break;
+ }
+}
+
+void
+avf_handle_virtchnl_msg(struct rte_eth_dev *dev)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct avf_arq_event_info info;
+ uint16_t pending, aq_opc;
+ enum virtchnl_ops msg_opc;
+ enum avf_status_code msg_ret;
+ int ret;
+
+ info.buf_len = AVF_AQ_BUF_SZ;
+ if (!vf->aq_resp) {
+ PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
+ return;
+ }
+ info.msg_buf = vf->aq_resp;
+
+ pending = 1;
+ while (pending) {
+ ret = avf_clean_arq_element(hw, &info, &pending);
+
+ if (ret != AVF_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ,"
+ "ret: %d", ret);
+ break;
+ }
+ aq_opc = rte_le_to_cpu_16(info.desc.opcode);
+ /* For the message sent from pf to vf, opcode is stored in
+ * cookie_high of struct avf_aq_desc, while return error code
+ * are stored in cookie_low, Which is done by PF driver.
+ */
+ msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
+ info.desc.cookie_high);
+ msg_ret = (enum avf_status_code)rte_le_to_cpu_32(
+ info.desc.cookie_low);
+ switch (aq_opc) {
+ case avf_aqc_opc_send_msg_to_vf:
+ if (msg_opc == VIRTCHNL_OP_EVENT) {
+ avf_handle_pf_event_msg(dev, info.msg_buf,
+ info.msg_len);
+ } else {
+ /* read message and it's expected one */
+ if (msg_opc == vf->pend_cmd) {
+ vf->cmd_retval = msg_ret;
+ /* prevent compiler reordering */
+ rte_compiler_barrier();
+ _clear_cmd(vf);
+ } else
+ PMD_DRV_LOG(ERR, "command mismatch,"
+ "expect %u, get %u",
+ vf->pend_cmd, msg_opc);
+ PMD_DRV_LOG(DEBUG,
+ "adminq response is received,"
+ " opcode = %d", msg_opc);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+ aq_opc);
+ break;
+ }
+ }
+}
+
+int
+avf_enable_vlan_strip(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_cmd_info args;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ ret = avf_execute_vf_cmd(adapter, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of"
+ " OP_ENABLE_VLAN_STRIPPING");
+
+ return ret;
+}
+
+int
+avf_disable_vlan_strip(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_cmd_info args;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ ret = avf_execute_vf_cmd(adapter, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of"
+ " OP_DISABLE_VLAN_STRIPPING");
+
+ return ret;
+}
+
+#define VIRTCHNL_VERSION_MAJOR_START 1
+#define VIRTCHNL_VERSION_MINOR_START 1
+
+/* Check API version with sync wait until version read from admin queue */
+int
+avf_check_api_version(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_version_info version, *pver;
+ struct avf_cmd_info args;
+ int err;
+
+ version.major = VIRTCHNL_VERSION_MAJOR;
+ version.minor = VIRTCHNL_VERSION_MINOR;
+
+ args.ops = VIRTCHNL_OP_VERSION;
+ args.in_args = (uint8_t *)&version;
+ args.in_args_size = sizeof(version);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
+ return err;
+ }
+
+ pver = (struct virtchnl_version_info *)args.out_buffer;
+ vf->virtchnl_version = *pver;
+
+ if (vf->virtchnl_version.major < VIRTCHNL_VERSION_MAJOR_START ||
+ (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR_START &&
+ vf->virtchnl_version.minor < VIRTCHNL_VERSION_MINOR_START)) {
+ PMD_INIT_LOG(ERR, "VIRTCHNL API version should not be lower"
+ " than (%u.%u) to support Adapative VF",
+ VIRTCHNL_VERSION_MAJOR_START,
+ VIRTCHNL_VERSION_MAJOR_START);
+ return -1;
+ } else if (vf->virtchnl_version.major > VIRTCHNL_VERSION_MAJOR ||
+ (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR &&
+ vf->virtchnl_version.minor > VIRTCHNL_VERSION_MINOR)) {
+ PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
+ vf->virtchnl_version.major,
+ vf->virtchnl_version.minor,
+ VIRTCHNL_VERSION_MAJOR,
+ VIRTCHNL_VERSION_MINOR);
+ return -1;
+ }
+
+ PMD_DRV_LOG(DEBUG, "Peer is supported PF host");
+ return 0;
+}
+
+int
+avf_get_vf_resource(struct avf_adapter *adapter)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_cmd_info args;
+ uint32_t caps, len;
+ int err, i;
+
+ args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ /* TODO: basic offload capabilities, need to
+ * add advanced/optional offload capabilities
+ */
+
+ caps = AVF_BASIC_OFFLOAD_CAPS;
+
+ args.in_args = (uint8_t *)&caps;
+ args.in_args_size = sizeof(caps);
+
+ err = avf_execute_vf_cmd(adapter, &args);
+
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_GET_VF_RESOURCE");
+ return -1;
+ }
+
+ len = sizeof(struct virtchnl_vf_resource) +
+ AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
+
+ rte_memcpy(vf->vf_res, args.out_buffer,
+ RTE_MIN(args.out_size, len));
+ /* parse VF config message back from PF*/
+ avf_parse_hw_config(hw, vf->vf_res);
+ for (i = 0; i < vf->vf_res->num_vsis; i++) {
+ if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
+ vf->vsi_res = &vf->vf_res->vsi_res[i];
+ }
+
+ if (!vf->vsi_res) {
+ PMD_INIT_LOG(ERR, "no LAN VSI found");
+ return -1;
+ }
+
+ vf->vsi.vsi_id = vf->vsi_res->vsi_id;
+ vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
+ vf->vsi.adapter = adapter;
+
+ return 0;
+}
+
+int
+avf_enable_queues(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_queue_select queue_select;
+ struct avf_cmd_info args;
+ int err;
+
+ memset(&queue_select, 0, sizeof(queue_select));
+ queue_select.vsi_id = vf->vsi_res->vsi_id;
+
+ queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
+ queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+
+ args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
+ args.in_args = (u8 *)&queue_select;
+ args.in_args_size = sizeof(queue_select);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_ENABLE_QUEUES");
+ return err;
+ }
+ return 0;
+}
+
+int
+avf_disable_queues(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_queue_select queue_select;
+ struct avf_cmd_info args;
+ int err;
+
+ memset(&queue_select, 0, sizeof(queue_select));
+ queue_select.vsi_id = vf->vsi_res->vsi_id;
+
+ queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
+ queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+
+ args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
+ args.in_args = (u8 *)&queue_select;
+ args.in_args_size = sizeof(queue_select);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_DISABLE_QUEUES");
+ return err;
+ }
+ return 0;
+}
+
+int
+avf_switch_queue(struct avf_adapter *adapter, uint16_t qid,
+ bool rx, bool on)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_queue_select queue_select;
+ struct avf_cmd_info args;
+ int err;
+
+ memset(&queue_select, 0, sizeof(queue_select));
+ queue_select.vsi_id = vf->vsi_res->vsi_id;
+ if (rx)
+ queue_select.rx_queues |= 1 << qid;
+ else
+ queue_select.tx_queues |= 1 << qid;
+
+ if (on)
+ args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
+ else
+ args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
+ args.in_args = (u8 *)&queue_select;
+ args.in_args_size = sizeof(queue_select);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+ on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
+ return err;
+}
+
+int
+avf_configure_rss_lut(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_rss_lut *rss_lut;
+ struct avf_cmd_info args;
+ int len, err = 0;
+
+ len = sizeof(*rss_lut) + vf->vf_res->rss_lut_size - 1;
+ rss_lut = rte_zmalloc("rss_lut", len, 0);
+ if (!rss_lut)
+ return -ENOMEM;
+
+ rss_lut->vsi_id = vf->vsi_res->vsi_id;
+ rss_lut->lut_entries = vf->vf_res->rss_lut_size;
+ rte_memcpy(rss_lut->lut, vf->rss_lut, vf->vf_res->rss_lut_size);
+
+ args.ops = VIRTCHNL_OP_CONFIG_RSS_LUT;
+ args.in_args = (u8 *)rss_lut;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_CONFIG_RSS_LUT");
+
+ rte_free(rss_lut);
+ return err;
+}
+
+int
+avf_configure_rss_key(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_rss_key *rss_key;
+ struct avf_cmd_info args;
+ int len, err = 0;
+
+ len = sizeof(*rss_key) + vf->vf_res->rss_key_size - 1;
+ rss_key = rte_zmalloc("rss_key", len, 0);
+ if (!rss_key)
+ return -ENOMEM;
+
+ rss_key->vsi_id = vf->vsi_res->vsi_id;
+ rss_key->key_len = vf->vf_res->rss_key_size;
+ rte_memcpy(rss_key->key, vf->rss_key, vf->vf_res->rss_key_size);
+
+ args.ops = VIRTCHNL_OP_CONFIG_RSS_KEY;
+ args.in_args = (u8 *)rss_key;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_CONFIG_RSS_KEY");
+
+ rte_free(rss_key);
+ return err;
+}
+
+int
+avf_configure_queues(struct avf_adapter *adapter)
+{
+ struct avf_rx_queue **rxq =
+ (struct avf_rx_queue **)adapter->eth_dev->data->rx_queues;
+ struct avf_tx_queue **txq =
+ (struct avf_tx_queue **)adapter->eth_dev->data->tx_queues;
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_vsi_queue_config_info *vc_config;
+ struct virtchnl_queue_pair_info *vc_qp;
+ struct avf_cmd_info args;
+ uint16_t i, size;
+ int err;
+
+ size = sizeof(*vc_config) +
+ sizeof(vc_config->qpair[0]) * vf->num_queue_pairs;
+ vc_config = rte_zmalloc("cfg_queue", size, 0);
+ if (!vc_config)
+ return -ENOMEM;
+
+ vc_config->vsi_id = vf->vsi_res->vsi_id;
+ vc_config->num_queue_pairs = vf->num_queue_pairs;
+
+ for (i = 0, vc_qp = vc_config->qpair;
+ i < vf->num_queue_pairs;
+ i++, vc_qp++) {
+ vc_qp->txq.vsi_id = vf->vsi_res->vsi_id;
+ vc_qp->txq.queue_id = i;
+ /* Virtchnnl configure queues by pairs */
+ if (i < adapter->eth_dev->data->nb_tx_queues) {
+ vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
+ vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
+ }
+ vc_qp->rxq.vsi_id = vf->vsi_res->vsi_id;
+ vc_qp->rxq.queue_id = i;
+ vc_qp->rxq.max_pkt_size = vf->max_pkt_len;
+ /* Virtchnnl configure queues by pairs */
+ if (i < adapter->eth_dev->data->nb_rx_queues) {
+ vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
+ vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
+ vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
+ }
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ args.in_args = (uint8_t *)vc_config;
+ args.in_args_size = size;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of"
+ " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+
+ rte_free(vc_config);
+ return err;
+}
+
+int
+avf_config_irq_map(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_irq_map_info *map_info;
+ struct virtchnl_vector_map *vecmap;
+ struct avf_cmd_info args;
+ uint32_t vector_id;
+ int len, i, err;
+
+ len = sizeof(struct virtchnl_irq_map_info) +
+ sizeof(struct virtchnl_vector_map) * vf->nb_msix;
+
+ map_info = rte_zmalloc("map_info", len, 0);
+ if (!map_info)
+ return -ENOMEM;
+
+ map_info->num_vectors = vf->nb_msix;
+ for (i = 0; i < vf->nb_msix; i++) {
+ vecmap = &map_info->vecmap[i];
+ vecmap->vsi_id = vf->vsi_res->vsi_id;
+ vecmap->rxitr_idx = AVF_ITR_INDEX_DEFAULT;
+ vecmap->vector_id = vf->msix_base + i;
+ vecmap->txq_map = 0;
+ vecmap->rxq_map = vf->rxq_map[vf->msix_base + i];
+ }
+
+ args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+ args.in_args = (u8 *)map_info;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
+
+ rte_free(map_info);
+ return err;
+}
+
+void
+avf_add_del_all_mac_addr(struct avf_adapter *adapter, bool add)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct ether_addr *addr;
+ struct avf_cmd_info args;
+ int len, err, i, j;
+ int next_begin = 0;
+ int begin = 0;
+
+ do {
+ j = 0;
+ len = sizeof(struct virtchnl_ether_addr_list);
+ for (i = begin; i < AVF_NUM_MACADDR_MAX; i++, next_begin++) {
+ addr = &adapter->eth_dev->data->mac_addrs[i];
+ if (is_zero_ether_addr(addr))
+ continue;
+ len += sizeof(struct virtchnl_ether_addr);
+ if (len >= AVF_AQ_BUF_SZ) {
+ next_begin = i + 1;
+ break;
+ }
+ }
+
+ list = rte_zmalloc("avf_del_mac_buffer", len, 0);
+ if (!list) {
+ PMD_DRV_LOG(ERR, "fail to allocate memory");
+ return;
+ }
+
+ for (i = begin; i < next_begin; i++) {
+ addr = &adapter->eth_dev->data->mac_addrs[i];
+ if (is_zero_ether_addr(addr))
+ continue;
+ rte_memcpy(list->list[j].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+ PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
+ j++;
+ }
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = j;
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
+ VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.in_args = (uint8_t *)list;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_ETHER_ADDRESS" :
+ "OP_DEL_ETHER_ADDRESS");
+ rte_free(list);
+ begin = next_begin;
+ } while (begin < AVF_NUM_MACADDR_MAX);
+}
+
+int
+avf_query_stats(struct avf_adapter *adapter,
+ struct virtchnl_eth_stats **pstats)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_queue_select q_stats;
+ struct avf_cmd_info args;
+ int err;
+
+ memset(&q_stats, 0, sizeof(q_stats));
+ q_stats.vsi_id = vf->vsi_res->vsi_id;
+ args.ops = VIRTCHNL_OP_GET_STATS;
+ args.in_args = (uint8_t *)&q_stats;
+ args.in_args_size = sizeof(q_stats);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
+ *pstats = NULL;
+ return err;
+ }
+ *pstats = (struct virtchnl_eth_stats *)args.out_buffer;
+ return 0;
+}
+
+int
+avf_config_promisc(struct avf_adapter *adapter,
+ bool enable_unicast,
+ bool enable_multicast)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_promisc_info promisc;
+ struct avf_cmd_info args;
+ int err;
+
+ promisc.flags = 0;
+ promisc.vsi_id = vf->vsi_res->vsi_id;
+
+ if (enable_unicast)
+ promisc.flags |= FLAG_VF_UNICAST_PROMISC;
+
+ if (enable_multicast)
+ promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
+
+ args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ args.in_args = (uint8_t *)&promisc;
+ args.in_args_size = sizeof(promisc);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ err = avf_execute_vf_cmd(adapter, &args);
+
+ if (err)
+ PMD_DRV_LOG(ERR,
+ "fail to execute command CONFIG_PROMISCUOUS_MODE");
+ return err;
+}
+
+int
+avf_add_del_eth_addr(struct avf_adapter *adapter, struct ether_addr *addr,
+ bool add)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) +
+ sizeof(struct virtchnl_ether_addr)];
+ struct avf_cmd_info args;
+ int err;
+
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = 1;
+ rte_memcpy(list->list[0].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR");
+ return err;
+}
+
+int
+avf_add_del_vlan(struct avf_adapter *adapter, uint16_t vlanid, bool add)
+{
+ struct virtchnl_vlan_filter_list *vlan_list;
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
+ sizeof(uint16_t)];
+ struct avf_cmd_info args;
+ int err;
+
+ vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list->vsi_id = vf->vsi_res->vsi_id;
+ vlan_list->num_elements = 1;
+ vlan_list->vlan_id[0] = vlanid;
+
+ args.ops = add ? VIRTCHNL_OP_ADD_VLAN : VIRTCHNL_OP_DEL_VLAN;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_VLAN" : "OP_DEL_VLAN");
+
+ return err;
+}
diff --git a/src/spdk/dpdk/drivers/net/avf/base/README b/src/spdk/dpdk/drivers/net/avf/base/README
new file mode 100644
index 00000000..4710ae27
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/README
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+Intel® AVF driver
+=================
+
+This directory contains source code of FreeBSD AVF driver of version
+cid-avf.2018.01.02.tar.gz released by the team which develops
+basic drivers for any AVF NIC. The directory of base/ contains the
+original source package.
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ avf_osdep.h
diff --git a/src/spdk/dpdk/drivers/net/avf/base/avf_adminq.c b/src/spdk/dpdk/drivers/net/avf/base/avf_adminq.c
new file mode 100644
index 00000000..616e2a9c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/avf_adminq.c
@@ -0,0 +1,1010 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "avf_status.h"
+#include "avf_type.h"
+#include "avf_register.h"
+#include "avf_adminq.h"
+#include "avf_prototype.h"
+
+/**
+ * avf_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+STATIC void avf_adminq_init_regs(struct avf_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (avf_is_vf(hw)) {
+ hw->aq.asq.tail = AVF_ATQT1;
+ hw->aq.asq.head = AVF_ATQH1;
+ hw->aq.asq.len = AVF_ATQLEN1;
+ hw->aq.asq.bal = AVF_ATQBAL1;
+ hw->aq.asq.bah = AVF_ATQBAH1;
+ hw->aq.arq.tail = AVF_ARQT1;
+ hw->aq.arq.head = AVF_ARQH1;
+ hw->aq.arq.len = AVF_ARQLEN1;
+ hw->aq.arq.bal = AVF_ARQBAL1;
+ hw->aq.arq.bah = AVF_ARQBAH1;
+ }
+}
+
+/**
+ * avf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+enum avf_status_code avf_alloc_adminq_asq_ring(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code;
+
+ ret_code = avf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ avf_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct avf_aq_desc)),
+ AVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = avf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct avf_asq_cmd_details)));
+ if (ret_code) {
+ avf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * avf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+enum avf_status_code avf_alloc_adminq_arq_ring(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code;
+
+ ret_code = avf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ avf_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct avf_aq_desc)),
+ AVF_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * avf_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+void avf_free_adminq_asq(struct avf_hw *hw)
+{
+ avf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * avf_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+void avf_free_adminq_arq(struct avf_hw *hw)
+{
+ avf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * avf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum avf_status_code avf_alloc_arq_bufs(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code;
+ struct avf_aq_desc *desc;
+ struct avf_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = avf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct avf_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct avf_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = avf_allocate_dma_mem(hw, bi,
+ avf_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ AVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = AVF_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = CPU_TO_LE16(AVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > AVF_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(AVF_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with Admin queue design, there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ CPU_TO_LE32(AVF_HI_DWORD(bi->pa));
+ desc->params.external.addr_low =
+ CPU_TO_LE32(AVF_LO_DWORD(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ avf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ avf_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * avf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum avf_status_code avf_alloc_asq_bufs(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code;
+ struct avf_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = avf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct avf_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct avf_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = avf_allocate_dma_mem(hw, bi,
+ avf_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ AVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ avf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ avf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * avf_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void avf_free_arq_bufs(struct avf_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ avf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ avf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ avf_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * avf_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void avf_free_asq_bufs(struct avf_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ avf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ avf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ avf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ avf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * avf_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+STATIC enum avf_status_code avf_config_asq_regs(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+
+ /* set starting point */
+#ifdef INTEGRATED_VF
+ if (avf_is_vf(hw))
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ AVF_ATQLEN1_ATQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ AVF_ATQLEN1_ATQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+ wr32(hw, hw->aq.asq.bal, AVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
+ wr32(hw, hw->aq.asq.bah, AVF_HI_DWORD(hw->aq.asq.desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.asq.bal);
+ if (reg != AVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
+ ret_code = AVF_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * avf_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+STATIC enum avf_status_code avf_config_arq_regs(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+
+ /* set starting point */
+#ifdef INTEGRATED_VF
+ if (avf_is_vf(hw))
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ AVF_ARQLEN1_ARQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ AVF_ARQLEN1_ARQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+ wr32(hw, hw->aq.arq.bal, AVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
+ wr32(hw, hw->aq.arq.bah, AVF_HI_DWORD(hw->aq.arq.desc_buf.pa));
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.arq.bal);
+ if (reg != AVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
+ ret_code = AVF_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * avf_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum avf_status_code avf_init_asq(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = AVF_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = AVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = avf_alloc_adminq_asq_ring(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = avf_alloc_asq_bufs(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = avf_config_asq_regs(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ avf_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * avf_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum avf_status_code avf_init_arq(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = AVF_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = AVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = avf_alloc_adminq_arq_ring(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = avf_alloc_arq_bufs(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = avf_config_arq_regs(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ avf_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * avf_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+enum avf_status_code avf_shutdown_asq(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+
+ avf_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = AVF_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+ wr32(hw, hw->aq.asq.bal, 0);
+ wr32(hw, hw->aq.asq.bah, 0);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ avf_free_asq_bufs(hw);
+
+shutdown_asq_out:
+ avf_release_spinlock(&hw->aq.asq_spinlock);
+ return ret_code;
+}
+
+/**
+ * avf_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+enum avf_status_code avf_shutdown_arq(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+
+ avf_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = AVF_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+ wr32(hw, hw->aq.arq.bal, 0);
+ wr32(hw, hw->aq.arq.bah, 0);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ avf_free_arq_bufs(hw);
+
+shutdown_arq_out:
+ avf_release_spinlock(&hw->aq.arq_spinlock);
+ return ret_code;
+}
+
+/**
+ * avf_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+enum avf_status_code avf_init_adminq(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = AVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+ avf_init_spinlock(&hw->aq.asq_spinlock);
+ avf_init_spinlock(&hw->aq.arq_spinlock);
+
+ /* Set up register offsets */
+ avf_adminq_init_regs(hw);
+
+ /* setup ASQ command write back timeout */
+ hw->aq.asq_cmd_timeout = AVF_ASQ_CMD_TIMEOUT;
+
+ /* allocate the ASQ */
+ ret_code = avf_init_asq(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_destroy_spinlocks;
+
+ /* allocate the ARQ */
+ ret_code = avf_init_arq(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_free_asq;
+
+ ret_code = AVF_SUCCESS;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_asq:
+ avf_shutdown_asq(hw);
+init_adminq_destroy_spinlocks:
+ avf_destroy_spinlock(&hw->aq.asq_spinlock);
+ avf_destroy_spinlock(&hw->aq.arq_spinlock);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * avf_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+enum avf_status_code avf_shutdown_adminq(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+
+ if (avf_check_asq_alive(hw))
+ avf_aq_queue_shutdown(hw, true);
+
+ avf_shutdown_asq(hw);
+ avf_shutdown_arq(hw);
+ avf_destroy_spinlock(&hw->aq.asq_spinlock);
+ avf_destroy_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->nvm_buff.va)
+ avf_free_virt_mem(hw, &hw->nvm_buff);
+
+ return ret_code;
+}
+
+/**
+ * avf_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * returns the number of free desc
+ **/
+u16 avf_clean_asq(struct avf_hw *hw)
+{
+ struct avf_adminq_ring *asq = &(hw->aq.asq);
+ struct avf_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct avf_aq_desc desc_cb;
+ struct avf_aq_desc *desc;
+
+ desc = AVF_ADMINQ_DESC(*asq, ntc);
+ details = AVF_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+
+ if (details->callback) {
+ AVF_ADMINQ_CALLBACK cb_func =
+ (AVF_ADMINQ_CALLBACK)details->callback;
+ avf_memcpy(&desc_cb, desc, sizeof(struct avf_aq_desc),
+ AVF_DMA_TO_DMA);
+ cb_func(hw, &desc_cb);
+ }
+ avf_memset(desc, 0, sizeof(*desc), AVF_DMA_MEM);
+ avf_memset(details, 0, sizeof(*details), AVF_NONDMA_MEM);
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = AVF_ADMINQ_DESC(*asq, ntc);
+ details = AVF_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return AVF_DESC_UNUSED(asq);
+}
+
+/**
+ * avf_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool avf_asq_done(struct avf_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+
+}
+
+/**
+ * avf_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc
+ **/
+enum avf_status_code avf_asq_send_command(struct avf_hw *hw,
+ struct avf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+ struct avf_dma_mem *dma_buff = NULL;
+ struct avf_asq_cmd_details *details;
+ struct avf_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+ u32 val = 0;
+
+ avf_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ hw->aq.asq_last_status = AVF_AQ_RC_OK;
+
+ if (hw->aq.asq.count == 0) {
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = AVF_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: head overrun at %d\n", val);
+ status = AVF_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ details = AVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ avf_memcpy(details,
+ cmd_details,
+ sizeof(struct avf_asq_cmd_details),
+ AVF_NONDMA_TO_NONDMA);
+
+ /* If the cmd_details are defined copy the cookie. The
+ * CPU_TO_LE32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ CPU_TO_LE32(AVF_HI_DWORD(details->cookie));
+ desc->cookie_low =
+ CPU_TO_LE32(AVF_LO_DWORD(details->cookie));
+ }
+ } else {
+ avf_memset(details, 0,
+ sizeof(struct avf_asq_cmd_details),
+ AVF_NONDMA_MEM);
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~CPU_TO_LE16(details->flags_dis);
+ desc->flags |= CPU_TO_LE16(details->flags_ena);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ avf_debug(hw,
+ AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = AVF_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ avf_debug(hw,
+ AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = AVF_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (avf_clean_asq(hw) == 0) {
+ avf_debug(hw,
+ AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = AVF_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = AVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ avf_memcpy(desc_on_ring, desc, sizeof(struct avf_aq_desc),
+ AVF_NONDMA_TO_DMA);
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ avf_memcpy(dma_buff->va, buff, buff_size,
+ AVF_NONDMA_TO_DMA);
+ desc_on_ring->datalen = CPU_TO_LE16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ CPU_TO_LE32(AVF_HI_DWORD(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ CPU_TO_LE32(AVF_LO_DWORD(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ avf_debug_aq(hw, AVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (avf_asq_done(hw))
+ break;
+ avf_usec_delay(50);
+ total_delay += 50;
+ } while (total_delay < hw->aq.asq_cmd_timeout);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (avf_asq_done(hw)) {
+ avf_memcpy(desc, desc_on_ring, sizeof(struct avf_aq_desc),
+ AVF_DMA_TO_NONDMA);
+ if (buff != NULL)
+ avf_memcpy(buff, dma_buff->va, buff_size,
+ AVF_DMA_TO_NONDMA);
+ retval = LE16_TO_CPU(desc->retval);
+ if (retval != 0) {
+ avf_debug(hw,
+ AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum avf_admin_queue_err)retval == AVF_AQ_RC_OK)
+ status = AVF_SUCCESS;
+ else
+ status = AVF_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum avf_admin_queue_err)retval;
+ }
+
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: desc and buffer writeback:\n");
+ avf_debug_aq(hw, AVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ avf_memcpy(details->wb_desc, desc_on_ring,
+ sizeof(struct avf_aq_desc), AVF_DMA_TO_NONDMA);
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ if (rd32(hw, hw->aq.asq.len) & AVF_ATQLEN1_ATQCRIT_MASK) {
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = AVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = AVF_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+ }
+
+asq_send_command_error:
+ avf_release_spinlock(&hw->aq.asq_spinlock);
+ return status;
+}
+
+/**
+ * avf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void avf_fill_default_direct_cmd_desc(struct avf_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ avf_memset((void *)desc, 0, sizeof(struct avf_aq_desc),
+ AVF_NONDMA_MEM);
+ desc->opcode = CPU_TO_LE16(opcode);
+ desc->flags = CPU_TO_LE16(AVF_AQ_FLAG_SI);
+}
+
+/**
+ * avf_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+enum avf_status_code avf_clean_arq_element(struct avf_hw *hw,
+ struct avf_arq_event_info *e,
+ u16 *pending)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct avf_aq_desc *desc;
+ struct avf_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* pre-clean the event info */
+ avf_memset(&e->desc, 0, sizeof(e->desc), AVF_NONDMA_MEM);
+
+ /* take the lock before we start messing with the ring */
+ avf_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = AVF_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
+ /* set next_to_use to head */
+#ifdef INTEGRATED_VF
+ if (!avf_is_vf(hw))
+ ntu = rd32(hw, hw->aq.arq.head) & AVF_PF_ARQH_ARQH_MASK;
+ else
+ ntu = rd32(hw, hw->aq.arq.head) & AVF_ARQH1_ARQH_MASK;
+#else
+ ntu = rd32(hw, hw->aq.arq.head) & AVF_ARQH1_ARQH_MASK;
+#endif /* INTEGRATED_VF */
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = AVF_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = AVF_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+
+ hw->aq.arq_last_status =
+ (enum avf_admin_queue_err)LE16_TO_CPU(desc->retval);
+ flags = LE16_TO_CPU(desc->flags);
+ if (flags & AVF_AQ_FLAG_ERR) {
+ ret_code = AVF_ERR_ADMIN_QUEUE_ERROR;
+ avf_debug(hw,
+ AVF_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ }
+
+ avf_memcpy(&e->desc, desc, sizeof(struct avf_aq_desc),
+ AVF_DMA_TO_NONDMA);
+ datalen = LE16_TO_CPU(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
+ avf_memcpy(e->msg_buf,
+ hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_len, AVF_DMA_TO_NONDMA);
+
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ avf_debug_aq(hw, AVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
+
+ /* Restore the original datalen and buffer address in the desc,
+ * FW updates datalen to indicate the event message
+ * size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ avf_memset((void *)desc, 0, sizeof(struct avf_aq_desc), AVF_DMA_MEM);
+
+ desc->flags = CPU_TO_LE16(AVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > AVF_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(AVF_AQ_FLAG_LB);
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->params.external.addr_high = CPU_TO_LE32(AVF_HI_DWORD(bi->pa));
+ desc->params.external.addr_low = CPU_TO_LE32(AVF_LO_DWORD(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+clean_arq_element_err:
+ avf_release_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
+
diff --git a/src/spdk/dpdk/drivers/net/avf/base/avf_adminq.h b/src/spdk/dpdk/drivers/net/avf/base/avf_adminq.h
new file mode 100644
index 00000000..d7d242a9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/avf_adminq.h
@@ -0,0 +1,166 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_ADMINQ_H_
+#define _AVF_ADMINQ_H_
+
+#include "avf_osdep.h"
+#include "avf_status.h"
+#include "avf_adminq_cmd.h"
+
+#define AVF_ADMINQ_DESC(R, i) \
+ (&(((struct avf_aq_desc *)((R).desc_buf.va))[i]))
+
+#define AVF_ADMINQ_DESC_ALIGNMENT 4096
+
+struct avf_adminq_ring {
+ struct avf_virt_mem dma_head; /* space for dma structures */
+ struct avf_dma_mem desc_buf; /* descriptor ring memory */
+ struct avf_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct avf_dma_mem *asq_bi;
+ struct avf_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+};
+
+/* ASQ transaction details */
+struct avf_asq_cmd_details {
+ void *callback; /* cast from type AVF_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+ struct avf_aq_desc *wb_desc;
+};
+
+#define AVF_ADMINQ_DETAILS(R, i) \
+ (&(((struct avf_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct avf_arq_event_info {
+ struct avf_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct avf_adminq_info {
+ struct avf_adminq_ring arq; /* receive queue */
+ struct avf_adminq_ring asq; /* send queue */
+ u32 asq_cmd_timeout; /* send queue cmd write back timeout*/
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct avf_spinlock asq_spinlock; /* Send queue spinlock */
+ struct avf_spinlock arq_spinlock; /* Receive queue spinlock */
+
+ /* last status values on send and receive queues */
+ enum avf_admin_queue_err asq_last_status;
+ enum avf_admin_queue_err arq_last_status;
+};
+
+/**
+ * avf_aq_rc_to_posix - convert errors to user-land codes
+ * aq_ret: AdminQ handler error code can override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
+ **/
+STATIC INLINE int avf_aq_rc_to_posix(int aq_ret, int aq_rc)
+{
+ int aq_to_posix[] = {
+ 0, /* AVF_AQ_RC_OK */
+ -EPERM, /* AVF_AQ_RC_EPERM */
+ -ENOENT, /* AVF_AQ_RC_ENOENT */
+ -ESRCH, /* AVF_AQ_RC_ESRCH */
+ -EINTR, /* AVF_AQ_RC_EINTR */
+ -EIO, /* AVF_AQ_RC_EIO */
+ -ENXIO, /* AVF_AQ_RC_ENXIO */
+ -E2BIG, /* AVF_AQ_RC_E2BIG */
+ -EAGAIN, /* AVF_AQ_RC_EAGAIN */
+ -ENOMEM, /* AVF_AQ_RC_ENOMEM */
+ -EACCES, /* AVF_AQ_RC_EACCES */
+ -EFAULT, /* AVF_AQ_RC_EFAULT */
+ -EBUSY, /* AVF_AQ_RC_EBUSY */
+ -EEXIST, /* AVF_AQ_RC_EEXIST */
+ -EINVAL, /* AVF_AQ_RC_EINVAL */
+ -ENOTTY, /* AVF_AQ_RC_ENOTTY */
+ -ENOSPC, /* AVF_AQ_RC_ENOSPC */
+ -ENOSYS, /* AVF_AQ_RC_ENOSYS */
+ -ERANGE, /* AVF_AQ_RC_ERANGE */
+ -EPIPE, /* AVF_AQ_RC_EFLUSHED */
+ -ESPIPE, /* AVF_AQ_RC_BAD_ADDR */
+ -EROFS, /* AVF_AQ_RC_EMODE */
+ -EFBIG, /* AVF_AQ_RC_EFBIG */
+ };
+
+ /* aq_rc is invalid if AQ timed out */
+ if (aq_ret == AVF_ERR_ADMIN_QUEUE_TIMEOUT)
+ return -EAGAIN;
+
+ if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
+ return -ERANGE;
+
+ return aq_to_posix[aq_rc];
+}
+
+/* general information */
+#define AVF_AQ_LARGE_BUF 512
+#define AVF_ASQ_CMD_TIMEOUT 250000 /* usecs */
+
+void avf_fill_default_direct_cmd_desc(struct avf_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _AVF_ADMINQ_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/base/avf_adminq_cmd.h b/src/spdk/dpdk/drivers/net/avf/base/avf_adminq_cmd.h
new file mode 100644
index 00000000..1709f317
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/avf_adminq_cmd.h
@@ -0,0 +1,2842 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_ADMINQ_CMD_H_
+#define _AVF_ADMINQ_CMD_H_
+
+/* This header file defines the avf Admin Queue commands and is shared between
+ * avf Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+
+#define AVF_FW_API_VERSION_MAJOR 0x0001
+#define AVF_FW_API_VERSION_MINOR_X722 0x0005
+#define AVF_FW_API_VERSION_MINOR_X710 0x0007
+
+#define AVF_FW_MINOR_VERSION(_h) ((_h)->mac.type == AVF_MAC_XL710 ? \
+ AVF_FW_API_VERSION_MINOR_X710 : \
+ AVF_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define AVF_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+
+struct avf_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets*/
+#define AVF_AQ_FLAG_DD_SHIFT 0
+#define AVF_AQ_FLAG_CMP_SHIFT 1
+#define AVF_AQ_FLAG_ERR_SHIFT 2
+#define AVF_AQ_FLAG_VFE_SHIFT 3
+#define AVF_AQ_FLAG_LB_SHIFT 9
+#define AVF_AQ_FLAG_RD_SHIFT 10
+#define AVF_AQ_FLAG_VFC_SHIFT 11
+#define AVF_AQ_FLAG_BUF_SHIFT 12
+#define AVF_AQ_FLAG_SI_SHIFT 13
+#define AVF_AQ_FLAG_EI_SHIFT 14
+#define AVF_AQ_FLAG_FE_SHIFT 15
+
+#define AVF_AQ_FLAG_DD (1 << AVF_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define AVF_AQ_FLAG_CMP (1 << AVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define AVF_AQ_FLAG_ERR (1 << AVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define AVF_AQ_FLAG_VFE (1 << AVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define AVF_AQ_FLAG_LB (1 << AVF_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define AVF_AQ_FLAG_RD (1 << AVF_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define AVF_AQ_FLAG_VFC (1 << AVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define AVF_AQ_FLAG_BUF (1 << AVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define AVF_AQ_FLAG_SI (1 << AVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define AVF_AQ_FLAG_EI (1 << AVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define AVF_AQ_FLAG_FE (1 << AVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+
+/* error codes */
+enum avf_admin_queue_err {
+ AVF_AQ_RC_OK = 0, /* success */
+ AVF_AQ_RC_EPERM = 1, /* Operation not permitted */
+ AVF_AQ_RC_ENOENT = 2, /* No such element */
+ AVF_AQ_RC_ESRCH = 3, /* Bad opcode */
+ AVF_AQ_RC_EINTR = 4, /* operation interrupted */
+ AVF_AQ_RC_EIO = 5, /* I/O error */
+ AVF_AQ_RC_ENXIO = 6, /* No such resource */
+ AVF_AQ_RC_E2BIG = 7, /* Arg too long */
+ AVF_AQ_RC_EAGAIN = 8, /* Try again */
+ AVF_AQ_RC_ENOMEM = 9, /* Out of memory */
+ AVF_AQ_RC_EACCES = 10, /* Permission denied */
+ AVF_AQ_RC_EFAULT = 11, /* Bad address */
+ AVF_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ AVF_AQ_RC_EEXIST = 13, /* object already exists */
+ AVF_AQ_RC_EINVAL = 14, /* Invalid argument */
+ AVF_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ AVF_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ AVF_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ AVF_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ AVF_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ AVF_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ AVF_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ AVF_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum avf_admin_queue_opc {
+ /* aq commands */
+ avf_aqc_opc_get_version = 0x0001,
+ avf_aqc_opc_driver_version = 0x0002,
+ avf_aqc_opc_queue_shutdown = 0x0003,
+ avf_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ avf_aqc_opc_request_resource = 0x0008,
+ avf_aqc_opc_release_resource = 0x0009,
+
+ avf_aqc_opc_list_func_capabilities = 0x000A,
+ avf_aqc_opc_list_dev_capabilities = 0x000B,
+
+ /* Proxy commands */
+ avf_aqc_opc_set_proxy_config = 0x0104,
+ avf_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
+ /* LAA */
+ avf_aqc_opc_mac_address_read = 0x0107,
+ avf_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ avf_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* WoL commands */
+ avf_aqc_opc_set_wol_filter = 0x0120,
+ avf_aqc_opc_get_wake_reason = 0x0121,
+ avf_aqc_opc_clear_all_wol_filters = 0x025E,
+
+ /* internal switch commands */
+ avf_aqc_opc_get_switch_config = 0x0200,
+ avf_aqc_opc_add_statistics = 0x0201,
+ avf_aqc_opc_remove_statistics = 0x0202,
+ avf_aqc_opc_set_port_parameters = 0x0203,
+ avf_aqc_opc_get_switch_resource_alloc = 0x0204,
+ avf_aqc_opc_set_switch_config = 0x0205,
+ avf_aqc_opc_rx_ctl_reg_read = 0x0206,
+ avf_aqc_opc_rx_ctl_reg_write = 0x0207,
+
+ avf_aqc_opc_add_vsi = 0x0210,
+ avf_aqc_opc_update_vsi_parameters = 0x0211,
+ avf_aqc_opc_get_vsi_parameters = 0x0212,
+
+ avf_aqc_opc_add_pv = 0x0220,
+ avf_aqc_opc_update_pv_parameters = 0x0221,
+ avf_aqc_opc_get_pv_parameters = 0x0222,
+
+ avf_aqc_opc_add_veb = 0x0230,
+ avf_aqc_opc_update_veb_parameters = 0x0231,
+ avf_aqc_opc_get_veb_parameters = 0x0232,
+
+ avf_aqc_opc_delete_element = 0x0243,
+
+ avf_aqc_opc_add_macvlan = 0x0250,
+ avf_aqc_opc_remove_macvlan = 0x0251,
+ avf_aqc_opc_add_vlan = 0x0252,
+ avf_aqc_opc_remove_vlan = 0x0253,
+ avf_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ avf_aqc_opc_add_tag = 0x0255,
+ avf_aqc_opc_remove_tag = 0x0256,
+ avf_aqc_opc_add_multicast_etag = 0x0257,
+ avf_aqc_opc_remove_multicast_etag = 0x0258,
+ avf_aqc_opc_update_tag = 0x0259,
+ avf_aqc_opc_add_control_packet_filter = 0x025A,
+ avf_aqc_opc_remove_control_packet_filter = 0x025B,
+ avf_aqc_opc_add_cloud_filters = 0x025C,
+ avf_aqc_opc_remove_cloud_filters = 0x025D,
+ avf_aqc_opc_clear_wol_switch_filters = 0x025E,
+ avf_aqc_opc_replace_cloud_filters = 0x025F,
+
+ avf_aqc_opc_add_mirror_rule = 0x0260,
+ avf_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* Dynamic Device Personalization */
+ avf_aqc_opc_write_personalization_profile = 0x0270,
+ avf_aqc_opc_get_personalization_profile_list = 0x0271,
+
+ /* DCB commands */
+ avf_aqc_opc_dcb_ignore_pfc = 0x0301,
+ avf_aqc_opc_dcb_updated = 0x0302,
+ avf_aqc_opc_set_dcb_parameters = 0x0303,
+
+ /* TX scheduler */
+ avf_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ avf_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ avf_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ avf_aqc_opc_query_vsi_bw_config = 0x0408,
+ avf_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ avf_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ avf_aqc_opc_enable_switching_comp_ets = 0x0413,
+ avf_aqc_opc_modify_switching_comp_ets = 0x0414,
+ avf_aqc_opc_disable_switching_comp_ets = 0x0415,
+ avf_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ avf_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ avf_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ avf_aqc_opc_query_port_ets_config = 0x0419,
+ avf_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ avf_aqc_opc_suspend_port_tx = 0x041B,
+ avf_aqc_opc_resume_port_tx = 0x041C,
+ avf_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ avf_aqc_opc_query_hmc_resource_profile = 0x0500,
+ avf_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands*/
+
+ /* phy commands*/
+ avf_aqc_opc_get_phy_abilities = 0x0600,
+ avf_aqc_opc_set_phy_config = 0x0601,
+ avf_aqc_opc_set_mac_config = 0x0603,
+ avf_aqc_opc_set_link_restart_an = 0x0605,
+ avf_aqc_opc_get_link_status = 0x0607,
+ avf_aqc_opc_set_phy_int_mask = 0x0613,
+ avf_aqc_opc_get_local_advt_reg = 0x0614,
+ avf_aqc_opc_set_local_advt_reg = 0x0615,
+ avf_aqc_opc_get_partner_advt = 0x0616,
+ avf_aqc_opc_set_lb_modes = 0x0618,
+ avf_aqc_opc_get_phy_wol_caps = 0x0621,
+ avf_aqc_opc_set_phy_debug = 0x0622,
+ avf_aqc_opc_upload_ext_phy_fm = 0x0625,
+ avf_aqc_opc_run_phy_activity = 0x0626,
+ avf_aqc_opc_set_phy_register = 0x0628,
+ avf_aqc_opc_get_phy_register = 0x0629,
+
+ /* NVM commands */
+ avf_aqc_opc_nvm_read = 0x0701,
+ avf_aqc_opc_nvm_erase = 0x0702,
+ avf_aqc_opc_nvm_update = 0x0703,
+ avf_aqc_opc_nvm_config_read = 0x0704,
+ avf_aqc_opc_nvm_config_write = 0x0705,
+ avf_aqc_opc_nvm_progress = 0x0706,
+ avf_aqc_opc_oem_post_update = 0x0720,
+ avf_aqc_opc_thermal_sensor = 0x0721,
+
+ /* virtualization commands */
+ avf_aqc_opc_send_msg_to_pf = 0x0801,
+ avf_aqc_opc_send_msg_to_vf = 0x0802,
+ avf_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ avf_aqc_opc_alternate_write = 0x0900,
+ avf_aqc_opc_alternate_write_indirect = 0x0901,
+ avf_aqc_opc_alternate_read = 0x0902,
+ avf_aqc_opc_alternate_read_indirect = 0x0903,
+ avf_aqc_opc_alternate_write_done = 0x0904,
+ avf_aqc_opc_alternate_set_mode = 0x0905,
+ avf_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ avf_aqc_opc_lldp_get_mib = 0x0A00,
+ avf_aqc_opc_lldp_update_mib = 0x0A01,
+ avf_aqc_opc_lldp_add_tlv = 0x0A02,
+ avf_aqc_opc_lldp_update_tlv = 0x0A03,
+ avf_aqc_opc_lldp_delete_tlv = 0x0A04,
+ avf_aqc_opc_lldp_stop = 0x0A05,
+ avf_aqc_opc_lldp_start = 0x0A06,
+ avf_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ avf_aqc_opc_lldp_set_local_mib = 0x0A08,
+ avf_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+
+ /* Tunnel commands */
+ avf_aqc_opc_add_udp_tunnel = 0x0B00,
+ avf_aqc_opc_del_udp_tunnel = 0x0B01,
+ avf_aqc_opc_set_rss_key = 0x0B02,
+ avf_aqc_opc_set_rss_lut = 0x0B03,
+ avf_aqc_opc_get_rss_key = 0x0B04,
+ avf_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* Async Events */
+ avf_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ avf_aqc_opc_oem_parameter_change = 0xFE00,
+ avf_aqc_opc_oem_device_status_change = 0xFE01,
+ avf_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ avf_aqc_opc_oem_ocbb_initialize = 0xFE03,
+
+ /* debug commands */
+ avf_aqc_opc_debug_read_reg = 0xFF03,
+ avf_aqc_opc_debug_write_reg = 0xFF04,
+ avf_aqc_opc_debug_modify_reg = 0xFF07,
+ avf_aqc_opc_debug_dump_internals = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define AVF_CHECK_STRUCT_LEN(n, X) enum avf_static_assert_enum_##X \
+ { avf_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define AVF_CHECK_CMD_LENGTH(X) AVF_CHECK_STRUCT_LEN(16, X)
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct avf_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct avf_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct avf_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define AVF_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_queue_shutdown);
+
+/* Set PF context (0x0004, direct) */
+struct avf_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_pf_context);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define AVF_AQ_RESOURCE_NVM 1
+#define AVF_AQ_RESOURCE_SDP 2
+#define AVF_AQ_RESOURCE_ACCESS_READ 1
+#define AVF_AQ_RESOURCE_ACCESS_WRITE 2
+#define AVF_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define AVF_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct avf_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct avf_aqc_list_capabilites {
+ u8 command_flags;
+#define AVF_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_list_capabilites);
+
+struct avf_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define AVF_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define AVF_AQ_CAP_ID_MNG_MODE 0x0002
+#define AVF_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define AVF_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define AVF_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define AVF_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define AVF_AQ_CAP_ID_WOL_AND_PROXY 0x0008
+#define AVF_AQ_CAP_ID_SRIOV 0x0012
+#define AVF_AQ_CAP_ID_VF 0x0013
+#define AVF_AQ_CAP_ID_VMDQ 0x0014
+#define AVF_AQ_CAP_ID_8021QBG 0x0015
+#define AVF_AQ_CAP_ID_8021QBR 0x0016
+#define AVF_AQ_CAP_ID_VSI 0x0017
+#define AVF_AQ_CAP_ID_DCB 0x0018
+#define AVF_AQ_CAP_ID_FCOE 0x0021
+#define AVF_AQ_CAP_ID_ISCSI 0x0022
+#define AVF_AQ_CAP_ID_RSS 0x0040
+#define AVF_AQ_CAP_ID_RXQ 0x0041
+#define AVF_AQ_CAP_ID_TXQ 0x0042
+#define AVF_AQ_CAP_ID_MSIX 0x0043
+#define AVF_AQ_CAP_ID_VF_MSIX 0x0044
+#define AVF_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define AVF_AQ_CAP_ID_1588 0x0046
+#define AVF_AQ_CAP_ID_IWARP 0x0051
+#define AVF_AQ_CAP_ID_LED 0x0061
+#define AVF_AQ_CAP_ID_SDP 0x0062
+#define AVF_AQ_CAP_ID_MDIO 0x0063
+#define AVF_AQ_CAP_ID_WSR_PROT 0x0064
+#define AVF_AQ_CAP_ID_NVM_MGMT 0x0080
+#define AVF_AQ_CAP_ID_FLEX10 0x00F1
+#define AVF_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct avf_aqc_cppm_configuration {
+ __le16 command_flags;
+#define AVF_AQ_CPPM_EN_LTRC 0x0800
+#define AVF_AQ_CPPM_EN_DMCTH 0x1000
+#define AVF_AQ_CPPM_EN_DMCTLX 0x2000
+#define AVF_AQ_CPPM_EN_HPTC 0x4000
+#define AVF_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct avf_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define AVF_AQ_ARP_INIT_IPV4 0x0800
+#define AVF_AQ_ARP_UNSUP_CTL 0x1000
+#define AVF_AQ_ARP_ENA 0x2000
+#define AVF_AQ_ARP_ADD_IPV4 0x4000
+#define AVF_AQ_ARP_DEL_IPV4 0x8000
+ __le16 table_id;
+ __le32 enabled_offloads;
+#define AVF_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
+#define AVF_AQ_ARP_OFFLOAD_ENABLE 0x00000800
+ __le32 ip_addr;
+ u8 mac_addr[6];
+ u8 reserved[2];
+};
+
+AVF_CHECK_STRUCT_LEN(0x14, avf_aqc_arp_proxy_data);
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct avf_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define AVF_AQ_NS_PROXY_ADD_0 0x0001
+#define AVF_AQ_NS_PROXY_DEL_0 0x0002
+#define AVF_AQ_NS_PROXY_ADD_1 0x0004
+#define AVF_AQ_NS_PROXY_DEL_1 0x0008
+#define AVF_AQ_NS_PROXY_ADD_IPV6_0 0x0010
+#define AVF_AQ_NS_PROXY_DEL_IPV6_0 0x0020
+#define AVF_AQ_NS_PROXY_ADD_IPV6_1 0x0040
+#define AVF_AQ_NS_PROXY_DEL_IPV6_1 0x0080
+#define AVF_AQ_NS_PROXY_COMMAND_SEQ 0x0100
+#define AVF_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
+#define AVF_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
+#define AVF_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
+#define AVF_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+AVF_CHECK_STRUCT_LEN(0x3c, avf_aqc_ns_proxy_data);
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct avf_aqc_mng_laa {
+ __le16 command_flags;
+#define AVF_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_mng_laa);
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct avf_aqc_mac_address_read {
+ __le16 command_flags;
+#define AVF_AQC_LAN_ADDR_VALID 0x10
+#define AVF_AQC_SAN_ADDR_VALID 0x20
+#define AVF_AQC_PORT_ADDR_VALID 0x40
+#define AVF_AQC_WOL_ADDR_VALID 0x80
+#define AVF_AQC_MC_MAG_EN_VALID 0x100
+#define AVF_AQC_WOL_PRESERVE_STATUS 0x200
+#define AVF_AQC_ADDR_VALID_MASK 0x3F0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_mac_address_read);
+
+struct avf_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+AVF_CHECK_STRUCT_LEN(24, avf_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct avf_aqc_mac_address_write {
+ __le16 command_flags;
+#define AVF_AQC_MC_MAG_EN 0x0100
+#define AVF_AQC_WOL_PRESERVE_ON_PFR 0x0200
+#define AVF_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define AVF_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define AVF_AQC_WRITE_TYPE_PORT 0x8000
+#define AVF_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+#define AVF_AQC_WRITE_TYPE_MASK 0xC000
+
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct avf_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_clear_pxe);
+
+/* Set WoL Filter (0x0120) */
+
+struct avf_aqc_set_wol_filter {
+ __le16 filter_index;
+#define AVF_AQC_MAX_NUM_WOL_FILTERS 8
+#define AVF_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define AVF_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ AVF_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define AVF_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define AVF_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ AVF_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+ __le16 cmd_flags;
+#define AVF_AQC_SET_WOL_FILTER 0x8000
+#define AVF_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define AVF_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
+#define AVF_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define AVF_AQC_SET_WOL_FILTER_ACTION_SET 1
+ __le16 valid_flags;
+#define AVF_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define AVF_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_wol_filter);
+
+struct avf_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+AVF_CHECK_STRUCT_LEN(0x90, avf_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct avf_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+#define AVF_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define AVF_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ AVF_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define AVF_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define AVF_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ AVF_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+ u8 reserved_2[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_wake_reason_completion);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an seid and a buffer in the
+ * command
+ */
+struct avf_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses avf_aqc_switch_seid for the descriptor
+ */
+struct avf_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_switch_config_header_resp);
+
+struct avf_aqc_switch_config_element_resp {
+ u8 element_type;
+#define AVF_AQ_SW_ELEM_TYPE_MAC 1
+#define AVF_AQ_SW_ELEM_TYPE_PF 2
+#define AVF_AQ_SW_ELEM_TYPE_VF 3
+#define AVF_AQ_SW_ELEM_TYPE_EMP 4
+#define AVF_AQ_SW_ELEM_TYPE_BMC 5
+#define AVF_AQ_SW_ELEM_TYPE_PV 16
+#define AVF_AQ_SW_ELEM_TYPE_VEB 17
+#define AVF_AQ_SW_ELEM_TYPE_PA 18
+#define AVF_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define AVF_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define AVF_AQ_CONN_TYPE_REGULAR 0x1
+#define AVF_AQ_CONN_TYPE_DEFAULT 0x2
+#define AVF_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+AVF_CHECK_STRUCT_LEN(0x10, avf_aqc_switch_config_element_resp);
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements are returned in the response buffer
+ * the first in the array is the header, remainder are elements
+ */
+struct avf_aqc_get_switch_config_resp {
+ struct avf_aqc_get_switch_config_header_resp header;
+ struct avf_aqc_switch_config_element_resp element[1];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_get_switch_config_resp);
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct avf_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct avf_aqc_set_port_parameters {
+ __le16 command_flags;
+#define AVF_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define AVF_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define AVF_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+#define AVF_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define AVF_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct avf_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct avf_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define AVF_AQ_RESOURCE_TYPE_VEB 0x0
+#define AVF_AQ_RESOURCE_TYPE_VSI 0x1
+#define AVF_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define AVF_AQ_RESOURCE_TYPE_STAG 0x3
+#define AVF_AQ_RESOURCE_TYPE_ETAG 0x4
+#define AVF_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define AVF_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define AVF_AQ_RESOURCE_TYPE_VLAN 0x7
+#define AVF_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define AVF_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define AVF_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define AVF_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define AVF_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define AVF_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define AVF_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define AVF_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define AVF_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define AVF_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define AVF_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+AVF_CHECK_STRUCT_LEN(0x10, avf_aqc_switch_resource_alloc_element_resp);
+
+/* Set Switch Configuration (direct 0x0205) */
+struct avf_aqc_set_switch_config {
+ __le16 flags;
+/* flags used for both fields below */
+#define AVF_AQ_SET_SWITCH_CFG_PROMISC 0x0001
+#define AVF_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
+#define AVF_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004
+ __le16 valid_flags;
+ /* The ethertype in switch_tag is dropped on ingress and used
+ * internally by the switch. Set this to zero for the default
+ * of 0x88a8 (802.1ad). Should be zero for firmware API
+ * versions lower than 1.7.
+ */
+ __le16 switch_tag;
+ /* The ethertypes in first_tag and second_tag are used to
+ * match the outer and inner VLAN tags (respectively) when HW
+ * double VLAN tagging is enabled via the set port parameters
+ * AQ command. Otherwise these are both ignored. Set them to
+ * zero for their defaults of 0x8100 (802.1Q). Should be zero
+ * for firmware API versions lower than 1.7.
+ */
+ __le16 first_tag;
+ __le16 second_tag;
+ u8 reserved[6];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_switch_config);
+
+/* Read Receive control registers (direct 0x0206)
+ * Write Receive control registers (direct 0x0207)
+ * used for accessing Rx control registers that can be
+ * slow and need special handling when under high Rx load
+ */
+struct avf_aqc_rx_ctl_reg_read_write {
+ __le32 reserved1;
+ __le32 address;
+ __le32 reserved2;
+ __le32 value;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_rx_ctl_reg_read_write);
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct avf_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
+ */
+struct avf_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define AVF_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define AVF_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define AVF_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define AVF_AQ_VSI_TYPE_SHIFT 0x0
+#define AVF_AQ_VSI_TYPE_MASK (0x3 << AVF_AQ_VSI_TYPE_SHIFT)
+#define AVF_AQ_VSI_TYPE_VF 0x0
+#define AVF_AQ_VSI_TYPE_VMDQ2 0x1
+#define AVF_AQ_VSI_TYPE_PF 0x2
+#define AVF_AQ_VSI_TYPE_EMP_MNG 0x3
+#define AVF_AQ_VSI_FLAG_CASCADED_PV 0x4
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_get_update_vsi);
+
+struct avf_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_get_update_vsi_completion);
+
+struct avf_aqc_vsi_properties_data {
+ /* first 96 byte are written by SW */
+ __le16 valid_sections;
+#define AVF_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define AVF_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define AVF_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define AVF_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define AVF_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define AVF_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define AVF_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define AVF_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define AVF_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define AVF_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define AVF_AQ_VSI_SW_ID_SHIFT 0x0000
+#define AVF_AQ_VSI_SW_ID_MASK (0xFFF << AVF_AQ_VSI_SW_ID_SHIFT)
+#define AVF_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define AVF_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define AVF_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define AVF_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define AVF_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define AVF_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define AVF_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define AVF_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ AVF_AQ_VSI_PVLAN_MODE_SHIFT)
+#define AVF_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define AVF_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define AVF_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define AVF_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define AVF_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define AVF_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ AVF_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define AVF_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define AVF_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define AVF_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define AVF_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define AVF_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define AVF_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define AVF_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define AVF_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define AVF_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define AVF_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define AVF_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define AVF_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define AVF_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define AVF_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define AVF_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ AVF_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define AVF_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define AVF_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define AVF_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define AVF_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define AVF_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define AVF_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define AVF_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define AVF_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define AVF_AQ_VSI_QUEUE_SHIFT 0x0
+#define AVF_AQ_VSI_QUEUE_MASK (0x7FF << AVF_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define AVF_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define AVF_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ AVF_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define AVF_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define AVF_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ AVF_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define AVF_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define AVF_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
+#define AVF_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define AVF_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define AVF_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define AVF_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define AVF_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+AVF_CHECK_STRUCT_LEN(128, avf_aqc_vsi_properties_data);
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct avf_aqc_add_update_pv {
+ __le16 command_flags;
+#define AVF_AQC_PV_FLAG_PV_TYPE 0x1
+#define AVF_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define AVF_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define AVF_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_update_pv);
+
+struct avf_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define AVF_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define AVF_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define AVF_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define AVF_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses avf_aqc_switch_seid for the descriptor
+ */
+
+struct avf_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define AVF_AQC_GET_PV_PV_TYPE 0x1
+#define AVF_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define AVF_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct avf_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define AVF_AQC_ADD_VEB_FLOATING 0x1
+#define AVF_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define AVF_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ AVF_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define AVF_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define AVF_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define AVF_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */
+#define AVF_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_veb);
+
+struct avf_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define AVF_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define AVF_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define AVF_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define AVF_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses avf_aqc_switch_seid for the descriptor
+ */
+struct avf_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic avf_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used for the command for most vlan commands */
+struct avf_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define AVF_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define AVF_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ AVF_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define AVF_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_macvlan);
+
+/* indirect data for command and response */
+struct avf_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define AVF_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define AVF_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define AVF_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define AVF_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+#define AVF_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010
+ __le16 queue_number;
+#define AVF_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define AVF_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ AVF_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+ /* response section */
+ u8 match_method;
+#define AVF_AQC_MM_PERFECT_MATCH 0x01
+#define AVF_AQC_MM_HASH_MATCH 0x02
+#define AVF_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
+struct avf_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses avf_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct avf_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define AVF_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define AVF_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define AVF_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define AVF_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define AVF_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define AVF_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic avf_aqc_macvlan for the command
+ */
+struct avf_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define AVF_AQC_ADD_VLAN_LOCAL 0x1
+#define AVF_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define AVF_AQC_ADD_PVLAN_TYPE_MASK (0x3 << AVF_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define AVF_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define AVF_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define AVF_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define AVF_AQC_VLAN_PTYPE_SHIFT 3
+#define AVF_AQC_VLAN_PTYPE_MASK (0x3 << AVF_AQC_VLAN_PTYPE_SHIFT)
+#define AVF_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define AVF_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define AVF_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define AVF_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define AVF_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define AVF_AQC_ADD_VLAN_SUCCESS 0x0
+#define AVF_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define AVF_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define AVF_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define AVF_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct avf_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct avf_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define AVF_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define AVF_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define AVF_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define AVF_AQC_SET_VSI_DEFAULT 0x08
+#define AVF_AQC_SET_VSI_PROMISC_VLAN 0x10
+#define AVF_AQC_SET_VSI_PROMISC_TX 0x8000
+ __le16 seid;
+#define AVF_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+#define AVF_AQC_SET_VSI_VLAN_MASK 0x0FFF
+#define AVF_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_vsi_promiscuous_modes);
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic avf_aqc_add_remove_tag_completion for completion
+ */
+struct avf_aqc_add_tag {
+ __le16 flags;
+#define AVF_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define AVF_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define AVF_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ AVF_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_tag);
+
+struct avf_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic avf_aqc_add_remove_tag_completion for completion
+ */
+struct avf_aqc_remove_tag {
+ __le16 seid;
+#define AVF_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define AVF_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ AVF_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_remove_tag);
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct avf_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_mcast_etag);
+
+struct avf_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct avf_aqc_update_tag {
+ __le16 seid;
+#define AVF_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define AVF_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ AVF_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_update_tag);
+
+struct avf_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the avf_aqc_add_oveb_cloud,
+ * and the generic direct completion structure
+ */
+struct avf_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define AVF_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define AVF_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define AVF_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define AVF_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define AVF_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define AVF_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define AVF_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ AVF_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_control_packet_filter);
+
+struct avf_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the avf_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct avf_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define AVF_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define AVF_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ AVF_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 big_buffer_flag;
+#define AVF_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER 1
+ u8 reserved2[3];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_cloud_filters);
+
+struct avf_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define AVF_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define AVF_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ AVF_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
+#define AVF_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0002 reserved */
+#define AVF_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define AVF_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
+#define AVF_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define AVF_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define AVF_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define AVF_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define AVF_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+/* 0x0010 to 0x0017 is for custom filters */
+
+#define AVF_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define AVF_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define AVF_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define AVF_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define AVF_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5
+
+#define AVF_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000
+#define AVF_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000
+#define AVF_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define AVF_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define AVF_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
+ AVF_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
+ /* response section */
+ u8 allocation_result;
+#define AVF_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define AVF_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
+
+/* avf_aqc_add_rm_cloud_filt_elem_ext is used when
+ * AVF_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set. refer to
+ * DCR288
+ */
+struct avf_aqc_add_rm_cloud_filt_elem_ext {
+ struct avf_aqc_add_remove_cloud_filters_element_data element;
+ u16 general_fields[32];
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
+};
+
+struct avf_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_remove_cloud_filters_completion);
+
+/* Replace filter Command 0x025F
+ * uses the avf_aqc_replace_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct avf_filter_data {
+ u8 filter_type;
+ u8 input[3];
+};
+
+struct avf_aqc_replace_cloud_filters_cmd {
+ u8 valid_flags;
+#define AVF_AQC_REPLACE_L1_FILTER 0x0
+#define AVF_AQC_REPLACE_CLOUD_FILTER 0x1
+#define AVF_AQC_GET_CLOUD_FILTERS 0x2
+#define AVF_AQC_MIRROR_CLOUD_FILTER 0x4
+#define AVF_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
+ u8 old_filter_type;
+ u8 new_filter_type;
+ u8 tr_bit;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct avf_aqc_replace_cloud_filters_cmd_buf {
+ u8 data[32];
+/* Filter type INPUT codes*/
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED (1 << 7UL)
+
+/* Field Vector offsets */
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
+/* big FLU */
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
+/* big FLU */
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
+
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
+ struct avf_filter_data filters[8];
+};
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ * take care to set the flags correctly.
+ */
+struct avf_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define AVF_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define AVF_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ AVF_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define AVF_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define AVF_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define AVF_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define AVF_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define AVF_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_delete_mirror_rule);
+
+struct avf_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_delete_mirror_rule_completion);
+
+/* Dynamic Device Personalization */
+struct avf_aqc_write_personalization_profile {
+ u8 flags;
+ u8 reserved[3];
+ __le32 profile_track_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_write_personalization_profile);
+
+struct avf_aqc_write_ddp_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct avf_aqc_get_applied_profiles {
+ u8 flags;
+#define AVF_AQC_GET_DDP_GET_CONF 0x1
+#define AVF_AQC_GET_DDP_GET_RDPU_CONF 0x2
+ u8 rsv[3];
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_applied_profiles);
+
+/* DCB 0x03xx*/
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct avf_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define AVF_AQC_PFC_IGNORE_SET 0x80
+#define AVF_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the avf_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct avf_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct avf_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct avf_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with avf_aqc_qs_handles_resp
+ */
+struct avf_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+AVF_CHECK_STRUCT_LEN(0x40, avf_aqc_configure_vsi_ets_sla_bw_data);
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with avf_aqc_qs_handles_resp
+ */
+struct avf_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_configure_vsi_tc_bw_data);
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct avf_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+AVF_CHECK_STRUCT_LEN(0x40, avf_aqc_query_vsi_bw_config_resp);
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct avf_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_query_vsi_ets_sla_config_resp);
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct avf_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct avf_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+#define AVF_AQ_ETS_SEEPAGE_EN_MASK 0x1
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
+};
+
+AVF_CHECK_STRUCT_LEN(0x80, avf_aqc_configure_switching_comp_ets_data);
+
+/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
+struct avf_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+AVF_CHECK_STRUCT_LEN(0x40,
+ avf_aqc_configure_switching_comp_ets_bw_limit_data);
+
+/* Configure Switching Component Bandwidth Allocation per Tc
+ * (indirect 0x0417)
+ */
+struct avf_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_configure_switching_comp_bw_config_data);
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct avf_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+AVF_CHECK_STRUCT_LEN(0x40, avf_aqc_query_switching_comp_ets_config_resp);
+
+/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
+struct avf_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+AVF_CHECK_STRUCT_LEN(0x44, avf_aqc_query_port_ets_config_resp);
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct avf_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_query_switching_comp_bw_config_resp);
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct avf_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
+AVF_CHECK_STRUCT_LEN(0x22, avf_aqc_configure_partition_bw_data);
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct avf_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aq_get_set_hmc_resource_profile);
+
+enum avf_aq_hmc_profile {
+ /* AVF_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ AVF_HMC_PROFILE_DEFAULT = 1,
+ AVF_HMC_PROFILE_FAVOR_VF = 2,
+ AVF_HMC_PROFILE_EQUAL = 3,
+};
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define AVF_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define AVF_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum avf_aq_phy_type {
+ AVF_PHY_TYPE_SGMII = 0x0,
+ AVF_PHY_TYPE_1000BASE_KX = 0x1,
+ AVF_PHY_TYPE_10GBASE_KX4 = 0x2,
+ AVF_PHY_TYPE_10GBASE_KR = 0x3,
+ AVF_PHY_TYPE_40GBASE_KR4 = 0x4,
+ AVF_PHY_TYPE_XAUI = 0x5,
+ AVF_PHY_TYPE_XFI = 0x6,
+ AVF_PHY_TYPE_SFI = 0x7,
+ AVF_PHY_TYPE_XLAUI = 0x8,
+ AVF_PHY_TYPE_XLPPI = 0x9,
+ AVF_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ AVF_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ AVF_PHY_TYPE_10GBASE_AOC = 0xC,
+ AVF_PHY_TYPE_40GBASE_AOC = 0xD,
+ AVF_PHY_TYPE_UNRECOGNIZED = 0xE,
+ AVF_PHY_TYPE_UNSUPPORTED = 0xF,
+ AVF_PHY_TYPE_100BASE_TX = 0x11,
+ AVF_PHY_TYPE_1000BASE_T = 0x12,
+ AVF_PHY_TYPE_10GBASE_T = 0x13,
+ AVF_PHY_TYPE_10GBASE_SR = 0x14,
+ AVF_PHY_TYPE_10GBASE_LR = 0x15,
+ AVF_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ AVF_PHY_TYPE_10GBASE_CR1 = 0x17,
+ AVF_PHY_TYPE_40GBASE_CR4 = 0x18,
+ AVF_PHY_TYPE_40GBASE_SR4 = 0x19,
+ AVF_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ AVF_PHY_TYPE_1000BASE_SX = 0x1B,
+ AVF_PHY_TYPE_1000BASE_LX = 0x1C,
+ AVF_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ AVF_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ AVF_PHY_TYPE_25GBASE_KR = 0x1F,
+ AVF_PHY_TYPE_25GBASE_CR = 0x20,
+ AVF_PHY_TYPE_25GBASE_SR = 0x21,
+ AVF_PHY_TYPE_25GBASE_LR = 0x22,
+ AVF_PHY_TYPE_25GBASE_AOC = 0x23,
+ AVF_PHY_TYPE_25GBASE_ACC = 0x24,
+ AVF_PHY_TYPE_MAX,
+ AVF_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
+ AVF_PHY_TYPE_EMPTY = 0xFE,
+ AVF_PHY_TYPE_DEFAULT = 0xFF,
+};
+
+#define AVF_LINK_SPEED_100MB_SHIFT 0x1
+#define AVF_LINK_SPEED_1000MB_SHIFT 0x2
+#define AVF_LINK_SPEED_10GB_SHIFT 0x3
+#define AVF_LINK_SPEED_40GB_SHIFT 0x4
+#define AVF_LINK_SPEED_20GB_SHIFT 0x5
+#define AVF_LINK_SPEED_25GB_SHIFT 0x6
+
+enum avf_aq_link_speed {
+ AVF_LINK_SPEED_UNKNOWN = 0,
+ AVF_LINK_SPEED_100MB = (1 << AVF_LINK_SPEED_100MB_SHIFT),
+ AVF_LINK_SPEED_1GB = (1 << AVF_LINK_SPEED_1000MB_SHIFT),
+ AVF_LINK_SPEED_10GB = (1 << AVF_LINK_SPEED_10GB_SHIFT),
+ AVF_LINK_SPEED_40GB = (1 << AVF_LINK_SPEED_40GB_SHIFT),
+ AVF_LINK_SPEED_20GB = (1 << AVF_LINK_SPEED_20GB_SHIFT),
+ AVF_LINK_SPEED_25GB = (1 << AVF_LINK_SPEED_25GB_SHIFT),
+};
+
+struct avf_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_module_desc);
+
+struct avf_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define AVF_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define AVF_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define AVF_AQ_PHY_FLAG_LOW_POWER 0x04
+#define AVF_AQ_PHY_LINK_ENABLED 0x08
+#define AVF_AQ_PHY_AN_ENABLED 0x10
+#define AVF_AQ_PHY_FLAG_MODULE_QUAL 0x20
+#define AVF_AQ_PHY_FEC_ABILITY_KR 0x40
+#define AVF_AQ_PHY_FEC_ABILITY_RS 0x80
+ __le16 eee_capability;
+#define AVF_AQ_EEE_100BASE_TX 0x0002
+#define AVF_AQ_EEE_1000BASE_T 0x0004
+#define AVF_AQ_EEE_10GBASE_T 0x0008
+#define AVF_AQ_EEE_1000BASE_KX 0x0010
+#define AVF_AQ_EEE_10GBASE_KX4 0x0020
+#define AVF_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define AVF_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 phy_type_ext;
+#define AVF_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define AVF_AQ_PHY_TYPE_EXT_25G_CR 0x02
+#define AVF_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define AVF_AQ_PHY_TYPE_EXT_25G_LR 0x08
+#define AVF_AQ_PHY_TYPE_EXT_25G_AOC 0x10
+#define AVF_AQ_PHY_TYPE_EXT_25G_ACC 0x20
+ u8 fec_cfg_curr_mod_ext_info;
+#define AVF_AQ_ENABLE_FEC_KR 0x01
+#define AVF_AQ_ENABLE_FEC_RS 0x02
+#define AVF_AQ_REQUEST_FEC_KR 0x04
+#define AVF_AQ_REQUEST_FEC_RS 0x08
+#define AVF_AQ_ENABLE_FEC_AUTO 0x10
+#define AVF_AQ_FEC
+#define AVF_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define AVF_AQ_MODULE_TYPE_EXT_SHIFT 5
+
+ u8 ext_comp_code;
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define AVF_AQ_PHY_MAX_QMS 16
+ struct avf_aqc_module_desc qualified_module[AVF_AQ_PHY_MAX_QMS];
+};
+
+AVF_CHECK_STRUCT_LEN(0x218, avf_aq_get_phy_abilities_resp);
+
+/* Set PHY Config (direct 0x0601) */
+struct avf_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define AVF_AQ_PHY_ENABLE_LINK 0x08
+#define AVF_AQ_PHY_ENABLE_AN 0x10
+#define AVF_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 phy_type_ext;
+ u8 fec_config;
+#define AVF_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define AVF_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define AVF_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define AVF_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define AVF_AQ_SET_FEC_AUTO BIT(4)
+#define AVF_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define AVF_AQ_PHY_FEC_CONFIG_MASK (0x1F << AVF_AQ_PHY_FEC_CONFIG_SHIFT)
+ u8 reserved;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct avf_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define AVF_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define AVF_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define AVF_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define AVF_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define AVF_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define AVF_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define AVF_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define AVF_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define AVF_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define AVF_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define AVF_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define AVF_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define AVF_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define AVF_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x605) */
+struct avf_aqc_set_link_restart_an {
+ u8 command;
+#define AVF_AQ_PHY_RESTART_AN 0x02
+#define AVF_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct avf_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define AVF_AQ_LSE_MASK 0x3
+#define AVF_AQ_LSE_NOP 0x0
+#define AVF_AQ_LSE_DISABLE 0x2
+#define AVF_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define AVF_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* avf_aq_phy_type */
+ u8 link_speed; /* avf_aq_link_speed */
+ u8 link_info;
+#define AVF_AQ_LINK_UP 0x01 /* obsolete */
+#define AVF_AQ_LINK_UP_FUNCTION 0x01
+#define AVF_AQ_LINK_FAULT 0x02
+#define AVF_AQ_LINK_FAULT_TX 0x04
+#define AVF_AQ_LINK_FAULT_RX 0x08
+#define AVF_AQ_LINK_FAULT_REMOTE 0x10
+#define AVF_AQ_LINK_UP_PORT 0x20
+#define AVF_AQ_MEDIA_AVAILABLE 0x40
+#define AVF_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define AVF_AQ_AN_COMPLETED 0x01
+#define AVF_AQ_LP_AN_ABILITY 0x02
+#define AVF_AQ_PD_FAULT 0x04
+#define AVF_AQ_FEC_EN 0x08
+#define AVF_AQ_PHY_LOW_POWER 0x10
+#define AVF_AQ_LINK_PAUSE_TX 0x20
+#define AVF_AQ_LINK_PAUSE_RX 0x40
+#define AVF_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define AVF_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define AVF_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define AVF_AQ_LINK_TX_SHIFT 0x02
+#define AVF_AQ_LINK_TX_MASK (0x03 << AVF_AQ_LINK_TX_SHIFT)
+#define AVF_AQ_LINK_TX_ACTIVE 0x00
+#define AVF_AQ_LINK_TX_DRAINED 0x01
+#define AVF_AQ_LINK_TX_FLUSHED 0x03
+#define AVF_AQ_LINK_FORCED_40G 0x10
+/* 25G Error Codes */
+#define AVF_AQ_25G_NO_ERR 0X00
+#define AVF_AQ_25G_NOT_PRESENT 0X01
+#define AVF_AQ_25G_NVM_CRC_ERR 0X02
+#define AVF_AQ_25G_SBUS_UCODE_ERR 0X03
+#define AVF_AQ_25G_SERDES_UCODE_ERR 0X04
+#define AVF_AQ_25G_NIMB_UCODE_ERR 0X05
+ u8 loopback; /* use defines from avf_aqc_set_lb_mode */
+/* Since firmware API 1.7 loopback field keeps power class info as well */
+#define AVF_AQ_LOOPBACK_MASK 0x07
+#define AVF_AQ_PWR_CLASS_SHIFT_LB 6
+#define AVF_AQ_PWR_CLASS_MASK_LB (0x03 << AVF_AQ_PWR_CLASS_SHIFT_LB)
+ __le16 max_frame_size;
+ u8 config;
+#define AVF_AQ_CONFIG_FEC_KR_ENA 0x01
+#define AVF_AQ_CONFIG_FEC_RS_ENA 0x02
+#define AVF_AQ_CONFIG_CRC_ENA 0x04
+#define AVF_AQ_CONFIG_PACING_MASK 0x78
+ union {
+ struct {
+ u8 power_desc;
+#define AVF_AQ_LINK_POWER_CLASS_1 0x00
+#define AVF_AQ_LINK_POWER_CLASS_2 0x01
+#define AVF_AQ_LINK_POWER_CLASS_3 0x02
+#define AVF_AQ_LINK_POWER_CLASS_4 0x03
+#define AVF_AQ_PWR_CLASS_MASK 0x03
+ u8 reserved[4];
+ };
+ struct {
+ u8 link_type[4];
+ u8 link_type_ext;
+ };
+ };
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_link_status);
+
+/* Set event mask command (direct 0x613) */
+struct avf_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define AVF_AQ_EVENT_LINK_UPDOWN 0x0002
+#define AVF_AQ_EVENT_MEDIA_NA 0x0004
+#define AVF_AQ_EVENT_LINK_FAULT 0x0008
+#define AVF_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define AVF_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define AVF_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define AVF_AQ_EVENT_AN_COMPLETED 0x0080
+#define AVF_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define AVF_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct avf_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct avf_aqc_set_lb_mode {
+ u8 lb_level;
+#define AVF_AQ_LB_NONE 0
+#define AVF_AQ_LB_MAC 1
+#define AVF_AQ_LB_SERDES 2
+#define AVF_AQ_LB_PHY_INT 3
+#define AVF_AQ_LB_PHY_EXT 4
+#define AVF_AQ_LB_CPVL_PCS 5
+#define AVF_AQ_LB_CPVL_EXT 6
+#define AVF_AQ_LB_PHY_LOCAL 0x01
+#define AVF_AQ_LB_PHY_REMOTE 0x02
+#define AVF_AQ_LB_MAC_LOCAL 0x04
+ u8 lb_type;
+#define AVF_AQ_LB_LOCAL 0
+#define AVF_AQ_LB_FAR 0x01
+ u8 speed;
+#define AVF_AQ_LB_SPEED_NONE 0
+#define AVF_AQ_LB_SPEED_1G 1
+#define AVF_AQ_LB_SPEED_10G 2
+#define AVF_AQ_LB_SPEED_40G 3
+#define AVF_AQ_LB_SPEED_20G 4
+ u8 force_speed;
+ u8 reserved[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_lb_mode);
+
+/* Set PHY Debug command (0x0622) */
+struct avf_aqc_set_phy_debug {
+ u8 command_flags;
+#define AVF_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define AVF_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define AVF_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ AVF_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define AVF_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define AVF_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define AVF_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+/* Disable link manageability on a single port */
+#define AVF_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+/* Disable link manageability on all ports needs both bits 4 and 5 */
+#define AVF_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_phy_debug);
+
+enum avf_aq_phy_reg_type {
+ AVF_AQC_PHY_REG_INTERNAL = 0x1,
+ AVF_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ AVF_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* Run PHY Activity (0x0626) */
+struct avf_aqc_run_phy_activity {
+ __le16 activity_id;
+ u8 flags;
+ u8 reserved1;
+ __le32 control;
+ __le32 data;
+ u8 reserved2[4];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_run_phy_activity);
+
+/* Set PHY Register command (0x0628) */
+/* Get PHY Register command (0x0629) */
+struct avf_aqc_phy_register_access {
+ u8 phy_interface;
+#define AVF_AQ_PHY_REG_ACCESS_INTERNAL 0
+#define AVF_AQ_PHY_REG_ACCESS_EXTERNAL 1
+#define AVF_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
+ u8 dev_addres;
+ u8 reserved1[2];
+ __le32 reg_address;
+ __le32 reg_value;
+ u8 reserved2[4];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_phy_register_access);
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct avf_aqc_nvm_update {
+ u8 command_flags;
+#define AVF_AQ_NVM_LAST_CMD 0x01
+#define AVF_AQ_NVM_FLASH_ONLY 0x80
+#define AVF_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define AVF_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define AVF_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define AVF_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_nvm_update);
+
+/* NVM Config Read (indirect 0x0704) */
+struct avf_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define AVF_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define AVF_AQ_ANVM_READ_SINGLE_FEATURE 0
+#define AVF_AQ_ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ __le16 element_id_msw; /* MSWord of field ID */
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct avf_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_nvm_config_write);
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+#define AVF_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
+#define AVF_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+ (1 << AVF_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define AVF_AQ_ANVM_FEATURE 0
+#define AVF_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+struct avf_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+#define AVF_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
+#define AVF_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
+#define AVF_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+AVF_CHECK_STRUCT_LEN(0x6, avf_aqc_nvm_config_data_feature);
+
+struct avf_aqc_nvm_config_data_immediate_field {
+ __le32 field_id;
+ __le32 field_value;
+ __le16 field_options;
+ __le16 reserved;
+};
+
+AVF_CHECK_STRUCT_LEN(0xc, avf_aqc_nvm_config_data_immediate_field);
+
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+struct avf_aqc_nvm_oem_post_update {
+#define AVF_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
+ u8 sel_data;
+ u8 reserved[7];
+};
+
+AVF_CHECK_STRUCT_LEN(0x8, avf_aqc_nvm_oem_post_update);
+
+struct avf_aqc_nvm_oem_post_update_buffer {
+ u8 str_len;
+ u8 dev_addr;
+ __le16 eeprom_addr;
+ u8 data[36];
+};
+
+AVF_CHECK_STRUCT_LEN(0x28, avf_aqc_nvm_oem_post_update_buffer);
+
+/* Thermal Sensor (indirect 0x0721)
+ * read or set thermal sensor configs and values
+ * takes a sensor and command specific data buffer, not detailed here
+ */
+struct avf_aqc_thermal_sensor {
+ u8 sensor_action;
+#define AVF_AQ_THERMAL_SENSOR_READ_CONFIG 0
+#define AVF_AQ_THERMAL_SENSOR_SET_CONFIG 1
+#define AVF_AQ_THERMAL_SENSOR_READ_TEMP 2
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_thermal_sensor);
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct avf_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct avf_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct avf_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses avf_aq_desc
+ */
+struct avf_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define AVF_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define AVF_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define AVF_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define AVF_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct avf_aqc_alternate_set_mode {
+ __le32 mode;
+#define AVF_AQ_ALTERNATE_MODE_NONE 0
+#define AVF_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses avf_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct avf_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct avf_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define AVF_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define AVF_AQ_LLDP_MIB_LOCAL 0x0
+#define AVF_AQ_LLDP_MIB_REMOTE 0x1
+#define AVF_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define AVF_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define AVF_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define AVF_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define AVF_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define AVF_AQ_LLDP_TX_SHIFT 0x4
+#define AVF_AQ_LLDP_TX_MASK (0x03 << AVF_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use AVF_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct avf_aqc_lldp_update_mib {
+ u8 command;
+#define AVF_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define AVF_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct avf_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct avf_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct avf_aqc_lldp_stop {
+ u8 command;
+#define AVF_AQ_LLDP_AGENT_STOP 0x0
+#define AVF_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct avf_aqc_lldp_start {
+ u8 command;
+#define AVF_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_start);
+
+/* Set DCB (direct 0x0303) */
+struct avf_aqc_set_dcb_parameters {
+ u8 command;
+#define AVF_AQ_DCB_SET_AGENT 0x1
+#define AVF_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_dcb_parameters);
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
+ */
+
+#define AVF_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define AVF_AQC_CEE_APP_FCOE_MASK (0x7 << AVF_AQC_CEE_APP_FCOE_SHIFT)
+#define AVF_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define AVF_AQC_CEE_APP_ISCSI_MASK (0x7 << AVF_AQC_CEE_APP_ISCSI_SHIFT)
+#define AVF_AQC_CEE_APP_FIP_SHIFT 0x8
+#define AVF_AQC_CEE_APP_FIP_MASK (0x7 << AVF_AQC_CEE_APP_FIP_SHIFT)
+
+#define AVF_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define AVF_AQC_CEE_PG_STATUS_MASK (0x7 << AVF_AQC_CEE_PG_STATUS_SHIFT)
+#define AVF_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define AVF_AQC_CEE_PFC_STATUS_MASK (0x7 << AVF_AQC_CEE_PFC_STATUS_SHIFT)
+#define AVF_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define AVF_AQC_CEE_APP_STATUS_MASK (0x7 << AVF_AQC_CEE_APP_STATUS_SHIFT)
+#define AVF_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define AVF_AQC_CEE_FCOE_STATUS_MASK (0x7 << AVF_AQC_CEE_FCOE_STATUS_SHIFT)
+#define AVF_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
+#define AVF_AQC_CEE_ISCSI_STATUS_MASK (0x7 << AVF_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define AVF_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define AVF_AQC_CEE_FIP_STATUS_MASK (0x7 << AVF_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct avf_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct. Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
+struct avf_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3[2];
+ __le16 oper_app_prio;
+ u8 reserved4[2];
+ __le16 tlv_status;
+};
+
+AVF_CHECK_STRUCT_LEN(0x18, avf_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct avf_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+ __le32 tlv_status;
+ u8 reserved[12];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_get_cee_dcb_cfg_resp);
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
+ */
+struct avf_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
+ u8 type;
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_set_local_mib);
+
+struct avf_aqc_lldp_set_local_mib_resp {
+#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01
+ u8 status;
+ u8 reserved[15];
+};
+
+AVF_CHECK_STRUCT_LEN(0x10, avf_aqc_lldp_set_local_mib_resp);
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting specific LLDP agent. e.g. DCBx
+ */
+struct avf_aqc_lldp_stop_start_specific_agent {
+#define AVF_AQC_START_SPECIFIC_AGENT_SHIFT 0
+#define AVF_AQC_START_SPECIFIC_AGENT_MASK \
+ (1 << AVF_AQC_START_SPECIFIC_AGENT_SHIFT)
+ u8 command;
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_stop_start_specific_agent);
+
+/* Add Udp Tunnel command and completion (direct 0x0B00) */
+struct avf_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
+#define AVF_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define AVF_AQC_TUNNEL_TYPE_NGE 0x01
+#define AVF_AQC_TUNNEL_TYPE_TEREDO 0x10
+#define AVF_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11
+ u8 reserved1[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_udp_tunnel);
+
+struct avf_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define AVF_AQC_SINGLE_PF 0x0
+#define AVF_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_udp_tunnel_completion);
+
+/* remove UDP Tunnel command (0x0B01) */
+struct avf_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_remove_udp_tunnel);
+
+struct avf_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_del_udp_tunnel_completion);
+
+struct avf_aqc_get_set_rss_key {
+#define AVF_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define AVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define AVF_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ AVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_set_rss_key);
+
+struct avf_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+AVF_CHECK_STRUCT_LEN(0x34, avf_aqc_get_set_rss_key_data);
+
+struct avf_aqc_get_set_rss_lut {
+#define AVF_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define AVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define AVF_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ AVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define AVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define AVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ AVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define AVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define AVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_set_rss_lut);
+
+/* tunnel key structure 0x0B10 */
+
+struct avf_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+#define AVF_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define AVF_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define AVF_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define AVF_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 network_key_index;
+#define AVF_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define AVF_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define AVF_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define AVF_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct avf_aqc_oem_param_change {
+ __le32 param_type;
+#define AVF_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define AVF_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define AVF_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ __le16 param_value2;
+ u8 reserved[6];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_oem_param_change);
+
+struct avf_aqc_oem_state_change {
+ __le32 state;
+#define AVF_AQ_OEM_STATE_LINK_DOWN 0x0
+#define AVF_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_oem_state_change);
+
+/* Initialize OCSD (0xFE02, direct) */
+struct avf_aqc_opc_oem_ocsd_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocsd_memory_block_addr_high;
+ __le32 ocsd_memory_block_addr_low;
+ __le32 requested_update_interval;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB (0xFE03, direct) */
+struct avf_aqc_opc_oem_ocbb_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocbb_memory_block_addr_high;
+ __le32 ocbb_memory_block_addr_low;
+ u8 reserved2[4];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_opc_oem_ocbb_initialize);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test more (0xFF01, internal) */
+
+struct avf_acq_set_test_mode {
+ u8 mode;
+#define AVF_AQ_TEST_PARTIAL 0
+#define AVF_AQ_TEST_FULL 1
+#define AVF_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define AVF_AQ_TEST_OPEN 0
+#define AVF_AQ_TEST_CLOSE 1
+#define AVF_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct avf_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* avf_aq_desc is used for the command */
+struct avf_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct avf_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define AVF_AQ_CLUSTER_ID_AUX 0
+#define AVF_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define AVF_AQ_CLUSTER_ID_TXSCHED 2
+#define AVF_AQ_CLUSTER_ID_HMC 3
+#define AVF_AQ_CLUSTER_ID_MAC0 4
+#define AVF_AQ_CLUSTER_ID_MAC1 5
+#define AVF_AQ_CLUSTER_ID_MAC2 6
+#define AVF_AQ_CLUSTER_ID_MAC3 7
+#define AVF_AQ_CLUSTER_ID_DCB 8
+#define AVF_AQ_CLUSTER_ID_EMP_MEM 9
+#define AVF_AQ_CLUSTER_ID_PKT_BUF 10
+#define AVF_AQ_CLUSTER_ID_ALTRAM 11
+
+struct avf_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_debug_dump_internals);
+
+struct avf_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_debug_modify_internals);
+
+#endif /* _AVF_ADMINQ_CMD_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/base/avf_alloc.h b/src/spdk/dpdk/drivers/net/avf/base/avf_alloc.h
new file mode 100644
index 00000000..21e29bd0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/avf_alloc.h
@@ -0,0 +1,65 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_ALLOC_H_
+#define _AVF_ALLOC_H_
+
+struct avf_hw;
+
+/* Memory allocation types */
+enum avf_memory_type {
+ avf_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ avf_mem_asq_buf = 1,
+ avf_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ avf_mem_arq_ring = 3, /* ARQ descriptor ring */
+ avf_mem_atq_ring = 4, /* ATQ descriptor ring */
+ avf_mem_pd = 5, /* Page Descriptor */
+ avf_mem_bp = 6, /* Backing Page - 4KB */
+ avf_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ avf_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+enum avf_status_code avf_allocate_dma_mem(struct avf_hw *hw,
+ struct avf_dma_mem *mem,
+ enum avf_memory_type type,
+ u64 size, u32 alignment);
+enum avf_status_code avf_free_dma_mem(struct avf_hw *hw,
+ struct avf_dma_mem *mem);
+enum avf_status_code avf_allocate_virt_mem(struct avf_hw *hw,
+ struct avf_virt_mem *mem,
+ u32 size);
+enum avf_status_code avf_free_virt_mem(struct avf_hw *hw,
+ struct avf_virt_mem *mem);
+
+#endif /* _AVF_ALLOC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/base/avf_common.c b/src/spdk/dpdk/drivers/net/avf/base/avf_common.c
new file mode 100644
index 00000000..bbaadada
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/avf_common.c
@@ -0,0 +1,1845 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "avf_type.h"
+#include "avf_adminq.h"
+#include "avf_prototype.h"
+#include "virtchnl.h"
+
+
+/**
+ * avf_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+enum avf_status_code avf_set_mac_type(struct avf_hw *hw)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+
+ DEBUGFUNC("avf_set_mac_type\n");
+
+ if (hw->vendor_id == AVF_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+ /* TODO: remove undefined device ID now, need to think how to
+ * remove them in share code
+ */
+ case AVF_DEV_ID_ADAPTIVE_VF:
+ hw->mac.type = AVF_MAC_VF;
+ break;
+ default:
+ hw->mac.type = AVF_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = AVF_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ DEBUGOUT2("avf_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * avf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *avf_aq_str(struct avf_hw *hw, enum avf_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case AVF_AQ_RC_OK:
+ return "OK";
+ case AVF_AQ_RC_EPERM:
+ return "AVF_AQ_RC_EPERM";
+ case AVF_AQ_RC_ENOENT:
+ return "AVF_AQ_RC_ENOENT";
+ case AVF_AQ_RC_ESRCH:
+ return "AVF_AQ_RC_ESRCH";
+ case AVF_AQ_RC_EINTR:
+ return "AVF_AQ_RC_EINTR";
+ case AVF_AQ_RC_EIO:
+ return "AVF_AQ_RC_EIO";
+ case AVF_AQ_RC_ENXIO:
+ return "AVF_AQ_RC_ENXIO";
+ case AVF_AQ_RC_E2BIG:
+ return "AVF_AQ_RC_E2BIG";
+ case AVF_AQ_RC_EAGAIN:
+ return "AVF_AQ_RC_EAGAIN";
+ case AVF_AQ_RC_ENOMEM:
+ return "AVF_AQ_RC_ENOMEM";
+ case AVF_AQ_RC_EACCES:
+ return "AVF_AQ_RC_EACCES";
+ case AVF_AQ_RC_EFAULT:
+ return "AVF_AQ_RC_EFAULT";
+ case AVF_AQ_RC_EBUSY:
+ return "AVF_AQ_RC_EBUSY";
+ case AVF_AQ_RC_EEXIST:
+ return "AVF_AQ_RC_EEXIST";
+ case AVF_AQ_RC_EINVAL:
+ return "AVF_AQ_RC_EINVAL";
+ case AVF_AQ_RC_ENOTTY:
+ return "AVF_AQ_RC_ENOTTY";
+ case AVF_AQ_RC_ENOSPC:
+ return "AVF_AQ_RC_ENOSPC";
+ case AVF_AQ_RC_ENOSYS:
+ return "AVF_AQ_RC_ENOSYS";
+ case AVF_AQ_RC_ERANGE:
+ return "AVF_AQ_RC_ERANGE";
+ case AVF_AQ_RC_EFLUSHED:
+ return "AVF_AQ_RC_EFLUSHED";
+ case AVF_AQ_RC_BAD_ADDR:
+ return "AVF_AQ_RC_BAD_ADDR";
+ case AVF_AQ_RC_EMODE:
+ return "AVF_AQ_RC_EMODE";
+ case AVF_AQ_RC_EFBIG:
+ return "AVF_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * avf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *avf_stat_str(struct avf_hw *hw, enum avf_status_code stat_err)
+{
+ switch (stat_err) {
+ case AVF_SUCCESS:
+ return "OK";
+ case AVF_ERR_NVM:
+ return "AVF_ERR_NVM";
+ case AVF_ERR_NVM_CHECKSUM:
+ return "AVF_ERR_NVM_CHECKSUM";
+ case AVF_ERR_PHY:
+ return "AVF_ERR_PHY";
+ case AVF_ERR_CONFIG:
+ return "AVF_ERR_CONFIG";
+ case AVF_ERR_PARAM:
+ return "AVF_ERR_PARAM";
+ case AVF_ERR_MAC_TYPE:
+ return "AVF_ERR_MAC_TYPE";
+ case AVF_ERR_UNKNOWN_PHY:
+ return "AVF_ERR_UNKNOWN_PHY";
+ case AVF_ERR_LINK_SETUP:
+ return "AVF_ERR_LINK_SETUP";
+ case AVF_ERR_ADAPTER_STOPPED:
+ return "AVF_ERR_ADAPTER_STOPPED";
+ case AVF_ERR_INVALID_MAC_ADDR:
+ return "AVF_ERR_INVALID_MAC_ADDR";
+ case AVF_ERR_DEVICE_NOT_SUPPORTED:
+ return "AVF_ERR_DEVICE_NOT_SUPPORTED";
+ case AVF_ERR_MASTER_REQUESTS_PENDING:
+ return "AVF_ERR_MASTER_REQUESTS_PENDING";
+ case AVF_ERR_INVALID_LINK_SETTINGS:
+ return "AVF_ERR_INVALID_LINK_SETTINGS";
+ case AVF_ERR_AUTONEG_NOT_COMPLETE:
+ return "AVF_ERR_AUTONEG_NOT_COMPLETE";
+ case AVF_ERR_RESET_FAILED:
+ return "AVF_ERR_RESET_FAILED";
+ case AVF_ERR_SWFW_SYNC:
+ return "AVF_ERR_SWFW_SYNC";
+ case AVF_ERR_NO_AVAILABLE_VSI:
+ return "AVF_ERR_NO_AVAILABLE_VSI";
+ case AVF_ERR_NO_MEMORY:
+ return "AVF_ERR_NO_MEMORY";
+ case AVF_ERR_BAD_PTR:
+ return "AVF_ERR_BAD_PTR";
+ case AVF_ERR_RING_FULL:
+ return "AVF_ERR_RING_FULL";
+ case AVF_ERR_INVALID_PD_ID:
+ return "AVF_ERR_INVALID_PD_ID";
+ case AVF_ERR_INVALID_QP_ID:
+ return "AVF_ERR_INVALID_QP_ID";
+ case AVF_ERR_INVALID_CQ_ID:
+ return "AVF_ERR_INVALID_CQ_ID";
+ case AVF_ERR_INVALID_CEQ_ID:
+ return "AVF_ERR_INVALID_CEQ_ID";
+ case AVF_ERR_INVALID_AEQ_ID:
+ return "AVF_ERR_INVALID_AEQ_ID";
+ case AVF_ERR_INVALID_SIZE:
+ return "AVF_ERR_INVALID_SIZE";
+ case AVF_ERR_INVALID_ARP_INDEX:
+ return "AVF_ERR_INVALID_ARP_INDEX";
+ case AVF_ERR_INVALID_FPM_FUNC_ID:
+ return "AVF_ERR_INVALID_FPM_FUNC_ID";
+ case AVF_ERR_QP_INVALID_MSG_SIZE:
+ return "AVF_ERR_QP_INVALID_MSG_SIZE";
+ case AVF_ERR_QP_TOOMANY_WRS_POSTED:
+ return "AVF_ERR_QP_TOOMANY_WRS_POSTED";
+ case AVF_ERR_INVALID_FRAG_COUNT:
+ return "AVF_ERR_INVALID_FRAG_COUNT";
+ case AVF_ERR_QUEUE_EMPTY:
+ return "AVF_ERR_QUEUE_EMPTY";
+ case AVF_ERR_INVALID_ALIGNMENT:
+ return "AVF_ERR_INVALID_ALIGNMENT";
+ case AVF_ERR_FLUSHED_QUEUE:
+ return "AVF_ERR_FLUSHED_QUEUE";
+ case AVF_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "AVF_ERR_INVALID_PUSH_PAGE_INDEX";
+ case AVF_ERR_INVALID_IMM_DATA_SIZE:
+ return "AVF_ERR_INVALID_IMM_DATA_SIZE";
+ case AVF_ERR_TIMEOUT:
+ return "AVF_ERR_TIMEOUT";
+ case AVF_ERR_OPCODE_MISMATCH:
+ return "AVF_ERR_OPCODE_MISMATCH";
+ case AVF_ERR_CQP_COMPL_ERROR:
+ return "AVF_ERR_CQP_COMPL_ERROR";
+ case AVF_ERR_INVALID_VF_ID:
+ return "AVF_ERR_INVALID_VF_ID";
+ case AVF_ERR_INVALID_HMCFN_ID:
+ return "AVF_ERR_INVALID_HMCFN_ID";
+ case AVF_ERR_BACKING_PAGE_ERROR:
+ return "AVF_ERR_BACKING_PAGE_ERROR";
+ case AVF_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "AVF_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case AVF_ERR_INVALID_PBLE_INDEX:
+ return "AVF_ERR_INVALID_PBLE_INDEX";
+ case AVF_ERR_INVALID_SD_INDEX:
+ return "AVF_ERR_INVALID_SD_INDEX";
+ case AVF_ERR_INVALID_PAGE_DESC_INDEX:
+ return "AVF_ERR_INVALID_PAGE_DESC_INDEX";
+ case AVF_ERR_INVALID_SD_TYPE:
+ return "AVF_ERR_INVALID_SD_TYPE";
+ case AVF_ERR_MEMCPY_FAILED:
+ return "AVF_ERR_MEMCPY_FAILED";
+ case AVF_ERR_INVALID_HMC_OBJ_INDEX:
+ return "AVF_ERR_INVALID_HMC_OBJ_INDEX";
+ case AVF_ERR_INVALID_HMC_OBJ_COUNT:
+ return "AVF_ERR_INVALID_HMC_OBJ_COUNT";
+ case AVF_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "AVF_ERR_INVALID_SRQ_ARM_LIMIT";
+ case AVF_ERR_SRQ_ENABLED:
+ return "AVF_ERR_SRQ_ENABLED";
+ case AVF_ERR_ADMIN_QUEUE_ERROR:
+ return "AVF_ERR_ADMIN_QUEUE_ERROR";
+ case AVF_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "AVF_ERR_ADMIN_QUEUE_TIMEOUT";
+ case AVF_ERR_BUF_TOO_SHORT:
+ return "AVF_ERR_BUF_TOO_SHORT";
+ case AVF_ERR_ADMIN_QUEUE_FULL:
+ return "AVF_ERR_ADMIN_QUEUE_FULL";
+ case AVF_ERR_ADMIN_QUEUE_NO_WORK:
+ return "AVF_ERR_ADMIN_QUEUE_NO_WORK";
+ case AVF_ERR_BAD_IWARP_CQE:
+ return "AVF_ERR_BAD_IWARP_CQE";
+ case AVF_ERR_NVM_BLANK_MODE:
+ return "AVF_ERR_NVM_BLANK_MODE";
+ case AVF_ERR_NOT_IMPLEMENTED:
+ return "AVF_ERR_NOT_IMPLEMENTED";
+ case AVF_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "AVF_ERR_PE_DOORBELL_NOT_ENABLED";
+ case AVF_ERR_DIAG_TEST_FAILED:
+ return "AVF_ERR_DIAG_TEST_FAILED";
+ case AVF_ERR_NOT_READY:
+ return "AVF_ERR_NOT_READY";
+ case AVF_NOT_SUPPORTED:
+ return "AVF_NOT_SUPPORTED";
+ case AVF_ERR_FIRMWARE_API_VERSION:
+ return "AVF_ERR_FIRMWARE_API_VERSION";
+ case AVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "AVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+/**
+ * avf_debug_aq
+ * @hw: debug mask related to admin queue
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void avf_debug_aq(struct avf_hw *hw, enum avf_debug_mask mask, void *desc,
+ void *buffer, u16 buf_len)
+{
+ struct avf_aq_desc *aq_desc = (struct avf_aq_desc *)desc;
+ u8 *buf = (u8 *)buffer;
+ u16 len;
+ u16 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ len = LE16_TO_CPU(aq_desc->datalen);
+
+ avf_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ LE16_TO_CPU(aq_desc->opcode),
+ LE16_TO_CPU(aq_desc->flags),
+ LE16_TO_CPU(aq_desc->datalen),
+ LE16_TO_CPU(aq_desc->retval));
+ avf_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->cookie_high),
+ LE32_TO_CPU(aq_desc->cookie_low));
+ avf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.internal.param0),
+ LE32_TO_CPU(aq_desc->params.internal.param1));
+ avf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.external.addr_high),
+ LE32_TO_CPU(aq_desc->params.external.addr_low));
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ avf_debug(hw, mask, "AQ CMD Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+ /* write the full 16-byte chunks */
+ for (i = 0; i < (len - 16); i += 16)
+ avf_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i, buf[i], buf[i+1], buf[i+2], buf[i+3],
+ buf[i+4], buf[i+5], buf[i+6], buf[i+7],
+ buf[i+8], buf[i+9], buf[i+10], buf[i+11],
+ buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
+ /* the most we could have left is 16 bytes, pad with zeros */
+ if (i < len) {
+ char d_buf[16];
+ int j, i_sav;
+
+ i_sav = i;
+ memset(d_buf, 0, sizeof(d_buf));
+ for (j = 0; i < len; j++, i++)
+ d_buf[j] = buf[i];
+ avf_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
+ d_buf[4], d_buf[5], d_buf[6], d_buf[7],
+ d_buf[8], d_buf[9], d_buf[10], d_buf[11],
+ d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
+ }
+ }
+}
+
+/**
+ * avf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if Queue is enabled else false.
+ **/
+bool avf_check_asq_alive(struct avf_hw *hw)
+{
+ if (hw->aq.asq.len)
+#ifdef INTEGRATED_VF
+ if (avf_is_vf(hw))
+ return !!(rd32(hw, hw->aq.asq.len) &
+ AVF_ATQLEN1_ATQENABLE_MASK);
+#else
+ return !!(rd32(hw, hw->aq.asq.len) &
+ AVF_ATQLEN1_ATQENABLE_MASK);
+#endif /* INTEGRATED_VF */
+ return false;
+}
+
+/**
+ * avf_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+enum avf_status_code avf_aq_queue_shutdown(struct avf_hw *hw,
+ bool unloading)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_queue_shutdown *cmd =
+ (struct avf_aqc_queue_shutdown *)&desc.params.raw;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = CPU_TO_LE32(AVF_AQ_DRIVER_UNLOADING);
+ status = avf_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * avf_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+STATIC enum avf_status_code avf_aq_get_set_rss_lut(struct avf_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ enum avf_status_code status;
+ struct avf_aq_desc desc;
+ struct avf_aqc_get_set_rss_lut *cmd_resp =
+ (struct avf_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_set_rss_lut);
+ else
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ AVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ AVF_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)AVF_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((AVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ AVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ AVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((AVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ AVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ AVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = avf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * avf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+enum avf_status_code avf_aq_get_rss_lut(struct avf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return avf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * avf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+enum avf_status_code avf_aq_set_rss_lut(struct avf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return avf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * avf_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get the RSS key per VSI
+ **/
+STATIC enum avf_status_code avf_aq_get_set_rss_key(struct avf_hw *hw,
+ u16 vsi_id,
+ struct avf_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ enum avf_status_code status;
+ struct avf_aq_desc desc;
+ struct avf_aqc_get_set_rss_key *cmd_resp =
+ (struct avf_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct avf_aqc_get_set_rss_key_data);
+
+ if (set)
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_set_rss_key);
+ else
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ AVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ AVF_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)AVF_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = avf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * avf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ **/
+enum avf_status_code avf_aq_get_rss_key(struct avf_hw *hw,
+ u16 vsi_id,
+ struct avf_aqc_get_set_rss_key_data *key)
+{
+ return avf_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * avf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+enum avf_status_code avf_aq_set_rss_key(struct avf_hw *hw,
+ u16 vsi_id,
+ struct avf_aqc_get_set_rss_key_data *key)
+{
+ return avf_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
+/* The avf_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT avf_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF avf_ptype_lookup[ptype].outer_ip == AVF_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum avf_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
+
+/* macro to make the table lines short */
+#define AVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ AVF_RX_PTYPE_OUTER_##OUTER_IP, \
+ AVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ AVF_RX_PTYPE_##OUTER_FRAG, \
+ AVF_RX_PTYPE_TUNNEL_##T, \
+ AVF_RX_PTYPE_TUNNEL_END_##TE, \
+ AVF_RX_PTYPE_##TEF, \
+ AVF_RX_PTYPE_INNER_PROT_##I, \
+ AVF_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define AVF_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define AVF_RX_PTYPE_NOF AVF_RX_PTYPE_NOT_FRAG
+#define AVF_RX_PTYPE_FRG AVF_RX_PTYPE_FRAG
+#define AVF_RX_PTYPE_INNER_PROT_TS AVF_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct avf_rx_ptype_decoded avf_ptype_lookup[] = {
+ /* L2 Packet types */
+ AVF_PTT_UNUSED_ENTRY(0),
+ AVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ AVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ AVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ AVF_PTT_UNUSED_ENTRY(4),
+ AVF_PTT_UNUSED_ENTRY(5),
+ AVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ AVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ AVF_PTT_UNUSED_ENTRY(8),
+ AVF_PTT_UNUSED_ENTRY(9),
+ AVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ AVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ AVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ AVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(25),
+ AVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ AVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ AVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ AVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(32),
+ AVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ AVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(39),
+ AVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ AVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ AVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(47),
+ AVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ AVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(54),
+ AVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ AVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ AVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(62),
+ AVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ AVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(69),
+ AVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ AVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ AVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(77),
+ AVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ AVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(84),
+ AVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ AVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(91),
+ AVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ AVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ AVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ AVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(98),
+ AVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ AVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(105),
+ AVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ AVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ AVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(113),
+ AVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ AVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(120),
+ AVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ AVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ AVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(128),
+ AVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ AVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(135),
+ AVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ AVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ AVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(143),
+ AVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ AVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(150),
+ AVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ AVF_PTT_UNUSED_ENTRY(154),
+ AVF_PTT_UNUSED_ENTRY(155),
+ AVF_PTT_UNUSED_ENTRY(156),
+ AVF_PTT_UNUSED_ENTRY(157),
+ AVF_PTT_UNUSED_ENTRY(158),
+ AVF_PTT_UNUSED_ENTRY(159),
+
+ AVF_PTT_UNUSED_ENTRY(160),
+ AVF_PTT_UNUSED_ENTRY(161),
+ AVF_PTT_UNUSED_ENTRY(162),
+ AVF_PTT_UNUSED_ENTRY(163),
+ AVF_PTT_UNUSED_ENTRY(164),
+ AVF_PTT_UNUSED_ENTRY(165),
+ AVF_PTT_UNUSED_ENTRY(166),
+ AVF_PTT_UNUSED_ENTRY(167),
+ AVF_PTT_UNUSED_ENTRY(168),
+ AVF_PTT_UNUSED_ENTRY(169),
+
+ AVF_PTT_UNUSED_ENTRY(170),
+ AVF_PTT_UNUSED_ENTRY(171),
+ AVF_PTT_UNUSED_ENTRY(172),
+ AVF_PTT_UNUSED_ENTRY(173),
+ AVF_PTT_UNUSED_ENTRY(174),
+ AVF_PTT_UNUSED_ENTRY(175),
+ AVF_PTT_UNUSED_ENTRY(176),
+ AVF_PTT_UNUSED_ENTRY(177),
+ AVF_PTT_UNUSED_ENTRY(178),
+ AVF_PTT_UNUSED_ENTRY(179),
+
+ AVF_PTT_UNUSED_ENTRY(180),
+ AVF_PTT_UNUSED_ENTRY(181),
+ AVF_PTT_UNUSED_ENTRY(182),
+ AVF_PTT_UNUSED_ENTRY(183),
+ AVF_PTT_UNUSED_ENTRY(184),
+ AVF_PTT_UNUSED_ENTRY(185),
+ AVF_PTT_UNUSED_ENTRY(186),
+ AVF_PTT_UNUSED_ENTRY(187),
+ AVF_PTT_UNUSED_ENTRY(188),
+ AVF_PTT_UNUSED_ENTRY(189),
+
+ AVF_PTT_UNUSED_ENTRY(190),
+ AVF_PTT_UNUSED_ENTRY(191),
+ AVF_PTT_UNUSED_ENTRY(192),
+ AVF_PTT_UNUSED_ENTRY(193),
+ AVF_PTT_UNUSED_ENTRY(194),
+ AVF_PTT_UNUSED_ENTRY(195),
+ AVF_PTT_UNUSED_ENTRY(196),
+ AVF_PTT_UNUSED_ENTRY(197),
+ AVF_PTT_UNUSED_ENTRY(198),
+ AVF_PTT_UNUSED_ENTRY(199),
+
+ AVF_PTT_UNUSED_ENTRY(200),
+ AVF_PTT_UNUSED_ENTRY(201),
+ AVF_PTT_UNUSED_ENTRY(202),
+ AVF_PTT_UNUSED_ENTRY(203),
+ AVF_PTT_UNUSED_ENTRY(204),
+ AVF_PTT_UNUSED_ENTRY(205),
+ AVF_PTT_UNUSED_ENTRY(206),
+ AVF_PTT_UNUSED_ENTRY(207),
+ AVF_PTT_UNUSED_ENTRY(208),
+ AVF_PTT_UNUSED_ENTRY(209),
+
+ AVF_PTT_UNUSED_ENTRY(210),
+ AVF_PTT_UNUSED_ENTRY(211),
+ AVF_PTT_UNUSED_ENTRY(212),
+ AVF_PTT_UNUSED_ENTRY(213),
+ AVF_PTT_UNUSED_ENTRY(214),
+ AVF_PTT_UNUSED_ENTRY(215),
+ AVF_PTT_UNUSED_ENTRY(216),
+ AVF_PTT_UNUSED_ENTRY(217),
+ AVF_PTT_UNUSED_ENTRY(218),
+ AVF_PTT_UNUSED_ENTRY(219),
+
+ AVF_PTT_UNUSED_ENTRY(220),
+ AVF_PTT_UNUSED_ENTRY(221),
+ AVF_PTT_UNUSED_ENTRY(222),
+ AVF_PTT_UNUSED_ENTRY(223),
+ AVF_PTT_UNUSED_ENTRY(224),
+ AVF_PTT_UNUSED_ENTRY(225),
+ AVF_PTT_UNUSED_ENTRY(226),
+ AVF_PTT_UNUSED_ENTRY(227),
+ AVF_PTT_UNUSED_ENTRY(228),
+ AVF_PTT_UNUSED_ENTRY(229),
+
+ AVF_PTT_UNUSED_ENTRY(230),
+ AVF_PTT_UNUSED_ENTRY(231),
+ AVF_PTT_UNUSED_ENTRY(232),
+ AVF_PTT_UNUSED_ENTRY(233),
+ AVF_PTT_UNUSED_ENTRY(234),
+ AVF_PTT_UNUSED_ENTRY(235),
+ AVF_PTT_UNUSED_ENTRY(236),
+ AVF_PTT_UNUSED_ENTRY(237),
+ AVF_PTT_UNUSED_ENTRY(238),
+ AVF_PTT_UNUSED_ENTRY(239),
+
+ AVF_PTT_UNUSED_ENTRY(240),
+ AVF_PTT_UNUSED_ENTRY(241),
+ AVF_PTT_UNUSED_ENTRY(242),
+ AVF_PTT_UNUSED_ENTRY(243),
+ AVF_PTT_UNUSED_ENTRY(244),
+ AVF_PTT_UNUSED_ENTRY(245),
+ AVF_PTT_UNUSED_ENTRY(246),
+ AVF_PTT_UNUSED_ENTRY(247),
+ AVF_PTT_UNUSED_ENTRY(248),
+ AVF_PTT_UNUSED_ENTRY(249),
+
+ AVF_PTT_UNUSED_ENTRY(250),
+ AVF_PTT_UNUSED_ENTRY(251),
+ AVF_PTT_UNUSED_ENTRY(252),
+ AVF_PTT_UNUSED_ENTRY(253),
+ AVF_PTT_UNUSED_ENTRY(254),
+ AVF_PTT_UNUSED_ENTRY(255)
+};
+
+
+/**
+ * avf_validate_mac_addr - Validate unicast MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+enum avf_status_code avf_validate_mac_addr(u8 *mac_addr)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+
+ DEBUGFUNC("avf_validate_mac_addr");
+
+ /* Broadcast addresses ARE multicast addresses
+ * Make sure it is not a multicast address
+ * Reject the zero address
+ */
+ if (AVF_IS_MULTICAST(mac_addr) ||
+ (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
+ status = AVF_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * avf_aq_rx_ctl_read_register - use FW to read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: ptr to register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to read the Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+enum avf_status_code avf_aq_rx_ctl_read_register(struct avf_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_rx_ctl_reg_read_write *cmd_resp =
+ (struct avf_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ enum avf_status_code status;
+
+ if (reg_val == NULL)
+ return AVF_ERR_PARAM;
+
+ avf_fill_default_direct_cmd_desc(&desc, avf_aqc_opc_rx_ctl_reg_read);
+
+ cmd_resp->address = CPU_TO_LE32(reg_addr);
+
+ status = avf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == AVF_SUCCESS)
+ *reg_val = LE32_TO_CPU(cmd_resp->value);
+
+ return status;
+}
+
+/**
+ * avf_read_rx_ctl - read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ **/
+u32 avf_read_rx_ctl(struct avf_hw *hw, u32 reg_addr)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+ bool use_register;
+ int retry = 5;
+ u32 val = 0;
+
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == AVF_MAC_X722));
+ if (!use_register) {
+do_retry:
+ status = avf_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
+ if (hw->aq.asq_last_status == AVF_AQ_RC_EAGAIN && retry) {
+ avf_msec_delay(1);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ val = rd32(hw, reg_addr);
+
+ return val;
+}
+
+/**
+ * avf_aq_rx_ctl_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to write to an Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+enum avf_status_code avf_aq_rx_ctl_write_register(struct avf_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_rx_ctl_reg_read_write *cmd =
+ (struct avf_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc, avf_aqc_opc_rx_ctl_reg_write);
+
+ cmd->address = CPU_TO_LE32(reg_addr);
+ cmd->value = CPU_TO_LE32(reg_val);
+
+ status = avf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * avf_write_rx_ctl - write to an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ **/
+void avf_write_rx_ctl(struct avf_hw *hw, u32 reg_addr, u32 reg_val)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+ bool use_register;
+ int retry = 5;
+
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == AVF_MAC_X722));
+ if (!use_register) {
+do_retry:
+ status = avf_aq_rx_ctl_write_register(hw, reg_addr,
+ reg_val, NULL);
+ if (hw->aq.asq_last_status == AVF_AQ_RC_EAGAIN && retry) {
+ avf_msec_delay(1);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ wr32(hw, reg_addr, reg_val);
+}
+
+/**
+ * avf_aq_set_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: new register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write the external PHY register.
+ **/
+enum avf_status_code avf_aq_set_phy_register(struct avf_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_phy_register_access *cmd =
+ (struct avf_aqc_phy_register_access *)&desc.params.raw;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_set_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = CPU_TO_LE32(reg_addr);
+ cmd->reg_value = CPU_TO_LE32(reg_val);
+
+ status = avf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * avf_aq_get_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: read register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the external PHY register.
+ **/
+enum avf_status_code avf_aq_get_phy_register(struct avf_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_phy_register_access *cmd =
+ (struct avf_aqc_phy_register_access *)&desc.params.raw;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_get_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = CPU_TO_LE32(reg_addr);
+
+ status = avf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (!status)
+ *reg_val = LE32_TO_CPU(cmd->reg_value);
+
+ return status;
+}
+
+
+/**
+ * avf_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. avf_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+enum avf_status_code avf_aq_send_msg_to_pf(struct avf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum avf_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_asq_cmd_details details;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc, avf_aqc_opc_send_msg_to_pf);
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_SI);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(AVF_AQ_FLAG_BUF
+ | AVF_AQ_FLAG_RD));
+ if (msglen > AVF_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ if (!cmd_details) {
+ avf_memset(&details, 0, sizeof(details), AVF_NONDMA_MEM);
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = avf_asq_send_command(hw, (struct avf_aq_desc *)&desc, msg,
+ msglen, cmd_details);
+ return status;
+}
+
+/**
+ * avf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void avf_parse_hw_config(struct avf_hw *hw,
+ struct virtchnl_vf_resource *msg)
+{
+ struct virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.iwarp = (msg->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
+ avf_memcpy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr,
+ ETH_ALEN,
+ AVF_NONDMA_TO_NONDMA);
+ avf_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ ETH_ALEN,
+ AVF_NONDMA_TO_NONDMA);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * avf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+enum avf_status_code avf_reset(struct avf_hw *hw)
+{
+ return avf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
+ AVF_SUCCESS, NULL, 0, NULL);
+}
+
+/**
+ * avf_aq_set_arp_proxy_config
+ * @hw: pointer to the HW structure
+ * @proxy_config: pointer to proxy config command table struct
+ * @cmd_details: pointer to command details
+ *
+ * Set ARP offload parameters from pre-populated
+ * avf_aqc_arp_proxy_data struct
+ **/
+enum avf_status_code avf_aq_set_arp_proxy_config(struct avf_hw *hw,
+ struct avf_aqc_arp_proxy_data *proxy_config,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ enum avf_status_code status;
+
+ if (!proxy_config)
+ return AVF_ERR_PARAM;
+
+ avf_fill_default_direct_cmd_desc(&desc, avf_aqc_opc_set_proxy_config);
+
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_RD);
+ desc.params.external.addr_high =
+ CPU_TO_LE32(AVF_HI_DWORD((u64)proxy_config));
+ desc.params.external.addr_low =
+ CPU_TO_LE32(AVF_LO_DWORD((u64)proxy_config));
+ desc.datalen = CPU_TO_LE16(sizeof(struct avf_aqc_arp_proxy_data));
+
+ status = avf_asq_send_command(hw, &desc, proxy_config,
+ sizeof(struct avf_aqc_arp_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * avf_aq_opc_set_ns_proxy_table_entry
+ * @hw: pointer to the HW structure
+ * @ns_proxy_table_entry: pointer to NS table entry command struct
+ * @cmd_details: pointer to command details
+ *
+ * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters
+ * from pre-populated avf_aqc_ns_proxy_data struct
+ **/
+enum avf_status_code avf_aq_set_ns_proxy_table_entry(struct avf_hw *hw,
+ struct avf_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ enum avf_status_code status;
+
+ if (!ns_proxy_table_entry)
+ return AVF_ERR_PARAM;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_set_ns_proxy_table_entry);
+
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_RD);
+ desc.params.external.addr_high =
+ CPU_TO_LE32(AVF_HI_DWORD((u64)ns_proxy_table_entry));
+ desc.params.external.addr_low =
+ CPU_TO_LE32(AVF_LO_DWORD((u64)ns_proxy_table_entry));
+ desc.datalen = CPU_TO_LE16(sizeof(struct avf_aqc_ns_proxy_data));
+
+ status = avf_asq_send_command(hw, &desc, ns_proxy_table_entry,
+ sizeof(struct avf_aqc_ns_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * avf_aq_set_clear_wol_filter
+ * @hw: pointer to the hw struct
+ * @filter_index: index of filter to modify (0-7)
+ * @filter: buffer containing filter to be set
+ * @set_filter: true to set filter, false to clear filter
+ * @no_wol_tco: if true, pass through packets cannot cause wake-up
+ * if false, pass through packets may cause wake-up
+ * @filter_valid: true if filter action is valid
+ * @no_wol_tco_valid: true if no WoL in TCO traffic action valid
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear WoL filter for port attached to the PF
+ **/
+enum avf_status_code avf_aq_set_clear_wol_filter(struct avf_hw *hw,
+ u8 filter_index,
+ struct avf_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_set_wol_filter *cmd =
+ (struct avf_aqc_set_wol_filter *)&desc.params.raw;
+ enum avf_status_code status;
+ u16 cmd_flags = 0;
+ u16 valid_flags = 0;
+ u16 buff_len = 0;
+
+ avf_fill_default_direct_cmd_desc(&desc, avf_aqc_opc_set_wol_filter);
+
+ if (filter_index >= AVF_AQC_MAX_NUM_WOL_FILTERS)
+ return AVF_ERR_PARAM;
+ cmd->filter_index = CPU_TO_LE16(filter_index);
+
+ if (set_filter) {
+ if (!filter)
+ return AVF_ERR_PARAM;
+
+ cmd_flags |= AVF_AQC_SET_WOL_FILTER;
+ cmd_flags |= AVF_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR;
+ }
+
+ if (no_wol_tco)
+ cmd_flags |= AVF_AQC_SET_WOL_FILTER_NO_TCO_WOL;
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+
+ if (filter_valid)
+ valid_flags |= AVF_AQC_SET_WOL_FILTER_ACTION_VALID;
+ if (no_wol_tco_valid)
+ valid_flags |= AVF_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
+ cmd->valid_flags = CPU_TO_LE16(valid_flags);
+
+ buff_len = sizeof(*filter);
+ desc.datalen = CPU_TO_LE16(buff_len);
+
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_RD);
+
+ cmd->address_high = CPU_TO_LE32(AVF_HI_DWORD((u64)filter));
+ cmd->address_low = CPU_TO_LE32(AVF_LO_DWORD((u64)filter));
+
+ status = avf_asq_send_command(hw, &desc, filter,
+ buff_len, cmd_details);
+
+ return status;
+}
+
+/**
+ * avf_aq_get_wake_event_reason
+ * @hw: pointer to the hw struct
+ * @wake_reason: return value, index of matching filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get information for the reason of a Wake Up event
+ **/
+enum avf_status_code avf_aq_get_wake_event_reason(struct avf_hw *hw,
+ u16 *wake_reason,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_get_wake_reason_completion *resp =
+ (struct avf_aqc_get_wake_reason_completion *)&desc.params.raw;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc, avf_aqc_opc_get_wake_reason);
+
+ status = avf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == AVF_SUCCESS)
+ *wake_reason = LE16_TO_CPU(resp->wake_reason);
+
+ return status;
+}
+
+/**
+* avf_aq_clear_all_wol_filters
+* @hw: pointer to the hw struct
+* @cmd_details: pointer to command details structure or NULL
+*
+* Get information for the reason of a Wake Up event
+**/
+enum avf_status_code avf_aq_clear_all_wol_filters(struct avf_hw *hw,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_clear_all_wol_filters);
+
+ status = avf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * avf_aq_write_ddp - Write dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @track_id: package tracking id
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+avf_status_code avf_aq_write_ddp(struct avf_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_write_personalization_profile *cmd =
+ (struct avf_aqc_write_personalization_profile *)
+ &desc.params.raw;
+ struct avf_aqc_write_ddp_resp *resp;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_write_personalization_profile);
+
+ desc.flags |= CPU_TO_LE16(AVF_AQ_FLAG_BUF | AVF_AQ_FLAG_RD);
+ if (buff_size > AVF_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->profile_track_id = CPU_TO_LE32(track_id);
+
+ status = avf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ resp = (struct avf_aqc_write_ddp_resp *)&desc.params.raw;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * avf_aq_get_ddp_list - Read dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @flags: AdminQ command flags
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+avf_status_code avf_aq_get_ddp_list(struct avf_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_get_applied_profiles *cmd =
+ (struct avf_aqc_get_applied_profiles *)&desc.params.raw;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_get_personalization_profile_list);
+
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_BUF);
+ if (buff_size > AVF_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->flags = flags;
+
+ status = avf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * avf_find_segment_in_package
+ * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_AVF)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ **/
+struct avf_generic_seg_header *
+avf_find_segment_in_package(u32 segment_type,
+ struct avf_package_header *pkg_hdr)
+{
+ struct avf_generic_seg_header *segment;
+ u32 i;
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < pkg_hdr->segment_count; i++) {
+ segment =
+ (struct avf_generic_seg_header *)((u8 *)pkg_hdr +
+ pkg_hdr->segment_offset[i]);
+
+ if (segment->type == segment_type)
+ return segment;
+ }
+
+ return NULL;
+}
+
+/* Get section table in profile */
+#define AVF_SECTION_TABLE(profile, sec_tbl) \
+ do { \
+ struct avf_profile_segment *p = (profile); \
+ u32 count; \
+ u32 *nvm; \
+ count = p->device_table_count; \
+ nvm = (u32 *)&p->device_table[count]; \
+ sec_tbl = (struct avf_section_table *)&nvm[nvm[0] + 1]; \
+ } while (0)
+
+/* Get section header in profile */
+#define AVF_SECTION_HEADER(profile, offset) \
+ (struct avf_profile_section_header *)((u8 *)(profile) + (offset))
+
+/**
+ * avf_find_section_in_profile
+ * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
+ * @profile: pointer to the avf segment header to be searched
+ *
+ * This function searches avf segment for a particular section type. On
+ * success it returns a pointer to the section header, otherwise it will
+ * return NULL.
+ **/
+struct avf_profile_section_header *
+avf_find_section_in_profile(u32 section_type,
+ struct avf_profile_segment *profile)
+{
+ struct avf_profile_section_header *sec;
+ struct avf_section_table *sec_tbl;
+ u32 sec_off;
+ u32 i;
+
+ if (profile->header.type != SEGMENT_TYPE_AVF)
+ return NULL;
+
+ AVF_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = AVF_SECTION_HEADER(profile, sec_off);
+ if (sec->section.type == section_type)
+ return sec;
+ }
+
+ return NULL;
+}
+
+/**
+ * avf_ddp_exec_aq_section - Execute generic AQ for DDP
+ * @hw: pointer to the hw struct
+ * @aq: command buffer containing all data to execute AQ
+ **/
+STATIC enum
+avf_status_code avf_ddp_exec_aq_section(struct avf_hw *hw,
+ struct avf_profile_aq_section *aq)
+{
+ enum avf_status_code status;
+ struct avf_aq_desc desc;
+ u8 *msg = NULL;
+ u16 msglen;
+
+ avf_fill_default_direct_cmd_desc(&desc, aq->opcode);
+ desc.flags |= CPU_TO_LE16(aq->flags);
+ avf_memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw),
+ AVF_NONDMA_TO_NONDMA);
+
+ msglen = aq->datalen;
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(AVF_AQ_FLAG_BUF |
+ AVF_AQ_FLAG_RD));
+ if (msglen > AVF_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ msg = &aq->data[0];
+ }
+
+ status = avf_asq_send_command(hw, &desc, msg, msglen, NULL);
+
+ if (status != AVF_SUCCESS) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "unable to exec DDP AQ opcode %u, error %d\n",
+ aq->opcode, status);
+ return status;
+ }
+
+ /* copy returned desc to aq_buf */
+ avf_memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw),
+ AVF_NONDMA_TO_NONDMA);
+
+ return AVF_SUCCESS;
+}
+
+/**
+ * avf_validate_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be validated
+ * @track_id: package tracking id
+ * @rollback: flag if the profile is for rollback.
+ *
+ * Validates supported devices and profile's sections.
+ */
+STATIC enum avf_status_code
+avf_validate_profile(struct avf_hw *hw, struct avf_profile_segment *profile,
+ u32 track_id, bool rollback)
+{
+ struct avf_profile_section_header *sec = NULL;
+ enum avf_status_code status = AVF_SUCCESS;
+ struct avf_section_table *sec_tbl;
+ u32 vendor_dev_id;
+ u32 dev_cnt;
+ u32 sec_off;
+ u32 i;
+
+ if (track_id == AVF_DDP_TRACKID_INVALID) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE, "Invalid track_id\n");
+ return AVF_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == AVF_INTEL_VENDOR_ID &&
+ hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (dev_cnt && (i == dev_cnt)) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "Device doesn't support DDP\n");
+ return AVF_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ AVF_SECTION_TABLE(profile, sec_tbl);
+
+ /* Validate sections types */
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = AVF_SECTION_HEADER(profile, sec_off);
+ if (rollback) {
+ if (sec->section.type == SECTION_TYPE_MMIO ||
+ sec->section.type == SECTION_TYPE_AQ ||
+ sec->section.type == SECTION_TYPE_RB_AQ) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "Not a roll-back package\n");
+ return AVF_NOT_SUPPORTED;
+ }
+ } else {
+ if (sec->section.type == SECTION_TYPE_RB_AQ ||
+ sec->section.type == SECTION_TYPE_RB_MMIO) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "Not an original package\n");
+ return AVF_NOT_SUPPORTED;
+ }
+ }
+ }
+
+ return status;
+}
+
+/**
+ * avf_write_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be downloaded
+ * @track_id: package tracking id
+ *
+ * Handles the download of a complete package.
+ */
+enum avf_status_code
+avf_write_profile(struct avf_hw *hw, struct avf_profile_segment *profile,
+ u32 track_id)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+ struct avf_section_table *sec_tbl;
+ struct avf_profile_section_header *sec = NULL;
+ struct avf_profile_aq_section *ddp_aq;
+ u32 section_size = 0;
+ u32 offset = 0, info = 0;
+ u32 sec_off;
+ u32 i;
+
+ status = avf_validate_profile(hw, profile, track_id, false);
+ if (status)
+ return status;
+
+ AVF_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = AVF_SECTION_HEADER(profile, sec_off);
+ /* Process generic admin command */
+ if (sec->section.type == SECTION_TYPE_AQ) {
+ ddp_aq = (struct avf_profile_aq_section *)&sec[1];
+ status = avf_ddp_exec_aq_section(hw, ddp_aq);
+ if (status) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "Failed to execute aq: section %d, opcode %u\n",
+ i, ddp_aq->opcode);
+ break;
+ }
+ sec->section.type = SECTION_TYPE_RB_AQ;
+ }
+
+ /* Skip any non-mmio sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct avf_profile_section_header);
+
+ /* Write MMIO section */
+ status = avf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * avf_rollback_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be removed
+ * @track_id: package tracking id
+ *
+ * Rolls back previously loaded package.
+ */
+enum avf_status_code
+avf_rollback_profile(struct avf_hw *hw, struct avf_profile_segment *profile,
+ u32 track_id)
+{
+ struct avf_profile_section_header *sec = NULL;
+ enum avf_status_code status = AVF_SUCCESS;
+ struct avf_section_table *sec_tbl;
+ u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ u32 sec_off;
+ int i;
+
+ status = avf_validate_profile(hw, profile, track_id, true);
+ if (status)
+ return status;
+
+ AVF_SECTION_TABLE(profile, sec_tbl);
+
+ /* For rollback write sections in reverse */
+ for (i = sec_tbl->section_count - 1; i >= 0; i--) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = AVF_SECTION_HEADER(profile, sec_off);
+
+ /* Skip any non-rollback sections */
+ if (sec->section.type != SECTION_TYPE_RB_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct avf_profile_section_header);
+
+ /* Write roll-back MMIO section */
+ status = avf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * avf_add_pinfo_to_list
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+enum avf_status_code
+avf_add_pinfo_to_list(struct avf_hw *hw,
+ struct avf_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+ struct avf_profile_section_header *sec = NULL;
+ struct avf_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+
+ sec = (struct avf_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct avf_profile_section_header) +
+ sizeof(struct avf_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct avf_profile_section_header);
+ sec->section.size = sizeof(struct avf_profile_info);
+ pinfo = (struct avf_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = AVF_DDP_ADD_TRACKID;
+ avf_memcpy(pinfo->name, profile->name, AVF_DDP_NAME_SIZE,
+ AVF_NONDMA_TO_NONDMA);
+
+ status = avf_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
diff --git a/src/spdk/dpdk/drivers/net/avf/base/avf_devids.h b/src/spdk/dpdk/drivers/net/avf/base/avf_devids.h
new file mode 100644
index 00000000..7d9fed25
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/avf_devids.h
@@ -0,0 +1,43 @@
+/*******************************************************************************
+
+Copyright (c) 2017, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_DEVIDS_H_
+#define _AVF_DEVIDS_H_
+
+/* Vendor ID */
+#define AVF_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define AVF_DEV_ID_ADAPTIVE_VF 0x1889
+
+#endif /* _AVF_DEVIDS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/base/avf_hmc.h b/src/spdk/dpdk/drivers/net/avf/base/avf_hmc.h
new file mode 100644
index 00000000..b9b7b5be
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/avf_hmc.h
@@ -0,0 +1,245 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_HMC_H_
+#define _AVF_HMC_H_
+
+#define AVF_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct avf_hw;
+
+#define AVF_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define AVF_HMC_PD_CNT_IN_SD 512
+#define AVF_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define AVF_HMC_PAGED_BP_SIZE 4096
+#define AVF_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define AVF_FIRST_VF_FPM_ID 16
+
+struct avf_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum avf_sd_entry_type {
+ AVF_SD_TYPE_INVALID = 0,
+ AVF_SD_TYPE_PAGED = 1,
+ AVF_SD_TYPE_DIRECT = 2
+};
+
+struct avf_hmc_bp {
+ enum avf_sd_entry_type entry_type;
+ struct avf_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct avf_hmc_pd_entry {
+ struct avf_hmc_bp bp;
+ u32 sd_index;
+ bool rsrc_pg;
+ bool valid;
+};
+
+struct avf_hmc_pd_table {
+ struct avf_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct avf_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
+ struct avf_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct avf_hmc_sd_entry {
+ enum avf_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct avf_hmc_pd_table pd_table;
+ struct avf_hmc_bp bp;
+ } u;
+};
+
+struct avf_hmc_sd_table {
+ struct avf_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct avf_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct avf_hmc_info {
+ u32 signature;
+ /* equals to pci func num for PF and dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct avf_hmc_obj_info *hmc_obj;
+ struct avf_virt_mem hmc_obj_virt_mem;
+ struct avf_hmc_sd_table sd_table;
+};
+
+#define AVF_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define AVF_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define AVF_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define AVF_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define AVF_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define AVF_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * AVF_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define AVF_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(AVF_HI_DWORD(pa)); \
+ val2 = (u32)(pa) | (AVF_HMC_MAX_BP_COUNT << \
+ AVF_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == AVF_SD_TYPE_PAGED) ? 0 : 1) << \
+ AVF_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ BIT(AVF_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(AVF_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), AVF_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), AVF_PFHMC_SDDATALOW, val2); \
+ wr32((hw), AVF_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * AVF_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define AVF_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (AVF_HMC_MAX_BP_COUNT << \
+ AVF_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == AVF_SD_TYPE_PAGED) ? 0 : 1) << \
+ AVF_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(AVF_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), AVF_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), AVF_PFHMC_SDDATALOW, val2); \
+ wr32((hw), AVF_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * AVF_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define AVF_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), AVF_PFHMC_PDINV, \
+ (((sd_idx) << AVF_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << AVF_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * AVF_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by avf_hmc_rsrc_type.
+ **/
+#define AVF_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / AVF_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / AVF_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
+
+/**
+ * AVF_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by avf_hmc_rsrc_type.
+ **/
+#define AVF_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / AVF_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / AVF_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
+enum avf_status_code avf_add_sd_table_entry(struct avf_hw *hw,
+ struct avf_hmc_info *hmc_info,
+ u32 sd_index,
+ enum avf_sd_entry_type type,
+ u64 direct_mode_sz);
+
+enum avf_status_code avf_add_pd_table_entry(struct avf_hw *hw,
+ struct avf_hmc_info *hmc_info,
+ u32 pd_index,
+ struct avf_dma_mem *rsrc_pg);
+enum avf_status_code avf_remove_pd_bp(struct avf_hw *hw,
+ struct avf_hmc_info *hmc_info,
+ u32 idx);
+enum avf_status_code avf_prep_remove_sd_bp(struct avf_hmc_info *hmc_info,
+ u32 idx);
+enum avf_status_code avf_remove_sd_bp_new(struct avf_hw *hw,
+ struct avf_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+enum avf_status_code avf_prep_remove_pd_page(struct avf_hmc_info *hmc_info,
+ u32 idx);
+enum avf_status_code avf_remove_pd_page_new(struct avf_hw *hw,
+ struct avf_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _AVF_HMC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/base/avf_lan_hmc.h b/src/spdk/dpdk/drivers/net/avf/base/avf_lan_hmc.h
new file mode 100644
index 00000000..48805d89
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/avf_lan_hmc.h
@@ -0,0 +1,200 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_LAN_HMC_H_
+#define _AVF_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct avf_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct avf_hmc_obj_rxq {
+ u16 head;
+ u16 cpuid; /* bigger than needed, see above for reason */
+ u64 base;
+ u16 qlen;
+#define AVF_RXQ_CTX_DBUFF_SHIFT 7
+ u16 dbuff; /* bigger than needed, see above for reason */
+#define AVF_RXQ_CTX_HBUFF_SHIFT 6
+ u16 hbuff; /* bigger than needed, see above for reason */
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u32 rxmax; /* bigger than needed, see above for reason */
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
+};
+
+/* Tx queue context data
+*
+* The sizes of the variables may be larger than needed due to crossing byte
+* boundaries. If we do not have the width of the variable set to the correct
+* size then we could end up shifting bits off the top of the variable when the
+* variable is at the top of a byte and crosses over into the next byte.
+*/
+struct avf_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u8 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum avf_hmc_obj_rx_hsplit_0 {
+ AVF_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ AVF_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ AVF_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ AVF_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ AVF_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purpose only */
+struct avf_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct avf_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum avf_hmc_lan_object_size {
+ AVF_HMC_LAN_OBJ_SZ_8 = 0x3,
+ AVF_HMC_LAN_OBJ_SZ_16 = 0x4,
+ AVF_HMC_LAN_OBJ_SZ_32 = 0x5,
+ AVF_HMC_LAN_OBJ_SZ_64 = 0x6,
+ AVF_HMC_LAN_OBJ_SZ_128 = 0x7,
+ AVF_HMC_LAN_OBJ_SZ_256 = 0x8,
+ AVF_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define AVF_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define AVF_HMC_OBJ_SIZE_TXQ 128
+#define AVF_HMC_OBJ_SIZE_RXQ 32
+#define AVF_HMC_OBJ_SIZE_FCOE_CNTX 64
+#define AVF_HMC_OBJ_SIZE_FCOE_FILT 64
+
+enum avf_hmc_lan_rsrc_type {
+ AVF_HMC_LAN_FULL = 0,
+ AVF_HMC_LAN_TX = 1,
+ AVF_HMC_LAN_RX = 2,
+ AVF_HMC_FCOE_CTX = 3,
+ AVF_HMC_FCOE_FILT = 4,
+ AVF_HMC_LAN_MAX = 5
+};
+
+enum avf_hmc_model {
+ AVF_HMC_MODEL_DIRECT_PREFERRED = 0,
+ AVF_HMC_MODEL_DIRECT_ONLY = 1,
+ AVF_HMC_MODEL_PAGED_ONLY = 2,
+ AVF_HMC_MODEL_UNKNOWN,
+};
+
+struct avf_hmc_lan_create_obj_info {
+ struct avf_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum avf_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct avf_hmc_lan_delete_obj_info {
+ struct avf_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+enum avf_status_code avf_init_lan_hmc(struct avf_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+enum avf_status_code avf_configure_lan_hmc(struct avf_hw *hw,
+ enum avf_hmc_model model);
+enum avf_status_code avf_shutdown_lan_hmc(struct avf_hw *hw);
+
+u64 avf_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num);
+enum avf_status_code avf_get_lan_tx_queue_context(struct avf_hw *hw,
+ u16 queue,
+ struct avf_hmc_obj_txq *s);
+enum avf_status_code avf_clear_lan_tx_queue_context(struct avf_hw *hw,
+ u16 queue);
+enum avf_status_code avf_set_lan_tx_queue_context(struct avf_hw *hw,
+ u16 queue,
+ struct avf_hmc_obj_txq *s);
+enum avf_status_code avf_get_lan_rx_queue_context(struct avf_hw *hw,
+ u16 queue,
+ struct avf_hmc_obj_rxq *s);
+enum avf_status_code avf_clear_lan_rx_queue_context(struct avf_hw *hw,
+ u16 queue);
+enum avf_status_code avf_set_lan_rx_queue_context(struct avf_hw *hw,
+ u16 queue,
+ struct avf_hmc_obj_rxq *s);
+enum avf_status_code avf_create_lan_hmc_object(struct avf_hw *hw,
+ struct avf_hmc_lan_create_obj_info *info);
+enum avf_status_code avf_delete_lan_hmc_object(struct avf_hw *hw,
+ struct avf_hmc_lan_delete_obj_info *info);
+
+#endif /* _AVF_LAN_HMC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/base/avf_osdep.h b/src/spdk/dpdk/drivers/net/avf/base/avf_osdep.h
new file mode 100644
index 00000000..9ef45968
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/avf_osdep.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _AVF_OSDEP_H_
+#define _AVF_OSDEP_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#include "../avf_log.h"
+
+#define INLINE inline
+#define STATIC static
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+
+#define __iomem
+#define hw_dbg(hw, S, A...) do {} while (0)
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+#define lower_32_bits(n) ((u32)(n))
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+#ifndef __le16
+#define __le16 uint16_t
+#endif
+#ifndef __le32
+#define __le32 uint32_t
+#endif
+#ifndef __le64
+#define __le64 uint64_t
+#endif
+#ifndef __be16
+#define __be16 uint16_t
+#endif
+#ifndef __be32
+#define __be32 uint32_t
+#endif
+#ifndef __be64
+#define __be64 uint64_t
+#endif
+
+#define FALSE 0
+#define TRUE 1
+#define false 0
+#define true 1
+
+#define min(a,b) RTE_MIN(a,b)
+#define max(a,b) RTE_MAX(a,b)
+
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#define ASSERT(x) if(!(x)) rte_panic("AVF: x")
+
+#define DEBUGOUT(S) PMD_DRV_LOG_RAW(DEBUG, S)
+#define DEBUGOUT2(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A)
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+
+#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
+#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
+#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
+#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
+#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
+#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
+
+#define cpu_to_le16(o) rte_cpu_to_le_16(o)
+#define cpu_to_le32(s) rte_cpu_to_le_32(s)
+#define cpu_to_le64(h) rte_cpu_to_le_64(h)
+#define le16_to_cpu(a) rte_le_to_cpu_16(a)
+#define le32_to_cpu(c) rte_le_to_cpu_32(c)
+#define le64_to_cpu(k) rte_le_to_cpu_64(k)
+
+#define avf_memset(a, b, c, d) memset((a), (b), (c))
+#define avf_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
+
+#define avf_usec_delay(x) rte_delay_us(x)
+#define avf_msec_delay(x) rte_delay_us(1000*(x))
+
+#define AVF_PCI_REG(reg) rte_read32(reg)
+#define AVF_PCI_REG_ADDR(a, reg) \
+ ((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
+
+#define AVF_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+#define AVF_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
+static inline
+uint32_t avf_read_addr(volatile void *addr)
+{
+ return rte_le_to_cpu_32(AVF_PCI_REG(addr));
+}
+
+#define AVF_READ_REG(hw, reg) \
+ avf_read_addr(AVF_PCI_REG_ADDR((hw), (reg)))
+#define AVF_WRITE_REG(hw, reg, value) \
+ AVF_PCI_REG_WRITE(AVF_PCI_REG_ADDR((hw), (reg)), (value))
+#define AVF_WRITE_FLUSH(a) \
+ AVF_READ_REG(a, AVFGEN_RSTAT)
+
+#define rd32(a, reg) avf_read_addr(AVF_PCI_REG_ADDR((a), (reg)))
+#define wr32(a, reg, value) \
+ AVF_PCI_REG_WRITE(AVF_PCI_REG_ADDR((a), (reg)), (value))
+
+#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0]))
+
+#define avf_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ PMD_DRV_LOG_RAW(DEBUG, "avf %02x.%x " s, \
+ (h)->bus.device, (h)->bus.func, \
+ ##__VA_ARGS__); \
+} while (0)
+
+/* memory allocation tracking */
+struct avf_dma_mem {
+ void *va;
+ u64 pa;
+ u32 size;
+ const void *zone;
+} __attribute__((packed));
+
+struct avf_virt_mem {
+ void *va;
+ u32 size;
+} __attribute__((packed));
+
+/* SW spinlock */
+struct avf_spinlock {
+ rte_spinlock_t spinlock;
+};
+
+#define avf_allocate_dma_mem(h, m, unused, s, a) \
+ avf_allocate_dma_mem_d(h, m, s, a)
+#define avf_free_dma_mem(h, m) avf_free_dma_mem_d(h, m)
+
+#define avf_allocate_virt_mem(h, m, s) avf_allocate_virt_mem_d(h, m, s)
+#define avf_free_virt_mem(h, m) avf_free_virt_mem_d(h, m)
+
+static inline void
+avf_init_spinlock_d(struct avf_spinlock *sp)
+{
+ rte_spinlock_init(&sp->spinlock);
+}
+
+static inline void
+avf_acquire_spinlock_d(struct avf_spinlock *sp)
+{
+ rte_spinlock_lock(&sp->spinlock);
+}
+
+static inline void
+avf_release_spinlock_d(struct avf_spinlock *sp)
+{
+ rte_spinlock_unlock(&sp->spinlock);
+}
+
+static inline void
+avf_destroy_spinlock_d(__rte_unused struct avf_spinlock *sp)
+{
+}
+
+#define avf_init_spinlock(_sp) avf_init_spinlock_d(_sp)
+#define avf_acquire_spinlock(_sp) avf_acquire_spinlock_d(_sp)
+#define avf_release_spinlock(_sp) avf_release_spinlock_d(_sp)
+#define avf_destroy_spinlock(_sp) avf_destroy_spinlock_d(_sp)
+
+#endif /* _AVF_OSDEP_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/base/avf_prototype.h b/src/spdk/dpdk/drivers/net/avf/base/avf_prototype.h
new file mode 100644
index 00000000..de031dc6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/avf_prototype.h
@@ -0,0 +1,206 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_PROTOTYPE_H_
+#define _AVF_PROTOTYPE_H_
+
+#include "avf_type.h"
+#include "avf_alloc.h"
+#include "virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These are
+ * mostly because they are needed even before the init
+ * has happened and will assist in the early SW and FW
+ * setup.
+ */
+
+/* adminq functions */
+enum avf_status_code avf_init_adminq(struct avf_hw *hw);
+enum avf_status_code avf_shutdown_adminq(struct avf_hw *hw);
+enum avf_status_code avf_init_asq(struct avf_hw *hw);
+enum avf_status_code avf_init_arq(struct avf_hw *hw);
+enum avf_status_code avf_alloc_adminq_asq_ring(struct avf_hw *hw);
+enum avf_status_code avf_alloc_adminq_arq_ring(struct avf_hw *hw);
+enum avf_status_code avf_shutdown_asq(struct avf_hw *hw);
+enum avf_status_code avf_shutdown_arq(struct avf_hw *hw);
+u16 avf_clean_asq(struct avf_hw *hw);
+void avf_free_adminq_asq(struct avf_hw *hw);
+void avf_free_adminq_arq(struct avf_hw *hw);
+enum avf_status_code avf_validate_mac_addr(u8 *mac_addr);
+void avf_adminq_init_ring_data(struct avf_hw *hw);
+enum avf_status_code avf_clean_arq_element(struct avf_hw *hw,
+ struct avf_arq_event_info *e,
+ u16 *events_pending);
+enum avf_status_code avf_asq_send_command(struct avf_hw *hw,
+ struct avf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct avf_asq_cmd_details *cmd_details);
+bool avf_asq_done(struct avf_hw *hw);
+
+/* debug function for adminq */
+void avf_debug_aq(struct avf_hw *hw, enum avf_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
+
+void avf_idle_aq(struct avf_hw *hw);
+bool avf_check_asq_alive(struct avf_hw *hw);
+enum avf_status_code avf_aq_queue_shutdown(struct avf_hw *hw, bool unloading);
+
+enum avf_status_code avf_aq_get_rss_lut(struct avf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum avf_status_code avf_aq_set_rss_lut(struct avf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum avf_status_code avf_aq_get_rss_key(struct avf_hw *hw,
+ u16 seid,
+ struct avf_aqc_get_set_rss_key_data *key);
+enum avf_status_code avf_aq_set_rss_key(struct avf_hw *hw,
+ u16 seid,
+ struct avf_aqc_get_set_rss_key_data *key);
+const char *avf_aq_str(struct avf_hw *hw, enum avf_admin_queue_err aq_err);
+const char *avf_stat_str(struct avf_hw *hw, enum avf_status_code stat_err);
+
+
+enum avf_status_code avf_set_mac_type(struct avf_hw *hw);
+
+extern struct avf_rx_ptype_decoded avf_ptype_lookup[];
+
+STATIC INLINE struct avf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return avf_ptype_lookup[ptype];
+}
+
+/* prototype for functions used for SW spinlocks */
+void avf_init_spinlock(struct avf_spinlock *sp);
+void avf_acquire_spinlock(struct avf_spinlock *sp);
+void avf_release_spinlock(struct avf_spinlock *sp);
+void avf_destroy_spinlock(struct avf_spinlock *sp);
+
+/* avf_common for VF drivers*/
+void avf_parse_hw_config(struct avf_hw *hw,
+ struct virtchnl_vf_resource *msg);
+enum avf_status_code avf_reset(struct avf_hw *hw);
+enum avf_status_code avf_aq_send_msg_to_pf(struct avf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum avf_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_set_filter_control(struct avf_hw *hw,
+ struct avf_filter_control_settings *settings);
+enum avf_status_code avf_aq_add_rem_control_packet_filter(struct avf_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct avf_control_filter_stats *stats,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_aq_debug_dump(struct avf_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct avf_asq_cmd_details *cmd_details);
+void avf_add_filter_to_drop_tx_flow_control_frames(struct avf_hw *hw,
+ u16 vsi_seid);
+enum avf_status_code avf_aq_rx_ctl_read_register(struct avf_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct avf_asq_cmd_details *cmd_details);
+u32 avf_read_rx_ctl(struct avf_hw *hw, u32 reg_addr);
+enum avf_status_code avf_aq_rx_ctl_write_register(struct avf_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct avf_asq_cmd_details *cmd_details);
+void avf_write_rx_ctl(struct avf_hw *hw, u32 reg_addr, u32 reg_val);
+enum avf_status_code avf_aq_set_phy_register(struct avf_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_aq_get_phy_register(struct avf_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct avf_asq_cmd_details *cmd_details);
+
+enum avf_status_code avf_aq_set_arp_proxy_config(struct avf_hw *hw,
+ struct avf_aqc_arp_proxy_data *proxy_config,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_aq_set_ns_proxy_table_entry(struct avf_hw *hw,
+ struct avf_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_aq_set_clear_wol_filter(struct avf_hw *hw,
+ u8 filter_index,
+ struct avf_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_aq_get_wake_event_reason(struct avf_hw *hw,
+ u16 *wake_reason,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_aq_clear_all_wol_filters(struct avf_hw *hw,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_read_phy_register_clause22(struct avf_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+enum avf_status_code avf_write_phy_register_clause22(struct avf_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+enum avf_status_code avf_read_phy_register_clause45(struct avf_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum avf_status_code avf_write_phy_register_clause45(struct avf_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+enum avf_status_code avf_read_phy_register(struct avf_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum avf_status_code avf_write_phy_register(struct avf_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+u8 avf_get_phy_address(struct avf_hw *hw, u8 dev_num);
+enum avf_status_code avf_blink_phy_link_led(struct avf_hw *hw,
+ u32 time, u32 interval);
+enum avf_status_code avf_aq_write_ddp(struct avf_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct avf_asq_cmd_details *
+ cmd_details);
+enum avf_status_code avf_aq_get_ddp_list(struct avf_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct avf_asq_cmd_details *
+ cmd_details);
+struct avf_generic_seg_header *
+avf_find_segment_in_package(u32 segment_type,
+ struct avf_package_header *pkg_header);
+struct avf_profile_section_header *
+avf_find_section_in_profile(u32 section_type,
+ struct avf_profile_segment *profile);
+enum avf_status_code
+avf_write_profile(struct avf_hw *hw, struct avf_profile_segment *avf_seg,
+ u32 track_id);
+enum avf_status_code
+avf_rollback_profile(struct avf_hw *hw, struct avf_profile_segment *avf_seg,
+ u32 track_id);
+enum avf_status_code
+avf_add_pinfo_to_list(struct avf_hw *hw,
+ struct avf_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id);
+#endif /* _AVF_PROTOTYPE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/base/avf_register.h b/src/spdk/dpdk/drivers/net/avf/base/avf_register.h
new file mode 100644
index 00000000..ba5a9f3f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/avf_register.h
@@ -0,0 +1,346 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_REGISTER_H_
+#define _AVF_REGISTER_H_
+
+
+#define AVFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
+#define AVFMSIX_PBA1_MAX_INDEX 19
+#define AVFMSIX_PBA1_PENBIT_SHIFT 0
+#define AVFMSIX_PBA1_PENBIT_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_PBA1_PENBIT_SHIFT)
+#define AVFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define AVFMSIX_TADD1_MAX_INDEX 639
+#define AVFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define AVFMSIX_TADD1_MSIXTADD10_MASK AVF_MASK(0x3, AVFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define AVFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define AVFMSIX_TADD1_MSIXTADD_MASK AVF_MASK(0x3FFFFFFF, AVFMSIX_TADD1_MSIXTADD_SHIFT)
+#define AVFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define AVFMSIX_TMSG1_MAX_INDEX 639
+#define AVFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define AVFMSIX_TMSG1_MSIXTMSG_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define AVFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define AVFMSIX_TUADD1_MAX_INDEX 639
+#define AVFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define AVFMSIX_TUADD1_MSIXTUADD_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define AVFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define AVFMSIX_TVCTRL1_MAX_INDEX 639
+#define AVFMSIX_TVCTRL1_MASK_SHIFT 0
+#define AVFMSIX_TVCTRL1_MASK_MASK AVF_MASK(0x1, AVFMSIX_TVCTRL1_MASK_SHIFT)
+#define AVF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define AVF_ARQBAH1_ARQBAH_SHIFT 0
+#define AVF_ARQBAH1_ARQBAH_MASK AVF_MASK(0xFFFFFFFF, AVF_ARQBAH1_ARQBAH_SHIFT)
+#define AVF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define AVF_ARQBAL1_ARQBAL_SHIFT 0
+#define AVF_ARQBAL1_ARQBAL_MASK AVF_MASK(0xFFFFFFFF, AVF_ARQBAL1_ARQBAL_SHIFT)
+#define AVF_ARQH1 0x00007400 /* Reset: EMPR */
+#define AVF_ARQH1_ARQH_SHIFT 0
+#define AVF_ARQH1_ARQH_MASK AVF_MASK(0x3FF, AVF_ARQH1_ARQH_SHIFT)
+#define AVF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define AVF_ARQLEN1_ARQLEN_SHIFT 0
+#define AVF_ARQLEN1_ARQLEN_MASK AVF_MASK(0x3FF, AVF_ARQLEN1_ARQLEN_SHIFT)
+#define AVF_ARQLEN1_ARQVFE_SHIFT 28
+#define AVF_ARQLEN1_ARQVFE_MASK AVF_MASK(0x1, AVF_ARQLEN1_ARQVFE_SHIFT)
+#define AVF_ARQLEN1_ARQOVFL_SHIFT 29
+#define AVF_ARQLEN1_ARQOVFL_MASK AVF_MASK(0x1, AVF_ARQLEN1_ARQOVFL_SHIFT)
+#define AVF_ARQLEN1_ARQCRIT_SHIFT 30
+#define AVF_ARQLEN1_ARQCRIT_MASK AVF_MASK(0x1, AVF_ARQLEN1_ARQCRIT_SHIFT)
+#define AVF_ARQLEN1_ARQENABLE_SHIFT 31
+#define AVF_ARQLEN1_ARQENABLE_MASK AVF_MASK(0x1, AVF_ARQLEN1_ARQENABLE_SHIFT)
+#define AVF_ARQT1 0x00007000 /* Reset: EMPR */
+#define AVF_ARQT1_ARQT_SHIFT 0
+#define AVF_ARQT1_ARQT_MASK AVF_MASK(0x3FF, AVF_ARQT1_ARQT_SHIFT)
+#define AVF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define AVF_ATQBAH1_ATQBAH_SHIFT 0
+#define AVF_ATQBAH1_ATQBAH_MASK AVF_MASK(0xFFFFFFFF, AVF_ATQBAH1_ATQBAH_SHIFT)
+#define AVF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define AVF_ATQBAL1_ATQBAL_SHIFT 0
+#define AVF_ATQBAL1_ATQBAL_MASK AVF_MASK(0xFFFFFFFF, AVF_ATQBAL1_ATQBAL_SHIFT)
+#define AVF_ATQH1 0x00006400 /* Reset: EMPR */
+#define AVF_ATQH1_ATQH_SHIFT 0
+#define AVF_ATQH1_ATQH_MASK AVF_MASK(0x3FF, AVF_ATQH1_ATQH_SHIFT)
+#define AVF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define AVF_ATQLEN1_ATQLEN_SHIFT 0
+#define AVF_ATQLEN1_ATQLEN_MASK AVF_MASK(0x3FF, AVF_ATQLEN1_ATQLEN_SHIFT)
+#define AVF_ATQLEN1_ATQVFE_SHIFT 28
+#define AVF_ATQLEN1_ATQVFE_MASK AVF_MASK(0x1, AVF_ATQLEN1_ATQVFE_SHIFT)
+#define AVF_ATQLEN1_ATQOVFL_SHIFT 29
+#define AVF_ATQLEN1_ATQOVFL_MASK AVF_MASK(0x1, AVF_ATQLEN1_ATQOVFL_SHIFT)
+#define AVF_ATQLEN1_ATQCRIT_SHIFT 30
+#define AVF_ATQLEN1_ATQCRIT_MASK AVF_MASK(0x1, AVF_ATQLEN1_ATQCRIT_SHIFT)
+#define AVF_ATQLEN1_ATQENABLE_SHIFT 31
+#define AVF_ATQLEN1_ATQENABLE_MASK AVF_MASK(0x1, AVF_ATQLEN1_ATQENABLE_SHIFT)
+#define AVF_ATQT1 0x00008400 /* Reset: EMPR */
+#define AVF_ATQT1_ATQT_SHIFT 0
+#define AVF_ATQT1_ATQT_MASK AVF_MASK(0x3FF, AVF_ATQT1_ATQT_SHIFT)
+#define AVFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define AVFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define AVFGEN_RSTAT_VFR_STATE_MASK AVF_MASK(0x3, AVFGEN_RSTAT_VFR_STATE_SHIFT)
+#define AVFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define AVFINT_DYN_CTL01_INTENA_SHIFT 0
+#define AVFINT_DYN_CTL01_INTENA_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_INTENA_SHIFT)
+#define AVFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define AVFINT_DYN_CTL01_CLEARPBA_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define AVFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define AVFINT_DYN_CTL01_SWINT_TRIG_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define AVFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define AVFINT_DYN_CTL01_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define AVFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define AVFINT_DYN_CTL01_INTERVAL_MASK AVF_MASK(0xFFF, AVFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define AVFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define AVFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define AVFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define AVFINT_DYN_CTL01_SW_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define AVFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define AVFINT_DYN_CTL01_INTENA_MSK_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define AVFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define AVFINT_DYN_CTLN1_MAX_INDEX 15
+#define AVFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define AVFINT_DYN_CTLN1_INTENA_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_INTENA_SHIFT)
+#define AVFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define AVFINT_DYN_CTLN1_CLEARPBA_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define AVFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define AVFINT_DYN_CTLN1_SWINT_TRIG_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define AVFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define AVFINT_DYN_CTLN1_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define AVFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define AVFINT_DYN_CTLN1_INTERVAL_MASK AVF_MASK(0xFFF, AVFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define AVFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define AVFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define AVFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define AVFINT_DYN_CTLN1_SW_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define AVFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define AVFINT_DYN_CTLN1_INTENA_MSK_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define AVFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define AVFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define AVFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK AVF_MASK(0x1, AVFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define AVFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define AVFINT_ICR0_ENA1_ADMINQ_MASK AVF_MASK(0x1, AVFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define AVFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define AVFINT_ICR0_ENA1_RSVD_MASK AVF_MASK(0x1, AVFINT_ICR0_ENA1_RSVD_SHIFT)
+#define AVFINT_ICR01 0x00004800 /* Reset: CORER */
+#define AVFINT_ICR01_INTEVENT_SHIFT 0
+#define AVFINT_ICR01_INTEVENT_MASK AVF_MASK(0x1, AVFINT_ICR01_INTEVENT_SHIFT)
+#define AVFINT_ICR01_QUEUE_0_SHIFT 1
+#define AVFINT_ICR01_QUEUE_0_MASK AVF_MASK(0x1, AVFINT_ICR01_QUEUE_0_SHIFT)
+#define AVFINT_ICR01_QUEUE_1_SHIFT 2
+#define AVFINT_ICR01_QUEUE_1_MASK AVF_MASK(0x1, AVFINT_ICR01_QUEUE_1_SHIFT)
+#define AVFINT_ICR01_QUEUE_2_SHIFT 3
+#define AVFINT_ICR01_QUEUE_2_MASK AVF_MASK(0x1, AVFINT_ICR01_QUEUE_2_SHIFT)
+#define AVFINT_ICR01_QUEUE_3_SHIFT 4
+#define AVFINT_ICR01_QUEUE_3_MASK AVF_MASK(0x1, AVFINT_ICR01_QUEUE_3_SHIFT)
+#define AVFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define AVFINT_ICR01_LINK_STAT_CHANGE_MASK AVF_MASK(0x1, AVFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define AVFINT_ICR01_ADMINQ_SHIFT 30
+#define AVFINT_ICR01_ADMINQ_MASK AVF_MASK(0x1, AVFINT_ICR01_ADMINQ_SHIFT)
+#define AVFINT_ICR01_SWINT_SHIFT 31
+#define AVFINT_ICR01_SWINT_MASK AVF_MASK(0x1, AVFINT_ICR01_SWINT_SHIFT)
+#define AVFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define AVFINT_ITR01_MAX_INDEX 2
+#define AVFINT_ITR01_INTERVAL_SHIFT 0
+#define AVFINT_ITR01_INTERVAL_MASK AVF_MASK(0xFFF, AVFINT_ITR01_INTERVAL_SHIFT)
+#define AVFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define AVFINT_ITRN1_MAX_INDEX 2
+#define AVFINT_ITRN1_INTERVAL_SHIFT 0
+#define AVFINT_ITRN1_INTERVAL_MASK AVF_MASK(0xFFF, AVFINT_ITRN1_INTERVAL_SHIFT)
+#define AVFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
+#define AVFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define AVFINT_STAT_CTL01_OTHER_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define AVF_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define AVF_QRX_TAIL1_MAX_INDEX 15
+#define AVF_QRX_TAIL1_TAIL_SHIFT 0
+#define AVF_QRX_TAIL1_TAIL_MASK AVF_MASK(0x1FFF, AVF_QRX_TAIL1_TAIL_SHIFT)
+#define AVF_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
+#define AVF_QTX_TAIL1_MAX_INDEX 15
+#define AVF_QTX_TAIL1_TAIL_SHIFT 0
+#define AVF_QTX_TAIL1_TAIL_MASK AVF_MASK(0x1FFF, AVF_QTX_TAIL1_TAIL_SHIFT)
+#define AVFMSIX_PBA 0x00002000 /* Reset: VFLR */
+#define AVFMSIX_PBA_PENBIT_SHIFT 0
+#define AVFMSIX_PBA_PENBIT_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_PBA_PENBIT_SHIFT)
+#define AVFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define AVFMSIX_TADD_MAX_INDEX 16
+#define AVFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define AVFMSIX_TADD_MSIXTADD10_MASK AVF_MASK(0x3, AVFMSIX_TADD_MSIXTADD10_SHIFT)
+#define AVFMSIX_TADD_MSIXTADD_SHIFT 2
+#define AVFMSIX_TADD_MSIXTADD_MASK AVF_MASK(0x3FFFFFFF, AVFMSIX_TADD_MSIXTADD_SHIFT)
+#define AVFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define AVFMSIX_TMSG_MAX_INDEX 16
+#define AVFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define AVFMSIX_TMSG_MSIXTMSG_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define AVFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define AVFMSIX_TUADD_MAX_INDEX 16
+#define AVFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define AVFMSIX_TUADD_MSIXTUADD_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define AVFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define AVFMSIX_TVCTRL_MAX_INDEX 16
+#define AVFMSIX_TVCTRL_MASK_SHIFT 0
+#define AVFMSIX_TVCTRL_MASK_MASK AVF_MASK(0x1, AVFMSIX_TVCTRL_MASK_SHIFT)
+#define AVFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
+#define AVFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define AVFCM_PE_ERRDATA_ERROR_CODE_MASK AVF_MASK(0xF, AVFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define AVFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define AVFCM_PE_ERRDATA_Q_TYPE_MASK AVF_MASK(0x7, AVFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define AVFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define AVFCM_PE_ERRDATA_Q_NUM_MASK AVF_MASK(0x3FFFF, AVFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define AVFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
+#define AVFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define AVFCM_PE_ERRINFO_ERROR_VALID_MASK AVF_MASK(0x1, AVFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define AVFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define AVFCM_PE_ERRINFO_ERROR_INST_MASK AVF_MASK(0x7, AVFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define AVFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define AVFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK AVF_MASK(0xFF, AVFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define AVFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define AVFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK AVF_MASK(0xFF, AVFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define AVFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define AVFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK AVF_MASK(0xFF, AVFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define AVFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define AVFQF_HENA_MAX_INDEX 1
+#define AVFQF_HENA_PTYPE_ENA_SHIFT 0
+#define AVFQF_HENA_PTYPE_ENA_MASK AVF_MASK(0xFFFFFFFF, AVFQF_HENA_PTYPE_ENA_SHIFT)
+#define AVFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define AVFQF_HKEY_MAX_INDEX 12
+#define AVFQF_HKEY_KEY_0_SHIFT 0
+#define AVFQF_HKEY_KEY_0_MASK AVF_MASK(0xFF, AVFQF_HKEY_KEY_0_SHIFT)
+#define AVFQF_HKEY_KEY_1_SHIFT 8
+#define AVFQF_HKEY_KEY_1_MASK AVF_MASK(0xFF, AVFQF_HKEY_KEY_1_SHIFT)
+#define AVFQF_HKEY_KEY_2_SHIFT 16
+#define AVFQF_HKEY_KEY_2_MASK AVF_MASK(0xFF, AVFQF_HKEY_KEY_2_SHIFT)
+#define AVFQF_HKEY_KEY_3_SHIFT 24
+#define AVFQF_HKEY_KEY_3_MASK AVF_MASK(0xFF, AVFQF_HKEY_KEY_3_SHIFT)
+#define AVFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define AVFQF_HLUT_MAX_INDEX 15
+#define AVFQF_HLUT_LUT0_SHIFT 0
+#define AVFQF_HLUT_LUT0_MASK AVF_MASK(0xF, AVFQF_HLUT_LUT0_SHIFT)
+#define AVFQF_HLUT_LUT1_SHIFT 8
+#define AVFQF_HLUT_LUT1_MASK AVF_MASK(0xF, AVFQF_HLUT_LUT1_SHIFT)
+#define AVFQF_HLUT_LUT2_SHIFT 16
+#define AVFQF_HLUT_LUT2_MASK AVF_MASK(0xF, AVFQF_HLUT_LUT2_SHIFT)
+#define AVFQF_HLUT_LUT3_SHIFT 24
+#define AVFQF_HLUT_LUT3_MASK AVF_MASK(0xF, AVFQF_HLUT_LUT3_SHIFT)
+#define AVFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define AVFQF_HREGION_MAX_INDEX 7
+#define AVFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define AVFQF_HREGION_OVERRIDE_ENA_0_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define AVFQF_HREGION_REGION_0_SHIFT 1
+#define AVFQF_HREGION_REGION_0_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_0_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define AVFQF_HREGION_OVERRIDE_ENA_1_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define AVFQF_HREGION_REGION_1_SHIFT 5
+#define AVFQF_HREGION_REGION_1_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_1_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define AVFQF_HREGION_OVERRIDE_ENA_2_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define AVFQF_HREGION_REGION_2_SHIFT 9
+#define AVFQF_HREGION_REGION_2_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_2_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define AVFQF_HREGION_OVERRIDE_ENA_3_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define AVFQF_HREGION_REGION_3_SHIFT 13
+#define AVFQF_HREGION_REGION_3_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_3_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define AVFQF_HREGION_OVERRIDE_ENA_4_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define AVFQF_HREGION_REGION_4_SHIFT 17
+#define AVFQF_HREGION_REGION_4_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_4_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define AVFQF_HREGION_OVERRIDE_ENA_5_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define AVFQF_HREGION_REGION_5_SHIFT 21
+#define AVFQF_HREGION_REGION_5_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_5_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define AVFQF_HREGION_OVERRIDE_ENA_6_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define AVFQF_HREGION_REGION_6_SHIFT 25
+#define AVFQF_HREGION_REGION_6_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_6_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define AVFQF_HREGION_OVERRIDE_ENA_7_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define AVFQF_HREGION_REGION_7_SHIFT 29
+#define AVFQF_HREGION_REGION_7_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_7_SHIFT)
+
+#define AVFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define AVFINT_DYN_CTL01_WB_ON_ITR_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define AVFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define AVFINT_DYN_CTLN1_WB_ON_ITR_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define AVFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define AVFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define AVFPE_AEQALLOC1_AECOUNT_MASK AVF_MASK(0xFFFFFFFF, AVFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define AVFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define AVFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define AVFPE_CCQPHIGH1_PECCQPHIGH_MASK AVF_MASK(0xFFFFFFFF, AVFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define AVFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define AVFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define AVFPE_CCQPLOW1_PECCQPLOW_MASK AVF_MASK(0xFFFFFFFF, AVFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define AVFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define AVFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define AVFPE_CCQPSTATUS1_CCQP_DONE_MASK AVF_MASK(0x1, AVFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define AVFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define AVFPE_CCQPSTATUS1_HMC_PROFILE_MASK AVF_MASK(0x7, AVFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define AVFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define AVFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK AVF_MASK(0x3F, AVFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define AVFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define AVFPE_CCQPSTATUS1_CCQP_ERR_MASK AVF_MASK(0x1, AVFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define AVFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define AVFPE_CQACK1_PECQID_SHIFT 0
+#define AVFPE_CQACK1_PECQID_MASK AVF_MASK(0x1FFFF, AVFPE_CQACK1_PECQID_SHIFT)
+#define AVFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define AVFPE_CQARM1_PECQID_SHIFT 0
+#define AVFPE_CQARM1_PECQID_MASK AVF_MASK(0x1FFFF, AVFPE_CQARM1_PECQID_SHIFT)
+#define AVFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define AVFPE_CQPDB1_WQHEAD_SHIFT 0
+#define AVFPE_CQPDB1_WQHEAD_MASK AVF_MASK(0x7FF, AVFPE_CQPDB1_WQHEAD_SHIFT)
+#define AVFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define AVFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define AVFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK AVF_MASK(0xFFFF, AVFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define AVFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define AVFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK AVF_MASK(0xFFFF, AVFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define AVFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define AVFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define AVFPE_CQPTAIL1_WQTAIL_MASK AVF_MASK(0x7FF, AVFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define AVFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define AVFPE_CQPTAIL1_CQP_OP_ERR_MASK AVF_MASK(0x1, AVFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define AVFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define AVFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define AVFPE_IPCONFIG01_PEIPID_MASK AVF_MASK(0xFFFF, AVFPE_IPCONFIG01_PEIPID_SHIFT)
+#define AVFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define AVFPE_IPCONFIG01_USEENTIREIDRANGE_MASK AVF_MASK(0x1, AVFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define AVFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define AVFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define AVFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK AVF_MASK(0x1F, AVFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define AVFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define AVFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define AVFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK AVF_MASK(0xFFFFFF, AVFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define AVFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define AVFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define AVFPE_TCPNOWTIMER1_TCP_NOW_MASK AVF_MASK(0xFFFFFFFF, AVFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define AVFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define AVFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define AVFPE_WQEALLOC1_PEQPID_MASK AVF_MASK(0x3FFFF, AVFPE_WQEALLOC1_PEQPID_SHIFT)
+#define AVFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define AVFPE_WQEALLOC1_WQE_DESC_INDEX_MASK AVF_MASK(0xFFF, AVFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+
+#endif /* _AVF_REGISTER_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/base/avf_status.h b/src/spdk/dpdk/drivers/net/avf/base/avf_status.h
new file mode 100644
index 00000000..e8a673bd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/avf_status.h
@@ -0,0 +1,108 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_STATUS_H_
+#define _AVF_STATUS_H_
+
+/* Error Codes */
+enum avf_status_code {
+ AVF_SUCCESS = 0,
+ AVF_ERR_NVM = -1,
+ AVF_ERR_NVM_CHECKSUM = -2,
+ AVF_ERR_PHY = -3,
+ AVF_ERR_CONFIG = -4,
+ AVF_ERR_PARAM = -5,
+ AVF_ERR_MAC_TYPE = -6,
+ AVF_ERR_UNKNOWN_PHY = -7,
+ AVF_ERR_LINK_SETUP = -8,
+ AVF_ERR_ADAPTER_STOPPED = -9,
+ AVF_ERR_INVALID_MAC_ADDR = -10,
+ AVF_ERR_DEVICE_NOT_SUPPORTED = -11,
+ AVF_ERR_MASTER_REQUESTS_PENDING = -12,
+ AVF_ERR_INVALID_LINK_SETTINGS = -13,
+ AVF_ERR_AUTONEG_NOT_COMPLETE = -14,
+ AVF_ERR_RESET_FAILED = -15,
+ AVF_ERR_SWFW_SYNC = -16,
+ AVF_ERR_NO_AVAILABLE_VSI = -17,
+ AVF_ERR_NO_MEMORY = -18,
+ AVF_ERR_BAD_PTR = -19,
+ AVF_ERR_RING_FULL = -20,
+ AVF_ERR_INVALID_PD_ID = -21,
+ AVF_ERR_INVALID_QP_ID = -22,
+ AVF_ERR_INVALID_CQ_ID = -23,
+ AVF_ERR_INVALID_CEQ_ID = -24,
+ AVF_ERR_INVALID_AEQ_ID = -25,
+ AVF_ERR_INVALID_SIZE = -26,
+ AVF_ERR_INVALID_ARP_INDEX = -27,
+ AVF_ERR_INVALID_FPM_FUNC_ID = -28,
+ AVF_ERR_QP_INVALID_MSG_SIZE = -29,
+ AVF_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ AVF_ERR_INVALID_FRAG_COUNT = -31,
+ AVF_ERR_QUEUE_EMPTY = -32,
+ AVF_ERR_INVALID_ALIGNMENT = -33,
+ AVF_ERR_FLUSHED_QUEUE = -34,
+ AVF_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ AVF_ERR_INVALID_IMM_DATA_SIZE = -36,
+ AVF_ERR_TIMEOUT = -37,
+ AVF_ERR_OPCODE_MISMATCH = -38,
+ AVF_ERR_CQP_COMPL_ERROR = -39,
+ AVF_ERR_INVALID_VF_ID = -40,
+ AVF_ERR_INVALID_HMCFN_ID = -41,
+ AVF_ERR_BACKING_PAGE_ERROR = -42,
+ AVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ AVF_ERR_INVALID_PBLE_INDEX = -44,
+ AVF_ERR_INVALID_SD_INDEX = -45,
+ AVF_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ AVF_ERR_INVALID_SD_TYPE = -47,
+ AVF_ERR_MEMCPY_FAILED = -48,
+ AVF_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ AVF_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ AVF_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ AVF_ERR_SRQ_ENABLED = -52,
+ AVF_ERR_ADMIN_QUEUE_ERROR = -53,
+ AVF_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ AVF_ERR_BUF_TOO_SHORT = -55,
+ AVF_ERR_ADMIN_QUEUE_FULL = -56,
+ AVF_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ AVF_ERR_BAD_IWARP_CQE = -58,
+ AVF_ERR_NVM_BLANK_MODE = -59,
+ AVF_ERR_NOT_IMPLEMENTED = -60,
+ AVF_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ AVF_ERR_DIAG_TEST_FAILED = -62,
+ AVF_ERR_NOT_READY = -63,
+ AVF_NOT_SUPPORTED = -64,
+ AVF_ERR_FIRMWARE_API_VERSION = -65,
+ AVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
+};
+
+#endif /* _AVF_STATUS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/base/avf_type.h b/src/spdk/dpdk/drivers/net/avf/base/avf_type.h
new file mode 100644
index 00000000..546c6d2a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/avf_type.h
@@ -0,0 +1,2024 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_TYPE_H_
+#define _AVF_TYPE_H_
+
+#include "avf_status.h"
+#include "avf_osdep.h"
+#include "avf_register.h"
+#include "avf_adminq.h"
+#include "avf_hmc.h"
+#include "avf_lan_hmc.h"
+#include "avf_devids.h"
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_1PARAMETER(_p) (_p);
+#define UNREFERENCED_2PARAMETER(_p, _q) (_p); (_q);
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) (_p); (_q); (_r);
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) (_p); (_q); (_r); (_s);
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) (_p); (_q); (_r); (_s); (_t);
+
+#ifndef LINUX_MACROS
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* BIT_ULL */
+#endif /* LINUX_MACROS */
+
+#ifndef AVF_MASK
+/* AVF_MASK is a macro used on 32 bit registers */
+#define AVF_MASK(mask, shift) (mask << shift)
+#endif
+
+#define AVF_MAX_PF 16
+#define AVF_MAX_PF_VSI 64
+#define AVF_MAX_PF_QP 128
+#define AVF_MAX_VSI_QP 16
+#define AVF_MAX_VF_VSI 3
+#define AVF_MAX_CHAINED_RX_BUFFERS 5
+#define AVF_MAX_PF_UDP_OFFLOAD_PORTS 16
+
+/* something less than 1 minute */
+#define AVF_HEARTBEAT_TIMEOUT (HZ * 50)
+
+/* Max default timeout in ms, */
+#define AVF_MAX_NVM_TIMEOUT 18000
+
+/* Max timeout in ms for the phy to respond */
+#define AVF_MAX_PHY_TIMEOUT 500
+
+/* Check whether address is multicast. */
+#define AVF_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define AVF_IS_BROADCAST(address) \
+ ((((u8 *)(address))[0] == ((u8)0xff)) && \
+ (((u8 *)(address))[1] == ((u8)0xff)))
+
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define AVF_MS_TO_GTIME(time) ((time) * 1000)
+
+/* forward declaration */
+struct avf_hw;
+typedef void (*AVF_ADMINQ_CALLBACK)(struct avf_hw *, struct avf_aq_desc *);
+
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif
+/* Data type manipulation macros. */
+#define AVF_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define AVF_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+
+#define AVF_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define AVF_LO_WORD(x) ((u16)((x) & 0xFFFF))
+
+#define AVF_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define AVF_LO_BYTE(x) ((u8)((x) & 0xFF))
+
+/* Number of Transmit Descriptors must be a multiple of 8. */
+#define AVF_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Receive Descriptors must be a multiple of 32 if
+ * the number of descriptors is greater than 32.
+ */
+#define AVF_REQ_RX_DESCRIPTOR_MULTIPLE 32
+
+#define AVF_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define AVF_QTX_CTL_VF_QUEUE 0x0
+#define AVF_QTX_CTL_VM_QUEUE 0x1
+#define AVF_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum avf_debug_mask {
+ AVF_DEBUG_INIT = 0x00000001,
+ AVF_DEBUG_RELEASE = 0x00000002,
+
+ AVF_DEBUG_LINK = 0x00000010,
+ AVF_DEBUG_PHY = 0x00000020,
+ AVF_DEBUG_HMC = 0x00000040,
+ AVF_DEBUG_NVM = 0x00000080,
+ AVF_DEBUG_LAN = 0x00000100,
+ AVF_DEBUG_FLOW = 0x00000200,
+ AVF_DEBUG_DCB = 0x00000400,
+ AVF_DEBUG_DIAG = 0x00000800,
+ AVF_DEBUG_FD = 0x00001000,
+ AVF_DEBUG_PACKAGE = 0x00002000,
+
+ AVF_DEBUG_AQ_MESSAGE = 0x01000000,
+ AVF_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ AVF_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ AVF_DEBUG_AQ_COMMAND = 0x06000000,
+ AVF_DEBUG_AQ = 0x0F000000,
+
+ AVF_DEBUG_USER = 0xF0000000,
+
+ AVF_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* PCI Bus Info */
+#define AVF_PCI_LINK_STATUS 0xB2
+#define AVF_PCI_LINK_WIDTH 0x3F0
+#define AVF_PCI_LINK_WIDTH_1 0x10
+#define AVF_PCI_LINK_WIDTH_2 0x20
+#define AVF_PCI_LINK_WIDTH_4 0x40
+#define AVF_PCI_LINK_WIDTH_8 0x80
+#define AVF_PCI_LINK_SPEED 0xF
+#define AVF_PCI_LINK_SPEED_2500 0x1
+#define AVF_PCI_LINK_SPEED_5000 0x2
+#define AVF_PCI_LINK_SPEED_8000 0x3
+
+#define AVF_MDIO_CLAUSE22_STCODE_MASK AVF_MASK(1, \
+ AVF_GLGEN_MSCA_STCODE_SHIFT)
+#define AVF_MDIO_CLAUSE22_OPCODE_WRITE_MASK AVF_MASK(1, \
+ AVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define AVF_MDIO_CLAUSE22_OPCODE_READ_MASK AVF_MASK(2, \
+ AVF_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define AVF_MDIO_CLAUSE45_STCODE_MASK AVF_MASK(0, \
+ AVF_GLGEN_MSCA_STCODE_SHIFT)
+#define AVF_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK AVF_MASK(0, \
+ AVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define AVF_MDIO_CLAUSE45_OPCODE_WRITE_MASK AVF_MASK(1, \
+ AVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define AVF_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK AVF_MASK(2, \
+ AVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define AVF_MDIO_CLAUSE45_OPCODE_READ_MASK AVF_MASK(3, \
+ AVF_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define AVF_PHY_COM_REG_PAGE 0x1E
+#define AVF_PHY_LED_LINK_MODE_MASK 0xF0
+#define AVF_PHY_LED_MANUAL_ON 0x100
+#define AVF_PHY_LED_PROV_REG_1 0xC430
+#define AVF_PHY_LED_MODE_MASK 0xFFFF
+#define AVF_PHY_LED_MODE_ORIG 0x80000000
+
+/* Memory types */
+enum avf_memset_type {
+ AVF_NONDMA_MEM = 0,
+ AVF_DMA_MEM
+};
+
+/* Memcpy types */
+enum avf_memcpy_type {
+ AVF_NONDMA_TO_NONDMA = 0,
+ AVF_NONDMA_TO_DMA,
+ AVF_DMA_TO_DMA,
+ AVF_DMA_TO_NONDMA
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum avf_mac_type {
+ AVF_MAC_UNKNOWN = 0,
+ AVF_MAC_XL710,
+ AVF_MAC_VF,
+ AVF_MAC_X722,
+ AVF_MAC_X722_VF,
+ AVF_MAC_GENERIC,
+};
+
+enum avf_media_type {
+ AVF_MEDIA_TYPE_UNKNOWN = 0,
+ AVF_MEDIA_TYPE_FIBER,
+ AVF_MEDIA_TYPE_BASET,
+ AVF_MEDIA_TYPE_BACKPLANE,
+ AVF_MEDIA_TYPE_CX4,
+ AVF_MEDIA_TYPE_DA,
+ AVF_MEDIA_TYPE_VIRTUAL
+};
+
+enum avf_fc_mode {
+ AVF_FC_NONE = 0,
+ AVF_FC_RX_PAUSE,
+ AVF_FC_TX_PAUSE,
+ AVF_FC_FULL,
+ AVF_FC_PFC,
+ AVF_FC_DEFAULT
+};
+
+enum avf_set_fc_aq_failures {
+ AVF_SET_FC_AQ_FAIL_NONE = 0,
+ AVF_SET_FC_AQ_FAIL_GET = 1,
+ AVF_SET_FC_AQ_FAIL_SET = 2,
+ AVF_SET_FC_AQ_FAIL_UPDATE = 4,
+ AVF_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
+enum avf_vsi_type {
+ AVF_VSI_MAIN = 0,
+ AVF_VSI_VMDQ1 = 1,
+ AVF_VSI_VMDQ2 = 2,
+ AVF_VSI_CTRL = 3,
+ AVF_VSI_FCOE = 4,
+ AVF_VSI_MIRROR = 5,
+ AVF_VSI_SRIOV = 6,
+ AVF_VSI_FDIR = 7,
+ AVF_VSI_TYPE_UNKNOWN
+};
+
+enum avf_queue_type {
+ AVF_QUEUE_TYPE_RX = 0,
+ AVF_QUEUE_TYPE_TX,
+ AVF_QUEUE_TYPE_PE_CEQ,
+ AVF_QUEUE_TYPE_UNKNOWN
+};
+
+struct avf_link_status {
+ enum avf_aq_phy_type phy_type;
+ enum avf_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 req_fec_info;
+ u8 fec_info;
+ u8 ext_info;
+ u8 loopback;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
+ u8 requested_speeds;
+ u8 module_type[3];
+ /* 1st byte: module identifier */
+#define AVF_MODULE_TYPE_SFP 0x03
+#define AVF_MODULE_TYPE_QSFP 0x0D
+ /* 2nd byte: ethernet compliance codes for 10/40G */
+#define AVF_MODULE_TYPE_40G_ACTIVE 0x01
+#define AVF_MODULE_TYPE_40G_LR4 0x02
+#define AVF_MODULE_TYPE_40G_SR4 0x04
+#define AVF_MODULE_TYPE_40G_CR4 0x08
+#define AVF_MODULE_TYPE_10G_BASE_SR 0x10
+#define AVF_MODULE_TYPE_10G_BASE_LR 0x20
+#define AVF_MODULE_TYPE_10G_BASE_LRM 0x40
+#define AVF_MODULE_TYPE_10G_BASE_ER 0x80
+ /* 3rd byte: ethernet compliance codes for 1G */
+#define AVF_MODULE_TYPE_1000BASE_SX 0x01
+#define AVF_MODULE_TYPE_1000BASE_LX 0x02
+#define AVF_MODULE_TYPE_1000BASE_CX 0x04
+#define AVF_MODULE_TYPE_1000BASE_T 0x08
+};
+
+struct avf_phy_info {
+ struct avf_link_status link_info;
+ struct avf_link_status link_info_old;
+ bool get_link_info;
+ enum avf_media_type media_type;
+ /* all the phy types the NVM is capable of */
+ u64 phy_types;
+};
+
+#define AVF_CAP_PHY_TYPE_SGMII BIT_ULL(AVF_PHY_TYPE_SGMII)
+#define AVF_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(AVF_PHY_TYPE_1000BASE_KX)
+#define AVF_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(AVF_PHY_TYPE_10GBASE_KX4)
+#define AVF_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(AVF_PHY_TYPE_10GBASE_KR)
+#define AVF_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(AVF_PHY_TYPE_40GBASE_KR4)
+#define AVF_CAP_PHY_TYPE_XAUI BIT_ULL(AVF_PHY_TYPE_XAUI)
+#define AVF_CAP_PHY_TYPE_XFI BIT_ULL(AVF_PHY_TYPE_XFI)
+#define AVF_CAP_PHY_TYPE_SFI BIT_ULL(AVF_PHY_TYPE_SFI)
+#define AVF_CAP_PHY_TYPE_XLAUI BIT_ULL(AVF_PHY_TYPE_XLAUI)
+#define AVF_CAP_PHY_TYPE_XLPPI BIT_ULL(AVF_PHY_TYPE_XLPPI)
+#define AVF_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(AVF_PHY_TYPE_40GBASE_CR4_CU)
+#define AVF_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(AVF_PHY_TYPE_10GBASE_CR1_CU)
+#define AVF_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(AVF_PHY_TYPE_10GBASE_AOC)
+#define AVF_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(AVF_PHY_TYPE_40GBASE_AOC)
+#define AVF_CAP_PHY_TYPE_100BASE_TX BIT_ULL(AVF_PHY_TYPE_100BASE_TX)
+#define AVF_CAP_PHY_TYPE_1000BASE_T BIT_ULL(AVF_PHY_TYPE_1000BASE_T)
+#define AVF_CAP_PHY_TYPE_10GBASE_T BIT_ULL(AVF_PHY_TYPE_10GBASE_T)
+#define AVF_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(AVF_PHY_TYPE_10GBASE_SR)
+#define AVF_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(AVF_PHY_TYPE_10GBASE_LR)
+#define AVF_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(AVF_PHY_TYPE_10GBASE_SFPP_CU)
+#define AVF_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(AVF_PHY_TYPE_10GBASE_CR1)
+#define AVF_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(AVF_PHY_TYPE_40GBASE_CR4)
+#define AVF_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(AVF_PHY_TYPE_40GBASE_SR4)
+#define AVF_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(AVF_PHY_TYPE_40GBASE_LR4)
+#define AVF_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(AVF_PHY_TYPE_1000BASE_SX)
+#define AVF_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(AVF_PHY_TYPE_1000BASE_LX)
+#define AVF_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(AVF_PHY_TYPE_1000BASE_T_OPTICAL)
+#define AVF_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(AVF_PHY_TYPE_20GBASE_KR2)
+/*
+ * Defining the macro AVF_TYPE_OFFSET to implement a bit shift for some
+ * PHY types. There is an unused bit (31) in the AVF_CAP_PHY_TYPE_* bit
+ * fields but no corresponding gap in the avf_aq_phy_type enumeration. So,
+ * a shift is needed to adjust for this with values larger than 31. The
+ * only affected values are AVF_PHY_TYPE_25GBASE_*.
+ */
+#define AVF_PHY_TYPE_OFFSET 1
+#define AVF_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(AVF_PHY_TYPE_25GBASE_KR + \
+ AVF_PHY_TYPE_OFFSET)
+#define AVF_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(AVF_PHY_TYPE_25GBASE_CR + \
+ AVF_PHY_TYPE_OFFSET)
+#define AVF_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(AVF_PHY_TYPE_25GBASE_SR + \
+ AVF_PHY_TYPE_OFFSET)
+#define AVF_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(AVF_PHY_TYPE_25GBASE_LR + \
+ AVF_PHY_TYPE_OFFSET)
+#define AVF_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(AVF_PHY_TYPE_25GBASE_AOC + \
+ AVF_PHY_TYPE_OFFSET)
+#define AVF_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(AVF_PHY_TYPE_25GBASE_ACC + \
+ AVF_PHY_TYPE_OFFSET)
+#define AVF_HW_CAP_MAX_GPIO 30
+#define AVF_HW_CAP_MDIO_PORT_MODE_MDIO 0
+#define AVF_HW_CAP_MDIO_PORT_MODE_I2C 1
+
+enum avf_acpi_programming_method {
+ AVF_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
+ AVF_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
+};
+
+#define AVF_WOL_SUPPORT_MASK 0x1
+#define AVF_ACPI_PROGRAMMING_METHOD_MASK 0x2
+#define AVF_PROXY_SUPPORT_MASK 0x4
+
+/* Capabilities of a PF or a VF or the whole device */
+struct avf_hw_capabilities {
+ u32 switch_mode;
+#define AVF_NVM_IMAGE_TYPE_EVB 0x0
+#define AVF_NVM_IMAGE_TYPE_CLOUD 0x2
+#define AVF_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 mng_protocols_over_mctp;
+#define AVF_MNG_PROTOCOL_PLDM 0x2
+#define AVF_MNG_PROTOCOL_OEM_COMMANDS 0x4
+#define AVF_MNG_PROTOCOL_NCSI 0x8
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool iscsi; /* Indicates iSCSI enabled */
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+#define AVF_FLEX10_MODE_UNKNOWN 0x0
+#define AVF_FLEX10_MODE_DCC 0x1
+#define AVF_FLEX10_MODE_DCI 0x2
+
+ u32 flex10_status;
+#define AVF_FLEX10_STATUS_DCC_ERROR 0x1
+#define AVF_FLEX10_STATUS_VC_MODE 0x2
+
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define AVF_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define AVF_NVM_MGMT_UPDATE_DISABLED 0x2
+
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[AVF_HW_CAP_MAX_GPIO];
+ bool sdp[AVF_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+ u64 wr_csr_prot;
+ bool apm_wol_support;
+ enum avf_acpi_programming_method acpi_prog_method;
+ bool proxy_support;
+};
+
+struct avf_mac_info {
+ enum avf_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u8 port_addr[ETH_ALEN];
+ u16 max_fcoeq;
+};
+
+enum avf_aq_resources_ids {
+ AVF_NVM_RESOURCE_ID = 1
+};
+
+enum avf_aq_resource_access_type {
+ AVF_RESOURCE_READ = 1,
+ AVF_RESOURCE_WRITE
+};
+
+struct avf_nvm_info {
+ u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+ bool blank_nvm_mode; /* is NVM empty (no FW present)*/
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+ u32 oem_ver; /* OEM version info */
+};
+
+/* definitions used in NVM update support */
+
+enum avf_nvmupd_cmd {
+ AVF_NVMUPD_INVALID,
+ AVF_NVMUPD_READ_CON,
+ AVF_NVMUPD_READ_SNT,
+ AVF_NVMUPD_READ_LCB,
+ AVF_NVMUPD_READ_SA,
+ AVF_NVMUPD_WRITE_ERA,
+ AVF_NVMUPD_WRITE_CON,
+ AVF_NVMUPD_WRITE_SNT,
+ AVF_NVMUPD_WRITE_LCB,
+ AVF_NVMUPD_WRITE_SA,
+ AVF_NVMUPD_CSUM_CON,
+ AVF_NVMUPD_CSUM_SA,
+ AVF_NVMUPD_CSUM_LCB,
+ AVF_NVMUPD_STATUS,
+ AVF_NVMUPD_EXEC_AQ,
+ AVF_NVMUPD_GET_AQ_RESULT,
+ AVF_NVMUPD_GET_AQ_EVENT,
+};
+
+enum avf_nvmupd_state {
+ AVF_NVMUPD_STATE_INIT,
+ AVF_NVMUPD_STATE_READING,
+ AVF_NVMUPD_STATE_WRITING,
+ AVF_NVMUPD_STATE_INIT_WAIT,
+ AVF_NVMUPD_STATE_WRITE_WAIT,
+ AVF_NVMUPD_STATE_ERROR
+};
+
+/* nvm_access definition and its masks/shifts need to be accessible to
+ * application, core driver, and shared code. Where is the right file?
+ */
+#define AVF_NVM_READ 0xB
+#define AVF_NVM_WRITE 0xC
+
+#define AVF_NVM_MOD_PNT_MASK 0xFF
+
+#define AVF_NVM_TRANS_SHIFT 8
+#define AVF_NVM_TRANS_MASK (0xf << AVF_NVM_TRANS_SHIFT)
+#define AVF_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define AVF_NVM_PRESERVATION_FLAGS_MASK \
+ (0x3 << AVF_NVM_PRESERVATION_FLAGS_SHIFT)
+#define AVF_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define AVF_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define AVF_NVM_CON 0x0
+#define AVF_NVM_SNT 0x1
+#define AVF_NVM_LCB 0x2
+#define AVF_NVM_SA (AVF_NVM_SNT | AVF_NVM_LCB)
+#define AVF_NVM_ERA 0x4
+#define AVF_NVM_CSUM 0x8
+#define AVF_NVM_AQE 0xe
+#define AVF_NVM_EXEC 0xf
+
+#define AVF_NVM_ADAPT_SHIFT 16
+#define AVF_NVM_ADAPT_MASK (0xffffULL << AVF_NVM_ADAPT_SHIFT)
+
+#define AVF_NVMUPD_MAX_DATA 4096
+#define AVF_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
+
+struct avf_nvm_access {
+ u32 command;
+ u32 config;
+ u32 offset; /* in bytes */
+ u32 data_size; /* in bytes */
+ u8 data[1];
+};
+
+/* (Q)SFP module access definitions */
+#define AVF_I2C_EEPROM_DEV_ADDR 0xA0
+#define AVF_I2C_EEPROM_DEV_ADDR2 0xA2
+#define AVF_MODULE_TYPE_ADDR 0x00
+#define AVF_MODULE_REVISION_ADDR 0x01
+#define AVF_MODULE_SFF_8472_COMP 0x5E
+#define AVF_MODULE_SFF_8472_SWAP 0x5C
+#define AVF_MODULE_SFF_ADDR_MODE 0x04
+#define AVF_MODULE_SFF_DIAG_CAPAB 0x40
+#define AVF_MODULE_TYPE_QSFP_PLUS 0x0D
+#define AVF_MODULE_TYPE_QSFP28 0x11
+#define AVF_MODULE_QSFP_MAX_LEN 640
+
+/* PCI bus types */
+enum avf_bus_type {
+ avf_bus_type_unknown = 0,
+ avf_bus_type_pci,
+ avf_bus_type_pcix,
+ avf_bus_type_pci_express,
+ avf_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum avf_bus_speed {
+ avf_bus_speed_unknown = 0,
+ avf_bus_speed_33 = 33,
+ avf_bus_speed_66 = 66,
+ avf_bus_speed_100 = 100,
+ avf_bus_speed_120 = 120,
+ avf_bus_speed_133 = 133,
+ avf_bus_speed_2500 = 2500,
+ avf_bus_speed_5000 = 5000,
+ avf_bus_speed_8000 = 8000,
+ avf_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum avf_bus_width {
+ avf_bus_width_unknown = 0,
+ avf_bus_width_pcie_x1 = 1,
+ avf_bus_width_pcie_x2 = 2,
+ avf_bus_width_pcie_x4 = 4,
+ avf_bus_width_pcie_x8 = 8,
+ avf_bus_width_32 = 32,
+ avf_bus_width_64 = 64,
+ avf_bus_width_reserved
+};
+
+/* Bus parameters */
+struct avf_bus_info {
+ enum avf_bus_speed speed;
+ enum avf_bus_width width;
+ enum avf_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+ u16 bus_id;
+};
+
+/* Flow control (FC) parameters */
+struct avf_fc_info {
+ enum avf_fc_mode current_mode; /* FC mode in effect */
+ enum avf_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define AVF_MAX_TRAFFIC_CLASS 8
+#define AVF_MAX_USER_PRIORITY 8
+#define AVF_DCBX_MAX_APPS 32
+#define AVF_LLDPDU_SIZE 1500
+#define AVF_TLV_STATUS_OPER 0x1
+#define AVF_TLV_STATUS_SYNC 0x2
+#define AVF_TLV_STATUS_ERR 0x4
+#define AVF_CEE_OPER_MAX_APPS 3
+#define AVF_APP_PROTOID_FCOE 0x8906
+#define AVF_APP_PROTOID_ISCSI 0x0cbc
+#define AVF_APP_PROTOID_FIP 0x8914
+#define AVF_APP_SEL_ETHTYPE 0x1
+#define AVF_APP_SEL_TCPIP 0x2
+#define AVF_CEE_APP_SEL_ETHTYPE 0x0
+#define AVF_CEE_APP_SEL_TCPIP 0x1
+
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct avf_dcb_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[AVF_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[AVF_MAX_TRAFFIC_CLASS];
+ u8 tsatable[AVF_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct avf_dcb_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct avf_dcb_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct avf_dcbx_config {
+ u8 dcbx_mode;
+#define AVF_DCBX_MODE_CEE 0x1
+#define AVF_DCBX_MODE_IEEE 0x2
+ u8 app_mode;
+#define AVF_DCBX_APPS_NON_WILLING 0x1
+ u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
+ struct avf_dcb_ets_config etscfg;
+ struct avf_dcb_ets_config etsrec;
+ struct avf_dcb_pfc_config pfc;
+ struct avf_dcb_app_priority_table app[AVF_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct avf_hw {
+ u8 *hw_addr;
+ void *back;
+
+ /* subsystem structs */
+ struct avf_phy_info phy;
+ struct avf_mac_info mac;
+ struct avf_bus_info bus;
+ struct avf_nvm_info nvm;
+ struct avf_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct avf_hw_capabilities dev_caps;
+ struct avf_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* for multi-function MACs */
+ u16 partition_id;
+ u16 num_partitions;
+ u16 num_ports;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct avf_adminq_info aq;
+
+ /* state of nvm update process */
+ enum avf_nvmupd_state nvmupd_state;
+ struct avf_aq_desc nvm_wb_desc;
+ struct avf_aq_desc nvm_aq_event_desc;
+ struct avf_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
+
+ /* HMC info */
+ struct avf_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct avf_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+ struct avf_dcbx_config remote_dcbx_config; /* Peer Cfg */
+ struct avf_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+
+ /* WoL and proxy support */
+ u16 num_wol_proxy_filters;
+ u16 wol_proxy_vsi_seid;
+
+#define AVF_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define AVF_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define AVF_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define AVF_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+ u64 flags;
+
+ /* Used in set switch config AQ command */
+ u16 switch_tag;
+ u16 first_tag;
+ u16 second_tag;
+
+ /* debug mask */
+ u32 debug_mask;
+ char err_str[16];
+};
+
+STATIC INLINE bool avf_is_vf(struct avf_hw *hw)
+{
+ return (hw->mac.type == AVF_MAC_VF ||
+ hw->mac.type == AVF_MAC_X722_VF);
+}
+
+struct avf_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+ u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union avf_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union avf_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define AVF_RXD_QW0_MIRROR_STATUS_SHIFT 8
+#define AVF_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \
+ AVF_RXD_QW0_MIRROR_STATUS_SHIFT)
+#define AVF_RXD_QW0_FCOEINDX_SHIFT 0
+#define AVF_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \
+ AVF_RXD_QW0_FCOEINDX_SHIFT)
+
+enum avf_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_RX_DESC_STATUS_DD_SHIFT = 0,
+ AVF_RX_DESC_STATUS_EOF_SHIFT = 1,
+ AVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ AVF_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ AVF_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ AVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ AVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ AVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
+
+ AVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ AVF_RX_DESC_STATUS_FLM_SHIFT = 11,
+ AVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ AVF_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ AVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ AVF_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
+ AVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
+ AVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define AVF_RXD_QW1_STATUS_SHIFT 0
+#define AVF_RXD_QW1_STATUS_MASK ((BIT(AVF_RX_DESC_STATUS_LAST) - 1) << \
+ AVF_RXD_QW1_STATUS_SHIFT)
+
+#define AVF_RXD_QW1_STATUS_TSYNINDX_SHIFT AVF_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define AVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ AVF_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define AVF_RXD_QW1_STATUS_TSYNVALID_SHIFT AVF_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define AVF_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(AVF_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+#define AVF_RXD_QW1_STATUS_UMBCAST_SHIFT AVF_RX_DESC_STATUS_UMBCAST
+#define AVF_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \
+ AVF_RXD_QW1_STATUS_UMBCAST_SHIFT)
+
+enum avf_rx_desc_fltstat_values {
+ AVF_RX_DESC_FLTSTAT_NO_DATA = 0,
+ AVF_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ AVF_RX_DESC_FLTSTAT_RSV = 2,
+ AVF_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define AVF_RXD_PACKET_TYPE_UNICAST 0
+#define AVF_RXD_PACKET_TYPE_MULTICAST 1
+#define AVF_RXD_PACKET_TYPE_BROADCAST 2
+#define AVF_RXD_PACKET_TYPE_MIRRORED 3
+
+#define AVF_RXD_QW1_ERROR_SHIFT 19
+#define AVF_RXD_QW1_ERROR_MASK (0xFFUL << AVF_RXD_QW1_ERROR_SHIFT)
+
+enum avf_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_RX_DESC_ERROR_RXE_SHIFT = 0,
+ AVF_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ AVF_RX_DESC_ERROR_HBO_SHIFT = 2,
+ AVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ AVF_RX_DESC_ERROR_IPE_SHIFT = 3,
+ AVF_RX_DESC_ERROR_L4E_SHIFT = 4,
+ AVF_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ AVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ AVF_RX_DESC_ERROR_PPRS_SHIFT = 7
+};
+
+enum avf_rx_desc_error_l3l4e_fcoe_masks {
+ AVF_RX_DESC_ERROR_L3L4E_NONE = 0,
+ AVF_RX_DESC_ERROR_L3L4E_PROT = 1,
+ AVF_RX_DESC_ERROR_L3L4E_FC = 2,
+ AVF_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ AVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define AVF_RXD_QW1_PTYPE_SHIFT 30
+#define AVF_RXD_QW1_PTYPE_MASK (0xFFULL << AVF_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum avf_rx_l2_ptype {
+ AVF_RX_PTYPE_L2_RESERVED = 0,
+ AVF_RX_PTYPE_L2_MAC_PAY2 = 1,
+ AVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ AVF_RX_PTYPE_L2_FIP_PAY2 = 3,
+ AVF_RX_PTYPE_L2_OUI_PAY2 = 4,
+ AVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ AVF_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ AVF_RX_PTYPE_L2_ECP_PAY2 = 7,
+ AVF_RX_PTYPE_L2_EVB_PAY2 = 8,
+ AVF_RX_PTYPE_L2_QCN_PAY2 = 9,
+ AVF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ AVF_RX_PTYPE_L2_ARP = 11,
+ AVF_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ AVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ AVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ AVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ AVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ AVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ AVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ AVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ AVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ AVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ AVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ AVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ AVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ AVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct avf_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum avf_rx_ptype_outer_ip {
+ AVF_RX_PTYPE_OUTER_L2 = 0,
+ AVF_RX_PTYPE_OUTER_IP = 1
+};
+
+enum avf_rx_ptype_outer_ip_ver {
+ AVF_RX_PTYPE_OUTER_NONE = 0,
+ AVF_RX_PTYPE_OUTER_IPV4 = 0,
+ AVF_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum avf_rx_ptype_outer_fragmented {
+ AVF_RX_PTYPE_NOT_FRAG = 0,
+ AVF_RX_PTYPE_FRAG = 1
+};
+
+enum avf_rx_ptype_tunnel_type {
+ AVF_RX_PTYPE_TUNNEL_NONE = 0,
+ AVF_RX_PTYPE_TUNNEL_IP_IP = 1,
+ AVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ AVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ AVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum avf_rx_ptype_tunnel_end_prot {
+ AVF_RX_PTYPE_TUNNEL_END_NONE = 0,
+ AVF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ AVF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum avf_rx_ptype_inner_prot {
+ AVF_RX_PTYPE_INNER_PROT_NONE = 0,
+ AVF_RX_PTYPE_INNER_PROT_UDP = 1,
+ AVF_RX_PTYPE_INNER_PROT_TCP = 2,
+ AVF_RX_PTYPE_INNER_PROT_SCTP = 3,
+ AVF_RX_PTYPE_INNER_PROT_ICMP = 4,
+ AVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum avf_rx_ptype_payload_layer {
+ AVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ AVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ AVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ AVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define AVF_RX_PTYPE_BIT_MASK 0x0FFFFFFF
+#define AVF_RX_PTYPE_SHIFT 56
+
+#define AVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define AVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ AVF_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define AVF_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define AVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ AVF_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define AVF_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define AVF_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(AVF_RXD_QW1_LENGTH_SPH_SHIFT)
+
+#define AVF_RXD_QW1_NEXTP_SHIFT 38
+#define AVF_RXD_QW1_NEXTP_MASK (0x1FFFULL << AVF_RXD_QW1_NEXTP_SHIFT)
+
+#define AVF_RXD_QW2_EXT_STATUS_SHIFT 0
+#define AVF_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \
+ AVF_RXD_QW2_EXT_STATUS_SHIFT)
+
+enum avf_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ AVF_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ AVF_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ AVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ AVF_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ AVF_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ AVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+#define AVF_RXD_QW2_L2TAG2_SHIFT 0
+#define AVF_RXD_QW2_L2TAG2_MASK (0xFFFFUL << AVF_RXD_QW2_L2TAG2_SHIFT)
+
+#define AVF_RXD_QW2_L2TAG3_SHIFT 16
+#define AVF_RXD_QW2_L2TAG3_MASK (0xFFFFUL << AVF_RXD_QW2_L2TAG3_SHIFT)
+
+enum avf_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ AVF_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ AVF_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ AVF_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ AVF_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ AVF_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ AVF_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ AVF_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ AVF_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define AVF_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define AVF_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define AVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define AVF_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ AVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define AVF_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0
+#define AVF_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \
+ AVF_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT)
+
+#define AVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define AVF_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ AVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum avf_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ AVF_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum avf_rx_prog_status_desc_prog_id_masks {
+ AVF_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ AVF_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ AVF_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum avf_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ AVF_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
+ AVF_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ AVF_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+#define AVF_TWO_BIT_MASK 0x3
+#define AVF_THREE_BIT_MASK 0x7
+#define AVF_FOUR_BIT_MASK 0xF
+#define AVF_EIGHTEEN_BIT_MASK 0x3FFFF
+
+/* TX Descriptor */
+struct avf_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define AVF_TXD_QW1_DTYPE_SHIFT 0
+#define AVF_TXD_QW1_DTYPE_MASK (0xFUL << AVF_TXD_QW1_DTYPE_SHIFT)
+
+enum avf_tx_desc_dtype_value {
+ AVF_TX_DESC_DTYPE_DATA = 0x0,
+ AVF_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ AVF_TX_DESC_DTYPE_CONTEXT = 0x1,
+ AVF_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ AVF_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ AVF_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ AVF_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ AVF_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ AVF_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ AVF_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define AVF_TXD_QW1_CMD_SHIFT 4
+#define AVF_TXD_QW1_CMD_MASK (0x3FFUL << AVF_TXD_QW1_CMD_SHIFT)
+
+enum avf_tx_desc_cmd_bits {
+ AVF_TX_DESC_CMD_EOP = 0x0001,
+ AVF_TX_DESC_CMD_RS = 0x0002,
+ AVF_TX_DESC_CMD_ICRC = 0x0004,
+ AVF_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ AVF_TX_DESC_CMD_DUMMY = 0x0010,
+ AVF_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ AVF_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ AVF_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ AVF_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ AVF_TX_DESC_CMD_FCOET = 0x0080,
+ AVF_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define AVF_TXD_QW1_OFFSET_SHIFT 16
+#define AVF_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ AVF_TXD_QW1_OFFSET_SHIFT)
+
+enum avf_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ AVF_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ AVF_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define AVF_TXD_QW1_MACLEN_MASK (0x7FUL << AVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define AVF_TXD_QW1_IPLEN_MASK (0x7FUL << AVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define AVF_TXD_QW1_L4LEN_MASK (0xFUL << AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define AVF_TXD_QW1_FCLEN_MASK (0xFUL << AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define AVF_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define AVF_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ AVF_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define AVF_TXD_QW1_L2TAG1_SHIFT 48
+#define AVF_TXD_QW1_L2TAG1_MASK (0xFFFFULL << AVF_TXD_QW1_L2TAG1_SHIFT)
+
+/* Context descriptors */
+struct avf_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define AVF_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define AVF_TXD_CTX_QW1_DTYPE_MASK (0xFUL << AVF_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define AVF_TXD_CTX_QW1_CMD_SHIFT 4
+#define AVF_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << AVF_TXD_CTX_QW1_CMD_SHIFT)
+
+enum avf_tx_ctx_desc_cmd_bits {
+ AVF_TX_CTX_DESC_TSO = 0x01,
+ AVF_TX_CTX_DESC_TSYN = 0x02,
+ AVF_TX_CTX_DESC_IL2TAG2 = 0x04,
+ AVF_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ AVF_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ AVF_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ AVF_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ AVF_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ AVF_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define AVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define AVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ AVF_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define AVF_TXD_CTX_QW1_MSS_SHIFT 50
+#define AVF_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ AVF_TXD_CTX_QW1_MSS_SHIFT)
+
+#define AVF_TXD_CTX_QW1_VSI_SHIFT 50
+#define AVF_TXD_CTX_QW1_VSI_MASK (0x1FFULL << AVF_TXD_CTX_QW1_VSI_SHIFT)
+
+#define AVF_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define AVF_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ AVF_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum avf_tx_ctx_desc_eipt_offload {
+ AVF_TX_CTX_EXT_IP_NONE = 0x0,
+ AVF_TX_CTX_EXT_IP_IPV6 = 0x1,
+ AVF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ AVF_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define AVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define AVF_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ AVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define AVF_TXD_CTX_QW0_NATT_SHIFT 9
+#define AVF_TXD_CTX_QW0_NATT_MASK (0x3ULL << AVF_TXD_CTX_QW0_NATT_SHIFT)
+
+#define AVF_TXD_CTX_UDP_TUNNELING BIT_ULL(AVF_TXD_CTX_QW0_NATT_SHIFT)
+#define AVF_TXD_CTX_GRE_TUNNELING (0x2ULL << AVF_TXD_CTX_QW0_NATT_SHIFT)
+
+#define AVF_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define AVF_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(AVF_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define AVF_TXD_CTX_EIP_NOINC_IPID_CONST AVF_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define AVF_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define AVF_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \
+ AVF_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define AVF_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define AVF_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ AVF_TXD_CTX_QW0_DECTTL_SHIFT)
+
+#define AVF_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define AVF_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(AVF_TXD_CTX_QW0_L4T_CS_SHIFT)
+struct avf_nop_desc {
+ __le64 rsvd;
+ __le64 dtype_cmd;
+};
+
+#define AVF_TXD_NOP_QW1_DTYPE_SHIFT 0
+#define AVF_TXD_NOP_QW1_DTYPE_MASK (0xFUL << AVF_TXD_NOP_QW1_DTYPE_SHIFT)
+
+#define AVF_TXD_NOP_QW1_CMD_SHIFT 4
+#define AVF_TXD_NOP_QW1_CMD_MASK (0x7FUL << AVF_TXD_NOP_QW1_CMD_SHIFT)
+
+enum avf_tx_nop_desc_cmd_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_TX_NOP_DESC_EOP_SHIFT = 0,
+ AVF_TX_NOP_DESC_RS_SHIFT = 1,
+ AVF_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */
+};
+
+struct avf_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define AVF_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define AVF_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ AVF_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define AVF_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define AVF_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ AVF_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define AVF_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define AVF_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ AVF_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum avf_filter_pctype {
+ /* Note: Values 0-28 are reserved for future use.
+ * Value 29, 30, 32 are not supported on XL710 and X710.
+ */
+ AVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ AVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ AVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ AVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+ AVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ AVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ AVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ AVF_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-38 are reserved for future use.
+ * Value 39, 40, 42 are not supported on XL710 and X710.
+ */
+ AVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ AVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ AVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ AVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+ AVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ AVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ AVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ AVF_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ AVF_FILTER_PCTYPE_FCOE_OX = 48,
+ AVF_FILTER_PCTYPE_FCOE_RX = 49,
+ AVF_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ AVF_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum avf_filter_program_desc_dest {
+ AVF_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ AVF_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ AVF_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum avf_filter_program_desc_fd_status {
+ AVF_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ AVF_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ AVF_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ AVF_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define AVF_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define AVF_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ AVF_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_DTYPE_SHIFT 0
+#define AVF_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << AVF_TXD_FLTR_QW1_DTYPE_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_CMD_SHIFT 4
+#define AVF_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ AVF_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + AVF_TXD_FLTR_QW1_CMD_SHIFT)
+#define AVF_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << AVF_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum avf_filter_program_desc_pcmd {
+ AVF_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ AVF_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define AVF_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + AVF_TXD_FLTR_QW1_CMD_SHIFT)
+#define AVF_TXD_FLTR_QW1_DEST_MASK (0x3ULL << AVF_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + AVF_TXD_FLTR_QW1_CMD_SHIFT)
+#define AVF_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(AVF_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ AVF_TXD_FLTR_QW1_CMD_SHIFT)
+#define AVF_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ AVF_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ AVF_TXD_FLTR_QW1_CMD_SHIFT)
+#define AVF_TXD_FLTR_QW1_ATR_MASK BIT_ULL(AVF_TXD_FLTR_QW1_ATR_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define AVF_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ AVF_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum avf_filter_type {
+ AVF_FLOW_DIRECTOR_FLTR = 0,
+ AVF_PE_QUAD_HASH_FLTR = 1,
+ AVF_ETHERTYPE_FLTR,
+ AVF_FCOE_CTX_FLTR,
+ AVF_MAC_VLAN_FLTR,
+ AVF_HASH_FLTR
+};
+
+struct avf_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct avf_aqc_vsi_properties_data info;
+};
+
+struct avf_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct avf_aqc_get_veb_parameters_completion info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct avf_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected per VEB per TC */
+struct avf_veb_tc_stats {
+ u64 tc_rx_packets[AVF_MAX_TRAFFIC_CLASS];
+ u64 tc_rx_bytes[AVF_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_packets[AVF_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_bytes[AVF_MAX_TRAFFIC_CLASS];
+};
+
+/* Statistics collected per function for FCoE */
+struct avf_fcoe_stats {
+ u64 rx_fcoe_packets; /* fcoeprc */
+ u64 rx_fcoe_dwords; /* focedwrc */
+ u64 rx_fcoe_dropped; /* fcoerpdc */
+ u64 tx_fcoe_packets; /* fcoeptc */
+ u64 tx_fcoe_dwords; /* focedwtc */
+ u64 fcoe_bad_fccrc; /* fcoecrc */
+ u64 fcoe_last_error; /* fcoelast */
+ u64 fcoe_ddp_count; /* fcoeddpc */
+};
+
+/* offset to per function FCoE statistics block */
+#define AVF_FCOE_VF_STAT_OFFSET 0
+#define AVF_FCOE_PF_STAT_OFFSET 128
+#define AVF_FCOE_STAT_MAX (AVF_FCOE_PF_STAT_OFFSET + AVF_MAX_PF)
+
+/* Statistics collected by the MAC */
+struct avf_hw_port_stats {
+ /* eth stats collected by the port */
+ struct avf_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
+ /* EEE LPI */
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define AVF_SR_NVM_CONTROL_WORD 0x00
+#define AVF_SR_PCIE_ANALOG_CONFIG_PTR 0x03
+#define AVF_SR_PHY_ANALOG_CONFIG_PTR 0x04
+#define AVF_SR_OPTION_ROM_PTR 0x05
+#define AVF_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
+#define AVF_SR_AUTO_GENERATED_POINTERS_PTR 0x07
+#define AVF_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
+#define AVF_SR_EMP_GLOBAL_MODULE_PTR 0x09
+#define AVF_SR_RO_PCIE_LCB_PTR 0x0A
+#define AVF_SR_EMP_IMAGE_PTR 0x0B
+#define AVF_SR_PE_IMAGE_PTR 0x0C
+#define AVF_SR_CSR_PROTECTED_LIST_PTR 0x0D
+#define AVF_SR_MNG_CONFIG_PTR 0x0E
+#define AVF_EMP_MODULE_PTR 0x0F
+#define AVF_SR_EMP_MODULE_PTR 0x48
+#define AVF_SR_PBA_FLAGS 0x15
+#define AVF_SR_PBA_BLOCK_PTR 0x16
+#define AVF_SR_BOOT_CONFIG_PTR 0x17
+#define AVF_NVM_OEM_VER_OFF 0x83
+#define AVF_SR_NVM_DEV_STARTER_VERSION 0x18
+#define AVF_SR_NVM_WAKE_ON_LAN 0x19
+#define AVF_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define AVF_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
+#define AVF_SR_NVM_MAP_VERSION 0x29
+#define AVF_SR_NVM_IMAGE_VERSION 0x2A
+#define AVF_SR_NVM_STRUCTURE_VERSION 0x2B
+#define AVF_SR_NVM_EETRACK_LO 0x2D
+#define AVF_SR_NVM_EETRACK_HI 0x2E
+#define AVF_SR_VPD_PTR 0x2F
+#define AVF_SR_PXE_SETUP_PTR 0x30
+#define AVF_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
+#define AVF_SR_NVM_ORIGINAL_EETRACK_LO 0x34
+#define AVF_SR_NVM_ORIGINAL_EETRACK_HI 0x35
+#define AVF_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
+#define AVF_SR_POR_REGS_AUTO_LOAD_PTR 0x38
+#define AVF_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
+#define AVF_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
+#define AVF_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define AVF_SR_PHY_ACTIVITY_LIST_PTR 0x3D
+#define AVF_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define AVF_SR_SW_CHECKSUM_WORD 0x3F
+#define AVF_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
+#define AVF_SR_4TH_FREE_PROVISION_AREA_PTR 0x42
+#define AVF_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
+#define AVF_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
+#define AVF_SR_EMP_SR_SETTINGS_PTR 0x48
+#define AVF_SR_FEATURE_CONFIGURATION_PTR 0x49
+#define AVF_SR_CONFIGURATION_METADATA_PTR 0x4D
+#define AVF_SR_IMMEDIATE_VALUES_PTR 0x4E
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define AVF_SR_VPD_MODULE_MAX_SIZE 1024
+#define AVF_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define AVF_SR_CONTROL_WORD_1_SHIFT 0x06
+#define AVF_SR_CONTROL_WORD_1_MASK (0x03 << AVF_SR_CONTROL_WORD_1_SHIFT)
+#define AVF_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
+#define AVF_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
+#define AVF_PTR_TYPE BIT(15)
+
+/* Shadow RAM related */
+#define AVF_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define AVF_SR_BUF_ALIGNMENT 4096
+#define AVF_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define AVF_SR_SW_CHECKSUM_BASE 0xBABA
+
+#define AVF_SRRD_SRCTL_ATTEMPTS 100000
+
+/* FCoE Tx context descriptor - Use the avf_tx_context_desc struct */
+
+enum i40E_fcoe_tx_ctx_desc_cmd_bits {
+ AVF_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_RELOFF = 0x10,
+ AVF_FCOE_TX_CTX_DESC_CLRSEQ = 0x20,
+ AVF_FCOE_TX_CTX_DESC_DIFENA = 0x40,
+ AVF_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80
+};
+
+/* FCoE DIF/DIX Context descriptor */
+struct avf_fcoe_difdix_context_desc {
+ __le64 flags_buff0_buff1_ref;
+ __le64 difapp_msk_bias;
+};
+
+#define AVF_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT 0
+#define AVF_FCOE_DIFDIX_CTX_QW0_FLAGS_MASK (0xFFFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT)
+
+enum avf_fcoe_difdix_ctx_desc_flags_bits {
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_RSVD = 0x0000,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGCHK = 0x0000,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGNOTCHK = 0x0004,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_GTYPE_OPAQUE = 0x0000,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY = 0x0008,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPTAG = 0x0010,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPREFTAG = 0x0018,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_REFTYPE_CNST = 0x0000,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_REFTYPE_INC1BLK = 0x0020,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_REFTYPE_APPTAG = 0x0040,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_REFTYPE_RSVD = 0x0060,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIXMODE_XSUM = 0x0000,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIXMODE_CRC = 0x0080,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFHOST_UNTAG = 0x0000,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFHOST_BUF = 0x0100,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFHOST_RSVD = 0x0200,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFHOST_EMBDTAGS = 0x0300,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFLAN_UNTAG = 0x0000,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFLAN_TAG = 0x0400,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFBLK_512B = 0x0000,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFBLK_4K = 0x0800
+};
+
+#define AVF_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT 12
+#define AVF_FCOE_DIFDIX_CTX_QW0_BUFF0_MASK (0x3FFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT)
+
+#define AVF_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT 22
+#define AVF_FCOE_DIFDIX_CTX_QW0_BUFF1_MASK (0x3FFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT)
+
+#define AVF_FCOE_DIFDIX_CTX_QW0_REF_SHIFT 32
+#define AVF_FCOE_DIFDIX_CTX_QW0_REF_MASK (0xFFFFFFFFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW0_REF_SHIFT)
+
+#define AVF_FCOE_DIFDIX_CTX_QW1_APP_SHIFT 0
+#define AVF_FCOE_DIFDIX_CTX_QW1_APP_MASK (0xFFFFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW1_APP_SHIFT)
+
+#define AVF_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT 16
+#define AVF_FCOE_DIFDIX_CTX_QW1_APP_MSK_MASK (0xFFFFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT)
+
+#define AVF_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT 32
+#define AVF_FCOE_DIFDIX_CTX_QW0_REF_BIAS_MASK (0xFFFFFFFFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT)
+
+/* FCoE DIF/DIX Buffers descriptor */
+struct avf_fcoe_difdix_buffers_desc {
+ __le64 buff_addr0;
+ __le64 buff_addr1;
+};
+
+/* FCoE DDP Context descriptor */
+struct avf_fcoe_ddp_context_desc {
+ __le64 rsvd;
+ __le64 type_cmd_foff_lsize;
+};
+
+#define AVF_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0
+#define AVF_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \
+ AVF_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
+
+#define AVF_FCOE_DDP_CTX_QW1_CMD_SHIFT 4
+#define AVF_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \
+ AVF_FCOE_DDP_CTX_QW1_CMD_SHIFT)
+
+enum avf_fcoe_ddp_ctx_desc_cmd_bits {
+ AVF_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */
+ AVF_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */
+ AVF_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */
+ AVF_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */
+ AVF_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */
+ AVF_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */
+};
+
+#define AVF_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16
+#define AVF_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \
+ AVF_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
+
+#define AVF_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32
+#define AVF_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \
+ AVF_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
+
+/* FCoE DDP/DWO Queue Context descriptor */
+struct avf_fcoe_queue_context_desc {
+ __le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */
+ __le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */
+};
+
+#define AVF_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0
+#define AVF_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \
+ AVF_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
+
+#define AVF_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12
+#define AVF_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \
+ AVF_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
+
+#define AVF_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0
+#define AVF_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \
+ AVF_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
+
+#define AVF_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13
+#define AVF_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \
+ AVF_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
+
+enum avf_fcoe_queue_ctx_desc_tph_bits {
+ AVF_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1,
+ AVF_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2
+};
+
+#define AVF_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30
+#define AVF_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \
+ AVF_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
+
+/* FCoE DDP/DWO Filter Context descriptor */
+struct avf_fcoe_filter_context_desc {
+ __le32 param;
+ __le16 seqn;
+
+ /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
+ __le16 rsvd_dmaindx;
+
+ /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
+ __le64 flags_rsvd_lanq;
+};
+
+#define AVF_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4
+#define AVF_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \
+ AVF_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
+
+enum avf_fcoe_filter_ctx_desc_flags_bits {
+ AVF_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00,
+ AVF_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01,
+ AVF_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00,
+ AVF_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02,
+ AVF_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00,
+ AVF_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04
+};
+
+#define AVF_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0
+#define AVF_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \
+ AVF_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
+
+#define AVF_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8
+#define AVF_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \
+ AVF_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
+
+#define AVF_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53
+#define AVF_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \
+ AVF_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
+
+enum avf_switch_element_types {
+ AVF_SWITCH_ELEMENT_TYPE_MAC = 1,
+ AVF_SWITCH_ELEMENT_TYPE_PF = 2,
+ AVF_SWITCH_ELEMENT_TYPE_VF = 3,
+ AVF_SWITCH_ELEMENT_TYPE_EMP = 4,
+ AVF_SWITCH_ELEMENT_TYPE_BMC = 6,
+ AVF_SWITCH_ELEMENT_TYPE_PE = 16,
+ AVF_SWITCH_ELEMENT_TYPE_VEB = 17,
+ AVF_SWITCH_ELEMENT_TYPE_PA = 18,
+ AVF_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum avf_ether_type_index {
+ AVF_ETHER_TYPE_1588 = 0,
+ AVF_ETHER_TYPE_FIP = 1,
+ AVF_ETHER_TYPE_OUI_EXTENDED = 2,
+ AVF_ETHER_TYPE_MAC_CONTROL = 3,
+ AVF_ETHER_TYPE_LLDP = 4,
+ AVF_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ AVF_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ AVF_ETHER_TYPE_QCN_CNM = 7,
+ AVF_ETHER_TYPE_8021X = 8,
+ AVF_ETHER_TYPE_ARP = 9,
+ AVF_ETHER_TYPE_RSV1 = 10,
+ AVF_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define AVF_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum avf_hash_filter_size {
+ AVF_HASH_FILTER_SIZE_1K = 0,
+ AVF_HASH_FILTER_SIZE_2K = 1,
+ AVF_HASH_FILTER_SIZE_4K = 2,
+ AVF_HASH_FILTER_SIZE_8K = 3,
+ AVF_HASH_FILTER_SIZE_16K = 4,
+ AVF_HASH_FILTER_SIZE_32K = 5,
+ AVF_HASH_FILTER_SIZE_64K = 6,
+ AVF_HASH_FILTER_SIZE_128K = 7,
+ AVF_HASH_FILTER_SIZE_256K = 8,
+ AVF_HASH_FILTER_SIZE_512K = 9,
+ AVF_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define AVF_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum avf_dma_cntx_size {
+ AVF_DMA_CNTX_SIZE_512 = 0,
+ AVF_DMA_CNTX_SIZE_1K = 1,
+ AVF_DMA_CNTX_SIZE_2K = 2,
+ AVF_DMA_CNTX_SIZE_4K = 3,
+ AVF_DMA_CNTX_SIZE_8K = 4,
+ AVF_DMA_CNTX_SIZE_16K = 5,
+ AVF_DMA_CNTX_SIZE_32K = 6,
+ AVF_DMA_CNTX_SIZE_64K = 7,
+ AVF_DMA_CNTX_SIZE_128K = 8,
+ AVF_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum avf_hash_lut_size {
+ AVF_HASH_LUT_SIZE_128 = 0,
+ AVF_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct avf_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum avf_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum avf_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum avf_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum avf_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum avf_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct avf_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum avf_reset_type {
+ AVF_RESET_POR = 0,
+ AVF_RESET_CORER = 1,
+ AVF_RESET_GLOBR = 2,
+ AVF_RESET_EMPR = 3,
+};
+
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define AVF_NVM_LLDP_CFG_PTR 0x06
+#define AVF_SR_LLDP_CFG_PTR 0x31
+struct avf_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
+/* Offsets into Alternate Ram */
+#define AVF_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
+#define AVF_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
+#define AVF_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */
+#define AVF_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */
+#define AVF_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
+#define AVF_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
+
+/* Alternate Ram Bandwidth Masks */
+#define AVF_ALT_BW_VALUE_MASK 0xFF
+#define AVF_ALT_BW_RELATIVE_MASK 0x40000000
+#define AVF_ALT_BW_VALID_MASK 0x80000000
+
+/* RSS Hash Table Size */
+#define AVF_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define AVF_L3_SRC_SHIFT 47
+#define AVF_L3_SRC_MASK (0x3ULL << AVF_L3_SRC_SHIFT)
+#define AVF_L3_V6_SRC_SHIFT 43
+#define AVF_L3_V6_SRC_MASK (0xFFULL << AVF_L3_V6_SRC_SHIFT)
+#define AVF_L3_DST_SHIFT 35
+#define AVF_L3_DST_MASK (0x3ULL << AVF_L3_DST_SHIFT)
+#define AVF_L3_V6_DST_SHIFT 35
+#define AVF_L3_V6_DST_MASK (0xFFULL << AVF_L3_V6_DST_SHIFT)
+#define AVF_L4_SRC_SHIFT 34
+#define AVF_L4_SRC_MASK (0x1ULL << AVF_L4_SRC_SHIFT)
+#define AVF_L4_DST_SHIFT 33
+#define AVF_L4_DST_MASK (0x1ULL << AVF_L4_DST_SHIFT)
+#define AVF_VERIFY_TAG_SHIFT 31
+#define AVF_VERIFY_TAG_MASK (0x3ULL << AVF_VERIFY_TAG_SHIFT)
+
+#define AVF_FLEX_50_SHIFT 13
+#define AVF_FLEX_50_MASK (0x1ULL << AVF_FLEX_50_SHIFT)
+#define AVF_FLEX_51_SHIFT 12
+#define AVF_FLEX_51_MASK (0x1ULL << AVF_FLEX_51_SHIFT)
+#define AVF_FLEX_52_SHIFT 11
+#define AVF_FLEX_52_MASK (0x1ULL << AVF_FLEX_52_SHIFT)
+#define AVF_FLEX_53_SHIFT 10
+#define AVF_FLEX_53_MASK (0x1ULL << AVF_FLEX_53_SHIFT)
+#define AVF_FLEX_54_SHIFT 9
+#define AVF_FLEX_54_MASK (0x1ULL << AVF_FLEX_54_SHIFT)
+#define AVF_FLEX_55_SHIFT 8
+#define AVF_FLEX_55_MASK (0x1ULL << AVF_FLEX_55_SHIFT)
+#define AVF_FLEX_56_SHIFT 7
+#define AVF_FLEX_56_MASK (0x1ULL << AVF_FLEX_56_SHIFT)
+#define AVF_FLEX_57_SHIFT 6
+#define AVF_FLEX_57_MASK (0x1ULL << AVF_FLEX_57_SHIFT)
+
+/* Version format for Dynamic Device Personalization(DDP) */
+struct avf_ddp_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define AVF_DDP_NAME_SIZE 32
+
+/* Package header */
+struct avf_package_header {
+ struct avf_ddp_version version;
+ u32 segment_count;
+ u32 segment_offset[1];
+};
+
+/* Generic segment header */
+struct avf_generic_seg_header {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_NOTES 0x00000002
+#define SEGMENT_TYPE_AVF 0x00000011
+#define SEGMENT_TYPE_X722 0x00000012
+ u32 type;
+ struct avf_ddp_version version;
+ u32 size;
+ char name[AVF_DDP_NAME_SIZE];
+};
+
+struct avf_metadata_segment {
+ struct avf_generic_seg_header header;
+ struct avf_ddp_version version;
+#define AVF_DDP_TRACKID_RDONLY 0
+#define AVF_DDP_TRACKID_INVALID 0xFFFFFFFF
+ u32 track_id;
+ char name[AVF_DDP_NAME_SIZE];
+};
+
+struct avf_device_id_entry {
+ u32 vendor_dev_id;
+ u32 sub_vendor_dev_id;
+};
+
+struct avf_profile_segment {
+ struct avf_generic_seg_header header;
+ struct avf_ddp_version version;
+ char name[AVF_DDP_NAME_SIZE];
+ u32 device_table_count;
+ struct avf_device_id_entry device_table[1];
+};
+
+struct avf_section_table {
+ u32 section_count;
+ u32 section_offset[1];
+};
+
+struct avf_profile_section_header {
+ u16 tbl_size;
+ u16 data_end;
+ struct {
+#define SECTION_TYPE_INFO 0x00000010
+#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_RB_MMIO 0x00001800
+#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_RB_AQ 0x00001801
+#define SECTION_TYPE_NOTE 0x80000000
+#define SECTION_TYPE_NAME 0x80000001
+#define SECTION_TYPE_PROTO 0x80000002
+#define SECTION_TYPE_PCTYPE 0x80000003
+#define SECTION_TYPE_PTYPE 0x80000004
+ u32 type;
+ u32 offset;
+ u32 size;
+ } section;
+};
+
+struct avf_profile_tlv_section_record {
+ u8 rtype;
+ u8 type;
+ u16 len;
+ u8 data[12];
+};
+
+/* Generic AQ section in proflie */
+struct avf_profile_aq_section {
+ u16 opcode;
+ u16 flags;
+ u8 param[16];
+ u16 datalen;
+ u8 data[1];
+};
+
+struct avf_profile_info {
+ u32 track_id;
+ struct avf_ddp_version version;
+ u8 op;
+#define AVF_DDP_ADD_TRACKID 0x01
+#define AVF_DDP_REMOVE_TRACKID 0x02
+ u8 reserved[7];
+ u8 name[AVF_DDP_NAME_SIZE];
+};
+#endif /* _AVF_TYPE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/base/virtchnl.h b/src/spdk/dpdk/drivers/net/avf/base/virtchnl.h
new file mode 100644
index 00000000..167518f0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/base/virtchnl.h
@@ -0,0 +1,787 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+ VIRTCHNL_STATUS_SUCCESS = 0,
+ VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+ VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+};
+
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+
+enum virtchnl_link_speed {
+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with AVF 1.0 */
+enum virtchnl_rx_hsplit {
+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
+};
+
+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of capabilities
+ * exchange and are not considered part of base mode feature set.
+ */
+ VIRTCHNL_OP_UNKNOWN = 0,
+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ VIRTCHNL_OP_RESET_VF = 2,
+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+ VIRTCHNL_OP_ADD_VLAN = 12,
+ VIRTCHNL_OP_DEL_VLAN = 13,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ VIRTCHNL_OP_GET_STATS = 15,
+ VIRTCHNL_OP_RSVD = 16,
+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+#ifdef VIRTCHNL_SOL_VF_SUPPORT
+ VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19,
+#endif
+#ifdef VIRTCHNL_IWARP
+ VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+#endif
+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ {virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0)}
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures.*/
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+ VIRTCHNL_VSI_TYPE_INVALID = 0,
+ VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum virtchnl_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF capability flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_cap_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u32 pad1;
+ u64 dma_ring_addr;
+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ u32 pad;
+ struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated. This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support. If the request is successful, PF will
+ * then reset the VF to institute required changes.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these message to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+ u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+
+#ifdef VIRTCHNL_SOL_VF_SUPPORT
+/* VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG
+ * VF sends this message to get the default MTU and list of additional ethernet
+ * addresses it is allowed to use.
+ * PF responds with an indirect message containing
+ * virtchnl_addnl_solaris_config with zero or more
+ * virtchnl_ether_addr structures.
+ *
+ * It is expected that this operation will only ever be needed for Solaris VFs
+ * running under a Solaris PF.
+ */
+struct virtchnl_addnl_solaris_config {
+ u16 default_mtu;
+ struct virtchnl_ether_addr_list al;
+};
+
+#endif
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC 0x00000001
+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct virtchnl_eth_stats in an external buffer.
+ */
+
+struct virtchnl_eth_stats {
+ u64 rx_bytes; /* received bytes */
+ u64 rx_unicast; /* received unicast pkts */
+ u64 rx_multicast; /* received multicast pkts */
+ u64 rx_broadcast; /* received broadcast pkts */
+ u64 rx_discards;
+ u64 rx_unknown_protocol;
+ u64 tx_bytes; /* transmitted bytes*/
+ u64 tx_unicast; /* transmitted unicast pkts */
+ u64 tx_multicast; /* transmitted multicast pkts */
+ u64 tx_broadcast; /* transmitted broadcast pkts */
+ u64 tx_discards;
+ u64 tx_errors;
+};
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+ VIRTCHNL_EVENT_UNKNOWN = 0,
+ VIRTCHNL_EVENT_LINK_CHANGE,
+ VIRTCHNL_EVENT_RESET_IMPENDING,
+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO 0
+#define PF_EVENT_SEVERITY_ATTENTION 1
+#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct virtchnl_pf_event {
+ enum virtchnl_event_codes event;
+ union {
+ struct {
+ enum virtchnl_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
+
+#ifdef VIRTCHNL_IWARP
+
+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have an AEQ and CEQ attached to it although
+ * there is a single AEQ per VF IWARP instance in which case
+ * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define QUEUE_TYPE_PE_AEQ 0x80
+#define QUEUE_INVALID_IDX 0xFFFF
+
+struct virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+
+struct virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct virtchnl_iwarp_qv_info qv_info[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+
+#endif
+
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked
+ * will result in 3.
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
+
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format against struct for each opcode
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ int valid_len = 0;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(ver))
+ valid_len = sizeof(u32);
+ break;
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ virtchnl_queue_pair_info));
+ if (vqc->num_queue_pairs == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
+ valid_len += (vimi->num_vectors *
+ sizeof(struct virtchnl_vector_map));
+ if (vimi->num_vectors == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
+ valid_len += veal->num_elements *
+ sizeof(struct virtchnl_ether_addr);
+ if (veal->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ valid_len += vfl->num_elements * sizeof(u16);
+ if (vfl->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+#ifdef VIRTCHNL_IWARP
+ case VIRTCHNL_OP_IWARP:
+ /* These messages are opaque to us and will be validated in
+ * the RDMA client code. We just need to check for nonzero
+ * length. The firmware will enforce max length restrictions.
+ */
+ if (msglen)
+ valid_len = msglen;
+ else
+ err_msg_format = true;
+ break;
+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ break;
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_iwarp_qvlist_info *qv =
+ (struct virtchnl_iwarp_qvlist_info *)msg;
+ if (qv->num_vectors == 0) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += ((qv->num_vectors - 1) *
+ sizeof(struct virtchnl_iwarp_qv_info));
+ }
+ break;
+#endif
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct virtchnl_rss_hena);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ /* These are always errors coming from the VF. */
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ return VIRTCHNL_ERR_PARAM;
+ }
+ /* few more checks */
+ if (err_msg_format || valid_len != msglen)
+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+ return 0;
+}
+#endif /* _VIRTCHNL_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avf/rte_pmd_avf_version.map b/src/spdk/dpdk/drivers/net/avf/rte_pmd_avf_version.map
new file mode 100644
index 00000000..179140fb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avf/rte_pmd_avf_version.map
@@ -0,0 +1,4 @@
+DPDK_18.02 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/avp/Makefile b/src/spdk/dpdk/drivers/net/avp/Makefile
new file mode 100644
index 00000000..c9db667f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avp/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2013-2017, Wind River Systems, Inc.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_avp.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+EXPORT_MAP := rte_pmd_avp_version.map
+
+LIBABIVER := 1
+
+# install public header files to enable compilation of the hypervisor level
+# dpdk application
+SYMLINK-$(CONFIG_RTE_LIBRTE_AVP_PMD)-include += rte_avp_common.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_AVP_PMD)-include += rte_avp_fifo.h
+
+#
+# all source files are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp_ethdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/avp/avp_ethdev.c b/src/spdk/dpdk/drivers/net/avp/avp_ethdev.c
new file mode 100644
index 00000000..761f6c1c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avp/avp_ethdev.c
@@ -0,0 +1,2279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2017 Wind River Systems, Inc.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_dev.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_io.h>
+
+#include "rte_avp_common.h"
+#include "rte_avp_fifo.h"
+
+#include "avp_logs.h"
+
+int avp_logtype_driver;
+
+static int avp_dev_create(struct rte_pci_device *pci_dev,
+ struct rte_eth_dev *eth_dev);
+
+static int avp_dev_configure(struct rte_eth_dev *dev);
+static int avp_dev_start(struct rte_eth_dev *dev);
+static void avp_dev_stop(struct rte_eth_dev *dev);
+static void avp_dev_close(struct rte_eth_dev *dev);
+static void avp_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int avp_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);
+
+static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *pool);
+
+static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+static uint16_t avp_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+static uint16_t avp_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+static uint16_t avp_xmit_scattered_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+static uint16_t avp_xmit_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+static void avp_dev_rx_queue_release(void *rxq);
+static void avp_dev_tx_queue_release(void *txq);
+
+static int avp_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void avp_dev_stats_reset(struct rte_eth_dev *dev);
+
+
+#define AVP_MAX_RX_BURST 64
+#define AVP_MAX_TX_BURST 64
+#define AVP_MAX_MAC_ADDRS 1
+#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN
+
+
+/*
+ * Defines the number of microseconds to wait before checking the response
+ * queue for completion.
+ */
+#define AVP_REQUEST_DELAY_USECS (5000)
+
+/*
+ * Defines the number times to check the response queue for completion before
+ * declaring a timeout.
+ */
+#define AVP_MAX_REQUEST_RETRY (100)
+
+/* Defines the current PCI driver version number */
+#define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_avp_map[] = {
+ { .vendor_id = RTE_AVP_PCI_VENDOR_ID,
+ .device_id = RTE_AVP_PCI_DEVICE_ID,
+ .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
+ .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
+ .class_id = RTE_CLASS_ANY_ID,
+ },
+
+ { .vendor_id = 0, /* sentinel */
+ },
+};
+
+/*
+ * dev_ops for avp, bare necessities for basic operation
+ */
+static const struct eth_dev_ops avp_eth_dev_ops = {
+ .dev_configure = avp_dev_configure,
+ .dev_start = avp_dev_start,
+ .dev_stop = avp_dev_stop,
+ .dev_close = avp_dev_close,
+ .dev_infos_get = avp_dev_info_get,
+ .vlan_offload_set = avp_vlan_offload_set,
+ .stats_get = avp_dev_stats_get,
+ .stats_reset = avp_dev_stats_reset,
+ .link_update = avp_dev_link_update,
+ .promiscuous_enable = avp_dev_promiscuous_enable,
+ .promiscuous_disable = avp_dev_promiscuous_disable,
+ .rx_queue_setup = avp_dev_rx_queue_setup,
+ .rx_queue_release = avp_dev_rx_queue_release,
+ .tx_queue_setup = avp_dev_tx_queue_setup,
+ .tx_queue_release = avp_dev_tx_queue_release,
+};
+
+/**@{ AVP device flags */
+#define AVP_F_PROMISC (1 << 1)
+#define AVP_F_CONFIGURED (1 << 2)
+#define AVP_F_LINKUP (1 << 3)
+#define AVP_F_DETACHED (1 << 4)
+/**@} */
+
+/* Ethernet device validation marker */
+#define AVP_ETHDEV_MAGIC 0x92972862
+
+/*
+ * Defines the AVP device attributes which are attached to an RTE ethernet
+ * device
+ */
+struct avp_dev {
+ uint32_t magic; /**< Memory validation marker */
+ uint64_t device_id; /**< Unique system identifier */
+ struct ether_addr ethaddr; /**< Host specified MAC address */
+ struct rte_eth_dev_data *dev_data;
+ /**< Back pointer to ethernet device data */
+ volatile uint32_t flags; /**< Device operational flags */
+ uint16_t port_id; /**< Ethernet port identifier */
+ struct rte_mempool *pool; /**< pkt mbuf mempool */
+ unsigned int guest_mbuf_size; /**< local pool mbuf size */
+ unsigned int host_mbuf_size; /**< host mbuf size */
+ unsigned int max_rx_pkt_len; /**< maximum receive unit */
+ uint32_t host_features; /**< Supported feature bitmap */
+ uint32_t features; /**< Enabled feature bitmap */
+ unsigned int num_tx_queues; /**< Negotiated number of transmit queues */
+ unsigned int max_tx_queues; /**< Maximum number of transmit queues */
+ unsigned int num_rx_queues; /**< Negotiated number of receive queues */
+ unsigned int max_rx_queues; /**< Maximum number of receive queues */
+
+ struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */
+ struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */
+ struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
+ /**< Allocated mbufs queue */
+ struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
+ /**< To be freed mbufs queue */
+
+ /* mutual exclusion over the 'flag' and 'resp_q/req_q' fields */
+ rte_spinlock_t lock;
+
+ /* For request & response */
+ struct rte_avp_fifo *req_q; /**< Request queue */
+ struct rte_avp_fifo *resp_q; /**< Response queue */
+ void *host_sync_addr; /**< (host) Req/Resp Mem address */
+ void *sync_addr; /**< Req/Resp Mem address */
+ void *host_mbuf_addr; /**< (host) MBUF pool start address */
+ void *mbuf_addr; /**< MBUF pool start address */
+} __rte_cache_aligned;
+
+/* RTE ethernet private data */
+struct avp_adapter {
+ struct avp_dev avp;
+} __rte_cache_aligned;
+
+
+/* 32-bit MMIO register write */
+#define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))
+
+/* 32-bit MMIO register read */
+#define AVP_READ32(_addr) rte_read32_relaxed((_addr))
+
+/* Macro to cast the ethernet device private data to a AVP object */
+#define AVP_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct avp_adapter *)adapter)->avp)
+
+/*
+ * Defines the structure of a AVP device queue for the purpose of handling the
+ * receive and transmit burst callback functions
+ */
+struct avp_queue {
+ struct rte_eth_dev_data *dev_data;
+ /**< Backpointer to ethernet device data */
+ struct avp_dev *avp; /**< Backpointer to AVP device */
+ uint16_t queue_id;
+ /**< Queue identifier used for indexing current queue */
+ uint16_t queue_base;
+ /**< Base queue identifier for queue servicing */
+ uint16_t queue_limit;
+ /**< Maximum queue identifier for queue servicing */
+
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+};
+
+/* send a request and wait for a response
+ *
+ * @warning must be called while holding the avp->lock spinlock.
+ */
+static int
+avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
+{
+ unsigned int retry = AVP_MAX_REQUEST_RETRY;
+ void *resp_addr = NULL;
+ unsigned int count;
+ int ret;
+
+ PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);
+
+ request->result = -ENOTSUP;
+
+ /* Discard any stale responses before starting a new request */
+ while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
+ PMD_DRV_LOG(DEBUG, "Discarding stale response\n");
+
+ rte_memcpy(avp->sync_addr, request, sizeof(*request));
+ count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
+ if (count < 1) {
+ PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
+ request->req_id);
+ ret = -EBUSY;
+ goto done;
+ }
+
+ while (retry--) {
+ /* wait for a response */
+ usleep(AVP_REQUEST_DELAY_USECS);
+
+ count = avp_fifo_count(avp->resp_q);
+ if (count >= 1) {
+ /* response received */
+ break;
+ }
+
+ if ((count < 1) && (retry == 0)) {
+ PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
+ request->req_id);
+ ret = -ETIME;
+ goto done;
+ }
+ }
+
+ /* retrieve the response */
+ count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
+ if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
+ PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
+ count, resp_addr, avp->host_sync_addr);
+ ret = -ENODATA;
+ goto done;
+ }
+
+ /* copy to user buffer */
+ rte_memcpy(request, avp->sync_addr, sizeof(*request));
+ ret = 0;
+
+ PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
+ request->result, request->req_id);
+
+done:
+ return ret;
+}
+
+static int
+avp_dev_ctrl_set_link_state(struct rte_eth_dev *eth_dev, unsigned int state)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_request request;
+ int ret;
+
+ /* setup a link state change request */
+ memset(&request, 0, sizeof(request));
+ request.req_id = RTE_AVP_REQ_CFG_NETWORK_IF;
+ request.if_up = state;
+
+ ret = avp_dev_process_request(avp, &request);
+
+ return ret == 0 ? request.result : ret;
+}
+
+static int
+avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
+ struct rte_avp_device_config *config)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_request request;
+ int ret;
+
+ /* setup a configure request */
+ memset(&request, 0, sizeof(request));
+ request.req_id = RTE_AVP_REQ_CFG_DEVICE;
+ memcpy(&request.config, config, sizeof(request.config));
+
+ ret = avp_dev_process_request(avp, &request);
+
+ return ret == 0 ? request.result : ret;
+}
+
+static int
+avp_dev_ctrl_shutdown(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_request request;
+ int ret;
+
+ /* setup a shutdown request */
+ memset(&request, 0, sizeof(request));
+ request.req_id = RTE_AVP_REQ_SHUTDOWN_DEVICE;
+
+ ret = avp_dev_process_request(avp, &request);
+
+ return ret == 0 ? request.result : ret;
+}
+
+/* translate from host mbuf virtual address to guest virtual address */
+static inline void *
+avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
+{
+ return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
+ (uintptr_t)avp->host_mbuf_addr),
+ (uintptr_t)avp->mbuf_addr);
+}
+
+/* translate from host physical address to guest virtual address */
+static void *
+avp_dev_translate_address(struct rte_eth_dev *eth_dev,
+ rte_iova_t host_phys_addr)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_mem_resource *resource;
+ struct rte_avp_memmap_info *info;
+ struct rte_avp_memmap *map;
+ off_t offset;
+ void *addr;
+ unsigned int i;
+
+ addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
+ resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
+ info = (struct rte_avp_memmap_info *)resource->addr;
+
+ offset = 0;
+ for (i = 0; i < info->nb_maps; i++) {
+ /* search all segments looking for a matching address */
+ map = &info->maps[i];
+
+ if ((host_phys_addr >= map->phys_addr) &&
+ (host_phys_addr < (map->phys_addr + map->length))) {
+ /* address is within this segment */
+ offset += (host_phys_addr - map->phys_addr);
+ addr = RTE_PTR_ADD(addr, (uintptr_t)offset);
+
+ PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
+ host_phys_addr, addr);
+
+ return addr;
+ }
+ offset += map->length;
+ }
+
+ return NULL;
+}
+
+/* verify that the incoming device version is compatible with our version */
+static int
+avp_dev_version_check(uint32_t version)
+{
+ uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
+ uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);
+
+ if (device <= driver) {
+ /* the host driver version is less than or equal to ours */
+ return 0;
+ }
+
+ return 1;
+}
+
+/* verify that memory regions have expected version and validation markers */
+static int
+avp_dev_check_regions(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_avp_memmap_info *memmap;
+ struct rte_avp_device_info *info;
+ struct rte_mem_resource *resource;
+ unsigned int i;
+
+ /* Dump resource info for debug */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ resource = &pci_dev->mem_resource[i];
+ if ((resource->phys_addr == 0) || (resource->len == 0))
+ continue;
+
+ PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
+ i, resource->phys_addr,
+ resource->len, resource->addr);
+
+ switch (i) {
+ case RTE_AVP_PCI_MEMMAP_BAR:
+ memmap = (struct rte_avp_memmap_info *)resource->addr;
+ if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
+ (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
+ PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
+ memmap->magic, memmap->version);
+ return -EINVAL;
+ }
+ break;
+
+ case RTE_AVP_PCI_DEVICE_BAR:
+ info = (struct rte_avp_device_info *)resource->addr;
+ if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
+ avp_dev_version_check(info->version)) {
+ PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
+ info->magic, info->version,
+ AVP_DPDK_DRIVER_VERSION);
+ return -EINVAL;
+ }
+ break;
+
+ case RTE_AVP_PCI_MEMORY_BAR:
+ case RTE_AVP_PCI_MMIO_BAR:
+ if (resource->addr == NULL) {
+ PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
+ i);
+ return -EINVAL;
+ }
+ break;
+
+ case RTE_AVP_PCI_MSIX_BAR:
+ default:
+ /* no validation required */
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+avp_dev_detach(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int ret;
+
+ PMD_DRV_LOG(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "\n",
+ eth_dev->data->port_id, avp->device_id);
+
+ rte_spinlock_lock(&avp->lock);
+
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(NOTICE, "port %u already detached\n",
+ eth_dev->data->port_id);
+ ret = 0;
+ goto unlock;
+ }
+
+ /* shutdown the device first so the host stops sending us packets. */
+ ret = avp_dev_ctrl_shutdown(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to send/recv shutdown to host, ret=%d\n",
+ ret);
+ avp->flags &= ~AVP_F_DETACHED;
+ goto unlock;
+ }
+
+ avp->flags |= AVP_F_DETACHED;
+ rte_wmb();
+
+ /* wait for queues to acknowledge the presence of the detach flag */
+ rte_delay_ms(1);
+
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+ return ret;
+}
+
+static void
+_avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+ struct avp_dev *avp =
+ AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct avp_queue *rxq;
+ uint16_t queue_count;
+ uint16_t remainder;
+
+ rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id];
+
+ /*
+ * Must map all AVP fifos as evenly as possible between the configured
+ * device queues. Each device queue will service a subset of the AVP
+ * fifos. If there is an odd number of device queues the first set of
+ * device queues will get the extra AVP fifos.
+ */
+ queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues;
+ remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues;
+ if (rx_queue_id < remainder) {
+ /* these queues must service one extra FIFO */
+ rxq->queue_base = rx_queue_id * (queue_count + 1);
+ rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1;
+ } else {
+ /* these queues service the regular number of FIFO */
+ rxq->queue_base = ((remainder * (queue_count + 1)) +
+ ((rx_queue_id - remainder) * queue_count));
+ rxq->queue_limit = rxq->queue_base + queue_count - 1;
+ }
+
+ PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
+ rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);
+
+ rxq->queue_id = rxq->queue_base;
+}
+
+static void
+_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_device_info *host_info;
+ void *addr;
+
+ addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
+ host_info = (struct rte_avp_device_info *)addr;
+
+ /*
+ * the transmit direction is not negotiated beyond respecting the max
+ * number of queues because the host can handle arbitrary guest tx
+ * queues (host rx queues).
+ */
+ avp->num_tx_queues = eth_dev->data->nb_tx_queues;
+
+ /*
+ * the receive direction is more restrictive. The host requires a
+ * minimum number of guest rx queues (host tx queues) therefore
+ * negotiate a value that is at least as large as the host minimum
+ * requirement. If the host and guest values are not identical then a
+ * mapping will be established in the receive_queue_setup function.
+ */
+ avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
+ eth_dev->data->nb_rx_queues);
+
+ PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
+ avp->num_tx_queues, avp->num_rx_queues);
+}
+
+static int
+avp_dev_attach(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_device_config config;
+ unsigned int i;
+ int ret;
+
+ PMD_DRV_LOG(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "\n",
+ eth_dev->data->port_id, avp->device_id);
+
+ rte_spinlock_lock(&avp->lock);
+
+ if (!(avp->flags & AVP_F_DETACHED)) {
+ PMD_DRV_LOG(NOTICE, "port %u already attached\n",
+ eth_dev->data->port_id);
+ ret = 0;
+ goto unlock;
+ }
+
+ /*
+ * make sure that the detached flag is set prior to reconfiguring the
+ * queues.
+ */
+ avp->flags |= AVP_F_DETACHED;
+ rte_wmb();
+
+ /*
+ * re-run the device create utility which will parse the new host info
+ * and setup the AVP device queue pointers.
+ */
+ ret = avp_dev_create(RTE_ETH_DEV_TO_PCI(eth_dev), eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
+ ret);
+ goto unlock;
+ }
+
+ if (avp->flags & AVP_F_CONFIGURED) {
+ /*
+ * Update the receive queue mapping to handle cases where the
+ * source and destination hosts have different queue
+ * requirements. As long as the DETACHED flag is asserted the
+ * queue table should not be referenced so it should be safe to
+ * update it.
+ */
+ _avp_set_queue_counts(eth_dev);
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+ _avp_set_rx_queue_mappings(eth_dev, i);
+
+ /*
+ * Update the host with our config details so that it knows the
+ * device is active.
+ */
+ memset(&config, 0, sizeof(config));
+ config.device_id = avp->device_id;
+ config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
+ config.driver_version = AVP_DPDK_DRIVER_VERSION;
+ config.features = avp->features;
+ config.num_tx_queues = avp->num_tx_queues;
+ config.num_rx_queues = avp->num_rx_queues;
+ config.if_up = !!(avp->flags & AVP_F_LINKUP);
+
+ ret = avp_dev_ctrl_set_config(eth_dev, &config);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
+ ret);
+ goto unlock;
+ }
+ }
+
+ rte_wmb();
+ avp->flags &= ~AVP_F_DETACHED;
+
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+ return ret;
+}
+
+static void
+avp_dev_interrupt_handler(void *data)
+{
+ struct rte_eth_dev *eth_dev = data;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ uint32_t status, value;
+ int ret;
+
+ if (registers == NULL)
+ rte_panic("no mapped MMIO register space\n");
+
+ /* read the interrupt status register
+ * note: this register clears on read so all raised interrupts must be
+ * handled or remembered for later processing
+ */
+ status = AVP_READ32(
+ RTE_PTR_ADD(registers,
+ RTE_AVP_INTERRUPT_STATUS_OFFSET));
+
+ if (status & RTE_AVP_MIGRATION_INTERRUPT_MASK) {
+ /* handle interrupt based on current status */
+ value = AVP_READ32(
+ RTE_PTR_ADD(registers,
+ RTE_AVP_MIGRATION_STATUS_OFFSET));
+ switch (value) {
+ case RTE_AVP_MIGRATION_DETACHED:
+ ret = avp_dev_detach(eth_dev);
+ break;
+ case RTE_AVP_MIGRATION_ATTACHED:
+ ret = avp_dev_attach(eth_dev);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unexpected migration status, status=%u\n",
+ value);
+ ret = -EINVAL;
+ }
+
+ /* acknowledge the request by writing out our current status */
+ value = (ret == 0 ? value : RTE_AVP_MIGRATION_ERROR);
+ AVP_WRITE32(value,
+ RTE_PTR_ADD(registers,
+ RTE_AVP_MIGRATION_ACK_OFFSET));
+
+ PMD_DRV_LOG(NOTICE, "AVP migration interrupt handled\n");
+ }
+
+ if (status & ~RTE_AVP_MIGRATION_INTERRUPT_MASK)
+ PMD_DRV_LOG(WARNING, "AVP unexpected interrupt, status=0x%08x\n",
+ status);
+
+ /* re-enable UIO interrupt handling */
+ ret = rte_intr_enable(&pci_dev->intr_handle);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
+ ret);
+ /* continue */
+ }
+}
+
+static int
+avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ int ret;
+
+ if (registers == NULL)
+ return -EINVAL;
+
+ /* enable UIO interrupt handling */
+ ret = rte_intr_enable(&pci_dev->intr_handle);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ /* inform the device that all interrupts are enabled */
+ AVP_WRITE32(RTE_AVP_APP_INTERRUPTS_MASK,
+ RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
+
+ return 0;
+}
+
+static int
+avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ int ret;
+
+ if (registers == NULL)
+ return 0;
+
+ /* inform the device that all interrupts are disabled */
+ AVP_WRITE32(RTE_AVP_NO_INTERRUPTS_MASK,
+ RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
+
+ /* enable UIO interrupt handling */
+ ret = rte_intr_disable(&pci_dev->intr_handle);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ int ret;
+
+ /* register a callback handler with UIO for interrupt notifications */
+ ret = rte_intr_callback_register(&pci_dev->intr_handle,
+ avp_dev_interrupt_handler,
+ (void *)eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to register UIO interrupt callback, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ /* enable interrupt processing */
+ return avp_dev_enable_interrupts(eth_dev);
+}
+
+static int
+avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ uint32_t value;
+
+ if (registers == NULL)
+ return 0;
+
+ value = AVP_READ32(RTE_PTR_ADD(registers,
+ RTE_AVP_MIGRATION_STATUS_OFFSET));
+ if (value == RTE_AVP_MIGRATION_DETACHED) {
+ /* migration is in progress; ack it if we have not already */
+ AVP_WRITE32(value,
+ RTE_PTR_ADD(registers,
+ RTE_AVP_MIGRATION_ACK_OFFSET));
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * create a AVP device using the supplied device info by first translating it
+ * to guest address space(s).
+ */
+static int
+avp_dev_create(struct rte_pci_device *pci_dev,
+ struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_device_info *host_info;
+ struct rte_mem_resource *resource;
+ unsigned int i;
+
+ resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
+ if (resource->addr == NULL) {
+ PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
+ RTE_AVP_PCI_DEVICE_BAR);
+ return -EFAULT;
+ }
+ host_info = (struct rte_avp_device_info *)resource->addr;
+
+ if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
+ avp_dev_version_check(host_info->version)) {
+ PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
+ host_info->magic, host_info->version,
+ AVP_DPDK_DRIVER_VERSION);
+ return -EINVAL;
+ }
+
+ PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
+ RTE_AVP_GET_RELEASE_VERSION(host_info->version),
+ RTE_AVP_GET_MAJOR_VERSION(host_info->version),
+ RTE_AVP_GET_MINOR_VERSION(host_info->version));
+
+ PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
+ host_info->min_tx_queues, host_info->max_tx_queues);
+ PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
+ host_info->min_rx_queues, host_info->max_rx_queues);
+ PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
+ host_info->features);
+
+ if (avp->magic != AVP_ETHDEV_MAGIC) {
+ /*
+ * First time initialization (i.e., not during a VM
+ * migration)
+ */
+ memset(avp, 0, sizeof(*avp));
+ avp->magic = AVP_ETHDEV_MAGIC;
+ avp->dev_data = eth_dev->data;
+ avp->port_id = eth_dev->data->port_id;
+ avp->host_mbuf_size = host_info->mbuf_size;
+ avp->host_features = host_info->features;
+ rte_spinlock_init(&avp->lock);
+ memcpy(&avp->ethaddr.addr_bytes[0],
+ host_info->ethaddr, ETHER_ADDR_LEN);
+ /* adjust max values to not exceed our max */
+ avp->max_tx_queues =
+ RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
+ avp->max_rx_queues =
+ RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
+ } else {
+ /* Re-attaching during migration */
+
+ /* TODO... requires validation of host values */
+ if ((host_info->features & avp->features) != avp->features) {
+ PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
+ avp->features, host_info->features);
+ /* this should not be possible; continue for now */
+ }
+ }
+
+ /* the device id is allowed to change over migrations */
+ avp->device_id = host_info->device_id;
+
+ /* translate incoming host addresses to guest address space */
+ PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
+ host_info->tx_phys);
+ PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
+ host_info->alloc_phys);
+ for (i = 0; i < avp->max_tx_queues; i++) {
+ avp->tx_q[i] = avp_dev_translate_address(eth_dev,
+ host_info->tx_phys + (i * host_info->tx_size));
+
+ avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
+ host_info->alloc_phys + (i * host_info->alloc_size));
+ }
+
+ PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
+ host_info->rx_phys);
+ PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
+ host_info->free_phys);
+ for (i = 0; i < avp->max_rx_queues; i++) {
+ avp->rx_q[i] = avp_dev_translate_address(eth_dev,
+ host_info->rx_phys + (i * host_info->rx_size));
+ avp->free_q[i] = avp_dev_translate_address(eth_dev,
+ host_info->free_phys + (i * host_info->free_size));
+ }
+
+ PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
+ host_info->req_phys);
+ PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
+ host_info->resp_phys);
+ PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
+ host_info->sync_phys);
+ PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
+ host_info->mbuf_phys);
+ avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
+ avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
+ avp->sync_addr =
+ avp_dev_translate_address(eth_dev, host_info->sync_phys);
+ avp->mbuf_addr =
+ avp_dev_translate_address(eth_dev, host_info->mbuf_phys);
+
+ /*
+ * store the host mbuf virtual address so that we can calculate
+ * relative offsets for each mbuf as they are processed
+ */
+ avp->host_mbuf_addr = host_info->mbuf_va;
+ avp->host_sync_addr = host_info->sync_va;
+
+ /*
+ * store the maximum packet length that is supported by the host.
+ */
+ avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
+ PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
+ host_info->max_rx_pkt_len);
+
+ return 0;
+}
+
+/*
+ * This function is based on probe() function in avp_pci.c
+ * It returns 0 on success.
+ */
+static int
+eth_avp_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp =
+ AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev;
+ int ret;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ eth_dev->dev_ops = &avp_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &avp_recv_pkts;
+ eth_dev->tx_pkt_burst = &avp_xmit_pkts;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ /*
+ * no setup required on secondary processes. All data is saved
+ * in dev_private by the primary process. All resource should
+ * be mapped to the same virtual address so all pointers should
+ * be valid.
+ */
+ if (eth_dev->data->scattered_rx) {
+ PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
+ eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
+ eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
+ }
+ return 0;
+ }
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ /* Check current migration status */
+ if (avp_dev_migration_pending(eth_dev)) {
+ PMD_DRV_LOG(ERR, "VM live migration operation in progress\n");
+ return -EBUSY;
+ }
+
+ /* Check BAR resources */
+ ret = avp_dev_check_regions(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ /* Enable interrupts */
+ ret = avp_dev_setup_interrupts(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to enable interrupts, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Handle each subtype */
+ ret = avp_dev_create(pci_dev, eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
+ ETHER_ADDR_LEN);
+ return -ENOMEM;
+ }
+
+ /* Get a mac from device config */
+ ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);
+
+ return 0;
+}
+
+static int
+eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ int ret;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (eth_dev->data == NULL)
+ return 0;
+
+ ret = avp_dev_disable_interrupts(eth_dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to disable interrupts, ret=%d\n", ret);
+ return ret;
+ }
+
+ if (eth_dev->data->mac_addrs != NULL) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ }
+
+ return 0;
+}
+
+static int
+eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct avp_adapter),
+ eth_avp_dev_init);
+}
+
+static int
+eth_avp_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ eth_avp_dev_uninit);
+}
+
+static struct rte_pci_driver rte_avp_pmd = {
+ .id_table = pci_id_avp_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_avp_pci_probe,
+ .remove = eth_avp_pci_remove,
+};
+
+static int
+avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
+ struct avp_dev *avp)
+{
+ unsigned int max_rx_pkt_len;
+
+ max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
+ (max_rx_pkt_len > avp->host_mbuf_size)) {
+ /*
+ * If the guest MTU is greater than either the host or guest
+ * buffers then chained mbufs have to be enabled in the TX
+ * direction. It is assumed that the application will not need
+ * to send packets larger than their max_rx_pkt_len (MRU).
+ */
+ return 1;
+ }
+
+ if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
+ (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
+ /*
+ * If the host MRU is greater than its own mbuf size or the
+ * guest mbuf size then chained mbufs have to be enabled in the
+ * RX direction.
+ */
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *pool)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct avp_queue *rxq;
+
+ if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
+ rx_queue_id, eth_dev->data->nb_rx_queues);
+ return -EINVAL;
+ }
+
+ /* Save mbuf pool pointer */
+ avp->pool = pool;
+
+ /* Save the local mbuf size */
+ mbp_priv = rte_mempool_get_priv(pool);
+ avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
+ avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;
+
+ if (avp_dev_enable_scattered(eth_dev, avp)) {
+ if (!eth_dev->data->scattered_rx) {
+ PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
+ eth_dev->data->scattered_rx = 1;
+ eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
+ eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
+ }
+ }
+
+ PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
+ avp->max_rx_pkt_len,
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ avp->host_mbuf_size,
+ avp->guest_mbuf_size);
+
+ /* allocate a queue object */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
+ return -ENOMEM;
+ }
+
+ /* save back pointers to AVP and Ethernet devices */
+ rxq->avp = avp;
+ rxq->dev_data = eth_dev->data;
+ eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq;
+
+ /* setup the queue receive mapping for the current queue. */
+ _avp_set_rx_queue_mappings(eth_dev, rx_queue_id);
+
+ PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);
+
+ (void)nb_rx_desc;
+ (void)rx_conf;
+ return 0;
+}
+
+static int
+avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct avp_queue *txq;
+
+ if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
+ PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
+ tx_queue_id, eth_dev->data->nb_tx_queues);
+ return -EINVAL;
+ }
+
+ /* allocate a queue object */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
+ return -ENOMEM;
+ }
+
+ /* only the configured set of transmit queues are used */
+ txq->queue_id = tx_queue_id;
+ txq->queue_base = tx_queue_id;
+ txq->queue_limit = tx_queue_id;
+
+ /* save back pointers to AVP and Ethernet devices */
+ txq->avp = avp;
+ txq->dev_data = eth_dev->data;
+ eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;
+
+ PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);
+
+ (void)nb_tx_desc;
+ (void)tx_conf;
+ return 0;
+}
+
+static inline int
+_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
+{
+ uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
+ uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
+ return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
+}
+
+static inline int
+_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
+{
+ struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+ if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
+ /* allow all packets destined to our address */
+ return 0;
+ }
+
+ if (likely(is_broadcast_ether_addr(&eth->d_addr))) {
+ /* allow all broadcast packets */
+ return 0;
+ }
+
+ if (likely(is_multicast_ether_addr(&eth->d_addr))) {
+ /* allow all multicast packets */
+ return 0;
+ }
+
+ if (avp->flags & AVP_F_PROMISC) {
+ /* allow all packets when in promiscuous mode */
+ return 0;
+ }
+
+ return -1;
+}
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
+static inline void
+__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)
+{
+ struct rte_avp_desc *first_buf;
+ struct rte_avp_desc *pkt_buf;
+ unsigned int pkt_len;
+ unsigned int nb_segs;
+ void *pkt_data;
+ unsigned int i;
+
+ first_buf = avp_dev_translate_buffer(avp, buf);
+
+ i = 0;
+ pkt_len = 0;
+ nb_segs = first_buf->nb_segs;
+ do {
+ /* Adjust pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ if (pkt_buf == NULL)
+ rte_panic("bad buffer: segment %u has an invalid address %p\n",
+ i, buf);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ if (pkt_data == NULL)
+ rte_panic("bad buffer: segment %u has a NULL data pointer\n",
+ i);
+ if (pkt_buf->data_len == 0)
+ rte_panic("bad buffer: segment %u has 0 data length\n",
+ i);
+ pkt_len += pkt_buf->data_len;
+ nb_segs--;
+ i++;
+
+ } while (nb_segs && (buf = pkt_buf->next) != NULL);
+
+ if (nb_segs != 0)
+ rte_panic("bad buffer: expected %u segments found %u\n",
+ first_buf->nb_segs, (first_buf->nb_segs - nb_segs));
+ if (pkt_len != first_buf->pkt_len)
+ rte_panic("bad buffer: expected length %u found %u\n",
+ first_buf->pkt_len, pkt_len);
+}
+
+#define avp_dev_buffer_sanity_check(a, b) \
+ __avp_dev_buffer_sanity_check((a), (b))
+
+#else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */
+
+#define avp_dev_buffer_sanity_check(a, b) do {} while (0)
+
+#endif
+
+/*
+ * Copy a host buffer chain to a set of mbufs. This function assumes that
+ * there exactly the required number of mbufs to copy all source bytes.
+ */
+static inline struct rte_mbuf *
+avp_dev_copy_from_buffers(struct avp_dev *avp,
+ struct rte_avp_desc *buf,
+ struct rte_mbuf **mbufs,
+ unsigned int count)
+{
+ struct rte_mbuf *m_previous = NULL;
+ struct rte_avp_desc *pkt_buf;
+ unsigned int total_length = 0;
+ unsigned int copy_length;
+ unsigned int src_offset;
+ struct rte_mbuf *m;
+ uint16_t ol_flags;
+ uint16_t vlan_tci;
+ void *pkt_data;
+ unsigned int i;
+
+ avp_dev_buffer_sanity_check(avp, buf);
+
+ /* setup the first source buffer */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ total_length = pkt_buf->pkt_len;
+ src_offset = 0;
+
+ if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
+ ol_flags = PKT_RX_VLAN;
+ vlan_tci = pkt_buf->vlan_tci;
+ } else {
+ ol_flags = 0;
+ vlan_tci = 0;
+ }
+
+ for (i = 0; (i < count) && (buf != NULL); i++) {
+ /* fill each destination buffer */
+ m = mbufs[i];
+
+ if (m_previous != NULL)
+ m_previous->next = m;
+
+ m_previous = m;
+
+ do {
+ /*
+ * Copy as many source buffers as will fit in the
+ * destination buffer.
+ */
+ copy_length = RTE_MIN((avp->guest_mbuf_size -
+ rte_pktmbuf_data_len(m)),
+ (pkt_buf->data_len -
+ src_offset));
+ rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
+ rte_pktmbuf_data_len(m)),
+ RTE_PTR_ADD(pkt_data, src_offset),
+ copy_length);
+ rte_pktmbuf_data_len(m) += copy_length;
+ src_offset += copy_length;
+
+ if (likely(src_offset == pkt_buf->data_len)) {
+ /* need a new source buffer */
+ buf = pkt_buf->next;
+ if (buf != NULL) {
+ pkt_buf = avp_dev_translate_buffer(
+ avp, buf);
+ pkt_data = avp_dev_translate_buffer(
+ avp, pkt_buf->data);
+ src_offset = 0;
+ }
+ }
+
+ if (unlikely(rte_pktmbuf_data_len(m) ==
+ avp->guest_mbuf_size)) {
+ /* need a new destination mbuf */
+ break;
+ }
+
+ } while (buf != NULL);
+ }
+
+ m = mbufs[0];
+ m->ol_flags = ol_flags;
+ m->nb_segs = count;
+ rte_pktmbuf_pkt_len(m) = total_length;
+ m->vlan_tci = vlan_tci;
+
+ __rte_mbuf_sanity_check(m, 1);
+
+ return m;
+}
+
+static uint16_t
+avp_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avp_queue *rxq = (struct avp_queue *)rx_queue;
+ struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
+ struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];
+ struct avp_dev *avp = rxq->avp;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_fifo *free_q;
+ struct rte_avp_fifo *rx_q;
+ struct rte_avp_desc *buf;
+ unsigned int count, avail, n;
+ unsigned int guest_mbuf_size;
+ struct rte_mbuf *m;
+ unsigned int required;
+ unsigned int buf_len;
+ unsigned int port_id;
+ unsigned int i;
+
+ if (unlikely(avp->flags & AVP_F_DETACHED)) {
+ /* VM live migration in progress */
+ return 0;
+ }
+
+ guest_mbuf_size = avp->guest_mbuf_size;
+ port_id = avp->port_id;
+ rx_q = avp->rx_q[rxq->queue_id];
+ free_q = avp->free_q[rxq->queue_id];
+
+ /* setup next queue to service */
+ rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
+ (rxq->queue_id + 1) : rxq->queue_base;
+
+ /* determine how many slots are available in the free queue */
+ count = avp_fifo_free_count(free_q);
+
+ /* determine how many packets are available in the rx queue */
+ avail = avp_fifo_count(rx_q);
+
+ /* determine how many packets can be received */
+ count = RTE_MIN(count, avail);
+ count = RTE_MIN(count, nb_pkts);
+ count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);
+
+ if (unlikely(count == 0)) {
+ /* no free buffers, or no buffers on the rx queue */
+ return 0;
+ }
+
+ /* retrieve pending packets */
+ n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
+ PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
+ count, rx_q);
+
+ count = 0;
+ for (i = 0; i < n; i++) {
+ /* prefetch next entry while processing current one */
+ if (i + 1 < n) {
+ pkt_buf = avp_dev_translate_buffer(avp,
+ avp_bufs[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+ buf = avp_bufs[i];
+
+ /* Peek into the first buffer to determine the total length */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ buf_len = pkt_buf->pkt_len;
+
+ /* Allocate enough mbufs to receive the entire packet */
+ required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
+ if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {
+ rxq->dev_data->rx_mbuf_alloc_failed++;
+ continue;
+ }
+
+ /* Copy the data from the buffers to our mbufs */
+ m = avp_dev_copy_from_buffers(avp, buf, mbufs, required);
+
+ /* finalize mbuf */
+ m->port = port_id;
+
+ if (_avp_mac_filter(avp, m) != 0) {
+ /* silently discard packets not destined to our MAC */
+ rte_pktmbuf_free(m);
+ continue;
+ }
+
+ /* return new mbuf to caller */
+ rx_pkts[count++] = m;
+ rxq->bytes += buf_len;
+ }
+
+ rxq->packets += count;
+
+ /* return the buffers to the free queue */
+ avp_fifo_put(free_q, (void **)&avp_bufs[0], n);
+
+ return count;
+}
+
+
+static uint16_t
+avp_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avp_queue *rxq = (struct avp_queue *)rx_queue;
+ struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
+ struct avp_dev *avp = rxq->avp;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_fifo *free_q;
+ struct rte_avp_fifo *rx_q;
+ unsigned int count, avail, n;
+ unsigned int pkt_len;
+ struct rte_mbuf *m;
+ char *pkt_data;
+ unsigned int i;
+
+ if (unlikely(avp->flags & AVP_F_DETACHED)) {
+ /* VM live migration in progress */
+ return 0;
+ }
+
+ rx_q = avp->rx_q[rxq->queue_id];
+ free_q = avp->free_q[rxq->queue_id];
+
+ /* setup next queue to service */
+ rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
+ (rxq->queue_id + 1) : rxq->queue_base;
+
+ /* determine how many slots are available in the free queue */
+ count = avp_fifo_free_count(free_q);
+
+ /* determine how many packets are available in the rx queue */
+ avail = avp_fifo_count(rx_q);
+
+ /* determine how many packets can be received */
+ count = RTE_MIN(count, avail);
+ count = RTE_MIN(count, nb_pkts);
+ count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);
+
+ if (unlikely(count == 0)) {
+ /* no free buffers, or no buffers on the rx queue */
+ return 0;
+ }
+
+ /* retrieve pending packets */
+ n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
+ PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
+ count, rx_q);
+
+ count = 0;
+ for (i = 0; i < n; i++) {
+ /* prefetch next entry while processing current one */
+ if (i < n - 1) {
+ pkt_buf = avp_dev_translate_buffer(avp,
+ avp_bufs[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+
+ /* Adjust host pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ pkt_len = pkt_buf->pkt_len;
+
+ if (unlikely((pkt_len > avp->guest_mbuf_size) ||
+ (pkt_buf->nb_segs > 1))) {
+ /*
+ * application should be using the scattered receive
+ * function
+ */
+ rxq->errors++;
+ continue;
+ }
+
+ /* process each packet to be transmitted */
+ m = rte_pktmbuf_alloc(avp->pool);
+ if (unlikely(m == NULL)) {
+ rxq->dev_data->rx_mbuf_alloc_failed++;
+ continue;
+ }
+
+ /* copy data out of the host buffer to our buffer */
+ m->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);
+
+ /* initialize the local mbuf */
+ rte_pktmbuf_data_len(m) = pkt_len;
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+ m->port = avp->port_id;
+
+ if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
+ m->ol_flags = PKT_RX_VLAN;
+ m->vlan_tci = pkt_buf->vlan_tci;
+ }
+
+ if (_avp_mac_filter(avp, m) != 0) {
+ /* silently discard packets not destined to our MAC */
+ rte_pktmbuf_free(m);
+ continue;
+ }
+
+ /* return new mbuf to caller */
+ rx_pkts[count++] = m;
+ rxq->bytes += pkt_len;
+ }
+
+ rxq->packets += count;
+
+ /* return the buffers to the free queue */
+ avp_fifo_put(free_q, (void **)&avp_bufs[0], n);
+
+ return count;
+}
+
+/*
+ * Copy a chained mbuf to a set of host buffers. This function assumes that
+ * there are sufficient destination buffers to contain the entire source
+ * packet.
+ */
+static inline uint16_t
+avp_dev_copy_to_buffers(struct avp_dev *avp,
+ struct rte_mbuf *mbuf,
+ struct rte_avp_desc **buffers,
+ unsigned int count)
+{
+ struct rte_avp_desc *previous_buf = NULL;
+ struct rte_avp_desc *first_buf = NULL;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_desc *buf;
+ size_t total_length;
+ struct rte_mbuf *m;
+ size_t copy_length;
+ size_t src_offset;
+ char *pkt_data;
+ unsigned int i;
+
+ __rte_mbuf_sanity_check(mbuf, 1);
+
+ m = mbuf;
+ src_offset = 0;
+ total_length = rte_pktmbuf_pkt_len(m);
+ for (i = 0; (i < count) && (m != NULL); i++) {
+ /* fill each destination buffer */
+ buf = buffers[i];
+
+ if (i < count - 1) {
+ /* prefetch next entry while processing this one */
+ pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+
+ /* Adjust pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+
+ /* setup the buffer chain */
+ if (previous_buf != NULL)
+ previous_buf->next = buf;
+ else
+ first_buf = pkt_buf;
+
+ previous_buf = pkt_buf;
+
+ do {
+ /*
+ * copy as many source mbuf segments as will fit in the
+ * destination buffer.
+ */
+ copy_length = RTE_MIN((avp->host_mbuf_size -
+ pkt_buf->data_len),
+ (rte_pktmbuf_data_len(m) -
+ src_offset));
+ rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len),
+ RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
+ src_offset),
+ copy_length);
+ pkt_buf->data_len += copy_length;
+ src_offset += copy_length;
+
+ if (likely(src_offset == rte_pktmbuf_data_len(m))) {
+ /* need a new source buffer */
+ m = m->next;
+ src_offset = 0;
+ }
+
+ if (unlikely(pkt_buf->data_len ==
+ avp->host_mbuf_size)) {
+ /* need a new destination buffer */
+ break;
+ }
+
+ } while (m != NULL);
+ }
+
+ first_buf->nb_segs = count;
+ first_buf->pkt_len = total_length;
+
+ if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
+ first_buf->vlan_tci = mbuf->vlan_tci;
+ }
+
+ avp_dev_buffer_sanity_check(avp, buffers[0]);
+
+ return total_length;
+}
+
+
+static uint16_t
+avp_xmit_scattered_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST *
+ RTE_AVP_MAX_MBUF_SEGMENTS)];
+ struct avp_queue *txq = (struct avp_queue *)tx_queue;
+ struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST];
+ struct avp_dev *avp = txq->avp;
+ struct rte_avp_fifo *alloc_q;
+ struct rte_avp_fifo *tx_q;
+ unsigned int count, avail, n;
+ unsigned int orig_nb_pkts;
+ struct rte_mbuf *m;
+ unsigned int required;
+ unsigned int segments;
+ unsigned int tx_bytes;
+ unsigned int i;
+
+ orig_nb_pkts = nb_pkts;
+ if (unlikely(avp->flags & AVP_F_DETACHED)) {
+ /* VM live migration in progress */
+ /* TODO ... buffer for X packets then drop? */
+ txq->errors += nb_pkts;
+ return 0;
+ }
+
+ tx_q = avp->tx_q[txq->queue_id];
+ alloc_q = avp->alloc_q[txq->queue_id];
+
+ /* limit the number of transmitted packets to the max burst size */
+ if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
+ nb_pkts = AVP_MAX_TX_BURST;
+
+ /* determine how many buffers are available to copy into */
+ avail = avp_fifo_count(alloc_q);
+ if (unlikely(avail > (AVP_MAX_TX_BURST *
+ RTE_AVP_MAX_MBUF_SEGMENTS)))
+ avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS;
+
+ /* determine how many slots are available in the transmit queue */
+ count = avp_fifo_free_count(tx_q);
+
+ /* determine how many packets can be sent */
+ nb_pkts = RTE_MIN(count, nb_pkts);
+
+ /* determine how many packets will fit in the available buffers */
+ count = 0;
+ segments = 0;
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ if (likely(i < (unsigned int)nb_pkts - 1)) {
+ /* prefetch next entry while processing this one */
+ rte_prefetch0(tx_pkts[i + 1]);
+ }
+ required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
+ avp->host_mbuf_size;
+
+ if (unlikely((required == 0) ||
+ (required > RTE_AVP_MAX_MBUF_SEGMENTS)))
+ break;
+ else if (unlikely(required + segments > avail))
+ break;
+ segments += required;
+ count++;
+ }
+ nb_pkts = count;
+
+ if (unlikely(nb_pkts == 0)) {
+ /* no available buffers, or no space on the tx queue */
+ txq->errors += orig_nb_pkts;
+ return 0;
+ }
+
+ PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
+ nb_pkts, tx_q);
+
+ /* retrieve sufficient send buffers */
+ n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments);
+ if (unlikely(n != segments)) {
+ PMD_TX_LOG(DEBUG, "Failed to allocate buffers "
+ "n=%u, segments=%u, orig=%u\n",
+ n, segments, orig_nb_pkts);
+ txq->errors += orig_nb_pkts;
+ return 0;
+ }
+
+ tx_bytes = 0;
+ count = 0;
+ for (i = 0; i < nb_pkts; i++) {
+ /* process each packet to be transmitted */
+ m = tx_pkts[i];
+
+ /* determine how many buffers are required for this packet */
+ required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
+ avp->host_mbuf_size;
+
+ tx_bytes += avp_dev_copy_to_buffers(avp, m,
+ &avp_bufs[count], required);
+ tx_bufs[i] = avp_bufs[count];
+ count += required;
+
+ /* free the original mbuf */
+ rte_pktmbuf_free(m);
+ }
+
+ txq->packets += nb_pkts;
+ txq->bytes += tx_bytes;
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
+ for (i = 0; i < nb_pkts; i++)
+ avp_dev_buffer_sanity_check(avp, tx_bufs[i]);
+#endif
+
+ /* send the packets */
+ n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts);
+ if (unlikely(n != orig_nb_pkts))
+ txq->errors += (orig_nb_pkts - n);
+
+ return n;
+}
+
+
+static uint16_t
+avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct avp_queue *txq = (struct avp_queue *)tx_queue;
+ struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST];
+ struct avp_dev *avp = txq->avp;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_fifo *alloc_q;
+ struct rte_avp_fifo *tx_q;
+ unsigned int count, avail, n;
+ struct rte_mbuf *m;
+ unsigned int pkt_len;
+ unsigned int tx_bytes;
+ char *pkt_data;
+ unsigned int i;
+
+ if (unlikely(avp->flags & AVP_F_DETACHED)) {
+ /* VM live migration in progress */
+ /* TODO ... buffer for X packets then drop?! */
+ txq->errors++;
+ return 0;
+ }
+
+ tx_q = avp->tx_q[txq->queue_id];
+ alloc_q = avp->alloc_q[txq->queue_id];
+
+ /* limit the number of transmitted packets to the max burst size */
+ if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
+ nb_pkts = AVP_MAX_TX_BURST;
+
+ /* determine how many buffers are available to copy into */
+ avail = avp_fifo_count(alloc_q);
+
+ /* determine how many slots are available in the transmit queue */
+ count = avp_fifo_free_count(tx_q);
+
+ /* determine how many packets can be sent */
+ count = RTE_MIN(count, avail);
+ count = RTE_MIN(count, nb_pkts);
+
+ if (unlikely(count == 0)) {
+ /* no available buffers, or no space on the tx queue */
+ txq->errors += nb_pkts;
+ return 0;
+ }
+
+ PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
+ count, tx_q);
+
+ /* retrieve sufficient send buffers */
+ n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count);
+ if (unlikely(n != count)) {
+ txq->errors++;
+ return 0;
+ }
+
+ tx_bytes = 0;
+ for (i = 0; i < count; i++) {
+ /* prefetch next entry while processing the current one */
+ if (i < count - 1) {
+ pkt_buf = avp_dev_translate_buffer(avp,
+ avp_bufs[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+
+ /* process each packet to be transmitted */
+ m = tx_pkts[i];
+
+ /* Adjust pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ pkt_len = rte_pktmbuf_pkt_len(m);
+
+ if (unlikely((pkt_len > avp->guest_mbuf_size) ||
+ (pkt_len > avp->host_mbuf_size))) {
+ /*
+ * application should be using the scattered transmit
+ * function; send it truncated to avoid the performance
+ * hit of having to manage returning the already
+ * allocated buffer to the free list. This should not
+ * happen since the application should have set the
+ * max_rx_pkt_len based on its MTU and it should be
+ * policing its own packet sizes.
+ */
+ txq->errors++;
+ pkt_len = RTE_MIN(avp->guest_mbuf_size,
+ avp->host_mbuf_size);
+ }
+
+ /* copy data out of our mbuf and into the AVP buffer */
+ rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len);
+ pkt_buf->pkt_len = pkt_len;
+ pkt_buf->data_len = pkt_len;
+ pkt_buf->nb_segs = 1;
+ pkt_buf->next = NULL;
+
+ if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
+ pkt_buf->vlan_tci = m->vlan_tci;
+ }
+
+ tx_bytes += pkt_len;
+
+ /* free the original mbuf */
+ rte_pktmbuf_free(m);
+ }
+
+ txq->packets += count;
+ txq->bytes += tx_bytes;
+
+ /* send the packets */
+ n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count);
+
+ return n;
+}
+
+static void
+avp_dev_rx_queue_release(void *rx_queue)
+{
+ struct avp_queue *rxq = (struct avp_queue *)rx_queue;
+ struct avp_dev *avp = rxq->avp;
+ struct rte_eth_dev_data *data = avp->dev_data;
+ unsigned int i;
+
+ for (i = 0; i < avp->num_rx_queues; i++) {
+ if (data->rx_queues[i] == rxq)
+ data->rx_queues[i] = NULL;
+ }
+}
+
+static void
+avp_dev_tx_queue_release(void *tx_queue)
+{
+ struct avp_queue *txq = (struct avp_queue *)tx_queue;
+ struct avp_dev *avp = txq->avp;
+ struct rte_eth_dev_data *data = avp->dev_data;
+ unsigned int i;
+
+ for (i = 0; i < avp->num_tx_queues; i++) {
+ if (data->tx_queues[i] == txq)
+ data->tx_queues[i] = NULL;
+ }
+}
+
+static int
+avp_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_device_info *host_info;
+ struct rte_avp_device_config config;
+ int mask = 0;
+ void *addr;
+ int ret;
+
+ rte_spinlock_lock(&avp->lock);
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ ret = -ENOTSUP;
+ goto unlock;
+ }
+
+ addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
+ host_info = (struct rte_avp_device_info *)addr;
+
+ /* Setup required number of queues */
+ _avp_set_queue_counts(eth_dev);
+
+ mask = (ETH_VLAN_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK);
+ ret = avp_vlan_offload_set(eth_dev, mask);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
+ ret);
+ goto unlock;
+ }
+
+ /* update device config */
+ memset(&config, 0, sizeof(config));
+ config.device_id = host_info->device_id;
+ config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
+ config.driver_version = AVP_DPDK_DRIVER_VERSION;
+ config.features = avp->features;
+ config.num_tx_queues = avp->num_tx_queues;
+ config.num_rx_queues = avp->num_rx_queues;
+
+ ret = avp_dev_ctrl_set_config(eth_dev, &config);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
+ ret);
+ goto unlock;
+ }
+
+ avp->flags |= AVP_F_CONFIGURED;
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+ return ret;
+}
+
+static int
+avp_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&avp->lock);
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ ret = -ENOTSUP;
+ goto unlock;
+ }
+
+ /* update link state */
+ ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
+ ret);
+ goto unlock;
+ }
+
+ /* remember current link state */
+ avp->flags |= AVP_F_LINKUP;
+
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+ return ret;
+}
+
+static void
+avp_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&avp->lock);
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ goto unlock;
+ }
+
+ /* remember current link state */
+ avp->flags &= ~AVP_F_LINKUP;
+
+ /* update link state */
+ ret = avp_dev_ctrl_set_link_state(eth_dev, 0);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
+ ret);
+ }
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+}
+
+static void
+avp_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&avp->lock);
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ goto unlock;
+ }
+
+ /* remember current link state */
+ avp->flags &= ~AVP_F_LINKUP;
+ avp->flags &= ~AVP_F_CONFIGURED;
+
+ ret = avp_dev_disable_interrupts(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to disable interrupts\n");
+ /* continue */
+ }
+
+ /* update device state */
+ ret = avp_dev_ctrl_shutdown(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Device shutdown failed by host, ret=%d\n",
+ ret);
+ /* continue */
+ }
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+}
+
+static int
+avp_dev_link_update(struct rte_eth_dev *eth_dev,
+ __rte_unused int wait_to_complete)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_eth_link *link = &eth_dev->data->dev_link;
+
+ link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_status = !!(avp->flags & AVP_F_LINKUP);
+
+ return -1;
+}
+
+static void
+avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ rte_spinlock_lock(&avp->lock);
+ if ((avp->flags & AVP_F_PROMISC) == 0) {
+ avp->flags |= AVP_F_PROMISC;
+ PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
+ eth_dev->data->port_id);
+ }
+ rte_spinlock_unlock(&avp->lock);
+}
+
+static void
+avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ rte_spinlock_lock(&avp->lock);
+ if ((avp->flags & AVP_F_PROMISC) != 0) {
+ avp->flags &= ~AVP_F_PROMISC;
+ PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
+ eth_dev->data->port_id);
+ }
+ rte_spinlock_unlock(&avp->lock);
+}
+
+static void
+avp_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ dev_info->max_rx_queues = avp->max_rx_queues;
+ dev_info->max_tx_queues = avp->max_tx_queues;
+ dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
+ dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
+ dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
+ if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+ }
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_CRC_STRIP;
+}
+
+static int
+avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+ uint64_t offloads = dev_conf->rxmode.offloads;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
+ if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
+ else
+ avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
+ } else {
+ PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
+ }
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
+ }
+
+ return 0;
+}
+
+static int
+avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ unsigned int i;
+
+ for (i = 0; i < avp->num_rx_queues; i++) {
+ struct avp_queue *rxq = avp->dev_data->rx_queues[i];
+
+ if (rxq) {
+ stats->ipackets += rxq->packets;
+ stats->ibytes += rxq->bytes;
+ stats->ierrors += rxq->errors;
+
+ stats->q_ipackets[i] += rxq->packets;
+ stats->q_ibytes[i] += rxq->bytes;
+ stats->q_errors[i] += rxq->errors;
+ }
+ }
+
+ for (i = 0; i < avp->num_tx_queues; i++) {
+ struct avp_queue *txq = avp->dev_data->tx_queues[i];
+
+ if (txq) {
+ stats->opackets += txq->packets;
+ stats->obytes += txq->bytes;
+ stats->oerrors += txq->errors;
+
+ stats->q_opackets[i] += txq->packets;
+ stats->q_obytes[i] += txq->bytes;
+ stats->q_errors[i] += txq->errors;
+ }
+ }
+
+ return 0;
+}
+
+static void
+avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ unsigned int i;
+
+ for (i = 0; i < avp->num_rx_queues; i++) {
+ struct avp_queue *rxq = avp->dev_data->rx_queues[i];
+
+ if (rxq) {
+ rxq->bytes = 0;
+ rxq->packets = 0;
+ rxq->errors = 0;
+ }
+ }
+
+ for (i = 0; i < avp->num_tx_queues; i++) {
+ struct avp_queue *txq = avp->dev_data->tx_queues[i];
+
+ if (txq) {
+ txq->bytes = 0;
+ txq->packets = 0;
+ txq->errors = 0;
+ }
+ }
+}
+
+RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);
+
+RTE_INIT(avp_init_log)
+{
+ avp_logtype_driver = rte_log_register("pmd.net.avp.driver");
+ if (avp_logtype_driver >= 0)
+ rte_log_set_level(avp_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/avp/avp_logs.h b/src/spdk/dpdk/drivers/net/avp/avp_logs.h
new file mode 100644
index 00000000..6e297c7a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avp/avp_logs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2017 Wind River Systems, Inc.
+ */
+
+#ifndef _AVP_LOGS_H_
+#define _AVP_LOGS_H_
+
+#include <rte_log.h>
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() rx: " fmt, __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() tx: " fmt, __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+extern int avp_logtype_driver;
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, avp_logtype_driver, \
+ "%s(): " fmt, __func__, ## args)
+
+#endif /* _AVP_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avp/meson.build b/src/spdk/dpdk/drivers/net/avp/meson.build
new file mode 100644
index 00000000..6076c31b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avp/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('avp_ethdev.c')
+install_headers('rte_avp_common.h', 'rte_avp_fifo.h')
diff --git a/src/spdk/dpdk/drivers/net/avp/rte_avp_common.h b/src/spdk/dpdk/drivers/net/avp/rte_avp_common.h
new file mode 100644
index 00000000..aa95159c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avp/rte_avp_common.h
@@ -0,0 +1,382 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1)
+ * Copyright(c) 2010-2013 Intel Corporation.
+ * Copyright(c) 2014-2017 Wind River Systems, Inc.
+ */
+
+#ifndef _RTE_AVP_COMMON_H_
+#define _RTE_AVP_COMMON_H_
+
+#ifdef __KERNEL__
+#include <linux/if.h>
+#define RTE_STD_C11
+#else
+#include <stdint.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_memory.h>
+#include <rte_ether.h>
+#include <rte_atomic.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * AVP name is part of network device name.
+ */
+#define RTE_AVP_NAMESIZE 32
+
+/**
+ * AVP alias is a user-defined value used for lookups from secondary
+ * processes. Typically, this is a UUID.
+ */
+#define RTE_AVP_ALIASSIZE 128
+
+/*
+ * Request id.
+ */
+enum rte_avp_req_id {
+ RTE_AVP_REQ_UNKNOWN = 0,
+ RTE_AVP_REQ_CHANGE_MTU,
+ RTE_AVP_REQ_CFG_NETWORK_IF,
+ RTE_AVP_REQ_CFG_DEVICE,
+ RTE_AVP_REQ_SHUTDOWN_DEVICE,
+ RTE_AVP_REQ_MAX,
+};
+
+/**@{ AVP device driver types */
+#define RTE_AVP_DRIVER_TYPE_UNKNOWN 0
+#define RTE_AVP_DRIVER_TYPE_DPDK 1
+#define RTE_AVP_DRIVER_TYPE_KERNEL 2
+#define RTE_AVP_DRIVER_TYPE_QEMU 3
+/**@} */
+
+/**@{ AVP device operational modes */
+#define RTE_AVP_MODE_HOST 0 /**< AVP interface created in host */
+#define RTE_AVP_MODE_GUEST 1 /**< AVP interface created for export to guest */
+#define RTE_AVP_MODE_TRACE 2 /**< AVP interface created for packet tracing */
+/**@} */
+
+/*
+ * Structure for AVP queue configuration query request/result
+ */
+struct rte_avp_device_config {
+ uint64_t device_id; /**< Unique system identifier */
+ uint32_t driver_type; /**< Device Driver type */
+ uint32_t driver_version; /**< Device Driver version */
+ uint32_t features; /**< Negotiated features */
+ uint16_t num_tx_queues; /**< Number of active transmit queues */
+ uint16_t num_rx_queues; /**< Number of active receive queues */
+ uint8_t if_up; /**< 1: interface up, 0: interface down */
+} __attribute__ ((__packed__));
+
+/*
+ * Structure for AVP request.
+ */
+struct rte_avp_request {
+ uint32_t req_id; /**< Request id */
+ RTE_STD_C11
+ union {
+ uint32_t new_mtu; /**< New MTU */
+ uint8_t if_up; /**< 1: interface up, 0: interface down */
+ struct rte_avp_device_config config; /**< Queue configuration */
+ };
+ int32_t result; /**< Result for processing request */
+} __attribute__ ((__packed__));
+
+/*
+ * FIFO struct mapped in a shared memory. It describes a circular buffer FIFO
+ * Write and read should wrap around. FIFO is empty when write == read
+ * Writing should never overwrite the read position
+ */
+struct rte_avp_fifo {
+ volatile unsigned int write; /**< Next position to be written*/
+ volatile unsigned int read; /**< Next position to be read */
+ unsigned int len; /**< Circular buffer length */
+ unsigned int elem_size; /**< Pointer size - for 32/64 bit OS */
+ void *volatile buffer[]; /**< The buffer contains mbuf pointers */
+};
+
+
+/*
+ * AVP packet buffer header used to define the exchange of packet data.
+ */
+struct rte_avp_desc {
+ uint64_t pad0;
+ void *pkt_mbuf; /**< Reference to packet mbuf */
+ uint8_t pad1[14];
+ uint16_t ol_flags; /**< Offload features. */
+ void *next; /**< Reference to next buffer in chain */
+ void *data; /**< Start address of data in segment buffer. */
+ uint16_t data_len; /**< Amount of data in segment buffer. */
+ uint8_t nb_segs; /**< Number of segments */
+ uint8_t pad2;
+ uint16_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
+ uint32_t pad3;
+ uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order). */
+ uint32_t pad4;
+} __attribute__ ((__aligned__(RTE_CACHE_LINE_SIZE), __packed__));
+
+
+/**{ AVP device features */
+#define RTE_AVP_FEATURE_VLAN_OFFLOAD (1 << 0) /**< Emulated HW VLAN offload */
+/**@} */
+
+
+/**@{ Offload feature flags */
+#define RTE_AVP_TX_VLAN_PKT 0x0001 /**< TX packet is a 802.1q VLAN packet. */
+#define RTE_AVP_RX_VLAN_PKT 0x0800 /**< RX packet is a 802.1q VLAN packet. */
+/**@} */
+
+
+/**@{ AVP PCI identifiers */
+#define RTE_AVP_PCI_VENDOR_ID 0x1af4
+#define RTE_AVP_PCI_DEVICE_ID 0x1110
+/**@} */
+
+/**@{ AVP PCI subsystem identifiers */
+#define RTE_AVP_PCI_SUB_VENDOR_ID RTE_AVP_PCI_VENDOR_ID
+#define RTE_AVP_PCI_SUB_DEVICE_ID 0x1104
+/**@} */
+
+/**@{ AVP PCI BAR definitions */
+#define RTE_AVP_PCI_MMIO_BAR 0
+#define RTE_AVP_PCI_MSIX_BAR 1
+#define RTE_AVP_PCI_MEMORY_BAR 2
+#define RTE_AVP_PCI_MEMMAP_BAR 4
+#define RTE_AVP_PCI_DEVICE_BAR 5
+#define RTE_AVP_PCI_MAX_BAR 6
+/**@} */
+
+/**@{ AVP PCI BAR name definitions */
+#define RTE_AVP_MMIO_BAR_NAME "avp-mmio"
+#define RTE_AVP_MSIX_BAR_NAME "avp-msix"
+#define RTE_AVP_MEMORY_BAR_NAME "avp-memory"
+#define RTE_AVP_MEMMAP_BAR_NAME "avp-memmap"
+#define RTE_AVP_DEVICE_BAR_NAME "avp-device"
+/**@} */
+
+/**@{ AVP PCI MSI-X vectors */
+#define RTE_AVP_MIGRATION_MSIX_VECTOR 0 /**< Migration interrupts */
+#define RTE_AVP_MAX_MSIX_VECTORS 1
+/**@} */
+
+/**@} AVP Migration status/ack register values */
+#define RTE_AVP_MIGRATION_NONE 0 /**< Migration never executed */
+#define RTE_AVP_MIGRATION_DETACHED 1 /**< Device attached during migration */
+#define RTE_AVP_MIGRATION_ATTACHED 2 /**< Device reattached during migration */
+#define RTE_AVP_MIGRATION_ERROR 3 /**< Device failed to attach/detach */
+/**@} */
+
+/**@} AVP MMIO Register Offsets */
+#define RTE_AVP_REGISTER_BASE 0
+#define RTE_AVP_INTERRUPT_MASK_OFFSET (RTE_AVP_REGISTER_BASE + 0)
+#define RTE_AVP_INTERRUPT_STATUS_OFFSET (RTE_AVP_REGISTER_BASE + 4)
+#define RTE_AVP_MIGRATION_STATUS_OFFSET (RTE_AVP_REGISTER_BASE + 8)
+#define RTE_AVP_MIGRATION_ACK_OFFSET (RTE_AVP_REGISTER_BASE + 12)
+/**@} */
+
+/**@} AVP Interrupt Status Mask */
+#define RTE_AVP_MIGRATION_INTERRUPT_MASK (1 << 1)
+#define RTE_AVP_APP_INTERRUPTS_MASK 0xFFFFFFFF
+#define RTE_AVP_NO_INTERRUPTS_MASK 0
+/**@} */
+
+/*
+ * Maximum number of memory regions to export
+ */
+#define RTE_AVP_MAX_MAPS 2048
+
+/*
+ * Description of a single memory region
+ */
+struct rte_avp_memmap {
+ void *addr;
+ rte_iova_t phys_addr;
+ uint64_t length;
+};
+
+/*
+ * AVP memory mapping validation marker
+ */
+#define RTE_AVP_MEMMAP_MAGIC 0x20131969
+
+/**@{ AVP memory map versions */
+#define RTE_AVP_MEMMAP_VERSION_1 1
+#define RTE_AVP_MEMMAP_VERSION RTE_AVP_MEMMAP_VERSION_1
+/**@} */
+
+/*
+ * Defines a list of memory regions exported from the host to the guest
+ */
+struct rte_avp_memmap_info {
+ uint32_t magic; /**< Memory validation marker */
+ uint32_t version; /**< Data format version */
+ uint32_t nb_maps;
+ struct rte_avp_memmap maps[RTE_AVP_MAX_MAPS];
+};
+
+/*
+ * AVP device memory validation marker
+ */
+#define RTE_AVP_DEVICE_MAGIC 0x20131975
+
+/**@{ AVP device map versions
+ * WARNING: do not change the format or names of these variables. They are
+ * automatically parsed from the build system to generate the SDK package
+ * name.
+ **/
+#define RTE_AVP_RELEASE_VERSION_1 1
+#define RTE_AVP_RELEASE_VERSION RTE_AVP_RELEASE_VERSION_1
+#define RTE_AVP_MAJOR_VERSION_0 0
+#define RTE_AVP_MAJOR_VERSION_1 1
+#define RTE_AVP_MAJOR_VERSION_2 2
+#define RTE_AVP_MAJOR_VERSION RTE_AVP_MAJOR_VERSION_2
+#define RTE_AVP_MINOR_VERSION_0 0
+#define RTE_AVP_MINOR_VERSION_1 1
+#define RTE_AVP_MINOR_VERSION_13 13
+#define RTE_AVP_MINOR_VERSION RTE_AVP_MINOR_VERSION_13
+/**@} */
+
+
+/**
+ * Generates a 32-bit version number from the specified version number
+ * components
+ */
+#define RTE_AVP_MAKE_VERSION(_release, _major, _minor) \
+((((_release) & 0xffff) << 16) | (((_major) & 0xff) << 8) | ((_minor) & 0xff))
+
+
+/**
+ * Represents the current version of the AVP host driver
+ * WARNING: in the current development branch the host and guest driver
+ * version should always be the same. When patching guest features back to
+ * GA releases the host version number should not be updated unless there was
+ * an actual change made to the host driver.
+ */
+#define RTE_AVP_CURRENT_HOST_VERSION \
+RTE_AVP_MAKE_VERSION(RTE_AVP_RELEASE_VERSION_1, \
+ RTE_AVP_MAJOR_VERSION_0, \
+ RTE_AVP_MINOR_VERSION_1)
+
+
+/**
+ * Represents the current version of the AVP guest drivers
+ */
+#define RTE_AVP_CURRENT_GUEST_VERSION \
+RTE_AVP_MAKE_VERSION(RTE_AVP_RELEASE_VERSION_1, \
+ RTE_AVP_MAJOR_VERSION_2, \
+ RTE_AVP_MINOR_VERSION_13)
+
+/**
+ * Access AVP device version values
+ */
+#define RTE_AVP_GET_RELEASE_VERSION(_version) (((_version) >> 16) & 0xffff)
+#define RTE_AVP_GET_MAJOR_VERSION(_version) (((_version) >> 8) & 0xff)
+#define RTE_AVP_GET_MINOR_VERSION(_version) ((_version) & 0xff)
+/**@}*/
+
+
+/**
+ * Remove the minor version number so that only the release and major versions
+ * are used for comparisons.
+ */
+#define RTE_AVP_STRIP_MINOR_VERSION(_version) ((_version) >> 8)
+
+
+/**
+ * Defines the number of mbuf pools supported per device (1 per socket)
+ */
+#define RTE_AVP_MAX_MEMPOOLS 8
+
+/*
+ * Defines address translation parameters for each support mbuf pool
+ */
+struct rte_avp_mempool_info {
+ void *addr;
+ rte_iova_t phys_addr;
+ uint64_t length;
+};
+
+/*
+ * Struct used to create a AVP device. Passed to the kernel in IOCTL call or
+ * via inter-VM shared memory when used in a guest.
+ */
+struct rte_avp_device_info {
+ uint32_t magic; /**< Memory validation marker */
+ uint32_t version; /**< Data format version */
+
+ char ifname[RTE_AVP_NAMESIZE]; /**< Network device name for AVP */
+
+ rte_iova_t tx_phys;
+ rte_iova_t rx_phys;
+ rte_iova_t alloc_phys;
+ rte_iova_t free_phys;
+
+ uint32_t features; /**< Supported feature bitmap */
+ uint8_t min_rx_queues; /**< Minimum supported receive/free queues */
+ uint8_t num_rx_queues; /**< Recommended number of receive/free queues */
+ uint8_t max_rx_queues; /**< Maximum supported receive/free queues */
+ uint8_t min_tx_queues; /**< Minimum supported transmit/alloc queues */
+ uint8_t num_tx_queues;
+ /**< Recommended number of transmit/alloc queues */
+ uint8_t max_tx_queues; /**< Maximum supported transmit/alloc queues */
+
+ uint32_t tx_size; /**< Size of each transmit queue */
+ uint32_t rx_size; /**< Size of each receive queue */
+ uint32_t alloc_size; /**< Size of each alloc queue */
+ uint32_t free_size; /**< Size of each free queue */
+
+ /* Used by Ethtool */
+ rte_iova_t req_phys;
+ rte_iova_t resp_phys;
+ rte_iova_t sync_phys;
+ void *sync_va;
+
+ /* mbuf mempool (used when a single memory area is supported) */
+ void *mbuf_va;
+ rte_iova_t mbuf_phys;
+
+ /* mbuf mempools */
+ struct rte_avp_mempool_info pool[RTE_AVP_MAX_MEMPOOLS];
+
+#ifdef __KERNEL__
+ /* Ethernet info */
+ char ethaddr[ETH_ALEN];
+#else
+ char ethaddr[ETHER_ADDR_LEN];
+#endif
+
+ uint8_t mode; /**< device mode, i.e guest, host, trace */
+
+ /* mbuf size */
+ unsigned int mbuf_size;
+
+ /*
+ * unique id to differentiate between two instantiations of the same
+ * AVP device (i.e., the guest needs to know if the device has been
+ * deleted and recreated).
+ */
+ uint64_t device_id;
+
+ uint32_t max_rx_pkt_len; /**< Maximum receive unit size */
+};
+
+#define RTE_AVP_MAX_QUEUES 8 /**< Maximum number of queues per device */
+
+/** Maximum number of chained mbufs in a packet */
+#define RTE_AVP_MAX_MBUF_SEGMENTS 5
+
+#define RTE_AVP_DEVICE "avp"
+
+#define RTE_AVP_IOCTL_TEST _IOWR(0, 1, int)
+#define RTE_AVP_IOCTL_CREATE _IOWR(0, 2, struct rte_avp_device_info)
+#define RTE_AVP_IOCTL_RELEASE _IOWR(0, 3, struct rte_avp_device_info)
+#define RTE_AVP_IOCTL_QUERY _IOWR(0, 4, struct rte_avp_device_config)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_AVP_COMMON_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avp/rte_avp_fifo.h b/src/spdk/dpdk/drivers/net/avp/rte_avp_fifo.h
new file mode 100644
index 00000000..c1658da6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avp/rte_avp_fifo.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1)
+ * Copyright(c) 2010-2013 Intel Corporation.
+ * Copyright(c) 2013-2017 Wind River Systems, Inc.
+ */
+
+#ifndef _RTE_AVP_FIFO_H_
+#define _RTE_AVP_FIFO_H_
+
+#include "rte_avp_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __KERNEL__
+/* Write memory barrier for kernel compiles */
+#define AVP_WMB() smp_wmb()
+/* Read memory barrier for kernel compiles */
+#define AVP_RMB() smp_rmb()
+#else
+/* Write memory barrier for userspace compiles */
+#define AVP_WMB() rte_wmb()
+/* Read memory barrier for userspace compiles */
+#define AVP_RMB() rte_rmb()
+#endif
+
+#ifndef __KERNEL__
+#include <rte_debug.h>
+
+/**
+ * Initializes the avp fifo structure
+ */
+static inline void
+avp_fifo_init(struct rte_avp_fifo *fifo, unsigned int size)
+{
+ /* Ensure size is power of 2 */
+ if (size & (size - 1))
+ rte_panic("AVP fifo size must be power of 2\n");
+
+ fifo->write = 0;
+ fifo->read = 0;
+ fifo->len = size;
+ fifo->elem_size = sizeof(void *);
+}
+#endif
+
+/**
+ * Adds num elements into the fifo. Return the number actually written
+ */
+static inline unsigned
+avp_fifo_put(struct rte_avp_fifo *fifo, void **data, unsigned int num)
+{
+ unsigned int i = 0;
+ unsigned int fifo_write = fifo->write;
+ unsigned int fifo_read = fifo->read;
+ unsigned int new_write = fifo_write;
+
+ for (i = 0; i < num; i++) {
+ new_write = (new_write + 1) & (fifo->len - 1);
+
+ if (new_write == fifo_read)
+ break;
+ fifo->buffer[fifo_write] = data[i];
+ fifo_write = new_write;
+ }
+ AVP_WMB();
+ fifo->write = fifo_write;
+ return i;
+}
+
+/**
+ * Get up to num elements from the fifo. Return the number actually read
+ */
+static inline unsigned int
+avp_fifo_get(struct rte_avp_fifo *fifo, void **data, unsigned int num)
+{
+ unsigned int i = 0;
+ unsigned int new_read = fifo->read;
+ unsigned int fifo_write = fifo->write;
+
+ if (new_read == fifo_write)
+ return 0; /* empty */
+
+ for (i = 0; i < num; i++) {
+ if (new_read == fifo_write)
+ break;
+
+ data[i] = fifo->buffer[new_read];
+ new_read = (new_read + 1) & (fifo->len - 1);
+ }
+ AVP_RMB();
+ fifo->read = new_read;
+ return i;
+}
+
+/**
+ * Get the num of elements in the fifo
+ */
+static inline unsigned int
+avp_fifo_count(struct rte_avp_fifo *fifo)
+{
+ return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
+}
+
+/**
+ * Get the num of available elements in the fifo
+ */
+static inline unsigned int
+avp_fifo_free_count(struct rte_avp_fifo *fifo)
+{
+ return (fifo->read - fifo->write - 1) & (fifo->len - 1);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_AVP_FIFO_H_ */
diff --git a/src/spdk/dpdk/drivers/net/avp/rte_pmd_avp_version.map b/src/spdk/dpdk/drivers/net/avp/rte_pmd_avp_version.map
new file mode 100644
index 00000000..af8f3f47
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/avp/rte_pmd_avp_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/axgbe/Makefile b/src/spdk/dpdk/drivers/net/axgbe/Makefile
new file mode 100644
index 00000000..72215aed
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_axgbe.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_axgbe_version.map
+
+LIBABIVER := 1
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_ethdev
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_mdio.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_phy_impl.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_i2c.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx.c
+ifeq ($(CONFIG_RTE_ARCH_X86),y)
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx_vec_sse.c
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_common.h b/src/spdk/dpdk/drivers/net/axgbe/axgbe_common.h
new file mode 100644
index 00000000..d25d54ca
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_common.h
@@ -0,0 +1,1710 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef __AXGBE_COMMON_H__
+#define __AXGBE_COMMON_H__
+
+#include "axgbe_logs.h"
+
+#include <stdbool.h>
+#include <limits.h>
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <pthread.h>
+
+#include <rte_byteorder.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_hexdump.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_branch_prediction.h>
+#include <rte_eal.h>
+#include <rte_memzone.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_ethdev_pci.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+
+#define BIT(nr) (1 << (nr))
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#define AXGBE_HZ 250
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_AXIARCR 0x3010
+#define DMA_AXIAWCR 0x3018
+#define DMA_AXIAWRCR 0x301c
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+#define EDMA_TX_CONTROL 0x3040
+#define EDMA_RX_CONTROL 0x3044
+
+/* DMA register entry bit positions and sizes */
+#define DMA_AXIARCR_DRC_INDEX 0
+#define DMA_AXIARCR_DRC_WIDTH 4
+#define DMA_AXIARCR_DRD_INDEX 4
+#define DMA_AXIARCR_DRD_WIDTH 2
+#define DMA_AXIARCR_TEC_INDEX 8
+#define DMA_AXIARCR_TEC_WIDTH 4
+#define DMA_AXIARCR_TED_INDEX 12
+#define DMA_AXIARCR_TED_WIDTH 2
+#define DMA_AXIARCR_THC_INDEX 16
+#define DMA_AXIARCR_THC_WIDTH 4
+#define DMA_AXIARCR_THD_INDEX 20
+#define DMA_AXIARCR_THD_WIDTH 2
+#define DMA_AXIAWCR_DWC_INDEX 0
+#define DMA_AXIAWCR_DWC_WIDTH 4
+#define DMA_AXIAWCR_DWD_INDEX 4
+#define DMA_AXIAWCR_DWD_WIDTH 2
+#define DMA_AXIAWCR_RPC_INDEX 8
+#define DMA_AXIAWCR_RPC_WIDTH 4
+#define DMA_AXIAWCR_RPD_INDEX 12
+#define DMA_AXIAWCR_RPD_WIDTH 2
+#define DMA_AXIAWCR_RHC_INDEX 16
+#define DMA_AXIAWCR_RHC_WIDTH 4
+#define DMA_AXIAWCR_RHD_INDEX 20
+#define DMA_AXIAWCR_RHD_WIDTH 2
+#define DMA_AXIAWCR_RDC_INDEX 24
+#define DMA_AXIAWCR_RDC_WIDTH 4
+#define DMA_AXIAWCR_RDD_INDEX 28
+#define DMA_AXIAWCR_RDD_WIDTH 2
+#define DMA_AXIAWRCR_TDWC_INDEX 0
+#define DMA_AXIAWRCR_TDWC_WIDTH 4
+#define DMA_AXIAWRCR_TDWD_INDEX 4
+#define DMA_AXIAWRCR_TDWD_WIDTH 4
+#define DMA_AXIAWRCR_RDRC_INDEX 8
+#define DMA_AXIAWRCR_RDRC_WIDTH 4
+#define DMA_ISR_MACIS_INDEX 17
+#define DMA_ISR_MACIS_WIDTH 1
+#define DMA_ISR_MTLIS_INDEX 16
+#define DMA_ISR_MTLIS_WIDTH 1
+#define DMA_MR_INTM_INDEX 12
+#define DMA_MR_INTM_WIDTH 2
+#define DMA_MR_SWR_INDEX 0
+#define DMA_MR_SWR_WIDTH 1
+#define DMA_SBMR_WR_OSR_INDEX 24
+#define DMA_SBMR_WR_OSR_WIDTH 6
+#define DMA_SBMR_RD_OSR_INDEX 16
+#define DMA_SBMR_RD_OSR_WIDTH 6
+#define DMA_SBMR_AAL_INDEX 12
+#define DMA_SBMR_AAL_WIDTH 1
+#define DMA_SBMR_EAME_INDEX 11
+#define DMA_SBMR_EAME_WIDTH 1
+#define DMA_SBMR_BLEN_256_INDEX 7
+#define DMA_SBMR_BLEN_256_WIDTH 1
+#define DMA_SBMR_BLEN_32_INDEX 4
+#define DMA_SBMR_BLEN_32_WIDTH 1
+#define DMA_SBMR_UNDEF_INDEX 0
+#define DMA_SBMR_UNDEF_WIDTH 1
+
+/* DMA register values */
+#define DMA_DSR_RPS_WIDTH 4
+#define DMA_DSR_TPS_WIDTH 4
+#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
+#define DMA_DSR0_RPS_START 8
+#define DMA_DSR0_TPS_START 12
+#define DMA_DSRX_FIRST_QUEUE 3
+#define DMA_DSRX_INC 4
+#define DMA_DSRX_QPR 4
+#define DMA_DSRX_RPS_START 0
+#define DMA_DSRX_TPS_START 4
+#define DMA_TPS_STOPPED 0x00
+#define DMA_TPS_SUSPENDED 0x06
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_CATDR_LO 0x44
+#define DMA_CH_CARDR_LO 0x4c
+#define DMA_CH_CATBR_HI 0x50
+#define DMA_CH_CATBR_LO 0x54
+#define DMA_CH_CARBR_HI 0x58
+#define DMA_CH_CARBR_LO 0x5c
+#define DMA_CH_SR 0x60
+
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_INDEX 16
+#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_CR_SPH_INDEX 24
+#define DMA_CH_CR_SPH_WIDTH 1
+#define DMA_CH_IER_AIE_INDEX 14
+#define DMA_CH_IER_AIE_WIDTH 1
+#define DMA_CH_IER_FBEE_INDEX 12
+#define DMA_CH_IER_FBEE_WIDTH 1
+#define DMA_CH_IER_NIE_INDEX 15
+#define DMA_CH_IER_NIE_WIDTH 1
+#define DMA_CH_IER_RBUE_INDEX 7
+#define DMA_CH_IER_RBUE_WIDTH 1
+#define DMA_CH_IER_RIE_INDEX 6
+#define DMA_CH_IER_RIE_WIDTH 1
+#define DMA_CH_IER_RSE_INDEX 8
+#define DMA_CH_IER_RSE_WIDTH 1
+#define DMA_CH_IER_TBUE_INDEX 2
+#define DMA_CH_IER_TBUE_WIDTH 1
+#define DMA_CH_IER_TIE_INDEX 0
+#define DMA_CH_IER_TIE_WIDTH 1
+#define DMA_CH_IER_TXSE_INDEX 1
+#define DMA_CH_IER_TXSE_WIDTH 1
+#define DMA_CH_RCR_PBL_INDEX 16
+#define DMA_CH_RCR_PBL_WIDTH 6
+#define DMA_CH_RCR_RBSZ_INDEX 1
+#define DMA_CH_RCR_RBSZ_WIDTH 14
+#define DMA_CH_RCR_SR_INDEX 0
+#define DMA_CH_RCR_SR_WIDTH 1
+#define DMA_CH_RIWT_RWT_INDEX 0
+#define DMA_CH_RIWT_RWT_WIDTH 8
+#define DMA_CH_SR_FBE_INDEX 12
+#define DMA_CH_SR_FBE_WIDTH 1
+#define DMA_CH_SR_RBU_INDEX 7
+#define DMA_CH_SR_RBU_WIDTH 1
+#define DMA_CH_SR_RI_INDEX 6
+#define DMA_CH_SR_RI_WIDTH 1
+#define DMA_CH_SR_RPS_INDEX 8
+#define DMA_CH_SR_RPS_WIDTH 1
+#define DMA_CH_SR_TBU_INDEX 2
+#define DMA_CH_SR_TBU_WIDTH 1
+#define DMA_CH_SR_TI_INDEX 0
+#define DMA_CH_SR_TI_WIDTH 1
+#define DMA_CH_SR_TPS_INDEX 1
+#define DMA_CH_SR_TPS_WIDTH 1
+#define DMA_CH_TCR_OSP_INDEX 4
+#define DMA_CH_TCR_OSP_WIDTH 1
+#define DMA_CH_TCR_PBL_INDEX 16
+#define DMA_CH_TCR_PBL_WIDTH 6
+#define DMA_CH_TCR_ST_INDEX 0
+#define DMA_CH_TCR_ST_WIDTH 1
+#define DMA_CH_TCR_TSE_INDEX 12
+#define DMA_CH_TCR_TSE_WIDTH 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64 /* 8 x 8 */
+#define DMA_PBL_128 128 /* 8 x 16 */
+#define DMA_PBL_256 256 /* 8 x 32 */
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_WTR 0x000c
+#define MAC_HTR0 0x0010
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_IVLANIR 0x0064
+#define MAC_RETMR 0x006c
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_RTSR 0x00b8
+#define MAC_PMTCSR 0x00c0
+#define MAC_RWKPFR 0x00c4
+#define MAC_LPICSR 0x00d0
+#define MAC_LPITCR 0x00d4
+#define MAC_VR 0x0110
+#define MAC_DR 0x0114
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_MDIOSCAR 0x0200
+#define MAC_MDIOSCCDR 0x0204
+#define MAC_MDIOISR 0x0214
+#define MAC_MDIOIER 0x0218
+#define MAC_MDIOCL22R 0x0220
+#define MAC_GPIOCR 0x0278
+#define MAC_GPIOSR 0x027c
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+#define MAC_RSSCR 0x0c80
+#define MAC_RSSAR 0x0c88
+#define MAC_RSSDR 0x0c8c
+#define MAC_TSCR 0x0d00
+#define MAC_SSIR 0x0d04
+#define MAC_STSR 0x0d08
+#define MAC_STNR 0x0d0c
+#define MAC_STSUR 0x0d10
+#define MAC_STNUR 0x0d14
+#define MAC_TSAR 0x0d18
+#define MAC_TSSR 0x0d20
+#define MAC_TXSNR 0x0d30
+#define MAC_TXSSR 0x0d34
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+#define MAC_HTR_INC 4
+
+#define MAC_RQC2_INC 4
+#define MAC_RQC2_Q_PER_REG 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
+#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
+#define MAC_HWF0R_ARPOFFSEL_INDEX 9
+#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
+#define MAC_HWF0R_EEESEL_INDEX 13
+#define MAC_HWF0R_EEESEL_WIDTH 1
+#define MAC_HWF0R_GMIISEL_INDEX 1
+#define MAC_HWF0R_GMIISEL_WIDTH 1
+#define MAC_HWF0R_MGKSEL_INDEX 7
+#define MAC_HWF0R_MGKSEL_WIDTH 1
+#define MAC_HWF0R_MMCSEL_INDEX 8
+#define MAC_HWF0R_MMCSEL_WIDTH 1
+#define MAC_HWF0R_RWKSEL_INDEX 6
+#define MAC_HWF0R_RWKSEL_WIDTH 1
+#define MAC_HWF0R_RXCOESEL_INDEX 16
+#define MAC_HWF0R_RXCOESEL_WIDTH 1
+#define MAC_HWF0R_SAVLANINS_INDEX 27
+#define MAC_HWF0R_SAVLANINS_WIDTH 1
+#define MAC_HWF0R_SMASEL_INDEX 5
+#define MAC_HWF0R_SMASEL_WIDTH 1
+#define MAC_HWF0R_TSSEL_INDEX 12
+#define MAC_HWF0R_TSSEL_WIDTH 1
+#define MAC_HWF0R_TSSTSSEL_INDEX 25
+#define MAC_HWF0R_TSSTSSEL_WIDTH 2
+#define MAC_HWF0R_TXCOESEL_INDEX 14
+#define MAC_HWF0R_TXCOESEL_WIDTH 1
+#define MAC_HWF0R_VLHASH_INDEX 4
+#define MAC_HWF0R_VLHASH_WIDTH 1
+#define MAC_HWF1R_ADDR64_INDEX 14
+#define MAC_HWF1R_ADDR64_WIDTH 2
+#define MAC_HWF1R_ADVTHWORD_INDEX 13
+#define MAC_HWF1R_ADVTHWORD_WIDTH 1
+#define MAC_HWF1R_DBGMEMA_INDEX 19
+#define MAC_HWF1R_DBGMEMA_WIDTH 1
+#define MAC_HWF1R_DCBEN_INDEX 16
+#define MAC_HWF1R_DCBEN_WIDTH 1
+#define MAC_HWF1R_HASHTBLSZ_INDEX 24
+#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
+#define MAC_HWF1R_L3L4FNUM_INDEX 27
+#define MAC_HWF1R_L3L4FNUM_WIDTH 4
+#define MAC_HWF1R_NUMTC_INDEX 21
+#define MAC_HWF1R_NUMTC_WIDTH 3
+#define MAC_HWF1R_RSSEN_INDEX 20
+#define MAC_HWF1R_RSSEN_WIDTH 1
+#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
+#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
+#define MAC_HWF1R_SPHEN_INDEX 17
+#define MAC_HWF1R_SPHEN_WIDTH 1
+#define MAC_HWF1R_TSOEN_INDEX 18
+#define MAC_HWF1R_TSOEN_WIDTH 1
+#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
+#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
+#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
+#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
+#define MAC_HWF2R_PPSOUTNUM_INDEX 24
+#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
+#define MAC_HWF2R_RXCHCNT_INDEX 12
+#define MAC_HWF2R_RXCHCNT_WIDTH 4
+#define MAC_HWF2R_RXQCNT_INDEX 0
+#define MAC_HWF2R_RXQCNT_WIDTH 4
+#define MAC_HWF2R_TXCHCNT_INDEX 18
+#define MAC_HWF2R_TXCHCNT_WIDTH 4
+#define MAC_HWF2R_TXQCNT_INDEX 6
+#define MAC_HWF2R_TXQCNT_WIDTH 4
+#define MAC_IER_TSIE_INDEX 12
+#define MAC_IER_TSIE_WIDTH 1
+#define MAC_ISR_MMCRXIS_INDEX 9
+#define MAC_ISR_MMCRXIS_WIDTH 1
+#define MAC_ISR_MMCTXIS_INDEX 10
+#define MAC_ISR_MMCTXIS_WIDTH 1
+#define MAC_ISR_PMTIS_INDEX 4
+#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_ISR_SMI_INDEX 1
+#define MAC_ISR_SMI_WIDTH 1
+#define MAC_ISR_LSI_INDEX 0
+#define MAC_ISR_LSI_WIDTH 1
+#define MAC_ISR_LS_INDEX 24
+#define MAC_ISR_LS_WIDTH 2
+#define MAC_ISR_TSIS_INDEX 12
+#define MAC_ISR_TSIS_WIDTH 1
+#define MAC_MACA1HR_AE_INDEX 31
+#define MAC_MACA1HR_AE_WIDTH 1
+#define MAC_MDIOIER_SNGLCOMPIE_INDEX 12
+#define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1
+#define MAC_MDIOISR_SNGLCOMPINT_INDEX 12
+#define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1
+#define MAC_MDIOSCAR_DA_INDEX 21
+#define MAC_MDIOSCAR_DA_WIDTH 5
+#define MAC_MDIOSCAR_PA_INDEX 16
+#define MAC_MDIOSCAR_PA_WIDTH 5
+#define MAC_MDIOSCAR_RA_INDEX 0
+#define MAC_MDIOSCAR_RA_WIDTH 16
+#define MAC_MDIOSCAR_REG_INDEX 0
+#define MAC_MDIOSCAR_REG_WIDTH 21
+#define MAC_MDIOSCCDR_BUSY_INDEX 22
+#define MAC_MDIOSCCDR_BUSY_WIDTH 1
+#define MAC_MDIOSCCDR_CMD_INDEX 16
+#define MAC_MDIOSCCDR_CMD_WIDTH 2
+#define MAC_MDIOSCCDR_CR_INDEX 19
+#define MAC_MDIOSCCDR_CR_WIDTH 3
+#define MAC_MDIOSCCDR_DATA_INDEX 0
+#define MAC_MDIOSCCDR_DATA_WIDTH 16
+#define MAC_MDIOSCCDR_SADDR_INDEX 18
+#define MAC_MDIOSCCDR_SADDR_WIDTH 1
+#define MAC_PFR_HMC_INDEX 2
+#define MAC_PFR_HMC_WIDTH 1
+#define MAC_PFR_HPF_INDEX 10
+#define MAC_PFR_HPF_WIDTH 1
+#define MAC_PFR_HUC_INDEX 1
+#define MAC_PFR_HUC_WIDTH 1
+#define MAC_PFR_PM_INDEX 4
+#define MAC_PFR_PM_WIDTH 1
+#define MAC_PFR_PR_INDEX 0
+#define MAC_PFR_PR_WIDTH 1
+#define MAC_PFR_VTFE_INDEX 16
+#define MAC_PFR_VTFE_WIDTH 1
+#define MAC_PMTCSR_MGKPKTEN_INDEX 1
+#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
+#define MAC_PMTCSR_PWRDWN_INDEX 0
+#define MAC_PMTCSR_PWRDWN_WIDTH 1
+#define MAC_PMTCSR_RWKFILTRST_INDEX 31
+#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
+#define MAC_PMTCSR_RWKPKTEN_INDEX 2
+#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
+#define MAC_Q0TFCR_PT_INDEX 16
+#define MAC_Q0TFCR_PT_WIDTH 16
+#define MAC_Q0TFCR_TFE_INDEX 1
+#define MAC_Q0TFCR_TFE_WIDTH 1
+#define MAC_RCR_ACS_INDEX 1
+#define MAC_RCR_ACS_WIDTH 1
+#define MAC_RCR_CST_INDEX 2
+#define MAC_RCR_CST_WIDTH 1
+#define MAC_RCR_DCRCC_INDEX 3
+#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_HDSMS_INDEX 12
+#define MAC_RCR_HDSMS_WIDTH 3
+#define MAC_RCR_IPC_INDEX 9
+#define MAC_RCR_IPC_WIDTH 1
+#define MAC_RCR_JE_INDEX 8
+#define MAC_RCR_JE_WIDTH 1
+#define MAC_RCR_LM_INDEX 10
+#define MAC_RCR_LM_WIDTH 1
+#define MAC_RCR_RE_INDEX 0
+#define MAC_RCR_RE_WIDTH 1
+#define MAC_RFCR_PFCE_INDEX 8
+#define MAC_RFCR_PFCE_WIDTH 1
+#define MAC_RFCR_RFE_INDEX 0
+#define MAC_RFCR_RFE_WIDTH 1
+#define MAC_RFCR_UP_INDEX 1
+#define MAC_RFCR_UP_WIDTH 1
+#define MAC_RQC0R_RXQ0EN_INDEX 0
+#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_RSSAR_ADDRT_INDEX 2
+#define MAC_RSSAR_ADDRT_WIDTH 1
+#define MAC_RSSAR_CT_INDEX 1
+#define MAC_RSSAR_CT_WIDTH 1
+#define MAC_RSSAR_OB_INDEX 0
+#define MAC_RSSAR_OB_WIDTH 1
+#define MAC_RSSAR_RSSIA_INDEX 8
+#define MAC_RSSAR_RSSIA_WIDTH 8
+#define MAC_RSSCR_IP2TE_INDEX 1
+#define MAC_RSSCR_IP2TE_WIDTH 1
+#define MAC_RSSCR_RSSE_INDEX 0
+#define MAC_RSSCR_RSSE_WIDTH 1
+#define MAC_RSSCR_TCP4TE_INDEX 2
+#define MAC_RSSCR_TCP4TE_WIDTH 1
+#define MAC_RSSCR_UDP4TE_INDEX 3
+#define MAC_RSSCR_UDP4TE_WIDTH 1
+#define MAC_RSSDR_DMCH_INDEX 0
+#define MAC_RSSDR_DMCH_WIDTH 4
+#define MAC_SSIR_SNSINC_INDEX 8
+#define MAC_SSIR_SNSINC_WIDTH 8
+#define MAC_SSIR_SSINC_INDEX 16
+#define MAC_SSIR_SSINC_WIDTH 8
+#define MAC_TCR_SS_INDEX 29
+#define MAC_TCR_SS_WIDTH 2
+#define MAC_TCR_TE_INDEX 0
+#define MAC_TCR_TE_WIDTH 1
+#define MAC_TSCR_AV8021ASMEN_INDEX 28
+#define MAC_TSCR_AV8021ASMEN_WIDTH 1
+#define MAC_TSCR_SNAPTYPSEL_INDEX 16
+#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
+#define MAC_TSCR_TSADDREG_INDEX 5
+#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSCFUPDT_INDEX 1
+#define MAC_TSCR_TSCFUPDT_WIDTH 1
+#define MAC_TSCR_TSCTRLSSR_INDEX 9
+#define MAC_TSCR_TSCTRLSSR_WIDTH 1
+#define MAC_TSCR_TSENA_INDEX 0
+#define MAC_TSCR_TSENA_WIDTH 1
+#define MAC_TSCR_TSENALL_INDEX 8
+#define MAC_TSCR_TSENALL_WIDTH 1
+#define MAC_TSCR_TSEVNTENA_INDEX 14
+#define MAC_TSCR_TSEVNTENA_WIDTH 1
+#define MAC_TSCR_TSINIT_INDEX 2
+#define MAC_TSCR_TSINIT_WIDTH 1
+#define MAC_TSCR_TSIPENA_INDEX 11
+#define MAC_TSCR_TSIPENA_WIDTH 1
+#define MAC_TSCR_TSIPV4ENA_INDEX 13
+#define MAC_TSCR_TSIPV4ENA_WIDTH 1
+#define MAC_TSCR_TSIPV6ENA_INDEX 12
+#define MAC_TSCR_TSIPV6ENA_WIDTH 1
+#define MAC_TSCR_TSMSTRENA_INDEX 15
+#define MAC_TSCR_TSMSTRENA_WIDTH 1
+#define MAC_TSCR_TSVER2ENA_INDEX 10
+#define MAC_TSCR_TSVER2ENA_WIDTH 1
+#define MAC_TSCR_TXTSSTSM_INDEX 24
+#define MAC_TSCR_TXTSSTSM_WIDTH 1
+#define MAC_TSSR_TXTSC_INDEX 15
+#define MAC_TSSR_TXTSC_WIDTH 1
+#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
+#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_VLANHTR_VLHT_INDEX 0
+#define MAC_VLANHTR_VLHT_WIDTH 16
+#define MAC_VLANIR_VLTI_INDEX 20
+#define MAC_VLANIR_VLTI_WIDTH 1
+#define MAC_VLANIR_CSVL_INDEX 19
+#define MAC_VLANIR_CSVL_WIDTH 1
+#define MAC_VLANTR_DOVLTC_INDEX 20
+#define MAC_VLANTR_DOVLTC_WIDTH 1
+#define MAC_VLANTR_ERSVLM_INDEX 19
+#define MAC_VLANTR_ERSVLM_WIDTH 1
+#define MAC_VLANTR_ESVL_INDEX 18
+#define MAC_VLANTR_ESVL_WIDTH 1
+#define MAC_VLANTR_ETV_INDEX 16
+#define MAC_VLANTR_ETV_WIDTH 1
+#define MAC_VLANTR_EVLS_INDEX 21
+#define MAC_VLANTR_EVLS_WIDTH 2
+#define MAC_VLANTR_EVLRXS_INDEX 24
+#define MAC_VLANTR_EVLRXS_WIDTH 1
+#define MAC_VLANTR_VL_INDEX 0
+#define MAC_VLANTR_VL_WIDTH 16
+#define MAC_VLANTR_VTHM_INDEX 25
+#define MAC_VLANTR_VTHM_WIDTH 1
+#define MAC_VLANTR_VTIM_INDEX 17
+#define MAC_VLANTR_VTIM_WIDTH 1
+#define MAC_VR_DEVID_INDEX 8
+#define MAC_VR_DEVID_WIDTH 8
+#define MAC_VR_SNPSVER_INDEX 0
+#define MAC_VR_SNPSVER_WIDTH 8
+#define MAC_VR_USERVER_INDEX 16
+#define MAC_VR_USERVER_WIDTH 8
+
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXOCTETCOUNT_GB_HI 0x0818
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXFRAMECOUNT_GB_HI 0x0820
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX64OCTETS_GB_HI 0x0838
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX65TO127OCTETS_GB_HI 0x0840
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX128TO255OCTETS_GB_HI 0x0848
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX256TO511OCTETS_GB_HI 0x0850
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXUNDERFLOWERROR_HI 0x0880
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXOCTETCOUNT_G_HI 0x0888
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXFRAMECOUNT_G_HI 0x0890
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXPAUSEFRAMES_HI 0x0898
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_TXVLANFRAMES_G_HI 0x08a0
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXFRAMECOUNT_GB_HI 0x0904
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_GB_HI 0x090c
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXOCTETCOUNT_G_HI 0x0914
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXCRCERROR_HI 0x092c
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX64OCTETS_GB_HI 0x0944
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX65TO127OCTETS_GB_HI 0x094c
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX128TO255OCTETS_GB_HI 0x0954
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX256TO511OCTETS_GB_HI 0x095c
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXUNICASTFRAMES_G_HI 0x0974
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXLENGTHERROR_HI 0x097c
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXOUTOFRANGETYPE_HI 0x0984
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXPAUSEFRAMES_HI 0x098c
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXFIFOOVERFLOW_HI 0x0994
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXVLANFRAMES_GB_HI 0x099c
+#define MMC_RXWATCHDOGERROR 0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_INDEX 0
+#define MMC_CR_CR_WIDTH 1
+#define MMC_CR_CSR_INDEX 1
+#define MMC_CR_CSR_WIDTH 1
+#define MMC_CR_ROR_INDEX 2
+#define MMC_CR_ROR_WIDTH 1
+#define MMC_CR_MCF_INDEX 3
+#define MMC_CR_MCF_WIDTH 1
+#define MMC_CR_MCT_INDEX 4
+#define MMC_CR_MCT_WIDTH 2
+#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
+#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
+#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
+#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
+#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
+#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
+#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
+#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXCRCERROR_INDEX 5
+#define MMC_RISR_RXCRCERROR_WIDTH 1
+#define MMC_RISR_RXRUNTERROR_INDEX 6
+#define MMC_RISR_RXRUNTERROR_WIDTH 1
+#define MMC_RISR_RXJABBERERROR_INDEX 7
+#define MMC_RISR_RXJABBERERROR_WIDTH 1
+#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
+#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
+#define MMC_RISR_RXOVERSIZE_G_INDEX 9
+#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
+#define MMC_RISR_RX64OCTETS_GB_INDEX 10
+#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
+#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
+#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
+#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
+#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
+#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXLENGTHERROR_INDEX 17
+#define MMC_RISR_RXLENGTHERROR_WIDTH 1
+#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
+#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
+#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
+#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
+#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
+#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
+#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
+#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
+#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
+#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
+#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
+#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
+#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
+#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
+#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
+#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TX64OCTETS_GB_INDEX 4
+#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
+#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
+#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
+#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
+#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
+#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
+#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
+#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
+#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
+#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
+#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
+#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
+#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
+#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
+
+/* MTL register offsets */
+#define MTL_OMR 0x1000
+#define MTL_FDCR 0x1008
+#define MTL_FDSR 0x100c
+#define MTL_FDDR 0x1010
+#define MTL_ISR 0x1020
+#define MTL_RQDCM0R 0x1030
+#define MTL_TCPM0R 0x1040
+#define MTL_TCPM1R 0x1044
+
+#define MTL_RQDCM_INC 4
+#define MTL_RQDCM_Q_PER_REG 4
+#define MTL_TCPM_INC 4
+#define MTL_TCPM_TC_PER_REG 4
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_INDEX 5
+#define MTL_OMR_ETSALG_WIDTH 2
+#define MTL_OMR_RAA_INDEX 2
+#define MTL_OMR_RAA_WIDTH 1
+
+/* MTL queue register offsets
+ * Multiple queues can be active. The first queue has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_TQUR 0x04
+#define MTL_Q_TQDR 0x08
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQMPOCR 0x44
+#define MTL_Q_RQDR 0x48
+#define MTL_Q_RQFCR 0x50
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQDR_PRXQ_INDEX 16
+#define MTL_Q_RQDR_PRXQ_WIDTH 14
+#define MTL_Q_RQDR_RXQSTS_INDEX 4
+#define MTL_Q_RQDR_RXQSTS_WIDTH 2
+#define MTL_Q_RQFCR_RFA_INDEX 1
+#define MTL_Q_RQFCR_RFA_WIDTH 6
+#define MTL_Q_RQFCR_RFD_INDEX 17
+#define MTL_Q_RQFCR_RFD_WIDTH 6
+#define MTL_Q_RQOMR_EHFC_INDEX 7
+#define MTL_Q_RQOMR_EHFC_WIDTH 1
+#define MTL_Q_RQOMR_RQS_INDEX 16
+#define MTL_Q_RQOMR_RQS_WIDTH 9
+#define MTL_Q_RQOMR_RSF_INDEX 5
+#define MTL_Q_RQOMR_RSF_WIDTH 1
+#define MTL_Q_RQOMR_RTC_INDEX 0
+#define MTL_Q_RQOMR_RTC_WIDTH 2
+#define MTL_Q_TQDR_TRCSTS_INDEX 1
+#define MTL_Q_TQDR_TRCSTS_WIDTH 2
+#define MTL_Q_TQDR_TXQSTS_INDEX 4
+#define MTL_Q_TQDR_TXQSTS_WIDTH 1
+#define MTL_Q_TQOMR_FTQ_INDEX 0
+#define MTL_Q_TQOMR_FTQ_WIDTH 1
+#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
+#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3
+#define MTL_Q_TQOMR_TQS_INDEX 16
+#define MTL_Q_TQOMR_TQS_WIDTH 10
+#define MTL_Q_TQOMR_TSF_INDEX 1
+#define MTL_Q_TQOMR_TSF_WIDTH 1
+#define MTL_Q_TQOMR_TTC_INDEX 4
+#define MTL_Q_TQOMR_TTC_WIDTH 3
+#define MTL_Q_TQOMR_TXQEN_INDEX 2
+#define MTL_Q_TQOMR_TXQEN_WIDTH 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_32 0x01
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
+
+#define MTL_TC_ETSCR 0x10
+#define MTL_TC_ETSSR 0x14
+#define MTL_TC_QWR 0x18
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_INDEX 0
+#define MTL_TC_ETSCR_TSA_WIDTH 2
+#define MTL_TC_QWR_QW_INDEX 0
+#define MTL_TC_QWR_QW_WIDTH 21
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+/* PCS register offsets */
+#define PCS_V1_WINDOW_SELECT 0x03fc
+#define PCS_V2_WINDOW_DEF 0x9060
+#define PCS_V2_WINDOW_SELECT 0x9064
+
+/* PCS register entry bit positions and sizes */
+#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
+#define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14
+#define PCS_V2_WINDOW_DEF_SIZE_INDEX 2
+#define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4
+
+/* SerDes integration register offsets */
+#define SIR0_KR_RT_1 0x002c
+#define SIR0_STATUS 0x0040
+#define SIR1_SPEED 0x0000
+
+/* SerDes integration register entry bit positions and sizes */
+#define SIR0_KR_RT_1_RESET_INDEX 11
+#define SIR0_KR_RT_1_RESET_WIDTH 1
+#define SIR0_STATUS_RX_READY_INDEX 0
+#define SIR0_STATUS_RX_READY_WIDTH 1
+#define SIR0_STATUS_TX_READY_INDEX 8
+#define SIR0_STATUS_TX_READY_WIDTH 1
+#define SIR1_SPEED_CDR_RATE_INDEX 12
+#define SIR1_SPEED_CDR_RATE_WIDTH 4
+#define SIR1_SPEED_DATARATE_INDEX 4
+#define SIR1_SPEED_DATARATE_WIDTH 2
+#define SIR1_SPEED_PLLSEL_INDEX 3
+#define SIR1_SPEED_PLLSEL_WIDTH 1
+#define SIR1_SPEED_RATECHANGE_INDEX 6
+#define SIR1_SPEED_RATECHANGE_WIDTH 1
+#define SIR1_SPEED_TXAMP_INDEX 8
+#define SIR1_SPEED_TXAMP_WIDTH 4
+#define SIR1_SPEED_WORDMODE_INDEX 0
+#define SIR1_SPEED_WORDMODE_WIDTH 3
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG6 0x0018
+#define RXTX_REG20 0x0050
+#define RXTX_REG22 0x0058
+#define RXTX_REG114 0x01c8
+#define RXTX_REG129 0x0204
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX 8
+#define RXTX_REG6_RESETB_RXD_WIDTH 1
+#define RXTX_REG20_BLWC_ENA_INDEX 2
+#define RXTX_REG20_BLWC_ENA_WIDTH 1
+#define RXTX_REG114_PQ_REG_INDEX 9
+#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
+
+/* MAC Control register offsets */
+#define XP_PROP_0 0x0000
+#define XP_PROP_1 0x0004
+#define XP_PROP_2 0x0008
+#define XP_PROP_3 0x000c
+#define XP_PROP_4 0x0010
+#define XP_PROP_5 0x0014
+#define XP_MAC_ADDR_LO 0x0020
+#define XP_MAC_ADDR_HI 0x0024
+#define XP_ECC_ISR 0x0030
+#define XP_ECC_IER 0x0034
+#define XP_ECC_CNT0 0x003c
+#define XP_ECC_CNT1 0x0040
+#define XP_DRIVER_INT_REQ 0x0060
+#define XP_DRIVER_INT_RO 0x0064
+#define XP_DRIVER_SCRATCH_0 0x0068
+#define XP_DRIVER_SCRATCH_1 0x006c
+#define XP_INT_EN 0x0078
+#define XP_I2C_MUTEX 0x0080
+#define XP_MDIO_MUTEX 0x0084
+
+/* MAC Control register entry bit positions and sizes */
+#define XP_DRIVER_INT_REQ_REQUEST_INDEX 0
+#define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1
+#define XP_DRIVER_INT_RO_STATUS_INDEX 0
+#define XP_DRIVER_INT_RO_STATUS_WIDTH 1
+#define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0
+#define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8
+#define XP_ECC_CNT0_RX_DED_INDEX 24
+#define XP_ECC_CNT0_RX_DED_WIDTH 8
+#define XP_ECC_CNT0_RX_SEC_INDEX 16
+#define XP_ECC_CNT0_RX_SEC_WIDTH 8
+#define XP_ECC_CNT0_TX_DED_INDEX 8
+#define XP_ECC_CNT0_TX_DED_WIDTH 8
+#define XP_ECC_CNT0_TX_SEC_INDEX 0
+#define XP_ECC_CNT0_TX_SEC_WIDTH 8
+#define XP_ECC_CNT1_DESC_DED_INDEX 8
+#define XP_ECC_CNT1_DESC_DED_WIDTH 8
+#define XP_ECC_CNT1_DESC_SEC_INDEX 0
+#define XP_ECC_CNT1_DESC_SEC_WIDTH 8
+#define XP_ECC_IER_DESC_DED_INDEX 0
+#define XP_ECC_IER_DESC_DED_WIDTH 1
+#define XP_ECC_IER_DESC_SEC_INDEX 1
+#define XP_ECC_IER_DESC_SEC_WIDTH 1
+#define XP_ECC_IER_RX_DED_INDEX 2
+#define XP_ECC_IER_RX_DED_WIDTH 1
+#define XP_ECC_IER_RX_SEC_INDEX 3
+#define XP_ECC_IER_RX_SEC_WIDTH 1
+#define XP_ECC_IER_TX_DED_INDEX 4
+#define XP_ECC_IER_TX_DED_WIDTH 1
+#define XP_ECC_IER_TX_SEC_INDEX 5
+#define XP_ECC_IER_TX_SEC_WIDTH 1
+#define XP_ECC_ISR_DESC_DED_INDEX 0
+#define XP_ECC_ISR_DESC_DED_WIDTH 1
+#define XP_ECC_ISR_DESC_SEC_INDEX 1
+#define XP_ECC_ISR_DESC_SEC_WIDTH 1
+#define XP_ECC_ISR_RX_DED_INDEX 2
+#define XP_ECC_ISR_RX_DED_WIDTH 1
+#define XP_ECC_ISR_RX_SEC_INDEX 3
+#define XP_ECC_ISR_RX_SEC_WIDTH 1
+#define XP_ECC_ISR_TX_DED_INDEX 4
+#define XP_ECC_ISR_TX_DED_WIDTH 1
+#define XP_ECC_ISR_TX_SEC_INDEX 5
+#define XP_ECC_ISR_TX_SEC_WIDTH 1
+#define XP_I2C_MUTEX_BUSY_INDEX 31
+#define XP_I2C_MUTEX_BUSY_WIDTH 1
+#define XP_I2C_MUTEX_ID_INDEX 29
+#define XP_I2C_MUTEX_ID_WIDTH 2
+#define XP_I2C_MUTEX_ACTIVE_INDEX 0
+#define XP_I2C_MUTEX_ACTIVE_WIDTH 1
+#define XP_MAC_ADDR_HI_VALID_INDEX 31
+#define XP_MAC_ADDR_HI_VALID_WIDTH 1
+#define XP_PROP_0_CONN_TYPE_INDEX 28
+#define XP_PROP_0_CONN_TYPE_WIDTH 3
+#define XP_PROP_0_MDIO_ADDR_INDEX 16
+#define XP_PROP_0_MDIO_ADDR_WIDTH 5
+#define XP_PROP_0_PORT_ID_INDEX 0
+#define XP_PROP_0_PORT_ID_WIDTH 8
+#define XP_PROP_0_PORT_MODE_INDEX 8
+#define XP_PROP_0_PORT_MODE_WIDTH 4
+#define XP_PROP_0_PORT_SPEEDS_INDEX 23
+#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
+#define XP_PROP_1_MAX_RX_DMA_INDEX 24
+#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
+#define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5
+#define XP_PROP_1_MAX_TX_DMA_INDEX 16
+#define XP_PROP_1_MAX_TX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_TX_QUEUES_INDEX 0
+#define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5
+#define XP_PROP_2_RX_FIFO_SIZE_INDEX 16
+#define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_2_TX_FIFO_SIZE_INDEX 0
+#define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_3_GPIO_MASK_INDEX 28
+#define XP_PROP_3_GPIO_MASK_WIDTH 4
+#define XP_PROP_3_GPIO_MOD_ABS_INDEX 20
+#define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4
+#define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16
+#define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4
+#define XP_PROP_3_GPIO_RX_LOS_INDEX 24
+#define XP_PROP_3_GPIO_RX_LOS_WIDTH 4
+#define XP_PROP_3_GPIO_TX_FAULT_INDEX 12
+#define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4
+#define XP_PROP_3_GPIO_ADDR_INDEX 8
+#define XP_PROP_3_GPIO_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_INDEX 0
+#define XP_PROP_3_MDIO_RESET_WIDTH 2
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2
+#define XP_PROP_4_MUX_ADDR_HI_INDEX 8
+#define XP_PROP_4_MUX_ADDR_HI_WIDTH 5
+#define XP_PROP_4_MUX_ADDR_LO_INDEX 0
+#define XP_PROP_4_MUX_ADDR_LO_WIDTH 3
+#define XP_PROP_4_MUX_CHAN_INDEX 4
+#define XP_PROP_4_MUX_CHAN_WIDTH 3
+#define XP_PROP_4_REDRV_ADDR_INDEX 16
+#define XP_PROP_4_REDRV_ADDR_WIDTH 7
+#define XP_PROP_4_REDRV_IF_INDEX 23
+#define XP_PROP_4_REDRV_IF_WIDTH 1
+#define XP_PROP_4_REDRV_LANE_INDEX 24
+#define XP_PROP_4_REDRV_LANE_WIDTH 3
+#define XP_PROP_4_REDRV_MODEL_INDEX 28
+#define XP_PROP_4_REDRV_MODEL_WIDTH 3
+#define XP_PROP_4_REDRV_PRESENT_INDEX 31
+#define XP_PROP_4_REDRV_PRESENT_WIDTH 1
+
+/* I2C Control register offsets */
+#define IC_CON 0x0000
+#define IC_TAR 0x0004
+#define IC_DATA_CMD 0x0010
+#define IC_INTR_STAT 0x002c
+#define IC_INTR_MASK 0x0030
+#define IC_RAW_INTR_STAT 0x0034
+#define IC_CLR_INTR 0x0040
+#define IC_CLR_TX_ABRT 0x0054
+#define IC_CLR_STOP_DET 0x0060
+#define IC_ENABLE 0x006c
+#define IC_TXFLR 0x0074
+#define IC_RXFLR 0x0078
+#define IC_TX_ABRT_SOURCE 0x0080
+#define IC_ENABLE_STATUS 0x009c
+#define IC_COMP_PARAM_1 0x00f4
+
+/* I2C Control register entry bit positions and sizes */
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8
+#define IC_CON_MASTER_MODE_INDEX 0
+#define IC_CON_MASTER_MODE_WIDTH 1
+#define IC_CON_RESTART_EN_INDEX 5
+#define IC_CON_RESTART_EN_WIDTH 1
+#define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9
+#define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1
+#define IC_CON_SLAVE_DISABLE_INDEX 6
+#define IC_CON_SLAVE_DISABLE_WIDTH 1
+#define IC_CON_SPEED_INDEX 1
+#define IC_CON_SPEED_WIDTH 2
+#define IC_DATA_CMD_CMD_INDEX 8
+#define IC_DATA_CMD_CMD_WIDTH 1
+#define IC_DATA_CMD_STOP_INDEX 9
+#define IC_DATA_CMD_STOP_WIDTH 1
+#define IC_ENABLE_ABORT_INDEX 1
+#define IC_ENABLE_ABORT_WIDTH 1
+#define IC_ENABLE_EN_INDEX 0
+#define IC_ENABLE_EN_WIDTH 1
+#define IC_ENABLE_STATUS_EN_INDEX 0
+#define IC_ENABLE_STATUS_EN_WIDTH 1
+#define IC_INTR_MASK_TX_EMPTY_INDEX 4
+#define IC_INTR_MASK_TX_EMPTY_WIDTH 1
+#define IC_RAW_INTR_STAT_RX_FULL_INDEX 2
+#define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1
+#define IC_RAW_INTR_STAT_STOP_DET_INDEX 9
+#define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6
+#define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4
+#define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1
+
+/* I2C Control register value */
+#define IC_TX_ABRT_7B_ADDR_NOACK 0x0001
+#define IC_TX_ABRT_ARB_LOST 0x1000
+
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_INDEX 2
+#define RX_PACKET_ERRORS_CRC_WIDTH 1
+#define RX_PACKET_ERRORS_FRAME_INDEX 3
+#define RX_PACKET_ERRORS_FRAME_WIDTH 1
+#define RX_PACKET_ERRORS_LENGTH_INDEX 0
+#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
+#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
+#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
+
+#define RX_NORMAL_DESC0_OVT_INDEX 0
+#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC2_HL_INDEX 0
+#define RX_NORMAL_DESC2_HL_WIDTH 10
+#define RX_NORMAL_DESC3_CDA_INDEX 27
+#define RX_NORMAL_DESC3_CDA_WIDTH 1
+#define RX_NORMAL_DESC3_CTXT_INDEX 30
+#define RX_NORMAL_DESC3_CTXT_WIDTH 1
+#define RX_NORMAL_DESC3_ES_INDEX 15
+#define RX_NORMAL_DESC3_ES_WIDTH 1
+#define RX_NORMAL_DESC3_ETLT_INDEX 16
+#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_FD_INDEX 29
+#define RX_NORMAL_DESC3_FD_WIDTH 1
+#define RX_NORMAL_DESC3_INTE_INDEX 30
+#define RX_NORMAL_DESC3_INTE_WIDTH 1
+#define RX_NORMAL_DESC3_L34T_INDEX 20
+#define RX_NORMAL_DESC3_L34T_WIDTH 4
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
+#define RX_NORMAL_DESC3_OWN_INDEX 31
+#define RX_NORMAL_DESC3_OWN_WIDTH 1
+#define RX_NORMAL_DESC3_PL_INDEX 0
+#define RX_NORMAL_DESC3_PL_WIDTH 14
+#define RX_NORMAL_DESC3_RSV_INDEX 26
+#define RX_NORMAL_DESC3_RSV_WIDTH 1
+
+#define RX_DESC3_L34T_IPV4_TCP 1
+#define RX_DESC3_L34T_IPV4_UDP 2
+#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV6_TCP 9
+#define RX_DESC3_L34T_IPV6_UDP 10
+#define RX_DESC3_L34T_IPV6_ICMP 11
+
+#define RX_CONTEXT_DESC3_TSA_INDEX 4
+#define RX_CONTEXT_DESC3_TSA_WIDTH 1
+#define RX_CONTEXT_DESC3_TSD_INDEX 6
+#define RX_CONTEXT_DESC3_TSD_WIDTH 1
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3
+#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1
+
+#define TX_CONTEXT_DESC2_MSS_INDEX 0
+#define TX_CONTEXT_DESC2_MSS_WIDTH 15
+#define TX_CONTEXT_DESC3_CTXT_INDEX 30
+#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
+#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
+#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
+#define TX_CONTEXT_DESC3_VLTV_INDEX 16
+#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
+#define TX_CONTEXT_DESC3_VT_INDEX 0
+#define TX_CONTEXT_DESC3_VT_WIDTH 16
+
+#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
+#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
+#define TX_NORMAL_DESC2_IC_INDEX 31
+#define TX_NORMAL_DESC2_IC_WIDTH 1
+#define TX_NORMAL_DESC2_TTSE_INDEX 30
+#define TX_NORMAL_DESC2_TTSE_WIDTH 1
+#define TX_NORMAL_DESC2_VTIR_INDEX 14
+#define TX_NORMAL_DESC2_VTIR_WIDTH 2
+#define TX_NORMAL_DESC3_CIC_INDEX 16
+#define TX_NORMAL_DESC3_CIC_WIDTH 2
+#define TX_NORMAL_DESC3_CPC_INDEX 26
+#define TX_NORMAL_DESC3_CPC_WIDTH 2
+#define TX_NORMAL_DESC3_CTXT_INDEX 30
+#define TX_NORMAL_DESC3_CTXT_WIDTH 1
+#define TX_NORMAL_DESC3_FD_INDEX 29
+#define TX_NORMAL_DESC3_FD_WIDTH 1
+#define TX_NORMAL_DESC3_FL_INDEX 0
+#define TX_NORMAL_DESC3_FL_WIDTH 15
+#define TX_NORMAL_DESC3_LD_INDEX 28
+#define TX_NORMAL_DESC3_LD_WIDTH 1
+#define TX_NORMAL_DESC3_OWN_INDEX 31
+#define TX_NORMAL_DESC3_OWN_WIDTH 1
+#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
+#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
+#define TX_NORMAL_DESC3_TCPPL_INDEX 0
+#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
+#define TX_NORMAL_DESC3_TSE_INDEX 18
+#define TX_NORMAL_DESC3_TSE_WIDTH 1
+
+#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+
+/* MDIO undefined or vendor specific registers */
+#ifndef MDIO_PMA_10GBR_PMD_CTRL
+#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
+#endif
+
+#ifndef MDIO_PMA_10GBR_FECCTRL
+#define MDIO_PMA_10GBR_FECCTRL 0x00ab
+#endif
+
+#ifndef MDIO_PCS_DIG_CTRL
+#define MDIO_PCS_DIG_CTRL 0x8000
+#endif
+
+#ifndef MDIO_AN_XNP
+#define MDIO_AN_XNP 0x0016
+#endif
+
+#ifndef MDIO_AN_LPX
+#define MDIO_AN_LPX 0x0019
+#endif
+
+#ifndef MDIO_AN_COMP_STAT
+#define MDIO_AN_COMP_STAT 0x0030
+#endif
+
+#ifndef MDIO_AN_INTMASK
+#define MDIO_AN_INTMASK 0x8001
+#endif
+
+#ifndef MDIO_AN_INT
+#define MDIO_AN_INT 0x8002
+#endif
+
+#ifndef MDIO_VEND2_AN_ADVERTISE
+#define MDIO_VEND2_AN_ADVERTISE 0x0004
+#endif
+
+#ifndef MDIO_VEND2_AN_LP_ABILITY
+#define MDIO_VEND2_AN_LP_ABILITY 0x0005
+#endif
+
+#ifndef MDIO_VEND2_AN_CTRL
+#define MDIO_VEND2_AN_CTRL 0x8001
+#endif
+
+#ifndef MDIO_VEND2_AN_STAT
+#define MDIO_VEND2_AN_STAT 0x8002
+#endif
+
+#ifndef MDIO_VEND2_PMA_CDR_CONTROL
+#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
+#endif
+
+#ifndef MDIO_CTRL1_SPEED1G
+#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_AN_ENABLE
+#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_AN_RESTART
+#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS6
+#define MDIO_VEND2_CTRL1_SS6 BIT(6)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS13
+#define MDIO_VEND2_CTRL1_SS13 BIT(13)
+#endif
+
+/* MDIO mask values */
+#define AXGBE_AN_CL73_INT_CMPLT BIT(0)
+#define AXGBE_AN_CL73_INC_LINK BIT(1)
+#define AXGBE_AN_CL73_PG_RCV BIT(2)
+#define AXGBE_AN_CL73_INT_MASK 0x07
+
+#define AXGBE_XNP_MCF_NULL_MESSAGE 0x001
+#define AXGBE_XNP_ACK_PROCESSED BIT(12)
+#define AXGBE_XNP_MP_FORMATTED BIT(13)
+#define AXGBE_XNP_NP_EXCHANGE BIT(15)
+
+#define AXGBE_KR_TRAINING_START BIT(0)
+#define AXGBE_KR_TRAINING_ENABLE BIT(1)
+
+#define AXGBE_PCS_CL37_BP BIT(12)
+
+#define AXGBE_AN_CL37_INT_CMPLT BIT(0)
+#define AXGBE_AN_CL37_INT_MASK 0x01
+
+#define AXGBE_AN_CL37_HD_MASK 0x40
+#define AXGBE_AN_CL37_FD_MASK 0x20
+
+#define AXGBE_AN_CL37_PCS_MODE_MASK 0x06
+#define AXGBE_AN_CL37_PCS_MODE_BASEX 0x00
+#define AXGBE_AN_CL37_PCS_MODE_SGMII 0x04
+#define AXGBE_AN_CL37_TX_CONFIG_MASK 0x08
+
+#define AXGBE_PMA_CDR_TRACK_EN_MASK 0x01
+#define AXGBE_PMA_CDR_TRACK_EN_OFF 0x00
+#define AXGBE_PMA_CDR_TRACK_EN_ON 0x01
+
+/*generic*/
+#define __iomem
+
+#define rmb() rte_rmb() /* dpdk rte provided rmb */
+#define wmb() rte_wmb() /* dpdk rte provided wmb */
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+typedef unsigned long long dma_addr_t;
+
+static inline uint32_t low32_value(uint64_t addr)
+{
+ return (addr) & 0x0ffffffff;
+}
+
+static inline uint32_t high32_value(uint64_t addr)
+{
+ return (addr >> 32) & 0x0ffffffff;
+}
+
+/*END*/
+
+/* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+ *
+ * The set macro will clear the current bit field value within the
+ * variable and then set the bit field of the variable to the
+ * specified value
+ */
+#define GET_BITS(_var, _index, _width) \
+ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS(_var, _index, _width, _val) \
+do { \
+ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
+} while (0)
+
+#define GET_BITS_LE(_var, _index, _width) \
+ ((rte_le_to_cpu_32((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS_LE(_var, _index, _width, _val) \
+do { \
+ (_var) &= rte_cpu_to_le_32(~(((0x1 << (_width)) - 1) << (_index)));\
+ (_var) |= rte_cpu_to_le_32((((_val) & \
+ ((0x1 << (_width)) - 1)) << (_index))); \
+} while (0)
+
+/* Bit setting and getting macros based on register fields
+ * The get macro uses the bit field definitions formed using the input
+ * names to extract the current bit field value from within the
+ * variable
+ *
+ * The set macro uses the bit field definitions formed using the input
+ * names to set the bit field of the variable to the specified value
+ */
+#define AXGMAC_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define AXGMAC_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define AXGMAC_GET_BITS_LE(_var, _prefix, _field) \
+ GET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define AXGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
+ SET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+/* Macros for reading or writing registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define AXGMAC_IOREAD(_pdata, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xgmac_regs) + (_reg))
+
+#define AXGMAC_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(AXGMAC_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define AXGMAC_IOWRITE(_pdata, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_pdata)->xgmac_regs) + (_reg))
+
+#define AXGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = AXGMAC_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ AXGMAC_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for reading or writing MTL queue or traffic class registers
+ * Similar to the standard read and write macros except that the
+ * base register value is calculated by the queue or traffic class number
+ */
+#define AXGMAC_MTL_IOREAD(_pdata, _n, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xgmac_regs) + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg))
+
+#define AXGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
+ GET_BITS(AXGMAC_MTL_IOREAD((_pdata), (_n), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define AXGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
+ rte_write32((_val), (uint8_t *)((_pdata)->xgmac_regs) +\
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg))
+
+#define AXGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
+do { \
+ u32 reg_val = AXGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ AXGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
+} while (0)
+
+/* Macros for reading or writing DMA channel registers
+ * Similar to the standard read and write macros except that the
+ * base register value is obtained from the ring
+ */
+#define AXGMAC_DMA_IOREAD(_channel, _reg) \
+ rte_read32((uint8_t *)((_channel)->dma_regs) + (_reg))
+
+#define AXGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
+ GET_BITS(AXGMAC_DMA_IOREAD((_channel), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define AXGMAC_DMA_IOWRITE(_channel, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_channel)->dma_regs) + (_reg))
+
+#define AXGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
+do { \
+ u32 reg_val = AXGMAC_DMA_IOREAD((_channel), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ AXGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of XPCS registers.
+ */
+#define XPCS_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XPCS_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XPCS32_IOWRITE(_pdata, _off, _val) \
+ rte_write32(_val, \
+ (uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+#define XPCS32_IOREAD(_pdata, _off) \
+ rte_read32((uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+#define XPCS16_IOWRITE(_pdata, _off, _val) \
+ rte_write16(_val, \
+ (uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+#define XPCS16_IOREAD(_pdata, _off) \
+ rte_read16((uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes integration registers.
+ */
+#define XSIR_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XSIR0_IOREAD(_pdata, _reg) \
+ rte_read16((uint8_t *)((_pdata)->sir0_regs) + (_reg))
+
+#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR0_IOWRITE(_pdata, _reg, _val) \
+ rte_write16((_val), \
+ (uint8_t *)((_pdata)->sir0_regs) + (_reg))
+
+#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR0_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+#define XSIR1_IOREAD(_pdata, _reg) \
+ rte_read16((uint8_t *)((_pdata)->sir1_regs) + _reg)
+
+#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR1_IOWRITE(_pdata, _reg, _val) \
+ rte_write16((_val), \
+ (uint8_t *)((_pdata)->sir1_regs) + (_reg))
+
+#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR1_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes RxTx registers.
+ */
+#define XRXTX_IOREAD(_pdata, _reg) \
+ rte_read16((uint8_t *)((_pdata)->rxtx_regs) + (_reg))
+
+#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XRXTX_IOWRITE(_pdata, _reg, _val) \
+ rte_write16((_val), \
+ (uint8_t *)((_pdata)->rxtx_regs) + (_reg))
+
+#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XRXTX_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of MAC Control registers.
+ */
+#define XP_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XP_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XP_IOREAD(_pdata, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xprop_regs) + (_reg))
+
+#define XP_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XP_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XP_IOWRITE(_pdata, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_pdata)->xprop_regs) + (_reg))
+
+#define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XP_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XP_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of I2C Control registers.
+ */
+#define XI2C_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XI2C_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XI2C_IOREAD(_pdata, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xi2c_regs) + (_reg))
+
+#define XI2C_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XI2C_IOWRITE(_pdata, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_pdata)->xi2c_regs) + (_reg))
+
+#define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XI2C_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XI2C_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * using MDIO. Different from above because of the use of standardized
+ * Linux include values. No shifting is performed with the bit
+ * operations, everything works on mask values.
+ */
+#define XMDIO_READ(_pdata, _mmd, _reg) \
+ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff)))
+
+#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
+ (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
+
+#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
+ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff), (_val)))
+
+#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
+do { \
+ u32 mmd_val = XMDIO_READ((_pdata), (_mmd), (_reg)); \
+ mmd_val &= ~(_mask); \
+ mmd_val |= (_val); \
+ XMDIO_WRITE((_pdata), (_mmd), (_reg), (mmd_val)); \
+} while (0)
+
+/*
+ * time_after(a,b) returns true if the time a is after time b.
+ *
+ * Do this with "<0" and ">=0" to only test the sign of the result. A
+ * good compiler would generate better code (and a really good compiler
+ * wouldn't care). Gcc is currently neither.
+ */
+#define time_after(a, b) ((long)((b) - (a)) < 0)
+#define time_before(a, b) time_after(b, a)
+
+#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)
+#define time_before_eq(a, b) time_after_eq(b, a)
+
+/*---bitmap support apis---*/
+static inline int axgbe_test_bit(int nr, volatile unsigned long *addr)
+{
+ int res;
+
+ rte_mb();
+ res = ((*addr) & (1UL << nr)) != 0;
+ rte_mb();
+ return res;
+}
+
+static inline void axgbe_set_bit(unsigned int nr, volatile unsigned long *addr)
+{
+ __sync_fetch_and_or(addr, (1UL << nr));
+}
+
+static inline void axgbe_clear_bit(int nr, volatile unsigned long *addr)
+{
+ __sync_fetch_and_and(addr, ~(1UL << nr));
+}
+
+static inline int axgbe_test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << nr);
+
+ return __sync_fetch_and_and(addr, ~mask) & mask;
+}
+
+static inline unsigned long msecs_to_timer_cycles(unsigned int m)
+{
+ return rte_get_timer_hz() * (m / 1000);
+}
+
+#endif /* __AXGBE_COMMON_H__ */
diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_dev.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_dev.c
new file mode 100644
index 00000000..707f1ee9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_dev.c
@@ -0,0 +1,1103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+#include "axgbe_rxtx.h"
+
+static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
+{
+ return pdata->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_HLEN;
+}
+
+/* query busy bit */
+static int mdio_complete(struct axgbe_port *pdata)
+{
+ if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
+ return 1;
+
+ return 0;
+}
+
+static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
+ int reg, u16 val)
+{
+ unsigned int mdio_sca, mdio_sccd;
+ uint64_t timeout;
+
+ mdio_sca = 0;
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ rte_delay_us(100);
+ if (mdio_complete(pdata))
+ return 0;
+ }
+
+ PMD_DRV_LOG(ERR, "Mdio write operation timed out\n");
+ return -ETIMEDOUT;
+}
+
+static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
+ int reg)
+{
+ unsigned int mdio_sca, mdio_sccd;
+ uint64_t timeout;
+
+ mdio_sca = 0;
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ rte_delay_us(100);
+ if (mdio_complete(pdata))
+ goto success;
+ }
+
+ PMD_DRV_LOG(ERR, "Mdio read operation timed out\n");
+ return -ETIMEDOUT;
+
+success:
+ return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
+}
+
+static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
+ enum axgbe_mdio_mode mode)
+{
+ unsigned int reg_val = 0;
+
+ switch (mode) {
+ case AXGBE_MDIO_MODE_CL22:
+ if (port > AXGMAC_MAX_C22_PORT)
+ return -EINVAL;
+ reg_val |= (1 << port);
+ break;
+ case AXGBE_MDIO_MODE_CL45:
+ break;
+ default:
+ return -EINVAL;
+ }
+ AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
+
+ return 0;
+}
+
+static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
+ int prtad __rte_unused, int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ int mmd_data;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and reading 16 bits of data.
+ */
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ pthread_mutex_lock(&pdata->xpcs_mutex);
+
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ mmd_data = XPCS16_IOREAD(pdata, offset);
+
+ pthread_mutex_unlock(&pdata->xpcs_mutex);
+
+ return mmd_data;
+}
+
+static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
+ int prtad __rte_unused,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int mmd_address, index, offset;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and writing 16 bits of data.
+ */
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ pthread_mutex_lock(&pdata->xpcs_mutex);
+
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ XPCS16_IOWRITE(pdata, offset, mmd_data);
+
+ pthread_mutex_unlock(&pdata->xpcs_mutex);
+}
+
+static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
+ int mmd_reg)
+{
+ switch (pdata->vdata->xpcs_access) {
+ case AXGBE_XPCS_ACCESS_V1:
+ PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
+ return -1;
+ case AXGBE_XPCS_ACCESS_V2:
+ default:
+ return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+ }
+}
+
+static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ switch (pdata->vdata->xpcs_access) {
+ case AXGBE_XPCS_ACCESS_V1:
+ PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
+ return;
+ case AXGBE_XPCS_ACCESS_V2:
+ default:
+ return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
+ }
+}
+
+static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
+{
+ unsigned int ss;
+
+ switch (speed) {
+ case SPEED_1000:
+ ss = 0x03;
+ break;
+ case SPEED_2500:
+ ss = 0x02;
+ break;
+ case SPEED_10000:
+ ss = 0x00;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
+ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
+
+ return 0;
+}
+
+static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+
+ /* Clear MAC flow control */
+ max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = RTE_MIN(pdata->tx_q_count,
+ max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = AXGMAC_IOREAD(pdata, reg);
+ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
+ AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ unsigned int ehfc = 0;
+
+ /* Flow control thresholds are established */
+ if (pdata->rx_rfd[i])
+ ehfc = 1;
+
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
+ }
+
+ /* Set MAC flow control */
+ max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = RTE_MIN(pdata->tx_q_count,
+ max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = AXGMAC_IOREAD(pdata, reg);
+
+ /* Enable transmit flow control */
+ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
+ /* Set pause time */
+ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
+
+ AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
+{
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
+
+ return 0;
+}
+
+static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
+{
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
+
+ return 0;
+}
+
+static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
+{
+ if (pdata->tx_pause)
+ axgbe_enable_tx_flow_control(pdata);
+ else
+ axgbe_disable_tx_flow_control(pdata);
+
+ return 0;
+}
+
+static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
+{
+ if (pdata->rx_pause)
+ axgbe_enable_rx_flow_control(pdata);
+ else
+ axgbe_disable_rx_flow_control(pdata);
+
+ return 0;
+}
+
+static void axgbe_config_flow_control(struct axgbe_port *pdata)
+{
+ axgbe_config_tx_flow_control(pdata);
+ axgbe_config_rx_flow_control(pdata);
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
+}
+
+static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
+ unsigned int queue,
+ unsigned int q_fifo_size)
+{
+ unsigned int frame_fifo_size;
+ unsigned int rfa, rfd;
+
+ frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));
+
+ /* This path deals with just maximum frame sizes which are
+ * limited to a jumbo frame of 9,000 (plus headers, etc.)
+ * so we can never exceed the maximum allowable RFA/RFD
+ * values.
+ */
+ if (q_fifo_size <= 2048) {
+ /* rx_rfd to zero to signal no flow control */
+ pdata->rx_rfa[queue] = 0;
+ pdata->rx_rfd[queue] = 0;
+ return;
+ }
+
+ if (q_fifo_size <= 4096) {
+ /* Between 2048 and 4096 */
+ pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
+ pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= frame_fifo_size) {
+ /* Between 4096 and max-frame */
+ pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
+ pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= (frame_fifo_size * 3)) {
+ /* Between max-frame and 3 max-frames,
+ * trigger if we get just over a frame of data and
+ * resume when we have just under half a frame left.
+ */
+ rfa = q_fifo_size - frame_fifo_size;
+ rfd = rfa + (frame_fifo_size / 2);
+ } else {
+ /* Above 3 max-frames - trigger when just over
+ * 2 frames of space available
+ */
+ rfa = frame_fifo_size * 2;
+ rfa += AXGMAC_FLOW_CONTROL_UNIT;
+ rfd = rfa + frame_fifo_size;
+ }
+
+ pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
+ pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
+}
+
+static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
+{
+ unsigned int q_fifo_size;
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;
+
+ axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
+ }
+}
+
+static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
+ pdata->rx_rfa[i]);
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
+ pdata->rx_rfd[i]);
+ }
+}
+
+static int __axgbe_exit(struct axgbe_port *pdata)
+{
+ unsigned int count = 2000;
+
+ /* Issue a software reset */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
+ rte_delay_us(10);
+
+ /* Poll Until Poll Condition */
+ while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+ rte_delay_us(500);
+
+ if (!count)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int axgbe_exit(struct axgbe_port *pdata)
+{
+ int ret;
+
+ /* To guard against possible incorrectly generated interrupts,
+ * issue the software reset twice.
+ */
+ ret = __axgbe_exit(pdata);
+ if (ret)
+ return ret;
+
+ return __axgbe_exit(pdata);
+}
+
+static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
+{
+ unsigned int i, count;
+
+ if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+ return 0;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
+
+ /* Poll Until Poll Condition */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ count = 2000;
+ while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
+ MTL_Q_TQOMR, FTQ))
+ rte_delay_us(500);
+
+ if (!count)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void axgbe_config_dma_bus(struct axgbe_port *pdata)
+{
+ /* Set enhanced addressing mode */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+
+ /* Out standing read/write requests*/
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);
+
+ /* Set the System Bus mode */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
+}
+
+static void axgbe_config_dma_cache(struct axgbe_port *pdata)
+{
+ unsigned int arcache, awcache, arwcache;
+
+ arcache = 0;
+ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
+ AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+
+ awcache = 0;
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
+ AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+
+ arwcache = 0;
+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
+ AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
+}
+
+static void axgbe_config_edma_control(struct axgbe_port *pdata)
+{
+ AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
+ AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
+}
+
+static int axgbe_config_osp_mode(struct axgbe_port *pdata)
+{
+ /* Force DMA to operate on second packet before closing descriptors
+ * of first packet
+ */
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
+ pdata->tx_osp_mode);
+ }
+
+ return 0;
+}
+
+static int axgbe_config_pblx8(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
+ pdata->pblx8);
+ }
+ return 0;
+}
+
+static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
+ pdata->tx_pbl);
+ }
+
+ return 0;
+}
+
+static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
+{
+ struct axgbe_rx_queue *rxq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+ rxq = pdata->eth_dev->data->rx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
+ pdata->rx_pbl);
+ }
+
+ return 0;
+}
+
+static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
+{
+ struct axgbe_rx_queue *rxq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+ rxq = pdata->eth_dev->data->rx_queues[i];
+
+ rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+ rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
+ ~(AXGBE_RX_BUF_ALIGN - 1);
+
+ if (rxq->buf_size > pdata->rx_buf_size)
+ pdata->rx_buf_size = rxq->buf_size;
+
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
+ rxq->buf_size);
+ }
+}
+
+static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+
+ if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ return -EBUSY;
+
+ AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+ wait = 1000;
+ while (wait--) {
+ if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ return 0;
+
+ rte_delay_us(1500);
+ }
+
+ return -EBUSY;
+}
+
+static int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
+{
+ struct rte_eth_rss_conf *rss_conf;
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key;
+ int ret;
+
+ rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+
+ if (!rss_conf->rss_key)
+ key = (unsigned int *)&pdata->rss_key;
+ else
+ key = (unsigned int *)&rss_conf->rss_key;
+
+ while (key_regs--) {
+ ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = axgbe_write_rss_reg(pdata,
+ AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axgbe_enable_rss(struct axgbe_port *pdata)
+{
+ int ret;
+
+ /* Program the hash key */
+ ret = axgbe_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = axgbe_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+ /* Enable RSS */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+ return 0;
+}
+
+static void axgbe_rss_options(struct axgbe_port *pdata)
+{
+ struct rte_eth_rss_conf *rss_conf;
+ uint64_t rss_hf;
+
+ rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ rss_hf = rss_conf->rss_hf;
+
+ if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+}
+
+static int axgbe_config_rss(struct axgbe_port *pdata)
+{
+ uint32_t i;
+
+ if (pdata->rss_enable) {
+ /* Initialize RSS hash key and lookup table */
+ uint32_t *key = (uint32_t *)pdata->rss_key;
+
+ for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
+ *key++ = (uint32_t)rte_rand();
+ for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
+ AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+ i % pdata->eth_dev->data->nb_rx_queues);
+ axgbe_rss_options(pdata);
+ if (axgbe_enable_rss(pdata)) {
+ PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
+ return -1;
+ }
+ } else {
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+ }
+
+ return 0;
+}
+
+static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int dma_ch_isr, dma_ch_ier;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+
+ /* Clear all the interrupts which are set */
+ dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);
+
+ /* Clear all interrupt enable bits */
+ dma_ch_ier = 0;
+
+ /* Enable following interrupts
+ * NIE - Normal Interrupt Summary Enable
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
+ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
+ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
+ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+
+ /* Enable following Rx interrupts
+ * RBUE - Receive Buffer Unavailable Enable
+ * RIE - Receive Interrupt Enable (unless using
+ * per channel interrupts in edge triggered
+ * mode)
+ */
+ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
+
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
+ }
+}
+
+static void wrapper_tx_desc_init(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ txq->cur = 0;
+ txq->dirty = 0;
+ /* Update the total number of Tx descriptors */
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
+ /* Update the starting address of descriptor ring */
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
+ high32_value(txq->ring_phys_addr));
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
+ low32_value(txq->ring_phys_addr));
+ }
+}
+
+static int wrapper_rx_desc_init(struct axgbe_port *pdata)
+{
+ struct axgbe_rx_queue *rxq;
+ struct rte_mbuf *mbuf;
+ volatile union axgbe_rx_desc *desc;
+ unsigned int i, j;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+ rxq = pdata->eth_dev->data->rx_queues[i];
+
+ /* Initialize software ring entries */
+ rxq->mbuf_alloc = 0;
+ rxq->cur = 0;
+ rxq->dirty = 0;
+ desc = AXGBE_GET_DESC_PT(rxq, 0);
+
+ for (j = 0; j < rxq->nb_desc; j++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
+ (unsigned int)rxq->queue_id, j);
+ axgbe_dev_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ rxq->sw_ring[j] = mbuf;
+ /* Mbuf populate */
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ desc->read.baddr =
+ rte_cpu_to_le_64(
+ rte_mbuf_data_iova_default(mbuf));
+ rte_wmb();
+ AXGMAC_SET_BITS_LE(desc->read.desc3,
+ RX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+ rxq->mbuf_alloc++;
+ desc++;
+ }
+ /* Update the total number of Rx descriptors */
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
+ rxq->nb_desc - 1);
+ /* Update the starting address of descriptor ring */
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
+ high32_value(rxq->ring_phys_addr));
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
+ low32_value(rxq->ring_phys_addr));
+ /* Update the Rx Descriptor Tail Pointer */
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+ low32_value(rxq->ring_phys_addr +
+ (rxq->nb_desc - 1) *
+ sizeof(union axgbe_rx_desc)));
+ }
+ return 0;
+}
+
+static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
+{
+ unsigned int i;
+
+ /* Set Tx to weighted round robin scheduling algorithm */
+ AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
+
+ /* Set Tx traffic classes to use WRR algorithm with equal weights */
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_ETS);
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
+ }
+
+ /* Set Rx to strict priority algorithm */
+ AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+}
+
+static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
+
+ return 0;
+}
+
+static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
+
+ return 0;
+}
+
+static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
+
+ return 0;
+}
+
+static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
+
+ return 0;
+}
+
+/*Distrubting fifo size */
+static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int q_fifo_size;
+ unsigned int p_fifo, i;
+
+ fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
+ pdata->hw_feat.rx_fifo_size);
+ q_fifo_size = fifo_size / pdata->rx_q_count;
+
+ /* Calculate the fifo setting by dividing the queue's fifo size
+ * by the fifo allocation increment (with 0 representing the
+ * base allocation increment so decrement the result
+ * by 1).
+ */
+ p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+ if (p_fifo)
+ p_fifo--;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
+ pdata->fifo = p_fifo;
+
+ /*Calculate and config Flow control threshold*/
+ axgbe_calculate_flow_control_threshold(pdata);
+ axgbe_config_flow_control_threshold(pdata);
+}
+
+static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int q_fifo_size;
+ unsigned int p_fifo, i;
+
+ fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
+ pdata->hw_feat.tx_fifo_size);
+ q_fifo_size = fifo_size / pdata->tx_q_count;
+
+ /* Calculate the fifo setting by dividing the queue's fifo size
+ * by the fifo allocation increment (with 0 representing the
+ * base allocation increment so decrement the result
+ * by 1).
+ */
+ p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+ if (p_fifo)
+ p_fifo--;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
+}
+
+static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
+{
+ unsigned int qptc, qptc_extra, queue;
+ unsigned int i, j, reg, reg_val;
+
+ /* Map the MTL Tx Queues to Traffic Classes
+ * Note: Tx Queues >= Traffic Classes
+ */
+ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+ qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+ for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ for (j = 0; j < qptc; j++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ if (i < qptc_extra)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ }
+
+ if (pdata->rss_enable) {
+ /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
+ reg = MTL_RQDCM0R;
+ reg_val = 0;
+ for (i = 0; i < pdata->rx_q_count;) {
+ reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
+
+ if ((i % MTL_RQDCM_Q_PER_REG) &&
+ (i != pdata->rx_q_count))
+ continue;
+
+ AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MTL_RQDCM_INC;
+ reg_val = 0;
+ }
+ }
+}
+
+static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
+{
+ unsigned int mtl_q_isr;
+ unsigned int q_count, i;
+
+ q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+ for (i = 0; i < q_count; i++) {
+ /* Clear all the interrupts which are set */
+ mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
+ AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
+
+ /* No MTL interrupts to be enabled */
+ AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
+ }
+}
+
+static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+
+ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0] << 0);
+
+ AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
+ AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
+
+ return 0;
+}
+
+static void axgbe_config_mac_address(struct axgbe_port *pdata)
+{
+ axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
+}
+
+static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
+{
+ unsigned int val;
+
+ val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+}
+
+static void axgbe_config_mac_speed(struct axgbe_port *pdata)
+{
+ axgbe_set_speed(pdata, pdata->phy_speed);
+}
+
+static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
+{
+ if (pdata->rx_csum_enable)
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
+ else
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
+}
+
+static int axgbe_init(struct axgbe_port *pdata)
+{
+ int ret;
+
+ /* Flush Tx queues */
+ ret = axgbe_flush_tx_queues(pdata);
+ if (ret)
+ return ret;
+ /* Initialize DMA related features */
+ axgbe_config_dma_bus(pdata);
+ axgbe_config_dma_cache(pdata);
+ axgbe_config_edma_control(pdata);
+ axgbe_config_osp_mode(pdata);
+ axgbe_config_pblx8(pdata);
+ axgbe_config_tx_pbl_val(pdata);
+ axgbe_config_rx_pbl_val(pdata);
+ axgbe_config_rx_buffer_size(pdata);
+ axgbe_config_rss(pdata);
+ wrapper_tx_desc_init(pdata);
+ ret = wrapper_rx_desc_init(pdata);
+ if (ret)
+ return ret;
+ axgbe_enable_dma_interrupts(pdata);
+
+ /* Initialize MTL related features */
+ axgbe_config_mtl_mode(pdata);
+ axgbe_config_queue_mapping(pdata);
+ axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
+ axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
+ axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
+ axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
+ axgbe_config_tx_fifo_size(pdata);
+ axgbe_config_rx_fifo_size(pdata);
+
+ axgbe_enable_mtl_interrupts(pdata);
+
+ /* Initialize MAC related features */
+ axgbe_config_mac_address(pdata);
+ axgbe_config_jumbo_enable(pdata);
+ axgbe_config_flow_control(pdata);
+ axgbe_config_mac_speed(pdata);
+ axgbe_config_checksum_offload(pdata);
+
+ return 0;
+}
+
+void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
+{
+ hw_if->exit = axgbe_exit;
+ hw_if->config_flow_control = axgbe_config_flow_control;
+
+ hw_if->init = axgbe_init;
+
+ hw_if->read_mmd_regs = axgbe_read_mmd_regs;
+ hw_if->write_mmd_regs = axgbe_write_mmd_regs;
+
+ hw_if->set_speed = axgbe_set_speed;
+
+ hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
+ hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
+ hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;
+ /* For FLOW ctrl */
+ hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
+ hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
+}
diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.c
new file mode 100644
index 00000000..9ae9f063
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.c
@@ -0,0 +1,770 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_rxtx.h"
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+
+static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+static int axgbe_dev_configure(struct rte_eth_dev *dev);
+static int axgbe_dev_start(struct rte_eth_dev *dev);
+static void axgbe_dev_stop(struct rte_eth_dev *dev);
+static void axgbe_dev_interrupt_handler(void *param);
+static void axgbe_dev_close(struct rte_eth_dev *dev);
+static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int axgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void axgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static void axgbe_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+
+/* The set of PCI devices this driver supports */
+#define AMD_PCI_VENDOR_ID 0x1022
+#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
+#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459
+
+int axgbe_logtype_init;
+int axgbe_logtype_driver;
+
+static const struct rte_pci_id pci_id_axgbe_map[] = {
+ {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
+ {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
+ { .vendor_id = 0, },
+};
+
+static struct axgbe_version_data axgbe_v2a = {
+ .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = AXGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 229376,
+ .rx_max_fifo_size = 229376,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .an_cdr_workaround = 1,
+};
+
+static struct axgbe_version_data axgbe_v2b = {
+ .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = AXGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+ .rx_max_fifo_size = 65536,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .an_cdr_workaround = 1,
+};
+
+static const struct rte_eth_desc_lim rx_desc_lim = {
+ .nb_max = AXGBE_MAX_RING_DESC,
+ .nb_min = AXGBE_MIN_RING_DESC,
+ .nb_align = 8,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+ .nb_max = AXGBE_MAX_RING_DESC,
+ .nb_min = AXGBE_MIN_RING_DESC,
+ .nb_align = 8,
+};
+
+static const struct eth_dev_ops axgbe_eth_dev_ops = {
+ .dev_configure = axgbe_dev_configure,
+ .dev_start = axgbe_dev_start,
+ .dev_stop = axgbe_dev_stop,
+ .dev_close = axgbe_dev_close,
+ .promiscuous_enable = axgbe_dev_promiscuous_enable,
+ .promiscuous_disable = axgbe_dev_promiscuous_disable,
+ .allmulticast_enable = axgbe_dev_allmulticast_enable,
+ .allmulticast_disable = axgbe_dev_allmulticast_disable,
+ .link_update = axgbe_dev_link_update,
+ .stats_get = axgbe_dev_stats_get,
+ .stats_reset = axgbe_dev_stats_reset,
+ .dev_infos_get = axgbe_dev_info_get,
+ .rx_queue_setup = axgbe_dev_rx_queue_setup,
+ .rx_queue_release = axgbe_dev_rx_queue_release,
+ .tx_queue_setup = axgbe_dev_tx_queue_setup,
+ .tx_queue_release = axgbe_dev_tx_queue_release,
+};
+
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+ pdata->phy_link = -1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ return pdata->phy_if.phy_reset(pdata);
+}
+
+/*
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) regsitered before.
+ *
+ * @return
+ * void
+ */
+static void
+axgbe_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int dma_isr, dma_ch_isr;
+
+ pdata->phy_if.an_isr(pdata);
+ /*DMA related interrupts*/
+ dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
+ if (dma_isr) {
+ if (dma_isr & 1) {
+ dma_ch_isr =
+ AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
+ pdata->rx_queues[0],
+ DMA_CH_SR);
+ AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
+ pdata->rx_queues[0],
+ DMA_CH_SR, dma_ch_isr);
+ }
+ }
+ /* Enable interrupts since disabled after generation*/
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+}
+
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int
+axgbe_dev_configure(struct rte_eth_dev *dev)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+ /* Checksum offload to hardware */
+ pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CHECKSUM;
+ return 0;
+}
+
+static int
+axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
+{
+ struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ pdata->rss_enable = 1;
+ else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+ pdata->rss_enable = 0;
+ else
+ return -1;
+ return 0;
+}
+
+static int
+axgbe_dev_start(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+ int ret;
+
+ /* Multiqueue RSS */
+ ret = axgbe_dev_rx_mq_config(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
+ return ret;
+ }
+ ret = axgbe_phy_reset(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "phy reset failed\n");
+ return ret;
+ }
+ ret = pdata->hw_if.init(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "dev_init failed\n");
+ return ret;
+ }
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+
+ /* phy start*/
+ pdata->phy_if.phy_start(pdata);
+ axgbe_dev_enable_tx(dev);
+ axgbe_dev_enable_rx(dev);
+
+ axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
+ axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
+ return 0;
+}
+
+/* Stop device: disable rx and tx functions to allow for reconfiguring. */
+static void
+axgbe_dev_stop(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ rte_intr_disable(&pdata->pci_dev->intr_handle);
+
+ if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
+ return;
+
+ axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+ axgbe_dev_disable_tx(dev);
+ axgbe_dev_disable_rx(dev);
+
+ pdata->phy_if.phy_stop(pdata);
+ pdata->hw_if.exit(pdata);
+ memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
+ axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+}
+
+/* Clear all resources like TX/RX queues. */
+static void
+axgbe_dev_close(struct rte_eth_dev *dev)
+{
+ axgbe_dev_clear_queues(dev);
+}
+
+static void
+axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
+}
+
+static void
+axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
+}
+
+static void
+axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
+ return;
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
+}
+
+static void
+axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
+ return;
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+axgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+ struct rte_eth_link link;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ rte_delay_ms(800);
+
+ pdata->phy_if.phy_status(pdata);
+
+ memset(&link, 0, sizeof(struct rte_eth_link));
+ link.link_duplex = pdata->phy.duplex;
+ link.link_status = pdata->phy_link;
+ link.link_speed = pdata->phy_speed;
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ ret = rte_eth_linkstatus_set(dev, &link);
+ if (ret == -1)
+ PMD_DRV_LOG(ERR, "No change in link status\n");
+
+ return ret;
+}
+
+static int
+axgbe_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ stats->q_ipackets[i] = rxq->pkts;
+ stats->ipackets += rxq->pkts;
+ stats->q_ibytes[i] = rxq->bytes;
+ stats->ibytes += rxq->bytes;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ stats->q_opackets[i] = txq->pkts;
+ stats->opackets += txq->pkts;
+ stats->q_obytes[i] = txq->bytes;
+ stats->obytes += txq->bytes;
+ }
+
+ return 0;
+}
+
+static void
+axgbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->pkts = 0;
+ rxq->bytes = 0;
+ rxq->errors = 0;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ txq->pkts = 0;
+ txq->bytes = 0;
+ txq->errors = 0;
+ }
+}
+
+static void
+axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ dev_info->max_rx_queues = pdata->rx_ring_count;
+ dev_info->max_tx_queues = pdata->tx_ring_count;
+ dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
+ dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
+ dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
+ dev_info->speed_capa = ETH_LINK_SPEED_10G;
+
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC;
+
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if (pdata->hw_feat.rss) {
+ dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
+ dev_info->reta_size = pdata->hw_feat.hash_table_size;
+ dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
+ }
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = AXGBE_RX_FREE_THRESH,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = AXGBE_TX_FREE_THRESH,
+ };
+}
+
+static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
+{
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+ struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+ mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
+ mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
+ mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
+
+ memset(hw_feat, 0, sizeof(*hw_feat));
+
+ hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);
+
+ /* Hardware feature register 0 */
+ hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
+ hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
+ hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
+ hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
+ hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
+ hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
+ hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
+ hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
+ hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
+ hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
+ hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
+ hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
+ ADDMACADRSEL);
+ hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
+ hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ RXFIFOSIZE);
+ hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ TXFIFOSIZE);
+ hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
+ MAC_HWF1R, ADVTHWORD);
+ hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
+ hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
+ hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
+ hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
+ hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
+ hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
+ hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ HASHTBLSZ);
+ hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ L3L4FNUM);
+
+ /* Hardware feature register 2 */
+ hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
+ hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
+ hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
+ hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
+ hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
+ hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
+ AUXSNAPNUM);
+
+ /* Translate the Hash Table size into actual number */
+ switch (hw_feat->hash_table_size) {
+ case 0:
+ break;
+ case 1:
+ hw_feat->hash_table_size = 64;
+ break;
+ case 2:
+ hw_feat->hash_table_size = 128;
+ break;
+ case 3:
+ hw_feat->hash_table_size = 256;
+ break;
+ }
+
+ /* Translate the address width setting into actual number */
+ switch (hw_feat->dma_width) {
+ case 0:
+ hw_feat->dma_width = 32;
+ break;
+ case 1:
+ hw_feat->dma_width = 40;
+ break;
+ case 2:
+ hw_feat->dma_width = 48;
+ break;
+ default:
+ hw_feat->dma_width = 32;
+ }
+
+ /* The Queue, Channel and TC counts are zero based so increment them
+ * to get the actual number
+ */
+ hw_feat->rx_q_cnt++;
+ hw_feat->tx_q_cnt++;
+ hw_feat->rx_ch_cnt++;
+ hw_feat->tx_ch_cnt++;
+ hw_feat->tc_cnt++;
+
+ /* Translate the fifo sizes into actual numbers */
+ hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
+ hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
+}
+
+static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
+{
+ axgbe_init_function_ptrs_dev(&pdata->hw_if);
+ axgbe_init_function_ptrs_phy(&pdata->phy_if);
+ axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
+ pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
+}
+
+static void axgbe_set_counts(struct axgbe_port *pdata)
+{
+ /* Set all the function pointers */
+ axgbe_init_all_fptrs(pdata);
+
+ /* Populate the hardware features */
+ axgbe_get_all_hw_features(pdata);
+
+ /* Set default max values if not provided */
+ if (!pdata->tx_max_channel_count)
+ pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
+ if (!pdata->rx_max_channel_count)
+ pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
+
+ if (!pdata->tx_max_q_count)
+ pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
+ if (!pdata->rx_max_q_count)
+ pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
+
+ /* Calculate the number of Tx and Rx rings to be created
+ * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+ * the number of Tx queues to the number of Tx channels
+ * enabled
+ * -Rx (DMA) Channels do not map 1-to-1 so use the actual
+ * number of Rx queues or maximum allowed
+ */
+ pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
+ pdata->tx_max_channel_count);
+ pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
+ pdata->tx_max_q_count);
+
+ pdata->tx_q_count = pdata->tx_ring_count;
+
+ pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
+ pdata->rx_max_channel_count);
+
+ pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
+ pdata->rx_max_q_count);
+}
+
+static void axgbe_default_config(struct axgbe_port *pdata)
+{
+ pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+ pdata->tx_pbl = DMA_PBL_32;
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_ENABLE;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_64;
+ pdata->rx_pbl = DMA_PBL_32;
+ pdata->pause_autoneg = 1;
+ pdata->tx_pause = 0;
+ pdata->rx_pause = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->power_down = 0;
+}
+
+/*
+ * It returns 0 on success.
+ */
+static int
+eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, mac_lo, mac_hi;
+ int ret;
+
+ eth_dev->dev_ops = &axgbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pdata = (struct axgbe_port *)eth_dev->data->dev_private;
+ /* initial state */
+ axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+ axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+ pdata->eth_dev = eth_dev;
+
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pdata->pci_dev = pci_dev;
+
+ pdata->xgmac_regs =
+ (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
+ pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
+ + AXGBE_MAC_PROP_OFFSET);
+ pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
+ + AXGBE_I2C_CTRL_OFFSET);
+ pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
+
+ /* version specific driver data*/
+ if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
+ pdata->vdata = &axgbe_v2a;
+ else
+ pdata->vdata = &axgbe_v2b;
+
+ /* Configure the PCS indirect addressing support */
+ reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
+ pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
+ pdata->xpcs_window <<= 6;
+ pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
+ pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
+ pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ PMD_INIT_LOG(DEBUG,
+ "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
+ pdata->xpcs_window_size, pdata->xpcs_window_mask);
+ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
+ /* Retrieve the MAC address */
+ mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
+ mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
+ pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
+ pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
+ pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
+ pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
+ pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
+ pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
+
+ eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
+ ETHER_ADDR_LEN, 0);
+ if (!eth_dev->data->mac_addrs) {
+ PMD_INIT_LOG(ERR,
+ "Failed to alloc %u bytes needed to store MAC addr tbl",
+ ETHER_ADDR_LEN);
+ return -ENOMEM;
+ }
+
+ if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
+ eth_random_addr(pdata->mac_addr.addr_bytes);
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
+
+ /* Clock settings */
+ pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
+ pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
+
+ /* Set the DMA coherency values */
+ pdata->coherent = 1;
+ pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
+ pdata->arcache = AXGBE_DMA_OS_ARCACHE;
+ pdata->awcache = AXGBE_DMA_OS_AWCACHE;
+
+ /* Set the maximum channels and queues */
+ reg = XP_IOREAD(pdata, XP_PROP_1);
+ pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
+ pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
+ pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
+ pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
+
+ /* Set the hardware channel and queue counts */
+ axgbe_set_counts(pdata);
+
+ /* Set the maximum fifo amounts */
+ reg = XP_IOREAD(pdata, XP_PROP_2);
+ pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
+ pdata->tx_max_fifo_size *= 16384;
+ pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
+ pdata->vdata->tx_max_fifo_size);
+ pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
+ pdata->rx_max_fifo_size *= 16384;
+ pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
+ pdata->vdata->rx_max_fifo_size);
+ /* Issue software reset to DMA */
+ ret = pdata->hw_if.exit(pdata);
+ if (ret)
+ PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");
+
+ /* Set default configuration data */
+ axgbe_default_config(pdata);
+
+ /* Set default max values if not provided */
+ if (!pdata->tx_max_fifo_size)
+ pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
+ if (!pdata->rx_max_fifo_size)
+ pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
+
+ pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
+ pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
+ pthread_mutex_init(&pdata->xpcs_mutex, NULL);
+ pthread_mutex_init(&pdata->i2c_mutex, NULL);
+ pthread_mutex_init(&pdata->an_mutex, NULL);
+ pthread_mutex_init(&pdata->phy_mutex, NULL);
+
+ ret = pdata->phy_if.phy_init(pdata);
+ if (ret) {
+ rte_free(eth_dev->data->mac_addrs);
+ return ret;
+ }
+
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ axgbe_dev_interrupt_handler,
+ (void *)eth_dev);
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ return 0;
+}
+
+static int
+eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ /*Free macaddres*/
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ axgbe_dev_clear_queues(eth_dev);
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ axgbe_dev_interrupt_handler,
+ (void *)eth_dev);
+
+ return 0;
+}
+
+static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct axgbe_port), eth_axgbe_dev_init);
+}
+
+static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
+}
+
+static struct rte_pci_driver rte_axgbe_pmd = {
+ .id_table = pci_id_axgbe_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_axgbe_pci_probe,
+ .remove = eth_axgbe_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(axgbe_init_log)
+{
+ axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
+ if (axgbe_logtype_init >= 0)
+ rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
+ axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
+ if (axgbe_logtype_driver >= 0)
+ rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.h b/src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.h
new file mode 100644
index 00000000..b1cd2980
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.h
@@ -0,0 +1,586 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef RTE_ETH_AXGBE_H_
+#define RTE_ETH_AXGBE_H_
+
+#include <rte_mempool.h>
+#include <rte_lcore.h>
+#include "axgbe_common.h"
+
+#define IRQ 0xff
+#define VLAN_HLEN 4
+
+#define AXGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define AXGBE_RX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define AXGBE_RX_MIN_BUF_SIZE (ETHER_MAX_LEN + VLAN_HLEN)
+#define AXGBE_MAX_MAC_ADDRS 1
+
+#define AXGBE_RX_BUF_ALIGN 64
+
+#define AXGBE_MAX_DMA_CHANNELS 16
+#define AXGBE_MAX_QUEUES 16
+#define AXGBE_PRIORITY_QUEUES 8
+#define AXGBE_DMA_STOP_TIMEOUT 1
+
+/* DMA cache settings - Outer sharable, write-back, write-allocate */
+#define AXGBE_DMA_OS_AXDOMAIN 0x2
+#define AXGBE_DMA_OS_ARCACHE 0xb
+#define AXGBE_DMA_OS_AWCACHE 0xf
+
+/* DMA cache settings - System, no caches used */
+#define AXGBE_DMA_SYS_AXDOMAIN 0x3
+#define AXGBE_DMA_SYS_ARCACHE 0x0
+#define AXGBE_DMA_SYS_AWCACHE 0x0
+
+/* DMA channel interrupt modes */
+#define AXGBE_IRQ_MODE_EDGE 0
+#define AXGBE_IRQ_MODE_LEVEL 1
+
+#define AXGBE_DMA_INTERRUPT_MASK 0x31c7
+
+#define AXGMAC_MIN_PACKET 60
+#define AXGMAC_STD_PACKET_MTU 1500
+#define AXGMAC_MAX_STD_PACKET 1518
+#define AXGMAC_JUMBO_PACKET_MTU 9000
+#define AXGMAC_MAX_JUMBO_PACKET 9018
+/* Inter-frame gap + preamble */
+#define AXGMAC_ETH_PREAMBLE (12 + 8)
+
+#define AXGMAC_PFC_DATA_LEN 46
+#define AXGMAC_PFC_DELAYS 14000
+
+/* PCI BAR mapping */
+#define AXGBE_AXGMAC_BAR 0
+#define AXGBE_XPCS_BAR 1
+#define AXGBE_MAC_PROP_OFFSET 0x1d000
+#define AXGBE_I2C_CTRL_OFFSET 0x1e000
+
+/* PCI clock frequencies */
+#define AXGBE_V2_DMA_CLOCK_FREQ 500000000
+#define AXGBE_V2_PTP_CLOCK_FREQ 125000000
+
+#define AXGMAC_FIFO_MIN_ALLOC 2048
+#define AXGMAC_FIFO_UNIT 256
+#define AXGMAC_FIFO_ALIGN(_x) \
+ (((_x) + AXGMAC_FIFO_UNIT - 1) & ~(XGMAC_FIFO_UNIT - 1))
+#define AXGMAC_FIFO_FC_OFF 2048
+#define AXGMAC_FIFO_FC_MIN 4096
+
+#define AXGBE_TC_MIN_QUANTUM 10
+
+/* Flow control queue count */
+#define AXGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+/* Flow control threshold units */
+#define AXGMAC_FLOW_CONTROL_UNIT 512
+#define AXGMAC_FLOW_CONTROL_ALIGN(_x) \
+ (((_x) + AXGMAC_FLOW_CONTROL_UNIT - 1) & \
+ ~(AXGMAC_FLOW_CONTROL_UNIT - 1))
+#define AXGMAC_FLOW_CONTROL_VALUE(_x) \
+ (((_x) < 1024) ? 0 : ((_x) / AXGMAC_FLOW_CONTROL_UNIT) - 2)
+#define AXGMAC_FLOW_CONTROL_MAX 33280
+
+/* Maximum MAC address hash table size (256 bits = 8 bytes) */
+#define AXGBE_MAC_HASH_TABLE_SIZE 8
+
+/* Receive Side Scaling */
+#define AXGBE_RSS_OFFLOAD ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP)
+
+#define AXGBE_RSS_HASH_KEY_SIZE 40
+#define AXGBE_RSS_MAX_TABLE_SIZE 256
+#define AXGBE_RSS_LOOKUP_TABLE_TYPE 0
+#define AXGBE_RSS_HASH_KEY_TYPE 1
+
+/* Auto-negotiation */
+#define AXGBE_AN_MS_TIMEOUT 500
+#define AXGBE_LINK_TIMEOUT 5
+
+#define AXGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define AXGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+#define AXGBE_SGMII_AN_LINK_SPEED_100 0x04
+#define AXGBE_SGMII_AN_LINK_SPEED_1000 0x08
+#define AXGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+
+/* ECC correctable error notification window (seconds) */
+#define AXGBE_ECC_LIMIT 60
+
+/* MDIO port types */
+#define AXGMAC_MAX_C22_PORT 3
+
+/* Helper macro for descriptor handling
+ * Always use AXGBE_GET_DESC_DATA to access the descriptor data
+ * since the index is free-running and needs to be and-ed
+ * with the descriptor count value of the ring to index to
+ * the proper descriptor data.
+ */
+#define AXGBE_GET_DESC_DATA(_ring, _idx) \
+ ((_ring)->rdata + \
+ ((_idx) & ((_ring)->rdesc_count - 1)))
+
+struct axgbe_port;
+
+enum axgbe_state {
+ AXGBE_DOWN,
+ AXGBE_LINK_INIT,
+ AXGBE_LINK_ERR,
+ AXGBE_STOPPED,
+};
+
+enum axgbe_int {
+ AXGMAC_INT_DMA_CH_SR_TI,
+ AXGMAC_INT_DMA_CH_SR_TPS,
+ AXGMAC_INT_DMA_CH_SR_TBU,
+ AXGMAC_INT_DMA_CH_SR_RI,
+ AXGMAC_INT_DMA_CH_SR_RBU,
+ AXGMAC_INT_DMA_CH_SR_RPS,
+ AXGMAC_INT_DMA_CH_SR_TI_RI,
+ AXGMAC_INT_DMA_CH_SR_FBE,
+ AXGMAC_INT_DMA_ALL,
+};
+
+enum axgbe_int_state {
+ AXGMAC_INT_STATE_SAVE,
+ AXGMAC_INT_STATE_RESTORE,
+};
+
+enum axgbe_ecc_sec {
+ AXGBE_ECC_SEC_TX,
+ AXGBE_ECC_SEC_RX,
+ AXGBE_ECC_SEC_DESC,
+};
+
+enum axgbe_speed {
+ AXGBE_SPEED_1000 = 0,
+ AXGBE_SPEED_2500,
+ AXGBE_SPEED_10000,
+ AXGBE_SPEEDS,
+};
+
+enum axgbe_xpcs_access {
+ AXGBE_XPCS_ACCESS_V1 = 0,
+ AXGBE_XPCS_ACCESS_V2,
+};
+
+enum axgbe_an_mode {
+ AXGBE_AN_MODE_CL73 = 0,
+ AXGBE_AN_MODE_CL73_REDRV,
+ AXGBE_AN_MODE_CL37,
+ AXGBE_AN_MODE_CL37_SGMII,
+ AXGBE_AN_MODE_NONE,
+};
+
+enum axgbe_an {
+ AXGBE_AN_READY = 0,
+ AXGBE_AN_PAGE_RECEIVED,
+ AXGBE_AN_INCOMPAT_LINK,
+ AXGBE_AN_COMPLETE,
+ AXGBE_AN_NO_LINK,
+ AXGBE_AN_ERROR,
+};
+
+enum axgbe_rx {
+ AXGBE_RX_BPA = 0,
+ AXGBE_RX_XNP,
+ AXGBE_RX_COMPLETE,
+ AXGBE_RX_ERROR,
+};
+
+enum axgbe_mode {
+ AXGBE_MODE_KX_1000 = 0,
+ AXGBE_MODE_KX_2500,
+ AXGBE_MODE_KR,
+ AXGBE_MODE_X,
+ AXGBE_MODE_SGMII_100,
+ AXGBE_MODE_SGMII_1000,
+ AXGBE_MODE_SFI,
+ AXGBE_MODE_UNKNOWN,
+};
+
+enum axgbe_speedset {
+ AXGBE_SPEEDSET_1000_10000 = 0,
+ AXGBE_SPEEDSET_2500_10000,
+};
+
+enum axgbe_mdio_mode {
+ AXGBE_MDIO_MODE_NONE = 0,
+ AXGBE_MDIO_MODE_CL22,
+ AXGBE_MDIO_MODE_CL45,
+};
+
+struct axgbe_phy {
+ uint32_t supported;
+ uint32_t advertising;
+ uint32_t lp_advertising;
+
+ int address;
+
+ int autoneg;
+ int speed;
+ int duplex;
+
+ int link;
+
+ int pause_autoneg;
+ int tx_pause;
+ int rx_pause;
+};
+
+enum axgbe_i2c_cmd {
+ AXGBE_I2C_CMD_READ = 0,
+ AXGBE_I2C_CMD_WRITE,
+};
+
+struct axgbe_i2c_op {
+ enum axgbe_i2c_cmd cmd;
+
+ unsigned int target;
+
+ uint8_t *buf;
+ unsigned int len;
+};
+
+struct axgbe_i2c_op_state {
+ struct axgbe_i2c_op *op;
+
+ unsigned int tx_len;
+ unsigned char *tx_buf;
+
+ unsigned int rx_len;
+ unsigned char *rx_buf;
+
+ unsigned int tx_abort_source;
+
+ int ret;
+};
+
+struct axgbe_i2c {
+ unsigned int started;
+ unsigned int max_speed_mode;
+ unsigned int rx_fifo_size;
+ unsigned int tx_fifo_size;
+
+ struct axgbe_i2c_op_state op_state;
+};
+
+struct axgbe_hw_if {
+ void (*config_flow_control)(struct axgbe_port *);
+ int (*config_rx_mode)(struct axgbe_port *);
+
+ int (*init)(struct axgbe_port *);
+
+ int (*read_mmd_regs)(struct axgbe_port *, int, int);
+ void (*write_mmd_regs)(struct axgbe_port *, int, int, int);
+ int (*set_speed)(struct axgbe_port *, int);
+
+ int (*set_ext_mii_mode)(struct axgbe_port *, unsigned int,
+ enum axgbe_mdio_mode);
+ int (*read_ext_mii_regs)(struct axgbe_port *, int, int);
+ int (*write_ext_mii_regs)(struct axgbe_port *, int, int, uint16_t);
+
+ /* For FLOW ctrl */
+ int (*config_tx_flow_control)(struct axgbe_port *);
+ int (*config_rx_flow_control)(struct axgbe_port *);
+
+ int (*exit)(struct axgbe_port *);
+};
+
+/* This structure represents implementation specific routines for an
+ * implementation of a PHY. All routines are required unless noted below.
+ * Optional routines:
+ * kr_training_pre, kr_training_post
+ */
+struct axgbe_phy_impl_if {
+ /* Perform Setup/teardown actions */
+ int (*init)(struct axgbe_port *);
+ void (*exit)(struct axgbe_port *);
+
+ /* Perform start/stop specific actions */
+ int (*reset)(struct axgbe_port *);
+ int (*start)(struct axgbe_port *);
+ void (*stop)(struct axgbe_port *);
+
+ /* Return the link status */
+ int (*link_status)(struct axgbe_port *, int *);
+
+ /* Indicate if a particular speed is valid */
+ int (*valid_speed)(struct axgbe_port *, int);
+
+ /* Check if the specified mode can/should be used */
+ bool (*use_mode)(struct axgbe_port *, enum axgbe_mode);
+ /* Switch the PHY into various modes */
+ void (*set_mode)(struct axgbe_port *, enum axgbe_mode);
+ /* Retrieve mode needed for a specific speed */
+ enum axgbe_mode (*get_mode)(struct axgbe_port *, int);
+ /* Retrieve new/next mode when trying to auto-negotiate */
+ enum axgbe_mode (*switch_mode)(struct axgbe_port *);
+ /* Retrieve current mode */
+ enum axgbe_mode (*cur_mode)(struct axgbe_port *);
+
+ /* Retrieve current auto-negotiation mode */
+ enum axgbe_an_mode (*an_mode)(struct axgbe_port *);
+
+ /* Configure auto-negotiation settings */
+ int (*an_config)(struct axgbe_port *);
+
+ /* Set/override auto-negotiation advertisement settings */
+ unsigned int (*an_advertising)(struct axgbe_port *port);
+
+ /* Process results of auto-negotiation */
+ enum axgbe_mode (*an_outcome)(struct axgbe_port *);
+
+ /* Pre/Post auto-negotiation support */
+ void (*an_pre)(struct axgbe_port *port);
+ void (*an_post)(struct axgbe_port *port);
+
+ /* Pre/Post KR training enablement support */
+ void (*kr_training_pre)(struct axgbe_port *);
+ void (*kr_training_post)(struct axgbe_port *);
+};
+
+struct axgbe_phy_if {
+ /* For PHY setup/teardown */
+ int (*phy_init)(struct axgbe_port *);
+ void (*phy_exit)(struct axgbe_port *);
+
+ /* For PHY support when setting device up/down */
+ int (*phy_reset)(struct axgbe_port *);
+ int (*phy_start)(struct axgbe_port *);
+ void (*phy_stop)(struct axgbe_port *);
+
+ /* For PHY support while device is up */
+ void (*phy_status)(struct axgbe_port *);
+ int (*phy_config_aneg)(struct axgbe_port *);
+
+ /* For PHY settings validation */
+ int (*phy_valid_speed)(struct axgbe_port *, int);
+ /* For single interrupt support */
+ void (*an_isr)(struct axgbe_port *);
+ /* PHY implementation specific services */
+ struct axgbe_phy_impl_if phy_impl;
+};
+
+struct axgbe_i2c_if {
+ /* For initial I2C setup */
+ int (*i2c_init)(struct axgbe_port *);
+
+ /* For I2C support when setting device up/down */
+ int (*i2c_start)(struct axgbe_port *);
+ void (*i2c_stop)(struct axgbe_port *);
+
+ /* For performing I2C operations */
+ int (*i2c_xfer)(struct axgbe_port *, struct axgbe_i2c_op *);
+};
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct axgbe_hw_features {
+ /* HW Version */
+ unsigned int version;
+
+ /* HW Feature Register0 */
+ unsigned int gmii; /* 1000 Mbps support */
+ unsigned int vlhash; /* VLAN Hash Filter */
+ unsigned int sma; /* SMA(MDIO) Interface */
+ unsigned int rwk; /* PMT remote wake-up packet */
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+ unsigned int adv_ts_hi; /* Advance Timestamping High Word */
+ unsigned int dma_width; /* DMA width */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+ unsigned int dma_debug; /* DMA Debug Registers */
+ unsigned int rss; /* Receive Side Scaling */
+ unsigned int tc_cnt; /* Number of Traffic Classes */
+ unsigned int hash_table_size; /* Hash Table Size */
+ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
+
+ /* HW Feature Register2 */
+ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
+ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
+ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
+ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
+ unsigned int pps_out_num; /* Number of PPS outputs */
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+};
+
+struct axgbe_version_data {
+ void (*init_function_ptrs_phy_impl)(struct axgbe_phy_if *);
+ enum axgbe_xpcs_access xpcs_access;
+ unsigned int mmc_64bit;
+ unsigned int tx_max_fifo_size;
+ unsigned int rx_max_fifo_size;
+ unsigned int tx_tstamp_workaround;
+ unsigned int ecc_support;
+ unsigned int i2c_support;
+ unsigned int an_cdr_workaround;
+};
+
+/*
+ * Structure to store private data for each port.
+ */
+struct axgbe_port {
+ /* Ethdev where port belongs*/
+ struct rte_eth_dev *eth_dev;
+ /* Pci dev info */
+ const struct rte_pci_device *pci_dev;
+ /* Version related data */
+ struct axgbe_version_data *vdata;
+
+ /* AXGMAC/XPCS related mmio registers */
+ void *xgmac_regs; /* AXGMAC CSRs */
+ void *xpcs_regs; /* XPCS MMD registers */
+ void *xprop_regs; /* AXGBE property registers */
+ void *xi2c_regs; /* AXGBE I2C CSRs */
+
+ bool cdr_track_early;
+ /* XPCS indirect addressing lock */
+ unsigned int xpcs_window_def_reg;
+ unsigned int xpcs_window_sel_reg;
+ unsigned int xpcs_window;
+ unsigned int xpcs_window_size;
+ unsigned int xpcs_window_mask;
+
+ /* Flags representing axgbe_state */
+ unsigned long dev_state;
+
+ struct axgbe_hw_if hw_if;
+ struct axgbe_phy_if phy_if;
+ struct axgbe_i2c_if i2c_if;
+
+ /* AXI DMA settings */
+ unsigned int coherent;
+ unsigned int axdomain;
+ unsigned int arcache;
+ unsigned int awcache;
+
+ unsigned int tx_max_channel_count;
+ unsigned int rx_max_channel_count;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_ring_count;
+ unsigned int rx_desc_count;
+
+ unsigned int tx_max_q_count;
+ unsigned int rx_max_q_count;
+ unsigned int tx_q_count;
+ unsigned int rx_q_count;
+
+ /* Tx/Rx common settings */
+ unsigned int pblx8;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+ unsigned int tx_max_fifo_size;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
+ unsigned int rx_max_fifo_size;
+ unsigned int rx_buf_size;
+
+ /* Device clocks */
+ unsigned long sysclk_rate;
+ unsigned long ptpclk_rate;
+
+ /* Keeps track of power mode */
+ unsigned int power_down;
+
+ /* Current PHY settings */
+ int phy_link;
+ int phy_speed;
+
+ pthread_mutex_t xpcs_mutex;
+ pthread_mutex_t i2c_mutex;
+ pthread_mutex_t an_mutex;
+ pthread_mutex_t phy_mutex;
+
+ /* Flow control settings */
+ unsigned int pause_autoneg;
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+ unsigned int rx_rfa[AXGBE_MAX_QUEUES];
+ unsigned int rx_rfd[AXGBE_MAX_QUEUES];
+ unsigned int fifo;
+
+ /* Receive Side Scaling settings */
+ u8 rss_key[AXGBE_RSS_HASH_KEY_SIZE];
+ uint32_t rss_table[AXGBE_RSS_MAX_TABLE_SIZE];
+ uint32_t rss_options;
+ int rss_enable;
+
+ /* Hardware features of the device */
+ struct axgbe_hw_features hw_feat;
+
+ struct ether_addr mac_addr;
+
+ /* Software Tx/Rx structure pointers*/
+ void **rx_queues;
+ void **tx_queues;
+
+ /* MDIO/PHY related settings */
+ unsigned int phy_started;
+ void *phy_data;
+ struct axgbe_phy phy;
+ int mdio_mmd;
+ unsigned long link_check;
+ volatile int mdio_completion;
+
+ unsigned int kr_redrv;
+
+ /* Auto-negotiation atate machine support */
+ unsigned int an_int;
+ unsigned int an_status;
+ enum axgbe_an an_result;
+ enum axgbe_an an_state;
+ enum axgbe_rx kr_state;
+ enum axgbe_rx kx_state;
+ unsigned int an_supported;
+ unsigned int parallel_detect;
+ unsigned int fec_ability;
+ unsigned long an_start;
+ enum axgbe_an_mode an_mode;
+
+ /* I2C support */
+ struct axgbe_i2c i2c;
+ volatile int i2c_complete;
+
+ /* CRC stripping by H/w for Rx packet*/
+ int crc_strip_enable;
+ /* csum enable to hardware */
+ uint32_t rx_csum_enable;
+};
+
+void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if);
+void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if);
+void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if);
+void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if);
+
+#endif /* RTE_ETH_AXGBE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_i2c.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_i2c.c
new file mode 100644
index 00000000..204ec367
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_i2c.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+
+#define AXGBE_ABORT_COUNT 500
+#define AXGBE_DISABLE_COUNT 1000
+
+#define AXGBE_STD_SPEED 1
+
+#define AXGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX)
+#define AXGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX)
+#define AXGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX)
+#define AXGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX)
+#define AXGBE_DEFAULT_INT_MASK (AXGBE_INTR_RX_FULL | \
+ AXGBE_INTR_TX_EMPTY | \
+ AXGBE_INTR_TX_ABRT | \
+ AXGBE_INTR_STOP_DET)
+
+#define AXGBE_I2C_READ BIT(8)
+#define AXGBE_I2C_STOP BIT(9)
+
+static int axgbe_i2c_abort(struct axgbe_port *pdata)
+{
+ unsigned int wait = AXGBE_ABORT_COUNT;
+
+ /* Must be enabled to recognize the abort request */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1);
+
+ /* Issue the abort */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1);
+
+ while (wait--) {
+ if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT))
+ return 0;
+ rte_delay_us(500);
+ }
+
+ return -EBUSY;
+}
+
+static int axgbe_i2c_set_enable(struct axgbe_port *pdata, bool enable)
+{
+ unsigned int wait = AXGBE_DISABLE_COUNT;
+ unsigned int mode = enable ? 1 : 0;
+
+ while (wait--) {
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode);
+ if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode)
+ return 0;
+
+ rte_delay_us(100);
+ }
+
+ return -EBUSY;
+}
+
+static int axgbe_i2c_disable(struct axgbe_port *pdata)
+{
+ unsigned int ret;
+
+ ret = axgbe_i2c_set_enable(pdata, false);
+ if (ret) {
+ /* Disable failed, try an abort */
+ ret = axgbe_i2c_abort(pdata);
+ if (ret)
+ return ret;
+
+ /* Abort succeeded, try to disable again */
+ ret = axgbe_i2c_set_enable(pdata, false);
+ }
+
+ return ret;
+}
+
+static int axgbe_i2c_enable(struct axgbe_port *pdata)
+{
+ return axgbe_i2c_set_enable(pdata, true);
+}
+
+static void axgbe_i2c_clear_all_interrupts(struct axgbe_port *pdata)
+{
+ XI2C_IOREAD(pdata, IC_CLR_INTR);
+}
+
+static void axgbe_i2c_disable_interrupts(struct axgbe_port *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, 0);
+}
+
+static void axgbe_i2c_enable_interrupts(struct axgbe_port *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, AXGBE_DEFAULT_INT_MASK);
+}
+
+static void axgbe_i2c_write(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int tx_slots;
+ unsigned int cmd;
+
+ /* Configured to never receive Rx overflows, so fill up Tx fifo */
+ tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR);
+ while (tx_slots && state->tx_len) {
+ if (state->op->cmd == AXGBE_I2C_CMD_READ)
+ cmd = AXGBE_I2C_READ;
+ else
+ cmd = *state->tx_buf++;
+
+ if (state->tx_len == 1)
+ XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1);
+
+ XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd);
+
+ tx_slots--;
+ state->tx_len--;
+ }
+
+ /* No more Tx operations, so ignore TX_EMPTY and return */
+ if (!state->tx_len)
+ XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0);
+}
+
+static void axgbe_i2c_read(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int rx_slots;
+
+ /* Anything to be read? */
+ if (state->op->cmd != AXGBE_I2C_CMD_READ)
+ return;
+
+ rx_slots = XI2C_IOREAD(pdata, IC_RXFLR);
+ while (rx_slots && state->rx_len) {
+ *state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD);
+ state->rx_len--;
+ rx_slots--;
+ }
+}
+
+static void axgbe_i2c_clear_isr_interrupts(struct axgbe_port *pdata,
+ unsigned int isr)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+
+ if (isr & AXGBE_INTR_TX_ABRT) {
+ state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE);
+ XI2C_IOREAD(pdata, IC_CLR_TX_ABRT);
+ }
+
+ if (isr & AXGBE_INTR_STOP_DET)
+ XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
+}
+
+static int axgbe_i2c_isr(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int isr;
+
+ isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
+
+ axgbe_i2c_clear_isr_interrupts(pdata, isr);
+
+ if (isr & AXGBE_INTR_TX_ABRT) {
+ axgbe_i2c_disable_interrupts(pdata);
+
+ state->ret = -EIO;
+ goto out;
+ }
+
+ /* Check for data in the Rx fifo */
+ axgbe_i2c_read(pdata);
+
+ /* Fill up the Tx fifo next */
+ axgbe_i2c_write(pdata);
+
+out:
+ /* Complete on an error or STOP condition */
+ if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET))
+ return 1;
+
+ return 0;
+}
+
+static void axgbe_i2c_set_mode(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_CON);
+ XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1);
+ XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1);
+ XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1);
+ XI2C_SET_BITS(reg, IC_CON, SPEED, AXGBE_STD_SPEED);
+ XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1);
+ XI2C_IOWRITE(pdata, IC_CON, reg);
+}
+
+static void axgbe_i2c_get_features(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c *i2c = &pdata->i2c;
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1);
+ i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ MAX_SPEED_MODE);
+ i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ RX_BUFFER_DEPTH);
+ i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ TX_BUFFER_DEPTH);
+}
+
+static void axgbe_i2c_set_target(struct axgbe_port *pdata, unsigned int addr)
+{
+ XI2C_IOWRITE(pdata, IC_TAR, addr);
+}
+
+static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ int ret;
+ uint64_t timeout;
+
+ pthread_mutex_lock(&pdata->i2c_mutex);
+ ret = axgbe_i2c_disable(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
+ return ret;
+ }
+
+ axgbe_i2c_set_target(pdata, op->target);
+
+ memset(state, 0, sizeof(*state));
+ state->op = op;
+ state->tx_len = op->len;
+ state->tx_buf = (unsigned char *)op->buf;
+ state->rx_len = op->len;
+ state->rx_buf = (unsigned char *)op->buf;
+
+ axgbe_i2c_clear_all_interrupts(pdata);
+ ret = axgbe_i2c_enable(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to enable i2c master\n");
+ return ret;
+ }
+
+ /* Enabling the interrupts will cause the TX FIFO empty interrupt to
+ * fire and begin to process the command via the ISR.
+ */
+ axgbe_i2c_enable_interrupts(pdata);
+ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ rte_delay_us(100);
+ if (XI2C_IOREAD(pdata, IC_RAW_INTR_STAT)) {
+ if (axgbe_i2c_isr(pdata))
+ goto success;
+ }
+ }
+
+ PMD_DRV_LOG(ERR, "i2c operation timed out\n");
+ axgbe_i2c_disable_interrupts(pdata);
+ axgbe_i2c_disable(pdata);
+ ret = -ETIMEDOUT;
+ goto unlock;
+
+success:
+ ret = state->ret;
+ if (ret) {
+ if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK)
+ ret = -ENOTCONN;
+ else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST)
+ ret = -EAGAIN;
+ }
+
+unlock:
+ pthread_mutex_unlock(&pdata->i2c_mutex);
+ return ret;
+}
+
+static void axgbe_i2c_stop(struct axgbe_port *pdata)
+{
+ if (!pdata->i2c.started)
+ return;
+
+ pdata->i2c.started = 0;
+ axgbe_i2c_disable_interrupts(pdata);
+ axgbe_i2c_disable(pdata);
+ axgbe_i2c_clear_all_interrupts(pdata);
+}
+
+static int axgbe_i2c_start(struct axgbe_port *pdata)
+{
+ if (pdata->i2c.started)
+ return 0;
+
+ pdata->i2c.started = 1;
+
+ return 0;
+}
+
+static int axgbe_i2c_init(struct axgbe_port *pdata)
+{
+ int ret;
+
+ axgbe_i2c_disable_interrupts(pdata);
+
+ ret = axgbe_i2c_disable(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
+ return ret;
+ }
+
+ axgbe_i2c_get_features(pdata);
+
+ axgbe_i2c_set_mode(pdata);
+
+ axgbe_i2c_clear_all_interrupts(pdata);
+
+ return 0;
+}
+
+void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if)
+{
+ i2c_if->i2c_init = axgbe_i2c_init;
+ i2c_if->i2c_start = axgbe_i2c_start;
+ i2c_if->i2c_stop = axgbe_i2c_stop;
+ i2c_if->i2c_xfer = axgbe_i2c_xfer;
+}
diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_logs.h b/src/spdk/dpdk/drivers/net/axgbe/axgbe_logs.h
new file mode 100644
index 00000000..d1487017
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_logs.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _AXGBE_LOGS_H_
+#define _AXGBE_LOGS_H_
+
+#include <stdio.h>
+
+extern int axgbe_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, axgbe_logtype_init, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#ifdef RTE_LIBRTE_AXGBE_PMD_DEBUG
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+extern int axgbe_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, axgbe_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#endif /* _AXGBE_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_mdio.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_mdio.c
new file mode 100644
index 00000000..2721e5cc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_mdio.c
@@ -0,0 +1,1066 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+
+static void axgbe_an37_clear_interrupts(struct axgbe_port *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
+ reg &= ~AXGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
+}
+
+static void axgbe_an37_disable_interrupts(struct axgbe_port *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
+ reg &= ~AXGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
+ reg &= ~AXGBE_PCS_CL37_BP;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
+}
+
+static void axgbe_an73_clear_interrupts(struct axgbe_port *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+}
+
+static void axgbe_an73_disable_interrupts(struct axgbe_port *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+}
+
+static void axgbe_an73_enable_interrupts(struct axgbe_port *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
+ AXGBE_AN_CL73_INT_MASK);
+}
+
+static void axgbe_an_enable_interrupts(struct axgbe_port *pdata)
+{
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_enable_interrupts(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_MOD_37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_clear_interrupts_all(struct axgbe_port *pdata)
+{
+ axgbe_an73_clear_interrupts(pdata);
+ axgbe_an37_clear_interrupts(pdata);
+}
+
+static void axgbe_an73_enable_kr_training(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+
+ reg |= AXGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
+
+static void axgbe_an73_disable_kr_training(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+
+ reg &= ~AXGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
+
+static void axgbe_kr_mode(struct axgbe_port *pdata)
+{
+ /* Enable KR training */
+ axgbe_an73_enable_kr_training(pdata);
+
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KR);
+}
+
+static void axgbe_kx_2500_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 2.5G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_2500);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_2500);
+}
+
+static void axgbe_kx_1000_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_1000);
+}
+
+static void axgbe_sfi_mode(struct axgbe_port *pdata)
+{
+ /* If a KR re-driver is present, change to KR mode instead */
+ if (pdata->kr_redrv)
+ return axgbe_kr_mode(pdata);
+
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SFI);
+}
+
+static void axgbe_x_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_X);
+}
+
+static void axgbe_sgmii_1000_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_1000);
+}
+
+static void axgbe_sgmii_100_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_100);
+}
+
+static enum axgbe_mode axgbe_cur_mode(struct axgbe_port *pdata)
+{
+ return pdata->phy_if.phy_impl.cur_mode(pdata);
+}
+
+static bool axgbe_in_kr_mode(struct axgbe_port *pdata)
+{
+ return axgbe_cur_mode(pdata) == AXGBE_MODE_KR;
+}
+
+static void axgbe_change_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KX_1000:
+ axgbe_kx_1000_mode(pdata);
+ break;
+ case AXGBE_MODE_KX_2500:
+ axgbe_kx_2500_mode(pdata);
+ break;
+ case AXGBE_MODE_KR:
+ axgbe_kr_mode(pdata);
+ break;
+ case AXGBE_MODE_SGMII_100:
+ axgbe_sgmii_100_mode(pdata);
+ break;
+ case AXGBE_MODE_SGMII_1000:
+ axgbe_sgmii_1000_mode(pdata);
+ break;
+ case AXGBE_MODE_X:
+ axgbe_x_mode(pdata);
+ break;
+ case AXGBE_MODE_SFI:
+ axgbe_sfi_mode(pdata);
+ break;
+ case AXGBE_MODE_UNKNOWN:
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid operation mode requested (%u)\n", mode);
+ }
+}
+
+static void axgbe_switch_mode(struct axgbe_port *pdata)
+{
+ axgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
+}
+
+static void axgbe_set_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ if (mode == axgbe_cur_mode(pdata))
+ return;
+
+ axgbe_change_mode(pdata, mode);
+}
+
+static bool axgbe_use_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ return pdata->phy_if.phy_impl.use_mode(pdata, mode);
+}
+
+static void axgbe_an37_set(struct axgbe_port *pdata, bool enable,
+ bool restart)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1);
+ reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE;
+
+ if (enable)
+ reg |= MDIO_VEND2_CTRL1_AN_ENABLE;
+
+ if (restart)
+ reg |= MDIO_VEND2_CTRL1_AN_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
+}
+
+static void axgbe_an37_disable(struct axgbe_port *pdata)
+{
+ axgbe_an37_set(pdata, false, false);
+ axgbe_an37_disable_interrupts(pdata);
+}
+
+static void axgbe_an73_set(struct axgbe_port *pdata, bool enable,
+ bool restart)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+
+ if (enable)
+ reg |= MDIO_AN_CTRL1_ENABLE;
+
+ if (restart)
+ reg |= MDIO_AN_CTRL1_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+}
+
+static void axgbe_an73_restart(struct axgbe_port *pdata)
+{
+ axgbe_an73_enable_interrupts(pdata);
+ axgbe_an73_set(pdata, true, true);
+}
+
+static void axgbe_an73_disable(struct axgbe_port *pdata)
+{
+ axgbe_an73_set(pdata, false, false);
+ axgbe_an73_disable_interrupts(pdata);
+ pdata->an_start = 0;
+}
+
+static void axgbe_an_restart(struct axgbe_port *pdata)
+{
+ if (pdata->phy_if.phy_impl.an_pre)
+ pdata->phy_if.phy_impl.an_pre(pdata);
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_restart(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_disable(struct axgbe_port *pdata)
+{
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_disable(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_disable_all(struct axgbe_port *pdata)
+{
+ axgbe_an73_disable(pdata);
+ axgbe_an37_disable(pdata);
+}
+
+static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg, reg;
+
+ *state = AXGBE_RX_COMPLETE;
+
+ /* If we're not in KR mode then we're done */
+ if (!axgbe_in_kr_mode(pdata))
+ return AXGBE_AN_PAGE_RECEIVED;
+
+ /* Enable/Disable FEC */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL);
+ reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE);
+ if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+ reg |= pdata->fec_ability;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
+
+ /* Start KR training */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (reg & AXGBE_KR_TRAINING_ENABLE) {
+ if (pdata->phy_if.phy_impl.kr_training_pre)
+ pdata->phy_if.phy_impl.kr_training_pre(pdata);
+
+ reg |= AXGBE_KR_TRAINING_START;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+ reg);
+
+ if (pdata->phy_if.phy_impl.kr_training_post)
+ pdata->phy_if.phy_impl.kr_training_post(pdata);
+ }
+
+ return AXGBE_AN_PAGE_RECEIVED;
+}
+
+static enum axgbe_an axgbe_an73_tx_xnp(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ u16 msg;
+
+ *state = AXGBE_RX_XNP;
+
+ msg = AXGBE_XNP_MCF_NULL_MESSAGE;
+ msg |= AXGBE_XNP_MP_FORMATTED;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+ return AXGBE_AN_PAGE_RECEIVED;
+}
+
+static enum axgbe_an axgbe_an73_rx_bpa(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ unsigned int link_support;
+ unsigned int reg, ad_reg, lp_reg;
+
+ /* Read Base Ability register 2 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+
+ /* Check for a supported mode, otherwise restart in a different one */
+ link_support = axgbe_in_kr_mode(pdata) ? 0x80 : 0x20;
+ if (!(reg & link_support))
+ return AXGBE_AN_INCOMPAT_LINK;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+
+ return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & AXGBE_XNP_NP_EXCHANGE))
+ ? axgbe_an73_tx_xnp(pdata, state)
+ : axgbe_an73_tx_training(pdata, state);
+}
+
+static enum axgbe_an axgbe_an73_rx_xnp(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX);
+
+ return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & AXGBE_XNP_NP_EXCHANGE))
+ ? axgbe_an73_tx_xnp(pdata, state)
+ : axgbe_an73_tx_training(pdata, state);
+}
+
+static enum axgbe_an axgbe_an73_page_received(struct axgbe_port *pdata)
+{
+ enum axgbe_rx *state;
+ unsigned long an_timeout;
+ enum axgbe_an ret;
+ unsigned long ticks;
+
+ if (!pdata->an_start) {
+ pdata->an_start = rte_get_timer_cycles();
+ } else {
+ an_timeout = pdata->an_start +
+ msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT);
+ ticks = rte_get_timer_cycles();
+ if (time_after(ticks, an_timeout)) {
+ /* Auto-negotiation timed out, reset state */
+ pdata->kr_state = AXGBE_RX_BPA;
+ pdata->kx_state = AXGBE_RX_BPA;
+
+ pdata->an_start = rte_get_timer_cycles();
+ }
+ }
+
+ state = axgbe_in_kr_mode(pdata) ? &pdata->kr_state
+ : &pdata->kx_state;
+
+ switch (*state) {
+ case AXGBE_RX_BPA:
+ ret = axgbe_an73_rx_bpa(pdata, state);
+ break;
+ case AXGBE_RX_XNP:
+ ret = axgbe_an73_rx_xnp(pdata, state);
+ break;
+ default:
+ ret = AXGBE_AN_ERROR;
+ }
+
+ return ret;
+}
+
+static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata)
+{
+ /* Be sure we aren't looping trying to negotiate */
+ if (axgbe_in_kr_mode(pdata)) {
+ pdata->kr_state = AXGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) &&
+ !(pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+ return AXGBE_AN_NO_LINK;
+
+ if (pdata->kx_state != AXGBE_RX_BPA)
+ return AXGBE_AN_NO_LINK;
+ } else {
+ pdata->kx_state = AXGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full))
+ return AXGBE_AN_NO_LINK;
+
+ if (pdata->kr_state != AXGBE_RX_BPA)
+ return AXGBE_AN_NO_LINK;
+ }
+
+ axgbe_an_disable(pdata);
+ axgbe_switch_mode(pdata);
+ axgbe_an_restart(pdata);
+
+ return AXGBE_AN_INCOMPAT_LINK;
+}
+
+static void axgbe_an73_state_machine(struct axgbe_port *pdata)
+{
+ enum axgbe_an cur_state = pdata->an_state;
+
+ if (!pdata->an_int)
+ return;
+
+next_int:
+ if (pdata->an_int & AXGBE_AN_CL73_PG_RCV) {
+ pdata->an_state = AXGBE_AN_PAGE_RECEIVED;
+ pdata->an_int &= ~AXGBE_AN_CL73_PG_RCV;
+ } else if (pdata->an_int & AXGBE_AN_CL73_INC_LINK) {
+ pdata->an_state = AXGBE_AN_INCOMPAT_LINK;
+ pdata->an_int &= ~AXGBE_AN_CL73_INC_LINK;
+ } else if (pdata->an_int & AXGBE_AN_CL73_INT_CMPLT) {
+ pdata->an_state = AXGBE_AN_COMPLETE;
+ pdata->an_int &= ~AXGBE_AN_CL73_INT_CMPLT;
+ } else {
+ pdata->an_state = AXGBE_AN_ERROR;
+ }
+
+again:
+ cur_state = pdata->an_state;
+
+ switch (pdata->an_state) {
+ case AXGBE_AN_READY:
+ pdata->an_supported = 0;
+ break;
+ case AXGBE_AN_PAGE_RECEIVED:
+ pdata->an_state = axgbe_an73_page_received(pdata);
+ pdata->an_supported++;
+ break;
+ case AXGBE_AN_INCOMPAT_LINK:
+ pdata->an_supported = 0;
+ pdata->parallel_detect = 0;
+ pdata->an_state = axgbe_an73_incompat_link(pdata);
+ break;
+ case AXGBE_AN_COMPLETE:
+ pdata->parallel_detect = pdata->an_supported ? 0 : 1;
+ break;
+ case AXGBE_AN_NO_LINK:
+ break;
+ default:
+ pdata->an_state = AXGBE_AN_ERROR;
+ }
+
+ if (pdata->an_state == AXGBE_AN_NO_LINK) {
+ pdata->an_int = 0;
+ axgbe_an73_clear_interrupts(pdata);
+ pdata->eth_dev->data->dev_link.link_status =
+ ETH_LINK_DOWN;
+ } else if (pdata->an_state == AXGBE_AN_ERROR) {
+ PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
+ cur_state);
+ pdata->an_int = 0;
+ axgbe_an73_clear_interrupts(pdata);
+ }
+
+ if (pdata->an_state >= AXGBE_AN_COMPLETE) {
+ pdata->an_result = pdata->an_state;
+ pdata->an_state = AXGBE_AN_READY;
+ pdata->kr_state = AXGBE_RX_BPA;
+ pdata->kx_state = AXGBE_RX_BPA;
+ pdata->an_start = 0;
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+ }
+
+ if (cur_state != pdata->an_state)
+ goto again;
+
+ if (pdata->an_int)
+ goto next_int;
+
+ axgbe_an73_enable_interrupts(pdata);
+}
+
+static void axgbe_an73_isr(struct axgbe_port *pdata)
+{
+ /* Disable AN interrupts */
+ axgbe_an73_disable_interrupts(pdata);
+
+ /* Save the interrupt(s) that fired */
+ pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
+
+ if (pdata->an_int) {
+ /* Clear the interrupt(s) that fired and process them */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
+ pthread_mutex_lock(&pdata->an_mutex);
+ axgbe_an73_state_machine(pdata);
+ pthread_mutex_unlock(&pdata->an_mutex);
+ } else {
+ /* Enable AN interrupts */
+ axgbe_an73_enable_interrupts(pdata);
+ }
+}
+
+static void axgbe_an_isr(struct axgbe_port *pdata)
+{
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_isr(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "AN_MODE_37 not supported\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_combined_isr(struct axgbe_port *pdata)
+{
+ axgbe_an_isr(pdata);
+}
+
+static void axgbe_an73_init(struct axgbe_port *pdata)
+{
+ unsigned int advertising, reg;
+
+ advertising = pdata->phy_if.phy_impl.an_advertising(pdata);
+
+ /* Set up Advertisement register 3 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (advertising & ADVERTISED_10000baseR_FEC)
+ reg |= 0xc000;
+ else
+ reg &= ~0xc000;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
+
+ /* Set up Advertisement register 2 next */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ if (advertising & ADVERTISED_10000baseKR_Full)
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+
+ if ((advertising & ADVERTISED_1000baseKX_Full) ||
+ (advertising & ADVERTISED_2500baseX_Full))
+ reg |= 0x20;
+ else
+ reg &= ~0x20;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg);
+
+ /* Set up Advertisement register 1 last */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (advertising & ADVERTISED_Pause)
+ reg |= 0x400;
+ else
+ reg &= ~0x400;
+
+ if (advertising & ADVERTISED_Asym_Pause)
+ reg |= 0x800;
+ else
+ reg &= ~0x800;
+
+ /* We don't intend to perform XNP */
+ reg &= ~AXGBE_XNP_NP_EXCHANGE;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+}
+
+static void axgbe_an_init(struct axgbe_port *pdata)
+{
+ /* Set up advertisement registers based on current settings */
+ pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata);
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_init(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_phy_adjust_link(struct axgbe_port *pdata)
+{
+ if (pdata->phy.link) {
+ /* Flow control support */
+ pdata->pause_autoneg = pdata->phy.pause_autoneg;
+
+ if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause) {
+ pdata->hw_if.config_tx_flow_control(pdata);
+ pdata->tx_pause = pdata->phy.tx_pause;
+ }
+
+ if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause) {
+ pdata->hw_if.config_rx_flow_control(pdata);
+ pdata->rx_pause = pdata->phy.rx_pause;
+ }
+
+ /* Speed support */
+ if (pdata->phy_speed != pdata->phy.speed)
+ pdata->phy_speed = pdata->phy.speed;
+ if (pdata->phy_link != pdata->phy.link)
+ pdata->phy_link = pdata->phy.link;
+ } else if (pdata->phy_link) {
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+}
+
+static int axgbe_phy_config_fixed(struct axgbe_port *pdata)
+{
+ enum axgbe_mode mode;
+
+ /* Disable auto-negotiation */
+ axgbe_an_disable(pdata);
+
+ /* Set specified mode for specified speed */
+ mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed);
+ switch (mode) {
+ case AXGBE_MODE_KX_1000:
+ case AXGBE_MODE_KX_2500:
+ case AXGBE_MODE_KR:
+ case AXGBE_MODE_SGMII_100:
+ case AXGBE_MODE_SGMII_1000:
+ case AXGBE_MODE_X:
+ case AXGBE_MODE_SFI:
+ break;
+ case AXGBE_MODE_UNKNOWN:
+ default:
+ return -EINVAL;
+ }
+
+ /* Validate duplex mode */
+ if (pdata->phy.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ axgbe_set_mode(pdata, mode);
+
+ return 0;
+}
+
+static int __axgbe_phy_config_aneg(struct axgbe_port *pdata)
+{
+ int ret;
+
+ axgbe_set_bit(AXGBE_LINK_INIT, &pdata->dev_state);
+ pdata->link_check = rte_get_timer_cycles();
+
+ ret = pdata->phy_if.phy_impl.an_config(pdata);
+ if (ret)
+ return ret;
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
+ ret = axgbe_phy_config_fixed(pdata);
+ if (ret || !pdata->kr_redrv)
+ return ret;
+ }
+
+ /* Disable auto-negotiation interrupt */
+ rte_intr_disable(&pdata->pci_dev->intr_handle);
+
+ /* Start auto-negotiation in a supported mode */
+ if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_KR);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_KX_2500);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_KX_1000);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_SFI);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_X);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_SGMII_1000);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_SGMII_100);
+ } else {
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+ return -EINVAL;
+ }
+
+ /* Disable and stop any in progress auto-negotiation */
+ axgbe_an_disable_all(pdata);
+
+ /* Clear any auto-negotitation interrupts */
+ axgbe_an_clear_interrupts_all(pdata);
+
+ pdata->an_result = AXGBE_AN_READY;
+ pdata->an_state = AXGBE_AN_READY;
+ pdata->kr_state = AXGBE_RX_BPA;
+ pdata->kx_state = AXGBE_RX_BPA;
+
+ /* Re-enable auto-negotiation interrupt */
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+
+ axgbe_an_init(pdata);
+ axgbe_an_restart(pdata);
+
+ return 0;
+}
+
+static int axgbe_phy_config_aneg(struct axgbe_port *pdata)
+{
+ int ret;
+
+ pthread_mutex_lock(&pdata->an_mutex);
+
+ ret = __axgbe_phy_config_aneg(pdata);
+ if (ret)
+ axgbe_set_bit(AXGBE_LINK_ERR, &pdata->dev_state);
+ else
+ axgbe_clear_bit(AXGBE_LINK_ERR, &pdata->dev_state);
+
+ pthread_mutex_unlock(&pdata->an_mutex);
+
+ return ret;
+}
+
+static bool axgbe_phy_aneg_done(struct axgbe_port *pdata)
+{
+ return pdata->an_result == AXGBE_AN_COMPLETE;
+}
+
+static void axgbe_check_link_timeout(struct axgbe_port *pdata)
+{
+ unsigned long link_timeout;
+ unsigned long ticks;
+
+ link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT *
+ 2 * rte_get_timer_hz());
+ ticks = rte_get_timer_cycles();
+ if (time_after(ticks, link_timeout))
+ axgbe_phy_config_aneg(pdata);
+}
+
+static enum axgbe_mode axgbe_phy_status_aneg(struct axgbe_port *pdata)
+{
+ return pdata->phy_if.phy_impl.an_outcome(pdata);
+}
+
+static void axgbe_phy_status_result(struct axgbe_port *pdata)
+{
+ enum axgbe_mode mode;
+
+ pdata->phy.lp_advertising = 0;
+
+ if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
+ mode = axgbe_cur_mode(pdata);
+ else
+ mode = axgbe_phy_status_aneg(pdata);
+
+ switch (mode) {
+ case AXGBE_MODE_SGMII_100:
+ pdata->phy.speed = SPEED_100;
+ break;
+ case AXGBE_MODE_X:
+ case AXGBE_MODE_KX_1000:
+ case AXGBE_MODE_SGMII_1000:
+ pdata->phy.speed = SPEED_1000;
+ break;
+ case AXGBE_MODE_KX_2500:
+ pdata->phy.speed = SPEED_2500;
+ break;
+ case AXGBE_MODE_KR:
+ case AXGBE_MODE_SFI:
+ pdata->phy.speed = SPEED_10000;
+ break;
+ case AXGBE_MODE_UNKNOWN:
+ default:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ }
+
+ pdata->phy.duplex = DUPLEX_FULL;
+
+ axgbe_set_mode(pdata, mode);
+}
+
+static void axgbe_phy_status(struct axgbe_port *pdata)
+{
+ unsigned int link_aneg;
+ int an_restart;
+
+ if (axgbe_test_bit(AXGBE_LINK_ERR, &pdata->dev_state)) {
+ pdata->phy.link = 0;
+ goto adjust_link;
+ }
+
+ link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+ pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
+ &an_restart);
+ if (an_restart) {
+ axgbe_phy_config_aneg(pdata);
+ return;
+ }
+
+ if (pdata->phy.link) {
+ if (link_aneg && !axgbe_phy_aneg_done(pdata)) {
+ axgbe_check_link_timeout(pdata);
+ return;
+ }
+ axgbe_phy_status_result(pdata);
+ if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state))
+ axgbe_clear_bit(AXGBE_LINK_INIT, &pdata->dev_state);
+ } else {
+ if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state)) {
+ axgbe_check_link_timeout(pdata);
+
+ if (link_aneg)
+ return;
+ }
+ axgbe_phy_status_result(pdata);
+ }
+
+adjust_link:
+ axgbe_phy_adjust_link(pdata);
+}
+
+static void axgbe_phy_stop(struct axgbe_port *pdata)
+{
+ if (!pdata->phy_started)
+ return;
+ /* Indicate the PHY is down */
+ pdata->phy_started = 0;
+ /* Disable auto-negotiation */
+ axgbe_an_disable_all(pdata);
+ pdata->phy_if.phy_impl.stop(pdata);
+ pdata->phy.link = 0;
+ axgbe_phy_adjust_link(pdata);
+}
+
+static int axgbe_phy_start(struct axgbe_port *pdata)
+{
+ int ret;
+
+ ret = pdata->phy_if.phy_impl.start(pdata);
+ if (ret)
+ return ret;
+ /* Set initial mode - call the mode setting routines
+ * directly to insure we are properly configured
+ */
+ if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) {
+ axgbe_kr_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) {
+ axgbe_kx_2500_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) {
+ axgbe_kx_1000_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) {
+ axgbe_sfi_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) {
+ axgbe_x_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) {
+ axgbe_sgmii_1000_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) {
+ axgbe_sgmii_100_mode(pdata);
+ } else {
+ ret = -EINVAL;
+ goto err_stop;
+ }
+ /* Indicate the PHY is up and running */
+ pdata->phy_started = 1;
+ axgbe_an_init(pdata);
+ axgbe_an_enable_interrupts(pdata);
+ return axgbe_phy_config_aneg(pdata);
+
+err_stop:
+ pdata->phy_if.phy_impl.stop(pdata);
+
+ return ret;
+}
+
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+ int ret;
+
+ ret = pdata->phy_if.phy_impl.reset(pdata);
+ if (ret)
+ return ret;
+
+ /* Disable auto-negotiation for now */
+ axgbe_an_disable_all(pdata);
+
+ /* Clear auto-negotiation interrupts */
+ axgbe_an_clear_interrupts_all(pdata);
+
+ return 0;
+}
+
+static int axgbe_phy_best_advertised_speed(struct axgbe_port *pdata)
+{
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+ return SPEED_10000;
+ else if (pdata->phy.advertising & ADVERTISED_10000baseT_Full)
+ return SPEED_10000;
+ else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
+ return SPEED_2500;
+ else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
+ return SPEED_1000;
+ else if (pdata->phy.advertising & ADVERTISED_1000baseT_Full)
+ return SPEED_1000;
+ else if (pdata->phy.advertising & ADVERTISED_100baseT_Full)
+ return SPEED_100;
+
+ return SPEED_UNKNOWN;
+}
+
+static int axgbe_phy_init(struct axgbe_port *pdata)
+{
+ int ret;
+
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+ /* Check for FEC support */
+ pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBR_FECABLE);
+ pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
+ MDIO_PMA_10GBR_FECABLE_ERRABLE);
+
+ /* Setup the phy (including supported features) */
+ ret = pdata->phy_if.phy_impl.init(pdata);
+ if (ret)
+ return ret;
+ pdata->phy.advertising = pdata->phy.supported;
+
+ pdata->phy.address = 0;
+
+ if (pdata->phy.advertising & ADVERTISED_Autoneg) {
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ } else {
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.speed = axgbe_phy_best_advertised_speed(pdata);
+ pdata->phy.duplex = DUPLEX_FULL;
+ }
+
+ pdata->phy.link = 0;
+
+ pdata->phy.pause_autoneg = pdata->pause_autoneg;
+ pdata->phy.tx_pause = pdata->tx_pause;
+ pdata->phy.rx_pause = pdata->rx_pause;
+
+ /* Fix up Flow Control advertising */
+ pdata->phy.advertising &= ~ADVERTISED_Pause;
+ pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
+
+ if (pdata->rx_pause) {
+ pdata->phy.advertising |= ADVERTISED_Pause;
+ pdata->phy.advertising |= ADVERTISED_Asym_Pause;
+ }
+
+ if (pdata->tx_pause)
+ pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+ return 0;
+}
+
+void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if)
+{
+ phy_if->phy_init = axgbe_phy_init;
+ phy_if->phy_reset = axgbe_phy_reset;
+ phy_if->phy_start = axgbe_phy_start;
+ phy_if->phy_stop = axgbe_phy_stop;
+ phy_if->phy_status = axgbe_phy_status;
+ phy_if->phy_config_aneg = axgbe_phy_config_aneg;
+ phy_if->an_isr = axgbe_an_combined_isr;
+}
diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_phy.h b/src/spdk/dpdk/drivers/net/axgbe/axgbe_phy.h
new file mode 100644
index 00000000..77ee20a3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_phy.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef __AXGBE_PHY_H__
+#define __AXGBE_PHY_H__
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define SPEED_10000 10000
+
+
+/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
+ * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips.
+ */
+#define MII_ADDR_C45 (1 << 30)
+
+/* Basic mode status register. */
+#define BMSR_LSTATUS 0x0004 /* Link status */
+
+/* Status register 1. */
+#define MDIO_STAT1_LSTATUS BMSR_LSTATUS
+
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
+#define MII_STAT1000 0x0a /* 1000BASE-T status */
+#define MII_MMD_CTRL 0x0d /* MMD Access Control Register */
+#define MII_MMD_DATA 0x0e /* MMD Access Data Register */
+#define MII_ESTATUS 0x0f /* Extended Status */
+#define MII_DCOUNTER 0x12 /* Disconnect counter */
+#define MII_FCSCOUNTER 0x13 /* False carrier counter */
+#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */
+#define MII_RERRCOUNTER 0x15 /* Receive error counter */
+#define MII_SREVISION 0x16 /* Silicon revision */
+#define MII_RESV1 0x17 /* Reserved... */
+#define MII_LBRERROR 0x18 /* Lpback, rx, bypass error */
+#define MII_PHYADDR 0x19 /* PHY address */
+#define MII_RESV2 0x1a /* Reserved... */
+#define MII_TPISTATUS 0x1b /* TPI status for 10mbps */
+#define MII_NCONFIG 0x1c /* Network interface config */
+
+/* Basic mode control register. */
+#define BMCR_RESV 0x003f /* Unused... */
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+#define BMCR_CTST 0x0080 /* Collision test */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */
+#define BMCR_ISOLATE 0x0400 /* Isolate data paths from MII */
+#define BMCR_PDOWN 0x0800 /* Enable low power state */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
+#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */
+#define BMCR_RESET 0x8000 /* Reset to default state */
+#define BMCR_SPEED10 0x0000 /* Select 10Mbps */
+
+
+/* MDIO Manageable Devices (MMDs). */
+#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment
+ * Physical Medium Dependent
+ */
+#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */
+#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */
+#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */
+#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */
+#define MDIO_MMD_TC 6 /* Transmission Convergence */
+#define MDIO_MMD_AN 7 /* Auto-Negotiation */
+#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */
+#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */
+#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */
+
+/* Generic MDIO registers. */
+#define MDIO_CTRL1 MII_BMCR
+#define MDIO_STAT1 MII_BMSR
+#define MDIO_DEVID1 MII_PHYSID1
+#define MDIO_DEVID2 MII_PHYSID2
+#define MDIO_SPEED 4 /* Speed ability */
+#define MDIO_DEVS1 5 /* Devices in package */
+#define MDIO_DEVS2 6
+#define MDIO_CTRL2 7 /* 10G control 2 */
+#define MDIO_STAT2 8 /* 10G status 2 */
+#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */
+#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */
+#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */
+#define MDIO_PKGID1 14 /* Package identifier */
+#define MDIO_PKGID2 15
+#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */
+#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */
+#define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */
+#define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */
+#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */
+#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */
+#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */
+
+/* Media-dependent registers. */
+#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */
+#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */
+#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A.
+ * Lanes B-D are numbered 134-136.
+ */
+#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */
+#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
+#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
+#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
+
+/* Control register 1. */
+/* Enable extended speed selection */
+#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100)
+/* All speed selection bits */
+#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c)
+#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX
+#define MDIO_CTRL1_LPOWER BMCR_PDOWN
+#define MDIO_CTRL1_RESET BMCR_RESET
+#define MDIO_PMA_CTRL1_LOOPBACK 0x0001
+#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000
+#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100
+#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART
+#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE
+#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */
+#define MDIO_PCS_CTRL1_CLKSTOP_EN 0x400 /* Stop the clock during LPI */
+
+
+
+
+
+/* PMA 10GBASE-R FEC ability register. */
+#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */
+#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */
+
+
+/* Autoneg related */
+#define ADVERTISED_Autoneg (1 << 6)
+#define SUPPORTED_Autoneg (1 << 6)
+#define AUTONEG_DISABLE 0x00
+#define AUTONEG_ENABLE 0x01
+
+#define ADVERTISED_Pause (1 << 13)
+#define ADVERTISED_Asym_Pause (1 << 14)
+
+#define SUPPORTED_Pause (1 << 13)
+#define SUPPORTED_Asym_Pause (1 << 14)
+
+#define SUPPORTED_Backplane (1 << 16)
+#define SUPPORTED_TP (1 << 7)
+
+#define ADVERTISED_10000baseR_FEC (1 << 20)
+
+#define SUPPORTED_10000baseR_FEC (1 << 20)
+
+#define SUPPORTED_FIBRE (1 << 10)
+
+#define ADVERTISED_10000baseKR_Full (1 << 19)
+#define ADVERTISED_10000baseT_Full (1 << 12)
+#define ADVERTISED_2500baseX_Full (1 << 15)
+#define ADVERTISED_1000baseKX_Full (1 << 17)
+#define ADVERTISED_1000baseT_Full (1 << 5)
+#define ADVERTISED_100baseT_Full (1 << 3)
+#define ADVERTISED_TP (1 << 7)
+#define ADVERTISED_FIBRE (1 << 10)
+#define ADVERTISED_Backplane (1 << 16)
+
+#define SUPPORTED_1000baseKX_Full (1 << 17)
+#define SUPPORTED_10000baseKR_Full (1 << 19)
+#define SUPPORTED_2500baseX_Full (1 << 15)
+#define SUPPORTED_100baseT_Full (1 << 2)
+#define SUPPORTED_1000baseT_Full (1 << 5)
+#define SUPPORTED_10000baseT_Full (1 << 12)
+#define SUPPORTED_2500baseX_Full (1 << 15)
+
+
+#define SPEED_UNKNOWN -1
+
+/* Duplex, half or full. */
+#define DUPLEX_HALF 0x00
+#define DUPLEX_FULL 0x01
+#define DUPLEX_UNKNOWN 0xff
+
+#endif
+/* PHY */
diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_phy_impl.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_phy_impl.c
new file mode 100644
index 00000000..973177f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_phy_impl.c
@@ -0,0 +1,2191 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+
+#define AXGBE_PHY_PORT_SPEED_100 BIT(0)
+#define AXGBE_PHY_PORT_SPEED_1000 BIT(1)
+#define AXGBE_PHY_PORT_SPEED_2500 BIT(2)
+#define AXGBE_PHY_PORT_SPEED_10000 BIT(3)
+
+#define AXGBE_MUTEX_RELEASE 0x80000000
+
+#define AXGBE_SFP_DIRECT 7
+
+/* I2C target addresses */
+#define AXGBE_SFP_SERIAL_ID_ADDRESS 0x50
+#define AXGBE_SFP_DIAG_INFO_ADDRESS 0x51
+#define AXGBE_SFP_PHY_ADDRESS 0x56
+#define AXGBE_GPIO_ADDRESS_PCA9555 0x20
+
+/* SFP sideband signal indicators */
+#define AXGBE_GPIO_NO_TX_FAULT BIT(0)
+#define AXGBE_GPIO_NO_RATE_SELECT BIT(1)
+#define AXGBE_GPIO_NO_MOD_ABSENT BIT(2)
+#define AXGBE_GPIO_NO_RX_LOS BIT(3)
+
+/* Rate-change complete wait/retry count */
+#define AXGBE_RATECHANGE_COUNT 500
+
+/* CDR delay values for KR support (in usec) */
+#define AXGBE_CDR_DELAY_INIT 10000
+#define AXGBE_CDR_DELAY_INC 10000
+#define AXGBE_CDR_DELAY_MAX 100000
+
+enum axgbe_port_mode {
+ AXGBE_PORT_MODE_RSVD = 0,
+ AXGBE_PORT_MODE_BACKPLANE,
+ AXGBE_PORT_MODE_BACKPLANE_2500,
+ AXGBE_PORT_MODE_1000BASE_T,
+ AXGBE_PORT_MODE_1000BASE_X,
+ AXGBE_PORT_MODE_NBASE_T,
+ AXGBE_PORT_MODE_10GBASE_T,
+ AXGBE_PORT_MODE_10GBASE_R,
+ AXGBE_PORT_MODE_SFP,
+ AXGBE_PORT_MODE_MAX,
+};
+
+enum axgbe_conn_type {
+ AXGBE_CONN_TYPE_NONE = 0,
+ AXGBE_CONN_TYPE_SFP,
+ AXGBE_CONN_TYPE_MDIO,
+ AXGBE_CONN_TYPE_RSVD1,
+ AXGBE_CONN_TYPE_BACKPLANE,
+ AXGBE_CONN_TYPE_MAX,
+};
+
+/* SFP/SFP+ related definitions */
+enum axgbe_sfp_comm {
+ AXGBE_SFP_COMM_DIRECT = 0,
+ AXGBE_SFP_COMM_PCA9545,
+};
+
+enum axgbe_sfp_cable {
+ AXGBE_SFP_CABLE_UNKNOWN = 0,
+ AXGBE_SFP_CABLE_ACTIVE,
+ AXGBE_SFP_CABLE_PASSIVE,
+};
+
+enum axgbe_sfp_base {
+ AXGBE_SFP_BASE_UNKNOWN = 0,
+ AXGBE_SFP_BASE_1000_T,
+ AXGBE_SFP_BASE_1000_SX,
+ AXGBE_SFP_BASE_1000_LX,
+ AXGBE_SFP_BASE_1000_CX,
+ AXGBE_SFP_BASE_10000_SR,
+ AXGBE_SFP_BASE_10000_LR,
+ AXGBE_SFP_BASE_10000_LRM,
+ AXGBE_SFP_BASE_10000_ER,
+ AXGBE_SFP_BASE_10000_CR,
+};
+
+enum axgbe_sfp_speed {
+ AXGBE_SFP_SPEED_UNKNOWN = 0,
+ AXGBE_SFP_SPEED_100_1000,
+ AXGBE_SFP_SPEED_1000,
+ AXGBE_SFP_SPEED_10000,
+};
+
+/* SFP Serial ID Base ID values relative to an offset of 0 */
+#define AXGBE_SFP_BASE_ID 0
+#define AXGBE_SFP_ID_SFP 0x03
+
+#define AXGBE_SFP_BASE_EXT_ID 1
+#define AXGBE_SFP_EXT_ID_SFP 0x04
+
+#define AXGBE_SFP_BASE_10GBE_CC 3
+#define AXGBE_SFP_BASE_10GBE_CC_SR BIT(4)
+#define AXGBE_SFP_BASE_10GBE_CC_LR BIT(5)
+#define AXGBE_SFP_BASE_10GBE_CC_LRM BIT(6)
+#define AXGBE_SFP_BASE_10GBE_CC_ER BIT(7)
+
+#define AXGBE_SFP_BASE_1GBE_CC 6
+#define AXGBE_SFP_BASE_1GBE_CC_SX BIT(0)
+#define AXGBE_SFP_BASE_1GBE_CC_LX BIT(1)
+#define AXGBE_SFP_BASE_1GBE_CC_CX BIT(2)
+#define AXGBE_SFP_BASE_1GBE_CC_T BIT(3)
+
+#define AXGBE_SFP_BASE_CABLE 8
+#define AXGBE_SFP_BASE_CABLE_PASSIVE BIT(2)
+#define AXGBE_SFP_BASE_CABLE_ACTIVE BIT(3)
+
+#define AXGBE_SFP_BASE_BR 12
+#define AXGBE_SFP_BASE_BR_1GBE_MIN 0x0a
+#define AXGBE_SFP_BASE_BR_1GBE_MAX 0x0d
+#define AXGBE_SFP_BASE_BR_10GBE_MIN 0x64
+#define AXGBE_SFP_BASE_BR_10GBE_MAX 0x68
+
+#define AXGBE_SFP_BASE_CU_CABLE_LEN 18
+
+#define AXGBE_SFP_BASE_VENDOR_NAME 20
+#define AXGBE_SFP_BASE_VENDOR_NAME_LEN 16
+#define AXGBE_SFP_BASE_VENDOR_PN 40
+#define AXGBE_SFP_BASE_VENDOR_PN_LEN 16
+#define AXGBE_SFP_BASE_VENDOR_REV 56
+#define AXGBE_SFP_BASE_VENDOR_REV_LEN 4
+
+#define AXGBE_SFP_BASE_CC 63
+
+/* SFP Serial ID Extended ID values relative to an offset of 64 */
+#define AXGBE_SFP_BASE_VENDOR_SN 4
+#define AXGBE_SFP_BASE_VENDOR_SN_LEN 16
+
+#define AXGBE_SFP_EXTD_DIAG 28
+#define AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
+
+#define AXGBE_SFP_EXTD_SFF_8472 30
+
+#define AXGBE_SFP_EXTD_CC 31
+
+struct axgbe_sfp_eeprom {
+ u8 base[64];
+ u8 extd[32];
+ u8 vendor[32];
+};
+
+#define AXGBE_BEL_FUSE_VENDOR "BEL-FUSE"
+#define AXGBE_BEL_FUSE_PARTNO "1GBT-SFP06"
+
+struct axgbe_sfp_ascii {
+ union {
+ char vendor[AXGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
+ char partno[AXGBE_SFP_BASE_VENDOR_PN_LEN + 1];
+ char rev[AXGBE_SFP_BASE_VENDOR_REV_LEN + 1];
+ char serno[AXGBE_SFP_BASE_VENDOR_SN_LEN + 1];
+ } u;
+};
+
+/* MDIO PHY reset types */
+enum axgbe_mdio_reset {
+ AXGBE_MDIO_RESET_NONE = 0,
+ AXGBE_MDIO_RESET_I2C_GPIO,
+ AXGBE_MDIO_RESET_INT_GPIO,
+ AXGBE_MDIO_RESET_MAX,
+};
+
+/* Re-driver related definitions */
+enum axgbe_phy_redrv_if {
+ AXGBE_PHY_REDRV_IF_MDIO = 0,
+ AXGBE_PHY_REDRV_IF_I2C,
+ AXGBE_PHY_REDRV_IF_MAX,
+};
+
+enum axgbe_phy_redrv_model {
+ AXGBE_PHY_REDRV_MODEL_4223 = 0,
+ AXGBE_PHY_REDRV_MODEL_4227,
+ AXGBE_PHY_REDRV_MODEL_MAX,
+};
+
+enum axgbe_phy_redrv_mode {
+ AXGBE_PHY_REDRV_MODE_CX = 5,
+ AXGBE_PHY_REDRV_MODE_SR = 9,
+};
+
+#define AXGBE_PHY_REDRV_MODE_REG 0x12b0
+
+/* PHY related configuration information */
+struct axgbe_phy_data {
+ enum axgbe_port_mode port_mode;
+
+ unsigned int port_id;
+
+ unsigned int port_speeds;
+
+ enum axgbe_conn_type conn_type;
+
+ enum axgbe_mode cur_mode;
+ enum axgbe_mode start_mode;
+
+ unsigned int rrc_count;
+
+ unsigned int mdio_addr;
+
+ unsigned int comm_owned;
+
+ /* SFP Support */
+ enum axgbe_sfp_comm sfp_comm;
+ unsigned int sfp_mux_address;
+ unsigned int sfp_mux_channel;
+
+ unsigned int sfp_gpio_address;
+ unsigned int sfp_gpio_mask;
+ unsigned int sfp_gpio_rx_los;
+ unsigned int sfp_gpio_tx_fault;
+ unsigned int sfp_gpio_mod_absent;
+ unsigned int sfp_gpio_rate_select;
+
+ unsigned int sfp_rx_los;
+ unsigned int sfp_tx_fault;
+ unsigned int sfp_mod_absent;
+ unsigned int sfp_diags;
+ unsigned int sfp_changed;
+ unsigned int sfp_phy_avail;
+ unsigned int sfp_cable_len;
+ enum axgbe_sfp_base sfp_base;
+ enum axgbe_sfp_cable sfp_cable;
+ enum axgbe_sfp_speed sfp_speed;
+ struct axgbe_sfp_eeprom sfp_eeprom;
+
+ /* External PHY support */
+ enum axgbe_mdio_mode phydev_mode;
+ enum axgbe_mdio_reset mdio_reset;
+ unsigned int mdio_reset_addr;
+ unsigned int mdio_reset_gpio;
+
+ /* Re-driver support */
+ unsigned int redrv;
+ unsigned int redrv_if;
+ unsigned int redrv_addr;
+ unsigned int redrv_lane;
+ unsigned int redrv_model;
+
+ /* KR AN support */
+ unsigned int phy_cdr_notrack;
+ unsigned int phy_cdr_delay;
+};
+
+static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata);
+
+static int axgbe_phy_i2c_xfer(struct axgbe_port *pdata,
+ struct axgbe_i2c_op *i2c_op)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* Be sure we own the bus */
+ if (!phy_data->comm_owned)
+ return -EIO;
+
+ return pdata->i2c_if.i2c_xfer(pdata, i2c_op);
+}
+
+static int axgbe_phy_redrv_write(struct axgbe_port *pdata, unsigned int reg,
+ unsigned int val)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ uint16_t *redrv_val;
+ u8 redrv_data[5], csum;
+ unsigned int i, retry;
+ int ret;
+
+ /* High byte of register contains read/write indicator */
+ redrv_data[0] = ((reg >> 8) & 0xff) << 1;
+ redrv_data[1] = reg & 0xff;
+ redrv_val = (uint16_t *)&redrv_data[2];
+ *redrv_val = rte_cpu_to_be_16(val);
+
+ /* Calculate 1 byte checksum */
+ csum = 0;
+ for (i = 0; i < 4; i++) {
+ csum += redrv_data[i];
+ if (redrv_data[i] > csum)
+ csum++;
+ }
+ redrv_data[4] = ~csum;
+
+ retry = 1;
+again1:
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = sizeof(redrv_data);
+ i2c_op.buf = redrv_data;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+ i2c_op.cmd = AXGBE_I2C_CMD_READ;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = 1;
+ i2c_op.buf = redrv_data;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+ }
+
+ if (redrv_data[0] != 0xff) {
+ PMD_DRV_LOG(ERR, "Redriver write checksum error\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int axgbe_phy_i2c_read(struct axgbe_port *pdata, unsigned int target,
+ void *reg, unsigned int reg_len,
+ void *val, unsigned int val_len)
+{
+ struct axgbe_i2c_op i2c_op;
+ int retry, ret;
+
+ retry = 1;
+again1:
+ /* Set the specified register to read */
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = target;
+ i2c_op.len = reg_len;
+ i2c_op.buf = reg;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+ /* Read the specfied register */
+ i2c_op.cmd = AXGBE_I2C_CMD_READ;
+ i2c_op.target = target;
+ i2c_op.len = val_len;
+ i2c_op.buf = val;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+}
+
+static int axgbe_phy_sfp_put_mux(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ uint8_t mux_channel;
+
+ if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select no mux channels */
+ mux_channel = 0;
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return axgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static int axgbe_phy_sfp_get_mux(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ u8 mux_channel;
+
+ if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select desired mux channel */
+ mux_channel = 1 << phy_data->sfp_mux_channel;
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return axgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static void axgbe_phy_put_comm_ownership(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ phy_data->comm_owned = 0;
+
+ pthread_mutex_unlock(&pdata->phy_mutex);
+}
+
+static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ uint64_t timeout;
+ unsigned int mutex_id;
+
+ if (phy_data->comm_owned)
+ return 0;
+
+ /* The I2C and MDIO/GPIO bus is multiplexed between multiple devices,
+ * the driver needs to take the software mutex and then the hardware
+ * mutexes before being able to use the busses.
+ */
+ pthread_mutex_lock(&pdata->phy_mutex);
+
+ /* Clear the mutexes */
+ XP_IOWRITE(pdata, XP_I2C_MUTEX, AXGBE_MUTEX_RELEASE);
+ XP_IOWRITE(pdata, XP_MDIO_MUTEX, AXGBE_MUTEX_RELEASE);
+
+ /* Mutex formats are the same for I2C and MDIO/GPIO */
+ mutex_id = 0;
+ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id);
+ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1);
+
+ timeout = rte_get_timer_cycles() + (rte_get_timer_hz() * 5);
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ /* Must be all zeroes in order to obtain the mutex */
+ if (XP_IOREAD(pdata, XP_I2C_MUTEX) ||
+ XP_IOREAD(pdata, XP_MDIO_MUTEX)) {
+ rte_delay_us(100);
+ continue;
+ }
+
+ /* Obtain the mutex */
+ XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
+ XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);
+
+ phy_data->comm_owned = 1;
+ return 0;
+ }
+
+ pthread_mutex_unlock(&pdata->phy_mutex);
+
+ PMD_DRV_LOG(ERR, "unable to obtain hardware mutexes\n");
+
+ return -ETIMEDOUT;
+}
+
+static void axgbe_phy_sfp_phy_settings(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->sfp_mod_absent) {
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.advertising = pdata->phy.supported;
+ }
+
+ pdata->phy.advertising &= ~ADVERTISED_Autoneg;
+ pdata->phy.advertising &= ~ADVERTISED_TP;
+ pdata->phy.advertising &= ~ADVERTISED_FIBRE;
+ pdata->phy.advertising &= ~ADVERTISED_100baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_1000baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_10000baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_10000baseR_FEC;
+
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.advertising |= ADVERTISED_Autoneg;
+ break;
+ case AXGBE_SFP_BASE_10000_SR:
+ case AXGBE_SFP_BASE_10000_LR:
+ case AXGBE_SFP_BASE_10000_LRM:
+ case AXGBE_SFP_BASE_10000_ER:
+ case AXGBE_SFP_BASE_10000_CR:
+ default:
+ pdata->phy.speed = SPEED_10000;
+ pdata->phy.duplex = DUPLEX_FULL;
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ break;
+ }
+
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ case AXGBE_SFP_BASE_1000_CX:
+ case AXGBE_SFP_BASE_10000_CR:
+ pdata->phy.advertising |= ADVERTISED_TP;
+ break;
+ default:
+ pdata->phy.advertising |= ADVERTISED_FIBRE;
+ }
+
+ switch (phy_data->sfp_speed) {
+ case AXGBE_SFP_SPEED_100_1000:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100)
+ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case AXGBE_SFP_SPEED_1000:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case AXGBE_SFP_SPEED_10000:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
+ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ break;
+ default:
+ /* Choose the fastest supported speed */
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
+ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100)
+ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ }
+}
+
+static bool axgbe_phy_sfp_bit_rate(struct axgbe_sfp_eeprom *sfp_eeprom,
+ enum axgbe_sfp_speed sfp_speed)
+{
+ u8 *sfp_base, min, max;
+
+ sfp_base = sfp_eeprom->base;
+
+ switch (sfp_speed) {
+ case AXGBE_SFP_SPEED_1000:
+ min = AXGBE_SFP_BASE_BR_1GBE_MIN;
+ max = AXGBE_SFP_BASE_BR_1GBE_MAX;
+ break;
+ case AXGBE_SFP_SPEED_10000:
+ min = AXGBE_SFP_BASE_BR_10GBE_MIN;
+ max = AXGBE_SFP_BASE_BR_10GBE_MAX;
+ break;
+ default:
+ return false;
+ }
+
+ return ((sfp_base[AXGBE_SFP_BASE_BR] >= min) &&
+ (sfp_base[AXGBE_SFP_BASE_BR] <= max));
+}
+
+static void axgbe_phy_sfp_external_phy(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!phy_data->sfp_changed)
+ return;
+
+ phy_data->sfp_phy_avail = 0;
+
+ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
+ return;
+}
+
+static bool axgbe_phy_belfuse_parse_quirks(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+
+ if (memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_NAME],
+ AXGBE_BEL_FUSE_VENDOR, strlen(AXGBE_BEL_FUSE_VENDOR)))
+ return false;
+
+ if (!memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_PN],
+ AXGBE_BEL_FUSE_PARTNO, strlen(AXGBE_BEL_FUSE_PARTNO))) {
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX;
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE;
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_1000;
+ return true;
+ }
+
+ return false;
+}
+
+static bool axgbe_phy_sfp_parse_quirks(struct axgbe_port *pdata)
+{
+ if (axgbe_phy_belfuse_parse_quirks(pdata))
+ return true;
+
+ return false;
+}
+
+static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+ uint8_t *sfp_base;
+
+ sfp_base = sfp_eeprom->base;
+
+ if (sfp_base[AXGBE_SFP_BASE_ID] != AXGBE_SFP_ID_SFP)
+ return;
+
+ if (sfp_base[AXGBE_SFP_BASE_EXT_ID] != AXGBE_SFP_EXT_ID_SFP)
+ return;
+
+ if (axgbe_phy_sfp_parse_quirks(pdata))
+ return;
+
+ /* Assume ACTIVE cable unless told it is PASSIVE */
+ if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_PASSIVE) {
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_PASSIVE;
+ phy_data->sfp_cable_len = sfp_base[AXGBE_SFP_BASE_CU_CABLE_LEN];
+ } else {
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE;
+ }
+
+ /* Determine the type of SFP */
+ if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_SR;
+ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_LR)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_LR;
+ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] &
+ AXGBE_SFP_BASE_10GBE_CC_LRM)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_LRM;
+ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_ER)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_ER;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_SX)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_LX)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_LX;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_CX)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_CX;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_T)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_T;
+ else if ((phy_data->sfp_cable == AXGBE_SFP_CABLE_PASSIVE) &&
+ axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR;
+
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_100_1000;
+ break;
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_1000;
+ break;
+ case AXGBE_SFP_BASE_10000_SR:
+ case AXGBE_SFP_BASE_10000_LR:
+ case AXGBE_SFP_BASE_10000_LRM:
+ case AXGBE_SFP_BASE_10000_ER:
+ case AXGBE_SFP_BASE_10000_CR:
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_10000;
+ break;
+ default:
+ break;
+ }
+}
+
+static bool axgbe_phy_sfp_verify_eeprom(uint8_t cc_in, uint8_t *buf,
+ unsigned int len)
+{
+ uint8_t cc;
+
+ for (cc = 0; len; buf++, len--)
+ cc += *buf;
+
+ return (cc == cc_in) ? true : false;
+}
+
+static int axgbe_phy_sfp_read_eeprom(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_sfp_eeprom sfp_eeprom;
+ uint8_t eeprom_addr;
+ int ret;
+
+ ret = axgbe_phy_sfp_get_mux(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "I2C error setting SFP MUX\n");
+ return ret;
+ }
+
+ /* Read the SFP serial ID eeprom */
+ eeprom_addr = 0;
+ ret = axgbe_phy_i2c_read(pdata, AXGBE_SFP_SERIAL_ID_ADDRESS,
+ &eeprom_addr, sizeof(eeprom_addr),
+ &sfp_eeprom, sizeof(sfp_eeprom));
+ if (ret) {
+ PMD_DRV_LOG(ERR, "I2C error reading SFP EEPROM\n");
+ goto put;
+ }
+
+ /* Validate the contents read */
+ if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[AXGBE_SFP_BASE_CC],
+ sfp_eeprom.base,
+ sizeof(sfp_eeprom.base) - 1)) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[AXGBE_SFP_EXTD_CC],
+ sfp_eeprom.extd,
+ sizeof(sfp_eeprom.extd) - 1)) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ /* Check for an added or changed SFP */
+ if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) {
+ phy_data->sfp_changed = 1;
+ memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom));
+
+ if (sfp_eeprom.extd[AXGBE_SFP_EXTD_SFF_8472]) {
+ uint8_t diag_type;
+ diag_type = sfp_eeprom.extd[AXGBE_SFP_EXTD_DIAG];
+
+ if (!(diag_type & AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
+ phy_data->sfp_diags = 1;
+ }
+ } else {
+ phy_data->sfp_changed = 0;
+ }
+
+put:
+ axgbe_phy_sfp_put_mux(pdata);
+
+ return ret;
+}
+
+static void axgbe_phy_sfp_signals(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int gpio_input;
+ u8 gpio_reg, gpio_ports[2];
+ int ret;
+
+ /* Read the input port registers */
+ gpio_reg = 0;
+ ret = axgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address,
+ &gpio_reg, sizeof(gpio_reg),
+ gpio_ports, sizeof(gpio_ports));
+ if (ret) {
+ PMD_DRV_LOG(ERR, "I2C error reading SFP GPIOs\n");
+ return;
+ }
+
+ gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
+
+ if (phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_MOD_ABSENT) {
+ /* No GPIO, just assume the module is present for now */
+ phy_data->sfp_mod_absent = 0;
+ } else {
+ if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
+ phy_data->sfp_mod_absent = 0;
+ }
+
+ if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_RX_LOS) &&
+ (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
+ phy_data->sfp_rx_los = 1;
+
+ if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_TX_FAULT) &&
+ (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
+ phy_data->sfp_tx_fault = 1;
+}
+
+static void axgbe_phy_sfp_mod_absent(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ phy_data->sfp_mod_absent = 1;
+ phy_data->sfp_phy_avail = 0;
+ memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom));
+}
+
+static void axgbe_phy_sfp_reset(struct axgbe_phy_data *phy_data)
+{
+ phy_data->sfp_rx_los = 0;
+ phy_data->sfp_tx_fault = 0;
+ phy_data->sfp_mod_absent = 1;
+ phy_data->sfp_diags = 0;
+ phy_data->sfp_base = AXGBE_SFP_BASE_UNKNOWN;
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_UNKNOWN;
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_UNKNOWN;
+}
+
+static void axgbe_phy_sfp_detect(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ /* Reset the SFP signals and info */
+ axgbe_phy_sfp_reset(phy_data);
+
+ ret = axgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return;
+
+ /* Read the SFP signals and check for module presence */
+ axgbe_phy_sfp_signals(pdata);
+ if (phy_data->sfp_mod_absent) {
+ axgbe_phy_sfp_mod_absent(pdata);
+ goto put;
+ }
+
+ ret = axgbe_phy_sfp_read_eeprom(pdata);
+ if (ret) {
+ /* Treat any error as if there isn't an SFP plugged in */
+ axgbe_phy_sfp_reset(phy_data);
+ axgbe_phy_sfp_mod_absent(pdata);
+ goto put;
+ }
+
+ axgbe_phy_sfp_parse_eeprom(pdata);
+ axgbe_phy_sfp_external_phy(pdata);
+
+put:
+ axgbe_phy_sfp_phy_settings(pdata);
+ axgbe_phy_put_comm_ownership(pdata);
+}
+
+static void axgbe_phy_phydev_flowctrl(struct axgbe_port *pdata)
+{
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+}
+
+static enum axgbe_mode axgbe_phy_an73_redrv_outcome(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ enum axgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Use external PHY to determine flow control */
+ if (pdata->phy.pause_autoneg)
+ axgbe_phy_phydev_flowctrl(pdata);
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20)
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80) {
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ mode = AXGBE_MODE_KR;
+ break;
+ default:
+ mode = AXGBE_MODE_SFI;
+ break;
+ }
+ } else if (ad_reg & 0x20) {
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ mode = AXGBE_MODE_KX_1000;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_X:
+ mode = AXGBE_MODE_X;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ mode = AXGBE_MODE_SGMII_1000;
+ break;
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ default:
+ mode = AXGBE_MODE_X;
+ break;
+ }
+ break;
+ default:
+ mode = AXGBE_MODE_SGMII_1000;
+ break;
+ }
+ } else {
+ mode = AXGBE_MODE_UNKNOWN;
+ }
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ return mode;
+}
+
+static enum axgbe_mode axgbe_phy_an73_outcome(struct axgbe_port *pdata)
+{
+ enum axgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg & 0x400)
+ pdata->phy.lp_advertising |= ADVERTISED_Pause;
+ if (lp_reg & 0x800)
+ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (ad_reg & lp_reg & 0x400) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x800) {
+ if (ad_reg & 0x400)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x400)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20)
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80)
+ mode = AXGBE_MODE_KR;
+ else if (ad_reg & 0x20)
+ mode = AXGBE_MODE_KX_1000;
+ else
+ mode = AXGBE_MODE_UNKNOWN;
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ return mode;
+}
+
+static enum axgbe_mode axgbe_phy_an_outcome(struct axgbe_port *pdata)
+{
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ return axgbe_phy_an73_outcome(pdata);
+ case AXGBE_AN_MODE_CL73_REDRV:
+ return axgbe_phy_an73_redrv_outcome(pdata);
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static unsigned int axgbe_phy_an_advertising(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int advertising;
+
+ /* Without a re-driver, just return current advertising */
+ if (!phy_data->redrv)
+ return pdata->phy.advertising;
+
+ /* With the KR re-driver we need to advertise a single speed */
+ advertising = pdata->phy.advertising;
+ advertising &= ~ADVERTISED_1000baseKX_Full;
+ advertising &= ~ADVERTISED_10000baseKR_Full;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_NBASE_T:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ PMD_DRV_LOG(ERR, "10GBASE_T mode is not supported\n");
+ break;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ default:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ }
+ break;
+ default:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ }
+
+ return advertising;
+}
+
+static int axgbe_phy_an_config(struct axgbe_port *pdata __rte_unused)
+{
+ return 0;
+ /* Dummy API since there is no case to support
+ * external phy devices registred through kerenl apis
+ */
+}
+
+static enum axgbe_an_mode axgbe_phy_an_sfp_mode(struct axgbe_phy_data *phy_data)
+{
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ return AXGBE_AN_MODE_CL37;
+ default:
+ return AXGBE_AN_MODE_NONE;
+ }
+}
+
+static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* A KR re-driver will always require CL73 AN */
+ if (phy_data->redrv)
+ return AXGBE_AN_MODE_CL73_REDRV;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return AXGBE_AN_MODE_CL73;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return AXGBE_AN_MODE_NONE;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_PORT_MODE_1000BASE_X:
+ return AXGBE_AN_MODE_CL37;
+ case AXGBE_PORT_MODE_NBASE_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return AXGBE_AN_MODE_CL73;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ return AXGBE_AN_MODE_NONE;
+ case AXGBE_PORT_MODE_SFP:
+ return axgbe_phy_an_sfp_mode(phy_data);
+ default:
+ return AXGBE_AN_MODE_NONE;
+ }
+}
+
+static int axgbe_phy_set_redrv_mode_mdio(struct axgbe_port *pdata,
+ enum axgbe_phy_redrv_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ u16 redrv_reg, redrv_val;
+
+ redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+ redrv_val = (u16)mode;
+
+ return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
+ redrv_reg, redrv_val);
+}
+
+static int axgbe_phy_set_redrv_mode_i2c(struct axgbe_port *pdata,
+ enum axgbe_phy_redrv_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int redrv_reg;
+ int ret;
+
+ /* Calculate the register to write */
+ redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+
+ ret = axgbe_phy_redrv_write(pdata, redrv_reg, mode);
+
+ return ret;
+}
+
+static void axgbe_phy_set_redrv_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ enum axgbe_phy_redrv_mode mode;
+ int ret;
+
+ if (!phy_data->redrv)
+ return;
+
+ mode = AXGBE_PHY_REDRV_MODE_CX;
+ if ((phy_data->port_mode == AXGBE_PORT_MODE_SFP) &&
+ (phy_data->sfp_base != AXGBE_SFP_BASE_1000_CX) &&
+ (phy_data->sfp_base != AXGBE_SFP_BASE_10000_CR))
+ mode = AXGBE_PHY_REDRV_MODE_SR;
+
+ ret = axgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return;
+
+ if (phy_data->redrv_if)
+ axgbe_phy_set_redrv_mode_i2c(pdata, mode);
+ else
+ axgbe_phy_set_redrv_mode_mdio(pdata, mode);
+
+ axgbe_phy_put_comm_ownership(pdata);
+}
+
+static void axgbe_phy_start_ratechange(struct axgbe_port *pdata)
+{
+ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ return;
+}
+
+static void axgbe_phy_complete_ratechange(struct axgbe_port *pdata)
+{
+ unsigned int wait;
+
+ /* Wait for command to complete */
+ wait = AXGBE_RATECHANGE_COUNT;
+ while (wait--) {
+ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ return;
+
+ rte_delay_us(1500);
+ }
+}
+
+static void axgbe_phy_rrc(struct axgbe_port *pdata)
+{
+ unsigned int s0;
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* Receiver Reset Cycle */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 5);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ axgbe_phy_complete_ratechange(pdata);
+}
+
+static void axgbe_phy_power_off(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, 0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+ axgbe_phy_complete_ratechange(pdata);
+ phy_data->cur_mode = AXGBE_MODE_UNKNOWN;
+}
+
+static void axgbe_phy_sfi_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ axgbe_phy_set_redrv_mode(pdata);
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* 10G/SFI */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 3);
+ if (phy_data->sfp_cable != AXGBE_SFP_CABLE_PASSIVE) {
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+ } else {
+ if (phy_data->sfp_cable_len <= 1)
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
+ else if (phy_data->sfp_cable_len <= 3)
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
+ else
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
+ }
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+ axgbe_phy_complete_ratechange(pdata);
+ phy_data->cur_mode = AXGBE_MODE_SFI;
+}
+
+static void axgbe_phy_kr_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ axgbe_phy_set_redrv_mode(pdata);
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* 10G/KR */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 4);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+ axgbe_phy_complete_ratechange(pdata);
+ phy_data->cur_mode = AXGBE_MODE_KR;
+}
+
+static enum axgbe_mode axgbe_phy_cur_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ return phy_data->cur_mode;
+}
+
+static enum axgbe_mode axgbe_phy_switch_baset_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* No switching if not 10GBase-T */
+ if (phy_data->port_mode != AXGBE_PORT_MODE_10GBASE_T)
+ return axgbe_phy_cur_mode(pdata);
+
+ switch (axgbe_phy_cur_mode(pdata)) {
+ case AXGBE_MODE_SGMII_100:
+ case AXGBE_MODE_SGMII_1000:
+ return AXGBE_MODE_KR;
+ case AXGBE_MODE_KR:
+ default:
+ return AXGBE_MODE_SGMII_1000;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_switch_bp_2500_mode(struct axgbe_port *pdata
+ __rte_unused)
+{
+ return AXGBE_MODE_KX_2500;
+}
+
+static enum axgbe_mode axgbe_phy_switch_bp_mode(struct axgbe_port *pdata)
+{
+ /* If we are in KR switch to KX, and vice-versa */
+ switch (axgbe_phy_cur_mode(pdata)) {
+ case AXGBE_MODE_KX_1000:
+ return AXGBE_MODE_KR;
+ case AXGBE_MODE_KR:
+ default:
+ return AXGBE_MODE_KX_1000;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_switch_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return axgbe_phy_switch_bp_mode(pdata);
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return axgbe_phy_switch_bp_2500_mode(pdata);
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return axgbe_phy_switch_baset_mode(pdata);
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ case AXGBE_PORT_MODE_SFP:
+ /* No switching, so just return current mode */
+ return axgbe_phy_cur_mode(pdata);
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_basex_mode(struct axgbe_phy_data *phy_data
+ __rte_unused,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return AXGBE_MODE_X;
+ case SPEED_10000:
+ return AXGBE_MODE_KR;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_baset_mode(struct axgbe_phy_data *phy_data
+ __rte_unused,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return AXGBE_MODE_SGMII_100;
+ case SPEED_1000:
+ return AXGBE_MODE_SGMII_1000;
+ case SPEED_10000:
+ return AXGBE_MODE_KR;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_sfp_mode(struct axgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return AXGBE_MODE_SGMII_100;
+ case SPEED_1000:
+ if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T)
+ return AXGBE_MODE_SGMII_1000;
+ else
+ return AXGBE_MODE_X;
+ case SPEED_10000:
+ case SPEED_UNKNOWN:
+ return AXGBE_MODE_SFI;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_bp_2500_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_2500:
+ return AXGBE_MODE_KX_2500;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_bp_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return AXGBE_MODE_KX_1000;
+ case SPEED_10000:
+ return AXGBE_MODE_KR;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_mode(struct axgbe_port *pdata,
+ int speed)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return axgbe_phy_get_bp_mode(speed);
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return axgbe_phy_get_bp_2500_mode(speed);
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return axgbe_phy_get_baset_mode(phy_data, speed);
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ return axgbe_phy_get_basex_mode(phy_data, speed);
+ case AXGBE_PORT_MODE_SFP:
+ return axgbe_phy_get_sfp_mode(phy_data, speed);
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static void axgbe_phy_set_mode(struct axgbe_port *pdata, enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KR:
+ axgbe_phy_kr_mode(pdata);
+ break;
+ case AXGBE_MODE_SFI:
+ axgbe_phy_sfi_mode(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool axgbe_phy_check_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode, u32 advert)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & advert)
+ return true;
+ } else {
+ enum axgbe_mode cur_mode;
+
+ cur_mode = axgbe_phy_get_mode(pdata, pdata->phy.speed);
+ if (cur_mode == mode)
+ return true;
+ }
+
+ return false;
+}
+
+static bool axgbe_phy_use_basex_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_X:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_KR:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_baset_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_SGMII_100:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_100baseT_Full);
+ case AXGBE_MODE_SGMII_1000:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_KR:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_sfp_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (mode) {
+ case AXGBE_MODE_X:
+ if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T)
+ return false;
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_SGMII_100:
+ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
+ return false;
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_100baseT_Full);
+ case AXGBE_MODE_SGMII_1000:
+ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
+ return false;
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_SFI:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_bp_2500_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KX_2500:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_2500baseX_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_bp_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KX_1000:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseKX_Full);
+ case AXGBE_MODE_KR:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseKR_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_mode(struct axgbe_port *pdata, enum axgbe_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return axgbe_phy_use_bp_mode(pdata, mode);
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return axgbe_phy_use_bp_2500_mode(pdata, mode);
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return axgbe_phy_use_baset_mode(pdata, mode);
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ return axgbe_phy_use_basex_mode(pdata, mode);
+ case AXGBE_PORT_MODE_SFP:
+ return axgbe_phy_use_sfp_mode(pdata, mode);
+ default:
+ return false;
+ }
+}
+
+static int axgbe_phy_link_status(struct axgbe_port *pdata, int *an_restart)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ *an_restart = 0;
+
+ if (phy_data->port_mode == AXGBE_PORT_MODE_SFP) {
+ /* Check SFP signals */
+ axgbe_phy_sfp_detect(pdata);
+
+ if (phy_data->sfp_changed) {
+ *an_restart = 1;
+ return 0;
+ }
+
+ if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
+ return 0;
+ }
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg & MDIO_STAT1_LSTATUS)
+ return 1;
+
+ /* No link, attempt a receiver reset cycle */
+ if (phy_data->rrc_count++) {
+ phy_data->rrc_count = 0;
+ axgbe_phy_rrc(pdata);
+ }
+
+ return 0;
+}
+
+static void axgbe_phy_sfp_gpio_setup(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ reg = XP_IOREAD(pdata, XP_PROP_3);
+
+ phy_data->sfp_gpio_address = AXGBE_GPIO_ADDRESS_PCA9555 +
+ XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR);
+
+ phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK);
+
+ phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_RX_LOS);
+ phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_TX_FAULT);
+ phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_MOD_ABS);
+ phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_RATE_SELECT);
+}
+
+static void axgbe_phy_sfp_comm_setup(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg, mux_addr_hi, mux_addr_lo;
+
+ reg = XP_IOREAD(pdata, XP_PROP_4);
+
+ mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI);
+ mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO);
+ if (mux_addr_lo == AXGBE_SFP_DIRECT)
+ return;
+
+ phy_data->sfp_comm = AXGBE_SFP_COMM_PCA9545;
+ phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo;
+ phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN);
+}
+
+static void axgbe_phy_sfp_setup(struct axgbe_port *pdata)
+{
+ axgbe_phy_sfp_comm_setup(pdata);
+ axgbe_phy_sfp_gpio_setup(pdata);
+}
+
+static bool axgbe_phy_redrv_error(struct axgbe_phy_data *phy_data)
+{
+ if (!phy_data->redrv)
+ return false;
+
+ if (phy_data->redrv_if >= AXGBE_PHY_REDRV_IF_MAX)
+ return true;
+
+ switch (phy_data->redrv_model) {
+ case AXGBE_PHY_REDRV_MODEL_4223:
+ if (phy_data->redrv_lane > 3)
+ return true;
+ break;
+ case AXGBE_PHY_REDRV_MODEL_4227:
+ if (phy_data->redrv_lane > 1)
+ return true;
+ break;
+ default:
+ return true;
+ }
+
+ return false;
+}
+
+static int axgbe_phy_mdio_reset_setup(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ if (phy_data->conn_type != AXGBE_CONN_TYPE_MDIO)
+ return 0;
+ reg = XP_IOREAD(pdata, XP_PROP_3);
+ phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET);
+ switch (phy_data->mdio_reset) {
+ case AXGBE_MDIO_RESET_NONE:
+ case AXGBE_MDIO_RESET_I2C_GPIO:
+ case AXGBE_MDIO_RESET_INT_GPIO:
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported MDIO reset (%#x)\n",
+ phy_data->mdio_reset);
+ return -EINVAL;
+ }
+ if (phy_data->mdio_reset == AXGBE_MDIO_RESET_I2C_GPIO) {
+ phy_data->mdio_reset_addr = AXGBE_GPIO_ADDRESS_PCA9555 +
+ XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_I2C_ADDR);
+ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_I2C_GPIO);
+ } else if (phy_data->mdio_reset == AXGBE_MDIO_RESET_INT_GPIO) {
+ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_INT_GPIO);
+ }
+
+ return 0;
+}
+
+static bool axgbe_phy_port_mode_mismatch(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_X:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_NBASE_T:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool axgbe_phy_conn_type_mismatch(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ if (phy_data->conn_type == AXGBE_CONN_TYPE_BACKPLANE)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ if (phy_data->conn_type == AXGBE_CONN_TYPE_MDIO)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ if (phy_data->conn_type == AXGBE_CONN_TYPE_SFP)
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool axgbe_phy_port_enabled(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS))
+ return false;
+ if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE))
+ return false;
+
+ return true;
+}
+
+static void axgbe_phy_cdr_track(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!pdata->vdata->an_cdr_workaround)
+ return;
+
+ if (!phy_data->phy_cdr_notrack)
+ return;
+
+ rte_delay_us(phy_data->phy_cdr_delay + 400);
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ AXGBE_PMA_CDR_TRACK_EN_MASK,
+ AXGBE_PMA_CDR_TRACK_EN_ON);
+
+ phy_data->phy_cdr_notrack = 0;
+}
+
+static void axgbe_phy_cdr_notrack(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!pdata->vdata->an_cdr_workaround)
+ return;
+
+ if (phy_data->phy_cdr_notrack)
+ return;
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ AXGBE_PMA_CDR_TRACK_EN_MASK,
+ AXGBE_PMA_CDR_TRACK_EN_OFF);
+
+ axgbe_phy_rrc(pdata);
+
+ phy_data->phy_cdr_notrack = 1;
+}
+
+static void axgbe_phy_kr_training_post(struct axgbe_port *pdata)
+{
+ if (!pdata->cdr_track_early)
+ axgbe_phy_cdr_track(pdata);
+}
+
+static void axgbe_phy_kr_training_pre(struct axgbe_port *pdata)
+{
+ if (pdata->cdr_track_early)
+ axgbe_phy_cdr_track(pdata);
+}
+
+static void axgbe_phy_an_post(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != AXGBE_MODE_KR)
+ break;
+
+ axgbe_phy_cdr_track(pdata);
+
+ switch (pdata->an_result) {
+ case AXGBE_AN_READY:
+ case AXGBE_AN_COMPLETE:
+ break;
+ default:
+ if (phy_data->phy_cdr_delay < AXGBE_CDR_DELAY_MAX)
+ phy_data->phy_cdr_delay += AXGBE_CDR_DELAY_INC;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_phy_an_pre(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != AXGBE_MODE_KR)
+ break;
+
+ axgbe_phy_cdr_notrack(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_phy_stop(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* Reset SFP data */
+ axgbe_phy_sfp_reset(phy_data);
+ axgbe_phy_sfp_mod_absent(pdata);
+
+ /* Reset CDR support */
+ axgbe_phy_cdr_track(pdata);
+
+ /* Power off the PHY */
+ axgbe_phy_power_off(pdata);
+
+ /* Stop the I2C controller */
+ pdata->i2c_if.i2c_stop(pdata);
+}
+
+static int axgbe_phy_start(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ /* Start the I2C controller */
+ ret = pdata->i2c_if.i2c_start(pdata);
+ if (ret)
+ return ret;
+
+ /* Start in highest supported mode */
+ axgbe_phy_set_mode(pdata, phy_data->start_mode);
+
+ /* Reset CDR support */
+ axgbe_phy_cdr_track(pdata);
+
+ /* After starting the I2C controller, we can check for an SFP */
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_SFP:
+ axgbe_phy_sfp_detect(pdata);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ enum axgbe_mode cur_mode;
+
+ /* Reset by power cycling the PHY */
+ cur_mode = phy_data->cur_mode;
+ axgbe_phy_power_off(pdata);
+ /* First time reset is done with passed unknown mode*/
+ axgbe_phy_set_mode(pdata, cur_mode);
+ return 0;
+}
+
+static int axgbe_phy_init(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data;
+ unsigned int reg;
+ int ret;
+
+ /* Check if enabled */
+ if (!axgbe_phy_port_enabled(pdata)) {
+ PMD_DRV_LOG(ERR, "device is not enabled\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the I2C controller */
+ ret = pdata->i2c_if.i2c_init(pdata);
+ if (ret)
+ return ret;
+
+ phy_data = rte_zmalloc("phy_data memory", sizeof(*phy_data), 0);
+ if (!phy_data) {
+ PMD_DRV_LOG(ERR, "phy_data allocation failed\n");
+ return -ENOMEM;
+ }
+ pdata->phy_data = phy_data;
+
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE);
+ phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+ phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS);
+ phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE);
+ phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR);
+
+ reg = XP_IOREAD(pdata, XP_PROP_4);
+ phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT);
+ phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF);
+ phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR);
+ phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE);
+ phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL);
+
+ /* Validate the connection requested */
+ if (axgbe_phy_conn_type_mismatch(pdata)) {
+ PMD_DRV_LOG(ERR, "phy mode/connection mismatch (%#x/%#x)\n",
+ phy_data->port_mode, phy_data->conn_type);
+ return -EINVAL;
+ }
+
+ /* Validate the mode requested */
+ if (axgbe_phy_port_mode_mismatch(pdata)) {
+ PMD_DRV_LOG(ERR, "phy mode/speed mismatch (%#x/%#x)\n",
+ phy_data->port_mode, phy_data->port_speeds);
+ return -EINVAL;
+ }
+
+ /* Check for and validate MDIO reset support */
+ ret = axgbe_phy_mdio_reset_setup(pdata);
+ if (ret)
+ return ret;
+
+ /* Validate the re-driver information */
+ if (axgbe_phy_redrv_error(phy_data)) {
+ PMD_DRV_LOG(ERR, "phy re-driver settings error\n");
+ return -EINVAL;
+ }
+ pdata->kr_redrv = phy_data->redrv;
+
+ /* Indicate current mode is unknown */
+ phy_data->cur_mode = AXGBE_MODE_UNKNOWN;
+
+ /* Initialize supported features */
+ pdata->phy.supported = 0;
+
+ switch (phy_data->port_mode) {
+ /* Backplane support */
+ case AXGBE_PORT_MODE_BACKPLANE:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
+ phy_data->start_mode = AXGBE_MODE_KX_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |=
+ SUPPORTED_10000baseR_FEC;
+ phy_data->start_mode = AXGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ phy_data->start_mode = AXGBE_MODE_KX_2500;
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+
+ /* MDIO 1GBase-T support */
+ case AXGBE_PORT_MODE_1000BASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO Base-X support */
+ case AXGBE_PORT_MODE_1000BASE_X:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_X;
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO NBase-T support */
+ case AXGBE_PORT_MODE_NBASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500) {
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ phy_data->start_mode = AXGBE_MODE_KX_2500;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL45;
+ break;
+
+ /* 10GBase-T support */
+ case AXGBE_PORT_MODE_10GBASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+
+ /* 10GBase-R support */
+ case AXGBE_PORT_MODE_10GBASE_R:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
+ phy_data->start_mode = AXGBE_MODE_SFI;
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+
+ /* SFP support */
+ case AXGBE_PORT_MODE_SFP:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SFI;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |=
+ SUPPORTED_10000baseR_FEC;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
+
+ axgbe_phy_sfp_setup(pdata);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((phy_data->conn_type & AXGBE_CONN_TYPE_MDIO) &&
+ (phy_data->phydev_mode != AXGBE_MDIO_MODE_NONE)) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
+ phy_data->phydev_mode);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "mdio port/clause not compatible (%d/%u)\n",
+ phy_data->mdio_addr, phy_data->phydev_mode);
+ return -EINVAL;
+ }
+ }
+
+ if (phy_data->redrv && !phy_data->redrv_if) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
+ AXGBE_MDIO_MODE_CL22);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "redriver mdio port not compatible (%u)\n",
+ phy_data->redrv_addr);
+ return -EINVAL;
+ }
+ }
+
+ phy_data->phy_cdr_delay = AXGBE_CDR_DELAY_INIT;
+ return 0;
+}
+void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if)
+{
+ struct axgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
+
+ phy_impl->init = axgbe_phy_init;
+ phy_impl->reset = axgbe_phy_reset;
+ phy_impl->start = axgbe_phy_start;
+ phy_impl->stop = axgbe_phy_stop;
+ phy_impl->link_status = axgbe_phy_link_status;
+ phy_impl->use_mode = axgbe_phy_use_mode;
+ phy_impl->set_mode = axgbe_phy_set_mode;
+ phy_impl->get_mode = axgbe_phy_get_mode;
+ phy_impl->switch_mode = axgbe_phy_switch_mode;
+ phy_impl->cur_mode = axgbe_phy_cur_mode;
+ phy_impl->an_mode = axgbe_phy_an_mode;
+ phy_impl->an_config = axgbe_phy_an_config;
+ phy_impl->an_advertising = axgbe_phy_an_advertising;
+ phy_impl->an_outcome = axgbe_phy_an_outcome;
+
+ phy_impl->an_pre = axgbe_phy_an_pre;
+ phy_impl->an_post = axgbe_phy_an_post;
+
+ phy_impl->kr_training_pre = axgbe_phy_kr_training_pre;
+ phy_impl->kr_training_post = axgbe_phy_kr_training_post;
+}
diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.c
new file mode 100644
index 00000000..c5fd5f41
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_rxtx.h"
+#include "axgbe_phy.h"
+
+#include <rte_time.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+static void
+axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
+{
+ uint16_t i;
+ struct rte_mbuf **sw_ring;
+
+ if (rx_queue) {
+ sw_ring = rx_queue->sw_ring;
+ if (sw_ring) {
+ for (i = 0; i < rx_queue->nb_desc; i++) {
+ if (sw_ring[i])
+ rte_pktmbuf_free(sw_ring[i]);
+ }
+ rte_free(sw_ring);
+ }
+ rte_free(rx_queue);
+ }
+}
+
+void axgbe_dev_rx_queue_release(void *rxq)
+{
+ axgbe_rx_queue_release(rxq);
+}
+
+int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint32_t size;
+ const struct rte_memzone *dma;
+ struct axgbe_rx_queue *rxq;
+ uint32_t rx_desc = nb_desc;
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ /*
+ * validate Rx descriptors count
+ * should be power of 2 and less than h/w supported
+ */
+ if ((!rte_is_power_of_2(rx_desc)) ||
+ rx_desc > pdata->rx_desc_count)
+ return -EINVAL;
+ /* First allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue",
+ sizeof(struct axgbe_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
+ return -ENOMEM;
+ }
+
+ rxq->cur = 0;
+ rxq->dirty = 0;
+ rxq->pdata = pdata;
+ rxq->mb_pool = mp;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->nb_desc = rx_desc;
+ rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * rxq->queue_id));
+ rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
+ DMA_CH_RDTR_LO);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+
+ /* CRC strip in AXGBE supports per port not per queue */
+ pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
+ rxq->free_thresh = rx_conf->rx_free_thresh ?
+ rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
+ if (rxq->free_thresh > rxq->nb_desc)
+ rxq->free_thresh = rxq->nb_desc >> 3;
+
+ /* Allocate RX ring hardware descriptors */
+ size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
+ dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
+ socket_id);
+ if (!dma) {
+ PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
+ axgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ rxq->ring_phys_addr = (uint64_t)dma->phys_addr;
+ rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
+ memset((void *)rxq->desc, 0, size);
+ /* Allocate software ring */
+ size = rxq->nb_desc * sizeof(struct rte_mbuf *);
+ rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq->sw_ring) {
+ PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
+ axgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ dev->data->rx_queues[queue_idx] = rxq;
+ if (!pdata->rx_queues)
+ pdata->rx_queues = dev->data->rx_queues;
+
+ return 0;
+}
+
+static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
+ unsigned int queue)
+{
+ unsigned int rx_status;
+ unsigned long rx_timeout;
+
+ /* The Rx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Rx queue to empty the Rx fifo. Don't
+ * wait forever though...
+ */
+ rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
+ rte_get_timer_hz());
+
+ while (time_before(rte_get_timer_cycles(), rx_timeout)) {
+ rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
+ if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
+ (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
+ break;
+
+ rte_delay_us(900);
+ }
+
+ if (!time_before(rte_get_timer_cycles(), rx_timeout))
+ PMD_DRV_LOG(ERR,
+ "timed out waiting for Rx queue %u to empty\n",
+ queue);
+}
+
+void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+
+ /* Disable MAC Rx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
+
+ /* Prepare for Rx DMA channel stop */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ axgbe_prepare_rx_stop(pdata, i);
+ }
+ /* Disable each Rx queue */
+ AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ /* Disable Rx DMA channel */
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
+ }
+}
+
+void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+ unsigned int reg_val = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ /* Enable Rx DMA channel */
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
+ }
+
+ reg_val = 0;
+ for (i = 0; i < pdata->rx_q_count; i++)
+ reg_val |= (0x02 << (i << 1));
+ AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
+
+ /* Enable MAC Rx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
+ /* Frame is forwarded after stripping CRC to application*/
+ if (pdata->crc_strip_enable) {
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
+ }
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
+}
+
+/* Rx function one to one refresh */
+uint16_t
+axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint16_t nb_rx = 0;
+ struct axgbe_rx_queue *rxq = rx_queue;
+ volatile union axgbe_rx_desc *desc;
+ uint64_t old_dirty = rxq->dirty;
+ struct rte_mbuf *mbuf, *tmbuf;
+ unsigned int err;
+ uint32_t error_status;
+ uint16_t idx, pidx, pkt_len;
+
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
+ while (nb_rx < nb_pkts) {
+ if (unlikely(idx == rxq->nb_desc))
+ idx = 0;
+
+ desc = &rxq->desc[idx];
+
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
+ break;
+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (unlikely(!tmbuf)) {
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
+ " queue_id = %u\n",
+ (unsigned int)rxq->port_id,
+ (unsigned int)rxq->queue_id);
+ rte_eth_devices[
+ rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ pidx = idx + 1;
+ if (unlikely(pidx == rxq->nb_desc))
+ pidx = 0;
+
+ rte_prefetch0(rxq->sw_ring[pidx]);
+ if ((pidx & 0x3) == 0) {
+ rte_prefetch0(&rxq->desc[pidx]);
+ rte_prefetch0(&rxq->sw_ring[pidx]);
+ }
+
+ mbuf = rxq->sw_ring[idx];
+ /* Check for any errors and free mbuf*/
+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, ES);
+ error_status = 0;
+ if (unlikely(err)) {
+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
+ if ((error_status != AXGBE_L3_CSUM_ERR) &&
+ (error_status != AXGBE_L4_CSUM_ERR)) {
+ rxq->errors++;
+ rte_pktmbuf_free(mbuf);
+ goto err_set;
+ }
+ }
+ if (rxq->pdata->rx_csum_enable) {
+ mbuf->ol_flags = 0;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ } else if (
+ unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
+ /* Get the RSS hash */
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
+ PL) - rxq->crc_len;
+ /* Mbuf populate */
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ mbuf->pkt_len = pkt_len;
+ mbuf->data_len = pkt_len;
+ rxq->bytes += pkt_len;
+ rx_pkts[nb_rx++] = mbuf;
+err_set:
+ rxq->cur++;
+ rxq->sw_ring[idx++] = tmbuf;
+ desc->read.baddr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
+ memset((void *)(&desc->read.desc2), 0, 8);
+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
+ rxq->dirty++;
+ }
+ rxq->pkts += nb_rx;
+ if (rxq->dirty != old_dirty) {
+ rte_wmb();
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+ low32_value(rxq->ring_phys_addr +
+ (idx * sizeof(union axgbe_rx_desc))));
+ }
+
+ return nb_rx;
+}
+
+/* Tx Apis */
+static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
+{
+ uint16_t i;
+ struct rte_mbuf **sw_ring;
+
+ if (tx_queue) {
+ sw_ring = tx_queue->sw_ring;
+ if (sw_ring) {
+ for (i = 0; i < tx_queue->nb_desc; i++) {
+ if (sw_ring[i])
+ rte_pktmbuf_free(sw_ring[i]);
+ }
+ rte_free(sw_ring);
+ }
+ rte_free(tx_queue);
+ }
+}
+
+void axgbe_dev_tx_queue_release(void *txq)
+{
+ axgbe_tx_queue_release(txq);
+}
+
+int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint32_t tx_desc;
+ struct axgbe_port *pdata;
+ struct axgbe_tx_queue *txq;
+ unsigned int tsize;
+ const struct rte_memzone *tz;
+
+ tx_desc = nb_desc;
+ pdata = (struct axgbe_port *)dev->data->dev_private;
+
+ /*
+ * validate tx descriptors count
+ * should be power of 2 and less than h/w supported
+ */
+ if ((!rte_is_power_of_2(tx_desc)) ||
+ tx_desc > pdata->tx_desc_count ||
+ tx_desc < AXGBE_MIN_RING_DESC)
+ return -EINVAL;
+
+ /* First allocate the tx queue data structure */
+ txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
+ RTE_CACHE_LINE_SIZE);
+ if (!txq)
+ return -ENOMEM;
+ txq->pdata = pdata;
+
+ txq->nb_desc = tx_desc;
+ txq->free_thresh = tx_conf->tx_free_thresh ?
+ tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
+ if (txq->free_thresh > txq->nb_desc)
+ txq->free_thresh = (txq->nb_desc >> 1);
+ txq->free_batch_cnt = txq->free_thresh;
+
+ /* In vector_tx path threshold should be multiple of queue_size*/
+ if (txq->nb_desc % txq->free_thresh != 0)
+ txq->vector_disable = 1;
+
+ if (tx_conf->offloads != 0)
+ txq->vector_disable = 1;
+
+ /* Allocate TX ring hardware descriptors */
+ tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tsize, AXGBE_DESC_ALIGN, socket_id);
+ if (!tz) {
+ axgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ memset(tz->addr, 0, tsize);
+ txq->ring_phys_addr = (uint64_t)tz->phys_addr;
+ txq->desc = tz->addr;
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * txq->queue_id));
+ txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
+ DMA_CH_TDTR_LO);
+ txq->cur = 0;
+ txq->dirty = 0;
+ txq->nb_desc_free = txq->nb_desc;
+ /* Allocate software ring */
+ tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
+ txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
+ RTE_CACHE_LINE_SIZE);
+ if (!txq->sw_ring) {
+ axgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ dev->data->tx_queues[queue_idx] = txq;
+ if (!pdata->tx_queues)
+ pdata->tx_queues = dev->data->tx_queues;
+
+ if (txq->vector_disable)
+ dev->tx_pkt_burst = &axgbe_xmit_pkts;
+ else
+#ifdef RTE_ARCH_X86
+ dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
+#else
+ dev->tx_pkt_burst = &axgbe_xmit_pkts;
+#endif
+
+ return 0;
+}
+
+static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
+ unsigned int queue)
+{
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Tx queue to empty the Tx fifo. Don't
+ * wait forever though...
+ */
+ tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
+ rte_get_timer_hz());
+ while (time_before(rte_get_timer_cycles(), tx_timeout)) {
+ tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
+ if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
+ (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
+ break;
+
+ rte_delay_us(900);
+ }
+
+ if (!time_before(rte_get_timer_cycles(), tx_timeout))
+ PMD_DRV_LOG(ERR,
+ "timed out waiting for Tx queue %u to empty\n",
+ queue);
+}
+
+static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
+ unsigned int queue)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
+ return axgbe_txq_prepare_tx_stop(pdata, queue);
+
+ /* Calculate the status register to read and the position within */
+ if (queue < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
+ DMA_DSRX_TPS_START;
+ }
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * descriptors. Wait for the Tx engine to enter the stopped or
+ * suspended state. Don't wait forever though...
+ */
+ tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
+ rte_get_timer_hz());
+ while (time_before(rte_get_timer_cycles(), tx_timeout)) {
+ tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
+ tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ rte_delay_us(900);
+ }
+
+ if (!time_before(rte_get_timer_cycles(), tx_timeout))
+ PMD_DRV_LOG(ERR,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ queue);
+}
+
+void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
+{
+ struct axgbe_tx_queue *txq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+
+ /* Prepare for stopping DMA channel */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ txq = dev->data->tx_queues[i];
+ axgbe_prepare_tx_stop(pdata, i);
+ }
+ /* Disable MAC Tx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+ /* Disable each Tx queue*/
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ 0);
+ /* Disable each Tx DMA channel */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
+ }
+}
+
+void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
+{
+ struct axgbe_tx_queue *txq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Enable Tx DMA channel */
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
+ }
+ /* Enable Tx queue*/
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ MTL_Q_ENABLED);
+ /* Enable MAC Tx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+/* Free Tx conformed mbufs */
+static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
+{
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
+ while (txq->cur != txq->dirty) {
+ if (unlikely(idx == txq->nb_desc))
+ idx = 0;
+ desc = &txq->desc[idx];
+ /* Check for ownership */
+ if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
+ return;
+ memset((void *)&desc->desc2, 0, 8);
+ /* Free mbuf */
+ rte_pktmbuf_free(txq->sw_ring[idx]);
+ txq->sw_ring[idx++] = NULL;
+ txq->dirty++;
+ }
+}
+
+/* Tx Descriptor formation
+ * Considering each mbuf requires one desc
+ * mbuf is linear
+ */
+static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
+ struct rte_mbuf *mbuf)
+{
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+ uint64_t mask;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ desc = &txq->desc[idx];
+
+ /* Update buffer address and length */
+ desc->baddr = rte_mbuf_data_iova(mbuf);
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ mbuf->pkt_len);
+ /* Total msg length to transmit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+ mbuf->pkt_len);
+ /* Mark it as First and Last Descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+ /* Mark it as a NORMAL descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+ /* configure h/w Offload */
+ mask = mbuf->ol_flags & PKT_TX_L4_MASK;
+ if ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM))
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
+ else if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
+ rte_wmb();
+
+ /* Set OWN bit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+
+ /* Save mbuf */
+ txq->sw_ring[idx] = mbuf;
+ /* Update current index*/
+ txq->cur++;
+ /* Update stats */
+ txq->bytes += mbuf->pkt_len;
+
+ return 0;
+}
+
+/* Eal supported tx wrapper*/
+uint16_t
+axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(nb_pkts == 0))
+ return nb_pkts;
+
+ struct axgbe_tx_queue *txq;
+ uint16_t nb_desc_free;
+ uint16_t nb_pkt_sent = 0;
+ uint16_t idx;
+ uint32_t tail_addr;
+ struct rte_mbuf *mbuf;
+
+ txq = (struct axgbe_tx_queue *)tx_queue;
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+
+ if (unlikely(nb_desc_free <= txq->free_thresh)) {
+ axgbe_xmit_cleanup(txq);
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(nb_desc_free == 0))
+ return 0;
+ }
+ nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
+ while (nb_pkts--) {
+ mbuf = *tx_pkts++;
+ if (axgbe_xmit_hw(txq, mbuf))
+ goto out;
+ nb_pkt_sent++;
+ }
+out:
+ /* Sync read and write */
+ rte_mb();
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ tail_addr = low32_value(txq->ring_phys_addr +
+ idx * sizeof(struct axgbe_tx_desc));
+ /* Update tail reg with next immediate address to kick Tx DMA channel*/
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+ txq->pkts += nb_pkt_sent;
+ return nb_pkt_sent;
+}
+
+void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint8_t i;
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq) {
+ axgbe_rx_queue_release(rxq);
+ dev->data->rx_queues[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+
+ if (txq) {
+ axgbe_tx_queue_release(txq);
+ dev->data->tx_queues[i] = NULL;
+ }
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.h b/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.h
new file mode 100644
index 00000000..917da58c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef _AXGBE_RXTX_H_
+#define _AXGBE_RXTX_H_
+
+/* to suppress gcc warnings related to descriptor casting*/
+#ifdef RTE_TOOLCHAIN_GCC
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+#ifdef RTE_TOOLCHAIN_CLANG
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+/* Descriptor related defines */
+#define AXGBE_MAX_RING_DESC 4096 /*should be power of 2*/
+#define AXGBE_TX_DESC_MIN_FREE (AXGBE_MAX_RING_DESC >> 3)
+#define AXGBE_TX_DESC_MAX_PROC (AXGBE_MAX_RING_DESC >> 1)
+#define AXGBE_MIN_RING_DESC 32
+#define RTE_AXGBE_DESCS_PER_LOOP 4
+#define RTE_AXGBE_MAX_RX_BURST 32
+
+#define AXGBE_RX_FREE_THRESH 32
+#define AXGBE_TX_FREE_THRESH 32
+
+#define AXGBE_DESC_ALIGN 128
+#define AXGBE_DESC_OWN 0x80000000
+#define AXGBE_ERR_STATUS 0x000f0000
+#define AXGBE_L3_CSUM_ERR 0x00050000
+#define AXGBE_L4_CSUM_ERR 0x00060000
+
+#include "axgbe_common.h"
+
+#define AXGBE_GET_DESC_PT(_queue, _idx) \
+ (((_queue)->desc) + \
+ ((_idx) & ((_queue)->nb_desc - 1)))
+
+#define AXGBE_GET_DESC_IDX(_queue, _idx) \
+ ((_idx) & ((_queue)->nb_desc - 1)) \
+
+/* Rx desc format */
+union axgbe_rx_desc {
+ struct {
+ uint64_t baddr;
+ uint32_t desc2;
+ uint32_t desc3;
+ } read;
+ struct {
+ uint32_t desc0;
+ uint32_t desc1;
+ uint32_t desc2;
+ uint32_t desc3;
+ } write;
+};
+
+struct axgbe_rx_queue {
+ /* membuf pool for rx buffers */
+ struct rte_mempool *mb_pool;
+ /* H/w Rx buffer size configured in DMA */
+ unsigned int buf_size;
+ /* CRC h/w offload */
+ uint16_t crc_len;
+ /* address of s/w rx buffers */
+ struct rte_mbuf **sw_ring;
+ /* Port private data */
+ struct axgbe_port *pdata;
+ /* Number of Rx descriptors in queue */
+ uint16_t nb_desc;
+ /* max free RX desc to hold */
+ uint16_t free_thresh;
+ /* Index of descriptor to check for packet availability */
+ uint64_t cur;
+ /* Index of descriptor to check for buffer reallocation */
+ uint64_t dirty;
+ /* Software Rx descriptor ring*/
+ volatile union axgbe_rx_desc *desc;
+ /* Ring physical address */
+ uint64_t ring_phys_addr;
+ /* Dma Channel register address */
+ void *dma_regs;
+ /* Dma channel tail register address*/
+ volatile uint32_t *dma_tail_reg;
+ /* DPDK queue index */
+ uint16_t queue_id;
+ /* dpdk port id*/
+ uint16_t port_id;
+ /* queue stats */
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t errors;
+ /* Number of mbufs allocated from pool*/
+ uint64_t mbuf_alloc;
+
+} __rte_cache_aligned;
+
+/*Tx descriptor format */
+struct axgbe_tx_desc {
+ phys_addr_t baddr;
+ uint32_t desc2;
+ uint32_t desc3;
+};
+
+struct axgbe_tx_queue {
+ /* Port private data reference */
+ struct axgbe_port *pdata;
+ /* Number of Tx descriptors in queue*/
+ uint16_t nb_desc;
+ /* Start freeing TX buffers if there are less free descriptors than
+ * this value
+ */
+ uint16_t free_thresh;
+ /* Available descriptors for Tx processing*/
+ uint16_t nb_desc_free;
+ /* Batch of mbufs/descs to release */
+ uint16_t free_batch_cnt;
+ /* Flag for vector support */
+ uint16_t vector_disable;
+ /* Index of descriptor to be used for current transfer */
+ uint64_t cur;
+ /* Index of descriptor to check for transfer complete */
+ uint64_t dirty;
+ /* Virtual address of ring */
+ volatile struct axgbe_tx_desc *desc;
+ /* Physical address of ring */
+ uint64_t ring_phys_addr;
+ /* Dma channel register space */
+ void *dma_regs;
+ /* Dma tail register address of ring*/
+ volatile uint32_t *dma_tail_reg;
+ /* Tx queue index/id*/
+ uint16_t queue_id;
+ /* Reference to hold Tx mbufs mapped to Tx descriptors freed
+ * after transmission confirmation
+ */
+ struct rte_mbuf **sw_ring;
+ /* dpdk port id*/
+ uint16_t port_id;
+ /* queue stats */
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t errors;
+
+} __rte_cache_aligned;
+
+/*Queue related APIs */
+
+/*
+ * RX/TX function prototypes
+ */
+
+
+void axgbe_dev_tx_queue_release(void *txq);
+int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void axgbe_dev_enable_tx(struct rte_eth_dev *dev);
+void axgbe_dev_disable_tx(struct rte_eth_dev *dev);
+int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+
+void axgbe_dev_rx_queue_release(void *rxq);
+int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+void axgbe_dev_enable_rx(struct rte_eth_dev *dev);
+void axgbe_dev_disable_rx(struct rte_eth_dev *dev);
+int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+void axgbe_dev_clear_queues(struct rte_eth_dev *dev);
+
+#endif /* _AXGBE_RXTX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
new file mode 100644
index 00000000..9be70371
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_rxtx.h"
+#include "axgbe_phy.h"
+
+#include <rte_time.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+/* Useful to avoid shifting for every descriptor prepration*/
+#define TX_DESC_CTRL_FLAGS 0xb000000000000000
+#define TX_FREE_BULK 8
+#define TX_FREE_BULK_CHECK (TX_FREE_BULK - 1)
+
+static inline void
+axgbe_vec_tx(volatile struct axgbe_tx_desc *desc,
+ struct rte_mbuf *mbuf)
+{
+ __m128i descriptor = _mm_set_epi64x((uint64_t)mbuf->pkt_len << 32 |
+ TX_DESC_CTRL_FLAGS | mbuf->data_len,
+ mbuf->buf_iova
+ + mbuf->data_off);
+ _mm_store_si128((__m128i *)desc, descriptor);
+}
+
+static void
+axgbe_xmit_cleanup_vec(struct axgbe_tx_queue *txq)
+{
+ volatile struct axgbe_tx_desc *desc;
+ int idx, i;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt
+ - 1);
+ desc = &txq->desc[idx];
+ if (desc->desc3 & AXGBE_DESC_OWN)
+ return;
+ /* memset avoided for desc ctrl fields since in vec_tx path
+ * all 128 bits are populated
+ */
+ for (i = 0; i < txq->free_batch_cnt; i++, idx--)
+ rte_pktmbuf_free_seg(txq->sw_ring[idx]);
+
+
+ txq->dirty += txq->free_batch_cnt;
+ txq->nb_desc_free += txq->free_batch_cnt;
+}
+
+uint16_t
+axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ struct axgbe_tx_queue *txq;
+ uint16_t idx, nb_commit, loop, i;
+ uint32_t tail_addr;
+
+ txq = (struct axgbe_tx_queue *)tx_queue;
+ if (txq->nb_desc_free < txq->free_thresh) {
+ axgbe_xmit_cleanup_vec(txq);
+ if (unlikely(txq->nb_desc_free == 0))
+ return 0;
+ }
+ nb_pkts = RTE_MIN(txq->nb_desc_free, nb_pkts);
+ nb_commit = nb_pkts;
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ loop = txq->nb_desc - idx;
+ if (nb_commit >= loop) {
+ for (i = 0; i < loop; ++i, ++idx, ++tx_pkts) {
+ axgbe_vec_tx(&txq->desc[idx], *tx_pkts);
+ txq->sw_ring[idx] = *tx_pkts;
+ }
+ nb_commit -= loop;
+ idx = 0;
+ }
+ for (i = 0; i < nb_commit; ++i, ++idx, ++tx_pkts) {
+ axgbe_vec_tx(&txq->desc[idx], *tx_pkts);
+ txq->sw_ring[idx] = *tx_pkts;
+ }
+ txq->cur += nb_pkts;
+ tail_addr = (uint32_t)(txq->ring_phys_addr +
+ idx * sizeof(struct axgbe_tx_desc));
+ /* Update tail reg with next immediate address to kick Tx DMA channel*/
+ rte_write32(tail_addr, (void *)txq->dma_tail_reg);
+ txq->pkts += nb_pkts;
+ txq->nb_desc_free -= nb_pkts;
+
+ return nb_pkts;
+}
diff --git a/src/spdk/dpdk/drivers/net/axgbe/meson.build b/src/spdk/dpdk/drivers/net/axgbe/meson.build
new file mode 100644
index 00000000..548ffff7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+sources = files('axgbe_ethdev.c',
+ 'axgbe_dev.c',
+ 'axgbe_mdio.c',
+ 'axgbe_phy_impl.c',
+ 'axgbe_i2c.c',
+ 'axgbe_rxtx.c')
+
+cflags += '-Wno-cast-qual'
+
+if arch_subdir == 'x86'
+ sources += files('axgbe_rxtx_vec_sse.c')
+endif
diff --git a/src/spdk/dpdk/drivers/net/axgbe/rte_pmd_axgbe_version.map b/src/spdk/dpdk/drivers/net/axgbe/rte_pmd_axgbe_version.map
new file mode 100644
index 00000000..b26efa67
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/axgbe/rte_pmd_axgbe_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/Makefile b/src/spdk/dpdk/drivers/net/bnx2x/Makefile
new file mode 100644
index 00000000..55d1ad6e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2014 - 2018 Cavium Inc.
+# All rights reserved.
+# www.cavium.com
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_bnx2x.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DZLIB_CONST
+LDLIBS += -lz
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+EXPORT_MAP := rte_pmd_bnx2x_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_stats.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += ecore_sp.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += elink.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_vfpf.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x.c b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x.c
new file mode 100644
index 00000000..4904eaf3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x.c
@@ -0,0 +1,11785 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#define BNX2X_DRIVER_VERSION "1.78.18"
+
+#include "bnx2x.h"
+#include "bnx2x_vfpf.h"
+#include "ecore_sp.h"
+#include "ecore_init.h"
+#include "ecore_init_ops.h"
+
+#include "rte_version.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <zlib.h>
+
+#define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
+#define BNX2X_PMD_VERSION_MAJOR 1
+#define BNX2X_PMD_VERSION_MINOR 0
+#define BNX2X_PMD_VERSION_REVISION 6
+#define BNX2X_PMD_VERSION_PATCH 1
+
+static inline const char *
+bnx2x_pmd_version(void)
+{
+ static char version[32];
+
+ snprintf(version, sizeof(version), "%s %s_%d.%d.%d.%d",
+ BNX2X_PMD_VER_PREFIX,
+ BNX2X_DRIVER_VERSION,
+ BNX2X_PMD_VERSION_MAJOR,
+ BNX2X_PMD_VERSION_MINOR,
+ BNX2X_PMD_VERSION_REVISION,
+ BNX2X_PMD_VERSION_PATCH);
+
+ return version;
+}
+
+static z_stream zlib_stream;
+
+#define EVL_VLID_MASK 0x0FFF
+
+#define BNX2X_DEF_SB_ATT_IDX 0x0001
+#define BNX2X_DEF_SB_IDX 0x0002
+
+/*
+ * FLR Support - bnx2x_pf_flr_clnup() is called during nic_load in the per
+ * function HW initialization.
+ */
+#define FLR_WAIT_USEC 10000 /* 10 msecs */
+#define FLR_WAIT_INTERVAL 50 /* usecs */
+#define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
+
+struct pbf_pN_buf_regs {
+ int pN;
+ uint32_t init_crd;
+ uint32_t crd;
+ uint32_t crd_freed;
+};
+
+struct pbf_pN_cmd_regs {
+ int pN;
+ uint32_t lines_occup;
+ uint32_t lines_freed;
+};
+
+/* resources needed for unloading a previously loaded device */
+
+#define BNX2X_PREV_WAIT_NEEDED 1
+rte_spinlock_t bnx2x_prev_mtx;
+struct bnx2x_prev_list_node {
+ LIST_ENTRY(bnx2x_prev_list_node) node;
+ uint8_t bus;
+ uint8_t slot;
+ uint8_t path;
+ uint8_t aer;
+ uint8_t undi;
+};
+
+static LIST_HEAD(, bnx2x_prev_list_node) bnx2x_prev_list
+ = LIST_HEAD_INITIALIZER(bnx2x_prev_list);
+
+static int load_count[2][3] = { { 0 } };
+ /* per-path: 0-common, 1-port0, 2-port1 */
+
+static void bnx2x_cmng_fns_init(struct bnx2x_softc *sc, uint8_t read_cfg,
+ uint8_t cmng_type);
+static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc *sc);
+static void storm_memset_cmng(struct bnx2x_softc *sc, struct cmng_init *cmng,
+ uint8_t port);
+static void bnx2x_set_reset_global(struct bnx2x_softc *sc);
+static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc);
+static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine);
+static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc);
+static uint8_t bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t * global,
+ uint8_t print);
+static void bnx2x_int_disable(struct bnx2x_softc *sc);
+static int bnx2x_release_leader_lock(struct bnx2x_softc *sc);
+static void bnx2x_pf_disable(struct bnx2x_softc *sc);
+static void bnx2x_update_rx_prod(struct bnx2x_softc *sc,
+ struct bnx2x_fastpath *fp,
+ uint16_t rx_bd_prod, uint16_t rx_cq_prod);
+static void bnx2x_link_report(struct bnx2x_softc *sc);
+void bnx2x_link_status_update(struct bnx2x_softc *sc);
+static int bnx2x_alloc_mem(struct bnx2x_softc *sc);
+static void bnx2x_free_mem(struct bnx2x_softc *sc);
+static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc);
+static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc);
+static __rte_noinline
+int bnx2x_nic_load(struct bnx2x_softc *sc);
+
+static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
+static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp);
+static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id,
+ uint8_t storm, uint16_t index, uint8_t op,
+ uint8_t update);
+
+int bnx2x_test_bit(int nr, volatile unsigned long *addr)
+{
+ int res;
+
+ mb();
+ res = ((*addr) & (1UL << nr)) != 0;
+ mb();
+ return res;
+}
+
+void bnx2x_set_bit(unsigned int nr, volatile unsigned long *addr)
+{
+ __sync_fetch_and_or(addr, (1UL << nr));
+}
+
+void bnx2x_clear_bit(int nr, volatile unsigned long *addr)
+{
+ __sync_fetch_and_and(addr, ~(1UL << nr));
+}
+
+int bnx2x_test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << nr);
+ return __sync_fetch_and_and(addr, ~mask) & mask;
+}
+
+int bnx2x_cmpxchg(volatile int *addr, int old, int new)
+{
+ return __sync_val_compare_and_swap(addr, old, new);
+}
+
+int
+bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
+ const char *msg, uint32_t align)
+{
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *z;
+
+ dma->sc = sc;
+ if (IS_PF(sc))
+ snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
+ rte_get_timer_cycles());
+ else
+ snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
+ rte_get_timer_cycles());
+
+ /* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */
+ z = rte_memzone_reserve_aligned(mz_name, (uint64_t)size,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, align);
+ if (z == NULL) {
+ PMD_DRV_LOG(ERR, "DMA alloc failed for %s", msg);
+ return -ENOMEM;
+ }
+ dma->paddr = (uint64_t) z->iova;
+ dma->vaddr = z->addr;
+
+ PMD_DRV_LOG(DEBUG, "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
+
+ return 0;
+}
+
+static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
+{
+ uint32_t lock_status;
+ uint32_t resource_bit = (1 << resource);
+ int func = SC_FUNC(sc);
+ uint32_t hw_lock_control_reg;
+ int cnt;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* validate the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ PMD_DRV_LOG(NOTICE,
+ "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE",
+ resource);
+ return -1;
+ }
+
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
+ }
+
+ /* validate the resource is not already taken */
+ lock_status = REG_RD(sc, hw_lock_control_reg);
+ if (lock_status & resource_bit) {
+ PMD_DRV_LOG(NOTICE,
+ "resource in use (status 0x%x bit 0x%x)",
+ lock_status, resource_bit);
+ return -1;
+ }
+
+ /* try every 5ms for 5 seconds */
+ for (cnt = 0; cnt < 1000; cnt++) {
+ REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
+ lock_status = REG_RD(sc, hw_lock_control_reg);
+ if (lock_status & resource_bit) {
+ return 0;
+ }
+ DELAY(5000);
+ }
+
+ PMD_DRV_LOG(NOTICE, "Resource lock timeout!");
+ return -1;
+}
+
+static int bnx2x_release_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
+{
+ uint32_t lock_status;
+ uint32_t resource_bit = (1 << resource);
+ int func = SC_FUNC(sc);
+ uint32_t hw_lock_control_reg;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* validate the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ PMD_DRV_LOG(NOTICE,
+ "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE",
+ resource);
+ return -1;
+ }
+
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
+ }
+
+ /* validate the resource is currently taken */
+ lock_status = REG_RD(sc, hw_lock_control_reg);
+ if (!(lock_status & resource_bit)) {
+ PMD_DRV_LOG(NOTICE,
+ "resource not in use (status 0x%x bit 0x%x)",
+ lock_status, resource_bit);
+ return -1;
+ }
+
+ REG_WR(sc, hw_lock_control_reg, resource_bit);
+ return 0;
+}
+
+/* copy command into DMAE command memory and set DMAE command Go */
+void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx)
+{
+ uint32_t cmd_offset;
+ uint32_t i;
+
+ cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
+ for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
+ REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *) dmae) + i));
+ }
+
+ REG_WR(sc, dmae_reg_go_c[idx], 1);
+}
+
+uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type)
+{
+ return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
+ DMAE_COMMAND_C_TYPE_ENABLE);
+}
+
+uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode)
+{
+ return opcode & ~DMAE_COMMAND_SRC_RESET;
+}
+
+uint32_t
+bnx2x_dmae_opcode(struct bnx2x_softc * sc, uint8_t src_type, uint8_t dst_type,
+ uint8_t with_comp, uint8_t comp_type)
+{
+ uint32_t opcode = 0;
+
+ opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
+ (dst_type << DMAE_COMMAND_DST_SHIFT));
+
+ opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);
+
+ opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
+
+ opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
+ (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));
+
+ opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
+
+#ifdef __BIG_ENDIAN
+ opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
+#else
+ opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
+#endif
+
+ if (with_comp) {
+ opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
+ }
+
+ return opcode;
+}
+
+static void
+bnx2x_prep_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae,
+ uint8_t src_type, uint8_t dst_type)
+{
+ memset(dmae, 0, sizeof(struct dmae_command));
+
+ /* set the opcode */
+ dmae->opcode = bnx2x_dmae_opcode(sc, src_type, dst_type,
+ TRUE, DMAE_COMP_PCI);
+
+ /* fill in the completion parameters */
+ dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, wb_comp));
+ dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, wb_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+}
+
+/* issue a DMAE command over the init channel and wait for completion */
+static int
+bnx2x_issue_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae)
+{
+ uint32_t *wb_comp = BNX2X_SP(sc, wb_comp);
+ int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
+
+ /* reset completion */
+ *wb_comp = 0;
+
+ /* post the command on the channel used for initializations */
+ bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc));
+
+ /* wait for completion */
+ DELAY(500);
+
+ while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+ if (!timeout ||
+ (sc->recovery_state != BNX2X_RECOVERY_DONE &&
+ sc->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
+ PMD_DRV_LOG(INFO, "DMAE timeout!");
+ return DMAE_TIMEOUT;
+ }
+
+ timeout--;
+ DELAY(50);
+ }
+
+ if (*wb_comp & DMAE_PCI_ERR_FLAG) {
+ PMD_DRV_LOG(INFO, "DMAE PCI error!");
+ return DMAE_PCI_ERROR;
+ }
+
+ return 0;
+}
+
+void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32)
+{
+ struct dmae_command dmae;
+ uint32_t *data;
+ uint32_t i;
+ int rc;
+
+ if (!sc->dmae_ready) {
+ data = BNX2X_SP(sc, wb_data[0]);
+
+ for (i = 0; i < len32; i++) {
+ data[i] = REG_RD(sc, (src_addr + (i * 4)));
+ }
+
+ return;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
+
+ /* fill in addresses and len */
+ dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
+ dmae.src_addr_hi = 0;
+ dmae.dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, wb_data));
+ dmae.dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, wb_data));
+ dmae.len = len32;
+
+ /* issue the command and wait for completion */
+ if ((rc = bnx2x_issue_dmae_with_comp(sc, &dmae)) != 0) {
+ rte_panic("DMAE failed (%d)", rc);
+ };
+}
+
+void
+bnx2x_write_dmae(struct bnx2x_softc *sc, rte_iova_t dma_addr, uint32_t dst_addr,
+ uint32_t len32)
+{
+ struct dmae_command dmae;
+ int rc;
+
+ if (!sc->dmae_ready) {
+ ecore_init_str_wr(sc, dst_addr, BNX2X_SP(sc, wb_data[0]), len32);
+ return;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
+
+ /* fill in addresses and len */
+ dmae.src_addr_lo = U64_LO(dma_addr);
+ dmae.src_addr_hi = U64_HI(dma_addr);
+ dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
+ dmae.dst_addr_hi = 0;
+ dmae.len = len32;
+
+ /* issue the command and wait for completion */
+ if ((rc = bnx2x_issue_dmae_with_comp(sc, &dmae)) != 0) {
+ rte_panic("DMAE failed (%d)", rc);
+ }
+}
+
+static void
+bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr,
+ uint32_t addr, uint32_t len)
+{
+ uint32_t dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
+ uint32_t offset = 0;
+
+ while (len > dmae_wr_max) {
+ bnx2x_write_dmae(sc, (phys_addr + offset), /* src DMA address */
+ (addr + offset), /* dst GRC address */
+ dmae_wr_max);
+ offset += (dmae_wr_max * 4);
+ len -= dmae_wr_max;
+ }
+
+ bnx2x_write_dmae(sc, (phys_addr + offset), /* src DMA address */
+ (addr + offset), /* dst GRC address */
+ len);
+}
+
+void
+bnx2x_set_ctx_validation(struct bnx2x_softc *sc, struct eth_context *cxt,
+ uint32_t cid)
+{
+ /* ustorm cxt validation */
+ cxt->ustorm_ag_context.cdu_usage =
+ CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
+ CDU_REGION_NUMBER_UCM_AG,
+ ETH_CONNECTION_TYPE);
+ /* xcontext validation */
+ cxt->xstorm_ag_context.cdu_reserved =
+ CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
+ CDU_REGION_NUMBER_XCM_AG,
+ ETH_CONNECTION_TYPE);
+}
+
+static void
+bnx2x_storm_memset_hc_timeout(struct bnx2x_softc *sc, uint8_t fw_sb_id,
+ uint8_t sb_index, uint8_t ticks)
+{
+ uint32_t addr =
+ (BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
+
+ REG_WR8(sc, addr, ticks);
+}
+
+static void
+bnx2x_storm_memset_hc_disable(struct bnx2x_softc *sc, uint16_t fw_sb_id,
+ uint8_t sb_index, uint8_t disable)
+{
+ uint32_t enable_flag =
+ (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
+ uint32_t addr =
+ (BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
+ uint8_t flags;
+
+ /* clear and set */
+ flags = REG_RD8(sc, addr);
+ flags &= ~HC_INDEX_DATA_HC_ENABLED;
+ flags |= enable_flag;
+ REG_WR8(sc, addr, flags);
+}
+
+void
+bnx2x_update_coalesce_sb_index(struct bnx2x_softc *sc, uint8_t fw_sb_id,
+ uint8_t sb_index, uint8_t disable, uint16_t usec)
+{
+ uint8_t ticks = (usec / 4);
+
+ bnx2x_storm_memset_hc_timeout(sc, fw_sb_id, sb_index, ticks);
+
+ disable = (disable) ? 1 : ((usec) ? 0 : 1);
+ bnx2x_storm_memset_hc_disable(sc, fw_sb_id, sb_index, disable);
+}
+
+uint32_t elink_cb_reg_read(struct bnx2x_softc *sc, uint32_t reg_addr)
+{
+ return REG_RD(sc, reg_addr);
+}
+
+void elink_cb_reg_write(struct bnx2x_softc *sc, uint32_t reg_addr, uint32_t val)
+{
+ REG_WR(sc, reg_addr, val);
+}
+
+void
+elink_cb_event_log(__rte_unused struct bnx2x_softc *sc,
+ __rte_unused const elink_log_id_t elink_log_id, ...)
+{
+ PMD_DRV_LOG(DEBUG, "ELINK EVENT LOG (%d)", elink_log_id);
+}
+
+static int bnx2x_set_spio(struct bnx2x_softc *sc, int spio, uint32_t mode)
+{
+ uint32_t spio_reg;
+
+ /* Only 2 SPIOs are configurable */
+ if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
+ PMD_DRV_LOG(NOTICE, "Invalid SPIO 0x%x", spio);
+ return -1;
+ }
+
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
+
+ /* read SPIO and mask except the float bits */
+ spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
+
+ switch (mode) {
+ case MISC_SPIO_OUTPUT_LOW:
+ /* clear FLOAT and set CLR */
+ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
+ spio_reg |= (spio << MISC_SPIO_CLR_POS);
+ break;
+
+ case MISC_SPIO_OUTPUT_HIGH:
+ /* clear FLOAT and set SET */
+ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
+ spio_reg |= (spio << MISC_SPIO_SET_POS);
+ break;
+
+ case MISC_SPIO_INPUT_HI_Z:
+ /* set FLOAT */
+ spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(sc, MISC_REG_SPIO, spio_reg);
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
+
+ return 0;
+}
+
+static int bnx2x_gpio_read(struct bnx2x_softc *sc, int gpio_num, uint8_t port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
+ REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
+ int gpio_shift = gpio_num;
+ if (gpio_port)
+ gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT;
+
+ uint32_t gpio_mask = (1 << gpio_shift);
+ uint32_t gpio_reg;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ PMD_DRV_LOG(NOTICE, "Invalid GPIO %d", gpio_num);
+ return -1;
+ }
+
+ /* read GPIO value */
+ gpio_reg = REG_RD(sc, MISC_REG_GPIO);
+
+ /* get the requested pin value */
+ return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
+}
+
+static int
+bnx2x_gpio_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, uint8_t port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
+ REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
+ int gpio_shift = gpio_num;
+ if (gpio_port)
+ gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT;
+
+ uint32_t gpio_mask = (1 << gpio_shift);
+ uint32_t gpio_reg;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ PMD_DRV_LOG(NOTICE, "Invalid GPIO %d", gpio_num);
+ return -1;
+ }
+
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
+
+ /* read GPIO and mask except the float bits */
+ gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+ /* clear FLOAT and set CLR */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+ /* clear FLOAT and set SET */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+ /* set FLOAT */
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(sc, MISC_REG_GPIO, gpio_reg);
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
+
+ return 0;
+}
+
+static int
+bnx2x_gpio_mult_write(struct bnx2x_softc *sc, uint8_t pins, uint32_t mode)
+{
+ uint32_t gpio_reg;
+
+ /* any port swapping should be handled by caller */
+
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
+
+ /* read GPIO and mask except the float bits */
+ gpio_reg = REG_RD(sc, MISC_REG_GPIO);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+ /* set CLR */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+ /* set SET */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+ /* set FLOAT */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+ break;
+
+ default:
+ PMD_DRV_LOG(NOTICE, "Invalid GPIO mode assignment %d", mode);
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
+ return -1;
+ }
+
+ REG_WR(sc, MISC_REG_GPIO, gpio_reg);
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
+
+ return 0;
+}
+
+static int
+bnx2x_gpio_int_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode,
+ uint8_t port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
+ REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
+ int gpio_shift = gpio_num;
+ if (gpio_port)
+ gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT;
+
+ uint32_t gpio_mask = (1 << gpio_shift);
+ uint32_t gpio_reg;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ PMD_DRV_LOG(NOTICE, "Invalid GPIO %d", gpio_num);
+ return -1;
+ }
+
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
+
+ /* read GPIO int */
+ gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
+ /* clear SET and set CLR */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
+ /* clear CLR and set SET */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
+
+ return 0;
+}
+
+uint32_t
+elink_cb_gpio_read(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t port)
+{
+ return bnx2x_gpio_read(sc, gpio_num, port);
+}
+
+uint8_t elink_cb_gpio_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */
+ uint8_t port)
+{
+ return bnx2x_gpio_write(sc, gpio_num, mode, port);
+}
+
+uint8_t
+elink_cb_gpio_mult_write(struct bnx2x_softc * sc, uint8_t pins,
+ uint8_t mode /* 0=low 1=high */ )
+{
+ return bnx2x_gpio_mult_write(sc, pins, mode);
+}
+
+uint8_t elink_cb_gpio_int_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */
+ uint8_t port)
+{
+ return bnx2x_gpio_int_write(sc, gpio_num, mode, port);
+}
+
+void elink_cb_notify_link_changed(struct bnx2x_softc *sc)
+{
+ REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
+ (SC_FUNC(sc) * sizeof(uint32_t))), 1);
+}
+
+/* send the MCP a request, block until there is a reply */
+uint32_t
+elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param)
+{
+ int mb_idx = SC_FW_MB_IDX(sc);
+ uint32_t seq;
+ uint32_t rc = 0;
+ uint32_t cnt = 1;
+ uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
+
+ seq = ++sc->fw_seq;
+ SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
+ SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
+
+ PMD_DRV_LOG(DEBUG,
+ "wrote command 0x%08x to FW MB param 0x%08x",
+ (command | seq), param);
+
+ /* Let the FW do it's magic. GIve it up to 5 seconds... */
+ do {
+ DELAY(delay * 1000);
+ rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
+ } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
+
+ /* is this a reply to our command? */
+ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
+ rc &= FW_MSG_CODE_MASK;
+ } else {
+ /* Ruh-roh! */
+ PMD_DRV_LOG(NOTICE, "FW failed to respond!");
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static uint32_t
+bnx2x_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param)
+{
+ return elink_cb_fw_command(sc, command, param);
+}
+
+static void
+__storm_memset_dma_mapping(struct bnx2x_softc *sc, uint32_t addr,
+ rte_iova_t mapping)
+{
+ REG_WR(sc, addr, U64_LO(mapping));
+ REG_WR(sc, (addr + 4), U64_HI(mapping));
+}
+
+static void
+storm_memset_spq_addr(struct bnx2x_softc *sc, rte_iova_t mapping,
+ uint16_t abs_fid)
+{
+ uint32_t addr = (XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
+ __storm_memset_dma_mapping(sc, addr, mapping);
+}
+
+static void
+storm_memset_vf_to_pf(struct bnx2x_softc *sc, uint16_t abs_fid, uint16_t pf_id)
+{
+ REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)),
+ pf_id);
+ REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)),
+ pf_id);
+ REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)),
+ pf_id);
+ REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)),
+ pf_id);
+}
+
+static void
+storm_memset_func_en(struct bnx2x_softc *sc, uint16_t abs_fid, uint8_t enable)
+{
+ REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)),
+ enable);
+ REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)),
+ enable);
+ REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)),
+ enable);
+ REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)),
+ enable);
+}
+
+static void
+storm_memset_eq_data(struct bnx2x_softc *sc, struct event_ring_data *eq_data,
+ uint16_t pfid)
+{
+ uint32_t addr;
+ size_t size;
+
+ addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
+ size = sizeof(struct event_ring_data);
+ ecore_storm_memset_struct(sc, addr, size, (uint32_t *) eq_data);
+}
+
+static void
+storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid)
+{
+ uint32_t addr = (BAR_CSTRORM_INTMEM +
+ CSTORM_EVENT_RING_PROD_OFFSET(pfid));
+ REG_WR16(sc, addr, eq_prod);
+}
+
+/*
+ * Post a slowpath command.
+ *
+ * A slowpath command is used to propagate a configuration change through
+ * the controller in a controlled manner, allowing each STORM processor and
+ * other H/W blocks to phase in the change. The commands sent on the
+ * slowpath are referred to as ramrods. Depending on the ramrod used the
+ * completion of the ramrod will occur in different ways. Here's a
+ * breakdown of ramrods and how they complete:
+ *
+ * RAMROD_CMD_ID_ETH_PORT_SETUP
+ * Used to setup the leading connection on a port. Completes on the
+ * Receive Completion Queue (RCQ) of that port (typically fp[0]).
+ *
+ * RAMROD_CMD_ID_ETH_CLIENT_SETUP
+ * Used to setup an additional connection on a port. Completes on the
+ * RCQ of the multi-queue/RSS connection being initialized.
+ *
+ * RAMROD_CMD_ID_ETH_STAT_QUERY
+ * Used to force the storm processors to update the statistics database
+ * in host memory. This ramrod is send on the leading connection CID and
+ * completes as an index increment of the CSTORM on the default status
+ * block.
+ *
+ * RAMROD_CMD_ID_ETH_UPDATE
+ * Used to update the state of the leading connection, usually to udpate
+ * the RSS indirection table. Completes on the RCQ of the leading
+ * connection. (Not currently used under FreeBSD until OS support becomes
+ * available.)
+ *
+ * RAMROD_CMD_ID_ETH_HALT
+ * Used when tearing down a connection prior to driver unload. Completes
+ * on the RCQ of the multi-queue/RSS connection being torn down. Don't
+ * use this on the leading connection.
+ *
+ * RAMROD_CMD_ID_ETH_SET_MAC
+ * Sets the Unicast/Broadcast/Multicast used by the port. Completes on
+ * the RCQ of the leading connection.
+ *
+ * RAMROD_CMD_ID_ETH_CFC_DEL
+ * Used when tearing down a conneciton prior to driver unload. Completes
+ * on the RCQ of the leading connection (since the current connection
+ * has been completely removed from controller memory).
+ *
+ * RAMROD_CMD_ID_ETH_PORT_DEL
+ * Used to tear down the leading connection prior to driver unload,
+ * typically fp[0]. Completes as an index increment of the CSTORM on the
+ * default status block.
+ *
+ * RAMROD_CMD_ID_ETH_FORWARD_SETUP
+ * Used for connection offload. Completes on the RCQ of the multi-queue
+ * RSS connection that is being offloaded. (Not currently used under
+ * FreeBSD.)
+ *
+ * There can only be one command pending per function.
+ *
+ * Returns:
+ * 0 = Success, !0 = Failure.
+ */
+
+/* must be called under the spq lock */
+static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x_softc *sc)
+{
+ struct eth_spe *next_spe = sc->spq_prod_bd;
+
+ if (sc->spq_prod_bd == sc->spq_last_bd) {
+ /* wrap back to the first eth_spq */
+ sc->spq_prod_bd = sc->spq;
+ sc->spq_prod_idx = 0;
+ } else {
+ sc->spq_prod_bd++;
+ sc->spq_prod_idx++;
+ }
+
+ return next_spe;
+}
+
+/* must be called under the spq lock */
+static void bnx2x_sp_prod_update(struct bnx2x_softc *sc)
+{
+ int func = SC_FUNC(sc);
+
+ /*
+ * Make sure that BD data is updated before writing the producer.
+ * BD data is written to the memory, the producer is read from the
+ * memory, thus we need a full memory barrier to ensure the ordering.
+ */
+ mb();
+
+ REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
+ sc->spq_prod_idx);
+
+ mb();
+}
+
+/**
+ * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
+ *
+ * @cmd: command to check
+ * @cmd_type: command type
+ */
+static int bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
+{
+ if ((cmd_type == NONE_CONNECTION_TYPE) ||
+ (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
+ (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
+ (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+/**
+ * bnx2x_sp_post - place a single command on an SP ring
+ *
+ * @sc: driver handle
+ * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
+ * @cid: SW CID the command is related to
+ * @data_hi: command private data address (high 32 bits)
+ * @data_lo: command private data address (low 32 bits)
+ * @cmd_type: command type (e.g. NONE, ETH)
+ *
+ * SP data is handled as if it's always an address pair, thus data fields are
+ * not swapped to little endian in upper functions. Instead this function swaps
+ * data as if it's two uint32 fields.
+ */
+int
+bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi,
+ uint32_t data_lo, int cmd_type)
+{
+ struct eth_spe *spe;
+ uint16_t type;
+ int common;
+
+ common = bnx2x_is_contextless_ramrod(command, cmd_type);
+
+ if (common) {
+ if (!atomic_load_acq_long(&sc->eq_spq_left)) {
+ PMD_DRV_LOG(INFO, "EQ ring is full!");
+ return -1;
+ }
+ } else {
+ if (!atomic_load_acq_long(&sc->cq_spq_left)) {
+ PMD_DRV_LOG(INFO, "SPQ ring is full!");
+ return -1;
+ }
+ }
+
+ spe = bnx2x_sp_get_next(sc);
+
+ /* CID needs port number to be encoded int it */
+ spe->hdr.conn_and_cmd_data =
+ htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid));
+
+ type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
+
+ /* TBD: Check if it works for VFs */
+ type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+
+ spe->hdr.type = htole16(type);
+
+ spe->data.update_data_addr.hi = htole32(data_hi);
+ spe->data.update_data_addr.lo = htole32(data_lo);
+
+ /*
+ * It's ok if the actual decrement is issued towards the memory
+ * somewhere between the lock and unlock. Thus no more explict
+ * memory barrier is needed.
+ */
+ if (common) {
+ atomic_subtract_acq_long(&sc->eq_spq_left, 1);
+ } else {
+ atomic_subtract_acq_long(&sc->cq_spq_left, 1);
+ }
+
+ PMD_DRV_LOG(DEBUG,
+ "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x"
+ "data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)",
+ sc->spq_prod_idx,
+ (uint32_t) U64_HI(sc->spq_dma.paddr),
+ (uint32_t) (U64_LO(sc->spq_dma.paddr) +
+ (uint8_t *) sc->spq_prod_bd -
+ (uint8_t *) sc->spq), command, common,
+ HW_CID(sc, cid), data_hi, data_lo, type,
+ atomic_load_acq_long(&sc->cq_spq_left),
+ atomic_load_acq_long(&sc->eq_spq_left));
+
+ bnx2x_sp_prod_update(sc);
+
+ return 0;
+}
+
+static void bnx2x_drv_pulse(struct bnx2x_softc *sc)
+{
+ SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
+ sc->fw_drv_pulse_wr_seq);
+}
+
+static int bnx2x_tx_queue_has_work(const struct bnx2x_fastpath *fp)
+{
+ uint16_t hw_cons;
+ struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index];
+
+ if (unlikely(!txq)) {
+ PMD_TX_LOG(ERR, "ERROR: TX queue is NULL");
+ return 0;
+ }
+
+ mb(); /* status block fields can change */
+ hw_cons = le16toh(*fp->tx_cons_sb);
+ return hw_cons != txq->tx_pkt_head;
+}
+
+static uint8_t bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+{
+ /* expand this for multi-cos if ever supported */
+ return bnx2x_tx_queue_has_work(fp);
+}
+
+static int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
+{
+ uint16_t rx_cq_cons_sb;
+ struct bnx2x_rx_queue *rxq;
+ rxq = fp->sc->rx_queues[fp->index];
+ if (unlikely(!rxq)) {
+ PMD_RX_LOG(ERR, "ERROR: RX queue is NULL");
+ return 0;
+ }
+
+ mb(); /* status block fields can change */
+ rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
+ if (unlikely((rx_cq_cons_sb & MAX_RCQ_ENTRIES(rxq)) ==
+ MAX_RCQ_ENTRIES(rxq)))
+ rx_cq_cons_sb++;
+ return rxq->rx_cq_head != rx_cq_cons_sb;
+}
+
+static void
+bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
+ union eth_rx_cqe *rr_cqe)
+{
+ int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+ int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+ enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
+ struct ecore_queue_sp_obj *q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj;
+
+ PMD_DRV_LOG(DEBUG,
+ "fp=%d cid=%d got ramrod #%d state is %x type is %d",
+ fp->index, cid, command, sc->state,
+ rr_cqe->ramrod_cqe.ramrod_type);
+
+ switch (command) {
+ case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
+ PMD_DRV_LOG(DEBUG, "got UPDATE ramrod. CID %d", cid);
+ drv_cmd = ECORE_Q_CMD_UPDATE;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
+ PMD_DRV_LOG(DEBUG, "got MULTI[%d] setup ramrod", cid);
+ drv_cmd = ECORE_Q_CMD_SETUP;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
+ PMD_DRV_LOG(DEBUG, "got MULTI[%d] tx-only setup ramrod", cid);
+ drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_HALT):
+ PMD_DRV_LOG(DEBUG, "got MULTI[%d] halt ramrod", cid);
+ drv_cmd = ECORE_Q_CMD_HALT;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_TERMINATE):
+ PMD_DRV_LOG(DEBUG, "got MULTI[%d] teminate ramrod", cid);
+ drv_cmd = ECORE_Q_CMD_TERMINATE;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_EMPTY):
+ PMD_DRV_LOG(DEBUG, "got MULTI[%d] empty ramrod", cid);
+ drv_cmd = ECORE_Q_CMD_EMPTY;
+ break;
+
+ default:
+ PMD_DRV_LOG(DEBUG,
+ "ERROR: unexpected MC reply (%d)"
+ "on fp[%d]", command, fp->index);
+ return;
+ }
+
+ if ((drv_cmd != ECORE_Q_CMD_MAX) &&
+ q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
+ /*
+ * q_obj->complete_cmd() failure means that this was
+ * an unexpected completion.
+ *
+ * In this case we don't want to increase the sc->spq_left
+ * because apparently we haven't sent this command the first
+ * place.
+ */
+ // rte_panic("Unexpected SP completion");
+ return;
+ }
+
+ atomic_add_acq_long(&sc->cq_spq_left, 1);
+
+ PMD_DRV_LOG(DEBUG, "sc->cq_spq_left 0x%lx",
+ atomic_load_acq_long(&sc->cq_spq_left));
+}
+
+static uint8_t bnx2x_rxeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp)
+{
+ struct bnx2x_rx_queue *rxq;
+ uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
+ uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
+
+ rxq = sc->rx_queues[fp->index];
+ if (!rxq) {
+ PMD_RX_LOG(ERR, "RX queue %d is NULL", fp->index);
+ return 0;
+ }
+
+ /* CQ "next element" is of the size of the regular element */
+ hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
+ if (unlikely((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) ==
+ USABLE_RCQ_ENTRIES_PER_PAGE)) {
+ hw_cq_cons++;
+ }
+
+ bd_cons = rxq->rx_bd_head;
+ bd_prod = rxq->rx_bd_tail;
+ bd_prod_fw = bd_prod;
+ sw_cq_cons = rxq->rx_cq_head;
+ sw_cq_prod = rxq->rx_cq_tail;
+
+ /*
+ * Memory barrier necessary as speculative reads of the rx
+ * buffer can be ahead of the index in the status block
+ */
+ rmb();
+
+ while (sw_cq_cons != hw_cq_cons) {
+ union eth_rx_cqe *cqe;
+ struct eth_fast_path_rx_cqe *cqe_fp;
+ uint8_t cqe_fp_flags;
+ enum eth_rx_cqe_type cqe_fp_type;
+
+ comp_ring_cons = RCQ_ENTRY(sw_cq_cons, rxq);
+ bd_prod = RX_BD(bd_prod, rxq);
+ bd_cons = RX_BD(bd_cons, rxq);
+
+ cqe = &rxq->cq_ring[comp_ring_cons];
+ cqe_fp = &cqe->fast_path_cqe;
+ cqe_fp_flags = cqe_fp->type_error_flags;
+ cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+
+ /* is this a slowpath msg? */
+ if (CQE_TYPE_SLOW(cqe_fp_type)) {
+ bnx2x_sp_event(sc, fp, cqe);
+ goto next_cqe;
+ }
+
+ /* is this an error packet? */
+ if (unlikely(cqe_fp_flags &
+ ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
+ PMD_RX_LOG(DEBUG, "flags 0x%x rx packet %u",
+ cqe_fp_flags, sw_cq_cons);
+ goto next_rx;
+ }
+
+ PMD_RX_LOG(DEBUG, "Dropping fastpath called from attn poller!");
+
+next_rx:
+ bd_cons = NEXT_RX_BD(bd_cons);
+ bd_prod = NEXT_RX_BD(bd_prod);
+ bd_prod_fw = NEXT_RX_BD(bd_prod_fw);
+
+next_cqe:
+ sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod);
+ sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons);
+
+ } /* while work to do */
+
+ rxq->rx_bd_head = bd_cons;
+ rxq->rx_bd_tail = bd_prod_fw;
+ rxq->rx_cq_head = sw_cq_cons;
+ rxq->rx_cq_tail = sw_cq_prod;
+
+ /* Update producers */
+ bnx2x_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod);
+
+ return sw_cq_cons != hw_cq_cons;
+}
+
+static uint16_t
+bnx2x_free_tx_pkt(__rte_unused struct bnx2x_fastpath *fp, struct bnx2x_tx_queue *txq,
+ uint16_t pkt_idx, uint16_t bd_idx)
+{
+ struct eth_tx_start_bd *tx_start_bd =
+ &txq->tx_ring[TX_BD(bd_idx, txq)].start_bd;
+ uint16_t nbd = rte_le_to_cpu_16(tx_start_bd->nbd);
+ struct rte_mbuf *tx_mbuf = txq->sw_ring[TX_BD(pkt_idx, txq)];
+
+ if (likely(tx_mbuf != NULL)) {
+ rte_pktmbuf_free_seg(tx_mbuf);
+ } else {
+ PMD_RX_LOG(ERR, "fp[%02d] lost mbuf %lu",
+ fp->index, (unsigned long)TX_BD(pkt_idx, txq));
+ }
+
+ txq->sw_ring[TX_BD(pkt_idx, txq)] = NULL;
+ txq->nb_tx_avail += nbd;
+
+ while (nbd--)
+ bd_idx = NEXT_TX_BD(bd_idx);
+
+ return bd_idx;
+}
+
+/* processes transmit completions */
+uint8_t bnx2x_txeof(__rte_unused struct bnx2x_softc * sc, struct bnx2x_fastpath * fp)
+{
+ uint16_t bd_cons, hw_cons, sw_cons;
+ __rte_unused uint16_t tx_bd_avail;
+
+ struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index];
+
+ if (unlikely(!txq)) {
+ PMD_TX_LOG(ERR, "ERROR: TX queue is NULL");
+ return 0;
+ }
+
+ bd_cons = txq->tx_bd_head;
+ hw_cons = rte_le_to_cpu_16(*fp->tx_cons_sb);
+ sw_cons = txq->tx_pkt_head;
+
+ while (sw_cons != hw_cons) {
+ bd_cons = bnx2x_free_tx_pkt(fp, txq, sw_cons, bd_cons);
+ sw_cons++;
+ }
+
+ txq->tx_pkt_head = sw_cons;
+ txq->tx_bd_head = bd_cons;
+
+ tx_bd_avail = txq->nb_tx_avail;
+
+ PMD_TX_LOG(DEBUG, "fp[%02d] avail=%u cons_sb=%u, "
+ "pkt_head=%u pkt_tail=%u bd_head=%u bd_tail=%u",
+ fp->index, tx_bd_avail, hw_cons,
+ txq->tx_pkt_head, txq->tx_pkt_tail,
+ txq->tx_bd_head, txq->tx_bd_tail);
+ return TRUE;
+}
+
+static void bnx2x_drain_tx_queues(struct bnx2x_softc *sc)
+{
+ struct bnx2x_fastpath *fp;
+ int i, count;
+
+ /* wait until all TX fastpath tasks have completed */
+ for (i = 0; i < sc->num_queues; i++) {
+ fp = &sc->fp[i];
+
+ count = 1000;
+
+ while (bnx2x_has_tx_work(fp)) {
+ bnx2x_txeof(sc, fp);
+
+ if (count == 0) {
+ PMD_TX_LOG(ERR,
+ "Timeout waiting for fp[%d] "
+ "transmits to complete!", i);
+ rte_panic("tx drain failure");
+ return;
+ }
+
+ count--;
+ DELAY(1000);
+ rmb();
+ }
+ }
+
+ return;
+}
+
+static int
+bnx2x_del_all_macs(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *mac_obj,
+ int mac_type, uint8_t wait_for_comp)
+{
+ unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+ int rc;
+
+ /* wait for completion of requested */
+ if (wait_for_comp) {
+ bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ }
+
+ /* Set the mac type of addresses we want to clear */
+ bnx2x_set_bit(mac_type, &vlan_mac_flags);
+
+ rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
+ if (rc < 0)
+ PMD_DRV_LOG(ERR, "Failed to delete MACs (%d)", rc);
+
+ return rc;
+}
+
+static int
+bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode,
+ unsigned long *rx_accept_flags,
+ unsigned long *tx_accept_flags)
+{
+ /* Clear the flags first */
+ *rx_accept_flags = 0;
+ *tx_accept_flags = 0;
+
+ switch (rx_mode) {
+ case BNX2X_RX_MODE_NONE:
+ /*
+ * 'drop all' supersedes any accept flags that may have been
+ * passed to the function.
+ */
+ break;
+
+ case BNX2X_RX_MODE_NORMAL:
+ bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
+ bnx2x_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
+ bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
+
+ /* internal switching mode */
+ bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
+ bnx2x_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
+ bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
+
+ break;
+
+ case BNX2X_RX_MODE_ALLMULTI:
+ bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
+ bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
+
+ /* internal switching mode */
+ bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
+ bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
+
+ break;
+
+ case BNX2X_RX_MODE_ALLMULTI_PROMISC:
+ case BNX2X_RX_MODE_PROMISC:
+ /*
+ * According to deffinition of SI mode, iface in promisc mode
+ * should receive matched and unmatched (in resolution of port)
+ * unicast packets.
+ */
+ bnx2x_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
+ bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
+ bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
+
+ /* internal switching mode */
+ bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
+
+ if (IS_MF_SI(sc)) {
+ bnx2x_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
+ } else {
+ bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
+ }
+
+ break;
+
+ default:
+ PMD_RX_LOG(ERR, "Unknown rx_mode (%d)", rx_mode);
+ return -1;
+ }
+
+ /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
+ if (rx_mode != BNX2X_RX_MODE_NONE) {
+ bnx2x_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
+ bnx2x_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
+ }
+
+ return 0;
+}
+
+static int
+bnx2x_set_q_rx_mode(struct bnx2x_softc *sc, uint8_t cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags, unsigned long ramrod_flags)
+{
+ struct ecore_rx_mode_ramrod_params ramrod_param;
+ int rc;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* Prepare ramrod parameters */
+ ramrod_param.cid = 0;
+ ramrod_param.cl_id = cl_id;
+ ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
+ ramrod_param.func_id = SC_FUNC(sc);
+
+ ramrod_param.pstate = &sc->sp_state;
+ ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
+
+ ramrod_param.rdata = BNX2X_SP(sc, rx_mode_rdata);
+ ramrod_param.rdata_mapping =
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata),
+ bnx2x_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
+
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.rx_mode_flags = rx_mode_flags;
+
+ ramrod_param.rx_accept_flags = rx_accept_flags;
+ ramrod_param.tx_accept_flags = tx_accept_flags;
+
+ rc = ecore_config_rx_mode(sc, &ramrod_param);
+ if (rc < 0) {
+ PMD_RX_LOG(ERR, "Set rx_mode %d failed", sc->rx_mode);
+ return rc;
+ }
+
+ return 0;
+}
+
+int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc)
+{
+ unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+ unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+ int rc;
+
+ rc = bnx2x_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
+ &tx_accept_flags);
+ if (rc) {
+ return rc;
+ }
+
+ bnx2x_set_bit(RAMROD_RX, &ramrod_flags);
+ bnx2x_set_bit(RAMROD_TX, &ramrod_flags);
+ bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+
+ return bnx2x_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
+ rx_accept_flags, tx_accept_flags,
+ ramrod_flags);
+}
+
+/* returns the "mcp load_code" according to global load_count array */
+static int bnx2x_nic_load_no_mcp(struct bnx2x_softc *sc)
+{
+ int path = SC_PATH(sc);
+ int port = SC_PORT(sc);
+
+ PMD_DRV_LOG(INFO, "NO MCP - load counts[%d] %d, %d, %d",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+
+ load_count[path][0]++;
+ load_count[path][1 + port]++;
+ PMD_DRV_LOG(INFO, "NO MCP - new load counts[%d] %d, %d, %d",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ if (load_count[path][0] == 1)
+ return FW_MSG_CODE_DRV_LOAD_COMMON;
+ else if (load_count[path][1 + port] == 1)
+ return FW_MSG_CODE_DRV_LOAD_PORT;
+ else
+ return FW_MSG_CODE_DRV_LOAD_FUNCTION;
+}
+
+/* returns the "mcp load_code" according to global load_count array */
+static int bnx2x_nic_unload_no_mcp(struct bnx2x_softc *sc)
+{
+ int port = SC_PORT(sc);
+ int path = SC_PATH(sc);
+
+ PMD_DRV_LOG(INFO, "NO MCP - load counts[%d] %d, %d, %d",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ load_count[path][0]--;
+ load_count[path][1 + port]--;
+ PMD_DRV_LOG(INFO, "NO MCP - new load counts[%d] %d, %d, %d",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ if (load_count[path][0] == 0) {
+ return FW_MSG_CODE_DRV_UNLOAD_COMMON;
+ } else if (load_count[path][1 + port] == 0) {
+ return FW_MSG_CODE_DRV_UNLOAD_PORT;
+ } else {
+ return FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
+ }
+}
+
+/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
+static uint32_t bnx2x_send_unload_req(struct bnx2x_softc *sc, int unload_mode)
+{
+ uint32_t reset_code = 0;
+
+ /* Select the UNLOAD request mode */
+ if (unload_mode == UNLOAD_NORMAL) {
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+ } else {
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+ }
+
+ /* Send the request to the MCP */
+ if (!BNX2X_NOMCP(sc)) {
+ reset_code = bnx2x_fw_command(sc, reset_code, 0);
+ } else {
+ reset_code = bnx2x_nic_unload_no_mcp(sc);
+ }
+
+ return reset_code;
+}
+
+/* send UNLOAD_DONE command to the MCP */
+static void bnx2x_send_unload_done(struct bnx2x_softc *sc, uint8_t keep_link)
+{
+ uint32_t reset_param =
+ keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
+
+ /* Report UNLOAD_DONE to MCP */
+ if (!BNX2X_NOMCP(sc)) {
+ bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
+ }
+}
+
+static int bnx2x_func_wait_started(struct bnx2x_softc *sc)
+{
+ int tout = 50;
+
+ if (!sc->port.pmf) {
+ return 0;
+ }
+
+ /*
+ * (assumption: No Attention from MCP at this stage)
+ * PMF probably in the middle of TX disable/enable transaction
+ * 1. Sync IRS for default SB
+ * 2. Sync SP queue - this guarantees us that attention handling started
+ * 3. Wait, that TX disable/enable transaction completes
+ *
+ * 1+2 guarantee that if DCBX attention was scheduled it already changed
+ * pending bit of transaction from STARTED-->TX_STOPPED, if we already
+ * received completion for the transaction the state is TX_STOPPED.
+ * State will return to STARTED after completion of TX_STOPPED-->STARTED
+ * transaction.
+ */
+
+ while (ecore_func_get_state(sc, &sc->func_obj) !=
+ ECORE_F_STATE_STARTED && tout--) {
+ DELAY(20000);
+ }
+
+ if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
+ /*
+ * Failed to complete the transaction in a "good way"
+ * Force both transactions with CLR bit.
+ */
+ struct ecore_func_state_params func_params = { NULL };
+
+ PMD_DRV_LOG(NOTICE, "Unexpected function state! "
+ "Forcing STARTED-->TX_STOPPED-->STARTED");
+
+ func_params.f_obj = &sc->func_obj;
+ bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
+
+ /* STARTED-->TX_STOPPED */
+ func_params.cmd = ECORE_F_CMD_TX_STOP;
+ ecore_func_state_change(sc, &func_params);
+
+ /* TX_STOPPED-->STARTED */
+ func_params.cmd = ECORE_F_CMD_TX_START;
+ return ecore_func_state_change(sc, &func_params);
+ }
+
+ return 0;
+}
+
+static int bnx2x_stop_queue(struct bnx2x_softc *sc, int index)
+{
+ struct bnx2x_fastpath *fp = &sc->fp[index];
+ struct ecore_queue_state_params q_params = { NULL };
+ int rc;
+
+ PMD_DRV_LOG(DEBUG, "stopping queue %d cid %d", index, fp->index);
+
+ q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
+ /* We want to wait for completion in this context */
+ bnx2x_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+ /* Stop the primary connection: */
+
+ /* ...halt the connection */
+ q_params.cmd = ECORE_Q_CMD_HALT;
+ rc = ecore_queue_state_change(sc, &q_params);
+ if (rc) {
+ return rc;
+ }
+
+ /* ...terminate the connection */
+ q_params.cmd = ECORE_Q_CMD_TERMINATE;
+ memset(&q_params.params.terminate, 0,
+ sizeof(q_params.params.terminate));
+ q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
+ rc = ecore_queue_state_change(sc, &q_params);
+ if (rc) {
+ return rc;
+ }
+
+ /* ...delete cfc entry */
+ q_params.cmd = ECORE_Q_CMD_CFC_DEL;
+ memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
+ q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
+ return ecore_queue_state_change(sc, &q_params);
+}
+
+/* wait for the outstanding SP commands */
+static uint8_t bnx2x_wait_sp_comp(struct bnx2x_softc *sc, unsigned long mask)
+{
+ unsigned long tmp;
+ int tout = 5000; /* wait for 5 secs tops */
+
+ while (tout--) {
+ mb();
+ if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
+ return TRUE;
+ }
+
+ DELAY(1000);
+ }
+
+ mb();
+
+ tmp = atomic_load_acq_long(&sc->sp_state);
+ if (tmp & mask) {
+ PMD_DRV_LOG(INFO, "Filtering completion timed out: "
+ "sp_state 0x%lx, mask 0x%lx", tmp, mask);
+ return FALSE;
+ }
+
+ return FALSE;
+}
+
+static int bnx2x_func_stop(struct bnx2x_softc *sc)
+{
+ struct ecore_func_state_params func_params = { NULL };
+ int rc;
+
+ /* prepare parameters for function state transitions */
+ bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ func_params.f_obj = &sc->func_obj;
+ func_params.cmd = ECORE_F_CMD_STOP;
+
+ /*
+ * Try to stop the function the 'good way'. If it fails (in case
+ * of a parity error during bnx2x_chip_cleanup()) and we are
+ * not in a debug mode, perform a state transaction in order to
+ * enable further HW_RESET transaction.
+ */
+ rc = ecore_func_state_change(sc, &func_params);
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "FUNC_STOP ramrod failed. "
+ "Running a dry transaction");
+ bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
+ return ecore_func_state_change(sc, &func_params);
+ }
+
+ return 0;
+}
+
+static int bnx2x_reset_hw(struct bnx2x_softc *sc, uint32_t load_code)
+{
+ struct ecore_func_state_params func_params = { NULL };
+
+ /* Prepare parameters for function state transitions */
+ bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &sc->func_obj;
+ func_params.cmd = ECORE_F_CMD_HW_RESET;
+
+ func_params.params.hw_init.load_phase = load_code;
+
+ return ecore_func_state_change(sc, &func_params);
+}
+
+static void bnx2x_int_disable_sync(struct bnx2x_softc *sc, int disable_hw)
+{
+ if (disable_hw) {
+ /* prevent the HW from sending interrupts */
+ bnx2x_int_disable(sc);
+ }
+}
+
+static void
+bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link)
+{
+ int port = SC_PORT(sc);
+ struct ecore_mcast_ramrod_params rparam = { NULL };
+ uint32_t reset_code;
+ int i, rc = 0;
+
+ bnx2x_drain_tx_queues(sc);
+
+ /* give HW time to discard old tx messages */
+ DELAY(1000);
+
+ /* Clean all ETH MACs */
+ rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC,
+ FALSE);
+ if (rc < 0) {
+ PMD_DRV_LOG(NOTICE, "Failed to delete all ETH MACs (%d)", rc);
+ }
+
+ /* Clean up UC list */
+ rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC,
+ TRUE);
+ if (rc < 0) {
+ PMD_DRV_LOG(NOTICE, "Failed to delete UC MACs list (%d)", rc);
+ }
+
+ /* Disable LLH */
+ REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0);
+
+ /* Set "drop all" to stop Rx */
+
+ /*
+ * We need to take the if_maddr_lock() here in order to prevent
+ * a race between the completion code and this code.
+ */
+
+ if (bnx2x_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
+ bnx2x_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
+ } else {
+ bnx2x_set_storm_rx_mode(sc);
+ }
+
+ /* Clean up multicast configuration */
+ rparam.mcast_obj = &sc->mcast_obj;
+ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
+ if (rc < 0) {
+ PMD_DRV_LOG(NOTICE,
+ "Failed to send DEL MCAST command (%d)", rc);
+ }
+
+ /*
+ * Send the UNLOAD_REQUEST to the MCP. This will return if
+ * this function should perform FUNCTION, PORT, or COMMON HW
+ * reset.
+ */
+ reset_code = bnx2x_send_unload_req(sc, unload_mode);
+
+ /*
+ * (assumption: No Attention from MCP at this stage)
+ * PMF probably in the middle of TX disable/enable transaction
+ */
+ rc = bnx2x_func_wait_started(sc);
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "bnx2x_func_wait_started failed");
+ }
+
+ /*
+ * Close multi and leading connections
+ * Completions for ramrods are collected in a synchronous way
+ */
+ for (i = 0; i < sc->num_queues; i++) {
+ if (bnx2x_stop_queue(sc, i)) {
+ goto unload_error;
+ }
+ }
+
+ /*
+ * If SP settings didn't get completed so far - something
+ * very wrong has happen.
+ */
+ if (!bnx2x_wait_sp_comp(sc, ~0x0UL)) {
+ PMD_DRV_LOG(NOTICE, "Common slow path ramrods got stuck!");
+ }
+
+unload_error:
+
+ rc = bnx2x_func_stop(sc);
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "Function stop failed!");
+ }
+
+ /* disable HW interrupts */
+ bnx2x_int_disable_sync(sc, TRUE);
+
+ /* Reset the chip */
+ rc = bnx2x_reset_hw(sc, reset_code);
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "Hardware reset failed");
+ }
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(sc, keep_link);
+}
+
+static void bnx2x_disable_close_the_gate(struct bnx2x_softc *sc)
+{
+ uint32_t val;
+
+ PMD_DRV_LOG(DEBUG, "Disabling 'close the gates'");
+
+ val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
+ val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+ MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+ REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
+}
+
+/*
+ * Cleans the object that have internal lists without sending
+ * ramrods. Should be run when interrutps are disabled.
+ */
+static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
+{
+ unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+ struct ecore_mcast_ramrod_params rparam = { NULL };
+ struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
+ int rc;
+
+ /* Cleanup MACs' object first... */
+
+ /* Wait for completion of requested */
+ bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ /* Perform a dry cleanup */
+ bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
+
+ /* Clean ETH primary MAC */
+ bnx2x_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
+ rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc != 0) {
+ PMD_DRV_LOG(NOTICE, "Failed to clean ETH MACs (%d)", rc);
+ }
+
+ /* Cleanup UC list */
+ vlan_mac_flags = 0;
+ bnx2x_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
+ rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
+ if (rc != 0) {
+ PMD_DRV_LOG(NOTICE, "Failed to clean UC list MACs (%d)", rc);
+ }
+
+ /* Now clean mcast object... */
+
+ rparam.mcast_obj = &sc->mcast_obj;
+ bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
+
+ /* Add a DEL command... */
+ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
+ if (rc < 0) {
+ PMD_DRV_LOG(NOTICE,
+ "Failed to send DEL MCAST command (%d)", rc);
+ }
+
+ /* now wait until all pending commands are cleared */
+
+ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
+ while (rc != 0) {
+ if (rc < 0) {
+ PMD_DRV_LOG(NOTICE,
+ "Failed to clean MCAST object (%d)", rc);
+ return;
+ }
+
+ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
+ }
+}
+
+/* stop the controller */
+__rte_noinline
+int
+bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link)
+{
+ uint8_t global = FALSE;
+ uint32_t val;
+
+ PMD_DRV_LOG(DEBUG, "Starting NIC unload...");
+
+ /* mark driver as unloaded in shmem2 */
+ if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
+ val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
+ SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
+ val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+
+ if (IS_PF(sc) && sc->recovery_state != BNX2X_RECOVERY_DONE &&
+ (sc->state == BNX2X_STATE_CLOSED || sc->state == BNX2X_STATE_ERROR)) {
+ /*
+ * We can get here if the driver has been unloaded
+ * during parity error recovery and is either waiting for a
+ * leader to complete or for other functions to unload and
+ * then ifconfig down has been issued. In this case we want to
+ * unload and let other functions to complete a recovery
+ * process.
+ */
+ sc->recovery_state = BNX2X_RECOVERY_DONE;
+ sc->is_leader = 0;
+ bnx2x_release_leader_lock(sc);
+ mb();
+
+ PMD_DRV_LOG(NOTICE, "Can't unload in closed or error state");
+ return -1;
+ }
+
+ /*
+ * Nothing to do during unload if previous bnx2x_nic_load()
+ * did not completed successfully - all resourses are released.
+ */
+ if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) {
+ return 0;
+ }
+
+ sc->state = BNX2X_STATE_CLOSING_WAITING_HALT;
+ mb();
+
+ sc->rx_mode = BNX2X_RX_MODE_NONE;
+ bnx2x_set_rx_mode(sc);
+ mb();
+
+ if (IS_PF(sc)) {
+ /* set ALWAYS_ALIVE bit in shmem */
+ sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+
+ bnx2x_drv_pulse(sc);
+
+ bnx2x_stats_handle(sc, STATS_EVENT_STOP);
+ bnx2x_save_statistics(sc);
+ }
+
+ /* wait till consumers catch up with producers in all queues */
+ bnx2x_drain_tx_queues(sc);
+
+ /* if VF indicate to PF this function is going down (PF will delete sp
+ * elements and clear initializations
+ */
+ if (IS_VF(sc)) {
+ bnx2x_vf_unload(sc);
+ } else if (unload_mode != UNLOAD_RECOVERY) {
+ /* if this is a normal/close unload need to clean up chip */
+ bnx2x_chip_cleanup(sc, unload_mode, keep_link);
+ } else {
+ /* Send the UNLOAD_REQUEST to the MCP */
+ bnx2x_send_unload_req(sc, unload_mode);
+
+ /*
+ * Prevent transactions to host from the functions on the
+ * engine that doesn't reset global blocks in case of global
+ * attention once gloabl blocks are reset and gates are opened
+ * (the engine which leader will perform the recovery
+ * last).
+ */
+ if (!CHIP_IS_E1x(sc)) {
+ bnx2x_pf_disable(sc);
+ }
+
+ /* disable HW interrupts */
+ bnx2x_int_disable_sync(sc, TRUE);
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(sc, FALSE);
+ }
+
+ /*
+ * At this stage no more interrupts will arrive so we may safely clean
+ * the queue'able objects here in case they failed to get cleaned so far.
+ */
+ if (IS_PF(sc)) {
+ bnx2x_squeeze_objects(sc);
+ }
+
+ /* There should be no more pending SP commands at this stage */
+ sc->sp_state = 0;
+
+ sc->port.pmf = 0;
+
+ if (IS_PF(sc)) {
+ bnx2x_free_mem(sc);
+ }
+
+ bnx2x_free_fw_stats_mem(sc);
+
+ sc->state = BNX2X_STATE_CLOSED;
+
+ /*
+ * Check if there are pending parity attentions. If there are - set
+ * RECOVERY_IN_PROGRESS.
+ */
+ if (IS_PF(sc) && bnx2x_chk_parity_attn(sc, &global, FALSE)) {
+ bnx2x_set_reset_in_progress(sc);
+
+ /* Set RESET_IS_GLOBAL if needed */
+ if (global) {
+ bnx2x_set_reset_global(sc);
+ }
+ }
+
+ /*
+ * The last driver must disable a "close the gate" if there is no
+ * parity attention or "process kill" pending.
+ */
+ if (IS_PF(sc) && !bnx2x_clear_pf_load(sc) &&
+ bnx2x_reset_is_done(sc, SC_PATH(sc))) {
+ bnx2x_disable_close_the_gate(sc);
+ }
+
+ PMD_DRV_LOG(DEBUG, "Ended NIC unload");
+
+ return 0;
+}
+
+/*
+ * Encapsulte an mbuf cluster into the tx bd chain and makes the memory
+ * visible to the controller.
+ *
+ * If an mbuf is submitted to this routine and cannot be given to the
+ * controller (e.g. it has too many fragments) then the function may free
+ * the mbuf and return to the caller.
+ *
+ * Returns:
+ * int: Number of TX BDs used for the mbuf
+ *
+ * Note the side effect that an mbuf may be freed if it causes a problem.
+ */
+int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0)
+{
+ struct eth_tx_start_bd *tx_start_bd;
+ uint16_t bd_prod, pkt_prod;
+ struct bnx2x_softc *sc;
+ uint32_t nbds = 0;
+
+ sc = txq->sc;
+ bd_prod = txq->tx_bd_tail;
+ pkt_prod = txq->tx_pkt_tail;
+
+ txq->sw_ring[TX_BD(pkt_prod, txq)] = m0;
+
+ tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
+
+ tx_start_bd->addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova(m0));
+ tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
+ tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+ tx_start_bd->general_data =
+ (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
+
+ tx_start_bd->nbd = rte_cpu_to_le_16(2);
+
+ if (m0->ol_flags & PKT_TX_VLAN_PKT) {
+ tx_start_bd->vlan_or_ethertype =
+ rte_cpu_to_le_16(m0->vlan_tci);
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_OUTBAND_VLAN <<
+ ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+ } else {
+ if (IS_PF(sc))
+ tx_start_bd->vlan_or_ethertype =
+ rte_cpu_to_le_16(pkt_prod);
+ else {
+ struct ether_hdr *eh =
+ rte_pktmbuf_mtod(m0, struct ether_hdr *);
+
+ tx_start_bd->vlan_or_ethertype =
+ rte_cpu_to_le_16(rte_be_to_cpu_16(eh->ether_type));
+ }
+ }
+
+ bd_prod = NEXT_TX_BD(bd_prod);
+ if (IS_VF(sc)) {
+ struct eth_tx_parse_bd_e2 *tx_parse_bd;
+ const struct ether_hdr *eh =
+ rte_pktmbuf_mtod(m0, struct ether_hdr *);
+ uint8_t mac_type = UNICAST_ADDRESS;
+
+ tx_parse_bd =
+ &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2;
+ if (is_multicast_ether_addr(&eh->d_addr)) {
+ if (is_broadcast_ether_addr(&eh->d_addr))
+ mac_type = BROADCAST_ADDRESS;
+ else
+ mac_type = MULTICAST_ADDRESS;
+ }
+ tx_parse_bd->parsing_data =
+ (mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
+
+ rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi,
+ &eh->d_addr.addr_bytes[0], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid,
+ &eh->d_addr.addr_bytes[2], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo,
+ &eh->d_addr.addr_bytes[4], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi,
+ &eh->s_addr.addr_bytes[0], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid,
+ &eh->s_addr.addr_bytes[2], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo,
+ &eh->s_addr.addr_bytes[4], 2);
+
+ tx_parse_bd->data.mac_addr.dst_hi =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi);
+ tx_parse_bd->data.mac_addr.dst_mid =
+ rte_cpu_to_be_16(tx_parse_bd->data.
+ mac_addr.dst_mid);
+ tx_parse_bd->data.mac_addr.dst_lo =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo);
+ tx_parse_bd->data.mac_addr.src_hi =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi);
+ tx_parse_bd->data.mac_addr.src_mid =
+ rte_cpu_to_be_16(tx_parse_bd->data.
+ mac_addr.src_mid);
+ tx_parse_bd->data.mac_addr.src_lo =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo);
+
+ PMD_TX_LOG(DEBUG,
+ "PBD dst %x %x %x src %x %x %x p_data %x",
+ tx_parse_bd->data.mac_addr.dst_hi,
+ tx_parse_bd->data.mac_addr.dst_mid,
+ tx_parse_bd->data.mac_addr.dst_lo,
+ tx_parse_bd->data.mac_addr.src_hi,
+ tx_parse_bd->data.mac_addr.src_mid,
+ tx_parse_bd->data.mac_addr.src_lo,
+ tx_parse_bd->parsing_data);
+ }
+
+ PMD_TX_LOG(DEBUG,
+ "start bd: nbytes %d flags %x vlan %x",
+ tx_start_bd->nbytes,
+ tx_start_bd->bd_flags.as_bitfield,
+ tx_start_bd->vlan_or_ethertype);
+
+ bd_prod = NEXT_TX_BD(bd_prod);
+ pkt_prod++;
+
+ if (TX_IDX(bd_prod) < 2)
+ nbds++;
+
+ txq->nb_tx_avail -= 2;
+ txq->tx_bd_tail = bd_prod;
+ txq->tx_pkt_tail = pkt_prod;
+
+ return nbds + 2;
+}
+
+static uint16_t bnx2x_cid_ilt_lines(struct bnx2x_softc *sc)
+{
+ return L2_ILT_LINES(sc);
+}
+
+static void bnx2x_ilt_set_info(struct bnx2x_softc *sc)
+{
+ struct ilt_client_info *ilt_client;
+ struct ecore_ilt *ilt = sc->ilt;
+ uint16_t line = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
+
+ /* CDU */
+ ilt_client = &ilt->clients[ILT_CLIENT_CDU];
+ ilt_client->client_num = ILT_CLIENT_CDU;
+ ilt_client->page_size = CDU_ILT_PAGE_SZ;
+ ilt_client->flags = ILT_CLIENT_SKIP_MEM;
+ ilt_client->start = line;
+ line += bnx2x_cid_ilt_lines(sc);
+
+ if (CNIC_SUPPORT(sc)) {
+ line += CNIC_ILT_LINES;
+ }
+
+ ilt_client->end = (line - 1);
+
+ /* QM */
+ if (QM_INIT(sc->qm_cid_count)) {
+ ilt_client = &ilt->clients[ILT_CLIENT_QM];
+ ilt_client->client_num = ILT_CLIENT_QM;
+ ilt_client->page_size = QM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+
+ /* 4 bytes for each cid */
+ line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
+ QM_ILT_PAGE_SZ);
+
+ ilt_client->end = (line - 1);
+ }
+
+ if (CNIC_SUPPORT(sc)) {
+ /* SRC */
+ ilt_client = &ilt->clients[ILT_CLIENT_SRC];
+ ilt_client->client_num = ILT_CLIENT_SRC;
+ ilt_client->page_size = SRC_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += SRC_ILT_LINES;
+ ilt_client->end = (line - 1);
+
+ /* TM */
+ ilt_client = &ilt->clients[ILT_CLIENT_TM];
+ ilt_client->client_num = ILT_CLIENT_TM;
+ ilt_client->page_size = TM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += TM_ILT_LINES;
+ ilt_client->end = (line - 1);
+ }
+
+ assert((line <= ILT_MAX_LINES));
+}
+
+static void bnx2x_set_fp_rx_buf_size(struct bnx2x_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < sc->num_queues; i++) {
+ /* get the Rx buffer size for RX frames */
+ sc->fp[i].rx_buf_size =
+ (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
+ }
+}
+
+int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc)
+{
+
+ sc->ilt = rte_malloc("", sizeof(struct ecore_ilt), RTE_CACHE_LINE_SIZE);
+
+ return sc->ilt == NULL;
+}
+
+static int bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc)
+{
+ sc->ilt->lines = rte_calloc("",
+ sizeof(struct ilt_line), ILT_MAX_LINES,
+ RTE_CACHE_LINE_SIZE);
+ return sc->ilt->lines == NULL;
+}
+
+void bnx2x_free_ilt_mem(struct bnx2x_softc *sc)
+{
+ rte_free(sc->ilt);
+ sc->ilt = NULL;
+}
+
+static void bnx2x_free_ilt_lines_mem(struct bnx2x_softc *sc)
+{
+ if (sc->ilt->lines != NULL) {
+ rte_free(sc->ilt->lines);
+ sc->ilt->lines = NULL;
+ }
+}
+
+static void bnx2x_free_mem(struct bnx2x_softc *sc)
+{
+ uint32_t i;
+
+ for (i = 0; i < L2_ILT_LINES(sc); i++) {
+ sc->context[i].vcxt = NULL;
+ sc->context[i].size = 0;
+ }
+
+ ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
+
+ bnx2x_free_ilt_lines_mem(sc);
+}
+
+static int bnx2x_alloc_mem(struct bnx2x_softc *sc)
+{
+ int context_size;
+ int allocated;
+ int i;
+ char cdu_name[RTE_MEMZONE_NAMESIZE];
+
+ /*
+ * Allocate memory for CDU context:
+ * This memory is allocated separately and not in the generic ILT
+ * functions because CDU differs in few aspects:
+ * 1. There can be multiple entities allocating memory for context -
+ * regular L2, CNIC, and SRIOV drivers. Each separately controls
+ * its own ILT lines.
+ * 2. Since CDU page-size is not a single 4KB page (which is the case
+ * for the other ILT clients), to be efficient we want to support
+ * allocation of sub-page-size in the last entry.
+ * 3. Context pointers are used by the driver to pass to FW / update
+ * the context (for the other ILT clients the pointers are used just to
+ * free the memory during unload).
+ */
+ context_size = (sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(sc));
+ for (i = 0, allocated = 0; allocated < context_size; i++) {
+ sc->context[i].size = min(CDU_ILT_PAGE_SZ,
+ (context_size - allocated));
+
+ snprintf(cdu_name, sizeof(cdu_name), "cdu_%d", i);
+ if (bnx2x_dma_alloc(sc, sc->context[i].size,
+ &sc->context[i].vcxt_dma,
+ cdu_name, BNX2X_PAGE_SIZE) != 0) {
+ bnx2x_free_mem(sc);
+ return -1;
+ }
+
+ sc->context[i].vcxt =
+ (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
+
+ allocated += sc->context[i].size;
+ }
+
+ bnx2x_alloc_ilt_lines_mem(sc);
+
+ if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
+ PMD_DRV_LOG(NOTICE, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed");
+ bnx2x_free_mem(sc);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc)
+{
+ sc->fw_stats_num = 0;
+
+ sc->fw_stats_req_size = 0;
+ sc->fw_stats_req = NULL;
+ sc->fw_stats_req_mapping = 0;
+
+ sc->fw_stats_data_size = 0;
+ sc->fw_stats_data = NULL;
+ sc->fw_stats_data_mapping = 0;
+}
+
+static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc)
+{
+ uint8_t num_queue_stats;
+ int num_groups, vf_headroom = 0;
+
+ /* number of queues for statistics is number of eth queues */
+ num_queue_stats = BNX2X_NUM_ETH_QUEUES(sc);
+
+ /*
+ * Total number of FW statistics requests =
+ * 1 for port stats + 1 for PF stats + num of queues
+ */
+ sc->fw_stats_num = (2 + num_queue_stats);
+
+ /*
+ * Request is built from stats_query_header and an array of
+ * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
+ * rules. The real number or requests is configured in the
+ * stats_query_header.
+ */
+ num_groups = (sc->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT;
+ if ((sc->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT)
+ num_groups++;
+
+ sc->fw_stats_req_size =
+ (sizeof(struct stats_query_header) +
+ (num_groups * sizeof(struct stats_query_cmd_group)));
+
+ /*
+ * Data for statistics requests + stats_counter.
+ * stats_counter holds per-STORM counters that are incremented when
+ * STORM has finished with the current request. Memory for FCoE
+ * offloaded statistics are counted anyway, even if they will not be sent.
+ * VF stats are not accounted for here as the data of VF stats is stored
+ * in memory allocated by the VF, not here.
+ */
+ sc->fw_stats_data_size =
+ (sizeof(struct stats_counter) +
+ sizeof(struct per_port_stats) + sizeof(struct per_pf_stats) +
+ /* sizeof(struct fcoe_statistics_params) + */
+ (sizeof(struct per_queue_stats) * num_queue_stats));
+
+ if (bnx2x_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
+ &sc->fw_stats_dma, "fw_stats",
+ RTE_CACHE_LINE_SIZE) != 0) {
+ bnx2x_free_fw_stats_mem(sc);
+ return -1;
+ }
+
+ /* set up the shortcuts */
+
+ sc->fw_stats_req = (struct bnx2x_fw_stats_req *)sc->fw_stats_dma.vaddr;
+ sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
+
+ sc->fw_stats_data =
+ (struct bnx2x_fw_stats_data *)((uint8_t *) sc->fw_stats_dma.vaddr +
+ sc->fw_stats_req_size);
+ sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
+ sc->fw_stats_req_size);
+
+ return 0;
+}
+
+/*
+ * Bits map:
+ * 0-7 - Engine0 load counter.
+ * 8-15 - Engine1 load counter.
+ * 16 - Engine0 RESET_IN_PROGRESS bit.
+ * 17 - Engine1 RESET_IN_PROGRESS bit.
+ * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active
+ * function on the engine
+ * 19 - Engine1 ONE_IS_LOADED.
+ * 20 - Chip reset flow bit. When set none-leader must wait for both engines
+ * leader to complete (check for both RESET_IN_PROGRESS bits and not
+ * for just the one belonging to its engine).
+ */
+#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
+#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
+#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
+#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
+#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
+#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
+#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
+#define BNX2X_GLOBAL_RESET_BIT 0x00040000
+
+/* set the GLOBAL_RESET bit, should be run under rtnl lock */
+static void bnx2x_set_reset_global(struct bnx2x_softc *sc)
+{
+ uint32_t val;
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);
+ REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
+static void bnx2x_clear_reset_global(struct bnx2x_softc *sc)
+{
+ uint32_t val;
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);
+ REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
+static uint8_t bnx2x_reset_is_global(struct bnx2x_softc *sc)
+{
+ return REG_RD(sc, BNX2X_RECOVERY_GLOB_REG) & BNX2X_GLOBAL_RESET_BIT;
+}
+
+/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
+static void bnx2x_set_reset_done(struct bnx2x_softc *sc)
+{
+ uint32_t val;
+ uint32_t bit = SC_PATH(sc) ? BNX2X_PATH1_RST_IN_PROG_BIT :
+ BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
+
+ val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);
+ /* Clear the bit */
+ val &= ~bit;
+ REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val);
+
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
+static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc)
+{
+ uint32_t val;
+ uint32_t bit = SC_PATH(sc) ? BNX2X_PATH1_RST_IN_PROG_BIT :
+ BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
+
+ val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);
+ /* Set the bit */
+ val |= bit;
+ REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val);
+
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
+static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine)
+{
+ uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);
+ uint32_t bit = engine ? BNX2X_PATH1_RST_IN_PROG_BIT :
+ BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ /* return false if bit is set */
+ return (val & bit) ? FALSE : TRUE;
+}
+
+/* get the load status for an engine, should be run under rtnl lock */
+static uint8_t bnx2x_get_load_status(struct bnx2x_softc *sc, int engine)
+{
+ uint32_t mask = engine ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK;
+ uint32_t shift = engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT;
+ uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);
+
+ val = ((val & mask) >> shift);
+
+ return val != 0;
+}
+
+/* set pf load mark */
+static void bnx2x_set_pf_load(struct bnx2x_softc *sc)
+{
+ uint32_t val;
+ uint32_t val1;
+ uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK;
+ uint32_t shift = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
+
+ PMD_INIT_FUNC_TRACE();
+
+ val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);
+
+ /* get the current counter value */
+ val1 = ((val & mask) >> shift);
+
+ /* set bit of this PF */
+ val1 |= (1 << SC_ABS_FUNC(sc));
+
+ /* clear the old value */
+ val &= ~mask;
+
+ /* set the new one */
+ val |= ((val1 << shift) & mask);
+
+ REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val);
+
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/* clear pf load mark */
+static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc)
+{
+ uint32_t val1, val;
+ uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK;
+ uint32_t shift = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);
+
+ /* get the current counter value */
+ val1 = (val & mask) >> shift;
+
+ /* clear bit of that PF */
+ val1 &= ~(1 << SC_ABS_FUNC(sc));
+
+ /* clear the old value */
+ val &= ~mask;
+
+ /* set the new one */
+ val |= ((val1 << shift) & mask);
+
+ REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val);
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
+ return val1 != 0;
+}
+
+/* send load requrest to mcp and analyze response */
+static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ /* init fw_seq */
+ sc->fw_seq =
+ (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+
+ PMD_DRV_LOG(DEBUG, "initial fw_seq 0x%04x", sc->fw_seq);
+
+#ifdef BNX2X_PULSE
+ /* get the current FW pulse sequence */
+ sc->fw_drv_pulse_wr_seq =
+ (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK);
+#else
+ /* set ALWAYS_ALIVE bit in shmem */
+ sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+ bnx2x_drv_pulse(sc);
+#endif
+
+ /* load request */
+ (*load_code) = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
+ DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
+
+ /* if the MCP fails to respond we must abort */
+ if (!(*load_code)) {
+ PMD_DRV_LOG(NOTICE, "MCP response failure!");
+ return -1;
+ }
+
+ /* if MCP refused then must abort */
+ if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+ PMD_DRV_LOG(NOTICE, "MCP refused load request");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Check whether another PF has already loaded FW to chip. In virtualized
+ * environments a pf from anoth VM may have already initialized the device
+ * including loading FW.
+ */
+static int bnx2x_nic_load_analyze_req(struct bnx2x_softc *sc, uint32_t load_code)
+{
+ uint32_t my_fw, loaded_fw;
+
+ /* is another pf loaded on this engine? */
+ if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
+ (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
+ /* build my FW version dword */
+ my_fw = (BNX2X_5710_FW_MAJOR_VERSION +
+ (BNX2X_5710_FW_MINOR_VERSION << 8) +
+ (BNX2X_5710_FW_REVISION_VERSION << 16) +
+ (BNX2X_5710_FW_ENGINEERING_VERSION << 24));
+
+ /* read loaded FW from chip */
+ loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
+ PMD_DRV_LOG(DEBUG, "loaded FW 0x%08x / my FW 0x%08x",
+ loaded_fw, my_fw);
+
+ /* abort nic load if version mismatch */
+ if (my_fw != loaded_fw) {
+ PMD_DRV_LOG(NOTICE,
+ "FW 0x%08x already loaded (mine is 0x%08x)",
+ loaded_fw, my_fw);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* mark PMF if applicable */
+static void bnx2x_nic_load_pmf(struct bnx2x_softc *sc, uint32_t load_code)
+{
+ uint32_t ncsi_oem_data_addr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
+ /*
+ * Barrier here for ordering between the writing to sc->port.pmf here
+ * and reading it from the periodic task.
+ */
+ sc->port.pmf = 1;
+ mb();
+ } else {
+ sc->port.pmf = 0;
+ }
+
+ PMD_DRV_LOG(DEBUG, "pmf %d", sc->port.pmf);
+
+ if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
+ if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
+ ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
+ if (ncsi_oem_data_addr) {
+ REG_WR(sc,
+ (ncsi_oem_data_addr +
+ offsetof(struct glob_ncsi_oem_data,
+ driver_version)), 0);
+ }
+ }
+ }
+}
+
+static void bnx2x_read_mf_cfg(struct bnx2x_softc *sc)
+{
+ int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
+ int abs_func;
+ int vn;
+
+ if (BNX2X_NOMCP(sc)) {
+ return; /* what should be the default bvalue in this case */
+ }
+
+ /*
+ * The formula for computing the absolute function number is...
+ * For 2 port configuration (4 functions per port):
+ * abs_func = 2 * vn + SC_PORT + SC_PATH
+ * For 4 port configuration (2 functions per port):
+ * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
+ */
+ for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
+ abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
+ if (abs_func >= E1H_FUNC_MAX) {
+ break;
+ }
+ sc->devinfo.mf_info.mf_config[vn] =
+ MFCFG_RD(sc, func_mf_config[abs_func].config);
+ }
+
+ if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
+ FUNC_MF_CFG_FUNC_DISABLED) {
+ PMD_DRV_LOG(DEBUG, "mf_cfg function disabled");
+ sc->flags |= BNX2X_MF_FUNC_DIS;
+ } else {
+ PMD_DRV_LOG(DEBUG, "mf_cfg function enabled");
+ sc->flags &= ~BNX2X_MF_FUNC_DIS;
+ }
+}
+
+/* acquire split MCP access lock register */
+static int bnx2x_acquire_alr(struct bnx2x_softc *sc)
+{
+ uint32_t j, val;
+
+ for (j = 0; j < 1000; j++) {
+ val = (1UL << 31);
+ REG_WR(sc, GRCBASE_MCP + 0x9c, val);
+ val = REG_RD(sc, GRCBASE_MCP + 0x9c);
+ if (val & (1L << 31))
+ break;
+
+ DELAY(5000);
+ }
+
+ if (!(val & (1L << 31))) {
+ PMD_DRV_LOG(NOTICE, "Cannot acquire MCP access lock register");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x_softc *sc)
+{
+ REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
+}
+
+static void bnx2x_fan_failure(struct bnx2x_softc *sc)
+{
+ int port = SC_PORT(sc);
+ uint32_t ext_phy_config;
+
+ /* mark the failure */
+ ext_phy_config =
+ SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
+
+ ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+ ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+ SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
+ ext_phy_config);
+
+ /* log the failure */
+ PMD_DRV_LOG(INFO,
+ "Fan Failure has caused the driver to shutdown "
+ "the card to prevent permanent damage. "
+ "Please contact OEM Support for assistance");
+
+ rte_panic("Schedule task to handle fan failure");
+}
+
+/* this function is called upon a link interrupt */
+static void bnx2x_link_attn(struct bnx2x_softc *sc)
+{
+ uint32_t pause_enabled = 0;
+ struct host_port_stats *pstats;
+ int cmng_fns;
+
+ /* Make sure that we are synced with the current statistics */
+ bnx2x_stats_handle(sc, STATS_EVENT_STOP);
+
+ elink_link_update(&sc->link_params, &sc->link_vars);
+
+ if (sc->link_vars.link_up) {
+
+ /* dropless flow control */
+ if (sc->dropless_fc) {
+ pause_enabled = 0;
+
+ if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
+ pause_enabled = 1;
+ }
+
+ REG_WR(sc,
+ (BAR_USTRORM_INTMEM +
+ USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
+ pause_enabled);
+ }
+
+ if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
+ pstats = BNX2X_SP(sc, port_stats);
+ /* reset old mac stats */
+ memset(&(pstats->mac_stx[0]), 0,
+ sizeof(struct mac_stx));
+ }
+
+ if (sc->state == BNX2X_STATE_OPEN) {
+ bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);
+ }
+ }
+
+ if (sc->link_vars.link_up && sc->link_vars.line_speed) {
+ cmng_fns = bnx2x_get_cmng_fns_mode(sc);
+
+ if (cmng_fns != CMNG_FNS_NONE) {
+ bnx2x_cmng_fns_init(sc, FALSE, cmng_fns);
+ storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
+ }
+ }
+
+ bnx2x_link_report(sc);
+
+ if (IS_MF(sc)) {
+ bnx2x_link_sync_notify(sc);
+ }
+}
+
+static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
+{
+ int port = SC_PORT(sc);
+ uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+ uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
+ NIG_REG_MASK_INTERRUPT_PORT0;
+ uint32_t aeu_mask;
+ uint32_t nig_mask = 0;
+ uint32_t reg_addr;
+ uint32_t igu_acked;
+ uint32_t cnt;
+
+ if (sc->attn_state & asserted) {
+ PMD_DRV_LOG(ERR, "IGU ERROR attn=0x%08x", asserted);
+ }
+
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+ aeu_mask = REG_RD(sc, aeu_addr);
+
+ aeu_mask &= ~(asserted & 0x3ff);
+
+ REG_WR(sc, aeu_addr, aeu_mask);
+
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+ sc->attn_state |= asserted;
+
+ if (asserted & ATTN_HARD_WIRED_MASK) {
+ if (asserted & ATTN_NIG_FOR_FUNC) {
+
+ /* save nig interrupt mask */
+ nig_mask = REG_RD(sc, nig_int_mask_addr);
+
+ /* If nig_mask is not set, no need to call the update function */
+ if (nig_mask) {
+ REG_WR(sc, nig_int_mask_addr, 0);
+
+ bnx2x_link_attn(sc);
+ }
+
+ /* handle unicore attn? */
+ }
+
+ if (asserted & ATTN_SW_TIMER_4_FUNC) {
+ PMD_DRV_LOG(DEBUG, "ATTN_SW_TIMER_4_FUNC!");
+ }
+
+ if (asserted & GPIO_2_FUNC) {
+ PMD_DRV_LOG(DEBUG, "GPIO_2_FUNC!");
+ }
+
+ if (asserted & GPIO_3_FUNC) {
+ PMD_DRV_LOG(DEBUG, "GPIO_3_FUNC!");
+ }
+
+ if (asserted & GPIO_4_FUNC) {
+ PMD_DRV_LOG(DEBUG, "GPIO_4_FUNC!");
+ }
+
+ if (port == 0) {
+ if (asserted & ATTN_GENERAL_ATTN_1) {
+ PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_1!");
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_2) {
+ PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_2!");
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_3) {
+ PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_3!");
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
+ }
+ } else {
+ if (asserted & ATTN_GENERAL_ATTN_4) {
+ PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_4!");
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_5) {
+ PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_5!");
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_6) {
+ PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_6!");
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
+ }
+ }
+ }
+ /* hardwired */
+ if (sc->devinfo.int_block == INT_BLOCK_HC) {
+ reg_addr =
+ (HC_REG_COMMAND_REG + port * 32 +
+ COMMAND_REG_ATTN_BITS_SET);
+ } else {
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER * 8);
+ }
+
+ PMD_DRV_LOG(DEBUG, "about to mask 0x%08x at %s addr 0x%08x",
+ asserted,
+ (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU",
+ reg_addr);
+ REG_WR(sc, reg_addr, asserted);
+
+ /* now set back the mask */
+ if (asserted & ATTN_NIG_FOR_FUNC) {
+ /*
+ * Verify that IGU ack through BAR was written before restoring
+ * NIG mask. This loop should exit after 2-3 iterations max.
+ */
+ if (sc->devinfo.int_block != INT_BLOCK_HC) {
+ cnt = 0;
+
+ do {
+ igu_acked =
+ REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
+ } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0)
+ && (++cnt < MAX_IGU_ATTN_ACK_TO));
+
+ if (!igu_acked) {
+ PMD_DRV_LOG(ERR,
+ "Failed to verify IGU ack on time");
+ }
+
+ mb();
+ }
+
+ REG_WR(sc, nig_int_mask_addr, nig_mask);
+
+ }
+}
+
+static void
+bnx2x_print_next_block(__rte_unused struct bnx2x_softc *sc, __rte_unused int idx,
+ __rte_unused const char *blk)
+{
+ PMD_DRV_LOG(INFO, "%s%s", idx ? ", " : "", blk);
+}
+
+static int
+bnx2x_check_blocks_with_parity0(struct bnx2x_softc *sc, uint32_t sig, int par_num,
+ uint8_t print)
+{
+ uint32_t cur_bit = 0;
+ int i = 0;
+
+ for (i = 0; sig; i++) {
+ cur_bit = ((uint32_t) 0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "BRB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "PARSER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "TSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "SEARCHER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "TCM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "TSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "XPB");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static int
+bnx2x_check_blocks_with_parity1(struct bnx2x_softc *sc, uint32_t sig, int par_num,
+ uint8_t * global, uint8_t print)
+{
+ int i = 0;
+ uint32_t cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((uint32_t) 0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "PBF");
+ break;
+ case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "QM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "TM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "XSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "XCM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "XSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "DOORBELLQ");
+ break;
+ case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "NIG");
+ break;
+ case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "VAUX PCI CORE");
+ *global = TRUE;
+ break;
+ case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "DEBUG");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "USDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "UCM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "USEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "UPB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "CSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "CCM");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static int
+bnx2x_check_blocks_with_parity2(struct bnx2x_softc *sc, uint32_t sig, int par_num,
+ uint8_t print)
+{
+ uint32_t cur_bit = 0;
+ int i = 0;
+
+ for (i = 0; sig; i++) {
+ cur_bit = ((uint32_t) 0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "CSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "PXP");
+ break;
+ case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "PXPPCICLOCKCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "CFC");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "CDU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "DMAE");
+ break;
+ case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "IGU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "MISC");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static int
+bnx2x_check_blocks_with_parity3(struct bnx2x_softc *sc, uint32_t sig, int par_num,
+ uint8_t * global, uint8_t print)
+{
+ uint32_t cur_bit = 0;
+ int i = 0;
+
+ for (i = 0; sig; i++) {
+ cur_bit = ((uint32_t) 0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "MCP ROM");
+ *global = TRUE;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "MCP UMP RX");
+ *global = TRUE;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "MCP UMP TX");
+ *global = TRUE;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "MCP SCPAD");
+ *global = TRUE;
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static int
+bnx2x_check_blocks_with_parity4(struct bnx2x_softc *sc, uint32_t sig, int par_num,
+ uint8_t print)
+{
+ uint32_t cur_bit = 0;
+ int i = 0;
+
+ for (i = 0; sig; i++) {
+ cur_bit = ((uint32_t) 0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "PGLUE_B");
+ break;
+ case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+ if (print)
+ bnx2x_print_next_block(sc, par_num++,
+ "ATC");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static uint8_t
+bnx2x_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print,
+ uint32_t * sig)
+{
+ int par_num = 0;
+
+ if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
+ (sig[1] & HW_PRTY_ASSERT_SET_1) ||
+ (sig[2] & HW_PRTY_ASSERT_SET_2) ||
+ (sig[3] & HW_PRTY_ASSERT_SET_3) ||
+ (sig[4] & HW_PRTY_ASSERT_SET_4)) {
+ PMD_DRV_LOG(ERR,
+ "Parity error: HW block parity attention:"
+ "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x",
+ (uint32_t) (sig[0] & HW_PRTY_ASSERT_SET_0),
+ (uint32_t) (sig[1] & HW_PRTY_ASSERT_SET_1),
+ (uint32_t) (sig[2] & HW_PRTY_ASSERT_SET_2),
+ (uint32_t) (sig[3] & HW_PRTY_ASSERT_SET_3),
+ (uint32_t) (sig[4] & HW_PRTY_ASSERT_SET_4));
+
+ if (print)
+ PMD_DRV_LOG(INFO, "Parity errors detected in blocks: ");
+
+ par_num =
+ bnx2x_check_blocks_with_parity0(sc, sig[0] &
+ HW_PRTY_ASSERT_SET_0,
+ par_num, print);
+ par_num =
+ bnx2x_check_blocks_with_parity1(sc, sig[1] &
+ HW_PRTY_ASSERT_SET_1,
+ par_num, global, print);
+ par_num =
+ bnx2x_check_blocks_with_parity2(sc, sig[2] &
+ HW_PRTY_ASSERT_SET_2,
+ par_num, print);
+ par_num =
+ bnx2x_check_blocks_with_parity3(sc, sig[3] &
+ HW_PRTY_ASSERT_SET_3,
+ par_num, global, print);
+ par_num =
+ bnx2x_check_blocks_with_parity4(sc, sig[4] &
+ HW_PRTY_ASSERT_SET_4,
+ par_num, print);
+
+ if (print)
+ PMD_DRV_LOG(INFO, "");
+
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+static uint8_t
+bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print)
+{
+ struct attn_route attn = { {0} };
+ int port = SC_PORT(sc);
+
+ attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4);
+ attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4);
+ attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4);
+ attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4);
+
+ if (!CHIP_IS_E1x(sc))
+ attn.sig[4] =
+ REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4);
+
+ return bnx2x_parity_attn(sc, global, print, attn.sig);
+}
+
+static void bnx2x_attn_int_deasserted4(struct bnx2x_softc *sc, uint32_t attn)
+{
+ uint32_t val;
+
+ if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
+ val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
+ PMD_DRV_LOG(INFO, "ERROR: PGLUE hw attention 0x%08x", val);
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
+ PMD_DRV_LOG(INFO,
+ "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
+ PMD_DRV_LOG(INFO,
+ "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
+ PMD_DRV_LOG(INFO,
+ "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
+ PMD_DRV_LOG(INFO,
+ "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN");
+ if (val &
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
+ PMD_DRV_LOG(INFO,
+ "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN");
+ if (val &
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
+ PMD_DRV_LOG(INFO,
+ "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
+ PMD_DRV_LOG(INFO,
+ "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
+ PMD_DRV_LOG(INFO,
+ "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
+ PMD_DRV_LOG(INFO,
+ "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW");
+ }
+
+ if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
+ val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
+ PMD_DRV_LOG(INFO, "ERROR: ATC hw attention 0x%08x", val);
+ if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
+ PMD_DRV_LOG(INFO,
+ "ERROR: ATC_ATC_INT_STS_REG_ADDRESS_ERROR");
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
+ PMD_DRV_LOG(INFO,
+ "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND");
+ if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
+ PMD_DRV_LOG(INFO,
+ "ERROR: ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS");
+ if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
+ PMD_DRV_LOG(INFO,
+ "ERROR: ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT");
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
+ PMD_DRV_LOG(INFO,
+ "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR");
+ if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
+ PMD_DRV_LOG(INFO,
+ "ERROR: ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU");
+ }
+
+ if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
+ PMD_DRV_LOG(INFO,
+ "ERROR: FATAL parity attention set4 0x%08x",
+ (uint32_t) (attn &
+ (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR
+ |
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
+ }
+}
+
+static void bnx2x_e1h_disable(struct bnx2x_softc *sc)
+{
+ int port = SC_PORT(sc);
+
+ REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0);
+}
+
+static void bnx2x_e1h_enable(struct bnx2x_softc *sc)
+{
+ int port = SC_PORT(sc);
+
+ REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
+}
+
+/*
+ * called due to MCP event (on pmf):
+ * reread new bandwidth configuration
+ * configure FW
+ * notify others function about the change
+ */
+static void bnx2x_config_mf_bw(struct bnx2x_softc *sc)
+{
+ if (sc->link_vars.link_up) {
+ bnx2x_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
+ bnx2x_link_sync_notify(sc);
+ }
+
+ storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
+}
+
+static void bnx2x_set_mf_bw(struct bnx2x_softc *sc)
+{
+ bnx2x_config_mf_bw(sc);
+ bnx2x_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
+}
+
+static void bnx2x_handle_eee_event(struct bnx2x_softc *sc)
+{
+ bnx2x_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
+}
+
+#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
+
+static void bnx2x_drv_info_ether_stat(struct bnx2x_softc *sc)
+{
+ struct eth_stats_info *ether_stat = &sc->sp->drv_info_to_mcp.ether_stat;
+
+ strncpy(ether_stat->version, BNX2X_DRIVER_VERSION,
+ ETH_STAT_INFO_VERSION_LEN);
+
+ sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local + MAC_PAD,
+ MAC_PAD, ETH_ALEN);
+
+ ether_stat->mtu_size = sc->mtu;
+
+ ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
+ ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
+
+ ether_stat->txq_size = sc->tx_ring_size;
+ ether_stat->rxq_size = sc->rx_ring_size;
+}
+
+static void bnx2x_handle_drv_info_req(struct bnx2x_softc *sc)
+{
+ enum drv_info_opcode op_code;
+ uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
+
+ /* if drv_info version supported by MFW doesn't match - send NACK */
+ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
+ bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+ return;
+ }
+
+ op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
+ DRV_INFO_CONTROL_OP_CODE_SHIFT);
+
+ memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
+
+ switch (op_code) {
+ case ETH_STATS_OPCODE:
+ bnx2x_drv_info_ether_stat(sc);
+ break;
+ case FCOE_STATS_OPCODE:
+ case ISCSI_STATS_OPCODE:
+ default:
+ /* if op code isn't supported - send NACK */
+ bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+ return;
+ }
+
+ /*
+ * If we got drv_info attn from MFW then these fields are defined in
+ * shmem2 for sure
+ */
+ SHMEM2_WR(sc, drv_info_host_addr_lo,
+ U64_LO(BNX2X_SP_MAPPING(sc, drv_info_to_mcp)));
+ SHMEM2_WR(sc, drv_info_host_addr_hi,
+ U64_HI(BNX2X_SP_MAPPING(sc, drv_info_to_mcp)));
+
+ bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+}
+
+static void bnx2x_dcc_event(struct bnx2x_softc *sc, uint32_t dcc_event)
+{
+ if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
+/*
+ * This is the only place besides the function initialization
+ * where the sc->flags can change so it is done without any
+ * locks
+ */
+ if (sc->devinfo.
+ mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
+ PMD_DRV_LOG(DEBUG, "mf_cfg function disabled");
+ sc->flags |= BNX2X_MF_FUNC_DIS;
+ bnx2x_e1h_disable(sc);
+ } else {
+ PMD_DRV_LOG(DEBUG, "mf_cfg function enabled");
+ sc->flags &= ~BNX2X_MF_FUNC_DIS;
+ bnx2x_e1h_enable(sc);
+ }
+ dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
+ }
+
+ if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
+ bnx2x_config_mf_bw(sc);
+ dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
+ }
+
+ /* Report results to MCP */
+ if (dcc_event)
+ bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
+ else
+ bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
+}
+
+static void bnx2x_pmf_update(struct bnx2x_softc *sc)
+{
+ int port = SC_PORT(sc);
+ uint32_t val;
+
+ sc->port.pmf = 1;
+
+ /*
+ * We need the mb() to ensure the ordering between the writing to
+ * sc->port.pmf here and reading it from the bnx2x_periodic_task().
+ */
+ mb();
+
+ /* enable nig attention */
+ val = (0xff0f | (1 << (SC_VN(sc) + 4)));
+ if (sc->devinfo.int_block == INT_BLOCK_HC) {
+ REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, val);
+ REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, val);
+ } else if (!CHIP_IS_E1x(sc)) {
+ REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
+ REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
+ }
+
+ bnx2x_stats_handle(sc, STATS_EVENT_PMF);
+}
+
+static int bnx2x_mc_assert(struct bnx2x_softc *sc)
+{
+ char last_idx;
+ int i, rc = 0;
+ __rte_unused uint32_t row0, row1, row2, row3;
+
+ /* XSTORM */
+ last_idx =
+ REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx)
+ PMD_DRV_LOG(ERR, "XSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 =
+ REG_RD(sc,
+ BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
+ row1 =
+ REG_RD(sc,
+ BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) +
+ 4);
+ row2 =
+ REG_RD(sc,
+ BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) +
+ 8);
+ row3 =
+ REG_RD(sc,
+ BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) +
+ 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ PMD_DRV_LOG(ERR,
+ "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ /* TSTORM */
+ last_idx =
+ REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx) {
+ PMD_DRV_LOG(ERR, "TSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
+ }
+
+ /* print the asserts */
+ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 =
+ REG_RD(sc,
+ BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
+ row1 =
+ REG_RD(sc,
+ BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) +
+ 4);
+ row2 =
+ REG_RD(sc,
+ BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) +
+ 8);
+ row3 =
+ REG_RD(sc,
+ BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) +
+ 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ PMD_DRV_LOG(ERR,
+ "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ /* CSTORM */
+ last_idx =
+ REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx) {
+ PMD_DRV_LOG(ERR, "CSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
+ }
+
+ /* print the asserts */
+ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 =
+ REG_RD(sc,
+ BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
+ row1 =
+ REG_RD(sc,
+ BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) +
+ 4);
+ row2 =
+ REG_RD(sc,
+ BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) +
+ 8);
+ row3 =
+ REG_RD(sc,
+ BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) +
+ 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ PMD_DRV_LOG(ERR,
+ "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ /* USTORM */
+ last_idx =
+ REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx) {
+ PMD_DRV_LOG(ERR, "USTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
+ }
+
+ /* print the asserts */
+ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 =
+ REG_RD(sc,
+ BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
+ row1 =
+ REG_RD(sc,
+ BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) +
+ 4);
+ row2 =
+ REG_RD(sc,
+ BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) +
+ 8);
+ row3 =
+ REG_RD(sc,
+ BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) +
+ 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ PMD_DRV_LOG(ERR,
+ "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static void bnx2x_attn_int_deasserted3(struct bnx2x_softc *sc, uint32_t attn)
+{
+ int func = SC_FUNC(sc);
+ uint32_t val;
+
+ if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
+
+ if (attn & BNX2X_PMF_LINK_ASSERT(sc)) {
+
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0);
+ bnx2x_read_mf_cfg(sc);
+ sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
+ MFCFG_RD(sc,
+ func_mf_config[SC_ABS_FUNC(sc)].config);
+ val =
+ SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
+
+ if (val & DRV_STATUS_DCC_EVENT_MASK)
+ bnx2x_dcc_event(sc,
+ (val &
+ DRV_STATUS_DCC_EVENT_MASK));
+
+ if (val & DRV_STATUS_SET_MF_BW)
+ bnx2x_set_mf_bw(sc);
+
+ if (val & DRV_STATUS_DRV_INFO_REQ)
+ bnx2x_handle_drv_info_req(sc);
+
+ if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
+ bnx2x_pmf_update(sc);
+
+ if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
+ bnx2x_handle_eee_event(sc);
+
+ if (sc->link_vars.periodic_flags &
+ ELINK_PERIODIC_FLAGS_LINK_EVENT) {
+ /* sync with link */
+ sc->link_vars.periodic_flags &=
+ ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
+ if (IS_MF(sc)) {
+ bnx2x_link_sync_notify(sc);
+ }
+ bnx2x_link_report(sc);
+ }
+
+ /*
+ * Always call it here: bnx2x_link_report() will
+ * prevent the link indication duplication.
+ */
+ bnx2x_link_status_update(sc);
+
+ } else if (attn & BNX2X_MC_ASSERT_BITS) {
+
+ PMD_DRV_LOG(ERR, "MC assert!");
+ bnx2x_mc_assert(sc);
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
+ rte_panic("MC assert!");
+
+ } else if (attn & BNX2X_MCP_ASSERT) {
+
+ PMD_DRV_LOG(ERR, "MCP assert!");
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
+
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Unknown HW assert! (attn 0x%08x)", attn);
+ }
+ }
+
+ if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
+ PMD_DRV_LOG(ERR, "LATCHED attention 0x%08x (masked)", attn);
+ if (attn & BNX2X_GRC_TIMEOUT) {
+ val = REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
+ PMD_DRV_LOG(ERR, "GRC time-out 0x%08x", val);
+ }
+ if (attn & BNX2X_GRC_RSV) {
+ val = REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
+ PMD_DRV_LOG(ERR, "GRC reserved 0x%08x", val);
+ }
+ REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
+ }
+}
+
+static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)
+{
+ int port = SC_PORT(sc);
+ int reg_offset;
+ uint32_t val0, mask0, val1, mask1;
+ uint32_t val;
+
+ if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
+ val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
+ PMD_DRV_LOG(ERR, "CFC hw attention 0x%08x", val);
+/* CFC error attention */
+ if (val & 0x2) {
+ PMD_DRV_LOG(ERR, "FATAL error from CFC");
+ }
+ }
+
+ if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
+ val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
+ PMD_DRV_LOG(ERR, "PXP hw attention-0 0x%08x", val);
+/* RQ_USDMDP_FIFO_OVERFLOW */
+ if (val & 0x18000) {
+ PMD_DRV_LOG(ERR, "FATAL error from PXP");
+ }
+
+ if (!CHIP_IS_E1x(sc)) {
+ val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
+ PMD_DRV_LOG(ERR, "PXP hw attention-1 0x%08x", val);
+ }
+ }
+#define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
+#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
+
+ if (attn & AEU_PXP2_HW_INT_BIT) {
+/* CQ47854 workaround do not panic on
+ * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
+ */
+ if (!CHIP_IS_E1x(sc)) {
+ mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
+ val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
+ mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
+ val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
+ /*
+ * If the only PXP2_EOP_ERROR_BIT is set in
+ * STS0 and STS1 - clear it
+ *
+ * probably we lose additional attentions between
+ * STS0 and STS_CLR0, in this case user will not
+ * be notified about them
+ */
+ if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
+ !(val1 & mask1))
+ val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
+
+ /* print the register, since no one can restore it */
+ PMD_DRV_LOG(ERR,
+ "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x", val0);
+
+ /*
+ * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
+ * then notify
+ */
+ if (val0 & PXP2_EOP_ERROR_BIT) {
+ PMD_DRV_LOG(ERR, "PXP2_WR_PGLUE_EOP_ERROR");
+
+ /*
+ * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
+ * set then clear attention from PXP2 block without panic
+ */
+ if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
+ ((val1 & mask1) == 0))
+ attn &= ~AEU_PXP2_HW_INT_BIT;
+ }
+ }
+ }
+
+ if (attn & HW_INTERRUT_ASSERT_SET_2) {
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
+
+ val = REG_RD(sc, reg_offset);
+ val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
+ REG_WR(sc, reg_offset, val);
+
+ PMD_DRV_LOG(ERR,
+ "FATAL HW block attention set2 0x%x",
+ (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_2));
+ rte_panic("HW block attention set2");
+ }
+}
+
+static void bnx2x_attn_int_deasserted1(struct bnx2x_softc *sc, uint32_t attn)
+{
+ int port = SC_PORT(sc);
+ int reg_offset;
+ uint32_t val;
+
+ if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
+ val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
+ PMD_DRV_LOG(ERR, "DB hw attention 0x%08x", val);
+/* DORQ discard attention */
+ if (val & 0x2) {
+ PMD_DRV_LOG(ERR, "FATAL error from DORQ");
+ }
+ }
+
+ if (attn & HW_INTERRUT_ASSERT_SET_1) {
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
+
+ val = REG_RD(sc, reg_offset);
+ val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
+ REG_WR(sc, reg_offset, val);
+
+ PMD_DRV_LOG(ERR,
+ "FATAL HW block attention set1 0x%08x",
+ (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_1));
+ rte_panic("HW block attention set1");
+ }
+}
+
+static void bnx2x_attn_int_deasserted0(struct bnx2x_softc *sc, uint32_t attn)
+{
+ int port = SC_PORT(sc);
+ int reg_offset;
+ uint32_t val;
+
+ reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+
+ if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
+ val = REG_RD(sc, reg_offset);
+ val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
+ REG_WR(sc, reg_offset, val);
+
+ PMD_DRV_LOG(WARNING, "SPIO5 hw attention");
+
+/* Fan failure attention */
+ elink_hw_reset_phy(&sc->link_params);
+ bnx2x_fan_failure(sc);
+ }
+
+ if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
+ elink_handle_module_detect_int(&sc->link_params);
+ }
+
+ if (attn & HW_INTERRUT_ASSERT_SET_0) {
+ val = REG_RD(sc, reg_offset);
+ val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
+ REG_WR(sc, reg_offset, val);
+
+ rte_panic("FATAL HW block attention set0 0x%lx",
+ (attn & HW_INTERRUT_ASSERT_SET_0));
+ }
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x_softc *sc, uint32_t deasserted)
+{
+ struct attn_route attn;
+ struct attn_route *group_mask;
+ int port = SC_PORT(sc);
+ int index;
+ uint32_t reg_addr;
+ uint32_t val;
+ uint32_t aeu_mask;
+ uint8_t global = FALSE;
+
+ /*
+ * Need to take HW lock because MCP or other port might also
+ * try to handle this event.
+ */
+ bnx2x_acquire_alr(sc);
+
+ if (bnx2x_chk_parity_attn(sc, &global, TRUE)) {
+ sc->recovery_state = BNX2X_RECOVERY_INIT;
+
+/* disable HW interrupts */
+ bnx2x_int_disable(sc);
+ bnx2x_release_alr(sc);
+ return;
+ }
+
+ attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4);
+ attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4);
+ attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4);
+ attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4);
+ if (!CHIP_IS_E1x(sc)) {
+ attn.sig[4] =
+ REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4);
+ } else {
+ attn.sig[4] = 0;
+ }
+
+ for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+ if (deasserted & (1 << index)) {
+ group_mask = &sc->attn_group[index];
+
+ bnx2x_attn_int_deasserted4(sc,
+ attn.
+ sig[4] & group_mask->sig[4]);
+ bnx2x_attn_int_deasserted3(sc,
+ attn.
+ sig[3] & group_mask->sig[3]);
+ bnx2x_attn_int_deasserted1(sc,
+ attn.
+ sig[1] & group_mask->sig[1]);
+ bnx2x_attn_int_deasserted2(sc,
+ attn.
+ sig[2] & group_mask->sig[2]);
+ bnx2x_attn_int_deasserted0(sc,
+ attn.
+ sig[0] & group_mask->sig[0]);
+ }
+ }
+
+ bnx2x_release_alr(sc);
+
+ if (sc->devinfo.int_block == INT_BLOCK_HC) {
+ reg_addr = (HC_REG_COMMAND_REG + port * 32 +
+ COMMAND_REG_ATTN_BITS_CLR);
+ } else {
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER * 8);
+ }
+
+ val = ~deasserted;
+ PMD_DRV_LOG(DEBUG,
+ "about to mask 0x%08x at %s addr 0x%08x", val,
+ (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU",
+ reg_addr);
+ REG_WR(sc, reg_addr, val);
+
+ if (~sc->attn_state & deasserted) {
+ PMD_DRV_LOG(ERR, "IGU error");
+ }
+
+ reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+ aeu_mask = REG_RD(sc, reg_addr);
+
+ aeu_mask |= (deasserted & 0x3ff);
+
+ REG_WR(sc, reg_addr, aeu_mask);
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+ sc->attn_state &= ~deasserted;
+}
+
+static void bnx2x_attn_int(struct bnx2x_softc *sc)
+{
+ /* read local copy of bits */
+ uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
+ uint32_t attn_ack =
+ le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
+ uint32_t attn_state = sc->attn_state;
+
+ /* look for changed bits */
+ uint32_t asserted = attn_bits & ~attn_ack & ~attn_state;
+ uint32_t deasserted = ~attn_bits & attn_ack & attn_state;
+
+ PMD_DRV_LOG(DEBUG,
+ "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x",
+ attn_bits, attn_ack, asserted, deasserted);
+
+ if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
+ PMD_DRV_LOG(ERR, "BAD attention state");
+ }
+
+ /* handle bits that were raised */
+ if (asserted) {
+ bnx2x_attn_int_asserted(sc, asserted);
+ }
+
+ if (deasserted) {
+ bnx2x_attn_int_deasserted(sc, deasserted);
+ }
+}
+
+static uint16_t bnx2x_update_dsb_idx(struct bnx2x_softc *sc)
+{
+ struct host_sp_status_block *def_sb = sc->def_sb;
+ uint16_t rc = 0;
+
+ mb(); /* status block is written to by the chip */
+
+ if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
+ sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
+ rc |= BNX2X_DEF_SB_ATT_IDX;
+ }
+
+ if (sc->def_idx != def_sb->sp_sb.running_index) {
+ sc->def_idx = def_sb->sp_sb.running_index;
+ rc |= BNX2X_DEF_SB_IDX;
+ }
+
+ mb();
+
+ return rc;
+}
+
+static struct ecore_queue_sp_obj *bnx2x_cid_to_q_obj(struct bnx2x_softc *sc,
+ uint32_t cid)
+{
+ return &sc->sp_objs[CID_TO_FP(cid, sc)].q_obj;
+}
+
+static void bnx2x_handle_mcast_eqe(struct bnx2x_softc *sc)
+{
+ struct ecore_mcast_ramrod_params rparam;
+ int rc;
+
+ memset(&rparam, 0, sizeof(rparam));
+
+ rparam.mcast_obj = &sc->mcast_obj;
+
+ /* clear pending state for the last command */
+ sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
+
+ /* if there are pending mcast commands - send them */
+ if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
+ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
+ if (rc < 0) {
+ PMD_DRV_LOG(INFO,
+ "Failed to send pending mcast commands (%d)",
+ rc);
+ }
+ }
+}
+
+static void
+bnx2x_handle_classification_eqe(struct bnx2x_softc *sc, union event_ring_elem *elem)
+{
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+ uint32_t cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
+ struct ecore_vlan_mac_obj *vlan_mac_obj;
+
+ /* always push next commands out, don't wait here */
+ bnx2x_set_bit(RAMROD_CONT, &ramrod_flags);
+
+ switch (le32toh(elem->message.data.eth_event.echo) >> BNX2X_SWCID_SHIFT) {
+ case ECORE_FILTER_MAC_PENDING:
+ PMD_DRV_LOG(DEBUG, "Got SETUP_MAC completions");
+ vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
+ break;
+
+ case ECORE_FILTER_MCAST_PENDING:
+ PMD_DRV_LOG(DEBUG, "Got SETUP_MCAST completions");
+ bnx2x_handle_mcast_eqe(sc);
+ return;
+
+ default:
+ PMD_DRV_LOG(NOTICE, "Unsupported classification command: %d",
+ elem->message.data.eth_event.echo);
+ return;
+ }
+
+ rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
+
+ if (rc < 0) {
+ PMD_DRV_LOG(NOTICE, "Failed to schedule new commands (%d)", rc);
+ } else if (rc > 0) {
+ PMD_DRV_LOG(DEBUG, "Scheduled next pending commands...");
+ }
+}
+
+static void bnx2x_handle_rx_mode_eqe(struct bnx2x_softc *sc)
+{
+ bnx2x_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
+
+ /* send rx_mode command again if was requested */
+ if (bnx2x_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state)) {
+ bnx2x_set_storm_rx_mode(sc);
+ }
+}
+
+static void bnx2x_update_eq_prod(struct bnx2x_softc *sc, uint16_t prod)
+{
+ storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
+ wmb(); /* keep prod updates ordered */
+}
+
+static void bnx2x_eq_int(struct bnx2x_softc *sc)
+{
+ uint16_t hw_cons, sw_cons, sw_prod;
+ union event_ring_elem *elem;
+ uint8_t echo;
+ uint32_t cid;
+ uint8_t opcode;
+ int spqe_cnt = 0;
+ struct ecore_queue_sp_obj *q_obj;
+ struct ecore_func_sp_obj *f_obj = &sc->func_obj;
+ struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
+
+ hw_cons = le16toh(*sc->eq_cons_sb);
+
+ /*
+ * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
+ * when we get to the next-page we need to adjust so the loop
+ * condition below will be met. The next element is the size of a
+ * regular element and hence incrementing by 1
+ */
+ if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
+ hw_cons++;
+ }
+
+ /*
+ * This function may never run in parallel with itself for a
+ * specific sc and no need for a read memory barrier here.
+ */
+ sw_cons = sc->eq_cons;
+ sw_prod = sc->eq_prod;
+
+ for (;
+ sw_cons != hw_cons;
+ sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
+
+ elem = &sc->eq[EQ_DESC(sw_cons)];
+
+/* elem CID originates from FW, actually LE */
+ cid = SW_CID(elem->message.data.cfc_del_event.cid);
+ opcode = elem->message.opcode;
+
+/* handle eq element */
+ switch (opcode) {
+ case EVENT_RING_OPCODE_STAT_QUERY:
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "got statistics completion event %d",
+ sc->stats_comp++);
+ /* nothing to do with stats comp */
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_CFC_DEL:
+ /* handle according to cid range */
+ /* we may want to verify here that the sc state is HALTING */
+ PMD_DRV_LOG(DEBUG, "got delete ramrod for MULTI[%d]",
+ cid);
+ q_obj = bnx2x_cid_to_q_obj(sc, cid);
+ if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
+ break;
+ }
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_STOP_TRAFFIC:
+ PMD_DRV_LOG(DEBUG, "got STOP TRAFFIC");
+ if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
+ break;
+ }
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_START_TRAFFIC:
+ PMD_DRV_LOG(DEBUG, "got START TRAFFIC");
+ if (f_obj->complete_cmd
+ (sc, f_obj, ECORE_F_CMD_TX_START)) {
+ break;
+ }
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_FUNCTION_UPDATE:
+ echo = elem->message.data.function_update_event.echo;
+ if (echo == SWITCH_UPDATE) {
+ PMD_DRV_LOG(DEBUG,
+ "got FUNC_SWITCH_UPDATE ramrod");
+ if (f_obj->complete_cmd(sc, f_obj,
+ ECORE_F_CMD_SWITCH_UPDATE))
+ {
+ break;
+ }
+ } else {
+ PMD_DRV_LOG(DEBUG,
+ "AFEX: ramrod completed FUNCTION_UPDATE");
+ f_obj->complete_cmd(sc, f_obj,
+ ECORE_F_CMD_AFEX_UPDATE);
+ }
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_FORWARD_SETUP:
+ q_obj = &bnx2x_fwd_sp_obj(sc, q_obj);
+ if (q_obj->complete_cmd(sc, q_obj,
+ ECORE_Q_CMD_SETUP_TX_ONLY)) {
+ break;
+ }
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_FUNCTION_START:
+ PMD_DRV_LOG(DEBUG, "got FUNC_START ramrod");
+ if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
+ break;
+ }
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_FUNCTION_STOP:
+ PMD_DRV_LOG(DEBUG, "got FUNC_STOP ramrod");
+ if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
+ break;
+ }
+ goto next_spqe;
+ }
+
+ switch (opcode | sc->state) {
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPENING_WAITING_PORT):
+ cid =
+ elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
+ PMD_DRV_LOG(DEBUG, "got RSS_UPDATE ramrod. CID %d",
+ cid);
+ rss_raw->clear_pending(rss_raw);
+ break;
+
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_CLOSING_WAITING_HALT):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_CLOSING_WAITING_HALT):
+ PMD_DRV_LOG(DEBUG,
+ "got (un)set mac ramrod");
+ bnx2x_handle_classification_eqe(sc, elem);
+ break;
+
+ case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_CLOSING_WAITING_HALT):
+ PMD_DRV_LOG(DEBUG,
+ "got mcast ramrod");
+ bnx2x_handle_mcast_eqe(sc);
+ break;
+
+ case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_CLOSING_WAITING_HALT):
+ PMD_DRV_LOG(DEBUG,
+ "got rx_mode ramrod");
+ bnx2x_handle_rx_mode_eqe(sc);
+ break;
+
+ default:
+ /* unknown event log error and continue */
+ PMD_DRV_LOG(INFO, "Unknown EQ event %d, sc->state 0x%x",
+ elem->message.opcode, sc->state);
+ }
+
+next_spqe:
+ spqe_cnt++;
+ } /* for */
+
+ mb();
+ atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
+
+ sc->eq_cons = sw_cons;
+ sc->eq_prod = sw_prod;
+
+ /* make sure that above mem writes were issued towards the memory */
+ wmb();
+
+ /* update producer */
+ bnx2x_update_eq_prod(sc, sc->eq_prod);
+}
+
+static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc)
+{
+ uint16_t status;
+ int rc = 0;
+
+ /* what work needs to be performed? */
+ status = bnx2x_update_dsb_idx(sc);
+
+ /* HW attentions */
+ if (status & BNX2X_DEF_SB_ATT_IDX) {
+ PMD_DRV_LOG(DEBUG, "---> ATTN INTR <---");
+ bnx2x_attn_int(sc);
+ status &= ~BNX2X_DEF_SB_ATT_IDX;
+ rc = 1;
+ }
+
+ /* SP events: STAT_QUERY and others */
+ if (status & BNX2X_DEF_SB_IDX) {
+/* handle EQ completions */
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "---> EQ INTR <---");
+ bnx2x_eq_int(sc);
+ bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
+ le16toh(sc->def_idx), IGU_INT_NOP, 1);
+ status &= ~BNX2X_DEF_SB_IDX;
+ }
+
+ /* if status is non zero then something went wrong */
+ if (unlikely(status)) {
+ PMD_DRV_LOG(INFO,
+ "Got an unknown SP interrupt! (0x%04x)", status);
+ }
+
+ /* ack status block only if something was actually handled */
+ bnx2x_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
+ le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
+
+ return rc;
+}
+
+static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
+{
+ struct bnx2x_softc *sc = fp->sc;
+ uint8_t more_rx = FALSE;
+
+ PMD_DRV_LOG(DEBUG, "---> FP TASK QUEUE (%d) <--", fp->index);
+
+ /* update the fastpath index */
+ bnx2x_update_fp_sb_idx(fp);
+
+ if (scan_fp) {
+ if (bnx2x_has_rx_work(fp)) {
+ more_rx = bnx2x_rxeof(sc, fp);
+ }
+
+ if (more_rx) {
+ /* still more work to do */
+ bnx2x_handle_fp_tq(fp, scan_fp);
+ return;
+ }
+ }
+
+ bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
+ le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
+}
+
+/*
+ * Legacy interrupt entry point.
+ *
+ * Verifies that the controller generated the interrupt and
+ * then calls a separate routine to handle the various
+ * interrupt causes: link, RX, and TX.
+ */
+int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp)
+{
+ struct bnx2x_fastpath *fp;
+ uint32_t status, mask;
+ int i, rc = 0;
+
+ /*
+ * 0 for ustorm, 1 for cstorm
+ * the bits returned from ack_int() are 0-15
+ * bit 0 = attention status block
+ * bit 1 = fast path status block
+ * a mask of 0x2 or more = tx/rx event
+ * a mask of 1 = slow path event
+ */
+
+ status = bnx2x_ack_int(sc);
+
+ /* the interrupt is not for us */
+ if (unlikely(status == 0)) {
+ return 0;
+ }
+
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "Interrupt status 0x%04x", status);
+ //bnx2x_dump_status_block(sc);
+
+ FOR_EACH_ETH_QUEUE(sc, i) {
+ fp = &sc->fp[i];
+ mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
+ if (status & mask) {
+ bnx2x_handle_fp_tq(fp, scan_fp);
+ status &= ~mask;
+ }
+ }
+
+ if (unlikely(status & 0x1)) {
+ rc = bnx2x_handle_sp_tq(sc);
+ status &= ~0x1;
+ }
+
+ if (unlikely(status)) {
+ PMD_DRV_LOG(WARNING,
+ "Unexpected fastpath status (0x%08x)!", status);
+ }
+
+ return rc;
+}
+
+static int bnx2x_init_hw_common_chip(struct bnx2x_softc *sc);
+static int bnx2x_init_hw_common(struct bnx2x_softc *sc);
+static int bnx2x_init_hw_port(struct bnx2x_softc *sc);
+static int bnx2x_init_hw_func(struct bnx2x_softc *sc);
+static void bnx2x_reset_common(struct bnx2x_softc *sc);
+static void bnx2x_reset_port(struct bnx2x_softc *sc);
+static void bnx2x_reset_func(struct bnx2x_softc *sc);
+static int bnx2x_init_firmware(struct bnx2x_softc *sc);
+static void bnx2x_release_firmware(struct bnx2x_softc *sc);
+
+static struct
+ecore_func_sp_drv_ops bnx2x_func_sp_drv = {
+ .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
+ .init_hw_cmn = bnx2x_init_hw_common,
+ .init_hw_port = bnx2x_init_hw_port,
+ .init_hw_func = bnx2x_init_hw_func,
+
+ .reset_hw_cmn = bnx2x_reset_common,
+ .reset_hw_port = bnx2x_reset_port,
+ .reset_hw_func = bnx2x_reset_func,
+
+ .init_fw = bnx2x_init_firmware,
+ .release_fw = bnx2x_release_firmware,
+};
+
+static void bnx2x_init_func_obj(struct bnx2x_softc *sc)
+{
+ sc->dmae_ready = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ecore_init_func_obj(sc,
+ &sc->func_obj,
+ BNX2X_SP(sc, func_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, func_rdata),
+ BNX2X_SP(sc, func_afex_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, func_afex_rdata),
+ &bnx2x_func_sp_drv);
+}
+
+static int bnx2x_init_hw(struct bnx2x_softc *sc, uint32_t load_code)
+{
+ struct ecore_func_state_params func_params = { NULL };
+ int rc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* prepare the parameters for function state transitions */
+ bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &sc->func_obj;
+ func_params.cmd = ECORE_F_CMD_HW_INIT;
+
+ func_params.params.hw_init.load_phase = load_code;
+
+ /*
+ * Via a plethora of function pointers, we will eventually reach
+ * bnx2x_init_hw_common(), bnx2x_init_hw_port(), or bnx2x_init_hw_func().
+ */
+ rc = ecore_func_state_change(sc, &func_params);
+
+ return rc;
+}
+
+static void
+bnx2x_fill(struct bnx2x_softc *sc, uint32_t addr, int fill, uint32_t len)
+{
+ uint32_t i;
+
+ if (!(len % 4) && !(addr % 4)) {
+ for (i = 0; i < len; i += 4) {
+ REG_WR(sc, (addr + i), fill);
+ }
+ } else {
+ for (i = 0; i < len; i++) {
+ REG_WR8(sc, (addr + i), fill);
+ }
+ }
+}
+
+/* writes FP SP data to FW - data_size in dwords */
+static void
+bnx2x_wr_fp_sb_data(struct bnx2x_softc *sc, int fw_sb_id, uint32_t * sb_data_p,
+ uint32_t data_size)
+{
+ uint32_t index;
+
+ for (index = 0; index < data_size; index++) {
+ REG_WR(sc,
+ (BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+ (sizeof(uint32_t) * index)), *(sb_data_p + index));
+ }
+}
+
+static void bnx2x_zero_fp_sb(struct bnx2x_softc *sc, int fw_sb_id)
+{
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+ uint32_t *sb_data_p;
+ uint32_t data_size = 0;
+
+ if (!CHIP_IS_E1x(sc)) {
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+ sb_data_e2.common.state = SB_DISABLED;
+ sb_data_e2.common.p_func.vf_valid = FALSE;
+ sb_data_p = (uint32_t *) & sb_data_e2;
+ data_size = (sizeof(struct hc_status_block_data_e2) /
+ sizeof(uint32_t));
+ } else {
+ memset(&sb_data_e1x, 0,
+ sizeof(struct hc_status_block_data_e1x));
+ sb_data_e1x.common.state = SB_DISABLED;
+ sb_data_e1x.common.p_func.vf_valid = FALSE;
+ sb_data_p = (uint32_t *) & sb_data_e1x;
+ data_size = (sizeof(struct hc_status_block_data_e1x) /
+ sizeof(uint32_t));
+ }
+
+ bnx2x_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
+
+ bnx2x_fill(sc,
+ (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 0,
+ CSTORM_STATUS_BLOCK_SIZE);
+ bnx2x_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
+ 0, CSTORM_SYNC_BLOCK_SIZE);
+}
+
+static void
+bnx2x_wr_sp_sb_data(struct bnx2x_softc *sc,
+ struct hc_sp_status_block_data *sp_sb_data)
+{
+ uint32_t i;
+
+ for (i = 0;
+ i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
+ i++) {
+ REG_WR(sc,
+ (BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
+ (i * sizeof(uint32_t))),
+ *((uint32_t *) sp_sb_data + i));
+ }
+}
+
+static void bnx2x_zero_sp_sb(struct bnx2x_softc *sc)
+{
+ struct hc_sp_status_block_data sp_sb_data;
+
+ memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+ sp_sb_data.state = SB_DISABLED;
+ sp_sb_data.p_func.vf_valid = FALSE;
+
+ bnx2x_wr_sp_sb_data(sc, &sp_sb_data);
+
+ bnx2x_fill(sc,
+ (BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
+ 0, CSTORM_SP_STATUS_BLOCK_SIZE);
+ bnx2x_fill(sc,
+ (BAR_CSTRORM_INTMEM +
+ CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
+ 0, CSTORM_SP_SYNC_BLOCK_SIZE);
+}
+
+static void
+bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id,
+ int igu_seg_id)
+{
+ hc_sm->igu_sb_id = igu_sb_id;
+ hc_sm->igu_seg_id = igu_seg_id;
+ hc_sm->timer_value = 0xFF;
+ hc_sm->time_to_expire = 0xFFFFFFFF;
+}
+
+static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+ /* zero out state machine indices */
+
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* map indices */
+
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+ (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+ (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+ (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+ (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+ (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
+}
+
+static void
+bnx2x_init_sb(struct bnx2x_softc *sc, rte_iova_t busaddr, int vfid,
+ uint8_t vf_valid, int fw_sb_id, int igu_sb_id)
+{
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+ struct hc_status_block_sm *hc_sm_p;
+ uint32_t *sb_data_p;
+ int igu_seg_id;
+ int data_size;
+
+ if (CHIP_INT_MODE_IS_BC(sc)) {
+ igu_seg_id = HC_SEG_ACCESS_NORM;
+ } else {
+ igu_seg_id = IGU_SEG_ACCESS_NORM;
+ }
+
+ bnx2x_zero_fp_sb(sc, fw_sb_id);
+
+ if (!CHIP_IS_E1x(sc)) {
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+ sb_data_e2.common.state = SB_ENABLED;
+ sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
+ sb_data_e2.common.p_func.vf_id = vfid;
+ sb_data_e2.common.p_func.vf_valid = vf_valid;
+ sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
+ sb_data_e2.common.same_igu_sb_1b = TRUE;
+ sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
+ sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
+ hc_sm_p = sb_data_e2.common.state_machine;
+ sb_data_p = (uint32_t *) & sb_data_e2;
+ data_size = (sizeof(struct hc_status_block_data_e2) /
+ sizeof(uint32_t));
+ bnx2x_map_sb_state_machines(sb_data_e2.index_data);
+ } else {
+ memset(&sb_data_e1x, 0,
+ sizeof(struct hc_status_block_data_e1x));
+ sb_data_e1x.common.state = SB_ENABLED;
+ sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
+ sb_data_e1x.common.p_func.vf_id = 0xff;
+ sb_data_e1x.common.p_func.vf_valid = FALSE;
+ sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
+ sb_data_e1x.common.same_igu_sb_1b = TRUE;
+ sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
+ sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
+ hc_sm_p = sb_data_e1x.common.state_machine;
+ sb_data_p = (uint32_t *) & sb_data_e1x;
+ data_size = (sizeof(struct hc_status_block_data_e1x) /
+ sizeof(uint32_t));
+ bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
+ }
+
+ bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
+ bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
+
+ /* write indices to HW - PCI guarantees endianity of regpairs */
+ bnx2x_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
+}
+
+static uint8_t bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
+{
+ if (CHIP_IS_E1x(fp->sc)) {
+ return fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H;
+ } else {
+ return fp->cl_id;
+ }
+}
+
+static uint32_t
+bnx2x_rx_ustorm_prods_offset(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp)
+{
+ uint32_t offset = BAR_USTRORM_INTMEM;
+
+ if (IS_VF(sc)) {
+ return PXP_VF_ADDR_USDM_QUEUES_START +
+ (sc->acquire_resp.resc.hw_qid[fp->index] *
+ sizeof(struct ustorm_queue_zone_data));
+ } else if (!CHIP_IS_E1x(sc)) {
+ offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+ } else {
+ offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
+ }
+
+ return offset;
+}
+
+static void bnx2x_init_eth_fp(struct bnx2x_softc *sc, int idx)
+{
+ struct bnx2x_fastpath *fp = &sc->fp[idx];
+ uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
+ unsigned long q_type = 0;
+ int cos;
+
+ fp->sc = sc;
+ fp->index = idx;
+
+ fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
+ fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
+
+ if (CHIP_IS_E1x(sc))
+ fp->cl_id = SC_L_ID(sc) + idx;
+ else
+/* want client ID same as IGU SB ID for non-E1 */
+ fp->cl_id = fp->igu_sb_id;
+ fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
+
+ /* setup sb indices */
+ if (!CHIP_IS_E1x(sc)) {
+ fp->sb_index_values = fp->status_block.e2_sb->sb.index_values;
+ fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
+ } else {
+ fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values;
+ fp->sb_running_index =
+ fp->status_block.e1x_sb->sb.running_index;
+ }
+
+ /* init shortcut */
+ fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(sc, fp);
+
+ fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
+
+ for (cos = 0; cos < sc->max_cos; cos++) {
+ cids[cos] = idx;
+ }
+ fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
+
+ /* nothing more for a VF to do */
+ if (IS_VF(sc)) {
+ return;
+ }
+
+ bnx2x_init_sb(sc, fp->sb_dma.paddr, BNX2X_VF_ID_INVALID, FALSE,
+ fp->fw_sb_id, fp->igu_sb_id);
+
+ bnx2x_update_fp_sb_idx(fp);
+
+ /* Configure Queue State object */
+ bnx2x_set_bit(ECORE_Q_TYPE_HAS_RX, &q_type);
+ bnx2x_set_bit(ECORE_Q_TYPE_HAS_TX, &q_type);
+
+ ecore_init_queue_obj(sc,
+ &sc->sp_objs[idx].q_obj,
+ fp->cl_id,
+ cids,
+ sc->max_cos,
+ SC_FUNC(sc),
+ BNX2X_SP(sc, q_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, q_rdata),
+ q_type);
+
+ /* configure classification DBs */
+ ecore_init_mac_obj(sc,
+ &sc->sp_objs[idx].mac_obj,
+ fp->cl_id,
+ idx,
+ SC_FUNC(sc),
+ BNX2X_SP(sc, mac_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, mac_rdata),
+ ECORE_FILTER_MAC_PENDING, &sc->sp_state,
+ ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool);
+}
+
+static void
+bnx2x_update_rx_prod(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
+ uint16_t rx_bd_prod, uint16_t rx_cq_prod)
+{
+ union ustorm_eth_rx_producers rx_prods;
+ uint32_t i;
+
+ /* update producers */
+ rx_prods.prod.bd_prod = rx_bd_prod;
+ rx_prods.prod.cqe_prod = rx_cq_prod;
+ rx_prods.prod.reserved = 0;
+
+ /*
+ * Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ * This is only applicable for weak-ordered memory model archs such
+ * as IA-64. The following barrier is also mandatory since FW will
+ * assumes BDs must have buffers.
+ */
+ wmb();
+
+ for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
+ REG_WR(sc,
+ (fp->ustorm_rx_prods_offset + (i * 4)),
+ rx_prods.raw_data[i]);
+ }
+
+ wmb(); /* keep prod updates ordered */
+}
+
+static void bnx2x_init_rx_rings(struct bnx2x_softc *sc)
+{
+ struct bnx2x_fastpath *fp;
+ int i;
+ struct bnx2x_rx_queue *rxq;
+
+ for (i = 0; i < sc->num_queues; i++) {
+ fp = &sc->fp[i];
+ rxq = sc->rx_queues[fp->index];
+ if (!rxq) {
+ PMD_RX_LOG(ERR, "RX queue is NULL");
+ return;
+ }
+
+ rxq->rx_bd_head = 0;
+ rxq->rx_bd_tail = rxq->nb_rx_desc;
+ rxq->rx_cq_head = 0;
+ rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq);
+ *fp->rx_cq_cons_sb = 0;
+
+ /*
+ * Activate the BD ring...
+ * Warning, this will generate an interrupt (to the TSTORM)
+ * so this can only be done after the chip is initialized
+ */
+ bnx2x_update_rx_prod(sc, fp, rxq->rx_bd_tail, rxq->rx_cq_tail);
+
+ if (i != 0) {
+ continue;
+ }
+ }
+}
+
+static void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index];
+
+ fp->tx_db.data.header.header = 1 << DOORBELL_HDR_DB_TYPE_SHIFT;
+ fp->tx_db.data.zero_fill1 = 0;
+ fp->tx_db.data.prod = 0;
+
+ if (!txq) {
+ PMD_TX_LOG(ERR, "ERROR: TX queue is NULL");
+ return;
+ }
+
+ txq->tx_pkt_tail = 0;
+ txq->tx_pkt_head = 0;
+ txq->tx_bd_tail = 0;
+ txq->tx_bd_head = 0;
+}
+
+static void bnx2x_init_tx_rings(struct bnx2x_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < sc->num_queues; i++) {
+ bnx2x_init_tx_ring_one(&sc->fp[i]);
+ }
+}
+
+static void bnx2x_init_def_sb(struct bnx2x_softc *sc)
+{
+ struct host_sp_status_block *def_sb = sc->def_sb;
+ rte_iova_t mapping = sc->def_sb_dma.paddr;
+ int igu_sp_sb_index;
+ int igu_seg_id;
+ int port = SC_PORT(sc);
+ int func = SC_FUNC(sc);
+ int reg_offset, reg_offset_en5;
+ uint64_t section;
+ int index, sindex;
+ struct hc_sp_status_block_data sp_sb_data;
+
+ memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+ if (CHIP_INT_MODE_IS_BC(sc)) {
+ igu_sp_sb_index = DEF_SB_IGU_ID;
+ igu_seg_id = HC_SEG_ACCESS_DEF;
+ } else {
+ igu_sp_sb_index = sc->igu_dsb_id;
+ igu_seg_id = IGU_SEG_ACCESS_DEF;
+ }
+
+ /* attentions */
+ section = ((uint64_t) mapping +
+ offsetof(struct host_sp_status_block, atten_status_block));
+ def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
+ sc->attn_state = 0;
+
+ reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+
+ reg_offset_en5 = (port) ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
+
+ for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+/* take care of sig[0]..sig[4] */
+ for (sindex = 0; sindex < 4; sindex++) {
+ sc->attn_group[index].sig[sindex] =
+ REG_RD(sc,
+ (reg_offset + (sindex * 0x4) +
+ (0x10 * index)));
+ }
+
+ if (!CHIP_IS_E1x(sc)) {
+ /*
+ * enable5 is separate from the rest of the registers,
+ * and the address skip is 4 and not 16 between the
+ * different groups
+ */
+ sc->attn_group[index].sig[4] =
+ REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
+ } else {
+ sc->attn_group[index].sig[4] = 0;
+ }
+ }
+
+ if (sc->devinfo.int_block == INT_BLOCK_HC) {
+ reg_offset =
+ port ? HC_REG_ATTN_MSG1_ADDR_L : HC_REG_ATTN_MSG0_ADDR_L;
+ REG_WR(sc, reg_offset, U64_LO(section));
+ REG_WR(sc, (reg_offset + 4), U64_HI(section));
+ } else if (!CHIP_IS_E1x(sc)) {
+ REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
+ REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
+ }
+
+ section = ((uint64_t) mapping +
+ offsetof(struct host_sp_status_block, sp_sb));
+
+ bnx2x_zero_sp_sb(sc);
+
+ /* PCI guarantees endianity of regpair */
+ sp_sb_data.state = SB_ENABLED;
+ sp_sb_data.host_sb_addr.lo = U64_LO(section);
+ sp_sb_data.host_sb_addr.hi = U64_HI(section);
+ sp_sb_data.igu_sb_id = igu_sp_sb_index;
+ sp_sb_data.igu_seg_id = igu_seg_id;
+ sp_sb_data.p_func.pf_id = func;
+ sp_sb_data.p_func.vnic_id = SC_VN(sc);
+ sp_sb_data.p_func.vf_id = 0xff;
+
+ bnx2x_wr_sp_sb_data(sc, &sp_sb_data);
+
+ bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
+}
+
+static void bnx2x_init_sp_ring(struct bnx2x_softc *sc)
+{
+ atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
+ sc->spq_prod_idx = 0;
+ sc->dsb_sp_prod =
+ &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
+ sc->spq_prod_bd = sc->spq;
+ sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
+}
+
+static void bnx2x_init_eq_ring(struct bnx2x_softc *sc)
+{
+ union event_ring_elem *elem;
+ int i;
+
+ for (i = 1; i <= NUM_EQ_PAGES; i++) {
+ elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
+
+ elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
+ BNX2X_PAGE_SIZE *
+ (i % NUM_EQ_PAGES)));
+ elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
+ BNX2X_PAGE_SIZE *
+ (i % NUM_EQ_PAGES)));
+ }
+
+ sc->eq_cons = 0;
+ sc->eq_prod = NUM_EQ_DESC;
+ sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
+
+ atomic_store_rel_long(&sc->eq_spq_left,
+ (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
+ NUM_EQ_DESC) - 1));
+}
+
+static void bnx2x_init_internal_common(struct bnx2x_softc *sc)
+{
+ int i;
+
+ if (IS_MF_SI(sc)) {
+/*
+ * In switch independent mode, the TSTORM needs to accept
+ * packets that failed classification, since approximate match
+ * mac addresses aren't written to NIG LLH.
+ */
+ REG_WR8(sc,
+ (BAR_TSTRORM_INTMEM +
+ TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 2);
+ } else
+ REG_WR8(sc,
+ (BAR_TSTRORM_INTMEM +
+ TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 0);
+
+ /*
+ * Zero this manually as its initialization is currently missing
+ * in the initTool.
+ */
+ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
+ REG_WR(sc,
+ (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
+ 0);
+ }
+
+ if (!CHIP_IS_E1x(sc)) {
+ REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
+ CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE :
+ HC_IGU_NBC_MODE);
+ }
+}
+
+static void bnx2x_init_internal(struct bnx2x_softc *sc, uint32_t load_code)
+{
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_COMMON:
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+ bnx2x_init_internal_common(sc);
+ /* no break */
+
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ /* nothing to do */
+ /* no break */
+
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ /* internal memory per function is initialized inside bnx2x_pf_init */
+ break;
+
+ default:
+ PMD_DRV_LOG(NOTICE, "Unknown load_code (0x%x) from MCP",
+ load_code);
+ break;
+ }
+}
+
+static void
+storm_memset_func_cfg(struct bnx2x_softc *sc,
+ struct tstorm_eth_function_common_config *tcfg,
+ uint16_t abs_fid)
+{
+ uint32_t addr;
+ size_t size;
+
+ addr = (BAR_TSTRORM_INTMEM +
+ TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
+ size = sizeof(struct tstorm_eth_function_common_config);
+ ecore_storm_memset_struct(sc, addr, size, (uint32_t *) tcfg);
+}
+
+static void bnx2x_func_init(struct bnx2x_softc *sc, struct bnx2x_func_init_params *p)
+{
+ struct tstorm_eth_function_common_config tcfg = { 0 };
+
+ if (CHIP_IS_E1x(sc)) {
+ storm_memset_func_cfg(sc, &tcfg, p->func_id);
+ }
+
+ /* Enable the function in the FW */
+ storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
+ storm_memset_func_en(sc, p->func_id, 1);
+
+ /* spq */
+ if (p->func_flgs & FUNC_FLG_SPQ) {
+ storm_memset_spq_addr(sc, p->spq_map, p->func_id);
+ REG_WR(sc,
+ (XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PROD_OFFSET(p->func_id)), p->spq_prod);
+ }
+}
+
+/*
+ * Calculates the sum of vn_min_rates.
+ * It's needed for further normalizing of the min_rates.
+ * Returns:
+ * sum of vn_min_rates.
+ * or
+ * 0 - if all the min_rates are 0.
+ * In the later case fainess algorithm should be deactivated.
+ * If all min rates are not zero then those that are zeroes will be set to 1.
+ */
+static void bnx2x_calc_vn_min(struct bnx2x_softc *sc, struct cmng_init_input *input)
+{
+ uint32_t vn_cfg;
+ uint32_t vn_min_rate;
+ int all_zero = 1;
+ int vn;
+
+ for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
+ vn_cfg = sc->devinfo.mf_info.mf_config[vn];
+ vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
+
+ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
+ /* skip hidden VNs */
+ vn_min_rate = 0;
+ } else if (!vn_min_rate) {
+ /* If min rate is zero - set it to 100 */
+ vn_min_rate = DEF_MIN_RATE;
+ } else {
+ all_zero = 0;
+ }
+
+ input->vnic_min_rate[vn] = vn_min_rate;
+ }
+
+ /* if ETS or all min rates are zeros - disable fairness */
+ if (all_zero) {
+ input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+ } else {
+ input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+ }
+}
+
+static uint16_t
+bnx2x_extract_max_cfg(__rte_unused struct bnx2x_softc *sc, uint32_t mf_cfg)
+{
+ uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT);
+
+ if (!max_cfg) {
+ PMD_DRV_LOG(DEBUG,
+ "Max BW configured to 0 - using 100 instead");
+ max_cfg = 100;
+ }
+
+ return max_cfg;
+}
+
+static void
+bnx2x_calc_vn_max(struct bnx2x_softc *sc, int vn, struct cmng_init_input *input)
+{
+ uint16_t vn_max_rate;
+ uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
+ uint32_t max_cfg;
+
+ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
+ vn_max_rate = 0;
+ } else {
+ max_cfg = bnx2x_extract_max_cfg(sc, vn_cfg);
+
+ if (IS_MF_SI(sc)) {
+ /* max_cfg in percents of linkspeed */
+ vn_max_rate =
+ ((sc->link_vars.line_speed * max_cfg) / 100);
+ } else { /* SD modes */
+ /* max_cfg is absolute in 100Mb units */
+ vn_max_rate = (max_cfg * 100);
+ }
+ }
+
+ input->vnic_max_rate[vn] = vn_max_rate;
+}
+
+static void
+bnx2x_cmng_fns_init(struct bnx2x_softc *sc, uint8_t read_cfg, uint8_t cmng_type)
+{
+ struct cmng_init_input input;
+ int vn;
+
+ memset(&input, 0, sizeof(struct cmng_init_input));
+
+ input.port_rate = sc->link_vars.line_speed;
+
+ if (cmng_type == CMNG_FNS_MINMAX) {
+/* read mf conf from shmem */
+ if (read_cfg) {
+ bnx2x_read_mf_cfg(sc);
+ }
+
+/* get VN min rate and enable fairness if not 0 */
+ bnx2x_calc_vn_min(sc, &input);
+
+/* get VN max rate */
+ if (sc->port.pmf) {
+ for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
+ bnx2x_calc_vn_max(sc, vn, &input);
+ }
+ }
+
+/* always enable rate shaping and fairness */
+ input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
+
+ ecore_init_cmng(&input, &sc->cmng);
+ return;
+ }
+}
+
+static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc *sc)
+{
+ if (CHIP_REV_IS_SLOW(sc)) {
+ return CMNG_FNS_NONE;
+ }
+
+ if (IS_MF(sc)) {
+ return CMNG_FNS_MINMAX;
+ }
+
+ return CMNG_FNS_NONE;
+}
+
+static void
+storm_memset_cmng(struct bnx2x_softc *sc, struct cmng_init *cmng, uint8_t port)
+{
+ int vn;
+ int func;
+ uint32_t addr;
+ size_t size;
+
+ addr = (BAR_XSTRORM_INTMEM + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
+ size = sizeof(struct cmng_struct_per_port);
+ ecore_storm_memset_struct(sc, addr, size, (uint32_t *) & cmng->port);
+
+ for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
+ func = func_by_vn(sc, vn);
+
+ addr = (BAR_XSTRORM_INTMEM +
+ XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
+ size = sizeof(struct rate_shaping_vars_per_vn);
+ ecore_storm_memset_struct(sc, addr, size,
+ (uint32_t *) & cmng->
+ vnic.vnic_max_rate[vn]);
+
+ addr = (BAR_XSTRORM_INTMEM +
+ XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
+ size = sizeof(struct fairness_vars_per_vn);
+ ecore_storm_memset_struct(sc, addr, size,
+ (uint32_t *) & cmng->
+ vnic.vnic_min_rate[vn]);
+ }
+}
+
+static void bnx2x_pf_init(struct bnx2x_softc *sc)
+{
+ struct bnx2x_func_init_params func_init;
+ struct event_ring_data eq_data;
+ uint16_t flags;
+
+ memset(&eq_data, 0, sizeof(struct event_ring_data));
+ memset(&func_init, 0, sizeof(struct bnx2x_func_init_params));
+
+ if (!CHIP_IS_E1x(sc)) {
+/* reset IGU PF statistics: MSIX + ATTN */
+/* PF */
+ REG_WR(sc,
+ (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ (BNX2X_IGU_STAS_MSG_VF_CNT * 4) +
+ ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) *
+ 4)), 0);
+/* ATTN */
+ REG_WR(sc,
+ (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ (BNX2X_IGU_STAS_MSG_VF_CNT * 4) +
+ (BNX2X_IGU_STAS_MSG_PF_CNT * 4) +
+ ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) *
+ 4)), 0);
+ }
+
+ /* function setup flags */
+ flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
+
+ func_init.func_flgs = flags;
+ func_init.pf_id = SC_FUNC(sc);
+ func_init.func_id = SC_FUNC(sc);
+ func_init.spq_map = sc->spq_dma.paddr;
+ func_init.spq_prod = sc->spq_prod_idx;
+
+ bnx2x_func_init(sc, &func_init);
+
+ memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
+
+ /*
+ * Congestion management values depend on the link rate.
+ * There is no active link so initial link rate is set to 10Gbps.
+ * When the link comes up the congestion management values are
+ * re-calculated according to the actual link rate.
+ */
+ sc->link_vars.line_speed = SPEED_10000;
+ bnx2x_cmng_fns_init(sc, TRUE, bnx2x_get_cmng_fns_mode(sc));
+
+ /* Only the PMF sets the HW */
+ if (sc->port.pmf) {
+ storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
+ }
+
+ /* init Event Queue - PCI bus guarantees correct endainity */
+ eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
+ eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
+ eq_data.producer = sc->eq_prod;
+ eq_data.index_id = HC_SP_INDEX_EQ_CONS;
+ eq_data.sb_id = DEF_SB_ID;
+ storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
+}
+
+static void bnx2x_hc_int_enable(struct bnx2x_softc *sc)
+{
+ int port = SC_PORT(sc);
+ uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ uint32_t val = REG_RD(sc, addr);
+ uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX)
+ || (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX);
+ uint8_t single_msix = (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX);
+ uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI);
+
+ if (msix) {
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0);
+ val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ if (single_msix) {
+ val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
+ }
+ } else if (msi) {
+ val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
+ val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else {
+ val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ REG_WR(sc, addr, val);
+
+ val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
+ }
+
+ REG_WR(sc, addr, val);
+
+ /* ensure that HC_CONFIG is written before leading/trailing edge config */
+ mb();
+
+ /* init leading/trailing edge */
+ if (IS_MF(sc)) {
+ val = (0xee0f | (1 << (SC_VN(sc) + 4)));
+ if (sc->port.pmf) {
+ /* enable nig and gpio3 attention */
+ val |= 0x1100;
+ }
+ } else {
+ val = 0xffff;
+ }
+
+ REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port * 8), val);
+ REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port * 8), val);
+
+ /* make sure that interrupts are indeed enabled from here on */
+ mb();
+}
+
+static void bnx2x_igu_int_enable(struct bnx2x_softc *sc)
+{
+ uint32_t val;
+ uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX)
+ || (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX);
+ uint8_t single_msix = (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX);
+ uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI);
+
+ val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
+
+ if (msix) {
+ val &= ~(IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN);
+ val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN);
+ if (single_msix) {
+ val |= IGU_PF_CONF_SINGLE_ISR_EN;
+ }
+ } else if (msi) {
+ val &= ~IGU_PF_CONF_INT_LINE_EN;
+ val |= (IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN);
+ } else {
+ val &= ~IGU_PF_CONF_MSI_MSIX_EN;
+ val |= (IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN);
+ }
+
+ /* clean previous status - need to configure igu prior to ack */
+ if ((!msix) || single_msix) {
+ REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
+ bnx2x_ack_int(sc);
+ }
+
+ val |= IGU_PF_CONF_FUNC_EN;
+
+ PMD_DRV_LOG(DEBUG, "write 0x%x to IGU mode %s",
+ val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
+
+ REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
+
+ mb();
+
+ /* init leading/trailing edge */
+ if (IS_MF(sc)) {
+ val = (0xee0f | (1 << (SC_VN(sc) + 4)));
+ if (sc->port.pmf) {
+ /* enable nig and gpio3 attention */
+ val |= 0x1100;
+ }
+ } else {
+ val = 0xffff;
+ }
+
+ REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
+ REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
+
+ /* make sure that interrupts are indeed enabled from here on */
+ mb();
+}
+
+static void bnx2x_int_enable(struct bnx2x_softc *sc)
+{
+ if (sc->devinfo.int_block == INT_BLOCK_HC) {
+ bnx2x_hc_int_enable(sc);
+ } else {
+ bnx2x_igu_int_enable(sc);
+ }
+}
+
+static void bnx2x_hc_int_disable(struct bnx2x_softc *sc)
+{
+ int port = SC_PORT(sc);
+ uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ uint32_t val = REG_RD(sc, addr);
+
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ /* flush all outstanding writes */
+ mb();
+
+ REG_WR(sc, addr, val);
+ if (REG_RD(sc, addr) != val) {
+ PMD_DRV_LOG(ERR, "proper val not read from HC IGU!");
+ }
+}
+
+static void bnx2x_igu_int_disable(struct bnx2x_softc *sc)
+{
+ uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
+
+ val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN);
+
+ PMD_DRV_LOG(DEBUG, "write %x to IGU", val);
+
+ /* flush all outstanding writes */
+ mb();
+
+ REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
+ if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
+ PMD_DRV_LOG(ERR, "proper val not read from IGU!");
+ }
+}
+
+static void bnx2x_int_disable(struct bnx2x_softc *sc)
+{
+ if (sc->devinfo.int_block == INT_BLOCK_HC) {
+ bnx2x_hc_int_disable(sc);
+ } else {
+ bnx2x_igu_int_disable(sc);
+ }
+}
+
+static void bnx2x_nic_init(struct bnx2x_softc *sc, int load_code)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < sc->num_queues; i++) {
+ bnx2x_init_eth_fp(sc, i);
+ }
+
+ rmb(); /* ensure status block indices were read */
+
+ bnx2x_init_rx_rings(sc);
+ bnx2x_init_tx_rings(sc);
+
+ if (IS_VF(sc)) {
+ bnx2x_memset_stats(sc);
+ return;
+ }
+
+ /* initialize MOD_ABS interrupts */
+ elink_init_mod_abs_int(sc, &sc->link_vars,
+ sc->devinfo.chip_id,
+ sc->devinfo.shmem_base,
+ sc->devinfo.shmem2_base, SC_PORT(sc));
+
+ bnx2x_init_def_sb(sc);
+ bnx2x_update_dsb_idx(sc);
+ bnx2x_init_sp_ring(sc);
+ bnx2x_init_eq_ring(sc);
+ bnx2x_init_internal(sc, load_code);
+ bnx2x_pf_init(sc);
+ bnx2x_stats_init(sc);
+
+ /* flush all before enabling interrupts */
+ mb();
+
+ bnx2x_int_enable(sc);
+
+ /* check for SPIO5 */
+ bnx2x_attn_int_deasserted0(sc,
+ REG_RD(sc,
+ (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+ SC_PORT(sc) * 4)) &
+ AEU_INPUTS_ATTN_BITS_SPIO5);
+}
+
+static void bnx2x_init_objs(struct bnx2x_softc *sc)
+{
+ /* mcast rules must be added to tx if tx switching is enabled */
+ ecore_obj_type o_type;
+ if (sc->flags & BNX2X_TX_SWITCHING)
+ o_type = ECORE_OBJ_TYPE_RX_TX;
+ else
+ o_type = ECORE_OBJ_TYPE_RX;
+
+ /* RX_MODE controlling object */
+ ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
+
+ /* multicast configuration controlling object */
+ ecore_init_mcast_obj(sc,
+ &sc->mcast_obj,
+ sc->fp[0].cl_id,
+ sc->fp[0].index,
+ SC_FUNC(sc),
+ SC_FUNC(sc),
+ BNX2X_SP(sc, mcast_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, mcast_rdata),
+ ECORE_FILTER_MCAST_PENDING,
+ &sc->sp_state, o_type);
+
+ /* Setup CAM credit pools */
+ ecore_init_mac_credit_pool(sc,
+ &sc->macs_pool,
+ SC_FUNC(sc),
+ CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
+ VNICS_PER_PATH(sc));
+
+ ecore_init_vlan_credit_pool(sc,
+ &sc->vlans_pool,
+ SC_ABS_FUNC(sc) >> 1,
+ CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
+ VNICS_PER_PATH(sc));
+
+ /* RSS configuration object */
+ ecore_init_rss_config_obj(&sc->rss_conf_obj,
+ sc->fp[0].cl_id,
+ sc->fp[0].index,
+ SC_FUNC(sc),
+ SC_FUNC(sc),
+ BNX2X_SP(sc, rss_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, rss_rdata),
+ ECORE_FILTER_RSS_CONF_PENDING,
+ &sc->sp_state, ECORE_OBJ_TYPE_RX);
+}
+
+/*
+ * Initialize the function. This must be called before sending CLIENT_SETUP
+ * for the first client.
+ */
+static int bnx2x_func_start(struct bnx2x_softc *sc)
+{
+ struct ecore_func_state_params func_params = { NULL };
+ struct ecore_func_start_params *start_params =
+ &func_params.params.start;
+
+ /* Prepare parameters for function state transitions */
+ bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &sc->func_obj;
+ func_params.cmd = ECORE_F_CMD_START;
+
+ /* Function parameters */
+ start_params->mf_mode = sc->devinfo.mf_info.mf_mode;
+ start_params->sd_vlan_tag = OVLAN(sc);
+
+ if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
+ start_params->network_cos_mode = STATIC_COS;
+ } else { /* CHIP_IS_E1X */
+ start_params->network_cos_mode = FW_WRR;
+ }
+
+ start_params->gre_tunnel_mode = 0;
+ start_params->gre_tunnel_rss = 0;
+
+ return ecore_func_state_change(sc, &func_params);
+}
+
+static int bnx2x_set_power_state(struct bnx2x_softc *sc, uint8_t state)
+{
+ uint16_t pmcsr;
+
+ /* If there is no power capability, silently succeed */
+ if (!(sc->devinfo.pcie_cap_flags & BNX2X_PM_CAPABLE_FLAG)) {
+ PMD_DRV_LOG(WARNING, "No power capability");
+ return 0;
+ }
+
+ pci_read(sc, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), &pmcsr,
+ 2);
+
+ switch (state) {
+ case PCI_PM_D0:
+ pci_write_word(sc,
+ (sc->devinfo.pcie_pm_cap_reg +
+ PCIR_POWER_STATUS),
+ ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME));
+
+ if (pmcsr & PCIM_PSTAT_DMASK) {
+ /* delay required during transition out of D3hot */
+ DELAY(20000);
+ }
+
+ break;
+
+ case PCI_PM_D3hot:
+ /* don't shut down the power for emulation and FPGA */
+ if (CHIP_REV_IS_SLOW(sc)) {
+ return 0;
+ }
+
+ pmcsr &= ~PCIM_PSTAT_DMASK;
+ pmcsr |= PCIM_PSTAT_D3;
+
+ if (sc->wol) {
+ pmcsr |= PCIM_PSTAT_PMEENABLE;
+ }
+
+ pci_write_long(sc,
+ (sc->devinfo.pcie_pm_cap_reg +
+ PCIR_POWER_STATUS), pmcsr);
+
+ /*
+ * No more memory access after this point until device is brought back
+ * to D0 state.
+ */
+ break;
+
+ default:
+ PMD_DRV_LOG(NOTICE, "Can't support PCI power state = %d",
+ state);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* return true if succeeded to acquire the lock */
+static uint8_t bnx2x_trylock_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
+{
+ uint32_t lock_status;
+ uint32_t resource_bit = (1 << resource);
+ int func = SC_FUNC(sc);
+ uint32_t hw_lock_control_reg;
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ PMD_DRV_LOG(INFO,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return FALSE;
+ }
+
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func * 8);
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8);
+ }
+
+ /* try to acquire the lock */
+ REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(sc, hw_lock_control_reg);
+ if (lock_status & resource_bit) {
+ return TRUE;
+ }
+
+ PMD_DRV_LOG(NOTICE, "Failed to get a resource lock 0x%x", resource);
+
+ return FALSE;
+}
+
+/*
+ * Get the recovery leader resource id according to the engine this function
+ * belongs to. Currently only only 2 engines is supported.
+ */
+static int bnx2x_get_leader_lock_resource(struct bnx2x_softc *sc)
+{
+ if (SC_PATH(sc)) {
+ return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
+ } else {
+ return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
+ }
+}
+
+/* try to acquire a leader lock for current engine */
+static uint8_t bnx2x_trylock_leader_lock(struct bnx2x_softc *sc)
+{
+ return bnx2x_trylock_hw_lock(sc, bnx2x_get_leader_lock_resource(sc));
+}
+
+static int bnx2x_release_leader_lock(struct bnx2x_softc *sc)
+{
+ return bnx2x_release_hw_lock(sc, bnx2x_get_leader_lock_resource(sc));
+}
+
+/* close gates #2, #3 and #4 */
+static void bnx2x_set_234_gates(struct bnx2x_softc *sc, uint8_t close)
+{
+ uint32_t val;
+
+ /* gates #2 and #4a are closed/opened */
+ /* #4 */
+ REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, ! !close);
+ /* #2 */
+ REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, ! !close);
+
+ /* #3 */
+ if (CHIP_IS_E1x(sc)) {
+/* prevent interrupts from HC on both ports */
+ val = REG_RD(sc, HC_REG_CONFIG_1);
+ if (close)
+ REG_WR(sc, HC_REG_CONFIG_1, (val & ~(uint32_t)
+ HC_CONFIG_1_REG_BLOCK_DISABLE_1));
+ else
+ REG_WR(sc, HC_REG_CONFIG_1,
+ (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1));
+
+ val = REG_RD(sc, HC_REG_CONFIG_0);
+ if (close)
+ REG_WR(sc, HC_REG_CONFIG_0, (val & ~(uint32_t)
+ HC_CONFIG_0_REG_BLOCK_DISABLE_0));
+ else
+ REG_WR(sc, HC_REG_CONFIG_0,
+ (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0));
+
+ } else {
+/* Prevent incoming interrupts in IGU */
+ val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
+
+ if (close)
+ REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
+ (val & ~(uint32_t)
+ IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
+ else
+ REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
+ (val |
+ IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
+ }
+
+ wmb();
+}
+
+/* poll for pending writes bit, it should get cleared in no more than 1s */
+static int bnx2x_er_poll_igu_vq(struct bnx2x_softc *sc)
+{
+ uint32_t cnt = 1000;
+ uint32_t pend_bits = 0;
+
+ do {
+ pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
+
+ if (pend_bits == 0) {
+ break;
+ }
+
+ DELAY(1000);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ PMD_DRV_LOG(NOTICE, "Still pending IGU requests bits=0x%08x!",
+ pend_bits);
+ return -1;
+ }
+
+ return 0;
+}
+
+#define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */
+
+static void bnx2x_clp_reset_prep(struct bnx2x_softc *sc, uint32_t * magic_val)
+{
+ /* Do some magic... */
+ uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
+ *magic_val = val & SHARED_MF_CLP_MAGIC;
+ MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
+}
+
+/* restore the value of the 'magic' bit */
+static void bnx2x_clp_reset_done(struct bnx2x_softc *sc, uint32_t magic_val)
+{
+ /* Restore the 'magic' bit value... */
+ uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
+ MFCFG_WR(sc, shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
+}
+
+/* prepare for MCP reset, takes care of CLP configurations */
+static void bnx2x_reset_mcp_prep(struct bnx2x_softc *sc, uint32_t * magic_val)
+{
+ uint32_t shmem;
+ uint32_t validity_offset;
+
+ /* set `magic' bit in order to save MF config */
+ bnx2x_clp_reset_prep(sc, magic_val);
+
+ /* get shmem offset */
+ shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
+ validity_offset =
+ offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
+
+ /* Clear validity map flags */
+ if (shmem > 0) {
+ REG_WR(sc, shmem + validity_offset, 0);
+ }
+}
+
+#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
+#define MCP_ONE_TIMEOUT 100 /* 100 ms */
+
+static void bnx2x_mcp_wait_one(struct bnx2x_softc *sc)
+{
+ /* special handling for emulation and FPGA (10 times longer) */
+ if (CHIP_REV_IS_SLOW(sc)) {
+ DELAY((MCP_ONE_TIMEOUT * 10) * 1000);
+ } else {
+ DELAY((MCP_ONE_TIMEOUT) * 1000);
+ }
+}
+
+/* initialize shmem_base and waits for validity signature to appear */
+static int bnx2x_init_shmem(struct bnx2x_softc *sc)
+{
+ int cnt = 0;
+ uint32_t val = 0;
+
+ do {
+ sc->devinfo.shmem_base =
+ sc->link_params.shmem_base =
+ REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
+
+ if (sc->devinfo.shmem_base) {
+ val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
+ if (val & SHR_MEM_VALIDITY_MB)
+ return 0;
+ }
+
+ bnx2x_mcp_wait_one(sc);
+
+ } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
+
+ PMD_DRV_LOG(NOTICE, "BAD MCP validity signature");
+
+ return -1;
+}
+
+static int bnx2x_reset_mcp_comp(struct bnx2x_softc *sc, uint32_t magic_val)
+{
+ int rc = bnx2x_init_shmem(sc);
+
+ /* Restore the `magic' bit value */
+ bnx2x_clp_reset_done(sc, magic_val);
+
+ return rc;
+}
+
+static void bnx2x_pxp_prep(struct bnx2x_softc *sc)
+{
+ REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
+ REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
+ wmb();
+}
+
+/*
+ * Reset the whole chip except for:
+ * - PCIE core
+ * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
+ * - IGU
+ * - MISC (including AEU)
+ * - GRC
+ * - RBCN, RBCP
+ */
+static void bnx2x_process_kill_chip_reset(struct bnx2x_softc *sc, uint8_t global)
+{
+ uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+ uint32_t global_bits2, stay_reset2;
+
+ /*
+ * Bits that have to be set in reset_mask2 if we want to reset 'global'
+ * (per chip) blocks.
+ */
+ global_bits2 =
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
+
+ /*
+ * Don't reset the following blocks.
+ * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
+ * reset, as in 4 port device they might still be owned
+ * by the MCP (there is only one leader per path).
+ */
+ not_reset_mask1 =
+ MISC_REGISTERS_RESET_REG_1_RST_HC |
+ MISC_REGISTERS_RESET_REG_1_RST_PXPV |
+ MISC_REGISTERS_RESET_REG_1_RST_PXP;
+
+ not_reset_mask2 =
+ MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_RBCN |
+ MISC_REGISTERS_RESET_REG_2_RST_GRC |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
+ MISC_REGISTERS_RESET_REG_2_RST_ATC |
+ MISC_REGISTERS_RESET_REG_2_PGLC |
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
+ MISC_REGISTERS_RESET_REG_2_UMAC0 | MISC_REGISTERS_RESET_REG_2_UMAC1;
+
+ /*
+ * Keep the following blocks in reset:
+ * - all xxMACs are handled by the elink code.
+ */
+ stay_reset2 =
+ MISC_REGISTERS_RESET_REG_2_XMAC |
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
+
+ /* Full reset masks according to the chip */
+ reset_mask1 = 0xffffffff;
+
+ if (CHIP_IS_E1H(sc))
+ reset_mask2 = 0x1ffff;
+ else if (CHIP_IS_E2(sc))
+ reset_mask2 = 0xfffff;
+ else /* CHIP_IS_E3 */
+ reset_mask2 = 0x3ffffff;
+
+ /* Don't reset global blocks unless we need to */
+ if (!global)
+ reset_mask2 &= ~global_bits2;
+
+ /*
+ * In case of attention in the QM, we need to reset PXP
+ * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
+ * because otherwise QM reset would release 'close the gates' shortly
+ * before resetting the PXP, then the PSWRQ would send a write
+ * request to PGLUE. Then when PXP is reset, PGLUE would try to
+ * read the payload data from PSWWR, but PSWWR would not
+ * respond. The write queue in PGLUE would stuck, dmae commands
+ * would not return. Therefore it's important to reset the second
+ * reset register (containing the
+ * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
+ * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
+ * bit).
+ */
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ reset_mask2 & (~not_reset_mask2));
+
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ reset_mask1 & (~not_reset_mask1));
+
+ mb();
+ wmb();
+
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ reset_mask2 & (~stay_reset2));
+
+ mb();
+ wmb();
+
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+ wmb();
+}
+
+static int bnx2x_process_kill(struct bnx2x_softc *sc, uint8_t global)
+{
+ int cnt = 1000;
+ uint32_t val = 0;
+ uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+ uint32_t tags_63_32 = 0;
+
+ /* Empty the Tetris buffer, wait for 1s */
+ do {
+ sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT);
+ blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
+ port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
+ port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
+ pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
+ if (CHIP_IS_E3(sc)) {
+ tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
+ }
+
+ if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
+ ((port_is_idle_0 & 0x1) == 0x1) &&
+ ((port_is_idle_1 & 0x1) == 0x1) &&
+ (pgl_exp_rom2 == 0xffffffff) &&
+ (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
+ break;
+ DELAY(1000);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ PMD_DRV_LOG(NOTICE,
+ "ERROR: Tetris buffer didn't get empty or there "
+ "are still outstanding read requests after 1s! "
+ "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
+ "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x",
+ sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
+ pgl_exp_rom2);
+ return -1;
+ }
+
+ mb();
+
+ /* Close gates #2, #3 and #4 */
+ bnx2x_set_234_gates(sc, TRUE);
+
+ /* Poll for IGU VQs for 57712 and newer chips */
+ if (!CHIP_IS_E1x(sc) && bnx2x_er_poll_igu_vq(sc)) {
+ return -1;
+ }
+
+ /* clear "unprepared" bit */
+ REG_WR(sc, MISC_REG_UNPREPARED, 0);
+ mb();
+
+ /* Make sure all is written to the chip before the reset */
+ wmb();
+
+ /*
+ * Wait for 1ms to empty GLUE and PCI-E core queues,
+ * PSWHST, GRC and PSWRD Tetris buffer.
+ */
+ DELAY(1000);
+
+ /* Prepare to chip reset: */
+ /* MCP */
+ if (global) {
+ bnx2x_reset_mcp_prep(sc, &val);
+ }
+
+ /* PXP */
+ bnx2x_pxp_prep(sc);
+ mb();
+
+ /* reset the chip */
+ bnx2x_process_kill_chip_reset(sc, global);
+ mb();
+
+ /* Recover after reset: */
+ /* MCP */
+ if (global && bnx2x_reset_mcp_comp(sc, val)) {
+ return -1;
+ }
+
+ /* Open the gates #2, #3 and #4 */
+ bnx2x_set_234_gates(sc, FALSE);
+
+ return 0;
+}
+
+static int bnx2x_leader_reset(struct bnx2x_softc *sc)
+{
+ int rc = 0;
+ uint8_t global = bnx2x_reset_is_global(sc);
+ uint32_t load_code;
+
+ /*
+ * If not going to reset MCP, load "fake" driver to reset HW while
+ * driver is owner of the HW.
+ */
+ if (!global && !BNX2X_NOMCP(sc)) {
+ load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
+ DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
+ if (!load_code) {
+ PMD_DRV_LOG(NOTICE, "MCP response failure, aborting");
+ rc = -1;
+ goto exit_leader_reset;
+ }
+
+ if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
+ (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
+ PMD_DRV_LOG(NOTICE,
+ "MCP unexpected response, aborting");
+ rc = -1;
+ goto exit_leader_reset2;
+ }
+
+ load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
+ if (!load_code) {
+ PMD_DRV_LOG(NOTICE, "MCP response failure, aborting");
+ rc = -1;
+ goto exit_leader_reset2;
+ }
+ }
+
+ /* try to recover after the failure */
+ if (bnx2x_process_kill(sc, global)) {
+ PMD_DRV_LOG(NOTICE, "Something bad occurred on engine %d!",
+ SC_PATH(sc));
+ rc = -1;
+ goto exit_leader_reset2;
+ }
+
+ /*
+ * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
+ * state.
+ */
+ bnx2x_set_reset_done(sc);
+ if (global) {
+ bnx2x_clear_reset_global(sc);
+ }
+
+exit_leader_reset2:
+
+ /* unload "fake driver" if it was loaded */
+ if (!global &&!BNX2X_NOMCP(sc)) {
+ bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+ bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ }
+
+exit_leader_reset:
+
+ sc->is_leader = 0;
+ bnx2x_release_leader_lock(sc);
+
+ mb();
+ return rc;
+}
+
+/*
+ * prepare INIT transition, parameters configured:
+ * - HC configuration
+ * - Queue's CDU context
+ */
+static void
+bnx2x_pf_q_prep_init(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
+ struct ecore_queue_init_params *init_params)
+{
+ uint8_t cos;
+ int cxt_index, cxt_offset;
+
+ bnx2x_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
+ bnx2x_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
+
+ bnx2x_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
+ bnx2x_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
+
+ /* HC rate */
+ init_params->rx.hc_rate =
+ sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
+ init_params->tx.hc_rate =
+ sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
+
+ /* FW SB ID */
+ init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
+
+ /* CQ index among the SB indices */
+ init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+ init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
+
+ /* set maximum number of COSs supported by this queue */
+ init_params->max_cos = sc->max_cos;
+
+ /* set the context pointers queue object */
+ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
+ cxt_index = fp->index / ILT_PAGE_CIDS;
+ cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
+ init_params->cxts[cos] =
+ &sc->context[cxt_index].vcxt[cxt_offset].eth;
+ }
+}
+
+/* set flags that are common for the Tx-only and not normal connections */
+static unsigned long
+bnx2x_get_common_flags(struct bnx2x_softc *sc, uint8_t zero_stats)
+{
+ unsigned long flags = 0;
+
+ /* PF driver will always initialize the Queue to an ACTIVE state */
+ bnx2x_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
+
+ /*
+ * tx only connections collect statistics (on the same index as the
+ * parent connection). The statistics are zeroed when the parent
+ * connection is initialized.
+ */
+
+ bnx2x_set_bit(ECORE_Q_FLG_STATS, &flags);
+ if (zero_stats) {
+ bnx2x_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
+ }
+
+ /*
+ * tx only connections can support tx-switching, though their
+ * CoS-ness doesn't survive the loopback
+ */
+ if (sc->flags & BNX2X_TX_SWITCHING) {
+ bnx2x_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
+ }
+
+ bnx2x_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
+
+ return flags;
+}
+
+static unsigned long bnx2x_get_q_flags(struct bnx2x_softc *sc, uint8_t leading)
+{
+ unsigned long flags = 0;
+
+ if (IS_MF_SD(sc)) {
+ bnx2x_set_bit(ECORE_Q_FLG_OV, &flags);
+ }
+
+ if (leading) {
+ bnx2x_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
+ bnx2x_set_bit(ECORE_Q_FLG_MCAST, &flags);
+ }
+
+ bnx2x_set_bit(ECORE_Q_FLG_VLAN, &flags);
+
+ /* merge with common flags */
+ return flags | bnx2x_get_common_flags(sc, TRUE);
+}
+
+static void
+bnx2x_pf_q_prep_general(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
+ struct ecore_general_setup_params *gen_init, uint8_t cos)
+{
+ gen_init->stat_id = bnx2x_stats_id(fp);
+ gen_init->spcl_id = fp->cl_id;
+ gen_init->mtu = sc->mtu;
+ gen_init->cos = cos;
+}
+
+static void
+bnx2x_pf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
+ struct rxq_pause_params *pause,
+ struct ecore_rxq_setup_params *rxq_init)
+{
+ struct bnx2x_rx_queue *rxq;
+
+ rxq = sc->rx_queues[fp->index];
+ if (!rxq) {
+ PMD_RX_LOG(ERR, "RX queue is NULL");
+ return;
+ }
+ /* pause */
+ pause->bd_th_lo = BD_TH_LO(sc);
+ pause->bd_th_hi = BD_TH_HI(sc);
+
+ pause->rcq_th_lo = RCQ_TH_LO(sc);
+ pause->rcq_th_hi = RCQ_TH_HI(sc);
+
+ /* validate rings have enough entries to cross high thresholds */
+ if (sc->dropless_fc &&
+ pause->bd_th_hi + FW_PREFETCH_CNT > sc->rx_ring_size) {
+ PMD_DRV_LOG(WARNING, "rx bd ring threshold limit");
+ }
+
+ if (sc->dropless_fc &&
+ pause->rcq_th_hi + FW_PREFETCH_CNT > USABLE_RCQ_ENTRIES(rxq)) {
+ PMD_DRV_LOG(WARNING, "rcq ring threshold limit");
+ }
+
+ pause->pri_map = 1;
+
+ /* rxq setup */
+ rxq_init->dscr_map = (rte_iova_t)rxq->rx_ring_phys_addr;
+ rxq_init->rcq_map = (rte_iova_t)rxq->cq_ring_phys_addr;
+ rxq_init->rcq_np_map = (rte_iova_t)(rxq->cq_ring_phys_addr +
+ BNX2X_PAGE_SIZE);
+
+ /*
+ * This should be a maximum number of data bytes that may be
+ * placed on the BD (not including paddings).
+ */
+ rxq_init->buf_sz = (fp->rx_buf_size - IP_HEADER_ALIGNMENT_PADDING);
+
+ rxq_init->cl_qzone_id = fp->cl_qzone_id;
+ rxq_init->rss_engine_id = SC_FUNC(sc);
+ rxq_init->mcast_engine_id = SC_FUNC(sc);
+
+ rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ rxq_init->fw_sb_id = fp->fw_sb_id;
+
+ rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+
+ /*
+ * configure silent vlan removal
+ * if multi function mode is afex, then mask default vlan
+ */
+ if (IS_MF_AFEX(sc)) {
+ rxq_init->silent_removal_value =
+ sc->devinfo.mf_info.afex_def_vlan_tag;
+ rxq_init->silent_removal_mask = EVL_VLID_MASK;
+ }
+}
+
+static void
+bnx2x_pf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
+ struct ecore_txq_setup_params *txq_init, uint8_t cos)
+{
+ struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index];
+
+ if (!txq) {
+ PMD_TX_LOG(ERR, "ERROR: TX queue is NULL");
+ return;
+ }
+ txq_init->dscr_map = (rte_iova_t)txq->tx_ring_phys_addr;
+ txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
+ txq_init->fw_sb_id = fp->fw_sb_id;
+
+ /*
+ * set the TSS leading client id for TX classfication to the
+ * leading RSS client id
+ */
+ txq_init->tss_leading_cl_id = BNX2X_FP(sc, 0, cl_id);
+}
+
+/*
+ * This function performs 2 steps in a queue state machine:
+ * 1) RESET->INIT
+ * 2) INIT->SETUP
+ */
+static int
+bnx2x_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, uint8_t leading)
+{
+ struct ecore_queue_state_params q_params = { NULL };
+ struct ecore_queue_setup_params *setup_params = &q_params.params.setup;
+ int rc;
+
+ PMD_DRV_LOG(DEBUG, "setting up queue %d", fp->index);
+
+ bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
+
+ q_params.q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj;
+
+ /* we want to wait for completion in this context */
+ bnx2x_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+ /* prepare the INIT parameters */
+ bnx2x_pf_q_prep_init(sc, fp, &q_params.params.init);
+
+ /* Set the command */
+ q_params.cmd = ECORE_Q_CMD_INIT;
+
+ /* Change the state to INIT */
+ rc = ecore_queue_state_change(sc, &q_params);
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "Queue(%d) INIT failed", fp->index);
+ return rc;
+ }
+
+ PMD_DRV_LOG(DEBUG, "init complete");
+
+ /* now move the Queue to the SETUP state */
+ memset(setup_params, 0, sizeof(*setup_params));
+
+ /* set Queue flags */
+ setup_params->flags = bnx2x_get_q_flags(sc, leading);
+
+ /* set general SETUP parameters */
+ bnx2x_pf_q_prep_general(sc, fp, &setup_params->gen_params,
+ FIRST_TX_COS_INDEX);
+
+ bnx2x_pf_rx_q_prep(sc, fp,
+ &setup_params->pause_params,
+ &setup_params->rxq_params);
+
+ bnx2x_pf_tx_q_prep(sc, fp, &setup_params->txq_params, FIRST_TX_COS_INDEX);
+
+ /* Set the command */
+ q_params.cmd = ECORE_Q_CMD_SETUP;
+
+ /* change the state to SETUP */
+ rc = ecore_queue_state_change(sc, &q_params);
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "Queue(%d) SETUP failed", fp->index);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int bnx2x_setup_leading(struct bnx2x_softc *sc)
+{
+ if (IS_PF(sc))
+ return bnx2x_setup_queue(sc, &sc->fp[0], TRUE);
+ else /* VF */
+ return bnx2x_vf_setup_queue(sc, &sc->fp[0], TRUE);
+}
+
+static int
+bnx2x_config_rss_pf(struct bnx2x_softc *sc, struct ecore_rss_config_obj *rss_obj,
+ uint8_t config_hash)
+{
+ struct ecore_config_rss_params params = { NULL };
+ uint32_t i;
+
+ /*
+ * Although RSS is meaningless when there is a single HW queue we
+ * still need it enabled in order to have HW Rx hash generated.
+ */
+
+ params.rss_obj = rss_obj;
+
+ bnx2x_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+ bnx2x_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
+
+ /* RSS configuration */
+ bnx2x_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
+ bnx2x_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
+ bnx2x_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
+ bnx2x_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
+ if (rss_obj->udp_rss_v4) {
+ bnx2x_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
+ }
+ if (rss_obj->udp_rss_v6) {
+ bnx2x_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
+ }
+
+ /* Hash bits */
+ params.rss_result_mask = MULTI_MASK;
+
+ rte_memcpy(params.ind_table, rss_obj->ind_table,
+ sizeof(params.ind_table));
+
+ if (config_hash) {
+/* RSS keys */
+ for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
+ params.rss_key[i] = (uint32_t) rte_rand();
+ }
+
+ bnx2x_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
+ }
+
+ if (IS_PF(sc))
+ return ecore_config_rss(sc, &params);
+ else
+ return bnx2x_vf_config_rss(sc, &params);
+}
+
+static int bnx2x_config_rss_eth(struct bnx2x_softc *sc, uint8_t config_hash)
+{
+ return bnx2x_config_rss_pf(sc, &sc->rss_conf_obj, config_hash);
+}
+
+static int bnx2x_init_rss_pf(struct bnx2x_softc *sc)
+{
+ uint8_t num_eth_queues = BNX2X_NUM_ETH_QUEUES(sc);
+ uint32_t i;
+
+ /*
+ * Prepare the initial contents of the indirection table if
+ * RSS is enabled
+ */
+ for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
+ sc->rss_conf_obj.ind_table[i] =
+ (sc->fp->cl_id + (i % num_eth_queues));
+ }
+
+ if (sc->udp_rss) {
+ sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
+ }
+
+ /*
+ * For 57711 SEARCHER configuration (rss_keys) is
+ * per-port, so if explicit configuration is needed, do it only
+ * for a PMF.
+ *
+ * For 57712 and newer it's a per-function configuration.
+ */
+ return bnx2x_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc));
+}
+
+static int
+bnx2x_set_mac_one(struct bnx2x_softc *sc, uint8_t * mac,
+ struct ecore_vlan_mac_obj *obj, uint8_t set, int mac_type,
+ unsigned long *ramrod_flags)
+{
+ struct ecore_vlan_mac_ramrod_params ramrod_param;
+ int rc;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* fill in general parameters */
+ ramrod_param.vlan_mac_obj = obj;
+ ramrod_param.ramrod_flags = *ramrod_flags;
+
+ /* fill a user request section if needed */
+ if (!bnx2x_test_bit(RAMROD_CONT, ramrod_flags)) {
+ rte_memcpy(ramrod_param.user_req.u.mac.mac, mac,
+ ETH_ALEN);
+
+ bnx2x_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
+
+/* Set the command: ADD or DEL */
+ ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
+ ECORE_VLAN_MAC_DEL;
+ }
+
+ rc = ecore_config_vlan_mac(sc, &ramrod_param);
+
+ if (rc == ECORE_EXISTS) {
+ PMD_DRV_LOG(INFO, "Failed to schedule ADD operations (EEXIST)");
+/* do not treat adding same MAC as error */
+ rc = 0;
+ } else if (rc < 0) {
+ PMD_DRV_LOG(ERR,
+ "%s MAC failed (%d)", (set ? "Set" : "Delete"), rc);
+ }
+
+ return rc;
+}
+
+static int bnx2x_set_eth_mac(struct bnx2x_softc *sc, uint8_t set)
+{
+ unsigned long ramrod_flags = 0;
+
+ PMD_DRV_LOG(DEBUG, "Adding Ethernet MAC");
+
+ bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+
+ /* Eth MAC is set on RSS leading client (fp[0]) */
+ return bnx2x_set_mac_one(sc, sc->link_params.mac_addr,
+ &sc->sp_objs->mac_obj,
+ set, ECORE_ETH_MAC, &ramrod_flags);
+}
+
+static int bnx2x_get_cur_phy_idx(struct bnx2x_softc *sc)
+{
+ uint32_t sel_phy_idx = 0;
+
+ if (sc->link_params.num_phys <= 1) {
+ return ELINK_INT_PHY;
+ }
+
+ if (sc->link_vars.link_up) {
+ sel_phy_idx = ELINK_EXT_PHY1;
+/* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
+ if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
+ (sc->link_params.phy[ELINK_EXT_PHY2].supported &
+ ELINK_SUPPORTED_FIBRE))
+ sel_phy_idx = ELINK_EXT_PHY2;
+ } else {
+ switch (elink_phy_selection(&sc->link_params)) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ sel_phy_idx = ELINK_EXT_PHY1;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ sel_phy_idx = ELINK_EXT_PHY2;
+ break;
+ }
+ }
+
+ return sel_phy_idx;
+}
+
+static int bnx2x_get_link_cfg_idx(struct bnx2x_softc *sc)
+{
+ uint32_t sel_phy_idx = bnx2x_get_cur_phy_idx(sc);
+
+ /*
+ * The selected activated PHY is always after swapping (in case PHY
+ * swapping is enabled). So when swapping is enabled, we need to reverse
+ * the configuration
+ */
+
+ if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ if (sel_phy_idx == ELINK_EXT_PHY1)
+ sel_phy_idx = ELINK_EXT_PHY2;
+ else if (sel_phy_idx == ELINK_EXT_PHY2)
+ sel_phy_idx = ELINK_EXT_PHY1;
+ }
+
+ return ELINK_LINK_CONFIG_IDX(sel_phy_idx);
+}
+
+static void bnx2x_set_requested_fc(struct bnx2x_softc *sc)
+{
+ /*
+ * Initialize link parameters structure variables
+ * It is recommended to turn off RX FC for jumbo frames
+ * for better performance
+ */
+ if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
+ sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
+ } else {
+ sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
+ }
+}
+
+static void bnx2x_calc_fc_adv(struct bnx2x_softc *sc)
+{
+ uint8_t cfg_idx = bnx2x_get_link_cfg_idx(sc);
+ switch (sc->link_vars.ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
+ default:
+ sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
+ break;
+
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
+ sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
+ break;
+
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
+ sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
+ break;
+ }
+}
+
+static uint16_t bnx2x_get_mf_speed(struct bnx2x_softc *sc)
+{
+ uint16_t line_speed = sc->link_vars.line_speed;
+ if (IS_MF(sc)) {
+ uint16_t maxCfg = bnx2x_extract_max_cfg(sc,
+ sc->devinfo.
+ mf_info.mf_config[SC_VN
+ (sc)]);
+
+/* calculate the current MAX line speed limit for the MF devices */
+ if (IS_MF_SI(sc)) {
+ line_speed = (line_speed * maxCfg) / 100;
+ } else { /* SD mode */
+ uint16_t vn_max_rate = maxCfg * 100;
+
+ if (vn_max_rate < line_speed) {
+ line_speed = vn_max_rate;
+ }
+ }
+ }
+
+ return line_speed;
+}
+
+static void
+bnx2x_fill_report_data(struct bnx2x_softc *sc, struct bnx2x_link_report_data *data)
+{
+ uint16_t line_speed = bnx2x_get_mf_speed(sc);
+
+ memset(data, 0, sizeof(*data));
+
+ /* fill the report data with the effective line speed */
+ data->line_speed = line_speed;
+
+ /* Link is down */
+ if (!sc->link_vars.link_up || (sc->flags & BNX2X_MF_FUNC_DIS)) {
+ bnx2x_set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &data->link_report_flags);
+ }
+
+ /* Full DUPLEX */
+ if (sc->link_vars.duplex == DUPLEX_FULL) {
+ bnx2x_set_bit(BNX2X_LINK_REPORT_FULL_DUPLEX,
+ &data->link_report_flags);
+ }
+
+ /* Rx Flow Control is ON */
+ if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
+ bnx2x_set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
+ }
+
+ /* Tx Flow Control is ON */
+ if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
+ bnx2x_set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
+ }
+}
+
+/* report link status to OS, should be called under phy_lock */
+static void bnx2x_link_report(struct bnx2x_softc *sc)
+{
+ struct bnx2x_link_report_data cur_data;
+
+ /* reread mf_cfg */
+ if (IS_PF(sc)) {
+ bnx2x_read_mf_cfg(sc);
+ }
+
+ /* Read the current link report info */
+ bnx2x_fill_report_data(sc, &cur_data);
+
+ /* Don't report link down or exactly the same link status twice */
+ if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
+ (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &sc->last_reported_link.link_report_flags) &&
+ bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &cur_data.link_report_flags))) {
+ return;
+ }
+
+ sc->link_cnt++;
+
+ /* report new link params and remember the state for the next time */
+ rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
+
+ if (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &cur_data.link_report_flags)) {
+ PMD_DRV_LOG(INFO, "NIC Link is Down");
+ } else {
+ __rte_unused const char *duplex;
+ __rte_unused const char *flow;
+
+ if (bnx2x_test_and_clear_bit(BNX2X_LINK_REPORT_FULL_DUPLEX,
+ &cur_data.link_report_flags)) {
+ duplex = "full";
+ } else {
+ duplex = "half";
+ }
+
+/*
+ * Handle the FC at the end so that only these flags would be
+ * possibly set. This way we may easily check if there is no FC
+ * enabled.
+ */
+ if (cur_data.link_report_flags) {
+ if (bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+ &cur_data.link_report_flags) &&
+ bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+ &cur_data.link_report_flags)) {
+ flow = "ON - receive & transmit";
+ } else if (bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+ &cur_data.link_report_flags) &&
+ !bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+ &cur_data.link_report_flags)) {
+ flow = "ON - receive";
+ } else if (!bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+ &cur_data.link_report_flags) &&
+ bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+ &cur_data.link_report_flags)) {
+ flow = "ON - transmit";
+ } else {
+ flow = "none"; /* possible? */
+ }
+ } else {
+ flow = "none";
+ }
+
+ PMD_DRV_LOG(INFO,
+ "NIC Link is Up, %d Mbps %s duplex, Flow control: %s",
+ cur_data.line_speed, duplex, flow);
+ }
+}
+
+void bnx2x_link_status_update(struct bnx2x_softc *sc)
+{
+ if (sc->state != BNX2X_STATE_OPEN) {
+ return;
+ }
+
+ if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
+ elink_link_status_update(&sc->link_params, &sc->link_vars);
+ } else {
+ sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_2500baseX_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause);
+ sc->port.advertising[0] = sc->port.supported[0];
+
+ sc->link_params.sc = sc;
+ sc->link_params.port = SC_PORT(sc);
+ sc->link_params.req_duplex[0] = DUPLEX_FULL;
+ sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE;
+ sc->link_params.req_line_speed[0] = SPEED_10000;
+ sc->link_params.speed_cap_mask[0] = 0x7f0000;
+ sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G;
+
+ if (CHIP_REV_IS_FPGA(sc)) {
+ sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC;
+ sc->link_vars.line_speed = ELINK_SPEED_1000;
+ sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
+ LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
+ } else {
+ sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC;
+ sc->link_vars.line_speed = ELINK_SPEED_10000;
+ sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
+ LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
+ }
+
+ sc->link_vars.link_up = 1;
+
+ sc->link_vars.duplex = DUPLEX_FULL;
+ sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
+
+ if (IS_PF(sc)) {
+ REG_WR(sc,
+ NIG_REG_EGRESS_DRAIN0_MODE +
+ sc->link_params.port * 4, 0);
+ bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);
+ bnx2x_link_report(sc);
+ }
+ }
+
+ if (IS_PF(sc)) {
+ if (sc->link_vars.link_up) {
+ bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);
+ } else {
+ bnx2x_stats_handle(sc, STATS_EVENT_STOP);
+ }
+ bnx2x_link_report(sc);
+ } else {
+ bnx2x_link_report(sc);
+ bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);
+ }
+}
+
+static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
+{
+ int rc, cfg_idx = bnx2x_get_link_cfg_idx(sc);
+ uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
+ struct elink_params *lp = &sc->link_params;
+
+ bnx2x_set_requested_fc(sc);
+
+ if (load_mode == LOAD_DIAG) {
+ lp->loopback_mode = ELINK_LOOPBACK_XGXS;
+/* Prefer doing PHY loopback at 10G speed, if possible */
+ if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
+ if (lp->speed_cap_mask[cfg_idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
+ lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
+ } else {
+ lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
+ }
+ }
+ }
+
+ if (load_mode == LOAD_LOOPBACK_EXT) {
+ lp->loopback_mode = ELINK_LOOPBACK_EXT;
+ }
+
+ rc = elink_phy_init(&sc->link_params, &sc->link_vars);
+
+ bnx2x_calc_fc_adv(sc);
+
+ if (sc->link_vars.link_up) {
+ bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);
+ bnx2x_link_report(sc);
+ }
+
+ sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
+ return rc;
+}
+
+/* update flags in shmem */
+static void
+bnx2x_update_drv_flags(struct bnx2x_softc *sc, uint32_t flags, uint32_t set)
+{
+ uint32_t drv_flags;
+
+ if (SHMEM2_HAS(sc, drv_flags)) {
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
+ drv_flags = SHMEM2_RD(sc, drv_flags);
+
+ if (set) {
+ drv_flags |= flags;
+ } else {
+ drv_flags &= ~flags;
+ }
+
+ SHMEM2_WR(sc, drv_flags, drv_flags);
+
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
+ }
+}
+
+/* periodic timer callout routine, only runs when the interface is up */
+void bnx2x_periodic_callout(struct bnx2x_softc *sc)
+{
+ if ((sc->state != BNX2X_STATE_OPEN) ||
+ (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
+ PMD_DRV_LOG(WARNING, "periodic callout exit (state=0x%x)",
+ sc->state);
+ return;
+ }
+ if (!CHIP_REV_IS_SLOW(sc)) {
+/*
+ * This barrier is needed to ensure the ordering between the writing
+ * to the sc->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
+ * the reading here.
+ */
+ mb();
+ if (sc->port.pmf) {
+ elink_period_func(&sc->link_params, &sc->link_vars);
+ }
+ }
+#ifdef BNX2X_PULSE
+ if (IS_PF(sc) && !BNX2X_NOMCP(sc)) {
+ int mb_idx = SC_FW_MB_IDX(sc);
+ uint32_t drv_pulse;
+ uint32_t mcp_pulse;
+
+ ++sc->fw_drv_pulse_wr_seq;
+ sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
+
+ drv_pulse = sc->fw_drv_pulse_wr_seq;
+ bnx2x_drv_pulse(sc);
+
+ mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
+ MCP_PULSE_SEQ_MASK);
+
+/*
+ * The delta between driver pulse and mcp response should
+ * be 1 (before mcp response) or 0 (after mcp response).
+ */
+ if ((drv_pulse != mcp_pulse) &&
+ (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
+ /* someone lost a heartbeat... */
+ PMD_DRV_LOG(ERR,
+ "drv_pulse (0x%x) != mcp_pulse (0x%x)",
+ drv_pulse, mcp_pulse);
+ }
+ }
+#endif
+}
+
+/* start the controller */
+static __rte_noinline
+int bnx2x_nic_load(struct bnx2x_softc *sc)
+{
+ uint32_t val;
+ uint32_t load_code = 0;
+ int i, rc = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ sc->state = BNX2X_STATE_OPENING_WAITING_LOAD;
+
+ if (IS_PF(sc)) {
+/* must be called before memory allocation and HW init */
+ bnx2x_ilt_set_info(sc);
+ }
+
+ bnx2x_set_fp_rx_buf_size(sc);
+
+ if (IS_PF(sc)) {
+ if (bnx2x_alloc_mem(sc) != 0) {
+ sc->state = BNX2X_STATE_CLOSED;
+ rc = -ENOMEM;
+ goto bnx2x_nic_load_error0;
+ }
+ }
+
+ if (bnx2x_alloc_fw_stats_mem(sc) != 0) {
+ sc->state = BNX2X_STATE_CLOSED;
+ rc = -ENOMEM;
+ goto bnx2x_nic_load_error0;
+ }
+
+ if (IS_VF(sc)) {
+ rc = bnx2x_vf_init(sc);
+ if (rc) {
+ sc->state = BNX2X_STATE_ERROR;
+ goto bnx2x_nic_load_error0;
+ }
+ }
+
+ if (IS_PF(sc)) {
+/* set pf load just before approaching the MCP */
+ bnx2x_set_pf_load(sc);
+
+/* if MCP exists send load request and analyze response */
+ if (!BNX2X_NOMCP(sc)) {
+ /* attempt to load pf */
+ if (bnx2x_nic_load_request(sc, &load_code) != 0) {
+ sc->state = BNX2X_STATE_CLOSED;
+ rc = -ENXIO;
+ goto bnx2x_nic_load_error1;
+ }
+
+ /* what did the MCP say? */
+ if (bnx2x_nic_load_analyze_req(sc, load_code) != 0) {
+ bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
+ sc->state = BNX2X_STATE_CLOSED;
+ rc = -ENXIO;
+ goto bnx2x_nic_load_error2;
+ }
+ } else {
+ PMD_DRV_LOG(INFO, "Device has no MCP!");
+ load_code = bnx2x_nic_load_no_mcp(sc);
+ }
+
+/* mark PMF if applicable */
+ bnx2x_nic_load_pmf(sc, load_code);
+
+/* Init Function state controlling object */
+ bnx2x_init_func_obj(sc);
+
+/* Initialize HW */
+ if (bnx2x_init_hw(sc, load_code) != 0) {
+ PMD_DRV_LOG(NOTICE, "HW init failed");
+ bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
+ sc->state = BNX2X_STATE_CLOSED;
+ rc = -ENXIO;
+ goto bnx2x_nic_load_error2;
+ }
+ }
+
+ bnx2x_nic_init(sc, load_code);
+
+ /* Init per-function objects */
+ if (IS_PF(sc)) {
+ bnx2x_init_objs(sc);
+
+/* set AFEX default VLAN tag to an invalid value */
+ sc->devinfo.mf_info.afex_def_vlan_tag = -1;
+
+ sc->state = BNX2X_STATE_OPENING_WAITING_PORT;
+ rc = bnx2x_func_start(sc);
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "Function start failed!");
+ bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
+ sc->state = BNX2X_STATE_ERROR;
+ goto bnx2x_nic_load_error3;
+ }
+
+/* send LOAD_DONE command to MCP */
+ if (!BNX2X_NOMCP(sc)) {
+ load_code =
+ bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
+ if (!load_code) {
+ PMD_DRV_LOG(NOTICE,
+ "MCP response failure, aborting");
+ sc->state = BNX2X_STATE_ERROR;
+ rc = -ENXIO;
+ goto bnx2x_nic_load_error3;
+ }
+ }
+ }
+
+ rc = bnx2x_setup_leading(sc);
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "Setup leading failed!");
+ sc->state = BNX2X_STATE_ERROR;
+ goto bnx2x_nic_load_error3;
+ }
+
+ FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
+ if (IS_PF(sc))
+ rc = bnx2x_setup_queue(sc, &sc->fp[i], FALSE);
+ else /* IS_VF(sc) */
+ rc = bnx2x_vf_setup_queue(sc, &sc->fp[i], FALSE);
+
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "Queue(%d) setup failed", i);
+ sc->state = BNX2X_STATE_ERROR;
+ goto bnx2x_nic_load_error3;
+ }
+ }
+
+ rc = bnx2x_init_rss_pf(sc);
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "PF RSS init failed");
+ sc->state = BNX2X_STATE_ERROR;
+ goto bnx2x_nic_load_error3;
+ }
+
+ /* now when Clients are configured we are ready to work */
+ sc->state = BNX2X_STATE_OPEN;
+
+ /* Configure a ucast MAC */
+ if (IS_PF(sc)) {
+ rc = bnx2x_set_eth_mac(sc, TRUE);
+ } else { /* IS_VF(sc) */
+ rc = bnx2x_vf_set_mac(sc, TRUE);
+ }
+
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "Setting Ethernet MAC failed");
+ sc->state = BNX2X_STATE_ERROR;
+ goto bnx2x_nic_load_error3;
+ }
+
+ if (sc->port.pmf) {
+ rc = bnx2x_initial_phy_init(sc, LOAD_OPEN);
+ if (rc) {
+ sc->state = BNX2X_STATE_ERROR;
+ goto bnx2x_nic_load_error3;
+ }
+ }
+
+ sc->link_params.feature_config_flags &=
+ ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
+
+ /* start the Tx */
+ switch (LOAD_OPEN) {
+ case LOAD_NORMAL:
+ case LOAD_OPEN:
+ break;
+
+ case LOAD_DIAG:
+ case LOAD_LOOPBACK_EXT:
+ sc->state = BNX2X_STATE_DIAG;
+ break;
+
+ default:
+ break;
+ }
+
+ if (sc->port.pmf) {
+ bnx2x_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
+ } else {
+ bnx2x_link_status_update(sc);
+ }
+
+ if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
+/* mark driver is loaded in shmem2 */
+ val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
+ SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
+ (val |
+ DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
+ DRV_FLAGS_CAPABILITIES_LOADED_L2));
+ }
+
+ /* start fast path */
+ /* Initialize Rx filter */
+ bnx2x_set_rx_mode(sc);
+
+ /* wait for all pending SP commands to complete */
+ if (IS_PF(sc) && !bnx2x_wait_sp_comp(sc, ~0x0UL)) {
+ PMD_DRV_LOG(NOTICE, "Timeout waiting for all SPs to complete!");
+ bnx2x_periodic_stop(sc);
+ bnx2x_nic_unload(sc, UNLOAD_CLOSE, FALSE);
+ return -ENXIO;
+ }
+
+ PMD_DRV_LOG(DEBUG, "NIC successfully loaded");
+
+ return 0;
+
+bnx2x_nic_load_error3:
+
+ if (IS_PF(sc)) {
+ bnx2x_int_disable_sync(sc, 1);
+
+/* clean out queued objects */
+ bnx2x_squeeze_objects(sc);
+ }
+
+bnx2x_nic_load_error2:
+
+ if (IS_PF(sc) && !BNX2X_NOMCP(sc)) {
+ bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+ bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ }
+
+ sc->port.pmf = 0;
+
+bnx2x_nic_load_error1:
+
+ /* clear pf_load status, as it was already set */
+ if (IS_PF(sc)) {
+ bnx2x_clear_pf_load(sc);
+ }
+
+bnx2x_nic_load_error0:
+
+ bnx2x_free_fw_stats_mem(sc);
+ bnx2x_free_mem(sc);
+
+ return rc;
+}
+
+/*
+* Handles controller initialization.
+*/
+int bnx2x_init(struct bnx2x_softc *sc)
+{
+ int other_engine = SC_PATH(sc) ? 0 : 1;
+ uint8_t other_load_status, load_status;
+ uint8_t global = FALSE;
+ int rc;
+
+ /* Check if the driver is still running and bail out if it is. */
+ if (sc->state != BNX2X_STATE_CLOSED) {
+ PMD_DRV_LOG(DEBUG, "Init called while driver is running!");
+ rc = 0;
+ goto bnx2x_init_done;
+ }
+
+ bnx2x_set_power_state(sc, PCI_PM_D0);
+
+ /*
+ * If parity occurred during the unload, then attentions and/or
+ * RECOVERY_IN_PROGRESS may still be set. If so we want the first function
+ * loaded on the current engine to complete the recovery. Parity recovery
+ * is only relevant for PF driver.
+ */
+ if (IS_PF(sc)) {
+ other_load_status = bnx2x_get_load_status(sc, other_engine);
+ load_status = bnx2x_get_load_status(sc, SC_PATH(sc));
+
+ if (!bnx2x_reset_is_done(sc, SC_PATH(sc)) ||
+ bnx2x_chk_parity_attn(sc, &global, TRUE)) {
+ do {
+ /*
+ * If there are attentions and they are in global blocks, set
+ * the GLOBAL_RESET bit regardless whether it will be this
+ * function that will complete the recovery or not.
+ */
+ if (global) {
+ bnx2x_set_reset_global(sc);
+ }
+
+ /*
+ * Only the first function on the current engine should try
+ * to recover in open. In case of attentions in global blocks
+ * only the first in the chip should try to recover.
+ */
+ if ((!load_status
+ && (!global ||!other_load_status))
+ && bnx2x_trylock_leader_lock(sc)
+ && !bnx2x_leader_reset(sc)) {
+ PMD_DRV_LOG(INFO,
+ "Recovered during init");
+ break;
+ }
+
+ /* recovery has failed... */
+ bnx2x_set_power_state(sc, PCI_PM_D3hot);
+
+ sc->recovery_state = BNX2X_RECOVERY_FAILED;
+
+ PMD_DRV_LOG(NOTICE,
+ "Recovery flow hasn't properly "
+ "completed yet, try again later. "
+ "If you still see this message after a "
+ "few retries then power cycle is required.");
+
+ rc = -ENXIO;
+ goto bnx2x_init_done;
+ } while (0);
+ }
+ }
+
+ sc->recovery_state = BNX2X_RECOVERY_DONE;
+
+ rc = bnx2x_nic_load(sc);
+
+bnx2x_init_done:
+
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "Initialization failed, "
+ "stack notified driver is NOT running!");
+ }
+
+ return rc;
+}
+
+static void bnx2x_get_function_num(struct bnx2x_softc *sc)
+{
+ uint32_t val = 0;
+
+ /*
+ * Read the ME register to get the function number. The ME register
+ * holds the relative-function number and absolute-function number. The
+ * absolute-function number appears only in E2 and above. Before that
+ * these bits always contained zero, therefore we cannot blindly use them.
+ */
+
+ val = REG_RD(sc, BAR_ME_REGISTER);
+
+ sc->pfunc_rel =
+ (uint8_t) ((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
+ sc->path_id =
+ (uint8_t) ((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) &
+ 1;
+
+ if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
+ sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
+ } else {
+ sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
+ }
+
+ PMD_DRV_LOG(DEBUG,
+ "Relative function %d, Absolute function %d, Path %d",
+ sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
+}
+
+static uint32_t bnx2x_get_shmem_mf_cfg_base(struct bnx2x_softc *sc)
+{
+ uint32_t shmem2_size;
+ uint32_t offset;
+ uint32_t mf_cfg_offset_value;
+
+ /* Non 57712 */
+ offset = (SHMEM_ADDR(sc, func_mb) +
+ (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
+
+ /* 57712 plus */
+ if (sc->devinfo.shmem2_base != 0) {
+ shmem2_size = SHMEM2_RD(sc, size);
+ if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
+ mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
+ if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
+ offset = mf_cfg_offset_value;
+ }
+ }
+ }
+
+ return offset;
+}
+
+static uint32_t bnx2x_pcie_capability_read(struct bnx2x_softc *sc, int reg)
+{
+ uint32_t ret;
+ struct bnx2x_pci_cap *caps;
+
+ /* ensure PCIe capability is enabled */
+ caps = pci_find_cap(sc, PCIY_EXPRESS, BNX2X_PCI_CAP);
+ if (NULL != caps) {
+ PMD_DRV_LOG(DEBUG, "Found PCIe capability: "
+ "id=0x%04X type=0x%04X addr=0x%08X",
+ caps->id, caps->type, caps->addr);
+ pci_read(sc, (caps->addr + reg), &ret, 2);
+ return ret;
+ }
+
+ PMD_DRV_LOG(WARNING, "PCIe capability NOT FOUND!!!");
+
+ return 0;
+}
+
+static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc)
+{
+ return bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) &
+ PCIM_EXP_STA_TRANSACTION_PND;
+}
+
+/*
+* Walk the PCI capabiites list for the device to find what features are
+* supported. These capabilites may be enabled/disabled by firmware so it's
+* best to walk the list rather than make assumptions.
+*/
+static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ struct bnx2x_pci_cap *caps;
+ uint16_t link_status;
+ int reg = 0;
+
+ /* check if PCI Power Management is enabled */
+ caps = pci_find_cap(sc, PCIY_PMG, BNX2X_PCI_CAP);
+ if (NULL != caps) {
+ PMD_DRV_LOG(DEBUG, "Found PM capability: "
+ "id=0x%04X type=0x%04X addr=0x%08X",
+ caps->id, caps->type, caps->addr);
+
+ sc->devinfo.pcie_cap_flags |= BNX2X_PM_CAPABLE_FLAG;
+ sc->devinfo.pcie_pm_cap_reg = caps->addr;
+ }
+
+ link_status = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA);
+
+ sc->devinfo.pcie_link_speed = (link_status & PCIM_LINK_STA_SPEED);
+ sc->devinfo.pcie_link_width =
+ ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
+
+ PMD_DRV_LOG(DEBUG, "PCIe link speed=%d width=%d",
+ sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
+
+ sc->devinfo.pcie_cap_flags |= BNX2X_PCIE_CAPABLE_FLAG;
+
+ /* check if MSI capability is enabled */
+ caps = pci_find_cap(sc, PCIY_MSI, BNX2X_PCI_CAP);
+ if (NULL != caps) {
+ PMD_DRV_LOG(DEBUG, "Found MSI capability at 0x%04x", reg);
+
+ sc->devinfo.pcie_cap_flags |= BNX2X_MSI_CAPABLE_FLAG;
+ sc->devinfo.pcie_msi_cap_reg = caps->addr;
+ }
+
+ /* check if MSI-X capability is enabled */
+ caps = pci_find_cap(sc, PCIY_MSIX, BNX2X_PCI_CAP);
+ if (NULL != caps) {
+ PMD_DRV_LOG(DEBUG, "Found MSI-X capability at 0x%04x", reg);
+
+ sc->devinfo.pcie_cap_flags |= BNX2X_MSIX_CAPABLE_FLAG;
+ sc->devinfo.pcie_msix_cap_reg = caps->addr;
+ }
+}
+
+static int bnx2x_get_shmem_mf_cfg_info_sd(struct bnx2x_softc *sc)
+{
+ struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info;
+ uint32_t val;
+
+ /* get the outer vlan if we're in switch-dependent mode */
+
+ val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
+ mf_info->ext_id = (uint16_t) val;
+
+ mf_info->multi_vnics_mode = 1;
+
+ if (!VALID_OVLAN(mf_info->ext_id)) {
+ PMD_DRV_LOG(NOTICE, "Invalid VLAN (%d)", mf_info->ext_id);
+ return 1;
+ }
+
+ /* get the capabilities */
+ if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
+ FUNC_MF_CFG_PROTOCOL_ISCSI) {
+ mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
+ } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK)
+ == FUNC_MF_CFG_PROTOCOL_FCOE) {
+ mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
+ } else {
+ mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
+ }
+
+ mf_info->vnics_per_port =
+ (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
+
+ return 0;
+}
+
+static uint32_t bnx2x_get_shmem_ext_proto_support_flags(struct bnx2x_softc *sc)
+{
+ uint32_t retval = 0;
+ uint32_t val;
+
+ val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
+
+ if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
+ if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
+ retval |= MF_PROTO_SUPPORT_ETHERNET;
+ }
+ if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
+ retval |= MF_PROTO_SUPPORT_ISCSI;
+ }
+ if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+ retval |= MF_PROTO_SUPPORT_FCOE;
+ }
+ }
+
+ return retval;
+}
+
+static int bnx2x_get_shmem_mf_cfg_info_si(struct bnx2x_softc *sc)
+{
+ struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info;
+ uint32_t val;
+
+ /*
+ * There is no outer vlan if we're in switch-independent mode.
+ * If the mac is valid then assume multi-function.
+ */
+
+ val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
+
+ mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
+
+ mf_info->mf_protos_supported =
+ bnx2x_get_shmem_ext_proto_support_flags(sc);
+
+ mf_info->vnics_per_port =
+ (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
+
+ return 0;
+}
+
+static int bnx2x_get_shmem_mf_cfg_info_niv(struct bnx2x_softc *sc)
+{
+ struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info;
+ uint32_t e1hov_tag;
+ uint32_t func_config;
+ uint32_t niv_config;
+
+ mf_info->multi_vnics_mode = 1;
+
+ e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
+ func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
+ niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
+
+ mf_info->ext_id =
+ (uint16_t) ((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
+ FUNC_MF_CFG_E1HOV_TAG_SHIFT);
+
+ mf_info->default_vlan =
+ (uint16_t) ((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
+ FUNC_MF_CFG_AFEX_VLAN_SHIFT);
+
+ mf_info->niv_allowed_priorities =
+ (uint8_t) ((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
+ FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
+
+ mf_info->niv_default_cos =
+ (uint8_t) ((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
+ FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
+
+ mf_info->afex_vlan_mode =
+ ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
+ FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
+
+ mf_info->niv_mba_enabled =
+ ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
+ FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
+
+ mf_info->mf_protos_supported =
+ bnx2x_get_shmem_ext_proto_support_flags(sc);
+
+ mf_info->vnics_per_port =
+ (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
+
+ return 0;
+}
+
+static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc)
+{
+ struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info;
+ uint32_t mf_cfg1;
+ uint32_t mf_cfg2;
+ uint32_t ovlan1;
+ uint32_t ovlan2;
+ uint8_t i, j;
+
+ /* various MF mode sanity checks... */
+
+ if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
+ PMD_DRV_LOG(NOTICE,
+ "Enumerated function %d is marked as hidden",
+ SC_PORT(sc));
+ return 1;
+ }
+
+ if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
+ PMD_DRV_LOG(NOTICE, "vnics_per_port=%d multi_vnics_mode=%d",
+ mf_info->vnics_per_port, mf_info->multi_vnics_mode);
+ return 1;
+ }
+
+ if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
+/* vnic id > 0 must have valid ovlan in switch-dependent mode */
+ if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
+ PMD_DRV_LOG(NOTICE, "mf_mode=SD vnic_id=%d ovlan=%d",
+ SC_VN(sc), OVLAN(sc));
+ return 1;
+ }
+
+ if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
+ PMD_DRV_LOG(NOTICE,
+ "mf_mode=SD multi_vnics_mode=%d ovlan=%d",
+ mf_info->multi_vnics_mode, OVLAN(sc));
+ return 1;
+ }
+
+/*
+ * Verify all functions are either MF or SF mode. If MF, make sure
+ * sure that all non-hidden functions have a valid ovlan. If SF,
+ * make sure that all non-hidden functions have an invalid ovlan.
+ */
+ FOREACH_ABS_FUNC_IN_PORT(sc, i) {
+ mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
+ ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
+ if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
+ (((mf_info->multi_vnics_mode)
+ && !VALID_OVLAN(ovlan1))
+ || ((!mf_info->multi_vnics_mode)
+ && VALID_OVLAN(ovlan1)))) {
+ PMD_DRV_LOG(NOTICE,
+ "mf_mode=SD function %d MF config "
+ "mismatch, multi_vnics_mode=%d ovlan=%d",
+ i, mf_info->multi_vnics_mode,
+ ovlan1);
+ return 1;
+ }
+ }
+
+/* Verify all funcs on the same port each have a different ovlan. */
+ FOREACH_ABS_FUNC_IN_PORT(sc, i) {
+ mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
+ ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
+ /* iterate from the next function on the port to the max func */
+ for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
+ mf_cfg2 =
+ MFCFG_RD(sc, func_mf_config[j].config);
+ ovlan2 =
+ MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
+ if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE)
+ && VALID_OVLAN(ovlan1)
+ && !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE)
+ && VALID_OVLAN(ovlan2)
+ && (ovlan1 == ovlan2)) {
+ PMD_DRV_LOG(NOTICE,
+ "mf_mode=SD functions %d and %d "
+ "have the same ovlan (%d)",
+ i, j, ovlan1);
+ return 1;
+ }
+ }
+ }
+ }
+ /* MULTI_FUNCTION_SD */
+ return 0;
+}
+
+static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
+{
+ struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info;
+ uint32_t val, mac_upper;
+ uint8_t i, vnic;
+
+ /* initialize mf_info defaults */
+ mf_info->vnics_per_port = 1;
+ mf_info->multi_vnics_mode = FALSE;
+ mf_info->path_has_ovlan = FALSE;
+ mf_info->mf_mode = SINGLE_FUNCTION;
+
+ if (!CHIP_IS_MF_CAP(sc)) {
+ return 0;
+ }
+
+ if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
+ PMD_DRV_LOG(NOTICE, "Invalid mf_cfg_base!");
+ return 1;
+ }
+
+ /* get the MF mode (switch dependent / independent / single-function) */
+
+ val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
+
+ switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) {
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
+
+ mac_upper =
+ MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
+
+ /* check for legal upper mac bytes */
+ if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
+ mf_info->mf_mode = MULTI_FUNCTION_SI;
+ } else {
+ PMD_DRV_LOG(NOTICE,
+ "Invalid config for Switch Independent mode");
+ }
+
+ break;
+
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
+
+ /* get outer vlan configuration */
+ val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
+
+ if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
+ FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ mf_info->mf_mode = MULTI_FUNCTION_SD;
+ } else {
+ PMD_DRV_LOG(NOTICE,
+ "Invalid config for Switch Dependent mode");
+ }
+
+ break;
+
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
+
+ /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
+ return 0;
+
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
+
+ /*
+ * Mark MF mode as NIV if MCP version includes NPAR-SD support
+ * and the MAC address is valid.
+ */
+ mac_upper =
+ MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
+
+ if ((SHMEM2_HAS(sc, afex_driver_support)) &&
+ (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
+ mf_info->mf_mode = MULTI_FUNCTION_AFEX;
+ } else {
+ PMD_DRV_LOG(NOTICE, "Invalid config for AFEX mode");
+ }
+
+ break;
+
+ default:
+
+ PMD_DRV_LOG(NOTICE, "Unknown MF mode (0x%08x)",
+ (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
+
+ return 1;
+ }
+
+ /* set path mf_mode (which could be different than function mf_mode) */
+ if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
+ mf_info->path_has_ovlan = TRUE;
+ } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
+/*
+ * Decide on path multi vnics mode. If we're not in MF mode and in
+ * 4-port mode, this is good enough to check vnic-0 of the other port
+ * on the same path
+ */
+ if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
+ uint8_t other_port = !(PORT_ID(sc) & 1);
+ uint8_t abs_func_other_port =
+ (SC_PATH(sc) + (2 * other_port));
+
+ val =
+ MFCFG_RD(sc,
+ func_mf_config
+ [abs_func_other_port].e1hov_tag);
+
+ mf_info->path_has_ovlan = VALID_OVLAN((uint16_t) val);
+ }
+ }
+
+ if (mf_info->mf_mode == SINGLE_FUNCTION) {
+/* invalid MF config */
+ if (SC_VN(sc) >= 1) {
+ PMD_DRV_LOG(NOTICE, "VNIC ID >= 1 in SF mode");
+ return 1;
+ }
+
+ return 0;
+ }
+
+ /* get the MF configuration */
+ mf_info->mf_config[SC_VN(sc)] =
+ MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
+
+ switch (mf_info->mf_mode) {
+ case MULTI_FUNCTION_SD:
+
+ bnx2x_get_shmem_mf_cfg_info_sd(sc);
+ break;
+
+ case MULTI_FUNCTION_SI:
+
+ bnx2x_get_shmem_mf_cfg_info_si(sc);
+ break;
+
+ case MULTI_FUNCTION_AFEX:
+
+ bnx2x_get_shmem_mf_cfg_info_niv(sc);
+ break;
+
+ default:
+
+ PMD_DRV_LOG(NOTICE, "Get MF config failed (mf_mode=0x%08x)",
+ mf_info->mf_mode);
+ return 1;
+ }
+
+ /* get the congestion management parameters */
+
+ vnic = 0;
+ FOREACH_ABS_FUNC_IN_PORT(sc, i) {
+/* get min/max bw */
+ val = MFCFG_RD(sc, func_mf_config[i].config);
+ mf_info->min_bw[vnic] =
+ ((val & FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT);
+ mf_info->max_bw[vnic] =
+ ((val & FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT);
+ vnic++;
+ }
+
+ return bnx2x_check_valid_mf_cfg(sc);
+}
+
+static int bnx2x_get_shmem_info(struct bnx2x_softc *sc)
+{
+ int port;
+ uint32_t mac_hi, mac_lo, val;
+
+ PMD_INIT_FUNC_TRACE();
+
+ port = SC_PORT(sc);
+ mac_hi = mac_lo = 0;
+
+ sc->link_params.sc = sc;
+ sc->link_params.port = port;
+
+ /* get the hardware config info */
+ sc->devinfo.hw_config = SHMEM_RD(sc, dev_info.shared_hw_config.config);
+ sc->devinfo.hw_config2 =
+ SHMEM_RD(sc, dev_info.shared_hw_config.config2);
+
+ sc->link_params.hw_led_mode =
+ ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
+ SHARED_HW_CFG_LED_MODE_SHIFT);
+
+ /* get the port feature config */
+ sc->port.config =
+ SHMEM_RD(sc, dev_info.port_feature_config[port].config);
+
+ /* get the link params */
+ sc->link_params.speed_cap_mask[ELINK_INT_PHY] =
+ SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask)
+ & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
+ sc->link_params.speed_cap_mask[ELINK_EXT_PHY1] =
+ SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2)
+ & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
+
+ /* get the lane config */
+ sc->link_params.lane_config =
+ SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
+
+ /* get the link config */
+ val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
+ sc->port.link_config[ELINK_INT_PHY] = val;
+ sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ sc->port.link_config[ELINK_EXT_PHY1] =
+ SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
+
+ /* get the override preemphasis flag and enable it or turn it off */
+ val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
+ if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
+ sc->link_params.feature_config_flags |=
+ ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+ } else {
+ sc->link_params.feature_config_flags &=
+ ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+ }
+
+ /* get the initial value of the link params */
+ sc->link_params.multi_phy_config =
+ SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
+
+ /* get external phy info */
+ sc->port.ext_phy_config =
+ SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
+
+ /* get the multifunction configuration */
+ bnx2x_get_mf_cfg_info(sc);
+
+ /* get the mac address */
+ if (IS_MF(sc)) {
+ mac_hi =
+ MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
+ mac_lo =
+ MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
+ } else {
+ mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
+ mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
+ }
+
+ if ((mac_lo == 0) && (mac_hi == 0)) {
+ *sc->mac_addr_str = 0;
+ PMD_DRV_LOG(NOTICE, "No Ethernet address programmed!");
+ } else {
+ sc->link_params.mac_addr[0] = (uint8_t) (mac_hi >> 8);
+ sc->link_params.mac_addr[1] = (uint8_t) (mac_hi);
+ sc->link_params.mac_addr[2] = (uint8_t) (mac_lo >> 24);
+ sc->link_params.mac_addr[3] = (uint8_t) (mac_lo >> 16);
+ sc->link_params.mac_addr[4] = (uint8_t) (mac_lo >> 8);
+ sc->link_params.mac_addr[5] = (uint8_t) (mac_lo);
+ snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ sc->link_params.mac_addr[0],
+ sc->link_params.mac_addr[1],
+ sc->link_params.mac_addr[2],
+ sc->link_params.mac_addr[3],
+ sc->link_params.mac_addr[4],
+ sc->link_params.mac_addr[5]);
+ PMD_DRV_LOG(DEBUG, "Ethernet address: %s", sc->mac_addr_str);
+ }
+
+ return 0;
+}
+
+static void bnx2x_media_detect(struct bnx2x_softc *sc)
+{
+ uint32_t phy_idx = bnx2x_get_cur_phy_idx(sc);
+ switch (sc->link_params.phy[phy_idx].media_type) {
+ case ELINK_ETH_PHY_SFPP_10G_FIBER:
+ case ELINK_ETH_PHY_SFP_1G_FIBER:
+ case ELINK_ETH_PHY_XFP_FIBER:
+ case ELINK_ETH_PHY_KR:
+ case ELINK_ETH_PHY_CX4:
+ PMD_DRV_LOG(INFO, "Found 10GBase-CX4 media.");
+ sc->media = IFM_10G_CX4;
+ break;
+ case ELINK_ETH_PHY_DA_TWINAX:
+ PMD_DRV_LOG(INFO, "Found 10Gb Twinax media.");
+ sc->media = IFM_10G_TWINAX;
+ break;
+ case ELINK_ETH_PHY_BASE_T:
+ PMD_DRV_LOG(INFO, "Found 10GBase-T media.");
+ sc->media = IFM_10G_T;
+ break;
+ case ELINK_ETH_PHY_NOT_PRESENT:
+ PMD_DRV_LOG(INFO, "Media not present.");
+ sc->media = 0;
+ break;
+ case ELINK_ETH_PHY_UNSPECIFIED:
+ default:
+ PMD_DRV_LOG(INFO, "Unknown media!");
+ sc->media = 0;
+ break;
+ }
+}
+
+#define GET_FIELD(value, fname) \
+(((value) & (fname##_MASK)) >> (fname##_SHIFT))
+#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
+#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
+
+static int bnx2x_get_igu_cam_info(struct bnx2x_softc *sc)
+{
+ int pfid = SC_FUNC(sc);
+ int igu_sb_id;
+ uint32_t val;
+ uint8_t fid, igu_sb_cnt = 0;
+
+ sc->igu_base_sb = 0xff;
+
+ if (CHIP_INT_MODE_IS_BC(sc)) {
+ int vn = SC_VN(sc);
+ igu_sb_cnt = sc->igu_sb_cnt;
+ sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
+ FP_SB_MAX_E1x);
+ sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
+ (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
+ return 0;
+ }
+
+ /* IGU in normal mode - read CAM */
+ for (igu_sb_id = 0;
+ igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++) {
+ val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
+ if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
+ continue;
+ }
+ fid = IGU_FID(val);
+ if (fid & IGU_FID_ENCODE_IS_PF) {
+ if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
+ continue;
+ }
+ if (IGU_VEC(val) == 0) {
+ /* default status block */
+ sc->igu_dsb_id = igu_sb_id;
+ } else {
+ if (sc->igu_base_sb == 0xff) {
+ sc->igu_base_sb = igu_sb_id;
+ }
+ igu_sb_cnt++;
+ }
+ }
+ }
+
+ /*
+ * Due to new PF resource allocation by MFW T7.4 and above, it's optional
+ * that number of CAM entries will not be equal to the value advertised in
+ * PCI. Driver should use the minimal value of both as the actual status
+ * block count
+ */
+ sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
+
+ if (igu_sb_cnt == 0) {
+ PMD_DRV_LOG(ERR, "CAM configuration error");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+* Gather various information from the device config space, the device itself,
+* shmem, and the user input.
+*/
+static int bnx2x_get_device_info(struct bnx2x_softc *sc)
+{
+ uint32_t val;
+ int rc;
+
+ /* get the chip revision (chip metal comes from pci config space) */
+ sc->devinfo.chip_id = sc->link_params.chip_id =
+ (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
+ ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
+ (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
+ ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));
+
+ /* force 57811 according to MISC register */
+ if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
+ if (CHIP_IS_57810(sc)) {
+ sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
+ (sc->
+ devinfo.chip_id & 0x0000ffff));
+ } else if (CHIP_IS_57810_MF(sc)) {
+ sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
+ (sc->
+ devinfo.chip_id & 0x0000ffff));
+ }
+ sc->devinfo.chip_id |= 0x1;
+ }
+
+ PMD_DRV_LOG(DEBUG,
+ "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)",
+ sc->devinfo.chip_id,
+ ((sc->devinfo.chip_id >> 16) & 0xffff),
+ ((sc->devinfo.chip_id >> 12) & 0xf),
+ ((sc->devinfo.chip_id >> 4) & 0xff),
+ ((sc->devinfo.chip_id >> 0) & 0xf));
+
+ val = (REG_RD(sc, 0x2874) & 0x55);
+ if ((sc->devinfo.chip_id & 0x1) || (CHIP_IS_E1H(sc) && (val == 0x55))) {
+ sc->flags |= BNX2X_ONE_PORT_FLAG;
+ PMD_DRV_LOG(DEBUG, "single port device");
+ }
+
+ /* set the doorbell size */
+ sc->doorbell_size = (1 << BNX2X_DB_SHIFT);
+
+ /* determine whether the device is in 2 port or 4 port mode */
+ sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1h */
+ if (CHIP_IS_E2E3(sc)) {
+/*
+ * Read port4mode_en_ovwr[0]:
+ * If 1, four port mode is in port4mode_en_ovwr[1].
+ * If 0, four port mode is in port4mode_en[0].
+ */
+ val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
+ if (val & 1) {
+ val = ((val >> 1) & 1);
+ } else {
+ val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
+ }
+
+ sc->devinfo.chip_port_mode =
+ (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
+
+ PMD_DRV_LOG(DEBUG, "Port mode = %s", (val) ? "4" : "2");
+ }
+
+ /* get the function and path info for the device */
+ bnx2x_get_function_num(sc);
+
+ /* get the shared memory base address */
+ sc->devinfo.shmem_base =
+ sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
+ sc->devinfo.shmem2_base =
+ REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
+ MISC_REG_GENERIC_CR_0));
+
+ if (!sc->devinfo.shmem_base) {
+/* this should ONLY prevent upcoming shmem reads */
+ PMD_DRV_LOG(INFO, "MCP not active");
+ sc->flags |= BNX2X_NO_MCP_FLAG;
+ return 0;
+ }
+
+ /* make sure the shared memory contents are valid */
+ val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
+ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
+ (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
+ PMD_DRV_LOG(NOTICE, "Invalid SHMEM validity signature: 0x%08x",
+ val);
+ return 0;
+ }
+
+ /* get the bootcode version */
+ sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
+ snprintf(sc->devinfo.bc_ver_str,
+ sizeof(sc->devinfo.bc_ver_str),
+ "%d.%d.%d",
+ ((sc->devinfo.bc_ver >> 24) & 0xff),
+ ((sc->devinfo.bc_ver >> 16) & 0xff),
+ ((sc->devinfo.bc_ver >> 8) & 0xff));
+ PMD_DRV_LOG(INFO, "Bootcode version: %s", sc->devinfo.bc_ver_str);
+
+ /* get the bootcode shmem address */
+ sc->devinfo.mf_cfg_base = bnx2x_get_shmem_mf_cfg_base(sc);
+
+ /* clean indirect addresses as they're not used */
+ pci_write_long(sc, PCICFG_GRC_ADDRESS, 0);
+ if (IS_PF(sc)) {
+ REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
+ REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
+ REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
+ REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
+ if (CHIP_IS_E1x(sc)) {
+ REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
+ }
+ }
+
+ /* get the nvram size */
+ val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
+ sc->devinfo.flash_size =
+ (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
+
+ bnx2x_set_power_state(sc, PCI_PM_D0);
+ /* get various configuration parameters from shmem */
+ bnx2x_get_shmem_info(sc);
+
+ /* initialize IGU parameters */
+ if (CHIP_IS_E1x(sc)) {
+ sc->devinfo.int_block = INT_BLOCK_HC;
+ sc->igu_dsb_id = DEF_SB_IGU_ID;
+ sc->igu_base_sb = 0;
+ } else {
+ sc->devinfo.int_block = INT_BLOCK_IGU;
+
+/* do not allow device reset during IGU info preocessing */
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
+
+ val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
+
+ if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+ int tout = 5000;
+
+ val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
+ REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
+ REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
+
+ while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
+ tout--;
+ DELAY(1000);
+ }
+
+ if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
+ PMD_DRV_LOG(NOTICE,
+ "FORCING IGU Normal Mode failed!!!");
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
+ return -1;
+ }
+ }
+
+ if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+ PMD_DRV_LOG(DEBUG, "IGU Backward Compatible Mode");
+ sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
+ } else {
+ PMD_DRV_LOG(DEBUG, "IGU Normal Mode");
+ }
+
+ rc = bnx2x_get_igu_cam_info(sc);
+
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
+
+ if (rc) {
+ return rc;
+ }
+ }
+
+ /*
+ * Get base FW non-default (fast path) status block ID. This value is
+ * used to initialize the fw_sb_id saved on the fp/queue structure to
+ * determine the id used by the FW.
+ */
+ if (CHIP_IS_E1x(sc)) {
+ sc->base_fw_ndsb =
+ ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
+ } else {
+/*
+ * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
+ * the same queue are indicated on the same IGU SB). So we prefer
+ * FW and IGU SBs to be the same value.
+ */
+ sc->base_fw_ndsb = sc->igu_base_sb;
+ }
+
+ elink_phy_probe(&sc->link_params);
+
+ return 0;
+}
+
+static void
+bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg)
+{
+ uint32_t cfg_size = 0;
+ uint32_t idx;
+ uint8_t port = SC_PORT(sc);
+
+ /* aggregation of supported attributes of all external phys */
+ sc->port.supported[0] = 0;
+ sc->port.supported[1] = 0;
+
+ switch (sc->link_params.num_phys) {
+ case 1:
+ sc->port.supported[0] =
+ sc->link_params.phy[ELINK_INT_PHY].supported;
+ cfg_size = 1;
+ break;
+ case 2:
+ sc->port.supported[0] =
+ sc->link_params.phy[ELINK_EXT_PHY1].supported;
+ cfg_size = 1;
+ break;
+ case 3:
+ if (sc->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ sc->port.supported[1] =
+ sc->link_params.phy[ELINK_EXT_PHY1].supported;
+ sc->port.supported[0] =
+ sc->link_params.phy[ELINK_EXT_PHY2].supported;
+ } else {
+ sc->port.supported[0] =
+ sc->link_params.phy[ELINK_EXT_PHY1].supported;
+ sc->port.supported[1] =
+ sc->link_params.phy[ELINK_EXT_PHY2].supported;
+ }
+ cfg_size = 2;
+ break;
+ }
+
+ if (!(sc->port.supported[0] || sc->port.supported[1])) {
+ PMD_DRV_LOG(ERR,
+ "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)",
+ SHMEM_RD(sc,
+ dev_info.port_hw_config
+ [port].external_phy_config),
+ SHMEM_RD(sc,
+ dev_info.port_hw_config
+ [port].external_phy_config2));
+ return;
+ }
+
+ if (CHIP_IS_E3(sc))
+ sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
+ else {
+ switch (switch_cfg) {
+ case ELINK_SWITCH_CFG_1G:
+ sc->port.phy_addr =
+ REG_RD(sc,
+ NIG_REG_SERDES0_CTRL_PHY_ADDR + port * 0x10);
+ break;
+ case ELINK_SWITCH_CFG_10G:
+ sc->port.phy_addr =
+ REG_RD(sc,
+ NIG_REG_XGXS0_CTRL_PHY_ADDR + port * 0x18);
+ break;
+ default:
+ PMD_DRV_LOG(ERR,
+ "Invalid switch config in"
+ "link_config=0x%08x",
+ sc->port.link_config[0]);
+ return;
+ }
+ }
+
+ PMD_DRV_LOG(INFO, "PHY addr 0x%08x", sc->port.phy_addr);
+
+ /* mask what we support according to speed_cap_mask per configuration */
+ for (idx = 0; idx < cfg_size; idx++) {
+ if (!(sc->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
+ sc->port.supported[idx] &=
+ ~ELINK_SUPPORTED_10baseT_Half;
+ }
+
+ if (!(sc->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
+ sc->port.supported[idx] &=
+ ~ELINK_SUPPORTED_10baseT_Full;
+ }
+
+ if (!(sc->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
+ sc->port.supported[idx] &=
+ ~ELINK_SUPPORTED_100baseT_Half;
+ }
+
+ if (!(sc->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
+ sc->port.supported[idx] &=
+ ~ELINK_SUPPORTED_100baseT_Full;
+ }
+
+ if (!(sc->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
+ sc->port.supported[idx] &=
+ ~ELINK_SUPPORTED_1000baseT_Full;
+ }
+
+ if (!(sc->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
+ sc->port.supported[idx] &=
+ ~ELINK_SUPPORTED_2500baseX_Full;
+ }
+
+ if (!(sc->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+ sc->port.supported[idx] &=
+ ~ELINK_SUPPORTED_10000baseT_Full;
+ }
+
+ if (!(sc->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
+ sc->port.supported[idx] &=
+ ~ELINK_SUPPORTED_20000baseKR2_Full;
+ }
+ }
+
+ PMD_DRV_LOG(INFO, "PHY supported 0=0x%08x 1=0x%08x",
+ sc->port.supported[0], sc->port.supported[1]);
+}
+
+static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
+{
+ uint32_t link_config;
+ uint32_t idx;
+ uint32_t cfg_size = 0;
+
+ sc->port.advertising[0] = 0;
+ sc->port.advertising[1] = 0;
+
+ switch (sc->link_params.num_phys) {
+ case 1:
+ case 2:
+ cfg_size = 1;
+ break;
+ case 3:
+ cfg_size = 2;
+ break;
+ }
+
+ for (idx = 0; idx < cfg_size; idx++) {
+ sc->link_params.req_duplex[idx] = DUPLEX_FULL;
+ link_config = sc->port.link_config[idx];
+
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ case PORT_FEATURE_LINK_SPEED_AUTO:
+ if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
+ sc->link_params.req_line_speed[idx] =
+ ELINK_SPEED_AUTO_NEG;
+ sc->port.advertising[idx] |=
+ sc->port.supported[idx];
+ if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833)
+ sc->port.advertising[idx] |=
+ (ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full);
+ } else {
+ /* force 10G, no AN */
+ sc->link_params.req_line_speed[idx] =
+ ELINK_SPEED_10000;
+ sc->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ continue;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10M_FULL:
+ if (sc->
+ port.supported[idx] & ELINK_SUPPORTED_10baseT_Full)
+ {
+ sc->link_params.req_line_speed[idx] =
+ ELINK_SPEED_10;
+ sc->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Full | ADVERTISED_TP);
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Invalid NVRAM config link_config=0x%08x "
+ "speed_cap_mask=0x%08x",
+ link_config,
+ sc->
+ link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10M_HALF:
+ if (sc->
+ port.supported[idx] & ELINK_SUPPORTED_10baseT_Half)
+ {
+ sc->link_params.req_line_speed[idx] =
+ ELINK_SPEED_10;
+ sc->link_params.req_duplex[idx] = DUPLEX_HALF;
+ sc->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Half | ADVERTISED_TP);
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Invalid NVRAM config link_config=0x%08x "
+ "speed_cap_mask=0x%08x",
+ link_config,
+ sc->
+ link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_100M_FULL:
+ if (sc->
+ port.supported[idx] & ELINK_SUPPORTED_100baseT_Full)
+ {
+ sc->link_params.req_line_speed[idx] =
+ ELINK_SPEED_100;
+ sc->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Full | ADVERTISED_TP);
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Invalid NVRAM config link_config=0x%08x "
+ "speed_cap_mask=0x%08x",
+ link_config,
+ sc->
+ link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_100M_HALF:
+ if (sc->
+ port.supported[idx] & ELINK_SUPPORTED_100baseT_Half)
+ {
+ sc->link_params.req_line_speed[idx] =
+ ELINK_SPEED_100;
+ sc->link_params.req_duplex[idx] = DUPLEX_HALF;
+ sc->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Half | ADVERTISED_TP);
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Invalid NVRAM config link_config=0x%08x "
+ "speed_cap_mask=0x%08x",
+ link_config,
+ sc->
+ link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_1G:
+ if (sc->port.supported[idx] &
+ ELINK_SUPPORTED_1000baseT_Full) {
+ sc->link_params.req_line_speed[idx] =
+ ELINK_SPEED_1000;
+ sc->port.advertising[idx] |=
+ (ADVERTISED_1000baseT_Full | ADVERTISED_TP);
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Invalid NVRAM config link_config=0x%08x "
+ "speed_cap_mask=0x%08x",
+ link_config,
+ sc->
+ link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_2_5G:
+ if (sc->port.supported[idx] &
+ ELINK_SUPPORTED_2500baseX_Full) {
+ sc->link_params.req_line_speed[idx] =
+ ELINK_SPEED_2500;
+ sc->port.advertising[idx] |=
+ (ADVERTISED_2500baseX_Full | ADVERTISED_TP);
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Invalid NVRAM config link_config=0x%08x "
+ "speed_cap_mask=0x%08x",
+ link_config,
+ sc->
+ link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10G_CX4:
+ if (sc->port.supported[idx] &
+ ELINK_SUPPORTED_10000baseT_Full) {
+ sc->link_params.req_line_speed[idx] =
+ ELINK_SPEED_10000;
+ sc->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Invalid NVRAM config link_config=0x%08x "
+ "speed_cap_mask=0x%08x",
+ link_config,
+ sc->
+ link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_20G:
+ sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR,
+ "Invalid NVRAM config link_config=0x%08x "
+ "speed_cap_mask=0x%08x", link_config,
+ sc->link_params.speed_cap_mask[idx]);
+ sc->link_params.req_line_speed[idx] =
+ ELINK_SPEED_AUTO_NEG;
+ sc->port.advertising[idx] = sc->port.supported[idx];
+ break;
+ }
+
+ sc->link_params.req_flow_ctrl[idx] =
+ (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
+
+ if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
+ if (!
+ (sc->
+ port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
+ sc->link_params.req_flow_ctrl[idx] =
+ ELINK_FLOW_CTRL_NONE;
+ } else {
+ bnx2x_set_requested_fc(sc);
+ }
+ }
+ }
+}
+
+static void bnx2x_get_phy_info(struct bnx2x_softc *sc)
+{
+ uint8_t port = SC_PORT(sc);
+ uint32_t eee_mode;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* shmem data already read in bnx2x_get_shmem_info() */
+
+ bnx2x_link_settings_supported(sc, sc->link_params.switch_cfg);
+ bnx2x_link_settings_requested(sc);
+
+ /* configure link feature according to nvram value */
+ eee_mode =
+ (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode))
+ & PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+ if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
+ sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
+ ELINK_EEE_MODE_ENABLE_LPI |
+ ELINK_EEE_MODE_OUTPUT_TIME);
+ } else {
+ sc->link_params.eee_mode = 0;
+ }
+
+ /* get the media type */
+ bnx2x_media_detect(sc);
+}
+
+static void bnx2x_set_modes_bitmap(struct bnx2x_softc *sc)
+{
+ uint32_t flags = MODE_ASIC | MODE_PORT2;
+
+ if (CHIP_IS_E2(sc)) {
+ flags |= MODE_E2;
+ } else if (CHIP_IS_E3(sc)) {
+ flags |= MODE_E3;
+ if (CHIP_REV(sc) == CHIP_REV_Ax) {
+ flags |= MODE_E3_A0;
+ } else { /*if (CHIP_REV(sc) == CHIP_REV_Bx) */
+
+ flags |= MODE_E3_B0 | MODE_COS3;
+ }
+ }
+
+ if (IS_MF(sc)) {
+ flags |= MODE_MF;
+ switch (sc->devinfo.mf_info.mf_mode) {
+ case MULTI_FUNCTION_SD:
+ flags |= MODE_MF_SD;
+ break;
+ case MULTI_FUNCTION_SI:
+ flags |= MODE_MF_SI;
+ break;
+ case MULTI_FUNCTION_AFEX:
+ flags |= MODE_MF_AFEX;
+ break;
+ }
+ } else {
+ flags |= MODE_SF;
+ }
+
+#if defined(__LITTLE_ENDIAN)
+ flags |= MODE_LITTLE_ENDIAN;
+#else /* __BIG_ENDIAN */
+ flags |= MODE_BIG_ENDIAN;
+#endif
+
+ INIT_MODE_FLAGS(sc) = flags;
+}
+
+int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc)
+{
+ struct bnx2x_fastpath *fp;
+ char buf[32];
+ uint32_t i;
+
+ if (IS_PF(sc)) {
+/************************/
+/* DEFAULT STATUS BLOCK */
+/************************/
+
+ if (bnx2x_dma_alloc(sc, sizeof(struct host_sp_status_block),
+ &sc->def_sb_dma, "def_sb",
+ RTE_CACHE_LINE_SIZE) != 0) {
+ return -1;
+ }
+
+ sc->def_sb =
+ (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
+/***************/
+/* EVENT QUEUE */
+/***************/
+
+ if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE,
+ &sc->eq_dma, "ev_queue",
+ RTE_CACHE_LINE_SIZE) != 0) {
+ sc->def_sb = NULL;
+ return -1;
+ }
+
+ sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr;
+
+/*************/
+/* SLOW PATH */
+/*************/
+
+ if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_slowpath),
+ &sc->sp_dma, "sp",
+ RTE_CACHE_LINE_SIZE) != 0) {
+ sc->eq = NULL;
+ sc->def_sb = NULL;
+ return -1;
+ }
+
+ sc->sp = (struct bnx2x_slowpath *)sc->sp_dma.vaddr;
+
+/*******************/
+/* SLOW PATH QUEUE */
+/*******************/
+
+ if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE,
+ &sc->spq_dma, "sp_queue",
+ RTE_CACHE_LINE_SIZE) != 0) {
+ sc->sp = NULL;
+ sc->eq = NULL;
+ sc->def_sb = NULL;
+ return -1;
+ }
+
+ sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
+
+/***************************/
+/* FW DECOMPRESSION BUFFER */
+/***************************/
+
+ if (bnx2x_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
+ "fw_buf", RTE_CACHE_LINE_SIZE) != 0) {
+ sc->spq = NULL;
+ sc->sp = NULL;
+ sc->eq = NULL;
+ sc->def_sb = NULL;
+ return -1;
+ }
+
+ sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
+ }
+
+ /*************/
+ /* FASTPATHS */
+ /*************/
+
+ /* allocate DMA memory for each fastpath structure */
+ for (i = 0; i < sc->num_queues; i++) {
+ fp = &sc->fp[i];
+ fp->sc = sc;
+ fp->index = i;
+
+/*******************/
+/* FP STATUS BLOCK */
+/*******************/
+
+ snprintf(buf, sizeof(buf), "fp_%d_sb", i);
+ if (bnx2x_dma_alloc(sc, sizeof(union bnx2x_host_hc_status_block),
+ &fp->sb_dma, buf, RTE_CACHE_LINE_SIZE) != 0) {
+ PMD_DRV_LOG(NOTICE, "Failed to alloc %s", buf);
+ return -1;
+ } else {
+ if (CHIP_IS_E2E3(sc)) {
+ fp->status_block.e2_sb =
+ (struct host_hc_status_block_e2 *)
+ fp->sb_dma.vaddr;
+ } else {
+ fp->status_block.e1x_sb =
+ (struct host_hc_status_block_e1x *)
+ fp->sb_dma.vaddr;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void bnx2x_free_hsi_mem(struct bnx2x_softc *sc)
+{
+ struct bnx2x_fastpath *fp;
+ int i;
+
+ for (i = 0; i < sc->num_queues; i++) {
+ fp = &sc->fp[i];
+
+/*******************/
+/* FP STATUS BLOCK */
+/*******************/
+
+ memset(&fp->status_block, 0, sizeof(fp->status_block));
+ }
+
+ /***************************/
+ /* FW DECOMPRESSION BUFFER */
+ /***************************/
+
+ sc->gz_buf = NULL;
+
+ /*******************/
+ /* SLOW PATH QUEUE */
+ /*******************/
+
+ sc->spq = NULL;
+
+ /*************/
+ /* SLOW PATH */
+ /*************/
+
+ sc->sp = NULL;
+
+ /***************/
+ /* EVENT QUEUE */
+ /***************/
+
+ sc->eq = NULL;
+
+ /************************/
+ /* DEFAULT STATUS BLOCK */
+ /************************/
+
+ sc->def_sb = NULL;
+
+}
+
+/*
+* Previous driver DMAE transaction may have occurred when pre-boot stage
+* ended and boot began. This would invalidate the addresses of the
+* transaction, resulting in was-error bit set in the PCI causing all
+* hw-to-host PCIe transactions to timeout. If this happened we want to clear
+* the interrupt which detected this from the pglueb and the was-done bit
+*/
+static void bnx2x_prev_interrupted_dmae(struct bnx2x_softc *sc)
+{
+ uint32_t val;
+
+ if (!CHIP_IS_E1x(sc)) {
+ val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+ REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+ 1 << SC_FUNC(sc));
+ }
+ }
+}
+
+static int bnx2x_prev_mcp_done(struct bnx2x_softc *sc)
+{
+ uint32_t rc = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
+ DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
+ if (!rc) {
+ PMD_DRV_LOG(NOTICE, "MCP response failure, aborting");
+ return -1;
+ }
+
+ return 0;
+}
+
+static struct bnx2x_prev_list_node *bnx2x_prev_path_get_entry(struct bnx2x_softc *sc)
+{
+ struct bnx2x_prev_list_node *tmp;
+
+ LIST_FOREACH(tmp, &bnx2x_prev_list, node) {
+ if ((sc->pcie_bus == tmp->bus) &&
+ (sc->pcie_device == tmp->slot) &&
+ (SC_PATH(sc) == tmp->path)) {
+ return tmp;
+ }
+ }
+
+ return NULL;
+}
+
+static uint8_t bnx2x_prev_is_path_marked(struct bnx2x_softc *sc)
+{
+ struct bnx2x_prev_list_node *tmp;
+ int rc = FALSE;
+
+ rte_spinlock_lock(&bnx2x_prev_mtx);
+
+ tmp = bnx2x_prev_path_get_entry(sc);
+ if (tmp) {
+ if (tmp->aer) {
+ PMD_DRV_LOG(DEBUG,
+ "Path %d/%d/%d was marked by AER",
+ sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
+ } else {
+ rc = TRUE;
+ PMD_DRV_LOG(DEBUG,
+ "Path %d/%d/%d was already cleaned from previous drivers",
+ sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
+ }
+ }
+
+ rte_spinlock_unlock(&bnx2x_prev_mtx);
+
+ return rc;
+}
+
+static int bnx2x_prev_mark_path(struct bnx2x_softc *sc, uint8_t after_undi)
+{
+ struct bnx2x_prev_list_node *tmp;
+
+ rte_spinlock_lock(&bnx2x_prev_mtx);
+
+ /* Check whether the entry for this path already exists */
+ tmp = bnx2x_prev_path_get_entry(sc);
+ if (tmp) {
+ if (!tmp->aer) {
+ PMD_DRV_LOG(DEBUG,
+ "Re-marking AER in path %d/%d/%d",
+ sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
+ } else {
+ PMD_DRV_LOG(DEBUG,
+ "Removing AER indication from path %d/%d/%d",
+ sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
+ tmp->aer = 0;
+ }
+
+ rte_spinlock_unlock(&bnx2x_prev_mtx);
+ return 0;
+ }
+
+ rte_spinlock_unlock(&bnx2x_prev_mtx);
+
+ /* Create an entry for this path and add it */
+ tmp = rte_malloc("", sizeof(struct bnx2x_prev_list_node),
+ RTE_CACHE_LINE_SIZE);
+ if (!tmp) {
+ PMD_DRV_LOG(NOTICE, "Failed to allocate 'bnx2x_prev_list_node'");
+ return -1;
+ }
+
+ tmp->bus = sc->pcie_bus;
+ tmp->slot = sc->pcie_device;
+ tmp->path = SC_PATH(sc);
+ tmp->aer = 0;
+ tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
+
+ rte_spinlock_lock(&bnx2x_prev_mtx);
+
+ LIST_INSERT_HEAD(&bnx2x_prev_list, tmp, node);
+
+ rte_spinlock_unlock(&bnx2x_prev_mtx);
+
+ return 0;
+}
+
+static int bnx2x_do_flr(struct bnx2x_softc *sc)
+{
+ int i;
+
+ /* only E2 and onwards support FLR */
+ if (CHIP_IS_E1x(sc)) {
+ PMD_DRV_LOG(WARNING, "FLR not supported in E1H");
+ return -1;
+ }
+
+ /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
+ if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
+ PMD_DRV_LOG(WARNING,
+ "FLR not supported by BC_VER: 0x%08x",
+ sc->devinfo.bc_ver);
+ return -1;
+ }
+
+ /* Wait for Transaction Pending bit clean */
+ for (i = 0; i < 4; i++) {
+ if (i) {
+ DELAY(((1 << (i - 1)) * 100) * 1000);
+ }
+
+ if (!bnx2x_is_pcie_pending(sc)) {
+ goto clear;
+ }
+ }
+
+ PMD_DRV_LOG(NOTICE, "PCIE transaction is not cleared, "
+ "proceeding with reset anyway");
+
+clear:
+ bnx2x_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
+
+ return 0;
+}
+
+struct bnx2x_mac_vals {
+ uint32_t xmac_addr;
+ uint32_t xmac_val;
+ uint32_t emac_addr;
+ uint32_t emac_val;
+ uint32_t umac_addr;
+ uint32_t umac_val;
+ uint32_t bmac_addr;
+ uint32_t bmac_val[2];
+};
+
+static void
+bnx2x_prev_unload_close_mac(struct bnx2x_softc *sc, struct bnx2x_mac_vals *vals)
+{
+ uint32_t val, base_addr, offset, mask, reset_reg;
+ uint8_t mac_stopped = FALSE;
+ uint8_t port = SC_PORT(sc);
+ uint32_t wb_data[2];
+
+ /* reset addresses as they also mark which values were changed */
+ vals->bmac_addr = 0;
+ vals->umac_addr = 0;
+ vals->xmac_addr = 0;
+ vals->emac_addr = 0;
+
+ reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
+
+ if (!CHIP_IS_E3(sc)) {
+ val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
+ mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
+ if ((mask & reset_reg) && val) {
+ base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
+ : NIG_REG_INGRESS_BMAC0_MEM;
+ offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
+ : BIGMAC_REGISTER_BMAC_CONTROL;
+
+ /*
+ * use rd/wr since we cannot use dmae. This is safe
+ * since MCP won't access the bus due to the request
+ * to unload, and no function on the path can be
+ * loaded at this time.
+ */
+ wb_data[0] = REG_RD(sc, base_addr + offset);
+ wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
+ vals->bmac_addr = base_addr + offset;
+ vals->bmac_val[0] = wb_data[0];
+ vals->bmac_val[1] = wb_data[1];
+ wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
+ REG_WR(sc, vals->bmac_addr, wb_data[0]);
+ REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
+ }
+
+ vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc) * 4;
+ vals->emac_val = REG_RD(sc, vals->emac_addr);
+ REG_WR(sc, vals->emac_addr, 0);
+ mac_stopped = TRUE;
+ } else {
+ if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
+ base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+ val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
+ REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI,
+ val & ~(1 << 1));
+ REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI,
+ val | (1 << 1));
+ vals->xmac_addr = base_addr + XMAC_REG_CTRL;
+ vals->xmac_val = REG_RD(sc, vals->xmac_addr);
+ REG_WR(sc, vals->xmac_addr, 0);
+ mac_stopped = TRUE;
+ }
+
+ mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
+ if (mask & reset_reg) {
+ base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
+ vals->umac_val = REG_RD(sc, vals->umac_addr);
+ REG_WR(sc, vals->umac_addr, 0);
+ mac_stopped = TRUE;
+ }
+ }
+
+ if (mac_stopped) {
+ DELAY(20000);
+ }
+}
+
+#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
+#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
+#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
+#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
+
+static void
+bnx2x_prev_unload_undi_inc(struct bnx2x_softc *sc, uint8_t port, uint8_t inc)
+{
+ uint16_t rcq, bd;
+ uint32_t tmp_reg = REG_RD(sc, BNX2X_PREV_UNDI_PROD_ADDR(port));
+
+ rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
+ bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
+
+ tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
+ REG_WR(sc, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
+}
+
+static int bnx2x_prev_unload_common(struct bnx2x_softc *sc)
+{
+ uint32_t reset_reg, tmp_reg = 0, rc;
+ uint8_t prev_undi = FALSE;
+ struct bnx2x_mac_vals mac_vals;
+ uint32_t timer_count = 1000;
+ uint32_t prev_brb;
+
+ /*
+ * It is possible a previous function received 'common' answer,
+ * but hasn't loaded yet, therefore creating a scenario of
+ * multiple functions receiving 'common' on the same path.
+ */
+ memset(&mac_vals, 0, sizeof(mac_vals));
+
+ if (bnx2x_prev_is_path_marked(sc)) {
+ return bnx2x_prev_mcp_done(sc);
+ }
+
+ reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
+
+ /* Reset should be performed after BRB is emptied */
+ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
+ /* Close the MAC Rx to prevent BRB from filling up */
+ bnx2x_prev_unload_close_mac(sc, &mac_vals);
+
+ /* close LLH filters towards the BRB */
+ elink_set_rx_filter(&sc->link_params, 0);
+
+ /*
+ * Check if the UNDI driver was previously loaded.
+ * UNDI driver initializes CID offset for normal bell to 0x7
+ */
+ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
+ tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
+ if (tmp_reg == 0x7) {
+ PMD_DRV_LOG(DEBUG, "UNDI previously loaded");
+ prev_undi = TRUE;
+ /* clear the UNDI indication */
+ REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
+ /* clear possible idle check errors */
+ REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
+ }
+ }
+
+ /* wait until BRB is empty */
+ tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
+ while (timer_count) {
+ prev_brb = tmp_reg;
+
+ tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
+ if (!tmp_reg) {
+ break;
+ }
+
+ PMD_DRV_LOG(DEBUG, "BRB still has 0x%08x", tmp_reg);
+
+ /* reset timer as long as BRB actually gets emptied */
+ if (prev_brb > tmp_reg) {
+ timer_count = 1000;
+ } else {
+ timer_count--;
+ }
+
+ /* If UNDI resides in memory, manually increment it */
+ if (prev_undi) {
+ bnx2x_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
+ }
+
+ DELAY(10);
+ }
+
+ if (!timer_count) {
+ PMD_DRV_LOG(NOTICE, "Failed to empty BRB");
+ }
+ }
+
+ /* No packets are in the pipeline, path is ready for reset */
+ bnx2x_reset_common(sc);
+
+ if (mac_vals.xmac_addr) {
+ REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
+ }
+ if (mac_vals.umac_addr) {
+ REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
+ }
+ if (mac_vals.emac_addr) {
+ REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
+ }
+ if (mac_vals.bmac_addr) {
+ REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
+ REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
+ }
+
+ rc = bnx2x_prev_mark_path(sc, prev_undi);
+ if (rc) {
+ bnx2x_prev_mcp_done(sc);
+ return rc;
+ }
+
+ return bnx2x_prev_mcp_done(sc);
+}
+
+static int bnx2x_prev_unload_uncommon(struct bnx2x_softc *sc)
+{
+ int rc;
+
+ /* Test if previous unload process was already finished for this path */
+ if (bnx2x_prev_is_path_marked(sc)) {
+ return bnx2x_prev_mcp_done(sc);
+ }
+
+ /*
+ * If function has FLR capabilities, and existing FW version matches
+ * the one required, then FLR will be sufficient to clean any residue
+ * left by previous driver
+ */
+ rc = bnx2x_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
+ if (!rc) {
+ /* fw version is good */
+ rc = bnx2x_do_flr(sc);
+ }
+
+ if (!rc) {
+ /* FLR was performed */
+ return 0;
+ }
+
+ PMD_DRV_LOG(INFO, "Could not FLR");
+
+ /* Close the MCP request, return failure */
+ rc = bnx2x_prev_mcp_done(sc);
+ if (!rc) {
+ rc = BNX2X_PREV_WAIT_NEEDED;
+ }
+
+ return rc;
+}
+
+static int bnx2x_prev_unload(struct bnx2x_softc *sc)
+{
+ int time_counter = 10;
+ uint32_t fw, hw_lock_reg, hw_lock_val;
+ uint32_t rc = 0;
+
+ /*
+ * Clear HW from errors which may have resulted from an interrupted
+ * DMAE transaction.
+ */
+ bnx2x_prev_interrupted_dmae(sc);
+
+ /* Release previously held locks */
+ if (SC_FUNC(sc) <= 5)
+ hw_lock_reg = (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8);
+ else
+ hw_lock_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
+
+ hw_lock_val = (REG_RD(sc, hw_lock_reg));
+ if (hw_lock_val) {
+ if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
+ REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
+ (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
+ }
+ REG_WR(sc, hw_lock_reg, 0xffffffff);
+ }
+
+ if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
+ REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
+ }
+
+ do {
+ /* Lock MCP using an unload request */
+ fw = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
+ if (!fw) {
+ PMD_DRV_LOG(NOTICE, "MCP response failure, aborting");
+ rc = -1;
+ break;
+ }
+
+ if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+ rc = bnx2x_prev_unload_common(sc);
+ break;
+ }
+
+ /* non-common reply from MCP might require looping */
+ rc = bnx2x_prev_unload_uncommon(sc);
+ if (rc != BNX2X_PREV_WAIT_NEEDED) {
+ break;
+ }
+
+ DELAY(20000);
+ } while (--time_counter);
+
+ if (!time_counter || rc) {
+ PMD_DRV_LOG(NOTICE, "Failed to unload previous driver!");
+ rc = -1;
+ }
+
+ return rc;
+}
+
+static void
+bnx2x_dcbx_set_state(struct bnx2x_softc *sc, uint8_t dcb_on, uint32_t dcbx_enabled)
+{
+ if (!CHIP_IS_E1x(sc)) {
+ sc->dcb_state = dcb_on;
+ sc->dcbx_enabled = dcbx_enabled;
+ } else {
+ sc->dcb_state = FALSE;
+ sc->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
+ }
+ PMD_DRV_LOG(DEBUG,
+ "DCB state [%s:%s]",
+ dcb_on ? "ON" : "OFF",
+ (dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ? "user-mode" :
+ (dcbx_enabled ==
+ BNX2X_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static"
+ : (dcbx_enabled ==
+ BNX2X_DCBX_ENABLED_ON_NEG_ON) ?
+ "on-chip with negotiation" : "invalid");
+}
+
+static int bnx2x_set_qm_cid_count(struct bnx2x_softc *sc)
+{
+ int cid_count = BNX2X_L2_MAX_CID(sc);
+
+ if (CNIC_SUPPORT(sc)) {
+ cid_count += CNIC_CID_MAX;
+ }
+
+ return roundup(cid_count, QM_CID_ROUND);
+}
+
+static void bnx2x_init_multi_cos(struct bnx2x_softc *sc)
+{
+ int pri, cos;
+
+ uint32_t pri_map = 0;
+
+ for (pri = 0; pri < BNX2X_MAX_PRIORITY; pri++) {
+ cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
+ if (cos < sc->max_cos) {
+ sc->prio_to_cos[pri] = cos;
+ } else {
+ PMD_DRV_LOG(WARNING,
+ "Invalid COS %d for priority %d "
+ "(max COS is %d), setting to 0", cos, pri,
+ (sc->max_cos - 1));
+ sc->prio_to_cos[pri] = 0;
+ }
+ }
+}
+
+static int bnx2x_pci_get_caps(struct bnx2x_softc *sc)
+{
+ struct {
+ uint8_t id;
+ uint8_t next;
+ } pci_cap;
+ uint16_t status;
+ struct bnx2x_pci_cap *cap;
+
+ cap = sc->pci_caps = rte_zmalloc("caps", sizeof(struct bnx2x_pci_cap),
+ RTE_CACHE_LINE_SIZE);
+ if (!cap) {
+ PMD_DRV_LOG(NOTICE, "Failed to allocate memory");
+ return -ENOMEM;
+ }
+
+#ifndef __FreeBSD__
+ pci_read(sc, PCI_STATUS, &status, 2);
+ if (!(status & PCI_STATUS_CAP_LIST)) {
+#else
+ pci_read(sc, PCIR_STATUS, &status, 2);
+ if (!(status & PCIM_STATUS_CAPPRESENT)) {
+#endif
+ PMD_DRV_LOG(NOTICE, "PCIe capability reading failed");
+ return -1;
+ }
+
+#ifndef __FreeBSD__
+ pci_read(sc, PCI_CAPABILITY_LIST, &pci_cap.next, 1);
+#else
+ pci_read(sc, PCIR_CAP_PTR, &pci_cap.next, 1);
+#endif
+ while (pci_cap.next) {
+ cap->addr = pci_cap.next & ~3;
+ pci_read(sc, pci_cap.next & ~3, &pci_cap, 2);
+ if (pci_cap.id == 0xff)
+ break;
+ cap->id = pci_cap.id;
+ cap->type = BNX2X_PCI_CAP;
+ cap->next = rte_zmalloc("pci_cap",
+ sizeof(struct bnx2x_pci_cap),
+ RTE_CACHE_LINE_SIZE);
+ if (!cap->next) {
+ PMD_DRV_LOG(NOTICE, "Failed to allocate memory");
+ return -ENOMEM;
+ }
+ cap = cap->next;
+ }
+
+ return 0;
+}
+
+static void bnx2x_init_rte(struct bnx2x_softc *sc)
+{
+ if (IS_VF(sc)) {
+ sc->max_tx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF,
+ sc->igu_sb_cnt);
+ sc->max_rx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF,
+ sc->igu_sb_cnt);
+ } else {
+ sc->max_rx_queues = BNX2X_MAX_RSS_COUNT(sc);
+ sc->max_tx_queues = sc->max_rx_queues;
+ }
+}
+
+#define FW_HEADER_LEN 104
+#define FW_NAME_57711 "/lib/firmware/bnx2x/bnx2x-e1h-7.2.51.0.fw"
+#define FW_NAME_57810 "/lib/firmware/bnx2x/bnx2x-e2-7.2.51.0.fw"
+
+void bnx2x_load_firmware(struct bnx2x_softc *sc)
+{
+ const char *fwname;
+ int f;
+ struct stat st;
+
+ fwname = sc->devinfo.device_id == CHIP_NUM_57711
+ ? FW_NAME_57711 : FW_NAME_57810;
+ f = open(fwname, O_RDONLY);
+ if (f < 0) {
+ PMD_DRV_LOG(NOTICE, "Can't open firmware file");
+ return;
+ }
+
+ if (fstat(f, &st) < 0) {
+ PMD_DRV_LOG(NOTICE, "Can't stat firmware file");
+ close(f);
+ return;
+ }
+
+ sc->firmware = rte_zmalloc("bnx2x_fw", st.st_size, RTE_CACHE_LINE_SIZE);
+ if (!sc->firmware) {
+ PMD_DRV_LOG(NOTICE, "Can't allocate memory for firmware");
+ close(f);
+ return;
+ }
+
+ if (read(f, sc->firmware, st.st_size) != st.st_size) {
+ PMD_DRV_LOG(NOTICE, "Can't read firmware data");
+ close(f);
+ return;
+ }
+ close(f);
+
+ sc->fw_len = st.st_size;
+ if (sc->fw_len < FW_HEADER_LEN) {
+ PMD_DRV_LOG(NOTICE, "Invalid fw size: %" PRIu64, sc->fw_len);
+ return;
+ }
+ PMD_DRV_LOG(DEBUG, "fw_len = %" PRIu64, sc->fw_len);
+}
+
+static void
+bnx2x_data_to_init_ops(uint8_t * data, struct raw_op *dst, uint32_t len)
+{
+ uint32_t *src = (uint32_t *) data;
+ uint32_t i, j, tmp;
+
+ for (i = 0, j = 0; i < len / 8; ++i, j += 2) {
+ tmp = rte_be_to_cpu_32(src[j]);
+ dst[i].op = (tmp >> 24) & 0xFF;
+ dst[i].offset = tmp & 0xFFFFFF;
+ dst[i].raw_data = rte_be_to_cpu_32(src[j + 1]);
+ }
+}
+
+static void
+bnx2x_data_to_init_offsets(uint8_t * data, uint16_t * dst, uint32_t len)
+{
+ uint16_t *src = (uint16_t *) data;
+ uint32_t i;
+
+ for (i = 0; i < len / 2; ++i)
+ dst[i] = rte_be_to_cpu_16(src[i]);
+}
+
+static void bnx2x_data_to_init_data(uint8_t * data, uint32_t * dst, uint32_t len)
+{
+ uint32_t *src = (uint32_t *) data;
+ uint32_t i;
+
+ for (i = 0; i < len / 4; ++i)
+ dst[i] = rte_be_to_cpu_32(src[i]);
+}
+
+static void bnx2x_data_to_iro_array(uint8_t * data, struct iro *dst, uint32_t len)
+{
+ uint32_t *src = (uint32_t *) data;
+ uint32_t i, j, tmp;
+
+ for (i = 0, j = 0; i < len / sizeof(struct iro); ++i, ++j) {
+ dst[i].base = rte_be_to_cpu_32(src[j++]);
+ tmp = rte_be_to_cpu_32(src[j]);
+ dst[i].m1 = (tmp >> 16) & 0xFFFF;
+ dst[i].m2 = tmp & 0xFFFF;
+ ++j;
+ tmp = rte_be_to_cpu_32(src[j]);
+ dst[i].m3 = (tmp >> 16) & 0xFFFF;
+ dst[i].size = tmp & 0xFFFF;
+ }
+}
+
+/*
+* Device attach function.
+*
+* Allocates device resources, performs secondary chip identification, and
+* initializes driver instance variables. This function is called from driver
+* load after a successful probe.
+*
+* Returns:
+* 0 = Success, >0 = Failure
+*/
+int bnx2x_attach(struct bnx2x_softc *sc)
+{
+ int rc;
+
+ PMD_DRV_LOG(DEBUG, "Starting attach...");
+
+ rc = bnx2x_pci_get_caps(sc);
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "PCIe caps reading was failed");
+ return rc;
+ }
+
+ sc->state = BNX2X_STATE_CLOSED;
+
+ pci_write_long(sc, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET);
+
+ sc->igu_base_addr = IS_VF(sc) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
+
+ /* get PCI capabilites */
+ bnx2x_probe_pci_caps(sc);
+
+ if (sc->devinfo.pcie_msix_cap_reg != 0) {
+ uint32_t val;
+ pci_read(sc,
+ (sc->devinfo.pcie_msix_cap_reg + PCIR_MSIX_CTRL), &val,
+ 2);
+ sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE) + 1;
+ } else {
+ sc->igu_sb_cnt = 1;
+ }
+
+ /* Init RTE stuff */
+ bnx2x_init_rte(sc);
+
+ if (IS_PF(sc)) {
+ /* Enable internal target-read (in case we are probed after PF
+ * FLR). Must be done prior to any BAR read access. Only for
+ * 57712 and up
+ */
+ if (!CHIP_IS_E1x(sc)) {
+ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ,
+ 1);
+ DELAY(200000);
+ }
+
+ /* get device info and set params */
+ if (bnx2x_get_device_info(sc) != 0) {
+ PMD_DRV_LOG(NOTICE, "getting device info");
+ return -ENXIO;
+ }
+
+/* get phy settings from shmem and 'and' against admin settings */
+ bnx2x_get_phy_info(sc);
+ } else {
+ /* Left mac of VF unfilled, PF should set it for VF */
+ memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN);
+ }
+
+ sc->wol = 0;
+
+ /* set the default MTU (changed via ifconfig) */
+ sc->mtu = ETHER_MTU;
+
+ bnx2x_set_modes_bitmap(sc);
+
+ /* need to reset chip if UNDI was active */
+ if (IS_PF(sc) && !BNX2X_NOMCP(sc)) {
+/* init fw_seq */
+ sc->fw_seq =
+ (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ bnx2x_prev_unload(sc);
+ }
+
+ bnx2x_dcbx_set_state(sc, FALSE, BNX2X_DCBX_ENABLED_OFF);
+
+ /* calculate qm_cid_count */
+ sc->qm_cid_count = bnx2x_set_qm_cid_count(sc);
+
+ sc->max_cos = 1;
+ bnx2x_init_multi_cos(sc);
+
+ return 0;
+}
+
+static void
+bnx2x_igu_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t segment,
+ uint16_t index, uint8_t op, uint8_t update)
+{
+ uint32_t igu_addr = sc->igu_base_addr;
+ igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
+ bnx2x_igu_ack_sb_gen(sc, segment, index, op, update, igu_addr);
+}
+
+static void
+bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t storm,
+ uint16_t index, uint8_t op, uint8_t update)
+{
+ if (unlikely(sc->devinfo.int_block == INT_BLOCK_HC))
+ bnx2x_hc_ack_sb(sc, igu_sb_id, storm, index, op, update);
+ else {
+ uint8_t segment;
+ if (CHIP_INT_MODE_IS_BC(sc)) {
+ segment = storm;
+ } else if (igu_sb_id != sc->igu_dsb_id) {
+ segment = IGU_SEG_ACCESS_DEF;
+ } else if (storm == ATTENTION_ID) {
+ segment = IGU_SEG_ACCESS_ATTN;
+ } else {
+ segment = IGU_SEG_ACCESS_DEF;
+ }
+ bnx2x_igu_ack_sb(sc, igu_sb_id, segment, index, op, update);
+ }
+}
+
+static void
+bnx2x_igu_clear_sb_gen(struct bnx2x_softc *sc, uint8_t func, uint8_t idu_sb_id,
+ uint8_t is_pf)
+{
+ uint32_t data, ctl, cnt = 100;
+ uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+ uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+ uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP +
+ (idu_sb_id / 32) * 4;
+ uint32_t sb_bit = 1 << (idu_sb_id % 32);
+ uint32_t func_encode = func |
+ (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
+ uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
+
+ /* Not supported in BC mode */
+ if (CHIP_INT_MODE_IS_BC(sc)) {
+ return;
+ }
+
+ data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
+ IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
+ IGU_REGULAR_CLEANUP_SET | IGU_REGULAR_BCLEANUP);
+
+ ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
+ (func_encode << IGU_CTRL_REG_FID_SHIFT) |
+ (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
+
+ REG_WR(sc, igu_addr_data, data);
+
+ mb();
+
+ PMD_DRV_LOG(DEBUG, "write 0x%08x to IGU(via GRC) addr 0x%x",
+ ctl, igu_addr_ctl);
+ REG_WR(sc, igu_addr_ctl, ctl);
+
+ mb();
+
+ /* wait for clean up to finish */
+ while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
+ DELAY(20000);
+ }
+
+ if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
+ PMD_DRV_LOG(DEBUG,
+ "Unable to finish IGU cleanup: "
+ "idu_sb_id %d offset %d bit %d (cnt %d)",
+ idu_sb_id, idu_sb_id / 32, idu_sb_id % 32, cnt);
+ }
+}
+
+static void bnx2x_igu_clear_sb(struct bnx2x_softc *sc, uint8_t idu_sb_id)
+{
+ bnx2x_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
+}
+
+/*******************/
+/* ECORE CALLBACKS */
+/*******************/
+
+static void bnx2x_reset_common(struct bnx2x_softc *sc)
+{
+ uint32_t val = 0x1400;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* reset_common */
+ REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR),
+ 0xd3ffff7f);
+
+ if (CHIP_IS_E3(sc)) {
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+
+ REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
+}
+
+static void bnx2x_common_init_phy(struct bnx2x_softc *sc)
+{
+ uint32_t shmem_base[2];
+ uint32_t shmem2_base[2];
+
+ /* Avoid common init in case MFW supports LFA */
+ if (SHMEM2_RD(sc, size) >
+ (uint32_t) offsetof(struct shmem2_region,
+ lfa_host_addr[SC_PORT(sc)])) {
+ return;
+ }
+
+ shmem_base[0] = sc->devinfo.shmem_base;
+ shmem2_base[0] = sc->devinfo.shmem2_base;
+
+ if (!CHIP_IS_E1x(sc)) {
+ shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr);
+ shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
+ }
+
+ elink_common_init_phy(sc, shmem_base, shmem2_base,
+ sc->devinfo.chip_id, 0);
+}
+
+static void bnx2x_pf_disable(struct bnx2x_softc *sc)
+{
+ uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
+
+ val &= ~IGU_PF_CONF_FUNC_EN;
+
+ REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
+ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
+}
+
+static void bnx2x_init_pxp(struct bnx2x_softc *sc)
+{
+ uint16_t devctl;
+ int r_order, w_order;
+
+ devctl = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL);
+
+ w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
+ r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
+
+ ecore_init_pxp_arb(sc, r_order, w_order);
+}
+
+static uint32_t bnx2x_get_pretend_reg(struct bnx2x_softc *sc)
+{
+ uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
+ uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
+ return base + (SC_ABS_FUNC(sc)) * stride;
+}
+
+/*
+ * Called only on E1H or E2.
+ * When pretending to be PF, the pretend value is the function number 0..7.
+ * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
+ * combination.
+ */
+static int bnx2x_pretend_func(struct bnx2x_softc *sc, uint16_t pretend_func_val)
+{
+ uint32_t pretend_reg;
+
+ if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX))
+ return -1;
+
+ /* get my own pretend register */
+ pretend_reg = bnx2x_get_pretend_reg(sc);
+ REG_WR(sc, pretend_reg, pretend_func_val);
+ REG_RD(sc, pretend_reg);
+ return 0;
+}
+
+static void bnx2x_setup_fan_failure_detection(struct bnx2x_softc *sc)
+{
+ int is_required;
+ uint32_t val;
+ int port;
+
+ is_required = 0;
+ val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
+ SHARED_HW_CFG_FAN_FAILURE_MASK);
+
+ if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
+ is_required = 1;
+ }
+ /*
+ * The fan failure mechanism is usually related to the PHY type since
+ * the power consumption of the board is affected by the PHY. Currently,
+ * fan is required for most designs with SFX7101, BNX2X8727 and BNX2X8481.
+ */
+ else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
+ for (port = PORT_0; port < PORT_MAX; port++) {
+ is_required |= elink_fan_failure_det_req(sc,
+ sc->
+ devinfo.shmem_base,
+ sc->
+ devinfo.shmem2_base,
+ port);
+ }
+ }
+
+ if (is_required == 0) {
+ return;
+ }
+
+ /* Fan failure is indicated by SPIO 5 */
+ bnx2x_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
+
+ /* set to active low mode */
+ val = REG_RD(sc, MISC_REG_SPIO_INT);
+ val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
+ REG_WR(sc, MISC_REG_SPIO_INT, val);
+
+ /* enable interrupt to signal the IGU */
+ val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
+ val |= MISC_SPIO_SPIO5;
+ REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
+}
+
+static void bnx2x_enable_blocks_attention(struct bnx2x_softc *sc)
+{
+ uint32_t val;
+
+ REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
+ if (!CHIP_IS_E1x(sc)) {
+ REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
+ } else {
+ REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
+ }
+ REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
+ REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
+ /*
+ * mask read length error interrupts in brb for parser
+ * (parsing unit and 'checksum and crc' unit)
+ * these errors are legal (PU reads fixed length and CAC can cause
+ * read length error on truncated packets)
+ */
+ REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
+ REG_WR(sc, QM_REG_QM_INT_MASK, 0);
+ REG_WR(sc, TM_REG_TM_INT_MASK, 0);
+ REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
+ REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
+ REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
+ /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
+ /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
+ REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
+ REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
+ REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
+ /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
+ /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
+ REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
+ REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
+ REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
+ REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
+ /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
+ /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
+
+ val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
+ PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
+ PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
+ if (!CHIP_IS_E1x(sc)) {
+ val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
+ PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
+ }
+ REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
+
+ REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
+ REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
+ REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
+ /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
+
+ if (!CHIP_IS_E1x(sc)) {
+/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
+ REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
+ }
+
+ REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
+ REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
+ /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
+ REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
+}
+
+/**
+ * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
+ *
+ * @sc: driver handle
+ */
+static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
+{
+ uint8_t abs_func_id;
+ uint32_t val;
+
+ PMD_DRV_LOG(DEBUG, "starting common init for func %d", SC_ABS_FUNC(sc));
+
+ /*
+ * take the RESET lock to protect undi_unload flow from accessing
+ * registers while we are resetting the chip
+ */
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
+
+ bnx2x_reset_common(sc);
+
+ REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
+
+ val = 0xfffc;
+ if (CHIP_IS_E3(sc)) {
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+
+ REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
+
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
+
+ ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(sc)) {
+/*
+ * 4-port mode or 2-port mode we need to turn off master-enable for
+ * everyone. After that we turn it back on for self. So, we disregard
+ * multi-function, and always disable all functions on the given path,
+ * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
+ */
+ for (abs_func_id = SC_PATH(sc);
+ abs_func_id < (E2_FUNC_MAX * 2); abs_func_id += 2) {
+ if (abs_func_id == SC_ABS_FUNC(sc)) {
+ REG_WR(sc,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
+ 1);
+ continue;
+ }
+
+ bnx2x_pretend_func(sc, abs_func_id);
+
+ /* clear pf enable */
+ bnx2x_pf_disable(sc);
+
+ bnx2x_pretend_func(sc, SC_ABS_FUNC(sc));
+ }
+ }
+
+ ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
+
+ ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
+ bnx2x_init_pxp(sc);
+
+#ifdef __BIG_ENDIAN
+ REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
+ REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
+ REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
+ REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
+ REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
+ /* make sure this value is 0 */
+ REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+ //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
+ REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
+ REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
+ REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
+ REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
+#endif
+
+ ecore_ilt_init_page_size(sc, INITOP_SET);
+
+ if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
+ REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
+ }
+
+ /* let the HW do it's magic... */
+ DELAY(100000);
+
+ /* finish PXP init */
+
+ val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
+ if (val != 1) {
+ PMD_DRV_LOG(NOTICE, "PXP2 CFG failed");
+ return -1;
+ }
+ val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
+ if (val != 1) {
+ PMD_DRV_LOG(NOTICE, "PXP2 RD_INIT failed");
+ return -1;
+ }
+
+ /*
+ * Timer bug workaround for E2 only. We need to set the entire ILT to have
+ * entries with value "0" and valid bit on. This needs to be done by the
+ * first PF that is loaded in a path (i.e. common phase)
+ */
+ if (!CHIP_IS_E1x(sc)) {
+/*
+ * In E2 there is a bug in the timers block that can cause function 6 / 7
+ * (i.e. vnic3) to start even if it is marked as "scan-off".
+ * This occurs when a different function (func2,3) is being marked
+ * as "scan-off". Real-life scenario for example: if a driver is being
+ * load-unloaded while func6,7 are down. This will cause the timer to access
+ * the ilt, translate to a logical address and send a request to read/write.
+ * Since the ilt for the function that is down is not valid, this will cause
+ * a translation error which is unrecoverable.
+ * The Workaround is intended to make sure that when this happens nothing
+ * fatal will occur. The workaround:
+ * 1. First PF driver which loads on a path will:
+ * a. After taking the chip out of reset, by using pretend,
+ * it will write "0" to the following registers of
+ * the other vnics.
+ * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
+ * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
+ * And for itself it will write '1' to
+ * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
+ * dmae-operations (writing to pram for example.)
+ * note: can be done for only function 6,7 but cleaner this
+ * way.
+ * b. Write zero+valid to the entire ILT.
+ * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
+ * VNIC3 (of that port). The range allocated will be the
+ * entire ILT. This is needed to prevent ILT range error.
+ * 2. Any PF driver load flow:
+ * a. ILT update with the physical addresses of the allocated
+ * logical pages.
+ * b. Wait 20msec. - note that this timeout is needed to make
+ * sure there are no requests in one of the PXP internal
+ * queues with "old" ILT addresses.
+ * c. PF enable in the PGLC.
+ * d. Clear the was_error of the PF in the PGLC. (could have
+ * occurred while driver was down)
+ * e. PF enable in the CFC (WEAK + STRONG)
+ * f. Timers scan enable
+ * 3. PF driver unload flow:
+ * a. Clear the Timers scan_en.
+ * b. Polling for scan_on=0 for that PF.
+ * c. Clear the PF enable bit in the PXP.
+ * d. Clear the PF enable in the CFC (WEAK + STRONG)
+ * e. Write zero+valid to all ILT entries (The valid bit must
+ * stay set)
+ * f. If this is VNIC 3 of a port then also init
+ * first_timers_ilt_entry to zero and last_timers_ilt_entry
+ * to the last enrty in the ILT.
+ *
+ * Notes:
+ * Currently the PF error in the PGLC is non recoverable.
+ * In the future the there will be a recovery routine for this error.
+ * Currently attention is masked.
+ * Having an MCP lock on the load/unload process does not guarantee that
+ * there is no Timer disable during Func6/7 enable. This is because the
+ * Timers scan is currently being cleared by the MCP on FLR.
+ * Step 2.d can be done only for PF6/7 and the driver can also check if
+ * there is error before clearing it. But the flow above is simpler and
+ * more general.
+ * All ILT entries are written by zero+valid and not just PF6/7
+ * ILT entries since in the future the ILT entries allocation for
+ * PF-s might be dynamic.
+ */
+ struct ilt_client_info ilt_cli;
+ struct ecore_ilt ilt;
+
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+ memset(&ilt, 0, sizeof(struct ecore_ilt));
+
+/* initialize dummy TM client */
+ ilt_cli.start = 0;
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+ ilt_cli.client_num = ILT_CLIENT_TM;
+
+/*
+ * Step 1: set zeroes to all ilt page entries with valid bit on
+ * Step 2: set the timers first/last ilt entry to point
+ * to the entire range to prevent ILT range error for 3rd/4th
+ * vnic (this code assumes existence of the vnic)
+ *
+ * both steps performed by call to ecore_ilt_client_init_op()
+ * with dummy TM client
+ *
+ * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
+ * and his brother are split registers
+ */
+
+ bnx2x_pretend_func(sc, (SC_PATH(sc) + 6));
+ ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
+ bnx2x_pretend_func(sc, SC_ABS_FUNC(sc));
+
+ REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
+ REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
+ REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
+ }
+
+ REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
+ REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
+
+ if (!CHIP_IS_E1x(sc)) {
+ int factor = 0;
+
+ ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
+ ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
+
+/* let the HW do it's magic... */
+ do {
+ DELAY(200000);
+ val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
+ } while (factor-- && (val != 1));
+
+ if (val != 1) {
+ PMD_DRV_LOG(NOTICE, "ATC_INIT failed");
+ return -1;
+ }
+ }
+
+ ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
+
+ /* clean the DMAE memory */
+ sc->dmae_ready = 1;
+ ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8);
+
+ ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
+
+ ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
+
+ ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
+
+ ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
+
+ bnx2x_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
+
+ ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
+
+ /* QM queues pointers table */
+ ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
+
+ /* soft reset pulse */
+ REG_WR(sc, QM_REG_SOFT_RESET, 1);
+ REG_WR(sc, QM_REG_SOFT_RESET, 0);
+
+ if (CNIC_SUPPORT(sc))
+ ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
+
+ ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
+ REG_WR(sc, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+
+ if (!CHIP_REV_IS_SLOW(sc)) {
+/* enable hw interrupt from doorbell Q */
+ REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
+ }
+
+ ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
+
+ ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
+ REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
+ REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
+
+ if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
+ if (IS_MF_AFEX(sc)) {
+ /*
+ * configure that AFEX and VLAN headers must be
+ * received in AFEX mode
+ */
+ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
+ REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
+ REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
+ REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
+ REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
+ } else {
+ /*
+ * Bit-map indicating which L2 hdrs may appear
+ * after the basic Ethernet header
+ */
+ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
+ sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
+ }
+ }
+
+ ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
+ ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
+ ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
+ ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(sc)) {
+/* reset VFC memories */
+ REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+ VFC_MEMORIES_RST_REG_CAM_RST |
+ VFC_MEMORIES_RST_REG_RAM_RST);
+ REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+ VFC_MEMORIES_RST_REG_CAM_RST |
+ VFC_MEMORIES_RST_REG_RAM_RST);
+
+ DELAY(20000);
+ }
+
+ ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
+ ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
+ ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
+ ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
+
+ /* sync semi rtc */
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000);
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000);
+
+ ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
+ ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
+ ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(sc)) {
+ if (IS_MF_AFEX(sc)) {
+ /*
+ * configure that AFEX and VLAN headers must be
+ * sent in AFEX mode
+ */
+ REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
+ REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
+ REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
+ REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
+ REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
+ } else {
+ REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
+ sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
+ }
+ }
+
+ REG_WR(sc, SRC_REG_SOFT_RST, 1);
+
+ ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
+
+ if (CNIC_SUPPORT(sc)) {
+ REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
+ REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+ REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+ REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+ REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+ REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+ REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+ REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+ REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+ REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+ }
+ REG_WR(sc, SRC_REG_SOFT_RST, 0);
+
+ if (sizeof(union cdu_context) != 1024) {
+/* we currently assume that a context is 1024 bytes */
+ PMD_DRV_LOG(NOTICE,
+ "please adjust the size of cdu_context(%ld)",
+ (long)sizeof(union cdu_context));
+ }
+
+ ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
+ val = (4 << 24) + (0 << 12) + 1024;
+ REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
+
+ ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
+
+ REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
+ /* enable context validation interrupt from CFC */
+ REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
+
+ /* set the thresholds to prevent CFC/CDU race */
+ REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
+ ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(sc) && BNX2X_NOMCP(sc)) {
+ REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
+ }
+
+ ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
+ ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
+
+ /* Reset PCIE errors for debug */
+ REG_WR(sc, 0x2814, 0xffffffff);
+ REG_WR(sc, 0x3820, 0xffffffff);
+
+ if (!CHIP_IS_E1x(sc)) {
+ REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
+ (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
+ PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
+ REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
+ (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
+ REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
+ (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
+ }
+
+ ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
+
+ /* in E3 this done in per-port section */
+ if (!CHIP_IS_E3(sc))
+ REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
+
+ if (CHIP_IS_E1H(sc)) {
+/* not applicable for E2 (and above ...) */
+ REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
+ }
+
+ if (CHIP_REV_IS_SLOW(sc)) {
+ DELAY(200000);
+ }
+
+ /* finish CFC init */
+ val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ PMD_DRV_LOG(NOTICE, "CFC LL_INIT failed");
+ return -1;
+ }
+ val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ PMD_DRV_LOG(NOTICE, "CFC AC_INIT failed");
+ return -1;
+ }
+ val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ PMD_DRV_LOG(NOTICE, "CFC CAM_INIT failed");
+ return -1;
+ }
+ REG_WR(sc, CFC_REG_DEBUG0, 0);
+
+ bnx2x_setup_fan_failure_detection(sc);
+
+ /* clear PXP2 attentions */
+ REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
+
+ bnx2x_enable_blocks_attention(sc);
+
+ if (!CHIP_REV_IS_SLOW(sc)) {
+ ecore_enable_blocks_parity(sc);
+ }
+
+ if (!BNX2X_NOMCP(sc)) {
+ if (CHIP_IS_E1x(sc)) {
+ bnx2x_common_init_phy(sc);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
+ *
+ * @sc: driver handle
+ */
+static int bnx2x_init_hw_common_chip(struct bnx2x_softc *sc)
+{
+ int rc = bnx2x_init_hw_common(sc);
+
+ if (rc) {
+ return rc;
+ }
+
+ /* In E2 2-PORT mode, same ext phy is used for the two paths */
+ if (!BNX2X_NOMCP(sc)) {
+ bnx2x_common_init_phy(sc);
+ }
+
+ return 0;
+}
+
+static int bnx2x_init_hw_port(struct bnx2x_softc *sc)
+{
+ int port = SC_PORT(sc);
+ int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
+ uint32_t low, high;
+ uint32_t val;
+
+ PMD_DRV_LOG(DEBUG, "starting port init for port %d", port);
+
+ REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0);
+
+ ecore_init_block(sc, BLOCK_MISC, init_phase);
+ ecore_init_block(sc, BLOCK_PXP, init_phase);
+ ecore_init_block(sc, BLOCK_PXP2, init_phase);
+
+ /*
+ * Timers bug workaround: disables the pf_master bit in pglue at
+ * common phase, we need to enable it here before any dmae access are
+ * attempted. Therefore we manually added the enable-master to the
+ * port phase (it also happens in the function phase)
+ */
+ if (!CHIP_IS_E1x(sc)) {
+ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+ }
+
+ ecore_init_block(sc, BLOCK_ATC, init_phase);
+ ecore_init_block(sc, BLOCK_DMAE, init_phase);
+ ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
+ ecore_init_block(sc, BLOCK_QM, init_phase);
+
+ ecore_init_block(sc, BLOCK_TCM, init_phase);
+ ecore_init_block(sc, BLOCK_UCM, init_phase);
+ ecore_init_block(sc, BLOCK_CCM, init_phase);
+ ecore_init_block(sc, BLOCK_XCM, init_phase);
+
+ /* QM cid (connection) count */
+ ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
+
+ if (CNIC_SUPPORT(sc)) {
+ ecore_init_block(sc, BLOCK_TM, init_phase);
+ REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port * 4, 20);
+ REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port * 4, 31);
+ }
+
+ ecore_init_block(sc, BLOCK_DORQ, init_phase);
+
+ ecore_init_block(sc, BLOCK_BRB1, init_phase);
+
+ if (CHIP_IS_E1H(sc)) {
+ if (IS_MF(sc)) {
+ low = (BNX2X_ONE_PORT(sc) ? 160 : 246);
+ } else if (sc->mtu > 4096) {
+ if (BNX2X_ONE_PORT(sc)) {
+ low = 160;
+ } else {
+ val = sc->mtu;
+ /* (24*1024 + val*4)/256 */
+ low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
+ }
+ } else {
+ low = (BNX2X_ONE_PORT(sc) ? 80 : 160);
+ }
+ high = (low + 56); /* 14*1024/256 */
+ REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port * 4, low);
+ REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port * 4, high);
+ }
+
+ if (CHIP_IS_MODE_4_PORT(sc)) {
+ REG_WR(sc, SC_PORT(sc) ?
+ BRB1_REG_MAC_GUARANTIED_1 :
+ BRB1_REG_MAC_GUARANTIED_0, 40);
+ }
+
+ ecore_init_block(sc, BLOCK_PRS, init_phase);
+ if (CHIP_IS_E3B0(sc)) {
+ if (IS_MF_AFEX(sc)) {
+ /* configure headers for AFEX mode */
+ if (SC_PORT(sc)) {
+ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC_PORT_1,
+ 0xE);
+ REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0_PORT_1,
+ 0x6);
+ REG_WR(sc, PRS_REG_MUST_HAVE_HDRS_PORT_1, 0xA);
+ } else {
+ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+ 0xE);
+ REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0_PORT_0,
+ 0x6);
+ REG_WR(sc, PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
+ }
+ } else {
+ /* Ovlan exists only if we are in multi-function +
+ * switch-dependent mode, in switch-independent there
+ * is no ovlan headers
+ */
+ REG_WR(sc, SC_PORT(sc) ?
+ PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+ PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+ (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
+ }
+ }
+
+ ecore_init_block(sc, BLOCK_TSDM, init_phase);
+ ecore_init_block(sc, BLOCK_CSDM, init_phase);
+ ecore_init_block(sc, BLOCK_USDM, init_phase);
+ ecore_init_block(sc, BLOCK_XSDM, init_phase);
+
+ ecore_init_block(sc, BLOCK_TSEM, init_phase);
+ ecore_init_block(sc, BLOCK_USEM, init_phase);
+ ecore_init_block(sc, BLOCK_CSEM, init_phase);
+ ecore_init_block(sc, BLOCK_XSEM, init_phase);
+
+ ecore_init_block(sc, BLOCK_UPB, init_phase);
+ ecore_init_block(sc, BLOCK_XPB, init_phase);
+
+ ecore_init_block(sc, BLOCK_PBF, init_phase);
+
+ if (CHIP_IS_E1x(sc)) {
+/* configure PBF to work without PAUSE mtu 9000 */
+ REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 0);
+
+/* update threshold */
+ REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, (9040 / 16));
+/* update init credit */
+ REG_WR(sc, PBF_REG_P0_INIT_CRD + port * 4,
+ (9040 / 16) + 553 - 22);
+
+/* probe changes */
+ REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 1);
+ DELAY(50);
+ REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0);
+ }
+
+ if (CNIC_SUPPORT(sc)) {
+ ecore_init_block(sc, BLOCK_SRC, init_phase);
+ }
+
+ ecore_init_block(sc, BLOCK_CDU, init_phase);
+ ecore_init_block(sc, BLOCK_CFC, init_phase);
+ ecore_init_block(sc, BLOCK_HC, init_phase);
+ ecore_init_block(sc, BLOCK_IGU, init_phase);
+ ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
+ /* init aeu_mask_attn_func_0/1:
+ * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
+ * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
+ * bits 4-7 are used for "per vn group attention" */
+ val = IS_MF(sc) ? 0xF7 : 0x7;
+ val |= 0x10;
+ REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, val);
+
+ ecore_init_block(sc, BLOCK_NIG, init_phase);
+
+ if (!CHIP_IS_E1x(sc)) {
+/* Bit-map indicating which L2 hdrs may appear after the
+ * basic Ethernet header
+ */
+ if (IS_MF_AFEX(sc)) {
+ REG_WR(sc, SC_PORT(sc) ?
+ NIG_REG_P1_HDRS_AFTER_BASIC :
+ NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
+ } else {
+ REG_WR(sc, SC_PORT(sc) ?
+ NIG_REG_P1_HDRS_AFTER_BASIC :
+ NIG_REG_P0_HDRS_AFTER_BASIC,
+ IS_MF_SD(sc) ? 7 : 6);
+ }
+
+ if (CHIP_IS_E3(sc)) {
+ REG_WR(sc, SC_PORT(sc) ?
+ NIG_REG_LLH1_MF_MODE :
+ NIG_REG_LLH_MF_MODE, IS_MF(sc));
+ }
+ }
+ if (!CHIP_IS_E3(sc)) {
+ REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1);
+ }
+
+ /* 0x2 disable mf_ov, 0x1 enable */
+ REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port * 4,
+ (IS_MF_SD(sc) ? 0x1 : 0x2));
+
+ if (!CHIP_IS_E1x(sc)) {
+ val = 0;
+ switch (sc->devinfo.mf_info.mf_mode) {
+ case MULTI_FUNCTION_SD:
+ val = 1;
+ break;
+ case MULTI_FUNCTION_SI:
+ case MULTI_FUNCTION_AFEX:
+ val = 2;
+ break;
+ }
+
+ REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
+ NIG_REG_LLH0_CLS_TYPE), val);
+ }
+ REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port * 4, 0);
+ REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port * 4, 0);
+ REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port * 4, 1);
+
+ /* If SPIO5 is set to generate interrupts, enable it for this port */
+ val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
+ if (val & MISC_SPIO_SPIO5) {
+ uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+ val = REG_RD(sc, reg_addr);
+ val |= AEU_INPUTS_ATTN_BITS_SPIO5;
+ REG_WR(sc, reg_addr, val);
+ }
+
+ return 0;
+}
+
+static uint32_t
+bnx2x_flr_clnup_reg_poll(struct bnx2x_softc *sc, uint32_t reg,
+ uint32_t expected, uint32_t poll_count)
+{
+ uint32_t cur_cnt = poll_count;
+ uint32_t val;
+
+ while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
+ DELAY(FLR_WAIT_INTERVAL);
+ }
+
+ return val;
+}
+
+static int
+bnx2x_flr_clnup_poll_hw_counter(struct bnx2x_softc *sc, uint32_t reg,
+ __rte_unused const char *msg, uint32_t poll_cnt)
+{
+ uint32_t val = bnx2x_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
+
+ if (val != 0) {
+ PMD_DRV_LOG(NOTICE, "%s usage count=%d", msg, val);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Common routines with VF FLR cleanup */
+static uint32_t bnx2x_flr_clnup_poll_count(struct bnx2x_softc *sc)
+{
+ /* adjust polling timeout */
+ if (CHIP_REV_IS_EMUL(sc)) {
+ return FLR_POLL_CNT * 2000;
+ }
+
+ if (CHIP_REV_IS_FPGA(sc)) {
+ return FLR_POLL_CNT * 120;
+ }
+
+ return FLR_POLL_CNT;
+}
+
+static int bnx2x_poll_hw_usage_counters(struct bnx2x_softc *sc, uint32_t poll_cnt)
+{
+ /* wait for CFC PF usage-counter to zero (includes all the VFs) */
+ if (bnx2x_flr_clnup_poll_hw_counter(sc,
+ CFC_REG_NUM_LCIDS_INSIDE_PF,
+ "CFC PF usage counter timed out",
+ poll_cnt)) {
+ return -1;
+ }
+
+ /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(sc,
+ DORQ_REG_PF_USAGE_CNT,
+ "DQ PF usage counter timed out",
+ poll_cnt)) {
+ return -1;
+ }
+
+ /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(sc,
+ QM_REG_PF_USG_CNT_0 + 4 * SC_FUNC(sc),
+ "QM PF usage counter timed out",
+ poll_cnt)) {
+ return -1;
+ }
+
+ /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(sc,
+ TM_REG_LIN0_VNIC_UC + 4 * SC_PORT(sc),
+ "Timers VNIC usage counter timed out",
+ poll_cnt)) {
+ return -1;
+ }
+
+ if (bnx2x_flr_clnup_poll_hw_counter(sc,
+ TM_REG_LIN0_NUM_SCANS +
+ 4 * SC_PORT(sc),
+ "Timers NUM_SCANS usage counter timed out",
+ poll_cnt)) {
+ return -1;
+ }
+
+ /* Wait DMAE PF usage counter to zero */
+ if (bnx2x_flr_clnup_poll_hw_counter(sc,
+ dmae_reg_go_c[INIT_DMAE_C(sc)],
+ "DMAE dommand register timed out",
+ poll_cnt)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+#define OP_GEN_PARAM(param) \
+ (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
+#define OP_GEN_TYPE(type) \
+ (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
+#define OP_GEN_AGG_VECT(index) \
+ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
+
+static int
+bnx2x_send_final_clnup(struct bnx2x_softc *sc, uint8_t clnup_func,
+ uint32_t poll_cnt)
+{
+ uint32_t op_gen_command = 0;
+ uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
+ CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
+ int ret = 0;
+
+ if (REG_RD(sc, comp_addr)) {
+ PMD_DRV_LOG(NOTICE,
+ "Cleanup complete was not 0 before sending");
+ return -1;
+ }
+
+ op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+ op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+ op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
+ op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+
+ REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
+
+ if (bnx2x_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
+ PMD_DRV_LOG(NOTICE, "FW final cleanup did not succeed");
+ PMD_DRV_LOG(DEBUG, "At timeout completion address contained %x",
+ (REG_RD(sc, comp_addr)));
+ rte_panic("FLR cleanup failed");
+ return -1;
+ }
+
+ /* Zero completion for nxt FLR */
+ REG_WR(sc, comp_addr, 0);
+
+ return ret;
+}
+
+static void
+bnx2x_pbf_pN_buf_flushed(struct bnx2x_softc *sc, struct pbf_pN_buf_regs *regs,
+ uint32_t poll_count)
+{
+ uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
+ uint32_t cur_cnt = poll_count;
+
+ crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
+ crd = crd_start = REG_RD(sc, regs->crd);
+ init_crd = REG_RD(sc, regs->init_crd);
+
+ while ((crd != init_crd) &&
+ ((uint32_t) ((int32_t) crd_freed - (int32_t) crd_freed_start) <
+ (init_crd - crd_start))) {
+ if (cur_cnt--) {
+ DELAY(FLR_WAIT_INTERVAL);
+ crd = REG_RD(sc, regs->crd);
+ crd_freed = REG_RD(sc, regs->crd_freed);
+ } else {
+ break;
+ }
+ }
+}
+
+static void
+bnx2x_pbf_pN_cmd_flushed(struct bnx2x_softc *sc, struct pbf_pN_cmd_regs *regs,
+ uint32_t poll_count)
+{
+ uint32_t occup, to_free, freed, freed_start;
+ uint32_t cur_cnt = poll_count;
+
+ occup = to_free = REG_RD(sc, regs->lines_occup);
+ freed = freed_start = REG_RD(sc, regs->lines_freed);
+
+ while (occup &&
+ ((uint32_t) ((int32_t) freed - (int32_t) freed_start) <
+ to_free)) {
+ if (cur_cnt--) {
+ DELAY(FLR_WAIT_INTERVAL);
+ occup = REG_RD(sc, regs->lines_occup);
+ freed = REG_RD(sc, regs->lines_freed);
+ } else {
+ break;
+ }
+ }
+}
+
+static void bnx2x_tx_hw_flushed(struct bnx2x_softc *sc, uint32_t poll_count)
+{
+ struct pbf_pN_cmd_regs cmd_regs[] = {
+ {0, (CHIP_IS_E3B0(sc)) ?
+ PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(sc)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT},
+ {1, (CHIP_IS_E3B0(sc)) ?
+ PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(sc)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT},
+ {4, (CHIP_IS_E3B0(sc)) ?
+ PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(sc)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
+ PBF_REG_P4_TQ_LINES_FREED_CNT}
+ };
+
+ struct pbf_pN_buf_regs buf_regs[] = {
+ {0, (CHIP_IS_E3B0(sc)) ?
+ PBF_REG_INIT_CRD_Q0 : PBF_REG_P0_INIT_CRD,
+ (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q0 : PBF_REG_P0_CREDIT,
+ (CHIP_IS_E3B0(sc)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
+ PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
+ {1, (CHIP_IS_E3B0(sc)) ?
+ PBF_REG_INIT_CRD_Q1 : PBF_REG_P1_INIT_CRD,
+ (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q1 : PBF_REG_P1_CREDIT,
+ (CHIP_IS_E3B0(sc)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
+ PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
+ {4, (CHIP_IS_E3B0(sc)) ?
+ PBF_REG_INIT_CRD_LB_Q : PBF_REG_P4_INIT_CRD,
+ (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_LB_Q : PBF_REG_P4_CREDIT,
+ (CHIP_IS_E3B0(sc)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
+ PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
+ };
+
+ uint32_t i;
+
+ /* Verify the command queues are flushed P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
+ bnx2x_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
+ }
+
+ /* Verify the transmission buffers are flushed P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
+ bnx2x_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
+ }
+}
+
+static void bnx2x_hw_enable_status(struct bnx2x_softc *sc)
+{
+ __rte_unused uint32_t val;
+
+ val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
+ PMD_DRV_LOG(DEBUG, "CFC_REG_WEAK_ENABLE_PF is 0x%x", val);
+
+ val = REG_RD(sc, PBF_REG_DISABLE_PF);
+ PMD_DRV_LOG(DEBUG, "PBF_REG_DISABLE_PF is 0x%x", val);
+
+ val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
+ PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSI_EN is 0x%x", val);
+
+ val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
+ PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSIX_EN is 0x%x", val);
+
+ val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
+ PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x", val);
+
+ val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
+ PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x", val);
+
+ val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
+ PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x", val);
+
+ val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x",
+ val);
+}
+
+/**
+ * bnx2x_pf_flr_clnup
+ * a. re-enable target read on the PF
+ * b. poll cfc per function usgae counter
+ * c. poll the qm perfunction usage counter
+ * d. poll the tm per function usage counter
+ * e. poll the tm per function scan-done indication
+ * f. clear the dmae channel associated wit hthe PF
+ * g. zero the igu 'trailing edge' and 'leading edge' regs (attentions)
+ * h. call the common flr cleanup code with -1 (pf indication)
+ */
+static int bnx2x_pf_flr_clnup(struct bnx2x_softc *sc)
+{
+ uint32_t poll_cnt = bnx2x_flr_clnup_poll_count(sc);
+
+ /* Re-enable PF target read access */
+ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+ /* Poll HW usage counters */
+ if (bnx2x_poll_hw_usage_counters(sc, poll_cnt)) {
+ return -1;
+ }
+
+ /* Zero the igu 'trailing edge' and 'leading edge' */
+
+ /* Send the FW cleanup command */
+ if (bnx2x_send_final_clnup(sc, (uint8_t) SC_FUNC(sc), poll_cnt)) {
+ return -1;
+ }
+
+ /* ATC cleanup */
+
+ /* Verify TX hw is flushed */
+ bnx2x_tx_hw_flushed(sc, poll_cnt);
+
+ /* Wait 100ms (not adjusted according to platform) */
+ DELAY(100000);
+
+ /* Verify no pending pci transactions */
+ if (bnx2x_is_pcie_pending(sc)) {
+ PMD_DRV_LOG(NOTICE, "PCIE Transactions still pending");
+ }
+
+ /* Debug */
+ bnx2x_hw_enable_status(sc);
+
+ /*
+ * Master enable - Due to WB DMAE writes performed before this
+ * register is re-initialized as part of the regular function init
+ */
+ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+ return 0;
+}
+
+static int bnx2x_init_hw_func(struct bnx2x_softc *sc)
+{
+ int port = SC_PORT(sc);
+ int func = SC_FUNC(sc);
+ int init_phase = PHASE_PF0 + func;
+ struct ecore_ilt *ilt = sc->ilt;
+ uint16_t cdu_ilt_start;
+ uint32_t addr, val;
+ uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
+ int main_mem_width, rc;
+ uint32_t i;
+
+ PMD_DRV_LOG(DEBUG, "starting func init for func %d", func);
+
+ /* FLR cleanup */
+ if (!CHIP_IS_E1x(sc)) {
+ rc = bnx2x_pf_flr_clnup(sc);
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "FLR cleanup failed!");
+ return rc;
+ }
+ }
+
+ /* set MSI reconfigure capability */
+ if (sc->devinfo.int_block == INT_BLOCK_HC) {
+ addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
+ val = REG_RD(sc, addr);
+ val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
+ REG_WR(sc, addr, val);
+ }
+
+ ecore_init_block(sc, BLOCK_PXP, init_phase);
+ ecore_init_block(sc, BLOCK_PXP2, init_phase);
+
+ ilt = sc->ilt;
+ cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+
+ for (i = 0; i < L2_ILT_LINES(sc); i++) {
+ ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
+ ilt->lines[cdu_ilt_start + i].page_mapping =
+ (rte_iova_t)sc->context[i].vcxt_dma.paddr;
+ ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
+ }
+ ecore_ilt_init_op(sc, INITOP_SET);
+
+ REG_WR(sc, PRS_REG_NIC_MODE, 1);
+
+ if (!CHIP_IS_E1x(sc)) {
+ uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
+
+/* Turn on a single ISR mode in IGU if driver is going to use
+ * INT#x or MSI
+ */
+ if ((sc->interrupt_mode != INTR_MODE_MSIX)
+ || (sc->interrupt_mode != INTR_MODE_SINGLE_MSIX)) {
+ pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ }
+
+/*
+ * Timers workaround bug: function init part.
+ * Need to wait 20msec after initializing ILT,
+ * needed to make sure there are no requests in
+ * one of the PXP internal queues with "old" ILT addresses
+ */
+ DELAY(20000);
+
+/*
+ * Master enable - Due to WB DMAE writes performed before this
+ * register is re-initialized as part of the regular function
+ * init
+ */
+ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+/* Enable the function in IGU */
+ REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
+ }
+
+ sc->dmae_ready = 1;
+
+ ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
+
+ if (!CHIP_IS_E1x(sc))
+ REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
+
+ ecore_init_block(sc, BLOCK_ATC, init_phase);
+ ecore_init_block(sc, BLOCK_DMAE, init_phase);
+ ecore_init_block(sc, BLOCK_NIG, init_phase);
+ ecore_init_block(sc, BLOCK_SRC, init_phase);
+ ecore_init_block(sc, BLOCK_MISC, init_phase);
+ ecore_init_block(sc, BLOCK_TCM, init_phase);
+ ecore_init_block(sc, BLOCK_UCM, init_phase);
+ ecore_init_block(sc, BLOCK_CCM, init_phase);
+ ecore_init_block(sc, BLOCK_XCM, init_phase);
+ ecore_init_block(sc, BLOCK_TSEM, init_phase);
+ ecore_init_block(sc, BLOCK_USEM, init_phase);
+ ecore_init_block(sc, BLOCK_CSEM, init_phase);
+ ecore_init_block(sc, BLOCK_XSEM, init_phase);
+
+ if (!CHIP_IS_E1x(sc))
+ REG_WR(sc, QM_REG_PF_EN, 1);
+
+ if (!CHIP_IS_E1x(sc)) {
+ REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ }
+ ecore_init_block(sc, BLOCK_QM, init_phase);
+
+ ecore_init_block(sc, BLOCK_TM, init_phase);
+ ecore_init_block(sc, BLOCK_DORQ, init_phase);
+
+ ecore_init_block(sc, BLOCK_BRB1, init_phase);
+ ecore_init_block(sc, BLOCK_PRS, init_phase);
+ ecore_init_block(sc, BLOCK_TSDM, init_phase);
+ ecore_init_block(sc, BLOCK_CSDM, init_phase);
+ ecore_init_block(sc, BLOCK_USDM, init_phase);
+ ecore_init_block(sc, BLOCK_XSDM, init_phase);
+ ecore_init_block(sc, BLOCK_UPB, init_phase);
+ ecore_init_block(sc, BLOCK_XPB, init_phase);
+ ecore_init_block(sc, BLOCK_PBF, init_phase);
+ if (!CHIP_IS_E1x(sc))
+ REG_WR(sc, PBF_REG_DISABLE_PF, 0);
+
+ ecore_init_block(sc, BLOCK_CDU, init_phase);
+
+ ecore_init_block(sc, BLOCK_CFC, init_phase);
+
+ if (!CHIP_IS_E1x(sc))
+ REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
+
+ if (IS_MF(sc)) {
+ REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
+ REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, OVLAN(sc));
+ }
+
+ ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
+
+ /* HC init per function */
+ if (sc->devinfo.int_block == INT_BLOCK_HC) {
+ if (CHIP_IS_E1H(sc)) {
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0);
+
+ REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0);
+ REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0);
+ }
+ ecore_init_block(sc, BLOCK_HC, init_phase);
+
+ } else {
+ uint32_t num_segs, sb_idx, prod_offset;
+
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0);
+
+ if (!CHIP_IS_E1x(sc)) {
+ REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
+ REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ }
+
+ ecore_init_block(sc, BLOCK_IGU, init_phase);
+
+ if (!CHIP_IS_E1x(sc)) {
+ int dsb_idx = 0;
+ /**
+ * Producer memory:
+ * E2 mode: address 0-135 match to the mapping memory;
+ * 136 - PF0 default prod; 137 - PF1 default prod;
+ * 138 - PF2 default prod; 139 - PF3 default prod;
+ * 140 - PF0 attn prod; 141 - PF1 attn prod;
+ * 142 - PF2 attn prod; 143 - PF3 attn prod;
+ * 144-147 reserved.
+ *
+ * E1.5 mode - In backward compatible mode;
+ * for non default SB; each even line in the memory
+ * holds the U producer and each odd line hold
+ * the C producer. The first 128 producers are for
+ * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
+ * producers are for the DSB for each PF.
+ * Each PF has five segments: (the order inside each
+ * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+ * 132-135 C prods; 136-139 X prods; 140-143 T prods;
+ * 144-147 attn prods;
+ */
+ /* non-default-status-blocks */
+ num_segs = CHIP_INT_MODE_IS_BC(sc) ?
+ IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
+ for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
+ prod_offset = (sc->igu_base_sb + sb_idx) *
+ num_segs;
+
+ for (i = 0; i < num_segs; i++) {
+ addr = IGU_REG_PROD_CONS_MEMORY +
+ (prod_offset + i) * 4;
+ REG_WR(sc, addr, 0);
+ }
+ /* send consumer update with value 0 */
+ bnx2x_ack_sb(sc, sc->igu_base_sb + sb_idx,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
+ }
+
+ /* default-status-blocks */
+ num_segs = CHIP_INT_MODE_IS_BC(sc) ?
+ IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
+
+ if (CHIP_IS_MODE_4_PORT(sc))
+ dsb_idx = SC_FUNC(sc);
+ else
+ dsb_idx = SC_VN(sc);
+
+ prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
+ IGU_BC_BASE_DSB_PROD + dsb_idx :
+ IGU_NORM_BASE_DSB_PROD + dsb_idx);
+
+ /*
+ * igu prods come in chunks of E1HVN_MAX (4) -
+ * does not matters what is the current chip mode
+ */
+ for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) {
+ addr = IGU_REG_PROD_CONS_MEMORY +
+ (prod_offset + i) * 4;
+ REG_WR(sc, addr, 0);
+ }
+ /* send consumer update with 0 */
+ if (CHIP_INT_MODE_IS_BC(sc)) {
+ bnx2x_ack_sb(sc, sc->igu_dsb_id,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(sc, sc->igu_dsb_id,
+ CSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(sc, sc->igu_dsb_id,
+ XSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(sc, sc->igu_dsb_id,
+ TSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(sc, sc->igu_dsb_id,
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
+ } else {
+ bnx2x_ack_sb(sc, sc->igu_dsb_id,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(sc, sc->igu_dsb_id,
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
+ }
+ bnx2x_igu_clear_sb(sc, sc->igu_dsb_id);
+
+ /* !!! these should become driver const once
+ rf-tool supports split-68 const */
+ REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+ REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+ REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
+ REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
+ REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
+ REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
+ }
+ }
+
+ /* Reset PCIE errors for debug */
+ REG_WR(sc, 0x2114, 0xffffffff);
+ REG_WR(sc, 0x2120, 0xffffffff);
+
+ if (CHIP_IS_E1x(sc)) {
+ main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords */
+ main_mem_base = HC_REG_MAIN_MEMORY +
+ SC_PORT(sc) * (main_mem_size * 4);
+ main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
+ main_mem_width = 8;
+
+ val = REG_RD(sc, main_mem_prty_clr);
+ if (val) {
+ PMD_DRV_LOG(DEBUG,
+ "Parity errors in HC block during function init (0x%x)!",
+ val);
+ }
+
+/* Clear "false" parity errors in MSI-X table */
+ for (i = main_mem_base;
+ i < main_mem_base + main_mem_size * 4;
+ i += main_mem_width) {
+ bnx2x_read_dmae(sc, i, main_mem_width / 4);
+ bnx2x_write_dmae(sc, BNX2X_SP_MAPPING(sc, wb_data),
+ i, main_mem_width / 4);
+ }
+/* Clear HC parity attention */
+ REG_RD(sc, main_mem_prty_clr);
+ }
+
+ /* Enable STORMs SP logging */
+ REG_WR8(sc, BAR_USTRORM_INTMEM +
+ USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
+ REG_WR8(sc, BAR_TSTRORM_INTMEM +
+ TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
+ REG_WR8(sc, BAR_CSTRORM_INTMEM +
+ CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
+ REG_WR8(sc, BAR_XSTRORM_INTMEM +
+ XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
+
+ elink_phy_probe(&sc->link_params);
+
+ return 0;
+}
+
+static void bnx2x_link_reset(struct bnx2x_softc *sc)
+{
+ if (!BNX2X_NOMCP(sc)) {
+ elink_lfa_reset(&sc->link_params, &sc->link_vars);
+ } else {
+ if (!CHIP_REV_IS_SLOW(sc)) {
+ PMD_DRV_LOG(WARNING,
+ "Bootcode is missing - cannot reset link");
+ }
+ }
+}
+
+static void bnx2x_reset_port(struct bnx2x_softc *sc)
+{
+ int port = SC_PORT(sc);
+ uint32_t val;
+
+ /* reset physical Link */
+ bnx2x_link_reset(sc);
+
+ REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0);
+
+ /* Do not rcv packets to BRB */
+ REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port * 4, 0x0);
+ /* Do not direct rcv packets that are not for MCP to the BRB */
+ REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+
+ /* Configure AEU */
+ REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, 0);
+
+ DELAY(100000);
+
+ /* Check for BRB port occupancy */
+ val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port * 4);
+ if (val) {
+ PMD_DRV_LOG(DEBUG,
+ "BRB1 is not empty, %d blocks are occupied", val);
+ }
+}
+
+static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, rte_iova_t addr)
+{
+ int reg;
+ uint32_t wb_write[2];
+
+ reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index * 8;
+
+ wb_write[0] = ONCHIP_ADDR1(addr);
+ wb_write[1] = ONCHIP_ADDR2(addr);
+ REG_WR_DMAE(sc, reg, wb_write, 2);
+}
+
+static void bnx2x_clear_func_ilt(struct bnx2x_softc *sc, uint32_t func)
+{
+ uint32_t i, base = FUNC_ILT_BASE(func);
+ for (i = base; i < base + ILT_PER_FUNC; i++) {
+ bnx2x_ilt_wr(sc, i, 0);
+ }
+}
+
+static void bnx2x_reset_func(struct bnx2x_softc *sc)
+{
+ struct bnx2x_fastpath *fp;
+ int port = SC_PORT(sc);
+ int func = SC_FUNC(sc);
+ int i;
+
+ /* Disable the function in the FW */
+ REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
+
+ /* FP SBs */
+ FOR_EACH_ETH_QUEUE(sc, i) {
+ fp = &sc->fp[i];
+ REG_WR8(sc, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
+ SB_DISABLED);
+ }
+
+ /* SP SB */
+ REG_WR8(sc, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), SB_DISABLED);
+
+ for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
+ REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
+ 0);
+ }
+
+ /* Configure IGU */
+ if (sc->devinfo.int_block == INT_BLOCK_HC) {
+ REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0);
+ REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0);
+ } else {
+ REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
+ REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ }
+
+ if (CNIC_LOADED(sc)) {
+/* Disable Timer scan */
+ REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port * 4, 0);
+/*
+ * Wait for at least 10ms and up to 2 second for the timers
+ * scan to complete
+ */
+ for (i = 0; i < 200; i++) {
+ DELAY(10000);
+ if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port * 4))
+ break;
+ }
+ }
+
+ /* Clear ILT */
+ bnx2x_clear_func_ilt(sc, func);
+
+ /*
+ * Timers workaround bug for E2: if this is vnic-3,
+ * we need to set the entire ilt range for this timers.
+ */
+ if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
+ struct ilt_client_info ilt_cli;
+/* use dummy TM client */
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+ ilt_cli.start = 0;
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+ ilt_cli.client_num = ILT_CLIENT_TM;
+
+ ecore_ilt_boundry_init_op(sc, &ilt_cli, 0);
+ }
+
+ /* this assumes that reset_port() called before reset_func() */
+ if (!CHIP_IS_E1x(sc)) {
+ bnx2x_pf_disable(sc);
+ }
+
+ sc->dmae_ready = 0;
+}
+
+static void bnx2x_release_firmware(struct bnx2x_softc *sc)
+{
+ rte_free(sc->init_ops);
+ rte_free(sc->init_ops_offsets);
+ rte_free(sc->init_data);
+ rte_free(sc->iro_array);
+}
+
+static int bnx2x_init_firmware(struct bnx2x_softc *sc)
+{
+ uint32_t len, i;
+ uint8_t *p = sc->firmware;
+ uint32_t off[24];
+
+ for (i = 0; i < 24; ++i)
+ off[i] = rte_be_to_cpu_32(*((uint32_t *) sc->firmware + i));
+
+ len = off[0];
+ sc->init_ops = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
+ if (!sc->init_ops)
+ goto alloc_failed;
+ bnx2x_data_to_init_ops(p + off[1], sc->init_ops, len);
+
+ len = off[2];
+ sc->init_ops_offsets = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
+ if (!sc->init_ops_offsets)
+ goto alloc_failed;
+ bnx2x_data_to_init_offsets(p + off[3], sc->init_ops_offsets, len);
+
+ len = off[4];
+ sc->init_data = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
+ if (!sc->init_data)
+ goto alloc_failed;
+ bnx2x_data_to_init_data(p + off[5], sc->init_data, len);
+
+ sc->tsem_int_table_data = p + off[7];
+ sc->tsem_pram_data = p + off[9];
+ sc->usem_int_table_data = p + off[11];
+ sc->usem_pram_data = p + off[13];
+ sc->csem_int_table_data = p + off[15];
+ sc->csem_pram_data = p + off[17];
+ sc->xsem_int_table_data = p + off[19];
+ sc->xsem_pram_data = p + off[21];
+
+ len = off[22];
+ sc->iro_array = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
+ if (!sc->iro_array)
+ goto alloc_failed;
+ bnx2x_data_to_iro_array(p + off[23], sc->iro_array, len);
+
+ return 0;
+
+alloc_failed:
+ bnx2x_release_firmware(sc);
+ return -1;
+}
+
+static int cut_gzip_prefix(const uint8_t * zbuf, int len)
+{
+#define MIN_PREFIX_SIZE (10)
+
+ int n = MIN_PREFIX_SIZE;
+ uint16_t xlen;
+
+ if (!(zbuf[0] == 0x1f && zbuf[1] == 0x8b && zbuf[2] == Z_DEFLATED) ||
+ len <= MIN_PREFIX_SIZE) {
+ return -1;
+ }
+
+ /* optional extra fields are present */
+ if (zbuf[3] & 0x4) {
+ xlen = zbuf[13];
+ xlen <<= 8;
+ xlen += zbuf[12];
+
+ n += xlen;
+ }
+ /* file name is present */
+ if (zbuf[3] & 0x8) {
+ while ((zbuf[n++] != 0) && (n < len)) ;
+ }
+
+ return n;
+}
+
+static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t * zbuf, int len)
+{
+ int ret;
+ int data_begin = cut_gzip_prefix(zbuf, len);
+
+ PMD_DRV_LOG(DEBUG, "ecore_gunzip %d", len);
+
+ if (data_begin <= 0) {
+ PMD_DRV_LOG(NOTICE, "bad gzip prefix");
+ return -1;
+ }
+
+ memset(&zlib_stream, 0, sizeof(zlib_stream));
+ zlib_stream.next_in = zbuf + data_begin;
+ zlib_stream.avail_in = len - data_begin;
+ zlib_stream.next_out = sc->gz_buf;
+ zlib_stream.avail_out = FW_BUF_SIZE;
+
+ ret = inflateInit2(&zlib_stream, -MAX_WBITS);
+ if (ret != Z_OK) {
+ PMD_DRV_LOG(NOTICE, "zlib inflateInit2 error");
+ return ret;
+ }
+
+ ret = inflate(&zlib_stream, Z_FINISH);
+ if ((ret != Z_STREAM_END) && (ret != Z_OK)) {
+ PMD_DRV_LOG(NOTICE, "zlib inflate error: %d %s", ret,
+ zlib_stream.msg);
+ }
+
+ sc->gz_outlen = zlib_stream.total_out;
+ if (sc->gz_outlen & 0x3) {
+ PMD_DRV_LOG(NOTICE, "firmware is not aligned. gz_outlen == %d",
+ sc->gz_outlen);
+ }
+ sc->gz_outlen >>= 2;
+
+ inflateEnd(&zlib_stream);
+
+ if (ret == Z_STREAM_END)
+ return 0;
+
+ return ret;
+}
+
+static void
+ecore_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr,
+ uint32_t addr, uint32_t len)
+{
+ bnx2x_write_dmae_phys_len(sc, phys_addr, addr, len);
+}
+
+void
+ecore_storm_memset_struct(struct bnx2x_softc *sc, uint32_t addr, size_t size,
+ uint32_t * data)
+{
+ uint8_t i;
+ for (i = 0; i < size / 4; i++) {
+ REG_WR(sc, addr + (i * 4), data[i]);
+ }
+}
+
+static const char *get_ext_phy_type(uint32_t ext_phy_type)
+{
+ uint32_t phy_type_idx = ext_phy_type >> 8;
+ static const char *types[] =
+ { "DIRECT", "BNX2X-8071", "BNX2X-8072", "BNX2X-8073",
+ "BNX2X-8705", "BNX2X-8706", "BNX2X-8726", "BNX2X-8481", "SFX-7101",
+ "BNX2X-8727",
+ "BNX2X-8727-NOC", "BNX2X-84823", "NOT_CONN", "FAILURE"
+ };
+
+ if (phy_type_idx < 12)
+ return types[phy_type_idx];
+ else if (PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN == ext_phy_type)
+ return types[12];
+ else
+ return types[13];
+}
+
+static const char *get_state(uint32_t state)
+{
+ uint32_t state_idx = state >> 12;
+ static const char *states[] = { "CLOSED", "OPENING_WAIT4_LOAD",
+ "OPENING_WAIT4_PORT", "OPEN", "CLOSING_WAIT4_HALT",
+ "CLOSING_WAIT4_DELETE", "CLOSING_WAIT4_UNLOAD",
+ "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
+ "UNKNOWN", "DISABLED", "DIAG", "ERROR", "UNDEFINED"
+ };
+
+ if (state_idx <= 0xF)
+ return states[state_idx];
+ else
+ return states[0x10];
+}
+
+static const char *get_recovery_state(uint32_t state)
+{
+ static const char *states[] = { "NONE", "DONE", "INIT",
+ "WAIT", "FAILED", "NIC_LOADING"
+ };
+ return states[state];
+}
+
+static const char *get_rx_mode(uint32_t mode)
+{
+ static const char *modes[] = { "NONE", "NORMAL", "ALLMULTI",
+ "PROMISC", "MAX_MULTICAST", "ERROR"
+ };
+
+ if (mode < 0x4)
+ return modes[mode];
+ else if (BNX2X_MAX_MULTICAST == mode)
+ return modes[4];
+ else
+ return modes[5];
+}
+
+#define BNX2X_INFO_STR_MAX 256
+static const char *get_bnx2x_flags(uint32_t flags)
+{
+ int i;
+ static const char *flag[] = { "ONE_PORT ", "NO_ISCSI ",
+ "NO_FCOE ", "NO_WOL ", "USING_DAC ", "USING_MSIX ",
+ "USING_MSI ", "DISABLE_MSI ", "UNKNOWN ", "NO_MCP ",
+ "SAFC_TX_FLAG ", "MF_FUNC_DIS ", "TX_SWITCHING "
+ };
+ static char flag_str[BNX2X_INFO_STR_MAX];
+ memset(flag_str, 0, BNX2X_INFO_STR_MAX);
+
+ for (i = 0; i < 5; i++)
+ if (flags & (1 << i)) {
+ strcat(flag_str, flag[i]);
+ flags ^= (1 << i);
+ }
+ if (flags) {
+ static char unknown[BNX2X_INFO_STR_MAX];
+ snprintf(unknown, 32, "Unknown flag mask %x", flags);
+ strcat(flag_str, unknown);
+ }
+ return flag_str;
+}
+
+/*
+ * Prints useful adapter info.
+ */
+void bnx2x_print_adapter_info(struct bnx2x_softc *sc)
+{
+ int i = 0;
+ __rte_unused uint32_t ext_phy_type;
+
+ PMD_INIT_FUNC_TRACE();
+ if (sc->link_vars.phy_flags & PHY_XGXS_FLAG)
+ ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(REG_RD(sc,
+ sc->
+ devinfo.shmem_base
+ + offsetof(struct
+ shmem_region,
+ dev_info.port_hw_config
+ [0].external_phy_config)));
+ else
+ ext_phy_type = ELINK_SERDES_EXT_PHY_TYPE(REG_RD(sc,
+ sc->
+ devinfo.shmem_base
+ +
+ offsetof(struct
+ shmem_region,
+ dev_info.port_hw_config
+ [0].external_phy_config)));
+
+ PMD_INIT_LOG(DEBUG, "\n\n===================================\n");
+ /* Hardware chip info. */
+ PMD_INIT_LOG(DEBUG, "%12s : %#08x", "ASIC", sc->devinfo.chip_id);
+ PMD_INIT_LOG(DEBUG, "%12s : %c%d", "Rev", (CHIP_REV(sc) >> 12) + 'A',
+ (CHIP_METAL(sc) >> 4));
+
+ /* Bus info. */
+ PMD_INIT_LOG(DEBUG, "%12s : %d, ", "Bus PCIe", sc->devinfo.pcie_link_width);
+ switch (sc->devinfo.pcie_link_speed) {
+ case 1:
+ PMD_INIT_LOG(DEBUG, "%23s", "2.5 Gbps");
+ break;
+ case 2:
+ PMD_INIT_LOG(DEBUG, "%21s", "5 Gbps");
+ break;
+ case 4:
+ PMD_INIT_LOG(DEBUG, "%21s", "8 Gbps");
+ break;
+ default:
+ PMD_INIT_LOG(DEBUG, "%33s", "Unknown link speed");
+ }
+
+ /* Device features. */
+ PMD_INIT_LOG(DEBUG, "%12s : ", "Flags");
+
+ /* Miscellaneous flags. */
+ if (sc->devinfo.pcie_cap_flags & BNX2X_MSI_CAPABLE_FLAG) {
+ PMD_INIT_LOG(DEBUG, "%18s", "MSI");
+ i++;
+ }
+
+ if (sc->devinfo.pcie_cap_flags & BNX2X_MSIX_CAPABLE_FLAG) {
+ if (i > 0)
+ PMD_INIT_LOG(DEBUG, "|");
+ PMD_INIT_LOG(DEBUG, "%20s", "MSI-X");
+ i++;
+ }
+
+ if (IS_PF(sc)) {
+ PMD_INIT_LOG(DEBUG, "%12s : ", "Queues");
+ switch (sc->sp->rss_rdata.rss_mode) {
+ case ETH_RSS_MODE_DISABLED:
+ PMD_INIT_LOG(DEBUG, "%19s", "None");
+ break;
+ case ETH_RSS_MODE_REGULAR:
+ PMD_INIT_LOG(DEBUG, "%18s : %d", "RSS", sc->num_queues);
+ break;
+ default:
+ PMD_INIT_LOG(DEBUG, "%22s", "Unknown");
+ break;
+ }
+ }
+
+ /* RTE and Driver versions */
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "DPDK",
+ rte_version());
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "Driver",
+ bnx2x_pmd_version());
+
+ /* Firmware versions and device features. */
+ PMD_INIT_LOG(DEBUG, "%12s : %d.%d.%d",
+ "Firmware",
+ BNX2X_5710_FW_MAJOR_VERSION,
+ BNX2X_5710_FW_MINOR_VERSION,
+ BNX2X_5710_FW_REVISION_VERSION);
+ PMD_INIT_LOG(DEBUG, "%12s : %s",
+ "Bootcode", sc->devinfo.bc_ver_str);
+
+ PMD_INIT_LOG(DEBUG, "\n\n===================================\n");
+ PMD_INIT_LOG(DEBUG, "%12s : %u", "Bnx2x Func", sc->pcie_func);
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "Bnx2x Flags", get_bnx2x_flags(sc->flags));
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "DMAE Is",
+ (sc->dmae_ready ? "Ready" : "Not Ready"));
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO"));
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO"));
+ PMD_INIT_LOG(DEBUG, "%12s : %u", "MTU", sc->mtu);
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "PHY Type", get_ext_phy_type(ext_phy_type));
+ PMD_INIT_LOG(DEBUG, "%12s : %x:%x:%x:%x:%x:%x", "MAC Addr",
+ sc->link_params.mac_addr[0],
+ sc->link_params.mac_addr[1],
+ sc->link_params.mac_addr[2],
+ sc->link_params.mac_addr[3],
+ sc->link_params.mac_addr[4],
+ sc->link_params.mac_addr[5]);
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "RX Mode", get_rx_mode(sc->rx_mode));
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "State", get_state(sc->state));
+ if (sc->recovery_state)
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "Recovery",
+ get_recovery_state(sc->recovery_state));
+ PMD_INIT_LOG(DEBUG, "%12s : CQ = %lx, EQ = %lx", "SPQ Left",
+ sc->cq_spq_left, sc->eq_spq_left);
+ PMD_INIT_LOG(DEBUG, "%12s : %x", "Switch", sc->link_params.switch_cfg);
+ PMD_INIT_LOG(DEBUG, "\n\n===================================\n");
+}
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x.h b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x.h
new file mode 100644
index 00000000..0f6024fb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x.h
@@ -0,0 +1,2013 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __BNX2X_H__
+#define __BNX2X_H__
+
+#include <rte_byteorder.h>
+#include <rte_spinlock.h>
+#include <rte_bus_pci.h>
+#include <rte_io.h>
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN RTE_LITTLE_ENDIAN
+#endif
+#undef __BIG_ENDIAN
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN RTE_BIG_ENDIAN
+#endif
+#undef __LITTLE_ENDIAN
+#endif
+
+#include "bnx2x_ethdev.h"
+#include "ecore_mfw_req.h"
+#include "ecore_fw_defs.h"
+#include "ecore_hsi.h"
+#include "ecore_reg.h"
+#include "bnx2x_stats.h"
+#include "bnx2x_vfpf.h"
+
+#include "elink.h"
+
+#ifndef __FreeBSD__
+#include <linux/pci_regs.h>
+
+#define PCIY_PMG PCI_CAP_ID_PM
+#define PCIY_MSI PCI_CAP_ID_MSI
+#define PCIY_EXPRESS PCI_CAP_ID_EXP
+#define PCIY_MSIX PCI_CAP_ID_MSIX
+#define PCIR_EXPRESS_DEVICE_STA PCI_EXP_TYPE_RC_EC
+#define PCIM_EXP_STA_TRANSACTION_PND PCI_EXP_DEVSTA_TRPND
+#define PCIR_EXPRESS_LINK_STA PCI_EXP_LNKSTA
+#define PCIM_LINK_STA_WIDTH PCI_EXP_LNKSTA_NLW
+#define PCIM_LINK_STA_SPEED PCI_EXP_LNKSTA_CLS
+#define PCIR_EXPRESS_DEVICE_CTL PCI_EXP_DEVCTL
+#define PCIM_EXP_CTL_MAX_PAYLOAD PCI_EXP_DEVCTL_PAYLOAD
+#define PCIM_EXP_CTL_MAX_READ_REQUEST PCI_EXP_DEVCTL_READRQ
+#define PCIR_POWER_STATUS PCI_PM_CTRL
+#define PCIM_PSTAT_DMASK PCI_PM_CTRL_STATE_MASK
+#define PCIM_PSTAT_PME PCI_PM_CTRL_PME_STATUS
+#define PCIM_PSTAT_D3 0x3
+#define PCIM_PSTAT_PMEENABLE PCI_PM_CTRL_PME_ENABLE
+#define PCIR_MSIX_CTRL PCI_MSIX_FLAGS
+#define PCIM_MSIXCTRL_TABLE_SIZE PCI_MSIX_FLAGS_QSIZE
+#else
+#include <dev/pci/pcireg.h>
+#endif
+
+#define IFM_10G_CX4 20 /* 10GBase CX4 copper */
+#define IFM_10G_TWINAX 22 /* 10GBase Twinax copper */
+#define IFM_10G_T 26 /* 10GBase-T - RJ45 */
+
+#ifndef __FreeBSD__
+#define PCIR_EXPRESS_DEVICE_STA PCI_EXP_TYPE_RC_EC
+#define PCIM_EXP_STA_TRANSACTION_PND PCI_EXP_DEVSTA_TRPND
+#define PCIR_EXPRESS_LINK_STA PCI_EXP_LNKSTA
+#define PCIM_LINK_STA_WIDTH PCI_EXP_LNKSTA_NLW
+#define PCIM_LINK_STA_SPEED PCI_EXP_LNKSTA_CLS
+#define PCIR_EXPRESS_DEVICE_CTL PCI_EXP_DEVCTL
+#define PCIM_EXP_CTL_MAX_PAYLOAD PCI_EXP_DEVCTL_PAYLOAD
+#define PCIM_EXP_CTL_MAX_READ_REQUEST PCI_EXP_DEVCTL_READRQ
+#else
+#define PCIR_EXPRESS_DEVICE_STA PCIER_DEVICE_STA
+#define PCIM_EXP_STA_TRANSACTION_PND PCIEM_STA_TRANSACTION_PND
+#define PCIR_EXPRESS_LINK_STA PCIER_LINK_STA
+#define PCIM_LINK_STA_WIDTH PCIEM_LINK_STA_WIDTH
+#define PCIM_LINK_STA_SPEED PCIEM_LINK_STA_SPEED
+#define PCIR_EXPRESS_DEVICE_CTL PCIER_DEVICE_CTL
+#define PCIM_EXP_CTL_MAX_PAYLOAD PCIEM_CTL_MAX_PAYLOAD
+#define PCIM_EXP_CTL_MAX_READ_REQUEST PCIEM_CTL_MAX_READ_REQUEST
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+#ifndef ARRSIZE
+#define ARRSIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef roundup
+#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#endif
+#ifndef ilog2
+static inline
+int bnx2x_ilog2(int x)
+{
+ int log = 0;
+ x >>= 1;
+
+ while(x) {
+ log++;
+ x >>= 1;
+ }
+ return log;
+}
+#define ilog2(x) bnx2x_ilog2(x)
+#endif
+
+#include "ecore_sp.h"
+
+struct bnx2x_device_type {
+ uint16_t bnx2x_vid;
+ uint16_t bnx2x_did;
+ uint16_t bnx2x_svid;
+ uint16_t bnx2x_sdid;
+ char *bnx2x_name;
+};
+
+#define BNX2X_PAGE_SHIFT 12
+#define BNX2X_PAGE_SIZE (1 << BNX2X_PAGE_SHIFT)
+#define BNX2X_PAGE_MASK (~(BNX2X_PAGE_SIZE - 1))
+#define BNX2X_PAGE_ALIGN(addr) ((addr + BNX2X_PAGE_SIZE - 1) & BNX2X_PAGE_MASK)
+
+#if BNX2X_PAGE_SIZE != 4096
+#error Page sizes other than 4KB are unsupported!
+#endif
+
+#define U64_LO(addr) ((uint32_t)(((uint64_t)(addr)) & 0xFFFFFFFF))
+#define U64_HI(addr) ((uint32_t)(((uint64_t)(addr)) >> 32))
+#define HILO_U64(hi, lo) ((((uint64_t)(hi)) << 32) + (lo))
+
+/* dropless fc FW/HW related params */
+#define BRB_SIZE(sc) (CHIP_IS_E3(sc) ? 1024 : 512)
+#define MAX_AGG_QS(sc) ETH_MAX_AGGREGATION_QUEUES_E1H_E2
+#define FW_DROP_LEVEL(sc) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(sc))
+#define FW_PREFETCH_CNT 16U
+#define DROPLESS_FC_HEADROOM 100
+
+/*
+ * Transmit Buffer Descriptor (tx_bd) definitions*
+ */
+/* NUM_TX_PAGES must be a power of 2. */
+#define TOTAL_TX_BD_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(union eth_tx_bd_types)) /* 256 */
+#define USABLE_TX_BD_PER_PAGE (TOTAL_TX_BD_PER_PAGE - 1) /* 255 */
+
+#define TOTAL_TX_BD(q) (TOTAL_TX_BD_PER_PAGE * q->nb_tx_pages) /* 512 */
+#define USABLE_TX_BD(q) (USABLE_TX_BD_PER_PAGE * q->nb_tx_pages) /* 510 */
+#define MAX_TX_BD(q) (TOTAL_TX_BD(q) - 1) /* 511 */
+
+#define NEXT_TX_BD(x) \
+ ((((x) & USABLE_TX_BD_PER_PAGE) == \
+ (USABLE_TX_BD_PER_PAGE - 1)) ? (x) + 2 : (x) + 1)
+
+#define TX_BD(x, q) ((x) & MAX_TX_BD(q))
+#define TX_PAGE(x) (((x) & ~USABLE_TX_BD_PER_PAGE) >> 8)
+#define TX_IDX(x) ((x) & USABLE_TX_BD_PER_PAGE)
+
+#define BDS_PER_TX_PKT (3)
+
+/*
+ * Trigger pending transmits when the number of available BDs is greater
+ * than 1/8 of the total number of usable BDs.
+ */
+#define BNX2X_TX_CLEANUP_THRESHOLD(q) (USABLE_TX_BD(q) / 8)
+#define BNX2X_TX_TIMEOUT 5
+
+/*
+ * Receive Buffer Descriptor (rx_bd) definitions*
+ */
+//#define NUM_RX_PAGES 1
+#define TOTAL_RX_BD_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(struct eth_rx_bd)) /* 512 */
+#define USABLE_RX_BD_PER_PAGE (TOTAL_RX_BD_PER_PAGE - 2) /* 510 */
+#define RX_BD_PER_PAGE_MASK (TOTAL_RX_BD_PER_PAGE - 1) /* 511 */
+#define TOTAL_RX_BD(q) (TOTAL_RX_BD_PER_PAGE * q->nb_rx_pages) /* 512 */
+#define USABLE_RX_BD(q) (USABLE_RX_BD_PER_PAGE * q->nb_rx_pages) /* 510 */
+#define MAX_RX_BD(q) (TOTAL_RX_BD(q) - 1) /* 511 */
+#define RX_BD_NEXT_PAGE_DESC_CNT 2
+
+#define NEXT_RX_BD(x) \
+ ((((x) & RX_BD_PER_PAGE_MASK) == \
+ (USABLE_RX_BD_PER_PAGE - 1)) ? (x) + 3 : (x) + 1)
+
+/* x & 0x3ff */
+#define RX_BD(x, q) ((x) & MAX_RX_BD(q))
+#define RX_PAGE(x) (((x) & ~RX_BD_PER_PAGE_MASK) >> 9)
+#define RX_IDX(x) ((x) & RX_BD_PER_PAGE_MASK)
+
+/*
+ * Receive Completion Queue definitions*
+ */
+//#define NUM_RCQ_PAGES (NUM_RX_PAGES * 4)
+#define TOTAL_RCQ_ENTRIES_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(union eth_rx_cqe)) /* 128 */
+#define USABLE_RCQ_ENTRIES_PER_PAGE (TOTAL_RCQ_ENTRIES_PER_PAGE - 1) /* 127 */
+#define TOTAL_RCQ_ENTRIES(q) (TOTAL_RCQ_ENTRIES_PER_PAGE * q->nb_cq_pages) /* 512 */
+#define USABLE_RCQ_ENTRIES(q) (USABLE_RCQ_ENTRIES_PER_PAGE * q->nb_cq_pages) /* 508 */
+#define MAX_RCQ_ENTRIES(q) (TOTAL_RCQ_ENTRIES(q) - 1) /* 511 */
+#define RCQ_NEXT_PAGE_DESC_CNT 1
+
+#define NEXT_RCQ_IDX(x) \
+ ((((x) & USABLE_RCQ_ENTRIES_PER_PAGE) == \
+ (USABLE_RCQ_ENTRIES_PER_PAGE - 1)) ? (x) + 2 : (x) + 1)
+
+#define CQE_BD_REL \
+ (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
+
+#define RCQ_BD_PAGES(q) \
+ (q->nb_rx_pages * CQE_BD_REL)
+
+#define RCQ_ENTRY(x, q) ((x) & MAX_RCQ_ENTRIES(q))
+#define RCQ_PAGE(x) (((x) & ~USABLE_RCQ_ENTRIES_PER_PAGE) >> 7)
+#define RCQ_IDX(x) ((x) & USABLE_RCQ_ENTRIES_PER_PAGE)
+
+/*
+ * dropless fc calculations for BDs
+ * Number of BDs should be as number of buffers in BRB:
+ * Low threshold takes into account RX_BD_NEXT_PAGE_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_BD_REQ(sc) \
+ BRB_SIZE(sc)
+#define NUM_BD_PG_REQ(sc) \
+ ((NUM_BD_REQ(sc) + USABLE_RX_BD_PER_PAGE - 1) / USABLE_RX_BD_PER_PAGE)
+#define BD_TH_LO(sc) \
+ (NUM_BD_REQ(sc) + \
+ NUM_BD_PG_REQ(sc) * RX_BD_NEXT_PAGE_DESC_CNT + \
+ FW_DROP_LEVEL(sc))
+#define BD_TH_HI(sc) \
+ (BD_TH_LO(sc) + DROPLESS_FC_HEADROOM)
+#define MIN_RX_AVAIL(sc) \
+ ((sc)->dropless_fc ? BD_TH_HI(sc) + 128 : 128)
+
+/*
+ * dropless fc calculations for RCQs
+ * Number of RCQs should be as number of buffers in BRB:
+ * Low threshold takes into account RCQ_NEXT_PAGE_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_RCQ_REQ(sc) \
+ BRB_SIZE(sc)
+#define NUM_RCQ_PG_REQ(sc) \
+ ((NUM_RCQ_REQ(sc) + USABLE_RCQ_ENTRIES_PER_PAGE - 1) / USABLE_RCQ_ENTRIES_PER_PAGE)
+#define RCQ_TH_LO(sc) \
+ (NUM_RCQ_REQ(sc) + \
+ NUM_RCQ_PG_REQ(sc) * RCQ_NEXT_PAGE_DESC_CNT + \
+ FW_DROP_LEVEL(sc))
+#define RCQ_TH_HI(sc) \
+ (RCQ_TH_LO(sc) + DROPLESS_FC_HEADROOM)
+
+/* Load / Unload modes */
+#define LOAD_NORMAL 0
+#define LOAD_OPEN 1
+#define LOAD_DIAG 2
+#define LOAD_LOOPBACK_EXT 3
+#define UNLOAD_NORMAL 0
+#define UNLOAD_CLOSE 1
+#define UNLOAD_RECOVERY 2
+
+/* Some constants... */
+//#define MAX_PATH_NUM 2
+//#define E2_MAX_NUM_OF_VFS 64
+//#define E1H_FUNC_MAX 8
+//#define E2_FUNC_MAX 4 /* per path */
+#define MAX_VNIC_NUM 4
+#define MAX_FUNC_NUM 8 /* common to all chips */
+//#define MAX_NDSB HC_SB_MAX_SB_E2 /* max non-default status block */
+#define MAX_RSS_CHAINS 16 /* a constant for HW limit */
+#define MAX_MSI_VECTOR 8 /* a constant for HW limit */
+
+#define ILT_NUM_PAGE_ENTRIES 3072
+/*
+ * 57711 we use whole table since we have 8 functions.
+ * 57712 we have only 4 functions, but use same size per func, so only half
+ * of the table is used.
+ */
+#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES / 8)
+#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
+/*
+ * the phys address is shifted right 12 bits and has an added
+ * 1=valid bit added to the 53rd bit
+ * then since this is a wide register(TM)
+ * we split it into two 32 bit writes
+ */
+#define ONCHIP_ADDR1(x) ((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF))
+#define ONCHIP_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)x >> 44)))
+
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_HLEN 14
+#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
+#define ETH_MIN_PACKET_SIZE 60
+#define ETH_MAX_PACKET_SIZE ETHERMTU /* 1500 */
+#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+/* TCP with Timestamp Option (32) + IPv6 (40) */
+
+/* max supported alignment is 256 (8 shift) */
+#define BNX2X_RX_ALIGN_SHIFT RTE_MAX(6, min(8, RTE_CACHE_LINE_SIZE_LOG2))
+
+#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
+
+struct bnx2x_bar {
+ void *base_addr;
+};
+
+/* Used to manage DMA allocations. */
+struct bnx2x_dma {
+ struct bnx2x_softc *sc;
+ rte_iova_t paddr;
+ void *vaddr;
+ int nseg;
+ char msg[RTE_MEMZONE_NAMESIZE - 6];
+};
+
+/* attn group wiring */
+#define MAX_DYNAMIC_ATTN_GRPS 8
+
+struct attn_route {
+ uint32_t sig[5];
+};
+
+struct iro {
+ uint32_t base;
+ uint16_t m1;
+ uint16_t m2;
+ uint16_t m3;
+ uint16_t size;
+};
+
+union bnx2x_host_hc_status_block {
+ /* pointer to fp status block e2 */
+ struct host_hc_status_block_e2 *e2_sb;
+ /* pointer to fp status block e1x */
+ struct host_hc_status_block_e1x *e1x_sb;
+};
+
+union bnx2x_db_prod {
+ struct doorbell_set_prod data;
+ uint32_t raw;
+};
+
+struct bnx2x_sw_tx_bd {
+ struct mbuf *m;
+ uint16_t first_bd;
+ uint8_t flags;
+/* set on the first BD descriptor when there is a split BD */
+#define BNX2X_TSO_SPLIT_BD (1 << 0)
+};
+
+/*
+ * This is the HSI fastpath data structure. There can be up to MAX_RSS_CHAIN
+ * instances of the fastpath structure when using multiple queues.
+ */
+struct bnx2x_fastpath {
+ /* pointer back to parent structure */
+ struct bnx2x_softc *sc;
+
+ /* status block */
+ struct bnx2x_dma sb_dma;
+ union bnx2x_host_hc_status_block status_block;
+
+ rte_iova_t tx_desc_mapping;
+
+ rte_iova_t rx_desc_mapping;
+ rte_iova_t rx_comp_mapping;
+
+ uint16_t *sb_index_values;
+ uint16_t *sb_running_index;
+ uint32_t ustorm_rx_prods_offset;
+
+ uint8_t igu_sb_id; /* status block number in HW */
+ uint8_t fw_sb_id; /* status block number in FW */
+
+ uint32_t rx_buf_size;
+
+ int state;
+#define BNX2X_FP_STATE_CLOSED 0x01
+#define BNX2X_FP_STATE_IRQ 0x02
+#define BNX2X_FP_STATE_OPENING 0x04
+#define BNX2X_FP_STATE_OPEN 0x08
+#define BNX2X_FP_STATE_HALTING 0x10
+#define BNX2X_FP_STATE_HALTED 0x20
+
+ /* reference back to this fastpath queue number */
+ uint8_t index; /* this is also the 'cid' */
+#define FP_IDX(fp) (fp->index)
+
+ /* ethernet client ID (each fastpath set of RX/TX/CQE is a client) */
+ uint8_t cl_id;
+#define FP_CL_ID(fp) (fp->cl_id)
+ uint8_t cl_qzone_id;
+
+ uint16_t fp_hc_idx;
+
+ union bnx2x_db_prod tx_db;
+
+ struct tstorm_per_queue_stats old_tclient;
+ struct ustorm_per_queue_stats old_uclient;
+ struct xstorm_per_queue_stats old_xclient;
+ struct bnx2x_eth_q_stats eth_q_stats;
+ struct bnx2x_eth_q_stats_old eth_q_stats_old;
+
+ /* Pointer to the receive consumer in the status block */
+ uint16_t *rx_cq_cons_sb;
+
+ /* Pointer to the transmit consumer in the status block */
+ uint16_t *tx_cons_sb;
+
+ /* transmit timeout until chip reset */
+ int watchdog_timer;
+
+}; /* struct bnx2x_fastpath */
+
+#define BNX2X_MAX_NUM_OF_VFS 64
+#define BNX2X_VF_ID_INVALID 0xFF
+
+/* maximum number of fast-path interrupt contexts */
+#define FP_SB_MAX_E1x 16
+#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2
+
+union cdu_context {
+ struct eth_context eth;
+ char pad[1024];
+};
+
+/* CDU host DB constants */
+#define CDU_ILT_PAGE_SZ_HW 2
+#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
+#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
+
+#define CNIC_ISCSI_CID_MAX 256
+#define CNIC_FCOE_CID_MAX 2048
+#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
+#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
+
+#define QM_ILT_PAGE_SZ_HW 0
+#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
+#define QM_CID_ROUND 1024
+
+/* TM (timers) host DB constants */
+#define TM_ILT_PAGE_SZ_HW 0
+#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
+/*#define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
+#define TM_CONN_NUM 1024
+#define TM_ILT_SZ (8 * TM_CONN_NUM)
+#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
+
+/* SRC (Searcher) host DB constants */
+#define SRC_ILT_PAGE_SZ_HW 0
+#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */
+#define SRC_HASH_BITS 10
+#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */
+#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM)
+#define SRC_T2_SZ SRC_ILT_SZ
+#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
+
+struct hw_context {
+ struct bnx2x_dma vcxt_dma;
+ union cdu_context *vcxt;
+ //rte_iova_t cxt_mapping;
+ size_t size;
+};
+
+#define SM_RX_ID 0
+#define SM_TX_ID 1
+
+/* defines for multiple tx priority indices */
+#define FIRST_TX_ONLY_COS_INDEX 1
+#define FIRST_TX_COS_INDEX 0
+
+#define CID_TO_FP(cid, sc) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(sc))
+
+#define HC_INDEX_ETH_RX_CQ_CONS 1
+#define HC_INDEX_OOO_TX_CQ_CONS 4
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
+
+/* congestion management fairness mode */
+#define CMNG_FNS_NONE 0
+#define CMNG_FNS_MINMAX 1
+
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
+/* number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES 160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+/* how many bytes above threshold for the minimal credit of Min algorithm*/
+#define MIN_ABOVE_THRESH 32768
+/* fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+/* memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
+
+#define HC_SEG_ACCESS_DEF 0 /* Driver decision 0-3 */
+#define HC_SEG_ACCESS_ATTN 4
+#define HC_SEG_ACCESS_NORM 0 /* Driver decision 0-1 */
+
+/*
+ * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
+ * control by the number of fast-path status blocks supported by the
+ * device (HW/FW). Each fast-path status block (FP-SB) aka non-default
+ * status block represents an independent interrupts context that can
+ * serve a regular L2 networking queue. However special L2 queues such
+ * as the FCoE queue do not require a FP-SB and other components like
+ * the CNIC may consume FP-SB reducing the number of possible L2 queues
+ *
+ * If the maximum number of FP-SB available is X then:
+ * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
+ * regular L2 queues is Y=X-1
+ * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
+ * c. If the FCoE L2 queue is supported the actual number of L2 queues
+ * is Y+1
+ * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
+ * slow-path interrupts) or Y+2 if CNIC is supported (one additional
+ * FP interrupt context for the CNIC).
+ * e. The number of HW context (CID count) is always X or X+1 if FCoE
+ * L2 queue is supported. the cid for the FCoE L2 queue is always X.
+ *
+ * So this is quite simple for now as no ULPs are supported yet. :-)
+ */
+#define BNX2X_NUM_QUEUES(sc) ((sc)->num_queues)
+#define BNX2X_NUM_ETH_QUEUES(sc) BNX2X_NUM_QUEUES(sc)
+#define BNX2X_NUM_NON_CNIC_QUEUES(sc) BNX2X_NUM_QUEUES(sc)
+#define BNX2X_NUM_RX_QUEUES(sc) BNX2X_NUM_QUEUES(sc)
+
+#define FOR_EACH_QUEUE(sc, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(sc); (var)++)
+
+#define FOR_EACH_NONDEFAULT_QUEUE(sc, var) \
+ for ((var) = 1; (var) < BNX2X_NUM_QUEUES(sc); (var)++)
+
+#define FOR_EACH_ETH_QUEUE(sc, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(sc); (var)++)
+
+#define FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, var) \
+ for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(sc); (var)++)
+
+#define FOR_EACH_COS_IN_TX_QUEUE(sc, var) \
+ for ((var) = 0; (var) < (sc)->max_cos; (var)++)
+
+#define FOR_EACH_CNIC_QUEUE(sc, var) \
+ for ((var) = BNX2X_NUM_ETH_QUEUES(sc); \
+ (var) < BNX2X_NUM_QUEUES(sc); \
+ (var)++)
+
+enum {
+ OOO_IDX_OFFSET,
+ FCOE_IDX_OFFSET,
+ FWD_IDX_OFFSET,
+};
+
+#define FCOE_IDX(sc) (BNX2X_NUM_NON_CNIC_QUEUES(sc) + FCOE_IDX_OFFSET)
+#define bnx2x_fcoe_fp(sc) (&sc->fp[FCOE_IDX(sc)])
+#define bnx2x_fcoe(sc, var) (bnx2x_fcoe_fp(sc)->var)
+#define bnx2x_fcoe_inner_sp_obj(sc) (&sc->sp_objs[FCOE_IDX(sc)])
+#define bnx2x_fcoe_sp_obj(sc, var) (bnx2x_fcoe_inner_sp_obj(sc)->var)
+#define bnx2x_fcoe_tx(sc, var) (bnx2x_fcoe_fp(sc)->txdata_ptr[FIRST_TX_COS_INDEX]->var)
+
+#define OOO_IDX(sc) (BNX2X_NUM_NON_CNIC_QUEUES(sc) + OOO_IDX_OFFSET)
+#define bnx2x_ooo_fp(sc) (&sc->fp[OOO_IDX(sc)])
+#define bnx2x_ooo(sc, var) (bnx2x_ooo_fp(sc)->var)
+#define bnx2x_ooo_inner_sp_obj(sc) (&sc->sp_objs[OOO_IDX(sc)])
+#define bnx2x_ooo_sp_obj(sc, var) (bnx2x_ooo_inner_sp_obj(sc)->var)
+
+#define FWD_IDX(sc) (BNX2X_NUM_NON_CNIC_QUEUES(sc) + FWD_IDX_OFFSET)
+#define bnx2x_fwd_fp(sc) (&sc->fp[FWD_IDX(sc)])
+#define bnx2x_fwd(sc, var) (bnx2x_fwd_fp(sc)->var)
+#define bnx2x_fwd_inner_sp_obj(sc) (&sc->sp_objs[FWD_IDX(sc)])
+#define bnx2x_fwd_sp_obj(sc, var) (bnx2x_fwd_inner_sp_obj(sc)->var)
+#define bnx2x_fwd_txdata(fp) (fp->txdata_ptr[FIRST_TX_COS_INDEX])
+
+#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->sc))
+#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->sc))
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(sc))
+#define IS_FWD_FP(fp) ((fp)->index == FWD_IDX((fp)->sc))
+#define IS_FWD_IDX(idx) ((idx) == FWD_IDX(sc))
+#define IS_OOO_FP(fp) ((fp)->index == OOO_IDX((fp)->sc))
+#define IS_OOO_IDX(idx) ((idx) == OOO_IDX(sc))
+
+enum {
+ BNX2X_PORT_QUERY_IDX,
+ BNX2X_PF_QUERY_IDX,
+ BNX2X_FCOE_QUERY_IDX,
+ BNX2X_FIRST_QUEUE_QUERY_IDX,
+};
+
+struct bnx2x_fw_stats_req {
+ struct stats_query_header hdr;
+ struct stats_query_entry query[FP_SB_MAX_E1x +
+ BNX2X_FIRST_QUEUE_QUERY_IDX];
+};
+
+struct bnx2x_fw_stats_data {
+ struct stats_counter storm_counters;
+ struct per_port_stats port;
+ struct per_pf_stats pf;
+ struct per_queue_stats queue_stats[1];
+};
+
+/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
+#define BNX2X_IGU_STAS_MSG_VF_CNT 64
+#define BNX2X_IGU_STAS_MSG_PF_CNT 4
+
+#define MAX_DMAE_C 8
+
+/*
+ * This is the slowpath data structure. It is mapped into non-paged memory
+ * so that the hardware can access it's contents directly and must be page
+ * aligned.
+ */
+struct bnx2x_slowpath {
+
+ /* used by the DMAE command executer */
+ struct dmae_command dmae[MAX_DMAE_C];
+
+ /* statistics completion */
+ uint32_t stats_comp;
+
+ /* firmware defined statistics blocks */
+ union mac_stats mac_stats;
+ struct nig_stats nig_stats;
+ struct host_port_stats port_stats;
+ struct host_func_stats func_stats;
+
+ /* DMAE completion value and data source/sink */
+ uint32_t wb_comp;
+ uint32_t wb_data[4];
+
+ union {
+ struct mac_configuration_cmd e1x;
+ struct eth_classify_rules_ramrod_data e2;
+ } mac_rdata;
+
+ union {
+ struct tstorm_eth_mac_filter_config e1x;
+ struct eth_filter_rules_ramrod_data e2;
+ } rx_mode_rdata;
+
+ struct eth_rss_update_ramrod_data rss_rdata;
+
+ union {
+ struct mac_configuration_cmd e1;
+ struct eth_multicast_rules_ramrod_data e2;
+ } mcast_rdata;
+
+ union {
+ struct function_start_data func_start;
+ struct flow_control_configuration pfc_config; /* for DCBX ramrod */
+ } func_rdata;
+
+ /* Queue State related ramrods */
+ union {
+ struct client_init_ramrod_data init_data;
+ struct client_update_ramrod_data update_data;
+ } q_rdata;
+
+ /*
+ * AFEX ramrod can not be a part of func_rdata union because these
+ * events might arrive in parallel to other events from func_rdata.
+ * If they were defined in the same union the data can get corrupted.
+ */
+ struct afex_vif_list_ramrod_data func_afex_rdata;
+
+ union drv_info_to_mcp drv_info_to_mcp;
+}; /* struct bnx2x_slowpath */
+
+/*
+ * Port specifc data structure.
+ */
+struct bnx2x_port {
+ /*
+ * Port Management Function (for 57711E only).
+ * When this field is set the driver instance is
+ * responsible for managing port specifc
+ * configurations such as handling link attentions.
+ */
+ uint32_t pmf;
+
+ /* Ethernet maximum transmission unit. */
+ uint16_t ether_mtu;
+
+ uint32_t link_config[ELINK_LINK_CONFIG_SIZE];
+
+ uint32_t ext_phy_config;
+
+ /* Port feature config.*/
+ uint32_t config;
+
+ /* Defines the features supported by the PHY. */
+ uint32_t supported[ELINK_LINK_CONFIG_SIZE];
+
+ /* Defines the features advertised by the PHY. */
+ uint32_t advertising[ELINK_LINK_CONFIG_SIZE];
+#define ADVERTISED_10baseT_Half (1 << 1)
+#define ADVERTISED_10baseT_Full (1 << 2)
+#define ADVERTISED_100baseT_Half (1 << 3)
+#define ADVERTISED_100baseT_Full (1 << 4)
+#define ADVERTISED_1000baseT_Half (1 << 5)
+#define ADVERTISED_1000baseT_Full (1 << 6)
+#define ADVERTISED_TP (1 << 7)
+#define ADVERTISED_FIBRE (1 << 8)
+#define ADVERTISED_Autoneg (1 << 9)
+#define ADVERTISED_Asym_Pause (1 << 10)
+#define ADVERTISED_Pause (1 << 11)
+#define ADVERTISED_2500baseX_Full (1 << 15)
+#define ADVERTISED_10000baseT_Full (1 << 16)
+
+ uint32_t phy_addr;
+
+ /*
+ * MCP scratchpad address for port specific statistics.
+ * The device is responsible for writing statistcss
+ * back to the MCP for use with management firmware such
+ * as UMP/NC-SI.
+ */
+ uint32_t port_stx;
+
+ struct nig_stats old_nig_stats;
+}; /* struct bnx2x_port */
+
+struct bnx2x_mf_info {
+ uint32_t mf_config[E1HVN_MAX];
+
+ uint32_t vnics_per_port; /* 1, 2 or 4 */
+ uint32_t multi_vnics_mode; /* can be set even if vnics_per_port = 1 */
+ uint32_t path_has_ovlan; /* MF mode in the path (can be different than the MF mode of the function */
+
+#define IS_MULTI_VNIC(sc) ((sc)->devinfo.mf_info.multi_vnics_mode)
+#define VNICS_PER_PORT(sc) ((sc)->devinfo.mf_info.vnics_per_port)
+#define VNICS_PER_PATH(sc) \
+ ((sc)->devinfo.mf_info.vnics_per_port * \
+ ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 1 ))
+
+ uint8_t min_bw[MAX_VNIC_NUM];
+ uint8_t max_bw[MAX_VNIC_NUM];
+
+ uint16_t ext_id; /* vnic outer vlan or VIF ID */
+#define VALID_OVLAN(ovlan) ((ovlan) <= 4096)
+#define INVALID_VIF_ID 0xFFFF
+#define OVLAN(sc) ((sc)->devinfo.mf_info.ext_id)
+#define VIF_ID(sc) ((sc)->devinfo.mf_info.ext_id)
+
+ uint16_t default_vlan;
+#define NIV_DEFAULT_VLAN(sc) ((sc)->devinfo.mf_info.default_vlan)
+
+ uint8_t niv_allowed_priorities;
+#define NIV_ALLOWED_PRIORITIES(sc) ((sc)->devinfo.mf_info.niv_allowed_priorities)
+
+ uint8_t niv_default_cos;
+#define NIV_DEFAULT_COS(sc) ((sc)->devinfo.mf_info.niv_default_cos)
+
+ uint8_t niv_mba_enabled;
+
+ enum mf_cfg_afex_vlan_mode afex_vlan_mode;
+#define AFEX_VLAN_MODE(sc) ((sc)->devinfo.mf_info.afex_vlan_mode)
+ int afex_def_vlan_tag;
+ uint32_t pending_max;
+
+ uint16_t flags;
+#define MF_INFO_VALID_MAC 0x0001
+
+ uint16_t mf_ov;
+ uint8_t mf_mode; /* Switch-Dependent or Switch-Independent */
+#define IS_MF(sc) \
+ (IS_MULTI_VNIC(sc) && \
+ ((sc)->devinfo.mf_info.mf_mode != 0))
+#define IS_MF_SD(sc) \
+ (IS_MULTI_VNIC(sc) && \
+ ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD))
+#define IS_MF_SI(sc) \
+ (IS_MULTI_VNIC(sc) && \
+ ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI))
+#define IS_MF_AFEX(sc) \
+ (IS_MULTI_VNIC(sc) && \
+ ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX))
+#define IS_MF_SD_MODE(sc) IS_MF_SD(sc)
+#define IS_MF_SI_MODE(sc) IS_MF_SI(sc)
+#define IS_MF_AFEX_MODE(sc) IS_MF_AFEX(sc)
+
+ uint32_t mf_protos_supported;
+ #define MF_PROTO_SUPPORT_ETHERNET 0x1
+ #define MF_PROTO_SUPPORT_ISCSI 0x2
+ #define MF_PROTO_SUPPORT_FCOE 0x4
+}; /* struct bnx2x_mf_info */
+
+/* Device information data structure. */
+struct bnx2x_devinfo {
+ /* PCIe info */
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint16_t subvendor_id;
+ uint16_t subdevice_id;
+
+ /*
+ * chip_id = 0b'CCCCCCCCCCCCCCCCRRRRMMMMMMMMBBBB'
+ * C = Chip Number (bits 16-31)
+ * R = Chip Revision (bits 12-15)
+ * M = Chip Metal (bits 4-11)
+ * B = Chip Bond ID (bits 0-3)
+ */
+ uint32_t chip_id;
+#define CHIP_ID(sc) ((sc)->devinfo.chip_id & 0xffff0000)
+#define CHIP_NUM(sc) ((sc)->devinfo.chip_id >> 16)
+/* device ids */
+#define CHIP_NUM_57711 0x164f
+#define CHIP_NUM_57711E 0x1650
+#define CHIP_NUM_57712 0x1662
+#define CHIP_NUM_57712_MF 0x1663
+#define CHIP_NUM_57712_VF 0x166f
+#define CHIP_NUM_57800 0x168a
+#define CHIP_NUM_57800_MF 0x16a5
+#define CHIP_NUM_57800_VF 0x16a9
+#define CHIP_NUM_57810 0x168e
+#define CHIP_NUM_57810_MF 0x16ae
+#define CHIP_NUM_57810_VF 0x16af
+#define CHIP_NUM_57811 0x163d
+#define CHIP_NUM_57811_MF 0x163e
+#define CHIP_NUM_57811_VF 0x163f
+#define CHIP_NUM_57840_OBS 0x168d
+#define CHIP_NUM_57840_OBS_MF 0x16ab
+#define CHIP_NUM_57840_4_10 0x16a1
+#define CHIP_NUM_57840_2_20 0x16a2
+#define CHIP_NUM_57840_MF 0x16a4
+#define CHIP_NUM_57840_VF 0x16ad
+
+#define CHIP_REV_SHIFT 12
+#define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT)
+#define CHIP_REV(sc) ((sc)->devinfo.chip_id & CHIP_REV_MASK)
+
+#define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT)
+#define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT)
+#define CHIP_REV_Cx (0x2 << CHIP_REV_SHIFT)
+
+#define CHIP_REV_IS_SLOW(sc) \
+ (CHIP_REV(sc) > 0x00005000)
+#define CHIP_REV_IS_FPGA(sc) \
+ (CHIP_REV_IS_SLOW(sc) && (CHIP_REV(sc) & 0x00001000))
+#define CHIP_REV_IS_EMUL(sc) \
+ (CHIP_REV_IS_SLOW(sc) && !(CHIP_REV(sc) & 0x00001000))
+#define CHIP_REV_IS_ASIC(sc) \
+ (!CHIP_REV_IS_SLOW(sc))
+
+#define CHIP_METAL(sc) ((sc->devinfo.chip_id) & 0x00000ff0)
+#define CHIP_BOND_ID(sc) ((sc->devinfo.chip_id) & 0x0000000f)
+
+#define CHIP_IS_57711(sc) (CHIP_NUM(sc) == CHIP_NUM_57711)
+#define CHIP_IS_57711E(sc) (CHIP_NUM(sc) == CHIP_NUM_57711E)
+#define CHIP_IS_E1H(sc) ((CHIP_IS_57711(sc)) || \
+ (CHIP_IS_57711E(sc)))
+#define CHIP_IS_E1x(sc) CHIP_IS_E1H(sc)
+
+#define CHIP_IS_57712(sc) (CHIP_NUM(sc) == CHIP_NUM_57712)
+#define CHIP_IS_57712_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57712_MF)
+#define CHIP_IS_57712_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57712_VF)
+#define CHIP_IS_E2(sc) (CHIP_IS_57712(sc) || \
+ CHIP_IS_57712_MF(sc))
+
+#define CHIP_IS_57800(sc) (CHIP_NUM(sc) == CHIP_NUM_57800)
+#define CHIP_IS_57800_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57800_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57800_VF)
+#define CHIP_IS_57810(sc) (CHIP_NUM(sc) == CHIP_NUM_57810)
+#define CHIP_IS_57810_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57810_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57810_VF)
+#define CHIP_IS_57811(sc) (CHIP_NUM(sc) == CHIP_NUM_57811)
+#define CHIP_IS_57811_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57811_MF)
+#define CHIP_IS_57811_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57811_VF)
+#define CHIP_IS_57840(sc) ((CHIP_NUM(sc) == CHIP_NUM_57840_OBS) || \
+ (CHIP_NUM(sc) == CHIP_NUM_57840_4_10) || \
+ (CHIP_NUM(sc) == CHIP_NUM_57840_2_20))
+#define CHIP_IS_57840_MF(sc) ((CHIP_NUM(sc) == CHIP_NUM_57840_OBS_MF) || \
+ (CHIP_NUM(sc) == CHIP_NUM_57840_MF))
+#define CHIP_IS_57840_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57840_VF)
+
+#define CHIP_IS_E3(sc) (CHIP_IS_57800(sc) || \
+ CHIP_IS_57800_MF(sc) || \
+ CHIP_IS_57800_VF(sc) || \
+ CHIP_IS_57810(sc) || \
+ CHIP_IS_57810_MF(sc) || \
+ CHIP_IS_57810_VF(sc) || \
+ CHIP_IS_57811(sc) || \
+ CHIP_IS_57811_MF(sc) || \
+ CHIP_IS_57811_VF(sc) || \
+ CHIP_IS_57840(sc) || \
+ CHIP_IS_57840_MF(sc) || \
+ CHIP_IS_57840_VF(sc))
+#define CHIP_IS_E3A0(sc) (CHIP_IS_E3(sc) && \
+ (CHIP_REV(sc) == CHIP_REV_Ax))
+#define CHIP_IS_E3B0(sc) (CHIP_IS_E3(sc) && \
+ (CHIP_REV(sc) == CHIP_REV_Bx))
+
+#define USES_WARPCORE(sc) (CHIP_IS_E3(sc))
+#define CHIP_IS_E2E3(sc) (CHIP_IS_E2(sc) || \
+ CHIP_IS_E3(sc))
+
+#define CHIP_IS_MF_CAP(sc) (CHIP_IS_57711E(sc) || \
+ CHIP_IS_57712_MF(sc) || \
+ CHIP_IS_E3(sc))
+
+#define IS_VF(sc) ((sc)->flags & BNX2X_IS_VF_FLAG)
+#define IS_PF(sc) (!IS_VF(sc))
+
+/*
+ * This define is used in two main places:
+ * 1. In the early stages of nic_load, to know if to configure Parser/Searcher
+ * to nic-only mode or to offload mode. Offload mode is configured if either
+ * the chip is E1x (where NIC_MODE register is not applicable), or if cnic
+ * already registered for this port (which means that the user wants storage
+ * services).
+ * 2. During cnic-related load, to know if offload mode is already configured
+ * in the HW or needs to be configrued. Since the transition from nic-mode to
+ * offload-mode in HW causes traffic coruption, nic-mode is configured only
+ * in ports on which storage services where never requested.
+ */
+#define CONFIGURE_NIC_MODE(sc) (!CHIP_IS_E1x(sc) && !CNIC_ENABLED(sc))
+
+ uint8_t chip_port_mode;
+#define CHIP_4_PORT_MODE 0x0
+#define CHIP_2_PORT_MODE 0x1
+#define CHIP_PORT_MODE_NONE 0x2
+#define CHIP_PORT_MODE(sc) ((sc)->devinfo.chip_port_mode)
+#define CHIP_IS_MODE_4_PORT(sc) (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE)
+
+ uint8_t int_block;
+#define INT_BLOCK_HC 0
+#define INT_BLOCK_IGU 1
+#define INT_BLOCK_MODE_NORMAL 0
+#define INT_BLOCK_MODE_BW_COMP 2
+#define CHIP_INT_MODE_IS_NBC(sc) \
+ (!CHIP_IS_E1x(sc) && \
+ !((sc)->devinfo.int_block & INT_BLOCK_MODE_BW_COMP))
+#define CHIP_INT_MODE_IS_BC(sc) (!CHIP_INT_MODE_IS_NBC(sc))
+
+ uint32_t shmem_base;
+ uint32_t shmem2_base;
+ uint32_t bc_ver;
+ char bc_ver_str[32];
+ uint32_t mf_cfg_base; /* bootcode shmem address in BAR memory */
+ struct bnx2x_mf_info mf_info;
+
+ uint32_t flash_size;
+#define NVRAM_1MB_SIZE 0x20000
+#define NVRAM_TIMEOUT_COUNT 30000
+#define NVRAM_PAGE_SIZE 256
+
+ /* PCIe capability information */
+ uint32_t pcie_cap_flags;
+#define BNX2X_PM_CAPABLE_FLAG 0x00000001
+#define BNX2X_PCIE_CAPABLE_FLAG 0x00000002
+#define BNX2X_MSI_CAPABLE_FLAG 0x00000004
+#define BNX2X_MSIX_CAPABLE_FLAG 0x00000008
+ uint16_t pcie_pm_cap_reg;
+ uint16_t pcie_link_width;
+ uint16_t pcie_link_speed;
+ uint16_t pcie_msi_cap_reg;
+ uint16_t pcie_msix_cap_reg;
+
+ /* device configuration read from bootcode shared memory */
+ uint32_t hw_config;
+ uint32_t hw_config2;
+}; /* struct bnx2x_devinfo */
+
+struct bnx2x_sp_objs {
+ struct ecore_vlan_mac_obj mac_obj; /* MACs object */
+ struct ecore_queue_sp_obj q_obj; /* Queue State object */
+}; /* struct bnx2x_sp_objs */
+
+/*
+ * Data that will be used to create a link report message. We will keep the
+ * data used for the last link report in order to prevent reporting the same
+ * link parameters twice.
+ */
+struct bnx2x_link_report_data {
+ uint16_t line_speed; /* Effective line speed */
+ unsigned long link_report_flags; /* BNX2X_LINK_REPORT_XXX flags */
+};
+
+enum {
+ BNX2X_LINK_REPORT_FULL_DUPLEX,
+ BNX2X_LINK_REPORT_LINK_DOWN,
+ BNX2X_LINK_REPORT_RX_FC_ON,
+ BNX2X_LINK_REPORT_TX_FC_ON
+};
+
+#define BNX2X_RX_CHAIN_PAGE_SZ BNX2X_PAGE_SIZE
+
+struct bnx2x_pci_cap {
+ struct bnx2x_pci_cap *next;
+ uint16_t id;
+ uint16_t type;
+ uint16_t addr;
+};
+
+struct bnx2x_vfdb;
+
+/* Top level device private data structure. */
+struct bnx2x_softc {
+
+ void **rx_queues;
+ void **tx_queues;
+ uint32_t max_tx_queues;
+ uint32_t max_rx_queues;
+ const struct rte_pci_device *pci_dev;
+ uint32_t pci_val;
+ struct bnx2x_pci_cap *pci_caps;
+#define BNX2X_INTRS_POLL_PERIOD 1
+
+ void *firmware;
+ uint64_t fw_len;
+
+ /* MAC address operations */
+ struct bnx2x_mac_ops mac_ops;
+
+ /* structures for VF mbox/response/bulletin */
+ struct bnx2x_vf_mbx_msg *vf2pf_mbox;
+ struct bnx2x_dma vf2pf_mbox_mapping;
+ struct vf_acquire_resp_tlv acquire_resp;
+ struct bnx2x_vf_bulletin *pf2vf_bulletin;
+ struct bnx2x_dma pf2vf_bulletin_mapping;
+ struct bnx2x_vf_bulletin old_bulletin;
+ rte_spinlock_t vf2pf_lock;
+
+ int media;
+
+ int state; /* device state */
+#define BNX2X_STATE_CLOSED 0x0000
+#define BNX2X_STATE_OPENING_WAITING_LOAD 0x1000
+#define BNX2X_STATE_OPENING_WAITING_PORT 0x2000
+#define BNX2X_STATE_OPEN 0x3000
+#define BNX2X_STATE_CLOSING_WAITING_HALT 0x4000
+#define BNX2X_STATE_CLOSING_WAITING_DELETE 0x5000
+#define BNX2X_STATE_CLOSING_WAITING_UNLOAD 0x6000
+#define BNX2X_STATE_DISABLED 0xD000
+#define BNX2X_STATE_DIAG 0xE000
+#define BNX2X_STATE_ERROR 0xF000
+
+ int flags;
+#define BNX2X_ONE_PORT_FLAG 0x1
+#define BNX2X_NO_FCOE_FLAG 0x2
+#define BNX2X_NO_WOL_FLAG 0x4
+#define BNX2X_NO_MCP_FLAG 0x8
+#define BNX2X_NO_ISCSI_OOO_FLAG 0x10
+#define BNX2X_NO_ISCSI_FLAG 0x20
+#define BNX2X_MF_FUNC_DIS 0x40
+#define BNX2X_TX_SWITCHING 0x80
+#define BNX2X_IS_VF_FLAG 0x100
+
+#define BNX2X_ONE_PORT(sc) (sc->flags & BNX2X_ONE_PORT_FLAG)
+#define BNX2X_NOFCOE(sc) (sc->flags & BNX2X_NO_FCOE_FLAG)
+#define BNX2X_NOMCP(sc) (sc->flags & BNX2X_NO_MCP_FLAG)
+
+#define MAX_BARS 5
+ struct bnx2x_bar bar[MAX_BARS]; /* map BARs 0, 2, 4 */
+
+ uint16_t doorbell_size;
+
+ /* periodic timer callout */
+#define PERIODIC_STOP 0
+#define PERIODIC_GO 1
+ volatile unsigned long periodic_flags;
+
+ struct bnx2x_fastpath fp[MAX_RSS_CHAINS];
+ struct bnx2x_sp_objs sp_objs[MAX_RSS_CHAINS];
+
+ uint8_t unit; /* driver instance number */
+
+ int pcie_bus; /* PCIe bus number */
+ int pcie_device; /* PCIe device/slot number */
+ int pcie_func; /* PCIe function number */
+
+ uint8_t pfunc_rel; /* function relative */
+ uint8_t pfunc_abs; /* function absolute */
+ uint8_t path_id; /* function absolute */
+#define SC_PATH(sc) (sc->path_id)
+#define SC_PORT(sc) (sc->pfunc_rel & 1)
+#define SC_FUNC(sc) (sc->pfunc_rel)
+#define SC_ABS_FUNC(sc) (sc->pfunc_abs)
+#define SC_VN(sc) (sc->pfunc_rel >> 1)
+#define SC_L_ID(sc) (SC_VN(sc) << 2)
+#define PORT_ID(sc) SC_PORT(sc)
+#define PATH_ID(sc) SC_PATH(sc)
+#define VNIC_ID(sc) SC_VN(sc)
+#define FUNC_ID(sc) SC_FUNC(sc)
+#define ABS_FUNC_ID(sc) SC_ABS_FUNC(sc)
+#define SC_FW_MB_IDX_VN(sc, vn) \
+ (SC_PORT(sc) + (vn) * \
+ ((CHIP_IS_E1x(sc) || (CHIP_IS_MODE_4_PORT(sc))) ? 2 : 1))
+#define SC_FW_MB_IDX(sc) SC_FW_MB_IDX_VN(sc, SC_VN(sc))
+
+ int if_capen; /* enabled interface capabilities */
+
+ struct bnx2x_devinfo devinfo;
+ char fw_ver_str[32];
+ char mf_mode_str[32];
+ char pci_link_str[32];
+
+ struct iro *iro_array;
+
+ int dmae_ready;
+#define DMAE_READY(sc) (sc->dmae_ready)
+
+ struct ecore_credit_pool_obj vlans_pool;
+ struct ecore_credit_pool_obj macs_pool;
+ struct ecore_rx_mode_obj rx_mode_obj;
+ struct ecore_mcast_obj mcast_obj;
+ struct ecore_rss_config_obj rss_conf_obj;
+ struct ecore_func_sp_obj func_obj;
+
+ uint16_t fw_seq;
+ uint16_t fw_drv_pulse_wr_seq;
+ uint32_t func_stx;
+
+ struct elink_params link_params;
+ struct elink_vars link_vars;
+ uint32_t link_cnt;
+ struct bnx2x_link_report_data last_reported_link;
+ char mac_addr_str[32];
+
+ uint32_t tx_ring_size;
+ uint32_t rx_ring_size;
+ int wol;
+
+ int is_leader;
+ int recovery_state;
+#define BNX2X_RECOVERY_DONE 1
+#define BNX2X_RECOVERY_INIT 2
+#define BNX2X_RECOVERY_WAIT 3
+#define BNX2X_RECOVERY_FAILED 4
+#define BNX2X_RECOVERY_NIC_LOADING 5
+
+ uint32_t rx_mode;
+#define BNX2X_RX_MODE_NONE 0
+#define BNX2X_RX_MODE_NORMAL 1
+#define BNX2X_RX_MODE_ALLMULTI 2
+#define BNX2X_RX_MODE_ALLMULTI_PROMISC 3
+#define BNX2X_RX_MODE_PROMISC 4
+#define BNX2X_MAX_MULTICAST 64
+
+ struct bnx2x_port port;
+
+ struct cmng_init cmng;
+
+ /* user configs */
+ uint8_t num_queues;
+ int hc_rx_ticks;
+ int hc_tx_ticks;
+ uint32_t rx_budget;
+ int interrupt_mode;
+#define INTR_MODE_INTX 0
+#define INTR_MODE_MSI 1
+#define INTR_MODE_MSIX 2
+#define INTR_MODE_SINGLE_MSIX 3
+ int udp_rss;
+
+ uint8_t igu_dsb_id;
+ uint8_t igu_base_sb;
+ uint8_t igu_sb_cnt;
+ uint32_t igu_base_addr;
+ uint8_t base_fw_ndsb;
+#define DEF_SB_IGU_ID 16
+#define DEF_SB_ID HC_SP_SB_ID
+
+ /* default status block */
+ struct bnx2x_dma def_sb_dma;
+ struct host_sp_status_block *def_sb;
+ uint16_t def_idx;
+ uint16_t def_att_idx;
+ uint32_t attn_state;
+ struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
+
+ /* general SP events - stats query, cfc delete, etc */
+#define HC_SP_INDEX_ETH_DEF_CONS 3
+ /* EQ completions */
+#define HC_SP_INDEX_EQ_CONS 7
+ /* FCoE L2 connection completions */
+#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6
+#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4
+ /* iSCSI L2 */
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
+
+ /* event queue */
+ struct bnx2x_dma eq_dma;
+ union event_ring_elem *eq;
+ uint16_t eq_prod;
+ uint16_t eq_cons;
+ uint16_t *eq_cons_sb;
+#define NUM_EQ_PAGES 1 /* must be a power of 2 */
+#define EQ_DESC_CNT_PAGE (BNX2X_PAGE_SIZE / sizeof(union event_ring_elem))
+#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)
+#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
+#define EQ_DESC_MASK (NUM_EQ_DESC - 1)
+#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
+ /* depends on EQ_DESC_CNT_PAGE being a power of 2 */
+#define NEXT_EQ_IDX(x) \
+ ((((x) & EQ_DESC_MAX_PAGE) == (EQ_DESC_MAX_PAGE - 1)) ? \
+ ((x) + 2) : ((x) + 1))
+ /* depends on the above and on NUM_EQ_PAGES being a power of 2 */
+#define EQ_DESC(x) ((x) & EQ_DESC_MASK)
+
+ /* slow path */
+ struct bnx2x_dma sp_dma;
+ struct bnx2x_slowpath *sp;
+ unsigned long sp_state;
+
+ /* slow path queue */
+ struct bnx2x_dma spq_dma;
+ struct eth_spe *spq;
+#define SP_DESC_CNT (BNX2X_PAGE_SIZE / sizeof(struct eth_spe))
+#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
+#define MAX_SPQ_PENDING 8
+
+ uint16_t spq_prod_idx;
+ struct eth_spe *spq_prod_bd;
+ struct eth_spe *spq_last_bd;
+ uint16_t *dsb_sp_prod;
+
+ volatile unsigned long eq_spq_left; /* COMMON_xxx ramrod credit */
+ volatile unsigned long cq_spq_left; /* ETH_xxx ramrod credit */
+
+ /* fw decompression buffer */
+ struct bnx2x_dma gz_buf_dma;
+ void *gz_buf;
+ uint32_t gz_outlen;
+#define GUNZIP_BUF(sc) (sc->gz_buf)
+#define GUNZIP_OUTLEN(sc) (sc->gz_outlen)
+#define GUNZIP_PHYS(sc) (rte_iova_t)(sc->gz_buf_dma.paddr)
+#define FW_BUF_SIZE 0x40000
+
+ struct raw_op *init_ops;
+ uint16_t *init_ops_offsets; /* init block offsets inside init_ops */
+ uint32_t *init_data; /* data blob, 32 bit granularity */
+ uint32_t init_mode_flags;
+#define INIT_MODE_FLAGS(sc) (sc->init_mode_flags)
+ /* PRAM blobs - raw data */
+ const uint8_t *tsem_int_table_data;
+ const uint8_t *tsem_pram_data;
+ const uint8_t *usem_int_table_data;
+ const uint8_t *usem_pram_data;
+ const uint8_t *xsem_int_table_data;
+ const uint8_t *xsem_pram_data;
+ const uint8_t *csem_int_table_data;
+ const uint8_t *csem_pram_data;
+#define INIT_OPS(sc) (sc->init_ops)
+#define INIT_OPS_OFFSETS(sc) (sc->init_ops_offsets)
+#define INIT_DATA(sc) (sc->init_data)
+#define INIT_TSEM_INT_TABLE_DATA(sc) (sc->tsem_int_table_data)
+#define INIT_TSEM_PRAM_DATA(sc) (sc->tsem_pram_data)
+#define INIT_USEM_INT_TABLE_DATA(sc) (sc->usem_int_table_data)
+#define INIT_USEM_PRAM_DATA(sc) (sc->usem_pram_data)
+#define INIT_XSEM_INT_TABLE_DATA(sc) (sc->xsem_int_table_data)
+#define INIT_XSEM_PRAM_DATA(sc) (sc->xsem_pram_data)
+#define INIT_CSEM_INT_TABLE_DATA(sc) (sc->csem_int_table_data)
+#define INIT_CSEM_PRAM_DATA(sc) (sc->csem_pram_data)
+
+#define PHY_FW_VER_LEN 20
+ char fw_ver[32];
+
+ /* ILT
+ * For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+ * context size we need 8 ILT entries.
+ */
+#define ILT_MAX_L2_LINES 8
+ struct hw_context context[ILT_MAX_L2_LINES];
+ struct ecore_ilt *ilt;
+#define ILT_MAX_LINES 256
+
+ /* max supported number of RSS queues: IGU SBs minus one for CNIC */
+#define BNX2X_MAX_RSS_COUNT(sc) ((sc)->igu_sb_cnt - CNIC_SUPPORT(sc))
+ /* max CID count: Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI */
+#define BNX2X_L2_MAX_CID(sc) \
+ (BNX2X_MAX_RSS_COUNT(sc) * ECORE_MULTI_TX_COS + 2 * CNIC_SUPPORT(sc))
+#define BNX2X_L2_CID_COUNT(sc) \
+ (BNX2X_NUM_ETH_QUEUES(sc) * ECORE_MULTI_TX_COS + 2 * CNIC_SUPPORT(sc))
+#define L2_ILT_LINES(sc) \
+ (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(sc), ILT_PAGE_CIDS))
+
+ int qm_cid_count;
+
+ uint8_t dropless_fc;
+
+ /* total number of FW statistics requests */
+ uint8_t fw_stats_num;
+ /*
+ * This is a memory buffer that will contain both statistics ramrod
+ * request and data.
+ */
+ struct bnx2x_dma fw_stats_dma;
+ /*
+ * FW statistics request shortcut (points at the beginning of fw_stats
+ * buffer).
+ */
+ int fw_stats_req_size;
+ struct bnx2x_fw_stats_req *fw_stats_req;
+ rte_iova_t fw_stats_req_mapping;
+ /*
+ * FW statistics data shortcut (points at the beginning of fw_stats
+ * buffer + fw_stats_req_size).
+ */
+ int fw_stats_data_size;
+ struct bnx2x_fw_stats_data *fw_stats_data;
+ rte_iova_t fw_stats_data_mapping;
+
+ /* tracking a pending STAT_QUERY ramrod */
+ uint16_t stats_pending;
+ /* number of completed statistics ramrods */
+ uint16_t stats_comp;
+ uint16_t stats_counter;
+ uint8_t stats_init;
+ int stats_state;
+
+ struct bnx2x_eth_stats eth_stats;
+ struct host_func_stats func_stats;
+ struct bnx2x_eth_stats_old eth_stats_old;
+ struct bnx2x_net_stats_old net_stats_old;
+ struct bnx2x_fw_port_stats_old fw_stats_old;
+
+ struct dmae_command stats_dmae; /* used by dmae command loader */
+ int executer_idx;
+
+ int mtu;
+
+ /* DCB support on/off */
+ int dcb_state;
+#define BNX2X_DCB_STATE_OFF 0
+#define BNX2X_DCB_STATE_ON 1
+ /* DCBX engine mode */
+ int dcbx_enabled;
+#define BNX2X_DCBX_ENABLED_OFF 0
+#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1
+#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2
+#define BNX2X_DCBX_ENABLED_INVALID -1
+
+ uint8_t cnic_support;
+ uint8_t cnic_enabled;
+ uint8_t cnic_loaded;
+#define CNIC_SUPPORT(sc) 0 /* ((sc)->cnic_support) */
+#define CNIC_ENABLED(sc) 0 /* ((sc)->cnic_enabled) */
+#define CNIC_LOADED(sc) 0 /* ((sc)->cnic_loaded) */
+
+ /* multiple tx classes of service */
+ uint8_t max_cos;
+#define BNX2X_MAX_PRIORITY 8
+ /* priority to cos mapping */
+ uint8_t prio_to_cos[BNX2X_MAX_PRIORITY];
+
+ int panic;
+}; /* struct bnx2x_softc */
+
+/* IOCTL sub-commands for edebug and firmware upgrade */
+#define BNX2X_IOC_RD_NVRAM 1
+#define BNX2X_IOC_WR_NVRAM 2
+#define BNX2X_IOC_STATS_SHOW_NUM 3
+#define BNX2X_IOC_STATS_SHOW_STR 4
+#define BNX2X_IOC_STATS_SHOW_CNT 5
+
+struct bnx2x_nvram_data {
+ uint32_t op; /* ioctl sub-command */
+ uint32_t offset;
+ uint32_t len;
+ uint32_t value[1]; /* variable */
+};
+
+union bnx2x_stats_show_data {
+ uint32_t op; /* ioctl sub-command */
+
+ struct {
+ uint32_t num; /* return number of stats */
+ uint32_t len; /* length of each string item */
+ } desc;
+
+ /* variable length... */
+ char str[1]; /* holds names of desc.num stats, each desc.len in length */
+
+ /* variable length... */
+ uint64_t stats[1]; /* holds all stats */
+};
+
+/* function init flags */
+#define FUNC_FLG_RSS 0x0001
+#define FUNC_FLG_STATS 0x0002
+/* FUNC_FLG_UNMATCHED 0x0004 */
+#define FUNC_FLG_SPQ 0x0010
+#define FUNC_FLG_LEADING 0x0020 /* PF only */
+
+struct bnx2x_func_init_params {
+ rte_iova_t fw_stat_map; /* (dma) valid if FUNC_FLG_STATS */
+ rte_iova_t spq_map; /* (dma) valid if FUNC_FLG_SPQ */
+ uint16_t func_flgs;
+ uint16_t func_id; /* abs function id */
+ uint16_t pf_id;
+ uint16_t spq_prod; /* valid if FUNC_FLG_SPQ */
+};
+
+/* memory resources reside at BARs 0, 2, 4 */
+/* Run `pciconf -lb` to see mappings */
+#define BAR0 0
+#define BAR1 2
+#define BAR2 4
+
+static inline void
+bnx2x_reg_write8(struct bnx2x_softc *sc, size_t offset, uint8_t val)
+{
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x",
+ (unsigned long)offset, val);
+ rte_write8(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
+}
+
+static inline void
+bnx2x_reg_write16(struct bnx2x_softc *sc, size_t offset, uint16_t val)
+{
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+ if ((offset % 2) != 0)
+ PMD_DRV_LOG(NOTICE, "Unaligned 16-bit write to 0x%08lx",
+ (unsigned long)offset);
+#endif
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%04x",
+ (unsigned long)offset, val);
+ rte_write16(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
+
+}
+
+static inline void
+bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val)
+{
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+ if ((offset % 4) != 0)
+ PMD_DRV_LOG(NOTICE, "Unaligned 32-bit write to 0x%08lx",
+ (unsigned long)offset);
+#endif
+
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
+ (unsigned long)offset, val);
+ rte_write32(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
+}
+
+static inline uint8_t
+bnx2x_reg_read8(struct bnx2x_softc *sc, size_t offset)
+{
+ uint8_t val;
+
+ val = rte_read8((uint8_t *)sc->bar[BAR0].base_addr + offset);
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x",
+ (unsigned long)offset, val);
+
+ return val;
+}
+
+static inline uint16_t
+bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset)
+{
+ uint16_t val;
+
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+ if ((offset % 2) != 0)
+ PMD_DRV_LOG(NOTICE, "Unaligned 16-bit read from 0x%08lx",
+ (unsigned long)offset);
+#endif
+
+ val = rte_read16(((uint8_t *)sc->bar[BAR0].base_addr + offset));
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
+ (unsigned long)offset, val);
+
+ return val;
+}
+
+static inline uint32_t
+bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset)
+{
+ uint32_t val;
+
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+ if ((offset % 4) != 0)
+ PMD_DRV_LOG(NOTICE, "Unaligned 32-bit read from 0x%08lx",
+ (unsigned long)offset);
+#endif
+
+ val = rte_read32(((uint8_t *)sc->bar[BAR0].base_addr + offset));
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
+ (unsigned long)offset, val);
+
+ return val;
+}
+
+#define REG_ADDR(sc, offset) (((uint64_t)sc->bar[BAR0].base_addr) + (offset))
+
+#define REG_RD8(sc, offset) bnx2x_reg_read8(sc, (offset))
+#define REG_RD16(sc, offset) bnx2x_reg_read16(sc, (offset))
+#define REG_RD32(sc, offset) bnx2x_reg_read32(sc, (offset))
+
+#define REG_WR8(sc, offset, val) bnx2x_reg_write8(sc, (offset), val)
+#define REG_WR16(sc, offset, val) bnx2x_reg_write16(sc, (offset), val)
+#define REG_WR32(sc, offset, val) bnx2x_reg_write32(sc, (offset), val)
+
+#define REG_RD(sc, offset) REG_RD32(sc, offset)
+#define REG_WR(sc, offset, val) REG_WR32(sc, offset, val)
+
+#define BNX2X_SP(sc, var) (&(sc)->sp->var)
+#define BNX2X_SP_MAPPING(sc, var) \
+ (sc->sp_dma.paddr + offsetof(struct bnx2x_slowpath, var))
+
+#define BNX2X_FP(sc, nr, var) ((sc)->fp[(nr)].var)
+#define BNX2X_SP_OBJ(sc, fp) ((sc)->sp_objs[(fp)->index])
+
+#define bnx2x_fp(sc, nr, var) ((sc)->fp[nr].var)
+
+#define REG_RD_DMAE(sc, offset, valp, len32) \
+ do { \
+ (void)bnx2x_read_dmae(sc, offset, len32); \
+ rte_memcpy(valp, BNX2X_SP(sc, wb_data[0]), (len32) * 4); \
+ } while (0)
+
+#define REG_WR_DMAE(sc, offset, valp, len32) \
+ do { \
+ rte_memcpy(BNX2X_SP(sc, wb_data[0]), valp, (len32) * 4); \
+ (void)bnx2x_write_dmae(sc, BNX2X_SP_MAPPING(sc, wb_data), offset, len32); \
+ } while (0)
+
+#define REG_WR_DMAE_LEN(sc, offset, valp, len32) \
+ REG_WR_DMAE(sc, offset, valp, len32)
+
+#define REG_RD_DMAE_LEN(sc, offset, valp, len32) \
+ REG_RD_DMAE(sc, offset, valp, len32)
+
+#define VIRT_WR_DMAE_LEN(sc, data, addr, len32, le32_swap) \
+ do { \
+ /* if (le32_swap) { */ \
+ /* PMD_PWARN_LOG(sc, "VIRT_WR_DMAE_LEN with le32_swap=1"); */ \
+ /* } */ \
+ rte_memcpy(GUNZIP_BUF(sc), data, len32 * 4); \
+ ecore_write_big_buf_wb(sc, addr, len32); \
+ } while (0)
+
+#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
+#define BNX2X_DB_SHIFT 7 /* 128 bytes */
+#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
+#error "Minimum DB doorbell stride is 8"
+#endif
+#define DPM_TRIGGER_TYPE 0x40
+
+/* Doorbell macro */
+#define BNX2X_DB_WRITE(db_bar, val) rte_write32_relaxed((val), (db_bar))
+
+#define BNX2X_DB_READ(db_bar) rte_read32_relaxed(db_bar)
+
+#define DOORBELL_ADDR(sc, offset) \
+ (volatile uint32_t *)(((char *)(sc)->bar[BAR1].base_addr + (offset)))
+
+#define DOORBELL(sc, cid, val) \
+ if (IS_PF(sc)) \
+ BNX2X_DB_WRITE((DOORBELL_ADDR(sc, sc->doorbell_size * (cid) + DPM_TRIGGER_TYPE)), (val)); \
+ else \
+ BNX2X_DB_WRITE((DOORBELL_ADDR(sc, sc->doorbell_size * (cid))), (val)) \
+
+#define SHMEM_ADDR(sc, field) \
+ (sc->devinfo.shmem_base + offsetof(struct shmem_region, field))
+#define SHMEM_RD(sc, field) REG_RD(sc, SHMEM_ADDR(sc, field))
+#define SHMEM_RD16(sc, field) REG_RD16(sc, SHMEM_ADDR(sc, field))
+#define SHMEM_WR(sc, field, val) REG_WR(sc, SHMEM_ADDR(sc, field), val)
+
+#define SHMEM2_ADDR(sc, field) \
+ (sc->devinfo.shmem2_base + offsetof(struct shmem2_region, field))
+#define SHMEM2_HAS(sc, field) \
+ (sc->devinfo.shmem2_base && (REG_RD(sc, SHMEM2_ADDR(sc, size)) > \
+ offsetof(struct shmem2_region, field)))
+#define SHMEM2_RD(sc, field) REG_RD(sc, SHMEM2_ADDR(sc, field))
+#define SHMEM2_WR(sc, field, val) REG_WR(sc, SHMEM2_ADDR(sc, field), val)
+
+#define MFCFG_ADDR(sc, field) \
+ (sc->devinfo.mf_cfg_base + offsetof(struct mf_cfg, field))
+#define MFCFG_RD(sc, field) REG_RD(sc, MFCFG_ADDR(sc, field))
+#define MFCFG_RD16(sc, field) REG_RD16(sc, MFCFG_ADDR(sc, field))
+#define MFCFG_WR(sc, field, val) REG_WR(sc, MFCFG_ADDR(sc, field), val)
+
+/* DMAE command defines */
+
+#define DMAE_TIMEOUT -1
+#define DMAE_PCI_ERROR -2 /* E2 and onward */
+#define DMAE_NOT_RDY -3
+#define DMAE_PCI_ERR_FLAG 0x80000000
+
+#define DMAE_SRC_PCI 0
+#define DMAE_SRC_GRC 1
+
+#define DMAE_DST_NONE 0
+#define DMAE_DST_PCI 1
+#define DMAE_DST_GRC 2
+
+#define DMAE_COMP_PCI 0
+#define DMAE_COMP_GRC 1
+
+#define DMAE_COMP_REGULAR 0
+#define DMAE_COM_SET_ERR 1
+
+#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << DMAE_COMMAND_SRC_SHIFT)
+#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << DMAE_COMMAND_SRC_SHIFT)
+#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << DMAE_COMMAND_DST_SHIFT)
+#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << DMAE_COMMAND_DST_SHIFT)
+
+#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << DMAE_COMMAND_C_DST_SHIFT)
+#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << DMAE_COMMAND_C_DST_SHIFT)
+
+#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
+
+#define DMAE_CMD_PORT_0 0
+#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT
+
+#define DMAE_SRC_PF 0
+#define DMAE_SRC_VF 1
+
+#define DMAE_DST_PF 0
+#define DMAE_DST_VF 1
+
+#define DMAE_C_SRC 0
+#define DMAE_C_DST 1
+
+#define DMAE_LEN32_RD_MAX 0x80
+#define DMAE_LEN32_WR_MAX(sc) 0x2000
+
+#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and beyond, upper bit indicates error */
+
+#define MAX_DMAE_C_PER_PORT 8
+#define INIT_DMAE_C(sc) ((SC_PORT(sc) * MAX_DMAE_C_PER_PORT) + SC_VN(sc))
+#define PMF_DMAE_C(sc) ((SC_PORT(sc) * MAX_DMAE_C_PER_PORT) + E1HVN_MAX)
+
+static const uint32_t dmae_reg_go_c[] = {
+ DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+ DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+ DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+ DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
+
+#define ATTN_NIG_FOR_FUNC (1L << 8)
+#define ATTN_SW_TIMER_4_FUNC (1L << 9)
+#define GPIO_2_FUNC (1L << 10)
+#define GPIO_3_FUNC (1L << 11)
+#define GPIO_4_FUNC (1L << 12)
+#define ATTN_GENERAL_ATTN_1 (1L << 13)
+#define ATTN_GENERAL_ATTN_2 (1L << 14)
+#define ATTN_GENERAL_ATTN_3 (1L << 15)
+#define ATTN_GENERAL_ATTN_4 (1L << 13)
+#define ATTN_GENERAL_ATTN_5 (1L << 14)
+#define ATTN_GENERAL_ATTN_6 (1L << 15)
+#define ATTN_HARD_WIRED_MASK 0xff00
+#define ATTENTION_ID 4
+
+#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
+
+#define MAX_IGU_ATTN_ACK_TO 100
+
+#define STORM_ASSERT_ARRAY_SIZE 50
+
+#define BNX2X_PMF_LINK_ASSERT(sc) \
+ GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + SC_FUNC(sc))
+
+#define BNX2X_MC_ASSERT_BITS \
+ (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
+
+#define BNX2X_MCP_ASSERT \
+ GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
+
+#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
+#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
+
+#define MULTI_MASK 0x7f
+
+#define PFS_PER_PORT(sc) \
+ ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4)
+#define SC_MAX_VN_NUM(sc) PFS_PER_PORT(sc)
+
+#define FIRST_ABS_FUNC_IN_PORT(sc) \
+ ((CHIP_PORT_MODE(sc) == CHIP_PORT_MODE_NONE) ? \
+ PORT_ID(sc) : (PATH_ID(sc) + (2 * PORT_ID(sc))))
+
+#define FOREACH_ABS_FUNC_IN_PORT(sc, i) \
+ for ((i) = FIRST_ABS_FUNC_IN_PORT(sc); \
+ (i) < MAX_FUNC_NUM; \
+ (i) += (MAX_FUNC_NUM / PFS_PER_PORT(sc)))
+
+#define BNX2X_SWCID_SHIFT 17
+#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1)
+
+#define SW_CID(x) (le32toh(x) & BNX2X_SWCID_MASK)
+#define CQE_CMD(x) (le32toh(x) >> COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
+
+#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
+#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
+#define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
+#define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
+#define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
+
+/* must be used on a CID before placing it on a HW ring */
+#define HW_CID(sc, x) \
+ ((SC_PORT(sc) << 23) | (SC_VN(sc) << BNX2X_SWCID_SHIFT) | (x))
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define SPEED_10000 10000
+
+#define PCI_PM_D0 1
+#define PCI_PM_D3hot 2
+
+int bnx2x_test_bit(int nr, volatile unsigned long * addr);
+void bnx2x_set_bit(unsigned int nr, volatile unsigned long * addr);
+void bnx2x_clear_bit(int nr, volatile unsigned long * addr);
+int bnx2x_test_and_clear_bit(int nr, volatile unsigned long * addr);
+int bnx2x_cmpxchg(volatile int *addr, int old, int new);
+
+int bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size,
+ struct bnx2x_dma *dma, const char *msg, uint32_t align);
+
+uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type);
+uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode);
+uint32_t bnx2x_dmae_opcode(struct bnx2x_softc *sc, uint8_t src_type,
+ uint8_t dst_type, uint8_t with_comp,
+ uint8_t comp_type);
+void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx);
+void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32);
+void bnx2x_write_dmae(struct bnx2x_softc *sc, rte_iova_t dma_addr,
+ uint32_t dst_addr, uint32_t len32);
+void bnx2x_set_ctx_validation(struct bnx2x_softc *sc, struct eth_context *cxt,
+ uint32_t cid);
+void bnx2x_update_coalesce_sb_index(struct bnx2x_softc *sc, uint8_t fw_sb_id,
+ uint8_t sb_index, uint8_t disable,
+ uint16_t usec);
+
+int bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid,
+ uint32_t data_hi, uint32_t data_lo, int cmd_type);
+
+void ecore_init_e1h_firmware(struct bnx2x_softc *sc);
+void ecore_init_e2_firmware(struct bnx2x_softc *sc);
+
+void ecore_storm_memset_struct(struct bnx2x_softc *sc, uint32_t addr,
+ size_t size, uint32_t *data);
+
+#define CATC_TRIGGER(sc, data) REG_WR((sc), 0x2000, (data));
+#define CATC_TRIGGER_START(sc) CATC_TRIGGER((sc), 0xcafecafe)
+
+#define BNX2X_MAC_FMT "%pM"
+#define BNX2X_MAC_PRN_LIST(mac) (mac)
+
+/***********/
+/* INLINES */
+/***********/
+
+static inline uint32_t
+reg_poll(struct bnx2x_softc *sc, uint32_t reg, uint32_t expected, int ms, int wait)
+{
+ uint32_t val;
+ do {
+ val = REG_RD(sc, reg);
+ if (val == expected) {
+ break;
+ }
+ ms -= wait;
+ DELAY(wait * 1000);
+ } while (ms > 0);
+
+ return val;
+}
+
+static inline void
+bnx2x_update_fp_sb_idx(struct bnx2x_fastpath *fp)
+{
+ mb(); /* status block is written to by the chip */
+ fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
+}
+
+static inline void
+bnx2x_igu_ack_sb_gen(struct bnx2x_softc *sc, uint8_t segment,
+ uint16_t index, uint8_t op, uint8_t update, uint32_t igu_addr)
+{
+ struct igu_regular cmd_data = {0};
+
+ cmd_data.sb_id_and_flags =
+ ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
+ (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+ (update << IGU_REGULAR_BUPDATE_SHIFT) |
+ (op << IGU_REGULAR_ENABLE_INT_SHIFT));
+
+ REG_WR(sc, igu_addr, cmd_data.sb_id_and_flags);
+
+ /* Make sure that ACK is written */
+ mb();
+}
+
+static inline void
+bnx2x_hc_ack_sb(struct bnx2x_softc *sc, uint8_t sb_id, uint8_t storm,
+ uint16_t index, uint8_t op, uint8_t update)
+{
+ uint32_t hc_addr = (HC_REG_COMMAND_REG + SC_PORT(sc) * 32 +
+ COMMAND_REG_INT_ACK);
+ union igu_ack_register igu_ack;
+
+ igu_ack.sb.status_block_index = index;
+ igu_ack.sb.sb_id_and_flags =
+ ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+ (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
+ (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+ (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
+
+ REG_WR(sc, hc_addr, igu_ack.raw_data);
+
+ /* Make sure that ACK is written */
+ mb();
+}
+
+static inline uint32_t
+bnx2x_hc_ack_int(struct bnx2x_softc *sc)
+{
+ uint32_t hc_addr = (HC_REG_COMMAND_REG + SC_PORT(sc) * 32 +
+ COMMAND_REG_SIMD_MASK);
+ uint32_t result = REG_RD(sc, hc_addr);
+
+ mb();
+ return result;
+}
+
+static inline uint32_t
+bnx2x_igu_ack_int(struct bnx2x_softc *sc)
+{
+ uint32_t igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER * 8);
+ uint32_t result = REG_RD(sc, igu_addr);
+
+ /* PMD_PDEBUG_LOG(sc, DBG_INTR, "read 0x%08x from IGU addr 0x%x",
+ result, igu_addr); */
+
+ mb();
+ return result;
+}
+
+static inline uint32_t
+bnx2x_ack_int(struct bnx2x_softc *sc)
+{
+ mb();
+ if (sc->devinfo.int_block == INT_BLOCK_HC) {
+ return bnx2x_hc_ack_int(sc);
+ } else {
+ return bnx2x_igu_ack_int(sc);
+ }
+}
+
+static inline int
+func_by_vn(struct bnx2x_softc *sc, int vn)
+{
+ return 2 * vn + SC_PORT(sc);
+}
+
+/*
+ * send notification to other functions.
+ */
+static inline void
+bnx2x_link_sync_notify(struct bnx2x_softc *sc)
+{
+ int func, vn;
+
+ /* Set the attention towards other drivers on the same port */
+ for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
+ if (vn == SC_VN(sc))
+ continue;
+
+ func = func_by_vn(sc, vn);
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_0 +
+ (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func) * 4, 1);
+ }
+}
+
+/*
+ * Statistics ID are global per chip/path, while Client IDs for E1x
+ * are per port.
+ */
+static inline uint8_t
+bnx2x_stats_id(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x_softc *sc = fp->sc;
+
+ if (!CHIP_IS_E1x(sc)) {
+ return fp->cl_id;
+ }
+
+ return fp->cl_id + SC_PORT(sc) * FP_SB_MAX_E1x;
+}
+
+int bnx2x_init(struct bnx2x_softc *sc);
+void bnx2x_load_firmware(struct bnx2x_softc *sc);
+int bnx2x_attach(struct bnx2x_softc *sc);
+int bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link);
+int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc);
+int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc);
+void bnx2x_free_ilt_mem(struct bnx2x_softc *sc);
+void bnx2x_dump_tx_chain(struct bnx2x_fastpath * fp, int bd_prod, int count);
+int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0);
+uint8_t bnx2x_txeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp);
+void bnx2x_print_adapter_info(struct bnx2x_softc *sc);
+int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp);
+void bnx2x_link_status_update(struct bnx2x_softc *sc);
+int bnx2x_complete_sp(struct bnx2x_softc *sc);
+int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc);
+void bnx2x_periodic_callout(struct bnx2x_softc *sc);
+void bnx2x_periodic_stop(void *param);
+
+int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count);
+void bnx2x_vf_close(struct bnx2x_softc *sc);
+int bnx2x_vf_init(struct bnx2x_softc *sc);
+void bnx2x_vf_unload(struct bnx2x_softc *sc);
+int bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
+ int leading);
+void bnx2x_free_hsi_mem(struct bnx2x_softc *sc);
+int bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc);
+int bnx2x_check_bull(struct bnx2x_softc *sc);
+
+//#define BNX2X_PULSE
+
+#define BNX2X_PCI_CAP 1
+#define BNX2X_PCI_ECAP 2
+
+static inline struct bnx2x_pci_cap*
+pci_find_cap(struct bnx2x_softc *sc, uint8_t id, uint8_t type)
+{
+ struct bnx2x_pci_cap *cap = sc->pci_caps;
+
+ while (cap) {
+ if (cap->id == id && cap->type == type)
+ return cap;
+ cap = cap->next;
+ }
+
+ return NULL;
+}
+
+static inline void
+bnx2x_set_rx_mode(struct bnx2x_softc *sc)
+{
+ if (sc->state == BNX2X_STATE_OPEN) {
+ if (IS_PF(sc)) {
+ bnx2x_set_storm_rx_mode(sc);
+ } else {
+ sc->rx_mode = BNX2X_RX_MODE_PROMISC;
+ bnx2x_vf_set_rx_mode(sc);
+ }
+ } else {
+ PMD_DRV_LOG(NOTICE, "Card is not ready to change mode");
+ }
+}
+
+static inline int pci_read(struct bnx2x_softc *sc, size_t addr,
+ void *val, uint8_t size)
+{
+ if (rte_pci_read_config(sc->pci_dev, val, size, addr) <= 0) {
+ PMD_DRV_LOG(ERR, "Can't read from PCI config space");
+ return ENXIO;
+ }
+
+ return 0;
+}
+
+static inline int pci_write_word(struct bnx2x_softc *sc, size_t addr, off_t val)
+{
+ uint16_t val16 = val;
+
+ if (rte_pci_write_config(sc->pci_dev, &val16,
+ sizeof(val16), addr) <= 0) {
+ PMD_DRV_LOG(ERR, "Can't write to PCI config space");
+ return ENXIO;
+ }
+
+ return 0;
+}
+
+static inline int pci_write_long(struct bnx2x_softc *sc, size_t addr, off_t val)
+{
+ uint32_t val32 = val;
+ if (rte_pci_write_config(sc->pci_dev, &val32,
+ sizeof(val32), addr) <= 0) {
+ PMD_DRV_LOG(ERR, "Can't write to PCI config space");
+ return ENXIO;
+ }
+
+ return 0;
+}
+
+#endif /* __BNX2X_H__ */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c
new file mode 100644
index 00000000..575271a8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -0,0 +1,746 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bnx2x.h"
+#include "bnx2x_rxtx.h"
+
+#include <rte_dev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_alarm.h>
+
+int bnx2x_logtype_init;
+int bnx2x_logtype_driver;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+#define BROADCOM_PCI_VENDOR_ID 0x14E4
+static const struct rte_pci_id pci_id_bnx2x_map[] = {
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) },
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) },
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) },
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) },
+#ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) },
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) },
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
+#endif
+ { .vendor_id = 0, }
+};
+
+static const struct rte_pci_id pci_id_bnx2xvf_map[] = {
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) },
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
+ { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
+ { .vendor_id = 0, }
+};
+
+struct rte_bnx2x_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint32_t offset_hi;
+ uint32_t offset_lo;
+};
+
+static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
+ {"rx_buffer_drops",
+ offsetof(struct bnx2x_eth_stats, brb_drop_hi),
+ offsetof(struct bnx2x_eth_stats, brb_drop_lo)},
+ {"rx_buffer_truncates",
+ offsetof(struct bnx2x_eth_stats, brb_truncate_hi),
+ offsetof(struct bnx2x_eth_stats, brb_truncate_lo)},
+ {"rx_buffer_truncate_discard",
+ offsetof(struct bnx2x_eth_stats, brb_truncate_discard),
+ offsetof(struct bnx2x_eth_stats, brb_truncate_discard)},
+ {"mac_filter_discard",
+ offsetof(struct bnx2x_eth_stats, mac_filter_discard),
+ offsetof(struct bnx2x_eth_stats, mac_filter_discard)},
+ {"no_match_vlan_tag_discard",
+ offsetof(struct bnx2x_eth_stats, mf_tag_discard),
+ offsetof(struct bnx2x_eth_stats, mf_tag_discard)},
+ {"tx_pause",
+ offsetof(struct bnx2x_eth_stats, pause_frames_sent_hi),
+ offsetof(struct bnx2x_eth_stats, pause_frames_sent_lo)},
+ {"rx_pause",
+ offsetof(struct bnx2x_eth_stats, pause_frames_received_hi),
+ offsetof(struct bnx2x_eth_stats, pause_frames_received_lo)},
+ {"tx_priority_flow_control",
+ offsetof(struct bnx2x_eth_stats, pfc_frames_sent_hi),
+ offsetof(struct bnx2x_eth_stats, pfc_frames_sent_lo)},
+ {"rx_priority_flow_control",
+ offsetof(struct bnx2x_eth_stats, pfc_frames_received_hi),
+ offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
+};
+
+static int
+bnx2x_link_update(struct rte_eth_dev *dev)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ struct rte_eth_link link;
+
+ PMD_INIT_FUNC_TRACE();
+
+ bnx2x_link_status_update(sc);
+ memset(&link, 0, sizeof(link));
+ mb();
+ link.link_speed = sc->link_vars.line_speed;
+ switch (sc->link_vars.duplex) {
+ case DUPLEX_FULL:
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case DUPLEX_HALF:
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ }
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ link.link_status = sc->link_vars.link_up;
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static void
+bnx2x_interrupt_action(struct rte_eth_dev *dev)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ uint32_t link_status;
+
+ bnx2x_intr_legacy(sc, 0);
+
+ if (sc->periodic_flags & PERIODIC_GO)
+ bnx2x_periodic_callout(sc);
+ link_status = REG_RD(sc, sc->link_params.shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[sc->link_params.port].link_status));
+ if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
+ bnx2x_link_update(dev);
+}
+
+static void
+bnx2x_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct bnx2x_softc *sc = dev->data->dev_private;
+
+ PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
+
+ bnx2x_interrupt_action(dev);
+ rte_intr_enable(&sc->pci_dev->intr_handle);
+}
+
+static void bnx2x_periodic_start(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ int ret = 0;
+
+ atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
+ bnx2x_interrupt_action(dev);
+ if (IS_PF(sc)) {
+ ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
+ bnx2x_periodic_start, (void *)dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to start periodic"
+ " timer rc %d", ret);
+ assert(false && "Unable to start periodic timer");
+ }
+ }
+}
+
+void bnx2x_periodic_stop(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct bnx2x_softc *sc = dev->data->dev_private;
+
+ atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
+
+ rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev);
+}
+
+/*
+ * Devops - helper functions can be called from user application
+ */
+
+static int
+bnx2x_dev_configure(struct rte_eth_dev *dev)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+ int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "The number of TX queues is greater than number of RX queues");
+ return -EINVAL;
+ }
+
+ sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
+ if (sc->num_queues > mp_ncpus) {
+ PMD_DRV_LOG(ERR, "The number of queues is more than number of CPUs");
+ return -EINVAL;
+ }
+
+ PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
+ sc->num_queues, sc->mtu);
+
+ /* allocate ilt */
+ if (bnx2x_alloc_ilt_mem(sc) != 0) {
+ PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem was failed");
+ return -ENXIO;
+ }
+
+ /* allocate the host hardware/software hsi structures */
+ if (bnx2x_alloc_hsi_mem(sc) != 0) {
+ PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem was failed");
+ bnx2x_free_ilt_mem(sc);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int
+bnx2x_dev_start(struct rte_eth_dev *dev)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* start the periodic callout */
+ if (sc->periodic_flags & PERIODIC_STOP)
+ bnx2x_periodic_start(dev);
+
+ ret = bnx2x_init(sc);
+ if (ret) {
+ PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
+ return -1;
+ }
+
+ if (IS_PF(sc)) {
+ rte_intr_callback_register(&sc->pci_dev->intr_handle,
+ bnx2x_interrupt_handler, (void *)dev);
+
+ if (rte_intr_enable(&sc->pci_dev->intr_handle))
+ PMD_DRV_LOG(ERR, "rte_intr_enable failed");
+ }
+
+ ret = bnx2x_dev_rx_init(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code");
+ return -3;
+ }
+
+ /* Print important adapter info for the user. */
+ bnx2x_print_adapter_info(sc);
+
+ return ret;
+}
+
+static void
+bnx2x_dev_stop(struct rte_eth_dev *dev)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (IS_PF(sc)) {
+ rte_intr_disable(&sc->pci_dev->intr_handle);
+ rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
+ bnx2x_interrupt_handler, (void *)dev);
+ }
+
+ /* stop the periodic callout */
+ bnx2x_periodic_stop(dev);
+
+ ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
+ if (ret) {
+ PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
+ return;
+ }
+
+ return;
+}
+
+static void
+bnx2x_dev_close(struct rte_eth_dev *dev)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (IS_VF(sc))
+ bnx2x_vf_close(sc);
+
+ bnx2x_dev_clear_queues(dev);
+ memset(&(dev->data->dev_link), 0 , sizeof(struct rte_eth_link));
+
+ /* free the host hardware/software hsi structures */
+ bnx2x_free_hsi_mem(sc);
+
+ /* free ilt */
+ bnx2x_free_ilt_mem(sc);
+}
+
+static void
+bnx2x_promisc_enable(struct rte_eth_dev *dev)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ sc->rx_mode = BNX2X_RX_MODE_PROMISC;
+ if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
+ sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
+ bnx2x_set_rx_mode(sc);
+}
+
+static void
+bnx2x_promisc_disable(struct rte_eth_dev *dev)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ sc->rx_mode = BNX2X_RX_MODE_NORMAL;
+ if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
+ sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ bnx2x_set_rx_mode(sc);
+}
+
+static void
+bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
+ sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
+ bnx2x_set_rx_mode(sc);
+}
+
+static void
+bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ sc->rx_mode = BNX2X_RX_MODE_NORMAL;
+ if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
+ sc->rx_mode = BNX2X_RX_MODE_PROMISC;
+ bnx2x_set_rx_mode(sc);
+}
+
+static int
+bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return bnx2x_link_update(dev);
+}
+
+static int
+bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ int ret = 0;
+
+ ret = bnx2x_link_update(dev);
+
+ bnx2x_check_bull(sc);
+ if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
+ PMD_DRV_LOG(ERR, "PF indicated channel is down."
+ "VF device is no longer operational");
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ }
+
+ return ret;
+}
+
+static int
+bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ uint32_t brb_truncate_discard;
+ uint64_t brb_drops;
+ uint64_t brb_truncates;
+
+ PMD_INIT_FUNC_TRACE();
+
+ bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
+
+ memset(stats, 0, sizeof (struct rte_eth_stats));
+
+ stats->ipackets =
+ HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
+ sc->eth_stats.total_unicast_packets_received_lo) +
+ HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
+ sc->eth_stats.total_multicast_packets_received_lo) +
+ HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
+ sc->eth_stats.total_broadcast_packets_received_lo);
+
+ stats->opackets =
+ HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
+ sc->eth_stats.total_unicast_packets_transmitted_lo) +
+ HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
+ sc->eth_stats.total_multicast_packets_transmitted_lo) +
+ HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
+ sc->eth_stats.total_broadcast_packets_transmitted_lo);
+
+ stats->ibytes =
+ HILO_U64(sc->eth_stats.total_bytes_received_hi,
+ sc->eth_stats.total_bytes_received_lo);
+
+ stats->obytes =
+ HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
+ sc->eth_stats.total_bytes_transmitted_lo);
+
+ stats->ierrors =
+ HILO_U64(sc->eth_stats.error_bytes_received_hi,
+ sc->eth_stats.error_bytes_received_lo);
+
+ stats->oerrors = 0;
+
+ stats->rx_nombuf =
+ HILO_U64(sc->eth_stats.no_buff_discard_hi,
+ sc->eth_stats.no_buff_discard_lo);
+
+ brb_drops =
+ HILO_U64(sc->eth_stats.brb_drop_hi,
+ sc->eth_stats.brb_drop_lo);
+
+ brb_truncates =
+ HILO_U64(sc->eth_stats.brb_truncate_hi,
+ sc->eth_stats.brb_truncate_lo);
+
+ brb_truncate_discard = sc->eth_stats.brb_truncate_discard;
+
+ stats->imissed = brb_drops + brb_truncates +
+ brb_truncate_discard + stats->rx_nombuf;
+
+ return 0;
+}
+
+static int
+bnx2x_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned limit)
+{
+ unsigned int i, stat_cnt = RTE_DIM(bnx2x_xstats_strings);
+
+ if (xstats_names != NULL)
+ for (i = 0; i < stat_cnt; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s",
+ bnx2x_xstats_strings[i].name);
+
+ return stat_cnt;
+}
+
+static int
+bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ unsigned int num = RTE_DIM(bnx2x_xstats_strings);
+
+ if (n < num)
+ return num;
+
+ bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
+
+ for (num = 0; num < n; num++) {
+ if (bnx2x_xstats_strings[num].offset_hi !=
+ bnx2x_xstats_strings[num].offset_lo)
+ xstats[num].value = HILO_U64(
+ *(uint32_t *)((char *)&sc->eth_stats +
+ bnx2x_xstats_strings[num].offset_hi),
+ *(uint32_t *)((char *)&sc->eth_stats +
+ bnx2x_xstats_strings[num].offset_lo));
+ else
+ xstats[num].value =
+ *(uint64_t *)((char *)&sc->eth_stats +
+ bnx2x_xstats_strings[num].offset_lo);
+ xstats[num].id = num;
+ }
+
+ return num;
+}
+
+static void
+bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ dev_info->max_rx_queues = sc->max_rx_queues;
+ dev_info->max_tx_queues = sc->max_tx_queues;
+ dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
+ dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
+ dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
+ dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
+}
+
+static int
+bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+
+ if (sc->mac_ops.mac_addr_add) {
+ sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
+ return 0;
+ }
+ return -ENOTSUP;
+}
+
+static void
+bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+
+ if (sc->mac_ops.mac_addr_remove)
+ sc->mac_ops.mac_addr_remove(dev, index);
+}
+
+static const struct eth_dev_ops bnx2x_eth_dev_ops = {
+ .dev_configure = bnx2x_dev_configure,
+ .dev_start = bnx2x_dev_start,
+ .dev_stop = bnx2x_dev_stop,
+ .dev_close = bnx2x_dev_close,
+ .promiscuous_enable = bnx2x_promisc_enable,
+ .promiscuous_disable = bnx2x_promisc_disable,
+ .allmulticast_enable = bnx2x_dev_allmulticast_enable,
+ .allmulticast_disable = bnx2x_dev_allmulticast_disable,
+ .link_update = bnx2x_dev_link_update,
+ .stats_get = bnx2x_dev_stats_get,
+ .xstats_get = bnx2x_dev_xstats_get,
+ .xstats_get_names = bnx2x_get_xstats_names,
+ .dev_infos_get = bnx2x_dev_infos_get,
+ .rx_queue_setup = bnx2x_dev_rx_queue_setup,
+ .rx_queue_release = bnx2x_dev_rx_queue_release,
+ .tx_queue_setup = bnx2x_dev_tx_queue_setup,
+ .tx_queue_release = bnx2x_dev_tx_queue_release,
+ .mac_addr_add = bnx2x_mac_addr_add,
+ .mac_addr_remove = bnx2x_mac_addr_remove,
+};
+
+/*
+ * dev_ops for virtual function
+ */
+static const struct eth_dev_ops bnx2xvf_eth_dev_ops = {
+ .dev_configure = bnx2x_dev_configure,
+ .dev_start = bnx2x_dev_start,
+ .dev_stop = bnx2x_dev_stop,
+ .dev_close = bnx2x_dev_close,
+ .promiscuous_enable = bnx2x_promisc_enable,
+ .promiscuous_disable = bnx2x_promisc_disable,
+ .allmulticast_enable = bnx2x_dev_allmulticast_enable,
+ .allmulticast_disable = bnx2x_dev_allmulticast_disable,
+ .link_update = bnx2xvf_dev_link_update,
+ .stats_get = bnx2x_dev_stats_get,
+ .xstats_get = bnx2x_dev_xstats_get,
+ .xstats_get_names = bnx2x_get_xstats_names,
+ .dev_infos_get = bnx2x_dev_infos_get,
+ .rx_queue_setup = bnx2x_dev_rx_queue_setup,
+ .rx_queue_release = bnx2x_dev_rx_queue_release,
+ .tx_queue_setup = bnx2x_dev_tx_queue_setup,
+ .tx_queue_release = bnx2x_dev_tx_queue_release,
+ .mac_addr_add = bnx2x_mac_addr_add,
+ .mac_addr_remove = bnx2x_mac_addr_remove,
+};
+
+
+static int
+bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
+{
+ int ret = 0;
+ struct rte_pci_device *pci_dev;
+ struct bnx2x_softc *sc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ sc = eth_dev->data->dev_private;
+ sc->pcie_bus = pci_dev->addr.bus;
+ sc->pcie_device = pci_dev->addr.devid;
+
+ if (is_vf)
+ sc->flags = BNX2X_IS_VF_FLAG;
+
+ sc->devinfo.vendor_id = pci_dev->id.vendor_id;
+ sc->devinfo.device_id = pci_dev->id.device_id;
+ sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
+ sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;
+
+ sc->pcie_func = pci_dev->addr.function;
+ sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
+ if (is_vf)
+ sc->bar[BAR1].base_addr = (void *)
+ ((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
+ else
+ sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;
+
+ assert(sc->bar[BAR0].base_addr);
+ assert(sc->bar[BAR1].base_addr);
+
+ bnx2x_load_firmware(sc);
+ assert(sc->firmware);
+
+ if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ sc->udp_rss = 1;
+
+ sc->rx_budget = BNX2X_RX_BUDGET;
+ sc->hc_rx_ticks = BNX2X_RX_TICKS;
+ sc->hc_tx_ticks = BNX2X_TX_TICKS;
+
+ sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
+ sc->rx_mode = BNX2X_RX_MODE_NORMAL;
+
+ sc->pci_dev = pci_dev;
+ ret = bnx2x_attach(sc);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "bnx2x_attach failed (%d)", ret);
+ return ret;
+ }
+
+ /* schedule periodic poll for slowpath link events */
+ if (IS_PF(sc)) {
+ ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
+ bnx2x_periodic_start, (void *)eth_dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to start periodic"
+ " timer rc %d", ret);
+ return -EINVAL;
+ }
+ }
+
+ eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;
+
+ PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
+ sc->pcie_bus, sc->pcie_device);
+ PMD_DRV_LOG(INFO, "bar0.addr=%p, bar1.addr=%p",
+ sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
+ PMD_DRV_LOG(INFO, "port=%d, path=%d, vnic=%d, func=%d",
+ PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
+ PMD_DRV_LOG(INFO, "portID=%d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id);
+
+ if (IS_VF(sc)) {
+ rte_spinlock_init(&sc->vf2pf_lock);
+
+ ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
+ &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
+ RTE_CACHE_LINE_SIZE);
+ if (ret)
+ goto out;
+
+ sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
+ sc->vf2pf_mbox_mapping.vaddr;
+
+ ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
+ &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
+ RTE_CACHE_LINE_SIZE);
+ if (ret)
+ goto out;
+
+ sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
+ sc->pf2vf_bulletin_mapping.vaddr;
+
+ ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
+ sc->max_rx_queues);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ bnx2x_periodic_stop(eth_dev);
+ return ret;
+}
+
+static int
+eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ return bnx2x_common_dev_init(eth_dev, 0);
+}
+
+static int
+eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ return bnx2x_common_dev_init(eth_dev, 1);
+}
+
+static struct rte_pci_driver rte_bnx2x_pmd;
+static struct rte_pci_driver rte_bnx2xvf_pmd;
+
+static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ if (pci_drv == &rte_bnx2x_pmd)
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct bnx2x_softc), eth_bnx2x_dev_init);
+ else if (pci_drv == &rte_bnx2xvf_pmd)
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct bnx2x_softc), eth_bnx2xvf_dev_init);
+ else
+ return -EINVAL;
+}
+
+static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_bnx2x_pmd = {
+ .id_table = pci_id_bnx2x_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_bnx2x_pci_probe,
+ .remove = eth_bnx2x_pci_remove,
+};
+
+/*
+ * virtual function driver struct
+ */
+static struct rte_pci_driver rte_bnx2xvf_pmd = {
+ .id_table = pci_id_bnx2xvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_bnx2x_pci_probe,
+ .remove = eth_bnx2x_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci");
+
+RTE_INIT(bnx2x_init_log)
+{
+ bnx2x_logtype_init = rte_log_register("pmd.net.bnx2x.init");
+ if (bnx2x_logtype_init >= 0)
+ rte_log_set_level(bnx2x_logtype_init, RTE_LOG_NOTICE);
+ bnx2x_logtype_driver = rte_log_register("pmd.net.bnx2x.driver");
+ if (bnx2x_logtype_driver >= 0)
+ rte_log_set_level(bnx2x_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.h b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.h
new file mode 100644
index 00000000..807ba178
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef PMD_BNX2X_ETHDEV_H
+#define PMD_BNX2X_ETHDEV_H
+
+#include <sys/queue.h>
+#include <sys/param.h>
+#include <sys/user.h>
+#include <sys/stat.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+#include <rte_ethdev_driver.h>
+#include <rte_spinlock.h>
+#include <rte_eal.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+#include "bnx2x_rxtx.h"
+#include "bnx2x_logs.h"
+
+#define DELAY(x) rte_delay_us(x)
+#define DELAY_MS(x) rte_delay_ms(x)
+#define usec_delay(x) DELAY(x)
+#define msec_delay(x) DELAY(1000*(x))
+
+#define FALSE 0
+#define TRUE 1
+
+#define false 0
+#define true 1
+#define min(a,b) RTE_MIN(a,b)
+
+#define mb() rte_mb()
+#define wmb() rte_wmb()
+#define rmb() rte_rmb()
+
+#define MAX_QUEUES sysconf(_SC_NPROCESSORS_CONF)
+
+#define BNX2X_MIN_RX_BUF_SIZE 1024
+#define BNX2X_MAX_RX_PKT_LEN 15872
+#define BNX2X_MAX_MAC_ADDRS 1
+
+/* Hardware RX tick timer (usecs) */
+#define BNX2X_RX_TICKS 25
+/* Hardware TX tick timer (usecs) */
+#define BNX2X_TX_TICKS 50
+/* Maximum number of Rx packets to process at a time */
+#define BNX2X_RX_BUDGET 0xffffffff
+
+#define BNX2X_SP_TIMER_PERIOD US_PER_S /* 1 second */
+
+#endif
+
+/* MAC address operations */
+struct bnx2x_mac_ops {
+ void (*mac_addr_add)(struct rte_eth_dev *dev, struct ether_addr *addr,
+ uint16_t index, uint32_t pool); /* not implemented yet */
+ void (*mac_addr_remove)(struct rte_eth_dev *dev, uint16_t index); /* not implemented yet */
+};
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_logs.h b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_logs.h
new file mode 100644
index 00000000..9e232a9b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_logs.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef _PMD_LOGS_H_
+#define _PMD_LOGS_H_
+
+extern int bnx2x_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, bnx2x_logtype_init, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+extern int bnx2x_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, bnx2x_logtype_driver, \
+ "%s(): " fmt, __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+#define PMD_DEBUG_PERIODIC_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_DEBUG_PERIODIC_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#endif /* _PMD_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.c b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.c
new file mode 100644
index 00000000..d9a4127d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -0,0 +1,483 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bnx2x.h"
+#include "bnx2x_rxtx.h"
+
+static const struct rte_memzone *
+ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
+ uint16_t queue_id, uint32_t ring_size, int socket_id)
+{
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ dev->device->driver->name, ring_name,
+ dev->data->port_id, queue_id);
+
+ mz = rte_memzone_lookup(z_name);
+ if (mz)
+ return mz;
+
+ return rte_memzone_reserve_aligned(z_name, ring_size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, BNX2X_PAGE_SIZE);
+}
+
+static void
+bnx2x_rx_queue_release(struct bnx2x_rx_queue *rx_queue)
+{
+ uint16_t i;
+ struct rte_mbuf **sw_ring;
+
+ if (NULL != rx_queue) {
+
+ sw_ring = rx_queue->sw_ring;
+ if (NULL != sw_ring) {
+ for (i = 0; i < rx_queue->nb_rx_desc; i++) {
+ if (NULL != sw_ring[i])
+ rte_pktmbuf_free(sw_ring[i]);
+ }
+ rte_free(sw_ring);
+ }
+ rte_free(rx_queue);
+ }
+}
+
+void
+bnx2x_dev_rx_queue_release(void *rxq)
+{
+ bnx2x_rx_queue_release(rxq);
+}
+
+int
+bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ uint16_t j, idx;
+ const struct rte_memzone *dma;
+ struct bnx2x_rx_queue *rxq;
+ uint32_t dma_size;
+ struct rte_mbuf *mbuf;
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
+ struct eth_rx_cqe_next_page *nextpg;
+ rte_iova_t *rx_bd;
+ rte_iova_t busaddr;
+
+ /* First allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (NULL == rxq) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
+ return -ENOMEM;
+ }
+ rxq->sc = sc;
+ rxq->mb_pool = mp;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+
+ rxq->nb_rx_pages = 1;
+ while (USABLE_RX_BD(rxq) < nb_desc)
+ rxq->nb_rx_pages <<= 1;
+
+ rxq->nb_rx_desc = TOTAL_RX_BD(rxq);
+ sc->rx_ring_size = USABLE_RX_BD(rxq);
+ rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);
+
+ PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, usable_bd=%lu, "
+ "total_bd=%lu, rx_pages=%u, cq_pages=%u",
+ queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq),
+ (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
+ rxq->nb_cq_pages);
+
+ /* Allocate RX ring hardware descriptors */
+ dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd);
+ dma = ring_dma_zone_reserve(dev, "hw_ring", queue_idx, dma_size, socket_id);
+ if (NULL == dma) {
+ PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
+ bnx2x_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->iova;
+ rxq->rx_ring = (uint64_t*)dma->addr;
+ memset((void *)rxq->rx_ring, 0, dma_size);
+
+ /* Link the RX chain pages. */
+ for (j = 1; j <= rxq->nb_rx_pages; j++) {
+ rx_bd = &rxq->rx_ring[TOTAL_RX_BD_PER_PAGE * j - 2];
+ busaddr = rxq->rx_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_rx_pages);
+ *rx_bd = busaddr;
+ }
+
+ /* Allocate software ring */
+ dma_size = rxq->nb_rx_desc * sizeof(struct bnx2x_rx_entry);
+ rxq->sw_ring = rte_zmalloc_socket("sw_ring", dma_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (NULL == rxq->sw_ring) {
+ PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
+ bnx2x_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ /* Initialize software ring entries */
+ for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
+ mbuf = rte_mbuf_raw_alloc(mp);
+ if (NULL == mbuf) {
+ PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
+ (unsigned)rxq->queue_id, idx);
+ bnx2x_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ rxq->sw_ring[idx] = mbuf;
+ rxq->rx_ring[idx] =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ }
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+ rxq->rx_bd_head = 0;
+ rxq->rx_bd_tail = rxq->nb_rx_desc;
+
+ /* Allocate CQ chain. */
+ dma_size = BNX2X_RX_CHAIN_PAGE_SZ * rxq->nb_cq_pages;
+ dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
+ if (NULL == dma) {
+ PMD_RX_LOG(ERR, "RCQ alloc failed");
+ return -ENOMEM;
+ }
+ fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->iova;
+ rxq->cq_ring = (union eth_rx_cqe*)dma->addr;
+
+ /* Link the CQ chain pages. */
+ for (j = 1; j <= rxq->nb_cq_pages; j++) {
+ nextpg = &rxq->cq_ring[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1].next_page_cqe;
+ busaddr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_cq_pages);
+ nextpg->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
+ nextpg->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
+ }
+ rxq->rx_cq_head = 0;
+ rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq);
+
+ dev->data->rx_queues[queue_idx] = rxq;
+ if (!sc->rx_queues) sc->rx_queues = dev->data->rx_queues;
+
+ return 0;
+}
+
+static void
+bnx2x_tx_queue_release(struct bnx2x_tx_queue *tx_queue)
+{
+ uint16_t i;
+ struct rte_mbuf **sw_ring;
+
+ if (NULL != tx_queue) {
+
+ sw_ring = tx_queue->sw_ring;
+ if (NULL != sw_ring) {
+ for (i = 0; i < tx_queue->nb_tx_desc; i++) {
+ if (NULL != sw_ring[i])
+ rte_pktmbuf_free(sw_ring[i]);
+ }
+ rte_free(sw_ring);
+ }
+ rte_free(tx_queue);
+ }
+}
+
+void
+bnx2x_dev_tx_queue_release(void *txq)
+{
+ bnx2x_tx_queue_release(txq);
+}
+
+static uint16_t
+bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct bnx2x_tx_queue *txq;
+ struct bnx2x_softc *sc;
+ struct bnx2x_fastpath *fp;
+ uint16_t nb_tx_pkts;
+ uint16_t nb_pkt_sent = 0;
+ uint32_t ret;
+
+ txq = p_txq;
+ sc = txq->sc;
+ fp = &sc->fp[txq->queue_id];
+
+ if ((unlikely((txq->nb_tx_desc - txq->nb_tx_avail) >
+ txq->tx_free_thresh)))
+ bnx2x_txeof(sc, fp);
+
+ nb_tx_pkts = RTE_MIN(nb_pkts, txq->nb_tx_avail / BDS_PER_TX_PKT);
+ if (unlikely(nb_tx_pkts == 0))
+ return 0;
+
+ while (nb_tx_pkts--) {
+ struct rte_mbuf *m = *tx_pkts++;
+ assert(m != NULL);
+ ret = bnx2x_tx_encap(txq, m);
+ fp->tx_db.data.prod += ret;
+ nb_pkt_sent++;
+ }
+
+ bnx2x_update_fp_sb_idx(fp);
+ mb();
+ DOORBELL(sc, txq->queue_id, fp->tx_db.raw);
+ mb();
+
+ if ((txq->nb_tx_desc - txq->nb_tx_avail) >
+ txq->tx_free_thresh)
+ bnx2x_txeof(sc, fp);
+
+ return nb_pkt_sent;
+}
+
+int
+bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ uint16_t i;
+ unsigned int tsize;
+ const struct rte_memzone *tz;
+ struct bnx2x_tx_queue *txq;
+ struct eth_tx_next_bd *tx_n_bd;
+ uint64_t busaddr;
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
+
+ /* First allocate the tx queue data structure */
+ txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
+ RTE_CACHE_LINE_SIZE);
+ if (txq == NULL)
+ return -ENOMEM;
+ txq->sc = sc;
+
+ txq->nb_tx_pages = 1;
+ while (USABLE_TX_BD(txq) < nb_desc)
+ txq->nb_tx_pages <<= 1;
+
+ txq->nb_tx_desc = TOTAL_TX_BD(txq);
+ sc->tx_ring_size = TOTAL_TX_BD(txq);
+
+ txq->tx_free_thresh = tx_conf->tx_free_thresh ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
+ txq->tx_free_thresh = min(txq->tx_free_thresh,
+ txq->nb_tx_desc - BDS_PER_TX_PKT);
+
+ PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
+ "total_bd=%lu, tx_pages=%u",
+ queue_idx, nb_desc, txq->tx_free_thresh,
+ (unsigned long)USABLE_TX_BD(txq),
+ (unsigned long)TOTAL_TX_BD(txq), txq->nb_tx_pages);
+
+ /* Allocate TX ring hardware descriptors */
+ tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types);
+ tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
+ if (tz == NULL) {
+ bnx2x_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->iova;
+ txq->tx_ring = (union eth_tx_bd_types *) tz->addr;
+ memset(txq->tx_ring, 0, tsize);
+
+ /* Allocate software ring */
+ tsize = txq->nb_tx_desc * sizeof(struct rte_mbuf *);
+ txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
+ RTE_CACHE_LINE_SIZE);
+ if (txq->sw_ring == NULL) {
+ bnx2x_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ /* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */
+
+ /* Link TX pages */
+ for (i = 1; i <= txq->nb_tx_pages; i++) {
+ tx_n_bd = &txq->tx_ring[TOTAL_TX_BD_PER_PAGE * i - 1].next_bd;
+ busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
+ tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
+ tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
+ /* PMD_DRV_LOG(DEBUG, "link tx page %lu", (TOTAL_TX_BD_PER_PAGE * i - 1)); */
+ }
+
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->tx_pkt_tail = 0;
+ txq->tx_pkt_head = 0;
+ txq->tx_bd_tail = 0;
+ txq->tx_bd_head = 0;
+ txq->nb_tx_avail = txq->nb_tx_desc;
+ dev->tx_pkt_burst = bnx2x_xmit_pkts;
+ dev->data->tx_queues[queue_idx] = txq;
+ if (!sc->tx_queues) sc->tx_queues = dev->data->tx_queues;
+
+ return 0;
+}
+
+static inline void
+bnx2x_upd_rx_prod_fast(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
+ uint16_t rx_bd_prod, uint16_t rx_cq_prod)
+{
+ union ustorm_eth_rx_producers rx_prods;
+
+ rx_prods.prod.bd_prod = rx_bd_prod;
+ rx_prods.prod.cqe_prod = rx_cq_prod;
+
+ REG_WR(sc, fp->ustorm_rx_prods_offset, rx_prods.raw_data[0]);
+}
+
+static uint16_t
+bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct bnx2x_rx_queue *rxq = p_rxq;
+ struct bnx2x_softc *sc = rxq->sc;
+ struct bnx2x_fastpath *fp = &sc->fp[rxq->queue_id];
+ uint32_t nb_rx = 0;
+ uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
+ uint16_t bd_cons, bd_prod;
+ struct rte_mbuf *new_mb;
+ uint16_t rx_pref;
+ struct eth_fast_path_rx_cqe *cqe_fp;
+ uint16_t len, pad;
+ struct rte_mbuf *rx_mb = NULL;
+
+ hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
+ if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) ==
+ USABLE_RCQ_ENTRIES_PER_PAGE) {
+ ++hw_cq_cons;
+ }
+
+ bd_cons = rxq->rx_bd_head;
+ bd_prod = rxq->rx_bd_tail;
+ sw_cq_cons = rxq->rx_cq_head;
+ sw_cq_prod = rxq->rx_cq_tail;
+
+ if (sw_cq_cons == hw_cq_cons)
+ return 0;
+
+ while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {
+
+ bd_prod &= MAX_RX_BD(rxq);
+ bd_cons &= MAX_RX_BD(rxq);
+
+ cqe_fp = &rxq->cq_ring[sw_cq_cons & MAX_RX_BD(rxq)].fast_path_cqe;
+
+ if (unlikely(CQE_TYPE_SLOW(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_TYPE))) {
+ PMD_RX_LOG(ERR, "slowpath event during traffic processing");
+ break;
+ }
+
+ if (unlikely(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
+ PMD_RX_LOG(ERR, "flags 0x%x rx packet %u",
+ cqe_fp->type_error_flags, sw_cq_cons);
+ goto next_rx;
+ }
+
+ len = cqe_fp->pkt_len_or_gro_seg_len;
+ pad = cqe_fp->placement_offset;
+
+ new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (unlikely(!new_mb)) {
+ PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index);
+ rte_eth_devices[rxq->port_id].data->
+ rx_mbuf_alloc_failed++;
+ goto next_rx;
+ }
+
+ rx_mb = rxq->sw_ring[bd_cons];
+ rxq->sw_ring[bd_cons] = new_mb;
+ rxq->rx_ring[bd_prod] =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));
+
+ rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
+ rte_prefetch0(rxq->sw_ring[rx_pref]);
+ if ((rx_pref & 0x3) == 0) {
+ rte_prefetch0(&rxq->rx_ring[rx_pref]);
+ rte_prefetch0(&rxq->sw_ring[rx_pref]);
+ }
+
+ rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
+ rx_mb->nb_segs = 1;
+ rx_mb->next = NULL;
+ rx_mb->pkt_len = rx_mb->data_len = len;
+ rx_mb->port = rxq->port_id;
+ rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
+
+ /*
+ * If we received a packet with a vlan tag,
+ * attach that information to the packet.
+ */
+ if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
+ rx_mb->vlan_tci = cqe_fp->vlan_tag;
+ rx_mb->ol_flags |= PKT_RX_VLAN;
+ }
+
+ rx_pkts[nb_rx] = rx_mb;
+ nb_rx++;
+
+ /* limit spinning on the queue */
+ if (unlikely(nb_rx == sc->rx_budget)) {
+ PMD_RX_LOG(ERR, "Limit spinning on the queue");
+ break;
+ }
+
+next_rx:
+ bd_cons = NEXT_RX_BD(bd_cons);
+ bd_prod = NEXT_RX_BD(bd_prod);
+ sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod);
+ sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons);
+ }
+ rxq->rx_bd_head = bd_cons;
+ rxq->rx_bd_tail = bd_prod;
+ rxq->rx_cq_head = sw_cq_cons;
+ rxq->rx_cq_tail = sw_cq_prod;
+
+ bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod);
+
+ return nb_rx;
+}
+
+int
+bnx2x_dev_rx_init(struct rte_eth_dev *dev)
+{
+ dev->rx_pkt_burst = bnx2x_recv_pkts;
+
+ return 0;
+}
+
+void
+bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ uint8_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];
+ if (txq != NULL) {
+ bnx2x_tx_queue_release(txq);
+ dev->data->tx_queues[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct bnx2x_rx_queue *rxq = dev->data->rx_queues[i];
+ if (rxq != NULL) {
+ bnx2x_rx_queue_release(rxq);
+ dev->data->rx_queues[i] = NULL;
+ }
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.h b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.h
new file mode 100644
index 00000000..6ad4928c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef _BNX2X_RXTX_H_
+#define _BNX2X_RXTX_H_
+
+#define DEFAULT_TX_FREE_THRESH 64
+#define RTE_PMD_BNX2X_TX_MAX_BURST 1
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct bnx2x_rx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct bnx2x_rx_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ union eth_rx_cqe *cq_ring; /**< RCQ ring virtual address. */
+ uint64_t cq_ring_phys_addr; /**< RCQ ring DMA address. */
+ uint64_t *rx_ring; /**< RX ring virtual address. */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+ struct rte_mbuf **sw_ring; /**< address of RX software ring. */
+ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+ uint16_t nb_cq_pages; /**< number of RCQ pages. */
+ uint16_t nb_rx_desc; /**< number of RX descriptors. */
+ uint16_t nb_rx_pages; /**< number of RX pages. */
+ uint16_t rx_bd_head; /**< Index of current rx bd. */
+ uint16_t rx_bd_tail; /**< Index of last rx bd. */
+ uint16_t rx_cq_head; /**< Index of current rcq bd. */
+ uint16_t rx_cq_tail; /**< Index of last rcq bd. */
+ uint16_t queue_id; /**< RX queue index. */
+ uint16_t port_id; /**< Device port identifier. */
+ struct bnx2x_softc *sc; /**< Ptr to dev_private data. */
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct bnx2x_tx_queue {
+ /** TX ring virtual address. */
+ union eth_tx_bd_types *tx_ring; /**< TX ring virtual address. */
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
+ struct rte_mbuf **sw_ring; /**< virtual address of SW ring. */
+ uint16_t tx_pkt_tail; /**< Index of current tx pkt. */
+ uint16_t tx_pkt_head; /**< Index of last pkt counted by txeof. */
+ uint16_t tx_bd_tail; /**< Index of current tx bd. */
+ uint16_t tx_bd_head; /**< Index of last bd counted by txeof. */
+ uint16_t nb_tx_desc; /**< number of TX descriptors. */
+ uint16_t tx_free_thresh; /**< minimum TX before freeing. */
+ uint16_t nb_tx_avail; /**< Number of TX descriptors available. */
+ uint16_t nb_tx_pages; /**< number of TX pages */
+ uint16_t queue_id; /**< TX queue index. */
+ uint16_t port_id; /**< Device port identifier. */
+ struct bnx2x_softc *sc; /**< Ptr to dev_private data */
+};
+
+int bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+int bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+void bnx2x_dev_rx_queue_release(void *rxq);
+void bnx2x_dev_tx_queue_release(void *txq);
+int bnx2x_dev_rx_init(struct rte_eth_dev *dev);
+void bnx2x_dev_clear_queues(struct rte_eth_dev *dev);
+
+#endif /* _BNX2X_RXTX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.c b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.c
new file mode 100644
index 00000000..edc86ccc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.c
@@ -0,0 +1,1584 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bnx2x.h"
+#include "bnx2x_stats.h"
+
+#ifdef __i386__
+#define BITS_PER_LONG 32
+#else
+#define BITS_PER_LONG 64
+#endif
+
+static inline uint16_t
+bnx2x_get_port_stats_dma_len(struct bnx2x_softc *sc)
+{
+ uint16_t res = 0;
+ uint32_t size;
+
+ /* 'newest' convention - shmem2 contains the size of the port stats */
+ if (SHMEM2_HAS(sc, sizeof_port_stats)) {
+ size = SHMEM2_RD(sc, sizeof_port_stats);
+ if (size) {
+ res = size;
+ }
+
+ /* prevent newer BC from causing buffer overflow */
+ if (res > sizeof(struct host_port_stats)) {
+ res = sizeof(struct host_port_stats);
+ }
+ }
+
+ /*
+ * Older convention - all BCs support the port stats fields up until
+ * the 'not_used' field
+ */
+ if (!res) {
+ res = (offsetof(struct host_port_stats, not_used) + 4);
+
+ /* if PFC stats are supported by the MFW, DMA them as well */
+ if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
+ res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
+ offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
+ }
+ }
+
+ res >>= 2;
+
+ return res;
+}
+
+/*
+ * Init service functions
+ */
+
+/*
+ * Post the next statistics ramrod. Protect it with the lock in
+ * order to ensure the strict order between statistics ramrods
+ * (each ramrod has a sequence number passed in a
+ * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be
+ * sent in order).
+ */
+static void
+bnx2x_storm_stats_post(struct bnx2x_softc *sc)
+{
+ int rc;
+
+ if (!sc->stats_pending) {
+ if (sc->stats_pending) {
+ return;
+ }
+
+ sc->fw_stats_req->hdr.drv_stats_counter =
+ htole16(sc->stats_counter++);
+
+ PMD_DEBUG_PERIODIC_LOG(DEBUG,
+ "sending statistics ramrod %d",
+ le16toh(sc->fw_stats_req->hdr.drv_stats_counter));
+
+ /* adjust the ramrod to include VF queues statistics */
+
+ /* send FW stats ramrod */
+ rc = bnx2x_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
+ U64_HI(sc->fw_stats_req_mapping),
+ U64_LO(sc->fw_stats_req_mapping),
+ NONE_CONNECTION_TYPE);
+ if (rc == 0) {
+ sc->stats_pending = 1;
+ }
+ }
+}
+
+static void
+bnx2x_hw_stats_post(struct bnx2x_softc *sc)
+{
+ struct dmae_command *dmae = &sc->stats_dmae;
+ uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
+ int loader_idx;
+ uint32_t opcode;
+
+ *stats_comp = DMAE_COMP_VAL;
+ if (CHIP_REV_IS_SLOW(sc)) {
+ return;
+ }
+
+ /* Update MCP's statistics if possible */
+ if (sc->func_stx) {
+ rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats,
+ sizeof(sc->func_stats));
+ }
+
+ /* loader */
+ if (sc->executer_idx) {
+ loader_idx = PMF_DMAE_C(sc);
+ opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
+ TRUE, DMAE_COMP_GRC);
+ opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
+
+ memset(dmae, 0, sizeof(struct dmae_command));
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, dmae[0]));
+ dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, dmae[0]));
+ dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
+ sizeof(struct dmae_command) *
+ (loader_idx + 1)) >> 2);
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct dmae_command) >> 2;
+ dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2);
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ *stats_comp = 0;
+ bnx2x_post_dmae(sc, dmae, loader_idx);
+ } else if (sc->func_stx) {
+ *stats_comp = 0;
+ bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc));
+ }
+}
+
+static int
+bnx2x_stats_comp(struct bnx2x_softc *sc)
+{
+ uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
+ int cnt = 10;
+
+ while (*stats_comp != DMAE_COMP_VAL) {
+ if (!cnt) {
+ PMD_DRV_LOG(ERR, "Timeout waiting for stats finished");
+ break;
+ }
+
+ cnt--;
+ DELAY(1000);
+ }
+
+ return 1;
+}
+
+/*
+ * Statistics service functions
+ */
+
+static void
+bnx2x_stats_pmf_update(struct bnx2x_softc *sc)
+{
+ struct dmae_command *dmae;
+ uint32_t opcode;
+ int loader_idx = PMF_DMAE_C(sc);
+ uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
+
+ if (sc->devinfo.bc_ver <= 0x06001400) {
+ /*
+ * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing
+ * BRB registers while the BRB block is in reset. The DMA transfer
+ * below triggers this issue resulting in the DMAE to stop
+ * functioning. Skip this initial stats transfer for old bootcode
+ * versions <= 6.0.20.
+ */
+ return;
+ }
+ /* sanity */
+ if (!sc->port.pmf || !sc->port.port_stx) {
+ PMD_DRV_LOG(ERR, "BUG!");
+ return;
+ }
+
+ sc->executer_idx = 0;
+
+ opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0);
+
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
+ dmae->src_addr_lo = (sc->port.port_stx >> 2);
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
+ dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
+ dmae->len = DMAE_LEN32_RD_MAX;
+ dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
+ dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX);
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats) +
+ DMAE_LEN32_RD_MAX * 4);
+ dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats) +
+ DMAE_LEN32_RD_MAX * 4);
+ dmae->len = (bnx2x_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX);
+
+ dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ bnx2x_hw_stats_post(sc);
+ bnx2x_stats_comp(sc);
+}
+
+static void
+bnx2x_port_stats_init(struct bnx2x_softc *sc)
+{
+ struct dmae_command *dmae;
+ int port = SC_PORT(sc);
+ uint32_t opcode;
+ int loader_idx = PMF_DMAE_C(sc);
+ uint32_t mac_addr;
+ uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
+
+ /* sanity */
+ if (!sc->link_vars.link_up || !sc->port.pmf) {
+ PMD_DRV_LOG(ERR, "BUG!");
+ return;
+ }
+
+ sc->executer_idx = 0;
+
+ /* MCP */
+ opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
+ TRUE, DMAE_COMP_GRC);
+
+ if (sc->port.port_stx) {
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
+ dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
+ dmae->dst_addr_lo = sc->port.port_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = bnx2x_get_port_stats_dma_len(sc);
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ if (sc->func_stx) {
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
+ dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
+ dmae->dst_addr_lo = (sc->func_stx >> 2);
+ dmae->dst_addr_hi = 0;
+ dmae->len = (sizeof(struct host_func_stats) >> 2);
+ dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ /* MAC */
+ opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
+ TRUE, DMAE_COMP_GRC);
+
+ /* EMAC is special */
+ if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) {
+ mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
+
+ /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats));
+ dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats));
+ dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
+ dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ /* EMAC_REG_EMAC_RX_STAT_AC_28 */
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2);
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) +
+ offsetof(struct emac_stats,
+ rx_stat_falsecarriererrors));
+ dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) +
+ offsetof(struct emac_stats,
+ rx_stat_falsecarriererrors));
+ dmae->len = 1;
+ dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2);
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) +
+ offsetof(struct emac_stats,
+ tx_stat_ifhcoutoctets));
+ dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) +
+ offsetof(struct emac_stats,
+ tx_stat_ifhcoutoctets));
+ dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
+ dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ } else {
+ uint32_t tx_src_addr_lo, rx_src_addr_lo;
+ uint16_t rx_len, tx_len;
+
+ /* configure the params according to MAC type */
+ switch (sc->link_vars.mac_type) {
+ case ELINK_MAC_TYPE_BMAC:
+ mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+
+ /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
+ BIGMAC_REGISTER_TX_STAT_GTBYT */
+ if (CHIP_IS_E1x(sc)) {
+ tx_src_addr_lo =
+ ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
+ tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
+ BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
+ rx_src_addr_lo =
+ ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
+ rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
+ BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
+ } else {
+ tx_src_addr_lo =
+ ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
+ tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
+ BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
+ rx_src_addr_lo =
+ ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
+ rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
+ BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
+ }
+
+ break;
+
+ case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */
+ case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */
+ default:
+ mac_addr = (port) ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
+ tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2);
+ rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2);
+ tx_len =
+ (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2);
+ rx_len =
+ (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2);
+ break;
+ }
+
+ /* TX stats */
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = tx_src_addr_lo;
+ dmae->src_addr_hi = 0;
+ dmae->len = tx_len;
+ dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats));
+ dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats));
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ /* RX stats */
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_hi = 0;
+ dmae->src_addr_lo = rx_src_addr_lo;
+ dmae->dst_addr_lo =
+ U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
+ dmae->dst_addr_hi =
+ U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
+ dmae->len = rx_len;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ /* NIG */
+ if (!CHIP_IS_E3(sc)) {
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo =
+ (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
+ NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) +
+ offsetof(struct nig_stats,
+ egress_mac_pkt0_lo));
+ dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) +
+ offsetof(struct nig_stats,
+ egress_mac_pkt0_lo));
+ dmae->len = ((2 * sizeof(uint32_t)) >> 2);
+ dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo =
+ (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
+ NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) +
+ offsetof(struct nig_stats,
+ egress_mac_pkt1_lo));
+ dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) +
+ offsetof(struct nig_stats,
+ egress_mac_pkt1_lo));
+ dmae->len = ((2 * sizeof(uint32_t)) >> 2);
+ dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
+ TRUE, DMAE_COMP_PCI);
+ dmae->src_addr_lo =
+ (port ? NIG_REG_STAT1_BRB_DISCARD :
+ NIG_REG_STAT0_BRB_DISCARD) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats));
+ dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats));
+ dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2;
+
+ dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+}
+
+static void
+bnx2x_func_stats_init(struct bnx2x_softc *sc)
+{
+ struct dmae_command *dmae = &sc->stats_dmae;
+ uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
+
+ /* sanity */
+ if (!sc->func_stx) {
+ PMD_DRV_LOG(ERR, "BUG!");
+ return;
+ }
+
+ sc->executer_idx = 0;
+ memset(dmae, 0, sizeof(struct dmae_command));
+
+ dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
+ TRUE, DMAE_COMP_PCI);
+ dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
+ dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
+ dmae->dst_addr_lo = (sc->func_stx >> 2);
+ dmae->dst_addr_hi = 0;
+ dmae->len = (sizeof(struct host_func_stats) >> 2);
+ dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+}
+
+static void
+bnx2x_stats_start(struct bnx2x_softc *sc)
+{
+ /*
+ * VFs travel through here as part of the statistics FSM, but no action
+ * is required
+ */
+ if (IS_VF(sc)) {
+ return;
+ }
+
+ if (sc->port.pmf) {
+ bnx2x_port_stats_init(sc);
+ }
+
+ else if (sc->func_stx) {
+ bnx2x_func_stats_init(sc);
+ }
+
+ bnx2x_hw_stats_post(sc);
+ bnx2x_storm_stats_post(sc);
+}
+
+static void
+bnx2x_stats_pmf_start(struct bnx2x_softc *sc)
+{
+ bnx2x_stats_comp(sc);
+ bnx2x_stats_pmf_update(sc);
+ bnx2x_stats_start(sc);
+}
+
+static void
+bnx2x_stats_restart(struct bnx2x_softc *sc)
+{
+ /*
+ * VFs travel through here as part of the statistics FSM, but no action
+ * is required
+ */
+ if (IS_VF(sc)) {
+ return;
+ }
+
+ bnx2x_stats_comp(sc);
+ bnx2x_stats_start(sc);
+}
+
+static void
+bnx2x_bmac_stats_update(struct bnx2x_softc *sc)
+{
+ struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
+ struct bnx2x_eth_stats *estats = &sc->eth_stats;
+ struct {
+ uint32_t lo;
+ uint32_t hi;
+ } diff;
+
+ if (CHIP_IS_E1x(sc)) {
+ struct bmac1_stats *new = BNX2X_SP(sc, mac_stats.bmac1_stats);
+
+ /* the macros below will use "bmac1_stats" type */
+ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+ UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+ UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+ UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+ UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+ UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+ UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+ UPDATE_STAT64(tx_stat_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_STAT64(tx_stat_gt255,
+ tx_stat_etherstatspkts128octetsto255octets);
+ UPDATE_STAT64(tx_stat_gt511,
+ tx_stat_etherstatspkts256octetsto511octets);
+ UPDATE_STAT64(tx_stat_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ UPDATE_STAT64(tx_stat_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+ UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+ UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+ UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
+ UPDATE_STAT64(tx_stat_gterr,
+ tx_stat_dot3statsinternalmactransmiterrors);
+ UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+ } else {
+ struct bmac2_stats *new = BNX2X_SP(sc, mac_stats.bmac2_stats);
+ struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
+
+ /* the macros below will use "bmac2_stats" type */
+ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+ UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+ UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+ UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+ UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+ UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+ UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+ UPDATE_STAT64(tx_stat_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_STAT64(tx_stat_gt255,
+ tx_stat_etherstatspkts128octetsto255octets);
+ UPDATE_STAT64(tx_stat_gt511,
+ tx_stat_etherstatspkts256octetsto511octets);
+ UPDATE_STAT64(tx_stat_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ UPDATE_STAT64(tx_stat_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+ UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+ UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+ UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
+ UPDATE_STAT64(tx_stat_gterr,
+ tx_stat_dot3statsinternalmactransmiterrors);
+ UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+ /* collect PFC stats */
+ pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
+ pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
+ ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi,
+ pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo);
+
+ pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
+ pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
+ ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi,
+ pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo);
+ }
+
+ estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+ estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+ estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+ estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+ estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
+ estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
+ estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
+ estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
+}
+
+static void
+bnx2x_mstat_stats_update(struct bnx2x_softc *sc)
+{
+ struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
+ struct bnx2x_eth_stats *estats = &sc->eth_stats;
+ struct mstat_stats *new = BNX2X_SP(sc, mac_stats.mstat_stats);
+
+ ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
+ ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
+ ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
+ ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
+ ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
+ ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
+ ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
+ ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
+ ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
+ ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
+
+ /* collect pfc stats */
+ ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
+ pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
+ ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
+ pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
+
+ ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
+ ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets);
+ ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets);
+ ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets);
+ ADD_STAT64(stats_tx.tx_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ ADD_STAT64(stats_tx.tx_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
+
+ ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
+ ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
+ ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
+
+ ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors);
+ ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
+
+ estats->etherstatspkts1024octetsto1522octets_hi =
+ pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
+ estats->etherstatspkts1024octetsto1522octets_lo =
+ pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;
+
+ estats->etherstatspktsover1522octets_hi =
+ pstats->mac_stx[1].tx_stat_mac_2047_hi;
+ estats->etherstatspktsover1522octets_lo =
+ pstats->mac_stx[1].tx_stat_mac_2047_lo;
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ pstats->mac_stx[1].tx_stat_mac_4095_hi,
+ estats->etherstatspktsover1522octets_lo,
+ pstats->mac_stx[1].tx_stat_mac_4095_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ pstats->mac_stx[1].tx_stat_mac_9216_hi,
+ estats->etherstatspktsover1522octets_lo,
+ pstats->mac_stx[1].tx_stat_mac_9216_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ pstats->mac_stx[1].tx_stat_mac_16383_hi,
+ estats->etherstatspktsover1522octets_lo,
+ pstats->mac_stx[1].tx_stat_mac_16383_lo);
+
+ estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+ estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+ estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+ estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+ estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
+ estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
+ estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
+ estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
+}
+
+static void
+bnx2x_emac_stats_update(struct bnx2x_softc *sc)
+{
+ struct emac_stats *new = BNX2X_SP(sc, mac_stats.emac_stats);
+ struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
+ struct bnx2x_eth_stats *estats = &sc->eth_stats;
+
+ UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
+ UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
+ UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
+ UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
+ UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
+ UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
+ UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
+ UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
+ UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
+ UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
+ UPDATE_EXTEND_STAT(tx_stat_outxonsent);
+ UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
+ UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
+
+ estats->pause_frames_received_hi =
+ pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
+ estats->pause_frames_received_lo =
+ pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
+ ADD_64(estats->pause_frames_received_hi,
+ pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
+ estats->pause_frames_received_lo,
+ pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
+
+ estats->pause_frames_sent_hi =
+ pstats->mac_stx[1].tx_stat_outxonsent_hi;
+ estats->pause_frames_sent_lo =
+ pstats->mac_stx[1].tx_stat_outxonsent_lo;
+ ADD_64(estats->pause_frames_sent_hi,
+ pstats->mac_stx[1].tx_stat_outxoffsent_hi,
+ estats->pause_frames_sent_lo,
+ pstats->mac_stx[1].tx_stat_outxoffsent_lo);
+}
+
+static int
+bnx2x_hw_stats_update(struct bnx2x_softc *sc)
+{
+ struct nig_stats *new = BNX2X_SP(sc, nig_stats);
+ struct nig_stats *old = &(sc->port.old_nig_stats);
+ struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
+ struct bnx2x_eth_stats *estats = &sc->eth_stats;
+ uint32_t lpi_reg, nig_timer_max;
+ struct {
+ uint32_t lo;
+ uint32_t hi;
+ } diff;
+
+ switch (sc->link_vars.mac_type) {
+ case ELINK_MAC_TYPE_BMAC:
+ bnx2x_bmac_stats_update(sc);
+ break;
+
+ case ELINK_MAC_TYPE_EMAC:
+ bnx2x_emac_stats_update(sc);
+ break;
+
+ case ELINK_MAC_TYPE_UMAC:
+ case ELINK_MAC_TYPE_XMAC:
+ bnx2x_mstat_stats_update(sc);
+ break;
+
+ case ELINK_MAC_TYPE_NONE: /* unreached */
+ PMD_DRV_LOG(DEBUG,
+ "stats updated by DMAE but no MAC active");
+ return -1;
+
+ default: /* unreached */
+ PMD_DRV_LOG(ERR, "stats update failed, unknown MAC type");
+ }
+
+ ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
+ new->brb_discard - old->brb_discard);
+ ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+ new->brb_truncate - old->brb_truncate);
+
+ if (!CHIP_IS_E3(sc)) {
+ UPDATE_STAT64_NIG(egress_mac_pkt0,
+ etherstatspkts1024octetsto1522octets);
+ UPDATE_STAT64_NIG(egress_mac_pkt1,
+ etherstatspktsover1522octets);
+ }
+
+ rte_memcpy(old, new, sizeof(struct nig_stats));
+
+ rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
+ sizeof(struct mac_stx));
+ estats->brb_drop_hi = pstats->brb_drop_hi;
+ estats->brb_drop_lo = pstats->brb_drop_lo;
+
+ pstats->host_port_stats_counter++;
+
+ if (CHIP_IS_E3(sc)) {
+ lpi_reg = (SC_PORT(sc)) ?
+ MISC_REG_CPMU_LP_SM_ENT_CNT_P1 :
+ MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
+ estats->eee_tx_lpi += REG_RD(sc, lpi_reg);
+ }
+
+ if (!BNX2X_NOMCP(sc)) {
+ nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
+ if (nig_timer_max != estats->nig_timer_max) {
+ estats->nig_timer_max = nig_timer_max;
+ PMD_DRV_LOG(ERR, "invalid NIG timer max (%u)",
+ estats->nig_timer_max);
+ }
+ }
+
+ return 0;
+}
+
+static int
+bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
+{
+ struct stats_counter *counters = &sc->fw_stats_data->storm_counters;
+ uint16_t cur_stats_counter;
+
+ /*
+ * Make sure we use the value of the counter
+ * used for sending the last stats ramrod.
+ */
+ cur_stats_counter = (sc->stats_counter - 1);
+
+ /* are storm stats valid? */
+ if (le16toh(counters->xstats_counter) != cur_stats_counter) {
+ PMD_DRV_LOG(DEBUG,
+ "stats not updated by xstorm, "
+ "counter 0x%x != stats_counter 0x%x",
+ le16toh(counters->xstats_counter), sc->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16toh(counters->ustats_counter) != cur_stats_counter) {
+ PMD_DRV_LOG(DEBUG,
+ "stats not updated by ustorm, "
+ "counter 0x%x != stats_counter 0x%x",
+ le16toh(counters->ustats_counter), sc->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16toh(counters->cstats_counter) != cur_stats_counter) {
+ PMD_DRV_LOG(DEBUG,
+ "stats not updated by cstorm, "
+ "counter 0x%x != stats_counter 0x%x",
+ le16toh(counters->cstats_counter), sc->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16toh(counters->tstats_counter) != cur_stats_counter) {
+ PMD_DRV_LOG(DEBUG,
+ "stats not updated by tstorm, "
+ "counter 0x%x != stats_counter 0x%x",
+ le16toh(counters->tstats_counter), sc->stats_counter);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int
+bnx2x_storm_stats_update(struct bnx2x_softc *sc)
+{
+ struct tstorm_per_port_stats *tport =
+ &sc->fw_stats_data->port.tstorm_port_statistics;
+ struct tstorm_per_pf_stats *tfunc =
+ &sc->fw_stats_data->pf.tstorm_pf_statistics;
+ struct host_func_stats *fstats = &sc->func_stats;
+ struct bnx2x_eth_stats *estats = &sc->eth_stats;
+ struct bnx2x_eth_stats_old *estats_old = &sc->eth_stats_old;
+ int i;
+
+ /* vfs stat counter is managed by pf */
+ if (IS_PF(sc) && bnx2x_storm_stats_validate_counters(sc)) {
+ return -EAGAIN;
+ }
+
+ estats->error_bytes_received_hi = 0;
+ estats->error_bytes_received_lo = 0;
+
+ for (i = 0; i < sc->num_queues; i++) {
+ struct bnx2x_fastpath *fp = &sc->fp[i];
+ struct tstorm_per_queue_stats *tclient =
+ &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics;
+ struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
+ struct ustorm_per_queue_stats *uclient =
+ &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics;
+ struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
+ struct xstorm_per_queue_stats *xclient =
+ &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics;
+ struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+
+ uint32_t diff;
+
+ /* PMD_DRV_LOG(DEBUG,
+ "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x",
+ i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
+ xclient->mcast_pkts_sent);
+
+ PMD_DRV_LOG(DEBUG, "---------------"); */
+
+ UPDATE_QSTAT(tclient->rcv_bcast_bytes,
+ total_broadcast_bytes_received);
+ UPDATE_QSTAT(tclient->rcv_mcast_bytes,
+ total_multicast_bytes_received);
+ UPDATE_QSTAT(tclient->rcv_ucast_bytes,
+ total_unicast_bytes_received);
+
+ /*
+ * sum to total_bytes_received all
+ * unicast/multicast/broadcast
+ */
+ qstats->total_bytes_received_hi =
+ qstats->total_broadcast_bytes_received_hi;
+ qstats->total_bytes_received_lo =
+ qstats->total_broadcast_bytes_received_lo;
+
+ ADD_64(qstats->total_bytes_received_hi,
+ qstats->total_multicast_bytes_received_hi,
+ qstats->total_bytes_received_lo,
+ qstats->total_multicast_bytes_received_lo);
+
+ ADD_64(qstats->total_bytes_received_hi,
+ qstats->total_unicast_bytes_received_hi,
+ qstats->total_bytes_received_lo,
+ qstats->total_unicast_bytes_received_lo);
+
+ qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi;
+ qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo;
+
+ UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received);
+ UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received);
+ UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received);
+ UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
+ etherstatsoverrsizepkts, 32);
+ UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
+
+ SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received);
+ SUB_EXTEND_USTAT(mcast_no_buff_pkts,
+ total_multicast_packets_received);
+ SUB_EXTEND_USTAT(bcast_no_buff_pkts,
+ total_broadcast_packets_received);
+ UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
+ UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
+ UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);
+
+ UPDATE_QSTAT(xclient->bcast_bytes_sent,
+ total_broadcast_bytes_transmitted);
+ UPDATE_QSTAT(xclient->mcast_bytes_sent,
+ total_multicast_bytes_transmitted);
+ UPDATE_QSTAT(xclient->ucast_bytes_sent,
+ total_unicast_bytes_transmitted);
+
+ /*
+ * sum to total_bytes_transmitted all
+ * unicast/multicast/broadcast
+ */
+ qstats->total_bytes_transmitted_hi =
+ qstats->total_unicast_bytes_transmitted_hi;
+ qstats->total_bytes_transmitted_lo =
+ qstats->total_unicast_bytes_transmitted_lo;
+
+ ADD_64(qstats->total_bytes_transmitted_hi,
+ qstats->total_broadcast_bytes_transmitted_hi,
+ qstats->total_bytes_transmitted_lo,
+ qstats->total_broadcast_bytes_transmitted_lo);
+
+ ADD_64(qstats->total_bytes_transmitted_hi,
+ qstats->total_multicast_bytes_transmitted_hi,
+ qstats->total_bytes_transmitted_lo,
+ qstats->total_multicast_bytes_transmitted_lo);
+
+ UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
+ total_unicast_packets_transmitted);
+ UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
+ total_multicast_packets_transmitted);
+ UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
+ total_broadcast_packets_transmitted);
+
+ UPDATE_EXTEND_TSTAT(checksum_discard,
+ total_packets_received_checksum_discarded);
+ UPDATE_EXTEND_TSTAT(ttl0_discard,
+ total_packets_received_ttl0_discarded);
+
+ UPDATE_EXTEND_XSTAT(error_drop_pkts,
+ total_transmitted_dropped_packets_error);
+
+ UPDATE_FSTAT_QSTAT(total_bytes_received);
+ UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
+ UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
+ UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
+ UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
+ UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
+ UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
+ UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
+ UPDATE_FSTAT_QSTAT(valid_bytes_received);
+ }
+
+ ADD_64(estats->total_bytes_received_hi,
+ estats->rx_stat_ifhcinbadoctets_hi,
+ estats->total_bytes_received_lo,
+ estats->rx_stat_ifhcinbadoctets_lo);
+
+ ADD_64_LE(estats->total_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->total_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
+
+ ADD_64_LE(estats->error_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->error_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
+
+ UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
+
+ ADD_64(estats->error_bytes_received_hi,
+ estats->rx_stat_ifhcinbadoctets_hi,
+ estats->error_bytes_received_lo,
+ estats->rx_stat_ifhcinbadoctets_lo);
+
+ if (sc->port.pmf) {
+ struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
+ UPDATE_FW_STAT(mac_filter_discard);
+ UPDATE_FW_STAT(mf_tag_discard);
+ UPDATE_FW_STAT(brb_truncate_discard);
+ UPDATE_FW_STAT(mac_discard);
+ }
+
+ fstats->host_func_stats_start = ++fstats->host_func_stats_end;
+
+ sc->stats_pending = 0;
+
+ return 0;
+}
+
+static void
+bnx2x_drv_stats_update(struct bnx2x_softc *sc)
+{
+ struct bnx2x_eth_stats *estats = &sc->eth_stats;
+ int i;
+
+ for (i = 0; i < sc->num_queues; i++) {
+ struct bnx2x_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old;
+
+ UPDATE_ESTAT_QSTAT(rx_calls);
+ UPDATE_ESTAT_QSTAT(rx_pkts);
+ UPDATE_ESTAT_QSTAT(rx_soft_errors);
+ UPDATE_ESTAT_QSTAT(rx_hw_csum_errors);
+ UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip);
+ UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp);
+ UPDATE_ESTAT_QSTAT(rx_budget_reached);
+ UPDATE_ESTAT_QSTAT(tx_pkts);
+ UPDATE_ESTAT_QSTAT(tx_soft_errors);
+ UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip);
+ UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp);
+ UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp);
+ UPDATE_ESTAT_QSTAT(tx_encap_failures);
+ UPDATE_ESTAT_QSTAT(tx_hw_queue_full);
+ UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth);
+ UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure);
+ UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth);
+ UPDATE_ESTAT_QSTAT(tx_window_violation_std);
+ UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf);
+ UPDATE_ESTAT_QSTAT(tx_frames_deferred);
+ UPDATE_ESTAT_QSTAT(tx_queue_xoff);
+
+ /* mbuf driver statistics */
+ UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts);
+ UPDATE_ESTAT_QSTAT(mbuf_defrag_failures);
+ UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed);
+ UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed);
+
+ /* track the number of allocated mbufs */
+ UPDATE_ESTAT_QSTAT(mbuf_alloc_tx);
+ UPDATE_ESTAT_QSTAT(mbuf_alloc_rx);
+ }
+}
+
+static uint8_t
+bnx2x_edebug_stats_stopped(struct bnx2x_softc *sc)
+{
+ uint32_t val;
+
+ if (SHMEM2_HAS(sc, edebug_driver_if[1])) {
+ val = SHMEM2_RD(sc, edebug_driver_if[1]);
+
+ if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+static void
+bnx2x_stats_update(struct bnx2x_softc *sc)
+{
+ uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
+
+ if (bnx2x_edebug_stats_stopped(sc)) {
+ return;
+ }
+
+ if (IS_PF(sc)) {
+
+ bnx2x_storm_stats_update(sc);
+ bnx2x_hw_stats_post(sc);
+ bnx2x_storm_stats_post(sc);
+ DELAY_MS(5);
+
+ if (*stats_comp != DMAE_COMP_VAL) {
+ return;
+ }
+
+ if (sc->port.pmf) {
+ bnx2x_hw_stats_update(sc);
+ }
+
+ if (bnx2x_storm_stats_update(sc)) {
+ if (sc->stats_pending++ == 3) {
+ rte_panic("storm stats not updated for 3 times");
+ }
+ return;
+ }
+ } else {
+ /*
+ * VF doesn't collect HW statistics, and doesn't get completions,
+ * performs only update.
+ */
+ bnx2x_storm_stats_update(sc);
+ }
+
+ bnx2x_drv_stats_update(sc);
+}
+
+static void
+bnx2x_port_stats_stop(struct bnx2x_softc *sc)
+{
+ struct dmae_command *dmae;
+ uint32_t opcode;
+ int loader_idx = PMF_DMAE_C(sc);
+ uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
+
+ sc->executer_idx = 0;
+
+ opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0);
+
+ if (sc->port.port_stx) {
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+
+ if (sc->func_stx) {
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
+ } else {
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
+ }
+
+ dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
+ dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
+ dmae->dst_addr_lo = sc->port.port_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = bnx2x_get_port_stats_dma_len(sc);
+ if (sc->func_stx) {
+ dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ } else {
+ dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ }
+ }
+
+ if (sc->func_stx) {
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
+ dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
+ dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
+ dmae->dst_addr_lo = (sc->func_stx >> 2);
+ dmae->dst_addr_hi = 0;
+ dmae->len = (sizeof(struct host_func_stats) >> 2);
+ dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ }
+}
+
+static void
+bnx2x_stats_stop(struct bnx2x_softc *sc)
+{
+ uint8_t update = FALSE;
+
+ bnx2x_stats_comp(sc);
+
+ if (sc->port.pmf) {
+ update = bnx2x_hw_stats_update(sc) == 0;
+ }
+
+ update |= bnx2x_storm_stats_update(sc) == 0;
+
+ if (update) {
+
+ if (sc->port.pmf) {
+ bnx2x_port_stats_stop(sc);
+ }
+
+ bnx2x_hw_stats_post(sc);
+ bnx2x_stats_comp(sc);
+ }
+}
+
+static void
+bnx2x_stats_do_nothing(__rte_unused struct bnx2x_softc *sc)
+{
+ return;
+}
+
+static const struct {
+ void (*action)(struct bnx2x_softc *sc);
+ enum bnx2x_stats_state next_state;
+} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
+ {
+ /* DISABLED PMF */ { bnx2x_stats_pmf_update, STATS_STATE_DISABLED },
+ /* LINK_UP */ { bnx2x_stats_start, STATS_STATE_ENABLED },
+ /* UPDATE */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED },
+ /* STOP */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED }
+ },
+ {
+ /* ENABLED PMF */ { bnx2x_stats_pmf_start, STATS_STATE_ENABLED },
+ /* LINK_UP */ { bnx2x_stats_restart, STATS_STATE_ENABLED },
+ /* UPDATE */ { bnx2x_stats_update, STATS_STATE_ENABLED },
+ /* STOP */ { bnx2x_stats_stop, STATS_STATE_DISABLED }
+ }
+};
+
+void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event)
+{
+ enum bnx2x_stats_state state;
+
+ if (unlikely(sc->panic)) {
+ return;
+ }
+
+ state = sc->stats_state;
+ sc->stats_state = bnx2x_stats_stm[state][event].next_state;
+
+ bnx2x_stats_stm[state][event].action(sc);
+
+ if (event != STATS_EVENT_UPDATE) {
+ PMD_DRV_LOG(DEBUG,
+ "state %d -> event %d -> state %d",
+ state, event, sc->stats_state);
+ }
+}
+
+static void
+bnx2x_port_stats_base_init(struct bnx2x_softc *sc)
+{
+ struct dmae_command *dmae;
+ uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
+
+ /* sanity */
+ if (!sc->port.pmf || !sc->port.port_stx) {
+ PMD_DRV_LOG(ERR, "BUG!");
+ return;
+ }
+
+ sc->executer_idx = 0;
+
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
+ TRUE, DMAE_COMP_PCI);
+ dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
+ dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
+ dmae->dst_addr_lo = (sc->port.port_stx >> 2);
+ dmae->dst_addr_hi = 0;
+ dmae->len = bnx2x_get_port_stats_dma_len(sc);
+ dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ bnx2x_hw_stats_post(sc);
+ bnx2x_stats_comp(sc);
+}
+
+/*
+ * This function will prepare the statistics ramrod data the way
+ * we will only have to increment the statistics counter and
+ * send the ramrod each time we have to.
+ */
+static void
+bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
+{
+ int i;
+ int first_queue_query_index;
+ struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
+ rte_iova_t cur_data_offset;
+ struct stats_query_entry *cur_query_entry;
+
+ stats_hdr->cmd_num = sc->fw_stats_num;
+ stats_hdr->drv_stats_counter = 0;
+
+ /*
+ * The storm_counters struct contains the counters of completed
+ * statistics requests per storm which are incremented by FW
+ * each time it completes hadning a statistics ramrod. We will
+ * check these counters in the timer handler and discard a
+ * (statistics) ramrod completion.
+ */
+ cur_data_offset = (sc->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, storm_counters));
+
+ stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
+ stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));
+
+ /*
+ * Prepare the first stats ramrod (will be completed with
+ * the counters equal to zero) - init counters to somethig different.
+ */
+ memset(&sc->fw_stats_data->storm_counters, 0xff,
+ sizeof(struct stats_counter));
+
+ /**** Port FW statistics data ****/
+ cur_data_offset = (sc->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, port));
+
+ cur_query_entry = &sc->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_PORT;
+ /* For port query index is a DON'T CARE */
+ cur_query_entry->index = SC_PORT(sc);
+ /* For port query funcID is a DON'T CARE */
+ cur_query_entry->funcID = htole16(SC_FUNC(sc));
+ cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
+
+ /**** PF FW statistics data ****/
+ cur_data_offset = (sc->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, pf));
+
+ cur_query_entry = &sc->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_PF;
+ /* For PF query index is a DON'T CARE */
+ cur_query_entry->index = SC_PORT(sc);
+ cur_query_entry->funcID = htole16(SC_FUNC(sc));
+ cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
+
+ /**** Clients' queries ****/
+ cur_data_offset = (sc->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats));
+
+ /*
+ * First queue query index depends whether FCoE offloaded request will
+ * be included in the ramrod
+ */
+ first_queue_query_index = (BNX2X_FIRST_QUEUE_QUERY_IDX - 1);
+
+ for (i = 0; i < sc->num_queues; i++) {
+ cur_query_entry =
+ &sc->fw_stats_req->query[first_queue_query_index + i];
+
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = bnx2x_stats_id(&sc->fp[i]);
+ cur_query_entry->funcID = htole16(SC_FUNC(sc));
+ cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
+
+ cur_data_offset += sizeof(struct per_queue_stats);
+ }
+}
+
+void bnx2x_memset_stats(struct bnx2x_softc *sc)
+{
+ int i;
+
+ /* function stats */
+ for (i = 0; i < sc->num_queues; i++) {
+ struct bnx2x_fastpath *fp = &sc->fp[i];
+
+ memset(&fp->old_tclient, 0,
+ sizeof(fp->old_tclient));
+ memset(&fp->old_uclient, 0,
+ sizeof(fp->old_uclient));
+ memset(&fp->old_xclient, 0,
+ sizeof(fp->old_xclient));
+ if (sc->stats_init) {
+ memset(&fp->eth_q_stats, 0,
+ sizeof(fp->eth_q_stats));
+ memset(&fp->eth_q_stats_old, 0,
+ sizeof(fp->eth_q_stats_old));
+ }
+ }
+
+ if (sc->stats_init) {
+ memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
+ memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
+ memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
+ memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
+ memset(&sc->func_stats, 0, sizeof(sc->func_stats));
+ }
+
+ sc->stats_state = STATS_STATE_DISABLED;
+
+ if (sc->port.pmf && sc->port.port_stx)
+ bnx2x_port_stats_base_init(sc);
+
+ /* mark the end of statistics initialization */
+ sc->stats_init = false;
+}
+
+void
+bnx2x_stats_init(struct bnx2x_softc *sc)
+{
+ int /*abs*/port = SC_PORT(sc);
+ int mb_idx = SC_FW_MB_IDX(sc);
+ int i;
+
+ sc->stats_pending = 0;
+ sc->executer_idx = 0;
+ sc->stats_counter = 0;
+
+ sc->stats_init = TRUE;
+
+ /* port and func stats for management */
+ if (!BNX2X_NOMCP(sc)) {
+ sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
+ sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param);
+ } else {
+ sc->port.port_stx = 0;
+ sc->func_stx = 0;
+ }
+
+ PMD_DRV_LOG(DEBUG, "port_stx 0x%x func_stx 0x%x",
+ sc->port.port_stx, sc->func_stx);
+
+ /* pmf should retrieve port statistics from SP on a non-init*/
+ if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) {
+ bnx2x_stats_handle(sc, STATS_EVENT_PMF);
+ }
+
+ port = SC_PORT(sc);
+ /* port stats */
+ memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
+ sc->port.old_nig_stats.brb_discard =
+ REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+ sc->port.old_nig_stats.brb_truncate =
+ REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
+ if (!CHIP_IS_E3(sc)) {
+ REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
+ &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+ REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
+ &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+ }
+
+ /* function stats */
+ for (i = 0; i < sc->num_queues; i++) {
+ memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient));
+ memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient));
+ memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient));
+ if (sc->stats_init) {
+ memset(&sc->fp[i].eth_q_stats, 0,
+ sizeof(sc->fp[i].eth_q_stats));
+ memset(&sc->fp[i].eth_q_stats_old, 0,
+ sizeof(sc->fp[i].eth_q_stats_old));
+ }
+ }
+
+ /* prepare statistics ramrod data */
+ bnx2x_prep_fw_stats_req(sc);
+
+ if (sc->stats_init) {
+ memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
+ memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
+ memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
+ memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
+ memset(&sc->func_stats, 0, sizeof(sc->func_stats));
+
+ /* Clean SP from previous statistics */
+ if (sc->func_stx) {
+ memset(BNX2X_SP(sc, func_stats), 0, sizeof(struct host_func_stats));
+ bnx2x_func_stats_init(sc);
+ bnx2x_hw_stats_post(sc);
+ bnx2x_stats_comp(sc);
+ }
+ }
+
+ sc->stats_state = STATS_STATE_DISABLED;
+
+ if (sc->port.pmf && sc->port.port_stx) {
+ bnx2x_port_stats_base_init(sc);
+ }
+
+ /* mark the end of statistics initialization */
+ sc->stats_init = FALSE;
+}
+
+void
+bnx2x_save_statistics(struct bnx2x_softc *sc)
+{
+ int i;
+
+ /* save queue statistics */
+ for (i = 0; i < sc->num_queues; i++) {
+ struct bnx2x_fastpath *fp = &sc->fp[i];
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+
+ UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
+ UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
+ UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
+ UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
+ }
+
+ /* store port firmware statistics */
+ if (sc->port.pmf) {
+ struct bnx2x_eth_stats *estats = &sc->eth_stats;
+ struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
+ struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
+
+ fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi;
+ fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo;
+ fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi;
+ fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo;
+
+ if (IS_MF(sc)) {
+ UPDATE_FW_STAT_OLD(mac_filter_discard);
+ UPDATE_FW_STAT_OLD(mf_tag_discard);
+ UPDATE_FW_STAT_OLD(brb_truncate_discard);
+ UPDATE_FW_STAT_OLD(mac_discard);
+ }
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.h b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.h
new file mode 100644
index 00000000..635412bd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.h
@@ -0,0 +1,609 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef BNX2X_STATS_H
+#define BNX2X_STATS_H
+
+#include <sys/types.h>
+
+struct nig_stats {
+ uint32_t brb_discard;
+ uint32_t brb_packet;
+ uint32_t brb_truncate;
+ uint32_t flow_ctrl_discard;
+ uint32_t flow_ctrl_octets;
+ uint32_t flow_ctrl_packet;
+ uint32_t mng_discard;
+ uint32_t mng_octet_inp;
+ uint32_t mng_octet_out;
+ uint32_t mng_packet_inp;
+ uint32_t mng_packet_out;
+ uint32_t pbf_octets;
+ uint32_t pbf_packet;
+ uint32_t safc_inp;
+ uint32_t egress_mac_pkt0_lo;
+ uint32_t egress_mac_pkt0_hi;
+ uint32_t egress_mac_pkt1_lo;
+ uint32_t egress_mac_pkt1_hi;
+};
+
+
+enum bnx2x_stats_event {
+ STATS_EVENT_PMF = 0,
+ STATS_EVENT_LINK_UP,
+ STATS_EVENT_UPDATE,
+ STATS_EVENT_STOP,
+ STATS_EVENT_MAX
+};
+
+enum bnx2x_stats_state {
+ STATS_STATE_DISABLED = 0,
+ STATS_STATE_ENABLED,
+ STATS_STATE_MAX
+};
+
+struct bnx2x_eth_stats {
+ uint32_t total_bytes_received_hi;
+ uint32_t total_bytes_received_lo;
+ uint32_t total_bytes_transmitted_hi;
+ uint32_t total_bytes_transmitted_lo;
+ uint32_t total_unicast_packets_received_hi;
+ uint32_t total_unicast_packets_received_lo;
+ uint32_t total_multicast_packets_received_hi;
+ uint32_t total_multicast_packets_received_lo;
+ uint32_t total_broadcast_packets_received_hi;
+ uint32_t total_broadcast_packets_received_lo;
+ uint32_t total_unicast_packets_transmitted_hi;
+ uint32_t total_unicast_packets_transmitted_lo;
+ uint32_t total_multicast_packets_transmitted_hi;
+ uint32_t total_multicast_packets_transmitted_lo;
+ uint32_t total_broadcast_packets_transmitted_hi;
+ uint32_t total_broadcast_packets_transmitted_lo;
+ uint32_t valid_bytes_received_hi;
+ uint32_t valid_bytes_received_lo;
+
+ uint32_t error_bytes_received_hi;
+ uint32_t error_bytes_received_lo;
+ uint32_t etherstatsoverrsizepkts_hi;
+ uint32_t etherstatsoverrsizepkts_lo;
+ uint32_t no_buff_discard_hi;
+ uint32_t no_buff_discard_lo;
+
+ uint32_t rx_stat_ifhcinbadoctets_hi;
+ uint32_t rx_stat_ifhcinbadoctets_lo;
+ uint32_t tx_stat_ifhcoutbadoctets_hi;
+ uint32_t tx_stat_ifhcoutbadoctets_lo;
+ uint32_t rx_stat_dot3statsfcserrors_hi;
+ uint32_t rx_stat_dot3statsfcserrors_lo;
+ uint32_t rx_stat_dot3statsalignmenterrors_hi;
+ uint32_t rx_stat_dot3statsalignmenterrors_lo;
+ uint32_t rx_stat_dot3statscarriersenseerrors_hi;
+ uint32_t rx_stat_dot3statscarriersenseerrors_lo;
+ uint32_t rx_stat_falsecarriererrors_hi;
+ uint32_t rx_stat_falsecarriererrors_lo;
+ uint32_t rx_stat_etherstatsundersizepkts_hi;
+ uint32_t rx_stat_etherstatsundersizepkts_lo;
+ uint32_t rx_stat_dot3statsframestoolong_hi;
+ uint32_t rx_stat_dot3statsframestoolong_lo;
+ uint32_t rx_stat_etherstatsfragments_hi;
+ uint32_t rx_stat_etherstatsfragments_lo;
+ uint32_t rx_stat_etherstatsjabbers_hi;
+ uint32_t rx_stat_etherstatsjabbers_lo;
+ uint32_t rx_stat_maccontrolframesreceived_hi;
+ uint32_t rx_stat_maccontrolframesreceived_lo;
+ uint32_t rx_stat_bmac_xpf_hi;
+ uint32_t rx_stat_bmac_xpf_lo;
+ uint32_t rx_stat_bmac_xcf_hi;
+ uint32_t rx_stat_bmac_xcf_lo;
+ uint32_t rx_stat_xoffstateentered_hi;
+ uint32_t rx_stat_xoffstateentered_lo;
+ uint32_t rx_stat_xonpauseframesreceived_hi;
+ uint32_t rx_stat_xonpauseframesreceived_lo;
+ uint32_t rx_stat_xoffpauseframesreceived_hi;
+ uint32_t rx_stat_xoffpauseframesreceived_lo;
+ uint32_t tx_stat_outxonsent_hi;
+ uint32_t tx_stat_outxonsent_lo;
+ uint32_t tx_stat_outxoffsent_hi;
+ uint32_t tx_stat_outxoffsent_lo;
+ uint32_t tx_stat_flowcontroldone_hi;
+ uint32_t tx_stat_flowcontroldone_lo;
+ uint32_t tx_stat_etherstatscollisions_hi;
+ uint32_t tx_stat_etherstatscollisions_lo;
+ uint32_t tx_stat_dot3statssinglecollisionframes_hi;
+ uint32_t tx_stat_dot3statssinglecollisionframes_lo;
+ uint32_t tx_stat_dot3statsmultiplecollisionframes_hi;
+ uint32_t tx_stat_dot3statsmultiplecollisionframes_lo;
+ uint32_t tx_stat_dot3statsdeferredtransmissions_hi;
+ uint32_t tx_stat_dot3statsdeferredtransmissions_lo;
+ uint32_t tx_stat_dot3statsexcessivecollisions_hi;
+ uint32_t tx_stat_dot3statsexcessivecollisions_lo;
+ uint32_t tx_stat_dot3statslatecollisions_hi;
+ uint32_t tx_stat_dot3statslatecollisions_lo;
+ uint32_t tx_stat_etherstatspkts64octets_hi;
+ uint32_t tx_stat_etherstatspkts64octets_lo;
+ uint32_t tx_stat_etherstatspkts65octetsto127octets_hi;
+ uint32_t tx_stat_etherstatspkts65octetsto127octets_lo;
+ uint32_t tx_stat_etherstatspkts128octetsto255octets_hi;
+ uint32_t tx_stat_etherstatspkts128octetsto255octets_lo;
+ uint32_t tx_stat_etherstatspkts256octetsto511octets_hi;
+ uint32_t tx_stat_etherstatspkts256octetsto511octets_lo;
+ uint32_t tx_stat_etherstatspkts512octetsto1023octets_hi;
+ uint32_t tx_stat_etherstatspkts512octetsto1023octets_lo;
+ uint32_t tx_stat_etherstatspkts1024octetsto1522octets_hi;
+ uint32_t tx_stat_etherstatspkts1024octetsto1522octets_lo;
+ uint32_t tx_stat_etherstatspktsover1522octets_hi;
+ uint32_t tx_stat_etherstatspktsover1522octets_lo;
+ uint32_t tx_stat_bmac_2047_hi;
+ uint32_t tx_stat_bmac_2047_lo;
+ uint32_t tx_stat_bmac_4095_hi;
+ uint32_t tx_stat_bmac_4095_lo;
+ uint32_t tx_stat_bmac_9216_hi;
+ uint32_t tx_stat_bmac_9216_lo;
+ uint32_t tx_stat_bmac_16383_hi;
+ uint32_t tx_stat_bmac_16383_lo;
+ uint32_t tx_stat_dot3statsinternalmactransmiterrors_hi;
+ uint32_t tx_stat_dot3statsinternalmactransmiterrors_lo;
+ uint32_t tx_stat_bmac_ufl_hi;
+ uint32_t tx_stat_bmac_ufl_lo;
+
+ uint32_t pause_frames_received_hi;
+ uint32_t pause_frames_received_lo;
+ uint32_t pause_frames_sent_hi;
+ uint32_t pause_frames_sent_lo;
+
+ uint32_t etherstatspkts1024octetsto1522octets_hi;
+ uint32_t etherstatspkts1024octetsto1522octets_lo;
+ uint32_t etherstatspktsover1522octets_hi;
+ uint32_t etherstatspktsover1522octets_lo;
+
+ uint32_t brb_drop_hi;
+ uint32_t brb_drop_lo;
+ uint32_t brb_truncate_hi;
+ uint32_t brb_truncate_lo;
+
+ uint32_t mac_filter_discard;
+ uint32_t mf_tag_discard;
+ uint32_t brb_truncate_discard;
+ uint32_t mac_discard;
+
+ uint32_t nig_timer_max;
+
+ /* PFC */
+ uint32_t pfc_frames_received_hi;
+ uint32_t pfc_frames_received_lo;
+ uint32_t pfc_frames_sent_hi;
+ uint32_t pfc_frames_sent_lo;
+
+ /* Recovery */
+ uint32_t recoverable_error;
+ uint32_t unrecoverable_error;
+
+ /* src: Clear-on-Read register; Will not survive PMF Migration */
+ uint32_t eee_tx_lpi;
+
+ /* receive path driver statistics */
+ uint32_t rx_calls;
+ uint32_t rx_pkts;
+ uint32_t rx_soft_errors;
+ uint32_t rx_hw_csum_errors;
+ uint32_t rx_ofld_frames_csum_ip;
+ uint32_t rx_ofld_frames_csum_tcp_udp;
+ uint32_t rx_budget_reached;
+
+ /* tx path driver statistics */
+ uint32_t tx_pkts;
+ uint32_t tx_soft_errors;
+ uint32_t tx_ofld_frames_csum_ip;
+ uint32_t tx_ofld_frames_csum_tcp;
+ uint32_t tx_ofld_frames_csum_udp;
+ uint32_t tx_encap_failures;
+ uint32_t tx_hw_queue_full;
+ uint32_t tx_hw_max_queue_depth;
+ uint32_t tx_dma_mapping_failure;
+ uint32_t tx_max_drbr_queue_depth;
+ uint32_t tx_window_violation_std;
+ uint32_t tx_chain_lost_mbuf;
+ uint32_t tx_frames_deferred;
+ uint32_t tx_queue_xoff;
+
+ /* mbuf driver statistics */
+ uint32_t mbuf_defrag_attempts;
+ uint32_t mbuf_defrag_failures;
+ uint32_t mbuf_rx_bd_alloc_failed;
+ uint32_t mbuf_rx_bd_mapping_failed;
+
+ /* track the number of allocated mbufs */
+ uint32_t mbuf_alloc_tx;
+ uint32_t mbuf_alloc_rx;
+};
+
+
+struct bnx2x_eth_q_stats {
+ uint32_t total_unicast_bytes_received_hi;
+ uint32_t total_unicast_bytes_received_lo;
+ uint32_t total_broadcast_bytes_received_hi;
+ uint32_t total_broadcast_bytes_received_lo;
+ uint32_t total_multicast_bytes_received_hi;
+ uint32_t total_multicast_bytes_received_lo;
+ uint32_t total_bytes_received_hi;
+ uint32_t total_bytes_received_lo;
+ uint32_t total_unicast_bytes_transmitted_hi;
+ uint32_t total_unicast_bytes_transmitted_lo;
+ uint32_t total_broadcast_bytes_transmitted_hi;
+ uint32_t total_broadcast_bytes_transmitted_lo;
+ uint32_t total_multicast_bytes_transmitted_hi;
+ uint32_t total_multicast_bytes_transmitted_lo;
+ uint32_t total_bytes_transmitted_hi;
+ uint32_t total_bytes_transmitted_lo;
+ uint32_t total_unicast_packets_received_hi;
+ uint32_t total_unicast_packets_received_lo;
+ uint32_t total_multicast_packets_received_hi;
+ uint32_t total_multicast_packets_received_lo;
+ uint32_t total_broadcast_packets_received_hi;
+ uint32_t total_broadcast_packets_received_lo;
+ uint32_t total_unicast_packets_transmitted_hi;
+ uint32_t total_unicast_packets_transmitted_lo;
+ uint32_t total_multicast_packets_transmitted_hi;
+ uint32_t total_multicast_packets_transmitted_lo;
+ uint32_t total_broadcast_packets_transmitted_hi;
+ uint32_t total_broadcast_packets_transmitted_lo;
+ uint32_t valid_bytes_received_hi;
+ uint32_t valid_bytes_received_lo;
+
+ uint32_t etherstatsoverrsizepkts_hi;
+ uint32_t etherstatsoverrsizepkts_lo;
+ uint32_t no_buff_discard_hi;
+ uint32_t no_buff_discard_lo;
+
+ uint32_t total_packets_received_checksum_discarded_hi;
+ uint32_t total_packets_received_checksum_discarded_lo;
+ uint32_t total_packets_received_ttl0_discarded_hi;
+ uint32_t total_packets_received_ttl0_discarded_lo;
+ uint32_t total_transmitted_dropped_packets_error_hi;
+ uint32_t total_transmitted_dropped_packets_error_lo;
+
+ /* receive path driver statistics */
+ uint32_t rx_calls;
+ uint32_t rx_pkts;
+ uint32_t rx_soft_errors;
+ uint32_t rx_hw_csum_errors;
+ uint32_t rx_ofld_frames_csum_ip;
+ uint32_t rx_ofld_frames_csum_tcp_udp;
+ uint32_t rx_budget_reached;
+
+ /* tx path driver statistics */
+ uint32_t tx_pkts;
+ uint32_t tx_soft_errors;
+ uint32_t tx_ofld_frames_csum_ip;
+ uint32_t tx_ofld_frames_csum_tcp;
+ uint32_t tx_ofld_frames_csum_udp;
+ uint32_t tx_encap_failures;
+ uint32_t tx_hw_queue_full;
+ uint32_t tx_hw_max_queue_depth;
+ uint32_t tx_dma_mapping_failure;
+ uint32_t tx_max_drbr_queue_depth;
+ uint32_t tx_window_violation_std;
+ uint32_t tx_chain_lost_mbuf;
+ uint32_t tx_frames_deferred;
+ uint32_t tx_queue_xoff;
+
+ /* mbuf driver statistics */
+ uint32_t mbuf_defrag_attempts;
+ uint32_t mbuf_defrag_failures;
+ uint32_t mbuf_rx_bd_alloc_failed;
+ uint32_t mbuf_rx_bd_mapping_failed;
+
+ /* track the number of allocated mbufs */
+ uint32_t mbuf_alloc_tx;
+ uint32_t mbuf_alloc_rx;
+};
+
+struct bnx2x_eth_stats_old {
+ uint32_t rx_stat_dot3statsframestoolong_hi;
+ uint32_t rx_stat_dot3statsframestoolong_lo;
+};
+
+struct bnx2x_eth_q_stats_old {
+ /* Fields to perserve over fw reset*/
+ uint32_t total_unicast_bytes_received_hi;
+ uint32_t total_unicast_bytes_received_lo;
+ uint32_t total_broadcast_bytes_received_hi;
+ uint32_t total_broadcast_bytes_received_lo;
+ uint32_t total_multicast_bytes_received_hi;
+ uint32_t total_multicast_bytes_received_lo;
+ uint32_t total_unicast_bytes_transmitted_hi;
+ uint32_t total_unicast_bytes_transmitted_lo;
+ uint32_t total_broadcast_bytes_transmitted_hi;
+ uint32_t total_broadcast_bytes_transmitted_lo;
+ uint32_t total_multicast_bytes_transmitted_hi;
+ uint32_t total_multicast_bytes_transmitted_lo;
+
+ /* Fields to perserve last of */
+ uint32_t total_bytes_received_hi;
+ uint32_t total_bytes_received_lo;
+ uint32_t total_bytes_transmitted_hi;
+ uint32_t total_bytes_transmitted_lo;
+ uint32_t total_unicast_packets_received_hi;
+ uint32_t total_unicast_packets_received_lo;
+ uint32_t total_multicast_packets_received_hi;
+ uint32_t total_multicast_packets_received_lo;
+ uint32_t total_broadcast_packets_received_hi;
+ uint32_t total_broadcast_packets_received_lo;
+ uint32_t total_unicast_packets_transmitted_hi;
+ uint32_t total_unicast_packets_transmitted_lo;
+ uint32_t total_multicast_packets_transmitted_hi;
+ uint32_t total_multicast_packets_transmitted_lo;
+ uint32_t total_broadcast_packets_transmitted_hi;
+ uint32_t total_broadcast_packets_transmitted_lo;
+ uint32_t valid_bytes_received_hi;
+ uint32_t valid_bytes_received_lo;
+
+ /* receive path driver statistics */
+ uint32_t rx_calls_old;
+ uint32_t rx_pkts_old;
+ uint32_t rx_soft_errors_old;
+ uint32_t rx_hw_csum_errors_old;
+ uint32_t rx_ofld_frames_csum_ip_old;
+ uint32_t rx_ofld_frames_csum_tcp_udp_old;
+ uint32_t rx_budget_reached_old;
+
+ /* tx path driver statistics */
+ uint32_t tx_pkts_old;
+ uint32_t tx_soft_errors_old;
+ uint32_t tx_ofld_frames_csum_ip_old;
+ uint32_t tx_ofld_frames_csum_tcp_old;
+ uint32_t tx_ofld_frames_csum_udp_old;
+ uint32_t tx_encap_failures_old;
+ uint32_t tx_hw_queue_full_old;
+ uint32_t tx_hw_max_queue_depth_old;
+ uint32_t tx_dma_mapping_failure_old;
+ uint32_t tx_max_drbr_queue_depth_old;
+ uint32_t tx_window_violation_std_old;
+ uint32_t tx_chain_lost_mbuf_old;
+ uint32_t tx_frames_deferred_old;
+ uint32_t tx_queue_xoff_old;
+
+ /* mbuf driver statistics */
+ uint32_t mbuf_defrag_attempts_old;
+ uint32_t mbuf_defrag_failures_old;
+ uint32_t mbuf_rx_bd_alloc_failed_old;
+ uint32_t mbuf_rx_bd_mapping_failed_old;
+
+ /* track the number of allocated mbufs */
+ int mbuf_alloc_tx_old;
+ int mbuf_alloc_rx_old;
+};
+
+struct bnx2x_net_stats_old {
+ uint32_t rx_dropped;
+};
+
+struct bnx2x_fw_port_stats_old {
+ uint32_t pfc_frames_tx_hi;
+ uint32_t pfc_frames_tx_lo;
+ uint32_t pfc_frames_rx_hi;
+ uint32_t pfc_frames_rx_lo;
+
+ uint32_t mac_filter_discard;
+ uint32_t mf_tag_discard;
+ uint32_t brb_truncate_discard;
+ uint32_t mac_discard;
+};
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+ do { \
+ s_lo += a_lo; \
+ s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
+ } while (0)
+
+#define LE32_0 ((uint32_t) 0)
+#define LE16_0 ((uint16_t) 0)
+
+/* The _force is for cases where high value is 0 */
+#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \
+ ADD_64(s_hi, le32toh(a_hi_le), \
+ s_lo, le32toh(a_lo_le))
+
+#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \
+ ADD_64(s_hi, le16toh(a_hi_le), \
+ s_lo, le16toh(a_lo_le))
+
+/* difference = minuend - subtrahend */
+#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+ do { \
+ if (m_lo < s_lo) { \
+ /* underflow */ \
+ d_hi = m_hi - s_hi; \
+ if (d_hi > 0) { \
+ /* we can 'loan' 1 */ \
+ d_hi--; \
+ d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+ } else { \
+ /* m_hi <= s_hi */ \
+ d_hi = 0; \
+ d_lo = 0; \
+ } \
+ } else { \
+ /* m_lo >= s_lo */ \
+ if (m_hi < s_hi) { \
+ d_hi = 0; \
+ d_lo = 0; \
+ } else { \
+ /* m_hi >= s_hi */ \
+ d_hi = m_hi - s_hi; \
+ d_lo = m_lo - s_lo; \
+ } \
+ } \
+ } while (0)
+
+#define UPDATE_STAT64(s, t) \
+ do { \
+ DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
+ diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
+ pstats->mac_stx[0].t##_hi = new->s##_hi; \
+ pstats->mac_stx[0].t##_lo = new->s##_lo; \
+ ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
+ pstats->mac_stx[1].t##_lo, diff.lo); \
+ } while (0)
+
+#define UPDATE_STAT64_NIG(s, t) \
+ do { \
+ DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
+ diff.lo, new->s##_lo, old->s##_lo); \
+ ADD_64(estats->t##_hi, diff.hi, \
+ estats->t##_lo, diff.lo); \
+ } while (0)
+
+/* sum[hi:lo] += add */
+#define ADD_EXTEND_64(s_hi, s_lo, a) \
+ do { \
+ s_lo += a; \
+ s_hi += (s_lo < a) ? 1 : 0; \
+ } while (0)
+
+#define ADD_STAT64(diff, t) \
+ do { \
+ ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
+ pstats->mac_stx[1].t##_lo, new->diff##_lo); \
+ } while (0)
+
+#define UPDATE_EXTEND_STAT(s) \
+ do { \
+ ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
+ pstats->mac_stx[1].s##_lo, \
+ new->s); \
+ } while (0)
+
+#define UPDATE_EXTEND_TSTAT_X(s, t, size) \
+ do { \
+ diff = le##size##toh(tclient->s) - \
+ le##size##toh(old_tclient->s); \
+ old_tclient->s = tclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32)
+
+#define UPDATE_EXTEND_E_TSTAT(s, t, size) \
+ do { \
+ UPDATE_EXTEND_TSTAT_X(s, t, size); \
+ ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_USTAT(s, t) \
+ do { \
+ diff = le32toh(uclient->s) - le32toh(old_uclient->s); \
+ old_uclient->s = uclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_E_USTAT(s, t) \
+ do { \
+ UPDATE_EXTEND_USTAT(s, t); \
+ ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_XSTAT(s, t) \
+ do { \
+ diff = le32toh(xclient->s) - le32toh(old_xclient->s); \
+ old_xclient->s = xclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_QSTAT(s, t) \
+ do { \
+ qstats->t##_hi = qstats_old->t##_hi + le32toh(s.hi); \
+ qstats->t##_lo = qstats_old->t##_lo + le32toh(s.lo); \
+ } while (0)
+
+#define UPDATE_QSTAT_OLD(f) \
+ do { \
+ qstats_old->f = qstats->f; \
+ } while (0)
+
+#define UPDATE_ESTAT_QSTAT_64(s) \
+ do { \
+ ADD_64(estats->s##_hi, qstats->s##_hi, \
+ estats->s##_lo, qstats->s##_lo); \
+ SUB_64(estats->s##_hi, qstats_old->s##_hi_old, \
+ estats->s##_lo, qstats_old->s##_lo_old); \
+ qstats_old->s##_hi_old = qstats->s##_hi; \
+ qstats_old->s##_lo_old = qstats->s##_lo; \
+ } while (0)
+
+#define UPDATE_ESTAT_QSTAT(s) \
+ do { \
+ estats->s += qstats->s; \
+ estats->s -= qstats_old->s##_old; \
+ qstats_old->s##_old = qstats->s; \
+ } while (0)
+
+#define UPDATE_FSTAT_QSTAT(s) \
+ do { \
+ ADD_64(fstats->s##_hi, qstats->s##_hi, \
+ fstats->s##_lo, qstats->s##_lo); \
+ SUB_64(fstats->s##_hi, qstats_old->s##_hi, \
+ fstats->s##_lo, qstats_old->s##_lo); \
+ estats->s##_hi = fstats->s##_hi; \
+ estats->s##_lo = fstats->s##_lo; \
+ qstats_old->s##_hi = qstats->s##_hi; \
+ qstats_old->s##_lo = qstats->s##_lo; \
+ } while (0)
+
+#define UPDATE_FW_STAT(s) \
+ do { \
+ estats->s = le32toh(tport->s) + fwstats->s; \
+ } while (0)
+
+#define UPDATE_FW_STAT_OLD(f) \
+ do { \
+ fwstats->f = estats->f; \
+ } while (0)
+
+#define UPDATE_ESTAT(s, t) \
+ do { \
+ SUB_64(estats->s##_hi, estats_old->t##_hi, \
+ estats->s##_lo, estats_old->t##_lo); \
+ ADD_64(estats->s##_hi, estats->t##_hi, \
+ estats->s##_lo, estats->t##_lo); \
+ estats_old->t##_hi = estats->t##_hi; \
+ estats_old->t##_lo = estats->t##_lo; \
+ } while (0)
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+ do { \
+ DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+ } while (0)
+
+/* minuend[hi:lo] -= subtrahend */
+#define SUB_EXTEND_64(m_hi, m_lo, s) \
+ do { \
+ uint32_t s_hi = 0; \
+ SUB_64(m_hi, s_hi, m_lo, s); \
+ } while (0)
+
+#define SUB_EXTEND_USTAT(s, t) \
+ do { \
+ diff = le32toh(uclient->s) - le32toh(old_uclient->s); \
+ SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+struct bnx2x_softc;
+void bnx2x_stats_init(struct bnx2x_softc *sc);
+void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event);
+void bnx2x_save_statistics(struct bnx2x_softc *sc);
+void bnx2x_memset_stats(struct bnx2x_softc *sc);
+
+#endif /* BNX2X_STATS_H */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c
new file mode 100644
index 00000000..50099d46
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c
@@ -0,0 +1,677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bnx2x.h"
+
+/* calculate the crc in the bulletin board */
+static inline uint32_t
+bnx2x_vf_crc(struct bnx2x_vf_bulletin *bull)
+{
+ uint32_t crc_sz = sizeof(bull->crc), length = bull->length - crc_sz;
+
+ return ECORE_CRC32_LE(0, (uint8_t *)bull + crc_sz, length);
+}
+
+/* Checks are there mac/channel updates for VF
+ * returns TRUE if something was updated
+*/
+int
+bnx2x_check_bull(struct bnx2x_softc *sc)
+{
+ struct bnx2x_vf_bulletin *bull;
+ uint8_t tries = 0;
+ uint16_t old_version = sc->old_bulletin.version;
+ uint64_t valid_bitmap;
+
+ bull = sc->pf2vf_bulletin;
+ if (old_version == bull->version) {
+ return FALSE;
+ } else {
+ /* Check the crc until we get the correct data */
+ while (tries < BNX2X_VF_BULLETIN_TRIES) {
+ bull = sc->pf2vf_bulletin;
+ if (bull->crc == bnx2x_vf_crc(bull))
+ break;
+
+ PMD_DRV_LOG(ERR, "bad crc on bulletin board. contained %x computed %x",
+ bull->crc, bnx2x_vf_crc(bull));
+ ++tries;
+ }
+ if (tries == BNX2X_VF_BULLETIN_TRIES) {
+ PMD_DRV_LOG(ERR, "pf to vf bulletin board crc was wrong %d consecutive times. Aborting",
+ tries);
+ return FALSE;
+ }
+ }
+
+ valid_bitmap = bull->valid_bitmap;
+
+ /* check the mac address and VLAN and allocate memory if valid */
+ if (valid_bitmap & (1 << MAC_ADDR_VALID) && memcmp(bull->mac, sc->old_bulletin.mac, ETH_ALEN))
+ rte_memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN);
+ if (valid_bitmap & (1 << VLAN_VALID))
+ rte_memcpy(&bull->vlan, &sc->old_bulletin.vlan, VLAN_HLEN);
+
+ sc->old_bulletin = *bull;
+
+ return TRUE;
+}
+
+/* place a given tlv on the tlv buffer at a given offset */
+static void
+bnx2x_add_tlv(__rte_unused struct bnx2x_softc *sc, void *tlvs_list,
+ uint16_t offset, uint16_t type, uint16_t length)
+{
+ struct channel_tlv *tl = (struct channel_tlv *)
+ ((unsigned long)tlvs_list + offset);
+
+ tl->type = type;
+ tl->length = length;
+}
+
+/* Initiliaze header of the first tlv and clear mailbox*/
+static void
+bnx2x_vf_prep(struct bnx2x_softc *sc, struct vf_first_tlv *first_tlv,
+ uint16_t type, uint16_t length)
+{
+ struct bnx2x_vf_mbx_msg *mbox = sc->vf2pf_mbox;
+
+ rte_spinlock_lock(&sc->vf2pf_lock);
+
+ PMD_DRV_LOG(DEBUG, "Preparing %d tlv for sending", type);
+
+ memset(mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
+
+ bnx2x_add_tlv(sc, &first_tlv->tl, 0, type, length);
+
+ /* Initialize header of the first tlv */
+ first_tlv->reply_offset = sizeof(mbox->query);
+}
+
+/* releases the mailbox */
+static void
+bnx2x_vf_finalize(struct bnx2x_softc *sc,
+ __rte_unused struct vf_first_tlv *first_tlv)
+{
+ PMD_DRV_LOG(DEBUG, "done sending [%d] tlv over vf pf channel",
+ first_tlv->tl.type);
+
+ rte_spinlock_unlock(&sc->vf2pf_lock);
+}
+
+#define BNX2X_VF_CMD_ADDR_LO PXP_VF_ADDR_CSDM_GLOBAL_START
+#define BNX2X_VF_CMD_ADDR_HI BNX2X_VF_CMD_ADDR_LO + 4
+#define BNX2X_VF_CMD_TRIGGER BNX2X_VF_CMD_ADDR_HI + 4
+#define BNX2X_VF_CHANNEL_DELAY 100
+#define BNX2X_VF_CHANNEL_TRIES 100
+
+static int
+bnx2x_do_req4pf(struct bnx2x_softc *sc, rte_iova_t phys_addr)
+{
+ uint8_t *status = &sc->vf2pf_mbox->resp.common_reply.status;
+ uint8_t i;
+
+ if (*status) {
+ PMD_DRV_LOG(ERR, "status should be zero before message"
+ " to pf was sent");
+ return -EINVAL;
+ }
+
+ bnx2x_check_bull(sc);
+ if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
+ PMD_DRV_LOG(ERR, "channel is down. Aborting message sending");
+ return -EINVAL;
+ }
+
+ REG_WR(sc, BNX2X_VF_CMD_ADDR_LO, U64_LO(phys_addr));
+ REG_WR(sc, BNX2X_VF_CMD_ADDR_HI, U64_HI(phys_addr));
+
+ /* memory barrier to ensure that FW can read phys_addr */
+ wmb();
+
+ REG_WR8(sc, BNX2X_VF_CMD_TRIGGER, 1);
+
+ /* Do several attempts until PF completes */
+ for (i = 0; i < BNX2X_VF_CHANNEL_TRIES; i++) {
+ DELAY_MS(BNX2X_VF_CHANNEL_DELAY);
+ if (*status)
+ break;
+ }
+
+ if (!*status) {
+ PMD_DRV_LOG(ERR, "Response from PF timed out");
+ return -EAGAIN;
+ }
+
+ PMD_DRV_LOG(DEBUG, "Response from PF was received");
+ return 0;
+}
+
+static inline uint16_t bnx2x_check_me_flags(uint32_t val)
+{
+ if (((val) & ME_REG_VF_VALID) && (!((val) & ME_REG_VF_ERR)))
+ return ME_REG_VF_VALID;
+ else
+ return 0;
+}
+
+#define BNX2X_ME_ANSWER_DELAY 100
+#define BNX2X_ME_ANSWER_TRIES 10
+
+static inline int bnx2x_read_vf_id(struct bnx2x_softc *sc)
+{
+ uint32_t val;
+ uint8_t i = 0;
+
+ while (i <= BNX2X_ME_ANSWER_TRIES) {
+ val = BNX2X_DB_READ(DOORBELL_ADDR(sc, 0));
+ if (bnx2x_check_me_flags(val))
+ return VF_ID(val);
+
+ DELAY_MS(BNX2X_ME_ANSWER_DELAY);
+ i++;
+ }
+
+ return -EINVAL;
+}
+
+#define BNX2X_VF_OBTAIN_MAX_TRIES 3
+#define BNX2X_VF_OBTAIN_MAC_FILTERS 1
+#define BNX2X_VF_OBTAIN_MC_FILTERS 10
+
+static
+int bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
+{
+ struct vf_acquire_resp_tlv *resp = &sc->vf2pf_mbox->resp.acquire_resp,
+ *sc_resp = &sc->acquire_resp;
+ struct vf_resource_query *res_query;
+ struct vf_resc *resc;
+ int res_obtained = false;
+ int tries = 0;
+ int rc;
+
+ do {
+ PMD_DRV_LOG(DEBUG, "trying to get resources");
+
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ return rc;
+
+ memcpy(sc_resp, resp, sizeof(sc->acquire_resp));
+
+ tries++;
+
+ /* check PF to request acceptance */
+ if (sc_resp->status == BNX2X_VF_STATUS_SUCCESS) {
+ PMD_DRV_LOG(DEBUG, "resources obtained successfully");
+ res_obtained = true;
+ } else if (sc_resp->status == BNX2X_VF_STATUS_NO_RESOURCES &&
+ tries < BNX2X_VF_OBTAIN_MAX_TRIES) {
+ PMD_DRV_LOG(DEBUG,
+ "PF cannot allocate requested amount of resources");
+
+ res_query = &sc->vf2pf_mbox->query[0].acquire.res_query;
+ resc = &sc_resp->resc;
+
+ /* PF refused our request. Try to decrease request params */
+ res_query->num_txqs = min(res_query->num_txqs, resc->num_txqs);
+ res_query->num_rxqs = min(res_query->num_rxqs, resc->num_rxqs);
+ res_query->num_sbs = min(res_query->num_sbs, resc->num_sbs);
+ res_query->num_mac_filters = min(res_query->num_mac_filters, resc->num_mac_filters);
+ res_query->num_vlan_filters = min(res_query->num_vlan_filters, resc->num_vlan_filters);
+ res_query->num_mc_filters = min(res_query->num_mc_filters, resc->num_mc_filters);
+
+ memset(&sc->vf2pf_mbox->resp, 0, sizeof(union resp_tlvs));
+ } else {
+ PMD_DRV_LOG(ERR, "Failed to get the requested "
+ "amount of resources: %d.",
+ sc_resp->status);
+ return -EINVAL;
+ }
+ } while (!res_obtained);
+
+ return 0;
+}
+
+int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count)
+{
+ struct vf_acquire_tlv *acq = &sc->vf2pf_mbox->query[0].acquire;
+ int vf_id;
+ int rc;
+
+ bnx2x_vf_close(sc);
+ bnx2x_vf_prep(sc, &acq->first_tlv, BNX2X_VF_TLV_ACQUIRE, sizeof(*acq));
+
+ vf_id = bnx2x_read_vf_id(sc);
+ if (vf_id < 0) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ acq->vf_id = vf_id;
+
+ acq->res_query.num_rxqs = rx_count;
+ acq->res_query.num_txqs = tx_count;
+ acq->res_query.num_sbs = sc->igu_sb_cnt;
+ acq->res_query.num_mac_filters = BNX2X_VF_OBTAIN_MAC_FILTERS;
+ acq->res_query.num_mc_filters = BNX2X_VF_OBTAIN_MC_FILTERS;
+
+ acq->bulletin_addr = sc->pf2vf_bulletin_mapping.paddr;
+
+ /* Request physical port identifier */
+ bnx2x_add_tlv(sc, acq, acq->first_tlv.tl.length,
+ BNX2X_VF_TLV_PHYS_PORT_ID,
+ sizeof(struct channel_tlv));
+
+ bnx2x_add_tlv(sc, acq,
+ (acq->first_tlv.tl.length + sizeof(struct channel_tlv)),
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* requesting the resources in loop */
+ rc = bnx2x_loop_obtain_resources(sc);
+ if (rc)
+ goto out;
+
+ struct vf_acquire_resp_tlv sc_resp = sc->acquire_resp;
+
+ sc->devinfo.chip_id |= (sc_resp.chip_num & 0xFFFF);
+ sc->devinfo.int_block = INT_BLOCK_IGU;
+ sc->devinfo.chip_port_mode = CHIP_2_PORT_MODE;
+ sc->devinfo.mf_info.mf_ov = 0;
+ sc->devinfo.mf_info.mf_mode = 0;
+ sc->devinfo.flash_size = 0;
+
+ sc->igu_sb_cnt = sc_resp.resc.num_sbs;
+ sc->igu_base_sb = sc_resp.resc.hw_sbs[0] & 0xFF;
+ sc->igu_dsb_id = -1;
+ sc->max_tx_queues = sc_resp.resc.num_txqs;
+ sc->max_rx_queues = sc_resp.resc.num_rxqs;
+
+ sc->link_params.chip_id = sc->devinfo.chip_id;
+ sc->doorbell_size = sc_resp.db_size;
+ sc->flags |= BNX2X_NO_WOL_FLAG | BNX2X_NO_ISCSI_OOO_FLAG | BNX2X_NO_ISCSI_FLAG | BNX2X_NO_FCOE_FLAG;
+
+ PMD_DRV_LOG(DEBUG, "status block count = %d, base status block = %x",
+ sc->igu_sb_cnt, sc->igu_base_sb);
+ strncpy(sc->fw_ver, sc_resp.fw_ver, sizeof(sc->fw_ver));
+
+ if (is_valid_assigned_ether_addr(&sc_resp.resc.current_mac_addr))
+ ether_addr_copy(&sc_resp.resc.current_mac_addr,
+ (struct ether_addr *)sc->link_params.mac_addr);
+ else
+ eth_random_addr(sc->link_params.mac_addr);
+
+out:
+ bnx2x_vf_finalize(sc, &acq->first_tlv);
+
+ return rc;
+}
+
+/* Ask PF to release VF's resources */
+void
+bnx2x_vf_close(struct bnx2x_softc *sc)
+{
+ struct vf_release_tlv *query;
+ struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
+ int vf_id = bnx2x_read_vf_id(sc);
+ int rc;
+
+ if (vf_id >= 0) {
+ query = &sc->vf2pf_mbox->query[0].release;
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_RELEASE,
+ sizeof(*query));
+
+ query->vf_id = vf_id;
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to release VF");
+
+ bnx2x_vf_finalize(sc, &query->first_tlv);
+ }
+}
+
+/* Let PF know the VF status blocks phys_addrs */
+int
+bnx2x_vf_init(struct bnx2x_softc *sc)
+{
+ struct vf_init_tlv *query;
+ struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
+ int i, rc;
+
+ query = &sc->vf2pf_mbox->query[0].init;
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_INIT,
+ sizeof(*query));
+
+ FOR_EACH_QUEUE(sc, i) {
+ query->sb_addr[i] = (unsigned long)(sc->fp[i].sb_dma.paddr);
+ }
+
+ query->stats_step = sizeof(struct per_queue_stats);
+ query->stats_addr = sc->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ goto out;
+ if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to init VF");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ PMD_DRV_LOG(DEBUG, "VF was initialized");
+out:
+ bnx2x_vf_finalize(sc, &query->first_tlv);
+ return rc;
+}
+
+void
+bnx2x_vf_unload(struct bnx2x_softc *sc)
+{
+ struct vf_close_tlv *query;
+ struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
+ struct vf_q_op_tlv *query_op;
+ int i, vf_id, rc;
+
+ vf_id = bnx2x_read_vf_id(sc);
+ if (vf_id > 0) {
+ FOR_EACH_QUEUE(sc, i) {
+ query_op = &sc->vf2pf_mbox->query[0].q_op;
+ bnx2x_vf_prep(sc, &query_op->first_tlv,
+ BNX2X_VF_TLV_TEARDOWN_Q,
+ sizeof(*query_op));
+
+ query_op->vf_qid = i;
+
+ bnx2x_add_tlv(sc, query_op,
+ query_op->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
+ PMD_DRV_LOG(ERR,
+ "Bad reply for vf_q %d teardown", i);
+
+ bnx2x_vf_finalize(sc, &query_op->first_tlv);
+ }
+
+ bnx2x_vf_set_mac(sc, false);
+
+ query = &sc->vf2pf_mbox->query[0].close;
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_CLOSE,
+ sizeof(*query));
+
+ query->vf_id = vf_id;
+
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
+ PMD_DRV_LOG(ERR,
+ "Bad reply from PF for close message");
+
+ bnx2x_vf_finalize(sc, &query->first_tlv);
+ }
+}
+
+static inline uint16_t
+bnx2x_vf_q_flags(uint8_t leading)
+{
+ uint16_t flags = leading ? BNX2X_VF_Q_FLAG_LEADING_RSS : 0;
+
+ flags |= BNX2X_VF_Q_FLAG_CACHE_ALIGN;
+ flags |= BNX2X_VF_Q_FLAG_STATS;
+ flags |= BNX2X_VF_Q_FLAG_VLAN;
+
+ return flags;
+}
+
+static void
+bnx2x_vf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
+ struct vf_rxq_params *rxq_init, uint16_t flags)
+{
+ struct bnx2x_rx_queue *rxq;
+
+ rxq = sc->rx_queues[fp->index];
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "RX queue %d is NULL", fp->index);
+ return;
+ }
+
+ rxq_init->rcq_addr = rxq->cq_ring_phys_addr;
+ rxq_init->rcq_np_addr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE;
+ rxq_init->rxq_addr = rxq->rx_ring_phys_addr;
+ rxq_init->vf_sb_id = fp->index;
+ rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+ rxq_init->mtu = sc->mtu;
+ rxq_init->buf_sz = fp->rx_buf_size;
+ rxq_init->flags = flags;
+ rxq_init->stat_id = -1;
+ rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+}
+
+static void
+bnx2x_vf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
+ struct vf_txq_params *txq_init, uint16_t flags)
+{
+ struct bnx2x_tx_queue *txq;
+
+ txq = sc->tx_queues[fp->index];
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "TX queue %d is NULL", fp->index);
+ return;
+ }
+
+ txq_init->txq_addr = txq->tx_ring_phys_addr;
+ txq_init->sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
+ txq_init->flags = flags;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
+ txq_init->vf_sb_id = fp->index;
+}
+
+int
+bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, int leading)
+{
+ struct vf_setup_q_tlv *query;
+ struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
+ uint16_t flags = bnx2x_vf_q_flags(leading);
+ int rc;
+
+ query = &sc->vf2pf_mbox->query[0].setup_q;
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SETUP_Q,
+ sizeof(*query));
+
+ query->vf_qid = fp->index;
+ query->param_valid = VF_RXQ_VALID | VF_TXQ_VALID;
+
+ bnx2x_vf_rx_q_prep(sc, fp, &query->rxq, flags);
+ bnx2x_vf_tx_q_prep(sc, fp, &query->txq, flags);
+
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ goto out;
+ if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to setup VF queue[%d]",
+ fp->index);
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vf_finalize(sc, &query->first_tlv);
+
+ return rc;
+}
+
+int
+bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set)
+{
+ struct vf_set_q_filters_tlv *query;
+ struct vf_common_reply_tlv *reply;
+ int rc;
+
+ query = &sc->vf2pf_mbox->query[0].set_q_filters;
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS,
+ sizeof(*query));
+
+ query->vf_qid = sc->fp->index;
+ query->mac_filters_cnt = 1;
+ query->flags = BNX2X_VF_MAC_VLAN_CHANGED;
+
+ query->filters[0].flags = (set ? BNX2X_VF_Q_FILTER_SET_MAC : 0) |
+ BNX2X_VF_Q_FILTER_DEST_MAC_VALID;
+
+ bnx2x_check_bull(sc);
+
+ rte_memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN);
+
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ goto out;
+ reply = &sc->vf2pf_mbox->resp.common_reply;
+
+ while (BNX2X_VF_STATUS_FAILURE == reply->status &&
+ bnx2x_check_bull(sc)) {
+ /* A new mac was configured by PF for us */
+ rte_memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac,
+ ETH_ALEN);
+ rte_memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac,
+ ETH_ALEN);
+
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ goto out;
+ }
+
+ if (BNX2X_VF_STATUS_SUCCESS != reply->status) {
+ PMD_DRV_LOG(ERR, "Bad reply from PF for SET MAC message: %d",
+ reply->status);
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vf_finalize(sc, &query->first_tlv);
+
+ return rc;
+}
+
+int
+bnx2x_vf_config_rss(struct bnx2x_softc *sc,
+ struct ecore_config_rss_params *params)
+{
+ struct vf_rss_tlv *query;
+ struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
+ int rc;
+
+ query = &sc->vf2pf_mbox->query[0].update_rss;
+
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_UPDATE_RSS,
+ sizeof(*query));
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rte_memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key));
+ query->rss_key_size = T_ETH_RSS_KEY;
+
+ rte_memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+ query->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
+
+ query->rss_result_mask = params->rss_result_mask;
+ query->rss_flags = params->rss_flags;
+
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ goto out;
+
+ if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure RSS");
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vf_finalize(sc, &query->first_tlv);
+
+ return rc;
+}
+
+int
+bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc)
+{
+ struct vf_set_q_filters_tlv *query;
+ struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
+ int rc;
+
+ query = &sc->vf2pf_mbox->query[0].set_q_filters;
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS,
+ sizeof(*query));
+
+ query->vf_qid = 0;
+ query->flags = BNX2X_VF_RX_MASK_CHANGED;
+
+ switch (sc->rx_mode) {
+ case BNX2X_RX_MODE_NONE: /* no Rx */
+ query->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
+ break;
+ case BNX2X_RX_MODE_NORMAL:
+ query->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
+ query->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ case BNX2X_RX_MODE_ALLMULTI:
+ query->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
+ query->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ case BNX2X_RX_MODE_ALLMULTI_PROMISC:
+ case BNX2X_RX_MODE_PROMISC:
+ query->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
+ query->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
+ query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "BAD rx mode (%d)", sc->rx_mode);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ goto out;
+
+ if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to set RX mode");
+ rc = -EINVAL;
+ }
+
+out:
+ bnx2x_vf_finalize(sc, &query->first_tlv);
+
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h
new file mode 100644
index 00000000..cc6fef95
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef BNX2X_VFPF_H
+#define BNX2X_VFPF_H
+
+#include "ecore_sp.h"
+
+#define VLAN_HLEN 4
+
+struct vf_resource_query {
+ uint8_t num_rxqs;
+ uint8_t num_txqs;
+ uint8_t num_sbs;
+ uint8_t num_mac_filters;
+ uint8_t num_vlan_filters;
+ uint8_t num_mc_filters;
+};
+
+#define BNX2X_VF_STATUS_SUCCESS 1
+#define BNX2X_VF_STATUS_FAILURE 2
+#define BNX2X_VF_STATUS_NO_RESOURCES 4
+#define BNX2X_VF_BULLETIN_TRIES 5
+
+#define BNX2X_VF_Q_FLAG_CACHE_ALIGN 0x0008
+#define BNX2X_VF_Q_FLAG_STATS 0x0010
+#define BNX2X_VF_Q_FLAG_OV 0x0020
+#define BNX2X_VF_Q_FLAG_VLAN 0x0040
+#define BNX2X_VF_Q_FLAG_COS 0x0080
+#define BNX2X_VF_Q_FLAG_HC 0x0100
+#define BNX2X_VF_Q_FLAG_DHC 0x0200
+#define BNX2X_VF_Q_FLAG_LEADING_RSS 0x0400
+
+#define TLV_BUFFER_SIZE 1024
+
+#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000
+#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001
+#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002
+#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
+#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
+#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
+
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ uint16_t type;
+ uint16_t length;
+};
+
+struct vf_first_tlv {
+ struct channel_tlv tl;
+ uint32_t reply_offset;
+};
+
+struct tlv_buffer_size {
+ uint8_t tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+/* tlv struct for all PF replies except acquire */
+struct vf_common_reply_tlv {
+ struct channel_tlv tl;
+ uint8_t status;
+ uint8_t pad[3];
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ uint32_t pad;
+};
+
+/* Acquire */
+struct vf_acquire_tlv {
+ struct vf_first_tlv first_tlv;
+
+ uint8_t vf_id;
+ uint8_t pad[3];
+
+ struct vf_resource_query res_query;
+
+ uint64_t bulletin_addr;
+};
+
+/* simple operation request on queue */
+struct vf_q_op_tlv {
+ struct vf_first_tlv first_tlv;
+ uint8_t vf_qid;
+ uint8_t pad[3];
+};
+
+/* receive side scaling tlv */
+struct vf_rss_tlv {
+ struct vf_first_tlv first_tlv;
+ uint32_t rss_flags;
+ uint8_t rss_result_mask;
+ uint8_t ind_table_size;
+ uint8_t rss_key_size;
+ uint8_t pad;
+ uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ uint32_t rss_key[T_ETH_RSS_KEY]; /* hash values */
+};
+
+struct vf_resc {
+#define BNX2X_VF_MAX_QUEUES_PER_VF 16
+#define BNX2X_VF_MAX_SBS_PER_VF 16
+ uint16_t hw_sbs[BNX2X_VF_MAX_SBS_PER_VF];
+ uint8_t hw_qid[BNX2X_VF_MAX_QUEUES_PER_VF];
+ uint8_t num_rxqs;
+ uint8_t num_txqs;
+ uint8_t num_sbs;
+ uint8_t num_mac_filters;
+ uint8_t num_vlan_filters;
+ uint8_t num_mc_filters;
+ uint8_t permanent_mac_addr[ETH_ALEN];
+ struct ether_addr current_mac_addr;
+ uint16_t pf_link_speed;
+ uint32_t pf_link_supported;
+};
+
+/* tlv struct holding reply for acquire */
+struct vf_acquire_resp_tlv {
+ uint16_t type;
+ uint16_t length;
+ uint8_t status;
+ uint8_t pad1[3];
+ uint32_t chip_num;
+ uint8_t pad2[4];
+ char fw_ver[32];
+ uint16_t db_size;
+ uint8_t pad3[2];
+ struct vf_resc resc;
+};
+
+/* Init VF */
+struct vf_init_tlv {
+ struct vf_first_tlv first_tlv;
+ uint64_t sb_addr[BNX2X_VF_MAX_SBS_PER_VF];
+ uint64_t spq_addr;
+ uint64_t stats_addr;
+ uint16_t stats_step;
+ uint32_t flags;
+ uint32_t pad[2];
+};
+
+struct vf_rxq_params {
+ /* physical addresses */
+ uint64_t rcq_addr;
+ uint64_t rcq_np_addr;
+ uint64_t rxq_addr;
+ uint64_t pad1;
+
+ /* sb + hc info */
+ uint8_t vf_sb_id;
+ uint8_t sb_cq_index;
+ uint16_t hc_rate; /* desired interrupts per sec. */
+ /* rx buffer info */
+ uint16_t mtu;
+ uint16_t buf_sz;
+ uint16_t flags; /* for BNX2X_VF_Q_FLAG_X flags */
+ uint16_t stat_id; /* valid if BNX2X_VF_Q_FLAG_STATS */
+
+ uint8_t pad2[5];
+
+ uint8_t drop_flags;
+ uint8_t cache_line_log; /* BNX2X_VF_Q_FLAG_CACHE_ALIGN */
+ uint8_t pad3;
+};
+
+struct vf_txq_params {
+ /* physical addresses */
+ uint64_t txq_addr;
+
+ /* sb + hc info */
+ uint8_t vf_sb_id; /* index in hw_sbs[] */
+ uint8_t sb_index; /* Index in the SB */
+ uint16_t hc_rate; /* desired interrupts per sec. */
+ uint32_t flags; /* for BNX2X_VF_Q_FLAG_X flags */
+ uint16_t stat_id; /* valid if BNX2X_VF_Q_FLAG_STATS */
+ uint8_t traffic_type; /* see in setup_context() */
+ uint8_t pad;
+};
+
+/* Setup Queue */
+struct vf_setup_q_tlv {
+ struct vf_first_tlv first_tlv;
+
+ struct vf_rxq_params rxq;
+ struct vf_txq_params txq;
+
+ uint8_t vf_qid; /* index in hw_qid[] */
+ uint8_t param_valid;
+ #define VF_RXQ_VALID 0x01
+ #define VF_TXQ_VALID 0x02
+ uint8_t pad[2];
+};
+
+/* Set Queue Filters */
+struct vf_q_mac_vlan_filter {
+ uint32_t flags;
+ #define BNX2X_VF_Q_FILTER_DEST_MAC_VALID 0x01
+ #define BNX2X_VF_Q_FILTER_VLAN_TAG_VALID 0x02
+ #define BNX2X_VF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+ uint8_t mac[ETH_ALEN];
+ uint16_t vlan_tag;
+};
+
+
+#define _UP_ETH_ALEN (6)
+
+/* configure queue filters */
+struct vf_set_q_filters_tlv {
+ struct vf_first_tlv first_tlv;
+
+ uint32_t flags;
+ #define BNX2X_VF_MAC_VLAN_CHANGED 0x01
+ #define BNX2X_VF_MULTICAST_CHANGED 0x02
+ #define BNX2X_VF_RX_MASK_CHANGED 0x04
+
+ uint8_t vf_qid; /* index in hw_qid[] */
+ uint8_t mac_filters_cnt;
+ uint8_t multicast_cnt;
+ uint8_t pad;
+
+ #define VF_MAX_MAC_FILTERS 16
+ #define VF_MAX_VLAN_FILTERS 16
+ #define VF_MAX_FILTERS (VF_MAX_MAC_FILTERS +\
+ VF_MAX_VLAN_FILTERS)
+ struct vf_q_mac_vlan_filter filters[VF_MAX_FILTERS];
+
+ #define VF_MAX_MULTICAST_PER_VF 32
+ uint8_t multicast[VF_MAX_MULTICAST_PER_VF][_UP_ETH_ALEN];
+ unsigned long rx_mask;
+};
+
+
+/* close VF (disable VF) */
+struct vf_close_tlv {
+ struct vf_first_tlv first_tlv;
+ uint16_t vf_id; /* for debug */
+ uint8_t pad[2];
+};
+
+/* rlease the VF's acquired resources */
+struct vf_release_tlv {
+ struct vf_first_tlv first_tlv;
+ uint16_t vf_id; /* for debug */
+ uint8_t pad[2];
+};
+
+union query_tlvs {
+ struct vf_first_tlv first_tlv;
+ struct vf_acquire_tlv acquire;
+ struct vf_init_tlv init;
+ struct vf_close_tlv close;
+ struct vf_q_op_tlv q_op;
+ struct vf_setup_q_tlv setup_q;
+ struct vf_set_q_filters_tlv set_q_filters;
+ struct vf_release_tlv release;
+ struct vf_rss_tlv update_rss;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union resp_tlvs {
+ struct vf_common_reply_tlv common_reply;
+ struct vf_acquire_resp_tlv acquire_resp;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+/* struct allocated by VF driver, PF sends updates to VF via bulletin */
+struct bnx2x_vf_bulletin {
+ uint32_t crc; /* crc of structure to ensure is not in
+ * mid-update
+ */
+ uint16_t version;
+ uint16_t length;
+
+ uint64_t valid_bitmap; /* bitmap indicating which fields
+ * hold valid values
+ */
+
+#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
+ * is available for it
+ */
+#define VLAN_VALID 1 /* when set, the vf should no access the
+ * vf channel
+ */
+#define CHANNEL_DOWN 2 /* vf channel is disabled. VFs are not
+ * to attempt to send messages on the
+ * channel after this bit is set
+ */
+ uint8_t mac[ETH_ALEN];
+ uint8_t mac_pad[2];
+
+ uint16_t vlan;
+ uint8_t vlan_pad[6];
+};
+
+#define MAX_TLVS_IN_LIST 50
+enum channel_tlvs {
+ BNX2X_VF_TLV_NONE, /* ends tlv sequence */
+ BNX2X_VF_TLV_ACQUIRE,
+ BNX2X_VF_TLV_INIT,
+ BNX2X_VF_TLV_SETUP_Q,
+ BNX2X_VF_TLV_SET_Q_FILTERS,
+ BNX2X_VF_TLV_ACTIVATE_Q,
+ BNX2X_VF_TLV_DEACTIVATE_Q,
+ BNX2X_VF_TLV_TEARDOWN_Q,
+ BNX2X_VF_TLV_CLOSE,
+ BNX2X_VF_TLV_RELEASE,
+ BNX2X_VF_TLV_UPDATE_RSS_OLD,
+ BNX2X_VF_TLV_PF_RELEASE_VF,
+ BNX2X_VF_TLV_LIST_END,
+ BNX2X_VF_TLV_FLR,
+ BNX2X_VF_TLV_PF_SET_MAC,
+ BNX2X_VF_TLV_PF_SET_VLAN,
+ BNX2X_VF_TLV_UPDATE_RSS,
+ BNX2X_VF_TLV_PHYS_PORT_ID,
+ BNX2X_VF_TLV_MAX
+};
+
+struct bnx2x_vf_mbx_msg {
+ union query_tlvs query[BNX2X_VF_MAX_QUEUES_PER_VF];
+ union resp_tlvs resp;
+};
+
+int bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set);
+int bnx2x_vf_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *params);
+
+#endif /* BNX2X_VFPF_H */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_fw_defs.h b/src/spdk/dpdk/drivers/net/bnx2x/ecore_fw_defs.h
new file mode 100644
index 00000000..5984acd9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_fw_defs.h
@@ -0,0 +1,401 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2014-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef ECORE_FW_DEFS_H
+#define ECORE_FW_DEFS_H
+
+
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base)
+#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[147].base + ((assertListEntry) * IRO[147].m1))
+#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
+ (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
+ IRO[153].m2))
+#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
+ (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
+ IRO[154].m2))
+#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
+ (IRO[155].base + ((vfId) * IRO[155].m1))
+#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
+ (IRO[156].base + ((vfId) * IRO[156].m1))
+#define CSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[150].base + ((funcId) * IRO[150].m1))
+#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
+ (IRO[159].base + ((funcId) * IRO[159].m1))
+#define CSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[149].base + ((funcId) * IRO[149].m1))
+#define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \
+ (IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2))
+#define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \
+ (IRO[138].base + (((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \
+ * IRO[138].m2) + ((sbId) * IRO[138].m3))
+#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
+#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+ (IRO[317].base + ((pfId) * IRO[317].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[318].base + ((pfId) * IRO[318].m1))
+#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
+ (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
+ (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
+ (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
+ (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
+ (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+ (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
+#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
+ (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+ (IRO[316].base + ((pfId) * IRO[316].m1))
+#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[308].base + ((pfId) * IRO[308].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[307].base + ((pfId) * IRO[307].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[306].base + ((pfId) * IRO[306].m1))
+#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[151].base + ((funcId) * IRO[151].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
+ (IRO[142].base + ((pfId) * IRO[142].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
+ (IRO[143].base + ((pfId) * IRO[143].m1))
+#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
+ (IRO[141].base + ((pfId) * IRO[141].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
+#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
+ (IRO[144].base + ((pfId) * IRO[144].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
+#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
+ (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
+#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
+ (IRO[133].base + ((sbId) * IRO[133].m1))
+#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
+ (IRO[134].base + ((sbId) * IRO[134].m1))
+#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
+ (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
+#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
+ (IRO[132].base + ((sbId) * IRO[132].m1))
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
+#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
+ (IRO[137].base + ((sbId) * IRO[137].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
+#define CSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[150].base + ((funcId) * IRO[150].m1))
+#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
+#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
+ (IRO[203].base + ((pfId) * IRO[203].m1))
+#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
+#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[101].base + ((assertListEntry) * IRO[101].m1))
+#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
+ (IRO[201].base + ((pfId) * IRO[201].m1))
+#define TSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[103].base + ((funcId) * IRO[103].m1))
+#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+ (IRO[272].base + ((pfId) * IRO[272].m1))
+#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[271].base + ((pfId) * IRO[271].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[270].base + ((pfId) * IRO[270].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[269].base + ((pfId) * IRO[269].m1))
+#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[268].base + ((pfId) * IRO[268].m1))
+#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
+ (IRO[278].base + ((pfId) * IRO[278].m1))
+#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+ (IRO[264].base + ((pfId) * IRO[264].m1))
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[265].base + ((pfId) * IRO[265].m1))
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[266].base + ((pfId) * IRO[266].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[267].base + ((pfId) * IRO[267].m1))
+#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
+ (IRO[202].base + ((pfId) * IRO[202].m1))
+#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[105].base + ((funcId) * IRO[105].m1))
+#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
+ (IRO[217].base + ((pfId) * IRO[217].m1))
+#define TSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[104].base + ((funcId) * IRO[104].m1))
+#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
+#define USTORM_AGG_DATA_SIZE (IRO[206].size)
+#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
+#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[176].base + ((assertListEntry) * IRO[176].m1))
+#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
+ (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * IRO[205].m2))
+#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
+ (IRO[183].base + ((portId) * IRO[183].m1))
+#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
+ (IRO[319].base + ((pfId) * IRO[319].m1))
+#define USTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[178].base + ((funcId) * IRO[178].m1))
+#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+ (IRO[283].base + ((pfId) * IRO[283].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[284].base + ((pfId) * IRO[284].m1))
+#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+ (IRO[288].base + ((pfId) * IRO[288].m1))
+#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
+ (IRO[285].base + ((pfId) * IRO[285].m1))
+#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[281].base + ((pfId) * IRO[281].m1))
+#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[280].base + ((pfId) * IRO[280].m1))
+#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[279].base + ((pfId) * IRO[279].m1))
+#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+ (IRO[282].base + ((pfId) * IRO[282].m1))
+#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
+ (IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[287].base + ((pfId) * IRO[287].m1))
+#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
+ (IRO[182].base + ((pfId) * IRO[182].m1))
+#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[180].base + ((funcId) * IRO[180].m1))
+#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
+ (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
+ IRO[209].m2))
+#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
+ (IRO[210].base + ((qzoneId) * IRO[210].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
+#define USTORM_TPA_BTR_SIZE (IRO[207].size)
+#define USTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[179].base + ((funcId) * IRO[179].m1))
+#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
+#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
+#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base)
+#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[50].base + ((assertListEntry) * IRO[50].m1))
+#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
+ (IRO[43].base + ((portId) * IRO[43].m1))
+#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
+ (IRO[45].base + ((pfId) * IRO[45].m1))
+#define XSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[47].base + ((funcId) * IRO[47].m1))
+#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+ (IRO[296].base + ((pfId) * IRO[296].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
+ (IRO[299].base + ((pfId) * IRO[299].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
+ (IRO[300].base + ((pfId) * IRO[300].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+ (IRO[301].base + ((pfId) * IRO[301].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+ (IRO[302].base + ((pfId) * IRO[302].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+ (IRO[303].base + ((pfId) * IRO[303].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+ (IRO[304].base + ((pfId) * IRO[304].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[305].base + ((pfId) * IRO[305].m1))
+#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[295].base + ((pfId) * IRO[295].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[294].base + ((pfId) * IRO[294].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[293].base + ((pfId) * IRO[293].m1))
+#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+ (IRO[298].base + ((pfId) * IRO[298].m1))
+#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
+ (IRO[297].base + ((pfId) * IRO[297].m1))
+#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
+ (IRO[292].base + ((pfId) * IRO[292].m1))
+#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+ (IRO[291].base + ((pfId) * IRO[291].m1))
+#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
+ (IRO[290].base + ((pfId) * IRO[290].m1))
+#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
+ (IRO[289].base + ((pfId) * IRO[289].m1))
+#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
+ (IRO[44].base + ((pfId) * IRO[44].m1))
+#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[49].base + ((funcId) * IRO[49].m1))
+#define XSTORM_SPQ_DATA_OFFSET(funcId) \
+ (IRO[32].base + ((funcId) * IRO[32].m1))
+#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
+#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
+ (IRO[30].base + ((funcId) * IRO[30].m1))
+#define XSTORM_SPQ_PROD_OFFSET(funcId) \
+ (IRO[31].base + ((funcId) * IRO[31].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
+ (IRO[211].base + ((portId) * IRO[211].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
+ (IRO[212].base + ((portId) * IRO[212].m1))
+#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
+ (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
+ IRO[214].m2))
+#define XSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[48].base + ((funcId) * IRO[48].m1))
+#define COMMON_ASM_INVALID_ASSERT_OPCODE (IRO[7].base)
+
+
+/* Ethernet Ring parameters */
+#define X_ETH_LOCAL_RING_SIZE 13
+#define FIRST_BD_IN_PKT 0
+#define PARSE_BD_INDEX 1
+#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
+
+/* Rx ring params */
+#define U_ETH_LOCAL_BD_RING_SIZE 8
+#define U_ETH_SGL_SIZE 8
+ /* The fw will padd the buffer with this value, so the IP header \
+ will be align to 4 Byte */
+#define IP_HEADER_ALIGNMENT_PADDING 2
+
+#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
+#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
+
+#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
+#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
+
+#define U_ETH_UNDEFINED_Q 0xFF
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY 10
+#define ETH_NUM_OF_RSS_ENGINES_E2 72
+
+#define FILTER_RULES_COUNT 16
+#define MULTICAST_RULES_COUNT 16
+#define CLASSIFY_RULES_COUNT 16
+
+/*The CRC32 seed, that is used for the hash(reduction) multicast address */
+#define ETH_CRC32_HASH_SEED 0x00000000
+
+#define ETH_CRC32_HASH_BIT_SIZE (8)
+#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
+
+/* Maximal L2 clients supported */
+#define ETH_MAX_RX_CLIENTS_E1H 28
+#define ETH_MAX_RX_CLIENTS_E2 152
+
+/* Maximal statistics client Ids */
+#define MAX_STAT_COUNTER_ID_E1H 56
+#define MAX_STAT_COUNTER_ID_E2 140
+
+#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
+#define MAX_MAC_CREDIT_E2 272 /* Per Path */
+#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
+
+
+/* Maximal aggregation queues supported */
+#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
+
+
+#define ETH_NUM_OF_MCAST_BINS 256
+#define ETH_NUM_OF_MCAST_ENGINES_E2 72
+
+#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
+ (ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
+
+#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
+
+
+/* This file defines HSI constants common to all microcode flows */
+
+/* offset in bits of protocol in the state context parameter */
+#define PROTOCOL_STATE_BIT_OFFSET 6
+
+#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+
+/* microcode fixed page page size 4K (chains and ring segments) */
+#define MC_PAGE_SIZE 4096
+
+/* Number of indices per slow-path SB */
+#define HC_SP_SB_MAX_INDICES 16 /* The Maximum of all */
+
+/* Number of indices per SB */
+#define HC_SB_MAX_INDICES_E1X 8 /* Multiple of 4 */
+#define HC_SB_MAX_INDICES_E2 8 /* Multiple of 4 */
+
+/* Number of SB */
+#define HC_SB_MAX_SB_E1X 32
+#define HC_SB_MAX_SB_E2 136 /* include PF */
+
+/* ID of slow path status block */
+#define HC_SP_SB_ID 0xde
+
+/* Num of State machines */
+#define HC_SB_MAX_SM 2 /* Fixed */
+
+/* Num of dynamic indices */
+#define HC_SB_MAX_DYNAMIC_INDICES 4 /* 0..3 fixed */
+
+/* max number of slow path commands per port */
+#define MAX_RAMRODS_PER_PORT 8
+
+
+/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+
+/* chip timers frequency constants */
+#define TIMERS_TICK_SIZE_CHIP (1e-3)
+
+/* used in toe: TsRecentAge, MaxRt, and temporarily RTT */
+#define TSEMI_CLK1_RESUL_CHIP (1e-3)
+
+/* temporarily used for RTT */
+#define XSEMI_CLK1_RESUL_CHIP (1e-3)
+
+/* used for Host Coallescing */
+#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
+
+/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+
+#define XSTORM_IP_ID_ROLL_HALF 0x8000
+#define XSTORM_IP_ID_ROLL_ALL 0
+
+/* assert list: number of entries */
+#define FW_LOG_LIST_SIZE 50
+
+#define NUM_OF_SAFC_BITS 16
+#define MAX_COS_NUMBER 4
+#define MAX_TRAFFIC_TYPES 8
+#define MAX_PFC_PRIORITIES 8
+
+ /* used by array traffic_type_to_priority[] to mark traffic type \
+ that is not mapped to priority*/
+#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
+
+/* Event Ring definitions */
+#define C_ERES_PER_PAGE \
+ (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
+#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
+
+/* number of statistic command */
+#define STATS_QUERY_CMD_COUNT 16
+
+/* niv list table size */
+#define AFEX_LIST_TABLE_SIZE 4096
+
+/* invalid VNIC Id. used in VNIC classification */
+#define INVALID_VNIC_ID 0xFF
+
+/* used for indicating an undefined RAM offset in the IRO arrays */
+#define UNDEF_IRO 0x80000000
+
+/* used for defining the amount of FCoE tasks supported for PF */
+#define MAX_FCOE_FUNCS_PER_ENGINE 2
+#define MAX_NUM_FCOE_TASKS_PER_ENGINE \
+ 4096 /*Each port can have at max 1 function*/
+
+
+#endif /* ECORE_FW_DEFS_H */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_hsi.h b/src/spdk/dpdk/drivers/net/bnx2x/ecore_hsi.h
new file mode 100644
index 00000000..57085ebb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_hsi.h
@@ -0,0 +1,6328 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2014-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef ECORE_HSI_H
+#define ECORE_HSI_H
+
+#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
+
+struct license_key {
+ uint32_t reserved[6];
+
+ uint32_t max_iscsi_conn;
+#define LICENSE_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
+#define LICENSE_MAX_ISCSI_TRGT_CONN_SHIFT 0
+#define LICENSE_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
+#define LICENSE_MAX_ISCSI_INIT_CONN_SHIFT 16
+
+ uint32_t reserved_a;
+
+ uint32_t max_fcoe_conn;
+#define LICENSE_MAX_FCOE_TRGT_CONN_MASK 0xFFFF
+#define LICENSE_MAX_FCOE_TRGT_CONN_SHIFT 0
+#define LICENSE_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000
+#define LICENSE_MAX_FCOE_INIT_CONN_SHIFT 16
+
+ uint32_t reserved_b[4];
+};
+
+typedef struct license_key license_key_t;
+
+
+/****************************************************************************
+ * Shared HW configuration *
+ ****************************************************************************/
+#define PIN_CFG_NA 0x00000000
+#define PIN_CFG_GPIO0_P0 0x00000001
+#define PIN_CFG_GPIO1_P0 0x00000002
+#define PIN_CFG_GPIO2_P0 0x00000003
+#define PIN_CFG_GPIO3_P0 0x00000004
+#define PIN_CFG_GPIO0_P1 0x00000005
+#define PIN_CFG_GPIO1_P1 0x00000006
+#define PIN_CFG_GPIO2_P1 0x00000007
+#define PIN_CFG_GPIO3_P1 0x00000008
+#define PIN_CFG_EPIO0 0x00000009
+#define PIN_CFG_EPIO1 0x0000000a
+#define PIN_CFG_EPIO2 0x0000000b
+#define PIN_CFG_EPIO3 0x0000000c
+#define PIN_CFG_EPIO4 0x0000000d
+#define PIN_CFG_EPIO5 0x0000000e
+#define PIN_CFG_EPIO6 0x0000000f
+#define PIN_CFG_EPIO7 0x00000010
+#define PIN_CFG_EPIO8 0x00000011
+#define PIN_CFG_EPIO9 0x00000012
+#define PIN_CFG_EPIO10 0x00000013
+#define PIN_CFG_EPIO11 0x00000014
+#define PIN_CFG_EPIO12 0x00000015
+#define PIN_CFG_EPIO13 0x00000016
+#define PIN_CFG_EPIO14 0x00000017
+#define PIN_CFG_EPIO15 0x00000018
+#define PIN_CFG_EPIO16 0x00000019
+#define PIN_CFG_EPIO17 0x0000001a
+#define PIN_CFG_EPIO18 0x0000001b
+#define PIN_CFG_EPIO19 0x0000001c
+#define PIN_CFG_EPIO20 0x0000001d
+#define PIN_CFG_EPIO21 0x0000001e
+#define PIN_CFG_EPIO22 0x0000001f
+#define PIN_CFG_EPIO23 0x00000020
+#define PIN_CFG_EPIO24 0x00000021
+#define PIN_CFG_EPIO25 0x00000022
+#define PIN_CFG_EPIO26 0x00000023
+#define PIN_CFG_EPIO27 0x00000024
+#define PIN_CFG_EPIO28 0x00000025
+#define PIN_CFG_EPIO29 0x00000026
+#define PIN_CFG_EPIO30 0x00000027
+#define PIN_CFG_EPIO31 0x00000028
+
+/* EPIO definition */
+#define EPIO_CFG_NA 0x00000000
+#define EPIO_CFG_EPIO0 0x00000001
+#define EPIO_CFG_EPIO1 0x00000002
+#define EPIO_CFG_EPIO2 0x00000003
+#define EPIO_CFG_EPIO3 0x00000004
+#define EPIO_CFG_EPIO4 0x00000005
+#define EPIO_CFG_EPIO5 0x00000006
+#define EPIO_CFG_EPIO6 0x00000007
+#define EPIO_CFG_EPIO7 0x00000008
+#define EPIO_CFG_EPIO8 0x00000009
+#define EPIO_CFG_EPIO9 0x0000000a
+#define EPIO_CFG_EPIO10 0x0000000b
+#define EPIO_CFG_EPIO11 0x0000000c
+#define EPIO_CFG_EPIO12 0x0000000d
+#define EPIO_CFG_EPIO13 0x0000000e
+#define EPIO_CFG_EPIO14 0x0000000f
+#define EPIO_CFG_EPIO15 0x00000010
+#define EPIO_CFG_EPIO16 0x00000011
+#define EPIO_CFG_EPIO17 0x00000012
+#define EPIO_CFG_EPIO18 0x00000013
+#define EPIO_CFG_EPIO19 0x00000014
+#define EPIO_CFG_EPIO20 0x00000015
+#define EPIO_CFG_EPIO21 0x00000016
+#define EPIO_CFG_EPIO22 0x00000017
+#define EPIO_CFG_EPIO23 0x00000018
+#define EPIO_CFG_EPIO24 0x00000019
+#define EPIO_CFG_EPIO25 0x0000001a
+#define EPIO_CFG_EPIO26 0x0000001b
+#define EPIO_CFG_EPIO27 0x0000001c
+#define EPIO_CFG_EPIO28 0x0000001d
+#define EPIO_CFG_EPIO29 0x0000001e
+#define EPIO_CFG_EPIO30 0x0000001f
+#define EPIO_CFG_EPIO31 0x00000020
+
+struct mac_addr {
+ uint32_t upper;
+ uint32_t lower;
+};
+
+
+struct shared_hw_cfg { /* NVRAM Offset */
+ /* Up to 16 bytes of NULL-terminated string */
+ uint8_t part_num[16]; /* 0x104 */
+
+ uint32_t config; /* 0x114 */
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001
+
+ #define SHARED_HW_CFG_PORT_SWAP 0x00000004
+
+ #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008
+
+ #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010
+
+ #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700
+ #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8
+ /* Whatever MFW found in NVM
+ (if multiple found, priority order is: NC-SI, UMP, IPMI) */
+ #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000
+ #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100
+ #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200
+ #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300
+ /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI
+ (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
+ #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400
+ /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI
+ (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
+ #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500
+ /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP
+ (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
+ #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600
+
+ /* Adjust the PCIe G2 Tx amplitude driver for all Tx lanes. For
+ backwards compatibility, value of 0 is disabling this feature.
+ That means that though 0 is a valid value, it cannot be
+ configured. */
+ #define SHARED_HW_CFG_G2_TX_DRIVE_MASK 0x0000F000
+ #define SHARED_HW_CFG_G2_TX_DRIVE_SHIFT 12
+
+ #define SHARED_HW_CFG_LED_MODE_MASK 0x000F0000
+ #define SHARED_HW_CFG_LED_MODE_SHIFT 16
+ #define SHARED_HW_CFG_LED_MAC1 0x00000000
+ #define SHARED_HW_CFG_LED_PHY1 0x00010000
+ #define SHARED_HW_CFG_LED_PHY2 0x00020000
+ #define SHARED_HW_CFG_LED_PHY3 0x00030000
+ #define SHARED_HW_CFG_LED_MAC2 0x00040000
+ #define SHARED_HW_CFG_LED_PHY4 0x00050000
+ #define SHARED_HW_CFG_LED_PHY5 0x00060000
+ #define SHARED_HW_CFG_LED_PHY6 0x00070000
+ #define SHARED_HW_CFG_LED_MAC3 0x00080000
+ #define SHARED_HW_CFG_LED_PHY7 0x00090000
+ #define SHARED_HW_CFG_LED_PHY9 0x000a0000
+ #define SHARED_HW_CFG_LED_PHY11 0x000b0000
+ #define SHARED_HW_CFG_LED_MAC4 0x000c0000
+ #define SHARED_HW_CFG_LED_PHY8 0x000d0000
+ #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
+ #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000
+
+ #define SHARED_HW_CFG_SRIOV_MASK 0x40000000
+ #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000
+ #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000
+
+ #define SHARED_HW_CFG_ATC_MASK 0x80000000
+ #define SHARED_HW_CFG_ATC_DISABLED 0x00000000
+ #define SHARED_HW_CFG_ATC_ENABLED 0x80000000
+
+ uint32_t config2; /* 0x118 */
+
+ #define SHARED_HW_CFG_PCIE_GEN2_MASK 0x00000100
+ #define SHARED_HW_CFG_PCIE_GEN2_SHIFT 8
+ #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100
+
+ #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000
+ #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000
+ #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000
+
+ #define SHARED_HW_CFG_HIDE_PORT1 0x00002000
+
+
+ /* Output low when PERST is asserted */
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 0x00000000
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000
+
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000
+
+ /* The fan failure mechanism is usually related to the PHY type
+ since the power consumption of the board is determined by the PHY.
+ Currently, fan is required for most designs with SFX7101, BNX2X8727
+ and BNX2X8481. If a fan is not required for a board which uses one
+ of those PHYs, this field should be set to "Disabled". If a fan is
+ required for a different PHY type, this option should be set to
+ "Enabled". The fan failure indication is expected on SPIO5 */
+ #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000
+ #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19
+ #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
+ #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
+
+ /* ASPM Power Management support */
+ #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000
+
+ /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register
+ tl_control_0 (register 0x2800) */
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000
+
+
+ /* Set the MDC/MDIO access for the first external phy */
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000
+
+ /* Set the MDC/MDIO access for the second external phy */
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
+
+ /* Max number of PF MSIX vectors */
+ uint32_t config_3; /* 0x11C */
+ #define SHARED_HW_CFG_PF_MSIX_MAX_NUM_MASK 0x0000007F
+ #define SHARED_HW_CFG_PF_MSIX_MAX_NUM_SHIFT 0
+
+ uint32_t ump_nc_si_config; /* 0x120 */
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002
+
+ /* Reserved bits: 226-230 */
+
+ /* The output pin template BSC_SEL which selects the I2C for this
+ port in the I2C Mux */
+ uint32_t board; /* 0x124 */
+ #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F
+ #define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 0
+
+ #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0
+ #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6
+ /* Use the PIN_CFG_XXX defines on top */
+ #define SHARED_HW_CFG_BOARD_REV_MASK 0x00FF0000
+ #define SHARED_HW_CFG_BOARD_REV_SHIFT 16
+
+ #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0F000000
+ #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24
+
+ #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xF0000000
+ #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28
+
+ uint32_t wc_lane_config; /* 0x128 */
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_31200213 0x000027d8
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_02133120 0x0000d827
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
+
+ /* TX lane Polarity swap */
+ #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000
+ #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000
+ #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000
+ #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000
+ /* TX lane Polarity swap */
+ #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000
+ #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000
+ #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000
+ #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000
+
+ /* Selects the port layout of the board */
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000
+};
+
+
+/****************************************************************************
+ * Port HW configuration *
+ ****************************************************************************/
+struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
+
+ uint32_t pci_id;
+ #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000FFFF
+ #define PORT_HW_CFG_PCI_DEVICE_ID_SHIFT 0
+
+ #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xFFFF0000
+ #define PORT_HW_CFG_PCI_VENDOR_ID_SHIFT 16
+
+ uint32_t pci_sub_id;
+ #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000FFFF
+ #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_SHIFT 0
+
+ #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xFFFF0000
+ #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_SHIFT 16
+
+ uint32_t power_dissipated;
+ #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000FF
+ #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0
+ #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000FF00
+ #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8
+ #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00FF0000
+ #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16
+ #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xFF000000
+ #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24
+
+ uint32_t power_consumed;
+ #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000FF
+ #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0
+ #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000FF00
+ #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8
+ #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00FF0000
+ #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16
+ #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xFF000000
+ #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24
+
+ uint32_t mac_upper;
+ uint32_t mac_lower; /* 0x140 */
+ #define PORT_HW_CFG_UPPERMAC_MASK 0x0000FFFF
+ #define PORT_HW_CFG_UPPERMAC_SHIFT 0
+
+
+ uint32_t iscsi_mac_upper; /* Upper 16 bits are always zeroes */
+ uint32_t iscsi_mac_lower;
+
+ uint32_t rdma_mac_upper; /* Upper 16 bits are always zeroes */
+ uint32_t rdma_mac_lower;
+
+ uint32_t serdes_config;
+ #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000FFFF
+ #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0
+
+ #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xFFFF0000
+ #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
+
+
+ /* Default values: 2P-64, 4P-32 */
+ uint32_t reserved;
+
+ uint32_t vf_config; /* 0x15C */
+ #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000
+ #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16
+
+ uint32_t mf_pci_id; /* 0x160 */
+ #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF
+ #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0
+
+ /* Controls the TX laser of the SFP+ module */
+ uint32_t sfp_ctrl; /* 0x164 */
+ #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
+ #define PORT_HW_CFG_TX_LASER_SHIFT 0
+ #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
+ #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
+ #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
+ #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
+ #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
+
+ /* Controls the fault module LED of the SFP+ */
+ #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
+ #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
+ #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
+
+ /* The output pin TX_DIS that controls the TX laser of the SFP+
+ module. Use the PIN_CFG_XXX defines on top */
+ uint32_t e3_sfp_ctrl; /* 0x168 */
+ #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0
+
+ /* The output pin for SFPP_TYPE which turns on the Fault module LED */
+ #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00
+ #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8
+
+ /* The input pin MOD_ABS that indicates whether SFP+ module is
+ present or not. Use the PIN_CFG_XXX defines on top */
+ #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000
+ #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16
+
+ /* The output pin PWRDIS_SFP_X which disable the power of the SFP+
+ module. Use the PIN_CFG_XXX defines on top */
+ #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000
+ #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24
+
+ /*
+ * The input pin which signals module transmit fault. Use the
+ * PIN_CFG_XXX defines on top
+ */
+ uint32_t e3_cmn_pin_cfg; /* 0x16C */
+ #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0
+
+ /* The output pin which reset the PHY. Use the PIN_CFG_XXX defines on
+ top */
+ #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00
+ #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8
+
+ /*
+ * The output pin which powers down the PHY. Use the PIN_CFG_XXX
+ * defines on top
+ */
+ #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000
+ #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16
+
+ /* The output pin values BSC_SEL which selects the I2C for this port
+ in the I2C Mux */
+ #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000
+ #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000
+
+
+ /*
+ * The input pin I_FAULT which indicate over-current has occurred.
+ * Use the PIN_CFG_XXX defines on top
+ */
+ uint32_t e3_cmn_pin_cfg1; /* 0x170 */
+ #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0
+
+ /* pause on host ring */
+ uint32_t generic_features; /* 0x174 */
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_MASK 0x00000001
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_SHIFT 0
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001
+
+ /* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2
+ * LOM recommended and tested value is 0xBEB2. Using a different
+ * value means using a value not tested by BRCM
+ */
+ uint32_t sfi_tap_values; /* 0x178 */
+ #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF
+ #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0
+
+ /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested
+ * value is 0x2. LOM recommended and tested value is 0x2. Using a
+ * different value means using a value not tested by BRCM
+ */
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16
+
+ uint32_t reserved0[5]; /* 0x17c */
+
+ uint32_t aeu_int_mask; /* 0x190 */
+
+ uint32_t media_type; /* 0x194 */
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0
+
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8
+
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16
+
+ /* 4 times 16 bits for all 4 lanes. In case external PHY is present
+ (not direct mode), those values will not take effect on the 4 XGXS
+ lanes. For some external PHYs (such as 8706 and 8726) the values
+ will be used to configure the external PHY in those cases, not
+ all 4 values are needed. */
+ uint16_t xgxs_config_rx[4]; /* 0x198 */
+ uint16_t xgxs_config_tx[4]; /* 0x1A0 */
+
+
+ /* For storing FCOE mac on shared memory */
+ uint32_t fcoe_fip_mac_upper;
+ #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff
+ #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0
+ uint32_t fcoe_fip_mac_lower;
+
+ uint32_t fcoe_wwn_port_name_upper;
+ uint32_t fcoe_wwn_port_name_lower;
+
+ uint32_t fcoe_wwn_node_name_upper;
+ uint32_t fcoe_wwn_node_name_lower;
+
+ /* wwpn for npiv enabled */
+ uint32_t wwpn_for_npiv_config; /* 0x1C0 */
+ #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_MASK 0x00000001
+ #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_SHIFT 0
+ #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_DISABLED 0x00000000
+ #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_ENABLED 0x00000001
+
+ /* wwpn for npiv valid addresses */
+ uint32_t wwpn_for_npiv_valid_addresses; /* 0x1C4 */
+ #define PORT_HW_CFG_WWPN_FOR_NPIV_ADDRESS_BITMAP_MASK 0x0000FFFF
+ #define PORT_HW_CFG_WWPN_FOR_NPIV_ADDRESS_BITMAP_SHIFT 0
+
+ struct mac_addr wwpn_for_niv_macs[16];
+
+ /* Reserved bits: 2272-2336 For storing FCOE mac on shared memory */
+ uint32_t Reserved1[14];
+
+ uint32_t pf_allocation; /* 0x280 */
+ /* number of vfs per PF, if 0 - sriov disabled */
+ #define PORT_HW_CFG_NUMBER_OF_VFS_MASK 0x000000FF
+ #define PORT_HW_CFG_NUMBER_OF_VFS_SHIFT 0
+
+ /* Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default),
+ 84833 only */
+ uint32_t xgbt_phy_cfg; /* 0x284 */
+ #define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK 0x000000FF
+ #define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT 0
+
+ uint32_t default_cfg; /* 0x288 */
+ #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
+ #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
+ #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
+ #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
+ #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
+
+ #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
+ #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
+ #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
+ #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
+ #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
+
+ #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
+ #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
+ #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
+ #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
+ #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
+
+ #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
+ #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
+ #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
+ #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
+ #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
+
+ /* When KR link is required to be set to force which is not
+ KR-compliant, this parameter determine what is the trigger for it.
+ When GPIO is selected, low input will force the speed. Currently
+ default speed is 1G. In the future, it may be widen to select the
+ forced speed in with another parameter. Note when force-1G is
+ enabled, it override option 56: Link Speed option. */
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
+ /* Enable to determine with which GPIO to reset the external phy */
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
+
+ /* Enable BAM on KR */
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+
+ /* Enable Common Mode Sense */
+ #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
+ #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
+ #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
+ #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
+
+ /* Determine the Serdes electrical interface */
+ #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000
+ #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24
+ #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000
+ #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000
+ #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000
+ #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000
+ #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000
+ #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000
+
+ /* SFP+ main TAP and post TAP volumes */
+ #define PORT_HW_CFG_TAP_LEVELS_MASK 0x70000000
+ #define PORT_HW_CFG_TAP_LEVELS_SHIFT 28
+ #define PORT_HW_CFG_TAP_LEVELS_POST_15_MAIN_43 0x00000000
+ #define PORT_HW_CFG_TAP_LEVELS_POST_14_MAIN_44 0x10000000
+ #define PORT_HW_CFG_TAP_LEVELS_POST_13_MAIN_45 0x20000000
+ #define PORT_HW_CFG_TAP_LEVELS_POST_12_MAIN_46 0x30000000
+ #define PORT_HW_CFG_TAP_LEVELS_POST_11_MAIN_47 0x40000000
+ #define PORT_HW_CFG_TAP_LEVELS_POST_10_MAIN_48 0x50000000
+
+ uint32_t speed_capability_mask2; /* 0x28C */
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_HALF 0x00000002
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_HALF 0x00000004
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_5G 0x00000020
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080
+
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_HALF 0x00020000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_HALF 0x00040000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_5G 0x00200000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 0x00800000
+
+
+ /* In the case where two media types (e.g. copper and fiber) are
+ present and electrically active at the same time, PHY Selection
+ will determine which of the two PHYs will be designated as the
+ Active PHY and used for a connection to the network. */
+ uint32_t multi_phy_config; /* 0x290 */
+ #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007
+ #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0
+ #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000
+ #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001
+ #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002
+ #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
+ #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
+
+ /* When enabled, all second phy nvram parameters will be swapped
+ with the first phy parameters */
+ #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008
+ #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3
+ #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000
+ #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008
+
+
+ /* Address of the second external phy */
+ uint32_t external_phy_config2; /* 0x294 */
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0
+
+ /* The second XGXS external PHY type */
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8071 0x00000100
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8072 0x00000200
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8073 0x00000300
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8705 0x00000400
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8706 0x00000500
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8726 0x00000600
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8481 0x00000700
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8727 0x00000900
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8727_NOC 0x00000a00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X84823 0x00000b00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54640 0x00000c00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X84833 0x00000d00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE 0x00000e00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8722 0x00000f00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54616 0x00001000
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X84834 0x00001100
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
+
+
+ /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as
+ 8706, 8726 and 8727) not all 4 values are needed. */
+ uint16_t xgxs_config2_rx[4]; /* 0x296 */
+ uint16_t xgxs_config2_tx[4]; /* 0x2A0 */
+
+ uint32_t lane_config;
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF
+ #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0
+ /* AN and forced */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
+ #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF
+ #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
+ #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00
+ #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000C000
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14
+
+ /* Indicate whether to swap the external phy polarity */
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000
+
+
+ uint32_t external_phy_config;
+ #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000FF
+ #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0
+
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000FF00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8071 0x00000100
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8072 0x00000200
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073 0x00000300
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8705 0x00000400
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8706 0x00000500
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726 0x00000600
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8481 0x00000700
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727 0x00000900
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727_NOC 0x00000a00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823 0x00000b00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54640 0x00000c00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833 0x00000d00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE 0x00000e00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722 0x00000f00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54616 0x00001000
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834 0x00001100
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
+
+ #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00FF0000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16
+
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xFF000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BNX2X5482 0x01000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000
+
+ uint32_t speed_capability_mask;
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000FFFF
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000
+
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xFFFF0000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000
+
+ /* A place to hold the original MAC address as a backup */
+ uint32_t backup_mac_upper; /* 0x2B4 */
+ uint32_t backup_mac_lower; /* 0x2B8 */
+
+};
+
+
+/****************************************************************************
+ * Shared Feature configuration *
+ ****************************************************************************/
+struct shared_feat_cfg { /* NVRAM Offset */
+
+ uint32_t config; /* 0x450 */
+ #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001
+
+ /* Use NVRAM values instead of HW default values */
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \
+ 0x00000002
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \
+ 0x00000000
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \
+ 0x00000002
+
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008
+
+ #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030
+ #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4
+
+ /* Override the OTP back to single function mode. When using GPIO,
+ high means only SF, 0 is according to CLP configuration */
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400
+
+ /* Act as if the FCoE license is invalid */
+ #define SHARED_FEAT_CFG_PREVENT_FCOE 0x00001000
+
+ /* Force FLR capability to all ports */
+ #define SHARED_FEAT_CFG_FORCE_FLR_CAPABILITY 0x00002000
+
+ /* Act as if the iSCSI license is invalid */
+ #define SHARED_FEAT_CFG_PREVENT_ISCSI_MASK 0x00004000
+ #define SHARED_FEAT_CFG_PREVENT_ISCSI_SHIFT 14
+ #define SHARED_FEAT_CFG_PREVENT_ISCSI_DISABLED 0x00000000
+ #define SHARED_FEAT_CFG_PREVENT_ISCSI_ENABLED 0x00004000
+
+ /* The interval in seconds between sending LLDP packets. Set to zero
+ to disable the feature */
+ #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00FF0000
+ #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16
+
+ /* The assigned device type ID for LLDP usage */
+ #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xFF000000
+ #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24
+
+};
+
+
+/****************************************************************************
+ * Port Feature configuration *
+ ****************************************************************************/
+struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
+
+ uint32_t config;
+ #define PORT_FEAT_CFG_BAR1_SIZE_MASK 0x0000000F
+ #define PORT_FEAT_CFG_BAR1_SIZE_SHIFT 0
+ #define PORT_FEAT_CFG_BAR1_SIZE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_BAR1_SIZE_64K 0x00000001
+ #define PORT_FEAT_CFG_BAR1_SIZE_128K 0x00000002
+ #define PORT_FEAT_CFG_BAR1_SIZE_256K 0x00000003
+ #define PORT_FEAT_CFG_BAR1_SIZE_512K 0x00000004
+ #define PORT_FEAT_CFG_BAR1_SIZE_1M 0x00000005
+ #define PORT_FEAT_CFG_BAR1_SIZE_2M 0x00000006
+ #define PORT_FEAT_CFG_BAR1_SIZE_4M 0x00000007
+ #define PORT_FEAT_CFG_BAR1_SIZE_8M 0x00000008
+ #define PORT_FEAT_CFG_BAR1_SIZE_16M 0x00000009
+ #define PORT_FEAT_CFG_BAR1_SIZE_32M 0x0000000a
+ #define PORT_FEAT_CFG_BAR1_SIZE_64M 0x0000000b
+ #define PORT_FEAT_CFG_BAR1_SIZE_128M 0x0000000c
+ #define PORT_FEAT_CFG_BAR1_SIZE_256M 0x0000000d
+ #define PORT_FEAT_CFG_BAR1_SIZE_512M 0x0000000e
+ #define PORT_FEAT_CFG_BAR1_SIZE_1G 0x0000000f
+ #define PORT_FEAT_CFG_BAR2_SIZE_MASK 0x000000F0
+ #define PORT_FEAT_CFG_BAR2_SIZE_SHIFT 4
+ #define PORT_FEAT_CFG_BAR2_SIZE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_BAR2_SIZE_64K 0x00000010
+ #define PORT_FEAT_CFG_BAR2_SIZE_128K 0x00000020
+ #define PORT_FEAT_CFG_BAR2_SIZE_256K 0x00000030
+ #define PORT_FEAT_CFG_BAR2_SIZE_512K 0x00000040
+ #define PORT_FEAT_CFG_BAR2_SIZE_1M 0x00000050
+ #define PORT_FEAT_CFG_BAR2_SIZE_2M 0x00000060
+ #define PORT_FEAT_CFG_BAR2_SIZE_4M 0x00000070
+ #define PORT_FEAT_CFG_BAR2_SIZE_8M 0x00000080
+ #define PORT_FEAT_CFG_BAR2_SIZE_16M 0x00000090
+ #define PORT_FEAT_CFG_BAR2_SIZE_32M 0x000000a0
+ #define PORT_FEAT_CFG_BAR2_SIZE_64M 0x000000b0
+ #define PORT_FEAT_CFG_BAR2_SIZE_128M 0x000000c0
+ #define PORT_FEAT_CFG_BAR2_SIZE_256M 0x000000d0
+ #define PORT_FEAT_CFG_BAR2_SIZE_512M 0x000000e0
+ #define PORT_FEAT_CFG_BAR2_SIZE_1G 0x000000f0
+
+ #define PORT_FEAT_CFG_DCBX_MASK 0x00000100
+ #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
+
+ #define PORT_FEAT_CFG_AUTOGREEEN_MASK 0x00000200
+ #define PORT_FEAT_CFG_AUTOGREEEN_SHIFT 9
+ #define PORT_FEAT_CFG_AUTOGREEEN_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_AUTOGREEEN_ENABLED 0x00000200
+
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK 0x00000C00
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_SHIFT 10
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_DEFAULT 0x00000000
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE 0x00000400
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI 0x00000800
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_BOTH 0x00000c00
+
+ #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
+ #define PORT_FEATURE_EN_SIZE_SHIFT 24
+ #define PORT_FEATURE_WOL_ENABLED 0x01000000
+ #define PORT_FEATURE_MBA_ENABLED 0x02000000
+ #define PORT_FEATURE_MFW_ENABLED 0x04000000
+
+ /* Advertise expansion ROM even if MBA is disabled */
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000
+
+ /* Check the optic vendor via i2c against a list of approved modules
+ in a separate nvram image */
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xE0000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \
+ 0x00000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \
+ 0x20000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000
+
+ uint32_t wol_config;
+ /* Default is used when driver sets to "auto" mode */
+ #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010
+
+ uint32_t mba_config;
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000007
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007
+
+ #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038
+ #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3
+
+ #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400
+ #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800
+ #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000
+ #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800
+
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000FF000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000
+ #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00F00000
+ #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3C000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26
+ #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10M_HALF 0x04000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10M_FULL 0x08000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_100M_HALF 0x0c000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_100M_FULL 0x10000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_1G 0x14000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_2_5G 0x18000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10G 0x1c000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_20G 0x20000000
+
+ uint32_t Reserved0; /* 0x460 */
+
+ uint32_t mba_vlan_cfg;
+ #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000FFFF
+ #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0
+ #define PORT_FEATURE_MBA_VLAN_EN 0x00010000
+
+ uint32_t Reserved1;
+ uint32_t smbus_config;
+ #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe
+ #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1
+
+ uint32_t vf_config;
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000F
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f
+
+ uint32_t link_config; /* Used as HW defaults for the driver */
+
+ #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700
+ #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8
+ #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000
+ #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100
+ #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200
+ #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300
+ #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
+ #define PORT_FEATURE_FLOW_CONTROL_SAFC_RX 0x00000500
+ #define PORT_FEATURE_FLOW_CONTROL_SAFC_TX 0x00000600
+ #define PORT_FEATURE_FLOW_CONTROL_SAFC_BOTH 0x00000700
+
+ #define PORT_FEATURE_LINK_SPEED_MASK 0x000F0000
+ #define PORT_FEATURE_LINK_SPEED_SHIFT 16
+ #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000
+ #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000
+ #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000
+ #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000
+ #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000
+ #define PORT_FEATURE_LINK_SPEED_1G 0x00050000
+ #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000
+ #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000
+ #define PORT_FEATURE_LINK_SPEED_20G 0x00080000
+
+ #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000
+ #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24
+ /* (forced) low speed switch (< 10G) */
+ #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000
+ /* (forced) high speed switch (>= 10G) */
+ #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000
+ #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000
+ #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000
+
+
+ /* The default for MCP link configuration,
+ uses the same defines as link_config */
+ uint32_t mfw_wol_link_cfg;
+
+ /* The default for the driver of the second external phy,
+ uses the same defines as link_config */
+ uint32_t link_config2; /* 0x47C */
+
+ /* The default for MCP of the second external phy,
+ uses the same defines as link_config */
+ uint32_t mfw_wol_link_cfg2; /* 0x480 */
+
+
+ /* EEE power saving mode */
+ uint32_t eee_power_mode; /* 0x484 */
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 0x00000002
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003
+
+
+ uint32_t Reserved2[16]; /* 0x488 */
+};
+
+/****************************************************************************
+ * Device Information *
+ ****************************************************************************/
+struct shm_dev_info { /* size */
+
+ uint32_t bc_rev; /* 8 bits each: major, minor, build */ /* 4 */
+
+ struct shared_hw_cfg shared_hw_config; /* 40 */
+
+ struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */
+
+ struct shared_feat_cfg shared_feature_config; /* 4 */
+
+ struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */
+
+};
+
+struct extended_dev_info_shared_cfg { /* NVRAM OFFSET */
+
+ /* Threshold in celcius to start using the fan */
+ uint32_t temperature_monitor1; /* 0x4000 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_THRESH_MASK 0x0000007F
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_THRESH_SHIFT 0
+
+ /* Threshold in celcius to shut down the board */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_THRESH_MASK 0x00007F00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_THRESH_SHIFT 8
+
+ /* EPIO of fan temperature status */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_MASK 0x00FF0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_SHIFT 16
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_NA 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO0 0x00010000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO1 0x00020000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO2 0x00030000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO3 0x00040000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO4 0x00050000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO5 0x00060000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO6 0x00070000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO7 0x00080000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO8 0x00090000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO9 0x000a0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO10 0x000b0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO11 0x000c0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO12 0x000d0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO13 0x000e0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO14 0x000f0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO15 0x00100000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO16 0x00110000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO17 0x00120000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO18 0x00130000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO19 0x00140000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO20 0x00150000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO21 0x00160000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO22 0x00170000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO23 0x00180000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO24 0x00190000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO25 0x001a0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO26 0x001b0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO27 0x001c0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO28 0x001d0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO29 0x001e0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO30 0x001f0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO31 0x00200000
+
+ /* EPIO of shut down temperature status */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_MASK 0xFF000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_SHIFT 24
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_NA 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO0 0x01000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO1 0x02000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO2 0x03000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO3 0x04000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO4 0x05000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO5 0x06000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO6 0x07000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO7 0x08000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO8 0x09000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO9 0x0a000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO10 0x0b000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO11 0x0c000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO12 0x0d000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO13 0x0e000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO14 0x0f000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO15 0x10000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO16 0x11000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO17 0x12000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO18 0x13000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO19 0x14000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO20 0x15000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO21 0x16000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO22 0x17000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO23 0x18000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO24 0x19000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO25 0x1a000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO26 0x1b000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO27 0x1c000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO28 0x1d000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO29 0x1e000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO30 0x1f000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO31 0x20000000
+
+
+ /* EPIO of shut down temperature status */
+ uint32_t temperature_monitor2; /* 0x4004 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_PERIOD_MASK 0x0000FFFF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_PERIOD_SHIFT 0
+
+
+ /* MFW flavor to be used */
+ uint32_t mfw_cfg; /* 0x4008 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_NA 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_A 0x00000001
+
+ /* Should NIC data query remain enabled upon last drv unload */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_MASK 0x00000100
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_SHIFT 8
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_ENABLED 0x00000100
+
+ /* Hide DCBX feature in CCM/BACS menus */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_MASK 0x00010000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_SHIFT 16
+ #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_ENABLED 0x00010000
+
+ uint32_t smbus_config; /* 0x400C */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SMBUS_ADDR_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SMBUS_ADDR_SHIFT 0
+
+ /* Switching regulator loop gain */
+ uint32_t board_cfg; /* 0x4010 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_MASK 0x0000000F
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_HW_DEFAULT 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X2 0x00000008
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X4 0x00000009
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X8 0x0000000a
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X16 0x0000000b
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_DIV8 0x0000000c
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_DIV4 0x0000000d
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_DIV2 0x0000000e
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X1 0x0000000f
+
+ /* whether shadow swim feature is supported */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_MASK 0x00000100
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_SHIFT 8
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_ENABLED 0x00000100
+
+ /* whether to show/hide SRIOV menu in CCM */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_SHOW_MENU_MASK 0x00000200
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_SHOW_MENU_SHIFT 9
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_SHOW_MENU 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_HIDE_MENU 0x00000200
+
+ /* Threshold in celcius for max continuous operation */
+ uint32_t temperature_report; /* 0x4014 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_MCOT_MASK 0x0000007F
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_MCOT_SHIFT 0
+
+ /* Threshold in celcius for sensor caution */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SCT_MASK 0x00007F00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SCT_SHIFT 8
+
+ /* wwn node prefix to be used (unless value is 0) */
+ uint32_t wwn_prefix; /* 0x4018 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX0_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX0_SHIFT 0
+
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX1_MASK 0x0000FF00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX1_SHIFT 8
+
+ /* wwn port prefix to be used (unless value is 0) */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX0_MASK 0x00FF0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX0_SHIFT 16
+
+ /* wwn port prefix to be used (unless value is 0) */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX1_MASK 0xFF000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX1_SHIFT 24
+
+ /* General debug nvm cfg */
+ uint32_t dbg_cfg_flags; /* 0x401C */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_MASK 0x000FFFFF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_ENABLE 0x00000001
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_EN_SIGDET_FILTER 0x00000002
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_LP_TX_PRESET7 0x00000004
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_TX_ANA_DEFAULT 0x00000008
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_PLL_ANA_DEFAULT 0x00000010
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_FORCE_G1PLL_RETUNE 0x00000020
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_RX_ANA_DEFAULT 0x00000040
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_FORCE_SERDES_RX_CLK 0x00000080
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_DIS_RX_LP_EIEOS 0x00000100
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_FINALIZE_UCODE 0x00000200
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_HOLDOFF_REQ 0x00000400
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_RX_SIGDET_OVERRIDE 0x00000800
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_GP_PORG_UC_RESET 0x00001000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SUPPRESS_COMPEN_EVT 0x00002000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_ADJ_TXEQ_P0_P1 0x00004000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_G3_PLL_RETUNE 0x00008000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_MAC_PHY_CTL8 0x00010000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_DIS_MAC_G3_FRM_ERR 0x00020000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_INFERRED_EI 0x00040000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_GEN3_COMPLI_ENA 0x00080000
+
+ /* Debug signet rx threshold */
+ uint32_t dbg_rx_sigdet_threshold; /* 0x4020 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_RX_SIGDET_MASK 0x00000007
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_RX_SIGDET_SHIFT 0
+
+ /* Enable IFFE feature */
+ uint32_t iffe_features; /* 0x4024 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_MASK 0x00000001
+ #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_ENABLED 0x00000001
+
+ /* Allowable port enablement (bitmask for ports 3-1) */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_PORT_MASK 0x0000000E
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_PORT_SHIFT 1
+
+ /* Allow iSCSI offload override */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_MASK 0x00000010
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_SHIFT 4
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_ENABLED 0x00000010
+
+ /* Allow FCoE offload override */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_MASK 0x00000020
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_SHIFT 5
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_ENABLED 0x00000020
+
+ /* Tie to adaptor */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_MASK 0x00008000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_SHIFT 15
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_ENABLED 0x00008000
+
+ /* Currently enabled port(s) (bitmask for ports 3-1) */
+ uint32_t current_iffe_mask; /* 0x4028 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_CFG_MASK 0x0000000E
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_CFG_SHIFT 1
+
+ /* Current iSCSI offload */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_MASK 0x00000010
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_SHIFT 4
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_ENABLED 0x00000010
+
+ /* Current FCoE offload */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_MASK 0x00000020
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_SHIFT 5
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_ENABLED 0x00000020
+
+ /* FW set this pin to "0" (assert) these signal if either of its MAC
+ * or PHY specific threshold values is exceeded.
+ * Values are standard GPIO/EPIO pins.
+ */
+ uint32_t threshold_pin; /* 0x402C */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TCONTROL_PIN_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TCONTROL_PIN_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TWARNING_PIN_MASK 0x0000FF00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TWARNING_PIN_SHIFT 8
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TCRITICAL_PIN_MASK 0x00FF0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TCRITICAL_PIN_SHIFT 16
+
+ /* MAC die temperature threshold in Celsius. */
+ uint32_t mac_threshold_val; /* 0x4030 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_MAC_THRESH_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_MAC_THRESH_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_MAC_THRESH_MASK 0x0000FF00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_MAC_THRESH_SHIFT 8
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_MAC_THRESH_MASK 0x00FF0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_MAC_THRESH_SHIFT 16
+
+ /* PHY die temperature threshold in Celsius. */
+ uint32_t phy_threshold_val; /* 0x4034 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_PHY_THRESH_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_PHY_THRESH_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_PHY_THRESH_MASK 0x0000FF00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_PHY_THRESH_SHIFT 8
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_PHY_THRESH_MASK 0x00FF0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_PHY_THRESH_SHIFT 16
+
+ /* External pins to communicate with host.
+ * Values are standard GPIO/EPIO pins.
+ */
+ uint32_t host_pin; /* 0x4038 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_I2C_ISOLATE_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_I2C_ISOLATE_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_FAULT_MASK 0x0000FF00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_FAULT_SHIFT 8
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_VPD_UPDATE_MASK 0x00FF0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_VPD_UPDATE_SHIFT 16
+ #define EXTENDED_DEV_INFO_SHARED_CFG_VPD_CACHE_COMP_MASK 0xFF000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_VPD_CACHE_COMP_SHIFT 24
+};
+
+
+#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
+ #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
+#endif
+
+#define FUNC_0 0
+#define FUNC_1 1
+#define FUNC_2 2
+#define FUNC_3 3
+#define FUNC_4 4
+#define FUNC_5 5
+#define FUNC_6 6
+#define FUNC_7 7
+#define E1H_FUNC_MAX 8
+#define E2_FUNC_MAX 4 /* per path */
+
+#define VN_0 0
+#define VN_1 1
+#define VN_2 2
+#define VN_3 3
+#define E1VN_MAX 1
+#define E1HVN_MAX 4
+
+#define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */
+/* This value (in milliseconds) determines the frequency of the driver
+ * issuing the PULSE message code. The firmware monitors this periodic
+ * pulse to determine when to switch to an OS-absent mode. */
+#define DRV_PULSE_PERIOD_MS 250
+
+/* This value (in milliseconds) determines how long the driver should
+ * wait for an acknowledgement from the firmware before timing out. Once
+ * the firmware has timed out, the driver will assume there is no firmware
+ * running and there won't be any firmware-driver synchronization during a
+ * driver reset. */
+#define FW_ACK_TIME_OUT_MS 5000
+
+#define FW_ACK_POLL_TIME_MS 1
+
+#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
+
+#define MFW_TRACE_SIGNATURE 0x54524342
+
+/****************************************************************************
+ * Driver <-> FW Mailbox *
+ ****************************************************************************/
+struct drv_port_mb {
+
+ uint32_t link_status;
+ /* Driver should update this field on any link change event */
+
+ #define LINK_STATUS_NONE (0<<0)
+ #define LINK_STATUS_LINK_FLAG_MASK 0x00000001
+ #define LINK_STATUS_LINK_UP 0x00000001
+ #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
+ #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1)
+
+ #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020
+ #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+
+ #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+ #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080
+ #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+
+ #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+ #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+ #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800
+ #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000
+ #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000
+ #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000
+ #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000
+
+ #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000
+ #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000
+
+ #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000
+ #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000
+
+ #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+ #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18)
+ #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18)
+ #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18)
+ #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18)
+
+ #define LINK_STATUS_SERDES_LINK 0x00100000
+
+ #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000
+ #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000
+ #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000
+ #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000
+
+ #define LINK_STATUS_PFC_ENABLED 0x20000000
+
+ #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
+ #define LINK_STATUS_SFP_TX_FAULT 0x80000000
+
+ uint32_t port_stx;
+
+ uint32_t stat_nig_timer;
+
+ /* MCP firmware does not use this field */
+ uint32_t ext_phy_fw_version;
+
+};
+
+
+struct drv_func_mb {
+
+ uint32_t drv_mb_header;
+ #define DRV_MSG_CODE_MASK 0xffff0000
+ #define DRV_MSG_CODE_LOAD_REQ 0x10000000
+ #define DRV_MSG_CODE_LOAD_DONE 0x11000000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
+ #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+ #define DRV_MSG_CODE_DCC_OK 0x30000000
+ #define DRV_MSG_CODE_DCC_FAILURE 0x31000000
+ #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
+ #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
+ #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
+ #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
+ #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
+ #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
+ #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
+
+ /*
+ * The optic module verification command requires bootcode
+ * v5.0.6 or later, te specific optic module verification command
+ * requires bootcode v5.2.12 or later
+ */
+ #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
+ #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
+ #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
+ #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
+ #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000
+ #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
+ #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
+ #define REQ_BC_VER_4_MT_SUPPORTED 0x00070201
+ #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
+ #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209
+
+ #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
+ #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
+ #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401
+
+ #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+
+ #define DRV_MSG_CODE_AFEX_DRIVER_SETMAC 0xd0000000
+ #define DRV_MSG_CODE_AFEX_LISTGET_ACK 0xd1000000
+ #define DRV_MSG_CODE_AFEX_LISTSET_ACK 0xd2000000
+ #define DRV_MSG_CODE_AFEX_STATSGET_ACK 0xd3000000
+ #define DRV_MSG_CODE_AFEX_VIFSET_ACK 0xd4000000
+
+ #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
+ #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
+
+ #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
+
+ #define DRV_MSG_CODE_RMMOD 0xdb000000
+ #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f
+
+ #define DRV_MSG_CODE_SET_MF_BW 0xe0000000
+ #define REQ_BC_VER_4_SET_MF_BW 0x00060202
+ #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
+
+ #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000
+
+ #define DRV_MSG_CODE_INITIATE_FLR 0x02000000
+ #define REQ_BC_VER_4_INITIATE_FLR 0x00070213
+
+ #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
+ #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
+ #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
+ #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
+
+ #define DRV_MSG_CODE_IMG_OFFSET_REQ 0xe2000000
+ #define DRV_MSG_CODE_IMG_SIZE_REQ 0xe3000000
+
+ #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ uint32_t drv_mb_param;
+ #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000
+ #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000
+
+ #define DRV_MSG_CODE_UNLOAD_NON_D3_POWER 0x00000001
+ #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002
+
+ #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a
+ #define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA 0x00002000
+
+ #define DRV_MSG_CODE_USR_BLK_IMAGE_REQ 0x00000001
+
+ uint32_t fw_mb_header;
+ #define FW_MSG_CODE_MASK 0xffff0000
+ #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
+ #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+ #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+ /* Load common chip is supported from bc 6.0.0 */
+ #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000
+ #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000
+
+ #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
+ #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+ #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
+ #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000
+ #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000
+ #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+ #define FW_MSG_CODE_DCC_DONE 0x30100000
+ #define FW_MSG_CODE_LLDP_DONE 0x40100000
+ #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000
+ #define FW_MSG_CODE_DIAG_REFUSE 0x50200000
+ #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000
+ #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000
+ #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000
+ #define FW_MSG_CODE_GET_KEY_DONE 0x80100000
+ #define FW_MSG_CODE_NO_KEY 0x80f00000
+ #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000
+ #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000
+ #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000
+ #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000
+ #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000
+ #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
+ #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+ #define FW_MSG_CODE_HW_SET_INVALID_IMAGE 0xb0100000
+
+ #define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE 0xd0100000
+ #define FW_MSG_CODE_AFEX_LISTGET_ACK 0xd1100000
+ #define FW_MSG_CODE_AFEX_LISTSET_ACK 0xd2100000
+ #define FW_MSG_CODE_AFEX_STATSGET_ACK 0xd3100000
+ #define FW_MSG_CODE_AFEX_VIFSET_ACK 0xd4100000
+
+ #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
+ #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
+
+ #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
+
+ #define FW_MSG_CODE_RMMOD_ACK 0xdb100000
+
+ #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
+ #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
+
+ #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000
+
+ #define FW_MSG_CODE_FLR_ACK 0x02000000
+ #define FW_MSG_CODE_FLR_NACK 0x02100000
+
+ #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000
+ #define FW_MSG_CODE_LIC_RESPONSE 0xff020000
+ #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000
+ #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
+
+ #define FW_MSG_CODE_IMG_OFFSET_RESPONSE 0xe2100000
+ #define FW_MSG_CODE_IMG_SIZE_RESPONSE 0xe3100000
+
+ #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ uint32_t fw_mb_param;
+
+ #define FW_PARAM_INVALID_IMG 0xffffffff
+
+ uint32_t drv_pulse_mb;
+ #define DRV_PULSE_SEQ_MASK 0x00007fff
+ #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+ /*
+ * The system time is in the format of
+ * (year-2001)*12*32 + month*32 + day.
+ */
+ #define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+ /*
+ * Indicate to the firmware not to go into the
+ * OS-absent when it is not getting driver pulse.
+ * This is used for debugging as well for PXE(MBA).
+ */
+
+ uint32_t mcp_pulse_mb;
+ #define MCP_PULSE_SEQ_MASK 0x00007fff
+ #define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+ /* Indicates to the driver not to assert due to lack
+ * of MCP response */
+ #define MCP_EVENT_MASK 0xffff0000
+ #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+ uint32_t iscsi_boot_signature;
+ uint32_t iscsi_boot_block_offset;
+
+ uint32_t drv_status;
+ #define DRV_STATUS_PMF 0x00000001
+ #define DRV_STATUS_VF_DISABLED 0x00000002
+ #define DRV_STATUS_SET_MF_BW 0x00000004
+ #define DRV_STATUS_LINK_EVENT 0x00000008
+
+ #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
+ #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
+ #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200
+ #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400
+ #define DRV_STATUS_DCC_RESERVED1 0x00000800
+ #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
+ #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
+
+ #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
+ #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
+ #define DRV_STATUS_AFEX_EVENT_MASK 0x03f00000
+ #define DRV_STATUS_AFEX_LISTGET_REQ 0x00100000
+ #define DRV_STATUS_AFEX_LISTSET_REQ 0x00200000
+ #define DRV_STATUS_AFEX_STATSGET_REQ 0x00400000
+ #define DRV_STATUS_AFEX_VIFSET_REQ 0x00800000
+
+ #define DRV_STATUS_DRV_INFO_REQ 0x04000000
+
+ #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000
+
+ uint32_t virt_mac_upper;
+ #define VIRT_MAC_SIGN_MASK 0xffff0000
+ #define VIRT_MAC_SIGNATURE 0x564d0000
+ uint32_t virt_mac_lower;
+
+};
+
+
+/****************************************************************************
+ * Management firmware state *
+ ****************************************************************************/
+/* Allocate 440 bytes for management firmware */
+#define MGMTFW_STATE_WORD_SIZE 110
+
+struct mgmtfw_state {
+ uint32_t opaque[MGMTFW_STATE_WORD_SIZE];
+};
+
+
+/****************************************************************************
+ * Multi-Function configuration *
+ ****************************************************************************/
+struct shared_mf_cfg {
+
+ uint32_t clp_mb;
+ #define SHARED_MF_CLP_SET_DEFAULT 0x00000000
+ /* set by CLP */
+ #define SHARED_MF_CLP_EXIT 0x00000001
+ /* set by MCP */
+ #define SHARED_MF_CLP_EXIT_DONE 0x00010000
+
+};
+
+struct port_mf_cfg {
+
+ uint32_t dynamic_cfg; /* device control channel */
+ #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
+ #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0
+ #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK
+
+ uint32_t reserved[1];
+
+};
+
+struct func_mf_cfg {
+
+ uint32_t config;
+ /* E/R/I/D */
+ /* function 0 of each port cannot be hidden */
+ #define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+
+ #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006
+ #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000
+ #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002
+ #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004
+ #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006
+ #define FUNC_MF_CFG_PROTOCOL_DEFAULT \
+ FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
+
+ #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008
+ #define FUNC_MF_CFG_FUNC_DELETED 0x00000010
+
+ #define FUNC_MF_CFG_FUNC_BOOT_MASK 0x00000060
+ #define FUNC_MF_CFG_FUNC_BOOT_BIOS_CTRL 0x00000000
+ #define FUNC_MF_CFG_FUNC_BOOT_VCM_DISABLED 0x00000020
+ #define FUNC_MF_CFG_FUNC_BOOT_VCM_ENABLED 0x00000040
+
+ /* PRI */
+ /* 0 - low priority, 3 - high priority */
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000
+
+ /* MINBW, MAXBW */
+ /* value range - 0..100, increments in 100Mbps */
+ #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000
+ #define FUNC_MF_CFG_MIN_BW_SHIFT 16
+ #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+ #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000
+ #define FUNC_MF_CFG_MAX_BW_SHIFT 24
+ #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000
+
+ uint32_t mac_upper; /* MAC */
+ #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+ #define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+ #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ uint32_t mac_lower;
+ #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ uint32_t e1hov_tag; /* VNI */
+ #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
+ #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
+ #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
+
+ /* afex default VLAN ID - 12 bits */
+ #define FUNC_MF_CFG_AFEX_VLAN_MASK 0x0fff0000
+ #define FUNC_MF_CFG_AFEX_VLAN_SHIFT 16
+
+ uint32_t afex_config;
+ #define FUNC_MF_CFG_AFEX_COS_FILTER_MASK 0x000000ff
+ #define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT 0
+ #define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK 0x0000ff00
+ #define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT 8
+ #define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL 0x00000100
+ #define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK 0x000f0000
+ #define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT 16
+
+ uint32_t pf_allocation;
+ /* number of vfs in function, if 0 - sriov disabled */
+ #define FUNC_MF_CFG_NUMBER_OF_VFS_MASK 0x000000FF
+ #define FUNC_MF_CFG_NUMBER_OF_VFS_SHIFT 0
+};
+
+enum mf_cfg_afex_vlan_mode {
+ FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0,
+ FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE,
+ FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE
+};
+
+/* This structure is not applicable and should not be accessed on 57711 */
+struct func_ext_cfg {
+ uint32_t func_cfg;
+ #define MACP_FUNC_CFG_FLAGS_MASK 0x0000007F
+ #define MACP_FUNC_CFG_FLAGS_SHIFT 0
+ #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
+ #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
+ #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
+ #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
+ #define MACP_FUNC_CFG_PAUSE_ON_HOST_RING 0x00000080
+
+ uint32_t iscsi_mac_addr_upper;
+ uint32_t iscsi_mac_addr_lower;
+
+ uint32_t fcoe_mac_addr_upper;
+ uint32_t fcoe_mac_addr_lower;
+
+ uint32_t fcoe_wwn_port_name_upper;
+ uint32_t fcoe_wwn_port_name_lower;
+
+ uint32_t fcoe_wwn_node_name_upper;
+ uint32_t fcoe_wwn_node_name_lower;
+
+ uint32_t preserve_data;
+ #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
+ #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
+ #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5)
+};
+
+struct mf_cfg {
+
+ struct shared_mf_cfg shared_mf_config; /* 0x4 */
+ struct port_mf_cfg port_mf_config[NVM_PATH_MAX][PORT_MAX];
+ /* 0x10*2=0x20 */
+ /* for all chips, there are 8 mf functions */
+ struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */
+ /*
+ * Extended configuration per function - this array does not exist and
+ * should not be accessed on 57711
+ */
+ struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/
+}; /* 0x224 */
+
+/****************************************************************************
+ * Shared Memory Region *
+ ****************************************************************************/
+struct shmem_region { /* SharedMem Offset (size) */
+
+ uint32_t validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */
+ #define SHR_MEM_FORMAT_REV_MASK 0xff000000
+ #define SHR_MEM_FORMAT_REV_ID ('A'<<24)
+ /* validity bits */
+ #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000
+ #define SHR_MEM_VALIDITY_MB 0x00200000
+ #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000
+ #define SHR_MEM_VALIDITY_RESERVED 0x00000007
+ /* One licensing bit should be set */
+ #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
+ #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+ #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+ #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+ /* Active MFW */
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+
+ struct shm_dev_info dev_info; /* 0x8 (0x438) */
+
+ license_key_t drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
+
+ /* FW information (for internal FW use) */
+ uint32_t fw_info_fio_offset; /* 0x4a8 (0x4) */
+ struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
+
+ struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
+
+
+#ifdef BMAPI
+ /* This is a variable length array */
+ /* the number of function depends on the chip type */
+ struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#else
+ /* the number of function depends on the chip type */
+ struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#endif /* BMAPI */
+
+}; /* 57711 = 0x7E4 | 57712 = 0x734 */
+
+/****************************************************************************
+ * Shared Memory 2 Region *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way: */
+/* 8 bit: PF ack */
+/* 64 bit: VF ack */
+/* 8 bit: ios_dis_ack */
+/* In order to maintain endianity in the mailbox hsi, we want to keep using */
+/* uint32_t. The fw must have the VF right after the PF since this is how it */
+/* access arrays(it expects always the VF to reside after the PF, and that */
+/* makes the calculation much easier for it. ) */
+/* In order to answer both limitations, and keep the struct small, the code */
+/* will abuse the structure defined here to achieve the actual partition */
+/* above */
+/****************************************************************************/
+struct fw_flr_ack {
+ uint32_t pf_ack;
+ uint32_t vf_ack[1];
+ uint32_t iov_dis_ack;
+};
+
+struct fw_flr_mb {
+ uint32_t aggint;
+ uint32_t opgen_addr;
+ struct fw_flr_ack ack;
+};
+
+struct eee_remote_vals {
+ uint32_t tx_tw;
+ uint32_t rx_tw;
+};
+
+/**** SUPPORT FOR SHMEM ARRRAYS ***
+ * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
+ * define arrays with storage types smaller then unsigned dwords.
+ * The macros below add generic support for SHMEM arrays with numeric elements
+ * that can span 2,4,8 or 16 bits. The array underlying type is a 32 bit dword
+ * array with individual bit-filed elements accessed using shifts and masks.
+ *
+ */
+
+/* eb is the bitwidth of a single element */
+#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
+#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
+
+/* the bit-position macro allows the used to flip the order of the arrays
+ * elements on a per byte or word boundary.
+ *
+ * example: an array with 8 entries each 4 bit wide. This array will fit into
+ * a single dword. The diagrmas below show the array order of the nibbles.
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the stadard ordering:
+ *
+ * | | | |
+ * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | | | |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
+ *
+ * | | | |
+ * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
+ * | | | |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
+ *
+ * | | | |
+ * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
+ * | | | |
+ */
+#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
+ ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
+ (((i)%((fb)/(eb))) * (eb)))
+
+#define SHMEM_ARRAY_GET(a, i, eb, fb) \
+ ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
+ SHMEM_ARRAY_MASK(eb))
+
+#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
+do { \
+ a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+ a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+} while (0)
+
+
+/****START OF DCBX STRUCTURES DECLARATIONS****/
+#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8
+#define DCBX_PRI_PG_BITWIDTH 4
+#define DCBX_PRI_PG_FBITS 8
+#define DCBX_PRI_PG_GET(a, i) \
+ SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS)
+#define DCBX_PRI_PG_SET(a, i, val) \
+ SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val)
+#define DCBX_MAX_NUM_PG_BW_ENTRIES 8
+#define DCBX_BW_PG_BITWIDTH 8
+#define DCBX_PG_BW_GET(a, i) \
+ SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH)
+#define DCBX_PG_BW_SET(a, i, val) \
+ SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val)
+#define DCBX_STRICT_PRI_PG 15
+#define DCBX_MAX_APP_PROTOCOL 16
+#define DCBX_MAX_APP_LOCAL 32
+#define FCOE_APP_IDX 0
+#define ISCSI_APP_IDX 1
+#define PREDEFINED_APP_IDX_MAX 2
+
+
+/* Big/Little endian have the same representation. */
+struct dcbx_ets_feature {
+ /*
+ * For Admin MIB - is this feature supported by the
+ * driver | For Local MIB - should this feature be enabled.
+ */
+ uint32_t enabled;
+ uint32_t pg_bw_tbl[2];
+ uint32_t pri_pg_tbl[1];
+};
+
+/* Driver structure in LE */
+struct dcbx_pfc_feature {
+#ifdef __BIG_ENDIAN
+ uint8_t pri_en_bitmap;
+ #define DCBX_PFC_PRI_0 0x01
+ #define DCBX_PFC_PRI_1 0x02
+ #define DCBX_PFC_PRI_2 0x04
+ #define DCBX_PFC_PRI_3 0x08
+ #define DCBX_PFC_PRI_4 0x10
+ #define DCBX_PFC_PRI_5 0x20
+ #define DCBX_PFC_PRI_6 0x40
+ #define DCBX_PFC_PRI_7 0x80
+ uint8_t pfc_caps;
+ uint8_t reserved;
+ uint8_t enabled;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t enabled;
+ uint8_t reserved;
+ uint8_t pfc_caps;
+ uint8_t pri_en_bitmap;
+ #define DCBX_PFC_PRI_0 0x01
+ #define DCBX_PFC_PRI_1 0x02
+ #define DCBX_PFC_PRI_2 0x04
+ #define DCBX_PFC_PRI_3 0x08
+ #define DCBX_PFC_PRI_4 0x10
+ #define DCBX_PFC_PRI_5 0x20
+ #define DCBX_PFC_PRI_6 0x40
+ #define DCBX_PFC_PRI_7 0x80
+#endif
+};
+
+struct dcbx_app_priority_entry {
+#ifdef __BIG_ENDIAN
+ uint16_t app_id;
+ uint8_t pri_bitmap;
+ uint8_t appBitfield;
+ #define DCBX_APP_ENTRY_VALID 0x01
+ #define DCBX_APP_ENTRY_SF_MASK 0x30
+ #define DCBX_APP_ENTRY_SF_SHIFT 4
+ #define DCBX_APP_SF_ETH_TYPE 0x10
+ #define DCBX_APP_SF_PORT 0x20
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t appBitfield;
+ #define DCBX_APP_ENTRY_VALID 0x01
+ #define DCBX_APP_ENTRY_SF_MASK 0x30
+ #define DCBX_APP_ENTRY_SF_SHIFT 4
+ #define DCBX_APP_SF_ETH_TYPE 0x10
+ #define DCBX_APP_SF_PORT 0x20
+ uint8_t pri_bitmap;
+ uint16_t app_id;
+#endif
+};
+
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+#ifdef __BIG_ENDIAN
+ uint8_t reserved;
+ uint8_t default_pri;
+ uint8_t tc_supported;
+ uint8_t enabled;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t enabled;
+ uint8_t tc_supported;
+ uint8_t default_pri;
+ uint8_t reserved;
+#endif
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+ /* PG feature */
+ struct dcbx_ets_feature ets;
+ /* PFC feature */
+ struct dcbx_pfc_feature pfc;
+ /* APP feature */
+ struct dcbx_app_priority_feature app;
+};
+
+/* LLDP protocol parameters */
+/* FW structure in BE */
+struct lldp_params {
+#ifdef __BIG_ENDIAN
+ uint8_t msg_fast_tx_interval;
+ uint8_t msg_tx_hold;
+ uint8_t msg_tx_interval;
+ uint8_t admin_status;
+ #define LLDP_TX_ONLY 0x01
+ #define LLDP_RX_ONLY 0x02
+ #define LLDP_TX_RX 0x03
+ #define LLDP_DISABLED 0x04
+ uint8_t reserved1;
+ uint8_t tx_fast;
+ uint8_t tx_crd_max;
+ uint8_t tx_crd;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t admin_status;
+ #define LLDP_TX_ONLY 0x01
+ #define LLDP_RX_ONLY 0x02
+ #define LLDP_TX_RX 0x03
+ #define LLDP_DISABLED 0x04
+ uint8_t msg_tx_interval;
+ uint8_t msg_tx_hold;
+ uint8_t msg_fast_tx_interval;
+ uint8_t tx_crd;
+ uint8_t tx_crd_max;
+ uint8_t tx_fast;
+ uint8_t reserved1;
+#endif
+ #define REM_CHASSIS_ID_STAT_LEN 4
+ #define REM_PORT_ID_STAT_LEN 4
+ /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+ uint32_t peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
+ /* Holds remote Port ID TLV header, subtype and 9B of payload. */
+ uint32_t peer_port_id[REM_PORT_ID_STAT_LEN];
+};
+
+struct lldp_dcbx_stat {
+ #define LOCAL_CHASSIS_ID_STAT_LEN 2
+ #define LOCAL_PORT_ID_STAT_LEN 2
+ /* Holds local Chassis ID 8B payload of constant subtype 4. */
+ uint32_t local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
+ /* Holds local Port ID 8B payload of constant subtype 3. */
+ uint32_t local_port_id[LOCAL_PORT_ID_STAT_LEN];
+ /* Number of DCBX frames transmitted. */
+ uint32_t num_tx_dcbx_pkts;
+ /* Number of DCBX frames received. */
+ uint32_t num_rx_dcbx_pkts;
+};
+
+/* ADMIN MIB - DCBX local machine default configuration. */
+struct lldp_admin_mib {
+ uint32_t ver_cfg_flags;
+ #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001
+ #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002
+ #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004
+ #define DCBX_ETS_RECO_TX_ENABLED 0x00000008
+ #define DCBX_ETS_RECO_VALID 0x00000010
+ #define DCBX_ETS_WILLING 0x00000020
+ #define DCBX_PFC_WILLING 0x00000040
+ #define DCBX_APP_WILLING 0x00000080
+ #define DCBX_VERSION_CEE 0x00000100
+ #define DCBX_VERSION_IEEE 0x00000200
+ #define DCBX_DCBX_ENABLED 0x00000400
+ #define DCBX_CEE_VERSION_MASK 0x0000f000
+ #define DCBX_CEE_VERSION_SHIFT 12
+ #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000
+ #define DCBX_CEE_MAX_VERSION_SHIFT 16
+ struct dcbx_features features;
+};
+
+/* REMOTE MIB - remote machine DCBX configuration. */
+struct lldp_remote_mib {
+ uint32_t prefix_seq_num;
+ uint32_t flags;
+ #define DCBX_ETS_TLV_RX 0x00000001
+ #define DCBX_PFC_TLV_RX 0x00000002
+ #define DCBX_APP_TLV_RX 0x00000004
+ #define DCBX_ETS_RX_ERROR 0x00000010
+ #define DCBX_PFC_RX_ERROR 0x00000020
+ #define DCBX_APP_RX_ERROR 0x00000040
+ #define DCBX_ETS_REM_WILLING 0x00000100
+ #define DCBX_PFC_REM_WILLING 0x00000200
+ #define DCBX_APP_REM_WILLING 0x00000400
+ #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000
+ #define DCBX_REMOTE_MIB_VALID 0x00002000
+ struct dcbx_features features;
+ uint32_t suffix_seq_num;
+};
+
+/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */
+struct lldp_local_mib {
+ uint32_t prefix_seq_num;
+ /* Indicates if there is mismatch with negotiation results. */
+ uint32_t error;
+ #define DCBX_LOCAL_ETS_ERROR 0x00000001
+ #define DCBX_LOCAL_PFC_ERROR 0x00000002
+ #define DCBX_LOCAL_APP_ERROR 0x00000004
+ #define DCBX_LOCAL_PFC_MISMATCH 0x00000010
+ #define DCBX_LOCAL_APP_MISMATCH 0x00000020
+ #define DCBX_REMOTE_MIB_ERROR 0x00000040
+ #define DCBX_REMOTE_ETS_TLV_NOT_FOUND 0x00000080
+ #define DCBX_REMOTE_PFC_TLV_NOT_FOUND 0x00000100
+ #define DCBX_REMOTE_APP_TLV_NOT_FOUND 0x00000200
+ struct dcbx_features features;
+ uint32_t suffix_seq_num;
+};
+
+struct lldp_local_mib_ext {
+ uint32_t prefix_seq_num;
+ /* APP TLV extension - 16 more entries for negotiation results*/
+ struct dcbx_app_priority_entry app_pri_tbl_ext[DCBX_MAX_APP_PROTOCOL];
+ uint32_t suffix_seq_num;
+};
+/***END OF DCBX STRUCTURES DECLARATIONS***/
+
+/***********************************************************/
+/* Elink section */
+/***********************************************************/
+#define SHMEM_LINK_CONFIG_SIZE 2
+struct shmem_lfa {
+ uint32_t req_duplex;
+ #define REQ_DUPLEX_PHY0_MASK 0x0000ffff
+ #define REQ_DUPLEX_PHY0_SHIFT 0
+ #define REQ_DUPLEX_PHY1_MASK 0xffff0000
+ #define REQ_DUPLEX_PHY1_SHIFT 16
+ uint32_t req_flow_ctrl;
+ #define REQ_FLOW_CTRL_PHY0_MASK 0x0000ffff
+ #define REQ_FLOW_CTRL_PHY0_SHIFT 0
+ #define REQ_FLOW_CTRL_PHY1_MASK 0xffff0000
+ #define REQ_FLOW_CTRL_PHY1_SHIFT 16
+ uint32_t req_line_speed; /* Also determine AutoNeg */
+ #define REQ_LINE_SPD_PHY0_MASK 0x0000ffff
+ #define REQ_LINE_SPD_PHY0_SHIFT 0
+ #define REQ_LINE_SPD_PHY1_MASK 0xffff0000
+ #define REQ_LINE_SPD_PHY1_SHIFT 16
+ uint32_t speed_cap_mask[SHMEM_LINK_CONFIG_SIZE];
+ uint32_t additional_config;
+ #define REQ_FC_AUTO_ADV_MASK 0x0000ffff
+ #define REQ_FC_AUTO_ADV0_SHIFT 0
+ #define NO_LFA_DUE_TO_DCC_MASK 0x00010000
+ uint32_t lfa_sts;
+ #define LFA_LINK_FLAP_REASON_OFFSET 0
+ #define LFA_LINK_FLAP_REASON_MASK 0x000000ff
+ #define LFA_LINK_DOWN 0x1
+ #define LFA_LOOPBACK_ENABLED 0x2
+ #define LFA_DUPLEX_MISMATCH 0x3
+ #define LFA_MFW_IS_TOO_OLD 0x4
+ #define LFA_LINK_SPEED_MISMATCH 0x5
+ #define LFA_FLOW_CTRL_MISMATCH 0x6
+ #define LFA_SPEED_CAP_MISMATCH 0x7
+ #define LFA_DCC_LFA_DISABLED 0x8
+ #define LFA_EEE_MISMATCH 0x9
+
+ #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
+ #define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
+
+ #define LINK_FLAP_COUNT_OFFSET 16
+ #define LINK_FLAP_COUNT_MASK 0x00ff0000
+
+ #define LFA_FLAGS_MASK 0xff000000
+ #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24)
+
+};
+
+struct shmem2_region {
+
+ uint32_t size; /* 0x0000 */
+
+ uint32_t dcc_support; /* 0x0004 */
+ #define SHMEM_DCC_SUPPORT_NONE 0x00000000
+ #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001
+ #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004
+ #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008
+ #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
+ #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
+
+ uint32_t ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */
+ /*
+ * For backwards compatibility, if the mf_cfg_addr does not exist
+ * (the size filed is smaller than 0xc) the mf_cfg resides at the
+ * end of struct shmem_region
+ */
+ uint32_t mf_cfg_addr; /* 0x0010 */
+ #define SHMEM_MF_CFG_ADDR_NONE 0x00000000
+
+ struct fw_flr_mb flr_mb; /* 0x0014 */
+ uint32_t dcbx_lldp_params_offset; /* 0x0028 */
+ #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
+ uint32_t dcbx_neg_res_offset; /* 0x002c */
+ #define SHMEM_DCBX_NEG_RES_NONE 0x00000000
+ uint32_t dcbx_remote_mib_offset; /* 0x0030 */
+ #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
+ /*
+ * The other shmemX_base_addr holds the other path's shmem address
+ * required for example in case of common phy init, or for path1 to know
+ * the address of mcp debug trace which is located in offset from shmem
+ * of path0
+ */
+ uint32_t other_shmem_base_addr; /* 0x0034 */
+ uint32_t other_shmem2_base_addr; /* 0x0038 */
+ /*
+ * mcp_vf_disabled is set by the MCP to indicate the driver about VFs
+ * which were disabled/flred
+ */
+ uint32_t mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */
+
+ /*
+ * drv_ack_vf_disabled is set by the PF driver to ack handled disabled
+ * VFs
+ */
+ uint32_t drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */
+
+ uint32_t dcbx_lldp_dcbx_stat_offset; /* 0x0064 */
+ #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
+
+ /*
+ * edebug_driver_if field is used to transfer messages between edebug
+ * app to the driver through shmem2.
+ *
+ * message format:
+ * bits 0-2 - function number / instance of driver to perform request
+ * bits 3-5 - op code / is_ack?
+ * bits 6-63 - data
+ */
+ uint32_t edebug_driver_if[2]; /* 0x0068 */
+ #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1
+ #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2
+ #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3
+
+ uint32_t nvm_retain_bitmap_addr; /* 0x0070 */
+
+ /* afex support of that driver */
+ uint32_t afex_driver_support; /* 0x0074 */
+ #define SHMEM_AFEX_VERSION_MASK 0x100f
+ #define SHMEM_AFEX_SUPPORTED_VERSION_ONE 0x1001
+ #define SHMEM_AFEX_REDUCED_DRV_LOADED 0x8000
+
+ /* driver receives addr in scratchpad to which it should respond */
+ uint32_t afex_scratchpad_addr_to_write[E2_FUNC_MAX];
+
+ /*
+ * generic params from MCP to driver (value depends on the msg sent
+ * to driver
+ */
+ uint32_t afex_param1_to_driver[E2_FUNC_MAX]; /* 0x0088 */
+ uint32_t afex_param2_to_driver[E2_FUNC_MAX]; /* 0x0098 */
+
+ uint32_t swim_base_addr; /* 0x0108 */
+ uint32_t swim_funcs;
+ uint32_t swim_main_cb;
+
+ /*
+ * bitmap notifying which VIF profiles stored in nvram are enabled by
+ * switch
+ */
+ uint32_t afex_profiles_enabled[2];
+
+ /* generic flags controlled by the driver */
+ uint32_t drv_flags;
+ #define DRV_FLAGS_DCB_CONFIGURED 0x0
+ #define DRV_FLAGS_DCB_CONFIGURATION_ABORTED 0x1
+ #define DRV_FLAGS_DCB_MFW_CONFIGURED 0x2
+
+ #define DRV_FLAGS_PORT_MASK ((1 << DRV_FLAGS_DCB_CONFIGURED) | \
+ (1 << DRV_FLAGS_DCB_CONFIGURATION_ABORTED) | \
+ (1 << DRV_FLAGS_DCB_MFW_CONFIGURED))
+ /* Port offset*/
+ #define DRV_FLAGS_P0_OFFSET 0
+ #define DRV_FLAGS_P1_OFFSET 16
+ #define DRV_FLAGS_GET_PORT_OFFSET(_port) ((0 == _port) ? \
+ DRV_FLAGS_P0_OFFSET : \
+ DRV_FLAGS_P1_OFFSET)
+
+ #define DRV_FLAGS_GET_PORT_MASK(_port) (DRV_FLAGS_PORT_MASK << \
+ DRV_FLAGS_GET_PORT_OFFSET(_port))
+
+ #define DRV_FLAGS_FILED_BY_PORT(_field_bit, _port) (1 << ( \
+ (_field_bit) + DRV_FLAGS_GET_PORT_OFFSET(_port)))
+
+ /* pointer to extended dev_info shared data copied from nvm image */
+ uint32_t extended_dev_info_shared_addr;
+ uint32_t ncsi_oem_data_addr;
+
+ uint32_t sensor_data_addr;
+ uint32_t buffer_block_addr;
+ uint32_t sensor_data_req_update_interval;
+ uint32_t temperature_in_half_celsius;
+ uint32_t glob_struct_in_host;
+
+ uint32_t dcbx_neg_res_ext_offset;
+ #define SHMEM_DCBX_NEG_RES_EXT_NONE 0x00000000
+
+ uint32_t drv_capabilities_flag[E2_FUNC_MAX];
+ #define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001
+ #define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002
+ #define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004
+ #define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008
+
+ uint32_t extended_dev_info_shared_cfg_size;
+
+ uint32_t dcbx_en[PORT_MAX];
+
+ /* The offset points to the multi threaded meta structure */
+ uint32_t multi_thread_data_offset;
+
+ /* address of DMAable host address holding values from the drivers */
+ uint32_t drv_info_host_addr_lo;
+ uint32_t drv_info_host_addr_hi;
+
+ /* general values written by the MFW (such as current version) */
+ uint32_t drv_info_control;
+ #define DRV_INFO_CONTROL_VER_MASK 0x000000ff
+ #define DRV_INFO_CONTROL_VER_SHIFT 0
+ #define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
+ #define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
+ uint32_t ibft_host_addr; /* initialized by option ROM */
+
+ struct eee_remote_vals eee_remote_vals[PORT_MAX];
+ uint32_t pf_allocation[E2_FUNC_MAX];
+ #define PF_ALLOACTION_MSIX_VECTORS_MASK 0x000000ff /* real value, as PCI config space can show only maximum of 64 vectors */
+ #define PF_ALLOACTION_MSIX_VECTORS_SHIFT 0
+
+ /* the status of EEE auto-negotiation
+ * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31.
+ * bits 19:16 the supported modes for EEE.
+ * bits 23:20 the speeds advertised for EEE.
+ * bits 27:24 the speeds the Link partner advertised for EEE.
+ * The supported/adv. modes in bits 27:19 originate from the
+ * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
+ * bit 28 when 1'b1 EEE was requested.
+ * bit 29 when 1'b1 tx lpi was requested.
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted if
+ * 30:29 are 2'b11.
+ * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as
+ * value. When 1'b1 those bits contains a value times 16 microseconds.
+ */
+ uint32_t eee_status[PORT_MAX];
+ #define SHMEM_EEE_TIMER_MASK 0x0000ffff
+ #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000
+ #define SHMEM_EEE_SUPPORTED_SHIFT 16
+ #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000
+ #define SHMEM_EEE_100M_ADV (1<<0)
+ #define SHMEM_EEE_1G_ADV (1U<<1)
+ #define SHMEM_EEE_10G_ADV (1<<2)
+ #define SHMEM_EEE_ADV_STATUS_SHIFT 20
+ #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000
+ #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24
+ #define SHMEM_EEE_REQUESTED_BIT 0x10000000
+ #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000
+ #define SHMEM_EEE_ACTIVE_BIT 0x40000000
+ #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000
+
+ uint32_t sizeof_port_stats;
+
+ /* Link Flap Avoidance */
+ uint32_t lfa_host_addr[PORT_MAX];
+
+ /* External PHY temperature in deg C. */
+ uint32_t extphy_temps_in_celsius;
+ #define EXTPHY1_TEMP_MASK 0x0000ffff
+ #define EXTPHY1_TEMP_SHIFT 0
+
+ uint32_t ocdata_info_addr; /* Offset 0x148 */
+ uint32_t drv_func_info_addr; /* Offset 0x14C */
+ uint32_t drv_func_info_size; /* Offset 0x150 */
+ uint32_t link_attr_sync[PORT_MAX]; /* Offset 0x154 */
+ #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
+};
+
+
+struct emac_stats {
+ uint32_t rx_stat_ifhcinoctets;
+ uint32_t rx_stat_ifhcinbadoctets;
+ uint32_t rx_stat_etherstatsfragments;
+ uint32_t rx_stat_ifhcinucastpkts;
+ uint32_t rx_stat_ifhcinmulticastpkts;
+ uint32_t rx_stat_ifhcinbroadcastpkts;
+ uint32_t rx_stat_dot3statsfcserrors;
+ uint32_t rx_stat_dot3statsalignmenterrors;
+ uint32_t rx_stat_dot3statscarriersenseerrors;
+ uint32_t rx_stat_xonpauseframesreceived;
+ uint32_t rx_stat_xoffpauseframesreceived;
+ uint32_t rx_stat_maccontrolframesreceived;
+ uint32_t rx_stat_xoffstateentered;
+ uint32_t rx_stat_dot3statsframestoolong;
+ uint32_t rx_stat_etherstatsjabbers;
+ uint32_t rx_stat_etherstatsundersizepkts;
+ uint32_t rx_stat_etherstatspkts64octets;
+ uint32_t rx_stat_etherstatspkts65octetsto127octets;
+ uint32_t rx_stat_etherstatspkts128octetsto255octets;
+ uint32_t rx_stat_etherstatspkts256octetsto511octets;
+ uint32_t rx_stat_etherstatspkts512octetsto1023octets;
+ uint32_t rx_stat_etherstatspkts1024octetsto1522octets;
+ uint32_t rx_stat_etherstatspktsover1522octets;
+
+ uint32_t rx_stat_falsecarriererrors;
+
+ uint32_t tx_stat_ifhcoutoctets;
+ uint32_t tx_stat_ifhcoutbadoctets;
+ uint32_t tx_stat_etherstatscollisions;
+ uint32_t tx_stat_outxonsent;
+ uint32_t tx_stat_outxoffsent;
+ uint32_t tx_stat_flowcontroldone;
+ uint32_t tx_stat_dot3statssinglecollisionframes;
+ uint32_t tx_stat_dot3statsmultiplecollisionframes;
+ uint32_t tx_stat_dot3statsdeferredtransmissions;
+ uint32_t tx_stat_dot3statsexcessivecollisions;
+ uint32_t tx_stat_dot3statslatecollisions;
+ uint32_t tx_stat_ifhcoutucastpkts;
+ uint32_t tx_stat_ifhcoutmulticastpkts;
+ uint32_t tx_stat_ifhcoutbroadcastpkts;
+ uint32_t tx_stat_etherstatspkts64octets;
+ uint32_t tx_stat_etherstatspkts65octetsto127octets;
+ uint32_t tx_stat_etherstatspkts128octetsto255octets;
+ uint32_t tx_stat_etherstatspkts256octetsto511octets;
+ uint32_t tx_stat_etherstatspkts512octetsto1023octets;
+ uint32_t tx_stat_etherstatspkts1024octetsto1522octets;
+ uint32_t tx_stat_etherstatspktsover1522octets;
+ uint32_t tx_stat_dot3statsinternalmactransmiterrors;
+};
+
+
+struct bmac1_stats {
+ uint32_t tx_stat_gtpkt_lo;
+ uint32_t tx_stat_gtpkt_hi;
+ uint32_t tx_stat_gtxpf_lo;
+ uint32_t tx_stat_gtxpf_hi;
+ uint32_t tx_stat_gtfcs_lo;
+ uint32_t tx_stat_gtfcs_hi;
+ uint32_t tx_stat_gtmca_lo;
+ uint32_t tx_stat_gtmca_hi;
+ uint32_t tx_stat_gtbca_lo;
+ uint32_t tx_stat_gtbca_hi;
+ uint32_t tx_stat_gtfrg_lo;
+ uint32_t tx_stat_gtfrg_hi;
+ uint32_t tx_stat_gtovr_lo;
+ uint32_t tx_stat_gtovr_hi;
+ uint32_t tx_stat_gt64_lo;
+ uint32_t tx_stat_gt64_hi;
+ uint32_t tx_stat_gt127_lo;
+ uint32_t tx_stat_gt127_hi;
+ uint32_t tx_stat_gt255_lo;
+ uint32_t tx_stat_gt255_hi;
+ uint32_t tx_stat_gt511_lo;
+ uint32_t tx_stat_gt511_hi;
+ uint32_t tx_stat_gt1023_lo;
+ uint32_t tx_stat_gt1023_hi;
+ uint32_t tx_stat_gt1518_lo;
+ uint32_t tx_stat_gt1518_hi;
+ uint32_t tx_stat_gt2047_lo;
+ uint32_t tx_stat_gt2047_hi;
+ uint32_t tx_stat_gt4095_lo;
+ uint32_t tx_stat_gt4095_hi;
+ uint32_t tx_stat_gt9216_lo;
+ uint32_t tx_stat_gt9216_hi;
+ uint32_t tx_stat_gt16383_lo;
+ uint32_t tx_stat_gt16383_hi;
+ uint32_t tx_stat_gtmax_lo;
+ uint32_t tx_stat_gtmax_hi;
+ uint32_t tx_stat_gtufl_lo;
+ uint32_t tx_stat_gtufl_hi;
+ uint32_t tx_stat_gterr_lo;
+ uint32_t tx_stat_gterr_hi;
+ uint32_t tx_stat_gtbyt_lo;
+ uint32_t tx_stat_gtbyt_hi;
+
+ uint32_t rx_stat_gr64_lo;
+ uint32_t rx_stat_gr64_hi;
+ uint32_t rx_stat_gr127_lo;
+ uint32_t rx_stat_gr127_hi;
+ uint32_t rx_stat_gr255_lo;
+ uint32_t rx_stat_gr255_hi;
+ uint32_t rx_stat_gr511_lo;
+ uint32_t rx_stat_gr511_hi;
+ uint32_t rx_stat_gr1023_lo;
+ uint32_t rx_stat_gr1023_hi;
+ uint32_t rx_stat_gr1518_lo;
+ uint32_t rx_stat_gr1518_hi;
+ uint32_t rx_stat_gr2047_lo;
+ uint32_t rx_stat_gr2047_hi;
+ uint32_t rx_stat_gr4095_lo;
+ uint32_t rx_stat_gr4095_hi;
+ uint32_t rx_stat_gr9216_lo;
+ uint32_t rx_stat_gr9216_hi;
+ uint32_t rx_stat_gr16383_lo;
+ uint32_t rx_stat_gr16383_hi;
+ uint32_t rx_stat_grmax_lo;
+ uint32_t rx_stat_grmax_hi;
+ uint32_t rx_stat_grpkt_lo;
+ uint32_t rx_stat_grpkt_hi;
+ uint32_t rx_stat_grfcs_lo;
+ uint32_t rx_stat_grfcs_hi;
+ uint32_t rx_stat_grmca_lo;
+ uint32_t rx_stat_grmca_hi;
+ uint32_t rx_stat_grbca_lo;
+ uint32_t rx_stat_grbca_hi;
+ uint32_t rx_stat_grxcf_lo;
+ uint32_t rx_stat_grxcf_hi;
+ uint32_t rx_stat_grxpf_lo;
+ uint32_t rx_stat_grxpf_hi;
+ uint32_t rx_stat_grxuo_lo;
+ uint32_t rx_stat_grxuo_hi;
+ uint32_t rx_stat_grjbr_lo;
+ uint32_t rx_stat_grjbr_hi;
+ uint32_t rx_stat_grovr_lo;
+ uint32_t rx_stat_grovr_hi;
+ uint32_t rx_stat_grflr_lo;
+ uint32_t rx_stat_grflr_hi;
+ uint32_t rx_stat_grmeg_lo;
+ uint32_t rx_stat_grmeg_hi;
+ uint32_t rx_stat_grmeb_lo;
+ uint32_t rx_stat_grmeb_hi;
+ uint32_t rx_stat_grbyt_lo;
+ uint32_t rx_stat_grbyt_hi;
+ uint32_t rx_stat_grund_lo;
+ uint32_t rx_stat_grund_hi;
+ uint32_t rx_stat_grfrg_lo;
+ uint32_t rx_stat_grfrg_hi;
+ uint32_t rx_stat_grerb_lo;
+ uint32_t rx_stat_grerb_hi;
+ uint32_t rx_stat_grfre_lo;
+ uint32_t rx_stat_grfre_hi;
+ uint32_t rx_stat_gripj_lo;
+ uint32_t rx_stat_gripj_hi;
+};
+
+struct bmac2_stats {
+ uint32_t tx_stat_gtpk_lo; /* gtpok */
+ uint32_t tx_stat_gtpk_hi; /* gtpok */
+ uint32_t tx_stat_gtxpf_lo; /* gtpf */
+ uint32_t tx_stat_gtxpf_hi; /* gtpf */
+ uint32_t tx_stat_gtpp_lo; /* NEW BMAC2 */
+ uint32_t tx_stat_gtpp_hi; /* NEW BMAC2 */
+ uint32_t tx_stat_gtfcs_lo;
+ uint32_t tx_stat_gtfcs_hi;
+ uint32_t tx_stat_gtuca_lo; /* NEW BMAC2 */
+ uint32_t tx_stat_gtuca_hi; /* NEW BMAC2 */
+ uint32_t tx_stat_gtmca_lo;
+ uint32_t tx_stat_gtmca_hi;
+ uint32_t tx_stat_gtbca_lo;
+ uint32_t tx_stat_gtbca_hi;
+ uint32_t tx_stat_gtovr_lo;
+ uint32_t tx_stat_gtovr_hi;
+ uint32_t tx_stat_gtfrg_lo;
+ uint32_t tx_stat_gtfrg_hi;
+ uint32_t tx_stat_gtpkt1_lo; /* gtpkt */
+ uint32_t tx_stat_gtpkt1_hi; /* gtpkt */
+ uint32_t tx_stat_gt64_lo;
+ uint32_t tx_stat_gt64_hi;
+ uint32_t tx_stat_gt127_lo;
+ uint32_t tx_stat_gt127_hi;
+ uint32_t tx_stat_gt255_lo;
+ uint32_t tx_stat_gt255_hi;
+ uint32_t tx_stat_gt511_lo;
+ uint32_t tx_stat_gt511_hi;
+ uint32_t tx_stat_gt1023_lo;
+ uint32_t tx_stat_gt1023_hi;
+ uint32_t tx_stat_gt1518_lo;
+ uint32_t tx_stat_gt1518_hi;
+ uint32_t tx_stat_gt2047_lo;
+ uint32_t tx_stat_gt2047_hi;
+ uint32_t tx_stat_gt4095_lo;
+ uint32_t tx_stat_gt4095_hi;
+ uint32_t tx_stat_gt9216_lo;
+ uint32_t tx_stat_gt9216_hi;
+ uint32_t tx_stat_gt16383_lo;
+ uint32_t tx_stat_gt16383_hi;
+ uint32_t tx_stat_gtmax_lo;
+ uint32_t tx_stat_gtmax_hi;
+ uint32_t tx_stat_gtufl_lo;
+ uint32_t tx_stat_gtufl_hi;
+ uint32_t tx_stat_gterr_lo;
+ uint32_t tx_stat_gterr_hi;
+ uint32_t tx_stat_gtbyt_lo;
+ uint32_t tx_stat_gtbyt_hi;
+
+ uint32_t rx_stat_gr64_lo;
+ uint32_t rx_stat_gr64_hi;
+ uint32_t rx_stat_gr127_lo;
+ uint32_t rx_stat_gr127_hi;
+ uint32_t rx_stat_gr255_lo;
+ uint32_t rx_stat_gr255_hi;
+ uint32_t rx_stat_gr511_lo;
+ uint32_t rx_stat_gr511_hi;
+ uint32_t rx_stat_gr1023_lo;
+ uint32_t rx_stat_gr1023_hi;
+ uint32_t rx_stat_gr1518_lo;
+ uint32_t rx_stat_gr1518_hi;
+ uint32_t rx_stat_gr2047_lo;
+ uint32_t rx_stat_gr2047_hi;
+ uint32_t rx_stat_gr4095_lo;
+ uint32_t rx_stat_gr4095_hi;
+ uint32_t rx_stat_gr9216_lo;
+ uint32_t rx_stat_gr9216_hi;
+ uint32_t rx_stat_gr16383_lo;
+ uint32_t rx_stat_gr16383_hi;
+ uint32_t rx_stat_grmax_lo;
+ uint32_t rx_stat_grmax_hi;
+ uint32_t rx_stat_grpkt_lo;
+ uint32_t rx_stat_grpkt_hi;
+ uint32_t rx_stat_grfcs_lo;
+ uint32_t rx_stat_grfcs_hi;
+ uint32_t rx_stat_gruca_lo;
+ uint32_t rx_stat_gruca_hi;
+ uint32_t rx_stat_grmca_lo;
+ uint32_t rx_stat_grmca_hi;
+ uint32_t rx_stat_grbca_lo;
+ uint32_t rx_stat_grbca_hi;
+ uint32_t rx_stat_grxpf_lo; /* grpf */
+ uint32_t rx_stat_grxpf_hi; /* grpf */
+ uint32_t rx_stat_grpp_lo;
+ uint32_t rx_stat_grpp_hi;
+ uint32_t rx_stat_grxuo_lo; /* gruo */
+ uint32_t rx_stat_grxuo_hi; /* gruo */
+ uint32_t rx_stat_grjbr_lo;
+ uint32_t rx_stat_grjbr_hi;
+ uint32_t rx_stat_grovr_lo;
+ uint32_t rx_stat_grovr_hi;
+ uint32_t rx_stat_grxcf_lo; /* grcf */
+ uint32_t rx_stat_grxcf_hi; /* grcf */
+ uint32_t rx_stat_grflr_lo;
+ uint32_t rx_stat_grflr_hi;
+ uint32_t rx_stat_grpok_lo;
+ uint32_t rx_stat_grpok_hi;
+ uint32_t rx_stat_grmeg_lo;
+ uint32_t rx_stat_grmeg_hi;
+ uint32_t rx_stat_grmeb_lo;
+ uint32_t rx_stat_grmeb_hi;
+ uint32_t rx_stat_grbyt_lo;
+ uint32_t rx_stat_grbyt_hi;
+ uint32_t rx_stat_grund_lo;
+ uint32_t rx_stat_grund_hi;
+ uint32_t rx_stat_grfrg_lo;
+ uint32_t rx_stat_grfrg_hi;
+ uint32_t rx_stat_grerb_lo; /* grerrbyt */
+ uint32_t rx_stat_grerb_hi; /* grerrbyt */
+ uint32_t rx_stat_grfre_lo; /* grfrerr */
+ uint32_t rx_stat_grfre_hi; /* grfrerr */
+ uint32_t rx_stat_gripj_lo;
+ uint32_t rx_stat_gripj_hi;
+};
+
+struct mstat_stats {
+ struct {
+ /* OTE MSTAT on E3 has a bug where this register's contents are
+ * actually tx_gtxpok + tx_gtxpf + (possibly)tx_gtxpp
+ */
+ uint32_t tx_gtxpok_lo;
+ uint32_t tx_gtxpok_hi;
+ uint32_t tx_gtxpf_lo;
+ uint32_t tx_gtxpf_hi;
+ uint32_t tx_gtxpp_lo;
+ uint32_t tx_gtxpp_hi;
+ uint32_t tx_gtfcs_lo;
+ uint32_t tx_gtfcs_hi;
+ uint32_t tx_gtuca_lo;
+ uint32_t tx_gtuca_hi;
+ uint32_t tx_gtmca_lo;
+ uint32_t tx_gtmca_hi;
+ uint32_t tx_gtgca_lo;
+ uint32_t tx_gtgca_hi;
+ uint32_t tx_gtpkt_lo;
+ uint32_t tx_gtpkt_hi;
+ uint32_t tx_gt64_lo;
+ uint32_t tx_gt64_hi;
+ uint32_t tx_gt127_lo;
+ uint32_t tx_gt127_hi;
+ uint32_t tx_gt255_lo;
+ uint32_t tx_gt255_hi;
+ uint32_t tx_gt511_lo;
+ uint32_t tx_gt511_hi;
+ uint32_t tx_gt1023_lo;
+ uint32_t tx_gt1023_hi;
+ uint32_t tx_gt1518_lo;
+ uint32_t tx_gt1518_hi;
+ uint32_t tx_gt2047_lo;
+ uint32_t tx_gt2047_hi;
+ uint32_t tx_gt4095_lo;
+ uint32_t tx_gt4095_hi;
+ uint32_t tx_gt9216_lo;
+ uint32_t tx_gt9216_hi;
+ uint32_t tx_gt16383_lo;
+ uint32_t tx_gt16383_hi;
+ uint32_t tx_gtufl_lo;
+ uint32_t tx_gtufl_hi;
+ uint32_t tx_gterr_lo;
+ uint32_t tx_gterr_hi;
+ uint32_t tx_gtbyt_lo;
+ uint32_t tx_gtbyt_hi;
+ uint32_t tx_collisions_lo;
+ uint32_t tx_collisions_hi;
+ uint32_t tx_singlecollision_lo;
+ uint32_t tx_singlecollision_hi;
+ uint32_t tx_multiplecollisions_lo;
+ uint32_t tx_multiplecollisions_hi;
+ uint32_t tx_deferred_lo;
+ uint32_t tx_deferred_hi;
+ uint32_t tx_excessivecollisions_lo;
+ uint32_t tx_excessivecollisions_hi;
+ uint32_t tx_latecollisions_lo;
+ uint32_t tx_latecollisions_hi;
+ } stats_tx;
+
+ struct {
+ uint32_t rx_gr64_lo;
+ uint32_t rx_gr64_hi;
+ uint32_t rx_gr127_lo;
+ uint32_t rx_gr127_hi;
+ uint32_t rx_gr255_lo;
+ uint32_t rx_gr255_hi;
+ uint32_t rx_gr511_lo;
+ uint32_t rx_gr511_hi;
+ uint32_t rx_gr1023_lo;
+ uint32_t rx_gr1023_hi;
+ uint32_t rx_gr1518_lo;
+ uint32_t rx_gr1518_hi;
+ uint32_t rx_gr2047_lo;
+ uint32_t rx_gr2047_hi;
+ uint32_t rx_gr4095_lo;
+ uint32_t rx_gr4095_hi;
+ uint32_t rx_gr9216_lo;
+ uint32_t rx_gr9216_hi;
+ uint32_t rx_gr16383_lo;
+ uint32_t rx_gr16383_hi;
+ uint32_t rx_grpkt_lo;
+ uint32_t rx_grpkt_hi;
+ uint32_t rx_grfcs_lo;
+ uint32_t rx_grfcs_hi;
+ uint32_t rx_gruca_lo;
+ uint32_t rx_gruca_hi;
+ uint32_t rx_grmca_lo;
+ uint32_t rx_grmca_hi;
+ uint32_t rx_grbca_lo;
+ uint32_t rx_grbca_hi;
+ uint32_t rx_grxpf_lo;
+ uint32_t rx_grxpf_hi;
+ uint32_t rx_grxpp_lo;
+ uint32_t rx_grxpp_hi;
+ uint32_t rx_grxuo_lo;
+ uint32_t rx_grxuo_hi;
+ uint32_t rx_grovr_lo;
+ uint32_t rx_grovr_hi;
+ uint32_t rx_grxcf_lo;
+ uint32_t rx_grxcf_hi;
+ uint32_t rx_grflr_lo;
+ uint32_t rx_grflr_hi;
+ uint32_t rx_grpok_lo;
+ uint32_t rx_grpok_hi;
+ uint32_t rx_grbyt_lo;
+ uint32_t rx_grbyt_hi;
+ uint32_t rx_grund_lo;
+ uint32_t rx_grund_hi;
+ uint32_t rx_grfrg_lo;
+ uint32_t rx_grfrg_hi;
+ uint32_t rx_grerb_lo;
+ uint32_t rx_grerb_hi;
+ uint32_t rx_grfre_lo;
+ uint32_t rx_grfre_hi;
+
+ uint32_t rx_alignmenterrors_lo;
+ uint32_t rx_alignmenterrors_hi;
+ uint32_t rx_falsecarrier_lo;
+ uint32_t rx_falsecarrier_hi;
+ uint32_t rx_llfcmsgcnt_lo;
+ uint32_t rx_llfcmsgcnt_hi;
+ } stats_rx;
+};
+
+union mac_stats {
+ struct emac_stats emac_stats;
+ struct bmac1_stats bmac1_stats;
+ struct bmac2_stats bmac2_stats;
+ struct mstat_stats mstat_stats;
+};
+
+
+struct mac_stx {
+ /* in_bad_octets */
+ uint32_t rx_stat_ifhcinbadoctets_hi;
+ uint32_t rx_stat_ifhcinbadoctets_lo;
+
+ /* out_bad_octets */
+ uint32_t tx_stat_ifhcoutbadoctets_hi;
+ uint32_t tx_stat_ifhcoutbadoctets_lo;
+
+ /* crc_receive_errors */
+ uint32_t rx_stat_dot3statsfcserrors_hi;
+ uint32_t rx_stat_dot3statsfcserrors_lo;
+ /* alignment_errors */
+ uint32_t rx_stat_dot3statsalignmenterrors_hi;
+ uint32_t rx_stat_dot3statsalignmenterrors_lo;
+ /* carrier_sense_errors */
+ uint32_t rx_stat_dot3statscarriersenseerrors_hi;
+ uint32_t rx_stat_dot3statscarriersenseerrors_lo;
+ /* false_carrier_detections */
+ uint32_t rx_stat_falsecarriererrors_hi;
+ uint32_t rx_stat_falsecarriererrors_lo;
+
+ /* runt_packets_received */
+ uint32_t rx_stat_etherstatsundersizepkts_hi;
+ uint32_t rx_stat_etherstatsundersizepkts_lo;
+ /* jabber_packets_received */
+ uint32_t rx_stat_dot3statsframestoolong_hi;
+ uint32_t rx_stat_dot3statsframestoolong_lo;
+
+ /* error_runt_packets_received */
+ uint32_t rx_stat_etherstatsfragments_hi;
+ uint32_t rx_stat_etherstatsfragments_lo;
+ /* error_jabber_packets_received */
+ uint32_t rx_stat_etherstatsjabbers_hi;
+ uint32_t rx_stat_etherstatsjabbers_lo;
+
+ /* control_frames_received */
+ uint32_t rx_stat_maccontrolframesreceived_hi;
+ uint32_t rx_stat_maccontrolframesreceived_lo;
+ uint32_t rx_stat_mac_xpf_hi;
+ uint32_t rx_stat_mac_xpf_lo;
+ uint32_t rx_stat_mac_xcf_hi;
+ uint32_t rx_stat_mac_xcf_lo;
+
+ /* xoff_state_entered */
+ uint32_t rx_stat_xoffstateentered_hi;
+ uint32_t rx_stat_xoffstateentered_lo;
+ /* pause_xon_frames_received */
+ uint32_t rx_stat_xonpauseframesreceived_hi;
+ uint32_t rx_stat_xonpauseframesreceived_lo;
+ /* pause_xoff_frames_received */
+ uint32_t rx_stat_xoffpauseframesreceived_hi;
+ uint32_t rx_stat_xoffpauseframesreceived_lo;
+ /* pause_xon_frames_transmitted */
+ uint32_t tx_stat_outxonsent_hi;
+ uint32_t tx_stat_outxonsent_lo;
+ /* pause_xoff_frames_transmitted */
+ uint32_t tx_stat_outxoffsent_hi;
+ uint32_t tx_stat_outxoffsent_lo;
+ /* flow_control_done */
+ uint32_t tx_stat_flowcontroldone_hi;
+ uint32_t tx_stat_flowcontroldone_lo;
+
+ /* ether_stats_collisions */
+ uint32_t tx_stat_etherstatscollisions_hi;
+ uint32_t tx_stat_etherstatscollisions_lo;
+ /* single_collision_transmit_frames */
+ uint32_t tx_stat_dot3statssinglecollisionframes_hi;
+ uint32_t tx_stat_dot3statssinglecollisionframes_lo;
+ /* multiple_collision_transmit_frames */
+ uint32_t tx_stat_dot3statsmultiplecollisionframes_hi;
+ uint32_t tx_stat_dot3statsmultiplecollisionframes_lo;
+ /* deferred_transmissions */
+ uint32_t tx_stat_dot3statsdeferredtransmissions_hi;
+ uint32_t tx_stat_dot3statsdeferredtransmissions_lo;
+ /* excessive_collision_frames */
+ uint32_t tx_stat_dot3statsexcessivecollisions_hi;
+ uint32_t tx_stat_dot3statsexcessivecollisions_lo;
+ /* late_collision_frames */
+ uint32_t tx_stat_dot3statslatecollisions_hi;
+ uint32_t tx_stat_dot3statslatecollisions_lo;
+
+ /* frames_transmitted_64_bytes */
+ uint32_t tx_stat_etherstatspkts64octets_hi;
+ uint32_t tx_stat_etherstatspkts64octets_lo;
+ /* frames_transmitted_65_127_bytes */
+ uint32_t tx_stat_etherstatspkts65octetsto127octets_hi;
+ uint32_t tx_stat_etherstatspkts65octetsto127octets_lo;
+ /* frames_transmitted_128_255_bytes */
+ uint32_t tx_stat_etherstatspkts128octetsto255octets_hi;
+ uint32_t tx_stat_etherstatspkts128octetsto255octets_lo;
+ /* frames_transmitted_256_511_bytes */
+ uint32_t tx_stat_etherstatspkts256octetsto511octets_hi;
+ uint32_t tx_stat_etherstatspkts256octetsto511octets_lo;
+ /* frames_transmitted_512_1023_bytes */
+ uint32_t tx_stat_etherstatspkts512octetsto1023octets_hi;
+ uint32_t tx_stat_etherstatspkts512octetsto1023octets_lo;
+ /* frames_transmitted_1024_1522_bytes */
+ uint32_t tx_stat_etherstatspkts1024octetsto1522octets_hi;
+ uint32_t tx_stat_etherstatspkts1024octetsto1522octets_lo;
+ /* frames_transmitted_1523_9022_bytes */
+ uint32_t tx_stat_etherstatspktsover1522octets_hi;
+ uint32_t tx_stat_etherstatspktsover1522octets_lo;
+ uint32_t tx_stat_mac_2047_hi;
+ uint32_t tx_stat_mac_2047_lo;
+ uint32_t tx_stat_mac_4095_hi;
+ uint32_t tx_stat_mac_4095_lo;
+ uint32_t tx_stat_mac_9216_hi;
+ uint32_t tx_stat_mac_9216_lo;
+ uint32_t tx_stat_mac_16383_hi;
+ uint32_t tx_stat_mac_16383_lo;
+
+ /* internal_mac_transmit_errors */
+ uint32_t tx_stat_dot3statsinternalmactransmiterrors_hi;
+ uint32_t tx_stat_dot3statsinternalmactransmiterrors_lo;
+
+ /* if_out_discards */
+ uint32_t tx_stat_mac_ufl_hi;
+ uint32_t tx_stat_mac_ufl_lo;
+};
+
+
+#define MAC_STX_IDX_MAX 2
+
+struct host_port_stats {
+ uint32_t host_port_stats_counter;
+
+ struct mac_stx mac_stx[MAC_STX_IDX_MAX];
+
+ uint32_t brb_drop_hi;
+ uint32_t brb_drop_lo;
+
+ uint32_t not_used; /* obsolete as of MFW 7.2.1 */
+
+ uint32_t pfc_frames_tx_hi;
+ uint32_t pfc_frames_tx_lo;
+ uint32_t pfc_frames_rx_hi;
+ uint32_t pfc_frames_rx_lo;
+
+ uint32_t eee_lpi_count_hi;
+ uint32_t eee_lpi_count_lo;
+};
+
+
+struct host_func_stats {
+ uint32_t host_func_stats_start;
+
+ uint32_t total_bytes_received_hi;
+ uint32_t total_bytes_received_lo;
+
+ uint32_t total_bytes_transmitted_hi;
+ uint32_t total_bytes_transmitted_lo;
+
+ uint32_t total_unicast_packets_received_hi;
+ uint32_t total_unicast_packets_received_lo;
+
+ uint32_t total_multicast_packets_received_hi;
+ uint32_t total_multicast_packets_received_lo;
+
+ uint32_t total_broadcast_packets_received_hi;
+ uint32_t total_broadcast_packets_received_lo;
+
+ uint32_t total_unicast_packets_transmitted_hi;
+ uint32_t total_unicast_packets_transmitted_lo;
+
+ uint32_t total_multicast_packets_transmitted_hi;
+ uint32_t total_multicast_packets_transmitted_lo;
+
+ uint32_t total_broadcast_packets_transmitted_hi;
+ uint32_t total_broadcast_packets_transmitted_lo;
+
+ uint32_t valid_bytes_received_hi;
+ uint32_t valid_bytes_received_lo;
+
+ uint32_t host_func_stats_end;
+};
+
+/* VIC definitions */
+#define VICSTATST_UIF_INDEX 2
+
+/*
+ * stats collected for afex.
+ * NOTE: structure is exactly as expected to be received by the switch.
+ * order must remain exactly as is unless protocol changes !
+ */
+struct afex_stats {
+ uint32_t tx_unicast_frames_hi;
+ uint32_t tx_unicast_frames_lo;
+ uint32_t tx_unicast_bytes_hi;
+ uint32_t tx_unicast_bytes_lo;
+ uint32_t tx_multicast_frames_hi;
+ uint32_t tx_multicast_frames_lo;
+ uint32_t tx_multicast_bytes_hi;
+ uint32_t tx_multicast_bytes_lo;
+ uint32_t tx_broadcast_frames_hi;
+ uint32_t tx_broadcast_frames_lo;
+ uint32_t tx_broadcast_bytes_hi;
+ uint32_t tx_broadcast_bytes_lo;
+ uint32_t tx_frames_discarded_hi;
+ uint32_t tx_frames_discarded_lo;
+ uint32_t tx_frames_dropped_hi;
+ uint32_t tx_frames_dropped_lo;
+
+ uint32_t rx_unicast_frames_hi;
+ uint32_t rx_unicast_frames_lo;
+ uint32_t rx_unicast_bytes_hi;
+ uint32_t rx_unicast_bytes_lo;
+ uint32_t rx_multicast_frames_hi;
+ uint32_t rx_multicast_frames_lo;
+ uint32_t rx_multicast_bytes_hi;
+ uint32_t rx_multicast_bytes_lo;
+ uint32_t rx_broadcast_frames_hi;
+ uint32_t rx_broadcast_frames_lo;
+ uint32_t rx_broadcast_bytes_hi;
+ uint32_t rx_broadcast_bytes_lo;
+ uint32_t rx_frames_discarded_hi;
+ uint32_t rx_frames_discarded_lo;
+ uint32_t rx_frames_dropped_hi;
+ uint32_t rx_frames_dropped_lo;
+};
+
+/* To maintain backward compatibility between FW and drivers, new elements */
+/* should be added to the end of the structure. */
+
+/* Per Port Statistics */
+struct port_info {
+ uint32_t size; /* size of this structure (i.e. sizeof(port_info)) */
+ uint32_t enabled; /* 0 =Disabled, 1= Enabled */
+ uint32_t link_speed; /* multiplier of 100Mb */
+ uint32_t wol_support; /* WoL Support (i.e. Non-Zero if WOL supported ) */
+ uint32_t flow_control; /* 802.3X Flow Ctrl. 0=off 1=RX 2=TX 3=RX&TX.*/
+ uint32_t flex10; /* Flex10 mode enabled. non zero = yes */
+ uint32_t rx_drops; /* RX Discards. Counters roll over, never reset */
+ uint32_t rx_errors; /* RX Errors. Physical Port Stats L95, All PFs and NC-SI.
+ This is flagged by Consumer as an error. */
+ uint32_t rx_uncast_lo; /* RX Unicast Packets. Free running counters: */
+ uint32_t rx_uncast_hi; /* RX Unicast Packets. Free running counters: */
+ uint32_t rx_mcast_lo; /* RX Multicast Packets */
+ uint32_t rx_mcast_hi; /* RX Multicast Packets */
+ uint32_t rx_bcast_lo; /* RX Broadcast Packets */
+ uint32_t rx_bcast_hi; /* RX Broadcast Packets */
+ uint32_t tx_uncast_lo; /* TX Unicast Packets */
+ uint32_t tx_uncast_hi; /* TX Unicast Packets */
+ uint32_t tx_mcast_lo; /* TX Multicast Packets */
+ uint32_t tx_mcast_hi; /* TX Multicast Packets */
+ uint32_t tx_bcast_lo; /* TX Broadcast Packets */
+ uint32_t tx_bcast_hi; /* TX Broadcast Packets */
+ uint32_t tx_errors; /* TX Errors */
+ uint32_t tx_discards; /* TX Discards */
+ uint32_t rx_frames_lo; /* RX Frames received */
+ uint32_t rx_frames_hi; /* RX Frames received */
+ uint32_t rx_bytes_lo; /* RX Bytes received */
+ uint32_t rx_bytes_hi; /* RX Bytes received */
+ uint32_t tx_frames_lo; /* TX Frames sent */
+ uint32_t tx_frames_hi; /* TX Frames sent */
+ uint32_t tx_bytes_lo; /* TX Bytes sent */
+ uint32_t tx_bytes_hi; /* TX Bytes sent */
+ uint32_t link_status; /* Port P Link Status. 1:0 bit for port enabled.
+ 1:1 bit for link good,
+ 2:1 Set if link changed between last poll. */
+ uint32_t tx_pfc_frames_lo; /* PFC Frames sent. */
+ uint32_t tx_pfc_frames_hi; /* PFC Frames sent. */
+ uint32_t rx_pfc_frames_lo; /* PFC Frames Received. */
+ uint32_t rx_pfc_frames_hi; /* PFC Frames Received. */
+};
+
+
+#define BNX2X_5710_FW_MAJOR_VERSION 7
+#define BNX2X_5710_FW_MINOR_VERSION 2
+#define BNX2X_5710_FW_REVISION_VERSION 51
+#define BNX2X_5710_FW_ENGINEERING_VERSION 0
+#define BNX2X_5710_FW_COMPILE_FLAGS 1
+
+
+/*
+ * attention bits $$KEEP_ENDIANNESS$$
+ */
+struct atten_sp_status_block
+{
+ uint32_t attn_bits /* 16 bit of attention signal lines */;
+ uint32_t attn_bits_ack /* 16 bit of attention signal ack */;
+ uint8_t status_block_id /* status block id */;
+ uint8_t reserved0 /* resreved for padding */;
+ uint16_t attn_bits_index /* attention bits running index */;
+ uint32_t reserved1 /* resreved for padding */;
+};
+
+
+/*
+ * The eth aggregative context of Cstorm
+ */
+struct cstorm_eth_ag_context
+{
+ uint32_t __reserved0[10];
+};
+
+
+/*
+ * dmae command structure
+ */
+struct dmae_command
+{
+ uint32_t opcode;
+#define DMAE_COMMAND_SRC (0x1<<0) /* BitField opcode Whether the source is the PCIe or the GRC. 0- The source is the PCIe 1- The source is the GRC. */
+#define DMAE_COMMAND_SRC_SHIFT 0
+#define DMAE_COMMAND_DST (0x3<<1) /* BitField opcode The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+#define DMAE_COMMAND_DST_SHIFT 1
+#define DMAE_COMMAND_C_DST (0x1<<3) /* BitField opcode The destination of the completion: 0-PCIe 1-GRC */
+#define DMAE_COMMAND_C_DST_SHIFT 3
+#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4) /* BitField opcode Whether to write a completion word to the completion destination: 0-Do not write a completion word 1-Write the completion word */
+#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5) /* BitField opcode Whether to write a CRC word to the completion destination 0-Do not write a CRC word 1-Write a CRC word */
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6) /* BitField opcode The CRC word should be taken from the DMAE GRC space from address 9+X, where X is the value in these bits. */
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6
+#define DMAE_COMMAND_ENDIANITY (0x3<<9) /* BitField opcode swapping mode. */
+#define DMAE_COMMAND_ENDIANITY_SHIFT 9
+#define DMAE_COMMAND_PORT (0x1<<11) /* BitField opcode Which network port ID to present to the PCI request interface */
+#define DMAE_COMMAND_PORT_SHIFT 11
+#define DMAE_COMMAND_CRC_RESET (0x1<<12) /* BitField opcode reset crc result */
+#define DMAE_COMMAND_CRC_RESET_SHIFT 12
+#define DMAE_COMMAND_SRC_RESET (0x1<<13) /* BitField opcode reset source address in next go */
+#define DMAE_COMMAND_SRC_RESET_SHIFT 13
+#define DMAE_COMMAND_DST_RESET (0x1<<14) /* BitField opcode reset dest address in next go */
+#define DMAE_COMMAND_DST_RESET_SHIFT 14
+#define DMAE_COMMAND_E1HVN (0x3<<15) /* BitField opcode vnic number E2 and onwards source vnic */
+#define DMAE_COMMAND_E1HVN_SHIFT 15
+#define DMAE_COMMAND_DST_VN (0x3<<17) /* BitField opcode E2 and onwards dest vnic */
+#define DMAE_COMMAND_DST_VN_SHIFT 17
+#define DMAE_COMMAND_C_FUNC (0x1<<19) /* BitField opcode E2 and onwards which function gets the completion src_vn(e1hvn)-0 dst_vn-1 */
+#define DMAE_COMMAND_C_FUNC_SHIFT 19
+#define DMAE_COMMAND_ERR_POLICY (0x3<<20) /* BitField opcode E2 and onwards what to do when theres a completion and a PCI error regular-0 error indication-1 no completion-2 */
+#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
+#define DMAE_COMMAND_RESERVED0 (0x3FF<<22) /* BitField opcode */
+#define DMAE_COMMAND_RESERVED0_SHIFT 22
+ uint32_t src_addr_lo /* source address low/grc address */;
+ uint32_t src_addr_hi /* source address hi */;
+ uint32_t dst_addr_lo /* dest address low/grc address */;
+ uint32_t dst_addr_hi /* dest address hi */;
+#if defined(__BIG_ENDIAN)
+ uint16_t opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility source VF id */
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility selects the source function PF-0, VF-1 */
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility */
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility destination VF id */
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility selects the destination function PF-0, VF-1 */
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility */
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+ uint16_t len /* copy length */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t len /* copy length */;
+ uint16_t opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility source VF id */
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility selects the source function PF-0, VF-1 */
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility */
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility destination VF id */
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility selects the destination function PF-0, VF-1 */
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility */
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+#endif
+ uint32_t comp_addr_lo /* completion address low/grc address */;
+ uint32_t comp_addr_hi /* completion address hi */;
+ uint32_t comp_val /* value to write to completion address */;
+ uint32_t crc32 /* crc32 result */;
+ uint32_t crc32_c /* crc32_c result */;
+#if defined(__BIG_ENDIAN)
+ uint16_t crc16_c /* crc16_c result */;
+ uint16_t crc16 /* crc16 result */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t crc16 /* crc16 result */;
+ uint16_t crc16_c /* crc16_c result */;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t reserved3;
+ uint16_t crc_t10 /* crc_t10 result */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t crc_t10 /* crc_t10 result */;
+ uint16_t reserved3;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t xsum8 /* checksum8 result */;
+ uint16_t xsum16 /* checksum16 result */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t xsum16 /* checksum16 result */;
+ uint16_t xsum8 /* checksum8 result */;
+#endif
+};
+
+
+/*
+ * common data for all protocols
+ */
+struct doorbell_hdr
+{
+ uint8_t header;
+#define DOORBELL_HDR_RX (0x1<<0) /* BitField header 1 for rx doorbell, 0 for tx doorbell */
+#define DOORBELL_HDR_RX_SHIFT 0
+#define DOORBELL_HDR_DB_TYPE (0x1<<1) /* BitField header 0 for normal doorbell, 1 for advertise wnd doorbell */
+#define DOORBELL_HDR_DB_TYPE_SHIFT 1
+#define DOORBELL_HDR_DPM_SIZE (0x3<<2) /* BitField header rdma tx only: DPM transaction size specifier (64/128/256/512 bytes) */
+#define DOORBELL_HDR_DPM_SIZE_SHIFT 2
+#define DOORBELL_HDR_CONN_TYPE (0xF<<4) /* BitField header connection type */
+#define DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
+
+/*
+ * Ethernet doorbell
+ */
+struct eth_tx_doorbell
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t npackets /* number of data bytes that were added in the doorbell */;
+ uint8_t params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) /* BitField params number of buffer descriptors that were added in the doorbell */
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) /* BitField params tx fin command flag */
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7) /* BitField params doorbell queue spare flag */
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+ struct doorbell_hdr hdr;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr hdr;
+ uint8_t params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) /* BitField params number of buffer descriptors that were added in the doorbell */
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) /* BitField params tx fin command flag */
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7) /* BitField params doorbell queue spare flag */
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+ uint16_t npackets /* number of data bytes that were added in the doorbell */;
+#endif
+};
+
+
+/*
+ * 3 lines. status block $$KEEP_ENDIANNESS$$
+ */
+struct hc_status_block_e1x
+{
+ uint16_t index_values[HC_SB_MAX_INDICES_E1X] /* indices reported by cstorm */;
+ uint16_t running_index[HC_SB_MAX_SM] /* Status Block running indices */;
+ uint32_t rsrv[11];
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e1x
+{
+ struct hc_status_block_e1x sb /* fast path indices */;
+};
+
+
+/*
+ * 3 lines. status block $$KEEP_ENDIANNESS$$
+ */
+struct hc_status_block_e2
+{
+ uint16_t index_values[HC_SB_MAX_INDICES_E2] /* indices reported by cstorm */;
+ uint16_t running_index[HC_SB_MAX_SM] /* Status Block running indices */;
+ uint32_t reserved[11];
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e2
+{
+ struct hc_status_block_e2 sb /* fast path indices */;
+};
+
+
+/*
+ * 5 lines. slow-path status block $$KEEP_ENDIANNESS$$
+ */
+struct hc_sp_status_block
+{
+ uint16_t index_values[HC_SP_SB_MAX_INDICES] /* indices reported by cstorm */;
+ uint16_t running_index /* Status Block running index */;
+ uint16_t rsrv;
+ uint32_t rsrv1;
+};
+
+/*
+ * host status block
+ */
+struct host_sp_status_block
+{
+ struct atten_sp_status_block atten_status_block /* attention bits section */;
+ struct hc_sp_status_block sp_sb /* slow path indices */;
+};
+
+
+/*
+ * IGU driver acknowledgment register
+ */
+union igu_ack_register
+{
+ struct {
+#if defined(__BIG_ENDIAN)
+ uint16_t sb_id_and_flags;
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0) /* BitField sb_id_and_flags 0-15: non default status blocks, 16: default status block */
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
+#define IGU_ACK_REGISTER_STORM_ID (0x7<<5) /* BitField sb_id_and_flags 0-3:storm id, 4: attn status block (valid in default sb only) */
+#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
+#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8) /* BitField sb_id_and_flags if set, acknowledges status block index */
+#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
+#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9) /* BitField sb_id_and_flags interrupt enable/disable/nop: use IGU_INT_xxx constants */
+#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
+#define IGU_ACK_REGISTER_RESERVED (0x1F<<11) /* BitField sb_id_and_flags */
+#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
+ uint16_t status_block_index /* status block index acknowledgement */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t status_block_index /* status block index acknowledgement */;
+ uint16_t sb_id_and_flags;
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0) /* BitField sb_id_and_flags 0-15: non default status blocks, 16: default status block */
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
+#define IGU_ACK_REGISTER_STORM_ID (0x7<<5) /* BitField sb_id_and_flags 0-3:storm id, 4: attn status block (valid in default sb only) */
+#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
+#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8) /* BitField sb_id_and_flags if set, acknowledges status block index */
+#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
+#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9) /* BitField sb_id_and_flags interrupt enable/disable/nop: use IGU_INT_xxx constants */
+#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
+#define IGU_ACK_REGISTER_RESERVED (0x1F<<11) /* BitField sb_id_and_flags */
+#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
+#endif
+ } sb;
+ uint32_t raw_data;
+};
+
+
+/*
+ * IGU driver acknowledgement register
+ */
+struct igu_backward_compatible
+{
+ uint32_t sb_id_and_flags;
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0) /* BitField sb_id_and_flags */
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16) /* BitField sb_id_and_flags */
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21) /* BitField sb_id_and_flags 0-3:storm id, 4: attn status block (valid in default sb only) */
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24) /* BitField sb_id_and_flags if set, acknowledges status block index */
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25) /* BitField sb_id_and_flags interrupt enable/disable/nop: use IGU_INT_xxx constants */
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27) /* BitField sb_id_and_flags */
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27
+ uint32_t reserved_2;
+};
+
+
+/*
+ * IGU driver acknowledgement register
+ */
+struct igu_regular
+{
+ uint32_t sb_id_and_flags;
+#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_SB_INDEX_SHIFT 0
+#define IGU_REGULAR_RESERVED0 (0x1<<20) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_RESERVED0_SHIFT 20
+#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21) /* BitField sb_id_and_flags 21-23 (use enum igu_seg_access) */
+#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21
+#define IGU_REGULAR_BUPDATE (0x1<<24) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_BUPDATE_SHIFT 24
+#define IGU_REGULAR_ENABLE_INT (0x3<<25) /* BitField sb_id_and_flags interrupt enable/disable/nop (use enum igu_int_cmd) */
+#define IGU_REGULAR_ENABLE_INT_SHIFT 25
+#define IGU_REGULAR_RESERVED_1 (0x1<<27) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_RESERVED_1_SHIFT 27
+#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28
+#define IGU_REGULAR_CLEANUP_SET (0x1<<30) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_CLEANUP_SET_SHIFT 30
+#define IGU_REGULAR_BCLEANUP (0x1<<31) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_BCLEANUP_SHIFT 31
+ uint32_t reserved_2;
+};
+
+/*
+ * IGU driver acknowledgement register
+ */
+union igu_consprod_reg
+{
+ struct igu_regular regular;
+ struct igu_backward_compatible backward_compatible;
+};
+
+
+/*
+ * Igu control commands
+ */
+enum igu_ctrl_cmd
+{
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD};
+
+
+/*
+ * Control register for the IGU command register
+ */
+struct igu_ctrl_reg
+{
+ uint32_t ctrl_data;
+#define IGU_CTRL_REG_ADDRESS (0xFFF<<0) /* BitField ctrl_data */
+#define IGU_CTRL_REG_ADDRESS_SHIFT 0
+#define IGU_CTRL_REG_FID (0x7F<<12) /* BitField ctrl_data */
+#define IGU_CTRL_REG_FID_SHIFT 12
+#define IGU_CTRL_REG_RESERVED (0x1<<19) /* BitField ctrl_data */
+#define IGU_CTRL_REG_RESERVED_SHIFT 19
+#define IGU_CTRL_REG_TYPE (0x1<<20) /* BitField ctrl_data (use enum igu_ctrl_cmd) */
+#define IGU_CTRL_REG_TYPE_SHIFT 20
+#define IGU_CTRL_REG_UNUSED (0x7FF<<21) /* BitField ctrl_data */
+#define IGU_CTRL_REG_UNUSED_SHIFT 21
+};
+
+
+/*
+ * Igu interrupt command
+ */
+enum igu_int_cmd
+{
+ IGU_INT_ENABLE,
+ IGU_INT_DISABLE,
+ IGU_INT_NOP,
+ IGU_INT_NOP2,
+ MAX_IGU_INT_CMD};
+
+
+/*
+ * Igu segments
+ */
+enum igu_seg_access
+{
+ IGU_SEG_ACCESS_NORM,
+ IGU_SEG_ACCESS_DEF,
+ IGU_SEG_ACCESS_ATTN,
+ MAX_IGU_SEG_ACCESS};
+
+
+/*
+ * Parser parsing flags field
+ */
+struct parsing_flags
+{
+ uint16_t flags;
+#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0) /* BitField flagscontext flags 0=non-unicast, 1=unicast (use enum prs_flags_eth_addr_type) */
+#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0
+#define PARSING_FLAGS_VLAN (0x1<<1) /* BitField flagscontext flags 0 or 1 */
+#define PARSING_FLAGS_VLAN_SHIFT 1
+#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2) /* BitField flagscontext flags 0 or 1 */
+#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2
+#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3) /* BitField flagscontext flags 0=un-known, 1=Ipv4, 2=Ipv6,3=LLC SNAP un-known. LLC SNAP here refers only to LLC/SNAP packets that do not have Ipv4 or Ipv6 above them. Ipv4 and Ipv6 indications are even if they are over LLC/SNAP and not directly over Ethernet (use enum prs_flags_over_eth) */
+#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3
+#define PARSING_FLAGS_IP_OPTIONS (0x1<<5) /* BitField flagscontext flags 0=no IP options / extension headers. 1=IP options / extension header exist */
+#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5
+#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1<<6) /* BitField flagscontext flags 0=non-fragmented, 1=fragmented */
+#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6
+#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3<<7) /* BitField flagscontext flags 0=un-known, 1=TCP, 2=UDP (use enum prs_flags_over_ip) */
+#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7
+#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1<<9) /* BitField flagscontext flags 0=packet with data, 1=pure-ACK (use enum prs_flags_ack_type) */
+#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9
+#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1<<10) /* BitField flagscontext flags 0=no TCP options. 1=TCP options */
+#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10
+#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1<<11) /* BitField flagscontext flags According to the TCP header options parsing */
+#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11
+#define PARSING_FLAGS_CONNECTION_MATCH (0x1<<12) /* BitField flagscontext flags connection match in searcher indication */
+#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12
+#define PARSING_FLAGS_LLC_SNAP (0x1<<13) /* BitField flagscontext flags LLC SNAP indication */
+#define PARSING_FLAGS_LLC_SNAP_SHIFT 13
+#define PARSING_FLAGS_RESERVED0 (0x3<<14) /* BitField flagscontext flags */
+#define PARSING_FLAGS_RESERVED0_SHIFT 14
+};
+
+
+/*
+ * Parsing flags for TCP ACK type
+ */
+enum prs_flags_ack_type
+{
+ PRS_FLAG_PUREACK_PIGGY,
+ PRS_FLAG_PUREACK_PURE,
+ MAX_PRS_FLAGS_ACK_TYPE};
+
+
+/*
+ * Parsing flags for Ethernet address type
+ */
+enum prs_flags_eth_addr_type
+{
+ PRS_FLAG_ETHTYPE_NON_UNICAST,
+ PRS_FLAG_ETHTYPE_UNICAST,
+ MAX_PRS_FLAGS_ETH_ADDR_TYPE};
+
+
+/*
+ * Parsing flags for over-ethernet protocol
+ */
+enum prs_flags_over_eth
+{
+ PRS_FLAG_OVERETH_UNKNOWN,
+ PRS_FLAG_OVERETH_IPV4,
+ PRS_FLAG_OVERETH_IPV6,
+ PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN,
+ MAX_PRS_FLAGS_OVER_ETH};
+
+
+/*
+ * Parsing flags for over-IP protocol
+ */
+enum prs_flags_over_ip
+{
+ PRS_FLAG_OVERIP_UNKNOWN,
+ PRS_FLAG_OVERIP_TCP,
+ PRS_FLAG_OVERIP_UDP,
+ MAX_PRS_FLAGS_OVER_IP};
+
+
+/*
+ * SDM operation gen command (generate aggregative interrupt)
+ */
+struct sdm_op_gen
+{
+ uint32_t command;
+#define SDM_OP_GEN_COMP_PARAM (0x1F<<0) /* BitField commandcomp_param and comp_type thread ID/aggr interrupt number/counter depending on the completion type */
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE (0x7<<5) /* BitField commandcomp_param and comp_type Direct messages to CM / PCI switch are not supported in operation_gen completion */
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 5
+#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8) /* BitField commandcomp_param and comp_type bit index in aggregated interrupt vector */
+#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16) /* BitField commandcomp_param and comp_type */
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16
+#define SDM_OP_GEN_RESERVED (0x7FFF<<17) /* BitField commandcomp_param and comp_type */
+#define SDM_OP_GEN_RESERVED_SHIFT 17
+};
+
+
+/*
+ * Timers connection context
+ */
+struct timers_block_context
+{
+ uint32_t __reserved_0 /* data of client 0 of the timers block*/;
+ uint32_t __reserved_1 /* data of client 1 of the timers block*/;
+ uint32_t __reserved_2 /* data of client 2 of the timers block*/;
+ uint32_t flags;
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0) /* BitField flagscontext flags number of active timers running */
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2) /* BitField flagscontext flags flag: is connection valid (should be set by driver to 1 in toe/iscsi connections) */
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3) /* BitField flagscontext flags */
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
+};
+
+
+/*
+ * The eth aggregative context of Tstorm
+ */
+struct tstorm_eth_ag_context
+{
+ uint32_t __reserved0[14];
+};
+
+
+/*
+ * The eth aggregative context of Ustorm
+ */
+struct ustorm_eth_ag_context
+{
+ uint32_t __reserved0;
+#if defined(__BIG_ENDIAN)
+ uint8_t cdu_usage /* Will be used by the CDU for validation of the CID/connection type on doorbells. */;
+ uint8_t __reserved2;
+ uint16_t __reserved1;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t __reserved1;
+ uint8_t __reserved2;
+ uint8_t cdu_usage /* Will be used by the CDU for validation of the CID/connection type on doorbells. */;
+#endif
+ uint32_t __reserved3[6];
+};
+
+
+/*
+ * The eth aggregative context of Xstorm
+ */
+struct xstorm_eth_ag_context
+{
+ uint32_t reserved0;
+#if defined(__BIG_ENDIAN)
+ uint8_t cdu_reserved /* Used by the CDU for validation and debugging */;
+ uint8_t reserved2;
+ uint16_t reserved1;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t reserved1;
+ uint8_t reserved2;
+ uint8_t cdu_reserved /* Used by the CDU for validation and debugging */;
+#endif
+ uint32_t reserved3[30];
+};
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t zero_fill2 /* driver must zero this field! */;
+ uint8_t zero_fill1 /* driver must zero this field! */;
+ struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr header;
+ uint8_t zero_fill1 /* driver must zero this field! */;
+ uint16_t zero_fill2 /* driver must zero this field! */;
+#endif
+};
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell_set_prod
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t prod /* Producer index to be set */;
+ uint8_t zero_fill1 /* driver must zero this field! */;
+ struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr header;
+ uint8_t zero_fill1 /* driver must zero this field! */;
+ uint16_t prod /* Producer index to be set */;
+#endif
+};
+
+
+struct regpair
+{
+ uint32_t lo /* low word for reg-pair */;
+ uint32_t hi /* high word for reg-pair */;
+};
+
+
+struct regpair_native
+{
+ uint32_t lo /* low word for reg-pair */;
+ uint32_t hi /* high word for reg-pair */;
+};
+
+
+/*
+ * Classify rule opcodes in E2/E3
+ */
+enum classify_rule
+{
+ CLASSIFY_RULE_OPCODE_MAC /* Add/remove a MAC address */,
+ CLASSIFY_RULE_OPCODE_VLAN /* Add/remove a VLAN */,
+ CLASSIFY_RULE_OPCODE_PAIR /* Add/remove a MAC-VLAN pair */,
+ MAX_CLASSIFY_RULE};
+
+
+/*
+ * Classify rule types in E2/E3
+ */
+enum classify_rule_action_type
+{
+ CLASSIFY_RULE_REMOVE,
+ CLASSIFY_RULE_ADD,
+ MAX_CLASSIFY_RULE_ACTION_TYPE};
+
+
+/*
+ * client init ramrod data $$KEEP_ENDIANNESS$$
+ */
+struct client_init_general_data
+{
+ uint8_t client_id /* client_id */;
+ uint8_t statistics_counter_id /* statistics counter id */;
+ uint8_t statistics_en_flg /* statistics en flg */;
+ uint8_t is_fcoe_flg /* is this an fcoe connection. (1 bit is used) */;
+ uint8_t activate_flg /* if 0 - the client is deactivate else the client is activate client (1 bit is used) */;
+ uint8_t sp_client_id /* the slow path rings client Id. */;
+ uint16_t mtu /* Host MTU from client config */;
+ uint8_t statistics_zero_flg /* if set FW will reset the statistic counter of this client */;
+ uint8_t func_id /* PCI function ID (0-71) */;
+ uint8_t cos /* The connection cos, if applicable */;
+ uint8_t traffic_type;
+ uint32_t reserved0;
+};
+
+
+/*
+ * client init rx data $$KEEP_ENDIANNESS$$
+ */
+struct client_init_rx_data
+{
+ uint8_t tpa_en;
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0) /* BitField tpa_entpa_enable tpa enable flg ipv4 */
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1) /* BitField tpa_entpa_enable tpa enable flg ipv6 */
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
+#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2) /* BitField tpa_entpa_enable tpa mode (LRO or GRO) (use enum tpa_mode) */
+#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3) /* BitField tpa_entpa_enable */
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3
+ uint8_t vmqueue_mode_en_flg /* If set, working in VMQueue mode (always consume one sge) */;
+ uint8_t extra_data_over_sgl_en_flg /* if set, put over sgl data from end of input message */;
+ uint8_t cache_line_alignment_log_size /* The log size of cache line alignment in bytes. Must be a power of 2. */;
+ uint8_t enable_dynamic_hc /* If set, dynamic HC is enabled */;
+ uint8_t max_sges_for_packet /* The maximal number of SGEs that can be used for one packet. depends on MTU and SGE size. must be 0 if SGEs are disabled */;
+ uint8_t client_qzone_id /* used in E2 only, to specify the HW queue zone ID used for this client rx producers */;
+ uint8_t drop_ip_cs_err_flg /* If set, this client drops packets with IP checksum error */;
+ uint8_t drop_tcp_cs_err_flg /* If set, this client drops packets with TCP checksum error */;
+ uint8_t drop_ttl0_flg /* If set, this client drops packets with TTL=0 */;
+ uint8_t drop_udp_cs_err_flg /* If set, this client drops packets with UDP checksum error */;
+ uint8_t inner_vlan_removal_enable_flg /* If set, inner VLAN removal is enabled for this client */;
+ uint8_t outer_vlan_removal_enable_flg /* If set, outer VLAN removal is enabled for this client */;
+ uint8_t status_block_id /* rx status block id */;
+ uint8_t rx_sb_index_number /* status block indices */;
+ uint8_t dont_verify_rings_pause_thr_flg /* If set, the rings pause thresholds will not be verified by firmware. */;
+ uint8_t max_tpa_queues /* maximal TPA queues allowed for this client */;
+ uint8_t silent_vlan_removal_flg /* if set, and the vlan is equal to requested vlan according to mask, the vlan will be remove without notifying the driver */;
+ uint16_t max_bytes_on_bd /* Maximum bytes that can be placed on a BD. The BD allocated size should include 2 more bytes (ip alignment) and alignment size (in case the address is not aligned) */;
+ uint16_t sge_buff_size /* Size of the buffers pointed by SGEs */;
+ uint8_t approx_mcast_engine_id /* In Everest2, if is_approx_mcast is set, this field specified which approximate multicast engine is associate with this client */;
+ uint8_t rss_engine_id /* In Everest2, if rss_mode is set, this field specified which RSS engine is associate with this client */;
+ struct regpair bd_page_base /* BD page base address at the host */;
+ struct regpair sge_page_base /* SGE page base address at the host */;
+ struct regpair cqe_page_base /* Completion queue base address */;
+ uint8_t is_leading_rss;
+ uint8_t is_approx_mcast;
+ uint16_t max_agg_size /* maximal size for the aggregated TPA packets, reprted by the host */;
+ uint16_t state;
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0) /* BitField staterx filters state drop all unicast packets */
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1) /* BitField staterx filters state accept all unicast packets (subject to vlan) */
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2) /* BitField staterx filters state accept all unmatched unicast packets (subject to vlan) */
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3) /* BitField staterx filters state drop all multicast packets */
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4) /* BitField staterx filters state accept all multicast packets (subject to vlan) */
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5) /* BitField staterx filters state accept all broadcast packets (subject to vlan) */
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6) /* BitField staterx filters state accept packets matched only by MAC (without checking vlan) */
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6
+#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7) /* BitField staterx filters state */
+#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7
+ uint16_t cqe_pause_thr_low /* number of remaining cqes under which, we send pause message */;
+ uint16_t cqe_pause_thr_high /* number of remaining cqes above which, we send un-pause message */;
+ uint16_t bd_pause_thr_low /* number of remaining bds under which, we send pause message */;
+ uint16_t bd_pause_thr_high /* number of remaining bds above which, we send un-pause message */;
+ uint16_t sge_pause_thr_low /* number of remaining sges under which, we send pause message */;
+ uint16_t sge_pause_thr_high /* number of remaining sges above which, we send un-pause message */;
+ uint16_t rx_cos_mask /* the bits that will be set on pfc/ safc paket with will be genratet when this ring is full. for regular flow control set this to 1 */;
+ uint16_t silent_vlan_value /* The vlan to compare, in case, silent vlan is set */;
+ uint16_t silent_vlan_mask /* The vlan mask, in case, silent vlan is set */;
+ uint32_t reserved6[2];
+};
+
+/*
+ * client init tx data $$KEEP_ENDIANNESS$$
+ */
+struct client_init_tx_data
+{
+ uint8_t enforce_security_flg /* if set, security checks will be made for this connection */;
+ uint8_t tx_status_block_id /* the number of status block to update */;
+ uint8_t tx_sb_index_number /* the index to use inside the status block */;
+ uint8_t tss_leading_client_id /* client ID of the leading TSS client, for TX classification source knock out */;
+ uint8_t tx_switching_flg /* if set, tx switching will be done to packets on this connection */;
+ uint8_t anti_spoofing_flg /* if set, anti spoofing check will be done to packets on this connection */;
+ uint16_t default_vlan /* default vlan tag (id+pri). (valid if default_vlan_flg is set) */;
+ struct regpair tx_bd_page_base /* BD page base address at the host for TxBdCons */;
+ uint16_t state;
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0) /* BitField statetx filters state accept all unicast packets (subject to vlan) */
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1) /* BitField statetx filters state accept all multicast packets (subject to vlan) */
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2) /* BitField statetx filters state accept all broadcast packets (subject to vlan) */
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3) /* BitField statetx filters state accept packets matched only by MAC (without checking vlan) */
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
+#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4) /* BitField statetx filters state */
+#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4
+ uint8_t default_vlan_flg /* is default vlan valid for this client. */;
+ uint8_t force_default_pri_flg /* if set, force default priority */;
+ uint8_t tunnel_lso_inc_ip_id /* In case of LSO over IPv4 tunnel, whether to increment IP ID on external IP header or internal IP header */;
+ uint8_t refuse_outband_vlan_flg /* if set, the FW will not add outband vlan on packet (even if will exist on BD). */;
+ uint8_t tunnel_non_lso_pcsum_location /* In case of non-Lso encapsulated packets with L4 checksum offload, the pseudo checksum location - on packet or on BD. */;
+ uint8_t tunnel_non_lso_outer_ip_csum_location /* In case of non-Lso encapsulated packets with outer L3 ip checksum offload, the pseudo checksum location - on packet or on BD. */;
+};
+
+/*
+ * client init ramrod data $$KEEP_ENDIANNESS$$
+ */
+struct client_init_ramrod_data
+{
+ struct client_init_general_data general /* client init general data */;
+ struct client_init_rx_data rx /* client init rx data */;
+ struct client_init_tx_data tx /* client init tx data */;
+};
+
+
+/*
+ * client update ramrod data $$KEEP_ENDIANNESS$$
+ */
+struct client_update_ramrod_data
+{
+ uint8_t client_id /* the client to update */;
+ uint8_t func_id /* PCI function ID this client belongs to (0-71) */;
+ uint8_t inner_vlan_removal_enable_flg /* If set, inner VLAN removal is enabled for this client, will be change according to change flag */;
+ uint8_t inner_vlan_removal_change_flg /* If set, inner VLAN removal flag will be set according to the enable flag */;
+ uint8_t outer_vlan_removal_enable_flg /* If set, outer VLAN removal is enabled for this client, will be change according to change flag */;
+ uint8_t outer_vlan_removal_change_flg /* If set, outer VLAN removal flag will be set according to the enable flag */;
+ uint8_t anti_spoofing_enable_flg /* If set, anti spoofing is enabled for this client, will be change according to change flag */;
+ uint8_t anti_spoofing_change_flg /* If set, anti spoofing flag will be set according to anti spoofing flag */;
+ uint8_t activate_flg /* if 0 - the client is deactivate else the client is activate client (1 bit is used) */;
+ uint8_t activate_change_flg /* If set, activate_flg will be checked */;
+ uint16_t default_vlan /* default vlan tag (id+pri). (valid if default_vlan_flg is set) */;
+ uint8_t default_vlan_enable_flg;
+ uint8_t default_vlan_change_flg;
+ uint16_t silent_vlan_value /* The vlan to compare, in case, silent vlan is set */;
+ uint16_t silent_vlan_mask /* The vlan mask, in case, silent vlan is set */;
+ uint8_t silent_vlan_removal_flg /* if set, and the vlan is equal to requested vlan according to mask, the vlan will be remove without notifying the driver */;
+ uint8_t silent_vlan_change_flg;
+ uint8_t refuse_outband_vlan_flg /* If set, the FW will not add outband vlan on packet (even if will exist on BD). */;
+ uint8_t refuse_outband_vlan_change_flg /* If set, refuse_outband_vlan_flg will be updated. */;
+ uint8_t tx_switching_flg /* If set, tx switching will be done to packets on this connection. */;
+ uint8_t tx_switching_change_flg /* If set, tx_switching_flg will be updated. */;
+ uint32_t reserved1;
+ uint32_t echo /* echo value to be sent to driver on event ring */;
+};
+
+
+/*
+ * The eth storm context of Cstorm
+ */
+struct cstorm_eth_st_context
+{
+ uint32_t __reserved0[4];
+};
+
+
+struct double_regpair
+{
+ uint32_t regpair0_lo /* low word for reg-pair0 */;
+ uint32_t regpair0_hi /* high word for reg-pair0 */;
+ uint32_t regpair1_lo /* low word for reg-pair1 */;
+ uint32_t regpair1_hi /* high word for reg-pair1 */;
+};
+
+
+/*
+ * Ethernet address types used in ethernet tx BDs
+ */
+enum eth_addr_type
+{
+ UNKNOWN_ADDRESS,
+ UNICAST_ADDRESS,
+ MULTICAST_ADDRESS,
+ BROADCAST_ADDRESS,
+ MAX_ETH_ADDR_TYPE
+};
+
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct eth_classify_cmd_header
+{
+ uint8_t cmd_general_data;
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0) /* BitField cmd_general_data should this cmd be applied for Rx */
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1) /* BitField cmd_general_data should this cmd be applied for Tx */
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2) /* BitField cmd_general_data command opcode for MAC/VLAN/PAIR (use enum classify_rule) */
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4) /* BitField cmd_general_data (use enum classify_rule_action_type) */
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5) /* BitField cmd_general_data */
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5
+ uint8_t func_id /* the function id */;
+ uint8_t client_id;
+ uint8_t reserved1;
+};
+
+
+/*
+ * header for eth classification config ramrod $$KEEP_ENDIANNESS$$
+ */
+struct eth_classify_header
+{
+ uint8_t rule_cnt /* number of rules in classification config ramrod */;
+ uint8_t reserved0;
+ uint16_t reserved1;
+ uint32_t echo /* echo value to be sent to driver on event ring */;
+};
+
+
+/*
+ * Command for adding/removing a MAC classification rule $$KEEP_ENDIANNESS$$
+ */
+struct eth_classify_mac_cmd
+{
+ struct eth_classify_cmd_header header;
+ uint16_t reserved0;
+ uint16_t inner_mac;
+ uint16_t mac_lsb;
+ uint16_t mac_mid;
+ uint16_t mac_msb;
+ uint16_t reserved1;
+};
+
+
+/*
+ * Command for adding/removing a MAC-VLAN pair classification rule $$KEEP_ENDIANNESS$$
+ */
+struct eth_classify_pair_cmd
+{
+ struct eth_classify_cmd_header header;
+ uint16_t reserved0;
+ uint16_t inner_mac;
+ uint16_t mac_lsb;
+ uint16_t mac_mid;
+ uint16_t mac_msb;
+ uint16_t vlan;
+};
+
+
+/*
+ * Command for adding/removing a VLAN classification rule $$KEEP_ENDIANNESS$$
+ */
+struct eth_classify_vlan_cmd
+{
+ struct eth_classify_cmd_header header;
+ uint32_t reserved0;
+ uint32_t reserved1;
+ uint16_t reserved2;
+ uint16_t vlan;
+};
+
+/*
+ * union for eth classification rule $$KEEP_ENDIANNESS$$
+ */
+union eth_classify_rule_cmd
+{
+ struct eth_classify_mac_cmd mac;
+ struct eth_classify_vlan_cmd vlan;
+ struct eth_classify_pair_cmd pair;
+};
+
+/*
+ * parameters for eth classification configuration ramrod $$KEEP_ENDIANNESS$$
+ */
+struct eth_classify_rules_ramrod_data
+{
+ struct eth_classify_header header;
+ union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data contain client ID need to the ramrod $$KEEP_ENDIANNESS$$
+ */
+struct eth_common_ramrod_data
+{
+ uint32_t client_id /* id of this client. (5 bits are used) */;
+ uint32_t reserved1;
+};
+
+
+/*
+ * The eth storm context of Ustorm
+ */
+struct ustorm_eth_st_context
+{
+ uint32_t reserved0[52];
+};
+
+/*
+ * The eth storm context of Tstorm
+ */
+struct tstorm_eth_st_context
+{
+ uint32_t __reserved0[28];
+};
+
+/*
+ * The eth storm context of Xstorm
+ */
+struct xstorm_eth_st_context
+{
+ uint32_t reserved0[60];
+};
+
+/*
+ * Ethernet connection context
+ */
+struct eth_context
+{
+ struct ustorm_eth_st_context ustorm_st_context /* Ustorm storm context */;
+ struct tstorm_eth_st_context tstorm_st_context /* Tstorm storm context */;
+ struct xstorm_eth_ag_context xstorm_ag_context /* Xstorm aggregative context */;
+ struct tstorm_eth_ag_context tstorm_ag_context /* Tstorm aggregative context */;
+ struct cstorm_eth_ag_context cstorm_ag_context /* Cstorm aggregative context */;
+ struct ustorm_eth_ag_context ustorm_ag_context /* Ustorm aggregative context */;
+ struct timers_block_context timers_context /* Timers block context */;
+ struct xstorm_eth_st_context xstorm_st_context /* Xstorm storm context */;
+ struct cstorm_eth_st_context cstorm_st_context /* Cstorm storm context */;
+};
+
+
+/*
+ * union for sgl and raw data.
+ */
+union eth_sgl_or_raw_data
+{
+ uint16_t sgl[8] /* Scatter-gather list of SGEs used by this packet. This list includes the indices of the SGEs. */;
+ uint32_t raw_data[4] /* raw data from Tstorm to the driver. */;
+};
+
+/*
+ * eth FP end aggregation CQE parameters struct $$KEEP_ENDIANNESS$$
+ */
+struct eth_end_agg_rx_cqe
+{
+ uint8_t type_error_flags;
+#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0) /* BitField type_error_flags (use enum eth_rx_cqe_type) */
+#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2) /* BitField type_error_flags (use enum eth_rx_fp_sel) */
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3) /* BitField type_error_flags */
+#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3
+ uint8_t reserved1;
+ uint8_t queue_index /* The aggregation queue index of this packet */;
+ uint8_t reserved2;
+ uint32_t timestamp_delta /* timestamp delta between first packet to last packet in aggregation */;
+ uint16_t num_of_coalesced_segs /* Num of coalesced segments. */;
+ uint16_t pkt_len /* Packet length */;
+ uint8_t pure_ack_count /* Number of pure acks coalesced. */;
+ uint8_t reserved3;
+ uint16_t reserved4;
+ union eth_sgl_or_raw_data sgl_or_raw_data /* union for sgl and raw data. */;
+ uint32_t reserved5[8];
+};
+
+
+/*
+ * regular eth FP CQE parameters struct $$KEEP_ENDIANNESS$$
+ */
+struct eth_fast_path_rx_cqe
+{
+ uint8_t type_error_flags;
+#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0) /* BitField type_error_flags (use enum eth_rx_cqe_type) */
+#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2) /* BitField type_error_flags (use enum eth_rx_fp_sel) */
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3) /* BitField type_error_flags Physical layer errors */
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4) /* BitField type_error_flags IP checksum error */
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5) /* BitField type_error_flags TCP/UDP checksum error */
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) /* BitField type_error_flags */
+#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
+ uint8_t status_flags;
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) /* BitField status_flags (use enum eth_rss_hash_type) */
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3) /* BitField status_flags RSS hashing on/off */
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4) /* BitField status_flags if set to 1, this is a broadcast packet */
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5) /* BitField status_flags if set to 1, the MAC address was matched in the tstorm CAM search */
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6) /* BitField status_flags IP checksum validation was not performed (if packet is not IPv4) */
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7) /* BitField status_flags TCP/UDP checksum validation was not performed (if packet is not TCP/UDP or IPv6 extheaders exist) */
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7
+ uint8_t queue_index /* The aggregation queue index of this packet */;
+ uint8_t placement_offset /* Placement offset from the start of the BD, in bytes */;
+ uint32_t rss_hash_result /* RSS toeplitz hash result */;
+ uint16_t vlan_tag /* Ethernet VLAN tag field */;
+ uint16_t pkt_len_or_gro_seg_len /* Packet length (for non-TPA CQE) or GRO Segment Length (for TPA in GRO Mode) otherwise 0 */;
+ uint16_t len_on_bd /* Number of bytes placed on the BD */;
+ struct parsing_flags pars_flags;
+ union eth_sgl_or_raw_data sgl_or_raw_data /* union for sgl and raw data. */;
+ uint32_t reserved1[8];
+};
+
+
+/*
+ * Command for setting classification flags for a client $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_rules_cmd
+{
+ uint8_t cmd_general_data;
+#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0) /* BitField cmd_general_data should this cmd be applied for Rx */
+#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1) /* BitField cmd_general_data should this cmd be applied for Tx */
+#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2) /* BitField cmd_general_data */
+#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2
+ uint8_t func_id /* the function id */;
+ uint8_t client_id /* the client id */;
+ uint8_t reserved1;
+ uint16_t state;
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0) /* BitField state drop all unicast packets */
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1) /* BitField state accept all unicast packets (subject to vlan) */
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2) /* BitField state accept all unmatched unicast packets */
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3) /* BitField state drop all multicast packets */
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4) /* BitField state accept all multicast packets (subject to vlan) */
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5) /* BitField state accept all broadcast packets (subject to vlan) */
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6) /* BitField state accept packets matched only by MAC (without checking vlan) */
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6
+#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7) /* BitField state */
+#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7
+ uint16_t reserved3;
+ struct regpair reserved4;
+};
+
+
+/*
+ * parameters for eth classification filters ramrod $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_rules_ramrod_data
+{
+ struct eth_classify_header header;
+ struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT];
+};
+
+
+/*
+ * parameters for eth classification configuration ramrod $$KEEP_ENDIANNESS$$
+ */
+struct eth_general_rules_ramrod_data
+{
+ struct eth_classify_header header;
+ union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data for Halt ramrod
+ */
+struct eth_halt_ramrod_data
+{
+ uint32_t client_id /* id of this client. (5 bits are used) */;
+ uint32_t reserved0;
+};
+
+
+/*
+ * destination and source mac address.
+ */
+struct eth_mac_addresses
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t dst_mid /* destination mac address 16 middle bits */;
+ uint16_t dst_lo /* destination mac address 16 low bits */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t dst_lo /* destination mac address 16 low bits */;
+ uint16_t dst_mid /* destination mac address 16 middle bits */;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t src_lo /* source mac address 16 low bits */;
+ uint16_t dst_hi /* destination mac address 16 high bits */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t dst_hi /* destination mac address 16 high bits */;
+ uint16_t src_lo /* source mac address 16 low bits */;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t src_hi /* source mac address 16 high bits */;
+ uint16_t src_mid /* source mac address 16 middle bits */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t src_mid /* source mac address 16 middle bits */;
+ uint16_t src_hi /* source mac address 16 high bits */;
+#endif
+};
+
+
+/*
+ * tunneling related data.
+ */
+struct eth_tunnel_data
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t dst_mid /* destination mac address 16 middle bits */;
+ uint16_t dst_lo /* destination mac address 16 low bits */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t dst_lo /* destination mac address 16 low bits */;
+ uint16_t dst_mid /* destination mac address 16 middle bits */;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t fw_ip_hdr_csum /* Fw Ip header checksum (with ALL ip header fields) for the outer IP header */;
+ uint16_t dst_hi /* destination mac address 16 high bits */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t dst_hi /* destination mac address 16 high bits */;
+ uint16_t fw_ip_hdr_csum /* Fw Ip header checksum (with ALL ip header fields) for the outer IP header */;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint8_t flags;
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) /* BitField flags Set in case outer IP header is ipV6 */
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) /* BitField flags Should be set with 0 */
+#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
+ uint8_t ip_hdr_start_inner_w /* Inner IP header offset in WORDs (16-bit) from start of packet */;
+ uint16_t pseudo_csum /* Pseudo checksum with length field=0 */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t pseudo_csum /* Pseudo checksum with length field=0 */;
+ uint8_t ip_hdr_start_inner_w /* Inner IP header offset in WORDs (16-bit) from start of packet */;
+ uint8_t flags;
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) /* BitField flags Set in case outer IP header is ipV6 */
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) /* BitField flags Should be set with 0 */
+#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
+#endif
+};
+
+/*
+ * union for mac addresses and for tunneling data. considered as tunneling data only if (tunnel_exist == 1).
+ */
+union eth_mac_addr_or_tunnel_data
+{
+ struct eth_mac_addresses mac_addr /* destination and source mac addresses. */;
+ struct eth_tunnel_data tunnel_data /* tunneling related data. */;
+};
+
+
+/*
+ * Command for setting multicast classification for a client $$KEEP_ENDIANNESS$$
+ */
+struct eth_multicast_rules_cmd
+{
+ uint8_t cmd_general_data;
+#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0) /* BitField cmd_general_data should this cmd be applied for Rx */
+#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1) /* BitField cmd_general_data should this cmd be applied for Tx */
+#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2) /* BitField cmd_general_data 1 for add rule, 0 for remove rule */
+#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2
+#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3) /* BitField cmd_general_data */
+#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3
+ uint8_t func_id /* the function id */;
+ uint8_t bin_id /* the bin to add this function to (0-255) */;
+ uint8_t engine_id /* the approximate multicast engine id */;
+ uint32_t reserved2;
+ struct regpair reserved3;
+};
+
+
+/*
+ * parameters for multicast classification ramrod $$KEEP_ENDIANNESS$$
+ */
+struct eth_multicast_rules_ramrod_data
+{
+ struct eth_classify_header header;
+ struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
+};
+
+
+/*
+ * Place holder for ramrods protocol specific data
+ */
+struct ramrod_data
+{
+ uint32_t data_lo;
+ uint32_t data_hi;
+};
+
+/*
+ * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
+ */
+union eth_ramrod_data
+{
+ struct ramrod_data general;
+};
+
+
+/*
+ * RSS toeplitz hash type, as reported in CQE
+ */
+enum eth_rss_hash_type
+{
+ DEFAULT_HASH_TYPE,
+ IPV4_HASH_TYPE,
+ TCP_IPV4_HASH_TYPE,
+ IPV6_HASH_TYPE,
+ TCP_IPV6_HASH_TYPE,
+ VLAN_PRI_HASH_TYPE,
+ E1HOV_PRI_HASH_TYPE,
+ DSCP_HASH_TYPE,
+ MAX_ETH_RSS_HASH_TYPE};
+
+
+/*
+ * Ethernet RSS mode
+ */
+enum eth_rss_mode
+{
+ ETH_RSS_MODE_DISABLED,
+ ETH_RSS_MODE_ESX51 /* RSS mode for Vmware ESX 5.1 (Only do RSS if packet is UDP with dst port that matches the UDP 4-tuble Destination Port mask and value) */,
+ ETH_RSS_MODE_REGULAR /* Regular (ndis-like) RSS */,
+ ETH_RSS_MODE_VLAN_PRI /* RSS based on inner-vlan priority field */,
+ ETH_RSS_MODE_E1HOV_PRI /* RSS based on outer-vlan priority field */,
+ ETH_RSS_MODE_IP_DSCP /* RSS based on IPv4 DSCP field */,
+ MAX_ETH_RSS_MODE};
+
+
+/*
+ * parameters for RSS update ramrod (E2) $$KEEP_ENDIANNESS$$
+ */
+struct eth_rss_update_ramrod_data
+{
+ uint8_t rss_engine_id;
+ uint8_t capabilities;
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV4 2-tupple capability */
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV4 4-tupple capability for TCP */
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV4 4-tupple capability for UDP */
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV6 2-tupple capability */
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV6 4-tupple capability for TCP */
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV6 4-tupple capability for UDP */
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6) /* BitField capabilitiesFunction RSS capabilities configuration of the 5-tupple capability */
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7) /* BitField capabilitiesFunction RSS capabilities if set update the rss keys */
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
+ uint8_t rss_result_mask /* The mask for the lower byte of RSS result - defines which section of the indirection table will be used. To enable all table put here 0x7F */;
+ uint8_t rss_mode /* The RSS mode for this function */;
+ uint16_t udp_4tuple_dst_port_mask /* If UDP 4-tuple enabled, packets that match the mask and value are 4-tupled, the rest are 2-tupled. (Set to 0 to match all) */;
+ uint16_t udp_4tuple_dst_port_value /* If UDP 4-tuple enabled, packets that match the mask and value are 4-tupled, the rest are 2-tupled. (Set to 0 to match all) */;
+ uint8_t indirection_table[T_ETH_INDIRECTION_TABLE_SIZE] /* RSS indirection table */;
+ uint32_t rss_key[T_ETH_RSS_KEY] /* RSS key supplied as by OS */;
+ uint32_t echo;
+ uint32_t reserved3;
+};
+
+
+/*
+ * The eth Rx Buffer Descriptor
+ */
+struct eth_rx_bd
+{
+ uint32_t addr_lo /* Single continuous buffer low pointer */;
+ uint32_t addr_hi /* Single continuous buffer high pointer */;
+};
+
+
+/*
+ * Eth Rx Cqe structure- general structure for ramrods $$KEEP_ENDIANNESS$$
+ */
+struct common_ramrod_eth_rx_cqe
+{
+ uint8_t ramrod_type;
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0) /* BitField ramrod_type (use enum eth_rx_cqe_type) */
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2) /* BitField ramrod_type */
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3) /* BitField ramrod_type */
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3
+ uint8_t conn_type /* only 3 bits are used */;
+ uint16_t reserved1 /* protocol specific data */;
+ uint32_t conn_and_cmd_data;
+#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0) /* BitField conn_and_cmd_data */
+#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24) /* BitField conn_and_cmd_data command id of the ramrod- use RamrodCommandIdEnum */
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24
+ struct ramrod_data protocol_data /* protocol specific data */;
+ uint32_t echo;
+ uint32_t reserved2[11];
+};
+
+/*
+ * Rx Last CQE in page (in ETH)
+ */
+struct eth_rx_cqe_next_page
+{
+ uint32_t addr_lo /* Next page low pointer */;
+ uint32_t addr_hi /* Next page high pointer */;
+ uint32_t reserved[14];
+};
+
+/*
+ * union for all eth rx cqe types (fix their sizes)
+ */
+union eth_rx_cqe
+{
+ struct eth_fast_path_rx_cqe fast_path_cqe;
+ struct common_ramrod_eth_rx_cqe ramrod_cqe;
+ struct eth_rx_cqe_next_page next_page_cqe;
+ struct eth_end_agg_rx_cqe end_agg_cqe;
+};
+
+
+/*
+ * Values for RX ETH CQE type field
+ */
+enum eth_rx_cqe_type
+{
+ RX_ETH_CQE_TYPE_ETH_FASTPATH /* Fast path CQE */,
+ RX_ETH_CQE_TYPE_ETH_RAMROD /* Slow path CQE */,
+ RX_ETH_CQE_TYPE_ETH_START_AGG /* Fast path CQE */,
+ RX_ETH_CQE_TYPE_ETH_STOP_AGG /* Slow path CQE */,
+ MAX_ETH_RX_CQE_TYPE};
+
+
+/*
+ * Type of SGL/Raw field in ETH RX fast path CQE
+ */
+enum eth_rx_fp_sel
+{
+ ETH_FP_CQE_REGULAR /* Regular CQE- no extra data */,
+ ETH_FP_CQE_RAW /* Extra data is raw data- iscsi OOO */,
+ MAX_ETH_RX_FP_SEL};
+
+
+/*
+ * The eth Rx SGE Descriptor
+ */
+struct eth_rx_sge
+{
+ uint32_t addr_lo /* Single continuous buffer low pointer */;
+ uint32_t addr_hi /* Single continuous buffer high pointer */;
+};
+
+
+/*
+ * common data for all protocols $$KEEP_ENDIANNESS$$
+ */
+struct spe_hdr
+{
+ uint32_t conn_and_cmd_data;
+#define SPE_HDR_CID (0xFFFFFF<<0) /* BitField conn_and_cmd_data */
+#define SPE_HDR_CID_SHIFT 0
+#define SPE_HDR_CMD_ID (0xFF<<24) /* BitField conn_and_cmd_data command id of the ramrod- use enum common_spqe_cmd_id/eth_spqe_cmd_id/toe_spqe_cmd_id */
+#define SPE_HDR_CMD_ID_SHIFT 24
+ uint16_t type;
+#define SPE_HDR_CONN_TYPE (0xFF<<0) /* BitField type connection type. (3 bits are used) (use enum connection_type) */
+#define SPE_HDR_CONN_TYPE_SHIFT 0
+#define SPE_HDR_FUNCTION_ID (0xFF<<8) /* BitField type */
+#define SPE_HDR_FUNCTION_ID_SHIFT 8
+ uint16_t reserved1;
+};
+
+/*
+ * specific data for ethernet slow path element
+ */
+union eth_specific_data
+{
+ uint8_t protocol_data[8] /* to fix this structure size to 8 bytes */;
+ struct regpair client_update_ramrod_data /* The address of the data for client update ramrod */;
+ struct regpair client_init_ramrod_init_data /* The data for client setup ramrod */;
+ struct eth_halt_ramrod_data halt_ramrod_data /* Includes the client id to be deleted */;
+ struct regpair update_data_addr /* physical address of the eth_rss_update_ramrod_data struct, as allocated by the driver */;
+ struct eth_common_ramrod_data common_ramrod_data /* The data contain client ID need to the ramrod */;
+ struct regpair classify_cfg_addr /* physical address of the eth_classify_rules_ramrod_data struct, as allocated by the driver */;
+ struct regpair filter_cfg_addr /* physical address of the eth_filter_cfg_ramrod_data struct, as allocated by the driver */;
+ struct regpair mcast_cfg_addr /* physical address of the eth_mcast_cfg_ramrod_data struct, as allocated by the driver */;
+};
+
+/*
+ * Ethernet slow path element
+ */
+struct eth_spe
+{
+ struct spe_hdr hdr /* common data for all protocols */;
+ union eth_specific_data data /* data specific to ethernet protocol */;
+};
+
+
+/*
+ * Ethernet command ID for slow path elements
+ */
+enum eth_spqe_cmd_id
+{
+ RAMROD_CMD_ID_ETH_UNUSED,
+ RAMROD_CMD_ID_ETH_CLIENT_SETUP /* Setup a new L2 client */,
+ RAMROD_CMD_ID_ETH_HALT /* Halt an L2 client */,
+ RAMROD_CMD_ID_ETH_FORWARD_SETUP /* Setup a new FW channel */,
+ RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP /* Setup a new Tx only queue */,
+ RAMROD_CMD_ID_ETH_CLIENT_UPDATE /* Update an L2 client configuration */,
+ RAMROD_CMD_ID_ETH_EMPTY /* Empty ramrod - used to synchronize iSCSI OOO */,
+ RAMROD_CMD_ID_ETH_TERMINATE /* Terminate an L2 client */,
+ RAMROD_CMD_ID_ETH_TPA_UPDATE /* update the tpa roles in L2 client */,
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES /* Add/remove classification filters for L2 client (in E2/E3 only) */,
+ RAMROD_CMD_ID_ETH_FILTER_RULES /* Add/remove classification filters for L2 client (in E2/E3 only) */,
+ RAMROD_CMD_ID_ETH_MULTICAST_RULES /* Add/remove multicast classification bin (in E2/E3 only) */,
+ RAMROD_CMD_ID_ETH_RSS_UPDATE /* Update RSS configuration */,
+ RAMROD_CMD_ID_ETH_SET_MAC /* Update RSS configuration */,
+ MAX_ETH_SPQE_CMD_ID};
+
+
+/*
+ * eth tpa update command
+ */
+enum eth_tpa_update_command
+{
+ TPA_UPDATE_NONE_COMMAND /* nop command */,
+ TPA_UPDATE_ENABLE_COMMAND /* enable command */,
+ TPA_UPDATE_DISABLE_COMMAND /* disable command */,
+ MAX_ETH_TPA_UPDATE_COMMAND};
+
+
+/*
+ * In case of LSO over IPv4 tunnel, whether to increment IP ID on external IP header or internal IP header
+ */
+enum eth_tunnel_lso_inc_ip_id
+{
+ EXT_HEADER /* Increment IP ID of external header (HW works on external, FW works on internal */,
+ INT_HEADER /* Increment IP ID of internal header (HW works on internal, FW works on external */,
+ MAX_ETH_TUNNEL_LSO_INC_IP_ID};
+
+
+/*
+ * In case tunnel exist and L4 checksum offload (or outer ip header checksum), the pseudo checksum location, on packet or on BD.
+ */
+enum eth_tunnel_non_lso_csum_location
+{
+ CSUM_ON_PKT /* checksum is on the packet. */,
+ CSUM_ON_BD /* checksum is on the BD. */,
+ MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION};
+
+
+/*
+ * Tx regular BD structure $$KEEP_ENDIANNESS$$
+ */
+struct eth_tx_bd
+{
+ uint32_t addr_lo /* Single continuous buffer low pointer */;
+ uint32_t addr_hi /* Single continuous buffer high pointer */;
+ uint16_t total_pkt_bytes /* Size of the entire packet, valid for non-LSO packets */;
+ uint16_t nbytes /* Size of the data represented by the BD */;
+ uint8_t reserved[4] /* keeps same size as other eth tx bd types */;
+};
+
+
+/*
+ * structure for easy accessibility to assembler
+ */
+struct eth_tx_bd_flags
+{
+ uint8_t as_bitfield;
+#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0) /* BitField as_bitfield IP CKSUM flag,Relevant in START */
+#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0
+#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1) /* BitField as_bitfield L4 CKSUM flag,Relevant in START */
+#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1
+#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2) /* BitField as_bitfield 00 - no vlan; 01 - inband Vlan; 10 outband Vlan (use enum eth_tx_vlan_type) */
+#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2
+#define ETH_TX_BD_FLAGS_START_BD (0x1<<4) /* BitField as_bitfield Start of packet BD */
+#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
+#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5) /* BitField as_bitfield flag that indicates that the current packet is a udp packet */
+#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5
+#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) /* BitField as_bitfield LSO flag, Relevant in START */
+#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
+#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) /* BitField as_bitfield set in case ipV6 packet, Relevant in START */
+#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7
+};
+
+/*
+ * The eth Tx Buffer Descriptor $$KEEP_ENDIANNESS$$
+ */
+struct eth_tx_start_bd
+{
+ uint64_t addr;
+ uint16_t nbd /* Num of BDs in packet: include parsInfoBD, Relevant in START(only in Everest) */;
+ uint16_t nbytes /* Size of the data represented by the BD */;
+ uint16_t vlan_or_ethertype /* Vlan structure: vlan_id is in lsb, then cfi and then priority vlan_id 12 bits (lsb), cfi 1 bit, priority 3 bits. In E2, this field should be set with etherType for VFs with no vlan */;
+ struct eth_tx_bd_flags bd_flags;
+ uint8_t general_data;
+#define ETH_TX_START_BD_HDR_NBDS (0xF<<0) /* BitField general_data contains the number of BDs that contain Ethernet/IP/TCP headers, for full/partial LSO modes */
+#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
+#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) /* BitField general_data force vlan mode according to bds (vlan mode can change accroding to global configuration) */
+#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
+#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5) /* BitField general_data Determines the number of parsing BDs in packet. Number of parsing BDs in packet is (parse_nbds+1). */
+#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
+#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7) /* BitField general_data set in case of tunneling encapsulated packet */
+#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7
+};
+
+/*
+ * Tx parsing BD structure for ETH E1h $$KEEP_ENDIANNESS$$
+ */
+struct eth_tx_parse_bd_e1x
+{
+ uint16_t global_data;
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) /* BitField global_data IP header Offset in WORDs from start of packet */
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3<<4) /* BitField global_data marks ethernet address type (use enum eth_addr_type) */
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<6) /* BitField global_data */
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<7) /* BitField global_data */
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7
+#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<8) /* BitField global_data an optional addition to ECN that protects against accidental or malicious concealment of marked packets from the TCP sender. */
+#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8
+#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F<<9) /* BitField global_data reserved bit, should be set with 0 */
+#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9
+ uint8_t tcp_flags;
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) /* BitField tcp_flagsState flags End of data flag */
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1) /* BitField tcp_flagsState flags Synchronize sequence numbers flag */
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2) /* BitField tcp_flagsState flags Reset connection flag */
+#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3) /* BitField tcp_flagsState flags Push flag */
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4) /* BitField tcp_flagsState flags Acknowledgment number valid flag */
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5) /* BitField tcp_flagsState flags Urgent pointer valid flag */
+#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6) /* BitField tcp_flagsState flags ECN-Echo */
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) /* BitField tcp_flagsState flags Congestion Window Reduced */
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
+ uint8_t ip_hlen_w /* IP header length in WORDs */;
+ uint16_t total_hlen_w /* IP+TCP+ETH */;
+ uint16_t tcp_pseudo_csum /* Checksum of pseudo header with length field=0 */;
+ uint16_t lso_mss /* for LSO mode */;
+ uint16_t ip_id /* for LSO mode */;
+ uint32_t tcp_send_seq /* for LSO mode */;
+};
+
+/*
+ * Tx parsing BD structure for ETH E2 $$KEEP_ENDIANNESS$$
+ */
+struct eth_tx_parse_bd_e2
+{
+ union eth_mac_addr_or_tunnel_data data /* union for mac addresses and for tunneling data. considered as tunneling data only if (tunnel_exist == 1). */;
+ uint32_t parsing_data;
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0) /* BitField parsing_data TCP/UDP header Offset in WORDs from start of packet */
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11) /* BitField parsing_data TCP header size in DOUBLE WORDS */
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15) /* BitField parsing_data a flag to indicate an ipv6 packet with extension headers. If set on LSO packet, pseudo CS should be placed in TCP CS field without length field */
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15
+#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<16) /* BitField parsing_data for LSO mode */
+#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3<<30) /* BitField parsing_data marks ethernet address type (use enum eth_addr_type) */
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30
+};
+
+/*
+ * Tx 2nd parsing BD structure for ETH packet $$KEEP_ENDIANNESS$$
+ */
+struct eth_tx_parse_2nd_bd
+{
+ uint16_t global_data;
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0) /* BitField global_data Outer IP header offset in WORDs (16-bit) from start of packet */
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1<<4) /* BitField global_data should be set with 0 */
+#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5) /* BitField global_data */
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6) /* BitField global_data an optional addition to ECN that protects against accidental or malicious concealment of marked packets from the TCP sender. */
+#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7) /* BitField global_data Set in case UDP header exists in tunnel outer hedears. */
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8) /* BitField global_data Outer IP header length in WORDs (16-bit). Valid only for IpV4. */
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
+#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7<<13) /* BitField global_data should be set with 0 */
+#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13
+ uint16_t reserved2;
+ uint8_t tcp_flags;
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0) /* BitField tcp_flagsState flags End of data flag */
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1) /* BitField tcp_flagsState flags Synchronize sequence numbers flag */
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2) /* BitField tcp_flagsState flags Reset connection flag */
+#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3) /* BitField tcp_flagsState flags Push flag */
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4) /* BitField tcp_flagsState flags Acknowledgment number valid flag */
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5) /* BitField tcp_flagsState flags Urgent pointer valid flag */
+#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6) /* BitField tcp_flagsState flags ECN-Echo */
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7) /* BitField tcp_flagsState flags Congestion Window Reduced */
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
+ uint8_t reserved3;
+ uint8_t tunnel_udp_hdr_start_w /* Offset (in WORDs) from start of packet to tunnel UDP header. (if exist) */;
+ uint8_t fw_ip_hdr_to_payload_w /* In IpV4, the length (in WORDs) from the FW IpV4 header start to the payload start. In IpV6, the length (in WORDs) from the FW IpV6 header end to the payload start. However, if extension headers are included, their length is counted here as well. */;
+ uint16_t fw_ip_csum_wo_len_flags_frag /* For the IP header which is set by the FW, the IP checksum without length, flags and fragment offset. */;
+ uint16_t hw_ip_id /* The IP ID to be set by HW for LSO packets in tunnel mode. */;
+ uint32_t tcp_send_seq /* The TCP sequence number for LSO packets. */;
+};
+
+/*
+ * The last BD in the BD memory will hold a pointer to the next BD memory
+ */
+struct eth_tx_next_bd
+{
+ uint32_t addr_lo /* Single continuous buffer low pointer */;
+ uint32_t addr_hi /* Single continuous buffer high pointer */;
+ uint8_t reserved[8] /* keeps same size as other eth tx bd types */;
+};
+
+/*
+ * union for 4 Bd types
+ */
+union eth_tx_bd_types
+{
+ struct eth_tx_start_bd start_bd /* the first bd in a packets */;
+ struct eth_tx_bd reg_bd /* the common bd */;
+ struct eth_tx_parse_bd_e1x parse_bd_e1x /* parsing info BD for e1/e1h */;
+ struct eth_tx_parse_bd_e2 parse_bd_e2 /* parsing info BD for e2 */;
+ struct eth_tx_parse_2nd_bd parse_2nd_bd /* 2nd parsing info BD */;
+ struct eth_tx_next_bd next_bd /* Bd that contains the address of the next page */;
+};
+
+/*
+ * array of 13 bds as appears in the eth xstorm context
+ */
+struct eth_tx_bds_array
+{
+ union eth_tx_bd_types bds[13];
+};
+
+
+/*
+ * VLAN mode on TX BDs
+ */
+enum eth_tx_vlan_type
+{
+ X_ETH_NO_VLAN,
+ X_ETH_OUTBAND_VLAN,
+ X_ETH_INBAND_VLAN,
+ X_ETH_FW_ADDED_VLAN /* Driver should not use this! */,
+ MAX_ETH_TX_VLAN_TYPE};
+
+
+/*
+ * Ethernet VLAN filtering mode in E1x
+ */
+enum eth_vlan_filter_mode
+{
+ ETH_VLAN_FILTER_ANY_VLAN /* Don't filter by vlan */,
+ ETH_VLAN_FILTER_SPECIFIC_VLAN /* Only the vlan_id is allowed */,
+ ETH_VLAN_FILTER_CLASSIFY /* Vlan will be added to CAM for classification */,
+ MAX_ETH_VLAN_FILTER_MODE};
+
+
+/*
+ * MAC filtering configuration command header $$KEEP_ENDIANNESS$$
+ */
+struct mac_configuration_hdr
+{
+ uint8_t length /* number of entries valid in this command (6 bits) */;
+ uint8_t offset /* offset of the first entry in the list */;
+ uint16_t client_id /* the client id which this ramrod is sent on. 5b is used. */;
+ uint32_t echo /* echo value to be sent to driver on event ring */;
+};
+
+/*
+ * MAC address in list for ramrod $$KEEP_ENDIANNESS$$
+ */
+struct mac_configuration_entry
+{
+ uint16_t lsb_mac_addr /* 2 LSB of MAC address (should be given in big endien - driver should do hton to this number!!!) */;
+ uint16_t middle_mac_addr /* 2 middle bytes of MAC address (should be given in big endien - driver should do hton to this number!!!) */;
+ uint16_t msb_mac_addr /* 2 MSB of MAC address (should be given in big endien - driver should do hton to this number!!!) */;
+ uint16_t vlan_id /* The inner vlan id (12b). Used either in vlan_in_cam for mac_valn pair or for vlan filtering */;
+ uint8_t pf_id /* The pf id, for multi function mode */;
+ uint8_t flags;
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0) /* BitField flags configures the action to be done in cam (used only is slow path handlers) (use enum set_mac_action_type) */
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1) /* BitField flags If set, this MAC also belongs to RDMA client */
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2) /* BitField flags (use enum eth_vlan_filter_mode) */
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4) /* BitField flags BitField flags 0 - can't remove vlan 1 - can remove vlan. relevant only to everest1 */
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4
+#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5) /* BitField flags BitField flags 0 - not broadcast 1 - broadcast. relevant only to everest1 */
+#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
+#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6) /* BitField flags */
+#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6
+ uint16_t reserved0;
+ uint32_t clients_bit_vector /* Bit vector for the clients which should receive this MAC. */;
+};
+
+/*
+ * MAC filtering configuration command
+ */
+struct mac_configuration_cmd
+{
+ struct mac_configuration_hdr hdr /* header */;
+ struct mac_configuration_entry config_table[64] /* table of 64 MAC configuration entries: addresses and target table entries */;
+};
+
+
+/*
+ * Set-MAC command type (in E1x)
+ */
+enum set_mac_action_type
+{
+ T_ETH_MAC_COMMAND_INVALIDATE,
+ T_ETH_MAC_COMMAND_SET,
+ MAX_SET_MAC_ACTION_TYPE};
+
+
+/*
+ * Ethernet TPA Modes
+ */
+enum tpa_mode
+{
+ TPA_LRO /* LRO mode TPA */,
+ TPA_GRO /* GRO mode TPA */,
+ MAX_TPA_MODE};
+
+
+/*
+ * tpa update ramrod data $$KEEP_ENDIANNESS$$
+ */
+struct tpa_update_ramrod_data
+{
+ uint8_t update_ipv4 /* none, enable or disable */;
+ uint8_t update_ipv6 /* none, enable or disable */;
+ uint8_t client_id /* client init flow control data */;
+ uint8_t max_tpa_queues /* maximal TPA queues allowed for this client */;
+ uint8_t max_sges_for_packet /* The maximal number of SGEs that can be used for one packet. depends on MTU and SGE size. must be 0 if SGEs are disabled */;
+ uint8_t complete_on_both_clients /* If set and the client has different sp_client, completion will be sent to both rings */;
+ uint8_t dont_verify_rings_pause_thr_flg /* If set, the rings pause thresholds will not be verified by firmware. */;
+ uint8_t tpa_mode /* TPA mode to use (LRO or GRO) */;
+ uint16_t sge_buff_size /* Size of the buffers pointed by SGEs */;
+ uint16_t max_agg_size /* maximal size for the aggregated TPA packets, reprted by the host */;
+ uint32_t sge_page_base_lo /* The address to fetch the next sges from (low) */;
+ uint32_t sge_page_base_hi /* The address to fetch the next sges from (high) */;
+ uint16_t sge_pause_thr_low /* number of remaining sges under which, we send pause message */;
+ uint16_t sge_pause_thr_high /* number of remaining sges above which, we send un-pause message */;
+};
+
+
+/*
+ * approximate-match multicast filtering for E1H per function in Tstorm
+ */
+struct tstorm_eth_approximate_match_multicast_filtering
+{
+ uint32_t mcast_add_hash_bit_array[8] /* Bit array for multicast hash filtering.Each bit supports a hash function result if to accept this multicast dst address. */;
+};
+
+
+/*
+ * Common configuration parameters per function in Tstorm $$KEEP_ENDIANNESS$$
+ */
+struct tstorm_eth_function_common_config
+{
+ uint16_t config_flags;
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) /* BitField config_flagsGeneral configuration flags configuration of the port RSS IpV4 2-tupple capability */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) /* BitField config_flagsGeneral configuration flags configuration of the port RSS IpV4 4-tupple capability */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) /* BitField config_flagsGeneral configuration flags configuration of the port RSS IpV4 2-tupple capability */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) /* BitField config_flagsGeneral configuration flags configuration of the port RSS IpV6 4-tupple capability */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) /* BitField config_flagsGeneral configuration flags RSS mode of operation (use enum eth_rss_mode) */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7) /* BitField config_flagsGeneral configuration flags 0 - Don't filter by vlan, 1 - Filter according to the vlans specificied in mac_filter_config */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8) /* BitField config_flagsGeneral configuration flags */
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8
+ uint8_t rss_result_mask /* The mask for the lower byte of RSS result - defines which section of the indirection table will be used. To enable all table put here 0x7F */;
+ uint8_t reserved1;
+ uint16_t vlan_id[2] /* VLANs of this function. VLAN filtering is determine according to vlan_filtering_enable. */;
+};
+
+
+/*
+ * MAC filtering configuration parameters per port in Tstorm $$KEEP_ENDIANNESS$$
+ */
+struct tstorm_eth_mac_filter_config
+{
+ uint32_t ucast_drop_all /* bit vector in which the clients which drop all unicast packets are set */;
+ uint32_t ucast_accept_all /* bit vector in which clients that accept all unicast packets are set */;
+ uint32_t mcast_drop_all /* bit vector in which the clients which drop all multicast packets are set */;
+ uint32_t mcast_accept_all /* bit vector in which clients that accept all multicast packets are set */;
+ uint32_t bcast_accept_all /* bit vector in which clients that accept all broadcast packets are set */;
+ uint32_t vlan_filter[2] /* bit vector for VLAN filtering. Clients which enforce filtering of vlan[x] should be marked in vlan_filter[x]. The primary vlan is taken from the CAM target table. */;
+ uint32_t unmatched_unicast /* bit vector in which clients that accept unmatched unicast packets are set */;
+};
+
+
+/*
+ * tx only queue init ramrod data $$KEEP_ENDIANNESS$$
+ */
+struct tx_queue_init_ramrod_data
+{
+ struct client_init_general_data general /* client init general data */;
+ struct client_init_tx_data tx /* client init tx data */;
+};
+
+
+/*
+ * Three RX producers for ETH
+ */
+union ustorm_eth_rx_producers
+{
+ struct {
+#if defined(__BIG_ENDIAN)
+ uint16_t bd_prod /* Producer of the RX BD ring */;
+ uint16_t cqe_prod /* Producer of the RX CQE ring */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t cqe_prod /* Producer of the RX CQE ring */;
+ uint16_t bd_prod /* Producer of the RX BD ring */;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t reserved;
+ uint16_t sge_prod /* Producer of the RX SGE ring */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t sge_prod /* Producer of the RX SGE ring */;
+ uint16_t reserved;
+#endif
+ } prod;
+ uint32_t raw_data[2];
+};
+
+
+/*
+ * The data afex vif list ramrod need $$KEEP_ENDIANNESS$$
+ */
+struct afex_vif_list_ramrod_data
+{
+ uint8_t afex_vif_list_command /* set get, clear all a VIF list id defined by enum vif_list_rule_kind */;
+ uint8_t func_bit_map /* the function bit map to set */;
+ uint16_t vif_list_index /* the VIF list, in a per pf vector to add this function to */;
+ uint8_t func_to_clear /* the func id to clear in case of clear func mode */;
+ uint8_t echo;
+ uint16_t reserved1;
+};
+
+
+/*
+ * cfc delete event data $$KEEP_ENDIANNESS$$
+ */
+struct cfc_del_event_data
+{
+ uint32_t cid /* cid of deleted connection */;
+ uint32_t reserved0;
+ uint32_t reserved1;
+};
+
+
+/*
+ * per-port SAFC demo variables
+ */
+struct cmng_flags_per_port
+{
+ uint32_t cmng_enables;
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes if set, enable fairness between vnics */
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0
+#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes if set, enable rate shaping between vnics */
+#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes if set, enable fairness between COSes */
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes (use enum fairness_mode) */
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3
+#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes reserved */
+#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4
+ uint32_t __reserved1;
+};
+
+
+/*
+ * per-port rate shaping variables
+ */
+struct rate_shaping_vars_per_port
+{
+ uint32_t rs_periodic_timeout /* timeout of periodic timer */;
+ uint32_t rs_threshold /* threshold, below which we start to stop queues */;
+};
+
+/*
+ * per-port fairness variables
+ */
+struct fairness_vars_per_port
+{
+ uint32_t upper_bound /* Quota for a protocol/vnic */;
+ uint32_t fair_threshold /* almost-empty threshold */;
+ uint32_t fairness_timeout /* timeout of fairness timer */;
+ uint32_t reserved0;
+};
+
+/*
+ * per-port SAFC variables
+ */
+struct safc_struct_per_port
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t __reserved1;
+ uint8_t __reserved0;
+ uint8_t safc_timeout_usec /* timeout to stop queues on SAFC pause command */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t safc_timeout_usec /* timeout to stop queues on SAFC pause command */;
+ uint8_t __reserved0;
+ uint16_t __reserved1;
+#endif
+ uint8_t cos_to_traffic_types[MAX_COS_NUMBER] /* translate cos to service traffics types */;
+ uint16_t cos_to_pause_mask[NUM_OF_SAFC_BITS] /* QM pause mask for each class of service in the SAFC frame */;
+};
+
+/*
+ * Per-port congestion management variables
+ */
+struct cmng_struct_per_port
+{
+ struct rate_shaping_vars_per_port rs_vars;
+ struct fairness_vars_per_port fair_vars;
+ struct safc_struct_per_port safc_vars;
+ struct cmng_flags_per_port flags;
+};
+
+/*
+ * a single rate shaping counter. can be used as protocol or vnic counter
+ */
+struct rate_shaping_counter
+{
+ uint32_t quota /* Quota for a protocol/vnic */;
+#if defined(__BIG_ENDIAN)
+ uint16_t __reserved0;
+ uint16_t rate /* Vnic/Protocol rate in units of Mega-bits/sec */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t rate /* Vnic/Protocol rate in units of Mega-bits/sec */;
+ uint16_t __reserved0;
+#endif
+};
+
+/*
+ * per-vnic rate shaping variables
+ */
+struct rate_shaping_vars_per_vn
+{
+ struct rate_shaping_counter vn_counter /* per-vnic counter */;
+};
+
+/*
+ * per-vnic fairness variables
+ */
+struct fairness_vars_per_vn
+{
+ uint32_t cos_credit_delta[MAX_COS_NUMBER] /* used for incrementing the credit */;
+ uint32_t vn_credit_delta /* used for incrementing the credit */;
+ uint32_t __reserved0;
+};
+
+/*
+ * cmng port init state
+ */
+struct cmng_vnic
+{
+ struct rate_shaping_vars_per_vn vnic_max_rate[4];
+ struct fairness_vars_per_vn vnic_min_rate[4];
+};
+
+/*
+ * cmng port init state
+ */
+struct cmng_init
+{
+ struct cmng_struct_per_port port;
+ struct cmng_vnic vnic;
+};
+
+
+/*
+ * driver parameters for congestion management init, all rates are in Mbps
+ */
+struct cmng_init_input
+{
+ uint32_t port_rate;
+ uint16_t vnic_min_rate[4] /* rates are in Mbps */;
+ uint16_t vnic_max_rate[4] /* rates are in Mbps */;
+ uint16_t cos_min_rate[MAX_COS_NUMBER] /* rates are in Mbps */;
+ uint16_t cos_to_pause_mask[MAX_COS_NUMBER];
+ struct cmng_flags_per_port flags;
+};
+
+
+/*
+ * Protocol-common command ID for slow path elements
+ */
+enum common_spqe_cmd_id
+{
+ RAMROD_CMD_ID_COMMON_UNUSED,
+ RAMROD_CMD_ID_COMMON_FUNCTION_START /* Start a function (for PFs only) */,
+ RAMROD_CMD_ID_COMMON_FUNCTION_STOP /* Stop a function (for PFs only) */,
+ RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE /* niv update function */,
+ RAMROD_CMD_ID_COMMON_CFC_DEL /* Delete a connection from CFC */,
+ RAMROD_CMD_ID_COMMON_CFC_DEL_WB /* Delete a connection from CFC (with write back) */,
+ RAMROD_CMD_ID_COMMON_STAT_QUERY /* Collect statistics counters */,
+ RAMROD_CMD_ID_COMMON_STOP_TRAFFIC /* Stop Tx traffic (before DCB updates) */,
+ RAMROD_CMD_ID_COMMON_START_TRAFFIC /* Start Tx traffic (after DCB updates) */,
+ RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS /* niv vif lists */,
+ RAMROD_CMD_ID_COMMON_SET_TIMESYNC /* Set Timesync Parameters (E3 Only) */,
+ MAX_COMMON_SPQE_CMD_ID};
+
+
+/*
+ * Per-protocol connection types
+ */
+enum connection_type
+{
+ ETH_CONNECTION_TYPE /* Ethernet */,
+ TOE_CONNECTION_TYPE /* TOE */,
+ RDMA_CONNECTION_TYPE /* RDMA */,
+ ISCSI_CONNECTION_TYPE /* iSCSI */,
+ FCOE_CONNECTION_TYPE /* FCoE */,
+ RESERVED_CONNECTION_TYPE_0,
+ RESERVED_CONNECTION_TYPE_1,
+ RESERVED_CONNECTION_TYPE_2,
+ NONE_CONNECTION_TYPE /* General- used for common slow path */,
+ MAX_CONNECTION_TYPE};
+
+
+/*
+ * Cos modes
+ */
+enum cos_mode
+{
+ OVERRIDE_COS /* Firmware deduce cos according to DCB */,
+ STATIC_COS /* Firmware has constant queues per CoS */,
+ FW_WRR /* Firmware keep fairness between different CoSes */,
+ MAX_COS_MODE};
+
+
+/*
+ * Dynamic HC counters set by the driver
+ */
+struct hc_dynamic_drv_counter
+{
+ uint32_t val[HC_SB_MAX_DYNAMIC_INDICES] /* 4 bytes * 4 indices = 2 lines */;
+};
+
+/*
+ * zone A per-queue data
+ */
+struct cstorm_queue_zone_data
+{
+ struct hc_dynamic_drv_counter hc_dyn_drv_cnt /* 4 bytes * 4 indices = 2 lines */;
+ struct regpair reserved[2];
+};
+
+
+/*
+ * Vf-PF channel data in cstorm ram (non-triggered zone)
+ */
+struct vf_pf_channel_zone_data
+{
+ uint32_t msg_addr_lo /* the message address on VF memory */;
+ uint32_t msg_addr_hi /* the message address on VF memory */;
+};
+
+/*
+ * zone for VF non-triggered data
+ */
+struct non_trigger_vf_zone
+{
+ struct vf_pf_channel_zone_data vf_pf_channel /* vf-pf channel zone data */;
+};
+
+/*
+ * Vf-PF channel trigger zone in cstorm ram
+ */
+struct vf_pf_channel_zone_trigger
+{
+ uint8_t addr_valid /* indicates that a vf-pf message is pending. MUST be set AFTER the message address. */;
+};
+
+/*
+ * zone that triggers the in-bound interrupt
+ */
+struct trigger_vf_zone
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t reserved1;
+ uint8_t reserved0;
+ struct vf_pf_channel_zone_trigger vf_pf_channel;
+#elif defined(__LITTLE_ENDIAN)
+ struct vf_pf_channel_zone_trigger vf_pf_channel;
+ uint8_t reserved0;
+ uint16_t reserved1;
+#endif
+ uint32_t reserved2;
+};
+
+/*
+ * zone B per-VF data
+ */
+struct cstorm_vf_zone_data
+{
+ struct non_trigger_vf_zone non_trigger /* zone for VF non-triggered data */;
+ struct trigger_vf_zone trigger /* zone that triggers the in-bound interrupt */;
+};
+
+
+/*
+ * Dynamic host coalescing init parameters, per state machine
+ */
+struct dynamic_hc_sm_config
+{
+ uint32_t threshold[3] /* thresholds of number of outstanding bytes */;
+ uint8_t shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES] /* bytes difference of each protocol is shifted right by this value */;
+ uint8_t hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES] /* timeout for level 0 for each protocol, in units of usec */;
+ uint8_t hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES] /* timeout for level 1 for each protocol, in units of usec */;
+ uint8_t hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES] /* timeout for level 2 for each protocol, in units of usec */;
+ uint8_t hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES] /* timeout for level 3 for each protocol, in units of usec */;
+};
+
+/*
+ * Dynamic host coalescing init parameters
+ */
+struct dynamic_hc_config
+{
+ struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM] /* Configuration per state machine */;
+};
+
+
+struct e2_integ_data
+{
+#if defined(__BIG_ENDIAN)
+ uint8_t flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0) /* BitField flags integration testing enabled */
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1) /* BitField flags flag indicating this connection will transmit on loopback */
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2) /* BitField flags flag indicating this connection will transmit according to cos field */
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) /* BitField flags flag indicating this connection will activate the opportunistic QM credit flow */
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) /* BitField flags flag indicating this connection will release the door bell queue (DQ) */
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5) /* BitField flags */
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+ uint8_t cos /* cos of the connection (relevant only in cos transmitting connections, when cosTx is set */;
+ uint8_t voq /* voq to return credit on. Normally equal to port (i.e. always 0 in E2 operational connections). in cos tests equal to cos. in loopback tests equal to LB_PORT (=4) */;
+ uint8_t pbf_queue /* pbf queue to transmit on. Normally equal to port (i.e. always 0 in E2 operational connections). in cos tests equal to cos. in loopback tests equal to LB_PORT (=4) */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t pbf_queue /* pbf queue to transmit on. Normally equal to port (i.e. always 0 in E2 operational connections). in cos tests equal to cos. in loopback tests equal to LB_PORT (=4) */;
+ uint8_t voq /* voq to return credit on. Normally equal to port (i.e. always 0 in E2 operational connections). in cos tests equal to cos. in loopback tests equal to LB_PORT (=4) */;
+ uint8_t cos /* cos of the connection (relevant only in cos transmitting connections, when cosTx is set */;
+ uint8_t flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0) /* BitField flags integration testing enabled */
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1) /* BitField flags flag indicating this connection will transmit on loopback */
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2) /* BitField flags flag indicating this connection will transmit according to cos field */
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) /* BitField flags flag indicating this connection will activate the opportunistic QM credit flow */
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) /* BitField flags flag indicating this connection will release the door bell queue (DQ) */
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5) /* BitField flags */
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t reserved3;
+ uint8_t reserved2;
+ uint8_t ramEn /* context area reserved for reading enable bit from ram */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t ramEn /* context area reserved for reading enable bit from ram */;
+ uint8_t reserved2;
+ uint16_t reserved3;
+#endif
+};
+
+
+/*
+ * set mac event data $$KEEP_ENDIANNESS$$
+ */
+struct eth_event_data
+{
+ uint32_t echo /* set mac echo data to return to driver */;
+ uint32_t reserved0;
+ uint32_t reserved1;
+};
+
+
+/*
+ * pf-vf event data $$KEEP_ENDIANNESS$$
+ */
+struct vf_pf_event_data
+{
+ uint8_t vf_id /* VF ID (0-63) */;
+ uint8_t reserved0;
+ uint16_t reserved1;
+ uint32_t msg_addr_lo /* message address on Vf (low 32 bits) */;
+ uint32_t msg_addr_hi /* message address on Vf (high 32 bits) */;
+};
+
+/*
+ * VF FLR event data $$KEEP_ENDIANNESS$$
+ */
+struct vf_flr_event_data
+{
+ uint8_t vf_id /* VF ID (0-63) */;
+ uint8_t reserved0;
+ uint16_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+};
+
+/*
+ * malicious VF event data $$KEEP_ENDIANNESS$$
+ */
+struct malicious_vf_event_data
+{
+ uint8_t vf_id /* VF ID (0-63) */;
+ uint8_t err_id /* reason for malicious notification */;
+ uint16_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+};
+
+/*
+ * vif list event data $$KEEP_ENDIANNESS$$
+ */
+struct vif_list_event_data
+{
+ uint8_t func_bit_map /* bit map of pf indice */;
+ uint8_t echo;
+ uint16_t reserved0;
+ uint32_t reserved1;
+ uint32_t reserved2;
+};
+
+/*
+ * function update event data $$KEEP_ENDIANNESS$$
+ */
+struct function_update_event_data
+{
+ uint8_t echo;
+ uint8_t reserved;
+ uint16_t reserved0;
+ uint32_t reserved1;
+ uint32_t reserved2;
+};
+
+/*
+ * union for all event ring message types
+ */
+union event_data
+{
+ struct vf_pf_event_data vf_pf_event /* vf-pf event data */;
+ struct eth_event_data eth_event /* set mac event data */;
+ struct cfc_del_event_data cfc_del_event /* cfc delete event data */;
+ struct vf_flr_event_data vf_flr_event /* vf flr event data */;
+ struct malicious_vf_event_data malicious_vf_event /* malicious vf event data */;
+ struct vif_list_event_data vif_list_event /* vif list event data */;
+ struct function_update_event_data function_update_event /* function update event data */;
+};
+
+
+/*
+ * per PF event ring data
+ */
+struct event_ring_data
+{
+ struct regpair_native base_addr /* ring base address */;
+#if defined(__BIG_ENDIAN)
+ uint8_t index_id /* index ID within the status block */;
+ uint8_t sb_id /* status block ID */;
+ uint16_t producer /* event ring producer */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t producer /* event ring producer */;
+ uint8_t sb_id /* status block ID */;
+ uint8_t index_id /* index ID within the status block */;
+#endif
+ uint32_t reserved0;
+};
+
+
+/*
+ * event ring message element (each element is 128 bits) $$KEEP_ENDIANNESS$$
+ */
+struct event_ring_msg
+{
+ uint8_t opcode;
+ uint8_t error /* error on the mesasage */;
+ uint16_t reserved1;
+ union event_data data /* message data (96 bits data) */;
+};
+
+/*
+ * event ring next page element (128 bits)
+ */
+struct event_ring_next
+{
+ struct regpair addr /* Address of the next page of the ring */;
+ uint32_t reserved[2];
+};
+
+/*
+ * union for event ring element types (each element is 128 bits)
+ */
+union event_ring_elem
+{
+ struct event_ring_msg message /* event ring message */;
+ struct event_ring_next next_page /* event ring next page */;
+};
+
+
+/*
+ * Common event ring opcodes
+ */
+enum event_ring_opcode
+{
+ EVENT_RING_OPCODE_VF_PF_CHANNEL,
+ EVENT_RING_OPCODE_FUNCTION_START /* Start a function (for PFs only) */,
+ EVENT_RING_OPCODE_FUNCTION_STOP /* Stop a function (for PFs only) */,
+ EVENT_RING_OPCODE_CFC_DEL /* Delete a connection from CFC */,
+ EVENT_RING_OPCODE_CFC_DEL_WB /* Delete a connection from CFC (with write back) */,
+ EVENT_RING_OPCODE_STAT_QUERY /* Collect statistics counters */,
+ EVENT_RING_OPCODE_STOP_TRAFFIC /* Stop Tx traffic (before DCB updates) */,
+ EVENT_RING_OPCODE_START_TRAFFIC /* Start Tx traffic (after DCB updates) */,
+ EVENT_RING_OPCODE_VF_FLR /* VF FLR indication for PF */,
+ EVENT_RING_OPCODE_MALICIOUS_VF /* Malicious VF operation detected */,
+ EVENT_RING_OPCODE_FORWARD_SETUP /* Initialize forward channel */,
+ EVENT_RING_OPCODE_RSS_UPDATE_RULES /* Update RSS configuration */,
+ EVENT_RING_OPCODE_FUNCTION_UPDATE /* function update */,
+ EVENT_RING_OPCODE_AFEX_VIF_LISTS /* event ring opcode niv vif lists */,
+ EVENT_RING_OPCODE_SET_MAC /* Add/remove MAC (in E1x only) */,
+ EVENT_RING_OPCODE_CLASSIFICATION_RULES /* Add/remove MAC or VLAN (in E2/E3 only) */,
+ EVENT_RING_OPCODE_FILTERS_RULES /* Add/remove classification filters for L2 client (in E2/E3 only) */,
+ EVENT_RING_OPCODE_MULTICAST_RULES /* Add/remove multicast classification bin (in E2/E3 only) */,
+ EVENT_RING_OPCODE_SET_TIMESYNC /* Set Timesync Parameters (E3 Only) */,
+ MAX_EVENT_RING_OPCODE};
+
+
+/*
+ * Modes for fairness algorithm
+ */
+enum fairness_mode
+{
+ FAIRNESS_COS_WRR_MODE /* Weighted round robin mode (used in Google) */,
+ FAIRNESS_COS_ETS_MODE /* ETS mode (used in FCoE) */,
+ MAX_FAIRNESS_MODE};
+
+
+/*
+ * Priority and cos $$KEEP_ENDIANNESS$$
+ */
+struct priority_cos
+{
+ uint8_t priority /* Priority */;
+ uint8_t cos /* Cos */;
+ uint16_t reserved1;
+};
+
+/*
+ * The data for flow control configuration $$KEEP_ENDIANNESS$$
+ */
+struct flow_control_configuration
+{
+ struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES] /* traffic_type to priority cos */;
+ uint8_t dcb_enabled /* If DCB mode is enabled then traffic class to priority array is fully initialized and there must be inner VLAN */;
+ uint8_t dcb_version /* DCB version Increase by one on each DCB update */;
+ uint8_t dont_add_pri_0 /* In case, the priority is 0, and the packet has no vlan, the firmware wont add vlan */;
+ uint8_t reserved1;
+ uint32_t reserved2;
+};
+
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct function_start_data
+{
+ uint8_t function_mode /* the function mode */;
+ uint8_t allow_npar_tx_switching /* If set, inter-pf tx switching is allowed in Switch Independent function mode. (E2/E3 Only) */;
+ uint16_t sd_vlan_tag /* value of Vlan in case of switch depended multi-function mode */;
+ uint16_t vif_id /* value of VIF id in case of NIV multi-function mode */;
+ uint8_t path_id;
+ uint8_t network_cos_mode /* The cos mode for network traffic. */;
+ uint8_t dmae_cmd_id /* The DMAE command id to use for FW DMAE transactions */;
+ uint8_t gre_tunnel_mode /* GRE Tunnel Mode to enable on the Function (E2/E3 Only) */;
+ uint8_t gre_tunnel_rss /* Type of RSS to perform on GRE Tunneled packets */;
+ uint8_t nvgre_clss_en /* If set, NVGRE tunneled packets are classified according to their inner MAC (gre_mode must be NVGRE_TUNNEL) */;
+ uint16_t reserved1[2];
+};
+
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct function_update_data
+{
+ uint8_t vif_id_change_flg /* If set, vif_id will be checked */;
+ uint8_t afex_default_vlan_change_flg /* If set, afex_default_vlan will be checked */;
+ uint8_t allowed_priorities_change_flg /* If set, allowed_priorities will be checked */;
+ uint8_t network_cos_mode_change_flg /* If set, network_cos_mode will be checked */;
+ uint16_t vif_id /* value of VIF id in case of NIV multi-function mode */;
+ uint16_t afex_default_vlan /* value of default Vlan in case of NIV mf */;
+ uint8_t allowed_priorities /* bit vector of allowed Vlan priorities for this VIF */;
+ uint8_t network_cos_mode /* The cos mode for network traffic. */;
+ uint8_t lb_mode_en_change_flg /* If set, lb_mode_en will be checked */;
+ uint8_t lb_mode_en /* If set, niv loopback mode will be enabled */;
+ uint8_t tx_switch_suspend_change_flg /* If set, tx_switch_suspend will be checked */;
+ uint8_t tx_switch_suspend /* If set, TX switching TO this function will be disabled and packets will be dropped */;
+ uint8_t echo;
+ uint8_t reserved1;
+ uint8_t update_gre_cfg_flg /* If set, GRE config for the function will be updated according to the gre_tunnel_rss and nvgre_clss_en fields */;
+ uint8_t gre_tunnel_mode /* GRE Tunnel Mode to enable on the Function (E2/E3 Only) */;
+ uint8_t gre_tunnel_rss /* Type of RSS to perform on GRE Tunneled packets */;
+ uint8_t nvgre_clss_en /* If set, NVGRE tunneled packets are classified according to their inner MAC (gre_mode must be NVGRE_TUNNEL) */;
+ uint32_t reserved3;
+};
+
+
+/*
+ * FW version stored in the Xstorm RAM
+ */
+struct fw_version
+{
+#if defined(__BIG_ENDIAN)
+ uint8_t engineering /* firmware current engineering version */;
+ uint8_t revision /* firmware current revision version */;
+ uint8_t minor /* firmware current minor version */;
+ uint8_t major /* firmware current major version */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t major /* firmware current major version */;
+ uint8_t minor /* firmware current minor version */;
+ uint8_t revision /* firmware current revision version */;
+ uint8_t engineering /* firmware current engineering version */;
+#endif
+ uint32_t flags;
+#define FW_VERSION_OPTIMIZED (0x1<<0) /* BitField flags if set, this is optimized ASM */
+#define FW_VERSION_OPTIMIZED_SHIFT 0
+#define FW_VERSION_BIG_ENDIEN (0x1<<1) /* BitField flags if set, this is big-endien ASM */
+#define FW_VERSION_BIG_ENDIEN_SHIFT 1
+#define FW_VERSION_CHIP_VERSION (0x3<<2) /* BitField flags 1 - E1H */
+#define FW_VERSION_CHIP_VERSION_SHIFT 2
+#define __FW_VERSION_RESERVED (0xFFFFFFF<<4) /* BitField flags */
+#define __FW_VERSION_RESERVED_SHIFT 4
+};
+
+
+/*
+ * GRE RSS Mode
+ */
+enum gre_rss_mode
+{
+ GRE_OUTER_HEADERS_RSS /* RSS for GRE Packets is performed on the outer headers */,
+ GRE_INNER_HEADERS_RSS /* RSS for GRE Packets is performed on the inner headers */,
+ NVGRE_KEY_ENTROPY_RSS /* RSS for NVGRE Packets is done based on a hash containing the entropy bits from the GRE Key Field (gre_tunnel must be NVGRE_TUNNEL) */,
+ MAX_GRE_RSS_MODE};
+
+
+/*
+ * GRE Tunnel Mode
+ */
+enum gre_tunnel_type
+{
+ NO_GRE_TUNNEL,
+ NVGRE_TUNNEL /* NV-GRE Tunneling Microsoft L2 over GRE. GRE header contains mandatory Key Field. */,
+ L2GRE_TUNNEL /* L2-GRE Tunneling General L2 over GRE. GRE can contain Key field with Tenant ID and Sequence Field */,
+ IPGRE_TUNNEL /* IP-GRE Tunneling IP over GRE. GRE may contain Key field with Tenant ID, Sequence Field and/or Checksum Field */,
+ MAX_GRE_TUNNEL_TYPE};
+
+
+/*
+ * Dynamic Host-Coalescing - Driver(host) counters
+ */
+struct hc_dynamic_sb_drv_counters
+{
+ uint32_t dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES] /* Dynamic HC counters written by drivers */;
+};
+
+
+/*
+ * 2 bytes. configuration/state parameters for a single protocol index
+ */
+struct hc_index_data
+{
+#if defined(__BIG_ENDIAN)
+ uint8_t flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0) /* BitField flags Index to a state machine. Can be 0 or 1 */
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1) /* BitField flags if set, host coalescing would be done for this index */
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2) /* BitField flags if set, dynamic HC will be done for this index */
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3) /* BitField flags */
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+ uint8_t timeout /* the timeout values for this index. Units are 4 usec */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t timeout /* the timeout values for this index. Units are 4 usec */;
+ uint8_t flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0) /* BitField flags Index to a state machine. Can be 0 or 1 */
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1) /* BitField flags if set, host coalescing would be done for this index */
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2) /* BitField flags if set, dynamic HC will be done for this index */
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3) /* BitField flags */
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+#endif
+};
+
+
+/*
+ * HC state-machine
+ */
+struct hc_status_block_sm
+{
+#if defined(__BIG_ENDIAN)
+ uint8_t igu_seg_id;
+ uint8_t igu_sb_id /* sb_id within the IGU */;
+ uint8_t timer_value /* Determines the time_to_expire */;
+ uint8_t __flags;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t __flags;
+ uint8_t timer_value /* Determines the time_to_expire */;
+ uint8_t igu_sb_id /* sb_id within the IGU */;
+ uint8_t igu_seg_id;
+#endif
+ uint32_t time_to_expire /* The time in which it expects to wake up */;
+};
+
+/*
+ * hold PCI identification variables- used in various places in firmware
+ */
+struct pci_entity
+{
+#if defined(__BIG_ENDIAN)
+ uint8_t vf_valid /* If set, this is a VF, otherwise it is PF */;
+ uint8_t vf_id /* VF ID (0-63). Value of 0xFF means VF not valid */;
+ uint8_t vnic_id /* Virtual NIC ID (0-3) */;
+ uint8_t pf_id /* PCI physical function number (0-7). The LSB of this field is the port ID */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t pf_id /* PCI physical function number (0-7). The LSB of this field is the port ID */;
+ uint8_t vnic_id /* Virtual NIC ID (0-3) */;
+ uint8_t vf_id /* VF ID (0-63). Value of 0xFF means VF not valid */;
+ uint8_t vf_valid /* If set, this is a VF, otherwise it is PF */;
+#endif
+};
+
+/*
+ * The fast-path status block meta-data, common to all chips
+ */
+struct hc_sb_data
+{
+ struct regpair_native host_sb_addr /* Host status block address */;
+ struct hc_status_block_sm state_machine[HC_SB_MAX_SM] /* Holds the state machines of the status block */;
+ struct pci_entity p_func /* vnic / port of the status block to be set by the driver */;
+#if defined(__BIG_ENDIAN)
+ uint8_t rsrv0;
+ uint8_t state;
+ uint8_t dhc_qzone_id /* used in E2 only, to specify the HW queue zone ID used for this status block dynamic HC counters */;
+ uint8_t same_igu_sb_1b /* Indicate that both state-machines acts like single sm */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t same_igu_sb_1b /* Indicate that both state-machines acts like single sm */;
+ uint8_t dhc_qzone_id /* used in E2 only, to specify the HW queue zone ID used for this status block dynamic HC counters */;
+ uint8_t state;
+ uint8_t rsrv0;
+#endif
+ struct regpair_native rsrv1[2];
+};
+
+
+/*
+ * Segment types for host coaslescing
+ */
+enum hc_segment
+{
+ HC_REGULAR_SEGMENT,
+ HC_DEFAULT_SEGMENT,
+ MAX_HC_SEGMENT};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_sp_status_block_data
+{
+ struct regpair_native host_sb_addr /* Host status block address */;
+#if defined(__BIG_ENDIAN)
+ uint8_t rsrv1;
+ uint8_t state;
+ uint8_t igu_seg_id /* segment id of the IGU */;
+ uint8_t igu_sb_id /* sb_id within the IGU */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t igu_sb_id /* sb_id within the IGU */;
+ uint8_t igu_seg_id /* segment id of the IGU */;
+ uint8_t state;
+ uint8_t rsrv1;
+#endif
+ struct pci_entity p_func /* vnic / port of the status block to be set by the driver */;
+};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_status_block_data_e1x
+{
+ struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X] /* configuration/state parameters for a single protocol index */;
+ struct hc_sb_data common /* The fast-path status block meta-data, common to all chips */;
+};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_status_block_data_e2
+{
+ struct hc_index_data index_data[HC_SB_MAX_INDICES_E2] /* configuration/state parameters for a single protocol index */;
+ struct hc_sb_data common /* The fast-path status block meta-data, common to all chips */;
+};
+
+
+/*
+ * IGU block operartion modes (in Everest2)
+ */
+enum igu_mode
+{
+ HC_IGU_BC_MODE /* Backward compatible mode */,
+ HC_IGU_NBC_MODE /* Non-backward compatible mode */,
+ MAX_IGU_MODE};
+
+
+/*
+ * IP versions
+ */
+enum ip_ver
+{
+ IP_V4,
+ IP_V6,
+ MAX_IP_VER};
+
+
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id
+{
+ VF_PF_CHANNEL_NOT_READY /* Writing to VF/PF channel when it is not ready */,
+ ETH_ILLEGAL_BD_LENGTHS /* TX BD lengths error was detected */,
+ ETH_PACKET_TOO_SHORT /* TX packet is shorter then reported on BDs */,
+ ETH_PAYLOAD_TOO_BIG /* TX packet is greater then MTU */,
+ ETH_ILLEGAL_ETH_TYPE /* TX packet reported without VLAN but eth type is 0x8100 */,
+ ETH_ILLEGAL_LSO_HDR_LEN /* LSO header length on BDs and on hdr_nbd do not match */,
+ ETH_TOO_MANY_BDS /* Tx packet has too many BDs */,
+ ETH_ZERO_HDR_NBDS /* hdr_nbds field is zero */,
+ ETH_START_BD_NOT_SET /* start_bd should be set on first TX BD in packet */,
+ ETH_ILLEGAL_PARSE_NBDS /* Tx packet with parse_nbds field which is not legal */,
+ ETH_IPV6_AND_CHECKSUM /* Tx packet with IP checksum on IPv6 */,
+ ETH_VLAN_FLG_INCORRECT /* Tx packet with incorrect VLAN flag */,
+ ETH_ILLEGAL_LSO_MSS /* Tx LSO packet with illegal MSS value */,
+ ETH_TUNNEL_NOT_SUPPORTED /* Tunneling packets are not supported in current connection */,
+ MAX_MALICIOUS_VF_ERROR_ID};
+
+
+/*
+ * Multi-function modes
+ */
+enum mf_mode
+{
+ SINGLE_FUNCTION,
+ MULTI_FUNCTION_SD /* Switch dependent (vlan based) */,
+ MULTI_FUNCTION_SI /* Switch independent (mac based) */,
+ MULTI_FUNCTION_AFEX /* Switch dependent (niv based) */,
+ MAX_MF_MODE};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per pf) $$KEEP_ENDIANNESS$$
+ */
+struct tstorm_per_pf_stats
+{
+ struct regpair rcv_error_bytes /* number of bytes received with errors */;
+};
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct per_pf_stats
+{
+ struct tstorm_per_pf_stats tstorm_pf_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per port) $$KEEP_ENDIANNESS$$
+ */
+struct tstorm_per_port_stats
+{
+ uint32_t mac_discard /* number of packets with mac errors */;
+ uint32_t mac_filter_discard /* the number of good frames dropped because of no perfect match to MAC/VLAN address */;
+ uint32_t brb_truncate_discard /* the number of packtes that were dropped because they were truncated in BRB */;
+ uint32_t mf_tag_discard /* the number of good frames dropped because of no match to the outer vlan/VNtag */;
+ uint32_t packet_drop /* general packet drop conter- incremented for every packet drop */;
+ uint32_t reserved;
+};
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct per_port_stats
+{
+ struct tstorm_per_port_stats tstorm_port_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per client) $$KEEP_ENDIANNESS$$
+ */
+struct tstorm_per_queue_stats
+{
+ struct regpair rcv_ucast_bytes /* number of bytes in unicast packets received without errors and pass the filter */;
+ uint32_t rcv_ucast_pkts /* number of unicast packets received without errors and pass the filter */;
+ uint32_t checksum_discard /* number of total packets received with checksum error */;
+ struct regpair rcv_bcast_bytes /* number of bytes in broadcast packets received without errors and pass the filter */;
+ uint32_t rcv_bcast_pkts /* number of packets in broadcast packets received without errors and pass the filter */;
+ uint32_t pkts_too_big_discard /* number of too long packets received */;
+ struct regpair rcv_mcast_bytes /* number of bytes in multicast packets received without errors and pass the filter */;
+ uint32_t rcv_mcast_pkts /* number of packets in multicast packets received without errors and pass the filter */;
+ uint32_t ttl0_discard /* the number of good frames dropped because of TTL=0 */;
+ uint16_t no_buff_discard;
+ uint16_t reserved0;
+ uint32_t reserved1;
+};
+
+/*
+ * Protocol-common statistics collected by the Ustorm (per client) $$KEEP_ENDIANNESS$$
+ */
+struct ustorm_per_queue_stats
+{
+ struct regpair ucast_no_buff_bytes /* the number of unicast bytes received from network dropped because of no buffer at host */;
+ struct regpair mcast_no_buff_bytes /* the number of multicast bytes received from network dropped because of no buffer at host */;
+ struct regpair bcast_no_buff_bytes /* the number of broadcast bytes received from network dropped because of no buffer at host */;
+ uint32_t ucast_no_buff_pkts /* the number of unicast frames received from network dropped because of no buffer at host */;
+ uint32_t mcast_no_buff_pkts /* the number of unicast frames received from network dropped because of no buffer at host */;
+ uint32_t bcast_no_buff_pkts /* the number of unicast frames received from network dropped because of no buffer at host */;
+ uint32_t coalesced_pkts /* the number of packets coalesced in all aggregations */;
+ struct regpair coalesced_bytes /* the number of bytes coalesced in all aggregations */;
+ uint32_t coalesced_events /* the number of aggregations */;
+ uint32_t coalesced_aborts /* the number of exception which avoid aggregation */;
+};
+
+/*
+ * Protocol-common statistics collected by the Xstorm (per client) $$KEEP_ENDIANNESS$$
+ */
+struct xstorm_per_queue_stats
+{
+ struct regpair ucast_bytes_sent /* number of total bytes sent without errors */;
+ struct regpair mcast_bytes_sent /* number of total bytes sent without errors */;
+ struct regpair bcast_bytes_sent /* number of total bytes sent without errors */;
+ uint32_t ucast_pkts_sent /* number of total packets sent without errors */;
+ uint32_t mcast_pkts_sent /* number of total packets sent without errors */;
+ uint32_t bcast_pkts_sent /* number of total packets sent without errors */;
+ uint32_t error_drop_pkts /* number of total packets drooped due to errors */;
+};
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct per_queue_stats
+{
+ struct tstorm_per_queue_stats tstorm_queue_statistics;
+ struct ustorm_per_queue_stats ustorm_queue_statistics;
+ struct xstorm_per_queue_stats xstorm_queue_statistics;
+};
+
+
+/*
+ * FW version stored in first line of pram $$KEEP_ENDIANNESS$$
+ */
+struct pram_fw_version
+{
+ uint8_t major /* firmware current major version */;
+ uint8_t minor /* firmware current minor version */;
+ uint8_t revision /* firmware current revision version */;
+ uint8_t engineering /* firmware current engineering version */;
+ uint8_t flags;
+#define PRAM_FW_VERSION_OPTIMIZED (0x1<<0) /* BitField flags if set, this is optimized ASM */
+#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0
+#define PRAM_FW_VERSION_STORM_ID (0x3<<1) /* BitField flags storm_id identification */
+#define PRAM_FW_VERSION_STORM_ID_SHIFT 1
+#define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3) /* BitField flags if set, this is big-endien ASM */
+#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3
+#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4) /* BitField flags 1 - E1H */
+#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4
+#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6) /* BitField flags */
+#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6
+};
+
+
+/*
+ * Ethernet slow path element
+ */
+union protocol_common_specific_data
+{
+ uint8_t protocol_data[8] /* to fix this structure size to 8 bytes */;
+ struct regpair phy_address /* SPE physical address */;
+ struct regpair mac_config_addr /* physical address of the MAC configuration command, as allocated by the driver */;
+ struct afex_vif_list_ramrod_data afex_vif_list_data /* The data afex vif list ramrod need */;
+};
+
+/*
+ * The send queue element
+ */
+struct protocol_common_spe
+{
+ struct spe_hdr hdr /* SPE header */;
+ union protocol_common_specific_data data /* data specific to common protocol */;
+};
+
+
+/*
+ * The data for the Set Timesync Ramrod $$KEEP_ENDIANNESS$$
+ */
+struct set_timesync_ramrod_data
+{
+ uint8_t drift_adjust_cmd /* Timesync Drift Adjust Command */;
+ uint8_t offset_cmd /* Timesync Offset Command */;
+ uint8_t add_sub_drift_adjust_value /* Whether to add(1)/subtract(0) Drift Adjust Value from the Offset */;
+ uint8_t drift_adjust_value /* Drift Adjust Value (in ns) */;
+ uint32_t drift_adjust_period /* Drift Adjust Period (in us) */;
+ struct regpair offset_delta /* Timesync Offset Delta (in ns) */;
+};
+
+
+/*
+ * The send queue element
+ */
+struct slow_path_element
+{
+ struct spe_hdr hdr /* common data for all protocols */;
+ struct regpair protocol_data /* additional data specific to the protocol */;
+};
+
+
+/*
+ * Protocol-common statistics counter $$KEEP_ENDIANNESS$$
+ */
+struct stats_counter
+{
+ uint16_t xstats_counter /* xstorm statistics counter */;
+ uint16_t reserved0;
+ uint32_t reserved1;
+ uint16_t tstats_counter /* tstorm statistics counter */;
+ uint16_t reserved2;
+ uint32_t reserved3;
+ uint16_t ustats_counter /* ustorm statistics counter */;
+ uint16_t reserved4;
+ uint32_t reserved5;
+ uint16_t cstats_counter /* ustorm statistics counter */;
+ uint16_t reserved6;
+ uint32_t reserved7;
+};
+
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct stats_query_entry
+{
+ uint8_t kind;
+ uint8_t index /* queue index */;
+ uint16_t funcID /* the func the statistic will send to */;
+ uint32_t reserved;
+ struct regpair address /* pxp address */;
+};
+
+/*
+ * statistic command $$KEEP_ENDIANNESS$$
+ */
+struct stats_query_cmd_group
+{
+ struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+
+/*
+ * statistic command header $$KEEP_ENDIANNESS$$
+ */
+struct stats_query_header
+{
+ uint8_t cmd_num /* command number */;
+ uint8_t reserved0;
+ uint16_t drv_stats_counter;
+ uint32_t reserved1;
+ struct regpair stats_counters_addrs /* stats counter */;
+};
+
+
+/*
+ * Types of statistcis query entry
+ */
+enum stats_query_type
+{
+ STATS_TYPE_QUEUE,
+ STATS_TYPE_PORT,
+ STATS_TYPE_PF,
+ STATS_TYPE_TOE,
+ STATS_TYPE_FCOE,
+ MAX_STATS_QUERY_TYPE};
+
+
+/*
+ * Indicate of the function status block state
+ */
+enum status_block_state
+{
+ SB_DISABLED,
+ SB_ENABLED,
+ SB_CLEANED,
+ MAX_STATUS_BLOCK_STATE};
+
+
+/*
+ * Storm IDs (including attentions for IGU related enums)
+ */
+enum storm_id
+{
+ USTORM_ID,
+ CSTORM_ID,
+ XSTORM_ID,
+ TSTORM_ID,
+ ATTENTION_ID,
+ MAX_STORM_ID};
+
+
+/*
+ * Taffic types used in ETS and flow control algorithms
+ */
+enum traffic_type
+{
+ LLFC_TRAFFIC_TYPE_NW /* Networking */,
+ LLFC_TRAFFIC_TYPE_FCOE /* FCoE */,
+ LLFC_TRAFFIC_TYPE_ISCSI /* iSCSI */,
+ MAX_TRAFFIC_TYPE};
+
+
+/*
+ * zone A per-queue data
+ */
+struct tstorm_queue_zone_data
+{
+ struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct tstorm_vf_zone_data
+{
+ struct regpair reserved;
+};
+
+
+/*
+ * Add or Subtract Value for Set Timesync Ramrod
+ */
+enum ts_add_sub_value
+{
+ TS_SUB_VALUE /* Subtract Value */,
+ TS_ADD_VALUE /* Add Value */,
+ MAX_TS_ADD_SUB_VALUE};
+
+
+/*
+ * Drift-Adjust Commands for Set Timesync Ramrod
+ */
+enum ts_drift_adjust_cmd
+{
+ TS_DRIFT_ADJUST_KEEP /* Keep Drift-Adjust at current values */,
+ TS_DRIFT_ADJUST_SET /* Set Drift-Adjust */,
+ TS_DRIFT_ADJUST_RESET /* Reset Drift-Adjust */,
+ MAX_TS_DRIFT_ADJUST_CMD};
+
+
+/*
+ * Offset Commands for Set Timesync Ramrod
+ */
+enum ts_offset_cmd
+{
+ TS_OFFSET_KEEP /* Keep Offset at current values */,
+ TS_OFFSET_INC /* Increase Offset by Offset Delta */,
+ TS_OFFSET_DEC /* Decrease Offset by Offset Delta */,
+ MAX_TS_OFFSET_CMD};
+
+
+/*
+ * zone A per-queue data
+ */
+struct ustorm_queue_zone_data
+{
+ union ustorm_eth_rx_producers eth_rx_producers /* ETH RX rings producers */;
+ struct regpair reserved[3];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct ustorm_vf_zone_data
+{
+ struct regpair reserved;
+};
+
+
+/*
+ * data per VF-PF channel
+ */
+struct vf_pf_channel_data
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t reserved0;
+ uint8_t valid /* flag for channel validity. (cleared when identify a VF as malicious) */;
+ uint8_t state /* channel state (ready / waiting for ack) */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t state /* channel state (ready / waiting for ack) */;
+ uint8_t valid /* flag for channel validity. (cleared when identify a VF as malicious) */;
+ uint16_t reserved0;
+#endif
+ uint32_t reserved1;
+};
+
+
+/*
+ * State of VF-PF channel
+ */
+enum vf_pf_channel_state
+{
+ VF_PF_CHANNEL_STATE_READY /* Channel is ready to accept a message from VF */,
+ VF_PF_CHANNEL_STATE_WAITING_FOR_ACK /* Channel waits for an ACK from PF */,
+ MAX_VF_PF_CHANNEL_STATE};
+
+
+/*
+ * vif_list_rule_kind
+ */
+enum vif_list_rule_kind
+{
+ VIF_LIST_RULE_SET,
+ VIF_LIST_RULE_GET,
+ VIF_LIST_RULE_CLEAR_ALL,
+ VIF_LIST_RULE_CLEAR_FUNC,
+ MAX_VIF_LIST_RULE_KIND};
+
+
+/*
+ * zone A per-queue data
+ */
+struct xstorm_queue_zone_data
+{
+ struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct xstorm_vf_zone_data
+{
+ struct regpair reserved;
+};
+
+
+#endif /* ECORE_HSI_H */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_init.h b/src/spdk/dpdk/drivers/net/bnx2x/ecore_init.h
new file mode 100644
index 00000000..f2de07e5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_init.h
@@ -0,0 +1,817 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef ECORE_INIT_H
+#define ECORE_INIT_H
+
+/* Init operation types and structures */
+enum {
+ OP_RD = 0x1, /* read a single register */
+ OP_WR, /* write a single register */
+ OP_SW, /* copy a string to the device */
+ OP_ZR, /* clear memory */
+ OP_ZP, /* unzip then copy with DMAE */
+ OP_WR_64, /* write 64 bit pattern */
+ OP_WB, /* copy a string using DMAE */
+ OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */
+ OP_IF_MODE_OR, /* Skip the following ops if all init modes don't match */
+ OP_IF_MODE_AND, /* Skip the following ops if any init modes don't match */
+ OP_IF_PHASE,
+ OP_RT,
+ OP_DELAY,
+ OP_VERIFY,
+ OP_MAX
+};
+
+enum {
+ STAGE_START,
+ STAGE_END,
+};
+
+/* Returns the index of start or end of a specific block stage in ops array*/
+#define BLOCK_OPS_IDX(block, stage, end) \
+ (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
+
+
+/* structs for the various opcodes */
+struct raw_op {
+ uint32_t op:8;
+ uint32_t offset:24;
+ uint32_t raw_data;
+};
+
+struct op_read {
+ uint32_t op:8;
+ uint32_t offset:24;
+ uint32_t val;
+};
+
+struct op_write {
+ uint32_t op:8;
+ uint32_t offset:24;
+ uint32_t val;
+};
+
+struct op_arr_write {
+ uint32_t op:8;
+ uint32_t offset:24;
+#ifdef __BIG_ENDIAN
+ uint16_t data_len;
+ uint16_t data_off;
+#else /* __LITTLE_ENDIAN */
+ uint16_t data_off;
+ uint16_t data_len;
+#endif
+};
+
+struct op_zero {
+ uint32_t op:8;
+ uint32_t offset:24;
+ uint32_t len;
+};
+
+struct op_if_mode {
+ uint32_t op:8;
+ uint32_t cmd_offset:24;
+ uint32_t mode_bit_map;
+};
+
+struct op_if_phase {
+ uint32_t op:8;
+ uint32_t cmd_offset:24;
+ uint32_t phase_bit_map;
+};
+
+struct op_delay {
+ uint32_t op:8;
+ uint32_t reserved:24;
+ uint32_t delay;
+};
+
+union init_op {
+ struct op_read read;
+ struct op_write write;
+ struct op_arr_write arr_wr;
+ struct op_zero zero;
+ struct raw_op raw;
+ struct op_if_mode if_mode;
+ struct op_if_phase if_phase;
+ struct op_delay delay;
+};
+
+
+/* Init Phases */
+enum {
+ PHASE_COMMON,
+ PHASE_PORT0,
+ PHASE_PORT1,
+ PHASE_PF0,
+ PHASE_PF1,
+ PHASE_PF2,
+ PHASE_PF3,
+ PHASE_PF4,
+ PHASE_PF5,
+ PHASE_PF6,
+ PHASE_PF7,
+ NUM_OF_INIT_PHASES
+};
+
+/* Init Modes */
+enum {
+ MODE_ASIC = 0x00000001,
+ MODE_FPGA = 0x00000002,
+ MODE_EMUL = 0x00000004,
+ MODE_E2 = 0x00000008,
+ MODE_E3 = 0x00000010,
+ MODE_PORT2 = 0x00000020,
+ MODE_PORT4 = 0x00000040,
+ MODE_SF = 0x00000080,
+ MODE_MF = 0x00000100,
+ MODE_MF_SD = 0x00000200,
+ MODE_MF_SI = 0x00000400,
+ MODE_MF_AFEX = 0x00000800,
+ MODE_E3_A0 = 0x00001000,
+ MODE_E3_B0 = 0x00002000,
+ MODE_COS3 = 0x00004000,
+ MODE_COS6 = 0x00008000,
+ MODE_LITTLE_ENDIAN = 0x00010000,
+ MODE_BIG_ENDIAN = 0x00020000,
+};
+
+/* Init Blocks */
+enum {
+ BLOCK_ATC,
+ BLOCK_BRB1,
+ BLOCK_CCM,
+ BLOCK_CDU,
+ BLOCK_CFC,
+ BLOCK_CSDM,
+ BLOCK_CSEM,
+ BLOCK_DBG,
+ BLOCK_DMAE,
+ BLOCK_DORQ,
+ BLOCK_HC,
+ BLOCK_IGU,
+ BLOCK_MISC,
+ BLOCK_NIG,
+ BLOCK_PBF,
+ BLOCK_PGLUE_B,
+ BLOCK_PRS,
+ BLOCK_PXP2,
+ BLOCK_PXP,
+ BLOCK_QM,
+ BLOCK_SRC,
+ BLOCK_TCM,
+ BLOCK_TM,
+ BLOCK_TSDM,
+ BLOCK_TSEM,
+ BLOCK_UCM,
+ BLOCK_UPB,
+ BLOCK_USDM,
+ BLOCK_USEM,
+ BLOCK_XCM,
+ BLOCK_XPB,
+ BLOCK_XSDM,
+ BLOCK_XSEM,
+ BLOCK_MISC_AEU,
+ NUM_OF_INIT_BLOCKS
+};
+
+
+
+
+
+
+
+
+/* Vnics per mode */
+#define ECORE_PORT2_MODE_NUM_VNICS 4
+
+
+/* QM queue numbers */
+#define ECORE_ETH_Q 0
+#define ECORE_TOE_Q 3
+#define ECORE_TOE_ACK_Q 6
+#define ECORE_ISCSI_Q 9
+#define ECORE_ISCSI_ACK_Q 11
+#define ECORE_FCOE_Q 10
+
+/* Vnics per mode */
+#define ECORE_PORT4_MODE_NUM_VNICS 2
+
+/* COS offset for port1 in E3 B0 4port mode */
+#define ECORE_E3B0_PORT1_COS_OFFSET 3
+
+/* QM Register addresses */
+#define ECORE_Q_VOQ_REG_ADDR(pf_q_num)\
+ (QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
+#define ECORE_VOQ_Q_REG_ADDR(cos, pf_q_num)\
+ (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
+#define ECORE_Q_CMDQ_REG_ADDR(pf_q_num)\
+ (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
+
+/* extracts the QM queue number for the specified port and vnic */
+#define ECORE_PF_Q_NUM(q_num, port, vnic)\
+ ((((port) << 1) | (vnic)) * 16 + (q_num))
+
+
+/* Maps the specified queue to the specified COS */
+static inline void ecore_map_q_cos(struct bnx2x_softc *sc, uint32_t q_num, uint32_t new_cos)
+{
+ /* find current COS mapping */
+ uint32_t curr_cos = REG_RD(sc, QM_REG_QVOQIDX_0 + q_num * 4);
+
+ /* check if queue->COS mapping has changed */
+ if (curr_cos != new_cos) {
+ uint32_t num_vnics = ECORE_PORT2_MODE_NUM_VNICS;
+ uint32_t reg_addr, reg_bit_map, vnic;
+
+ /* update parameters for 4port mode */
+ if (INIT_MODE_FLAGS(sc) & MODE_PORT4) {
+ num_vnics = ECORE_PORT4_MODE_NUM_VNICS;
+ if (PORT_ID(sc)) {
+ curr_cos += ECORE_E3B0_PORT1_COS_OFFSET;
+ new_cos += ECORE_E3B0_PORT1_COS_OFFSET;
+ }
+ }
+
+ /* change queue mapping for each VNIC */
+ for (vnic = 0; vnic < num_vnics; vnic++) {
+ uint32_t pf_q_num =
+ ECORE_PF_Q_NUM(q_num, PORT_ID(sc), vnic);
+ uint32_t q_bit_map = 1 << (pf_q_num & 0x1f);
+
+ /* overwrite queue->VOQ mapping */
+ REG_WR(sc, ECORE_Q_VOQ_REG_ADDR(pf_q_num), new_cos);
+
+ /* clear queue bit from current COS bit map */
+ reg_addr = ECORE_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
+ reg_bit_map = REG_RD(sc, reg_addr);
+ REG_WR(sc, reg_addr, reg_bit_map & (~q_bit_map));
+
+ /* set queue bit in new COS bit map */
+ reg_addr = ECORE_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
+ reg_bit_map = REG_RD(sc, reg_addr);
+ REG_WR(sc, reg_addr, reg_bit_map | q_bit_map);
+
+ /* set/clear queue bit in command-queue bit map
+ (E2/E3A0 only, valid COS values are 0/1) */
+ if (!(INIT_MODE_FLAGS(sc) & MODE_E3_B0)) {
+ reg_addr = ECORE_Q_CMDQ_REG_ADDR(pf_q_num);
+ reg_bit_map = REG_RD(sc, reg_addr);
+ q_bit_map = 1 << (2 * (pf_q_num & 0xf));
+ reg_bit_map = new_cos ?
+ (reg_bit_map | q_bit_map) :
+ (reg_bit_map & (~q_bit_map));
+ REG_WR(sc, reg_addr, reg_bit_map);
+ }
+ }
+ }
+}
+
+/* Configures the QM according to the specified per-traffic-type COSes */
+static inline void ecore_dcb_config_qm(struct bnx2x_softc *sc, enum cos_mode mode,
+ struct priority_cos *traffic_cos)
+{
+ ecore_map_q_cos(sc, ECORE_FCOE_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
+ ecore_map_q_cos(sc, ECORE_ISCSI_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+ ecore_map_q_cos(sc, ECORE_ISCSI_ACK_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+ if (mode != STATIC_COS) {
+ /* required only in OVERRIDE_COS mode */
+ ecore_map_q_cos(sc, ECORE_ETH_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ ecore_map_q_cos(sc, ECORE_TOE_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ ecore_map_q_cos(sc, ECORE_TOE_ACK_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ }
+}
+
+
+/*
+ * congestion management port init api description
+ * the api works as follows:
+ * the driver should pass the cmng_init_input struct, the port_init function
+ * will prepare the required internal ram structure which will be passed back
+ * to the driver (cmng_init) that will write it into the internal ram.
+ *
+ * IMPORTANT REMARKS:
+ * 1. the cmng_init struct does not represent the contiguous internal ram
+ * structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET
+ * offset in order to write the port sub struct and the
+ * PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other
+ * words - don't use memcpy!).
+ * 2. although the cmng_init struct is filled for the maximal vnic number
+ * possible, the driver should only write the valid vnics into the internal
+ * ram according to the appropriate port mode.
+ */
+#define BITS_TO_BYTES(x) ((x)/8)
+
+/* CMNG constants, as derived from system spec calculations */
+
+/* default MIN rate in case VNIC min rate is configured to zero- 100Mbps */
+#define DEF_MIN_RATE 100
+
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
+
+/*
+ * number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer
+ */
+#define QM_ARB_BYTES 160000
+
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+
+/*
+ * how many bytes above threshold for
+ * the minimal credit of Min algorithm
+ */
+#define MIN_ABOVE_THRESH 32768
+
+/*
+ * Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair
+ */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+
+/* Memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
+#define SAFC_TIMEOUT_USEC 52
+
+#define SDM_TICKS 4
+
+
+static inline void ecore_init_max(const struct cmng_init_input *input_data,
+ uint32_t r_param, struct cmng_init *ram_data)
+{
+ uint32_t vnic;
+ struct cmng_vnic *vdata = &ram_data->vnic;
+ struct cmng_struct_per_port *pdata = &ram_data->port;
+ /*
+ * rate shaping per-port variables
+ * 100 micro seconds in SDM ticks = 25
+ * since each tick is 4 microSeconds
+ */
+
+ pdata->rs_vars.rs_periodic_timeout =
+ RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;
+
+ /* this is the threshold below which no timer arming will occur.
+ * 1.25 coefficient is for the threshold to be a little bigger
+ * then the real time to compensate for timer in-accuracy
+ */
+ pdata->rs_vars.rs_threshold =
+ (5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;
+
+ /* rate shaping per-vnic variables */
+ for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
+ /* global vnic counter */
+ vdata->vnic_max_rate[vnic].vn_counter.rate =
+ input_data->vnic_max_rate[vnic];
+ /*
+ * maximal Mbps for this vnic
+ * the quota in each timer period - number of bytes
+ * transmitted in this period
+ */
+ vdata->vnic_max_rate[vnic].vn_counter.quota =
+ RS_PERIODIC_TIMEOUT_USEC *
+ (uint32_t)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
+ }
+
+}
+
+static inline void ecore_init_max_per_vn(uint16_t vnic_max_rate,
+ struct rate_shaping_vars_per_vn *ram_data)
+{
+ /* global vnic counter */
+ ram_data->vn_counter.rate = vnic_max_rate;
+
+ /*
+ * maximal Mbps for this vnic
+ * the quota in each timer period - number of bytes
+ * transmitted in this period
+ */
+ ram_data->vn_counter.quota =
+ RS_PERIODIC_TIMEOUT_USEC * (uint32_t)vnic_max_rate / 8;
+}
+
+static inline void ecore_init_min(const struct cmng_init_input *input_data,
+ uint32_t r_param, struct cmng_init *ram_data)
+{
+ uint32_t vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
+ struct cmng_vnic *vdata = &ram_data->vnic;
+ struct cmng_struct_per_port *pdata = &ram_data->port;
+
+ /* this is the resolution of the fairness timer */
+ fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+
+ /*
+ * fairness per-port variables
+ * for 10G it is 1000usec. for 1G it is 10000usec.
+ */
+ tFair = T_FAIR_COEF / input_data->port_rate;
+
+ /* this is the threshold below which we won't arm the timer anymore */
+ pdata->fair_vars.fair_threshold = QM_ARB_BYTES;
+
+ /*
+ * we multiply by 1e3/8 to get bytes/msec. We don't want the credits
+ * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution)
+ */
+ pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;
+
+ /* since each tick is 4 microSeconds */
+ pdata->fair_vars.fairness_timeout =
+ fair_periodic_timeout_usec / SDM_TICKS;
+
+ /* calculate sum of weights */
+ vnicWeightSum = 0;
+
+ for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++)
+ vnicWeightSum += input_data->vnic_min_rate[vnic];
+
+ /* global vnic counter */
+ if (vnicWeightSum > 0) {
+ /* fairness per-vnic variables */
+ for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
+ /*
+ * this is the credit for each period of the fairness
+ * algorithm - number of bytes in T_FAIR (this vnic
+ * share of the port rate)
+ */
+ vdata->vnic_min_rate[vnic].vn_credit_delta =
+ ((uint32_t)(input_data->vnic_min_rate[vnic]) * 100 *
+ (T_FAIR_COEF / (8 * 100 * vnicWeightSum)));
+ if (vdata->vnic_min_rate[vnic].vn_credit_delta <
+ pdata->fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH) {
+ vdata->vnic_min_rate[vnic].vn_credit_delta =
+ pdata->fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH;
+ }
+ }
+ }
+}
+
+static inline void ecore_init_fw_wrr(const struct cmng_init_input *input_data,
+ struct cmng_init *ram_data)
+{
+ uint32_t vnic, cos;
+ uint32_t cosWeightSum = 0;
+ struct cmng_vnic *vdata = &ram_data->vnic;
+ struct cmng_struct_per_port *pdata = &ram_data->port;
+
+ for (cos = 0; cos < MAX_COS_NUMBER; cos++)
+ cosWeightSum += input_data->cos_min_rate[cos];
+
+ if (cosWeightSum > 0) {
+
+ for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
+ /*
+ * Since cos and vnic shouldn't work together the rate
+ * to divide between the coses is the port rate.
+ */
+ uint32_t *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta;
+ for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
+ /*
+ * this is the credit for each period of
+ * the fairness algorithm - number of bytes
+ * in T_FAIR (this cos share of the vnic rate)
+ */
+ ccd[cos] =
+ ((uint32_t)input_data->cos_min_rate[cos] * 100 *
+ (T_FAIR_COEF / (8 * 100 * cosWeightSum)));
+ if (ccd[cos] < pdata->fair_vars.fair_threshold
+ + MIN_ABOVE_THRESH) {
+ ccd[cos] =
+ pdata->fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH;
+ }
+ }
+ }
+ }
+}
+
+static inline void ecore_init_safc(struct cmng_init *ram_data)
+{
+ /* in microSeconds */
+ ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
+}
+
+/* Congestion management port init */
+static inline void ecore_init_cmng(const struct cmng_init_input *input_data,
+ struct cmng_init *ram_data)
+{
+ uint32_t r_param;
+ ECORE_MEMSET(ram_data, 0,sizeof(struct cmng_init));
+
+ ram_data->port.flags = input_data->flags;
+
+ /*
+ * number of bytes transmitted in a rate of 10Gbps
+ * in one usec = 1.25KB.
+ */
+ r_param = BITS_TO_BYTES(input_data->port_rate);
+ ecore_init_max(input_data, r_param, ram_data);
+ ecore_init_min(input_data, r_param, ram_data);
+ ecore_init_fw_wrr(input_data, ram_data);
+ ecore_init_safc(ram_data);
+}
+
+
+
+
+/* Returns the index of start or end of a specific block stage in ops array*/
+#define BLOCK_OPS_IDX(block, stage, end) \
+ (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
+
+
+#define INITOP_SET 0 /* set the HW directly */
+#define INITOP_CLEAR 1 /* clear the HW directly */
+#define INITOP_INIT 2 /* set the init-value array */
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+struct ilt_line {
+ ecore_dma_addr_t page_mapping;
+ void *page;
+ uint32_t size;
+};
+
+struct ilt_client_info {
+ uint32_t page_size;
+ uint16_t start;
+ uint16_t end;
+ uint16_t client_num;
+ uint16_t flags;
+#define ILT_CLIENT_SKIP_INIT 0x1
+#define ILT_CLIENT_SKIP_MEM 0x2
+};
+
+struct ecore_ilt {
+ uint32_t start_line;
+ struct ilt_line *lines;
+ struct ilt_client_info clients[4];
+#define ILT_CLIENT_CDU 0
+#define ILT_CLIENT_QM 1
+#define ILT_CLIENT_SRC 2
+#define ILT_CLIENT_TM 3
+};
+
+/****************************************************************************
+* SRC configuration
+****************************************************************************/
+struct src_ent {
+ uint8_t opaque[56];
+ uint64_t next;
+};
+
+/****************************************************************************
+* Parity configuration
+****************************************************************************/
+#define BLOCK_PRTY_INFO(block, en_mask, m1h, m2, m3) \
+{ \
+ block##_REG_##block##_PRTY_MASK, \
+ block##_REG_##block##_PRTY_STS_CLR, \
+ en_mask, {m1h, m2, m3}, #block \
+}
+
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1h, m2, m3) \
+{ \
+ block##_REG_##block##_PRTY_MASK_0, \
+ block##_REG_##block##_PRTY_STS_CLR_0, \
+ en_mask, {m1h, m2, m3}, #block"_0" \
+}
+
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1h, m2, m3) \
+{ \
+ block##_REG_##block##_PRTY_MASK_1, \
+ block##_REG_##block##_PRTY_STS_CLR_1, \
+ en_mask, {m1h, m2, m3}, #block"_1" \
+}
+
+static const struct {
+ uint32_t mask_addr;
+ uint32_t sts_clr_addr;
+ uint32_t en_mask; /* Mask to enable parity attentions */
+ struct {
+ uint32_t e1h; /* 57711 */
+ uint32_t e2; /* 57712 */
+ uint32_t e3; /* 578xx */
+ } reg_mask; /* Register mask (all valid bits) */
+ char name[8]; /* Block's longest name is 7 characters long
+ * (name + suffix)
+ */
+} ecore_blocks_parity_data[] = {
+ /* bit 19 masked */
+ /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+ /* bit 5,18,20-31 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+ /* bit 5 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
+ /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
+ /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+
+ /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
+ * want to handle "system kill" flow at the moment.
+ */
+ BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x7ffffff,
+ 0x7ffffff),
+ BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7ff, 0x1ffffff),
+ BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0, 0),
+ BLOCK_PRTY_INFO(NIG, 0xffffffff, 0xffffffff, 0, 0),
+ BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0xff, 0xffff),
+ BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(QM, 0, 0xfff, 0xfff, 0xfff),
+ BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0x3, 0x3),
+ BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
+ {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
+ {0xf, 0xf, 0xf}, "UPB"},
+ {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
+ {0xf, 0xf, 0xf}, "XPB"},
+ BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
+ BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0x3f),
+ BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
+ BLOCK_PRTY_INFO(PBF, 0, 0x3ffff, 0xfffff, 0xfffffff),
+ BLOCK_PRTY_INFO(TM, 0, 0x7f, 0x7f, 0x7f),
+ BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(TCM, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(CCM, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(UCM, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(XCM, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
+ BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(TSEM, 0, 0x1f, 0x3f, 0x3f),
+ BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(USEM, 0, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(CSEM, 0, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(XSEM, 0, 0x1f, 0x3f, 0x3f),
+};
+
+
+/* [28] MCP Latched rom_parity
+ * [29] MCP Latched ump_rx_parity
+ * [30] MCP Latched ump_tx_parity
+ * [31] MCP Latched scpad_parity
+ */
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+/* Below registers control the MCP parity attention output. When
+ * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
+ * enabled, when cleared - disabled.
+ */
+static const uint32_t mcp_attn_ctl_regs[] = {
+ MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+ MISC_REG_AEU_ENABLE4_NIG_0,
+ MISC_REG_AEU_ENABLE4_PXP_0,
+ MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+ MISC_REG_AEU_ENABLE4_NIG_1,
+ MISC_REG_AEU_ENABLE4_PXP_1
+};
+
+static inline void ecore_set_mcp_parity(struct bnx2x_softc *sc, uint8_t enable)
+{
+ uint32_t i;
+ uint32_t reg_val;
+
+ for (i = 0; i < ARRSIZE(mcp_attn_ctl_regs); i++) {
+ reg_val = REG_RD(sc, mcp_attn_ctl_regs[i]);
+
+ if (enable)
+ reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ else
+ reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+
+ REG_WR(sc, mcp_attn_ctl_regs[i], reg_val);
+ }
+}
+
+static inline uint32_t ecore_parity_reg_mask(struct bnx2x_softc *sc, int idx)
+{
+ if (CHIP_IS_E1H(sc))
+ return ecore_blocks_parity_data[idx].reg_mask.e1h;
+ else if (CHIP_IS_E2(sc))
+ return ecore_blocks_parity_data[idx].reg_mask.e2;
+ else /* CHIP_IS_E3 */
+ return ecore_blocks_parity_data[idx].reg_mask.e3;
+}
+
+static inline void ecore_disable_blocks_parity(struct bnx2x_softc *sc)
+{
+ uint32_t i;
+
+ for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
+ uint32_t dis_mask = ecore_parity_reg_mask(sc, i);
+
+ if (dis_mask) {
+ REG_WR(sc, ecore_blocks_parity_data[i].mask_addr,
+ dis_mask);
+ ECORE_MSG("Setting parity mask "
+ "for %s to\t\t0x%x",
+ ecore_blocks_parity_data[i].name, dis_mask);
+ }
+ }
+
+ /* Disable MCP parity attentions */
+ ecore_set_mcp_parity(sc, FALSE);
+}
+
+/**
+ * Clear the parity error status registers.
+ */
+static inline void ecore_clear_blocks_parity(struct bnx2x_softc *sc)
+{
+ uint32_t i;
+ uint32_t reg_val, mcp_aeu_bits =
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
+
+ /* Clear SEM_FAST parities */
+ REG_WR(sc, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(sc, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(sc, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(sc, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+
+ for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
+ uint32_t reg_mask = ecore_parity_reg_mask(sc, i);
+
+ if (reg_mask) {
+ reg_val = REG_RD(sc, ecore_blocks_parity_data[i].
+ sts_clr_addr);
+ if (reg_val & reg_mask)
+ ECORE_MSG("Parity errors in %s: 0x%x",
+ ecore_blocks_parity_data[i].name,
+ reg_val & reg_mask);
+ }
+ }
+
+ /* Check if there were parity attentions in MCP */
+ reg_val = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_MCP);
+ if (reg_val & mcp_aeu_bits)
+ ECORE_MSG("Parity error in MCP: 0x%x",
+ reg_val & mcp_aeu_bits);
+
+ /* Clear parity attentions in MCP:
+ * [7] clears Latched rom_parity
+ * [8] clears Latched ump_rx_parity
+ * [9] clears Latched ump_tx_parity
+ * [10] clears Latched scpad_parity (both ports)
+ */
+ REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
+}
+
+static inline void ecore_enable_blocks_parity(struct bnx2x_softc *sc)
+{
+ uint32_t i;
+
+ for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
+ uint32_t reg_mask = ecore_parity_reg_mask(sc, i);
+
+ if (reg_mask)
+ REG_WR(sc, ecore_blocks_parity_data[i].mask_addr,
+ ecore_blocks_parity_data[i].en_mask & reg_mask);
+ }
+
+ /* Enable MCP parity attentions */
+ ecore_set_mcp_parity(sc, TRUE);
+}
+
+
+#endif /* ECORE_INIT_H */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_init_ops.h b/src/spdk/dpdk/drivers/net/bnx2x/ecore_init_ops.h
new file mode 100644
index 00000000..2b003afb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_init_ops.h
@@ -0,0 +1,863 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef ECORE_INIT_OPS_H
+#define ECORE_INIT_OPS_H
+
+static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t *zbuf, int len);
+static void ecore_write_dmae_phys_len(struct bnx2x_softc *sc,
+ ecore_dma_addr_t phys_addr, uint32_t addr,
+ uint32_t len);
+
+static void ecore_init_str_wr(struct bnx2x_softc *sc, uint32_t addr,
+ const uint32_t *data, uint32_t len)
+{
+ uint32_t i;
+
+ for (i = 0; i < len; i++)
+ REG_WR(sc, addr + i*4, data[i]);
+}
+
+static void ecore_write_big_buf(struct bnx2x_softc *sc, uint32_t addr, uint32_t len)
+{
+ if (DMAE_READY(sc))
+ ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);
+
+ else ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
+}
+
+static void ecore_init_fill(struct bnx2x_softc *sc, uint32_t addr, int fill,
+ uint32_t len)
+{
+ uint32_t buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
+ uint32_t buf_len32 = buf_len/4;
+ uint32_t i;
+
+ ECORE_MEMSET(GUNZIP_BUF(sc), (uint8_t)fill, buf_len);
+
+ for (i = 0; i < len; i += buf_len32) {
+ uint32_t cur_len = min(buf_len32, len - i);
+
+ ecore_write_big_buf(sc, addr + i*4, cur_len);
+ }
+}
+
+static void ecore_write_big_buf_wb(struct bnx2x_softc *sc, uint32_t addr, uint32_t len)
+{
+ if (DMAE_READY(sc))
+ ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);
+
+ else ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
+}
+
+static void ecore_init_wr_64(struct bnx2x_softc *sc, uint32_t addr,
+ const uint32_t *data, uint32_t len64)
+{
+ uint32_t buf_len32 = FW_BUF_SIZE/4;
+ uint32_t len = len64*2;
+ uint64_t data64 = 0;
+ uint32_t i;
+
+ /* 64 bit value is in a blob: first low DWORD, then high DWORD */
+ data64 = HILO_U64((*(data + 1)), (*data));
+
+ len64 = min((uint32_t)(FW_BUF_SIZE/8), len64);
+ for (i = 0; i < len64; i++) {
+ uint64_t *pdata = ((uint64_t *)(GUNZIP_BUF(sc))) + i;
+
+ *pdata = data64;
+ }
+
+ for (i = 0; i < len; i += buf_len32) {
+ uint32_t cur_len = min(buf_len32, len - i);
+
+ ecore_write_big_buf_wb(sc, addr + i*4, cur_len);
+ }
+}
+
+/*********************************************************
+ There are different blobs for each PRAM section.
+ In addition, each blob write operation is divided into a few operations
+ in order to decrease the amount of phys. contiguous buffer needed.
+ Thus, when we select a blob the address may be with some offset
+ from the beginning of PRAM section.
+ The same holds for the INT_TABLE sections.
+**********************************************************/
+#define IF_IS_INT_TABLE_ADDR(base, addr) \
+ if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
+
+#define IF_IS_PRAM_ADDR(base, addr) \
+ if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
+
+static const uint8_t *ecore_sel_blob(struct bnx2x_softc *sc, uint32_t addr,
+ const uint8_t *data)
+{
+ IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
+ data = INIT_TSEM_INT_TABLE_DATA(sc);
+ else
+ IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
+ data = INIT_CSEM_INT_TABLE_DATA(sc);
+ else
+ IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
+ data = INIT_USEM_INT_TABLE_DATA(sc);
+ else
+ IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
+ data = INIT_XSEM_INT_TABLE_DATA(sc);
+ else
+ IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
+ data = INIT_TSEM_PRAM_DATA(sc);
+ else
+ IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
+ data = INIT_CSEM_PRAM_DATA(sc);
+ else
+ IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
+ data = INIT_USEM_PRAM_DATA(sc);
+ else
+ IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
+ data = INIT_XSEM_PRAM_DATA(sc);
+
+ return data;
+}
+
+static void ecore_init_wr_wb(struct bnx2x_softc *sc, uint32_t addr,
+ const uint32_t *data, uint32_t len)
+{
+ if (DMAE_READY(sc))
+ VIRT_WR_DMAE_LEN(sc, data, addr, len, 0);
+
+ else ecore_init_str_wr(sc, addr, data, len);
+}
+
+static void ecore_wr_64(struct bnx2x_softc *sc, uint32_t reg, uint32_t val_lo,
+ uint32_t val_hi)
+{
+ uint32_t wb_write[2];
+
+ wb_write[0] = val_lo;
+ wb_write[1] = val_hi;
+ REG_WR_DMAE_LEN(sc, reg, wb_write, 2);
+}
+
+static void ecore_init_wr_zp(struct bnx2x_softc *sc, uint32_t addr, uint32_t len,
+ uint32_t blob_off)
+{
+ const uint8_t *data = NULL;
+ int rc;
+ uint32_t i;
+
+ data = ecore_sel_blob(sc, addr, data) + blob_off*4;
+
+ rc = ecore_gunzip(sc, data, len);
+ if (rc)
+ return;
+
+ /* gunzip_outlen is in dwords */
+ len = GUNZIP_OUTLEN(sc);
+ for (i = 0; i < len; i++)
+ ((uint32_t *)GUNZIP_BUF(sc))[i] = (uint32_t)
+ ECORE_CPU_TO_LE32(((uint32_t *)GUNZIP_BUF(sc))[i]);
+
+ ecore_write_big_buf_wb(sc, addr, len);
+}
+
+static void ecore_init_block(struct bnx2x_softc *sc, uint32_t block, uint32_t stage)
+{
+ uint16_t op_start =
+ INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
+ STAGE_START)];
+ uint16_t op_end =
+ INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
+ STAGE_END)];
+ const union init_op *op;
+ uint32_t op_idx, op_type, addr, len;
+ const uint32_t *data, *data_base;
+
+ /* If empty block */
+ if (op_start == op_end)
+ return;
+
+ data_base = INIT_DATA(sc);
+
+ for (op_idx = op_start; op_idx < op_end; op_idx++) {
+
+ op = (const union init_op *)&(INIT_OPS(sc)[op_idx]);
+ /* Get generic data */
+ op_type = op->raw.op;
+ addr = op->raw.offset;
+ /* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and
+ * OP_WR64 (we assume that op_arr_write and op_write have the
+ * same structure).
+ */
+ len = op->arr_wr.data_len;
+ data = data_base + op->arr_wr.data_off;
+
+ switch (op_type) {
+ case OP_RD:
+ REG_RD(sc, addr);
+ break;
+ case OP_WR:
+ REG_WR(sc, addr, op->write.val);
+ break;
+ case OP_SW:
+ ecore_init_str_wr(sc, addr, data, len);
+ break;
+ case OP_WB:
+ ecore_init_wr_wb(sc, addr, data, len);
+ break;
+ case OP_ZR:
+ case OP_WB_ZR:
+ ecore_init_fill(sc, addr, 0, op->zero.len);
+ break;
+ case OP_ZP:
+ ecore_init_wr_zp(sc, addr, len, op->arr_wr.data_off);
+ break;
+ case OP_WR_64:
+ ecore_init_wr_64(sc, addr, data, len);
+ break;
+ case OP_IF_MODE_AND:
+ /* if any of the flags doesn't match, skip the
+ * conditional block.
+ */
+ if ((INIT_MODE_FLAGS(sc) &
+ op->if_mode.mode_bit_map) !=
+ op->if_mode.mode_bit_map)
+ op_idx += op->if_mode.cmd_offset;
+ break;
+ case OP_IF_MODE_OR:
+ /* if all the flags don't match, skip the conditional
+ * block.
+ */
+ if ((INIT_MODE_FLAGS(sc) &
+ op->if_mode.mode_bit_map) == 0)
+ op_idx += op->if_mode.cmd_offset;
+ break;
+ /* the following opcodes are unused at the moment. */
+ case OP_IF_PHASE:
+ case OP_RT:
+ case OP_DELAY:
+ case OP_VERIFY:
+ default:
+ /* Should never get here! */
+
+ break;
+ }
+ }
+}
+
+
+/****************************************************************************
+* PXP Arbiter
+****************************************************************************/
+/*
+ * This code configures the PCI read/write arbiter
+ * which implements a weighted round robin
+ * between the virtual queues in the chip.
+ *
+ * The values were derived for each PCI max payload and max request size.
+ * since max payload and max request size are only known at run time,
+ * this is done as a separate init stage.
+ */
+
+#define NUM_WR_Q 13
+#define NUM_RD_Q 29
+#define MAX_RD_ORD 3
+#define MAX_WR_ORD 2
+
+/* configuration for one arbiter queue */
+struct arb_line {
+ int l;
+ int add;
+ int ubound;
+};
+
+/* derived configuration for each read queue for each max request size */
+static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
+/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
+ { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
+ { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
+ { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
+ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
+};
+
+/* derived configuration for each write queue for each max request size */
+static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
+/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
+ { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
+ { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
+ { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
+ { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
+ { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
+ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
+ { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
+ { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
+/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
+ { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
+ { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
+ { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
+};
+
+/* register addresses for read queues */
+static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
+/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
+ PXP2_REG_RQ_BW_RD_UBOUND0},
+ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
+ PXP2_REG_PSWRQ_BW_UB1},
+ {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
+ PXP2_REG_PSWRQ_BW_UB2},
+ {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
+ PXP2_REG_PSWRQ_BW_UB3},
+ {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
+ PXP2_REG_RQ_BW_RD_UBOUND4},
+ {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
+ PXP2_REG_RQ_BW_RD_UBOUND5},
+ {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
+ PXP2_REG_PSWRQ_BW_UB6},
+ {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
+ PXP2_REG_PSWRQ_BW_UB7},
+ {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
+ PXP2_REG_PSWRQ_BW_UB8},
+/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
+ PXP2_REG_PSWRQ_BW_UB9},
+ {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
+ PXP2_REG_PSWRQ_BW_UB10},
+ {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
+ PXP2_REG_PSWRQ_BW_UB11},
+ {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
+ PXP2_REG_RQ_BW_RD_UBOUND12},
+ {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
+ PXP2_REG_RQ_BW_RD_UBOUND13},
+ {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
+ PXP2_REG_RQ_BW_RD_UBOUND14},
+ {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
+ PXP2_REG_RQ_BW_RD_UBOUND15},
+ {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
+ PXP2_REG_RQ_BW_RD_UBOUND16},
+ {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
+ PXP2_REG_RQ_BW_RD_UBOUND17},
+ {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
+ PXP2_REG_RQ_BW_RD_UBOUND18},
+/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
+ PXP2_REG_RQ_BW_RD_UBOUND19},
+ {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
+ PXP2_REG_RQ_BW_RD_UBOUND20},
+ {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
+ PXP2_REG_RQ_BW_RD_UBOUND22},
+ {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
+ PXP2_REG_RQ_BW_RD_UBOUND23},
+ {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
+ PXP2_REG_RQ_BW_RD_UBOUND24},
+ {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
+ PXP2_REG_RQ_BW_RD_UBOUND25},
+ {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
+ PXP2_REG_RQ_BW_RD_UBOUND26},
+ {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
+ PXP2_REG_RQ_BW_RD_UBOUND27},
+ {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
+ PXP2_REG_PSWRQ_BW_UB28}
+};
+
+/* register addresses for write queues */
+static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
+/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
+ PXP2_REG_PSWRQ_BW_UB1},
+ {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
+ PXP2_REG_PSWRQ_BW_UB2},
+ {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
+ PXP2_REG_PSWRQ_BW_UB3},
+ {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
+ PXP2_REG_PSWRQ_BW_UB6},
+ {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
+ PXP2_REG_PSWRQ_BW_UB7},
+ {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
+ PXP2_REG_PSWRQ_BW_UB8},
+ {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
+ PXP2_REG_PSWRQ_BW_UB9},
+ {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
+ PXP2_REG_PSWRQ_BW_UB10},
+ {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
+ PXP2_REG_PSWRQ_BW_UB11},
+/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
+ PXP2_REG_PSWRQ_BW_UB28},
+ {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
+ PXP2_REG_RQ_BW_WR_UBOUND29},
+ {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
+ PXP2_REG_RQ_BW_WR_UBOUND30}
+};
+
+static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order,
+ int w_order)
+{
+ uint32_t val, i;
+
+ if (r_order > MAX_RD_ORD) {
+ ECORE_MSG("read order of %d order adjusted to %d",
+ r_order, MAX_RD_ORD);
+ r_order = MAX_RD_ORD;
+ }
+ if (w_order > MAX_WR_ORD) {
+ ECORE_MSG("write order of %d order adjusted to %d",
+ w_order, MAX_WR_ORD);
+ w_order = MAX_WR_ORD;
+ }
+ if (CHIP_REV_IS_FPGA(sc)) {
+ ECORE_MSG("write order adjusted to 1 for FPGA");
+ w_order = 0;
+ }
+ ECORE_MSG("read order %d write order %d", r_order, w_order);
+
+ for (i = 0; i < NUM_RD_Q-1; i++) {
+ REG_WR(sc, read_arb_addr[i].l, read_arb_data[i][r_order].l);
+ REG_WR(sc, read_arb_addr[i].add,
+ read_arb_data[i][r_order].add);
+ REG_WR(sc, read_arb_addr[i].ubound,
+ read_arb_data[i][r_order].ubound);
+ }
+
+ for (i = 0; i < NUM_WR_Q-1; i++) {
+ if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
+ (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
+
+ REG_WR(sc, write_arb_addr[i].l,
+ write_arb_data[i][w_order].l);
+
+ REG_WR(sc, write_arb_addr[i].add,
+ write_arb_data[i][w_order].add);
+
+ REG_WR(sc, write_arb_addr[i].ubound,
+ write_arb_data[i][w_order].ubound);
+ } else {
+
+ val = REG_RD(sc, write_arb_addr[i].l);
+ REG_WR(sc, write_arb_addr[i].l,
+ val | (write_arb_data[i][w_order].l << 10));
+
+ val = REG_RD(sc, write_arb_addr[i].add);
+ REG_WR(sc, write_arb_addr[i].add,
+ val | (write_arb_data[i][w_order].add << 10));
+
+ val = REG_RD(sc, write_arb_addr[i].ubound);
+ REG_WR(sc, write_arb_addr[i].ubound,
+ val | (write_arb_data[i][w_order].ubound << 7));
+ }
+ }
+
+ val = write_arb_data[NUM_WR_Q-1][w_order].add;
+ val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
+ val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
+ REG_WR(sc, PXP2_REG_PSWRQ_BW_RD, val);
+
+ val = read_arb_data[NUM_RD_Q-1][r_order].add;
+ val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
+ val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
+ REG_WR(sc, PXP2_REG_PSWRQ_BW_WR, val);
+
+ REG_WR(sc, PXP2_REG_RQ_WR_MBS0, w_order);
+ REG_WR(sc, PXP2_REG_RQ_WR_MBS1, w_order);
+ REG_WR(sc, PXP2_REG_RQ_RD_MBS0, r_order);
+ REG_WR(sc, PXP2_REG_RQ_RD_MBS1, r_order);
+
+ if (CHIP_IS_E1H(sc) && (r_order == MAX_RD_ORD))
+ REG_WR(sc, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
+
+ if (CHIP_IS_E3(sc))
+ REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
+ else if (CHIP_IS_E2(sc))
+ REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
+ else
+ REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
+
+ /* MPS w_order optimal TH presently TH
+ * 128 0 0 2
+ * 256 1 1 3
+ * >=512 2 2 3
+ */
+ /* DMAE is special */
+ if (!CHIP_IS_E1H(sc)) {
+ /* E2 can use optimal TH */
+ val = w_order;
+ REG_WR(sc, PXP2_REG_WR_DMAE_MPS, val);
+ } else {
+ val = ((w_order == 0) ? 2 : 3);
+ REG_WR(sc, PXP2_REG_WR_DMAE_MPS, 2);
+ }
+
+ REG_WR(sc, PXP2_REG_WR_HC_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_USDM_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_CSDM_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_TSDM_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_XSDM_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_QM_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_TM_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_SRC_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_DBG_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_CDU_MPS, val);
+
+ /* Validate number of tags suppoted by device */
+#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
+ val = REG_RD(sc, PCIE_REG_PCIER_TL_HDR_FC_ST);
+ val &= 0xFF;
+ if (val <= 0x20)
+ REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
+}
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+/*
+ * This codes hides the low level HW interaction for ILT management and
+ * configuration. The API consists of a shadow ILT table which is set by the
+ * driver and a set of routines to use it to configure the HW.
+ *
+ */
+
+/* ILT HW init operations */
+
+/* ILT memory management operations */
+#define ILT_MEMOP_ALLOC 0
+#define ILT_MEMOP_FREE 1
+
+/* the phys address is shifted right 12 bits and has an added
+ * 1=valid bit added to the 53rd bit
+ * then since this is a wide register(TM)
+ * we split it into two 32 bit writes
+ */
+#define ILT_ADDR1(x) ((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF))
+#define ILT_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)x >> 44)))
+#define ILT_RANGE(f, l) (((l) << 10) | f)
+
+static int ecore_ilt_line_mem_op(struct bnx2x_softc *sc,
+ struct ilt_line *line, uint32_t size, uint8_t memop, int cli_num, int i)
+{
+#define ECORE_ILT_NAMESIZE 10
+ char str[ECORE_ILT_NAMESIZE];
+
+ if (memop == ILT_MEMOP_FREE) {
+ ECORE_ILT_FREE(line->page, line->page_mapping, line->size);
+ return 0;
+ }
+ snprintf(str, ECORE_ILT_NAMESIZE, "ILT_%d_%d", cli_num, i);
+ ECORE_ILT_ZALLOC(line->page, &line->page_mapping, size, str);
+ if (!line->page)
+ return -1;
+ line->size = size;
+ return 0;
+}
+
+
+static int ecore_ilt_client_mem_op(struct bnx2x_softc *sc, int cli_num,
+ uint8_t memop)
+{
+ int i, rc = 0;
+ struct ecore_ilt *ilt = SC_ILT(sc);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ if (!ilt || !ilt->lines)
+ return -1;
+
+ if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
+ return 0;
+
+ for (i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
+ rc = ecore_ilt_line_mem_op(sc, &ilt->lines[i],
+ ilt_cli->page_size, memop, cli_num, i);
+ }
+ return rc;
+}
+
+static inline int ecore_ilt_mem_op_cnic(struct bnx2x_softc *sc, uint8_t memop)
+{
+ int rc = 0;
+
+ if (CONFIGURE_NIC_MODE(sc))
+ rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop);
+ if (!rc)
+ rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_TM, memop);
+
+ return rc;
+}
+
+static int ecore_ilt_mem_op(struct bnx2x_softc *sc, uint8_t memop)
+{
+ int rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_CDU, memop);
+ if (!rc)
+ rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_QM, memop);
+ if (!rc && CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc))
+ rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop);
+
+ return rc;
+}
+
+static void ecore_ilt_line_wr(struct bnx2x_softc *sc, int abs_idx,
+ ecore_dma_addr_t page_mapping)
+{
+ uint32_t reg;
+
+ reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
+
+ ecore_wr_64(sc, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
+}
+
+static void ecore_ilt_line_init_op(struct bnx2x_softc *sc,
+ struct ecore_ilt *ilt, int idx, uint8_t initop)
+{
+ ecore_dma_addr_t null_mapping;
+ int abs_idx = ilt->start_line + idx;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ ecore_ilt_line_wr(sc, abs_idx, ilt->lines[idx].page_mapping);
+ break;
+ case INITOP_CLEAR:
+ null_mapping = 0;
+ ecore_ilt_line_wr(sc, abs_idx, null_mapping);
+ break;
+ }
+}
+
+static void ecore_ilt_boundry_init_op(struct bnx2x_softc *sc,
+ struct ilt_client_info *ilt_cli,
+ uint32_t ilt_start)
+{
+ uint32_t start_reg = 0;
+ uint32_t end_reg = 0;
+
+ /* The boundary is either SET or INIT,
+ CLEAR => SET and for now SET ~~ INIT */
+
+ /* find the appropriate regs */
+ switch (ilt_cli->client_num) {
+ case ILT_CLIENT_CDU:
+ start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
+ break;
+ case ILT_CLIENT_QM:
+ start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_QM_LAST_ILT;
+ break;
+ case ILT_CLIENT_SRC:
+ start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
+ break;
+ case ILT_CLIENT_TM:
+ start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_TM_LAST_ILT;
+ break;
+ }
+ REG_WR(sc, start_reg, (ilt_start + ilt_cli->start));
+ REG_WR(sc, end_reg, (ilt_start + ilt_cli->end));
+}
+
+static void ecore_ilt_client_init_op_ilt(struct bnx2x_softc *sc,
+ struct ecore_ilt *ilt,
+ struct ilt_client_info *ilt_cli,
+ uint8_t initop)
+{
+ int i;
+
+ if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+ return;
+
+ for (i = ilt_cli->start; i <= ilt_cli->end; i++)
+ ecore_ilt_line_init_op(sc, ilt, i, initop);
+
+ /* init/clear the ILT boundries */
+ ecore_ilt_boundry_init_op(sc, ilt_cli, ilt->start_line);
+}
+
+static void ecore_ilt_client_init_op(struct bnx2x_softc *sc,
+ struct ilt_client_info *ilt_cli, uint8_t initop)
+{
+ struct ecore_ilt *ilt = SC_ILT(sc);
+
+ ecore_ilt_client_init_op_ilt(sc, ilt, ilt_cli, initop);
+}
+
+static void ecore_ilt_client_id_init_op(struct bnx2x_softc *sc,
+ int cli_num, uint8_t initop)
+{
+ struct ecore_ilt *ilt = SC_ILT(sc);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ ecore_ilt_client_init_op(sc, ilt_cli, initop);
+}
+
+static inline void ecore_ilt_init_op_cnic(struct bnx2x_softc *sc, uint8_t initop)
+{
+ if (CONFIGURE_NIC_MODE(sc))
+ ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop);
+ ecore_ilt_client_id_init_op(sc, ILT_CLIENT_TM, initop);
+}
+
+static void ecore_ilt_init_op(struct bnx2x_softc *sc, uint8_t initop)
+{
+ ecore_ilt_client_id_init_op(sc, ILT_CLIENT_CDU, initop);
+ ecore_ilt_client_id_init_op(sc, ILT_CLIENT_QM, initop);
+ if (CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc))
+ ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop);
+}
+
+static void ecore_ilt_init_client_psz(struct bnx2x_softc *sc, int cli_num,
+ uint32_t psz_reg, uint8_t initop)
+{
+ struct ecore_ilt *ilt = SC_ILT(sc);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+ return;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ REG_WR(sc, psz_reg, ILOG2(ilt_cli->page_size >> 12));
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+}
+
+/*
+ * called during init common stage, ilt clients should be initialized
+ * prioir to calling this function
+ */
+static void ecore_ilt_init_page_size(struct bnx2x_softc *sc, uint8_t initop)
+{
+ ecore_ilt_init_client_psz(sc, ILT_CLIENT_CDU,
+ PXP2_REG_RQ_CDU_P_SIZE, initop);
+ ecore_ilt_init_client_psz(sc, ILT_CLIENT_QM,
+ PXP2_REG_RQ_QM_P_SIZE, initop);
+ ecore_ilt_init_client_psz(sc, ILT_CLIENT_SRC,
+ PXP2_REG_RQ_SRC_P_SIZE, initop);
+ ecore_ilt_init_client_psz(sc, ILT_CLIENT_TM,
+ PXP2_REG_RQ_TM_P_SIZE, initop);
+}
+
+/****************************************************************************
+* QM initializations
+****************************************************************************/
+#define QM_QUEUES_PER_FUNC 16
+#define QM_INIT_MIN_CID_COUNT 31
+#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
+
+/* called during init port stage */
+static void ecore_qm_init_cid_count(struct bnx2x_softc *sc, int qm_cid_count,
+ uint8_t initop)
+{
+ int port = SC_PORT(sc);
+
+ if (QM_INIT(qm_cid_count)) {
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ REG_WR(sc, QM_REG_CONNNUM_0 + port*4,
+ qm_cid_count/16 - 1);
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+ }
+}
+
+static void ecore_qm_set_ptr_table(struct bnx2x_softc *sc, int qm_cid_count,
+ uint32_t base_reg, uint32_t reg)
+{
+ int i;
+ uint32_t wb_data[2] = {0, 0};
+ for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
+ REG_WR(sc, base_reg + i*4,
+ qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
+ ecore_init_wr_wb(sc, reg + i*8,
+ wb_data, 2);
+ }
+}
+
+/* called during init common stage */
+static void ecore_qm_init_ptr_table(struct bnx2x_softc *sc, int qm_cid_count,
+ uint8_t initop)
+{
+ if (!QM_INIT(qm_cid_count))
+ return;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ ecore_qm_set_ptr_table(sc, qm_cid_count,
+ QM_REG_BASEADDR, QM_REG_PTRTBL);
+ if (CHIP_IS_E1H(sc))
+ ecore_qm_set_ptr_table(sc, qm_cid_count,
+ QM_REG_BASEADDR_EXT_A,
+ QM_REG_PTRTBL_EXT_A);
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+}
+
+/****************************************************************************
+* SRC initializations
+****************************************************************************/
+#ifdef ECORE_L5
+/* called during init func stage */
+static void ecore_src_init_t2(struct bnx2x_softc *sc, struct src_ent *t2,
+ ecore_dma_addr_t t2_mapping, int src_cid_count)
+{
+ int i;
+ int port = SC_PORT(sc);
+
+ /* Initialize T2 */
+ for (i = 0; i < src_cid_count-1; i++)
+ t2[i].next = (uint64_t)(t2_mapping +
+ (i+1)*sizeof(struct src_ent));
+
+ /* tell the searcher where the T2 table is */
+ REG_WR(sc, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
+
+ ecore_wr_64(sc, SRC_REG_FIRSTFREE0 + port*16,
+ U64_LO(t2_mapping), U64_HI(t2_mapping));
+
+ ecore_wr_64(sc, SRC_REG_LASTFREE0 + port*16,
+ U64_LO((uint64_t)t2_mapping +
+ (src_cid_count-1) * sizeof(struct src_ent)),
+ U64_HI((uint64_t)t2_mapping +
+ (src_cid_count-1) * sizeof(struct src_ent)));
+}
+#endif
+#endif /* ECORE_INIT_OPS_H */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_mfw_req.h b/src/spdk/dpdk/drivers/net/bnx2x/ecore_mfw_req.h
new file mode 100644
index 00000000..fe945048
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_mfw_req.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2014-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef ECORE_MFW_REQ_H
+#define ECORE_MFW_REQ_H
+
+
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_MAX 2
+#define NVM_PATH_MAX 2
+
+/* FCoE capabilities required from the driver */
+struct fcoe_capabilities {
+ uint32_t capability1;
+ /* Maximum number of I/Os per connection */
+ #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff
+ #define FCOE_IOS_PER_CONNECTION_SHIFT 0
+ /* Maximum number of Logins per port */
+ #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000
+ #define FCOE_LOGINS_PER_PORT_SHIFT 16
+
+ uint32_t capability2;
+ /* Maximum number of exchanges */
+ #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff
+ #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0
+ /* Maximum NPIV WWN per port */
+ #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000
+ #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16
+
+ uint32_t capability3;
+ /* Maximum number of targets supported */
+ #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff
+ #define FCOE_TARGETS_SUPPORTED_SHIFT 0
+ /* Maximum number of outstanding commands across all connections */
+ #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000
+ #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16
+
+ uint32_t capability4;
+ #define FCOE_CAPABILITY4_STATEFUL 0x00000001
+ #define FCOE_CAPABILITY4_STATELESS 0x00000002
+ #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004
+};
+
+struct glob_ncsi_oem_data
+{
+ uint32_t driver_version;
+ uint32_t unused[3];
+ struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX];
+};
+
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 2
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+ ETH_STATS_OPCODE,
+ FCOE_STATS_OPCODE,
+ ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN 12
+/* Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+ /* Function's Driver Version. padded to 12 */
+ char version[ETH_STAT_INFO_VERSION_LEN];
+ /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
+ uint8_t mac_local[8];
+ uint8_t mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ uint8_t mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ uint32_t mtu_size; /* MTU Size. Note : Negotiated MTU */
+ uint32_t feature_flags; /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
+#define FEATURE_ETH_LSO_MASK 0x02
+#define FEATURE_ETH_BOOTMODE_MASK 0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT 2
+#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK 0x20
+ uint32_t lso_max_size; /* LSO MaxOffloadSize. */
+ uint32_t lso_min_seg_cnt; /* LSO MinSegmentCount. */
+ /* Num Offloaded Connections TCP_IPv4. */
+ uint32_t ipv4_ofld_cnt;
+ /* Num Offloaded Connections TCP_IPv6. */
+ uint32_t ipv6_ofld_cnt;
+ uint32_t promiscuous_mode; /* Promiscuous Mode. non-zero true */
+ uint32_t txq_size; /* TX Descriptors Queue Size */
+ uint32_t rxq_size; /* RX Descriptors Queue Size */
+ /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+ uint32_t txq_avg_depth;
+ /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+ uint32_t rxq_avg_depth;
+ /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
+ uint32_t iov_offload;
+ /* Number of NetQueue/VMQ Config'd. */
+ uint32_t netq_cnt;
+ uint32_t vf_cnt; /* Num VF assigned to this PF. */
+};
+
+/* Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+ uint8_t version[12]; /* Function's Driver Version. */
+ uint8_t mac_local[8]; /* Locally Admin Addr. */
+ uint8_t mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ uint8_t mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ uint32_t qos_priority;
+ uint32_t txq_size; /* FCoE TX Descriptors Queue Size. */
+ uint32_t rxq_size; /* FCoE RX Descriptors Queue Size. */
+ /* FCoE TX Descriptor Queue Avg Depth. */
+ uint32_t txq_avg_depth;
+ /* FCoE RX Descriptors Queue Avg Depth. */
+ uint32_t rxq_avg_depth;
+ uint32_t rx_frames_lo; /* FCoE RX Frames received. */
+ uint32_t rx_frames_hi; /* FCoE RX Frames received. */
+ uint32_t rx_bytes_lo; /* FCoE RX Bytes received. */
+ uint32_t rx_bytes_hi; /* FCoE RX Bytes received. */
+ uint32_t tx_frames_lo; /* FCoE TX Frames sent. */
+ uint32_t tx_frames_hi; /* FCoE TX Frames sent. */
+ uint32_t tx_bytes_lo; /* FCoE TX Bytes sent. */
+ uint32_t tx_bytes_hi; /* FCoE TX Bytes sent. */
+ uint32_t rx_fcs_errors; /* number of receive packets with FCS errors */
+ uint32_t rx_fc_crc_errors; /* number of FC frames with CRC errors*/
+ uint32_t fip_login_failures; /* number of FCoE/FIP Login failures */
+};
+
+/* Per PCI Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+ uint8_t version[12]; /* Function's Driver Version. */
+ uint8_t mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
+ uint8_t mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ uint32_t qos_priority;
+
+ uint8_t initiator_name[64]; /* iSCSI Boot Initiator Node name. */
+
+ uint8_t ww_port_name[64]; /* iSCSI World wide port name */
+
+ uint8_t boot_target_name[64];/* iSCSI Boot Target Name. */
+
+ uint8_t boot_target_ip[16]; /* iSCSI Boot Target IP. */
+ uint32_t boot_target_portal; /* iSCSI Boot Target Portal. */
+ uint8_t boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
+ uint32_t max_frame_size; /* Max Frame Size. bytes */
+ uint32_t txq_size; /* PDU TX Descriptors Queue Size. */
+ uint32_t rxq_size; /* PDU RX Descriptors Queue Size. */
+
+ uint32_t txq_avg_depth; /*PDU TX Descriptor Queue Avg Depth. */
+ uint32_t rxq_avg_depth; /*PDU RX Descriptors Queue Avg Depth. */
+ uint32_t rx_pdus_lo; /* iSCSI PDUs received. */
+ uint32_t rx_pdus_hi; /* iSCSI PDUs received. */
+
+ uint32_t rx_bytes_lo; /* iSCSI RX Bytes received. */
+ uint32_t rx_bytes_hi; /* iSCSI RX Bytes received. */
+ uint32_t tx_pdus_lo; /* iSCSI PDUs sent. */
+ uint32_t tx_pdus_hi; /* iSCSI PDUs sent. */
+
+ uint32_t tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
+ uint32_t tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
+ uint32_t pcp_prior_map_tbl; /*C-PCP to S-PCP Priority MapTable.
+ 9 nibbles, the position of each nibble
+ represents the C-PCP value, the value
+ of the nibble = S-PCP value.*/
+};
+
+union drv_info_to_mcp {
+ struct eth_stats_info ether_stat;
+ struct fcoe_stats_info fcoe_stat;
+ struct iscsi_stats_info iscsi_stat;
+};
+
+
+#endif /* ECORE_MFW_REQ_H */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_reg.h b/src/spdk/dpdk/drivers/net/bnx2x/ecore_reg.h
new file mode 100644
index 00000000..ae8a93bb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_reg.h
@@ -0,0 +1,3642 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2014-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef ECORE_REG_H
+#define ECORE_REG_H
+
+
+#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR \
+ (0x1<<0)
+#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS \
+ (0x1<<2)
+#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU \
+ (0x1<<5)
+#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT \
+ (0x1<<3)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR \
+ (0x1<<4)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND \
+ (0x1<<1)
+#define ATC_REG_ATC_INIT_DONE \
+ 0x1100bcUL
+#define ATC_REG_ATC_INT_STS_CLR \
+ 0x1101c0UL
+#define ATC_REG_ATC_PRTY_MASK \
+ 0x1101d8UL
+#define ATC_REG_ATC_PRTY_STS_CLR \
+ 0x1101d0UL
+#define BRB1_REG_BRB1_INT_MASK \
+ 0x60128UL
+#define BRB1_REG_BRB1_PRTY_MASK \
+ 0x60138UL
+#define BRB1_REG_BRB1_PRTY_STS_CLR \
+ 0x60130UL
+#define BRB1_REG_MAC_GUARANTIED_0 \
+ 0x601e8UL
+#define BRB1_REG_MAC_GUARANTIED_1 \
+ 0x60240UL
+#define BRB1_REG_NUM_OF_FULL_BLOCKS \
+ 0x60090UL
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 \
+ 0x60078UL
+#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 \
+ 0x60068UL
+#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 \
+ 0x60094UL
+#define CCM_REG_CCM_INT_MASK \
+ 0xd01e4UL
+#define CCM_REG_CCM_PRTY_MASK \
+ 0xd01f4UL
+#define CCM_REG_CCM_PRTY_STS_CLR \
+ 0xd01ecUL
+#define CDU_REG_CDU_GLOBAL_PARAMS \
+ 0x101020UL
+#define CDU_REG_CDU_INT_MASK \
+ 0x10103cUL
+#define CDU_REG_CDU_PRTY_MASK \
+ 0x10104cUL
+#define CDU_REG_CDU_PRTY_STS_CLR \
+ 0x101044UL
+#define CFC_REG_AC_INIT_DONE \
+ 0x104078UL
+#define CFC_REG_CAM_INIT_DONE \
+ 0x10407cUL
+#define CFC_REG_CFC_INT_MASK \
+ 0x104108UL
+#define CFC_REG_CFC_INT_STS_CLR \
+ 0x104100UL
+#define CFC_REG_CFC_PRTY_MASK \
+ 0x104118UL
+#define CFC_REG_CFC_PRTY_STS_CLR \
+ 0x104110UL
+#define CFC_REG_DEBUG0 \
+ 0x104050UL
+#define CFC_REG_INIT_REG \
+ 0x10404cUL
+#define CFC_REG_LL_INIT_DONE \
+ 0x104074UL
+#define CFC_REG_NUM_LCIDS_INSIDE_PF \
+ 0x104120UL
+#define CFC_REG_STRONG_ENABLE_PF \
+ 0x104128UL
+#define CFC_REG_WEAK_ENABLE_PF \
+ 0x104124UL
+#define CSDM_REG_CSDM_INT_MASK_0 \
+ 0xc229cUL
+#define CSDM_REG_CSDM_INT_MASK_1 \
+ 0xc22acUL
+#define CSDM_REG_CSDM_PRTY_MASK \
+ 0xc22bcUL
+#define CSDM_REG_CSDM_PRTY_STS_CLR \
+ 0xc22b4UL
+#define CSEM_REG_CSEM_INT_MASK_0 \
+ 0x200110UL
+#define CSEM_REG_CSEM_INT_MASK_1 \
+ 0x200120UL
+#define CSEM_REG_CSEM_PRTY_MASK_0 \
+ 0x200130UL
+#define CSEM_REG_CSEM_PRTY_MASK_1 \
+ 0x200140UL
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0 \
+ 0x200128UL
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1 \
+ 0x200138UL
+#define CSEM_REG_FAST_MEMORY \
+ 0x220000UL
+#define CSEM_REG_INT_TABLE \
+ 0x200400UL
+#define CSEM_REG_PASSIVE_BUFFER \
+ 0x202000UL
+#define CSEM_REG_PRAM \
+ 0x240000UL
+#define CSEM_REG_VFPF_ERR_NUM \
+ 0x200380UL
+#define DBG_REG_DBG_PRTY_MASK \
+ 0xc0a8UL
+#define DBG_REG_DBG_PRTY_STS_CLR \
+ 0xc0a0UL
+#define DMAE_REG_BACKWARD_COMP_EN \
+ 0x10207cUL
+#define DMAE_REG_CMD_MEM \
+ 0x102400UL
+#define DMAE_REG_DMAE_INT_MASK \
+ 0x102054UL
+#define DMAE_REG_DMAE_PRTY_MASK \
+ 0x102064UL
+#define DMAE_REG_DMAE_PRTY_STS_CLR \
+ 0x10205cUL
+#define DMAE_REG_GO_C0 \
+ 0x102080UL
+#define DMAE_REG_GO_C1 \
+ 0x102084UL
+#define DMAE_REG_GO_C10 \
+ 0x102088UL
+#define DMAE_REG_GO_C11 \
+ 0x10208cUL
+#define DMAE_REG_GO_C12 \
+ 0x102090UL
+#define DMAE_REG_GO_C13 \
+ 0x102094UL
+#define DMAE_REG_GO_C14 \
+ 0x102098UL
+#define DMAE_REG_GO_C15 \
+ 0x10209cUL
+#define DMAE_REG_GO_C2 \
+ 0x1020a0UL
+#define DMAE_REG_GO_C3 \
+ 0x1020a4UL
+#define DMAE_REG_GO_C4 \
+ 0x1020a8UL
+#define DMAE_REG_GO_C5 \
+ 0x1020acUL
+#define DMAE_REG_GO_C6 \
+ 0x1020b0UL
+#define DMAE_REG_GO_C7 \
+ 0x1020b4UL
+#define DMAE_REG_GO_C8 \
+ 0x1020b8UL
+#define DMAE_REG_GO_C9 \
+ 0x1020bcUL
+#define DORQ_REG_DORQ_INT_MASK \
+ 0x170180UL
+#define DORQ_REG_DORQ_INT_STS_CLR \
+ 0x170178UL
+#define DORQ_REG_DORQ_PRTY_MASK \
+ 0x170190UL
+#define DORQ_REG_DORQ_PRTY_STS_CLR \
+ 0x170188UL
+#define DORQ_REG_DPM_CID_OFST \
+ 0x170030UL
+#define DORQ_REG_MAX_RVFID_SIZE \
+ 0x1701ecUL
+#define DORQ_REG_NORM_CID_OFST \
+ 0x17002cUL
+#define DORQ_REG_PF_USAGE_CNT \
+ 0x1701d0UL
+#define DORQ_REG_VF_NORM_CID_BASE \
+ 0x1701a0UL
+#define DORQ_REG_VF_NORM_CID_OFST \
+ 0x1701f4UL
+#define DORQ_REG_VF_NORM_CID_WND_SIZE \
+ 0x1701a4UL
+#define DORQ_REG_VF_NORM_MAX_CID_COUNT \
+ 0x1701e4UL
+#define DORQ_REG_VF_NORM_VF_BASE \
+ 0x1701a8UL
+#define DORQ_REG_VF_TYPE_MASK_0 \
+ 0x170218UL
+#define DORQ_REG_VF_TYPE_MAX_MCID_0 \
+ 0x1702d8UL
+#define DORQ_REG_VF_TYPE_MIN_MCID_0 \
+ 0x170298UL
+#define DORQ_REG_VF_TYPE_VALUE_0 \
+ 0x170258UL
+#define DORQ_REG_VF_USAGE_CNT \
+ 0x170320UL
+#define DORQ_REG_VF_USAGE_CT_LIMIT \
+ 0x170340UL
+#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 \
+ (0x1<<4)
+#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 \
+ (0x1<<0)
+#define HC_CONFIG_0_REG_INT_LINE_EN_0 \
+ (0x1<<3)
+#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 \
+ (0x1<<7)
+#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 \
+ (0x1<<2)
+#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 \
+ (0x1<<1)
+#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 \
+ (0x1<<0)
+#define HC_REG_ATTN_MSG0_ADDR_L \
+ 0x108018UL
+#define HC_REG_ATTN_MSG1_ADDR_L \
+ 0x108020UL
+#define HC_REG_COMMAND_REG \
+ 0x108180UL
+#define HC_REG_CONFIG_0 \
+ 0x108000UL
+#define HC_REG_CONFIG_1 \
+ 0x108004UL
+#define HC_REG_HC_PRTY_MASK \
+ 0x1080a0UL
+#define HC_REG_HC_PRTY_STS_CLR \
+ 0x108098UL
+#define HC_REG_INT_MASK \
+ 0x108108UL
+#define HC_REG_LEADING_EDGE_0 \
+ 0x108040UL
+#define HC_REG_MAIN_MEMORY \
+ 0x108800UL
+#define HC_REG_MAIN_MEMORY_SIZE \
+ 152
+#define HC_REG_TRAILING_EDGE_0 \
+ 0x108044UL
+#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN \
+ (0x1<<1)
+#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE \
+ (0x1<<0)
+#define IGU_REG_ATTENTION_ACK_BITS \
+ 0x130108UL
+#define IGU_REG_ATTN_MSG_ADDR_H \
+ 0x13011cUL
+#define IGU_REG_ATTN_MSG_ADDR_L \
+ 0x130120UL
+#define IGU_REG_BLOCK_CONFIGURATION \
+ 0x130000UL
+#define IGU_REG_COMMAND_REG_32LSB_DATA \
+ 0x130124UL
+#define IGU_REG_COMMAND_REG_CTRL \
+ 0x13012cUL
+#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP \
+ 0x130200UL
+#define IGU_REG_IGU_PRTY_MASK \
+ 0x1300a8UL
+#define IGU_REG_IGU_PRTY_STS_CLR \
+ 0x1300a0UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x130134UL
+#define IGU_REG_MAPPING_MEMORY \
+ 0x131000UL
+#define IGU_REG_MAPPING_MEMORY_SIZE \
+ 136
+#define IGU_REG_PBA_STATUS_LSB \
+ 0x130138UL
+#define IGU_REG_PBA_STATUS_MSB \
+ 0x13013cUL
+#define IGU_REG_PCI_PF_MSIX_EN \
+ 0x130144UL
+#define IGU_REG_PCI_PF_MSIX_FUNC_MASK \
+ 0x130148UL
+#define IGU_REG_PCI_PF_MSI_EN \
+ 0x130140UL
+#define IGU_REG_PENDING_BITS_STATUS \
+ 0x130300UL
+#define IGU_REG_PF_CONFIGURATION \
+ 0x130154UL
+#define IGU_REG_PROD_CONS_MEMORY \
+ 0x132000UL
+#define IGU_REG_RESET_MEMORIES \
+ 0x130158UL
+#define IGU_REG_SB_INT_BEFORE_MASK_LSB \
+ 0x13015cUL
+#define IGU_REG_SB_INT_BEFORE_MASK_MSB \
+ 0x130160UL
+#define IGU_REG_SB_MASK_LSB \
+ 0x130164UL
+#define IGU_REG_SB_MASK_MSB \
+ 0x130168UL
+#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT \
+ 0x130800UL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x130104UL
+#define IGU_REG_VF_CONFIGURATION \
+ 0x130170UL
+#define MCP_REG_MCPR_ACCESS_LOCK \
+ 0x8009c
+#define MCP_REG_MCPR_GP_INPUTS \
+ 0x800c0
+#define MCP_REG_MCPR_GP_OENABLE \
+ 0x800c8
+#define MCP_REG_MCPR_GP_OUTPUTS \
+ 0x800c4
+#define MCP_REG_MCPR_IMC_COMMAND \
+ 0x85900
+#define MCP_REG_MCPR_IMC_DATAREG0 \
+ 0x85920
+#define MCP_REG_MCPR_IMC_SLAVE_CONTROL \
+ 0x85904
+#define MCP_REG_MCPR_NVM_ACCESS_ENABLE \
+ 0x86424
+#define MCP_REG_MCPR_NVM_ADDR \
+ 0x8640c
+#define MCP_REG_MCPR_NVM_CFG4 \
+ 0x8642c
+#define MCP_REG_MCPR_NVM_COMMAND \
+ 0x86400
+#define MCP_REG_MCPR_NVM_READ \
+ 0x86410
+#define MCP_REG_MCPR_NVM_SW_ARB \
+ 0x86420
+#define MCP_REG_MCPR_NVM_WRITE \
+ 0x86408
+#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK \
+ (0x1<<1)
+#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK \
+ (0x1<<0)
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 \
+ 0xa42cUL
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 \
+ 0xa438UL
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 \
+ 0xa444UL
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 \
+ 0xa450UL
+#define MISC_REG_AEU_AFTER_INVERT_4_MCP \
+ 0xa458UL
+#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 \
+ 0xa700UL
+#define MISC_REG_AEU_CLR_LATCH_SIGNAL \
+ 0xa45cUL
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 \
+ 0xa06cUL
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 \
+ 0xa07cUL
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 \
+ 0xa08cUL
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 \
+ 0xa10cUL
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 \
+ 0xa11cUL
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 \
+ 0xa12cUL
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 \
+ 0xa078UL
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 \
+ 0xa118UL
+#define MISC_REG_AEU_ENABLE4_NIG_0 \
+ 0xa0f8UL
+#define MISC_REG_AEU_ENABLE4_NIG_1 \
+ 0xa198UL
+#define MISC_REG_AEU_ENABLE4_PXP_0 \
+ 0xa108UL
+#define MISC_REG_AEU_ENABLE4_PXP_1 \
+ 0xa1a8UL
+#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 \
+ 0xa688UL
+#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 \
+ 0xa6b0UL
+#define MISC_REG_AEU_GENERAL_ATTN_0 \
+ 0xa000UL
+#define MISC_REG_AEU_GENERAL_ATTN_1 \
+ 0xa004UL
+#define MISC_REG_AEU_GENERAL_ATTN_10 \
+ 0xa028UL
+#define MISC_REG_AEU_GENERAL_ATTN_11 \
+ 0xa02cUL
+#define MISC_REG_AEU_GENERAL_ATTN_12 \
+ 0xa030UL
+#define MISC_REG_AEU_GENERAL_ATTN_2 \
+ 0xa008UL
+#define MISC_REG_AEU_GENERAL_ATTN_3 \
+ 0xa00cUL
+#define MISC_REG_AEU_GENERAL_ATTN_4 \
+ 0xa010UL
+#define MISC_REG_AEU_GENERAL_ATTN_5 \
+ 0xa014UL
+#define MISC_REG_AEU_GENERAL_ATTN_6 \
+ 0xa018UL
+#define MISC_REG_AEU_GENERAL_ATTN_7 \
+ 0xa01cUL
+#define MISC_REG_AEU_GENERAL_ATTN_8 \
+ 0xa020UL
+#define MISC_REG_AEU_GENERAL_ATTN_9 \
+ 0xa024UL
+#define MISC_REG_AEU_GENERAL_MASK \
+ 0xa61cUL
+#define MISC_REG_AEU_MASK_ATTN_FUNC_0 \
+ 0xa060UL
+#define MISC_REG_AEU_MASK_ATTN_FUNC_1 \
+ 0xa064UL
+#define MISC_REG_BOND_ID \
+ 0xa400UL
+#define MISC_REG_CHIP_NUM \
+ 0xa408UL
+#define MISC_REG_CHIP_REV \
+ 0xa40cUL
+#define MISC_REG_CHIP_TYPE \
+ 0xac60UL
+#define MISC_REG_CHIP_TYPE_57811_MASK \
+ (1<<1)
+#define MISC_REG_CPMU_LP_DR_ENABLE \
+ 0xa858UL
+#define MISC_REG_CPMU_LP_FW_ENABLE_P0 \
+ 0xa84cUL
+#define MISC_REG_CPMU_LP_IDLE_THR_P0 \
+ 0xa8a0UL
+#define MISC_REG_CPMU_LP_MASK_ENT_P0 \
+ 0xa880UL
+#define MISC_REG_CPMU_LP_MASK_EXT_P0 \
+ 0xa888UL
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 \
+ 0xa8b8UL
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P1 \
+ 0xa8bcUL
+#define MISC_REG_DRIVER_CONTROL_1 \
+ 0xa510UL
+#define MISC_REG_DRIVER_CONTROL_7 \
+ 0xa3c8UL
+#define MISC_REG_FOUR_PORT_PATH_SWAP \
+ 0xa75cUL
+#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR \
+ 0xa738UL
+#define MISC_REG_FOUR_PORT_PORT_SWAP \
+ 0xa754UL
+#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR \
+ 0xa734UL
+#define MISC_REG_GENERIC_CR_0 \
+ 0xa460UL
+#define MISC_REG_GENERIC_CR_1 \
+ 0xa464UL
+#define MISC_REG_GENERIC_POR_1 \
+ 0xa474UL
+#define MISC_REG_GEN_PURP_HWG \
+ 0xa9a0UL
+#define MISC_REG_GPIO \
+ 0xa490UL
+#define MISC_REG_GPIO_EVENT_EN \
+ 0xa2bcUL
+#define MISC_REG_GPIO_INT \
+ 0xa494UL
+#define MISC_REG_GRC_RSV_ATTN \
+ 0xa3c0UL
+#define MISC_REG_GRC_TIMEOUT_ATTN \
+ 0xa3c4UL
+#define MISC_REG_LCPLL_E40_PWRDWN \
+ 0xaa74UL
+#define MISC_REG_LCPLL_E40_RESETB_ANA \
+ 0xaa78UL
+#define MISC_REG_LCPLL_E40_RESETB_DIG \
+ 0xaa7cUL
+#define MISC_REG_MISC_INT_MASK \
+ 0xa388UL
+#define MISC_REG_MISC_PRTY_MASK \
+ 0xa398UL
+#define MISC_REG_MISC_PRTY_STS_CLR \
+ 0xa390UL
+#define MISC_REG_PORT4MODE_EN \
+ 0xa750UL
+#define MISC_REG_PORT4MODE_EN_OVWR \
+ 0xa720UL
+#define MISC_REG_RESET_REG_1 \
+ 0xa580UL
+#define MISC_REG_RESET_REG_2 \
+ 0xa590UL
+#define MISC_REG_SHARED_MEM_ADDR \
+ 0xa2b4UL
+#define MISC_REG_SPIO \
+ 0xa4fcUL
+#define MISC_REG_SPIO_EVENT_EN \
+ 0xa2b8UL
+#define MISC_REG_SPIO_INT \
+ 0xa500UL
+#define MISC_REG_TWO_PORT_PATH_SWAP \
+ 0xa758UL
+#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR \
+ 0xa72cUL
+#define MISC_REG_UNPREPARED \
+ 0xa424UL
+#define MISC_REG_WC0_CTRL_PHY_ADDR \
+ 0xa9ccUL
+#define MISC_REG_WC0_RESET \
+ 0xac30UL
+#define MISC_REG_XMAC_CORE_PORT_MODE \
+ 0xa964UL
+#define MISC_REG_XMAC_PHY_PORT_MODE \
+ 0xa960UL
+#define MSTAT_REG_RX_STAT_GR64_LO \
+ 0x200UL
+#define MSTAT_REG_TX_STAT_GTXPOK_LO \
+ 0UL
+#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN \
+ (0x1<<0)
+#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN \
+ (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT \
+ (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS \
+ (0x1<<9)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G \
+ (0x1<<15)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS \
+ (0xf<<18)
+#define NIG_REG_BMAC0_IN_EN \
+ 0x100acUL
+#define NIG_REG_BMAC0_OUT_EN \
+ 0x100e0UL
+#define NIG_REG_BMAC0_PAUSE_OUT_EN \
+ 0x10110UL
+#define NIG_REG_BMAC0_REGS_OUT_EN \
+ 0x100e8UL
+#define NIG_REG_BRB0_PAUSE_IN_EN \
+ 0x100c4UL
+#define NIG_REG_BRB1_PAUSE_IN_EN \
+ 0x100c8UL
+#define NIG_REG_DEBUG_PACKET_LB \
+ 0x10800UL
+#define NIG_REG_EGRESS_DRAIN0_MODE \
+ 0x10060UL
+#define NIG_REG_EGRESS_EMAC0_OUT_EN \
+ 0x10120UL
+#define NIG_REG_EGRESS_EMAC0_PORT \
+ 0x10058UL
+#define NIG_REG_EMAC0_IN_EN \
+ 0x100a4UL
+#define NIG_REG_EMAC0_PAUSE_OUT_EN \
+ 0x10118UL
+#define NIG_REG_EMAC0_STATUS_MISC_MI_INT \
+ 0x10494UL
+#define NIG_REG_INGRESS_BMAC0_MEM \
+ 0x10c00UL
+#define NIG_REG_INGRESS_BMAC1_MEM \
+ 0x11000UL
+#define NIG_REG_INGRESS_EOP_LB_EMPTY \
+ 0x104e0UL
+#define NIG_REG_INGRESS_EOP_LB_FIFO \
+ 0x104e4UL
+#define NIG_REG_LATCH_BC_0 \
+ 0x16210UL
+#define NIG_REG_LATCH_STATUS_0 \
+ 0x18000UL
+#define NIG_REG_LED_10G_P0 \
+ 0x10320UL
+#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 \
+ 0x10318UL
+#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 \
+ 0x10310UL
+#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 \
+ 0x10308UL
+#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 \
+ 0x102f8UL
+#define NIG_REG_LED_CONTROL_TRAFFIC_P0 \
+ 0x10300UL
+#define NIG_REG_LED_MODE_P0 \
+ 0x102f0UL
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 \
+ 0x16070UL
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 \
+ 0x16074UL
+#define NIG_REG_LLFC_ENABLE_0 \
+ 0x16208UL
+#define NIG_REG_LLFC_ENABLE_1 \
+ 0x1620cUL
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 \
+ 0x16058UL
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 \
+ 0x1605cUL
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 \
+ 0x16060UL
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 \
+ 0x16064UL
+#define NIG_REG_LLFC_OUT_EN_0 \
+ 0x160c8UL
+#define NIG_REG_LLFC_OUT_EN_1 \
+ 0x160ccUL
+#define NIG_REG_LLH0_BRB1_DRV_MASK \
+ 0x10244UL
+#define NIG_REG_LLH0_BRB1_DRV_MASK_MF \
+ 0x16048UL
+#define NIG_REG_LLH0_BRB1_NOT_MCP \
+ 0x1025cUL
+#define NIG_REG_LLH0_CLS_TYPE \
+ 0x16080UL
+#define NIG_REG_LLH0_FUNC_EN \
+ 0x160fcUL
+#define NIG_REG_LLH0_FUNC_MEM \
+ 0x16180UL
+#define NIG_REG_LLH0_FUNC_MEM_ENABLE \
+ 0x16140UL
+#define NIG_REG_LLH0_FUNC_VLAN_ID \
+ 0x16100UL
+#define NIG_REG_LLH0_XCM_MASK \
+ 0x10130UL
+#define NIG_REG_LLH1_BRB1_NOT_MCP \
+ 0x102dcUL
+#define NIG_REG_LLH1_CLS_TYPE \
+ 0x16084UL
+#define NIG_REG_LLH1_FUNC_MEM \
+ 0x161c0UL
+#define NIG_REG_LLH1_FUNC_MEM_ENABLE \
+ 0x16160UL
+#define NIG_REG_LLH1_FUNC_MEM_SIZE \
+ 16
+#define NIG_REG_LLH1_MF_MODE \
+ 0x18614UL
+#define NIG_REG_LLH1_XCM_MASK \
+ 0x10134UL
+#define NIG_REG_LLH_E1HOV_MODE \
+ 0x160d8UL
+#define NIG_REG_LLH_MF_MODE \
+ 0x16024UL
+#define NIG_REG_MASK_INTERRUPT_PORT0 \
+ 0x10330UL
+#define NIG_REG_MASK_INTERRUPT_PORT1 \
+ 0x10334UL
+#define NIG_REG_NIG_EMAC0_EN \
+ 0x1003cUL
+#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC \
+ 0x10044UL
+#define NIG_REG_NIG_INT_STS_CLR_0 \
+ 0x103b4UL
+#define NIG_REG_NIG_PRTY_MASK \
+ 0x103dcUL
+#define NIG_REG_NIG_PRTY_MASK_0 \
+ 0x183c8UL
+#define NIG_REG_NIG_PRTY_MASK_1 \
+ 0x183d8UL
+#define NIG_REG_NIG_PRTY_STS_CLR \
+ 0x103d4UL
+#define NIG_REG_NIG_PRTY_STS_CLR_0 \
+ 0x183c0UL
+#define NIG_REG_NIG_PRTY_STS_CLR_1 \
+ 0x183d0UL
+#define NIG_REG_P0_HDRS_AFTER_BASIC \
+ 0x18038UL
+#define NIG_REG_P0_HWPFC_ENABLE \
+ 0x18078UL
+#define NIG_REG_P0_LLH_FUNC_MEM2 \
+ 0x18480UL
+#define NIG_REG_P0_MAC_IN_EN \
+ 0x185acUL
+#define NIG_REG_P0_MAC_OUT_EN \
+ 0x185b0UL
+#define NIG_REG_P0_MAC_PAUSE_OUT_EN \
+ 0x185b4UL
+#define NIG_REG_P0_PKT_PRIORITY_TO_COS \
+ 0x18054UL
+#define NIG_REG_P0_RX_COS0_PRIORITY_MASK \
+ 0x18058UL
+#define NIG_REG_P0_RX_COS1_PRIORITY_MASK \
+ 0x1805cUL
+#define NIG_REG_P0_RX_COS2_PRIORITY_MASK \
+ 0x186b0UL
+#define NIG_REG_P0_RX_COS3_PRIORITY_MASK \
+ 0x186b4UL
+#define NIG_REG_P0_RX_COS4_PRIORITY_MASK \
+ 0x186b8UL
+#define NIG_REG_P0_RX_COS5_PRIORITY_MASK \
+ 0x186bcUL
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP \
+ 0x180f0UL
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB \
+ 0x18688UL
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB \
+ 0x1868cUL
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT \
+ 0x180e8UL
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ \
+ 0x180ecUL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 \
+ 0x1810cUL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 \
+ 0x18110UL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 \
+ 0x18114UL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 \
+ 0x18118UL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 \
+ 0x1811cUL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 \
+ 0x186a0UL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 \
+ 0x186a4UL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 \
+ 0x186a8UL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 \
+ 0x186acUL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 \
+ 0x180f8UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 \
+ 0x180fcUL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 \
+ 0x18100UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 \
+ 0x18104UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 \
+ 0x18108UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 \
+ 0x18690UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 \
+ 0x18694UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 \
+ 0x18698UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 \
+ 0x1869cUL
+#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS \
+ 0x180f4UL
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT \
+ 0x180e4UL
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB \
+ 0x18680UL
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB \
+ 0x18684UL
+#define NIG_REG_P1_HDRS_AFTER_BASIC \
+ 0x1818cUL
+#define NIG_REG_P1_HWPFC_ENABLE \
+ 0x181d0UL
+#define NIG_REG_P1_LLH_FUNC_MEM2 \
+ 0x184c0UL
+#define NIG_REG_P1_MAC_IN_EN \
+ 0x185c0UL
+#define NIG_REG_P1_MAC_OUT_EN \
+ 0x185c4UL
+#define NIG_REG_P1_MAC_PAUSE_OUT_EN \
+ 0x185c8UL
+#define NIG_REG_P1_PKT_PRIORITY_TO_COS \
+ 0x181a8UL
+#define NIG_REG_P1_RX_COS0_PRIORITY_MASK \
+ 0x181acUL
+#define NIG_REG_P1_RX_COS1_PRIORITY_MASK \
+ 0x181b0UL
+#define NIG_REG_P1_RX_COS2_PRIORITY_MASK \
+ 0x186f8UL
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB \
+ 0x186e8UL
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB \
+ 0x186ecUL
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT \
+ 0x18234UL
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ \
+ 0x18238UL
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 \
+ 0x18258UL
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 \
+ 0x1825cUL
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 \
+ 0x18260UL
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 \
+ 0x18264UL
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 \
+ 0x18268UL
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 \
+ 0x186f4UL
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 \
+ 0x18244UL
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 \
+ 0x18248UL
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 \
+ 0x1824cUL
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 \
+ 0x18250UL
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 \
+ 0x18254UL
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 \
+ 0x186f0UL
+#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS \
+ 0x18240UL
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB \
+ 0x186e0UL
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB \
+ 0x186e4UL
+#define NIG_REG_PAUSE_ENABLE_0 \
+ 0x160c0UL
+#define NIG_REG_PAUSE_ENABLE_1 \
+ 0x160c4UL
+#define NIG_REG_PORT_SWAP \
+ 0x10394UL
+#define NIG_REG_PPP_ENABLE_0 \
+ 0x160b0UL
+#define NIG_REG_PPP_ENABLE_1 \
+ 0x160b4UL
+#define NIG_REG_PRS_REQ_IN_EN \
+ 0x100b8UL
+#define NIG_REG_SERDES0_CTRL_MD_DEVAD \
+ 0x10370UL
+#define NIG_REG_SERDES0_CTRL_MD_ST \
+ 0x1036cUL
+#define NIG_REG_SERDES0_CTRL_PHY_ADDR \
+ 0x10374UL
+#define NIG_REG_SERDES0_STATUS_LINK_STATUS \
+ 0x10578UL
+#define NIG_REG_STAT0_BRB_DISCARD \
+ 0x105f0UL
+#define NIG_REG_STAT0_BRB_TRUNCATE \
+ 0x105f8UL
+#define NIG_REG_STAT0_EGRESS_MAC_PKT0 \
+ 0x10750UL
+#define NIG_REG_STAT0_EGRESS_MAC_PKT1 \
+ 0x10760UL
+#define NIG_REG_STAT1_BRB_DISCARD \
+ 0x10628UL
+#define NIG_REG_STAT1_EGRESS_MAC_PKT0 \
+ 0x107a0UL
+#define NIG_REG_STAT1_EGRESS_MAC_PKT1 \
+ 0x107b0UL
+#define NIG_REG_STAT2_BRB_OCTET \
+ 0x107e0UL
+#define NIG_REG_STATUS_INTERRUPT_PORT0 \
+ 0x10328UL
+#define NIG_REG_STRAP_OVERRIDE \
+ 0x10398UL
+#define NIG_REG_XCM0_OUT_EN \
+ 0x100f0UL
+#define NIG_REG_XCM1_OUT_EN \
+ 0x100f4UL
+#define NIG_REG_XGXS0_CTRL_MD_DEVAD \
+ 0x1033cUL
+#define NIG_REG_XGXS0_CTRL_MD_ST \
+ 0x10338UL
+#define NIG_REG_XGXS0_CTRL_PHY_ADDR \
+ 0x10340UL
+#define NIG_REG_XGXS0_STATUS_LINK10G \
+ 0x10680UL
+#define NIG_REG_XGXS0_STATUS_LINK_STATUS \
+ 0x10684UL
+#define NIG_REG_XGXS_LANE_SEL_P0 \
+ 0x102e8UL
+#define NIG_REG_XGXS_SERDES0_MODE_SEL \
+ 0x102e0UL
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT \
+ (0x1<<0)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS \
+ (0x1<<9)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G \
+ (0x1<<15)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS \
+ (0xf<<18)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE \
+ 18
+#define PBF_REG_COS0_UPPER_BOUND \
+ 0x15c05cUL
+#define PBF_REG_COS0_UPPER_BOUND_P0 \
+ 0x15c2ccUL
+#define PBF_REG_COS0_UPPER_BOUND_P1 \
+ 0x15c2e4UL
+#define PBF_REG_COS0_WEIGHT \
+ 0x15c054UL
+#define PBF_REG_COS0_WEIGHT_P0 \
+ 0x15c2a8UL
+#define PBF_REG_COS0_WEIGHT_P1 \
+ 0x15c2c0UL
+#define PBF_REG_COS1_UPPER_BOUND \
+ 0x15c060UL
+#define PBF_REG_COS1_WEIGHT \
+ 0x15c058UL
+#define PBF_REG_COS1_WEIGHT_P0 \
+ 0x15c2acUL
+#define PBF_REG_COS1_WEIGHT_P1 \
+ 0x15c2c4UL
+#define PBF_REG_COS2_WEIGHT_P0 \
+ 0x15c2b0UL
+#define PBF_REG_COS2_WEIGHT_P1 \
+ 0x15c2c8UL
+#define PBF_REG_COS3_WEIGHT_P0 \
+ 0x15c2b4UL
+#define PBF_REG_COS4_WEIGHT_P0 \
+ 0x15c2b8UL
+#define PBF_REG_COS5_WEIGHT_P0 \
+ 0x15c2bcUL
+#define PBF_REG_CREDIT_LB_Q \
+ 0x140338UL
+#define PBF_REG_CREDIT_Q0 \
+ 0x14033cUL
+#define PBF_REG_CREDIT_Q1 \
+ 0x140340UL
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 \
+ 0x14005cUL
+#define PBF_REG_DISABLE_PF \
+ 0x1402e8UL
+#define PBF_REG_DISABLE_VF \
+ 0x1402ecUL
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 \
+ 0x15c288UL
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 \
+ 0x15c28cUL
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 \
+ 0x15c278UL
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 \
+ 0x15c27cUL
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 \
+ 0x15c280UL
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 \
+ 0x15c284UL
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 \
+ 0x15c2a0UL
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 \
+ 0x15c2a4UL
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 \
+ 0x15c270UL
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 \
+ 0x15c274UL
+#define PBF_REG_ETS_ENABLED \
+ 0x15c050UL
+#define PBF_REG_HDRS_AFTER_BASIC \
+ 0x15c0a8UL
+#define PBF_REG_HDRS_AFTER_TAG_0 \
+ 0x15c0b8UL
+#define PBF_REG_HIGH_PRIORITY_COS_NUM \
+ 0x15c04cUL
+#define PBF_REG_INIT_CRD_LB_Q \
+ 0x15c248UL
+#define PBF_REG_INIT_CRD_Q0 \
+ 0x15c230UL
+#define PBF_REG_INIT_CRD_Q1 \
+ 0x15c234UL
+#define PBF_REG_INIT_P0 \
+ 0x140004UL
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q \
+ 0x140354UL
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 \
+ 0x140358UL
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 \
+ 0x14035cUL
+#define PBF_REG_MUST_HAVE_HDRS \
+ 0x15c0c4UL
+#define PBF_REG_NUM_STRICT_ARB_SLOTS \
+ 0x15c064UL
+#define PBF_REG_P0_ARB_THRSH \
+ 0x1400e4UL
+#define PBF_REG_P0_CREDIT \
+ 0x140200UL
+#define PBF_REG_P0_INIT_CRD \
+ 0x1400d0UL
+#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT \
+ 0x140308UL
+#define PBF_REG_P0_PAUSE_ENABLE \
+ 0x140014UL
+#define PBF_REG_P0_TQ_LINES_FREED_CNT \
+ 0x1402f0UL
+#define PBF_REG_P0_TQ_OCCUPANCY \
+ 0x1402fcUL
+#define PBF_REG_P1_CREDIT \
+ 0x140208UL
+#define PBF_REG_P1_INIT_CRD \
+ 0x1400d4UL
+#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT \
+ 0x14030cUL
+#define PBF_REG_P1_TQ_LINES_FREED_CNT \
+ 0x1402f4UL
+#define PBF_REG_P1_TQ_OCCUPANCY \
+ 0x140300UL
+#define PBF_REG_P4_CREDIT \
+ 0x140210UL
+#define PBF_REG_P4_INIT_CRD \
+ 0x1400e0UL
+#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT \
+ 0x140310UL
+#define PBF_REG_P4_TQ_LINES_FREED_CNT \
+ 0x1402f8UL
+#define PBF_REG_P4_TQ_OCCUPANCY \
+ 0x140304UL
+#define PBF_REG_PBF_INT_MASK \
+ 0x1401d4UL
+#define PBF_REG_PBF_PRTY_MASK \
+ 0x1401e4UL
+#define PBF_REG_PBF_PRTY_STS_CLR \
+ 0x1401dcUL
+#define PBF_REG_TAG_ETHERTYPE_0 \
+ 0x15c090UL
+#define PBF_REG_TAG_LEN_0 \
+ 0x15c09cUL
+#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q \
+ 0x14038cUL
+#define PBF_REG_TQ_LINES_FREED_CNT_Q0 \
+ 0x140390UL
+#define PBF_REG_TQ_LINES_FREED_CNT_Q1 \
+ 0x140394UL
+#define PBF_REG_TQ_OCCUPANCY_LB_Q \
+ 0x1403a8UL
+#define PBF_REG_TQ_OCCUPANCY_Q0 \
+ 0x1403acUL
+#define PBF_REG_TQ_OCCUPANCY_Q1 \
+ 0x1403b0UL
+#define PB_REG_PB_INT_MASK \
+ 0x28UL
+#define PB_REG_PB_PRTY_MASK \
+ 0x38UL
+#define PB_REG_PB_PRTY_STS_CLR \
+ 0x30UL
+#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR \
+ (0x1<<0)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW \
+ (0x1<<8)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR \
+ (0x1<<1)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN \
+ (0x1<<6)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN \
+ (0x1<<7)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN \
+ (0x1<<4)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN \
+ (0x1<<3)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN \
+ (0x1<<5)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN \
+ (0x1<<2)
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR \
+ 0x9418UL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \
+ 0x9478UL
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR \
+ 0x947cUL
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR \
+ 0x9480UL
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR \
+ 0x9474UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
+ 0x942cUL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+ 0x9430UL
+#define PGLUE_B_REG_INTERNAL_VFID_ENABLE \
+ 0x9438UL
+#define PGLUE_B_REG_PGLUE_B_INT_STS \
+ 0x9298UL
+#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR \
+ 0x929cUL
+#define PGLUE_B_REG_PGLUE_B_PRTY_MASK \
+ 0x92b4UL
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR \
+ 0x92acUL
+#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR \
+ 0x9458UL
+#define PGLUE_B_REG_TAGS_63_32 \
+ 0x9244UL
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR \
+ 0x9470UL
+#define PRS_REG_A_PRSU_20 \
+ 0x40134UL
+#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT \
+ 0x4011cUL
+#define PRS_REG_E1HOV_MODE \
+ 0x401c8UL
+#define PRS_REG_HDRS_AFTER_BASIC \
+ 0x40238UL
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 \
+ 0x40270UL
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 \
+ 0x40290UL
+#define PRS_REG_HDRS_AFTER_TAG_0 \
+ 0x40248UL
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 \
+ 0x40280UL
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 \
+ 0x402a0UL
+#define PRS_REG_MUST_HAVE_HDRS \
+ 0x40254UL
+#define PRS_REG_MUST_HAVE_HDRS_PORT_0 \
+ 0x4028cUL
+#define PRS_REG_MUST_HAVE_HDRS_PORT_1 \
+ 0x402acUL
+#define PRS_REG_NIC_MODE \
+ 0x40138UL
+#define PRS_REG_NUM_OF_PACKETS \
+ 0x40124UL
+#define PRS_REG_PRS_PRTY_MASK \
+ 0x401a4UL
+#define PRS_REG_PRS_PRTY_STS_CLR \
+ 0x4019cUL
+#define PRS_REG_TAG_ETHERTYPE_0 \
+ 0x401d4UL
+#define PRS_REG_TAG_LEN_0 \
+ 0x4022cUL
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT \
+ (0x1<<19)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF \
+ (0x1<<20)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN \
+ (0x1<<22)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED \
+ (0x1<<23)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED \
+ (0x1<<24)
+#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR \
+ (0x1<<7)
+#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR \
+ (0x1<<7)
+#define PXP2_REG_PGL_ADDR_88_F0 \
+ 0x120534UL
+#define PXP2_REG_PGL_ADDR_88_F1 \
+ 0x120544UL
+#define PXP2_REG_PGL_ADDR_8C_F0 \
+ 0x120538UL
+#define PXP2_REG_PGL_ADDR_8C_F1 \
+ 0x120548UL
+#define PXP2_REG_PGL_ADDR_90_F0 \
+ 0x12053cUL
+#define PXP2_REG_PGL_ADDR_90_F1 \
+ 0x12054cUL
+#define PXP2_REG_PGL_ADDR_94_F0 \
+ 0x120540UL
+#define PXP2_REG_PGL_ADDR_94_F1 \
+ 0x120550UL
+#define PXP2_REG_PGL_EXP_ROM2 \
+ 0x120808UL
+#define PXP2_REG_PGL_PRETEND_FUNC_F0 \
+ 0x120674UL
+#define PXP2_REG_PGL_PRETEND_FUNC_F1 \
+ 0x120678UL
+#define PXP2_REG_PGL_TAGS_LIMIT \
+ 0x1205a8UL
+#define PXP2_REG_PSWRQ_BW_ADD1 \
+ 0x1201c0UL
+#define PXP2_REG_PSWRQ_BW_ADD10 \
+ 0x1201e4UL
+#define PXP2_REG_PSWRQ_BW_ADD11 \
+ 0x1201e8UL
+#define PXP2_REG_PSWRQ_BW_ADD2 \
+ 0x1201c4UL
+#define PXP2_REG_PSWRQ_BW_ADD28 \
+ 0x120228UL
+#define PXP2_REG_PSWRQ_BW_ADD3 \
+ 0x1201c8UL
+#define PXP2_REG_PSWRQ_BW_ADD6 \
+ 0x1201d4UL
+#define PXP2_REG_PSWRQ_BW_ADD7 \
+ 0x1201d8UL
+#define PXP2_REG_PSWRQ_BW_ADD8 \
+ 0x1201dcUL
+#define PXP2_REG_PSWRQ_BW_ADD9 \
+ 0x1201e0UL
+#define PXP2_REG_PSWRQ_BW_L1 \
+ 0x1202b0UL
+#define PXP2_REG_PSWRQ_BW_L10 \
+ 0x1202d4UL
+#define PXP2_REG_PSWRQ_BW_L11 \
+ 0x1202d8UL
+#define PXP2_REG_PSWRQ_BW_L2 \
+ 0x1202b4UL
+#define PXP2_REG_PSWRQ_BW_L28 \
+ 0x120318UL
+#define PXP2_REG_PSWRQ_BW_L3 \
+ 0x1202b8UL
+#define PXP2_REG_PSWRQ_BW_L6 \
+ 0x1202c4UL
+#define PXP2_REG_PSWRQ_BW_L7 \
+ 0x1202c8UL
+#define PXP2_REG_PSWRQ_BW_L8 \
+ 0x1202ccUL
+#define PXP2_REG_PSWRQ_BW_L9 \
+ 0x1202d0UL
+#define PXP2_REG_PSWRQ_BW_RD \
+ 0x120324UL
+#define PXP2_REG_PSWRQ_BW_UB1 \
+ 0x120238UL
+#define PXP2_REG_PSWRQ_BW_UB10 \
+ 0x12025cUL
+#define PXP2_REG_PSWRQ_BW_UB11 \
+ 0x120260UL
+#define PXP2_REG_PSWRQ_BW_UB2 \
+ 0x12023cUL
+#define PXP2_REG_PSWRQ_BW_UB28 \
+ 0x1202a0UL
+#define PXP2_REG_PSWRQ_BW_UB3 \
+ 0x120240UL
+#define PXP2_REG_PSWRQ_BW_UB6 \
+ 0x12024cUL
+#define PXP2_REG_PSWRQ_BW_UB7 \
+ 0x120250UL
+#define PXP2_REG_PSWRQ_BW_UB8 \
+ 0x120254UL
+#define PXP2_REG_PSWRQ_BW_UB9 \
+ 0x120258UL
+#define PXP2_REG_PSWRQ_BW_WR \
+ 0x120328UL
+#define PXP2_REG_PSWRQ_CDU0_L2P \
+ 0x120000UL
+#define PXP2_REG_PSWRQ_QM0_L2P \
+ 0x120038UL
+#define PXP2_REG_PSWRQ_SRC0_L2P \
+ 0x120054UL
+#define PXP2_REG_PSWRQ_TM0_L2P \
+ 0x12001cUL
+#define PXP2_REG_PXP2_INT_MASK_0 \
+ 0x120578UL
+#define PXP2_REG_PXP2_INT_MASK_1 \
+ 0x120614UL
+#define PXP2_REG_PXP2_INT_STS_0 \
+ 0x12056cUL
+#define PXP2_REG_PXP2_INT_STS_1 \
+ 0x120608UL
+#define PXP2_REG_PXP2_INT_STS_CLR_0 \
+ 0x120570UL
+#define PXP2_REG_PXP2_PRTY_MASK_0 \
+ 0x120588UL
+#define PXP2_REG_PXP2_PRTY_MASK_1 \
+ 0x120598UL
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0 \
+ 0x120580UL
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1 \
+ 0x120590UL
+#define PXP2_REG_RD_BLK_CNT \
+ 0x120418UL
+#define PXP2_REG_RD_CDURD_SWAP_MODE \
+ 0x120404UL
+#define PXP2_REG_RD_DISABLE_INPUTS \
+ 0x120374UL
+#define PXP2_REG_RD_INIT_DONE \
+ 0x120370UL
+#define PXP2_REG_RD_PBF_SWAP_MODE \
+ 0x1203f4UL
+#define PXP2_REG_RD_PORT_IS_IDLE_0 \
+ 0x12041cUL
+#define PXP2_REG_RD_PORT_IS_IDLE_1 \
+ 0x120420UL
+#define PXP2_REG_RD_QM_SWAP_MODE \
+ 0x1203f8UL
+#define PXP2_REG_RD_SRC_SWAP_MODE \
+ 0x120400UL
+#define PXP2_REG_RD_SR_CNT \
+ 0x120414UL
+#define PXP2_REG_RD_START_INIT \
+ 0x12036cUL
+#define PXP2_REG_RD_TM_SWAP_MODE \
+ 0x1203fcUL
+#define PXP2_REG_RQ_BW_RD_ADD0 \
+ 0x1201bcUL
+#define PXP2_REG_RQ_BW_RD_ADD12 \
+ 0x1201ecUL
+#define PXP2_REG_RQ_BW_RD_ADD13 \
+ 0x1201f0UL
+#define PXP2_REG_RQ_BW_RD_ADD14 \
+ 0x1201f4UL
+#define PXP2_REG_RQ_BW_RD_ADD15 \
+ 0x1201f8UL
+#define PXP2_REG_RQ_BW_RD_ADD16 \
+ 0x1201fcUL
+#define PXP2_REG_RQ_BW_RD_ADD17 \
+ 0x120200UL
+#define PXP2_REG_RQ_BW_RD_ADD18 \
+ 0x120204UL
+#define PXP2_REG_RQ_BW_RD_ADD19 \
+ 0x120208UL
+#define PXP2_REG_RQ_BW_RD_ADD20 \
+ 0x12020cUL
+#define PXP2_REG_RQ_BW_RD_ADD22 \
+ 0x120210UL
+#define PXP2_REG_RQ_BW_RD_ADD23 \
+ 0x120214UL
+#define PXP2_REG_RQ_BW_RD_ADD24 \
+ 0x120218UL
+#define PXP2_REG_RQ_BW_RD_ADD25 \
+ 0x12021cUL
+#define PXP2_REG_RQ_BW_RD_ADD26 \
+ 0x120220UL
+#define PXP2_REG_RQ_BW_RD_ADD27 \
+ 0x120224UL
+#define PXP2_REG_RQ_BW_RD_ADD4 \
+ 0x1201ccUL
+#define PXP2_REG_RQ_BW_RD_ADD5 \
+ 0x1201d0UL
+#define PXP2_REG_RQ_BW_RD_L0 \
+ 0x1202acUL
+#define PXP2_REG_RQ_BW_RD_L12 \
+ 0x1202dcUL
+#define PXP2_REG_RQ_BW_RD_L13 \
+ 0x1202e0UL
+#define PXP2_REG_RQ_BW_RD_L14 \
+ 0x1202e4UL
+#define PXP2_REG_RQ_BW_RD_L15 \
+ 0x1202e8UL
+#define PXP2_REG_RQ_BW_RD_L16 \
+ 0x1202ecUL
+#define PXP2_REG_RQ_BW_RD_L17 \
+ 0x1202f0UL
+#define PXP2_REG_RQ_BW_RD_L18 \
+ 0x1202f4UL
+#define PXP2_REG_RQ_BW_RD_L19 \
+ 0x1202f8UL
+#define PXP2_REG_RQ_BW_RD_L20 \
+ 0x1202fcUL
+#define PXP2_REG_RQ_BW_RD_L22 \
+ 0x120300UL
+#define PXP2_REG_RQ_BW_RD_L23 \
+ 0x120304UL
+#define PXP2_REG_RQ_BW_RD_L24 \
+ 0x120308UL
+#define PXP2_REG_RQ_BW_RD_L25 \
+ 0x12030cUL
+#define PXP2_REG_RQ_BW_RD_L26 \
+ 0x120310UL
+#define PXP2_REG_RQ_BW_RD_L27 \
+ 0x120314UL
+#define PXP2_REG_RQ_BW_RD_L4 \
+ 0x1202bcUL
+#define PXP2_REG_RQ_BW_RD_L5 \
+ 0x1202c0UL
+#define PXP2_REG_RQ_BW_RD_UBOUND0 \
+ 0x120234UL
+#define PXP2_REG_RQ_BW_RD_UBOUND12 \
+ 0x120264UL
+#define PXP2_REG_RQ_BW_RD_UBOUND13 \
+ 0x120268UL
+#define PXP2_REG_RQ_BW_RD_UBOUND14 \
+ 0x12026cUL
+#define PXP2_REG_RQ_BW_RD_UBOUND15 \
+ 0x120270UL
+#define PXP2_REG_RQ_BW_RD_UBOUND16 \
+ 0x120274UL
+#define PXP2_REG_RQ_BW_RD_UBOUND17 \
+ 0x120278UL
+#define PXP2_REG_RQ_BW_RD_UBOUND18 \
+ 0x12027cUL
+#define PXP2_REG_RQ_BW_RD_UBOUND19 \
+ 0x120280UL
+#define PXP2_REG_RQ_BW_RD_UBOUND20 \
+ 0x120284UL
+#define PXP2_REG_RQ_BW_RD_UBOUND22 \
+ 0x120288UL
+#define PXP2_REG_RQ_BW_RD_UBOUND23 \
+ 0x12028cUL
+#define PXP2_REG_RQ_BW_RD_UBOUND24 \
+ 0x120290UL
+#define PXP2_REG_RQ_BW_RD_UBOUND25 \
+ 0x120294UL
+#define PXP2_REG_RQ_BW_RD_UBOUND26 \
+ 0x120298UL
+#define PXP2_REG_RQ_BW_RD_UBOUND27 \
+ 0x12029cUL
+#define PXP2_REG_RQ_BW_RD_UBOUND4 \
+ 0x120244UL
+#define PXP2_REG_RQ_BW_RD_UBOUND5 \
+ 0x120248UL
+#define PXP2_REG_RQ_BW_WR_ADD29 \
+ 0x12022cUL
+#define PXP2_REG_RQ_BW_WR_ADD30 \
+ 0x120230UL
+#define PXP2_REG_RQ_BW_WR_L29 \
+ 0x12031cUL
+#define PXP2_REG_RQ_BW_WR_L30 \
+ 0x120320UL
+#define PXP2_REG_RQ_BW_WR_UBOUND29 \
+ 0x1202a4UL
+#define PXP2_REG_RQ_BW_WR_UBOUND30 \
+ 0x1202a8UL
+#define PXP2_REG_RQ_CDU_ENDIAN_M \
+ 0x1201a0UL
+#define PXP2_REG_RQ_CDU_FIRST_ILT \
+ 0x12061cUL
+#define PXP2_REG_RQ_CDU_LAST_ILT \
+ 0x120620UL
+#define PXP2_REG_RQ_CDU_P_SIZE \
+ 0x120018UL
+#define PXP2_REG_RQ_CFG_DONE \
+ 0x1201b4UL
+#define PXP2_REG_RQ_DBG_ENDIAN_M \
+ 0x1201a4UL
+#define PXP2_REG_RQ_DISABLE_INPUTS \
+ 0x120330UL
+#define PXP2_REG_RQ_DRAM_ALIGN \
+ 0x1205b0UL
+#define PXP2_REG_RQ_DRAM_ALIGN_RD \
+ 0x12092cUL
+#define PXP2_REG_RQ_DRAM_ALIGN_SEL \
+ 0x120930UL
+#define PXP2_REG_RQ_HC_ENDIAN_M \
+ 0x1201a8UL
+#define PXP2_REG_RQ_ONCHIP_AT \
+ 0x122000UL
+#define PXP2_REG_RQ_ONCHIP_AT_B0 \
+ 0x128000UL
+#define PXP2_REG_RQ_PDR_LIMIT \
+ 0x12033cUL
+#define PXP2_REG_RQ_QM_ENDIAN_M \
+ 0x120194UL
+#define PXP2_REG_RQ_QM_FIRST_ILT \
+ 0x120634UL
+#define PXP2_REG_RQ_QM_LAST_ILT \
+ 0x120638UL
+#define PXP2_REG_RQ_QM_P_SIZE \
+ 0x120050UL
+#define PXP2_REG_RQ_RBC_DONE \
+ 0x1201b0UL
+#define PXP2_REG_RQ_RD_MBS0 \
+ 0x120160UL
+#define PXP2_REG_RQ_RD_MBS1 \
+ 0x120168UL
+#define PXP2_REG_RQ_SRC_ENDIAN_M \
+ 0x12019cUL
+#define PXP2_REG_RQ_SRC_FIRST_ILT \
+ 0x12063cUL
+#define PXP2_REG_RQ_SRC_LAST_ILT \
+ 0x120640UL
+#define PXP2_REG_RQ_SRC_P_SIZE \
+ 0x12006cUL
+#define PXP2_REG_RQ_TM_ENDIAN_M \
+ 0x120198UL
+#define PXP2_REG_RQ_TM_FIRST_ILT \
+ 0x120644UL
+#define PXP2_REG_RQ_TM_LAST_ILT \
+ 0x120648UL
+#define PXP2_REG_RQ_TM_P_SIZE \
+ 0x120034UL
+#define PXP2_REG_RQ_WR_MBS0 \
+ 0x12015cUL
+#define PXP2_REG_RQ_WR_MBS1 \
+ 0x120164UL
+#define PXP2_REG_WR_CDU_MPS \
+ 0x1205f0UL
+#define PXP2_REG_WR_CSDM_MPS \
+ 0x1205d0UL
+#define PXP2_REG_WR_DBG_MPS \
+ 0x1205e8UL
+#define PXP2_REG_WR_DMAE_MPS \
+ 0x1205ecUL
+#define PXP2_REG_WR_HC_MPS \
+ 0x1205c8UL
+#define PXP2_REG_WR_QM_MPS \
+ 0x1205dcUL
+#define PXP2_REG_WR_SRC_MPS \
+ 0x1205e4UL
+#define PXP2_REG_WR_TM_MPS \
+ 0x1205e0UL
+#define PXP2_REG_WR_TSDM_MPS \
+ 0x1205d4UL
+#define PXP2_REG_WR_USDMDP_TH \
+ 0x120348UL
+#define PXP2_REG_WR_USDM_MPS \
+ 0x1205ccUL
+#define PXP2_REG_WR_XSDM_MPS \
+ 0x1205d8UL
+#define PXP_REG_HST_DISCARD_DOORBELLS \
+ 0x1030a4UL
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES \
+ 0x1030a8UL
+#define PXP_REG_HST_ZONE_PERMISSION_TABLE \
+ 0x103400UL
+#define PXP_REG_PXP_INT_MASK_0 \
+ 0x103074UL
+#define PXP_REG_PXP_INT_MASK_1 \
+ 0x103084UL
+#define PXP_REG_PXP_INT_STS_CLR_0 \
+ 0x10306cUL
+#define PXP_REG_PXP_INT_STS_CLR_1 \
+ 0x10307cUL
+#define PXP_REG_PXP_PRTY_MASK \
+ 0x103094UL
+#define PXP_REG_PXP_PRTY_STS_CLR \
+ 0x10308cUL
+#define QM_REG_BASEADDR \
+ 0x168900UL
+#define QM_REG_BASEADDR_EXT_A \
+ 0x16e100UL
+#define QM_REG_BYTECRDCMDQ_0 \
+ 0x16e6e8UL
+#define QM_REG_CONNNUM_0 \
+ 0x168020UL
+#define QM_REG_PF_EN \
+ 0x16e70cUL
+#define QM_REG_PF_USG_CNT_0 \
+ 0x16e040UL
+#define QM_REG_PTRTBL \
+ 0x168a00UL
+#define QM_REG_PTRTBL_EXT_A \
+ 0x16e200UL
+#define QM_REG_QM_INT_MASK \
+ 0x168444UL
+#define QM_REG_QM_PRTY_MASK \
+ 0x168454UL
+#define QM_REG_QM_PRTY_STS_CLR \
+ 0x16844cUL
+#define QM_REG_QVOQIDX_0 \
+ 0x1680f4UL
+#define QM_REG_SOFT_RESET \
+ 0x168428UL
+#define QM_REG_VOQQMASK_0_LSB \
+ 0x168240UL
+#define SEM_FAST_REG_PARITY_RST \
+ 0x18840UL
+#define SRC_REG_COUNTFREE0 \
+ 0x40500UL
+#define SRC_REG_FIRSTFREE0 \
+ 0x40510UL
+#define SRC_REG_KEYSEARCH_0 \
+ 0x40458UL
+#define SRC_REG_KEYSEARCH_1 \
+ 0x4045cUL
+#define SRC_REG_KEYSEARCH_2 \
+ 0x40460UL
+#define SRC_REG_KEYSEARCH_3 \
+ 0x40464UL
+#define SRC_REG_KEYSEARCH_4 \
+ 0x40468UL
+#define SRC_REG_KEYSEARCH_5 \
+ 0x4046cUL
+#define SRC_REG_KEYSEARCH_6 \
+ 0x40470UL
+#define SRC_REG_KEYSEARCH_7 \
+ 0x40474UL
+#define SRC_REG_KEYSEARCH_8 \
+ 0x40478UL
+#define SRC_REG_KEYSEARCH_9 \
+ 0x4047cUL
+#define SRC_REG_LASTFREE0 \
+ 0x40530UL
+#define SRC_REG_NUMBER_HASH_BITS0 \
+ 0x40400UL
+#define SRC_REG_SOFT_RST \
+ 0x4049cUL
+#define SRC_REG_SRC_PRTY_MASK \
+ 0x404c8UL
+#define SRC_REG_SRC_PRTY_STS_CLR \
+ 0x404c0UL
+#define TCM_REG_PRS_IFEN \
+ 0x50020UL
+#define TCM_REG_TCM_INT_MASK \
+ 0x501dcUL
+#define TCM_REG_TCM_PRTY_MASK \
+ 0x501ecUL
+#define TCM_REG_TCM_PRTY_STS_CLR \
+ 0x501e4UL
+#define TM_REG_EN_LINEAR0_TIMER \
+ 0x164014UL
+#define TM_REG_LIN0_MAX_ACTIVE_CID \
+ 0x164048UL
+#define TM_REG_LIN0_NUM_SCANS \
+ 0x1640a0UL
+#define TM_REG_LIN0_SCAN_ON \
+ 0x1640d0UL
+#define TM_REG_LIN0_SCAN_TIME \
+ 0x16403cUL
+#define TM_REG_LIN0_VNIC_UC \
+ 0x164128UL
+#define TM_REG_TM_INT_MASK \
+ 0x1640fcUL
+#define TM_REG_TM_PRTY_MASK \
+ 0x16410cUL
+#define TM_REG_TM_PRTY_STS_CLR \
+ 0x164104UL
+#define TSDM_REG_ENABLE_IN1 \
+ 0x42238UL
+#define TSDM_REG_TSDM_INT_MASK_0 \
+ 0x4229cUL
+#define TSDM_REG_TSDM_INT_MASK_1 \
+ 0x422acUL
+#define TSDM_REG_TSDM_PRTY_MASK \
+ 0x422bcUL
+#define TSDM_REG_TSDM_PRTY_STS_CLR \
+ 0x422b4UL
+#define TSEM_REG_FAST_MEMORY \
+ 0x1a0000UL
+#define TSEM_REG_INT_TABLE \
+ 0x180400UL
+#define TSEM_REG_PASSIVE_BUFFER \
+ 0x181000UL
+#define TSEM_REG_PRAM \
+ 0x1c0000UL
+#define TSEM_REG_TSEM_INT_MASK_0 \
+ 0x180100UL
+#define TSEM_REG_TSEM_INT_MASK_1 \
+ 0x180110UL
+#define TSEM_REG_TSEM_PRTY_MASK_0 \
+ 0x180120UL
+#define TSEM_REG_TSEM_PRTY_MASK_1 \
+ 0x180130UL
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0 \
+ 0x180118UL
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1 \
+ 0x180128UL
+#define TSEM_REG_VFPF_ERR_NUM \
+ 0x180380UL
+#define UCM_REG_UCM_INT_MASK \
+ 0xe01d4UL
+#define UCM_REG_UCM_PRTY_MASK \
+ 0xe01e4UL
+#define UCM_REG_UCM_PRTY_STS_CLR \
+ 0xe01dcUL
+#define UMAC_COMMAND_CONFIG_REG_HD_ENA \
+ (0x1<<10)
+#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE \
+ (0x1<<28)
+#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA \
+ (0x1<<15)
+#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK \
+ (0x1<<24)
+#define UMAC_COMMAND_CONFIG_REG_PAD_EN \
+ (0x1<<5)
+#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE \
+ (0x1<<8)
+#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN \
+ (0x1<<4)
+#define UMAC_COMMAND_CONFIG_REG_RX_ENA \
+ (0x1<<1)
+#define UMAC_COMMAND_CONFIG_REG_SW_RESET \
+ (0x1<<13)
+#define UMAC_COMMAND_CONFIG_REG_TX_ENA \
+ (0x1<<0)
+#define UMAC_REG_COMMAND_CONFIG \
+ 0x8UL
+#define UMAC_REG_EEE_WAKE_TIMER \
+ 0x6cUL
+#define UMAC_REG_MAC_ADDR0 \
+ 0xcUL
+#define UMAC_REG_MAC_ADDR1 \
+ 0x10UL
+#define UMAC_REG_MAXFR \
+ 0x14UL
+#define UMAC_REG_UMAC_EEE_CTRL \
+ 0x64UL
+#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN \
+ (0x1<<3)
+#define USDM_REG_USDM_INT_MASK_0 \
+ 0xc42a0UL
+#define USDM_REG_USDM_INT_MASK_1 \
+ 0xc42b0UL
+#define USDM_REG_USDM_PRTY_MASK \
+ 0xc42c0UL
+#define USDM_REG_USDM_PRTY_STS_CLR \
+ 0xc42b8UL
+#define USEM_REG_FAST_MEMORY \
+ 0x320000UL
+#define USEM_REG_INT_TABLE \
+ 0x300400UL
+#define USEM_REG_PASSIVE_BUFFER \
+ 0x302000UL
+#define USEM_REG_PRAM \
+ 0x340000UL
+#define USEM_REG_USEM_INT_MASK_0 \
+ 0x300110UL
+#define USEM_REG_USEM_INT_MASK_1 \
+ 0x300120UL
+#define USEM_REG_USEM_PRTY_MASK_0 \
+ 0x300130UL
+#define USEM_REG_USEM_PRTY_MASK_1 \
+ 0x300140UL
+#define USEM_REG_USEM_PRTY_STS_CLR_0 \
+ 0x300128UL
+#define USEM_REG_USEM_PRTY_STS_CLR_1 \
+ 0x300138UL
+#define USEM_REG_VFPF_ERR_NUM \
+ 0x300380UL
+#define VFC_MEMORIES_RST_REG_CAM_RST \
+ (0x1<<0)
+#define VFC_MEMORIES_RST_REG_RAM_RST \
+ (0x1<<1)
+#define VFC_REG_MEMORIES_RST \
+ 0x1943cUL
+#define XCM_REG_XCM_INT_MASK \
+ 0x202b4UL
+#define XCM_REG_XCM_PRTY_MASK \
+ 0x202c4UL
+#define XCM_REG_XCM_PRTY_STS_CLR \
+ 0x202bcUL
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS \
+ (0x1<<0)
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS \
+ (0x1<<1)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK \
+ (0x1<<2)
+#define XMAC_CTRL_REG_RX_EN \
+ (0x1<<1)
+#define XMAC_CTRL_REG_SOFT_RESET \
+ (0x1<<6)
+#define XMAC_CTRL_REG_TX_EN \
+ (0x1<<0)
+#define XMAC_CTRL_REG_XLGMII_ALIGN_ENB \
+ (0x1<<7)
+#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN \
+ (0x1<<18)
+#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN \
+ (0x1<<17)
+#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON \
+ (0x1<<1)
+#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN \
+ (0x1<<0)
+#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN \
+ (0x1<<3)
+#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN \
+ (0x1<<4)
+#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN \
+ (0x1<<5)
+#define XMAC_REG_CLEAR_RX_LSS_STATUS \
+ 0x60UL
+#define XMAC_REG_CTRL \
+ 0UL
+#define XMAC_REG_CTRL_SA_HI \
+ 0x2cUL
+#define XMAC_REG_CTRL_SA_LO \
+ 0x28UL
+#define XMAC_REG_EEE_CTRL \
+ 0xd8UL
+#define XMAC_REG_EEE_TIMERS_HI \
+ 0xe4UL
+#define XMAC_REG_PAUSE_CTRL \
+ 0x68UL
+#define XMAC_REG_PFC_CTRL \
+ 0x70UL
+#define XMAC_REG_PFC_CTRL_HI \
+ 0x74UL
+#define XMAC_REG_RX_LSS_CTRL \
+ 0x50UL
+#define XMAC_REG_RX_LSS_STATUS \
+ 0x58UL
+#define XMAC_REG_RX_MAX_SIZE \
+ 0x40UL
+#define XMAC_REG_TX_CTRL \
+ 0x20UL
+#define XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE \
+ (0x1<<0)
+#define XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE \
+ (0x1<<1)
+#define XSDM_REG_OPERATION_GEN \
+ 0x1664c4UL
+#define XSDM_REG_XSDM_INT_MASK_0 \
+ 0x16629cUL
+#define XSDM_REG_XSDM_INT_MASK_1 \
+ 0x1662acUL
+#define XSDM_REG_XSDM_PRTY_MASK \
+ 0x1662bcUL
+#define XSDM_REG_XSDM_PRTY_STS_CLR \
+ 0x1662b4UL
+#define XSEM_REG_FAST_MEMORY \
+ 0x2a0000UL
+#define XSEM_REG_INT_TABLE \
+ 0x280400UL
+#define XSEM_REG_PASSIVE_BUFFER \
+ 0x282000UL
+#define XSEM_REG_PRAM \
+ 0x2c0000UL
+#define XSEM_REG_VFPF_ERR_NUM \
+ 0x280380UL
+#define XSEM_REG_XSEM_INT_MASK_0 \
+ 0x280110UL
+#define XSEM_REG_XSEM_INT_MASK_1 \
+ 0x280120UL
+#define XSEM_REG_XSEM_PRTY_MASK_0 \
+ 0x280130UL
+#define XSEM_REG_XSEM_PRTY_MASK_1 \
+ 0x280140UL
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0 \
+ 0x280128UL
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1 \
+ 0x280138UL
+#define MCPR_ACCESS_LOCK_LOCK (1L<<31)
+#define MCPR_IMC_COMMAND_ENABLE (1L<<31)
+#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16
+#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28
+#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8
+#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
+#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
+#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
+#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0)
+#define MCPR_NVM_COMMAND_DOIT (1L<<4)
+#define MCPR_NVM_COMMAND_DONE (1L<<3)
+#define MCPR_NVM_COMMAND_FIRST (1L<<7)
+#define MCPR_NVM_COMMAND_LAST (1L<<8)
+#define MCPR_NVM_COMMAND_WR (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9)
+#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1)
+
+
+#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3)
+#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
+#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3)
+#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3)
+#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3)
+#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3)
+#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3)
+#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3)
+#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3)
+#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3)
+#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3)
+#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3)
+#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
+#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3)
+#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
+#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3)
+#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3)
+#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3)
+#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3)
+#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3)
+#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3)
+#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3)
+#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3)
+#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3)
+#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3)
+
+
+#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
+#define EMAC_LED_100MB_OVERRIDE (1L<<2)
+#define EMAC_LED_10MB_OVERRIDE (1L<<3)
+#define EMAC_LED_OVERRIDE (1L<<0)
+#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26)
+#define EMAC_MDIO_COMM_DATA (0xffffL<<0)
+#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
+#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
+#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
+#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16)
+#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
+#define EMAC_MDIO_STATUS_10MB (1L<<1)
+#define EMAC_MODE_25G_MODE (1L<<5)
+#define EMAC_MODE_HALF_DUPLEX (1L<<1)
+#define EMAC_MODE_PORT_GMII (2L<<2)
+#define EMAC_MODE_PORT_MII (1L<<2)
+#define EMAC_MODE_PORT_MII_10M (3L<<2)
+#define EMAC_MODE_RESET (1L<<0)
+#define EMAC_REG_EMAC_LED 0xc
+#define EMAC_REG_EMAC_MAC_MATCH 0x10
+#define EMAC_REG_EMAC_MDIO_COMM 0xac
+#define EMAC_REG_EMAC_MDIO_MODE 0xb4
+#define EMAC_REG_EMAC_MDIO_STATUS 0xb0
+#define EMAC_REG_EMAC_MODE 0x0
+#define EMAC_REG_EMAC_RX_MODE 0xc8
+#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c
+#define EMAC_REG_EMAC_RX_STAT_AC 0x180
+#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4
+#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23
+#define EMAC_REG_EMAC_TX_MODE 0xbc
+#define EMAC_REG_EMAC_TX_STAT_AC 0x280
+#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
+#define EMAC_REG_RX_PFC_MODE 0x320
+#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2)
+#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1)
+#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0)
+#define EMAC_REG_RX_PFC_PARAM 0x324
+#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0
+#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334
+#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0)
+#define EMAC_RX_MODE_FLOW_EN (1L<<2)
+#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
+#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
+#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
+#define EMAC_RX_MODE_RESET (1L<<0)
+#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
+#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
+#define EMAC_TX_MODE_FLOW_EN (1L<<4)
+#define EMAC_TX_MODE_RESET (1L<<0)
+
+
+#define MISC_REGISTERS_GPIO_0 0
+#define MISC_REGISTERS_GPIO_1 1
+#define MISC_REGISTERS_GPIO_2 2
+#define MISC_REGISTERS_GPIO_3 3
+#define MISC_REGISTERS_GPIO_CLR_POS 16
+#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24)
+#define MISC_REGISTERS_GPIO_FLOAT_POS 24
+#define MISC_REGISTERS_GPIO_HIGH 1
+#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2
+#define MISC_REGISTERS_GPIO_INT_CLR_POS 24
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1
+#define MISC_REGISTERS_GPIO_INT_SET_POS 16
+#define MISC_REGISTERS_GPIO_LOW 0
+#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1
+#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0
+#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
+#define MISC_REGISTERS_GPIO_SET_POS 8
+#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_BRB1 (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_1_RST_DORQ \
+ (0x1<<19)
+#define MISC_REGISTERS_RESET_REG_1_RST_HC \
+ (0x1<<29)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXP \
+ (0x1<<26)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXPV \
+ (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_RST_QM \
+ (0x1<<17)
+#define MISC_REGISTERS_RESET_REG_1_SET 0x584
+#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
+#define MISC_REGISTERS_RESET_REG_2_MSTAT0 \
+ (0x1<<24)
+#define MISC_REGISTERS_RESET_REG_2_MSTAT1 \
+ (0x1<<25)
+#define MISC_REGISTERS_RESET_REG_2_PGLC \
+ (0x1<<19)
+#define MISC_REGISTERS_RESET_REG_2_RST_ATC \
+ (0x1<<17)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE \
+ (0x1<<14)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE \
+ (0x1<<15)
+#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE \
+ (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO \
+ (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR \
+ (0x1<<16)
+#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
+#define MISC_REGISTERS_RESET_REG_2_SET 0x594
+#define MISC_REGISTERS_RESET_REG_2_UMAC0 \
+ (0x1<<20)
+#define MISC_REGISTERS_RESET_REG_2_UMAC1 \
+ (0x1<<21)
+#define MISC_REGISTERS_RESET_REG_2_XMAC \
+ (0x1<<22)
+#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT \
+ (0x1<<23)
+#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4
+#define MISC_SPIO_CLR_POS 16
+#define MISC_SPIO_FLOAT (0xffL<<24)
+#define MISC_SPIO_FLOAT_POS 24
+#define MISC_SPIO_INPUT_HI_Z 2
+#define MISC_SPIO_INT_OLD_SET_POS 16
+#define MISC_SPIO_OUTPUT_HIGH 1
+#define MISC_SPIO_OUTPUT_LOW 0
+#define MISC_SPIO_SET_POS 8
+#define MISC_SPIO_SPIO4 0x10
+#define MISC_SPIO_SPIO5 0x20
+#define HW_LOCK_MAX_RESOURCE_VALUE 31
+#define HW_LOCK_RESOURCE_DRV_FLAGS 10
+#define HW_LOCK_RESOURCE_GPIO 1
+#define HW_LOCK_RESOURCE_NVRAM 12
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
+#define HW_LOCK_RESOURCE_RECOVERY_REG 11
+#define HW_LOCK_RESOURCE_RESET 5
+#define HW_LOCK_RESOURCE_SPIO 2
+
+
+#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1<<19)
+#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1UL<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1UL<<31)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23)
+#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21)
+#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16)
+#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10)
+#define HW_PRTY_ASSERT_SET_0 \
+(AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
+#define HW_PRTY_ASSERT_SET_1 \
+(AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
+#define HW_PRTY_ASSERT_SET_2 \
+(AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+#define HW_PRTY_ASSERT_SET_3 \
+(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+#define HW_PRTY_ASSERT_SET_4 \
+(AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
+#define HW_INTERRUT_ASSERT_SET_0 \
+(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
+#define HW_INTERRUT_ASSERT_SET_1 \
+(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
+#define HW_INTERRUT_ASSERT_SET_2 \
+(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
+
+
+#define RESERVED_GENERAL_ATTENTION_BIT_0 0
+
+#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0
+#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000
+
+#define RESERVED_GENERAL_ATTENTION_BIT_6 6
+#define RESERVED_GENERAL_ATTENTION_BIT_7 7
+#define RESERVED_GENERAL_ATTENTION_BIT_8 8
+#define RESERVED_GENERAL_ATTENTION_BIT_9 9
+#define RESERVED_GENERAL_ATTENTION_BIT_10 10
+#define RESERVED_GENERAL_ATTENTION_BIT_11 11
+#define RESERVED_GENERAL_ATTENTION_BIT_12 12
+#define RESERVED_GENERAL_ATTENTION_BIT_13 13
+#define RESERVED_GENERAL_ATTENTION_BIT_14 14
+#define RESERVED_GENERAL_ATTENTION_BIT_15 15
+#define RESERVED_GENERAL_ATTENTION_BIT_16 16
+#define RESERVED_GENERAL_ATTENTION_BIT_17 17
+#define RESERVED_GENERAL_ATTENTION_BIT_18 18
+#define RESERVED_GENERAL_ATTENTION_BIT_19 19
+#define RESERVED_GENERAL_ATTENTION_BIT_20 20
+#define RESERVED_GENERAL_ATTENTION_BIT_21 21
+
+/* storm asserts attention bits */
+#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7
+#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8
+#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9
+#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10
+
+/* mcp error attention bit */
+#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11
+
+/*E1H NIG status sync attention mapped to group 4-7*/
+#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12
+#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13
+#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14
+#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15
+#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16
+#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17
+#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18
+#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19
+
+ /* Used For Error Recovery: changing this will require more \
+ changes in code that assume
+ * error recovery uses general attn bit20 ! */
+#define ERROR_RECOVERY_ATTENTION_BIT \
+ RESERVED_GENERAL_ATTENTION_BIT_20
+#define RESERVED_ATTENTION_BIT \
+ RESERVED_GENERAL_ATTENTION_BIT_21
+
+#define LATCHED_ATTN_RBCR 23
+#define LATCHED_ATTN_RBCT 24
+#define LATCHED_ATTN_RBCN 25
+#define LATCHED_ATTN_RBCU 26
+#define LATCHED_ATTN_RBCP 27
+#define LATCHED_ATTN_TIMEOUT_GRC 28
+#define LATCHED_ATTN_RSVD_GRC 29
+#define LATCHED_ATTN_ROM_PARITY_MCP 30
+#define LATCHED_ATTN_UM_RX_PARITY_MCP 31
+#define LATCHED_ATTN_UM_TX_PARITY_MCP 32
+#define LATCHED_ATTN_SCPAD_PARITY_MCP 33
+
+#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32)
+#define GENERAL_ATTEN_OFFSET(atten_name) (1UL << ((94 + atten_name) % 32))
+
+
+/*
+ * This file defines GRC base address for every block.
+ * This file is included by chipsim, asm microcode and cpp microcode.
+ * These values are used in Design.xml on regBase attribute
+ * Use the base with the generated offsets of specific registers.
+ */
+
+#define GRCBASE_PXPCS 0x000000
+#define GRCBASE_PCICONFIG 0x002000
+#define GRCBASE_PCIREG 0x002400
+#define GRCBASE_EMAC0 0x008000
+#define GRCBASE_EMAC1 0x008400
+#define GRCBASE_DBU 0x008800
+#define GRCBASE_PGLUE_B 0x009000
+#define GRCBASE_MISC 0x00A000
+#define GRCBASE_DBG 0x00C000
+#define GRCBASE_NIG 0x010000
+#define GRCBASE_XCM 0x020000
+#define GRCBASE_PRS 0x040000
+#define GRCBASE_SRCH 0x040400
+#define GRCBASE_TSDM 0x042000
+#define GRCBASE_TCM 0x050000
+#define GRCBASE_BRB1 0x060000
+#define GRCBASE_MCP 0x080000
+#define GRCBASE_UPB 0x0C1000
+#define GRCBASE_CSDM 0x0C2000
+#define GRCBASE_USDM 0x0C4000
+#define GRCBASE_CCM 0x0D0000
+#define GRCBASE_UCM 0x0E0000
+#define GRCBASE_CDU 0x101000
+#define GRCBASE_DMAE 0x102000
+#define GRCBASE_PXP 0x103000
+#define GRCBASE_CFC 0x104000
+#define GRCBASE_HC 0x108000
+#define GRCBASE_ATC 0x110000
+#define GRCBASE_PXP2 0x120000
+#define GRCBASE_IGU 0x130000
+#define GRCBASE_PBF 0x140000
+#define GRCBASE_UMAC0 0x160000
+#define GRCBASE_UMAC1 0x160400
+#define GRCBASE_XPB 0x161000
+#define GRCBASE_MSTAT0 0x162000
+#define GRCBASE_MSTAT1 0x162800
+#define GRCBASE_XMAC0 0x163000
+#define GRCBASE_XMAC1 0x163800
+#define GRCBASE_TIMERS 0x164000
+#define GRCBASE_XSDM 0x166000
+#define GRCBASE_QM 0x168000
+#define GRCBASE_QM_4PORT 0x168000
+#define GRCBASE_DQ 0x170000
+#define GRCBASE_TSEM 0x180000
+#define GRCBASE_CSEM 0x200000
+#define GRCBASE_XSEM 0x280000
+#define GRCBASE_XSEM_4PORT 0x280000
+#define GRCBASE_USEM 0x300000
+#define GRCBASE_MCP_A 0x380000
+#define GRCBASE_MISC_AEU GRCBASE_MISC
+#define GRCBASE_Tstorm GRCBASE_TSEM
+#define GRCBASE_Cstorm GRCBASE_CSEM
+#define GRCBASE_Xstorm GRCBASE_XSEM
+#define GRCBASE_Ustorm GRCBASE_USEM
+
+
+/* offset of configuration space in the pci core register */
+#define PCICFG_OFFSET 0x2000
+#define PCICFG_VENDOR_ID_OFFSET 0x00
+#define PCICFG_DEVICE_ID_OFFSET 0x02
+#define PCICFG_COMMAND_OFFSET 0x04
+#define PCICFG_COMMAND_IO_SPACE (1<<0)
+#define PCICFG_COMMAND_MEM_SPACE (1<<1)
+#define PCICFG_COMMAND_BUS_MASTER (1<<2)
+#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
+#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
+#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
+#define PCICFG_COMMAND_PERR_ENA (1<<6)
+#define PCICFG_COMMAND_STEPPING (1<<7)
+#define PCICFG_COMMAND_SERR_ENA (1<<8)
+#define PCICFG_COMMAND_FAST_B2B (1<<9)
+#define PCICFG_COMMAND_INT_DISABLE (1<<10)
+#define PCICFG_COMMAND_RESERVED (0x1f<<11)
+#define PCICFG_STATUS_OFFSET 0x06
+#define PCICFG_REVISION_ID_OFFSET 0x08
+#define PCICFG_REVESION_ID_MASK 0xff
+#define PCICFG_REVESION_ID_ERROR_VAL 0xff
+#define PCICFG_CACHE_LINE_SIZE 0x0c
+#define PCICFG_LATENCY_TIMER 0x0d
+#define PCICFG_HEADER_TYPE 0x0e
+#define PCICFG_HEADER_TYPE_NORMAL 0
+#define PCICFG_HEADER_TYPE_BRIDGE 1
+#define PCICFG_HEADER_TYPE_CARDBUS 2
+#define PCICFG_BAR_1_LOW 0x10
+#define PCICFG_BAR_1_HIGH 0x14
+#define PCICFG_BAR_2_LOW 0x18
+#define PCICFG_BAR_2_HIGH 0x1c
+#define PCICFG_BAR_3_LOW 0x20
+#define PCICFG_BAR_3_HIGH 0x24
+#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
+#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
+#define PCICFG_INT_LINE 0x3c
+#define PCICFG_INT_PIN 0x3d
+#define PCICFG_PM_CAPABILITY 0x48
+#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
+#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
+#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
+#define PCICFG_PM_CAPABILITY_DSI (1<<21)
+#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
+#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
+#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
+#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
+#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
+#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
+#define PCICFG_PM_CSR_OFFSET 0x4c
+#define PCICFG_PM_CSR_STATE (0x3<<0)
+#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
+#define PCICFG_PM_CSR_PME_STATUS (1<<15)
+#define PCICFG_VPD_FLAG_ADDR_OFFSET 0x50
+#define PCICFG_VPD_DATA_OFFSET 0x54
+#define PCICFG_MSI_CAP_ID_OFFSET 0x58
+#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16)
+#define PCICFG_MSI_CONTROL_MCAP (0x7<<17)
+#define PCICFG_MSI_CONTROL_MENA (0x7<<20)
+#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23)
+#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24)
+#define PCICFG_MSI_ADDR_LOW_OFFSET 0x5c
+#define PCICFG_MSI_ADDR_HIGH_OFFSET 0x60
+#define PCICFG_MSI_DATA_OFFSET 0x64
+#define PCICFG_GRC_ADDRESS 0x78
+#define PCICFG_GRC_DATA 0x80
+#define PCICFG_ME_REGISTER 0x98
+#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0
+#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16)
+#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27)
+#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30)
+#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31)
+
+#define PCICFG_DEVICE_CONTROL 0xb4
+#define PCICFG_DEVICE_CONTROL_NP_TRANSACTION_PEND (1<<21)
+#define PCICFG_DEVICE_STATUS 0xb6
+#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0)
+#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1)
+#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2)
+#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3)
+#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4)
+#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5)
+#define PCICFG_LINK_CONTROL 0xbc
+
+
+/* config_2 offset */
+#define GRC_CONFIG_2_SIZE_REG 0x408
+#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
+#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
+#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
+#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
+#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
+#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
+#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
+
+/* config_3 offset */
+#define GRC_CONFIG_3_SIZE_REG 0x40c
+#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
+#define PCI_CONFIG_3_FORCE_PME (1L<<24)
+#define PCI_CONFIG_3_PME_STATUS (1L<<25)
+#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
+#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
+#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
+#define PCI_CONFIG_3_PCI_POWER (1L<<31)
+
+#define GRC_REG_DEVICE_CONTROL 0x4d8
+#define PCIE_SRIOV_DISABLE_IN_PROGRESS \
+ (1 << 29) /*When VF Enable is cleared(after it was previously set),
+ this register will read a value of 1, indicating that all the
+ VFs that belong to this PF should be flushed.
+ Software should clear this bit within 1 second of VF Enable
+ being set by writing a 1 to it, so that VFs are visible to the system again.
+ WC */
+#define PCIE_FLR_IN_PROGRESS \
+ (1 << 27) /*When FLR is initiated, this register will read a \
+ value of 1 indicating that the
+ Function is in FLR state. Func can be brought out of FLR state either by
+ writing 1 to this register (at least 50 ms after FLR was initiated),
+ or it can also be cleared automatically after 55 ms if auto_clear bit
+ in private reg space is set. This bit also exists in VF register space
+ WC */
+
+#define GRC_BAR2_CONFIG 0x4e0
+#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
+
+#define GRC_BAR3_CONFIG 0x4f4
+#define PCI_CONFIG_2_BAR3_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR3_64ENA (1L<<4)
+
+#define PCI_PM_DATA_A 0x410
+#define PCI_PM_DATA_B 0x414
+#define PCI_ID_VAL1 0x434
+#define PCI_ID_VAL2 0x438
+#define PCI_ID_VAL3 0x43c
+#define PCI_ID_VAL3_REVISION_ID_ERROR (0xffL<<24)
+
+
+#define GRC_CONFIG_REG_VF_BAR_REG_1 0x608
+#define GRC_CONFIG_REG_VF_BAR_REG_BAR0_SIZE 0xf
+
+#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C
+#define GRC_CR_VF_MSIX_CTRL_VF_MSIX_TBL_SIZE_MASK \
+ 0x3F /*This field resides in VF only and does not exist in PF.
+ This register controls the read value of the MSIX_CONTROL[10:0] register
+ in the VF configuration space. A value of "00000000011" indicates
+ a table size of 4. The value is controlled by IOV_MSIX_TBL_SIZ
+ define in version.v */
+
+#define GRC_CONFIG_REG_PF_INIT_VF 0x624
+#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK \
+ 0xf /*First VF_NUM for PF is encoded in this register.
+ The number of VFs assigned to a PF is assumed to be a multiple of 8.
+ Software should program these bits based on Total Number of VFs \
+ programmed for each PF.
+ Since registers from 0x000-0x7ff are spilt across functions, each PF will have
+ the same location for the same 4 bits*/
+
+#define PXPCS_TL_CONTROL_5 0x814
+#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
+#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/
+#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/
+#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/
+#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/
+#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/
+#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/
+#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/
+
+
+#define PXPCS_TL_FUNC345_STAT 0x854
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 \
+ (1 << 28) /* Unsupported Request Error Status in function4, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4 \
+ (1 << 27) /* ECRC Error TLP Status Status in function 4, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4 \
+ (1 << 26) /* Malformed TLP Status Status in function 4, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4 \
+ (1 << 25) /* Receiver Overflow Status Status in function 4, if \
+ set, generate pcie_err_attn output when this error is seen.. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4 \
+ (1 << 24) /* Unexpected Completion Status Status in function 4, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4 \
+ (1 << 23) /* Receive UR Statusin function 4. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4 \
+ (1 << 22) /* Completer Timeout Status Status in function 4, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4 \
+ (1 << 21) /* Flow Control Protocol Error Status Status in \
+ function 4, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4 \
+ (1 << 20) /* Poisoned Error Status Status in function 4, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 \
+ (1 << 18) /* Unsupported Request Error Status in function3, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3 \
+ (1 << 17) /* ECRC Error TLP Status Status in function 3, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3 \
+ (1 << 16) /* Malformed TLP Status Status in function 3, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3 \
+ (1 << 15) /* Receiver Overflow Status Status in function 3, if \
+ set, generate pcie_err_attn output when this error is seen.. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3 \
+ (1 << 14) /* Unexpected Completion Status Status in function 3, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3 \
+ (1 << 13) /* Receive UR Statusin function 3. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3 \
+ (1 << 12) /* Completer Timeout Status Status in function 3, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3 \
+ (1 << 11) /* Flow Control Protocol Error Status Status in \
+ function 3, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3 \
+ (1 << 10) /* Poisoned Error Status Status in function 3, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2 \
+ (1 << 8) /* Unsupported Request Error Status for Function 2, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2 \
+ (1 << 7) /* ECRC Error TLP Status Status for Function 2, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2 \
+ (1 << 6) /* Malformed TLP Status Status for Function 2, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2 \
+ (1 << 5) /* Receiver Overflow Status Status for Function 2, if \
+ set, generate pcie_err_attn output when this error is seen.. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2 \
+ (1 << 4) /* Unexpected Completion Status Status for Function 2, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2 \
+ (1 << 3) /* Receive UR Statusfor Function 2. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2 \
+ (1 << 2) /* Completer Timeout Status Status for Function 2, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2 \
+ (1 << 1) /* Flow Control Protocol Error Status Status for \
+ Function 2, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2 \
+ (1 << 0) /* Poisoned Error Status Status for Function 2, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+
+
+#define PXPCS_TL_FUNC678_STAT 0x85C
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 \
+ (1 << 28) /* Unsupported Request Error Status in function7, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7 \
+ (1 << 27) /* ECRC Error TLP Status Status in function 7, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7 \
+ (1 << 26) /* Malformed TLP Status Status in function 7, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7 \
+ (1 << 25) /* Receiver Overflow Status Status in function 7, if \
+ set, generate pcie_err_attn output when this error is seen.. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7 \
+ (1 << 24) /* Unexpected Completion Status Status in function 7, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7 \
+ (1 << 23) /* Receive UR Statusin function 7. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7 \
+ (1 << 22) /* Completer Timeout Status Status in function 7, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7 \
+ (1 << 21) /* Flow Control Protocol Error Status Status in \
+ function 7, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7 \
+ (1 << 20) /* Poisoned Error Status Status in function 7, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 \
+ (1 << 18) /* Unsupported Request Error Status in function6, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6 \
+ (1 << 17) /* ECRC Error TLP Status Status in function 6, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6 \
+ (1 << 16) /* Malformed TLP Status Status in function 6, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6 \
+ (1 << 15) /* Receiver Overflow Status Status in function 6, if \
+ set, generate pcie_err_attn output when this error is seen.. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6 \
+ (1 << 14) /* Unexpected Completion Status Status in function 6, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6 \
+ (1 << 13) /* Receive UR Statusin function 6. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6 \
+ (1 << 12) /* Completer Timeout Status Status in function 6, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6 \
+ (1 << 11) /* Flow Control Protocol Error Status Status in \
+ function 6, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6 \
+ (1 << 10) /* Poisoned Error Status Status in function 6, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5 \
+ (1 << 8) /* Unsupported Request Error Status for Function 5, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5 \
+ (1 << 7) /* ECRC Error TLP Status Status for Function 5, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5 \
+ (1 << 6) /* Malformed TLP Status Status for Function 5, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5 \
+ (1 << 5) /* Receiver Overflow Status Status for Function 5, if \
+ set, generate pcie_err_attn output when this error is seen.. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5 \
+ (1 << 4) /* Unexpected Completion Status Status for Function 5, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5 \
+ (1 << 3) /* Receive UR Statusfor Function 5. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5 \
+ (1 << 2) /* Completer Timeout Status Status for Function 5, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5 \
+ (1 << 1) /* Flow Control Protocol Error Status Status for \
+ Function 5, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5 \
+ (1 << 0) /* Poisoned Error Status Status for Function 5, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+
+
+#define BAR_USTRORM_INTMEM 0x400000
+#define BAR_CSTRORM_INTMEM 0x410000
+#define BAR_XSTRORM_INTMEM 0x420000
+#define BAR_TSTRORM_INTMEM 0x430000
+
+/* for accessing the IGU in case of status block ACK */
+#define BAR_IGU_INTMEM 0x440000
+
+#define BAR_DOORBELL_OFFSET 0x800000
+
+#define BAR_ME_REGISTER 0x450000
+#define ME_REG_PF_NUM_SHIFT 0
+#define ME_REG_PF_NUM \
+ (7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
+#define ME_REG_VF_VALID (1<<8)
+#define ME_REG_VF_NUM_SHIFT 9
+#define ME_REG_VF_NUM_MASK (0x3f<<ME_REG_VF_NUM_SHIFT)
+#define VF_ID(x) ((x & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT)
+#define ME_REG_VF_ERR (0x1<<3)
+#define ME_REG_ABS_PF_NUM_SHIFT 16
+#define ME_REG_ABS_PF_NUM \
+ (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
+
+
+#define PXP_VF_ADRR_NUM_QUEUES 136
+#define PXP_ADDR_QUEUE_SIZE 32
+#define PXP_ADDR_REG_SIZE 512
+
+
+#define PXP_VF_ADDR_IGU_START 0
+#define PXP_VF_ADDR_IGU_SIZE (0x3000)
+#define PXP_VF_ADDR_IGU_END \
+ ((PXP_VF_ADDR_IGU_START) + (PXP_VF_ADDR_IGU_SIZE) - 1)
+
+#define PXP_VF_ADDR_USDM_QUEUES_START 0x3000
+#define PXP_VF_ADDR_USDM_QUEUES_SIZE \
+ (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_USDM_QUEUES_END \
+ ((PXP_VF_ADDR_USDM_QUEUES_START) + (PXP_VF_ADDR_USDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_CSDM_QUEUES_START 0x4100
+#define PXP_VF_ADDR_CSDM_QUEUES_SIZE \
+ (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_CSDM_QUEUES_END \
+ ((PXP_VF_ADDR_CSDM_QUEUES_START) + (PXP_VF_ADDR_CSDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_XSDM_QUEUES_START 0x5200
+#define PXP_VF_ADDR_XSDM_QUEUES_SIZE \
+ (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_XSDM_QUEUES_END \
+ ((PXP_VF_ADDR_XSDM_QUEUES_START) + (PXP_VF_ADDR_XSDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_TSDM_QUEUES_START 0x6300
+#define PXP_VF_ADDR_TSDM_QUEUES_SIZE \
+ (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_TSDM_QUEUES_END \
+ ((PXP_VF_ADDR_TSDM_QUEUES_START) + (PXP_VF_ADDR_TSDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_USDM_GLOBAL_START 0x7400
+#define PXP_VF_ADDR_USDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_USDM_GLOBAL_END \
+ ((PXP_VF_ADDR_USDM_GLOBAL_START) + (PXP_VF_ADDR_USDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_CSDM_GLOBAL_START 0x7600
+#define PXP_VF_ADDR_CSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_CSDM_GLOBAL_END \
+ ((PXP_VF_ADDR_CSDM_GLOBAL_START) + (PXP_VF_ADDR_CSDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_XSDM_GLOBAL_START 0x7800
+#define PXP_VF_ADDR_XSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_XSDM_GLOBAL_END \
+ ((PXP_VF_ADDR_XSDM_GLOBAL_START) + (PXP_VF_ADDR_XSDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_TSDM_GLOBAL_START 0x7a00
+#define PXP_VF_ADDR_TSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_TSDM_GLOBAL_END \
+ ((PXP_VF_ADDR_TSDM_GLOBAL_START) + (PXP_VF_ADDR_TSDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_DB_START 0x7c00
+#define PXP_VF_ADDR_DB_SIZE (0x200)
+#define PXP_VF_ADDR_DB_END \
+ ((PXP_VF_ADDR_DB_START) + (PXP_VF_ADDR_DB_SIZE) - 1)
+
+#define PXP_VF_ADDR_GRC_START 0x7e00
+#define PXP_VF_ADDR_GRC_SIZE (0x200)
+#define PXP_VF_ADDR_GRC_END \
+ ((PXP_VF_ADDR_GRC_START) + (PXP_VF_ADDR_GRC_SIZE) - 1)
+
+#define PXP_VF_ADDR_DORQ_START (0x0)
+#define PXP_VF_ADDR_DORQ_SIZE (0xffffffff)
+#define PXP_VF_ADDR_DORQ_END (0xffffffff)
+
+#define PXP_BAR_GRC 0
+#define PXP_BAR_TSDM 0
+#define PXP_BAR_USDM 0
+#define PXP_BAR_XSDM 0
+#define PXP_BAR_CSDM 0
+#define PXP_BAR_IGU 0
+#define PXP_BAR_DQ 1
+
+#define PXP_VF_BAR_IGU 0
+#define PXP_VF_BAR_USDM_QUEUES 0
+#define PXP_VF_BAR_TSDM_QUEUES 0
+#define PXP_VF_BAR_XSDM_QUEUES 0
+#define PXP_VF_BAR_CSDM_QUEUES 0
+#define PXP_VF_BAR_USDM_GLOBAL 0
+#define PXP_VF_BAR_TSDM_GLOBAL 0
+#define PXP_VF_BAR_XSDM_GLOBAL 0
+#define PXP_VF_BAR_CSDM_GLOBAL 0
+#define PXP_VF_BAR_DB 0
+#define PXP_VF_BAR_GRC 0
+#define PXP_VF_BAR_DORQ 1
+
+/* PCI CAPABILITIES*/
+
+#define PCI_CAP_PCIE 0x10 /*PCIe capability ID*/
+
+#define PCIE_DEV_CAPS 0x04
+
+#define PCIE_DEV_CTRL 0x08
+#define PCIE_DEV_CTRL_FLR 0x8000;
+
+#define PCIE_DEV_STATUS 0x0A
+
+#define PCI_CAP_MSIX 0x11 /*MSI-X capability ID*/
+#define PCI_MSIX_CONTROL_SHIFT 16
+#define PCI_MSIX_TABLE_SIZE_MASK 0x07FF
+#define PCI_MSIX_TABLE_ENABLE_MASK 0x8000
+
+
+#define MDIO_REG_BANK_CL73_IEEEB0 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
+
+#define MDIO_REG_BANK_CL73_IEEEB1 0x10
+#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV2 0x04
+
+#define MDIO_REG_BANK_RX0 0x80b0
+#define MDIO_RX0_RX_STATUS 0x10
+#define MDIO_RX0_RX_STATUS_SIGDET 0x8000
+#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000
+#define MDIO_RX0_RX_EQ_BOOST 0x1c
+#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX1 0x80c0
+#define MDIO_RX1_RX_EQ_BOOST 0x1c
+#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX2 0x80d0
+#define MDIO_RX2_RX_EQ_BOOST 0x1c
+#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX3 0x80e0
+#define MDIO_RX3_RX_EQ_BOOST 0x1c
+#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX_ALL 0x80f0
+#define MDIO_RX_ALL_RX_EQ_BOOST 0x1c
+#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_TX0 0x8060
+#define MDIO_TX0_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX1 0x8070
+#define MDIO_TX1_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX2 0x8080
+#define MDIO_TX2_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX3 0x8090
+#define MDIO_TX3_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000
+#define MDIO_BLOCK0_XGXS_CONTROL 0x10
+
+#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010
+#define MDIO_BLOCK1_LANE_CTRL0 0x15
+#define MDIO_BLOCK1_LANE_CTRL1 0x16
+#define MDIO_BLOCK1_LANE_CTRL2 0x17
+#define MDIO_BLOCK1_LANE_PRBS 0x19
+
+#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
+#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
+
+#define MDIO_REG_BANK_GP_STATUS 0x8120
+#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 0x3900
+
+
+#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1)
+
+#define MDIO_REG_BANK_SERDES_DIGITAL 0x8300
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1 0x18
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009
+
+#define MDIO_REG_BANK_OVER_1G 0x8320
+#define MDIO_OVER_1G_DIGCTL_3_4 0x14
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5
+#define MDIO_OVER_1G_UP1 0x19
+#define MDIO_OVER_1G_UP1_2_5G 0x0001
+#define MDIO_OVER_1G_UP1_5G 0x0002
+#define MDIO_OVER_1G_UP1_6G 0x0004
+#define MDIO_OVER_1G_UP1_10G 0x0010
+#define MDIO_OVER_1G_UP1_10GH 0x0008
+#define MDIO_OVER_1G_UP1_12G 0x0020
+#define MDIO_OVER_1G_UP1_12_5G 0x0040
+#define MDIO_OVER_1G_UP1_13G 0x0080
+#define MDIO_OVER_1G_UP1_15G 0x0100
+#define MDIO_OVER_1G_UP1_16G 0x0200
+#define MDIO_OVER_1G_UP2 0x1A
+#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007
+#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038
+#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0
+#define MDIO_OVER_1G_UP3 0x1B
+#define MDIO_OVER_1G_UP3_HIGIG2 0x0001
+#define MDIO_OVER_1G_LP_UP1 0x1C
+#define MDIO_OVER_1G_LP_UP2 0x1D
+#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7
+#define MDIO_OVER_1G_LP_UP3 0x1E
+
+#define MDIO_REG_BANK_REMOTE_PHY 0x8330
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS 0x10
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG 0x0010
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG 0x0600
+
+#define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002
+
+#define MDIO_REG_BANK_CL73_USERB0 0x8370
+#define MDIO_CL73_USERB0_CL73_UCTRL 0x10
+#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL 0x0002
+#define MDIO_CL73_USERB0_CL73_USTAT1 0x11
+#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK 0x0100
+#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37 0x0400
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001
+
+#define MDIO_REG_BANK_AER_BLOCK 0xFFD0
+#define MDIO_AER_BLOCK_AER_REG 0x1E
+
+#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0
+#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040
+#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200
+#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000
+#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000
+#define MDIO_COMBO_IEEE0_MII_STATUS 0x11
+#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004
+#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020
+/*WhenthelinkpartnerisinSGMIImode(bit0=1), then
+bit15=link, bit12=duplex, bits11:10=speed, bit14=acknowledge.
+Theotherbitsarereservedandshouldbezero*/
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
+
+
+#define MDIO_PMA_DEVAD 0x1
+/*ieee*/
+#define MDIO_PMA_REG_CTRL 0x0
+#define MDIO_PMA_REG_STATUS 0x1
+#define MDIO_PMA_REG_10G_CTRL2 0x7
+#define MDIO_PMA_REG_TX_DISABLE 0x0009
+#define MDIO_PMA_REG_RX_SD 0xa
+/*bnx2x*/
+#define MDIO_PMA_REG_BNX2X_CTRL 0x0096
+#define MDIO_PMA_REG_FEC_CTRL 0x00ab
+#define MDIO_PMA_LASI_RXCTRL 0x9000
+#define MDIO_PMA_LASI_TXCTRL 0x9001
+#define MDIO_PMA_LASI_CTRL 0x9002
+#define MDIO_PMA_LASI_RXSTAT 0x9003
+#define MDIO_PMA_LASI_TXSTAT 0x9004
+#define MDIO_PMA_LASI_STAT 0x9005
+#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800
+#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808
+#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809
+#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02
+#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09
+#define MDIO_PMA_REG_MISC_CTRL 0xca0a
+#define MDIO_PMA_REG_GEN_CTRL 0xca10
+#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
+#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
+#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
+#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
+#define MDIO_PMA_REG_ROM_VER1 0xca19
+#define MDIO_PMA_REG_ROM_VER2 0xca1a
+#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
+#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d
+#define MDIO_PMA_REG_PLL_CTRL 0xca1e
+#define MDIO_PMA_REG_MISC_CTRL0 0xca23
+#define MDIO_PMA_REG_LRM_MODE 0xca3f
+#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46
+#define MDIO_PMA_REG_MISC_CTRL1 0xca85
+
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002
+#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01
+#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05
+
+#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309
+#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
+#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
+#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
+#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
+#define MDIO_PMA_REG_8727_PCS_GP 0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
+
+#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
+#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
+#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
+#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
+#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08
+
+#define MDIO_PMA_REG_7101_RESET 0xc000
+#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
+#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
+#define MDIO_PMA_REG_7101_VER1 0xc026
+#define MDIO_PMA_REG_7101_VER2 0xc027
+
+#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
+#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
+#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
+#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
+#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
+#define MDIO_PMA_REG_8481_LED5_MASK 0xa838
+#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
+#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
+
+
+#define MDIO_WIS_DEVAD 0x2
+/*bnx2x*/
+#define MDIO_WIS_REG_LASI_CNTL 0x9002
+#define MDIO_WIS_REG_LASI_STATUS 0x9005
+
+#define MDIO_PCS_DEVAD 0x3
+#define MDIO_PCS_REG_STATUS 0x0020
+#define MDIO_PCS_REG_LASI_STATUS 0x9005
+#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000
+#define MDIO_PCS_REG_7101_SPI_MUX 0xD008
+#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
+#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
+#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
+
+
+#define MDIO_XS_DEVAD 0x4
+#define MDIO_XS_REG_STATUS 0x0001
+#define MDIO_XS_PLL_SEQUENCER 0x8000
+#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a
+
+#define MDIO_XS_8706_REG_BANK_RX0 0x80bc
+#define MDIO_XS_8706_REG_BANK_RX1 0x80cc
+#define MDIO_XS_8706_REG_BANK_RX2 0x80dc
+#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
+#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
+
+#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA
+
+#define MDIO_AN_DEVAD 0x7
+/*ieee*/
+#define MDIO_AN_REG_CTRL 0x0000
+#define MDIO_AN_REG_STATUS 0x0001
+#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020
+#define MDIO_AN_REG_ADV_PAUSE 0x0010
+#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400
+#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800
+#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00
+#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
+#define MDIO_AN_REG_ADV 0x0011
+#define MDIO_AN_REG_ADV2 0x0012
+#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
+#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014
+#define MDIO_AN_REG_MASTER_STATUS 0x0021
+#define MDIO_AN_REG_EEE_ADV 0x003c
+#define MDIO_AN_REG_LP_EEE_ADV 0x003d
+/*bnx2x*/
+#define MDIO_AN_REG_LINK_STATUS 0x8304
+#define MDIO_AN_REG_CL37_CL73 0x8370
+#define MDIO_AN_REG_CL37_AN 0xffe0
+#define MDIO_AN_REG_CL37_FC_LD 0xffe4
+#define MDIO_AN_REG_CL37_FC_LP 0xffe5
+#define MDIO_AN_REG_1000T_STATUS 0xffea
+
+#define MDIO_AN_REG_8073_2_5G 0x8329
+#define MDIO_AN_REG_8073_BAM 0x8350
+
+#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
+#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
+#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40
+#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
+#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
+#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
+#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
+#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL 0xfff0
+#define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF 0x0008
+#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
+#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
+#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
+#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
+
+/* BNX2X84823 only */
+#define MDIO_CTL_DEVAD 0x1e
+#define MDIO_CTL_REG_84823_MEDIA 0x401a
+#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
+ /* These pins configure the BNX2X84823 interface to MAC after reset. */
+#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
+#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
+ /* These pins configure the BNX2X84823 interface to Line after reset. */
+#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
+ /* When this pin is active high during reset, 10GBASE-T core is power
+ * down, When it is active low the 10GBASE-T is power up
+ */
+#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b
+#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+
+/* BNX2X84833 only */
+#define MDIO_84833_TOP_CFG_FW_REV 0x400f
+#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
+#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
+#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
+#define MDIO_84833_SUPER_ISOLATE 0x8000
+/* These are mailbox register set used by 84833. */
+#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005
+#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006
+#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
+#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
+#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
+#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037
+#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038
+#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039
+#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a
+#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b
+#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c
+#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0
+#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26
+#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27
+#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28
+#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29
+#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30
+#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31
+
+/* Mailbox command set used by 84833. */
+#define PHY84833_CMD_SET_PAIR_SWAP 0x8001
+#define PHY84833_CMD_GET_EEE_MODE 0x8008
+#define PHY84833_CMD_SET_EEE_MODE 0x8009
+#define PHY84833_CMD_GET_CURRENT_TEMP 0x8031
+/* Mailbox status set used by 84833. */
+#define PHY84833_STATUS_CMD_RECEIVED 0x0001
+#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002
+#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004
+#define PHY84833_STATUS_CMD_COMPLETE_ERROR 0x0008
+#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS 0x0010
+#define PHY84833_STATUS_CMD_SYSTEM_BOOT 0x0020
+#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
+#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
+#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
+
+
+/* Warpcore clause 45 addressing */
+#define MDIO_WC_DEVAD 0x3
+#define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0
+#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
+#define MDIO_WC_REG_PCS_STATUS2 0x0021
+#define MDIO_WC_REG_PMD_KR_CONTROL 0x0096
+#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
+#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
+#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL3 0x8018
+#define MDIO_WC_REG_XGXSBLK1_LANETEST0 0x801a
+#define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061
+#define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071
+#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081
+#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091
+#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000
+#define MDIO_WC_REG_TX1_TX_DRIVER 0x8077
+#define MDIO_WC_REG_TX2_TX_DRIVER 0x8087
+#define MDIO_WC_REG_TX3_TX_DRIVER 0x8097
+#define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9
+#define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9
+#define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba
+#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
+#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
+#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
+#define MDIO_WC_REG_XGXS_STATUS3 0x8129
+#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
+#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131
+#define MDIO_WC_REG_XGXS_STATUS4 0x813c
+#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141
+#define MDIO_WC_REG_XGXS_X2_CONTROL3 0x8142
+#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B
+#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169
+#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0
+#define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1
+#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2
+#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1
+#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE
+#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc
+#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE
+#define MDIO_WC_REG_DSC1B0_UC_CTRL 0x820e
+#define MDIO_WC_REG_DSC1B0_UC_CTRL_RDY4CMD (1<<7)
+#define MDIO_WC_REG_DSC_SMC 0x8213
+#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e
+#define MDIO_WC_REG_TX_FIR_TAP 0x82e2
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00
+#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000
+#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP 0x82e2
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3
+#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6
+#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7
+#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302
+#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304
+#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308
+#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309
+#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
+#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c
+#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
+#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e
+#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
+#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
+#define MDIO_WC_REG_DIGITAL5_LINK_STATUS 0x834d
+#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
+#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
+#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_CL73_USERB0_CTRL 0x8370
+#define MDIO_WC_REG_CL73_USERB0_USTAT 0x8371
+#define MDIO_WC_REG_CL73_BAM_CTRL1 0x8372
+#define MDIO_WC_REG_CL73_BAM_CTRL2 0x8373
+#define MDIO_WC_REG_CL73_BAM_CTRL3 0x8374
+#define MDIO_WC_REG_CL73_BAM_CODE_FIELD 0x837b
+#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390
+#define MDIO_WC_REG_TX66_CONTROL 0x83b0
+#define MDIO_WC_REG_RX66_CONTROL 0x83c0
+#define MDIO_WC_REG_RX66_SCW0 0x83c2
+#define MDIO_WC_REG_RX66_SCW1 0x83c3
+#define MDIO_WC_REG_RX66_SCW2 0x83c4
+#define MDIO_WC_REG_RX66_SCW3 0x83c5
+#define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6
+#define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7
+#define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8
+#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9
+#define MDIO_WC_REG_FX100_CTRL1 0x8400
+#define MDIO_WC_REG_FX100_CTRL3 0x8402
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL5 0x8436
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL6 0x8437
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL7 0x8438
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL9 0x8439
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL10 0x843a
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL11 0x843b
+#define MDIO_WC_REG_ETA_CL73_OUI1 0x8453
+#define MDIO_WC_REG_ETA_CL73_OUI2 0x8454
+#define MDIO_WC_REG_ETA_CL73_OUI3 0x8455
+#define MDIO_WC_REG_ETA_CL73_LD_BAM_CODE 0x8456
+#define MDIO_WC_REG_ETA_CL73_LD_UD_CODE 0x8457
+#define MDIO_WC_REG_MICROBLK_CMD 0xffc2
+#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5
+#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc
+
+#define MDIO_WC_REG_AERBLK_AER 0xffde
+#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0
+#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1
+
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4
+
+#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141
+
+#define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f
+
+/* 54618se */
+#define MDIO_REG_GPHY_MII_STATUS 0x1
+#define MDIO_REG_GPHY_PHYID_LSB 0x3
+#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
+#define MDIO_REG_GPHY_CL45_REG_WRITE 0x4000
+#define MDIO_REG_GPHY_CL45_REG_READ 0xc000
+#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
+#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15
+#define MDIO_REG_GPHY_EXP_ACCESS 0x17
+#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00
+#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40
+#define MDIO_REG_GPHY_AUX_STATUS 0x19
+#define MDIO_REG_INTR_STATUS 0x1a
+#define MDIO_REG_INTR_MASK 0x1b
+#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
+#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
+#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
+#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
+#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
+#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8)
+
+
+#define IGU_FUNC_BASE 0x0400
+
+#define IGU_ADDR_MSIX 0x0000
+#define IGU_ADDR_INT_ACK 0x0200
+#define IGU_ADDR_PROD_UPD 0x0201
+#define IGU_ADDR_ATTN_BITS_UPD 0x0202
+#define IGU_ADDR_ATTN_BITS_SET 0x0203
+#define IGU_ADDR_ATTN_BITS_CLR 0x0204
+#define IGU_ADDR_COALESCE_NOW 0x0205
+#define IGU_ADDR_SIMD_MASK 0x0206
+#define IGU_ADDR_SIMD_NOMASK 0x0207
+#define IGU_ADDR_MSI_CTL 0x0210
+#define IGU_ADDR_MSI_ADDR_LO 0x0211
+#define IGU_ADDR_MSI_ADDR_HI 0x0212
+#define IGU_ADDR_MSI_DATA 0x0213
+
+
+#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0
+#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1
+#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2
+#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup 3
+
+#define COMMAND_REG_INT_ACK 0x0
+#define COMMAND_REG_PROD_UPD 0x4
+#define COMMAND_REG_ATTN_BITS_UPD 0x8
+#define COMMAND_REG_ATTN_BITS_SET 0xc
+#define COMMAND_REG_ATTN_BITS_CLR 0x10
+#define COMMAND_REG_COALESCE_NOW 0x14
+#define COMMAND_REG_SIMD_MASK 0x18
+#define COMMAND_REG_SIMD_NOMASK 0x1c
+
+
+#define IGU_MEM_BASE 0x0000
+
+#define IGU_MEM_MSIX_BASE 0x0000
+#define IGU_MEM_MSIX_UPPER 0x007f
+#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE 0x0200
+#define IGU_MEM_PBA_MSIX_UPPER 0x0200
+
+#define IGU_CMD_BACKWARD_COMP_PROD_UPD 0x0201
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
+
+#define IGU_CMD_INT_ACK_BASE 0x0400
+#define IGU_CMD_INT_ACK_UPPER \
+ (IGU_CMD_INT_ACK_BASE + MAX_SB_PER_PATH - 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x04ff
+
+#define IGU_CMD_E2_PROD_UPD_BASE 0x0500
+#define IGU_CMD_E2_PROD_UPD_UPPER \
+ (IGU_CMD_E2_PROD_UPD_BASE + MAX_SB_PER_PATH - 1)
+#define IGU_CMD_E2_PROD_UPD_RESERVED_UPPER 0x059f
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05a0
+#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05a1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05a2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05a3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05a4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05a5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
+
+
+#define IGU_REG_RESERVED_UPPER 0x05ff
+
+#define IGU_SEG_IDX_ATTN 2
+#define IGU_SEG_IDX_DEFAULT 1
+/* Fields of IGU PF CONFIGRATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
+
+/* Fields of IGU VF CONFIGRATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 2 /* Parent PF */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+
+
+#define IGU_BC_DSB_NUM_SEGS 5
+#define IGU_BC_NDSB_NUM_SEGS 2
+#define IGU_NORM_DSB_NUM_SEGS 2
+#define IGU_NORM_NDSB_NUM_SEGS 1
+#define IGU_BC_BASE_DSB_PROD 128
+#define IGU_NORM_BASE_DSB_PROD 136
+
+ /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \
+ [5:2] = 0; [1:0] = PF number) */
+#define IGU_FID_ENCODE_IS_PF (0x1<<6)
+#define IGU_FID_ENCODE_IS_PF_SHIFT 6
+#define IGU_FID_VF_NUM_MASK (0x3f)
+#define IGU_FID_PF_NUM_MASK (0x7)
+
+#define IGU_REG_MAPPING_MEMORY_VALID (1<<0)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK (0x3F<<1)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT 1
+#define IGU_REG_MAPPING_MEMORY_FID_MASK (0x7F<<7)
+#define IGU_REG_MAPPING_MEMORY_FID_SHIFT 7
+
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+
+/* String-to-compress [31:8] = CID (all 24 bits)
+ * String-to-compress [7:4] = Region
+ * String-to-compress [3:0] = Type
+ */
+#define CDU_VALID_DATA(_cid, _region, _type) \
+ (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
+#define CDU_CRC8(_cid, _region, _type) \
+ (ecore_calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
+#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type) \
+ (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
+#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type) \
+ (0x80 | ((_type)&0xf << 3) | ((CDU_CRC8(_cid, _region, _type)) & 0x7))
+#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
+
+#endif /* ECORE_REG_H */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.c b/src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.c
new file mode 100644
index 00000000..0c8685c6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.c
@@ -0,0 +1,5428 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bnx2x.h"
+#include "ecore_init.h"
+
+/**** Exe Queue interfaces ****/
+
+/**
+ * ecore_exe_queue_init - init the Exe Queue object
+ *
+ * @o: pointer to the object
+ * @exe_len: length
+ * @owner: pointer to the owner
+ * @validate: validate function pointer
+ * @optimize: optimize function pointer
+ * @exec: execute function pointer
+ * @get: get function pointer
+ */
+static void
+ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_exe_queue_obj *o,
+ int exe_len,
+ union ecore_qable_obj *owner,
+ exe_q_validate validate,
+ exe_q_remove remove,
+ exe_q_optimize optimize, exe_q_execute exec, exe_q_get get)
+{
+ ECORE_MEMSET(o, 0, sizeof(*o));
+
+ ECORE_LIST_INIT(&o->exe_queue);
+ ECORE_LIST_INIT(&o->pending_comp);
+
+ ECORE_SPIN_LOCK_INIT(&o->lock, sc);
+
+ o->exe_chunk_len = exe_len;
+ o->owner = owner;
+
+ /* Owner specific callbacks */
+ o->validate = validate;
+ o->remove = remove;
+ o->optimize = optimize;
+ o->execute = exec;
+ o->get = get;
+
+ ECORE_MSG("Setup the execution queue with the chunk length of %d",
+ exe_len);
+}
+
+static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_exeq_elem *elem)
+{
+ ECORE_MSG("Deleting an exe_queue element");
+ ECORE_FREE(sc, elem, sizeof(*elem));
+}
+
+static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
+{
+ struct ecore_exeq_elem *elem;
+ int cnt = 0;
+
+ ECORE_SPIN_LOCK_BH(&o->lock);
+
+ ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
+ struct ecore_exeq_elem) cnt++;
+
+ ECORE_SPIN_UNLOCK_BH(&o->lock);
+
+ return cnt;
+}
+
+/**
+ * ecore_exe_queue_add - add a new element to the execution queue
+ *
+ * @sc: driver handle
+ * @o: queue
+ * @cmd: new command to add
+ * @restore: true - do not optimize the command
+ *
+ * If the element is optimized or is illegal, frees it.
+ */
+static int ecore_exe_queue_add(struct bnx2x_softc *sc,
+ struct ecore_exe_queue_obj *o,
+ struct ecore_exeq_elem *elem, int restore)
+{
+ int rc;
+
+ ECORE_SPIN_LOCK_BH(&o->lock);
+
+ if (!restore) {
+ /* Try to cancel this element queue */
+ rc = o->optimize(sc, o->owner, elem);
+ if (rc)
+ goto free_and_exit;
+
+ /* Check if this request is ok */
+ rc = o->validate(sc, o->owner, elem);
+ if (rc) {
+ ECORE_MSG("Preamble failed: %d", rc);
+ goto free_and_exit;
+ }
+ }
+
+ /* If so, add it to the execution queue */
+ ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);
+
+ ECORE_SPIN_UNLOCK_BH(&o->lock);
+
+ return ECORE_SUCCESS;
+
+free_and_exit:
+ ecore_exe_queue_free_elem(sc, elem);
+
+ ECORE_SPIN_UNLOCK_BH(&o->lock);
+
+ return rc;
+}
+
+static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc, struct ecore_exe_queue_obj
+ *o)
+{
+ struct ecore_exeq_elem *elem;
+
+ while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
+ elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
+ struct ecore_exeq_elem, link);
+
+ ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
+ ecore_exe_queue_free_elem(sc, elem);
+ }
+}
+
+static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
+ struct ecore_exe_queue_obj *o)
+{
+ ECORE_SPIN_LOCK_BH(&o->lock);
+
+ __ecore_exe_queue_reset_pending(sc, o);
+
+ ECORE_SPIN_UNLOCK_BH(&o->lock);
+}
+
+/**
+ * ecore_exe_queue_step - execute one execution chunk atomically
+ *
+ * @sc: driver handle
+ * @o: queue
+ * @ramrod_flags: flags
+ *
+ * (Should be called while holding the exe_queue->lock).
+ */
+static int ecore_exe_queue_step(struct bnx2x_softc *sc,
+ struct ecore_exe_queue_obj *o,
+ unsigned long *ramrod_flags)
+{
+ struct ecore_exeq_elem *elem, spacer;
+ int cur_len = 0, rc;
+
+ ECORE_MEMSET(&spacer, 0, sizeof(spacer));
+
+ /* Next step should not be performed until the current is finished,
+ * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
+ * properly clear object internals without sending any command to the FW
+ * which also implies there won't be any completion to clear the
+ * 'pending' list.
+ */
+ if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
+ if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
+ ECORE_MSG
+ ("RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
+ __ecore_exe_queue_reset_pending(sc, o);
+ } else {
+ return ECORE_PENDING;
+ }
+ }
+
+ /* Run through the pending commands list and create a next
+ * execution chunk.
+ */
+ while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
+ elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
+ struct ecore_exeq_elem, link);
+ ECORE_DBG_BREAK_IF(!elem->cmd_len);
+
+ if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
+ cur_len += elem->cmd_len;
+ /* Prevent from both lists being empty when moving an
+ * element. This will allow the call of
+ * ecore_exe_queue_empty() without locking.
+ */
+ ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
+ mb();
+ ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
+ ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
+ ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
+ } else
+ break;
+ }
+
+ /* Sanity check */
+ if (!cur_len)
+ return ECORE_SUCCESS;
+
+ rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
+ if (rc < 0)
+ /* In case of an error return the commands back to the queue
+ * and reset the pending_comp.
+ */
+ ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
+ else if (!rc)
+ /* If zero is returned, means there are no outstanding pending
+ * completions and we may dismiss the pending list.
+ */
+ __ecore_exe_queue_reset_pending(sc, o);
+
+ return rc;
+}
+
+static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
+{
+ int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);
+
+ /* Don't reorder!!! */
+ mb();
+
+ return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
+}
+
+static struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(struct
+ bnx2x_softc *sc
+ __rte_unused)
+{
+ ECORE_MSG("Allocating a new exe_queue element");
+ return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc);
+}
+
+/************************ raw_obj functions ***********************************/
+static int ecore_raw_check_pending(struct ecore_raw_obj *o)
+{
+ /*
+ * !! converts the value returned by ECORE_TEST_BIT such that it
+ * is guaranteed not to be truncated regardless of int definition.
+ *
+ * Note we cannot simply define the function's return value type
+ * to match the type returned by ECORE_TEST_BIT, as it varies by
+ * platform/implementation.
+ */
+
+ return ! !ECORE_TEST_BIT(o->state, o->pstate);
+}
+
+static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
+{
+ ECORE_SMP_MB_BEFORE_CLEAR_BIT();
+ ECORE_CLEAR_BIT(o->state, o->pstate);
+ ECORE_SMP_MB_AFTER_CLEAR_BIT();
+}
+
+static void ecore_raw_set_pending(struct ecore_raw_obj *o)
+{
+ ECORE_SMP_MB_BEFORE_CLEAR_BIT();
+ ECORE_SET_BIT(o->state, o->pstate);
+ ECORE_SMP_MB_AFTER_CLEAR_BIT();
+}
+
+/**
+ * ecore_state_wait - wait until the given bit(state) is cleared
+ *
+ * @sc: device handle
+ * @state: state which is to be cleared
+ * @state_p: state buffer
+ *
+ */
+static int ecore_state_wait(struct bnx2x_softc *sc, int state,
+ unsigned long *pstate)
+{
+ /* can take a while if any port is running */
+ int cnt = 5000;
+
+ if (CHIP_REV_IS_EMUL(sc))
+ cnt *= 20;
+
+ ECORE_MSG("waiting for state to become %d", state);
+
+ ECORE_MIGHT_SLEEP();
+ while (cnt--) {
+ bnx2x_intr_legacy(sc, 1);
+ if (!ECORE_TEST_BIT(state, pstate)) {
+#ifdef ECORE_STOP_ON_ERROR
+ ECORE_MSG("exit (cnt %d)", 5000 - cnt);
+#endif
+ return ECORE_SUCCESS;
+ }
+
+ ECORE_WAIT(sc, delay_us);
+
+ if (sc->panic)
+ return ECORE_IO;
+ }
+
+ /* timeout! */
+ PMD_DRV_LOG(ERR, "timeout waiting for state %d", state);
+#ifdef ECORE_STOP_ON_ERROR
+ ecore_panic();
+#endif
+
+ return ECORE_TIMEOUT;
+}
+
+static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw)
+{
+ return ecore_state_wait(sc, raw->state, raw->pstate);
+}
+
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/* credit handling callbacks */
+static int ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
+{
+ struct ecore_credit_pool_obj *mp = o->macs_pool;
+
+ ECORE_DBG_BREAK_IF(!mp);
+
+ return mp->get_entry(mp, offset);
+}
+
+static int ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
+{
+ struct ecore_credit_pool_obj *mp = o->macs_pool;
+
+ ECORE_DBG_BREAK_IF(!mp);
+
+ return mp->get(mp, 1);
+}
+
+static int ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
+{
+ struct ecore_credit_pool_obj *mp = o->macs_pool;
+
+ return mp->put_entry(mp, offset);
+}
+
+static int ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
+{
+ struct ecore_credit_pool_obj *mp = o->macs_pool;
+
+ return mp->put(mp, 1);
+}
+
+/**
+ * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
+ * head list.
+ *
+ * @sc: device handle
+ * @o: vlan_mac object
+ *
+ * @details: Non-blocking implementation; should be called under execution
+ * queue lock.
+ */
+static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_vlan_mac_obj *o)
+{
+ if (o->head_reader) {
+ ECORE_MSG("vlan_mac_lock writer - There are readers; Busy");
+ return ECORE_BUSY;
+ }
+
+ ECORE_MSG("vlan_mac_lock writer - Taken");
+ return ECORE_SUCCESS;
+}
+
+/**
+ * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
+ * which wasn't able to run due to a taken lock on vlan mac head list.
+ *
+ * @sc: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock; notice it might release
+ * and reclaim it during its run.
+ */
+static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o)
+{
+ int rc;
+ unsigned long ramrod_flags = o->saved_ramrod_flags;
+
+ ECORE_MSG("vlan_mac_lock execute pending command with ramrod flags %lu",
+ ramrod_flags);
+ o->head_exe_request = FALSE;
+ o->saved_ramrod_flags = 0;
+ rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
+ if (rc != ECORE_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "execution of pending commands failed with rc %d",
+ rc);
+#ifdef ECORE_STOP_ON_ERROR
+ ecore_panic();
+#endif
+ }
+}
+
+/**
+ * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
+ * called due to vlan mac head list lock being taken.
+ *
+ * @sc: device handle
+ * @o: vlan_mac object
+ * @ramrod_flags: ramrod flags of missed execution
+ *
+ * @details Should be called under execution queue lock.
+ */
+static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_vlan_mac_obj *o,
+ unsigned long ramrod_flags)
+{
+ o->head_exe_request = TRUE;
+ o->saved_ramrod_flags = ramrod_flags;
+ ECORE_MSG("Placing pending execution with ramrod flags %lu",
+ ramrod_flags);
+}
+
+/**
+ * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @sc: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ * execution exists, it would perform it - possibly releasing and
+ * reclaiming the execution queue lock.
+ */
+static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o)
+{
+ /* It's possible a new pending execution was added since this writer
+ * executed. If so, execute again. [Ad infinitum]
+ */
+ while (o->head_exe_request) {
+ ECORE_MSG
+ ("vlan_mac_lock - writer release encountered a pending request");
+ __ecore_vlan_mac_h_exec_pending(sc, o);
+ }
+}
+
+/**
+ * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @sc: device handle
+ * @o: vlan_mac object
+ *
+ * @details Notice if a pending execution exists, it would perform it -
+ * possibly releasing and reclaiming the execution queue lock.
+ */
+void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o)
+{
+ ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
+ __ecore_vlan_mac_h_write_unlock(sc, o);
+ ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
+}
+
+/**
+ * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @sc: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under the execution queue lock. May sleep. May
+ * release and reclaim execution queue lock during its run.
+ */
+static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_vlan_mac_obj *o)
+{
+ /* If we got here, we're holding lock --> no WRITER exists */
+ o->head_reader++;
+ ECORE_MSG("vlan_mac_lock - locked reader - number %d", o->head_reader);
+
+ return ECORE_SUCCESS;
+}
+
+/**
+ * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @sc: device handle
+ * @o: vlan_mac object
+ *
+ * @details May sleep. Claims and releases execution queue lock during its run.
+ */
+static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o)
+{
+ int rc;
+
+ ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
+ rc = __ecore_vlan_mac_h_read_lock(sc, o);
+ ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
+
+ return rc;
+}
+
+/**
+ * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @sc: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ * execution exists, it would be performed if this was the last
+ * reader. possibly releasing and reclaiming the execution queue lock.
+ */
+static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o)
+{
+ if (!o->head_reader) {
+ PMD_DRV_LOG(ERR,
+ "Need to release vlan mac reader lock, but lock isn't taken");
+#ifdef ECORE_STOP_ON_ERROR
+ ecore_panic();
+#endif
+ } else {
+ o->head_reader--;
+ PMD_DRV_LOG(INFO,
+ "vlan_mac_lock - decreased readers to %d",
+ o->head_reader);
+ }
+
+ /* It's possible a new pending execution was added, and that this reader
+ * was last - if so we need to execute the command.
+ */
+ if (!o->head_reader && o->head_exe_request) {
+ PMD_DRV_LOG(INFO,
+ "vlan_mac_lock - reader release encountered a pending request");
+
+ /* Writer release will do the trick */
+ __ecore_vlan_mac_h_write_unlock(sc, o);
+ }
+}
+
+/**
+ * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @sc: device handle
+ * @o: vlan_mac object
+ *
+ * @details Notice if a pending execution exists, it would be performed if this
+ * was the last reader. Claims and releases the execution queue lock
+ * during its run.
+ */
+void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o)
+{
+ ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
+ __ecore_vlan_mac_h_read_unlock(sc, o);
+ ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
+}
+
+/**
+ * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @sc: device handle
+ * @o: vlan_mac object
+ * @n: number of elements to get
+ * @base: base address for element placement
+ * @stride: stride between elements (in bytes)
+ */
+static int ecore_get_n_elements(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o, int n,
+ uint8_t * base, uint8_t stride, uint8_t size)
+{
+ struct ecore_vlan_mac_registry_elem *pos;
+ uint8_t *next = base;
+ int counter = 0, read_lock;
+
+ ECORE_MSG("get_n_elements - taking vlan_mac_lock (reader)");
+ read_lock = ecore_vlan_mac_h_read_lock(sc, o);
+ if (read_lock != ECORE_SUCCESS)
+ PMD_DRV_LOG(ERR,
+ "get_n_elements failed to get vlan mac reader lock; Access without lock");
+
+ /* traverse list */
+ ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
+ struct ecore_vlan_mac_registry_elem) {
+ if (counter < n) {
+ ECORE_MEMCPY(next, &pos->u, size);
+ counter++;
+ ECORE_MSG
+ ("copied element number %d to address %p element was:",
+ counter, next);
+ next += stride + size;
+ }
+ }
+
+ if (read_lock == ECORE_SUCCESS) {
+ ECORE_MSG("get_n_elements - releasing vlan_mac_lock (reader)");
+ ecore_vlan_mac_h_read_unlock(sc, o);
+ }
+
+ return counter * ETH_ALEN;
+}
+
+/* check_add() callbacks */
+static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_vlan_mac_obj *o,
+ union ecore_classification_ramrod_data *data)
+{
+ struct ecore_vlan_mac_registry_elem *pos;
+
+ ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
+ data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
+ data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
+
+ if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
+ return ECORE_INVAL;
+
+ /* Check if a requested MAC already exists */
+ ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
+ struct ecore_vlan_mac_registry_elem)
+ if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
+ return ECORE_EXISTS;
+
+ return ECORE_SUCCESS;
+}
+
+/* check_del() callbacks */
+static struct ecore_vlan_mac_registry_elem *ecore_check_mac_del(struct bnx2x_softc
+ *sc
+ __rte_unused,
+ struct
+ ecore_vlan_mac_obj
+ *o, union
+ ecore_classification_ramrod_data
+ *data)
+{
+ struct ecore_vlan_mac_registry_elem *pos;
+
+ ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
+ data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
+ data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
+
+ ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
+ struct ecore_vlan_mac_registry_elem)
+ if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
+ return pos;
+
+ return NULL;
+}
+
+/* check_move() callback */
+static int ecore_check_move(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *src_o,
+ struct ecore_vlan_mac_obj *dst_o,
+ union ecore_classification_ramrod_data *data)
+{
+ struct ecore_vlan_mac_registry_elem *pos;
+ int rc;
+
+ /* Check if we can delete the requested configuration from the first
+ * object.
+ */
+ pos = src_o->check_del(sc, src_o, data);
+
+ /* check if configuration can be added */
+ rc = dst_o->check_add(sc, dst_o, data);
+
+ /* If this classification can not be added (is already set)
+ * or can't be deleted - return an error.
+ */
+ if (rc || !pos)
+ return FALSE;
+
+ return TRUE;
+}
+
+static int ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc,
+ __rte_unused struct ecore_vlan_mac_obj
+ *src_o, __rte_unused struct ecore_vlan_mac_obj
+ *dst_o, __rte_unused union
+ ecore_classification_ramrod_data *data)
+{
+ return FALSE;
+}
+
+static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj
+ *o)
+{
+ struct ecore_raw_obj *raw = &o->raw;
+ uint8_t rx_tx_flag = 0;
+
+ if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
+ (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
+
+ if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
+ (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
+
+ return rx_tx_flag;
+}
+
+static void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
+ int add, unsigned char *dev_addr, int index)
+{
+ uint32_t wb_data[2];
+ uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
+ NIG_REG_LLH0_FUNC_MEM;
+
+ if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
+ return;
+
+ if (index > ECORE_LLH_CAM_MAX_PF_LINE)
+ return;
+
+ ECORE_MSG("Going to %s LLH configuration at entry %d",
+ (add ? "ADD" : "DELETE"), index);
+
+ if (add) {
+ /* LLH_FUNC_MEM is a uint64_t WB register */
+ reg_offset += 8 * index;
+
+ wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
+ (dev_addr[4] << 8) | dev_addr[5]);
+ wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
+
+ ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
+ }
+
+ REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
+ NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add);
+}
+
+/**
+ * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
+ *
+ * @sc: device handle
+ * @o: queue for which we want to configure this rule
+ * @add: if TRUE the command is an ADD command, DEL otherwise
+ * @opcode: CLASSIFY_RULE_OPCODE_XXX
+ * @hdr: pointer to a header to setup
+ *
+ */
+static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o,
+ int add, int opcode,
+ struct eth_classify_cmd_header
+ *hdr)
+{
+ struct ecore_raw_obj *raw = &o->raw;
+
+ hdr->client_id = raw->cl_id;
+ hdr->func_id = raw->func_id;
+
+ /* Rx or/and Tx (internal switching) configuration ? */
+ hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o);
+
+ if (add)
+ hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
+
+ hdr->cmd_general_data |=
+ (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
+}
+
+/**
+ * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
+ *
+ * @cid: connection id
+ * @type: ECORE_FILTER_XXX_PENDING
+ * @hdr: pointer to header to setup
+ * @rule_cnt:
+ *
+ * currently we always configure one rule and echo field to contain a CID and an
+ * opcode type.
+ */
+static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type, struct eth_classify_header
+ *hdr, int rule_cnt)
+{
+ hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
+ (type << ECORE_SWCID_SHIFT));
+ hdr->rule_cnt = (uint8_t) rule_cnt;
+}
+
+/* hw_config() callbacks */
+static void ecore_set_one_mac_e2(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ struct ecore_exeq_elem *elem, int rule_idx,
+ __rte_unused int cam_offset)
+{
+ struct ecore_raw_obj *raw = &o->raw;
+ struct eth_classify_rules_ramrod_data *data =
+ (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+ int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
+ union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+ int add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
+ unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
+ uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;
+
+ /* Set LLH CAM entry: currently only iSCSI and ETH macs are
+ * relevant. In addition, current implementation is tuned for a
+ * single ETH MAC.
+ *
+ * When multiple unicast ETH MACs PF configuration in switch
+ * independent mode is required (NetQ, multiple netdev MACs,
+ * etc.), consider better utilisation of 8 per function MAC
+ * entries in the LLH register. There is also
+ * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
+ * total number of CAM entries to 16.
+ *
+ * Currently we won't configure NIG for MACs other than a primary ETH
+ * MAC and iSCSI L2 MAC.
+ *
+ * If this MAC is moving from one Queue to another, no need to change
+ * NIG configuration.
+ */
+ if (cmd != ECORE_VLAN_MAC_MOVE) {
+ if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
+ ecore_set_mac_in_nig(sc, add, mac,
+ ECORE_LLH_CAM_ISCSI_ETH_LINE);
+ else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
+ ecore_set_mac_in_nig(sc, add, mac,
+ ECORE_LLH_CAM_ETH_LINE);
+ }
+
+ /* Reset the ramrod data buffer for the first rule */
+ if (rule_idx == 0)
+ ECORE_MEMSET(data, 0, sizeof(*data));
+
+ /* Setup a command header */
+ ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC,
+ &rule_entry->mac.header);
+
+ ECORE_MSG("About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
+ (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
+ mac[4], mac[5], raw->cl_id);
+
+ /* Set a MAC itself */
+ ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+ &rule_entry->mac.mac_mid,
+ &rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
+
+ /* MOVE: Add a rule that will add this MAC to the target Queue */
+ if (cmd == ECORE_VLAN_MAC_MOVE) {
+ rule_entry++;
+ rule_cnt++;
+
+ /* Setup ramrod data */
+ ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data.
+ vlan_mac.target_obj, TRUE,
+ CLASSIFY_RULE_OPCODE_MAC,
+ &rule_entry->mac.header);
+
+ /* Set a MAC itself */
+ ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+ &rule_entry->mac.mac_mid,
+ &rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
+ }
+
+ /* Set the ramrod data header */
+ ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+ rule_cnt);
+}
+
+/**
+ * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
+ *
+ * @sc: device handle
+ * @o: queue
+ * @type:
+ * @cam_offset: offset in cam memory
+ * @hdr: pointer to a header to setup
+ *
+ * E1H
+ */
+static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj
+ *o, int type, int cam_offset, struct mac_configuration_hdr
+ *hdr)
+{
+ struct ecore_raw_obj *r = &o->raw;
+
+ hdr->length = 1;
+ hdr->offset = (uint8_t) cam_offset;
+ hdr->client_id = ECORE_CPU_TO_LE16(0xff);
+ hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
+ (type << ECORE_SWCID_SHIFT));
+}
+
+static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj
+ *o, int add, int opcode,
+ uint8_t * mac,
+ uint16_t vlan_id, struct
+ mac_configuration_entry
+ *cfg_entry)
+{
+ struct ecore_raw_obj *r = &o->raw;
+ uint32_t cl_bit_vec = (1 << r->cl_id);
+
+ cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
+ cfg_entry->pf_id = r->func_id;
+ cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);
+
+ if (add) {
+ ECORE_SET_FLAG(cfg_entry->flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_SET);
+ ECORE_SET_FLAG(cfg_entry->flags,
+ MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
+ opcode);
+
+ /* Set a MAC in a ramrod data */
+ ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
+ &cfg_entry->middle_mac_addr,
+ &cfg_entry->lsb_mac_addr, mac);
+ } else
+ ECORE_SET_FLAG(cfg_entry->flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+}
+
+static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc
+ __rte_unused,
+ struct ecore_vlan_mac_obj *o,
+ int type, int cam_offset,
+ int add, uint8_t * mac,
+ uint16_t vlan_id, int opcode,
+ struct mac_configuration_cmd
+ *config)
+{
+ struct mac_configuration_entry *cfg_entry = &config->config_table[0];
+
+ ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr);
+ ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id,
+ cfg_entry);
+
+ ECORE_MSG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
+ (add ? "setting" : "clearing"),
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ o->raw.cl_id, cam_offset);
+}
+
+/**
+ * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
+ *
+ * @sc: device handle
+ * @o: ecore_vlan_mac_obj
+ * @elem: ecore_exeq_elem
+ * @rule_idx: rule_idx
+ * @cam_offset: cam_offset
+ */
+static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ struct ecore_exeq_elem *elem,
+ __rte_unused int rule_idx, int cam_offset)
+{
+ struct ecore_raw_obj *raw = &o->raw;
+ struct mac_configuration_cmd *config =
+ (struct mac_configuration_cmd *)(raw->rdata);
+ /* 57711 do not support MOVE command,
+ * so it's either ADD or DEL
+ */
+ int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
+ TRUE : FALSE;
+
+ /* Reset the ramrod data buffer */
+ ECORE_MEMSET(config, 0, sizeof(*config));
+
+ ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
+ cam_offset, add,
+ elem->cmd_data.vlan_mac.u.mac.mac, 0,
+ ETH_VLAN_FILTER_ANY_VLAN, config);
+}
+
+/**
+ * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
+ *
+ * @sc: device handle
+ * @p: command parameters
+ * @ppos: pointer to the cookie
+ *
+ * reconfigure next MAC/VLAN/VLAN-MAC element from the
+ * previously configured elements list.
+ *
+ * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
+ * into an account
+ *
+ * pointer to the cookie - that should be given back in the next call to make
+ * function handle the next element. If *ppos is set to NULL it will restart the
+ * iterator. If returned *ppos == NULL this means that the last element has been
+ * handled.
+ *
+ */
+static int ecore_vlan_mac_restore(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_ramrod_params *p,
+ struct ecore_vlan_mac_registry_elem **ppos)
+{
+ struct ecore_vlan_mac_registry_elem *pos;
+ struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
+
+ /* If list is empty - there is nothing to do here */
+ if (ECORE_LIST_IS_EMPTY(&o->head)) {
+ *ppos = NULL;
+ return 0;
+ }
+
+ /* make a step... */
+ if (*ppos == NULL)
+ *ppos = ECORE_LIST_FIRST_ENTRY(&o->head, struct
+ ecore_vlan_mac_registry_elem,
+ link);
+ else
+ *ppos = ECORE_LIST_NEXT(*ppos, link,
+ struct ecore_vlan_mac_registry_elem);
+
+ pos = *ppos;
+
+ /* If it's the last step - return NULL */
+ if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
+ *ppos = NULL;
+
+ /* Prepare a 'user_req' */
+ ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));
+
+ /* Set the command */
+ p->user_req.cmd = ECORE_VLAN_MAC_ADD;
+
+ /* Set vlan_mac_flags */
+ p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
+
+ /* Set a restore bit */
+ ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);
+
+ return ecore_config_vlan_mac(sc, p);
+}
+
+/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
+ * pointer to an element with a specific criteria and NULL if such an element
+ * hasn't been found.
+ */
+static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o,
+ struct ecore_exeq_elem *elem)
+{
+ struct ecore_exeq_elem *pos;
+ struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
+
+ /* Check pending for execution commands */
+ ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
+ struct ecore_exeq_elem)
+ if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
+ sizeof(*data)) &&
+ (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+ return pos;
+
+ return NULL;
+}
+
+/**
+ * ecore_validate_vlan_mac_add - check if an ADD command can be executed
+ *
+ * @sc: device handle
+ * @qo: ecore_qable_obj
+ * @elem: ecore_exeq_elem
+ *
+ * Checks that the requested configuration can be added. If yes and if
+ * requested, consume CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ *
+ */
+static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
+ union ecore_qable_obj *qo,
+ struct ecore_exeq_elem *elem)
+{
+ struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
+ struct ecore_exe_queue_obj *exeq = &o->exe_queue;
+ int rc;
+
+ /* Check the registry */
+ rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
+ if (rc) {
+ ECORE_MSG
+ ("ADD command is not allowed considering current registry state.");
+ return rc;
+ }
+
+ /* Check if there is a pending ADD command for this
+ * MAC/VLAN/VLAN-MAC. Return an error if there is.
+ */
+ if (exeq->get(exeq, elem)) {
+ ECORE_MSG("There is a pending ADD command already");
+ return ECORE_EXISTS;
+ }
+
+ /* Consume the credit if not requested not to */
+ if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ o->get_credit(o)))
+ return ECORE_INVAL;
+
+ return ECORE_SUCCESS;
+}
+
+/**
+ * ecore_validate_vlan_mac_del - check if the DEL command can be executed
+ *
+ * @sc: device handle
+ * @qo: quable object to check
+ * @elem: element that needs to be deleted
+ *
+ * Checks that the requested configuration can be deleted. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
+ union ecore_qable_obj *qo,
+ struct ecore_exeq_elem *elem)
+{
+ struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
+ struct ecore_vlan_mac_registry_elem *pos;
+ struct ecore_exe_queue_obj *exeq = &o->exe_queue;
+ struct ecore_exeq_elem query_elem;
+
+ /* If this classification can not be deleted (doesn't exist)
+ * - return a ECORE_EXIST.
+ */
+ pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
+ if (!pos) {
+ ECORE_MSG
+ ("DEL command is not allowed considering current registry state");
+ return ECORE_EXISTS;
+ }
+
+ /* Check if there are pending DEL or MOVE commands for this
+ * MAC/VLAN/VLAN-MAC. Return an error if so.
+ */
+ ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
+
+ /* Check for MOVE commands */
+ query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
+ if (exeq->get(exeq, &query_elem)) {
+ PMD_DRV_LOG(ERR, "There is a pending MOVE command already");
+ return ECORE_INVAL;
+ }
+
+ /* Check for DEL commands */
+ if (exeq->get(exeq, elem)) {
+ ECORE_MSG("There is a pending DEL command already");
+ return ECORE_EXISTS;
+ }
+
+ /* Return the credit to the credit pool if not requested not to */
+ if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ o->put_credit(o))) {
+ PMD_DRV_LOG(ERR, "Failed to return a credit");
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/**
+ * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
+ *
+ * @sc: device handle
+ * @qo: quable object to check (source)
+ * @elem: element that needs to be moved
+ *
+ * Checks that the requested configuration can be moved. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
+ union ecore_qable_obj *qo,
+ struct ecore_exeq_elem *elem)
+{
+ struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
+ struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
+ struct ecore_exeq_elem query_elem;
+ struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
+ struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
+
+ /* Check if we can perform this operation based on the current registry
+ * state.
+ */
+ if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
+ ECORE_MSG
+ ("MOVE command is not allowed considering current registry state");
+ return ECORE_INVAL;
+ }
+
+ /* Check if there is an already pending DEL or MOVE command for the
+ * source object or ADD command for a destination object. Return an
+ * error if so.
+ */
+ ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
+
+ /* Check DEL on source */
+ query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
+ if (src_exeq->get(src_exeq, &query_elem)) {
+ PMD_DRV_LOG(ERR,
+ "There is a pending DEL command on the source queue already");
+ return ECORE_INVAL;
+ }
+
+ /* Check MOVE on source */
+ if (src_exeq->get(src_exeq, elem)) {
+ ECORE_MSG("There is a pending MOVE command already");
+ return ECORE_EXISTS;
+ }
+
+ /* Check ADD on destination */
+ query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
+ if (dest_exeq->get(dest_exeq, &query_elem)) {
+ PMD_DRV_LOG(ERR,
+ "There is a pending ADD command on the destination queue already");
+ return ECORE_INVAL;
+ }
+
+ /* Consume the credit if not requested not to */
+ if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ dest_o->get_credit(dest_o)))
+ return ECORE_INVAL;
+
+ if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ src_o->put_credit(src_o))) {
+ /* return the credit taken from dest... */
+ dest_o->put_credit(dest_o);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int ecore_validate_vlan_mac(struct bnx2x_softc *sc,
+ union ecore_qable_obj *qo,
+ struct ecore_exeq_elem *elem)
+{
+ switch (elem->cmd_data.vlan_mac.cmd) {
+ case ECORE_VLAN_MAC_ADD:
+ return ecore_validate_vlan_mac_add(sc, qo, elem);
+ case ECORE_VLAN_MAC_DEL:
+ return ecore_validate_vlan_mac_del(sc, qo, elem);
+ case ECORE_VLAN_MAC_MOVE:
+ return ecore_validate_vlan_mac_move(sc, qo, elem);
+ default:
+ return ECORE_INVAL;
+ }
+}
+
+static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc,
+ union ecore_qable_obj *qo,
+ struct ecore_exeq_elem *elem)
+{
+ int rc = 0;
+
+ /* If consumption wasn't required, nothing to do */
+ if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags))
+ return ECORE_SUCCESS;
+
+ switch (elem->cmd_data.vlan_mac.cmd) {
+ case ECORE_VLAN_MAC_ADD:
+ case ECORE_VLAN_MAC_MOVE:
+ rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
+ break;
+ case ECORE_VLAN_MAC_DEL:
+ rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ if (rc != TRUE)
+ return ECORE_INVAL;
+
+ return ECORE_SUCCESS;
+}
+
+/**
+ * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
+ *
+ * @sc: device handle
+ * @o: ecore_vlan_mac_obj
+ *
+ */
+static int ecore_wait_vlan_mac(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o)
+{
+ int cnt = 5000, rc;
+ struct ecore_exe_queue_obj *exeq = &o->exe_queue;
+ struct ecore_raw_obj *raw = &o->raw;
+
+ while (cnt--) {
+ /* Wait for the current command to complete */
+ rc = raw->wait_comp(sc, raw);
+ if (rc)
+ return rc;
+
+ /* Wait until there are no pending commands */
+ if (!ecore_exe_queue_empty(exeq))
+ ECORE_WAIT(sc, 1000);
+ else
+ return ECORE_SUCCESS;
+ }
+
+ return ECORE_TIMEOUT;
+}
+
+static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ unsigned long *ramrod_flags)
+{
+ int rc = ECORE_SUCCESS;
+
+ ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
+
+ ECORE_MSG("vlan_mac_execute_step - trying to take writer lock");
+ rc = __ecore_vlan_mac_h_write_trylock(sc, o);
+
+ if (rc != ECORE_SUCCESS) {
+ __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
+
+ /** Calling function should not diffrentiate between this case
+ * and the case in which there is already a pending ramrod
+ */
+ rc = ECORE_PENDING;
+ } else {
+ rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
+ }
+ ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
+
+ return rc;
+}
+
+/**
+ * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
+ *
+ * @sc: device handle
+ * @o: ecore_vlan_mac_obj
+ * @cqe:
+ * @cont: if TRUE schedule next execution chunk
+ *
+ */
+static int ecore_complete_vlan_mac(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ union event_ring_elem *cqe,
+ unsigned long *ramrod_flags)
+{
+ struct ecore_raw_obj *r = &o->raw;
+ int rc;
+
+ /* Reset pending list */
+ ecore_exe_queue_reset_pending(sc, &o->exe_queue);
+
+ /* Clear pending */
+ r->clear_pending(r);
+
+ /* If ramrod failed this is most likely a SW bug */
+ if (cqe->message.error)
+ return ECORE_INVAL;
+
+ /* Run the next bulk of pending commands if requested */
+ if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
+ rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
+ if (rc < 0)
+ return rc;
+ }
+
+ /* If there is more work to do return PENDING */
+ if (!ecore_exe_queue_empty(&o->exe_queue))
+ return ECORE_PENDING;
+
+ return ECORE_SUCCESS;
+}
+
+/**
+ * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
+ *
+ * @sc: device handle
+ * @o: ecore_qable_obj
+ * @elem: ecore_exeq_elem
+ */
+static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc,
+ union ecore_qable_obj *qo,
+ struct ecore_exeq_elem *elem)
+{
+ struct ecore_exeq_elem query, *pos;
+ struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
+ struct ecore_exe_queue_obj *exeq = &o->exe_queue;
+
+ ECORE_MEMCPY(&query, elem, sizeof(query));
+
+ switch (elem->cmd_data.vlan_mac.cmd) {
+ case ECORE_VLAN_MAC_ADD:
+ query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
+ break;
+ case ECORE_VLAN_MAC_DEL:
+ query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
+ break;
+ default:
+ /* Don't handle anything other than ADD or DEL */
+ return 0;
+ }
+
+ /* If we found the appropriate element - delete it */
+ pos = exeq->get(exeq, &query);
+ if (pos) {
+
+ /* Return the credit of the optimized command */
+ if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
+ &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
+ if ((query.cmd_data.vlan_mac.cmd ==
+ ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
+ PMD_DRV_LOG(ERR,
+ "Failed to return the credit for the optimized ADD command");
+ return ECORE_INVAL;
+ } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
+ PMD_DRV_LOG(ERR,
+ "Failed to recover the credit from the optimized DEL command");
+ return ECORE_INVAL;
+ }
+ }
+
+ ECORE_MSG("Optimizing %s command",
+ (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
+ "ADD" : "DEL");
+
+ ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
+ ecore_exe_queue_free_elem(sc, pos);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * ecore_vlan_mac_get_registry_elem - prepare a registry element
+ *
+ * @sc: device handle
+ * @o:
+ * @elem:
+ * @restore:
+ * @re:
+ *
+ * prepare a registry element according to the current command request.
+ */
+static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ struct ecore_exeq_elem *elem,
+ int restore, struct
+ ecore_vlan_mac_registry_elem
+ **re)
+{
+ enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+ struct ecore_vlan_mac_registry_elem *reg_elem;
+
+ /* Allocate a new registry element if needed. */
+ if (!restore &&
+ ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
+ reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
+ if (!reg_elem)
+ return ECORE_NOMEM;
+
+ /* Get a new CAM offset */
+ if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
+ /* This shall never happen, because we have checked the
+ * CAM availability in the 'validate'.
+ */
+ ECORE_DBG_BREAK_IF(1);
+ ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
+ return ECORE_INVAL;
+ }
+
+ ECORE_MSG("Got cam offset %d", reg_elem->cam_offset);
+
+ /* Set a VLAN-MAC data */
+ ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
+ sizeof(reg_elem->u));
+
+ /* Copy the flags (needed for DEL and RESTORE flows) */
+ reg_elem->vlan_mac_flags =
+ elem->cmd_data.vlan_mac.vlan_mac_flags;
+ } else /* DEL, RESTORE */
+ reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
+
+ *re = reg_elem;
+ return ECORE_SUCCESS;
+}
+
+/**
+ * ecore_execute_vlan_mac - execute vlan mac command
+ *
+ * @sc: device handle
+ * @qo:
+ * @exe_chunk:
+ * @ramrod_flags:
+ *
+ * go and send a ramrod!
+ */
+static int ecore_execute_vlan_mac(struct bnx2x_softc *sc,
+ union ecore_qable_obj *qo,
+ ecore_list_t * exe_chunk,
+ unsigned long *ramrod_flags)
+{
+ struct ecore_exeq_elem *elem;
+ struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
+ struct ecore_raw_obj *r = &o->raw;
+ int rc, idx = 0;
+ int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
+ int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
+ struct ecore_vlan_mac_registry_elem *reg_elem;
+ enum ecore_vlan_mac_cmd cmd;
+
+ /* If DRIVER_ONLY execution is requested, cleanup a registry
+ * and exit. Otherwise send a ramrod to FW.
+ */
+ if (!drv_only) {
+
+ /* Set pending */
+ r->set_pending(r);
+
+ /* Fill the ramrod data */
+ ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
+ struct ecore_exeq_elem) {
+ cmd = elem->cmd_data.vlan_mac.cmd;
+ /* We will add to the target object in MOVE command, so
+ * change the object for a CAM search.
+ */
+ if (cmd == ECORE_VLAN_MAC_MOVE)
+ cam_obj = elem->cmd_data.vlan_mac.target_obj;
+ else
+ cam_obj = o;
+
+ rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
+ elem, restore,
+ &reg_elem);
+ if (rc)
+ goto error_exit;
+
+ ECORE_DBG_BREAK_IF(!reg_elem);
+
+ /* Push a new entry into the registry */
+ if (!restore &&
+ ((cmd == ECORE_VLAN_MAC_ADD) ||
+ (cmd == ECORE_VLAN_MAC_MOVE)))
+ ECORE_LIST_PUSH_HEAD(&reg_elem->link,
+ &cam_obj->head);
+
+ /* Configure a single command in a ramrod data buffer */
+ o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset);
+
+ /* MOVE command consumes 2 entries in the ramrod data */
+ if (cmd == ECORE_VLAN_MAC_MOVE)
+ idx += 2;
+ else
+ idx++;
+ }
+
+ /*
+ * No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside ecore_sp_post()).
+ */
+
+ rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
+ r->rdata_mapping, ETH_CONNECTION_TYPE);
+ if (rc)
+ goto error_exit;
+ }
+
+ /* Now, when we are done with the ramrod - clean up the registry */
+ ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
+ cmd = elem->cmd_data.vlan_mac.cmd;
+ if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) {
+ reg_elem = o->check_del(sc, o,
+ &elem->cmd_data.vlan_mac.u);
+
+ ECORE_DBG_BREAK_IF(!reg_elem);
+
+ o->put_cam_offset(o, reg_elem->cam_offset);
+ ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
+ ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
+ }
+ }
+
+ if (!drv_only)
+ return ECORE_PENDING;
+ else
+ return ECORE_SUCCESS;
+
+error_exit:
+ r->clear_pending(r);
+
+ /* Cleanup a registry in case of a failure */
+ ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
+ cmd = elem->cmd_data.vlan_mac.cmd;
+
+ if (cmd == ECORE_VLAN_MAC_MOVE)
+ cam_obj = elem->cmd_data.vlan_mac.target_obj;
+ else
+ cam_obj = o;
+
+ /* Delete all newly added above entries */
+ if (!restore &&
+ ((cmd == ECORE_VLAN_MAC_ADD) ||
+ (cmd == ECORE_VLAN_MAC_MOVE))) {
+ reg_elem = o->check_del(sc, cam_obj,
+ &elem->cmd_data.vlan_mac.u);
+ if (reg_elem) {
+ ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
+ &cam_obj->head);
+ ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc, struct
+ ecore_vlan_mac_ramrod_params *p)
+{
+ struct ecore_exeq_elem *elem;
+ struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
+ int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
+
+ /* Allocate the execution queue element */
+ elem = ecore_exe_queue_alloc_elem(sc);
+ if (!elem)
+ return ECORE_NOMEM;
+
+ /* Set the command 'length' */
+ switch (p->user_req.cmd) {
+ case ECORE_VLAN_MAC_MOVE:
+ elem->cmd_len = 2;
+ break;
+ default:
+ elem->cmd_len = 1;
+ }
+
+ /* Fill the object specific info */
+ ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req,
+ sizeof(p->user_req));
+
+ /* Try to add a new command to the pending list */
+ return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
+}
+
+/**
+ * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
+ *
+ * @sc: device handle
+ * @p:
+ *
+ */
+int ecore_config_vlan_mac(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_ramrod_params *p)
+{
+ int rc = ECORE_SUCCESS;
+ struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
+ unsigned long *ramrod_flags = &p->ramrod_flags;
+ int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
+ struct ecore_raw_obj *raw = &o->raw;
+
+ /*
+ * Add new elements to the execution list for commands that require it.
+ */
+ if (!cont) {
+ rc = ecore_vlan_mac_push_new_cmd(sc, p);
+ if (rc)
+ return rc;
+ }
+
+ /* If nothing will be executed further in this iteration we want to
+ * return PENDING if there are pending commands
+ */
+ if (!ecore_exe_queue_empty(&o->exe_queue))
+ rc = ECORE_PENDING;
+
+ if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
+ ECORE_MSG
+ ("RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
+ raw->clear_pending(raw);
+ }
+
+ /* Execute commands if required */
+ if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
+ ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
+ rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
+ &p->ramrod_flags);
+ if (rc < 0)
+ return rc;
+ }
+
+ /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
+ * then user want to wait until the last command is done.
+ */
+ if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+ /* Wait maximum for the current exe_queue length iterations plus
+ * one (for the current pending command).
+ */
+ int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
+
+ while (!ecore_exe_queue_empty(&o->exe_queue) &&
+ max_iterations--) {
+
+ /* Wait for the current command to complete */
+ rc = raw->wait_comp(sc, raw);
+ if (rc)
+ return rc;
+
+ /* Make a next step */
+ rc = __ecore_vlan_mac_execute_step(sc,
+ p->vlan_mac_obj,
+ &p->ramrod_flags);
+ if (rc < 0)
+ return rc;
+ }
+
+ return ECORE_SUCCESS;
+ }
+
+ return rc;
+}
+
+/**
+ * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
+ *
+ * @sc: device handle
+ * @o:
+ * @vlan_mac_flags:
+ * @ramrod_flags: execution flags to be used for this deletion
+ *
+ * if the last operation has completed successfully and there are no
+ * more elements left, positive value if the last operation has completed
+ * successfully and there are more previously configured elements, negative
+ * value is current operation has failed.
+ */
+static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ unsigned long *vlan_mac_flags,
+ unsigned long *ramrod_flags)
+{
+ struct ecore_vlan_mac_registry_elem *pos = NULL;
+ int rc = 0, read_lock;
+ struct ecore_vlan_mac_ramrod_params p;
+ struct ecore_exe_queue_obj *exeq = &o->exe_queue;
+ struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
+
+ /* Clear pending commands first */
+
+ ECORE_SPIN_LOCK_BH(&exeq->lock);
+
+ ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
+ &exeq->exe_queue, link,
+ struct ecore_exeq_elem) {
+ if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
+ *vlan_mac_flags) {
+ rc = exeq->remove(sc, exeq->owner, exeq_pos);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to remove command");
+ ECORE_SPIN_UNLOCK_BH(&exeq->lock);
+ return rc;
+ }
+ ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
+ &exeq->exe_queue);
+ ecore_exe_queue_free_elem(sc, exeq_pos);
+ }
+ }
+
+ ECORE_SPIN_UNLOCK_BH(&exeq->lock);
+
+ /* Prepare a command request */
+ ECORE_MEMSET(&p, 0, sizeof(p));
+ p.vlan_mac_obj = o;
+ p.ramrod_flags = *ramrod_flags;
+ p.user_req.cmd = ECORE_VLAN_MAC_DEL;
+
+ /* Add all but the last VLAN-MAC to the execution queue without actually
+ * execution anything.
+ */
+ ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
+ ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
+ ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
+
+ ECORE_MSG("vlan_mac_del_all -- taking vlan_mac_lock (reader)");
+ read_lock = ecore_vlan_mac_h_read_lock(sc, o);
+ if (read_lock != ECORE_SUCCESS)
+ return read_lock;
+
+ ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
+ struct ecore_vlan_mac_registry_elem) {
+ if (pos->vlan_mac_flags == *vlan_mac_flags) {
+ p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
+ ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
+ rc = ecore_config_vlan_mac(sc, &p);
+ if (rc < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to add a new DEL command");
+ ecore_vlan_mac_h_read_unlock(sc, o);
+ return rc;
+ }
+ }
+ }
+
+ ECORE_MSG("vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
+ ecore_vlan_mac_h_read_unlock(sc, o);
+
+ p.ramrod_flags = *ramrod_flags;
+ ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
+
+ return ecore_config_vlan_mac(sc, &p);
+}
+
+static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
+ uint32_t cid, uint8_t func_id,
+ void *rdata,
+ ecore_dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, ecore_obj_type type)
+{
+ raw->func_id = func_id;
+ raw->cid = cid;
+ raw->cl_id = cl_id;
+ raw->rdata = rdata;
+ raw->rdata_mapping = rdata_mapping;
+ raw->state = state;
+ raw->pstate = pstate;
+ raw->obj_type = type;
+ raw->check_pending = ecore_raw_check_pending;
+ raw->clear_pending = ecore_raw_clear_pending;
+ raw->set_pending = ecore_raw_set_pending;
+ raw->wait_comp = ecore_raw_wait;
+}
+
+static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
+ uint8_t cl_id, uint32_t cid,
+ uint8_t func_id, void *rdata,
+ ecore_dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ ecore_obj_type type,
+ struct ecore_credit_pool_obj
+ *macs_pool, struct ecore_credit_pool_obj
+ *vlans_pool)
+{
+ ECORE_LIST_INIT(&o->head);
+ o->head_reader = 0;
+ o->head_exe_request = FALSE;
+ o->saved_ramrod_flags = 0;
+
+ o->macs_pool = macs_pool;
+ o->vlans_pool = vlans_pool;
+
+ o->delete_all = ecore_vlan_mac_del_all;
+ o->restore = ecore_vlan_mac_restore;
+ o->complete = ecore_complete_vlan_mac;
+ o->wait = ecore_wait_vlan_mac;
+
+ ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
+ state, pstate, type);
+}
+
+void ecore_init_mac_obj(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *mac_obj,
+ uint8_t cl_id, uint32_t cid, uint8_t func_id,
+ void *rdata, ecore_dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, ecore_obj_type type,
+ struct ecore_credit_pool_obj *macs_pool)
+{
+ union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
+
+ ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type,
+ macs_pool, NULL);
+
+ /* CAM credit pool handling */
+ mac_obj->get_credit = ecore_get_credit_mac;
+ mac_obj->put_credit = ecore_put_credit_mac;
+ mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
+ mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
+
+ if (CHIP_IS_E1x(sc)) {
+ mac_obj->set_one_rule = ecore_set_one_mac_e1x;
+ mac_obj->check_del = ecore_check_mac_del;
+ mac_obj->check_add = ecore_check_mac_add;
+ mac_obj->check_move = ecore_check_move_always_err;
+ mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
+
+ /* Exe Queue */
+ ecore_exe_queue_init(sc,
+ &mac_obj->exe_queue, 1, qable_obj,
+ ecore_validate_vlan_mac,
+ ecore_remove_vlan_mac,
+ ecore_optimize_vlan_mac,
+ ecore_execute_vlan_mac,
+ ecore_exeq_get_mac);
+ } else {
+ mac_obj->set_one_rule = ecore_set_one_mac_e2;
+ mac_obj->check_del = ecore_check_mac_del;
+ mac_obj->check_add = ecore_check_mac_add;
+ mac_obj->check_move = ecore_check_move;
+ mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+ mac_obj->get_n_elements = ecore_get_n_elements;
+
+ /* Exe Queue */
+ ecore_exe_queue_init(sc,
+ &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
+ qable_obj, ecore_validate_vlan_mac,
+ ecore_remove_vlan_mac,
+ ecore_optimize_vlan_mac,
+ ecore_execute_vlan_mac,
+ ecore_exeq_get_mac);
+ }
+}
+
+/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+static void __storm_memset_mac_filters(struct bnx2x_softc *sc, struct
+ tstorm_eth_mac_filter_config
+ *mac_filters, uint16_t pf_id)
+{
+ size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+
+ uint32_t addr = BAR_TSTRORM_INTMEM +
+ TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
+
+ ecore_storm_memset_struct(sc, addr, size, (uint32_t *) mac_filters);
+}
+
+static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc,
+ struct ecore_rx_mode_ramrod_params *p)
+{
+ /* update the sc MAC filter structure */
+ uint32_t mask = (1 << p->cl_id);
+
+ struct tstorm_eth_mac_filter_config *mac_filters =
+ (struct tstorm_eth_mac_filter_config *)p->rdata;
+
+ /* initial setting is drop-all */
+ uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
+ uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
+ uint8_t unmatched_unicast = 0;
+
+ /* In e1x there we only take into account rx accept flag since tx switching
+ * isn't enabled. */
+ if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
+ /* accept matched ucast */
+ drop_all_ucast = 0;
+
+ if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
+ /* accept matched mcast */
+ drop_all_mcast = 0;
+
+ if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
+ /* accept all mcast */
+ drop_all_ucast = 0;
+ accp_all_ucast = 1;
+ }
+ if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
+ /* accept all mcast */
+ drop_all_mcast = 0;
+ accp_all_mcast = 1;
+ }
+ if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
+ /* accept (all) bcast */
+ accp_all_bcast = 1;
+ if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
+ /* accept unmatched unicasts */
+ unmatched_unicast = 1;
+
+ mac_filters->ucast_drop_all = drop_all_ucast ?
+ mac_filters->ucast_drop_all | mask :
+ mac_filters->ucast_drop_all & ~mask;
+
+ mac_filters->mcast_drop_all = drop_all_mcast ?
+ mac_filters->mcast_drop_all | mask :
+ mac_filters->mcast_drop_all & ~mask;
+
+ mac_filters->ucast_accept_all = accp_all_ucast ?
+ mac_filters->ucast_accept_all | mask :
+ mac_filters->ucast_accept_all & ~mask;
+
+ mac_filters->mcast_accept_all = accp_all_mcast ?
+ mac_filters->mcast_accept_all | mask :
+ mac_filters->mcast_accept_all & ~mask;
+
+ mac_filters->bcast_accept_all = accp_all_bcast ?
+ mac_filters->bcast_accept_all | mask :
+ mac_filters->bcast_accept_all & ~mask;
+
+ mac_filters->unmatched_unicast = unmatched_unicast ?
+ mac_filters->unmatched_unicast | mask :
+ mac_filters->unmatched_unicast & ~mask;
+
+ ECORE_MSG("drop_ucast 0x%xdrop_mcast 0x%x accp_ucast 0x%x"
+ "accp_mcast 0x%xaccp_bcast 0x%x",
+ mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
+ mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
+ mac_filters->bcast_accept_all);
+
+ /* write the MAC filter structure */
+ __storm_memset_mac_filters(sc, mac_filters, p->func_id);
+
+ /* The operation is completed */
+ ECORE_CLEAR_BIT(p->state, p->pstate);
+ ECORE_SMP_MB_AFTER_CLEAR_BIT();
+
+ return ECORE_SUCCESS;
+}
+
+/* Setup ramrod data */
+static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid, struct eth_classify_header
+ *hdr, uint8_t rule_cnt)
+{
+ hdr->echo = ECORE_CPU_TO_LE32(cid);
+ hdr->rule_cnt = rule_cnt;
+}
+
+static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags, struct eth_filter_rules_cmd
+ *cmd, int clear_accept_all)
+{
+ uint16_t state;
+
+ /* start with 'drop-all' */
+ state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
+ ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+ if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+
+ if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+ if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+ }
+
+ if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
+ state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+ }
+ if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
+ state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+
+ if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+ }
+ if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
+ state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+
+ /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
+ if (clear_accept_all) {
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+ }
+
+ cmd->state = ECORE_CPU_TO_LE16(state);
+}
+
+static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc,
+ struct ecore_rx_mode_ramrod_params *p)
+{
+ struct eth_filter_rules_ramrod_data *data = p->rdata;
+ int rc;
+ uint8_t rule_idx = 0;
+
+ /* Reset the ramrod data buffer */
+ ECORE_MEMSET(data, 0, sizeof(*data));
+
+ /* Setup ramrod data */
+
+ /* Tx (internal switching) */
+ if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = p->cl_id;
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_TX_CMD;
+
+ ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
+ &(data->rules[rule_idx++]),
+ FALSE);
+ }
+
+ /* Rx */
+ if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = p->cl_id;
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_RX_CMD;
+
+ ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
+ &(data->rules[rule_idx++]),
+ FALSE);
+ }
+
+ /* If FCoE Queue configuration has been requested configure the Rx and
+ * internal switching modes for this queue in separate rules.
+ *
+ * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
+ * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
+ */
+ if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
+ /* Tx (internal switching) */
+ if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_TX_CMD;
+
+ ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
+ &(data->rules
+ [rule_idx++]), TRUE);
+ }
+
+ /* Rx */
+ if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_RX_CMD;
+
+ ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
+ &(data->rules
+ [rule_idx++]), TRUE);
+ }
+ }
+
+ /* Set the ramrod header (most importantly - number of rules to
+ * configure).
+ */
+ ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
+
+ ECORE_MSG
+ ("About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
+ data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags);
+
+ /* No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside ecore_sp_post()).
+ */
+
+ /* Send a ramrod */
+ rc = ecore_sp_post(sc,
+ RAMROD_CMD_ID_ETH_FILTER_RULES,
+ p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE);
+ if (rc)
+ return rc;
+
+ /* Ramrod completion is pending */
+ return ECORE_PENDING;
+}
+
+static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc,
+ struct ecore_rx_mode_ramrod_params *p)
+{
+ return ecore_state_wait(sc, p->state, p->pstate);
+}
+
+static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc,
+ __rte_unused struct
+ ecore_rx_mode_ramrod_params *p)
+{
+ /* Do nothing */
+ return ECORE_SUCCESS;
+}
+
+int ecore_config_rx_mode(struct bnx2x_softc *sc,
+ struct ecore_rx_mode_ramrod_params *p)
+{
+ int rc;
+
+ /* Configure the new classification in the chip */
+ if (p->rx_mode_obj->config_rx_mode) {
+ rc = p->rx_mode_obj->config_rx_mode(sc, p);
+ if (rc < 0)
+ return rc;
+
+ /* Wait for a ramrod completion if was requested */
+ if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+ rc = p->rx_mode_obj->wait_comp(sc, p);
+ if (rc)
+ return rc;
+ }
+ } else {
+ ECORE_MSG("ERROR: config_rx_mode is NULL");
+ return -1;
+ }
+
+ return rc;
+}
+
+void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o)
+{
+ if (CHIP_IS_E1x(sc)) {
+ o->wait_comp = ecore_empty_rx_mode_wait;
+ o->config_rx_mode = ecore_set_rx_mode_e1x;
+ } else {
+ o->wait_comp = ecore_wait_rx_mode_comp_e2;
+ o->config_rx_mode = ecore_set_rx_mode_e2;
+ }
+}
+
+/********************* Multicast verbs: SET, CLEAR ****************************/
+static uint8_t ecore_mcast_bin_from_mac(uint8_t * mac)
+{
+ return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
+}
+
+struct ecore_mcast_mac_elem {
+ ecore_list_entry_t link;
+ uint8_t mac[ETH_ALEN];
+ uint8_t pad[2]; /* For a natural alignment of the following buffer */
+};
+
+struct ecore_pending_mcast_cmd {
+ ecore_list_entry_t link;
+ int type; /* ECORE_MCAST_CMD_X */
+ union {
+ ecore_list_t macs_head;
+ uint32_t macs_num; /* Needed for DEL command */
+ int next_bin; /* Needed for RESTORE flow with aprox match */
+ } data;
+
+ int done; /* set to TRUE, when the command has been handled,
+ * practically used in 57712 handling only, where one pending
+ * command may be handled in a few operations. As long as for
+ * other chips every operation handling is completed in a
+ * single ramrod, there is no need to utilize this field.
+ */
+};
+
+static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o)
+{
+ if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
+ o->raw.wait_comp(sc, &o->raw))
+ return ECORE_TIMEOUT;
+
+ return ECORE_SUCCESS;
+}
+
+static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_mcast_obj *o,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd)
+{
+ int total_sz;
+ struct ecore_pending_mcast_cmd *new_cmd;
+ struct ecore_mcast_mac_elem *cur_mac = NULL;
+ struct ecore_mcast_list_elem *pos;
+ int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
+ p->mcast_list_len : 0);
+
+ /* If the command is empty ("handle pending commands only"), break */
+ if (!p->mcast_list_len)
+ return ECORE_SUCCESS;
+
+ total_sz = sizeof(*new_cmd) +
+ macs_list_len * sizeof(struct ecore_mcast_mac_elem);
+
+ /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
+ new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
+
+ if (!new_cmd)
+ return ECORE_NOMEM;
+
+ ECORE_MSG("About to enqueue a new %d command. macs_list_len=%d",
+ cmd, macs_list_len);
+
+ ECORE_LIST_INIT(&new_cmd->data.macs_head);
+
+ new_cmd->type = cmd;
+ new_cmd->done = FALSE;
+
+ switch (cmd) {
+ case ECORE_MCAST_CMD_ADD:
+ cur_mac = (struct ecore_mcast_mac_elem *)
+ ((uint8_t *) new_cmd + sizeof(*new_cmd));
+
+ /* Push the MACs of the current command into the pending command
+ * MACs list: FIFO
+ */
+ ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
+ struct ecore_mcast_list_elem) {
+ ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
+ ECORE_LIST_PUSH_TAIL(&cur_mac->link,
+ &new_cmd->data.macs_head);
+ cur_mac++;
+ }
+
+ break;
+
+ case ECORE_MCAST_CMD_DEL:
+ new_cmd->data.macs_num = p->mcast_list_len;
+ break;
+
+ case ECORE_MCAST_CMD_RESTORE:
+ new_cmd->data.next_bin = 0;
+ break;
+
+ default:
+ ECORE_FREE(sc, new_cmd, total_sz);
+ PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ return ECORE_INVAL;
+ }
+
+ /* Push the new pending command to the tail of the pending list: FIFO */
+ ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
+
+ o->set_sched(o);
+
+ return ECORE_PENDING;
+}
+
+/**
+ * ecore_mcast_get_next_bin - get the next set bin (index)
+ *
+ * @o:
+ * @last: index to start looking from (including)
+ *
+ * returns the next found (set) bin or a negative value if none is found.
+ */
+static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
+{
+ int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
+
+ for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
+ if (o->registry.aprox_match.vec[i])
+ for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
+ int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
+ if (BIT_VEC64_TEST_BIT
+ (o->registry.aprox_match.vec, cur_bit)) {
+ return cur_bit;
+ }
+ }
+ inner_start = 0;
+ }
+
+ /* None found */
+ return -1;
+}
+
+/**
+ * ecore_mcast_clear_first_bin - find the first set bin and clear it
+ *
+ * @o:
+ *
+ * returns the index of the found bin or -1 if none is found
+ */
+static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
+{
+ int cur_bit = ecore_mcast_get_next_bin(o, 0);
+
+ if (cur_bit >= 0)
+ BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
+
+ return cur_bit;
+}
+
+static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
+{
+ struct ecore_raw_obj *raw = &o->raw;
+ uint8_t rx_tx_flag = 0;
+
+ if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
+ (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
+
+ if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
+ (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
+
+ return rx_tx_flag;
+}
+
+static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_mcast_obj *o, int idx,
+ union ecore_mcast_config_data *cfg_data,
+ enum ecore_mcast_cmd cmd)
+{
+ struct ecore_raw_obj *r = &o->raw;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(r->rdata);
+ uint8_t func_id = r->func_id;
+ uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
+ int bin;
+
+ if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
+ rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
+
+ data->rules[idx].cmd_general_data |= rx_tx_add_flag;
+
+ /* Get a bin and update a bins' vector */
+ switch (cmd) {
+ case ECORE_MCAST_CMD_ADD:
+ bin = ecore_mcast_bin_from_mac(cfg_data->mac);
+ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
+ break;
+
+ case ECORE_MCAST_CMD_DEL:
+ /* If there were no more bins to clear
+ * (ecore_mcast_clear_first_bin() returns -1) then we would
+ * clear any (0xff) bin.
+ * See ecore_mcast_validate_e2() for explanation when it may
+ * happen.
+ */
+ bin = ecore_mcast_clear_first_bin(o);
+ break;
+
+ case ECORE_MCAST_CMD_RESTORE:
+ bin = cfg_data->bin;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ return;
+ }
+
+ ECORE_MSG("%s bin %d",
+ ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
+ "Setting" : "Clearing"), bin);
+
+ data->rules[idx].bin_id = (uint8_t) bin;
+ data->rules[idx].func_id = func_id;
+ data->rules[idx].engine_id = o->engine_id;
+}
+
+/**
+ * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
+ *
+ * @sc: device handle
+ * @o:
+ * @start_bin: index in the registry to start from (including)
+ * @rdata_idx: index in the ramrod data to start from
+ *
+ * returns last handled bin index or -1 if all bins have been handled
+ */
+static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc,
+ struct ecore_mcast_obj *o,
+ int start_bin, int *rdata_idx)
+{
+ int cur_bin, cnt = *rdata_idx;
+ union ecore_mcast_config_data cfg_data = { NULL };
+
+ /* go through the registry and configure the bins from it */
+ for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
+ cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
+
+ cfg_data.bin = (uint8_t) cur_bin;
+ o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE);
+
+ cnt++;
+
+ ECORE_MSG("About to configure a bin %d", cur_bin);
+
+ /* Break if we reached the maximum number
+ * of rules.
+ */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ *rdata_idx = cnt;
+
+ return cur_bin;
+}
+
+static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc,
+ struct ecore_mcast_obj *o,
+ struct ecore_pending_mcast_cmd
+ *cmd_pos, int *line_idx)
+{
+ struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
+ int cnt = *line_idx;
+ union ecore_mcast_config_data cfg_data = { NULL };
+
+ ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
+ &cmd_pos->data.macs_head, link,
+ struct ecore_mcast_mac_elem) {
+
+ cfg_data.mac = &pmac_pos->mac[0];
+ o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
+
+ cnt++;
+
+ ECORE_MSG
+ ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
+ pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2],
+ pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
+
+ ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
+ &cmd_pos->data.macs_head);
+
+ /* Break if we reached the maximum number
+ * of rules.
+ */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ *line_idx = cnt;
+
+ /* if no more MACs to configure - we are done */
+ if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
+ cmd_pos->done = TRUE;
+}
+
+static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc,
+ struct ecore_mcast_obj *o,
+ struct ecore_pending_mcast_cmd
+ *cmd_pos, int *line_idx)
+{
+ int cnt = *line_idx;
+
+ while (cmd_pos->data.macs_num) {
+ o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
+
+ cnt++;
+
+ cmd_pos->data.macs_num--;
+
+ ECORE_MSG("Deleting MAC. %d left,cnt is %d",
+ cmd_pos->data.macs_num, cnt);
+
+ /* Break if we reached the maximum
+ * number of rules.
+ */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ *line_idx = cnt;
+
+ /* If we cleared all bins - we are done */
+ if (!cmd_pos->data.macs_num)
+ cmd_pos->done = TRUE;
+}
+
+static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc,
+ struct ecore_mcast_obj *o, struct
+ ecore_pending_mcast_cmd
+ *cmd_pos, int *line_idx)
+{
+ cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
+ line_idx);
+
+ if (cmd_pos->data.next_bin < 0)
+ /* If o->set_restore returned -1 we are done */
+ cmd_pos->done = TRUE;
+ else
+ /* Start from the next bin next time */
+ cmd_pos->data.next_bin++;
+}
+
+static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct
+ ecore_mcast_ramrod_params
+ *p)
+{
+ struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
+ int cnt = 0;
+ struct ecore_mcast_obj *o = p->mcast_obj;
+
+ ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
+ &o->pending_cmds_head, link,
+ struct ecore_pending_mcast_cmd) {
+ switch (cmd_pos->type) {
+ case ECORE_MCAST_CMD_ADD:
+ ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
+ break;
+
+ case ECORE_MCAST_CMD_DEL:
+ ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
+ break;
+
+ case ECORE_MCAST_CMD_RESTORE:
+ ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
+ &cnt);
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Unknown command: %d", cmd_pos->type);
+ return ECORE_INVAL;
+ }
+
+ /* If the command has been completed - remove it from the list
+ * and free the memory
+ */
+ if (cmd_pos->done) {
+ ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
+ &o->pending_cmds_head);
+ ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
+ }
+
+ /* Break if we reached the maximum number of rules */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ return cnt;
+}
+
+static void ecore_mcast_hdl_add(struct bnx2x_softc *sc,
+ struct ecore_mcast_obj *o,
+ struct ecore_mcast_ramrod_params *p,
+ int *line_idx)
+{
+ struct ecore_mcast_list_elem *mlist_pos;
+ union ecore_mcast_config_data cfg_data = { NULL };
+ int cnt = *line_idx;
+
+ ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
+ struct ecore_mcast_list_elem) {
+ cfg_data.mac = mlist_pos->mac;
+ o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
+
+ cnt++;
+
+ ECORE_MSG
+ ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
+ mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
+ mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
+ }
+
+ *line_idx = cnt;
+}
+
+static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
+ struct ecore_mcast_obj *o,
+ struct ecore_mcast_ramrod_params *p,
+ int *line_idx)
+{
+ int cnt = *line_idx, i;
+
+ for (i = 0; i < p->mcast_list_len; i++) {
+ o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
+
+ cnt++;
+
+ ECORE_MSG("Deleting MAC. %d left", p->mcast_list_len - i - 1);
+ }
+
+ *line_idx = cnt;
+}
+
+/**
+ * ecore_mcast_handle_current_cmd -
+ *
+ * @sc: device handle
+ * @p:
+ * @cmd:
+ * @start_cnt: first line in the ramrod data that may be used
+ *
+ * This function is called if there is enough place for the current command in
+ * the ramrod data.
+ * Returns number of lines filled in the ramrod data in total.
+ */
+static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
+ ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd,
+ int start_cnt)
+{
+ struct ecore_mcast_obj *o = p->mcast_obj;
+ int cnt = start_cnt;
+
+ ECORE_MSG("p->mcast_list_len=%d", p->mcast_list_len);
+
+ switch (cmd) {
+ case ECORE_MCAST_CMD_ADD:
+ ecore_mcast_hdl_add(sc, o, p, &cnt);
+ break;
+
+ case ECORE_MCAST_CMD_DEL:
+ ecore_mcast_hdl_del(sc, o, p, &cnt);
+ break;
+
+ case ECORE_MCAST_CMD_RESTORE:
+ o->hdl_restore(sc, o, 0, &cnt);
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ return ECORE_INVAL;
+ }
+
+ /* The current command has been handled */
+ p->mcast_list_len = 0;
+
+ return cnt;
+}
+
+static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd)
+{
+ struct ecore_mcast_obj *o = p->mcast_obj;
+ int reg_sz = o->get_registry_size(o);
+
+ switch (cmd) {
+ /* DEL command deletes all currently configured MACs */
+ case ECORE_MCAST_CMD_DEL:
+ o->set_registry_size(o, 0);
+ /* fall-through */
+
+ /* RESTORE command will restore the entire multicast configuration */
+ case ECORE_MCAST_CMD_RESTORE:
+ /* Here we set the approximate amount of work to do, which in
+ * fact may be only less as some MACs in postponed ADD
+ * command(s) scheduled before this command may fall into
+ * the same bin and the actual number of bins set in the
+ * registry would be less than we estimated here. See
+ * ecore_mcast_set_one_rule_e2() for further details.
+ */
+ p->mcast_list_len = reg_sz;
+ break;
+
+ case ECORE_MCAST_CMD_ADD:
+ case ECORE_MCAST_CMD_CONT:
+ /* Here we assume that all new MACs will fall into new bins.
+ * However we will correct the real registry size after we
+ * handle all pending commands.
+ */
+ o->set_registry_size(o, reg_sz + p->mcast_list_len);
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ return ECORE_INVAL;
+ }
+
+ /* Increase the total number of MACs pending to be configured */
+ o->total_pending_num += p->mcast_list_len;
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ int old_num_bins)
+{
+ struct ecore_mcast_obj *o = p->mcast_obj;
+
+ o->set_registry_size(o, old_num_bins);
+ o->total_pending_num -= p->mcast_list_len;
+}
+
+/**
+ * ecore_mcast_set_rdata_hdr_e2 - sets a header values
+ *
+ * @sc: device handle
+ * @p:
+ * @len: number of rules to handle
+ */
+static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc
+ *sc, struct ecore_mcast_ramrod_params
+ *p, uint8_t len)
+{
+ struct ecore_raw_obj *r = &p->mcast_obj->raw;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(r->rdata);
+
+ data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
+ (ECORE_FILTER_MCAST_PENDING <<
+ ECORE_SWCID_SHIFT));
+ data->header.rule_cnt = len;
+}
+
+/**
+ * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
+ *
+ * @sc: device handle
+ * @o:
+ *
+ * Recalculate the actual number of set bins in the registry using Brian
+ * Kernighan's algorithm: it's execution complexity is as a number of set bins.
+ */
+static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o)
+{
+ int i, cnt = 0;
+ uint64_t elem;
+
+ for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
+ elem = o->registry.aprox_match.vec[i];
+ for (; elem; cnt++)
+ elem &= elem - 1;
+ }
+
+ o->set_registry_size(o, cnt);
+
+ return ECORE_SUCCESS;
+}
+
+static int ecore_mcast_setup_e2(struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd)
+{
+ struct ecore_raw_obj *raw = &p->mcast_obj->raw;
+ struct ecore_mcast_obj *o = p->mcast_obj;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
+ int cnt = 0, rc;
+
+ /* Reset the ramrod data buffer */
+ ECORE_MEMSET(data, 0, sizeof(*data));
+
+ cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
+
+ /* If there are no more pending commands - clear SCHEDULED state */
+ if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
+ o->clear_sched(o);
+
+ /* The below may be TRUE if there was enough room in ramrod
+ * data for all pending commands and for the current
+ * command. Otherwise the current command would have been added
+ * to the pending commands and p->mcast_list_len would have been
+ * zeroed.
+ */
+ if (p->mcast_list_len > 0)
+ cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
+
+ /* We've pulled out some MACs - update the total number of
+ * outstanding.
+ */
+ o->total_pending_num -= cnt;
+
+ /* send a ramrod */
+ ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
+ ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
+
+ ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt);
+
+ /* Update a registry size if there are no more pending operations.
+ *
+ * We don't want to change the value of the registry size if there are
+ * pending operations because we want it to always be equal to the
+ * exact or the approximate number (see ecore_mcast_validate_e2()) of
+ * set bins after the last requested operation in order to properly
+ * evaluate the size of the next DEL/RESTORE operation.
+ *
+ * Note that we update the registry itself during command(s) handling
+ * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
+ * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
+ * with a limited amount of update commands (per MAC/bin) and we don't
+ * know in this scope what the actual state of bins configuration is
+ * going to be after this ramrod.
+ */
+ if (!o->total_pending_num)
+ ecore_mcast_refresh_registry_e2(o);
+
+ /* If CLEAR_ONLY was requested - don't send a ramrod and clear
+ * RAMROD_PENDING status immediately.
+ */
+ if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ raw->clear_pending(raw);
+ return ECORE_SUCCESS;
+ } else {
+ /* No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside ecore_sp_post()).
+ */
+
+ /* Send a ramrod */
+ rc = ecore_sp_post(sc,
+ RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+ raw->cid,
+ raw->rdata_mapping, ETH_CONNECTION_TYPE);
+ if (rc)
+ return rc;
+
+ /* Ramrod completion is pending */
+ return ECORE_PENDING;
+ }
+}
+
+static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd)
+{
+ /* Mark, that there is a work to do */
+ if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
+ p->mcast_list_len = 1;
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc,
+ __rte_unused struct ecore_mcast_ramrod_params
+ *p, __rte_unused int old_num_bins)
+{
+ /* Do nothing */
+}
+
+#define ECORE_57711_SET_MC_FILTER(filter, bit) \
+do { \
+ (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
+} while (0)
+
+static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_mcast_obj *o,
+ struct ecore_mcast_ramrod_params *p,
+ uint32_t * mc_filter)
+{
+ struct ecore_mcast_list_elem *mlist_pos;
+ int bit;
+
+ ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
+ struct ecore_mcast_list_elem) {
+ bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
+ ECORE_57711_SET_MC_FILTER(mc_filter, bit);
+
+ ECORE_MSG
+ ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
+ mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
+ mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5],
+ bit);
+
+ /* bookkeeping... */
+ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit);
+ }
+}
+
+static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc
+ __rte_unused,
+ struct ecore_mcast_obj *o,
+ uint32_t * mc_filter)
+{
+ int bit;
+
+ for (bit = ecore_mcast_get_next_bin(o, 0);
+ bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) {
+ ECORE_57711_SET_MC_FILTER(mc_filter, bit);
+ ECORE_MSG("About to set bin %d", bit);
+ }
+}
+
+/* On 57711 we write the multicast MACs' approximate match
+ * table by directly into the TSTORM's internal RAM. So we don't
+ * really need to handle any tricks to make it work.
+ */
+static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd)
+{
+ int i;
+ struct ecore_mcast_obj *o = p->mcast_obj;
+ struct ecore_raw_obj *r = &o->raw;
+
+ /* If CLEAR_ONLY has been requested - clear the registry
+ * and clear a pending bit.
+ */
+ if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 };
+
+ /* Set the multicast filter bits before writing it into
+ * the internal memory.
+ */
+ switch (cmd) {
+ case ECORE_MCAST_CMD_ADD:
+ ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
+ break;
+
+ case ECORE_MCAST_CMD_DEL:
+ ECORE_MSG("Invalidating multicast MACs configuration");
+
+ /* clear the registry */
+ ECORE_MEMSET(o->registry.aprox_match.vec, 0,
+ sizeof(o->registry.aprox_match.vec));
+ break;
+
+ case ECORE_MCAST_CMD_RESTORE:
+ ecore_mcast_hdl_restore_e1h(sc, o, mc_filter);
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ return ECORE_INVAL;
+ }
+
+ /* Set the mcast filter in the internal memory */
+ for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
+ REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
+ } else
+ /* clear the registry */
+ ECORE_MEMSET(o->registry.aprox_match.vec, 0,
+ sizeof(o->registry.aprox_match.vec));
+
+ /* We are done */
+ r->clear_pending(r);
+
+ return ECORE_SUCCESS;
+}
+
+static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
+{
+ return o->registry.aprox_match.num_bins_set;
+}
+
+static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
+ int n)
+{
+ o->registry.aprox_match.num_bins_set = n;
+}
+
+int ecore_config_mcast(struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd)
+{
+ struct ecore_mcast_obj *o = p->mcast_obj;
+ struct ecore_raw_obj *r = &o->raw;
+ int rc = 0, old_reg_size;
+
+ /* This is needed to recover number of currently configured mcast macs
+ * in case of failure.
+ */
+ old_reg_size = o->get_registry_size(o);
+
+ /* Do some calculations and checks */
+ rc = o->validate(sc, p, cmd);
+ if (rc)
+ return rc;
+
+ /* Return if there is no work to do */
+ if ((!p->mcast_list_len) && (!o->check_sched(o)))
+ return ECORE_SUCCESS;
+
+ ECORE_MSG
+ ("o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
+ o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
+
+ /* Enqueue the current command to the pending list if we can't complete
+ * it in the current iteration
+ */
+ if (r->check_pending(r) ||
+ ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
+ rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
+ if (rc < 0)
+ goto error_exit1;
+
+ /* As long as the current command is in a command list we
+ * don't need to handle it separately.
+ */
+ p->mcast_list_len = 0;
+ }
+
+ if (!r->check_pending(r)) {
+
+ /* Set 'pending' state */
+ r->set_pending(r);
+
+ /* Configure the new classification in the chip */
+ rc = o->config_mcast(sc, p, cmd);
+ if (rc < 0)
+ goto error_exit2;
+
+ /* Wait for a ramrod completion if was requested */
+ if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
+ rc = o->wait_comp(sc, o);
+ }
+
+ return rc;
+
+error_exit2:
+ r->clear_pending(r);
+
+error_exit1:
+ o->revert(sc, p, old_reg_size);
+
+ return rc;
+}
+
+static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
+{
+ ECORE_SMP_MB_BEFORE_CLEAR_BIT();
+ ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
+ ECORE_SMP_MB_AFTER_CLEAR_BIT();
+}
+
+static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
+{
+ ECORE_SMP_MB_BEFORE_CLEAR_BIT();
+ ECORE_SET_BIT(o->sched_state, o->raw.pstate);
+ ECORE_SMP_MB_AFTER_CLEAR_BIT();
+}
+
+static int ecore_mcast_check_sched(struct ecore_mcast_obj *o)
+{
+ return ! !ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
+}
+
+static int ecore_mcast_check_pending(struct ecore_mcast_obj *o)
+{
+ return o->raw.check_pending(&o->raw) || o->check_sched(o);
+}
+
+void ecore_init_mcast_obj(struct bnx2x_softc *sc,
+ struct ecore_mcast_obj *mcast_obj,
+ uint8_t mcast_cl_id, uint32_t mcast_cid,
+ uint8_t func_id, uint8_t engine_id, void *rdata,
+ ecore_dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, ecore_obj_type type)
+{
+ ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
+
+ ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
+ rdata, rdata_mapping, state, pstate, type);
+
+ mcast_obj->engine_id = engine_id;
+
+ ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
+
+ mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
+ mcast_obj->check_sched = ecore_mcast_check_sched;
+ mcast_obj->set_sched = ecore_mcast_set_sched;
+ mcast_obj->clear_sched = ecore_mcast_clear_sched;
+
+ if (CHIP_IS_E1H(sc)) {
+ mcast_obj->config_mcast = ecore_mcast_setup_e1h;
+ mcast_obj->enqueue_cmd = NULL;
+ mcast_obj->hdl_restore = NULL;
+ mcast_obj->check_pending = ecore_mcast_check_pending;
+
+ /* 57711 doesn't send a ramrod, so it has unlimited credit
+ * for one command.
+ */
+ mcast_obj->max_cmd_len = -1;
+ mcast_obj->wait_comp = ecore_mcast_wait;
+ mcast_obj->set_one_rule = NULL;
+ mcast_obj->validate = ecore_mcast_validate_e1h;
+ mcast_obj->revert = ecore_mcast_revert_e1h;
+ mcast_obj->get_registry_size =
+ ecore_mcast_get_registry_size_aprox;
+ mcast_obj->set_registry_size =
+ ecore_mcast_set_registry_size_aprox;
+ } else {
+ mcast_obj->config_mcast = ecore_mcast_setup_e2;
+ mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
+ mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2;
+ mcast_obj->check_pending = ecore_mcast_check_pending;
+ mcast_obj->max_cmd_len = 16;
+ mcast_obj->wait_comp = ecore_mcast_wait;
+ mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2;
+ mcast_obj->validate = ecore_mcast_validate_e2;
+ mcast_obj->revert = ecore_mcast_revert_e2;
+ mcast_obj->get_registry_size =
+ ecore_mcast_get_registry_size_aprox;
+ mcast_obj->set_registry_size =
+ ecore_mcast_set_registry_size_aprox;
+ }
+}
+
+/*************************** Credit handling **********************************/
+
+/**
+ * atomic_add_ifless - add if the result is less than a given value.
+ *
+ * @v: pointer of type ecore_atomic_t
+ * @a: the amount to add to v...
+ * @u: ...if (v + a) is less than u.
+ *
+ * returns TRUE if (v + a) was less than u, and FALSE otherwise.
+ *
+ */
+static int __atomic_add_ifless(ecore_atomic_t * v, int a, int u)
+{
+ int c, old;
+
+ c = ECORE_ATOMIC_READ(v);
+ for (;;) {
+ if (ECORE_UNLIKELY(c + a >= u))
+ return FALSE;
+
+ old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
+ if (ECORE_LIKELY(old == c))
+ break;
+ c = old;
+ }
+
+ return TRUE;
+}
+
+/**
+ * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
+ *
+ * @v: pointer of type ecore_atomic_t
+ * @a: the amount to dec from v...
+ * @u: ...if (v - a) is more or equal than u.
+ *
+ * returns TRUE if (v - a) was more or equal than u, and FALSE
+ * otherwise.
+ */
+static int __atomic_dec_ifmoe(ecore_atomic_t * v, int a, int u)
+{
+ int c, old;
+
+ c = ECORE_ATOMIC_READ(v);
+ for (;;) {
+ if (ECORE_UNLIKELY(c - a < u))
+ return FALSE;
+
+ old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
+ if (ECORE_LIKELY(old == c))
+ break;
+ c = old;
+ }
+
+ return TRUE;
+}
+
+static int ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
+{
+ int rc;
+
+ ECORE_SMP_MB();
+ rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
+ ECORE_SMP_MB();
+
+ return rc;
+}
+
+static int ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
+{
+ int rc;
+
+ ECORE_SMP_MB();
+
+ /* Don't let to refill if credit + cnt > pool_sz */
+ rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
+
+ ECORE_SMP_MB();
+
+ return rc;
+}
+
+static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
+{
+ int cur_credit;
+
+ ECORE_SMP_MB();
+ cur_credit = ECORE_ATOMIC_READ(&o->credit);
+
+ return cur_credit;
+}
+
+static int ecore_credit_pool_always_TRUE(__rte_unused struct
+ ecore_credit_pool_obj *o,
+ __rte_unused int cnt)
+{
+ return TRUE;
+}
+
+static int ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o,
+ int *offset)
+{
+ int idx, vec, i;
+
+ *offset = -1;
+
+ /* Find "internal cam-offset" then add to base for this object... */
+ for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
+
+ /* Skip the current vector if there are no free entries in it */
+ if (!o->pool_mirror[vec])
+ continue;
+
+ /* If we've got here we are going to find a free entry */
+ for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
+ i < BIT_VEC64_ELEM_SZ; idx++, i++)
+
+ if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
+ /* Got one!! */
+ BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
+ *offset = o->base_pool_offset + idx;
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+static int ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o,
+ int offset)
+{
+ if (offset < o->base_pool_offset)
+ return FALSE;
+
+ offset -= o->base_pool_offset;
+
+ if (offset >= o->pool_sz)
+ return FALSE;
+
+ /* Return the entry to the pool */
+ BIT_VEC64_SET_BIT(o->pool_mirror, offset);
+
+ return TRUE;
+}
+
+static int ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct
+ ecore_credit_pool_obj *o,
+ __rte_unused int offset)
+{
+ return TRUE;
+}
+
+static int ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct
+ ecore_credit_pool_obj *o,
+ __rte_unused int *offset)
+{
+ *offset = -1;
+ return TRUE;
+}
+
+/**
+ * ecore_init_credit_pool - initialize credit pool internals.
+ *
+ * @p:
+ * @base: Base entry in the CAM to use.
+ * @credit: pool size.
+ *
+ * If base is negative no CAM entries handling will be performed.
+ * If credit is negative pool operations will always succeed (unlimited pool).
+ *
+ */
+static void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
+ int base, int credit)
+{
+ /* Zero the object first */
+ ECORE_MEMSET(p, 0, sizeof(*p));
+
+ /* Set the table to all 1s */
+ ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
+
+ /* Init a pool as full */
+ ECORE_ATOMIC_SET(&p->credit, credit);
+
+ /* The total poll size */
+ p->pool_sz = credit;
+
+ p->base_pool_offset = base;
+
+ /* Commit the change */
+ ECORE_SMP_MB();
+
+ p->check = ecore_credit_pool_check;
+
+ /* if pool credit is negative - disable the checks */
+ if (credit >= 0) {
+ p->put = ecore_credit_pool_put;
+ p->get = ecore_credit_pool_get;
+ p->put_entry = ecore_credit_pool_put_entry;
+ p->get_entry = ecore_credit_pool_get_entry;
+ } else {
+ p->put = ecore_credit_pool_always_TRUE;
+ p->get = ecore_credit_pool_always_TRUE;
+ p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
+ p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
+ }
+
+ /* If base is negative - disable entries handling */
+ if (base < 0) {
+ p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
+ p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
+ }
+}
+
+void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
+ struct ecore_credit_pool_obj *p,
+ uint8_t func_id, uint8_t func_num)
+{
+
+#define ECORE_CAM_SIZE_EMUL 5
+
+ int cam_sz;
+
+ if (CHIP_IS_E1H(sc)) {
+ /* CAM credit is equally divided between all active functions
+ * on the PORT!.
+ */
+ if (func_num > 0) {
+ if (!CHIP_REV_IS_SLOW(sc))
+ cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
+ else
+ cam_sz = ECORE_CAM_SIZE_EMUL;
+ ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
+ } else {
+ /* this should never happen! Block MAC operations. */
+ ecore_init_credit_pool(p, 0, 0);
+ }
+
+ } else {
+
+ /*
+ * CAM credit is equaly divided between all active functions
+ * on the PATH.
+ */
+ if (func_num > 0) {
+ if (!CHIP_REV_IS_SLOW(sc))
+ cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+ else
+ cam_sz = ECORE_CAM_SIZE_EMUL;
+
+ /* No need for CAM entries handling for 57712 and
+ * newer.
+ */
+ ecore_init_credit_pool(p, -1, cam_sz);
+ } else {
+ /* this should never happen! Block MAC operations. */
+ ecore_init_credit_pool(p, 0, 0);
+ }
+ }
+}
+
+void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
+ struct ecore_credit_pool_obj *p,
+ uint8_t func_id, uint8_t func_num)
+{
+ if (CHIP_IS_E1x(sc)) {
+ /* There is no VLAN credit in HW on 57711 only
+ * MAC / MAC-VLAN can be set
+ */
+ ecore_init_credit_pool(p, 0, -1);
+ } else {
+ /* CAM credit is equally divided between all active functions
+ * on the PATH.
+ */
+ if (func_num > 0) {
+ int credit = MAX_VLAN_CREDIT_E2 / func_num;
+ ecore_init_credit_pool(p, func_id * credit, credit);
+ } else
+ /* this should never happen! Block VLAN operations. */
+ ecore_init_credit_pool(p, 0, 0);
+ }
+}
+
+/****************** RSS Configuration ******************/
+
+/**
+ * ecore_setup_rss - configure RSS
+ *
+ * @sc: device handle
+ * @p: rss configuration
+ *
+ * sends on UPDATE ramrod for that matter.
+ */
+static int ecore_setup_rss(struct bnx2x_softc *sc,
+ struct ecore_config_rss_params *p)
+{
+ struct ecore_rss_config_obj *o = p->rss_obj;
+ struct ecore_raw_obj *r = &o->raw;
+ struct eth_rss_update_ramrod_data *data =
+ (struct eth_rss_update_ramrod_data *)(r->rdata);
+ uint8_t rss_mode = 0;
+ int rc;
+
+ ECORE_MEMSET(data, 0, sizeof(*data));
+
+ ECORE_MSG("Configuring RSS");
+
+ /* Set an echo field */
+ data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
+ (r->state << ECORE_SWCID_SHIFT));
+
+ /* RSS mode */
+ if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_DISABLED;
+ else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_REGULAR;
+
+ data->rss_mode = rss_mode;
+
+ ECORE_MSG("rss_mode=%d", rss_mode);
+
+ /* RSS capabilities */
+ if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
+
+ if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+
+ if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
+
+ if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
+
+ if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+
+ if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+
+ if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
+ data->udp_4tuple_dst_port_mask =
+ ECORE_CPU_TO_LE16(p->tunnel_mask);
+ data->udp_4tuple_dst_port_value =
+ ECORE_CPU_TO_LE16(p->tunnel_value);
+ }
+
+ /* Hashing mask */
+ data->rss_result_mask = p->rss_result_mask;
+
+ /* RSS engine ID */
+ data->rss_engine_id = o->engine_id;
+
+ ECORE_MSG("rss_engine_id=%d", data->rss_engine_id);
+
+ /* Indirection table */
+ ECORE_MEMCPY(data->indirection_table, p->ind_table,
+ T_ETH_INDIRECTION_TABLE_SIZE);
+
+ /* Remember the last configuration */
+ ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+
+ /* RSS keys */
+ if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
+ ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
+ sizeof(data->rss_key));
+ data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
+ }
+
+ /* No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside ecore_sp_post()).
+ */
+
+ /* Send a ramrod */
+ rc = ecore_sp_post(sc,
+ RAMROD_CMD_ID_ETH_RSS_UPDATE,
+ r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE);
+
+ if (rc < 0)
+ return rc;
+
+ return ECORE_PENDING;
+}
+
+int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p)
+{
+ int rc;
+ struct ecore_rss_config_obj *o = p->rss_obj;
+ struct ecore_raw_obj *r = &o->raw;
+
+ /* Do nothing if only driver cleanup was requested */
+ if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+ return ECORE_SUCCESS;
+
+ r->set_pending(r);
+
+ rc = o->config_rss(sc, p);
+ if (rc < 0) {
+ r->clear_pending(r);
+ return rc;
+ }
+
+ if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
+ rc = r->wait_comp(sc, r);
+
+ return rc;
+}
+
+void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
+ uint8_t cl_id, uint32_t cid, uint8_t func_id,
+ uint8_t engine_id, void *rdata,
+ ecore_dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, ecore_obj_type type)
+{
+ ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type);
+
+ rss_obj->engine_id = engine_id;
+ rss_obj->config_rss = ecore_setup_rss;
+}
+
+/********************** Queue state object ***********************************/
+
+/**
+ * ecore_queue_state_change - perform Queue state change transition
+ *
+ * @sc: device handle
+ * @params: parameters to perform the transition
+ *
+ * returns 0 in case of successfully completed transition, negative error
+ * code in case of failure, positive (EBUSY) value if there is a completion
+ * to that is still pending (possible only if RAMROD_COMP_WAIT is
+ * not set in params->ramrod_flags for asynchronous commands).
+ *
+ */
+int ecore_queue_state_change(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params)
+{
+ struct ecore_queue_sp_obj *o = params->q_obj;
+ int rc, pending_bit;
+ unsigned long *pending = &o->pending;
+
+ /* Check that the requested transition is legal */
+ rc = o->check_transition(sc, o, params);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "check transition returned an error. rc %d",
+ rc);
+ return ECORE_INVAL;
+ }
+
+ /* Set "pending" bit */
+ ECORE_MSG("pending bit was=%lx", o->pending);
+ pending_bit = o->set_pending(o, params);
+ ECORE_MSG("pending bit now=%lx", o->pending);
+
+ /* Don't send a command if only driver cleanup was requested */
+ if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
+ o->complete_cmd(sc, o, pending_bit);
+ else {
+ /* Send a ramrod */
+ rc = o->send_cmd(sc, params);
+ if (rc) {
+ o->next_state = ECORE_Q_STATE_MAX;
+ ECORE_CLEAR_BIT(pending_bit, pending);
+ ECORE_SMP_MB_AFTER_CLEAR_BIT();
+ return rc;
+ }
+
+ if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+ rc = o->wait_comp(sc, o, pending_bit);
+ if (rc)
+ return rc;
+
+ return ECORE_SUCCESS;
+ }
+ }
+
+ return ECORE_RET_PENDING(pending_bit, pending);
+}
+
+static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
+ struct ecore_queue_state_params *params)
+{
+ enum ecore_queue_cmd cmd = params->cmd, bit;
+
+ /* ACTIVATE and DEACTIVATE commands are implemented on top of
+ * UPDATE command.
+ */
+ if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE))
+ bit = ECORE_Q_CMD_UPDATE;
+ else
+ bit = cmd;
+
+ ECORE_SET_BIT(bit, &obj->pending);
+ return bit;
+}
+
+static int ecore_queue_wait_comp(struct bnx2x_softc *sc,
+ struct ecore_queue_sp_obj *o,
+ enum ecore_queue_cmd cmd)
+{
+ return ecore_state_wait(sc, cmd, &o->pending);
+}
+
+/**
+ * ecore_queue_comp_cmd - complete the state change command.
+ *
+ * @sc: device handle
+ * @o:
+ * @cmd:
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_queue_sp_obj *o,
+ enum ecore_queue_cmd cmd)
+{
+ unsigned long cur_pending = o->pending;
+
+ if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
+ PMD_DRV_LOG(ERR,
+ "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d",
+ cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state,
+ cur_pending, o->next_state);
+ return ECORE_INVAL;
+ }
+
+ if (o->next_tx_only >= o->max_cos)
+ /* >= because tx only must always be smaller than cos since the
+ * primary connection supports COS 0
+ */
+ PMD_DRV_LOG(ERR,
+ "illegal value for next tx_only: %d. max cos was %d",
+ o->next_tx_only, o->max_cos);
+
+ ECORE_MSG("Completing command %d for queue %d, setting state to %d",
+ cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
+
+ if (o->next_tx_only) /* print num tx-only if any exist */
+ ECORE_MSG("primary cid %d: num tx-only cons %d",
+ o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
+
+ o->state = o->next_state;
+ o->num_tx_only = o->next_tx_only;
+ o->next_state = ECORE_Q_STATE_MAX;
+
+ /* It's important that o->state and o->next_state are
+ * updated before o->pending.
+ */
+ wmb();
+
+ ECORE_CLEAR_BIT(cmd, &o->pending);
+ ECORE_SMP_MB_AFTER_CLEAR_BIT();
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params
+ *cmd_params,
+ struct client_init_ramrod_data *data)
+{
+ struct ecore_queue_setup_params *params = &cmd_params->params.setup;
+
+ /* Rx data */
+
+ /* IPv6 TPA supported for E2 and above only */
+ data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
+ &params->flags) *
+ CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
+}
+
+static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_queue_sp_obj *o,
+ struct ecore_general_setup_params
+ *params, struct client_init_general_data
+ *gen_data, unsigned long *flags)
+{
+ gen_data->client_id = o->cl_id;
+
+ if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
+ gen_data->statistics_counter_id = params->stat_id;
+ gen_data->statistics_en_flg = 1;
+ gen_data->statistics_zero_flg =
+ ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
+ } else
+ gen_data->statistics_counter_id =
+ DISABLE_STATISTIC_COUNTER_ID_VALUE;
+
+ gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags);
+ gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags);
+ gen_data->sp_client_id = params->spcl_id;
+ gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
+ gen_data->func_id = o->func_id;
+
+ gen_data->cos = params->cos;
+
+ gen_data->traffic_type =
+ ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
+ LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
+
+ ECORE_MSG("flags: active %d, cos %d, stats en %d",
+ gen_data->activate_flg, gen_data->cos,
+ gen_data->statistics_en_flg);
+}
+
+static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params,
+ struct client_init_tx_data *tx_data,
+ unsigned long *flags)
+{
+ tx_data->enforce_security_flg =
+ ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
+ tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan);
+ tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
+ tx_data->tx_switching_flg =
+ ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
+ tx_data->anti_spoofing_flg =
+ ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
+ tx_data->force_default_pri_flg =
+ ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
+ tx_data->refuse_outband_vlan_flg =
+ ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
+ tx_data->tunnel_non_lso_pcsum_location =
+ ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
+ CSUM_ON_BD;
+
+ tx_data->tx_status_block_id = params->fw_sb_id;
+ tx_data->tx_sb_index_number = params->sb_cq_index;
+ tx_data->tss_leading_client_id = params->tss_leading_cl_id;
+
+ tx_data->tx_bd_page_base.lo =
+ ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
+ tx_data->tx_bd_page_base.hi =
+ ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
+
+ /* Don't configure any Tx switching mode during queue SETUP */
+ tx_data->state = 0;
+}
+
+static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params,
+ struct client_init_rx_data *rx_data)
+{
+ /* flow control data */
+ rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
+ rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
+ rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
+ rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
+ rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
+ rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
+ rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
+}
+
+static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params,
+ struct client_init_rx_data *rx_data,
+ unsigned long *flags)
+{
+ rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
+ CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
+ rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
+ CLIENT_INIT_RX_DATA_TPA_MODE;
+ rx_data->vmqueue_mode_en_flg = 0;
+
+ rx_data->extra_data_over_sgl_en_flg =
+ ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
+ rx_data->cache_line_alignment_log_size = params->cache_line_log;
+ rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
+ rx_data->client_qzone_id = params->cl_qzone_id;
+ rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
+
+ /* Always start in DROP_ALL mode */
+ rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
+ CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
+
+ /* We don't set drop flags */
+ rx_data->drop_ip_cs_err_flg = 0;
+ rx_data->drop_tcp_cs_err_flg = 0;
+ rx_data->drop_ttl0_flg = 0;
+ rx_data->drop_udp_cs_err_flg = 0;
+ rx_data->inner_vlan_removal_enable_flg =
+ ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
+ rx_data->outer_vlan_removal_enable_flg =
+ ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
+ rx_data->status_block_id = params->fw_sb_id;
+ rx_data->rx_sb_index_number = params->sb_cq_index;
+ rx_data->max_tpa_queues = params->max_tpa_queues;
+ rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
+ rx_data->bd_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
+ rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
+ rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
+ rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
+ rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
+ flags);
+
+ if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
+ rx_data->approx_mcast_engine_id = params->mcast_engine_id;
+ rx_data->is_approx_mcast = 1;
+ }
+
+ rx_data->rss_engine_id = params->rss_engine_id;
+
+ /* silent vlan removal */
+ rx_data->silent_vlan_removal_flg =
+ ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
+ rx_data->silent_vlan_value =
+ ECORE_CPU_TO_LE16(params->silent_removal_value);
+ rx_data->silent_vlan_mask =
+ ECORE_CPU_TO_LE16(params->silent_removal_mask);
+}
+
+/* initialize the general, tx and rx parts of a queue object */
+static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
+ *cmd_params,
+ struct client_init_ramrod_data *data)
+{
+ ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
+ &cmd_params->params.setup.gen_params,
+ &data->general,
+ &cmd_params->params.setup.flags);
+
+ ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params,
+ &data->tx, &cmd_params->params.setup.flags);
+
+ ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params,
+ &data->rx, &cmd_params->params.setup.flags);
+
+ ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params,
+ &data->rx);
+}
+
+/* initialize the general and tx parts of a tx-only queue object */
+static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
+ *cmd_params,
+ struct tx_queue_init_ramrod_data *data)
+{
+ ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
+ &cmd_params->params.tx_only.gen_params,
+ &data->general,
+ &cmd_params->params.tx_only.flags);
+
+ ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params,
+ &data->tx, &cmd_params->params.tx_only.flags);
+
+ ECORE_MSG("cid %d, tx bd page lo %x hi %x",
+ cmd_params->q_obj->cids[0],
+ data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
+}
+
+/**
+ * ecore_q_init - init HW/FW queue
+ *
+ * @sc: device handle
+ * @params:
+ *
+ * HW/FW initial Queue configuration:
+ * - HC: Rx and Tx
+ * - CDU context validation
+ *
+ */
+static int ecore_q_init(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params)
+{
+ struct ecore_queue_sp_obj *o = params->q_obj;
+ struct ecore_queue_init_params *init = &params->params.init;
+ uint16_t hc_usec;
+ uint8_t cos;
+
+ /* Tx HC configuration */
+ if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
+ ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
+ hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
+
+ ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
+ init->tx.sb_cq_index,
+ !ECORE_TEST_BIT
+ (ECORE_Q_FLG_HC_EN,
+ &init->tx.flags), hc_usec);
+ }
+
+ /* Rx HC configuration */
+ if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
+ ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
+ hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
+
+ ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
+ init->rx.sb_cq_index,
+ !ECORE_TEST_BIT
+ (ECORE_Q_FLG_HC_EN,
+ &init->rx.flags), hc_usec);
+ }
+
+ /* Set CDU context validation values */
+ for (cos = 0; cos < o->max_cos; cos++) {
+ ECORE_MSG("setting context validation. cid %d, cos %d",
+ o->cids[cos], cos);
+ ECORE_MSG("context pointer %p", init->cxts[cos]);
+ ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
+ }
+
+ /* As no ramrod is sent, complete the command immediately */
+ o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
+
+ ECORE_MMIOWB();
+ ECORE_SMP_MB();
+
+ return ECORE_SUCCESS;
+}
+
+static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc, struct ecore_queue_state_params
+ *params)
+{
+ struct ecore_queue_sp_obj *o = params->q_obj;
+ struct client_init_ramrod_data *rdata =
+ (struct client_init_ramrod_data *)o->rdata;
+ ecore_dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+ /* Clear the ramrod data */
+ ECORE_MEMSET(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ ecore_q_fill_setup_data_cmn(sc, params, rdata);
+
+ /* No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside ecore_sp_post()).
+ */
+
+ return ecore_sp_post(sc,
+ ramrod,
+ o->cids[ECORE_PRIMARY_CID_INDEX],
+ data_mapping, ETH_CONNECTION_TYPE);
+}
+
+static int ecore_q_send_setup_e2(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params)
+{
+ struct ecore_queue_sp_obj *o = params->q_obj;
+ struct client_init_ramrod_data *rdata =
+ (struct client_init_ramrod_data *)o->rdata;
+ ecore_dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+ /* Clear the ramrod data */
+ ECORE_MEMSET(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ ecore_q_fill_setup_data_cmn(sc, params, rdata);
+ ecore_q_fill_setup_data_e2(params, rdata);
+
+ /* No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside ecore_sp_post()).
+ */
+
+ return ecore_sp_post(sc,
+ ramrod,
+ o->cids[ECORE_PRIMARY_CID_INDEX],
+ data_mapping, ETH_CONNECTION_TYPE);
+}
+
+static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
+ *params)
+{
+ struct ecore_queue_sp_obj *o = params->q_obj;
+ struct tx_queue_init_ramrod_data *rdata =
+ (struct tx_queue_init_ramrod_data *)o->rdata;
+ ecore_dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
+ struct ecore_queue_setup_tx_only_params *tx_only_params =
+ &params->params.tx_only;
+ uint8_t cid_index = tx_only_params->cid_index;
+
+ if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type))
+ ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
+ ECORE_MSG("sending forward tx-only ramrod");
+
+ if (cid_index >= o->max_cos) {
+ PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
+ o->cl_id, cid_index);
+ return ECORE_INVAL;
+ }
+
+ ECORE_MSG("parameters received: cos: %d sp-id: %d",
+ tx_only_params->gen_params.cos,
+ tx_only_params->gen_params.spcl_id);
+
+ /* Clear the ramrod data */
+ ECORE_MEMSET(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ ecore_q_fill_setup_tx_only(sc, params, rdata);
+
+ ECORE_MSG
+ ("sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
+ o->cids[cid_index], rdata->general.client_id,
+ rdata->general.sp_client_id, rdata->general.cos);
+
+ /* No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside ecore_sp_post()).
+ */
+
+ return ecore_sp_post(sc, ramrod, o->cids[cid_index],
+ data_mapping, ETH_CONNECTION_TYPE);
+}
+
+static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj,
+ struct ecore_queue_update_params *params,
+ struct client_update_ramrod_data *data)
+{
+ /* Client ID of the client to update */
+ data->client_id = obj->cl_id;
+
+ /* Function ID of the client to update */
+ data->func_id = obj->func_id;
+
+ /* Default VLAN value */
+ data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
+
+ /* Inner VLAN stripping */
+ data->inner_vlan_removal_enable_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
+ data->inner_vlan_removal_change_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
+ &params->update_flags);
+
+ /* Outer VLAN stripping */
+ data->outer_vlan_removal_enable_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
+ data->outer_vlan_removal_change_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
+ &params->update_flags);
+
+ /* Drop packets that have source MAC that doesn't belong to this
+ * Queue.
+ */
+ data->anti_spoofing_enable_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
+ data->anti_spoofing_change_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
+ &params->update_flags);
+
+ /* Activate/Deactivate */
+ data->activate_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
+ data->activate_change_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
+
+ /* Enable default VLAN */
+ data->default_vlan_enable_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
+ data->default_vlan_change_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &params->update_flags);
+
+ /* silent vlan removal */
+ data->silent_vlan_change_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &params->update_flags);
+ data->silent_vlan_removal_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
+ &params->update_flags);
+ data->silent_vlan_value =
+ ECORE_CPU_TO_LE16(params->silent_removal_value);
+ data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
+
+ /* tx switching */
+ data->tx_switching_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, &params->update_flags);
+ data->tx_switching_change_flg =
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
+ &params->update_flags);
+}
+
+static int ecore_q_send_update(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params)
+{
+ struct ecore_queue_sp_obj *o = params->q_obj;
+ struct client_update_ramrod_data *rdata =
+ (struct client_update_ramrod_data *)o->rdata;
+ ecore_dma_addr_t data_mapping = o->rdata_mapping;
+ struct ecore_queue_update_params *update_params =
+ &params->params.update;
+ uint8_t cid_index = update_params->cid_index;
+
+ if (cid_index >= o->max_cos) {
+ PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
+ o->cl_id, cid_index);
+ return ECORE_INVAL;
+ }
+
+ /* Clear the ramrod data */
+ ECORE_MEMSET(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ ecore_q_fill_update_data(o, update_params, rdata);
+
+ /* No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside ecore_sp_post()).
+ */
+
+ return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
+ o->cids[cid_index], data_mapping,
+ ETH_CONNECTION_TYPE);
+}
+
+/**
+ * ecore_q_send_deactivate - send DEACTIVATE command
+ *
+ * @sc: device handle
+ * @params:
+ *
+ * implemented using the UPDATE command.
+ */
+static int ecore_q_send_deactivate(struct bnx2x_softc *sc, struct ecore_queue_state_params
+ *params)
+{
+ struct ecore_queue_update_params *update = &params->params.update;
+
+ ECORE_MEMSET(update, 0, sizeof(*update));
+
+ ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+ return ecore_q_send_update(sc, params);
+}
+
+/**
+ * ecore_q_send_activate - send ACTIVATE command
+ *
+ * @sc: device handle
+ * @params:
+ *
+ * implemented using the UPDATE command.
+ */
+static int ecore_q_send_activate(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params)
+{
+ struct ecore_queue_update_params *update = &params->params.update;
+
+ ECORE_MEMSET(update, 0, sizeof(*update));
+
+ ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
+ ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+ return ecore_q_send_update(sc, params);
+}
+
+static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc,
+ __rte_unused struct
+ ecore_queue_state_params *params)
+{
+ /* Not implemented yet. */
+ return -1;
+}
+
+static int ecore_q_send_halt(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params)
+{
+ struct ecore_queue_sp_obj *o = params->q_obj;
+
+ /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
+ ecore_dma_addr_t data_mapping = 0;
+ data_mapping = (ecore_dma_addr_t) o->cl_id;
+
+ return ecore_sp_post(sc,
+ RAMROD_CMD_ID_ETH_HALT,
+ o->cids[ECORE_PRIMARY_CID_INDEX],
+ data_mapping, ETH_CONNECTION_TYPE);
+}
+
+static int ecore_q_send_cfc_del(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params)
+{
+ struct ecore_queue_sp_obj *o = params->q_obj;
+ uint8_t cid_idx = params->params.cfc_del.cid_index;
+
+ if (cid_idx >= o->max_cos) {
+ PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
+ o->cl_id, cid_idx);
+ return ECORE_INVAL;
+ }
+
+ return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
+ o->cids[cid_idx], 0, NONE_CONNECTION_TYPE);
+}
+
+static int ecore_q_send_terminate(struct bnx2x_softc *sc, struct ecore_queue_state_params
+ *params)
+{
+ struct ecore_queue_sp_obj *o = params->q_obj;
+ uint8_t cid_index = params->params.terminate.cid_index;
+
+ if (cid_index >= o->max_cos) {
+ PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
+ o->cl_id, cid_index);
+ return ECORE_INVAL;
+ }
+
+ return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
+ o->cids[cid_index], 0, ETH_CONNECTION_TYPE);
+}
+
+static int ecore_q_send_empty(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params)
+{
+ struct ecore_queue_sp_obj *o = params->q_obj;
+
+ return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
+ o->cids[ECORE_PRIMARY_CID_INDEX], 0,
+ ETH_CONNECTION_TYPE);
+}
+
+static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
+ *params)
+{
+ switch (params->cmd) {
+ case ECORE_Q_CMD_INIT:
+ return ecore_q_init(sc, params);
+ case ECORE_Q_CMD_SETUP_TX_ONLY:
+ return ecore_q_send_setup_tx_only(sc, params);
+ case ECORE_Q_CMD_DEACTIVATE:
+ return ecore_q_send_deactivate(sc, params);
+ case ECORE_Q_CMD_ACTIVATE:
+ return ecore_q_send_activate(sc, params);
+ case ECORE_Q_CMD_UPDATE:
+ return ecore_q_send_update(sc, params);
+ case ECORE_Q_CMD_UPDATE_TPA:
+ return ecore_q_send_update_tpa(sc, params);
+ case ECORE_Q_CMD_HALT:
+ return ecore_q_send_halt(sc, params);
+ case ECORE_Q_CMD_CFC_DEL:
+ return ecore_q_send_cfc_del(sc, params);
+ case ECORE_Q_CMD_TERMINATE:
+ return ecore_q_send_terminate(sc, params);
+ case ECORE_Q_CMD_EMPTY:
+ return ecore_q_send_empty(sc, params);
+ default:
+ PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
+ return ECORE_INVAL;
+ }
+}
+
+static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params)
+{
+ switch (params->cmd) {
+ case ECORE_Q_CMD_SETUP:
+ return ecore_q_send_setup_e1x(sc, params);
+ case ECORE_Q_CMD_INIT:
+ case ECORE_Q_CMD_SETUP_TX_ONLY:
+ case ECORE_Q_CMD_DEACTIVATE:
+ case ECORE_Q_CMD_ACTIVATE:
+ case ECORE_Q_CMD_UPDATE:
+ case ECORE_Q_CMD_UPDATE_TPA:
+ case ECORE_Q_CMD_HALT:
+ case ECORE_Q_CMD_CFC_DEL:
+ case ECORE_Q_CMD_TERMINATE:
+ case ECORE_Q_CMD_EMPTY:
+ return ecore_queue_send_cmd_cmn(sc, params);
+ default:
+ PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
+ return ECORE_INVAL;
+ }
+}
+
+static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params)
+{
+ switch (params->cmd) {
+ case ECORE_Q_CMD_SETUP:
+ return ecore_q_send_setup_e2(sc, params);
+ case ECORE_Q_CMD_INIT:
+ case ECORE_Q_CMD_SETUP_TX_ONLY:
+ case ECORE_Q_CMD_DEACTIVATE:
+ case ECORE_Q_CMD_ACTIVATE:
+ case ECORE_Q_CMD_UPDATE:
+ case ECORE_Q_CMD_UPDATE_TPA:
+ case ECORE_Q_CMD_HALT:
+ case ECORE_Q_CMD_CFC_DEL:
+ case ECORE_Q_CMD_TERMINATE:
+ case ECORE_Q_CMD_EMPTY:
+ return ecore_queue_send_cmd_cmn(sc, params);
+ default:
+ PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
+ return ECORE_INVAL;
+ }
+}
+
+/**
+ * ecore_queue_chk_transition - check state machine of a regular Queue
+ *
+ * @sc: device handle
+ * @o:
+ * @params:
+ *
+ * (not Forwarding)
+ * It both checks if the requested command is legal in a current
+ * state and, if it's legal, sets a `next_state' in the object
+ * that will be used in the completion flow to set the `state'
+ * of the object.
+ *
+ * returns 0 if a requested command is a legal transition,
+ * ECORE_INVAL otherwise.
+ */
+static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_queue_sp_obj *o,
+ struct ecore_queue_state_params *params)
+{
+ enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
+ enum ecore_queue_cmd cmd = params->cmd;
+ struct ecore_queue_update_params *update_params =
+ &params->params.update;
+ uint8_t next_tx_only = o->num_tx_only;
+
+ /* Forget all pending for completion commands if a driver only state
+ * transition has been requested.
+ */
+ if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ o->pending = 0;
+ o->next_state = ECORE_Q_STATE_MAX;
+ }
+
+ /* Don't allow a next state transition if we are in the middle of
+ * the previous one.
+ */
+ if (o->pending) {
+ PMD_DRV_LOG(ERR, "Blocking transition since pending was %lx",
+ o->pending);
+ return ECORE_BUSY;
+ }
+
+ switch (state) {
+ case ECORE_Q_STATE_RESET:
+ if (cmd == ECORE_Q_CMD_INIT)
+ next_state = ECORE_Q_STATE_INITIALIZED;
+
+ break;
+ case ECORE_Q_STATE_INITIALIZED:
+ if (cmd == ECORE_Q_CMD_SETUP) {
+ if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
+ &params->params.setup.flags))
+ next_state = ECORE_Q_STATE_ACTIVE;
+ else
+ next_state = ECORE_Q_STATE_INACTIVE;
+ }
+
+ break;
+ case ECORE_Q_STATE_ACTIVE:
+ if (cmd == ECORE_Q_CMD_DEACTIVATE)
+ next_state = ECORE_Q_STATE_INACTIVE;
+
+ else if ((cmd == ECORE_Q_CMD_EMPTY) ||
+ (cmd == ECORE_Q_CMD_UPDATE_TPA))
+ next_state = ECORE_Q_STATE_ACTIVE;
+
+ else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
+ next_state = ECORE_Q_STATE_MULTI_COS;
+ next_tx_only = 1;
+ }
+
+ else if (cmd == ECORE_Q_CMD_HALT)
+ next_state = ECORE_Q_STATE_STOPPED;
+
+ else if (cmd == ECORE_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags))
+ next_state = ECORE_Q_STATE_INACTIVE;
+ else
+ next_state = ECORE_Q_STATE_ACTIVE;
+ }
+
+ break;
+ case ECORE_Q_STATE_MULTI_COS:
+ if (cmd == ECORE_Q_CMD_TERMINATE)
+ next_state = ECORE_Q_STATE_MCOS_TERMINATED;
+
+ else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
+ next_state = ECORE_Q_STATE_MULTI_COS;
+ next_tx_only = o->num_tx_only + 1;
+ }
+
+ else if ((cmd == ECORE_Q_CMD_EMPTY) ||
+ (cmd == ECORE_Q_CMD_UPDATE_TPA))
+ next_state = ECORE_Q_STATE_MULTI_COS;
+
+ else if (cmd == ECORE_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags))
+ next_state = ECORE_Q_STATE_INACTIVE;
+ else
+ next_state = ECORE_Q_STATE_MULTI_COS;
+ }
+
+ break;
+ case ECORE_Q_STATE_MCOS_TERMINATED:
+ if (cmd == ECORE_Q_CMD_CFC_DEL) {
+ next_tx_only = o->num_tx_only - 1;
+ if (next_tx_only == 0)
+ next_state = ECORE_Q_STATE_ACTIVE;
+ else
+ next_state = ECORE_Q_STATE_MULTI_COS;
+ }
+
+ break;
+ case ECORE_Q_STATE_INACTIVE:
+ if (cmd == ECORE_Q_CMD_ACTIVATE)
+ next_state = ECORE_Q_STATE_ACTIVE;
+
+ else if ((cmd == ECORE_Q_CMD_EMPTY) ||
+ (cmd == ECORE_Q_CMD_UPDATE_TPA))
+ next_state = ECORE_Q_STATE_INACTIVE;
+
+ else if (cmd == ECORE_Q_CMD_HALT)
+ next_state = ECORE_Q_STATE_STOPPED;
+
+ else if (cmd == ECORE_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags)) {
+ if (o->num_tx_only == 0)
+ next_state = ECORE_Q_STATE_ACTIVE;
+ else /* tx only queues exist for this queue */
+ next_state = ECORE_Q_STATE_MULTI_COS;
+ } else
+ next_state = ECORE_Q_STATE_INACTIVE;
+ }
+
+ break;
+ case ECORE_Q_STATE_STOPPED:
+ if (cmd == ECORE_Q_CMD_TERMINATE)
+ next_state = ECORE_Q_STATE_TERMINATED;
+
+ break;
+ case ECORE_Q_STATE_TERMINATED:
+ if (cmd == ECORE_Q_CMD_CFC_DEL)
+ next_state = ECORE_Q_STATE_RESET;
+
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Illegal state: %d", state);
+ }
+
+ /* Transition is assured */
+ if (next_state != ECORE_Q_STATE_MAX) {
+ ECORE_MSG("Good state transition: %d(%d)->%d",
+ state, cmd, next_state);
+ o->next_state = next_state;
+ o->next_tx_only = next_tx_only;
+ return ECORE_SUCCESS;
+ }
+
+ ECORE_MSG("Bad state transition request: %d %d", state, cmd);
+
+ return ECORE_INVAL;
+}
+
+/**
+ * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
+ *
+ * @sc: device handle
+ * @o:
+ * @params:
+ *
+ * It both checks if the requested command is legal in a current
+ * state and, if it's legal, sets a `next_state' in the object
+ * that will be used in the completion flow to set the `state'
+ * of the object.
+ *
+ * returns 0 if a requested command is a legal transition,
+ * ECORE_INVAL otherwise.
+ */
+static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_queue_sp_obj *o,
+ struct ecore_queue_state_params
+ *params)
+{
+ enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
+ enum ecore_queue_cmd cmd = params->cmd;
+
+ switch (state) {
+ case ECORE_Q_STATE_RESET:
+ if (cmd == ECORE_Q_CMD_INIT)
+ next_state = ECORE_Q_STATE_INITIALIZED;
+
+ break;
+ case ECORE_Q_STATE_INITIALIZED:
+ if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
+ if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
+ &params->params.tx_only.flags))
+ next_state = ECORE_Q_STATE_ACTIVE;
+ else
+ next_state = ECORE_Q_STATE_INACTIVE;
+ }
+
+ break;
+ case ECORE_Q_STATE_ACTIVE:
+ case ECORE_Q_STATE_INACTIVE:
+ if (cmd == ECORE_Q_CMD_CFC_DEL)
+ next_state = ECORE_Q_STATE_RESET;
+
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Illegal state: %d", state);
+ }
+
+ /* Transition is assured */
+ if (next_state != ECORE_Q_STATE_MAX) {
+ ECORE_MSG("Good state transition: %d(%d)->%d",
+ state, cmd, next_state);
+ o->next_state = next_state;
+ return ECORE_SUCCESS;
+ }
+
+ ECORE_MSG("Bad state transition request: %d %d", state, cmd);
+ return ECORE_INVAL;
+}
+
+void ecore_init_queue_obj(struct bnx2x_softc *sc,
+ struct ecore_queue_sp_obj *obj,
+ uint8_t cl_id, uint32_t * cids, uint8_t cid_cnt,
+ uint8_t func_id, void *rdata,
+ ecore_dma_addr_t rdata_mapping, unsigned long type)
+{
+ ECORE_MEMSET(obj, 0, sizeof(*obj));
+
+ /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
+ ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
+
+ rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
+ obj->max_cos = cid_cnt;
+ obj->cl_id = cl_id;
+ obj->func_id = func_id;
+ obj->rdata = rdata;
+ obj->rdata_mapping = rdata_mapping;
+ obj->type = type;
+ obj->next_state = ECORE_Q_STATE_MAX;
+
+ if (CHIP_IS_E1x(sc))
+ obj->send_cmd = ecore_queue_send_cmd_e1x;
+ else
+ obj->send_cmd = ecore_queue_send_cmd_e2;
+
+ if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
+ obj->check_transition = ecore_queue_chk_fwd_transition;
+ else
+ obj->check_transition = ecore_queue_chk_transition;
+
+ obj->complete_cmd = ecore_queue_comp_cmd;
+ obj->wait_comp = ecore_queue_wait_comp;
+ obj->set_pending = ecore_queue_set_pending;
+}
+
+/********************** Function state object *********************************/
+enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc,
+ struct ecore_func_sp_obj *o)
+{
+ /* in the middle of transaction - return INVALID state */
+ if (o->pending)
+ return ECORE_F_STATE_MAX;
+
+ /* unsure the order of reading of o->pending and o->state
+ * o->pending should be read first
+ */
+ rmb();
+
+ return o->state;
+}
+
+static int ecore_func_wait_comp(struct bnx2x_softc *sc,
+ struct ecore_func_sp_obj *o,
+ enum ecore_func_cmd cmd)
+{
+ return ecore_state_wait(sc, cmd, &o->pending);
+}
+
+/**
+ * ecore_func_state_change_comp - complete the state machine transition
+ *
+ * @sc: device handle
+ * @o:
+ * @cmd:
+ *
+ * Called on state change transition. Completes the state
+ * machine transition only - no HW interaction.
+ */
+static int
+ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_func_sp_obj *o,
+ enum ecore_func_cmd cmd)
+{
+ unsigned long cur_pending = o->pending;
+
+ if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
+ PMD_DRV_LOG(ERR,
+ "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d",
+ cmd, ECORE_FUNC_ID(sc), o->state, cur_pending,
+ o->next_state);
+ return ECORE_INVAL;
+ }
+
+ ECORE_MSG("Completing command %d for func %d, setting state to %d",
+ cmd, ECORE_FUNC_ID(sc), o->next_state);
+
+ o->state = o->next_state;
+ o->next_state = ECORE_F_STATE_MAX;
+
+ /* It's important that o->state and o->next_state are
+ * updated before o->pending.
+ */
+ wmb();
+
+ ECORE_CLEAR_BIT(cmd, &o->pending);
+ ECORE_SMP_MB_AFTER_CLEAR_BIT();
+
+ return ECORE_SUCCESS;
+}
+
+/**
+ * ecore_func_comp_cmd - complete the state change command
+ *
+ * @sc: device handle
+ * @o:
+ * @cmd:
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int ecore_func_comp_cmd(struct bnx2x_softc *sc,
+ struct ecore_func_sp_obj *o,
+ enum ecore_func_cmd cmd)
+{
+ /* Complete the state machine part first, check if it's a
+ * legal completion.
+ */
+ int rc = ecore_func_state_change_comp(sc, o, cmd);
+ return rc;
+}
+
+/**
+ * ecore_func_chk_transition - perform function state machine transition
+ *
+ * @sc: device handle
+ * @o:
+ * @params:
+ *
+ * It both checks if the requested command is legal in a current
+ * state and, if it's legal, sets a `next_state' in the object
+ * that will be used in the completion flow to set the `state'
+ * of the object.
+ *
+ * returns 0 if a requested command is a legal transition,
+ * ECORE_INVAL otherwise.
+ */
+static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused,
+ struct ecore_func_sp_obj *o,
+ struct ecore_func_state_params *params)
+{
+ enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
+ enum ecore_func_cmd cmd = params->cmd;
+
+ /* Forget all pending for completion commands if a driver only state
+ * transition has been requested.
+ */
+ if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ o->pending = 0;
+ o->next_state = ECORE_F_STATE_MAX;
+ }
+
+ /* Don't allow a next state transition if we are in the middle of
+ * the previous one.
+ */
+ if (o->pending)
+ return ECORE_BUSY;
+
+ switch (state) {
+ case ECORE_F_STATE_RESET:
+ if (cmd == ECORE_F_CMD_HW_INIT)
+ next_state = ECORE_F_STATE_INITIALIZED;
+
+ break;
+ case ECORE_F_STATE_INITIALIZED:
+ if (cmd == ECORE_F_CMD_START)
+ next_state = ECORE_F_STATE_STARTED;
+
+ else if (cmd == ECORE_F_CMD_HW_RESET)
+ next_state = ECORE_F_STATE_RESET;
+
+ break;
+ case ECORE_F_STATE_STARTED:
+ if (cmd == ECORE_F_CMD_STOP)
+ next_state = ECORE_F_STATE_INITIALIZED;
+ /* afex ramrods can be sent only in started mode, and only
+ * if not pending for function_stop ramrod completion
+ * for these events - next state remained STARTED.
+ */
+ else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
+ (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
+ next_state = ECORE_F_STATE_STARTED;
+
+ else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
+ (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
+ next_state = ECORE_F_STATE_STARTED;
+
+ /* Switch_update ramrod can be sent in either started or
+ * tx_stopped state, and it doesn't change the state.
+ */
+ else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
+ (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
+ next_state = ECORE_F_STATE_STARTED;
+
+ else if (cmd == ECORE_F_CMD_TX_STOP)
+ next_state = ECORE_F_STATE_TX_STOPPED;
+
+ break;
+ case ECORE_F_STATE_TX_STOPPED:
+ if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
+ (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
+ next_state = ECORE_F_STATE_TX_STOPPED;
+
+ else if (cmd == ECORE_F_CMD_TX_START)
+ next_state = ECORE_F_STATE_STARTED;
+
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown state: %d", state);
+ }
+
+ /* Transition is assured */
+ if (next_state != ECORE_F_STATE_MAX) {
+ ECORE_MSG("Good function state transition: %d(%d)->%d",
+ state, cmd, next_state);
+ o->next_state = next_state;
+ return ECORE_SUCCESS;
+ }
+
+ ECORE_MSG("Bad function state transition request: %d %d", state, cmd);
+
+ return ECORE_INVAL;
+}
+
+/**
+ * ecore_func_init_func - performs HW init at function stage
+ *
+ * @sc: device handle
+ * @drv:
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
+ * HW blocks.
+ */
+static int ecore_func_init_func(struct bnx2x_softc *sc,
+ const struct ecore_func_sp_drv_ops *drv)
+{
+ return drv->init_hw_func(sc);
+}
+
+/**
+ * ecore_func_init_port - performs HW init at port stage
+ *
+ * @sc: device handle
+ * @drv:
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
+ * FUNCTION-only HW blocks.
+ *
+ */
+static int ecore_func_init_port(struct bnx2x_softc *sc,
+ const struct ecore_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_port(sc);
+ if (rc)
+ return rc;
+
+ return ecore_func_init_func(sc, drv);
+}
+
+/**
+ * ecore_func_init_cmn_chip - performs HW init at chip-common stage
+ *
+ * @sc: device handle
+ * @drv:
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
+ *drv)
+{
+ int rc = drv->init_hw_cmn_chip(sc);
+ if (rc)
+ return rc;
+
+ return ecore_func_init_port(sc, drv);
+}
+
+/**
+ * ecore_func_init_cmn - performs HW init at common stage
+ *
+ * @sc: device handle
+ * @drv:
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static int ecore_func_init_cmn(struct bnx2x_softc *sc,
+ const struct ecore_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_cmn(sc);
+ if (rc)
+ return rc;
+
+ return ecore_func_init_port(sc, drv);
+}
+
+static int ecore_func_hw_init(struct bnx2x_softc *sc,
+ struct ecore_func_state_params *params)
+{
+ uint32_t load_code = params->params.hw_init.load_phase;
+ struct ecore_func_sp_obj *o = params->f_obj;
+ const struct ecore_func_sp_drv_ops *drv = o->drv;
+ int rc = 0;
+
+ ECORE_MSG("function %d load_code %x",
+ ECORE_ABS_FUNC_ID(sc), load_code);
+
+ /* Prepare FW */
+ rc = drv->init_fw(sc);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Error loading firmware");
+ goto init_err;
+ }
+
+ /* Handle the beginning of COMMON_XXX pases separately... */
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+ rc = ecore_func_init_cmn_chip(sc, drv);
+ if (rc)
+ goto init_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_COMMON:
+ rc = ecore_func_init_cmn(sc, drv);
+ if (rc)
+ goto init_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ rc = ecore_func_init_port(sc, drv);
+ if (rc)
+ goto init_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ rc = ecore_func_init_func(sc, drv);
+ if (rc)
+ goto init_err;
+
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown load_code (0x%x) from MCP",
+ load_code);
+ rc = ECORE_INVAL;
+ }
+
+init_err:
+ /* In case of success, complete the command immediately: no ramrods
+ * have been sent.
+ */
+ if (!rc)
+ o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
+
+ return rc;
+}
+
+/**
+ * ecore_func_reset_func - reset HW at function stage
+ *
+ * @sc: device handle
+ * @drv:
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
+ * FUNCTION-only HW blocks.
+ */
+static void ecore_func_reset_func(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
+ *drv)
+{
+ drv->reset_hw_func(sc);
+}
+
+/**
+ * ecore_func_reset_port - reser HW at port stage
+ *
+ * @sc: device handle
+ * @drv:
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
+ * FUNCTION-only and PORT-only HW blocks.
+ *
+ * !!!IMPORTANT!!!
+ *
+ * It's important to call reset_port before reset_func() as the last thing
+ * reset_func does is pf_disable() thus disabling PGLUE_B, which
+ * makes impossible any DMAE transactions.
+ */
+static void ecore_func_reset_port(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
+ *drv)
+{
+ drv->reset_hw_port(sc);
+ ecore_func_reset_func(sc, drv);
+}
+
+/**
+ * ecore_func_reset_cmn - reser HW at common stage
+ *
+ * @sc: device handle
+ * @drv:
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
+ * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
+ * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
+ */
+static void ecore_func_reset_cmn(struct bnx2x_softc *sc,
+ const struct ecore_func_sp_drv_ops *drv)
+{
+ ecore_func_reset_port(sc, drv);
+ drv->reset_hw_cmn(sc);
+}
+
+static int ecore_func_hw_reset(struct bnx2x_softc *sc,
+ struct ecore_func_state_params *params)
+{
+ uint32_t reset_phase = params->params.hw_reset.reset_phase;
+ struct ecore_func_sp_obj *o = params->f_obj;
+ const struct ecore_func_sp_drv_ops *drv = o->drv;
+
+ ECORE_MSG("function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc),
+ reset_phase);
+
+ switch (reset_phase) {
+ case FW_MSG_CODE_DRV_UNLOAD_COMMON:
+ ecore_func_reset_cmn(sc, drv);
+ break;
+ case FW_MSG_CODE_DRV_UNLOAD_PORT:
+ ecore_func_reset_port(sc, drv);
+ break;
+ case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
+ ecore_func_reset_func(sc, drv);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown reset_phase (0x%x) from MCP",
+ reset_phase);
+ break;
+ }
+
+ /* Complete the command immediately: no ramrods have been sent. */
+ o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
+
+ return ECORE_SUCCESS;
+}
+
+static int ecore_func_send_start(struct bnx2x_softc *sc,
+ struct ecore_func_state_params *params)
+{
+ struct ecore_func_sp_obj *o = params->f_obj;
+ struct function_start_data *rdata =
+ (struct function_start_data *)o->rdata;
+ ecore_dma_addr_t data_mapping = o->rdata_mapping;
+ struct ecore_func_start_params *start_params = &params->params.start;
+
+ ECORE_MEMSET(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->function_mode = (uint8_t) start_params->mf_mode;
+ rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
+ rdata->path_id = ECORE_PATH_ID(sc);
+ rdata->network_cos_mode = start_params->network_cos_mode;
+ rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
+ rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
+
+ /*
+ * No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside ecore_sp_post()).
+ */
+
+ return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
+ data_mapping, NONE_CONNECTION_TYPE);
+}
+
+static int ecore_func_send_switch_update(struct bnx2x_softc *sc, struct ecore_func_state_params
+ *params)
+{
+ struct ecore_func_sp_obj *o = params->f_obj;
+ struct function_update_data *rdata =
+ (struct function_update_data *)o->rdata;
+ ecore_dma_addr_t data_mapping = o->rdata_mapping;
+ struct ecore_func_switch_update_params *switch_update_params =
+ &params->params.switch_update;
+
+ ECORE_MEMSET(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->tx_switch_suspend_change_flg = 1;
+ rdata->tx_switch_suspend = switch_update_params->suspend;
+ rdata->echo = SWITCH_UPDATE;
+
+ return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
+ data_mapping, NONE_CONNECTION_TYPE);
+}
+
+static int ecore_func_send_afex_update(struct bnx2x_softc *sc, struct ecore_func_state_params
+ *params)
+{
+ struct ecore_func_sp_obj *o = params->f_obj;
+ struct function_update_data *rdata =
+ (struct function_update_data *)o->afex_rdata;
+ ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
+ struct ecore_func_afex_update_params *afex_update_params =
+ &params->params.afex_update;
+
+ ECORE_MEMSET(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->vif_id_change_flg = 1;
+ rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
+ rdata->afex_default_vlan_change_flg = 1;
+ rdata->afex_default_vlan =
+ ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
+ rdata->allowed_priorities_change_flg = 1;
+ rdata->allowed_priorities = afex_update_params->allowed_priorities;
+ rdata->echo = AFEX_UPDATE;
+
+ /* No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside ecore_sp_post()).
+ */
+ ECORE_MSG("afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
+ rdata->vif_id,
+ rdata->afex_default_vlan, rdata->allowed_priorities);
+
+ return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
+ data_mapping, NONE_CONNECTION_TYPE);
+}
+
+static
+inline int ecore_func_send_afex_viflists(struct bnx2x_softc *sc,
+ struct ecore_func_state_params *params)
+{
+ struct ecore_func_sp_obj *o = params->f_obj;
+ struct afex_vif_list_ramrod_data *rdata =
+ (struct afex_vif_list_ramrod_data *)o->afex_rdata;
+ struct ecore_func_afex_viflists_params *afex_vif_params =
+ &params->params.afex_viflists;
+ uint64_t *p_rdata = (uint64_t *) rdata;
+
+ ECORE_MEMSET(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->vif_list_index =
+ ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
+ rdata->func_bit_map = afex_vif_params->func_bit_map;
+ rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
+ rdata->func_to_clear = afex_vif_params->func_to_clear;
+
+ /* send in echo type of sub command */
+ rdata->echo = afex_vif_params->afex_vif_list_command;
+
+ /* No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside ecore_sp_post()).
+ */
+
+ ECORE_MSG
+ ("afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
+ rdata->afex_vif_list_command, rdata->vif_list_index,
+ rdata->func_bit_map, rdata->func_to_clear);
+
+ /* this ramrod sends data directly and not through DMA mapping */
+ return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
+ *p_rdata, NONE_CONNECTION_TYPE);
+}
+
+static int ecore_func_send_stop(struct bnx2x_softc *sc, __rte_unused struct
+ ecore_func_state_params *params)
+{
+ return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
+ NONE_CONNECTION_TYPE);
+}
+
+static int ecore_func_send_tx_stop(struct bnx2x_softc *sc, __rte_unused struct
+ ecore_func_state_params *params)
+{
+ return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
+ NONE_CONNECTION_TYPE);
+}
+
+static int ecore_func_send_tx_start(struct bnx2x_softc *sc, struct ecore_func_state_params
+ *params)
+{
+ struct ecore_func_sp_obj *o = params->f_obj;
+ struct flow_control_configuration *rdata =
+ (struct flow_control_configuration *)o->rdata;
+ ecore_dma_addr_t data_mapping = o->rdata_mapping;
+ struct ecore_func_tx_start_params *tx_start_params =
+ &params->params.tx_start;
+ uint32_t i;
+
+ ECORE_MEMSET(rdata, 0, sizeof(*rdata));
+
+ rdata->dcb_enabled = tx_start_params->dcb_enabled;
+ rdata->dcb_version = tx_start_params->dcb_version;
+ rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
+
+ for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
+ rdata->traffic_type_to_priority_cos[i] =
+ tx_start_params->traffic_type_to_priority_cos[i];
+
+ return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
+ data_mapping, NONE_CONNECTION_TYPE);
+}
+
+static int ecore_func_send_cmd(struct bnx2x_softc *sc,
+ struct ecore_func_state_params *params)
+{
+ switch (params->cmd) {
+ case ECORE_F_CMD_HW_INIT:
+ return ecore_func_hw_init(sc, params);
+ case ECORE_F_CMD_START:
+ return ecore_func_send_start(sc, params);
+ case ECORE_F_CMD_STOP:
+ return ecore_func_send_stop(sc, params);
+ case ECORE_F_CMD_HW_RESET:
+ return ecore_func_hw_reset(sc, params);
+ case ECORE_F_CMD_AFEX_UPDATE:
+ return ecore_func_send_afex_update(sc, params);
+ case ECORE_F_CMD_AFEX_VIFLISTS:
+ return ecore_func_send_afex_viflists(sc, params);
+ case ECORE_F_CMD_TX_STOP:
+ return ecore_func_send_tx_stop(sc, params);
+ case ECORE_F_CMD_TX_START:
+ return ecore_func_send_tx_start(sc, params);
+ case ECORE_F_CMD_SWITCH_UPDATE:
+ return ecore_func_send_switch_update(sc, params);
+ default:
+ PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
+ return ECORE_INVAL;
+ }
+}
+
+void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc,
+ struct ecore_func_sp_obj *obj,
+ void *rdata, ecore_dma_addr_t rdata_mapping,
+ void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
+ struct ecore_func_sp_drv_ops *drv_iface)
+{
+ ECORE_MEMSET(obj, 0, sizeof(*obj));
+
+ ECORE_MUTEX_INIT(&obj->one_pending_mutex);
+
+ obj->rdata = rdata;
+ obj->rdata_mapping = rdata_mapping;
+ obj->afex_rdata = afex_rdata;
+ obj->afex_rdata_mapping = afex_rdata_mapping;
+ obj->send_cmd = ecore_func_send_cmd;
+ obj->check_transition = ecore_func_chk_transition;
+ obj->complete_cmd = ecore_func_comp_cmd;
+ obj->wait_comp = ecore_func_wait_comp;
+ obj->drv = drv_iface;
+}
+
+/**
+ * ecore_func_state_change - perform Function state change transition
+ *
+ * @sc: device handle
+ * @params: parameters to perform the transaction
+ *
+ * returns 0 in case of successfully completed transition,
+ * negative error code in case of failure, positive
+ * (EBUSY) value if there is a completion to that is
+ * still pending (possible only if RAMROD_COMP_WAIT is
+ * not set in params->ramrod_flags for asynchronous
+ * commands).
+ */
+int ecore_func_state_change(struct bnx2x_softc *sc,
+ struct ecore_func_state_params *params)
+{
+ struct ecore_func_sp_obj *o = params->f_obj;
+ int rc, cnt = 300;
+ enum ecore_func_cmd cmd = params->cmd;
+ unsigned long *pending = &o->pending;
+
+ ECORE_MUTEX_LOCK(&o->one_pending_mutex);
+
+ /* Check that the requested transition is legal */
+ rc = o->check_transition(sc, o, params);
+ if ((rc == ECORE_BUSY) &&
+ (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
+ while ((rc == ECORE_BUSY) && (--cnt > 0)) {
+ ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
+ ECORE_MSLEEP(10);
+ ECORE_MUTEX_LOCK(&o->one_pending_mutex);
+ rc = o->check_transition(sc, o, params);
+ }
+ if (rc == ECORE_BUSY) {
+ ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
+ PMD_DRV_LOG(ERR,
+ "timeout waiting for previous ramrod completion");
+ return rc;
+ }
+ } else if (rc) {
+ ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
+ return rc;
+ }
+
+ /* Set "pending" bit */
+ ECORE_SET_BIT(cmd, pending);
+
+ /* Don't send a command if only driver cleanup was requested */
+ if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ ecore_func_state_change_comp(sc, o, cmd);
+ ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
+ } else {
+ /* Send a ramrod */
+ rc = o->send_cmd(sc, params);
+
+ ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
+
+ if (rc) {
+ o->next_state = ECORE_F_STATE_MAX;
+ ECORE_CLEAR_BIT(cmd, pending);
+ ECORE_SMP_MB_AFTER_CLEAR_BIT();
+ return rc;
+ }
+
+ if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+ rc = o->wait_comp(sc, o, cmd);
+ if (rc)
+ return rc;
+
+ return ECORE_SUCCESS;
+ }
+ }
+
+ return ECORE_RET_PENDING(cmd, pending);
+}
+
+/******************************************************************************
+ * Description:
+ * Calculates crc 8 on a word value: polynomial 0-1-2-8
+ * Code was translated from Verilog.
+ * Return:
+ *****************************************************************************/
+uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc)
+{
+ uint8_t D[32];
+ uint8_t NewCRC[8];
+ uint8_t C[8];
+ uint8_t crc_res;
+ uint8_t i;
+
+ /* split the data into 31 bits */
+ for (i = 0; i < 32; i++) {
+ D[i] = (uint8_t) (data & 1);
+ data = data >> 1;
+ }
+
+ /* split the crc into 8 bits */
+ for (i = 0; i < 8; i++) {
+ C[i] = crc & 1;
+ crc = crc >> 1;
+ }
+
+ NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
+ D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
+ C[6] ^ C[7];
+ NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
+ D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
+ D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
+ NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
+ D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
+ C[0] ^ C[1] ^ C[4] ^ C[5];
+ NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
+ D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
+ C[1] ^ C[2] ^ C[5] ^ C[6];
+ NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
+ D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
+ C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
+ NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
+ D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
+ C[3] ^ C[4] ^ C[7];
+ NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
+ D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5];
+ NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
+ D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6];
+
+ crc_res = 0;
+ for (i = 0; i < 8; i++) {
+ crc_res |= (NewCRC[i] << i);
+ }
+
+ return crc_res;
+}
+
+uint32_t
+ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++;
+ for (i = 0; i < 8; i++)
+ crc = (crc >> 1) ^ ((crc & 1) ? magic : 0);
+ }
+ return crc;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.h b/src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.h
new file mode 100644
index 00000000..6b65a496
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.h
@@ -0,0 +1,1766 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef ECORE_SP_H
+#define ECORE_SP_H
+
+#include <rte_byteorder.h>
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN RTE_LITTLE_ENDIAN
+#endif
+#undef __BIG_ENDIAN
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN RTE_BIG_ENDIAN
+#endif
+#undef __LITTLE_ENDIAN
+#endif
+
+#include "ecore_mfw_req.h"
+#include "ecore_fw_defs.h"
+#include "ecore_hsi.h"
+#include "ecore_reg.h"
+
+struct bnx2x_softc;
+typedef rte_iova_t ecore_dma_addr_t; /* expected to be 64 bit wide */
+typedef volatile int ecore_atomic_t;
+
+
+#define ETH_ALEN ETHER_ADDR_LEN /* 6 */
+
+#define ECORE_SWCID_SHIFT 17
+#define ECORE_SWCID_MASK ((0x1 << ECORE_SWCID_SHIFT) - 1)
+
+#define ECORE_MC_HASH_SIZE 8
+#define ECORE_MC_HASH_OFFSET(sc, i) \
+ (BAR_TSTRORM_INTMEM + \
+ TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(FUNC_ID(sc)) + i*4)
+
+#define ECORE_MAX_MULTICAST 64
+#define ECORE_MAX_EMUL_MULTI 1
+
+#define IRO sc->iro_array
+
+typedef rte_spinlock_t ECORE_MUTEX;
+#define ECORE_MUTEX_INIT(_mutex) rte_spinlock_init(_mutex)
+#define ECORE_MUTEX_LOCK(_mutex) rte_spinlock_lock(_mutex)
+#define ECORE_MUTEX_UNLOCK(_mutex) rte_spinlock_unlock(_mutex)
+
+typedef rte_spinlock_t ECORE_MUTEX_SPIN;
+#define ECORE_SPIN_LOCK_INIT(_spin, _sc) rte_spinlock_init(_spin)
+#define ECORE_SPIN_LOCK_BH(_spin) rte_spinlock_lock(_spin) /* bh = bottom-half */
+#define ECORE_SPIN_UNLOCK_BH(_spin) rte_spinlock_unlock(_spin) /* bh = bottom-half */
+
+#define ECORE_SMP_MB_AFTER_CLEAR_BIT() mb()
+#define ECORE_SMP_MB_BEFORE_CLEAR_BIT() mb()
+#define ECORE_SMP_MB() mb()
+#define ECORE_SMP_RMB() rmb()
+#define ECORE_SMP_WMB() wmb()
+#define ECORE_MMIOWB() wmb()
+
+#define ECORE_SET_BIT_NA(bit, var) (*var |= (1 << bit))
+#define ECORE_CLEAR_BIT_NA(bit, var) (*var &= ~(1 << bit))
+
+#define ECORE_TEST_BIT(bit, var) bnx2x_test_bit(bit, var)
+#define ECORE_SET_BIT(bit, var) bnx2x_set_bit(bit, var)
+#define ECORE_CLEAR_BIT(bit, var) bnx2x_clear_bit(bit, var)
+#define ECORE_TEST_AND_CLEAR_BIT(bit, var) bnx2x_test_and_clear_bit(bit, var)
+
+#define atomic_load_acq_int (int)*
+#define atomic_store_rel_int(a, v) (*a = v)
+#define atomic_cmpset_acq_int(a, o, n) ((*a = (o & (n)) | (n)) ^ o)
+
+#define atomic_load_acq_long (long)*
+#define atomic_store_rel_long(a, v) (*a = v)
+#define atomic_set_acq_long(a, v) (*a |= v)
+#define atomic_clear_acq_long(a, v) (*a &= ~v)
+#define atomic_cmpset_acq_long(a, o, n) ((*a = (o & (n)) | (n)) ^ o)
+#define atomic_subtract_acq_long(a, v) (*a -= v)
+#define atomic_add_acq_long(a, v) (*a += v)
+
+#define ECORE_ATOMIC_READ(a) atomic_load_acq_int((volatile int *)a)
+#define ECORE_ATOMIC_SET(a, v) atomic_store_rel_int((volatile int *)a, v)
+#define ECORE_ATOMIC_CMPXCHG(a, o, n) bnx2x_cmpxchg((volatile int *)a, o, n)
+
+#define ECORE_RET_PENDING(pending_bit, pending) \
+ (ECORE_TEST_BIT(pending_bit, pending) ? ECORE_PENDING : ECORE_SUCCESS)
+
+#define ECORE_SET_FLAG(value, mask, flag) \
+ do { \
+ (value) &= ~(mask); \
+ (value) |= ((flag) << (mask##_SHIFT)); \
+ } while (0)
+
+#define ECORE_GET_FLAG(value, mask) \
+ (((value) &= (mask)) >> (mask##_SHIFT))
+
+#define ECORE_MIGHT_SLEEP()
+
+#define ECORE_FCOE_CID(sc) ((sc)->fp[FCOE_IDX(sc)].cl_id)
+
+#define ECORE_MEMCMP(_a, _b, _s) memcmp(_a, _b, _s)
+#define ECORE_MEMCPY(_a, _b, _s) rte_memcpy(_a, _b, _s)
+#define ECORE_MEMSET(_a, _c, _s) memset(_a, _c, _s)
+
+#define ECORE_CPU_TO_LE16(x) htole16(x)
+#define ECORE_CPU_TO_LE32(x) htole32(x)
+
+#define ECORE_WAIT(_s, _t) DELAY(1000)
+#define ECORE_MSLEEP(_t) DELAY((_t) * 1000)
+
+#define ECORE_LIKELY(x) likely(x)
+#define ECORE_UNLIKELY(x) unlikely(x)
+
+#define ECORE_ZALLOC(_size, _flags, _sc) \
+ rte_zmalloc("", _size, RTE_CACHE_LINE_SIZE)
+
+#define ECORE_CALLOC(_len, _size, _flags, _sc) \
+ rte_calloc("", _len, _size, RTE_CACHE_LINE_SIZE)
+
+#define ECORE_FREE(_s, _buf, _size) \
+ rte_free(_buf)
+
+#define SC_ILT(sc) ((sc)->ilt)
+#define ILOG2(x) bnx2x_ilog2(x)
+
+#define ECORE_ILT_ZALLOC(x, y, size, str) \
+ do { \
+ x = rte_malloc("", sizeof(struct bnx2x_dma), RTE_CACHE_LINE_SIZE); \
+ if (x) { \
+ if (bnx2x_dma_alloc((struct bnx2x_softc *)sc, \
+ size, (struct bnx2x_dma *)x, \
+ str, RTE_CACHE_LINE_SIZE) != 0) { \
+ rte_free(x); \
+ x = NULL; \
+ *y = 0; \
+ } else { \
+ *y = ((struct bnx2x_dma *)x)->paddr; \
+ } \
+ } \
+ } while (0)
+
+#define ECORE_ILT_FREE(x, y, size) \
+ do { \
+ if (x) { \
+ rte_free(x); \
+ x = NULL; \
+ y = 0; \
+ } \
+ } while (0)
+
+#define ECORE_IS_VALID_ETHER_ADDR(_mac) TRUE
+
+#define ECORE_IS_MF_SD_MODE IS_MF_SD_MODE
+#define ECORE_IS_MF_SI_MODE IS_MF_SI_MODE
+#define ECORE_IS_MF_AFEX_MODE IS_MF_AFEX_MODE
+
+#define ECORE_SET_CTX_VALIDATION bnx2x_set_ctx_validation
+
+#define ECORE_UPDATE_COALESCE_SB_INDEX bnx2x_update_coalesce_sb_index
+
+#define ECORE_ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a))
+
+#define ECORE_REG_WR_DMAE_LEN REG_WR_DMAE_LEN
+
+#define ECORE_PATH_ID SC_PATH
+#define ECORE_PORT_ID SC_PORT
+#define ECORE_FUNC_ID SC_FUNC
+#define ECORE_ABS_FUNC_ID SC_ABS_FUNC
+
+#define CRCPOLY_LE 0xedb88320
+uint32_t ecore_calc_crc32(uint32_t crc, uint8_t const *p,
+ uint32_t len, uint32_t magic);
+
+uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc);
+
+
+static inline uint32_t
+ECORE_CRC32_LE(uint32_t seed, uint8_t *mac, uint32_t len)
+{
+ return ecore_calc_crc32(seed, mac, len, CRCPOLY_LE);
+}
+
+#define ecore_sp_post(_sc, _a, _b, _c, _d) \
+ bnx2x_sp_post(_sc, _a, _b, U64_HI(_c), U64_LO(_c), _d)
+
+#define ECORE_DBG_BREAK_IF(exp) \
+ do { \
+ if (unlikely(exp)) { \
+ rte_panic("ECORE"); \
+ } \
+ } while (0)
+
+#define ECORE_BUG() \
+ do { \
+ rte_panic("BUG (%s:%d)", __FILE__, __LINE__); \
+ } while(0);
+
+#define ECORE_BUG_ON(exp) \
+ do { \
+ if (likely(exp)) { \
+ rte_panic("BUG_ON (%s:%d)", __FILE__, __LINE__); \
+ } \
+ } while (0)
+
+
+#define ECORE_MSG(m, ...) \
+ PMD_DRV_LOG(DEBUG, m, ##__VA_ARGS__)
+
+typedef struct _ecore_list_entry_t
+{
+ struct _ecore_list_entry_t *next, *prev;
+} ecore_list_entry_t;
+
+typedef struct ecore_list_t
+{
+ ecore_list_entry_t *head, *tail;
+ unsigned long cnt;
+} ecore_list_t;
+
+/* initialize the list */
+#define ECORE_LIST_INIT(_list) \
+ do { \
+ (_list)->head = NULL; \
+ (_list)->tail = NULL; \
+ (_list)->cnt = 0; \
+ } while (0)
+
+/* return TRUE if the element is the last on the list */
+#define ECORE_LIST_IS_LAST(_elem, _list) \
+ (_elem == (_list)->tail)
+
+/* return TRUE if the list is empty */
+#define ECORE_LIST_IS_EMPTY(_list) \
+ ((_list)->cnt == 0)
+
+/* return the first element */
+#define ECORE_LIST_FIRST_ENTRY(_list, cast, _link) \
+ (cast *)((_list)->head)
+
+/* return the next element */
+#define ECORE_LIST_NEXT(_elem, _link, cast) \
+ (cast *)((&((_elem)->_link))->next)
+
+/* push an element on the head of the list */
+#define ECORE_LIST_PUSH_HEAD(_elem, _list) \
+ do { \
+ (_elem)->prev = (ecore_list_entry_t *)0; \
+ (_elem)->next = (_list)->head; \
+ if ((_list)->tail == (ecore_list_entry_t *)0) { \
+ (_list)->tail = (_elem); \
+ } else { \
+ (_list)->head->prev = (_elem); \
+ } \
+ (_list)->head = (_elem); \
+ (_list)->cnt++; \
+ } while (0)
+
+/* push an element on the tail of the list */
+#define ECORE_LIST_PUSH_TAIL(_elem, _list) \
+ do { \
+ (_elem)->next = (ecore_list_entry_t *)0; \
+ (_elem)->prev = (_list)->tail; \
+ if ((_list)->tail) { \
+ (_list)->tail->next = (_elem); \
+ } else { \
+ (_list)->head = (_elem); \
+ } \
+ (_list)->tail = (_elem); \
+ (_list)->cnt++; \
+ } while (0)
+
+/* push list1 on the head of list2 and return with list1 as empty */
+#define ECORE_LIST_SPLICE_INIT(_list1, _list2) \
+ do { \
+ (_list1)->tail->next = (_list2)->head; \
+ if ((_list2)->head) { \
+ (_list2)->head->prev = (_list1)->tail; \
+ } else { \
+ (_list2)->tail = (_list1)->tail; \
+ } \
+ (_list2)->head = (_list1)->head; \
+ (_list2)->cnt += (_list1)->cnt; \
+ (_list1)->head = NULL; \
+ (_list1)->tail = NULL; \
+ (_list1)->cnt = 0; \
+ } while (0)
+
+/* remove an element from the list */
+#define ECORE_LIST_REMOVE_ENTRY(_elem, _list) \
+ do { \
+ if ((_list)->head == (_elem)) { \
+ if ((_list)->head) { \
+ (_list)->head = (_list)->head->next; \
+ if ((_list)->head) { \
+ (_list)->head->prev = (ecore_list_entry_t *)0; \
+ } else { \
+ (_list)->tail = (ecore_list_entry_t *)0; \
+ } \
+ (_list)->cnt--; \
+ } \
+ } else if ((_list)->tail == (_elem)) { \
+ if ((_list)->tail) { \
+ (_list)->tail = (_list)->tail->prev; \
+ if ((_list)->tail) { \
+ (_list)->tail->next = (ecore_list_entry_t *)0; \
+ } else { \
+ (_list)->head = (ecore_list_entry_t *)0; \
+ } \
+ (_list)->cnt--; \
+ } \
+ } else { \
+ (_elem)->prev->next = (_elem)->next; \
+ (_elem)->next->prev = (_elem)->prev; \
+ (_list)->cnt--; \
+ } \
+ } while (0)
+
+/* walk the list */
+#define ECORE_LIST_FOR_EACH_ENTRY(pos, _list, _link, cast) \
+ for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _link); \
+ pos; \
+ pos = ECORE_LIST_NEXT(pos, _link, cast))
+
+/* walk the list (safely) */
+#define ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, _list, _link, cast) \
+ for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _lint), \
+ n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL; \
+ pos != NULL; \
+ pos = (cast *)n, \
+ n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL)
+
+
+/* Manipulate a bit vector defined as an array of uint64_t */
+
+/* Number of bits in one sge_mask array element */
+#define BIT_VEC64_ELEM_SZ 64
+#define BIT_VEC64_ELEM_SHIFT 6
+#define BIT_VEC64_ELEM_MASK ((uint64_t)BIT_VEC64_ELEM_SZ - 1)
+
+#define __BIT_VEC64_SET_BIT(el, bit) \
+ do { \
+ el = ((el) | ((uint64_t)0x1 << (bit))); \
+ } while (0)
+
+#define __BIT_VEC64_CLEAR_BIT(el, bit) \
+ do { \
+ el = ((el) & (~((uint64_t)0x1 << (bit)))); \
+ } while (0)
+
+#define BIT_VEC64_SET_BIT(vec64, idx) \
+ __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+ (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
+ __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+ (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_TEST_BIT(vec64, idx) \
+ (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
+ ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
+
+/*
+ * Creates a bitmask of all ones in less significant bits.
+ * idx - index of the most significant bit in the created mask
+ */
+#define BIT_VEC64_ONES_MASK(idx) \
+ (((uint64_t)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
+#define BIT_VEC64_ELEM_ONE_MASK ((uint64_t)(~0))
+
+/* fill in a MAC address the way the FW likes it */
+static inline void
+ecore_set_fw_mac_addr(uint16_t *fw_hi,
+ uint16_t *fw_mid,
+ uint16_t *fw_lo,
+ uint8_t *mac)
+{
+ ((uint8_t *)fw_hi)[0] = mac[1];
+ ((uint8_t *)fw_hi)[1] = mac[0];
+ ((uint8_t *)fw_mid)[0] = mac[3];
+ ((uint8_t *)fw_mid)[1] = mac[2];
+ ((uint8_t *)fw_lo)[0] = mac[5];
+ ((uint8_t *)fw_lo)[1] = mac[4];
+}
+
+
+enum ecore_status_t {
+ ECORE_EXISTS = -6,
+ ECORE_IO = -5,
+ ECORE_TIMEOUT = -4,
+ ECORE_INVAL = -3,
+ ECORE_BUSY = -2,
+ ECORE_NOMEM = -1,
+ ECORE_SUCCESS = 0,
+ /* PENDING is not an error and should be positive */
+ ECORE_PENDING = 1,
+};
+
+enum {
+ SWITCH_UPDATE,
+ AFEX_UPDATE,
+};
+
+
+
+
+struct bnx2x_softc;
+struct eth_context;
+
+/* Bits representing general command's configuration */
+enum {
+ RAMROD_TX,
+ RAMROD_RX,
+ /* Wait until all pending commands complete */
+ RAMROD_COMP_WAIT,
+ /* Don't send a ramrod, only update a registry */
+ RAMROD_DRV_CLR_ONLY,
+ /* Configure HW according to the current object state */
+ RAMROD_RESTORE,
+ /* Execute the next command now */
+ RAMROD_EXEC,
+ /* Don't add a new command and continue execution of posponed
+ * commands. If not set a new command will be added to the
+ * pending commands list.
+ */
+ RAMROD_CONT,
+ /* If there is another pending ramrod, wait until it finishes and
+ * re-try to submit this one. This flag can be set only in sleepable
+ * context, and should not be set from the context that completes the
+ * ramrods as deadlock will occur.
+ */
+ RAMROD_RETRY,
+};
+
+typedef enum {
+ ECORE_OBJ_TYPE_RX,
+ ECORE_OBJ_TYPE_TX,
+ ECORE_OBJ_TYPE_RX_TX,
+} ecore_obj_type;
+
+/* Public slow path states */
+enum {
+ ECORE_FILTER_MAC_PENDING,
+ ECORE_FILTER_VLAN_PENDING,
+ ECORE_FILTER_VLAN_MAC_PENDING,
+ ECORE_FILTER_RX_MODE_PENDING,
+ ECORE_FILTER_RX_MODE_SCHED,
+ ECORE_FILTER_ISCSI_ETH_START_SCHED,
+ ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
+ ECORE_FILTER_FCOE_ETH_START_SCHED,
+ ECORE_FILTER_FCOE_ETH_STOP_SCHED,
+ ECORE_FILTER_MCAST_PENDING,
+ ECORE_FILTER_MCAST_SCHED,
+ ECORE_FILTER_RSS_CONF_PENDING,
+ ECORE_AFEX_FCOE_Q_UPDATE_PENDING,
+ ECORE_AFEX_PENDING_VIFSET_MCP_ACK
+};
+
+struct ecore_raw_obj {
+ uint8_t func_id;
+
+ /* Queue params */
+ uint8_t cl_id;
+ uint32_t cid;
+
+ /* Ramrod data buffer params */
+ void *rdata;
+ ecore_dma_addr_t rdata_mapping;
+
+ /* Ramrod state params */
+ int state; /* "ramrod is pending" state bit */
+ unsigned long *pstate; /* pointer to state buffer */
+
+ ecore_obj_type obj_type;
+
+ int (*wait_comp)(struct bnx2x_softc *sc,
+ struct ecore_raw_obj *o);
+
+ int (*check_pending)(struct ecore_raw_obj *o);
+ void (*clear_pending)(struct ecore_raw_obj *o);
+ void (*set_pending)(struct ecore_raw_obj *o);
+};
+
+/************************* VLAN-MAC commands related parameters ***************/
+struct ecore_mac_ramrod_data {
+ uint8_t mac[ETH_ALEN];
+ uint8_t is_inner_mac;
+};
+
+struct ecore_vlan_ramrod_data {
+ uint16_t vlan;
+};
+
+struct ecore_vlan_mac_ramrod_data {
+ uint8_t mac[ETH_ALEN];
+ uint8_t is_inner_mac;
+ uint16_t vlan;
+};
+
+union ecore_classification_ramrod_data {
+ struct ecore_mac_ramrod_data mac;
+ struct ecore_vlan_ramrod_data vlan;
+ struct ecore_vlan_mac_ramrod_data vlan_mac;
+};
+
+/* VLAN_MAC commands */
+enum ecore_vlan_mac_cmd {
+ ECORE_VLAN_MAC_ADD,
+ ECORE_VLAN_MAC_DEL,
+ ECORE_VLAN_MAC_MOVE,
+};
+
+struct ecore_vlan_mac_data {
+ /* Requested command: ECORE_VLAN_MAC_XX */
+ enum ecore_vlan_mac_cmd cmd;
+ /* used to contain the data related vlan_mac_flags bits from
+ * ramrod parameters.
+ */
+ unsigned long vlan_mac_flags;
+
+ /* Needed for MOVE command */
+ struct ecore_vlan_mac_obj *target_obj;
+
+ union ecore_classification_ramrod_data u;
+};
+
+/*************************** Exe Queue obj ************************************/
+union ecore_exe_queue_cmd_data {
+ struct ecore_vlan_mac_data vlan_mac;
+
+ struct {
+ } mcast;
+};
+
+struct ecore_exeq_elem {
+ ecore_list_entry_t link;
+
+ /* Length of this element in the exe_chunk. */
+ int cmd_len;
+
+ union ecore_exe_queue_cmd_data cmd_data;
+};
+
+union ecore_qable_obj;
+
+union ecore_exeq_comp_elem {
+ union event_ring_elem *elem;
+};
+
+struct ecore_exe_queue_obj;
+
+typedef int (*exe_q_validate)(struct bnx2x_softc *sc,
+ union ecore_qable_obj *o,
+ struct ecore_exeq_elem *elem);
+
+typedef int (*exe_q_remove)(struct bnx2x_softc *sc,
+ union ecore_qable_obj *o,
+ struct ecore_exeq_elem *elem);
+
+/* Return positive if entry was optimized, 0 - if not, negative
+ * in case of an error.
+ */
+typedef int (*exe_q_optimize)(struct bnx2x_softc *sc,
+ union ecore_qable_obj *o,
+ struct ecore_exeq_elem *elem);
+typedef int (*exe_q_execute)(struct bnx2x_softc *sc,
+ union ecore_qable_obj *o,
+ ecore_list_t *exe_chunk,
+ unsigned long *ramrod_flags);
+typedef struct ecore_exeq_elem *
+ (*exe_q_get)(struct ecore_exe_queue_obj *o,
+ struct ecore_exeq_elem *elem);
+
+struct ecore_exe_queue_obj {
+ /* Commands pending for an execution. */
+ ecore_list_t exe_queue;
+
+ /* Commands pending for an completion. */
+ ecore_list_t pending_comp;
+
+ ECORE_MUTEX_SPIN lock;
+
+ /* Maximum length of commands' list for one execution */
+ int exe_chunk_len;
+
+ union ecore_qable_obj *owner;
+
+ /****** Virtual functions ******/
+ /**
+ * Called before commands execution for commands that are really
+ * going to be executed (after 'optimize').
+ *
+ * Must run under exe_queue->lock
+ */
+ exe_q_validate validate;
+
+ /**
+ * Called before removing pending commands, cleaning allocated
+ * resources (e.g., credits from validate)
+ */
+ exe_q_remove remove;
+
+ /**
+ * This will try to cancel the current pending commands list
+ * considering the new command.
+ *
+ * Returns the number of optimized commands or a negative error code
+ *
+ * Must run under exe_queue->lock
+ */
+ exe_q_optimize optimize;
+
+ /**
+ * Run the next commands chunk (owner specific).
+ */
+ exe_q_execute execute;
+
+ /**
+ * Return the exe_queue element containing the specific command
+ * if any. Otherwise return NULL.
+ */
+ exe_q_get get;
+};
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/*
+ * Element in the VLAN_MAC registry list having all current configured
+ * rules.
+ */
+struct ecore_vlan_mac_registry_elem {
+ ecore_list_entry_t link;
+
+ /* Used to store the cam offset used for the mac/vlan/vlan-mac.
+ * Relevant for 57711 only. VLANs and MACs share the
+ * same CAM for these chips.
+ */
+ int cam_offset;
+
+ /* Needed for DEL and RESTORE flows */
+ unsigned long vlan_mac_flags;
+
+ union ecore_classification_ramrod_data u;
+};
+
+/* Bits representing VLAN_MAC commands specific flags */
+enum {
+ ECORE_UC_LIST_MAC,
+ ECORE_ETH_MAC,
+ ECORE_ISCSI_ETH_MAC,
+ ECORE_NETQ_ETH_MAC,
+ ECORE_DONT_CONSUME_CAM_CREDIT,
+ ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
+};
+
+struct ecore_vlan_mac_ramrod_params {
+ /* Object to run the command from */
+ struct ecore_vlan_mac_obj *vlan_mac_obj;
+
+ /* General command flags: COMP_WAIT, etc. */
+ unsigned long ramrod_flags;
+
+ /* Command specific configuration request */
+ struct ecore_vlan_mac_data user_req;
+};
+
+struct ecore_vlan_mac_obj {
+ struct ecore_raw_obj raw;
+
+ /* Bookkeeping list: will prevent the addition of already existing
+ * entries.
+ */
+ ecore_list_t head;
+ /* Implement a simple reader/writer lock on the head list.
+ * all these fields should only be accessed under the exe_queue lock
+ */
+ uint8_t head_reader; /* Num. of readers accessing head list */
+ int head_exe_request; /* Pending execution request. */
+ unsigned long saved_ramrod_flags; /* Ramrods of pending execution */
+
+ /* Execution queue interface instance */
+ struct ecore_exe_queue_obj exe_queue;
+
+ /* MACs credit pool */
+ struct ecore_credit_pool_obj *macs_pool;
+
+ /* VLANs credit pool */
+ struct ecore_credit_pool_obj *vlans_pool;
+
+ /* RAMROD command to be used */
+ int ramrod_cmd;
+
+ /* copy first n elements onto preallocated buffer
+ *
+ * @param n number of elements to get
+ * @param buf buffer preallocated by caller into which elements
+ * will be copied. Note elements are 4-byte aligned
+ * so buffer size must be able to accommodate the
+ * aligned elements.
+ *
+ * @return number of copied bytes
+ */
+
+ int (*get_n_elements)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o, int n, uint8_t *base,
+ uint8_t stride, uint8_t size);
+
+ /**
+ * Checks if ADD-ramrod with the given params may be performed.
+ *
+ * @return zero if the element may be added
+ */
+
+ int (*check_add)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ union ecore_classification_ramrod_data *data);
+
+ /**
+ * Checks if DEL-ramrod with the given params may be performed.
+ *
+ * @return TRUE if the element may be deleted
+ */
+ struct ecore_vlan_mac_registry_elem *
+ (*check_del)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ union ecore_classification_ramrod_data *data);
+
+ /**
+ * Checks if DEL-ramrod with the given params may be performed.
+ *
+ * @return TRUE if the element may be deleted
+ */
+ int (*check_move)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *src_o,
+ struct ecore_vlan_mac_obj *dst_o,
+ union ecore_classification_ramrod_data *data);
+
+ /**
+ * Update the relevant credit object(s) (consume/return
+ * correspondingly).
+ */
+ int (*get_credit)(struct ecore_vlan_mac_obj *o);
+ int (*put_credit)(struct ecore_vlan_mac_obj *o);
+ int (*get_cam_offset)(struct ecore_vlan_mac_obj *o, int *offset);
+ int (*put_cam_offset)(struct ecore_vlan_mac_obj *o, int offset);
+
+ /**
+ * Configures one rule in the ramrod data buffer.
+ */
+ void (*set_one_rule)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ struct ecore_exeq_elem *elem, int rule_idx,
+ int cam_offset);
+
+ /**
+ * Delete all configured elements having the given
+ * vlan_mac_flags specification. Assumes no pending for
+ * execution commands. Will schedule all all currently
+ * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
+ * specification for deletion and will use the given
+ * ramrod_flags for the last DEL operation.
+ *
+ * @param sc
+ * @param o
+ * @param ramrod_flags RAMROD_XX flags
+ *
+ * @return 0 if the last operation has completed successfully
+ * and there are no more elements left, positive value
+ * if there are pending for completion commands,
+ * negative value in case of failure.
+ */
+ int (*delete_all)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ unsigned long *vlan_mac_flags,
+ unsigned long *ramrod_flags);
+
+ /**
+ * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
+ * configured elements list.
+ *
+ * @param sc
+ * @param p Command parameters (RAMROD_COMP_WAIT bit in
+ * ramrod_flags is only taken into an account)
+ * @param ppos a pointer to the cookie that should be given back in the
+ * next call to make function handle the next element. If
+ * *ppos is set to NULL it will restart the iterator.
+ * If returned *ppos == NULL this means that the last
+ * element has been handled.
+ *
+ * @return int
+ */
+ int (*restore)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_ramrod_params *p,
+ struct ecore_vlan_mac_registry_elem **ppos);
+
+ /**
+ * Should be called on a completion arrival.
+ *
+ * @param sc
+ * @param o
+ * @param cqe Completion element we are handling
+ * @param ramrod_flags if RAMROD_CONT is set the next bulk of
+ * pending commands will be executed.
+ * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
+ * may also be set if needed.
+ *
+ * @return 0 if there are neither pending nor waiting for
+ * completion commands. Positive value if there are
+ * pending for execution or for completion commands.
+ * Negative value in case of an error (including an
+ * error in the cqe).
+ */
+ int (*complete)(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *o,
+ union event_ring_elem *cqe,
+ unsigned long *ramrod_flags);
+
+ /**
+ * Wait for completion of all commands. Don't schedule new ones,
+ * just wait. It assumes that the completion code will schedule
+ * for new commands.
+ */
+ int (*wait)(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *o);
+};
+
+enum {
+ ECORE_LLH_CAM_ISCSI_ETH_LINE = 0,
+ ECORE_LLH_CAM_ETH_LINE,
+ ECORE_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
+};
+
+/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+
+/* RX_MODE ramrod special flags: set in rx_mode_flags field in
+ * a ecore_rx_mode_ramrod_params.
+ */
+enum {
+ ECORE_RX_MODE_FCOE_ETH,
+ ECORE_RX_MODE_ISCSI_ETH,
+};
+
+enum {
+ ECORE_ACCEPT_UNICAST,
+ ECORE_ACCEPT_MULTICAST,
+ ECORE_ACCEPT_ALL_UNICAST,
+ ECORE_ACCEPT_ALL_MULTICAST,
+ ECORE_ACCEPT_BROADCAST,
+ ECORE_ACCEPT_UNMATCHED,
+ ECORE_ACCEPT_ANY_VLAN
+};
+
+struct ecore_rx_mode_ramrod_params {
+ struct ecore_rx_mode_obj *rx_mode_obj;
+ unsigned long *pstate;
+ int state;
+ uint8_t cl_id;
+ uint32_t cid;
+ uint8_t func_id;
+ unsigned long ramrod_flags;
+ unsigned long rx_mode_flags;
+
+ /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
+ * a tstorm_eth_mac_filter_config (e1x).
+ */
+ void *rdata;
+ ecore_dma_addr_t rdata_mapping;
+
+ /* Rx mode settings */
+ unsigned long rx_accept_flags;
+
+ /* internal switching settings */
+ unsigned long tx_accept_flags;
+};
+
+struct ecore_rx_mode_obj {
+ int (*config_rx_mode)(struct bnx2x_softc *sc,
+ struct ecore_rx_mode_ramrod_params *p);
+
+ int (*wait_comp)(struct bnx2x_softc *sc,
+ struct ecore_rx_mode_ramrod_params *p);
+};
+
+/********************** Set multicast group ***********************************/
+
+struct ecore_mcast_list_elem {
+ ecore_list_entry_t link;
+ uint8_t *mac;
+};
+
+union ecore_mcast_config_data {
+ uint8_t *mac;
+ uint8_t bin; /* used in a RESTORE flow */
+};
+
+struct ecore_mcast_ramrod_params {
+ struct ecore_mcast_obj *mcast_obj;
+
+ /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
+ unsigned long ramrod_flags;
+
+ ecore_list_t mcast_list; /* list of struct ecore_mcast_list_elem */
+ int mcast_list_len;
+};
+
+enum ecore_mcast_cmd {
+ ECORE_MCAST_CMD_ADD,
+ ECORE_MCAST_CMD_CONT,
+ ECORE_MCAST_CMD_DEL,
+ ECORE_MCAST_CMD_RESTORE,
+};
+
+struct ecore_mcast_obj {
+ struct ecore_raw_obj raw;
+
+ union {
+ struct {
+ #define ECORE_MCAST_BINS_NUM 256
+ #define ECORE_MCAST_VEC_SZ (ECORE_MCAST_BINS_NUM / 64)
+ uint64_t vec[ECORE_MCAST_VEC_SZ];
+
+ /** Number of BINs to clear. Should be updated
+ * immediately when a command arrives in order to
+ * properly create DEL commands.
+ */
+ int num_bins_set;
+ } aprox_match;
+
+ struct {
+ ecore_list_t macs;
+ int num_macs_set;
+ } exact_match;
+ } registry;
+
+ /* Pending commands */
+ ecore_list_t pending_cmds_head;
+
+ /* A state that is set in raw.pstate, when there are pending commands */
+ int sched_state;
+
+ /* Maximal number of mcast MACs configured in one command */
+ int max_cmd_len;
+
+ /* Total number of currently pending MACs to configure: both
+ * in the pending commands list and in the current command.
+ */
+ int total_pending_num;
+
+ uint8_t engine_id;
+
+ /**
+ * @param cmd command to execute (ECORE_MCAST_CMD_X, see above)
+ */
+ int (*config_mcast)(struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd);
+
+ /**
+ * Fills the ramrod data during the RESTORE flow.
+ *
+ * @param sc
+ * @param o
+ * @param start_idx Registry index to start from
+ * @param rdata_idx Index in the ramrod data to start from
+ *
+ * @return -1 if we handled the whole registry or index of the last
+ * handled registry element.
+ */
+ int (*hdl_restore)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o,
+ int start_bin, int *rdata_idx);
+
+ int (*enqueue_cmd)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd);
+
+ void (*set_one_rule)(struct bnx2x_softc *sc,
+ struct ecore_mcast_obj *o, int idx,
+ union ecore_mcast_config_data *cfg_data,
+ enum ecore_mcast_cmd cmd);
+
+ /** Checks if there are more mcast MACs to be set or a previous
+ * command is still pending.
+ */
+ int (*check_pending)(struct ecore_mcast_obj *o);
+
+ /**
+ * Set/Clear/Check SCHEDULED state of the object
+ */
+ void (*set_sched)(struct ecore_mcast_obj *o);
+ void (*clear_sched)(struct ecore_mcast_obj *o);
+ int (*check_sched)(struct ecore_mcast_obj *o);
+
+ /* Wait until all pending commands complete */
+ int (*wait_comp)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o);
+
+ /**
+ * Handle the internal object counters needed for proper
+ * commands handling. Checks that the provided parameters are
+ * feasible.
+ */
+ int (*validate)(struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd);
+
+ /**
+ * Restore the values of internal counters in case of a failure.
+ */
+ void (*revert)(struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ int old_num_bins);
+
+ int (*get_registry_size)(struct ecore_mcast_obj *o);
+ void (*set_registry_size)(struct ecore_mcast_obj *o, int n);
+};
+
+/*************************** Credit handling **********************************/
+struct ecore_credit_pool_obj {
+
+ /* Current amount of credit in the pool */
+ ecore_atomic_t credit;
+
+ /* Maximum allowed credit. put() will check against it. */
+ int pool_sz;
+
+ /* Allocate a pool table statically.
+ *
+ * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
+ *
+ * The set bit in the table will mean that the entry is available.
+ */
+#define ECORE_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64)
+ uint64_t pool_mirror[ECORE_POOL_VEC_SIZE];
+
+ /* Base pool offset (initialized differently */
+ int base_pool_offset;
+
+ /**
+ * Get the next free pool entry.
+ *
+ * @return TRUE if there was a free entry in the pool
+ */
+ int (*get_entry)(struct ecore_credit_pool_obj *o, int *entry);
+
+ /**
+ * Return the entry back to the pool.
+ *
+ * @return TRUE if entry is legal and has been successfully
+ * returned to the pool.
+ */
+ int (*put_entry)(struct ecore_credit_pool_obj *o, int entry);
+
+ /**
+ * Get the requested amount of credit from the pool.
+ *
+ * @param cnt Amount of requested credit
+ * @return TRUE if the operation is successful
+ */
+ int (*get)(struct ecore_credit_pool_obj *o, int cnt);
+
+ /**
+ * Returns the credit to the pool.
+ *
+ * @param cnt Amount of credit to return
+ * @return TRUE if the operation is successful
+ */
+ int (*put)(struct ecore_credit_pool_obj *o, int cnt);
+
+ /**
+ * Reads the current amount of credit.
+ */
+ int (*check)(struct ecore_credit_pool_obj *o);
+};
+
+/*************************** RSS configuration ********************************/
+enum {
+ /* RSS_MODE bits are mutually exclusive */
+ ECORE_RSS_MODE_DISABLED,
+ ECORE_RSS_MODE_REGULAR,
+
+ ECORE_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
+
+ ECORE_RSS_IPV4,
+ ECORE_RSS_IPV4_TCP,
+ ECORE_RSS_IPV4_UDP,
+ ECORE_RSS_IPV6,
+ ECORE_RSS_IPV6_TCP,
+ ECORE_RSS_IPV6_UDP,
+
+ ECORE_RSS_TUNNELING,
+};
+
+struct ecore_config_rss_params {
+ struct ecore_rss_config_obj *rss_obj;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* ECORE_RSS_X bits */
+ unsigned long rss_flags;
+
+ /* Number hash bits to take into an account */
+ uint8_t rss_result_mask;
+
+ /* Indirection table */
+ uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+ /* RSS hash values */
+ uint32_t rss_key[10];
+
+ /* valid only if ECORE_RSS_UPDATE_TOE is set */
+ uint16_t toe_rss_bitmap;
+
+ /* valid if ECORE_RSS_TUNNELING is set */
+ uint16_t tunnel_value;
+ uint16_t tunnel_mask;
+};
+
+struct ecore_rss_config_obj {
+ struct ecore_raw_obj raw;
+
+ /* RSS engine to use */
+ uint8_t engine_id;
+
+ /* Last configured indirection table */
+ uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+ /* flags for enabling 4-tupple hash on UDP */
+ uint8_t udp_rss_v4;
+ uint8_t udp_rss_v6;
+
+ int (*config_rss)(struct bnx2x_softc *sc,
+ struct ecore_config_rss_params *p);
+};
+
+/*********************** Queue state update ***********************************/
+
+/* UPDATE command options */
+enum {
+ ECORE_Q_UPDATE_IN_VLAN_REM,
+ ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
+ ECORE_Q_UPDATE_OUT_VLAN_REM,
+ ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
+ ECORE_Q_UPDATE_ANTI_SPOOF,
+ ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
+ ECORE_Q_UPDATE_ACTIVATE,
+ ECORE_Q_UPDATE_ACTIVATE_CHNG,
+ ECORE_Q_UPDATE_DEF_VLAN_EN,
+ ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ ECORE_Q_UPDATE_SILENT_VLAN_REM,
+ ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
+ ECORE_Q_UPDATE_TX_SWITCHING,
+};
+
+/* Allowed Queue states */
+enum ecore_q_state {
+ ECORE_Q_STATE_RESET,
+ ECORE_Q_STATE_INITIALIZED,
+ ECORE_Q_STATE_ACTIVE,
+ ECORE_Q_STATE_MULTI_COS,
+ ECORE_Q_STATE_MCOS_TERMINATED,
+ ECORE_Q_STATE_INACTIVE,
+ ECORE_Q_STATE_STOPPED,
+ ECORE_Q_STATE_TERMINATED,
+ ECORE_Q_STATE_FLRED,
+ ECORE_Q_STATE_MAX,
+};
+
+/* Allowed Queue states */
+enum ecore_q_logical_state {
+ ECORE_Q_LOGICAL_STATE_ACTIVE,
+ ECORE_Q_LOGICAL_STATE_STOPPED,
+};
+
+/* Allowed commands */
+enum ecore_queue_cmd {
+ ECORE_Q_CMD_INIT,
+ ECORE_Q_CMD_SETUP,
+ ECORE_Q_CMD_SETUP_TX_ONLY,
+ ECORE_Q_CMD_DEACTIVATE,
+ ECORE_Q_CMD_ACTIVATE,
+ ECORE_Q_CMD_UPDATE,
+ ECORE_Q_CMD_UPDATE_TPA,
+ ECORE_Q_CMD_HALT,
+ ECORE_Q_CMD_CFC_DEL,
+ ECORE_Q_CMD_TERMINATE,
+ ECORE_Q_CMD_EMPTY,
+ ECORE_Q_CMD_MAX,
+};
+
+/* queue SETUP + INIT flags */
+enum {
+ ECORE_Q_FLG_TPA,
+ ECORE_Q_FLG_TPA_IPV6,
+ ECORE_Q_FLG_TPA_GRO,
+ ECORE_Q_FLG_STATS,
+ ECORE_Q_FLG_ZERO_STATS,
+ ECORE_Q_FLG_ACTIVE,
+ ECORE_Q_FLG_OV,
+ ECORE_Q_FLG_VLAN,
+ ECORE_Q_FLG_COS,
+ ECORE_Q_FLG_HC,
+ ECORE_Q_FLG_HC_EN,
+ ECORE_Q_FLG_DHC,
+ ECORE_Q_FLG_OOO,
+ ECORE_Q_FLG_FCOE,
+ ECORE_Q_FLG_LEADING_RSS,
+ ECORE_Q_FLG_MCAST,
+ ECORE_Q_FLG_DEF_VLAN,
+ ECORE_Q_FLG_TX_SWITCH,
+ ECORE_Q_FLG_TX_SEC,
+ ECORE_Q_FLG_ANTI_SPOOF,
+ ECORE_Q_FLG_SILENT_VLAN_REM,
+ ECORE_Q_FLG_FORCE_DEFAULT_PRI,
+ ECORE_Q_FLG_REFUSE_OUTBAND_VLAN,
+ ECORE_Q_FLG_PCSUM_ON_PKT,
+ ECORE_Q_FLG_TUN_INC_INNER_IP_ID
+};
+
+/* Queue type options: queue type may be a combination of below. */
+enum ecore_q_type {
+ ECORE_Q_TYPE_FWD,
+ ECORE_Q_TYPE_HAS_RX,
+ ECORE_Q_TYPE_HAS_TX,
+};
+
+#define ECORE_PRIMARY_CID_INDEX 0
+#define ECORE_MULTI_TX_COS_E1X 3 /* QM only */
+#define ECORE_MULTI_TX_COS_E2_E3A0 2
+#define ECORE_MULTI_TX_COS_E3B0 3
+#define ECORE_MULTI_TX_COS 3 /* Maximum possible */
+#define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN)
+
+struct ecore_queue_init_params {
+ struct {
+ unsigned long flags;
+ uint16_t hc_rate;
+ uint8_t fw_sb_id;
+ uint8_t sb_cq_index;
+ } tx;
+
+ struct {
+ unsigned long flags;
+ uint16_t hc_rate;
+ uint8_t fw_sb_id;
+ uint8_t sb_cq_index;
+ } rx;
+
+ /* CID context in the host memory */
+ struct eth_context *cxts[ECORE_MULTI_TX_COS];
+
+ /* maximum number of cos supported by hardware */
+ uint8_t max_cos;
+};
+
+struct ecore_queue_terminate_params {
+ /* index within the tx_only cids of this queue object */
+ uint8_t cid_index;
+};
+
+struct ecore_queue_cfc_del_params {
+ /* index within the tx_only cids of this queue object */
+ uint8_t cid_index;
+};
+
+struct ecore_queue_update_params {
+ unsigned long update_flags; /* ECORE_Q_UPDATE_XX bits */
+ uint16_t def_vlan;
+ uint16_t silent_removal_value;
+ uint16_t silent_removal_mask;
+/* index within the tx_only cids of this queue object */
+ uint8_t cid_index;
+};
+
+struct rxq_pause_params {
+ uint16_t bd_th_lo;
+ uint16_t bd_th_hi;
+ uint16_t rcq_th_lo;
+ uint16_t rcq_th_hi;
+ uint16_t sge_th_lo; /* valid if ECORE_Q_FLG_TPA */
+ uint16_t sge_th_hi; /* valid if ECORE_Q_FLG_TPA */
+ uint16_t pri_map;
+};
+
+/* general */
+struct ecore_general_setup_params {
+ /* valid if ECORE_Q_FLG_STATS */
+ uint8_t stat_id;
+
+ uint8_t spcl_id;
+ uint16_t mtu;
+ uint8_t cos;
+};
+
+struct ecore_rxq_setup_params {
+ /* dma */
+ ecore_dma_addr_t dscr_map;
+ ecore_dma_addr_t rcq_map;
+ ecore_dma_addr_t rcq_np_map;
+
+ uint16_t drop_flags;
+ uint16_t buf_sz;
+ uint8_t fw_sb_id;
+ uint8_t cl_qzone_id;
+
+ /* valid if ECORE_Q_FLG_TPA */
+ uint16_t tpa_agg_sz;
+ uint8_t max_tpa_queues;
+ uint8_t rss_engine_id;
+
+ /* valid if ECORE_Q_FLG_MCAST */
+ uint8_t mcast_engine_id;
+
+ uint8_t cache_line_log;
+
+ uint8_t sb_cq_index;
+
+ /* valid if BXN2X_Q_FLG_SILENT_VLAN_REM */
+ uint16_t silent_removal_value;
+ uint16_t silent_removal_mask;
+};
+
+struct ecore_txq_setup_params {
+ /* dma */
+ ecore_dma_addr_t dscr_map;
+
+ uint8_t fw_sb_id;
+ uint8_t sb_cq_index;
+ uint8_t cos; /* valid if ECORE_Q_FLG_COS */
+ uint16_t traffic_type;
+ /* equals to the leading rss client id, used for TX classification*/
+ uint8_t tss_leading_cl_id;
+
+ /* valid if ECORE_Q_FLG_DEF_VLAN */
+ uint16_t default_vlan;
+};
+
+struct ecore_queue_setup_params {
+ struct ecore_general_setup_params gen_params;
+ struct ecore_txq_setup_params txq_params;
+ struct ecore_rxq_setup_params rxq_params;
+ struct rxq_pause_params pause_params;
+ unsigned long flags;
+};
+
+struct ecore_queue_setup_tx_only_params {
+ struct ecore_general_setup_params gen_params;
+ struct ecore_txq_setup_params txq_params;
+ unsigned long flags;
+ /* index within the tx_only cids of this queue object */
+ uint8_t cid_index;
+};
+
+struct ecore_queue_state_params {
+ struct ecore_queue_sp_obj *q_obj;
+
+ /* Current command */
+ enum ecore_queue_cmd cmd;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* Params according to the current command */
+ union {
+ struct ecore_queue_update_params update;
+ struct ecore_queue_setup_params setup;
+ struct ecore_queue_init_params init;
+ struct ecore_queue_setup_tx_only_params tx_only;
+ struct ecore_queue_terminate_params terminate;
+ struct ecore_queue_cfc_del_params cfc_del;
+ } params;
+};
+
+struct ecore_viflist_params {
+ uint8_t echo_res;
+ uint8_t func_bit_map_res;
+};
+
+struct ecore_queue_sp_obj {
+ uint32_t cids[ECORE_MULTI_TX_COS];
+ uint8_t cl_id;
+ uint8_t func_id;
+
+ /* number of traffic classes supported by queue.
+ * The primary connection of the queue supports the first traffic
+ * class. Any further traffic class is supported by a tx-only
+ * connection.
+ *
+ * Therefore max_cos is also a number of valid entries in the cids
+ * array.
+ */
+ uint8_t max_cos;
+ uint8_t num_tx_only, next_tx_only;
+
+ enum ecore_q_state state, next_state;
+
+ /* bits from enum ecore_q_type */
+ unsigned long type;
+
+ /* ECORE_Q_CMD_XX bits. This object implements "one
+ * pending" paradigm but for debug and tracing purposes it's
+ * more convenient to have different bits for different
+ * commands.
+ */
+ unsigned long pending;
+
+ /* Buffer to use as a ramrod data and its mapping */
+ void *rdata;
+ ecore_dma_addr_t rdata_mapping;
+
+ /**
+ * Performs one state change according to the given parameters.
+ *
+ * @return 0 in case of success and negative value otherwise.
+ */
+ int (*send_cmd)(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params);
+
+ /**
+ * Sets the pending bit according to the requested transition.
+ */
+ int (*set_pending)(struct ecore_queue_sp_obj *o,
+ struct ecore_queue_state_params *params);
+
+ /**
+ * Checks that the requested state transition is legal.
+ */
+ int (*check_transition)(struct bnx2x_softc *sc,
+ struct ecore_queue_sp_obj *o,
+ struct ecore_queue_state_params *params);
+
+ /**
+ * Completes the pending command.
+ */
+ int (*complete_cmd)(struct bnx2x_softc *sc,
+ struct ecore_queue_sp_obj *o,
+ enum ecore_queue_cmd);
+
+ int (*wait_comp)(struct bnx2x_softc *sc,
+ struct ecore_queue_sp_obj *o,
+ enum ecore_queue_cmd cmd);
+};
+
+/********************** Function state update *********************************/
+/* Allowed Function states */
+enum ecore_func_state {
+ ECORE_F_STATE_RESET,
+ ECORE_F_STATE_INITIALIZED,
+ ECORE_F_STATE_STARTED,
+ ECORE_F_STATE_TX_STOPPED,
+ ECORE_F_STATE_MAX,
+};
+
+/* Allowed Function commands */
+enum ecore_func_cmd {
+ ECORE_F_CMD_HW_INIT,
+ ECORE_F_CMD_START,
+ ECORE_F_CMD_STOP,
+ ECORE_F_CMD_HW_RESET,
+ ECORE_F_CMD_AFEX_UPDATE,
+ ECORE_F_CMD_AFEX_VIFLISTS,
+ ECORE_F_CMD_TX_STOP,
+ ECORE_F_CMD_TX_START,
+ ECORE_F_CMD_SWITCH_UPDATE,
+ ECORE_F_CMD_MAX,
+};
+
+struct ecore_func_hw_init_params {
+ /* A load phase returned by MCP.
+ *
+ * May be:
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+ * FW_MSG_CODE_DRV_LOAD_COMMON
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ */
+ uint32_t load_phase;
+};
+
+struct ecore_func_hw_reset_params {
+ /* A load phase returned by MCP.
+ *
+ * May be:
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+ * FW_MSG_CODE_DRV_LOAD_COMMON
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ */
+ uint32_t reset_phase;
+};
+
+struct ecore_func_start_params {
+ /* Multi Function mode:
+ * - Single Function
+ * - Switch Dependent
+ * - Switch Independent
+ */
+ uint16_t mf_mode;
+
+ /* Switch Dependent mode outer VLAN tag */
+ uint16_t sd_vlan_tag;
+
+ /* Function cos mode */
+ uint8_t network_cos_mode;
+
+ /* NVGRE classification enablement */
+ uint8_t nvgre_clss_en;
+
+ /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
+ uint8_t gre_tunnel_mode;
+
+ /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
+ uint8_t gre_tunnel_rss;
+
+};
+
+struct ecore_func_switch_update_params {
+ uint8_t suspend;
+};
+
+struct ecore_func_afex_update_params {
+ uint16_t vif_id;
+ uint16_t afex_default_vlan;
+ uint8_t allowed_priorities;
+};
+
+struct ecore_func_afex_viflists_params {
+ uint16_t vif_list_index;
+ uint8_t func_bit_map;
+ uint8_t afex_vif_list_command;
+ uint8_t func_to_clear;
+};
+struct ecore_func_tx_start_params {
+ struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
+ uint8_t dcb_enabled;
+ uint8_t dcb_version;
+ uint8_t dont_add_pri_0;
+};
+
+struct ecore_func_state_params {
+ struct ecore_func_sp_obj *f_obj;
+
+ /* Current command */
+ enum ecore_func_cmd cmd;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* Params according to the current command */
+ union {
+ struct ecore_func_hw_init_params hw_init;
+ struct ecore_func_hw_reset_params hw_reset;
+ struct ecore_func_start_params start;
+ struct ecore_func_switch_update_params switch_update;
+ struct ecore_func_afex_update_params afex_update;
+ struct ecore_func_afex_viflists_params afex_viflists;
+ struct ecore_func_tx_start_params tx_start;
+ } params;
+};
+
+struct ecore_func_sp_drv_ops {
+ /* Init tool + runtime initialization:
+ * - Common Chip
+ * - Common (per Path)
+ * - Port
+ * - Function phases
+ */
+ int (*init_hw_cmn_chip)(struct bnx2x_softc *sc);
+ int (*init_hw_cmn)(struct bnx2x_softc *sc);
+ int (*init_hw_port)(struct bnx2x_softc *sc);
+ int (*init_hw_func)(struct bnx2x_softc *sc);
+
+ /* Reset Function HW: Common, Port, Function phases. */
+ void (*reset_hw_cmn)(struct bnx2x_softc *sc);
+ void (*reset_hw_port)(struct bnx2x_softc *sc);
+ void (*reset_hw_func)(struct bnx2x_softc *sc);
+
+ /* Prepare/Release FW resources */
+ int (*init_fw)(struct bnx2x_softc *sc);
+ void (*release_fw)(struct bnx2x_softc *sc);
+};
+
+struct ecore_func_sp_obj {
+ enum ecore_func_state state, next_state;
+
+ /* ECORE_FUNC_CMD_XX bits. This object implements "one
+ * pending" paradigm but for debug and tracing purposes it's
+ * more convenient to have different bits for different
+ * commands.
+ */
+ unsigned long pending;
+
+ /* Buffer to use as a ramrod data and its mapping */
+ void *rdata;
+ ecore_dma_addr_t rdata_mapping;
+
+ /* Buffer to use as a afex ramrod data and its mapping.
+ * This can't be same rdata as above because afex ramrod requests
+ * can arrive to the object in parallel to other ramrod requests.
+ */
+ void *afex_rdata;
+ ecore_dma_addr_t afex_rdata_mapping;
+
+ /* this mutex validates that when pending flag is taken, the next
+ * ramrod to be sent will be the one set the pending bit
+ */
+ ECORE_MUTEX one_pending_mutex;
+
+ /* Driver interface */
+ struct ecore_func_sp_drv_ops *drv;
+
+ /**
+ * Performs one state change according to the given parameters.
+ *
+ * @return 0 in case of success and negative value otherwise.
+ */
+ int (*send_cmd)(struct bnx2x_softc *sc,
+ struct ecore_func_state_params *params);
+
+ /**
+ * Checks that the requested state transition is legal.
+ */
+ int (*check_transition)(struct bnx2x_softc *sc,
+ struct ecore_func_sp_obj *o,
+ struct ecore_func_state_params *params);
+
+ /**
+ * Completes the pending command.
+ */
+ int (*complete_cmd)(struct bnx2x_softc *sc,
+ struct ecore_func_sp_obj *o,
+ enum ecore_func_cmd cmd);
+
+ int (*wait_comp)(struct bnx2x_softc *sc, struct ecore_func_sp_obj *o,
+ enum ecore_func_cmd cmd);
+};
+
+/********************** Interfaces ********************************************/
+/* Queueable objects set */
+union ecore_qable_obj {
+ struct ecore_vlan_mac_obj vlan_mac;
+};
+/************** Function state update *********/
+void ecore_init_func_obj(struct bnx2x_softc *sc,
+ struct ecore_func_sp_obj *obj,
+ void *rdata, ecore_dma_addr_t rdata_mapping,
+ void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
+ struct ecore_func_sp_drv_ops *drv_iface);
+
+int ecore_func_state_change(struct bnx2x_softc *sc,
+ struct ecore_func_state_params *params);
+
+enum ecore_func_state ecore_func_get_state(struct bnx2x_softc *sc,
+ struct ecore_func_sp_obj *o);
+/******************* Queue State **************/
+void ecore_init_queue_obj(struct bnx2x_softc *sc,
+ struct ecore_queue_sp_obj *obj, uint8_t cl_id, uint32_t *cids,
+ uint8_t cid_cnt, uint8_t func_id, void *rdata,
+ ecore_dma_addr_t rdata_mapping, unsigned long type);
+
+int ecore_queue_state_change(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params);
+
+/********************* VLAN-MAC ****************/
+void ecore_init_mac_obj(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *mac_obj,
+ uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
+ ecore_dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, ecore_obj_type type,
+ struct ecore_credit_pool_obj *macs_pool);
+
+void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o);
+int ecore_vlan_mac_h_write_lock(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o);
+void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o);
+int ecore_config_vlan_mac(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_ramrod_params *p);
+
+int ecore_vlan_mac_move(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_ramrod_params *p,
+ struct ecore_vlan_mac_obj *dest_o);
+
+/********************* RX MODE ****************/
+
+void ecore_init_rx_mode_obj(struct bnx2x_softc *sc,
+ struct ecore_rx_mode_obj *o);
+
+/**
+ * ecore_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
+ *
+ * @p: Command parameters
+ *
+ * Return: 0 - if operation was successful and there is no pending completions,
+ * positive number - if there are pending completions,
+ * negative - if there were errors
+ */
+int ecore_config_rx_mode(struct bnx2x_softc *sc,
+ struct ecore_rx_mode_ramrod_params *p);
+
+/****************** MULTICASTS ****************/
+
+void ecore_init_mcast_obj(struct bnx2x_softc *sc,
+ struct ecore_mcast_obj *mcast_obj,
+ uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id,
+ uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ ecore_obj_type type);
+
+/**
+ * ecore_config_mcast - Configure multicast MACs list.
+ *
+ * @cmd: command to execute: BNX2X_MCAST_CMD_X
+ *
+ * May configure a new list
+ * provided in p->mcast_list (ECORE_MCAST_CMD_ADD), clean up
+ * (ECORE_MCAST_CMD_DEL) or restore (ECORE_MCAST_CMD_RESTORE) a current
+ * configuration, continue to execute the pending commands
+ * (ECORE_MCAST_CMD_CONT).
+ *
+ * If previous command is still pending or if number of MACs to
+ * configure is more that maximum number of MACs in one command,
+ * the current command will be enqueued to the tail of the
+ * pending commands list.
+ *
+ * Return: 0 is operation was successful and there are no pending completions,
+ * negative if there were errors, positive if there are pending
+ * completions.
+ */
+int ecore_config_mcast(struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd);
+
+/****************** CREDIT POOL ****************/
+void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
+ struct ecore_credit_pool_obj *p, uint8_t func_id,
+ uint8_t func_num);
+void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
+ struct ecore_credit_pool_obj *p, uint8_t func_id,
+ uint8_t func_num);
+
+/****************** RSS CONFIGURATION ****************/
+void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
+ uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
+ void *rdata, ecore_dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ ecore_obj_type type);
+
+/**
+ * ecore_config_rss - Updates RSS configuration according to provided parameters
+ *
+ * Return: 0 in case of success
+ */
+int ecore_config_rss(struct bnx2x_softc *sc,
+ struct ecore_config_rss_params *p);
+
+
+#endif /* ECORE_SP_H */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/elink.c b/src/spdk/dpdk/drivers/net/bnx2x/elink.c
new file mode 100644
index 00000000..b63fd23e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/elink.c
@@ -0,0 +1,13096 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bnx2x.h"
+#include "elink.h"
+#include "ecore_mfw_req.h"
+#include "ecore_fw_defs.h"
+#include "ecore_hsi.h"
+#include "ecore_reg.h"
+
+static elink_status_t elink_link_reset(struct elink_params *params,
+ struct elink_vars *vars,
+ uint8_t reset_ext_phy);
+static elink_status_t elink_check_half_open_conn(struct elink_params *params,
+ struct elink_vars *vars,
+ uint8_t notify);
+static elink_status_t elink_sfp_module_detection(struct elink_phy *phy,
+ struct elink_params *params);
+
+#define MDIO_REG_BANK_CL73_IEEEB0 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
+
+#define MDIO_REG_BANK_CL73_IEEEB1 0x10
+#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV2 0x04
+
+#define MDIO_REG_BANK_RX0 0x80b0
+#define MDIO_RX0_RX_STATUS 0x10
+#define MDIO_RX0_RX_STATUS_SIGDET 0x8000
+#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000
+#define MDIO_RX0_RX_EQ_BOOST 0x1c
+#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX1 0x80c0
+#define MDIO_RX1_RX_EQ_BOOST 0x1c
+#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX2 0x80d0
+#define MDIO_RX2_RX_EQ_BOOST 0x1c
+#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX3 0x80e0
+#define MDIO_RX3_RX_EQ_BOOST 0x1c
+#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX_ALL 0x80f0
+#define MDIO_RX_ALL_RX_EQ_BOOST 0x1c
+#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_TX0 0x8060
+#define MDIO_TX0_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX1 0x8070
+#define MDIO_TX1_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX2 0x8080
+#define MDIO_TX2_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX3 0x8090
+#define MDIO_TX3_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000
+#define MDIO_BLOCK0_XGXS_CONTROL 0x10
+
+#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010
+#define MDIO_BLOCK1_LANE_CTRL0 0x15
+#define MDIO_BLOCK1_LANE_CTRL1 0x16
+#define MDIO_BLOCK1_LANE_CTRL2 0x17
+#define MDIO_BLOCK1_LANE_PRBS 0x19
+
+#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
+#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
+
+#define MDIO_REG_BANK_GP_STATUS 0x8120
+#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 0x3900
+
+#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1)
+
+#define MDIO_REG_BANK_SERDES_DIGITAL 0x8300
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1 0x18
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009
+
+#define MDIO_REG_BANK_OVER_1G 0x8320
+#define MDIO_OVER_1G_DIGCTL_3_4 0x14
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5
+#define MDIO_OVER_1G_UP1 0x19
+#define MDIO_OVER_1G_UP1_2_5G 0x0001
+#define MDIO_OVER_1G_UP1_5G 0x0002
+#define MDIO_OVER_1G_UP1_6G 0x0004
+#define MDIO_OVER_1G_UP1_10G 0x0010
+#define MDIO_OVER_1G_UP1_10GH 0x0008
+#define MDIO_OVER_1G_UP1_12G 0x0020
+#define MDIO_OVER_1G_UP1_12_5G 0x0040
+#define MDIO_OVER_1G_UP1_13G 0x0080
+#define MDIO_OVER_1G_UP1_15G 0x0100
+#define MDIO_OVER_1G_UP1_16G 0x0200
+#define MDIO_OVER_1G_UP2 0x1A
+#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007
+#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038
+#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0
+#define MDIO_OVER_1G_UP3 0x1B
+#define MDIO_OVER_1G_UP3_HIGIG2 0x0001
+#define MDIO_OVER_1G_LP_UP1 0x1C
+#define MDIO_OVER_1G_LP_UP2 0x1D
+#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7
+#define MDIO_OVER_1G_LP_UP3 0x1E
+
+#define MDIO_REG_BANK_REMOTE_PHY 0x8330
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS 0x10
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG 0x0010
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG 0x0600
+
+#define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002
+
+#define MDIO_REG_BANK_CL73_USERB0 0x8370
+#define MDIO_CL73_USERB0_CL73_UCTRL 0x10
+#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL 0x0002
+#define MDIO_CL73_USERB0_CL73_USTAT1 0x11
+#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK 0x0100
+#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37 0x0400
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001
+
+#define MDIO_REG_BANK_AER_BLOCK 0xFFD0
+#define MDIO_AER_BLOCK_AER_REG 0x1E
+
+#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0
+#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040
+#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200
+#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000
+#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000
+#define MDIO_COMBO_IEEE0_MII_STATUS 0x11
+#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004
+#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020
+/*WhenthelinkpartnerisinSGMIImode(bit0=1),then
+bit15=link,bit12=duplex,bits11:10=speed,bit14=acknowledge.
+Theotherbitsarereservedandshouldbezero*/
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
+
+#define MDIO_PMA_DEVAD 0x1
+/*ieee*/
+#define MDIO_PMA_REG_CTRL 0x0
+#define MDIO_PMA_REG_STATUS 0x1
+#define MDIO_PMA_REG_10G_CTRL2 0x7
+#define MDIO_PMA_REG_TX_DISABLE 0x0009
+#define MDIO_PMA_REG_RX_SD 0xa
+/*bnx2x*/
+#define MDIO_PMA_REG_BNX2X_CTRL 0x0096
+#define MDIO_PMA_REG_FEC_CTRL 0x00ab
+#define MDIO_PMA_LASI_RXCTRL 0x9000
+#define MDIO_PMA_LASI_TXCTRL 0x9001
+#define MDIO_PMA_LASI_CTRL 0x9002
+#define MDIO_PMA_LASI_RXSTAT 0x9003
+#define MDIO_PMA_LASI_TXSTAT 0x9004
+#define MDIO_PMA_LASI_STAT 0x9005
+#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800
+#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808
+#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809
+#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02
+#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09
+#define MDIO_PMA_REG_MISC_CTRL 0xca0a
+#define MDIO_PMA_REG_GEN_CTRL 0xca10
+#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
+#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
+#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
+#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
+#define MDIO_PMA_REG_ROM_VER1 0xca19
+#define MDIO_PMA_REG_ROM_VER2 0xca1a
+#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
+#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d
+#define MDIO_PMA_REG_PLL_CTRL 0xca1e
+#define MDIO_PMA_REG_MISC_CTRL0 0xca23
+#define MDIO_PMA_REG_LRM_MODE 0xca3f
+#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46
+#define MDIO_PMA_REG_MISC_CTRL1 0xca85
+
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002
+#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01
+#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05
+
+#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309
+#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
+#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
+#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
+#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
+#define MDIO_PMA_REG_8727_PCS_GP 0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
+
+#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
+#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
+#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
+#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
+#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08
+
+#define MDIO_PMA_REG_7101_RESET 0xc000
+#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
+#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
+#define MDIO_PMA_REG_7101_VER1 0xc026
+#define MDIO_PMA_REG_7101_VER2 0xc027
+
+#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
+#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
+#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
+#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
+#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
+#define MDIO_PMA_REG_8481_LED5_MASK 0xa838
+#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
+#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
+
+#define MDIO_WIS_DEVAD 0x2
+/*bnx2x*/
+#define MDIO_WIS_REG_LASI_CNTL 0x9002
+#define MDIO_WIS_REG_LASI_STATUS 0x9005
+
+#define MDIO_PCS_DEVAD 0x3
+#define MDIO_PCS_REG_STATUS 0x0020
+#define MDIO_PCS_REG_LASI_STATUS 0x9005
+#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000
+#define MDIO_PCS_REG_7101_SPI_MUX 0xD008
+#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
+#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
+#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
+
+#define MDIO_XS_DEVAD 0x4
+#define MDIO_XS_REG_STATUS 0x0001
+#define MDIO_XS_PLL_SEQUENCER 0x8000
+#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a
+
+#define MDIO_XS_8706_REG_BANK_RX0 0x80bc
+#define MDIO_XS_8706_REG_BANK_RX1 0x80cc
+#define MDIO_XS_8706_REG_BANK_RX2 0x80dc
+#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
+#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
+
+#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA
+
+#define MDIO_AN_DEVAD 0x7
+/*ieee*/
+#define MDIO_AN_REG_CTRL 0x0000
+#define MDIO_AN_REG_STATUS 0x0001
+#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020
+#define MDIO_AN_REG_ADV_PAUSE 0x0010
+#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400
+#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800
+#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00
+#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
+#define MDIO_AN_REG_ADV 0x0011
+#define MDIO_AN_REG_ADV2 0x0012
+#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
+#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014
+#define MDIO_AN_REG_MASTER_STATUS 0x0021
+#define MDIO_AN_REG_EEE_ADV 0x003c
+#define MDIO_AN_REG_LP_EEE_ADV 0x003d
+/*bnx2x*/
+#define MDIO_AN_REG_LINK_STATUS 0x8304
+#define MDIO_AN_REG_CL37_CL73 0x8370
+#define MDIO_AN_REG_CL37_AN 0xffe0
+#define MDIO_AN_REG_CL37_FC_LD 0xffe4
+#define MDIO_AN_REG_CL37_FC_LP 0xffe5
+#define MDIO_AN_REG_1000T_STATUS 0xffea
+
+#define MDIO_AN_REG_8073_2_5G 0x8329
+#define MDIO_AN_REG_8073_BAM 0x8350
+
+#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
+#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
+#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40
+#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
+#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
+#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
+#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
+#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL 0xfff0
+#define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF 0x0008
+#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
+#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
+#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
+#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
+
+/* BNX2X84823 only */
+#define MDIO_CTL_DEVAD 0x1e
+#define MDIO_CTL_REG_84823_MEDIA 0x401a
+#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
+ /* These pins configure the BNX2X84823 interface to MAC after reset. */
+#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
+#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
+ /* These pins configure the BNX2X84823 interface to Line after reset. */
+#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
+ /* When this pin is active high during reset, 10GBASE-T core is power
+ * down, When it is active low the 10GBASE-T is power up
+ */
+#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b
+#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+
+/* BNX2X84833 only */
+#define MDIO_84833_TOP_CFG_FW_REV 0x400f
+#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
+#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
+#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
+#define MDIO_84833_SUPER_ISOLATE 0x8000
+/* These are mailbox register set used by 84833. */
+#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005
+#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006
+#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
+#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
+#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
+#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037
+#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038
+#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039
+#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a
+#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b
+#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c
+#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0
+#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26
+#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27
+#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28
+#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29
+#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30
+#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31
+
+/* Mailbox command set used by 84833. */
+#define PHY84833_CMD_SET_PAIR_SWAP 0x8001
+#define PHY84833_CMD_GET_EEE_MODE 0x8008
+#define PHY84833_CMD_SET_EEE_MODE 0x8009
+#define PHY84833_CMD_GET_CURRENT_TEMP 0x8031
+/* Mailbox status set used by 84833. */
+#define PHY84833_STATUS_CMD_RECEIVED 0x0001
+#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002
+#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004
+#define PHY84833_STATUS_CMD_COMPLETE_ERROR 0x0008
+#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS 0x0010
+#define PHY84833_STATUS_CMD_SYSTEM_BOOT 0x0020
+#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
+#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
+#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
+
+/* Warpcore clause 45 addressing */
+#define MDIO_WC_DEVAD 0x3
+#define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0
+#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
+#define MDIO_WC_REG_PCS_STATUS2 0x0021
+#define MDIO_WC_REG_PMD_KR_CONTROL 0x0096
+#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
+#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
+#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL3 0x8018
+#define MDIO_WC_REG_XGXSBLK1_LANETEST0 0x801a
+#define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061
+#define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071
+#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081
+#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091
+#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000
+#define MDIO_WC_REG_TX1_TX_DRIVER 0x8077
+#define MDIO_WC_REG_TX2_TX_DRIVER 0x8087
+#define MDIO_WC_REG_TX3_TX_DRIVER 0x8097
+#define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9
+#define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9
+#define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba
+#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
+#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
+#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
+#define MDIO_WC_REG_XGXS_STATUS3 0x8129
+#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
+#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131
+#define MDIO_WC_REG_XGXS_STATUS4 0x813c
+#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141
+#define MDIO_WC_REG_XGXS_X2_CONTROL3 0x8142
+#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B
+#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169
+#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0
+#define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1
+#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2
+#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1
+#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE
+#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc
+#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE
+#define MDIO_WC_REG_DSC1B0_UC_CTRL 0x820e
+#define MDIO_WC_REG_DSC1B0_UC_CTRL_RDY4CMD (1<<7)
+#define MDIO_WC_REG_DSC_SMC 0x8213
+#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e
+#define MDIO_WC_REG_TX_FIR_TAP 0x82e2
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00
+#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000
+#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP 0x82e2
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3
+#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6
+#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7
+#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302
+#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304
+#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308
+#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309
+#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
+#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c
+#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
+#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e
+#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
+#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
+#define MDIO_WC_REG_DIGITAL5_LINK_STATUS 0x834d
+#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
+#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
+#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_CL73_USERB0_CTRL 0x8370
+#define MDIO_WC_REG_CL73_USERB0_USTAT 0x8371
+#define MDIO_WC_REG_CL73_BAM_CTRL1 0x8372
+#define MDIO_WC_REG_CL73_BAM_CTRL2 0x8373
+#define MDIO_WC_REG_CL73_BAM_CTRL3 0x8374
+#define MDIO_WC_REG_CL73_BAM_CODE_FIELD 0x837b
+#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390
+#define MDIO_WC_REG_TX66_CONTROL 0x83b0
+#define MDIO_WC_REG_RX66_CONTROL 0x83c0
+#define MDIO_WC_REG_RX66_SCW0 0x83c2
+#define MDIO_WC_REG_RX66_SCW1 0x83c3
+#define MDIO_WC_REG_RX66_SCW2 0x83c4
+#define MDIO_WC_REG_RX66_SCW3 0x83c5
+#define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6
+#define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7
+#define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8
+#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9
+#define MDIO_WC_REG_FX100_CTRL1 0x8400
+#define MDIO_WC_REG_FX100_CTRL3 0x8402
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL5 0x8436
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL6 0x8437
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL7 0x8438
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL9 0x8439
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL10 0x843a
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL11 0x843b
+#define MDIO_WC_REG_ETA_CL73_OUI1 0x8453
+#define MDIO_WC_REG_ETA_CL73_OUI2 0x8454
+#define MDIO_WC_REG_ETA_CL73_OUI3 0x8455
+#define MDIO_WC_REG_ETA_CL73_LD_BAM_CODE 0x8456
+#define MDIO_WC_REG_ETA_CL73_LD_UD_CODE 0x8457
+#define MDIO_WC_REG_MICROBLK_CMD 0xffc2
+#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5
+#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc
+
+#define MDIO_WC_REG_AERBLK_AER 0xffde
+#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0
+#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1
+
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4
+
+#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141
+
+#define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f
+
+/* 54618se */
+#define MDIO_REG_GPHY_MII_STATUS 0x1
+#define MDIO_REG_GPHY_PHYID_LSB 0x3
+#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
+#define MDIO_REG_GPHY_CL45_REG_WRITE 0x4000
+#define MDIO_REG_GPHY_CL45_REG_READ 0xc000
+#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
+#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15
+#define MDIO_REG_GPHY_EXP_ACCESS 0x17
+#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00
+#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40
+#define MDIO_REG_GPHY_AUX_STATUS 0x19
+#define MDIO_REG_INTR_STATUS 0x1a
+#define MDIO_REG_INTR_MASK 0x1b
+#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
+#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
+#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
+#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
+#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
+#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8)
+
+typedef elink_status_t(*read_sfp_module_eeprom_func_p) (struct elink_phy * phy,
+ struct elink_params *
+ params,
+ uint8_t dev_addr,
+ uint16_t addr,
+ uint8_t byte_cnt,
+ uint8_t * o_buf,
+ uint8_t);
+/********************************************************/
+#define ELINK_ETH_HLEN 14
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ELINK_ETH_OVREHEAD (ELINK_ETH_HLEN + 8 + 8)
+#define ELINK_ETH_MIN_PACKET_SIZE 60
+#define ELINK_ETH_MAX_PACKET_SIZE 1500
+#define ELINK_ETH_MAX_JUMBO_PACKET_SIZE 9600
+#define ELINK_MDIO_ACCESS_TIMEOUT 1000
+#define WC_LANE_MAX 4
+#define I2C_SWITCH_WIDTH 2
+#define I2C_BSC0 0
+#define I2C_BSC1 1
+#define I2C_WA_RETRY_CNT 3
+#define I2C_WA_PWR_ITER (I2C_WA_RETRY_CNT - 1)
+#define MCPR_IMC_COMMAND_READ_OP 1
+#define MCPR_IMC_COMMAND_WRITE_OP 2
+
+/* LED Blink rate that will achieve ~15.9Hz */
+#define LED_BLINK_RATE_VAL_E3 354
+#define LED_BLINK_RATE_VAL_E1X_E2 480
+/***********************************************************/
+/* Shortcut definitions */
+/***********************************************************/
+
+#define ELINK_NIG_LATCH_BC_ENABLE_MI_INT 0
+
+#define ELINK_NIG_STATUS_EMAC0_MI_INT \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT
+#define ELINK_NIG_STATUS_XGXS0_LINK10G \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
+#define ELINK_NIG_STATUS_XGXS0_LINK_STATUS \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
+#define ELINK_NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
+#define ELINK_NIG_STATUS_SERDES0_LINK_STATUS \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
+#define ELINK_NIG_MASK_MI_INT \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
+#define ELINK_NIG_MASK_XGXS0_LINK10G \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
+#define ELINK_NIG_MASK_XGXS0_LINK_STATUS \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
+#define ELINK_NIG_MASK_SERDES0_LINK_STATUS \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
+
+#define ELINK_MDIO_AN_CL73_OR_37_COMPLETE \
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
+
+#define ELINK_XGXS_RESET_BITS \
+ (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
+
+#define ELINK_SERDES_RESET_BITS \
+ (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
+
+#define ELINK_AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
+#define ELINK_AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
+#define ELINK_AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
+#define ELINK_AUTONEG_PARALLEL \
+ SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
+#define ELINK_AUTONEG_SGMII_FIBER_AUTODET \
+ SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
+#define ELINK_AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
+
+#define ELINK_GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
+#define ELINK_GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
+#define ELINK_GP_STATUS_SPEED_MASK \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
+#define ELINK_GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
+#define ELINK_GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
+#define ELINK_GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
+#define ELINK_GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
+#define ELINK_GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
+#define ELINK_GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
+#define ELINK_GP_STATUS_10G_HIG \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
+#define ELINK_GP_STATUS_10G_CX4 \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
+#define ELINK_GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
+#define ELINK_GP_STATUS_10G_KX4 \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
+#define ELINK_GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR
+#define ELINK_GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
+#define ELINK_GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
+#define ELINK_GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
+#define ELINK_GP_STATUS_20G_KR2 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2
+#define ELINK_LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define ELINK_LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define ELINK_LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
+#define ELINK_LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define ELINK_LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
+#define ELINK_LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
+#define ELINK_LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
+#define ELINK_LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
+#define ELINK_LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
+#define ELINK_LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
+#define ELINK_LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
+#define ELINK_LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define ELINK_LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define ELINK_LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
+#define ELINK_LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
+
+#define ELINK_LINK_UPDATE_MASK \
+ (LINK_STATUS_SPEED_AND_DUPLEX_MASK | \
+ LINK_STATUS_LINK_UP | \
+ LINK_STATUS_PHYSICAL_LINK_FLAG | \
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \
+ LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \
+ LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \
+ LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \
+ LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \
+ LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
+
+#define ELINK_SFP_EEPROM_CON_TYPE_ADDR 0x2
+#define ELINK_SFP_EEPROM_CON_TYPE_VAL_LC 0x7
+#define ELINK_SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
+#define ELINK_SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
+
+#define ELINK_SFP_EEPROM_COMP_CODE_ADDR 0x3
+#define ELINK_SFP_EEPROM_COMP_CODE_SR_MASK (1<<4)
+#define ELINK_SFP_EEPROM_COMP_CODE_LR_MASK (1<<5)
+#define ELINK_SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6)
+
+#define ELINK_SFP_EEPROM_FC_TX_TECH_ADDR 0x8
+#define ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
+#define ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
+
+#define ELINK_SFP_EEPROM_OPTIONS_ADDR 0x40
+#define ELINK_SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
+#define ELINK_SFP_EEPROM_OPTIONS_SIZE 2
+
+#define ELINK_EDC_MODE_LINEAR 0x0022
+#define ELINK_EDC_MODE_LIMITING 0x0044
+#define ELINK_EDC_MODE_PASSIVE_DAC 0x0055
+#define ELINK_EDC_MODE_ACTIVE_DAC 0x0066
+
+/* ETS defines*/
+#define DCBX_INVALID_COS (0xFF)
+
+#define ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
+#define ELINK_ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000)
+#define ELINK_ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360)
+#define ELINK_ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720)
+#define ELINK_ETS_E3B0_PBF_MIN_W_VAL (10000)
+
+#define ELINK_MAX_PACKET_SIZE (9700)
+#define MAX_KR_LINK_RETRY 4
+
+/**********************************************************/
+/* INTERFACE */
+/**********************************************************/
+
+#define CL22_WR_OVER_CL45(_sc, _phy, _bank, _addr, _val) \
+ elink_cl45_write(_sc, _phy, \
+ (_phy)->def_md_devad, \
+ (_bank + (_addr & 0xf)), \
+ _val)
+
+#define CL22_RD_OVER_CL45(_sc, _phy, _bank, _addr, _val) \
+ elink_cl45_read(_sc, _phy, \
+ (_phy)->def_md_devad, \
+ (_bank + (_addr & 0xf)), \
+ _val)
+
+static uint32_t elink_bits_en(struct bnx2x_softc *sc, uint32_t reg, uint32_t bits)
+{
+ uint32_t val = REG_RD(sc, reg);
+
+ val |= bits;
+ REG_WR(sc, reg, val);
+ return val;
+}
+
+static uint32_t elink_bits_dis(struct bnx2x_softc *sc, uint32_t reg,
+ uint32_t bits)
+{
+ uint32_t val = REG_RD(sc, reg);
+
+ val &= ~bits;
+ REG_WR(sc, reg, val);
+ return val;
+}
+
+/*
+ * elink_check_lfa - This function checks if link reinitialization is required,
+ * or link flap can be avoided.
+ *
+ * @params: link parameters
+ * Returns 0 if Link Flap Avoidance conditions are met otherwise, the failed
+ * condition code.
+ */
+static int elink_check_lfa(struct elink_params *params)
+{
+ uint32_t link_status, cfg_idx, lfa_mask, cfg_size;
+ uint32_t cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config;
+ uint32_t saved_val, req_val, eee_status;
+ struct bnx2x_softc *sc = params->sc;
+
+ additional_config =
+ REG_RD(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config));
+
+ /* NOTE: must be first condition checked -
+ * to verify DCC bit is cleared in any case!
+ */
+ if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
+ PMD_DRV_LOG(DEBUG, "No LFA due to DCC flap after clp exit");
+ REG_WR(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config),
+ additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
+ return LFA_DCC_LFA_DISABLED;
+ }
+
+ /* Verify that link is up */
+ link_status = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[params->port].link_status));
+ if (!(link_status & LINK_STATUS_LINK_UP))
+ return LFA_LINK_DOWN;
+
+ /* if loaded after BOOT from SAN, don't flap the link in any case and
+ * rely on link set by preboot driver
+ */
+ if (params->feature_config_flags & ELINK_FEATURE_CONFIG_BOOT_FROM_SAN)
+ return 0;
+
+ /* Verify that loopback mode is not set */
+ if (params->loopback_mode)
+ return LFA_LOOPBACK_ENABLED;
+
+ /* Verify that MFW supports LFA */
+ if (!params->lfa_base)
+ return LFA_MFW_IS_TOO_OLD;
+
+ if (params->num_phys == 3) {
+ cfg_size = 2;
+ lfa_mask = 0xffffffff;
+ } else {
+ cfg_size = 1;
+ lfa_mask = 0xffff;
+ }
+
+ /* Compare Duplex */
+ saved_val = REG_RD(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, req_duplex));
+ req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
+ if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+ PMD_DRV_LOG(INFO, "Duplex mismatch %x vs. %x",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
+ return LFA_DUPLEX_MISMATCH;
+ }
+ /* Compare Flow Control */
+ saved_val = REG_RD(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, req_flow_ctrl));
+ req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
+ if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+ PMD_DRV_LOG(DEBUG, "Flow control mismatch %x vs. %x",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
+ return LFA_FLOW_CTRL_MISMATCH;
+ }
+ /* Compare Link Speed */
+ saved_val = REG_RD(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, req_line_speed));
+ req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
+ if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+ PMD_DRV_LOG(DEBUG, "Link speed mismatch %x vs. %x",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
+ return LFA_LINK_SPEED_MISMATCH;
+ }
+
+ for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) {
+ cur_speed_cap_mask = REG_RD(sc, params->lfa_base +
+ offsetof(struct shmem_lfa,
+ speed_cap_mask[cfg_idx]));
+
+ if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
+ PMD_DRV_LOG(DEBUG, "Speed Cap mismatch %x vs. %x",
+ cur_speed_cap_mask,
+ params->speed_cap_mask[cfg_idx]);
+ return LFA_SPEED_CAP_MISMATCH;
+ }
+ }
+
+ cur_req_fc_auto_adv =
+ REG_RD(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config)) &
+ REQ_FC_AUTO_ADV_MASK;
+
+ if ((uint16_t) cur_req_fc_auto_adv != params->req_fc_auto_adv) {
+ PMD_DRV_LOG(DEBUG, "Flow Ctrl AN mismatch %x vs. %x",
+ cur_req_fc_auto_adv, params->req_fc_auto_adv);
+ return LFA_FLOW_CTRL_MISMATCH;
+ }
+
+ eee_status = REG_RD(sc, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]));
+
+ if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^
+ (params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI)) ||
+ ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
+ (params->eee_mode & ELINK_EEE_MODE_ADV_LPI))) {
+ PMD_DRV_LOG(DEBUG, "EEE mismatch %x vs. %x", params->eee_mode,
+ eee_status);
+ return LFA_EEE_MISMATCH;
+ }
+
+ /* LFA conditions are met */
+ return 0;
+}
+
+/******************************************************************/
+/* EPIO/GPIO section */
+/******************************************************************/
+static void elink_get_epio(struct bnx2x_softc *sc, uint32_t epio_pin,
+ uint32_t * en)
+{
+ uint32_t epio_mask, gp_oenable;
+ *en = 0;
+ /* Sanity check */
+ if (epio_pin > 31) {
+ PMD_DRV_LOG(DEBUG, "Invalid EPIO pin %d to get", epio_pin);
+ return;
+ }
+
+ epio_mask = 1 << epio_pin;
+ /* Set this EPIO to output */
+ gp_oenable = REG_RD(sc, MCP_REG_MCPR_GP_OENABLE);
+ REG_WR(sc, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask);
+
+ *en = (REG_RD(sc, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
+}
+
+static void elink_set_epio(struct bnx2x_softc *sc, uint32_t epio_pin, uint32_t en)
+{
+ uint32_t epio_mask, gp_output, gp_oenable;
+
+ /* Sanity check */
+ if (epio_pin > 31) {
+ PMD_DRV_LOG(DEBUG, "Invalid EPIO pin %d to set", epio_pin);
+ return;
+ }
+ PMD_DRV_LOG(DEBUG, "Setting EPIO pin %d to %d", epio_pin, en);
+ epio_mask = 1 << epio_pin;
+ /* Set this EPIO to output */
+ gp_output = REG_RD(sc, MCP_REG_MCPR_GP_OUTPUTS);
+ if (en)
+ gp_output |= epio_mask;
+ else
+ gp_output &= ~epio_mask;
+
+ REG_WR(sc, MCP_REG_MCPR_GP_OUTPUTS, gp_output);
+
+ /* Set the value for this EPIO */
+ gp_oenable = REG_RD(sc, MCP_REG_MCPR_GP_OENABLE);
+ REG_WR(sc, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask);
+}
+
+static void elink_set_cfg_pin(struct bnx2x_softc *sc, uint32_t pin_cfg,
+ uint32_t val)
+{
+ if (pin_cfg == PIN_CFG_NA)
+ return;
+ if (pin_cfg >= PIN_CFG_EPIO0) {
+ elink_set_epio(sc, pin_cfg - PIN_CFG_EPIO0, val);
+ } else {
+ uint8_t gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+ uint8_t gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+ elink_cb_gpio_write(sc, gpio_num, (uint8_t) val, gpio_port);
+ }
+}
+
+static uint32_t elink_get_cfg_pin(struct bnx2x_softc *sc, uint32_t pin_cfg,
+ uint32_t * val)
+{
+ if (pin_cfg == PIN_CFG_NA)
+ return ELINK_STATUS_ERROR;
+ if (pin_cfg >= PIN_CFG_EPIO0) {
+ elink_get_epio(sc, pin_cfg - PIN_CFG_EPIO0, val);
+ } else {
+ uint8_t gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+ uint8_t gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+ *val = elink_cb_gpio_read(sc, gpio_num, gpio_port);
+ }
+ return ELINK_STATUS_OK;
+
+}
+
+/******************************************************************/
+/* PFC section */
+/******************************************************************/
+static void elink_update_pfc_xmac(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t xmac_base;
+ uint32_t pause_val, pfc0_val, pfc1_val;
+
+ /* XMAC base adrr */
+ xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ /* Initialize pause and pfc registers */
+ pause_val = 0x18000;
+ pfc0_val = 0xFFFF8000;
+ pfc1_val = 0x2;
+
+ /* No PFC support */
+ if (!(params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)) {
+
+ /* RX flow control - Process pause frame in receive direction
+ */
+ if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX)
+ pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
+
+ /* TX flow control - Send pause packet when buffer is full */
+ if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)
+ pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
+ } else { /* PFC support */
+ pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
+ XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
+ XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
+ XMAC_PFC_CTRL_HI_REG_TX_PFC_EN |
+ XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+ /* Write pause and PFC registers */
+ REG_WR(sc, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+ REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+ REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+ pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+
+ }
+
+ /* Write pause and PFC registers */
+ REG_WR(sc, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+ REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+ REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+
+ /* Set MAC address for source TX Pause/PFC frames */
+ REG_WR(sc, xmac_base + XMAC_REG_CTRL_SA_LO,
+ ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) | (params->mac_addr[5])));
+ REG_WR(sc, xmac_base + XMAC_REG_CTRL_SA_HI,
+ ((params->mac_addr[0] << 8) | (params->mac_addr[1])));
+
+ DELAY(30);
+}
+
+/******************************************************************/
+/* MAC/PBF section */
+/******************************************************************/
+static void elink_set_mdio_clk(struct bnx2x_softc *sc, uint32_t emac_base)
+{
+ uint32_t new_mode, cur_mode;
+ uint32_t clc_cnt;
+ /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ * (a value of 49==0x31) and make sure that the AUTO poll is off
+ */
+ cur_mode = REG_RD(sc, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+
+ if (USES_WARPCORE(sc))
+ clc_cnt = 74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
+ else
+ clc_cnt = 49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
+
+ if (((cur_mode & EMAC_MDIO_MODE_CLOCK_CNT) == clc_cnt) &&
+ (cur_mode & (EMAC_MDIO_MODE_CLAUSE_45)))
+ return;
+
+ new_mode = cur_mode &
+ ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
+ new_mode |= clc_cnt;
+ new_mode |= (EMAC_MDIO_MODE_CLAUSE_45);
+
+ PMD_DRV_LOG(DEBUG, "Changing emac_mode from 0x%x to 0x%x",
+ cur_mode, new_mode);
+ REG_WR(sc, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode);
+ DELAY(40);
+}
+
+static void elink_set_mdio_emac_per_phy(struct bnx2x_softc *sc,
+ struct elink_params *params)
+{
+ uint8_t phy_index;
+ /* Set mdio clock per phy */
+ for (phy_index = ELINK_INT_PHY; phy_index < params->num_phys;
+ phy_index++)
+ elink_set_mdio_clk(sc, params->phy[phy_index].mdio_ctrl);
+}
+
+static uint8_t elink_is_4_port_mode(struct bnx2x_softc *sc)
+{
+ uint32_t port4mode_ovwr_val;
+ /* Check 4-port override enabled */
+ port4mode_ovwr_val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
+ if (port4mode_ovwr_val & (1 << 0)) {
+ /* Return 4-port mode override value */
+ return (port4mode_ovwr_val & (1 << 1)) == (1 << 1);
+ }
+ /* Return 4-port mode from input pin */
+ return (uint8_t) REG_RD(sc, MISC_REG_PORT4MODE_EN);
+}
+
+static void elink_emac_init(struct elink_params *params)
+{
+ /* reset and unreset the emac core */
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port = params->port;
+ uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ uint32_t val;
+ uint16_t timeout;
+
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+ DELAY(5);
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+
+ /* init emac - use read-modify-write */
+ /* self clear reset */
+ val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE);
+ elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MODE,
+ (val | EMAC_MODE_RESET));
+
+ timeout = 200;
+ do {
+ val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE);
+ PMD_DRV_LOG(DEBUG, "EMAC reset reg is %u", val);
+ if (!timeout) {
+ PMD_DRV_LOG(DEBUG, "EMAC timeout!");
+ return;
+ }
+ timeout--;
+ } while (val & EMAC_MODE_RESET);
+
+ elink_set_mdio_emac_per_phy(sc, params);
+ /* Set mac address */
+ val = ((params->mac_addr[0] << 8) | params->mac_addr[1]);
+ elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MAC_MATCH, val);
+
+ val = ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) | params->mac_addr[5]);
+ elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MAC_MATCH + 4, val);
+}
+
+static void elink_set_xumac_nig(struct elink_params *params,
+ uint16_t tx_pause_en, uint8_t enable)
+{
+ struct bnx2x_softc *sc = params->sc;
+
+ REG_WR(sc, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN,
+ enable);
+ REG_WR(sc, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN,
+ enable);
+ REG_WR(sc, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN :
+ NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
+}
+
+static void elink_set_umac_rxtx(struct elink_params *params, uint8_t en)
+{
+ uint32_t umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ uint32_t val;
+ struct bnx2x_softc *sc = params->sc;
+ if (!(REG_RD(sc, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
+ return;
+ val = REG_RD(sc, umac_base + UMAC_REG_COMMAND_CONFIG);
+ if (en)
+ val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA |
+ UMAC_COMMAND_CONFIG_REG_RX_ENA);
+ else
+ val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA |
+ UMAC_COMMAND_CONFIG_REG_RX_ENA);
+ /* Disable RX and TX */
+ REG_WR(sc, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+}
+
+static void elink_umac_enable(struct elink_params *params,
+ struct elink_vars *vars, uint8_t lb)
+{
+ uint32_t val;
+ uint32_t umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ struct bnx2x_softc *sc = params->sc;
+ /* Reset UMAC */
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+ DELAY(1000 * 1);
+
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+
+ PMD_DRV_LOG(DEBUG, "enabling UMAC");
+
+ /* This register opens the gate for the UMAC despite its name */
+ REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4, 1);
+
+ val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN |
+ UMAC_COMMAND_CONFIG_REG_PAD_EN |
+ UMAC_COMMAND_CONFIG_REG_SW_RESET |
+ UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
+ switch (vars->line_speed) {
+ case ELINK_SPEED_10:
+ val |= (0 << 2);
+ break;
+ case ELINK_SPEED_100:
+ val |= (1 << 2);
+ break;
+ case ELINK_SPEED_1000:
+ val |= (2 << 2);
+ break;
+ case ELINK_SPEED_2500:
+ val |= (3 << 2);
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Invalid speed for UMAC %d",
+ vars->line_speed);
+ break;
+ }
+ if (!(vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
+ val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;
+
+ if (!(vars->flow_ctrl & ELINK_FLOW_CTRL_RX))
+ val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
+
+ if (vars->duplex == DUPLEX_HALF)
+ val |= UMAC_COMMAND_CONFIG_REG_HD_ENA;
+
+ REG_WR(sc, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+ DELAY(50);
+
+ /* Configure UMAC for EEE */
+ if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+ PMD_DRV_LOG(DEBUG, "configured UMAC for EEE");
+ REG_WR(sc, umac_base + UMAC_REG_UMAC_EEE_CTRL,
+ UMAC_UMAC_EEE_CTRL_REG_EEE_EN);
+ REG_WR(sc, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
+ } else {
+ REG_WR(sc, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0);
+ }
+
+ /* Set MAC address for source TX Pause/PFC frames (under SW reset) */
+ REG_WR(sc, umac_base + UMAC_REG_MAC_ADDR0,
+ ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) | (params->mac_addr[5])));
+ REG_WR(sc, umac_base + UMAC_REG_MAC_ADDR1,
+ ((params->mac_addr[0] << 8) | (params->mac_addr[1])));
+
+ /* Enable RX and TX */
+ val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN;
+ val |= UMAC_COMMAND_CONFIG_REG_TX_ENA | UMAC_COMMAND_CONFIG_REG_RX_ENA;
+ REG_WR(sc, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+ DELAY(50);
+
+ /* Remove SW Reset */
+ val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
+
+ /* Check loopback mode */
+ if (lb)
+ val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
+ REG_WR(sc, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+
+ /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+ * length used by the MAC receive logic to check frames.
+ */
+ REG_WR(sc, umac_base + UMAC_REG_MAXFR, 0x2710);
+ elink_set_xumac_nig(params,
+ ((vars->flow_ctrl & ELINK_FLOW_CTRL_TX) != 0), 1);
+ vars->mac_type = ELINK_MAC_TYPE_UMAC;
+
+}
+
+/* Define the XMAC mode */
+static void elink_xmac_init(struct elink_params *params, uint32_t max_speed)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t is_port4mode = elink_is_4_port_mode(sc);
+
+ /* In 4-port mode, need to set the mode only once, so if XMAC is
+ * already out of reset, it means the mode has already been set,
+ * and it must not* reset the XMAC again, since it controls both
+ * ports of the path
+ */
+
+ if (((CHIP_NUM(sc) == CHIP_NUM_57840_4_10) ||
+ (CHIP_NUM(sc) == CHIP_NUM_57840_2_20) ||
+ (CHIP_NUM(sc) == CHIP_NUM_57840_OBS)) &&
+ is_port4mode &&
+ (REG_RD(sc, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC)) {
+ PMD_DRV_LOG(DEBUG, "XMAC already out of reset in 4-port mode");
+ return;
+ }
+
+ /* Hard reset */
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ MISC_REGISTERS_RESET_REG_2_XMAC);
+ DELAY(1000 * 1);
+
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ MISC_REGISTERS_RESET_REG_2_XMAC);
+ if (is_port4mode) {
+ PMD_DRV_LOG(DEBUG, "Init XMAC to 2 ports x 10G per path");
+
+ /* Set the number of ports on the system side to up to 2 */
+ REG_WR(sc, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+
+ /* Set the number of ports on the Warp Core to 10G */
+ REG_WR(sc, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ } else {
+ /* Set the number of ports on the system side to 1 */
+ REG_WR(sc, MISC_REG_XMAC_CORE_PORT_MODE, 0);
+ if (max_speed == ELINK_SPEED_10000) {
+ PMD_DRV_LOG(DEBUG,
+ "Init XMAC to 10G x 1 port per path");
+ /* Set the number of ports on the Warp Core to 10G */
+ REG_WR(sc, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ } else {
+ PMD_DRV_LOG(DEBUG,
+ "Init XMAC to 20G x 2 ports per path");
+ /* Set the number of ports on the Warp Core to 20G */
+ REG_WR(sc, MISC_REG_XMAC_PHY_PORT_MODE, 1);
+ }
+ }
+ /* Soft reset */
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+ DELAY(1000 * 1);
+
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+
+}
+
+static void elink_set_xmac_rxtx(struct elink_params *params, uint8_t en)
+{
+ uint8_t port = params->port;
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+ uint32_t val;
+
+ if (REG_RD(sc, MISC_REG_RESET_REG_2) & MISC_REGISTERS_RESET_REG_2_XMAC) {
+ /* Send an indication to change the state in the NIG back to XON
+ * Clearing this bit enables the next set of this bit to get
+ * rising edge
+ */
+ pfc_ctrl = REG_RD(sc, xmac_base + XMAC_REG_PFC_CTRL_HI);
+ REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl & ~(1 << 1)));
+ REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl | (1 << 1)));
+ PMD_DRV_LOG(DEBUG, "Disable XMAC on port %x", port);
+ val = REG_RD(sc, xmac_base + XMAC_REG_CTRL);
+ if (en)
+ val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+ else
+ val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+ REG_WR(sc, xmac_base + XMAC_REG_CTRL, val);
+ }
+}
+
+static elink_status_t elink_xmac_enable(struct elink_params *params,
+ struct elink_vars *vars, uint8_t lb)
+{
+ uint32_t val, xmac_base;
+ struct bnx2x_softc *sc = params->sc;
+ PMD_DRV_LOG(DEBUG, "enabling XMAC");
+
+ xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ elink_xmac_init(params, vars->line_speed);
+
+ /* This register determines on which events the MAC will assert
+ * error on the i/f to the NIG along w/ EOP.
+ */
+
+ /* This register tells the NIG whether to send traffic to UMAC
+ * or XMAC
+ */
+ REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4, 0);
+
+ /* When XMAC is in XLGMII mode, disable sending idles for fault
+ * detection.
+ */
+ if (!(params->phy[ELINK_INT_PHY].flags & ELINK_FLAGS_TX_ERROR_CHECK)) {
+ REG_WR(sc, xmac_base + XMAC_REG_RX_LSS_CTRL,
+ (XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE |
+ XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE));
+ REG_WR(sc, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+ REG_WR(sc, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+ }
+ /* Set Max packet size */
+ REG_WR(sc, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
+
+ /* CRC append for Tx packets */
+ REG_WR(sc, xmac_base + XMAC_REG_TX_CTRL, 0xC800);
+
+ /* update PFC */
+ elink_update_pfc_xmac(params, vars);
+
+ if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+ PMD_DRV_LOG(DEBUG, "Setting XMAC for EEE");
+ REG_WR(sc, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
+ REG_WR(sc, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
+ } else {
+ REG_WR(sc, xmac_base + XMAC_REG_EEE_CTRL, 0x0);
+ }
+
+ /* Enable TX and RX */
+ val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
+
+ /* Set MAC in XLGMII mode for dual-mode */
+ if ((vars->line_speed == ELINK_SPEED_20000) &&
+ (params->phy[ELINK_INT_PHY].supported &
+ ELINK_SUPPORTED_20000baseKR2_Full))
+ val |= XMAC_CTRL_REG_XLGMII_ALIGN_ENB;
+
+ /* Check loopback mode */
+ if (lb)
+ val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
+ REG_WR(sc, xmac_base + XMAC_REG_CTRL, val);
+ elink_set_xumac_nig(params,
+ ((vars->flow_ctrl & ELINK_FLOW_CTRL_TX) != 0), 1);
+
+ vars->mac_type = ELINK_MAC_TYPE_XMAC;
+
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_emac_enable(struct elink_params *params,
+ struct elink_vars *vars, uint8_t lb)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port = params->port;
+ uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ uint32_t val;
+
+ PMD_DRV_LOG(DEBUG, "enabling EMAC");
+
+ /* Disable BMAC */
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+ /* enable emac and not bmac */
+ REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + port * 4, 1);
+
+ if (vars->phy_flags & PHY_XGXS_FLAG) {
+ uint32_t ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ PMD_DRV_LOG(DEBUG, "XGXS");
+ /* select the master lanes (out of 0-3) */
+ REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, ser_lane);
+ /* select XGXS */
+ REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1);
+
+ } else { /* SerDes */
+ PMD_DRV_LOG(DEBUG, "SerDes");
+ /* select SerDes */
+ REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 0);
+ }
+
+ elink_bits_en(sc, emac_base + EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_RESET);
+ elink_bits_en(sc, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_RESET);
+
+ /* pause enable/disable */
+ elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ if (!(params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_PFC_ENABLED)) {
+ if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX)
+ elink_bits_en(sc, emac_base +
+ EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)
+ elink_bits_en(sc, emac_base +
+ EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ } else
+ elink_bits_en(sc, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_FLOW_EN);
+
+ /* KEEP_VLAN_TAG, promiscuous */
+ val = REG_RD(sc, emac_base + EMAC_REG_EMAC_RX_MODE);
+ val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
+
+ /* Setting this bit causes MAC control frames (except for pause
+ * frames) to be passed on for processing. This setting has no
+ * affect on the operation of the pause frames. This bit effects
+ * all packets regardless of RX Parser packet sorting logic.
+ * Turn the PFC off to make sure we are in Xon state before
+ * enabling it.
+ */
+ elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_MODE, 0);
+ if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) {
+ PMD_DRV_LOG(DEBUG, "PFC is enabled");
+ /* Enable PFC again */
+ elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_MODE,
+ EMAC_REG_RX_PFC_MODE_RX_EN |
+ EMAC_REG_RX_PFC_MODE_TX_EN |
+ EMAC_REG_RX_PFC_MODE_PRIORITIES);
+
+ elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_PARAM,
+ ((0x0101 <<
+ EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
+ (0x00ff <<
+ EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
+ val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
+ }
+ elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_RX_MODE, val);
+
+ /* Set Loopback */
+ val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE);
+ if (lb)
+ val |= 0x810;
+ else
+ val &= ~0x810;
+ elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MODE, val);
+
+ /* Enable emac */
+ REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 1);
+
+ /* Enable emac for jumbo packets */
+ elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_RX_MTU_SIZE,
+ (EMAC_RX_MTU_SIZE_JUMBO_ENA |
+ (ELINK_ETH_MAX_JUMBO_PACKET_SIZE +
+ ELINK_ETH_OVREHEAD)));
+
+ /* Strip CRC */
+ REG_WR(sc, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port * 4, 0x1);
+
+ /* Disable the NIG in/out to the bmac */
+ REG_WR(sc, NIG_REG_BMAC0_IN_EN + port * 4, 0x0);
+ REG_WR(sc, NIG_REG_BMAC0_PAUSE_OUT_EN + port * 4, 0x0);
+ REG_WR(sc, NIG_REG_BMAC0_OUT_EN + port * 4, 0x0);
+
+ /* Enable the NIG in/out to the emac */
+ REG_WR(sc, NIG_REG_EMAC0_IN_EN + port * 4, 0x1);
+ val = 0;
+ if ((params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
+ val = 1;
+
+ REG_WR(sc, NIG_REG_EMAC0_PAUSE_OUT_EN + port * 4, val);
+ REG_WR(sc, NIG_REG_EGRESS_EMAC0_OUT_EN + port * 4, 0x1);
+
+ REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x0);
+
+ vars->mac_type = ELINK_MAC_TYPE_EMAC;
+ return ELINK_STATUS_OK;
+}
+
+static void elink_update_pfc_bmac1(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ uint32_t wb_data[2];
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+
+ uint32_t val = 0x14;
+ if ((!(params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & ELINK_FLOW_CTRL_RX))
+ /* Enable BigMAC to react on received Pause packets */
+ val |= (1 << 5);
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
+
+ /* TX control */
+ val = 0xc0;
+ if (!(params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_PFC_ENABLED) &&
+ (vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
+ val |= 0x800000;
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
+}
+
+static void elink_update_pfc_bmac2(struct elink_params *params,
+ struct elink_vars *vars, uint8_t is_lb)
+{
+ /* Set rx control: Strip CRC and enable BigMAC to relay
+ * control packets to the system as well
+ */
+ uint32_t wb_data[2];
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ uint32_t val = 0x14;
+
+ if ((!(params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & ELINK_FLOW_CTRL_RX))
+ /* Enable BigMAC to react on received Pause packets */
+ val |= (1 << 5);
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
+ DELAY(30);
+
+ /* Tx control */
+ val = 0xc0;
+ if (!(params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_PFC_ENABLED) &&
+ (vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
+ val |= 0x800000;
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
+
+ if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) {
+ PMD_DRV_LOG(DEBUG, "PFC is enabled");
+ /* Enable PFC RX & TX & STATS and set 8 COS */
+ wb_data[0] = 0x0;
+ wb_data[0] |= (1 << 0); /* RX */
+ wb_data[0] |= (1 << 1); /* TX */
+ wb_data[0] |= (1 << 2); /* Force initial Xon */
+ wb_data[0] |= (1 << 3); /* 8 cos */
+ wb_data[0] |= (1 << 5); /* STATS */
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
+ wb_data, 2);
+ /* Clear the force Xon */
+ wb_data[0] &= ~(1 << 2);
+ } else {
+ PMD_DRV_LOG(DEBUG, "PFC is disabled");
+ /* Disable PFC RX & TX & STATS and set 8 COS */
+ wb_data[0] = 0x8;
+ wb_data[1] = 0;
+ }
+
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
+
+ /* Set Time (based unit is 512 bit time) between automatic
+ * re-sending of PP packets amd enable automatic re-send of
+ * Per-Priroity Packet as long as pp_gen is asserted and
+ * pp_disable is low.
+ */
+ val = 0x8000;
+ if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
+ val |= (1 << 16); /* enable automatic re-send */
+
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
+ wb_data, 2);
+
+ /* mac control */
+ val = 0x3; /* Enable RX and TX */
+ if (is_lb) {
+ val |= 0x4; /* Local loopback */
+ PMD_DRV_LOG(DEBUG, "enable bmac loopback");
+ }
+ /* When PFC enabled, Pass pause frames towards the NIG. */
+ if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
+ val |= ((1 << 6) | (1 << 5));
+
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
+}
+
+/******************************************************************************
+* Description:
+* This function is needed because NIG ARB_CREDIT_WEIGHT_X are
+* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable.
+******************************************************************************/
+static elink_status_t elink_pfc_nig_rx_priority_mask(struct bnx2x_softc *sc,
+ uint8_t cos_entry,
+ uint32_t priority_mask,
+ uint8_t port)
+{
+ uint32_t nig_reg_rx_priority_mask_add = 0;
+
+ switch (cos_entry) {
+ case 0:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS0_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS0_PRIORITY_MASK;
+ break;
+ case 1:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS1_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS1_PRIORITY_MASK;
+ break;
+ case 2:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS2_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS2_PRIORITY_MASK;
+ break;
+ case 3:
+ if (port)
+ return ELINK_STATUS_ERROR;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
+ break;
+ case 4:
+ if (port)
+ return ELINK_STATUS_ERROR;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
+ break;
+ case 5:
+ if (port)
+ return ELINK_STATUS_ERROR;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
+ break;
+ }
+
+ REG_WR(sc, nig_reg_rx_priority_mask_add, priority_mask);
+
+ return ELINK_STATUS_OK;
+}
+
+static void elink_update_mng(struct elink_params *params, uint32_t link_status)
+{
+ struct bnx2x_softc *sc = params->sc;
+
+ REG_WR(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[params->port].link_status), link_status);
+}
+
+static void elink_update_link_attr(struct elink_params *params,
+ uint32_t link_attr)
+{
+ struct bnx2x_softc *sc = params->sc;
+
+ if (SHMEM2_HAS(sc, link_attr_sync))
+ REG_WR(sc, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ link_attr_sync[params->port]), link_attr);
+}
+
+static void elink_update_pfc_nig(struct elink_params *params,
+ struct elink_nig_brb_pfc_port_params
+ *nig_params)
+{
+ uint32_t xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en =
+ 0;
+ uint32_t llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
+ uint32_t pkt_priority_to_cos = 0;
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port = params->port;
+
+ int set_pfc = params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_PFC_ENABLED;
+ PMD_DRV_LOG(DEBUG, "updating pfc nig parameters");
+
+ /* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
+ * MAC control frames (that are not pause packets)
+ * will be forwarded to the XCM.
+ */
+ xcm_mask = REG_RD(sc, port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK);
+ /* NIG params will override non PFC params, since it's possible to
+ * do transition from PFC to SAFC
+ */
+ if (set_pfc) {
+ pause_enable = 0;
+ llfc_out_en = 0;
+ llfc_enable = 0;
+ if (CHIP_IS_E3(sc))
+ ppp_enable = 0;
+ else
+ ppp_enable = 1;
+ xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ xcm_out_en = 0;
+ hwpfc_enable = 1;
+ } else {
+ if (nig_params) {
+ llfc_out_en = nig_params->llfc_out_en;
+ llfc_enable = nig_params->llfc_enable;
+ pause_enable = nig_params->pause_enable;
+ } else /* Default non PFC mode - PAUSE */
+ pause_enable = 1;
+
+ xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ xcm_out_en = 1;
+ }
+
+ if (CHIP_IS_E3(sc))
+ REG_WR(sc, port ? NIG_REG_BRB1_PAUSE_IN_EN :
+ NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
+ REG_WR(sc, port ? NIG_REG_LLFC_OUT_EN_1 :
+ NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
+ REG_WR(sc, port ? NIG_REG_LLFC_ENABLE_1 :
+ NIG_REG_LLFC_ENABLE_0, llfc_enable);
+ REG_WR(sc, port ? NIG_REG_PAUSE_ENABLE_1 :
+ NIG_REG_PAUSE_ENABLE_0, pause_enable);
+
+ REG_WR(sc, port ? NIG_REG_PPP_ENABLE_1 :
+ NIG_REG_PPP_ENABLE_0, ppp_enable);
+
+ REG_WR(sc, port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK, xcm_mask);
+
+ REG_WR(sc, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
+ NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+
+ /* Output enable for RX_XCM # IF */
+ REG_WR(sc, port ? NIG_REG_XCM1_OUT_EN :
+ NIG_REG_XCM0_OUT_EN, xcm_out_en);
+
+ /* HW PFC TX enable */
+ REG_WR(sc, port ? NIG_REG_P1_HWPFC_ENABLE :
+ NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable);
+
+ if (nig_params) {
+ uint8_t i = 0;
+ pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
+
+ for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
+ elink_pfc_nig_rx_priority_mask(sc, i,
+ nig_params->
+ rx_cos_priority_mask[i],
+ port);
+
+ REG_WR(sc, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
+ NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
+ nig_params->llfc_high_priority_classes);
+
+ REG_WR(sc, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
+ NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
+ nig_params->llfc_low_priority_classes);
+ }
+ REG_WR(sc, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
+ NIG_REG_P0_PKT_PRIORITY_TO_COS, pkt_priority_to_cos);
+}
+
+elink_status_t elink_update_pfc(struct elink_params *params,
+ struct elink_vars *vars,
+ struct elink_nig_brb_pfc_port_params
+ *pfc_params)
+{
+ /* The PFC and pause are orthogonal to one another, meaning when
+ * PFC is enabled, the pause are disabled, and when PFC is
+ * disabled, pause are set according to the pause result.
+ */
+ uint32_t val;
+ struct bnx2x_softc *sc = params->sc;
+ elink_status_t elink_status = ELINK_STATUS_OK;
+ uint8_t bmac_loopback = (params->loopback_mode == ELINK_LOOPBACK_BMAC);
+
+ if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
+ vars->link_status |= LINK_STATUS_PFC_ENABLED;
+ else
+ vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
+ elink_update_mng(params, vars->link_status);
+
+ /* Update NIG params */
+ elink_update_pfc_nig(params, pfc_params);
+
+ if (!vars->link_up)
+ return elink_status;
+
+ PMD_DRV_LOG(DEBUG, "About to update PFC in BMAC");
+
+ if (CHIP_IS_E3(sc)) {
+ if (vars->mac_type == ELINK_MAC_TYPE_XMAC)
+ elink_update_pfc_xmac(params, vars);
+ } else {
+ val = REG_RD(sc, MISC_REG_RESET_REG_2);
+ if ((val &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
+ == 0) {
+ PMD_DRV_LOG(DEBUG, "About to update PFC in EMAC");
+ elink_emac_enable(params, vars, 0);
+ return elink_status;
+ }
+ if (CHIP_IS_E2(sc))
+ elink_update_pfc_bmac2(params, vars, bmac_loopback);
+ else
+ elink_update_pfc_bmac1(params, vars);
+
+ val = 0;
+ if ((params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
+ val = 1;
+ REG_WR(sc, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port * 4, val);
+ }
+ return elink_status;
+}
+
+static elink_status_t elink_bmac1_enable(struct elink_params *params,
+ struct elink_vars *vars, uint8_t is_lb)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port = params->port;
+ uint32_t bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ uint32_t wb_data[2];
+ uint32_t val;
+
+ PMD_DRV_LOG(DEBUG, "Enabling BigMAC1");
+
+ /* XGXS control */
+ wb_data[0] = 0x3c;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
+
+ /* TX MAC SA */
+ wb_data[0] = ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) | params->mac_addr[5]);
+ wb_data[1] = ((params->mac_addr[0] << 8) | params->mac_addr[1]);
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
+
+ /* MAC control */
+ val = 0x3;
+ if (is_lb) {
+ val |= 0x4;
+ PMD_DRV_LOG(DEBUG, "enable bmac loopback");
+ }
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
+
+ /* Set rx mtu */
+ wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
+
+ elink_update_pfc_bmac1(params, vars);
+
+ /* Set tx mtu */
+ wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
+
+ /* Set cnt max size */
+ wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
+
+ /* Configure SAFC */
+ wb_data[0] = 0x1000200;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
+ wb_data, 2);
+
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_bmac2_enable(struct elink_params *params,
+ struct elink_vars *vars, uint8_t is_lb)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port = params->port;
+ uint32_t bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ uint32_t wb_data[2];
+
+ PMD_DRV_LOG(DEBUG, "Enabling BigMAC2");
+
+ wb_data[0] = 0;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
+ DELAY(30);
+
+ /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
+ wb_data[0] = 0x3c;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
+
+ DELAY(30);
+
+ /* TX MAC SA */
+ wb_data[0] = ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) | params->mac_addr[5]);
+ wb_data[1] = ((params->mac_addr[0] << 8) | params->mac_addr[1]);
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
+ wb_data, 2);
+
+ DELAY(30);
+
+ /* Configure SAFC */
+ wb_data[0] = 0x1000200;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
+ wb_data, 2);
+ DELAY(30);
+
+ /* Set RX MTU */
+ wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
+ DELAY(30);
+
+ /* Set TX MTU */
+ wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
+ DELAY(30);
+ /* Set cnt max size */
+ wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD - 2;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
+ DELAY(30);
+ elink_update_pfc_bmac2(params, vars, is_lb);
+
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_bmac_enable(struct elink_params *params,
+ struct elink_vars *vars,
+ uint8_t is_lb, uint8_t reset_bmac)
+{
+ elink_status_t rc = ELINK_STATUS_OK;
+ uint8_t port = params->port;
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t val;
+ /* Reset and unreset the BigMac */
+ if (reset_bmac) {
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ DELAY(1000 * 1);
+ }
+
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+ /* Enable access for bmac registers */
+ REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x1);
+
+ /* Enable BMAC according to BMAC type */
+ if (CHIP_IS_E2(sc))
+ rc = elink_bmac2_enable(params, vars, is_lb);
+ else
+ rc = elink_bmac1_enable(params, vars, is_lb);
+ REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 0x1);
+ REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 0x0);
+ REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + port * 4, 0x0);
+ val = 0;
+ if ((params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
+ val = 1;
+ REG_WR(sc, NIG_REG_BMAC0_PAUSE_OUT_EN + port * 4, val);
+ REG_WR(sc, NIG_REG_EGRESS_EMAC0_OUT_EN + port * 4, 0x0);
+ REG_WR(sc, NIG_REG_EMAC0_IN_EN + port * 4, 0x0);
+ REG_WR(sc, NIG_REG_EMAC0_PAUSE_OUT_EN + port * 4, 0x0);
+ REG_WR(sc, NIG_REG_BMAC0_IN_EN + port * 4, 0x1);
+ REG_WR(sc, NIG_REG_BMAC0_OUT_EN + port * 4, 0x1);
+
+ vars->mac_type = ELINK_MAC_TYPE_BMAC;
+ return rc;
+}
+
+static void elink_set_bmac_rx(struct bnx2x_softc *sc, uint8_t port, uint8_t en)
+{
+ uint32_t bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ uint32_t wb_data[2];
+ uint32_t nig_bmac_enable =
+ REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
+
+ if (CHIP_IS_E2(sc))
+ bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL;
+ else
+ bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL;
+ /* Only if the bmac is out of reset */
+ if (REG_RD(sc, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && nig_bmac_enable) {
+ /* Clear Rx Enable bit in BMAC_CONTROL register */
+ REG_RD_DMAE(sc, bmac_addr, wb_data, 2);
+ if (en)
+ wb_data[0] |= ELINK_BMAC_CONTROL_RX_ENABLE;
+ else
+ wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
+ REG_WR_DMAE(sc, bmac_addr, wb_data, 2);
+ DELAY(1000 * 1);
+ }
+}
+
+static elink_status_t elink_pbf_update(struct elink_params *params,
+ uint32_t flow_ctrl, uint32_t line_speed)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port = params->port;
+ uint32_t init_crd, crd;
+ uint32_t count = 1000;
+
+ /* Disable port */
+ REG_WR(sc, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port * 4, 0x1);
+
+ /* Wait for init credit */
+ init_crd = REG_RD(sc, PBF_REG_P0_INIT_CRD + port * 4);
+ crd = REG_RD(sc, PBF_REG_P0_CREDIT + port * 8);
+ PMD_DRV_LOG(DEBUG, "init_crd 0x%x crd 0x%x", init_crd, crd);
+
+ while ((init_crd != crd) && count) {
+ DELAY(1000 * 5);
+ crd = REG_RD(sc, PBF_REG_P0_CREDIT + port * 8);
+ count--;
+ }
+ crd = REG_RD(sc, PBF_REG_P0_CREDIT + port * 8);
+ if (init_crd != crd) {
+ PMD_DRV_LOG(DEBUG, "BUG! init_crd 0x%x != crd 0x%x",
+ init_crd, crd);
+ return ELINK_STATUS_ERROR;
+ }
+
+ if (flow_ctrl & ELINK_FLOW_CTRL_RX ||
+ line_speed == ELINK_SPEED_10 ||
+ line_speed == ELINK_SPEED_100 ||
+ line_speed == ELINK_SPEED_1000 || line_speed == ELINK_SPEED_2500) {
+ REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 1);
+ /* Update threshold */
+ REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, 0);
+ /* Update init credit */
+ init_crd = 778; /* (800-18-4) */
+
+ } else {
+ uint32_t thresh = (ELINK_ETH_MAX_JUMBO_PACKET_SIZE +
+ ELINK_ETH_OVREHEAD) / 16;
+ REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 0);
+ /* Update threshold */
+ REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, thresh);
+ /* Update init credit */
+ switch (line_speed) {
+ case ELINK_SPEED_10000:
+ init_crd = thresh + 553 - 22;
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Invalid line_speed 0x%x",
+ line_speed);
+ return ELINK_STATUS_ERROR;
+ }
+ }
+ REG_WR(sc, PBF_REG_P0_INIT_CRD + port * 4, init_crd);
+ PMD_DRV_LOG(DEBUG, "PBF updated to speed %d credit %d",
+ line_speed, init_crd);
+
+ /* Probe the credit changes */
+ REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0x1);
+ DELAY(1000 * 5);
+ REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0x0);
+
+ /* Enable port */
+ REG_WR(sc, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port * 4, 0x0);
+ return ELINK_STATUS_OK;
+}
+
+/**
+ * elink_get_emac_base - retrive emac base address
+ *
+ * @bp: driver handle
+ * @mdc_mdio_access: access type
+ * @port: port id
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depend on the mdc_mdio_access, port, port swapped. Each
+ * phy has a default access mode, which could also be overridden
+ * by nvram configuration. This parameter, whether this is the
+ * default phy configuration, or the nvram overrun
+ * configuration, is passed here as mdc_mdio_access and selects
+ * the emac_base for the CL45 read/writes operations
+ */
+static uint32_t elink_get_emac_base(struct bnx2x_softc *sc,
+ uint32_t mdc_mdio_access, uint8_t port)
+{
+ uint32_t emac_base = 0;
+ switch (mdc_mdio_access) {
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
+ if (REG_RD(sc, NIG_REG_PORT_SWAP))
+ emac_base = GRCBASE_EMAC1;
+ else
+ emac_base = GRCBASE_EMAC0;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
+ if (REG_RD(sc, NIG_REG_PORT_SWAP))
+ emac_base = GRCBASE_EMAC0;
+ else
+ emac_base = GRCBASE_EMAC1;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
+ emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
+ emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
+ break;
+ default:
+ break;
+ }
+ return emac_base;
+
+}
+
+/******************************************************************/
+/* CL22 access functions */
+/******************************************************************/
+static elink_status_t elink_cl22_write(struct bnx2x_softc *sc,
+ struct elink_phy *phy,
+ uint16_t reg, uint16_t val)
+{
+ uint32_t tmp, mode;
+ uint8_t i;
+ elink_status_t rc = ELINK_STATUS_OK;
+ /* Switch to CL22 */
+ mode = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+ mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+ /* Address */
+ tmp = ((phy->addr << 21) | (reg << 16) | val |
+ EMAC_MDIO_COMM_COMMAND_WRITE_22 | EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+ for (i = 0; i < 50; i++) {
+ DELAY(10);
+
+ tmp = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ DELAY(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ PMD_DRV_LOG(DEBUG, "write phy register failed");
+ rc = ELINK_STATUS_TIMEOUT;
+ }
+ REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+ return rc;
+}
+
+static elink_status_t elink_cl22_read(struct bnx2x_softc *sc,
+ struct elink_phy *phy,
+ uint16_t reg, uint16_t * ret_val)
+{
+ uint32_t val, mode;
+ uint16_t i;
+ elink_status_t rc = ELINK_STATUS_OK;
+
+ /* Switch to CL22 */
+ mode = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+ mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+ /* Address */
+ val = ((phy->addr << 21) | (reg << 16) |
+ EMAC_MDIO_COMM_COMMAND_READ_22 | EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+ for (i = 0; i < 50; i++) {
+ DELAY(10);
+
+ val = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+ *ret_val = (uint16_t) (val & EMAC_MDIO_COMM_DATA);
+ DELAY(5);
+ break;
+ }
+ }
+ if (val & EMAC_MDIO_COMM_START_BUSY) {
+ PMD_DRV_LOG(DEBUG, "read phy register failed");
+
+ *ret_val = 0;
+ rc = ELINK_STATUS_TIMEOUT;
+ }
+ REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+ return rc;
+}
+
+/******************************************************************/
+/* CL45 access functions */
+/******************************************************************/
+static elink_status_t elink_cl45_read(struct bnx2x_softc *sc,
+ struct elink_phy *phy, uint8_t devad,
+ uint16_t reg, uint16_t * ret_val)
+{
+ uint32_t val;
+ uint16_t i;
+ elink_status_t rc = ELINK_STATUS_OK;
+ if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_G) {
+ elink_set_mdio_clk(sc, phy->mdio_ctrl);
+ }
+
+ if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0)
+ elink_bits_en(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ /* Address */
+ val = ((phy->addr << 21) | (devad << 16) | reg |
+ EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+ for (i = 0; i < 50; i++) {
+ DELAY(10);
+
+ val = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+ DELAY(5);
+ break;
+ }
+ }
+ if (val & EMAC_MDIO_COMM_START_BUSY) {
+ PMD_DRV_LOG(DEBUG, "read phy register failed");
+ elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout"
+
+ *ret_val = 0;
+ rc = ELINK_STATUS_TIMEOUT;
+ } else {
+ /* Data */
+ val = ((phy->addr << 21) | (devad << 16) |
+ EMAC_MDIO_COMM_COMMAND_READ_45 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+ for (i = 0; i < 50; i++) {
+ DELAY(10);
+
+ val = REG_RD(sc, phy->mdio_ctrl +
+ EMAC_REG_EMAC_MDIO_COMM);
+ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+ *ret_val =
+ (uint16_t) (val & EMAC_MDIO_COMM_DATA);
+ break;
+ }
+ }
+ if (val & EMAC_MDIO_COMM_START_BUSY) {
+ PMD_DRV_LOG(DEBUG, "read phy register failed");
+ elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout"
+
+ *ret_val = 0;
+ rc = ELINK_STATUS_TIMEOUT;
+ }
+ }
+ /* Work around for E3 A0 */
+ if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA) {
+ phy->flags ^= ELINK_FLAGS_DUMMY_READ;
+ if (phy->flags & ELINK_FLAGS_DUMMY_READ) {
+ uint16_t temp_val;
+ elink_cl45_read(sc, phy, devad, 0xf, &temp_val);
+ }
+ }
+
+ if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0)
+ elink_bits_dis(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ return rc;
+}
+
+static elink_status_t elink_cl45_write(struct bnx2x_softc *sc,
+ struct elink_phy *phy, uint8_t devad,
+ uint16_t reg, uint16_t val)
+{
+ uint32_t tmp;
+ uint8_t i;
+ elink_status_t rc = ELINK_STATUS_OK;
+ if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_G) {
+ elink_set_mdio_clk(sc, phy->mdio_ctrl);
+ }
+
+ if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0)
+ elink_bits_en(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+
+ /* Address */
+ tmp = ((phy->addr << 21) | (devad << 16) | reg |
+ EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+ for (i = 0; i < 50; i++) {
+ DELAY(10);
+
+ tmp = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ DELAY(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ PMD_DRV_LOG(DEBUG, "write phy register failed");
+ elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout"
+
+ rc = ELINK_STATUS_TIMEOUT;
+ } else {
+ /* Data */
+ tmp = ((phy->addr << 21) | (devad << 16) | val |
+ EMAC_MDIO_COMM_COMMAND_WRITE_45 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+ for (i = 0; i < 50; i++) {
+ DELAY(10);
+
+ tmp = REG_RD(sc, phy->mdio_ctrl +
+ EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ DELAY(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ PMD_DRV_LOG(DEBUG, "write phy register failed");
+ elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout"
+
+ rc = ELINK_STATUS_TIMEOUT;
+ }
+ }
+ /* Work around for E3 A0 */
+ if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA) {
+ phy->flags ^= ELINK_FLAGS_DUMMY_READ;
+ if (phy->flags & ELINK_FLAGS_DUMMY_READ) {
+ uint16_t temp_val;
+ elink_cl45_read(sc, phy, devad, 0xf, &temp_val);
+ }
+ }
+ if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0)
+ elink_bits_dis(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ return rc;
+}
+
+/******************************************************************/
+/* EEE section */
+/******************************************************************/
+static uint8_t elink_eee_has_cap(struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+
+ if (REG_RD(sc, params->shmem2_base) <=
+ offsetof(struct shmem2_region, eee_status[params->port]))
+ return 0;
+
+ return 1;
+}
+
+static elink_status_t elink_eee_nvram_to_time(uint32_t nvram_mode,
+ uint32_t * idle_timer)
+{
+ switch (nvram_mode) {
+ case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
+ *idle_timer = ELINK_EEE_MODE_NVRAM_BALANCED_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
+ *idle_timer = ELINK_EEE_MODE_NVRAM_AGGRESSIVE_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
+ *idle_timer = ELINK_EEE_MODE_NVRAM_LATENCY_TIME;
+ break;
+ default:
+ *idle_timer = 0;
+ break;
+ }
+
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_eee_time_to_nvram(uint32_t idle_timer,
+ uint32_t * nvram_mode)
+{
+ switch (idle_timer) {
+ case ELINK_EEE_MODE_NVRAM_BALANCED_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
+ break;
+ case ELINK_EEE_MODE_NVRAM_AGGRESSIVE_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
+ break;
+ case ELINK_EEE_MODE_NVRAM_LATENCY_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
+ break;
+ default:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
+ break;
+ }
+
+ return ELINK_STATUS_OK;
+}
+
+static uint32_t elink_eee_calc_timer(struct elink_params *params)
+{
+ uint32_t eee_mode, eee_idle;
+ struct bnx2x_softc *sc = params->sc;
+
+ if (params->eee_mode & ELINK_EEE_MODE_OVERRIDE_NVRAM) {
+ if (params->eee_mode & ELINK_EEE_MODE_OUTPUT_TIME) {
+ /* time value in eee_mode --> used directly */
+ eee_idle = params->eee_mode & ELINK_EEE_MODE_TIMER_MASK;
+ } else {
+ /* hsi value in eee_mode --> time */
+ if (elink_eee_nvram_to_time(params->eee_mode &
+ ELINK_EEE_MODE_NVRAM_MASK,
+ &eee_idle))
+ return 0;
+ }
+ } else {
+ /* hsi values in nvram --> time */
+ eee_mode = ((REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_feature_config
+ [params->
+ port].eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+
+ if (elink_eee_nvram_to_time(eee_mode, &eee_idle))
+ return 0;
+ }
+
+ return eee_idle;
+}
+
+static elink_status_t elink_eee_set_timers(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ uint32_t eee_idle = 0, eee_mode;
+ struct bnx2x_softc *sc = params->sc;
+
+ eee_idle = elink_eee_calc_timer(params);
+
+ if (eee_idle) {
+ REG_WR(sc, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
+ eee_idle);
+ } else if ((params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI) &&
+ (params->eee_mode & ELINK_EEE_MODE_OVERRIDE_NVRAM) &&
+ (params->eee_mode & ELINK_EEE_MODE_OUTPUT_TIME)) {
+ PMD_DRV_LOG(DEBUG, "Error: Tx LPI is enabled with timer 0");
+ return ELINK_STATUS_ERROR;
+ }
+
+ vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
+ if (params->eee_mode & ELINK_EEE_MODE_OUTPUT_TIME) {
+ /* eee_idle in 1u --> eee_status in 16u */
+ eee_idle >>= 4;
+ vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
+ SHMEM_EEE_TIME_OUTPUT_BIT;
+ } else {
+ if (elink_eee_time_to_nvram(eee_idle, &eee_mode))
+ return ELINK_STATUS_ERROR;
+ vars->eee_status |= eee_mode;
+ }
+
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_eee_initial_config(struct elink_params *params,
+ struct elink_vars *vars,
+ uint8_t mode)
+{
+ vars->eee_status |= ((uint32_t) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
+
+ /* Propagate params' bits --> vars (for migration exposure) */
+ if (params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI)
+ vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
+
+ if (params->eee_mode & ELINK_EEE_MODE_ADV_LPI)
+ vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
+
+ return elink_eee_set_timers(params, vars);
+}
+
+static elink_status_t elink_eee_disable(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+
+ /* Make Certain LPI is disabled */
+ REG_WR(sc, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
+
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0);
+
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_eee_advertise(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars,
+ uint8_t modes)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val = 0;
+
+ /* Mask events preventing LPI generation */
+ REG_WR(sc, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
+
+ if (modes & SHMEM_EEE_10G_ADV) {
+ PMD_DRV_LOG(DEBUG, "Advertise 10GBase-T EEE");
+ val |= 0x8;
+ }
+ if (modes & SHMEM_EEE_1G_ADV) {
+ PMD_DRV_LOG(DEBUG, "Advertise 1GBase-T EEE");
+ val |= 0x4;
+ }
+
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val);
+
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+ vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT);
+
+ return ELINK_STATUS_OK;
+}
+
+static void elink_update_mng_eee(struct elink_params *params,
+ uint32_t eee_status)
+{
+ struct bnx2x_softc *sc = params->sc;
+
+ if (elink_eee_has_cap(params))
+ REG_WR(sc, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]), eee_status);
+}
+
+static void elink_eee_an_resolve(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t adv = 0, lp = 0;
+ uint32_t lp_adv = 0;
+ uint8_t neg = 0;
+
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv);
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp);
+
+ if (lp & 0x2) {
+ lp_adv |= SHMEM_EEE_100M_ADV;
+ if (adv & 0x2) {
+ if (vars->line_speed == ELINK_SPEED_100)
+ neg = 1;
+ PMD_DRV_LOG(DEBUG, "EEE negotiated - 100M");
+ }
+ }
+ if (lp & 0x14) {
+ lp_adv |= SHMEM_EEE_1G_ADV;
+ if (adv & 0x14) {
+ if (vars->line_speed == ELINK_SPEED_1000)
+ neg = 1;
+ PMD_DRV_LOG(DEBUG, "EEE negotiated - 1G");
+ }
+ }
+ if (lp & 0x68) {
+ lp_adv |= SHMEM_EEE_10G_ADV;
+ if (adv & 0x68) {
+ if (vars->line_speed == ELINK_SPEED_10000)
+ neg = 1;
+ PMD_DRV_LOG(DEBUG, "EEE negotiated - 10G");
+ }
+ }
+
+ vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
+ vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+ if (neg) {
+ PMD_DRV_LOG(DEBUG, "EEE is active");
+ vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
+ }
+}
+
+/******************************************************************/
+/* BSC access functions from E3 */
+/******************************************************************/
+static void elink_bsc_module_sel(struct elink_params *params)
+{
+ int idx;
+ uint32_t board_cfg, sfp_ctrl;
+ uint32_t i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH];
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port = params->port;
+ /* Read I2C output PINs */
+ board_cfg = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.shared_hw_config.board));
+ i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK;
+ i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >>
+ SHARED_HW_CFG_E3_I2C_MUX1_SHIFT;
+
+ /* Read I2C output value */
+ sfp_ctrl = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].
+ e3_cmn_pin_cfg));
+ i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
+ i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
+ PMD_DRV_LOG(DEBUG, "Setting BSC switch");
+ for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
+ elink_set_cfg_pin(sc, i2c_pins[idx], i2c_val[idx]);
+}
+
+static elink_status_t elink_bsc_read(struct elink_params *params,
+ struct bnx2x_softc *sc,
+ uint8_t sl_devid,
+ uint16_t sl_addr,
+ uint8_t lc_addr,
+ uint8_t xfer_cnt, uint32_t * data_array)
+{
+ uint32_t val, i;
+ elink_status_t rc = ELINK_STATUS_OK;
+
+ if (xfer_cnt > 16) {
+ PMD_DRV_LOG(DEBUG, "invalid xfer_cnt %d. Max is 16 bytes",
+ xfer_cnt);
+ return ELINK_STATUS_ERROR;
+ }
+ if (params)
+ elink_bsc_module_sel(params);
+
+ xfer_cnt = 16 - lc_addr;
+
+ /* Enable the engine */
+ val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND);
+ val |= MCPR_IMC_COMMAND_ENABLE;
+ REG_WR(sc, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* Program slave device ID */
+ val = (sl_devid << 16) | sl_addr;
+ REG_WR(sc, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
+
+ /* Start xfer with 0 byte to update the address pointer ??? */
+ val = (MCPR_IMC_COMMAND_ENABLE) |
+ (MCPR_IMC_COMMAND_WRITE_OP <<
+ MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+ (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
+ REG_WR(sc, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* Poll for completion */
+ i = 0;
+ val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND);
+ while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+ DELAY(10);
+ val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND);
+ if (i++ > 1000) {
+ PMD_DRV_LOG(DEBUG, "wr 0 byte timed out after %d try",
+ i);
+ rc = ELINK_STATUS_TIMEOUT;
+ break;
+ }
+ }
+ if (rc == ELINK_STATUS_TIMEOUT)
+ return rc;
+
+ /* Start xfer with read op */
+ val = (MCPR_IMC_COMMAND_ENABLE) |
+ (MCPR_IMC_COMMAND_READ_OP <<
+ MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+ (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) |
+ (xfer_cnt);
+ REG_WR(sc, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* Poll for completion */
+ i = 0;
+ val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND);
+ while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+ DELAY(10);
+ val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND);
+ if (i++ > 1000) {
+ PMD_DRV_LOG(DEBUG, "rd op timed out after %d try", i);
+ rc = ELINK_STATUS_TIMEOUT;
+ break;
+ }
+ }
+ if (rc == ELINK_STATUS_TIMEOUT)
+ return rc;
+
+ for (i = (lc_addr >> 2); i < 4; i++) {
+ data_array[i] = REG_RD(sc, (MCP_REG_MCPR_IMC_DATAREG0 + i * 4));
+#ifdef __BIG_ENDIAN
+ data_array[i] = ((data_array[i] & 0x000000ff) << 24) |
+ ((data_array[i] & 0x0000ff00) << 8) |
+ ((data_array[i] & 0x00ff0000) >> 8) |
+ ((data_array[i] & 0xff000000) >> 24);
+#endif
+ }
+ return rc;
+}
+
+static void elink_cl45_read_or_write(struct bnx2x_softc *sc,
+ struct elink_phy *phy, uint8_t devad,
+ uint16_t reg, uint16_t or_val)
+{
+ uint16_t val;
+ elink_cl45_read(sc, phy, devad, reg, &val);
+ elink_cl45_write(sc, phy, devad, reg, val | or_val);
+}
+
+static void elink_cl45_read_and_write(struct bnx2x_softc *sc,
+ struct elink_phy *phy,
+ uint8_t devad, uint16_t reg,
+ uint16_t and_val)
+{
+ uint16_t val;
+ elink_cl45_read(sc, phy, devad, reg, &val);
+ elink_cl45_write(sc, phy, devad, reg, val & and_val);
+}
+
+static uint8_t elink_get_warpcore_lane(struct elink_params *params)
+{
+ uint8_t lane = 0;
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t path_swap, path_swap_ovr;
+ uint8_t path, port;
+
+ path = SC_PATH(sc);
+ port = params->port;
+
+ if (elink_is_4_port_mode(sc)) {
+ uint32_t port_swap, port_swap_ovr;
+
+ /* Figure out path swap value */
+ path_swap_ovr = REG_RD(sc, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
+ if (path_swap_ovr & 0x1)
+ path_swap = (path_swap_ovr & 0x2);
+ else
+ path_swap = REG_RD(sc, MISC_REG_FOUR_PORT_PATH_SWAP);
+
+ if (path_swap)
+ path = path ^ 1;
+
+ /* Figure out port swap value */
+ port_swap_ovr = REG_RD(sc, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
+ if (port_swap_ovr & 0x1)
+ port_swap = (port_swap_ovr & 0x2);
+ else
+ port_swap = REG_RD(sc, MISC_REG_FOUR_PORT_PORT_SWAP);
+
+ if (port_swap)
+ port = port ^ 1;
+
+ lane = (port << 1) + path;
+ } else { /* Two port mode - no port swap */
+
+ /* Figure out path swap value */
+ path_swap_ovr = REG_RD(sc, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
+ if (path_swap_ovr & 0x1) {
+ path_swap = (path_swap_ovr & 0x2);
+ } else {
+ path_swap = REG_RD(sc, MISC_REG_TWO_PORT_PATH_SWAP);
+ }
+ if (path_swap)
+ path = path ^ 1;
+
+ lane = path << 1;
+ }
+ return lane;
+}
+
+static void elink_set_aer_mmd(struct elink_params *params,
+ struct elink_phy *phy)
+{
+ uint32_t ser_lane;
+ uint16_t offset, aer_val;
+ struct bnx2x_softc *sc = params->sc;
+ ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
+ (phy->addr + ser_lane) : 0;
+
+ if (USES_WARPCORE(sc)) {
+ aer_val = elink_get_warpcore_lane(params);
+ /* In Dual-lane mode, two lanes are joined together,
+ * so in order to configure them, the AER broadcast method is
+ * used here.
+ * 0x200 is the broadcast address for lanes 0,1
+ * 0x201 is the broadcast address for lanes 2,3
+ */
+ if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE)
+ aer_val = (aer_val >> 1) | 0x200;
+ } else if (CHIP_IS_E2(sc))
+ aer_val = 0x3800 + offset - 1;
+ else
+ aer_val = 0x3800 + offset;
+
+ CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, aer_val);
+
+}
+
+/******************************************************************/
+/* Internal phy section */
+/******************************************************************/
+
+static void elink_set_serdes_access(struct bnx2x_softc *sc, uint8_t port)
+{
+ uint32_t emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+ /* Set Clause 22 */
+ REG_WR(sc, NIG_REG_SERDES0_CTRL_MD_ST + port * 0x10, 1);
+ REG_WR(sc, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
+ DELAY(500);
+ REG_WR(sc, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
+ DELAY(500);
+ /* Set Clause 45 */
+ REG_WR(sc, NIG_REG_SERDES0_CTRL_MD_ST + port * 0x10, 0);
+}
+
+static void elink_serdes_deassert(struct bnx2x_softc *sc, uint8_t port)
+{
+ uint32_t val;
+
+ PMD_DRV_LOG(DEBUG, "elink_serdes_deassert");
+
+ val = ELINK_SERDES_RESET_BITS << (port * 16);
+
+ /* Reset and unreset the SerDes/XGXS */
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+ DELAY(500);
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+
+ elink_set_serdes_access(sc, port);
+
+ REG_WR(sc, NIG_REG_SERDES0_CTRL_MD_DEVAD + port * 0x10,
+ ELINK_DEFAULT_PHY_DEV_ADDR);
+}
+
+static void elink_xgxs_specific_func(struct elink_phy *phy,
+ struct elink_params *params,
+ uint32_t action)
+{
+ struct bnx2x_softc *sc = params->sc;
+ switch (action) {
+ case ELINK_PHY_INIT:
+ /* Set correct devad */
+ REG_WR(sc, NIG_REG_XGXS0_CTRL_MD_ST + params->port * 0x18, 0);
+ REG_WR(sc, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port * 0x18,
+ phy->def_md_devad);
+ break;
+ }
+}
+
+static void elink_xgxs_deassert(struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port;
+ uint32_t val;
+ PMD_DRV_LOG(DEBUG, "elink_xgxs_deassert");
+ port = params->port;
+
+ val = ELINK_XGXS_RESET_BITS << (port * 16);
+
+ /* Reset and unreset the SerDes/XGXS */
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+ DELAY(500);
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+ elink_xgxs_specific_func(&params->phy[ELINK_INT_PHY], params,
+ ELINK_PHY_INIT);
+}
+
+static void elink_calc_ieee_aneg_adv(struct elink_phy *phy,
+ struct elink_params *params,
+ uint16_t * ieee_fc)
+{
+ *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
+ /* Resolve pause mode and advertisement Please refer to Table
+ * 28B-3 of the 802.3ab-1999 spec
+ */
+
+ switch (phy->req_flow_ctrl) {
+ case ELINK_FLOW_CTRL_AUTO:
+ switch (params->req_fc_auto_adv) {
+ case ELINK_FLOW_CTRL_BOTH:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ break;
+ case ELINK_FLOW_CTRL_RX:
+ case ELINK_FLOW_CTRL_TX:
+ *ieee_fc |=
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ break;
+ default:
+ break;
+ }
+ break;
+ case ELINK_FLOW_CTRL_TX:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ break;
+
+ case ELINK_FLOW_CTRL_RX:
+ case ELINK_FLOW_CTRL_BOTH:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ break;
+
+ case ELINK_FLOW_CTRL_NONE:
+ default:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+ break;
+ }
+ PMD_DRV_LOG(DEBUG, "ieee_fc = 0x%x", *ieee_fc);
+}
+
+static void set_phy_vars(struct elink_params *params, struct elink_vars *vars)
+{
+ uint8_t actual_phy_idx, phy_index, link_cfg_idx;
+ uint8_t phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+ for (phy_index = ELINK_INT_PHY; phy_index < params->num_phys;
+ phy_index++) {
+ link_cfg_idx = ELINK_LINK_CONFIG_IDX(phy_index);
+ actual_phy_idx = phy_index;
+ if (phy_config_swapped) {
+ if (phy_index == ELINK_EXT_PHY1)
+ actual_phy_idx = ELINK_EXT_PHY2;
+ else if (phy_index == ELINK_EXT_PHY2)
+ actual_phy_idx = ELINK_EXT_PHY1;
+ }
+ params->phy[actual_phy_idx].req_flow_ctrl =
+ params->req_flow_ctrl[link_cfg_idx];
+
+ params->phy[actual_phy_idx].req_line_speed =
+ params->req_line_speed[link_cfg_idx];
+
+ params->phy[actual_phy_idx].speed_cap_mask =
+ params->speed_cap_mask[link_cfg_idx];
+
+ params->phy[actual_phy_idx].req_duplex =
+ params->req_duplex[link_cfg_idx];
+
+ if (params->req_line_speed[link_cfg_idx] ==
+ ELINK_SPEED_AUTO_NEG)
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+
+ PMD_DRV_LOG(DEBUG, "req_flow_ctrl %x, req_line_speed %x,"
+ " speed_cap_mask %x",
+ params->phy[actual_phy_idx].req_flow_ctrl,
+ params->phy[actual_phy_idx].req_line_speed,
+ params->phy[actual_phy_idx].speed_cap_mask);
+ }
+}
+
+static void elink_ext_phy_set_pause(struct elink_params *params,
+ struct elink_phy *phy,
+ struct elink_vars *vars)
+{
+ uint16_t val;
+ struct bnx2x_softc *sc = params->sc;
+ /* Read modify write pause advertizing */
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
+
+ val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
+
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+ }
+ PMD_DRV_LOG(DEBUG, "Ext phy AN advertize 0x%x", val);
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
+}
+
+static void elink_pause_resolve(struct elink_vars *vars, uint32_t pause_result)
+{ /* LD LP */
+ switch (pause_result) { /* ASYM P ASYM P */
+ case 0xb: /* 1 0 1 1 */
+ vars->flow_ctrl = ELINK_FLOW_CTRL_TX;
+ break;
+
+ case 0xe: /* 1 1 1 0 */
+ vars->flow_ctrl = ELINK_FLOW_CTRL_RX;
+ break;
+
+ case 0x5: /* 0 1 0 1 */
+ case 0x7: /* 0 1 1 1 */
+ case 0xd: /* 1 1 0 1 */
+ case 0xf: /* 1 1 1 1 */
+ vars->flow_ctrl = ELINK_FLOW_CTRL_BOTH;
+ break;
+
+ default:
+ break;
+ }
+ if (pause_result & (1 << 0))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
+ if (pause_result & (1 << 1))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
+
+}
+
+static void elink_ext_phy_update_adv_fc(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ uint16_t ld_pause; /* local */
+ uint16_t lp_pause; /* link partner */
+ uint16_t pause_result;
+ struct bnx2x_softc *sc = params->sc;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE) {
+ elink_cl22_read(sc, phy, 0x4, &ld_pause);
+ elink_cl22_read(sc, phy, 0x5, &lp_pause);
+ } else if (CHIP_IS_E3(sc) && ELINK_SINGLE_MEDIA_DIRECT(params)) {
+ uint8_t lane = elink_get_warpcore_lane(params);
+ uint16_t gp_status, gp_mask;
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4,
+ &gp_status);
+ gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL |
+ MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) <<
+ lane;
+ if ((gp_status & gp_mask) == gp_mask) {
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ } else {
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+ ld_pause = ((ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+ << 3);
+ lp_pause = ((lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+ << 3);
+ }
+ } else {
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ }
+ pause_result = (ld_pause & MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause & MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
+ PMD_DRV_LOG(DEBUG, "Ext PHY pause result 0x%x", pause_result);
+ elink_pause_resolve(vars, pause_result);
+
+}
+
+static uint8_t elink_ext_phy_resolve_fc(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ uint8_t ret = 0;
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+ if (phy->req_flow_ctrl != ELINK_FLOW_CTRL_AUTO) {
+ /* Update the advertised flow-controled of LD/LP in AN */
+ if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG)
+ elink_ext_phy_update_adv_fc(phy, params, vars);
+ /* But set the flow-control result as the requested one */
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ } else if (phy->req_line_speed != ELINK_SPEED_AUTO_NEG)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ ret = 1;
+ elink_ext_phy_update_adv_fc(phy, params, vars);
+ }
+ return ret;
+}
+
+/******************************************************************/
+/* Warpcore section */
+/******************************************************************/
+/* The init_internal_warpcore should mirror the xgxs,
+ * i.e. reset the lane (if needed), set aer for the
+ * init configuration, and set/clear SGMII flag. Internal
+ * phy init is done purely in phy_init stage.
+ */
+#define WC_TX_DRIVER(post2, idriver, ipre) \
+ ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
+ (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
+ (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))
+
+#define WC_TX_FIR(post, main, pre) \
+ ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
+ (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
+ (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
+
+static void elink_warpcore_enable_AN_KR2(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t i;
+ static struct elink_reg_set reg_set[] = {
+ /* Step 1 - Program the TX/RX alignment markers */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0xa157},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xcbe2},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0x7537},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0xa157},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xcbe2},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0x7537},
+ /* Step 2 - Configure the NP registers */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000a},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6400},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0620},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0157},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x6464},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x3150},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x3150},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620}
+ };
+ PMD_DRV_LOG(DEBUG, "Enabling 20G-KR2");
+
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL49_USERB0_CTRL, (3 << 6));
+
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+
+ /* Start KR2 work-around timer which handles BNX2X8073 link-parner */
+ vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+ elink_update_link_attr(params, vars->link_attr_sync);
+}
+
+static void elink_disable_kr2(struct elink_params *params,
+ struct elink_vars *vars, struct elink_phy *phy)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t i;
+ static struct elink_reg_set reg_set[] = {
+ /* Step 1 - Program the TX/RX alignment markers */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
+ };
+ PMD_DRV_LOG(DEBUG, "Disabling 20G-KR2");
+
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+ vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+ elink_update_link_attr(params, vars->link_attr_sync);
+
+ vars->check_kr2_recovery_cnt = ELINK_CHECK_KR2_RECOVERY_CNT;
+}
+
+static void elink_warpcore_set_lpi_passthrough(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+
+ PMD_DRV_LOG(DEBUG, "Configure WC for LPI pass through");
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c);
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
+}
+
+static void elink_warpcore_restart_AN_KR(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ /* Restart autoneg on the leading lane only */
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t lane = elink_get_warpcore_lane(params);
+ CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, lane);
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
+
+ /* Restore AER */
+ elink_set_aer_mmd(params, phy);
+}
+
+static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ uint16_t lane, i, cl72_ctrl, an_adv = 0;
+ struct bnx2x_softc *sc = params->sc;
+ static struct elink_reg_set reg_set[] = {
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
+ /* Disable Autoneg: re-enable it after adv is done. */
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0},
+ {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0},
+ };
+ PMD_DRV_LOG(DEBUG, "Enable Auto Negotiation for KR");
+ /* Set to default registers that may be overridden by 10G force */
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
+ cl72_ctrl &= 0x08ff;
+ cl72_ctrl |= 0x3800;
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
+
+ /* Check adding advertisement for 1G KX */
+ if (((vars->line_speed == ELINK_SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (vars->line_speed == ELINK_SPEED_1000)) {
+ uint16_t addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
+ an_adv |= (1 << 5);
+
+ /* Enable CL37 1G Parallel Detect */
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, addr, 0x1);
+ PMD_DRV_LOG(DEBUG, "Advertize 1G");
+ }
+ if (((vars->line_speed == ELINK_SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+ (vars->line_speed == ELINK_SPEED_10000)) {
+ /* Check adding advertisement for 10G KR */
+ an_adv |= (1 << 7);
+ /* Enable 10G Parallel Detect */
+ CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
+ elink_set_aer_mmd(params, phy);
+ PMD_DRV_LOG(DEBUG, "Advertize 10G");
+ }
+
+ /* Set Transmit PMD settings */
+ lane = elink_get_warpcore_lane(params);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane,
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
+ /* Configure the next lane if dual mode */
+ if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE)
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * (lane + 1),
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, 0x03f0);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL, 0x03f0);
+
+ /* Advertised speeds */
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv);
+
+ /* Advertised and set FEC (Forward Error Correction) */
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
+ (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
+
+ /* Enable CL37 BAM */
+ if (REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].
+ default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL,
+ 1);
+ PMD_DRV_LOG(DEBUG, "Enable CL37 BAM on KR");
+ }
+
+ /* Advertise pause */
+ elink_ext_phy_set_pause(params, phy, vars);
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
+
+ /* Over 1G - AN local device user page 1 */
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
+
+ if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
+ (phy->req_line_speed == ELINK_SPEED_20000)) {
+
+ CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, lane);
+
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX1_PCI_CTRL +
+ (0x10 * lane), (1 << 11));
+
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7);
+ elink_set_aer_mmd(params, phy);
+
+ elink_warpcore_enable_AN_KR2(phy, params, vars);
+ } else {
+ elink_disable_kr2(params, vars, phy);
+ }
+
+ /* Enable Autoneg: only on the main lane */
+ elink_warpcore_restart_AN_KR(phy, params);
+}
+
+static void elink_warpcore_set_10G_KR(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val16, i, lane;
+ static struct elink_reg_set reg_set[] = {
+ /* Disable Autoneg */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x3f00},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
+ /* Leave cl72 training enable, needed for KR */
+ {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}
+ };
+
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+
+ lane = elink_get_warpcore_lane(params);
+ /* Global registers */
+ CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ /* Disable CL36 PCS Tx */
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+ val16 &= ~(0x0011 << lane);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
+
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+ val16 |= (0x0303 << (lane << 1));
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+ /* Restore AER */
+ elink_set_aer_mmd(params, phy);
+ /* Set speed via PMA/PMD register */
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
+
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
+
+ /* Enable encoded forced speed */
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
+
+ /* Turn TX scramble payload only the 64/66 scrambler */
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_TX66_CONTROL, 0x9);
+
+ /* Turn RX scramble payload only the 64/66 scrambler */
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, 0xF9);
+
+ /* Set and clear loopback to cause a reset to 64/66 decoder */
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
+
+}
+
+static void elink_warpcore_set_10G_XFI(struct elink_phy *phy,
+ struct elink_params *params,
+ uint8_t is_xfi)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t misc1_val, tap_val, tx_driver_val, lane, val;
+ uint32_t cfg_tap_val, tx_drv_brdct, tx_equal;
+
+ /* Hold rxSeqStart */
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
+
+ /* Hold tx_fifo_reset */
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1);
+
+ /* Disable CL73 AN */
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+
+ /* Disable 100FX Enable and Auto-Detect */
+ elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL1, 0xFFFA);
+
+ /* Disable 100FX Idle detect */
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, 0x0080);
+
+ /* Set Block address to Remote PHY & Clear forced_speed[5] */
+ elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0xFF7F);
+
+ /* Turn off auto-detect & fiber mode */
+ elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ 0xFFEE);
+
+ /* Set filter_force_link, disable_false_link and parallel_detect */
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ ((val | 0x0006) & 0xFFFE));
+
+ /* Set XFI / SFI */
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val);
+
+ misc1_val &= ~(0x1f);
+
+ if (is_xfi) {
+ misc1_val |= 0x5;
+ tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
+ tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03);
+ } else {
+ cfg_tap_val = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->
+ port].sfi_tap_values));
+
+ tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
+
+ tx_drv_brdct = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
+ PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
+
+ misc1_val |= 0x9;
+
+ /* TAP values are controlled by nvram, if value there isn't 0 */
+ if (tx_equal)
+ tap_val = (uint16_t) tx_equal;
+ else
+ tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
+
+ if (tx_drv_brdct)
+ tx_driver_val =
+ WC_TX_DRIVER(0x03, (uint16_t) tx_drv_brdct, 0x06);
+ else
+ tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06);
+ }
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
+
+ /* Set Transmit PMD settings */
+ lane = elink_get_warpcore_lane(params);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX_FIR_TAP,
+ tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane,
+ tx_driver_val);
+
+ /* Enable fiber mode, enable and invert sig_det */
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd);
+
+ /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
+
+ elink_warpcore_set_lpi_passthrough(phy, params);
+
+ /* 10G XFI Full Duplex */
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
+
+ /* Release tx_fifo_reset */
+ elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+ 0xFFFE);
+ /* Release rxSeqStart */
+ elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x7FFF);
+}
+
+static void elink_warpcore_set_20G_force_KR2(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ uint16_t val;
+ struct bnx2x_softc *sc = params->sc;
+ /* Set global registers, so set AER lane to 0 */
+ CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+
+ /* Disable sequencer */
+ elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~(1 << 13));
+
+ elink_set_aer_mmd(params, phy);
+
+ elink_cl45_read_and_write(sc, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_PMD_KR_CONTROL, ~(1 << 1));
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+ /* Turn off CL73 */
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL73_USERB0_CTRL, &val);
+ val &= ~(1 << 5);
+ val |= (1 << 6);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL73_USERB0_CTRL, val);
+
+ /* Set 20G KR2 force speed */
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x1f);
+
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, (1 << 7));
+
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &val);
+ val &= ~(3 << 14);
+ val |= (1 << 15);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, val);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0x835A);
+
+ /* Enable sequencer (over lane 0) */
+ CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, (1 << 13));
+
+ elink_set_aer_mmd(params, phy);
+}
+
+static void elink_warpcore_set_20G_DXGXS(struct bnx2x_softc *sc,
+ struct elink_phy *phy, uint16_t lane)
+{
+ /* Rx0 anaRxControl1G */
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90);
+
+ /* Rx2 anaRxControl1G */
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90);
+
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW0, 0xE070);
+
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW1, 0xC0D0);
+
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW2, 0xA0B0);
+
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW3, 0x8090);
+
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0);
+
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0);
+
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0);
+
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0);
+
+ /* Serdes Digital Misc1 */
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008);
+
+ /* Serdes Digital4 Misc3 */
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8088);
+
+ /* Set Transmit PMD settings */
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX_FIR_TAP,
+ (WC_TX_FIR(0x12, 0x2d, 0x00) |
+ MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane,
+ WC_TX_DRIVER(0x02, 0x02, 0x02));
+}
+
+static void elink_warpcore_set_sgmii_speed(struct elink_phy *phy,
+ struct elink_params *params,
+ uint8_t fiber_mode,
+ uint8_t always_autoneg)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val16, digctrl_kx1, digctrl_kx2;
+
+ /* Clear XFI clock comp in non-10G single lane mode. */
+ elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, ~(3 << 13));
+
+ elink_warpcore_set_lpi_passthrough(phy, params);
+
+ if (always_autoneg || phy->req_line_speed == ELINK_SPEED_AUTO_NEG) {
+ /* SGMII Autoneg */
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+ 0x1000);
+ PMD_DRV_LOG(DEBUG, "set SGMII AUTONEG");
+ } else {
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ val16 &= 0xcebf;
+ switch (phy->req_line_speed) {
+ case ELINK_SPEED_10:
+ break;
+ case ELINK_SPEED_100:
+ val16 |= 0x2000;
+ break;
+ case ELINK_SPEED_1000:
+ val16 |= 0x0040;
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG,
+ "Speed not supported: 0x%x",
+ phy->req_line_speed);
+ return;
+ }
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ val16 |= 0x0100;
+
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
+
+ PMD_DRV_LOG(DEBUG, "set SGMII force speed %d",
+ phy->req_line_speed);
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ PMD_DRV_LOG(DEBUG, " (readback) %x", val16);
+ }
+
+ /* SGMII Slave mode and disable signal detect */
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1);
+ if (fiber_mode)
+ digctrl_kx1 = 1;
+ else
+ digctrl_kx1 &= 0xff4a;
+
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, digctrl_kx1);
+
+ /* Turn off parallel detect */
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (digctrl_kx2 & ~(1 << 2)));
+
+ /* Re-enable parallel detect */
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (digctrl_kx2 | (1 << 2)));
+
+ /* Enable autodet */
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ (digctrl_kx1 | 0x10));
+}
+
+static void elink_warpcore_reset_lane(struct bnx2x_softc *sc,
+ struct elink_phy *phy, uint8_t reset)
+{
+ uint16_t val;
+ /* Take lane out of reset after configuration is finished */
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, &val);
+ if (reset)
+ val |= 0xC000;
+ else
+ val &= 0x3FFF;
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, val);
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, &val);
+}
+
+/* Clear SFI/XFI link settings registers */
+static void elink_warpcore_clear_regs(struct elink_phy *phy,
+ struct elink_params *params,
+ uint16_t lane)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t i;
+ static struct elink_reg_set wc_regs[] = {
+ {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ 0x0195},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ 0x0007},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+ 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140}
+ };
+ /* Set XFI clock comp as default. */
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, (3 << 13));
+
+ for (i = 0; i < ARRAY_SIZE(wc_regs); i++)
+ elink_cl45_write(sc, phy, wc_regs[i].devad, wc_regs[i].reg,
+ wc_regs[i].val);
+
+ lane = elink_get_warpcore_lane(params);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane, 0x0990);
+
+}
+
+static elink_status_t elink_get_mod_abs_int_cfg(struct bnx2x_softc *sc,
+ uint32_t shmem_base,
+ uint8_t port,
+ uint8_t * gpio_num,
+ uint8_t * gpio_port)
+{
+ uint32_t cfg_pin;
+ *gpio_num = 0;
+ *gpio_port = 0;
+ if (CHIP_IS_E3(sc)) {
+ cfg_pin = (REG_RD(sc, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].
+ e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_MOD_ABS_MASK) >>
+ PORT_HW_CFG_E3_MOD_ABS_SHIFT;
+
+ /* Should not happen. This function called upon interrupt
+ * triggered by GPIO ( since EPIO can only generate interrupts
+ * to MCP).
+ * So if this function was called and none of the GPIOs was set,
+ * it means the shit hit the fan.
+ */
+ if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
+ (cfg_pin > PIN_CFG_GPIO3_P1)) {
+ PMD_DRV_LOG(DEBUG,
+ "No cfg pin %x for module detect indication",
+ cfg_pin);
+ return ELINK_STATUS_ERROR;
+ }
+
+ *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
+ *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
+ } else {
+ *gpio_num = MISC_REGISTERS_GPIO_3;
+ *gpio_port = port;
+ }
+
+ return ELINK_STATUS_OK;
+}
+
+static int elink_is_sfp_module_plugged(struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t gpio_num, gpio_port;
+ uint32_t gpio_val;
+ if (elink_get_mod_abs_int_cfg(sc,
+ params->shmem_base, params->port,
+ &gpio_num, &gpio_port) != ELINK_STATUS_OK)
+ return 0;
+ gpio_val = elink_cb_gpio_read(sc, gpio_num, gpio_port);
+
+ /* Call the handling function in case module is detected */
+ if (gpio_val == 0)
+ return 1;
+ else
+ return 0;
+}
+
+static int elink_warpcore_get_sigdet(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ uint16_t gp2_status_reg0, lane;
+ struct bnx2x_softc *sc = params->sc;
+
+ lane = elink_get_warpcore_lane(params);
+
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0,
+ &gp2_status_reg0);
+
+ return (gp2_status_reg0 >> (8 + lane)) & 0x1;
+}
+
+static void elink_warpcore_config_runtime(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t serdes_net_if;
+ uint16_t gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
+
+ vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
+
+ if (!vars->turn_to_run_wc_rt)
+ return;
+
+ if (vars->rx_tx_asic_rst) {
+ uint16_t lane = elink_get_warpcore_lane(params);
+ serdes_net_if = (REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config
+ [params->port].
+ default_cfg)) &
+ PORT_HW_CFG_NET_SERDES_IF_MASK);
+
+ switch (serdes_net_if) {
+ case PORT_HW_CFG_NET_SERDES_IF_KR:
+ /* Do we get link yet? */
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 0x81d1,
+ &gp_status1);
+ lnkup = (gp_status1 >> (8 + lane)) & 0x1; /* 1G */
+ /*10G KR */
+ lnkup_kr = (gp_status1 >> (12 + lane)) & 0x1;
+
+ if (lnkup_kr || lnkup) {
+ vars->rx_tx_asic_rst = 0;
+ } else {
+ /* Reset the lane to see if link comes up. */
+ elink_warpcore_reset_lane(sc, phy, 1);
+ elink_warpcore_reset_lane(sc, phy, 0);
+
+ /* Restart Autoneg */
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL,
+ 0x1200);
+
+ vars->rx_tx_asic_rst--;
+ PMD_DRV_LOG(DEBUG, "0x%x retry left",
+ vars->rx_tx_asic_rst);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ }
+ /*params->rx_tx_asic_rst */
+}
+
+static void elink_warpcore_config_sfi(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ uint16_t lane = elink_get_warpcore_lane(params);
+
+ elink_warpcore_clear_regs(phy, params, lane);
+ if ((params->req_line_speed[ELINK_LINK_CONFIG_IDX(ELINK_INT_PHY)] ==
+ ELINK_SPEED_10000) &&
+ (phy->media_type != ELINK_ETH_PHY_SFP_1G_FIBER)) {
+ PMD_DRV_LOG(DEBUG, "Setting 10G SFI");
+ elink_warpcore_set_10G_XFI(phy, params, 0);
+ } else {
+ PMD_DRV_LOG(DEBUG, "Setting 1G Fiber");
+ elink_warpcore_set_sgmii_speed(phy, params, 1, 0);
+ }
+}
+
+static void elink_sfp_e3_set_transmitter(struct elink_params *params,
+ struct elink_phy *phy, uint8_t tx_en)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t cfg_pin;
+ uint8_t port = params->port;
+
+ cfg_pin = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_TX_LASER_MASK;
+ /* Set the !tx_en since this pin is DISABLE_TX_LASER */
+ PMD_DRV_LOG(DEBUG, "Setting WC TX to %d", tx_en);
+
+ /* For 20G, the expected pin to be used is 3 pins after the current */
+ elink_set_cfg_pin(sc, cfg_pin, tx_en ^ 1);
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
+ elink_set_cfg_pin(sc, cfg_pin + 3, tx_en ^ 1);
+}
+
+static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t serdes_net_if;
+ uint8_t fiber_mode;
+ uint16_t lane = elink_get_warpcore_lane(params);
+ serdes_net_if = (REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].
+ default_cfg)) &
+ PORT_HW_CFG_NET_SERDES_IF_MASK);
+ PMD_DRV_LOG(DEBUG,
+ "Begin Warpcore init, link_speed %d, "
+ "serdes_net_if = 0x%x", vars->line_speed, serdes_net_if);
+ elink_set_aer_mmd(params, phy);
+ elink_warpcore_reset_lane(sc, phy, 1);
+ vars->phy_flags |= PHY_XGXS_FLAG;
+ if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
+ (phy->req_line_speed &&
+ ((phy->req_line_speed == ELINK_SPEED_100) ||
+ (phy->req_line_speed == ELINK_SPEED_10)))) {
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ PMD_DRV_LOG(DEBUG, "Setting SGMII mode");
+ elink_warpcore_clear_regs(phy, params, lane);
+ elink_warpcore_set_sgmii_speed(phy, params, 0, 1);
+ } else {
+ switch (serdes_net_if) {
+ case PORT_HW_CFG_NET_SERDES_IF_KR:
+ /* Enable KR Auto Neg */
+ if (params->loopback_mode != ELINK_LOOPBACK_EXT)
+ elink_warpcore_enable_AN_KR(phy, params, vars);
+ else {
+ PMD_DRV_LOG(DEBUG, "Setting KR 10G-Force");
+ elink_warpcore_set_10G_KR(phy, params);
+ }
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_XFI:
+ elink_warpcore_clear_regs(phy, params, lane);
+ if (vars->line_speed == ELINK_SPEED_10000) {
+ PMD_DRV_LOG(DEBUG, "Setting 10G XFI");
+ elink_warpcore_set_10G_XFI(phy, params, 1);
+ } else {
+ if (ELINK_SINGLE_MEDIA_DIRECT(params)) {
+ PMD_DRV_LOG(DEBUG, "1G Fiber");
+ fiber_mode = 1;
+ } else {
+ PMD_DRV_LOG(DEBUG, "10/100/1G SGMII");
+ fiber_mode = 0;
+ }
+ elink_warpcore_set_sgmii_speed(phy,
+ params,
+ fiber_mode, 0);
+ }
+
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_SFI:
+ /* Issue Module detection if module is plugged, or
+ * enabled transmitter to avoid current leakage in case
+ * no module is connected
+ */
+ if ((params->loopback_mode == ELINK_LOOPBACK_NONE) ||
+ (params->loopback_mode == ELINK_LOOPBACK_EXT)) {
+ if (elink_is_sfp_module_plugged(params))
+ elink_sfp_module_detection(phy, params);
+ else
+ elink_sfp_e3_set_transmitter(params,
+ phy, 1);
+ }
+
+ elink_warpcore_config_sfi(phy, params);
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+ if (vars->line_speed != ELINK_SPEED_20000) {
+ PMD_DRV_LOG(DEBUG, "Speed not supported yet");
+ return 0;
+ }
+ PMD_DRV_LOG(DEBUG, "Setting 20G DXGXS");
+ elink_warpcore_set_20G_DXGXS(sc, phy, lane);
+ /* Issue Module detection */
+
+ elink_sfp_module_detection(phy, params);
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_KR2:
+ if (!params->loopback_mode) {
+ elink_warpcore_enable_AN_KR(phy, params, vars);
+ } else {
+ PMD_DRV_LOG(DEBUG, "Setting KR 20G-Force");
+ elink_warpcore_set_20G_force_KR2(phy, params);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG,
+ "Unsupported Serdes Net Interface 0x%x",
+ serdes_net_if);
+ return 0;
+ }
+ }
+
+ /* Take lane out of reset after configuration is finished */
+ elink_warpcore_reset_lane(sc, phy, 0);
+ PMD_DRV_LOG(DEBUG, "Exit config init");
+
+ return 0;
+}
+
+static void elink_warpcore_link_reset(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val16, lane;
+ elink_sfp_e3_set_transmitter(params, phy, 0);
+ elink_set_mdio_emac_per_phy(sc, params);
+ elink_set_aer_mmd(params, phy);
+ /* Global register */
+ elink_warpcore_reset_lane(sc, phy, 1);
+
+ /* Clear loopback settings (if any) */
+ /* 10G & 20G */
+ elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0xBFFF);
+
+ elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0xfffe);
+
+ /* Update those 1-copy registers */
+ CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ /* Enable 1G MDIO (1-copy) */
+ elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~0x10);
+
+ elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00);
+ lane = elink_get_warpcore_lane(params);
+ /* Disable CL36 PCS Tx */
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+ val16 |= (0x11 << lane);
+ if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE)
+ val16 |= (0x22 << lane);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
+
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+ val16 &= ~(0x0303 << (lane << 1));
+ val16 |= (0x0101 << (lane << 1));
+ if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE) {
+ val16 &= ~(0x0c0c << (lane << 1));
+ val16 |= (0x0404 << (lane << 1));
+ }
+
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+ /* Restore AER */
+ elink_set_aer_mmd(params, phy);
+
+}
+
+static void elink_set_warpcore_loopback(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val16;
+ uint32_t lane;
+ PMD_DRV_LOG(DEBUG, "Setting Warpcore loopback type %x, speed %d",
+ params->loopback_mode, phy->req_line_speed);
+
+ if (phy->req_line_speed < ELINK_SPEED_10000 ||
+ phy->supported & ELINK_SUPPORTED_20000baseKR2_Full) {
+ /* 10/100/1000/20G-KR2 */
+
+ /* Update those 1-copy registers */
+ CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ /* Enable 1G MDIO (1-copy) */
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ 0x10);
+ /* Set 1G loopback based on lane (1-copy) */
+ lane = elink_get_warpcore_lane(params);
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+ val16 |= (1 << lane);
+ if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE)
+ val16 |= (2 << lane);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2, val16);
+
+ /* Switch back to 4-copy registers */
+ elink_set_aer_mmd(params, phy);
+ } else {
+ /* 10G / 20G-DXGXS */
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+ 0x4000);
+ elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
+ }
+}
+
+static void elink_sync_link(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t link_10g_plus;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+ vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
+ if (vars->link_up) {
+ PMD_DRV_LOG(DEBUG, "phy link up");
+
+ vars->phy_link_up = 1;
+ vars->duplex = DUPLEX_FULL;
+ switch (vars->link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+ case ELINK_LINK_10THD:
+ vars->duplex = DUPLEX_HALF;
+ /* Fall through */
+ case ELINK_LINK_10TFD:
+ vars->line_speed = ELINK_SPEED_10;
+ break;
+
+ case ELINK_LINK_100TXHD:
+ vars->duplex = DUPLEX_HALF;
+ /* Fall through */
+ case ELINK_LINK_100T4:
+ case ELINK_LINK_100TXFD:
+ vars->line_speed = ELINK_SPEED_100;
+ break;
+
+ case ELINK_LINK_1000THD:
+ vars->duplex = DUPLEX_HALF;
+ /* Fall through */
+ case ELINK_LINK_1000TFD:
+ vars->line_speed = ELINK_SPEED_1000;
+ break;
+
+ case ELINK_LINK_2500THD:
+ vars->duplex = DUPLEX_HALF;
+ /* Fall through */
+ case ELINK_LINK_2500TFD:
+ vars->line_speed = ELINK_SPEED_2500;
+ break;
+
+ case ELINK_LINK_10GTFD:
+ vars->line_speed = ELINK_SPEED_10000;
+ break;
+ case ELINK_LINK_20GTFD:
+ vars->line_speed = ELINK_SPEED_20000;
+ break;
+ default:
+ break;
+ }
+ vars->flow_ctrl = 0;
+ if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
+ vars->flow_ctrl |= ELINK_FLOW_CTRL_TX;
+
+ if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
+ vars->flow_ctrl |= ELINK_FLOW_CTRL_RX;
+
+ if (!vars->flow_ctrl)
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+
+ if (vars->line_speed &&
+ ((vars->line_speed == ELINK_SPEED_10) ||
+ (vars->line_speed == ELINK_SPEED_100))) {
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ } else {
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+ }
+ if (vars->line_speed &&
+ USES_WARPCORE(sc) && (vars->line_speed == ELINK_SPEED_1000))
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ /* Anything 10 and over uses the bmac */
+ link_10g_plus = (vars->line_speed >= ELINK_SPEED_10000);
+
+ if (link_10g_plus) {
+ if (USES_WARPCORE(sc))
+ vars->mac_type = ELINK_MAC_TYPE_XMAC;
+ else
+ vars->mac_type = ELINK_MAC_TYPE_BMAC;
+ } else {
+ if (USES_WARPCORE(sc))
+ vars->mac_type = ELINK_MAC_TYPE_UMAC;
+ else
+ vars->mac_type = ELINK_MAC_TYPE_EMAC;
+ }
+ } else { /* Link down */
+ PMD_DRV_LOG(DEBUG, "phy link down");
+
+ vars->phy_link_up = 0;
+
+ vars->line_speed = 0;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+
+ /* Indicate no mac active */
+ vars->mac_type = ELINK_MAC_TYPE_NONE;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ if (vars->link_status & LINK_STATUS_SFP_TX_FAULT)
+ vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG;
+ }
+}
+
+void elink_link_status_update(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port = params->port;
+ uint32_t sync_offset, media_types;
+ /* Update PHY configuration */
+ set_phy_vars(params, vars);
+
+ vars->link_status = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
+
+ /* Force link UP in non LOOPBACK_EXT loopback mode(s) */
+ if (params->loopback_mode != ELINK_LOOPBACK_NONE &&
+ params->loopback_mode != ELINK_LOOPBACK_EXT)
+ vars->link_status |= LINK_STATUS_LINK_UP;
+
+ if (elink_eee_has_cap(params))
+ vars->eee_status = REG_RD(sc, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]));
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+ elink_sync_link(params, vars);
+ /* Sync media type */
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].media_type);
+ media_types = REG_RD(sc, sync_offset);
+
+ params->phy[ELINK_INT_PHY].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
+ params->phy[ELINK_EXT_PHY1].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT;
+ params->phy[ELINK_EXT_PHY2].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
+ PMD_DRV_LOG(DEBUG, "media_types = 0x%x", media_types);
+
+ /* Sync AEU offset */
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].aeu_int_mask);
+
+ vars->aeu_int_mask = REG_RD(sc, sync_offset);
+
+ /* Sync PFC status */
+ if (vars->link_status & LINK_STATUS_PFC_ENABLED)
+ params->feature_config_flags |=
+ ELINK_FEATURE_CONFIG_PFC_ENABLED;
+ else
+ params->feature_config_flags &=
+ ~ELINK_FEATURE_CONFIG_PFC_ENABLED;
+
+ if (SHMEM2_HAS(sc, link_attr_sync))
+ vars->link_attr_sync = SHMEM2_RD(sc,
+ link_attr_sync[params->port]);
+
+ PMD_DRV_LOG(DEBUG, "link_status 0x%x phy_link_up %x int_mask 0x%x",
+ vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
+ PMD_DRV_LOG(DEBUG, "line_speed %x duplex %x flow_ctrl 0x%x",
+ vars->line_speed, vars->duplex, vars->flow_ctrl);
+}
+
+static void elink_set_master_ln(struct elink_params *params,
+ struct elink_phy *phy)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t new_master_ln, ser_lane;
+ ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ /* Set the master_ln for AN */
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE, &new_master_ln);
+
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ (new_master_ln | ser_lane));
+}
+
+static elink_status_t elink_reset_unicore(struct elink_params *params,
+ struct elink_phy *phy,
+ uint8_t set_serdes)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t mii_control;
+ uint16_t i;
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+
+ /* Reset the unicore */
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+ if (set_serdes)
+ elink_set_serdes_access(sc, params->port);
+
+ /* Wait for the reset to self clear */
+ for (i = 0; i < ELINK_MDIO_ACCESS_TIMEOUT; i++) {
+ DELAY(5);
+
+ /* The reset erased the previous bank value */
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+
+ if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
+ DELAY(5);
+ return ELINK_STATUS_OK;
+ }
+ }
+
+ elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, params->port); // "Warning: PHY was not initialized,"
+ // " Port %d",
+
+ PMD_DRV_LOG(DEBUG, "BUG! XGXS is still in reset!");
+ return ELINK_STATUS_ERROR;
+
+}
+
+static void elink_set_swap_lanes(struct elink_params *params,
+ struct elink_phy *phy)
+{
+ struct bnx2x_softc *sc = params->sc;
+ /* Each two bits represents a lane number:
+ * No swap is 0123 => 0x1b no need to enable the swap
+ */
+ uint16_t rx_lane_swap, tx_lane_swap;
+
+ rx_lane_swap = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+ tx_lane_swap = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+
+ if (rx_lane_swap != 0x1b) {
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+ (rx_lane_swap |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+ } else {
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+ }
+
+ if (tx_lane_swap != 0x1b) {
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+ (tx_lane_swap |
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+ } else {
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+ }
+}
+
+static void elink_set_parallel_detection(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t control2;
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, &control2);
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+ else
+ control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+ PMD_DRV_LOG(DEBUG, "phy->speed_cap_mask = 0x%x, control2 = 0x%x",
+ phy->speed_cap_mask, control2);
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, control2);
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+ PMD_DRV_LOG(DEBUG, "XGXS");
+
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ &control2);
+
+ control2 |=
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
+
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ control2);
+
+ /* Disable parallel detection of HiG */
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+ }
+}
+
+static void elink_set_autoneg(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars, uint8_t enable_cl73)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t reg_val;
+
+ /* CL37 Autoneg */
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+
+ /* CL37 Autoneg Enabled */
+ if (vars->line_speed == ELINK_SPEED_AUTO_NEG)
+ reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
+ else /* CL37 Autoneg Disabled */
+ reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
+
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+ /* Enable/Disable Autodetection */
+
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+ reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
+ reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
+ if (vars->line_speed == ELINK_SPEED_AUTO_NEG)
+ reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+ else
+ reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+
+ /* Enable TetonII and BAM autoneg */
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, &reg_val);
+ if (vars->line_speed == ELINK_SPEED_AUTO_NEG) {
+ /* Enable BAM aneg Mode and TetonII aneg Mode */
+ reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+ } else {
+ /* TetonII and BAM Autoneg Disabled */
+ reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+ }
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, reg_val);
+
+ if (enable_cl73) {
+ /* Enable Cl73 FSM status bits */
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_UCTRL, 0xe);
+
+ /* Enable BAM Station Manager */
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1,
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN
+ |
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
+
+ /* Advertise CL73 link speeds */
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
+
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
+
+ /* CL73 Autoneg Enabled */
+ reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
+
+ } else /* CL73 Autoneg Disabled */
+ reg_val = 0;
+
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+}
+
+/* Program SerDes, forced speed */
+static void elink_program_serdes(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t reg_val;
+
+ /* Program duplex, disable autoneg and sgmii */
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+ reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
+ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
+ if (phy->req_duplex == DUPLEX_FULL)
+ reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+ /* Program speed
+ * - needed only if the speed is greater than 1G (2.5G or 10G)
+ */
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+ /* Clearing the speed value before setting the right speed */
+ PMD_DRV_LOG(DEBUG, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x", reg_val);
+
+ reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+
+ if (!((vars->line_speed == ELINK_SPEED_1000) ||
+ (vars->line_speed == ELINK_SPEED_100) ||
+ (vars->line_speed == ELINK_SPEED_10))) {
+
+ reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+ if (vars->line_speed == ELINK_SPEED_10000)
+ reg_val |=
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
+ }
+
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, reg_val);
+
+}
+
+static void elink_set_brcm_cl37_advertisement(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val = 0;
+
+ /* Set extended capabilities */
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
+ val |= MDIO_OVER_1G_UP1_2_5G;
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ val |= MDIO_OVER_1G_UP1_10G;
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_OVER_1G, MDIO_OVER_1G_UP1, val);
+
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_OVER_1G, MDIO_OVER_1G_UP3, 0x400);
+}
+
+static void elink_set_ieee_aneg_advertisement(struct elink_phy *phy,
+ struct elink_params *params,
+ uint16_t ieee_fc)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val;
+ /* For AN, we are always publishing full duplex */
+
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, &val);
+ val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
+ val |= ((ieee_fc << 3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, val);
+}
+
+static void elink_restart_autoneg(struct elink_phy *phy,
+ struct elink_params *params,
+ uint8_t enable_cl73)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t mii_control;
+
+ PMD_DRV_LOG(DEBUG, "elink_restart_autoneg");
+ /* Enable and restart BAM/CL37 aneg */
+
+ if (enable_cl73) {
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ &mii_control);
+
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ (mii_control |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+ } else {
+
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+ PMD_DRV_LOG(DEBUG,
+ "elink_restart_autoneg mii_control before = 0x%x",
+ mii_control);
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+ }
+}
+
+static void elink_initialize_sgmii_process(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t control1;
+
+ /* In SGMII mode, the unicore is always slave */
+
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &control1);
+ control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
+ /* Set sgmii mode (and not fiber) */
+ control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, control1);
+
+ /* If forced speed */
+ if (!(vars->line_speed == ELINK_SPEED_AUTO_NEG)) {
+ /* Set speed, disable autoneg */
+ uint16_t mii_control;
+
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+ mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
+ MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
+
+ switch (vars->line_speed) {
+ case ELINK_SPEED_100:
+ mii_control |=
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
+ break;
+ case ELINK_SPEED_1000:
+ mii_control |=
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
+ break;
+ case ELINK_SPEED_10:
+ /* There is nothing to set for 10M */
+ break;
+ default:
+ /* Invalid speed for SGMII */
+ PMD_DRV_LOG(DEBUG, "Invalid line_speed 0x%x",
+ vars->line_speed);
+ break;
+ }
+
+ /* Setting the full duplex */
+ if (phy->req_duplex == DUPLEX_FULL)
+ mii_control |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, mii_control);
+
+ } else { /* AN mode */
+ /* Enable and restart AN */
+ elink_restart_autoneg(phy, params, 0);
+ }
+}
+
+/* Link management
+ */
+static elink_status_t elink_direct_parallel_detect_used(struct elink_phy *phy,
+ struct elink_params
+ *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t pd_10g, status2_1000x;
+ if (phy->req_line_speed != ELINK_SPEED_AUTO_NEG)
+ return ELINK_STATUS_OK;
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2, &status2_1000x);
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2, &status2_1000x);
+ if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
+ PMD_DRV_LOG(DEBUG, "1G parallel detect link on port %d",
+ params->port);
+ return ELINK_STATUS_ERROR;
+ }
+
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, &pd_10g);
+
+ if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
+ PMD_DRV_LOG(DEBUG, "10G parallel detect link on port %d",
+ params->port);
+ return ELINK_STATUS_ERROR;
+ }
+ return ELINK_STATUS_OK;
+}
+
+static void elink_update_adv_fc(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars, uint32_t gp_status)
+{
+ uint16_t ld_pause; /* local driver */
+ uint16_t lp_pause; /* link partner */
+ uint16_t pause_result;
+ struct bnx2x_softc *sc = params->sc;
+ if ((gp_status &
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
+
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, &ld_pause);
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV1, &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause &
+ MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) >> 10;
+ PMD_DRV_LOG(DEBUG, "pause_result CL73 0x%x", pause_result);
+ } else {
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV, &ld_pause);
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+ &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) >> 5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) >> 7;
+ PMD_DRV_LOG(DEBUG, "pause_result CL37 0x%x", pause_result);
+ }
+ elink_pause_resolve(vars, pause_result);
+
+}
+
+static void elink_flow_ctrl_resolve(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars, uint32_t gp_status)
+{
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+
+ /* Resolve from gp_status in case of AN complete and not sgmii */
+ if (phy->req_flow_ctrl != ELINK_FLOW_CTRL_AUTO) {
+ /* Update the advertised flow-controled of LD/LP in AN */
+ if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG)
+ elink_update_adv_fc(phy, params, vars, gp_status);
+ /* But set the flow-control result as the requested one */
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ } else if (phy->req_line_speed != ELINK_SPEED_AUTO_NEG)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else if ((gp_status & ELINK_MDIO_AN_CL73_OR_37_COMPLETE) &&
+ (!(vars->phy_flags & PHY_SGMII_FLAG))) {
+ if (elink_direct_parallel_detect_used(phy, params)) {
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ return;
+ }
+ elink_update_adv_fc(phy, params, vars, gp_status);
+ }
+ PMD_DRV_LOG(DEBUG, "flow_ctrl 0x%x", vars->flow_ctrl);
+}
+
+static void elink_check_fallback_to_cl37(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t rx_status, ustat_val, cl37_fsm_received;
+ PMD_DRV_LOG(DEBUG, "elink_check_fallback_to_cl37");
+ /* Step 1: Make sure signal is detected */
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_RX0, MDIO_RX0_RX_STATUS, &rx_status);
+ if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
+ (MDIO_RX0_RX_STATUS_SIGDET)) {
+ PMD_DRV_LOG(DEBUG, "Signal is not detected. Restoring CL73."
+ "rx_status(0x80b0) = 0x%x", rx_status);
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
+ return;
+ }
+ /* Step 2: Check CL73 state machine */
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_USTAT1, &ustat_val);
+ if ((ustat_val &
+ (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
+ MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
+ (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
+ MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
+ PMD_DRV_LOG(DEBUG, "CL73 state-machine is not stable. "
+ "ustat_val(0x8371) = 0x%x", ustat_val);
+ return;
+ }
+ /* Step 3: Check CL37 Message Pages received to indicate LP
+ * supports only CL37
+ */
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_REMOTE_PHY,
+ MDIO_REMOTE_PHY_MISC_RX_STATUS, &cl37_fsm_received);
+ if ((cl37_fsm_received &
+ (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
+ MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
+ (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
+ MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
+ PMD_DRV_LOG(DEBUG, "No CL37 FSM were received. "
+ "misc_rx_status(0x8330) = 0x%x", cl37_fsm_received);
+ return;
+ }
+ /* The combined cl37/cl73 fsm state information indicating that
+ * we are connected to a device which does not support cl73, but
+ * does support cl37 BAM. In this case we disable cl73 and
+ * restart cl37 auto-neg
+ */
+
+ /* Disable CL73 */
+ CL22_WR_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 0);
+ /* Restart CL37 autoneg */
+ elink_restart_autoneg(phy, params, 0);
+ PMD_DRV_LOG(DEBUG, "Disabling CL73, and restarting CL37 autoneg");
+}
+
+static void elink_xgxs_an_resolve(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars, uint32_t gp_status)
+{
+ if (gp_status & ELINK_MDIO_AN_CL73_OR_37_COMPLETE)
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+ if (elink_direct_parallel_detect_used(phy, params))
+ vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+
+static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy,
+ struct elink_params *params __rte_unused,
+ struct elink_vars *vars,
+ uint16_t is_link_up,
+ uint16_t speed_mask,
+ uint16_t is_duplex)
+{
+ if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG)
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+ if (is_link_up) {
+ PMD_DRV_LOG(DEBUG, "phy link up");
+
+ vars->phy_link_up = 1;
+ vars->link_status |= LINK_STATUS_LINK_UP;
+
+ switch (speed_mask) {
+ case ELINK_GP_STATUS_10M:
+ vars->line_speed = ELINK_SPEED_10;
+ if (is_duplex == DUPLEX_FULL)
+ vars->link_status |= ELINK_LINK_10TFD;
+ else
+ vars->link_status |= ELINK_LINK_10THD;
+ break;
+
+ case ELINK_GP_STATUS_100M:
+ vars->line_speed = ELINK_SPEED_100;
+ if (is_duplex == DUPLEX_FULL)
+ vars->link_status |= ELINK_LINK_100TXFD;
+ else
+ vars->link_status |= ELINK_LINK_100TXHD;
+ break;
+
+ case ELINK_GP_STATUS_1G:
+ case ELINK_GP_STATUS_1G_KX:
+ vars->line_speed = ELINK_SPEED_1000;
+ if (is_duplex == DUPLEX_FULL)
+ vars->link_status |= ELINK_LINK_1000TFD;
+ else
+ vars->link_status |= ELINK_LINK_1000THD;
+ break;
+
+ case ELINK_GP_STATUS_2_5G:
+ vars->line_speed = ELINK_SPEED_2500;
+ if (is_duplex == DUPLEX_FULL)
+ vars->link_status |= ELINK_LINK_2500TFD;
+ else
+ vars->link_status |= ELINK_LINK_2500THD;
+ break;
+
+ case ELINK_GP_STATUS_5G:
+ case ELINK_GP_STATUS_6G:
+ PMD_DRV_LOG(DEBUG,
+ "link speed unsupported gp_status 0x%x",
+ speed_mask);
+ return ELINK_STATUS_ERROR;
+
+ case ELINK_GP_STATUS_10G_KX4:
+ case ELINK_GP_STATUS_10G_HIG:
+ case ELINK_GP_STATUS_10G_CX4:
+ case ELINK_GP_STATUS_10G_KR:
+ case ELINK_GP_STATUS_10G_SFI:
+ case ELINK_GP_STATUS_10G_XFI:
+ vars->line_speed = ELINK_SPEED_10000;
+ vars->link_status |= ELINK_LINK_10GTFD;
+ break;
+ case ELINK_GP_STATUS_20G_DXGXS:
+ case ELINK_GP_STATUS_20G_KR2:
+ vars->line_speed = ELINK_SPEED_20000;
+ vars->link_status |= ELINK_LINK_20GTFD;
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG,
+ "link speed unsupported gp_status 0x%x",
+ speed_mask);
+ return ELINK_STATUS_ERROR;
+ }
+ } else { /* link_down */
+ PMD_DRV_LOG(DEBUG, "phy link down");
+
+ vars->phy_link_up = 0;
+
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+ vars->mac_type = ELINK_MAC_TYPE_NONE;
+ }
+ PMD_DRV_LOG(DEBUG, " phy_link_up %x line_speed %d",
+ vars->phy_link_up, vars->line_speed);
+ return ELINK_STATUS_OK;
+}
+
+static uint8_t elink_link_settings_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+
+ uint16_t gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
+ elink_status_t rc = ELINK_STATUS_OK;
+
+ /* Read gp_status */
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1, &gp_status);
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
+ duplex = DUPLEX_FULL;
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
+ link_up = 1;
+ speed_mask = gp_status & ELINK_GP_STATUS_SPEED_MASK;
+ PMD_DRV_LOG(DEBUG, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x",
+ gp_status, link_up, speed_mask);
+ rc = elink_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
+ duplex);
+ if (rc == ELINK_STATUS_ERROR)
+ return rc;
+
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
+ if (ELINK_SINGLE_MEDIA_DIRECT(params)) {
+ vars->duplex = duplex;
+ elink_flow_ctrl_resolve(phy, params, vars, gp_status);
+ if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG)
+ elink_xgxs_an_resolve(phy, params, vars,
+ gp_status);
+ }
+ } else { /* Link_down */
+ if ((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
+ ELINK_SINGLE_MEDIA_DIRECT(params)) {
+ /* Check signal is detected */
+ elink_check_fallback_to_cl37(phy, params);
+ }
+ }
+
+ /* Read LP advertised speeds */
+ if (ELINK_SINGLE_MEDIA_DIRECT(params) &&
+ (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)) {
+ uint16_t val;
+
+ CL22_RD_OVER_CL45(sc, phy, MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV2, &val);
+
+ if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
+ MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ CL22_RD_OVER_CL45(sc, phy, MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_LP_UP1, &val);
+
+ if (val & MDIO_OVER_1G_UP1_2_5G)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
+ if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ }
+
+ PMD_DRV_LOG(DEBUG, "duplex %x flow_ctrl 0x%x link_status 0x%x",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
+ return rc;
+}
+
+static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t lane;
+ uint16_t gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
+ elink_status_t rc = ELINK_STATUS_OK;
+ lane = elink_get_warpcore_lane(params);
+ /* Read gp_status */
+ if ((params->loopback_mode) && (phy->flags & ELINK_FLAGS_WC_DUAL_MODE)) {
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
+ link_up &= 0x1;
+ } else if ((phy->req_line_speed > ELINK_SPEED_10000) &&
+ (phy->supported & ELINK_SUPPORTED_20000baseMLD2_Full)) {
+ uint16_t temp_link_up;
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 1, &temp_link_up);
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 1, &link_up);
+ PMD_DRV_LOG(DEBUG, "PCS RX link status = 0x%x-->0x%x",
+ temp_link_up, link_up);
+ link_up &= (1 << 2);
+ if (link_up)
+ elink_ext_phy_resolve_fc(phy, params, vars);
+ } else {
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1);
+ PMD_DRV_LOG(DEBUG, "0x81d1 = 0x%x", gp_status1);
+ /* Check for either KR, 1G, or AN up. */
+ link_up = ((gp_status1 >> 8) |
+ (gp_status1 >> 12) | (gp_status1)) & (1 << lane);
+ if (phy->supported & ELINK_SUPPORTED_20000baseKR2_Full) {
+ uint16_t an_link;
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &an_link);
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &an_link);
+ link_up |= (an_link & (1 << 2));
+ }
+ if (link_up && ELINK_SINGLE_MEDIA_DIRECT(params)) {
+ uint16_t pd, gp_status4;
+ if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) {
+ /* Check Autoneg complete */
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_4,
+ &gp_status4);
+ if (gp_status4 & ((1 << 12) << lane))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+ /* Check parallel detect used */
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_PAR_DET_10G_STATUS,
+ &pd);
+ if (pd & (1 << 15))
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+ }
+ elink_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = duplex;
+ }
+ }
+
+ if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) &&
+ ELINK_SINGLE_MEDIA_DIRECT(params)) {
+ uint16_t val;
+
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG2, &val);
+
+ if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
+ MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL3_LP_UP1, &val);
+
+ if (val & MDIO_OVER_1G_UP1_2_5G)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
+ if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ }
+
+ if (lane < 2) {
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
+ } else {
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed);
+ }
+ PMD_DRV_LOG(DEBUG, "lane %d gp_speed 0x%x", lane, gp_speed);
+
+ if ((lane & 1) == 0)
+ gp_speed <<= 8;
+ gp_speed &= 0x3f00;
+ link_up = ! !link_up;
+
+ /* Reset the TX FIFO to fix SGMII issue */
+ rc = elink_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
+ duplex);
+
+ /* In case of KR link down, start up the recovering procedure */
+ if ((!link_up) && (phy->media_type == ELINK_ETH_PHY_KR) &&
+ (!(phy->flags & ELINK_FLAGS_WC_DUAL_MODE)))
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+
+ PMD_DRV_LOG(DEBUG, "duplex %x flow_ctrl 0x%x link_status 0x%x",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
+ return rc;
+}
+
+static void elink_set_gmii_tx_driver(struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ struct elink_phy *phy = &params->phy[ELINK_INT_PHY];
+ uint16_t lp_up2;
+ uint16_t tx_driver;
+ uint16_t bank;
+
+ /* Read precomp */
+ CL22_RD_OVER_CL45(sc, phy,
+ MDIO_REG_BANK_OVER_1G, MDIO_OVER_1G_LP_UP2, &lp_up2);
+
+ /* Bits [10:7] at lp_up2, positioned at [15:12] */
+ lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
+ MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
+ MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
+
+ if (lp_up2 == 0)
+ return;
+
+ for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
+ bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
+ CL22_RD_OVER_CL45(sc, phy,
+ bank, MDIO_TX0_TX_DRIVER, &tx_driver);
+
+ /* Replace tx_driver bits [15:12] */
+ if (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
+ tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
+ tx_driver |= lp_up2;
+ CL22_WR_OVER_CL45(sc, phy,
+ bank, MDIO_TX0_TX_DRIVER, tx_driver);
+ }
+ }
+}
+
+static elink_status_t elink_emac_program(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port = params->port;
+ uint16_t mode = 0;
+
+ PMD_DRV_LOG(DEBUG, "setting link speed & duplex");
+ elink_bits_dis(sc, GRCBASE_EMAC0 + port * 0x400 +
+ EMAC_REG_EMAC_MODE,
+ (EMAC_MODE_25G_MODE |
+ EMAC_MODE_PORT_MII_10M | EMAC_MODE_HALF_DUPLEX));
+ switch (vars->line_speed) {
+ case ELINK_SPEED_10:
+ mode |= EMAC_MODE_PORT_MII_10M;
+ break;
+
+ case ELINK_SPEED_100:
+ mode |= EMAC_MODE_PORT_MII;
+ break;
+
+ case ELINK_SPEED_1000:
+ mode |= EMAC_MODE_PORT_GMII;
+ break;
+
+ case ELINK_SPEED_2500:
+ mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
+ break;
+
+ default:
+ /* 10G not valid for EMAC */
+ PMD_DRV_LOG(DEBUG, "Invalid line_speed 0x%x", vars->line_speed);
+ return ELINK_STATUS_ERROR;
+ }
+
+ if (vars->duplex == DUPLEX_HALF)
+ mode |= EMAC_MODE_HALF_DUPLEX;
+ elink_bits_en(sc,
+ GRCBASE_EMAC0 + port * 0x400 + EMAC_REG_EMAC_MODE, mode);
+
+ elink_set_led(params, vars, ELINK_LED_MODE_OPER, vars->line_speed);
+ return ELINK_STATUS_OK;
+}
+
+static void elink_set_preemphasis(struct elink_phy *phy,
+ struct elink_params *params)
+{
+
+ uint16_t bank, i = 0;
+ struct bnx2x_softc *sc = params->sc;
+
+ for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
+ bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0), i++) {
+ CL22_WR_OVER_CL45(sc, phy,
+ bank,
+ MDIO_RX0_RX_EQ_BOOST, phy->rx_preemphasis[i]);
+ }
+
+ for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
+ bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
+ CL22_WR_OVER_CL45(sc, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, phy->tx_preemphasis[i]);
+ }
+}
+
+static uint8_t elink_xgxs_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ uint8_t enable_cl73 = (ELINK_SINGLE_MEDIA_DIRECT(params) ||
+ (params->loopback_mode == ELINK_LOOPBACK_XGXS));
+
+ if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
+ if (ELINK_SINGLE_MEDIA_DIRECT(params) &&
+ (params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
+ elink_set_preemphasis(phy, params);
+
+ /* Forced speed requested? */
+ if (vars->line_speed != ELINK_SPEED_AUTO_NEG ||
+ (ELINK_SINGLE_MEDIA_DIRECT(params) &&
+ params->loopback_mode == ELINK_LOOPBACK_EXT)) {
+ PMD_DRV_LOG(DEBUG, "not SGMII, no AN");
+
+ /* Disable autoneg */
+ elink_set_autoneg(phy, params, vars, 0);
+
+ /* Program speed and duplex */
+ elink_program_serdes(phy, params, vars);
+
+ } else { /* AN_mode */
+ PMD_DRV_LOG(DEBUG, "not SGMII, AN");
+
+ /* AN enabled */
+ elink_set_brcm_cl37_advertisement(phy, params);
+
+ /* Program duplex & pause advertisement (for aneg) */
+ elink_set_ieee_aneg_advertisement(phy, params,
+ vars->ieee_fc);
+
+ /* Enable autoneg */
+ elink_set_autoneg(phy, params, vars, enable_cl73);
+
+ /* Enable and restart AN */
+ elink_restart_autoneg(phy, params, enable_cl73);
+ }
+
+ } else { /* SGMII mode */
+ PMD_DRV_LOG(DEBUG, "SGMII");
+
+ elink_initialize_sgmii_process(phy, params, vars);
+ }
+
+ return 0;
+}
+
+static elink_status_t elink_prepare_xgxs(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ elink_status_t rc;
+ vars->phy_flags |= PHY_XGXS_FLAG;
+ if ((phy->req_line_speed &&
+ ((phy->req_line_speed == ELINK_SPEED_100) ||
+ (phy->req_line_speed == ELINK_SPEED_10))) ||
+ (!phy->req_line_speed &&
+ (phy->speed_cap_mask >=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+ (phy->speed_cap_mask <
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD))
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ else
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+
+ elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ elink_set_aer_mmd(params, phy);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ elink_set_master_ln(params, phy);
+
+ rc = elink_reset_unicore(params, phy, 0);
+ /* Reset the SerDes and wait for reset bit return low */
+ if (rc != ELINK_STATUS_OK)
+ return rc;
+
+ elink_set_aer_mmd(params, phy);
+ /* Setting the masterLn_def again after the reset */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
+ elink_set_master_ln(params, phy);
+ elink_set_swap_lanes(params, phy);
+ }
+
+ return rc;
+}
+
+static uint16_t elink_wait_reset_complete(struct bnx2x_softc *sc,
+ struct elink_phy *phy,
+ struct elink_params *params)
+{
+ uint16_t cnt, ctrl;
+ /* Wait for soft reset to get cleared up to 1 sec */
+ for (cnt = 0; cnt < 1000; cnt++) {
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE)
+ elink_cl22_read(sc, phy, MDIO_PMA_REG_CTRL, &ctrl);
+ else
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, &ctrl);
+ if (!(ctrl & (1 << 15)))
+ break;
+ DELAY(1000 * 1);
+ }
+
+ if (cnt == 1000)
+ elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, params->port); // "Warning: PHY was not initialized,"
+ // " Port %d",
+
+ PMD_DRV_LOG(DEBUG, "control reg 0x%x (after %d ms)", ctrl, cnt);
+ return cnt;
+}
+
+static void elink_link_int_enable(struct elink_params *params)
+{
+ uint8_t port = params->port;
+ uint32_t mask;
+ struct bnx2x_softc *sc = params->sc;
+
+ /* Setting the status to report on link up for either XGXS or SerDes */
+ if (CHIP_IS_E3(sc)) {
+ mask = ELINK_NIG_MASK_XGXS0_LINK_STATUS;
+ if (!(ELINK_SINGLE_MEDIA_DIRECT(params)))
+ mask |= ELINK_NIG_MASK_MI_INT;
+ } else if (params->switch_cfg == ELINK_SWITCH_CFG_10G) {
+ mask = (ELINK_NIG_MASK_XGXS0_LINK10G |
+ ELINK_NIG_MASK_XGXS0_LINK_STATUS);
+ PMD_DRV_LOG(DEBUG, "enabled XGXS interrupt");
+ if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) &&
+ params->phy[ELINK_INT_PHY].type !=
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
+ mask |= ELINK_NIG_MASK_MI_INT;
+ PMD_DRV_LOG(DEBUG, "enabled external phy int");
+ }
+
+ } else { /* SerDes */
+ mask = ELINK_NIG_MASK_SERDES0_LINK_STATUS;
+ PMD_DRV_LOG(DEBUG, "enabled SerDes interrupt");
+ if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) &&
+ params->phy[ELINK_INT_PHY].type !=
+ PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
+ mask |= ELINK_NIG_MASK_MI_INT;
+ PMD_DRV_LOG(DEBUG, "enabled external phy int");
+ }
+ }
+ elink_bits_en(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, mask);
+
+ PMD_DRV_LOG(DEBUG, "port %x, is_xgxs %x, int_status 0x%x", port,
+ (params->switch_cfg == ELINK_SWITCH_CFG_10G),
+ REG_RD(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4));
+ PMD_DRV_LOG(DEBUG, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x",
+ REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4),
+ REG_RD(sc, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port * 0x18),
+ REG_RD(sc,
+ NIG_REG_SERDES0_STATUS_LINK_STATUS + port * 0x3c));
+ PMD_DRV_LOG(DEBUG, " 10G %x, XGXS_LINK %x",
+ REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK10G + port * 0x68),
+ REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK_STATUS + port * 0x68));
+}
+
+static void elink_rearm_latch_signal(struct bnx2x_softc *sc, uint8_t port,
+ uint8_t exp_mi_int)
+{
+ uint32_t latch_status = 0;
+
+ /* Disable the MI INT ( external phy int ) by writing 1 to the
+ * status register. Link down indication is high-active-signal,
+ * so in this case we need to write the status to clear the XOR
+ */
+ /* Read Latched signals */
+ latch_status = REG_RD(sc, NIG_REG_LATCH_STATUS_0 + port * 8);
+ PMD_DRV_LOG(DEBUG, "latch_status = 0x%x", latch_status);
+ /* Handle only those with latched-signal=up. */
+ if (exp_mi_int)
+ elink_bits_en(sc,
+ NIG_REG_STATUS_INTERRUPT_PORT0
+ + port * 4, ELINK_NIG_STATUS_EMAC0_MI_INT);
+ else
+ elink_bits_dis(sc,
+ NIG_REG_STATUS_INTERRUPT_PORT0
+ + port * 4, ELINK_NIG_STATUS_EMAC0_MI_INT);
+
+ if (latch_status & 1) {
+
+ /* For all latched-signal=up : Re-Arm Latch signals */
+ REG_WR(sc, NIG_REG_LATCH_STATUS_0 + port * 8,
+ (latch_status & 0xfffe) | (latch_status & 1));
+ }
+ /* For all latched-signal=up,Write original_signal to status */
+}
+
+static void elink_link_int_ack(struct elink_params *params,
+ struct elink_vars *vars, uint8_t is_10g_plus)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port = params->port;
+ uint32_t mask;
+ /* First reset all status we assume only one line will be
+ * change at a time
+ */
+ elink_bits_dis(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4,
+ (ELINK_NIG_STATUS_XGXS0_LINK10G |
+ ELINK_NIG_STATUS_XGXS0_LINK_STATUS |
+ ELINK_NIG_STATUS_SERDES0_LINK_STATUS));
+ if (vars->phy_link_up) {
+ if (USES_WARPCORE(sc))
+ mask = ELINK_NIG_STATUS_XGXS0_LINK_STATUS;
+ else {
+ if (is_10g_plus)
+ mask = ELINK_NIG_STATUS_XGXS0_LINK10G;
+ else if (params->switch_cfg == ELINK_SWITCH_CFG_10G) {
+ /* Disable the link interrupt by writing 1 to
+ * the relevant lane in the status register
+ */
+ uint32_t ser_lane =
+ ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ mask = ((1 << ser_lane) <<
+ ELINK_NIG_STATUS_XGXS0_LINK_STATUS_SIZE);
+ } else
+ mask = ELINK_NIG_STATUS_SERDES0_LINK_STATUS;
+ }
+ PMD_DRV_LOG(DEBUG, "Ack link up interrupt with mask 0x%x",
+ mask);
+ elink_bits_en(sc,
+ NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4, mask);
+ }
+}
+
+static uint8_t elink_format_ver(uint32_t num, uint8_t * str,
+ uint16_t * len)
+{
+ uint8_t *str_ptr = str;
+ uint32_t mask = 0xf0000000;
+ uint8_t shift = 8 * 4;
+ uint8_t digit;
+ uint8_t remove_leading_zeros = 1;
+ if (*len < 10) {
+ /* Need more than 10chars for this format */
+ *str_ptr = '\0';
+ (*len)--;
+ return ELINK_STATUS_ERROR;
+ }
+ while (shift > 0) {
+
+ shift -= 4;
+ digit = ((num & mask) >> shift);
+ if (digit == 0 && remove_leading_zeros) {
+ mask = mask >> 4;
+ continue;
+ } else if (digit < 0xa)
+ *str_ptr = digit + '0';
+ else
+ *str_ptr = digit - 0xa + 'a';
+ remove_leading_zeros = 0;
+ str_ptr++;
+ (*len)--;
+ mask = mask >> 4;
+ if (shift == 4 * 4) {
+ *str_ptr = '.';
+ str_ptr++;
+ (*len)--;
+ remove_leading_zeros = 1;
+ }
+ }
+ return ELINK_STATUS_OK;
+}
+
+static uint8_t elink_null_format_ver(__rte_unused uint32_t spirom_ver,
+ uint8_t * str, uint16_t * len)
+{
+ str[0] = '\0';
+ (*len)--;
+ return ELINK_STATUS_OK;
+}
+
+static void elink_set_xgxs_loopback(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ uint8_t port = params->port;
+ struct bnx2x_softc *sc = params->sc;
+
+ if (phy->req_line_speed != ELINK_SPEED_1000) {
+ uint32_t md_devad = 0;
+
+ PMD_DRV_LOG(DEBUG, "XGXS 10G loopback enable");
+
+ if (!CHIP_IS_E3(sc)) {
+ /* Change the uni_phy_addr in the nig */
+ md_devad = REG_RD(sc, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
+ port * 0x18));
+
+ REG_WR(sc, NIG_REG_XGXS0_CTRL_MD_DEVAD + port * 0x18,
+ 0x5);
+ }
+
+ elink_cl45_write(sc, phy,
+ 5,
+ (MDIO_REG_BANK_AER_BLOCK +
+ (MDIO_AER_BLOCK_AER_REG & 0xf)), 0x2800);
+
+ elink_cl45_write(sc, phy,
+ 5,
+ (MDIO_REG_BANK_CL73_IEEEB0 +
+ (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+ 0x6041);
+ DELAY(1000 * 200);
+ /* Set aer mmd back */
+ elink_set_aer_mmd(params, phy);
+
+ if (!CHIP_IS_E3(sc)) {
+ /* And md_devad */
+ REG_WR(sc, NIG_REG_XGXS0_CTRL_MD_DEVAD + port * 0x18,
+ md_devad);
+ }
+ } else {
+ uint16_t mii_ctrl;
+ PMD_DRV_LOG(DEBUG, "XGXS 1G loopback enable");
+ elink_cl45_read(sc, phy, 5,
+ (MDIO_REG_BANK_COMBO_IEEE0 +
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ &mii_ctrl);
+ elink_cl45_write(sc, phy, 5,
+ (MDIO_REG_BANK_COMBO_IEEE0 +
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ mii_ctrl |
+ MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
+ }
+}
+
+elink_status_t elink_set_led(struct elink_params *params,
+ struct elink_vars *vars, uint8_t mode,
+ uint32_t speed)
+{
+ uint8_t port = params->port;
+ uint16_t hw_led_mode = params->hw_led_mode;
+ elink_status_t rc = ELINK_STATUS_OK;
+ uint8_t phy_idx;
+ uint32_t tmp;
+ uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ struct bnx2x_softc *sc = params->sc;
+ PMD_DRV_LOG(DEBUG, "elink_set_led: port %x, mode %d", port, mode);
+ PMD_DRV_LOG(DEBUG, "speed 0x%x, hw_led_mode 0x%x", speed, hw_led_mode);
+ /* In case */
+ for (phy_idx = ELINK_EXT_PHY1; phy_idx < ELINK_MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].set_link_led) {
+ params->phy[phy_idx].set_link_led(&params->phy[phy_idx],
+ params, mode);
+ }
+ }
+
+ switch (mode) {
+ case ELINK_LED_MODE_FRONT_PANEL_OFF:
+ case ELINK_LED_MODE_OFF:
+ REG_WR(sc, NIG_REG_LED_10G_P0 + port * 4, 0);
+ REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4,
+ SHARED_HW_CFG_LED_MAC1);
+
+ tmp = elink_cb_reg_read(sc, emac_base + EMAC_REG_EMAC_LED);
+ if (params->phy[ELINK_EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE)
+ tmp &= ~(EMAC_LED_1000MB_OVERRIDE |
+ EMAC_LED_100MB_OVERRIDE |
+ EMAC_LED_10MB_OVERRIDE);
+ else
+ tmp |= EMAC_LED_OVERRIDE;
+
+ elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED, tmp);
+ break;
+
+ case ELINK_LED_MODE_OPER:
+ /* For all other phys, OPER mode is same as ON, so in case
+ * link is down, do nothing
+ */
+ if (!vars->link_up)
+ break;
+ /* fall-through */
+ case ELINK_LED_MODE_ON:
+ if (((params->phy[ELINK_EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727) ||
+ (params->phy[ELINK_EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722)) &&
+ CHIP_IS_E2(sc) && params->num_phys == 2) {
+ /* This is a work-around for E2+8727 Configurations */
+ if (mode == ELINK_LED_MODE_ON ||
+ speed == ELINK_SPEED_10000) {
+ REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, 0);
+ REG_WR(sc, NIG_REG_LED_10G_P0 + port * 4, 1);
+
+ tmp =
+ elink_cb_reg_read(sc,
+ emac_base +
+ EMAC_REG_EMAC_LED);
+ elink_cb_reg_write(sc,
+ emac_base +
+ EMAC_REG_EMAC_LED,
+ (tmp | EMAC_LED_OVERRIDE));
+ /* Return here without enabling traffic
+ * LED blink and setting rate in ON mode.
+ * In oper mode, enabling LED blink
+ * and setting rate is needed.
+ */
+ if (mode == ELINK_LED_MODE_ON)
+ return rc;
+ }
+ } else if (ELINK_SINGLE_MEDIA_DIRECT(params)) {
+ /* This is a work-around for HW issue found when link
+ * is up in CL73
+ */
+ if ((!CHIP_IS_E3(sc)) ||
+ (CHIP_IS_E3(sc) && mode == ELINK_LED_MODE_ON))
+ REG_WR(sc, NIG_REG_LED_10G_P0 + port * 4, 1);
+
+ if (CHIP_IS_E1x(sc) ||
+ CHIP_IS_E2(sc) || (mode == ELINK_LED_MODE_ON))
+ REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, 0);
+ else
+ REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4,
+ hw_led_mode);
+ } else if ((params->phy[ELINK_EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE) &&
+ (mode == ELINK_LED_MODE_ON)) {
+ REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, 0);
+ tmp =
+ elink_cb_reg_read(sc,
+ emac_base + EMAC_REG_EMAC_LED);
+ elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED,
+ tmp | EMAC_LED_OVERRIDE |
+ EMAC_LED_1000MB_OVERRIDE);
+ /* Break here; otherwise, it'll disable the
+ * intended override.
+ */
+ break;
+ } else {
+ uint32_t nig_led_mode = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT)
+ ==
+ SHARED_HW_CFG_LED_EXTPHY2)
+ ? (SHARED_HW_CFG_LED_PHY1 >>
+ SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
+ REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4,
+ nig_led_mode);
+ }
+
+ REG_WR(sc, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port * 4,
+ 0);
+ /* Set blinking rate to ~15.9Hz */
+ if (CHIP_IS_E3(sc))
+ REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port * 4,
+ LED_BLINK_RATE_VAL_E3);
+ else
+ REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port * 4,
+ LED_BLINK_RATE_VAL_E1X_E2);
+ REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port * 4, 1);
+ tmp = elink_cb_reg_read(sc, emac_base + EMAC_REG_EMAC_LED);
+ elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED,
+ (tmp & (~EMAC_LED_OVERRIDE)));
+
+ break;
+
+ default:
+ rc = ELINK_STATUS_ERROR;
+ PMD_DRV_LOG(DEBUG, "elink_set_led: Invalid led mode %d", mode);
+ break;
+ }
+ return rc;
+
+}
+
+static elink_status_t elink_link_initialize(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ elink_status_t rc = ELINK_STATUS_OK;
+ uint8_t phy_index, non_ext_phy;
+ struct bnx2x_softc *sc = params->sc;
+ /* In case of external phy existence, the line speed would be the
+ * line speed linked up by the external phy. In case it is direct
+ * only, then the line_speed during initialization will be
+ * equal to the req_line_speed
+ */
+ vars->line_speed = params->phy[ELINK_INT_PHY].req_line_speed;
+
+ /* Initialize the internal phy in case this is a direct board
+ * (no external phys), or this board has external phy which requires
+ * to first.
+ */
+ if (!USES_WARPCORE(sc))
+ elink_prepare_xgxs(&params->phy[ELINK_INT_PHY], params, vars);
+ /* init ext phy and enable link state int */
+ non_ext_phy = (ELINK_SINGLE_MEDIA_DIRECT(params) ||
+ (params->loopback_mode == ELINK_LOOPBACK_XGXS));
+
+ if (non_ext_phy ||
+ (params->phy[ELINK_EXT_PHY1].flags & ELINK_FLAGS_INIT_XGXS_FIRST) ||
+ (params->loopback_mode == ELINK_LOOPBACK_EXT_PHY)) {
+ struct elink_phy *phy = &params->phy[ELINK_INT_PHY];
+ if (vars->line_speed == ELINK_SPEED_AUTO_NEG &&
+ (CHIP_IS_E1x(sc) || CHIP_IS_E2(sc)))
+ elink_set_parallel_detection(phy, params);
+ if (params->phy[ELINK_INT_PHY].config_init)
+ params->phy[ELINK_INT_PHY].config_init(phy,
+ params, vars);
+ }
+
+ /* Re-read this value in case it was changed inside config_init due to
+ * limitations of optic module
+ */
+ vars->line_speed = params->phy[ELINK_INT_PHY].req_line_speed;
+
+ /* Init external phy */
+ if (non_ext_phy) {
+ if (params->phy[ELINK_INT_PHY].supported &
+ ELINK_SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+ } else {
+ for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ /* No need to initialize second phy in case of first
+ * phy only selection. In case of second phy, we do
+ * need to initialize the first phy, since they are
+ * connected.
+ */
+ if (params->phy[phy_index].supported &
+ ELINK_SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+
+ if (phy_index == ELINK_EXT_PHY2 &&
+ (elink_phy_selection(params) ==
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
+ PMD_DRV_LOG(DEBUG,
+ "Not initializing second phy");
+ continue;
+ }
+ params->phy[phy_index].config_init(&params->
+ phy[phy_index],
+ params, vars);
+ }
+ }
+ /* Reset the interrupt indication after phy was initialized */
+ elink_bits_dis(sc, NIG_REG_STATUS_INTERRUPT_PORT0 +
+ params->port * 4,
+ (ELINK_NIG_STATUS_XGXS0_LINK10G |
+ ELINK_NIG_STATUS_XGXS0_LINK_STATUS |
+ ELINK_NIG_STATUS_SERDES0_LINK_STATUS |
+ ELINK_NIG_MASK_MI_INT));
+ return rc;
+}
+
+static void elink_int_link_reset(__rte_unused struct elink_phy *phy,
+ struct elink_params *params)
+{
+ /* Reset the SerDes/XGXS */
+ REG_WR(params->sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+ (0x1ff << (params->port * 16)));
+}
+
+static void elink_common_ext_link_reset(__rte_unused struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t gpio_port;
+ /* HW reset */
+ if (CHIP_IS_E2(sc))
+ gpio_port = SC_PATH(sc);
+ else
+ gpio_port = params->port;
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, gpio_port);
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, gpio_port);
+ PMD_DRV_LOG(DEBUG, "reset external PHY");
+}
+
+static elink_status_t elink_update_link_down(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port = params->port;
+
+ PMD_DRV_LOG(DEBUG, "Port %x: Link is down", port);
+ elink_set_led(params, vars, ELINK_LED_MODE_OFF, 0);
+ vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
+ /* Indicate no mac active */
+ vars->mac_type = ELINK_MAC_TYPE_NONE;
+
+ /* Update shared memory */
+ vars->link_status &= ~ELINK_LINK_UPDATE_MASK;
+ vars->line_speed = 0;
+ elink_update_mng(params, vars->link_status);
+
+ /* Activate nig drain */
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + port * 4, 1);
+
+ /* Disable emac */
+ if (!CHIP_IS_E3(sc))
+ REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 0);
+
+ DELAY(1000 * 10);
+ /* Reset BigMac/Xmac */
+ if (CHIP_IS_E1x(sc) || CHIP_IS_E2(sc))
+ elink_set_bmac_rx(sc, params->port, 0);
+
+ if (CHIP_IS_E3(sc)) {
+ /* Prevent LPI Generation by chip */
+ REG_WR(sc, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
+ 0);
+ REG_WR(sc, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
+ 0);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+
+ elink_update_mng_eee(params, vars->eee_status);
+ elink_set_xmac_rxtx(params, 0);
+ elink_set_umac_rxtx(params, 0);
+ }
+
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_update_link_up(struct elink_params *params,
+ struct elink_vars *vars,
+ uint8_t link_10g)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t phy_idx, port = params->port;
+ elink_status_t rc = ELINK_STATUS_OK;
+
+ vars->link_status |= (LINK_STATUS_LINK_UP |
+ LINK_STATUS_PHYSICAL_LINK_FLAG);
+ vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+
+ if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)
+ vars->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+
+ if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX)
+ vars->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+ if (USES_WARPCORE(sc)) {
+ if (link_10g) {
+ if (elink_xmac_enable(params, vars, 0) ==
+ ELINK_STATUS_NO_LINK) {
+ PMD_DRV_LOG(DEBUG, "Found errors on XMAC");
+ vars->link_up = 0;
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ }
+ } else
+ elink_umac_enable(params, vars, 0);
+ elink_set_led(params, vars,
+ ELINK_LED_MODE_OPER, vars->line_speed);
+
+ if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) &&
+ (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) {
+ PMD_DRV_LOG(DEBUG, "Enabling LPI assertion");
+ REG_WR(sc, MISC_REG_CPMU_LP_FW_ENABLE_P0 +
+ (params->port << 2), 1);
+ REG_WR(sc, MISC_REG_CPMU_LP_DR_ENABLE, 1);
+ REG_WR(sc, MISC_REG_CPMU_LP_MASK_ENT_P0 +
+ (params->port << 2), 0xfc20);
+ }
+ }
+ if ((CHIP_IS_E1x(sc) || CHIP_IS_E2(sc))) {
+ if (link_10g) {
+ if (elink_bmac_enable(params, vars, 0, 1) ==
+ ELINK_STATUS_NO_LINK) {
+ PMD_DRV_LOG(DEBUG, "Found errors on BMAC");
+ vars->link_up = 0;
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ }
+
+ elink_set_led(params, vars,
+ ELINK_LED_MODE_OPER, ELINK_SPEED_10000);
+ } else {
+ rc = elink_emac_program(params, vars);
+ elink_emac_enable(params, vars, 0);
+
+ /* AN complete? */
+ if ((vars->link_status &
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+ && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
+ ELINK_SINGLE_MEDIA_DIRECT(params))
+ elink_set_gmii_tx_driver(params);
+ }
+ }
+
+ /* PBF - link up */
+ if (CHIP_IS_E1x(sc))
+ rc |= elink_pbf_update(params, vars->flow_ctrl,
+ vars->line_speed);
+
+ /* Disable drain */
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + port * 4, 0);
+
+ /* Update shared memory */
+ elink_update_mng(params, vars->link_status);
+ elink_update_mng_eee(params, vars->eee_status);
+ /* Check remote fault */
+ for (phy_idx = ELINK_INT_PHY; phy_idx < ELINK_MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].flags & ELINK_FLAGS_TX_ERROR_CHECK) {
+ elink_check_half_open_conn(params, vars, 0);
+ break;
+ }
+ }
+ DELAY(1000 * 20);
+ return rc;
+}
+
+/* The elink_link_update function should be called upon link
+ * interrupt.
+ * Link is considered up as follows:
+ * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
+ * to be up
+ * - SINGLE_MEDIA - The link between the 577xx and the external
+ * phy (XGXS) need to up as well as the external link of the
+ * phy (PHY_EXT1)
+ * - DUAL_MEDIA - The link between the 577xx and the first
+ * external phy needs to be up, and at least one of the 2
+ * external phy link must be up.
+ */
+elink_status_t elink_link_update(struct elink_params * params,
+ struct elink_vars * vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ struct elink_vars phy_vars[ELINK_MAX_PHYS];
+ uint8_t port = params->port;
+ uint8_t link_10g_plus, phy_index;
+ uint8_t ext_phy_link_up = 0, cur_link_up;
+ elink_status_t rc = ELINK_STATUS_OK;
+ __rte_unused uint8_t is_mi_int = 0;
+ uint16_t ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
+ uint8_t active_external_phy = ELINK_INT_PHY;
+ vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+ vars->link_status &= ~ELINK_LINK_UPDATE_MASK;
+ for (phy_index = ELINK_INT_PHY; phy_index < params->num_phys;
+ phy_index++) {
+ phy_vars[phy_index].flow_ctrl = 0;
+ phy_vars[phy_index].link_status = ETH_LINK_DOWN;
+ phy_vars[phy_index].line_speed = 0;
+ phy_vars[phy_index].duplex = DUPLEX_FULL;
+ phy_vars[phy_index].phy_link_up = 0;
+ phy_vars[phy_index].link_up = 0;
+ phy_vars[phy_index].fault_detected = 0;
+ /* different consideration, since vars holds inner state */
+ phy_vars[phy_index].eee_status = vars->eee_status;
+ }
+
+ if (USES_WARPCORE(sc))
+ elink_set_aer_mmd(params, &params->phy[ELINK_INT_PHY]);
+
+ PMD_DRV_LOG(DEBUG, "port %x, XGXS?%x, int_status 0x%x",
+ port, (vars->phy_flags & PHY_XGXS_FLAG),
+ REG_RD(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4));
+
+ is_mi_int = (uint8_t) (REG_RD(sc, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
+ port * 0x18) > 0);
+ PMD_DRV_LOG(DEBUG, "int_mask 0x%x MI_INT %x, SERDES_LINK %x",
+ REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4),
+ is_mi_int,
+ REG_RD(sc,
+ NIG_REG_SERDES0_STATUS_LINK_STATUS + port * 0x3c));
+
+ PMD_DRV_LOG(DEBUG, " 10G %x, XGXS_LINK %x",
+ REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK10G + port * 0x68),
+ REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK_STATUS + port * 0x68));
+
+ /* Disable emac */
+ if (!CHIP_IS_E3(sc))
+ REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 0);
+
+ /* Step 1:
+ * Check external link change only for external phys, and apply
+ * priority selection between them in case the link on both phys
+ * is up. Note that instead of the common vars, a temporary
+ * vars argument is used since each phy may have different link/
+ * speed/duplex result
+ */
+ for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ struct elink_phy *phy = &params->phy[phy_index];
+ if (!phy->read_status)
+ continue;
+ /* Read link status and params of this ext phy */
+ cur_link_up = phy->read_status(phy, params,
+ &phy_vars[phy_index]);
+ if (cur_link_up) {
+ PMD_DRV_LOG(DEBUG, "phy in index %d link is up",
+ phy_index);
+ } else {
+ PMD_DRV_LOG(DEBUG, "phy in index %d link is down",
+ phy_index);
+ continue;
+ }
+
+ if (!ext_phy_link_up) {
+ ext_phy_link_up = 1;
+ active_external_phy = phy_index;
+ } else {
+ switch (elink_phy_selection(params)) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ /* In this option, the first PHY makes sure to pass the
+ * traffic through itself only.
+ * Its not clear how to reset the link on the second phy
+ */
+ active_external_phy = ELINK_EXT_PHY1;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ /* In this option, the first PHY makes sure to pass the
+ * traffic through the second PHY.
+ */
+ active_external_phy = ELINK_EXT_PHY2;
+ break;
+ default:
+ /* Link indication on both PHYs with the following cases
+ * is invalid:
+ * - FIRST_PHY means that second phy wasn't initialized,
+ * hence its link is expected to be down
+ * - SECOND_PHY means that first phy should not be able
+ * to link up by itself (using configuration)
+ * - DEFAULT should be overridden during initialization
+ */
+ PMD_DRV_LOG(DEBUG, "Invalid link indication"
+ "mpc=0x%x. DISABLING LINK !!!",
+ params->multi_phy_config);
+ ext_phy_link_up = 0;
+ break;
+ }
+ }
+ }
+ prev_line_speed = vars->line_speed;
+ /* Step 2:
+ * Read the status of the internal phy. In case of
+ * DIRECT_SINGLE_MEDIA board, this link is the external link,
+ * otherwise this is the link between the 577xx and the first
+ * external phy
+ */
+ if (params->phy[ELINK_INT_PHY].read_status)
+ params->phy[ELINK_INT_PHY].read_status(&params->
+ phy[ELINK_INT_PHY],
+ params, vars);
+ /* The INT_PHY flow control reside in the vars. This include the
+ * case where the speed or flow control are not set to AUTO.
+ * Otherwise, the active external phy flow control result is set
+ * to the vars. The ext_phy_line_speed is needed to check if the
+ * speed is different between the internal phy and external phy.
+ * This case may be result of intermediate link speed change.
+ */
+ if (active_external_phy > ELINK_INT_PHY) {
+ vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
+ /* Link speed is taken from the XGXS. AN and FC result from
+ * the external phy.
+ */
+ vars->link_status |= phy_vars[active_external_phy].link_status;
+
+ /* if active_external_phy is first PHY and link is up - disable
+ * disable TX on second external PHY
+ */
+ if (active_external_phy == ELINK_EXT_PHY1) {
+ if (params->phy[ELINK_EXT_PHY2].phy_specific_func) {
+ PMD_DRV_LOG(DEBUG, "Disabling TX on EXT_PHY2");
+ params->phy[ELINK_EXT_PHY2].
+ phy_specific_func(&params->
+ phy[ELINK_EXT_PHY2],
+ params, ELINK_DISABLE_TX);
+ }
+ }
+
+ ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
+ vars->duplex = phy_vars[active_external_phy].duplex;
+ if (params->phy[active_external_phy].supported &
+ ELINK_SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+ else
+ vars->link_status &= ~LINK_STATUS_SERDES_LINK;
+
+ vars->eee_status = phy_vars[active_external_phy].eee_status;
+
+ PMD_DRV_LOG(DEBUG, "Active external phy selected: %x",
+ active_external_phy);
+ }
+
+ for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ if (params->phy[phy_index].flags &
+ ELINK_FLAGS_REARM_LATCH_SIGNAL) {
+ elink_rearm_latch_signal(sc, port,
+ phy_index ==
+ active_external_phy);
+ break;
+ }
+ }
+ PMD_DRV_LOG(DEBUG, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
+ " ext_phy_line_speed = %d", vars->flow_ctrl,
+ vars->link_status, ext_phy_line_speed);
+ /* Upon link speed change set the NIG into drain mode. Comes to
+ * deals with possible FIFO glitch due to clk change when speed
+ * is decreased without link down indicator
+ */
+
+ if (vars->phy_link_up) {
+ if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
+ (ext_phy_line_speed != vars->line_speed)) {
+ PMD_DRV_LOG(DEBUG, "Internal link speed %d is"
+ " different than the external"
+ " link speed %d", vars->line_speed,
+ ext_phy_line_speed);
+ vars->phy_link_up = 0;
+ } else if (prev_line_speed != vars->line_speed) {
+ REG_WR(sc,
+ NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4,
+ 0);
+ DELAY(1000 * 1);
+ }
+ }
+
+ /* Anything 10 and over uses the bmac */
+ link_10g_plus = (vars->line_speed >= ELINK_SPEED_10000);
+
+ elink_link_int_ack(params, vars, link_10g_plus);
+
+ /* In case external phy link is up, and internal link is down
+ * (not initialized yet probably after link initialization, it
+ * needs to be initialized.
+ * Note that after link down-up as result of cable plug, the xgxs
+ * link would probably become up again without the need
+ * initialize it
+ */
+ if (!(ELINK_SINGLE_MEDIA_DIRECT(params))) {
+ PMD_DRV_LOG(DEBUG, "ext_phy_link_up = %d, int_link_up = %d,"
+ " init_preceding = %d", ext_phy_link_up,
+ vars->phy_link_up,
+ params->phy[ELINK_EXT_PHY1].flags &
+ ELINK_FLAGS_INIT_XGXS_FIRST);
+ if (!(params->phy[ELINK_EXT_PHY1].flags &
+ ELINK_FLAGS_INIT_XGXS_FIRST)
+ && ext_phy_link_up && !vars->phy_link_up) {
+ vars->line_speed = ext_phy_line_speed;
+ if (vars->line_speed < ELINK_SPEED_1000)
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ else
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+
+ if (params->phy[ELINK_INT_PHY].config_init)
+ params->phy[ELINK_INT_PHY].config_init(&params->
+ phy
+ [ELINK_INT_PHY],
+ params,
+ vars);
+ }
+ }
+ /* Link is up only if both local phy and external phy (in case of
+ * non-direct board) are up and no fault detected on active PHY.
+ */
+ vars->link_up = (vars->phy_link_up &&
+ (ext_phy_link_up ||
+ ELINK_SINGLE_MEDIA_DIRECT(params)) &&
+ (phy_vars[active_external_phy].fault_detected == 0));
+
+ /* Update the PFC configuration in case it was changed */
+ if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
+ vars->link_status |= LINK_STATUS_PFC_ENABLED;
+ else
+ vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
+ if (vars->link_up)
+ rc = elink_update_link_up(params, vars, link_10g_plus);
+ else
+ rc = elink_update_link_down(params, vars);
+
+ /* Update MCP link status was changed */
+ if (params->
+ feature_config_flags & ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX)
+ elink_cb_fw_command(sc, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0);
+
+ return rc;
+}
+
+/*****************************************************************************/
+/* External Phy section */
+/*****************************************************************************/
+static void elink_ext_phy_hw_reset(struct bnx2x_softc *sc, uint8_t port)
+{
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ DELAY(1000 * 1);
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+}
+
+static void elink_save_spirom_version(struct bnx2x_softc *sc,
+ __rte_unused uint8_t port,
+ uint32_t spirom_ver, uint32_t ver_addr)
+{
+ PMD_DRV_LOG(DEBUG, "FW version 0x%x:0x%x for port %d",
+ (uint16_t) (spirom_ver >> 16), (uint16_t) spirom_ver, port);
+
+ if (ver_addr)
+ REG_WR(sc, ver_addr, spirom_ver);
+}
+
+static void elink_save_bnx2x_spirom_ver(struct bnx2x_softc *sc,
+ struct elink_phy *phy, uint8_t port)
+{
+ uint16_t fw_ver1, fw_ver2;
+
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+ elink_save_spirom_version(sc, port,
+ (uint32_t) (fw_ver1 << 16 | fw_ver2),
+ phy->ver_addr);
+}
+
+static void elink_ext_phy_10G_an_resolve(struct bnx2x_softc *sc,
+ struct elink_phy *phy,
+ struct elink_vars *vars)
+{
+ uint16_t val;
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_STATUS, &val);
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_STATUS, &val);
+ if (val & (1 << 5))
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ if ((val & (1 << 0)) == 0)
+ vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+
+/******************************************************************/
+/* common BNX2X8073/BNX2X8727 PHY SECTION */
+/******************************************************************/
+static void elink_8073_resolve_fc(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ if (phy->req_line_speed == ELINK_SPEED_10 ||
+ phy->req_line_speed == ELINK_SPEED_100) {
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ return;
+ }
+
+ if (elink_ext_phy_resolve_fc(phy, params, vars) &&
+ (vars->flow_ctrl == ELINK_FLOW_CTRL_NONE)) {
+ uint16_t pause_result;
+ uint16_t ld_pause; /* local */
+ uint16_t lp_pause; /* link partner */
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
+
+ elink_pause_resolve(vars, pause_result);
+ PMD_DRV_LOG(DEBUG, "Ext PHY CL37 pause result 0x%x",
+ pause_result);
+ }
+}
+
+static elink_status_t elink_8073_8727_external_rom_boot(struct bnx2x_softc *sc,
+ struct elink_phy *phy,
+ uint8_t port)
+{
+ uint32_t count = 0;
+ uint16_t fw_ver1 = 0, fw_msgout;
+ elink_status_t rc = ELINK_STATUS_OK;
+
+ /* Boot port from external ROM */
+ /* EDC grst */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+
+ /* Ucode reboot and rst */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x008c);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+
+ /* Reset internal microprocessor */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+ /* Release srst bit */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+ /* Delay 100ms per the PHY specifications */
+ DELAY(1000 * 100);
+
+ /* 8073 sometimes taking longer to download */
+ do {
+ count++;
+ if (count > 300) {
+ PMD_DRV_LOG(DEBUG,
+ "elink_8073_8727_external_rom_boot port %x:"
+ "Download failed. fw version = 0x%x",
+ port, fw_ver1);
+ rc = ELINK_STATUS_ERROR;
+ break;
+ }
+
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
+
+ DELAY(1000 * 1);
+ } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
+ ((fw_msgout & 0xff) != 0x03 && (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073)));
+
+ /* Clear ser_boot_ctl bit */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ elink_save_bnx2x_spirom_ver(sc, phy, port);
+
+ PMD_DRV_LOG(DEBUG,
+ "elink_8073_8727_external_rom_boot port %x:"
+ "Download complete. fw version = 0x%x", port, fw_ver1);
+
+ return rc;
+}
+
+/******************************************************************/
+/* BNX2X8073 PHY SECTION */
+/******************************************************************/
+static elink_status_t elink_8073_is_snr_needed(struct bnx2x_softc *sc,
+ struct elink_phy *phy)
+{
+ /* This is only required for 8073A1, version 102 only */
+ uint16_t val;
+
+ /* Read 8073 HW revision */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
+
+ if (val != 1) {
+ /* No need to workaround in 8073 A1 */
+ return ELINK_STATUS_OK;
+ }
+
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER2, &val);
+
+ /* SNR should be applied only for version 0x102 */
+ if (val != 0x102)
+ return ELINK_STATUS_OK;
+
+ return ELINK_STATUS_ERROR;
+}
+
+static elink_status_t elink_8073_xaui_wa(struct bnx2x_softc *sc,
+ struct elink_phy *phy)
+{
+ uint16_t val, cnt, cnt1;
+
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
+
+ if (val > 0) {
+ /* No need to workaround in 8073 A1 */
+ return ELINK_STATUS_OK;
+ }
+ /* XAUI workaround in 8073 A0: */
+
+ /* After loading the boot ROM and restarting Autoneg, poll
+ * Dev1, Reg $C820:
+ */
+
+ for (cnt = 0; cnt < 1000; cnt++) {
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &val);
+ /* If bit [14] = 0 or bit [13] = 0, continue on with
+ * system initialization (XAUI work-around not required, as
+ * these bits indicate 2.5G or 1G link up).
+ */
+ if (!(val & (1 << 14)) || !(val & (1 << 13))) {
+ PMD_DRV_LOG(DEBUG, "XAUI work-around not required");
+ return ELINK_STATUS_OK;
+ } else if (!(val & (1 << 15))) {
+ PMD_DRV_LOG(DEBUG, "bit 15 went off");
+ /* If bit 15 is 0, then poll Dev1, Reg $C841 until it's
+ * MSB (bit15) goes to 1 (indicating that the XAUI
+ * workaround has completed), then continue on with
+ * system initialization.
+ */
+ for (cnt1 = 0; cnt1 < 1000; cnt1++) {
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_XAUI_WA,
+ &val);
+ if (val & (1 << 15)) {
+ PMD_DRV_LOG(DEBUG,
+ "XAUI workaround has completed");
+ return ELINK_STATUS_OK;
+ }
+ DELAY(1000 * 3);
+ }
+ break;
+ }
+ DELAY(1000 * 3);
+ }
+ PMD_DRV_LOG(DEBUG, "Warning: XAUI work-around timeout !!!");
+ return ELINK_STATUS_ERROR;
+}
+
+static void elink_807x_force_10G(struct bnx2x_softc *sc, struct elink_phy *phy)
+{
+ /* Force KR or KX */
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BNX2X_CTRL, 0x0000);
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+}
+
+static void elink_8073_set_pause_cl37(struct elink_params *params,
+ struct elink_phy *phy,
+ struct elink_vars *vars)
+{
+ uint16_t cl37_val;
+ struct bnx2x_softc *sc = params->sc;
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val);
+
+ cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ }
+ PMD_DRV_LOG(DEBUG, "Ext phy AN advertize cl37 0x%x", cl37_val);
+
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
+ DELAY(1000 * 500);
+}
+
+static void elink_8073_specific_func(struct elink_phy *phy,
+ struct elink_params *params,
+ uint32_t action)
+{
+ struct bnx2x_softc *sc = params->sc;
+ switch (action) {
+ case ELINK_PHY_INIT:
+ /* Enable LASI */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ (1 << 2));
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
+ 0x0004);
+ break;
+ }
+}
+
+static uint8_t elink_8073_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val = 0, tmp1;
+ uint8_t gpio_port;
+ PMD_DRV_LOG(DEBUG, "Init 8073");
+
+ if (CHIP_IS_E2(sc))
+ gpio_port = SC_PATH(sc);
+ else
+ gpio_port = params->port;
+ /* Restore normal power mode */
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+ elink_8073_specific_func(phy, params, ELINK_PHY_INIT);
+ elink_8073_set_pause_cl37(params, phy, vars);
+
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+
+ PMD_DRV_LOG(DEBUG, "Before rom RX_ALARM(port1): 0x%x", tmp1);
+
+ /* Swap polarity if required - Must be done only in non-1G mode */
+ if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap _P and _N of the KR lines */
+ PMD_DRV_LOG(DEBUG, "Swapping polarity for the 8073");
+ /* 10G Rx/Tx and 1G Tx signal polarity swap */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
+ (val | (3 << 9)));
+ }
+
+ /* Enable CL37 BAM */
+ if (REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].
+ default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8073_BAM, &val);
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8073_BAM, val | 1);
+ PMD_DRV_LOG(DEBUG, "Enable CL37 BAM on KR");
+ }
+ if (params->loopback_mode == ELINK_LOOPBACK_EXT) {
+ elink_807x_force_10G(sc, phy);
+ PMD_DRV_LOG(DEBUG, "Forced speed 10G on 807X");
+ return ELINK_STATUS_OK;
+ } else {
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BNX2X_CTRL, 0x0002);
+ }
+ if (phy->req_line_speed != ELINK_SPEED_AUTO_NEG) {
+ if (phy->req_line_speed == ELINK_SPEED_10000) {
+ val = (1 << 7);
+ } else if (phy->req_line_speed == ELINK_SPEED_2500) {
+ val = (1 << 5);
+ /* Note that 2.5G works only when used with 1G
+ * advertisement
+ */
+ } else
+ val = (1 << 5);
+ } else {
+ val = 0;
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ val |= (1 << 7);
+
+ /* Note that 2.5G works only when used with 1G advertisement */
+ if (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+ val |= (1 << 5);
+ PMD_DRV_LOG(DEBUG, "807x autoneg val = 0x%x", val);
+ }
+
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
+
+ if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
+ (phy->req_line_speed == ELINK_SPEED_AUTO_NEG)) ||
+ (phy->req_line_speed == ELINK_SPEED_2500)) {
+ uint16_t phy_ver;
+ /* Allow 2.5G for A1 and above */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
+ &phy_ver);
+ PMD_DRV_LOG(DEBUG, "Add 2.5G");
+ if (phy_ver > 0)
+ tmp1 |= 1;
+ else
+ tmp1 &= 0xfffe;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Disable 2.5G");
+ tmp1 &= 0xfffe;
+ }
+
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
+ /* Add support for CL37 (passive mode) II */
+
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
+ (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ?
+ 0x20 : 0x40)));
+
+ /* Add support for CL37 (passive mode) III */
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+
+ /* The SNR will improve about 2db by changing BW and FEE main
+ * tap. Rest commands are executed after link is up
+ * Change FFE main cursor to 5 in EDC register
+ */
+ if (elink_8073_is_snr_needed(sc, phy))
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
+ 0xFB0C);
+
+ /* Enable FEC (Forware Error Correction) Request in the AN */
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
+ tmp1 |= (1 << 15);
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
+
+ elink_ext_phy_set_pause(params, phy, vars);
+
+ /* Restart autoneg */
+ DELAY(1000 * 500);
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ PMD_DRV_LOG(DEBUG, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x",
+ ((val & (1 << 5)) > 0), ((val & (1 << 7)) > 0));
+ return ELINK_STATUS_OK;
+}
+
+static uint8_t elink_8073_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t link_up = 0;
+ uint16_t val1, val2;
+ uint16_t link_status = 0;
+ uint16_t an1000_status = 0;
+
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+
+ PMD_DRV_LOG(DEBUG, "8703 LASI status 0x%x", val1);
+
+ /* Clear the interrupt LASI status register */
+ elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
+ PMD_DRV_LOG(DEBUG, "807x PCS status 0x%x->0x%x", val2, val1);
+ /* Clear MSG-OUT */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+ /* Check the LASI */
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+ PMD_DRV_LOG(DEBUG, "KR 0x9003 0x%x", val2);
+
+ /* Check the link status */
+ elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ PMD_DRV_LOG(DEBUG, "KR PCS status 0x%x", val2);
+
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ link_up = ((val1 & 4) == 4);
+ PMD_DRV_LOG(DEBUG, "PMA_REG_STATUS=0x%x", val1);
+
+ if (link_up && ((phy->req_line_speed != ELINK_SPEED_10000))) {
+ if (elink_8073_xaui_wa(sc, phy) != 0)
+ return 0;
+ }
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+
+ /* Check the link status on 1.1.2 */
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ PMD_DRV_LOG(DEBUG, "KR PMA status 0x%x->0x%x,"
+ "an_link_status=0x%x", val2, val1, an1000_status);
+
+ link_up = (((val1 & 4) == 4) || (an1000_status & (1 << 1)));
+ if (link_up && elink_8073_is_snr_needed(sc, phy)) {
+ /* The SNR will improve about 2dbby changing the BW and FEE main
+ * tap. The 1st write to change FFE main tap is set before
+ * restart AN. Change PLL Bandwidth in EDC register
+ */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
+ 0x26BC);
+
+ /* Change CDR Bandwidth in EDC register */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH,
+ 0x0333);
+ }
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &link_status);
+
+ /* Bits 0..2 --> speed detected, bits 13..15--> link is down */
+ if ((link_status & (1 << 2)) && (!(link_status & (1 << 15)))) {
+ link_up = 1;
+ vars->line_speed = ELINK_SPEED_10000;
+ PMD_DRV_LOG(DEBUG, "port %x: External link up in 10G",
+ params->port);
+ } else if ((link_status & (1 << 1)) && (!(link_status & (1 << 14)))) {
+ link_up = 1;
+ vars->line_speed = ELINK_SPEED_2500;
+ PMD_DRV_LOG(DEBUG, "port %x: External link up in 2.5G",
+ params->port);
+ } else if ((link_status & (1 << 0)) && (!(link_status & (1 << 13)))) {
+ link_up = 1;
+ vars->line_speed = ELINK_SPEED_1000;
+ PMD_DRV_LOG(DEBUG, "port %x: External link up in 1G",
+ params->port);
+ } else {
+ link_up = 0;
+ PMD_DRV_LOG(DEBUG, "port %x: External link is down",
+ params->port);
+ }
+
+ if (link_up) {
+ /* Swap polarity if required */
+ if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap P and N of the KR lines */
+ elink_cl45_read(sc, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
+ /* Set bit 3 to invert Rx in 1G mode and clear this bit
+ * when it`s in 10G mode.
+ */
+ if (vars->line_speed == ELINK_SPEED_1000) {
+ PMD_DRV_LOG(DEBUG, "Swapping 1G polarity for"
+ "the 8073");
+ val1 |= (1 << 3);
+ } else
+ val1 &= ~(1 << 3);
+
+ elink_cl45_write(sc, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE, val1);
+ }
+ elink_ext_phy_10G_an_resolve(sc, phy, vars);
+ elink_8073_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ }
+
+ if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG2, &val1);
+
+ if (val1 & (1 << 5))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ if (val1 & (1 << 7))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ }
+
+ return link_up;
+}
+
+static void elink_8073_link_reset(__rte_unused struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t gpio_port;
+ if (CHIP_IS_E2(sc))
+ gpio_port = SC_PATH(sc);
+ else
+ gpio_port = params->port;
+ PMD_DRV_LOG(DEBUG, "Setting 8073 port %d into low power mode",
+ gpio_port);
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, gpio_port);
+}
+
+/******************************************************************/
+/* BNX2X8705 PHY SECTION */
+/******************************************************************/
+static uint8_t elink_8705_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ __rte_unused struct elink_vars
+ *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ PMD_DRV_LOG(DEBUG, "init 8705");
+ /* Restore normal power mode */
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ elink_ext_phy_hw_reset(sc, params->port);
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+ elink_wait_reset_complete(sc, phy, params);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
+ elink_cl45_write(sc, phy, MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
+ /* BNX2X8705 doesn't have microcode, hence the 0 */
+ elink_save_spirom_version(sc, params->port, params->shmem_base, 0);
+ return ELINK_STATUS_OK;
+}
+
+static uint8_t elink_8705_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ uint8_t link_up = 0;
+ uint16_t val1, rx_sd;
+ struct bnx2x_softc *sc = params->sc;
+ PMD_DRV_LOG(DEBUG, "read status 8705");
+ elink_cl45_read(sc, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ PMD_DRV_LOG(DEBUG, "8705 LASI status 0x%x", val1);
+
+ elink_cl45_read(sc, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ PMD_DRV_LOG(DEBUG, "8705 LASI status 0x%x", val1);
+
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xc809, &val1);
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xc809, &val1);
+
+ PMD_DRV_LOG(DEBUG, "8705 1.c809 val=0x%x", val1);
+ link_up = ((rx_sd & 0x1) && (val1 & (1 << 9))
+ && ((val1 & (1 << 8)) == 0));
+ if (link_up) {
+ vars->line_speed = ELINK_SPEED_10000;
+ elink_ext_phy_resolve_fc(phy, params, vars);
+ }
+ return link_up;
+}
+
+/******************************************************************/
+/* SFP+ module Section */
+/******************************************************************/
+static void elink_set_disable_pmd_transmit(struct elink_params *params,
+ struct elink_phy *phy,
+ uint8_t pmd_dis)
+{
+ struct bnx2x_softc *sc = params->sc;
+ /* Disable transmitter only for bootcodes which can enable it afterwards
+ * (for D3 link)
+ */
+ if (pmd_dis) {
+ if (params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED) {
+ PMD_DRV_LOG(DEBUG, "Disabling PMD transmitter");
+ } else {
+ PMD_DRV_LOG(DEBUG, "NOT disabling PMD transmitter");
+ return;
+ }
+ } else {
+ PMD_DRV_LOG(DEBUG, "Enabling PMD transmitter");
+ }
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_TX_DISABLE, pmd_dis);
+}
+
+static uint8_t elink_get_gpio_port(struct elink_params *params)
+{
+ uint8_t gpio_port;
+ uint32_t swap_val, swap_override;
+ struct bnx2x_softc *sc = params->sc;
+ if (CHIP_IS_E2(sc)) {
+ gpio_port = SC_PATH(sc);
+ } else {
+ gpio_port = params->port;
+ }
+ swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
+ return gpio_port ^ (swap_val && swap_override);
+}
+
+static void elink_sfp_e1e2_set_transmitter(struct elink_params *params,
+ struct elink_phy *phy, uint8_t tx_en)
+{
+ uint16_t val;
+ uint8_t port = params->port;
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t tx_en_mode;
+
+ /* Disable/Enable transmitter ( TX laser of the SFP+ module.) */
+ tx_en_mode = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].sfp_ctrl)) &
+ PORT_HW_CFG_TX_LASER_MASK;
+ PMD_DRV_LOG(DEBUG, "Setting transmitter tx_en=%x for port %x "
+ "mode = %x", tx_en, port, tx_en_mode);
+ switch (tx_en_mode) {
+ case PORT_HW_CFG_TX_LASER_MDIO:
+
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &val);
+
+ if (tx_en)
+ val &= ~(1 << 15);
+ else
+ val |= (1 << 15);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, val);
+ break;
+ case PORT_HW_CFG_TX_LASER_GPIO0:
+ case PORT_HW_CFG_TX_LASER_GPIO1:
+ case PORT_HW_CFG_TX_LASER_GPIO2:
+ case PORT_HW_CFG_TX_LASER_GPIO3:
+ {
+ uint16_t gpio_pin;
+ uint8_t gpio_port, gpio_mode;
+ if (tx_en)
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+ else
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+ gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+ gpio_port = elink_get_gpio_port(params);
+ elink_cb_gpio_write(sc, gpio_pin, gpio_mode, gpio_port);
+ break;
+ }
+ default:
+ PMD_DRV_LOG(DEBUG, "Invalid TX_LASER_MDIO 0x%x", tx_en_mode);
+ break;
+ }
+}
+
+static void elink_sfp_set_transmitter(struct elink_params *params,
+ struct elink_phy *phy, uint8_t tx_en)
+{
+ struct bnx2x_softc *sc = params->sc;
+ PMD_DRV_LOG(DEBUG, "Setting SFP+ transmitter to %d", tx_en);
+ if (CHIP_IS_E3(sc))
+ elink_sfp_e3_set_transmitter(params, phy, tx_en);
+ else
+ elink_sfp_e1e2_set_transmitter(params, phy, tx_en);
+}
+
+static elink_status_t elink_8726_read_sfp_module_eeprom(struct elink_phy *phy,
+ struct elink_params
+ *params,
+ uint8_t dev_addr,
+ uint16_t addr,
+ uint8_t byte_cnt,
+ uint8_t * o_buf,
+ __rte_unused uint8_t
+ is_init)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val = 0;
+ uint16_t i;
+ if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) {
+ PMD_DRV_LOG(DEBUG, "Reading from eeprom is limited to 0xf");
+ return ELINK_STATUS_ERROR;
+ }
+ /* Set the read command byte count */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ (byte_cnt | (dev_addr << 8)));
+
+ /* Set the read command address */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ addr);
+
+ /* Activate read command */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ 0x2c0f);
+
+ /* Wait up to 500us for command complete status */
+ for (i = 0; i < 100; i++) {
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
+ break;
+ DELAY(5);
+ }
+
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+ PMD_DRV_LOG(DEBUG,
+ "Got bad status 0x%x when reading from SFP+ EEPROM",
+ (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+ return ELINK_STATUS_ERROR;
+ }
+
+ /* Read the buffer */
+ for (i = 0; i < byte_cnt; i++) {
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+ o_buf[i] =
+ (uint8_t) (val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
+ }
+
+ for (i = 0; i < 100; i++) {
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
+ return ELINK_STATUS_OK;
+ DELAY(1000 * 1);
+ }
+ return ELINK_STATUS_ERROR;
+}
+
+static void elink_warpcore_power_module(struct elink_params *params,
+ uint8_t power)
+{
+ uint32_t pin_cfg;
+ struct bnx2x_softc *sc = params->sc;
+
+ pin_cfg = (REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].
+ e3_sfp_ctrl)) & PORT_HW_CFG_E3_PWR_DIS_MASK)
+ >> PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+ if (pin_cfg == PIN_CFG_NA)
+ return;
+ PMD_DRV_LOG(DEBUG, "Setting SFP+ module power to %d using pin cfg %d",
+ power, pin_cfg);
+ /* Low ==> corresponding SFP+ module is powered
+ * high ==> the SFP+ module is powered down
+ */
+ elink_set_cfg_pin(sc, pin_cfg, power ^ 1);
+}
+
+static elink_status_t elink_warpcore_read_sfp_module_eeprom(__rte_unused struct
+ elink_phy *phy,
+ struct elink_params
+ *params,
+ uint8_t dev_addr,
+ uint16_t addr,
+ uint8_t byte_cnt,
+ uint8_t * o_buf,
+ uint8_t is_init)
+{
+ elink_status_t rc = ELINK_STATUS_OK;
+ uint8_t i, j = 0, cnt = 0;
+ uint32_t data_array[4];
+ uint16_t addr32;
+ struct bnx2x_softc *sc = params->sc;
+
+ if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) {
+ PMD_DRV_LOG(DEBUG,
+ "Reading from eeprom is limited to 16 bytes");
+ return ELINK_STATUS_ERROR;
+ }
+
+ /* 4 byte aligned address */
+ addr32 = addr & (~0x3);
+ do {
+ if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) {
+ elink_warpcore_power_module(params, 0);
+ /* Note that 100us are not enough here */
+ DELAY(1000 * 1);
+ elink_warpcore_power_module(params, 1);
+ }
+ rc = elink_bsc_read(params, sc, dev_addr, addr32, 0, byte_cnt,
+ data_array);
+ } while ((rc != ELINK_STATUS_OK) && (++cnt < I2C_WA_RETRY_CNT));
+
+ if (rc == ELINK_STATUS_OK) {
+ for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) {
+ o_buf[j] = *((uint8_t *) data_array + i);
+ j++;
+ }
+ }
+
+ return rc;
+}
+
+static elink_status_t elink_8727_read_sfp_module_eeprom(struct elink_phy *phy,
+ struct elink_params
+ *params,
+ uint8_t dev_addr,
+ uint16_t addr,
+ uint8_t byte_cnt,
+ uint8_t * o_buf,
+ __rte_unused uint8_t
+ is_init)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val, i;
+
+ if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) {
+ PMD_DRV_LOG(DEBUG, "Reading from eeprom is limited to 0xf");
+ return ELINK_STATUS_ERROR;
+ }
+
+ /* Set 2-wire transfer rate of SFP+ module EEPROM
+ * to 100Khz since some DACs(direct attached cables) do
+ * not work at 400Khz.
+ */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+ ((dev_addr << 8) | 1));
+
+ /* Need to read from 1.8000 to clear it */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+
+ /* Set the read command byte count */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ ((byte_cnt < 2) ? 2 : byte_cnt));
+
+ /* Set the read command address */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, addr);
+ /* Set the destination address */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ 0x8004, MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+
+ /* Activate read command */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 0x8002);
+ /* Wait appropriate time for two-wire command to finish before
+ * polling the status register
+ */
+ DELAY(1000 * 1);
+
+ /* Wait up to 500us for command complete status */
+ for (i = 0; i < 100; i++) {
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
+ break;
+ DELAY(5);
+ }
+
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+ PMD_DRV_LOG(DEBUG,
+ "Got bad status 0x%x when reading from SFP+ EEPROM",
+ (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+ return ELINK_STATUS_TIMEOUT;
+ }
+
+ /* Read the buffer */
+ for (i = 0; i < byte_cnt; i++) {
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
+ o_buf[i] =
+ (uint8_t) (val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
+ }
+
+ for (i = 0; i < 100; i++) {
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
+ return ELINK_STATUS_OK;
+ DELAY(1000 * 1);
+ }
+
+ return ELINK_STATUS_ERROR;
+}
+
+static elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy,
+ struct elink_params *params,
+ uint8_t dev_addr,
+ uint16_t addr,
+ uint16_t byte_cnt,
+ uint8_t * o_buf)
+{
+ elink_status_t rc = ELINK_STATUS_OK;
+ uint8_t xfer_size;
+ uint8_t *user_data = o_buf;
+ read_sfp_module_eeprom_func_p read_func;
+
+ if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) {
+ PMD_DRV_LOG(DEBUG, "invalid dev_addr 0x%x", dev_addr);
+ return ELINK_STATUS_ERROR;
+ }
+
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726:
+ read_func = elink_8726_read_sfp_module_eeprom;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722:
+ read_func = elink_8727_read_sfp_module_eeprom;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ read_func = elink_warpcore_read_sfp_module_eeprom;
+ break;
+ default:
+ return ELINK_OP_NOT_SUPPORTED;
+ }
+
+ while (!rc && (byte_cnt > 0)) {
+ xfer_size = (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) ?
+ ELINK_SFP_EEPROM_PAGE_SIZE : byte_cnt;
+ rc = read_func(phy, params, dev_addr, addr, xfer_size,
+ user_data, 0);
+ byte_cnt -= xfer_size;
+ user_data += xfer_size;
+ addr += xfer_size;
+ }
+ return rc;
+}
+
+static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
+ struct elink_params *params,
+ uint16_t * edc_mode)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t sync_offset = 0, phy_idx, media_types;
+ uint8_t gport, val[2], check_limiting_mode = 0;
+ *edc_mode = ELINK_EDC_MODE_LIMITING;
+ phy->media_type = ELINK_ETH_PHY_UNSPECIFIED;
+ /* First check for copper cable */
+ if (elink_read_sfp_module_eeprom(phy,
+ params,
+ ELINK_I2C_DEV_ADDR_A0,
+ ELINK_SFP_EEPROM_CON_TYPE_ADDR,
+ 2, (uint8_t *) val) != 0) {
+ PMD_DRV_LOG(DEBUG, "Failed to read from SFP+ module EEPROM");
+ return ELINK_STATUS_ERROR;
+ }
+
+ switch (val[0]) {
+ case ELINK_SFP_EEPROM_CON_TYPE_VAL_COPPER:
+ {
+ uint8_t copper_module_type;
+ phy->media_type = ELINK_ETH_PHY_DA_TWINAX;
+ /* Check if its active cable (includes SFP+ module)
+ * of passive cable
+ */
+ if (elink_read_sfp_module_eeprom(phy,
+ params,
+ ELINK_I2C_DEV_ADDR_A0,
+ ELINK_SFP_EEPROM_FC_TX_TECH_ADDR,
+ 1,
+ &copper_module_type) !=
+ 0) {
+ PMD_DRV_LOG(DEBUG,
+ "Failed to read copper-cable-type"
+ " from SFP+ EEPROM");
+ return ELINK_STATUS_ERROR;
+ }
+
+ if (copper_module_type &
+ ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
+ PMD_DRV_LOG(DEBUG,
+ "Active Copper cable detected");
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ *edc_mode = ELINK_EDC_MODE_ACTIVE_DAC;
+ else
+ check_limiting_mode = 1;
+ } else if (copper_module_type &
+ ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE)
+ {
+ PMD_DRV_LOG(DEBUG,
+ "Passive Copper cable detected");
+ *edc_mode = ELINK_EDC_MODE_PASSIVE_DAC;
+ } else {
+ PMD_DRV_LOG(DEBUG,
+ "Unknown copper-cable-type 0x%x !!!",
+ copper_module_type);
+ return ELINK_STATUS_ERROR;
+ }
+ break;
+ }
+ case ELINK_SFP_EEPROM_CON_TYPE_VAL_LC:
+ case ELINK_SFP_EEPROM_CON_TYPE_VAL_RJ45:
+ check_limiting_mode = 1;
+ if ((val[1] & (ELINK_SFP_EEPROM_COMP_CODE_SR_MASK |
+ ELINK_SFP_EEPROM_COMP_CODE_LR_MASK |
+ ELINK_SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
+ PMD_DRV_LOG(DEBUG, "1G SFP module detected");
+ gport = params->port;
+ phy->media_type = ELINK_ETH_PHY_SFP_1G_FIBER;
+ if (phy->req_line_speed != ELINK_SPEED_1000) {
+ phy->req_line_speed = ELINK_SPEED_1000;
+ if (!CHIP_IS_E1x(sc)) {
+ gport = SC_PATH(sc) +
+ (params->port << 1);
+ }
+ elink_cb_event_log(sc, ELINK_LOG_ID_NON_10G_MODULE, gport); //"Warning: Link speed was forced to 1000Mbps."
+ // " Current SFP module in port %d is not"
+ // " compliant with 10G Ethernet",
+
+ }
+ } else {
+ int idx, cfg_idx = 0;
+ PMD_DRV_LOG(DEBUG, "10G Optic module detected");
+ for (idx = ELINK_INT_PHY; idx < ELINK_MAX_PHYS; idx++) {
+ if (params->phy[idx].type == phy->type) {
+ cfg_idx = ELINK_LINK_CONFIG_IDX(idx);
+ break;
+ }
+ }
+ phy->media_type = ELINK_ETH_PHY_SFPP_10G_FIBER;
+ phy->req_line_speed = params->req_line_speed[cfg_idx];
+ }
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Unable to determine module type 0x%x !!!",
+ val[0]);
+ return ELINK_STATUS_ERROR;
+ }
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].media_type);
+ media_types = REG_RD(sc, sync_offset);
+ /* Update media type for non-PMF sync */
+ for (phy_idx = ELINK_INT_PHY; phy_idx < ELINK_MAX_PHYS; phy_idx++) {
+ if (&(params->phy[phy_idx]) == phy) {
+ media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+ phy_idx));
+ media_types |=
+ ((phy->
+ media_type & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+ break;
+ }
+ }
+ REG_WR(sc, sync_offset, media_types);
+ if (check_limiting_mode) {
+ uint8_t options[ELINK_SFP_EEPROM_OPTIONS_SIZE];
+ if (elink_read_sfp_module_eeprom(phy,
+ params,
+ ELINK_I2C_DEV_ADDR_A0,
+ ELINK_SFP_EEPROM_OPTIONS_ADDR,
+ ELINK_SFP_EEPROM_OPTIONS_SIZE,
+ options) != 0) {
+ PMD_DRV_LOG(DEBUG,
+ "Failed to read Option field from module EEPROM");
+ return ELINK_STATUS_ERROR;
+ }
+ if ((options[0] & ELINK_SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK))
+ *edc_mode = ELINK_EDC_MODE_LINEAR;
+ else
+ *edc_mode = ELINK_EDC_MODE_LIMITING;
+ }
+ PMD_DRV_LOG(DEBUG, "EDC mode is set to 0x%x", *edc_mode);
+ return ELINK_STATUS_OK;
+}
+
+/* This function read the relevant field from the module (SFP+), and verify it
+ * is compliant with this board
+ */
+static elink_status_t elink_verify_sfp_module(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t val, cmd;
+ uint32_t fw_resp, fw_cmd_param;
+ char vendor_name[ELINK_SFP_EEPROM_VENDOR_NAME_SIZE + 1];
+ char vendor_pn[ELINK_SFP_EEPROM_PART_NO_SIZE + 1];
+ phy->flags &= ~ELINK_FLAGS_SFP_NOT_APPROVED;
+ val = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_feature_config[params->port].
+ config));
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) {
+ PMD_DRV_LOG(DEBUG, "NOT enforcing module verification");
+ return ELINK_STATUS_OK;
+ }
+
+ if (params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
+ /* Use specific phy request */
+ cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
+ } else if (params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
+ /* Use first phy request only in case of non-dual media */
+ if (ELINK_DUAL_MEDIA(params)) {
+ PMD_DRV_LOG(DEBUG,
+ "FW does not support OPT MDL verification");
+ return ELINK_STATUS_ERROR;
+ }
+ cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
+ } else {
+ /* No support in OPT MDL detection */
+ PMD_DRV_LOG(DEBUG, "FW does not support OPT MDL verification");
+ return ELINK_STATUS_ERROR;
+ }
+
+ fw_cmd_param = ELINK_FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
+ fw_resp = elink_cb_fw_command(sc, cmd, fw_cmd_param);
+ if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
+ PMD_DRV_LOG(DEBUG, "Approved module");
+ return ELINK_STATUS_OK;
+ }
+
+ /* Format the warning message */
+ if (elink_read_sfp_module_eeprom(phy,
+ params,
+ ELINK_I2C_DEV_ADDR_A0,
+ ELINK_SFP_EEPROM_VENDOR_NAME_ADDR,
+ ELINK_SFP_EEPROM_VENDOR_NAME_SIZE,
+ (uint8_t *) vendor_name))
+ vendor_name[0] = '\0';
+ else
+ vendor_name[ELINK_SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
+ if (elink_read_sfp_module_eeprom(phy,
+ params,
+ ELINK_I2C_DEV_ADDR_A0,
+ ELINK_SFP_EEPROM_PART_NO_ADDR,
+ ELINK_SFP_EEPROM_PART_NO_SIZE,
+ (uint8_t *) vendor_pn))
+ vendor_pn[0] = '\0';
+ else
+ vendor_pn[ELINK_SFP_EEPROM_PART_NO_SIZE] = '\0';
+
+ elink_cb_event_log(sc, ELINK_LOG_ID_UNQUAL_IO_MODULE, params->port, vendor_name, vendor_pn); // "Warning: Unqualified SFP+ module detected,"
+ // " Port %d from %s part number %s",
+
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG)
+ phy->flags |= ELINK_FLAGS_SFP_NOT_APPROVED;
+ return ELINK_STATUS_ERROR;
+}
+
+static elink_status_t elink_wait_for_sfp_module_initialized(struct elink_phy
+ *phy,
+ struct elink_params
+ *params)
+{
+ uint8_t val;
+ elink_status_t rc;
+ uint16_t timeout;
+ /* Initialization time after hot-plug may take up to 300ms for
+ * some phys type ( e.g. JDSU )
+ */
+
+ for (timeout = 0; timeout < 60; timeout++) {
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ rc = elink_warpcore_read_sfp_module_eeprom(phy, params,
+ ELINK_I2C_DEV_ADDR_A0,
+ 1, 1, &val,
+ 1);
+ else
+ rc = elink_read_sfp_module_eeprom(phy, params,
+ ELINK_I2C_DEV_ADDR_A0,
+ 1, 1, &val);
+ if (rc == 0) {
+ PMD_DRV_LOG(DEBUG,
+ "SFP+ module initialization took %d ms",
+ timeout * 5);
+ return ELINK_STATUS_OK;
+ }
+ DELAY(1000 * 5);
+ }
+ rc = elink_read_sfp_module_eeprom(phy, params, ELINK_I2C_DEV_ADDR_A0,
+ 1, 1, &val);
+ return rc;
+}
+
+static void elink_8727_power_module(struct bnx2x_softc *sc,
+ struct elink_phy *phy, uint8_t is_power_up)
+{
+ /* Make sure GPIOs are not using for LED mode */
+ uint16_t val;
+ /* In the GPIO register, bit 4 is use to determine if the GPIOs are
+ * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
+ * output
+ * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
+ * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1
+ * where the 1st bit is the over-current(only input), and 2nd bit is
+ * for power( only output )
+ *
+ * In case of NOC feature is disabled and power is up, set GPIO control
+ * as input to enable listening of over-current indication
+ */
+ if (phy->flags & ELINK_FLAGS_NOC)
+ return;
+ if (is_power_up)
+ val = (1 << 4);
+ else
+ /* Set GPIO control to OUTPUT, and set the power bit
+ * to according to the is_power_up
+ */
+ val = (1 << 1);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, val);
+}
+
+static elink_status_t elink_8726_set_limiting_mode(struct bnx2x_softc *sc,
+ struct elink_phy *phy,
+ uint16_t edc_mode)
+{
+ uint16_t cur_limiting_mode;
+
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &cur_limiting_mode);
+ PMD_DRV_LOG(DEBUG, "Current Limiting mode is 0x%x", cur_limiting_mode);
+
+ if (edc_mode == ELINK_EDC_MODE_LIMITING) {
+ PMD_DRV_LOG(DEBUG, "Setting LIMITING MODE");
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ ELINK_EDC_MODE_LIMITING);
+ } else { /* LRM mode ( default ) */
+
+ PMD_DRV_LOG(DEBUG, "Setting LRM MODE");
+
+ /* Changing to LRM mode takes quite few seconds. So do it only
+ * if current mode is limiting (default is LRM)
+ */
+ if (cur_limiting_mode != ELINK_EDC_MODE_LIMITING)
+ return ELINK_STATUS_OK;
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LRM_MODE, 0);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER2, 0x128);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL0, 0x4008);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LRM_MODE, 0xaaaa);
+ }
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_8727_set_limiting_mode(struct bnx2x_softc *sc,
+ struct elink_phy *phy,
+ uint16_t edc_mode)
+{
+ uint16_t phy_identifier;
+ uint16_t rom_ver2_val;
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &phy_identifier);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier & ~(1 << 9)));
+
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER2, &rom_ver2_val);
+ /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier | (1 << 9)));
+
+ return ELINK_STATUS_OK;
+}
+
+static void elink_8727_specific_func(struct elink_phy *phy,
+ struct elink_params *params,
+ uint32_t action)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val;
+ switch (action) {
+ case ELINK_DISABLE_TX:
+ elink_sfp_set_transmitter(params, phy, 0);
+ break;
+ case ELINK_ENABLE_TX:
+ if (!(phy->flags & ELINK_FLAGS_SFP_NOT_APPROVED))
+ elink_sfp_set_transmitter(params, phy, 1);
+ break;
+ case ELINK_PHY_INIT:
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ (1 << 2) | (1 << 5));
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, 0);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006);
+ /* Make MOD_ABS give interrupt on change */
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL, &val);
+ val |= (1 << 12);
+ if (phy->flags & ELINK_FLAGS_NOC)
+ val |= (3 << 5);
+ /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+ * status which reflect SFP+ module over-current
+ */
+ if (!(phy->flags & ELINK_FLAGS_NOC))
+ val &= 0xff8f; /* Reset bits 4-6 */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ val);
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Function 0x%x not supported by 8727",
+ action);
+ return;
+ }
+}
+
+static void elink_set_e1e2_module_fault_led(struct elink_params *params,
+ uint8_t gpio_mode)
+{
+ struct bnx2x_softc *sc = params->sc;
+
+ uint32_t fault_led_gpio = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->port].
+ sfp_ctrl)) &
+ PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+ switch (fault_led_gpio) {
+ case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+ return;
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+ {
+ uint8_t gpio_port = elink_get_gpio_port(params);
+ uint16_t gpio_pin = fault_led_gpio -
+ PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+ PMD_DRV_LOG(DEBUG, "Set fault module-detected led "
+ "pin %x port %x mode %x",
+ gpio_pin, gpio_port, gpio_mode);
+ elink_cb_gpio_write(sc, gpio_pin, gpio_mode, gpio_port);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Error: Invalid fault led mode 0x%x",
+ fault_led_gpio);
+ }
+}
+
+static void elink_set_e3_module_fault_led(struct elink_params *params,
+ uint8_t gpio_mode)
+{
+ uint32_t pin_cfg;
+ uint8_t port = params->port;
+ struct bnx2x_softc *sc = params->sc;
+ pin_cfg = (REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >>
+ PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT;
+ PMD_DRV_LOG(DEBUG, "Setting Fault LED to %d using pin cfg %d",
+ gpio_mode, pin_cfg);
+ elink_set_cfg_pin(sc, pin_cfg, gpio_mode);
+}
+
+static void elink_set_sfp_module_fault_led(struct elink_params *params,
+ uint8_t gpio_mode)
+{
+ struct bnx2x_softc *sc = params->sc;
+ PMD_DRV_LOG(DEBUG, "Setting SFP+ module fault LED to %d", gpio_mode);
+ if (CHIP_IS_E3(sc)) {
+ /* Low ==> if SFP+ module is supported otherwise
+ * High ==> if SFP+ module is not on the approved vendor list
+ */
+ elink_set_e3_module_fault_led(params, gpio_mode);
+ } else
+ elink_set_e1e2_module_fault_led(params, gpio_mode);
+}
+
+static void elink_warpcore_hw_reset(__rte_unused struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ elink_warpcore_power_module(params, 0);
+ /* Put Warpcore in low power mode */
+ REG_WR(sc, MISC_REG_WC0_RESET, 0x0c0e);
+
+ /* Put LCPLL in low power mode */
+ REG_WR(sc, MISC_REG_LCPLL_E40_PWRDWN, 1);
+ REG_WR(sc, MISC_REG_LCPLL_E40_RESETB_ANA, 0);
+ REG_WR(sc, MISC_REG_LCPLL_E40_RESETB_DIG, 0);
+}
+
+static void elink_power_sfp_module(struct elink_params *params,
+ struct elink_phy *phy, uint8_t power)
+{
+ PMD_DRV_LOG(DEBUG, "Setting SFP+ power to %x", power);
+
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722:
+ elink_8727_power_module(params->sc, phy, power);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ elink_warpcore_power_module(params, power);
+ break;
+ default:
+ break;
+ }
+}
+
+static void elink_warpcore_set_limiting_mode(struct elink_params *params,
+ struct elink_phy *phy,
+ uint16_t edc_mode)
+{
+ uint16_t val = 0;
+ uint16_t mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+ struct bnx2x_softc *sc = params->sc;
+
+ uint8_t lane = elink_get_warpcore_lane(params);
+ /* This is a global register which controls all lanes */
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+ val &= ~(0xf << (lane << 2));
+
+ switch (edc_mode) {
+ case ELINK_EDC_MODE_LINEAR:
+ case ELINK_EDC_MODE_LIMITING:
+ mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+ break;
+ case ELINK_EDC_MODE_PASSIVE_DAC:
+ case ELINK_EDC_MODE_ACTIVE_DAC:
+ mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
+ break;
+ default:
+ break;
+ }
+
+ val |= (mode << (lane << 2));
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val);
+ /* A must read */
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+
+ /* Restart microcode to re-read the new mode */
+ elink_warpcore_reset_lane(sc, phy, 1);
+ elink_warpcore_reset_lane(sc, phy, 0);
+
+}
+
+static void elink_set_limiting_mode(struct elink_params *params,
+ struct elink_phy *phy, uint16_t edc_mode)
+{
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726:
+ elink_8726_set_limiting_mode(params->sc, phy, edc_mode);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722:
+ elink_8727_set_limiting_mode(params->sc, phy, edc_mode);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ elink_warpcore_set_limiting_mode(params, phy, edc_mode);
+ break;
+ }
+}
+
+static elink_status_t elink_sfp_module_detection(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t edc_mode;
+ elink_status_t rc = ELINK_STATUS_OK;
+
+ uint32_t val = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_feature_config[params->
+ port].
+ config));
+ /* Enabled transmitter by default */
+ elink_sfp_set_transmitter(params, phy, 1);
+ PMD_DRV_LOG(DEBUG, "SFP+ module plugged in/out detected on port %d",
+ params->port);
+ /* Power up module */
+ elink_power_sfp_module(params, phy, 1);
+ if (elink_get_edc_mode(phy, params, &edc_mode) != 0) {
+ PMD_DRV_LOG(DEBUG, "Failed to get valid module type");
+ return ELINK_STATUS_ERROR;
+ } else if (elink_verify_sfp_module(phy, params) != 0) {
+ /* Check SFP+ module compatibility */
+ PMD_DRV_LOG(DEBUG, "Module verification failed!!");
+ rc = ELINK_STATUS_ERROR;
+ /* Turn on fault module-detected led */
+ elink_set_sfp_module_fault_led(params,
+ MISC_REGISTERS_GPIO_HIGH);
+
+ /* Check if need to power down the SFP+ module */
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) {
+ PMD_DRV_LOG(DEBUG, "Shutdown SFP+ module!!");
+ elink_power_sfp_module(params, phy, 0);
+ return rc;
+ }
+ } else {
+ /* Turn off fault module-detected led */
+ elink_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
+ }
+
+ /* Check and set limiting mode / LRM mode on 8726. On 8727 it
+ * is done automatically
+ */
+ elink_set_limiting_mode(params, phy, edc_mode);
+
+ /* Disable transmit for this module if the module is not approved, and
+ * laser needs to be disabled.
+ */
+ if ((rc != 0) &&
+ ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER))
+ elink_sfp_set_transmitter(params, phy, 0);
+
+ return rc;
+}
+
+void elink_handle_module_detect_int(struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ struct elink_phy *phy;
+ uint32_t gpio_val;
+ uint8_t gpio_num, gpio_port;
+ if (CHIP_IS_E3(sc)) {
+ phy = &params->phy[ELINK_INT_PHY];
+ /* Always enable TX laser,will be disabled in case of fault */
+ elink_sfp_set_transmitter(params, phy, 1);
+ } else {
+ phy = &params->phy[ELINK_EXT_PHY1];
+ }
+ if (elink_get_mod_abs_int_cfg(sc, params->shmem_base,
+ params->port, &gpio_num, &gpio_port) ==
+ ELINK_STATUS_ERROR) {
+ PMD_DRV_LOG(DEBUG, "Failed to get MOD_ABS interrupt config");
+ return;
+ }
+
+ /* Set valid module led off */
+ elink_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
+
+ /* Get current gpio val reflecting module plugged in / out */
+ gpio_val = elink_cb_gpio_read(sc, gpio_num, gpio_port);
+
+ /* Call the handling function in case module is detected */
+ if (gpio_val == 0) {
+ elink_set_mdio_emac_per_phy(sc, params);
+ elink_set_aer_mmd(params, phy);
+
+ elink_power_sfp_module(params, phy, 1);
+ elink_cb_gpio_int_write(sc, gpio_num,
+ MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
+ gpio_port);
+ if (elink_wait_for_sfp_module_initialized(phy, params) == 0) {
+ elink_sfp_module_detection(phy, params);
+ if (CHIP_IS_E3(sc)) {
+ uint16_t rx_tx_in_reset;
+ /* In case WC is out of reset, reconfigure the
+ * link speed while taking into account 1G
+ * module limitation.
+ */
+ elink_cl45_read(sc, phy,
+ MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6,
+ &rx_tx_in_reset);
+ if ((!rx_tx_in_reset) &&
+ (params->link_flags &
+ ELINK_PHY_INITIALIZED)) {
+ elink_warpcore_reset_lane(sc, phy, 1);
+ elink_warpcore_config_sfi(phy, params);
+ elink_warpcore_reset_lane(sc, phy, 0);
+ }
+ }
+ } else {
+ PMD_DRV_LOG(DEBUG, "SFP+ module is not initialized");
+ }
+ } else {
+ elink_cb_gpio_int_write(sc, gpio_num,
+ MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
+ gpio_port);
+ /* Module was plugged out.
+ * Disable transmit for this module
+ */
+ phy->media_type = ELINK_ETH_PHY_NOT_PRESENT;
+ }
+}
+
+/******************************************************************/
+/* Used by 8706 and 8727 */
+/******************************************************************/
+static void elink_sfp_mask_fault(struct bnx2x_softc *sc,
+ struct elink_phy *phy,
+ uint16_t alarm_status_offset,
+ uint16_t alarm_ctrl_offset)
+{
+ uint16_t alarm_status, val;
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, alarm_status_offset, &alarm_status);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, alarm_status_offset, &alarm_status);
+ /* Mask or enable the fault event. */
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val);
+ if (alarm_status & (1 << 0))
+ val &= ~(1 << 0);
+ else
+ val |= (1 << 0);
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val);
+}
+
+/******************************************************************/
+/* common BNX2X8706/BNX2X8726 PHY SECTION */
+/******************************************************************/
+static uint8_t elink_8706_8726_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ uint8_t link_up = 0;
+ uint16_t val1, val2, rx_sd, pcs_status;
+ struct bnx2x_softc *sc = params->sc;
+ PMD_DRV_LOG(DEBUG, "XGXS 8706/8726");
+ /* Clear RX Alarm */
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+ elink_sfp_mask_fault(sc, phy, MDIO_PMA_LASI_TXSTAT,
+ MDIO_PMA_LASI_TXCTRL);
+
+ /* Clear LASI indication */
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+ PMD_DRV_LOG(DEBUG, "8706/8726 LASI status 0x%x--> 0x%x", val1, val2);
+
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+ elink_cl45_read(sc, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+
+ PMD_DRV_LOG(DEBUG, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
+ " link_status 0x%x", rx_sd, pcs_status, val2);
+ /* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+ * are set, or if the autoneg bit 1 is set
+ */
+ link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1 << 1)));
+ if (link_up) {
+ if (val2 & (1 << 1))
+ vars->line_speed = ELINK_SPEED_1000;
+ else
+ vars->line_speed = ELINK_SPEED_10000;
+ elink_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ }
+
+ /* Capture 10G link fault. Read twice to clear stale value. */
+ if (vars->line_speed == ELINK_SPEED_10000) {
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+ if (val1 & (1 << 0))
+ vars->fault_detected = 1;
+ }
+
+ return link_up;
+}
+
+/******************************************************************/
+/* BNX2X8706 PHY SECTION */
+/******************************************************************/
+static uint8_t elink_8706_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ __rte_unused struct elink_vars *vars)
+{
+ uint32_t tx_en_mode;
+ uint16_t cnt, val, tmp1;
+ struct bnx2x_softc *sc = params->sc;
+
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ elink_ext_phy_hw_reset(sc, params->port);
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+ elink_wait_reset_complete(sc, phy, params);
+
+ /* Wait until fw is loaded */
+ for (cnt = 0; cnt < 100; cnt++) {
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
+ if (val)
+ break;
+ DELAY(1000 * 10);
+ }
+ PMD_DRV_LOG(DEBUG, "XGXS 8706 is initialized after %d ms", cnt);
+ if ((params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ uint8_t i;
+ uint16_t reg;
+ for (i = 0; i < 4; i++) {
+ reg = MDIO_XS_8706_REG_BANK_RX0 +
+ i * (MDIO_XS_8706_REG_BANK_RX1 -
+ MDIO_XS_8706_REG_BANK_RX0);
+ elink_cl45_read(sc, phy, MDIO_XS_DEVAD, reg, &val);
+ /* Clear first 3 bits of the control */
+ val &= ~0x7;
+ /* Set control bits according to configuration */
+ val |= (phy->rx_preemphasis[i] & 0x7);
+ PMD_DRV_LOG(DEBUG, "Setting RX Equalizer to BNX2X8706"
+ " reg 0x%x <-- val 0x%x", reg, val);
+ elink_cl45_write(sc, phy, MDIO_XS_DEVAD, reg, val);
+ }
+ }
+ /* Force speed */
+ if (phy->req_line_speed == ELINK_SPEED_10000) {
+ PMD_DRV_LOG(DEBUG, "XGXS 8706 force 10Gbps");
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, 0);
+ /* Arm LASI for link and Tx fault. */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3);
+ } else {
+ /* Force 1Gbps using autoneg with 1G advertisement */
+
+ /* Allow CL37 through CL73 */
+ PMD_DRV_LOG(DEBUG, "XGXS 8706 AutoNeg");
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+
+ /* Enable Full-Duplex advertisement on CL37 */
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
+ /* Enable CL37 AN */
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+ /* 1G support */
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1 << 5));
+
+ /* Enable clause 73 AN */
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 0x0400);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
+ }
+ elink_save_bnx2x_spirom_ver(sc, phy, params->port);
+
+ /* If TX Laser is controlled by GPIO_0, do not let PHY go into low
+ * power mode, if TX Laser is disabled
+ */
+
+ tx_en_mode = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].
+ sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+ PMD_DRV_LOG(DEBUG, "Enabling TXONOFF_PWRDN_DIS");
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL,
+ &tmp1);
+ tmp1 |= 0x1;
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL,
+ tmp1);
+ }
+
+ return ELINK_STATUS_OK;
+}
+
+static uint8_t elink_8706_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ return elink_8706_8726_read_status(phy, params, vars);
+}
+
+/******************************************************************/
+/* BNX2X8726 PHY SECTION */
+/******************************************************************/
+static void elink_8726_config_loopback(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ PMD_DRV_LOG(DEBUG, "PMA/PMD ext_phy_loopback: 8726");
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
+}
+
+static void elink_8726_external_rom_boot(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ /* Need to wait 100ms after reset */
+ DELAY(1000 * 100);
+
+ /* Micro controller re-boot */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B);
+
+ /* Set soft reset */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+ /* Wait for 150ms for microcode load */
+ DELAY(1000 * 150);
+
+ /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+
+ DELAY(1000 * 200);
+ elink_save_bnx2x_spirom_ver(sc, phy, params->port);
+}
+
+static uint8_t elink_8726_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val1;
+ uint8_t link_up = elink_8706_8726_read_status(phy, params, vars);
+ if (link_up) {
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val1);
+ if (val1 & (1 << 15)) {
+ PMD_DRV_LOG(DEBUG, "Tx is disabled");
+ link_up = 0;
+ vars->line_speed = 0;
+ }
+ }
+ return link_up;
+}
+
+static uint8_t elink_8726_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ PMD_DRV_LOG(DEBUG, "Initializing BNX2X8726");
+
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15);
+ elink_wait_reset_complete(sc, phy, params);
+
+ elink_8726_external_rom_boot(phy, params);
+
+ /* Need to call module detected on initialization since the module
+ * detection triggered by actual module insertion might occur before
+ * driver is loaded, and when driver is loaded, it reset all
+ * registers, including the transmitter
+ */
+ elink_sfp_module_detection(phy, params);
+
+ if (phy->req_line_speed == ELINK_SPEED_1000) {
+ PMD_DRV_LOG(DEBUG, "Setting 1G force");
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 0x400);
+ } else if ((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+ PMD_DRV_LOG(DEBUG, "Setting 1G clause37");
+ /* Set Flow control */
+ elink_ext_phy_set_pause(params, phy, vars);
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ /* Enable RX-ALARM control to receive interrupt for 1G speed
+ * change
+ */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 0x400);
+
+ } else { /* Default 10G. Set only LASI control */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1);
+ }
+
+ /* Set TX PreEmphasis if needed */
+ if ((params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ PMD_DRV_LOG(DEBUG,
+ "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x",
+ phy->tx_preemphasis[0], phy->tx_preemphasis[1]);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TX_CTRL1,
+ phy->tx_preemphasis[0]);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TX_CTRL2,
+ phy->tx_preemphasis[1]);
+ }
+
+ return ELINK_STATUS_OK;
+
+}
+
+static void elink_8726_link_reset(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ PMD_DRV_LOG(DEBUG, "elink_8726_link_reset port %d", params->port);
+ /* Set serial boot control for external load */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+}
+
+/******************************************************************/
+/* BNX2X8727 PHY SECTION */
+/******************************************************************/
+
+static void elink_8727_set_link_led(struct elink_phy *phy,
+ struct elink_params *params, uint8_t mode)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t led_mode_bitmask = 0;
+ uint16_t gpio_pins_bitmask = 0;
+ uint16_t val;
+ /* Only NOC flavor requires to set the LED specifically */
+ if (!(phy->flags & ELINK_FLAGS_NOC))
+ return;
+ switch (mode) {
+ case ELINK_LED_MODE_FRONT_PANEL_OFF:
+ case ELINK_LED_MODE_OFF:
+ led_mode_bitmask = 0;
+ gpio_pins_bitmask = 0x03;
+ break;
+ case ELINK_LED_MODE_ON:
+ led_mode_bitmask = 0;
+ gpio_pins_bitmask = 0x02;
+ break;
+ case ELINK_LED_MODE_OPER:
+ led_mode_bitmask = 0x60;
+ gpio_pins_bitmask = 0x11;
+ break;
+ }
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, &val);
+ val &= 0xff8f;
+ val |= led_mode_bitmask;
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, &val);
+ val &= 0xffe0;
+ val |= gpio_pins_bitmask;
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, val);
+}
+
+static void elink_8727_hw_reset(__rte_unused struct elink_phy *phy,
+ struct elink_params *params)
+{
+ uint32_t swap_val, swap_override;
+ uint8_t port;
+ /* The PHY reset is controlled by GPIO 1. Fake the port number
+ * to cancel the swap done in set_gpio()
+ */
+ struct bnx2x_softc *sc = params->sc;
+ swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
+ port = (swap_val && swap_override) ^ 1;
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+}
+
+static void elink_8727_config_speed(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t tmp1, val;
+ /* Set option 1G speed */
+ if ((phy->req_line_speed == ELINK_SPEED_1000) ||
+ (phy->media_type == ELINK_ETH_PHY_SFP_1G_FIBER)) {
+ PMD_DRV_LOG(DEBUG, "Setting 1G force");
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+ PMD_DRV_LOG(DEBUG, "1.7 = 0x%x", tmp1);
+ /* Power down the XAUI until link is up in case of dual-media
+ * and 1G
+ */
+ if (ELINK_DUAL_MEDIA(params)) {
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val);
+ val |= (3 << 10);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val);
+ }
+ } else if ((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+ PMD_DRV_LOG(DEBUG, "Setting 1G clause37");
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+ } else {
+ /* Since the 8727 has only single reset pin, need to set the 10G
+ * registers although it is default
+ */
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+ 0x0020);
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+ 0x0008);
+ }
+}
+
+static uint8_t elink_8727_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ __rte_unused struct elink_vars
+ *vars)
+{
+ uint32_t tx_en_mode;
+ uint16_t tmp1, mod_abs, tmp2;
+ struct bnx2x_softc *sc = params->sc;
+ /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
+
+ elink_wait_reset_complete(sc, phy, params);
+
+ PMD_DRV_LOG(DEBUG, "Initializing BNX2X8727");
+
+ elink_8727_specific_func(phy, params, ELINK_PHY_INIT);
+ /* Initially configure MOD_ABS to interrupt when module is
+ * presence( bit 8)
+ */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+ /* Set EDC off by setting OPTXLOS signal input to low (bit 9).
+ * When the EDC is off it locks onto a reference clock and avoids
+ * becoming 'lost'
+ */
+ mod_abs &= ~(1 << 8);
+ if (!(phy->flags & ELINK_FLAGS_NOC))
+ mod_abs &= ~(1 << 9);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+ /* Enable/Disable PHY transmitter output */
+ elink_set_disable_pmd_transmit(params, phy, 0);
+
+ elink_8727_power_module(sc, phy, 1);
+
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+
+ elink_8727_config_speed(phy, params);
+
+ /* Set TX PreEmphasis if needed */
+ if ((params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ PMD_DRV_LOG(DEBUG, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x",
+ phy->tx_preemphasis[0], phy->tx_preemphasis[1]);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
+ phy->tx_preemphasis[0]);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2,
+ phy->tx_preemphasis[1]);
+ }
+
+ /* If TX Laser is controlled by GPIO_0, do not let PHY go into low
+ * power mode, if TX Laser is disabled
+ */
+ tx_en_mode = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].
+ sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+ PMD_DRV_LOG(DEBUG, "Enabling TXONOFF_PWRDN_DIS");
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG,
+ &tmp2);
+ tmp2 |= 0x1000;
+ tmp2 &= 0xFFEF;
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG,
+ tmp2);
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &tmp2);
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, (tmp2 & 0x7fff));
+ }
+
+ return ELINK_STATUS_OK;
+}
+
+static void elink_8727_handle_mod_abs(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t mod_abs, rx_alarm_status;
+ uint32_t val = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_feature_config[params->
+ port].config));
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+ &mod_abs);
+ if (mod_abs & (1 << 8)) {
+
+ /* Module is absent */
+ PMD_DRV_LOG(DEBUG, "MOD_ABS indication show module is absent");
+ phy->media_type = ELINK_ETH_PHY_NOT_PRESENT;
+ /* 1. Set mod_abs to detect next module
+ * presence event
+ * 2. Set EDC off by setting OPTXLOS signal input to low
+ * (bit 9).
+ * When the EDC is off it locks onto a reference clock and
+ * avoids becoming 'lost'.
+ */
+ mod_abs &= ~(1 << 8);
+ if (!(phy->flags & ELINK_FLAGS_NOC))
+ mod_abs &= ~(1 << 9);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+ /* Clear RX alarm since it stays up as long as
+ * the mod_abs wasn't changed
+ */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+
+ } else {
+ /* Module is present */
+ PMD_DRV_LOG(DEBUG, "MOD_ABS indication show module is present");
+ /* First disable transmitter, and if the module is ok, the
+ * module_detection will enable it
+ * 1. Set mod_abs to detect next module absent event ( bit 8)
+ * 2. Restore the default polarity of the OPRXLOS signal and
+ * this signal will then correctly indicate the presence or
+ * absence of the Rx signal. (bit 9)
+ */
+ mod_abs |= (1 << 8);
+ if (!(phy->flags & ELINK_FLAGS_NOC))
+ mod_abs |= (1 << 9);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+ /* Clear RX alarm since it stays up as long as the mod_abs
+ * wasn't changed. This is need to be done before calling the
+ * module detection, otherwise it will clear* the link update
+ * alarm
+ */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
+ elink_sfp_set_transmitter(params, phy, 0);
+
+ if (elink_wait_for_sfp_module_initialized(phy, params) == 0) {
+ elink_sfp_module_detection(phy, params);
+ } else {
+ PMD_DRV_LOG(DEBUG, "SFP+ module is not initialized");
+ }
+
+ /* Reconfigure link speed based on module type limitations */
+ elink_8727_config_speed(phy, params);
+ }
+
+ PMD_DRV_LOG(DEBUG, "8727 RX_ALARM_STATUS 0x%x", rx_alarm_status);
+ /* No need to check link status in case of module plugged in/out */
+}
+
+static uint8_t elink_8727_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t link_up = 0, oc_port = params->port;
+ uint16_t link_status = 0;
+ uint16_t rx_alarm_status, lasi_ctrl, val1;
+
+ /* If PHY is not initialized, do not check link status */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, &lasi_ctrl);
+ if (!lasi_ctrl)
+ return 0;
+
+ /* Check the LASI on Rx */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+ vars->line_speed = 0;
+ PMD_DRV_LOG(DEBUG, "8727 RX_ALARM_STATUS 0x%x", rx_alarm_status);
+
+ elink_sfp_mask_fault(sc, phy, MDIO_PMA_LASI_TXSTAT,
+ MDIO_PMA_LASI_TXCTRL);
+
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+
+ PMD_DRV_LOG(DEBUG, "8727 LASI status 0x%x", val1);
+
+ /* Clear MSG-OUT */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+ /* If a module is present and there is need to check
+ * for over current
+ */
+ if (!(phy->flags & ELINK_FLAGS_NOC) && !(rx_alarm_status & (1 << 5))) {
+ /* Check over-current using 8727 GPIO0 input */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
+ &val1);
+
+ if ((val1 & (1 << 8)) == 0) {
+ if (!CHIP_IS_E1x(sc))
+ oc_port = SC_PATH(sc) + (params->port << 1);
+ PMD_DRV_LOG(DEBUG,
+ "8727 Power fault has been detected on port %d",
+ oc_port);
+ elink_cb_event_log(sc, ELINK_LOG_ID_OVER_CURRENT, oc_port); //"Error: Power fault on Port %d has "
+ // "been detected and the power to "
+ // "that SFP+ module has been removed "
+ // "to prevent failure of the card. "
+ // "Please remove the SFP+ module and "
+ // "restart the system to clear this "
+ // "error.",
+ /* Disable all RX_ALARMs except for mod_abs */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXCTRL, (1 << 5));
+
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
+ /* Wait for module_absent_event */
+ val1 |= (1 << 8);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, val1);
+ /* Clear RX alarm */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+ elink_8727_power_module(params->sc, phy, 0);
+ return 0;
+ }
+ }
+
+ /* Over current check */
+ /* When module absent bit is set, check module */
+ if (rx_alarm_status & (1 << 5)) {
+ elink_8727_handle_mod_abs(phy, params);
+ /* Enable all mod_abs and link detection bits */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ ((1 << 5) | (1 << 2)));
+ }
+
+ if (!(phy->flags & ELINK_FLAGS_SFP_NOT_APPROVED)) {
+ PMD_DRV_LOG(DEBUG, "Enabling 8727 TX laser");
+ elink_sfp_set_transmitter(params, phy, 1);
+ } else {
+ PMD_DRV_LOG(DEBUG, "Tx is disabled");
+ return 0;
+ }
+
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
+
+ /* Bits 0..2 --> speed detected,
+ * Bits 13..15--> link is down
+ */
+ if ((link_status & (1 << 2)) && (!(link_status & (1 << 15)))) {
+ link_up = 1;
+ vars->line_speed = ELINK_SPEED_10000;
+ PMD_DRV_LOG(DEBUG, "port %x: External link up in 10G",
+ params->port);
+ } else if ((link_status & (1 << 0)) && (!(link_status & (1 << 13)))) {
+ link_up = 1;
+ vars->line_speed = ELINK_SPEED_1000;
+ PMD_DRV_LOG(DEBUG, "port %x: External link up in 1G",
+ params->port);
+ } else {
+ link_up = 0;
+ PMD_DRV_LOG(DEBUG, "port %x: External link is down",
+ params->port);
+ }
+
+ /* Capture 10G link fault. */
+ if (vars->line_speed == ELINK_SPEED_10000) {
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+
+ if (val1 & (1 << 0)) {
+ vars->fault_detected = 1;
+ }
+ }
+
+ if (link_up) {
+ elink_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ PMD_DRV_LOG(DEBUG, "duplex = 0x%x", vars->duplex);
+ }
+
+ if ((ELINK_DUAL_MEDIA(params)) &&
+ (phy->req_line_speed == ELINK_SPEED_1000)) {
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val1);
+ /* In case of dual-media board and 1G, power up the XAUI side,
+ * otherwise power it down. For 10G it is done automatically
+ */
+ if (link_up)
+ val1 &= ~(3 << 10);
+ else
+ val1 |= (3 << 10);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val1);
+ }
+ return link_up;
+}
+
+static void elink_8727_link_reset(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+
+ /* Enable/Disable PHY transmitter output */
+ elink_set_disable_pmd_transmit(params, phy, 1);
+
+ /* Disable Transmitter */
+ elink_sfp_set_transmitter(params, phy, 0);
+ /* Clear LASI */
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0);
+
+}
+
+/******************************************************************/
+/* BNX2X8481/BNX2X84823/BNX2X84833 PHY SECTION */
+/******************************************************************/
+static void elink_save_848xx_spirom_version(struct elink_phy *phy,
+ struct bnx2x_softc *sc, uint8_t port)
+{
+ uint16_t val, fw_ver2, cnt, i;
+ static struct elink_reg_set reg_set[] = {
+ {MDIO_PMA_DEVAD, 0xA819, 0x0014},
+ {MDIO_PMA_DEVAD, 0xA81A, 0xc200},
+ {MDIO_PMA_DEVAD, 0xA81B, 0x0000},
+ {MDIO_PMA_DEVAD, 0xA81C, 0x0300},
+ {MDIO_PMA_DEVAD, 0xA817, 0x0009}
+ };
+ uint16_t fw_ver1;
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) {
+ elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
+ elink_save_spirom_version(sc, port, fw_ver1 & 0xfff,
+ phy->ver_addr);
+ } else {
+ /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
+ /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ elink_cl45_write(sc, phy, reg_set[i].devad,
+ reg_set[i].reg, reg_set[i].val);
+
+ for (cnt = 0; cnt < 100; cnt++) {
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ DELAY(5);
+ }
+ if (cnt == 100) {
+ PMD_DRV_LOG(DEBUG, "Unable to read 848xx "
+ "phy fw version(1)");
+ elink_save_spirom_version(sc, port, 0, phy->ver_addr);
+ return;
+ }
+
+ /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+ for (cnt = 0; cnt < 100; cnt++) {
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ DELAY(5);
+ }
+ if (cnt == 100) {
+ PMD_DRV_LOG(DEBUG, "Unable to read 848xx phy fw "
+ "version(2)");
+ elink_save_spirom_version(sc, port, 0, phy->ver_addr);
+ return;
+ }
+
+ /* lower 16 bits of the register SPI_FW_STATUS */
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+ /* upper 16 bits of register SPI_FW_STATUS */
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+
+ elink_save_spirom_version(sc, port, (fw_ver2 << 16) | fw_ver1,
+ phy->ver_addr);
+ }
+
+}
+
+static void elink_848xx_set_led(struct bnx2x_softc *sc, struct elink_phy *phy)
+{
+ uint16_t val, offset, i;
+ static struct elink_reg_set reg_set[] = {
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080},
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018},
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000},
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
+ MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
+ {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
+ };
+ /* PHYC_CTL_LED_CTL */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ val &= 0xFE00;
+ val |= 0x0092;
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834))
+ offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
+ else
+ offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
+
+ /* stretch_en for LED3 */
+ elink_cl45_read_or_write(sc, phy,
+ MDIO_PMA_DEVAD, offset,
+ MDIO_PMA_REG_84823_LED3_STRETCH_EN);
+}
+
+static void elink_848xx_specific_func(struct elink_phy *phy,
+ struct elink_params *params,
+ uint32_t action)
+{
+ struct bnx2x_softc *sc = params->sc;
+ switch (action) {
+ case ELINK_PHY_INIT:
+ if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) &&
+ (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) {
+ /* Save spirom version */
+ elink_save_848xx_spirom_version(phy, sc, params->port);
+ }
+ /* This phy uses the NIG latch mechanism since link indication
+ * arrives through its LED4 and not via its LASI signal, so we
+ * get steady signal instead of clear on read
+ */
+ elink_bits_en(sc, NIG_REG_LATCH_BC_0 + params->port * 4,
+ 1 << ELINK_NIG_LATCH_BC_ENABLE_MI_INT);
+
+ elink_848xx_set_led(sc, phy);
+ break;
+ }
+}
+
+static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t autoneg_val, an_1000_val, an_10_100_val;
+
+ elink_848xx_specific_func(phy, params, ELINK_PHY_INIT);
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
+
+ /* set 1000 speed advertisement */
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+ &an_1000_val);
+
+ elink_ext_phy_set_pause(params, phy, vars);
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_AN_ADV, &an_10_100_val);
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+ &autoneg_val);
+ /* Disable forced speed */
+ autoneg_val &=
+ ~((1 << 6) | (1 << 8) | (1 << 9) | (1 << 12) | (1 << 13));
+ an_10_100_val &= ~((1 << 5) | (1 << 6) | (1 << 7) | (1 << 8));
+
+ if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == ELINK_SPEED_1000)) {
+ an_1000_val |= (1 << 8);
+ autoneg_val |= (1 << 9 | 1 << 12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_1000_val |= (1 << 9);
+ PMD_DRV_LOG(DEBUG, "Advertising 1G");
+ } else
+ an_1000_val &= ~((1 << 8) | (1 << 9));
+
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+ an_1000_val);
+
+ /* Set 10/100 speed advertisement */
+ if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ /* Enable autoneg and restart autoneg for legacy speeds
+ */
+ autoneg_val |= (1 << 9 | 1 << 12);
+ an_10_100_val |= (1 << 8);
+ PMD_DRV_LOG(DEBUG, "Advertising 100M-FD");
+ }
+
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ /* Enable autoneg and restart autoneg for legacy speeds
+ */
+ autoneg_val |= (1 << 9 | 1 << 12);
+ an_10_100_val |= (1 << 7);
+ PMD_DRV_LOG(DEBUG, "Advertising 100M-HD");
+ }
+
+ if ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+ (phy->supported & ELINK_SUPPORTED_10baseT_Full)) {
+ an_10_100_val |= (1 << 6);
+ autoneg_val |= (1 << 9 | 1 << 12);
+ PMD_DRV_LOG(DEBUG, "Advertising 10M-FD");
+ }
+
+ if ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
+ (phy->supported & ELINK_SUPPORTED_10baseT_Half)) {
+ an_10_100_val |= (1 << 5);
+ autoneg_val |= (1 << 9 | 1 << 12);
+ PMD_DRV_LOG(DEBUG, "Advertising 10M-HD");
+ }
+ }
+
+ /* Only 10/100 are allowed to work in FORCE mode */
+ if ((phy->req_line_speed == ELINK_SPEED_100) &&
+ (phy->supported &
+ (ELINK_SUPPORTED_100baseT_Half | ELINK_SUPPORTED_100baseT_Full))) {
+ autoneg_val |= (1 << 13);
+ /* Enabled AUTO-MDIX when autoneg is disabled */
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+ (1 << 15 | 1 << 9 | 7 << 0));
+ /* The PHY needs this set even for forced link. */
+ an_10_100_val |= (1 << 8) | (1 << 7);
+ PMD_DRV_LOG(DEBUG, "Setting 100M force");
+ }
+ if ((phy->req_line_speed == ELINK_SPEED_10) &&
+ (phy->supported &
+ (ELINK_SUPPORTED_10baseT_Half | ELINK_SUPPORTED_10baseT_Full))) {
+ /* Enabled AUTO-MDIX when autoneg is disabled */
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+ (1 << 15 | 1 << 9 | 7 << 0));
+ PMD_DRV_LOG(DEBUG, "Setting 10M force");
+ }
+
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
+ an_10_100_val);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ autoneg_val |= (1 << 8);
+
+ /* Always write this if this is not 84833/4.
+ * For 84833/4, write it only when it's a forced speed.
+ */
+ if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) &&
+ (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) ||
+ ((autoneg_val & (1 << 12)) == 0))
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
+
+ if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+ (phy->req_line_speed == ELINK_SPEED_10000)) {
+ PMD_DRV_LOG(DEBUG, "Advertising 10G");
+ /* Restart autoneg for 10G */
+
+ elink_cl45_read_or_write(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ 0x1000);
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x3200);
+ } else
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, 1);
+
+ return ELINK_STATUS_OK;
+}
+
+static uint8_t elink_8481_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ /* Restore normal power mode */
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+
+ /* HW reset */
+ elink_ext_phy_hw_reset(sc, params->port);
+ elink_wait_reset_complete(sc, phy, params);
+
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15);
+ return elink_848xx_cmn_config_init(phy, params, vars);
+}
+
+#define PHY84833_CMDHDLR_WAIT 300
+#define PHY84833_CMDHDLR_MAX_ARGS 5
+static elink_status_t elink_84833_cmd_hdlr(struct elink_phy *phy,
+ struct elink_params *params,
+ uint16_t fw_cmd, uint16_t cmd_args[],
+ int argc)
+{
+ int idx;
+ uint16_t val;
+ struct bnx2x_softc *sc = params->sc;
+ /* Write CMD_OPEN_OVERRIDE to STATUS reg */
+ elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_OPEN_OVERRIDE);
+ for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+ elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_STATUS, &val);
+ if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
+ break;
+ DELAY(1000 * 1);
+ }
+ if (idx >= PHY84833_CMDHDLR_WAIT) {
+ PMD_DRV_LOG(DEBUG, "FW cmd: FW not ready.");
+ return ELINK_STATUS_ERROR;
+ }
+
+ /* Prepare argument(s) and issue command */
+ for (idx = 0; idx < argc; idx++) {
+ elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_DATA1 + idx,
+ cmd_args[idx]);
+ }
+ elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
+ for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+ elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_STATUS, &val);
+ if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
+ break;
+ DELAY(1000 * 1);
+ }
+ if ((idx >= PHY84833_CMDHDLR_WAIT) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+ PMD_DRV_LOG(DEBUG, "FW cmd failed.");
+ return ELINK_STATUS_ERROR;
+ }
+ /* Gather returning data */
+ for (idx = 0; idx < argc; idx++) {
+ elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_DATA1 + idx,
+ &cmd_args[idx]);
+ }
+ elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_CLEAR_COMPLETE);
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_84833_pair_swap_cfg(struct elink_phy *phy,
+ struct elink_params *params,
+ __rte_unused struct elink_vars
+ *vars)
+{
+ uint32_t pair_swap;
+ uint16_t data[PHY84833_CMDHDLR_MAX_ARGS];
+ elink_status_t status;
+ struct bnx2x_softc *sc = params->sc;
+
+ /* Check for configuration. */
+ pair_swap = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].
+ xgbt_phy_cfg)) &
+ PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+
+ if (pair_swap == 0)
+ return ELINK_STATUS_OK;
+
+ /* Only the second argument is used for this command */
+ data[1] = (uint16_t) pair_swap;
+
+ status = elink_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_PAIR_SWAP, data,
+ PHY84833_CMDHDLR_MAX_ARGS);
+ if (status == ELINK_STATUS_OK) {
+ PMD_DRV_LOG(DEBUG, "Pairswap OK, val=0x%x", data[1]);
+ }
+
+ return status;
+}
+
+static uint8_t elink_84833_get_reset_gpios(struct bnx2x_softc *sc,
+ uint32_t shmem_base_path[],
+ __rte_unused uint32_t chip_id)
+{
+ uint32_t reset_pin[2];
+ uint32_t idx;
+ uint8_t reset_gpios;
+ if (CHIP_IS_E3(sc)) {
+ /* Assume that these will be GPIOs, not EPIOs. */
+ for (idx = 0; idx < 2; idx++) {
+ /* Map config param to register bit. */
+ reset_pin[idx] = REG_RD(sc, shmem_base_path[idx] +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[0].
+ e3_cmn_pin_cfg));
+ reset_pin[idx] =
+ (reset_pin[idx] & PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+ reset_pin[idx] -= PIN_CFG_GPIO0_P0;
+ reset_pin[idx] = (1 << reset_pin[idx]);
+ }
+ reset_gpios = (uint8_t) (reset_pin[0] | reset_pin[1]);
+ } else {
+ /* E2, look from diff place of shmem. */
+ for (idx = 0; idx < 2; idx++) {
+ reset_pin[idx] = REG_RD(sc, shmem_base_path[idx] +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[0].
+ default_cfg));
+ reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK;
+ reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0;
+ reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT;
+ reset_pin[idx] = (1 << reset_pin[idx]);
+ }
+ reset_gpios = (uint8_t) (reset_pin[0] | reset_pin[1]);
+ }
+
+ return reset_gpios;
+}
+
+static void elink_84833_hw_reset_phy(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t reset_gpios;
+ uint32_t other_shmem_base_addr = REG_RD(sc, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ other_shmem_base_addr));
+
+ uint32_t shmem_base_path[2];
+
+ /* Work around for 84833 LED failure inside RESET status */
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+ MDIO_AN_REG_8481_MII_CTRL_FORCE_1G);
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_1G_100T_EXT_CTRL,
+ MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF);
+
+ shmem_base_path[0] = params->shmem_base;
+ shmem_base_path[1] = other_shmem_base_addr;
+
+ reset_gpios = elink_84833_get_reset_gpios(sc, shmem_base_path,
+ params->chip_id);
+
+ elink_cb_gpio_mult_write(sc, reset_gpios,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ DELAY(10);
+ PMD_DRV_LOG(DEBUG, "84833 hw reset on pin values 0x%x", reset_gpios);
+}
+
+static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ elink_status_t rc;
+ uint16_t cmd_args = 0;
+
+ PMD_DRV_LOG(DEBUG, "Don't Advertise 10GBase-T EEE");
+
+ /* Prevent Phy from working in EEE and advertising it */
+ rc = elink_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ if (rc != ELINK_STATUS_OK) {
+ PMD_DRV_LOG(DEBUG, "EEE disable failed.");
+ return rc;
+ }
+
+ return elink_eee_disable(phy, params, vars);
+}
+
+static elink_status_t elink_8483x_enable_eee(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ elink_status_t rc;
+ uint16_t cmd_args = 1;
+
+ rc = elink_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ if (rc != ELINK_STATUS_OK) {
+ PMD_DRV_LOG(DEBUG, "EEE enable failed.");
+ return rc;
+ }
+
+ return elink_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV);
+}
+
+#define PHY84833_CONSTANT_LATENCY 1193
+static uint8_t elink_848x3_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port, initialize = 1;
+ uint16_t val;
+ uint32_t actual_phy_selection;
+ uint16_t cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
+ elink_status_t rc = ELINK_STATUS_OK;
+
+ DELAY(1000 * 1);
+
+ if (!(CHIP_IS_E1x(sc)))
+ port = SC_PATH(sc);
+ else
+ port = params->port;
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823) {
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+ } else {
+ /* MDIO reset */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x8000);
+ }
+
+ elink_wait_reset_complete(sc, phy, params);
+
+ /* Wait for GPHY to come out of reset */
+ DELAY(1000 * 50);
+ if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) &&
+ (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) {
+ /* BNX2X84823 requires that XGXS links up first @ 10G for normal
+ * behavior.
+ */
+ uint16_t temp;
+ temp = vars->line_speed;
+ vars->line_speed = ELINK_SPEED_10000;
+ elink_set_autoneg(&params->phy[ELINK_INT_PHY], params, vars, 0);
+ elink_program_serdes(&params->phy[ELINK_INT_PHY], params, vars);
+ vars->line_speed = temp;
+ }
+
+ elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, &val);
+ val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+ MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
+ MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
+ MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
+ MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
+
+ if (CHIP_IS_E3(sc)) {
+ val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+ MDIO_CTL_REG_84823_MEDIA_LINE_MASK);
+ } else {
+ val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI |
+ MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L);
+ }
+
+ actual_phy_selection = elink_phy_selection(params);
+
+ switch (actual_phy_selection) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ /* Do nothing. Essentially this is like the priority copper */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ /* Do nothing here. The first PHY won't be initialized at all */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
+ initialize = 0;
+ break;
+ }
+ if (params->phy[ELINK_EXT_PHY2].req_line_speed == ELINK_SPEED_1000)
+ val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
+
+ elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, val);
+ PMD_DRV_LOG(DEBUG, "Multi_phy config = 0x%x, Media control = 0x%x",
+ params->multi_phy_config, val);
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) {
+ elink_84833_pair_swap_cfg(phy, params, vars);
+
+ /* Keep AutogrEEEn disabled. */
+ cmd_args[0] = 0x0;
+ cmd_args[1] = 0x0;
+ cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+ cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+ rc = elink_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, cmd_args,
+ PHY84833_CMDHDLR_MAX_ARGS);
+ if (rc != ELINK_STATUS_OK) {
+ PMD_DRV_LOG(DEBUG, "Cfg AutogrEEEn failed.");
+ }
+ }
+ if (initialize) {
+ rc = elink_848xx_cmn_config_init(phy, params, vars);
+ } else {
+ elink_save_848xx_spirom_version(phy, sc, params->port);
+ }
+ /* 84833 PHY has a better feature and doesn't need to support this. */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823) {
+ uint32_t cms_enable = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->
+ port].
+ default_cfg)) &
+ PORT_HW_CFG_ENABLE_CMS_MASK;
+
+ elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+ if (cms_enable)
+ val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ else
+ val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+ }
+
+ elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_FW_REV, &val);
+
+ /* Configure EEE support */
+ if ((val >= MDIO_84833_TOP_CFG_FW_EEE) &&
+ (val != MDIO_84833_TOP_CFG_FW_NO_EEE) &&
+ elink_eee_has_cap(params)) {
+ rc = elink_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV);
+ if (rc != ELINK_STATUS_OK) {
+ PMD_DRV_LOG(DEBUG, "Failed to configure EEE timers");
+ elink_8483x_disable_eee(phy, params, vars);
+ return rc;
+ }
+
+ if ((phy->req_duplex == DUPLEX_FULL) &&
+ (params->eee_mode & ELINK_EEE_MODE_ADV_LPI) &&
+ (elink_eee_calc_timer(params) ||
+ !(params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI)))
+ rc = elink_8483x_enable_eee(phy, params, vars);
+ else
+ rc = elink_8483x_disable_eee(phy, params, vars);
+ if (rc != ELINK_STATUS_OK) {
+ PMD_DRV_LOG(DEBUG, "Failed to set EEE advertisement");
+ return rc;
+ }
+ } else {
+ vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
+ }
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) {
+ /* Bring PHY out of super isolate mode as the final step. */
+ elink_cl45_read_and_write(sc, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1,
+ (uint16_t) ~
+ MDIO_84833_SUPER_ISOLATE);
+ }
+ return rc;
+}
+
+static uint8_t elink_848xx_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val, val1, val2;
+ uint8_t link_up = 0;
+
+ /* Check 10G-BaseT link status */
+ /* Check PMD signal ok */
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD, 0xFFFA, &val1);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, &val2);
+ PMD_DRV_LOG(DEBUG, "BNX2X848xx: PMD_SIGNAL 1.a811 = 0x%x", val2);
+
+ /* Check link 10G */
+ if (val2 & (1 << 11)) {
+ vars->line_speed = ELINK_SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ link_up = 1;
+ elink_ext_phy_10G_an_resolve(sc, phy, vars);
+ } else { /* Check Legacy speed link */
+ uint16_t legacy_status, legacy_speed, mii_ctrl;
+
+ /* Enable expansion register 0x42 (Operation mode status) */
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42);
+
+ /* Get legacy speed operation status */
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
+ &legacy_status);
+
+ PMD_DRV_LOG(DEBUG, "Legacy speed status = 0x%x", legacy_status);
+ link_up = ((legacy_status & (1 << 11)) == (1 << 11));
+ legacy_speed = (legacy_status & (3 << 9));
+ if (legacy_speed == (0 << 9))
+ vars->line_speed = ELINK_SPEED_10;
+ else if (legacy_speed == (1 << 9))
+ vars->line_speed = ELINK_SPEED_100;
+ else if (legacy_speed == (2 << 9))
+ vars->line_speed = ELINK_SPEED_1000;
+ else { /* Should not happen: Treat as link down */
+ vars->line_speed = 0;
+ link_up = 0;
+ }
+
+ if (params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_IEEE_PHY_TEST) {
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+ &mii_ctrl);
+ /* For IEEE testing, check for a fake link. */
+ link_up |= ((mii_ctrl & 0x3040) == 0x40);
+ }
+
+ if (link_up) {
+ if (legacy_status & (1 << 8))
+ vars->duplex = DUPLEX_FULL;
+ else
+ vars->duplex = DUPLEX_HALF;
+
+ PMD_DRV_LOG(DEBUG,
+ "Link is up in %dMbps, is_duplex_full= %d",
+ vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
+ /* Check legacy speed AN resolution */
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_STATUS,
+ &val);
+ if (val & (1 << 5))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
+ &val);
+ if ((val & (1 << 0)) == 0)
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+ }
+ }
+ if (link_up) {
+ PMD_DRV_LOG(DEBUG, "BNX2X848x3: link speed is %d",
+ vars->line_speed);
+ elink_ext_phy_resolve_fc(phy, params, vars);
+
+ /* Read LP advertised speeds */
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &val);
+ if (val & (1 << 5))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
+ if (val & (1 << 6))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
+ if (val & (1 << 7))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
+ if (val & (1 << 8))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
+ if (val & (1 << 9))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
+
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_1000T_STATUS, &val);
+
+ if (val & (1 << 10))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
+ if (val & (1 << 11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_MASTER_STATUS, &val);
+
+ if (val & (1 << 11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ /* Determine if EEE was negotiated */
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834))
+ elink_eee_an_resolve(phy, params, vars);
+ }
+
+ return link_up;
+}
+
+static uint8_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str,
+ uint16_t * len)
+{
+ elink_status_t status = ELINK_STATUS_OK;
+ uint32_t spirom_ver;
+ spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
+ status = elink_format_ver(spirom_ver, str, len);
+ return status;
+}
+
+static void elink_8481_hw_reset(__rte_unused struct elink_phy *phy,
+ struct elink_params *params)
+{
+ elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+ elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+}
+
+static void elink_8481_link_reset(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ elink_cl45_write(params->sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+ elink_cl45_write(params->sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
+}
+
+static void elink_848x3_link_reset(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port;
+ uint16_t val16;
+
+ if (!(CHIP_IS_E1x(sc)))
+ port = SC_PATH(sc);
+ else
+ port = params->port;
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823) {
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ } else {
+ elink_cl45_read(sc, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16);
+ val16 |= MDIO_84833_SUPER_ISOLATE;
+ elink_cl45_write(sc, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16);
+ }
+}
+
+static void elink_848xx_set_link_led(struct elink_phy *phy,
+ struct elink_params *params, uint8_t mode)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val;
+ __rte_unused uint8_t port;
+
+ if (!(CHIP_IS_E1x(sc)))
+ port = SC_PATH(sc);
+ else
+ port = params->port;
+
+ switch (mode) {
+ case ELINK_LED_MODE_OFF:
+
+ PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE OFF", port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set LED masks */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK, 0x0);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK, 0x0);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK, 0x0);
+
+ } else {
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+ }
+ break;
+ case ELINK_LED_MODE_FRONT_PANEL_OFF:
+
+ PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE FRONT PANEL OFF", port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set LED masks */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK, 0x0);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK, 0x0);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK, 0x20);
+
+ } else {
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) {
+ /* Disable MI_INT interrupt before setting LED4
+ * source to constant off.
+ */
+ if (REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port * 4) &
+ ELINK_NIG_MASK_MI_INT) {
+ params->link_flags |=
+ ELINK_LINK_FLAGS_INT_DISABLED;
+
+ elink_bits_dis(sc,
+ NIG_REG_MASK_INTERRUPT_PORT0
+ + params->port * 4,
+ ELINK_NIG_MASK_MI_INT);
+ }
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x0);
+ }
+ }
+ break;
+ case ELINK_LED_MODE_ON:
+
+ PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE ON", port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+ /* Set control reg */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ val &= 0x8000;
+ val |= 0x2492;
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+
+ /* Set LED masks */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK, 0x20);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK, 0x20);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK, 0x0);
+ } else {
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK, 0x20);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) {
+ /* Disable MI_INT interrupt before setting LED4
+ * source to constant on.
+ */
+ if (REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port * 4) &
+ ELINK_NIG_MASK_MI_INT) {
+ params->link_flags |=
+ ELINK_LINK_FLAGS_INT_DISABLED;
+
+ elink_bits_dis(sc,
+ NIG_REG_MASK_INTERRUPT_PORT0
+ + params->port * 4,
+ ELINK_NIG_MASK_MI_INT);
+ }
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x20);
+ }
+ }
+ break;
+
+ case ELINK_LED_MODE_OPER:
+
+ PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE OPER", port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set control reg */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+
+ if (!((val &
+ MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+ >>
+ MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT))
+ {
+ PMD_DRV_LOG(DEBUG, "Setting LINK_SIGNAL");
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ 0xa492);
+ }
+
+ /* Set LED masks */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK, 0x10);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK, 0x80);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK, 0x98);
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK, 0x40);
+
+ } else {
+ /* EXTPHY2 LED mode indicate that the 100M/1G/10G LED
+ * sources are all wired through LED1, rather than only
+ * 10G in other modes.
+ */
+ val = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80;
+
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK, val);
+
+ /* Tell LED3 to blink on source */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ val &= ~(7 << 6);
+ val |= (1 << 6); /* A83B[8:6]= 1 */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) {
+ /* Restore LED4 source to external link,
+ * and re-enable interrupts.
+ */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x40);
+ if (params->link_flags &
+ ELINK_LINK_FLAGS_INT_DISABLED) {
+ elink_link_int_enable(params);
+ params->link_flags &=
+ ~ELINK_LINK_FLAGS_INT_DISABLED;
+ }
+ }
+ }
+ break;
+ }
+
+ /* This is a workaround for E3+84833 until autoneg
+ * restart is fixed in f/w
+ */
+ if (CHIP_IS_E3(sc)) {
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1, &val);
+ }
+}
+
+/******************************************************************/
+/* 54618SE PHY SECTION */
+/******************************************************************/
+static void elink_54618se_specific_func(struct elink_phy *phy,
+ struct elink_params *params,
+ uint32_t action)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t temp;
+ switch (action) {
+ case ELINK_PHY_INIT:
+ /* Configure LED4: set to INTR (0x6). */
+ /* Accessing shadow register 0xe. */
+ elink_cl22_write(sc, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL2);
+ elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp);
+ temp &= ~(0xf << 4);
+ temp |= (0x6 << 4);
+ elink_cl22_write(sc, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ /* Configure INTR based on link status change. */
+ elink_cl22_write(sc, phy,
+ MDIO_REG_INTR_MASK,
+ ~MDIO_REG_INTR_MASK_LINK_STATUS);
+ break;
+ }
+}
+
+static uint8_t elink_54618se_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t port;
+ uint16_t autoneg_val, an_1000_val, an_10_100_val, fc_val, temp;
+ uint32_t cfg_pin;
+
+ PMD_DRV_LOG(DEBUG, "54618SE cfg init");
+ DELAY(1000 * 1);
+
+ /* This works with E3 only, no need to check the chip
+ * before determining the port.
+ */
+ port = params->port;
+
+ cfg_pin = (REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].
+ e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+ /* Drive pin high to bring the GPHY out of reset. */
+ elink_set_cfg_pin(sc, cfg_pin, 1);
+
+ /* wait for GPHY to reset */
+ DELAY(1000 * 50);
+
+ /* reset phy */
+ elink_cl22_write(sc, phy, MDIO_PMA_REG_CTRL, 0x8000);
+ elink_wait_reset_complete(sc, phy, params);
+
+ /* Wait for GPHY to reset */
+ DELAY(1000 * 50);
+
+ elink_54618se_specific_func(phy, params, ELINK_PHY_INIT);
+ /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
+ elink_cl22_write(sc, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_AUTO_DET_MED);
+ elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp);
+ temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD;
+ elink_cl22_write(sc, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+
+ /* Set up fc */
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ fc_val = 0;
+ if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC)
+ fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+
+ if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+ fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+
+ /* Read all advertisement */
+ elink_cl22_read(sc, phy, 0x09, &an_1000_val);
+
+ elink_cl22_read(sc, phy, 0x04, &an_10_100_val);
+
+ elink_cl22_read(sc, phy, MDIO_PMA_REG_CTRL, &autoneg_val);
+
+ /* Disable forced speed */
+ autoneg_val &=
+ ~((1 << 6) | (1 << 8) | (1 << 9) | (1 << 12) | (1 << 13));
+ an_10_100_val &=
+ ~((1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 10) |
+ (1 << 11));
+
+ if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == ELINK_SPEED_1000)) {
+ an_1000_val |= (1 << 8);
+ autoneg_val |= (1 << 9 | 1 << 12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_1000_val |= (1 << 9);
+ PMD_DRV_LOG(DEBUG, "Advertising 1G");
+ } else
+ an_1000_val &= ~((1 << 8) | (1 << 9));
+
+ elink_cl22_write(sc, phy, 0x09, an_1000_val);
+ elink_cl22_read(sc, phy, 0x09, &an_1000_val);
+
+ /* Advertise 10/100 link speed */
+ if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+ an_10_100_val |= (1 << 5);
+ autoneg_val |= (1 << 9 | 1 << 12);
+ PMD_DRV_LOG(DEBUG, "Advertising 10M-HD");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+ an_10_100_val |= (1 << 6);
+ autoneg_val |= (1 << 9 | 1 << 12);
+ PMD_DRV_LOG(DEBUG, "Advertising 10M-FD");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ an_10_100_val |= (1 << 7);
+ autoneg_val |= (1 << 9 | 1 << 12);
+ PMD_DRV_LOG(DEBUG, "Advertising 100M-HD");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ an_10_100_val |= (1 << 8);
+ autoneg_val |= (1 << 9 | 1 << 12);
+ PMD_DRV_LOG(DEBUG, "Advertising 100M-FD");
+ }
+ }
+
+ /* Only 10/100 are allowed to work in FORCE mode */
+ if (phy->req_line_speed == ELINK_SPEED_100) {
+ autoneg_val |= (1 << 13);
+ /* Enabled AUTO-MDIX when autoneg is disabled */
+ elink_cl22_write(sc, phy, 0x18, (1 << 15 | 1 << 9 | 7 << 0));
+ PMD_DRV_LOG(DEBUG, "Setting 100M force");
+ }
+ if (phy->req_line_speed == ELINK_SPEED_10) {
+ /* Enabled AUTO-MDIX when autoneg is disabled */
+ elink_cl22_write(sc, phy, 0x18, (1 << 15 | 1 << 9 | 7 << 0));
+ PMD_DRV_LOG(DEBUG, "Setting 10M force");
+ }
+
+ if ((phy->flags & ELINK_FLAGS_EEE) && elink_eee_has_cap(params)) {
+ elink_status_t rc;
+
+ elink_cl22_write(sc, phy, MDIO_REG_GPHY_EXP_ACCESS,
+ MDIO_REG_GPHY_EXP_ACCESS_TOP |
+ MDIO_REG_GPHY_EXP_TOP_2K_BUF);
+ elink_cl22_read(sc, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp);
+ temp &= 0xfffe;
+ elink_cl22_write(sc, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp);
+
+ rc = elink_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV);
+ if (rc != ELINK_STATUS_OK) {
+ PMD_DRV_LOG(DEBUG, "Failed to configure EEE timers");
+ elink_eee_disable(phy, params, vars);
+ } else if ((params->eee_mode & ELINK_EEE_MODE_ADV_LPI) &&
+ (phy->req_duplex == DUPLEX_FULL) &&
+ (elink_eee_calc_timer(params) ||
+ !(params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI))) {
+ /* Need to advertise EEE only when requested,
+ * and either no LPI assertion was requested,
+ * or it was requested and a valid timer was set.
+ * Also notice full duplex is required for EEE.
+ */
+ elink_eee_advertise(phy, params, vars,
+ SHMEM_EEE_1G_ADV);
+ } else {
+ PMD_DRV_LOG(DEBUG, "Don't Advertise 1GBase-T EEE");
+ elink_eee_disable(phy, params, vars);
+ }
+ } else {
+ vars->eee_status &= ~SHMEM_EEE_1G_ADV <<
+ SHMEM_EEE_SUPPORTED_SHIFT;
+
+ if (phy->flags & ELINK_FLAGS_EEE) {
+ /* Handle legacy auto-grEEEn */
+ if (params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+ temp = 6;
+ PMD_DRV_LOG(DEBUG, "Enabling Auto-GrEEEn");
+ } else {
+ temp = 0;
+ PMD_DRV_LOG(DEBUG, "Don't Adv. EEE");
+ }
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_EEE_ADV, temp);
+ }
+ }
+
+ elink_cl22_write(sc, phy, 0x04, an_10_100_val | fc_val);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ autoneg_val |= (1 << 8);
+
+ elink_cl22_write(sc, phy, MDIO_PMA_REG_CTRL, autoneg_val);
+
+ return ELINK_STATUS_OK;
+}
+
+static void elink_5461x_set_link_led(struct elink_phy *phy,
+ struct elink_params *params, uint8_t mode)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t temp;
+
+ elink_cl22_write(sc, phy,
+ MDIO_REG_GPHY_SHADOW, MDIO_REG_GPHY_SHADOW_LED_SEL1);
+ elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp);
+ temp &= 0xff00;
+
+ PMD_DRV_LOG(DEBUG, "54618x set link led (mode=%x)", mode);
+ switch (mode) {
+ case ELINK_LED_MODE_FRONT_PANEL_OFF:
+ case ELINK_LED_MODE_OFF:
+ temp |= 0x00ee;
+ break;
+ case ELINK_LED_MODE_OPER:
+ temp |= 0x0001;
+ break;
+ case ELINK_LED_MODE_ON:
+ temp |= 0x00ff;
+ break;
+ default:
+ break;
+ }
+ elink_cl22_write(sc, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ return;
+}
+
+static void elink_54618se_link_reset(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t cfg_pin;
+ uint8_t port;
+
+ /* In case of no EPIO routed to reset the GPHY, put it
+ * in low power mode.
+ */
+ elink_cl22_write(sc, phy, MDIO_PMA_REG_CTRL, 0x800);
+ /* This works with E3 only, no need to check the chip
+ * before determining the port.
+ */
+ port = params->port;
+ cfg_pin = (REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].
+ e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+ /* Drive pin low to put GPHY in reset. */
+ elink_set_cfg_pin(sc, cfg_pin, 0);
+}
+
+static uint8_t elink_54618se_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val;
+ uint8_t link_up = 0;
+ uint16_t legacy_status, legacy_speed;
+
+ /* Get speed operation status */
+ elink_cl22_read(sc, phy, MDIO_REG_GPHY_AUX_STATUS, &legacy_status);
+ PMD_DRV_LOG(DEBUG, "54618SE read_status: 0x%x", legacy_status);
+
+ /* Read status to clear the PHY interrupt. */
+ elink_cl22_read(sc, phy, MDIO_REG_INTR_STATUS, &val);
+
+ link_up = ((legacy_status & (1 << 2)) == (1 << 2));
+
+ if (link_up) {
+ legacy_speed = (legacy_status & (7 << 8));
+ if (legacy_speed == (7 << 8)) {
+ vars->line_speed = ELINK_SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ } else if (legacy_speed == (6 << 8)) {
+ vars->line_speed = ELINK_SPEED_1000;
+ vars->duplex = DUPLEX_HALF;
+ } else if (legacy_speed == (5 << 8)) {
+ vars->line_speed = ELINK_SPEED_100;
+ vars->duplex = DUPLEX_FULL;
+ }
+ /* Omitting 100Base-T4 for now */
+ else if (legacy_speed == (3 << 8)) {
+ vars->line_speed = ELINK_SPEED_100;
+ vars->duplex = DUPLEX_HALF;
+ } else if (legacy_speed == (2 << 8)) {
+ vars->line_speed = ELINK_SPEED_10;
+ vars->duplex = DUPLEX_FULL;
+ } else if (legacy_speed == (1 << 8)) {
+ vars->line_speed = ELINK_SPEED_10;
+ vars->duplex = DUPLEX_HALF;
+ } else /* Should not happen */
+ vars->line_speed = 0;
+
+ PMD_DRV_LOG(DEBUG,
+ "Link is up in %dMbps, is_duplex_full= %d",
+ vars->line_speed, (vars->duplex == DUPLEX_FULL));
+
+ /* Check legacy speed AN resolution */
+ elink_cl22_read(sc, phy, 0x01, &val);
+ if (val & (1 << 5))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ elink_cl22_read(sc, phy, 0x06, &val);
+ if ((val & (1 << 0)) == 0)
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+
+ PMD_DRV_LOG(DEBUG, "BNX2X54618SE: link speed is %d",
+ vars->line_speed);
+
+ elink_ext_phy_resolve_fc(phy, params, vars);
+
+ if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ /* Report LP advertised speeds */
+ elink_cl22_read(sc, phy, 0x5, &val);
+
+ if (val & (1 << 5))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
+ if (val & (1 << 6))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
+ if (val & (1 << 7))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
+ if (val & (1 << 8))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
+ if (val & (1 << 9))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
+
+ elink_cl22_read(sc, phy, 0xa, &val);
+ if (val & (1 << 10))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
+ if (val & (1 << 11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+
+ if ((phy->flags & ELINK_FLAGS_EEE) &&
+ elink_eee_has_cap(params))
+ elink_eee_an_resolve(phy, params, vars);
+ }
+ }
+ return link_up;
+}
+
+static void elink_54618se_config_loopback(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t val;
+ uint32_t umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+
+ PMD_DRV_LOG(DEBUG, "2PMA/PMD ext_phy_loopback: 54618se");
+
+ /* Enable master/slave manual mmode and set to master */
+ /* mii write 9 [bits set 11 12] */
+ elink_cl22_write(sc, phy, 0x09, 3 << 11);
+
+ /* forced 1G and disable autoneg */
+ /* set val [mii read 0] */
+ /* set val [expr $val & [bits clear 6 12 13]] */
+ /* set val [expr $val | [bits set 6 8]] */
+ /* mii write 0 $val */
+ elink_cl22_read(sc, phy, 0x00, &val);
+ val &= ~((1 << 6) | (1 << 12) | (1 << 13));
+ val |= (1 << 6) | (1 << 8);
+ elink_cl22_write(sc, phy, 0x00, val);
+
+ /* Set external loopback and Tx using 6dB coding */
+ /* mii write 0x18 7 */
+ /* set val [mii read 0x18] */
+ /* mii write 0x18 [expr $val | [bits set 10 15]] */
+ elink_cl22_write(sc, phy, 0x18, 7);
+ elink_cl22_read(sc, phy, 0x18, &val);
+ elink_cl22_write(sc, phy, 0x18, val | (1 << 10) | (1 << 15));
+
+ /* This register opens the gate for the UMAC despite its name */
+ REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4, 1);
+
+ /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+ * length used by the MAC receive logic to check frames.
+ */
+ REG_WR(sc, umac_base + UMAC_REG_MAXFR, 0x2710);
+}
+
+/******************************************************************/
+/* SFX7101 PHY SECTION */
+/******************************************************************/
+static void elink_7101_config_loopback(struct elink_phy *phy,
+ struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ /* SFX7101_XGXS_TEST1 */
+ elink_cl45_write(sc, phy,
+ MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
+}
+
+static uint8_t elink_7101_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ uint16_t fw_ver1, fw_ver2, val;
+ struct bnx2x_softc *sc = params->sc;
+ PMD_DRV_LOG(DEBUG, "Setting the SFX7101 LASI indication");
+
+ /* Restore normal power mode */
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ elink_ext_phy_hw_reset(sc, params->port);
+ elink_wait_reset_complete(sc, phy, params);
+
+ elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
+ PMD_DRV_LOG(DEBUG, "Setting the SFX7101 LED to blink on traffic");
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1 << 3));
+
+ elink_ext_phy_set_pause(params, phy, vars);
+ /* Restart autoneg */
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
+ val |= 0x200;
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
+
+ /* Save spirom version */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1);
+
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
+ elink_save_spirom_version(sc, params->port,
+ (uint32_t) (fw_ver1 << 16 | fw_ver2),
+ phy->ver_addr);
+ return ELINK_STATUS_OK;
+}
+
+static uint8_t elink_7101_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t link_up;
+ uint16_t val1, val2;
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+ PMD_DRV_LOG(DEBUG, "10G-base-T LASI status 0x%x->0x%x", val2, val1);
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ PMD_DRV_LOG(DEBUG, "10G-base-T PMA status 0x%x->0x%x", val2, val1);
+ link_up = ((val1 & 4) == 4);
+ /* If link is up print the AN outcome of the SFX7101 PHY */
+ if (link_up) {
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
+ &val2);
+ vars->line_speed = ELINK_SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ PMD_DRV_LOG(DEBUG, "SFX7101 AN status 0x%x->Master=%x",
+ val2, (val2 & (1 << 14)));
+ elink_ext_phy_10G_an_resolve(sc, phy, vars);
+ elink_ext_phy_resolve_fc(phy, params, vars);
+
+ /* Read LP advertised speeds */
+ if (val2 & (1 << 11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ }
+ return link_up;
+}
+
+static uint8_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str,
+ uint16_t * len)
+{
+ if (*len < 5)
+ return ELINK_STATUS_ERROR;
+ str[0] = (spirom_ver & 0xFF);
+ str[1] = (spirom_ver & 0xFF00) >> 8;
+ str[2] = (spirom_ver & 0xFF0000) >> 16;
+ str[3] = (spirom_ver & 0xFF000000) >> 24;
+ str[4] = '\0';
+ *len -= 5;
+ return ELINK_STATUS_OK;
+}
+
+static void elink_7101_hw_reset(__rte_unused struct elink_phy *phy,
+ struct elink_params *params)
+{
+ /* Low power mode is controlled by GPIO 2 */
+ elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ /* The PHY reset is controlled by GPIO 1 */
+ elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+}
+
+static void elink_7101_set_link_led(struct elink_phy *phy,
+ struct elink_params *params, uint8_t mode)
+{
+ uint16_t val = 0;
+ struct bnx2x_softc *sc = params->sc;
+ switch (mode) {
+ case ELINK_LED_MODE_FRONT_PANEL_OFF:
+ case ELINK_LED_MODE_OFF:
+ val = 2;
+ break;
+ case ELINK_LED_MODE_ON:
+ val = 1;
+ break;
+ case ELINK_LED_MODE_OPER:
+ val = 0;
+ break;
+ }
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LINK_LED_CNTL, val);
+}
+
+/******************************************************************/
+/* STATIC PHY DECLARATION */
+/******************************************************************/
+
+static const struct elink_phy phy_null = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
+ .addr = 0,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = 0,
+ .media_type = ELINK_ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = NULL,
+ .read_status = NULL,
+ .link_reset = NULL,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
+};
+
+static const struct elink_phy phy_serdes = {
+ .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_2500baseX_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = elink_xgxs_config_init,
+ .read_status = elink_link_settings_status,
+ .link_reset = elink_int_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
+};
+
+static const struct elink_phy phy_xgxs = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_2500baseX_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_CX4,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = elink_xgxs_config_init,
+ .read_status = elink_link_settings_status,
+ .link_reset = elink_int_link_reset,
+ .config_loopback = elink_set_xgxs_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = elink_xgxs_specific_func
+};
+
+static const struct elink_phy phy_warpcore = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_TX_ERROR_CHECK,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_20000baseKR2_Full |
+ ELINK_SUPPORTED_20000baseMLD2_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_UNSPECIFIED,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ /* req_duplex = */ 0,
+ /* rsrv = */ 0,
+ .config_init = elink_warpcore_config_init,
+ .read_status = elink_warpcore_read_status,
+ .link_reset = elink_warpcore_link_reset,
+ .config_loopback = elink_set_warpcore_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = elink_warpcore_hw_reset,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
+};
+
+static const struct elink_phy phy_7101 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = elink_7101_config_init,
+ .read_status = elink_7101_read_status,
+ .link_reset = elink_common_ext_link_reset,
+ .config_loopback = elink_7101_config_loopback,
+ .format_fw_ver = elink_7101_format_ver,
+ .hw_reset = elink_7101_hw_reset,
+ .set_link_led = elink_7101_set_link_led,
+ .phy_specific_func = NULL
+};
+
+static const struct elink_phy phy_8073 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_2500baseX_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_KR,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = elink_8073_config_init,
+ .read_status = elink_8073_read_status,
+ .link_reset = elink_8073_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = elink_8073_specific_func
+};
+
+static const struct elink_phy phy_8705 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8705,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_XFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = elink_8705_config_init,
+ .read_status = elink_8705_read_status,
+ .link_reset = elink_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_null_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
+};
+
+static const struct elink_phy phy_8706 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8706,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_SFPP_10G_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = elink_8706_config_init,
+ .read_status = elink_8706_read_status,
+ .link_reset = elink_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
+};
+
+static const struct elink_phy phy_8726 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (ELINK_FLAGS_INIT_XGXS_FIRST | ELINK_FLAGS_TX_ERROR_CHECK),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = elink_8726_config_init,
+ .read_status = elink_8726_read_status,
+ .link_reset = elink_8726_link_reset,
+ .config_loopback = elink_8726_config_loopback,
+ .format_fw_ver = elink_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
+};
+
+static const struct elink_phy phy_8727 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ | ELINK_FLAGS_TX_ERROR_CHECK),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = elink_8727_config_init,
+ .read_status = elink_8727_read_status,
+ .link_reset = elink_8727_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_format_ver,
+ .hw_reset = elink_8727_hw_reset,
+ .set_link_led = elink_8727_set_link_led,
+ .phy_specific_func = elink_8727_specific_func
+};
+
+static const struct elink_phy phy_8481 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8481,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ |
+ ELINK_FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = elink_8481_config_init,
+ .read_status = elink_848xx_read_status,
+ .link_reset = elink_8481_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_848xx_format_ver,
+ .hw_reset = elink_8481_hw_reset,
+ .set_link_led = elink_848xx_set_link_led,
+ .phy_specific_func = NULL
+};
+
+static const struct elink_phy phy_84823 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ |
+ ELINK_FLAGS_REARM_LATCH_SIGNAL | ELINK_FLAGS_TX_ERROR_CHECK),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = elink_848x3_config_init,
+ .read_status = elink_848xx_read_status,
+ .link_reset = elink_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_848xx_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = elink_848xx_set_link_led,
+ .phy_specific_func = elink_848xx_specific_func
+};
+
+static const struct elink_phy phy_84833 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ |
+ ELINK_FLAGS_REARM_LATCH_SIGNAL |
+ ELINK_FLAGS_TX_ERROR_CHECK | ELINK_FLAGS_TEMPERATURE),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = elink_848x3_config_init,
+ .read_status = elink_848xx_read_status,
+ .link_reset = elink_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_848xx_format_ver,
+ .hw_reset = elink_84833_hw_reset_phy,
+ .set_link_led = elink_848xx_set_link_led,
+ .phy_specific_func = elink_848xx_specific_func
+};
+
+static const struct elink_phy phy_84834 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ |
+ ELINK_FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = elink_848x3_config_init,
+ .read_status = elink_848xx_read_status,
+ .link_reset = elink_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_848xx_format_ver,
+ .hw_reset = elink_84833_hw_reset_phy,
+ .set_link_led = elink_848xx_set_link_led,
+ .phy_specific_func = elink_848xx_specific_func
+};
+
+static const struct elink_phy phy_54618se = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ /* req_duplex = */ 0,
+ /* rsrv = */ 0,
+ .config_init = elink_54618se_config_init,
+ .read_status = elink_54618se_read_status,
+ .link_reset = elink_54618se_link_reset,
+ .config_loopback = elink_54618se_config_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = elink_5461x_set_link_led,
+ .phy_specific_func = elink_54618se_specific_func
+};
+
+/*****************************************************************/
+/* */
+/* Populate the phy according. Main function: elink_populate_phy */
+/* */
+/*****************************************************************/
+
+static void elink_populate_preemphasis(struct bnx2x_softc *sc,
+ uint32_t shmem_base,
+ struct elink_phy *phy, uint8_t port,
+ uint8_t phy_index)
+{
+ /* Get the 4 lanes xgxs config rx and tx */
+ uint32_t rx = 0, tx = 0, i;
+ for (i = 0; i < 2; i++) {
+ /* INT_PHY and ELINK_EXT_PHY1 share the same value location in
+ * the shmem. When num_phys is greater than 1, than this value
+ * applies only to ELINK_EXT_PHY1
+ */
+ if (phy_index == ELINK_INT_PHY || phy_index == ELINK_EXT_PHY1) {
+ rx = REG_RD(sc, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].
+ xgxs_config_rx[i << 1]));
+
+ tx = REG_RD(sc, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].
+ xgxs_config_tx[i << 1]));
+ } else {
+ rx = REG_RD(sc, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].
+ xgxs_config2_rx[i << 1]));
+
+ tx = REG_RD(sc, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].
+ xgxs_config2_rx[i << 1]));
+ }
+
+ phy->rx_preemphasis[i << 1] = ((rx >> 16) & 0xffff);
+ phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
+
+ phy->tx_preemphasis[i << 1] = ((tx >> 16) & 0xffff);
+ phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
+ }
+}
+
+static uint32_t elink_get_ext_phy_config(struct bnx2x_softc *sc,
+ uint32_t shmem_base, uint8_t phy_index,
+ uint8_t port)
+{
+ uint32_t ext_phy_config = 0;
+ switch (phy_index) {
+ case ELINK_EXT_PHY1:
+ ext_phy_config = REG_RD(sc, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].
+ external_phy_config));
+ break;
+ case ELINK_EXT_PHY2:
+ ext_phy_config = REG_RD(sc, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].
+ external_phy_config2));
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Invalid phy_index %d", phy_index);
+ return ELINK_STATUS_ERROR;
+ }
+
+ return ext_phy_config;
+}
+
+static elink_status_t elink_populate_int_phy(struct bnx2x_softc *sc,
+ uint32_t shmem_base, uint8_t port,
+ struct elink_phy *phy)
+{
+ uint32_t phy_addr;
+ __rte_unused uint32_t chip_id;
+ uint32_t switch_cfg = (REG_RD(sc, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_feature_config[port].
+ link_config)) &
+ PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ chip_id =
+ (REG_RD(sc, MISC_REG_CHIP_NUM) << 16) |
+ ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12);
+
+ PMD_DRV_LOG(DEBUG, ":chip_id = 0x%x", chip_id);
+ if (USES_WARPCORE(sc)) {
+ uint32_t serdes_net_if;
+ phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
+ *phy = phy_warpcore;
+ if (REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR) == 0x3)
+ phy->flags |= ELINK_FLAGS_4_PORT_MODE;
+ else
+ phy->flags &= ~ELINK_FLAGS_4_PORT_MODE;
+ /* Check Dual mode */
+ serdes_net_if = (REG_RD(sc, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].
+ default_cfg)) &
+ PORT_HW_CFG_NET_SERDES_IF_MASK);
+ /* Set the appropriate supported and flags indications per
+ * interface type of the chip
+ */
+ switch (serdes_net_if) {
+ case PORT_HW_CFG_NET_SERDES_IF_SGMII:
+ phy->supported &= (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause);
+ phy->media_type = ELINK_ETH_PHY_BASE_T;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_XFI:
+ phy->supported &= (ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause);
+ phy->media_type = ELINK_ETH_PHY_XFP_FIBER;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_SFI:
+ phy->supported &= (ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause);
+ phy->media_type = ELINK_ETH_PHY_SFPP_10G_FIBER;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_KR:
+ phy->media_type = ELINK_ETH_PHY_KR;
+ phy->supported &= (ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause);
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+ phy->media_type = ELINK_ETH_PHY_KR;
+ phy->flags |= ELINK_FLAGS_WC_DUAL_MODE;
+ phy->supported &= (ELINK_SUPPORTED_20000baseMLD2_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause);
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_KR2:
+ phy->media_type = ELINK_ETH_PHY_KR;
+ phy->flags |= ELINK_FLAGS_WC_DUAL_MODE;
+ phy->supported &= (ELINK_SUPPORTED_20000baseKR2_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause);
+ phy->flags &= ~ELINK_FLAGS_TX_ERROR_CHECK;
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Unknown WC interface type 0x%x",
+ serdes_net_if);
+ break;
+ }
+
+ /* Enable MDC/MDIO work-around for E3 A0 since free running MDC
+ * was not set as expected. For B0, ECO will be enabled so there
+ * won't be an issue there
+ */
+ if (CHIP_REV(sc) == CHIP_REV_Ax)
+ phy->flags |= ELINK_FLAGS_MDC_MDIO_WA;
+ else
+ phy->flags |= ELINK_FLAGS_MDC_MDIO_WA_B0;
+ } else {
+ switch (switch_cfg) {
+ case ELINK_SWITCH_CFG_1G:
+ phy_addr = REG_RD(sc,
+ NIG_REG_SERDES0_CTRL_PHY_ADDR +
+ port * 0x10);
+ *phy = phy_serdes;
+ break;
+ case ELINK_SWITCH_CFG_10G:
+ phy_addr = REG_RD(sc,
+ NIG_REG_XGXS0_CTRL_PHY_ADDR +
+ port * 0x18);
+ *phy = phy_xgxs;
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Invalid switch_cfg");
+ return ELINK_STATUS_ERROR;
+ }
+ }
+ phy->addr = (uint8_t) phy_addr;
+ phy->mdio_ctrl = elink_get_emac_base(sc,
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
+ port);
+ if (CHIP_IS_E2(sc))
+ phy->def_md_devad = ELINK_E2_DEFAULT_PHY_DEV_ADDR;
+ else
+ phy->def_md_devad = ELINK_DEFAULT_PHY_DEV_ADDR;
+
+ PMD_DRV_LOG(DEBUG, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x",
+ port, phy->addr, phy->mdio_ctrl);
+
+ elink_populate_preemphasis(sc, shmem_base, phy, port, ELINK_INT_PHY);
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_populate_ext_phy(struct bnx2x_softc *sc,
+ uint8_t phy_index,
+ uint32_t shmem_base,
+ uint32_t shmem2_base,
+ uint8_t port,
+ struct elink_phy *phy)
+{
+ uint32_t ext_phy_config, phy_type, config2;
+ uint32_t mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
+ ext_phy_config = elink_get_ext_phy_config(sc, shmem_base,
+ phy_index, port);
+ phy_type = ELINK_XGXS_EXT_PHY_TYPE(ext_phy_config);
+ /* Select the phy type */
+ switch (phy_type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED;
+ *phy = phy_8073;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8705:
+ *phy = phy_8705;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8706:
+ *phy = phy_8706;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8726;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727_NOC:
+ /* BNX2X8727_NOC => BNX2X8727 no over current */
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8727;
+ phy->flags |= ELINK_FLAGS_NOC;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8727;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8481:
+ *phy = phy_8481;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823:
+ *phy = phy_84823;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833:
+ *phy = phy_84833;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834:
+ *phy = phy_84834;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54616:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE:
+ *phy = phy_54618se;
+ if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE)
+ phy->flags |= ELINK_FLAGS_EEE;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+ *phy = phy_7101;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+ *phy = phy_null;
+ return ELINK_STATUS_ERROR;
+ default:
+ *phy = phy_null;
+ /* In case external PHY wasn't found */
+ if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+ (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+ return ELINK_STATUS_ERROR;
+ return ELINK_STATUS_OK;
+ }
+
+ phy->addr = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
+ elink_populate_preemphasis(sc, shmem_base, phy, port, phy_index);
+
+ /* The shmem address of the phy version is located on different
+ * structures. In case this structure is too old, do not set
+ * the address
+ */
+ config2 = REG_RD(sc, shmem_base + offsetof(struct shmem_region,
+ dev_info.shared_hw_config.
+ config2));
+ if (phy_index == ELINK_EXT_PHY1) {
+ phy->ver_addr = shmem_base + offsetof(struct shmem_region,
+ port_mb[port].
+ ext_phy_fw_version);
+
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+ mdc_mdio_access = config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+ } else {
+ uint32_t size = REG_RD(sc, shmem2_base);
+
+ if (size > offsetof(struct shmem2_region, ext_phy_fw_version2)) {
+ phy->ver_addr = shmem2_base +
+ offsetof(struct shmem2_region,
+ ext_phy_fw_version2[port]);
+ }
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
+ mdc_mdio_access = (config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
+ >> (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
+ }
+ phy->mdio_ctrl = elink_get_emac_base(sc, mdc_mdio_access, port);
+
+ if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) &&
+ (phy->ver_addr)) {
+ /* Remove 100Mb link supported for BNX2X84833/4 when phy fw
+ * version lower than or equal to 1.39
+ */
+ uint32_t raw_ver = REG_RD(sc, phy->ver_addr);
+ if (((raw_ver & 0x7F) <= 39) && (((raw_ver & 0xF80) >> 7) <= 1))
+ phy->supported &= ~(ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full);
+ }
+
+ PMD_DRV_LOG(DEBUG, "phy_type 0x%x port %d found in index %d",
+ phy_type, port, phy_index);
+ PMD_DRV_LOG(DEBUG, " addr=0x%x, mdio_ctl=0x%x",
+ phy->addr, phy->mdio_ctrl);
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_populate_phy(struct bnx2x_softc *sc,
+ uint8_t phy_index, uint32_t shmem_base,
+ uint32_t shmem2_base, uint8_t port,
+ struct elink_phy *phy)
+{
+ elink_status_t status = ELINK_STATUS_OK;
+ phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
+ if (phy_index == ELINK_INT_PHY)
+ return elink_populate_int_phy(sc, shmem_base, port, phy);
+ status = elink_populate_ext_phy(sc, phy_index, shmem_base, shmem2_base,
+ port, phy);
+ return status;
+}
+
+static void elink_phy_def_cfg(struct elink_params *params,
+ struct elink_phy *phy, uint8_t phy_index)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t link_config;
+ /* Populate the default phy configuration for MF mode */
+ if (phy_index == ELINK_EXT_PHY2) {
+ link_config = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_feature_config
+ [params->port].link_config2));
+ phy->speed_cap_mask =
+ REG_RD(sc,
+ params->shmem_base + offsetof(struct shmem_region,
+ dev_info.port_hw_config
+ [params->port].
+ speed_capability_mask2));
+ } else {
+ link_config = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_feature_config
+ [params->port].link_config));
+ phy->speed_cap_mask =
+ REG_RD(sc,
+ params->shmem_base + offsetof(struct shmem_region,
+ dev_info.port_hw_config
+ [params->port].
+ speed_capability_mask));
+ }
+
+ PMD_DRV_LOG(DEBUG,
+ "Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x",
+ phy_index, link_config, phy->speed_cap_mask);
+
+ phy->req_duplex = DUPLEX_FULL;
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ case PORT_FEATURE_LINK_SPEED_10M_HALF:
+ phy->req_duplex = DUPLEX_HALF;
+ /* fall-through */
+ case PORT_FEATURE_LINK_SPEED_10M_FULL:
+ phy->req_line_speed = ELINK_SPEED_10;
+ break;
+ case PORT_FEATURE_LINK_SPEED_100M_HALF:
+ phy->req_duplex = DUPLEX_HALF;
+ /* fall-through */
+ case PORT_FEATURE_LINK_SPEED_100M_FULL:
+ phy->req_line_speed = ELINK_SPEED_100;
+ break;
+ case PORT_FEATURE_LINK_SPEED_1G:
+ phy->req_line_speed = ELINK_SPEED_1000;
+ break;
+ case PORT_FEATURE_LINK_SPEED_2_5G:
+ phy->req_line_speed = ELINK_SPEED_2500;
+ break;
+ case PORT_FEATURE_LINK_SPEED_10G_CX4:
+ phy->req_line_speed = ELINK_SPEED_10000;
+ break;
+ default:
+ phy->req_line_speed = ELINK_SPEED_AUTO_NEG;
+ break;
+ }
+
+ switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
+ case PORT_FEATURE_FLOW_CONTROL_AUTO:
+ phy->req_flow_ctrl = ELINK_FLOW_CTRL_AUTO;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_TX:
+ phy->req_flow_ctrl = ELINK_FLOW_CTRL_TX;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_RX:
+ phy->req_flow_ctrl = ELINK_FLOW_CTRL_RX;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_BOTH:
+ phy->req_flow_ctrl = ELINK_FLOW_CTRL_BOTH;
+ break;
+ default:
+ phy->req_flow_ctrl = ELINK_FLOW_CTRL_NONE;
+ break;
+ }
+}
+
+uint32_t elink_phy_selection(struct elink_params *params)
+{
+ uint32_t phy_config_swapped, prio_cfg;
+ uint32_t return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
+
+ phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+ prio_cfg = params->multi_phy_config & PORT_HW_CFG_PHY_SELECTION_MASK;
+
+ if (phy_config_swapped) {
+ switch (prio_cfg) {
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ return_cfg =
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ return_cfg =
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ }
+ } else
+ return_cfg = prio_cfg;
+
+ return return_cfg;
+}
+
+elink_status_t elink_phy_probe(struct elink_params * params)
+{
+ uint8_t phy_index, actual_phy_idx;
+ uint32_t phy_config_swapped, sync_offset, media_types;
+ struct bnx2x_softc *sc = params->sc;
+ struct elink_phy *phy;
+ params->num_phys = 0;
+ PMD_DRV_LOG(DEBUG, "Begin phy probe");
+
+ phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+ for (phy_index = ELINK_INT_PHY; phy_index < ELINK_MAX_PHYS; phy_index++) {
+ actual_phy_idx = phy_index;
+ if (phy_config_swapped) {
+ if (phy_index == ELINK_EXT_PHY1)
+ actual_phy_idx = ELINK_EXT_PHY2;
+ else if (phy_index == ELINK_EXT_PHY2)
+ actual_phy_idx = ELINK_EXT_PHY1;
+ }
+ PMD_DRV_LOG(DEBUG, "phy_config_swapped %x, phy_index %x,"
+ " actual_phy_idx %x", phy_config_swapped,
+ phy_index, actual_phy_idx);
+ phy = &params->phy[actual_phy_idx];
+ if (elink_populate_phy(sc, phy_index, params->shmem_base,
+ params->shmem2_base, params->port,
+ phy) != ELINK_STATUS_OK) {
+ params->num_phys = 0;
+ PMD_DRV_LOG(DEBUG, "phy probe failed in phy index %d",
+ phy_index);
+ for (phy_index = ELINK_INT_PHY;
+ phy_index < ELINK_MAX_PHYS; phy_index++)
+ *phy = phy_null;
+ return ELINK_STATUS_ERROR;
+ }
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
+ break;
+
+ if (params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
+ phy->flags &= ~ELINK_FLAGS_TX_ERROR_CHECK;
+
+ if (!(params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_MT_SUPPORT))
+ phy->flags |= ELINK_FLAGS_MDC_MDIO_WA_G;
+
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].media_type);
+ media_types = REG_RD(sc, sync_offset);
+
+ /* Update media type for non-PMF sync only for the first time
+ * In case the media type changes afterwards, it will be updated
+ * using the update_status function
+ */
+ if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+ actual_phy_idx))) == 0) {
+ media_types |= ((phy->media_type &
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+ actual_phy_idx));
+ }
+ REG_WR(sc, sync_offset, media_types);
+
+ elink_phy_def_cfg(params, phy, phy_index);
+ params->num_phys++;
+ }
+
+ PMD_DRV_LOG(DEBUG, "End phy probe. #phys found %x", params->num_phys);
+ return ELINK_STATUS_OK;
+}
+
+static void elink_init_bmac_loopback(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ vars->link_up = 1;
+ vars->line_speed = ELINK_SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+ vars->mac_type = ELINK_MAC_TYPE_BMAC;
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+
+ elink_xgxs_deassert(params);
+
+ /* Set bmac loopback */
+ elink_bmac_enable(params, vars, 1, 1);
+
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+}
+
+static void elink_init_emac_loopback(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ vars->link_up = 1;
+ vars->line_speed = ELINK_SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+ vars->mac_type = ELINK_MAC_TYPE_EMAC;
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+
+ elink_xgxs_deassert(params);
+ /* Set bmac loopback */
+ elink_emac_enable(params, vars, 1);
+ elink_emac_program(params, vars);
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+}
+
+static void elink_init_xmac_loopback(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ vars->link_up = 1;
+ if (!params->req_line_speed[0])
+ vars->line_speed = ELINK_SPEED_10000;
+ else
+ vars->line_speed = params->req_line_speed[0];
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+ vars->mac_type = ELINK_MAC_TYPE_XMAC;
+ vars->phy_flags = PHY_XGXS_FLAG;
+ /* Set WC to loopback mode since link is required to provide clock
+ * to the XMAC in 20G mode
+ */
+ elink_set_aer_mmd(params, &params->phy[0]);
+ elink_warpcore_reset_lane(sc, &params->phy[0], 0);
+ params->phy[ELINK_INT_PHY].config_loopback(&params->phy[ELINK_INT_PHY],
+ params);
+
+ elink_xmac_enable(params, vars, 1);
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+}
+
+static void elink_init_umac_loopback(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ vars->link_up = 1;
+ vars->line_speed = ELINK_SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+ vars->mac_type = ELINK_MAC_TYPE_UMAC;
+ vars->phy_flags = PHY_XGXS_FLAG;
+ elink_umac_enable(params, vars, 1);
+
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+}
+
+static void elink_init_xgxs_loopback(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ struct elink_phy *int_phy = &params->phy[ELINK_INT_PHY];
+ vars->link_up = 1;
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+ vars->duplex = DUPLEX_FULL;
+ if (params->req_line_speed[0] == ELINK_SPEED_1000)
+ vars->line_speed = ELINK_SPEED_1000;
+ else if ((params->req_line_speed[0] == ELINK_SPEED_20000) ||
+ (int_phy->flags & ELINK_FLAGS_WC_DUAL_MODE))
+ vars->line_speed = ELINK_SPEED_20000;
+ else
+ vars->line_speed = ELINK_SPEED_10000;
+
+ if (!USES_WARPCORE(sc))
+ elink_xgxs_deassert(params);
+ elink_link_initialize(params, vars);
+
+ if (params->req_line_speed[0] == ELINK_SPEED_1000) {
+ if (USES_WARPCORE(sc))
+ elink_umac_enable(params, vars, 0);
+ else {
+ elink_emac_program(params, vars);
+ elink_emac_enable(params, vars, 0);
+ }
+ } else {
+ if (USES_WARPCORE(sc))
+ elink_xmac_enable(params, vars, 0);
+ else
+ elink_bmac_enable(params, vars, 0, 1);
+ }
+
+ if (params->loopback_mode == ELINK_LOOPBACK_XGXS) {
+ /* Set 10G XGXS loopback */
+ int_phy->config_loopback(int_phy, params);
+ } else {
+ /* Set external phy loopback */
+ uint8_t phy_index;
+ for (phy_index = ELINK_EXT_PHY1;
+ phy_index < params->num_phys; phy_index++)
+ if (params->phy[phy_index].config_loopback)
+ params->phy[phy_index].config_loopback(&params->
+ phy
+ [phy_index],
+ params);
+ }
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+
+ elink_set_led(params, vars, ELINK_LED_MODE_OPER, vars->line_speed);
+}
+
+void elink_set_rx_filter(struct elink_params *params, uint8_t en)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t val = en * 0x1F;
+
+ /* Open / close the gate between the NIG and the BRB */
+ if (!CHIP_IS_E1x(sc))
+ val |= en * 0x20;
+ REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + params->port * 4, val);
+
+ REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port * 4, en * 0x3);
+
+ REG_WR(sc, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), en);
+}
+
+static elink_status_t elink_avoid_link_flap(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ uint32_t phy_idx;
+ uint32_t dont_clear_stat, lfa_sts;
+ struct bnx2x_softc *sc = params->sc;
+
+ /* Sync the link parameters */
+ elink_link_status_update(params, vars);
+
+ /*
+ * The module verification was already done by previous link owner,
+ * so this call is meant only to get warning message
+ */
+
+ for (phy_idx = ELINK_INT_PHY; phy_idx < params->num_phys; phy_idx++) {
+ struct elink_phy *phy = &params->phy[phy_idx];
+ if (phy->phy_specific_func) {
+ PMD_DRV_LOG(DEBUG, "Calling PHY specific func");
+ phy->phy_specific_func(phy, params, ELINK_PHY_INIT);
+ }
+ if ((phy->media_type == ELINK_ETH_PHY_SFPP_10G_FIBER) ||
+ (phy->media_type == ELINK_ETH_PHY_SFP_1G_FIBER) ||
+ (phy->media_type == ELINK_ETH_PHY_DA_TWINAX))
+ elink_verify_sfp_module(phy, params);
+ }
+ lfa_sts = REG_RD(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, lfa_sts));
+
+ dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT;
+
+ /* Re-enable the NIG/MAC */
+ if (CHIP_IS_E3(sc)) {
+ if (!dont_clear_stat) {
+ REG_WR(sc, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+ params->port));
+ REG_WR(sc, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+ params->port));
+ }
+ if (vars->line_speed < ELINK_SPEED_10000)
+ elink_umac_enable(params, vars, 0);
+ else
+ elink_xmac_enable(params, vars, 0);
+ } else {
+ if (vars->line_speed < ELINK_SPEED_10000)
+ elink_emac_enable(params, vars, 0);
+ else
+ elink_bmac_enable(params, vars, 0, !dont_clear_stat);
+ }
+
+ /* Increment LFA count */
+ lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) |
+ (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >>
+ LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff)
+ << LINK_FLAP_AVOIDANCE_COUNT_OFFSET));
+ /* Clear link flap reason */
+ lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+
+ REG_WR(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+
+ /* Disable NIG DRAIN */
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+
+ /* Enable interrupts */
+ elink_link_int_enable(params);
+ return ELINK_STATUS_OK;
+}
+
+static void elink_cannot_avoid_link_flap(struct elink_params *params,
+ struct elink_vars *vars,
+ int lfa_status)
+{
+ uint32_t lfa_sts, cfg_idx, tmp_val;
+ struct bnx2x_softc *sc = params->sc;
+
+ elink_link_reset(params, vars, 1);
+
+ if (!params->lfa_base)
+ return;
+ /* Store the new link parameters */
+ REG_WR(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, req_duplex),
+ params->req_duplex[0] | (params->req_duplex[1] << 16));
+
+ REG_WR(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, req_flow_ctrl),
+ params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16));
+
+ REG_WR(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, req_line_speed),
+ params->req_line_speed[0] | (params->req_line_speed[1] << 16));
+
+ for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) {
+ REG_WR(sc, params->lfa_base +
+ offsetof(struct shmem_lfa,
+ speed_cap_mask[cfg_idx]),
+ params->speed_cap_mask[cfg_idx]);
+ }
+
+ tmp_val = REG_RD(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config));
+ tmp_val &= ~REQ_FC_AUTO_ADV_MASK;
+ tmp_val |= params->req_fc_auto_adv;
+
+ REG_WR(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config), tmp_val);
+
+ lfa_sts = REG_RD(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, lfa_sts));
+
+ /* Clear the "Don't Clear Statistics" bit, and set reason */
+ lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT;
+
+ /* Set link flap reason */
+ lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+ lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) <<
+ LFA_LINK_FLAP_REASON_OFFSET);
+
+ /* Increment link flap counter */
+ lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) |
+ (((((lfa_sts & LINK_FLAP_COUNT_MASK) >>
+ LINK_FLAP_COUNT_OFFSET) + 1) & 0xff)
+ << LINK_FLAP_COUNT_OFFSET));
+ REG_WR(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+ /* Proceed with regular link initialization */
+}
+
+elink_status_t elink_phy_init(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ int lfa_status;
+ struct bnx2x_softc *sc = params->sc;
+ PMD_DRV_LOG(DEBUG, "Phy Initialization started");
+ PMD_DRV_LOG(DEBUG, "(1) req_speed %d, req_flowctrl %d",
+ params->req_line_speed[0], params->req_flow_ctrl[0]);
+ PMD_DRV_LOG(DEBUG, "(2) req_speed %d, req_flowctrl %d",
+ params->req_line_speed[1], params->req_flow_ctrl[1]);
+ PMD_DRV_LOG(DEBUG, "req_adv_flow_ctrl 0x%x", params->req_fc_auto_adv);
+ vars->link_status = 0;
+ vars->phy_link_up = 0;
+ vars->link_up = 0;
+ vars->line_speed = 0;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+ vars->mac_type = ELINK_MAC_TYPE_NONE;
+ vars->phy_flags = 0;
+ vars->check_kr2_recovery_cnt = 0;
+ params->link_flags = ELINK_PHY_INITIALIZED;
+ /* Driver opens NIG-BRB filters */
+ elink_set_rx_filter(params, 1);
+ /* Check if link flap can be avoided */
+ lfa_status = elink_check_lfa(params);
+
+ if (lfa_status == 0) {
+ PMD_DRV_LOG(DEBUG, "Link Flap Avoidance in progress");
+ return elink_avoid_link_flap(params, vars);
+ }
+
+ PMD_DRV_LOG(DEBUG, "Cannot avoid link flap lfa_sta=0x%x", lfa_status);
+ elink_cannot_avoid_link_flap(params, vars, lfa_status);
+
+ /* Disable attentions */
+ elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 + params->port * 4,
+ (ELINK_NIG_MASK_XGXS0_LINK_STATUS |
+ ELINK_NIG_MASK_XGXS0_LINK10G |
+ ELINK_NIG_MASK_SERDES0_LINK_STATUS |
+ ELINK_NIG_MASK_MI_INT));
+
+ elink_emac_init(params);
+
+ if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
+ vars->link_status |= LINK_STATUS_PFC_ENABLED;
+
+ if ((params->num_phys == 0) && !CHIP_REV_IS_SLOW(sc)) {
+ PMD_DRV_LOG(DEBUG, "No phy found for initialization !!");
+ return ELINK_STATUS_ERROR;
+ }
+ set_phy_vars(params, vars);
+
+ PMD_DRV_LOG(DEBUG, "Num of phys on board: %d", params->num_phys);
+
+ switch (params->loopback_mode) {
+ case ELINK_LOOPBACK_BMAC:
+ elink_init_bmac_loopback(params, vars);
+ break;
+ case ELINK_LOOPBACK_EMAC:
+ elink_init_emac_loopback(params, vars);
+ break;
+ case ELINK_LOOPBACK_XMAC:
+ elink_init_xmac_loopback(params, vars);
+ break;
+ case ELINK_LOOPBACK_UMAC:
+ elink_init_umac_loopback(params, vars);
+ break;
+ case ELINK_LOOPBACK_XGXS:
+ case ELINK_LOOPBACK_EXT_PHY:
+ elink_init_xgxs_loopback(params, vars);
+ break;
+ default:
+ if (!CHIP_IS_E3(sc)) {
+ if (params->switch_cfg == ELINK_SWITCH_CFG_10G)
+ elink_xgxs_deassert(params);
+ else
+ elink_serdes_deassert(sc, params->port);
+ }
+ elink_link_initialize(params, vars);
+ DELAY(1000 * 30);
+ elink_link_int_enable(params);
+ break;
+ }
+ elink_update_mng(params, vars->link_status);
+
+ elink_update_mng_eee(params, vars->eee_status);
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_link_reset(struct elink_params *params,
+ struct elink_vars *vars,
+ uint8_t reset_ext_phy)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t phy_index, port = params->port, clear_latch_ind = 0;
+ PMD_DRV_LOG(DEBUG, "Resetting the link of port %d", port);
+ /* Disable attentions */
+ vars->link_status = 0;
+ elink_update_mng(params, vars->link_status);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+ elink_update_mng_eee(params, vars->eee_status);
+ elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4,
+ (ELINK_NIG_MASK_XGXS0_LINK_STATUS |
+ ELINK_NIG_MASK_XGXS0_LINK10G |
+ ELINK_NIG_MASK_SERDES0_LINK_STATUS |
+ ELINK_NIG_MASK_MI_INT));
+
+ /* Activate nig drain */
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + port * 4, 1);
+
+ /* Disable nig egress interface */
+ if (!CHIP_IS_E3(sc)) {
+ REG_WR(sc, NIG_REG_BMAC0_OUT_EN + port * 4, 0);
+ REG_WR(sc, NIG_REG_EGRESS_EMAC0_OUT_EN + port * 4, 0);
+ }
+ if (!CHIP_IS_E3(sc))
+ elink_set_bmac_rx(sc, port, 0);
+ if (CHIP_IS_E3(sc) && !CHIP_REV_IS_FPGA(sc)) {
+ elink_set_xmac_rxtx(params, 0);
+ elink_set_umac_rxtx(params, 0);
+ }
+ /* Disable emac */
+ if (!CHIP_IS_E3(sc))
+ REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 0);
+
+ DELAY(1000 * 10);
+ /* The PHY reset is controlled by GPIO 1
+ * Hold it as vars low
+ */
+ /* Clear link led */
+ elink_set_mdio_emac_per_phy(sc, params);
+ elink_set_led(params, vars, ELINK_LED_MODE_OFF, 0);
+
+ if (reset_ext_phy && (!CHIP_REV_IS_SLOW(sc))) {
+ for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ if (params->phy[phy_index].link_reset) {
+ elink_set_aer_mmd(params,
+ &params->phy[phy_index]);
+ params->phy[phy_index].link_reset(&params->
+ phy
+ [phy_index],
+ params);
+ }
+ if (params->phy[phy_index].flags &
+ ELINK_FLAGS_REARM_LATCH_SIGNAL)
+ clear_latch_ind = 1;
+ }
+ }
+
+ if (clear_latch_ind) {
+ /* Clear latching indication */
+ elink_rearm_latch_signal(sc, port, 0);
+ elink_bits_dis(sc, NIG_REG_LATCH_BC_0 + port * 4,
+ 1 << ELINK_NIG_LATCH_BC_ENABLE_MI_INT);
+ }
+ if (params->phy[ELINK_INT_PHY].link_reset)
+ params->phy[ELINK_INT_PHY].link_reset(&params->
+ phy
+ [ELINK_INT_PHY],
+ params);
+
+ /* Disable nig ingress interface */
+ if (!CHIP_IS_E3(sc)) {
+ /* Reset BigMac */
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ REG_WR(sc, NIG_REG_BMAC0_IN_EN + port * 4, 0);
+ REG_WR(sc, NIG_REG_EMAC0_IN_EN + port * 4, 0);
+ } else {
+ uint32_t xmac_base =
+ (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+ elink_set_xumac_nig(params, 0, 0);
+ if (REG_RD(sc, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC)
+ REG_WR(sc, xmac_base + XMAC_REG_CTRL,
+ XMAC_CTRL_REG_SOFT_RESET);
+ }
+ vars->link_up = 0;
+ vars->phy_flags = 0;
+ return ELINK_STATUS_OK;
+}
+
+elink_status_t elink_lfa_reset(struct elink_params * params,
+ struct elink_vars * vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ vars->link_up = 0;
+ vars->phy_flags = 0;
+ params->link_flags &= ~ELINK_PHY_INITIALIZED;
+ if (!params->lfa_base)
+ return elink_link_reset(params, vars, 1);
+ /*
+ * Activate NIG drain so that during this time the device won't send
+ * anything while it is unable to response.
+ */
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 1);
+
+ /*
+ * Close gracefully the gate from BMAC to NIG such that no half packets
+ * are passed.
+ */
+ if (!CHIP_IS_E3(sc))
+ elink_set_bmac_rx(sc, params->port, 0);
+
+ if (CHIP_IS_E3(sc)) {
+ elink_set_xmac_rxtx(params, 0);
+ elink_set_umac_rxtx(params, 0);
+ }
+ /* Wait 10ms for the pipe to clean up */
+ DELAY(1000 * 10);
+
+ /* Clean the NIG-BRB using the network filters in a way that will
+ * not cut a packet in the middle.
+ */
+ elink_set_rx_filter(params, 0);
+
+ /*
+ * Re-open the gate between the BMAC and the NIG, after verifying the
+ * gate to the BRB is closed, otherwise packets may arrive to the
+ * firmware before driver had initialized it. The target is to achieve
+ * minimum management protocol down time.
+ */
+ if (!CHIP_IS_E3(sc))
+ elink_set_bmac_rx(sc, params->port, 1);
+
+ if (CHIP_IS_E3(sc)) {
+ elink_set_xmac_rxtx(params, 1);
+ elink_set_umac_rxtx(params, 1);
+ }
+ /* Disable NIG drain */
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+ return ELINK_STATUS_OK;
+}
+
+/****************************************************************************/
+/* Common function */
+/****************************************************************************/
+static elink_status_t elink_8073_common_init_phy(struct bnx2x_softc *sc,
+ uint32_t shmem_base_path[],
+ uint32_t shmem2_base_path[],
+ uint8_t phy_index,
+ __rte_unused uint32_t chip_id)
+{
+ struct elink_phy phy[PORT_MAX];
+ struct elink_phy *phy_blk[PORT_MAX];
+ uint16_t val;
+ int8_t port = 0;
+ int8_t port_of_path = 0;
+ uint32_t swap_val, swap_override;
+ swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
+ port ^= (swap_val && swap_override);
+ elink_ext_phy_hw_reset(sc, port);
+ /* PART1 - Reset both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ uint32_t shmem_base, shmem2_base;
+ /* In E2, same phy is using for port0 of the two paths */
+ if (CHIP_IS_E1x(sc)) {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ port_of_path = port;
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ port_of_path = 0;
+ }
+
+ /* Extract the ext phy address for the port */
+ if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base,
+ port_of_path, &phy[port]) !=
+ ELINK_STATUS_OK) {
+ PMD_DRV_LOG(DEBUG, "populate_phy failed");
+ return ELINK_STATUS_ERROR;
+ }
+ /* Disable attentions */
+ elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path * 4,
+ (ELINK_NIG_MASK_XGXS0_LINK_STATUS |
+ ELINK_NIG_MASK_XGXS0_LINK10G |
+ ELINK_NIG_MASK_SERDES0_LINK_STATUS |
+ ELINK_NIG_MASK_MI_INT));
+
+ /* Need to take the phy out of low power mode in order
+ * to write to access its registers
+ */
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+
+ /* Reset the phy */
+ elink_cl45_write(sc, &phy[port],
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15);
+ }
+
+ /* Add delay of 150ms after reset */
+ DELAY(1000 * 150);
+
+ if (phy[PORT_0].addr & 0x1) {
+ phy_blk[PORT_0] = &(phy[PORT_1]);
+ phy_blk[PORT_1] = &(phy[PORT_0]);
+ } else {
+ phy_blk[PORT_0] = &(phy[PORT_0]);
+ phy_blk[PORT_1] = &(phy[PORT_1]);
+ }
+
+ /* PART2 - Download firmware to both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ if (CHIP_IS_E1x(sc))
+ port_of_path = port;
+ else
+ port_of_path = 0;
+
+ PMD_DRV_LOG(DEBUG, "Loading spirom for phy address 0x%x",
+ phy_blk[port]->addr);
+ if (elink_8073_8727_external_rom_boot(sc, phy_blk[port],
+ port_of_path))
+ return ELINK_STATUS_ERROR;
+
+ /* Only set bit 10 = 1 (Tx power down) */
+ elink_cl45_read(sc, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+ /* Phase1 of TX_POWER_DOWN reset */
+ elink_cl45_write(sc, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, (val | 1 << 10));
+ }
+
+ /* Toggle Transmitter: Power down and then up with 600ms delay
+ * between
+ */
+ DELAY(1000 * 600);
+
+ /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ /* Phase2 of POWER_DOWN_RESET */
+ /* Release bit 10 (Release Tx power down) */
+ elink_cl45_read(sc, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+ elink_cl45_write(sc, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN,
+ (val & (~(1 << 10))));
+ DELAY(1000 * 15);
+
+ /* Read modify write the SPI-ROM version select register */
+ elink_cl45_read(sc, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+ elink_cl45_write(sc, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1 << 12)));
+
+ /* set GPIO2 back to LOW */
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ }
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_8726_common_init_phy(struct bnx2x_softc *sc,
+ uint32_t shmem_base_path[],
+ uint32_t shmem2_base_path[],
+ uint8_t phy_index,
+ __rte_unused uint32_t chip_id)
+{
+ uint32_t val;
+ int8_t port;
+ struct elink_phy phy;
+ /* Use port1 because of the static port-swap */
+ /* Enable the module detection interrupt */
+ val = REG_RD(sc, MISC_REG_GPIO_EVENT_EN);
+ val |= ((1 << MISC_REGISTERS_GPIO_3) |
+ (1 <<
+ (MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
+ REG_WR(sc, MISC_REG_GPIO_EVENT_EN, val);
+
+ elink_ext_phy_hw_reset(sc, 0);
+ DELAY(1000 * 5);
+ for (port = 0; port < PORT_MAX; port++) {
+ uint32_t shmem_base, shmem2_base;
+
+ /* In E2, same phy is using for port0 of the two paths */
+ if (CHIP_IS_E1x(sc)) {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ }
+ /* Extract the ext phy address for the port */
+ if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base,
+ port, &phy) != ELINK_STATUS_OK) {
+ PMD_DRV_LOG(DEBUG, "populate phy failed");
+ return ELINK_STATUS_ERROR;
+ }
+
+ /* Reset phy */
+ elink_cl45_write(sc, &phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+
+ /* Set fault module detected LED on */
+ elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_HIGH, port);
+ }
+
+ return ELINK_STATUS_OK;
+}
+
+static void elink_get_ext_phy_reset_gpio(struct bnx2x_softc *sc,
+ uint32_t shmem_base, uint8_t * io_gpio,
+ uint8_t * io_port)
+{
+
+ uint32_t phy_gpio_reset = REG_RD(sc, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[PORT_0].
+ default_cfg));
+ switch (phy_gpio_reset) {
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+ *io_gpio = 0;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+ *io_gpio = 1;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+ *io_gpio = 2;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+ *io_gpio = 3;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+ *io_gpio = 0;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+ *io_gpio = 1;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+ *io_gpio = 2;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+ *io_gpio = 3;
+ *io_port = 1;
+ break;
+ default:
+ /* Don't override the io_gpio and io_port */
+ break;
+ }
+}
+
+static elink_status_t elink_8727_common_init_phy(struct bnx2x_softc *sc,
+ uint32_t shmem_base_path[],
+ uint32_t shmem2_base_path[],
+ uint8_t phy_index,
+ __rte_unused uint32_t chip_id)
+{
+ int8_t port, reset_gpio;
+ uint32_t swap_val, swap_override;
+ struct elink_phy phy[PORT_MAX];
+ struct elink_phy *phy_blk[PORT_MAX];
+ int8_t port_of_path;
+ swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
+
+ reset_gpio = MISC_REGISTERS_GPIO_1;
+ port = 1;
+
+ /* Retrieve the reset gpio/port which control the reset.
+ * Default is GPIO1, PORT1
+ */
+ elink_get_ext_phy_reset_gpio(sc, shmem_base_path[0],
+ (uint8_t *) & reset_gpio,
+ (uint8_t *) & port);
+
+ /* Calculate the port based on port swap */
+ port ^= (swap_val && swap_override);
+
+ /* Initiate PHY reset */
+ elink_cb_gpio_write(sc, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+ DELAY(1000 * 1);
+ elink_cb_gpio_write(sc, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+
+ DELAY(1000 * 5);
+
+ /* PART1 - Reset both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ uint32_t shmem_base, shmem2_base;
+
+ /* In E2, same phy is using for port0 of the two paths */
+ if (CHIP_IS_E1x(sc)) {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ port_of_path = port;
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ port_of_path = 0;
+ }
+
+ /* Extract the ext phy address for the port */
+ if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base,
+ port_of_path, &phy[port]) !=
+ ELINK_STATUS_OK) {
+ PMD_DRV_LOG(DEBUG, "populate phy failed");
+ return ELINK_STATUS_ERROR;
+ }
+ /* disable attentions */
+ elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path * 4,
+ (ELINK_NIG_MASK_XGXS0_LINK_STATUS |
+ ELINK_NIG_MASK_XGXS0_LINK10G |
+ ELINK_NIG_MASK_SERDES0_LINK_STATUS |
+ ELINK_NIG_MASK_MI_INT));
+
+ /* Reset the phy */
+ elink_cl45_write(sc, &phy[port],
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15);
+ }
+
+ /* Add delay of 150ms after reset */
+ DELAY(1000 * 150);
+ if (phy[PORT_0].addr & 0x1) {
+ phy_blk[PORT_0] = &(phy[PORT_1]);
+ phy_blk[PORT_1] = &(phy[PORT_0]);
+ } else {
+ phy_blk[PORT_0] = &(phy[PORT_0]);
+ phy_blk[PORT_1] = &(phy[PORT_1]);
+ }
+ /* PART2 - Download firmware to both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ if (CHIP_IS_E1x(sc))
+ port_of_path = port;
+ else
+ port_of_path = 0;
+ PMD_DRV_LOG(DEBUG, "Loading spirom for phy address 0x%x",
+ phy_blk[port]->addr);
+ if (elink_8073_8727_external_rom_boot(sc, phy_blk[port],
+ port_of_path))
+ return ELINK_STATUS_ERROR;
+ /* Disable PHY transmitter output */
+ elink_cl45_write(sc, phy_blk[port],
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_TX_DISABLE, 1);
+
+ }
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_84833_common_init_phy(struct bnx2x_softc *sc,
+ uint32_t shmem_base_path[],
+ __rte_unused uint32_t
+ shmem2_base_path[],
+ __rte_unused uint8_t
+ phy_index, uint32_t chip_id)
+{
+ uint8_t reset_gpios;
+ reset_gpios = elink_84833_get_reset_gpios(sc, shmem_base_path, chip_id);
+ elink_cb_gpio_mult_write(sc, reset_gpios,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ DELAY(10);
+ elink_cb_gpio_mult_write(sc, reset_gpios,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ PMD_DRV_LOG(DEBUG, "84833 reset pulse on pin values 0x%x", reset_gpios);
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_ext_phy_common_init(struct bnx2x_softc *sc,
+ uint32_t shmem_base_path[],
+ uint32_t shmem2_base_path[],
+ uint8_t phy_index,
+ uint32_t ext_phy_type,
+ uint32_t chip_id)
+{
+ elink_status_t rc = ELINK_STATUS_OK;
+
+ switch (ext_phy_type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073:
+ rc = elink_8073_common_init_phy(sc, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727_NOC:
+ rc = elink_8727_common_init_phy(sc, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726:
+ /* GPIO1 affects both ports, so there's need to pull
+ * it for single port alone
+ */
+ rc = elink_8726_common_init_phy(sc, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834:
+ /* GPIO3's are linked, and so both need to be toggled
+ * to obtain required 2us pulse.
+ */
+ rc = elink_84833_common_init_phy(sc, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+ rc = ELINK_STATUS_ERROR;
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG,
+ "ext_phy 0x%x common init not required",
+ ext_phy_type);
+ break;
+ }
+
+ if (rc != ELINK_STATUS_OK)
+ elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, 0); // "Warning: PHY was not initialized,"
+ // " Port %d",
+
+ return rc;
+}
+
+elink_status_t elink_common_init_phy(struct bnx2x_softc * sc,
+ uint32_t shmem_base_path[],
+ uint32_t shmem2_base_path[],
+ uint32_t chip_id,
+ __rte_unused uint8_t one_port_enabled)
+{
+ elink_status_t rc = ELINK_STATUS_OK;
+ uint32_t phy_ver, val;
+ uint8_t phy_index = 0;
+ uint32_t ext_phy_type, ext_phy_config;
+
+ elink_set_mdio_clk(sc, GRCBASE_EMAC0);
+ elink_set_mdio_clk(sc, GRCBASE_EMAC1);
+ PMD_DRV_LOG(DEBUG, "Begin common phy init");
+ if (CHIP_IS_E3(sc)) {
+ /* Enable EPIO */
+ val = REG_RD(sc, MISC_REG_GEN_PURP_HWG);
+ REG_WR(sc, MISC_REG_GEN_PURP_HWG, val | 1);
+ }
+ /* Check if common init was already done */
+ phy_ver = REG_RD(sc, shmem_base_path[0] +
+ offsetof(struct shmem_region,
+ port_mb[PORT_0].ext_phy_fw_version));
+ if (phy_ver) {
+ PMD_DRV_LOG(DEBUG, "Not doing common init; phy ver is 0x%x",
+ phy_ver);
+ return ELINK_STATUS_OK;
+ }
+
+ /* Read the ext_phy_type for arbitrary port(0) */
+ for (phy_index = ELINK_EXT_PHY1; phy_index < ELINK_MAX_PHYS;
+ phy_index++) {
+ ext_phy_config = elink_get_ext_phy_config(sc,
+ shmem_base_path[0],
+ phy_index, 0);
+ ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(ext_phy_config);
+ rc |= elink_ext_phy_common_init(sc, shmem_base_path,
+ shmem2_base_path,
+ phy_index, ext_phy_type,
+ chip_id);
+ }
+ return rc;
+}
+
+static void elink_check_over_curr(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t cfg_pin;
+ uint8_t port = params->port;
+ uint32_t pin_val;
+
+ cfg_pin = (REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].
+ e3_cmn_pin_cfg1)) &
+ PORT_HW_CFG_E3_OVER_CURRENT_MASK) >>
+ PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;
+
+ /* Ignore check if no external input PIN available */
+ if (elink_get_cfg_pin(sc, cfg_pin, &pin_val) != ELINK_STATUS_OK)
+ return;
+
+ if (!pin_val) {
+ if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) {
+ elink_cb_event_log(sc, ELINK_LOG_ID_OVER_CURRENT, params->port); //"Error: Power fault on Port %d has"
+ // " been detected and the power to "
+ // "that SFP+ module has been removed"
+ // " to prevent failure of the card."
+ // " Please remove the SFP+ module and"
+ // " restart the system to clear this"
+ // " error.",
+ vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
+ elink_warpcore_power_module(params, 0);
+ }
+ } else
+ vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
+}
+
+/* Returns 0 if no change occurred since last check; 1 otherwise. */
+static uint8_t elink_analyze_link_error(struct elink_params *params,
+ struct elink_vars *vars,
+ uint32_t status, uint32_t phy_flag,
+ uint32_t link_flag, uint8_t notify)
+{
+ struct bnx2x_softc *sc = params->sc;
+ /* Compare new value with previous value */
+ uint8_t led_mode;
+ uint32_t old_status = (vars->phy_flags & phy_flag) ? 1 : 0;
+
+ if ((status ^ old_status) == 0)
+ return 0;
+
+ /* If values differ */
+ switch (phy_flag) {
+ case PHY_HALF_OPEN_CONN_FLAG:
+ PMD_DRV_LOG(DEBUG, "Analyze Remote Fault");
+ break;
+ case PHY_SFP_TX_FAULT_FLAG:
+ PMD_DRV_LOG(DEBUG, "Analyze TX Fault");
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Analyze UNKNOWN");
+ }
+ PMD_DRV_LOG(DEBUG, "Link changed:[%x %x]->%x", vars->link_up,
+ old_status, status);
+
+ /* a. Update shmem->link_status accordingly
+ * b. Update elink_vars->link_up
+ */
+ if (status) {
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ vars->link_status |= link_flag;
+ vars->link_up = 0;
+ vars->phy_flags |= phy_flag;
+
+ /* activate nig drain */
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 1);
+ /* Set LED mode to off since the PHY doesn't know about these
+ * errors
+ */
+ led_mode = ELINK_LED_MODE_OFF;
+ } else {
+ vars->link_status |= LINK_STATUS_LINK_UP;
+ vars->link_status &= ~link_flag;
+ vars->link_up = 1;
+ vars->phy_flags &= ~phy_flag;
+ led_mode = ELINK_LED_MODE_OPER;
+
+ /* Clear nig drain */
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+ }
+ elink_sync_link(params, vars);
+ /* Update the LED according to the link state */
+ elink_set_led(params, vars, led_mode, ELINK_SPEED_10000);
+
+ /* Update link status in the shared memory */
+ elink_update_mng(params, vars->link_status);
+
+ /* C. Trigger General Attention */
+ vars->periodic_flags |= ELINK_PERIODIC_FLAGS_LINK_EVENT;
+ if (notify)
+ elink_cb_notify_link_changed(sc);
+
+ return 1;
+}
+
+/******************************************************************************
+* Description:
+* This function checks for half opened connection change indication.
+* When such change occurs, it calls the elink_analyze_link_error
+* to check if Remote Fault is set or cleared. Reception of remote fault
+* status message in the MAC indicates that the peer's MAC has detected
+* a fault, for example, due to break in the TX side of fiber.
+*
+******************************************************************************/
+static elink_status_t elink_check_half_open_conn(struct elink_params *params,
+ struct elink_vars *vars,
+ uint8_t notify)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t lss_status = 0;
+ uint32_t mac_base;
+ /* In case link status is physically up @ 10G do */
+ if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) ||
+ (REG_RD(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4)))
+ return ELINK_STATUS_OK;
+
+ if (CHIP_IS_E3(sc) &&
+ (REG_RD(sc, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_XMAC))) {
+ /* Check E3 XMAC */
+ /* Note that link speed cannot be queried here, since it may be
+ * zero while link is down. In case UMAC is active, LSS will
+ * simply not be set
+ */
+ mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ /* Clear stick bits (Requires rising edge) */
+ REG_WR(sc, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+ REG_WR(sc, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+ if (REG_RD(sc, mac_base + XMAC_REG_RX_LSS_STATUS))
+ lss_status = 1;
+
+ elink_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
+ } else if (REG_RD(sc, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
+ /* Check E1X / E2 BMAC */
+ uint32_t lss_status_reg;
+ uint32_t wb_data[2];
+ mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ /* Read BIGMAC_REGISTER_RX_LSS_STATUS */
+ if (CHIP_IS_E2(sc))
+ lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
+ else
+ lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS;
+
+ REG_RD_DMAE(sc, mac_base + lss_status_reg, wb_data, 2);
+ lss_status = (wb_data[0] > 0);
+
+ elink_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
+ }
+ return ELINK_STATUS_OK;
+}
+
+static void elink_sfp_tx_fault_detection(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t cfg_pin, value = 0;
+ uint8_t led_change, port = params->port;
+
+ /* Get The SFP+ TX_Fault controlling pin ([eg]pio) */
+ cfg_pin = (REG_RD(sc, params->shmem_base + offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config
+ [port].
+ e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_TX_FAULT_MASK) >>
+ PORT_HW_CFG_E3_TX_FAULT_SHIFT;
+
+ if (elink_get_cfg_pin(sc, cfg_pin, &value)) {
+ PMD_DRV_LOG(DEBUG, "Failed to read pin 0x%02x", cfg_pin);
+ return;
+ }
+
+ led_change = elink_analyze_link_error(params, vars, value,
+ PHY_SFP_TX_FAULT_FLAG,
+ LINK_STATUS_SFP_TX_FAULT, 1);
+
+ if (led_change) {
+ /* Change TX_Fault led, set link status for further syncs */
+ uint8_t led_mode;
+
+ if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) {
+ led_mode = MISC_REGISTERS_GPIO_HIGH;
+ vars->link_status |= LINK_STATUS_SFP_TX_FAULT;
+ } else {
+ led_mode = MISC_REGISTERS_GPIO_LOW;
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ }
+
+ /* If module is unapproved, led should be on regardless */
+ if (!(phy->flags & ELINK_FLAGS_SFP_NOT_APPROVED)) {
+ PMD_DRV_LOG(DEBUG, "Change TX_Fault LED: ->%x",
+ led_mode);
+ elink_set_e3_module_fault_led(params, led_mode);
+ }
+ }
+}
+
+static void elink_kr2_recovery(struct elink_params *params,
+ struct elink_vars *vars, struct elink_phy *phy)
+{
+ PMD_DRV_LOG(DEBUG, "KR2 recovery");
+
+ elink_warpcore_enable_AN_KR2(phy, params, vars);
+ elink_warpcore_restart_AN_KR(phy, params);
+}
+
+static void elink_check_kr2_wa(struct elink_params *params,
+ struct elink_vars *vars, struct elink_phy *phy)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t base_page, next_page, not_kr2_device, lane;
+ int sigdet;
+
+ /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
+ * Since some switches tend to reinit the AN process and clear the
+ * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * and recovered many times
+ */
+ if (vars->check_kr2_recovery_cnt > 0) {
+ vars->check_kr2_recovery_cnt--;
+ return;
+ }
+
+ sigdet = elink_warpcore_get_sigdet(phy, params);
+ if (!sigdet) {
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ elink_kr2_recovery(params, vars, phy);
+ PMD_DRV_LOG(DEBUG, "No sigdet");
+ }
+ return;
+ }
+
+ lane = elink_get_warpcore_lane(params);
+ CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, lane);
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &base_page);
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG2, &next_page);
+ elink_set_aer_mmd(params, phy);
+
+ /* CL73 has not begun yet */
+ if (base_page == 0) {
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ elink_kr2_recovery(params, vars, phy);
+ PMD_DRV_LOG(DEBUG, "No BP");
+ }
+ return;
+ }
+
+ /* In case NP bit is not set in the BasePage, or it is set,
+ * but only KX is advertised, declare this link partner as non-KR2
+ * device.
+ */
+ not_kr2_device = (((base_page & 0x8000) == 0) ||
+ (((base_page & 0x8000) &&
+ ((next_page & 0xe0) == 0x20))));
+
+ /* In case KR2 is already disabled, check if we need to re-enable it */
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!not_kr2_device) {
+ PMD_DRV_LOG(DEBUG, "BP=0x%x, NP=0x%x", base_page,
+ next_page);
+ elink_kr2_recovery(params, vars, phy);
+ }
+ return;
+ }
+ /* KR2 is enabled, but not KR2 device */
+ if (not_kr2_device) {
+ /* Disable KR2 on both lanes */
+ PMD_DRV_LOG(DEBUG, "BP=0x%x, NP=0x%x", base_page, next_page);
+ elink_disable_kr2(params, vars, phy);
+ /* Restart AN on leading lane */
+ elink_warpcore_restart_AN_KR(phy, params);
+ return;
+ }
+}
+
+void elink_period_func(struct elink_params *params, struct elink_vars *vars)
+{
+ uint16_t phy_idx;
+ struct bnx2x_softc *sc = params->sc;
+ for (phy_idx = ELINK_INT_PHY; phy_idx < ELINK_MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].flags & ELINK_FLAGS_TX_ERROR_CHECK) {
+ elink_set_aer_mmd(params, &params->phy[phy_idx]);
+ if (elink_check_half_open_conn(params, vars, 1) !=
+ ELINK_STATUS_OK) {
+ PMD_DRV_LOG(DEBUG, "Fault detection failed");
+ }
+ break;
+ }
+ }
+
+ if (CHIP_IS_E3(sc)) {
+ struct elink_phy *phy = &params->phy[ELINK_INT_PHY];
+ elink_set_aer_mmd(params, phy);
+ if ((phy->supported & ELINK_SUPPORTED_20000baseKR2_Full) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
+ elink_check_kr2_wa(params, vars, phy);
+ elink_check_over_curr(params, vars);
+ if (vars->rx_tx_asic_rst)
+ elink_warpcore_config_runtime(phy, params, vars);
+
+ if ((REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].
+ default_cfg))
+ & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+ PORT_HW_CFG_NET_SERDES_IF_SFI) {
+ if (elink_is_sfp_module_plugged(params)) {
+ elink_sfp_tx_fault_detection(phy, params, vars);
+ } else if (vars->link_status & LINK_STATUS_SFP_TX_FAULT) {
+ /* Clean trail, interrupt corrects the leds */
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG;
+ /* Update link status in the shared memory */
+ elink_update_mng(params, vars->link_status);
+ }
+ }
+ }
+}
+
+uint8_t elink_fan_failure_det_req(struct bnx2x_softc *sc,
+ uint32_t shmem_base,
+ uint32_t shmem2_base, uint8_t port)
+{
+ uint8_t phy_index, fan_failure_det_req = 0;
+ struct elink_phy phy;
+ for (phy_index = ELINK_EXT_PHY1; phy_index < ELINK_MAX_PHYS;
+ phy_index++) {
+ if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base,
+ port, &phy)
+ != ELINK_STATUS_OK) {
+ PMD_DRV_LOG(DEBUG, "populate phy failed");
+ return 0;
+ }
+ fan_failure_det_req |= (phy.flags &
+ ELINK_FLAGS_FAN_FAILURE_DET_REQ);
+ }
+ return fan_failure_det_req;
+}
+
+void elink_hw_reset_phy(struct elink_params *params)
+{
+ uint8_t phy_index;
+ struct bnx2x_softc *sc = params->sc;
+ elink_update_mng(params, 0);
+ elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 + params->port * 4,
+ (ELINK_NIG_MASK_XGXS0_LINK_STATUS |
+ ELINK_NIG_MASK_XGXS0_LINK10G |
+ ELINK_NIG_MASK_SERDES0_LINK_STATUS |
+ ELINK_NIG_MASK_MI_INT));
+
+ for (phy_index = ELINK_INT_PHY; phy_index < ELINK_MAX_PHYS; phy_index++) {
+ if (params->phy[phy_index].hw_reset) {
+ params->phy[phy_index].hw_reset(&params->phy[phy_index],
+ params);
+ params->phy[phy_index] = phy_null;
+ }
+ }
+}
+
+void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars,
+ __rte_unused uint32_t chip_id, uint32_t shmem_base,
+ uint32_t shmem2_base, uint8_t port)
+{
+ uint8_t gpio_num = 0xff, gpio_port = 0xff, phy_index;
+ uint32_t val;
+ uint32_t offset, aeu_mask, swap_val, swap_override, sync_offset;
+ if (CHIP_IS_E3(sc)) {
+ if (elink_get_mod_abs_int_cfg(sc,
+ shmem_base,
+ port,
+ &gpio_num,
+ &gpio_port) != ELINK_STATUS_OK)
+ return;
+ } else {
+ struct elink_phy phy;
+ for (phy_index = ELINK_EXT_PHY1; phy_index < ELINK_MAX_PHYS;
+ phy_index++) {
+ if (elink_populate_phy(sc, phy_index, shmem_base,
+ shmem2_base, port, &phy)
+ != ELINK_STATUS_OK) {
+ PMD_DRV_LOG(DEBUG, "populate phy failed");
+ return;
+ }
+ if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726) {
+ gpio_num = MISC_REGISTERS_GPIO_3;
+ gpio_port = port;
+ break;
+ }
+ }
+ }
+
+ if (gpio_num == 0xff)
+ return;
+
+ /* Set GPIO3 to trigger SFP+ module insertion/removal */
+ elink_cb_gpio_write(sc, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z,
+ gpio_port);
+
+ swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
+ gpio_port ^= (swap_val && swap_override);
+
+ vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
+ (gpio_num + (gpio_port << 2));
+
+ sync_offset = shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].aeu_int_mask);
+ REG_WR(sc, sync_offset, vars->aeu_int_mask);
+
+ PMD_DRV_LOG(DEBUG, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x",
+ gpio_num, gpio_port, vars->aeu_int_mask);
+
+ if (port == 0)
+ offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+ else
+ offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
+
+ /* Open appropriate AEU for interrupts */
+ aeu_mask = REG_RD(sc, offset);
+ aeu_mask |= vars->aeu_int_mask;
+ REG_WR(sc, offset, aeu_mask);
+
+ /* Enable the GPIO to trigger interrupt */
+ val = REG_RD(sc, MISC_REG_GPIO_EVENT_EN);
+ val |= 1 << (gpio_num + (gpio_port << 2));
+ REG_WR(sc, MISC_REG_GPIO_EVENT_EN, val);
+}
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/elink.h b/src/spdk/dpdk/drivers/net/bnx2x/elink.h
new file mode 100644
index 00000000..40000c24
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/elink.h
@@ -0,0 +1,582 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef ELINK_H
+#define ELINK_H
+
+#define ELINK_DEBUG
+
+
+
+
+
+
+/***********************************************************/
+/* CLC Call backs functions */
+/***********************************************************/
+/* CLC device structure */
+struct bnx2x_softc;
+
+extern uint32_t elink_cb_reg_read(struct bnx2x_softc *sc, uint32_t reg_addr);
+extern void elink_cb_reg_write(struct bnx2x_softc *sc, uint32_t reg_addr, uint32_t val);
+
+/* mode - 0( LOW ) /1(HIGH)*/
+extern uint8_t elink_cb_gpio_write(struct bnx2x_softc *sc,
+ uint16_t gpio_num,
+ uint8_t mode, uint8_t port);
+extern uint8_t elink_cb_gpio_mult_write(struct bnx2x_softc *sc,
+ uint8_t pins,
+ uint8_t mode);
+
+extern uint32_t elink_cb_gpio_read(struct bnx2x_softc *sc, uint16_t gpio_num, uint8_t port);
+extern uint8_t elink_cb_gpio_int_write(struct bnx2x_softc *sc,
+ uint16_t gpio_num,
+ uint8_t mode, uint8_t port);
+
+extern uint32_t elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param);
+
+/* This function is called every 1024 bytes downloading of phy firmware.
+Driver can use it to print to screen indication for download progress */
+extern void elink_cb_download_progress(struct bnx2x_softc *sc, uint32_t cur, uint32_t total);
+
+/* Each log type has its own parameters */
+typedef enum elink_log_id {
+ ELINK_LOG_ID_UNQUAL_IO_MODULE = 0, /* uint8_t port, const char* vendor_name, const char* vendor_pn */
+ ELINK_LOG_ID_OVER_CURRENT = 1, /* uint8_t port */
+ ELINK_LOG_ID_PHY_UNINITIALIZED = 2, /* uint8_t port */
+ ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT= 3, /* No params */
+ ELINK_LOG_ID_NON_10G_MODULE = 4, /* uint8_t port */
+}elink_log_id_t;
+
+typedef enum elink_status {
+ ELINK_STATUS_OK = 0,
+ ELINK_STATUS_ERROR,
+ ELINK_STATUS_TIMEOUT,
+ ELINK_STATUS_NO_LINK,
+ ELINK_STATUS_INVALID_IMAGE,
+ ELINK_OP_NOT_SUPPORTED = 122
+} elink_status_t;
+extern void elink_cb_event_log(struct bnx2x_softc *sc, const elink_log_id_t log_id, ...);
+extern void elink_cb_load_warpcore_microcode(void);
+
+extern void elink_cb_notify_link_changed(struct bnx2x_softc *sc);
+
+#define ELINK_EVENT_LOG_LEVEL_ERROR 1
+#define ELINK_EVENT_LOG_LEVEL_WARNING 2
+#define ELINK_EVENT_ID_SFP_UNQUALIFIED_MODULE 1
+#define ELINK_EVENT_ID_SFP_POWER_FAULT 2
+
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
+/* Debug prints */
+
+/***********************************************************/
+/* Defines */
+/***********************************************************/
+#define ELINK_DEFAULT_PHY_DEV_ADDR 3
+#define ELINK_E2_DEFAULT_PHY_DEV_ADDR 5
+
+
+#define DUPLEX_FULL 1
+#define DUPLEX_HALF 2
+
+#define ELINK_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO
+#define ELINK_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX
+#define ELINK_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX
+#define ELINK_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
+#define ELINK_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
+
+#define ELINK_NET_SERDES_IF_XFI 1
+#define ELINK_NET_SERDES_IF_SFI 2
+#define ELINK_NET_SERDES_IF_KR 3
+#define ELINK_NET_SERDES_IF_DXGXS 4
+
+#define ELINK_SPEED_AUTO_NEG 0
+#define ELINK_SPEED_10 10
+#define ELINK_SPEED_100 100
+#define ELINK_SPEED_1000 1000
+#define ELINK_SPEED_2500 2500
+#define ELINK_SPEED_10000 10000
+#define ELINK_SPEED_20000 20000
+
+#define ELINK_I2C_DEV_ADDR_A0 0xa0
+#define ELINK_I2C_DEV_ADDR_A2 0xa2
+
+#define ELINK_SFP_EEPROM_PAGE_SIZE 16
+#define ELINK_SFP_EEPROM_VENDOR_NAME_ADDR 0x14
+#define ELINK_SFP_EEPROM_VENDOR_NAME_SIZE 16
+#define ELINK_SFP_EEPROM_VENDOR_OUI_ADDR 0x25
+#define ELINK_SFP_EEPROM_VENDOR_OUI_SIZE 3
+#define ELINK_SFP_EEPROM_PART_NO_ADDR 0x28
+#define ELINK_SFP_EEPROM_PART_NO_SIZE 16
+#define ELINK_SFP_EEPROM_REVISION_ADDR 0x38
+#define ELINK_SFP_EEPROM_REVISION_SIZE 4
+#define ELINK_SFP_EEPROM_SERIAL_ADDR 0x44
+#define ELINK_SFP_EEPROM_SERIAL_SIZE 16
+#define ELINK_SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
+#define ELINK_SFP_EEPROM_DATE_SIZE 6
+#define ELINK_SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
+#define ELINK_SFP_EEPROM_DIAG_TYPE_SIZE 1
+#define ELINK_SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
+#define ELINK_SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+#define ELINK_SFP_EEPROM_SFF_8472_COMP_SIZE 1
+
+#define ELINK_SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e
+#define ELINK_SFP_EEPROM_A2_CC_DMI_ADDR 0x5f
+
+#define ELINK_PWR_FLT_ERR_MSG_LEN 250
+
+#define ELINK_XGXS_EXT_PHY_TYPE(ext_phy_config) \
+ ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
+#define ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config) \
+ (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
+#define ELINK_SERDES_EXT_PHY_TYPE(ext_phy_config) \
+ ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
+
+/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
+#define ELINK_SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1)
+/* Single Media board contains single external phy */
+#define ELINK_SINGLE_MEDIA(params) (params->num_phys == 2)
+/* Dual Media board contains two external phy with different media */
+#define ELINK_DUAL_MEDIA(params) (params->num_phys == 3)
+
+#define ELINK_FW_PARAM_PHY_ADDR_MASK 0x000000FF
+#define ELINK_FW_PARAM_PHY_TYPE_MASK 0x0000FF00
+#define ELINK_FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000
+#define ELINK_FW_PARAM_MDIO_CTRL_OFFSET 16
+#define ELINK_FW_PARAM_PHY_ADDR(fw_param) (fw_param & \
+ ELINK_FW_PARAM_PHY_ADDR_MASK)
+#define ELINK_FW_PARAM_PHY_TYPE(fw_param) (fw_param & \
+ ELINK_FW_PARAM_PHY_TYPE_MASK)
+#define ELINK_FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \
+ ELINK_FW_PARAM_MDIO_CTRL_MASK) >> \
+ ELINK_FW_PARAM_MDIO_CTRL_OFFSET)
+#define ELINK_FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
+ (phy_addr | phy_type | mdio_access << ELINK_FW_PARAM_MDIO_CTRL_OFFSET)
+
+
+#define ELINK_PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
+#define ELINK_PFC_BRB_FULL_LB_XON_THRESHOLD 250
+
+#define ELINK_MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
+
+#define ELINK_BMAC_CONTROL_RX_ENABLE 2
+/***********************************************************/
+/* Structs */
+/***********************************************************/
+#define ELINK_INT_PHY 0
+#define ELINK_EXT_PHY1 1
+#define ELINK_EXT_PHY2 2
+#define ELINK_MAX_PHYS 3
+
+/* Same configuration is shared between the XGXS and the first external phy */
+#define ELINK_LINK_CONFIG_SIZE (ELINK_MAX_PHYS - 1)
+#define ELINK_LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == ELINK_INT_PHY) ? \
+ 0 : (_phy_idx - 1))
+/***********************************************************/
+/* elink_phy struct */
+/* Defines the required arguments and function per phy */
+/***********************************************************/
+struct elink_vars;
+struct elink_params;
+struct elink_phy;
+
+typedef uint8_t (*config_init_t)(struct elink_phy *phy, struct elink_params *params,
+ struct elink_vars *vars);
+typedef uint8_t (*read_status_t)(struct elink_phy *phy, struct elink_params *params,
+ struct elink_vars *vars);
+typedef void (*link_reset_t)(struct elink_phy *phy,
+ struct elink_params *params);
+typedef void (*config_loopback_t)(struct elink_phy *phy,
+ struct elink_params *params);
+typedef uint8_t (*format_fw_ver_t)(uint32_t raw, uint8_t *str, uint16_t *len);
+typedef void (*hw_reset_t)(struct elink_phy *phy, struct elink_params *params);
+typedef void (*set_link_led_t)(struct elink_phy *phy,
+ struct elink_params *params, uint8_t mode);
+typedef void (*phy_specific_func_t)(struct elink_phy *phy,
+ struct elink_params *params, uint32_t action);
+struct elink_reg_set {
+ uint8_t devad;
+ uint16_t reg;
+ uint16_t val;
+};
+
+struct elink_phy {
+ uint32_t type;
+
+ /* Loaded during init */
+ uint8_t addr;
+ uint8_t def_md_devad;
+ uint16_t flags;
+ /* No Over-Current detection */
+#define ELINK_FLAGS_NOC (1<<1)
+ /* Fan failure detection required */
+#define ELINK_FLAGS_FAN_FAILURE_DET_REQ (1<<2)
+ /* Initialize first the XGXS and only then the phy itself */
+#define ELINK_FLAGS_INIT_XGXS_FIRST (1<<3)
+#define ELINK_FLAGS_WC_DUAL_MODE (1<<4)
+#define ELINK_FLAGS_4_PORT_MODE (1<<5)
+#define ELINK_FLAGS_REARM_LATCH_SIGNAL (1<<6)
+#define ELINK_FLAGS_SFP_NOT_APPROVED (1<<7)
+#define ELINK_FLAGS_MDC_MDIO_WA (1<<8)
+#define ELINK_FLAGS_DUMMY_READ (1<<9)
+#define ELINK_FLAGS_MDC_MDIO_WA_B0 (1<<10)
+#define ELINK_FLAGS_SFP_MODULE_PLUGGED_IN_WC (1<<11)
+#define ELINK_FLAGS_TX_ERROR_CHECK (1<<12)
+#define ELINK_FLAGS_EEE (1<<13)
+#define ELINK_FLAGS_TEMPERATURE (1<<14)
+#define ELINK_FLAGS_MDC_MDIO_WA_G (1<<15)
+
+ /* preemphasis values for the rx side */
+ uint16_t rx_preemphasis[4];
+
+ /* preemphasis values for the tx side */
+ uint16_t tx_preemphasis[4];
+
+ /* EMAC address for access MDIO */
+ uint32_t mdio_ctrl;
+
+ uint32_t supported;
+#define ELINK_SUPPORTED_10baseT_Half (1<<0)
+#define ELINK_SUPPORTED_10baseT_Full (1<<1)
+#define ELINK_SUPPORTED_100baseT_Half (1<<2)
+#define ELINK_SUPPORTED_100baseT_Full (1<<3)
+#define ELINK_SUPPORTED_1000baseT_Full (1<<4)
+#define ELINK_SUPPORTED_2500baseX_Full (1<<5)
+#define ELINK_SUPPORTED_10000baseT_Full (1<<6)
+#define ELINK_SUPPORTED_TP (1<<7)
+#define ELINK_SUPPORTED_FIBRE (1<<8)
+#define ELINK_SUPPORTED_Autoneg (1<<9)
+#define ELINK_SUPPORTED_Pause (1<<10)
+#define ELINK_SUPPORTED_Asym_Pause (1<<11)
+#define ELINK_SUPPORTED_20000baseMLD2_Full (1<<21)
+#define ELINK_SUPPORTED_20000baseKR2_Full (1<<22)
+
+ uint32_t media_type;
+#define ELINK_ETH_PHY_UNSPECIFIED 0x0
+#define ELINK_ETH_PHY_SFPP_10G_FIBER 0x1
+#define ELINK_ETH_PHY_XFP_FIBER 0x2
+#define ELINK_ETH_PHY_DA_TWINAX 0x3
+#define ELINK_ETH_PHY_BASE_T 0x4
+#define ELINK_ETH_PHY_SFP_1G_FIBER 0x5
+#define ELINK_ETH_PHY_KR 0xf0
+#define ELINK_ETH_PHY_CX4 0xf1
+#define ELINK_ETH_PHY_NOT_PRESENT 0xff
+
+ /* The address in which version is located*/
+ uint32_t ver_addr;
+
+ uint16_t req_flow_ctrl;
+
+ uint16_t req_line_speed;
+
+ uint32_t speed_cap_mask;
+
+ uint16_t req_duplex;
+ uint16_t rsrv;
+ /* Called per phy/port init, and it configures LASI, speed, autoneg,
+ duplex, flow control negotiation, etc. */
+ config_init_t config_init;
+
+ /* Called due to interrupt. It determines the link, speed */
+ read_status_t read_status;
+
+ /* Called when driver is unloading. Should reset the phy */
+ link_reset_t link_reset;
+
+ /* Set the loopback configuration for the phy */
+ config_loopback_t config_loopback;
+
+ /* Format the given raw number into str up to len */
+ format_fw_ver_t format_fw_ver;
+
+ /* Reset the phy (both ports) */
+ hw_reset_t hw_reset;
+
+ /* Set link led mode (on/off/oper)*/
+ set_link_led_t set_link_led;
+
+ /* PHY Specific tasks */
+ phy_specific_func_t phy_specific_func;
+#define ELINK_DISABLE_TX 1
+#define ELINK_ENABLE_TX 2
+#define ELINK_PHY_INIT 3
+};
+
+/* Inputs parameters to the CLC */
+struct elink_params {
+
+ uint8_t port;
+
+ /* Default / User Configuration */
+ uint8_t loopback_mode;
+#define ELINK_LOOPBACK_NONE 0
+#define ELINK_LOOPBACK_EMAC 1
+#define ELINK_LOOPBACK_BMAC 2
+#define ELINK_LOOPBACK_XGXS 3
+#define ELINK_LOOPBACK_EXT_PHY 4
+#define ELINK_LOOPBACK_EXT 5
+#define ELINK_LOOPBACK_UMAC 6
+#define ELINK_LOOPBACK_XMAC 7
+
+ /* Device parameters */
+ uint8_t mac_addr[6];
+
+ uint16_t req_duplex[ELINK_LINK_CONFIG_SIZE];
+ uint16_t req_flow_ctrl[ELINK_LINK_CONFIG_SIZE];
+
+ uint16_t req_line_speed[ELINK_LINK_CONFIG_SIZE]; /* Also determine AutoNeg */
+
+ /* shmem parameters */
+ uint32_t shmem_base;
+ uint32_t shmem2_base;
+ uint32_t speed_cap_mask[ELINK_LINK_CONFIG_SIZE];
+ uint32_t switch_cfg;
+#define ELINK_SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
+#define ELINK_SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
+#define ELINK_SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
+
+ uint32_t lane_config;
+
+ /* Phy register parameter */
+ uint32_t chip_id;
+
+ /* features */
+ uint32_t feature_config_flags;
+#define ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
+#define ELINK_FEATURE_CONFIG_PFC_ENABLED (1<<1)
+#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
+#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
+#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX (1<<8)
+#define ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
+#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
+#define ELINK_FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11)
+#define ELINK_FEATURE_CONFIG_IEEE_PHY_TEST (1<<12)
+#define ELINK_FEATURE_CONFIG_MT_SUPPORT (1<<13)
+#define ELINK_FEATURE_CONFIG_BOOT_FROM_SAN (1<<14)
+
+ /* Will be populated during common init */
+ struct elink_phy phy[ELINK_MAX_PHYS];
+
+ /* Will be populated during common init */
+ uint8_t num_phys;
+
+ uint8_t rsrv;
+
+ /* Used to configure the EEE Tx LPI timer, has several modes of
+ * operation, according to bits 29:28 -
+ * 2'b00: Timer will be configured by nvram, output will be the value
+ * from nvram.
+ * 2'b01: Timer will be configured by nvram, output will be in
+ * microseconds.
+ * 2'b10: bits 1:0 contain an nvram value which will be used instead
+ * of the one located in the nvram. Output will be that value.
+ * 2'b11: bits 19:0 contain the idle timer in microseconds; output
+ * will be in microseconds.
+ * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
+ */
+ uint32_t eee_mode;
+#define ELINK_EEE_MODE_NVRAM_BALANCED_TIME (0xa00)
+#define ELINK_EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100)
+#define ELINK_EEE_MODE_NVRAM_LATENCY_TIME (0x6000)
+#define ELINK_EEE_MODE_NVRAM_MASK (0x3)
+#define ELINK_EEE_MODE_TIMER_MASK (0xfffff)
+#define ELINK_EEE_MODE_OUTPUT_TIME (1<<28)
+#define ELINK_EEE_MODE_OVERRIDE_NVRAM (1<<29)
+#define ELINK_EEE_MODE_ENABLE_LPI (1<<30)
+#define ELINK_EEE_MODE_ADV_LPI (1<<31)
+
+ uint16_t hw_led_mode; /* part of the hw_config read from the shmem */
+ uint32_t multi_phy_config;
+
+ /* Device pointer passed to all callback functions */
+ struct bnx2x_softc *sc;
+ uint16_t req_fc_auto_adv; /* Should be set to TX / BOTH when
+ req_flow_ctrl is set to AUTO */
+ uint16_t link_flags;
+#define ELINK_LINK_FLAGS_INT_DISABLED (1<<0)
+#define ELINK_PHY_INITIALIZED (1<<1)
+ uint32_t lfa_base;
+};
+
+/* Output parameters */
+struct elink_vars {
+ uint8_t phy_flags;
+#define PHY_XGXS_FLAG (1<<0)
+#define PHY_SGMII_FLAG (1<<1)
+#define PHY_PHYSICAL_LINK_FLAG (1<<2)
+#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
+#define PHY_OVER_CURRENT_FLAG (1<<4)
+#define PHY_SFP_TX_FAULT_FLAG (1<<5)
+
+ uint8_t mac_type;
+#define ELINK_MAC_TYPE_NONE 0
+#define ELINK_MAC_TYPE_EMAC 1
+#define ELINK_MAC_TYPE_BMAC 2
+#define ELINK_MAC_TYPE_UMAC 3
+#define ELINK_MAC_TYPE_XMAC 4
+
+ uint8_t phy_link_up; /* internal phy link indication */
+ uint8_t link_up;
+
+ uint16_t line_speed;
+ uint16_t duplex;
+
+ uint16_t flow_ctrl;
+ uint16_t ieee_fc;
+
+ /* The same definitions as the shmem parameter */
+ uint32_t link_status;
+ uint32_t eee_status;
+ uint8_t fault_detected;
+ uint8_t check_kr2_recovery_cnt;
+#define ELINK_CHECK_KR2_RECOVERY_CNT 5
+ uint16_t periodic_flags;
+#define ELINK_PERIODIC_FLAGS_LINK_EVENT 0x0001
+
+ uint32_t aeu_int_mask;
+ uint8_t rx_tx_asic_rst;
+ uint8_t turn_to_run_wc_rt;
+ uint16_t rsrv2;
+ /* The same definitions as the shmem2 parameter */
+ uint32_t link_attr_sync;
+};
+
+/***********************************************************/
+/* Functions */
+/***********************************************************/
+elink_status_t elink_phy_init(struct elink_params *params, struct elink_vars *vars);
+
+/* Reset the link. Should be called when driver or interface goes down
+ Before calling phy firmware upgrade, the reset_ext_phy should be set
+ to 0 */
+elink_status_t elink_lfa_reset(struct elink_params *params, struct elink_vars *vars);
+/* elink_link_update should be called upon link interrupt */
+elink_status_t elink_link_update(struct elink_params *params, struct elink_vars *vars);
+
+/* Reads the link_status from the shmem,
+ and update the link vars accordingly */
+void elink_link_status_update(struct elink_params *input,
+ struct elink_vars *output);
+
+/* Set/Unset the led
+ Basically, the CLC takes care of the led for the link, but in case one needs
+ to set/unset the led unnaturally, set the "mode" to ELINK_LED_MODE_OPER to
+ blink the led, and ELINK_LED_MODE_OFF to set the led off.*/
+elink_status_t elink_set_led(struct elink_params *params,
+ struct elink_vars *vars, uint8_t mode, uint32_t speed);
+#define ELINK_LED_MODE_OFF 0
+#define ELINK_LED_MODE_ON 1
+#define ELINK_LED_MODE_OPER 2
+#define ELINK_LED_MODE_FRONT_PANEL_OFF 3
+
+/* elink_handle_module_detect_int should be called upon module detection
+ interrupt */
+void elink_handle_module_detect_int(struct elink_params *params);
+
+/* One-time initialization for external phy after power up */
+elink_status_t elink_common_init_phy(struct bnx2x_softc *sc, uint32_t shmem_base_path[],
+ uint32_t shmem2_base_path[], uint32_t chip_id, uint8_t one_port_enabled);
+
+void elink_hw_reset_phy(struct elink_params *params);
+
+/* Check swap bit and adjust PHY order */
+uint32_t elink_phy_selection(struct elink_params *params);
+
+/* Probe the phys on board, and populate them in "params" */
+elink_status_t elink_phy_probe(struct elink_params *params);
+
+/* Checks if fan failure detection is required on one of the phys on board */
+uint8_t elink_fan_failure_det_req(struct bnx2x_softc *sc, uint32_t shmem_base,
+ uint32_t shmem2_base, uint8_t port);
+
+/* Open / close the gate between the NIG and the BRB */
+void elink_set_rx_filter(struct elink_params *params, uint8_t en);
+
+/* DCBX structs */
+
+/* Number of maximum COS per chip */
+#define ELINK_DCBX_E2E3_MAX_NUM_COS (2)
+#define ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0 (6)
+#define ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 (3)
+#define ELINK_DCBX_E3B0_MAX_NUM_COS ( \
+ ELINK_MAXVAL(ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0, \
+ ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1))
+
+#define ELINK_DCBX_MAX_NUM_COS ( \
+ ELINK_MAXVAL(ELINK_DCBX_E3B0_MAX_NUM_COS, \
+ ELINK_DCBX_E2E3_MAX_NUM_COS))
+
+/* PFC port configuration params */
+struct elink_nig_brb_pfc_port_params {
+ /* NIG */
+ uint32_t pause_enable;
+ uint32_t llfc_out_en;
+ uint32_t llfc_enable;
+ uint32_t pkt_priority_to_cos;
+ uint8_t num_of_rx_cos_priority_mask;
+ uint32_t rx_cos_priority_mask[ELINK_DCBX_MAX_NUM_COS];
+ uint32_t llfc_high_priority_classes;
+ uint32_t llfc_low_priority_classes;
+};
+
+
+/* ETS port configuration params */
+struct elink_ets_bw_params {
+ uint8_t bw;
+};
+
+struct elink_ets_sp_params {
+ /**
+ * valid values are 0 - 5. 0 is highest strict priority.
+ * There can't be two COS's with the same pri.
+ */
+ uint8_t pri;
+};
+
+enum elink_cos_state {
+ elink_cos_state_strict = 0,
+ elink_cos_state_bw = 1,
+};
+
+struct elink_ets_cos_params {
+ enum elink_cos_state state ;
+ union {
+ struct elink_ets_bw_params bw_params;
+ struct elink_ets_sp_params sp_params;
+ } params;
+};
+
+struct elink_ets_params {
+ uint8_t num_of_cos; /* Number of valid COS entries*/
+ struct elink_ets_cos_params cos[ELINK_DCBX_MAX_NUM_COS];
+};
+
+/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
+ * when link is already up
+ */
+elink_status_t elink_update_pfc(struct elink_params *params,
+ struct elink_vars *vars,
+ struct elink_nig_brb_pfc_port_params *pfc_params);
+
+void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars,
+ uint32_t chip_id, uint32_t shmem_base, uint32_t shmem2_base,
+ uint8_t port);
+
+void elink_period_func(struct elink_params *params, struct elink_vars *vars);
+
+void elink_enable_pmd_tx(struct elink_params *params);
+
+
+
+#endif /* ELINK_H */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/meson.build b/src/spdk/dpdk/drivers/net/bnx2x/meson.build
new file mode 100644
index 00000000..e3c68886
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+dep = cc.find_library('z', required: false)
+build = dep.found()
+ext_deps += dep
+cflags += '-DZLIB_CONST'
+sources = files('bnx2x.c',
+ 'bnx2x_ethdev.c',
+ 'bnx2x_rxtx.c',
+ 'bnx2x_stats.c',
+ 'bnx2x_vfpf.c',
+ 'ecore_sp.c',
+ 'elink.c')
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/rte_pmd_bnx2x_version.map b/src/spdk/dpdk/drivers/net/bnx2x/rte_pmd_bnx2x_version.map
new file mode 100644
index 00000000..bd8138a0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/rte_pmd_bnx2x_version.map
@@ -0,0 +1,4 @@
+DPDK_2.1 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/bnxt/Makefile b/src/spdk/dpdk/drivers/net/bnxt/Makefile
new file mode 100644
index 00000000..8be3cb0e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/Makefile
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation.
+# Copyright(c) 2014 6WIND S.A.
+# Copyright(c) Broadcom Limited.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_bnxt.a
+
+EXPORT_MAP := rte_pmd_bnxt_version.map
+
+LIBABIVER := 2
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+EXPORT_MAP := rte_pmd_bnxt_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_cpr.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_hwrm.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ring.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxq.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxr.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_stats.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txq.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_util.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += rte_pmd_bnxt.c
+
+#
+# Export include files
+#
+SYMLINK-y-include +=
+SYMLINK-$(CONFIG_RTE_LIBRTE_BNXT_PMD)-include := rte_pmd_bnxt.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt.h
new file mode 100644
index 00000000..db5c4eb0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt.h
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_H_
+#define _BNXT_H_
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
+#include <rte_time.h>
+
+#include "bnxt_cpr.h"
+
+#define BNXT_MAX_MTU 9500
+#define VLAN_TAG_SIZE 4
+#define BNXT_VF_RSV_NUM_RSS_CTX 1
+#define BNXT_VF_RSV_NUM_L2_CTX 4
+/* TODO: For now, do not support VMDq/RFS on VFs. */
+#define BNXT_VF_RSV_NUM_VNIC 1
+#define BNXT_MAX_LED 4
+#define BNXT_NUM_VLANS 2
+#define BNXT_MIN_RING_DESC 16
+#define BNXT_MAX_TX_RING_DESC 4096
+#define BNXT_MAX_RX_RING_DESC 8192
+#define BNXT_DB_SIZE 0x80
+
+#define BNXT_INT_LAT_TMR_MIN 75
+#define BNXT_INT_LAT_TMR_MAX 150
+#define BNXT_NUM_CMPL_AGGR_INT 36
+#define BNXT_CMPL_AGGR_DMA_TMR 37
+#define BNXT_NUM_CMPL_DMA_AGGR 36
+#define BNXT_CMPL_AGGR_DMA_TMR_DURING_INT 50
+#define BNXT_NUM_CMPL_DMA_AGGR_DURING_INT 12
+
+struct bnxt_led_info {
+ uint8_t led_id;
+ uint8_t led_type;
+ uint8_t led_group_id;
+ uint8_t unused;
+ uint16_t led_state_caps;
+#define BNXT_LED_ALT_BLINK_CAP(x) ((x) & \
+ rte_cpu_to_le_16(HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT))
+
+ uint16_t led_color_caps;
+};
+
+struct bnxt_led_cfg {
+ uint8_t led_id;
+ uint8_t led_state;
+ uint8_t led_color;
+ uint8_t unused;
+ uint16_t led_blink_on;
+ uint16_t led_blink_off;
+ uint8_t led_group_id;
+ uint8_t rsvd;
+};
+
+#define BNXT_LED_DFLT_ENA \
+ (HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID | \
+ HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE | \
+ HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON | \
+ HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF | \
+ HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID)
+
+#define BNXT_LED_DFLT_ENA_SHIFT 6
+
+#define BNXT_LED_DFLT_ENABLES(x) \
+ rte_cpu_to_le_32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
+
+enum bnxt_hw_context {
+ HW_CONTEXT_NONE = 0,
+ HW_CONTEXT_IS_RSS = 1,
+ HW_CONTEXT_IS_COS = 2,
+ HW_CONTEXT_IS_LB = 3,
+};
+
+struct bnxt_vlan_table_entry {
+ uint16_t tpid;
+ uint16_t vid;
+} __attribute__((packed));
+
+struct bnxt_vlan_antispoof_table_entry {
+ uint16_t tpid;
+ uint16_t vid;
+ uint16_t mask;
+} __attribute__((packed));
+
+struct bnxt_child_vf_info {
+ void *req_buf;
+ struct bnxt_vlan_table_entry *vlan_table;
+ struct bnxt_vlan_antispoof_table_entry *vlan_as_table;
+ STAILQ_HEAD(, bnxt_filter_info) filter;
+ uint32_t func_cfg_flags;
+ uint32_t l2_rx_mask;
+ uint16_t fid;
+ uint16_t max_tx_rate;
+ uint16_t dflt_vlan;
+ uint16_t vlan_count;
+ uint8_t mac_spoof_en;
+ uint8_t vlan_spoof_en;
+ bool random_mac;
+ bool persist_stats;
+};
+
+struct bnxt_pf_info {
+#define BNXT_FIRST_PF_FID 1
+#define BNXT_MAX_VFS(bp) (bp->pf.max_vfs)
+#define BNXT_TOTAL_VFS(bp) ((bp)->pf.total_vfs)
+#define BNXT_FIRST_VF_FID 128
+#define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp)
+#define BNXT_PF_RINGS_AVAIL(bp) (bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp))
+ uint16_t port_id;
+ uint16_t first_vf_id;
+ uint16_t active_vfs;
+ uint16_t max_vfs;
+ uint16_t total_vfs; /* Total VFs possible.
+ * Not necessarily enabled.
+ */
+ uint32_t func_cfg_flags;
+ void *vf_req_buf;
+ rte_iova_t vf_req_buf_dma_addr;
+ uint32_t vf_req_fwd[8];
+ uint16_t total_vnics;
+ struct bnxt_child_vf_info *vf_info;
+#define BNXT_EVB_MODE_NONE 0
+#define BNXT_EVB_MODE_VEB 1
+#define BNXT_EVB_MODE_VEPA 2
+ uint8_t evb_mode;
+};
+
+/* Max wait time is 10 * 100ms = 1s */
+#define BNXT_LINK_WAIT_CNT 10
+#define BNXT_LINK_WAIT_INTERVAL 100
+struct bnxt_link_info {
+ uint32_t phy_flags;
+ uint8_t mac_type;
+ uint8_t phy_link_status;
+ uint8_t loop_back;
+ uint8_t link_up;
+ uint8_t duplex;
+ uint8_t pause;
+ uint8_t force_pause;
+ uint8_t auto_pause;
+ uint8_t auto_mode;
+#define PHY_VER_LEN 3
+ uint8_t phy_ver[PHY_VER_LEN];
+ uint16_t link_speed;
+ uint16_t support_speeds;
+ uint16_t auto_link_speed;
+ uint16_t force_link_speed;
+ uint16_t auto_link_speed_mask;
+ uint32_t preemphasis;
+ uint8_t phy_type;
+ uint8_t media_type;
+};
+
+#define BNXT_COS_QUEUE_COUNT 8
+struct bnxt_cos_queue_info {
+ uint8_t id;
+ uint8_t profile;
+};
+
+struct rte_flow {
+ STAILQ_ENTRY(rte_flow) next;
+ struct bnxt_filter_info *filter;
+ struct bnxt_vnic_info *vnic;
+};
+
+struct bnxt_ptp_cfg {
+#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
+#define BNXT_GRCPF_REG_SYNC_TIME 0x480
+#define BNXT_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+ struct rte_timecounter tc;
+ struct rte_timecounter tx_tstamp_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct bnxt *bp;
+#define BNXT_MAX_TX_TS 1
+ uint16_t rxctl;
+#define BNXT_PTP_MSG_SYNC (1 << 0)
+#define BNXT_PTP_MSG_DELAY_REQ (1 << 1)
+#define BNXT_PTP_MSG_PDELAY_REQ (1 << 2)
+#define BNXT_PTP_MSG_PDELAY_RESP (1 << 3)
+#define BNXT_PTP_MSG_FOLLOW_UP (1 << 8)
+#define BNXT_PTP_MSG_DELAY_RESP (1 << 9)
+#define BNXT_PTP_MSG_PDELAY_RESP_FOLLOW_UP (1 << 10)
+#define BNXT_PTP_MSG_ANNOUNCE (1 << 11)
+#define BNXT_PTP_MSG_SIGNALING (1 << 12)
+#define BNXT_PTP_MSG_MANAGEMENT (1 << 13)
+#define BNXT_PTP_MSG_EVENTS (BNXT_PTP_MSG_SYNC | \
+ BNXT_PTP_MSG_DELAY_REQ | \
+ BNXT_PTP_MSG_PDELAY_REQ | \
+ BNXT_PTP_MSG_PDELAY_RESP)
+ uint8_t tx_tstamp_en:1;
+ int rx_filter;
+
+#define BNXT_PTP_RX_TS_L 0
+#define BNXT_PTP_RX_TS_H 1
+#define BNXT_PTP_RX_SEQ 2
+#define BNXT_PTP_RX_FIFO 3
+#define BNXT_PTP_RX_FIFO_PENDING 0x1
+#define BNXT_PTP_RX_FIFO_ADV 4
+#define BNXT_PTP_RX_REGS 5
+
+#define BNXT_PTP_TX_TS_L 0
+#define BNXT_PTP_TX_TS_H 1
+#define BNXT_PTP_TX_SEQ 2
+#define BNXT_PTP_TX_FIFO 3
+#define BNXT_PTP_TX_FIFO_EMPTY 0x2
+#define BNXT_PTP_TX_REGS 4
+ uint32_t rx_regs[BNXT_PTP_RX_REGS];
+ uint32_t rx_mapped_regs[BNXT_PTP_RX_REGS];
+ uint32_t tx_regs[BNXT_PTP_TX_REGS];
+ uint32_t tx_mapped_regs[BNXT_PTP_TX_REGS];
+};
+
+struct bnxt_coal {
+ uint16_t num_cmpl_aggr_int;
+ uint16_t num_cmpl_dma_aggr;
+ uint16_t num_cmpl_dma_aggr_during_int;
+ uint16_t int_lat_tmr_max;
+ uint16_t int_lat_tmr_min;
+ uint16_t cmpl_aggr_dma_tmr;
+ uint16_t cmpl_aggr_dma_tmr_during_int;
+};
+
+#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
+struct bnxt {
+ void *bar0;
+
+ struct rte_eth_dev *eth_dev;
+ struct rte_eth_rss_conf rss_conf;
+ struct rte_pci_device *pdev;
+ void *doorbell_base;
+
+ uint32_t flags;
+#define BNXT_FLAG_REGISTERED (1 << 0)
+#define BNXT_FLAG_VF (1 << 1)
+#define BNXT_FLAG_PORT_STATS (1 << 2)
+#define BNXT_FLAG_JUMBO (1 << 3)
+#define BNXT_FLAG_SHORT_CMD (1 << 4)
+#define BNXT_FLAG_UPDATE_HASH (1 << 5)
+#define BNXT_FLAG_PTP_SUPPORTED (1 << 6)
+#define BNXT_FLAG_MULTI_HOST (1 << 7)
+#define BNXT_FLAG_NEW_RM (1 << 30)
+#define BNXT_FLAG_INIT_DONE (1 << 31)
+#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
+#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+#define BNXT_NPAR(bp) ((bp)->port_partition_type)
+#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
+#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
+
+ unsigned int rx_nr_rings;
+ unsigned int rx_cp_nr_rings;
+ struct bnxt_rx_queue **rx_queues;
+ const void *rx_mem_zone;
+ struct rx_port_stats *hw_rx_port_stats;
+ rte_iova_t hw_rx_port_stats_map;
+
+ unsigned int tx_nr_rings;
+ unsigned int tx_cp_nr_rings;
+ struct bnxt_tx_queue **tx_queues;
+ const void *tx_mem_zone;
+ struct tx_port_stats *hw_tx_port_stats;
+ rte_iova_t hw_tx_port_stats_map;
+
+ /* Default completion ring */
+ struct bnxt_cp_ring_info *def_cp_ring;
+ uint32_t max_ring_grps;
+ struct bnxt_ring_grp_info *grp_info;
+
+ unsigned int nr_vnics;
+
+ struct bnxt_vnic_info *vnic_info;
+ STAILQ_HEAD(, bnxt_vnic_info) free_vnic_list;
+
+ struct bnxt_filter_info *filter_info;
+ STAILQ_HEAD(, bnxt_filter_info) free_filter_list;
+
+ /* VNIC pointer for flow filter (VMDq) pools */
+#define MAX_FF_POOLS 256
+ STAILQ_HEAD(, bnxt_vnic_info) ff_pool[MAX_FF_POOLS];
+
+ struct bnxt_irq *irq_tbl;
+
+#define MAX_NUM_MAC_ADDR 32
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+
+ uint16_t hwrm_cmd_seq;
+ void *hwrm_cmd_resp_addr;
+ rte_iova_t hwrm_cmd_resp_dma_addr;
+ void *hwrm_short_cmd_req_addr;
+ rte_iova_t hwrm_short_cmd_req_dma_addr;
+ rte_spinlock_t hwrm_lock;
+ uint16_t max_req_len;
+ uint16_t max_resp_len;
+
+ struct bnxt_link_info link_info;
+ struct bnxt_cos_queue_info cos_queue[BNXT_COS_QUEUE_COUNT];
+ uint8_t tx_cosq_id;
+
+ uint16_t fw_fid;
+ uint8_t dflt_mac_addr[ETHER_ADDR_LEN];
+ uint16_t max_rsscos_ctx;
+ uint16_t max_cp_rings;
+ uint16_t max_tx_rings;
+ uint16_t max_rx_rings;
+ uint16_t max_l2_ctx;
+ uint16_t max_vnics;
+ uint16_t max_stat_ctx;
+ uint16_t vlan;
+ struct bnxt_pf_info pf;
+ uint8_t port_partition_type;
+ uint8_t dev_stopped;
+ uint8_t vxlan_port_cnt;
+ uint8_t geneve_port_cnt;
+ uint16_t vxlan_port;
+ uint16_t geneve_port;
+ uint16_t vxlan_fw_dst_port_id;
+ uint16_t geneve_fw_dst_port_id;
+ uint32_t fw_ver;
+ uint32_t hwrm_spec_code;
+
+ struct bnxt_led_info leds[BNXT_MAX_LED];
+ uint8_t num_leds;
+ struct bnxt_ptp_cfg *ptp_cfg;
+ uint16_t vf_resv_strategy;
+};
+
+int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
+int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
+
+bool is_bnxt_supported(struct rte_eth_dev *dev);
+bool bnxt_stratus_device(struct bnxt *bp);
+extern const struct rte_flow_ops bnxt_flow_ops;
+
+extern int bnxt_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, bnxt_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt, ## args)
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.c
new file mode 100644
index 00000000..ff20b6fd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.c
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_ring.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * Async event handling
+ */
+void bnxt_handle_async_event(struct bnxt *bp,
+ struct cmpl_base *cmp)
+{
+ struct hwrm_async_event_cmpl *async_cmp =
+ (struct hwrm_async_event_cmpl *)cmp;
+ uint16_t event_id = rte_le_to_cpu_16(async_cmp->event_id);
+
+ /* TODO: HWRM async events are not defined yet */
+ /* Needs to handle: link events, error events, etc. */
+ switch (event_id) {
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
+ /* FALLTHROUGH */
+ bnxt_link_update_op(bp->eth_dev, 1);
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+ PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+ PMD_DRV_LOG(INFO, "Async event: VF config changed\n");
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
+ PMD_DRV_LOG(INFO, "Port conn async event\n");
+ break;
+ default:
+ PMD_DRV_LOG(INFO, "handle_async_event id = 0x%x\n", event_id);
+ break;
+ }
+}
+
+void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
+{
+ struct hwrm_exec_fwd_resp_input *fwreq;
+ struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl;
+ struct input *fwd_cmd;
+ uint16_t fw_vf_id;
+ uint16_t vf_id;
+ uint16_t req_len;
+ int rc;
+
+ if (bp->pf.active_vfs <= 0) {
+ PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
+ return;
+ }
+
+ /* Qualify the fwd request */
+ fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id);
+ vf_id = fw_vf_id - bp->pf.first_vf_id;
+
+ req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) &
+ HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
+ HWRM_FWD_REQ_CMPL_REQ_LEN_SFT;
+ if (req_len > sizeof(fwreq->encap_request))
+ req_len = sizeof(fwreq->encap_request);
+
+ /* Locate VF's forwarded command */
+ fwd_cmd = (struct input *)bp->pf.vf_info[vf_id].req_buf;
+
+ if (fw_vf_id < bp->pf.first_vf_id ||
+ fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
+ PMD_DRV_LOG(ERR,
+ "FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
+ fw_vf_id, bp->pf.first_vf_id,
+ (bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
+ bp->pf.first_vf_id, bp->pf.active_vfs);
+ goto reject;
+ }
+
+ if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) {
+ /*
+ * In older firmware versions, the MAC had to be all zeros for
+ * the VF to set it's MAC via hwrm_func_vf_cfg. Set to all
+ * zeros if it's being configured and has been ok'd by caller.
+ */
+ if (fwd_cmd->req_type == HWRM_FUNC_VF_CFG) {
+ struct hwrm_func_vf_cfg_input *vfc = (void *)fwd_cmd;
+
+ if (vfc->enables &
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR) {
+ bnxt_hwrm_func_vf_mac(bp, vf_id,
+ (const uint8_t *)"\x00\x00\x00\x00\x00");
+ }
+ }
+ if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {
+ struct hwrm_cfa_l2_set_rx_mask_input *srm =
+ (void *)fwd_cmd;
+
+ srm->vlan_tag_tbl_addr = rte_cpu_to_le_64(0);
+ srm->num_vlan_tags = rte_cpu_to_le_32(0);
+ srm->mask &= ~rte_cpu_to_le_32(
+ HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY |
+ HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |
+ HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
+ }
+ /* Forward */
+ rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to send FWD req VF 0x%x, type 0x%x.\n",
+ fw_vf_id - bp->pf.first_vf_id,
+ rte_le_to_cpu_16(fwd_cmd->req_type));
+ }
+ return;
+ }
+
+reject:
+ rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to send REJECT req VF 0x%x, type 0x%x.\n",
+ fw_vf_id - bp->pf.first_vf_id,
+ rte_le_to_cpu_16(fwd_cmd->req_type));
+ }
+
+ return;
+}
+
+int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
+{
+ bool evt = 0;
+
+ if (bp == NULL || cmp == NULL) {
+ PMD_DRV_LOG(ERR, "invalid NULL argument\n");
+ return evt;
+ }
+
+ switch (CMP_TYPE(cmp)) {
+ case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+ /* Handle any async event */
+ bnxt_handle_async_event(bp, cmp);
+ evt = 1;
+ break;
+ case CMPL_BASE_TYPE_HWRM_FWD_RESP:
+ /* Handle HWRM forwarded responses */
+ bnxt_handle_fwd_req(bp, cmp);
+ evt = 1;
+ break;
+ default:
+ /* Ignore any other events */
+ PMD_DRV_LOG(INFO, "Ignoring %02x completion\n", CMP_TYPE(cmp));
+ break;
+ }
+
+ return evt;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.h
new file mode 100644
index 00000000..c7af5698
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_CPR_H_
+#define _BNXT_CPR_H_
+#include <stdbool.h>
+
+#include <rte_io.h>
+
+#define CMP_VALID(cmp, raw_cons, ring) \
+ (!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == \
+ !((raw_cons) & ((ring)->ring_size)))
+
+#define CMPL_VALID(cmp, v) \
+ (!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == !(v))
+
+#define CMP_TYPE(cmp) \
+ (((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)
+
+#define ADV_RAW_CMP(idx, n) ((idx) + (n))
+#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
+#define RING_CMP(ring, idx) ((idx) & (ring)->ring_mask)
+#define RING_CMPL(ring_mask, idx) ((idx) & (ring_mask))
+#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
+#define FLIP_VALID(cons, mask, val) ((cons) >= (mask) ? !(val) : (val))
+
+#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
+#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
+
+#define NEXT_CMPL(cpr, idx, v, inc) do { \
+ (idx) += (inc); \
+ if (unlikely((idx) == (cpr)->cp_ring_struct->ring_size)) { \
+ (v) = !(v); \
+ (idx) = 0; \
+ } \
+} while (0)
+#define B_CP_DB_REARM(cpr, raw_cons) \
+ rte_write32((DB_CP_REARM_FLAGS | \
+ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
+ ((cpr)->cp_doorbell))
+
+#define B_CP_DB_ARM(cpr) rte_write32((DB_KEY_CP), ((cpr)->cp_doorbell))
+#define B_CP_DB_DISARM(cpr) (*(uint32_t *)((cpr)->cp_doorbell) = \
+ DB_KEY_CP | DB_IRQ_DIS)
+
+#define B_CP_DB_IDX_ARM(cpr, cons) \
+ (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_REARM_FLAGS | \
+ (cons)))
+
+#define B_CP_DB_IDX_DISARM(cpr, cons) do { \
+ rte_smp_wmb(); \
+ (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_FLAGS | \
+ (cons)); \
+} while (0)
+#define B_CP_DIS_DB(cpr, raw_cons) \
+ rte_write32((DB_CP_FLAGS | \
+ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
+ ((cpr)->cp_doorbell))
+#define B_CP_DB(cpr, raw_cons, ring_mask) \
+ rte_write32((DB_CP_FLAGS | \
+ RING_CMPL((ring_mask), raw_cons)), \
+ ((cpr)->cp_doorbell))
+
+struct bnxt_ring;
+struct bnxt_cp_ring_info {
+ uint32_t cp_raw_cons;
+ void *cp_doorbell;
+
+ struct cmpl_base *cp_desc_ring;
+
+ rte_iova_t cp_desc_mapping;
+
+ struct ctx_hw_stats *hw_stats;
+ rte_iova_t hw_stats_map;
+ uint32_t hw_stats_ctx_id;
+
+ struct bnxt_ring *cp_ring_struct;
+ uint16_t cp_cons;
+ bool valid;
+};
+
+#define RX_CMP_L2_ERRORS \
+ (RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_PKT_CMPL_ERRORS_CRC_ERROR)
+
+struct bnxt;
+void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
+void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp);
+int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_ethdev.c
new file mode 100644
index 00000000..cc7e4391
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_ethdev.c
@@ -0,0 +1,3552 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include <rte_dev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_irq.h"
+#include "bnxt_ring.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
+#include "bnxt_stats.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+#include "bnxt_nvm_defs.h"
+#include "bnxt_util.h"
+
+#define DRV_MODULE_NAME "bnxt"
+static const char bnxt_version[] =
+ "Broadcom NetXtreme driver " DRV_MODULE_NAME "\n";
+int bnxt_logtype_driver;
+
+#define PCI_VENDOR_ID_BROADCOM 0x14E4
+
+#define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606
+#define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609
+#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
+#define BROADCOM_DEV_ID_57414_VF 0x16c1
+#define BROADCOM_DEV_ID_57301 0x16c8
+#define BROADCOM_DEV_ID_57302 0x16c9
+#define BROADCOM_DEV_ID_57304_PF 0x16ca
+#define BROADCOM_DEV_ID_57304_VF 0x16cb
+#define BROADCOM_DEV_ID_57417_MF 0x16cc
+#define BROADCOM_DEV_ID_NS2 0x16cd
+#define BROADCOM_DEV_ID_57311 0x16ce
+#define BROADCOM_DEV_ID_57312 0x16cf
+#define BROADCOM_DEV_ID_57402 0x16d0
+#define BROADCOM_DEV_ID_57404 0x16d1
+#define BROADCOM_DEV_ID_57406_PF 0x16d2
+#define BROADCOM_DEV_ID_57406_VF 0x16d3
+#define BROADCOM_DEV_ID_57402_MF 0x16d4
+#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
+#define BROADCOM_DEV_ID_57412 0x16d6
+#define BROADCOM_DEV_ID_57414 0x16d7
+#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
+#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
+#define BROADCOM_DEV_ID_5741X_VF 0x16dc
+#define BROADCOM_DEV_ID_57412_MF 0x16de
+#define BROADCOM_DEV_ID_57314 0x16df
+#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
+#define BROADCOM_DEV_ID_5731X_VF 0x16e1
+#define BROADCOM_DEV_ID_57417_SFP 0x16e2
+#define BROADCOM_DEV_ID_57416_SFP 0x16e3
+#define BROADCOM_DEV_ID_57317_SFP 0x16e4
+#define BROADCOM_DEV_ID_57404_MF 0x16e7
+#define BROADCOM_DEV_ID_57406_MF 0x16e8
+#define BROADCOM_DEV_ID_57407_SFP 0x16e9
+#define BROADCOM_DEV_ID_57407_MF 0x16ea
+#define BROADCOM_DEV_ID_57414_MF 0x16ec
+#define BROADCOM_DEV_ID_57416_MF 0x16ee
+#define BROADCOM_DEV_ID_58802 0xd802
+#define BROADCOM_DEV_ID_58804 0xd804
+#define BROADCOM_DEV_ID_58808 0x16f0
+#define BROADCOM_DEV_ID_58802_VF 0xd800
+
+static const struct rte_pci_id bnxt_pci_id_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
+ BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
+ BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+#define BNXT_ETH_RSS_SUPPORT ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP)
+
+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ DEV_TX_OFFLOAD_GRE_TNL_TSO | \
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
+ DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_IPV4_CKSUM | \
+ DEV_RX_OFFLOAD_UDP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_CKSUM | \
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_CRC_STRIP | \
+ DEV_RX_OFFLOAD_KEEP_CRC | \
+ DEV_RX_OFFLOAD_TCP_LRO)
+
+static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
+static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
+static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
+static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
+
+/***********************/
+
+/*
+ * High level utility functions
+ */
+
+static void bnxt_free_mem(struct bnxt *bp)
+{
+ bnxt_free_filter_mem(bp);
+ bnxt_free_vnic_attributes(bp);
+ bnxt_free_vnic_mem(bp);
+
+ bnxt_free_stats(bp);
+ bnxt_free_tx_rings(bp);
+ bnxt_free_rx_rings(bp);
+}
+
+static int bnxt_alloc_mem(struct bnxt *bp)
+{
+ int rc;
+
+ rc = bnxt_alloc_vnic_mem(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_vnic_attributes(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_filter_mem(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ return 0;
+
+alloc_mem_err:
+ bnxt_free_mem(bp);
+ return rc;
+}
+
+static int bnxt_init_chip(struct bnxt *bp)
+{
+ struct bnxt_rx_queue *rxq;
+ struct rte_eth_link new;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
+ uint32_t queue_id, base = BNXT_MISC_VEC_ID;
+ uint32_t vec = BNXT_MISC_VEC_ID;
+ unsigned int i, j;
+ int rc;
+
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ if (bp->eth_dev->data->mtu > ETHER_MTU) {
+ bp->eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ bp->flags |= BNXT_FLAG_JUMBO;
+ } else {
+ bp->eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ bp->flags &= ~BNXT_FLAG_JUMBO;
+ }
+
+ rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_alloc_hwrm_rings(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_alloc_all_hwrm_ring_grps(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_mq_rx_configure(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ /* VNIC configuration */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;
+
+ vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
+ if (!vnic->fw_grp_ids) {
+ PMD_DRV_LOG(ERR,
+ "Failed to alloc %d bytes for group ids\n",
+ size);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ memset(vnic->fw_grp_ids, -1, size);
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
+ i, rc);
+ goto err_out;
+ }
+
+ /* Alloc RSS context only if RSS mode is enabled */
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "HWRM vnic %d ctx alloc failure rc: %x\n",
+ i, rc);
+ goto err_out;
+ }
+ }
+
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
+ i, rc);
+ goto err_out;
+ }
+
+ rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "HWRM vnic %d filter failure rc: %x\n",
+ i, rc);
+ goto err_out;
+ }
+
+ for (j = 0; j < bp->rx_nr_rings; j++) {
+ rxq = bp->eth_dev->data->rx_queues[j];
+
+ if (rxq->rx_deferred_start)
+ rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+ }
+
+ rc = bnxt_vnic_rss_configure(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "HWRM vnic set RSS failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+
+ if (bp->eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_TCP_LRO)
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
+ else
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
+ }
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "HWRM cfa l2 rx mask failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ /* check and configure queue intr-vector mapping */
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
+ bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = bp->eth_dev->data->nb_rx_queues;
+ PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
+ if (intr_vector > bp->rx_cp_nr_rings) {
+ PMD_DRV_LOG(ERR, "At most %d intr queues supported",
+ bp->rx_cp_nr_rings);
+ return -ENOTSUP;
+ }
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ bp->eth_dev->data->nb_rx_queues *
+ sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", bp->eth_dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
+ "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
+ intr_handle->intr_vec, intr_handle->nb_efd,
+ intr_handle->max_intr);
+ }
+
+ for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
+ queue_id++) {
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
+ rc = bnxt_get_hwrm_link_config(bp, &new);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ if (!bp->link_info.link_up) {
+ rc = bnxt_set_hwrm_link_config(bp, true);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "HWRM link config failure rc: %x\n", rc);
+ goto err_out;
+ }
+ }
+ bnxt_print_link_info(bp->eth_dev);
+
+ return 0;
+
+err_out:
+ bnxt_free_all_hwrm_resources(bp);
+
+ /* Some of the error status returned by FW may not be from errno.h */
+ if (rc > 0)
+ rc = -EIO;
+
+ return rc;
+}
+
+static int bnxt_shutdown_nic(struct bnxt *bp)
+{
+ bnxt_free_all_hwrm_resources(bp);
+ bnxt_free_all_filters(bp);
+ bnxt_free_all_vnics(bp);
+ return 0;
+}
+
+static int bnxt_init_nic(struct bnxt *bp)
+{
+ int rc;
+
+ rc = bnxt_init_ring_grps(bp);
+ if (rc)
+ return rc;
+
+ bnxt_init_vnics(bp);
+ bnxt_init_filters(bp);
+
+ return 0;
+}
+
+/*
+ * Device configuration and status function
+ */
+
+static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint16_t max_vnics, i, j, vpool, vrxq;
+ unsigned int max_rx_rings;
+
+ /* MAC Specifics */
+ dev_info->max_mac_addrs = bp->max_l2_ctx;
+ dev_info->max_hash_mac_addrs = 0;
+
+ /* PF/VF specifics */
+ if (BNXT_PF(bp))
+ dev_info->max_vfs = bp->pdev->max_vfs;
+ max_rx_rings = RTE_MIN(bp->max_vnics, bp->max_stat_ctx);
+ /* For the sake of symmetry, max_rx_queues = max_tx_queues */
+ dev_info->max_rx_queues = max_rx_rings;
+ dev_info->max_tx_queues = max_rx_rings;
+ dev_info->reta_size = HW_HASH_INDEX_SIZE;
+ dev_info->hash_key_size = 40;
+ max_vnics = bp->max_vnics;
+
+ /* Fast path specifics */
+ dev_info->min_rx_bufsize = 1;
+ dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + VLAN_TAG_SIZE;
+
+ dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
+ if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+ dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
+ dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
+
+ /* *INDENT-OFF* */
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = 8,
+ .hthresh = 8,
+ .wthresh = 0,
+ },
+ .rx_free_thresh = 32,
+ /* If no descriptors available, pkts are dropped by default */
+ .rx_drop_en = 1,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = 32,
+ .hthresh = 0,
+ .wthresh = 0,
+ },
+ .tx_free_thresh = 32,
+ .tx_rs_thresh = 32,
+ };
+ eth_dev->data->dev_conf.intr_conf.lsc = 1;
+
+ eth_dev->data->dev_conf.intr_conf.rxq = 1;
+ dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
+ dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
+ dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
+ dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
+
+ /* *INDENT-ON* */
+
+ /*
+ * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
+ * need further investigation.
+ */
+
+ /* VMDq resources */
+ vpool = 64; /* ETH_64_POOLS */
+ vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
+ for (i = 0; i < 4; vpool >>= 1, i++) {
+ if (max_vnics > vpool) {
+ for (j = 0; j < 5; vrxq >>= 1, j++) {
+ if (dev_info->max_rx_queues > vrxq) {
+ if (vpool > vrxq)
+ vpool = vrxq;
+ goto found;
+ }
+ }
+ /* Not enough resources to support VMDq */
+ break;
+ }
+ }
+ /* Not enough resources to support VMDq */
+ vpool = 0;
+ vrxq = 0;
+found:
+ dev_info->max_vmdq_pools = vpool;
+ dev_info->vmdq_queue_num = vrxq;
+
+ dev_info->vmdq_pool_base = 0;
+ dev_info->vmdq_queue_base = 0;
+}
+
+/* Configure the device based on the configuration provided */
+static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+ int rc;
+
+ bp->rx_queues = (void *)eth_dev->data->rx_queues;
+ bp->tx_queues = (void *)eth_dev->data->tx_queues;
+ bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
+ bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
+
+ if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
+ rc = bnxt_hwrm_check_vf_rings(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
+ return -ENOSPC;
+ }
+
+ rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
+ return -ENOSPC;
+ }
+ } else {
+ /* legacy driver needs to get updated values */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Inherit new configurations */
+ if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
+ eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
+ eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
+ bp->max_cp_rings ||
+ eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
+ bp->max_stat_ctx ||
+ (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps ||
+ (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+ bp->max_vnics < eth_dev->data->nb_rx_queues)) {
+ PMD_DRV_LOG(ERR,
+ "Insufficient resources to support requested config\n");
+ PMD_DRV_LOG(ERR,
+ "Num Queues Requested: Tx %d, Rx %d\n",
+ eth_dev->data->nb_tx_queues,
+ eth_dev->data->nb_rx_queues);
+ PMD_DRV_LOG(ERR,
+ "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
+ bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
+ bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
+ return -ENOSPC;
+ }
+
+ bp->rx_cp_nr_rings = bp->rx_nr_rings;
+ bp->tx_cp_nr_rings = bp->tx_nr_rings;
+
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ eth_dev->data->mtu =
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS;
+ bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
+ }
+ return 0;
+}
+
+static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
+{
+ struct rte_eth_link *link = &eth_dev->data->dev_link;
+
+ if (link->link_status)
+ PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
+ eth_dev->data->port_id,
+ (uint32_t)link->link_speed,
+ (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex\n"));
+ else
+ PMD_DRV_LOG(INFO, "Port %d Link Down\n",
+ eth_dev->data->port_id);
+}
+
+static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
+{
+ bnxt_print_link_info(eth_dev);
+ return 0;
+}
+
+static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+ int vlan_mask = 0;
+ int rc;
+
+ if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ PMD_DRV_LOG(ERR,
+ "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
+ bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ }
+ bp->dev_stopped = 0;
+
+ rc = bnxt_init_chip(bp);
+ if (rc)
+ goto error;
+
+ bnxt_link_update_op(eth_dev, 1);
+
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ vlan_mask |= ETH_VLAN_FILTER_MASK;
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ vlan_mask |= ETH_VLAN_STRIP_MASK;
+ rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
+ if (rc)
+ goto error;
+
+ bp->flags |= BNXT_FLAG_INIT_DONE;
+ return 0;
+
+error:
+ bnxt_shutdown_nic(bp);
+ bnxt_free_tx_mbufs(bp);
+ bnxt_free_rx_mbufs(bp);
+ return rc;
+}
+
+static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ int rc = 0;
+
+ if (!bp->link_info.link_up)
+ rc = bnxt_set_hwrm_link_config(bp, true);
+ if (!rc)
+ eth_dev->data->dev_link.link_status = 1;
+
+ bnxt_print_link_info(eth_dev);
+ return 0;
+}
+
+static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ eth_dev->data->dev_link.link_status = 0;
+ bnxt_set_hwrm_link_config(bp, false);
+ bp->link_info.link_up = 0;
+
+ return 0;
+}
+
+/* Unload the driver, release resources */
+static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ bp->flags &= ~BNXT_FLAG_INIT_DONE;
+ if (bp->eth_dev->data->dev_started) {
+ /* TBD: STOP HW queues DMA */
+ eth_dev->data->dev_link.link_status = 0;
+ }
+ bnxt_set_hwrm_link_config(bp, false);
+ bnxt_hwrm_port_clr_stats(bp);
+ bnxt_free_tx_mbufs(bp);
+ bnxt_free_rx_mbufs(bp);
+ bnxt_shutdown_nic(bp);
+ bp->dev_stopped = 1;
+}
+
+static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (bp->dev_stopped == 0)
+ bnxt_dev_stop_op(eth_dev);
+
+ bnxt_free_mem(bp);
+ if (eth_dev->data->mac_addrs != NULL) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ }
+ if (bp->grp_info != NULL) {
+ rte_free(bp->grp_info);
+ bp->grp_info = NULL;
+ }
+
+ bnxt_dev_uninit(eth_dev);
+}
+
+static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
+ uint32_t index)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
+ struct bnxt_vnic_info *vnic;
+ struct bnxt_filter_info *filter, *temp_filter;
+ uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS);
+ uint32_t i;
+
+ /*
+ * Loop through all VNICs from the specified filter flow pools to
+ * remove the corresponding MAC addr filter
+ */
+ for (i = 0; i < pool; i++) {
+ if (!(pool_mask & (1ULL << i)))
+ continue;
+
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+ if (filter->mac_index == index) {
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
+ filter->mac_index = INVALID_MAC_INDEX;
+ memset(&filter->l2_addr, 0,
+ ETHER_ADDR_LEN);
+ STAILQ_INSERT_TAIL(
+ &bp->free_filter_list,
+ filter, next);
+ }
+ filter = temp_filter;
+ }
+ }
+ }
+}
+
+static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
+ struct bnxt_filter_info *filter;
+
+ if (BNXT_VF(bp)) {
+ PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
+ return -ENOTSUP;
+ }
+
+ if (!vnic) {
+ PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
+ return -EINVAL;
+ }
+ /* Attach requested MAC address to the new l2_filter */
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ if (filter->mac_index == index) {
+ PMD_DRV_LOG(ERR,
+ "MAC addr already existed for pool %d\n", pool);
+ return 0;
+ }
+ }
+ filter = bnxt_alloc_filter(bp);
+ if (!filter) {
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
+ return -ENODEV;
+ }
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+ filter->mac_index = index;
+ memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
+ return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
+}
+
+int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
+{
+ int rc = 0;
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct rte_eth_link new;
+ unsigned int cnt = BNXT_LINK_WAIT_CNT;
+
+ memset(&new, 0, sizeof(new));
+ do {
+ /* Retrieve link info from hardware */
+ rc = bnxt_get_hwrm_link_config(bp, &new);
+ if (rc) {
+ new.link_speed = ETH_LINK_SPEED_100M;
+ new.link_duplex = ETH_LINK_FULL_DUPLEX;
+ PMD_DRV_LOG(ERR,
+ "Failed to retrieve link rc = 0x%x!\n", rc);
+ goto out;
+ }
+ rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
+
+ if (!wait_to_complete)
+ break;
+ } while (!new.link_status && cnt--);
+
+out:
+ /* Timed out or success */
+ if (new.link_status != eth_dev->data->dev_link.link_status ||
+ new.link_speed != eth_dev->data->dev_link.link_speed) {
+ memcpy(&eth_dev->data->dev_link, &new,
+ sizeof(struct rte_eth_link));
+
+ _rte_eth_dev_callback_process(eth_dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+
+ bnxt_print_link_info(eth_dev);
+ }
+
+ return rc;
+}
+
+static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ vnic = &bp->vnic_info[0];
+
+ vnic->flags |= BNXT_VNIC_INFO_PROMISC;
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
+static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ vnic = &bp->vnic_info[0];
+
+ vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
+static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ vnic = &bp->vnic_info[0];
+
+ vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
+static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ vnic = &bp->vnic_info[0];
+
+ vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
+static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_vnic_info *vnic;
+ int i;
+
+ if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+ return -EINVAL;
+
+ if (reta_size != HW_HASH_INDEX_SIZE) {
+ PMD_DRV_LOG(ERR, "The configured hash table lookup size "
+ "(%d) must equal the size supported by the hardware "
+ "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+ return -EINVAL;
+ }
+ /* Update the RSS VNIC(s) */
+ for (i = 0; i < MAX_FF_POOLS; i++) {
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ memcpy(vnic->rss_table, reta_conf, reta_size);
+
+ bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ }
+ }
+ return 0;
+}
+
+static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct rte_intr_handle *intr_handle
+ = &bp->pdev->intr_handle;
+
+ /* Retrieve from the default VNIC */
+ if (!vnic)
+ return -EINVAL;
+ if (!vnic->rss_table)
+ return -EINVAL;
+
+ if (reta_size != HW_HASH_INDEX_SIZE) {
+ PMD_DRV_LOG(ERR, "The configured hash table lookup size "
+ "(%d) must equal the size supported by the hardware "
+ "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+ return -EINVAL;
+ }
+ /* EW - need to revisit here copying from uint64_t to uint16_t */
+ memcpy(reta_conf, vnic->rss_table, reta_size);
+
+ if (rte_intr_allow_others(intr_handle)) {
+ if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
+ bnxt_dev_lsc_intr_setup(eth_dev);
+ }
+
+ return 0;
+}
+
+static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_vnic_info *vnic;
+ uint16_t hash_type = 0;
+ int i;
+
+ /*
+ * If RSS enablement were different than dev_configure,
+ * then return -EINVAL
+ */
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (!rss_conf->rss_hf)
+ PMD_DRV_LOG(ERR, "Hash type NONE\n");
+ } else {
+ if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
+ return -EINVAL;
+ }
+
+ bp->flags |= BNXT_FLAG_UPDATE_HASH;
+ memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV4)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_IPV6)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+
+ /* Update the RSS VNIC(s) */
+ for (i = 0; i < MAX_FF_POOLS; i++) {
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ vnic->hash_type = hash_type;
+
+ /*
+ * Use the supplied key if the key length is
+ * acceptable and the rss_key is not NULL
+ */
+ if (rss_conf->rss_key &&
+ rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
+ memcpy(vnic->rss_hash_key, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+
+ bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ }
+ }
+ return 0;
+}
+
+static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ int len;
+ uint32_t hash_types;
+
+ /* RSS configuration is the same for all VNICs */
+ if (vnic && vnic->rss_hash_key) {
+ if (rss_conf->rss_key) {
+ len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
+ rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
+ memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
+ }
+
+ hash_types = vnic->hash_type;
+ rss_conf->rss_hf = 0;
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
+ rss_conf->rss_hf |= ETH_RSS_IPV4;
+ hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ hash_types &=
+ ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ hash_types &=
+ ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
+ rss_conf->rss_hf |= ETH_RSS_IPV6;
+ hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ hash_types &=
+ ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ hash_types &=
+ ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+ }
+ if (hash_types) {
+ PMD_DRV_LOG(ERR,
+ "Unknwon RSS config from firmware (%08x), RSS disabled",
+ vnic->hash_type);
+ return -ENOTSUP;
+ }
+ } else {
+ rss_conf->rss_hf = 0;
+ }
+ return 0;
+}
+
+static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_link link_info;
+ int rc;
+
+ rc = bnxt_get_hwrm_link_config(bp, &link_info);
+ if (rc)
+ return rc;
+
+ memset(fc_conf, 0, sizeof(*fc_conf));
+ if (bp->link_info.auto_pause)
+ fc_conf->autoneg = 1;
+ switch (bp->link_info.pause) {
+ case 0:
+ fc_conf->mode = RTE_FC_NONE;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ break;
+ case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
+ HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
+ fc_conf->mode = RTE_FC_FULL;
+ break;
+ }
+ return 0;
+}
+
+static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
+ PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
+ return -ENOTSUP;
+ }
+
+ switch (fc_conf->mode) {
+ case RTE_FC_NONE:
+ bp->link_info.auto_pause = 0;
+ bp->link_info.force_pause = 0;
+ break;
+ case RTE_FC_RX_PAUSE:
+ if (fc_conf->autoneg) {
+ bp->link_info.auto_pause =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
+ bp->link_info.force_pause = 0;
+ } else {
+ bp->link_info.auto_pause = 0;
+ bp->link_info.force_pause =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
+ }
+ break;
+ case RTE_FC_TX_PAUSE:
+ if (fc_conf->autoneg) {
+ bp->link_info.auto_pause =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
+ bp->link_info.force_pause = 0;
+ } else {
+ bp->link_info.auto_pause = 0;
+ bp->link_info.force_pause =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
+ }
+ break;
+ case RTE_FC_FULL:
+ if (fc_conf->autoneg) {
+ bp->link_info.auto_pause =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
+ bp->link_info.force_pause = 0;
+ } else {
+ bp->link_info.auto_pause = 0;
+ bp->link_info.force_pause =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
+ }
+ break;
+ }
+ return bnxt_set_hwrm_link_config(bp, true);
+}
+
+/* Add UDP tunneling port */
+static int
+bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint16_t tunnel_type = 0;
+ int rc = 0;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (bp->vxlan_port_cnt) {
+ PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
+ udp_tunnel->udp_port);
+ if (bp->vxlan_port != udp_tunnel->udp_port) {
+ PMD_DRV_LOG(ERR, "Only one port allowed\n");
+ return -ENOSPC;
+ }
+ bp->vxlan_port_cnt++;
+ return 0;
+ }
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
+ bp->vxlan_port_cnt++;
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (bp->geneve_port_cnt) {
+ PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
+ udp_tunnel->udp_port);
+ if (bp->geneve_port != udp_tunnel->udp_port) {
+ PMD_DRV_LOG(ERR, "Only one port allowed\n");
+ return -ENOSPC;
+ }
+ bp->geneve_port_cnt++;
+ return 0;
+ }
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
+ bp->geneve_port_cnt++;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
+ return -ENOTSUP;
+ }
+ rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
+ tunnel_type);
+ return rc;
+}
+
+static int
+bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint16_t tunnel_type = 0;
+ uint16_t port = 0;
+ int rc = 0;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (!bp->vxlan_port_cnt) {
+ PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
+ return -EINVAL;
+ }
+ if (bp->vxlan_port != udp_tunnel->udp_port) {
+ PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
+ udp_tunnel->udp_port, bp->vxlan_port);
+ return -EINVAL;
+ }
+ if (--bp->vxlan_port_cnt)
+ return 0;
+
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
+ port = bp->vxlan_fw_dst_port_id;
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (!bp->geneve_port_cnt) {
+ PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
+ return -EINVAL;
+ }
+ if (bp->geneve_port != udp_tunnel->udp_port) {
+ PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
+ udp_tunnel->udp_port, bp->geneve_port);
+ return -EINVAL;
+ }
+ if (--bp->geneve_port_cnt)
+ return 0;
+
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
+ port = bp->geneve_fw_dst_port_id;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
+ return -ENOTSUP;
+ }
+
+ rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
+ if (!rc) {
+ if (tunnel_type ==
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
+ bp->vxlan_port = 0;
+ if (tunnel_type ==
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
+ bp->geneve_port = 0;
+ }
+ return rc;
+}
+
+static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
+{
+ struct bnxt_filter_info *filter, *temp_filter, *new_filter;
+ struct bnxt_vnic_info *vnic;
+ unsigned int i;
+ int rc = 0;
+ uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
+
+ /* Cycle through all VNICs */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ /*
+ * For each VNIC and each associated filter(s)
+ * if VLAN exists && VLAN matches vlan_id
+ * remove the MAC+VLAN filter
+ * add a new MAC only filter
+ * else
+ * VLAN filter doesn't exist, just skip and continue
+ */
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+
+ if (filter->enables & chk &&
+ filter->l2_ovlan == vlan_id) {
+ /* Must delete the filter */
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
+ STAILQ_INSERT_TAIL(
+ &bp->free_filter_list,
+ filter, next);
+
+ /*
+ * Need to examine to see if the MAC
+ * filter already existed or not before
+ * allocating a new one
+ */
+
+ new_filter = bnxt_alloc_filter(bp);
+ if (!new_filter) {
+ PMD_DRV_LOG(ERR,
+ "MAC/VLAN filter alloc failed\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+ STAILQ_INSERT_TAIL(&vnic->filter,
+ new_filter, next);
+ /* Inherit MAC from previous filter */
+ new_filter->mac_index =
+ filter->mac_index;
+ memcpy(new_filter->l2_addr,
+ filter->l2_addr, ETHER_ADDR_LEN);
+ /* MAC only filter */
+ rc = bnxt_hwrm_set_l2_filter(bp,
+ vnic->fw_vnic_id,
+ new_filter);
+ if (rc)
+ goto exit;
+ PMD_DRV_LOG(INFO,
+ "Del Vlan filter for %d\n",
+ vlan_id);
+ }
+ filter = temp_filter;
+ }
+ }
+ }
+exit:
+ return rc;
+}
+
+static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
+{
+ struct bnxt_filter_info *filter, *temp_filter, *new_filter;
+ struct bnxt_vnic_info *vnic;
+ unsigned int i;
+ int rc = 0;
+ uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
+ uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
+
+ /* Cycle through all VNICs */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ /*
+ * For each VNIC and each associated filter(s)
+ * if VLAN exists:
+ * if VLAN matches vlan_id
+ * VLAN filter already exists, just skip and continue
+ * else
+ * add a new MAC+VLAN filter
+ * else
+ * Remove the old MAC only filter
+ * Add a new MAC+VLAN filter
+ */
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+
+ if (filter->enables & chk) {
+ if (filter->l2_ovlan == vlan_id)
+ goto cont;
+ } else {
+ /* Must delete the MAC filter */
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
+ filter->l2_ovlan = 0;
+ STAILQ_INSERT_TAIL(
+ &bp->free_filter_list,
+ filter, next);
+ }
+ new_filter = bnxt_alloc_filter(bp);
+ if (!new_filter) {
+ PMD_DRV_LOG(ERR,
+ "MAC/VLAN filter alloc failed\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+ STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
+ next);
+ /* Inherit MAC from the previous filter */
+ new_filter->mac_index = filter->mac_index;
+ memcpy(new_filter->l2_addr, filter->l2_addr,
+ ETHER_ADDR_LEN);
+ /* MAC + VLAN ID filter */
+ new_filter->l2_ivlan = vlan_id;
+ new_filter->l2_ivlan_mask = 0xF000;
+ new_filter->enables |= en;
+ rc = bnxt_hwrm_set_l2_filter(bp,
+ vnic->fw_vnic_id,
+ new_filter);
+ if (rc)
+ goto exit;
+ PMD_DRV_LOG(INFO,
+ "Added Vlan filter for %d\n", vlan_id);
+cont:
+ filter = temp_filter;
+ }
+ }
+ }
+exit:
+ return rc;
+}
+
+static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
+ uint16_t vlan_id, int on)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ /* These operations apply to ALL existing MAC/VLAN filters */
+ if (on)
+ return bnxt_add_vlan_filter(bp, vlan_id);
+ else
+ return bnxt_del_vlan_filter(bp, vlan_id);
+}
+
+static int
+bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ unsigned int i;
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
+ /* Remove any VLAN filters programmed */
+ for (i = 0; i < 4095; i++)
+ bnxt_del_vlan_filter(bp, i);
+ }
+ PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
+ !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ vnic->vlan_strip = true;
+ else
+ vnic->vlan_strip = false;
+ bnxt_hwrm_vnic_cfg(bp, vnic);
+ }
+ PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
+ !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK)
+ PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n");
+
+ return 0;
+}
+
+static int
+bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ /* Default Filter is tied to VNIC 0 */
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_filter_info *filter;
+ int rc;
+
+ if (BNXT_VF(bp))
+ return -EPERM;
+
+ memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
+
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ /* Default Filter is at Index 0 */
+ if (filter->mac_index != 0)
+ continue;
+ rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+ if (rc)
+ return rc;
+ memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
+ memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+ filter->enables |=
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
+ rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
+ if (rc)
+ return rc;
+ filter->mac_index = 0;
+ PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
+ }
+
+ return 0;
+}
+
+static int
+bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ char *mc_addr_list = (char *)mc_addr_set;
+ struct bnxt_vnic_info *vnic;
+ uint32_t off = 0, i = 0;
+
+ vnic = &bp->vnic_info[0];
+
+ if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
+ vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
+ goto allmulti;
+ }
+
+ /* TODO Check for Duplicate mcast addresses */
+ vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
+ for (i = 0; i < nb_mc_addr; i++) {
+ memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
+ off += ETHER_ADDR_LEN;
+ }
+
+ vnic->mc_addr_cnt = i;
+
+allmulti:
+ return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
+static int
+bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
+ uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
+ uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
+ int ret;
+
+ ret = snprintf(fw_version, fw_size, "%d.%d.%d",
+ fw_major, fw_minor, fw_updt);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (uint32_t)ret)
+ return ret;
+ else
+ return 0;
+}
+
+static void
+bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct bnxt_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = 0;
+ qinfo->conf.rx_deferred_start = 0;
+}
+
+static void
+bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct bnxt_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = 0;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
+{
+ struct bnxt *bp = eth_dev->data->dev_private;
+ struct rte_eth_dev_info dev_info;
+ uint32_t max_dev_mtu;
+ uint32_t rc = 0;
+ uint32_t i;
+
+ bnxt_dev_info_get_op(eth_dev, &dev_info);
+ max_dev_mtu = dev_info.max_rx_pktlen -
+ ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;
+
+ if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
+ PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
+ ETHER_MIN_MTU, max_dev_mtu);
+ return -EINVAL;
+ }
+
+
+ if (new_mtu > ETHER_MTU) {
+ bp->flags |= BNXT_FLAG_JUMBO;
+ bp->eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ } else {
+ bp->eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ bp->flags &= ~BNXT_FLAG_JUMBO;
+ }
+
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
+ new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+
+ eth_dev->data->mtu = new_mtu;
+ PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ uint16_t size = 0;
+
+ vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ if (rc)
+ break;
+
+ size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
+ size -= RTE_PKTMBUF_HEADROOM;
+
+ if (size < new_mtu) {
+ rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int
+bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint16_t vlan = bp->vlan;
+ int rc;
+
+ if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "PVID cannot be modified for this function\n");
+ return -ENOTSUP;
+ }
+ bp->vlan = on ? pvid : 0;
+
+ rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
+ if (rc)
+ bp->vlan = vlan;
+ return rc;
+}
+
+static int
+bnxt_dev_led_on_op(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+
+ return bnxt_hwrm_port_led_cfg(bp, true);
+}
+
+static int
+bnxt_dev_led_off_op(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+
+ return bnxt_hwrm_port_led_cfg(bp, false);
+}
+
+static uint32_t
+bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ uint32_t desc = 0, raw_cons = 0, cons;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_queue *rxq;
+ struct rx_pkt_cmpl *rxcmp;
+ uint16_t cmp_type;
+ uint8_t cmp = 1;
+ bool valid;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ cpr = rxq->cp_ring;
+ valid = cpr->valid;
+
+ while (raw_cons < rxq->nb_rx_desc) {
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+ if (!CMPL_VALID(rxcmp, valid))
+ goto nothing_to_do;
+ valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
+ cmp_type = CMP_TYPE(rxcmp);
+ if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
+ cmp = (rte_le_to_cpu_32(
+ ((struct rx_tpa_end_cmpl *)
+ (rxcmp))->agg_bufs_v1) &
+ RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
+ RX_TPA_END_CMPL_AGG_BUFS_SFT;
+ desc++;
+ } else if (cmp_type == 0x11) {
+ desc++;
+ cmp = (rxcmp->agg_bufs_v1 &
+ RX_PKT_CMPL_AGG_BUFS_MASK) >>
+ RX_PKT_CMPL_AGG_BUFS_SFT;
+ } else {
+ cmp = 1;
+ }
+nothing_to_do:
+ raw_cons += cmp ? cmp : 2;
+ }
+
+ return desc;
+}
+
+static int
+bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
+{
+ struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_sw_rx_bd *rx_buf;
+ struct rx_pkt_cmpl *rxcmp;
+ uint32_t cons, cp_cons;
+
+ if (!rxq)
+ return -EINVAL;
+
+ cpr = rxq->cp_ring;
+ rxr = rxq->rx_ring;
+
+ if (offset >= rxq->nb_rx_desc)
+ return -EINVAL;
+
+ cons = RING_CMP(cpr->cp_ring_struct, offset);
+ cp_cons = cpr->cp_raw_cons;
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+ if (cons > cp_cons) {
+ if (CMPL_VALID(rxcmp, cpr->valid))
+ return RTE_ETH_RX_DESC_DONE;
+ } else {
+ if (CMPL_VALID(rxcmp, !cpr->valid))
+ return RTE_ETH_RX_DESC_DONE;
+ }
+ rx_buf = &rxr->rx_buf_ring[cons];
+ if (rx_buf->mbuf == NULL)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+static int
+bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
+{
+ struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_sw_tx_bd *tx_buf;
+ struct tx_pkt_cmpl *txcmp;
+ uint32_t cons, cp_cons;
+
+ if (!txq)
+ return -EINVAL;
+
+ cpr = txq->cp_ring;
+ txr = txq->tx_ring;
+
+ if (offset >= txq->nb_tx_desc)
+ return -EINVAL;
+
+ cons = RING_CMP(cpr->cp_ring_struct, offset);
+ txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+ cp_cons = cpr->cp_raw_cons;
+
+ if (cons > cp_cons) {
+ if (CMPL_VALID(txcmp, cpr->valid))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+ } else {
+ if (CMPL_VALID(txcmp, !cpr->valid))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+ }
+ tx_buf = &txr->tx_buf_ring[cons];
+ if (tx_buf->mbuf == NULL)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+static struct bnxt_filter_info *
+bnxt_match_and_validate_ether_filter(struct bnxt *bp,
+ struct rte_eth_ethertype_filter *efilter,
+ struct bnxt_vnic_info *vnic0,
+ struct bnxt_vnic_info *vnic,
+ int *ret)
+{
+ struct bnxt_filter_info *mfilter = NULL;
+ int match = 0;
+ *ret = 0;
+
+ if (efilter->ether_type == ETHER_TYPE_IPv4 ||
+ efilter->ether_type == ETHER_TYPE_IPv6) {
+ PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
+ " ethertype filter.", efilter->ether_type);
+ *ret = -EINVAL;
+ goto exit;
+ }
+ if (efilter->queue >= bp->rx_nr_rings) {
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
+ *ret = -EINVAL;
+ goto exit;
+ }
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
+ if (vnic == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
+ *ret = -EINVAL;
+ goto exit;
+ }
+
+ if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
+ if ((!memcmp(efilter->mac_addr.addr_bytes,
+ mfilter->l2_addr, ETHER_ADDR_LEN) &&
+ mfilter->flags ==
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
+ mfilter->ethertype == efilter->ether_type)) {
+ match = 1;
+ break;
+ }
+ }
+ } else {
+ STAILQ_FOREACH(mfilter, &vnic->filter, next)
+ if ((!memcmp(efilter->mac_addr.addr_bytes,
+ mfilter->l2_addr, ETHER_ADDR_LEN) &&
+ mfilter->ethertype == efilter->ether_type &&
+ mfilter->flags ==
+ HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
+ match = 1;
+ break;
+ }
+ }
+
+ if (match)
+ *ret = -EEXIST;
+
+exit:
+ return mfilter;
+}
+
+static int
+bnxt_ethertype_filter(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_ethertype_filter *efilter =
+ (struct rte_eth_ethertype_filter *)arg;
+ struct bnxt_filter_info *bfilter, *filter1;
+ struct bnxt_vnic_info *vnic, *vnic0;
+ int ret;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ bnxt_match_and_validate_ether_filter(bp, efilter,
+ vnic0, vnic, &ret);
+ if (ret < 0)
+ return ret;
+
+ bfilter = bnxt_get_unused_filter(bp);
+ if (bfilter == NULL) {
+ PMD_DRV_LOG(ERR,
+ "Not enough resources for a new filter.\n");
+ return -ENOMEM;
+ }
+ bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
+ memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
+ ETHER_ADDR_LEN);
+ memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
+ ETHER_ADDR_LEN);
+ bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
+ bfilter->ethertype = efilter->ether_type;
+ bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+
+ filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
+ if (filter1 == NULL) {
+ ret = -1;
+ goto cleanup;
+ }
+ bfilter->enables |=
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+
+ bfilter->dst_id = vnic->fw_vnic_id;
+
+ if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ bfilter->flags =
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
+ }
+
+ ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
+ if (ret)
+ goto cleanup;
+ STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
+ vnic0, vnic, &ret);
+ if (ret == -EEXIST) {
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
+
+ STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
+ next);
+ bnxt_free_filter(bp, filter1);
+ } else if (ret == 0) {
+ PMD_DRV_LOG(ERR, "No matching filter found\n");
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ goto error;
+ }
+ return ret;
+cleanup:
+ bnxt_free_filter(bp, bfilter);
+error:
+ return ret;
+}
+
+static inline int
+parse_ntuple_filter(struct bnxt *bp,
+ struct rte_eth_ntuple_filter *nfilter,
+ struct bnxt_filter_info *bfilter)
+{
+ uint32_t en = 0;
+
+ if (nfilter->queue >= bp->rx_nr_rings) {
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
+ return -EINVAL;
+ }
+
+ switch (nfilter->dst_port_mask) {
+ case UINT16_MAX:
+ bfilter->dst_port_mask = -1;
+ bfilter->dst_port = nfilter->dst_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+
+ switch (nfilter->proto_mask) {
+ case UINT8_MAX:
+ if (nfilter->proto == 17) /* IPPROTO_UDP */
+ bfilter->ip_protocol = 17;
+ else if (nfilter->proto == 6) /* IPPROTO_TCP */
+ bfilter->ip_protocol = 6;
+ else
+ return -EINVAL;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ switch (nfilter->dst_ip_mask) {
+ case UINT32_MAX:
+ bfilter->dst_ipaddr_mask[0] = -1;
+ bfilter->dst_ipaddr[0] = nfilter->dst_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (nfilter->src_ip_mask) {
+ case UINT32_MAX:
+ bfilter->src_ipaddr_mask[0] = -1;
+ bfilter->src_ipaddr[0] = nfilter->src_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (nfilter->src_port_mask) {
+ case UINT16_MAX:
+ bfilter->src_port_mask = -1;
+ bfilter->src_port = nfilter->src_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
+ return -EINVAL;
+ }
+
+ //TODO Priority
+ //nfilter->priority = (uint8_t)filter->priority;
+
+ bfilter->enables = en;
+ return 0;
+}
+
+static struct bnxt_filter_info*
+bnxt_match_ntuple_filter(struct bnxt *bp,
+ struct bnxt_filter_info *bfilter,
+ struct bnxt_vnic_info **mvnic)
+{
+ struct bnxt_filter_info *mfilter = NULL;
+ int i;
+
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ STAILQ_FOREACH(mfilter, &vnic->filter, next) {
+ if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
+ bfilter->src_ipaddr_mask[0] ==
+ mfilter->src_ipaddr_mask[0] &&
+ bfilter->src_port == mfilter->src_port &&
+ bfilter->src_port_mask == mfilter->src_port_mask &&
+ bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
+ bfilter->dst_ipaddr_mask[0] ==
+ mfilter->dst_ipaddr_mask[0] &&
+ bfilter->dst_port == mfilter->dst_port &&
+ bfilter->dst_port_mask == mfilter->dst_port_mask &&
+ bfilter->flags == mfilter->flags &&
+ bfilter->enables == mfilter->enables) {
+ if (mvnic)
+ *mvnic = vnic;
+ return mfilter;
+ }
+ }
+ }
+ return NULL;
+}
+
+static int
+bnxt_cfg_ntuple_filter(struct bnxt *bp,
+ struct rte_eth_ntuple_filter *nfilter,
+ enum rte_filter_op filter_op)
+{
+ struct bnxt_filter_info *bfilter, *mfilter, *filter1;
+ struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
+ int ret;
+
+ if (nfilter->flags != RTE_5TUPLE_FLAGS) {
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+ return -EINVAL;
+ }
+
+ if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+ PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
+ return -EINVAL;
+ }
+
+ bfilter = bnxt_get_unused_filter(bp);
+ if (bfilter == NULL) {
+ PMD_DRV_LOG(ERR,
+ "Not enough resources for a new filter.\n");
+ return -ENOMEM;
+ }
+ ret = parse_ntuple_filter(bp, nfilter, bfilter);
+ if (ret < 0)
+ goto free_filter;
+
+ vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]);
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = STAILQ_FIRST(&vnic0->filter);
+ if (filter1 == NULL) {
+ ret = -1;
+ goto free_filter;
+ }
+
+ bfilter->dst_id = vnic->fw_vnic_id;
+ bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ bfilter->enables |=
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ bfilter->ethertype = 0x800;
+ bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+
+ mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
+
+ if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
+ bfilter->dst_id == mfilter->dst_id) {
+ PMD_DRV_LOG(ERR, "filter exists.\n");
+ ret = -EEXIST;
+ goto free_filter;
+ } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
+ bfilter->dst_id != mfilter->dst_id) {
+ mfilter->dst_id = vnic->fw_vnic_id;
+ ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
+ STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
+ STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
+ PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
+ PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
+ goto free_filter;
+ }
+ if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ ret = -ENOENT;
+ goto free_filter;
+ }
+
+ if (filter_op == RTE_ETH_FILTER_ADD) {
+ bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
+ ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
+ if (ret)
+ goto free_filter;
+ STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
+ } else {
+ if (mfilter == NULL) {
+ /* This should not happen. But for Coverity! */
+ ret = -ENOENT;
+ goto free_filter;
+ }
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
+
+ STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
+ bnxt_free_filter(bp, mfilter);
+ mfilter->fw_l2_filter_id = -1;
+ bnxt_free_filter(bp, bfilter);
+ bfilter->fw_l2_filter_id = -1;
+ }
+
+ return 0;
+free_filter:
+ bfilter->fw_l2_filter_id = -1;
+ bnxt_free_filter(bp, bfilter);
+ return ret;
+}
+
+static int
+bnxt_ntuple_filter(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ int ret;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = bnxt_cfg_ntuple_filter(bp,
+ (struct rte_eth_ntuple_filter *)arg,
+ filter_op);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = bnxt_cfg_ntuple_filter(bp,
+ (struct rte_eth_ntuple_filter *)arg,
+ filter_op);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+bnxt_parse_fdir_filter(struct bnxt *bp,
+ struct rte_eth_fdir_filter *fdir,
+ struct bnxt_filter_info *filter)
+{
+ enum rte_fdir_mode fdir_mode =
+ bp->eth_dev->data->dev_conf.fdir_conf.mode;
+ struct bnxt_vnic_info *vnic0, *vnic;
+ struct bnxt_filter_info *filter1;
+ uint32_t en = 0;
+ int i;
+
+ if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ return -EINVAL;
+
+ filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
+ en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
+
+ switch (fdir->input.flow_type) {
+ case RTE_ETH_FLOW_IPV4:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ /* FALLTHROUGH */
+ filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+ filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ filter->ip_addr_type =
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+ filter->src_ipaddr_mask[0] = 0xffffffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ filter->dst_ipaddr_mask[0] = 0xffffffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ filter->ethertype = 0x800;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ filter->src_port = fdir->input.flow.tcp4_flow.src_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
+ filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ filter->dst_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ filter->src_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+ filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ filter->ip_protocol = 6;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ filter->ip_addr_type =
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+ filter->src_ipaddr_mask[0] = 0xffffffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ filter->dst_ipaddr_mask[0] = 0xffffffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ filter->ethertype = 0x800;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ filter->src_port = fdir->input.flow.udp4_flow.src_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
+ filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ filter->dst_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ filter->src_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+ filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ filter->ip_protocol = 17;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ filter->ip_addr_type =
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+ filter->src_ipaddr_mask[0] = 0xffffffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ filter->dst_ipaddr_mask[0] = 0xffffffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ filter->ethertype = 0x800;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_IPV6:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ /* FALLTHROUGH */
+ filter->ip_addr_type =
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+ filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ rte_memcpy(filter->src_ipaddr,
+ fdir->input.flow.ipv6_flow.src_ip, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+ rte_memcpy(filter->dst_ipaddr,
+ fdir->input.flow.ipv6_flow.dst_ip, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ memset(filter->dst_ipaddr_mask, 0xff, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ memset(filter->src_ipaddr_mask, 0xff, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ filter->ethertype = 0x86dd;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ filter->src_port = fdir->input.flow.tcp6_flow.src_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
+ filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ filter->dst_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ filter->src_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ filter->ip_addr_type =
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+ filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ rte_memcpy(filter->src_ipaddr,
+ fdir->input.flow.tcp6_flow.ip.src_ip, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+ rte_memcpy(filter->dst_ipaddr,
+ fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ memset(filter->dst_ipaddr_mask, 0xff, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ memset(filter->src_ipaddr_mask, 0xff, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ filter->ethertype = 0x86dd;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ filter->src_port = fdir->input.flow.udp6_flow.src_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
+ filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ filter->dst_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ filter->src_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ filter->ip_addr_type =
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+ filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ rte_memcpy(filter->src_ipaddr,
+ fdir->input.flow.udp6_flow.ip.src_ip, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+ rte_memcpy(filter->dst_ipaddr,
+ fdir->input.flow.udp6_flow.ip.dst_ip, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ memset(filter->dst_ipaddr_mask, 0xff, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ memset(filter->src_ipaddr_mask, 0xff, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ filter->ethertype = 0x86dd;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_L2_PAYLOAD:
+ filter->ethertype = fdir->input.flow.l2_flow.ether_type;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_VXLAN:
+ if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
+ return -EINVAL;
+ filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
+ filter->tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
+ break;
+ case RTE_ETH_FLOW_NVGRE:
+ if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
+ return -EINVAL;
+ filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
+ filter->tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
+ en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
+ break;
+ case RTE_ETH_FLOW_UNKNOWN:
+ case RTE_ETH_FLOW_RAW:
+ case RTE_ETH_FLOW_FRAG_IPV4:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_FLOW_FRAG_IPV6:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ case RTE_ETH_FLOW_IPV6_EX:
+ case RTE_ETH_FLOW_IPV6_TCP_EX:
+ case RTE_ETH_FLOW_IPV6_UDP_EX:
+ case RTE_ETH_FLOW_GENEVE:
+ /* FALLTHROUGH */
+ default:
+ return -EINVAL;
+ }
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
+ if (vnic == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
+ return -EINVAL;
+ }
+
+
+ if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ rte_memcpy(filter->dst_macaddr,
+ fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
+ }
+
+ if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
+ filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
+ filter1 = STAILQ_FIRST(&vnic0->filter);
+ //filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+ } else {
+ filter->dst_id = vnic->fw_vnic_id;
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ if (filter->dst_macaddr[i] == 0x00)
+ filter1 = STAILQ_FIRST(&vnic0->filter);
+ else
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic);
+ }
+
+ if (filter1 == NULL)
+ return -EINVAL;
+
+ en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+
+ filter->enables = en;
+
+ return 0;
+}
+
+static struct bnxt_filter_info *
+bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
+ struct bnxt_vnic_info **mvnic)
+{
+ struct bnxt_filter_info *mf = NULL;
+ int i;
+
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ STAILQ_FOREACH(mf, &vnic->filter, next) {
+ if (mf->filter_type == nf->filter_type &&
+ mf->flags == nf->flags &&
+ mf->src_port == nf->src_port &&
+ mf->src_port_mask == nf->src_port_mask &&
+ mf->dst_port == nf->dst_port &&
+ mf->dst_port_mask == nf->dst_port_mask &&
+ mf->ip_protocol == nf->ip_protocol &&
+ mf->ip_addr_type == nf->ip_addr_type &&
+ mf->ethertype == nf->ethertype &&
+ mf->vni == nf->vni &&
+ mf->tunnel_type == nf->tunnel_type &&
+ mf->l2_ovlan == nf->l2_ovlan &&
+ mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
+ mf->l2_ivlan == nf->l2_ivlan &&
+ mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
+ !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
+ !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->src_macaddr, nf->src_macaddr,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->dst_macaddr, nf->dst_macaddr,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->src_ipaddr, nf->src_ipaddr,
+ sizeof(nf->src_ipaddr)) &&
+ !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
+ sizeof(nf->src_ipaddr_mask)) &&
+ !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
+ sizeof(nf->dst_ipaddr)) &&
+ !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
+ sizeof(nf->dst_ipaddr_mask))) {
+ if (mvnic)
+ *mvnic = vnic;
+ return mf;
+ }
+ }
+ }
+ return NULL;
+}
+
+static int
+bnxt_fdir_filter(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
+ struct bnxt_filter_info *filter, *match;
+ struct bnxt_vnic_info *vnic, *mvnic;
+ int ret = 0, i;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
+ return -EINVAL;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ case RTE_ETH_FILTER_DELETE:
+ /* FALLTHROUGH */
+ filter = bnxt_get_unused_filter(bp);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR,
+ "Not enough resources for a new flow.\n");
+ return -ENOMEM;
+ }
+
+ ret = bnxt_parse_fdir_filter(bp, fdir, filter);
+ if (ret != 0)
+ goto free_filter;
+ filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
+
+ if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
+ vnic = STAILQ_FIRST(&bp->ff_pool[0]);
+ else
+ vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
+
+ match = bnxt_match_fdir(bp, filter, &mvnic);
+ if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
+ if (match->dst_id == vnic->fw_vnic_id) {
+ PMD_DRV_LOG(ERR, "Flow already exists.\n");
+ ret = -EEXIST;
+ goto free_filter;
+ } else {
+ match->dst_id = vnic->fw_vnic_id;
+ ret = bnxt_hwrm_set_ntuple_filter(bp,
+ match->dst_id,
+ match);
+ STAILQ_REMOVE(&mvnic->filter, match,
+ bnxt_filter_info, next);
+ STAILQ_INSERT_TAIL(&vnic->filter, match, next);
+ PMD_DRV_LOG(ERR,
+ "Filter with matching pattern exist\n");
+ PMD_DRV_LOG(ERR,
+ "Updated it to new destination q\n");
+ goto free_filter;
+ }
+ }
+ if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
+ PMD_DRV_LOG(ERR, "Flow does not exist.\n");
+ ret = -ENOENT;
+ goto free_filter;
+ }
+
+ if (filter_op == RTE_ETH_FILTER_ADD) {
+ ret = bnxt_hwrm_set_ntuple_filter(bp,
+ filter->dst_id,
+ filter);
+ if (ret)
+ goto free_filter;
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+ } else {
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
+ STAILQ_REMOVE(&vnic->filter, match,
+ bnxt_filter_info, next);
+ bnxt_free_filter(bp, match);
+ filter->fw_l2_filter_id = -1;
+ bnxt_free_filter(bp, filter);
+ }
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ if (filter->filter_type ==
+ HWRM_CFA_NTUPLE_FILTER) {
+ ret =
+ bnxt_hwrm_clear_ntuple_filter(bp,
+ filter);
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ }
+ }
+ }
+ return ret;
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_STATS:
+ case RTE_ETH_FILTER_INFO:
+ PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+
+free_filter:
+ filter->fw_l2_filter_id = -1;
+ bnxt_free_filter(bp, filter);
+ return ret;
+}
+
+static int
+bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg)
+{
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_TUNNEL:
+ PMD_DRV_LOG(ERR,
+ "filter type: %d: To be implemented\n", filter_type);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = bnxt_fdir_filter(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_NTUPLE:
+ ret = bnxt_ntuple_filter(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = bnxt_ethertype_filter(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &bnxt_flow_ops;
+ break;
+ default:
+ PMD_DRV_LOG(ERR,
+ "Filter type (%d) not supported", filter_type);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static const uint32_t *
+bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == bnxt_recv_pkts)
+ return ptypes;
+ return NULL;
+}
+
+static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
+ int reg_win)
+{
+ uint32_t reg_base = *reg_arr & 0xfffff000;
+ uint32_t win_off;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if ((reg_arr[i] & 0xfffff000) != reg_base)
+ return -ERANGE;
+ }
+ win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
+ rte_cpu_to_le_32(rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off));
+ return 0;
+}
+
+static int bnxt_map_ptp_regs(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint32_t *reg_arr;
+ int rc, i;
+
+ reg_arr = ptp->rx_regs;
+ rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
+ if (rc)
+ return rc;
+
+ reg_arr = ptp->tx_regs;
+ rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < BNXT_PTP_RX_REGS; i++)
+ ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
+
+ for (i = 0; i < BNXT_PTP_TX_REGS; i++)
+ ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
+
+ return 0;
+}
+
+static void bnxt_unmap_ptp_regs(struct bnxt *bp)
+{
+ rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16));
+ rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20));
+}
+
+static uint64_t bnxt_cc_read(struct bnxt *bp)
+{
+ uint64_t ns;
+
+ ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_SYNC_TIME));
+ ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
+ return ns;
+}
+
+static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint32_t fifo;
+
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
+ if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
+ return -EAGAIN;
+
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
+ *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
+ *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
+
+ return 0;
+}
+
+static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_pf_info *pf = &bp->pf;
+ uint16_t port_id;
+ uint32_t fifo;
+
+ if (!ptp)
+ return -ENODEV;
+
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
+ if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
+ return -EAGAIN;
+
+ port_id = pf->port_id;
+ rte_cpu_to_le_32(rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]));
+
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
+ if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
+/* bnxt_clr_rx_ts(bp); TBD */
+ return -EBUSY;
+ }
+
+ *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
+ *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
+
+ return 0;
+}
+
+static int
+bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ uint64_t ns;
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return 0;
+
+ ns = rte_timespec_to_ns(ts);
+ /* Set the timecounters to a new value. */
+ ptp->tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ uint64_t ns, systime_cycles;
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return 0;
+
+ systime_cycles = bnxt_cc_read(bp);
+ ns = rte_timecounter_update(&ptp->tc, systime_cycles);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+static int
+bnxt_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint32_t shift = 0;
+
+ if (!ptp)
+ return 0;
+
+ ptp->rx_filter = 1;
+ ptp->tx_tstamp_en = 1;
+ ptp->rxctl = BNXT_PTP_MSG_EVENTS;
+
+ if (!bnxt_hwrm_ptp_cfg(bp))
+ bnxt_map_ptp_regs(bp);
+
+ memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
+ memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
+ ptp->tc.cc_shift = shift;
+ ptp->tc.nsec_mask = (1ULL << shift) - 1;
+
+ ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
+ ptp->rx_tstamp_tc.cc_shift = shift;
+ ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+ ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
+ ptp->tx_tstamp_tc.cc_shift = shift;
+ ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+ return 0;
+}
+
+static int
+bnxt_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return 0;
+
+ ptp->rx_filter = 0;
+ ptp->tx_tstamp_en = 0;
+ ptp->rxctl = 0;
+
+ bnxt_hwrm_ptp_cfg(bp);
+
+ bnxt_unmap_ptp_regs(bp);
+
+ return 0;
+}
+
+static int
+bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint64_t rx_tstamp_cycles = 0;
+ uint64_t ns;
+
+ if (!ptp)
+ return 0;
+
+ bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
+ ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+ return 0;
+}
+
+static int
+bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint64_t tx_tstamp_cycles = 0;
+ uint64_t ns;
+
+ if (!ptp)
+ return 0;
+
+ bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
+ ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return 0;
+
+ ptp->tc.nsec += delta;
+
+ return 0;
+}
+
+static int
+bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ int rc;
+ uint32_t dir_entries;
+ uint32_t entry_length;
+
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n",
+ bp->pdev->addr.domain, bp->pdev->addr.bus,
+ bp->pdev->addr.devid, bp->pdev->addr.function);
+
+ rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
+ if (rc != 0)
+ return rc;
+
+ return dir_entries * entry_length;
+}
+
+static int
+bnxt_get_eeprom_op(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint32_t index;
+ uint32_t offset;
+
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", bp->pdev->addr.domain,
+ bp->pdev->addr.bus, bp->pdev->addr.devid,
+ bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
+
+ if (in_eeprom->offset == 0) /* special offset value to get directory */
+ return bnxt_get_nvram_directory(bp, in_eeprom->length,
+ in_eeprom->data);
+
+ index = in_eeprom->offset >> 24;
+ offset = in_eeprom->offset & 0xffffff;
+
+ if (index != 0)
+ return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
+ in_eeprom->length, in_eeprom->data);
+
+ return 0;
+}
+
+static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_CHIMP_PATCH:
+ case BNX_DIR_TYPE_BOOTCODE:
+ case BNX_DIR_TYPE_BOOTCODE_2:
+ case BNX_DIR_TYPE_APE_FW:
+ case BNX_DIR_TYPE_APE_PATCH:
+ case BNX_DIR_TYPE_KONG_FW:
+ case BNX_DIR_TYPE_KONG_PATCH:
+ case BNX_DIR_TYPE_BONO_FW:
+ case BNX_DIR_TYPE_BONO_PATCH:
+ /* FALLTHROUGH */
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_AVS:
+ case BNX_DIR_TYPE_EXP_ROM_MBA:
+ case BNX_DIR_TYPE_PCIE:
+ case BNX_DIR_TYPE_TSCF_UCODE:
+ case BNX_DIR_TYPE_EXT_PHY:
+ case BNX_DIR_TYPE_CCM:
+ case BNX_DIR_TYPE_ISCSI_BOOT:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+ /* FALLTHROUGH */
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_executable(uint16_t dir_type)
+{
+ return bnxt_dir_type_is_ape_bin_format(dir_type) ||
+ bnxt_dir_type_is_other_exec_format(dir_type);
+}
+
+static int
+bnxt_set_eeprom_op(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint8_t index, dir_op;
+ uint16_t type, ext, ordinal, attr;
+
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", bp->pdev->addr.domain,
+ bp->pdev->addr.bus, bp->pdev->addr.devid,
+ bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
+ return -EINVAL;
+ }
+
+ type = in_eeprom->magic >> 16;
+
+ if (type == 0xffff) { /* special value for directory operations */
+ index = in_eeprom->magic & 0xff;
+ dir_op = in_eeprom->magic >> 8;
+ if (index == 0)
+ return -EINVAL;
+ switch (dir_op) {
+ case 0x0e: /* erase */
+ if (in_eeprom->offset != ~in_eeprom->magic)
+ return -EINVAL;
+ return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Create or re-write an NVM item: */
+ if (bnxt_dir_type_is_executable(type) == true)
+ return -EOPNOTSUPP;
+ ext = in_eeprom->magic & 0xffff;
+ ordinal = in_eeprom->offset >> 16;
+ attr = in_eeprom->offset & 0xffff;
+
+ return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
+ in_eeprom->data, in_eeprom->length);
+ return 0;
+}
+
+/*
+ * Initialization
+ */
+
+static const struct eth_dev_ops bnxt_dev_ops = {
+ .dev_infos_get = bnxt_dev_info_get_op,
+ .dev_close = bnxt_dev_close_op,
+ .dev_configure = bnxt_dev_configure_op,
+ .dev_start = bnxt_dev_start_op,
+ .dev_stop = bnxt_dev_stop_op,
+ .dev_set_link_up = bnxt_dev_set_link_up_op,
+ .dev_set_link_down = bnxt_dev_set_link_down_op,
+ .stats_get = bnxt_stats_get_op,
+ .stats_reset = bnxt_stats_reset_op,
+ .rx_queue_setup = bnxt_rx_queue_setup_op,
+ .rx_queue_release = bnxt_rx_queue_release_op,
+ .tx_queue_setup = bnxt_tx_queue_setup_op,
+ .tx_queue_release = bnxt_tx_queue_release_op,
+ .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
+ .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
+ .reta_update = bnxt_reta_update_op,
+ .reta_query = bnxt_reta_query_op,
+ .rss_hash_update = bnxt_rss_hash_update_op,
+ .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
+ .link_update = bnxt_link_update_op,
+ .promiscuous_enable = bnxt_promiscuous_enable_op,
+ .promiscuous_disable = bnxt_promiscuous_disable_op,
+ .allmulticast_enable = bnxt_allmulticast_enable_op,
+ .allmulticast_disable = bnxt_allmulticast_disable_op,
+ .mac_addr_add = bnxt_mac_addr_add_op,
+ .mac_addr_remove = bnxt_mac_addr_remove_op,
+ .flow_ctrl_get = bnxt_flow_ctrl_get_op,
+ .flow_ctrl_set = bnxt_flow_ctrl_set_op,
+ .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op,
+ .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op,
+ .vlan_filter_set = bnxt_vlan_filter_set_op,
+ .vlan_offload_set = bnxt_vlan_offload_set_op,
+ .vlan_pvid_set = bnxt_vlan_pvid_set_op,
+ .mtu_set = bnxt_mtu_set_op,
+ .mac_addr_set = bnxt_set_default_mac_addr_op,
+ .xstats_get = bnxt_dev_xstats_get_op,
+ .xstats_get_names = bnxt_dev_xstats_get_names_op,
+ .xstats_reset = bnxt_dev_xstats_reset_op,
+ .fw_version_get = bnxt_fw_version_get,
+ .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
+ .rxq_info_get = bnxt_rxq_info_get_op,
+ .txq_info_get = bnxt_txq_info_get_op,
+ .dev_led_on = bnxt_dev_led_on_op,
+ .dev_led_off = bnxt_dev_led_off_op,
+ .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
+ .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
+ .rx_queue_count = bnxt_rx_queue_count_op,
+ .rx_descriptor_status = bnxt_rx_descriptor_status_op,
+ .tx_descriptor_status = bnxt_tx_descriptor_status_op,
+ .rx_queue_start = bnxt_rx_queue_start,
+ .rx_queue_stop = bnxt_rx_queue_stop,
+ .tx_queue_start = bnxt_tx_queue_start,
+ .tx_queue_stop = bnxt_tx_queue_stop,
+ .filter_ctrl = bnxt_filter_ctrl_op,
+ .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
+ .get_eeprom_length = bnxt_get_eeprom_length_op,
+ .get_eeprom = bnxt_get_eeprom_op,
+ .set_eeprom = bnxt_set_eeprom_op,
+ .timesync_enable = bnxt_timesync_enable,
+ .timesync_disable = bnxt_timesync_disable,
+ .timesync_read_time = bnxt_timesync_read_time,
+ .timesync_write_time = bnxt_timesync_write_time,
+ .timesync_adjust_time = bnxt_timesync_adjust_time,
+ .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
+};
+
+static bool bnxt_vf_pciid(uint16_t id)
+{
+ if (id == BROADCOM_DEV_ID_57304_VF ||
+ id == BROADCOM_DEV_ID_57406_VF ||
+ id == BROADCOM_DEV_ID_5731X_VF ||
+ id == BROADCOM_DEV_ID_5741X_VF ||
+ id == BROADCOM_DEV_ID_57414_VF ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 ||
+ id == BROADCOM_DEV_ID_58802_VF)
+ return true;
+ return false;
+}
+
+bool bnxt_stratus_device(struct bnxt *bp)
+{
+ uint16_t id = bp->pdev->id.device_id;
+
+ if (id == BROADCOM_DEV_ID_STRATUS_NIC ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF2)
+ return true;
+ return false;
+}
+
+static int bnxt_init_board(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = eth_dev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ int rc;
+
+ /* enable device (incl. PCI PM wakeup), and bus-mastering */
+ if (!pci_dev->mem_resource[0].addr) {
+ PMD_DRV_LOG(ERR,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto init_err_disable;
+ }
+
+ bp->eth_dev = eth_dev;
+ bp->pdev = pci_dev;
+
+ bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
+ if (!bp->bar0) {
+ PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+
+ if (!pci_dev->mem_resource[2].addr) {
+ PMD_DRV_LOG(ERR,
+ "Cannot find PCI device BAR 2 address, aborting\n");
+ rc = -ENODEV;
+ goto init_err_release;
+ } else {
+ bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
+ }
+
+ return 0;
+
+init_err_release:
+ if (bp->bar0)
+ bp->bar0 = NULL;
+ if (bp->doorbell_base)
+ bp->doorbell_base = NULL;
+
+init_err_disable:
+
+ return rc;
+}
+
+
+#define ALLOW_FUNC(x) \
+ { \
+ typeof(x) arg = (x); \
+ bp->pf.vf_req_fwd[((arg) >> 5)] &= \
+ ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
+ }
+static int
+bnxt_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz = NULL;
+ static int version_printed;
+ uint32_t total_alloc_len;
+ rte_iova_t mz_phys_addr;
+ struct bnxt *bp;
+ int rc;
+
+ if (version_printed++ == 0)
+ PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ bp = eth_dev->data->dev_private;
+
+ bp->dev_stopped = 1;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ goto skip_init;
+
+ if (bnxt_vf_pciid(pci_dev->id.device_id))
+ bp->flags |= BNXT_FLAG_VF;
+
+ rc = bnxt_init_board(eth_dev);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Board initialization failed rc: %x\n", rc);
+ goto error;
+ }
+skip_init:
+ eth_dev->dev_ops = &bnxt_dev_ops;
+ eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
+ eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "rx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
+ sizeof(struct rx_port_stats) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name, total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ PMD_DRV_LOG(WARNING,
+ "Memzone physical address same as virtual.\n");
+ PMD_DRV_LOG(WARNING,
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
+ if (mz_phys_addr == 0) {
+ PMD_DRV_LOG(ERR,
+ "unable to map address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ bp->rx_mem_zone = (const void *)mz;
+ bp->hw_rx_port_stats = mz->addr;
+ bp->hw_rx_port_stats_map = mz_phys_addr;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "tx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
+ sizeof(struct tx_port_stats) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name,
+ total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ PMD_DRV_LOG(WARNING,
+ "Memzone physical address same as virtual.\n");
+ PMD_DRV_LOG(WARNING,
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
+ if (mz_phys_addr == 0) {
+ PMD_DRV_LOG(ERR,
+ "unable to map address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ bp->tx_mem_zone = (const void *)mz;
+ bp->hw_tx_port_stats = mz->addr;
+ bp->hw_tx_port_stats_map = mz_phys_addr;
+
+ bp->flags |= BNXT_FLAG_PORT_STATS;
+ }
+
+ rc = bnxt_alloc_hwrm_resources(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "hwrm resource allocation failure rc: %x\n", rc);
+ goto error_free;
+ }
+ rc = bnxt_hwrm_ver_get(bp);
+ if (rc)
+ goto error_free;
+ rc = bnxt_hwrm_queue_qportcfg(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
+ goto error_free;
+ }
+
+ rc = bnxt_hwrm_func_qcfg(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
+ goto error_free;
+ }
+
+ /* Get the MAX capabilities for this function */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
+ goto error_free;
+ }
+ if (bp->max_tx_rings == 0) {
+ PMD_DRV_LOG(ERR, "No TX rings available!\n");
+ rc = -EBUSY;
+ goto error_free;
+ }
+ eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
+ ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_DRV_LOG(ERR,
+ "Failed to alloc %u bytes needed to store MAC addr tbl",
+ ETHER_ADDR_LEN * bp->max_l2_ctx);
+ rc = -ENOMEM;
+ goto error_free;
+ }
+
+ if (bnxt_check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
+ bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
+ bp->dflt_mac_addr[2], bp->dflt_mac_addr[3],
+ bp->dflt_mac_addr[4], bp->dflt_mac_addr[5]);
+ rc = -EINVAL;
+ goto error_free;
+ }
+ /* Copy the permanent MAC from the qcap response address now. */
+ memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
+ memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
+
+ if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
+ /* 1 ring is for default completion ring */
+ PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
+ rc = -ENOSPC;
+ goto error_free;
+ }
+
+ bp->grp_info = rte_zmalloc("bnxt_grp_info",
+ sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
+ if (!bp->grp_info) {
+ PMD_DRV_LOG(ERR,
+ "Failed to alloc %zu bytes to store group info table\n",
+ sizeof(*bp->grp_info) * bp->max_ring_grps);
+ rc = -ENOMEM;
+ goto error_free;
+ }
+
+ /* Forward all requests if firmware is new enough */
+ if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
+ (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
+ ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
+ memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
+ } else {
+ PMD_DRV_LOG(WARNING,
+ "Firmware too old for VF mailbox functionality\n");
+ memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
+ }
+
+ /*
+ * The following are used for driver cleanup. If we disallow these,
+ * VF drivers can't clean up cleanly.
+ */
+ ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
+ ALLOW_FUNC(HWRM_VNIC_FREE);
+ ALLOW_FUNC(HWRM_RING_FREE);
+ ALLOW_FUNC(HWRM_RING_GRP_FREE);
+ ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
+ ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
+ ALLOW_FUNC(HWRM_STAT_CTX_FREE);
+ ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
+ ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
+ rc = bnxt_hwrm_func_driver_register(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to register driver");
+ rc = -EBUSY;
+ goto error_free;
+ }
+
+ PMD_DRV_LOG(INFO,
+ DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
+ pci_dev->mem_resource[0].phys_addr,
+ pci_dev->mem_resource[0].addr);
+
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
+ rc = -EIO;
+ goto error_free;
+ }
+
+ if (BNXT_PF(bp)) {
+ //if (bp->pf.active_vfs) {
+ // TODO: Deallocate VF resources?
+ //}
+ if (bp->pdev->max_vfs) {
+ rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
+ goto error_free;
+ }
+ } else {
+ rc = bnxt_hwrm_allocate_pf_only(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate PF resources\n");
+ goto error_free;
+ }
+ }
+ }
+
+ bnxt_hwrm_port_led_qcaps(bp);
+
+ rc = bnxt_setup_int(bp);
+ if (rc)
+ goto error_free;
+
+ rc = bnxt_alloc_mem(bp);
+ if (rc)
+ goto error_free_int;
+
+ rc = bnxt_request_int(bp);
+ if (rc)
+ goto error_free_int;
+
+ bnxt_enable_int(bp);
+ bnxt_init_nic(bp);
+
+ return 0;
+
+error_free_int:
+ bnxt_disable_int(bp);
+ bnxt_hwrm_func_buf_unrgtr(bp);
+ bnxt_free_int(bp);
+ bnxt_free_mem(bp);
+error_free:
+ bnxt_dev_uninit(eth_dev);
+error:
+ return rc;
+}
+
+static int
+bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = eth_dev->data->dev_private;
+ int rc;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
+ bnxt_disable_int(bp);
+ bnxt_free_int(bp);
+ bnxt_free_mem(bp);
+ if (eth_dev->data->mac_addrs != NULL) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ }
+ if (bp->grp_info != NULL) {
+ rte_free(bp->grp_info);
+ bp->grp_info = NULL;
+ }
+ rc = bnxt_hwrm_func_driver_unregister(bp, 0);
+ bnxt_free_hwrm_resources(bp);
+
+ if (bp->tx_mem_zone) {
+ rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
+ bp->tx_mem_zone = NULL;
+ }
+
+ if (bp->rx_mem_zone) {
+ rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
+ bp->rx_mem_zone = NULL;
+ }
+
+ if (bp->dev_stopped == 0)
+ bnxt_dev_close_op(eth_dev);
+ if (bp->pf.vf_info)
+ rte_free(bp->pf.vf_info);
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ return rc;
+}
+
+static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
+ bnxt_dev_init);
+}
+
+static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
+}
+
+static struct rte_pci_driver bnxt_rte_pmd = {
+ .id_table = bnxt_pci_id_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
+ RTE_PCI_DRV_INTR_LSC,
+ .probe = bnxt_pci_probe,
+ .remove = bnxt_pci_remove,
+};
+
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
+{
+ if (strcmp(dev->device->driver->name, drv->driver.name))
+ return false;
+
+ return true;
+}
+
+bool is_bnxt_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &bnxt_rte_pmd);
+}
+
+RTE_INIT(bnxt_init_log)
+{
+ bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
+ if (bnxt_logtype_driver >= 0)
+ rte_log_set_level(bnxt_logtype_driver, RTE_LOG_INFO);
+}
+
+RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.c
new file mode 100644
index 00000000..1038941e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <sys/queue.h>
+
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "bnxt.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * Filter Functions
+ */
+
+struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
+{
+ struct bnxt_filter_info *filter;
+
+ /* Find the 1st unused filter from the free_filter_list pool*/
+ filter = STAILQ_FIRST(&bp->free_filter_list);
+ if (!filter) {
+ PMD_DRV_LOG(ERR, "No more free filter resources\n");
+ return NULL;
+ }
+ STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
+
+ /* Default to L2 MAC Addr filter */
+ filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+ filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
+ memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
+ ETHER_ADDR_LEN);
+ memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ return filter;
+}
+
+struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
+{
+ struct bnxt_filter_info *filter;
+
+ filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
+ if (!filter) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
+ vf);
+ return NULL;
+ }
+
+ filter->fw_l2_filter_id = UINT64_MAX;
+ STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
+ return filter;
+}
+
+void bnxt_init_filters(struct bnxt *bp)
+{
+ struct bnxt_filter_info *filter;
+ int i, max_filters;
+
+ max_filters = bp->max_l2_ctx;
+ STAILQ_INIT(&bp->free_filter_list);
+ for (i = 0; i < max_filters; i++) {
+ filter = &bp->filter_info[i];
+ filter->fw_l2_filter_id = UINT64_MAX;
+ filter->fw_em_filter_id = UINT64_MAX;
+ filter->fw_ntuple_filter_id = UINT64_MAX;
+ STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
+ }
+}
+
+void bnxt_free_all_filters(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+ struct bnxt_filter_info *filter, *temp_filter;
+ int i;
+
+ for (i = 0; i < MAX_FF_POOLS; i++) {
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ STAILQ_INSERT_TAIL(&bp->free_filter_list,
+ filter, next);
+ filter = temp_filter;
+ }
+ STAILQ_INIT(&vnic->filter);
+ }
+ }
+
+ for (i = 0; i < bp->pf.max_vfs; i++) {
+ STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
+ bnxt_hwrm_clear_l2_filter(bp, filter);
+ }
+ }
+}
+
+void bnxt_free_filter_mem(struct bnxt *bp)
+{
+ struct bnxt_filter_info *filter;
+ uint16_t max_filters, i;
+ int rc = 0;
+
+ if (bp->filter_info == NULL)
+ return;
+
+ /* Ensure that all filters are freed */
+ max_filters = bp->max_l2_ctx;
+ for (i = 0; i < max_filters; i++) {
+ filter = &bp->filter_info[i];
+ if (filter->fw_l2_filter_id != ((uint64_t)-1) &&
+ filter->filter_type == HWRM_CFA_L2_FILTER) {
+ PMD_DRV_LOG(ERR, "L2 filter is not free\n");
+ /* Call HWRM to try to free filter again */
+ rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+ if (rc)
+ PMD_DRV_LOG(ERR,
+ "Cannot free L2 filter: %d\n",
+ rc);
+ }
+ filter->fw_l2_filter_id = UINT64_MAX;
+
+ if (filter->fw_ntuple_filter_id != ((uint64_t)-1) &&
+ filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
+ PMD_DRV_LOG(ERR, "NTUPLE filter is not free\n");
+ /* Call HWRM to try to free filter again */
+ rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+ if (rc)
+ PMD_DRV_LOG(ERR,
+ "Cannot free NTUPLE filter: %d\n",
+ rc);
+ }
+ filter->fw_ntuple_filter_id = UINT64_MAX;
+ }
+ STAILQ_INIT(&bp->free_filter_list);
+
+ rte_free(bp->filter_info);
+ bp->filter_info = NULL;
+
+ for (i = 0; i < bp->pf.max_vfs; i++) {
+ STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
+ rte_free(filter);
+ STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
+ bnxt_filter_info, next);
+ }
+ }
+}
+
+int bnxt_alloc_filter_mem(struct bnxt *bp)
+{
+ struct bnxt_filter_info *filter_mem;
+ uint16_t max_filters;
+
+ max_filters = bp->max_l2_ctx;
+ /* Allocate memory for VNIC pool and filter pool */
+ filter_mem = rte_zmalloc("bnxt_filter_info",
+ max_filters * sizeof(struct bnxt_filter_info),
+ 0);
+ if (filter_mem == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
+ max_filters);
+ return -ENOMEM;
+ }
+ bp->filter_info = filter_mem;
+ return 0;
+}
+
+struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
+{
+ struct bnxt_filter_info *filter;
+
+ /* Find the 1st unused filter from the free_filter_list pool*/
+ filter = STAILQ_FIRST(&bp->free_filter_list);
+ if (!filter) {
+ PMD_DRV_LOG(ERR, "No more free filter resources\n");
+ return NULL;
+ }
+ STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
+
+ return filter;
+}
+
+void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
+{
+ STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
+}
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.h
new file mode 100644
index 00000000..a1ecfb19
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_FILTER_H_
+#define _BNXT_FILTER_H_
+
+#include <rte_ether.h>
+
+struct bnxt;
+struct bnxt_filter_info {
+ STAILQ_ENTRY(bnxt_filter_info) next;
+ uint64_t fw_l2_filter_id;
+ uint64_t fw_em_filter_id;
+ uint64_t fw_ntuple_filter_id;
+#define INVALID_MAC_INDEX ((uint16_t)-1)
+ uint16_t mac_index;
+#define HWRM_CFA_L2_FILTER 0
+#define HWRM_CFA_EM_FILTER 1
+#define HWRM_CFA_NTUPLE_FILTER 2
+ uint8_t filter_type; //L2 or EM or NTUPLE filter
+ uint32_t dst_id;
+
+ /* Filter Characteristics */
+ uint32_t flags;
+ uint32_t enables;
+ uint8_t l2_addr[ETHER_ADDR_LEN];
+ uint8_t l2_addr_mask[ETHER_ADDR_LEN];
+ uint16_t l2_ovlan;
+ uint16_t l2_ovlan_mask;
+ uint16_t l2_ivlan;
+ uint16_t l2_ivlan_mask;
+ uint8_t t_l2_addr[ETHER_ADDR_LEN];
+ uint8_t t_l2_addr_mask[ETHER_ADDR_LEN];
+ uint16_t t_l2_ovlan;
+ uint16_t t_l2_ovlan_mask;
+ uint16_t t_l2_ivlan;
+ uint16_t t_l2_ivlan_mask;
+ uint8_t tunnel_type;
+ uint16_t mirror_vnic_id;
+ uint32_t vni;
+ uint8_t pri_hint;
+ uint64_t l2_filter_id_hint;
+ uint32_t src_id;
+ uint8_t src_type;
+ uint8_t src_macaddr[6];
+ uint8_t dst_macaddr[6];
+ uint32_t dst_ipaddr[4];
+ uint32_t dst_ipaddr_mask[4];
+ uint32_t src_ipaddr[4];
+ uint32_t src_ipaddr_mask[4];
+ uint16_t dst_port;
+ uint16_t dst_port_mask;
+ uint16_t src_port;
+ uint16_t src_port_mask;
+ uint16_t ip_protocol;
+ uint16_t ip_addr_type;
+ uint16_t ethertype;
+};
+
+struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp);
+struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf);
+void bnxt_init_filters(struct bnxt *bp);
+void bnxt_free_all_filters(struct bnxt *bp);
+void bnxt_free_filter_mem(struct bnxt *bp);
+int bnxt_alloc_filter_mem(struct bnxt *bp);
+struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp);
+void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter);
+struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp,
+ struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic);
+
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR
+#define EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR
+#define EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE
+#define EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE
+#define EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK
+#define NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL
+#define EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR
+#define EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR
+#define EM_FLOW_ALLOC_INPUT_EN_SRC_PORT \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT
+#define EM_FLOW_ALLOC_INPUT_EN_DST_PORT \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT
+#define EM_FLOW_ALLOC_INPUT_EN_IP_PROTO \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL
+#define EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
+#define NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
+#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN
+#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE
+#define L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
+#define NTUPLE_FLTR_ALLOC_INPUT_IP_PROTOCOL_UDP \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP
+#define NTUPLE_FLTR_ALLOC_INPUT_IP_PROTOCOL_TCP \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP
+#define NTUPLE_FLTR_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN
+#define NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_flow.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_flow.c
new file mode 100644
index 00000000..ac765674
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_flow.c
@@ -0,0 +1,1171 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "bnxt.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_vnic.h"
+#include "bnxt_util.h"
+#include "hsi_struct_def_dpdk.h"
+
+static int
+bnxt_flow_args_validate(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL,
+ "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL,
+ "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL,
+ "NULL attribute.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static const struct rte_flow_item *
+bnxt_flow_non_void_item(const struct rte_flow_item *cur)
+{
+ while (1) {
+ if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return cur;
+ cur++;
+ }
+}
+
+static const struct rte_flow_action *
+bnxt_flow_non_void_action(const struct rte_flow_action *cur)
+{
+ while (1) {
+ if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
+ return cur;
+ cur++;
+ }
+}
+
+static int
+bnxt_filter_type_check(const struct rte_flow_item pattern[],
+ struct rte_flow_error *error __rte_unused)
+{
+ const struct rte_flow_item *item =
+ bnxt_flow_non_void_item(pattern);
+ int use_ntuple = 1;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ use_ntuple = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ use_ntuple = 0;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* FALLTHROUGH */
+ /* need ntuple match, reset exact match */
+ if (!use_ntuple) {
+ PMD_DRV_LOG(ERR,
+ "VLAN flow cannot use NTUPLE filter\n");
+ rte_flow_error_set
+ (error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Cannot use VLAN with NTUPLE");
+ return -rte_errno;
+ }
+ use_ntuple |= 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown Flow type\n");
+ use_ntuple |= 1;
+ }
+ item++;
+ }
+ return use_ntuple;
+}
+
+static int
+bnxt_validate_and_parse_flow_type(struct bnxt *bp,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct bnxt_filter_info *filter)
+{
+ const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
+ uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
+ const struct rte_flow_item_vf *vf_spec;
+ uint32_t tenant_id_be = 0;
+ bool vni_masked = 0;
+ bool tni_masked = 0;
+ uint32_t vf = 0;
+ int use_ntuple;
+ uint32_t en = 0;
+ uint32_t en_ethertype;
+ int dflt_vnic;
+
+ use_ntuple = bnxt_filter_type_check(pattern, error);
+ PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
+ if (use_ntuple < 0)
+ return use_ntuple;
+
+ filter->filter_type = use_ntuple ?
+ HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
+ en_ethertype = use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
+ EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (item->last) {
+ /* last or range is NOT supported as match criteria */
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "No support for range");
+ return -rte_errno;
+ }
+
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "spec/mask is NULL");
+ return -rte_errno;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ /* Source MAC address mask cannot be partially set.
+ * Should be All 0's or all 1's.
+ * Destination MAC address mask must not be partially
+ * set. Should be all 1's or all 0's.
+ */
+ if ((!is_zero_ether_addr(&eth_mask->src) &&
+ !is_broadcast_ether_addr(&eth_mask->src)) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "MAC_addr mask not valid");
+ return -rte_errno;
+ }
+
+ /* Mask is not allowed. Only exact matches are */
+ if (eth_mask->type &&
+ eth_mask->type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ethertype mask not valid");
+ return -rte_errno;
+ }
+
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ rte_memcpy(filter->dst_macaddr,
+ &eth_spec->dst, 6);
+ en |= use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
+ EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
+ }
+
+ if (is_broadcast_ether_addr(&eth_mask->src)) {
+ rte_memcpy(filter->src_macaddr,
+ &eth_spec->src, 6);
+ en |= use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
+ EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
+ } /*
+ * else {
+ * PMD_DRV_LOG(ERR, "Handle this condition\n");
+ * }
+ */
+ if (eth_mask->type) {
+ filter->ethertype =
+ rte_be_to_cpu_16(eth_spec->type);
+ en |= en_ethertype;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+ if (en & en_ethertype) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN TPID matching is not"
+ " supported");
+ return -rte_errno;
+ }
+ if (vlan_mask->tci &&
+ vlan_mask->tci == RTE_BE16(0x0fff)) {
+ /* Only the VLAN ID can be matched. */
+ filter->l2_ovlan =
+ rte_be_to_cpu_16(vlan_spec->tci &
+ RTE_BE16(0x0fff));
+ en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
+ } else {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN mask is invalid");
+ return -rte_errno;
+ }
+ if (vlan_mask->inner_type &&
+ vlan_mask->inner_type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "inner ethertype mask not"
+ " valid");
+ return -rte_errno;
+ }
+ if (vlan_mask->inner_type) {
+ filter->ethertype =
+ rte_be_to_cpu_16(vlan_spec->inner_type);
+ en |= en_ethertype;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ /* If mask is not involved, we could use EM filters. */
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+ /* Only IP DST and SRC fields are maskable. */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.next_proto_id ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
+ filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
+
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
+ EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
+
+ if (ipv4_mask->hdr.src_addr) {
+ filter->src_ipaddr_mask[0] =
+ ipv4_mask->hdr.src_addr;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ }
+
+ if (ipv4_mask->hdr.dst_addr) {
+ filter->dst_ipaddr_mask[0] =
+ ipv4_mask->hdr.dst_addr;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ }
+
+ filter->ip_addr_type = use_ntuple ?
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+
+ if (ipv4_spec->hdr.next_proto_id) {
+ filter->ip_protocol =
+ ipv4_spec->hdr.next_proto_id;
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
+
+ /* Only IP DST and SRC fields are maskable. */
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask.");
+ return -rte_errno;
+ }
+
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
+ EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
+
+ rte_memcpy(filter->src_ipaddr,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->dst_ipaddr,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
+ 16)) {
+ rte_memcpy(filter->src_ipaddr_mask,
+ ipv6_mask->hdr.src_addr, 16);
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ }
+
+ if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
+ 16)) {
+ rte_memcpy(filter->dst_ipaddr_mask,
+ ipv6_mask->hdr.dst_addr, 16);
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ }
+
+ filter->ip_addr_type = use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
+ EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ /* Check TCP mask. Only DST & SRC ports are maskable */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
+ EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
+
+ if (tcp_mask->hdr.dst_port) {
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ }
+
+ if (tcp_mask->hdr.src_port) {
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ filter->src_port = udp_spec->hdr.src_port;
+ filter->dst_port = udp_spec->hdr.dst_port;
+
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
+ EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
+
+ if (udp_mask->hdr.dst_port) {
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ }
+
+ if (udp_mask->hdr.src_port) {
+ filter->src_port_mask = udp_mask->hdr.src_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec = item->spec;
+ vxlan_mask = item->mask;
+ /* Check if VXLAN item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
+ vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
+ vxlan_spec->flags != 0x8) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_spec && vxlan_mask) {
+ vni_masked =
+ !!memcmp(vxlan_mask->vni, vni_mask,
+ RTE_DIM(vni_mask));
+ if (vni_masked) {
+ rte_flow_error_set
+ (error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VNI mask");
+ return -rte_errno;
+ }
+
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->vni =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter->tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ nvgre_spec = item->spec;
+ nvgre_mask = item->mask;
+ /* Check if NVGRE item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!nvgre_spec && nvgre_mask) ||
+ (nvgre_spec && !nvgre_mask)) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
+ nvgre_spec->protocol != 0x6558) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec && nvgre_mask) {
+ tni_masked =
+ !!memcmp(nvgre_mask->tni, tni_mask,
+ RTE_DIM(tni_mask));
+ if (tni_masked) {
+ rte_flow_error_set
+ (error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TNI mask");
+ return -rte_errno;
+ }
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ nvgre_spec->tni, 3);
+ filter->vni =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter->tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VF:
+ vf_spec = item->spec;
+ vf = vf_spec->id;
+
+ if (!BNXT_PF(bp)) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Configuring on a VF!");
+ return -rte_errno;
+ }
+
+ if (vf >= bp->pdev->max_vfs) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Incorrect VF id!");
+ return -rte_errno;
+ }
+
+ if (!attr->transfer) {
+ rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Matching VF traffic without"
+ " affecting it (transfer attribute)"
+ " is unsupported");
+ return -rte_errno;
+ }
+
+ filter->mirror_vnic_id =
+ dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
+ if (dflt_vnic < 0) {
+ /* This simply indicates there's no driver
+ * loaded. This is not an error.
+ */
+ rte_flow_error_set
+ (error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unable to get default VNIC for VF");
+ return -rte_errno;
+ }
+
+ filter->mirror_vnic_id = dflt_vnic;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
+ break;
+ default:
+ break;
+ }
+ item++;
+ }
+ filter->enables = en;
+
+ return 0;
+}
+
+/* Parse attributes */
+static int
+bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr,
+ "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr,
+ "No support for egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr,
+ "No support for priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr,
+ "No support for group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+struct bnxt_filter_info *
+bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
+ struct bnxt_vnic_info *vnic)
+{
+ struct bnxt_filter_info *filter1, *f0;
+ struct bnxt_vnic_info *vnic0;
+ int rc;
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ f0 = STAILQ_FIRST(&vnic0->filter);
+
+ /* This flow has same DST MAC as the port/l2 filter. */
+ if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
+ return f0;
+
+ /* This flow needs DST MAC which is not same as port/l2 */
+ PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
+ filter1 = bnxt_get_unused_filter(bp);
+ if (filter1 == NULL)
+ return NULL;
+
+ filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+ filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
+ memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
+ memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
+ filter1);
+ if (rc) {
+ bnxt_free_filter(bp, filter1);
+ return NULL;
+ }
+ return filter1;
+}
+
+static int
+bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error,
+ struct bnxt_filter_info *filter)
+{
+ const struct rte_flow_action *act =
+ bnxt_flow_non_void_action(actions);
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_vf *act_vf;
+ struct bnxt_vnic_info *vnic, *vnic0;
+ struct bnxt_filter_info *filter1;
+ uint32_t vf = 0;
+ int dflt_vnic;
+ int rc;
+
+ if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Cannot create flow on RSS queues");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ rc =
+ bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
+ if (rc != 0)
+ goto ret;
+
+ rc = bnxt_flow_parse_attr(attr, error);
+ if (rc != 0)
+ goto ret;
+
+ /* Since we support ingress attribute only - right now. */
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
+
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ /* Allow this flow. Redirect to a VNIC. */
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ if (act_q->index >= bp->rx_nr_rings) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Invalid queue ID.");
+ rc = -rte_errno;
+ goto ret;
+ }
+ PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
+ if (vnic == NULL) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "No matching VNIC for queue ID.");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ filter->dst_id = vnic->fw_vnic_id;
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ PMD_DRV_LOG(DEBUG, "VNIC found\n");
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ filter->flags =
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
+ else
+ filter->flags =
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VF:
+ act_vf = (const struct rte_flow_action_vf *)act->conf;
+ vf = act_vf->id;
+
+ if (!BNXT_PF(bp)) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Configuring on a VF!");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ if (vf >= bp->pdev->max_vfs) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Incorrect VF id!");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ filter->mirror_vnic_id =
+ dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
+ if (dflt_vnic < 0) {
+ /* This simply indicates there's no driver loaded.
+ * This is not an error.
+ */
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Unable to get default VNIC for VF");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ filter->mirror_vnic_id = dflt_vnic;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ break;
+
+ default:
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Invalid action.");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ if (filter1) {
+ bnxt_free_filter(bp, filter1);
+ filter1->fw_l2_filter_id = -1;
+ }
+
+ act = bnxt_flow_non_void_action(++act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Invalid action.");
+ rc = -rte_errno;
+ goto ret;
+ }
+ret:
+ return rc;
+}
+
+static int
+bnxt_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_filter_info *filter;
+ int ret = 0;
+
+ ret = bnxt_flow_args_validate(attr, pattern, actions, error);
+ if (ret != 0)
+ return ret;
+
+ filter = bnxt_get_unused_filter(bp);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
+ return -ENOMEM;
+ }
+
+ ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
+ error, filter);
+ /* No need to hold on to this filter if we are just validating flow */
+ filter->fw_l2_filter_id = UINT64_MAX;
+ bnxt_free_filter(bp, filter);
+
+ return ret;
+}
+
+static int
+bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
+{
+ struct bnxt_filter_info *mf;
+ struct rte_flow *flow;
+ int i;
+
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+ mf = flow->filter;
+
+ if (mf->filter_type == nf->filter_type &&
+ mf->flags == nf->flags &&
+ mf->src_port == nf->src_port &&
+ mf->src_port_mask == nf->src_port_mask &&
+ mf->dst_port == nf->dst_port &&
+ mf->dst_port_mask == nf->dst_port_mask &&
+ mf->ip_protocol == nf->ip_protocol &&
+ mf->ip_addr_type == nf->ip_addr_type &&
+ mf->ethertype == nf->ethertype &&
+ mf->vni == nf->vni &&
+ mf->tunnel_type == nf->tunnel_type &&
+ mf->l2_ovlan == nf->l2_ovlan &&
+ mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
+ mf->l2_ivlan == nf->l2_ivlan &&
+ mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
+ !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
+ !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->src_macaddr, nf->src_macaddr,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->dst_macaddr, nf->dst_macaddr,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->src_ipaddr, nf->src_ipaddr,
+ sizeof(nf->src_ipaddr)) &&
+ !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
+ sizeof(nf->src_ipaddr_mask)) &&
+ !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
+ sizeof(nf->dst_ipaddr)) &&
+ !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
+ sizeof(nf->dst_ipaddr_mask))) {
+ if (mf->dst_id == nf->dst_id)
+ return -EEXIST;
+ /*
+ * Same Flow, Different queue
+ * Clear the old ntuple filter
+ * Reuse the matching L2 filter
+ * ID for the new filter
+ */
+ nf->fw_l2_filter_id = mf->fw_l2_filter_id;
+ if (nf->filter_type == HWRM_CFA_EM_FILTER)
+ bnxt_hwrm_clear_em_filter(bp, mf);
+ if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ bnxt_hwrm_clear_ntuple_filter(bp, mf);
+ /* Free the old filter, update flow
+ * with new filter
+ */
+ bnxt_free_filter(bp, mf);
+ flow->filter = nf;
+ return -EXDEV;
+ }
+ }
+ }
+ return 0;
+}
+
+static struct rte_flow *
+bnxt_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_filter_info *filter;
+ struct bnxt_vnic_info *vnic = NULL;
+ bool update_flow = false;
+ struct rte_flow *flow;
+ unsigned int i;
+ int ret = 0;
+
+ flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return flow;
+ }
+
+ ret = bnxt_flow_args_validate(attr, pattern, actions, error);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Not a validate flow.\n");
+ goto free_flow;
+ }
+
+ filter = bnxt_get_unused_filter(bp);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
+ goto free_flow;
+ }
+
+ ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
+ error, filter);
+ if (ret != 0)
+ goto free_filter;
+
+ ret = bnxt_match_filter(bp, filter);
+ if (ret == -EEXIST) {
+ PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
+ /* Clear the filter that was created as part of
+ * validate_and_parse_flow() above
+ */
+ bnxt_hwrm_clear_l2_filter(bp, filter);
+ goto free_filter;
+ } else if (ret == -EXDEV) {
+ PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
+ PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
+ update_flow = true;
+ }
+
+ if (filter->filter_type == HWRM_CFA_EM_FILTER) {
+ filter->enables |=
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
+ }
+
+ if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
+ filter->enables |=
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
+ }
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ if (filter->dst_id == vnic->fw_vnic_id)
+ break;
+ }
+
+ if (!ret) {
+ flow->filter = filter;
+ flow->vnic = vnic;
+ if (update_flow) {
+ ret = -EXDEV;
+ goto free_flow;
+ }
+ PMD_DRV_LOG(ERR, "Successfully created flow.\n");
+ STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
+ return flow;
+ }
+free_filter:
+ bnxt_free_filter(bp, filter);
+free_flow:
+ if (ret == -EEXIST)
+ rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Matching Flow exists.");
+ else if (ret == -EXDEV)
+ rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Flow with pattern exists, updating destination queue");
+ else
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ flow = NULL;
+ return flow;
+}
+
+static int
+bnxt_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_filter_info *filter = flow->filter;
+ struct bnxt_vnic_info *vnic = flow->vnic;
+ int ret = 0;
+
+ ret = bnxt_match_filter(bp, filter);
+ if (ret == 0)
+ PMD_DRV_LOG(ERR, "Could not find matching flow\n");
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ ret = bnxt_hwrm_clear_em_filter(bp, filter);
+ if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+ else
+ ret = bnxt_hwrm_clear_l2_filter(bp, filter);
+ if (!ret) {
+ STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
+ rte_free(flow);
+ } else {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+ }
+
+ return ret;
+}
+
+static int
+bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+ struct rte_flow *flow;
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+ struct bnxt_filter_info *filter = flow->filter;
+
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ ret = bnxt_hwrm_clear_em_filter(bp, filter);
+ if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+
+ if (ret) {
+ rte_flow_error_set
+ (error,
+ -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Failed to flush flow in HW.");
+ return -rte_errno;
+ }
+
+ STAILQ_REMOVE(&vnic->flow_list, flow,
+ rte_flow, next);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
+
+const struct rte_flow_ops bnxt_flow_ops = {
+ .validate = bnxt_flow_validate,
+ .create = bnxt_flow_create,
+ .destroy = bnxt_flow_destroy,
+ .flush = bnxt_flow_flush,
+};
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.c
new file mode 100644
index 00000000..c682488a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.c
@@ -0,0 +1,3947 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <unistd.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_version.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
+#include "bnxt_ring.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+#include <rte_io.h>
+
+#define HWRM_CMD_TIMEOUT 10000
+#define HWRM_SPEC_CODE_1_8_3 0x10803
+#define HWRM_VERSION_1_9_1 0x10901
+
+struct bnxt_plcmodes_cfg {
+ uint32_t flags;
+ uint16_t jumbo_thresh;
+ uint16_t hds_offset;
+ uint16_t hds_threshold;
+};
+
+static int page_getenum(size_t size)
+{
+ if (size <= 1 << 4)
+ return 4;
+ if (size <= 1 << 12)
+ return 12;
+ if (size <= 1 << 13)
+ return 13;
+ if (size <= 1 << 16)
+ return 16;
+ if (size <= 1 << 21)
+ return 21;
+ if (size <= 1 << 22)
+ return 22;
+ if (size <= 1 << 30)
+ return 30;
+ PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
+ return sizeof(void *) * 8 - 1;
+}
+
+static int page_roundup(size_t size)
+{
+ return 1 << page_getenum(size);
+}
+
+/*
+ * HWRM Functions (sent to HWRM)
+ * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
+ * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
+ * command was failed by the ChiMP.
+ */
+
+static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
+ uint32_t msg_len)
+{
+ unsigned int i;
+ struct input *req = msg;
+ struct output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t *data = msg;
+ uint8_t *bar;
+ uint8_t *valid;
+ uint16_t max_req_len = bp->max_req_len;
+ struct hwrm_short_input short_input = { 0 };
+
+ if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
+
+ memset(short_cmd_req, 0, bp->max_req_len);
+ memcpy(short_cmd_req, req, msg_len);
+
+ short_input.req_type = rte_cpu_to_le_16(req->req_type);
+ short_input.signature = rte_cpu_to_le_16(
+ HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
+ short_input.size = rte_cpu_to_le_16(msg_len);
+ short_input.req_addr =
+ rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
+
+ data = (uint32_t *)&short_input;
+ msg_len = sizeof(short_input);
+
+ /* Sync memory write before updating doorbell */
+ rte_wmb();
+
+ max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
+ }
+
+ /* Write request msg to hwrm channel */
+ for (i = 0; i < msg_len; i += 4) {
+ bar = (uint8_t *)bp->bar0 + i;
+ rte_write32(*data, bar);
+ data++;
+ }
+
+ /* Zero the rest of the request space */
+ for (; i < max_req_len; i += 4) {
+ bar = (uint8_t *)bp->bar0 + i;
+ rte_write32(0, bar);
+ }
+
+ /* Ring channel doorbell */
+ bar = (uint8_t *)bp->bar0 + 0x100;
+ rte_write32(1, bar);
+
+ /* Poll for the valid bit */
+ for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
+ /* Sanity check on the resp->resp_len */
+ rte_rmb();
+ if (resp->resp_len && resp->resp_len <=
+ bp->max_resp_len) {
+ /* Last byte of resp contains the valid key */
+ valid = (uint8_t *)resp + resp->resp_len - 1;
+ if (*valid == HWRM_RESP_VALID_KEY)
+ break;
+ }
+ rte_delay_us(600);
+ }
+
+ if (i >= HWRM_CMD_TIMEOUT) {
+ PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
+ req->req_type);
+ goto err_ret;
+ }
+ return 0;
+
+err_ret:
+ return -1;
+}
+
+/*
+ * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
+ * spinlock, and does initial processing.
+ *
+ * HWRM_CHECK_RESULT() returns errors on failure and may not be used. It
+ * releases the spinlock only if it returns. If the regular int return codes
+ * are not used by the function, HWRM_CHECK_RESULT() should not be used
+ * directly, rather it should be copied and modified to suit the function.
+ *
+ * HWRM_UNLOCK() must be called after all response processing is completed.
+ */
+#define HWRM_PREP(req, type) do { \
+ rte_spinlock_lock(&bp->hwrm_lock); \
+ memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
+ req.req_type = rte_cpu_to_le_16(HWRM_##type); \
+ req.cmpl_ring = rte_cpu_to_le_16(-1); \
+ req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
+ req.target_id = rte_cpu_to_le_16(0xffff); \
+ req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
+} while (0)
+
+#define HWRM_CHECK_RESULT_SILENT() do {\
+ if (rc) { \
+ rte_spinlock_unlock(&bp->hwrm_lock); \
+ return rc; \
+ } \
+ if (resp->error_code) { \
+ rc = rte_le_to_cpu_16(resp->error_code); \
+ rte_spinlock_unlock(&bp->hwrm_lock); \
+ return rc; \
+ } \
+} while (0)
+
+#define HWRM_CHECK_RESULT() do {\
+ if (rc) { \
+ PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
+ rte_spinlock_unlock(&bp->hwrm_lock); \
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+ rc = -EACCES; \
+ else if (rc > 0) \
+ rc = -EINVAL; \
+ return rc; \
+ } \
+ if (resp->error_code) { \
+ rc = rte_le_to_cpu_16(resp->error_code); \
+ if (resp->resp_len >= 16) { \
+ struct hwrm_err_output *tmp_hwrm_err_op = \
+ (void *)resp; \
+ PMD_DRV_LOG(ERR, \
+ "error %d:%d:%08x:%04x\n", \
+ rc, tmp_hwrm_err_op->cmd_err, \
+ rte_le_to_cpu_32(\
+ tmp_hwrm_err_op->opaque_0), \
+ rte_le_to_cpu_16(\
+ tmp_hwrm_err_op->opaque_1)); \
+ } else { \
+ PMD_DRV_LOG(ERR, "error %d\n", rc); \
+ } \
+ rte_spinlock_unlock(&bp->hwrm_lock); \
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+ rc = -EACCES; \
+ else if (rc > 0) \
+ rc = -EINVAL; \
+ return rc; \
+ } \
+} while (0)
+
+#define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock)
+
+int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
+ struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, CFA_L2_SET_RX_MASK);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.mask = 0;
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ uint16_t vlan_count,
+ struct bnxt_vlan_table_entry *vlan_table)
+{
+ int rc = 0;
+ struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
+ struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t mask = 0;
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ return rc;
+
+ HWRM_PREP(req, CFA_L2_SET_RX_MASK);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+ /* FIXME add multicast flag, when multicast adding options is supported
+ * by ethtool.
+ */
+ if (vnic->flags & BNXT_VNIC_INFO_BCAST)
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
+ if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
+ if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
+ if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+ if (vnic->flags & BNXT_VNIC_INFO_MCAST)
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
+ if (vnic->mc_addr_cnt) {
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
+ req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
+ req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
+ }
+ if (vlan_table) {
+ if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
+ req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
+ rte_mem_virt2iova(vlan_table));
+ req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
+ }
+ req.mask = rte_cpu_to_le_32(mask);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
+ uint16_t vlan_count,
+ struct bnxt_vlan_antispoof_table_entry *vlan_table)
+{
+ int rc = 0;
+ struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
+ struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ /*
+ * Older HWRM versions did not support this command, and the set_rx_mask
+ * list was used for anti-spoof. In 1.8.0, the TX path configuration was
+ * removed from set_rx_mask call, and this command was added.
+ *
+ * This command is also present from 1.7.8.11 and higher,
+ * as well as 1.7.8.0
+ */
+ if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
+ if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
+ if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
+ (11)))
+ return 0;
+ }
+ }
+ HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
+ req.fid = rte_cpu_to_le_16(fid);
+
+ req.vlan_tag_mask_tbl_addr =
+ rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
+ req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
+ struct bnxt_filter_info *filter)
+{
+ int rc = 0;
+ struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
+ struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (filter->fw_l2_filter_id == UINT64_MAX)
+ return 0;
+
+ HWRM_PREP(req, CFA_L2_FILTER_FREE);
+
+ req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ filter->fw_l2_filter_id = UINT64_MAX;
+
+ return 0;
+}
+
+int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
+ uint16_t dst_id,
+ struct bnxt_filter_info *filter)
+{
+ int rc = 0;
+ struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
+ struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ const struct rte_eth_vmdq_rx_conf *conf =
+ &dev_conf->rx_adv_conf.vmdq_rx_conf;
+ uint32_t enables = 0;
+ uint16_t j = dst_id - 1;
+
+ //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
+ if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+ conf->pool_map[j].pools & (1UL << j)) {
+ PMD_DRV_LOG(DEBUG,
+ "Add vlan %u to vmdq pool %u\n",
+ conf->pool_map[j].vlan_id, j);
+
+ filter->l2_ivlan = conf->pool_map[j].vlan_id;
+ filter->enables |=
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
+ }
+
+ if (filter->fw_l2_filter_id != UINT64_MAX)
+ bnxt_hwrm_clear_l2_filter(bp, filter);
+
+ HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
+
+ req.flags = rte_cpu_to_le_32(filter->flags);
+
+ enables = filter->enables |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
+ req.dst_id = rte_cpu_to_le_16(dst_id);
+
+ if (enables &
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
+ memcpy(req.l2_addr, filter->l2_addr,
+ ETHER_ADDR_LEN);
+ if (enables &
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
+ memcpy(req.l2_addr_mask, filter->l2_addr_mask,
+ ETHER_ADDR_LEN);
+ if (enables &
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
+ req.l2_ovlan = filter->l2_ovlan;
+ if (enables &
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
+ req.l2_ivlan = filter->l2_ivlan;
+ if (enables &
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
+ req.l2_ovlan_mask = filter->l2_ovlan_mask;
+ if (enables &
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
+ req.l2_ivlan_mask = filter->l2_ivlan_mask;
+ if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
+ req.src_id = rte_cpu_to_le_32(filter->src_id);
+ if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
+ req.src_type = filter->src_type;
+
+ req.enables = rte_cpu_to_le_32(enables);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
+{
+ struct hwrm_port_mac_cfg_input req = {.req_type = 0};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint32_t flags = 0;
+ int rc;
+
+ if (!ptp)
+ return 0;
+
+ HWRM_PREP(req, PORT_MAC_CFG);
+
+ if (ptp->rx_filter)
+ flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+ else
+ flags |=
+ HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+ if (ptp->tx_tstamp_en)
+ flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
+ else
+ flags |=
+ HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
+ req.flags = rte_cpu_to_le_32(flags);
+ req.enables = rte_cpu_to_le_32
+ (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+ req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
+ struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+/* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */
+ if (ptp)
+ return 0;
+
+ HWRM_PREP(req, PORT_MAC_PTP_QCFG);
+
+ req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
+ return 0;
+
+ ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
+ if (!ptp)
+ return -ENOMEM;
+
+ ptp->rx_regs[BNXT_PTP_RX_TS_L] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
+ ptp->rx_regs[BNXT_PTP_RX_TS_H] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
+ ptp->rx_regs[BNXT_PTP_RX_SEQ] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
+ ptp->rx_regs[BNXT_PTP_RX_FIFO] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
+ ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
+ ptp->tx_regs[BNXT_PTP_TX_TS_L] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
+ ptp->tx_regs[BNXT_PTP_TX_TS_H] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
+ ptp->tx_regs[BNXT_PTP_TX_SEQ] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
+ ptp->tx_regs[BNXT_PTP_TX_FIFO] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+
+ ptp->bp = bp;
+ bp->ptp_cfg = ptp;
+
+ return 0;
+}
+
+static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_qcaps_input req = {.req_type = 0 };
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t new_max_vfs;
+ uint32_t flags;
+ int i;
+
+ HWRM_PREP(req, FUNC_QCAPS);
+
+ req.fid = rte_cpu_to_le_16(0xffff);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+ flags = rte_le_to_cpu_32(resp->flags);
+ if (BNXT_PF(bp)) {
+ bp->pf.port_id = resp->port_id;
+ bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+ bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
+ new_max_vfs = bp->pdev->max_vfs;
+ if (new_max_vfs != bp->pf.max_vfs) {
+ if (bp->pf.vf_info)
+ rte_free(bp->pf.vf_info);
+ bp->pf.vf_info = rte_malloc("bnxt_vf_info",
+ sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
+ bp->pf.max_vfs = new_max_vfs;
+ for (i = 0; i < new_max_vfs; i++) {
+ bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
+ bp->pf.vf_info[i].vlan_table =
+ rte_zmalloc("VF VLAN table",
+ getpagesize(),
+ getpagesize());
+ if (bp->pf.vf_info[i].vlan_table == NULL)
+ PMD_DRV_LOG(ERR,
+ "Fail to alloc VLAN table for VF %d\n",
+ i);
+ else
+ rte_mem_lock_page(
+ bp->pf.vf_info[i].vlan_table);
+ bp->pf.vf_info[i].vlan_as_table =
+ rte_zmalloc("VF VLAN AS table",
+ getpagesize(),
+ getpagesize());
+ if (bp->pf.vf_info[i].vlan_as_table == NULL)
+ PMD_DRV_LOG(ERR,
+ "Alloc VLAN AS table for VF %d fail\n",
+ i);
+ else
+ rte_mem_lock_page(
+ bp->pf.vf_info[i].vlan_as_table);
+ STAILQ_INIT(&bp->pf.vf_info[i].filter);
+ }
+ }
+ }
+
+ bp->fw_fid = rte_le_to_cpu_32(resp->fid);
+ memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
+ bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+ bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+ bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+ bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+ bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ /* TODO: For now, do not support VMDq/RFS on VFs. */
+ if (BNXT_PF(bp)) {
+ if (bp->pf.max_vfs)
+ bp->max_vnics = 1;
+ else
+ bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ } else {
+ bp->max_vnics = 1;
+ }
+ bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
+ if (BNXT_PF(bp)) {
+ bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
+ bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
+ PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
+ HWRM_UNLOCK();
+ bnxt_hwrm_ptp_qcfg(bp);
+ }
+ }
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+ int rc;
+
+ rc = __bnxt_hwrm_func_qcaps(bp);
+ if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
+ rc = bnxt_hwrm_func_resc_qcaps(bp);
+ if (!rc)
+ bp->flags |= BNXT_FLAG_NEW_RM;
+ }
+
+ return rc;
+}
+
+int bnxt_hwrm_func_reset(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_reset_input req = {.req_type = 0 };
+ struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_RESET);
+
+ req.enables = rte_cpu_to_le_32(0);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_func_driver_register(struct bnxt *bp)
+{
+ int rc;
+ struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
+ struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (bp->flags & BNXT_FLAG_REGISTERED)
+ return 0;
+
+ HWRM_PREP(req, FUNC_DRV_RGTR);
+ req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
+ HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
+ req.ver_maj = RTE_VER_YEAR;
+ req.ver_min = RTE_VER_MONTH;
+ req.ver_upd = RTE_VER_MINOR;
+
+ if (BNXT_PF(bp)) {
+ req.enables |= rte_cpu_to_le_32(
+ HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
+ memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
+ RTE_MIN(sizeof(req.vf_req_fwd),
+ sizeof(bp->pf.vf_req_fwd)));
+
+ /*
+ * PF can sniff HWRM API issued by VF. This can be set up by
+ * linux driver and inherited by the DPDK PF driver. Clear
+ * this HWRM sniffer list in FW because DPDK PF driver does
+ * not support this.
+ */
+ req.flags =
+ rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
+ }
+
+ req.async_event_fwd[0] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
+ ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
+ ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
+ req.async_event_fwd[1] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
+ ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ bp->flags |= BNXT_FLAG_REGISTERED;
+
+ return rc;
+}
+
+int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
+{
+ if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
+ return 0;
+
+ return bnxt_hwrm_func_reserve_vf_resc(bp, true);
+}
+
+int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
+{
+ int rc;
+ uint32_t flags = 0;
+ uint32_t enables;
+ struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_vf_cfg_input req = {0};
+
+ HWRM_PREP(req, FUNC_VF_CFG);
+
+ req.enables = rte_cpu_to_le_32
+ (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);
+
+ req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
+ req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
+ AGG_RING_MULTIPLIER);
+ req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
+ req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
+ bp->tx_nr_rings);
+ req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
+ req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
+ if (bp->vf_resv_strategy ==
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
+ enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
+ req.enables |= rte_cpu_to_le_32(enables);
+ req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
+ req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
+ req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
+ }
+
+ if (test)
+ flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
+
+ req.flags = rte_cpu_to_le_32(flags);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ if (test)
+ HWRM_CHECK_RESULT_SILENT();
+ else
+ HWRM_CHECK_RESULT();
+
+ HWRM_UNLOCK();
+ return rc;
+}
+
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+{
+ int rc;
+ struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_resource_qcaps_input req = {0};
+
+ HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
+ req.fid = rte_cpu_to_le_16(0xffff);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ if (BNXT_VF(bp)) {
+ bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+ bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+ bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+ bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+ bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+ bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
+ }
+ bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
+ if (bp->vf_resv_strategy >
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
+ bp->vf_resv_strategy =
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
+
+ HWRM_UNLOCK();
+ return rc;
+}
+
+int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_ver_get_input req = {.req_type = 0 };
+ struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t my_version;
+ uint32_t fw_version;
+ uint16_t max_resp_len;
+ char type[RTE_MEMZONE_NAMESIZE];
+ uint32_t dev_caps_cfg;
+
+ bp->max_req_len = HWRM_MAX_REQ_LEN;
+ HWRM_PREP(req, VER_GET);
+
+ req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
+ resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
+ bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
+ (resp->hwrm_fw_min_8b << 16) |
+ (resp->hwrm_fw_bld_8b << 8) |
+ resp->hwrm_fw_rsvd_8b;
+ PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
+ HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
+
+ my_version = HWRM_VERSION_MAJOR << 16;
+ my_version |= HWRM_VERSION_MINOR << 8;
+ my_version |= HWRM_VERSION_UPDATE;
+
+ fw_version = resp->hwrm_intf_maj_8b << 16;
+ fw_version |= resp->hwrm_intf_min_8b << 8;
+ fw_version |= resp->hwrm_intf_upd_8b;
+ bp->hwrm_spec_code = fw_version;
+
+ if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
+ PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (my_version != fw_version) {
+ PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
+ if (my_version < fw_version) {
+ PMD_DRV_LOG(INFO,
+ "Firmware API version is newer than driver.\n");
+ PMD_DRV_LOG(INFO,
+ "The driver may be missing features.\n");
+ } else {
+ PMD_DRV_LOG(INFO,
+ "Firmware API version is older than driver.\n");
+ PMD_DRV_LOG(INFO,
+ "Not all driver features may be functional.\n");
+ }
+ }
+
+ if (bp->max_req_len > resp->max_req_win_len) {
+ PMD_DRV_LOG(ERR, "Unsupported request length\n");
+ rc = -EINVAL;
+ }
+ bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
+ max_resp_len = resp->max_resp_len;
+ dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
+
+ if (bp->max_resp_len != max_resp_len) {
+ sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
+ bp->pdev->addr.domain, bp->pdev->addr.bus,
+ bp->pdev->addr.devid, bp->pdev->addr.function);
+
+ rte_free(bp->hwrm_cmd_resp_addr);
+
+ bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
+ if (bp->hwrm_cmd_resp_addr == NULL) {
+ rc = -ENOMEM;
+ goto error;
+ }
+ rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
+ bp->hwrm_cmd_resp_dma_addr =
+ rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+ if (bp->hwrm_cmd_resp_dma_addr == 0) {
+ PMD_DRV_LOG(ERR,
+ "Unable to map response buffer to physical memory.\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+ bp->max_resp_len = max_resp_len;
+ }
+
+ if ((dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
+ PMD_DRV_LOG(DEBUG, "Short command supported\n");
+
+ rte_free(bp->hwrm_short_cmd_req_addr);
+
+ bp->hwrm_short_cmd_req_addr = rte_malloc(type,
+ bp->max_req_len, 0);
+ if (bp->hwrm_short_cmd_req_addr == NULL) {
+ rc = -ENOMEM;
+ goto error;
+ }
+ rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
+ bp->hwrm_short_cmd_req_dma_addr =
+ rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
+ if (bp->hwrm_short_cmd_req_dma_addr == 0) {
+ rte_free(bp->hwrm_short_cmd_req_addr);
+ PMD_DRV_LOG(ERR,
+ "Unable to map buffer to physical memory.\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ bp->flags |= BNXT_FLAG_SHORT_CMD;
+ }
+
+error:
+ HWRM_UNLOCK();
+ return rc;
+}
+
+int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
+{
+ int rc;
+ struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
+ struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (!(bp->flags & BNXT_FLAG_REGISTERED))
+ return 0;
+
+ HWRM_PREP(req, FUNC_DRV_UNRGTR);
+ req.flags = flags;
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ bp->flags &= ~BNXT_FLAG_REGISTERED;
+
+ return rc;
+}
+
+static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
+{
+ int rc = 0;
+ struct hwrm_port_phy_cfg_input req = {0};
+ struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t enables = 0;
+
+ HWRM_PREP(req, PORT_PHY_CFG);
+
+ if (conf->link_up) {
+ /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
+ if (bp->link_info.auto_mode && conf->link_speed) {
+ req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
+ PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
+ }
+
+ req.flags = rte_cpu_to_le_32(conf->phy_flags);
+ req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
+ /*
+ * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
+ * any auto mode, even "none".
+ */
+ if (!conf->link_speed) {
+ /* No speeds specified. Enable AutoNeg - all speeds */
+ req.auto_mode =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
+ }
+ /* AutoNeg - Advertise speeds specified. */
+ if (conf->auto_link_speed_mask &&
+ !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
+ req.auto_mode =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
+ req.auto_link_speed_mask =
+ conf->auto_link_speed_mask;
+ enables |=
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
+ }
+
+ req.auto_duplex = conf->duplex;
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
+ req.auto_pause = conf->auto_pause;
+ req.force_pause = conf->force_pause;
+ /* Set force_pause if there is no auto or if there is a force */
+ if (req.auto_pause && !req.force_pause)
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
+ else
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
+
+ req.enables = rte_cpu_to_le_32(enables);
+ } else {
+ req.flags =
+ rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
+ PMD_DRV_LOG(INFO, "Force Link Down\n");
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
+ struct bnxt_link_info *link_info)
+{
+ int rc = 0;
+ struct hwrm_port_phy_qcfg_input req = {0};
+ struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, PORT_PHY_QCFG);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ link_info->phy_link_status = resp->link;
+ link_info->link_up =
+ (link_info->phy_link_status ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
+ link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
+ link_info->duplex = resp->duplex_cfg;
+ link_info->pause = resp->pause;
+ link_info->auto_pause = resp->auto_pause;
+ link_info->force_pause = resp->force_pause;
+ link_info->auto_mode = resp->auto_mode;
+ link_info->phy_type = resp->phy_type;
+ link_info->media_type = resp->media_type;
+
+ link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
+ link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
+ link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
+ link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
+ link_info->phy_ver[0] = resp->phy_maj;
+ link_info->phy_ver[1] = resp->phy_min;
+ link_info->phy_ver[2] = resp->phy_bld;
+
+ HWRM_UNLOCK();
+
+ PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
+ PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
+ PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
+ PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
+ PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
+ link_info->auto_link_speed_mask);
+ PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
+ link_info->force_link_speed);
+
+ return rc;
+}
+
+int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
+ struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int i;
+
+ HWRM_PREP(req, QUEUE_QPORTCFG);
+
+ req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
+ /* HWRM Version >= 1.9.1 */
+ if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
+ req.drv_qmap_cap =
+ HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+#define GET_QUEUE_INFO(x) \
+ bp->cos_queue[x].id = resp->queue_id##x; \
+ bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
+
+ GET_QUEUE_INFO(0);
+ GET_QUEUE_INFO(1);
+ GET_QUEUE_INFO(2);
+ GET_QUEUE_INFO(3);
+ GET_QUEUE_INFO(4);
+ GET_QUEUE_INFO(5);
+ GET_QUEUE_INFO(6);
+ GET_QUEUE_INFO(7);
+
+ HWRM_UNLOCK();
+
+ if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
+ bp->tx_cosq_id = bp->cos_queue[0].id;
+ } else {
+ /* iterate and find the COSq profile to use for Tx */
+ for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
+ if (bp->cos_queue[i].profile ==
+ HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
+ bp->tx_cosq_id = bp->cos_queue[i].id;
+ break;
+ }
+ }
+ }
+ PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
+
+ return rc;
+}
+
+int bnxt_hwrm_ring_alloc(struct bnxt *bp,
+ struct bnxt_ring *ring,
+ uint32_t ring_type, uint32_t map_index,
+ uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
+{
+ int rc = 0;
+ uint32_t enables = 0;
+ struct hwrm_ring_alloc_input req = {.req_type = 0 };
+ struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, RING_ALLOC);
+
+ req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
+ req.fbo = rte_cpu_to_le_32(0);
+ /* Association of ring index with doorbell index */
+ req.logical_id = rte_cpu_to_le_16(map_index);
+ req.length = rte_cpu_to_le_32(ring->ring_size);
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
+ req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
+ /* FALLTHROUGH */
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
+ req.ring_type = ring_type;
+ req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
+ req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
+ if (stats_ctx_id != INVALID_STATS_CTX_ID)
+ enables |=
+ HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
+ break;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
+ req.ring_type = ring_type;
+ /*
+ * TODO: Some HWRM versions crash with
+ * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
+ */
+ req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
+ ring_type);
+ HWRM_UNLOCK();
+ return -1;
+ }
+ req.enables = rte_cpu_to_le_32(enables);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ if (rc || resp->error_code) {
+ if (rc == 0 && resp->error_code)
+ rc = rte_le_to_cpu_16(resp->error_code);
+ switch (ring_type) {
+ case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
+ PMD_DRV_LOG(ERR,
+ "hwrm_ring_alloc cp failed. rc:%d\n", rc);
+ HWRM_UNLOCK();
+ return rc;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
+ PMD_DRV_LOG(ERR,
+ "hwrm_ring_alloc rx failed. rc:%d\n", rc);
+ HWRM_UNLOCK();
+ return rc;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
+ PMD_DRV_LOG(ERR,
+ "hwrm_ring_alloc tx failed. rc:%d\n", rc);
+ HWRM_UNLOCK();
+ return rc;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
+ HWRM_UNLOCK();
+ return rc;
+ }
+ }
+
+ ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
+ HWRM_UNLOCK();
+ return rc;
+}
+
+int bnxt_hwrm_ring_free(struct bnxt *bp,
+ struct bnxt_ring *ring, uint32_t ring_type)
+{
+ int rc;
+ struct hwrm_ring_free_input req = {.req_type = 0 };
+ struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, RING_FREE);
+
+ req.ring_type = ring_type;
+ req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ if (rc || resp->error_code) {
+ if (rc == 0 && resp->error_code)
+ rc = rte_le_to_cpu_16(resp->error_code);
+ HWRM_UNLOCK();
+
+ switch (ring_type) {
+ case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
+ PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
+ rc);
+ return rc;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
+ PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
+ rc);
+ return rc;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
+ PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
+ rc);
+ return rc;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
+ return rc;
+ }
+ }
+ HWRM_UNLOCK();
+ return 0;
+}
+
+int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
+{
+ int rc = 0;
+ struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
+ struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, RING_GRP_ALLOC);
+
+ req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
+ req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
+ req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
+ req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ bp->grp_info[idx].fw_grp_id =
+ rte_le_to_cpu_16(resp->ring_group_id);
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
+{
+ int rc;
+ struct hwrm_ring_grp_free_input req = {.req_type = 0 };
+ struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, RING_GRP_FREE);
+
+ req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
+ return rc;
+}
+
+int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ int rc = 0;
+ struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
+ struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
+ return rc;
+
+ HWRM_PREP(req, STAT_CTX_CLR_STATS);
+
+ req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ unsigned int idx __rte_unused)
+{
+ int rc;
+ struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
+ struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, STAT_CTX_ALLOC);
+
+ req.update_period_ms = rte_cpu_to_le_32(0);
+
+ req.stats_dma_addr =
+ rte_cpu_to_le_64(cpr->hw_stats_map);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ unsigned int idx __rte_unused)
+{
+ int rc;
+ struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
+ struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, STAT_CTX_FREE);
+
+ req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0, i, j;
+ struct hwrm_vnic_alloc_input req = { 0 };
+ struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ /* map ring groups to this vnic */
+ PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
+ vnic->start_grp_id, vnic->end_grp_id);
+ for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
+ vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
+
+ vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
+ vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE;
+ HWRM_PREP(req, VNIC_ALLOC);
+
+ if (vnic->func_default)
+ req.flags =
+ rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
+ HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ struct bnxt_plcmodes_cfg *pmode)
+{
+ int rc = 0;
+ struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_PLCMODES_QCFG);
+
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ pmode->flags = rte_le_to_cpu_32(resp->flags);
+ /* dflt_vnic bit doesn't exist in the _cfg command */
+ pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
+ pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
+ pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
+ pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ struct bnxt_plcmodes_cfg *pmode)
+{
+ int rc = 0;
+ struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_PLCMODES_CFG);
+
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.flags = rte_cpu_to_le_32(pmode->flags);
+ req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
+ req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
+ req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
+ );
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t ctx_enable_flag = 0;
+ struct bnxt_plcmodes_cfg pmodes;
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
+ return rc;
+ }
+
+ rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
+ if (rc)
+ return rc;
+
+ HWRM_PREP(req, VNIC_CFG);
+
+ /* Only RSS support for now TBD: COS & LB */
+ req.enables =
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
+ if (vnic->lb_rule != 0xffff)
+ ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
+ if (vnic->cos_rule != 0xffff)
+ ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
+ if (vnic->rss_rule != 0xffff) {
+ ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
+ ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
+ }
+ req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
+ req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
+ req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
+ req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
+ req.mru = rte_cpu_to_le_16(vnic->mru);
+ if (vnic->func_default)
+ req.flags |=
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
+ if (vnic->vlan_strip)
+ req.flags |=
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
+ if (vnic->bd_stall)
+ req.flags |=
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
+ if (vnic->roce_dual)
+ req.flags |= rte_cpu_to_le_32(
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
+ if (vnic->roce_only)
+ req.flags |= rte_cpu_to_le_32(
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
+ if (vnic->rss_dflt_cr)
+ req.flags |= rte_cpu_to_le_32(
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int16_t fw_vf_id)
+{
+ int rc = 0;
+ struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
+ return rc;
+ }
+ HWRM_PREP(req, VNIC_QCFG);
+
+ req.enables =
+ rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.vf_id = rte_cpu_to_le_16(fw_vf_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
+ vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
+ vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
+ vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
+ vnic->mru = rte_le_to_cpu_16(resp->mru);
+ vnic->func_default = rte_le_to_cpu_32(
+ resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
+ vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
+ vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
+ vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
+ vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
+ vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+ HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
+ struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ if (vnic->rss_rule == 0xffff) {
+ PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ return rc;
+ }
+ HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
+
+ req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ vnic->rss_rule = INVALID_HW_RING_ID;
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_free_input req = {.req_type = 0 };
+ struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
+ return rc;
+ }
+
+ HWRM_PREP(req, VNIC_FREE);
+
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ return rc;
+}
+
+int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_RSS_CFG);
+
+ req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+ req.hash_mode_flags = vnic->hash_mode;
+
+ req.ring_grp_tbl_addr =
+ rte_cpu_to_le_64(vnic->rss_table_dma_addr);
+ req.hash_key_tbl_addr =
+ rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+ req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t size;
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
+ return rc;
+ }
+
+ HWRM_PREP(req, VNIC_PLCMODES_CFG);
+
+ req.flags = rte_cpu_to_le_32(
+ HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
+
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
+
+ size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
+ size -= RTE_PKTMBUF_HEADROOM;
+
+ req.jumbo_thresh = rte_cpu_to_le_16(size);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool enable)
+{
+ int rc = 0;
+ struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_TPA_CFG);
+
+ if (enable) {
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
+ req.flags = rte_cpu_to_le_32(
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
+ req.max_agg_segs = rte_cpu_to_le_16(5);
+ req.max_aggs =
+ rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
+ req.min_agg_len = rte_cpu_to_le_32(512);
+ }
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.enables = rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+
+ HWRM_PREP(req, FUNC_CFG);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ bp->pf.vf_info[vf].random_mac = false;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
+ uint64_t *dropped)
+{
+ int rc = 0;
+ struct hwrm_func_qstats_input req = {.req_type = 0};
+ struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_QSTATS);
+
+ req.fid = rte_cpu_to_le_16(fid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ if (dropped)
+ *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
+ struct rte_eth_stats *stats)
+{
+ int rc = 0;
+ struct hwrm_func_qstats_input req = {.req_type = 0};
+ struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_QSTATS);
+
+ req.fid = rte_cpu_to_le_16(fid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+ stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+ stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+ stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+ stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+ stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+
+ stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+ stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+ stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+ stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+ stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+ stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+
+ stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
+ stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
+ stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
+{
+ int rc = 0;
+ struct hwrm_func_clr_stats_input req = {.req_type = 0};
+ struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_CLR_STATS);
+
+ req.fid = rte_cpu_to_le_16(fid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+/*
+ * HWRM utility functions
+ */
+
+int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
+{
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq;
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+
+ if (i >= bp->rx_cp_nr_rings) {
+ txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
+ cpr = txq->cp_ring;
+ } else {
+ rxq = bp->rx_queues[i];
+ cpr = rxq->cp_ring;
+ }
+
+ rc = bnxt_hwrm_stat_clear(bp, cpr);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
+{
+ int rc;
+ unsigned int i;
+ struct bnxt_cp_ring_info *cpr;
+
+ for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
+
+ if (i >= bp->rx_cp_nr_rings) {
+ cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
+ } else {
+ cpr = bp->rx_queues[i]->cp_ring;
+ bp->grp_info[i].fw_stats_ctx = -1;
+ }
+ if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
+ rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
+ cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+ if (rc)
+ return rc;
+ }
+ }
+ return 0;
+}
+
+int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
+{
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq;
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+
+ if (i >= bp->rx_cp_nr_rings) {
+ txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
+ cpr = txq->cp_ring;
+ } else {
+ rxq = bp->rx_queues[i];
+ cpr = rxq->cp_ring;
+ }
+
+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
+
+ if (rc)
+ return rc;
+ }
+ return rc;
+}
+
+int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
+{
+ uint16_t idx;
+ uint32_t rc = 0;
+
+ for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
+
+ if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
+ continue;
+
+ rc = bnxt_hwrm_ring_grp_free(bp, idx);
+
+ if (rc)
+ return rc;
+ }
+ return rc;
+}
+
+static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+
+ bnxt_hwrm_ring_free(bp, cp_ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
+ cp_ring->fw_ring_id = INVALID_HW_RING_ID;
+ memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
+ sizeof(*cpr->cp_desc_ring));
+ cpr->cp_raw_cons = 0;
+}
+
+void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
+{
+ struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_ring *ring = rxr->rx_ring_struct;
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
+ memset(rxr->rx_desc_ring, 0,
+ rxr->rx_ring_struct->ring_size *
+ sizeof(*rxr->rx_desc_ring));
+ memset(rxr->rx_buf_ring, 0,
+ rxr->rx_ring_struct->ring_size *
+ sizeof(*rxr->rx_buf_ring));
+ rxr->rx_prod = 0;
+ }
+ ring = rxr->ag_ring_struct;
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ memset(rxr->ag_buf_ring, 0,
+ rxr->ag_ring_struct->ring_size *
+ sizeof(*rxr->ag_buf_ring));
+ rxr->ag_prod = 0;
+ bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
+ }
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ bnxt_free_cp_ring(bp, cpr);
+
+ bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+int bnxt_free_all_hwrm_rings(struct bnxt *bp)
+{
+ unsigned int i;
+
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_TX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ memset(txr->tx_desc_ring, 0,
+ txr->tx_ring_struct->ring_size *
+ sizeof(*txr->tx_desc_ring));
+ memset(txr->tx_buf_ring, 0,
+ txr->tx_ring_struct->ring_size *
+ sizeof(*txr->tx_buf_ring));
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+ }
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_free_cp_ring(bp, cpr);
+ cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++)
+ bnxt_free_hwrm_rx_ring(bp, i);
+
+ return 0;
+}
+
+int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
+{
+ uint16_t i;
+ uint32_t rc = 0;
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ rc = bnxt_hwrm_ring_grp_alloc(bp, i);
+ if (rc)
+ return rc;
+ }
+ return rc;
+}
+
+void bnxt_free_hwrm_resources(struct bnxt *bp)
+{
+ /* Release memzone */
+ rte_free(bp->hwrm_cmd_resp_addr);
+ rte_free(bp->hwrm_short_cmd_req_addr);
+ bp->hwrm_cmd_resp_addr = NULL;
+ bp->hwrm_short_cmd_req_addr = NULL;
+ bp->hwrm_cmd_resp_dma_addr = 0;
+ bp->hwrm_short_cmd_req_dma_addr = 0;
+}
+
+int bnxt_alloc_hwrm_resources(struct bnxt *bp)
+{
+ struct rte_pci_device *pdev = bp->pdev;
+ char type[RTE_MEMZONE_NAMESIZE];
+
+ sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
+ pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+ bp->max_resp_len = HWRM_MAX_RESP_LEN;
+ bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
+ rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
+ if (bp->hwrm_cmd_resp_addr == NULL)
+ return -ENOMEM;
+ bp->hwrm_cmd_resp_dma_addr =
+ rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+ if (bp->hwrm_cmd_resp_dma_addr == 0) {
+ PMD_DRV_LOG(ERR,
+ "unable to map response address to physical memory\n");
+ return -ENOMEM;
+ }
+ rte_spinlock_init(&bp->hwrm_lock);
+
+ return 0;
+}
+
+int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ struct bnxt_filter_info *filter;
+ int rc = 0;
+
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ rc = bnxt_hwrm_clear_em_filter(bp, filter);
+ else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+ else
+ rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+ STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
+ //if (rc)
+ //break;
+ }
+ return rc;
+}
+
+static int
+bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ struct bnxt_filter_info *filter;
+ struct rte_flow *flow;
+ int rc = 0;
+
+ STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+ filter = flow->filter;
+ PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ rc = bnxt_hwrm_clear_em_filter(bp, filter);
+ else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+ else
+ rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+
+ STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
+ rte_free(flow);
+ //if (rc)
+ //break;
+ }
+ return rc;
+}
+
+int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ struct bnxt_filter_info *filter;
+ int rc = 0;
+
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
+ filter);
+ else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
+ filter);
+ else
+ rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
+ filter);
+ if (rc)
+ break;
+ }
+ return rc;
+}
+
+void bnxt_free_tunnel_ports(struct bnxt *bp)
+{
+ if (bp->vxlan_port_cnt)
+ bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
+ bp->vxlan_port = 0;
+ if (bp->geneve_port_cnt)
+ bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
+ bp->geneve_port = 0;
+}
+
+void bnxt_free_all_hwrm_resources(struct bnxt *bp)
+{
+ int i;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ /*
+ * Cleanup VNICs in reverse order, to make sure the L2 filter
+ * from vnic0 is last to be cleaned up.
+ */
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ bnxt_clear_hwrm_vnic_flows(bp, vnic);
+
+ bnxt_clear_hwrm_vnic_filters(bp, vnic);
+
+ bnxt_hwrm_vnic_ctx_free(bp, vnic);
+
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
+
+ bnxt_hwrm_vnic_free(bp, vnic);
+
+ rte_free(vnic->fw_grp_ids);
+ }
+ /* Ring resources */
+ bnxt_free_all_hwrm_rings(bp);
+ bnxt_free_all_hwrm_ring_grps(bp);
+ bnxt_free_all_hwrm_stat_ctxs(bp);
+ bnxt_free_tunnel_ports(bp);
+}
+
+static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
+{
+ uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
+
+ if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+ return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
+
+ switch (conf_link_speed) {
+ case ETH_LINK_SPEED_10M_HD:
+ case ETH_LINK_SPEED_100M_HD:
+ /* FALLTHROUGH */
+ return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
+ }
+ return hw_link_duplex;
+}
+
+static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
+{
+ return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
+}
+
+static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
+{
+ uint16_t eth_link_speed = 0;
+
+ if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
+ return ETH_LINK_SPEED_AUTONEG;
+
+ switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
+ case ETH_LINK_SPEED_100M:
+ case ETH_LINK_SPEED_100M_HD:
+ /* FALLTHROUGH */
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
+ break;
+ case ETH_LINK_SPEED_1G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
+ break;
+ case ETH_LINK_SPEED_2_5G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
+ break;
+ case ETH_LINK_SPEED_10G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
+ break;
+ case ETH_LINK_SPEED_20G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
+ break;
+ case ETH_LINK_SPEED_25G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
+ break;
+ case ETH_LINK_SPEED_40G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
+ break;
+ case ETH_LINK_SPEED_50G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
+ break;
+ case ETH_LINK_SPEED_100G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
+ break;
+ default:
+ PMD_DRV_LOG(ERR,
+ "Unsupported link speed %d; default to AUTO\n",
+ conf_link_speed);
+ break;
+ }
+ return eth_link_speed;
+}
+
+#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
+ ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
+ ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
+
+static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
+{
+ uint32_t one_speed;
+
+ if (link_speed == ETH_LINK_SPEED_AUTONEG)
+ return 0;
+
+ if (link_speed & ETH_LINK_SPEED_FIXED) {
+ one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
+
+ if (one_speed & (one_speed - 1)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid advertised speeds (%u) for port %u\n",
+ link_speed, port_id);
+ return -EINVAL;
+ }
+ if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
+ PMD_DRV_LOG(ERR,
+ "Unsupported advertised speed (%u) for port %u\n",
+ link_speed, port_id);
+ return -EINVAL;
+ }
+ } else {
+ if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
+ PMD_DRV_LOG(ERR,
+ "Unsupported advertised speeds (%u) for port %u\n",
+ link_speed, port_id);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static uint16_t
+bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
+{
+ uint16_t ret = 0;
+
+ if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+ if (bp->link_info.support_speeds)
+ return bp->link_info.support_speeds;
+ link_speed = BNXT_SUPPORTED_SPEEDS;
+ }
+
+ if (link_speed & ETH_LINK_SPEED_100M)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
+ if (link_speed & ETH_LINK_SPEED_100M_HD)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
+ if (link_speed & ETH_LINK_SPEED_1G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
+ if (link_speed & ETH_LINK_SPEED_2_5G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
+ if (link_speed & ETH_LINK_SPEED_10G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
+ if (link_speed & ETH_LINK_SPEED_20G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
+ if (link_speed & ETH_LINK_SPEED_25G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
+ if (link_speed & ETH_LINK_SPEED_40G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
+ if (link_speed & ETH_LINK_SPEED_50G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
+ if (link_speed & ETH_LINK_SPEED_100G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
+ return ret;
+}
+
+static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
+{
+ uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
+
+ switch (hw_link_speed) {
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
+ eth_link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
+ eth_link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
+ eth_link_speed = ETH_SPEED_NUM_2_5G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
+ eth_link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
+ eth_link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
+ eth_link_speed = ETH_SPEED_NUM_25G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
+ eth_link_speed = ETH_SPEED_NUM_40G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
+ eth_link_speed = ETH_SPEED_NUM_50G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
+ eth_link_speed = ETH_SPEED_NUM_100G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
+ default:
+ PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
+ hw_link_speed);
+ break;
+ }
+ return eth_link_speed;
+}
+
+static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
+{
+ uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ switch (hw_link_duplex) {
+ case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
+ case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
+ /* FALLTHROUGH */
+ eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
+ eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
+ hw_link_duplex);
+ break;
+ }
+ return eth_link_duplex;
+}
+
+int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
+{
+ int rc = 0;
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Get link config failed with rc %d\n", rc);
+ goto exit;
+ }
+ if (link_info->link_speed)
+ link->link_speed =
+ bnxt_parse_hw_link_speed(link_info->link_speed);
+ else
+ link->link_speed = ETH_SPEED_NUM_NONE;
+ link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
+ link->link_status = link_info->link_up;
+ link->link_autoneg = link_info->auto_mode ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
+ ETH_LINK_FIXED : ETH_LINK_AUTONEG;
+exit:
+ return rc;
+}
+
+int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
+{
+ int rc = 0;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_link_info link_req;
+ uint16_t speed, autoneg;
+
+ if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
+ return 0;
+
+ rc = bnxt_valid_link_speed(dev_conf->link_speeds,
+ bp->eth_dev->data->port_id);
+ if (rc)
+ goto error;
+
+ memset(&link_req, 0, sizeof(link_req));
+ link_req.link_up = link_up;
+ if (!link_up)
+ goto port_phy_cfg;
+
+ autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
+ speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
+ link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
+ /* Autoneg can be done only when the FW allows */
+ if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
+ bp->link_info.force_link_speed)) {
+ link_req.phy_flags |=
+ HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
+ link_req.auto_link_speed_mask =
+ bnxt_parse_eth_link_speed_mask(bp,
+ dev_conf->link_speeds);
+ } else {
+ if (bp->link_info.phy_type ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
+ bp->link_info.phy_type ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
+ bp->link_info.media_type ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
+ PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
+ return -EINVAL;
+ }
+
+ link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
+ /* If user wants a particular speed try that first. */
+ if (speed)
+ link_req.link_speed = speed;
+ else if (bp->link_info.force_link_speed)
+ link_req.link_speed = bp->link_info.force_link_speed;
+ else
+ link_req.link_speed = bp->link_info.auto_link_speed;
+ }
+ link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
+ link_req.auto_pause = bp->link_info.auto_pause;
+ link_req.force_pause = bp->link_info.force_pause;
+
+port_phy_cfg:
+ rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Set link config failed with rc %d\n", rc);
+ }
+
+error:
+ return rc;
+}
+
+/* JIRA 22088 */
+int bnxt_hwrm_func_qcfg(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t flags;
+ int rc = 0;
+
+ HWRM_PREP(req, FUNC_QCFG);
+ req.fid = rte_cpu_to_le_16(0xffff);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ /* Hard Coded.. 0xfff VLAN ID mask */
+ bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
+ flags = rte_le_to_cpu_16(resp->flags);
+ if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
+ bp->flags |= BNXT_FLAG_MULTI_HOST;
+
+ switch (resp->port_partition_type) {
+ case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
+ case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
+ case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
+ /* FALLTHROUGH */
+ bp->port_partition_type = resp->port_partition_type;
+ break;
+ default:
+ bp->port_partition_type = 0;
+ break;
+ }
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
+ struct hwrm_func_qcaps_output *qcaps)
+{
+ qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
+ memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
+ sizeof(qcaps->mac_address));
+ qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
+ qcaps->max_rx_rings = fcfg->num_rx_rings;
+ qcaps->max_tx_rings = fcfg->num_tx_rings;
+ qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
+ qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
+ qcaps->max_vfs = 0;
+ qcaps->first_vf_id = 0;
+ qcaps->max_vnics = fcfg->num_vnics;
+ qcaps->max_decap_records = 0;
+ qcaps->max_encap_records = 0;
+ qcaps->max_tx_wm_flows = 0;
+ qcaps->max_tx_em_flows = 0;
+ qcaps->max_rx_wm_flows = 0;
+ qcaps->max_rx_em_flows = 0;
+ qcaps->max_flow_id = 0;
+ qcaps->max_mcast_filters = fcfg->num_mcast_filters;
+ qcaps->max_sp_tx_rings = 0;
+ qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
+}
+
+static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+ req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+ req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
+ req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS);
+ req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
+ req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
+ req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
+ req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
+ req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
+ req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
+ req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
+ req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
+ req.fid = rte_cpu_to_le_16(0xffff);
+
+ HWRM_PREP(req, FUNC_CFG);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+static void populate_vf_func_cfg_req(struct bnxt *bp,
+ struct hwrm_func_cfg_input *req,
+ int num_vfs)
+{
+ req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+
+ req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS);
+ req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS);
+ req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
+ (num_vfs + 1));
+ req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
+ req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
+ (num_vfs + 1));
+ req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
+ req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
+ req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
+ /* TODO: For now, do not support VMDq/RFS on VFs. */
+ req->num_vnics = rte_cpu_to_le_16(1);
+ req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
+ (num_vfs + 1));
+}
+
+static void add_random_mac_if_needed(struct bnxt *bp,
+ struct hwrm_func_cfg_input *cfg_req,
+ int vf)
+{
+ struct ether_addr mac;
+
+ if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
+ return;
+
+ if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
+ cfg_req->enables |=
+ rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+ eth_random_addr(cfg_req->dflt_mac_addr);
+ bp->pf.vf_info[vf].random_mac = true;
+ } else {
+ memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
+ }
+}
+
+static void reserve_resources_from_vf(struct bnxt *bp,
+ struct hwrm_func_cfg_input *cfg_req,
+ int vf)
+{
+ struct hwrm_func_qcaps_input req = {0};
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* Get the actual allocated values now */
+ HWRM_PREP(req, FUNC_QCAPS);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ if (rc) {
+ PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
+ copy_func_cfg_to_qcaps(cfg_req, resp);
+ } else if (resp->error_code) {
+ rc = rte_le_to_cpu_16(resp->error_code);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
+ copy_func_cfg_to_qcaps(cfg_req, resp);
+ }
+
+ bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
+ bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
+ bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
+ bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
+ bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
+ bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
+ /*
+ * TODO: While not supporting VMDq with VFs, max_vnics is always
+ * forced to 1 in this case
+ */
+ //bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
+ bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
+
+ HWRM_UNLOCK();
+}
+
+int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* Check for zero MAC address */
+ HWRM_PREP(req, FUNC_QCFG);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ if (rc) {
+ PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
+ return -1;
+ } else if (resp->error_code) {
+ rc = rte_le_to_cpu_16(resp->error_code);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
+ return -1;
+ }
+ rc = rte_le_to_cpu_16(resp->vlan);
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+static int update_pf_resource_max(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* And copy the allocated numbers into the pf struct */
+ HWRM_PREP(req, FUNC_QCFG);
+ req.fid = rte_cpu_to_le_16(0xffff);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT();
+
+ /* Only TX ring value reflects actual allocation? TODO */
+ bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+ bp->pf.evb_mode = resp->evb_mode;
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
+{
+ int rc;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
+ return -1;
+ }
+
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc)
+ return rc;
+
+ bp->pf.func_cfg_flags &=
+ ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
+ bp->pf.func_cfg_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
+ rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+ return rc;
+}
+
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int i;
+ size_t sz;
+ int rc = 0;
+ size_t req_buf_sz;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
+ return -1;
+ }
+
+ rc = bnxt_hwrm_func_qcaps(bp);
+
+ if (rc)
+ return rc;
+
+ bp->pf.active_vfs = num_vfs;
+
+ /*
+ * First, configure the PF to only use one TX ring. This ensures that
+ * there are enough rings for all VFs.
+ *
+ * If we don't do this, when we call func_alloc() later, we will lock
+ * extra rings to the PF that won't be available during func_cfg() of
+ * the VFs.
+ *
+ * This has been fixed with firmware versions above 20.6.54
+ */
+ bp->pf.func_cfg_flags &=
+ ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
+ bp->pf.func_cfg_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
+ rc = bnxt_hwrm_pf_func_cfg(bp, 1);
+ if (rc)
+ return rc;
+
+ /*
+ * Now, create and register a buffer to hold forwarded VF requests
+ */
+ req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
+ bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
+ page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
+ if (bp->pf.vf_req_buf == NULL) {
+ rc = -ENOMEM;
+ goto error_free;
+ }
+ for (sz = 0; sz < req_buf_sz; sz += getpagesize())
+ rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
+ for (i = 0; i < num_vfs; i++)
+ bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
+ (i * HWRM_MAX_REQ_LEN);
+
+ rc = bnxt_hwrm_func_buf_rgtr(bp);
+ if (rc)
+ goto error_free;
+
+ populate_vf_func_cfg_req(bp, &req, num_vfs);
+
+ bp->pf.active_vfs = 0;
+ for (i = 0; i < num_vfs; i++) {
+ add_random_mac_if_needed(bp, &req, i);
+
+ HWRM_PREP(req, FUNC_CFG);
+ req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ /* Clear enable flag for next pass */
+ req.enables &= ~rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+
+ if (rc || resp->error_code) {
+ PMD_DRV_LOG(ERR,
+ "Failed to initizlie VF %d\n", i);
+ PMD_DRV_LOG(ERR,
+ "Not all VFs available. (%d, %d)\n",
+ rc, resp->error_code);
+ HWRM_UNLOCK();
+ break;
+ }
+
+ HWRM_UNLOCK();
+
+ reserve_resources_from_vf(bp, &req, i);
+ bp->pf.active_vfs++;
+ bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
+ }
+
+ /*
+ * Now configure the PF to use "the rest" of the resources
+ * We're using STD_TX_RING_MODE here though which will limit the TX
+ * rings. This will allow QoS to function properly. Not setting this
+ * will cause PF rings to break bandwidth settings.
+ */
+ rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+ if (rc)
+ goto error_free;
+
+ rc = update_pf_resource_max(bp);
+ if (rc)
+ goto error_free;
+
+ return rc;
+
+error_free:
+ bnxt_hwrm_func_buf_unrgtr(bp);
+ return rc;
+}
+
+int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG);
+
+ req.fid = rte_cpu_to_le_16(0xffff);
+ req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
+ req.evb_mode = bp->pf.evb_mode;
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
+ uint8_t tunnel_type)
+{
+ struct hwrm_tunnel_dst_port_alloc_input req = {0};
+ struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
+ req.tunnel_type = tunnel_type;
+ req.tunnel_dst_port_val = port;
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT();
+
+ switch (tunnel_type) {
+ case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
+ bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+ bp->vxlan_port = port;
+ break;
+ case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
+ bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
+ bp->geneve_port = port;
+ break;
+ default:
+ break;
+ }
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
+ uint8_t tunnel_type)
+{
+ struct hwrm_tunnel_dst_port_free_input req = {0};
+ struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
+
+ req.tunnel_type = tunnel_type;
+ req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
+ uint32_t flags)
+{
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG);
+
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.flags = rte_cpu_to_le_32(flags);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
+{
+ uint32_t *flag = flagp;
+
+ vnic->flags = *flag;
+}
+
+int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
+ struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_BUF_RGTR);
+
+ req.req_buf_num_pages = rte_cpu_to_le_16(1);
+ req.req_buf_page_size = rte_cpu_to_le_16(
+ page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
+ req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
+ req.req_buf_page_addr0 =
+ rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
+ if (req.req_buf_page_addr0 == 0) {
+ PMD_DRV_LOG(ERR,
+ "unable to map buffer address to physical memory\n");
+ return -ENOMEM;
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
+ struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_BUF_UNRGTR);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
+{
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG);
+
+ req.fid = rte_cpu_to_le_16(0xffff);
+ req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+ req.enables = rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
+ req.async_event_cr = rte_cpu_to_le_16(
+ bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
+{
+ struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_vf_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_VF_CFG);
+
+ req.enables = rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
+ req.async_event_cr = rte_cpu_to_le_16(
+ bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t dflt_vlan, fid;
+ uint32_t func_cfg_flags;
+ int rc = 0;
+
+ HWRM_PREP(req, FUNC_CFG);
+
+ if (is_vf) {
+ dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
+ fid = bp->pf.vf_info[vf].fid;
+ func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
+ } else {
+ fid = rte_cpu_to_le_16(0xffff);
+ func_cfg_flags = bp->pf.func_cfg_flags;
+ dflt_vlan = bp->vlan;
+ }
+
+ req.flags = rte_cpu_to_le_32(func_cfg_flags);
+ req.fid = rte_cpu_to_le_16(fid);
+ req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
+ req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
+ uint16_t max_bw, uint16_t enables)
+{
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG);
+
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.enables |= rte_cpu_to_le_32(enables);
+ req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.max_bw = rte_cpu_to_le_32(max_bw);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, FUNC_CFG);
+
+ req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
+ req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
+{
+ int rc;
+
+ if (BNXT_PF(bp))
+ rc = bnxt_hwrm_func_cfg_def_cp(bp);
+ else
+ rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
+
+ return rc;
+}
+
+int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
+ void *encaped, size_t ec_size)
+{
+ int rc = 0;
+ struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
+ struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (ec_size > sizeof(req.encap_request))
+ return -1;
+
+ HWRM_PREP(req, REJECT_FWD_RESP);
+
+ req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
+ memcpy(req.encap_request, encaped, ec_size);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
+ struct ether_addr *mac)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ HWRM_PREP(req, FUNC_QCFG);
+
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
+ void *encaped, size_t ec_size)
+{
+ int rc = 0;
+ struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
+ struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (ec_size > sizeof(req.encap_request))
+ return -1;
+
+ HWRM_PREP(req, EXEC_FWD_RESP);
+
+ req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
+ memcpy(req.encap_request, encaped, ec_size);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
+ struct rte_eth_stats *stats, uint8_t rx)
+{
+ int rc = 0;
+ struct hwrm_stat_ctx_query_input req = {.req_type = 0};
+ struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, STAT_CTX_QUERY);
+
+ req.stat_ctx_id = rte_cpu_to_le_32(cid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ if (rx) {
+ stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+ stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+ stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+ stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+ stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+ stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+ stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
+ stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
+ } else {
+ stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+ stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+ stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+ stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+ stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+ stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+ stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
+ }
+
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_port_qstats(struct bnxt *bp)
+{
+ struct hwrm_port_qstats_input req = {0};
+ struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
+
+ HWRM_PREP(req, PORT_QSTATS);
+
+ req.port_id = rte_cpu_to_le_16(pf->port_id);
+ req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
+ req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
+{
+ struct hwrm_port_clr_stats_input req = {0};
+ struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
+
+ /* Not allowed on NS2 device, NPAR, MultiHost, VF */
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
+ BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
+ return 0;
+
+ HWRM_PREP(req, PORT_CLR_STATS);
+
+ req.port_id = rte_cpu_to_le_16(pf->port_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_led_qcaps_input req = {0};
+ int rc;
+
+ if (BNXT_VF(bp))
+ return 0;
+
+ HWRM_PREP(req, PORT_LED_QCAPS);
+ req.port_id = bp->pf.port_id;
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
+ unsigned int i;
+
+ bp->num_leds = resp->num_leds;
+ memcpy(bp->leds, &resp->led0_id,
+ sizeof(bp->leds[0]) * bp->num_leds);
+ for (i = 0; i < bp->num_leds; i++) {
+ struct bnxt_led_info *led = &bp->leds[i];
+
+ uint16_t caps = led->led_state_caps;
+
+ if (!led->led_group_id ||
+ !BNXT_LED_ALT_BLINK_CAP(caps)) {
+ bp->num_leds = 0;
+ break;
+ }
+ }
+ }
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
+{
+ struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_led_cfg_input req = {0};
+ struct bnxt_led_cfg *led_cfg;
+ uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
+ uint16_t duration = 0;
+ int rc, i;
+
+ if (!bp->num_leds || BNXT_VF(bp))
+ return -EOPNOTSUPP;
+
+ HWRM_PREP(req, PORT_LED_CFG);
+
+ if (led_on) {
+ led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
+ duration = rte_cpu_to_le_16(500);
+ }
+ req.port_id = bp->pf.port_id;
+ req.num_leds = bp->num_leds;
+ led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
+ for (i = 0; i < bp->num_leds; i++, led_cfg++) {
+ req.enables |= BNXT_LED_DFLT_ENABLES(i);
+ led_cfg->led_id = bp->leds[i].led_id;
+ led_cfg->led_state = led_state;
+ led_cfg->led_blink_on = duration;
+ led_cfg->led_blink_off = duration;
+ led_cfg->led_group_id = bp->leds[i].led_group_id;
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
+ uint32_t *length)
+{
+ int rc;
+ struct hwrm_nvm_get_dir_info_input req = {0};
+ struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, NVM_GET_DIR_INFO);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ if (!rc) {
+ *entries = rte_le_to_cpu_32(resp->entries);
+ *length = rte_le_to_cpu_32(resp->entry_length);
+ }
+ return rc;
+}
+
+int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
+{
+ int rc;
+ uint32_t dir_entries;
+ uint32_t entry_length;
+ uint8_t *buf;
+ size_t buflen;
+ rte_iova_t dma_handle;
+ struct hwrm_nvm_get_dir_entries_input req = {0};
+ struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
+
+ rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
+ if (rc != 0)
+ return rc;
+
+ *data++ = dir_entries;
+ *data++ = entry_length;
+ len -= 2;
+ memset(data, 0xff, len);
+
+ buflen = dir_entries * entry_length;
+ buf = rte_malloc("nvm_dir", buflen, 0);
+ rte_mem_lock_page(buf);
+ if (buf == NULL)
+ return -ENOMEM;
+ dma_handle = rte_mem_virt2iova(buf);
+ if (dma_handle == 0) {
+ PMD_DRV_LOG(ERR,
+ "unable to map response address to physical memory\n");
+ return -ENOMEM;
+ }
+ HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
+ req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ if (rc == 0)
+ memcpy(data, buf, len > buflen ? buflen : len);
+
+ rte_free(buf);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
+ uint32_t offset, uint32_t length,
+ uint8_t *data)
+{
+ int rc;
+ uint8_t *buf;
+ rte_iova_t dma_handle;
+ struct hwrm_nvm_read_input req = {0};
+ struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
+
+ buf = rte_malloc("nvm_item", length, 0);
+ rte_mem_lock_page(buf);
+ if (!buf)
+ return -ENOMEM;
+
+ dma_handle = rte_mem_virt2iova(buf);
+ if (dma_handle == 0) {
+ PMD_DRV_LOG(ERR,
+ "unable to map response address to physical memory\n");
+ return -ENOMEM;
+ }
+ HWRM_PREP(req, NVM_READ);
+ req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
+ req.dir_idx = rte_cpu_to_le_16(index);
+ req.offset = rte_cpu_to_le_32(offset);
+ req.len = rte_cpu_to_le_32(length);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ if (rc == 0)
+ memcpy(data, buf, length);
+
+ rte_free(buf);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
+{
+ int rc;
+ struct hwrm_nvm_erase_dir_entry_input req = {0};
+ struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
+ req.dir_idx = rte_cpu_to_le_16(index);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+
+int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
+ uint16_t dir_ordinal, uint16_t dir_ext,
+ uint16_t dir_attr, const uint8_t *data,
+ size_t data_len)
+{
+ int rc;
+ struct hwrm_nvm_write_input req = {0};
+ struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
+ rte_iova_t dma_handle;
+ uint8_t *buf;
+
+ buf = rte_malloc("nvm_write", data_len, 0);
+ rte_mem_lock_page(buf);
+ if (!buf)
+ return -ENOMEM;
+
+ dma_handle = rte_mem_virt2iova(buf);
+ if (dma_handle == 0) {
+ PMD_DRV_LOG(ERR,
+ "unable to map response address to physical memory\n");
+ return -ENOMEM;
+ }
+ memcpy(buf, data, data_len);
+
+ HWRM_PREP(req, NVM_WRITE);
+
+ req.dir_type = rte_cpu_to_le_16(dir_type);
+ req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
+ req.dir_ext = rte_cpu_to_le_16(dir_ext);
+ req.dir_attr = rte_cpu_to_le_16(dir_attr);
+ req.dir_data_length = rte_cpu_to_le_32(data_len);
+ req.host_src_addr = rte_cpu_to_le_64(dma_handle);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ rte_free(buf);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+static void
+bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
+{
+ uint32_t *count = cbdata;
+
+ *count = *count + 1;
+}
+
+static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
+ struct bnxt_vnic_info *vnic __rte_unused)
+{
+ return 0;
+}
+
+int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
+{
+ uint32_t count = 0;
+
+ bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
+ &count, bnxt_vnic_count_hwrm_stub);
+
+ return count;
+}
+
+static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
+ uint16_t *vnic_ids)
+{
+ struct hwrm_func_vf_vnic_ids_query_input req = {0};
+ struct hwrm_func_vf_vnic_ids_query_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* First query all VNIC ids */
+ HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
+
+ req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
+ req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
+ req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
+
+ if (req.vnic_id_tbl_addr == 0) {
+ HWRM_UNLOCK();
+ PMD_DRV_LOG(ERR,
+ "unable to map VNIC ID table address to physical memory\n");
+ return -ENOMEM;
+ }
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ if (rc) {
+ HWRM_UNLOCK();
+ PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
+ return -1;
+ } else if (resp->error_code) {
+ rc = rte_le_to_cpu_16(resp->error_code);
+ HWRM_UNLOCK();
+ PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
+ return -1;
+ }
+ rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+/*
+ * This function queries the VNIC IDs for a specified VF. It then calls
+ * the vnic_cb to update the necessary field in vnic_info with cbdata.
+ * Then it calls the hwrm_cb function to program this new vnic configuration.
+ */
+int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
+ void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
+ int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
+{
+ struct bnxt_vnic_info vnic;
+ int rc = 0;
+ int i, num_vnic_ids;
+ uint16_t *vnic_ids;
+ size_t vnic_id_sz;
+ size_t sz;
+
+ /* First query all VNIC ids */
+ vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+ vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
+ RTE_CACHE_LINE_SIZE);
+ if (vnic_ids == NULL) {
+ rc = -ENOMEM;
+ return rc;
+ }
+ for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
+ rte_mem_lock_page(((char *)vnic_ids) + sz);
+
+ num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
+
+ if (num_vnic_ids < 0)
+ return num_vnic_ids;
+
+ /* Retrieve VNIC, update bd_stall then update */
+
+ for (i = 0; i < num_vnic_ids; i++) {
+ memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
+ vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
+ rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
+ if (rc)
+ break;
+ if (vnic.mru <= 4) /* Indicates unallocated */
+ continue;
+
+ vnic_cb(&vnic, cbdata);
+
+ rc = hwrm_cb(bp, &vnic);
+ if (rc)
+ break;
+ }
+
+ rte_free(vnic_ids);
+
+ return rc;
+}
+
+int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
+ bool on)
+{
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG);
+
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.enables |= rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
+ req.vlan_antispoof_mode = on ?
+ HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
+ HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
+{
+ struct bnxt_vnic_info vnic;
+ uint16_t *vnic_ids;
+ size_t vnic_id_sz;
+ int num_vnic_ids, i;
+ size_t sz;
+ int rc;
+
+ vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+ vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
+ RTE_CACHE_LINE_SIZE);
+ if (vnic_ids == NULL) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
+ rte_mem_lock_page(((char *)vnic_ids) + sz);
+
+ rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
+ if (rc <= 0)
+ goto exit;
+ num_vnic_ids = rc;
+
+ /*
+ * Loop through to find the default VNIC ID.
+ * TODO: The easier way would be to obtain the resp->dflt_vnic_id
+ * by sending the hwrm_func_qcfg command to the firmware.
+ */
+ for (i = 0; i < num_vnic_ids; i++) {
+ memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
+ vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
+ rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
+ bp->pf.first_vf_id + vf);
+ if (rc)
+ goto exit;
+ if (vnic.func_default) {
+ rte_free(vnic_ids);
+ return vnic.fw_vnic_id;
+ }
+ }
+ /* Could not find a default VNIC. */
+ PMD_DRV_LOG(ERR, "No default VNIC\n");
+exit:
+ rte_free(vnic_ids);
+ return -1;
+}
+
+int bnxt_hwrm_set_em_filter(struct bnxt *bp,
+ uint16_t dst_id,
+ struct bnxt_filter_info *filter)
+{
+ int rc = 0;
+ struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
+ struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t enables = 0;
+
+ if (filter->fw_em_filter_id != UINT64_MAX)
+ bnxt_hwrm_clear_em_filter(bp, filter);
+
+ HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
+
+ req.flags = rte_cpu_to_le_32(filter->flags);
+
+ enables = filter->enables |
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
+ req.dst_id = rte_cpu_to_le_16(dst_id);
+
+ if (filter->ip_addr_type) {
+ req.ip_addr_type = filter->ip_addr_type;
+ enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
+ }
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
+ req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
+ memcpy(req.src_macaddr, filter->src_macaddr,
+ ETHER_ADDR_LEN);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
+ memcpy(req.dst_macaddr, filter->dst_macaddr,
+ ETHER_ADDR_LEN);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
+ req.ovlan_vid = filter->l2_ovlan;
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
+ req.ivlan_vid = filter->l2_ivlan;
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
+ req.ethertype = rte_cpu_to_be_16(filter->ethertype);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
+ req.ip_protocol = filter->ip_protocol;
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
+ req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
+ req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
+ req.src_port = rte_cpu_to_be_16(filter->src_port);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
+ req.dst_port = rte_cpu_to_be_16(filter->dst_port);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
+ req.mirror_vnic_id = filter->mirror_vnic_id;
+
+ req.enables = rte_cpu_to_le_32(enables);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
+{
+ int rc = 0;
+ struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
+ struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (filter->fw_em_filter_id == UINT64_MAX)
+ return 0;
+
+ PMD_DRV_LOG(ERR, "Clear EM filter\n");
+ HWRM_PREP(req, CFA_EM_FLOW_FREE);
+
+ req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ filter->fw_em_filter_id = UINT64_MAX;
+ filter->fw_l2_filter_id = UINT64_MAX;
+
+ return 0;
+}
+
+int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
+ uint16_t dst_id,
+ struct bnxt_filter_info *filter)
+{
+ int rc = 0;
+ struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
+ struct hwrm_cfa_ntuple_filter_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ uint32_t enables = 0;
+
+ if (filter->fw_ntuple_filter_id != UINT64_MAX)
+ bnxt_hwrm_clear_ntuple_filter(bp, filter);
+
+ HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
+
+ req.flags = rte_cpu_to_le_32(filter->flags);
+
+ enables = filter->enables |
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
+ req.dst_id = rte_cpu_to_le_16(dst_id);
+
+
+ if (filter->ip_addr_type) {
+ req.ip_addr_type = filter->ip_addr_type;
+ enables |=
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
+ }
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
+ req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
+ memcpy(req.src_macaddr, filter->src_macaddr,
+ ETHER_ADDR_LEN);
+ //if (enables &
+ //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
+ //memcpy(req.dst_macaddr, filter->dst_macaddr,
+ //ETHER_ADDR_LEN);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
+ req.ethertype = rte_cpu_to_be_16(filter->ethertype);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
+ req.ip_protocol = filter->ip_protocol;
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
+ req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
+ req.src_ipaddr_mask[0] =
+ rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
+ req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
+ req.dst_ipaddr_mask[0] =
+ rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
+ req.src_port = rte_cpu_to_le_16(filter->src_port);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
+ req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
+ req.dst_port = rte_cpu_to_le_16(filter->dst_port);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
+ req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
+ req.mirror_vnic_id = filter->mirror_vnic_id;
+
+ req.enables = rte_cpu_to_le_32(enables);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
+ struct bnxt_filter_info *filter)
+{
+ int rc = 0;
+ struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
+ struct hwrm_cfa_ntuple_filter_free_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ if (filter->fw_ntuple_filter_id == UINT64_MAX)
+ return 0;
+
+ HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
+
+ req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ filter->fw_ntuple_filter_id = UINT64_MAX;
+
+ return 0;
+}
+
+int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ unsigned int rss_idx, fw_idx, i;
+
+ if (vnic->rss_table && vnic->hash_type) {
+ /*
+ * Fill the RSS hash & redirection table with
+ * ring group ids for all VNICs
+ */
+ for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+ rss_idx++, fw_idx++) {
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ fw_idx %= bp->rx_cp_nr_rings;
+ if (vnic->fw_grp_ids[fw_idx] !=
+ INVALID_HW_RING_ID)
+ break;
+ fw_idx++;
+ }
+ if (i == bp->rx_cp_nr_rings)
+ return 0;
+ vnic->rss_table[rss_idx] =
+ vnic->fw_grp_ids[fw_idx];
+ }
+ return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ }
+ return 0;
+}
+
+static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+ uint16_t flags;
+
+ req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
+
+ /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
+ req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
+
+ /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
+ req->num_cmpl_dma_aggr_during_int =
+ rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
+
+ req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
+
+ /* min timer set to 1/2 of interrupt timer */
+ req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
+
+ /* buf timer set to 1/4 of interrupt timer */
+ req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
+
+ req->cmpl_aggr_dma_tmr_during_int =
+ rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
+
+ flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
+ req->flags = rte_cpu_to_le_16(flags);
+}
+
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
+ struct bnxt_coal *coal, uint16_t ring_id)
+{
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* Set ring coalesce parameters only for Stratus 100G NIC */
+ if (!bnxt_stratus_device(bp))
+ return 0;
+
+ HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
+ bnxt_hwrm_set_coal_params(coal, &req);
+ req.ring_id = rte_cpu_to_le_16(ring_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.h
new file mode 100644
index 00000000..379aac6e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_HWRM_H_
+#define _BNXT_HWRM_H_
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+struct bnxt;
+struct bnxt_filter_info;
+struct bnxt_cp_ring_info;
+
+#define HWRM_SEQ_ID_INVALID -1U
+/* Convert Bit field location to value */
+#define ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE)
+#define ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED)
+#define ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE)
+#define ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD \
+ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD - 32))
+#define ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE \
+ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE - 32))
+
+#define HWRM_QUEUE_SERVICE_PROFILE_LOSSY \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY
+
+#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC \
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
+
+int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ uint16_t vlan_count,
+ struct bnxt_vlan_table_entry *vlan_table);
+int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
+ uint16_t vlan_count,
+ struct bnxt_vlan_antispoof_table_entry *vlan_table);
+int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
+ struct bnxt_filter_info *filter);
+int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
+ uint16_t dst_id,
+ struct bnxt_filter_info *filter);
+int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
+ void *encaped, size_t ec_size);
+int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
+ void *encaped, size_t ec_size);
+
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp);
+int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp);
+int bnxt_hwrm_func_driver_register(struct bnxt *bp);
+int bnxt_hwrm_func_qcaps(struct bnxt *bp);
+int bnxt_hwrm_func_reset(struct bnxt *bp);
+int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags);
+int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
+ struct rte_eth_stats *stats);
+int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
+ uint64_t *dropped);
+int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid);
+int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp);
+int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp);
+
+int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
+
+int bnxt_hwrm_set_async_event_cr(struct bnxt *bp);
+int bnxt_hwrm_ring_alloc(struct bnxt *bp,
+ struct bnxt_ring *ring,
+ uint32_t ring_type, uint32_t map_index,
+ uint32_t stats_ctx_id, uint32_t cmpl_ring_id);
+int bnxt_hwrm_ring_free(struct bnxt *bp,
+ struct bnxt_ring *ring, uint32_t ring_type);
+int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx);
+int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx);
+
+int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr, unsigned int idx);
+int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr, unsigned int idx);
+int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
+ struct rte_eth_stats *stats, uint8_t rx);
+
+int bnxt_hwrm_ver_get(struct bnxt *bp);
+
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int16_t fw_vf_id);
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool enable);
+
+int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp);
+int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp);
+int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp);
+int bnxt_free_all_hwrm_rings(struct bnxt *bp);
+int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp);
+int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp);
+int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+void bnxt_free_all_hwrm_resources(struct bnxt *bp);
+void bnxt_free_hwrm_resources(struct bnxt *bp);
+void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index);
+int bnxt_alloc_hwrm_resources(struct bnxt *bp);
+int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
+int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
+int bnxt_hwrm_func_qcfg(struct bnxt *bp);
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp);
+int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test);
+int bnxt_hwrm_allocate_pf_only(struct bnxt *bp);
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs);
+int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf,
+ const uint8_t *mac_addr);
+int bnxt_hwrm_pf_evb_mode(struct bnxt *bp);
+int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
+ uint16_t max_bw, uint16_t enables);
+int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf);
+int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
+ struct ether_addr *mac);
+int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf);
+int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
+ uint8_t tunnel_type);
+int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
+ uint8_t tunnel_type);
+void bnxt_free_tunnel_ports(struct bnxt *bp);
+int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf);
+int bnxt_hwrm_port_qstats(struct bnxt *bp);
+int bnxt_hwrm_port_clr_stats(struct bnxt *bp);
+int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on);
+int bnxt_hwrm_port_led_qcaps(struct bnxt *bp);
+int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
+ uint32_t flags);
+void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp);
+int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf);
+int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
+ void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
+ int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic));
+int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
+ bool on);
+int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf);
+int bnxt_hwrm_set_em_filter(struct bnxt *bp, uint16_t dst_id,
+ struct bnxt_filter_info *filter);
+int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter);
+
+int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, uint16_t dst_id,
+ struct bnxt_filter_info *filter);
+int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
+ struct bnxt_filter_info *filter);
+int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data);
+int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
+ uint32_t *length);
+int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
+ uint32_t offset, uint32_t length,
+ uint8_t *data);
+int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index);
+int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
+ uint16_t dir_ordinal, uint16_t dir_ext,
+ uint16_t dir_attr, const uint8_t *data,
+ size_t data_len);
+int bnxt_hwrm_ptp_cfg(struct bnxt *bp);
+int bnxt_vnic_rss_configure(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
+ struct bnxt_coal *coal, uint16_t ring_id);
+int bnxt_hwrm_check_vf_rings(struct bnxt *bp);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.c
new file mode 100644
index 00000000..7ef7023e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <inttypes.h>
+
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_irq.h"
+#include "bnxt_ring.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * Interrupts
+ */
+
+static void bnxt_int_handler(void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+ struct cmpl_base *cmp;
+ uint32_t raw_cons;
+ uint32_t cons;
+
+ if (cpr == NULL)
+ return;
+
+ raw_cons = cpr->cp_raw_cons;
+ while (1) {
+ if (!cpr || !cpr->cp_ring_struct)
+ return;
+
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ cmp = &cpr->cp_desc_ring[cons];
+
+ if (!CMP_VALID(cmp, raw_cons, cpr->cp_ring_struct))
+ break;
+
+ bnxt_event_hwrm_resp_handler(bp, cmp);
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ };
+
+ cpr->cp_raw_cons = raw_cons;
+ B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
+}
+
+void bnxt_free_int(struct bnxt *bp)
+{
+ struct bnxt_irq *irq;
+
+ irq = bp->irq_tbl;
+ if (irq) {
+ if (irq->requested) {
+ rte_intr_disable(&bp->pdev->intr_handle);
+ rte_intr_callback_unregister(&bp->pdev->intr_handle,
+ irq->handler,
+ (void *)bp->eth_dev);
+ irq->requested = 0;
+ }
+ rte_free((void *)bp->irq_tbl);
+ bp->irq_tbl = NULL;
+ }
+}
+
+void bnxt_disable_int(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+
+ /* Only the default completion ring */
+ if (cpr != NULL && cpr->cp_doorbell != NULL)
+ B_CP_DB_DISARM(cpr);
+}
+
+void bnxt_enable_int(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+
+ /* Only the default completion ring */
+ if (cpr != NULL && cpr->cp_doorbell != NULL)
+ B_CP_DB_ARM(cpr);
+}
+
+int bnxt_setup_int(struct bnxt *bp)
+{
+ uint16_t total_vecs;
+ const int len = sizeof(bp->irq_tbl[0].name);
+ int i, rc = 0;
+
+ /* DPDK host only supports 1 MSI-X vector */
+ total_vecs = 1;
+ bp->irq_tbl = rte_calloc("bnxt_irq_tbl", total_vecs,
+ sizeof(struct bnxt_irq), 0);
+ if (bp->irq_tbl) {
+ for (i = 0; i < total_vecs; i++) {
+ bp->irq_tbl[i].vector = i;
+ snprintf(bp->irq_tbl[i].name, len,
+ "%s-%d", bp->eth_dev->device->name, i);
+ bp->irq_tbl[i].handler = bnxt_int_handler;
+ }
+ } else {
+ rc = -ENOMEM;
+ goto setup_exit;
+ }
+ return 0;
+
+setup_exit:
+ PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
+ return rc;
+}
+
+int bnxt_request_int(struct bnxt *bp)
+{
+ int rc = 0;
+
+ struct bnxt_irq *irq = bp->irq_tbl;
+
+ rte_intr_callback_register(&bp->pdev->intr_handle, irq->handler,
+ (void *)bp->eth_dev);
+ rte_intr_enable(&bp->pdev->intr_handle);
+
+ irq->requested = 1;
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.h
new file mode 100644
index 00000000..75ba2135
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_IRQ_H_
+#define _BNXT_IRQ_H_
+
+#define BNXT_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define BNXT_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+struct bnxt_irq {
+ rte_intr_callback_fn handler;
+ unsigned int vector;
+ uint8_t requested;
+ char name[RTE_ETH_NAME_MAX_LEN + 2];
+};
+
+struct bnxt;
+void bnxt_free_int(struct bnxt *bp);
+void bnxt_disable_int(struct bnxt *bp);
+void bnxt_enable_int(struct bnxt *bp);
+int bnxt_setup_int(struct bnxt *bp);
+int bnxt_request_int(struct bnxt *bp);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_nvm_defs.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_nvm_defs.h
new file mode 100644
index 00000000..ea9d4a9d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_nvm_defs.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_NVM_DEFS_H_
+#define _BNXT_NVM_DEFS_H_
+
+enum bnxt_nvm_directory_type {
+ BNX_DIR_TYPE_UNUSED = 0,
+ BNX_DIR_TYPE_PKG_LOG = 1,
+ BNX_DIR_TYPE_UPDATE = 2,
+ BNX_DIR_TYPE_CHIMP_PATCH = 3,
+ BNX_DIR_TYPE_BOOTCODE = 4,
+ BNX_DIR_TYPE_VPD = 5,
+ BNX_DIR_TYPE_EXP_ROM_MBA = 6,
+ BNX_DIR_TYPE_AVS = 7,
+ BNX_DIR_TYPE_PCIE = 8,
+ BNX_DIR_TYPE_PORT_MACRO = 9,
+ BNX_DIR_TYPE_APE_FW = 10,
+ BNX_DIR_TYPE_APE_PATCH = 11,
+ BNX_DIR_TYPE_KONG_FW = 12,
+ BNX_DIR_TYPE_KONG_PATCH = 13,
+ BNX_DIR_TYPE_BONO_FW = 14,
+ BNX_DIR_TYPE_BONO_PATCH = 15,
+ BNX_DIR_TYPE_TANG_FW = 16,
+ BNX_DIR_TYPE_TANG_PATCH = 17,
+ BNX_DIR_TYPE_BOOTCODE_2 = 18,
+ BNX_DIR_TYPE_CCM = 19,
+ BNX_DIR_TYPE_PCI_CFG = 20,
+ BNX_DIR_TYPE_TSCF_UCODE = 21,
+ BNX_DIR_TYPE_ISCSI_BOOT = 22,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
+ BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26,
+ BNX_DIR_TYPE_EXT_PHY = 27,
+ BNX_DIR_TYPE_SHARED_CFG = 40,
+ BNX_DIR_TYPE_PORT_CFG = 41,
+ BNX_DIR_TYPE_FUNC_CFG = 42,
+ BNX_DIR_TYPE_MGMT_CFG = 48,
+ BNX_DIR_TYPE_MGMT_DATA = 49,
+ BNX_DIR_TYPE_MGMT_WEB_DATA = 50,
+ BNX_DIR_TYPE_MGMT_WEB_META = 51,
+ BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
+ BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
+};
+
+#define BNX_DIR_ORDINAL_FIRST 0
+
+#define BNX_DIR_EXT_NONE 0
+#define BNX_DIR_EXT_INACTIVE (1 << 0)
+#define BNX_DIR_EXT_UPDATE (1 << 1)
+
+#define BNX_DIR_ATTR_NONE 0
+#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
+#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
+
+#define BNX_PKG_LOG_MAX_LENGTH 4096
+
+enum bnxnvm_pkglog_field_index {
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
+ BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
+ BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2,
+ BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3,
+ BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4,
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5,
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6
+};
+
+#endif /* Don't add anything after this line */
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.c
new file mode 100644
index 00000000..fcbd6bc6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.c
@@ -0,0 +1,501 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <rte_bitmap.h>
+#include <rte_memzone.h>
+#include <unistd.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_ring.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
+
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * Generic ring handling
+ */
+
+void bnxt_free_ring(struct bnxt_ring *ring)
+{
+ if (!ring)
+ return;
+
+ if (ring->vmem_size && *ring->vmem) {
+ memset((char *)*ring->vmem, 0, ring->vmem_size);
+ *ring->vmem = NULL;
+ }
+ ring->mem_zone = NULL;
+}
+
+/*
+ * Ring groups
+ */
+
+int bnxt_init_ring_grps(struct bnxt *bp)
+{
+ unsigned int i;
+
+ for (i = 0; i < bp->max_ring_grps; i++)
+ memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
+ sizeof(struct bnxt_ring_grp_info));
+
+ return 0;
+}
+
+/*
+ * Allocates a completion ring with vmem and stats optionally also allocating
+ * a TX and/or RX ring. Passing NULL as tx_ring_info and/or rx_ring_info
+ * to not allocate them.
+ *
+ * Order in the allocation is:
+ * stats - Always non-zero length
+ * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
+ * tx vmem - Only non-zero length if tx_ring_info is not NULL
+ * rx vmem - Only non-zero length if rx_ring_info is not NULL
+ * cp bd ring - Always non-zero length
+ * tx bd ring - Only non-zero length if tx_ring_info is not NULL
+ * rx bd ring - Only non-zero length if rx_ring_info is not NULL
+ */
+int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
+ struct bnxt_tx_queue *txq,
+ struct bnxt_rx_queue *rxq,
+ struct bnxt_cp_ring_info *cp_ring_info,
+ const char *suffix)
+{
+ struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
+ struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
+ struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
+ struct bnxt_ring *tx_ring;
+ struct bnxt_ring *rx_ring;
+ struct rte_pci_device *pdev = bp->pdev;
+ uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
+ const struct rte_memzone *mz = NULL;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ rte_iova_t mz_phys_addr;
+ int sz;
+
+ int stats_len = (tx_ring_info || rx_ring_info) ?
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
+ sizeof (struct hwrm_resp_hdr)) : 0;
+
+ int cp_vmem_start = stats_len;
+ int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
+
+ int tx_vmem_start = cp_vmem_start + cp_vmem_len;
+ int tx_vmem_len =
+ tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
+ tx_ring_struct->vmem_size) : 0;
+
+ int rx_vmem_start = tx_vmem_start + tx_vmem_len;
+ int rx_vmem_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
+ rx_ring_struct->vmem_size) : 0;
+ int ag_vmem_start = 0;
+ int ag_vmem_len = 0;
+ int cp_ring_start = 0;
+
+ ag_vmem_start = rx_vmem_start + rx_vmem_len;
+ ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
+ rx_ring_info->ag_ring_struct->vmem_size) : 0;
+ cp_ring_start = ag_vmem_start + ag_vmem_len;
+
+ int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
+ sizeof(struct cmpl_base));
+
+ int tx_ring_start = cp_ring_start + cp_ring_len;
+ int tx_ring_len = tx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
+ sizeof(struct tx_bd_long)) : 0;
+
+ int rx_ring_start = tx_ring_start + tx_ring_len;
+ int rx_ring_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
+ sizeof(struct rx_prod_pkt_bd)) : 0;
+
+ int ag_ring_start = rx_ring_start + rx_ring_len;
+ int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
+
+ int ag_bitmap_start = ag_ring_start + ag_ring_len;
+ int ag_bitmap_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
+ rx_ring_info->rx_ring_struct->ring_size *
+ AGG_RING_SIZE_FACTOR)) : 0;
+
+ int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
+ int tpa_info_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
+ sizeof(struct bnxt_tpa_info)) : 0;
+
+ int total_alloc_len = tpa_info_start;
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+ total_alloc_len += tpa_info_len;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
+ pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
+ suffix);
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ if (!mz) {
+ mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG,
+ getpagesize());
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ PMD_DRV_LOG(WARNING,
+ "Memzone physical address same as virtual.\n");
+ PMD_DRV_LOG(WARNING,
+ "Using rte_mem_virt2iova()\n");
+ for (sz = 0; sz < total_alloc_len; sz += getpagesize())
+ rte_mem_lock_page(((char *)mz->addr) + sz);
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
+ if (mz_phys_addr == 0) {
+ PMD_DRV_LOG(ERR,
+ "unable to map ring address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (tx_ring_info) {
+ txq->mz = mz;
+ tx_ring = tx_ring_info->tx_ring_struct;
+
+ tx_ring->bd = ((char *)mz->addr + tx_ring_start);
+ tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
+ tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
+ tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
+ tx_ring->mem_zone = (const void *)mz;
+
+ if (!tx_ring->bd)
+ return -ENOMEM;
+ if (tx_ring->vmem_size) {
+ tx_ring->vmem =
+ (void **)((char *)mz->addr + tx_vmem_start);
+ tx_ring_info->tx_buf_ring =
+ (struct bnxt_sw_tx_bd *)tx_ring->vmem;
+ }
+ }
+
+ if (rx_ring_info) {
+ rxq->mz = mz;
+ rx_ring = rx_ring_info->rx_ring_struct;
+
+ rx_ring->bd = ((char *)mz->addr + rx_ring_start);
+ rx_ring_info->rx_desc_ring =
+ (struct rx_prod_pkt_bd *)rx_ring->bd;
+ rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
+ rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
+ rx_ring->mem_zone = (const void *)mz;
+
+ if (!rx_ring->bd)
+ return -ENOMEM;
+ if (rx_ring->vmem_size) {
+ rx_ring->vmem =
+ (void **)((char *)mz->addr + rx_vmem_start);
+ rx_ring_info->rx_buf_ring =
+ (struct bnxt_sw_rx_bd *)rx_ring->vmem;
+ }
+
+ rx_ring = rx_ring_info->ag_ring_struct;
+
+ rx_ring->bd = ((char *)mz->addr + ag_ring_start);
+ rx_ring_info->ag_desc_ring =
+ (struct rx_prod_pkt_bd *)rx_ring->bd;
+ rx_ring->bd_dma = mz->iova + ag_ring_start;
+ rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
+ rx_ring->mem_zone = (const void *)mz;
+
+ if (!rx_ring->bd)
+ return -ENOMEM;
+ if (rx_ring->vmem_size) {
+ rx_ring->vmem =
+ (void **)((char *)mz->addr + ag_vmem_start);
+ rx_ring_info->ag_buf_ring =
+ (struct bnxt_sw_rx_bd *)rx_ring->vmem;
+ }
+
+ rx_ring_info->ag_bitmap =
+ rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
+ AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
+ ag_bitmap_start, ag_bitmap_len);
+
+ /* TPA info */
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+ rx_ring_info->tpa_info =
+ ((struct bnxt_tpa_info *)((char *)mz->addr +
+ tpa_info_start));
+ }
+
+ cp_ring->bd = ((char *)mz->addr + cp_ring_start);
+ cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
+ cp_ring_info->cp_desc_ring = cp_ring->bd;
+ cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
+ cp_ring->mem_zone = (const void *)mz;
+
+ if (!cp_ring->bd)
+ return -ENOMEM;
+ if (cp_ring->vmem_size)
+ *cp_ring->vmem = ((char *)mz->addr + stats_len);
+ if (stats_len) {
+ cp_ring_info->hw_stats = mz->addr;
+ cp_ring_info->hw_stats_map = mz_phys_addr;
+ }
+ cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+ return 0;
+}
+
+static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
+{
+ /* Tick values in micro seconds.
+ * 1 coal_buf x bufs_per_record = 1 completion record.
+ */
+ coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
+ /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
+ coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
+ /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
+ coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
+ coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
+ /* min timer set to 1/2 of interrupt timer */
+ coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
+ /* buf timer set to 1/4 of interrupt timer */
+ coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
+ coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
+}
+
+int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
+{
+ struct rte_pci_device *pci_dev = bp->pdev;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_ring *ring = rxr->rx_ring_struct;
+ unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
+ int rc = 0;
+
+ bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
+
+ /* Rx cmpl */
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+ queue_index, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
+ if (rc)
+ goto err_out;
+
+ cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
+ queue_index * BNXT_DB_SIZE;
+ bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+
+ if (!queue_index) {
+ /*
+ * In order to save completion resources, use the first
+ * completion ring from PF or VF as the default completion ring
+ * for async event and HWRM forward response handling.
+ */
+ bp->def_cp_ring = cpr;
+ rc = bnxt_hwrm_set_async_event_cr(bp);
+ if (rc)
+ goto err_out;
+ }
+ /* Rx ring */
+ rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ queue_index, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id);
+ if (rc)
+ goto err_out;
+
+ rxr->rx_prod = 0;
+ rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr +
+ queue_index * BNXT_DB_SIZE;
+ bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+
+ ring = rxr->ag_ring_struct;
+ /* Agg ring */
+ if (!ring)
+ PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
+
+ rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ map_idx, HWRM_NA_SIGNATURE,
+ cp_ring->fw_ring_id);
+ if (rc)
+ goto err_out;
+
+ PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
+ rxr->ag_prod = 0;
+ rxr->ag_doorbell = (char *)pci_dev->mem_resource[2].addr +
+ map_idx * BNXT_DB_SIZE;
+ bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+
+ rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
+
+ if (bp->eth_dev->data->rx_queue_state[queue_index] ==
+ RTE_ETH_QUEUE_STATE_STARTED) {
+ if (bnxt_init_one_rx_ring(rxq)) {
+ RTE_LOG(ERR, PMD,
+ "bnxt_init_one_rx_ring failed!\n");
+ bnxt_rx_queue_release_op(rxq);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+ }
+ rxq->index = queue_index;
+ PMD_DRV_LOG(INFO,
+ "queue %d, rx_deferred_start %d, state %d!\n",
+ queue_index, rxq->rx_deferred_start,
+ bp->eth_dev->data->rx_queue_state[queue_index]);
+
+err_out:
+ return rc;
+}
+/* ring_grp usage:
+ * [0] = default completion ring
+ * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
+ * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
+ */
+int bnxt_alloc_hwrm_rings(struct bnxt *bp)
+{
+ struct bnxt_coal coal;
+ unsigned int i;
+ int rc = 0;
+
+ bnxt_init_dflt_coal(&coal);
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_ring *ring = rxr->rx_ring_struct;
+ unsigned int map_idx = i + bp->rx_cp_nr_rings;
+
+ bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
+
+ /* Rx cmpl */
+ rc = bnxt_hwrm_ring_alloc
+ (bp,
+ cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+ i,
+ HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
+ if (rc)
+ goto err_out;
+ cpr->cp_doorbell = (char *)bp->doorbell_base + i * 0x80;
+ bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
+
+ if (!i) {
+ /*
+ * In order to save completion resource, use the first
+ * completion ring from PF or VF as the default
+ * completion ring for async event & HWRM
+ * forward response handling.
+ */
+ bp->def_cp_ring = cpr;
+ rc = bnxt_hwrm_set_async_event_cr(bp);
+ if (rc)
+ goto err_out;
+ }
+
+ /* Rx ring */
+ rc = bnxt_hwrm_ring_alloc(bp,
+ ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ i,
+ cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id);
+ if (rc)
+ goto err_out;
+ rxr->rx_prod = 0;
+ rxr->rx_doorbell = (char *)bp->doorbell_base + i * 0x80;
+ bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+
+ ring = rxr->ag_ring_struct;
+ /* Agg ring */
+ if (ring == NULL) {
+ PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
+ goto err_out;
+ }
+
+ rc = bnxt_hwrm_ring_alloc(bp, ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ map_idx, HWRM_NA_SIGNATURE,
+ cp_ring->fw_ring_id);
+ if (rc)
+ goto err_out;
+ PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
+ rxr->ag_prod = 0;
+ rxr->ag_doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
+ bp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id;
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+
+ rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
+ if (bnxt_init_one_rx_ring(rxq)) {
+ PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
+ bnxt_rx_queue_release_op(rxq);
+ return -ENOMEM;
+ }
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+ rxq->index = i;
+ }
+
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+ unsigned int idx = i + bp->rx_cp_nr_rings;
+
+ /* Tx cmpl */
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+ idx, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
+ if (rc)
+ goto err_out;
+
+ cpr->cp_doorbell = (char *)bp->doorbell_base + idx * 0x80;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+
+ /* Tx ring */
+ rc = bnxt_hwrm_ring_alloc(bp, ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
+ idx, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id);
+ if (rc)
+ goto err_out;
+
+ txr->tx_doorbell = (char *)bp->doorbell_base + idx * 0x80;
+ txq->index = idx;
+ bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
+ }
+
+err_out:
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.h
new file mode 100644
index 00000000..1446d784
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_RING_H_
+#define _BNXT_RING_H_
+
+#include <inttypes.h>
+
+#include <rte_memory.h>
+
+#define RING_NEXT(ring, idx) (((idx) + 1) & (ring)->ring_mask)
+
+#define DB_IDX_MASK 0xffffff
+#define DB_IDX_VALID (0x1 << 26)
+#define DB_IRQ_DIS (0x1 << 27)
+#define DB_KEY_TX (0x0 << 28)
+#define DB_KEY_RX (0x1 << 28)
+#define DB_KEY_CP (0x2 << 28)
+#define DB_KEY_ST (0x3 << 28)
+#define DB_KEY_TX_PUSH (0x4 << 28)
+#define DB_LONG_TX_PUSH (0x2 << 24)
+
+#define DEFAULT_CP_RING_SIZE 256
+#define DEFAULT_RX_RING_SIZE 256
+#define DEFAULT_TX_RING_SIZE 256
+
+#define BNXT_TPA_MAX 64
+#define AGG_RING_SIZE_FACTOR 2
+#define AGG_RING_MULTIPLIER 2
+
+/* These assume 4k pages */
+#define MAX_RX_DESC_CNT (8 * 1024)
+#define MAX_TX_DESC_CNT (4 * 1024)
+#define MAX_CP_DESC_CNT (16 * 1024)
+
+#define INVALID_HW_RING_ID ((uint16_t)-1)
+#define INVALID_STATS_CTX_ID ((uint16_t)-1)
+
+struct bnxt_ring {
+ void *bd;
+ rte_iova_t bd_dma;
+ uint32_t ring_size;
+ uint32_t ring_mask;
+
+ int vmem_size;
+ void **vmem;
+
+ uint16_t fw_ring_id; /* Ring id filled by Chimp FW */
+ const void *mem_zone;
+};
+
+struct bnxt_ring_grp_info {
+ uint16_t fw_stats_ctx;
+ uint16_t fw_grp_id;
+ uint16_t rx_fw_ring_id;
+ uint16_t cp_fw_ring_id;
+ uint16_t ag_fw_ring_id;
+};
+
+struct bnxt;
+struct bnxt_tx_ring_info;
+struct bnxt_rx_ring_info;
+struct bnxt_cp_ring_info;
+void bnxt_free_ring(struct bnxt_ring *ring);
+int bnxt_init_ring_grps(struct bnxt *bp);
+int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
+ struct bnxt_tx_queue *txq,
+ struct bnxt_rx_queue *rxq,
+ struct bnxt_cp_ring_info *cp_ring_info,
+ const char *suffix);
+int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index);
+int bnxt_alloc_hwrm_rings(struct bnxt *bp);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.c
new file mode 100644
index 00000000..832fc9ec
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.c
@@ -0,0 +1,471 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <inttypes.h>
+
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_ring.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * RX Queues
+ */
+
+void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
+{
+ if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
+ rxq->cp_ring->hw_stats = NULL;
+}
+
+int bnxt_mq_rx_configure(struct bnxt *bp)
+{
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ const struct rte_eth_vmdq_rx_conf *conf =
+ &dev_conf->rx_adv_conf.vmdq_rx_conf;
+ unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
+ int start_grp_id, end_grp_id = 1, rc = 0;
+ struct bnxt_vnic_info *vnic;
+ struct bnxt_filter_info *filter;
+ enum rte_eth_nb_pools pools = bp->rx_cp_nr_rings, max_pools = 0;
+ struct bnxt_rx_queue *rxq;
+
+ bp->nr_vnics = 0;
+
+ /* Single queue mode */
+ if (bp->rx_cp_nr_rings < 2) {
+ vnic = bnxt_alloc_vnic(bp);
+ if (!vnic) {
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
+ STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
+ bp->nr_vnics++;
+
+ rxq = bp->eth_dev->data->rx_queues[0];
+ rxq->vnic = vnic;
+
+ vnic->func_default = true;
+ vnic->ff_pool_idx = 0;
+ vnic->start_grp_id = 0;
+ vnic->end_grp_id = vnic->start_grp_id;
+ filter = bnxt_alloc_filter(bp);
+ if (!filter) {
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+ goto out;
+ }
+
+ /* Multi-queue mode */
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
+ /* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
+
+ switch (dev_conf->rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_RSS:
+ case ETH_MQ_RX_VMDQ_ONLY:
+ /* FALLTHROUGH */
+ /* ETH_8/64_POOLs */
+ pools = conf->nb_queue_pools;
+ /* For each pool, allocate MACVLAN CFA rule & VNIC */
+ max_pools = RTE_MIN(bp->max_vnics,
+ RTE_MIN(bp->max_l2_ctx,
+ RTE_MIN(bp->max_rsscos_ctx,
+ ETH_64_POOLS)));
+ if (pools > max_pools)
+ pools = max_pools;
+ break;
+ case ETH_MQ_RX_RSS:
+ pools = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n",
+ dev_conf->rxmode.mq_mode);
+ rc = -EINVAL;
+ goto err_out;
+ }
+ }
+
+ nb_q_per_grp = bp->rx_cp_nr_rings / pools;
+ start_grp_id = 0;
+ end_grp_id = nb_q_per_grp;
+
+ for (i = 0; i < pools; i++) {
+ vnic = bnxt_alloc_vnic(bp);
+ if (!vnic) {
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
+ STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
+ bp->nr_vnics++;
+
+ for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
+ rxq = bp->eth_dev->data->rx_queues[ring_idx];
+ rxq->vnic = vnic;
+ }
+ if (i == 0) {
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+ bp->eth_dev->data->promiscuous = 1;
+ vnic->flags |= BNXT_VNIC_INFO_PROMISC;
+ }
+ vnic->func_default = true;
+ }
+ vnic->ff_pool_idx = i;
+ vnic->start_grp_id = start_grp_id;
+ vnic->end_grp_id = end_grp_id;
+
+ if (i) {
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
+ !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
+ vnic->rss_dflt_cr = true;
+ goto skip_filter_allocation;
+ }
+ filter = bnxt_alloc_filter(bp);
+ if (!filter) {
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ /*
+ * TODO: Configure & associate CFA rule for
+ * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
+ */
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+
+skip_filter_allocation:
+ start_grp_id = end_grp_id;
+ end_grp_id += nb_q_per_grp;
+ }
+
+out:
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
+ uint16_t hash_type = 0;
+
+ if (bp->flags & BNXT_FLAG_UPDATE_HASH) {
+ rss = &bp->rss_conf;
+ bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
+ }
+
+ if (rss->rss_hf & ETH_RSS_IPV4)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
+ if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
+ if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
+ if (rss->rss_hf & ETH_RSS_IPV6)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+ if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
+ if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ vnic->hash_type = hash_type;
+
+ /*
+ * Use the supplied key if the key length is
+ * acceptable and the rss_key is not NULL
+ */
+ if (rss->rss_key &&
+ rss->rss_key_len <= HW_HASH_KEY_SIZE)
+ memcpy(vnic->rss_hash_key,
+ rss->rss_key, rss->rss_key_len);
+ }
+ }
+ }
+
+ return rc;
+
+err_out:
+ /* Free allocated vnic/filters */
+
+ return rc;
+}
+
+void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_sw_rx_bd *sw_ring;
+ struct bnxt_tpa_info *tpa_info;
+ uint16_t i;
+
+ rte_spinlock_lock(&rxq->lock);
+
+ if (rxq) {
+ sw_ring = rxq->rx_ring->rx_buf_ring;
+ if (sw_ring) {
+ for (i = 0;
+ i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
+ if (sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(sw_ring[i].mbuf);
+ sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+ /* Free up mbufs in Agg ring */
+ sw_ring = rxq->rx_ring->ag_buf_ring;
+ if (sw_ring) {
+ for (i = 0;
+ i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
+ if (sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(sw_ring[i].mbuf);
+ sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+
+ /* Free up mbufs in TPA */
+ tpa_info = rxq->rx_ring->tpa_info;
+ if (tpa_info) {
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ if (tpa_info[i].mbuf) {
+ rte_pktmbuf_free_seg(tpa_info[i].mbuf);
+ tpa_info[i].mbuf = NULL;
+ }
+ }
+ }
+ }
+
+ rte_spinlock_unlock(&rxq->lock);
+}
+
+void bnxt_free_rx_mbufs(struct bnxt *bp)
+{
+ struct bnxt_rx_queue *rxq;
+ int i;
+
+ for (i = 0; i < (int)bp->rx_nr_rings; i++) {
+ rxq = bp->rx_queues[i];
+ bnxt_rx_queue_release_mbufs(rxq);
+ }
+}
+
+void bnxt_rx_queue_release_op(void *rx_queue)
+{
+ struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+
+ if (rxq) {
+ bnxt_rx_queue_release_mbufs(rxq);
+
+ /* Free RX ring hardware descriptors */
+ bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+ /* Free RX Agg ring hardware descriptors */
+ bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
+
+ /* Free RX completion ring hardware descriptors */
+ bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+
+ bnxt_free_rxq_stats(rxq);
+ rte_memzone_free(rxq->mz);
+ rxq->mz = NULL;
+
+ rte_free(rxq);
+ }
+}
+
+int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+ struct bnxt_rx_queue *rxq;
+ int rc = 0;
+ uint8_t queue_state;
+
+ if (queue_idx >= bp->max_rx_rings) {
+ PMD_DRV_LOG(ERR,
+ "Cannot create Rx ring %d. Only %d rings available\n",
+ queue_idx, bp->max_rx_rings);
+ return -EINVAL;
+ }
+
+ if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (eth_dev->data->rx_queues) {
+ rxq = eth_dev->data->rx_queues[queue_idx];
+ if (rxq)
+ bnxt_rx_queue_release_op(rxq);
+ }
+ rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ rxq->bp = bp;
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+
+ PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_use_size);
+ PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
+
+ rc = bnxt_init_rx_ring_struct(rxq, socket_id);
+ if (rc)
+ goto out;
+
+ rxq->queue_id = queue_idx;
+ rxq->port_id = eth_dev->data->port_id;
+ rxq->crc_len = rte_eth_dev_must_keep_crc(rx_offloads) ?
+ ETHER_CRC_LEN : 0;
+
+ eth_dev->data->rx_queues[queue_idx] = rxq;
+ /* Allocate RX ring hardware descriptors */
+ if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
+ "rxr")) {
+ PMD_DRV_LOG(ERR,
+ "ring_dma_zone_reserve for rx_ring failed!\n");
+ bnxt_rx_queue_release_op(rxq);
+ rc = -ENOMEM;
+ goto out;
+ }
+ rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
+
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ queue_state = rxq->rx_deferred_start ? RTE_ETH_QUEUE_STATE_STOPPED :
+ RTE_ETH_QUEUE_STATE_STARTED;
+ eth_dev->data->rx_queue_state[queue_idx] = queue_state;
+ rte_spinlock_init(&rxq->lock);
+out:
+ return rc;
+}
+
+int
+bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+ int rc = 0;
+
+ if (eth_dev->data->rx_queues) {
+ rxq = eth_dev->data->rx_queues[queue_id];
+ if (!rxq) {
+ rc = -EINVAL;
+ return rc;
+ }
+ cpr = rxq->cp_ring;
+ B_CP_DB_ARM(cpr);
+ }
+ return rc;
+}
+
+int
+bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+ int rc = 0;
+
+ if (eth_dev->data->rx_queues) {
+ rxq = eth_dev->data->rx_queues[queue_id];
+ if (!rxq) {
+ rc = -EINVAL;
+ return rc;
+ }
+ cpr = rxq->cp_ring;
+ B_CP_DB_DISARM(cpr);
+ }
+ return rc;
+}
+
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+ int rc = 0;
+
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
+ bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
+ PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+
+ if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
+ return 0;
+
+ PMD_DRV_LOG(DEBUG,
+ "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id].fw_grp_id);
+
+ vnic->fw_grp_ids[rx_queue_id] =
+ bp->grp_info[rx_queue_id].fw_grp_id;
+ rc = bnxt_vnic_rss_configure(bp, vnic);
+ }
+
+ if (rc == 0)
+ rxq->rx_deferred_start = false;
+
+ return rc;
+}
+
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_vnic_info *vnic = NULL;
+ struct bnxt_rx_queue *rxq = NULL;
+ int rc = 0;
+
+ /* Rx CQ 0 also works as Default CQ for async notifications */
+ if (!rx_queue_id) {
+ PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ rxq = bp->rx_queues[rx_queue_id];
+
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
+
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
+ rc = bnxt_vnic_rss_configure(bp, vnic);
+ }
+
+ if (rc == 0)
+ bnxt_rx_queue_release_mbufs(rxq);
+
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.h
new file mode 100644
index 00000000..e5d6001d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_RQX_H_
+#define _BNXT_RQX_H_
+
+struct bnxt;
+struct bnxt_rx_ring_info;
+struct bnxt_cp_ring_info;
+struct bnxt_rx_queue {
+ rte_spinlock_t lock; /* Synchronize between rx_queue_stop
+ * and fast path
+ */
+ struct rte_mempool *mb_pool; /* mbuf pool for RX ring */
+ struct rte_mbuf *pkt_first_seg; /* 1st seg of pkt */
+ struct rte_mbuf *pkt_last_seg; /* Last seg of pkt */
+ uint64_t mbuf_initializer; /* val to init mbuf */
+ uint16_t nb_rx_desc; /* num of RX desc */
+ uint16_t rx_tail; /* cur val of RDT register */
+ uint16_t nb_rx_hold; /* num held free RX desc */
+ uint16_t rx_free_thresh; /* max free RX desc to hold */
+ uint16_t queue_id; /* RX queue index */
+ uint16_t reg_idx; /* RX queue register index */
+ uint16_t port_id; /* Device port identifier */
+ uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+ uint8_t rx_deferred_start; /* not in global dev start */
+
+ struct bnxt *bp;
+ int index;
+ struct bnxt_vnic_info *vnic;
+
+ uint32_t rx_buf_size;
+ uint32_t rx_buf_use_size; /* useable size */
+ struct bnxt_rx_ring_info *rx_ring;
+ struct bnxt_cp_ring_info *cp_ring;
+ rte_atomic64_t rx_mbuf_alloc_fail;
+ const struct rte_memzone *mz;
+};
+
+void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
+int bnxt_mq_rx_configure(struct bnxt *bp);
+void bnxt_rx_queue_release_op(void *rx_queue);
+int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+void bnxt_free_rx_mbufs(struct bnxt *bp);
+int bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_id);
+int bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_id);
+int bnxt_rx_queue_start(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.c
new file mode 100644
index 00000000..c7bc8848
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.c
@@ -0,0 +1,805 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include <rte_bitmap.h>
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_ring.h"
+#include "bnxt_rxr.h"
+#include "bnxt_rxq.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * RX Ring handling
+ */
+
+static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
+{
+ struct rte_mbuf *data;
+
+ data = rte_mbuf_raw_alloc(mb);
+
+ return data;
+}
+
+static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
+ struct bnxt_rx_ring_info *rxr,
+ uint16_t prod)
+{
+ struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+ struct rte_mbuf *mbuf;
+
+ mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!mbuf) {
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ return -ENOMEM;
+ }
+
+ rx_buf->mbuf = mbuf;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+
+ rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ return 0;
+}
+
+static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
+ struct bnxt_rx_ring_info *rxr,
+ uint16_t prod)
+{
+ struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
+ struct rte_mbuf *mbuf;
+
+ mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!mbuf) {
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ return -ENOMEM;
+ }
+
+ if (rxbd == NULL)
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
+ if (rx_buf == NULL)
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
+
+
+ rx_buf->mbuf = mbuf;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+
+ rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ return 0;
+}
+
+static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
+ struct rte_mbuf *mbuf)
+{
+ uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
+ struct bnxt_sw_rx_bd *prod_rx_buf;
+ struct rx_prod_pkt_bd *prod_bd;
+
+ prod_rx_buf = &rxr->rx_buf_ring[prod];
+
+ RTE_ASSERT(prod_rx_buf->mbuf == NULL);
+ RTE_ASSERT(mbuf != NULL);
+
+ prod_rx_buf->mbuf = mbuf;
+
+ prod_bd = &rxr->rx_desc_ring[prod];
+
+ prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxr->rx_prod = prod;
+}
+
+#ifdef BNXT_DEBUG
+static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
+ struct rte_mbuf *mbuf)
+{
+ uint16_t prod = rxr->ag_prod;
+ struct bnxt_sw_rx_bd *prod_rx_buf;
+ struct rx_prod_pkt_bd *prod_bd, *cons_bd;
+
+ prod_rx_buf = &rxr->ag_buf_ring[prod];
+
+ prod_rx_buf->mbuf = mbuf;
+
+ prod_bd = &rxr->ag_desc_ring[prod];
+ cons_bd = &rxr->ag_desc_ring[cons];
+
+ prod_bd->address = cons_bd->addr;
+}
+#endif
+
+static inline
+struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
+ uint16_t cons)
+{
+ struct bnxt_sw_rx_bd *cons_rx_buf;
+ struct rte_mbuf *mbuf;
+
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ RTE_ASSERT(cons_rx_buf->mbuf != NULL);
+ mbuf = cons_rx_buf->mbuf;
+ cons_rx_buf->mbuf = NULL;
+ return mbuf;
+}
+
+static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
+ struct rx_tpa_start_cmpl *tpa_start,
+ struct rx_tpa_start_cmpl_hi *tpa_start1)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id &
+ RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT;
+ uint16_t data_cons;
+ struct bnxt_tpa_info *tpa_info;
+ struct rte_mbuf *mbuf;
+
+ data_cons = tpa_start->opaque;
+ tpa_info = &rxr->tpa_info[agg_id];
+
+ mbuf = bnxt_consume_rx_buf(rxr, data_cons);
+
+ bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
+
+ tpa_info->mbuf = mbuf;
+ tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
+
+ mbuf->nb_segs = 1;
+ mbuf->next = NULL;
+ mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = rxq->port_id;
+ mbuf->ol_flags = PKT_RX_LRO;
+ if (likely(tpa_start->flags_type &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
+ mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
+ mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ } else {
+ mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ }
+ if (tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
+ mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
+ mbuf->ol_flags |= PKT_RX_VLAN;
+ }
+ if (likely(tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ /* recycle next mbuf */
+ data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
+ bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
+}
+
+static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
+ uint8_t agg_bufs, uint32_t raw_cp_cons)
+{
+ uint16_t last_cp_cons;
+ struct rx_pkt_cmpl *agg_cmpl;
+
+ raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
+ last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
+ agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
+ cpr->valid = FLIP_VALID(raw_cp_cons,
+ cpr->cp_ring_struct->ring_mask,
+ cpr->valid);
+ return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
+}
+
+/* TPA consume agg buffer out of order, allocate connected data only */
+static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);
+
+ /* TODO batch allocation for better performance */
+ while (rte_bitmap_get(rxr->ag_bitmap, next)) {
+ if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
+ PMD_DRV_LOG(ERR,
+ "agg mbuf alloc failed: prod=0x%x\n", next);
+ break;
+ }
+ rte_bitmap_clear(rxr->ag_bitmap, next);
+ rxr->ag_prod = next;
+ next = RING_NEXT(rxr->ag_ring_struct, next);
+ }
+
+ return 0;
+}
+
+static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
+ struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
+ uint8_t agg_buf)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ int i;
+ uint16_t cp_cons, ag_cons;
+ struct rx_pkt_cmpl *rxcmp;
+ struct rte_mbuf *last = mbuf;
+
+ for (i = 0; i < agg_buf; i++) {
+ struct bnxt_sw_rx_bd *ag_buf;
+ struct rte_mbuf *ag_mbuf;
+ *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
+ cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)
+ &cpr->cp_desc_ring[cp_cons];
+
+#ifdef BNXT_DEBUG
+ bnxt_dump_cmpl(cp_cons, rxcmp);
+#endif
+
+ ag_cons = rxcmp->opaque;
+ RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
+ ag_buf = &rxr->ag_buf_ring[ag_cons];
+ ag_mbuf = ag_buf->mbuf;
+ RTE_ASSERT(ag_mbuf != NULL);
+
+ ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
+
+ mbuf->nb_segs++;
+ mbuf->pkt_len += ag_mbuf->data_len;
+
+ last->next = ag_mbuf;
+ last = ag_mbuf;
+
+ ag_buf->mbuf = NULL;
+
+ /*
+ * As aggregation buffer consumed out of order in TPA module,
+ * use bitmap to track freed slots to be allocated and notified
+ * to NIC
+ */
+ rte_bitmap_set(rxr->ag_bitmap, ag_cons);
+ }
+ bnxt_prod_ag_mbuf(rxq);
+ return 0;
+}
+
+static inline struct rte_mbuf *bnxt_tpa_end(
+ struct bnxt_rx_queue *rxq,
+ uint32_t *raw_cp_cons,
+ struct rx_tpa_end_cmpl *tpa_end,
+ struct rx_tpa_end_cmpl_hi *tpa_end1 __rte_unused)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint8_t agg_id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK)
+ >> RX_TPA_END_CMPL_AGG_ID_SFT;
+ struct rte_mbuf *mbuf;
+ uint8_t agg_bufs;
+ struct bnxt_tpa_info *tpa_info;
+
+ tpa_info = &rxr->tpa_info[agg_id];
+ mbuf = tpa_info->mbuf;
+ RTE_ASSERT(mbuf != NULL);
+
+ rte_prefetch0(mbuf);
+ agg_bufs = (rte_le_to_cpu_32(tpa_end->agg_bufs_v1) &
+ RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT;
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
+ return NULL;
+ bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs);
+ }
+ mbuf->l4_len = tpa_end->payload_offset;
+
+ struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
+ RTE_ASSERT(new_data != NULL);
+ if (!new_data) {
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ return NULL;
+ }
+ tpa_info->mbuf = new_data;
+
+ return mbuf;
+}
+
+static uint32_t
+bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
+{
+ uint32_t l3, pkt_type = 0;
+ uint32_t t_ipcs = 0, ip6 = 0, vlan = 0;
+ uint32_t flags_type;
+
+ vlan = !!(rxcmp1->flags2 &
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
+ pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;
+
+ t_ipcs = !!(rxcmp1->flags2 &
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));
+ ip6 = !!(rxcmp1->flags2 &
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));
+
+ flags_type = rxcmp->flags_type &
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
+
+ if (!t_ipcs && !ip6)
+ l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (!t_ipcs && ip6)
+ l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ else if (t_ipcs && !ip6)
+ l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ else
+ l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+
+ switch (flags_type) {
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP):
+ if (!t_ipcs)
+ pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
+ else
+ pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
+ break;
+
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP):
+ if (!t_ipcs)
+ pkt_type |= l3 | RTE_PTYPE_L4_TCP;
+ else
+ pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
+ break;
+
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP):
+ if (!t_ipcs)
+ pkt_type |= l3 | RTE_PTYPE_L4_UDP;
+ else
+ pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
+ break;
+
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP):
+ pkt_type |= l3;
+ break;
+ }
+
+ return pkt_type;
+}
+
+static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
+ struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct rx_pkt_cmpl *rxcmp;
+ struct rx_pkt_cmpl_hi *rxcmp1;
+ uint32_t tmp_raw_cons = *raw_cons;
+ uint16_t cons, prod, cp_cons =
+ RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
+#ifdef BNXT_DEBUG
+ uint16_t ag_cons;
+#endif
+ struct rte_mbuf *mbuf;
+ int rc = 0;
+ uint8_t agg_buf = 0;
+ uint16_t cmp_type;
+
+ rxcmp = (struct rx_pkt_cmpl *)
+ &cpr->cp_desc_ring[cp_cons];
+
+ tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+ cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
+ rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];
+
+ if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
+ return -EBUSY;
+
+ cpr->valid = FLIP_VALID(cp_cons,
+ cpr->cp_ring_struct->ring_mask,
+ cpr->valid);
+
+ cmp_type = CMP_TYPE(rxcmp);
+ if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) {
+ bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
+ (struct rx_tpa_start_cmpl_hi *)rxcmp1);
+ rc = -EINVAL; /* Continue w/o new mbuf */
+ goto next_rx;
+ } else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
+ mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
+ (struct rx_tpa_end_cmpl *)rxcmp,
+ (struct rx_tpa_end_cmpl_hi *)rxcmp1);
+ if (unlikely(!mbuf))
+ return -EBUSY;
+ *rx_pkt = mbuf;
+ goto next_rx;
+ } else if (cmp_type != 0x11) {
+ rc = -EINVAL;
+ goto next_rx;
+ }
+
+ agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
+ >> RX_PKT_CMPL_AGG_BUFS_SFT;
+ if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
+ return -EBUSY;
+
+ prod = rxr->rx_prod;
+
+ cons = rxcmp->opaque;
+ mbuf = bnxt_consume_rx_buf(rxr, cons);
+ if (mbuf == NULL)
+ return -EBUSY;
+
+ rte_prefetch0(mbuf);
+
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->next = NULL;
+ mbuf->pkt_len = rxcmp->len;
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = rxq->port_id;
+ mbuf->ol_flags = 0;
+ if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
+ mbuf->hash.rss = rxcmp->rss_hash;
+ mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ } else {
+ mbuf->hash.fdir.id = rxcmp1->cfa_code;
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ }
+
+ if ((rxcmp->flags_type & rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_MASK)) ==
+ RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)
+ mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
+
+ if (agg_buf)
+ bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);
+
+ if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
+ mbuf->vlan_tci = rxcmp1->metadata &
+ (RX_PKT_CMPL_METADATA_VID_MASK |
+ RX_PKT_CMPL_METADATA_DE |
+ RX_PKT_CMPL_METADATA_PRI_MASK);
+ mbuf->ol_flags |= PKT_RX_VLAN;
+ }
+
+ if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else if (likely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ else
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else if (likely(RX_CMP_L4_CS_UNKNOWN(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ else
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+
+ mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
+
+#ifdef BNXT_DEBUG
+ if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
+ /* Re-install the mbuf back to the rx ring */
+ bnxt_reuse_rx_mbuf(rxr, cons, mbuf);
+ if (agg_buf)
+ bnxt_reuse_ag_mbuf(rxr, ag_cons, mbuf);
+
+ rc = -EIO;
+ goto next_rx;
+ }
+#endif
+ /*
+ * TODO: Redesign this....
+ * If the allocation fails, the packet does not get received.
+ * Simply returning this will result in slowly falling behind
+ * on the producer ring buffers.
+ * Instead, "filling up" the producer just before ringing the
+ * doorbell could be a better solution since it will let the
+ * producer ring starve until memory is available again pushing
+ * the drops into hardware and getting them out of the driver
+ * allowing recovery to a full producer ring.
+ *
+ * This could also help with cache usage by preventing per-packet
+ * calls in favour of a tight loop with the same function being called
+ * in it.
+ */
+ prod = RING_NEXT(rxr->rx_ring_struct, prod);
+ if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
+ PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
+ rc = -ENOMEM;
+ goto rx;
+ }
+ rxr->rx_prod = prod;
+ /*
+ * All MBUFs are allocated with the same size under DPDK,
+ * no optimization for rx_copy_thresh
+ */
+rx:
+ *rx_pkt = mbuf;
+
+next_rx:
+
+ *raw_cons = tmp_raw_cons;
+
+ return rc;
+}
+
+uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct bnxt_rx_queue *rxq = rx_queue;
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint32_t raw_cons = cpr->cp_raw_cons;
+ uint32_t cons;
+ int nb_rx_pkts = 0;
+ struct rx_pkt_cmpl *rxcmp;
+ uint16_t prod = rxr->rx_prod;
+ uint16_t ag_prod = rxr->ag_prod;
+ int rc = 0;
+ bool evt = false;
+
+ /* If Rx Q was stopped return. RxQ0 cannot be stopped. */
+ if (unlikely(((rxq->rx_deferred_start ||
+ !rte_spinlock_trylock(&rxq->lock)) &&
+ rxq->queue_id)))
+ return 0;
+
+ /* Handle RX burst request */
+ while (1) {
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ rte_prefetch0(&cpr->cp_desc_ring[cons]);
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+ if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ break;
+ cpr->valid = FLIP_VALID(cons,
+ cpr->cp_ring_struct->ring_mask,
+ cpr->valid);
+
+ /* TODO: Avoid magic numbers... */
+ if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
+ rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
+ if (likely(!rc) || rc == -ENOMEM)
+ nb_rx_pkts++;
+ if (rc == -EBUSY) /* partial completion */
+ break;
+ } else {
+ evt =
+ bnxt_event_hwrm_resp_handler(rxq->bp,
+ (struct cmpl_base *)rxcmp);
+ }
+
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ if (nb_rx_pkts == nb_pkts || evt)
+ break;
+ /* Post some Rx buf early in case of larger burst processing */
+ if (nb_rx_pkts == BNXT_RX_POST_THRESH)
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ }
+
+ cpr->cp_raw_cons = raw_cons;
+ if (!nb_rx_pkts && !evt) {
+ /*
+ * For PMD, there is no need to keep on pushing to REARM
+ * the doorbell if there are no new completions
+ */
+ goto done;
+ }
+
+ if (prod != rxr->rx_prod)
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+
+ /* Ring the AGG ring DB */
+ if (ag_prod != rxr->ag_prod)
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+
+ /* Attempt to alloc Rx buf in case of a previous allocation failure. */
+ if (rc == -ENOMEM) {
+ int i;
+
+ for (i = prod; i <= nb_rx_pkts;
+ i = RING_NEXT(rxr->rx_ring_struct, i)) {
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+
+ /* Buffer already allocated for this index. */
+ if (rx_buf->mbuf != NULL)
+ continue;
+
+ /* This slot is empty. Alloc buffer for Rx */
+ if (!bnxt_alloc_rx_data(rxq, rxr, i)) {
+ rxr->rx_prod = i;
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ } else {
+ PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
+ break;
+ }
+ }
+ }
+
+done:
+ rte_spinlock_unlock(&rxq->lock);
+
+ return nb_rx_pkts;
+}
+
+void bnxt_free_rx_rings(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_rx_queue *rxq;
+
+ if (!bp->rx_queues)
+ return;
+
+ for (i = 0; i < (int)bp->rx_nr_rings; i++) {
+ rxq = bp->rx_queues[i];
+ if (!rxq)
+ continue;
+
+ bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+ rte_free(rxq->rx_ring->rx_ring_struct);
+
+ /* Free the Aggregator ring */
+ bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
+ rte_free(rxq->rx_ring->ag_ring_struct);
+ rxq->rx_ring->ag_ring_struct = NULL;
+
+ rte_free(rxq->rx_ring);
+
+ bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+ rte_free(rxq->cp_ring->cp_ring_struct);
+ rte_free(rxq->cp_ring);
+
+ rte_free(rxq);
+ bp->rx_queues[i] = NULL;
+ }
+}
+
+int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
+{
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring *ring;
+
+ rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN +
+ (2 * VLAN_TAG_SIZE);
+ rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);
+
+ rxr = rte_zmalloc_socket("bnxt_rx_ring",
+ sizeof(struct bnxt_rx_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxr == NULL)
+ return -ENOMEM;
+ rxq->rx_ring = rxr;
+
+ ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ rxr->rx_ring_struct = ring;
+ ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
+ ring->ring_mask = ring->ring_size - 1;
+ ring->bd = (void *)rxr->rx_desc_ring;
+ ring->bd_dma = rxr->rx_desc_mapping;
+ ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
+ ring->vmem = (void **)&rxr->rx_buf_ring;
+
+ cpr = rte_zmalloc_socket("bnxt_rx_ring",
+ sizeof(struct bnxt_cp_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cpr == NULL)
+ return -ENOMEM;
+ rxq->cp_ring = cpr;
+
+ ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ cpr->cp_ring_struct = ring;
+ ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size *
+ (2 + AGG_RING_SIZE_FACTOR));
+ ring->ring_mask = ring->ring_size - 1;
+ ring->bd = (void *)cpr->cp_desc_ring;
+ ring->bd_dma = cpr->cp_desc_mapping;
+ ring->vmem_size = 0;
+ ring->vmem = NULL;
+
+ /* Allocate Aggregator rings */
+ ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ rxr->ag_ring_struct = ring;
+ ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
+ AGG_RING_SIZE_FACTOR);
+ ring->ring_mask = ring->ring_size - 1;
+ ring->bd = (void *)rxr->ag_desc_ring;
+ ring->bd_dma = rxr->ag_desc_mapping;
+ ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
+ ring->vmem = (void **)&rxr->ag_buf_ring;
+
+ return 0;
+}
+
+static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
+ uint16_t len)
+{
+ uint32_t j;
+ struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;
+
+ if (!rx_bd_ring)
+ return;
+ for (j = 0; j < ring->ring_size; j++) {
+ rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
+ rx_bd_ring[j].len = rte_cpu_to_le_16(len);
+ rx_bd_ring[j].opaque = j;
+ }
+}
+
+int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring *ring;
+ uint32_t prod, type;
+ unsigned int i;
+ uint16_t size;
+
+ size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (rxq->rx_buf_use_size <= size)
+ size = rxq->rx_buf_use_size;
+
+ type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;
+
+ rxr = rxq->rx_ring;
+ ring = rxr->rx_ring_struct;
+ bnxt_init_rxbds(ring, type, size);
+
+ prod = rxr->rx_prod;
+ for (i = 0; i < ring->ring_size; i++) {
+ if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
+ PMD_DRV_LOG(WARNING,
+ "init'ed rx ring %d with %d/%d mbufs only\n",
+ rxq->queue_id, i, ring->ring_size);
+ break;
+ }
+ rxr->rx_prod = prod;
+ prod = RING_NEXT(rxr->rx_ring_struct, prod);
+ }
+
+ ring = rxr->ag_ring_struct;
+ type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
+ bnxt_init_rxbds(ring, type, size);
+ prod = rxr->ag_prod;
+
+ for (i = 0; i < ring->ring_size; i++) {
+ if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
+ PMD_DRV_LOG(WARNING,
+ "init'ed AG ring %d with %d/%d mbufs only\n",
+ rxq->queue_id, i, ring->ring_size);
+ break;
+ }
+ rxr->ag_prod = prod;
+ prod = RING_NEXT(rxr->ag_ring_struct, prod);
+ }
+ PMD_DRV_LOG(DEBUG, "AGG Done!\n");
+
+ if (rxr->tpa_info) {
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ rxr->tpa_info[i].mbuf =
+ __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!rxr->tpa_info[i].mbuf) {
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ return -ENOMEM;
+ }
+ }
+ }
+ PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.h
new file mode 100644
index 00000000..3815a219
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_RXR_H_
+#define _BNXT_RXR_H_
+
+#define B_RX_DB(db, prod) \
+ (*(uint32_t *)db = (DB_KEY_RX | prod))
+
+#define BNXT_TPA_L4_SIZE(x) \
+ { \
+ typeof(x) hdr_info = (x); \
+ (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32) \
+ }
+
+#define BNXT_TPA_INNER_L3_OFF(hdr_info) \
+ (((hdr_info) >> 18) & 0x1ff)
+
+#define BNXT_TPA_INNER_L2_OFF(hdr_info) \
+ (((hdr_info) >> 9) & 0x1ff)
+
+#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
+ ((hdr_info) & 0x1ff)
+
+#define RX_CMP_L4_CS_BITS \
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
+ RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
+
+#define RX_CMP_L4_CS_ERR_BITS \
+ rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR | \
+ RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR)
+
+#define RX_CMP_L4_CS_OK(rxcmp1) \
+ (((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) && \
+ !((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS))
+
+#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \
+ !((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS)
+
+#define RX_CMP_IP_CS_ERR_BITS \
+ rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR | \
+ RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR)
+
+#define RX_CMP_IP_CS_BITS \
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
+ RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
+
+#define RX_CMP_IP_CS_OK(rxcmp1) \
+ (((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) && \
+ !((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS))
+
+#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \
+ !((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS)
+
+#define BNXT_RX_POST_THRESH 32
+
+enum pkt_hash_types {
+ PKT_HASH_TYPE_NONE, /* Undefined type */
+ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
+ PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
+ PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
+};
+
+struct bnxt_tpa_info {
+ struct rte_mbuf *mbuf;
+ uint16_t len;
+ unsigned short gso_type;
+ uint32_t flags2;
+ uint32_t metadata;
+ enum pkt_hash_types hash_type;
+ uint32_t rss_hash;
+ uint32_t hdr_info;
+};
+
+struct bnxt_sw_rx_bd {
+ struct rte_mbuf *mbuf; /* data associated with RX descriptor */
+};
+
+struct bnxt_rx_ring_info {
+ uint16_t rx_prod;
+ uint16_t ag_prod;
+ void *rx_doorbell;
+ void *ag_doorbell;
+
+ struct rx_prod_pkt_bd *rx_desc_ring;
+ struct rx_prod_pkt_bd *ag_desc_ring;
+ struct bnxt_sw_rx_bd *rx_buf_ring; /* sw ring */
+ struct bnxt_sw_rx_bd *ag_buf_ring; /* sw ring */
+
+ rte_iova_t rx_desc_mapping;
+ rte_iova_t ag_desc_mapping;
+
+ struct bnxt_ring *rx_ring_struct;
+ struct bnxt_ring *ag_ring_struct;
+
+ /*
+ * To deal with out of order return from TPA, use free buffer indicator
+ */
+ struct rte_bitmap *ag_bitmap;
+
+ struct bnxt_tpa_info *tpa_info;
+};
+
+uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+void bnxt_free_rx_rings(struct bnxt *bp);
+int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
+int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.c
new file mode 100644
index 00000000..a5d3c866
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.c
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_rxq.h"
+#include "bnxt_stats.h"
+#include "bnxt_txq.h"
+#include "hsi_struct_def_dpdk.h"
+
+static const struct bnxt_xstats_name_off bnxt_rx_stats_strings[] = {
+ {"rx_64b_frames", offsetof(struct rx_port_stats,
+ rx_64b_frames)},
+ {"rx_65b_127b_frames", offsetof(struct rx_port_stats,
+ rx_65b_127b_frames)},
+ {"rx_128b_255b_frames", offsetof(struct rx_port_stats,
+ rx_128b_255b_frames)},
+ {"rx_256b_511b_frames", offsetof(struct rx_port_stats,
+ rx_256b_511b_frames)},
+ {"rx_512b_1023b_frames", offsetof(struct rx_port_stats,
+ rx_512b_1023b_frames)},
+ {"rx_1024b_1518_frames", offsetof(struct rx_port_stats,
+ rx_1024b_1518_frames)},
+ {"rx_good_vlan_frames", offsetof(struct rx_port_stats,
+ rx_good_vlan_frames)},
+ {"rx_1519b_2047b_frames", offsetof(struct rx_port_stats,
+ rx_1519b_2047b_frames)},
+ {"rx_2048b_4095b_frames", offsetof(struct rx_port_stats,
+ rx_2048b_4095b_frames)},
+ {"rx_4096b_9216b_frames", offsetof(struct rx_port_stats,
+ rx_4096b_9216b_frames)},
+ {"rx_9217b_16383b_frames", offsetof(struct rx_port_stats,
+ rx_9217b_16383b_frames)},
+ {"rx_total_frames", offsetof(struct rx_port_stats,
+ rx_total_frames)},
+ {"rx_ucast_frames", offsetof(struct rx_port_stats,
+ rx_ucast_frames)},
+ {"rx_mcast_frames", offsetof(struct rx_port_stats,
+ rx_mcast_frames)},
+ {"rx_bcast_frames", offsetof(struct rx_port_stats,
+ rx_bcast_frames)},
+ {"rx_fcs_err_frames", offsetof(struct rx_port_stats,
+ rx_fcs_err_frames)},
+ {"rx_ctrl_frames", offsetof(struct rx_port_stats,
+ rx_ctrl_frames)},
+ {"rx_pause_frames", offsetof(struct rx_port_stats,
+ rx_pause_frames)},
+ {"rx_pfc_frames", offsetof(struct rx_port_stats,
+ rx_pfc_frames)},
+ {"rx_align_err_frames", offsetof(struct rx_port_stats,
+ rx_align_err_frames)},
+ {"rx_ovrsz_frames", offsetof(struct rx_port_stats,
+ rx_ovrsz_frames)},
+ {"rx_jbr_frames", offsetof(struct rx_port_stats,
+ rx_jbr_frames)},
+ {"rx_mtu_err_frames", offsetof(struct rx_port_stats,
+ rx_mtu_err_frames)},
+ {"rx_tagged_frames", offsetof(struct rx_port_stats,
+ rx_tagged_frames)},
+ {"rx_double_tagged_frames", offsetof(struct rx_port_stats,
+ rx_double_tagged_frames)},
+ {"rx_good_frames", offsetof(struct rx_port_stats,
+ rx_good_frames)},
+ {"rx_undrsz_frames", offsetof(struct rx_port_stats,
+ rx_undrsz_frames)},
+ {"rx_eee_lpi_events", offsetof(struct rx_port_stats,
+ rx_eee_lpi_events)},
+ {"rx_eee_lpi_duration", offsetof(struct rx_port_stats,
+ rx_eee_lpi_duration)},
+ {"rx_bytes", offsetof(struct rx_port_stats,
+ rx_bytes)},
+ {"rx_runt_bytes", offsetof(struct rx_port_stats,
+ rx_runt_bytes)},
+ {"rx_runt_frames", offsetof(struct rx_port_stats,
+ rx_runt_frames)},
+};
+
+static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = {
+ {"tx_64b_frames", offsetof(struct tx_port_stats,
+ tx_64b_frames)},
+ {"tx_65b_127b_frames", offsetof(struct tx_port_stats,
+ tx_65b_127b_frames)},
+ {"tx_128b_255b_frames", offsetof(struct tx_port_stats,
+ tx_128b_255b_frames)},
+ {"tx_256b_511b_frames", offsetof(struct tx_port_stats,
+ tx_256b_511b_frames)},
+ {"tx_512b_1023b_frames", offsetof(struct tx_port_stats,
+ tx_512b_1023b_frames)},
+ {"tx_1024b_1518_frames", offsetof(struct tx_port_stats,
+ tx_1024b_1518_frames)},
+ {"tx_good_vlan_frames", offsetof(struct tx_port_stats,
+ tx_good_vlan_frames)},
+ {"tx_1519b_2047_frames", offsetof(struct tx_port_stats,
+ tx_1519b_2047_frames)},
+ {"tx_2048b_4095b_frames", offsetof(struct tx_port_stats,
+ tx_2048b_4095b_frames)},
+ {"tx_4096b_9216b_frames", offsetof(struct tx_port_stats,
+ tx_4096b_9216b_frames)},
+ {"tx_9217b_16383b_frames", offsetof(struct tx_port_stats,
+ tx_9217b_16383b_frames)},
+ {"tx_good_frames", offsetof(struct tx_port_stats,
+ tx_good_frames)},
+ {"tx_total_frames", offsetof(struct tx_port_stats,
+ tx_total_frames)},
+ {"tx_ucast_frames", offsetof(struct tx_port_stats,
+ tx_ucast_frames)},
+ {"tx_mcast_frames", offsetof(struct tx_port_stats,
+ tx_mcast_frames)},
+ {"tx_bcast_frames", offsetof(struct tx_port_stats,
+ tx_bcast_frames)},
+ {"tx_pause_frames", offsetof(struct tx_port_stats,
+ tx_pause_frames)},
+ {"tx_pfc_frames", offsetof(struct tx_port_stats,
+ tx_pfc_frames)},
+ {"tx_jabber_frames", offsetof(struct tx_port_stats,
+ tx_jabber_frames)},
+ {"tx_fcs_err_frames", offsetof(struct tx_port_stats,
+ tx_fcs_err_frames)},
+ {"tx_err", offsetof(struct tx_port_stats,
+ tx_err)},
+ {"tx_fifo_underruns", offsetof(struct tx_port_stats,
+ tx_fifo_underruns)},
+ {"tx_eee_lpi_events", offsetof(struct tx_port_stats,
+ tx_eee_lpi_events)},
+ {"tx_eee_lpi_duration", offsetof(struct tx_port_stats,
+ tx_eee_lpi_duration)},
+ {"tx_total_collisions", offsetof(struct tx_port_stats,
+ tx_total_collisions)},
+ {"tx_bytes", offsetof(struct tx_port_stats,
+ tx_bytes)},
+};
+
+static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = {
+ {"tx_ucast_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_ucast_pkts)},
+ {"tx_mcast_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_mcast_pkts)},
+ {"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_bcast_pkts)},
+ {"tx_discard_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_discard_pkts)},
+ {"tx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_drop_pkts)},
+ {"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
+ tx_ucast_bytes)},
+ {"tx_mcast_bytes", offsetof(struct hwrm_func_qstats_output,
+ tx_mcast_bytes)},
+ {"tx_bcast_bytes", offsetof(struct hwrm_func_qstats_output,
+ tx_bcast_bytes)},
+ {"rx_ucast_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_ucast_pkts)},
+ {"rx_mcast_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_mcast_pkts)},
+ {"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_bcast_pkts)},
+ {"rx_discard_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_discard_pkts)},
+ {"rx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_drop_pkts)},
+ {"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
+ rx_ucast_bytes)},
+ {"rx_mcast_bytes", offsetof(struct hwrm_func_qstats_output,
+ rx_mcast_bytes)},
+ {"rx_bcast_bytes", offsetof(struct hwrm_func_qstats_output,
+ rx_bcast_bytes)},
+ {"rx_agg_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_agg_pkts)},
+ {"rx_agg_bytes", offsetof(struct hwrm_func_qstats_output,
+ rx_agg_bytes)},
+ {"rx_agg_events", offsetof(struct hwrm_func_qstats_output,
+ rx_agg_events)},
+ {"rx_agg_aborts", offsetof(struct hwrm_func_qstats_output,
+ rx_agg_aborts)},
+};
+
+/*
+ * Statistics functions
+ */
+
+void bnxt_free_stats(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < (int)bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+
+ bnxt_free_txq_stats(txq);
+ }
+ for (i = 0; i < (int)bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+
+ bnxt_free_rxq_stats(rxq);
+ }
+}
+
+int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *bnxt_stats)
+{
+ int rc = 0;
+ unsigned int i;
+ struct bnxt *bp = eth_dev->data->dev_private;
+
+ memset(bnxt_stats, 0, sizeof(*bnxt_stats));
+ if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
+ PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
+ return -1;
+ }
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+
+ rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
+ bnxt_stats, 1);
+ if (unlikely(rc))
+ return rc;
+ bnxt_stats->rx_nombuf +=
+ rte_atomic64_read(&rxq->rx_mbuf_alloc_fail);
+ }
+
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+
+ rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
+ bnxt_stats, 0);
+ if (unlikely(rc))
+ return rc;
+ }
+ rc = bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats);
+ if (unlikely(rc))
+ return rc;
+ return rc;
+}
+
+void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ unsigned int i;
+
+ if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
+ PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
+ return;
+ }
+
+ bnxt_clear_all_hwrm_stat_ctxs(bp);
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+
+ rte_atomic64_clear(&rxq->rx_mbuf_alloc_fail);
+ }
+}
+
+int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat *xstats, unsigned int n)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ unsigned int count, i;
+ uint64_t tx_drop_pkts;
+
+ bnxt_hwrm_port_qstats(bp);
+ bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts);
+
+ count = RTE_DIM(bnxt_rx_stats_strings) +
+ RTE_DIM(bnxt_tx_stats_strings) + 1; /* For tx_drop_pkts */
+
+ if (n < count)
+ return count;
+
+ count = 0;
+ for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
+ uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
+ xstats[count].id = count;
+ xstats[count].value = rte_le_to_cpu_64(
+ *(uint64_t *)((char *)rx_stats +
+ bnxt_rx_stats_strings[i].offset));
+ count++;
+ }
+
+ for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
+ uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
+ xstats[count].id = count;
+ xstats[count].value = rte_le_to_cpu_64(
+ *(uint64_t *)((char *)tx_stats +
+ bnxt_tx_stats_strings[i].offset));
+ count++;
+ }
+
+ /* The Tx drop pkts aka the Anti spoof coounter */
+ xstats[count].id = count;
+ xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts);
+ count++;
+
+ return count;
+}
+
+int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ /* Account for the Tx drop pkts aka the Anti spoof counter */
+ const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
+ RTE_DIM(bnxt_tx_stats_strings) + 1;
+ unsigned int i, count;
+
+ if (xstats_names != NULL) {
+ count = 0;
+
+ for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ bnxt_rx_stats_strings[i].name);
+ count++;
+ }
+
+ for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ bnxt_tx_stats_strings[i].name);
+ count++;
+ }
+
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ bnxt_func_stats_strings[4].name);
+ count++;
+ }
+ return stat_cnt;
+}
+
+void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (bp->flags & BNXT_FLAG_PORT_STATS && BNXT_SINGLE_PF(bp))
+ bnxt_hwrm_port_clr_stats(bp);
+
+ if (BNXT_VF(bp))
+ PMD_DRV_LOG(ERR, "Operation not supported on a VF device\n");
+ if (!BNXT_SINGLE_PF(bp))
+ PMD_DRV_LOG(ERR, "Operation not supported on a MF device\n");
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+ PMD_DRV_LOG(ERR, "Operation not supported\n");
+}
+
+int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int limit)
+{
+ /* Account for the Tx drop pkts aka the Anti spoof counter */
+ const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
+ RTE_DIM(bnxt_tx_stats_strings) + 1;
+ struct rte_eth_xstat xstats[stat_cnt];
+ uint64_t values_copy[stat_cnt];
+ uint16_t i;
+
+ if (!ids)
+ return bnxt_dev_xstats_get_op(dev, xstats, stat_cnt);
+
+ bnxt_dev_xstats_get_by_id_op(dev, NULL, values_copy, stat_cnt);
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= stat_cnt) {
+ PMD_DRV_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return stat_cnt;
+}
+
+int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids, unsigned int limit)
+{
+ /* Account for the Tx drop pkts aka the Anti spoof counter */
+ const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
+ RTE_DIM(bnxt_tx_stats_strings) + 1;
+ struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
+ uint16_t i;
+
+ if (!ids)
+ return bnxt_dev_xstats_get_names_op(dev, xstats_names,
+ stat_cnt);
+ bnxt_dev_xstats_get_names_by_id_op(dev, xstats_names_copy, NULL,
+ stat_cnt);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= stat_cnt) {
+ PMD_DRV_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name,
+ xstats_names_copy[ids[i]].name);
+ }
+ return stat_cnt;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.h
new file mode 100644
index 00000000..b0f135a5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_STATS_H_
+#define _BNXT_STATS_H_
+
+#include <rte_ethdev_driver.h>
+
+void bnxt_free_stats(struct bnxt *bp);
+int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *bnxt_stats);
+void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev);
+int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit);
+int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat *xstats, unsigned int n);
+void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev);
+int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int limit);
+int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids, unsigned int limit);
+
+struct bnxt_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.c
new file mode 100644
index 00000000..b9b975e4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.c
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <inttypes.h>
+
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_ring.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
+
+/*
+ * TX Queues
+ */
+
+void bnxt_free_txq_stats(struct bnxt_tx_queue *txq)
+{
+ if (txq && txq->cp_ring && txq->cp_ring->hw_stats)
+ txq->cp_ring->hw_stats = NULL;
+}
+
+static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
+{
+ struct bnxt_sw_tx_bd *sw_ring;
+ uint16_t i;
+
+ if (!txq)
+ return;
+
+ sw_ring = txq->tx_ring->tx_buf_ring;
+ if (sw_ring) {
+ for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) {
+ if (sw_ring[i].mbuf) {
+ rte_pktmbuf_free(sw_ring[i].mbuf);
+ sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+void bnxt_free_tx_mbufs(struct bnxt *bp)
+{
+ struct bnxt_tx_queue *txq;
+ int i;
+
+ for (i = 0; i < (int)bp->tx_nr_rings; i++) {
+ txq = bp->tx_queues[i];
+ bnxt_tx_queue_release_mbufs(txq);
+ }
+}
+
+void bnxt_tx_queue_release_op(void *tx_queue)
+{
+ struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
+
+ if (txq) {
+ /* Free TX ring hardware descriptors */
+ bnxt_tx_queue_release_mbufs(txq);
+ bnxt_free_ring(txq->tx_ring->tx_ring_struct);
+
+ /* Free TX completion ring hardware descriptors */
+ bnxt_free_ring(txq->cp_ring->cp_ring_struct);
+
+ bnxt_free_txq_stats(txq);
+ rte_memzone_free(txq->mz);
+ txq->mz = NULL;
+
+ rte_free(txq);
+ }
+}
+
+int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_tx_queue *txq;
+ int rc = 0;
+
+ if (queue_idx >= bp->max_tx_rings) {
+ PMD_DRV_LOG(ERR,
+ "Cannot create Tx ring %d. Only %d rings available\n",
+ queue_idx, bp->max_tx_rings);
+ return -EINVAL;
+ }
+
+ if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (eth_dev->data->tx_queues) {
+ txq = eth_dev->data->tx_queues[queue_idx];
+ if (txq) {
+ bnxt_tx_queue_release_op(txq);
+ txq = NULL;
+ }
+ }
+ txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
+ rc = -ENOMEM;
+ goto out;
+ }
+ txq->bp = bp;
+ txq->nb_tx_desc = nb_desc;
+ txq->tx_free_thresh = tx_conf->tx_free_thresh;
+
+ rc = bnxt_init_tx_ring_struct(txq, socket_id);
+ if (rc)
+ goto out;
+
+ txq->queue_id = queue_idx;
+ txq->port_id = eth_dev->data->port_id;
+
+ /* Allocate TX ring hardware descriptors */
+ if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring,
+ "txr")) {
+ PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
+ bnxt_tx_queue_release_op(txq);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (bnxt_init_one_tx_ring(txq)) {
+ PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!");
+ bnxt_tx_queue_release_op(txq);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ eth_dev->data->tx_queues[queue_idx] = txq;
+
+out:
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.h
new file mode 100644
index 00000000..f2c712a7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_TXQ_H_
+#define _BNXT_TXQ_H_
+
+struct bnxt_tx_ring_info;
+struct bnxt_cp_ring_info;
+struct bnxt_tx_queue {
+ uint16_t nb_tx_desc; /* number of TX descriptors */
+ uint16_t tx_free_thresh;/* minimum TX before freeing */
+ /** Index to last TX descriptor to have been cleaned. */
+ uint16_t last_desc_cleaned;
+ /** Total number of TX descriptors ready to be allocated. */
+ uint16_t tx_next_dd; /* next desc to scan for DD bit */
+ uint16_t tx_next_rs; /* next desc to set RS bit */
+ uint16_t queue_id; /* TX queue index */
+ uint16_t reg_idx; /* TX queue register index */
+ uint16_t port_id; /* Device port identifier */
+ uint8_t pthresh; /* Prefetch threshold register */
+ uint8_t hthresh; /* Host threshold register */
+ uint8_t wthresh; /* Write-back threshold reg */
+ uint32_t ctx_curr; /* Hardware context states */
+ uint8_t tx_deferred_start; /* not in global dev start */
+ uint8_t cmpl_next; /* Next BD to trigger a compl */
+
+ struct bnxt *bp;
+ int index;
+ int tx_wake_thresh;
+ struct bnxt_tx_ring_info *tx_ring;
+
+ unsigned int cp_nr_rings;
+ struct bnxt_cp_ring_info *cp_ring;
+ const struct rte_memzone *mz;
+};
+
+void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);
+void bnxt_free_tx_mbufs(struct bnxt *bp);
+void bnxt_tx_queue_release_op(void *tx_queue);
+int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.c
new file mode 100644
index 00000000..67bb35e0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.c
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_ring.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
+#include "hsi_struct_def_dpdk.h"
+#include <stdbool.h>
+
+/*
+ * TX Ring handling
+ */
+
+void bnxt_free_tx_rings(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < (int)bp->tx_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+
+ if (!txq)
+ continue;
+
+ bnxt_free_ring(txq->tx_ring->tx_ring_struct);
+ rte_free(txq->tx_ring->tx_ring_struct);
+ rte_free(txq->tx_ring);
+
+ bnxt_free_ring(txq->cp_ring->cp_ring_struct);
+ rte_free(txq->cp_ring->cp_ring_struct);
+ rte_free(txq->cp_ring);
+
+ rte_free(txq);
+ bp->tx_queues[i] = NULL;
+ }
+}
+
+int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
+{
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+
+ txq->tx_wake_thresh = ring->ring_size / 2;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ return 0;
+}
+
+int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
+{
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring *ring;
+
+ txr = rte_zmalloc_socket("bnxt_tx_ring",
+ sizeof(struct bnxt_tx_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txr == NULL)
+ return -ENOMEM;
+ txq->tx_ring = txr;
+
+ ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ txr->tx_ring_struct = ring;
+ ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
+ ring->ring_mask = ring->ring_size - 1;
+ ring->bd = (void *)txr->tx_desc_ring;
+ ring->bd_dma = txr->tx_desc_mapping;
+ ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
+ ring->vmem = (void **)&txr->tx_buf_ring;
+
+ cpr = rte_zmalloc_socket("bnxt_tx_ring",
+ sizeof(struct bnxt_cp_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cpr == NULL)
+ return -ENOMEM;
+ txq->cp_ring = cpr;
+
+ ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ cpr->cp_ring_struct = ring;
+ ring->ring_size = txr->tx_ring_struct->ring_size;
+ ring->ring_mask = ring->ring_size - 1;
+ ring->bd = (void *)cpr->cp_desc_ring;
+ ring->bd_dma = cpr->cp_desc_mapping;
+ ring->vmem_size = 0;
+ ring->vmem = NULL;
+
+ return 0;
+}
+
+static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)
+{
+ /* Tell compiler to fetch tx indices from memory. */
+ rte_compiler_barrier();
+
+ return txr->tx_ring_struct->ring_size -
+ ((txr->tx_prod - txr->tx_cons) &
+ txr->tx_ring_struct->ring_mask) - 1;
+}
+
+static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
+ struct bnxt_tx_queue *txq,
+ uint16_t *coal_pkts,
+ uint16_t *cmpl_next)
+{
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct tx_bd_long *txbd;
+ struct tx_bd_long_hi *txbd1;
+ uint32_t vlan_tag_flags, cfa_action;
+ bool long_bd = false;
+ uint16_t last_prod = 0;
+ struct rte_mbuf *m_seg;
+ struct bnxt_sw_tx_bd *tx_buf;
+ static const uint32_t lhint_arr[4] = {
+ TX_BD_LONG_FLAGS_LHINT_LT512,
+ TX_BD_LONG_FLAGS_LHINT_LT1K,
+ TX_BD_LONG_FLAGS_LHINT_LT2K,
+ TX_BD_LONG_FLAGS_LHINT_LT2K
+ };
+
+ if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
+ PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
+ PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
+ PKT_TX_TUNNEL_GENEVE))
+ long_bd = true;
+
+ tx_buf = &txr->tx_buf_ring[txr->tx_prod];
+ tx_buf->mbuf = tx_pkt;
+ tx_buf->nr_bds = long_bd + tx_pkt->nb_segs;
+ last_prod = (txr->tx_prod + tx_buf->nr_bds - 1) &
+ txr->tx_ring_struct->ring_mask;
+
+ if (unlikely(bnxt_tx_avail(txr) < tx_buf->nr_bds))
+ return -ENOMEM;
+
+ txbd = &txr->tx_desc_ring[txr->tx_prod];
+ txbd->opaque = *coal_pkts;
+ txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
+ txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW;
+ if (!*cmpl_next) {
+ txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;
+ } else {
+ *coal_pkts = 0;
+ *cmpl_next = false;
+ }
+ txbd->len = tx_pkt->data_len;
+ if (tx_pkt->pkt_len >= 2014)
+ txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
+ else
+ txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9];
+ txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf));
+
+ if (long_bd) {
+ txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
+ vlan_tag_flags = 0;
+ cfa_action = 0;
+ if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ /* shurd: Should this mask at
+ * TX_BD_LONG_CFA_META_VLAN_VID_MASK?
+ */
+ vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
+ tx_buf->mbuf->vlan_tci;
+ /* Currently supports 8021Q, 8021AD vlan offloads
+ * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
+ */
+ /* DPDK only supports 802.11q VLAN packets */
+ vlan_tag_flags |=
+ TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
+ }
+
+ txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
+
+ txbd1 = (struct tx_bd_long_hi *)
+ &txr->tx_desc_ring[txr->tx_prod];
+ txbd1->lflags = 0;
+ txbd1->cfa_meta = vlan_tag_flags;
+ txbd1->cfa_action = cfa_action;
+
+ if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
+ /* TSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO;
+ txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
+ tx_pkt->l4_len + tx_pkt->outer_l2_len +
+ tx_pkt->outer_l3_len;
+ txbd1->mss = tx_pkt->tso_segsz;
+
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
+ PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) ==
+ PKT_TX_OIP_IIP_TCP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
+ PKT_TX_OIP_IIP_UDP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
+ PKT_TX_IIP_TCP_UDP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) ==
+ PKT_TX_IIP_UDP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) ==
+ PKT_TX_IIP_TCP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
+ PKT_TX_OIP_TCP_UDP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) ==
+ PKT_TX_OIP_UDP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) ==
+ PKT_TX_OIP_TCP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
+ PKT_TX_OIP_IIP_CKSUM) {
+ /* Outer IP, Inner IP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) ==
+ PKT_TX_TCP_UDP_CKSUM) {
+ /* TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
+ PKT_TX_TCP_CKSUM) {
+ /* TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
+ PKT_TX_UDP_CKSUM) {
+ /* TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
+ PKT_TX_IP_CKSUM) {
+ /* IP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
+ PKT_TX_OUTER_IP_CKSUM) {
+ /* IP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
+ txbd1->mss = 0;
+ }
+ } else {
+ txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
+ }
+
+ m_seg = tx_pkt->next;
+ /* i is set at the end of the if(long_bd) block */
+ while (txr->tx_prod != last_prod) {
+ txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
+ tx_buf = &txr->tx_buf_ring[txr->tx_prod];
+
+ txbd = &txr->tx_desc_ring[txr->tx_prod];
+ txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
+ txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
+ txbd->len = m_seg->data_len;
+
+ m_seg = m_seg->next;
+ }
+
+ txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
+ txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags);
+
+ txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
+
+ return 0;
+}
+
+static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ uint16_t cons = txr->tx_cons;
+ int i, j;
+
+ for (i = 0; i < nr_pkts; i++) {
+ struct bnxt_sw_tx_bd *tx_buf;
+ struct rte_mbuf *mbuf;
+
+ tx_buf = &txr->tx_buf_ring[cons];
+ cons = RING_NEXT(txr->tx_ring_struct, cons);
+ mbuf = tx_buf->mbuf;
+ tx_buf->mbuf = NULL;
+
+ /* EW - no need to unmap DMA memory? */
+
+ for (j = 1; j < tx_buf->nr_bds; j++)
+ cons = RING_NEXT(txr->tx_ring_struct, cons);
+ rte_pktmbuf_free(mbuf);
+ }
+
+ txr->tx_cons = cons;
+}
+
+static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
+{
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ uint32_t raw_cons = cpr->cp_raw_cons;
+ uint32_t cons;
+ uint32_t nb_tx_pkts = 0;
+ struct tx_cmpl *txcmp;
+ struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
+ struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
+ uint32_t ring_mask = cp_ring_struct->ring_mask;
+ uint32_t opaque = 0;
+
+ if (((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) &
+ txq->tx_ring->tx_ring_struct->ring_mask) < txq->tx_free_thresh)
+ return 0;
+
+ do {
+ cons = RING_CMPL(ring_mask, raw_cons);
+ txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
+ rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) &
+ ring_mask]);
+
+ if (!CMPL_VALID(txcmp, cpr->valid))
+ break;
+ opaque = rte_cpu_to_le_32(txcmp->opaque);
+ NEXT_CMPL(cpr, cons, cpr->valid, 1);
+ rte_prefetch0(&cp_desc_ring[cons]);
+
+ if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
+ nb_tx_pkts += opaque;
+ else
+ RTE_LOG_DP(ERR, PMD,
+ "Unhandled CMP type %02x\n",
+ CMP_TYPE(txcmp));
+ raw_cons = cons;
+ } while (nb_tx_pkts < ring_mask);
+
+ if (nb_tx_pkts) {
+ bnxt_tx_cmp(txq, nb_tx_pkts);
+ cpr->cp_raw_cons = raw_cons;
+ B_CP_DB(cpr, cpr->cp_raw_cons, ring_mask);
+ }
+
+ return nb_tx_pkts;
+}
+
+uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct bnxt_tx_queue *txq = tx_queue;
+ uint16_t nb_tx_pkts = 0;
+ uint16_t coal_pkts = 0;
+ uint16_t cmpl_next = txq->cmpl_next;
+
+ /* Handle TX completions */
+ bnxt_handle_tx_cp(txq);
+
+ /* Tx queue was stopped; wait for it to be restarted */
+ if (txq->tx_deferred_start) {
+ PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
+ return 0;
+ }
+
+ txq->cmpl_next = 0;
+ /* Handle TX burst request */
+ for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
+ int rc;
+
+ /* Request a completion on first and last packet */
+ cmpl_next |= (nb_pkts == nb_tx_pkts + 1);
+ coal_pkts++;
+ rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq,
+ &coal_pkts, &cmpl_next);
+
+ if (unlikely(rc)) {
+ /* Request a completion in next cycle */
+ txq->cmpl_next = 1;
+ break;
+ }
+ }
+
+ if (nb_tx_pkts)
+ B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod);
+
+ return nb_tx_pkts;
+}
+
+int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ txq->tx_deferred_start = false;
+ PMD_DRV_LOG(DEBUG, "Tx queue started\n");
+
+ return 0;
+}
+
+int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ /* Handle TX completions */
+ bnxt_handle_tx_cp(txq);
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ txq->tx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.h
new file mode 100644
index 00000000..7f3c7cdb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_TXR_H_
+#define _BNXT_TXR_H_
+
+#include <rte_io.h>
+
+#define MAX_TX_RINGS 16
+#define BNXT_TX_PUSH_THRESH 92
+
+#define B_TX_DB(db, prod) rte_write32((DB_KEY_TX | (prod)), db)
+
+struct bnxt_tx_ring_info {
+ uint16_t tx_prod;
+ uint16_t tx_cons;
+ void *tx_doorbell;
+
+ struct tx_bd_long *tx_desc_ring;
+ struct bnxt_sw_tx_bd *tx_buf_ring;
+
+ rte_iova_t tx_desc_mapping;
+
+#define BNXT_DEV_STATE_CLOSING 0x1
+ uint32_t dev_state;
+
+ struct bnxt_ring *tx_ring_struct;
+};
+
+struct bnxt_sw_tx_bd {
+ struct rte_mbuf *mbuf; /* mbuf associated with TX descriptor */
+ uint8_t is_gso;
+ unsigned short nr_bds;
+};
+
+void bnxt_free_tx_rings(struct bnxt *bp);
+int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
+int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
+uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
+ PKT_TX_IP_CKSUM)
+#define PKT_TX_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM)
+#define PKT_TX_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)
+#define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)
+
+
+#define TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \
+ TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \
+ TX_BD_LONG_LFLAGS_IP_CHKSUM)
+#define TX_BD_FLG_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \
+ TX_BD_LONG_LFLAGS_IP_CHKSUM)
+#define TX_BD_FLG_TIP_IP_CHKSUM (TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \
+ TX_BD_LONG_LFLAGS_IP_CHKSUM)
+#define TX_BD_FLG_TIP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \
+ TX_BD_LONG_LFLAGS_T_IP_CHKSUM)
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_util.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_util.c
new file mode 100644
index 00000000..7d334271
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_util.c
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <inttypes.h>
+
+#include "bnxt_util.h"
+
+int bnxt_check_zero_bytes(const uint8_t *bytes, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (bytes[i] != 0x00)
+ return 0;
+ return 1;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_util.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_util.h
new file mode 100644
index 00000000..2378833c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_util.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_UTIL_H_
+#define _BNXT_UTIL_H_
+
+int bnxt_check_zero_bytes(const uint8_t *bytes, int len);
+
+#endif /* _BNXT_UTIL_H_ */
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.c
new file mode 100644
index 00000000..c0577cd7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.c
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <inttypes.h>
+
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * VNIC Functions
+ */
+
+static void prandom_bytes(void *dest_ptr, size_t len)
+{
+ char *dest = (char *)dest_ptr;
+ uint64_t rb;
+
+ while (len) {
+ rb = rte_rand();
+ if (len >= 8) {
+ memcpy(dest, &rb, 8);
+ len -= 8;
+ dest += 8;
+ } else {
+ memcpy(dest, &rb, len);
+ dest += len;
+ len = 0;
+ }
+ }
+}
+
+void bnxt_init_vnics(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+ uint16_t max_vnics;
+ int i;
+
+ max_vnics = bp->max_vnics;
+ STAILQ_INIT(&bp->free_vnic_list);
+ for (i = 0; i < max_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ vnic->fw_vnic_id = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->hash_mode =
+ HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
+
+ prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+ STAILQ_INIT(&vnic->filter);
+ STAILQ_INIT(&vnic->flow_list);
+ STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
+ }
+ for (i = 0; i < MAX_FF_POOLS; i++)
+ STAILQ_INIT(&bp->ff_pool[i]);
+}
+
+int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int pool)
+{
+ struct bnxt_vnic_info *temp;
+
+ temp = STAILQ_FIRST(&bp->ff_pool[pool]);
+ while (temp) {
+ if (temp == vnic) {
+ STAILQ_REMOVE(&bp->ff_pool[pool], vnic,
+ bnxt_vnic_info, next);
+ vnic->fw_vnic_id = (uint16_t)HWRM_NA_SIGNATURE;
+ STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic,
+ next);
+ return 0;
+ }
+ temp = STAILQ_NEXT(temp, next);
+ }
+ PMD_DRV_LOG(ERR, "VNIC %p is not found in pool[%d]\n", vnic, pool);
+ return -EINVAL;
+}
+
+struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+
+ /* Find the 1st unused vnic from the free_vnic_list pool*/
+ vnic = STAILQ_FIRST(&bp->free_vnic_list);
+ if (!vnic) {
+ PMD_DRV_LOG(ERR, "No more free VNIC resources\n");
+ return NULL;
+ }
+ STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next);
+ return vnic;
+}
+
+void bnxt_free_all_vnics(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *temp, *next;
+ int i;
+
+ for (i = 0; i < MAX_FF_POOLS; i++) {
+ temp = STAILQ_FIRST(&bp->ff_pool[i]);
+ while (temp) {
+ next = STAILQ_NEXT(temp, next);
+ STAILQ_REMOVE(&bp->ff_pool[i], temp, bnxt_vnic_info,
+ next);
+ STAILQ_INSERT_TAIL(&bp->free_vnic_list, temp, next);
+ temp = next;
+ }
+ }
+}
+
+void bnxt_free_vnic_attributes(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+
+ STAILQ_FOREACH(vnic, &bp->free_vnic_list, next) {
+ if (vnic->rss_table) {
+ /* 'Unreserve' the rss_table */
+ /* N/A */
+
+ vnic->rss_table = NULL;
+ }
+
+ if (vnic->rss_hash_key) {
+ /* 'Unreserve' the rss_hash_key */
+ /* N/A */
+
+ vnic->rss_hash_key = NULL;
+ }
+ }
+}
+
+int bnxt_alloc_vnic_attributes(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+ struct rte_pci_device *pdev = bp->pdev;
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ uint32_t entry_length = RTE_CACHE_LINE_ROUNDUP(
+ HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table) +
+ HW_HASH_KEY_SIZE +
+ BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN);
+ uint16_t max_vnics;
+ int i;
+ rte_iova_t mz_phys_addr;
+
+ max_vnics = bp->max_vnics;
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_%04x:%02x:%02x:%02x_vnicattr", pdev->addr.domain,
+ pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name,
+ entry_length * max_vnics, SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
+ if (!mz)
+ return -ENOMEM;
+ }
+ mz_phys_addr = mz->iova;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ PMD_DRV_LOG(WARNING,
+ "Memzone physical address same as virtual.\n");
+ PMD_DRV_LOG(WARNING,
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
+ if (mz_phys_addr == 0) {
+ PMD_DRV_LOG(ERR,
+ "unable to map vnic address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < max_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ /* Allocate rss table and hash key */
+ vnic->rss_table =
+ (void *)((char *)mz->addr + (entry_length * i));
+ memset(vnic->rss_table, -1, entry_length);
+
+ vnic->rss_table_dma_addr = mz_phys_addr + (entry_length * i);
+ vnic->rss_hash_key = (void *)((char *)vnic->rss_table +
+ HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table));
+
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr +
+ HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table);
+ vnic->mc_list = (void *)((char *)vnic->rss_hash_key +
+ HW_HASH_KEY_SIZE);
+ vnic->mc_list_dma_addr = vnic->rss_hash_key_dma_addr +
+ HW_HASH_KEY_SIZE;
+ }
+
+ return 0;
+}
+
+void bnxt_free_vnic_mem(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+ uint16_t max_vnics, i;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ max_vnics = bp->max_vnics;
+ for (i = 0; i < max_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
+ PMD_DRV_LOG(ERR, "VNIC is not freed yet!\n");
+ /* TODO Call HWRM to free VNIC */
+ }
+ }
+
+ rte_free(bp->vnic_info);
+ bp->vnic_info = NULL;
+}
+
+int bnxt_alloc_vnic_mem(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic_mem;
+ uint16_t max_vnics;
+
+ max_vnics = bp->max_vnics;
+ /* Allocate memory for VNIC pool and filter pool */
+ vnic_mem = rte_zmalloc("bnxt_vnic_info",
+ max_vnics * sizeof(struct bnxt_vnic_info), 0);
+ if (vnic_mem == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for %d VNICs",
+ max_vnics);
+ return -ENOMEM;
+ }
+ bp->vnic_info = vnic_mem;
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.h
new file mode 100644
index 00000000..9029f78c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_VNIC_H_
+#define _BNXT_VNIC_H_
+
+#include <sys/queue.h>
+#include <stdbool.h>
+
+struct bnxt_vnic_info {
+ STAILQ_ENTRY(bnxt_vnic_info) next;
+ uint8_t ff_pool_idx;
+
+ uint16_t fw_vnic_id; /* returned by Chimp during alloc */
+ uint16_t rss_rule;
+ uint16_t start_grp_id;
+ uint16_t end_grp_id;
+ uint16_t *fw_grp_ids;
+ uint16_t dflt_ring_grp;
+ uint16_t mru;
+ uint16_t hash_type;
+ uint8_t hash_mode;
+ rte_iova_t rss_table_dma_addr;
+ uint16_t *rss_table;
+ rte_iova_t rss_hash_key_dma_addr;
+ void *rss_hash_key;
+ rte_iova_t mc_list_dma_addr;
+ char *mc_list;
+ uint32_t mc_addr_cnt;
+#define BNXT_MAX_MC_ADDRS 16
+ uint32_t flags;
+#define BNXT_VNIC_INFO_PROMISC (1 << 0)
+#define BNXT_VNIC_INFO_ALLMULTI (1 << 1)
+#define BNXT_VNIC_INFO_BCAST (1 << 2)
+#define BNXT_VNIC_INFO_UCAST (1 << 3)
+#define BNXT_VNIC_INFO_MCAST (1 << 4)
+#define BNXT_VNIC_INFO_TAGGED (1 << 5)
+#define BNXT_VNIC_INFO_UNTAGGED (1 << 6)
+
+ uint16_t cos_rule;
+ uint16_t lb_rule;
+ bool vlan_strip;
+ bool func_default;
+ bool bd_stall;
+ bool roce_dual;
+ bool roce_only;
+ bool rss_dflt_cr;
+
+ STAILQ_HEAD(, bnxt_filter_info) filter;
+ STAILQ_HEAD(, rte_flow) flow_list;
+};
+
+struct bnxt;
+void bnxt_init_vnics(struct bnxt *bp);
+int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int pool);
+struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp);
+void bnxt_free_all_vnics(struct bnxt *bp);
+void bnxt_free_vnic_attributes(struct bnxt *bp);
+int bnxt_alloc_vnic_attributes(struct bnxt *bp);
+void bnxt_free_vnic_mem(struct bnxt *bp);
+int bnxt_alloc_vnic_mem(struct bnxt *bp);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bnxt/hsi_struct_def_dpdk.h b/src/spdk/dpdk/drivers/net/bnxt/hsi_struct_def_dpdk.h
new file mode 100644
index 00000000..f5c7b422
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -0,0 +1,28211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Broadcom Limited
+ * All rights reserved.
+ *
+ * DO NOT MODIFY!!! This file is automatically generated.
+ */
+
+#ifndef _HSI_STRUCT_DEF_DPDK_H_
+#define _HSI_STRUCT_DEF_DPDK_H_
+
+/* This is the HWRM command header. */
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* This is the HWRM response header. */
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+} __attribute__((packed));
+
+/*
+ * TLV encapsulated message. Use the TLV type field of the
+ * TLV to determine the type of message encapsulated.
+ */
+#define CMD_DISCR_TLV_ENCAP UINT32_C(0x8000)
+#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
+
+
+/* HWRM request message */
+#define TLV_TYPE_HWRM_REQUEST UINT32_C(0x1)
+/* HWRM response message */
+#define TLV_TYPE_HWRM_RESPONSE UINT32_C(0x2)
+/* RoCE slow path command */
+#define TLV_TYPE_ROCE_SP_COMMAND UINT32_C(0x3)
+/* Engine CKV - The device's serial number. */
+#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER UINT32_C(0x8001)
+/* Engine CKV - Per-function random nonce data. */
+#define TLV_TYPE_ENGINE_CKV_NONCE UINT32_C(0x8002)
+/* Engine CKV - Initialization vector. */
+#define TLV_TYPE_ENGINE_CKV_IV UINT32_C(0x8003)
+/* Engine CKV - Authentication tag. */
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG UINT32_C(0x8004)
+/* Engine CKV - The encrypted data. */
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT UINT32_C(0x8005)
+/* Engine CKV - Supported algorithms. */
+#define TLV_TYPE_ENGINE_CKV_ALGORITHMS UINT32_C(0x8006)
+/* Engine CKV - The EC curve name and ECC public key information. */
+#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY UINT32_C(0x8007)
+/* Engine CKV - The ECDSA signature. */
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE UINT32_C(0x8008)
+#define TLV_TYPE_LAST \
+ TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+ /*
+ * The command discriminator is used to differentiate between various
+ * types of HWRM messages. This includes legacy HWRM and RoCE slowpath
+ * command messages as well as newer TLV encapsulated HWRM commands.
+ *
+ * For TLV encapsulated messages this field must be 0x8000.
+ */
+ uint16_t cmd_discr;
+ uint8_t reserved_8b;
+ uint8_t flags;
+ /*
+ * Indicates the presence of additional TLV encapsulated data
+ * follows this TLV.
+ */
+ #define TLV_FLAGS_MORE UINT32_C(0x1)
+ /* Last TLV in a sequence of TLVs. */
+ #define TLV_FLAGS_MORE_LAST UINT32_C(0x0)
+ /* More TLVs follow this TLV. */
+ #define TLV_FLAGS_MORE_NOT_LAST UINT32_C(0x1)
+ /*
+ * When an HWRM receiver detects a TLV type that it does not
+ * support with the TLV required flag set, the receiver must
+ * reject the HWRM message with an error code indicating an
+ * unsupported TLV type.
+ */
+ #define TLV_FLAGS_REQUIRED UINT32_C(0x2)
+ /* No */
+ #define TLV_FLAGS_REQUIRED_NO (UINT32_C(0x0) << 1)
+ /* Yes */
+ #define TLV_FLAGS_REQUIRED_YES (UINT32_C(0x1) << 1)
+ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+ /*
+ * This field defines the TLV type value which is divided into
+ * two ranges to differentiate between global and local TLV types.
+ * Global TLV types must be unique across all defined TLV types.
+ * Local TLV types are valid only for extensions to a given
+ * HWRM message and may be repeated across different HWRM message
+ * types. There is a direct correlation of each HWRM message type
+ * to a single global TLV type value.
+ *
+ * Global TLV range: `0 - (63k-1)`
+ *
+ * Local TLV range: `63k - (64k-1)`
+ */
+ uint16_t tlv_type;
+ /*
+ * Length of the message data encapsulated by this TLV in bytes.
+ * This length does not include the size of the TLV header itself
+ * and it must be an integer multiple of 8B.
+ */
+ uint16_t length;
+} __attribute__((packed));
+
+/* Input */
+/* input (size:128b/16B) */
+struct input {
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+ /*
+ * This value indicates the what completion ring the request will
+ * be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t cmpl_ring;
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+ /*
+ * Target ID of this command.
+ *
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* Output */
+/* output (size:64b/8B) */
+struct output {
+ /*
+ * Pass/Fail or error type
+ *
+ * Note: receiver to verify the in parameters, and fail the call
+ * with an error when appropriate
+ */
+ uint16_t error_code;
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+ /*
+ * This field is the length of the response in bytes. The
+ * last byte of the response is a valid flag that will read
+ * as '1' when the command has been completely written to
+ * memory.
+ */
+ uint16_t resp_len;
+} __attribute__((packed));
+
+/* Short Command Structure */
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+ /*
+ * This field indicates the type of request in the request buffer.
+ * The format for the rest of the command (request) is determined
+ * by this field.
+ */
+ uint16_t req_type;
+ /*
+ * This field indicates a signature that is used to identify short
+ * form of the command listed here. This field shall be set to
+ * 17185 (0x4321).
+ */
+ uint16_t signature;
+ /* Signature indicating this is a short form of HWRM command */
+ #define HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD UINT32_C(0x4321)
+ #define HWRM_SHORT_INPUT_SIGNATURE_LAST \
+ HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD
+ /* Reserved for future use. */
+ uint16_t unused_0;
+ /* This value indicates the length of the request. */
+ uint16_t size;
+ /*
+ * This is the host address where the request was written.
+ * This area must be 16B aligned.
+ */
+ uint64_t req_addr;
+} __attribute__((packed));
+
+/*
+ * Command numbering
+ * # NOTE - definitions already in hwrm_req_type, in hwrm_types.yaml
+ * # So only structure definition is provided here.
+ */
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ /*
+ * This version of the specification defines the commands listed in
+ * the table below. The following are general implementation
+ * requirements for these commands:
+ *
+ * # All commands listed below that are marked neither
+ * reserved nor experimental shall be implemented by the HWRM.
+ * # A HWRM client compliant to this specification should not use
+ * commands outside of the list below.
+ * # A HWRM client compliant to this specification should not use
+ * command numbers marked reserved below.
+ * # A command marked experimental below may not be implemented
+ * by the HWRM.
+ * # A command marked experimental may change in the
+ * future version of the HWRM specification.
+ * # A command not listed below may be implemented by the HWRM.
+ * The behavior of commands that are not listed below is outside
+ * the scope of this specification.
+ */
+ uint16_t req_type;
+ #define HWRM_VER_GET UINT32_C(0x0)
+ #define HWRM_FUNC_BUF_UNRGTR UINT32_C(0xe)
+ #define HWRM_FUNC_VF_CFG UINT32_C(0xf)
+ /* Reserved for future use. */
+ #define HWRM_RESERVED1 UINT32_C(0x10)
+ #define HWRM_FUNC_RESET UINT32_C(0x11)
+ #define HWRM_FUNC_GETFID UINT32_C(0x12)
+ #define HWRM_FUNC_VF_ALLOC UINT32_C(0x13)
+ #define HWRM_FUNC_VF_FREE UINT32_C(0x14)
+ #define HWRM_FUNC_QCAPS UINT32_C(0x15)
+ #define HWRM_FUNC_QCFG UINT32_C(0x16)
+ #define HWRM_FUNC_CFG UINT32_C(0x17)
+ #define HWRM_FUNC_QSTATS UINT32_C(0x18)
+ #define HWRM_FUNC_CLR_STATS UINT32_C(0x19)
+ #define HWRM_FUNC_DRV_UNRGTR UINT32_C(0x1a)
+ #define HWRM_FUNC_VF_RESC_FREE UINT32_C(0x1b)
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY UINT32_C(0x1c)
+ #define HWRM_FUNC_DRV_RGTR UINT32_C(0x1d)
+ #define HWRM_FUNC_DRV_QVER UINT32_C(0x1e)
+ #define HWRM_FUNC_BUF_RGTR UINT32_C(0x1f)
+ #define HWRM_PORT_PHY_CFG UINT32_C(0x20)
+ #define HWRM_PORT_MAC_CFG UINT32_C(0x21)
+ /* Experimental */
+ #define HWRM_PORT_TS_QUERY UINT32_C(0x22)
+ #define HWRM_PORT_QSTATS UINT32_C(0x23)
+ #define HWRM_PORT_LPBK_QSTATS UINT32_C(0x24)
+ /* Experimental */
+ #define HWRM_PORT_CLR_STATS UINT32_C(0x25)
+ /* Experimental */
+ #define HWRM_PORT_LPBK_CLR_STATS UINT32_C(0x26)
+ #define HWRM_PORT_PHY_QCFG UINT32_C(0x27)
+ #define HWRM_PORT_MAC_QCFG UINT32_C(0x28)
+ /* Experimental */
+ #define HWRM_PORT_MAC_PTP_QCFG UINT32_C(0x29)
+ #define HWRM_PORT_PHY_QCAPS UINT32_C(0x2a)
+ #define HWRM_PORT_PHY_I2C_WRITE UINT32_C(0x2b)
+ #define HWRM_PORT_PHY_I2C_READ UINT32_C(0x2c)
+ #define HWRM_PORT_LED_CFG UINT32_C(0x2d)
+ #define HWRM_PORT_LED_QCFG UINT32_C(0x2e)
+ #define HWRM_PORT_LED_QCAPS UINT32_C(0x2f)
+ #define HWRM_QUEUE_QPORTCFG UINT32_C(0x30)
+ #define HWRM_QUEUE_QCFG UINT32_C(0x31)
+ #define HWRM_QUEUE_CFG UINT32_C(0x32)
+ #define HWRM_FUNC_VLAN_CFG UINT32_C(0x33)
+ #define HWRM_FUNC_VLAN_QCFG UINT32_C(0x34)
+ #define HWRM_QUEUE_PFCENABLE_QCFG UINT32_C(0x35)
+ #define HWRM_QUEUE_PFCENABLE_CFG UINT32_C(0x36)
+ #define HWRM_QUEUE_PRI2COS_QCFG UINT32_C(0x37)
+ #define HWRM_QUEUE_PRI2COS_CFG UINT32_C(0x38)
+ #define HWRM_QUEUE_COS2BW_QCFG UINT32_C(0x39)
+ #define HWRM_QUEUE_COS2BW_CFG UINT32_C(0x3a)
+ /* Experimental */
+ #define HWRM_QUEUE_DSCP_QCAPS UINT32_C(0x3b)
+ /* Experimental */
+ #define HWRM_QUEUE_DSCP2PRI_QCFG UINT32_C(0x3c)
+ /* Experimental */
+ #define HWRM_QUEUE_DSCP2PRI_CFG UINT32_C(0x3d)
+ #define HWRM_VNIC_ALLOC UINT32_C(0x40)
+ #define HWRM_VNIC_FREE UINT32_C(0x41)
+ #define HWRM_VNIC_CFG UINT32_C(0x42)
+ #define HWRM_VNIC_QCFG UINT32_C(0x43)
+ #define HWRM_VNIC_TPA_CFG UINT32_C(0x44)
+ /* Experimental */
+ #define HWRM_VNIC_TPA_QCFG UINT32_C(0x45)
+ #define HWRM_VNIC_RSS_CFG UINT32_C(0x46)
+ #define HWRM_VNIC_RSS_QCFG UINT32_C(0x47)
+ #define HWRM_VNIC_PLCMODES_CFG UINT32_C(0x48)
+ #define HWRM_VNIC_PLCMODES_QCFG UINT32_C(0x49)
+ #define HWRM_VNIC_QCAPS UINT32_C(0x4a)
+ #define HWRM_RING_ALLOC UINT32_C(0x50)
+ #define HWRM_RING_FREE UINT32_C(0x51)
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS UINT32_C(0x52)
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS UINT32_C(0x53)
+ #define HWRM_RING_RESET UINT32_C(0x5e)
+ #define HWRM_RING_GRP_ALLOC UINT32_C(0x60)
+ #define HWRM_RING_GRP_FREE UINT32_C(0x61)
+ /* Reserved for future use. */
+ #define HWRM_RESERVED5 UINT32_C(0x64)
+ /* Reserved for future use. */
+ #define HWRM_RESERVED6 UINT32_C(0x65)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC UINT32_C(0x70)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE UINT32_C(0x71)
+ #define HWRM_CFA_L2_FILTER_ALLOC UINT32_C(0x90)
+ #define HWRM_CFA_L2_FILTER_FREE UINT32_C(0x91)
+ #define HWRM_CFA_L2_FILTER_CFG UINT32_C(0x92)
+ #define HWRM_CFA_L2_SET_RX_MASK UINT32_C(0x93)
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG UINT32_C(0x94)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC UINT32_C(0x95)
+ #define HWRM_CFA_TUNNEL_FILTER_FREE UINT32_C(0x96)
+ /* Experimental */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC UINT32_C(0x97)
+ /* Experimental */
+ #define HWRM_CFA_ENCAP_RECORD_FREE UINT32_C(0x98)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC UINT32_C(0x99)
+ #define HWRM_CFA_NTUPLE_FILTER_FREE UINT32_C(0x9a)
+ #define HWRM_CFA_NTUPLE_FILTER_CFG UINT32_C(0x9b)
+ /* Experimental */
+ #define HWRM_CFA_EM_FLOW_ALLOC UINT32_C(0x9c)
+ /* Experimental */
+ #define HWRM_CFA_EM_FLOW_FREE UINT32_C(0x9d)
+ /* Experimental */
+ #define HWRM_CFA_EM_FLOW_CFG UINT32_C(0x9e)
+ #define HWRM_TUNNEL_DST_PORT_QUERY UINT32_C(0xa0)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC UINT32_C(0xa1)
+ #define HWRM_TUNNEL_DST_PORT_FREE UINT32_C(0xa2)
+ #define HWRM_STAT_CTX_ALLOC UINT32_C(0xb0)
+ #define HWRM_STAT_CTX_FREE UINT32_C(0xb1)
+ #define HWRM_STAT_CTX_QUERY UINT32_C(0xb2)
+ #define HWRM_STAT_CTX_CLR_STATS UINT32_C(0xb3)
+ #define HWRM_PORT_QSTATS_EXT UINT32_C(0xb4)
+ #define HWRM_FW_RESET UINT32_C(0xc0)
+ #define HWRM_FW_QSTATUS UINT32_C(0xc1)
+ /* Experimental */
+ #define HWRM_FW_SET_TIME UINT32_C(0xc8)
+ /* Experimental */
+ #define HWRM_FW_GET_TIME UINT32_C(0xc9)
+ /* Experimental */
+ #define HWRM_FW_SET_STRUCTURED_DATA UINT32_C(0xca)
+ /* Experimental */
+ #define HWRM_FW_GET_STRUCTURED_DATA UINT32_C(0xcb)
+ /* Experimental */
+ #define HWRM_FW_IPC_MAILBOX UINT32_C(0xcc)
+ #define HWRM_EXEC_FWD_RESP UINT32_C(0xd0)
+ #define HWRM_REJECT_FWD_RESP UINT32_C(0xd1)
+ #define HWRM_FWD_RESP UINT32_C(0xd2)
+ #define HWRM_FWD_ASYNC_EVENT_CMPL UINT32_C(0xd3)
+ #define HWRM_OEM_CMD UINT32_C(0xd4)
+ #define HWRM_TEMP_MONITOR_QUERY UINT32_C(0xe0)
+ #define HWRM_WOL_FILTER_ALLOC UINT32_C(0xf0)
+ #define HWRM_WOL_FILTER_FREE UINT32_C(0xf1)
+ #define HWRM_WOL_FILTER_QCFG UINT32_C(0xf2)
+ #define HWRM_WOL_REASON_QCFG UINT32_C(0xf3)
+ /* Experimental */
+ #define HWRM_CFA_METER_PROFILE_ALLOC UINT32_C(0xf5)
+ /* Experimental */
+ #define HWRM_CFA_METER_PROFILE_FREE UINT32_C(0xf6)
+ /* Experimental */
+ #define HWRM_CFA_METER_PROFILE_CFG UINT32_C(0xf7)
+ /* Experimental */
+ #define HWRM_CFA_METER_INSTANCE_ALLOC UINT32_C(0xf8)
+ /* Experimental */
+ #define HWRM_CFA_METER_INSTANCE_FREE UINT32_C(0xf9)
+ /* Experimental */
+ #define HWRM_CFA_VFR_ALLOC UINT32_C(0xfd)
+ /* Experimental */
+ #define HWRM_CFA_VFR_FREE UINT32_C(0xfe)
+ /* Experimental */
+ #define HWRM_CFA_VF_PAIR_ALLOC UINT32_C(0x100)
+ /* Experimental */
+ #define HWRM_CFA_VF_PAIR_FREE UINT32_C(0x101)
+ /* Experimental */
+ #define HWRM_CFA_VF_PAIR_INFO UINT32_C(0x102)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_ALLOC UINT32_C(0x103)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_FREE UINT32_C(0x104)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_FLUSH UINT32_C(0x105)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_STATS UINT32_C(0x106)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_INFO UINT32_C(0x107)
+ /* Experimental */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC UINT32_C(0x108)
+ /* Experimental */
+ #define HWRM_CFA_DECAP_FILTER_FREE UINT32_C(0x109)
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG UINT32_C(0x10a)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC UINT32_C(0x10b)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE UINT32_C(0x10c)
+ /* Experimental */
+ #define HWRM_CFA_PAIR_ALLOC UINT32_C(0x10d)
+ /* Experimental */
+ #define HWRM_CFA_PAIR_FREE UINT32_C(0x10e)
+ /* Experimental */
+ #define HWRM_CFA_PAIR_INFO UINT32_C(0x10f)
+ /* Experimental */
+ #define HWRM_FW_IPC_MSG UINT32_C(0x110)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO UINT32_C(0x111)
+ /* Engine CKV - Ping the device and SRT firmware to get the public key. */
+ #define HWRM_ENGINE_CKV_HELLO UINT32_C(0x12d)
+ /* Engine CKV - Get the current allocation status of keys provisioned in the key vault. */
+ #define HWRM_ENGINE_CKV_STATUS UINT32_C(0x12e)
+ /* Engine CKV - Add a new CKEK used to encrypt keys. */
+ #define HWRM_ENGINE_CKV_CKEK_ADD UINT32_C(0x12f)
+ /* Engine CKV - Delete a previously added CKEK. */
+ #define HWRM_ENGINE_CKV_CKEK_DELETE UINT32_C(0x130)
+ /* Engine CKV - Add a new key to the key vault. */
+ #define HWRM_ENGINE_CKV_KEY_ADD UINT32_C(0x131)
+ /* Engine CKV - Delete a key from the key vault. */
+ #define HWRM_ENGINE_CKV_KEY_DELETE UINT32_C(0x132)
+ /* Engine CKV - Delete all keys from the key vault. */
+ #define HWRM_ENGINE_CKV_FLUSH UINT32_C(0x133)
+ /* Engine CKV - Get random data. */
+ #define HWRM_ENGINE_CKV_RNG_GET UINT32_C(0x134)
+ /* Engine CKV - Generate and encrypt a new AES key. */
+ #define HWRM_ENGINE_CKV_KEY_GEN UINT32_C(0x135)
+ /* Engine - Query the available queue groups configuration. */
+ #define HWRM_ENGINE_QG_CONFIG_QUERY UINT32_C(0x13c)
+ /* Engine - Query the queue groups assigned to a function. */
+ #define HWRM_ENGINE_QG_QUERY UINT32_C(0x13d)
+ /* Engine - Query the available queue group meter profile configuration. */
+ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY UINT32_C(0x13e)
+ /* Engine - Query the configuration of a queue group meter profile. */
+ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY UINT32_C(0x13f)
+ /* Engine - Allocate a queue group meter profile. */
+ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC UINT32_C(0x140)
+ /* Engine - Free a queue group meter profile. */
+ #define HWRM_ENGINE_QG_METER_PROFILE_FREE UINT32_C(0x141)
+ /* Engine - Query the meters assigned to a queue group. */
+ #define HWRM_ENGINE_QG_METER_QUERY UINT32_C(0x142)
+ /* Engine - Bind a queue group meter profile to a queue group. */
+ #define HWRM_ENGINE_QG_METER_BIND UINT32_C(0x143)
+ /* Engine - Unbind a queue group meter profile from a queue group. */
+ #define HWRM_ENGINE_QG_METER_UNBIND UINT32_C(0x144)
+ /* Engine - Bind a queue group to a function. */
+ #define HWRM_ENGINE_QG_FUNC_BIND UINT32_C(0x145)
+ /* Engine - Query the scheduling group configuration. */
+ #define HWRM_ENGINE_SG_CONFIG_QUERY UINT32_C(0x146)
+ /* Engine - Query the queue groups assigned to a scheduling group. */
+ #define HWRM_ENGINE_SG_QUERY UINT32_C(0x147)
+ /* Engine - Query the configuration of a scheduling group's meter profiles. */
+ #define HWRM_ENGINE_SG_METER_QUERY UINT32_C(0x148)
+ /* Engine - Configure a scheduling group's meter profiles. */
+ #define HWRM_ENGINE_SG_METER_CONFIG UINT32_C(0x149)
+ /* Engine - Bind a queue group to a scheduling group. */
+ #define HWRM_ENGINE_SG_QG_BIND UINT32_C(0x14a)
+ /* Engine - Unbind a queue group from its scheduling group. */
+ #define HWRM_ENGINE_QG_SG_UNBIND UINT32_C(0x14b)
+ /* Engine - Query the Engine configuration. */
+ #define HWRM_ENGINE_CONFIG_QUERY UINT32_C(0x154)
+ /* Engine - Configure the statistics accumulator for an Engine. */
+ #define HWRM_ENGINE_STATS_CONFIG UINT32_C(0x155)
+ /* Engine - Clear the statistics accumulator for an Engine. */
+ #define HWRM_ENGINE_STATS_CLEAR UINT32_C(0x156)
+ /* Engine - Query the statistics accumulator for an Engine. */
+ #define HWRM_ENGINE_STATS_QUERY UINT32_C(0x157)
+ /* Engine - Allocate an Engine RQ. */
+ #define HWRM_ENGINE_RQ_ALLOC UINT32_C(0x15e)
+ /* Engine - Free an Engine RQ. */
+ #define HWRM_ENGINE_RQ_FREE UINT32_C(0x15f)
+ /* Engine - Allocate an Engine CQ. */
+ #define HWRM_ENGINE_CQ_ALLOC UINT32_C(0x160)
+ /* Engine - Free an Engine CQ. */
+ #define HWRM_ENGINE_CQ_FREE UINT32_C(0x161)
+ /* Engine - Allocate an NQ. */
+ #define HWRM_ENGINE_NQ_ALLOC UINT32_C(0x162)
+ /* Engine - Free an NQ. */
+ #define HWRM_ENGINE_NQ_FREE UINT32_C(0x163)
+ /* Engine - Set the on-die RQE credit update location. */
+ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS UINT32_C(0x164)
+ /* Experimental */
+ #define HWRM_FUNC_RESOURCE_QCAPS UINT32_C(0x190)
+ /* Experimental */
+ #define HWRM_FUNC_VF_RESOURCE_CFG UINT32_C(0x191)
+ /* Experimental */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS UINT32_C(0x192)
+ /* Experimental */
+ #define HWRM_FUNC_BACKING_STORE_CFG UINT32_C(0x193)
+ /* Experimental */
+ #define HWRM_FUNC_BACKING_STORE_QCFG UINT32_C(0x194)
+ /* Experimental */
+ #define HWRM_SELFTEST_QLIST UINT32_C(0x200)
+ /* Experimental */
+ #define HWRM_SELFTEST_EXEC UINT32_C(0x201)
+ /* Experimental */
+ #define HWRM_SELFTEST_IRQ UINT32_C(0x202)
+ /* Experimental */
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA UINT32_C(0x203)
+ /* Experimental */
+ #define HWRM_PCIE_QSTATS UINT32_C(0x204)
+ /* Experimental */
+ #define HWRM_DBG_READ_DIRECT UINT32_C(0xff10)
+ /* Experimental */
+ #define HWRM_DBG_READ_INDIRECT UINT32_C(0xff11)
+ /* Experimental */
+ #define HWRM_DBG_WRITE_DIRECT UINT32_C(0xff12)
+ /* Experimental */
+ #define HWRM_DBG_WRITE_INDIRECT UINT32_C(0xff13)
+ #define HWRM_DBG_DUMP UINT32_C(0xff14)
+ /* Experimental */
+ #define HWRM_DBG_ERASE_NVM UINT32_C(0xff15)
+ /* Experimental */
+ #define HWRM_DBG_CFG UINT32_C(0xff16)
+ /* Experimental */
+ #define HWRM_DBG_COREDUMP_LIST UINT32_C(0xff17)
+ /* Experimental */
+ #define HWRM_DBG_COREDUMP_INITIATE UINT32_C(0xff18)
+ /* Experimental */
+ #define HWRM_DBG_COREDUMP_RETRIEVE UINT32_C(0xff19)
+ /* */
+ #define HWRM_DBG_I2C_CMD UINT32_C(0xff1b)
+ /* Experimental */
+ #define HWRM_NVM_FACTORY_DEFAULTS UINT32_C(0xffee)
+ #define HWRM_NVM_VALIDATE_OPTION UINT32_C(0xffef)
+ #define HWRM_NVM_FLUSH UINT32_C(0xfff0)
+ #define HWRM_NVM_GET_VARIABLE UINT32_C(0xfff1)
+ #define HWRM_NVM_SET_VARIABLE UINT32_C(0xfff2)
+ #define HWRM_NVM_INSTALL_UPDATE UINT32_C(0xfff3)
+ #define HWRM_NVM_MODIFY UINT32_C(0xfff4)
+ #define HWRM_NVM_VERIFY_UPDATE UINT32_C(0xfff5)
+ #define HWRM_NVM_GET_DEV_INFO UINT32_C(0xfff6)
+ #define HWRM_NVM_ERASE_DIR_ENTRY UINT32_C(0xfff7)
+ #define HWRM_NVM_MOD_DIR_ENTRY UINT32_C(0xfff8)
+ #define HWRM_NVM_FIND_DIR_ENTRY UINT32_C(0xfff9)
+ #define HWRM_NVM_GET_DIR_ENTRIES UINT32_C(0xfffa)
+ #define HWRM_NVM_GET_DIR_INFO UINT32_C(0xfffb)
+ #define HWRM_NVM_RAW_DUMP UINT32_C(0xfffc)
+ #define HWRM_NVM_READ UINT32_C(0xfffd)
+ #define HWRM_NVM_WRITE UINT32_C(0xfffe)
+ #define HWRM_NVM_RAW_WRITE_BLK UINT32_C(0xffff)
+ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Return Codes */
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+ uint16_t error_code;
+ /* Request was successfully executed by the HWRM. */
+ #define HWRM_ERR_CODE_SUCCESS UINT32_C(0x0)
+ /* The HWRM failed to execute the request. */
+ #define HWRM_ERR_CODE_FAIL UINT32_C(0x1)
+ /*
+ * The request contains invalid argument(s) or input
+ * parameters.
+ */
+ #define HWRM_ERR_CODE_INVALID_PARAMS UINT32_C(0x2)
+ /*
+ * The requester is not allowed to access the requested
+ * resource. This error code shall be provided in a
+ * response to a request to query or modify an existing
+ * resource that is not accessible by the requester.
+ */
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED UINT32_C(0x3)
+ /*
+ * The HWRM is unable to allocate the requested resource.
+ * This code only applies to requests for HWRM resource
+ * allocations.
+ */
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR UINT32_C(0x4)
+ /*
+ * Invalid combination of flags is specified in the
+ * request.
+ */
+ #define HWRM_ERR_CODE_INVALID_FLAGS UINT32_C(0x5)
+ /*
+ * Invalid combination of enables fields is specified in
+ * the request.
+ */
+ #define HWRM_ERR_CODE_INVALID_ENABLES UINT32_C(0x6)
+ /*
+ * Request contains a required TLV that is not supported by
+ * the installed version of firmware.
+ */
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV UINT32_C(0x7)
+ /*
+ * No firmware buffer available to accept the request. Driver
+ * should retry the request.
+ */
+ #define HWRM_ERR_CODE_NO_BUFFER UINT32_C(0x8)
+ /*
+ * Generic HWRM execution error that represents an
+ * internal error.
+ */
+ #define HWRM_ERR_CODE_HWRM_ERROR UINT32_C(0xf)
+ /* Unknown error */
+ #define HWRM_ERR_CODE_UNKNOWN_ERR UINT32_C(0xfffe)
+ /* Unsupported or invalid command */
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED UINT32_C(0xffff)
+ #define HWRM_ERR_CODE_LAST \
+ HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output */
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+ /*
+ * Pass/Fail or error type
+ *
+ * Note: receiver to verify the in parameters, and fail the call
+ * with an error when appropriate
+ */
+ uint16_t error_code;
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+ /*
+ * This field is the length of the response in bytes. The
+ * last byte of the response is a valid flag that will read
+ * as '1' when the command has been completely written to
+ * memory.
+ */
+ uint16_t resp_len;
+ /* debug info for this error response. */
+ uint32_t opaque_0;
+ /* debug info for this error response. */
+ uint16_t opaque_1;
+ /*
+ * In the case of an error response, command specific error
+ * code is returned in this field.
+ */
+ uint8_t cmd_err;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+/*
+ * Following is the signature for HWRM message field that indicates not
+ * applicable (All F's). Need to cast it the size of the field if needed.
+ */
+#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
+/* hwrm_func_buf_rgtr */
+#define HWRM_MAX_REQ_LEN 128
+/* hwrm_selftest_qlist */
+#define HWRM_MAX_RESP_LEN 280
+/* 7 bit indirection table index. */
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+/* valid key for HWRM response */
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 9
+#define HWRM_VERSION_UPDATE 2
+/* non-zero means beta version */
+#define HWRM_VERSION_RSVD 9
+#define HWRM_VERSION_STR "1.9.2.9"
+
+/****************
+ * hwrm_ver_get *
+ ****************/
+
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This field represents the major version of HWRM interface
+ * specification supported by the driver HWRM implementation.
+ * The interface major version is intended to change only when
+ * non backward compatible changes are made to the HWRM
+ * interface specification.
+ */
+ uint8_t hwrm_intf_maj;
+ /*
+ * This field represents the minor version of HWRM interface
+ * specification supported by the driver HWRM implementation.
+ * A change in interface minor version is used to reflect
+ * significant backward compatible modification to HWRM
+ * interface specification.
+ * This can be due to addition or removal of functionality.
+ * HWRM interface specifications with the same major version
+ * but different minor versions are compatible.
+ */
+ uint8_t hwrm_intf_min;
+ /*
+ * This field represents the update version of HWRM interface
+ * specification supported by the driver HWRM implementation.
+ * The interface update version is used to reflect minor
+ * changes or bug fixes to a released HWRM interface
+ * specification.
+ */
+ uint8_t hwrm_intf_upd;
+ uint8_t unused_0[5];
+} __attribute__((packed));
+
+/* hwrm_ver_get_output (size:1408b/176B) */
+struct hwrm_ver_get_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This field represents the major version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * The interface major version is intended to change only when
+ * non backward compatible changes are made to the HWRM
+ * interface specification.
+ * A HWRM implementation that is compliant with this
+ * specification shall provide value of 1 in this field.
+ */
+ uint8_t hwrm_intf_maj_8b;
+ /*
+ * This field represents the minor version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * A change in interface minor version is used to reflect
+ * significant backward compatible modification to HWRM
+ * interface specification.
+ * This can be due to addition or removal of functionality.
+ * HWRM interface specifications with the same major version
+ * but different minor versions are compatible.
+ * A HWRM implementation that is compliant with this
+ * specification shall provide value of 2 in this field.
+ */
+ uint8_t hwrm_intf_min_8b;
+ /*
+ * This field represents the update version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * The interface update version is used to reflect minor
+ * changes or bug fixes to a released HWRM interface
+ * specification.
+ * A HWRM implementation that is compliant with this
+ * specification shall provide value of 2 in this field.
+ */
+ uint8_t hwrm_intf_upd_8b;
+ uint8_t hwrm_intf_rsvd_8b;
+ /*
+ * This field represents the major version of HWRM firmware.
+ * A change in firmware major version represents a major
+ * firmware release.
+ */
+ uint8_t hwrm_fw_maj_8b;
+ /*
+ * This field represents the minor version of HWRM firmware.
+ * A change in firmware minor version represents significant
+ * firmware functionality changes.
+ */
+ uint8_t hwrm_fw_min_8b;
+ /*
+ * This field represents the build version of HWRM firmware.
+ * A change in firmware build version represents bug fixes
+ * to a released firmware.
+ */
+ uint8_t hwrm_fw_bld_8b;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version of the
+ * HWRM firmware.
+ */
+ uint8_t hwrm_fw_rsvd_8b;
+ /*
+ * This field represents the major version of mgmt firmware.
+ * A change in major version represents a major release.
+ */
+ uint8_t mgmt_fw_maj_8b;
+ /*
+ * This field represents the minor version of mgmt firmware.
+ * A change in minor version represents significant
+ * functionality changes.
+ */
+ uint8_t mgmt_fw_min_8b;
+ /*
+ * This field represents the build version of mgmt firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint8_t mgmt_fw_bld_8b;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint8_t mgmt_fw_rsvd_8b;
+ /*
+ * This field represents the major version of network
+ * control firmware.
+ * A change in major version represents a major release.
+ */
+ uint8_t netctrl_fw_maj_8b;
+ /*
+ * This field represents the minor version of network
+ * control firmware.
+ * A change in minor version represents significant
+ * functionality changes.
+ */
+ uint8_t netctrl_fw_min_8b;
+ /*
+ * This field represents the build version of network
+ * control firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint8_t netctrl_fw_bld_8b;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint8_t netctrl_fw_rsvd_8b;
+ /*
+ * This field is used to indicate device's capabilities and
+ * configurations.
+ */
+ uint32_t dev_caps_cfg;
+ /*
+ * If set to 1, then secure firmware update behavior
+ * is supported.
+ * If set to 0, then secure firmware update behavior is
+ * not supported.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, then firmware based DCBX agent is supported.
+ * If set to 0, then firmware based DCBX agent capability
+ * is not supported on this device.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, then HWRM short command format is supported.
+ * If set to 0, then HWRM short command format is not supported.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, then HWRM short command format is required.
+ * If set to 0, then HWRM short command format is not required.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED \
+ UINT32_C(0x8)
+ /*
+ * This field represents the major version of RoCE firmware.
+ * A change in major version represents a major release.
+ */
+ uint8_t roce_fw_maj_8b;
+ /*
+ * This field represents the minor version of RoCE firmware.
+ * A change in minor version represents significant
+ * functionality changes.
+ */
+ uint8_t roce_fw_min_8b;
+ /*
+ * This field represents the build version of RoCE firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint8_t roce_fw_bld_8b;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint8_t roce_fw_rsvd_8b;
+ /*
+ * This field represents the name of HWRM FW (ASCII chars
+ * with NULL at the end).
+ */
+ char hwrm_fw_name[16];
+ /*
+ * This field represents the name of mgmt FW (ASCII chars
+ * with NULL at the end).
+ */
+ char mgmt_fw_name[16];
+ /*
+ * This field represents the name of network control
+ * firmware (ASCII chars with NULL at the end).
+ */
+ char netctrl_fw_name[16];
+ /*
+ * This field is reserved for future use.
+ * The responder should set it to 0.
+ * The requester should ignore this field.
+ */
+ uint8_t reserved2[16];
+ /*
+ * This field represents the name of RoCE FW (ASCII chars
+ * with NULL at the end).
+ */
+ char roce_fw_name[16];
+ /* This field returns the chip number. */
+ uint16_t chip_num;
+ /* This field returns the revision of chip. */
+ uint8_t chip_rev;
+ /* This field returns the chip metal number. */
+ uint8_t chip_metal;
+ /* This field returns the bond id of the chip. */
+ uint8_t chip_bond_id;
+ /* This value indicates the type of platform used for chip implementation. */
+ uint8_t chip_platform_type;
+ /* ASIC */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC UINT32_C(0x0)
+ /* FPGA platform of the chip. */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1)
+ /* Palladium platform of the chip. */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2)
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_LAST \
+ HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM
+ /*
+ * This field returns the maximum value of request window that
+ * is supported by the HWRM. The request window is mapped
+ * into device address space using MMIO.
+ */
+ uint16_t max_req_win_len;
+ /*
+ * This field returns the maximum value of response buffer in
+ * bytes.
+ */
+ uint16_t max_resp_len;
+ /*
+ * This field returns the default request timeout value in
+ * milliseconds.
+ */
+ uint16_t def_req_timeout;
+ /*
+ * This field will indicate if any subsystems is not fully
+ * initialized.
+ */
+ uint8_t flags;
+ /*
+ * If set to 1, device is not ready.
+ * If set to 0, device is ready to accept all HWRM commands.
+ */
+ #define HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY UINT32_C(0x1)
+ /*
+ * If set to 1, external version present.
+ * If set to 0, external version not present.
+ */
+ #define HWRM_VER_GET_OUTPUT_FLAGS_EXT_VER_AVAIL UINT32_C(0x2)
+ uint8_t unused_0[2];
+ /*
+ * For backward compatibility this field must be set to 1.
+ * Older drivers might look for this field to be 1 before
+ * processing the message.
+ */
+ uint8_t always_1;
+ /*
+ * This field represents the major version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * The interface major version is intended to change only when
+ * non backward compatible changes are made to the HWRM
+ * interface specification. A HWRM implementation that is
+ * compliant with this specification shall provide value of 1
+ * in this field.
+ */
+ uint16_t hwrm_intf_major;
+ /*
+ * This field represents the minor version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * A change in interface minor version is used to reflect
+ * significant backward compatible modification to HWRM
+ * interface specification. This can be due to addition or
+ * removal of functionality. HWRM interface specifications
+ * with the same major version but different minor versions are
+ * compatible. A HWRM implementation that is compliant with
+ * this specification shall provide value of 2 in this field.
+ */
+ uint16_t hwrm_intf_minor;
+ /*
+ * This field represents the update version of HWRM interface
+ * specification supported by the HWRM implementation. The
+ * interface update version is used to reflect minor changes or
+ * bug fixes to a released HWRM interface specification.
+ * A HWRM implementation that is compliant with this
+ * specification shall provide value of 2 in this field.
+ */
+ uint16_t hwrm_intf_build;
+ /*
+ * This field represents the patch version of HWRM interface
+ * specification supported by the HWRM implementation.
+ */
+ uint16_t hwrm_intf_patch;
+ /*
+ * This field represents the major version of HWRM firmware.
+ * A change in firmware major version represents a major
+ * firmware release.
+ */
+ uint16_t hwrm_fw_major;
+ /*
+ * This field represents the minor version of HWRM firmware.
+ * A change in firmware minor version represents significant
+ * firmware functionality changes.
+ */
+ uint16_t hwrm_fw_minor;
+ /*
+ * This field represents the build version of HWRM firmware.
+ * A change in firmware build version represents bug fixes to
+ * a released firmware.
+ */
+ uint16_t hwrm_fw_build;
+ /*
+ * This field is a reserved field.
+ * This field can be used to represent firmware branches or customer
+ * specific releases tied to a specific (major,minor,update) version
+ * of the HWRM firmware.
+ */
+ uint16_t hwrm_fw_patch;
+ /*
+ * This field represents the major version of mgmt firmware.
+ * A change in major version represents a major release.
+ */
+ uint16_t mgmt_fw_major;
+ /*
+ * This field represents the minor version of HWRM firmware.
+ * A change in firmware minor version represents significant
+ * firmware functionality changes.
+ */
+ uint16_t mgmt_fw_minor;
+ /*
+ * This field represents the build version of mgmt firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint16_t mgmt_fw_build;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version.
+ */
+ uint16_t mgmt_fw_patch;
+ /*
+ * This field represents the major version of network control
+ * firmware. A change in major version represents
+ * a major release.
+ */
+ uint16_t netctrl_fw_major;
+ /*
+ * This field represents the minor version of network control
+ * firmware. A change in minor version represents significant
+ * functionality changes.
+ */
+ uint16_t netctrl_fw_minor;
+ /*
+ * This field represents the build version of network control
+ * firmware. A change in update version represents bug fixes.
+ */
+ uint16_t netctrl_fw_build;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint16_t netctrl_fw_patch;
+ /*
+ * This field represents the major version of RoCE firmware.
+ * A change in major version represents a major release.
+ */
+ uint16_t roce_fw_major;
+ /*
+ * This field represents the minor version of RoCE firmware.
+ * A change in minor version represents significant
+ * functionality changes.
+ */
+ uint16_t roce_fw_minor;
+ /*
+ * This field represents the build version of RoCE firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint16_t roce_fw_build;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint16_t roce_fw_patch;
+ /*
+ * This field returns the maximum extended request length acceptable
+ * by the device which allows requests greater than mailbox size when
+ * used with the short cmd request format.
+ */
+ uint16_t max_ext_req_len;
+ uint8_t unused_1[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* bd_base (size:64b/8B) */
+struct bd_base {
+ uint8_t type;
+ /* This value identifies the type of buffer descriptor. */
+ #define BD_BASE_TYPE_MASK UINT32_C(0x3f)
+ #define BD_BASE_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is used for
+ * normal L2 packet transmission.
+ */
+ #define BD_BASE_TYPE_TX_BD_SHORT UINT32_C(0x0)
+ /*
+ * Indicates that this BD is 1BB long and is an empty
+ * TX BD. Not valid for use by the driver.
+ */
+ #define BD_BASE_TYPE_TX_BD_EMPTY UINT32_C(0x1)
+ /*
+ * Indicates that this BD is 16B long and is an RX Producer
+ * (ie. empty) buffer descriptor.
+ */
+ #define BD_BASE_TYPE_RX_PROD_PKT UINT32_C(0x4)
+ /*
+ * Indicates that this BD is 16B long and is an RX
+ * Producer Buffer BD.
+ */
+ #define BD_BASE_TYPE_RX_PROD_BFR UINT32_C(0x5)
+ /*
+ * Indicates that this BD is 16B long and is an
+ * RX Producer Assembly Buffer Descriptor.
+ */
+ #define BD_BASE_TYPE_RX_PROD_AGG UINT32_C(0x6)
+ /*
+ * Indicates that this BD is 32B long and is used for
+ * normal L2 packet transmission.
+ */
+ #define BD_BASE_TYPE_TX_BD_LONG UINT32_C(0x10)
+ #define BD_BASE_TYPE_LAST BD_BASE_TYPE_TX_BD_LONG
+ uint8_t unused_1[7];
+} __attribute__((packed));
+
+/* tx_bd_short (size:128b/16B) */
+struct tx_bd_short {
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs
+ * of a packet.
+ */
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_SHORT_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is used for
+ * normal L2 packet transmission.
+ */
+ #define TX_BD_SHORT_TYPE_TX_BD_SHORT UINT32_C(0x0)
+ #define TX_BD_SHORT_TYPE_LAST TX_BD_SHORT_TYPE_TX_BD_SHORT
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs
+ * of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_SHORT_FLAGS_SFT 6
+ /*
+ * If set to 1, the packet ends with the data in the buffer
+ * pointed to by this descriptor. This flag must be
+ * valid on every BD.
+ */
+ #define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40)
+ /*
+ * If set to 1, the device will not generate a completion for
+ * this transmit packet unless there is an error in it's
+ * processing.
+ * If this bit
+ * is set to 0, then the packet will be completed normally.
+ *
+ * This bit must be valid only on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80)
+ /*
+ * This value indicates how many 16B BD locations are consumed
+ * in the ring by this packet.
+ * A value of 1 indicates that this BD is the only BD (and that
+ * the it is a short BD). A value
+ * of 3 indicates either 3 short BDs or 1 long BD and one short
+ * BD in the packet. A value of 0 indicates
+ * that there are 32 BD locations in the packet (the maximum).
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+ #define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8
+ /*
+ * This value is a hint for the length of the entire packet.
+ * It is used by the chip to optimize internal processing.
+ *
+ * The packet will be dropped if the hint is too short.
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000)
+ #define TX_BD_SHORT_FLAGS_LHINT_SFT 13
+ /* indicates packet length < 512B */
+ #define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+ /* indicates 512 <= packet length < 1KB */
+ #define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+ /* indicates 1KB <= packet length < 2KB */
+ #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+ /* indicates packet length >= 2KB */
+ #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+ #define TX_BD_SHORT_FLAGS_LHINT_LAST \
+ TX_BD_SHORT_FLAGS_LHINT_GTE2K
+ /*
+ * If set to 1, the device immediately updates the Send Consumer
+ * Index after the buffer associated with this descriptor has
+ * been transferred via DMA to NIC memory from host memory. An
+ * interrupt may or may not be generated according to the state
+ * of the interrupt avoidance mechanisms. If this bit
+ * is set to 0, then the Consumer Index is only updated as soon
+ * as one of the host interrupt coalescing conditions has been met.
+ *
+ * This bit must be valid on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000)
+ /*
+ * This is the length of the host physical buffer this BD describes
+ * in bytes.
+ *
+ * This field must be valid on all BDs of a packet.
+ */
+ uint16_t len;
+ /*
+ * The opaque data field is pass through to the completion and can be
+ * used for any data that the driver wants to associate with the
+ * transmit BD.
+ *
+ * This field must be valid on the first BD of a packet.
+ */
+ uint32_t opaque;
+ /*
+ * This is the host physical address for the portion of the packet
+ * described by this TX BD.
+ *
+ * This value must be valid on all BDs of a packet.
+ */
+ uint64_t address;
+} __attribute__((packed));
+
+/* tx_bd_long (size:128b/16B) */
+struct tx_bd_long {
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
+ /*
+ * This value indicates the type of buffer descriptor.
+ * packet.
+ */
+ #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_LONG_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 32B long and is used for
+ * normal L2 packet transmission.
+ */
+ #define TX_BD_LONG_TYPE_TX_BD_LONG UINT32_C(0x10)
+ #define TX_BD_LONG_TYPE_LAST TX_BD_LONG_TYPE_TX_BD_LONG
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs
+ * of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_LONG_FLAGS_SFT 6
+ /*
+ * If set to 1, the packet ends with the data in the buffer
+ * pointed to by this descriptor. This flag must be
+ * valid on every BD.
+ */
+ #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40)
+ /*
+ * If set to 1, the device will not generate a completion for
+ * this transmit packet unless there is an error in it's
+ * processing.
+ * If this bit
+ * is set to 0, then the packet will be completed normally.
+ *
+ * This bit must be valid only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80)
+ /*
+ * This value indicates how many 16B BD locations are consumed
+ * in the ring by this packet.
+ * A value of 1 indicates that this BD is the only BD (and that
+ * the it is a short BD). A value
+ * of 3 indicates either 3 short BDs or 1 long BD and one short
+ * BD in the packet. A value of 0 indicates
+ * that there are 32 BD locations in the packet (the maximum).
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+ #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8
+ /*
+ * This value is a hint for the length of the entire packet.
+ * It is used by the chip to optimize internal processing.
+ *
+ * The packet will be dropped if the hint is too short.
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000)
+ #define TX_BD_LONG_FLAGS_LHINT_SFT 13
+ /* indicates packet length < 512B */
+ #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+ /* indicates 512 <= packet length < 1KB */
+ #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+ /* indicates 1KB <= packet length < 2KB */
+ #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+ /* indicates packet length >= 2KB */
+ #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+ #define TX_BD_LONG_FLAGS_LHINT_LAST TX_BD_LONG_FLAGS_LHINT_GTE2K
+ /*
+ * If set to 1, the device immediately updates the Send Consumer
+ * Index after the buffer associated with this descriptor has
+ * been transferred via DMA to NIC memory from host memory. An
+ * interrupt may or may not be generated according to the state
+ * of the interrupt avoidance mechanisms. If this bit
+ * is set to 0, then the Consumer Index is only updated as soon
+ * as one of the host interrupt coalescing conditions has been met.
+ *
+ * This bit must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000)
+ /*
+ * This is the length of the host physical buffer this BD describes
+ * in bytes.
+ *
+ * This field must be valid on all BDs of a packet.
+ */
+ uint16_t len;
+ /*
+ * The opaque data field is pass through to the completion and can be
+ * used for any data that the driver wants to associate with the
+ * transmit BD.
+ *
+ * This field must be valid on the first BD of a packet.
+ */
+ uint32_t opaque;
+ /*
+ * This is the host physical address for the portion of the packet
+ * described by this TX BD.
+ *
+ * This value must be valid on all BDs of a packet.
+ */
+ uint64_t address;
+} __attribute__((packed));
+
+/* tx_bd_long_hi (size:128b/16B) */
+struct tx_bd_long_hi {
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Their value on other BDs of the packet will be ignored.
+ */
+ uint16_t lflags;
+ /*
+ * If set to 1, the controller replaces the TCP/UPD checksum
+ * fields of normal TCP/UPD checksum, or the inner TCP/UDP
+ * checksum field of the encapsulated TCP/UDP packets with the
+ * hardware calculated TCP/UDP checksum for the packet associated
+ * with this descriptor. The flag is ignored if the LSO flag is set.
+ *
+ * This bit must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
+ /*
+ * If set to 1, the controller replaces the IP checksum of the
+ * normal packets, or the inner IP checksum of the encapsulated
+ * packets with the hardware calculated IP checksum for the
+ * packet associated with this descriptor.
+ *
+ * This bit must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2)
+ /*
+ * If set to 1, the controller will not append an Ethernet CRC
+ * to the end of the frame.
+ *
+ * This bit must be valid on the first BD of a packet.
+ *
+ * Packet must be 64B or longer when this flag is set. It is not
+ * useful to use this bit with any form of TX offload such as
+ * CSO or LSO. The intent is that the packet from the host already
+ * has a valid Ethernet CRC on the packet.
+ */
+ #define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4)
+ /*
+ * If set to 1, the device will record the time at which the packet
+ * was actually transmitted at the TX MAC.
+ *
+ * This bit must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8)
+ /*
+ * If set to 1, The controller replaces the tunnel IP checksum
+ * field with hardware calculated IP checksum for the IP header
+ * of the packet associated with this descriptor.
+ *
+ * For outer UDP checksum, global outer UDP checksum TE_NIC register
+ * needs to be enabled. If the global outer UDP checksum TE_NIC register
+ * bit is set, outer UDP checksum will be calculated for the following
+ * cases:
+ * 1. Packets with tcp_udp_chksum flag set to offload checksum for inner
+ * packet AND the inner packet is TCP/UDP. If the inner packet is ICMP for
+ * example (non-TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP
+ * checksum will not be calculated.
+ * 2. Packets with lso flag set which implies inner TCP checksum calculation
+ * as part of LSO operation.
+ */
+ #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
+ /*
+ * If set to 1, the device will treat this packet with LSO(Large
+ * Send Offload) processing for both normal or encapsulated
+ * packets, which is a form of TCP segmentation. When this bit
+ * is 1, the hdr_size and mss fields must be valid. The driver
+ * doesn't need to set t_ip_chksum, ip_chksum, and tcp_udp_chksum
+ * flags since the controller will replace the appropriate
+ * checksum fields for segmented packets.
+ *
+ * When this bit is 1, the hdr_size and mss fields must be valid.
+ */
+ #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20)
+ /*
+ * If set to zero when LSO is '1', then the IPID will be treated
+ * as a 16b number and will be wrapped if it exceeds a value of
+ * 0xffff.
+ *
+ * If set to one when LSO is '1', then the IPID will be treated
+ * as a 15b number and will be wrapped if it exceeds a value 0f
+ * 0x7fff.
+ */
+ #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
+ /*
+ * If set to zero when LSO is '1', then the IPID of the tunnel
+ * IP header will not be modified during LSO operations.
+ *
+ * If set to one when LSO is '1', then the IPID of the tunnel
+ * IP header will be incremented for each subsequent segment of an
+ * LSO operation.
+ *
+ * The flag is ignored if the LSO packet is a normal (non-tunneled)
+ * TCP packet.
+ */
+ #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
+ /*
+ * If set to '1', then the RoCE ICRC will be appended to the
+ * packet. Packet must be a valid RoCE format packet.
+ */
+ #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100)
+ /*
+ * If set to '1', then the FCoE CRC will be appended to the
+ * packet. Packet must be a valid FCoE format packet.
+ */
+ #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200)
+ uint16_t hdr_size;
+ /*
+ * When LSO is '1', this field must contain the offset of the
+ * TCP payload from the beginning of the packet in as
+ * 16b words. In case of encapsulated/tunneling packet, this field
+ * contains the offset of the inner TCP payload from beginning of the
+ * packet as 16-bit words.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff)
+ #define TX_BD_LONG_HDR_SIZE_SFT 0
+ uint32_t mss;
+ /*
+ * This is the MSS value that will be used to do the LSO processing.
+ * The value is the length in bytes of the TCP payload for each
+ * segment generated by the LSO operation.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff)
+ #define TX_BD_LONG_MSS_SFT 0
+ uint16_t unused2;
+ /*
+ * This value selects a CFA action to perform on the packet.
+ * Set this value to zero if no CFA action is desired.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ uint16_t cfa_action;
+ /*
+ * This value is action meta-data that defines CFA edit operations
+ * that are done in addition to any action editing.
+ */
+ uint32_t cfa_meta;
+ /* When key=1, This is the VLAN tag VID value. */
+ #define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff)
+ #define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0
+ /* When key=1, This is the VLAN tag DE value. */
+ #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000)
+ /* When key=1, This is the VLAN tag PRI value. */
+ #define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000)
+ #define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13
+ /* When key=1, This is the VLAN tag TPID select value. */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16
+ /* 0x88a8 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16)
+ /* 0x8100 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16)
+ /* 0x9100 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16)
+ /* 0x9200 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16)
+ /* 0x9300 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
+ /* Value programmed in CFA VLANTPID register. */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \
+ TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG
+ /* When key=1, This is the VLAN tag TPID select value. */
+ #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000)
+ #define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19
+ /*
+ * This field identifies the type of edit to be performed
+ * on the packet.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000)
+ #define TX_BD_LONG_CFA_META_KEY_SFT 28
+ /* No editing */
+ #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
+ /*
+ * - meta[17:16] - TPID select value (0 = 0x8100).
+ * - meta[15:12] - PRI/DE value.
+ * - meta[11:0] - VID value.
+ */
+ #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
+ #define TX_BD_LONG_CFA_META_KEY_LAST \
+ TX_BD_LONG_CFA_META_KEY_VLAN_TAG
+} __attribute__((packed));
+
+/* tx_bd_empty (size:128b/16B) */
+struct tx_bd_empty {
+ /* This value identifies the type of buffer descriptor. */
+ uint8_t type;
+ #define TX_BD_EMPTY_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_EMPTY_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 1BB long and is an empty
+ * TX BD. Not valid for use by the driver.
+ */
+ #define TX_BD_EMPTY_TYPE_TX_BD_EMPTY UINT32_C(0x1)
+ #define TX_BD_EMPTY_TYPE_LAST TX_BD_EMPTY_TYPE_TX_BD_EMPTY
+ uint8_t unused_1[3];
+ uint8_t unused_2;
+ uint8_t unused_3[3];
+ uint8_t unused_4[8];
+} __attribute__((packed));
+
+/* rx_prod_pkt_bd (size:128b/16B) */
+struct rx_prod_pkt_bd {
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_PKT_BD_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is an RX Producer
+ * (ie. empty) buffer descriptor.
+ */
+ #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4)
+ #define RX_PROD_PKT_BD_TYPE_LAST \
+ RX_PROD_PKT_BD_TYPE_RX_PROD_PKT
+ #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_PKT_BD_FLAGS_SFT 6
+ /*
+ * If set to 1, the packet will be placed at the address plus
+ * 2B. The 2 Bytes of padding will be written as zero.
+ */
+ #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40)
+ /*
+ * If set to 1, the packet write will be padded out to the
+ * nearest cache-line with zero value padding.
+ */
+ #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80)
+ /*
+ * This value is the number of additional buffers in the ring that
+ * describe the buffer space to be consumed for the this packet.
+ * If the value is zero, then the packet must fit within the
+ * space described by this BD. If this value is 1 or more, it
+ * indicates how many additional "buffer" BDs are in the ring
+ * immediately following this BD to be used for the same
+ * network packet.
+ *
+ * Even if the packet to be placed does not need all the
+ * additional buffers, they will be consumed anyway.
+ */
+ #define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300)
+ #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8
+ /*
+ * This is the length in Bytes of the host physical buffer where
+ * data for the packet may be placed in host memory.
+ */
+ uint16_t len;
+ /*
+ * The opaque data field is pass through to the completion and can be
+ * used for any data that the driver wants to associate with this
+ * receive buffer set.
+ */
+ uint32_t opaque;
+ /*
+ * This is the host physical address where data for the packet may
+ * by placed in host memory.
+ */
+ uint64_t address;
+} __attribute__((packed));
+
+/* rx_prod_bfr_bd (size:128b/16B) */
+struct rx_prod_bfr_bd {
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define RX_PROD_BFR_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_BFR_BD_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is an RX
+ * Producer Buffer BD.
+ */
+ #define RX_PROD_BFR_BD_TYPE_RX_PROD_BFR UINT32_C(0x5)
+ #define RX_PROD_BFR_BD_TYPE_LAST RX_PROD_BFR_BD_TYPE_RX_PROD_BFR
+ #define RX_PROD_BFR_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_BFR_BD_FLAGS_SFT 6
+ /*
+ * This is the length in Bytes of the host physical buffer where
+ * data for the packet may be placed in host memory.
+ */
+ uint16_t len;
+ /* This field is not used. */
+ uint32_t opaque;
+ /*
+ * This is the host physical address where data for the packet may
+ * by placed in host memory.
+ */
+ uint64_t address;
+} __attribute__((packed));
+
+/* rx_prod_agg_bd (size:128b/16B) */
+struct rx_prod_agg_bd {
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define RX_PROD_AGG_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_AGG_BD_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is an
+ * RX Producer Assembly Buffer Descriptor.
+ */
+ #define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG UINT32_C(0x6)
+ #define RX_PROD_AGG_BD_TYPE_LAST \
+ RX_PROD_AGG_BD_TYPE_RX_PROD_AGG
+ #define RX_PROD_AGG_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_AGG_BD_FLAGS_SFT 6
+ /*
+ * If set to 1, the packet write will be padded out to the
+ * nearest cache-line with zero value padding.
+ */
+ #define RX_PROD_AGG_BD_FLAGS_EOP_PAD UINT32_C(0x40)
+ /*
+ * This is the length in Bytes of the host physical buffer where
+ * data for the packet may be placed in host memory.
+ */
+ uint16_t len;
+ /*
+ * The opaque data field is pass through to the completion and can be
+ * used for any data that the driver wants to associate with this
+ * receive assembly buffer.
+ */
+ uint32_t opaque;
+ /*
+ * This is the host physical address where data for the packet may
+ * by placed in host memory.
+ */
+ uint64_t address;
+} __attribute__((packed));
+
+/* cmpl_base (size:128b/16B) */
+struct cmpl_base {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f)
+ #define CMPL_BASE_TYPE_SFT 0
+ /*
+ * TX L2 completion:
+ * Completion of TX packet. Length = 16B
+ */
+ #define CMPL_BASE_TYPE_TX_L2 UINT32_C(0x0)
+ /*
+ * RX L2 completion:
+ * Completion of and L2 RX packet. Length = 32B
+ */
+ #define CMPL_BASE_TYPE_RX_L2 UINT32_C(0x11)
+ /*
+ * RX Aggregation Buffer completion :
+ * Completion of an L2 aggregation buffer in support of
+ * TPA, HDS, or Jumbo packet completion. Length = 16B
+ */
+ #define CMPL_BASE_TYPE_RX_AGG UINT32_C(0x12)
+ /*
+ * RX L2 TPA Start Completion:
+ * Completion at the beginning of a TPA operation.
+ * Length = 32B
+ */
+ #define CMPL_BASE_TYPE_RX_TPA_START UINT32_C(0x13)
+ /*
+ * RX L2 TPA End Completion:
+ * Completion at the end of a TPA operation.
+ * Length = 32B
+ */
+ #define CMPL_BASE_TYPE_RX_TPA_END UINT32_C(0x15)
+ /*
+ * Statistics Ejection Completion:
+ * Completion of statistics data ejection buffer.
+ * Length = 16B
+ */
+ #define CMPL_BASE_TYPE_STAT_EJECT UINT32_C(0x1a)
+ /*
+ * HWRM Command Completion:
+ * Completion of an HWRM command.
+ */
+ #define CMPL_BASE_TYPE_HWRM_DONE UINT32_C(0x20)
+ /* Forwarded HWRM Request */
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ UINT32_C(0x22)
+ /* Forwarded HWRM Response */
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP UINT32_C(0x24)
+ /* HWRM Asynchronous Event Information */
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ /* CQ Notification */
+ #define CMPL_BASE_TYPE_CQ_NOTIFICATION UINT32_C(0x30)
+ /* SRQ Threshold Event */
+ #define CMPL_BASE_TYPE_SRQ_EVENT UINT32_C(0x32)
+ /* DBQ Threshold Event */
+ #define CMPL_BASE_TYPE_DBQ_EVENT UINT32_C(0x34)
+ /* QP Async Notification */
+ #define CMPL_BASE_TYPE_QP_EVENT UINT32_C(0x38)
+ /* Function Async Notification */
+ #define CMPL_BASE_TYPE_FUNC_EVENT UINT32_C(0x3a)
+ #define CMPL_BASE_TYPE_LAST CMPL_BASE_TYPE_FUNC_EVENT
+ /* info1 is 16 b */
+ uint16_t info1;
+ /* info2 is 32 b */
+ uint32_t info2;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ uint32_t info3_v;
+ #define CMPL_BASE_V UINT32_C(0x1)
+ #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe)
+ #define CMPL_BASE_INFO3_SFT 1
+ /* info4 is 32 b */
+ uint32_t info4;
+} __attribute__((packed));
+
+/* tx_cmpl (size:128b/16B) */
+struct tx_cmpl {
+ uint16_t flags_type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define TX_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define TX_CMPL_TYPE_SFT 0
+ /*
+ * TX L2 completion:
+ * Completion of TX packet. Length = 16B
+ */
+ #define TX_CMPL_TYPE_TX_L2 UINT32_C(0x0)
+ #define TX_CMPL_TYPE_LAST TX_CMPL_TYPE_TX_L2
+ #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_CMPL_FLAGS_SFT 6
+ /*
+ * When this bit is '1', it indicates a packet that has an
+ * error of some type. Type of error is indicated in
+ * error_flags.
+ */
+ #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ /*
+ * When this bit is '1', it indicates that the packet completed
+ * was transmitted using the push acceleration data provided
+ * by the driver. When this bit is '0', it indicates that the
+ * packet had not push acceleration data written or was executed
+ * as a normal packet even though push data was provided.
+ */
+ #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80)
+ /* unused1 is 16 b */
+ uint16_t unused_0;
+ /*
+ * This is a copy of the opaque field from the first TX BD of this
+ * transmitted packet.
+ */
+ uint32_t opaque;
+ uint16_t errors_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define TX_CMPL_V UINT32_C(0x1)
+ #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define TX_CMPL_ERRORS_SFT 1
+ /*
+ * This error indicates that there was some sort of problem
+ * with the BDs for the packet.
+ */
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ /* No error */
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1)
+ /*
+ * Bad Format:
+ * BDs were not formatted correctly.
+ */
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1)
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT
+ /*
+ * When this bit is '1', it indicates that the length of
+ * the packet was zero. No packet was transmitted.
+ */
+ #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10)
+ /*
+ * When this bit is '1', it indicates that the packet
+ * was longer than the programmed limit in TDI. No
+ * packet was transmitted.
+ */
+ #define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20)
+ /*
+ * When this bit is '1', it indicates that one or more of the
+ * BDs associated with this packet generated a PCI error.
+ * This probably means the address was not valid.
+ */
+ #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40)
+ /*
+ * When this bit is '1', it indicates that the packet was longer
+ * than indicated by the hint. No packet was transmitted.
+ */
+ #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80)
+ /*
+ * When this bit is '1', it indicates that the packet was
+ * dropped due to Poison TLP error on one or more of the
+ * TLPs in the PXP completion.
+ */
+ #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100)
+ /* unused2 is 16 b */
+ uint16_t unused_1;
+ /* unused3 is 32 b */
+ uint32_t unused_2;
+} __attribute__((packed));
+
+/* rx_pkt_cmpl (size:128b/16B) */
+struct rx_pkt_cmpl {
+ uint16_t flags_type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PKT_CMPL_TYPE_SFT 0
+ /*
+ * RX L2 completion:
+ * Completion of and L2 RX packet. Length = 32B
+ */
+ #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ #define RX_PKT_CMPL_TYPE_LAST RX_PKT_CMPL_TYPE_RX_L2
+ #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PKT_CMPL_FLAGS_SFT 6
+ /*
+ * When this bit is '1', it indicates a packet that has an
+ * error of some type. Type of error is indicated in
+ * error_flags.
+ */
+ #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ /* This field indicates how the packet was placed in the buffer. */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7
+ /*
+ * Normal:
+ * Packet was placed using normal algorithm.
+ */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7)
+ /*
+ * Jumbo:
+ * Packet was placed using jumbo algorithm.
+ */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation:
+ * Packet was placed using Header/Data separation algorithm.
+ * The separation location is indicated by the itype field.
+ */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_PKT_CMPL_FLAGS_PLACEMENT_HDS
+ /* This bit is '1' if the RSS field in this completion is valid. */
+ #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
+ /* unused is 1 b */
+ #define RX_PKT_CMPL_FLAGS_UNUSED UINT32_C(0x800)
+ /*
+ * This value indicates what the inner packet determined for the
+ * packet was.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12
+ /*
+ * Not Known:
+ * Indicates that the packet type was not known.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN \
+ (UINT32_C(0x0) << 12)
+ /*
+ * IP Packet:
+ * Indicates that the packet was an IP packet, but further
+ * classification was not possible.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_IP \
+ (UINT32_C(0x1) << 12)
+ /*
+ * TCP Packet:
+ * Indicates that the packet was IP and TCP.
+ * This indicates that the payload_offset field is valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_TCP \
+ (UINT32_C(0x2) << 12)
+ /*
+ * UDP Packet:
+ * Indicates that the packet was IP and UDP.
+ * This indicates that the payload_offset field is valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_UDP \
+ (UINT32_C(0x3) << 12)
+ /*
+ * FCoE Packet:
+ * Indicates that the packet was recognized as a FCoE.
+ * This also indicates that the payload_offset field is valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE \
+ (UINT32_C(0x4) << 12)
+ /*
+ * RoCE Packet:
+ * Indicates that the packet was recognized as a RoCE.
+ * This also indicates that the payload_offset field is valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE \
+ (UINT32_C(0x5) << 12)
+ /*
+ * ICMP Packet:
+ * Indicates that the packet was recognized as ICMP.
+ * This indicates that the payload_offset field is valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP \
+ (UINT32_C(0x7) << 12)
+ /*
+ * PtP packet wo/timestamp:
+ * Indicates that the packet was recognized as a PtP
+ * packet.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP \
+ (UINT32_C(0x8) << 12)
+ /*
+ * PtP packet w/timestamp:
+ * Indicates that the packet was recognized as a PtP
+ * packet and that a timestamp was taken for the packet.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP \
+ (UINT32_C(0x9) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_LAST \
+ RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP
+ /*
+ * This is the length of the data for the packet stored in the
+ * buffer(s) identified by the opaque value. This includes
+ * the packet BD and any associated buffer BDs. This does not include
+ * the the length of any data places in aggregation BDs.
+ */
+ uint16_t len;
+ /*
+ * This is a copy of the opaque field from the RX BD this completion
+ * corresponds to.
+ */
+ uint32_t opaque;
+ uint8_t agg_bufs_v1;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define RX_PKT_CMPL_V1 UINT32_C(0x1)
+ /*
+ * This value is the number of aggregation buffers that follow this
+ * entry in the completion ring that are a part of this packet.
+ * If the value is zero, then the packet is completely contained
+ * in the buffer space provided for the packet in the RX ring.
+ */
+ #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e)
+ #define RX_PKT_CMPL_AGG_BUFS_SFT 1
+ /* unused1 is 2 b */
+ #define RX_PKT_CMPL_UNUSED1_MASK UINT32_C(0xc0)
+ #define RX_PKT_CMPL_UNUSED1_SFT 6
+ /*
+ * This is the RSS hash type for the packet. The value is packed
+ * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]}.
+ *
+ * The value of tuple_extrac_op provides the information about
+ * what fields the hash was computed on.
+ * * 0: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of inner
+ * IP and TCP or UDP headers. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 1: The RSS hash was computed over source IP address and destination
+ * IP address of inner IP header. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 2: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of
+ * IP and TCP or UDP headers of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ * * 3: The RSS hash was computed over source IP address and
+ * destination IP address of IP header of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ *
+ * Note that 4-tuples values listed above are applicable
+ * for layer 4 protocols supported and enabled for RSS in the hardware,
+ * HWRM firmware, and drivers. For example, if RSS hash is supported and
+ * enabled for TCP traffic only, then the values of tuple_extract_op
+ * corresponding to 4-tuples are only valid for TCP traffic.
+ */
+ uint8_t rss_hash_type;
+ /*
+ * This value indicates the offset in bytes from the beginning of the packet
+ * where the inner payload starts. This value is valid for TCP, UDP,
+ * FCoE, and RoCE packets.
+ *
+ * A value of zero indicates that header is 256B into the packet.
+ */
+ uint8_t payload_offset;
+ /* unused2 is 8 b */
+ uint8_t unused1;
+ /*
+ * This value is the RSS hash value calculated for the packet
+ * based on the mode bits and key value in the VNIC.
+ */
+ uint32_t rss_hash;
+} __attribute__((packed));
+
+/* rx_pkt_cmpl_hi (size:128b/16B) */
+struct rx_pkt_cmpl_hi {
+ uint32_t flags2;
+ /*
+ * This indicates that the ip checksum was calculated for the
+ * inner packet and that the ip_cs_error field indicates if there
+ * was an error.
+ */
+ #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+ /*
+ * This indicates that the TCP, UDP or ICMP checksum was
+ * calculated for the inner packet and that the l4_cs_error field
+ * indicates if there was an error.
+ */
+ #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+ /*
+ * This indicates that the ip checksum was calculated for the
+ * tunnel header and that the t_ip_cs_error field indicates if there
+ * was an error.
+ */
+ #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
+ /*
+ * This indicates that the UDP checksum was
+ * calculated for the tunnel packet and that the t_l4_cs_error field
+ * indicates if there was an error.
+ */
+ #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+ /* This value indicates what format the metadata field is. */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4
+ /* No metadata informtaion. Value is zero. */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
+ /*
+ * The metadata field contains the VLAN tag and TPID value.
+ * - metadata[11:0] contains the vlan VID value.
+ * - metadata[12] contains the vlan DE value.
+ * - metadata[15:13] contains the vlan PRI value.
+ * - metadata[31:16] contains the vlan TPID value.
+ */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \
+ RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
+ /*
+ * This field indicates the IP type for the inner-most IP header.
+ * A value of '0' indicates IPv4. A value of '1' indicates IPv6.
+ * This value is only valid if itype indicates a packet
+ * with an IP header.
+ */
+ #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
+ /*
+ * This is data from the CFA block as indicated by the meta_format
+ * field.
+ */
+ uint32_t metadata;
+ /* When meta_format=1, this value is the VLAN VID. */
+ #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+ #define RX_PKT_CMPL_METADATA_VID_SFT 0
+ /* When meta_format=1, this value is the VLAN DE. */
+ #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000)
+ /* When meta_format=1, this value is the VLAN PRI. */
+ #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
+ #define RX_PKT_CMPL_METADATA_PRI_SFT 13
+ /* When meta_format=1, this value is the VLAN TPID. */
+ #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
+ #define RX_PKT_CMPL_METADATA_TPID_SFT 16
+ uint16_t errors_v2;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define RX_PKT_CMPL_V2 \
+ UINT32_C(0x1)
+ #define RX_PKT_CMPL_ERRORS_MASK \
+ UINT32_C(0xfffe)
+ #define RX_PKT_CMPL_ERRORS_SFT 1
+ /*
+ * This error indicates that there was some sort of problem with
+ * the BDs for the packet that was found after part of the
+ * packet was already placed. The packet should be treated as
+ * invalid.
+ */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK \
+ UINT32_C(0xe)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ /* No buffer error */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER \
+ (UINT32_C(0x0) << 1)
+ /*
+ * Did Not Fit:
+ * Packet did not fit into packet buffer provided.
+ * For regular placement, this means the packet did not fit
+ * in the buffer provided. For HDS and jumbo placement, this
+ * means that the packet could not be placed into 7 physical
+ * buffers or less.
+ */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \
+ (UINT32_C(0x1) << 1)
+ /*
+ * Not On Chip:
+ * All BDs needed for the packet were not on-chip when
+ * the packet arrived.
+ */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
+ (UINT32_C(0x2) << 1)
+ /*
+ * Bad Format:
+ * BDs were not formatted correctly.
+ */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \
+ (UINT32_C(0x3) << 1)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT
+ /*
+ * This indicates that there was an error in the IP header
+ * checksum.
+ */
+ #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR \
+ UINT32_C(0x10)
+ /*
+ * This indicates that there was an error in the TCP, UDP
+ * or ICMP checksum.
+ */
+ #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR \
+ UINT32_C(0x20)
+ /*
+ * This indicates that there was an error in the tunnel
+ * IP header checksum.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR \
+ UINT32_C(0x40)
+ /*
+ * This indicates that there was an error in the tunnel
+ * UDP checksum.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR \
+ UINT32_C(0x80)
+ /*
+ * This indicates that there was a CRC error on either an FCoE
+ * or RoCE packet. The itype indicates the packet type.
+ */
+ #define RX_PKT_CMPL_ERRORS_CRC_ERROR \
+ UINT32_C(0x100)
+ /*
+ * This indicates that there was an error in the tunnel
+ * portion of the packet when this
+ * field is non-zero.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK \
+ UINT32_C(0xe00)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9
+ /*
+ * No additional error occurred on the tunnel portion
+ * of the packet of the packet does not have a tunnel.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR \
+ (UINT32_C(0x0) << 9)
+ /*
+ * Indicates that IP header version does not match
+ * expectation from L2 Ethertype for IPv4 and IPv6
+ * in the tunnel header.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \
+ (UINT32_C(0x1) << 9)
+ /*
+ * Indicates that header length is out of range in the
+ * tunnel header. Valid for
+ * IPv4.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \
+ (UINT32_C(0x2) << 9)
+ /*
+ * Indicates that the physical packet is shorter than that
+ * claimed by the PPPoE header length for a tunnel PPPoE
+ * packet.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \
+ (UINT32_C(0x3) << 9)
+ /*
+ * Indicates that physical packet is shorter than that claimed
+ * by the tunnel l3 header length. Valid for IPv4, or IPv6
+ * tunnel packet packets.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \
+ (UINT32_C(0x4) << 9)
+ /*
+ * Indicates that the physical packet is shorter than that
+ * claimed by the tunnel UDP header length for a tunnel
+ * UDP packet that is not fragmented.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \
+ (UINT32_C(0x5) << 9)
+ /*
+ * indicates that the IPv4 TTL or IPv6 hop limit check
+ * have failed (e.g. TTL = 0) in the tunnel header. Valid
+ * for IPv4, and IPv6.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \
+ (UINT32_C(0x6) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \
+ RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
+ /*
+ * This indicates that there was an error in the inner
+ * portion of the packet when this
+ * field is non-zero.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK \
+ UINT32_C(0xf000)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12
+ /*
+ * No additional error occurred on the tunnel portion
+ * of the packet of the packet does not have a tunnel.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR \
+ (UINT32_C(0x0) << 12)
+ /*
+ * Indicates that IP header version does not match
+ * expectation from L2 Ethertype for IPv4 and IPv6 or that
+ * option other than VFT was parsed on
+ * FCoE packet.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION \
+ (UINT32_C(0x1) << 12)
+ /*
+ * indicates that header length is out of range. Valid for
+ * IPv4 and RoCE
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \
+ (UINT32_C(0x2) << 12)
+ /*
+ * indicates that the IPv4 TTL or IPv6 hop limit check
+ * have failed (e.g. TTL = 0). Valid for IPv4, and IPv6
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL \
+ (UINT32_C(0x3) << 12)
+ /*
+ * Indicates that physical packet is shorter than that
+ * claimed by the l3 header length. Valid for IPv4,
+ * IPv6 packet or RoCE packets.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \
+ (UINT32_C(0x4) << 12)
+ /*
+ * Indicates that the physical packet is shorter than that
+ * claimed by the UDP header length for a UDP packet that is
+ * not fragmented.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \
+ (UINT32_C(0x5) << 12)
+ /*
+ * Indicates that TCP header length > IP payload. Valid for
+ * TCP packets only.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \
+ (UINT32_C(0x6) << 12)
+ /* Indicates that TCP header length < 5. Valid for TCP. */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \
+ (UINT32_C(0x7) << 12)
+ /*
+ * Indicates that TCP option headers result in a TCP header
+ * size that does not match data offset in TCP header. Valid
+ * for TCP.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \
+ (UINT32_C(0x8) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \
+ RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
+ /*
+ * This field identifies the CFA action rule that was used for this
+ * packet.
+ */
+ uint16_t cfa_code;
+ uint32_t reorder;
+ /*
+ * This value holds the reordering sequence number for the packet.
+ * If the reordering sequence is not valid, then this value is zero.
+ * The reordering domain for the packet is in the bottom 8 to 10b of
+ * the rss_hash value. The bottom 20b of this value contain the
+ * ordering domain value for the packet.
+ */
+ #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff)
+ #define RX_PKT_CMPL_REORDER_SFT 0
+} __attribute__((packed));
+
+/* rx_tpa_start_cmpl (size:128b/16B) */
+struct rx_tpa_start_cmpl {
+ uint16_t flags_type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_TPA_START_CMPL_TYPE_SFT 0
+ /*
+ * RX L2 TPA Start Completion:
+ * Completion at the beginning of a TPA operation.
+ * Length = 32B
+ */
+ #define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13)
+ #define RX_TPA_START_CMPL_TYPE_LAST \
+ RX_TPA_START_CMPL_TYPE_RX_TPA_START
+ #define RX_TPA_START_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_TPA_START_CMPL_FLAGS_SFT 6
+ /* This bit will always be '0' for TPA start completions. */
+ #define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ /* This field indicates how the packet was placed in the buffer. */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7
+ /*
+ * Jumbo:
+ * TPA Packet was placed using jumbo algorithm. This means
+ * that the first buffer will be filled with data before
+ * moving to aggregation buffers. Each aggregation buffer
+ * will be filled before moving to the next aggregation
+ * buffer.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO \
+ (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation:
+ * Packet was placed using Header/Data separation algorithm.
+ * The separation location is indicated by the itype field.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS \
+ (UINT32_C(0x2) << 7)
+ /*
+ * GRO/Jumbo:
+ * Packet will be placed using GRO/Jumbo where the first
+ * packet is filled with data. Subsequent packets will be
+ * placed such that any one packet does not span two
+ * aggregation buffers unless it starts at the beginning of
+ * an aggregation buffer.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \
+ (UINT32_C(0x5) << 7)
+ /*
+ * GRO/Header-Data Separation:
+ * Packet will be placed using GRO/HDS where the header
+ * is in the first packet.
+ * Payload of each packet will be
+ * placed such that any one packet does not span two
+ * aggregation buffers unless it starts at the beginning of
+ * an aggregation buffer.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS \
+ (UINT32_C(0x6) << 7)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS
+ /* This bit is '1' if the RSS field in this completion is valid. */
+ #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
+ /* unused is 1 b */
+ #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800)
+ /*
+ * This value indicates what the inner packet determined for the
+ * packet was.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_SFT 12
+ /*
+ * TCP Packet:
+ * Indicates that the packet was IP and TCP.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_TCP \
+ (UINT32_C(0x2) << 12)
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_LAST \
+ RX_TPA_START_CMPL_FLAGS_ITYPE_TCP
+ /*
+ * This value indicates the amount of packet data written to the
+ * buffer the opaque field in this completion corresponds to.
+ */
+ uint16_t len;
+ /*
+ * This is a copy of the opaque field from the RX BD this completion
+ * corresponds to.
+ */
+ uint32_t opaque;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ uint8_t v1;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_START_CMPL_V1 UINT32_C(0x1)
+ #define RX_TPA_START_CMPL_LAST RX_TPA_START_CMPL_V1
+ /*
+ * This is the RSS hash type for the packet. The value is packed
+ * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]}.
+ *
+ * The value of tuple_extrac_op provides the information about
+ * what fields the hash was computed on.
+ * * 0: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of inner
+ * IP and TCP or UDP headers. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 1: The RSS hash was computed over source IP address and destination
+ * IP address of inner IP header. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 2: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of
+ * IP and TCP or UDP headers of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ * * 3: The RSS hash was computed over source IP address and
+ * destination IP address of IP header of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ *
+ * Note that 4-tuples values listed above are applicable
+ * for layer 4 protocols supported and enabled for RSS in the hardware,
+ * HWRM firmware, and drivers. For example, if RSS hash is supported and
+ * enabled for TCP traffic only, then the values of tuple_extract_op
+ * corresponding to 4-tuples are only valid for TCP traffic.
+ */
+ uint8_t rss_hash_type;
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ uint16_t agg_id;
+ /* unused2 is 9 b */
+ #define RX_TPA_START_CMPL_UNUSED2_MASK UINT32_C(0x1ff)
+ #define RX_TPA_START_CMPL_UNUSED2_SFT 0
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ #define RX_TPA_START_CMPL_AGG_ID_MASK UINT32_C(0xfe00)
+ #define RX_TPA_START_CMPL_AGG_ID_SFT 9
+ /*
+ * This value is the RSS hash value calculated for the packet
+ * based on the mode bits and key value in the VNIC.
+ */
+ uint32_t rss_hash;
+} __attribute__((packed));
+
+/* rx_tpa_start_cmpl_hi (size:128b/16B) */
+struct rx_tpa_start_cmpl_hi {
+ uint32_t flags2;
+ /*
+ * This indicates that the ip checksum was calculated for the
+ * inner packet and that the sum passed for all segments
+ * included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+ /*
+ * This indicates that the TCP, UDP or ICMP checksum was
+ * calculated for the inner packet and that the sum passed
+ * for all segments included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+ /*
+ * This indicates that the ip checksum was calculated for the
+ * tunnel header and that the sum passed for all segments
+ * included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
+ /*
+ * This indicates that the UDP checksum was
+ * calculated for the tunnel packet and that the sum passed for
+ * all segments included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+ /* This value indicates what format the metadata field is. */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT 4
+ /* No metadata informtaion. Value is zero. */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE \
+ (UINT32_C(0x0) << 4)
+ /*
+ * The metadata field contains the VLAN tag and TPID value.
+ * - metadata[11:0] contains the vlan VID value.
+ * - metadata[12] contains the vlan DE value.
+ * - metadata[15:13] contains the vlan PRI value.
+ * - metadata[31:16] contains the vlan TPID value.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN \
+ (UINT32_C(0x1) << 4)
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_LAST \
+ RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN
+ /*
+ * This field indicates the IP type for the inner-most IP header.
+ * A value of '0' indicates IPv4. A value of '1' indicates IPv6.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
+ /*
+ * This is data from the CFA block as indicated by the meta_format
+ * field.
+ */
+ uint32_t metadata;
+ /* When meta_format=1, this value is the VLAN VID. */
+ #define RX_TPA_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+ #define RX_TPA_START_CMPL_METADATA_VID_SFT 0
+ /* When meta_format=1, this value is the VLAN DE. */
+ #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000)
+ /* When meta_format=1, this value is the VLAN PRI. */
+ #define RX_TPA_START_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
+ #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13
+ /* When meta_format=1, this value is the VLAN TPID. */
+ #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
+ #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16
+ uint16_t v2;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_START_CMPL_V2 UINT32_C(0x1)
+ /*
+ * This field identifies the CFA action rule that was used for this
+ * packet.
+ */
+ uint16_t cfa_code;
+ /*
+ * This is the size in bytes of the inner most L4 header.
+ * This can be subtracted from the payload_offset to determine
+ * the start of the inner most L4 header.
+ */
+ uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset;
+ /*
+ * This is the offset from the beginning of the packet in bytes for
+ * the outer L3 header. If there is no outer L3 header, then this
+ * value is zero.
+ */
+ #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff)
+ #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0
+ /*
+ * This is the offset from the beginning of the packet in bytes for
+ * the inner most L2 header.
+ */
+ #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00)
+ #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9
+ /*
+ * This is the offset from the beginning of the packet in bytes for
+ * the inner most L3 header.
+ */
+ #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000)
+ #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18
+ /*
+ * This is the size in bytes of the inner most L4 header.
+ * This can be subtracted from the payload_offset to determine
+ * the start of the inner most L4 header.
+ */
+ #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000)
+ #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27
+} __attribute__((packed));
+
+/* rx_tpa_end_cmpl (size:128b/16B) */
+struct rx_tpa_end_cmpl {
+ uint16_t flags_type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_TPA_END_CMPL_TYPE_SFT 0
+ /*
+ * RX L2 TPA End Completion:
+ * Completion at the end of a TPA operation.
+ * Length = 32B
+ */
+ #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15)
+ #define RX_TPA_END_CMPL_TYPE_LAST \
+ RX_TPA_END_CMPL_TYPE_RX_TPA_END
+ #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_TPA_END_CMPL_FLAGS_SFT 6
+ /*
+ * When this bit is '1', it indicates a packet that has an
+ * error of some type. Type of error is indicated in
+ * error_flags.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ /* This field indicates how the packet was placed in the buffer. */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7
+ /*
+ * Jumbo:
+ * TPA Packet was placed using jumbo algorithm. This means
+ * that the first buffer will be filled with data before
+ * moving to aggregation buffers. Each aggregation buffer
+ * will be filled before moving to the next aggregation
+ * buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO \
+ (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation:
+ * Packet was placed using Header/Data separation algorithm.
+ * The separation location is indicated by the itype field.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS \
+ (UINT32_C(0x2) << 7)
+ /*
+ * GRO/Jumbo:
+ * Packet will be placed using GRO/Jumbo where the first
+ * packet is filled with data. Subsequent packets will be
+ * placed such that any one packet does not span two
+ * aggregation buffers unless it starts at the beginning of
+ * an aggregation buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \
+ (UINT32_C(0x5) << 7)
+ /*
+ * GRO/Header-Data Separation:
+ * Packet will be placed using GRO/HDS where the header
+ * is in the first packet.
+ * Payload of each packet will be
+ * placed such that any one packet does not span two
+ * aggregation buffers unless it starts at the beginning of
+ * an aggregation buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS \
+ (UINT32_C(0x6) << 7)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS
+ /* unused is 2 b */
+ #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00)
+ #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10
+ /*
+ * This value indicates what the inner packet determined for the
+ * packet was.
+ * - 2 TCP Packet
+ * Indicates that the packet was IP and TCP. This indicates
+ * that the ip_cs field is valid and that the tcp_udp_cs
+ * field is valid and contains the TCP checksum.
+ * This also indicates that the payload_offset field is valid.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12
+ /*
+ * This value is zero for TPA End completions.
+ * There is no data in the buffer that corresponds to the opaque
+ * value in this completion.
+ */
+ uint16_t len;
+ /*
+ * This is a copy of the opaque field from the RX BD this completion
+ * corresponds to.
+ */
+ uint32_t opaque;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ uint8_t agg_bufs_v1;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_END_CMPL_V1 UINT32_C(0x1)
+ /*
+ * This value is the number of aggregation buffers that follow this
+ * entry in the completion ring that are a part of this aggregation
+ * packet.
+ * If the value is zero, then the packet is completely contained
+ * in the buffer space provided in the aggregation start completion.
+ */
+ #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e)
+ #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1
+ /* This value is the number of segments in the TPA operation. */
+ uint8_t tpa_segs;
+ /*
+ * This value indicates the offset in bytes from the beginning of the packet
+ * where the inner payload starts. This value is valid for TCP, UDP,
+ * FCoE, and RoCE packets.
+ *
+ * A value of zero indicates an offset of 256 bytes.
+ */
+ uint8_t payload_offset;
+ uint8_t agg_id;
+ /* unused2 is 1 b */
+ #define RX_TPA_END_CMPL_UNUSED2 UINT32_C(0x1)
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ #define RX_TPA_END_CMPL_AGG_ID_MASK UINT32_C(0xfe)
+ #define RX_TPA_END_CMPL_AGG_ID_SFT 1
+ /*
+ * For non-GRO packets, this value is the
+ * timestamp delta between earliest and latest timestamp values for
+ * TPA packet. If packets were not time stamped, then delta will be
+ * zero.
+ *
+ * For GRO packets, this field is zero except for the following
+ * sub-fields.
+ * - tsdelta[31]
+ * Timestamp present indication. When '0', no Timestamp
+ * option is in the packet. When '1', then a Timestamp
+ * option is present in the packet.
+ */
+ uint32_t tsdelta;
+} __attribute__((packed));
+
+/* rx_tpa_end_cmpl_hi (size:128b/16B) */
+struct rx_tpa_end_cmpl_hi {
+ /*
+ * This value is the number of duplicate ACKs that have been
+ * received as part of the TPA operation.
+ */
+ uint32_t tpa_dup_acks;
+ /*
+ * This value is the number of duplicate ACKs that have been
+ * received as part of the TPA operation.
+ */
+ #define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf)
+ #define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT 0
+ /*
+ * This value is the valid when TPA completion is active. It
+ * indicates the length of the longest segment of the TPA operation
+ * for LRO mode and the length of the first segment in GRO mode.
+ *
+ * This value may be used by GRO software to re-construct the original
+ * packet stream from the TPA packet. This is the length of all
+ * but the last segment for GRO. In LRO mode this value may be used
+ * to indicate MSS size to the stack.
+ */
+ uint16_t tpa_seg_len;
+ /* unused4 is 16 b */
+ uint16_t unused3;
+ uint16_t errors_v2;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_END_CMPL_V2 UINT32_C(0x1)
+ #define RX_TPA_END_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define RX_TPA_END_CMPL_ERRORS_SFT 1
+ /*
+ * This error indicates that there was some sort of problem with
+ * the BDs for the packet that was found after part of the
+ * packet was already placed. The packet should be treated as
+ * invalid.
+ */
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ /*
+ * This error occurs when there is a fatal HW problem in
+ * the chip only. It indicates that there were not
+ * BDs on chip but that there was adequate reservation.
+ * provided by the TPA block.
+ */
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
+ (UINT32_C(0x2) << 1)
+ /*
+ * This error occurs when TPA block was not configured to
+ * reserve adequate BDs for TPA operations on this RX
+ * ring. All data for the TPA operation was not placed.
+ *
+ * This error can also be generated when the number of
+ * segments is not programmed correctly in TPA and the
+ * 33 total aggregation buffers allowed for the TPA
+ * operation has been exceeded.
+ */
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR \
+ (UINT32_C(0x4) << 1)
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR
+ /* unused5 is 16 b */
+ uint16_t unused_4;
+ /*
+ * This is the opaque value that was completed for the TPA start
+ * completion that corresponds to this TPA end completion.
+ */
+ uint32_t start_opaque;
+} __attribute__((packed));
+
+/* rx_abuf_cmpl (size:128b/16B) */
+struct rx_abuf_cmpl {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define RX_ABUF_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_ABUF_CMPL_TYPE_SFT 0
+ /*
+ * RX Aggregation Buffer completion :
+ * Completion of an L2 aggregation buffer in support of
+ * TPA, HDS, or Jumbo packet completion. Length = 16B
+ */
+ #define RX_ABUF_CMPL_TYPE_RX_AGG UINT32_C(0x12)
+ #define RX_ABUF_CMPL_TYPE_LAST RX_ABUF_CMPL_TYPE_RX_AGG
+ /*
+ * This is the length of the data for the packet stored in this
+ * aggregation buffer identified by the opaque value. This does not
+ * include the length of any
+ * data placed in other aggregation BDs or in the packet or buffer
+ * BDs. This length does not include any space added due to
+ * hdr_offset register during HDS placement mode.
+ */
+ uint16_t len;
+ /*
+ * This is a copy of the opaque field from the RX BD this aggregation
+ * buffer corresponds to.
+ */
+ uint32_t opaque;
+ uint32_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define RX_ABUF_CMPL_V UINT32_C(0x1)
+ /* unused3 is 32 b */
+ uint32_t unused_2;
+} __attribute__((packed));
+
+/* eject_cmpl (size:128b/16B) */
+struct eject_cmpl {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define EJECT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define EJECT_CMPL_TYPE_SFT 0
+ /*
+ * Statistics Ejection Completion:
+ * Completion of statistics data ejection buffer.
+ * Length = 16B
+ */
+ #define EJECT_CMPL_TYPE_STAT_EJECT UINT32_C(0x1a)
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ /*
+ * This is the length of the statistics data stored in this
+ * buffer.
+ */
+ uint16_t len;
+ /*
+ * This is a copy of the opaque field from the RX BD this ejection
+ * buffer corresponds to.
+ */
+ uint32_t opaque;
+ uint32_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define EJECT_CMPL_V UINT32_C(0x1)
+ /* unused3 is 32 b */
+ uint32_t unused_2;
+} __attribute__((packed));
+
+/* hwrm_cmpl (size:128b/16B) */
+struct hwrm_cmpl {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_CMPL_TYPE_SFT 0
+ /*
+ * HWRM Command Completion:
+ * Completion of an HWRM command.
+ */
+ #define HWRM_CMPL_TYPE_HWRM_DONE UINT32_C(0x20)
+ #define HWRM_CMPL_TYPE_LAST HWRM_CMPL_TYPE_HWRM_DONE
+ /* This is the sequence_id of the HWRM command that has completed. */
+ uint16_t sequence_id;
+ /* unused2 is 32 b */
+ uint32_t unused_1;
+ uint32_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_CMPL_V UINT32_C(0x1)
+ /* unused4 is 32 b */
+ uint32_t unused_3;
+} __attribute__((packed));
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
+struct hwrm_fwd_req_cmpl {
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ uint16_t req_len_type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_FWD_REQ_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
+ /* Forwarded HWRM Request */
+ #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ UINT32_C(0x22)
+ #define HWRM_FWD_REQ_CMPL_TYPE_LAST \
+ HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
+ /* Length of forwarded request in bytes. */
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0)
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
+ /*
+ * Source ID of this request.
+ * Typically used in forwarding requests and responses.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t source_id;
+ /* unused1 is 32 b */
+ uint32_t unused0;
+ /* Address of forwarded request. */
+ uint32_t req_buf_addr_v[2];
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_FWD_REQ_CMPL_V UINT32_C(0x1)
+ /* Address of forwarded request. */
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe)
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+} __attribute__((packed));
+
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
+struct hwrm_fwd_resp_cmpl {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_FWD_RESP_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_FWD_RESP_CMPL_TYPE_SFT 0
+ /* Forwarded HWRM Response */
+ #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP UINT32_C(0x24)
+ #define HWRM_FWD_RESP_CMPL_TYPE_LAST \
+ HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+ /*
+ * Source ID of this response.
+ * Typically used in forwarding requests and responses.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t source_id;
+ /* Length of forwarded response in bytes. */
+ uint16_t resp_len;
+ /* unused2 is 16 b */
+ uint16_t unused_1;
+ /* Address of forwarded request. */
+ uint32_t resp_buf_addr_v[2];
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_FWD_RESP_CMPL_V UINT32_C(0x1)
+ /* Address of forwarded request. */
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK UINT32_C(0xfffffffe)
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
+struct hwrm_async_event_cmpl {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link status changed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE \
+ UINT32_C(0x0)
+ /* Link MTU changed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE \
+ UINT32_C(0x1)
+ /* Link speed changed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE \
+ UINT32_C(0x2)
+ /* DCB Configuration changed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE \
+ UINT32_C(0x3)
+ /* Port connection not allowed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \
+ UINT32_C(0x4)
+ /* Link speed configuration was not allowed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \
+ UINT32_C(0x5)
+ /* Link speed configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \
+ UINT32_C(0x6)
+ /* Port PHY configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE \
+ UINT32_C(0x7)
+ /* Function driver unloaded */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD \
+ UINT32_C(0x10)
+ /* Function driver loaded */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD \
+ UINT32_C(0x11)
+ /* Function FLR related processing has completed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT \
+ UINT32_C(0x12)
+ /* PF driver unloaded */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD \
+ UINT32_C(0x20)
+ /* PF driver loaded */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD \
+ UINT32_C(0x21)
+ /* VF Function Level Reset (FLR) */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR \
+ UINT32_C(0x30)
+ /* VF MAC Address Change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE \
+ UINT32_C(0x31)
+ /* PF-VF communication channel status change. */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \
+ UINT32_C(0x32)
+ /* VF Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE \
+ UINT32_C(0x33)
+ /* LLFC/PFC Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE \
+ UINT32_C(0x34)
+ /* Default VNIC Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE \
+ UINT32_C(0x35)
+ /* HWRM Error */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR \
+ UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_status_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link status changed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE \
+ UINT32_C(0x0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Indicates link status change */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE \
+ UINT32_C(0x1)
+ /*
+ * If this bit set to 0, then it indicates that the link
+ * was up and it went down.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN \
+ UINT32_C(0x0)
+ /*
+ * If this bit is set to 1, then it indicates that the link
+ * was down and it went up.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP \
+ UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ /* Indicates the physical port this link status change occur */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0xe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT \
+ 1
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 4
+ /* Indicates the physical function this event occured on. */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK \
+ UINT32_C(0xff00000)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT \
+ 20
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_mtu_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link MTU changed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE \
+ UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* The new MTU of the link in bytes. */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_link_speed_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link speed changed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /*
+ * When this bit is '1', the link was forced to the
+ * force_link_speed value.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE \
+ UINT32_C(0x1)
+ /* The new link speed in 100 Mbps units. */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK \
+ UINT32_C(0xfffe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT \
+ 1
+ /* 100Mb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB \
+ (UINT32_C(0x1) << 1)
+ /* 1Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB \
+ (UINT32_C(0xa) << 1)
+ /* 2Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB \
+ (UINT32_C(0x14) << 1)
+ /* 25Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB \
+ (UINT32_C(0x19) << 1)
+ /* 10Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB \
+ (UINT32_C(0x64) << 1)
+ /* 20Mb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB \
+ (UINT32_C(0xc8) << 1)
+ /* 25Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB \
+ (UINT32_C(0xfa) << 1)
+ /* 40Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB \
+ (UINT32_C(0x190) << 1)
+ /* 50Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB \
+ (UINT32_C(0x1f4) << 1)
+ /* 100Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB \
+ (UINT32_C(0x3e8) << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 16
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_dcb_config_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_dcb_config_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* DCB Configuration changed */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE \
+ UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ /* ETS configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS \
+ UINT32_C(0x1)
+ /* PFC configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC \
+ UINT32_C(0x2)
+ /* APP configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP \
+ UINT32_C(0x4)
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 0
+ /* Priority recommended for RoCE traffic */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK \
+ UINT32_C(0xff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT \
+ 16
+ /* none is 255 */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE \
+ (UINT32_C(0xff) << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
+ /* Priority recommended for L2 traffic */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT \
+ 24
+ /* none is 255 */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE \
+ (UINT32_C(0xff) << 24)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Port connection not allowed */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED \
+ UINT32_C(0x4)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT \
+ 0
+ /*
+ * This value indicates the current port level enforcement policy
+ * for the optics module when there is an optical module mismatch
+ * and port is not connected.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK \
+ UINT32_C(0xff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT \
+ 16
+ /* No enforcement */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE \
+ (UINT32_C(0x0) << 16)
+ /* Disable Transmit side Laser. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX \
+ (UINT32_C(0x1) << 16)
+ /* Raise a warning message. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG \
+ (UINT32_C(0x2) << 16)
+ /* Power down the module. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN \
+ (UINT32_C(0x3) << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_link_speed_cfg_not_allowed (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link speed configuration was not allowed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \
+ UINT32_C(0x5)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT \
+ 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link speed configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE \
+ UINT32_C(0x6)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 0
+ /*
+ * If set to 1, it indicates that the supported link speeds
+ * configuration on the port has changed.
+ * If set to 0, then there is no change in supported link speeds
+ * configuration.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE \
+ UINT32_C(0x10000)
+ /*
+ * If set to 1, it indicates that the link speed configuration
+ * on the port has become illegal or invalid.
+ * If set to 0, then the link speed configuration on the port is
+ * legal or valid.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG \
+ UINT32_C(0x20000)
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_port_phy_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_port_phy_cfg_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Port PHY configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE \
+ UINT32_C(0x7)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 0
+ /*
+ * If set to 1, it indicates that the FEC
+ * configuration on the port has changed.
+ * If set to 0, then there is no change in FEC configuration.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_FEC_CFG_CHANGE \
+ UINT32_C(0x10000)
+ /*
+ * If set to 1, it indicates that the EEE configuration
+ * on the port has changed.
+ * If set to 0, then there is no change in EEE configuration
+ * on the port.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_EEE_CFG_CHANGE \
+ UINT32_C(0x20000)
+ /*
+ * If set to 1, it indicates that the pause configuration
+ * on the PHY has changed.
+ * If set to 0, then there is no change in the pause
+ * configuration on the PHY.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PAUSE_CFG_CHANGE \
+ UINT32_C(0x40000)
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_func_drvr_unload (size:128b/16B) */
+struct hwrm_async_event_cmpl_func_drvr_unload {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Function driver unloaded */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD \
+ UINT32_C(0x10)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Function ID */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT \
+ 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_func_drvr_load (size:128b/16B) */
+struct hwrm_async_event_cmpl_func_drvr_load {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Function driver loaded */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD \
+ UINT32_C(0x11)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Function ID */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_func_flr_proc_cmplt (size:128b/16B) */
+struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Function FLR related processing has completed */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT \
+ UINT32_C(0x12)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Function ID */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT \
+ 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
+struct hwrm_async_event_cmpl_pf_drvr_unload {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* PF driver unloaded */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD \
+ UINT32_C(0x20)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+ /* Indicates the physical port this pf belongs to */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0x70000)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_pf_drvr_load (size:128b/16B) */
+struct hwrm_async_event_cmpl_pf_drvr_load {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* PF driver loaded */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD \
+ UINT32_C(0x21)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+ /* Indicates the physical port this pf belongs to */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0x70000)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_vf_flr (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_flr {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* VF Function Level Reset (FLR) */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR UINT32_C(0x30)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* VF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+ /* Indicates the physical function this event occured on. */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_MASK \
+ UINT32_C(0xff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_SFT 16
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_vf_mac_addr_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_mac_addr_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* VF MAC Address Change */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE \
+ UINT32_C(0x31)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* VF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT \
+ 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_pf_vf_comm_status_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* PF-VF communication channel status change. */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \
+ UINT32_C(0x32)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /*
+ * If this bit is set to 1, then it indicates that the PF-VF
+ * communication was lost and it is established.
+ * If this bit set to 0, then it indicates that the PF-VF
+ * communication was established and it is lost.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED \
+ UINT32_C(0x1)
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* VF Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE \
+ UINT32_C(0x33)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /*
+ * Each flag provided in this field indicates a specific VF
+ * configuration change. At least one of these flags shall be set to 1
+ * when an asynchronous event completion of this type is provided
+ * by the HWRM.
+ */
+ uint32_t event_data1;
+ /*
+ * If this bit is set to 1, then the value of MTU
+ * was changed on this VF.
+ * If set to 0, then this bit should be ignored.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE \
+ UINT32_C(0x1)
+ /*
+ * If this bit is set to 1, then the value of MRU
+ * was changed on this VF.
+ * If set to 0, then this bit should be ignored.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE \
+ UINT32_C(0x2)
+ /*
+ * If this bit is set to 1, then the value of default MAC
+ * address was changed on this VF.
+ * If set to 0, then this bit should be ignored.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE \
+ UINT32_C(0x4)
+ /*
+ * If this bit is set to 1, then the value of default VLAN
+ * was changed on this VF.
+ * If set to 0, then this bit should be ignored.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE \
+ UINT32_C(0x8)
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_llfc_pfc_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* unused1 is 10 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_MASK \
+ UINT32_C(0xffc0)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_SFT 6
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* LLFC/PFC Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE \
+ UINT32_C(0x34)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Indicates llfc pfc status change */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_MASK \
+ UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_SFT \
+ 0
+ /*
+ * If this field set to 1, then it indicates that llfc is
+ * enabled.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LLFC \
+ UINT32_C(0x1)
+ /*
+ * If this field is set to 2, then it indicates that pfc
+ * is enabled.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC
+ /* Indicates the physical port this llfc pfc change occur */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0x1c)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_SFT \
+ 2
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0x1fffe0)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 5
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_default_vnic_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* unused1 is 10 b */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK \
+ UINT32_C(0xffc0)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT \
+ 6
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Notification of a default vnic allocaiton or free */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION \
+ UINT32_C(0x35)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Indicates default vnic configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK \
+ UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT \
+ 0
+ /*
+ * If this field is set to 1, then it indicates that
+ * a default VNIC has been allocate.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC \
+ UINT32_C(0x1)
+ /*
+ * If this field is set to 2, then it indicates that
+ * a default VNIC has been freed.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
+ /* Indicates the physical function this event occured on. */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK \
+ UINT32_C(0x3fc)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT \
+ 2
+ /* Indicates the virtual function this event occured on */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK \
+ UINT32_C(0x3fffc00)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT \
+ 10
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* HWRM Error */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR \
+ UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
+ /* Event specific data */
+ uint32_t event_data2;
+ /* Severity of HWRM Error */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK \
+ UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ /* Warning */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING \
+ UINT32_C(0x0)
+ /* Non-fatal Error */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL \
+ UINT32_C(0x1)
+ /* Fatal Error */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST \
+ HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Time stamp for error event */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP \
+ UINT32_C(0x1)
+} __attribute__((packed));
+
+/*******************
+ * hwrm_func_reset *
+ *******************/
+
+
+/* hwrm_func_reset_input (size:192b/24B) */
+struct hwrm_func_reset_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the vf_id_valid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
+ /*
+ * The ID of the VF that this PF is trying to reset.
+ * Only the parent PF shall be allowed to reset a child VF.
+ *
+ * A parent PF driver shall use this field only when a specific child VF
+ * is requested to be reset.
+ */
+ uint16_t vf_id;
+ /* This value indicates the level of a function reset. */
+ uint8_t func_reset_level;
+ /*
+ * Reset the caller function and its children VFs (if any). If no
+ * children functions exist, then reset the caller function only.
+ */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL \
+ UINT32_C(0x0)
+ /* Reset the caller function only */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME \
+ UINT32_C(0x1)
+ /*
+ * Reset all children VFs of the caller function driver if the
+ * caller is a PF driver.
+ * It is an error to specify this level by a VF driver.
+ * It is an error to specify this level by a PF driver with
+ * no children VFs.
+ */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \
+ UINT32_C(0x2)
+ /*
+ * Reset a specific VF of the caller function driver if the caller
+ * is the parent PF driver.
+ * It is an error to specify this level by a VF driver.
+ * It is an error to specify this level by a PF driver that is not
+ * the parent of the VF that is being requested to reset.
+ */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF \
+ UINT32_C(0x3)
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_LAST \
+ HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF
+ uint8_t unused_0;
+} __attribute__((packed));
+
+/* hwrm_func_reset_output (size:128b/16B) */
+struct hwrm_func_reset_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************
+ * hwrm_func_getfid *
+ ********************/
+
+
+/* hwrm_func_getfid_input (size:192b/24B) */
+struct hwrm_func_getfid_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the pci_id field to be
+ * configured.
+ */
+ #define HWRM_FUNC_GETFID_INPUT_ENABLES_PCI_ID UINT32_C(0x1)
+ /*
+ * This value is the PCI ID of the queried function.
+ * If ARI is enabled, then it is
+ * Bus Number (8b):Function Number(8b). Otherwise, it is
+ * Bus Number (8b):Device Number (5b):Function Number(3b).
+ */
+ uint16_t pci_id;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_func_getfid_output (size:128b/16B) */
+struct hwrm_func_getfid_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * FID value. This value is used to identify operations on the PCI
+ * bus as belonging to a particular PCI function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_func_vf_alloc *
+ **********************/
+
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
+struct hwrm_func_vf_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the first_vf_id field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_ALLOC_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1)
+ /*
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
+ */
+ uint16_t first_vf_id;
+ /* The number of virtual functions requested. */
+ uint16_t num_vfs;
+} __attribute__((packed));
+
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
+struct hwrm_func_vf_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The ID of the first VF allocated. */
+ uint16_t first_vf_id;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************
+ * hwrm_func_vf_free *
+ *********************/
+
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
+struct hwrm_func_vf_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the first_vf_id field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_FREE_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1)
+ /*
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
+ */
+ uint16_t first_vf_id;
+ /*
+ * The number of virtual functions requested.
+ * 0xFFFF - Cleanup all children of this PF.
+ */
+ uint16_t num_vfs;
+} __attribute__((packed));
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
+struct hwrm_func_vf_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************
+ * hwrm_func_vf_cfg *
+ ********************/
+
+
+/* hwrm_func_vf_cfg_input (size:448b/56B) */
+struct hwrm_func_vf_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the mtu field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the guest_vlan field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the async_event_cr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the dflt_mac_addr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the num_rsscos_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the num_cmpl_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the num_tx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the num_rx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the num_l2_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the num_vnics field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the num_stat_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the num_hw_ring_grps field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS \
+ UINT32_C(0x800)
+ /*
+ * The maximum transmission unit requested on the function.
+ * The HWRM should make sure that the mtu of
+ * the function does not exceed the mtu of the physical
+ * port that this function is associated with.
+ *
+ * In addition to requesting mtu per function, it is
+ * possible to configure mtu per transmit ring.
+ * By default, the mtu of each transmit ring associated
+ * with a function is equal to the mtu of the function.
+ * The HWRM should make sure that the mtu of each transmit
+ * ring that is assigned to a function has a valid mtu.
+ */
+ uint16_t mtu;
+ /*
+ * The guest VLAN for the function being configured.
+ * This field's format is same as 802.1Q Tag's
+ * Tag Control Information (TCI) format that includes both
+ * Priority Code Point (PCP) and VLAN Identifier (VID).
+ */
+ uint16_t guest_vlan;
+ /*
+ * ID of the target completion ring for receiving asynchronous
+ * event completions. If this field is not valid, then the
+ * HWRM shall use the default completion ring of the function
+ * that is being configured as the target completion ring for
+ * providing any asynchronous event completions for that
+ * function.
+ * If this field is valid, then the HWRM shall use the
+ * completion ring identified by this ID as the target
+ * completion ring for providing any asynchronous event
+ * completions for the function that is being configured.
+ */
+ uint16_t async_event_cr;
+ /*
+ * This value is the current MAC address requested by the VF
+ * driver to be configured on this VF. A value of
+ * 00-00-00-00-00-00 indicates no MAC address configuration
+ * is requested by the VF driver.
+ * The parent PF driver may reject or overwrite this
+ * MAC address.
+ */
+ uint8_t dflt_mac_addr[6];
+ uint32_t flags;
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of TX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST \
+ UINT32_C(0x1)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST \
+ UINT32_C(0x2)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of CMPL rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \
+ UINT32_C(0x4)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RSS ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST \
+ UINT32_C(0x8)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of ring groups) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \
+ UINT32_C(0x10)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of stat ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST \
+ UINT32_C(0x20)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of VNICs) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST \
+ UINT32_C(0x40)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of L2 ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_L2_CTX_ASSETS_TEST \
+ UINT32_C(0x80)
+ /* The number of RSS/COS contexts requested for the VF. */
+ uint16_t num_rsscos_ctxs;
+ /* The number of completion rings requested for the VF. */
+ uint16_t num_cmpl_rings;
+ /* The number of transmit rings requested for the VF. */
+ uint16_t num_tx_rings;
+ /* The number of receive rings requested for the VF. */
+ uint16_t num_rx_rings;
+ /* The number of L2 contexts requested for the VF. */
+ uint16_t num_l2_ctxs;
+ /* The number of vnics requested for the VF. */
+ uint16_t num_vnics;
+ /* The number of statistic contexts requested for the VF. */
+ uint16_t num_stat_ctxs;
+ /* The number of HW ring groups requested for the VF. */
+ uint16_t num_hw_ring_grps;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
+struct hwrm_func_vf_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************
+ * hwrm_func_qcaps *
+ *******************/
+
+
+/* hwrm_func_qcaps_input (size:192b/24B) */
+struct hwrm_func_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_func_qcaps_output (size:640b/80B) */
+struct hwrm_func_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * FID value. This value is used to identify operations on the PCI
+ * bus as belonging to a particular PCI function.
+ */
+ uint16_t fid;
+ /*
+ * Port ID of port that this function is associated with.
+ * Valid only for the PF.
+ * 0xFF... (All Fs) if this function is not associated with
+ * any port.
+ * 0xFF... (All Fs) if this function is called from a VF.
+ */
+ uint16_t port_id;
+ uint32_t flags;
+ /* If 1, then Push mode is supported on this function. */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED \
+ UINT32_C(0x1)
+ /*
+ * If 1, then the global MSI-X auto-masking is enabled for the
+ * device.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \
+ UINT32_C(0x2)
+ /*
+ * If 1, then the Precision Time Protocol (PTP) processing
+ * is supported on this function.
+ * The HWRM should enable PTP on only a single Physical
+ * Function (PF) per port.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If 1, then RDMA over Converged Ethernet (RoCE) v1
+ * is supported on this function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If 1, then RDMA over Converged Ethernet (RoCE) v2
+ * is supported on this function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED \
+ UINT32_C(0x10)
+ /*
+ * If 1, then control and configuration of WoL magic packet
+ * are supported on this function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED \
+ UINT32_C(0x20)
+ /*
+ * If 1, then control and configuration of bitmap pattern
+ * packet are supported on this function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED \
+ UINT32_C(0x40)
+ /*
+ * If set to 1, then the control and configuration of rate limit
+ * of an allocated TX ring on the queried function is supported.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED \
+ UINT32_C(0x80)
+ /*
+ * If 1, then control and configuration of minimum and
+ * maximum bandwidths are supported on the queried function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED \
+ UINT32_C(0x100)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to set the rate limits
+ * on the TX rings of its children VFs.
+ * If this query is for a PF and this flag is set to 0, then
+ * the PF does not have the capability to set the rate limits
+ * on the TX rings of its children VFs.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED \
+ UINT32_C(0x200)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to set the minimum and/or
+ * maximum bandwidths for its children VFs.
+ * If this query is for a PF and this flag is set to 0, then
+ * the PF does not have the capability to set the minimum or
+ * maximum bandwidths for its children VFs.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED \
+ UINT32_C(0x400)
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function.
+ * If set to 1, then standard TX ring mode is supported
+ * on the queried function.
+ * If set to 0, then standard TX ring mode is not available
+ * on the queried function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_STD_TX_RING_MODE_SUPPORTED \
+ UINT32_C(0x800)
+ /*
+ * If the query is for a VF, then this flag shall be ignored,
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect GENEVE tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x1000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored,
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect NVGRE tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x2000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored,
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect GRE tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GRE_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x4000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored,
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect MPLS tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_MPLS_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x8000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored,
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to support pcie stats.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PCIE_STATS_SUPPORTED \
+ UINT32_C(0x10000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored,
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to adopt the VF's belonging
+ * to another PF.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADOPTED_PF_SUPPORTED \
+ UINT32_C(0x20000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored,
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to administer another PF.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED \
+ UINT32_C(0x40000)
+ /*
+ * This value is current MAC address configured for this
+ * function. A value of 00-00-00-00-00-00 indicates no
+ * MAC address is currently configured.
+ */
+ uint8_t mac_address[6];
+ /*
+ * The maximum number of RSS/COS contexts that can be
+ * allocated to the function.
+ */
+ uint16_t max_rsscos_ctx;
+ /*
+ * The maximum number of completion rings that can be
+ * allocated to the function.
+ */
+ uint16_t max_cmpl_rings;
+ /*
+ * The maximum number of transmit rings that can be
+ * allocated to the function.
+ */
+ uint16_t max_tx_rings;
+ /*
+ * The maximum number of receive rings that can be
+ * allocated to the function.
+ */
+ uint16_t max_rx_rings;
+ /*
+ * The maximum number of L2 contexts that can be
+ * allocated to the function.
+ */
+ uint16_t max_l2_ctxs;
+ /*
+ * The maximum number of VNICs that can be
+ * allocated to the function.
+ */
+ uint16_t max_vnics;
+ /*
+ * The identifier for the first VF enabled on a PF. This
+ * is valid only on the PF with SR-IOV enabled.
+ * 0xFF... (All Fs) if this command is called on a PF with
+ * SR-IOV disabled or on a VF.
+ */
+ uint16_t first_vf_id;
+ /*
+ * The maximum number of VFs that can be
+ * allocated to the function. This is valid only on the
+ * PF with SR-IOV enabled. 0xFF... (All Fs) if this
+ * command is called on a PF with SR-IOV disabled or
+ * on a VF.
+ */
+ uint16_t max_vfs;
+ /*
+ * The maximum number of statistic contexts that can be
+ * allocated to the function.
+ */
+ uint16_t max_stat_ctx;
+ /*
+ * The maximum number of Encapsulation records that can be
+ * offloaded by this function.
+ */
+ uint32_t max_encap_records;
+ /*
+ * The maximum number of decapsulation records that can
+ * be offloaded by this function.
+ */
+ uint32_t max_decap_records;
+ /*
+ * The maximum number of Exact Match (EM) flows that can be
+ * offloaded by this function on the TX side.
+ */
+ uint32_t max_tx_em_flows;
+ /*
+ * The maximum number of Wildcard Match (WM) flows that can
+ * be offloaded by this function on the TX side.
+ */
+ uint32_t max_tx_wm_flows;
+ /*
+ * The maximum number of Exact Match (EM) flows that can be
+ * offloaded by this function on the RX side.
+ */
+ uint32_t max_rx_em_flows;
+ /*
+ * The maximum number of Wildcard Match (WM) flows that can
+ * be offloaded by this function on the RX side.
+ */
+ uint32_t max_rx_wm_flows;
+ /*
+ * The maximum number of multicast filters that can
+ * be supported by this function on the RX side.
+ */
+ uint32_t max_mcast_filters;
+ /*
+ * The maximum value of flow_id that can be supported
+ * in completion records.
+ */
+ uint32_t max_flow_id;
+ /*
+ * The maximum number of HW ring groups that can be
+ * supported on this function.
+ */
+ uint32_t max_hw_ring_grps;
+ /*
+ * The maximum number of strict priority transmit rings
+ * that can be allocated to the function.
+ * This number indicates the maximum number of TX rings
+ * that can be assigned strict priorities out of the
+ * maximum number of TX rings that can be allocated
+ * (max_tx_rings) to the function.
+ */
+ uint16_t max_sp_tx_rings;
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************
+ * hwrm_func_qcfg *
+ ******************/
+
+
+/* hwrm_func_qcfg_input (size:192b/24B) */
+struct hwrm_func_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_func_qcfg_output (size:640b/80B) */
+struct hwrm_func_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * FID value. This value is used to identify operations on the PCI
+ * bus as belonging to a particular PCI function.
+ */
+ uint16_t fid;
+ /*
+ * Port ID of port that this function is associated with.
+ * 0xFF... (All Fs) if this function is not associated with
+ * any port.
+ */
+ uint16_t port_id;
+ /*
+ * This value is the current VLAN setting for this
+ * function. The value of 0 for this field indicates
+ * no priority tagging or VLAN is used.
+ * This field's format is same as 802.1Q Tag's
+ * Tag Control Information (TCI) format that includes both
+ * Priority Code Point (PCP) and VLAN Identifier (VID).
+ */
+ uint16_t vlan;
+ uint16_t flags;
+ /*
+ * If 1, then magic packet based Out-Of-Box WoL is enabled on
+ * the port associated with this function.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If 1, then bitmap pattern based Out-Of-Box WoL packet is enabled
+ * on the port associated with this function.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_BMP_ENABLED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, then FW based DCBX agent is enabled and running on
+ * the port associated with this function.
+ * If set to 0, then DCBX agent is not running in the firmware.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \
+ UINT32_C(0x4)
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function.
+ * If set to 1, then standard TX ring mode is enabled
+ * on the queried function.
+ * If set to 0, then the standard TX ring mode is disabled
+ * on the queried function. In this extended TX ring resource
+ * mode, the minimum and maximum bandwidth settings are not
+ * supported to allow the allocation of TX rings to span multiple
+ * scheduler nodes.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_STD_TX_RING_MODE_ENABLED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1 then FW based LLDP agent is enabled and running on
+ * the port associated with this function.
+ * If set to 0 then the LLDP agent is not running in the firmware.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED \
+ UINT32_C(0x10)
+ /*
+ * If set to 1, then multi-host mode is active for this function.
+ * If set to 0, then multi-host mode is inactive for this function
+ * or not applicable for this device.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST \
+ UINT32_C(0x20)
+ /*
+ * This value is current MAC address configured for this
+ * function. A value of 00-00-00-00-00-00 indicates no
+ * MAC address is currently configured.
+ */
+ uint8_t mac_address[6];
+ /*
+ * This value is current PCI ID of this
+ * function. If ARI is enabled, then it is
+ * Bus Number (8b):Function Number(8b). Otherwise, it is
+ * Bus Number (8b):Device Number (4b):Function Number(4b).
+ * If multi-host mode is active, the 4 lsb will indicate
+ * the PF index for this function.
+ */
+ uint16_t pci_id;
+ /*
+ * The number of RSS/COS contexts currently
+ * allocated to the function.
+ */
+ uint16_t alloc_rsscos_ctx;
+ /*
+ * The number of completion rings currently allocated to
+ * the function. This does not include the rings allocated
+ * to any children functions if any.
+ */
+ uint16_t alloc_cmpl_rings;
+ /*
+ * The number of transmit rings currently allocated to
+ * the function. This does not include the rings allocated
+ * to any children functions if any.
+ */
+ uint16_t alloc_tx_rings;
+ /*
+ * The number of receive rings currently allocated to
+ * the function. This does not include the rings allocated
+ * to any children functions if any.
+ */
+ uint16_t alloc_rx_rings;
+ /* The allocated number of L2 contexts to the function. */
+ uint16_t alloc_l2_ctx;
+ /* The allocated number of vnics to the function. */
+ uint16_t alloc_vnics;
+ /*
+ * The maximum transmission unit of the function.
+ * For rings allocated on this function, this default
+ * value is used if ring MTU is not specified.
+ */
+ uint16_t mtu;
+ /*
+ * The maximum receive unit of the function.
+ * For vnics allocated on this function, this default
+ * value is used if vnic MRU is not specified.
+ */
+ uint16_t mru;
+ /* The statistics context assigned to a function. */
+ uint16_t stat_ctx_id;
+ /*
+ * The HWRM shall return Unknown value for this field
+ * when this command is used to query VF's configuration.
+ */
+ uint8_t port_partition_type;
+ /* Single physical function */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_SPF UINT32_C(0x0)
+ /* Multiple physical functions */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1)
+ /* Network Partitioning 1.0 */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 UINT32_C(0x2)
+ /* Network Partitioning 1.5 */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5 UINT32_C(0x3)
+ /* Network Partitioning 2.0 */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 UINT32_C(0x4)
+ /* Unknown */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN
+ /*
+ * This field will indicate number of physical functions on this port_partition.
+ * HWRM shall return unavail (i.e. value of 0) for this field
+ * when this command is used to query VF's configuration or
+ * from older firmware that doesn't support this field.
+ */
+ uint8_t port_pf_cnt;
+ /* number of PFs is not available */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL
+ /*
+ * The default VNIC ID assigned to a function that is
+ * being queried.
+ */
+ uint16_t dflt_vnic_id;
+ uint16_t max_mtu_configured;
+ /*
+ * Minimum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
+ * A value of 0 indicates the minimum bandwidth is not
+ * configured.
+ */
+ uint32_t min_bw;
+ /* The bandwidth value. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
+ * A value of 0 indicates that the maximum bandwidth is not
+ * configured.
+ */
+ uint32_t max_bw;
+ /* The bandwidth value. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * This value indicates the Edge virtual bridge mode for the
+ * domain that this function belongs to.
+ */
+ uint8_t evb_mode;
+ /* No Edge Virtual Bridging (EVB) */
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
+ /* Virtual Ethernet Bridge (VEB) */
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1)
+ /* Virtual Ethernet Port Aggregator (VEPA) */
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2)
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA
+ uint8_t options;
+ /*
+ * This value indicates the PCIE device cache line size.
+ * The cache line size allows the DMA writes to terminate and
+ * start at the cache boundary.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_MASK \
+ UINT32_C(0x3)
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SFT 0
+ /* Cache Line Size 64 bytes */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \
+ UINT32_C(0x0)
+ /* Cache Line Size 128 bytes */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 \
+ UINT32_C(0x1)
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128
+ /* Reserved for future. */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_MASK \
+ UINT32_C(0xfc)
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_SFT 2
+ /*
+ * The number of VFs that are allocated to the function.
+ * This is valid only on the PF with SR-IOV enabled.
+ * 0xFF... (All Fs) if this command is called on a PF with
+ * SR-IOV disabled or on a VF.
+ */
+ uint16_t alloc_vfs;
+ /*
+ * The number of allocated multicast filters for this
+ * function on the RX side.
+ */
+ uint32_t alloc_mcast_filters;
+ /*
+ * The number of allocated HW ring groups for this
+ * function.
+ */
+ uint32_t alloc_hw_ring_grps;
+ /*
+ * The number of strict priority transmit rings out of
+ * currently allocated TX rings to the function
+ * (alloc_tx_rings).
+ */
+ uint16_t alloc_sp_tx_rings;
+ /*
+ * The number of statistics contexts
+ * currently reserved for the function.
+ */
+ uint16_t alloc_stat_ctx;
+ /*
+ * This field specifies how many NQs are reserved for the PF.
+ * Remaining NQs that belong to the PF are available for VFs.
+ * Once a PF has created VFs, it cannot change how many NQs are
+ * reserved for itself (since the NQs must be contiguous in HW).
+ */
+ uint16_t alloc_msix;
+ uint8_t unused_2[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_func_vlan_qcfg *
+ ***********************/
+
+
+/* hwrm_func_vlan_qcfg_input (size:192b/24B) */
+struct hwrm_func_vlan_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the configuration is
+ * for the requesting function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_func_vlan_qcfg_output (size:320b/40B) */
+struct hwrm_func_vlan_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+ /* S-TAG VLAN identifier configured for the function. */
+ uint16_t stag_vid;
+ /* S-TAG PCP value configured for the function. */
+ uint8_t stag_pcp;
+ uint8_t unused_1;
+ /*
+ * S-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
+ */
+ uint16_t stag_tpid;
+ /* C-TAG VLAN identifier configured for the function. */
+ uint16_t ctag_vid;
+ /* C-TAG PCP value configured for the function. */
+ uint8_t ctag_pcp;
+ uint8_t unused_2;
+ /*
+ * C-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
+ */
+ uint16_t ctag_tpid;
+ /* Future use. */
+ uint32_t rsvd2;
+ /* Future use. */
+ uint32_t rsvd3;
+ uint32_t unused_3;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_func_vlan_cfg *
+ **********************/
+
+
+/* hwrm_func_vlan_cfg_input (size:384b/48B) */
+struct hwrm_func_vlan_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the configuration is
+ * for the requesting function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[2];
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the stag_vid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the ctag_vid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the stag_pcp field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the ctag_pcp field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the stag_tpid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the ctag_tpid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20)
+ /* S-TAG VLAN identifier configured for the function. */
+ uint16_t stag_vid;
+ /* S-TAG PCP value configured for the function. */
+ uint8_t stag_pcp;
+ uint8_t unused_1;
+ /*
+ * S-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
+ */
+ uint16_t stag_tpid;
+ /* C-TAG VLAN identifier configured for the function. */
+ uint16_t ctag_vid;
+ /* C-TAG PCP value configured for the function. */
+ uint8_t ctag_pcp;
+ uint8_t unused_2;
+ /*
+ * C-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
+ */
+ uint16_t ctag_tpid;
+ /* Future use. */
+ uint32_t rsvd1;
+ /* Future use. */
+ uint32_t rsvd2;
+ uint8_t unused_3[4];
+} __attribute__((packed));
+
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
+struct hwrm_func_vlan_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************
+ * hwrm_func_cfg *
+ *****************/
+
+
+/* hwrm_func_cfg_input (size:704b/88B) */
+struct hwrm_func_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the the configuration is
+ * for the requesting function.
+ */
+ uint16_t fid;
+ /*
+ * This field specifies how many NQs will be reserved for the PF.
+ * Remaining NQs that belong to the PF become available for VFs.
+ * Once a PF has created VFs, it cannot change how many NQs are
+ * reserved for itself (since the NQs must be contiguous in HW).
+ */
+ uint16_t num_msix;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the function is disabled with
+ * source MAC address check.
+ * This is an anti-spoofing check. If this flag is set,
+ * then the function shall be configured to disallow
+ * transmission of frames with the source MAC address that
+ * is configured for this function.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the function is enabled with
+ * source MAC address check.
+ * This is an anti-spoofing check. If this flag is set,
+ * then the function shall be configured to allow
+ * transmission of frames with the source MAC address that
+ * is configured for this function.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_MASK \
+ UINT32_C(0x1fc)
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_SFT 2
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function.
+ * If set to 1, then standard TX ring mode is requested to be
+ * enabled on the function being configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE \
+ UINT32_C(0x200)
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function.
+ * If set to 1, then the standard TX ring mode is requested to
+ * be disabled on the function being configured. In this extended
+ * TX ring resource mode, the minimum and maximum bandwidth settings
+ * are not supported to allow the allocation of TX rings to
+ * span multiple scheduler nodes.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE \
+ UINT32_C(0x400)
+ /*
+ * If this bit is set, virtual mac address configured
+ * in this command will be persistent over warm boot.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST \
+ UINT32_C(0x800)
+ /*
+ * This bit only applies to the VF. If this bit is set, the statistic
+ * context counters will not be cleared when the statistic context is freed
+ * or a function reset is called on VF. This bit will be cleared when the PF
+ * is unloaded or a function reset is called on the PF.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \
+ UINT32_C(0x1000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of TX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST \
+ UINT32_C(0x2000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RX_ASSETS_TEST \
+ UINT32_C(0x4000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of CMPL rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \
+ UINT32_C(0x8000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RSS ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST \
+ UINT32_C(0x10000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of ring groups) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \
+ UINT32_C(0x20000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of stat ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST \
+ UINT32_C(0x40000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of VNICs) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST \
+ UINT32_C(0x80000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of L2 ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firwmare will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_L2_CTX_ASSETS_TEST \
+ UINT32_C(0x100000)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the mtu field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the mru field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MRU \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the num_rsscos_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the num_cmpl_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the num_tx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the num_rx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the num_l2_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the num_vnics field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the num_stat_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the dflt_mac_addr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the dflt_vlan field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the dflt_ip_addr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_IP_ADDR \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the min_bw field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the max_bw field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the async_event_cr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the vlan_antispoof_mode field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE \
+ UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the allowed_vlan_pris field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_ALLOWED_VLAN_PRIS \
+ UINT32_C(0x10000)
+ /*
+ * This bit must be '1' for the evb_mode field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE \
+ UINT32_C(0x20000)
+ /*
+ * This bit must be '1' for the num_mcast_filters field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MCAST_FILTERS \
+ UINT32_C(0x40000)
+ /*
+ * This bit must be '1' for the num_hw_ring_grps field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS \
+ UINT32_C(0x80000)
+ /*
+ * This bit must be '1' for the cache_linesize field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_CACHE_LINESIZE \
+ UINT32_C(0x100000)
+ /*
+ * This bit must be '1' for the num_msix field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX \
+ UINT32_C(0x200000)
+ /*
+ * The maximum transmission unit of the function.
+ * The HWRM should make sure that the mtu of
+ * the function does not exceed the mtu of the physical
+ * port that this function is associated with.
+ *
+ * In addition to configuring mtu per function, it is
+ * possible to configure mtu per transmit ring.
+ * By default, the mtu of each transmit ring associated
+ * with a function is equal to the mtu of the function.
+ * The HWRM should make sure that the mtu of each transmit
+ * ring that is assigned to a function has a valid mtu.
+ */
+ uint16_t mtu;
+ /*
+ * The maximum receive unit of the function.
+ * The HWRM should make sure that the mru of
+ * the function does not exceed the mru of the physical
+ * port that this function is associated with.
+ *
+ * In addition to configuring mru per function, it is
+ * possible to configure mru per vnic.
+ * By default, the mru of each vnic associated
+ * with a function is equal to the mru of the function.
+ * The HWRM should make sure that the mru of each vnic
+ * that is assigned to a function has a valid mru.
+ */
+ uint16_t mru;
+ /*
+ * The number of RSS/COS contexts requested for the
+ * function.
+ */
+ uint16_t num_rsscos_ctxs;
+ /*
+ * The number of completion rings requested for the
+ * function. This does not include the rings allocated
+ * to any children functions if any.
+ */
+ uint16_t num_cmpl_rings;
+ /*
+ * The number of transmit rings requested for the function.
+ * This does not include the rings allocated to any
+ * children functions if any.
+ */
+ uint16_t num_tx_rings;
+ /*
+ * The number of receive rings requested for the function.
+ * This does not include the rings allocated
+ * to any children functions if any.
+ */
+ uint16_t num_rx_rings;
+ /* The requested number of L2 contexts for the function. */
+ uint16_t num_l2_ctxs;
+ /* The requested number of vnics for the function. */
+ uint16_t num_vnics;
+ /* The requested number of statistic contexts for the function. */
+ uint16_t num_stat_ctxs;
+ /*
+ * The number of HW ring groups that should
+ * be reserved for this function.
+ */
+ uint16_t num_hw_ring_grps;
+ /* The default MAC address for the function being configured. */
+ uint8_t dflt_mac_addr[6];
+ /*
+ * The default VLAN for the function being configured.
+ * This field's format is same as 802.1Q Tag's
+ * Tag Control Information (TCI) format that includes both
+ * Priority Code Point (PCP) and VLAN Identifier (VID).
+ */
+ uint16_t dflt_vlan;
+ /*
+ * The default IP address for the function being configured.
+ * This address is only used in enabling source property check.
+ */
+ uint32_t dflt_ip_addr[4];
+ /*
+ * Minimum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
+ */
+ uint32_t min_bw;
+ /* The bandwidth value. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_LAST \
+ HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
+ */
+ uint32_t max_bw;
+ /* The bandwidth value. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_LAST \
+ HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * ID of the target completion ring for receiving asynchronous
+ * event completions. If this field is not valid, then the
+ * HWRM shall use the default completion ring of the function
+ * that is being configured as the target completion ring for
+ * providing any asynchronous event completions for that
+ * function.
+ * If this field is valid, then the HWRM shall use the
+ * completion ring identified by this ID as the target
+ * completion ring for providing any asynchronous event
+ * completions for the function that is being configured.
+ */
+ uint16_t async_event_cr;
+ /* VLAN Anti-spoofing mode. */
+ uint8_t vlan_antispoof_mode;
+ /* No VLAN anti-spoofing checks are enabled */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK \
+ UINT32_C(0x0)
+ /* Validate VLAN against the configured VLAN(s) */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN \
+ UINT32_C(0x1)
+ /* Insert VLAN if it does not exist, otherwise discard */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE \
+ UINT32_C(0x2)
+ /* Insert VLAN if it does not exist, override VLAN if it exists */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN \
+ UINT32_C(0x3)
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_LAST \
+ HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+ /*
+ * This bit field defines VLAN PRIs that are allowed on
+ * this function.
+ * If nth bit is set, then VLAN PRI n is allowed on this
+ * function.
+ */
+ uint8_t allowed_vlan_pris;
+ /*
+ * The HWRM shall allow a PF driver to change EVB mode for the
+ * partition it belongs to.
+ * The HWRM shall not allow a VF driver to change the EVB mode.
+ * The HWRM shall take into account the switching of EVB mode
+ * from one to another and reconfigure hardware resources as
+ * appropriately.
+ * The switching from VEB to VEPA mode requires
+ * the disabling of the loopback traffic. Additionally,
+ * source knock outs are handled differently in VEB and VEPA
+ * modes.
+ */
+ uint8_t evb_mode;
+ /* No Edge Virtual Bridging (EVB) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
+ /* Virtual Ethernet Bridge (VEB) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEB UINT32_C(0x1)
+ /* Virtual Ethernet Port Aggregator (VEPA) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA UINT32_C(0x2)
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_LAST \
+ HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA
+ uint8_t options;
+ /*
+ * This value indicates the PCIE device cache line size.
+ * The cache line size allows the DMA writes to terminate and
+ * start at the cache boundary.
+ */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_MASK \
+ UINT32_C(0x3)
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SFT 0
+ /* Cache Line Size 64 bytes */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \
+ UINT32_C(0x0)
+ /* Cache Line Size 128 bytes */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 \
+ UINT32_C(0x1)
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_LAST \
+ HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_128
+ /* Reserved for future. */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_MASK \
+ UINT32_C(0xfc)
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_SFT 2
+ /*
+ * The number of multicast filters that should
+ * be reserved for this function on the RX side.
+ */
+ uint16_t num_mcast_filters;
+} __attribute__((packed));
+
+/* hwrm_func_cfg_output (size:128b/16B) */
+struct hwrm_func_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************
+ * hwrm_func_qstats *
+ ********************/
+
+
+/* hwrm_func_qstats_input (size:192b/24B) */
+struct hwrm_func_qstats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_func_qstats_output (size:1408b/176B) */
+struct hwrm_func_qstats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Number of transmitted unicast packets on the function. */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets on the function. */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets on the function. */
+ uint64_t tx_bcast_pkts;
+ /*
+ * Number of transmitted packets that were discarded due to
+ * internal NIC resource problems. For transmit, this
+ * can only happen if TMP is configured to allow dropping
+ * in HOL blocking conditions, which is not a normal
+ * configuration.
+ */
+ uint64_t tx_discard_pkts;
+ /*
+ * Number of dropped packets on transmit path on the function.
+ * These are packets that have been marked for drop by
+ * the TE CFA block or are packets that exceeded the
+ * transmit MTU limit for the function.
+ */
+ uint64_t tx_drop_pkts;
+ /* Number of transmitted bytes for unicast traffic on the function. */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic on the function. */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic on the function. */
+ uint64_t tx_bcast_bytes;
+ /* Number of received unicast packets on the function. */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets on the function. */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets on the function. */
+ uint64_t rx_bcast_pkts;
+ /*
+ * Number of received packets that were discarded on the function
+ * due to resource limitations. This can happen for 3 reasons.
+ * # The BD used for the packet has a bad format.
+ * # There were no BDs available in the ring for the packet.
+ * # There were no BDs available on-chip for the packet.
+ */
+ uint64_t rx_discard_pkts;
+ /*
+ * Number of dropped packets on received path on the function.
+ * These are packets that have been marked for drop by the
+ * RE CFA.
+ */
+ uint64_t rx_drop_pkts;
+ /* Number of received bytes for unicast traffic on the function. */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic on the function. */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic on the function. */
+ uint64_t rx_bcast_bytes;
+ /* Number of aggregated unicast packets on the function. */
+ uint64_t rx_agg_pkts;
+ /* Number of aggregated unicast bytes on the function. */
+ uint64_t rx_agg_bytes;
+ /* Number of aggregation events on the function. */
+ uint64_t rx_agg_events;
+ /* Number of aborted aggregations on the function. */
+ uint64_t rx_agg_aborts;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_func_clr_stats *
+ ***********************/
+
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
+struct hwrm_func_clr_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_func_clr_stats_output (size:128b/16B) */
+struct hwrm_func_clr_stats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_func_vf_resc_free *
+ **************************/
+
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
+struct hwrm_func_vf_resc_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
+ */
+ uint16_t vf_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
+struct hwrm_func_vf_resc_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_func_vf_vnic_ids_query *
+ *******************************/
+
+
+/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
+struct hwrm_func_vf_vnic_ids_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
+ */
+ uint16_t vf_id;
+ uint8_t unused_0[2];
+ /* Max number of vnic ids in vnic id table */
+ uint32_t max_vnic_id_cnt;
+ /* This is the address for VF VNIC ID table */
+ uint64_t vnic_id_tbl_addr;
+} __attribute__((packed));
+
+/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
+struct hwrm_func_vf_vnic_ids_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Actual number of vnic ids
+ *
+ * Each VNIC ID is written as a 32-bit number.
+ */
+ uint32_t vnic_id_cnt;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_func_drv_rgtr *
+ **********************/
+
+
+/* hwrm_func_drv_rgtr_input (size:896b/112B) */
+struct hwrm_func_drv_rgtr_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the function driver is requesting
+ * all requests from its children VF drivers to be
+ * forwarded to itself.
+ * This flag can only be set by the PF driver.
+ * If a VF driver sets this flag, it should be ignored
+ * by the HWRM.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1)
+ /*
+ * When this bit is '1', the function is requesting none of
+ * the requests from its children VF drivers to be
+ * forwarded to itself.
+ * This flag can only be set by the PF driver.
+ * If a VF driver sets this flag, it should be ignored
+ * by the HWRM.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2)
+ /*
+ * When this bit is '1', then ver_maj_8b, ver_min_8b, ver_upd_8b
+ * fields shall be ignored and ver_maj, ver_min, ver_upd
+ * and ver_patch shall be used for the driver version information.
+ * When this bit is '0', then ver_maj_8b, ver_min_8b, ver_upd_8b
+ * fields shall be used for the driver version information and
+ * ver_maj, ver_min, ver_upd and ver_patch shall be ignored.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_16BIT_VER_MODE UINT32_C(0x4)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the os_type field to be
+ * configured.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the ver field to be
+ * configured.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the timestamp field to be
+ * configured.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the vf_req_fwd field to be
+ * configured.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the async_event_fwd field to be
+ * configured.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD \
+ UINT32_C(0x10)
+ /* This value indicates the type of OS. The values are based on CIM_OperatingSystem.mof file as published by the DMTF. */
+ uint16_t os_type;
+ /* Unknown */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
+ /* Other OS not listed below. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER UINT32_C(0x1)
+ /* MSDOS OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS UINT32_C(0xe)
+ /* Windows OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS UINT32_C(0x12)
+ /* Solaris OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS UINT32_C(0x1d)
+ /* Linux OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX UINT32_C(0x24)
+ /* FreeBSD OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD UINT32_C(0x2a)
+ /* VMware ESXi OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI UINT32_C(0x68)
+ /* Microsoft Windows 8 64-bit OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 UINT32_C(0x73)
+ /* Microsoft Windows Server 2012 R2 OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74)
+ /* UEFI driver. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UEFI UINT32_C(0x8000)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LAST \
+ HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UEFI
+ /* This is the 8bit major version of the driver. */
+ uint8_t ver_maj_8b;
+ /* This is the 8bit minor version of the driver. */
+ uint8_t ver_min_8b;
+ /* This is the 8bit update version of the driver. */
+ uint8_t ver_upd_8b;
+ uint8_t unused_0[3];
+ /*
+ * This is a 32-bit timestamp provided by the driver for
+ * keep alive.
+ * The timestamp is in multiples of 1ms.
+ */
+ uint32_t timestamp;
+ uint8_t unused_1[4];
+ /*
+ * This is a 256-bit bit mask provided by the PF driver for
+ * letting the HWRM know what commands issued by the VF driver
+ * to the HWRM should be forwarded to the PF driver.
+ * Nth bit refers to the Nth req_type.
+ *
+ * Setting Nth bit to 1 indicates that requests from the
+ * VF driver with req_type equal to N shall be forwarded to
+ * the parent PF driver.
+ *
+ * This field is not valid for the VF driver.
+ */
+ uint32_t vf_req_fwd[8];
+ /*
+ * This is a 256-bit bit mask provided by the function driver
+ * (PF or VF driver) to indicate the list of asynchronous event
+ * completions to be forwarded.
+ *
+ * Nth bit refers to the Nth event_id.
+ *
+ * Setting Nth bit to 1 by the function driver shall result in
+ * the HWRM forwarding asynchronous event completion with
+ * event_id equal to N.
+ *
+ * If all bits are set to 0 (value of 0), then the HWRM shall
+ * not forward any asynchronous event completion to this
+ * function driver.
+ */
+ uint32_t async_event_fwd[8];
+ /* This is the 16bit major version of the driver. */
+ uint16_t ver_maj;
+ /* This is the 16bit minor version of the driver. */
+ uint16_t ver_min;
+ /* This is the 16bit update version of the driver. */
+ uint16_t ver_upd;
+ /* This is the 16bit patch version of the driver. */
+ uint16_t ver_patch;
+} __attribute__((packed));
+
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
+struct hwrm_func_drv_rgtr_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/************************
+ * hwrm_func_drv_unrgtr *
+ ************************/
+
+
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
+struct hwrm_func_drv_unrgtr_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the function driver is notifying
+ * the HWRM to prepare for the shutdown.
+ */
+ #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \
+ UINT32_C(0x1)
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
+struct hwrm_func_drv_unrgtr_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_func_buf_rgtr *
+ **********************/
+
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
+struct hwrm_func_buf_rgtr_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the vf_id field to be
+ * configured.
+ */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the err_buf_addr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2)
+ /*
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
+ */
+ uint16_t vf_id;
+ /*
+ * This field represents the number of pages used for request
+ * buffer(s).
+ */
+ uint16_t req_buf_num_pages;
+ /*
+ * This field represents the page size used for request
+ * buffer(s).
+ */
+ uint16_t req_buf_page_size;
+ /* 16 bytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_16B UINT32_C(0x4)
+ /* 4 Kbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4K UINT32_C(0xc)
+ /* 8 Kbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_8K UINT32_C(0xd)
+ /* 64 Kbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_64K UINT32_C(0x10)
+ /* 2 Mbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_2M UINT32_C(0x15)
+ /* 4 Mbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4M UINT32_C(0x16)
+ /* 1 Gbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G UINT32_C(0x1e)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_LAST \
+ HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G
+ /* The length of the request buffer per VF in bytes. */
+ uint16_t req_buf_len;
+ /* The length of the response buffer in bytes. */
+ uint16_t resp_buf_len;
+ uint8_t unused_0[2];
+ /* This field represents the page address of page #0. */
+ uint64_t req_buf_page_addr0;
+ /* This field represents the page address of page #1. */
+ uint64_t req_buf_page_addr1;
+ /* This field represents the page address of page #2. */
+ uint64_t req_buf_page_addr2;
+ /* This field represents the page address of page #3. */
+ uint64_t req_buf_page_addr3;
+ /* This field represents the page address of page #4. */
+ uint64_t req_buf_page_addr4;
+ /* This field represents the page address of page #5. */
+ uint64_t req_buf_page_addr5;
+ /* This field represents the page address of page #6. */
+ uint64_t req_buf_page_addr6;
+ /* This field represents the page address of page #7. */
+ uint64_t req_buf_page_addr7;
+ /* This field represents the page address of page #8. */
+ uint64_t req_buf_page_addr8;
+ /* This field represents the page address of page #9. */
+ uint64_t req_buf_page_addr9;
+ /*
+ * This field is used to receive the error reporting from
+ * the chipset. Only applicable for PFs.
+ */
+ uint64_t error_buf_addr;
+ /*
+ * This field is used to receive the response forwarded by the
+ * HWRM.
+ */
+ uint64_t resp_buf_addr;
+} __attribute__((packed));
+
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
+struct hwrm_func_buf_rgtr_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/************************
+ * hwrm_func_buf_unrgtr *
+ ************************/
+
+
+/* hwrm_func_buf_unrgtr_input (size:192b/24B) */
+struct hwrm_func_buf_unrgtr_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the vf_id field to be
+ * configured.
+ */
+ #define HWRM_FUNC_BUF_UNRGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
+ /*
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
+ */
+ uint16_t vf_id;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_func_buf_unrgtr_output (size:128b/16B) */
+struct hwrm_func_buf_unrgtr_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_func_drv_qver *
+ **********************/
+
+
+/* hwrm_func_drv_qver_input (size:192b/24B) */
+struct hwrm_func_drv_qver_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Reserved for future use. */
+ uint32_t reserved;
+ /*
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_func_drv_qver_output (size:192b/24B) */
+struct hwrm_func_drv_qver_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value indicates the type of OS. The values are based on CIM_OperatingSystem.mof file as published by the DMTF. */
+ uint16_t os_type;
+ /* Unknown */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
+ /* Other OS not listed below. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_OTHER UINT32_C(0x1)
+ /* MSDOS OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_MSDOS UINT32_C(0xe)
+ /* Windows OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WINDOWS UINT32_C(0x12)
+ /* Solaris OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_SOLARIS UINT32_C(0x1d)
+ /* Linux OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_LINUX UINT32_C(0x24)
+ /* FreeBSD OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_FREEBSD UINT32_C(0x2a)
+ /* VMware ESXi OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_ESXI UINT32_C(0x68)
+ /* Microsoft Windows 8 64-bit OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WIN864 UINT32_C(0x73)
+ /* Microsoft Windows Server 2012 R2 OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74)
+ /* UEFI driver. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UEFI UINT32_C(0x8000)
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_LAST \
+ HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UEFI
+ /* This is the 8bit major version of the driver. */
+ uint8_t ver_maj_8b;
+ /* This is the 8bit minor version of the driver. */
+ uint8_t ver_min_8b;
+ /* This is the 8bit update version of the driver. */
+ uint8_t ver_upd_8b;
+ uint8_t unused_0[2];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+ /* This is the 16bit major version of the driver. */
+ uint16_t ver_maj;
+ /* This is the 16bit minor version of the driver. */
+ uint16_t ver_min;
+ /* This is the 16bit update version of the driver. */
+ uint16_t ver_upd;
+ /* This is the 16bit patch version of the driver. */
+ uint16_t ver_patch;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_func_resource_qcaps *
+ ****************************/
+
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_func_resource_qcaps_output (size:448b/56B) */
+struct hwrm_func_resource_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Maximum guaranteed number of VFs supported by PF. Not applicable for VFs. */
+ uint16_t max_vfs;
+ /* Maximum guaranteed number of MSI-X vectors supported by function */
+ uint16_t max_msix;
+ /* Hint of strategy to be used by PF driver to reserve resources for its VF */
+ uint16_t vf_reservation_strategy;
+ /* The PF driver should evenly divide its remaining resources among all VFs. */
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL \
+ UINT32_C(0x0)
+ /* The PF driver should only reserve minimal resources for each VF. */
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL \
+ UINT32_C(0x1)
+ /*
+ * The PF driver should not reserve any resources for each VF until the
+ * the VF interface is brought up.
+ */
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_LAST \
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
+ /* Minimum guaranteed number of RSS/COS contexts */
+ uint16_t min_rsscos_ctx;
+ /* Maximum non-guaranteed number of RSS/COS contexts */
+ uint16_t max_rsscos_ctx;
+ /* Minimum guaranteed number of completion rings */
+ uint16_t min_cmpl_rings;
+ /* Maximum non-guaranteed number of completion rings */
+ uint16_t max_cmpl_rings;
+ /* Minimum guaranteed number of transmit rings */
+ uint16_t min_tx_rings;
+ /* Maximum non-guaranteed number of transmit rings */
+ uint16_t max_tx_rings;
+ /* Minimum guaranteed number of receive rings */
+ uint16_t min_rx_rings;
+ /* Maximum non-guaranteed number of receive rings */
+ uint16_t max_rx_rings;
+ /* Minimum guaranteed number of L2 contexts */
+ uint16_t min_l2_ctxs;
+ /* Maximum non-guaranteed number of L2 contexts */
+ uint16_t max_l2_ctxs;
+ /* Minimum guaranteed number of VNICs */
+ uint16_t min_vnics;
+ /* Maximum non-guaranteed number of VNICs */
+ uint16_t max_vnics;
+ /* Minimum guaranteed number of statistic contexts */
+ uint16_t min_stat_ctx;
+ /* Maximum non-guaranteed number of statistic contexts */
+ uint16_t max_stat_ctx;
+ /* Minimum guaranteed number of ring groups */
+ uint16_t min_hw_ring_grps;
+ /* Maximum non-guaranteed number of ring groups */
+ uint16_t max_hw_ring_grps;
+ /*
+ * Maximum number of inputs into the transmit scheduler for this function.
+ * The number of TX rings assigned to the function cannot exceed this value.
+ */
+ uint16_t max_tx_scheduler_inputs;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************************
+ * hwrm_func_vf_resource_cfg *
+ *****************************/
+
+
+/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+struct hwrm_func_vf_resource_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VF ID that is being configured by PF */
+ uint16_t vf_id;
+ /* Maximum guaranteed number of MSI-X vectors for the function */
+ uint16_t max_msix;
+ /* Minimum guaranteed number of RSS/COS contexts */
+ uint16_t min_rsscos_ctx;
+ /* Maximum non-guaranteed number of RSS/COS contexts */
+ uint16_t max_rsscos_ctx;
+ /* Minimum guaranteed number of completion rings */
+ uint16_t min_cmpl_rings;
+ /* Maximum non-guaranteed number of completion rings */
+ uint16_t max_cmpl_rings;
+ /* Minimum guaranteed number of transmit rings */
+ uint16_t min_tx_rings;
+ /* Maximum non-guaranteed number of transmit rings */
+ uint16_t max_tx_rings;
+ /* Minimum guaranteed number of receive rings */
+ uint16_t min_rx_rings;
+ /* Maximum non-guaranteed number of receive rings */
+ uint16_t max_rx_rings;
+ /* Minimum guaranteed number of L2 contexts */
+ uint16_t min_l2_ctxs;
+ /* Maximum non-guaranteed number of L2 contexts */
+ uint16_t max_l2_ctxs;
+ /* Minimum guaranteed number of VNICs */
+ uint16_t min_vnics;
+ /* Maximum non-guaranteed number of VNICs */
+ uint16_t max_vnics;
+ /* Minimum guaranteed number of statistic contexts */
+ uint16_t min_stat_ctx;
+ /* Maximum non-guaranteed number of statistic contexts */
+ uint16_t max_stat_ctx;
+ /* Minimum guaranteed number of ring groups */
+ uint16_t min_hw_ring_grps;
+ /* Maximum non-guaranteed number of ring groups */
+ uint16_t max_hw_ring_grps;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
+struct hwrm_func_vf_resource_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Reserved number of RSS/COS contexts */
+ uint16_t reserved_rsscos_ctx;
+ /* Reserved number of completion rings */
+ uint16_t reserved_cmpl_rings;
+ /* Reserved number of transmit rings */
+ uint16_t reserved_tx_rings;
+ /* Reserved number of receive rings */
+ uint16_t reserved_rx_rings;
+ /* Reserved number of L2 contexts */
+ uint16_t reserved_l2_ctxs;
+ /* Reserved number of VNICs */
+ uint16_t reserved_vnics;
+ /* Reserved number of statistic contexts */
+ uint16_t reserved_stat_ctx;
+ /* Reserved number of ring groups */
+ uint16_t reserved_hw_ring_grps;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************************
+ * hwrm_func_backing_store_qcaps *
+ *********************************/
+
+
+/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_func_backing_store_qcaps_output (size:512b/64B) */
+struct hwrm_func_backing_store_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Maximum number of QP context entries supported for this function. */
+ uint32_t qp_max_entries;
+ /*
+ * Minimum number of QP context entries that are needed to be reserved
+ * for QP1 for the PF and its VFs. PF drivers must allocate at least
+ * this many QP context entries, even if RoCE will not be used.
+ */
+ uint16_t qp_min_qp1_entries;
+ /* Maximum number of QP context entries that can be used for L2. */
+ uint16_t qp_max_l2_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t qp_entry_size;
+ /* Maximum number of SRQ context entries that can be used for L2. */
+ uint16_t srq_max_l2_entries;
+ /* Maximum number of SRQ context entries supported for this function. */
+ uint32_t srq_max_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t srq_entry_size;
+ /* Maximum number of CQ context entries that can be used for L2. */
+ uint16_t cq_max_l2_entries;
+ /* Maximum number of CQ context entries supported for this function. */
+ uint32_t cq_max_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t cq_entry_size;
+ /* Maximum number of VNIC context entries supported for this function. */
+ uint16_t vnic_max_vnic_entries;
+ /* Maximum number of Ring table context entries supported for this function. */
+ uint16_t vnic_max_ring_table_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t vnic_entry_size;
+ /* Maximum number of statistic context entries supported for this function. */
+ uint32_t stat_max_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t stat_entry_size;
+ /* Maximum number of TQM context entries supported per ring. */
+ uint16_t tqm_max_entries_per_ring;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t tqm_entry_size;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t mrav_entry_size;
+ /* Maximum number of MR/AV context entries supported for this function. */
+ uint32_t mrav_max_entries;
+ /* Maximum number of Timer context entries supported for this function. */
+ uint32_t tim_max_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t tim_entry_size;
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_func_backing_store_cfg *
+ *******************************/
+
+
+/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */
+struct hwrm_func_backing_store_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When set, the firmware only uses on-chip resources and does not
+ * expect any backing store to be provided by the host driver. This
+ * mode provides minimal L2 functionality (e.g. limited L2 resources,
+ * no RoCE).
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_FLAGS_PREBOOT_MODE \
+ UINT32_C(0x1)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the qp fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the srq fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the cq fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the vnic fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the stat fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the tqm_sp fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the tqm_ring0 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING0 \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the tqm_ring1 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING1 \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the tqm_ring2 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING2 \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the tqm_ring3 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING3 \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the tqm_ring4 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING4 \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the tqm_ring5 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING5 \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the tqm_ring6 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING6 \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the tqm_ring7 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING7 \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the mrav fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the tim fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM \
+ UINT32_C(0x8000)
+ /* QPC page size and level. */
+ uint8_t qpc_pg_size_qpc_lvl;
+ /* QPC PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2
+ /* QPC page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_1G
+ /* SRQ page size and level. */
+ uint8_t srq_pg_size_srq_lvl;
+ /* SRQ PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2
+ /* SRQ page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_1G
+ /* CQ page size and level. */
+ uint8_t cq_pg_size_cq_lvl;
+ /* CQ PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2
+ /* CQ page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_1G
+ /* VNIC page size and level. */
+ uint8_t vnic_pg_size_vnic_lvl;
+ /* VNIC PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2
+ /* VNIC page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_1G
+ /* Stat page size and level. */
+ uint8_t stat_pg_size_stat_lvl;
+ /* Stat PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2
+ /* Stat page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_1G
+ /* TQM slow path page size and level. */
+ uint8_t tqm_sp_pg_size_tqm_sp_lvl;
+ /* TQM slow path PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2
+ /* TQM slow path page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_1G
+ /* TQM ring 0 page size and level. */
+ uint8_t tqm_ring0_pg_size_tqm_ring0_lvl;
+ /* TQM ring 0 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2
+ /* TQM ring 0 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_1G
+ /* TQM ring 1 page size and level. */
+ uint8_t tqm_ring1_pg_size_tqm_ring1_lvl;
+ /* TQM ring 1 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2
+ /* TQM ring 1 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_1G
+ /* TQM ring 2 page size and level. */
+ uint8_t tqm_ring2_pg_size_tqm_ring2_lvl;
+ /* TQM ring 2 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2
+ /* TQM ring 2 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_1G
+ /* TQM ring 3 page size and level. */
+ uint8_t tqm_ring3_pg_size_tqm_ring3_lvl;
+ /* TQM ring 3 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2
+ /* TQM ring 3 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_1G
+ /* TQM ring 4 page size and level. */
+ uint8_t tqm_ring4_pg_size_tqm_ring4_lvl;
+ /* TQM ring 4 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2
+ /* TQM ring 4 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_1G
+ /* TQM ring 5 page size and level. */
+ uint8_t tqm_ring5_pg_size_tqm_ring5_lvl;
+ /* TQM ring 5 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2
+ /* TQM ring 5 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_1G
+ /* TQM ring 6 page size and level. */
+ uint8_t tqm_ring6_pg_size_tqm_ring6_lvl;
+ /* TQM ring 6 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2
+ /* TQM ring 6 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_1G
+ /* TQM ring 7 page size and level. */
+ uint8_t tqm_ring7_pg_size_tqm_ring7_lvl;
+ /* TQM ring 7 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2
+ /* TQM ring 7 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_1G
+ /* MR/AV page size and level. */
+ uint8_t mrav_pg_size_mrav_lvl;
+ /* MR/AV PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2
+ /* MR/AV page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_1G
+ /* Timer page size and level. */
+ uint8_t tim_pg_size_tim_lvl;
+ /* Timer PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2
+ /* Timer page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_1G
+ /* QP page directory. */
+ uint64_t qpc_page_dir;
+ /* SRQ page directory. */
+ uint64_t srq_page_dir;
+ /* CQ page directory. */
+ uint64_t cq_page_dir;
+ /* VNIC page directory. */
+ uint64_t vnic_page_dir;
+ /* Stat page directory. */
+ uint64_t stat_page_dir;
+ /* TQM slowpath page directory. */
+ uint64_t tqm_sp_page_dir;
+ /* TQM ring 0 page directory. */
+ uint64_t tqm_ring0_page_dir;
+ /* TQM ring 1 page directory. */
+ uint64_t tqm_ring1_page_dir;
+ /* TQM ring 2 page directory. */
+ uint64_t tqm_ring2_page_dir;
+ /* TQM ring 3 page directory. */
+ uint64_t tqm_ring3_page_dir;
+ /* TQM ring 4 page directory. */
+ uint64_t tqm_ring4_page_dir;
+ /* TQM ring 5 page directory. */
+ uint64_t tqm_ring5_page_dir;
+ /* TQM ring 6 page directory. */
+ uint64_t tqm_ring6_page_dir;
+ /* TQM ring 7 page directory. */
+ uint64_t tqm_ring7_page_dir;
+ /* MR/AV page directory. */
+ uint64_t mrav_page_dir;
+ /* Timer page directory. */
+ uint64_t tim_page_dir;
+ /* Number of QPs. */
+ uint32_t qp_num_entries;
+ /* Number of SRQs. */
+ uint32_t srq_num_entries;
+ /* Number of CQs. */
+ uint32_t cq_num_entries;
+ /* Number of Stats. */
+ uint32_t stat_num_entries;
+ /* Number of TQM slowpath entries. */
+ uint32_t tqm_sp_num_entries;
+ /* Number of TQM ring 0 entries. */
+ uint32_t tqm_ring0_num_entries;
+ /* Number of TQM ring 1 entries. */
+ uint32_t tqm_ring1_num_entries;
+ /* Number of TQM ring 2 entries. */
+ uint32_t tqm_ring2_num_entries;
+ /* Number of TQM ring 3 entries. */
+ uint32_t tqm_ring3_num_entries;
+ /* Number of TQM ring 4 entries. */
+ uint32_t tqm_ring4_num_entries;
+ /* Number of TQM ring 5 entries. */
+ uint32_t tqm_ring5_num_entries;
+ /* Number of TQM ring 6 entries. */
+ uint32_t tqm_ring6_num_entries;
+ /* Number of TQM ring 7 entries. */
+ uint32_t tqm_ring7_num_entries;
+ /* Number of MR/AV entries. */
+ uint32_t mrav_num_entries;
+ /* Number of Timer entries. */
+ uint32_t tim_num_entries;
+ /* Number of entries to reserve for QP1 */
+ uint16_t qp_num_qp1_entries;
+ /* Number of entries to reserve for L2 */
+ uint16_t qp_num_l2_entries;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t qp_entry_size;
+ /* Number of entries to reserve for L2 */
+ uint16_t srq_num_l2_entries;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t srq_entry_size;
+ /* Number of entries to reserve for L2 */
+ uint16_t cq_num_l2_entries;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t cq_entry_size;
+ /* Number of entries to reserve for VNIC entries */
+ uint16_t vnic_num_vnic_entries;
+ /* Number of entries to reserve for Ring table entries */
+ uint16_t vnic_num_ring_table_entries;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t vnic_entry_size;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t stat_entry_size;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t tqm_entry_size;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t mrav_entry_size;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t tim_entry_size;
+} __attribute__((packed));
+
+/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_func_backing_store_qcfg *
+ ********************************/
+
+
+/* hwrm_func_backing_store_qcfg_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_func_backing_store_qcfg_output (size:1920b/240B) */
+struct hwrm_func_backing_store_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /*
+ * When set, the firmware only uses on-chip resources and does not
+ * expect any backing store to be provided by the host driver. This
+ * mode provides minimal L2 functionality (e.g. limited L2 resources,
+ * no RoCE).
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_FLAGS_PREBOOT_MODE \
+ UINT32_C(0x1)
+ uint8_t unused_0[4];
+ /*
+ * This bit must be '1' for the qp fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_QP \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the srq fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_SRQ \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the cq fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_CQ \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the vnic fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_VNIC \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the stat fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_STAT \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the tqm_sp fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_SP \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the tqm_ring0 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING0 \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the tqm_ring1 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING1 \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the tqm_ring2 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING2 \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the tqm_ring3 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING3 \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the tqm_ring4 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING4 \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the tqm_ring5 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING5 \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the tqm_ring6 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING6 \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the tqm_ring7 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING7 \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the mrav fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_MRAV \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the tim fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TIM \
+ UINT32_C(0x8000)
+ /* QPC page size and level. */
+ uint8_t qpc_pg_size_qpc_lvl;
+ /* QPC PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2
+ /* QPC page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_1G
+ /* SRQ page size and level. */
+ uint8_t srq_pg_size_srq_lvl;
+ /* SRQ PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2
+ /* SRQ page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_1G
+ /* CQ page size and level. */
+ uint8_t cq_pg_size_cq_lvl;
+ /* CQ PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2
+ /* CQ page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_1G
+ /* VNIC page size and level. */
+ uint8_t vnic_pg_size_vnic_lvl;
+ /* VNIC PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2
+ /* VNIC page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_1G
+ /* Stat page size and level. */
+ uint8_t stat_pg_size_stat_lvl;
+ /* Stat PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2
+ /* Stat page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_1G
+ /* TQM slow path page size and level. */
+ uint8_t tqm_sp_pg_size_tqm_sp_lvl;
+ /* TQM slow path PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2
+ /* TQM slow path page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_1G
+ /* TQM ring 0 page size and level. */
+ uint8_t tqm_ring0_pg_size_tqm_ring0_lvl;
+ /* TQM ring 0 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2
+ /* TQM ring 0 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_1G
+ /* TQM ring 1 page size and level. */
+ uint8_t tqm_ring1_pg_size_tqm_ring1_lvl;
+ /* TQM ring 1 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2
+ /* TQM ring 1 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_1G
+ /* TQM ring 2 page size and level. */
+ uint8_t tqm_ring2_pg_size_tqm_ring2_lvl;
+ /* TQM ring 2 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2
+ /* TQM ring 2 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_1G
+ /* TQM ring 3 page size and level. */
+ uint8_t tqm_ring3_pg_size_tqm_ring3_lvl;
+ /* TQM ring 3 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2
+ /* TQM ring 3 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_1G
+ /* TQM ring 4 page size and level. */
+ uint8_t tqm_ring4_pg_size_tqm_ring4_lvl;
+ /* TQM ring 4 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2
+ /* TQM ring 4 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_1G
+ /* TQM ring 5 page size and level. */
+ uint8_t tqm_ring5_pg_size_tqm_ring5_lvl;
+ /* TQM ring 5 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2
+ /* TQM ring 5 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_1G
+ /* TQM ring 6 page size and level. */
+ uint8_t tqm_ring6_pg_size_tqm_ring6_lvl;
+ /* TQM ring 6 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2
+ /* TQM ring 6 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_1G
+ /* TQM ring 7 page size and level. */
+ uint8_t tqm_ring7_pg_size_tqm_ring7_lvl;
+ /* TQM ring 7 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2
+ /* TQM ring 7 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_1G
+ /* MR/AV page size and level. */
+ uint8_t mrav_pg_size_mrav_lvl;
+ /* MR/AV PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2
+ /* MR/AV page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_1G
+ /* Timer page size and level. */
+ uint8_t tim_pg_size_tim_lvl;
+ /* Timer PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2
+ /* Timer page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_1G
+ /* QP page directory. */
+ uint64_t qpc_page_dir;
+ /* SRQ page directory. */
+ uint64_t srq_page_dir;
+ /* CQ page directory. */
+ uint64_t cq_page_dir;
+ /* VNIC page directory. */
+ uint64_t vnic_page_dir;
+ /* Stat page directory. */
+ uint64_t stat_page_dir;
+ /* TQM slowpath page directory. */
+ uint64_t tqm_sp_page_dir;
+ /* TQM ring 0 page directory. */
+ uint64_t tqm_ring0_page_dir;
+ /* TQM ring 1 page directory. */
+ uint64_t tqm_ring1_page_dir;
+ /* TQM ring 2 page directory. */
+ uint64_t tqm_ring2_page_dir;
+ /* TQM ring 3 page directory. */
+ uint64_t tqm_ring3_page_dir;
+ /* TQM ring 4 page directory. */
+ uint64_t tqm_ring4_page_dir;
+ /* TQM ring 5 page directory. */
+ uint64_t tqm_ring5_page_dir;
+ /* TQM ring 6 page directory. */
+ uint64_t tqm_ring6_page_dir;
+ /* TQM ring 7 page directory. */
+ uint64_t tqm_ring7_page_dir;
+ /* MR/AV page directory. */
+ uint64_t mrav_page_dir;
+ /* Timer page directory. */
+ uint64_t tim_page_dir;
+ /* Number of entries to reserve for QP1 */
+ uint16_t qp_num_qp1_entries;
+ /* Number of entries to reserve for L2 */
+ uint16_t qp_num_l2_entries;
+ /* Number of QPs. */
+ uint32_t qp_num_entries;
+ /* Number of SRQs. */
+ uint32_t srq_num_entries;
+ /* Number of entries to reserve for L2 */
+ uint16_t srq_num_l2_entries;
+ /* Number of entries to reserve for L2 */
+ uint16_t cq_num_l2_entries;
+ /* Number of CQs. */
+ uint32_t cq_num_entries;
+ /* Number of entries to reserve for VNIC entries */
+ uint16_t vnic_num_vnic_entries;
+ /* Number of entries to reserve for Ring table entries */
+ uint16_t vnic_num_ring_table_entries;
+ /* Number of Stats. */
+ uint32_t stat_num_entries;
+ /* Number of TQM slowpath entries. */
+ uint32_t tqm_sp_num_entries;
+ /* Number of TQM ring 0 entries. */
+ uint32_t tqm_ring0_num_entries;
+ /* Number of TQM ring 1 entries. */
+ uint32_t tqm_ring1_num_entries;
+ /* Number of TQM ring 2 entries. */
+ uint32_t tqm_ring2_num_entries;
+ /* Number of TQM ring 3 entries. */
+ uint32_t tqm_ring3_num_entries;
+ /* Number of TQM ring 4 entries. */
+ uint32_t tqm_ring4_num_entries;
+ /* Number of TQM ring 5 entries. */
+ uint32_t tqm_ring5_num_entries;
+ /* Number of TQM ring 6 entries. */
+ uint32_t tqm_ring6_num_entries;
+ /* Number of TQM ring 7 entries. */
+ uint32_t tqm_ring7_num_entries;
+ /* Number of MR/AV entries. */
+ uint32_t mrav_num_entries;
+ /* Number of Timer entries. */
+ uint32_t tim_num_entries;
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************
+ * hwrm_port_phy_cfg *
+ *********************/
+
+
+/* hwrm_port_phy_cfg_input (size:448b/56B) */
+struct hwrm_port_phy_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When this bit is set to '1', the PHY for the port shall
+ * be reset.
+ *
+ * # If this bit is set to 1, then the HWRM shall reset the
+ * PHY after applying PHY configuration changes specified
+ * in this command.
+ * # In order to guarantee that PHY configuration changes
+ * specified in this command take effect, the HWRM
+ * client should set this flag to 1.
+ * # If this bit is not set to 1, then the HWRM may reset
+ * the PHY depending on the current PHY configuration and
+ * settings specified in this command.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY \
+ UINT32_C(0x1)
+ /* deprecated bit. Do not use!!! */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED \
+ UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', the link shall be forced to
+ * the force_link_speed value.
+ *
+ * When this bit is set to '1', the HWRM client should
+ * not enable any of the auto negotiation related
+ * fields represented by auto_XXX fields in this command.
+ * When this bit is set to '1' and the HWRM client has
+ * enabled a auto_XXX field in this command, then the
+ * HWRM shall ignore the enabled auto_XXX field.
+ *
+ * When this bit is set to zero, the link
+ * shall be allowed to autoneg.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is set to '1', the auto-negotiation process
+ * shall be restarted on the link.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG \
+ UINT32_C(0x8)
+ /*
+ * When this bit is set to '1', Energy Efficient Ethernet
+ * (EEE) is requested to be enabled on this link.
+ * If EEE is not supported on this port, then this flag
+ * shall be ignored by the HWRM.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE \
+ UINT32_C(0x10)
+ /*
+ * When this bit is set to '1', Energy Efficient Ethernet
+ * (EEE) is requested to be disabled on this link.
+ * If EEE is not supported on this port, then this flag
+ * shall be ignored by the HWRM.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE \
+ UINT32_C(0x20)
+ /*
+ * When this bit is set to '1' and EEE is enabled on this
+ * link, then TX LPI is requested to be enabled on the link.
+ * If EEE is not supported on this port, then this flag
+ * shall be ignored by the HWRM.
+ * If EEE is disabled on this port, then this flag shall be
+ * ignored by the HWRM.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_ENABLE \
+ UINT32_C(0x40)
+ /*
+ * When this bit is set to '1' and EEE is enabled on this
+ * link, then TX LPI is requested to be disabled on the link.
+ * If EEE is not supported on this port, then this flag
+ * shall be ignored by the HWRM.
+ * If EEE is disabled on this port, then this flag shall be
+ * ignored by the HWRM.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE \
+ UINT32_C(0x80)
+ /*
+ * When set to 1, then the HWRM shall enable FEC autonegotitation
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC autonegotiation is not supported, then the HWRM shall ignore this
+ * flag.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE \
+ UINT32_C(0x100)
+ /*
+ * When set to 1, then the HWRM shall disable FEC autonegotiation
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC autonegotiation is not supported, then the HWRM shall ignore this
+ * flag.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \
+ UINT32_C(0x200)
+ /*
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire Code)
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this
+ * flag.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \
+ UINT32_C(0x400)
+ /*
+ * When set to 1, then the HWRM shall disable FEC CLAUSE 74 (Fire Code)
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this
+ * flag.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE \
+ UINT32_C(0x800)
+ /*
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed Solomon)
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC CLAUSE 91 is not supported, then the HWRM shall ignore this
+ * flag.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE \
+ UINT32_C(0x1000)
+ /*
+ * When set to 1, then the HWRM shall disable FEC CLAUSE 91 (Reed Solomon)
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC CLAUSE 91 is not supported, then the HWRM shall ignore this
+ * flag.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \
+ UINT32_C(0x2000)
+ /*
+ * When this bit is set to '1', the link shall be forced to
+ * be taken down.
+ *
+ * # When this bit is set to '1", all other
+ * command input settings related to the link speed shall
+ * be ignored.
+ * Once the link state is forced down, it can be
+ * explicitly cleared from that state by setting this flag
+ * to '0'.
+ * # If this flag is set to '0', then the link shall be
+ * cleared from forced down state if the link is in forced
+ * down state.
+ * There may be conditions (e.g. out-of-band or sideband
+ * configuration changes for the link) outside the scope
+ * of the HWRM implementation that may clear forced down
+ * link state.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN \
+ UINT32_C(0x4000)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the auto_mode field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the auto_duplex field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the auto_pause field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the auto_link_speed field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the auto_link_speed_mask field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the wirespeed field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIRESPEED \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the lpbk field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the preemphasis field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the force_pause field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the eee_link_speed_mask field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the tx_lpi_timer field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER \
+ UINT32_C(0x400)
+ /* Port ID of port that is to be configured. */
+ uint16_t port_id;
+ /*
+ * This is the speed that will be used if the force
+ * bit is '1'. If unsupported speed is selected, an error
+ * will be generated.
+ */
+ uint16_t force_link_speed;
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
+ /* 20Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB
+ /*
+ * This value is used to identify what autoneg mode is
+ * used when the link speed is not being forced.
+ */
+ uint8_t auto_mode;
+ /* Disable autoneg or autoneg disabled. No speeds are selected. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE UINT32_C(0x0)
+ /* Select all possible speeds for autoneg mode. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
+ /*
+ * Select only the auto_link_speed speed for autoneg mode. This mode has
+ * been DEPRECATED. An HWRM client should not use this mode.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
+ /*
+ * Select the auto_link_speed or any speed below that speed for autoneg.
+ * This mode has been DEPRECATED. An HWRM client should not use this mode.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
+ /*
+ * Select the speeds based on the corresponding link speed mask value
+ * that is provided.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK
+ /*
+ * This is the duplex setting that will be used if the autoneg_mode
+ * is "one_speed" or "one_or_below".
+ */
+ uint8_t auto_duplex;
+ /* Half Duplex will be requested. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF UINT32_C(0x0)
+ /* Full duplex will be requested. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL UINT32_C(0x1)
+ /* Both Half and Full dupex will be requested. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH
+ /*
+ * This value is used to configure the pause that will be
+ * used for autonegotiation.
+ * Add text on the usage of auto_pause and force_pause.
+ */
+ uint8_t auto_pause;
+ /*
+ * When this bit is '1', Generation of tx pause messages
+ * has been requested. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages
+ * has been requested. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX \
+ UINT32_C(0x2)
+ /*
+ * When set to 1, the advertisement of pause is enabled.
+ *
+ * # When the auto_mode is not set to none and this flag is
+ * set to 1, then the auto_pause bits on this port are being
+ * advertised and autoneg pause results are being interpreted.
+ * # When the auto_mode is not set to none and this
+ * flag is set to 0, the pause is forced as indicated in
+ * force_pause, and also advertised as auto_pause bits, but
+ * the autoneg results are not interpreted since the pause
+ * configuration is being forced.
+ * # When the auto_mode is set to none and this flag is set to
+ * 1, auto_pause bits should be ignored and should be set to 0.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE \
+ UINT32_C(0x4)
+ uint8_t unused_0;
+ /*
+ * This is the speed that will be used if the autoneg_mode
+ * is "one_speed" or "one_or_below". If an unsupported speed
+ * is selected, an error will be generated.
+ */
+ uint16_t auto_link_speed;
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
+ /* 20Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB
+ /*
+ * This is a mask of link speeds that will be used if
+ * autoneg_mode is "mask". If unsupported speed is enabled
+ * an error will be generated.
+ */
+ uint16_t auto_link_speed_mask;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \
+ UINT32_C(0x10)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \
+ UINT32_C(0x2000)
+ /* This value controls the wirespeed feature. */
+ uint8_t wirespeed;
+ /* Wirespeed feature is disabled. */
+ #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_OFF UINT32_C(0x0)
+ /* Wirespeed feature is enabled. */
+ #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON
+ /* This value controls the loopback setting for the PHY. */
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1)
+ /*
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2)
+ /*
+ * The HW will be configured with external loopback such that
+ * host data is sent on the trasmitter and based on the external
+ * loopback connection the data will be received without modification.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL UINT32_C(0x3)
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL
+ /*
+ * This value is used to configure the pause that will be
+ * used for force mode.
+ */
+ uint8_t force_pause;
+ /*
+ * When this bit is '1', Generation of tx pause messages
+ * is supported. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages
+ * is supported. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2)
+ uint8_t unused_1;
+ /*
+ * This value controls the pre-emphasis to be used for the
+ * link. Driver should not set this value (use
+ * enable.preemphasis = 0) unless driver is sure of setting.
+ * Normally HWRM FW will determine proper pre-emphasis.
+ */
+ uint32_t preemphasis;
+ /*
+ * Setting for link speed mask that is used to
+ * advertise speeds during autonegotiation when EEE is enabled.
+ * This field is valid only when EEE is enabled.
+ * The speeds specified in this field shall be a subset of
+ * speeds specified in auto_link_speed_mask.
+ * If EEE is enabled,then at least one speed shall be provided
+ * in this mask.
+ */
+ uint16_t eee_link_speed_mask;
+ /* Reserved */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
+ /* Reserved */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
+ /* Reserved */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 \
+ UINT32_C(0x10)
+ /* Reserved */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
+ uint8_t unused_2[2];
+ /*
+ * Reuested setting of TX LPI timer in microseconds.
+ * This field is valid only when EEE is enabled and TX LPI is
+ * enabled.
+ */
+ uint32_t tx_lpi_timer;
+ #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0
+ uint32_t unused_3;
+} __attribute__((packed));
+
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
+struct hwrm_port_phy_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_cfg_cmd_err {
+ /*
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* Unable to complete operation due to invalid speed */
+ #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED UINT32_C(0x1)
+ /*
+ * retry the command since the phy is not ready.
+ * retry count is returned in opaque_0.
+ * This is only valid for the first command and
+ * this value will not change for successive calls.
+ * but if a 0 is returned at any time then this should
+ * be treated as an un recoverable failure,
+ *
+ * retry interval in milli seconds is returned in opaque_1.
+ * This specifies the time that user should wait before
+ * issuing the next port_phy_cfg command.
+ */
+ #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_LAST \
+ HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/**********************
+ * hwrm_port_phy_qcfg *
+ **********************/
+
+
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
+struct hwrm_port_phy_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is to be queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_phy_qcfg_output (size:768b/96B) */
+struct hwrm_port_phy_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value indicates the current link status. */
+ uint8_t link;
+ /* There is no link or cable detected. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK UINT32_C(0x0)
+ /* There is no link, but a cable has been detected. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL UINT32_C(0x1)
+ /* There is a link. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK
+ uint8_t unused_0;
+ /* This value indicates the current link speed of the connection. */
+ uint16_t link_speed;
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB UINT32_C(0x1)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB UINT32_C(0xa)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB UINT32_C(0x14)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB UINT32_C(0x19)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB UINT32_C(0x64)
+ /* 20Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB UINT32_C(0xc8)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB UINT32_C(0xfa)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB UINT32_C(0x190)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB UINT32_C(0x1f4)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB
+ /*
+ * This value is indicates the duplex of the current
+ * configuration.
+ */
+ uint8_t duplex_cfg;
+ /* Half Duplex connection. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_HALF UINT32_C(0x0)
+ /* Full duplex connection. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL
+ /*
+ * This value is used to indicate the current
+ * pause configuration. When autoneg is enabled, this value
+ * represents the autoneg results of pause configuration.
+ */
+ uint8_t pause;
+ /*
+ * When this bit is '1', Generation of tx pause messages
+ * is supported. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages
+ * is supported. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2)
+ /*
+ * The supported speeds for the port. This is a bit mask.
+ * For each speed that is supported, the corrresponding
+ * bit will be set to '1'.
+ */
+ uint16_t support_speeds;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB \
+ UINT32_C(0x10)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB \
+ UINT32_C(0x2000)
+ /*
+ * Current setting of forced link speed.
+ * When the link speed is not being forced, this
+ * value shall be set to 0.
+ */
+ uint16_t force_link_speed;
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
+ /* 20Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB \
+ UINT32_C(0x190)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB \
+ UINT32_C(0x1f4)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB \
+ UINT32_C(0x3e8)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB \
+ UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB
+ /* Current setting of auto negotiation mode. */
+ uint8_t auto_mode;
+ /* Disable autoneg or autoneg disabled. No speeds are selected. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE UINT32_C(0x0)
+ /* Select all possible speeds for autoneg mode. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
+ /*
+ * Select only the auto_link_speed speed for autoneg mode. This mode has
+ * been DEPRECATED. An HWRM client should not use this mode.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
+ /*
+ * Select the auto_link_speed or any speed below that speed for autoneg.
+ * This mode has been DEPRECATED. An HWRM client should not use this mode.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
+ /*
+ * Select the speeds based on the corresponding link speed mask value
+ * that is provided.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK
+ /*
+ * Current setting of pause autonegotiation.
+ * Move autoneg_pause flag here.
+ */
+ uint8_t auto_pause;
+ /*
+ * When this bit is '1', Generation of tx pause messages
+ * has been requested. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages
+ * has been requested. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX \
+ UINT32_C(0x2)
+ /*
+ * When set to 1, the advertisement of pause is enabled.
+ *
+ * # When the auto_mode is not set to none and this flag is
+ * set to 1, then the auto_pause bits on this port are being
+ * advertised and autoneg pause results are being interpreted.
+ * # When the auto_mode is not set to none and this
+ * flag is set to 0, the pause is forced as indicated in
+ * force_pause, and also advertised as auto_pause bits, but
+ * the autoneg results are not interpreted since the pause
+ * configuration is being forced.
+ * # When the auto_mode is set to none and this flag is set to
+ * 1, auto_pause bits should be ignored and should be set to 0.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE \
+ UINT32_C(0x4)
+ /*
+ * Current setting for auto_link_speed. This field is only
+ * valid when auto_mode is set to "one_speed" or "one_or_below".
+ */
+ uint16_t auto_link_speed;
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
+ /* 20Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB \
+ UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB
+ /*
+ * Current setting for auto_link_speed_mask that is used to
+ * advertise speeds during autonegotiation.
+ * This field is only valid when auto_mode is set to "mask".
+ * The speeds specified in this field shall be a subset of
+ * supported speeds on this port.
+ */
+ uint16_t auto_link_speed_mask;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \
+ UINT32_C(0x10)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \
+ UINT32_C(0x2000)
+ /* Current setting for wirespeed. */
+ uint8_t wirespeed;
+ /* Wirespeed feature is disabled. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_OFF UINT32_C(0x0)
+ /* Wirespeed feature is enabled. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON
+ /* Current setting for loopback. */
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1)
+ /*
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2)
+ /*
+ * The HW will be configured with external loopback such that
+ * host data is sent on the trasmitter and based on the external
+ * loopback connection the data will be received without modification.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL UINT32_C(0x3)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL
+ /*
+ * Current setting of forced pause.
+ * When the pause configuration is not being forced, then
+ * this value shall be set to 0.
+ */
+ uint8_t force_pause;
+ /*
+ * When this bit is '1', Generation of tx pause messages
+ * is supported. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages
+ * is supported. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX UINT32_C(0x2)
+ /*
+ * This value indicates the current status of the optics module on
+ * this port.
+ */
+ uint8_t module_status;
+ /* Module is inserted and accepted */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE \
+ UINT32_C(0x0)
+ /* Module is rejected and transmit side Laser is disabled. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX \
+ UINT32_C(0x1)
+ /* Module mismatch warning. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG \
+ UINT32_C(0x2)
+ /* Module is rejected and powered down. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN \
+ UINT32_C(0x3)
+ /* Module is not inserted. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \
+ UINT32_C(0x4)
+ /* Module status is not applicable. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \
+ UINT32_C(0xff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE
+ /* Current setting for preemphasis. */
+ uint32_t preemphasis;
+ /* This field represents the major version of the PHY. */
+ uint8_t phy_maj;
+ /* This field represents the minor version of the PHY. */
+ uint8_t phy_min;
+ /* This field represents the build version of the PHY. */
+ uint8_t phy_bld;
+ /* This value represents a PHY type. */
+ uint8_t phy_type;
+ /* Unknown */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN \
+ UINT32_C(0x0)
+ /* BASE-CR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR \
+ UINT32_C(0x1)
+ /* BASE-KR4 (Deprecated) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 \
+ UINT32_C(0x2)
+ /* BASE-LR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR \
+ UINT32_C(0x3)
+ /* BASE-SR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR \
+ UINT32_C(0x4)
+ /* BASE-KR2 (Deprecated) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 \
+ UINT32_C(0x5)
+ /* BASE-KX */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX \
+ UINT32_C(0x6)
+ /* BASE-KR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR \
+ UINT32_C(0x7)
+ /* BASE-T */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET \
+ UINT32_C(0x8)
+ /* EEE capable BASE-T */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE \
+ UINT32_C(0x9)
+ /* SGMII connected external PHY */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY \
+ UINT32_C(0xa)
+ /* 25G_BASECR_CA_L */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L \
+ UINT32_C(0xb)
+ /* 25G_BASECR_CA_S */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S \
+ UINT32_C(0xc)
+ /* 25G_BASECR_CA_N */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N \
+ UINT32_C(0xd)
+ /* 25G_BASESR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR \
+ UINT32_C(0xe)
+ /* 100G_BASECR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4 \
+ UINT32_C(0xf)
+ /* 100G_BASESR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4 \
+ UINT32_C(0x10)
+ /* 100G_BASELR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4 \
+ UINT32_C(0x11)
+ /* 100G_BASEER4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4 \
+ UINT32_C(0x12)
+ /* 100G_BASESR10 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10 \
+ UINT32_C(0x13)
+ /* 40G_BASECR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4 \
+ UINT32_C(0x14)
+ /* 40G_BASESR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4 \
+ UINT32_C(0x15)
+ /* 40G_BASELR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4 \
+ UINT32_C(0x16)
+ /* 40G_BASEER4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4 \
+ UINT32_C(0x17)
+ /* 40G_ACTIVE_CABLE */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE \
+ UINT32_C(0x18)
+ /* 1G_baseT */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET \
+ UINT32_C(0x19)
+ /* 1G_baseSX */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX \
+ UINT32_C(0x1a)
+ /* 1G_baseCX */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX \
+ UINT32_C(0x1b)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX
+ /* This value represents a media type. */
+ uint8_t media_type;
+ /* Unknown */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN UINT32_C(0x0)
+ /* Twisted Pair */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP UINT32_C(0x1)
+ /* Direct Attached Copper */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC UINT32_C(0x2)
+ /* Fiber */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE UINT32_C(0x3)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE
+ /* This value represents a transceiver type. */
+ uint8_t xcvr_pkg_type;
+ /* PHY and MAC are in the same package */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \
+ UINT32_C(0x1)
+ /* PHY and MAC are in different packages */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \
+ UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL
+ uint8_t eee_config_phy_addr;
+ /* This field represents PHY address. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK \
+ UINT32_C(0x1f)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0
+ /*
+ * This field represents flags related to EEE configuration.
+ * These EEE configuration flags are valid only when the
+ * auto_mode is not set to none (in other words autonegotiation
+ * is enabled).
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK \
+ UINT32_C(0xe0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5
+ /*
+ * When set to 1, Energy Efficient Ethernet (EEE) mode is enabled.
+ * Speeds for autoneg with EEE mode enabled
+ * are based on eee_link_speed_mask.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \
+ UINT32_C(0x20)
+ /*
+ * This flag is valid only when eee_enabled is set to 1.
+ *
+ * # If eee_enabled is set to 0, then EEE mode is disabled
+ * and this flag shall be ignored.
+ * # If eee_enabled is set to 1 and this flag is set to 1,
+ * then Energy Efficient Ethernet (EEE) mode is enabled
+ * and in use.
+ * # If eee_enabled is set to 1 and this flag is set to 0,
+ * then Energy Efficient Ethernet (EEE) mode is enabled
+ * but is currently not in use.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE \
+ UINT32_C(0x40)
+ /*
+ * This flag is valid only when eee_enabled is set to 1.
+ *
+ * # If eee_enabled is set to 0, then EEE mode is disabled
+ * and this flag shall be ignored.
+ * # If eee_enabled is set to 1 and this flag is set to 1,
+ * then Energy Efficient Ethernet (EEE) mode is enabled
+ * and TX LPI is enabled.
+ * # If eee_enabled is set to 1 and this flag is set to 0,
+ * then Energy Efficient Ethernet (EEE) mode is enabled
+ * but TX LPI is disabled.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI \
+ UINT32_C(0x80)
+ /*
+ * When set to 1, the parallel detection is used to determine
+ * the speed of the link partner.
+ *
+ * Parallel detection is used when a autonegotiation capable
+ * device is connected to a link parter that is not capable
+ * of autonegotiation.
+ */
+ uint8_t parallel_detect;
+ /*
+ * When set to 1, the parallel detection is used to determine
+ * the speed of the link partner.
+ *
+ * Parallel detection is used when a autonegotiation capable
+ * device is connected to a link parter that is not capable
+ * of autonegotiation.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT UINT32_C(0x1)
+ /*
+ * The advertised speeds for the port by the link partner.
+ * Each advertised speed will be set to '1'.
+ */
+ uint16_t link_partner_adv_speeds;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \
+ UINT32_C(0x10)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \
+ UINT32_C(0x2000)
+ /*
+ * The advertised autoneg for the port by the link partner.
+ * This field is deprecated and should be set to 0.
+ */
+ uint8_t link_partner_adv_auto_mode;
+ /* Disable autoneg or autoneg disabled. No speeds are selected. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_NONE \
+ UINT32_C(0x0)
+ /* Select all possible speeds for autoneg mode. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \
+ UINT32_C(0x1)
+ /*
+ * Select only the auto_link_speed speed for autoneg mode. This mode has
+ * been DEPRECATED. An HWRM client should not use this mode.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \
+ UINT32_C(0x2)
+ /*
+ * Select the auto_link_speed or any speed below that speed for autoneg.
+ * This mode has been DEPRECATED. An HWRM client should not use this mode.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \
+ UINT32_C(0x3)
+ /*
+ * Select the speeds based on the corresponding link speed mask value
+ * that is provided.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \
+ UINT32_C(0x4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
+ /* The advertised pause settings on the port by the link partner. */
+ uint8_t link_partner_adv_pause;
+ /*
+ * When this bit is '1', Generation of tx pause messages
+ * is supported. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages
+ * is supported. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \
+ UINT32_C(0x2)
+ /*
+ * Current setting for link speed mask that is used to
+ * advertise speeds during autonegotiation when EEE is enabled.
+ * This field is valid only when eee_enabled flags is set to 1.
+ * The speeds specified in this field shall be a subset of
+ * speeds specified in auto_link_speed_mask.
+ */
+ uint16_t adv_eee_link_speed_mask;
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
+ UINT32_C(0x10)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
+ /*
+ * Current setting for link speed mask that is advertised by
+ * the link partner when EEE is enabled.
+ * This field is valid only when eee_enabled flags is set to 1.
+ */
+ uint16_t link_partner_adv_eee_link_speed_mask;
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
+ UINT32_C(0x10)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
+ uint32_t xcvr_identifier_type_tx_lpi_timer;
+ /*
+ * Current setting of TX LPI timer in microseconds.
+ * This field is valid only when_eee_enabled flag is set to 1
+ * and tx_lpi_enabled is set to 1.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0
+ /* This value represents transceiver identifier type. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24
+ /* Unknown */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \
+ (UINT32_C(0x0) << 24)
+ /* SFP/SFP+/SFP28 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \
+ (UINT32_C(0x3) << 24)
+ /* QSFP+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \
+ (UINT32_C(0xc) << 24)
+ /* QSFP+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \
+ (UINT32_C(0xd) << 24)
+ /* QSFP28 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \
+ (UINT32_C(0x11) << 24)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28
+ /*
+ * This value represents the current configuration of
+ * Forward Error Correction (FEC) on the port.
+ */
+ uint16_t fec_cfg;
+ /*
+ * When set to 1, then FEC is not supported on this port. If this flag
+ * is set to 1, then all other FEC configuration flags shall be ignored.
+ * When set to 0, then FEC is supported as indicated by other
+ * configuration flags.
+ * If no cable is attached and the HWRM does not yet know the FEC
+ * capability, then the HWRM shall set this flag to 1 when reporting
+ * FEC capability.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \
+ UINT32_C(0x1)
+ /*
+ * When set to 1, then FEC autonegotiation is supported on this port.
+ * When set to 0, then FEC autonegotiation is not supported on this port.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * When set to 1, then FEC autonegotiation is enabled on this port.
+ * When set to 0, then FEC autonegotiation is disabled if supported.
+ * This flag should be ignored if FEC autonegotiation is not supported on this port.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \
+ UINT32_C(0x4)
+ /*
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on this port.
+ * When set to 0, then FEC CLAUSE 74 (Fire Code) is not supported on this port.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on this port.
+ * When set to 0, then FEC CLAUSE 74 (Fire Code) is disabled if supported.
+ * This flag should be ignored if FEC CLAUSE 74 is not supported on this port.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \
+ UINT32_C(0x10)
+ /*
+ * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported on this port.
+ * When set to 0, then FEC CLAUSE 91 (Reed Solomon) is not supported on this port.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \
+ UINT32_C(0x20)
+ /*
+ * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled on this port.
+ * When set to 0, then FEC CLAUSE 91 (Reed Solomon) is disabled if supported.
+ * This flag should be ignored if FEC CLAUSE 91 is not supported on this port.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \
+ UINT32_C(0x40)
+ /*
+ * This value is indicates the duplex of the current
+ * connection state.
+ */
+ uint8_t duplex_state;
+ /* Half Duplex connection. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF UINT32_C(0x0)
+ /* Full duplex connection. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL
+ /* Option flags fields. */
+ uint8_t option_flags;
+ /* When this bit is '1', Media auto detect is enabled. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_MEDIA_AUTO_DETECT \
+ UINT32_C(0x1)
+ /*
+ * Up to 16 bytes of null padded ASCII string representing
+ * PHY vendor.
+ * If the string is set to null, then the vendor name is not
+ * available.
+ */
+ char phy_vendor_name[16];
+ /*
+ * Up to 16 bytes of null padded ASCII string that
+ * identifies vendor specific part number of the PHY.
+ * If the string is set to null, then the vendor specific
+ * part number is not available.
+ */
+ char phy_vendor_partnumber[16];
+ uint8_t unused_2[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************
+ * hwrm_port_mac_cfg *
+ *********************/
+
+
+/* hwrm_port_mac_cfg_input (size:320b/40B) */
+struct hwrm_port_mac_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * In this field, there are a number of CoS mappings related flags
+ * that are used to configure CoS mappings and their corresponding
+ * priorities in the hardware.
+ * For the priorities of CoS mappings, the HWRM uses the following
+ * priority order (high to low) by default:
+ * # vlan pri
+ * # ip_dscp
+ * # tunnel_vlan_pri
+ * # default cos
+ *
+ * A subset of CoS mappings can be enabled.
+ * If a priority is not specified for an enabled CoS mapping, the
+ * priority will be assigned in the above order for the enabled CoS
+ * mappings. For example, if vlan_pri and ip_dscp CoS mappings are
+ * enabled and their priorities are not specified, the following
+ * priority order (high to low) will be used by the HWRM:
+ * # vlan_pri
+ * # ip_dscp
+ * # default cos
+ *
+ * vlan_pri CoS mapping together with default CoS with lower priority
+ * are enabled by default by the HWRM.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', this command will configure
+ * the MAC to match the current link state of the PHY.
+ * If the link is not established on the PHY, then this
+ * bit has no effect.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_MATCH_LINK \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', the inner VLAN PRI to CoS mapping
+ * is requested to be enabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_VLAN_PRI2COS_ENABLE \
+ UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', tunnel VLAN PRI field to
+ * CoS mapping is requested to be enabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_TUNNEL_PRI2COS_ENABLE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is set to '1', the IP DSCP to CoS mapping is
+ * requested to be enabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_IP_DSCP2COS_ENABLE \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the HWRM is requested to
+ * enable timestamp capture capability on the receive side
+ * of this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x10)
+ /*
+ * When this bit is '1', the HWRM is requested to
+ * disable timestamp capture capability on the receive side
+ * of this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the HWRM is requested to
+ * enable timestamp capture capability on the transmit side
+ * of this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1', the HWRM is requested to
+ * disable timestamp capture capability on the transmit side
+ * of this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE \
+ UINT32_C(0x80)
+ /*
+ * When this bit is '1', the Out-Of-Box WoL is requested to
+ * be enabled on this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_OOB_WOL_ENABLE \
+ UINT32_C(0x100)
+ /*
+ * When this bit is '1', the the Out-Of-Box WoL is requested to
+ * be disabled on this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_OOB_WOL_DISABLE \
+ UINT32_C(0x200)
+ /*
+ * When this bit is set to '1', the inner VLAN PRI to CoS mapping
+ * is requested to be disabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_VLAN_PRI2COS_DISABLE \
+ UINT32_C(0x400)
+ /*
+ * When this bit is set to '1', tunnel VLAN PRI field to
+ * CoS mapping is requested to be disabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_TUNNEL_PRI2COS_DISABLE \
+ UINT32_C(0x800)
+ /*
+ * When this bit is set to '1', the IP DSCP to CoS mapping is
+ * requested to be disabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_IP_DSCP2COS_DISABLE \
+ UINT32_C(0x1000)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the ipg field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_IPG \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the lpbk field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_LPBK \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the vlan_pri2cos_map_pri field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_VLAN_PRI2COS_MAP_PRI \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the tunnel_pri2cos_map_pri field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TUNNEL_PRI2COS_MAP_PRI \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the dscp2cos_map_pri field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_DSCP2COS_MAP_PRI \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the rx_ts_capture_ptp_msg_type field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the tx_ts_capture_ptp_msg_type field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the cos_field_cfg field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_COS_FIELD_CFG \
+ UINT32_C(0x100)
+ /* Port ID of port that is to be configured. */
+ uint16_t port_id;
+ /*
+ * This value is used to configure the minimum IPG that will
+ * be sent between packets by this port.
+ */
+ uint8_t ipg;
+ /* This value controls the loopback setting for the MAC. */
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1)
+ /*
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2)
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_LAST \
+ HWRM_PORT_MAC_CFG_INPUT_LPBK_REMOTE
+ /*
+ * This value controls the priority setting of VLAN PRI to CoS
+ * mapping based on VLAN Tags of inner packet headers of
+ * tunneled packets or packet headers of non-tunneled packets.
+ *
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being specified.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is being
+ * the lowest priority and 3 is being the highest priority.
+ */
+ uint8_t vlan_pri2cos_map_pri;
+ /* Reserved field. */
+ uint8_t reserved1;
+ /*
+ * This value controls the priority setting of VLAN PRI to CoS
+ * mapping based on VLAN Tags of tunneled header.
+ * This mapping only applies when tunneled headers
+ * are present.
+ *
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being specified.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is being
+ * the lowest priority and 3 is being the highest priority.
+ */
+ uint8_t tunnel_pri2cos_map_pri;
+ /*
+ * This value controls the priority setting of IP DSCP to CoS
+ * mapping based on inner IP header of tunneled packets or
+ * IP header of non-tunneled packets.
+ *
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being specified.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is being
+ * the lowest priority and 3 is being the highest priority.
+ */
+ uint8_t dscp2pri_map_pri;
+ /*
+ * This is a 16-bit bit mask that is used to request a
+ * specific configuration of time stamp capture of PTP messages
+ * on the receive side of this port.
+ * This field shall be ignored if the ptp_rx_ts_capture_enable
+ * flag is not set in this command.
+ * Otherwise, if bit 'i' is set, then the HWRM is being
+ * requested to configure the receive side of the port to
+ * capture the time stamp of every received PTP message
+ * with messageType field value set to i.
+ */
+ uint16_t rx_ts_capture_ptp_msg_type;
+ /*
+ * This is a 16-bit bit mask that is used to request a
+ * specific configuration of time stamp capture of PTP messages
+ * on the transmit side of this port.
+ * This field shall be ignored if the ptp_tx_ts_capture_enable
+ * flag is not set in this command.
+ * Otherwise, if bit 'i' is set, then the HWRM is being
+ * requested to configure the transmit sied of the port to
+ * capture the time stamp of every transmitted PTP message
+ * with messageType field value set to i.
+ */
+ uint16_t tx_ts_capture_ptp_msg_type;
+ /* Configuration of CoS fields. */
+ uint8_t cos_field_cfg;
+ /* Reserved */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_RSVD1 \
+ UINT32_C(0x1)
+ /*
+ * This field is used to specify selection of VLAN PRI value
+ * based on whether one or two VLAN Tags are present in
+ * the inner packet headers of tunneled packets or
+ * non-tunneled packets.
+ * This field is valid only if inner VLAN PRI to CoS mapping
+ * is enabled.
+ * If VLAN PRI to CoS mapping is not enabled, then this
+ * field shall be ignored.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK \
+ UINT32_C(0x6)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_SFT \
+ 1
+ /*
+ * Select inner VLAN PRI when 1 or 2 VLAN Tags are
+ * present in the inner packet headers
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
+ (UINT32_C(0x0) << 1)
+ /*
+ * Select outer VLAN Tag PRI when 2 VLAN Tags are
+ * present in the inner packet headers.
+ * No VLAN PRI shall be selected for this configuration
+ * if only one VLAN Tag is present in the inner
+ * packet headers.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
+ (UINT32_C(0x1) << 1)
+ /*
+ * Select outermost VLAN PRI when 1 or 2 VLAN Tags
+ * are present in the inner packet headers
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
+ (UINT32_C(0x2) << 1)
+ /* Unspecified */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
+ (UINT32_C(0x3) << 1)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
+ HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ /*
+ * This field is used to specify selection of tunnel VLAN
+ * PRI value based on whether one or two VLAN Tags are
+ * present in tunnel headers.
+ * This field is valid only if tunnel VLAN PRI to CoS mapping
+ * is enabled.
+ * If tunnel VLAN PRI to CoS mapping is not enabled, then this
+ * field shall be ignored.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK \
+ UINT32_C(0x18)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT \
+ 3
+ /*
+ * Select inner VLAN PRI when 1 or 2 VLAN Tags are
+ * present in the tunnel packet headers
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
+ (UINT32_C(0x0) << 3)
+ /*
+ * Select outer VLAN Tag PRI when 2 VLAN Tags are
+ * present in the tunnel packet headers.
+ * No tunnel VLAN PRI shall be selected for this
+ * configuration if only one VLAN Tag is present in
+ * the tunnel packet headers.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
+ (UINT32_C(0x1) << 3)
+ /*
+ * Select outermost VLAN PRI when 1 or 2 VLAN Tags
+ * are present in the tunnel packet headers
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
+ (UINT32_C(0x2) << 3)
+ /* Unspecified */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
+ (UINT32_C(0x3) << 3)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
+ HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ /*
+ * This field shall be used to provide default CoS value
+ * that has been configured on this port.
+ * This field is valid only if default CoS mapping
+ * is enabled.
+ * If default CoS mapping is not enabled, then this
+ * field shall be ignored.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_DEFAULT_COS_MASK \
+ UINT32_C(0xe0)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_DEFAULT_COS_SFT \
+ 5
+ uint8_t unused_0[3];
+} __attribute__((packed));
+
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
+struct hwrm_port_mac_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This is the configured maximum length of Ethernet packet
+ * payload that is allowed to be received on the port.
+ * This value does not include the number of bytes used by
+ * Ethernet header and trailer (CRC).
+ */
+ uint16_t mru;
+ /*
+ * This is the configured maximum length of Ethernet packet
+ * payload that is allowed to be transmitted on the port.
+ * This value does not include the number of bytes used by
+ * Ethernet header and trailer (CRC).
+ */
+ uint16_t mtu;
+ /* Current configuration of the IPG value. */
+ uint8_t ipg;
+ /* Current value of the loopback value. */
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1)
+ /*
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
+ */
+ #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2)
+ #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_LAST \
+ HWRM_PORT_MAC_CFG_OUTPUT_LPBK_REMOTE
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_port_mac_qcfg *
+ **********************/
+
+
+/* hwrm_port_mac_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is to be configured. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_mac_qcfg_output (size:192b/24B) */
+struct hwrm_port_mac_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This is the configured maximum length of Ethernet packet
+ * payload that is allowed to be received on the port.
+ * This value does not include the number of bytes used by the
+ * Ethernet header and trailer (CRC).
+ */
+ uint16_t mru;
+ /*
+ * This is the configured maximum length of Ethernet packet
+ * payload that is allowed to be transmitted on the port.
+ * This value does not include the number of bytes used by the
+ * Ethernet header and trailer (CRC).
+ */
+ uint16_t mtu;
+ /*
+ * The minimum IPG that will
+ * be sent between packets by this port.
+ */
+ uint8_t ipg;
+ /* The loopback setting for the MAC. */
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1)
+ /*
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_LAST \
+ HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_REMOTE
+ /*
+ * Priority setting for VLAN PRI to CoS mapping.
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being used.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is being
+ * the lowest priority and 3 is being the highest priority.
+ * # If the correspoding CoS mapping is not enabled, then this
+ * field should be ignored.
+ * # This value indicates the normalized priority value retained
+ * in the HWRM.
+ */
+ uint8_t vlan_pri2cos_map_pri;
+ /*
+ * In this field, a number of CoS mappings related flags
+ * are used to indicate configured CoS mappings.
+ */
+ uint8_t flags;
+ /*
+ * When this bit is set to '1', the inner VLAN PRI to CoS mapping
+ * is enabled.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_VLAN_PRI2COS_ENABLE \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', tunnel VLAN PRI field to
+ * CoS mapping is enabled.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_TUNNEL_PRI2COS_ENABLE \
+ UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', the IP DSCP to CoS mapping is
+ * enabled.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_IP_DSCP2COS_ENABLE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the Out-Of-Box WoL is enabled on this
+ * port.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_OOB_WOL_ENABLE \
+ UINT32_C(0x8)
+ /* When this bit is '1', PTP is enabled for RX on this port. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x10)
+ /* When this bit is '1', PTP is enabled for TX on this port. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x20)
+ /*
+ * Priority setting for tunnel VLAN PRI to CoS mapping.
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being used.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is being
+ * the lowest priority and 3 is being the highest priority.
+ * # If the correspoding CoS mapping is not enabled, then this
+ * field should be ignored.
+ * # This value indicates the normalized priority value retained
+ * in the HWRM.
+ */
+ uint8_t tunnel_pri2cos_map_pri;
+ /*
+ * Priority setting for DSCP to PRI mapping.
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being used.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is being
+ * the lowest priority and 3 is being the highest priority.
+ * # If the correspoding CoS mapping is not enabled, then this
+ * field should be ignored.
+ * # This value indicates the normalized priority value retained
+ * in the HWRM.
+ */
+ uint8_t dscp2pri_map_pri;
+ /*
+ * This is a 16-bit bit mask that represents the
+ * current configuration of time stamp capture of PTP messages
+ * on the receive side of this port.
+ * If bit 'i' is set, then the receive side of the port
+ * is configured to capture the time stamp of every
+ * received PTP message with messageType field value set
+ * to i.
+ * If all bits are set to 0 (i.e. field value set 0),
+ * then the receive side of the port is not configured
+ * to capture timestamp for PTP messages.
+ * If all bits are set to 1, then the receive side of the
+ * port is configured to capture timestamp for all PTP
+ * messages.
+ */
+ uint16_t rx_ts_capture_ptp_msg_type;
+ /*
+ * This is a 16-bit bit mask that represents the
+ * current configuration of time stamp capture of PTP messages
+ * on the transmit side of this port.
+ * If bit 'i' is set, then the transmit side of the port
+ * is configured to capture the time stamp of every
+ * received PTP message with messageType field value set
+ * to i.
+ * If all bits are set to 0 (i.e. field value set 0),
+ * then the transmit side of the port is not configured
+ * to capture timestamp for PTP messages.
+ * If all bits are set to 1, then the transmit side of the
+ * port is configured to capture timestamp for all PTP
+ * messages.
+ */
+ uint16_t tx_ts_capture_ptp_msg_type;
+ /* Configuration of CoS fields. */
+ uint8_t cos_field_cfg;
+ /* Reserved */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_RSVD \
+ UINT32_C(0x1)
+ /*
+ * This field is used for selecting VLAN PRI value
+ * based on whether one or two VLAN Tags are present in
+ * the inner packet headers of tunneled packets or
+ * non-tunneled packets.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK \
+ UINT32_C(0x6)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_SFT \
+ 1
+ /*
+ * Select inner VLAN PRI when 1 or 2 VLAN Tags are
+ * present in the inner packet headers
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
+ (UINT32_C(0x0) << 1)
+ /*
+ * Select outer VLAN Tag PRI when 2 VLAN Tags are
+ * present in the inner packet headers.
+ * No VLAN PRI is selected for this configuration
+ * if only one VLAN Tag is present in the inner
+ * packet headers.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
+ (UINT32_C(0x1) << 1)
+ /*
+ * Select outermost VLAN PRI when 1 or 2 VLAN Tags
+ * are present in the inner packet headers
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
+ (UINT32_C(0x2) << 1)
+ /* Unspecified */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
+ (UINT32_C(0x3) << 1)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
+ HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ /*
+ * This field is used for selecting tunnel VLAN PRI value
+ * based on whether one or two VLAN Tags are present in
+ * the tunnel headers of tunneled packets. This selection
+ * does not apply to non-tunneled packets.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK \
+ UINT32_C(0x18)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT \
+ 3
+ /*
+ * Select inner VLAN PRI when 1 or 2 VLAN Tags are
+ * present in the tunnel packet headers
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
+ (UINT32_C(0x0) << 3)
+ /*
+ * Select outer VLAN Tag PRI when 2 VLAN Tags are
+ * present in the tunnel packet headers.
+ * No VLAN PRI is selected for this configuration
+ * if only one VLAN Tag is present in the tunnel
+ * packet headers.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
+ (UINT32_C(0x1) << 3)
+ /*
+ * Select outermost VLAN PRI when 1 or 2 VLAN Tags
+ * are present in the tunnel packet headers
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
+ (UINT32_C(0x2) << 3)
+ /* Unspecified */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
+ (UINT32_C(0x3) << 3)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
+ HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ /*
+ * This field is used to provide default CoS value that
+ * has been configured on this port.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_DEFAULT_COS_MASK \
+ UINT32_C(0xe0)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_DEFAULT_COS_SFT \
+ 5
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_port_mac_ptp_qcfg *
+ **************************/
+
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_ptp_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
+struct hwrm_port_mac_ptp_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * In this field, a number of PTP related flags
+ * are used to indicate configured PTP capabilities.
+ */
+ uint8_t flags;
+ /*
+ * When this bit is set to '1', the PTP related registers are
+ * directly accessible by the host.
+ */
+ #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', the PTP information is accessible
+ * via HWRM commands.
+ */
+ #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS \
+ UINT32_C(0x2)
+ uint8_t unused_0[3];
+ /* Offset of the PTP register for the lower 32 bits of timestamp for RX. */
+ uint32_t rx_ts_reg_off_lower;
+ /* Offset of the PTP register for the upper 32 bits of timestamp for RX. */
+ uint32_t rx_ts_reg_off_upper;
+ /* Offset of the PTP register for the sequence ID for RX. */
+ uint32_t rx_ts_reg_off_seq_id;
+ /* Offset of the first PTP source ID for RX. */
+ uint32_t rx_ts_reg_off_src_id_0;
+ /* Offset of the second PTP source ID for RX. */
+ uint32_t rx_ts_reg_off_src_id_1;
+ /* Offset of the third PTP source ID for RX. */
+ uint32_t rx_ts_reg_off_src_id_2;
+ /* Offset of the domain ID for RX. */
+ uint32_t rx_ts_reg_off_domain_id;
+ /* Offset of the PTP FIFO register for RX. */
+ uint32_t rx_ts_reg_off_fifo;
+ /* Offset of the PTP advance FIFO register for RX. */
+ uint32_t rx_ts_reg_off_fifo_adv;
+ /* PTP timestamp granularity for RX. */
+ uint32_t rx_ts_reg_off_granularity;
+ /* Offset of the PTP register for the lower 32 bits of timestamp for TX. */
+ uint32_t tx_ts_reg_off_lower;
+ /* Offset of the PTP register for the upper 32 bits of timestamp for TX. */
+ uint32_t tx_ts_reg_off_upper;
+ /* Offset of the PTP register for the sequence ID for TX. */
+ uint32_t tx_ts_reg_off_seq_id;
+ /* Offset of the PTP FIFO register for TX. */
+ uint32_t tx_ts_reg_off_fifo;
+ /* PTP timestamp granularity for TX. */
+ uint32_t tx_ts_reg_off_granularity;
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************
+ * hwrm_port_qstats *
+ ********************/
+
+
+/* hwrm_port_qstats_input (size:320b/40B) */
+struct hwrm_port_qstats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+ /*
+ * This is the host address where
+ * Tx port statistics will be stored
+ */
+ uint64_t tx_stat_host_addr;
+ /*
+ * This is the host address where
+ * Rx port statistics will be stored
+ */
+ uint64_t rx_stat_host_addr;
+} __attribute__((packed));
+
+/* hwrm_port_qstats_output (size:128b/16B) */
+struct hwrm_port_qstats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The size of TX port statistics block in bytes. */
+ uint16_t tx_stat_size;
+ /* The size of RX port statistics block in bytes. */
+ uint16_t rx_stat_size;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/************************
+ * hwrm_port_qstats_ext *
+ ************************/
+
+
+/* hwrm_port_qstats_ext_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ /*
+ * The size of TX port extended
+ * statistics block in bytes.
+ */
+ uint16_t tx_stat_size;
+ /*
+ * The size of RX port extended
+ * statistics block in bytes
+ */
+ uint16_t rx_stat_size;
+ uint8_t unused_0[2];
+ /*
+ * This is the host address where
+ * Tx port statistics will be stored
+ */
+ uint64_t tx_stat_host_addr;
+ /*
+ * This is the host address where
+ * Rx port statistics will be stored
+ */
+ uint64_t rx_stat_host_addr;
+} __attribute__((packed));
+
+/* hwrm_port_qstats_ext_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The size of TX port statistics block in bytes. */
+ uint16_t tx_stat_size;
+ /* The size of RX port statistics block in bytes. */
+ uint16_t rx_stat_size;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_port_lpbk_qstats *
+ *************************/
+
+
+/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
+struct hwrm_port_lpbk_qstats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
+struct hwrm_port_lpbk_qstats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Number of transmitted unicast frames */
+ uint64_t lpbk_ucast_frames;
+ /* Number of transmitted multicast frames */
+ uint64_t lpbk_mcast_frames;
+ /* Number of transmitted broadcast frames */
+ uint64_t lpbk_bcast_frames;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t lpbk_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t lpbk_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t lpbk_bcast_bytes;
+ /* Total Tx Drops for loopback traffic reported by STATS block */
+ uint64_t tx_stat_discard;
+ /* Total Tx Error Drops for loopback traffic reported by STATS block */
+ uint64_t tx_stat_error;
+ /* Total Rx Drops for loopback traffic reported by STATS block */
+ uint64_t rx_stat_discard;
+ /* Total Rx Error Drops for loopback traffic reported by STATS block */
+ uint64_t rx_stat_error;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_port_clr_stats *
+ ***********************/
+
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
+struct hwrm_port_clr_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_clr_stats_output (size:128b/16B) */
+struct hwrm_port_clr_stats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_port_lpbk_clr_stats *
+ ****************************/
+
+
+/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_port_ts_query *
+ **********************/
+
+
+/* hwrm_port_ts_query_input (size:192b/24B) */
+struct hwrm_port_ts_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_LAST \
+ HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_port_ts_query_output (size:192b/24B) */
+struct hwrm_port_ts_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Timestamp value of PTP message captured. */
+ uint64_t ptp_msg_ts;
+ /* Sequence ID of the PTP message captured. */
+ uint16_t ptp_msg_seqid;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_port_phy_qcaps *
+ ***********************/
+
+
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
+struct hwrm_port_phy_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_phy_qcaps_output (size:192b/24B) */
+struct hwrm_port_phy_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* PHY capability flags */
+ uint8_t flags;
+ /*
+ * If set to 1, then this field indicates that the
+ * link is capable of supporting EEE.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EEE_SUPPORTED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, then this field indicates that the
+ * PHY is capable of supporting external loopback.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EXTERNAL_LPBK_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * Reserved field. The HWRM shall set this field to 0.
+ * An HWRM client shall ignore this field.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_RSVD1_MASK \
+ UINT32_C(0xfc)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_RSVD1_SFT 2
+ /* Number of front panel ports for this device. */
+ uint8_t port_cnt;
+ /* Not supported or unknown */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_UNKNOWN UINT32_C(0x0)
+ /* single port device */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_1 UINT32_C(0x1)
+ /* 2-port device */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_2 UINT32_C(0x2)
+ /* 3-port device */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_3 UINT32_C(0x3)
+ /* 4-port device */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_4 UINT32_C(0x4)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_LAST \
+ HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_4
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * as forced speeds on this link.
+ * For each speed that can be forced on this link, the
+ * corresponding mask bit shall be set to '1'.
+ */
+ uint16_t supported_speeds_force_mode;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_2GB \
+ UINT32_C(0x10)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10MB \
+ UINT32_C(0x2000)
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * for autonegotiation on this link.
+ * For each speed that can be autonegotiated on this link, the
+ * corresponding mask bit shall be set to '1'.
+ */
+ uint16_t supported_speeds_auto_mode;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_2GB \
+ UINT32_C(0x10)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10MB \
+ UINT32_C(0x2000)
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * for EEE on this link.
+ * For each speed that can be autonegotiated when EEE is enabled
+ * on this link, the corresponding mask bit shall be set to '1'.
+ * This field is only valid when the eee_suppotred is set to '1'.
+ */
+ uint16_t supported_speeds_eee_mode;
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_100MB \
+ UINT32_C(0x2)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_1GB \
+ UINT32_C(0x8)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 \
+ UINT32_C(0x10)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_10GB \
+ UINT32_C(0x40)
+ uint32_t tx_lpi_timer_low;
+ /*
+ * The lowest value of TX LPI timer that can be set on this link
+ * when EEE is enabled. This value is in microseconds.
+ * This field is valid only when_eee_supported is set to '1'.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_SFT 0
+ /*
+ * Reserved field. The HWRM shall set this field to 0.
+ * An HWRM client shall ignore this field.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_RSVD2_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_RSVD2_SFT 24
+ uint32_t valid_tx_lpi_timer_high;
+ /*
+ * The highest value of TX LPI timer that can be set on this link
+ * when EEE is enabled. This value is in microseconds.
+ * This field is valid only when_eee_supported is set to '1'.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_SFT 0
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_SFT 24
+} __attribute__((packed));
+
+/***************************
+ * hwrm_port_phy_i2c_write *
+ ***************************/
+
+
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the page_offset field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_I2C_WRITE_INPUT_ENABLES_PAGE_OFFSET \
+ UINT32_C(0x1)
+ /* Port ID of port. */
+ uint16_t port_id;
+ /* 8-bit I2C slave address. */
+ uint8_t i2c_slave_addr;
+ uint8_t unused_0;
+ /* The page number that is being accessed over I2C. */
+ uint16_t page_number;
+ /* Offset within the page that is being accessed over I2C. */
+ uint16_t page_offset;
+ /*
+ * Length of data to write, in bytes starting at the offset
+ * specified above. If the offset is not specified, then
+ * the data shall be written from the beginning of the page.
+ */
+ uint8_t data_length;
+ uint8_t unused_1[7];
+ /* Up to 64B of data. */
+ uint32_t data[16];
+} __attribute__((packed));
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_port_phy_i2c_read *
+ **************************/
+
+
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
+struct hwrm_port_phy_i2c_read_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the page_offset field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET \
+ UINT32_C(0x1)
+ /* Port ID of port. */
+ uint16_t port_id;
+ /* 8-bit I2C slave address. */
+ uint8_t i2c_slave_addr;
+ uint8_t unused_0;
+ /* The page number that is being accessed over I2C. */
+ uint16_t page_number;
+ /* Offset within the page that is being accessed over I2C. */
+ uint16_t page_offset;
+ /*
+ * Length of data to read, in bytes starting at the offset
+ * specified above. If the offset is not specified, then
+ * the data shall be read from the beginning of the page.
+ */
+ uint8_t data_length;
+ uint8_t unused_1[7];
+} __attribute__((packed));
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
+struct hwrm_port_phy_i2c_read_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Up to 64B of data. */
+ uint32_t data[16];
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************
+ * hwrm_port_led_cfg *
+ *********************/
+
+
+/* hwrm_port_led_cfg_input (size:512b/64B) */
+struct hwrm_port_led_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the led0_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the led0_state field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the led0_color field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the led0_blink_on field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the led0_blink_off field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the led0_group_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the led1_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_ID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the led1_state field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_STATE \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the led1_color field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_COLOR \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the led1_blink_on field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_ON \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the led1_blink_off field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_OFF \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the led1_group_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_GROUP_ID \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the led2_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_ID \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the led2_state field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_STATE \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the led2_color field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_COLOR \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the led2_blink_on field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_ON \
+ UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the led2_blink_off field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_OFF \
+ UINT32_C(0x10000)
+ /*
+ * This bit must be '1' for the led2_group_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_GROUP_ID \
+ UINT32_C(0x20000)
+ /*
+ * This bit must be '1' for the led3_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_ID \
+ UINT32_C(0x40000)
+ /*
+ * This bit must be '1' for the led3_state field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_STATE \
+ UINT32_C(0x80000)
+ /*
+ * This bit must be '1' for the led3_color field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_COLOR \
+ UINT32_C(0x100000)
+ /*
+ * This bit must be '1' for the led3_blink_on field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_ON \
+ UINT32_C(0x200000)
+ /*
+ * This bit must be '1' for the led3_blink_off field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_OFF \
+ UINT32_C(0x400000)
+ /*
+ * This bit must be '1' for the led3_group_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_GROUP_ID \
+ UINT32_C(0x800000)
+ /* Port ID of port whose LEDs are configured. */
+ uint16_t port_id;
+ /*
+ * The number of LEDs that are being configured.
+ * Up to 4 LEDs can be configured with this command.
+ */
+ uint8_t num_leds;
+ /* Reserved field. */
+ uint8_t rsvd;
+ /* An identifier for the LED #0. */
+ uint8_t led0_id;
+ /* The requested state of the LED #0. */
+ uint8_t led0_state;
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT
+ /* The requested color of LED #0. */
+ uint8_t led0_color;
+ /* Default */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER
+ uint8_t unused_0;
+ /*
+ * If the LED #0 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
+ */
+ uint16_t led0_blink_on;
+ /*
+ * If the LED #0 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
+ */
+ uint16_t led0_blink_off;
+ /*
+ * An identifier for the group of LEDs that LED #0 belongs
+ * to.
+ * If set to 0, then the LED #0 shall not be grouped and
+ * shall be treated as an individual resource.
+ * For all other non-zero values of this field, LED #0 shall
+ * be grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led0_group_id;
+ /* Reserved field. */
+ uint8_t rsvd0;
+ /* An identifier for the LED #1. */
+ uint8_t led1_id;
+ /* The requested state of the LED #1. */
+ uint8_t led1_state;
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT
+ /* The requested color of LED #1. */
+ uint8_t led1_color;
+ /* Default */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER
+ uint8_t unused_1;
+ /*
+ * If the LED #1 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
+ */
+ uint16_t led1_blink_on;
+ /*
+ * If the LED #1 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
+ */
+ uint16_t led1_blink_off;
+ /*
+ * An identifier for the group of LEDs that LED #1 belongs
+ * to.
+ * If set to 0, then the LED #1 shall not be grouped and
+ * shall be treated as an individual resource.
+ * For all other non-zero values of this field, LED #1 shall
+ * be grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led1_group_id;
+ /* Reserved field. */
+ uint8_t rsvd1;
+ /* An identifier for the LED #2. */
+ uint8_t led2_id;
+ /* The requested state of the LED #2. */
+ uint8_t led2_state;
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT
+ /* The requested color of LED #2. */
+ uint8_t led2_color;
+ /* Default */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER
+ uint8_t unused_2;
+ /*
+ * If the LED #2 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
+ */
+ uint16_t led2_blink_on;
+ /*
+ * If the LED #2 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
+ */
+ uint16_t led2_blink_off;
+ /*
+ * An identifier for the group of LEDs that LED #2 belongs
+ * to.
+ * If set to 0, then the LED #2 shall not be grouped and
+ * shall be treated as an individual resource.
+ * For all other non-zero values of this field, LED #2 shall
+ * be grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led2_group_id;
+ /* Reserved field. */
+ uint8_t rsvd2;
+ /* An identifier for the LED #3. */
+ uint8_t led3_id;
+ /* The requested state of the LED #3. */
+ uint8_t led3_state;
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT
+ /* The requested color of LED #3. */
+ uint8_t led3_color;
+ /* Default */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER
+ uint8_t unused_3;
+ /*
+ * If the LED #3 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
+ */
+ uint16_t led3_blink_on;
+ /*
+ * If the LED #3 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
+ */
+ uint16_t led3_blink_off;
+ /*
+ * An identifier for the group of LEDs that LED #3 belongs
+ * to.
+ * If set to 0, then the LED #3 shall not be grouped and
+ * shall be treated as an individual resource.
+ * For all other non-zero values of this field, LED #3 shall
+ * be grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led3_group_id;
+ /* Reserved field. */
+ uint8_t rsvd3;
+} __attribute__((packed));
+
+/* hwrm_port_led_cfg_output (size:128b/16B) */
+struct hwrm_port_led_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_port_led_qcfg *
+ **********************/
+
+
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port whose LED configuration is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * The number of LEDs that are configured on this port.
+ * Up to 4 LEDs can be returned in the response.
+ */
+ uint8_t num_leds;
+ /* An identifier for the LED #0. */
+ uint8_t led0_id;
+ /* The type of LED #0. */
+ uint8_t led0_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID
+ /* The current state of the LED #0. */
+ uint8_t led0_state;
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT
+ /* The color of LED #0. */
+ uint8_t led0_color;
+ /* Default */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER
+ uint8_t unused_0;
+ /*
+ * If the LED #0 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
+ */
+ uint16_t led0_blink_on;
+ /*
+ * If the LED #0 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
+ */
+ uint16_t led0_blink_off;
+ /*
+ * An identifier for the group of LEDs that LED #0 belongs
+ * to.
+ * If set to 0, then the LED #0 is not grouped.
+ * For all other non-zero values of this field, LED #0 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led0_group_id;
+ /* An identifier for the LED #1. */
+ uint8_t led1_id;
+ /* The type of LED #1. */
+ uint8_t led1_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID
+ /* The current state of the LED #1. */
+ uint8_t led1_state;
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT
+ /* The color of LED #1. */
+ uint8_t led1_color;
+ /* Default */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER
+ uint8_t unused_1;
+ /*
+ * If the LED #1 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
+ */
+ uint16_t led1_blink_on;
+ /*
+ * If the LED #1 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
+ */
+ uint16_t led1_blink_off;
+ /*
+ * An identifier for the group of LEDs that LED #1 belongs
+ * to.
+ * If set to 0, then the LED #1 is not grouped.
+ * For all other non-zero values of this field, LED #1 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led1_group_id;
+ /* An identifier for the LED #2. */
+ uint8_t led2_id;
+ /* The type of LED #2. */
+ uint8_t led2_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID
+ /* The current state of the LED #2. */
+ uint8_t led2_state;
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT
+ /* The color of LED #2. */
+ uint8_t led2_color;
+ /* Default */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER
+ uint8_t unused_2;
+ /*
+ * If the LED #2 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
+ */
+ uint16_t led2_blink_on;
+ /*
+ * If the LED #2 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
+ */
+ uint16_t led2_blink_off;
+ /*
+ * An identifier for the group of LEDs that LED #2 belongs
+ * to.
+ * If set to 0, then the LED #2 is not grouped.
+ * For all other non-zero values of this field, LED #2 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led2_group_id;
+ /* An identifier for the LED #3. */
+ uint8_t led3_id;
+ /* The type of LED #3. */
+ uint8_t led3_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID
+ /* The current state of the LED #3. */
+ uint8_t led3_state;
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT
+ /* The color of LED #3. */
+ uint8_t led3_color;
+ /* Default */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER
+ uint8_t unused_3;
+ /*
+ * If the LED #3 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
+ */
+ uint16_t led3_blink_on;
+ /*
+ * If the LED #3 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
+ */
+ uint16_t led3_blink_off;
+ /*
+ * An identifier for the group of LEDs that LED #3 belongs
+ * to.
+ * If set to 0, then the LED #3 is not grouped.
+ * For all other non-zero values of this field, LED #3 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led3_group_id;
+ uint8_t unused_4[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_port_led_qcaps *
+ ***********************/
+
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
+struct hwrm_port_led_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port whose LED configuration is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
+struct hwrm_port_led_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * The number of LEDs that are configured on this port.
+ * Up to 4 LEDs can be returned in the response.
+ */
+ uint8_t num_leds;
+ /* Reserved for future use. */
+ uint8_t unused[3];
+ /* An identifier for the LED #0. */
+ uint8_t led0_id;
+ /* The type of LED #0. */
+ uint8_t led0_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #0 belongs
+ * to.
+ * If set to 0, then the LED #0 cannot be grouped.
+ * For all other non-zero values of this field, LED #0 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led0_group_id;
+ uint8_t unused_0;
+ /* The states supported by LED #0. */
+ uint16_t led0_state_caps;
+ /*
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ /* The colors supported by LED #0. */
+ uint16_t led0_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ /* An identifier for the LED #1. */
+ uint8_t led1_id;
+ /* The type of LED #1. */
+ uint8_t led1_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #1 belongs
+ * to.
+ * If set to 0, then the LED #0 cannot be grouped.
+ * For all other non-zero values of this field, LED #0 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led1_group_id;
+ uint8_t unused_1;
+ /* The states supported by LED #1. */
+ uint16_t led1_state_caps;
+ /*
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ /* The colors supported by LED #1. */
+ uint16_t led1_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ /* An identifier for the LED #2. */
+ uint8_t led2_id;
+ /* The type of LED #2. */
+ uint8_t led2_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #0 belongs
+ * to.
+ * If set to 0, then the LED #0 cannot be grouped.
+ * For all other non-zero values of this field, LED #0 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led2_group_id;
+ uint8_t unused_2;
+ /* The states supported by LED #2. */
+ uint16_t led2_state_caps;
+ /*
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ /* The colors supported by LED #2. */
+ uint16_t led2_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ /* An identifier for the LED #3. */
+ uint8_t led3_id;
+ /* The type of LED #3. */
+ uint8_t led3_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #3 belongs
+ * to.
+ * If set to 0, then the LED #0 cannot be grouped.
+ * For all other non-zero values of this field, LED #0 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led3_group_id;
+ uint8_t unused_3;
+ /* The states supported by LED #3. */
+ uint16_t led3_state_caps;
+ /*
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ /* The colors supported by LED #3. */
+ uint16_t led3_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ uint8_t unused_4[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_queue_qportcfg *
+ ***********************/
+
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
+struct hwrm_queue_qportcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX
+ /*
+ * Port ID of port for which the queue configuration is being
+ * queried. This field is only required when sent by IPC.
+ */
+ uint16_t port_id;
+ /*
+ * Drivers will set this capability when it can use
+ * queue_idx_service_profile to map the queues to application.
+ */
+ uint8_t drv_qmap_cap;
+ /* disabled */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_DISABLED UINT32_C(0x0)
+ /* enabled */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED UINT32_C(0x1)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_LAST \
+ HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED
+ uint8_t unused_0;
+} __attribute__((packed));
+
+/* hwrm_queue_qportcfg_output (size:256b/32B) */
+struct hwrm_queue_qportcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * The maximum number of queues that can be configured on this
+ * port.
+ * Valid values range from 1 through 8.
+ */
+ uint8_t max_configurable_queues;
+ /*
+ * The maximum number of lossless queues that can be configured
+ * on this port.
+ * Valid values range from 0 through 8.
+ */
+ uint8_t max_configurable_lossless_queues;
+ /*
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * # A value of 0 indicates that the queue is not configurable
+ * by the hwrm_queue_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+ * # A hwrm_queue_cfg command shall return error when trying to
+ * configure a queue not configurable.
+ */
+ uint8_t queue_cfg_allowed;
+ /* Information about queue configuration. */
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the queues are
+ * configured asymmetrically on TX and RX sides.
+ * If this flag is set to '0', then the queues are
+ * configured symmetrically on TX and RX sides. For
+ * symmetric configuration, the queue configuration
+ * including queue ids and service profiles on the
+ * TX side is the same as the corresponding queue
+ * configuration on the RX side.
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
+ /*
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_pfcenable_cfg command.
+ *
+ * Each bit represents a specific priority where bit 0 represents
+ * priority 0 and bit 7 represents priority 7.
+ * # A value of 0 indicates that the priority is not configurable by
+ * the hwrm_queue_pfcenable_cfg command.
+ * # A value of 1 indicates that the priority is configurable.
+ * # A hwrm_queue_pfcenable_cfg command shall return error when
+ * trying to configure a priority that is not configurable.
+ */
+ uint8_t queue_pfcenable_cfg_allowed;
+ /*
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_pri2cos_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * # A value of 0 indicates that the queue is not configurable
+ * by the hwrm_queue_pri2cos_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+ * # A hwrm_queue_pri2cos_cfg command shall return error when
+ * trying to configure a queue that is not configurable.
+ */
+ uint8_t queue_pri2cos_cfg_allowed;
+ /*
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_pri2cos_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * # A value of 0 indicates that the queue is not configurable
+ * by the hwrm_queue_pri2cos_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+ * # A hwrm_queue_pri2cos_cfg command shall return error when
+ * trying to configure a queue not configurable.
+ */
+ uint8_t queue_cos2bw_cfg_allowed;
+ /*
+ * ID of CoS Queue 0.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
+ */
+ uint8_t queue_id0;
+ /* This value is applicable to CoS queues only. */
+ uint8_t queue_id0_service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless (legacy) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 1.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
+ */
+ uint8_t queue_id1;
+ /* This value is applicable to CoS queues only. */
+ uint8_t queue_id1_service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless (legacy) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 2.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
+ */
+ uint8_t queue_id2;
+ /* This value is applicable to CoS queues only. */
+ uint8_t queue_id2_service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless (legacy) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 3.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
+ */
+ uint8_t queue_id3;
+ /* This value is applicable to CoS queues only. */
+ uint8_t queue_id3_service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless (legacy) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 4.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
+ */
+ uint8_t queue_id4;
+ /* This value is applicable to CoS queues only. */
+ uint8_t queue_id4_service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless (legacy) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 5.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
+ */
+ uint8_t queue_id5;
+ /* This value is applicable to CoS queues only. */
+ uint8_t queue_id5_service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless (legacy) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 6.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
+ */
+ uint8_t queue_id6;
+ /* This value is applicable to CoS queues only. */
+ uint8_t queue_id6_service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless (legacy) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 7.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
+ */
+ uint8_t queue_id7;
+ /* This value is applicable to CoS queues only. */
+ uint8_t queue_id7_service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless (legacy) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************
+ * hwrm_queue_qcfg *
+ *******************/
+
+
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX
+ /* Queue ID of the queue. */
+ uint32_t queue_id;
+} __attribute__((packed));
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This value is a the estimate packet length used in the
+ * TX arbiter.
+ */
+ uint32_t queue_len;
+ /* This value is applicable to CoS queues only. */
+ uint8_t service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LOSSY UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LOSSLESS UINT32_C(0x1)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_UNKNOWN UINT32_C(0xff)
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_UNKNOWN
+ /* Information about queue configuration. */
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the queue is
+ * configured asymmetrically on TX and RX sides.
+ * If this flag is set to '0', then this queue is
+ * configured symmetrically on TX and RX sides.
+ */
+ #define HWRM_QUEUE_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************
+ * hwrm_queue_cfg *
+ ******************/
+
+
+/* hwrm_queue_cfg_input (size:320b/40B) */
+struct hwrm_queue_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX, or both directions applicable to the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3)
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_SFT 0
+ /* tx path */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ /* Bi-directional (Symmetrically applicable to TX and RX paths) */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR UINT32_C(0x2)
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the dflt_len field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_ENABLES_DFLT_LEN UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the service_profile field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_ENABLES_SERVICE_PROFILE UINT32_C(0x2)
+ /* Queue ID of queue that is to be configured by this function. */
+ uint32_t queue_id;
+ /*
+ * This value is a the estimate packet length used in the
+ * TX arbiter.
+ * Set to 0xFF... (All Fs) to not adjust this value.
+ */
+ uint32_t dflt_len;
+ /* This value is applicable to CoS queues only. */
+ uint8_t service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSY UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSLESS UINT32_C(0x1)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_UNKNOWN UINT32_C(0xff)
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_UNKNOWN
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
+struct hwrm_queue_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************************
+ * hwrm_queue_pfcenable_qcfg *
+ *****************************/
+
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /* If set to 1, then PFC is enabled on PRI 0. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI0_PFC_ENABLED \
+ UINT32_C(0x1)
+ /* If set to 1, then PFC is enabled on PRI 1. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI1_PFC_ENABLED \
+ UINT32_C(0x2)
+ /* If set to 1, then PFC is enabled on PRI 2. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI2_PFC_ENABLED \
+ UINT32_C(0x4)
+ /* If set to 1, then PFC is enabled on PRI 3. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI3_PFC_ENABLED \
+ UINT32_C(0x8)
+ /* If set to 1, then PFC is enabled on PRI 4. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI4_PFC_ENABLED \
+ UINT32_C(0x10)
+ /* If set to 1, then PFC is enabled on PRI 5. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI5_PFC_ENABLED \
+ UINT32_C(0x20)
+ /* If set to 1, then PFC is enabled on PRI 6. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI6_PFC_ENABLED \
+ UINT32_C(0x40)
+ /* If set to 1, then PFC is enabled on PRI 7. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI7_PFC_ENABLED \
+ UINT32_C(0x80)
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_queue_pfcenable_cfg *
+ ****************************/
+
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If set to 1, then PFC is requested to be enabled on PRI 0. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI0_PFC_ENABLED \
+ UINT32_C(0x1)
+ /* If set to 1, then PFC is requested to be enabled on PRI 1. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI1_PFC_ENABLED \
+ UINT32_C(0x2)
+ /* If set to 1, then PFC is requested to be enabled on PRI 2. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI2_PFC_ENABLED \
+ UINT32_C(0x4)
+ /* If set to 1, then PFC is requested to be enabled on PRI 3. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI3_PFC_ENABLED \
+ UINT32_C(0x8)
+ /* If set to 1, then PFC is requested to be enabled on PRI 4. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI4_PFC_ENABLED \
+ UINT32_C(0x10)
+ /* If set to 1, then PFC is requested to be enabled on PRI 5. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI5_PFC_ENABLED \
+ UINT32_C(0x20)
+ /* If set to 1, then PFC is requested to be enabled on PRI 6. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI6_PFC_ENABLED \
+ UINT32_C(0x40)
+ /* If set to 1, then PFC is requested to be enabled on PRI 7. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI7_PFC_ENABLED \
+ UINT32_C(0x80)
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint16_t port_id;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_queue_pri2cos_qcfg *
+ ***************************/
+
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX
+ /*
+ * When this bit is set to '0', the query is
+ * for VLAN PRI field in tunnel headers.
+ * When this bit is set to '1', the query is
+ * for VLAN PRI field in inner packet headers.
+ */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN UINT32_C(0x2)
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ uint8_t unused_0[3];
+} __attribute__((packed));
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * CoS Queue assigned to priority 0. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri0_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 1. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri1_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 2 This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri2_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 3. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri3_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 4. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri4_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 5. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri5_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 6. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri6_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 7. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri7_cos_queue_id;
+ /* Information about queue configuration. */
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the PRI to CoS
+ * configuration is asymmetric on TX and RX sides.
+ * If this flag is set to '0', then PRI to CoS configuration
+ * is symmetric on TX and RX sides.
+ */
+ #define HWRM_QUEUE_PRI2COS_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_queue_pri2cos_cfg *
+ **************************/
+
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
+struct hwrm_queue_pri2cos_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX, or both directions applicable to the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3)
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_SFT 0
+ /* tx path */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ /* Bi-directional (Symmetrically applicable to TX and RX paths) */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR UINT32_C(0x2)
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR
+ /*
+ * When this bit is set to '0', the mapping is requested
+ * for VLAN PRI field in tunnel headers.
+ * When this bit is set to '1', the mapping is requested
+ * for VLAN PRI field in inner packet headers.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_IVLAN UINT32_C(0x4)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the pri0_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI0_COS_QUEUE_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the pri1_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI1_COS_QUEUE_ID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the pri2_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI2_COS_QUEUE_ID \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the pri3_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI3_COS_QUEUE_ID \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the pri4_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI4_COS_QUEUE_ID \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the pri5_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI5_COS_QUEUE_ID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the pri6_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI6_COS_QUEUE_ID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the pri7_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI7_COS_QUEUE_ID \
+ UINT32_C(0x80)
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ /*
+ * CoS Queue assigned to priority 0. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri0_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 1. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri1_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 2 This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri2_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 3. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri3_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 4. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri4_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 5. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri5_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 6. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri6_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 7. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri7_cos_queue_id;
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
+struct hwrm_queue_pri2cos_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_queue_cos2bw_qcfg *
+ **************************/
+
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
+struct hwrm_queue_cos2bw_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure TC BW assignment on this port.
+ */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
+struct hwrm_queue_cos2bw_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* ID of CoS Queue 0. */
+ uint8_t queue_id0;
+ uint8_t unused_0;
+ uint16_t unused_1;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id0_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id0_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id0_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id0_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id0_bw_weight;
+ /* ID of CoS Queue 1. */
+ uint8_t queue_id1;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id1_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id1_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id1_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id1_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id1_bw_weight;
+ /* ID of CoS Queue 2. */
+ uint8_t queue_id2;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id2_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id2_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id2_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id2_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id2_bw_weight;
+ /* ID of CoS Queue 3. */
+ uint8_t queue_id3;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id3_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id3_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id3_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id3_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id3_bw_weight;
+ /* ID of CoS Queue 4. */
+ uint8_t queue_id4;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id4_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id4_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id4_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id4_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id4_bw_weight;
+ /* ID of CoS Queue 5. */
+ uint8_t queue_id5;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id5_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id5_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id5_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id5_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id5_bw_weight;
+ /* ID of CoS Queue 6. */
+ uint8_t queue_id6;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id6_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id6_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id6_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id6_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id6_bw_weight;
+ /* ID of CoS Queue 7. */
+ uint8_t queue_id7;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id7_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id7_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id7_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id7_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id7_bw_weight;
+ uint8_t unused_2[4];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_queue_cos2bw_cfg *
+ *************************/
+
+
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
+struct hwrm_queue_cos2bw_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint32_t enables;
+ /*
+ * If this bit is set to 1, then all queue_id0 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID0_VALID \
+ UINT32_C(0x1)
+ /*
+ * If this bit is set to 1, then all queue_id1 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID1_VALID \
+ UINT32_C(0x2)
+ /*
+ * If this bit is set to 1, then all queue_id2 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID2_VALID \
+ UINT32_C(0x4)
+ /*
+ * If this bit is set to 1, then all queue_id3 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID3_VALID \
+ UINT32_C(0x8)
+ /*
+ * If this bit is set to 1, then all queue_id4 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID4_VALID \
+ UINT32_C(0x10)
+ /*
+ * If this bit is set to 1, then all queue_id5 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID5_VALID \
+ UINT32_C(0x20)
+ /*
+ * If this bit is set to 1, then all queue_id6 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID6_VALID \
+ UINT32_C(0x40)
+ /*
+ * If this bit is set to 1, then all queue_id7 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID7_VALID \
+ UINT32_C(0x80)
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure TC BW assignment on this port.
+ */
+ uint16_t port_id;
+ /* ID of CoS Queue 0. */
+ uint8_t queue_id0;
+ uint8_t unused_0;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id0_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id0_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id0_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id0_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id0_bw_weight;
+ /* ID of CoS Queue 1. */
+ uint8_t queue_id1;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id1_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id1_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id1_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id1_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id1_bw_weight;
+ /* ID of CoS Queue 2. */
+ uint8_t queue_id2;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id2_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id2_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id2_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id2_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id2_bw_weight;
+ /* ID of CoS Queue 3. */
+ uint8_t queue_id3;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id3_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id3_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id3_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id3_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id3_bw_weight;
+ /* ID of CoS Queue 4. */
+ uint8_t queue_id4;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id4_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id4_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id4_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id4_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id4_bw_weight;
+ /* ID of CoS Queue 5. */
+ uint8_t queue_id5;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id5_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id5_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id5_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id5_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id5_bw_weight;
+ /* ID of CoS Queue 6. */
+ uint8_t queue_id6;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id6_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id6_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id6_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id6_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id6_bw_weight;
+ /* ID of CoS Queue 7. */
+ uint8_t queue_id7;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id7_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id7_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id7_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id7_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id7_bw_weight;
+ uint8_t unused_1[5];
+} __attribute__((packed));
+
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
+struct hwrm_queue_cos2bw_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_queue_dscp_qcaps *
+ *************************/
+
+
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
+struct hwrm_queue_dscp_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
+struct hwrm_queue_dscp_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The number of bits provided by the hardware for the DSCP value. */
+ uint8_t num_dscp_bits;
+ uint8_t unused_0;
+ /* Max number of DSCP-MASK-PRI entries supported. */
+ uint16_t max_entries;
+ uint8_t unused_1[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_queue_dscp2pri_qcfg *
+ ****************************/
+
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
+struct hwrm_queue_dscp2pri_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the 24-bits DSCP-MASK-PRI
+ * tuple(s) will be copied to.
+ */
+ uint64_t dest_data_addr;
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ uint8_t unused_0;
+ /* Size of the buffer pointed to by dest_data_addr. */
+ uint16_t dest_data_buffer_size;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * A count of the number of DSCP-MASK-PRI tuple(s) pointed to
+ * by the dest_data_addr.
+ */
+ uint16_t entry_cnt;
+ /*
+ * This is the default PRI which un-initialized DSCP values are
+ * mapped to.
+ */
+ uint8_t default_pri;
+ uint8_t unused_0[4];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_queue_dscp2pri_cfg *
+ ***************************/
+
+
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
+struct hwrm_queue_dscp2pri_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the 24-bits DSCP-MASK-PRI tuple
+ * will be copied from.
+ */
+ uint64_t src_data_addr;
+ uint32_t flags;
+ /* use_hw_default_pri is 1 b */
+ #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_FLAGS_USE_HW_DEFAULT_PRI \
+ UINT32_C(0x1)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the default_pri field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_ENABLES_DEFAULT_PRI \
+ UINT32_C(0x1)
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ /*
+ * This is the default PRI which un-initialized DSCP values will be
+ * mapped to.
+ */
+ uint8_t default_pri;
+ /*
+ * A count of the number of DSCP-MASK-PRI tuple(s) in the data pointed
+ * to by src_data_addr.
+ */
+ uint16_t entry_cnt;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************
+ * hwrm_vnic_alloc *
+ *******************/
+
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
+struct hwrm_vnic_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When this bit is '1', this VNIC is requested to
+ * be the default VNIC for this function.
+ */
+ #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_vnic_alloc_output (size:128b/16B) */
+struct hwrm_vnic_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Logical vnic ID */
+ uint32_t vnic_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************
+ * hwrm_vnic_free *
+ ******************/
+
+
+/* hwrm_vnic_free_input (size:192b/24B) */
+struct hwrm_vnic_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical vnic ID */
+ uint32_t vnic_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_vnic_free_output (size:128b/16B) */
+struct hwrm_vnic_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************
+ * hwrm_vnic_cfg *
+ *****************/
+
+
+/* hwrm_vnic_cfg_input (size:320b/40B) */
+struct hwrm_vnic_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC is requested to
+ * be the default VNIC for the function.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC is being configured to
+ * strip VLAN in the RX path.
+ * If set to '0', then VLAN stripping is disabled on
+ * this VNIC.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC is being configured to
+ * buffer receive packets in the hardware until the host
+ * posts new receive buffers.
+ * If set to '0', then bd_stall is being configured to be
+ * disabled on this VNIC.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC is being configured to
+ * receive both RoCE and non-RoCE traffic.
+ * If set to '0', then this VNIC is not configured to be
+ * operating in dual VNIC mode.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE \
+ UINT32_C(0x8)
+ /*
+ * When this flag is set to '1', the VNIC is requested to
+ * be configured to receive only RoCE traffic.
+ * If this flag is set to '0', then this flag shall be
+ * ignored by the HWRM.
+ * If roce_dual_vnic_mode flag is set to '1'
+ * or roce_mirroring_capable_vnic_mode flag to 1,
+ * then the HWRM client shall not set this flag to '1'.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE \
+ UINT32_C(0x10)
+ /*
+ * When a VNIC uses one destination ring group for certain
+ * application (e.g. Receive Flow Steering) where
+ * exact match is used to direct packets to a VNIC with one
+ * destination ring group only, there is no need to configure
+ * RSS indirection table for that VNIC as only one destination
+ * ring group is used.
+ *
+ * This flag is used to enable a mode where
+ * RSS is enabled in the VNIC using a RSS context
+ * for computing RSS hash but the RSS indirection table is
+ * not configured using hwrm_vnic_rss_cfg.
+ *
+ * If this mode is enabled, then the driver should not program
+ * RSS indirection table for the RSS context that is used for
+ * computing RSS hash only.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the VNIC is being configured to
+ * receive both RoCE and non-RoCE traffic, but forward only the
+ * RoCE traffic further. Also, RoCE traffic can be mirrored to
+ * L2 driver.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \
+ UINT32_C(0x40)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the dflt_ring_grp field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the rss_rule field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the cos_rule field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the lb_rule field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the mru field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the default_rx_ring_id field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the default_cmpl_ring_id field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID \
+ UINT32_C(0x40)
+ /* Logical vnic ID */
+ uint16_t vnic_id;
+ /*
+ * Default Completion ring for the VNIC. This ring will
+ * be chosen if packet does not match any RSS rules and if
+ * there is no COS rule.
+ */
+ uint16_t dflt_ring_grp;
+ /*
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * there is no RSS rule.
+ */
+ uint16_t rss_rule;
+ /*
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * there is no COS rule.
+ */
+ uint16_t cos_rule;
+ /*
+ * RSS ID for load balancing rule/table structure.
+ * 0xFF... (All Fs) if there is no LB rule.
+ */
+ uint16_t lb_rule;
+ /*
+ * The maximum receive unit of the vnic.
+ * Each vnic is associated with a function.
+ * The vnic mru value overwrites the mru setting of the
+ * associated function.
+ * The HWRM shall make sure that vnic mru does not exceed
+ * the mru of the port the function is associated with.
+ */
+ uint16_t mru;
+ /*
+ * Default Rx ring for the VNIC. This ring will
+ * be chosen if packet does not match any RSS rules.
+ * The aggregation ring associated with the Rx ring is
+ * implied based on the Rx ring specified when the
+ * aggregation ring was allocated.
+ */
+ uint16_t default_rx_ring_id;
+ /*
+ * Default completion ring for the VNIC. This ring will
+ * be chosen if packet does not match any RSS rules.
+ */
+ uint16_t default_cmpl_ring_id;
+} __attribute__((packed));
+
+/* hwrm_vnic_cfg_output (size:128b/16B) */
+struct hwrm_vnic_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************
+ * hwrm_vnic_qcfg *
+ ******************/
+
+
+/* hwrm_vnic_qcfg_input (size:256b/32B) */
+struct hwrm_vnic_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the vf_id_valid field to be
+ * configured.
+ */
+ #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
+ /* Logical vnic ID */
+ uint32_t vnic_id;
+ /* ID of Virtual Function whose VNIC resource is being queried. */
+ uint16_t vf_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_vnic_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Default Completion ring for the VNIC. */
+ uint16_t dflt_ring_grp;
+ /*
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * there is no RSS rule.
+ */
+ uint16_t rss_rule;
+ /*
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * there is no COS rule.
+ */
+ uint16_t cos_rule;
+ /*
+ * RSS ID for load balancing rule/table structure.
+ * 0xFF... (All Fs) if there is no LB rule.
+ */
+ uint16_t lb_rule;
+ /* The maximum receive unit of the vnic. */
+ uint16_t mru;
+ uint8_t unused_0[2];
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC is the default VNIC for
+ * the function.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * strip VLAN in the RX path.
+ * If set to '0', then VLAN stripping is disabled on
+ * this VNIC.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * buffer receive packets in the hardware until the host
+ * posts new receive buffers.
+ * If set to '0', then bd_stall is disabled on
+ * this VNIC.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * receive both RoCE and non-RoCE traffic.
+ * If set to '0', then this VNIC is not configured to
+ * operate in dual VNIC mode.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE \
+ UINT32_C(0x8)
+ /*
+ * When this flag is set to '1', the VNIC is configured to
+ * receive only RoCE traffic.
+ * When this flag is set to '0', the VNIC is not configured
+ * to receive only RoCE traffic.
+ * If roce_dual_vnic_mode flag and this flag both are set
+ * to '1', then it is an invalid configuration of the
+ * VNIC. The HWRM should not allow that type of
+ * mis-configuration by HWRM clients.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE \
+ UINT32_C(0x10)
+ /*
+ * When a VNIC uses one destination ring group for certain
+ * application (e.g. Receive Flow Steering) where
+ * exact match is used to direct packets to a VNIC with one
+ * destination ring group only, there is no need to configure
+ * RSS indirection table for that VNIC as only one destination
+ * ring group is used.
+ *
+ * When this bit is set to '1', then the VNIC is enabled in a
+ * mode where RSS is enabled in the VNIC using a RSS context
+ * for computing RSS hash but the RSS indirection table is
+ * not configured.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * receive both RoCE and non-RoCE traffic, but forward only
+ * RoCE traffic further. Also RoCE traffic can be mirrored to
+ * L2 driver.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \
+ UINT32_C(0x40)
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************
+ * hwrm_vnic_qcaps *
+ *******************/
+
+
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
+struct hwrm_vnic_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
+struct hwrm_vnic_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The maximum receive unit that is settable on a vnic. */
+ uint16_t mru;
+ uint8_t unused_0[2];
+ uint32_t flags;
+ /* Unused. */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_UNUSED \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the capability of stripping VLAN in
+ * the RX path is supported on VNIC(s).
+ * If set to '0', then VLAN stripping capability is
+ * not supported on VNIC(s).
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the capability to buffer receive
+ * packets in the hardware until the host posts new receive buffers
+ * is supported on VNIC(s).
+ * If set to '0', then bd_stall capability is not supported
+ * on VNIC(s).
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_BD_STALL_CAP \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the capability to
+ * receive both RoCE and non-RoCE traffic on VNIC(s) is
+ * supported.
+ * If set to '0', then the capability to receive
+ * both RoCE and non-RoCE traffic on VNIC(s) is
+ * not supported.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_DUAL_VNIC_CAP \
+ UINT32_C(0x8)
+ /*
+ * When this bit is set to '1', the capability to configure
+ * a VNIC to receive only RoCE traffic is supported.
+ * When this flag is set to '0', the VNIC capability to
+ * configure to receive only RoCE traffic is not supported.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_ONLY_VNIC_CAP \
+ UINT32_C(0x10)
+ /*
+ * When this bit is set to '1', then the capability to enable
+ * a VNIC in a mode where RSS context without configuring
+ * RSS indirection table is supported (for RSS hash computation).
+ * When this bit is set to '0', then a VNIC can not be configured
+ * with a mode to enable RSS context without configuring RSS
+ * indirection table.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_DFLT_CR_CAP \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the capability to
+ * mirror the the RoCE traffic is supported.
+ * If set to '0', then the capability to mirror the
+ * RoCE traffic is not supported.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1', the outermost RSS hashing capability
+ * is supported. If set to '0', then the outermost RSS hashing
+ * capability is not supported.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP \
+ UINT32_C(0x80)
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************
+ * hwrm_vnic_tpa_cfg *
+ *********************/
+
+
+/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
+struct hwrm_vnic_tpa_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) of
+ * non-tunneled TCP packets.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) of
+ * tunneled TCP packets.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Windows Receive Segment Coalescing (RSC) rules.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Linux Generic Receive Offload (GRO) rules.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for TCP
+ * packets with IP ECN set to non-zero.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN \
+ UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for
+ * GRE tunneled TCP packets only if all packets have the
+ * same GRE sequence.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP/IPv4 packets with consecutively increasing IPIDs.
+ * In other words, the last packet that is being
+ * aggregated to an already existing aggregation context
+ * shall have IPID 1 more than the IPID of the last packet
+ * that was aggregated in that aggregation context.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP packets with the same TTL (IPv4) or Hop limit (IPv6)
+ * value.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK \
+ UINT32_C(0x80)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the max_agg_segs field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the max_aggs field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the max_agg_timer field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the min_agg_len field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8)
+ /* Logical vnic ID */
+ uint16_t vnic_id;
+ /*
+ * This is the maximum number of TCP segments that can
+ * be aggregated (unit is Log2). Max value is 31.
+ */
+ uint16_t max_agg_segs;
+ /* 1 segment */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
+ /* 2 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
+ /* 4 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
+ /* 8 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
+ /* Any segment size larger than this is not valid */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_LAST \
+ HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX
+ /*
+ * This is the maximum number of aggregations this VNIC is
+ * allowed (unit is Log2). Max value is 7
+ */
+ uint16_t max_aggs;
+ /* 1 aggregation */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0)
+ /* 2 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1)
+ /* 4 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2)
+ /* 8 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3)
+ /* 16 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4)
+ /* Any aggregation size larger than this is not valid */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_LAST \
+ HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX
+ uint8_t unused_0[2];
+ /*
+ * This is the maximum amount of time allowed for
+ * an aggregation context to complete after it was initiated.
+ */
+ uint32_t max_agg_timer;
+ /*
+ * This is the minimum amount of payload length required to
+ * start an aggregation context.
+ */
+ uint32_t min_agg_len;
+} __attribute__((packed));
+
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
+struct hwrm_vnic_tpa_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_vnic_tpa_qcfg *
+ **********************/
+
+
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical vnic ID */
+ uint16_t vnic_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) of
+ * non-tunneled TCP packets.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_TPA \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) of
+ * tunneled TCP packets.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_ENCAP_TPA \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Windows Receive Segment Coalescing (RSC) rules.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_RSC_WND_UPDATE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Linux Generic Receive Offload (GRO) rules.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for TCP
+ * packets with IP ECN set to non-zero.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_ECN \
+ UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for
+ * GRE tunneled TCP packets only if all packets have the
+ * same GRE sequence.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP/IPv4 packets with consecutively increasing IPIDs.
+ * In other words, the last packet that is being
+ * aggregated to an already existing aggregation context
+ * shall have IPID 1 more than the IPID of the last packet
+ * that was aggregated in that aggregation context.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_IPID_CHECK \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP packets with the same TTL (IPv4) or Hop limit (IPv6)
+ * value.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_TTL_CHECK \
+ UINT32_C(0x80)
+ /*
+ * This is the maximum number of TCP segments that can
+ * be aggregated (unit is Log2). Max value is 31.
+ */
+ uint16_t max_agg_segs;
+ /* 1 segment */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
+ /* 2 segments */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
+ /* 4 segments */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
+ /* 8 segments */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
+ /* Any segment size larger than this is not valid */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_LAST \
+ HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX
+ /*
+ * This is the maximum number of aggregations this VNIC is
+ * allowed (unit is Log2). Max value is 7
+ */
+ uint16_t max_aggs;
+ /* 1 aggregation */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_1 UINT32_C(0x0)
+ /* 2 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_2 UINT32_C(0x1)
+ /* 4 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_4 UINT32_C(0x2)
+ /* 8 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_8 UINT32_C(0x3)
+ /* 16 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_16 UINT32_C(0x4)
+ /* Any aggregation size larger than this is not valid */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX UINT32_C(0x7)
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_LAST \
+ HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX
+ /*
+ * This is the maximum amount of time allowed for
+ * an aggregation context to complete after it was initiated.
+ */
+ uint32_t max_agg_timer;
+ /*
+ * This is the minimum amount of payload length required to
+ * start an aggregation context.
+ */
+ uint32_t min_agg_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************
+ * hwrm_vnic_rss_cfg *
+ *********************/
+
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
+struct hwrm_vnic_rss_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t hash_type;
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv4 addresses of IPv4
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of TCP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of UDP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv4 addresses of IPv6
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of TCP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of UDP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
+ /* VNIC ID of VNIC associated with RSS table being configured. */
+ uint16_t vnic_id;
+ /*
+ * Specifies which VNIC ring table pair to configure.
+ * Valid values range from 0 to 7.
+ */
+ uint8_t ring_table_pair_index;
+ /* Flags to specify different RSS hash modes. */
+ uint8_t hash_mode_flags;
+ /*
+ * When this bit is '1', it indicates using current RSS
+ * hash mode setting configured in the device.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 4 tuples {l3.src, l3.dest,
+ * l4.src, l4.dest} for tunnel packets. For none-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4 \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for
+ * tunnel packets. For none-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest,
+ * t_l4.src, t_l4.dest} for tunnel packets. For none-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for
+ * tunnel packets. For none-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 \
+ UINT32_C(0x10)
+ /* This is the address for rss ring group table */
+ uint64_t ring_grp_tbl_addr;
+ /* This is the address for rss hash key table */
+ uint64_t hash_key_tbl_addr;
+ /* Index to the rss indirection table. */
+ uint16_t rss_ctx_idx;
+ uint8_t unused_1[6];
+} __attribute__((packed));
+
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
+struct hwrm_vnic_rss_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_vnic_rss_qcfg *
+ **********************/
+
+
+/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_rss_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Index to the rss indirection table. */
+ uint16_t rss_ctx_idx;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
+struct hwrm_vnic_rss_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t hash_type;
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv4 addresses of IPv4
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV4 UINT32_C(0x1)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of TCP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of UDP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv4 addresses of IPv6
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of TCP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of UDP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
+ uint8_t unused_0[4];
+ /* This is the value of rss hash key */
+ uint32_t hash_key[10];
+ /* Flags to specify different RSS hash modes. */
+ uint8_t hash_mode_flags;
+ /*
+ * When this bit is '1', it indicates using current RSS
+ * hash mode setting configured in the device.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_DEFAULT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 4 tuples {l3.src, l3.dest,
+ * l4.src, l4.dest} for tunnel packets. For none-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_4 \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for
+ * tunnel packets. For none-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_2 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest,
+ * t_l4.src, t_l4.dest} for tunnel packets. For none-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_4 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for
+ * tunnel packets. For none-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_2 \
+ UINT32_C(0x10)
+ uint8_t unused_1[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_vnic_plcmodes_cfg *
+ **************************/
+
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
+struct hwrm_vnic_plcmodes_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC shall be configured to
+ * use regular placement algorithm.
+ * By default, the regular placement algorithm shall be
+ * enabled on the VNIC.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC shall be configured
+ * use the jumbo placement algorithm.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for IPv4 packets according
+ * to the following rules:
+ * # If the packet is identified as TCP/IPv4, then the
+ * packet is split at the beginning of the TCP payload.
+ * # If the packet is identified as UDP/IPv4, then the
+ * packet is split at the beginning of UDP payload.
+ * # If the packet is identified as non-TCP and non-UDP
+ * IPv4 packet, then the packet is split at the beginning
+ * of the upper layer protocol header carried in the IPv4
+ * packet.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for IPv6 packets according
+ * to the following rules:
+ * # If the packet is identified as TCP/IPv6, then the
+ * packet is split at the beginning of the TCP payload.
+ * # If the packet is identified as UDP/IPv6, then the
+ * packet is split at the beginning of UDP payload.
+ * # If the packet is identified as non-TCP and non-UDP
+ * IPv6 packet, then the packet is split at the beginning
+ * of the upper layer protocol header carried in the IPv6
+ * packet.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for FCoE packets at the
+ * beginning of FC payload.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE \
+ UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for RoCE packets at the
+ * beginning of RoCE payload (after BTH/GRH headers).
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE \
+ UINT32_C(0x20)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the jumbo_thresh_valid field to be
+ * configured.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the hds_offset_valid field to be
+ * configured.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the hds_threshold_valid field to be
+ * configured.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID \
+ UINT32_C(0x4)
+ /* Logical vnic ID */
+ uint32_t vnic_id;
+ /*
+ * When jumbo placement algorithm is enabled, this value
+ * is used to determine the threshold for jumbo placement.
+ * Packets with length larger than this value will be
+ * placed according to the jumbo placement algorithm.
+ */
+ uint16_t jumbo_thresh;
+ /*
+ * This value is used to determine the offset into
+ * packet buffer where the split data (payload) will be
+ * placed according to one of of HDS placement algorithm.
+ *
+ * The lengths of packet buffers provided for split data
+ * shall be larger than this value.
+ */
+ uint16_t hds_offset;
+ /*
+ * When one of the HDS placement algorithm is enabled, this
+ * value is used to determine the threshold for HDS
+ * placement.
+ * Packets with length larger than this value will be
+ * placed according to the HDS placement algorithm.
+ * This value shall be in multiple of 4 bytes.
+ */
+ uint16_t hds_threshold;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_vnic_plcmodes_qcfg *
+ ***************************/
+
+
+/* hwrm_vnic_plcmodes_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_plcmodes_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical vnic ID */
+ uint32_t vnic_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_vnic_plcmodes_qcfg_output (size:192b/24B) */
+struct hwrm_vnic_plcmodes_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * use regular placement algorithm.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_REGULAR_PLACEMENT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * use the jumbo placement algorithm.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_JUMBO_PLACEMENT \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for IPv4 packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for IPv6 packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for FCoE packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE \
+ UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for RoCE packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the VNIC is configured
+ * to be the default VNIC of the requesting function.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC \
+ UINT32_C(0x40)
+ /*
+ * When jumbo placement algorithm is enabled, this value
+ * is used to determine the threshold for jumbo placement.
+ * Packets with length larger than this value will be
+ * placed according to the jumbo placement algorithm.
+ */
+ uint16_t jumbo_thresh;
+ /*
+ * This value is used to determine the offset into
+ * packet buffer where the split data (payload) will be
+ * placed according to one of of HDS placement algorithm.
+ *
+ * The lengths of packet buffers provided for split data
+ * shall be larger than this value.
+ */
+ uint16_t hds_offset;
+ /*
+ * When one of the HDS placement algorithm is enabled, this
+ * value is used to determine the threshold for HDS
+ * placement.
+ * Packets with length larger than this value will be
+ * placed according to the HDS placement algorithm.
+ * This value shall be in multiple of 4 bytes.
+ */
+ uint16_t hds_threshold;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************************
+ * hwrm_vnic_rss_cos_lb_ctx_alloc *
+ **********************************/
+
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* rss_cos_lb_ctx_id is 16 b */
+ uint16_t rss_cos_lb_ctx_id;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************************
+ * hwrm_vnic_rss_cos_lb_ctx_free *
+ *********************************/
+
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* rss_cos_lb_ctx_id is 16 b */
+ uint16_t rss_cos_lb_ctx_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************
+ * hwrm_ring_alloc *
+ *******************/
+
+
+/* hwrm_ring_alloc_input (size:640b/80B) */
+struct hwrm_ring_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the ring_arb_cfg field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the stat_ctx_id_valid field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the max_bw_valid field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the rx_ring_id field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the nq_ring_id field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the rx_buf_size field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID \
+ UINT32_C(0x100)
+ /* Ring Type. */
+ uint8_t ring_type;
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1)
+ /* RX Ring (RR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
+ /* RX Aggregation Ring */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG UINT32_C(0x4)
+ /* Notification Queue */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ UINT32_C(0x5)
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_LAST \
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ
+ uint8_t unused_0[3];
+ /*
+ * This value is a pointer to the page table for the
+ * Ring.
+ */
+ uint64_t page_tbl_addr;
+ /* First Byte Offset of the first entry in the first page. */
+ uint32_t fbo;
+ /*
+ * Actual page size in 2^page_size. The supported range is increments
+ * in powers of 2 from 16 bytes to 1GB.
+ * - 4 = 16 B
+ * Page size is 16 B.
+ * - 12 = 4 KB
+ * Page size is 4 KB.
+ * - 13 = 8 KB
+ * Page size is 8 KB.
+ * - 16 = 64 KB
+ * Page size is 64 KB.
+ * - 21 = 2 MB
+ * Page size is 2 MB.
+ * - 22 = 4 MB
+ * Page size is 4 MB.
+ * - 30 = 1 GB
+ * Page size is 1 GB.
+ */
+ uint8_t page_size;
+ /*
+ * This value indicates the depth of page table.
+ * For this version of the specification, value other than 0 or
+ * 1 shall be considered as an invalid value.
+ * When the page_tbl_depth = 0, then it is treated as a
+ * special case with the following.
+ * 1. FBO and page size fields are not valid.
+ * 2. page_tbl_addr is the physical address of the first
+ * element of the ring.
+ */
+ uint8_t page_tbl_depth;
+ uint8_t unused_1[2];
+ /*
+ * Number of 16B units in the ring. Minimum size for
+ * a ring is 16 16B entries.
+ */
+ uint32_t length;
+ /*
+ * Logical ring number for the ring to be allocated.
+ * This value determines the position in the doorbell
+ * area where the update to the ring will be made.
+ *
+ * For completion rings, this value is also the MSI-X
+ * vector number for the function the completion ring is
+ * associated with.
+ */
+ uint16_t logical_id;
+ /*
+ * This field is used only when ring_type is a TX ring.
+ * This value indicates what completion ring the TX ring
+ * is associated with.
+ */
+ uint16_t cmpl_ring_id;
+ /*
+ * This field is used only when ring_type is a TX ring.
+ * This value indicates what CoS queue the TX ring
+ * is associated with.
+ */
+ uint16_t queue_id;
+ /*
+ * When allocating a Rx ring or Rx aggregation ring, this field
+ * specifies the size of the buffer descriptors posted to the ring.
+ */
+ uint16_t rx_buf_size;
+ /*
+ * When allocating an Rx aggregation ring, this field
+ * specifies the associated Rx ring ID.
+ */
+ uint16_t rx_ring_id;
+ /*
+ * When allocating a completion ring, this field
+ * specifies the associated NQ ring ID.
+ */
+ uint16_t nq_ring_id;
+ /*
+ * This field is used only when ring_type is a TX ring.
+ * This field is used to configure arbitration related
+ * parameters for a TX ring.
+ */
+ uint16_t ring_arb_cfg;
+ /* Arbitration policy used for the ring. */
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK \
+ UINT32_C(0xf)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0
+ /*
+ * Use strict priority for the TX ring.
+ * Priority value is specified in arb_policy_param
+ */
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \
+ UINT32_C(0x1)
+ /*
+ * Use weighted fair queue arbitration for the TX ring.
+ * Weight is specified in arb_policy_param
+ */
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \
+ UINT32_C(0x2)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \
+ HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ
+ /* Reserved field. */
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_SFT 4
+ /*
+ * Arbitration policy specific parameter.
+ * # For strict priority arbitration policy, this field
+ * represents a priority value. If set to 0, then the priority
+ * is not specified and the HWRM is allowed to select
+ * any priority for this TX ring.
+ * # For weighted fair queue arbitration policy, this field
+ * represents a weight value. If set to 0, then the weight
+ * is not specified and the HWRM is allowed to select
+ * any weight for this TX ring.
+ */
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \
+ UINT32_C(0xff00)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ uint16_t unused_3;
+ /*
+ * This field is reserved for the future use.
+ * It shall be set to 0.
+ */
+ uint32_t reserved3;
+ /*
+ * This field is used only when ring_type is a TX ring.
+ * This input indicates what statistics context this ring
+ * should be associated with.
+ */
+ uint32_t stat_ctx_id;
+ /*
+ * This field is reserved for the future use.
+ * It shall be set to 0.
+ */
+ uint32_t reserved4;
+ /*
+ * This field is used only when ring_type is a TX ring
+ * to specify maximum BW allocated to the TX ring.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this ring inside the device.
+ */
+ uint32_t max_bw;
+ /* The bandwidth value. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_LAST \
+ HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * This field is used only when ring_type is a Completion ring.
+ * This value indicates what interrupt mode should be used
+ * on this completion ring.
+ * Note: In the legacy interrupt mode, no more than 16
+ * completion rings are allowed.
+ */
+ uint8_t int_mode;
+ /* Legacy INTA */
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY UINT32_C(0x0)
+ /* Reserved */
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD UINT32_C(0x1)
+ /* MSI-X */
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX UINT32_C(0x2)
+ /* No Interrupt - Polled mode */
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL UINT32_C(0x3)
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_LAST \
+ HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
+ uint8_t unused_4[3];
+} __attribute__((packed));
+
+/* hwrm_ring_alloc_output (size:128b/16B) */
+struct hwrm_ring_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Physical number of ring allocated.
+ * This value shall be unique for a ring type.
+ */
+ uint16_t ring_id;
+ /* Logical number of ring allocated. */
+ uint16_t logical_ring_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************
+ * hwrm_ring_free *
+ ******************/
+
+
+/* hwrm_ring_free_input (size:192b/24B) */
+struct hwrm_ring_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Ring Type. */
+ uint8_t ring_type;
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1)
+ /* RX Ring (RR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
+ /* RX Aggregation Ring */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG UINT32_C(0x4)
+ /* Notification Queue */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_NQ UINT32_C(0x5)
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_LAST \
+ HWRM_RING_FREE_INPUT_RING_TYPE_NQ
+ uint8_t unused_0;
+ /* Physical number of ring allocated. */
+ uint16_t ring_id;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_ring_free_output (size:128b/16B) */
+struct hwrm_ring_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************************
+ * hwrm_ring_cmpl_ring_qaggint_params *
+ **************************************/
+
+
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Physical number of completion ring. */
+ uint16_t ring_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint16_t flags;
+ /*
+ * When this bit is set to '1', interrupt max
+ * timer is reset whenever a completion is received.
+ */
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS_OUTPUT_FLAGS_TIMER_RESET \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', ring idle mode
+ * aggregation will be enabled.
+ */
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS_OUTPUT_FLAGS_RING_IDLE \
+ UINT32_C(0x2)
+ /*
+ * Number of completions to aggregate before DMA
+ * during the normal mode.
+ */
+ uint16_t num_cmpl_dma_aggr;
+ /*
+ * Number of completions to aggregate before DMA
+ * during the interrupt mode.
+ */
+ uint16_t num_cmpl_dma_aggr_during_int;
+ /*
+ * Timer in unit of 80-nsec used to aggregate completions before
+ * DMA during the normal mode (not in interrupt mode).
+ */
+ uint16_t cmpl_aggr_dma_tmr;
+ /*
+ * Timer in unit of 80-nsec used to aggregate completions before
+ * DMA during the interrupt mode.
+ */
+ uint16_t cmpl_aggr_dma_tmr_during_int;
+ /* Minimum time (in unit of 80-nsec) between two interrupts. */
+ uint16_t int_lat_tmr_min;
+ /*
+ * Maximum wait time (in unit of 80-nsec) spent aggregating
+ * completions before signaling the interrupt after the
+ * interrupt is enabled.
+ */
+ uint16_t int_lat_tmr_max;
+ /*
+ * Minimum number of completions aggregated before signaling
+ * an interrupt.
+ */
+ uint16_t num_cmpl_aggr_int;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************************************
+ * hwrm_ring_cmpl_ring_cfg_aggint_params *
+ *****************************************/
+
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Physical number of completion ring. */
+ uint16_t ring_id;
+ uint16_t flags;
+ /*
+ * When this bit is set to '1', interrupt latency max
+ * timer is reset whenever a completion is received.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', ring idle mode
+ * aggregation will be enabled.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE \
+ UINT32_C(0x2)
+ /*
+ * Set this flag to 1 when configuring parameters on a
+ * notification queue. Set this flag to 0 when configuring
+ * parameters on a completion queue.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_IS_NQ \
+ UINT32_C(0x4)
+ /*
+ * Number of completions to aggregate before DMA
+ * during the normal mode.
+ */
+ uint16_t num_cmpl_dma_aggr;
+ /*
+ * Number of completions to aggregate before DMA
+ * during the interrupt mode.
+ */
+ uint16_t num_cmpl_dma_aggr_during_int;
+ /*
+ * Timer in unit of 80-nsec used to aggregate completions before
+ * DMA during the normal mode (not in interrupt mode).
+ */
+ uint16_t cmpl_aggr_dma_tmr;
+ /*
+ * Timer in unit of 80-nsec used to aggregate completions before
+ * DMA during the interrupt mode.
+ */
+ uint16_t cmpl_aggr_dma_tmr_during_int;
+ /* Minimum time (in unit of 80-nsec) between two interrupts. */
+ uint16_t int_lat_tmr_min;
+ /*
+ * Maximum wait time (in unit of 80-nsec) spent aggregating
+ * cmpls before signaling the interrupt after the
+ * interrupt is enabled.
+ */
+ uint16_t int_lat_tmr_max;
+ /*
+ * Minimum number of completions aggregated before signaling
+ * an interrupt.
+ */
+ uint16_t num_cmpl_aggr_int;
+ /*
+ * Bitfield that indicates which parameters are to be applied. Only
+ * required when configuring devices with notification queues, and
+ * used in that case to set certain parameters on completion queues
+ * and others on notification queues.
+ */
+ uint16_t enables;
+ /*
+ * This bit must be '1' for the num_cmpl_dma_aggr field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the num_cmpl_dma_aggr_during_int field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the cmpl_aggr_dma_tmr field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the int_lat_tmr_min field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MIN \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the int_lat_tmr_max field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MAX \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the num_cmpl_aggr_int field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_AGGR_INT \
+ UINT32_C(0x20)
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************
+ * hwrm_ring_reset *
+ *******************/
+
+
+/* hwrm_ring_reset_input (size:192b/24B) */
+struct hwrm_ring_reset_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Ring Type. */
+ uint8_t ring_type;
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_TX UINT32_C(0x1)
+ /* RX Ring (RR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_LAST \
+ HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL
+ uint8_t unused_0;
+ /* Physical number of the ring. */
+ uint16_t ring_id;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_ring_reset_output (size:128b/16B) */
+struct hwrm_ring_reset_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_ring_grp_alloc *
+ ***********************/
+
+
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
+struct hwrm_ring_grp_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value identifies the CR associated with the ring
+ * group.
+ */
+ uint16_t cr;
+ /*
+ * This value identifies the main RR associated with the ring
+ * group.
+ */
+ uint16_t rr;
+ /*
+ * This value identifies the aggregation RR associated with
+ * the ring group. If this value is 0xFF... (All Fs), then no
+ * Aggregation ring will be set.
+ */
+ uint16_t ar;
+ /*
+ * This value identifies the statistics context associated
+ * with the ring group.
+ */
+ uint16_t sc;
+} __attribute__((packed));
+
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
+struct hwrm_ring_grp_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This is the ring group ID value. Use this value to program
+ * the default ring group for the VNIC or as table entries
+ * in an RSS/COS context.
+ */
+ uint32_t ring_group_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_ring_grp_free *
+ **********************/
+
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
+struct hwrm_ring_grp_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This is the ring group ID value. */
+ uint32_t ring_group_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_ring_grp_free_output (size:128b/16B) */
+struct hwrm_ring_grp_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_cfa_l2_filter_alloc *
+ ****************************/
+
+
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
+struct hwrm_cfa_l2_filter_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH \
+ UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
+ /* Setting of this flag indicates the applicability to the loopback path. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
+ UINT32_C(0x2)
+ /*
+ * Setting of this flag indicates drop action. If this flag is not set,
+ * then it should be considered accept action.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP \
+ UINT32_C(0x4)
+ /*
+ * If this flag is set, all t_l2_* fields are invalid
+ * and they should not be specified.
+ * If this flag is set, then l2_* fields refer to
+ * fields of outermost L2 header.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST \
+ UINT32_C(0x8)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the l2_addr field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the l2_addr_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the l2_ovlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the l2_ovlan_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the l2_ivlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the l2_ivlan_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the t_l2_addr field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the t_l2_addr_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the t_l2_ovlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the t_l2_ovlan_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the t_l2_ivlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the t_l2_ivlan_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the src_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the src_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x10000)
+ /*
+ * This value sets the match value for the L2 MAC address.
+ * Destination MAC address for RX path.
+ * Source MAC address for TX path.
+ */
+ uint8_t l2_addr[6];
+ uint8_t unused_0[2];
+ /*
+ * This value sets the mask value for the L2 address.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
+ */
+ uint8_t l2_addr_mask[6];
+ /* This value sets VLAN ID value for outer VLAN. */
+ uint16_t l2_ovlan;
+ /*
+ * This value sets the mask value for the ovlan id.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
+ */
+ uint16_t l2_ovlan_mask;
+ /* This value sets VLAN ID value for inner VLAN. */
+ uint16_t l2_ivlan;
+ /*
+ * This value sets the mask value for the ivlan id.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
+ */
+ uint16_t l2_ivlan_mask;
+ uint8_t unused_1[2];
+ /*
+ * This value sets the match value for the tunnel
+ * L2 MAC address.
+ * Destination MAC address for RX path.
+ * Source MAC address for TX path.
+ */
+ uint8_t t_l2_addr[6];
+ uint8_t unused_2[2];
+ /*
+ * This value sets the mask value for the tunnel L2
+ * address.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
+ */
+ uint8_t t_l2_addr_mask[6];
+ /* This value sets VLAN ID value for tunnel outer VLAN. */
+ uint16_t t_l2_ovlan;
+ /*
+ * This value sets the mask value for the tunnel ovlan id.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
+ */
+ uint16_t t_l2_ovlan_mask;
+ /* This value sets VLAN ID value for tunnel inner VLAN. */
+ uint16_t t_l2_ivlan;
+ /*
+ * This value sets the mask value for the tunnel ivlan id.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
+ */
+ uint16_t t_l2_ivlan_mask;
+ /* This value identifies the type of source of the packet. */
+ uint8_t src_type;
+ /* Network port */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT UINT32_C(0x0)
+ /* Physical function */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF UINT32_C(0x1)
+ /* Virtual function */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF UINT32_C(0x2)
+ /* Virtual NIC of a function */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC UINT32_C(0x3)
+ /* Embedded processor for CFA management */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG UINT32_C(0x4)
+ /* Embedded processor for OOB management */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE UINT32_C(0x5)
+ /* Embedded processor for RoCE */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO UINT32_C(0x6)
+ /* Embedded processor for network proxy functions */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG UINT32_C(0x7)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_LAST \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG
+ uint8_t unused_3;
+ /*
+ * This value is the id of the source.
+ * For a network port, it represents port_id.
+ * For a physical function, it represents fid.
+ * For a virtual function, it represents vf_id.
+ * For a vnic, it represents vnic_id.
+ * For embedded processors, this id is not valid.
+ *
+ * Notes:
+ * 1. The function ID is implied if it src_id is
+ * not provided for a src_type that is either
+ */
+ uint32_t src_id;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Lable Switching (MPLS) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_4;
+ /*
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
+ */
+ uint16_t dst_id;
+ /*
+ * Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
+ */
+ uint16_t mirror_vnic_id;
+ /*
+ * This hint is provided to help in placing
+ * the filter in the filter table.
+ */
+ uint8_t pri_hint;
+ /* No preference */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \
+ UINT32_C(0x0)
+ /* Above the given filter */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER \
+ UINT32_C(0x1)
+ /* Below the given filter */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \
+ UINT32_C(0x2)
+ /* As high as possible */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX \
+ UINT32_C(0x3)
+ /* As low as possible */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN \
+ UINT32_C(0x4)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_LAST \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN
+ uint8_t unused_5;
+ uint32_t unused_6;
+ /*
+ * This is the ID of the filter that goes along with
+ * the pri_hint.
+ *
+ * This field is valid only for the following values.
+ * 1 - Above the given filter
+ * 2 - Below the given filter
+ */
+ uint64_t l2_filter_id_hint;
+} __attribute__((packed));
+
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_l2_filter_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
+ */
+ uint64_t l2_filter_id;
+ /*
+ * This is the ID of the flow associated with this
+ * filter.
+ * This value shall be used to match and associate the
+ * flow identifier returned in completion records.
+ * A value of 0xFFFFFFFF shall indicate no flow id.
+ */
+ uint32_t flow_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_cfa_l2_filter_free *
+ ***************************/
+
+
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_l2_filter_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
+ */
+ uint64_t l2_filter_id;
+} __attribute__((packed));
+
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_cfa_l2_filter_cfg *
+ **************************/
+
+
+/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
+struct hwrm_cfa_l2_filter_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX
+ /*
+ * Setting of this flag indicates drop action. If this flag is not set,
+ * then it should be considered accept action.
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_DROP UINT32_C(0x2)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the new_mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
+ UINT32_C(0x2)
+ /*
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
+ */
+ uint64_t l2_filter_id;
+ /*
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
+ */
+ uint32_t dst_id;
+ /*
+ * New Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
+ */
+ uint32_t new_mirror_vnic_id;
+} __attribute__((packed));
+
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_cfa_l2_set_rx_mask *
+ ***************************/
+
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VNIC ID */
+ uint32_t vnic_id;
+ uint32_t mask;
+ /*
+ * When this bit is '1', the function is requested to accept
+ * multi-cast packets specified by the multicast addr table.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the function is requested to accept
+ * all multi-cast packets.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the function is requested to accept
+ * broadcast packets.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the function is requested to be
+ * put in the promiscuous mode.
+ *
+ * The HWRM should accept any function to set up
+ * promiscuous mode.
+ *
+ * The HWRM shall follow the semantics below for the
+ * promiscuous mode support.
+ * # When partitioning is not enabled on a port
+ * (i.e. single PF on the port), then the PF shall
+ * be allowed to be in the promiscuous mode. When the
+ * PF is in the promiscuous mode, then it shall
+ * receive all host bound traffic on that port.
+ * # When partitioning is enabled on a port
+ * (i.e. multiple PFs per port) and a PF on that
+ * port is in the promiscuous mode, then the PF
+ * receives all traffic within that partition as
+ * identified by a unique identifier for the
+ * PF (e.g. S-Tag). If a unique outer VLAN
+ * for the PF is specified, then the setting of
+ * promiscuous mode on that PF shall result in the
+ * PF receiving all host bound traffic with matching
+ * outer VLAN.
+ * # A VF shall can be set in the promiscuous mode.
+ * In the promiscuous mode, the VF does not receive any
+ * traffic unless a unique outer VLAN for the
+ * VF is specified. If a unique outer VLAN
+ * for the VF is specified, then the setting of
+ * promiscuous mode on that VF shall result in the
+ * VF receiving all host bound traffic with the
+ * matching outer VLAN.
+ * # The HWRM shall allow the setting of promiscuous
+ * mode on a function independently from the
+ * promiscuous mode settings on other functions.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS \
+ UINT32_C(0x10)
+ /*
+ * If this flag is set, the corresponding RX
+ * filters shall be set up to cover multicast/broadcast
+ * filters for the outermost Layer 2 destination MAC
+ * address field.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST \
+ UINT32_C(0x20)
+ /*
+ * If this flag is set, the corresponding RX
+ * filters shall be set up to cover multicast/broadcast
+ * filters for the VLAN-tagged packets that match the
+ * TPID and VID fields of VLAN tags in the VLAN tag
+ * table specified in this command.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY \
+ UINT32_C(0x40)
+ /*
+ * If this flag is set, the corresponding RX
+ * filters shall be set up to cover multicast/broadcast
+ * filters for non-VLAN tagged packets and VLAN-tagged
+ * packets that match the TPID and VID fields of VLAN
+ * tags in the VLAN tag table specified in this command.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN \
+ UINT32_C(0x80)
+ /*
+ * If this flag is set, the corresponding RX
+ * filters shall be set up to cover multicast/broadcast
+ * filters for non-VLAN tagged packets and VLAN-tagged
+ * packets matching any VLAN tag.
+ *
+ * If this flag is set, then the HWRM shall ignore
+ * VLAN tags specified in vlan_tag_tbl.
+ *
+ * If none of vlanonly, vlan_nonvlan, and anyvlan_nonvlan
+ * flags is set, then the HWRM shall ignore
+ * VLAN tags specified in vlan_tag_tbl.
+ *
+ * The HWRM client shall set at most one flag out of
+ * vlanonly, vlan_nonvlan, and anyvlan_nonvlan.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN \
+ UINT32_C(0x100)
+ /* This is the address for mcast address tbl. */
+ uint64_t mc_tbl_addr;
+ /*
+ * This value indicates how many entries in mc_tbl are valid.
+ * Each entry is 6 bytes.
+ */
+ uint32_t num_mc_entries;
+ uint8_t unused_0[4];
+ /*
+ * This is the address for VLAN tag table.
+ * Each VLAN entry in the table is 4 bytes of a VLAN tag
+ * including TPID, PCP, DEI, and VID fields in network byte
+ * order.
+ */
+ uint64_t vlan_tag_tbl_addr;
+ /*
+ * This value indicates how many entries in vlan_tag_tbl are
+ * valid. Each entry is 4 bytes.
+ */
+ uint32_t num_vlan_tags;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
+struct hwrm_cfa_l2_set_rx_mask_cmd_err {
+ /*
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN \
+ UINT32_C(0x0)
+ /* Unable to complete operation due to conflict with Ntuple Filter */
+ #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR \
+ UINT32_C(0x1)
+ #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST \
+ HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_cfa_vlan_antispoof_cfg *
+ *******************************/
+
+
+/* hwrm_cfa_vlan_antispoof_cfg_input (size:256b/32B) */
+struct hwrm_cfa_vlan_antispoof_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being configured.
+ * Only valid for a VF FID configured by the PF.
+ */
+ uint16_t fid;
+ uint8_t unused_0[2];
+ /* Number of VLAN entries in the vlan_tag_mask_tbl. */
+ uint32_t num_vlan_entries;
+ /*
+ * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN
+ * antispoof table. Each table entry contains the 16-bit TPID
+ * (0x8100 or 0x88a8 only), 16-bit VLAN ID, and a 16-bit mask,
+ * all in network order to match hwrm_cfa_l2_set_rx_mask.
+ * For an individual VLAN entry, the mask value should be 0xfff
+ * for the 12-bit VLAN ID.
+ */
+ uint64_t vlan_tag_mask_tbl_addr;
+} __attribute__((packed));
+
+/* hwrm_cfa_vlan_antispoof_cfg_output (size:128b/16B) */
+struct hwrm_cfa_vlan_antispoof_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_vlan_antispoof_qcfg *
+ ********************************/
+
+
+/* hwrm_cfa_vlan_antispoof_qcfg_input (size:256b/32B) */
+struct hwrm_cfa_vlan_antispoof_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * Only valid for a VF FID queried by the PF.
+ */
+ uint16_t fid;
+ uint8_t unused_0[2];
+ /*
+ * Maximum number of VLAN entries the firmware is allowed to DMA
+ * to vlan_tag_mask_tbl.
+ */
+ uint32_t max_vlan_entries;
+ /*
+ * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN
+ * antispoof table to which firmware will DMA to. Each table
+ * entry will contain the 16-bit TPID (0x8100 or 0x88a8 only),
+ * 16-bit VLAN ID, and a 16-bit mask, all in network order to
+ * match hwrm_cfa_l2_set_rx_mask. For an individual VLAN entry,
+ * the mask value should be 0xfff for the 12-bit VLAN ID.
+ */
+ uint64_t vlan_tag_mask_tbl_addr;
+} __attribute__((packed));
+
+/* hwrm_cfa_vlan_antispoof_qcfg_output (size:128b/16B) */
+struct hwrm_cfa_vlan_antispoof_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Number of valid entries DMAd by firmware to vlan_tag_mask_tbl. */
+ uint32_t num_vlan_entries;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_tunnel_filter_alloc *
+ ********************************/
+
+
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* Setting of this flag indicates the applicability to the loopback path. */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
+ UINT32_C(0x1)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the l2_filter_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the l2_addr field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the l2_ivlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the l3_addr field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L3_ADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the l3_addr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L3_ADDR_TYPE \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the t_l3_addr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_T_L3_ADDR_TYPE \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the t_l3_addr field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_T_L3_ADDR \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the vni field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_VNI \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the dst_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_DST_VNIC_ID \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x400)
+ /*
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
+ */
+ uint64_t l2_filter_id;
+ /*
+ * This value sets the match value for the inner L2
+ * MAC address.
+ * Destination MAC address for RX path.
+ * Source MAC address for TX path.
+ */
+ uint8_t l2_addr[6];
+ /*
+ * This value sets VLAN ID value for inner VLAN.
+ * Only 12-bits of VLAN ID are used in setting the filter.
+ */
+ uint16_t l2_ivlan;
+ /*
+ * The value of inner destination IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
+ */
+ uint32_t l3_addr[4];
+ /*
+ * The value of tunnel destination IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
+ */
+ uint32_t t_l3_addr[4];
+ /*
+ * This value indicates the type of inner IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
+ */
+ uint8_t l3_addr_type;
+ /*
+ * This value indicates the type of tunnel IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
+ */
+ uint8_t t_l3_addr_type;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Lable Switching (MPLS) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ /*
+ * tunnel_flags allows the user to indicate the tunnel tag detection
+ * for the tunnel type specified in tunnel_type.
+ */
+ uint8_t tunnel_flags;
+ /*
+ * If the tunnel_type is geneve, then this bit indicates if we
+ * need to match the geneve OAM packet.
+ * If the tunnel_type is nvgre or gre, then this bit indicates if
+ * we need to detect checksum present bit in geneve header.
+ * If the tunnel_type is mpls, then this bit indicates if we need
+ * to match mpls packet with explicit IPV4/IPV6 null header.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR \
+ UINT32_C(0x1)
+ /*
+ * If the tunnel_type is geneve, then this bit indicates if we
+ * need to detect the critical option bit set in the oam packet.
+ * If the tunnel_type is nvgre or gre, then this bit indicates
+ * if we need to match nvgre packets with key present bit set in
+ * gre header.
+ * If the tunnel_type is mpls, then this bit indicates if we
+ * need to match mpls packet with S bit from inner/second label.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 \
+ UINT32_C(0x2)
+ /*
+ * If the tunnel_type is geneve, then this bit indicates if we
+ * need to match geneve packet with extended header bit set in
+ * geneve header.
+ * If the tunnel_type is nvgre or gre, then this bit indicates
+ * if we need to match nvgre packets with sequence number
+ * present bit set in gre header.
+ * If the tunnel_type is mpls, then this bit indicates if we
+ * need to match mpls packet with S bit from out/first label.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 \
+ UINT32_C(0x4)
+ /*
+ * Virtual Network Identifier (VNI). Only valid with
+ * tunnel_types VXLAN, NVGRE, and Geneve.
+ * Only lower 24-bits of VNI field are used
+ * in setting up the filter.
+ */
+ uint32_t vni;
+ /* Logical VNIC ID of the destination VNIC. */
+ uint32_t dst_vnic_id;
+ /*
+ * Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
+ */
+ uint32_t mirror_vnic_id;
+} __attribute__((packed));
+
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t tunnel_filter_id;
+ /*
+ * This is the ID of the flow associated with this
+ * filter.
+ * This value shall be used to match and associate the
+ * flow identifier returned in completion records.
+ * A value of 0xFFFFFFFF shall indicate no flow id.
+ */
+ uint32_t flow_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_cfa_tunnel_filter_free *
+ *******************************/
+
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t tunnel_filter_id;
+} __attribute__((packed));
+
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_tunnel_filter_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************************
+ * hwrm_cfa_redirect_tunnel_type_alloc *
+ ***************************************/
+
+
+/* hwrm_cfa_redirect_tunnel_type_alloc_input (size:192b/24B) */
+struct hwrm_cfa_redirect_tunnel_type_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* The destination function id, to whom the traffic is redirected. */
+ uint16_t dest_fid;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Lable Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ /* Tunnel alloc flags. */
+ uint8_t flags;
+ /* Setting of this flag indicates modify existing redirect tunnel to new destination function ID. */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_FLAGS_MODIFY_DST \
+ UINT32_C(0x1)
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_redirect_tunnel_type_alloc_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************************
+ * hwrm_cfa_redirect_tunnel_type_free *
+ **************************************/
+
+
+/* hwrm_cfa_redirect_tunnel_type_free_input (size:192b/24B) */
+struct hwrm_cfa_redirect_tunnel_type_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* The destination function id, to whom the traffic is redirected. */
+ uint16_t dest_fid;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Lable Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0[5];
+} __attribute__((packed));
+
+/* hwrm_cfa_redirect_tunnel_type_free_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************************
+ * hwrm_cfa_redirect_tunnel_type_info *
+ **************************************/
+
+
+/* hwrm_cfa_redirect_tunnel_type_info_input (size:192b/24B) */
+struct hwrm_cfa_redirect_tunnel_type_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* The source function id. */
+ uint16_t src_fid;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Lable Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0[5];
+} __attribute__((packed));
+
+/* hwrm_cfa_redirect_tunnel_type_info_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The destination function id, to whom the traffic is redirected. */
+ uint16_t dest_fid;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+ /* IPv4 version and header length. */
+ uint8_t ver_hlen;
+ /* IPv4 header length */
+ #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK UINT32_C(0xf)
+ #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+ /* Version */
+ #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK UINT32_C(0xf0)
+ #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
+ /* IPv4 type of service. */
+ uint8_t tos;
+ /* IPv4 identification. */
+ uint16_t ip_id;
+ /* IPv4 flags and offset. */
+ uint16_t flags_frag_offset;
+ /* IPv4 TTL. */
+ uint8_t ttl;
+ /* IPv4 protocol. */
+ uint8_t protocol;
+ /* IPv4 source address. */
+ uint32_t src_ip_addr;
+ /* IPv4 destination address. */
+ uint32_t dest_ip_addr;
+} __attribute__((packed));
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+ /* IPv6 version, traffic class and flow label. */
+ uint32_t ver_tc_flow_label;
+ /* IPv6 version shift */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT \
+ UINT32_C(0x1c)
+ /* IPv6 version mask */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK \
+ UINT32_C(0xf0000000)
+ /* IPv6 TC shift */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT \
+ UINT32_C(0x14)
+ /* IPv6 TC mask */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK \
+ UINT32_C(0xff00000)
+ /* IPv6 flow label shift */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT \
+ UINT32_C(0x0)
+ /* IPv6 flow label mask */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK \
+ UINT32_C(0xfffff)
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST \
+ HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+ /* IPv6 payload length. */
+ uint16_t payload_len;
+ /* IPv6 next header. */
+ uint8_t next_hdr;
+ /* IPv6 TTL. */
+ uint8_t ttl;
+ /* IPv6 source address. */
+ uint32_t src_ip_addr[4];
+ /* IPv6 destination address. */
+ uint32_t dest_ip_addr[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
+struct hwrm_cfa_encap_data_vxlan {
+ /* Source MAC address. */
+ uint8_t src_mac_addr[6];
+ /* reserved. */
+ uint16_t unused_0;
+ /* Destination MAC address. */
+ uint8_t dst_mac_addr[6];
+ /* Number of VLAN tags. */
+ uint8_t num_vlan_tags;
+ /* reserved. */
+ uint8_t unused_1;
+ /* Outer VLAN TPID. */
+ uint16_t ovlan_tpid;
+ /* Outer VLAN TCI. */
+ uint16_t ovlan_tci;
+ /* Inner VLAN TPID. */
+ uint16_t ivlan_tpid;
+ /* Inner VLAN TCI. */
+ uint16_t ivlan_tci;
+ /* L3 header fields. */
+ uint32_t l3[10];
+ /* IP version mask. */
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_MASK UINT32_C(0xf)
+ /* IP version 4. */
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 UINT32_C(0x4)
+ /* IP version 6. */
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 UINT32_C(0x6)
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_LAST \
+ HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+ /* UDP source port. */
+ uint16_t src_port;
+ /* UDP destination port. */
+ uint16_t dst_port;
+ /* VXLAN Network Identifier. */
+ uint32_t vni;
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_cfa_encap_record_alloc *
+ *******************************/
+
+
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
+struct hwrm_cfa_encap_record_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* Setting of this flag indicates the applicability to the loopback path. */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_FLAGS_LOOPBACK \
+ UINT32_C(0x1)
+ /* Encapsulation Type. */
+ uint8_t encap_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) after inside Ethernet payload */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Lable Switching (MPLS) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* VLAN */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VLAN \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE \
+ UINT32_C(0x8)
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_LAST \
+ HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE
+ uint8_t unused_0[3];
+ /* This value is encap data used for the given encap type. */
+ uint32_t encap_data[20];
+} __attribute__((packed));
+
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t encap_record_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************************
+ * hwrm_cfa_encap_record_free *
+ ******************************/
+
+
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
+struct hwrm_cfa_encap_record_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t encap_record_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_ntuple_filter_alloc *
+ ********************************/
+
+
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* Setting of this flag indicates the applicability to the loopback path. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
+ UINT32_C(0x1)
+ /*
+ * Setting of this flag indicates drop action. If this flag is not set,
+ * then it should be considered accept action.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP \
+ UINT32_C(0x2)
+ /*
+ * Setting of this flag indicates that a meter is expected to be attached
+ * to this flow. This hint can be used when choosing the action record
+ * format required for the flow.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER \
+ UINT32_C(0x4)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the l2_filter_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the ethertype field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the src_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the ipaddr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the src_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the src_ipaddr_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the dst_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the dst_ipaddr_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the ip_protocol field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the src_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the src_port_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the dst_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the dst_port_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the pri_hint field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_PRI_HINT \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the ntuple_filter_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_NTUPLE_FILTER_ID \
+ UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x10000)
+ /*
+ * This bit must be '1' for the mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x20000)
+ /*
+ * This bit must be '1' for the dst_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \
+ UINT32_C(0x40000)
+ /*
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
+ */
+ uint64_t l2_filter_id;
+ /*
+ * This value indicates the source MAC address in
+ * the Ethernet header.
+ */
+ uint8_t src_macaddr[6];
+ /* This value indicates the ethertype in the Ethernet header. */
+ uint16_t ethertype;
+ /*
+ * This value indicates the type of IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
+ */
+ uint8_t ip_addr_type;
+ /* invalid */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \
+ UINT32_C(0x0)
+ /* IPv4 */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \
+ UINT32_C(0x4)
+ /* IPv6 */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
+ UINT32_C(0x6)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
+ /*
+ * The value of protocol filed in IP header.
+ * Applies to UDP and TCP traffic.
+ * 6 - TCP
+ * 17 - UDP
+ */
+ uint8_t ip_protocol;
+ /* invalid */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \
+ UINT32_C(0x0)
+ /* TCP */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \
+ UINT32_C(0x6)
+ /* UDP */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \
+ UINT32_C(0x11)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP
+ /*
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
+ */
+ uint16_t dst_id;
+ /*
+ * Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
+ */
+ uint16_t mirror_vnic_id;
+ /*
+ * This value indicates the tunnel type for this filter.
+ * If this field is not specified, then the filter shall
+ * apply to both non-tunneled and tunneled packets.
+ * If this field conflicts with the tunnel_type specified
+ * in the l2_filter_id, then the HWRM shall return an
+ * error for this command.
+ */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Lable Switching (MPLS) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ /*
+ * This hint is provided to help in placing
+ * the filter in the filter table.
+ */
+ uint8_t pri_hint;
+ /* No preference */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \
+ UINT32_C(0x0)
+ /* Above the given filter */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE \
+ UINT32_C(0x1)
+ /* Below the given filter */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_BELOW \
+ UINT32_C(0x2)
+ /* As high as possible */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_HIGHEST \
+ UINT32_C(0x3)
+ /* As low as possible */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST \
+ UINT32_C(0x4)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST
+ /*
+ * The value of source IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
+ */
+ uint32_t src_ipaddr[4];
+ /*
+ * The value of source IP address mask to be used in
+ * filtering.
+ * For IPv4, first four bytes represent the IP address mask.
+ */
+ uint32_t src_ipaddr_mask[4];
+ /*
+ * The value of destination IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
+ */
+ uint32_t dst_ipaddr[4];
+ /*
+ * The value of destination IP address mask to be used in
+ * filtering.
+ * For IPv4, first four bytes represent the IP address mask.
+ */
+ uint32_t dst_ipaddr_mask[4];
+ /*
+ * The value of source port to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t src_port;
+ /*
+ * The value of source port mask to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t src_port_mask;
+ /*
+ * The value of destination port to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t dst_port;
+ /*
+ * The value of destination port mask to be used in
+ * filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t dst_port_mask;
+ /*
+ * This is the ID of the filter that goes along with
+ * the pri_hint.
+ */
+ uint64_t ntuple_filter_id_hint;
+} __attribute__((packed));
+
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t ntuple_filter_id;
+ /*
+ * This is the ID of the flow associated with this
+ * filter.
+ * This value shall be used to match and associate the
+ * flow identifier returned in completion records.
+ * A value of 0xFFFFFFFF shall indicate no flow id.
+ */
+ uint32_t flow_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
+ /*
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN \
+ UINT32_C(0x0)
+ /* Unable to complete operation due to conflict with Rx Mask VLAN */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR \
+ UINT32_C(0x1)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_cfa_ntuple_filter_free *
+ *******************************/
+
+
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t ntuple_filter_id;
+} __attribute__((packed));
+
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************************
+ * hwrm_cfa_ntuple_filter_cfg *
+ ******************************/
+
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the new_dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the new_mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the new_meter_instance_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \
+ UINT32_C(0x4)
+ uint8_t unused_0[4];
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t ntuple_filter_id;
+ /*
+ * If set, this value shall represent the new
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and new network port id of the destination port for
+ * the TX path.
+ */
+ uint32_t new_dst_id;
+ /*
+ * New Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
+ */
+ uint32_t new_mirror_vnic_id;
+ /*
+ * New meter to attach to the flow. Specifying the
+ * invalid instance ID is used to remove any existing
+ * meter from the flow.
+ */
+ uint16_t new_meter_instance_id;
+ /*
+ * A value of 0xfff is considered invalid and implies the
+ * instance is not configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID
+ uint8_t unused_1[6];
+} __attribute__((packed));
+
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_cfa_em_flow_alloc *
+ **************************/
+
+
+/* hwrm_cfa_em_flow_alloc_input (size:896b/112B) */
+struct hwrm_cfa_em_flow_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX
+ /*
+ * Setting of this flag indicates enabling of a byte counter for a given
+ * flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_BYTE_CTR UINT32_C(0x2)
+ /*
+ * Setting of this flag indicates enabling of a packet counter for a given
+ * flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR UINT32_C(0x4)
+ /* Setting of this flag indicates de-capsulation action for the given flow. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DECAP UINT32_C(0x8)
+ /* Setting of this flag indicates encapsulation action for the given flow. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_ENCAP UINT32_C(0x10)
+ /*
+ * Setting of this flag indicates drop action. If this flag is not set,
+ * then it should be considered accept action.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x20)
+ /*
+ * Setting of this flag indicates that a meter is expected to be attached
+ * to this flow. This hint can be used when choosing the action record
+ * format required for the flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_METER UINT32_C(0x40)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the l2_filter_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the tunnel_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the src_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the dst_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the ovlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the ivlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the ethertype field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the src_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the dst_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the ipaddr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the ip_protocol field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the src_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the dst_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the encap_record_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ENCAP_RECORD_ID \
+ UINT32_C(0x10000)
+ /*
+ * This bit must be '1' for the meter_instance_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_METER_INSTANCE_ID \
+ UINT32_C(0x20000)
+ /*
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
+ */
+ uint64_t l2_filter_id;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Lable Switching (MPLS) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0[3];
+ /*
+ * Tunnel identifier.
+ * Virtual Network Identifier (VNI). Only valid with
+ * tunnel_types VXLAN, NVGRE, and Geneve.
+ * Only lower 24-bits of VNI field are used
+ * in setting up the filter.
+ */
+ uint32_t tunnel_id;
+ /*
+ * This value indicates the source MAC address in
+ * the Ethernet header.
+ */
+ uint8_t src_macaddr[6];
+ /* The meter instance to attach to the flow. */
+ uint16_t meter_instance_id;
+ /*
+ * A value of 0xfff is considered invalid and implies the
+ * instance is not configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID
+ /*
+ * This value indicates the destination MAC address in
+ * the Ethernet header.
+ */
+ uint8_t dst_macaddr[6];
+ /*
+ * This value indicates the VLAN ID of the outer VLAN tag
+ * in the Ethernet header.
+ */
+ uint16_t ovlan_vid;
+ /*
+ * This value indicates the VLAN ID of the inner VLAN tag
+ * in the Ethernet header.
+ */
+ uint16_t ivlan_vid;
+ /* This value indicates the ethertype in the Ethernet header. */
+ uint16_t ethertype;
+ /*
+ * This value indicates the type of IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
+ */
+ uint8_t ip_addr_type;
+ /* invalid */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN UINT32_C(0x0)
+ /* IPv4 */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 UINT32_C(0x4)
+ /* IPv6 */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
+ /*
+ * The value of protocol filed in IP header.
+ * Applies to UDP and TCP traffic.
+ * 6 - TCP
+ * 17 - UDP
+ */
+ uint8_t ip_protocol;
+ /* invalid */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN UINT32_C(0x0)
+ /* TCP */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP UINT32_C(0x6)
+ /* UDP */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP UINT32_C(0x11)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP
+ uint8_t unused_1[2];
+ /*
+ * The value of source IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
+ */
+ uint32_t src_ipaddr[4];
+ /*
+ * big_endian = True
+ * The value of destination IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
+ */
+ uint32_t dst_ipaddr[4];
+ /*
+ * The value of source port to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t src_port;
+ /*
+ * The value of destination port to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t dst_port;
+ /*
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
+ */
+ uint16_t dst_id;
+ /*
+ * Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
+ */
+ uint16_t mirror_vnic_id;
+ /* Logical ID of the encapsulation record. */
+ uint32_t encap_record_id;
+ uint8_t unused_2[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_em_flow_alloc_output (size:192b/24B) */
+struct hwrm_cfa_em_flow_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t em_filter_id;
+ /*
+ * This is the ID of the flow associated with this
+ * filter.
+ * This value shall be used to match and associate the
+ * flow identifier returned in completion records.
+ * A value of 0xFFFFFFFF shall indicate no flow id.
+ */
+ uint32_t flow_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_cfa_em_flow_free *
+ *************************/
+
+
+/* hwrm_cfa_em_flow_free_input (size:192b/24B) */
+struct hwrm_cfa_em_flow_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t em_filter_id;
+} __attribute__((packed));
+
+/* hwrm_cfa_em_flow_free_output (size:128b/16B) */
+struct hwrm_cfa_em_flow_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/************************
+ * hwrm_cfa_em_flow_cfg *
+ ************************/
+
+
+/* hwrm_cfa_em_flow_cfg_input (size:384b/48B) */
+struct hwrm_cfa_em_flow_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the new_dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the new_mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the new_meter_instance_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \
+ UINT32_C(0x4)
+ uint8_t unused_0[4];
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t em_filter_id;
+ /*
+ * If set, this value shall represent the new
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
+ */
+ uint32_t new_dst_id;
+ /*
+ * New Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
+ */
+ uint32_t new_mirror_vnic_id;
+ /*
+ * New meter to attach to the flow. Specifying the
+ * invalid instance ID is used to remove any existing
+ * meter from the flow.
+ */
+ uint16_t new_meter_instance_id;
+ /*
+ * A value of 0xfff is considered invalid and implies the
+ * instance is not configured.
+ */
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID
+ uint8_t unused_1[6];
+} __attribute__((packed));
+
+/* hwrm_cfa_em_flow_cfg_output (size:128b/16B) */
+struct hwrm_cfa_em_flow_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_meter_profile_alloc *
+ ********************************/
+
+
+/* hwrm_cfa_meter_profile_alloc_input (size:320b/40B) */
+struct hwrm_cfa_meter_profile_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX
+ /* The meter algorithm type. */
+ uint8_t meter_type;
+ /* RFC 2697 (srTCM) */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2697 \
+ UINT32_C(0x0)
+ /* RFC 2698 (trTCM) */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2698 \
+ UINT32_C(0x1)
+ /* RFC 4115 (trTCM) */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115 \
+ UINT32_C(0x2)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115
+ /*
+ * This field is reserved for the future use.
+ * It shall be set to 0.
+ */
+ uint16_t reserved1;
+ /*
+ * This field is reserved for the future use.
+ * It shall be set to 0.
+ */
+ uint32_t reserved2;
+ /* A meter rate specified in bytes-per-second. */
+ uint32_t commit_rate;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID
+ /* A meter burst size specified in bytes. */
+ uint32_t commit_burst;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID
+ /* A meter rate specified in bytes-per-second. */
+ uint32_t excess_peak_rate;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID
+ /* A meter burst size specified in bytes. */
+ uint32_t excess_peak_burst;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID
+} __attribute__((packed));
+
+/* hwrm_cfa_meter_profile_alloc_output (size:128b/16B) */
+struct hwrm_cfa_meter_profile_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value identifies a meter profile in CFA. */
+ uint16_t meter_profile_id;
+ /*
+ * A value of 0xfff is considered invalid and implies the
+ * profile is not configured.
+ */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_cfa_meter_profile_free *
+ *******************************/
+
+
+/* hwrm_cfa_meter_profile_free_input (size:192b/24B) */
+struct hwrm_cfa_meter_profile_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX
+ uint8_t unused_0;
+ /* This value identifies a meter profile in CFA. */
+ uint16_t meter_profile_id;
+ /*
+ * A value of 0xfff is considered invalid and implies the
+ * profile is not configured.
+ */
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_LAST \
+ HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_meter_profile_free_output (size:128b/16B) */
+struct hwrm_cfa_meter_profile_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************************
+ * hwrm_cfa_meter_profile_cfg *
+ ******************************/
+
+
+/* hwrm_cfa_meter_profile_cfg_input (size:320b/40B) */
+struct hwrm_cfa_meter_profile_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX
+ /* The meter algorithm type. */
+ uint8_t meter_type;
+ /* RFC 2697 (srTCM) */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2697 \
+ UINT32_C(0x0)
+ /* RFC 2698 (trTCM) */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2698 \
+ UINT32_C(0x1)
+ /* RFC 4115 (trTCM) */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115 \
+ UINT32_C(0x2)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115
+ /* This value identifies a meter profile in CFA. */
+ uint16_t meter_profile_id;
+ /*
+ * A value of 0xfff is considered invalid and implies the
+ * profile is not configured.
+ */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID
+ /*
+ * This field is reserved for the future use.
+ * It shall be set to 0.
+ */
+ uint32_t reserved;
+ /* A meter rate specified in bytes-per-second. */
+ uint32_t commit_rate;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID
+ /* A meter burst size specified in bytes. */
+ uint32_t commit_burst;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID
+ /* A meter rate specified in bytes-per-second. */
+ uint32_t excess_peak_rate;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID
+ /* A meter burst size specified in bytes. */
+ uint32_t excess_peak_burst;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID
+} __attribute__((packed));
+
+/* hwrm_cfa_meter_profile_cfg_output (size:128b/16B) */
+struct hwrm_cfa_meter_profile_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************************
+ * hwrm_cfa_meter_instance_alloc *
+ *********************************/
+
+
+/* hwrm_cfa_meter_instance_alloc_input (size:192b/24B) */
+struct hwrm_cfa_meter_instance_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH \
+ UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX
+ uint8_t unused_0;
+ /* This value identifies a meter profile in CFA. */
+ uint16_t meter_profile_id;
+ /*
+ * A value of 0xfff is considered invalid and implies the
+ * profile is not configured.
+ */
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_LAST \
+ HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_meter_instance_alloc_output (size:128b/16B) */
+struct hwrm_cfa_meter_instance_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value identifies a meter instance in CFA. */
+ uint16_t meter_instance_id;
+ /*
+ * A value of 0xfff is considered invalid and implies the
+ * instance is not configured.
+ */
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_meter_instance_free *
+ ********************************/
+
+
+/* hwrm_cfa_meter_instance_free_input (size:192b/24B) */
+struct hwrm_cfa_meter_instance_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX
+ uint8_t unused_0;
+ /* This value identifies a meter instance in CFA. */
+ uint16_t meter_instance_id;
+ /*
+ * A value of 0xfff is considered invalid and implies the
+ * instance is not configured.
+ */
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_meter_instance_free_output (size:128b/16B) */
+struct hwrm_cfa_meter_instance_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_cfa_decap_filter_alloc *
+ *******************************/
+
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
+struct hwrm_cfa_decap_filter_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* ovs_tunnel is 1 b */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_FLAGS_OVS_TUNNEL \
+ UINT32_C(0x1)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the tunnel_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_ID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the src_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the dst_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the ovlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_OVLAN_VID \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the ivlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IVLAN_VID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the t_ovlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_T_OVLAN_VID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the t_ivlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_T_IVLAN_VID \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the ethertype field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the src_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the dst_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the ipaddr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the ip_protocol field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the src_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the dst_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x10000)
+ /*
+ * Tunnel identifier.
+ * Virtual Network Identifier (VNI). Only valid with
+ * tunnel_types VXLAN, NVGRE, and Geneve.
+ * Only lower 24-bits of VNI field are used
+ * in setting up the filter.
+ */
+ uint32_t tunnel_id;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Lable Switching (MPLS) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0;
+ uint16_t unused_1;
+ /*
+ * This value indicates the source MAC address in
+ * the Ethernet header.
+ */
+ uint8_t src_macaddr[6];
+ uint8_t unused_2[2];
+ /*
+ * This value indicates the destination MAC address in
+ * the Ethernet header.
+ */
+ uint8_t dst_macaddr[6];
+ /*
+ * This value indicates the VLAN ID of the outer VLAN tag
+ * in the Ethernet header.
+ */
+ uint16_t ovlan_vid;
+ /*
+ * This value indicates the VLAN ID of the inner VLAN tag
+ * in the Ethernet header.
+ */
+ uint16_t ivlan_vid;
+ /*
+ * This value indicates the VLAN ID of the outer VLAN tag
+ * in the tunnel Ethernet header.
+ */
+ uint16_t t_ovlan_vid;
+ /*
+ * This value indicates the VLAN ID of the inner VLAN tag
+ * in the tunnel Ethernet header.
+ */
+ uint16_t t_ivlan_vid;
+ /* This value indicates the ethertype in the Ethernet header. */
+ uint16_t ethertype;
+ /*
+ * This value indicates the type of IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
+ */
+ uint8_t ip_addr_type;
+ /* invalid */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \
+ UINT32_C(0x0)
+ /* IPv4 */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \
+ UINT32_C(0x4)
+ /* IPv6 */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
+ UINT32_C(0x6)
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
+ HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
+ /*
+ * The value of protocol filed in IP header.
+ * Applies to UDP and TCP traffic.
+ * 6 - TCP
+ * 17 - UDP
+ */
+ uint8_t ip_protocol;
+ /* invalid */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \
+ UINT32_C(0x0)
+ /* TCP */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \
+ UINT32_C(0x6)
+ /* UDP */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \
+ UINT32_C(0x11)
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \
+ HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP
+ uint16_t unused_3;
+ uint32_t unused_4;
+ /*
+ * The value of source IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
+ */
+ uint32_t src_ipaddr[4];
+ /*
+ * The value of destination IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
+ */
+ uint32_t dst_ipaddr[4];
+ /*
+ * The value of source port to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t src_port;
+ /*
+ * The value of destination port to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t dst_port;
+ /*
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path.
+ */
+ uint16_t dst_id;
+ /*
+ * If set, this value shall represent the L2 context that matches the L2
+ * information of the decap filter.
+ */
+ uint16_t l2_ctxt_ref_id;
+} __attribute__((packed));
+
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t decap_filter_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************************
+ * hwrm_cfa_decap_filter_free *
+ ******************************/
+
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_decap_filter_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t decap_filter_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_flow_alloc *
+ ***********************/
+
+
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_flow_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint16_t flags;
+ /* tunnel is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_TUNNEL UINT32_C(0x1)
+ /* num_vlan is 2 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_MASK UINT32_C(0x6)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_SFT 1
+ /* no tags */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_NONE \
+ (UINT32_C(0x0) << 1)
+ /* 1 tag */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_ONE \
+ (UINT32_C(0x1) << 1)
+ /* 2 tags */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_TWO \
+ (UINT32_C(0x2) << 1)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_LAST \
+ HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_TWO
+ /* Enumeration denoting the Flow Type. */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_MASK UINT32_C(0x38)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_SFT 3
+ /* L2 flow */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_L2 \
+ (UINT32_C(0x0) << 3)
+ /* IPV4 flow */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV4 \
+ (UINT32_C(0x1) << 3)
+ /* IPV6 flow */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV6 \
+ (UINT32_C(0x2) << 3)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_LAST \
+ HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV6
+ /*
+ * Tx Flow: vf fid.
+ * Rx Flow: pf fid.
+ */
+ uint16_t src_fid;
+ /* Tunnel handle valid when tunnel flag is set. */
+ uint32_t tunnel_handle;
+ uint16_t action_flags;
+ /*
+ * Setting of this flag indicates drop action. If this flag is not set,
+ * then it should be considered accept action.
+ */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_FWD \
+ UINT32_C(0x1)
+ /* recycle is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_RECYCLE \
+ UINT32_C(0x2)
+ /*
+ * Setting of this flag indicates drop action. If this flag is not set,
+ * then it should be considered accept action.
+ */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_DROP \
+ UINT32_C(0x4)
+ /* meter is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_METER \
+ UINT32_C(0x8)
+ /* tunnel is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TUNNEL \
+ UINT32_C(0x10)
+ /* nat_src is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_SRC \
+ UINT32_C(0x20)
+ /* nat_dest is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_DEST \
+ UINT32_C(0x40)
+ /* nat_ipv4_address is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_IPV4_ADDRESS \
+ UINT32_C(0x80)
+ /* l2_header_rewrite is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_L2_HEADER_REWRITE \
+ UINT32_C(0x100)
+ /* ttl_decrement is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TTL_DECREMENT \
+ UINT32_C(0x200)
+ /*
+ * Tx Flow: pf or vf fid.
+ * Rx Flow: vf fid.
+ */
+ uint16_t dst_fid;
+ /* VLAN tpid, valid when push_vlan flag is set. */
+ uint16_t l2_rewrite_vlan_tpid;
+ /* VLAN tci, valid when push_vlan flag is set. */
+ uint16_t l2_rewrite_vlan_tci;
+ /* Meter id, valid when meter flag is set. */
+ uint16_t act_meter_id;
+ /* Flow with the same l2 context tcam key. */
+ uint16_t ref_flow_handle;
+ /* This value sets the match value for the ethertype. */
+ uint16_t ethertype;
+ /* valid when num tags is 1 or 2. */
+ uint16_t outer_vlan_tci;
+ /* This value sets the match value for the Destination MAC address. */
+ uint16_t dmac[3];
+ /* valid when num tags is 2. */
+ uint16_t inner_vlan_tci;
+ /* This value sets the match value for the Source MAC address. */
+ uint16_t smac[3];
+ /* The bit length of destination IP address mask. */
+ uint8_t ip_dst_mask_len;
+ /* The bit length of source IP address mask. */
+ uint8_t ip_src_mask_len;
+ /* The value of destination IPv4/IPv6 address. */
+ uint32_t ip_dst[4];
+ /* The source IPv4/IPv6 address. */
+ uint32_t ip_src[4];
+ /*
+ * The value of source port.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t l4_src_port;
+ /*
+ * The value of source port mask.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t l4_src_port_mask;
+ /*
+ * The value of destination port.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t l4_dst_port;
+ /*
+ * The value of destination port mask.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t l4_dst_port_mask;
+ /*
+ * NAT IPv4/6 address based on address type flag.
+ * 0 values are ignored.
+ */
+ uint32_t nat_ip_address[4];
+ /* L2 header re-write Destination MAC address. */
+ uint16_t l2_rewrite_dmac[3];
+ /*
+ * The NAT source/destination port based on direction flag.
+ * Applies to UDP and TCP traffic.
+ * 0 values are ignored.
+ */
+ uint16_t nat_port;
+ /* L2 header re-write Source MAC address. */
+ uint16_t l2_rewrite_smac[3];
+ /* The value of ip protocol. */
+ uint8_t ip_proto;
+ uint8_t unused_0;
+} __attribute__((packed));
+
+/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
+struct hwrm_cfa_flow_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Flow record index. */
+ uint16_t flow_handle;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_cfa_flow_free *
+ **********************/
+
+
+/* hwrm_cfa_flow_free_input (size:192b/24B) */
+struct hwrm_cfa_flow_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Flow record index. */
+ uint16_t flow_handle;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
+struct hwrm_cfa_flow_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* packet is 64 b */
+ uint64_t packet;
+ /* byte is 64 b */
+ uint64_t byte;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_cfa_flow_info *
+ **********************/
+
+
+/* hwrm_cfa_flow_info_input (size:192b/24B) */
+struct hwrm_cfa_flow_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Flow record index. */
+ uint16_t flow_handle;
+ /* Max flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_MASK \
+ UINT32_C(0xfff)
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_SFT 0
+ /* CNP flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT \
+ UINT32_C(0x1000)
+ /* Direction rx = 1 */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX \
+ UINT32_C(0x8000)
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_cfa_flow_info_output (size:448b/56B) */
+struct hwrm_cfa_flow_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* flags is 8 b */
+ uint8_t flags;
+ /* profile is 8 b */
+ uint8_t profile;
+ /* src_fid is 16 b */
+ uint16_t src_fid;
+ /* dst_fid is 16 b */
+ uint16_t dst_fid;
+ /* l2_ctxt_id is 16 b */
+ uint16_t l2_ctxt_id;
+ /* em_info is 64 b */
+ uint64_t em_info;
+ /* tcam_info is 64 b */
+ uint64_t tcam_info;
+ /* vfp_tcam_info is 64 b */
+ uint64_t vfp_tcam_info;
+ /* ar_id is 16 b */
+ uint16_t ar_id;
+ /* flow_handle is 16 b */
+ uint16_t flow_handle;
+ /* tunnel_handle is 32 b */
+ uint32_t tunnel_handle;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_flow_flush *
+ ***********************/
+
+
+/* hwrm_cfa_flow_flush_input (size:192b/24B) */
+struct hwrm_cfa_flow_flush_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_flow_flush_output (size:128b/16B) */
+struct hwrm_cfa_flow_flush_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_flow_stats *
+ ***********************/
+
+
+/* hwrm_cfa_flow_stats_input (size:320b/40B) */
+struct hwrm_cfa_flow_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Flow handle. */
+ uint16_t num_flows;
+ /* Flow handle. */
+ uint16_t flow_handle_0;
+ /* Flow handle. */
+ uint16_t flow_handle_1;
+ /* Flow handle. */
+ uint16_t flow_handle_2;
+ /* Flow handle. */
+ uint16_t flow_handle_3;
+ /* Flow handle. */
+ uint16_t flow_handle_4;
+ /* Flow handle. */
+ uint16_t flow_handle_5;
+ /* Flow handle. */
+ uint16_t flow_handle_6;
+ /* Flow handle. */
+ uint16_t flow_handle_7;
+ /* Flow handle. */
+ uint16_t flow_handle_8;
+ /* Flow handle. */
+ uint16_t flow_handle_9;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
+struct hwrm_cfa_flow_stats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* packet_0 is 64 b */
+ uint64_t packet_0;
+ /* packet_1 is 64 b */
+ uint64_t packet_1;
+ /* packet_2 is 64 b */
+ uint64_t packet_2;
+ /* packet_3 is 64 b */
+ uint64_t packet_3;
+ /* packet_4 is 64 b */
+ uint64_t packet_4;
+ /* packet_5 is 64 b */
+ uint64_t packet_5;
+ /* packet_6 is 64 b */
+ uint64_t packet_6;
+ /* packet_7 is 64 b */
+ uint64_t packet_7;
+ /* packet_8 is 64 b */
+ uint64_t packet_8;
+ /* packet_9 is 64 b */
+ uint64_t packet_9;
+ /* byte_0 is 64 b */
+ uint64_t byte_0;
+ /* byte_1 is 64 b */
+ uint64_t byte_1;
+ /* byte_2 is 64 b */
+ uint64_t byte_2;
+ /* byte_3 is 64 b */
+ uint64_t byte_3;
+ /* byte_4 is 64 b */
+ uint64_t byte_4;
+ /* byte_5 is 64 b */
+ uint64_t byte_5;
+ /* byte_6 is 64 b */
+ uint64_t byte_6;
+ /* byte_7 is 64 b */
+ uint64_t byte_7;
+ /* byte_8 is 64 b */
+ uint64_t byte_8;
+ /* byte_9 is 64 b */
+ uint64_t byte_9;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_cfa_vf_pair_alloc *
+ **************************/
+
+
+/* hwrm_cfa_vf_pair_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vf_pair_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_a_id;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_b_id;
+ uint8_t unused_0[4];
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vf_pair_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vf_pair_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_cfa_vf_pair_free *
+ *************************/
+
+
+/* hwrm_cfa_vf_pair_free_input (size:384b/48B) */
+struct hwrm_cfa_vf_pair_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vf_pair_free_output (size:128b/16B) */
+struct hwrm_cfa_vf_pair_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_cfa_vf_pair_info *
+ *************************/
+
+
+/* hwrm_cfa_vf_pair_info_input (size:448b/56B) */
+struct hwrm_cfa_vf_pair_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If this flag is set, lookup by name else lookup by index. */
+ #define HWRM_CFA_VF_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
+ /* vf pair table index. */
+ uint16_t vf_pair_index;
+ uint8_t unused_0[2];
+ /* VF Pair name (32 byte string). */
+ char vf_pair_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vf_pair_info_output (size:512b/64B) */
+struct hwrm_cfa_vf_pair_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* vf pair table index. */
+ uint16_t next_vf_pair_index;
+ /* vf pair member a's vf_fid. */
+ uint16_t vf_a_fid;
+ /* vf pair member a's Linux logical VF number. */
+ uint16_t vf_a_index;
+ /* vf pair member b's vf_fid. */
+ uint16_t vf_b_fid;
+ /* vf pair member a's Linux logical VF number. */
+ uint16_t vf_b_index;
+ /* vf pair state. */
+ uint8_t pair_state;
+ /* Pair has been allocated */
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1)
+ /* Both pair members are active */
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2)
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \
+ HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE
+ uint8_t unused_0[5];
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_pair_alloc *
+ ***********************/
+
+
+/* hwrm_cfa_pair_alloc_input (size:576b/72B) */
+struct hwrm_cfa_pair_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair, 5-rep2fn_mod). */
+ uint8_t pair_mode;
+ /* Pair between VF on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
+ /* Pair between REP on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
+ /* Pair between REP on local host with REP on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2REP UINT32_C(0x2)
+ /* Pair for the proxy interface. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PROXY UINT32_C(0x3)
+ /* Pair for the PF interface. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PFPAIR UINT32_C(0x4)
+ /* Modify exiting rep2fn pair and move pair to new PF. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MOD UINT32_C(0x5)
+ /* Modify exiting rep2fn pairs paired with same PF and move pairs to new PF. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL UINT32_C(0x6)
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_LAST \
+ HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL
+ uint8_t unused_0;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_a_id;
+ /* Logical Host (0xff-local host). */
+ uint8_t host_b_id;
+ /* Logical PF (0xff-PF for command channel). */
+ uint8_t pf_b_id;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_b_id;
+ /* Loopback port (0xff-internal loopback), valid for mode-3. */
+ uint8_t port_id;
+ /* Priority used for encap of loopback packets valid for mode-3. */
+ uint8_t pri;
+ /* New PF for rep2fn modify, valid for mode 5. */
+ uint16_t new_pf_fid;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the q_ab field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the q_ba field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the fc_ab field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the fc_ba field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID UINT32_C(0x8)
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+ /*
+ * The q_ab value specifies the logical index of the TX/RX CoS
+ * queue to be assigned for traffic in the A to B direction of
+ * the interface pair. The default value is 0.
+ */
+ uint8_t q_ab;
+ /*
+ * The q_ba value specifies the logical index of the TX/RX CoS
+ * queue to be assigned for traffic in the B to A direction of
+ * the interface pair. The default value is 1.
+ */
+ uint8_t q_ba;
+ /*
+ * Specifies whether RX ring flow control is disabled (0) or enabled
+ * (1) in the A to B direction. The default value is 0, meaning that
+ * packets will be dropped when the B-side RX rings are full.
+ */
+ uint8_t fc_ab;
+ /*
+ * Specifies whether RX ring flow control is disabled (0) or enabled
+ * (1) in the B to A direction. The default value is 1, meaning that
+ * the RX CoS queue will be flow controlled when the A-side RX rings
+ * are full.
+ */
+ uint8_t fc_ba;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_pair_alloc_output (size:192b/24B) */
+struct hwrm_cfa_pair_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Only valid for modes 1 and 2. */
+ uint16_t rx_cfa_code_a;
+ /* Only valid for modes 1 and 2. */
+ uint16_t tx_cfa_action_a;
+ /* Only valid for mode 2. */
+ uint16_t rx_cfa_code_b;
+ /* Only valid for mode 2. */
+ uint16_t tx_cfa_action_b;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_cfa_pair_free *
+ **********************/
+
+
+/* hwrm_cfa_pair_free_input (size:384b/48B) */
+struct hwrm_cfa_pair_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_pair_free_output (size:128b/16B) */
+struct hwrm_cfa_pair_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_cfa_pair_info *
+ **********************/
+
+
+/* hwrm_cfa_pair_info_input (size:448b/56B) */
+struct hwrm_cfa_pair_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If this flag is set, lookup by name else lookup by index. */
+ #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
+ /* If this flag is set, lookup by PF id and VF id. */
+ #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_REPRE UINT32_C(0x2)
+ /* Pair table index. */
+ uint16_t pair_index;
+ /* Pair pf index. */
+ uint8_t pair_pfid;
+ /* Pair vf index. */
+ uint8_t pair_vfid;
+ /* Pair name (32 byte string). */
+ char pair_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_pair_info_output (size:576b/72B) */
+struct hwrm_cfa_pair_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Pair table index. */
+ uint16_t next_pair_index;
+ /* Pair member a's fid. */
+ uint16_t a_fid;
+ /* Logical host number. */
+ uint8_t host_a_index;
+ /* Logical PF number. */
+ uint8_t pf_a_index;
+ /* Pair member a's Linux logical VF number. */
+ uint16_t vf_a_index;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code_a;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action_a;
+ /* Pair member b's fid. */
+ uint16_t b_fid;
+ /* Logical host number. */
+ uint8_t host_b_index;
+ /* Logical PF number. */
+ uint8_t pf_b_index;
+ /* Pair member a's Linux logical VF number. */
+ uint16_t vf_b_index;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code_b;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action_b;
+ /* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair). */
+ uint8_t pair_mode;
+ /* Pair between VF on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
+ /* Pair between REP on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
+ /* Pair between REP on local host with REP on specified host. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2REP UINT32_C(0x2)
+ /* Pair for the proxy interface. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PROXY UINT32_C(0x3)
+ /* Pair for the PF interface. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR UINT32_C(0x4)
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_LAST \
+ HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR
+ /* Pair state. */
+ uint8_t pair_state;
+ /* Pair has been allocated */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1)
+ /* Both pair members are active */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2)
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \
+ HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE
+ /* Pair name (32 byte string). */
+ char pair_name[32];
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_cfa_vfr_alloc *
+ **********************/
+
+
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vfr_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_id;
+ /*
+ * This field is reserved for the future use.
+ * It shall be set to 0.
+ */
+ uint16_t reserved;
+ uint8_t unused_0[4];
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************
+ * hwrm_cfa_vfr_free *
+ *********************/
+
+
+/* hwrm_cfa_vfr_free_input (size:384b/48B) */
+struct hwrm_cfa_vfr_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************************
+ * hwrm_tunnel_dst_port_query *
+ ******************************/
+
+
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This field represents the identifier of L4 destination port
+ * used for the given tunnel type. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
+ */
+ uint16_t tunnel_dst_port_id;
+ /*
+ * This field represents the value of L4 destination port
+ * identified by tunnel_dst_port_id. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
+ * This field is in network byte order.
+ *
+ * A value of 0 means that the destination port is not
+ * configured.
+ */
+ uint16_t tunnel_dst_port_val;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************************
+ * hwrm_tunnel_dst_port_alloc *
+ ******************************/
+
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1
+ uint8_t unused_0;
+ /*
+ * This field represents the value of L4 destination port used
+ * for the given tunnel type. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
+ *
+ * This field is in network byte order.
+ *
+ * A value of 0 shall fail the command.
+ */
+ uint16_t tunnel_dst_port_val;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Identifier of a tunnel L4 destination port value. Only applies to tunnel
+ * types that has l4 destination port parameters.
+ */
+ uint16_t tunnel_dst_port_id;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************************
+ * hwrm_tunnel_dst_port_free *
+ *****************************/
+
+
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1
+ uint8_t unused_0;
+ /*
+ * Identifier of a tunnel L4 destination port value. Only applies to tunnel
+ * types that has l4 destination port parameters.
+ */
+ uint16_t tunnel_dst_port_id;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ /* Number of received unicast packets */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of discarded packets on received path */
+ uint64_t rx_discard_pkts;
+ /* Number of dropped packets on received path */
+ uint64_t rx_drop_pkts;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of transmitted unicast packets */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of discarded packets on transmit path */
+ uint64_t tx_discard_pkts;
+ /* Number of dropped packets on transmit path */
+ uint64_t tx_drop_pkts;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of TPA packets */
+ uint64_t tpa_pkts;
+ /* Number of TPA bytes */
+ uint64_t tpa_bytes;
+ /* Number of TPA events */
+ uint64_t tpa_events;
+ /* Number of TPA aborts */
+ uint64_t tpa_aborts;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_stat_ctx_alloc *
+ ***********************/
+
+
+/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
+struct hwrm_stat_ctx_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This is the address for statistic block. */
+ uint64_t stats_dma_addr;
+ /*
+ * The statistic block update period in ms.
+ * e.g. 250ms, 500ms, 750ms, 1000ms.
+ * If update_period_ms is 0, then the stats update
+ * shall be never done and the DMA address shall not be used.
+ * In this case, the stat block can only be read by
+ * hwrm_stat_ctx_query command.
+ */
+ uint32_t update_period_ms;
+ /*
+ * This field is used to specify statistics context specific
+ * configuration flags.
+ */
+ uint8_t stat_ctx_flags;
+ /*
+ * When this bit is set to '1', the statistics context shall be
+ * allocated for RoCE traffic only. In this case, traffic other
+ * than offloaded RoCE traffic shall not be included in this
+ * statistic context.
+ * When this bit is set to '0', the statistics context shall be
+ * used for the network traffic other than offloaded RoCE traffic.
+ */
+ #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1)
+ uint8_t unused_0[3];
+} __attribute__((packed));
+
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
+struct hwrm_stat_ctx_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This is the statistics context ID value. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_stat_ctx_free *
+ **********************/
+
+
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
+struct hwrm_stat_ctx_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* ID of the statistics context that is being queried. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
+struct hwrm_stat_ctx_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This is the statistics context ID value. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_stat_ctx_query *
+ ***********************/
+
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* ID of the statistics context that is being queried. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
+struct hwrm_stat_ctx_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Number of transmitted unicast packets */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of transmitted packets with error */
+ uint64_t tx_err_pkts;
+ /* Number of dropped packets on transmit path */
+ uint64_t tx_drop_pkts;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of received unicast packets */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of received packets with error */
+ uint64_t rx_err_pkts;
+ /* Number of dropped packets on received path */
+ uint64_t rx_drop_pkts;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of aggregated unicast packets */
+ uint64_t rx_agg_pkts;
+ /* Number of aggregated unicast bytes */
+ uint64_t rx_agg_bytes;
+ /* Number of aggregation events */
+ uint64_t rx_agg_events;
+ /* Number of aborted aggregations */
+ uint64_t rx_agg_aborts;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_stat_ctx_clr_stats *
+ ***************************/
+
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
+struct hwrm_stat_ctx_clr_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* ID of the statistics context that is being queried. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
+struct hwrm_stat_ctx_clr_stats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************
+ * hwrm_pcie_qstats *
+ ********************/
+
+
+/* hwrm_pcie_qstats_input (size:256b/32B) */
+struct hwrm_pcie_qstats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * The size of PCIe statistics block in bytes.
+ * Firmware will DMA the PCIe statistics to
+ * the host with this field size in the response.
+ */
+ uint16_t pcie_stat_size;
+ uint8_t unused_0[6];
+ /*
+ * This is the host address where
+ * PCIe statistics will be stored
+ */
+ uint64_t pcie_stat_host_addr;
+} __attribute__((packed));
+
+/* hwrm_pcie_qstats_output (size:128b/16B) */
+struct hwrm_pcie_qstats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The size of PCIe statistics block in bytes. */
+ uint16_t pcie_stat_size;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* Port Tx Statistics Formats */
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ /* Total Number of 64 Bytes frames transmitted */
+ uint64_t tx_64b_frames;
+ /* Total Number of 65-127 Bytes frames transmitted */
+ uint64_t tx_65b_127b_frames;
+ /* Total Number of 128-255 Bytes frames transmitted */
+ uint64_t tx_128b_255b_frames;
+ /* Total Number of 256-511 Bytes frames transmitted */
+ uint64_t tx_256b_511b_frames;
+ /* Total Number of 512-1023 Bytes frames transmitted */
+ uint64_t tx_512b_1023b_frames;
+ /* Total Number of 1024-1518 Bytes frames transmitted */
+ uint64_t tx_1024b_1518_frames;
+ /*
+ * Total Number of each good VLAN (exludes FCS errors)
+ * frame transmitted which is 1519 to 1522 bytes in length
+ * inclusive (excluding framing bits but including FCS bytes).
+ */
+ uint64_t tx_good_vlan_frames;
+ /* Total Number of 1519-2047 Bytes frames transmitted */
+ uint64_t tx_1519b_2047_frames;
+ /* Total Number of 2048-4095 Bytes frames transmitted */
+ uint64_t tx_2048b_4095b_frames;
+ /* Total Number of 4096-9216 Bytes frames transmitted */
+ uint64_t tx_4096b_9216b_frames;
+ /* Total Number of 9217-16383 Bytes frames transmitted */
+ uint64_t tx_9217b_16383b_frames;
+ /* Total Number of good frames transmitted */
+ uint64_t tx_good_frames;
+ /* Total Number of frames transmitted */
+ uint64_t tx_total_frames;
+ /* Total number of unicast frames transmitted */
+ uint64_t tx_ucast_frames;
+ /* Total number of multicast frames transmitted */
+ uint64_t tx_mcast_frames;
+ /* Total number of broadcast frames transmitted */
+ uint64_t tx_bcast_frames;
+ /* Total number of PAUSE control frames transmitted */
+ uint64_t tx_pause_frames;
+ /*
+ * Total number of PFC/per-priority PAUSE
+ * control frames transmitted
+ */
+ uint64_t tx_pfc_frames;
+ /* Total number of jabber frames transmitted */
+ uint64_t tx_jabber_frames;
+ /* Total number of frames transmitted with FCS error */
+ uint64_t tx_fcs_err_frames;
+ /* Total number of control frames transmitted */
+ uint64_t tx_control_frames;
+ /* Total number of over-sized frames transmitted */
+ uint64_t tx_oversz_frames;
+ /* Total number of frames with single deferral */
+ uint64_t tx_single_dfrl_frames;
+ /* Total number of frames with multiple deferrals */
+ uint64_t tx_multi_dfrl_frames;
+ /* Total number of frames with single collision */
+ uint64_t tx_single_coll_frames;
+ /* Total number of frames with multiple collisions */
+ uint64_t tx_multi_coll_frames;
+ /* Total number of frames with late collisions */
+ uint64_t tx_late_coll_frames;
+ /* Total number of frames with excessive collisions */
+ uint64_t tx_excessive_coll_frames;
+ /* Total number of fragmented frames transmitted */
+ uint64_t tx_frag_frames;
+ /* Total number of transmit errors */
+ uint64_t tx_err;
+ /* Total number of single VLAN tagged frames transmitted */
+ uint64_t tx_tagged_frames;
+ /* Total number of double VLAN tagged frames transmitted */
+ uint64_t tx_dbl_tagged_frames;
+ /* Total number of runt frames transmitted */
+ uint64_t tx_runt_frames;
+ /* Total number of TX FIFO under runs */
+ uint64_t tx_fifo_underruns;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 0 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri0;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 1 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri1;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 2 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri2;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 3 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri3;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 4 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri4;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 5 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri5;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 6 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri6;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 7 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri7;
+ /* Total number of EEE LPI Events on TX */
+ uint64_t tx_eee_lpi_events;
+ /* EEE LPI Duration Counter on TX */
+ uint64_t tx_eee_lpi_duration;
+ /*
+ * Total number of Link Level Flow Control (LLFC) messages
+ * transmitted
+ */
+ uint64_t tx_llfc_logical_msgs;
+ /* Total number of HCFC messages transmitted */
+ uint64_t tx_hcfc_msgs;
+ /* Total number of TX collisions */
+ uint64_t tx_total_collisions;
+ /* Total number of transmitted bytes */
+ uint64_t tx_bytes;
+ /* Total number of end-to-end HOL frames */
+ uint64_t tx_xthol_frames;
+ /* Total Tx Drops per Port reported by STATS block */
+ uint64_t tx_stat_discard;
+ /* Total Tx Error Drops per Port reported by STATS block */
+ uint64_t tx_stat_error;
+} __attribute__((packed));
+
+/* Port Rx Statistics Formats */
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ /* Total Number of 64 Bytes frames received */
+ uint64_t rx_64b_frames;
+ /* Total Number of 65-127 Bytes frames received */
+ uint64_t rx_65b_127b_frames;
+ /* Total Number of 128-255 Bytes frames received */
+ uint64_t rx_128b_255b_frames;
+ /* Total Number of 256-511 Bytes frames received */
+ uint64_t rx_256b_511b_frames;
+ /* Total Number of 512-1023 Bytes frames received */
+ uint64_t rx_512b_1023b_frames;
+ /* Total Number of 1024-1518 Bytes frames received */
+ uint64_t rx_1024b_1518_frames;
+ /*
+ * Total Number of each good VLAN (exludes FCS errors)
+ * frame received which is 1519 to 1522 bytes in length
+ * inclusive (excluding framing bits but including FCS bytes).
+ */
+ uint64_t rx_good_vlan_frames;
+ /* Total Number of 1519-2047 Bytes frames received */
+ uint64_t rx_1519b_2047b_frames;
+ /* Total Number of 2048-4095 Bytes frames received */
+ uint64_t rx_2048b_4095b_frames;
+ /* Total Number of 4096-9216 Bytes frames received */
+ uint64_t rx_4096b_9216b_frames;
+ /* Total Number of 9217-16383 Bytes frames received */
+ uint64_t rx_9217b_16383b_frames;
+ /* Total number of frames received */
+ uint64_t rx_total_frames;
+ /* Total number of unicast frames received */
+ uint64_t rx_ucast_frames;
+ /* Total number of multicast frames received */
+ uint64_t rx_mcast_frames;
+ /* Total number of broadcast frames received */
+ uint64_t rx_bcast_frames;
+ /* Total number of received frames with FCS error */
+ uint64_t rx_fcs_err_frames;
+ /* Total number of control frames received */
+ uint64_t rx_ctrl_frames;
+ /* Total number of PAUSE frames received */
+ uint64_t rx_pause_frames;
+ /* Total number of PFC frames received */
+ uint64_t rx_pfc_frames;
+ /*
+ * Total number of frames received with an unsupported
+ * opcode
+ */
+ uint64_t rx_unsupported_opcode_frames;
+ /*
+ * Total number of frames received with an unsupported
+ * DA for pause and PFC
+ */
+ uint64_t rx_unsupported_da_pausepfc_frames;
+ /* Total number of frames received with an unsupported SA */
+ uint64_t rx_wrong_sa_frames;
+ /* Total number of received packets with alignment error */
+ uint64_t rx_align_err_frames;
+ /* Total number of received frames with out-of-range length */
+ uint64_t rx_oor_len_frames;
+ /* Total number of received frames with error termination */
+ uint64_t rx_code_err_frames;
+ /*
+ * Total number of received frames with a false carrier is
+ * detected during idle, as defined by RX_ER samples active
+ * and RXD is 0xE. The event is reported along with the
+ * statistics generated on the next received frame. Only
+ * one false carrier condition can be detected and logged
+ * between frames.
+ *
+ * Carrier event, valid for 10M/100M speed modes only.
+ */
+ uint64_t rx_false_carrier_frames;
+ /* Total number of over-sized frames received */
+ uint64_t rx_ovrsz_frames;
+ /* Total number of jabber packets received */
+ uint64_t rx_jbr_frames;
+ /* Total number of received frames with MTU error */
+ uint64_t rx_mtu_err_frames;
+ /* Total number of received frames with CRC match */
+ uint64_t rx_match_crc_frames;
+ /* Total number of frames received promiscuously */
+ uint64_t rx_promiscuous_frames;
+ /*
+ * Total number of received frames with one or two VLAN
+ * tags
+ */
+ uint64_t rx_tagged_frames;
+ /* Total number of received frames with two VLAN tags */
+ uint64_t rx_double_tagged_frames;
+ /* Total number of truncated frames received */
+ uint64_t rx_trunc_frames;
+ /* Total number of good frames (without errors) received */
+ uint64_t rx_good_frames;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 0
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri0;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 1
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri1;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 2
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri2;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 3
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri3;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 4
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri4;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 5
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri5;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 6
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri6;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 7
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri7;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 0
+ */
+ uint64_t rx_pfc_ena_frames_pri0;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 1
+ */
+ uint64_t rx_pfc_ena_frames_pri1;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 2
+ */
+ uint64_t rx_pfc_ena_frames_pri2;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 3
+ */
+ uint64_t rx_pfc_ena_frames_pri3;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 4
+ */
+ uint64_t rx_pfc_ena_frames_pri4;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 5
+ */
+ uint64_t rx_pfc_ena_frames_pri5;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 6
+ */
+ uint64_t rx_pfc_ena_frames_pri6;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 7
+ */
+ uint64_t rx_pfc_ena_frames_pri7;
+ /* Total Number of frames received with SCH CRC error */
+ uint64_t rx_sch_crc_err_frames;
+ /* Total Number of under-sized frames received */
+ uint64_t rx_undrsz_frames;
+ /* Total Number of fragmented frames received */
+ uint64_t rx_frag_frames;
+ /* Total number of RX EEE LPI Events */
+ uint64_t rx_eee_lpi_events;
+ /* EEE LPI Duration Counter on RX */
+ uint64_t rx_eee_lpi_duration;
+ /*
+ * Total number of physical type Link Level Flow Control
+ * (LLFC) messages received
+ */
+ uint64_t rx_llfc_physical_msgs;
+ /*
+ * Total number of logical type Link Level Flow Control
+ * (LLFC) messages received
+ */
+ uint64_t rx_llfc_logical_msgs;
+ /*
+ * Total number of logical type Link Level Flow Control
+ * (LLFC) messages received with CRC error
+ */
+ uint64_t rx_llfc_msgs_with_crc_err;
+ /* Total number of HCFC messages received */
+ uint64_t rx_hcfc_msgs;
+ /* Total number of HCFC messages received with CRC error */
+ uint64_t rx_hcfc_msgs_with_crc_err;
+ /* Total number of received bytes */
+ uint64_t rx_bytes;
+ /* Total number of bytes received in runt frames */
+ uint64_t rx_runt_bytes;
+ /* Total number of runt frames received */
+ uint64_t rx_runt_frames;
+ /* Total Rx Discards per Port reported by STATS block */
+ uint64_t rx_stat_discard;
+ uint64_t rx_stat_err;
+} __attribute__((packed));
+
+/* Port Rx Statistics extended Formats */
+/* rx_port_stats_ext (size:320b/40B) */
+struct rx_port_stats_ext {
+ /* Number of times link state changed to down */
+ uint64_t link_down_events;
+ /* Number of times the idle rings with pause bit are found */
+ uint64_t continuous_pause_events;
+ /* Number of times the active rings pause bit resumed back */
+ uint64_t resume_pause_events;
+ /* Number of times, the ROCE cos queue PFC is disabled to avoid pause flood/burst */
+ uint64_t continuous_roce_pause_events;
+ /* Number of times, the ROCE cos queue PFC is enabled back */
+ uint64_t resume_roce_pause_events;
+} __attribute__((packed));
+
+/* PCIe Statistics Formats */
+/* pcie_ctx_hw_stats (size:768b/96B) */
+struct pcie_ctx_hw_stats {
+ /* Number of physical layer receiver errors */
+ uint64_t pcie_pl_signal_integrity;
+ /* Number of DLLP CRC errors detected by Data Link Layer */
+ uint64_t pcie_dl_signal_integrity;
+ /*
+ * Number of TLP LCRC and sequence number errors detected
+ * by Data Link Layer
+ */
+ uint64_t pcie_tl_signal_integrity;
+ /* Number of times LTSSM entered Recovery state */
+ uint64_t pcie_link_integrity;
+ /* Number of TLP bytes that have been trasmitted */
+ uint64_t pcie_tx_traffic_rate;
+ /* Number of TLP bytes that have been received */
+ uint64_t pcie_rx_traffic_rate;
+ /* Number of DLLP bytes that have been trasmitted */
+ uint64_t pcie_tx_dllp_statistics;
+ /* Number of DLLP bytes that have been received */
+ uint64_t pcie_rx_dllp_statistics;
+ /*
+ * Number of times spent in each phase of gen3
+ * equalization
+ */
+ uint64_t pcie_equalization_time;
+ /* Records the last 16 transitions of the LTSSM */
+ uint32_t pcie_ltssm_histogram[4];
+ /*
+ * Record the last 8 reasons on why LTSSM transitioned
+ * to Recovery
+ */
+ uint64_t pcie_recovery_histogram;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_exec_fwd_resp *
+ **********************/
+
+
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
+struct hwrm_exec_fwd_resp_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is an encapsulated request. This request should
+ * be executed by the HWRM and the response should be
+ * provided in the response buffer inside the encapsulated
+ * request.
+ */
+ uint32_t encap_request[26];
+ /*
+ * This value indicates the target id of the response to
+ * the encapsulated request.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t encap_resp_target_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
+struct hwrm_exec_fwd_resp_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/************************
+ * hwrm_reject_fwd_resp *
+ ************************/
+
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
+struct hwrm_reject_fwd_resp_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is an encapsulated request. This request should
+ * be rejected by the HWRM and the error response should be
+ * provided in the response buffer inside the encapsulated
+ * request.
+ */
+ uint32_t encap_request[26];
+ /*
+ * This value indicates the target id of the response to
+ * the encapsulated request.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t encap_resp_target_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
+struct hwrm_reject_fwd_resp_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************
+ * hwrm_fwd_resp *
+ *****************/
+
+
+/* hwrm_fwd_resp_input (size:1024b/128B) */
+struct hwrm_fwd_resp_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value indicates the target id of the encapsulated
+ * response.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t encap_resp_target_id;
+ /*
+ * This value indicates the completion ring the encapsulated
+ * response will be optionally completed on. If the value is
+ * -1, then no CR completion shall be generated for the
+ * encapsulated response. Any other value must be a
+ * valid CR ring_id value. If a valid encap_resp_cmpl_ring
+ * is provided, then a CR completion shall be generated for
+ * the encapsulated response.
+ */
+ uint16_t encap_resp_cmpl_ring;
+ /* This field indicates the length of encapsulated response. */
+ uint16_t encap_resp_len;
+ uint8_t unused_0;
+ uint8_t unused_1;
+ /*
+ * This is the host address where the encapsulated response
+ * will be written.
+ * This area must be 16B aligned and must be cleared to zero
+ * before the original request is made.
+ */
+ uint64_t encap_resp_addr;
+ /* This is an encapsulated response. */
+ uint32_t encap_resp[24];
+} __attribute__((packed));
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
+struct hwrm_fwd_resp_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************************
+ * hwrm_fwd_async_event_cmpl *
+ *****************************/
+
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
+struct hwrm_fwd_async_event_cmpl_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value indicates the target id of the encapsulated
+ * asynchronous event.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - Broadcast to all children VFs (only applicable when
+ * a PF is the requester)
+ */
+ uint16_t encap_async_event_target_id;
+ uint8_t unused_0[6];
+ /* This is an encapsulated asynchronous event completion. */
+ uint32_t encap_async_event_cmpl[4];
+} __attribute__((packed));
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
+struct hwrm_fwd_async_event_cmpl_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_nvm_raw_write_blk *
+ **************************/
+
+
+/* hwrm_nvm_raw_write_blk_input (size:256b/32B) */
+struct hwrm_nvm_raw_write_blk_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Source Address.
+ * This is the loation of the source data to be written.
+ */
+ uint64_t host_src_addr;
+ /*
+ * 32-bit Destination Address.
+ * This is the NVRAM byte-offset where the source data will be written to.
+ */
+ uint32_t dest_addr;
+ /* Length of data to be written, in bytes. */
+ uint32_t len;
+} __attribute__((packed));
+
+/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */
+struct hwrm_nvm_raw_write_blk_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************
+ * hwrm_nvm_read *
+ *****************/
+
+
+/* hwrm_nvm_read_input (size:320b/40B) */
+struct hwrm_nvm_read_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Destination Address.
+ * This is the host address where the data will be written to.
+ */
+ uint64_t host_dest_addr;
+ /* The 0-based index of the directory entry. */
+ uint16_t dir_idx;
+ uint8_t unused_0[2];
+ /* The NVRAM byte-offset to read from. */
+ uint32_t offset;
+ /* The length of the data to be read, in bytes. */
+ uint32_t len;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_nvm_read_output (size:128b/16B) */
+struct hwrm_nvm_read_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************
+ * hwrm_nvm_raw_dump *
+ *********************/
+
+
+/* hwrm_nvm_raw_dump_input (size:256b/32B) */
+struct hwrm_nvm_raw_dump_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Destination Address.
+ * This is the host address where the data will be written to.
+ */
+ uint64_t host_dest_addr;
+ /* 32-bit NVRAM byte-offset to read from. */
+ uint32_t offset;
+ /* Total length of NVRAM contents to be read, in bytes. */
+ uint32_t len;
+} __attribute__((packed));
+
+/* hwrm_nvm_raw_dump_output (size:128b/16B) */
+struct hwrm_nvm_raw_dump_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_nvm_get_dir_entries *
+ ****************************/
+
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
+struct hwrm_nvm_get_dir_entries_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Destination Address.
+ * This is the host address where the directory will be written.
+ */
+ uint64_t host_dest_addr;
+} __attribute__((packed));
+
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
+struct hwrm_nvm_get_dir_entries_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_nvm_get_dir_info *
+ *************************/
+
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dir_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
+struct hwrm_nvm_get_dir_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Number of directory entries in the directory. */
+ uint32_t entries;
+ /* Size of each directory entry, in bytes. */
+ uint32_t entry_length;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************
+ * hwrm_nvm_write *
+ ******************/
+
+
+/* hwrm_nvm_write_input (size:384b/48B) */
+struct hwrm_nvm_write_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Source Address.
+ * This is where the source data is.
+ */
+ uint64_t host_src_addr;
+ /* The Directory Entry Type (valid values are defined in the bnxnvm_directory_type enum defined in the file bnxnvm_defs.h). */
+ uint16_t dir_type;
+ /*
+ * Directory ordinal.
+ * The 0-based instance of the combined Directory Entry Type and Extension.
+ */
+ uint16_t dir_ordinal;
+ /* The Directory Entry Extension flags (see BNX_DIR_EXT_* in the file bnxnvm_defs.h). */
+ uint16_t dir_ext;
+ /* Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the file bnxnvm_defs.h). */
+ uint16_t dir_attr;
+ /*
+ * Length of data to write, in bytes. May be less than or equal to the allocated size for the directory entry.
+ * The data length stored in the directory entry will be updated to reflect this value once the write is complete.
+ */
+ uint32_t dir_data_length;
+ /* Option. */
+ uint16_t option;
+ uint16_t flags;
+ /*
+ * When this bit is '1', the original active image
+ * will not be removed. TBD: what purpose is this?
+ */
+ #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG \
+ UINT32_C(0x1)
+ /*
+ * The requested length of the allocated NVM for the item, in bytes. This value may be greater than or equal to the specified data length (dir_data_length).
+ * If this value is less than the specified data length, it will be ignored.
+ * The response will contain the actual allocated item length, which may be greater than the requested item length.
+ * The purpose for allocating more than the required number of bytes for an item's data is to pre-allocate extra storage (padding) to accomodate
+ * the potential future growth of an item (e.g. upgraded firmware with a size increase, log growth, expanded configuration data).
+ */
+ uint32_t dir_item_length;
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* hwrm_nvm_write_output (size:128b/16B) */
+struct hwrm_nvm_write_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Length of the allocated NVM for the item, in bytes. The value may be greater than or equal to the specified data length or the requested item length.
+ * The actual item length used when creating a new directory entry will be a multiple of an NVM block size.
+ */
+ uint32_t dir_item_length;
+ /* The directory index of the created or modified item. */
+ uint16_t dir_idx;
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
+struct hwrm_nvm_write_cmd_err {
+ /*
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* Unable to complete operation due to fragmentation */
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1)
+ /* nvm is completely full. */
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2)
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*******************
+ * hwrm_nvm_modify *
+ *******************/
+
+
+/* hwrm_nvm_modify_input (size:320b/40B) */
+struct hwrm_nvm_modify_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Source Address.
+ * This is where the modified data is.
+ */
+ uint64_t host_src_addr;
+ /* 16-bit directory entry index. */
+ uint16_t dir_idx;
+ uint8_t unused_0[2];
+ /* 32-bit NVRAM byte-offset to modify content from. */
+ uint32_t offset;
+ /*
+ * Length of data to be modified, in bytes. The length shall
+ * be non-zero.
+ */
+ uint32_t len;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
+struct hwrm_nvm_modify_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_nvm_find_dir_entry *
+ ***************************/
+
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the dir_idx_valid field to be
+ * configured.
+ */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID \
+ UINT32_C(0x1)
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ /* Directory Entry (Image) Type */
+ uint16_t dir_type;
+ /*
+ * Directory ordinal.
+ * The instance of this Directory Type
+ */
+ uint16_t dir_ordinal;
+ /* The Directory Entry Extension flags. */
+ uint16_t dir_ext;
+ /* This value indicates the search option using dir_ordinal. */
+ uint8_t opt_ordinal;
+ /* This value indicates the search option using dir_ordinal. */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_MASK UINT32_C(0x3)
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_SFT 0
+ /* Equal to specified ordinal value. */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_EQ UINT32_C(0x0)
+ /* Greater than or equal to specified ordinal value */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GE UINT32_C(0x1)
+ /* Greater than specified ordinal value */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT UINT32_C(0x2)
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_LAST \
+ HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT
+ uint8_t unused_0[3];
+} __attribute__((packed));
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Allocated NVRAM for this directory entry, in bytes. */
+ uint32_t dir_item_length;
+ /* Size of the stored data for this directory entry, in bytes. */
+ uint32_t dir_data_length;
+ /*
+ * Firmware version.
+ * Only valid if the directory entry is for embedded firmware stored in APE_BIN Format.
+ */
+ uint32_t fw_ver;
+ /* Directory ordinal. */
+ uint16_t dir_ordinal;
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_nvm_erase_dir_entry *
+ ****************************/
+
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
+struct hwrm_nvm_erase_dir_entry_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_erase_dir_entry_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_nvm_get_dev_info *
+ *************************/
+
+
+/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dev_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
+struct hwrm_nvm_get_dev_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Manufacturer ID. */
+ uint16_t manufacturer_id;
+ /* Device ID. */
+ uint16_t device_id;
+ /* Sector size of the NVRAM device. */
+ uint32_t sector_size;
+ /* Total size, in bytes of the NVRAM device. */
+ uint32_t nvram_size;
+ uint32_t reserved_size;
+ /* Available size that can be used, in bytes. Available size is the NVRAM size take away the used size and reserved size. */
+ uint32_t available_size;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_nvm_mod_dir_entry *
+ **************************/
+
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_mod_dir_entry_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the checksum field to be
+ * configured.
+ */
+ #define HWRM_NVM_MOD_DIR_ENTRY_INPUT_ENABLES_CHECKSUM UINT32_C(0x1)
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ /*
+ * Directory ordinal.
+ * The (0-based) instance of this Directory Type.
+ */
+ uint16_t dir_ordinal;
+ /* The Directory Entry Extension flags (see BNX_DIR_EXT_* for extension flag definitions). */
+ uint16_t dir_ext;
+ /* Directory Entry Attribute flags (see BNX_DIR_ATTR_* for attribute flag definitions). */
+ uint16_t dir_attr;
+ /*
+ * If valid, then this field updates the checksum
+ * value of the content in the directory entry.
+ */
+ uint32_t checksum;
+} __attribute__((packed));
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_mod_dir_entry_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_nvm_verify_update *
+ **************************/
+
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
+struct hwrm_nvm_verify_update_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Directory Entry Type, to be verified. */
+ uint16_t dir_type;
+ /*
+ * Directory ordinal.
+ * The instance of the Directory Type to be verified.
+ */
+ uint16_t dir_ordinal;
+ /*
+ * The Directory Entry Extension flags.
+ * The "UPDATE" extension flag must be set in this value.
+ * A corresponding directory entry with the same type and ordinal values but *without*
+ * the "UPDATE" extension flag must also exist. The other flags of the extension must
+ * be identical between the active and update entries.
+ */
+ uint16_t dir_ext;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
+struct hwrm_nvm_verify_update_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_nvm_install_update *
+ ***************************/
+
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
+struct hwrm_nvm_install_update_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Installation type. If the value 3 through 0xffff is used,
+ * only packaged items with that type value will be installed and
+ * conditional installation directives for those packaged items
+ * will be over-ridden (i.e. 'create' or 'replace' will be treated
+ * as 'install').
+ */
+ uint32_t install_type;
+ /*
+ * Perform a normal package installation. Conditional installation
+ * directives (e.g. 'create' and 'replace') of packaged items
+ * will be followed.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_NORMAL UINT32_C(0x0)
+ /*
+ * Install all packaged items regardless of installation directive
+ * (i.e. treat all packaged items as though they have an installation
+ * directive of 'install').
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL \
+ UINT32_C(0xffffffff)
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_LAST \
+ HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL
+ uint16_t flags;
+ /* If set to 1, then securely erase all unused locations in persistent storage. */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ERASE_UNUSED_SPACE \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, then unspecifed images, images not in the package file, will be safely deleted.
+ * When combined with erase_unused_space then unspecified images will be securely erased.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_REMOVE_UNUSED_PKG \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, FW will defragment the NVM if defragmentation is required for the update.
+ * Allow additional time for this command to complete if this bit is set to 1.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ALLOWED_TO_DEFRAG \
+ UINT32_C(0x4)
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
+struct hwrm_nvm_install_update_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Bit-mask of successfully installed items.
+ * Bit-0 corresponding to the first packaged item, Bit-1 for the second item, etc.
+ * A value of 0 indicates that no items were successfully installed.
+ */
+ uint64_t installed_items;
+ /* result is 8 b */
+ uint8_t result;
+ /* There was no problem with the package installation. */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS UINT32_C(0x0)
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_LAST \
+ HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS
+ /* problem_item is 8 b */
+ uint8_t problem_item;
+ /* There was no problem with any packaged items. */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_NONE \
+ UINT32_C(0x0)
+ /* There was a problem with the NVM package itself. */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE \
+ UINT32_C(0xff)
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_LAST \
+ HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE
+ /* reset_required is 8 b */
+ uint8_t reset_required;
+ /*
+ * No reset is required for installed/updated firmware or
+ * microcode to take effect.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_NONE \
+ UINT32_C(0x0)
+ /*
+ * A PCIe reset (e.g. system reboot) is
+ * required for newly installed/updated firmware or
+ * microcode to take effect.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_PCI \
+ UINT32_C(0x1)
+ /*
+ * A controller power reset (e.g. system power-cycle) is
+ * required for newly installed/updated firmware or
+ * microcode to take effect. Some newly installed/updated
+ * firmware or microcode may still take effect upon the
+ * next PCIe reset.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER \
+ UINT32_C(0x2)
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_LAST \
+ HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER
+ uint8_t unused_0[4];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
+struct hwrm_nvm_install_update_cmd_err {
+ /*
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* Unable to complete operation due to fragmentation */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1)
+ /* nvm is completely full. */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2)
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/******************
+ * hwrm_nvm_flush *
+ ******************/
+
+
+/* hwrm_nvm_flush_input (size:128b/16B) */
+struct hwrm_nvm_flush_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_nvm_flush_output (size:128b/16B) */
+struct hwrm_nvm_flush_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_flush_cmd_err (size:64b/8B) */
+struct hwrm_nvm_flush_cmd_err {
+ /*
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_FLUSH_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* flush could not be performed */
+ #define HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL UINT32_C(0x1)
+ #define HWRM_NVM_FLUSH_CMD_ERR_CODE_LAST \
+ HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*************************
+ * hwrm_nvm_get_variable *
+ *************************/
+
+
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
+struct hwrm_nvm_get_variable_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where
+ * nvm variable will be stored
+ */
+ uint64_t dest_data_addr;
+ /* size of data in bits */
+ uint16_t data_len;
+ /* nvm cfg option number */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_LAST \
+ HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF
+ /*
+ * Number of dimensions for this nvm configuration variable.
+ * This value indicates how many of the indexN values to use.
+ * A value of 0 means that none of the indexN values are valid.
+ * A value of 1 requires at index0 is valued, a value of 2
+ * requires that index0 and index1 are valid, and so forth
+ */
+ uint16_t dimensions;
+ /* index for the 1st dimensions */
+ uint16_t index_0;
+ /* index for the 2nd dimensions */
+ uint16_t index_1;
+ /* index for the 3rd dimensions */
+ uint16_t index_2;
+ /* index for the 4th dimensions */
+ uint16_t index_3;
+ uint8_t flags;
+ /*
+ * When this bit is set to 1, the factory default value will be returned,
+ * 0 returns the operational value.
+ */
+ #define HWRM_NVM_GET_VARIABLE_INPUT_FLAGS_FACTORY_DFLT \
+ UINT32_C(0x1)
+ uint8_t unused_0;
+} __attribute__((packed));
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
+struct hwrm_nvm_get_variable_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* size of data of the actual variable retrieved in bits */
+ uint16_t data_len;
+ /*
+ * option_num is the option number for the data retrieved. It is possible in the
+ * future that the option number returned would be different than requested. This
+ * condition could occur if an option is deprecated and a new option id is defined
+ * with similar characteristics, but has a slightly different definition. This
+ * also makes it convenient for the caller to identify the variable result with
+ * the option id from the response.
+ */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_LAST \
+ HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_variable_cmd_err {
+ /*
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* variable does not exist */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1)
+ /* configuration is corrupted and the variable cannot be saved */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2)
+ /* length specified is too small */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT UINT32_C(0x3)
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*************************
+ * hwrm_nvm_set_variable *
+ *************************/
+
+
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
+struct hwrm_nvm_set_variable_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where
+ * nvm variable will be copied from
+ */
+ uint64_t src_data_addr;
+ /* size of data in bits */
+ uint16_t data_len;
+ /* nvm cfg option number */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_LAST \
+ HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF
+ /*
+ * Number of dimensions for this nvm configuration variable.
+ * This value indicates how many of the indexN values to use.
+ * A value of 0 means that none of the indexN values are valid.
+ * A value of 1 requires at index0 is valued, a value of 2
+ * requires that index0 and index1 are valid, and so forth
+ */
+ uint16_t dimensions;
+ /* index for the 1st dimensions */
+ uint16_t index_0;
+ /* index for the 2nd dimensions */
+ uint16_t index_1;
+ /* index for the 3rd dimensions */
+ uint16_t index_2;
+ /* index for the 4th dimensions */
+ uint16_t index_3;
+ uint8_t flags;
+ /* When this bit is 1, flush internal cache after this write operation (see hwrm_nvm_flush command.) */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FORCE_FLUSH \
+ UINT32_C(0x1)
+ /* encryption method */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_MASK \
+ UINT32_C(0xe)
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_SFT 1
+ /* No encryption. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_NONE \
+ (UINT32_C(0x0) << 1)
+ /* one-way encryption. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1 \
+ (UINT32_C(0x1) << 1)
+ /* symmetric AES256 encryption. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_AES256 \
+ (UINT32_C(0x2) << 1)
+ /* SHA1 digest appended to plaintext contents, for authentication */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH \
+ (UINT32_C(0x3) << 1)
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_LAST \
+ HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ uint8_t unused_0;
+} __attribute__((packed));
+
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
+struct hwrm_nvm_set_variable_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_set_variable_cmd_err {
+ /*
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* variable does not exist */
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1)
+ /* configuration is corrupted and the variable cannot be saved */
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2)
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/****************************
+ * hwrm_nvm_validate_option *
+ ****************************/
+
+
+/* hwrm_nvm_validate_option_input (size:320b/40B) */
+struct hwrm_nvm_validate_option_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where
+ * nvm variable will be copied from
+ */
+ uint64_t src_data_addr;
+ /* size of data in bits */
+ uint16_t data_len;
+ /* nvm cfg option number */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_0 \
+ UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_LAST \
+ HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF
+ /*
+ * Number of dimensions for this nvm configuration variable.
+ * This value indicates how many of the indexN values to use.
+ * A value of 0 means that none of the indexN values are valid.
+ * A value of 1 requires at index0 is valued, a value of 2
+ * requires that index0 and index1 are valid, and so forth
+ */
+ uint16_t dimensions;
+ /* index for the 1st dimensions */
+ uint16_t index_0;
+ /* index for the 2nd dimensions */
+ uint16_t index_1;
+ /* index for the 3rd dimensions */
+ uint16_t index_2;
+ /* index for the 4th dimensions */
+ uint16_t index_3;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_nvm_validate_option_output (size:128b/16B) */
+struct hwrm_nvm_validate_option_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t result;
+ /* indicates that the value provided for the option is not matching with the saved data. */
+ #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_NOT_MATCH UINT32_C(0x0)
+ /* indicates that the value provided for the option is matching the saved data. */
+ #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH UINT32_C(0x1)
+ #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_LAST \
+ HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_validate_option_cmd_err (size:64b/8B) */
+struct hwrm_nvm_validate_option_cmd_err {
+ /*
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_LAST \
+ HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*****************************
+ * hwrm_nvm_factory_defaults *
+ *****************************/
+
+
+/* hwrm_nvm_factory_defaults_input (size:192b/24B) */
+struct hwrm_nvm_factory_defaults_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* mode is 8 b */
+ uint8_t mode;
+ /* If set to 1, it will trigger restoration of factory default settings */
+ #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_RESTORE UINT32_C(0x0)
+ /* If set to 1, it will trigger creation of factory default settings */
+ #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_CREATE UINT32_C(0x1)
+ #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_LAST \
+ HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_CREATE
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_nvm_factory_defaults_output (size:128b/16B) */
+struct hwrm_nvm_factory_defaults_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t result;
+ /* factory defaults created successfully. */
+ #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_OK \
+ UINT32_C(0x0)
+ /* factory defaults restored successfully. */
+ #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_RESTORE_OK \
+ UINT32_C(0x1)
+ /* factory defaults already created. */
+ #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_ALREADY \
+ UINT32_C(0x2)
+ #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_LAST \
+ HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_ALREADY
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_factory_defaults_cmd_err (size:64b/8B) */
+struct hwrm_nvm_factory_defaults_cmd_err {
+ /*
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_UNKNOWN \
+ UINT32_C(0x0)
+ /* valid configuration not present to create defaults */
+ #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_VALID_CFG \
+ UINT32_C(0x1)
+ /* No saved configuration present to restore, restore failed */
+ #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG \
+ UINT32_C(0x2)
+ #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_LAST \
+ HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+#endif /* _HSI_STRUCT_DEF_DPDK_H_ */
diff --git a/src/spdk/dpdk/drivers/net/bnxt/meson.build b/src/spdk/dpdk/drivers/net/bnxt/meson.build
new file mode 100644
index 00000000..e130f271
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/meson.build
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+install_headers('rte_pmd_bnxt.h')
+version = 2
+sources = files('bnxt_cpr.c',
+ 'bnxt_ethdev.c',
+ 'bnxt_filter.c',
+ 'bnxt_flow.c',
+ 'bnxt_hwrm.c',
+ 'bnxt_irq.c',
+ 'bnxt_ring.c',
+ 'bnxt_rxq.c',
+ 'bnxt_rxr.c',
+ 'bnxt_stats.c',
+ 'bnxt_txq.c',
+ 'bnxt_txr.c',
+ 'bnxt_util.c',
+ 'bnxt_vnic.c',
+ 'rte_pmd_bnxt.c')
diff --git a/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.c b/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.c
new file mode 100644
index 00000000..c298de83
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -0,0 +1,811 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_dev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_byteorder.h>
+
+#include "bnxt.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_vnic.h"
+#include "rte_pmd_bnxt.h"
+#include "hsi_struct_def_dpdk.h"
+
+int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg)
+{
+ struct rte_pmd_bnxt_mb_event_param ret_param;
+
+ ret_param.retval = RTE_PMD_BNXT_MB_EVENT_PROCEED;
+ ret_param.vf_id = vf_id;
+ ret_param.msg = msg;
+
+ _rte_eth_dev_callback_process(bp->eth_dev, RTE_ETH_EVENT_VF_MBOX,
+ &ret_param);
+
+ /* Default to approve */
+ if (ret_param.retval == RTE_PMD_BNXT_MB_EVENT_PROCEED)
+ ret_param.retval = RTE_PMD_BNXT_MB_EVENT_NOOP_ACK;
+
+ return ret_param.retval == RTE_PMD_BNXT_MB_EVENT_NOOP_ACK ?
+ true : false;
+}
+
+int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ eth_dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(eth_dev))
+ return -ENOTSUP;
+
+ bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to set Tx loopback on non-PF port %d!\n",
+ port);
+ return -ENOTSUP;
+ }
+
+ if (on)
+ bp->pf.evb_mode = BNXT_EVB_MODE_VEB;
+ else
+ bp->pf.evb_mode = BNXT_EVB_MODE_VEPA;
+
+ rc = bnxt_hwrm_pf_evb_mode(bp);
+
+ return rc;
+}
+
+static void
+rte_pmd_bnxt_set_all_queues_drop_en_cb(struct bnxt_vnic_info *vnic, void *onptr)
+{
+ uint8_t *on = onptr;
+ vnic->bd_stall = !(*on);
+}
+
+int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+ uint32_t i;
+ int rc = -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ eth_dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(eth_dev))
+ return -ENOTSUP;
+
+ bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to set all queues drop on non-PF port!\n");
+ return -ENOTSUP;
+ }
+
+ if (bp->vnic_info == NULL)
+ return -ENODEV;
+
+ /* Stall PF */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ bp->vnic_info[i].bd_stall = !on;
+ rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[i]);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to update PF VNIC %d.\n", i);
+ return rc;
+ }
+ }
+
+ /* Stall all active VFs */
+ for (i = 0; i < bp->pf.active_vfs; i++) {
+ rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, i,
+ rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
+ bnxt_hwrm_vnic_cfg);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", i);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
+ struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf >= dev_info.max_vfs || mac_addr == NULL)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to set VF %d mac address on non-PF port %d!\n",
+ vf, port);
+ return -ENOTSUP;
+ }
+
+ rc = bnxt_hwrm_func_vf_mac(bp, vf, (uint8_t *)mac_addr);
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct rte_eth_dev *eth_dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ uint16_t tot_rate = 0;
+ uint64_t idx;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ eth_dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(eth_dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (!bp->pf.active_vfs)
+ return -EINVAL;
+
+ if (vf >= bp->pf.max_vfs)
+ return -EINVAL;
+
+ /* Add up the per queue BW and configure MAX BW of the VF */
+ for (idx = 0; idx < 64; idx++) {
+ if ((1ULL << idx) & q_msk)
+ tot_rate += tx_rate;
+ }
+
+ /* Requested BW can't be greater than link speed */
+ if (tot_rate > eth_dev->data->dev_link.link_speed) {
+ PMD_DRV_LOG(ERR, "Rate > Link speed. Set to %d\n", tot_rate);
+ return -EINVAL;
+ }
+
+ /* Requested BW already configured */
+ if (tot_rate == bp->pf.vf_info[vf].max_tx_rate)
+ return 0;
+
+ rc = bnxt_hwrm_func_bw_cfg(bp, vf, tot_rate,
+ HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW);
+
+ if (!rc)
+ bp->pf.vf_info[vf].max_tx_rate = tot_rate;
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev;
+ uint32_t func_flags;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to set mac spoof on non-PF port %d!\n", port);
+ return -EINVAL;
+ }
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ /* Prev setting same as new setting. */
+ if (on == bp->pf.vf_info[vf].mac_spoof_en)
+ return 0;
+
+ func_flags = bp->pf.vf_info[vf].func_cfg_flags;
+ func_flags &= ~(HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE |
+ HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE);
+
+ if (on)
+ func_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+ else
+ func_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+
+ rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags);
+ if (!rc) {
+ bp->pf.vf_info[vf].mac_spoof_en = on;
+ bp->pf.vf_info[vf].func_cfg_flags = func_flags;
+ }
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to set VLAN spoof on non-PF port %d!\n", port);
+ return -EINVAL;
+ }
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ rc = bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(bp, vf, on);
+ if (!rc) {
+ bp->pf.vf_info[vf].vlan_spoof_en = on;
+ if (on) {
+ if (bnxt_hwrm_cfa_vlan_antispoof_cfg(bp,
+ bp->pf.first_vf_id + vf,
+ bp->pf.vf_info[vf].vlan_count,
+ bp->pf.vf_info[vf].vlan_as_table))
+ rc = -1;
+ }
+ } else {
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);
+ }
+
+ return rc;
+}
+
+static void
+rte_pmd_bnxt_set_vf_vlan_stripq_cb(struct bnxt_vnic_info *vnic, void *onptr)
+{
+ uint8_t *on = onptr;
+ vnic->vlan_strip = *on;
+}
+
+int
+rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to set VF %d stripq on non-PF port %d!\n",
+ vf, port);
+ return -ENOTSUP;
+ }
+
+ rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
+ rte_pmd_bnxt_set_vf_vlan_stripq_cb, &on,
+ bnxt_hwrm_vnic_cfg);
+ if (rc)
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
+ uint16_t rx_mask, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ uint16_t flag = 0;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!bp->pf.vf_info)
+ return -EINVAL;
+
+ if (vf >= bp->pdev->max_vfs)
+ return -EINVAL;
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
+ PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
+ return -ENOTSUP;
+ }
+
+ /* Is this really the correct mapping? VFd seems to think it is. */
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ flag |= BNXT_VNIC_INFO_PROMISC;
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ flag |= BNXT_VNIC_INFO_BCAST;
+ if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST;
+
+ if (on)
+ bp->pf.vf_info[vf].l2_rx_mask |= flag;
+ else
+ bp->pf.vf_info[vf].l2_rx_mask &= ~flag;
+
+ rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
+ vf_vnic_set_rxmask_cb,
+ &bp->pf.vf_info[vf].l2_rx_mask,
+ bnxt_set_rx_mask_no_vlan);
+ if (rc)
+ PMD_DRV_LOG(ERR, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
+
+ return rc;
+}
+
+static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
+{
+ int rc = 0;
+ int dflt_vnic;
+ struct bnxt_vnic_info vnic;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to set VLAN table on non-PF port!\n");
+ return -EINVAL;
+ }
+
+ if (vf >= bp->pdev->max_vfs)
+ return -EINVAL;
+
+ dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
+ if (dflt_vnic < 0) {
+ /* This simply indicates there's no driver loaded.
+ * This is not an error.
+ */
+ PMD_DRV_LOG(ERR, "Unable to get default VNIC for VF %d\n", vf);
+ } else {
+ memset(&vnic, 0, sizeof(vnic));
+ vnic.fw_vnic_id = dflt_vnic;
+ if (bnxt_hwrm_vnic_qcfg(bp, &vnic,
+ bp->pf.first_vf_id + vf) == 0) {
+ if (bnxt_hwrm_cfa_l2_set_rx_mask(bp, &vnic,
+ bp->pf.vf_info[vf].vlan_count,
+ bp->pf.vf_info[vf].vlan_table))
+ rc = -1;
+ } else {
+ rc = -1;
+ }
+ }
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on)
+{
+ struct bnxt_vlan_table_entry *ve;
+ struct bnxt_vlan_antispoof_table_entry *vase;
+ struct rte_eth_dev *dev;
+ struct bnxt *bp;
+ uint16_t cnt;
+ int rc = 0;
+ int i, j;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ bp = (struct bnxt *)dev->data->dev_private;
+ if (!bp->pf.vf_info)
+ return -EINVAL;
+
+ for (i = 0; vf_mask; i++, vf_mask >>= 1) {
+ cnt = bp->pf.vf_info[i].vlan_count;
+ if ((vf_mask & 1) == 0)
+ continue;
+
+ if (bp->pf.vf_info[i].vlan_table == NULL) {
+ rc = -1;
+ continue;
+ }
+ if (bp->pf.vf_info[i].vlan_as_table == NULL) {
+ rc = -1;
+ continue;
+ }
+ if (vlan_on) {
+ /* First, search for a duplicate... */
+ for (j = 0; j < cnt; j++) {
+ if (rte_be_to_cpu_16(
+ bp->pf.vf_info[i].vlan_table[j].vid) == vlan)
+ break;
+ }
+ if (j == cnt) {
+ /* Now check that there's space */
+ if (cnt == getpagesize() / sizeof(struct
+ bnxt_vlan_antispoof_table_entry)) {
+ PMD_DRV_LOG(ERR,
+ "VLAN anti-spoof table is full\n");
+ PMD_DRV_LOG(ERR,
+ "VF %d cannot add VLAN %u\n",
+ i, vlan);
+ rc = -1;
+ continue;
+ }
+
+ /* cnt is one less than vlan_count */
+ cnt = bp->pf.vf_info[i].vlan_count++;
+ /*
+ * And finally, add to the
+ * end of the table
+ */
+ vase = &bp->pf.vf_info[i].vlan_as_table[cnt];
+ // TODO: Hardcoded TPID
+ vase->tpid = rte_cpu_to_be_16(0x8100);
+ vase->vid = rte_cpu_to_be_16(vlan);
+ vase->mask = rte_cpu_to_be_16(0xfff);
+ ve = &bp->pf.vf_info[i].vlan_table[cnt];
+ /* TODO: Hardcoded TPID */
+ ve->tpid = rte_cpu_to_be_16(0x8100);
+ ve->vid = rte_cpu_to_be_16(vlan);
+ }
+ } else {
+ for (j = 0; j < cnt; j++) {
+ if (rte_be_to_cpu_16(
+ bp->pf.vf_info[i].vlan_table[j].vid) != vlan)
+ continue;
+ memmove(&bp->pf.vf_info[i].vlan_table[j],
+ &bp->pf.vf_info[i].vlan_table[j + 1],
+ getpagesize() - ((j + 1) *
+ sizeof(struct bnxt_vlan_table_entry)));
+ memmove(&bp->pf.vf_info[i].vlan_as_table[j],
+ &bp->pf.vf_info[i].vlan_as_table[j + 1],
+ getpagesize() - ((j + 1) * sizeof(struct
+ bnxt_vlan_antispoof_table_entry)));
+ j--;
+ cnt = --bp->pf.vf_info[i].vlan_count;
+ }
+ }
+ bnxt_set_vf_table(bp, i);
+ }
+
+ return rc;
+}
+
+int rte_pmd_bnxt_get_vf_stats(uint16_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to get VF %d stats on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ return bnxt_hwrm_func_qstats(bp, bp->pf.first_vf_id + vf_id, stats);
+}
+
+int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
+ uint16_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to reset VF %d stats on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ return bnxt_hwrm_func_clr_stats(bp, bp->pf.first_vf_id + vf_id);
+}
+
+int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to query VF %d RX stats on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ return bnxt_vf_vnic_count(bp, vf_id);
+}
+
+int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
+ uint64_t *count)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to query VF %d TX drops on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ return bnxt_hwrm_func_qstats_tx_drop(bp, bp->pf.first_vf_id + vf_id,
+ count);
+}
+
+int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *addr,
+ uint32_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ struct bnxt_filter_info *filter;
+ struct bnxt_vnic_info vnic;
+ struct ether_addr dflt_mac;
+ int rc;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to config VF %d MAC on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ /* If the VF currently uses a random MAC, update default to this one */
+ if (bp->pf.vf_info[vf_id].random_mac) {
+ if (rte_pmd_bnxt_get_vf_rx_status(port, vf_id) <= 0)
+ bnxt_hwrm_func_vf_mac(bp, vf_id, (uint8_t *)addr);
+ }
+
+ /* query the default VNIC id used by the function */
+ rc = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf_id);
+ if (rc < 0)
+ goto exit;
+
+ memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
+ vnic.fw_vnic_id = rte_le_to_cpu_16(rc);
+ rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf_id);
+ if (rc < 0)
+ goto exit;
+
+ STAILQ_FOREACH(filter, &bp->pf.vf_info[vf_id].filter, next) {
+ if (filter->flags ==
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX &&
+ filter->enables ==
+ (HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) &&
+ memcmp(addr, filter->l2_addr, ETHER_ADDR_LEN) == 0) {
+ bnxt_hwrm_clear_l2_filter(bp, filter);
+ break;
+ }
+ }
+
+ if (filter == NULL)
+ filter = bnxt_alloc_vf_filter(bp, vf_id);
+
+ filter->fw_l2_filter_id = UINT64_MAX;
+ filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+ filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
+ memcpy(filter->l2_addr, addr, ETHER_ADDR_LEN);
+ memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+
+ /* Do not add a filter for the default MAC */
+ if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf_id, &dflt_mac) ||
+ memcmp(filter->l2_addr, dflt_mac.addr_bytes, ETHER_ADDR_LEN))
+ rc = bnxt_hwrm_set_l2_filter(bp, vnic.fw_vnic_id, filter);
+
+exit:
+ return rc;
+}
+
+int
+rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
+ uint16_t vlan_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to set VF %d vlan insert on non-PF port %d!\n",
+ vf, port);
+ return -ENOTSUP;
+ }
+
+ bp->pf.vf_info[vf].dflt_vlan = vlan_id;
+ if (bnxt_hwrm_func_qcfg_current_vf_vlan(bp, vf) ==
+ bp->pf.vf_info[vf].dflt_vlan)
+ return 0;
+
+ rc = bnxt_hwrm_set_vf_vlan(bp, vf);
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev;
+ uint32_t func_flags;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Attempt to set persist stats on non-PF port %d!\n",
+ port);
+ return -EINVAL;
+ }
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ /* Prev setting same as new setting. */
+ if (on == bp->pf.vf_info[vf].persist_stats)
+ return 0;
+
+ func_flags = bp->pf.vf_info[vf].func_cfg_flags;
+
+ if (on)
+ func_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC;
+ else
+ func_flags &=
+ ~HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC;
+
+ rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags);
+ if (!rc) {
+ bp->pf.vf_info[vf].persist_stats = on;
+ bp->pf.vf_info[vf].func_cfg_flags = func_flags;
+ }
+
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.h b/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.h
new file mode 100644
index 00000000..68fbe34d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.h
@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _PMD_BNXT_H_
+#define _PMD_BNXT_H_
+
+#include <rte_ethdev_driver.h>
+
+/*
+ * Response sent back to the caller after callback
+ */
+enum rte_pmd_bnxt_mb_event_rsp {
+ RTE_PMD_BNXT_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */
+ RTE_PMD_BNXT_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */
+ RTE_PMD_BNXT_MB_EVENT_PROCEED, /**< proceed with mbox request */
+ RTE_PMD_BNXT_MB_EVENT_MAX /**< max value of this enum */
+};
+
+/* mailbox message types */
+#define BNXT_VF_RESET 0x01 /* VF requests reset */
+#define BNXT_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define BNXT_VF_SET_VLAN 0x03 /* VF requests PF to set VLAN */
+#define BNXT_VF_SET_MTU 0x04 /* VF requests PF to set MTU */
+#define BNXT_VF_SET_MRU 0x05 /* VF requests PF to set MRU */
+
+/*
+ * Data sent to the caller when the callback is executed.
+ */
+struct rte_pmd_bnxt_mb_event_param {
+ uint16_t vf_id; /* Virtual Function number */
+ int retval; /* return value */
+ void *msg; /* pointer to message */
+};
+
+/**
+ * Enable/Disable VF MAC anti spoof
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param on
+ * 1 - Enable VF MAC anti spoof.
+ * 0 - Disable VF MAC anti spoof.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Set the VF MAC address.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
+ struct ether_addr *mac_addr);
+
+/**
+ * Enable/Disable vf vlan strip for all queues in a pool
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - Enable VF's vlan strip on RX queues.
+ * 0 - Disable VF's vlan strip on RX queues.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable vf vlan insert
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param vlan_id
+ * 0 - Disable VF's vlan insert.
+ * n - Enable; n is inserted as the vlan id.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
+ uint16_t vlan_id);
+
+/**
+ * Enable/Disable hardware VF VLAN filtering by an Ethernet device of
+ * received VLAN packets tagged with a given VLAN Tag Identifier.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vlan
+ * The VLAN Tag Identifier whose filtering must be enabled or disabled.
+ * @param vf_mask
+ * Bitmap listing which VFs participate in the VLAN filtering.
+ * @param vlan_on
+ * 1 - Enable VFs VLAN filtering.
+ * 0 - Disable VFs VLAN filtering.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on);
+
+/**
+ * Enable/Disable tx loopback
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Enable tx loopback.
+ * 0 - Disable tx loopback.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on);
+
+/**
+ * set all queues drop enable bit
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - set the queue drop enable bit for all pools.
+ * 0 - reset the queue drop enable bit for all pools.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on);
+
+/**
+ * Set the VF rate limit.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param tx_rate
+ * Tx rate for the VF
+ * @param q_msk
+ * Mask of the Tx queue
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
+
+/**
+ * Get VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to get.
+ * @param stats
+ * A pointer to a structure of type *rte_eth_stats* to be filled with
+ * the values of device counters supported statistics:
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+
+int rte_pmd_bnxt_get_vf_stats(uint16_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats);
+
+/**
+ * Clear VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to get.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
+ uint16_t vf_id);
+
+/**
+ * Enable/Disable VF VLAN anti spoof
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param on
+ * 1 - Enable VF VLAN anti spoof.
+ * 0 - Disable VF VLAN anti spoof.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Set RX L2 Filtering mode of a VF of an Ethernet device.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param rx_mask
+ * The RX mode mask
+ * @param on
+ * 1 - Enable a VF RX mode.
+ * 0 - Disable a VF RX mode.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
+ uint16_t rx_mask, uint8_t on);
+
+/**
+ * Returns the number of default RX queues on a VF
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @return
+ * - Non-negative value - Number of default RX queues
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) if on a function without VFs
+ * - (-ENOMEM) on an allocation failure
+ * - (-1) firmware interface error
+ */
+int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id);
+
+/**
+ * Queries the TX drop counter for the function
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to get.
+ * @param count
+ * Pointer to a uint64_t that will be populated with the counter value.
+ * @return
+ * - Positive Non-zero value - Error code from HWRM
+ * - (-EINVAL) invalid vf_id specified.
+ * - (-ENOTSUP) Ethernet device is not a PF
+ */
+int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
+ uint64_t *count);
+
+/**
+ * Programs the MAC address for the function specified
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac_addr
+ * The MAC address to be programmed in the filter.
+ * @param vf_id
+ * VF on which to get.
+ * @return
+ * - Positive Non-zero value - Error code from HWRM
+ * - (-EINVAL) invalid vf_id specified.
+ * - (-ENOTSUP) Ethernet device is not a PF
+ * - (-ENOMEM) on an allocation failure
+ */
+int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *mac_addr,
+ uint32_t vf_id);
+
+/**
+ * Enable/Disable VF statistics retention
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param on
+ * 1 - Prevent VF statistics from automatically resetting
+ * 0 - Allow VF statistics to automatically reset
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on);
+#endif /* _PMD_BNXT_H_ */
diff --git a/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt_version.map b/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt_version.map
new file mode 100644
index 00000000..4750d40a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt_version.map
@@ -0,0 +1,22 @@
+DPDK_17.08 {
+ global:
+
+ rte_pmd_bnxt_get_vf_rx_status;
+ rte_pmd_bnxt_get_vf_stats;
+ rte_pmd_bnxt_get_vf_tx_drop_count;
+ rte_pmd_bnxt_mac_addr_add;
+ rte_pmd_bnxt_reset_vf_stats;
+ rte_pmd_bnxt_set_all_queues_drop_en;
+ rte_pmd_bnxt_set_tx_loopback;
+ rte_pmd_bnxt_set_vf_mac_addr;
+ rte_pmd_bnxt_set_vf_mac_anti_spoof;
+ rte_pmd_bnxt_set_vf_rate_limit;
+ rte_pmd_bnxt_set_vf_rxmode;
+ rte_pmd_bnxt_set_vf_vlan_anti_spoof;
+ rte_pmd_bnxt_set_vf_vlan_filter;
+ rte_pmd_bnxt_set_vf_vlan_insert;
+ rte_pmd_bnxt_set_vf_vlan_stripq;
+ rte_pmd_bnxt_set_vf_persist_stats;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/bonding/Makefile b/src/spdk/dpdk/drivers/net/bonding/Makefile
new file mode 100644
index 00000000..acad16a1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_bond.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cmdline
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
+
+EXPORT_MAP := rte_pmd_bond_version.map
+
+LIBABIVER := 2
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_args.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_8023ad.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_alb.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_flow.c
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_eth_bond.h
+SYMLINK-y-include += rte_eth_bond_8023ad.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/bonding/meson.build b/src/spdk/dpdk/drivers/net/bonding/meson.build
new file mode 100644
index 00000000..602d2880
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+name = 'bond' #, james bond :-)
+version = 2
+sources = files('rte_eth_bond_api.c', 'rte_eth_bond_pmd.c', 'rte_eth_bond_flow.c',
+ 'rte_eth_bond_args.c', 'rte_eth_bond_8023ad.c', 'rte_eth_bond_alb.c')
+
+deps += 'sched' # needed for rte_bitmap.h
+deps += ['ip_frag', 'cmdline']
+
+install_headers('rte_eth_bond.h', 'rte_eth_bond_8023ad.h')
diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond.h b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond.h
new file mode 100644
index 00000000..b668ff9a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond.h
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#ifndef _RTE_ETH_BOND_H_
+#define _RTE_ETH_BOND_H_
+
+/**
+ * @file rte_eth_bond.h
+ *
+ * RTE Link Bonding Ethernet Device
+ * Link Bonding for 1GbE and 10GbE ports to allow the aggregation of multiple
+ * (slave) NICs into a single logical interface. The bonded device processes
+ * these interfaces based on the mode of operation specified and supported.
+ * This implementation supports 4 modes of operation round robin, active backup
+ * balance and broadcast. Providing redundant links, fault tolerance and/or
+ * load balancing of network ports
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_ether.h>
+
+/* Supported modes of operation of link bonding library */
+
+#define BONDING_MODE_ROUND_ROBIN (0)
+/**< Round Robin (Mode 0).
+ * In this mode all transmitted packets will be balanced equally across all
+ * active slaves of the bonded in a round robin fashion. */
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+/**< Active Backup (Mode 1).
+ * In this mode all packets transmitted will be transmitted on the primary
+ * slave until such point as the primary slave is no longer available and then
+ * transmitted packets will be sent on the next available slaves. The primary
+ * slave can be defined by the user but defaults to the first active slave
+ * available if not specified. */
+#define BONDING_MODE_BALANCE (2)
+/**< Balance (Mode 2).
+ * In this mode all packets transmitted will be balanced across the available
+ * slaves using one of three available transmit policies - l2, l2+3 or l3+4.
+ * See BALANCE_XMIT_POLICY macros definitions for further details on transmit
+ * policies. */
+#define BONDING_MODE_BROADCAST (3)
+/**< Broadcast (Mode 3).
+ * In this mode all transmitted packets will be transmitted on all available
+ * active slaves of the bonded. */
+#define BONDING_MODE_8023AD (4)
+/**< 802.3AD (Mode 4).
+ *
+ * This mode provides auto negotiation/configuration
+ * of peers and well as link status changes monitoring using out of band
+ * LACP (link aggregation control protocol) messages. For further details of
+ * LACP specification see the IEEE 802.3ad/802.1AX standards. It is also
+ * described here
+ * https://www.kernel.org/doc/Documentation/networking/bonding.txt.
+ *
+ * Important Usage Notes:
+ * - for LACP mode to work the rx/tx burst functions must be invoked
+ * at least once every 100ms, otherwise the out-of-band LACP messages will not
+ * be handled with the expected latency and this may cause the link status to be
+ * incorrectly marked as down or failure to correctly negotiate with peers.
+ * - For optimal performance during initial handshaking the array of mbufs provided
+ * to rx_burst should be at least 2 times the slave count size.
+ *
+ */
+#define BONDING_MODE_TLB (5)
+/**< Adaptive TLB (Mode 5)
+ * This mode provides an adaptive transmit load balancing. It dynamically
+ * changes the transmitting slave, according to the computed load. Statistics
+ * are collected in 100ms intervals and scheduled every 10ms */
+#define BONDING_MODE_ALB (6)
+/**< Adaptive Load Balancing (Mode 6)
+ * This mode includes adaptive TLB and receive load balancing (RLB). In RLB the
+ * bonding driver intercepts ARP replies send by local system and overwrites its
+ * source MAC address, so that different peers send data to the server on
+ * different slave interfaces. When local system sends ARP request, it saves IP
+ * information from it. When ARP reply from that peer is received, its MAC is
+ * stored, one of slave MACs assigned and ARP reply send to that peer.
+ */
+
+/* Balance Mode Transmit Policies */
+#define BALANCE_XMIT_POLICY_LAYER2 (0)
+/**< Layer 2 (Ethernet MAC) */
+#define BALANCE_XMIT_POLICY_LAYER23 (1)
+/**< Layer 2+3 (Ethernet MAC + IP Addresses) transmit load balancing */
+#define BALANCE_XMIT_POLICY_LAYER34 (2)
+/**< Layer 3+4 (IP Addresses + UDP Ports) transmit load balancing */
+
+/**
+ * Create a bonded rte_eth_dev device
+ *
+ * @param name Name of new link bonding device.
+ * @param mode Mode to initialize bonding device in.
+ * @param socket_id Socket Id on which to allocate eth_dev resources.
+ *
+ * @return
+ * Port Id of created rte_eth_dev on success, negative value otherwise
+ */
+int
+rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id);
+
+/**
+ * Free a bonded rte_eth_dev device
+ *
+ * @param name Name of the link bonding device.
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_free(const char *name);
+
+/**
+ * Add a rte_eth_dev device as a slave to the bonded device
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param slave_port_id Port ID of slave device.
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id);
+
+/**
+ * Remove a slave rte_eth_dev device from the bonded device
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param slave_port_id Port ID of slave device.
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id);
+
+/**
+ * Set link bonding mode of bonded device
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param mode Bonding mode to set
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode);
+
+/**
+ * Get link bonding mode of bonded device
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * link bonding mode on success, negative value otherwise
+ */
+int
+rte_eth_bond_mode_get(uint16_t bonded_port_id);
+
+/**
+ * Set slave rte_eth_dev as primary slave of bonded device
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param slave_port_id Port ID of slave device.
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id);
+
+/**
+ * Get primary slave of bonded device
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * Port Id of primary slave on success, -1 on failure
+ */
+int
+rte_eth_bond_primary_get(uint16_t bonded_port_id);
+
+/**
+ * Populate an array with list of the slaves port id's of the bonded device
+ *
+ * @param bonded_port_id Port ID of bonded eth_dev to interrogate
+ * @param slaves Array to be populated with the current active slaves
+ * @param len Length of slaves array
+ *
+ * @return
+ * Number of slaves associated with bonded device on success,
+ * negative value otherwise
+ */
+int
+rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
+ uint16_t len);
+
+/**
+ * Populate an array with list of the active slaves port id's of the bonded
+ * device.
+ *
+ * @param bonded_port_id Port ID of bonded eth_dev to interrogate
+ * @param slaves Array to be populated with the current active slaves
+ * @param len Length of slaves array
+ *
+ * @return
+ * Number of active slaves associated with bonded device on success,
+ * negative value otherwise
+ */
+int
+rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
+ uint16_t len);
+
+/**
+ * Set explicit MAC address to use on bonded device and it's slaves.
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param mac_addr MAC Address to use on bonded device overriding
+ * slaves MAC addresses
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_mac_address_set(uint16_t bonded_port_id,
+ struct ether_addr *mac_addr);
+
+/**
+ * Reset bonded device to use MAC from primary slave on bonded device and it's
+ * slaves.
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_mac_address_reset(uint16_t bonded_port_id);
+
+/**
+ * Set the transmit policy for bonded device to use when it is operating in
+ * balance mode, this parameter is otherwise ignored in other modes of
+ * operation.
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param policy Balance mode transmission policy.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy);
+
+/**
+ * Get the transmit policy set on bonded device for balance mode operation
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * Balance transmit policy on success, negative value otherwise.
+ */
+int
+rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id);
+
+/**
+ * Set the link monitoring frequency (in ms) for monitoring the link status of
+ * slave devices
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param internal_ms Monitoring interval in milliseconds
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+
+int
+rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms);
+
+/**
+ * Get the current link monitoring frequency (in ms) for monitoring of the link
+ * status of slave devices
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * Monitoring interval on success, negative value otherwise.
+ */
+int
+rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id);
+
+
+/**
+ * Set the period in milliseconds for delaying the disabling of a bonded link
+ * when the link down status has been detected
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param delay_ms Delay period in milliseconds.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
+ uint32_t delay_ms);
+
+/**
+ * Get the period in milliseconds set for delaying the disabling of a bonded
+ * link when the link down status has been detected
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * Delay period on success, negative value otherwise.
+ */
+int
+rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id);
+
+/**
+ * Set the period in milliseconds for delaying the enabling of a bonded link
+ * when the link up status has been detected
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param delay_ms Delay period in milliseconds.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id,
+ uint32_t delay_ms);
+
+/**
+ * Get the period in milliseconds set for delaying the enabling of a bonded
+ * link when the link up status has been detected
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * Delay period on success, negative value otherwise.
+ */
+int
+rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c
new file mode 100644
index 00000000..f8cea4b6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -0,0 +1,1614 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include <stddef.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include <rte_alarm.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+#include <rte_compat.h>
+
+#include "rte_eth_bond_private.h"
+
+static void bond_mode_8023ad_ext_periodic_cb(void *arg);
+#ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
+
+#define MODE4_DEBUG(fmt, ...) \
+ rte_log(RTE_LOG_DEBUG, bond_logtype, \
+ "%6u [Port %u: %s] " fmt, \
+ bond_dbg_get_time_diff_ms(), slave_id, \
+ __func__, ##__VA_ARGS__)
+
+static uint64_t start_time;
+
+static unsigned
+bond_dbg_get_time_diff_ms(void)
+{
+ uint64_t now;
+
+ now = rte_rdtsc();
+ if (start_time == 0)
+ start_time = now;
+
+ return ((now - start_time) * 1000) / rte_get_tsc_hz();
+}
+
+static void
+bond_print_lacp(struct lacpdu *l)
+{
+ char a_address[18];
+ char p_address[18];
+ char a_state[256] = { 0 };
+ char p_state[256] = { 0 };
+
+ static const char * const state_labels[] = {
+ "ACT", "TIMEOUT", "AGG", "SYNC", "COL", "DIST", "DEF", "EXP"
+ };
+
+ int a_len = 0;
+ int p_len = 0;
+ uint8_t i;
+ uint8_t *addr;
+
+ addr = l->actor.port_params.system.addr_bytes;
+ snprintf(a_address, sizeof(a_address), "%02X:%02X:%02X:%02X:%02X:%02X",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+ addr = l->partner.port_params.system.addr_bytes;
+ snprintf(p_address, sizeof(p_address), "%02X:%02X:%02X:%02X:%02X:%02X",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+ for (i = 0; i < 8; i++) {
+ if ((l->actor.state >> i) & 1) {
+ a_len += snprintf(&a_state[a_len], RTE_DIM(a_state) - a_len, "%s ",
+ state_labels[i]);
+ }
+
+ if ((l->partner.state >> i) & 1) {
+ p_len += snprintf(&p_state[p_len], RTE_DIM(p_state) - p_len, "%s ",
+ state_labels[i]);
+ }
+ }
+
+ if (a_len && a_state[a_len-1] == ' ')
+ a_state[a_len-1] = '\0';
+
+ if (p_len && p_state[p_len-1] == ' ')
+ p_state[p_len-1] = '\0';
+
+ RTE_BOND_LOG(DEBUG,
+ "LACP: {\n"
+ " subtype= %02X\n"
+ " ver_num=%02X\n"
+ " actor={ tlv=%02X, len=%02X\n"
+ " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"
+ " state={ %s }\n"
+ " }\n"
+ " partner={ tlv=%02X, len=%02X\n"
+ " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"
+ " state={ %s }\n"
+ " }\n"
+ " collector={info=%02X, length=%02X, max_delay=%04X\n, "
+ "type_term=%02X, terminator_length = %02X }",
+ l->subtype,
+ l->version_number,
+ l->actor.tlv_type_info,
+ l->actor.info_length,
+ l->actor.port_params.system_priority,
+ a_address,
+ l->actor.port_params.key,
+ l->actor.port_params.port_priority,
+ l->actor.port_params.port_number,
+ a_state,
+ l->partner.tlv_type_info,
+ l->partner.info_length,
+ l->partner.port_params.system_priority,
+ p_address,
+ l->partner.port_params.key,
+ l->partner.port_params.port_priority,
+ l->partner.port_params.port_number,
+ p_state,
+ l->tlv_type_collector_info,
+ l->collector_info_length,
+ l->collector_max_delay,
+ l->tlv_type_terminator,
+ l->terminator_length);
+
+}
+
+#define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu)
+#else
+#define BOND_PRINT_LACP(lacpdu) do { } while (0)
+#define MODE4_DEBUG(fmt, ...) do { } while (0)
+#endif
+
+static const struct ether_addr lacp_mac_addr = {
+ .addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }
+};
+
+struct port mode_8023ad_ports[RTE_MAX_ETHPORTS];
+
+static void
+timer_cancel(uint64_t *timer)
+{
+ *timer = 0;
+}
+
+static void
+timer_set(uint64_t *timer, uint64_t timeout)
+{
+ *timer = rte_rdtsc() + timeout;
+}
+
+/* Forces given timer to be in expired state. */
+static void
+timer_force_expired(uint64_t *timer)
+{
+ *timer = rte_rdtsc();
+}
+
+static bool
+timer_is_stopped(uint64_t *timer)
+{
+ return *timer == 0;
+}
+
+static bool
+timer_is_expired(uint64_t *timer)
+{
+ return *timer < rte_rdtsc();
+}
+
+/* Timer is in running state if it is not stopped nor expired */
+static bool
+timer_is_running(uint64_t *timer)
+{
+ return !timer_is_stopped(timer) && !timer_is_expired(timer);
+}
+
+static void
+set_warning_flags(struct port *port, uint16_t flags)
+{
+ int retval;
+ uint16_t old;
+ uint16_t new_flag = 0;
+
+ do {
+ old = port->warnings_to_show;
+ new_flag = old | flags;
+ retval = rte_atomic16_cmpset(&port->warnings_to_show, old, new_flag);
+ } while (unlikely(retval == 0));
+}
+
+static void
+show_warnings(uint16_t slave_id)
+{
+ struct port *port = &mode_8023ad_ports[slave_id];
+ uint8_t warnings;
+
+ do {
+ warnings = port->warnings_to_show;
+ } while (rte_atomic16_cmpset(&port->warnings_to_show, warnings, 0) == 0);
+
+ if (!warnings)
+ return;
+
+ if (!timer_is_expired(&port->warning_timer))
+ return;
+
+
+ timer_set(&port->warning_timer, BOND_8023AD_WARNINGS_PERIOD_MS *
+ rte_get_tsc_hz() / 1000);
+
+ if (warnings & WRN_RX_QUEUE_FULL) {
+ RTE_BOND_LOG(DEBUG,
+ "Slave %u: failed to enqueue LACP packet into RX ring.\n"
+ "Receive and transmit functions must be invoked on bonded"
+ "interface at least 10 times per second or LACP will notwork correctly",
+ slave_id);
+ }
+
+ if (warnings & WRN_TX_QUEUE_FULL) {
+ RTE_BOND_LOG(DEBUG,
+ "Slave %u: failed to enqueue LACP packet into TX ring.\n"
+ "Receive and transmit functions must be invoked on bonded"
+ "interface at least 10 times per second or LACP will not work correctly",
+ slave_id);
+ }
+
+ if (warnings & WRN_RX_MARKER_TO_FAST)
+ RTE_BOND_LOG(INFO, "Slave %u: marker to early - ignoring.",
+ slave_id);
+
+ if (warnings & WRN_UNKNOWN_SLOW_TYPE) {
+ RTE_BOND_LOG(INFO,
+ "Slave %u: ignoring unknown slow protocol frame type",
+ slave_id);
+ }
+
+ if (warnings & WRN_UNKNOWN_MARKER_TYPE)
+ RTE_BOND_LOG(INFO, "Slave %u: ignoring unknown marker type",
+ slave_id);
+
+ if (warnings & WRN_NOT_LACP_CAPABLE)
+ MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id);
+}
+
+static void
+record_default(struct port *port)
+{
+ /* Record default parameters for partner. Partner admin parameters
+ * are not implemented so set them to arbitrary default (last known) and
+ * mark actor that parner is in defaulted state. */
+ port->partner_state = STATE_LACP_ACTIVE;
+ ACTOR_STATE_SET(port, DEFAULTED);
+}
+
+/** Function handles rx state machine.
+ *
+ * This function implements Receive State Machine from point 5.4.12 in
+ * 802.1AX documentation. It should be called periodically.
+ *
+ * @param lacpdu LACPDU received.
+ * @param port Port on which LACPDU was received.
+ */
+static void
+rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
+ struct lacpdu *lacp)
+{
+ struct port *agg, *port = &mode_8023ad_ports[slave_id];
+ uint64_t timeout;
+
+ if (SM_FLAG(port, BEGIN)) {
+ /* Initialize stuff */
+ MODE4_DEBUG("-> INITIALIZE\n");
+ SM_FLAG_CLR(port, MOVED);
+ port->selected = UNSELECTED;
+
+ record_default(port);
+
+ ACTOR_STATE_CLR(port, EXPIRED);
+ timer_cancel(&port->current_while_timer);
+
+ /* DISABLED: On initialization partner is out of sync */
+ PARTNER_STATE_CLR(port, SYNCHRONIZATION);
+
+ /* LACP DISABLED stuff if LACP not enabled on this port */
+ if (!SM_FLAG(port, LACP_ENABLED))
+ PARTNER_STATE_CLR(port, AGGREGATION);
+ else
+ PARTNER_STATE_SET(port, AGGREGATION);
+ }
+
+ if (!SM_FLAG(port, LACP_ENABLED)) {
+ /* Update parameters only if state changed */
+ if (!timer_is_stopped(&port->current_while_timer)) {
+ port->selected = UNSELECTED;
+ record_default(port);
+ PARTNER_STATE_CLR(port, AGGREGATION);
+ ACTOR_STATE_CLR(port, EXPIRED);
+ timer_cancel(&port->current_while_timer);
+ }
+ return;
+ }
+
+ if (lacp) {
+ MODE4_DEBUG("LACP -> CURRENT\n");
+ BOND_PRINT_LACP(lacp);
+ /* Update selected flag. If partner parameters are defaulted assume they
+ * are match. If not defaulted compare LACP actor with ports parner
+ * params. */
+ if (!ACTOR_STATE(port, DEFAULTED) &&
+ (ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)
+ || memcmp(&port->partner, &lacp->actor.port_params,
+ sizeof(port->partner)) != 0)) {
+ MODE4_DEBUG("selected <- UNSELECTED\n");
+ port->selected = UNSELECTED;
+ }
+
+ /* Record this PDU actor params as partner params */
+ memcpy(&port->partner, &lacp->actor.port_params,
+ sizeof(struct port_params));
+ port->partner_state = lacp->actor.state;
+
+ /* Partner parameters are not defaulted any more */
+ ACTOR_STATE_CLR(port, DEFAULTED);
+
+ /* If LACP partner params match this port actor params */
+ agg = &mode_8023ad_ports[port->aggregator_port_id];
+ bool match = port->actor.system_priority ==
+ lacp->partner.port_params.system_priority &&
+ is_same_ether_addr(&agg->actor.system,
+ &lacp->partner.port_params.system) &&
+ port->actor.port_priority ==
+ lacp->partner.port_params.port_priority &&
+ port->actor.port_number ==
+ lacp->partner.port_params.port_number;
+
+ /* Update NTT if partners information are outdated (xored and masked
+ * bits are set)*/
+ uint8_t state_mask = STATE_LACP_ACTIVE | STATE_LACP_SHORT_TIMEOUT |
+ STATE_SYNCHRONIZATION | STATE_AGGREGATION;
+
+ if (((port->actor_state ^ lacp->partner.state) & state_mask) ||
+ match == false) {
+ SM_FLAG_SET(port, NTT);
+ }
+
+ /* If LACP partner params match this port actor params */
+ if (match == true && ACTOR_STATE(port, AGGREGATION) ==
+ PARTNER_STATE(port, AGGREGATION))
+ PARTNER_STATE_SET(port, SYNCHRONIZATION);
+ else if (!PARTNER_STATE(port, AGGREGATION) && ACTOR_STATE(port,
+ AGGREGATION))
+ PARTNER_STATE_SET(port, SYNCHRONIZATION);
+ else
+ PARTNER_STATE_CLR(port, SYNCHRONIZATION);
+
+ if (ACTOR_STATE(port, LACP_SHORT_TIMEOUT))
+ timeout = internals->mode4.short_timeout;
+ else
+ timeout = internals->mode4.long_timeout;
+
+ timer_set(&port->current_while_timer, timeout);
+ ACTOR_STATE_CLR(port, EXPIRED);
+ return; /* No state change */
+ }
+
+ /* If CURRENT state timer is not running (stopped or expired)
+ * transit to EXPIRED state from DISABLED or CURRENT */
+ if (!timer_is_running(&port->current_while_timer)) {
+ ACTOR_STATE_SET(port, EXPIRED);
+ PARTNER_STATE_CLR(port, SYNCHRONIZATION);
+ PARTNER_STATE_SET(port, LACP_SHORT_TIMEOUT);
+ timer_set(&port->current_while_timer, internals->mode4.short_timeout);
+ }
+}
+
+/**
+ * Function handles periodic tx state machine.
+ *
+ * Function implements Periodic Transmission state machine from point 5.4.13
+ * in 802.1AX documentation. It should be called periodically.
+ *
+ * @param port Port to handle state machine.
+ */
+static void
+periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
+{
+ struct port *port = &mode_8023ad_ports[slave_id];
+ /* Calculate if either site is LACP enabled */
+ uint64_t timeout;
+ uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) ||
+ PARTNER_STATE(port, LACP_ACTIVE);
+
+ uint8_t is_partner_fast, was_partner_fast;
+ /* No periodic is on BEGIN, LACP DISABLE or when both sides are pasive */
+ if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {
+ timer_cancel(&port->periodic_timer);
+ timer_force_expired(&port->tx_machine_timer);
+ SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
+
+ MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n",
+ SM_FLAG(port, BEGIN) ? "begind " : "",
+ SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ",
+ active ? "LACP active " : "LACP pasive ");
+ return;
+ }
+
+ is_partner_fast = PARTNER_STATE(port, LACP_SHORT_TIMEOUT);
+ was_partner_fast = SM_FLAG(port, PARTNER_SHORT_TIMEOUT);
+
+ /* If periodic timer is not started, transit from NO PERIODIC to FAST/SLOW.
+ * Other case: check if timer expire or partners settings changed. */
+ if (!timer_is_stopped(&port->periodic_timer)) {
+ if (timer_is_expired(&port->periodic_timer)) {
+ SM_FLAG_SET(port, NTT);
+ } else if (is_partner_fast != was_partner_fast) {
+ /* Partners timeout was slow and now it is fast -> send LACP.
+ * In other case (was fast and now it is slow) just switch
+ * timeout to slow without forcing send of LACP (because standard
+ * say so)*/
+ if (is_partner_fast)
+ SM_FLAG_SET(port, NTT);
+ } else
+ return; /* Nothing changed */
+ }
+
+ /* Handle state transition to FAST/SLOW LACP timeout */
+ if (is_partner_fast) {
+ timeout = internals->mode4.fast_periodic_timeout;
+ SM_FLAG_SET(port, PARTNER_SHORT_TIMEOUT);
+ } else {
+ timeout = internals->mode4.slow_periodic_timeout;
+ SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
+ }
+
+ timer_set(&port->periodic_timer, timeout);
+}
+
+/**
+ * Function handles mux state machine.
+ *
+ * Function implements Mux Machine from point 5.4.15 in 802.1AX documentation.
+ * It should be called periodically.
+ *
+ * @param port Port to handle state machine.
+ */
+static void
+mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
+{
+ struct port *port = &mode_8023ad_ports[slave_id];
+
+ /* Save current state for later use */
+ const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
+ STATE_COLLECTING;
+
+ /* Enter DETACHED state on BEGIN condition or from any other state if
+ * port was unselected */
+ if (SM_FLAG(port, BEGIN) ||
+ port->selected == UNSELECTED || (port->selected == STANDBY &&
+ (port->actor_state & state_mask) != 0)) {
+ /* detach mux from aggregator */
+ port->actor_state &= ~state_mask;
+ /* Set ntt to true if BEGIN condition or transition from any other state
+ * which is indicated that wait_while_timer was started */
+ if (SM_FLAG(port, BEGIN) ||
+ !timer_is_stopped(&port->wait_while_timer)) {
+ SM_FLAG_SET(port, NTT);
+ MODE4_DEBUG("-> DETACHED\n");
+ }
+ timer_cancel(&port->wait_while_timer);
+ }
+
+ if (timer_is_stopped(&port->wait_while_timer)) {
+ if (port->selected == SELECTED || port->selected == STANDBY) {
+ timer_set(&port->wait_while_timer,
+ internals->mode4.aggregate_wait_timeout);
+
+ MODE4_DEBUG("DETACHED -> WAITING\n");
+ }
+ /* Waiting state entered */
+ return;
+ }
+
+ /* Transit next state if port is ready */
+ if (!timer_is_expired(&port->wait_while_timer))
+ return;
+
+ if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&
+ !PARTNER_STATE(port, SYNCHRONIZATION)) {
+ /* If in COLLECTING or DISTRIBUTING state and partner becomes out of
+ * sync transit to ATACHED state. */
+ ACTOR_STATE_CLR(port, DISTRIBUTING);
+ ACTOR_STATE_CLR(port, COLLECTING);
+ /* Clear actor sync to activate transit ATACHED in condition bellow */
+ ACTOR_STATE_CLR(port, SYNCHRONIZATION);
+ MODE4_DEBUG("Out of sync -> ATTACHED\n");
+ }
+
+ if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
+ /* attach mux to aggregator */
+ RTE_ASSERT((port->actor_state & (STATE_COLLECTING |
+ STATE_DISTRIBUTING)) == 0);
+
+ ACTOR_STATE_SET(port, SYNCHRONIZATION);
+ SM_FLAG_SET(port, NTT);
+ MODE4_DEBUG("ATTACHED Entered\n");
+ } else if (!ACTOR_STATE(port, COLLECTING)) {
+ /* Start collecting if in sync */
+ if (PARTNER_STATE(port, SYNCHRONIZATION)) {
+ MODE4_DEBUG("ATTACHED -> COLLECTING\n");
+ ACTOR_STATE_SET(port, COLLECTING);
+ SM_FLAG_SET(port, NTT);
+ }
+ } else if (ACTOR_STATE(port, COLLECTING)) {
+ /* Check if partner is in COLLECTING state. If so this port can
+ * distribute frames to it */
+ if (!ACTOR_STATE(port, DISTRIBUTING)) {
+ if (PARTNER_STATE(port, COLLECTING)) {
+ /* Enable DISTRIBUTING if partner is collecting */
+ ACTOR_STATE_SET(port, DISTRIBUTING);
+ SM_FLAG_SET(port, NTT);
+ MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n");
+ RTE_BOND_LOG(INFO,
+ "Bond %u: slave id %u distributing started.",
+ internals->port_id, slave_id);
+ }
+ } else {
+ if (!PARTNER_STATE(port, COLLECTING)) {
+ /* Disable DISTRIBUTING (enter COLLECTING state) if partner
+ * is not collecting */
+ ACTOR_STATE_CLR(port, DISTRIBUTING);
+ SM_FLAG_SET(port, NTT);
+ MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n");
+ RTE_BOND_LOG(INFO,
+ "Bond %u: slave id %u distributing stopped.",
+ internals->port_id, slave_id);
+ }
+ }
+ }
+}
+
+/**
+ * Function handles transmit state machine.
+ *
+ * Function implements Transmit Machine from point 5.4.16 in 802.1AX
+ * documentation.
+ *
+ * @param port
+ */
+static void
+tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
+{
+ struct port *agg, *port = &mode_8023ad_ports[slave_id];
+
+ struct rte_mbuf *lacp_pkt = NULL;
+ struct lacpdu_header *hdr;
+ struct lacpdu *lacpdu;
+
+ /* If periodic timer is not running periodic machine is in NO PERIODIC and
+ * according to 802.3ax standard tx machine should not transmit any frames
+ * and set ntt to false. */
+ if (timer_is_stopped(&port->periodic_timer))
+ SM_FLAG_CLR(port, NTT);
+
+ if (!SM_FLAG(port, NTT))
+ return;
+
+ if (!timer_is_expired(&port->tx_machine_timer))
+ return;
+
+ lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool);
+ if (lacp_pkt == NULL) {
+ RTE_BOND_LOG(ERR, "Failed to allocate LACP packet from pool");
+ return;
+ }
+
+ lacp_pkt->data_len = sizeof(*hdr);
+ lacp_pkt->pkt_len = sizeof(*hdr);
+
+ hdr = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
+
+ /* Source and destination MAC */
+ ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr);
+ rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr);
+ hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);
+
+ lacpdu = &hdr->lacpdu;
+ memset(lacpdu, 0, sizeof(*lacpdu));
+
+ /* Initialize LACP part */
+ lacpdu->subtype = SLOW_SUBTYPE_LACP;
+ lacpdu->version_number = 1;
+
+ /* ACTOR */
+ lacpdu->actor.tlv_type_info = TLV_TYPE_ACTOR_INFORMATION;
+ lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params);
+ memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
+ sizeof(port->actor));
+ agg = &mode_8023ad_ports[port->aggregator_port_id];
+ ether_addr_copy(&agg->actor.system, &hdr->lacpdu.actor.port_params.system);
+ lacpdu->actor.state = port->actor_state;
+
+ /* PARTNER */
+ lacpdu->partner.tlv_type_info = TLV_TYPE_PARTNER_INFORMATION;
+ lacpdu->partner.info_length = sizeof(struct lacpdu_actor_partner_params);
+ memcpy(&lacpdu->partner.port_params, &port->partner,
+ sizeof(struct port_params));
+ lacpdu->partner.state = port->partner_state;
+
+ /* Other fields */
+ lacpdu->tlv_type_collector_info = TLV_TYPE_COLLECTOR_INFORMATION;
+ lacpdu->collector_info_length = 0x10;
+ lacpdu->collector_max_delay = 0;
+
+ lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
+ lacpdu->terminator_length = 0;
+
+ MODE4_DEBUG("Sending LACP frame\n");
+ BOND_PRINT_LACP(lacpdu);
+
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ int retval = rte_ring_enqueue(port->tx_ring, lacp_pkt);
+ if (retval != 0) {
+ /* If TX ring full, drop packet and free message.
+ Retransmission will happen in next function call. */
+ rte_pktmbuf_free(lacp_pkt);
+ set_warning_flags(port, WRN_TX_QUEUE_FULL);
+ return;
+ }
+ } else {
+ uint16_t pkts_sent = rte_eth_tx_burst(slave_id,
+ internals->mode4.dedicated_queues.tx_qid,
+ &lacp_pkt, 1);
+ if (pkts_sent != 1) {
+ rte_pktmbuf_free(lacp_pkt);
+ set_warning_flags(port, WRN_TX_QUEUE_FULL);
+ return;
+ }
+ }
+
+
+ timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout);
+ SM_FLAG_CLR(port, NTT);
+}
+
+static uint8_t
+max_index(uint64_t *a, int n)
+{
+ if (n <= 0)
+ return -1;
+
+ int i, max_i = 0;
+ uint64_t max = a[0];
+
+ for (i = 1; i < n; ++i) {
+ if (a[i] > max) {
+ max = a[i];
+ max_i = i;
+ }
+ }
+
+ return max_i;
+}
+
+/**
+ * Function assigns port to aggregator.
+ *
+ * @param bond_dev_private Pointer to bond_dev_private structure.
+ * @param port_pos Port to assign.
+ */
+static void
+selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
+{
+ struct port *agg, *port;
+ uint16_t slaves_count, new_agg_id, i, j = 0;
+ uint16_t *slaves;
+ uint64_t agg_bandwidth[8] = {0};
+ uint64_t agg_count[8] = {0};
+ uint16_t default_slave = 0;
+ uint8_t mode_count_id, mode_band_id;
+ struct rte_eth_link link_info;
+
+ slaves = internals->active_slaves;
+ slaves_count = internals->active_slave_count;
+ port = &mode_8023ad_ports[slave_id];
+
+ /* Search for aggregator suitable for this port */
+ for (i = 0; i < slaves_count; ++i) {
+ agg = &mode_8023ad_ports[slaves[i]];
+ /* Skip ports that are not aggreagators */
+ if (agg->aggregator_port_id != slaves[i])
+ continue;
+
+ agg_count[agg->aggregator_port_id] += 1;
+ rte_eth_link_get_nowait(slaves[i], &link_info);
+ agg_bandwidth[agg->aggregator_port_id] += link_info.link_speed;
+
+ /* Actors system ID is not checked since all slave device have the same
+ * ID (MAC address). */
+ if ((agg->actor.key == port->actor.key &&
+ agg->partner.system_priority == port->partner.system_priority &&
+ is_same_ether_addr(&agg->partner.system, &port->partner.system) == 1
+ && (agg->partner.key == port->partner.key)) &&
+ is_zero_ether_addr(&port->partner.system) != 1 &&
+ (agg->actor.key &
+ rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) {
+
+ if (j == 0)
+ default_slave = i;
+ j++;
+ }
+ }
+
+ switch (internals->mode4.agg_selection) {
+ case AGG_COUNT:
+ mode_count_id = max_index(
+ (uint64_t *)agg_count, slaves_count);
+ new_agg_id = mode_count_id;
+ break;
+ case AGG_BANDWIDTH:
+ mode_band_id = max_index(
+ (uint64_t *)agg_bandwidth, slaves_count);
+ new_agg_id = mode_band_id;
+ break;
+ case AGG_STABLE:
+ if (default_slave == slaves_count)
+ new_agg_id = slave_id;
+ else
+ new_agg_id = slaves[default_slave];
+ break;
+ default:
+ if (default_slave == slaves_count)
+ new_agg_id = slave_id;
+ else
+ new_agg_id = slaves[default_slave];
+ break;
+ }
+
+ if (new_agg_id != port->aggregator_port_id) {
+ port->aggregator_port_id = new_agg_id;
+
+ MODE4_DEBUG("-> SELECTED: ID=%3u\n"
+ "\t%s aggregator ID=%3u\n",
+ port->aggregator_port_id,
+ port->aggregator_port_id == slave_id ?
+ "aggregator not found, using default" : "aggregator found",
+ port->aggregator_port_id);
+ }
+
+ port->selected = SELECTED;
+}
+
+/* Function maps DPDK speed to bonding speed stored in key field */
+static uint16_t
+link_speed_key(uint16_t speed) {
+ uint16_t key_speed;
+
+ switch (speed) {
+ case ETH_SPEED_NUM_NONE:
+ key_speed = 0x00;
+ break;
+ case ETH_SPEED_NUM_10M:
+ key_speed = BOND_LINK_SPEED_KEY_10M;
+ break;
+ case ETH_SPEED_NUM_100M:
+ key_speed = BOND_LINK_SPEED_KEY_100M;
+ break;
+ case ETH_SPEED_NUM_1G:
+ key_speed = BOND_LINK_SPEED_KEY_1000M;
+ break;
+ case ETH_SPEED_NUM_10G:
+ key_speed = BOND_LINK_SPEED_KEY_10G;
+ break;
+ case ETH_SPEED_NUM_20G:
+ key_speed = BOND_LINK_SPEED_KEY_20G;
+ break;
+ case ETH_SPEED_NUM_40G:
+ key_speed = BOND_LINK_SPEED_KEY_40G;
+ break;
+ default:
+ /* Unknown speed*/
+ key_speed = 0xFFFF;
+ }
+
+ return key_speed;
+}
+
+static void
+rx_machine_update(struct bond_dev_private *internals, uint8_t slave_id,
+ struct rte_mbuf *lacp_pkt) {
+ struct lacpdu_header *lacp;
+
+ if (lacp_pkt != NULL) {
+ lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
+ RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+
+ /* This is LACP frame so pass it to rx_machine */
+ rx_machine(internals, slave_id, &lacp->lacpdu);
+ rte_pktmbuf_free(lacp_pkt);
+ } else
+ rx_machine(internals, slave_id, NULL);
+}
+
+static void
+bond_mode_8023ad_periodic_cb(void *arg)
+{
+ struct rte_eth_dev *bond_dev = arg;
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ struct port *port;
+ struct rte_eth_link link_info;
+ struct ether_addr slave_addr;
+ struct rte_mbuf *lacp_pkt = NULL;
+
+ uint8_t i, slave_id;
+
+
+ /* Update link status on each port */
+ for (i = 0; i < internals->active_slave_count; i++) {
+ uint16_t key;
+
+ slave_id = internals->active_slaves[i];
+ rte_eth_link_get_nowait(slave_id, &link_info);
+ rte_eth_macaddr_get(slave_id, &slave_addr);
+
+ if (link_info.link_status != 0) {
+ key = link_speed_key(link_info.link_speed) << 1;
+ if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
+ key |= BOND_LINK_FULL_DUPLEX_KEY;
+ } else
+ key = 0;
+
+ port = &mode_8023ad_ports[slave_id];
+
+ key = rte_cpu_to_be_16(key);
+ if (key != port->actor.key) {
+ if (!(key & rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)))
+ set_warning_flags(port, WRN_NOT_LACP_CAPABLE);
+
+ port->actor.key = key;
+ SM_FLAG_SET(port, NTT);
+ }
+
+ if (!is_same_ether_addr(&port->actor.system, &slave_addr)) {
+ ether_addr_copy(&slave_addr, &port->actor.system);
+ if (port->aggregator_port_id == slave_id)
+ SM_FLAG_SET(port, NTT);
+ }
+ }
+
+ for (i = 0; i < internals->active_slave_count; i++) {
+ slave_id = internals->active_slaves[i];
+ port = &mode_8023ad_ports[slave_id];
+
+ if ((port->actor.key &
+ rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) {
+
+ SM_FLAG_SET(port, BEGIN);
+
+ /* LACP is disabled on half duples or link is down */
+ if (SM_FLAG(port, LACP_ENABLED)) {
+ /* If port was enabled set it to BEGIN state */
+ SM_FLAG_CLR(port, LACP_ENABLED);
+ ACTOR_STATE_CLR(port, DISTRIBUTING);
+ ACTOR_STATE_CLR(port, COLLECTING);
+ }
+
+ /* Skip this port processing */
+ continue;
+ }
+
+ SM_FLAG_SET(port, LACP_ENABLED);
+
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ /* Find LACP packet to this port. Do not check subtype,
+ * it is done in function that queued packet
+ */
+ int retval = rte_ring_dequeue(port->rx_ring,
+ (void **)&lacp_pkt);
+
+ if (retval != 0)
+ lacp_pkt = NULL;
+
+ rx_machine_update(internals, slave_id, lacp_pkt);
+ } else {
+ uint16_t rx_count = rte_eth_rx_burst(slave_id,
+ internals->mode4.dedicated_queues.rx_qid,
+ &lacp_pkt, 1);
+
+ if (rx_count == 1)
+ bond_mode_8023ad_handle_slow_pkt(internals,
+ slave_id, lacp_pkt);
+ else
+ rx_machine_update(internals, slave_id, NULL);
+ }
+
+ periodic_machine(internals, slave_id);
+ mux_machine(internals, slave_id);
+ tx_machine(internals, slave_id);
+ selection_logic(internals, slave_id);
+
+ SM_FLAG_CLR(port, BEGIN);
+ show_warnings(slave_id);
+ }
+
+ rte_eal_alarm_set(internals->mode4.update_timeout_us,
+ bond_mode_8023ad_periodic_cb, arg);
+}
+
+void
+bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev,
+ uint16_t slave_id)
+{
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+
+ struct port *port = &mode_8023ad_ports[slave_id];
+ struct port_params initial = {
+ .system = { { 0 } },
+ .system_priority = rte_cpu_to_be_16(0xFFFF),
+ .key = rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY),
+ .port_priority = rte_cpu_to_be_16(0x00FF),
+ .port_number = 0,
+ };
+
+ char mem_name[RTE_ETH_NAME_MAX_LEN];
+ int socket_id;
+ unsigned element_size;
+ uint32_t total_tx_desc;
+ struct bond_tx_queue *bd_tx_q;
+ uint16_t q_id;
+
+ /* Given slave mus not be in active list */
+ RTE_ASSERT(find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, slave_id) == internals->active_slave_count);
+ RTE_SET_USED(internals); /* used only for assert when enabled */
+
+ memcpy(&port->actor, &initial, sizeof(struct port_params));
+ /* Standard requires that port ID must be grater than 0.
+ * Add 1 do get corresponding port_number */
+ port->actor.port_number = rte_cpu_to_be_16(slave_id + 1);
+
+ memcpy(&port->partner, &initial, sizeof(struct port_params));
+
+ /* default states */
+ port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
+ port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION;
+ port->sm_flags = SM_FLAGS_BEGIN;
+
+ /* use this port as agregator */
+ port->aggregator_port_id = slave_id;
+ rte_eth_promiscuous_enable(slave_id);
+
+ timer_cancel(&port->warning_timer);
+
+ if (port->mbuf_pool != NULL)
+ return;
+
+ RTE_ASSERT(port->rx_ring == NULL);
+ RTE_ASSERT(port->tx_ring == NULL);
+
+ socket_id = rte_eth_dev_socket_id(slave_id);
+ if (socket_id == (int)LCORE_ID_ANY)
+ socket_id = rte_socket_id();
+
+ element_size = sizeof(struct slow_protocol_frame) +
+ RTE_PKTMBUF_HEADROOM;
+
+ /* The size of the mempool should be at least:
+ * the sum of the TX descriptors + BOND_MODE_8023AX_SLAVE_TX_PKTS */
+ total_tx_desc = BOND_MODE_8023AX_SLAVE_TX_PKTS;
+ for (q_id = 0; q_id < bond_dev->data->nb_tx_queues; q_id++) {
+ bd_tx_q = (struct bond_tx_queue*)bond_dev->data->tx_queues[q_id];
+ total_tx_desc += bd_tx_q->nb_tx_desc;
+ }
+
+ snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
+ port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc,
+ RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
+ 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
+ 0, element_size, socket_id);
+
+ /* Any memory allocation failure in initialization is critical because
+ * resources can't be free, so reinitialization is impossible. */
+ if (port->mbuf_pool == NULL) {
+ rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
+ slave_id, mem_name, rte_strerror(rte_errno));
+ }
+
+ snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id);
+ port->rx_ring = rte_ring_create(mem_name,
+ rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0);
+
+ if (port->rx_ring == NULL) {
+ rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id,
+ mem_name, rte_strerror(rte_errno));
+ }
+
+ /* TX ring is at least one pkt longer to make room for marker packet. */
+ snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id);
+ port->tx_ring = rte_ring_create(mem_name,
+ rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0);
+
+ if (port->tx_ring == NULL) {
+ rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id,
+ mem_name, rte_strerror(rte_errno));
+ }
+}
+
+int
+bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev __rte_unused,
+ uint16_t slave_id)
+{
+ void *pkt = NULL;
+ struct port *port = NULL;
+ uint8_t old_partner_state;
+
+ port = &mode_8023ad_ports[slave_id];
+
+ ACTOR_STATE_CLR(port, AGGREGATION);
+ port->selected = UNSELECTED;
+
+ old_partner_state = port->partner_state;
+ record_default(port);
+
+ /* If partner timeout state changes then disable timer */
+ if (!((old_partner_state ^ port->partner_state) &
+ STATE_LACP_SHORT_TIMEOUT))
+ timer_cancel(&port->current_while_timer);
+
+ PARTNER_STATE_CLR(port, AGGREGATION);
+ ACTOR_STATE_CLR(port, EXPIRED);
+
+ /* flush rx/tx rings */
+ while (rte_ring_dequeue(port->rx_ring, &pkt) == 0)
+ rte_pktmbuf_free((struct rte_mbuf *)pkt);
+
+ while (rte_ring_dequeue(port->tx_ring, &pkt) == 0)
+ rte_pktmbuf_free((struct rte_mbuf *)pkt);
+ return 0;
+}
+
+void
+bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
+{
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ struct ether_addr slave_addr;
+ struct port *slave, *agg_slave;
+ uint16_t slave_id, i, j;
+
+ bond_mode_8023ad_stop(bond_dev);
+
+ for (i = 0; i < internals->active_slave_count; i++) {
+ slave_id = internals->active_slaves[i];
+ slave = &mode_8023ad_ports[slave_id];
+ rte_eth_macaddr_get(slave_id, &slave_addr);
+
+ if (is_same_ether_addr(&slave_addr, &slave->actor.system))
+ continue;
+
+ ether_addr_copy(&slave_addr, &slave->actor.system);
+ /* Do nothing if this port is not an aggregator. In other case
+ * Set NTT flag on every port that use this aggregator. */
+ if (slave->aggregator_port_id != slave_id)
+ continue;
+
+ for (j = 0; j < internals->active_slave_count; j++) {
+ agg_slave = &mode_8023ad_ports[internals->active_slaves[j]];
+ if (agg_slave->aggregator_port_id == slave_id)
+ SM_FLAG_SET(agg_slave, NTT);
+ }
+ }
+
+ if (bond_dev->data->dev_started)
+ bond_mode_8023ad_start(bond_dev);
+}
+
+static void
+bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+ uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
+
+ conf->fast_periodic_ms = mode4->fast_periodic_timeout / ms_ticks;
+ conf->slow_periodic_ms = mode4->slow_periodic_timeout / ms_ticks;
+ conf->short_timeout_ms = mode4->short_timeout / ms_ticks;
+ conf->long_timeout_ms = mode4->long_timeout / ms_ticks;
+ conf->aggregate_wait_timeout_ms = mode4->aggregate_wait_timeout / ms_ticks;
+ conf->tx_period_ms = mode4->tx_period_timeout / ms_ticks;
+ conf->update_timeout_ms = mode4->update_timeout_us / 1000;
+ conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks;
+ conf->slowrx_cb = mode4->slowrx_cb;
+ conf->agg_selection = mode4->agg_selection;
+}
+
+static void
+bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
+{
+ conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
+ conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS;
+ conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS;
+ conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS;
+ conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS;
+ conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS;
+ conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
+ conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
+ conf->slowrx_cb = NULL;
+ conf->agg_selection = AGG_STABLE;
+}
+
+static void
+bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
+
+ mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks;
+ mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks;
+ mode4->short_timeout = conf->short_timeout_ms * ms_ticks;
+ mode4->long_timeout = conf->long_timeout_ms * ms_ticks;
+ mode4->aggregate_wait_timeout = conf->aggregate_wait_timeout_ms * ms_ticks;
+ mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks;
+ mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks;
+ mode4->update_timeout_us = conf->update_timeout_ms * 1000;
+
+ mode4->dedicated_queues.enabled = 0;
+ mode4->dedicated_queues.rx_qid = UINT16_MAX;
+ mode4->dedicated_queues.tx_qid = UINT16_MAX;
+}
+
+void
+bond_mode_8023ad_setup(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_bond_8023ad_conf def_conf;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+
+ if (conf == NULL) {
+ conf = &def_conf;
+ bond_mode_8023ad_conf_get_default(conf);
+ }
+
+ bond_mode_8023ad_stop(dev);
+ bond_mode_8023ad_conf_assign(mode4, conf);
+ mode4->slowrx_cb = conf->slowrx_cb;
+ mode4->agg_selection = AGG_STABLE;
+
+ if (dev->data->dev_started)
+ bond_mode_8023ad_start(dev);
+}
+
+int
+bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
+{
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ uint8_t i;
+
+ for (i = 0; i < internals->active_slave_count; i++)
+ bond_mode_8023ad_activate_slave(bond_dev,
+ internals->active_slaves[i]);
+
+ return 0;
+}
+
+int
+bond_mode_8023ad_start(struct rte_eth_dev *bond_dev)
+{
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+ static const uint64_t us = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000;
+
+ if (mode4->slowrx_cb)
+ return rte_eal_alarm_set(us, &bond_mode_8023ad_ext_periodic_cb,
+ bond_dev);
+
+ return rte_eal_alarm_set(us, &bond_mode_8023ad_periodic_cb, bond_dev);
+}
+
+void
+bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev)
+{
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+
+ if (mode4->slowrx_cb) {
+ rte_eal_alarm_cancel(&bond_mode_8023ad_ext_periodic_cb,
+ bond_dev);
+ return;
+ }
+ rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev);
+}
+
+void
+bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
+ uint16_t slave_id, struct rte_mbuf *pkt)
+{
+ struct mode8023ad_private *mode4 = &internals->mode4;
+ struct port *port = &mode_8023ad_ports[slave_id];
+ struct marker_header *m_hdr;
+ uint64_t marker_timer, old_marker_timer;
+ int retval;
+ uint8_t wrn, subtype;
+ /* If packet is a marker, we send response now by reusing given packet
+ * and update only source MAC, destination MAC is multicast so don't
+ * update it. Other frames will be handled later by state machines */
+ subtype = rte_pktmbuf_mtod(pkt,
+ struct slow_protocol_frame *)->slow_protocol.subtype;
+
+ if (subtype == SLOW_SUBTYPE_MARKER) {
+ m_hdr = rte_pktmbuf_mtod(pkt, struct marker_header *);
+
+ if (likely(m_hdr->marker.tlv_type_marker != MARKER_TLV_TYPE_INFO)) {
+ wrn = WRN_UNKNOWN_MARKER_TYPE;
+ goto free_out;
+ }
+
+ /* Setup marker timer. Do it in loop in case concurrent access. */
+ do {
+ old_marker_timer = port->rx_marker_timer;
+ if (!timer_is_expired(&old_marker_timer)) {
+ wrn = WRN_RX_MARKER_TO_FAST;
+ goto free_out;
+ }
+
+ timer_set(&marker_timer, mode4->rx_marker_timeout);
+ retval = rte_atomic64_cmpset(&port->rx_marker_timer,
+ old_marker_timer, marker_timer);
+ } while (unlikely(retval == 0));
+
+ m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP;
+ rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr);
+
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ int retval = rte_ring_enqueue(port->tx_ring, pkt);
+ if (retval != 0) {
+ /* reset timer */
+ port->rx_marker_timer = 0;
+ wrn = WRN_TX_QUEUE_FULL;
+ goto free_out;
+ }
+ } else {
+ /* Send packet directly to the slow queue */
+ uint16_t tx_count = rte_eth_tx_burst(slave_id,
+ internals->mode4.dedicated_queues.tx_qid,
+ &pkt, 1);
+ if (tx_count != 1) {
+ /* reset timer */
+ port->rx_marker_timer = 0;
+ wrn = WRN_TX_QUEUE_FULL;
+ goto free_out;
+ }
+ }
+ } else if (likely(subtype == SLOW_SUBTYPE_LACP)) {
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ int retval = rte_ring_enqueue(port->rx_ring, pkt);
+ if (retval != 0) {
+ /* If RX fing full free lacpdu message and drop packet */
+ wrn = WRN_RX_QUEUE_FULL;
+ goto free_out;
+ }
+ } else
+ rx_machine_update(internals, slave_id, pkt);
+ } else {
+ wrn = WRN_UNKNOWN_SLOW_TYPE;
+ goto free_out;
+ }
+
+ return;
+
+free_out:
+ set_warning_flags(port, wrn);
+ rte_pktmbuf_free(pkt);
+}
+
+int
+rte_eth_bond_8023ad_conf_get(uint16_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_dev *bond_dev;
+
+ if (valid_bonded_port_id(port_id) != 0)
+ return -EINVAL;
+
+ if (conf == NULL)
+ return -EINVAL;
+
+ bond_dev = &rte_eth_devices[port_id];
+ bond_mode_8023ad_conf_get(bond_dev, conf);
+ return 0;
+}
+
+int
+rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id,
+ enum rte_bond_8023ad_agg_selection agg_selection)
+{
+ struct rte_eth_dev *bond_dev;
+ struct bond_dev_private *internals;
+ struct mode8023ad_private *mode4;
+
+ bond_dev = &rte_eth_devices[port_id];
+ internals = bond_dev->data->dev_private;
+
+ if (valid_bonded_port_id(port_id) != 0)
+ return -EINVAL;
+ if (internals->mode != 4)
+ return -EINVAL;
+
+ mode4 = &internals->mode4;
+ if (agg_selection == AGG_COUNT || agg_selection == AGG_BANDWIDTH
+ || agg_selection == AGG_STABLE)
+ mode4->agg_selection = agg_selection;
+ return 0;
+}
+
+int rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id)
+{
+ struct rte_eth_dev *bond_dev;
+ struct bond_dev_private *internals;
+ struct mode8023ad_private *mode4;
+
+ bond_dev = &rte_eth_devices[port_id];
+ internals = bond_dev->data->dev_private;
+
+ if (valid_bonded_port_id(port_id) != 0)
+ return -EINVAL;
+ if (internals->mode != 4)
+ return -EINVAL;
+ mode4 = &internals->mode4;
+
+ return mode4->agg_selection;
+}
+
+
+
+static int
+bond_8023ad_setup_validate(uint16_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ if (valid_bonded_port_id(port_id) != 0)
+ return -EINVAL;
+
+ if (conf != NULL) {
+ /* Basic sanity check */
+ if (conf->slow_periodic_ms == 0 ||
+ conf->fast_periodic_ms >= conf->slow_periodic_ms ||
+ conf->long_timeout_ms == 0 ||
+ conf->short_timeout_ms >= conf->long_timeout_ms ||
+ conf->aggregate_wait_timeout_ms == 0 ||
+ conf->tx_period_ms == 0 ||
+ conf->rx_marker_period_ms == 0 ||
+ conf->update_timeout_ms == 0) {
+ RTE_BOND_LOG(ERR, "given mode 4 configuration is invalid");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+
+int
+rte_eth_bond_8023ad_setup(uint16_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_dev *bond_dev;
+ int err;
+
+ err = bond_8023ad_setup_validate(port_id, conf);
+ if (err != 0)
+ return err;
+
+ bond_dev = &rte_eth_devices[port_id];
+ bond_mode_8023ad_setup(bond_dev, conf);
+
+ return 0;
+}
+
+
+
+
+
+int
+rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id,
+ struct rte_eth_bond_8023ad_slave_info *info)
+{
+ struct rte_eth_dev *bond_dev;
+ struct bond_dev_private *internals;
+ struct port *port;
+
+ if (info == NULL || valid_bonded_port_id(port_id) != 0 ||
+ rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
+ return -EINVAL;
+
+ bond_dev = &rte_eth_devices[port_id];
+
+ internals = bond_dev->data->dev_private;
+ if (find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, slave_id) ==
+ internals->active_slave_count)
+ return -EINVAL;
+
+ port = &mode_8023ad_ports[slave_id];
+ info->selected = port->selected;
+
+ info->actor_state = port->actor_state;
+ rte_memcpy(&info->actor, &port->actor, sizeof(port->actor));
+
+ info->partner_state = port->partner_state;
+ rte_memcpy(&info->partner, &port->partner, sizeof(port->partner));
+
+ info->agg_port_id = port->aggregator_port_id;
+ return 0;
+}
+
+static int
+bond_8023ad_ext_validate(uint16_t port_id, uint16_t slave_id)
+{
+ struct rte_eth_dev *bond_dev;
+ struct bond_dev_private *internals;
+ struct mode8023ad_private *mode4;
+
+ if (rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
+ return -EINVAL;
+
+ bond_dev = &rte_eth_devices[port_id];
+
+ if (!bond_dev->data->dev_started)
+ return -EINVAL;
+
+ internals = bond_dev->data->dev_private;
+ if (find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, slave_id) ==
+ internals->active_slave_count)
+ return -EINVAL;
+
+ mode4 = &internals->mode4;
+ if (mode4->slowrx_cb == NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id,
+ int enabled)
+{
+ struct port *port;
+ int res;
+
+ res = bond_8023ad_ext_validate(port_id, slave_id);
+ if (res != 0)
+ return res;
+
+ port = &mode_8023ad_ports[slave_id];
+
+ if (enabled)
+ ACTOR_STATE_SET(port, COLLECTING);
+ else
+ ACTOR_STATE_CLR(port, COLLECTING);
+
+ return 0;
+}
+
+int
+rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id,
+ int enabled)
+{
+ struct port *port;
+ int res;
+
+ res = bond_8023ad_ext_validate(port_id, slave_id);
+ if (res != 0)
+ return res;
+
+ port = &mode_8023ad_ports[slave_id];
+
+ if (enabled)
+ ACTOR_STATE_SET(port, DISTRIBUTING);
+ else
+ ACTOR_STATE_CLR(port, DISTRIBUTING);
+
+ return 0;
+}
+
+int
+rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id)
+{
+ struct port *port;
+ int err;
+
+ err = bond_8023ad_ext_validate(port_id, slave_id);
+ if (err != 0)
+ return err;
+
+ port = &mode_8023ad_ports[slave_id];
+ return ACTOR_STATE(port, DISTRIBUTING);
+}
+
+int
+rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id)
+{
+ struct port *port;
+ int err;
+
+ err = bond_8023ad_ext_validate(port_id, slave_id);
+ if (err != 0)
+ return err;
+
+ port = &mode_8023ad_ports[slave_id];
+ return ACTOR_STATE(port, COLLECTING);
+}
+
+int
+rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id,
+ struct rte_mbuf *lacp_pkt)
+{
+ struct port *port;
+ int res;
+
+ res = bond_8023ad_ext_validate(port_id, slave_id);
+ if (res != 0)
+ return res;
+
+ port = &mode_8023ad_ports[slave_id];
+
+ if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header))
+ return -EINVAL;
+
+ struct lacpdu_header *lacp;
+
+ /* only enqueue LACPDUs */
+ lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
+ if (lacp->lacpdu.subtype != SLOW_SUBTYPE_LACP)
+ return -EINVAL;
+
+ MODE4_DEBUG("sending LACP frame\n");
+
+ return rte_ring_enqueue(port->tx_ring, lacp_pkt);
+}
+
+static void
+bond_mode_8023ad_ext_periodic_cb(void *arg)
+{
+ struct rte_eth_dev *bond_dev = arg;
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+ struct port *port;
+ void *pkt = NULL;
+ uint16_t i, slave_id;
+
+ for (i = 0; i < internals->active_slave_count; i++) {
+ slave_id = internals->active_slaves[i];
+ port = &mode_8023ad_ports[slave_id];
+
+ if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
+ struct rte_mbuf *lacp_pkt = pkt;
+ struct lacpdu_header *lacp;
+
+ lacp = rte_pktmbuf_mtod(lacp_pkt,
+ struct lacpdu_header *);
+ RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+
+ /* This is LACP frame so pass it to rx callback.
+ * Callback is responsible for freeing mbuf.
+ */
+ mode4->slowrx_cb(slave_id, lacp_pkt);
+ }
+ }
+
+ rte_eal_alarm_set(internals->mode4.update_timeout_us,
+ bond_mode_8023ad_ext_periodic_cb, arg);
+}
+
+int
+rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port)
+{
+ int retval = 0;
+ struct rte_eth_dev *dev = &rte_eth_devices[port];
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ dev->data->dev_private;
+
+ if (check_for_bonded_ethdev(dev) != 0)
+ return -1;
+
+ if (bond_8023ad_slow_pkt_hw_filter_supported(port) != 0)
+ return -1;
+
+ /* Device must be stopped to set up slow queue */
+ if (dev->data->dev_started)
+ return -1;
+
+ internals->mode4.dedicated_queues.enabled = 1;
+
+ bond_ethdev_mode_set(dev, internals->mode);
+ return retval;
+}
+
+int
+rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port)
+{
+ int retval = 0;
+ struct rte_eth_dev *dev = &rte_eth_devices[port];
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ dev->data->dev_private;
+
+ if (check_for_bonded_ethdev(dev) != 0)
+ return -1;
+
+ /* Device must be stopped to set up slow queue */
+ if (dev->data->dev_started)
+ return -1;
+
+ internals->mode4.dedicated_queues.enabled = 0;
+
+ bond_ethdev_mode_set(dev, internals->mode);
+
+ return retval;
+}
diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h
new file mode 100644
index 00000000..d8b5dbc2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef RTE_ETH_BOND_8023AD_H_
+#define RTE_ETH_BOND_8023AD_H_
+
+#include <rte_ether.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Actor/partner states
+ */
+#define STATE_LACP_ACTIVE 0x01
+#define STATE_LACP_SHORT_TIMEOUT 0x02
+#define STATE_AGGREGATION 0x04
+#define STATE_SYNCHRONIZATION 0x08
+#define STATE_COLLECTING 0x10
+#define STATE_DISTRIBUTING 0x20
+/** Partners parameters are defaulted */
+#define STATE_DEFAULTED 0x40
+#define STATE_EXPIRED 0x80
+
+#define TLV_TYPE_ACTOR_INFORMATION 0x01
+#define TLV_TYPE_PARTNER_INFORMATION 0x02
+#define TLV_TYPE_COLLECTOR_INFORMATION 0x03
+#define TLV_TYPE_TERMINATOR_INFORMATION 0x00
+
+#define SLOW_SUBTYPE_LACP 0x01
+#define SLOW_SUBTYPE_MARKER 0x02
+
+#define MARKER_TLV_TYPE_INFO 0x01
+#define MARKER_TLV_TYPE_RESP 0x02
+
+typedef void (*rte_eth_bond_8023ad_ext_slowrx_fn)(uint16_t slave_id,
+ struct rte_mbuf *lacp_pkt);
+
+enum rte_bond_8023ad_selection {
+ UNSELECTED,
+ STANDBY,
+ SELECTED
+};
+
+enum rte_bond_8023ad_agg_selection {
+ AGG_BANDWIDTH,
+ AGG_COUNT,
+ AGG_STABLE
+};
+
+/** Generic slow protocol structure */
+struct slow_protocol {
+ uint8_t subtype;
+ uint8_t reserved_119[119];
+} __attribute__((__packed__));
+
+/** Generic slow protocol frame type structure */
+struct slow_protocol_frame {
+ struct ether_hdr eth_hdr;
+ struct slow_protocol slow_protocol;
+} __attribute__((__packed__));
+
+struct port_params {
+ uint16_t system_priority;
+ /**< System priority (unused in current implementation) */
+ struct ether_addr system;
+ /**< System ID - Slave MAC address, same as bonding MAC address */
+ uint16_t key;
+ /**< Speed information (implementation dependednt) and duplex. */
+ uint16_t port_priority;
+ /**< Priority of this (unused in current implementation) */
+ uint16_t port_number;
+ /**< Port number. It corresponds to slave port id. */
+} __attribute__((__packed__));
+
+struct lacpdu_actor_partner_params {
+ uint8_t tlv_type_info;
+ uint8_t info_length;
+ struct port_params port_params;
+ uint8_t state;
+ uint8_t reserved_3[3];
+} __attribute__((__packed__));
+
+/** LACPDU structure (5.4.2 in 802.1AX documentation). */
+struct lacpdu {
+ uint8_t subtype;
+ uint8_t version_number;
+
+ struct lacpdu_actor_partner_params actor;
+ struct lacpdu_actor_partner_params partner;
+
+ uint8_t tlv_type_collector_info;
+ uint8_t collector_info_length;
+ uint16_t collector_max_delay;
+ uint8_t reserved_12[12];
+
+ uint8_t tlv_type_terminator;
+ uint8_t terminator_length;
+ uint8_t reserved_50[50];
+} __attribute__((__packed__));
+
+/** LACPDU frame: Contains ethernet header and LACPDU. */
+struct lacpdu_header {
+ struct ether_hdr eth_hdr;
+ struct lacpdu lacpdu;
+} __attribute__((__packed__));
+
+struct marker {
+ uint8_t subtype;
+ uint8_t version_number;
+
+ uint8_t tlv_type_marker;
+ uint8_t info_length;
+ uint16_t requester_port;
+ struct ether_addr requester_system;
+ uint32_t requester_transaction_id;
+ uint8_t reserved_2[2];
+
+ uint8_t tlv_type_terminator;
+ uint8_t terminator_length;
+ uint8_t reserved_90[90];
+} __attribute__((__packed__));
+
+struct marker_header {
+ struct ether_hdr eth_hdr;
+ struct marker marker;
+} __attribute__((__packed__));
+
+struct rte_eth_bond_8023ad_conf {
+ uint32_t fast_periodic_ms;
+ uint32_t slow_periodic_ms;
+ uint32_t short_timeout_ms;
+ uint32_t long_timeout_ms;
+ uint32_t aggregate_wait_timeout_ms;
+ uint32_t tx_period_ms;
+ uint32_t rx_marker_period_ms;
+ uint32_t update_timeout_ms;
+ rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb;
+ enum rte_bond_8023ad_agg_selection agg_selection;
+};
+
+struct rte_eth_bond_8023ad_slave_info {
+ enum rte_bond_8023ad_selection selected;
+ uint8_t actor_state;
+ struct port_params actor;
+ uint8_t partner_state;
+ struct port_params partner;
+ uint16_t agg_port_id;
+};
+
+/**
+ * @internal
+ *
+ * Function returns current configuration of 802.3AX mode.
+ *
+ * @param port_id Bonding device id
+ * @param conf Pointer to timeout structure.
+ *
+ * @return
+ * 0 - if ok
+ * -EINVAL if conf is NULL
+ */
+int
+rte_eth_bond_8023ad_conf_get(uint16_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf);
+
+/**
+ * @internal
+ *
+ * Function set new configuration of 802.3AX mode.
+ *
+ * @param port_id Bonding device id
+ * @param conf Configuration, if NULL set default configuration.
+ * @return
+ * 0 - if ok
+ * -EINVAL if configuration is invalid.
+ */
+int
+rte_eth_bond_8023ad_setup(uint16_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf);
+
+/**
+ * @internal
+ *
+ * Function returns current state of given slave device.
+ *
+ * @param slave_id Port id of valid slave.
+ * @param conf buffer for configuration
+ * @return
+ * 0 - if ok
+ * -EINVAL if conf is NULL or slave id is invalid (not a slave of given
+ * bonded device or is not inactive).
+ */
+int
+rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id,
+ struct rte_eth_bond_8023ad_slave_info *conf);
+
+#ifdef __cplusplus
+}
+#endif
+
+/**
+ * Configure a slave port to start collecting.
+ *
+ * @param port_id Bonding device id
+ * @param slave_id Port id of valid slave.
+ * @param enabled Non-zero when collection enabled.
+ * @return
+ * 0 - if ok
+ * -EINVAL if slave is not valid.
+ */
+int
+rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id,
+ int enabled);
+
+/**
+ * Get COLLECTING flag from slave port actor state.
+ *
+ * @param port_id Bonding device id
+ * @param slave_id Port id of valid slave.
+ * @return
+ * 0 - if not set
+ * 1 - if set
+ * -EINVAL if slave is not valid.
+ */
+int
+rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id);
+
+/**
+ * Configure a slave port to start distributing.
+ *
+ * @param port_id Bonding device id
+ * @param slave_id Port id of valid slave.
+ * @param enabled Non-zero when distribution enabled.
+ * @return
+ * 0 - if ok
+ * -EINVAL if slave is not valid.
+ */
+int
+rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id,
+ int enabled);
+
+/**
+ * Get DISTRIBUTING flag from slave port actor state.
+ *
+ * @param port_id Bonding device id
+ * @param slave_id Port id of valid slave.
+ * @return
+ * 0 - if not set
+ * 1 - if set
+ * -EINVAL if slave is not valid.
+ */
+int
+rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id);
+
+/**
+ * LACPDU transmit path for external 802.3ad state machine. Caller retains
+ * ownership of the packet on failure.
+ *
+ * @param port_id Bonding device id
+ * @param slave_id Port ID of valid slave device.
+ * @param lacp_pkt mbuf containing LACPDU.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id,
+ struct rte_mbuf *lacp_pkt);
+
+/**
+ * Enable dedicated hw queues for 802.3ad control plane traffic on on slaves
+ *
+ * This function creates an additional tx and rx queue on each slave for
+ * dedicated 802.3ad control plane traffic . A flow filtering rule is
+ * programmed on each slave to redirect all LACP slow packets to that rx queue
+ * for processing in the LACP state machine, this removes the need to filter
+ * these packets in the bonded devices data path. The additional tx queue is
+ * used to enable the LACP state machine to enqueue LACP packets directly to
+ * slave hw independently of the bonded devices data path.
+ *
+ * To use this feature all slaves must support the programming of the flow
+ * filter rule required for rx and have enough queues that one rx and tx queue
+ * can be reserved for the LACP state machines control packets.
+ *
+ * Bonding port must be stopped to change this configuration.
+ *
+ * @param port_id Bonding device id
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port_id);
+
+/**
+ * Disable slow queue on slaves
+ *
+ * This function disables hardware slow packet filter.
+ *
+ * Bonding port must be stopped to change this configuration.
+ *
+ * @see rte_eth_bond_8023ad_slow_pkt_hw_filter_enable
+ *
+ * @param port_id Bonding device id
+ * @return
+ * 0 on success, negative value otherwise.
+ *
+ */
+int
+rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port_id);
+
+/*
+ * Get aggregator mode for 8023ad
+ * @param port_id Bonding device id
+ *
+ * @return
+ * agregator mode on success, negative value otherwise
+ */
+int
+rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id);
+
+/**
+ * Set aggregator mode for 8023ad
+ * @param port_id Bonding device id
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id,
+ enum rte_bond_8023ad_agg_selection agg_selection);
+#endif /* RTE_ETH_BOND_8023AD_H_ */
diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad_private.h b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad_private.h
new file mode 100644
index 00000000..0f490a51
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad_private.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef RTE_ETH_BOND_8023AD_PRIVATE_H_
+#define RTE_ETH_BOND_8023AD_PRIVATE_H_
+
+#include <stdint.h>
+
+#include <rte_ether.h>
+#include <rte_byteorder.h>
+#include <rte_atomic.h>
+#include <rte_flow.h>
+
+#include "rte_eth_bond_8023ad.h"
+
+#define BOND_MODE_8023AX_UPDATE_TIMEOUT_MS 100
+/** Maximum number of packets to one slave queued in TX ring. */
+#define BOND_MODE_8023AX_SLAVE_RX_PKTS 3
+/** Maximum number of LACP packets from one slave queued in TX ring. */
+#define BOND_MODE_8023AX_SLAVE_TX_PKTS 1
+/**
+ * Timeouts deffinitions (5.4.4 in 802.1AX documentation).
+ */
+#define BOND_8023AD_FAST_PERIODIC_MS 900
+#define BOND_8023AD_SLOW_PERIODIC_MS 29000
+#define BOND_8023AD_SHORT_TIMEOUT_MS 3000
+#define BOND_8023AD_LONG_TIMEOUT_MS 90000
+#define BOND_8023AD_CHURN_DETECTION_TIMEOUT_MS 60000
+#define BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS 2000
+#define BOND_8023AD_TX_MACHINE_PERIOD_MS 500
+#define BOND_8023AD_RX_MARKER_PERIOD_MS 2000
+
+/**
+ * Interval of showing warning message from state machines. All messages will
+ * be held (and gathered together) to prevent flooding.
+ * This is no parto of 802.1AX standard.
+ */
+#define BOND_8023AD_WARNINGS_PERIOD_MS 1000
+
+
+
+/**
+ * State machine flags
+ */
+#define SM_FLAGS_BEGIN 0x0001
+#define SM_FLAGS_LACP_ENABLED 0x0002
+#define SM_FLAGS_ACTOR_CHURN 0x0004
+#define SM_FLAGS_PARTNER_CHURN 0x0008
+#define SM_FLAGS_MOVED 0x0100
+#define SM_FLAGS_PARTNER_SHORT_TIMEOUT 0x0200
+#define SM_FLAGS_NTT 0x0400
+
+#define BOND_LINK_FULL_DUPLEX_KEY 0x01
+#define BOND_LINK_SPEED_KEY_10M 0x02
+#define BOND_LINK_SPEED_KEY_100M 0x04
+#define BOND_LINK_SPEED_KEY_1000M 0x08
+#define BOND_LINK_SPEED_KEY_10G 0x10
+#define BOND_LINK_SPEED_KEY_20G 0x11
+#define BOND_LINK_SPEED_KEY_40G 0x12
+
+#define WRN_RX_MARKER_TO_FAST 0x01
+#define WRN_UNKNOWN_SLOW_TYPE 0x02
+#define WRN_UNKNOWN_MARKER_TYPE 0x04
+#define WRN_NOT_LACP_CAPABLE 0x08
+#define WRN_RX_QUEUE_FULL 0x10
+#define WRN_TX_QUEUE_FULL 0x20
+
+#define CHECK_FLAGS(_variable, _f) ((_variable) & (_f))
+#define SET_FLAGS(_variable, _f) ((_variable) |= (_f))
+#define CLEAR_FLAGS(_variable, _f) ((_variable) &= ~(_f))
+
+#define SM_FLAG(_p, _f) (!!CHECK_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f))
+#define SM_FLAG_SET(_p, _f) SET_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f)
+#define SM_FLAG_CLR(_p, _f) CLEAR_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f)
+
+#define ACTOR_STATE(_p, _f) (!!CHECK_FLAGS((_p)->actor_state, STATE_ ## _f))
+#define ACTOR_STATE_SET(_p, _f) SET_FLAGS((_p)->actor_state, STATE_ ## _f)
+#define ACTOR_STATE_CLR(_p, _f) CLEAR_FLAGS((_p)->actor_state, STATE_ ## _f)
+
+#define PARTNER_STATE(_p, _f) (!!CHECK_FLAGS((_p)->partner_state, STATE_ ## _f))
+#define PARTNER_STATE_SET(_p, _f) SET_FLAGS((_p)->partner_state, STATE_ ## _f)
+#define PARTNER_STATE_CLR(_p, _f) CLEAR_FLAGS((_p)->partner_state, STATE_ ## _f)
+
+/** Variables associated with each port (5.4.7 in 802.1AX documentation). */
+struct port {
+ /**
+ * The operational values of the Actor's state parameters. Bitmask
+ * of port states.
+ */
+ uint8_t actor_state;
+
+ /** The operational Actor's port parameters */
+ struct port_params actor;
+
+ /**
+ * The operational value of the Actor's view of the current values of
+ * the Partner's state parameters. The Actor sets this variable either
+ * to the value received from the Partner in an LACPDU, or to the value
+ * of Partner_Admin_Port_State. Bitmask of port states.
+ */
+ uint8_t partner_state;
+
+ /** The operational Partner's port parameters */
+ struct port_params partner;
+
+ /* Additional port parameters not listed in documentation */
+ /** State machine flags */
+ uint16_t sm_flags;
+ enum rte_bond_8023ad_selection selected;
+
+ uint64_t current_while_timer;
+ uint64_t periodic_timer;
+ uint64_t wait_while_timer;
+ uint64_t tx_machine_timer;
+ uint64_t tx_marker_timer;
+ /* Agregator parameters */
+ /** Used aggregator port ID */
+ uint16_t aggregator_port_id;
+
+ /** Memory pool used to allocate rings */
+ struct rte_mempool *mbuf_pool;
+
+ /** Ring of LACP packets from RX burst function */
+ struct rte_ring *rx_ring;
+
+ /** Ring of slow protocol packets (LACP and MARKERS) to TX burst function */
+ struct rte_ring *tx_ring;
+
+ /** Timer which is also used as mutex. If is 0 (not running) RX marker
+ * packet might be responded. Otherwise shall be dropped. It is zeroed in
+ * mode 4 callback function after expire. */
+ volatile uint64_t rx_marker_timer;
+
+ uint64_t warning_timer;
+ volatile uint16_t warnings_to_show;
+
+ /** Memory pool used to allocate slow queues */
+ struct rte_mempool *slow_pool;
+};
+
+struct mode8023ad_private {
+ uint64_t fast_periodic_timeout;
+ uint64_t slow_periodic_timeout;
+ uint64_t short_timeout;
+ uint64_t long_timeout;
+ uint64_t aggregate_wait_timeout;
+ uint64_t tx_period_timeout;
+ uint64_t rx_marker_timeout;
+ uint64_t update_timeout_us;
+ rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb;
+ uint8_t external_sm;
+
+ struct rte_eth_link slave_link;
+ /***< slave link properties */
+
+ /**
+ * Configuration of dedicated hardware queues for control plane
+ * traffic
+ */
+ struct {
+ uint8_t enabled;
+
+ struct rte_flow *flow[RTE_MAX_ETHPORTS];
+
+ uint16_t rx_qid;
+ uint16_t tx_qid;
+ } dedicated_queues;
+ enum rte_bond_8023ad_agg_selection agg_selection;
+};
+
+/**
+ * @internal
+ * The pool of *port* structures. The size of the pool
+ * is configured at compile-time in the <rte_eth_bond_8023ad.c> file.
+ */
+extern struct port mode_8023ad_ports[];
+
+/* Forward declaration */
+struct bond_dev_private;
+
+
+/**
+ * @internal
+ *
+ * Set mode 4 configuration of bonded interface.
+ *
+ * @pre Bonded interface must be stopped.
+ *
+ * @param dev Bonded interface
+ * @param conf new configuration. If NULL set default configuration.
+ */
+void
+bond_mode_8023ad_setup(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf);
+
+/**
+ * @internal
+ *
+ * Enables 802.1AX mode and all active slaves on bonded interface.
+ *
+ * @param dev Bonded interface
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+bond_mode_8023ad_enable(struct rte_eth_dev *dev);
+
+/**
+ * @internal
+ *
+ * Disables 802.1AX mode of the bonded interface and slaves.
+ *
+ * @param dev Bonded interface
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int bond_mode_8023ad_disable(struct rte_eth_dev *dev);
+
+/**
+ * @internal
+ *
+ * Starts 802.3AX state machines management logic.
+ * @param dev Bonded interface
+ * @return
+ * 0 if machines was started, 1 if machines was already running,
+ * negative value otherwise.
+ */
+int
+bond_mode_8023ad_start(struct rte_eth_dev *dev);
+
+/**
+ * @internal
+ *
+ * Stops 802.3AX state machines management logic.
+ * @param dev Bonded interface
+ * @return
+ * 0 if this call stopped state machines, -ENOENT if alarm was not set.
+ */
+void
+bond_mode_8023ad_stop(struct rte_eth_dev *dev);
+
+/**
+ * @internal
+ *
+ * Passes given slow packet to state machines management logic.
+ * @param internals Bonded device private data.
+ * @param slave_id Slave port id.
+ * @param slot_pkt Slow packet.
+ */
+void
+bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
+ uint16_t slave_id, struct rte_mbuf *pkt);
+
+/**
+ * @internal
+ *
+ * Appends given slave used slave
+ *
+ * @param dev Bonded interface.
+ * @param port_id Slave port ID to be added
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+void
+bond_mode_8023ad_activate_slave(struct rte_eth_dev *dev, uint16_t port_id);
+
+/**
+ * @internal
+ *
+ * Denitializes and removes given slave from 802.1AX mode.
+ *
+ * @param dev Bonded interface.
+ * @param slave_num Position of slave in active_slaves array
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *dev, uint16_t slave_pos);
+
+/**
+ * Updates state when MAC was changed on bonded device or one of its slaves.
+ * @param bond_dev Bonded device
+ */
+void
+bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev);
+
+int
+bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
+ uint16_t slave_port);
+
+int
+bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port);
+
+int
+bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id);
+
+#endif /* RTE_ETH_BOND_8023AD_H_ */
diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.c b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.c
new file mode 100644
index 00000000..c3891c7e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.c
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include "rte_eth_bond_private.h"
+#include "rte_eth_bond_alb.h"
+
+static inline uint8_t
+simple_hash(uint8_t *hash_start, int hash_size)
+{
+ int i;
+ uint8_t hash;
+
+ hash = 0;
+ for (i = 0; i < hash_size; ++i)
+ hash ^= hash_start[i];
+
+ return hash;
+}
+
+static uint8_t
+calculate_slave(struct bond_dev_private *internals)
+{
+ uint8_t idx;
+
+ idx = (internals->mode6.last_slave + 1) % internals->active_slave_count;
+ internals->mode6.last_slave = idx;
+ return internals->active_slaves[idx];
+}
+
+int
+bond_mode_alb_enable(struct rte_eth_dev *bond_dev)
+{
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ struct client_data *hash_table = internals->mode6.client_table;
+
+ uint16_t data_size;
+ char mem_name[RTE_ETH_NAME_MAX_LEN];
+ int socket_id = bond_dev->data->numa_node;
+
+ /* Fill hash table with initial values */
+ memset(hash_table, 0, sizeof(struct client_data) * ALB_HASH_TABLE_SIZE);
+ rte_spinlock_init(&internals->mode6.lock);
+ internals->mode6.last_slave = ALB_NULL_INDEX;
+ internals->mode6.ntt = 0;
+
+ /* Initialize memory pool for ARP packets to send */
+ if (internals->mode6.mempool == NULL) {
+ /*
+ * 256 is size of ETH header, ARP header and nested VLAN headers.
+ * The value is chosen to be cache aligned.
+ */
+ data_size = 256 + RTE_PKTMBUF_HEADROOM;
+ snprintf(mem_name, sizeof(mem_name), "%s_ALB",
+ bond_dev->device->name);
+ internals->mode6.mempool = rte_pktmbuf_pool_create(mem_name,
+ 512 * RTE_MAX_ETHPORTS,
+ RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
+ 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
+ 0, data_size, socket_id);
+
+ if (internals->mode6.mempool == NULL) {
+ RTE_BOND_LOG(ERR, "%s: Failed to initialize ALB mempool.\n",
+ bond_dev->device->name);
+ goto mempool_alloc_error;
+ }
+ }
+
+ return 0;
+
+mempool_alloc_error:
+ return -ENOMEM;
+}
+
+void bond_mode_alb_arp_recv(struct ether_hdr *eth_h, uint16_t offset,
+ struct bond_dev_private *internals) {
+ struct arp_hdr *arp;
+
+ struct client_data *hash_table = internals->mode6.client_table;
+ struct client_data *client_info;
+
+ uint8_t hash_index;
+
+ arp = (struct arp_hdr *) ((char *) (eth_h + 1) + offset);
+
+ /* ARP Requests are forwarded to the application with no changes */
+ if (arp->arp_op != rte_cpu_to_be_16(ARP_OP_REPLY))
+ return;
+
+ /* From now on, we analyze only ARP Reply packets */
+ hash_index = simple_hash((uint8_t *) &arp->arp_data.arp_sip,
+ sizeof(arp->arp_data.arp_sip));
+ client_info = &hash_table[hash_index];
+
+ /*
+ * We got reply for ARP Request send by the application. We need to
+ * update client table when received data differ from what is stored
+ * in ALB table and issue sending update packet to that slave.
+ */
+ rte_spinlock_lock(&internals->mode6.lock);
+ if (client_info->in_use == 0 ||
+ client_info->app_ip != arp->arp_data.arp_tip ||
+ client_info->cli_ip != arp->arp_data.arp_sip ||
+ !is_same_ether_addr(&client_info->cli_mac, &arp->arp_data.arp_sha) ||
+ client_info->vlan_count != offset / sizeof(struct vlan_hdr) ||
+ memcmp(client_info->vlan, eth_h + 1, offset) != 0
+ ) {
+ client_info->in_use = 1;
+ client_info->app_ip = arp->arp_data.arp_tip;
+ client_info->cli_ip = arp->arp_data.arp_sip;
+ ether_addr_copy(&arp->arp_data.arp_sha, &client_info->cli_mac);
+ client_info->slave_idx = calculate_slave(internals);
+ rte_eth_macaddr_get(client_info->slave_idx, &client_info->app_mac);
+ ether_addr_copy(&client_info->app_mac, &arp->arp_data.arp_tha);
+ memcpy(client_info->vlan, eth_h + 1, offset);
+ client_info->vlan_count = offset / sizeof(struct vlan_hdr);
+ }
+ internals->mode6.ntt = 1;
+ rte_spinlock_unlock(&internals->mode6.lock);
+}
+
+uint16_t
+bond_mode_alb_arp_xmit(struct ether_hdr *eth_h, uint16_t offset,
+ struct bond_dev_private *internals)
+{
+ struct arp_hdr *arp;
+
+ struct client_data *hash_table = internals->mode6.client_table;
+ struct client_data *client_info;
+
+ uint8_t hash_index;
+
+ struct ether_addr bonding_mac;
+
+ arp = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
+
+ /*
+ * Traffic with src MAC other than bonding should be sent on
+ * current primary port.
+ */
+ rte_eth_macaddr_get(internals->port_id, &bonding_mac);
+ if (!is_same_ether_addr(&bonding_mac, &arp->arp_data.arp_sha)) {
+ rte_eth_macaddr_get(internals->current_primary_port,
+ &arp->arp_data.arp_sha);
+ return internals->current_primary_port;
+ }
+
+ hash_index = simple_hash((uint8_t *)&arp->arp_data.arp_tip,
+ sizeof(uint32_t));
+ client_info = &hash_table[hash_index];
+
+ rte_spinlock_lock(&internals->mode6.lock);
+ if (arp->arp_op == rte_cpu_to_be_16(ARP_OP_REPLY)) {
+ if (client_info->in_use) {
+ if (client_info->app_ip == arp->arp_data.arp_sip &&
+ client_info->cli_ip == arp->arp_data.arp_tip) {
+ /* Entry is already assigned to this client */
+ if (!is_broadcast_ether_addr(&arp->arp_data.arp_tha)) {
+ ether_addr_copy(&arp->arp_data.arp_tha,
+ &client_info->cli_mac);
+ }
+ rte_eth_macaddr_get(client_info->slave_idx,
+ &client_info->app_mac);
+ ether_addr_copy(&client_info->app_mac, &arp->arp_data.arp_sha);
+ memcpy(client_info->vlan, eth_h + 1, offset);
+ client_info->vlan_count = offset / sizeof(struct vlan_hdr);
+ rte_spinlock_unlock(&internals->mode6.lock);
+ return client_info->slave_idx;
+ }
+ }
+
+ /* Assign new slave to this client and update src mac in ARP */
+ client_info->in_use = 1;
+ client_info->ntt = 0;
+ client_info->app_ip = arp->arp_data.arp_sip;
+ ether_addr_copy(&arp->arp_data.arp_tha, &client_info->cli_mac);
+ client_info->cli_ip = arp->arp_data.arp_tip;
+ client_info->slave_idx = calculate_slave(internals);
+ rte_eth_macaddr_get(client_info->slave_idx, &client_info->app_mac);
+ ether_addr_copy(&client_info->app_mac, &arp->arp_data.arp_sha);
+ memcpy(client_info->vlan, eth_h + 1, offset);
+ client_info->vlan_count = offset / sizeof(struct vlan_hdr);
+ rte_spinlock_unlock(&internals->mode6.lock);
+ return client_info->slave_idx;
+ }
+
+ /* If packet is not ARP Reply, send it on current primary port. */
+ rte_spinlock_unlock(&internals->mode6.lock);
+ rte_eth_macaddr_get(internals->current_primary_port,
+ &arp->arp_data.arp_sha);
+ return internals->current_primary_port;
+}
+
+uint16_t
+bond_mode_alb_arp_upd(struct client_data *client_info,
+ struct rte_mbuf *pkt, struct bond_dev_private *internals)
+{
+ struct ether_hdr *eth_h;
+ struct arp_hdr *arp_h;
+ uint16_t slave_idx;
+
+ rte_spinlock_lock(&internals->mode6.lock);
+ eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+
+ ether_addr_copy(&client_info->app_mac, &eth_h->s_addr);
+ ether_addr_copy(&client_info->cli_mac, &eth_h->d_addr);
+ if (client_info->vlan_count > 0)
+ eth_h->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else
+ eth_h->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);
+
+ arp_h = (struct arp_hdr *)((char *)eth_h + sizeof(struct ether_hdr)
+ + client_info->vlan_count * sizeof(struct vlan_hdr));
+
+ memcpy(eth_h + 1, client_info->vlan,
+ client_info->vlan_count * sizeof(struct vlan_hdr));
+
+ ether_addr_copy(&client_info->app_mac, &arp_h->arp_data.arp_sha);
+ arp_h->arp_data.arp_sip = client_info->app_ip;
+ ether_addr_copy(&client_info->cli_mac, &arp_h->arp_data.arp_tha);
+ arp_h->arp_data.arp_tip = client_info->cli_ip;
+
+ arp_h->arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER);
+ arp_h->arp_pro = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ arp_h->arp_hln = ETHER_ADDR_LEN;
+ arp_h->arp_pln = sizeof(uint32_t);
+ arp_h->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY);
+
+ slave_idx = client_info->slave_idx;
+ rte_spinlock_unlock(&internals->mode6.lock);
+
+ return slave_idx;
+}
+
+void
+bond_mode_alb_client_list_upd(struct rte_eth_dev *bond_dev)
+{
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ struct client_data *client_info;
+
+ int i;
+
+ /* If active slave count is 0, it's pointless to refresh alb table */
+ if (internals->active_slave_count <= 0)
+ return;
+
+ rte_spinlock_lock(&internals->mode6.lock);
+ internals->mode6.last_slave = ALB_NULL_INDEX;
+
+ for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
+ client_info = &internals->mode6.client_table[i];
+ if (client_info->in_use) {
+ client_info->slave_idx = calculate_slave(internals);
+ rte_eth_macaddr_get(client_info->slave_idx, &client_info->app_mac);
+ internals->mode6.ntt = 1;
+ }
+ }
+ rte_spinlock_unlock(&internals->mode6.lock);
+}
diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.h b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.h
new file mode 100644
index 00000000..4640fd24
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#ifndef RTE_ETH_BOND_ALB_H_
+#define RTE_ETH_BOND_ALB_H_
+
+#include <rte_ether.h>
+#include <rte_arp.h>
+
+#define ALB_HASH_TABLE_SIZE 256
+#define ALB_NULL_INDEX 0xFFFFFFFF
+
+struct client_data {
+ /** ARP data of single client */
+ struct ether_addr app_mac;
+ /**< MAC address of application running DPDK */
+ uint32_t app_ip;
+ /**< IP address of application running DPDK */
+ struct ether_addr cli_mac;
+ /**< Client MAC address */
+ uint32_t cli_ip;
+ /**< Client IP address */
+
+ uint16_t slave_idx;
+ /**< Index of slave on which we connect with that client */
+ uint8_t in_use;
+ /**< Flag indicating if entry in client table is currently used */
+ uint8_t ntt;
+ /**< Flag indicating if we need to send update to this client on next tx */
+
+ struct vlan_hdr vlan[2];
+ /**< Content of vlan headers */
+ uint8_t vlan_count;
+ /**< Number of nested vlan headers */
+};
+
+struct mode_alb_private {
+ struct client_data client_table[ALB_HASH_TABLE_SIZE];
+ /**< Hash table storing ARP data of every client connected */
+ struct rte_mempool *mempool;
+ /**< Mempool for creating ARP update packets */
+ uint8_t ntt;
+ /**< Flag indicating if we need to send update to any client on next tx */
+ uint32_t last_slave;
+ /**< Index of last used slave in client table */
+ rte_spinlock_t lock;
+};
+
+/**
+ * ALB mode initialization.
+ *
+ * @param bond_dev Pointer to bonding device.
+ *
+ * @return
+ * Error code - 0 on success.
+ */
+int
+bond_mode_alb_enable(struct rte_eth_dev *bond_dev);
+
+/**
+ * Function handles ARP packet reception. If received ARP request, it is
+ * forwarded to application without changes. If it is ARP reply, client table
+ * is updated.
+ *
+ * @param eth_h ETH header of received packet.
+ * @param offset Vlan header offset.
+ * @param internals Bonding data.
+ */
+void
+bond_mode_alb_arp_recv(struct ether_hdr *eth_h, uint16_t offset,
+ struct bond_dev_private *internals);
+
+/**
+ * Function handles ARP packet transmission. It also decides on which slave
+ * send that packet. If packet is ARP Request, it is send on primary slave.
+ * If it is ARP Reply, it is send on slave stored in client table for that
+ * connection. On Reply function also updates data in client table.
+ *
+ * @param eth_h ETH header of transmitted packet.
+ * @param offset Vlan header offset.
+ * @param internals Bonding data.
+ *
+ * @return
+ * Index of slave on which packet should be sent.
+ */
+uint16_t
+bond_mode_alb_arp_xmit(struct ether_hdr *eth_h, uint16_t offset,
+ struct bond_dev_private *internals);
+
+/**
+ * Function fills packet with ARP data from client_info.
+ *
+ * @param client_info Data of client to which packet is sent.
+ * @param pkt Pointer to packet which is sent.
+ * @param internals Bonding data.
+ *
+ * @return
+ * Index of slawe on which packet should be sent.
+ */
+uint16_t
+bond_mode_alb_arp_upd(struct client_data *client_info,
+ struct rte_mbuf *pkt, struct bond_dev_private *internals);
+
+/**
+ * Function updates slave indexes of active connections.
+ *
+ * @param bond_dev Pointer to bonded device struct.
+ */
+void
+bond_mode_alb_client_list_upd(struct rte_eth_dev *bond_dev);
+
+#endif /* RTE_ETH_BOND_ALB_H_ */
diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_api.c b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_api.c
new file mode 100644
index 00000000..8bc04cfd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_api.c
@@ -0,0 +1,867 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_ethdev_driver.h>
+#include <rte_tcp.h>
+#include <rte_bus_vdev.h>
+#include <rte_kvargs.h>
+
+#include "rte_eth_bond.h"
+#include "rte_eth_bond_private.h"
+#include "rte_eth_bond_8023ad_private.h"
+
+int
+check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
+{
+ /* Check valid pointer */
+ if (eth_dev->device->driver->name == NULL)
+ return -1;
+
+ /* return 0 if driver name matches */
+ return eth_dev->device->driver->name != pmd_bond_drv.driver.name;
+}
+
+int
+valid_bonded_port_id(uint16_t port_id)
+{
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
+ return check_for_bonded_ethdev(&rte_eth_devices[port_id]);
+}
+
+int
+check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev)
+{
+ int i;
+ struct bond_dev_private *internals;
+
+ if (check_for_bonded_ethdev(eth_dev) != 0)
+ return 0;
+
+ internals = eth_dev->data->dev_private;
+
+ /* Check if any of slave devices is a bonded device */
+ for (i = 0; i < internals->slave_count; i++)
+ if (valid_bonded_port_id(internals->slaves[i].port_id) == 0)
+ return 1;
+
+ return 0;
+}
+
+int
+valid_slave_port_id(uint16_t port_id, uint8_t mode)
+{
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
+
+ /* Verify that port_id refers to a non bonded port */
+ if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0 &&
+ mode == BONDING_MODE_8023AD) {
+ RTE_BOND_LOG(ERR, "Cannot add slave to bonded device in 802.3ad"
+ " mode as slave is also a bonded device, only "
+ "physical devices can be support in this mode.");
+ return -1;
+ }
+
+ return 0;
+}
+
+void
+activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
+{
+ struct bond_dev_private *internals = eth_dev->data->dev_private;
+ uint8_t active_count = internals->active_slave_count;
+
+ if (internals->mode == BONDING_MODE_8023AD)
+ bond_mode_8023ad_activate_slave(eth_dev, port_id);
+
+ if (internals->mode == BONDING_MODE_TLB
+ || internals->mode == BONDING_MODE_ALB) {
+
+ internals->tlb_slaves_order[active_count] = port_id;
+ }
+
+ RTE_ASSERT(internals->active_slave_count <
+ (RTE_DIM(internals->active_slaves) - 1));
+
+ internals->active_slaves[internals->active_slave_count] = port_id;
+ internals->active_slave_count++;
+
+ if (internals->mode == BONDING_MODE_TLB)
+ bond_tlb_activate_slave(internals);
+ if (internals->mode == BONDING_MODE_ALB)
+ bond_mode_alb_client_list_upd(eth_dev);
+}
+
+void
+deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
+{
+ uint16_t slave_pos;
+ struct bond_dev_private *internals = eth_dev->data->dev_private;
+ uint16_t active_count = internals->active_slave_count;
+
+ if (internals->mode == BONDING_MODE_8023AD) {
+ bond_mode_8023ad_stop(eth_dev);
+ bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
+ } else if (internals->mode == BONDING_MODE_TLB
+ || internals->mode == BONDING_MODE_ALB)
+ bond_tlb_disable(internals);
+
+ slave_pos = find_slave_by_id(internals->active_slaves, active_count,
+ port_id);
+
+ /* If slave was not at the end of the list
+ * shift active slaves up active array list */
+ if (slave_pos < active_count) {
+ active_count--;
+ memmove(internals->active_slaves + slave_pos,
+ internals->active_slaves + slave_pos + 1,
+ (active_count - slave_pos) *
+ sizeof(internals->active_slaves[0]));
+ }
+
+ RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
+ internals->active_slave_count = active_count;
+
+ if (eth_dev->data->dev_started) {
+ if (internals->mode == BONDING_MODE_8023AD) {
+ bond_mode_8023ad_start(eth_dev);
+ } else if (internals->mode == BONDING_MODE_TLB) {
+ bond_tlb_enable(internals);
+ } else if (internals->mode == BONDING_MODE_ALB) {
+ bond_tlb_enable(internals);
+ bond_mode_alb_client_list_upd(eth_dev);
+ }
+ }
+}
+
+int
+rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
+{
+ struct bond_dev_private *internals;
+ char devargs[52];
+ uint16_t port_id;
+ int ret;
+
+ if (name == NULL) {
+ RTE_BOND_LOG(ERR, "Invalid name specified");
+ return -EINVAL;
+ }
+
+ ret = snprintf(devargs, sizeof(devargs),
+ "driver=net_bonding,mode=%d,socket_id=%d", mode, socket_id);
+ if (ret < 0 || ret >= (int)sizeof(devargs))
+ return -ENOMEM;
+
+ ret = rte_vdev_init(name, devargs);
+ if (ret)
+ return -ENOMEM;
+
+ ret = rte_eth_dev_get_port_by_name(name, &port_id);
+ RTE_ASSERT(!ret);
+
+ /*
+ * To make bond_ethdev_configure() happy we need to free the
+ * internals->kvlist here.
+ *
+ * Also see comment in bond_ethdev_configure().
+ */
+ internals = rte_eth_devices[port_id].data->dev_private;
+ rte_kvargs_free(internals->kvlist);
+ internals->kvlist = NULL;
+
+ return port_id;
+}
+
+int
+rte_eth_bond_free(const char *name)
+{
+ return rte_vdev_uninit(name);
+}
+
+static int
+slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+ struct bond_dev_private *internals;
+ int found;
+ int res = 0;
+ uint64_t slab = 0;
+ uint32_t pos = 0;
+ uint16_t first;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
+ return 0;
+
+ internals = bonded_eth_dev->data->dev_private;
+ found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab);
+ first = pos;
+
+ if (!found)
+ return 0;
+
+ do {
+ uint32_t i;
+ uint64_t mask;
+
+ for (i = 0, mask = 1;
+ i < RTE_BITMAP_SLAB_BIT_SIZE;
+ i ++, mask <<= 1) {
+ if (unlikely(slab & mask)) {
+ uint16_t vlan_id = pos + i;
+
+ res = rte_eth_dev_vlan_filter(slave_port_id,
+ vlan_id, 1);
+ }
+ }
+ found = rte_bitmap_scan(internals->vlan_filter_bmp,
+ &pos, &slab);
+ } while (found && first != pos && res == 0);
+
+ return res;
+}
+
+static int
+slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals)
+{
+ struct rte_flow *flow;
+ struct rte_flow_error ferror;
+ uint16_t slave_port_id = internals->slaves[slave_id].port_id;
+
+ if (internals->flow_isolated_valid != 0) {
+ rte_eth_dev_stop(slave_port_id);
+ if (rte_flow_isolate(slave_port_id, internals->flow_isolated,
+ &ferror)) {
+ RTE_BOND_LOG(ERR, "rte_flow_isolate failed for slave"
+ " %d: %s", slave_id, ferror.message ?
+ ferror.message : "(no stated reason)");
+ return -1;
+ }
+ }
+ TAILQ_FOREACH(flow, &internals->flow_list, next) {
+ flow->flows[slave_id] = rte_flow_create(slave_port_id,
+ &flow->fd->attr,
+ flow->fd->items,
+ flow->fd->actions,
+ &ferror);
+ if (flow->flows[slave_id] == NULL) {
+ RTE_BOND_LOG(ERR, "Cannot create flow for slave"
+ " %d: %s", slave_id,
+ ferror.message ? ferror.message :
+ "(no stated reason)");
+ /* Destroy successful bond flows from the slave */
+ TAILQ_FOREACH(flow, &internals->flow_list, next) {
+ if (flow->flows[slave_id] != NULL) {
+ rte_flow_destroy(slave_port_id,
+ flow->flows[slave_id],
+ &ferror);
+ flow->flows[slave_id] = NULL;
+ }
+ }
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int
+__eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
+{
+ struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
+ struct bond_dev_private *internals;
+ struct rte_eth_link link_props;
+ struct rte_eth_dev_info dev_info;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ internals = bonded_eth_dev->data->dev_private;
+
+ if (valid_slave_port_id(slave_port_id, internals->mode) != 0)
+ return -1;
+
+ slave_eth_dev = &rte_eth_devices[slave_port_id];
+ if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
+ RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device");
+ return -1;
+ }
+
+ rte_eth_dev_info_get(slave_port_id, &dev_info);
+ if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) {
+ RTE_BOND_LOG(ERR, "Slave (port %u) max_rx_pktlen too small",
+ slave_port_id);
+ return -1;
+ }
+
+ slave_add(internals, slave_eth_dev);
+
+ /* We need to store slaves reta_size to be able to synchronize RETA for all
+ * slave devices even if its sizes are different.
+ */
+ internals->slaves[internals->slave_count].reta_size = dev_info.reta_size;
+
+ if (internals->slave_count < 1) {
+ /* if MAC is not user defined then use MAC of first slave add to
+ * bonded device */
+ if (!internals->user_defined_mac) {
+ if (mac_address_set(bonded_eth_dev,
+ slave_eth_dev->data->mac_addrs)) {
+ RTE_BOND_LOG(ERR, "Failed to set MAC address");
+ return -1;
+ }
+ }
+
+ /* Inherit eth dev link properties from first slave */
+ link_properties_set(bonded_eth_dev,
+ &(slave_eth_dev->data->dev_link));
+
+ /* Make primary slave */
+ internals->primary_port = slave_port_id;
+ internals->current_primary_port = slave_port_id;
+
+ /* Inherit queues settings from first slave */
+ internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
+ internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
+
+ internals->reta_size = dev_info.reta_size;
+
+ /* Take the first dev's offload capabilities */
+ internals->rx_offload_capa = dev_info.rx_offload_capa;
+ internals->tx_offload_capa = dev_info.tx_offload_capa;
+ internals->rx_queue_offload_capa = dev_info.rx_queue_offload_capa;
+ internals->tx_queue_offload_capa = dev_info.tx_queue_offload_capa;
+ internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads;
+
+ /* Inherit first slave's max rx packet size */
+ internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen;
+
+ } else {
+ internals->rx_offload_capa &= dev_info.rx_offload_capa;
+ internals->tx_offload_capa &= dev_info.tx_offload_capa;
+ internals->rx_queue_offload_capa &= dev_info.rx_queue_offload_capa;
+ internals->tx_queue_offload_capa &= dev_info.tx_queue_offload_capa;
+ internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;
+
+ /* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
+ * the power of 2, the lower one is GCD
+ */
+ if (internals->reta_size > dev_info.reta_size)
+ internals->reta_size = dev_info.reta_size;
+
+ if (!internals->max_rx_pktlen &&
+ dev_info.max_rx_pktlen < internals->candidate_max_rx_pktlen)
+ internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen;
+ }
+
+ bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
+ internals->flow_type_rss_offloads;
+
+ if (slave_rte_flow_prepare(internals->slave_count, internals) != 0) {
+ RTE_BOND_LOG(ERR, "Failed to prepare new slave flows: port=%d",
+ slave_port_id);
+ return -1;
+ }
+
+ /* Add additional MAC addresses to the slave */
+ if (slave_add_mac_addresses(bonded_eth_dev, slave_port_id) != 0) {
+ RTE_BOND_LOG(ERR, "Failed to add mac address(es) to slave %hu",
+ slave_port_id);
+ return -1;
+ }
+
+ internals->slave_count++;
+
+ if (bonded_eth_dev->data->dev_started) {
+ if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
+ internals->slave_count--;
+ RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
+ slave_port_id);
+ return -1;
+ }
+ }
+
+ /* Add slave details to bonded device */
+ slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;
+
+ /* Update all slave devices MACs */
+ mac_address_slaves_update(bonded_eth_dev);
+
+ /* Register link status change callback with bonded device pointer as
+ * argument*/
+ rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
+ bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id);
+
+ /* If bonded device is started then we can add the slave to our active
+ * slave array */
+ if (bonded_eth_dev->data->dev_started) {
+ rte_eth_link_get_nowait(slave_port_id, &link_props);
+
+ if (link_props.link_status == ETH_LINK_UP) {
+ if (internals->active_slave_count == 0 &&
+ !internals->user_defined_primary_port)
+ bond_ethdev_primary_set(internals,
+ slave_port_id);
+ }
+ }
+
+ slave_vlan_filter_set(bonded_port_id, slave_port_id);
+
+ return 0;
+
+}
+
+int
+rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+ struct bond_dev_private *internals;
+
+ int retval;
+
+ /* Verify that port id's are valid bonded and slave ports */
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ internals = bonded_eth_dev->data->dev_private;
+
+ rte_spinlock_lock(&internals->lock);
+
+ retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id);
+
+ rte_spinlock_unlock(&internals->lock);
+
+ return retval;
+}
+
+static int
+__eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
+ uint16_t slave_port_id)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+ struct bond_dev_private *internals;
+ struct rte_eth_dev *slave_eth_dev;
+ struct rte_flow_error flow_error;
+ struct rte_flow *flow;
+ int i, slave_idx;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ internals = bonded_eth_dev->data->dev_private;
+
+ if (valid_slave_port_id(slave_port_id, internals->mode) < 0)
+ return -1;
+
+ /* first remove from active slave list */
+ slave_idx = find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, slave_port_id);
+
+ if (slave_idx < internals->active_slave_count)
+ deactivate_slave(bonded_eth_dev, slave_port_id);
+
+ slave_idx = -1;
+ /* now find in slave list */
+ for (i = 0; i < internals->slave_count; i++)
+ if (internals->slaves[i].port_id == slave_port_id) {
+ slave_idx = i;
+ break;
+ }
+
+ if (slave_idx < 0) {
+ RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %d",
+ internals->slave_count);
+ return -1;
+ }
+
+ /* Un-register link status change callback with bonded device pointer as
+ * argument*/
+ rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
+ bond_ethdev_lsc_event_callback,
+ &rte_eth_devices[bonded_port_id].data->port_id);
+
+ /* Restore original MAC address of slave device */
+ rte_eth_dev_default_mac_addr_set(slave_port_id,
+ &(internals->slaves[slave_idx].persisted_mac_addr));
+
+ /* remove additional MAC addresses from the slave */
+ slave_remove_mac_addresses(bonded_eth_dev, slave_port_id);
+
+ /*
+ * Remove bond device flows from slave device.
+ * Note: don't restore flow isolate mode.
+ */
+ TAILQ_FOREACH(flow, &internals->flow_list, next) {
+ if (flow->flows[slave_idx] != NULL) {
+ rte_flow_destroy(slave_port_id, flow->flows[slave_idx],
+ &flow_error);
+ flow->flows[slave_idx] = NULL;
+ }
+ }
+
+ slave_eth_dev = &rte_eth_devices[slave_port_id];
+ slave_remove(internals, slave_eth_dev);
+ slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
+
+ /* first slave in the active list will be the primary by default,
+ * otherwise use first device in list */
+ if (internals->current_primary_port == slave_port_id) {
+ if (internals->active_slave_count > 0)
+ internals->current_primary_port = internals->active_slaves[0];
+ else if (internals->slave_count > 0)
+ internals->current_primary_port = internals->slaves[0].port_id;
+ else
+ internals->primary_port = 0;
+ }
+
+ if (internals->active_slave_count < 1) {
+ /* if no slaves are any longer attached to bonded device and MAC is not
+ * user defined then clear MAC of bonded device as it will be reset
+ * when a new slave is added */
+ if (internals->slave_count < 1 && !internals->user_defined_mac)
+ memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0,
+ sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs)));
+ }
+ if (internals->slave_count == 0) {
+ internals->rx_offload_capa = 0;
+ internals->tx_offload_capa = 0;
+ internals->rx_queue_offload_capa = 0;
+ internals->tx_queue_offload_capa = 0;
+ internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+ internals->reta_size = 0;
+ internals->candidate_max_rx_pktlen = 0;
+ internals->max_rx_pktlen = 0;
+ }
+ return 0;
+}
+
+int
+rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+ struct bond_dev_private *internals;
+ int retval;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ internals = bonded_eth_dev->data->dev_private;
+
+ rte_spinlock_lock(&internals->lock);
+
+ retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id);
+
+ rte_spinlock_unlock(&internals->lock);
+
+ return retval;
+}
+
+int
+rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+
+ if (check_for_master_bonded_ethdev(bonded_eth_dev) != 0 &&
+ mode == BONDING_MODE_8023AD)
+ return -1;
+
+ return bond_ethdev_mode_set(bonded_eth_dev, mode);
+}
+
+int
+rte_eth_bond_mode_get(uint16_t bonded_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ return internals->mode;
+}
+
+int
+rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ if (valid_slave_port_id(slave_port_id, internals->mode) != 0)
+ return -1;
+
+ internals->user_defined_primary_port = 1;
+ internals->primary_port = slave_port_id;
+
+ bond_ethdev_primary_set(internals, slave_port_id);
+
+ return 0;
+}
+
+int
+rte_eth_bond_primary_get(uint16_t bonded_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ if (internals->slave_count < 1)
+ return -1;
+
+ return internals->current_primary_port;
+}
+
+int
+rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
+ uint16_t len)
+{
+ struct bond_dev_private *internals;
+ uint8_t i;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ if (slaves == NULL)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ if (internals->slave_count > len)
+ return -1;
+
+ for (i = 0; i < internals->slave_count; i++)
+ slaves[i] = internals->slaves[i].port_id;
+
+ return internals->slave_count;
+}
+
+int
+rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
+ uint16_t len)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ if (slaves == NULL)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ if (internals->active_slave_count > len)
+ return -1;
+
+ memcpy(slaves, internals->active_slaves,
+ internals->active_slave_count * sizeof(internals->active_slaves[0]));
+
+ return internals->active_slave_count;
+}
+
+int
+rte_eth_bond_mac_address_set(uint16_t bonded_port_id,
+ struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ internals = bonded_eth_dev->data->dev_private;
+
+ /* Set MAC Address of Bonded Device */
+ if (mac_address_set(bonded_eth_dev, mac_addr))
+ return -1;
+
+ internals->user_defined_mac = 1;
+
+ /* Update all slave devices MACs*/
+ if (internals->slave_count > 0)
+ return mac_address_slaves_update(bonded_eth_dev);
+
+ return 0;
+}
+
+int
+rte_eth_bond_mac_address_reset(uint16_t bonded_port_id)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ internals = bonded_eth_dev->data->dev_private;
+
+ internals->user_defined_mac = 0;
+
+ if (internals->slave_count > 0) {
+ int slave_port;
+ /* Get the primary slave location based on the primary port
+ * number as, while slave_add(), we will keep the primary
+ * slave based on slave_count,but not based on the primary port.
+ */
+ for (slave_port = 0; slave_port < internals->slave_count;
+ slave_port++) {
+ if (internals->slaves[slave_port].port_id ==
+ internals->primary_port)
+ break;
+ }
+
+ /* Set MAC Address of Bonded Device */
+ if (mac_address_set(bonded_eth_dev,
+ &internals->slaves[slave_port].persisted_mac_addr)
+ != 0) {
+ RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
+ return -1;
+ }
+ /* Update all slave devices MAC addresses */
+ return mac_address_slaves_update(bonded_eth_dev);
+ }
+ /* No need to update anything as no slaves present */
+ return 0;
+}
+
+int
+rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ switch (policy) {
+ case BALANCE_XMIT_POLICY_LAYER2:
+ internals->balance_xmit_policy = policy;
+ internals->burst_xmit_hash = burst_xmit_l2_hash;
+ break;
+ case BALANCE_XMIT_POLICY_LAYER23:
+ internals->balance_xmit_policy = policy;
+ internals->burst_xmit_hash = burst_xmit_l23_hash;
+ break;
+ case BALANCE_XMIT_POLICY_LAYER34:
+ internals->balance_xmit_policy = policy;
+ internals->burst_xmit_hash = burst_xmit_l34_hash;
+ break;
+
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+int
+rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ return internals->balance_xmit_policy;
+}
+
+int
+rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+ internals->link_status_polling_interval_ms = internal_ms;
+
+ return 0;
+}
+
+int
+rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ return internals->link_status_polling_interval_ms;
+}
+
+int
+rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
+ uint32_t delay_ms)
+
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+ internals->link_down_delay_ms = delay_ms;
+
+ return 0;
+}
+
+int
+rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ return internals->link_down_delay_ms;
+}
+
+int
+rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, uint32_t delay_ms)
+
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+ internals->link_up_delay_ms = delay_ms;
+
+ return 0;
+}
+
+int
+rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ return internals->link_up_delay_ms;
+}
diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_args.c b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_args.c
new file mode 100644
index 00000000..b60fde6a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_args.c
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <rte_devargs.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_kvargs.h>
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_etheraddr.h>
+
+#include "rte_eth_bond.h"
+#include "rte_eth_bond_private.h"
+
+const char *pmd_bond_init_valid_arguments[] = {
+ PMD_BOND_SLAVE_PORT_KVARG,
+ PMD_BOND_PRIMARY_SLAVE_KVARG,
+ PMD_BOND_MODE_KVARG,
+ PMD_BOND_XMIT_POLICY_KVARG,
+ PMD_BOND_SOCKET_ID_KVARG,
+ PMD_BOND_MAC_ADDR_KVARG,
+ PMD_BOND_AGG_MODE_KVARG,
+ "driver",
+ NULL
+};
+
+static inline int
+find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr)
+{
+ struct rte_pci_device *pci_dev;
+ struct rte_pci_addr *eth_pci_addr;
+ unsigned i;
+
+ RTE_ETH_FOREACH_DEV(i) {
+ pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[i]);
+ eth_pci_addr = &pci_dev->addr;
+
+ if (pci_addr->bus == eth_pci_addr->bus &&
+ pci_addr->devid == eth_pci_addr->devid &&
+ pci_addr->domain == eth_pci_addr->domain &&
+ pci_addr->function == eth_pci_addr->function)
+ return i;
+ }
+ return -1;
+}
+
+static inline int
+find_port_id_by_dev_name(const char *name)
+{
+ unsigned i;
+
+ RTE_ETH_FOREACH_DEV(i) {
+ if (rte_eth_devices[i].data == NULL)
+ continue;
+
+ if (strcmp(rte_eth_devices[i].device->name, name) == 0)
+ return i;
+ }
+ return -1;
+}
+
+static inline int
+bond_pci_addr_cmp(const struct rte_device *dev, const void *_pci_addr)
+{
+ struct rte_pci_device *pdev;
+ const struct rte_pci_addr *paddr = _pci_addr;
+
+ pdev = RTE_DEV_TO_PCI(*(struct rte_device **)(void *)&dev);
+ return rte_eal_compare_pci_addr(&pdev->addr, paddr);
+}
+
+/**
+ * Parses a port identifier string to a port id by pci address, then by name,
+ * and finally port id.
+ */
+static inline int
+parse_port_id(const char *port_str)
+{
+ struct rte_pci_addr dev_addr;
+ struct rte_bus *pci_bus;
+ struct rte_device *dev;
+ int port_id;
+
+ pci_bus = rte_bus_find_by_name("pci");
+ if (pci_bus == NULL) {
+ RTE_LOG(ERR, PMD, "unable to find PCI bus\n");
+ return -1;
+ }
+
+ /* try parsing as pci address, physical devices */
+ if (pci_bus->parse(port_str, &dev_addr) == 0) {
+ dev = pci_bus->find_device(NULL, bond_pci_addr_cmp, &dev_addr);
+ if (dev == NULL) {
+ RTE_BOND_LOG(ERR, "unable to find PCI device");
+ return -1;
+ }
+ port_id = find_port_id_by_pci_addr(&dev_addr);
+ if (port_id < 0)
+ return -1;
+ } else {
+ /* try parsing as device name, virtual devices */
+ port_id = find_port_id_by_dev_name(port_str);
+ if (port_id < 0) {
+ char *end;
+ errno = 0;
+
+ /* try parsing as port id */
+ port_id = strtol(port_str, &end, 10);
+ if (*end != 0 || errno != 0)
+ return -1;
+ }
+ }
+
+ if (port_id < 0 || port_id > RTE_MAX_ETHPORTS) {
+ RTE_BOND_LOG(ERR, "Slave port specified (%s) outside expected range",
+ port_str);
+ return -1;
+ }
+ return port_id;
+}
+
+int
+bond_ethdev_parse_slave_port_kvarg(const char *key,
+ const char *value, void *extra_args)
+{
+ struct bond_ethdev_slave_ports *slave_ports;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ slave_ports = extra_args;
+
+ if (strcmp(key, PMD_BOND_SLAVE_PORT_KVARG) == 0) {
+ int port_id = parse_port_id(value);
+ if (port_id < 0) {
+ RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified",
+ value);
+ return -1;
+ } else
+ slave_ports->slaves[slave_ports->slave_count++] =
+ port_id;
+ }
+ return 0;
+}
+
+int
+bond_ethdev_parse_slave_mode_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ uint8_t *mode;
+ char *endptr;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ mode = extra_args;
+
+ errno = 0;
+ *mode = strtol(value, &endptr, 10);
+ if (*endptr != 0 || errno != 0)
+ return -1;
+
+ /* validate mode value */
+ switch (*mode) {
+ case BONDING_MODE_ROUND_ROBIN:
+ case BONDING_MODE_ACTIVE_BACKUP:
+ case BONDING_MODE_BALANCE:
+ case BONDING_MODE_BROADCAST:
+ case BONDING_MODE_8023AD:
+ case BONDING_MODE_TLB:
+ case BONDING_MODE_ALB:
+ return 0;
+ default:
+ RTE_BOND_LOG(ERR, "Invalid slave mode value (%s) specified", value);
+ return -1;
+ }
+}
+
+int
+bond_ethdev_parse_slave_agg_mode_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ uint8_t *agg_mode;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ agg_mode = extra_args;
+
+ errno = 0;
+ if (strncmp(value, "stable", 6) == 0)
+ *agg_mode = AGG_STABLE;
+
+ if (strncmp(value, "bandwidth", 9) == 0)
+ *agg_mode = AGG_BANDWIDTH;
+
+ if (strncmp(value, "count", 5) == 0)
+ *agg_mode = AGG_COUNT;
+
+ switch (*agg_mode) {
+ case AGG_STABLE:
+ case AGG_BANDWIDTH:
+ case AGG_COUNT:
+ return 0;
+ default:
+ RTE_BOND_LOG(ERR, "Invalid agg mode value stable/bandwidth/count");
+ return -1;
+ }
+}
+
+int
+bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int socket_id;
+ char *endptr;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ errno = 0;
+ socket_id = (uint8_t)strtol(value, &endptr, 10);
+ if (*endptr != 0 || errno != 0)
+ return -1;
+
+ /* validate socket id value */
+ if (socket_id >= 0) {
+ *(uint8_t *)extra_args = (uint8_t)socket_id;
+ return 0;
+ }
+ return -1;
+}
+
+int
+bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int primary_slave_port_id;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ primary_slave_port_id = parse_port_id(value);
+ if (primary_slave_port_id < 0)
+ return -1;
+
+ *(uint16_t *)extra_args = (uint16_t)primary_slave_port_id;
+
+ return 0;
+}
+
+int
+bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ uint8_t *xmit_policy;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ xmit_policy = extra_args;
+
+ if (strcmp(PMD_BOND_XMIT_POLICY_LAYER2_KVARG, value) == 0)
+ *xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
+ else if (strcmp(PMD_BOND_XMIT_POLICY_LAYER23_KVARG, value) == 0)
+ *xmit_policy = BALANCE_XMIT_POLICY_LAYER23;
+ else if (strcmp(PMD_BOND_XMIT_POLICY_LAYER34_KVARG, value) == 0)
+ *xmit_policy = BALANCE_XMIT_POLICY_LAYER34;
+ else
+ return -1;
+
+ return 0;
+}
+
+int
+bond_ethdev_parse_bond_mac_addr_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ /* Parse MAC */
+ return cmdline_parse_etheraddr(NULL, value, extra_args,
+ sizeof(struct ether_addr));
+}
+
+int
+bond_ethdev_parse_time_ms_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ uint32_t time_ms;
+ char *endptr;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ errno = 0;
+ time_ms = (uint32_t)strtol(value, &endptr, 10);
+ if (*endptr != 0 || errno != 0)
+ return -1;
+
+ *(uint32_t *)extra_args = time_ms;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_flow.c b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_flow.c
new file mode 100644
index 00000000..31e4bcae
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_flow.c
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <sys/queue.h>
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include <rte_flow.h>
+
+#include "rte_eth_bond_private.h"
+
+static struct rte_flow *
+bond_flow_alloc(int numa_node, const struct rte_flow_attr *attr,
+ const struct rte_flow_item *items,
+ const struct rte_flow_action *actions)
+{
+ struct rte_flow *flow;
+ size_t fdsz;
+
+ fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
+ flow = rte_zmalloc_socket(NULL, sizeof(struct rte_flow) + fdsz,
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (unlikely(flow == NULL)) {
+ RTE_BOND_LOG(ERR, "Could not allocate new flow");
+ return NULL;
+ }
+ flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
+ if (unlikely(rte_flow_copy(flow->fd, fdsz, attr, items, actions) !=
+ fdsz)) {
+ RTE_BOND_LOG(ERR, "Failed to copy flow description");
+ rte_free(flow);
+ return NULL;
+ }
+ return flow;
+}
+
+static void
+bond_flow_release(struct rte_flow **flow)
+{
+ rte_free(*flow);
+ *flow = NULL;
+}
+
+static int
+bond_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+ int ret;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_flow_validate(internals->slaves[i].port_id, attr,
+ patterns, actions, err);
+ if (ret) {
+ RTE_BOND_LOG(ERR, "Operation rte_flow_validate failed"
+ " for slave %d with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static struct rte_flow *
+bond_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_flow *flow;
+ int i;
+
+ flow = bond_flow_alloc(dev->data->numa_node, attr, patterns, actions);
+ if (unlikely(flow == NULL)) {
+ rte_flow_error_set(err, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOMEM));
+ return NULL;
+ }
+ for (i = 0; i < internals->slave_count; i++) {
+ flow->flows[i] = rte_flow_create(internals->slaves[i].port_id,
+ attr, patterns, actions, err);
+ if (unlikely(flow->flows[i] == NULL)) {
+ RTE_BOND_LOG(ERR, "Failed to create flow on slave %d",
+ i);
+ goto err;
+ }
+ }
+ TAILQ_INSERT_TAIL(&internals->flow_list, flow, next);
+ return flow;
+err:
+ /* Destroy all slaves flows. */
+ for (i = 0; i < internals->slave_count; i++) {
+ if (flow->flows[i] != NULL)
+ rte_flow_destroy(internals->slaves[i].port_id,
+ flow->flows[i], err);
+ }
+ bond_flow_release(&flow);
+ return NULL;
+}
+
+static int
+bond_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ int lret;
+
+ if (unlikely(flow->flows[i] == NULL))
+ continue;
+ lret = rte_flow_destroy(internals->slaves[i].port_id,
+ flow->flows[i], err);
+ if (unlikely(lret != 0)) {
+ RTE_BOND_LOG(ERR, "Failed to destroy flow on slave %d:"
+ " %d", i, lret);
+ ret = lret;
+ }
+ }
+ TAILQ_REMOVE(&internals->flow_list, flow, next);
+ bond_flow_release(&flow);
+ return ret;
+}
+
+static int
+bond_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_flow *flow;
+ void *tmp;
+ int ret = 0;
+ int lret;
+
+ /* Destroy all bond flows from its slaves instead of flushing them to
+ * keep the LACP flow or any other external flows.
+ */
+ TAILQ_FOREACH_SAFE(flow, &internals->flow_list, next, tmp) {
+ lret = bond_flow_destroy(dev, flow, err);
+ if (unlikely(lret != 0))
+ ret = lret;
+ }
+ if (unlikely(ret != 0))
+ RTE_BOND_LOG(ERR, "Failed to flush flow in all slaves");
+ return ret;
+}
+
+static int
+bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action,
+ struct rte_flow_query_count *count,
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_flow_query_count slave_count;
+ int i;
+ int ret;
+
+ count->bytes = 0;
+ count->hits = 0;
+ rte_memcpy(&slave_count, count, sizeof(slave_count));
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_flow_query(internals->slaves[i].port_id,
+ flow->flows[i], action,
+ &slave_count, err);
+ if (unlikely(ret != 0)) {
+ RTE_BOND_LOG(ERR, "Failed to query flow on"
+ " slave %d: %d", i, ret);
+ return ret;
+ }
+ count->bytes += slave_count.bytes;
+ count->hits += slave_count.hits;
+ slave_count.bytes = 0;
+ slave_count.hits = 0;
+ }
+ return 0;
+}
+
+static int
+bond_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action, void *arg,
+ struct rte_flow_error *err)
+{
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ return bond_flow_query_count(dev, flow, action, arg, err);
+ default:
+ return rte_flow_error_set(err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, arg,
+ rte_strerror(ENOTSUP));
+ }
+}
+
+static int
+bond_flow_isolate(struct rte_eth_dev *dev, int set,
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+ int ret;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_flow_isolate(internals->slaves[i].port_id, set, err);
+ if (unlikely(ret != 0)) {
+ RTE_BOND_LOG(ERR, "Operation rte_flow_isolate failed"
+ " for slave %d with error %d", i, ret);
+ internals->flow_isolated_valid = 0;
+ return ret;
+ }
+ }
+ internals->flow_isolated = set;
+ internals->flow_isolated_valid = 1;
+ return 0;
+}
+
+const struct rte_flow_ops bond_flow_ops = {
+ .validate = bond_flow_validate,
+ .create = bond_flow_create,
+ .destroy = bond_flow_destroy,
+ .flush = bond_flow_flush,
+ .query = bond_flow_query,
+ .isolate = bond_flow_isolate,
+};
diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c
new file mode 100644
index 00000000..58f7377c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -0,0 +1,3624 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+#include <stdlib.h>
+#include <netinet/in.h>
+
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_ip.h>
+#include <rte_ip_frag.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+#include <rte_bus_vdev.h>
+#include <rte_alarm.h>
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
+#include "rte_eth_bond.h"
+#include "rte_eth_bond_private.h"
+#include "rte_eth_bond_8023ad_private.h"
+
+#define REORDER_PERIOD_MS 10
+#define DEFAULT_POLLING_INTERVAL_10_MS (10)
+#define BOND_MAX_MAC_ADDRS 16
+
+#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
+
+/* Table for statistics in mode 5 TLB */
+static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
+
+static inline size_t
+get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
+{
+ size_t vlan_offset = 0;
+
+ if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+ struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+
+ vlan_offset = sizeof(struct vlan_hdr);
+ *proto = vlan_hdr->eth_proto;
+
+ if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+ vlan_hdr = vlan_hdr + 1;
+ *proto = vlan_hdr->eth_proto;
+ vlan_offset += sizeof(struct vlan_hdr);
+ }
+ }
+ return vlan_offset;
+}
+
+static uint16_t
+bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+
+ uint16_t num_rx_slave = 0;
+ uint16_t num_rx_total = 0;
+
+ int i;
+
+ /* Cast to structure, containing bonded device's port id and queue id */
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+
+ internals = bd_rx_q->dev_private;
+
+
+ for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
+ /* Offset of pointer to *bufs increases as packets are received
+ * from other slaves */
+ num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
+ bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
+ if (num_rx_slave) {
+ num_rx_total += num_rx_slave;
+ nb_pkts -= num_rx_slave;
+ }
+ }
+
+ return num_rx_total;
+}
+
+static uint16_t
+bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+
+ /* Cast to structure, containing bonded device's port id and queue id */
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+
+ internals = bd_rx_q->dev_private;
+
+ return rte_eth_rx_burst(internals->current_primary_port,
+ bd_rx_q->queue_id, bufs, nb_pkts);
+}
+
+static inline uint8_t
+is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
+{
+ const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
+
+ return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
+ (ethertype == ether_type_slow_be &&
+ (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
+}
+
+/*****************************************************************************
+ * Flow director's setup for mode 4 optimization
+ */
+
+static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
+ .dst.addr_bytes = { 0 },
+ .src.addr_bytes = { 0 },
+ .type = RTE_BE16(ETHER_TYPE_SLOW),
+};
+
+static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
+ .dst.addr_bytes = { 0 },
+ .src.addr_bytes = { 0 },
+ .type = 0xFFFF,
+};
+
+static struct rte_flow_item flow_item_8023ad[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &flow_item_eth_type_8023ad,
+ .last = NULL,
+ .mask = &flow_item_eth_mask_type_8023ad,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ .spec = NULL,
+ .last = NULL,
+ .mask = NULL,
+ }
+};
+
+const struct rte_flow_attr flow_attr_8023ad = {
+ .group = 0,
+ .priority = 0,
+ .ingress = 1,
+ .egress = 0,
+ .reserved = 0,
+};
+
+int
+bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
+ uint16_t slave_port) {
+ struct rte_eth_dev_info slave_info;
+ struct rte_flow_error error;
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ (bond_dev->data->dev_private);
+
+ const struct rte_flow_action_queue lacp_queue_conf = {
+ .index = 0,
+ };
+
+ const struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+ .conf = &lacp_queue_conf
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ }
+ };
+
+ int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
+ flow_item_8023ad, actions, &error);
+ if (ret < 0) {
+ RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)",
+ __func__, error.message, slave_port,
+ internals->mode4.dedicated_queues.rx_qid);
+ return -1;
+ }
+
+ rte_eth_dev_info_get(slave_port, &slave_info);
+ if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
+ slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
+ RTE_BOND_LOG(ERR,
+ "%s: Slave %d capabilities doesn't allow to allocate additional queues",
+ __func__, slave_port);
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
+ struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ (bond_dev->data->dev_private);
+ struct rte_eth_dev_info bond_info;
+ uint16_t idx;
+
+ /* Verify if all slaves in bonding supports flow director and */
+ if (internals->slave_count > 0) {
+ rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
+
+ internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
+ internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;
+
+ for (idx = 0; idx < internals->slave_count; idx++) {
+ if (bond_ethdev_8023ad_flow_verify(bond_dev,
+ internals->slaves[idx].port_id) != 0)
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int
+bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {
+
+ struct rte_flow_error error;
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ (bond_dev->data->dev_private);
+
+ struct rte_flow_action_queue lacp_queue_conf = {
+ .index = internals->mode4.dedicated_queues.rx_qid,
+ };
+
+ const struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+ .conf = &lacp_queue_conf
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ }
+ };
+
+ internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port,
+ &flow_attr_8023ad, flow_item_8023ad, actions, &error);
+ if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) {
+ RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s "
+ "(slave_port=%d queue_id=%d)",
+ error.message, slave_port,
+ internals->mode4.dedicated_queues.rx_qid);
+ return -1;
+ }
+
+ return 0;
+}
+
+static uint16_t
+bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+ struct bond_dev_private *internals = bd_rx_q->dev_private;
+ uint16_t num_rx_total = 0; /* Total number of received packets */
+ uint16_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slave_count;
+
+ uint16_t i, idx;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting */
+ slave_count = internals->active_slave_count;
+ memcpy(slaves, internals->active_slaves,
+ sizeof(internals->active_slaves[0]) * slave_count);
+
+ for (i = 0, idx = internals->active_slave;
+ i < slave_count && num_rx_total < nb_pkts; i++, idx++) {
+ idx = idx % slave_count;
+
+ /* Read packets from this slave */
+ num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
+ &bufs[num_rx_total], nb_pkts - num_rx_total);
+ }
+
+ internals->active_slave = idx;
+
+ return num_rx_total;
+}
+
+static uint16_t
+bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+ uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
+ uint16_t slave_count;
+
+ uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
+ uint16_t dist_slave_count;
+
+ /* 2-D array to sort mbufs for transmission on each slave into */
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
+ /* Number of mbufs for transmission on each slave */
+ uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
+ /* Mapping array generated by hash function to map mbufs to slaves */
+ uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
+
+ uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t total_tx_count = 0, total_tx_fail_count = 0;
+
+ uint16_t i, j;
+
+ if (unlikely(nb_bufs == 0))
+ return 0;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting */
+ slave_count = internals->active_slave_count;
+ if (unlikely(slave_count < 1))
+ return 0;
+
+ memcpy(slave_port_ids, internals->active_slaves,
+ sizeof(slave_port_ids[0]) * slave_count);
+
+
+ dist_slave_count = 0;
+ for (i = 0; i < slave_count; i++) {
+ struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+
+ if (ACTOR_STATE(port, DISTRIBUTING))
+ dist_slave_port_ids[dist_slave_count++] =
+ slave_port_ids[i];
+ }
+
+ if (unlikely(dist_slave_count < 1))
+ return 0;
+
+ /*
+ * Populate slaves mbuf with the packets which are to be sent on it
+ * selecting output slave using hash based on xmit policy
+ */
+ internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
+ bufs_slave_port_idxs);
+
+ for (i = 0; i < nb_bufs; i++) {
+ /* Populate slave mbuf arrays with mbufs for that slave. */
+ uint8_t slave_idx = bufs_slave_port_idxs[i];
+
+ slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
+ }
+
+
+ /* Send packet burst on each slave device */
+ for (i = 0; i < dist_slave_count; i++) {
+ if (slave_nb_bufs[i] == 0)
+ continue;
+
+ slave_tx_count = rte_eth_tx_burst(dist_slave_port_ids[i],
+ bd_tx_q->queue_id, slave_bufs[i],
+ slave_nb_bufs[i]);
+
+ total_tx_count += slave_tx_count;
+
+ /* If tx burst fails move packets to end of bufs */
+ if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
+ slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ slave_tx_count;
+ total_tx_fail_count += slave_tx_fail_count[i];
+
+ /*
+ * Shift bufs to beginning of array to allow reordering
+ * later
+ */
+ for (j = 0; j < slave_tx_fail_count[i]; j++) {
+ slave_bufs[i][j] =
+ slave_bufs[i][(slave_tx_count - 1) + j];
+ }
+ }
+ }
+
+ /*
+ * If there are tx burst failures we move packets to end of bufs to
+ * preserve expected PMD behaviour of all failed transmitted being
+ * at the end of the input mbuf array
+ */
+ if (unlikely(total_tx_fail_count > 0)) {
+ int bufs_idx = nb_bufs - total_tx_fail_count - 1;
+
+ for (i = 0; i < slave_count; i++) {
+ if (slave_tx_fail_count[i] > 0) {
+ for (j = 0; j < slave_tx_fail_count[i]; j++)
+ bufs[bufs_idx++] = slave_bufs[i][j];
+ }
+ }
+ }
+
+ return total_tx_count;
+}
+
+
+static uint16_t
+bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ /* Cast to structure, containing bonded device's port id and queue id */
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+ struct bond_dev_private *internals = bd_rx_q->dev_private;
+ struct ether_addr bond_mac;
+
+ struct ether_hdr *hdr;
+
+ const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
+ uint16_t num_rx_total = 0; /* Total number of received packets */
+ uint16_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slave_count, idx;
+
+ uint8_t collecting; /* current slave collecting status */
+ const uint8_t promisc = internals->promiscuous_en;
+ uint8_t i, j, k;
+ uint8_t subtype;
+
+ rte_eth_macaddr_get(internals->port_id, &bond_mac);
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting */
+ slave_count = internals->active_slave_count;
+ memcpy(slaves, internals->active_slaves,
+ sizeof(internals->active_slaves[0]) * slave_count);
+
+ idx = internals->active_slave;
+ if (idx >= slave_count) {
+ internals->active_slave = 0;
+ idx = 0;
+ }
+ for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
+ j = num_rx_total;
+ collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[idx]],
+ COLLECTING);
+
+ /* Read packets from this slave */
+ num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
+ &bufs[num_rx_total], nb_pkts - num_rx_total);
+
+ for (k = j; k < 2 && k < num_rx_total; k++)
+ rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));
+
+ /* Handle slow protocol packets. */
+ while (j < num_rx_total) {
+
+ /* If packet is not pure L2 and is known, skip it */
+ if ((bufs[j]->packet_type & ~RTE_PTYPE_L2_ETHER) != 0) {
+ j++;
+ continue;
+ }
+
+ if (j + 3 < num_rx_total)
+ rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
+
+ hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
+ subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
+
+ /* Remove packet from array if it is slow packet or slave is not
+ * in collecting state or bonding interface is not in promiscuous
+ * mode and packet address does not match. */
+ if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]) ||
+ !collecting || (!promisc &&
+ !is_multicast_ether_addr(&hdr->d_addr) &&
+ !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
+
+ if (hdr->ether_type == ether_type_slow_be) {
+ bond_mode_8023ad_handle_slow_pkt(
+ internals, slaves[idx], bufs[j]);
+ } else
+ rte_pktmbuf_free(bufs[j]);
+
+ /* Packet is managed by mode 4 or dropped, shift the array */
+ num_rx_total--;
+ if (j < num_rx_total) {
+ memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
+ (num_rx_total - j));
+ }
+ } else
+ j++;
+ }
+ if (unlikely(++idx == slave_count))
+ idx = 0;
+ }
+
+ internals->active_slave = idx;
+ return num_rx_total;
+}
+
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+uint32_t burstnumberRX;
+uint32_t burstnumberTX;
+
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+
+static void
+arp_op_name(uint16_t arp_op, char *buf)
+{
+ switch (arp_op) {
+ case ARP_OP_REQUEST:
+ snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
+ return;
+ case ARP_OP_REPLY:
+ snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
+ return;
+ case ARP_OP_REVREQUEST:
+ snprintf(buf, sizeof("Reverse ARP Request"), "%s",
+ "Reverse ARP Request");
+ return;
+ case ARP_OP_REVREPLY:
+ snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
+ "Reverse ARP Reply");
+ return;
+ case ARP_OP_INVREQUEST:
+ snprintf(buf, sizeof("Peer Identify Request"), "%s",
+ "Peer Identify Request");
+ return;
+ case ARP_OP_INVREPLY:
+ snprintf(buf, sizeof("Peer Identify Reply"), "%s",
+ "Peer Identify Reply");
+ return;
+ default:
+ break;
+ }
+ snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
+ return;
+}
+#endif
+#define MaxIPv4String 16
+static void
+ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
+{
+ uint32_t ipv4_addr;
+
+ ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
+ snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
+ (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
+ ipv4_addr & 0xFF);
+}
+
+#define MAX_CLIENTS_NUMBER 128
+uint8_t active_clients;
+struct client_stats_t {
+ uint16_t port;
+ uint32_t ipv4_addr;
+ uint32_t ipv4_rx_packets;
+ uint32_t ipv4_tx_packets;
+};
+struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
+
+static void
+update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
+{
+ int i = 0;
+
+ for (; i < MAX_CLIENTS_NUMBER; i++) {
+ if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
+ /* Just update RX packets number for this client */
+ if (TXorRXindicator == &burstnumberRX)
+ client_stats[i].ipv4_rx_packets++;
+ else
+ client_stats[i].ipv4_tx_packets++;
+ return;
+ }
+ }
+ /* We have a new client. Insert him to the table, and increment stats */
+ if (TXorRXindicator == &burstnumberRX)
+ client_stats[active_clients].ipv4_rx_packets++;
+ else
+ client_stats[active_clients].ipv4_tx_packets++;
+ client_stats[active_clients].ipv4_addr = addr;
+ client_stats[active_clients].port = port;
+ active_clients++;
+
+}
+
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
+ rte_log(RTE_LOG_DEBUG, bond_logtype, \
+ "%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \
+ "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \
+ info, \
+ port, \
+ eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \
+ eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \
+ eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \
+ src_ip, \
+ eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \
+ eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \
+ eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \
+ dst_ip, \
+ arp_op, ++burstnumber)
+#endif
+
+static void
+mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
+ uint16_t port, uint32_t __attribute__((unused)) *burstnumber)
+{
+ struct ipv4_hdr *ipv4_h;
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+ struct arp_hdr *arp_h;
+ char dst_ip[16];
+ char ArpOp[24];
+ char buf[16];
+#endif
+ char src_ip[16];
+
+ uint16_t ether_type = eth_h->ether_type;
+ uint16_t offset = get_vlan_offset(eth_h, &ether_type);
+
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+ strlcpy(buf, info, 16);
+#endif
+
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
+ ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+ ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
+ MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
+#endif
+ update_client_stats(ipv4_h->src_addr, port, burstnumber);
+ }
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+ else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
+ ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
+ ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
+ arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
+ MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
+ }
+#endif
+}
+#endif
+
+static uint16_t
+bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
+ struct ether_hdr *eth_h;
+ uint16_t ether_type, offset;
+ uint16_t nb_recv_pkts;
+ int i;
+
+ nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);
+
+ for (i = 0; i < nb_recv_pkts; i++) {
+ eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
+ ether_type = eth_h->ether_type;
+ offset = get_vlan_offset(eth_h, &ether_type);
+
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+ mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
+#endif
+ bond_mode_alb_arp_recv(eth_h, offset, internals);
+ }
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+ else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
+#endif
+ }
+
+ return nb_recv_pkts;
+}
+
+static uint16_t
+bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+ struct bond_tx_queue *bd_tx_q;
+
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
+ uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
+
+ uint16_t num_of_slaves;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
+
+ uint16_t num_tx_total = 0, num_tx_slave;
+
+ static int slave_idx = 0;
+ int i, cslave_idx = 0, tx_fail_total = 0;
+
+ bd_tx_q = (struct bond_tx_queue *)queue;
+ internals = bd_tx_q->dev_private;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting */
+ num_of_slaves = internals->active_slave_count;
+ memcpy(slaves, internals->active_slaves,
+ sizeof(internals->active_slaves[0]) * num_of_slaves);
+
+ if (num_of_slaves < 1)
+ return num_tx_total;
+
+ /* Populate slaves mbuf with which packets are to be sent on it */
+ for (i = 0; i < nb_pkts; i++) {
+ cslave_idx = (slave_idx + i) % num_of_slaves;
+ slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
+ }
+
+ /* increment current slave index so the next call to tx burst starts on the
+ * next slave */
+ slave_idx = ++cslave_idx;
+
+ /* Send packet burst on each slave device */
+ for (i = 0; i < num_of_slaves; i++) {
+ if (slave_nb_pkts[i] > 0) {
+ num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+ slave_bufs[i], slave_nb_pkts[i]);
+
+ /* if tx burst fails move packets to end of bufs */
+ if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
+ int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;
+
+ tx_fail_total += tx_fail_slave;
+
+ memcpy(&bufs[nb_pkts - tx_fail_total],
+ &slave_bufs[i][num_tx_slave],
+ tx_fail_slave * sizeof(bufs[0]));
+ }
+ num_tx_total += num_tx_slave;
+ }
+ }
+
+ return num_tx_total;
+}
+
+static uint16_t
+bond_ethdev_tx_burst_active_backup(void *queue,
+ struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+ struct bond_tx_queue *bd_tx_q;
+
+ bd_tx_q = (struct bond_tx_queue *)queue;
+ internals = bd_tx_q->dev_private;
+
+ if (internals->active_slave_count < 1)
+ return 0;
+
+ return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
+ bufs, nb_pkts);
+}
+
+static inline uint16_t
+ether_hash(struct ether_hdr *eth_hdr)
+{
+ unaligned_uint16_t *word_src_addr =
+ (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
+ unaligned_uint16_t *word_dst_addr =
+ (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;
+
+ return (word_src_addr[0] ^ word_dst_addr[0]) ^
+ (word_src_addr[1] ^ word_dst_addr[1]) ^
+ (word_src_addr[2] ^ word_dst_addr[2]);
+}
+
+static inline uint32_t
+ipv4_hash(struct ipv4_hdr *ipv4_hdr)
+{
+ return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
+}
+
+static inline uint32_t
+ipv6_hash(struct ipv6_hdr *ipv6_hdr)
+{
+ unaligned_uint32_t *word_src_addr =
+ (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
+ unaligned_uint32_t *word_dst_addr =
+ (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);
+
+ return (word_src_addr[0] ^ word_dst_addr[0]) ^
+ (word_src_addr[1] ^ word_dst_addr[1]) ^
+ (word_src_addr[2] ^ word_dst_addr[2]) ^
+ (word_src_addr[3] ^ word_dst_addr[3]);
+}
+
+
+void
+burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves)
+{
+ struct ether_hdr *eth_hdr;
+ uint32_t hash;
+ int i;
+
+ for (i = 0; i < nb_pkts; i++) {
+ eth_hdr = rte_pktmbuf_mtod(buf[i], struct ether_hdr *);
+
+ hash = ether_hash(eth_hdr);
+
+ slaves[i] = (hash ^= hash >> 8) % slave_count;
+ }
+}
+
+void
+burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves)
+{
+ uint16_t i;
+ struct ether_hdr *eth_hdr;
+ uint16_t proto;
+ size_t vlan_offset;
+ uint32_t hash, l3hash;
+
+ for (i = 0; i < nb_pkts; i++) {
+ eth_hdr = rte_pktmbuf_mtod(buf[i], struct ether_hdr *);
+ l3hash = 0;
+
+ proto = eth_hdr->ether_type;
+ hash = ether_hash(eth_hdr);
+
+ vlan_offset = get_vlan_offset(eth_hdr, &proto);
+
+ if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
+ struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+ ((char *)(eth_hdr + 1) + vlan_offset);
+ l3hash = ipv4_hash(ipv4_hdr);
+
+ } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
+ struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+ ((char *)(eth_hdr + 1) + vlan_offset);
+ l3hash = ipv6_hash(ipv6_hdr);
+ }
+
+ hash = hash ^ l3hash;
+ hash ^= hash >> 16;
+ hash ^= hash >> 8;
+
+ slaves[i] = hash % slave_count;
+ }
+}
+
+void
+burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves)
+{
+ struct ether_hdr *eth_hdr;
+ uint16_t proto;
+ size_t vlan_offset;
+ int i;
+
+ struct udp_hdr *udp_hdr;
+ struct tcp_hdr *tcp_hdr;
+ uint32_t hash, l3hash, l4hash;
+
+ for (i = 0; i < nb_pkts; i++) {
+ eth_hdr = rte_pktmbuf_mtod(buf[i], struct ether_hdr *);
+ proto = eth_hdr->ether_type;
+ vlan_offset = get_vlan_offset(eth_hdr, &proto);
+ l3hash = 0;
+ l4hash = 0;
+
+ if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
+ struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+ ((char *)(eth_hdr + 1) + vlan_offset);
+ size_t ip_hdr_offset;
+
+ l3hash = ipv4_hash(ipv4_hdr);
+
+ /* there is no L4 header in fragmented packet */
+ if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
+ == 0)) {
+ ip_hdr_offset = (ipv4_hdr->version_ihl
+ & IPV4_HDR_IHL_MASK) *
+ IPV4_IHL_MULTIPLIER;
+
+ if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
+ tcp_hdr = (struct tcp_hdr *)
+ ((char *)ipv4_hdr +
+ ip_hdr_offset);
+ l4hash = HASH_L4_PORTS(tcp_hdr);
+ } else if (ipv4_hdr->next_proto_id ==
+ IPPROTO_UDP) {
+ udp_hdr = (struct udp_hdr *)
+ ((char *)ipv4_hdr +
+ ip_hdr_offset);
+ l4hash = HASH_L4_PORTS(udp_hdr);
+ }
+ }
+ } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
+ struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+ ((char *)(eth_hdr + 1) + vlan_offset);
+ l3hash = ipv6_hash(ipv6_hdr);
+
+ if (ipv6_hdr->proto == IPPROTO_TCP) {
+ tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
+ l4hash = HASH_L4_PORTS(tcp_hdr);
+ } else if (ipv6_hdr->proto == IPPROTO_UDP) {
+ udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
+ l4hash = HASH_L4_PORTS(udp_hdr);
+ }
+ }
+
+ hash = l3hash ^ l4hash;
+ hash ^= hash >> 16;
+ hash ^= hash >> 8;
+
+ slaves[i] = hash % slave_count;
+ }
+}
+
+struct bwg_slave {
+ uint64_t bwg_left_int;
+ uint64_t bwg_left_remainder;
+ uint8_t slave;
+};
+
+void
+bond_tlb_activate_slave(struct bond_dev_private *internals) {
+ int i;
+
+ for (i = 0; i < internals->active_slave_count; i++) {
+ tlb_last_obytets[internals->active_slaves[i]] = 0;
+ }
+}
+
+static int
+bandwidth_cmp(const void *a, const void *b)
+{
+ const struct bwg_slave *bwg_a = a;
+ const struct bwg_slave *bwg_b = b;
+ int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
+ int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
+ (int64_t)bwg_a->bwg_left_remainder;
+ if (diff > 0)
+ return 1;
+ else if (diff < 0)
+ return -1;
+ else if (diff2 > 0)
+ return 1;
+ else if (diff2 < 0)
+ return -1;
+ else
+ return 0;
+}
+
+static void
+bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
+ struct bwg_slave *bwg_slave)
+{
+ struct rte_eth_link link_status;
+
+ rte_eth_link_get_nowait(port_id, &link_status);
+ uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
+ if (link_bwg == 0)
+ return;
+ link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
+ bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
+ bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
+}
+
+static void
+bond_ethdev_update_tlb_slave_cb(void *arg)
+{
+ struct bond_dev_private *internals = arg;
+ struct rte_eth_stats slave_stats;
+ struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
+ uint8_t slave_count;
+ uint64_t tx_bytes;
+
+ uint8_t update_stats = 0;
+ uint8_t i, slave_id;
+
+ internals->slave_update_idx++;
+
+
+ if (internals->slave_update_idx >= REORDER_PERIOD_MS)
+ update_stats = 1;
+
+ for (i = 0; i < internals->active_slave_count; i++) {
+ slave_id = internals->active_slaves[i];
+ rte_eth_stats_get(slave_id, &slave_stats);
+ tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
+ bandwidth_left(slave_id, tx_bytes,
+ internals->slave_update_idx, &bwg_array[i]);
+ bwg_array[i].slave = slave_id;
+
+ if (update_stats) {
+ tlb_last_obytets[slave_id] = slave_stats.obytes;
+ }
+ }
+
+ if (update_stats == 1)
+ internals->slave_update_idx = 0;
+
+ slave_count = i;
+ qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
+ for (i = 0; i < slave_count; i++)
+ internals->tlb_slaves_order[i] = bwg_array[i].slave;
+
+ rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
+ (struct bond_dev_private *)internals);
+}
+
+static uint16_t
+bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+ struct rte_eth_dev *primary_port =
+ &rte_eth_devices[internals->primary_port];
+ uint16_t num_tx_total = 0;
+ uint16_t i, j;
+
+ uint16_t num_of_slaves = internals->active_slave_count;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
+
+ struct ether_hdr *ether_hdr;
+ struct ether_addr primary_slave_addr;
+ struct ether_addr active_slave_addr;
+
+ if (num_of_slaves < 1)
+ return num_tx_total;
+
+ memcpy(slaves, internals->tlb_slaves_order,
+ sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
+
+
+ ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
+
+ if (nb_pkts > 3) {
+ for (i = 0; i < 3; i++)
+ rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
+ }
+
+ for (i = 0; i < num_of_slaves; i++) {
+ rte_eth_macaddr_get(slaves[i], &active_slave_addr);
+ for (j = num_tx_total; j < nb_pkts; j++) {
+ if (j + 3 < nb_pkts)
+ rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
+
+ ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
+ if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
+ ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+ mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
+#endif
+ }
+
+ num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+ bufs + num_tx_total, nb_pkts - num_tx_total);
+
+ if (num_tx_total == nb_pkts)
+ break;
+ }
+
+ return num_tx_total;
+}
+
+void
+bond_tlb_disable(struct bond_dev_private *internals)
+{
+ rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
+}
+
+void
+bond_tlb_enable(struct bond_dev_private *internals)
+{
+ bond_ethdev_update_tlb_slave_cb(internals);
+}
+
+static uint16_t
+bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+ struct ether_hdr *eth_h;
+ uint16_t ether_type, offset;
+
+ struct client_data *client_info;
+
+ /*
+ * We create transmit buffers for every slave and one additional to send
+ * through tlb. In worst case every packet will be send on one port.
+ */
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
+ uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };
+
+ /*
+ * We create separate transmit buffers for update packets as they won't
+ * be counted in num_tx_total.
+ */
+ struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
+ uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };
+
+ struct rte_mbuf *upd_pkt;
+ size_t pkt_size;
+
+ uint16_t num_send, num_not_send = 0;
+ uint16_t num_tx_total = 0;
+ uint16_t slave_idx;
+
+ int i, j;
+
+ /* Search tx buffer for ARP packets and forward them to alb */
+ for (i = 0; i < nb_pkts; i++) {
+ eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
+ ether_type = eth_h->ether_type;
+ offset = get_vlan_offset(eth_h, &ether_type);
+
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
+
+ /* Change src mac in eth header */
+ rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);
+
+ /* Add packet to slave tx buffer */
+ slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
+ slave_bufs_pkts[slave_idx]++;
+ } else {
+ /* If packet is not ARP, send it with TLB policy */
+ slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
+ bufs[i];
+ slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
+ }
+ }
+
+ /* Update connected client ARP tables */
+ if (internals->mode6.ntt) {
+ for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
+ client_info = &internals->mode6.client_table[i];
+
+ if (client_info->in_use) {
+ /* Allocate new packet to send ARP update on current slave */
+ upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
+ if (upd_pkt == NULL) {
+ RTE_BOND_LOG(ERR,
+ "Failed to allocate ARP packet from pool");
+ continue;
+ }
+ pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
+ + client_info->vlan_count * sizeof(struct vlan_hdr);
+ upd_pkt->data_len = pkt_size;
+ upd_pkt->pkt_len = pkt_size;
+
+ slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
+ internals);
+
+ /* Add packet to update tx buffer */
+ update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
+ update_bufs_pkts[slave_idx]++;
+ }
+ }
+ internals->mode6.ntt = 0;
+ }
+
+ /* Send ARP packets on proper slaves */
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+ if (slave_bufs_pkts[i] > 0) {
+ num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
+ slave_bufs[i], slave_bufs_pkts[i]);
+ for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
+ bufs[nb_pkts - 1 - num_not_send - j] =
+ slave_bufs[i][nb_pkts - 1 - j];
+ }
+
+ num_tx_total += num_send;
+ num_not_send += slave_bufs_pkts[i] - num_send;
+
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+ /* Print TX stats including update packets */
+ for (j = 0; j < slave_bufs_pkts[i]; j++) {
+ eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
+ mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
+ }
+#endif
+ }
+ }
+
+ /* Send update packets on proper slaves */
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+ if (update_bufs_pkts[i] > 0) {
+ num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
+ update_bufs_pkts[i]);
+ for (j = num_send; j < update_bufs_pkts[i]; j++) {
+ rte_pktmbuf_free(update_bufs[i][j]);
+ }
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+ for (j = 0; j < update_bufs_pkts[i]; j++) {
+ eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
+ mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
+ }
+#endif
+ }
+ }
+
+ /* Send non-ARP packets using tlb policy */
+ if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
+ num_send = bond_ethdev_tx_burst_tlb(queue,
+ slave_bufs[RTE_MAX_ETHPORTS],
+ slave_bufs_pkts[RTE_MAX_ETHPORTS]);
+
+ for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
+ bufs[nb_pkts - 1 - num_not_send - j] =
+ slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
+ }
+
+ num_tx_total += num_send;
+ }
+
+ return num_tx_total;
+}
+
+static uint16_t
+bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+ uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
+ uint16_t slave_count;
+
+ /* Array to sort mbufs for transmission on each slave into */
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
+ /* Number of mbufs for transmission on each slave */
+ uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
+ /* Mapping array generated by hash function to map mbufs to slaves */
+ uint16_t bufs_slave_port_idxs[nb_bufs];
+
+ uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t total_tx_count = 0, total_tx_fail_count = 0;
+
+ uint16_t i, j;
+
+ if (unlikely(nb_bufs == 0))
+ return 0;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting */
+ slave_count = internals->active_slave_count;
+ if (unlikely(slave_count < 1))
+ return 0;
+
+ memcpy(slave_port_ids, internals->active_slaves,
+ sizeof(slave_port_ids[0]) * slave_count);
+
+ /*
+ * Populate slaves mbuf with the packets which are to be sent on it
+ * selecting output slave using hash based on xmit policy
+ */
+ internals->burst_xmit_hash(bufs, nb_bufs, slave_count,
+ bufs_slave_port_idxs);
+
+ for (i = 0; i < nb_bufs; i++) {
+ /* Populate slave mbuf arrays with mbufs for that slave. */
+ uint8_t slave_idx = bufs_slave_port_idxs[i];
+
+ slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
+ }
+
+ /* Send packet burst on each slave device */
+ for (i = 0; i < slave_count; i++) {
+ if (slave_nb_bufs[i] == 0)
+ continue;
+
+ slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
+ bd_tx_q->queue_id, slave_bufs[i],
+ slave_nb_bufs[i]);
+
+ total_tx_count += slave_tx_count;
+
+ /* If tx burst fails move packets to end of bufs */
+ if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
+ slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ slave_tx_count;
+ total_tx_fail_count += slave_tx_fail_count[i];
+
+ /*
+ * Shift bufs to beginning of array to allow reordering
+ * later
+ */
+ for (j = 0; j < slave_tx_fail_count[i]; j++) {
+ slave_bufs[i][j] =
+ slave_bufs[i][(slave_tx_count - 1) + j];
+ }
+ }
+ }
+
+ /*
+ * If there are tx burst failures we move packets to end of bufs to
+ * preserve expected PMD behaviour of all failed transmitted being
+ * at the end of the input mbuf array
+ */
+ if (unlikely(total_tx_fail_count > 0)) {
+ int bufs_idx = nb_bufs - total_tx_fail_count - 1;
+
+ for (i = 0; i < slave_count; i++) {
+ if (slave_tx_fail_count[i] > 0) {
+ for (j = 0; j < slave_tx_fail_count[i]; j++)
+ bufs[bufs_idx++] = slave_bufs[i][j];
+ }
+ }
+ }
+
+ return total_tx_count;
+}
+
+static uint16_t
+bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+ uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
+ uint16_t slave_count;
+
+ uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
+ uint16_t dist_slave_count;
+
+ /* 2-D array to sort mbufs for transmission on each slave into */
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
+ /* Number of mbufs for transmission on each slave */
+ uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
+ /* Mapping array generated by hash function to map mbufs to slaves */
+ uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
+
+ uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t total_tx_count = 0, total_tx_fail_count = 0;
+
+ uint16_t i, j;
+
+ if (unlikely(nb_bufs == 0))
+ return 0;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting */
+ slave_count = internals->active_slave_count;
+ if (unlikely(slave_count < 1))
+ return 0;
+
+ memcpy(slave_port_ids, internals->active_slaves,
+ sizeof(slave_port_ids[0]) * slave_count);
+
+ dist_slave_count = 0;
+ for (i = 0; i < slave_count; i++) {
+ struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+
+ if (ACTOR_STATE(port, DISTRIBUTING))
+ dist_slave_port_ids[dist_slave_count++] =
+ slave_port_ids[i];
+ }
+
+ if (likely(dist_slave_count > 1)) {
+
+ /*
+ * Populate slaves mbuf with the packets which are to be sent
+ * on it, selecting output slave using hash based on xmit policy
+ */
+ internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
+ bufs_slave_port_idxs);
+
+ for (i = 0; i < nb_bufs; i++) {
+ /*
+ * Populate slave mbuf arrays with mbufs for that
+ * slave
+ */
+ uint8_t slave_idx = bufs_slave_port_idxs[i];
+
+ slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] =
+ bufs[i];
+ }
+
+
+ /* Send packet burst on each slave device */
+ for (i = 0; i < dist_slave_count; i++) {
+ if (slave_nb_bufs[i] == 0)
+ continue;
+
+ slave_tx_count = rte_eth_tx_burst(
+ dist_slave_port_ids[i],
+ bd_tx_q->queue_id, slave_bufs[i],
+ slave_nb_bufs[i]);
+
+ total_tx_count += slave_tx_count;
+
+ /* If tx burst fails move packets to end of bufs */
+ if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
+ slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ slave_tx_count;
+ total_tx_fail_count += slave_tx_fail_count[i];
+
+ /*
+ * Shift bufs to beginning of array to allow
+ * reordering later
+ */
+ for (j = 0; j < slave_tx_fail_count[i]; j++)
+ slave_bufs[i][j] =
+ slave_bufs[i]
+ [(slave_tx_count - 1)
+ + j];
+ }
+ }
+
+ /*
+ * If there are tx burst failures we move packets to end of
+ * bufs to preserve expected PMD behaviour of all failed
+ * transmitted being at the end of the input mbuf array
+ */
+ if (unlikely(total_tx_fail_count > 0)) {
+ int bufs_idx = nb_bufs - total_tx_fail_count - 1;
+
+ for (i = 0; i < slave_count; i++) {
+ if (slave_tx_fail_count[i] > 0) {
+ for (j = 0;
+ j < slave_tx_fail_count[i];
+ j++) {
+ bufs[bufs_idx++] =
+ slave_bufs[i][j];
+ }
+ }
+ }
+ }
+ }
+
+ /* Check for LACP control packets and send if available */
+ for (i = 0; i < slave_count; i++) {
+ struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+ struct rte_mbuf *ctrl_pkt = NULL;
+
+ if (likely(rte_ring_empty(port->tx_ring)))
+ continue;
+
+ if (rte_ring_dequeue(port->tx_ring,
+ (void **)&ctrl_pkt) != -ENOENT) {
+ slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
+ bd_tx_q->queue_id, &ctrl_pkt, 1);
+ /*
+ * re-enqueue LAG control plane packets to buffering
+ * ring if transmission fails so the packet isn't lost.
+ */
+ if (slave_tx_count != 1)
+ rte_ring_enqueue(port->tx_ring, ctrl_pkt);
+ }
+ }
+
+ return total_tx_count;
+}
+
+static uint16_t
+bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+ struct bond_tx_queue *bd_tx_q;
+
+ uint8_t tx_failed_flag = 0, num_of_slaves;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
+
+ uint16_t max_nb_of_tx_pkts = 0;
+
+ int slave_tx_total[RTE_MAX_ETHPORTS];
+ int i, most_successful_tx_slave = -1;
+
+ bd_tx_q = (struct bond_tx_queue *)queue;
+ internals = bd_tx_q->dev_private;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting */
+ num_of_slaves = internals->active_slave_count;
+ memcpy(slaves, internals->active_slaves,
+ sizeof(internals->active_slaves[0]) * num_of_slaves);
+
+ if (num_of_slaves < 1)
+ return 0;
+
+ /* Increment reference count on mbufs */
+ for (i = 0; i < nb_pkts; i++)
+ rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);
+
+ /* Transmit burst on each active slave */
+ for (i = 0; i < num_of_slaves; i++) {
+ slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+ bufs, nb_pkts);
+
+ if (unlikely(slave_tx_total[i] < nb_pkts))
+ tx_failed_flag = 1;
+
+ /* record the value and slave index for the slave which transmits the
+ * maximum number of packets */
+ if (slave_tx_total[i] > max_nb_of_tx_pkts) {
+ max_nb_of_tx_pkts = slave_tx_total[i];
+ most_successful_tx_slave = i;
+ }
+ }
+
+ /* if slaves fail to transmit packets from burst, the calling application
+ * is not expected to know about multiple references to packets so we must
+ * handle failures of all packets except those of the most successful slave
+ */
+ if (unlikely(tx_failed_flag))
+ for (i = 0; i < num_of_slaves; i++)
+ if (i != most_successful_tx_slave)
+ while (slave_tx_total[i] < nb_pkts)
+ rte_pktmbuf_free(bufs[slave_tx_total[i]++]);
+
+ return max_nb_of_tx_pkts;
+}
+
+void
+link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
+{
+ struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
+
+ if (bond_ctx->mode == BONDING_MODE_8023AD) {
+ /**
+ * If in mode 4 then save the link properties of the first
+ * slave, all subsequent slaves must match these properties
+ */
+ struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
+
+ bond_link->link_autoneg = slave_link->link_autoneg;
+ bond_link->link_duplex = slave_link->link_duplex;
+ bond_link->link_speed = slave_link->link_speed;
+ } else {
+ /**
+ * In any other mode the link properties are set to default
+ * values of AUTONEG/DUPLEX
+ */
+ ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
+ ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ }
+}
+
+int
+link_properties_valid(struct rte_eth_dev *ethdev,
+ struct rte_eth_link *slave_link)
+{
+ struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
+
+ if (bond_ctx->mode == BONDING_MODE_8023AD) {
+ struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
+
+ if (bond_link->link_duplex != slave_link->link_duplex ||
+ bond_link->link_autoneg != slave_link->link_autoneg ||
+ bond_link->link_speed != slave_link->link_speed)
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
+{
+ struct ether_addr *mac_addr;
+
+ if (eth_dev == NULL) {
+ RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
+ return -1;
+ }
+
+ if (dst_mac_addr == NULL) {
+ RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
+ return -1;
+ }
+
+ mac_addr = eth_dev->data->mac_addrs;
+
+ ether_addr_copy(mac_addr, dst_mac_addr);
+ return 0;
+}
+
+int
+mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
+{
+ struct ether_addr *mac_addr;
+
+ if (eth_dev == NULL) {
+ RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
+ return -1;
+ }
+
+ if (new_mac_addr == NULL) {
+ RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
+ return -1;
+ }
+
+ mac_addr = eth_dev->data->mac_addrs;
+
+ /* If new MAC is different to current MAC then update */
+ if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
+ memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
+
+ return 0;
+}
+
+static const struct ether_addr null_mac_addr;
+
+/*
+ * Add additional MAC addresses to the slave
+ */
+int
+slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id)
+{
+ int i, ret;
+ struct ether_addr *mac_addr;
+
+ for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
+ mac_addr = &bonded_eth_dev->data->mac_addrs[i];
+ if (is_same_ether_addr(mac_addr, &null_mac_addr))
+ break;
+
+ ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
+ if (ret < 0) {
+ /* rollback */
+ for (i--; i > 0; i--)
+ rte_eth_dev_mac_addr_remove(slave_port_id,
+ &bonded_eth_dev->data->mac_addrs[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Remove additional MAC addresses from the slave
+ */
+int
+slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id)
+{
+ int i, rc, ret;
+ struct ether_addr *mac_addr;
+
+ rc = 0;
+ for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
+ mac_addr = &bonded_eth_dev->data->mac_addrs[i];
+ if (is_same_ether_addr(mac_addr, &null_mac_addr))
+ break;
+
+ ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
+ /* save only the first error */
+ if (ret < 0 && rc == 0)
+ rc = ret;
+ }
+
+ return rc;
+}
+
+int
+mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
+{
+ struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+ int i;
+
+ /* Update slave devices MAC addresses */
+ if (internals->slave_count < 1)
+ return -1;
+
+ switch (internals->mode) {
+ case BONDING_MODE_ROUND_ROBIN:
+ case BONDING_MODE_BALANCE:
+ case BONDING_MODE_BROADCAST:
+ for (i = 0; i < internals->slave_count; i++) {
+ if (rte_eth_dev_default_mac_addr_set(
+ internals->slaves[i].port_id,
+ bonded_eth_dev->data->mac_addrs)) {
+ RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
+ internals->slaves[i].port_id);
+ return -1;
+ }
+ }
+ break;
+ case BONDING_MODE_8023AD:
+ bond_mode_8023ad_mac_address_update(bonded_eth_dev);
+ break;
+ case BONDING_MODE_ACTIVE_BACKUP:
+ case BONDING_MODE_TLB:
+ case BONDING_MODE_ALB:
+ default:
+ for (i = 0; i < internals->slave_count; i++) {
+ if (internals->slaves[i].port_id ==
+ internals->current_primary_port) {
+ if (rte_eth_dev_default_mac_addr_set(
+ internals->primary_port,
+ bonded_eth_dev->data->mac_addrs)) {
+ RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
+ internals->current_primary_port);
+ return -1;
+ }
+ } else {
+ if (rte_eth_dev_default_mac_addr_set(
+ internals->slaves[i].port_id,
+ &internals->slaves[i].persisted_mac_addr)) {
+ RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
+ internals->slaves[i].port_id);
+ return -1;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+int
+bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
+{
+ struct bond_dev_private *internals;
+
+ internals = eth_dev->data->dev_private;
+
+ switch (mode) {
+ case BONDING_MODE_ROUND_ROBIN:
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
+ break;
+ case BONDING_MODE_ACTIVE_BACKUP:
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
+ break;
+ case BONDING_MODE_BALANCE:
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
+ break;
+ case BONDING_MODE_BROADCAST:
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
+ break;
+ case BONDING_MODE_8023AD:
+ if (bond_mode_8023ad_enable(eth_dev) != 0)
+ return -1;
+
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
+ RTE_BOND_LOG(WARNING,
+ "Using mode 4, it is necessary to do TX burst "
+ "and RX burst at least every 100ms.");
+ } else {
+ /* Use flow director's optimization */
+ eth_dev->rx_pkt_burst =
+ bond_ethdev_rx_burst_8023ad_fast_queue;
+ eth_dev->tx_pkt_burst =
+ bond_ethdev_tx_burst_8023ad_fast_queue;
+ }
+ break;
+ case BONDING_MODE_TLB:
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
+ break;
+ case BONDING_MODE_ALB:
+ if (bond_mode_alb_enable(eth_dev) != 0)
+ return -1;
+
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
+ break;
+ default:
+ return -1;
+ }
+
+ internals->mode = mode;
+
+ return 0;
+}
+
+
+static int
+slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_dev *slave_eth_dev)
+{
+ int errval = 0;
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ bonded_eth_dev->data->dev_private;
+ struct port *port = &mode_8023ad_ports[slave_eth_dev->data->port_id];
+
+ if (port->slow_pool == NULL) {
+ char mem_name[256];
+ int slave_id = slave_eth_dev->data->port_id;
+
+ snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool",
+ slave_id);
+ port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
+ 250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+ slave_eth_dev->data->numa_node);
+
+ /* Any memory allocation failure in initialization is critical because
+ * resources can't be free, so reinitialization is impossible. */
+ if (port->slow_pool == NULL) {
+ rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
+ slave_id, mem_name, rte_strerror(rte_errno));
+ }
+ }
+
+ if (internals->mode4.dedicated_queues.enabled == 1) {
+ /* Configure slow Rx queue */
+
+ errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.rx_qid, 128,
+ rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
+ NULL, port->slow_pool);
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR,
+ "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.rx_qid,
+ errval);
+ return errval;
+ }
+
+ errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.tx_qid, 512,
+ rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
+ NULL);
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR,
+ "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.tx_qid,
+ errval);
+ return errval;
+ }
+ }
+ return 0;
+}
+
+int
+slave_configure(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_dev *slave_eth_dev)
+{
+ struct bond_rx_queue *bd_rx_q;
+ struct bond_tx_queue *bd_tx_q;
+ uint16_t nb_rx_queues;
+ uint16_t nb_tx_queues;
+
+ int errval;
+ uint16_t q_id;
+ struct rte_flow_error flow_error;
+
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ bonded_eth_dev->data->dev_private;
+
+ /* Stop slave */
+ rte_eth_dev_stop(slave_eth_dev->data->port_id);
+
+ /* Enable interrupts on slave device if supported */
+ if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
+
+ /* If RSS is enabled for bonding, try to enable it for slaves */
+ if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
+ != 0) {
+ slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
+ bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+ slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
+ bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+ } else {
+ slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
+ }
+
+ slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
+ bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+ slave_eth_dev->data->dev_conf.rxmode.mq_mode =
+ bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
+ }
+
+ if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
+ slave_eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+ else
+ slave_eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
+ nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
+
+ if (internals->mode == BONDING_MODE_8023AD) {
+ if (internals->mode4.dedicated_queues.enabled == 1) {
+ nb_rx_queues++;
+ nb_tx_queues++;
+ }
+ }
+
+ errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
+ bonded_eth_dev->data->mtu);
+ if (errval != 0 && errval != -ENOTSUP) {
+ RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
+ slave_eth_dev->data->port_id, errval);
+ return errval;
+ }
+
+ /* Configure device */
+ errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
+ nb_rx_queues, nb_tx_queues,
+ &(slave_eth_dev->data->dev_conf));
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
+ slave_eth_dev->data->port_id, errval);
+ return errval;
+ }
+
+ /* Setup Rx Queues */
+ for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
+ bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
+
+ errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
+ bd_rx_q->nb_rx_desc,
+ rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
+ &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR,
+ "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id, q_id, errval);
+ return errval;
+ }
+ }
+
+ /* Setup Tx Queues */
+ for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
+ bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
+
+ errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
+ bd_tx_q->nb_tx_desc,
+ rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
+ &bd_tx_q->tx_conf);
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR,
+ "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id, q_id, errval);
+ return errval;
+ }
+ }
+
+ if (internals->mode == BONDING_MODE_8023AD &&
+ internals->mode4.dedicated_queues.enabled == 1) {
+ if (slave_configure_slow_queue(bonded_eth_dev, slave_eth_dev)
+ != 0)
+ return errval;
+
+ if (bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
+ slave_eth_dev->data->port_id) != 0) {
+ RTE_BOND_LOG(ERR,
+ "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id, q_id, errval);
+ return -1;
+ }
+
+ if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
+ rte_flow_destroy(slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
+ &flow_error);
+
+ bond_ethdev_8023ad_flow_set(bonded_eth_dev,
+ slave_eth_dev->data->port_id);
+ }
+
+ /* Start device */
+ errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
+ slave_eth_dev->data->port_id, errval);
+ return -1;
+ }
+
+ /* If RSS is enabled for bonding, synchronize RETA */
+ if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ int i;
+ struct bond_dev_private *internals;
+
+ internals = bonded_eth_dev->data->dev_private;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
+ errval = rte_eth_dev_rss_reta_update(
+ slave_eth_dev->data->port_id,
+ &internals->reta_conf[0],
+ internals->slaves[i].reta_size);
+ if (errval != 0) {
+ RTE_BOND_LOG(WARNING,
+ "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
+ " RSS Configuration for bonding may be inconsistent.",
+ slave_eth_dev->data->port_id, errval);
+ }
+ break;
+ }
+ }
+ }
+
+ /* If lsc interrupt is set, check initial slave's link status */
+ if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
+ slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
+ bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
+ RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
+ NULL);
+ }
+
+ return 0;
+}
+
+void
+slave_remove(struct bond_dev_private *internals,
+ struct rte_eth_dev *slave_eth_dev)
+{
+ uint8_t i;
+
+ for (i = 0; i < internals->slave_count; i++)
+ if (internals->slaves[i].port_id ==
+ slave_eth_dev->data->port_id)
+ break;
+
+ if (i < (internals->slave_count - 1)) {
+ struct rte_flow *flow;
+
+ memmove(&internals->slaves[i], &internals->slaves[i + 1],
+ sizeof(internals->slaves[0]) *
+ (internals->slave_count - i - 1));
+ TAILQ_FOREACH(flow, &internals->flow_list, next) {
+ memmove(&flow->flows[i], &flow->flows[i + 1],
+ sizeof(flow->flows[0]) *
+ (internals->slave_count - i - 1));
+ flow->flows[internals->slave_count - 1] = NULL;
+ }
+ }
+
+ internals->slave_count--;
+
+ /* force reconfiguration of slave interfaces */
+ _rte_eth_dev_reset(slave_eth_dev);
+}
+
+static void
+bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
+
+void
+slave_add(struct bond_dev_private *internals,
+ struct rte_eth_dev *slave_eth_dev)
+{
+ struct bond_slave_details *slave_details =
+ &internals->slaves[internals->slave_count];
+
+ slave_details->port_id = slave_eth_dev->data->port_id;
+ slave_details->last_link_status = 0;
+
+ /* Mark slave devices that don't support interrupts so we can
+ * compensate when we start the bond
+ */
+ if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
+ slave_details->link_status_poll_enabled = 1;
+ }
+
+ slave_details->link_status_wait_to_complete = 0;
+ /* clean tlb_last_obytes when adding port for bonding device */
+ memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
+ sizeof(struct ether_addr));
+}
+
+void
+bond_ethdev_primary_set(struct bond_dev_private *internals,
+ uint16_t slave_port_id)
+{
+ int i;
+
+ if (internals->active_slave_count < 1)
+ internals->current_primary_port = slave_port_id;
+ else
+ /* Search bonded device slave ports for new proposed primary port */
+ for (i = 0; i < internals->active_slave_count; i++) {
+ if (internals->active_slaves[i] == slave_port_id)
+ internals->current_primary_port = slave_port_id;
+ }
+}
+
+static void
+bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
+
+static int
+bond_ethdev_start(struct rte_eth_dev *eth_dev)
+{
+ struct bond_dev_private *internals;
+ int i;
+
+ /* slave eth dev will be started by bonded device */
+ if (check_for_bonded_ethdev(eth_dev)) {
+ RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
+ eth_dev->data->port_id);
+ return -1;
+ }
+
+ eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ eth_dev->data->dev_started = 1;
+
+ internals = eth_dev->data->dev_private;
+
+ if (internals->slave_count == 0) {
+ RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
+ goto out_err;
+ }
+
+ if (internals->user_defined_mac == 0) {
+ struct ether_addr *new_mac_addr = NULL;
+
+ for (i = 0; i < internals->slave_count; i++)
+ if (internals->slaves[i].port_id == internals->primary_port)
+ new_mac_addr = &internals->slaves[i].persisted_mac_addr;
+
+ if (new_mac_addr == NULL)
+ goto out_err;
+
+ if (mac_address_set(eth_dev, new_mac_addr) != 0) {
+ RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
+ eth_dev->data->port_id);
+ goto out_err;
+ }
+ }
+
+ /* If bonded device is configure in promiscuous mode then re-apply config */
+ if (internals->promiscuous_en)
+ bond_ethdev_promiscuous_enable(eth_dev);
+
+ if (internals->mode == BONDING_MODE_8023AD) {
+ if (internals->mode4.dedicated_queues.enabled == 1) {
+ internals->mode4.dedicated_queues.rx_qid =
+ eth_dev->data->nb_rx_queues;
+ internals->mode4.dedicated_queues.tx_qid =
+ eth_dev->data->nb_tx_queues;
+ }
+ }
+
+
+ /* Reconfigure each slave device if starting bonded device */
+ for (i = 0; i < internals->slave_count; i++) {
+ struct rte_eth_dev *slave_ethdev =
+ &(rte_eth_devices[internals->slaves[i].port_id]);
+ if (slave_configure(eth_dev, slave_ethdev) != 0) {
+ RTE_BOND_LOG(ERR,
+ "bonded port (%d) failed to reconfigure slave device (%d)",
+ eth_dev->data->port_id,
+ internals->slaves[i].port_id);
+ goto out_err;
+ }
+ /* We will need to poll for link status if any slave doesn't
+ * support interrupts
+ */
+ if (internals->slaves[i].link_status_poll_enabled)
+ internals->link_status_polling_enabled = 1;
+ }
+
+ /* start polling if needed */
+ if (internals->link_status_polling_enabled) {
+ rte_eal_alarm_set(
+ internals->link_status_polling_interval_ms * 1000,
+ bond_ethdev_slave_link_status_change_monitor,
+ (void *)&rte_eth_devices[internals->port_id]);
+ }
+
+ /* Update all slave devices MACs*/
+ if (mac_address_slaves_update(eth_dev) != 0)
+ goto out_err;
+
+ if (internals->user_defined_primary_port)
+ bond_ethdev_primary_set(internals, internals->primary_port);
+
+ if (internals->mode == BONDING_MODE_8023AD)
+ bond_mode_8023ad_start(eth_dev);
+
+ if (internals->mode == BONDING_MODE_TLB ||
+ internals->mode == BONDING_MODE_ALB)
+ bond_tlb_enable(internals);
+
+ return 0;
+
+out_err:
+ eth_dev->data->dev_started = 0;
+ return -1;
+}
+
+static void
+bond_ethdev_free_queues(struct rte_eth_dev *dev)
+{
+ uint8_t i;
+
+ if (dev->data->rx_queues != NULL) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rte_free(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+ }
+
+ if (dev->data->tx_queues != NULL) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ rte_free(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+ }
+}
+
+void
+bond_ethdev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct bond_dev_private *internals = eth_dev->data->dev_private;
+ uint8_t i;
+
+ if (internals->mode == BONDING_MODE_8023AD) {
+ struct port *port;
+ void *pkt = NULL;
+
+ bond_mode_8023ad_stop(eth_dev);
+
+ /* Discard all messages to/from mode 4 state machines */
+ for (i = 0; i < internals->active_slave_count; i++) {
+ port = &mode_8023ad_ports[internals->active_slaves[i]];
+
+ RTE_ASSERT(port->rx_ring != NULL);
+ while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
+ rte_pktmbuf_free(pkt);
+
+ RTE_ASSERT(port->tx_ring != NULL);
+ while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
+ rte_pktmbuf_free(pkt);
+ }
+ }
+
+ if (internals->mode == BONDING_MODE_TLB ||
+ internals->mode == BONDING_MODE_ALB) {
+ bond_tlb_disable(internals);
+ for (i = 0; i < internals->active_slave_count; i++)
+ tlb_last_obytets[internals->active_slaves[i]] = 0;
+ }
+
+ internals->link_status_polling_enabled = 0;
+ for (i = 0; i < internals->slave_count; i++)
+ internals->slaves[i].last_link_status = 0;
+
+ eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ eth_dev->data->dev_started = 0;
+}
+
+void
+bond_ethdev_close(struct rte_eth_dev *dev)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ uint8_t bond_port_id = internals->port_id;
+ int skipped = 0;
+ struct rte_flow_error ferror;
+
+ RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
+ while (internals->slave_count != skipped) {
+ uint16_t port_id = internals->slaves[skipped].port_id;
+
+ rte_eth_dev_stop(port_id);
+
+ if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to remove port %d from bonded device %s",
+ port_id, dev->device->name);
+ skipped++;
+ }
+ }
+ bond_flow_ops.flush(dev, &ferror);
+ bond_ethdev_free_queues(dev);
+ rte_bitmap_reset(internals->vlan_filter_bmp);
+}
+
+/* forward declaration */
+static int bond_ethdev_configure(struct rte_eth_dev *dev);
+
+static void
+bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+
+ uint16_t max_nb_rx_queues = UINT16_MAX;
+ uint16_t max_nb_tx_queues = UINT16_MAX;
+
+ dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
+
+ dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
+ internals->candidate_max_rx_pktlen :
+ ETHER_MAX_JUMBO_FRAME_LEN;
+
+ /* Max number of tx/rx queues that the bonded device can support is the
+ * minimum values of the bonded slaves, as all slaves must be capable
+ * of supporting the same number of tx/rx queues.
+ */
+ if (internals->slave_count > 0) {
+ struct rte_eth_dev_info slave_info;
+ uint8_t idx;
+
+ for (idx = 0; idx < internals->slave_count; idx++) {
+ rte_eth_dev_info_get(internals->slaves[idx].port_id,
+ &slave_info);
+
+ if (slave_info.max_rx_queues < max_nb_rx_queues)
+ max_nb_rx_queues = slave_info.max_rx_queues;
+
+ if (slave_info.max_tx_queues < max_nb_tx_queues)
+ max_nb_tx_queues = slave_info.max_tx_queues;
+ }
+ }
+
+ dev_info->max_rx_queues = max_nb_rx_queues;
+ dev_info->max_tx_queues = max_nb_tx_queues;
+
+ /**
+ * If dedicated hw queues enabled for link bonding device in LACP mode
+ * then we need to reduce the maximum number of data path queues by 1.
+ */
+ if (internals->mode == BONDING_MODE_8023AD &&
+ internals->mode4.dedicated_queues.enabled == 1) {
+ dev_info->max_rx_queues--;
+ dev_info->max_tx_queues--;
+ }
+
+ dev_info->min_rx_bufsize = 0;
+
+ dev_info->rx_offload_capa = internals->rx_offload_capa;
+ dev_info->tx_offload_capa = internals->tx_offload_capa;
+ dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
+ dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
+
+ dev_info->reta_size = internals->reta_size;
+}
+
+static int
+bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ int res;
+ uint16_t i;
+ struct bond_dev_private *internals = dev->data->dev_private;
+
+ /* don't do this while a slave is being added */
+ rte_spinlock_lock(&internals->lock);
+
+ if (on)
+ rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
+ else
+ rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
+
+ for (i = 0; i < internals->slave_count; i++) {
+ uint16_t port_id = internals->slaves[i].port_id;
+
+ res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
+ if (res == ENOTSUP)
+ RTE_BOND_LOG(WARNING,
+ "Setting VLAN filter on slave port %u not supported.",
+ port_id);
+ }
+
+ rte_spinlock_unlock(&internals->lock);
+ return 0;
+}
+
+static int
+bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
+{
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
+ rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
+ 0, dev->data->numa_node);
+ if (bd_rx_q == NULL)
+ return -1;
+
+ bd_rx_q->queue_id = rx_queue_id;
+ bd_rx_q->dev_private = dev->data->dev_private;
+
+ bd_rx_q->nb_rx_desc = nb_rx_desc;
+
+ memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
+ bd_rx_q->mb_pool = mb_pool;
+
+ dev->data->rx_queues[rx_queue_id] = bd_rx_q;
+
+ return 0;
+}
+
+static int
+bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
+ rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
+ 0, dev->data->numa_node);
+
+ if (bd_tx_q == NULL)
+ return -1;
+
+ bd_tx_q->queue_id = tx_queue_id;
+ bd_tx_q->dev_private = dev->data->dev_private;
+
+ bd_tx_q->nb_tx_desc = nb_tx_desc;
+ memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
+
+ dev->data->tx_queues[tx_queue_id] = bd_tx_q;
+
+ return 0;
+}
+
+static void
+bond_ethdev_rx_queue_release(void *queue)
+{
+ if (queue == NULL)
+ return;
+
+ rte_free(queue);
+}
+
+static void
+bond_ethdev_tx_queue_release(void *queue)
+{
+ if (queue == NULL)
+ return;
+
+ rte_free(queue);
+}
+
+static void
+bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
+{
+ struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
+ struct bond_dev_private *internals;
+
+ /* Default value for polling slave found is true as we don't want to
+ * disable the polling thread if we cannot get the lock */
+ int i, polling_slave_found = 1;
+
+ if (cb_arg == NULL)
+ return;
+
+ bonded_ethdev = (struct rte_eth_dev *)cb_arg;
+ internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
+
+ if (!bonded_ethdev->data->dev_started ||
+ !internals->link_status_polling_enabled)
+ return;
+
+ /* If device is currently being configured then don't check slaves link
+ * status, wait until next period */
+ if (rte_spinlock_trylock(&internals->lock)) {
+ if (internals->slave_count > 0)
+ polling_slave_found = 0;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ if (!internals->slaves[i].link_status_poll_enabled)
+ continue;
+
+ slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
+ polling_slave_found = 1;
+
+ /* Update slave link status */
+ (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
+ internals->slaves[i].link_status_wait_to_complete);
+
+ /* if link status has changed since last checked then call lsc
+ * event callback */
+ if (slave_ethdev->data->dev_link.link_status !=
+ internals->slaves[i].last_link_status) {
+ internals->slaves[i].last_link_status =
+ slave_ethdev->data->dev_link.link_status;
+
+ bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
+ RTE_ETH_EVENT_INTR_LSC,
+ &bonded_ethdev->data->port_id,
+ NULL);
+ }
+ }
+ rte_spinlock_unlock(&internals->lock);
+ }
+
+ if (polling_slave_found)
+ /* Set alarm to continue monitoring link status of slave ethdev's */
+ rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
+ bond_ethdev_slave_link_status_change_monitor, cb_arg);
+}
+
+static int
+bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
+{
+ void (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
+
+ struct bond_dev_private *bond_ctx;
+ struct rte_eth_link slave_link;
+
+ uint32_t idx;
+
+ bond_ctx = ethdev->data->dev_private;
+
+ ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+
+ if (ethdev->data->dev_started == 0 ||
+ bond_ctx->active_slave_count == 0) {
+ ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
+ return 0;
+ }
+
+ ethdev->data->dev_link.link_status = ETH_LINK_UP;
+
+ if (wait_to_complete)
+ link_update = rte_eth_link_get;
+ else
+ link_update = rte_eth_link_get_nowait;
+
+ switch (bond_ctx->mode) {
+ case BONDING_MODE_BROADCAST:
+ /**
+ * Setting link speed to UINT32_MAX to ensure we pick up the
+ * value of the first active slave
+ */
+ ethdev->data->dev_link.link_speed = UINT32_MAX;
+
+ /**
+ * link speed is minimum value of all the slaves link speed as
+ * packet loss will occur on this slave if transmission at rates
+ * greater than this are attempted
+ */
+ for (idx = 1; idx < bond_ctx->active_slave_count; idx++) {
+ link_update(bond_ctx->active_slaves[0], &slave_link);
+
+ if (slave_link.link_speed <
+ ethdev->data->dev_link.link_speed)
+ ethdev->data->dev_link.link_speed =
+ slave_link.link_speed;
+ }
+ break;
+ case BONDING_MODE_ACTIVE_BACKUP:
+ /* Current primary slave */
+ link_update(bond_ctx->current_primary_port, &slave_link);
+
+ ethdev->data->dev_link.link_speed = slave_link.link_speed;
+ break;
+ case BONDING_MODE_8023AD:
+ ethdev->data->dev_link.link_autoneg =
+ bond_ctx->mode4.slave_link.link_autoneg;
+ ethdev->data->dev_link.link_duplex =
+ bond_ctx->mode4.slave_link.link_duplex;
+ /* fall through to update link speed */
+ case BONDING_MODE_ROUND_ROBIN:
+ case BONDING_MODE_BALANCE:
+ case BONDING_MODE_TLB:
+ case BONDING_MODE_ALB:
+ default:
+ /**
+ * In theses mode the maximum theoretical link speed is the sum
+ * of all the slaves
+ */
+ ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+
+ for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
+ link_update(bond_ctx->active_slaves[idx], &slave_link);
+
+ ethdev->data->dev_link.link_speed +=
+ slave_link.link_speed;
+ }
+ }
+
+
+ return 0;
+}
+
+
+static int
+bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_eth_stats slave_stats;
+ int i, j;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
+
+ stats->ipackets += slave_stats.ipackets;
+ stats->opackets += slave_stats.opackets;
+ stats->ibytes += slave_stats.ibytes;
+ stats->obytes += slave_stats.obytes;
+ stats->imissed += slave_stats.imissed;
+ stats->ierrors += slave_stats.ierrors;
+ stats->oerrors += slave_stats.oerrors;
+ stats->rx_nombuf += slave_stats.rx_nombuf;
+
+ for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
+ stats->q_ipackets[j] += slave_stats.q_ipackets[j];
+ stats->q_opackets[j] += slave_stats.q_opackets[j];
+ stats->q_ibytes[j] += slave_stats.q_ibytes[j];
+ stats->q_obytes[j] += slave_stats.q_obytes[j];
+ stats->q_errors[j] += slave_stats.q_errors[j];
+ }
+
+ }
+
+ return 0;
+}
+
+static void
+bond_ethdev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+
+ for (i = 0; i < internals->slave_count; i++)
+ rte_eth_stats_reset(internals->slaves[i].port_id);
+}
+
+static void
+bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct bond_dev_private *internals = eth_dev->data->dev_private;
+ int i;
+
+ internals->promiscuous_en = 1;
+
+ switch (internals->mode) {
+ /* Promiscuous mode is propagated to all slaves */
+ case BONDING_MODE_ROUND_ROBIN:
+ case BONDING_MODE_BALANCE:
+ case BONDING_MODE_BROADCAST:
+ for (i = 0; i < internals->slave_count; i++)
+ rte_eth_promiscuous_enable(internals->slaves[i].port_id);
+ break;
+ /* In mode4 promiscus mode is managed when slave is added/removed */
+ case BONDING_MODE_8023AD:
+ break;
+ /* Promiscuous mode is propagated only to primary slave */
+ case BONDING_MODE_ACTIVE_BACKUP:
+ case BONDING_MODE_TLB:
+ case BONDING_MODE_ALB:
+ default:
+ rte_eth_promiscuous_enable(internals->current_primary_port);
+ }
+}
+
+static void
+bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+
+ internals->promiscuous_en = 0;
+
+ switch (internals->mode) {
+ /* Promiscuous mode is propagated to all slaves */
+ case BONDING_MODE_ROUND_ROBIN:
+ case BONDING_MODE_BALANCE:
+ case BONDING_MODE_BROADCAST:
+ for (i = 0; i < internals->slave_count; i++)
+ rte_eth_promiscuous_disable(internals->slaves[i].port_id);
+ break;
+ /* In mode4 promiscus mode is set managed when slave is added/removed */
+ case BONDING_MODE_8023AD:
+ break;
+ /* Promiscuous mode is propagated only to primary slave */
+ case BONDING_MODE_ACTIVE_BACKUP:
+ case BONDING_MODE_TLB:
+ case BONDING_MODE_ALB:
+ default:
+ rte_eth_promiscuous_disable(internals->current_primary_port);
+ }
+}
+
+static void
+bond_ethdev_delayed_lsc_propagation(void *arg)
+{
+ if (arg == NULL)
+ return;
+
+ _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
+int
+bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
+ void *param, void *ret_param __rte_unused)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+ struct bond_dev_private *internals;
+ struct rte_eth_link link;
+ int rc = -1;
+
+ int i, valid_slave = 0;
+ uint8_t active_pos;
+ uint8_t lsc_flag = 0;
+
+ if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
+ return rc;
+
+ bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
+
+ if (check_for_bonded_ethdev(bonded_eth_dev))
+ return rc;
+
+ internals = bonded_eth_dev->data->dev_private;
+
+ /* If the device isn't started don't handle interrupts */
+ if (!bonded_eth_dev->data->dev_started)
+ return rc;
+
+ /* verify that port_id is a valid slave of bonded port */
+ for (i = 0; i < internals->slave_count; i++) {
+ if (internals->slaves[i].port_id == port_id) {
+ valid_slave = 1;
+ break;
+ }
+ }
+
+ if (!valid_slave)
+ return rc;
+
+ /* Synchronize lsc callback parallel calls either by real link event
+ * from the slaves PMDs or by the bonding PMD itself.
+ */
+ rte_spinlock_lock(&internals->lsc_lock);
+
+ /* Search for port in active port list */
+ active_pos = find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, port_id);
+
+ rte_eth_link_get_nowait(port_id, &link);
+ if (link.link_status) {
+ if (active_pos < internals->active_slave_count)
+ goto link_update;
+
+ /* if no active slave ports then set this port to be primary port */
+ if (internals->active_slave_count < 1) {
+ /* If first active slave, then change link status */
+ bonded_eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ internals->current_primary_port = port_id;
+ lsc_flag = 1;
+
+ mac_address_slaves_update(bonded_eth_dev);
+ }
+
+ /* check link state properties if bonded link is up*/
+ if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
+ if (link_properties_valid(bonded_eth_dev, &link) != 0)
+ RTE_BOND_LOG(ERR, "Invalid link properties "
+ "for slave %d in bonding mode %d",
+ port_id, internals->mode);
+ } else {
+ /* inherit slave link properties */
+ link_properties_set(bonded_eth_dev, &link);
+ }
+
+ activate_slave(bonded_eth_dev, port_id);
+
+ /* If user has defined the primary port then default to using it */
+ if (internals->user_defined_primary_port &&
+ internals->primary_port == port_id)
+ bond_ethdev_primary_set(internals, port_id);
+ } else {
+ if (active_pos == internals->active_slave_count)
+ goto link_update;
+
+ /* Remove from active slave list */
+ deactivate_slave(bonded_eth_dev, port_id);
+
+ if (internals->active_slave_count < 1)
+ lsc_flag = 1;
+
+ /* Update primary id, take first active slave from list or if none
+ * available set to -1 */
+ if (port_id == internals->current_primary_port) {
+ if (internals->active_slave_count > 0)
+ bond_ethdev_primary_set(internals,
+ internals->active_slaves[0]);
+ else
+ internals->current_primary_port = internals->primary_port;
+ }
+ }
+
+link_update:
+ /**
+ * Update bonded device link properties after any change to active
+ * slaves
+ */
+ bond_ethdev_link_update(bonded_eth_dev, 0);
+
+ if (lsc_flag) {
+ /* Cancel any possible outstanding interrupts if delays are enabled */
+ if (internals->link_up_delay_ms > 0 ||
+ internals->link_down_delay_ms > 0)
+ rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
+ bonded_eth_dev);
+
+ if (bonded_eth_dev->data->dev_link.link_status) {
+ if (internals->link_up_delay_ms > 0)
+ rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
+ bond_ethdev_delayed_lsc_propagation,
+ (void *)bonded_eth_dev);
+ else
+ _rte_eth_dev_callback_process(bonded_eth_dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+
+ } else {
+ if (internals->link_down_delay_ms > 0)
+ rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
+ bond_ethdev_delayed_lsc_propagation,
+ (void *)bonded_eth_dev);
+ else
+ _rte_eth_dev_callback_process(bonded_eth_dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+ }
+
+ rte_spinlock_unlock(&internals->lsc_lock);
+
+ return rc;
+}
+
+static int
+bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
+{
+ unsigned i, j;
+ int result = 0;
+ int slave_reta_size;
+ unsigned reta_count;
+ struct bond_dev_private *internals = dev->data->dev_private;
+
+ if (reta_size != internals->reta_size)
+ return -EINVAL;
+
+ /* Copy RETA table */
+ reta_count = reta_size / RTE_RETA_GROUP_SIZE;
+
+ for (i = 0; i < reta_count; i++) {
+ internals->reta_conf[i].mask = reta_conf[i].mask;
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ if ((reta_conf[i].mask >> j) & 0x01)
+ internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
+ }
+
+ /* Fill rest of array */
+ for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
+ memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
+ sizeof(internals->reta_conf[0]) * reta_count);
+
+ /* Propagate RETA over slaves */
+ for (i = 0; i < internals->slave_count; i++) {
+ slave_reta_size = internals->slaves[i].reta_size;
+ result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
+ &internals->reta_conf[0], slave_reta_size);
+ if (result < 0)
+ return result;
+ }
+
+ return 0;
+}
+
+static int
+bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
+{
+ int i, j;
+ struct bond_dev_private *internals = dev->data->dev_private;
+
+ if (reta_size != internals->reta_size)
+ return -EINVAL;
+
+ /* Copy RETA table */
+ for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ if ((reta_conf[i].mask >> j) & 0x01)
+ reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
+
+ return 0;
+}
+
+static int
+bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ int i, result = 0;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_eth_rss_conf bond_rss_conf;
+
+ memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
+
+ bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
+
+ if (bond_rss_conf.rss_hf != 0)
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
+
+ if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
+ sizeof(internals->rss_key)) {
+ if (bond_rss_conf.rss_key_len == 0)
+ bond_rss_conf.rss_key_len = 40;
+ internals->rss_key_len = bond_rss_conf.rss_key_len;
+ memcpy(internals->rss_key, bond_rss_conf.rss_key,
+ internals->rss_key_len);
+ }
+
+ for (i = 0; i < internals->slave_count; i++) {
+ result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
+ &bond_rss_conf);
+ if (result < 0)
+ return result;
+ }
+
+ return 0;
+}
+
+static int
+bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+
+ rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+ rss_conf->rss_key_len = internals->rss_key_len;
+ if (rss_conf->rss_key)
+ memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
+
+ return 0;
+}
+
+static int
+bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct rte_eth_dev *slave_eth_dev;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int ret, i;
+
+ rte_spinlock_lock(&internals->lock);
+
+ for (i = 0; i < internals->slave_count; i++) {
+ slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
+ if (*slave_eth_dev->dev_ops->mtu_set == NULL) {
+ rte_spinlock_unlock(&internals->lock);
+ return -ENOTSUP;
+ }
+ }
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu);
+ if (ret < 0) {
+ rte_spinlock_unlock(&internals->lock);
+ return ret;
+ }
+ }
+
+ rte_spinlock_unlock(&internals->lock);
+ return 0;
+}
+
+static int
+bond_ethdev_mac_address_set(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ if (mac_address_set(dev, addr)) {
+ RTE_BOND_LOG(ERR, "Failed to update MAC address");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
+ enum rte_filter_type type, enum rte_filter_op op, void *arg)
+{
+ if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
+ *(const void **)arg = &bond_flow_ops;
+ return 0;
+ }
+ return -ENOTSUP;
+}
+
+static int
+bond_ethdev_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ __rte_unused uint32_t index, uint32_t vmdq)
+{
+ struct rte_eth_dev *slave_eth_dev;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int ret, i;
+
+ rte_spinlock_lock(&internals->lock);
+
+ for (i = 0; i < internals->slave_count; i++) {
+ slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
+ if (*slave_eth_dev->dev_ops->mac_addr_add == NULL ||
+ *slave_eth_dev->dev_ops->mac_addr_remove == NULL) {
+ ret = -ENOTSUP;
+ goto end;
+ }
+ }
+
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id,
+ mac_addr, vmdq);
+ if (ret < 0) {
+ /* rollback */
+ for (i--; i >= 0; i--)
+ rte_eth_dev_mac_addr_remove(
+ internals->slaves[i].port_id, mac_addr);
+ goto end;
+ }
+ }
+
+ ret = 0;
+end:
+ rte_spinlock_unlock(&internals->lock);
+ return ret;
+}
+
+static void
+bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct rte_eth_dev *slave_eth_dev;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+
+ rte_spinlock_lock(&internals->lock);
+
+ for (i = 0; i < internals->slave_count; i++) {
+ slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
+ if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL)
+ goto end;
+ }
+
+ struct ether_addr *mac_addr = &dev->data->mac_addrs[index];
+
+ for (i = 0; i < internals->slave_count; i++)
+ rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
+ mac_addr);
+
+end:
+ rte_spinlock_unlock(&internals->lock);
+}
+
+const struct eth_dev_ops default_dev_ops = {
+ .dev_start = bond_ethdev_start,
+ .dev_stop = bond_ethdev_stop,
+ .dev_close = bond_ethdev_close,
+ .dev_configure = bond_ethdev_configure,
+ .dev_infos_get = bond_ethdev_info,
+ .vlan_filter_set = bond_ethdev_vlan_filter_set,
+ .rx_queue_setup = bond_ethdev_rx_queue_setup,
+ .tx_queue_setup = bond_ethdev_tx_queue_setup,
+ .rx_queue_release = bond_ethdev_rx_queue_release,
+ .tx_queue_release = bond_ethdev_tx_queue_release,
+ .link_update = bond_ethdev_link_update,
+ .stats_get = bond_ethdev_stats_get,
+ .stats_reset = bond_ethdev_stats_reset,
+ .promiscuous_enable = bond_ethdev_promiscuous_enable,
+ .promiscuous_disable = bond_ethdev_promiscuous_disable,
+ .reta_update = bond_ethdev_rss_reta_update,
+ .reta_query = bond_ethdev_rss_reta_query,
+ .rss_hash_update = bond_ethdev_rss_hash_update,
+ .rss_hash_conf_get = bond_ethdev_rss_hash_conf_get,
+ .mtu_set = bond_ethdev_mtu_set,
+ .mac_addr_set = bond_ethdev_mac_address_set,
+ .mac_addr_add = bond_ethdev_mac_addr_add,
+ .mac_addr_remove = bond_ethdev_mac_addr_remove,
+ .filter_ctrl = bond_filter_ctrl
+};
+
+static int
+bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
+{
+ const char *name = rte_vdev_device_name(dev);
+ uint8_t socket_id = dev->device.numa_node;
+ struct bond_dev_private *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ uint32_t vlan_filter_bmp_size;
+
+ /* now do all data allocation - for eth_dev structure, dummy pci driver
+ * and internal (private) data
+ */
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
+ if (eth_dev == NULL) {
+ RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
+ goto err;
+ }
+
+ internals = eth_dev->data->dev_private;
+ eth_dev->data->nb_rx_queues = (uint16_t)1;
+ eth_dev->data->nb_tx_queues = (uint16_t)1;
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN *
+ BOND_MAX_MAC_ADDRS, 0, socket_id);
+ if (eth_dev->data->mac_addrs == NULL) {
+ RTE_BOND_LOG(ERR,
+ "Failed to allocate %u bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
+ goto err;
+ }
+
+ eth_dev->dev_ops = &default_dev_ops;
+ eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+
+ rte_spinlock_init(&internals->lock);
+ rte_spinlock_init(&internals->lsc_lock);
+
+ internals->port_id = eth_dev->data->port_id;
+ internals->mode = BONDING_MODE_INVALID;
+ internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
+ internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
+ internals->burst_xmit_hash = burst_xmit_l2_hash;
+ internals->user_defined_mac = 0;
+
+ internals->link_status_polling_enabled = 0;
+
+ internals->link_status_polling_interval_ms =
+ DEFAULT_POLLING_INTERVAL_10_MS;
+ internals->link_down_delay_ms = 0;
+ internals->link_up_delay_ms = 0;
+
+ internals->slave_count = 0;
+ internals->active_slave_count = 0;
+ internals->rx_offload_capa = 0;
+ internals->tx_offload_capa = 0;
+ internals->rx_queue_offload_capa = 0;
+ internals->tx_queue_offload_capa = 0;
+ internals->candidate_max_rx_pktlen = 0;
+ internals->max_rx_pktlen = 0;
+
+ /* Initially allow to choose any offload type */
+ internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+
+ memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
+ memset(internals->slaves, 0, sizeof(internals->slaves));
+
+ TAILQ_INIT(&internals->flow_list);
+ internals->flow_isolated_valid = 0;
+
+ /* Set mode 4 default configuration */
+ bond_mode_8023ad_setup(eth_dev, NULL);
+ if (bond_ethdev_mode_set(eth_dev, mode)) {
+ RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
+ eth_dev->data->port_id, mode);
+ goto err;
+ }
+
+ vlan_filter_bmp_size =
+ rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
+ internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
+ RTE_CACHE_LINE_SIZE);
+ if (internals->vlan_filter_bmpmem == NULL) {
+ RTE_BOND_LOG(ERR,
+ "Failed to allocate vlan bitmap for bonded device %u",
+ eth_dev->data->port_id);
+ goto err;
+ }
+
+ internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
+ internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
+ if (internals->vlan_filter_bmp == NULL) {
+ RTE_BOND_LOG(ERR,
+ "Failed to init vlan bitmap for bonded device %u",
+ eth_dev->data->port_id);
+ rte_free(internals->vlan_filter_bmpmem);
+ goto err;
+ }
+
+ return eth_dev->data->port_id;
+
+err:
+ rte_free(internals);
+ if (eth_dev != NULL) {
+ rte_free(eth_dev->data->mac_addrs);
+ rte_eth_dev_release_port(eth_dev);
+ }
+ return -1;
+}
+
+static int
+bond_probe(struct rte_vdev_device *dev)
+{
+ const char *name;
+ struct bond_dev_private *internals;
+ struct rte_kvargs *kvlist;
+ uint8_t bonding_mode, socket_id/*, agg_mode*/;
+ int arg_count, port_id;
+ uint8_t agg_mode;
+ struct rte_eth_dev *eth_dev;
+
+ if (!dev)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(dev);
+ RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ RTE_BOND_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &default_dev_ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
+ pmd_bond_init_valid_arguments);
+ if (kvlist == NULL)
+ return -1;
+
+ /* Parse link bonding mode */
+ if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
+ if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
+ &bond_ethdev_parse_slave_mode_kvarg,
+ &bonding_mode) != 0) {
+ RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s",
+ name);
+ goto parse_error;
+ }
+ } else {
+ RTE_BOND_LOG(ERR, "Mode must be specified only once for bonded "
+ "device %s", name);
+ goto parse_error;
+ }
+
+ /* Parse socket id to create bonding device on */
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
+ if (arg_count == 1) {
+ if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
+ &bond_ethdev_parse_socket_id_kvarg, &socket_id)
+ != 0) {
+ RTE_BOND_LOG(ERR, "Invalid socket Id specified for "
+ "bonded device %s", name);
+ goto parse_error;
+ }
+ } else if (arg_count > 1) {
+ RTE_BOND_LOG(ERR, "Socket Id can be specified only once for "
+ "bonded device %s", name);
+ goto parse_error;
+ } else {
+ socket_id = rte_socket_id();
+ }
+
+ dev->device.numa_node = socket_id;
+
+ /* Create link bonding eth device */
+ port_id = bond_alloc(dev, bonding_mode);
+ if (port_id < 0) {
+ RTE_BOND_LOG(ERR, "Failed to create socket %s in mode %u on "
+ "socket %u.", name, bonding_mode, socket_id);
+ goto parse_error;
+ }
+ internals = rte_eth_devices[port_id].data->dev_private;
+ internals->kvlist = kvlist;
+
+ rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
+
+ if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
+ if (rte_kvargs_process(kvlist,
+ PMD_BOND_AGG_MODE_KVARG,
+ &bond_ethdev_parse_slave_agg_mode_kvarg,
+ &agg_mode) != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to parse agg selection mode for bonded device %s",
+ name);
+ goto parse_error;
+ }
+
+ if (internals->mode == BONDING_MODE_8023AD)
+ rte_eth_bond_8023ad_agg_selection_set(port_id,
+ agg_mode);
+ } else {
+ rte_eth_bond_8023ad_agg_selection_set(port_id, AGG_STABLE);
+ }
+
+ RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on "
+ "socket %u.", name, port_id, bonding_mode, socket_id);
+ return 0;
+
+parse_error:
+ rte_kvargs_free(kvlist);
+
+ return -1;
+}
+
+static int
+bond_remove(struct rte_vdev_device *dev)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bond_dev_private *internals;
+ const char *name;
+
+ if (!dev)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(dev);
+ RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
+
+ /* now free all data allocation - for eth_dev structure,
+ * dummy pci driver and internal (private) data
+ */
+
+ /* find an ethdev entry */
+ eth_dev = rte_eth_dev_allocated(name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ RTE_ASSERT(eth_dev->device == &dev->device);
+
+ internals = eth_dev->data->dev_private;
+ if (internals->slave_count != 0)
+ return -EBUSY;
+
+ if (eth_dev->data->dev_started == 1) {
+ bond_ethdev_stop(eth_dev);
+ bond_ethdev_close(eth_dev);
+ }
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ internals = eth_dev->data->dev_private;
+ /* Try to release mempool used in mode6. If the bond
+ * device is not mode6, free the NULL is not problem.
+ */
+ rte_mempool_free(internals->mode6.mempool);
+ rte_bitmap_free(internals->vlan_filter_bmp);
+ rte_free(internals->vlan_filter_bmpmem);
+ rte_free(eth_dev->data->dev_private);
+ rte_free(eth_dev->data->mac_addrs);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+/* this part will resolve the slave portids after all the other pdev and vdev
+ * have been allocated */
+static int
+bond_ethdev_configure(struct rte_eth_dev *dev)
+{
+ const char *name = dev->device->name;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_kvargs *kvlist = internals->kvlist;
+ int arg_count;
+ uint16_t port_id = dev - rte_eth_devices;
+ uint8_t agg_mode;
+
+ static const uint8_t default_rss_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
+ 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
+ 0xBE, 0xAC, 0x01, 0xFA
+ };
+
+ unsigned i, j;
+
+ /* If RSS is enabled, fill table and key with default values */
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
+ memcpy(internals->rss_key, default_rss_key, 40);
+
+ for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
+ internals->reta_conf[i].mask = ~0LL;
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
+ }
+ }
+
+ /* set the max_rx_pktlen */
+ internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
+
+ /*
+ * if no kvlist, it means that this bonded device has been created
+ * through the bonding api.
+ */
+ if (!kvlist)
+ return 0;
+
+ /* Parse MAC address for bonded device */
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
+ if (arg_count == 1) {
+ struct ether_addr bond_mac;
+
+ if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
+ &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
+ RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s",
+ name);
+ return -1;
+ }
+
+ /* Set MAC address */
+ if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set mac address on bonded device %s",
+ name);
+ return -1;
+ }
+ } else if (arg_count > 1) {
+ RTE_BOND_LOG(ERR,
+ "MAC address can be specified only once for bonded device %s",
+ name);
+ return -1;
+ }
+
+ /* Parse/set balance mode transmit policy */
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
+ if (arg_count == 1) {
+ uint8_t xmit_policy;
+
+ if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
+ &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
+ 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid xmit policy specified for bonded device %s",
+ name);
+ return -1;
+ }
+
+ /* Set balance mode transmit policy*/
+ if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set balance xmit policy on bonded device %s",
+ name);
+ return -1;
+ }
+ } else if (arg_count > 1) {
+ RTE_BOND_LOG(ERR,
+ "Transmit policy can be specified only once for bonded device %s",
+ name);
+ return -1;
+ }
+
+ if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
+ if (rte_kvargs_process(kvlist,
+ PMD_BOND_AGG_MODE_KVARG,
+ &bond_ethdev_parse_slave_agg_mode_kvarg,
+ &agg_mode) != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to parse agg selection mode for bonded device %s",
+ name);
+ }
+ if (internals->mode == BONDING_MODE_8023AD)
+ rte_eth_bond_8023ad_agg_selection_set(port_id,
+ agg_mode);
+ }
+
+ /* Parse/add slave ports to bonded device */
+ if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
+ struct bond_ethdev_slave_ports slave_ports;
+ unsigned i;
+
+ memset(&slave_ports, 0, sizeof(slave_ports));
+
+ if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
+ &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to parse slave ports for bonded device %s",
+ name);
+ return -1;
+ }
+
+ for (i = 0; i < slave_ports.slave_count; i++) {
+ if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to add port %d as slave to bonded device %s",
+ slave_ports.slaves[i], name);
+ }
+ }
+
+ } else {
+ RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name);
+ return -1;
+ }
+
+ /* Parse/set primary slave port id*/
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
+ if (arg_count == 1) {
+ uint16_t primary_slave_port_id;
+
+ if (rte_kvargs_process(kvlist,
+ PMD_BOND_PRIMARY_SLAVE_KVARG,
+ &bond_ethdev_parse_primary_slave_port_id_kvarg,
+ &primary_slave_port_id) < 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid primary slave port id specified for bonded device %s",
+ name);
+ return -1;
+ }
+
+ /* Set balance mode transmit policy*/
+ if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
+ != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set primary slave port %d on bonded device %s",
+ primary_slave_port_id, name);
+ return -1;
+ }
+ } else if (arg_count > 1) {
+ RTE_BOND_LOG(INFO,
+ "Primary slave can be specified only once for bonded device %s",
+ name);
+ return -1;
+ }
+
+ /* Parse link status monitor polling interval */
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
+ if (arg_count == 1) {
+ uint32_t lsc_poll_interval_ms;
+
+ if (rte_kvargs_process(kvlist,
+ PMD_BOND_LSC_POLL_PERIOD_KVARG,
+ &bond_ethdev_parse_time_ms_kvarg,
+ &lsc_poll_interval_ms) < 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid lsc polling interval value specified for bonded"
+ " device %s", name);
+ return -1;
+ }
+
+ if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
+ != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set lsc monitor polling interval (%u ms) on bonded device %s",
+ lsc_poll_interval_ms, name);
+ return -1;
+ }
+ } else if (arg_count > 1) {
+ RTE_BOND_LOG(INFO,
+ "LSC polling interval can be specified only once for bonded"
+ " device %s", name);
+ return -1;
+ }
+
+ /* Parse link up interrupt propagation delay */
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
+ if (arg_count == 1) {
+ uint32_t link_up_delay_ms;
+
+ if (rte_kvargs_process(kvlist,
+ PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
+ &bond_ethdev_parse_time_ms_kvarg,
+ &link_up_delay_ms) < 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid link up propagation delay value specified for"
+ " bonded device %s", name);
+ return -1;
+ }
+
+ /* Set balance mode transmit policy*/
+ if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
+ != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set link up propagation delay (%u ms) on bonded"
+ " device %s", link_up_delay_ms, name);
+ return -1;
+ }
+ } else if (arg_count > 1) {
+ RTE_BOND_LOG(INFO,
+ "Link up propagation delay can be specified only once for"
+ " bonded device %s", name);
+ return -1;
+ }
+
+ /* Parse link down interrupt propagation delay */
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
+ if (arg_count == 1) {
+ uint32_t link_down_delay_ms;
+
+ if (rte_kvargs_process(kvlist,
+ PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
+ &bond_ethdev_parse_time_ms_kvarg,
+ &link_down_delay_ms) < 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid link down propagation delay value specified for"
+ " bonded device %s", name);
+ return -1;
+ }
+
+ /* Set balance mode transmit policy*/
+ if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
+ != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set link down propagation delay (%u ms) on bonded device %s",
+ link_down_delay_ms, name);
+ return -1;
+ }
+ } else if (arg_count > 1) {
+ RTE_BOND_LOG(INFO,
+ "Link down propagation delay can be specified only once for bonded device %s",
+ name);
+ return -1;
+ }
+
+ return 0;
+}
+
+struct rte_vdev_driver pmd_bond_drv = {
+ .probe = bond_probe,
+ .remove = bond_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
+RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
+
+RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
+ "slave=<ifc> "
+ "primary=<ifc> "
+ "mode=[0-6] "
+ "xmit_policy=[l2 | l23 | l34] "
+ "agg_mode=[count | stable | bandwidth] "
+ "socket_id=<int> "
+ "mac=<mac addr> "
+ "lsc_poll_period_ms=<int> "
+ "up_delay=<int> "
+ "down_delay=<int>");
+
+int bond_logtype;
+
+RTE_INIT(bond_init_log)
+{
+ bond_logtype = rte_log_register("pmd.net.bon");
+ if (bond_logtype >= 0)
+ rte_log_set_level(bond_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_private.h b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_private.h
new file mode 100644
index 00000000..43e0e448
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_private.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#ifndef _RTE_ETH_BOND_PRIVATE_H_
+#define _RTE_ETH_BOND_PRIVATE_H_
+
+#include <sys/queue.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_spinlock.h>
+#include <rte_bitmap.h>
+#include <rte_flow_driver.h>
+
+#include "rte_eth_bond.h"
+#include "rte_eth_bond_8023ad_private.h"
+#include "rte_eth_bond_alb.h"
+
+#define PMD_BOND_SLAVE_PORT_KVARG ("slave")
+#define PMD_BOND_PRIMARY_SLAVE_KVARG ("primary")
+#define PMD_BOND_MODE_KVARG ("mode")
+#define PMD_BOND_AGG_MODE_KVARG ("agg_mode")
+#define PMD_BOND_XMIT_POLICY_KVARG ("xmit_policy")
+#define PMD_BOND_SOCKET_ID_KVARG ("socket_id")
+#define PMD_BOND_MAC_ADDR_KVARG ("mac")
+#define PMD_BOND_LSC_POLL_PERIOD_KVARG ("lsc_poll_period_ms")
+#define PMD_BOND_LINK_UP_PROP_DELAY_KVARG ("up_delay")
+#define PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG ("down_delay")
+
+#define PMD_BOND_XMIT_POLICY_LAYER2_KVARG ("l2")
+#define PMD_BOND_XMIT_POLICY_LAYER23_KVARG ("l23")
+#define PMD_BOND_XMIT_POLICY_LAYER34_KVARG ("l34")
+
+extern int bond_logtype;
+
+#define RTE_BOND_LOG(lvl, msg, ...) \
+ rte_log(RTE_LOG_ ## lvl, bond_logtype, \
+ "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+#define BONDING_MODE_INVALID 0xFF
+
+extern const char *pmd_bond_init_valid_arguments[];
+
+extern struct rte_vdev_driver pmd_bond_drv;
+
+extern const struct rte_flow_ops bond_flow_ops;
+
+/** Port Queue Mapping Structure */
+struct bond_rx_queue {
+ uint16_t queue_id;
+ /**< Queue Id */
+ struct bond_dev_private *dev_private;
+ /**< Reference to eth_dev private structure */
+ uint16_t nb_rx_desc;
+ /**< Number of RX descriptors available for the queue */
+ struct rte_eth_rxconf rx_conf;
+ /**< Copy of RX configuration structure for queue */
+ struct rte_mempool *mb_pool;
+ /**< Reference to mbuf pool to use for RX queue */
+};
+
+struct bond_tx_queue {
+ uint16_t queue_id;
+ /**< Queue Id */
+ struct bond_dev_private *dev_private;
+ /**< Reference to dev private structure */
+ uint16_t nb_tx_desc;
+ /**< Number of TX descriptors available for the queue */
+ struct rte_eth_txconf tx_conf;
+ /**< Copy of TX configuration structure for queue */
+};
+
+/** Bonded slave devices structure */
+struct bond_ethdev_slave_ports {
+ uint16_t slaves[RTE_MAX_ETHPORTS]; /**< Slave port id array */
+ uint16_t slave_count; /**< Number of slaves */
+};
+
+struct bond_slave_details {
+ uint16_t port_id;
+
+ uint8_t link_status_poll_enabled;
+ uint8_t link_status_wait_to_complete;
+ uint8_t last_link_status;
+ /**< Port Id of slave eth_dev */
+ struct ether_addr persisted_mac_addr;
+
+ uint16_t reta_size;
+};
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next;
+ /* Slaves flows */
+ struct rte_flow *flows[RTE_MAX_ETHPORTS];
+ /* Flow description for synchronization */
+ struct rte_flow_desc *fd;
+};
+
+typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves);
+
+/** Link Bonding PMD device private configuration Structure */
+struct bond_dev_private {
+ uint16_t port_id; /**< Port Id of Bonded Port */
+ uint8_t mode; /**< Link Bonding Mode */
+
+ rte_spinlock_t lock;
+ rte_spinlock_t lsc_lock;
+
+ uint16_t primary_port; /**< Primary Slave Port */
+ uint16_t current_primary_port; /**< Primary Slave Port */
+ uint16_t user_defined_primary_port;
+ /**< Flag for whether primary port is user defined or not */
+
+ uint8_t balance_xmit_policy;
+ /**< Transmit policy - l2 / l23 / l34 for operation in balance mode */
+ burst_xmit_hash_t burst_xmit_hash;
+ /**< Transmit policy hash function */
+
+ uint8_t user_defined_mac;
+ /**< Flag for whether MAC address is user defined or not */
+ uint8_t promiscuous_en;
+ /**< Enabled/disable promiscuous mode on bonding device */
+
+
+ uint8_t link_status_polling_enabled;
+ uint32_t link_status_polling_interval_ms;
+
+ uint32_t link_down_delay_ms;
+ uint32_t link_up_delay_ms;
+
+ uint16_t nb_rx_queues; /**< Total number of rx queues */
+ uint16_t nb_tx_queues; /**< Total number of tx queues*/
+
+ uint16_t active_slave; /**< Next active_slave to poll */
+ uint16_t active_slave_count; /**< Number of active slaves */
+ uint16_t active_slaves[RTE_MAX_ETHPORTS]; /**< Active slave list */
+
+ uint16_t slave_count; /**< Number of bonded slaves */
+ struct bond_slave_details slaves[RTE_MAX_ETHPORTS];
+ /**< Arary of bonded slaves details */
+
+ struct mode8023ad_private mode4;
+ uint16_t tlb_slaves_order[RTE_MAX_ETHPORTS];
+ /**< TLB active slaves send order */
+ struct mode_alb_private mode6;
+
+ uint64_t rx_offload_capa; /** Rx offload capability */
+ uint64_t tx_offload_capa; /** Tx offload capability */
+ uint64_t rx_queue_offload_capa; /** per queue Rx offload capability */
+ uint64_t tx_queue_offload_capa; /** per queue Tx offload capability */
+
+ /**< List of the configured flows */
+ TAILQ_HEAD(sub_flows, rte_flow) flow_list;
+
+ /**< Flow isolation state */
+ int flow_isolated;
+ int flow_isolated_valid;
+
+ /** Bit mask of RSS offloads, the bit offset also means flow type */
+ uint64_t flow_type_rss_offloads;
+
+ uint16_t reta_size;
+ struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
+ RTE_RETA_GROUP_SIZE];
+
+ uint8_t rss_key[52]; /**< 52-byte hash key buffer. */
+ uint8_t rss_key_len; /**< hash key length in bytes. */
+
+ struct rte_kvargs *kvlist;
+ uint8_t slave_update_idx;
+
+ uint32_t candidate_max_rx_pktlen;
+ uint32_t max_rx_pktlen;
+
+ void *vlan_filter_bmpmem; /* enabled vlan filter bitmap */
+ struct rte_bitmap *vlan_filter_bmp;
+};
+
+extern const struct eth_dev_ops default_dev_ops;
+
+int
+check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev);
+
+int
+check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev);
+
+/* Search given slave array to find position of given id.
+ * Return slave pos or slaves_count if not found. */
+static inline uint16_t
+find_slave_by_id(uint16_t *slaves, uint16_t slaves_count, uint16_t slave_id) {
+
+ uint16_t pos;
+ for (pos = 0; pos < slaves_count; pos++) {
+ if (slave_id == slaves[pos])
+ break;
+ }
+
+ return pos;
+}
+
+int
+valid_port_id(uint16_t port_id);
+
+int
+valid_bonded_port_id(uint16_t port_id);
+
+int
+valid_slave_port_id(uint16_t port_id, uint8_t mode);
+
+void
+deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
+
+void
+activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
+
+void
+link_properties_set(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_link *slave_dev_link);
+int
+link_properties_valid(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_link *slave_dev_link);
+
+int
+mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr);
+
+int
+mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr);
+
+int
+mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev);
+
+int
+slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id);
+
+int
+slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id);
+
+int
+bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode);
+
+int
+slave_configure(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_dev *slave_eth_dev);
+
+void
+slave_remove(struct bond_dev_private *internals,
+ struct rte_eth_dev *slave_eth_dev);
+
+void
+slave_add(struct bond_dev_private *internals,
+ struct rte_eth_dev *slave_eth_dev);
+
+void
+burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves);
+
+void
+burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves);
+
+void
+burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves);
+
+
+void
+bond_ethdev_primary_set(struct bond_dev_private *internals,
+ uint16_t slave_port_id);
+
+int
+bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
+ void *param, void *ret_param);
+
+int
+bond_ethdev_parse_slave_port_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_slave_mode_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_slave_agg_mode_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_socket_id_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_bond_mac_addr_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_time_ms_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+void
+bond_tlb_disable(struct bond_dev_private *internals);
+
+void
+bond_tlb_enable(struct bond_dev_private *internals);
+
+void
+bond_tlb_activate_slave(struct bond_dev_private *internals);
+
+void
+bond_ethdev_stop(struct rte_eth_dev *eth_dev);
+
+void
+bond_ethdev_close(struct rte_eth_dev *dev);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_pmd_bond_version.map b/src/spdk/dpdk/drivers/net/bonding/rte_pmd_bond_version.map
new file mode 100644
index 00000000..03ddb44e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bonding/rte_pmd_bond_version.map
@@ -0,0 +1,55 @@
+DPDK_2.0 {
+ global:
+
+ rte_eth_bond_8023ad_slave_info;
+ rte_eth_bond_active_slaves_get;
+ rte_eth_bond_create;
+ rte_eth_bond_link_monitoring_set;
+ rte_eth_bond_mac_address_reset;
+ rte_eth_bond_mac_address_set;
+ rte_eth_bond_mode_get;
+ rte_eth_bond_mode_set;
+ rte_eth_bond_primary_get;
+ rte_eth_bond_primary_set;
+ rte_eth_bond_slave_add;
+ rte_eth_bond_slave_remove;
+ rte_eth_bond_slaves_get;
+ rte_eth_bond_xmit_policy_get;
+ rte_eth_bond_xmit_policy_set;
+
+ local: *;
+};
+
+DPDK_2.1 {
+ global:
+
+ rte_eth_bond_free;
+
+} DPDK_2.0;
+
+DPDK_16.04 {
+};
+
+DPDK_16.07 {
+ global:
+
+ rte_eth_bond_8023ad_ext_collect;
+ rte_eth_bond_8023ad_ext_collect_get;
+ rte_eth_bond_8023ad_ext_distrib;
+ rte_eth_bond_8023ad_ext_distrib_get;
+ rte_eth_bond_8023ad_ext_slowtx;
+
+} DPDK_16.04;
+
+DPDK_17.08 {
+ global:
+
+ rte_eth_bond_8023ad_dedicated_queues_enable;
+ rte_eth_bond_8023ad_dedicated_queues_disable;
+ rte_eth_bond_8023ad_agg_selection_get;
+ rte_eth_bond_8023ad_agg_selection_set;
+ rte_eth_bond_8023ad_conf_get;
+ rte_eth_bond_8023ad_setup;
+
+
+} DPDK_16.07;
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/Makefile b/src/spdk/dpdk/drivers/net/cxgbe/Makefile
new file mode 100644
index 00000000..5d66c4b3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/Makefile
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2014-2018 Chelsio Communications.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_cxgbe.a
+
+CFLAGS += -I$(SRCDIR)/base/
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_cxgbe_version.map
+
+LIBABIVER := 1
+
+#
+# CFLAGS for gcc/clang
+#
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
+CFLAGS += -Wno-deprecated
+endif
+endif
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings in them
+#
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += sge.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += clip_tbl.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4vf_hw.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/adapter.h b/src/spdk/dpdk/drivers/net/cxgbe/base/adapter.h
new file mode 100644
index 00000000..e98dd218
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/adapter.h
@@ -0,0 +1,829 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+/* This file should not be included directly. Include common.h instead. */
+
+#ifndef __T4_ADAPTER_H__
+#define __T4_ADAPTER_H__
+
+#include <rte_bus_pci.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+#include <rte_rwlock.h>
+#include <rte_ethdev.h>
+
+#include "cxgbe_compat.h"
+#include "t4_regs_values.h"
+#include "cxgbe_ofld.h"
+
+enum {
+ MAX_ETH_QSETS = 64, /* # of Ethernet Tx/Rx queue sets */
+ MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
+};
+
+struct adapter;
+struct sge_rspq;
+
+enum {
+ PORT_RSS_DONE = (1 << 0),
+};
+
+struct port_info {
+ struct adapter *adapter; /* adapter that this port belongs to */
+ struct rte_eth_dev *eth_dev; /* associated rte eth device */
+ struct port_stats stats_base; /* port statistics base */
+ struct link_config link_cfg; /* link configuration info */
+
+ unsigned long flags; /* port related flags */
+ short int xact_addr_filt; /* index of exact MAC address filter */
+
+ u16 viid; /* associated virtual interface id */
+ s8 mdio_addr; /* address of the PHY */
+ u8 port_type; /* firmware port type */
+ u8 mod_type; /* firmware module type */
+ u8 port_id; /* physical port ID */
+ u8 pidx; /* port index for this PF */
+ u8 tx_chan; /* associated channel */
+
+ u8 n_rx_qsets; /* # of rx qsets */
+ u8 n_tx_qsets; /* # of tx qsets */
+ u8 first_qset; /* index of first qset */
+
+ u16 *rss; /* rss table */
+ u8 rss_mode; /* rss mode */
+ u16 rss_size; /* size of VI's RSS table slice */
+ u64 rss_hf; /* RSS Hash Function */
+};
+
+/* Enable or disable autonegotiation. If this is set to enable,
+ * the forced link modes above are completely ignored.
+ */
+#define AUTONEG_DISABLE 0x00
+#define AUTONEG_ENABLE 0x01
+
+enum { /* adapter flags */
+ FULL_INIT_DONE = (1 << 0),
+ USING_MSI = (1 << 1),
+ USING_MSIX = (1 << 2),
+ FW_QUEUE_BOUND = (1 << 3),
+ FW_OK = (1 << 4),
+ CFG_QUEUES = (1 << 5),
+ MASTER_PF = (1 << 6),
+};
+
+struct rx_sw_desc { /* SW state per Rx descriptor */
+ void *buf; /* struct page or mbuf */
+ dma_addr_t dma_addr;
+};
+
+struct sge_fl { /* SGE free-buffer queue state */
+ /* RO fields */
+ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
+
+ dma_addr_t addr; /* bus address of HW ring start */
+ __be64 *desc; /* address of HW Rx descriptor ring */
+
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
+
+ unsigned int cntxt_id; /* SGE relative QID for the free list */
+ unsigned int size; /* capacity of free list */
+
+ unsigned int avail; /* # of available Rx buffers */
+ unsigned int pend_cred; /* new buffers since last FL DB ring */
+ unsigned int cidx; /* consumer index */
+ unsigned int pidx; /* producer index */
+
+ unsigned long alloc_failed; /* # of times buffer allocation failed */
+ unsigned long low; /* # of times momentarily starving */
+};
+
+#define MAX_MBUF_FRAGS (16384 / 512 + 2)
+
+/* A packet gather list */
+struct pkt_gl {
+ union {
+ struct rte_mbuf *mbufs[MAX_MBUF_FRAGS];
+ } /* UNNAMED */;
+ void *va; /* virtual address of first byte */
+ unsigned int nfrags; /* # of fragments */
+ unsigned int tot_len; /* total length of fragments */
+ bool usembufs; /* use mbufs for fragments */
+};
+
+typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl);
+
+struct sge_rspq { /* state for an SGE response queue */
+ struct adapter *adapter; /* adapter that this queue belongs to */
+ struct rte_eth_dev *eth_dev; /* associated rte eth device */
+ struct rte_mempool *mb_pool; /* associated mempool */
+
+ dma_addr_t phys_addr; /* physical address of the ring */
+ __be64 *desc; /* address of HW response ring */
+ const __be64 *cur_desc; /* current descriptor in queue */
+
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
+ struct sge_qstat *stat;
+
+ unsigned int cidx; /* consumer index */
+ unsigned int gts_idx; /* last gts write sent */
+ unsigned int iqe_len; /* entry size */
+ unsigned int size; /* capacity of response queue */
+ int offset; /* offset into current Rx buffer */
+
+ u8 gen; /* current generation bit */
+ u8 intr_params; /* interrupt holdoff parameters */
+ u8 next_intr_params; /* holdoff params for next interrupt */
+ u8 pktcnt_idx; /* interrupt packet threshold */
+ u8 port_id; /* associated port-id */
+ u8 idx; /* queue index within its group */
+ u16 cntxt_id; /* SGE relative QID for the response Q */
+ u16 abs_id; /* absolute SGE id for the response q */
+
+ rspq_handler_t handler; /* associated handler for this response q */
+};
+
+struct sge_eth_rx_stats { /* Ethernet rx queue statistics */
+ u64 pkts; /* # of ethernet packets */
+ u64 rx_bytes; /* # of ethernet bytes */
+ u64 rx_cso; /* # of Rx checksum offloads */
+ u64 vlan_ex; /* # of Rx VLAN extractions */
+ u64 rx_drops; /* # of packets dropped due to no mem */
+};
+
+struct sge_eth_rxq { /* a SW Ethernet Rx queue */
+ struct sge_rspq rspq;
+ struct sge_fl fl;
+ struct sge_eth_rx_stats stats;
+ bool usembufs; /* one ingress packet per mbuf FL buffer */
+} __rte_cache_aligned;
+
+/*
+ * Currently there are two types of coalesce WR. Type 0 needs 48 bytes per
+ * packet (if one sgl is present) and type 1 needs 32 bytes. This means
+ * that type 0 can fit a maximum of 10 packets per WR and type 1 can fit
+ * 15 packets. We need to keep track of the mbuf pointers in a coalesce WR
+ * to be able to free those mbufs when we get completions back from the FW.
+ * Allocating the maximum number of pointers in every tx desc is a waste
+ * of memory resources so we only store 2 pointers per tx desc which should
+ * be enough since a tx desc can only fit 2 packets in the best case
+ * scenario where a packet needs 32 bytes.
+ */
+#define ETH_COALESCE_PKT_NUM 15
+#define ETH_COALESCE_VF_PKT_NUM 7
+#define ETH_COALESCE_PKT_PER_DESC 2
+
+struct tx_eth_coal_desc {
+ struct rte_mbuf *mbuf[ETH_COALESCE_PKT_PER_DESC];
+ struct ulptx_sgl *sgl[ETH_COALESCE_PKT_PER_DESC];
+ int idx;
+};
+
+struct tx_desc {
+ __be64 flit[8];
+};
+
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct rte_mbuf *mbuf;
+ struct ulptx_sgl *sgl;
+ struct tx_eth_coal_desc coalesce;
+};
+
+enum {
+ EQ_STOPPED = (1 << 0),
+};
+
+struct eth_coalesce {
+ unsigned char *ptr;
+ unsigned char type;
+ unsigned int idx;
+ unsigned int len;
+ unsigned int flits;
+ unsigned int max;
+ __u8 ethmacdst[ETHER_ADDR_LEN];
+ __u8 ethmacsrc[ETHER_ADDR_LEN];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+struct sge_txq {
+ struct tx_desc *desc; /* address of HW Tx descriptor ring */
+ struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
+ struct sge_qstat *stat; /* queue status entry */
+ struct eth_coalesce coalesce; /* coalesce info */
+
+ uint64_t phys_addr; /* physical address of the ring */
+
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
+
+ unsigned int cntxt_id; /* SGE relative QID for the Tx Q */
+ unsigned int in_use; /* # of in-use Tx descriptors */
+ unsigned int size; /* # of descriptors */
+ unsigned int cidx; /* SW consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned int dbidx; /* last idx when db ring was done */
+ unsigned int equeidx; /* last sent credit request */
+ unsigned int last_pidx; /* last pidx recorded by tx monitor */
+ unsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */
+ unsigned int abs_id;
+
+ int db_disabled; /* doorbell state */
+ unsigned short db_pidx; /* doorbell producer index */
+ unsigned short db_pidx_inc; /* doorbell producer increment */
+};
+
+struct sge_eth_tx_stats { /* Ethernet tx queue statistics */
+ u64 pkts; /* # of ethernet packets */
+ u64 tx_bytes; /* # of ethernet bytes */
+ u64 tso; /* # of TSO requests */
+ u64 tx_cso; /* # of Tx checksum offloads */
+ u64 vlan_ins; /* # of Tx VLAN insertions */
+ u64 mapping_err; /* # of I/O MMU packet mapping errors */
+ u64 coal_wr; /* # of coalesced wr */
+ u64 coal_pkts; /* # of coalesced packets */
+};
+
+struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
+ struct sge_txq q;
+ struct rte_eth_dev *eth_dev; /* port that this queue belongs to */
+ struct rte_eth_dev_data *data;
+ struct sge_eth_tx_stats stats; /* queue statistics */
+ rte_spinlock_t txq_lock;
+
+ unsigned int flags; /* flags for state of the queue */
+} __rte_cache_aligned;
+
+struct sge_ctrl_txq { /* State for an SGE control Tx queue */
+ struct sge_txq q; /* txq */
+ struct adapter *adapter; /* adapter associated with this queue */
+ rte_spinlock_t ctrlq_lock; /* control queue lock */
+ u8 full; /* the Tx ring is full */
+ u64 txp; /* number of transmits */
+ struct rte_mempool *mb_pool; /* mempool to generate ctrl pkts */
+} __rte_cache_aligned;
+
+struct sge {
+ struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
+ struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
+ struct sge_rspq fw_evtq __rte_cache_aligned;
+ struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
+
+ u16 max_ethqsets; /* # of available Ethernet queue sets */
+ u32 stat_len; /* length of status page at ring end */
+ u32 pktshift; /* padding between CPL & packet data */
+
+ /* response queue interrupt parameters */
+ u16 timer_val[SGE_NTIMERS];
+ u8 counter_val[SGE_NCOUNTERS];
+
+ u32 fl_align; /* response queue message alignment */
+ u32 fl_pg_order; /* large page allocation size */
+ u32 fl_starve_thres; /* Free List starvation threshold */
+};
+
+#define T4_OS_NEEDS_MBOX_LOCKING 1
+
+/*
+ * OS Lock/List primitives for those interfaces in the Common Code which
+ * need this.
+ */
+
+struct mbox_entry {
+ TAILQ_ENTRY(mbox_entry) next;
+};
+
+TAILQ_HEAD(mbox_list, mbox_entry);
+
+struct adapter {
+ struct rte_pci_device *pdev; /* associated rte pci device */
+ struct rte_eth_dev *eth_dev; /* first port's rte eth device */
+ struct adapter_params params; /* adapter parameters */
+ struct port_info *port[MAX_NPORTS];/* ports belonging to this adapter */
+ struct sge sge; /* associated SGE */
+
+ /* support for single-threading access to adapter mailbox registers */
+ struct mbox_list mbox_list;
+ rte_spinlock_t mbox_lock;
+
+ u8 *regs; /* pointer to registers region */
+ u8 *bar2; /* pointer to bar2 region */
+ unsigned long flags; /* adapter flags */
+ unsigned int mbox; /* associated mailbox */
+ unsigned int pf; /* associated physical function id */
+
+ unsigned int vpd_busy;
+ unsigned int vpd_flag;
+
+ int use_unpacked_mode; /* unpacked rx mode state */
+ rte_spinlock_t win0_lock;
+
+ unsigned int clipt_start; /* CLIP table start */
+ unsigned int clipt_end; /* CLIP table end */
+ struct clip_tbl *clipt; /* CLIP table */
+
+ struct tid_info tids; /* Info used to access TID related tables */
+};
+
+/**
+ * t4_os_rwlock_init - initialize rwlock
+ * @lock: the rwlock
+ */
+static inline void t4_os_rwlock_init(rte_rwlock_t *lock)
+{
+ rte_rwlock_init(lock);
+}
+
+/**
+ * t4_os_write_lock - get a write lock
+ * @lock: the rwlock
+ */
+static inline void t4_os_write_lock(rte_rwlock_t *lock)
+{
+ rte_rwlock_write_lock(lock);
+}
+
+/**
+ * t4_os_write_unlock - unlock a write lock
+ * @lock: the rwlock
+ */
+static inline void t4_os_write_unlock(rte_rwlock_t *lock)
+{
+ rte_rwlock_write_unlock(lock);
+}
+
+/**
+ * ethdev2pinfo - return the port_info structure associated with a rte_eth_dev
+ * @dev: the rte_eth_dev
+ *
+ * Return the struct port_info associated with a rte_eth_dev
+ */
+static inline struct port_info *ethdev2pinfo(const struct rte_eth_dev *dev)
+{
+ return (struct port_info *)dev->data->dev_private;
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Return the port_info structure for the port of the given index.
+ */
+static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx)
+{
+ return adap->port[idx];
+}
+
+/**
+ * ethdev2adap - return the adapter structure associated with a rte_eth_dev
+ * @dev: the rte_eth_dev
+ *
+ * Return the struct adapter associated with a rte_eth_dev
+ */
+static inline struct adapter *ethdev2adap(const struct rte_eth_dev *dev)
+{
+ return ethdev2pinfo(dev)->adapter;
+}
+
+#define CXGBE_PCI_REG(reg) rte_read32(reg)
+
+static inline uint64_t cxgbe_read_addr64(volatile void *addr)
+{
+ uint64_t val = CXGBE_PCI_REG(addr);
+ uint64_t val2 = CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4));
+
+ val2 = (uint64_t)(val2 << 32);
+ val += val2;
+ return val;
+}
+
+static inline uint32_t cxgbe_read_addr(volatile void *addr)
+{
+ return CXGBE_PCI_REG(addr);
+}
+
+#define CXGBE_PCI_REG_ADDR(adap, reg) \
+ ((volatile uint32_t *)((char *)(adap)->regs + (reg)))
+
+#define CXGBE_READ_REG(adap, reg) \
+ cxgbe_read_addr(CXGBE_PCI_REG_ADDR((adap), (reg)))
+
+#define CXGBE_READ_REG64(adap, reg) \
+ cxgbe_read_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)))
+
+#define CXGBE_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))
+
+#define CXGBE_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((value), (reg))
+
+#define CXGBE_WRITE_REG(adap, reg, value) \
+ CXGBE_PCI_REG_WRITE(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
+
+#define CXGBE_WRITE_REG_RELAXED(adap, reg, value) \
+ CXGBE_PCI_REG_WRITE_RELAXED(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
+
+static inline uint64_t cxgbe_write_addr64(volatile void *addr, uint64_t val)
+{
+ CXGBE_PCI_REG_WRITE(addr, val);
+ CXGBE_PCI_REG_WRITE(((volatile uint8_t *)(addr) + 4), (val >> 32));
+ return val;
+}
+
+#define CXGBE_WRITE_REG64(adap, reg, value) \
+ cxgbe_write_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
+
+/**
+ * t4_read_reg - read a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 32-bit value of the given HW register.
+ */
+static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
+{
+ u32 val = CXGBE_READ_REG(adapter, reg_addr);
+
+ CXGBE_DEBUG_REG(adapter, "read register 0x%x value 0x%x\n", reg_addr,
+ val);
+ return val;
+}
+
+/**
+ * t4_write_reg - write a HW register with barrier
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given HW register.
+ */
+static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
+{
+ CXGBE_DEBUG_REG(adapter, "setting register 0x%x to 0x%x\n", reg_addr,
+ val);
+ CXGBE_WRITE_REG(adapter, reg_addr, val);
+}
+
+/**
+ * t4_write_reg_relaxed - write a HW register with no barrier
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given HW register.
+ */
+static inline void t4_write_reg_relaxed(struct adapter *adapter, u32 reg_addr,
+ u32 val)
+{
+ CXGBE_DEBUG_REG(adapter, "setting register 0x%x to 0x%x\n", reg_addr,
+ val);
+ CXGBE_WRITE_REG_RELAXED(adapter, reg_addr, val);
+}
+
+/**
+ * t4_read_reg64 - read a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 64-bit value of the given HW register.
+ */
+static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
+{
+ u64 val = CXGBE_READ_REG64(adapter, reg_addr);
+
+ CXGBE_DEBUG_REG(adapter, "64-bit read register %#x value %#llx\n",
+ reg_addr, (unsigned long long)val);
+ return val;
+}
+
+/**
+ * t4_write_reg64 - write a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 64-bit value into the given HW register.
+ */
+static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
+ u64 val)
+{
+ CXGBE_DEBUG_REG(adapter, "setting register %#x to %#llx\n", reg_addr,
+ (unsigned long long)val);
+
+ CXGBE_WRITE_REG64(adapter, reg_addr, val);
+}
+
+#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
+#define PCI_CAPABILITY_LIST 0x34
+/* Offset of first capability list entry */
+#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
+#define PCI_CAP_LIST_ID 0 /* Capability ID */
+#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
+#define PCI_EXP_DEVCTL 0x0008 /* Device control */
+#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
+#define PCI_EXP_DEVCTL_PAYLOAD 0x00E0 /* Max payload */
+#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */
+#define PCI_VPD_ADDR 2 /* Address to access (15 bits!) */
+#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */
+#define PCI_VPD_DATA 4 /* 32-bits of data returned here */
+
+/**
+ * t4_os_pci_write_cfg4 - 32-bit write to PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given register in PCI config space.
+ */
+static inline void t4_os_pci_write_cfg4(struct adapter *adapter, size_t addr,
+ off_t val)
+{
+ u32 val32 = val;
+
+ if (rte_pci_write_config(adapter->pdev, &val32, sizeof(val32),
+ addr) < 0)
+ dev_err(adapter, "Can't write to PCI config space\n");
+}
+
+/**
+ * t4_os_pci_read_cfg4 - read a 32-bit value from PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: where to store the value read
+ *
+ * Read a 32-bit value from the given register in PCI config space.
+ */
+static inline void t4_os_pci_read_cfg4(struct adapter *adapter, size_t addr,
+ u32 *val)
+{
+ if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
+ addr) < 0)
+ dev_err(adapter, "Can't read from PCI config space\n");
+}
+
+/**
+ * t4_os_pci_write_cfg2 - 16-bit write to PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: the value to write
+ *
+ * Write a 16-bit value into the given register in PCI config space.
+ */
+static inline void t4_os_pci_write_cfg2(struct adapter *adapter, size_t addr,
+ off_t val)
+{
+ u16 val16 = val;
+
+ if (rte_pci_write_config(adapter->pdev, &val16, sizeof(val16),
+ addr) < 0)
+ dev_err(adapter, "Can't write to PCI config space\n");
+}
+
+/**
+ * t4_os_pci_read_cfg2 - read a 16-bit value from PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: where to store the value read
+ *
+ * Read a 16-bit value from the given register in PCI config space.
+ */
+static inline void t4_os_pci_read_cfg2(struct adapter *adapter, size_t addr,
+ u16 *val)
+{
+ if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
+ addr) < 0)
+ dev_err(adapter, "Can't read from PCI config space\n");
+}
+
+/**
+ * t4_os_pci_read_cfg - read a 8-bit value from PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: where to store the value read
+ *
+ * Read a 8-bit value from the given register in PCI config space.
+ */
+static inline void t4_os_pci_read_cfg(struct adapter *adapter, size_t addr,
+ u8 *val)
+{
+ if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
+ addr) < 0)
+ dev_err(adapter, "Can't read from PCI config space\n");
+}
+
+/**
+ * t4_os_find_pci_capability - lookup a capability in the PCI capability list
+ * @adapter: the adapter
+ * @cap: the capability
+ *
+ * Return the address of the given capability within the PCI capability list.
+ */
+static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap)
+{
+ u16 status;
+ int ttl = 48;
+ u8 pos = 0;
+ u8 id = 0;
+
+ t4_os_pci_read_cfg2(adapter, PCI_STATUS, &status);
+ if (!(status & PCI_STATUS_CAP_LIST)) {
+ dev_err(adapter, "PCIe capability reading failed\n");
+ return -1;
+ }
+
+ t4_os_pci_read_cfg(adapter, PCI_CAPABILITY_LIST, &pos);
+ while (ttl-- && pos >= 0x40) {
+ pos &= ~3;
+ t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_ID), &id);
+
+ if (id == 0xff)
+ break;
+
+ if (id == cap)
+ return (int)pos;
+
+ t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_NEXT), &pos);
+ }
+ return 0;
+}
+
+/**
+ * t4_os_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @port_idx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW. Called by the
+ * common code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx,
+ u8 hw_addr[])
+{
+ struct port_info *pi = adap2pinfo(adapter, port_idx);
+
+ ether_addr_copy((struct ether_addr *)hw_addr,
+ &pi->eth_dev->data->mac_addrs[0]);
+}
+
+/**
+ * t4_os_lock_init - initialize spinlock
+ * @lock: the spinlock
+ */
+static inline void t4_os_lock_init(rte_spinlock_t *lock)
+{
+ rte_spinlock_init(lock);
+}
+
+/**
+ * t4_os_lock - spin until lock is acquired
+ * @lock: the spinlock
+ */
+static inline void t4_os_lock(rte_spinlock_t *lock)
+{
+ rte_spinlock_lock(lock);
+}
+
+/**
+ * t4_os_unlock - unlock a spinlock
+ * @lock: the spinlock
+ */
+static inline void t4_os_unlock(rte_spinlock_t *lock)
+{
+ rte_spinlock_unlock(lock);
+}
+
+/**
+ * t4_os_trylock - try to get a lock
+ * @lock: the spinlock
+ */
+static inline int t4_os_trylock(rte_spinlock_t *lock)
+{
+ return rte_spinlock_trylock(lock);
+}
+
+/**
+ * t4_os_init_list_head - initialize
+ * @head: head of list to initialize [to empty]
+ */
+static inline void t4_os_init_list_head(struct mbox_list *head)
+{
+ TAILQ_INIT(head);
+}
+
+static inline struct mbox_entry *t4_os_list_first_entry(struct mbox_list *head)
+{
+ return TAILQ_FIRST(head);
+}
+
+/**
+ * t4_os_atomic_add_tail - Enqueue list element atomically onto list
+ * @new: the entry to be addded to the queue
+ * @head: current head of the linked list
+ * @lock: lock to use to guarantee atomicity
+ */
+static inline void t4_os_atomic_add_tail(struct mbox_entry *entry,
+ struct mbox_list *head,
+ rte_spinlock_t *lock)
+{
+ t4_os_lock(lock);
+ TAILQ_INSERT_TAIL(head, entry, next);
+ t4_os_unlock(lock);
+}
+
+/**
+ * t4_os_atomic_list_del - Dequeue list element atomically from list
+ * @entry: the entry to be remove/dequeued from the list.
+ * @lock: the spinlock
+ */
+static inline void t4_os_atomic_list_del(struct mbox_entry *entry,
+ struct mbox_list *head,
+ rte_spinlock_t *lock)
+{
+ t4_os_lock(lock);
+ TAILQ_REMOVE(head, entry, next);
+ t4_os_unlock(lock);
+}
+
+/**
+ * t4_init_completion - initialize completion
+ * @c: the completion context
+ */
+static inline void t4_init_completion(struct t4_completion *c)
+{
+ c->done = 0;
+ t4_os_lock_init(&c->lock);
+}
+
+/**
+ * t4_complete - set completion as done
+ * @c: the completion context
+ */
+static inline void t4_complete(struct t4_completion *c)
+{
+ t4_os_lock(&c->lock);
+ c->done = 1;
+ t4_os_unlock(&c->lock);
+}
+
+/**
+ * cxgbe_port_viid - get the VI id of a port
+ * @dev: the device for the port
+ *
+ * Return the VI id of the given port.
+ */
+static inline unsigned int cxgbe_port_viid(const struct rte_eth_dev *dev)
+{
+ return ethdev2pinfo(dev)->viid;
+}
+
+void *t4_alloc_mem(size_t size);
+void t4_free_mem(void *addr);
+#define t4_os_alloc(_size) t4_alloc_mem((_size))
+#define t4_os_free(_ptr) t4_free_mem((_ptr))
+
+void t4_os_portmod_changed(const struct adapter *adap, int port_id);
+void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
+
+void reclaim_completed_tx(struct sge_txq *q);
+void t4_free_sge_resources(struct adapter *adap);
+void t4_sge_tx_monitor_start(struct adapter *adap);
+void t4_sge_tx_monitor_stop(struct adapter *adap);
+int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+ uint16_t nb_pkts);
+int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf);
+int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl);
+int t4_sge_init(struct adapter *adap);
+int t4vf_sge_init(struct adapter *adap);
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id);
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id);
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,
+ struct rte_eth_dev *eth_dev, int intr_idx,
+ struct sge_fl *fl, rspq_handler_t handler,
+ int cong, struct rte_mempool *mp, int queue_id,
+ int socket_id);
+int t4_sge_eth_txq_start(struct sge_eth_txq *txq);
+int t4_sge_eth_txq_stop(struct sge_eth_txq *txq);
+void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq);
+int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq);
+int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq);
+void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq);
+void t4_sge_eth_clear_queues(struct port_info *pi);
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt);
+int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
+ unsigned int budget, unsigned int *work_done);
+int cxgbe_write_rss(const struct port_info *pi, const u16 *queues);
+int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t flags);
+
+#endif /* __T4_ADAPTER_H__ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/common.h b/src/spdk/dpdk/drivers/net/cxgbe/base/common.h
new file mode 100644
index 00000000..157201da
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/common.h
@@ -0,0 +1,541 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __CHELSIO_COMMON_H
+#define __CHELSIO_COMMON_H
+
+#include "cxgbe_compat.h"
+#include "t4_hw.h"
+#include "t4vf_hw.h"
+#include "t4_chip_type.h"
+#include "t4fw_interface.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CXGBE_PAGE_SIZE RTE_PGSIZE_4K
+
+#define T4_MEMORY_WRITE 0
+#define T4_MEMORY_READ 1
+
+enum {
+ MAX_NPORTS = 4, /* max # of ports */
+};
+
+enum {
+ T5_REGMAP_SIZE = (332 * 1024),
+};
+
+enum {
+ MEMWIN0_APERTURE = 2048,
+ MEMWIN0_BASE = 0x1b800,
+};
+
+enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST };
+
+enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR };
+
+enum cc_pause {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+enum cc_fec {
+ FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */
+ FEC_RS = 1 << 1, /* Reed-Solomon */
+ FEC_BASER_RS = 1 << 2, /* BaseR/Reed-Solomon */
+};
+
+enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
+
+struct port_stats {
+ u64 tx_octets; /* total # of octets in good frames */
+ u64 tx_frames; /* all good frames */
+ u64 tx_bcast_frames; /* all broadcast frames */
+ u64 tx_mcast_frames; /* all multicast frames */
+ u64 tx_ucast_frames; /* all unicast frames */
+ u64 tx_error_frames; /* all error frames */
+
+ u64 tx_frames_64; /* # of Tx frames in a particular range */
+ u64 tx_frames_65_127;
+ u64 tx_frames_128_255;
+ u64 tx_frames_256_511;
+ u64 tx_frames_512_1023;
+ u64 tx_frames_1024_1518;
+ u64 tx_frames_1519_max;
+
+ u64 tx_drop; /* # of dropped Tx frames */
+ u64 tx_pause; /* # of transmitted pause frames */
+ u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */
+ u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */
+ u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */
+ u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */
+ u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */
+ u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */
+ u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */
+ u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */
+
+ u64 rx_octets; /* total # of octets in good frames */
+ u64 rx_frames; /* all good frames */
+ u64 rx_bcast_frames; /* all broadcast frames */
+ u64 rx_mcast_frames; /* all multicast frames */
+ u64 rx_ucast_frames; /* all unicast frames */
+ u64 rx_too_long; /* # of frames exceeding MTU */
+ u64 rx_jabber; /* # of jabber frames */
+ u64 rx_fcs_err; /* # of received frames with bad FCS */
+ u64 rx_len_err; /* # of received frames with length error */
+ u64 rx_symbol_err; /* symbol errors */
+ u64 rx_runt; /* # of short frames */
+
+ u64 rx_frames_64; /* # of Rx frames in a particular range */
+ u64 rx_frames_65_127;
+ u64 rx_frames_128_255;
+ u64 rx_frames_256_511;
+ u64 rx_frames_512_1023;
+ u64 rx_frames_1024_1518;
+ u64 rx_frames_1519_max;
+
+ u64 rx_pause; /* # of received pause frames */
+ u64 rx_ppp0; /* # of received PPP prio 0 frames */
+ u64 rx_ppp1; /* # of received PPP prio 1 frames */
+ u64 rx_ppp2; /* # of received PPP prio 2 frames */
+ u64 rx_ppp3; /* # of received PPP prio 3 frames */
+ u64 rx_ppp4; /* # of received PPP prio 4 frames */
+ u64 rx_ppp5; /* # of received PPP prio 5 frames */
+ u64 rx_ppp6; /* # of received PPP prio 6 frames */
+ u64 rx_ppp7; /* # of received PPP prio 7 frames */
+
+ u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */
+ u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */
+ u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */
+ u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */
+ u64 rx_trunc0; /* buffer-group 0 truncated packets */
+ u64 rx_trunc1; /* buffer-group 1 truncated packets */
+ u64 rx_trunc2; /* buffer-group 2 truncated packets */
+ u64 rx_trunc3; /* buffer-group 3 truncated packets */
+};
+
+struct sge_params {
+ u32 hps; /* host page size for our PF/VF */
+ u32 eq_qpp; /* egress queues/page for our PF/VF */
+ u32 iq_qpp; /* egress queues/page for our PF/VF */
+};
+
+struct tp_params {
+ unsigned int ntxchan; /* # of Tx channels */
+ unsigned int tre; /* log2 of core clocks per TP tick */
+ unsigned int dack_re; /* DACK timer resolution */
+ unsigned int la_mask; /* what events are recorded by TP LA */
+ unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
+
+ u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
+ u32 ingress_config; /* cached TP_INGRESS_CONFIG */
+
+ /* cached TP_OUT_CONFIG compressed error vector
+ * and passing outer header info for encapsulated packets.
+ */
+ int rx_pkt_encap;
+
+ /*
+ * TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
+ * subset of the set of fields which may be present in the Compressed
+ * Filter Tuple portion of filters and TCP TCB connections. The
+ * fields which are present are controlled by the TP_VLAN_PRI_MAP.
+ * Since a variable number of fields may or may not be present, their
+ * shifted field positions within the Compressed Filter Tuple may
+ * vary, or not even be present if the field isn't selected in
+ * TP_VLAN_PRI_MAP. Since some of these fields are needed in various
+ * places we store their offsets here, or a -1 if the field isn't
+ * present.
+ */
+ int vlan_shift;
+ int vnic_shift;
+ int port_shift;
+ int protocol_shift;
+ int ethertype_shift;
+
+ u64 hash_filter_mask;
+};
+
+struct vpd_params {
+ unsigned int cclk;
+};
+
+struct pci_params {
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint32_t vpd_cap_addr;
+ uint16_t speed;
+ uint8_t width;
+};
+
+/*
+ * Firmware device log.
+ */
+struct devlog_params {
+ u32 memtype; /* which memory (EDC0, EDC1, MC) */
+ u32 start; /* start of log in firmware memory */
+ u32 size; /* size of log */
+};
+
+struct arch_specific_params {
+ u8 nchan;
+ u16 mps_rplc_size;
+ u16 vfcount;
+ u32 sge_fl_db;
+ u16 mps_tcam_size;
+};
+
+/*
+ * Global Receive Side Scaling (RSS) parameters in host-native format.
+ */
+struct rss_params {
+ unsigned int mode; /* RSS mode */
+ union {
+ struct {
+ uint synmapen:1; /* SYN Map Enable */
+ uint syn4tupenipv6:1; /* en 4-tuple IPv6 SYNs hash */
+ uint syn2tupenipv6:1; /* en 2-tuple IPv6 SYNs hash */
+ uint syn4tupenipv4:1; /* en 4-tuple IPv4 SYNs hash */
+ uint syn2tupenipv4:1; /* en 2-tuple IPv4 SYNs hash */
+ uint ofdmapen:1; /* Offload Map Enable */
+ uint tnlmapen:1; /* Tunnel Map Enable */
+ uint tnlalllookup:1; /* Tunnel All Lookup */
+ uint hashtoeplitz:1; /* use Toeplitz hash */
+ } basicvirtual;
+ } u;
+};
+
+/*
+ * Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+ unsigned int neq; /* N egress Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+};
+
+/*
+ * Maximum resources provisioned for a PCI VF.
+ */
+struct vf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
+struct adapter_params {
+ struct sge_params sge;
+ struct tp_params tp;
+ struct vpd_params vpd;
+ struct pci_params pci;
+ struct devlog_params devlog;
+ struct rss_params rss;
+ struct pf_resources pfres;
+ struct vf_resources vfres;
+ enum pcie_memwin drv_memwin;
+
+ unsigned int sf_size; /* serial flash size in bytes */
+ unsigned int sf_nsec; /* # of flash sectors */
+
+ unsigned int fw_vers;
+ unsigned int bs_vers;
+ unsigned int tp_vers;
+ unsigned int er_vers;
+
+ unsigned short mtus[NMTUS];
+ unsigned short a_wnd[NCCTRL_WIN];
+ unsigned short b_wnd[NCCTRL_WIN];
+
+ unsigned int mc_size; /* MC memory size */
+ unsigned int cim_la_size;
+
+ unsigned char nports; /* # of ethernet ports */
+ unsigned char portvec;
+
+ unsigned char hash_filter;
+
+ enum chip_type chip; /* chip code */
+ struct arch_specific_params arch; /* chip specific params */
+
+ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
+ u8 fw_caps_support; /* 32-bit Port Capabilities */
+};
+
+/* Firmware Port Capabilities types.
+ */
+typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */
+typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */
+
+enum fw_caps {
+ FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */
+ FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */
+ FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */
+};
+
+struct link_config {
+ fw_port_cap32_t pcaps; /* link capabilities */
+ fw_port_cap32_t acaps; /* advertised capabilities */
+
+ u32 requested_speed; /* speed (Mb/s) user has requested */
+ u32 speed; /* actual link speed (Mb/s) */
+
+ enum cc_pause requested_fc; /* flow control user has requested */
+ enum cc_pause fc; /* actual link flow control */
+
+ enum cc_fec auto_fec; /* Forward Error Correction
+ * "automatic" (IEEE 802.3)
+ */
+ enum cc_fec requested_fec; /* Forward Error Correction requested */
+ enum cc_fec fec; /* Forward Error Correction actual */
+
+ unsigned char autoneg; /* autonegotiating? */
+
+ unsigned char link_ok; /* link up? */
+ unsigned char link_down_rc; /* link down reason */
+};
+
+#include "adapter.h"
+
+void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
+ u32 val);
+int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity,
+ int attempts, int delay, u32 *valp);
+
+static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay)
+{
+ return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+ delay, NULL);
+}
+
+static inline int is_pf4(struct adapter *adap)
+{
+ return adap->pf == 4;
+}
+
+#define for_each_port(adapter, iter) \
+ for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+static inline int is_hashfilter(const struct adapter *adap)
+{
+ return adap->params.hash_filter;
+}
+
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+ unsigned int mask, unsigned int val);
+void t4_intr_enable(struct adapter *adapter);
+void t4_intr_disable(struct adapter *adapter);
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
+ struct link_config *lc);
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+ const unsigned short *alpha, const unsigned short *beta);
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state);
+int t4_fw_bye(struct adapter *adap, unsigned int mbox);
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4vf_fw_reset(struct adapter *adap);
+int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fl_pkt_align(struct adapter *adap);
+int t4vf_fl_pkt_align(struct adapter *adap, u32 sge_control, u32 sge_control2);
+int t4vf_get_vfres(struct adapter *adap);
+int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size,
+ unsigned int cache_line_size,
+ enum chip_type chip_compat);
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+ unsigned int cache_line_size);
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val);
+int t4vf_query_params(struct adapter *adap, unsigned int nparams,
+ const u32 *params, u32 *vals);
+int t4vf_get_dev_params(struct adapter *adap);
+int t4vf_get_vpd_params(struct adapter *adap);
+int t4vf_get_rss_glb_config(struct adapter *adap);
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, const u32 *vals);
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int nparams, const u32 *params,
+ const u32 *val, int timeout);
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ const u32 *val);
+int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
+ unsigned int port, unsigned int pf, unsigned int vf,
+ unsigned int nmac, u8 *mac, unsigned int *rss_size,
+ unsigned int portfunc, unsigned int idstype);
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size);
+int t4_free_vi(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int viid);
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok);
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int idx, const u8 *addr, bool persist, bool add_smt);
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+ unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool rx_en, bool tx_en);
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+ unsigned int pf, unsigned int vf, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
+{
+ return adap->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int us_to_core_ticks(const struct adapter *adap,
+ unsigned int us)
+{
+ return (us * adap->params.vpd.cclk) / 1000;
+}
+
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+ unsigned int ticks)
+{
+ /* add Core Clock / 2 to round ticks to nearest uS */
+ return ((ticks * 1000 + adapter->params.vpd.cclk / 2) /
+ adapter->params.vpd.cclk);
+}
+
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl, bool sleep_ok, int timeout);
+int t4_wr_mbox_meat(struct adapter *adap, int mbox,
+ const void __attribute__((__may_alias__)) *cmd, int size,
+ void *rpl, bool sleep_ok);
+
+static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
+ const void *cmd, int size, void *rpl,
+ int timeout)
+{
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
+ timeout);
+}
+
+int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p);
+
+static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl)
+{
+ return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
+}
+
+static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl)
+{
+ return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
+}
+
+int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool);
+
+static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true);
+}
+
+static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
+}
+
+
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx);
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx);
+
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_pfres(struct adapter *adapter);
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented);
+int t4_flash_cfg_addr(struct adapter *adapter);
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, unsigned int pidx);
+unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx);
+const char *t4_get_port_type_description(enum fw_port_type port_type);
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4vf_get_port_stats(struct adapter *adapter, int pidx,
+ struct port_stats *p);
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset);
+void t4_clr_port_stats(struct adapter *adap, int idx);
+void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
+ fw_port_cap32_t acaps);
+void t4_reset_link_config(struct adapter *adap, int idx);
+int t4_get_version_info(struct adapter *adapter);
+void t4_dump_version_info(struct adapter *adapter);
+int t4_get_flash_params(struct adapter *adapter);
+int t4_get_chip_type(struct adapter *adap, int ver);
+int t4_prep_adapter(struct adapter *adapter);
+int t4vf_prep_adapter(struct adapter *adapter);
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
+int t4vf_port_init(struct adapter *adap);
+int t4_init_rss_mode(struct adapter *adap, int mbox);
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+ int start, int n, const u16 *rspq, unsigned int nrspq);
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq);
+int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ u64 *flags, unsigned int *defq);
+void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw);
+void t4_write_rss_key(struct adapter *adap, u32 *key, int idx);
+void t4_read_rss_key(struct adapter *adap, u32 *key);
+
+enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
+int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
+ unsigned int qtype, u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid);
+
+int t4_init_sge_params(struct adapter *adapter);
+int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel);
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
+unsigned int t4_get_regs_len(struct adapter *adap);
+unsigned int t4vf_get_pf_from_vf(struct adapter *adap);
+void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
+int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
+int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
+int t4_seeprom_wp(struct adapter *adapter, int enable);
+int t4_memory_rw_addr(struct adapter *adap, int win,
+ u32 addr, u32 len, void *hbuf, int dir);
+int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
+ u32 len, void *hbuf, int dir);
+static inline int t4_memory_rw(struct adapter *adap, int win,
+ int mtype, u32 maddr, u32 len,
+ void *hbuf, int dir)
+{
+ return t4_memory_rw_mtype(adap, win, mtype, maddr, len, hbuf, dir);
+}
+fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
+#endif /* __CHELSIO_COMMON_H */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_chip_type.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_chip_type.h
new file mode 100644
index 00000000..c0c5d0b2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_chip_type.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4_CHIP_TYPE_H__
+#define __T4_CHIP_TYPE_H__
+
+/*
+ * All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where:
+ *
+ * V = "4" for T4; "5" for T5, etc. or
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ * PP = adapter product designation
+ *
+ * We use the "version" (V) of the adpater to code the Chip Version above.
+ */
+#define CHELSIO_PCI_ID_VER(devid) ((devid) >> 12)
+#define CHELSIO_PCI_ID_FUNC(devid) (((devid) >> 8) & 0xf)
+#define CHELSIO_PCI_ID_PROD(devid) ((devid) & 0xff)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
+
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A2,
+
+ T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+ T5_FIRST_REV = T5_A0,
+ T5_LAST_REV = T5_A1,
+
+ T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
+ T6_FIRST_REV = T6_A0,
+ T6_LAST_REV = T6_A0,
+};
+
+static inline int is_t4(enum chip_type chip)
+{
+ return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4);
+}
+
+static inline int is_t5(enum chip_type chip)
+{
+ return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5);
+}
+
+static inline int is_t6(enum chip_type chip)
+{
+ return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6);
+}
+#endif /* __T4_CHIP_TYPE_H__ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c
new file mode 100644
index 00000000..31762c9c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c
@@ -0,0 +1,5544 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <netinet/in.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+#include <rte_byteorder.h>
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_regs_values.h"
+#include "t4fw_interface.h"
+
+/**
+ * t4_read_mtu_tbl - returns the values in the HW path MTU table
+ * @adap: the adapter
+ * @mtus: where to store the MTU values
+ * @mtu_log: where to store the MTU base-2 log (may be %NULL)
+ *
+ * Reads the HW path MTU table.
+ */
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
+{
+ u32 v;
+ int i;
+
+ for (i = 0; i < NMTUS; ++i) {
+ t4_write_reg(adap, A_TP_MTU_TABLE,
+ V_MTUINDEX(0xff) | V_MTUVALUE(i));
+ v = t4_read_reg(adap, A_TP_MTU_TABLE);
+ mtus[i] = G_MTUVALUE(v);
+ if (mtu_log)
+ mtu_log[i] = G_MTUWIDTH(v);
+ }
+}
+
+/**
+ * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ * @adap: the adapter
+ * @addr: the indirect TP register address
+ * @mask: specifies the field within the register to modify
+ * @val: new value for the field
+ *
+ * Sets a field of an indirect TP register to the given value.
+ */
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+ unsigned int mask, unsigned int val)
+{
+ t4_write_reg(adap, A_TP_PIO_ADDR, addr);
+ val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
+ t4_write_reg(adap, A_TP_PIO_DATA, val);
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ * t4_load_mtus - write the MTU and congestion control HW tables
+ * @adap: the adapter
+ * @mtus: the values for the MTU table
+ * @alpha: the values for the congestion control alpha parameter
+ * @beta: the values for the congestion control beta parameter
+ *
+ * Write the HW MTU table with the supplied MTUs and the high-speed
+ * congestion control table with the supplied alpha, beta, and MTUs.
+ * We write the two tables together because the additive increments
+ * depend on the MTUs.
+ */
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+ const unsigned short *alpha, const unsigned short *beta)
+{
+ static const unsigned int avg_pkts[NCCTRL_WIN] = {
+ 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+ 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+ 28672, 40960, 57344, 81920, 114688, 163840, 229376
+ };
+
+ unsigned int i, w;
+
+ for (i = 0; i < NMTUS; ++i) {
+ unsigned int mtu = mtus[i];
+ unsigned int log2 = cxgbe_fls(mtu);
+
+ if (!(mtu & ((1 << log2) >> 2))) /* round */
+ log2--;
+ t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
+ V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
+
+ for (w = 0; w < NCCTRL_WIN; ++w) {
+ unsigned int inc;
+
+ inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+ CC_MIN_INCR);
+
+ t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
+ (w << 16) | (beta[w] << 13) | inc);
+ }
+ }
+}
+
+/**
+ * t4_wait_op_done_val - wait until an operation is completed
+ * @adapter: the adapter performing the operation
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
+ */
+int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp)
+{
+ while (1) {
+ u32 val = t4_read_reg(adapter, reg);
+
+ if (!!(val & mask) == polarity) {
+ if (valp)
+ *valp = val;
+ return 0;
+ }
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ udelay(delay);
+ }
+}
+
+/**
+ * t4_set_reg_field - set a register field to a value
+ * @adapter: the adapter to program
+ * @addr: the register address
+ * @mask: specifies the portion of the register to modify
+ * @val: the new value for the register field
+ *
+ * Sets a register field specified by the supplied mask to the
+ * given value.
+ */
+void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
+ u32 val)
+{
+ u32 v = t4_read_reg(adapter, addr) & ~mask;
+
+ t4_write_reg(adapter, addr, v | val);
+ (void)t4_read_reg(adapter, addr); /* flush */
+}
+
+/**
+ * t4_read_indirect - read indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect address
+ * @data_reg: register holding the value of the indirect register
+ * @vals: where the read register values are stored
+ * @nregs: how many indirect registers to read
+ * @start_idx: index of first indirect register to read
+ *
+ * Reads registers that are accessed indirectly through an address/data
+ * register pair.
+ */
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx)
+{
+ while (nregs--) {
+ t4_write_reg(adap, addr_reg, start_idx);
+ *vals++ = t4_read_reg(adap, data_reg);
+ start_idx++;
+ }
+}
+
+/**
+ * t4_write_indirect - write indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect addresses
+ * @data_reg: register holding the value for the indirect registers
+ * @vals: values to write
+ * @nregs: how many indirect registers to write
+ * @start_idx: address of first indirect register to write
+ *
+ * Writes a sequential block of registers that are accessed indirectly
+ * through an address/data register pair.
+ */
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
+{
+ while (nregs--) {
+ t4_write_reg(adap, addr_reg, start_idx++);
+ t4_write_reg(adap, data_reg, *vals++);
+ }
+}
+
+/**
+ * t4_report_fw_error - report firmware error
+ * @adap: the adapter
+ *
+ * The adapter firmware can indicate error conditions to the host.
+ * If the firmware has indicated an error, print out the reason for
+ * the firmware error.
+ */
+static void t4_report_fw_error(struct adapter *adap)
+{
+ static const char * const reason[] = {
+ "Crash", /* PCIE_FW_EVAL_CRASH */
+ "During Device Preparation", /* PCIE_FW_EVAL_PREP */
+ "During Device Configuration", /* PCIE_FW_EVAL_CONF */
+ "During Device Initialization", /* PCIE_FW_EVAL_INIT */
+ "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
+ "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
+ "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
+ "Reserved", /* reserved */
+ };
+ u32 pcie_fw;
+
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ if (pcie_fw & F_PCIE_FW_ERR)
+ pr_err("%s: Firmware reports adapter error: %s\n",
+ __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+ u32 mbox_addr)
+{
+ for ( ; nflit; nflit--, mbox_addr += 8)
+ *rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
+}
+
+/*
+ * Handle a FW assertion reported in a mailbox.
+ */
+static void fw_asrt(struct adapter *adap, u32 mbox_addr)
+{
+ struct fw_debug_cmd asrt;
+
+ get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
+ pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+ asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
+ be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
+}
+
+#define X_CIM_PF_NOACCESS 0xeeeeeeee
+
+/*
+ * If the Host OS Driver needs locking arround accesses to the mailbox, this
+ * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
+ */
+/* makes single-statement usage a bit cleaner ... */
+#ifdef T4_OS_NEEDS_MBOX_LOCKING
+#define T4_OS_MBOX_LOCKING(x) x
+#else
+#define T4_OS_MBOX_LOCKING(x) do {} while (0)
+#endif
+
+/**
+ * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
+ * @adap: the adapter
+ * @mbox: index of the mailbox to use
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
+ * (negative implies @sleep_ok=false)
+ *
+ * Sends the given command to FW through the selected mailbox and waits
+ * for the FW to execute the command. If @rpl is not %NULL it is used to
+ * store the FW's reply to the command. The command and its optional
+ * reply are of the same length. Some FW commands like RESET and
+ * INITIALIZE can take a considerable amount of time to execute.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed we use progressive backoff otherwise we spin.
+ * Note that passing in a negative @timeout is an alternate mechanism
+ * for specifying @sleep_ok=false. This is useful when a higher level
+ * interface allows for specification of @timeout but not @sleep_ok ...
+ *
+ * Returns 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
+ const void __attribute__((__may_alias__)) *cmd,
+ int size, void *rpl, bool sleep_ok, int timeout)
+{
+ /*
+ * We delay in small increments at first in an effort to maintain
+ * responsiveness for simple, fast executing commands but then back
+ * off to larger delays to a maximum retry delay.
+ */
+ static const int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100
+ };
+
+ u32 v;
+ u64 res;
+ int i, ms;
+ unsigned int delay_idx;
+ __be64 *temp = (__be64 *)malloc(size * sizeof(char));
+ __be64 *p = temp;
+ u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
+ u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
+ u32 ctl;
+ struct mbox_entry entry;
+ u32 pcie_fw = 0;
+
+ if (!temp)
+ return -ENOMEM;
+
+ if ((size & 15) || size > MBOX_LEN) {
+ free(temp);
+ return -EINVAL;
+ }
+
+ bzero(p, size);
+ memcpy(p, (const __be64 *)cmd, size);
+
+ /*
+ * If we have a negative timeout, that implies that we can't sleep.
+ */
+ if (timeout < 0) {
+ sleep_ok = false;
+ timeout = -timeout;
+ }
+
+#ifdef T4_OS_NEEDS_MBOX_LOCKING
+ /*
+ * Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /*
+ * If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+ * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ... Also check for a
+ * firmware error which we'll report as a device error.
+ */
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
+ t4_os_atomic_list_del(&entry, &adap->mbox_list,
+ &adap->mbox_lock);
+ t4_report_fw_error(adap);
+ free(temp);
+ return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
+ }
+
+ /*
+ * If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
+ break;
+
+ /*
+ * Delay for a bit before checking again ...
+ */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+ }
+#endif /* T4_OS_NEEDS_MBOX_LOCKING */
+
+ /*
+ * Attempt to gain access to the mailbox.
+ */
+ for (i = 0; i < 4; i++) {
+ ctl = t4_read_reg(adap, ctl_reg);
+ v = G_MBOWNER(ctl);
+ if (v != X_MBOWNER_NONE)
+ break;
+ }
+
+ /*
+ * If we were unable to gain access, dequeue ourselves from the
+ * mailbox atomic access list and report the error to our caller.
+ */
+ if (v != X_MBOWNER_PL) {
+ T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
+ &adap->mbox_list,
+ &adap->mbox_lock));
+ t4_report_fw_error(adap);
+ free(temp);
+ return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
+ }
+
+ /*
+ * If we gain ownership of the mailbox and there's a "valid" message
+ * in it, this is likely an asynchronous error message from the
+ * firmware. So we'll report that and then proceed on with attempting
+ * to issue our own command ... which may well fail if the error
+ * presaged the firmware crashing ...
+ */
+ if (ctl & F_MBMSGVALID) {
+ dev_err(adap, "found VALID command in mbox %u: "
+ "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+ }
+
+ /*
+ * Copy in the new mailbox command and send it on its way ...
+ */
+ for (i = 0; i < size; i += 8, p++)
+ t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
+
+ CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
+ "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+
+ t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
+ t4_read_reg(adap, ctl_reg); /* flush write */
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ /*
+ * Loop waiting for the reply; bail out if we time out or the firmware
+ * reports an error.
+ */
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ msleep(ms);
+ }
+
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ v = t4_read_reg(adap, ctl_reg);
+ if (v == X_CIM_PF_NOACCESS)
+ continue;
+ if (G_MBOWNER(v) == X_MBOWNER_PL) {
+ if (!(v & F_MBMSGVALID)) {
+ t4_write_reg(adap, ctl_reg,
+ V_MBOWNER(X_MBOWNER_NONE));
+ continue;
+ }
+
+ CXGBE_DEBUG_MBOX(adap,
+ "%s: mbox %u: %016llx %016llx %016llx %016llx "
+ "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+
+ CXGBE_DEBUG_MBOX(adap,
+ "command %#x completed in %d ms (%ssleeping)\n",
+ *(const u8 *)cmd,
+ i + ms, sleep_ok ? "" : "non-");
+
+ res = t4_read_reg64(adap, data_reg);
+ if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
+ fw_asrt(adap, data_reg);
+ res = V_FW_CMD_RETVAL(EIO);
+ } else if (rpl) {
+ get_mbox_rpl(adap, rpl, size / 8, data_reg);
+ }
+ t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
+ T4_OS_MBOX_LOCKING(
+ t4_os_atomic_list_del(&entry, &adap->mbox_list,
+ &adap->mbox_lock));
+ free(temp);
+ return -G_FW_CMD_RETVAL((int)res);
+ }
+ }
+
+ /*
+ * We timed out waiting for a reply to our mailbox command. Report
+ * the error and also check to see if the firmware reported any
+ * errors ...
+ */
+ dev_err(adap, "command %#x in mailbox %d timed out\n",
+ *(const u8 *)cmd, mbox);
+ T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
+ &adap->mbox_list,
+ &adap->mbox_lock));
+ t4_report_fw_error(adap);
+ free(temp);
+ return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
+}
+
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+ void *rpl, bool sleep_ok)
+{
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
+ FW_CMD_MAX_TIMEOUT);
+}
+
+/**
+ * t4_get_regs_len - return the size of the chips register set
+ * @adapter: the adapter
+ *
+ * Returns the size of the chip's BAR0 register space.
+ */
+unsigned int t4_get_regs_len(struct adapter *adapter)
+{
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ switch (chip_version) {
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ return T5_REGMAP_SIZE;
+ }
+
+ dev_err(adapter,
+ "Unsupported chip version %d\n", chip_version);
+ return 0;
+}
+
+/**
+ * t4_get_regs - read chip registers into provided buffer
+ * @adap: the adapter
+ * @buf: register buffer
+ * @buf_size: size (in bytes) of register buffer
+ *
+ * If the provided register buffer isn't large enough for the chip's
+ * full register range, the register dump will be truncated to the
+ * register buffer's size.
+ */
+void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
+{
+ static const unsigned int t5_reg_ranges[] = {
+ 0x1008, 0x10c0,
+ 0x10cc, 0x10f8,
+ 0x1100, 0x1100,
+ 0x110c, 0x1148,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11b4,
+ 0x11fc, 0x123c,
+ 0x1280, 0x173c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x3028,
+ 0x3060, 0x30b0,
+ 0x30b8, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
+ 0x5940, 0x59c8,
+ 0x59d0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a70,
+ 0x5a80, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x6000, 0x6020,
+ 0x6028, 0x6040,
+ 0x6058, 0x609c,
+ 0x60a8, 0x614c,
+ 0x7700, 0x7798,
+ 0x77c0, 0x78fc,
+ 0x7b00, 0x7b58,
+ 0x7b60, 0x7b84,
+ 0x7b8c, 0x7c54,
+ 0x7d00, 0x7d38,
+ 0x7d40, 0x7d80,
+ 0x7d8c, 0x7ddc,
+ 0x7de4, 0x7e04,
+ 0x7e10, 0x7e1c,
+ 0x7e24, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e78,
+ 0x7e80, 0x7edc,
+ 0x7ee8, 0x7efc,
+ 0x8dc0, 0x8de0,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e84,
+ 0x8ea0, 0x8f84,
+ 0x8fc0, 0x9058,
+ 0x9060, 0x9060,
+ 0x9068, 0x90f8,
+ 0x9400, 0x9408,
+ 0x9410, 0x9470,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x96f4,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd004,
+ 0xd010, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0x1106c,
+ 0x11074, 0x11088,
+ 0x1109c, 0x1117c,
+ 0x11190, 0x11204,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x19290,
+ 0x193f8, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c08,
+ 0x19c10, 0x19c60,
+ 0x19c94, 0x19ce4,
+ 0x19cf0, 0x19d40,
+ 0x19d50, 0x19d94,
+ 0x19da0, 0x19de8,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e90,
+ 0x19ea0, 0x19f24,
+ 0x19f34, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fb4,
+ 0x19fc4, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a0f8,
+ 0x1a100, 0x1a108,
+ 0x1a114, 0x1a120,
+ 0x1a128, 0x1a130,
+ 0x1a138, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30030,
+ 0x30038, 0x30038,
+ 0x30040, 0x30040,
+ 0x30100, 0x30144,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301d0,
+ 0x30200, 0x30318,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x30828,
+ 0x30834, 0x30834,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309ac,
+ 0x30a00, 0x30a14,
+ 0x30a1c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30a74,
+ 0x30a7c, 0x30afc,
+ 0x30b08, 0x30c24,
+ 0x30d00, 0x30d00,
+ 0x30d08, 0x30d14,
+ 0x30d1c, 0x30d20,
+ 0x30d3c, 0x30d3c,
+ 0x30d48, 0x30d50,
+ 0x31200, 0x3120c,
+ 0x31220, 0x31220,
+ 0x31240, 0x31240,
+ 0x31600, 0x3160c,
+ 0x31a00, 0x31a1c,
+ 0x31e00, 0x31e20,
+ 0x31e38, 0x31e3c,
+ 0x31e80, 0x31e80,
+ 0x31e88, 0x31ea8,
+ 0x31eb0, 0x31eb4,
+ 0x31ec8, 0x31ed4,
+ 0x31fb8, 0x32004,
+ 0x32200, 0x32200,
+ 0x32208, 0x32240,
+ 0x32248, 0x32280,
+ 0x32288, 0x322c0,
+ 0x322c8, 0x322fc,
+ 0x32600, 0x32630,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b10,
+ 0x32b20, 0x32b30,
+ 0x32b40, 0x32b50,
+ 0x32b60, 0x32b70,
+ 0x33000, 0x33028,
+ 0x33030, 0x33048,
+ 0x33060, 0x33068,
+ 0x33070, 0x3309c,
+ 0x330f0, 0x33128,
+ 0x33130, 0x33148,
+ 0x33160, 0x33168,
+ 0x33170, 0x3319c,
+ 0x331f0, 0x33238,
+ 0x33240, 0x33240,
+ 0x33248, 0x33250,
+ 0x3325c, 0x33264,
+ 0x33270, 0x332b8,
+ 0x332c0, 0x332e4,
+ 0x332f8, 0x33338,
+ 0x33340, 0x33340,
+ 0x33348, 0x33350,
+ 0x3335c, 0x33364,
+ 0x33370, 0x333b8,
+ 0x333c0, 0x333e4,
+ 0x333f8, 0x33428,
+ 0x33430, 0x33448,
+ 0x33460, 0x33468,
+ 0x33470, 0x3349c,
+ 0x334f0, 0x33528,
+ 0x33530, 0x33548,
+ 0x33560, 0x33568,
+ 0x33570, 0x3359c,
+ 0x335f0, 0x33638,
+ 0x33640, 0x33640,
+ 0x33648, 0x33650,
+ 0x3365c, 0x33664,
+ 0x33670, 0x336b8,
+ 0x336c0, 0x336e4,
+ 0x336f8, 0x33738,
+ 0x33740, 0x33740,
+ 0x33748, 0x33750,
+ 0x3375c, 0x33764,
+ 0x33770, 0x337b8,
+ 0x337c0, 0x337e4,
+ 0x337f8, 0x337fc,
+ 0x33814, 0x33814,
+ 0x3382c, 0x3382c,
+ 0x33880, 0x3388c,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x33928,
+ 0x33930, 0x33948,
+ 0x33960, 0x33968,
+ 0x33970, 0x3399c,
+ 0x339f0, 0x33a38,
+ 0x33a40, 0x33a40,
+ 0x33a48, 0x33a50,
+ 0x33a5c, 0x33a64,
+ 0x33a70, 0x33ab8,
+ 0x33ac0, 0x33ae4,
+ 0x33af8, 0x33b10,
+ 0x33b28, 0x33b28,
+ 0x33b3c, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c28, 0x33c28,
+ 0x33c3c, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34030,
+ 0x34038, 0x34038,
+ 0x34040, 0x34040,
+ 0x34100, 0x34144,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341d0,
+ 0x34200, 0x34318,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x34828,
+ 0x34834, 0x34834,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349ac,
+ 0x34a00, 0x34a14,
+ 0x34a1c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34a74,
+ 0x34a7c, 0x34afc,
+ 0x34b08, 0x34c24,
+ 0x34d00, 0x34d00,
+ 0x34d08, 0x34d14,
+ 0x34d1c, 0x34d20,
+ 0x34d3c, 0x34d3c,
+ 0x34d48, 0x34d50,
+ 0x35200, 0x3520c,
+ 0x35220, 0x35220,
+ 0x35240, 0x35240,
+ 0x35600, 0x3560c,
+ 0x35a00, 0x35a1c,
+ 0x35e00, 0x35e20,
+ 0x35e38, 0x35e3c,
+ 0x35e80, 0x35e80,
+ 0x35e88, 0x35ea8,
+ 0x35eb0, 0x35eb4,
+ 0x35ec8, 0x35ed4,
+ 0x35fb8, 0x36004,
+ 0x36200, 0x36200,
+ 0x36208, 0x36240,
+ 0x36248, 0x36280,
+ 0x36288, 0x362c0,
+ 0x362c8, 0x362fc,
+ 0x36600, 0x36630,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b10,
+ 0x36b20, 0x36b30,
+ 0x36b40, 0x36b50,
+ 0x36b60, 0x36b70,
+ 0x37000, 0x37028,
+ 0x37030, 0x37048,
+ 0x37060, 0x37068,
+ 0x37070, 0x3709c,
+ 0x370f0, 0x37128,
+ 0x37130, 0x37148,
+ 0x37160, 0x37168,
+ 0x37170, 0x3719c,
+ 0x371f0, 0x37238,
+ 0x37240, 0x37240,
+ 0x37248, 0x37250,
+ 0x3725c, 0x37264,
+ 0x37270, 0x372b8,
+ 0x372c0, 0x372e4,
+ 0x372f8, 0x37338,
+ 0x37340, 0x37340,
+ 0x37348, 0x37350,
+ 0x3735c, 0x37364,
+ 0x37370, 0x373b8,
+ 0x373c0, 0x373e4,
+ 0x373f8, 0x37428,
+ 0x37430, 0x37448,
+ 0x37460, 0x37468,
+ 0x37470, 0x3749c,
+ 0x374f0, 0x37528,
+ 0x37530, 0x37548,
+ 0x37560, 0x37568,
+ 0x37570, 0x3759c,
+ 0x375f0, 0x37638,
+ 0x37640, 0x37640,
+ 0x37648, 0x37650,
+ 0x3765c, 0x37664,
+ 0x37670, 0x376b8,
+ 0x376c0, 0x376e4,
+ 0x376f8, 0x37738,
+ 0x37740, 0x37740,
+ 0x37748, 0x37750,
+ 0x3775c, 0x37764,
+ 0x37770, 0x377b8,
+ 0x377c0, 0x377e4,
+ 0x377f8, 0x377fc,
+ 0x37814, 0x37814,
+ 0x3782c, 0x3782c,
+ 0x37880, 0x3788c,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x37928,
+ 0x37930, 0x37948,
+ 0x37960, 0x37968,
+ 0x37970, 0x3799c,
+ 0x379f0, 0x37a38,
+ 0x37a40, 0x37a40,
+ 0x37a48, 0x37a50,
+ 0x37a5c, 0x37a64,
+ 0x37a70, 0x37ab8,
+ 0x37ac0, 0x37ae4,
+ 0x37af8, 0x37b10,
+ 0x37b28, 0x37b28,
+ 0x37b3c, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c28, 0x37c28,
+ 0x37c3c, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x38000, 0x38030,
+ 0x38038, 0x38038,
+ 0x38040, 0x38040,
+ 0x38100, 0x38144,
+ 0x38190, 0x381a0,
+ 0x381a8, 0x381b8,
+ 0x381c4, 0x381c8,
+ 0x381d0, 0x381d0,
+ 0x38200, 0x38318,
+ 0x38400, 0x384b4,
+ 0x384c0, 0x3852c,
+ 0x38540, 0x3861c,
+ 0x38800, 0x38828,
+ 0x38834, 0x38834,
+ 0x388c0, 0x38908,
+ 0x38910, 0x389ac,
+ 0x38a00, 0x38a14,
+ 0x38a1c, 0x38a2c,
+ 0x38a44, 0x38a50,
+ 0x38a74, 0x38a74,
+ 0x38a7c, 0x38afc,
+ 0x38b08, 0x38c24,
+ 0x38d00, 0x38d00,
+ 0x38d08, 0x38d14,
+ 0x38d1c, 0x38d20,
+ 0x38d3c, 0x38d3c,
+ 0x38d48, 0x38d50,
+ 0x39200, 0x3920c,
+ 0x39220, 0x39220,
+ 0x39240, 0x39240,
+ 0x39600, 0x3960c,
+ 0x39a00, 0x39a1c,
+ 0x39e00, 0x39e20,
+ 0x39e38, 0x39e3c,
+ 0x39e80, 0x39e80,
+ 0x39e88, 0x39ea8,
+ 0x39eb0, 0x39eb4,
+ 0x39ec8, 0x39ed4,
+ 0x39fb8, 0x3a004,
+ 0x3a200, 0x3a200,
+ 0x3a208, 0x3a240,
+ 0x3a248, 0x3a280,
+ 0x3a288, 0x3a2c0,
+ 0x3a2c8, 0x3a2fc,
+ 0x3a600, 0x3a630,
+ 0x3aa00, 0x3aabc,
+ 0x3ab00, 0x3ab10,
+ 0x3ab20, 0x3ab30,
+ 0x3ab40, 0x3ab50,
+ 0x3ab60, 0x3ab70,
+ 0x3b000, 0x3b028,
+ 0x3b030, 0x3b048,
+ 0x3b060, 0x3b068,
+ 0x3b070, 0x3b09c,
+ 0x3b0f0, 0x3b128,
+ 0x3b130, 0x3b148,
+ 0x3b160, 0x3b168,
+ 0x3b170, 0x3b19c,
+ 0x3b1f0, 0x3b238,
+ 0x3b240, 0x3b240,
+ 0x3b248, 0x3b250,
+ 0x3b25c, 0x3b264,
+ 0x3b270, 0x3b2b8,
+ 0x3b2c0, 0x3b2e4,
+ 0x3b2f8, 0x3b338,
+ 0x3b340, 0x3b340,
+ 0x3b348, 0x3b350,
+ 0x3b35c, 0x3b364,
+ 0x3b370, 0x3b3b8,
+ 0x3b3c0, 0x3b3e4,
+ 0x3b3f8, 0x3b428,
+ 0x3b430, 0x3b448,
+ 0x3b460, 0x3b468,
+ 0x3b470, 0x3b49c,
+ 0x3b4f0, 0x3b528,
+ 0x3b530, 0x3b548,
+ 0x3b560, 0x3b568,
+ 0x3b570, 0x3b59c,
+ 0x3b5f0, 0x3b638,
+ 0x3b640, 0x3b640,
+ 0x3b648, 0x3b650,
+ 0x3b65c, 0x3b664,
+ 0x3b670, 0x3b6b8,
+ 0x3b6c0, 0x3b6e4,
+ 0x3b6f8, 0x3b738,
+ 0x3b740, 0x3b740,
+ 0x3b748, 0x3b750,
+ 0x3b75c, 0x3b764,
+ 0x3b770, 0x3b7b8,
+ 0x3b7c0, 0x3b7e4,
+ 0x3b7f8, 0x3b7fc,
+ 0x3b814, 0x3b814,
+ 0x3b82c, 0x3b82c,
+ 0x3b880, 0x3b88c,
+ 0x3b8e8, 0x3b8ec,
+ 0x3b900, 0x3b928,
+ 0x3b930, 0x3b948,
+ 0x3b960, 0x3b968,
+ 0x3b970, 0x3b99c,
+ 0x3b9f0, 0x3ba38,
+ 0x3ba40, 0x3ba40,
+ 0x3ba48, 0x3ba50,
+ 0x3ba5c, 0x3ba64,
+ 0x3ba70, 0x3bab8,
+ 0x3bac0, 0x3bae4,
+ 0x3baf8, 0x3bb10,
+ 0x3bb28, 0x3bb28,
+ 0x3bb3c, 0x3bb50,
+ 0x3bbf0, 0x3bc10,
+ 0x3bc28, 0x3bc28,
+ 0x3bc3c, 0x3bc50,
+ 0x3bcf0, 0x3bcfc,
+ 0x3c000, 0x3c030,
+ 0x3c038, 0x3c038,
+ 0x3c040, 0x3c040,
+ 0x3c100, 0x3c144,
+ 0x3c190, 0x3c1a0,
+ 0x3c1a8, 0x3c1b8,
+ 0x3c1c4, 0x3c1c8,
+ 0x3c1d0, 0x3c1d0,
+ 0x3c200, 0x3c318,
+ 0x3c400, 0x3c4b4,
+ 0x3c4c0, 0x3c52c,
+ 0x3c540, 0x3c61c,
+ 0x3c800, 0x3c828,
+ 0x3c834, 0x3c834,
+ 0x3c8c0, 0x3c908,
+ 0x3c910, 0x3c9ac,
+ 0x3ca00, 0x3ca14,
+ 0x3ca1c, 0x3ca2c,
+ 0x3ca44, 0x3ca50,
+ 0x3ca74, 0x3ca74,
+ 0x3ca7c, 0x3cafc,
+ 0x3cb08, 0x3cc24,
+ 0x3cd00, 0x3cd00,
+ 0x3cd08, 0x3cd14,
+ 0x3cd1c, 0x3cd20,
+ 0x3cd3c, 0x3cd3c,
+ 0x3cd48, 0x3cd50,
+ 0x3d200, 0x3d20c,
+ 0x3d220, 0x3d220,
+ 0x3d240, 0x3d240,
+ 0x3d600, 0x3d60c,
+ 0x3da00, 0x3da1c,
+ 0x3de00, 0x3de20,
+ 0x3de38, 0x3de3c,
+ 0x3de80, 0x3de80,
+ 0x3de88, 0x3dea8,
+ 0x3deb0, 0x3deb4,
+ 0x3dec8, 0x3ded4,
+ 0x3dfb8, 0x3e004,
+ 0x3e200, 0x3e200,
+ 0x3e208, 0x3e240,
+ 0x3e248, 0x3e280,
+ 0x3e288, 0x3e2c0,
+ 0x3e2c8, 0x3e2fc,
+ 0x3e600, 0x3e630,
+ 0x3ea00, 0x3eabc,
+ 0x3eb00, 0x3eb10,
+ 0x3eb20, 0x3eb30,
+ 0x3eb40, 0x3eb50,
+ 0x3eb60, 0x3eb70,
+ 0x3f000, 0x3f028,
+ 0x3f030, 0x3f048,
+ 0x3f060, 0x3f068,
+ 0x3f070, 0x3f09c,
+ 0x3f0f0, 0x3f128,
+ 0x3f130, 0x3f148,
+ 0x3f160, 0x3f168,
+ 0x3f170, 0x3f19c,
+ 0x3f1f0, 0x3f238,
+ 0x3f240, 0x3f240,
+ 0x3f248, 0x3f250,
+ 0x3f25c, 0x3f264,
+ 0x3f270, 0x3f2b8,
+ 0x3f2c0, 0x3f2e4,
+ 0x3f2f8, 0x3f338,
+ 0x3f340, 0x3f340,
+ 0x3f348, 0x3f350,
+ 0x3f35c, 0x3f364,
+ 0x3f370, 0x3f3b8,
+ 0x3f3c0, 0x3f3e4,
+ 0x3f3f8, 0x3f428,
+ 0x3f430, 0x3f448,
+ 0x3f460, 0x3f468,
+ 0x3f470, 0x3f49c,
+ 0x3f4f0, 0x3f528,
+ 0x3f530, 0x3f548,
+ 0x3f560, 0x3f568,
+ 0x3f570, 0x3f59c,
+ 0x3f5f0, 0x3f638,
+ 0x3f640, 0x3f640,
+ 0x3f648, 0x3f650,
+ 0x3f65c, 0x3f664,
+ 0x3f670, 0x3f6b8,
+ 0x3f6c0, 0x3f6e4,
+ 0x3f6f8, 0x3f738,
+ 0x3f740, 0x3f740,
+ 0x3f748, 0x3f750,
+ 0x3f75c, 0x3f764,
+ 0x3f770, 0x3f7b8,
+ 0x3f7c0, 0x3f7e4,
+ 0x3f7f8, 0x3f7fc,
+ 0x3f814, 0x3f814,
+ 0x3f82c, 0x3f82c,
+ 0x3f880, 0x3f88c,
+ 0x3f8e8, 0x3f8ec,
+ 0x3f900, 0x3f928,
+ 0x3f930, 0x3f948,
+ 0x3f960, 0x3f968,
+ 0x3f970, 0x3f99c,
+ 0x3f9f0, 0x3fa38,
+ 0x3fa40, 0x3fa40,
+ 0x3fa48, 0x3fa50,
+ 0x3fa5c, 0x3fa64,
+ 0x3fa70, 0x3fab8,
+ 0x3fac0, 0x3fae4,
+ 0x3faf8, 0x3fb10,
+ 0x3fb28, 0x3fb28,
+ 0x3fb3c, 0x3fb50,
+ 0x3fbf0, 0x3fc10,
+ 0x3fc28, 0x3fc28,
+ 0x3fc3c, 0x3fc50,
+ 0x3fcf0, 0x3fcfc,
+ 0x40000, 0x4000c,
+ 0x40040, 0x40050,
+ 0x40060, 0x40068,
+ 0x4007c, 0x4008c,
+ 0x40094, 0x400b0,
+ 0x400c0, 0x40144,
+ 0x40180, 0x4018c,
+ 0x40200, 0x40254,
+ 0x40260, 0x40264,
+ 0x40270, 0x40288,
+ 0x40290, 0x40298,
+ 0x402ac, 0x402c8,
+ 0x402d0, 0x402e0,
+ 0x402f0, 0x402f0,
+ 0x40300, 0x4033c,
+ 0x403f8, 0x403fc,
+ 0x41304, 0x413c4,
+ 0x41400, 0x4140c,
+ 0x41414, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x44054,
+ 0x4405c, 0x44078,
+ 0x440c0, 0x44174,
+ 0x44180, 0x441ac,
+ 0x441b4, 0x441b8,
+ 0x441c0, 0x44254,
+ 0x4425c, 0x44278,
+ 0x442c0, 0x44374,
+ 0x44380, 0x443ac,
+ 0x443b4, 0x443b8,
+ 0x443c0, 0x44454,
+ 0x4445c, 0x44478,
+ 0x444c0, 0x44574,
+ 0x44580, 0x445ac,
+ 0x445b4, 0x445b8,
+ 0x445c0, 0x44654,
+ 0x4465c, 0x44678,
+ 0x446c0, 0x44774,
+ 0x44780, 0x447ac,
+ 0x447b4, 0x447b8,
+ 0x447c0, 0x44854,
+ 0x4485c, 0x44878,
+ 0x448c0, 0x44974,
+ 0x44980, 0x449ac,
+ 0x449b4, 0x449b8,
+ 0x449c0, 0x449fc,
+ 0x45000, 0x45004,
+ 0x45010, 0x45030,
+ 0x45040, 0x45060,
+ 0x45068, 0x45068,
+ 0x45080, 0x45084,
+ 0x450a0, 0x450b0,
+ 0x45200, 0x45204,
+ 0x45210, 0x45230,
+ 0x45240, 0x45260,
+ 0x45268, 0x45268,
+ 0x45280, 0x45284,
+ 0x452a0, 0x452b0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4703c,
+ 0x47044, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47408,
+ 0x47414, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x47814,
+ 0x48000, 0x4800c,
+ 0x48040, 0x48050,
+ 0x48060, 0x48068,
+ 0x4807c, 0x4808c,
+ 0x48094, 0x480b0,
+ 0x480c0, 0x48144,
+ 0x48180, 0x4818c,
+ 0x48200, 0x48254,
+ 0x48260, 0x48264,
+ 0x48270, 0x48288,
+ 0x48290, 0x48298,
+ 0x482ac, 0x482c8,
+ 0x482d0, 0x482e0,
+ 0x482f0, 0x482f0,
+ 0x48300, 0x4833c,
+ 0x483f8, 0x483fc,
+ 0x49304, 0x493c4,
+ 0x49400, 0x4940c,
+ 0x49414, 0x4941c,
+ 0x49480, 0x494d0,
+ 0x4c000, 0x4c054,
+ 0x4c05c, 0x4c078,
+ 0x4c0c0, 0x4c174,
+ 0x4c180, 0x4c1ac,
+ 0x4c1b4, 0x4c1b8,
+ 0x4c1c0, 0x4c254,
+ 0x4c25c, 0x4c278,
+ 0x4c2c0, 0x4c374,
+ 0x4c380, 0x4c3ac,
+ 0x4c3b4, 0x4c3b8,
+ 0x4c3c0, 0x4c454,
+ 0x4c45c, 0x4c478,
+ 0x4c4c0, 0x4c574,
+ 0x4c580, 0x4c5ac,
+ 0x4c5b4, 0x4c5b8,
+ 0x4c5c0, 0x4c654,
+ 0x4c65c, 0x4c678,
+ 0x4c6c0, 0x4c774,
+ 0x4c780, 0x4c7ac,
+ 0x4c7b4, 0x4c7b8,
+ 0x4c7c0, 0x4c854,
+ 0x4c85c, 0x4c878,
+ 0x4c8c0, 0x4c974,
+ 0x4c980, 0x4c9ac,
+ 0x4c9b4, 0x4c9b8,
+ 0x4c9c0, 0x4c9fc,
+ 0x4d000, 0x4d004,
+ 0x4d010, 0x4d030,
+ 0x4d040, 0x4d060,
+ 0x4d068, 0x4d068,
+ 0x4d080, 0x4d084,
+ 0x4d0a0, 0x4d0b0,
+ 0x4d200, 0x4d204,
+ 0x4d210, 0x4d230,
+ 0x4d240, 0x4d260,
+ 0x4d268, 0x4d268,
+ 0x4d280, 0x4d284,
+ 0x4d2a0, 0x4d2b0,
+ 0x4e0c0, 0x4e0e4,
+ 0x4f000, 0x4f03c,
+ 0x4f044, 0x4f08c,
+ 0x4f200, 0x4f250,
+ 0x4f400, 0x4f408,
+ 0x4f414, 0x4f420,
+ 0x4f600, 0x4f618,
+ 0x4f800, 0x4f814,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
+ 0x50400, 0x50400,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x5101c,
+ 0x51300, 0x51308,
+ };
+
+ static const unsigned int t6_reg_ranges[] = {
+ 0x1008, 0x101c,
+ 0x1024, 0x10a8,
+ 0x10b4, 0x10f8,
+ 0x1100, 0x1114,
+ 0x111c, 0x112c,
+ 0x1138, 0x113c,
+ 0x1144, 0x114c,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11b4,
+ 0x11fc, 0x1274,
+ 0x1280, 0x133c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x302c,
+ 0x3060, 0x30b0,
+ 0x30b8, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
+ 0x5940, 0x595c,
+ 0x5980, 0x598c,
+ 0x59b0, 0x59c8,
+ 0x59d0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a6c,
+ 0x5a80, 0x5a8c,
+ 0x5a94, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x5c10, 0x5e48,
+ 0x5e50, 0x5e94,
+ 0x5ea0, 0x5eb0,
+ 0x5ec0, 0x5ec0,
+ 0x5ec8, 0x5ed0,
+ 0x5ee0, 0x5ee0,
+ 0x5ef0, 0x5ef0,
+ 0x5f00, 0x5f00,
+ 0x6000, 0x6020,
+ 0x6028, 0x6040,
+ 0x6058, 0x609c,
+ 0x60a8, 0x619c,
+ 0x7700, 0x7798,
+ 0x77c0, 0x7880,
+ 0x78cc, 0x78fc,
+ 0x7b00, 0x7b58,
+ 0x7b60, 0x7b84,
+ 0x7b8c, 0x7c54,
+ 0x7d00, 0x7d38,
+ 0x7d40, 0x7d84,
+ 0x7d8c, 0x7ddc,
+ 0x7de4, 0x7e04,
+ 0x7e10, 0x7e1c,
+ 0x7e24, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e78,
+ 0x7e80, 0x7edc,
+ 0x7ee8, 0x7efc,
+ 0x8dc0, 0x8de4,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e84,
+ 0x8ea0, 0x8f88,
+ 0x8fb8, 0x9058,
+ 0x9060, 0x9060,
+ 0x9068, 0x90f8,
+ 0x9100, 0x9124,
+ 0x9400, 0x9470,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x9704,
+ 0x9710, 0x971c,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd03c,
+ 0xd100, 0xd118,
+ 0xd200, 0xd214,
+ 0xd220, 0xd234,
+ 0xd240, 0xd254,
+ 0xd260, 0xd274,
+ 0xd280, 0xd294,
+ 0xd2a0, 0xd2b4,
+ 0xd2c0, 0xd2d4,
+ 0xd2e0, 0xd2f4,
+ 0xd300, 0xd31c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xf008,
+ 0xf010, 0xf018,
+ 0xf020, 0xf028,
+ 0x11000, 0x11014,
+ 0x11048, 0x1106c,
+ 0x11074, 0x11088,
+ 0x11098, 0x11120,
+ 0x1112c, 0x1117c,
+ 0x11190, 0x112e0,
+ 0x11300, 0x1130c,
+ 0x12000, 0x1206c,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x19290,
+ 0x192a4, 0x192b0,
+ 0x192bc, 0x192bc,
+ 0x19348, 0x1934c,
+ 0x193f8, 0x19418,
+ 0x19420, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c48,
+ 0x19c50, 0x19c80,
+ 0x19c94, 0x19c98,
+ 0x19ca0, 0x19cbc,
+ 0x19ce4, 0x19ce4,
+ 0x19cf0, 0x19cf8,
+ 0x19d00, 0x19d28,
+ 0x19d50, 0x19d78,
+ 0x19d94, 0x19d98,
+ 0x19da0, 0x19dc8,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e6c,
+ 0x19ea0, 0x19ebc,
+ 0x19ec4, 0x19ef4,
+ 0x19f04, 0x19f2c,
+ 0x19f34, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fac,
+ 0x19fc4, 0x19fc8,
+ 0x19fd0, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a0f8,
+ 0x1a100, 0x1a108,
+ 0x1a114, 0x1a120,
+ 0x1a128, 0x1a130,
+ 0x1a138, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30030,
+ 0x30100, 0x30168,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301d0,
+ 0x30200, 0x30320,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x308a0,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309b8,
+ 0x30a00, 0x30a04,
+ 0x30a0c, 0x30a14,
+ 0x30a1c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30a74,
+ 0x30a7c, 0x30afc,
+ 0x30b08, 0x30c24,
+ 0x30d00, 0x30d14,
+ 0x30d1c, 0x30d3c,
+ 0x30d44, 0x30d4c,
+ 0x30d54, 0x30d74,
+ 0x30d7c, 0x30d7c,
+ 0x30de0, 0x30de0,
+ 0x30e00, 0x30ed4,
+ 0x30f00, 0x30fa4,
+ 0x30fc0, 0x30fc4,
+ 0x31000, 0x31004,
+ 0x31080, 0x310fc,
+ 0x31208, 0x31220,
+ 0x3123c, 0x31254,
+ 0x31300, 0x31300,
+ 0x31308, 0x3131c,
+ 0x31338, 0x3133c,
+ 0x31380, 0x31380,
+ 0x31388, 0x313a8,
+ 0x313b4, 0x313b4,
+ 0x31400, 0x31420,
+ 0x31438, 0x3143c,
+ 0x31480, 0x31480,
+ 0x314a8, 0x314a8,
+ 0x314b0, 0x314b4,
+ 0x314c8, 0x314d4,
+ 0x31a40, 0x31a4c,
+ 0x31af0, 0x31b20,
+ 0x31b38, 0x31b3c,
+ 0x31b80, 0x31b80,
+ 0x31ba8, 0x31ba8,
+ 0x31bb0, 0x31bb4,
+ 0x31bc8, 0x31bd4,
+ 0x32140, 0x3218c,
+ 0x321f0, 0x321f4,
+ 0x32200, 0x32200,
+ 0x32218, 0x32218,
+ 0x32400, 0x32400,
+ 0x32408, 0x3241c,
+ 0x32618, 0x32620,
+ 0x32664, 0x32664,
+ 0x326a8, 0x326a8,
+ 0x326ec, 0x326ec,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b38,
+ 0x32b20, 0x32b38,
+ 0x32b40, 0x32b58,
+ 0x32b60, 0x32b78,
+ 0x32c00, 0x32c00,
+ 0x32c08, 0x32c3c,
+ 0x33000, 0x3302c,
+ 0x33034, 0x33050,
+ 0x33058, 0x33058,
+ 0x33060, 0x3308c,
+ 0x3309c, 0x330ac,
+ 0x330c0, 0x330c0,
+ 0x330c8, 0x330d0,
+ 0x330d8, 0x330e0,
+ 0x330ec, 0x3312c,
+ 0x33134, 0x33150,
+ 0x33158, 0x33158,
+ 0x33160, 0x3318c,
+ 0x3319c, 0x331ac,
+ 0x331c0, 0x331c0,
+ 0x331c8, 0x331d0,
+ 0x331d8, 0x331e0,
+ 0x331ec, 0x33290,
+ 0x33298, 0x332c4,
+ 0x332e4, 0x33390,
+ 0x33398, 0x333c4,
+ 0x333e4, 0x3342c,
+ 0x33434, 0x33450,
+ 0x33458, 0x33458,
+ 0x33460, 0x3348c,
+ 0x3349c, 0x334ac,
+ 0x334c0, 0x334c0,
+ 0x334c8, 0x334d0,
+ 0x334d8, 0x334e0,
+ 0x334ec, 0x3352c,
+ 0x33534, 0x33550,
+ 0x33558, 0x33558,
+ 0x33560, 0x3358c,
+ 0x3359c, 0x335ac,
+ 0x335c0, 0x335c0,
+ 0x335c8, 0x335d0,
+ 0x335d8, 0x335e0,
+ 0x335ec, 0x33690,
+ 0x33698, 0x336c4,
+ 0x336e4, 0x33790,
+ 0x33798, 0x337c4,
+ 0x337e4, 0x337fc,
+ 0x33814, 0x33814,
+ 0x33854, 0x33868,
+ 0x33880, 0x3388c,
+ 0x338c0, 0x338d0,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x3392c,
+ 0x33934, 0x33950,
+ 0x33958, 0x33958,
+ 0x33960, 0x3398c,
+ 0x3399c, 0x339ac,
+ 0x339c0, 0x339c0,
+ 0x339c8, 0x339d0,
+ 0x339d8, 0x339e0,
+ 0x339ec, 0x33a90,
+ 0x33a98, 0x33ac4,
+ 0x33ae4, 0x33b10,
+ 0x33b24, 0x33b28,
+ 0x33b38, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c24, 0x33c28,
+ 0x33c38, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34030,
+ 0x34100, 0x34168,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341d0,
+ 0x34200, 0x34320,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x348a0,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349b8,
+ 0x34a00, 0x34a04,
+ 0x34a0c, 0x34a14,
+ 0x34a1c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34a74,
+ 0x34a7c, 0x34afc,
+ 0x34b08, 0x34c24,
+ 0x34d00, 0x34d14,
+ 0x34d1c, 0x34d3c,
+ 0x34d44, 0x34d4c,
+ 0x34d54, 0x34d74,
+ 0x34d7c, 0x34d7c,
+ 0x34de0, 0x34de0,
+ 0x34e00, 0x34ed4,
+ 0x34f00, 0x34fa4,
+ 0x34fc0, 0x34fc4,
+ 0x35000, 0x35004,
+ 0x35080, 0x350fc,
+ 0x35208, 0x35220,
+ 0x3523c, 0x35254,
+ 0x35300, 0x35300,
+ 0x35308, 0x3531c,
+ 0x35338, 0x3533c,
+ 0x35380, 0x35380,
+ 0x35388, 0x353a8,
+ 0x353b4, 0x353b4,
+ 0x35400, 0x35420,
+ 0x35438, 0x3543c,
+ 0x35480, 0x35480,
+ 0x354a8, 0x354a8,
+ 0x354b0, 0x354b4,
+ 0x354c8, 0x354d4,
+ 0x35a40, 0x35a4c,
+ 0x35af0, 0x35b20,
+ 0x35b38, 0x35b3c,
+ 0x35b80, 0x35b80,
+ 0x35ba8, 0x35ba8,
+ 0x35bb0, 0x35bb4,
+ 0x35bc8, 0x35bd4,
+ 0x36140, 0x3618c,
+ 0x361f0, 0x361f4,
+ 0x36200, 0x36200,
+ 0x36218, 0x36218,
+ 0x36400, 0x36400,
+ 0x36408, 0x3641c,
+ 0x36618, 0x36620,
+ 0x36664, 0x36664,
+ 0x366a8, 0x366a8,
+ 0x366ec, 0x366ec,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b38,
+ 0x36b20, 0x36b38,
+ 0x36b40, 0x36b58,
+ 0x36b60, 0x36b78,
+ 0x36c00, 0x36c00,
+ 0x36c08, 0x36c3c,
+ 0x37000, 0x3702c,
+ 0x37034, 0x37050,
+ 0x37058, 0x37058,
+ 0x37060, 0x3708c,
+ 0x3709c, 0x370ac,
+ 0x370c0, 0x370c0,
+ 0x370c8, 0x370d0,
+ 0x370d8, 0x370e0,
+ 0x370ec, 0x3712c,
+ 0x37134, 0x37150,
+ 0x37158, 0x37158,
+ 0x37160, 0x3718c,
+ 0x3719c, 0x371ac,
+ 0x371c0, 0x371c0,
+ 0x371c8, 0x371d0,
+ 0x371d8, 0x371e0,
+ 0x371ec, 0x37290,
+ 0x37298, 0x372c4,
+ 0x372e4, 0x37390,
+ 0x37398, 0x373c4,
+ 0x373e4, 0x3742c,
+ 0x37434, 0x37450,
+ 0x37458, 0x37458,
+ 0x37460, 0x3748c,
+ 0x3749c, 0x374ac,
+ 0x374c0, 0x374c0,
+ 0x374c8, 0x374d0,
+ 0x374d8, 0x374e0,
+ 0x374ec, 0x3752c,
+ 0x37534, 0x37550,
+ 0x37558, 0x37558,
+ 0x37560, 0x3758c,
+ 0x3759c, 0x375ac,
+ 0x375c0, 0x375c0,
+ 0x375c8, 0x375d0,
+ 0x375d8, 0x375e0,
+ 0x375ec, 0x37690,
+ 0x37698, 0x376c4,
+ 0x376e4, 0x37790,
+ 0x37798, 0x377c4,
+ 0x377e4, 0x377fc,
+ 0x37814, 0x37814,
+ 0x37854, 0x37868,
+ 0x37880, 0x3788c,
+ 0x378c0, 0x378d0,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x3792c,
+ 0x37934, 0x37950,
+ 0x37958, 0x37958,
+ 0x37960, 0x3798c,
+ 0x3799c, 0x379ac,
+ 0x379c0, 0x379c0,
+ 0x379c8, 0x379d0,
+ 0x379d8, 0x379e0,
+ 0x379ec, 0x37a90,
+ 0x37a98, 0x37ac4,
+ 0x37ae4, 0x37b10,
+ 0x37b24, 0x37b28,
+ 0x37b38, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c24, 0x37c28,
+ 0x37c38, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x40040, 0x40040,
+ 0x40080, 0x40084,
+ 0x40100, 0x40100,
+ 0x40140, 0x401bc,
+ 0x40200, 0x40214,
+ 0x40228, 0x40228,
+ 0x40240, 0x40258,
+ 0x40280, 0x40280,
+ 0x40304, 0x40304,
+ 0x40330, 0x4033c,
+ 0x41304, 0x413c8,
+ 0x413d0, 0x413dc,
+ 0x413f0, 0x413f0,
+ 0x41400, 0x4140c,
+ 0x41414, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x4407c,
+ 0x440c0, 0x441ac,
+ 0x441b4, 0x4427c,
+ 0x442c0, 0x443ac,
+ 0x443b4, 0x4447c,
+ 0x444c0, 0x445ac,
+ 0x445b4, 0x4467c,
+ 0x446c0, 0x447ac,
+ 0x447b4, 0x4487c,
+ 0x448c0, 0x449ac,
+ 0x449b4, 0x44a7c,
+ 0x44ac0, 0x44bac,
+ 0x44bb4, 0x44c7c,
+ 0x44cc0, 0x44dac,
+ 0x44db4, 0x44e7c,
+ 0x44ec0, 0x44fac,
+ 0x44fb4, 0x4507c,
+ 0x450c0, 0x451ac,
+ 0x451b4, 0x451fc,
+ 0x45800, 0x45804,
+ 0x45810, 0x45830,
+ 0x45840, 0x45860,
+ 0x45868, 0x45868,
+ 0x45880, 0x45884,
+ 0x458a0, 0x458b0,
+ 0x45a00, 0x45a04,
+ 0x45a10, 0x45a30,
+ 0x45a40, 0x45a60,
+ 0x45a68, 0x45a68,
+ 0x45a80, 0x45a84,
+ 0x45aa0, 0x45ab0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4703c,
+ 0x47044, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47408,
+ 0x47414, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x47814,
+ 0x47820, 0x4782c,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
+ 0x50300, 0x50384,
+ 0x50400, 0x50400,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
+ 0x50b00, 0x50b84,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x51020,
+ 0x51028, 0x510b0,
+ 0x51300, 0x51324,
+ };
+
+ u32 *buf_end = (u32 *)((char *)buf + buf_size);
+ const unsigned int *reg_ranges;
+ int reg_ranges_size, range;
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ /* Select the right set of register ranges to dump depending on the
+ * adapter chip type.
+ */
+ switch (chip_version) {
+ case CHELSIO_T5:
+ reg_ranges = t5_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
+ break;
+
+ case CHELSIO_T6:
+ reg_ranges = t6_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+ break;
+
+ default:
+ dev_err(adap,
+ "Unsupported chip version %d\n", chip_version);
+ return;
+ }
+
+ /* Clear the register buffer and insert the appropriate register
+ * values selected by the above register ranges.
+ */
+ memset(buf, 0, buf_size);
+ for (range = 0; range < reg_ranges_size; range += 2) {
+ unsigned int reg = reg_ranges[range];
+ unsigned int last_reg = reg_ranges[range + 1];
+ u32 *bufp = (u32 *)((char *)buf + reg);
+
+ /* Iterate across the register range filling in the register
+ * buffer but don't write past the end of the register buffer.
+ */
+ while (reg <= last_reg && bufp < buf_end) {
+ *bufp++ = t4_read_reg(adap, reg);
+ reg += sizeof(u32);
+ }
+ }
+}
+
+/* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */
+#define EEPROM_DELAY 10 /* 10us per poll spin */
+#define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
+
+#define EEPROM_STAT_ADDR 0x7bfc
+
+/**
+ * Small utility function to wait till any outstanding VPD Access is complete.
+ * We have a per-adapter state variable "VPD Busy" to indicate when we have a
+ * VPD Access in flight. This allows us to handle the problem of having a
+ * previous VPD Access time out and prevent an attempt to inject a new VPD
+ * Request before any in-flight VPD request has completed.
+ */
+static int t4_seeprom_wait(struct adapter *adapter)
+{
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+ int max_poll;
+
+ /* If no VPD Access is in flight, we can just return success right
+ * away.
+ */
+ if (!adapter->vpd_busy)
+ return 0;
+
+ /* Poll the VPD Capability Address/Flag register waiting for it
+ * to indicate that the operation is complete.
+ */
+ max_poll = EEPROM_MAX_POLL;
+ do {
+ u16 val;
+
+ udelay(EEPROM_DELAY);
+ t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
+
+ /* If the operation is complete, mark the VPD as no longer
+ * busy and return success.
+ */
+ if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
+ adapter->vpd_busy = 0;
+ return 0;
+ }
+ } while (--max_poll);
+
+ /* Failure! Note that we leave the VPD Busy status set in order to
+ * avoid pushing a new VPD Access request into the VPD Capability till
+ * the current operation eventually succeeds. It's a bug to issue a
+ * new request when an existing request is in flight and will result
+ * in corrupt hardware state.
+ */
+ return -ETIMEDOUT;
+}
+
+/**
+ * t4_seeprom_read - read a serial EEPROM location
+ * @adapter: adapter to read
+ * @addr: EEPROM virtual address
+ * @data: where to store the read data
+ *
+ * Read a 32-bit word from a location in serial EEPROM using the card's PCI
+ * VPD capability. Note that this function must be called with a virtual
+ * address.
+ */
+int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
+{
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+ int ret;
+
+ /* VPD Accesses must alway be 4-byte aligned!
+ */
+ if (addr >= EEPROMVSIZE || (addr & 3))
+ return -EINVAL;
+
+ /* Wait for any previous operation which may still be in flight to
+ * complete.
+ */
+ ret = t4_seeprom_wait(adapter);
+ if (ret) {
+ dev_err(adapter, "VPD still busy from previous operation\n");
+ return ret;
+ }
+
+ /* Issue our new VPD Read request, mark the VPD as being busy and wait
+ * for our request to complete. If it doesn't complete, note the
+ * error and return it to our caller. Note that we do not reset the
+ * VPD Busy status!
+ */
+ t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
+ adapter->vpd_busy = 1;
+ adapter->vpd_flag = PCI_VPD_ADDR_F;
+ ret = t4_seeprom_wait(adapter);
+ if (ret) {
+ dev_err(adapter, "VPD read of address %#x failed\n", addr);
+ return ret;
+ }
+
+ /* Grab the returned data, swizzle it into our endianness and
+ * return success.
+ */
+ t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
+ *data = le32_to_cpu(*data);
+ return 0;
+}
+
+/**
+ * t4_seeprom_write - write a serial EEPROM location
+ * @adapter: adapter to write
+ * @addr: virtual EEPROM address
+ * @data: value to write
+ *
+ * Write a 32-bit word to a location in serial EEPROM using the card's PCI
+ * VPD capability. Note that this function must be called with a virtual
+ * address.
+ */
+int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
+{
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+ int ret;
+ u32 stats_reg = 0;
+ int max_poll;
+
+ /* VPD Accesses must alway be 4-byte aligned!
+ */
+ if (addr >= EEPROMVSIZE || (addr & 3))
+ return -EINVAL;
+
+ /* Wait for any previous operation which may still be in flight to
+ * complete.
+ */
+ ret = t4_seeprom_wait(adapter);
+ if (ret) {
+ dev_err(adapter, "VPD still busy from previous operation\n");
+ return ret;
+ }
+
+ /* Issue our new VPD Read request, mark the VPD as being busy and wait
+ * for our request to complete. If it doesn't complete, note the
+ * error and return it to our caller. Note that we do not reset the
+ * VPD Busy status!
+ */
+ t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
+ cpu_to_le32(data));
+ t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
+ (u16)addr | PCI_VPD_ADDR_F);
+ adapter->vpd_busy = 1;
+ adapter->vpd_flag = 0;
+ ret = t4_seeprom_wait(adapter);
+ if (ret) {
+ dev_err(adapter, "VPD write of address %#x failed\n", addr);
+ return ret;
+ }
+
+ /* Reset PCI_VPD_DATA register after a transaction and wait for our
+ * request to complete. If it doesn't complete, return error.
+ */
+ t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
+ max_poll = EEPROM_MAX_POLL;
+ do {
+ udelay(EEPROM_DELAY);
+ t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
+ } while ((stats_reg & 0x1) && --max_poll);
+ if (!max_poll)
+ return -ETIMEDOUT;
+
+ /* Return success! */
+ return 0;
+}
+
+/**
+ * t4_seeprom_wp - enable/disable EEPROM write protection
+ * @adapter: the adapter
+ * @enable: whether to enable or disable write protection
+ *
+ * Enables or disables write protection on the serial EEPROM.
+ */
+int t4_seeprom_wp(struct adapter *adapter, int enable)
+{
+ return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
+}
+
+/**
+ * t4_fw_tp_pio_rw - Access TP PIO through LDST
+ * @adap: the adapter
+ * @vals: where the indirect register values are stored/written
+ * @nregs: how many indirect registers to read/write
+ * @start_idx: index of first indirect register to read/write
+ * @rw: Read (1) or Write (0)
+ *
+ * Access TP PIO registers through LDST
+ */
+void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw)
+{
+ int cmd = FW_LDST_ADDRSPC_TP_PIO;
+ struct fw_ldst_cmd c;
+ unsigned int i;
+ int ret;
+
+ for (i = 0 ; i < nregs; i++) {
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+ F_FW_CMD_REQUEST |
+ (rw ? F_FW_CMD_READ :
+ F_FW_CMD_WRITE) |
+ V_FW_LDST_CMD_ADDRSPACE(cmd));
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+ c.u.addrval.addr = cpu_to_be32(start_index + i);
+ c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ if (rw)
+ vals[i] = be32_to_cpu(c.u.addrval.val);
+ }
+ }
+}
+
+/**
+ * t4_read_rss_key - read the global RSS key
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ *
+ * Reads the global 320-bit RSS key.
+ */
+void t4_read_rss_key(struct adapter *adap, u32 *key)
+{
+ t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
+}
+
+/**
+ * t4_write_rss_key - program one of the RSS keys
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ * @idx: which RSS key to write
+ *
+ * Writes one of the RSS keys with the given 320-bit value. If @idx is
+ * 0..15 the corresponding entry in the RSS key table is written,
+ * otherwise the global RSS key is written.
+ */
+void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
+{
+ u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
+ u8 rss_key_addr_cnt = 16;
+
+ /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
+ * allows access to key addresses 16-63 by using KeyWrAddrX
+ * as index[5:4](upper 2) into key table
+ */
+ if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
+ (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
+ rss_key_addr_cnt = 32;
+
+ t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
+
+ if (idx >= 0 && idx < rss_key_addr_cnt) {
+ if (rss_key_addr_cnt > 16)
+ t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+ V_KEYWRADDRX(idx >> 4) |
+ V_T6_VFWRADDR(idx) | F_KEYWREN);
+ else
+ t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+ V_KEYWRADDR(idx) | F_KEYWREN);
+ }
+}
+
+/**
+ * t4_config_rss_range - configure a portion of the RSS mapping table
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: virtual interface whose RSS subtable is to be written
+ * @start: start entry in the table to write
+ * @n: how many table entries to write
+ * @rspq: values for the "response queue" (Ingress Queue) lookup table
+ * @nrspq: number of values in @rspq
+ *
+ * Programs the selected part of the VI's RSS mapping table with the
+ * provided values. If @nrspq < @n the supplied values are used repeatedly
+ * until the full table range is populated.
+ *
+ * The caller must ensure the values in @rspq are in the range allowed for
+ * @viid.
+ */
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+ int start, int n, const u16 *rspq, unsigned int nrspq)
+{
+ int ret;
+ const u16 *rsp = rspq;
+ const u16 *rsp_end = rspq + nrspq;
+ struct fw_rss_ind_tbl_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_RSS_IND_TBL_CMD_VIID(viid));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+ /*
+ * Each firmware RSS command can accommodate up to 32 RSS Ingress
+ * Queue Identifiers. These Ingress Queue IDs are packed three to
+ * a 32-bit word as 10-bit values with the upper remaining 2 bits
+ * reserved.
+ */
+ while (n > 0) {
+ int nq = min(n, 32);
+ int nq_packed = 0;
+ __be32 *qp = &cmd.iq0_to_iq2;
+
+ /*
+ * Set up the firmware RSS command header to send the next
+ * "nq" Ingress Queue IDs to the firmware.
+ */
+ cmd.niqid = cpu_to_be16(nq);
+ cmd.startidx = cpu_to_be16(start);
+
+ /*
+ * "nq" more done for the start of the next loop.
+ */
+ start += nq;
+ n -= nq;
+
+ /*
+ * While there are still Ingress Queue IDs to stuff into the
+ * current firmware RSS command, retrieve them from the
+ * Ingress Queue ID array and insert them into the command.
+ */
+ while (nq > 0) {
+ /*
+ * Grab up to the next 3 Ingress Queue IDs (wrapping
+ * around the Ingress Queue ID array if necessary) and
+ * insert them into the firmware RSS command at the
+ * current 3-tuple position within the commad.
+ */
+ u16 qbuf[3];
+ u16 *qbp = qbuf;
+ int nqbuf = min(3, nq);
+
+ nq -= nqbuf;
+ qbuf[0] = 0;
+ qbuf[1] = 0;
+ qbuf[2] = 0;
+ while (nqbuf && nq_packed < 32) {
+ nqbuf--;
+ nq_packed++;
+ *qbp++ = *rsp++;
+ if (rsp >= rsp_end)
+ rsp = rspq;
+ }
+ *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
+ V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
+ V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
+ }
+
+ /*
+ * Send this portion of the RRS table update to the firmware;
+ * bail out on any errors.
+ */
+ if (is_pf4(adapter))
+ ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd),
+ NULL);
+ else
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * t4_config_vi_rss - configure per VI RSS settings
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: the VI id
+ * @flags: RSS flags
+ * @defq: id of the default RSS queue for the VI.
+ *
+ * Configures VI-specific RSS properties.
+ */
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq)
+{
+ struct fw_rss_vi_config_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
+ V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
+ if (is_pf4(adapter))
+ return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_read_config_vi_rss - read the configured per VI RSS settings
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: the VI id
+ * @flags: where to place the configured flags
+ * @defq: where to place the id of the default RSS queue for the VI.
+ *
+ * Read configured VI-specific RSS properties.
+ */
+int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ u64 *flags, unsigned int *defq)
+{
+ struct fw_rss_vi_config_cmd c;
+ unsigned int result;
+ int ret;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c);
+ if (!ret) {
+ result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen);
+ if (defq)
+ *defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result);
+ if (flags)
+ *flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ;
+ }
+
+ return ret;
+}
+
+/**
+ * init_cong_ctrl - initialize congestion control parameters
+ * @a: the alpha values for congestion control
+ * @b: the beta values for congestion control
+ *
+ * Initialize the congestion control parameters.
+ */
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+ int i;
+
+ for (i = 0; i < 9; i++) {
+ a[i] = 1;
+ b[i] = 0;
+ }
+
+ a[9] = 2;
+ a[10] = 3;
+ a[11] = 4;
+ a[12] = 5;
+ a[13] = 6;
+ a[14] = 7;
+ a[15] = 8;
+ a[16] = 9;
+ a[17] = 10;
+ a[18] = 14;
+ a[19] = 17;
+ a[20] = 21;
+ a[21] = 25;
+ a[22] = 30;
+ a[23] = 35;
+ a[24] = 45;
+ a[25] = 60;
+ a[26] = 80;
+ a[27] = 100;
+ a[28] = 200;
+ a[29] = 300;
+ a[30] = 400;
+ a[31] = 500;
+
+ b[9] = 1;
+ b[10] = 1;
+ b[11] = 2;
+ b[12] = 2;
+ b[13] = 3;
+ b[14] = 3;
+ b[15] = 3;
+ b[16] = 3;
+ b[17] = 4;
+ b[18] = 4;
+ b[19] = 4;
+ b[20] = 4;
+ b[21] = 4;
+ b[22] = 5;
+ b[23] = 5;
+ b[24] = 5;
+ b[25] = 5;
+ b[26] = 5;
+ b[27] = 5;
+ b[28] = 6;
+ b[29] = 6;
+ b[30] = 7;
+ b[31] = 7;
+}
+
+#define INIT_CMD(var, cmd, rd_wr) do { \
+ (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
+ F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
+ (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
+} while (0)
+
+int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
+{
+ u32 cclk_param, cclk_val;
+ int ret;
+
+ /*
+ * Ask firmware for the Core Clock since it knows how to translate the
+ * Reference Clock ('V2') VPD field into a Core Clock value ...
+ */
+ cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
+ 1, &cclk_param, &cclk_val);
+ if (ret) {
+ dev_err(adapter, "%s: error in fetching from coreclock - %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ p->cclk = cclk_val;
+ dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
+ return 0;
+}
+
+/**
+ * t4_get_pfres - retrieve VF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a physical
+ * function. The results are stored in @adapter->pfres.
+ */
+int t4_get_pfres(struct adapter *adapter)
+{
+ struct pf_resources *pfres = &adapter->params.pfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ u32 word;
+ int v;
+
+ /*
+ * Execute PFVF Read command to get VF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PFVF_CMD_PFN(adapter->pf) |
+ V_FW_PFVF_CMD_VFN(0));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Extract PF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ pfres->neq = G_FW_PFVF_CMD_NEQ(word);
+ return 0;
+}
+
+/* serial flash and firmware constants and flash config file constants */
+enum {
+ SF_ATTEMPTS = 10, /* max retries for SF operations */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_RD_ID = 0x9f, /* read ID */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+};
+
+/**
+ * sf1_read - read data from the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ int lock, u32 *valp)
+{
+ int ret;
+
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t4_write_reg(adapter, A_SF_OP,
+ V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+ ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
+ if (!ret)
+ *valp = t4_read_reg(adapter, A_SF_DATA);
+ return ret;
+}
+
+/**
+ * sf1_write - write data to the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ int lock, u32 val)
+{
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t4_write_reg(adapter, A_SF_DATA, val);
+ t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
+ V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
+ return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
+}
+
+/**
+ * t4_read_flash - read words from serial flash
+ * @adapter: the adapter
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
+ */
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
+{
+ int ret;
+
+ if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) ||
+ (addr & 3))
+ return -EINVAL;
+
+ addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST;
+
+ ret = sf1_write(adapter, 4, 1, 0, addr);
+ if (ret != 0)
+ return ret;
+
+ ret = sf1_read(adapter, 1, 1, 0, data);
+ if (ret != 0)
+ return ret;
+
+ for ( ; nwords; nwords--, data++) {
+ ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
+ if (nwords == 1)
+ t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = cpu_to_be32(*data);
+ }
+ return 0;
+}
+
+/**
+ * t4_get_exprom_version - return the Expansion ROM version (if any)
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the Expansion ROM header from FLASH and returns the version
+ * number (if present) through the @vers return value pointer. We return
+ * this in the Firmware Version Format since it's convenient. Return
+ * 0 on success, -ENOENT if no Expansion ROM is present.
+ */
+static int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
+{
+ struct exprom_header {
+ unsigned char hdr_arr[16]; /* must start with 0x55aa */
+ unsigned char hdr_ver[4]; /* Expansion ROM version */
+ } *hdr;
+ u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
+ sizeof(u32))];
+ int ret;
+
+ ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
+ ARRAY_SIZE(exprom_header_buf),
+ exprom_header_buf, 0);
+ if (ret)
+ return ret;
+
+ hdr = (struct exprom_header *)exprom_header_buf;
+ if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
+ return -ENOENT;
+
+ *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
+ V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
+ V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
+ V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
+ return 0;
+}
+
+/**
+ * t4_get_fw_version - read the firmware version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+static int t4_get_fw_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FW_START +
+ offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
+}
+
+/**
+ * t4_get_bs_version - read the firmware bootstrap version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW Bootstrap version from flash.
+ */
+static int t4_get_bs_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
+ offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
+}
+
+/**
+ * t4_get_tp_version - read the TP microcode version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the TP microcode version from flash.
+ */
+static int t4_get_tp_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FW_START +
+ offsetof(struct fw_hdr, tp_microcode_ver),
+ 1, vers, 0);
+}
+
+/**
+ * t4_get_version_info - extract various chip/firmware version information
+ * @adapter: the adapter
+ *
+ * Reads various chip/firmware version numbers and stores them into the
+ * adapter Adapter Parameters structure. If any of the efforts fails
+ * the first failure will be returned, but all of the version numbers
+ * will be read.
+ */
+int t4_get_version_info(struct adapter *adapter)
+{
+ int ret = 0;
+
+#define FIRST_RET(__getvinfo) \
+ do { \
+ int __ret = __getvinfo; \
+ if (__ret && !ret) \
+ ret = __ret; \
+ } while (0)
+
+ FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
+ FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
+ FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
+ FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
+
+#undef FIRST_RET
+
+ return ret;
+}
+
+/**
+ * t4_dump_version_info - dump all of the adapter configuration IDs
+ * @adapter: the adapter
+ *
+ * Dumps all of the various bits of adapter configuration version/revision
+ * IDs information. This is typically called at some point after
+ * t4_get_version_info() has been called.
+ */
+void t4_dump_version_info(struct adapter *adapter)
+{
+ /**
+ * Device information.
+ */
+ dev_info(adapter, "Chelsio rev %d\n",
+ CHELSIO_CHIP_RELEASE(adapter->params.chip));
+
+ /**
+ * Firmware Version.
+ */
+ if (!adapter->params.fw_vers)
+ dev_warn(adapter, "No firmware loaded\n");
+ else
+ dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
+
+ /**
+ * Bootstrap Firmware Version.
+ */
+ if (!adapter->params.bs_vers)
+ dev_warn(adapter, "No bootstrap loaded\n");
+ else
+ dev_info(adapter, "Bootstrap version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));
+
+ /**
+ * TP Microcode Version.
+ */
+ if (!adapter->params.tp_vers)
+ dev_warn(adapter, "No TP Microcode loaded\n");
+ else
+ dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
+
+ /**
+ * Expansion ROM version.
+ */
+ if (!adapter->params.er_vers)
+ dev_info(adapter, "No Expansion ROM loaded\n");
+ else
+ dev_info(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
+}
+
+#define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \
+ FW_PORT_CAP32_ANEG)
+/**
+ * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
+ * @caps16: a 16-bit Port Capabilities value
+ *
+ * Returns the equivalent 32-bit Port Capabilities value.
+ */
+fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
+{
+ fw_port_cap32_t caps32 = 0;
+
+#define CAP16_TO_CAP32(__cap) \
+ do { \
+ if (caps16 & FW_PORT_CAP_##__cap) \
+ caps32 |= FW_PORT_CAP32_##__cap; \
+ } while (0)
+
+ CAP16_TO_CAP32(SPEED_100M);
+ CAP16_TO_CAP32(SPEED_1G);
+ CAP16_TO_CAP32(SPEED_25G);
+ CAP16_TO_CAP32(SPEED_10G);
+ CAP16_TO_CAP32(SPEED_40G);
+ CAP16_TO_CAP32(SPEED_100G);
+ CAP16_TO_CAP32(FC_RX);
+ CAP16_TO_CAP32(FC_TX);
+ CAP16_TO_CAP32(ANEG);
+ CAP16_TO_CAP32(MDIX);
+ CAP16_TO_CAP32(MDIAUTO);
+ CAP16_TO_CAP32(FEC_RS);
+ CAP16_TO_CAP32(FEC_BASER_RS);
+ CAP16_TO_CAP32(802_3_PAUSE);
+ CAP16_TO_CAP32(802_3_ASM_DIR);
+
+#undef CAP16_TO_CAP32
+
+ return caps32;
+}
+
+/**
+ * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
+ * @caps32: a 32-bit Port Capabilities value
+ *
+ * Returns the equivalent 16-bit Port Capabilities value. Note that
+ * not all 32-bit Port Capabilities can be represented in the 16-bit
+ * Port Capabilities and some fields/values may not make it.
+ */
+static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
+{
+ fw_port_cap16_t caps16 = 0;
+
+#define CAP32_TO_CAP16(__cap) \
+ do { \
+ if (caps32 & FW_PORT_CAP32_##__cap) \
+ caps16 |= FW_PORT_CAP_##__cap; \
+ } while (0)
+
+ CAP32_TO_CAP16(SPEED_100M);
+ CAP32_TO_CAP16(SPEED_1G);
+ CAP32_TO_CAP16(SPEED_10G);
+ CAP32_TO_CAP16(SPEED_25G);
+ CAP32_TO_CAP16(SPEED_40G);
+ CAP32_TO_CAP16(SPEED_100G);
+ CAP32_TO_CAP16(FC_RX);
+ CAP32_TO_CAP16(FC_TX);
+ CAP32_TO_CAP16(802_3_PAUSE);
+ CAP32_TO_CAP16(802_3_ASM_DIR);
+ CAP32_TO_CAP16(ANEG);
+ CAP32_TO_CAP16(MDIX);
+ CAP32_TO_CAP16(MDIAUTO);
+ CAP32_TO_CAP16(FEC_RS);
+ CAP32_TO_CAP16(FEC_BASER_RS);
+
+#undef CAP32_TO_CAP16
+
+ return caps16;
+}
+
+/* Translate Firmware Pause specification to Common Code */
+static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
+{
+ enum cc_pause cc_pause = 0;
+
+ if (fw_pause & FW_PORT_CAP32_FC_RX)
+ cc_pause |= PAUSE_RX;
+ if (fw_pause & FW_PORT_CAP32_FC_TX)
+ cc_pause |= PAUSE_TX;
+
+ return cc_pause;
+}
+
+/* Translate Common Code Pause Frame specification into Firmware */
+static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
+{
+ fw_port_cap32_t fw_pause = 0;
+
+ if (cc_pause & PAUSE_RX)
+ fw_pause |= FW_PORT_CAP32_FC_RX;
+ if (cc_pause & PAUSE_TX)
+ fw_pause |= FW_PORT_CAP32_FC_TX;
+
+ return fw_pause;
+}
+
+/* Translate Firmware Forward Error Correction specification to Common Code */
+static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
+{
+ enum cc_fec cc_fec = 0;
+
+ if (fw_fec & FW_PORT_CAP32_FEC_RS)
+ cc_fec |= FEC_RS;
+ if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
+ cc_fec |= FEC_BASER_RS;
+
+ return cc_fec;
+}
+
+/* Translate Common Code Forward Error Correction specification to Firmware */
+static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
+{
+ fw_port_cap32_t fw_fec = 0;
+
+ if (cc_fec & FEC_RS)
+ fw_fec |= FW_PORT_CAP32_FEC_RS;
+ if (cc_fec & FEC_BASER_RS)
+ fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
+
+ return fw_fec;
+}
+
+/**
+ * t4_link_l1cfg - apply link configuration to MAC/PHY
+ * @adapter: the adapter
+ * @mbox: the Firmware Mailbox to use
+ * @port: the Port ID
+ * @lc: the Port's Link Configuration
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
+ struct link_config *lc)
+{
+ unsigned int fw_mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
+ unsigned int fw_caps = adap->params.fw_caps_support;
+ fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
+ struct fw_port_cmd cmd;
+
+ lc->link_ok = 0;
+
+ fw_fc = cc_to_fwcap_pause(lc->requested_fc);
+
+ /* Convert Common Code Forward Error Control settings into the
+ * Firmware's API. If the current Requested FEC has "Automatic"
+ * (IEEE 802.3) specified, then we use whatever the Firmware
+ * sent us as part of it's IEEE 802.3-based interpratation of
+ * the Transceiver Module EPROM FEC parameters. Otherwise we
+ * use whatever is in the current Requested FEC settings.
+ */
+ if (lc->requested_fec & FEC_AUTO)
+ cc_fec = lc->auto_fec;
+ else
+ cc_fec = lc->requested_fec;
+ fw_fec = cc_to_fwcap_fec(cc_fec);
+
+ /* Figure out what our Requested Port Capabilities are going to be.
+ */
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
+ rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
+ lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+ lc->fec = cc_fec;
+ } else if (lc->autoneg == AUTONEG_DISABLE) {
+ rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi;
+ lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+ lc->fec = cc_fec;
+ } else {
+ rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
+ }
+
+ /* And send that on to the Firmware ...
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_PORT_CMD_PORTID(port));
+ cmd.action_to_len16 =
+ cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
+ FW_PORT_ACTION_L1_CFG :
+ FW_PORT_ACTION_L1_CFG32) |
+ FW_LEN16(cmd));
+
+ if (fw_caps == FW_CAPS16)
+ cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
+ else
+ cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
+
+ return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4_flash_cfg_addr - return the address of the flash configuration file
+ * @adapter: the adapter
+ *
+ * Return the address within the flash where the Firmware Configuration
+ * File is stored, or an error if the device FLASH is too small to contain
+ * a Firmware Configuration File.
+ */
+int t4_flash_cfg_addr(struct adapter *adapter)
+{
+ /*
+ * If the device FLASH isn't large enough to hold a Firmware
+ * Configuration File, return an error.
+ */
+ if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
+ return -ENOSPC;
+
+ return FLASH_CFG_START;
+}
+
+#define PF_INTR_MASK (F_PFSW | F_PFCIM)
+
+/**
+ * t4_intr_enable - enable interrupts
+ * @adapter: the adapter whose interrupts should be enabled
+ *
+ * Enable PF-specific interrupts for the calling function and the top-level
+ * interrupt concentrator for global interrupts. Interrupts are already
+ * enabled at each module, here we just enable the roots of the interrupt
+ * hierarchies.
+ *
+ * Note: this function should be called only when the driver manages
+ * non PF-specific interrupts from the various HW modules. Only one PCI
+ * function at a time should be doing this.
+ */
+void t4_intr_enable(struct adapter *adapter)
+{
+ u32 val = 0;
+ u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
+ t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
+ F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
+ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
+ F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
+ F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
+ F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
+ F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
+ t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
+ t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
+}
+
+/**
+ * t4_intr_disable - disable interrupts
+ * @adapter: the adapter whose interrupts should be disabled
+ *
+ * Disable interrupts. We only disable the top-level interrupt
+ * concentrators. The caller must be a PCI function managing global
+ * interrupts.
+ */
+void t4_intr_disable(struct adapter *adapter)
+{
+ u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
+
+ t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
+ t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
+}
+
+/**
+ * t4_get_port_type_description - return Port Type string description
+ * @port_type: firmware Port Type enumeration
+ */
+const char *t4_get_port_type_description(enum fw_port_type port_type)
+{
+ static const char * const port_type_description[] = {
+ "Fiber_XFI",
+ "Fiber_XAUI",
+ "BT_SGMII",
+ "BT_XFI",
+ "BT_XAUI",
+ "KX4",
+ "CX4",
+ "KX",
+ "KR",
+ "SFP",
+ "BP_AP",
+ "BP4_AP",
+ "QSFP_10G",
+ "QSA",
+ "QSFP",
+ "BP40_BA",
+ "KR4_100G",
+ "CR4_QSFP",
+ "CR_QSFP",
+ "CR2_QSFP",
+ "SFP28",
+ "KR_SFP28",
+ };
+
+ if (port_type < ARRAY_SIZE(port_type_description))
+ return port_type_description[port_type];
+ return "UNKNOWN";
+}
+
+/**
+ * t4_get_mps_bg_map - return the buffer groups associated with a port
+ * @adap: the adapter
+ * @pidx: the port index
+ *
+ * Returns a bitmap indicating which MPS buffer groups are associated
+ * with the given port. Bit i is set if buffer group i is used by the
+ * port.
+ */
+unsigned int t4_get_mps_bg_map(struct adapter *adap, unsigned int pidx)
+{
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+ unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adap,
+ A_MPS_CMN_CTL));
+
+ if (pidx >= nports) {
+ dev_warn(adap, "MPS Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ switch (nports) {
+ case 1: return 0xf;
+ case 2: return 3 << (2 * pidx);
+ case 4: return 1 << pidx;
+ }
+ break;
+
+ case CHELSIO_T6:
+ switch (nports) {
+ case 2: return 1 << (2 * pidx);
+ }
+ break;
+ }
+
+ dev_err(adap, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
+ chip_version, nports);
+ return 0;
+}
+
+/**
+ * t4_get_tp_ch_map - return TP ingress channels associated with a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ *
+ * Returns a bitmap indicating which TP Ingress Channels are associated with
+ * a given Port. Bit i is set if TP Ingress Channel i is used by the Port.
+ */
+unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx)
+{
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter,
+ A_MPS_CMN_CTL));
+
+ if (pidx >= nports) {
+ dev_warn(adap, "TP Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ /* Note that this happens to be the same values as the MPS
+ * Buffer Group Map for these Chips. But we replicate the code
+ * here because they're really separate concepts.
+ */
+ switch (nports) {
+ case 1: return 0xf;
+ case 2: return 3 << (2 * pidx);
+ case 4: return 1 << pidx;
+ }
+ break;
+
+ case CHELSIO_T6:
+ switch (nports) {
+ case 2: return 1 << pidx;
+ }
+ break;
+ }
+
+ dev_err(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
+ chip_version, nports);
+ return 0;
+}
+
+/**
+ * t4_get_port_stats - collect port statistics
+ * @adap: the adapter
+ * @idx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics related to the given port from HW.
+ */
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
+{
+ u32 bgmap = t4_get_mps_bg_map(adap, idx);
+ u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
+
+#define GET_STAT(name) \
+ t4_read_reg64(adap, \
+ (is_t4(adap->params.chip) ? \
+ PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\
+ T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
+#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+
+ p->tx_octets = GET_STAT(TX_PORT_BYTES);
+ p->tx_frames = GET_STAT(TX_PORT_FRAMES);
+ p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
+ p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
+ p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
+ p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
+ p->tx_frames_64 = GET_STAT(TX_PORT_64B);
+ p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
+ p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
+ p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
+ p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
+ p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
+ p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
+ p->tx_drop = GET_STAT(TX_PORT_DROP);
+ p->tx_pause = GET_STAT(TX_PORT_PAUSE);
+ p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
+ p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
+ p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
+ p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
+ p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
+ p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
+ p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
+ p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & F_COUNTPAUSESTATTX) {
+ p->tx_frames -= p->tx_pause;
+ p->tx_octets -= p->tx_pause * 64;
+ }
+ if (stat_ctl & F_COUNTPAUSEMCTX)
+ p->tx_mcast_frames -= p->tx_pause;
+ }
+
+ p->rx_octets = GET_STAT(RX_PORT_BYTES);
+ p->rx_frames = GET_STAT(RX_PORT_FRAMES);
+ p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
+ p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
+ p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
+ p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
+ p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
+ p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
+ p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
+ p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
+ p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
+ p->rx_frames_64 = GET_STAT(RX_PORT_64B);
+ p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
+ p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
+ p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
+ p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
+ p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
+ p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
+ p->rx_pause = GET_STAT(RX_PORT_PAUSE);
+ p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
+ p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
+ p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
+ p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
+ p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
+ p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
+ p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
+ p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & F_COUNTPAUSESTATRX) {
+ p->rx_frames -= p->rx_pause;
+ p->rx_octets -= p->rx_pause * 64;
+ }
+ if (stat_ctl & F_COUNTPAUSEMCRX)
+ p->rx_mcast_frames -= p->rx_pause;
+ }
+
+ p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
+ p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/**
+ * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
+ * @adap: The adapter
+ * @idx: The port
+ * @stats: Current stats to fill
+ * @offset: Previous stats snapshot
+ */
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset)
+{
+ u64 *s, *o;
+ unsigned int i;
+
+ t4_get_port_stats(adap, idx, stats);
+ for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
+ i < (sizeof(struct port_stats) / sizeof(u64));
+ i++, s++, o++)
+ *s -= *o;
+}
+
+/**
+ * t4_clr_port_stats - clear port statistics
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Clear HW statistics for the given port.
+ */
+void t4_clr_port_stats(struct adapter *adap, int idx)
+{
+ unsigned int i;
+ u32 bgmap = t4_get_mps_bg_map(adap, idx);
+ u32 port_base_addr;
+
+ if (is_t4(adap->params.chip))
+ port_base_addr = PORT_BASE(idx);
+ else
+ port_base_addr = T5_PORT_BASE(idx);
+
+ for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ for (i = 0; i < 4; i++)
+ if (bgmap & (1 << i)) {
+ t4_write_reg(adap,
+ A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
+ i * 8, 0);
+ t4_write_reg(adap,
+ A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
+ i * 8, 0);
+ }
+}
+
+/**
+ * t4_fw_hello - establish communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @evt_mbox: mailbox to receive async FW events
+ * @master: specifies the caller's willingness to be the device master
+ * @state: returns the current device state (if non-NULL)
+ *
+ * Issues a command to establish communication with FW. Returns either
+ * an error (negative integer) or the mailbox of the Master PF.
+ */
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state)
+{
+ int ret;
+ struct fw_hello_cmd c;
+ u32 v;
+ unsigned int master_mbox;
+ int retries = FW_CMD_HELLO_RETRIES;
+
+retry:
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, HELLO, WRITE);
+ c.err_to_clearinit = cpu_to_be32(
+ V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
+ V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
+ V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
+ M_FW_HELLO_CMD_MBMASTER) |
+ V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
+ V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
+ F_FW_HELLO_CMD_CLEARINIT);
+
+ /*
+ * Issue the HELLO command to the firmware. If it's not successful
+ * but indicates that we got a "busy" or "timeout" condition, retry
+ * the HELLO until we exhaust our retry limit. If we do exceed our
+ * retry limit, check to see if the firmware left us any error
+ * information and report that if so ...
+ */
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret != FW_SUCCESS) {
+ if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
+ goto retry;
+ if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
+ t4_report_fw_error(adap);
+ return ret;
+ }
+
+ v = be32_to_cpu(c.err_to_clearinit);
+ master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
+ if (state) {
+ if (v & F_FW_HELLO_CMD_ERR)
+ *state = DEV_STATE_ERR;
+ else if (v & F_FW_HELLO_CMD_INIT)
+ *state = DEV_STATE_INIT;
+ else
+ *state = DEV_STATE_UNINIT;
+ }
+
+ /*
+ * If we're not the Master PF then we need to wait around for the
+ * Master PF Driver to finish setting up the adapter.
+ *
+ * Note that we also do this wait if we're a non-Master-capable PF and
+ * there is no current Master PF; a Master PF may show up momentarily
+ * and we wouldn't want to fail pointlessly. (This can happen when an
+ * OS loads lots of different drivers rapidly at the same time). In
+ * this case, the Master PF returned by the firmware will be
+ * M_PCIE_FW_MASTER so the test below will work ...
+ */
+ if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
+ master_mbox != mbox) {
+ int waiting = FW_CMD_HELLO_TIMEOUT;
+
+ /*
+ * Wait for the firmware to either indicate an error or
+ * initialized state. If we see either of these we bail out
+ * and report the issue to the caller. If we exhaust the
+ * "hello timeout" and we haven't exhausted our retries, try
+ * again. Otherwise bail with a timeout error.
+ */
+ for (;;) {
+ u32 pcie_fw;
+
+ msleep(50);
+ waiting -= 50;
+
+ /*
+ * If neither Error nor Initialialized are indicated
+ * by the firmware keep waiting till we exaust our
+ * timeout ... and then retry if we haven't exhausted
+ * our retries ...
+ */
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
+ if (waiting <= 0) {
+ if (retries-- > 0)
+ goto retry;
+
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ /*
+ * We either have an Error or Initialized condition
+ * report errors preferentially.
+ */
+ if (state) {
+ if (pcie_fw & F_PCIE_FW_ERR)
+ *state = DEV_STATE_ERR;
+ else if (pcie_fw & F_PCIE_FW_INIT)
+ *state = DEV_STATE_INIT;
+ }
+
+ /*
+ * If we arrived before a Master PF was selected and
+ * there's not a valid Master PF, grab its identity
+ * for our caller.
+ */
+ if (master_mbox == M_PCIE_FW_MASTER &&
+ (pcie_fw & F_PCIE_FW_MASTER_VLD))
+ master_mbox = G_PCIE_FW_MASTER(pcie_fw);
+ break;
+ }
+ }
+
+ return master_mbox;
+}
+
+/**
+ * t4_fw_bye - end communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to terminate communication with FW.
+ */
+int t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_bye_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, BYE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_reset - issue a reset to FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @reset: specifies the type of reset to perform
+ *
+ * Issues a reset command of the specified type to FW.
+ */
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
+{
+ struct fw_reset_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, RESET, WRITE);
+ c.val = cpu_to_be32(reset);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @force: force uP into RESET even if FW RESET command fails
+ *
+ * Issues a RESET command to firmware (if desired) with a HALT indication
+ * and then puts the microprocessor into RESET state. The RESET command
+ * will only be issued if a legitimate mailbox is provided (mbox <=
+ * M_PCIE_FW_MASTER).
+ *
+ * This is generally used in order for the host to safely manipulate the
+ * adapter without fear of conflicting with whatever the firmware might
+ * be doing. The only way out of this state is to RESTART the firmware
+ * ...
+ */
+int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
+{
+ int ret = 0;
+
+ /*
+ * If a legitimate mailbox is provided, issue a RESET command
+ * with a HALT indication.
+ */
+ if (mbox <= M_PCIE_FW_MASTER) {
+ struct fw_reset_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, RESET, WRITE);
+ c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
+ c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ }
+
+ /*
+ * Normally we won't complete the operation if the firmware RESET
+ * command fails but if our caller insists we'll go ahead and put the
+ * uP into RESET. This can be useful if the firmware is hung or even
+ * missing ... We'll have to take the risk of putting the uP into
+ * RESET without the cooperation of firmware in that case.
+ *
+ * We also force the firmware's HALT flag to be on in case we bypassed
+ * the firmware RESET command above or we're dealing with old firmware
+ * which doesn't have the HALT capability. This will serve as a flag
+ * for the incoming firmware to know that it's coming out of a HALT
+ * rather than a RESET ... if it's new enough to understand that ...
+ */
+ if (ret == 0 || force) {
+ t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
+ t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
+ F_PCIE_FW_HALT);
+ }
+
+ /*
+ * And we always return the result of the firmware RESET command
+ * even when we force the uP into RESET ...
+ */
+ return ret;
+}
+
+/**
+ * t4_fw_restart - restart the firmware by taking the uP out of RESET
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @reset: if we want to do a RESET to restart things
+ *
+ * Restart firmware previously halted by t4_fw_halt(). On successful
+ * return the previous PF Master remains as the new PF Master and there
+ * is no need to issue a new HELLO command, etc.
+ *
+ * We do this in two ways:
+ *
+ * 1. If we're dealing with newer firmware we'll simply want to take
+ * the chip's microprocessor out of RESET. This will cause the
+ * firmware to start up from its start vector. And then we'll loop
+ * until the firmware indicates it's started again (PCIE_FW.HALT
+ * reset to 0) or we timeout.
+ *
+ * 2. If we're dealing with older firmware then we'll need to RESET
+ * the chip since older firmware won't recognize the PCIE_FW.HALT
+ * flag and automatically RESET itself on startup.
+ */
+int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
+{
+ if (reset) {
+ /*
+ * Since we're directing the RESET instead of the firmware
+ * doing it automatically, we need to clear the PCIE_FW.HALT
+ * bit.
+ */
+ t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
+
+ /*
+ * If we've been given a valid mailbox, first try to get the
+ * firmware to do the RESET. If that works, great and we can
+ * return success. Otherwise, if we haven't been given a
+ * valid mailbox or the RESET command failed, fall back to
+ * hitting the chip with a hammer.
+ */
+ if (mbox <= M_PCIE_FW_MASTER) {
+ t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
+ msleep(100);
+ if (t4_fw_reset(adap, mbox,
+ F_PIORST | F_PIORSTMODE) == 0)
+ return 0;
+ }
+
+ t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
+ msleep(2000);
+ } else {
+ int ms;
+
+ t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
+ for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
+ if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
+ return FW_SUCCESS;
+ msleep(100);
+ ms += 100;
+ }
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/**
+ * t4_fl_pkt_align - return the fl packet alignment
+ * @adap: the adapter
+ *
+ * T4 has a single field to specify the packing and padding boundary.
+ * T5 onwards has separate fields for this and hence the alignment for
+ * next packet offset is maximum of these two.
+ */
+int t4_fl_pkt_align(struct adapter *adap)
+{
+ u32 sge_control, sge_control2;
+ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+ sge_control = t4_read_reg(adap, A_SGE_CONTROL);
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ ingpad_shift = X_INGPADBOUNDARY_SHIFT;
+ else
+ ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
+
+ ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
+
+ fl_align = ingpadboundary;
+ if (!is_t4(adap->params.chip)) {
+ sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
+ ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
+ if (ingpackboundary == X_INGPACKBOUNDARY_16B)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ X_INGPACKBOUNDARY_SHIFT);
+
+ fl_align = max(ingpadboundary, ingpackboundary);
+ }
+ return fl_align;
+}
+
+/**
+ * t4_fixup_host_params_compat - fix up host-dependent parameters
+ * @adap: the adapter
+ * @page_size: the host's Base Page Size
+ * @cache_line_size: the host's Cache Line Size
+ * @chip_compat: maintain compatibility with designated chip
+ *
+ * Various registers in the chip contain values which are dependent on the
+ * host's Base Page and Cache Line Sizes. This function will fix all of
+ * those registers with the appropriate values as passed in ...
+ *
+ * @chip_compat is used to limit the set of changes that are made
+ * to be compatible with the indicated chip release. This is used by
+ * drivers to maintain compatibility with chip register settings when
+ * the drivers haven't [yet] been updated with new chip support.
+ */
+int t4_fixup_host_params_compat(struct adapter *adap,
+ unsigned int page_size,
+ unsigned int cache_line_size,
+ enum chip_type chip_compat)
+{
+ unsigned int page_shift = cxgbe_fls(page_size) - 1;
+ unsigned int sge_hps = page_shift - 10;
+ unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
+ unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
+ unsigned int fl_align_log = cxgbe_fls(fl_align) - 1;
+
+ t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
+ V_HOSTPAGESIZEPF0(sge_hps) |
+ V_HOSTPAGESIZEPF1(sge_hps) |
+ V_HOSTPAGESIZEPF2(sge_hps) |
+ V_HOSTPAGESIZEPF3(sge_hps) |
+ V_HOSTPAGESIZEPF4(sge_hps) |
+ V_HOSTPAGESIZEPF5(sge_hps) |
+ V_HOSTPAGESIZEPF6(sge_hps) |
+ V_HOSTPAGESIZEPF7(sge_hps));
+
+ if (is_t4(adap->params.chip) || is_t4(chip_compat))
+ t4_set_reg_field(adap, A_SGE_CONTROL,
+ V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
+ F_EGRSTATUSPAGESIZE,
+ V_INGPADBOUNDARY(fl_align_log -
+ X_INGPADBOUNDARY_SHIFT) |
+ V_EGRSTATUSPAGESIZE(stat_len != 64));
+ else {
+ unsigned int pack_align;
+ unsigned int ingpad, ingpack;
+ unsigned int pcie_cap;
+
+ /*
+ * T5 introduced the separation of the Free List Padding and
+ * Packing Boundaries. Thus, we can select a smaller Padding
+ * Boundary to avoid uselessly chewing up PCIe Link and Memory
+ * Bandwidth, and use a Packing Boundary which is large enough
+ * to avoid false sharing between CPUs, etc.
+ *
+ * For the PCI Link, the smaller the Padding Boundary the
+ * better. For the Memory Controller, a smaller Padding
+ * Boundary is better until we cross under the Memory Line
+ * Size (the minimum unit of transfer to/from Memory). If we
+ * have a Padding Boundary which is smaller than the Memory
+ * Line Size, that'll involve a Read-Modify-Write cycle on the
+ * Memory Controller which is never good.
+ */
+
+ /* We want the Packing Boundary to be based on the Cache Line
+ * Size in order to help avoid False Sharing performance
+ * issues between CPUs, etc. We also want the Packing
+ * Boundary to incorporate the PCI-E Maximum Payload Size. We
+ * get best performance when the Packing Boundary is a
+ * multiple of the Maximum Payload Size.
+ */
+ pack_align = fl_align;
+ pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ unsigned int mps, mps_log;
+ u16 devctl;
+
+ /* The PCIe Device Control Maximum Payload Size field
+ * [bits 7:5] encodes sizes as powers of 2 starting at
+ * 128 bytes.
+ */
+ t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
+ &devctl);
+ mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
+ mps = 1 << mps_log;
+ if (mps > pack_align)
+ pack_align = mps;
+ }
+
+ /*
+ * N.B. T5 has a different interpretation of the "0" value for
+ * the Packing Boundary. This corresponds to 16 bytes instead
+ * of the expected 32 bytes. We never have a Packing Boundary
+ * less than 32 bytes so we can't use that special value but
+ * on the other hand, if we wanted 32 bytes, the best we can
+ * really do is 64 bytes ...
+ */
+ if (pack_align <= 16) {
+ ingpack = X_INGPACKBOUNDARY_16B;
+ fl_align = 16;
+ } else if (pack_align == 32) {
+ ingpack = X_INGPACKBOUNDARY_64B;
+ fl_align = 64;
+ } else {
+ unsigned int pack_align_log = cxgbe_fls(pack_align) - 1;
+
+ ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
+ fl_align = pack_align;
+ }
+
+ /* Use the smallest Ingress Padding which isn't smaller than
+ * the Memory Controller Read/Write Size. We'll take that as
+ * being 8 bytes since we don't know of any system with a
+ * wider Memory Controller Bus Width.
+ */
+ if (is_t5(adap->params.chip))
+ ingpad = X_INGPADBOUNDARY_32B;
+ else
+ ingpad = X_T6_INGPADBOUNDARY_8B;
+ t4_set_reg_field(adap, A_SGE_CONTROL,
+ V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
+ F_EGRSTATUSPAGESIZE,
+ V_INGPADBOUNDARY(ingpad) |
+ V_EGRSTATUSPAGESIZE(stat_len != 64));
+ t4_set_reg_field(adap, A_SGE_CONTROL2,
+ V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
+ V_INGPACKBOUNDARY(ingpack));
+ }
+
+ /*
+ * Adjust various SGE Free List Host Buffer Sizes.
+ *
+ * The first four entries are:
+ *
+ * 0: Host Page Size
+ * 1: 64KB
+ * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
+ * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
+ *
+ * For the single-MTU buffers in unpacked mode we need to include
+ * space for the SGE Control Packet Shift, 14 byte Ethernet header,
+ * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
+ * Padding boundary. All of these are accommodated in the Factory
+ * Default Firmware Configuration File but we need to adjust it for
+ * this host's cache line size.
+ */
+ t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
+ t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
+ (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1)
+ & ~(fl_align - 1));
+ t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
+ (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1)
+ & ~(fl_align - 1));
+
+ t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
+
+ return 0;
+}
+
+/**
+ * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
+ * @adap: the adapter
+ * @page_size: the host's Base Page Size
+ * @cache_line_size: the host's Cache Line Size
+ *
+ * Various registers in T4 contain values which are dependent on the
+ * host's Base Page and Cache Line Sizes. This function will fix all of
+ * those registers with the appropriate values as passed in ...
+ *
+ * This routine makes changes which are compatible with T4 chips.
+ */
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+ unsigned int cache_line_size)
+{
+ return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
+ T4_LAST_REV);
+}
+
+/**
+ * t4_fw_initialize - ask FW to initialize the device
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to FW to partially initialize the device. This
+ * performs initialization that generally doesn't depend on user input.
+ */
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_initialize_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, INITIALIZE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_query_params_rw - query FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ * @rw: Write and read flag
+ *
+ * Reads the value of FW or device parameters. Up to 7 parameters can be
+ * queried at once.
+ */
+static int t4_query_params_rw(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int nparams, const u32 *params,
+ u32 *val, int rw)
+{
+ unsigned int i;
+ int ret;
+ struct fw_params_cmd c;
+ __be32 *p = &c.param[0].mnem;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_PARAMS_CMD_PFN(pf) |
+ V_FW_PARAMS_CMD_VFN(vf));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+ for (i = 0; i < nparams; i++) {
+ *p++ = cpu_to_be32(*params++);
+ if (rw)
+ *p = cpu_to_be32(*(val + i));
+ p++;
+ }
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0)
+ for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
+ *val++ = be32_to_cpu(*p);
+ return ret;
+}
+
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val)
+{
+ return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
+}
+
+/**
+ * t4_set_params_timeout - sets FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ * @timeout: the timeout time
+ *
+ * Sets the value of FW or device parameters. Up to 7 parameters can be
+ * specified at once.
+ */
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int nparams, const u32 *params,
+ const u32 *val, int timeout)
+{
+ struct fw_params_cmd c;
+ __be32 *p = &c.param[0].mnem;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_PARAMS_CMD_PFN(pf) |
+ V_FW_PARAMS_CMD_VFN(vf));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+ while (nparams--) {
+ *p++ = cpu_to_be32(*params++);
+ *p++ = cpu_to_be32(*val++);
+ }
+
+ return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
+}
+
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ const u32 *val)
+{
+ return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
+ FW_CMD_MAX_TIMEOUT);
+}
+
+/**
+ * t4_alloc_vi_func - allocate a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ * @portfunc: which Port Application Function MAC Address is desired
+ * @idstype: Intrusion Detection Type
+ *
+ * Allocates a virtual interface for the given physical port. If @mac is
+ * not %NULL it contains the MAC addresses of the VI as assigned by FW.
+ * @mac should be large enough to hold @nmac Ethernet addresses, they are
+ * stored consecutively so the space needed is @nmac * 6 bytes.
+ * Returns a negative error number or the non-negative VI id.
+ */
+int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
+ unsigned int port, unsigned int pf, unsigned int vf,
+ unsigned int nmac, u8 *mac, unsigned int *rss_size,
+ unsigned int portfunc, unsigned int idstype)
+{
+ int ret;
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+ V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
+ c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
+ V_FW_VI_CMD_FUNC(portfunc));
+ c.portid_pkd = V_FW_VI_CMD_PORTID(port);
+ c.nmac = nmac - 1;
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ if (mac) {
+ memcpy(mac, c.mac, sizeof(c.mac));
+ switch (nmac) {
+ case 5:
+ memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+ /* FALLTHROUGH */
+ case 4:
+ memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+ /* FALLTHROUGH */
+ case 3:
+ memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+ /* FALLTHROUGH */
+ case 2:
+ memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
+ /* FALLTHROUGH */
+ }
+ }
+ if (rss_size)
+ *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
+ return G_FW_VI_CMD_VIID(cpu_to_be16(c.type_to_viid));
+}
+
+/**
+ * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ *
+ * Backwards compatible and convieniance routine to allocate a Virtual
+ * Interface with a Ethernet Port Application Function and Intrustion
+ * Detection System disabled.
+ */
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size)
+{
+ return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
+ FW_VI_FUNC_ETH, 0);
+}
+
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifiler
+ *
+ * Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int viid)
+{
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) |
+ V_FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
+ c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
+
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_set_rxmode - set Rx properties of a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @mtu: the new MTU or -1
+ * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
+ * -1 no change
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sets Rx properties of a virtual interface.
+ */
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok)
+{
+ struct fw_vi_rxmode_cmd c;
+
+ /* convert to FW values */
+ if (mtu < 0)
+ mtu = M_FW_VI_RXMODE_CMD_MTU;
+ if (promisc < 0)
+ promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
+ if (all_multi < 0)
+ all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
+ if (bcast < 0)
+ bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
+ if (vlanex < 0)
+ vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_VI_RXMODE_CMD_VIID(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
+ V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+ V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+ V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+ V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+ if (is_pf4(adap))
+ return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL,
+ sleep_ok);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_change_mac - modifies the exact-match filter for a MAC address
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @idx: index of existing filter for old value of MAC address, or -1
+ * @addr: the new MAC address value
+ * @persist: whether a new MAC allocation should be persistent
+ * @add_smt: if true also add the address to the HW SMT
+ *
+ * Modifies an exact-match filter and sets it to the new MAC address if
+ * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
+ * latter case the address is added persistently if @persist is %true.
+ *
+ * Note that in general it is not possible to modify the value of a given
+ * filter so the generic way to modify an address filter is to free the one
+ * being used by the old address value and allocate a new filter for the
+ * new address value.
+ *
+ * Returns a negative error number or the index of the filter with the new
+ * MAC value. Note that this index may differ from @idx.
+ */
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int idx, const u8 *addr, bool persist, bool add_smt)
+{
+ int ret, mode;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_exact *p = c.u.exact;
+ int max_mac_addr = adap->params.arch.mps_tcam_size;
+
+ if (idx < 0) /* new allocation */
+ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+ mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_VI_MAC_CMD_VIID(viid));
+ c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
+ p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
+ V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
+ V_FW_VI_MAC_CMD_IDX(idx));
+ memcpy(p->macaddr, addr, sizeof(p->macaddr));
+
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
+ if (ret == 0) {
+ ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
+ if (ret >= max_mac_addr)
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+/**
+ * t4_enable_vi_params - enable/disable a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ * @dcb_en: 1=enable delivery of Data Center Bridging messages.
+ *
+ * Enables/disables a virtual interface. Note that setting DCB Enable
+ * only makes sense when enabling a Virtual Interface ...
+ */
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+ unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
+{
+ struct fw_vi_enable_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_VI_ENABLE_CMD_VIID(viid));
+ c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
+ V_FW_VI_ENABLE_CMD_EEN(tx_en) |
+ V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
+ FW_LEN16(c));
+ if (is_pf4(adap))
+ return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_enable_vi - enable/disable a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ *
+ * Enables/disables a virtual interface. Note that setting DCB Enable
+ * only makes sense when enabling a Virtual Interface ...
+ */
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool rx_en, bool tx_en)
+{
+ return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
+}
+
+/**
+ * t4_iq_start_stop - enable/disable an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @start: %true to enable the queues, %false to disable them
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Starts or stops an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+ unsigned int pf, unsigned int vf, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC);
+ c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
+ V_FW_IQ_CMD_IQSTOP(!start) |
+ FW_LEN16(c));
+ c.iqid = cpu_to_be16(iqid);
+ c.fl0id = cpu_to_be16(fl0id);
+ c.fl1id = cpu_to_be16(fl1id);
+ if (is_pf4(adap)) {
+ c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ } else {
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+ }
+}
+
+/**
+ * t4_iq_free - free an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Frees an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
+ c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
+ c.iqid = cpu_to_be16(iqid);
+ c.fl0id = cpu_to_be16(fl0id);
+ c.fl1id = cpu_to_be16(fl1id);
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_eth_eq_free - free an Ethernet egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an Ethernet egress queue.
+ */
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_eth_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
+ c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_link_down_rc_str - return a string for a Link Down Reason Code
+ * @link_down_rc: Link Down Reason Code
+ *
+ * Returns a string representation of the Link Down Reason Code.
+ */
+static const char *t4_link_down_rc_str(unsigned char link_down_rc)
+{
+ static const char * const reason[] = {
+ "Link Down",
+ "Remote Fault",
+ "Auto-negotiation Failure",
+ "Reserved",
+ "Insufficient Airflow",
+ "Unable To Determine Reason",
+ "No RX Signal Detected",
+ "Reserved",
+ };
+
+ if (link_down_rc >= ARRAY_SIZE(reason))
+ return "Bad Reason Code";
+
+ return reason[link_down_rc];
+}
+
+/* Return the highest speed set in the port capabilities, in Mb/s. */
+static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
+{
+#define TEST_SPEED_RETURN(__caps_speed, __speed) \
+ do { \
+ if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
+ return __speed; \
+ } while (0)
+
+ TEST_SPEED_RETURN(100G, 100000);
+ TEST_SPEED_RETURN(50G, 50000);
+ TEST_SPEED_RETURN(40G, 40000);
+ TEST_SPEED_RETURN(25G, 25000);
+ TEST_SPEED_RETURN(10G, 10000);
+ TEST_SPEED_RETURN(1G, 1000);
+ TEST_SPEED_RETURN(100M, 100);
+
+#undef TEST_SPEED_RETURN
+
+ return 0;
+}
+
+/**
+ * t4_handle_get_port_info - process a FW reply message
+ * @pi: the port info
+ * @rpl: start of the FW message
+ *
+ * Processes a GET_PORT_INFO FW reply message.
+ */
+static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+{
+ const struct fw_port_cmd *cmd = (const void *)rpl;
+ int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16));
+ fw_port_cap32_t pcaps, acaps, linkattr;
+ struct link_config *lc = &pi->link_cfg;
+ struct adapter *adapter = pi->adapter;
+ enum fw_port_module_type mod_type;
+ enum fw_port_type port_type;
+ unsigned int speed, fc, fec;
+ int link_ok, linkdnrc;
+
+ /* Extract the various fields from the Port Information message.
+ */
+ switch (action) {
+ case FW_PORT_ACTION_GET_PORT_INFO: {
+ u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
+
+ link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0;
+ linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus);
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mod_type = G_FW_PORT_CMD_MODTYPE(lstatus);
+ pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
+ acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
+
+ /* Unfortunately the format of the Link Status in the old
+ * 16-bit Port Information message isn't the same as the
+ * 16-bit Port Capabilities bitfield used everywhere else ...
+ */
+ linkattr = 0;
+ if (lstatus & F_FW_PORT_CMD_RXPAUSE)
+ linkattr |= FW_PORT_CAP32_FC_RX;
+ if (lstatus & F_FW_PORT_CMD_TXPAUSE)
+ linkattr |= FW_PORT_CAP32_FC_TX;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ linkattr |= FW_PORT_CAP32_SPEED_100M;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ linkattr |= FW_PORT_CAP32_SPEED_1G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ linkattr |= FW_PORT_CAP32_SPEED_10G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
+ linkattr |= FW_PORT_CAP32_SPEED_25G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ linkattr |= FW_PORT_CAP32_SPEED_40G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
+ linkattr |= FW_PORT_CAP32_SPEED_100G;
+
+ break;
+ }
+
+ case FW_PORT_ACTION_GET_PORT_INFO32: {
+ u32 lstatus32 =
+ be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
+
+ link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0;
+ linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32);
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32);
+ pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
+ acaps = be32_to_cpu(cmd->u.info32.acaps32);
+ linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
+ break;
+ }
+
+ default:
+ dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n",
+ be32_to_cpu(cmd->action_to_len16));
+ return;
+ }
+
+ fec = fwcap_to_cc_fec(acaps);
+
+ fc = fwcap_to_cc_pause(linkattr);
+ speed = fwcap_to_speed(linkattr);
+
+ if (mod_type != pi->mod_type) {
+ lc->auto_fec = fec;
+ pi->port_type = port_type;
+ pi->mod_type = mod_type;
+ t4_os_portmod_changed(adapter, pi->pidx);
+ }
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc || fec != lc->fec) { /* something changed */
+ if (!link_ok && lc->link_ok) {
+ lc->link_down_rc = linkdnrc;
+ dev_warn(adap, "Port %d link down, reason: %s\n",
+ pi->tx_chan, t4_link_down_rc_str(linkdnrc));
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ lc->fec = fec;
+ lc->pcaps = pcaps;
+ lc->acaps = acaps & ADVERT_MASK;
+
+ if (lc->acaps & FW_PORT_CAP32_ANEG) {
+ lc->autoneg = AUTONEG_ENABLE;
+ } else {
+ /* When Autoneg is disabled, user needs to set
+ * single speed.
+ * Similar to cxgb4_ethtool.c: set_link_ksettings
+ */
+ lc->acaps = 0;
+ lc->requested_speed = fwcap_to_speed(acaps);
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+ }
+}
+
+/**
+ * t4_ctrl_eq_free - free a control egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ctrl_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_EQ_CTRL_CMD_PFN(pf) |
+ V_FW_EQ_CTRL_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+ c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_handle_fw_rpl - process a FW reply message
+ * @adap: the adapter
+ * @rpl: start of the FW message
+ *
+ * Processes a FW message, such as link state change messages.
+ */
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
+{
+ u8 opcode = *(const u8 *)rpl;
+
+ /*
+ * This might be a port command ... this simplifies the following
+ * conditionals ... We can get away with pre-dereferencing
+ * action_to_len16 because it's in the first 16 bytes and all messages
+ * will be at least that long.
+ */
+ const struct fw_port_cmd *p = (const void *)rpl;
+ unsigned int action =
+ G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
+
+ if (opcode == FW_PORT_CMD &&
+ (action == FW_PORT_ACTION_GET_PORT_INFO ||
+ action == FW_PORT_ACTION_GET_PORT_INFO32)) {
+ /* link/module state change message */
+ int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
+ struct port_info *pi = NULL;
+ int i;
+
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->tx_chan == chan)
+ break;
+ }
+
+ t4_handle_get_port_info(pi, rpl);
+ } else {
+ dev_warn(adap, "Unknown firmware reply %d\n", opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void t4_reset_link_config(struct adapter *adap, int idx)
+{
+ struct port_info *pi = adap2pinfo(adap, idx);
+ struct link_config *lc = &pi->link_cfg;
+
+ lc->link_ok = 0;
+ lc->requested_speed = 0;
+ lc->requested_fc = 0;
+ lc->speed = 0;
+ lc->fc = 0;
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @pcaps: link Port Capabilities
+ * @acaps: link current Advertised Port Capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/flow-control/autonegotiation settings.
+ */
+void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
+ fw_port_cap32_t acaps)
+{
+ lc->pcaps = pcaps;
+ lc->requested_speed = 0;
+ lc->speed = 0;
+ lc->requested_fc = 0;
+ lc->fc = 0;
+
+ /**
+ * For Forward Error Control, we default to whatever the Firmware
+ * tells us the Link is currently advertising.
+ */
+ lc->auto_fec = fwcap_to_cc_fec(acaps);
+ lc->requested_fec = FEC_AUTO;
+ lc->fec = lc->auto_fec;
+
+ if (lc->pcaps & FW_PORT_CAP32_ANEG) {
+ lc->acaps = lc->pcaps & ADVERT_MASK;
+ lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
+ } else {
+ lc->acaps = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+/**
+ * t4_wait_dev_ready - wait till to reads of registers work
+ *
+ * Right after the device is RESET is can take a small amount of time
+ * for it to respond to register reads. Until then, all reads will
+ * return either 0xff...ff or 0xee...ee. Return an error if reads
+ * don't work within a reasonable time frame.
+ */
+static int t4_wait_dev_ready(struct adapter *adapter)
+{
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+
+ if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
+ return 0;
+
+ msleep(500);
+ whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
+ return 0;
+
+ dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
+ whoami);
+ return -EIO;
+}
+
+struct flash_desc {
+ u32 vendor_and_model_id;
+ u32 size_mb;
+};
+
+int t4_get_flash_params(struct adapter *adapter)
+{
+ /*
+ * Table for non-standard supported Flash parts. Note, all Flash
+ * parts must have 64KB sectors.
+ */
+ static struct flash_desc supported_flash[] = {
+ { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
+ };
+
+ int ret;
+ u32 flashid = 0;
+ unsigned int part, manufacturer;
+ unsigned int density, size = 0;
+
+ /**
+ * Issue a Read ID Command to the Flash part. We decode supported
+ * Flash parts and their sizes from this. There's a newer Query
+ * Command which can retrieve detailed geometry information but
+ * many Flash parts don't support it.
+ */
+ ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
+ if (!ret)
+ ret = sf1_read(adapter, 3, 0, 1, &flashid);
+ t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
+ if (ret < 0)
+ return ret;
+
+ /**
+ * Check to see if it's one of our non-standard supported Flash parts.
+ */
+ for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
+ if (supported_flash[part].vendor_and_model_id == flashid) {
+ adapter->params.sf_size =
+ supported_flash[part].size_mb;
+ adapter->params.sf_nsec =
+ adapter->params.sf_size / SF_SEC_SIZE;
+ goto found;
+ }
+ }
+
+ /**
+ * Decode Flash part size. The code below looks repetative with
+ * common encodings, but that's not guaranteed in the JEDEC
+ * specification for the Read JADEC ID command. The only thing that
+ * we're guaranteed by the JADEC specification is where the
+ * Manufacturer ID is in the returned result. After that each
+ * Manufacturer ~could~ encode things completely differently.
+ * Note, all Flash parts must have 64KB sectors.
+ */
+ manufacturer = flashid & 0xff;
+ switch (manufacturer) {
+ case 0x20: { /* Micron/Numonix */
+ /**
+ * This Density -> Size decoding table is taken from Micron
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x14:
+ size = 1 << 20; /* 1MB */
+ break;
+ case 0x15:
+ size = 1 << 21; /* 2MB */
+ break;
+ case 0x16:
+ size = 1 << 22; /* 4MB */
+ break;
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ case 0x19:
+ size = 1 << 25; /* 32MB */
+ break;
+ case 0x20:
+ size = 1 << 26; /* 64MB */
+ break;
+ case 0x21:
+ size = 1 << 27; /* 128MB */
+ break;
+ case 0x22:
+ size = 1 << 28; /* 256MB */
+ break;
+ }
+ break;
+ }
+
+ case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
+ /**
+ * This Density -> Size decoding table is taken from ISSI
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x16:
+ size = 1 << 25; /* 32MB */
+ break;
+ case 0x17:
+ size = 1 << 26; /* 64MB */
+ break;
+ }
+ break;
+ }
+
+ case 0xc2: { /* Macronix */
+ /**
+ * This Density -> Size decoding table is taken from Macronix
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
+ }
+
+ case 0xef: { /* Winbond */
+ /**
+ * This Density -> Size decoding table is taken from Winbond
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
+ }
+ }
+
+ /* If we didn't recognize the FLASH part, that's no real issue: the
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_
+ * use a FLASH part which is at least 4MB in size and has 64KB
+ * sectors. The unrecognized FLASH part is likely to be much larger
+ * than 4MB, but that's all we really need.
+ */
+ if (size == 0) {
+ dev_warn(adapter,
+ "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+ flashid);
+ size = 1 << 22;
+ }
+
+ /**
+ * Store decoded Flash size and fall through into vetting code.
+ */
+ adapter->params.sf_size = size;
+ adapter->params.sf_nsec = size / SF_SEC_SIZE;
+
+found:
+ /*
+ * We should reject adapters with FLASHes which are too small. So, emit
+ * a warning.
+ */
+ if (adapter->params.sf_size < FLASH_MIN_SIZE)
+ dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
+ flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
+
+ return 0;
+}
+
+static void set_pcie_completion_timeout(struct adapter *adapter,
+ u8 range)
+{
+ u32 pcie_cap;
+ u16 val;
+
+ pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
+ val &= 0xfff0;
+ val |= range;
+ t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
+ }
+}
+
+/**
+ * t4_get_chip_type - Determine chip type from device ID
+ * @adap: the adapter
+ * @ver: adapter version
+ */
+int t4_get_chip_type(struct adapter *adap, int ver)
+{
+ enum chip_type chip = 0;
+ u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
+
+ /* Retrieve adapter's device ID */
+ switch (ver) {
+ case CHELSIO_T5:
+ chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ break;
+ case CHELSIO_T6:
+ chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ break;
+ default:
+ dev_err(adap, "Device %d is not supported\n",
+ adap->params.pci.device_id);
+ return -EINVAL;
+ }
+
+ return chip;
+}
+
+/**
+ * t4_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
+ */
+int t4_prep_adapter(struct adapter *adapter)
+{
+ int ret, ver;
+ u32 pl_rev;
+
+ ret = t4_wait_dev_ready(adapter);
+ if (ret < 0)
+ return ret;
+
+ pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
+ adapter->params.pci.device_id = adapter->pdev->id.device_id;
+ adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
+
+ /*
+ * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
+ * ADAPTER (VERSION << 4 | REVISION)
+ */
+ ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
+ adapter->params.chip = 0;
+ switch (ver) {
+ case CHELSIO_T5:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 128;
+ adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.vfcount = 128;
+ break;
+ case CHELSIO_T6:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 256;
+ adapter->params.arch.nchan = 2;
+ adapter->params.arch.vfcount = 256;
+ break;
+ default:
+ dev_err(adapter, "%s: Device %d is not supported\n",
+ __func__, adapter->params.pci.device_id);
+ return -EINVAL;
+ }
+
+ adapter->params.pci.vpd_cap_addr =
+ t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
+
+ ret = t4_get_flash_params(adapter);
+ if (ret < 0) {
+ dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n",
+ -ret);
+ return ret;
+ }
+
+ adapter->params.cim_la_size = CIMLA_SIZE;
+
+ init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+
+ /*
+ * Default port and clock for debugging in case we can't reach FW.
+ */
+ adapter->params.nports = 1;
+ adapter->params.portvec = 1;
+ adapter->params.vpd.cclk = 50000;
+
+ /* Set pci completion timeout value to 4 seconds. */
+ set_pcie_completion_timeout(adapter, 0xd);
+ return 0;
+}
+
+/**
+ * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * @adapter: the adapter
+ * @qid: the Queue ID
+ * @qtype: the Ingress or Egress type for @qid
+ * @pbar2_qoffset: BAR2 Queue Offset
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ * Returns the BAR2 SGE Queue Registers information associated with the
+ * indicated Absolute Queue ID. These are passed back in return value
+ * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
+ * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
+ *
+ * This may return an error which indicates that BAR2 SGE Queue
+ * registers aren't available. If an error is not returned, then the
+ * following values are returned:
+ *
+ * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
+ * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
+ *
+ * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
+ * require the "Inferred Queue ID" ability may be used. E.g. the
+ * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
+ * then these "Inferred Queue ID" register may not be used.
+ */
+int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
+ enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
+{
+ unsigned int page_shift, page_size, qpp_shift, qpp_mask;
+ u64 bar2_page_offset, bar2_qoffset;
+ unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
+
+ /*
+ * T4 doesn't support BAR2 SGE Queue registers.
+ */
+ if (is_t4(adapter->params.chip))
+ return -EINVAL;
+
+ /*
+ * Get our SGE Page Size parameters.
+ */
+ page_shift = adapter->params.sge.hps + 10;
+ page_size = 1 << page_shift;
+
+ /*
+ * Get the right Queues per Page parameters for our Queue.
+ */
+ qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
+ adapter->params.sge.eq_qpp :
+ adapter->params.sge.iq_qpp);
+ qpp_mask = (1 << qpp_shift) - 1;
+
+ /*
+ * Calculate the basics of the BAR2 SGE Queue register area:
+ * o The BAR2 page the Queue registers will be in.
+ * o The BAR2 Queue ID.
+ * o The BAR2 Queue ID Offset into the BAR2 page.
+ */
+ bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_qid = qid & qpp_mask;
+ bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
+
+ /*
+ * If the BAR2 Queue ID Offset is less than the Page Size, then the
+ * hardware will infer the Absolute Queue ID simply from the writes to
+ * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
+ * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
+ * write to the first BAR2 SGE Queue Area within the BAR2 Page with
+ * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
+ * from the BAR2 Page and BAR2 Queue ID.
+ *
+ * One important censequence of this is that some BAR2 SGE registers
+ * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
+ * there. But other registers synthesize the SGE Queue ID purely
+ * from the writes to the registers -- the Write Combined Doorbell
+ * Buffer is a good example. These BAR2 SGE Registers are only
+ * available for those BAR2 SGE Register areas where the SGE Absolute
+ * Queue ID can be inferred from simple writes.
+ */
+ bar2_qoffset = bar2_page_offset;
+ bar2_qinferred = (bar2_qid_offset < page_size);
+ if (bar2_qinferred) {
+ bar2_qoffset += bar2_qid_offset;
+ bar2_qid = 0;
+ }
+
+ *pbar2_qoffset = bar2_qoffset;
+ *pbar2_qid = bar2_qid;
+ return 0;
+}
+
+/**
+ * t4_init_sge_params - initialize adap->params.sge
+ * @adapter: the adapter
+ *
+ * Initialize various fields of the adapter's SGE Parameters structure.
+ */
+int t4_init_sge_params(struct adapter *adapter)
+{
+ struct sge_params *sge_params = &adapter->params.sge;
+ u32 hps, qpp;
+ unsigned int s_hps, s_qpp;
+
+ /*
+ * Extract the SGE Page Size for our PF.
+ */
+ hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
+ s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
+ adapter->pf);
+ sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
+
+ /*
+ * Extract the SGE Egress and Ingess Queues Per Page for our PF.
+ */
+ s_qpp = (S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
+ qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
+ sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
+ qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
+ sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
+
+ return 0;
+}
+
+/**
+ * t4_init_tp_params - initialize adap->params.tp
+ * @adap: the adapter
+ *
+ * Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+ int chan;
+ u32 v;
+
+ v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
+ adap->params.tp.tre = G_TIMERRESOLUTION(v);
+ adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
+
+ /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+ for (chan = 0; chan < NCHAN; chan++)
+ adap->params.tp.tx_modq[chan] = chan;
+
+ /*
+ * Cache the adapter's Compressed Filter Mode and global Incress
+ * Configuration.
+ */
+ t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &adap->params.tp.vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
+ t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &adap->params.tp.ingress_config, 1,
+ A_TP_INGRESS_CONFIG);
+
+ /* For T6, cache the adapter's compressed error vector
+ * and passing outer header info for encapsulated packets.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ v = t4_read_reg(adap, A_TP_OUT_CONFIG);
+ adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
+ }
+
+ /*
+ * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+ * shift positions of several elements of the Compressed Filter Tuple
+ * for this adapter which we need frequently ...
+ */
+ adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+ adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+ adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+ adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+ F_PROTOCOL);
+ adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
+ F_ETHERTYPE);
+
+ /*
+ * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+ * represents the presense of an Outer VLAN instead of a VNIC ID.
+ */
+ if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+ adap->params.tp.vnic_shift = -1;
+
+ v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
+ adap->params.tp.hash_filter_mask = v;
+ v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
+ adap->params.tp.hash_filter_mask |= ((u64)v << 32);
+
+ return 0;
+}
+
+/**
+ * t4_filter_field_shift - calculate filter field shift
+ * @adap: the adapter
+ * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ * Return the shift position of a filter field within the Compressed
+ * Filter Tuple. The filter field is specified via its selection bit
+ * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
+ */
+int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel)
+{
+ unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+ unsigned int sel;
+ int field_shift;
+
+ if ((filter_mode & filter_sel) == 0)
+ return -1;
+
+ for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+ switch (filter_mode & sel) {
+ case F_FCOE:
+ field_shift += W_FT_FCOE;
+ break;
+ case F_PORT:
+ field_shift += W_FT_PORT;
+ break;
+ case F_VNIC_ID:
+ field_shift += W_FT_VNIC_ID;
+ break;
+ case F_VLAN:
+ field_shift += W_FT_VLAN;
+ break;
+ case F_TOS:
+ field_shift += W_FT_TOS;
+ break;
+ case F_PROTOCOL:
+ field_shift += W_FT_PROTOCOL;
+ break;
+ case F_ETHERTYPE:
+ field_shift += W_FT_ETHERTYPE;
+ break;
+ case F_MACMATCH:
+ field_shift += W_FT_MACMATCH;
+ break;
+ case F_MPSHITTYPE:
+ field_shift += W_FT_MPSHITTYPE;
+ break;
+ case F_FRAGMENTATION:
+ field_shift += W_FT_FRAGMENTATION;
+ break;
+ }
+ }
+ return field_shift;
+}
+
+int t4_init_rss_mode(struct adapter *adap, int mbox)
+{
+ int i, ret;
+ struct fw_rss_vi_config_cmd rvc;
+
+ memset(&rvc, 0, sizeof(rvc));
+
+ for_each_port(adap, i) {
+ struct port_info *p = adap2pinfo(adap, i);
+
+ rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
+ rvc.retval_len16 = htonl(FW_LEN16(rvc));
+ ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
+ if (ret)
+ return ret;
+ p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
+ }
+ return 0;
+}
+
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+{
+ unsigned int fw_caps = adap->params.fw_caps_support;
+ fw_port_cap32_t pcaps, acaps;
+ enum fw_port_type port_type;
+ struct fw_port_cmd cmd;
+ int ret, i, j = 0;
+ int mdio_addr;
+ u32 action;
+ u8 addr[6];
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+ unsigned int rss_size = 0;
+
+ while ((adap->params.portvec & (1 << j)) == 0)
+ j++;
+
+ /* If we haven't yet determined whether we're talking to
+ * Firmware which knows the new 32-bit Port Capabilities, it's
+ * time to find out now. This will also tell new Firmware to
+ * send us Port Status Updates using the new 32-bit Port
+ * Capabilities version of the Port Information message.
+ */
+ if (fw_caps == FW_CAPS_UNKNOWN) {
+ u32 param, val, caps;
+
+ caps = FW_PARAMS_PARAM_PFVF_PORT_CAPS32;
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X(caps));
+ val = 1;
+ ret = t4_set_params(adap, mbox, pf, vf, 1, &param,
+ &val);
+ fw_caps = ret == 0 ? FW_CAPS32 : FW_CAPS16;
+ adap->params.fw_caps_support = fw_caps;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PORT_CMD_PORTID(j));
+ action = fw_caps == FW_CAPS16 ? FW_PORT_ACTION_GET_PORT_INFO :
+ FW_PORT_ACTION_GET_PORT_INFO32;
+ cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
+ FW_LEN16(cmd));
+ ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+ /* Extract the various fields from the Port Information message.
+ */
+ if (fw_caps == FW_CAPS16) {
+ u32 lstatus =
+ be32_to_cpu(cmd.u.info.lstatus_to_modtype);
+
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mdio_addr = (lstatus & F_FW_PORT_CMD_MDIOCAP) ?
+ (int)G_FW_PORT_CMD_MDIOADDR(lstatus) : -1;
+ pcaps = be16_to_cpu(cmd.u.info.pcap);
+ acaps = be16_to_cpu(cmd.u.info.acap);
+ pcaps = fwcaps16_to_caps32(pcaps);
+ acaps = fwcaps16_to_caps32(acaps);
+ } else {
+ u32 lstatus32 =
+ be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
+
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
+ (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
+ -1;
+ pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
+ acaps = be32_to_cpu(cmd.u.info32.acaps32);
+ }
+
+ ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
+ if (ret < 0)
+ return ret;
+
+ pi->viid = ret;
+ pi->tx_chan = j;
+ pi->rss_size = rss_size;
+ t4_os_set_hw_addr(adap, i, addr);
+
+ pi->port_type = port_type;
+ pi->mdio_addr = mdio_addr;
+ pi->mod_type = FW_PORT_MOD_TYPE_NA;
+
+ init_link_config(&pi->link_cfg, pcaps, acaps);
+ j++;
+ }
+ return 0;
+}
+
+/**
+ * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
+ * @adap: the adapter
+ * @win: PCI-E Memory Window to use
+ * @addr: address within adapter memory
+ * @len: amount of memory to transfer
+ * @hbuf: host memory buffer
+ * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address and host buffer must be aligned on 32-bit
+ * boudaries; the length may be arbitrary.
+ *
+ * NOTES:
+ * 1. The memory is transferred as a raw byte sequence from/to the
+ * firmware's memory. If this memory contains data structures which
+ * contain multi-byte integers, it's the caller's responsibility to
+ * perform appropriate byte order conversions.
+ *
+ * 2. It is the Caller's responsibility to ensure that no other code
+ * uses the specified PCI-E Memory Window while this routine is
+ * using it. This is typically done via the use of OS-specific
+ * locks, etc.
+ */
+int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
+ u32 len, void *hbuf, int dir)
+{
+ u32 pos, offset, resid;
+ u32 win_pf, mem_reg, mem_aperture, mem_base;
+ u32 *buf;
+
+ /* Argument sanity checks ...*/
+ if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
+ return -EINVAL;
+ buf = (u32 *)hbuf;
+
+ /* It's convenient to be able to handle lengths which aren't a
+ * multiple of 32-bits because we often end up transferring files to
+ * the firmware. So we'll handle that by normalizing the length here
+ * and then handling any residual transfer at the end.
+ */
+ resid = len & 0x3;
+ len -= resid;
+
+ /* Each PCI-E Memory Window is programmed with a window size -- or
+ * "aperture" -- which controls the granularity of its mapping onto
+ * adapter memory. We need to grab that aperture in order to know
+ * how to use the specified window. The window is also programmed
+ * with the base address of the Memory Window in BAR0's address
+ * space. For T4 this is an absolute PCI-E Bus Address. For T5
+ * the address is relative to BAR0.
+ */
+ mem_reg = t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
+ win));
+ mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
+ mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
+
+ win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);
+
+ /* Calculate our initial PCI-E Memory Window Position and Offset into
+ * that Window.
+ */
+ pos = addr & ~(mem_aperture - 1);
+ offset = addr - pos;
+
+ /* Set up initial PCI-E Memory Window to cover the start of our
+ * transfer. (Read it back to ensure that changes propagate before we
+ * attempt to use the new value.)
+ */
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
+ pos | win_pf);
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));
+
+ /* Transfer data to/from the adapter as long as there's an integral
+ * number of 32-bit transfers to complete.
+ *
+ * A note on Endianness issues:
+ *
+ * The "register" reads and writes below from/to the PCI-E Memory
+ * Window invoke the standard adapter Big-Endian to PCI-E Link
+ * Little-Endian "swizzel." As a result, if we have the following
+ * data in adapter memory:
+ *
+ * Memory: ... | b0 | b1 | b2 | b3 | ...
+ * Address: i+0 i+1 i+2 i+3
+ *
+ * Then a read of the adapter memory via the PCI-E Memory Window
+ * will yield:
+ *
+ * x = readl(i)
+ * 31 0
+ * [ b3 | b2 | b1 | b0 ]
+ *
+ * If this value is stored into local memory on a Little-Endian system
+ * it will show up correctly in local memory as:
+ *
+ * ( ..., b0, b1, b2, b3, ... )
+ *
+ * But on a Big-Endian system, the store will show up in memory
+ * incorrectly swizzled as:
+ *
+ * ( ..., b3, b2, b1, b0, ... )
+ *
+ * So we need to account for this in the reads and writes to the
+ * PCI-E Memory Window below by undoing the register read/write
+ * swizzels.
+ */
+ while (len > 0) {
+ if (dir == T4_MEMORY_READ)
+ *buf++ = le32_to_cpu((__le32)t4_read_reg(adap,
+ mem_base +
+ offset));
+ else
+ t4_write_reg(adap, mem_base + offset,
+ (u32)cpu_to_le32(*buf++));
+ offset += sizeof(__be32);
+ len -= sizeof(__be32);
+
+ /* If we've reached the end of our current window aperture,
+ * move the PCI-E Memory Window on to the next. Note that
+ * doing this here after "len" may be 0 allows us to set up
+ * the PCI-E Memory Window for a possible final residual
+ * transfer below ...
+ */
+ if (offset == mem_aperture) {
+ pos += mem_aperture;
+ offset = 0;
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
+ win), pos | win_pf);
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
+ win));
+ }
+ }
+
+ /* If the original transfer had a length which wasn't a multiple of
+ * 32-bits, now's where we need to finish off the transfer of the
+ * residual amount. The PCI-E Memory Window has already been moved
+ * above (if necessary) to cover this final transfer.
+ */
+ if (resid) {
+ union {
+ u32 word;
+ char byte[4];
+ } last;
+ unsigned char *bp;
+ int i;
+
+ if (dir == T4_MEMORY_READ) {
+ last.word = le32_to_cpu((__le32)t4_read_reg(adap,
+ mem_base +
+ offset));
+ for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
+ bp[i] = last.byte[i];
+ } else {
+ last.word = *buf;
+ for (i = resid; i < 4; i++)
+ last.byte[i] = 0;
+ t4_write_reg(adap, mem_base + offset,
+ (u32)cpu_to_le32(last.word));
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * t4_memory_rw_mtype -read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @adap: the adapter
+ * @win: PCI-E Memory Window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ * @maddr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @hbuf: host memory buffer
+ * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
+ *
+ * Reads/writes adapter memory using t4_memory_rw_addr(). This routine
+ * provides an (memory type, address within memory type) interface.
+ */
+int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
+ u32 len, void *hbuf, int dir)
+{
+ u32 mtype_offset;
+ u32 edc_size, mc_size;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
+ * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
+ */
+ edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
+ if (mtype != MEM_MC1) {
+ mtype_offset = (mtype * (edc_size * 1024 * 1024));
+ } else {
+ mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
+ A_MA_EXT_MEMORY0_BAR));
+ mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
+
+ return t4_memory_rw_addr(adap, win,
+ mtype_offset + maddr, len,
+ hbuf, dir);
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.h
new file mode 100644
index 00000000..e77563df
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4_HW_H
+#define __T4_HW_H
+
+enum {
+ NCHAN = 4, /* # of HW channels */
+ EEPROMSIZE = 17408, /* Serial EEPROM physical size */
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ MBOX_LEN = 64, /* mailbox size in bytes */
+ UDBS_SEG_SIZE = 128, /* segment size for BAR2 user doorbells */
+};
+
+enum {
+ CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */
+};
+
+enum {
+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
+};
+
+enum {
+ SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
+ SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
+};
+
+/* PCI-e memory window access */
+enum pcie_memwin {
+ MEMWIN_NIC = 0,
+};
+
+enum {
+ SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
+ SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */
+ /* max no. of desc allowed in WR */
+ SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / SGE_EQ_IDXSIZE,
+};
+
+enum {
+ TCB_SIZE = 128, /* TCB size */
+};
+
+struct sge_qstat { /* data written to SGE queue status entries */
+ __be32 qid;
+ __be16 cidx;
+ __be16 pidx;
+};
+
+/*
+ * Structure for last 128 bits of response descriptors
+ */
+struct rsp_ctrl {
+ __be32 hdrbuflen_pidx;
+ __be32 pldbuflen_qid;
+ union {
+ u8 type_gen;
+ __be64 last_flit;
+ } u;
+};
+
+#define S_RSPD_NEWBUF 31
+#define V_RSPD_NEWBUF(x) ((x) << S_RSPD_NEWBUF)
+#define F_RSPD_NEWBUF V_RSPD_NEWBUF(1U)
+
+#define S_RSPD_LEN 0
+#define M_RSPD_LEN 0x7fffffff
+#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
+#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
+
+#define S_RSPD_GEN 7
+#define V_RSPD_GEN(x) ((x) << S_RSPD_GEN)
+#define F_RSPD_GEN V_RSPD_GEN(1U)
+
+#define S_RSPD_TYPE 4
+#define M_RSPD_TYPE 0x3
+#define V_RSPD_TYPE(x) ((x) << S_RSPD_TYPE)
+#define G_RSPD_TYPE(x) (((x) >> S_RSPD_TYPE) & M_RSPD_TYPE)
+
+/* Rx queue interrupt deferral field: timer index */
+#define S_QINTR_CNT_EN 0
+#define V_QINTR_CNT_EN(x) ((x) << S_QINTR_CNT_EN)
+#define F_QINTR_CNT_EN V_QINTR_CNT_EN(1U)
+
+#define S_QINTR_TIMER_IDX 1
+#define M_QINTR_TIMER_IDX 0x7
+#define V_QINTR_TIMER_IDX(x) ((x) << S_QINTR_TIMER_IDX)
+#define G_QINTR_TIMER_IDX(x) (((x) >> S_QINTR_TIMER_IDX) & M_QINTR_TIMER_IDX)
+
+/*
+ * Flash layout.
+ */
+#define FLASH_START(start) ((start) * SF_SEC_SIZE)
+#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
+
+enum {
+ /*
+ * Various Expansion-ROM boot images, etc.
+ */
+ FLASH_EXP_ROM_START_SEC = 0,
+ FLASH_EXP_ROM_NSECS = 6,
+ FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
+ FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
+
+ /*
+ * Location of firmware image in FLASH.
+ */
+ FLASH_FW_START_SEC = 8,
+ FLASH_FW_NSECS = 16,
+ FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
+ FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+
+ /*
+ * Location of bootstrap firmware image in FLASH.
+ */
+ FLASH_FWBOOTSTRAP_START_SEC = 27,
+ FLASH_FWBOOTSTRAP_NSECS = 1,
+ FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
+ FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+
+ /*
+ * Location of Firmware Configuration File in FLASH.
+ */
+ FLASH_CFG_START_SEC = 31,
+ FLASH_CFG_NSECS = 1,
+ FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
+ FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
+
+ /*
+ * We don't support FLASH devices which can't support the full
+ * standard set of sections which we need for normal operations.
+ */
+ FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE,
+};
+
+#undef FLASH_START
+#undef FLASH_MAX_SIZE
+
+#endif /* __T4_HW_H */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_msg.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_msg.h
new file mode 100644
index 00000000..5d433c91
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_msg.h
@@ -0,0 +1,541 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef T4_MSG_H
+#define T4_MSG_H
+
+enum {
+ CPL_ACT_OPEN_REQ = 0x3,
+ CPL_SET_TCB_FIELD = 0x5,
+ CPL_ABORT_REQ = 0xA,
+ CPL_ABORT_RPL = 0xB,
+ CPL_TID_RELEASE = 0x1A,
+ CPL_ACT_OPEN_RPL = 0x25,
+ CPL_ABORT_RPL_RSS = 0x2D,
+ CPL_SET_TCB_RPL = 0x3A,
+ CPL_ACT_OPEN_REQ6 = 0x83,
+ CPL_SGE_EGR_UPDATE = 0xA5,
+ CPL_FW4_MSG = 0xC0,
+ CPL_FW6_MSG = 0xE0,
+ CPL_TX_PKT_LSO = 0xED,
+ CPL_TX_PKT_XT = 0xEE,
+};
+
+enum CPL_error {
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_FULL = 3,
+};
+
+enum {
+ ULP_MODE_NONE = 0,
+};
+
+enum {
+ CPL_ABORT_SEND_RST = 0,
+ CPL_ABORT_NO_RST,
+};
+
+enum { /* TX_PKT_XT checksum types */
+ TX_CSUM_TCPIP = 8,
+ TX_CSUM_UDPIP = 9,
+ TX_CSUM_TCPIP6 = 10,
+};
+
+union opcode_tid {
+ __be32 opcode_tid;
+ __u8 opcode;
+};
+
+#define S_CPL_OPCODE 24
+#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE)
+
+#define G_TID(x) ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_CPL_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(be32_to_cpu(OPCODE_TID(cmd))))
+
+/* partitioning of TID fields that also carry a queue id */
+#define S_TID_TID 0
+#define M_TID_TID 0x3fff
+#define G_TID_TID(x) (((x) >> S_TID_TID) & M_TID_TID)
+
+struct rss_header {
+ __u8 opcode;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ __u8 channel:2;
+ __u8 filter_hit:1;
+ __u8 filter_tid:1;
+ __u8 hash_type:2;
+ __u8 ipv6:1;
+ __u8 send2fw:1;
+#else
+ __u8 send2fw:1;
+ __u8 ipv6:1;
+ __u8 hash_type:2;
+ __u8 filter_tid:1;
+ __u8 filter_hit:1;
+ __u8 channel:2;
+#endif
+ __be16 qid;
+ __be32 hash_val;
+};
+
+#if defined(RSS_HDR_VLD) || defined(CHELSIO_FW)
+#define RSS_HDR struct rss_header rss_hdr
+#else
+#define RSS_HDR
+#endif
+
+#ifndef CHELSIO_FW
+struct work_request_hdr {
+ __be32 wr_hi;
+ __be32 wr_mid;
+ __be64 wr_lo;
+};
+
+#define WR_HDR struct work_request_hdr wr
+#define WR_HDR_SIZE sizeof(struct work_request_hdr)
+#else
+#define WR_HDR
+#define WR_HDR_SIZE 0
+#endif
+
+#define S_COOKIE 5
+#define M_COOKIE 0x7
+#define V_COOKIE(x) ((x) << S_COOKIE)
+#define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)
+
+/* option 0 fields */
+#define S_TX_CHAN 2
+#define V_TX_CHAN(x) ((x) << S_TX_CHAN)
+
+#define S_DELACK 5
+#define V_DELACK(x) ((x) << S_DELACK)
+
+#define S_NON_OFFLOAD 7
+#define V_NON_OFFLOAD(x) ((x) << S_NON_OFFLOAD)
+#define F_NON_OFFLOAD V_NON_OFFLOAD(1U)
+
+#define S_ULP_MODE 8
+#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
+
+#define S_SMAC_SEL 28
+#define V_SMAC_SEL(x) ((__u64)(x) << S_SMAC_SEL)
+
+#define S_TCAM_BYPASS 48
+#define V_TCAM_BYPASS(x) ((__u64)(x) << S_TCAM_BYPASS)
+#define F_TCAM_BYPASS V_TCAM_BYPASS(1ULL)
+
+/* option 2 fields */
+#define S_RSS_QUEUE 0
+#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
+
+#define S_RSS_QUEUE_VALID 10
+#define V_RSS_QUEUE_VALID(x) ((x) << S_RSS_QUEUE_VALID)
+#define F_RSS_QUEUE_VALID V_RSS_QUEUE_VALID(1U)
+
+#define S_CONG_CNTRL 14
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+
+#define S_RX_CHANNEL 26
+#define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL)
+#define F_RX_CHANNEL V_RX_CHANNEL(1U)
+
+#define S_CCTRL_ECN 27
+#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN)
+
+#define S_T5_OPT_2_VALID 31
+#define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID)
+#define F_T5_OPT_2_VALID V_T5_OPT_2_VALID(1U)
+
+struct cpl_t6_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+struct cpl_t6_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+#define S_FILTER_TUPLE 24
+#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
+
+struct cpl_act_open_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 atid_status;
+};
+
+/* cpl_act_open_rpl.atid_status fields */
+#define S_AOPEN_STATUS 0
+#define M_AOPEN_STATUS 0xFF
+#define G_AOPEN_STATUS(x) (((x) >> S_AOPEN_STATUS) & M_AOPEN_STATUS)
+
+#define S_AOPEN_ATID 8
+#define M_AOPEN_ATID 0xFFFFFF
+#define G_AOPEN_ATID(x) (((x) >> S_AOPEN_ATID) & M_AOPEN_ATID)
+
+struct cpl_set_tcb_field {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 word_cookie;
+ __be64 mask;
+ __be64 val;
+};
+
+/* cpl_set_tcb_field.word_cookie fields */
+#define S_WORD 0
+#define V_WORD(x) ((x) << S_WORD)
+
+/* cpl_get_tcb.reply_ctrl fields */
+#define S_QUEUENO 0
+#define V_QUEUENO(x) ((x) << S_QUEUENO)
+
+#define S_REPLY_CHAN 14
+#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
+
+#define S_NO_REPLY 15
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+
+struct cpl_set_tcb_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rsvd;
+ __u8 cookie;
+ __u8 status;
+ __be64 oldval;
+};
+
+/* cpl_abort_req status command code
+ */
+struct cpl_abort_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_abort_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_tid_release {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct cpl_tx_data {
+ union opcode_tid ot;
+ __be32 len;
+ __be32 rsvd;
+ __be32 flags;
+};
+
+struct cpl_tx_pkt_core {
+ __be32 ctrl0;
+ __be16 pack;
+ __be16 len;
+ __be64 ctrl1;
+};
+
+struct cpl_tx_pkt {
+ WR_HDR;
+ struct cpl_tx_pkt_core c;
+};
+
+/* cpl_tx_pkt_core.ctrl0 fields */
+#define S_TXPKT_PF 8
+#define M_TXPKT_PF 0x7
+#define V_TXPKT_PF(x) ((x) << S_TXPKT_PF)
+#define G_TXPKT_PF(x) (((x) >> S_TXPKT_PF) & M_TXPKT_PF)
+
+#define S_TXPKT_INTF 16
+#define M_TXPKT_INTF 0xF
+#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
+#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
+
+#define S_TXPKT_OPCODE 24
+#define M_TXPKT_OPCODE 0xFF
+#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
+#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
+
+/* cpl_tx_pkt_core.ctrl1 fields */
+#define S_TXPKT_IPHDR_LEN 20
+#define M_TXPKT_IPHDR_LEN 0x3FFF
+#define V_TXPKT_IPHDR_LEN(x) ((__u64)(x) << S_TXPKT_IPHDR_LEN)
+#define G_TXPKT_IPHDR_LEN(x) (((x) >> S_TXPKT_IPHDR_LEN) & M_TXPKT_IPHDR_LEN)
+
+#define S_TXPKT_ETHHDR_LEN 34
+#define M_TXPKT_ETHHDR_LEN 0x3F
+#define V_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_TXPKT_ETHHDR_LEN)
+#define G_TXPKT_ETHHDR_LEN(x) (((x) >> S_TXPKT_ETHHDR_LEN) & M_TXPKT_ETHHDR_LEN)
+
+#define S_T6_TXPKT_ETHHDR_LEN 32
+#define M_T6_TXPKT_ETHHDR_LEN 0xFF
+#define V_T6_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_T6_TXPKT_ETHHDR_LEN)
+#define G_T6_TXPKT_ETHHDR_LEN(x) \
+ (((x) >> S_T6_TXPKT_ETHHDR_LEN) & M_T6_TXPKT_ETHHDR_LEN)
+
+#define S_TXPKT_CSUM_TYPE 40
+#define M_TXPKT_CSUM_TYPE 0xF
+#define V_TXPKT_CSUM_TYPE(x) ((__u64)(x) << S_TXPKT_CSUM_TYPE)
+#define G_TXPKT_CSUM_TYPE(x) (((x) >> S_TXPKT_CSUM_TYPE) & M_TXPKT_CSUM_TYPE)
+
+#define S_TXPKT_VLAN 44
+#define M_TXPKT_VLAN 0xFFFF
+#define V_TXPKT_VLAN(x) ((__u64)(x) << S_TXPKT_VLAN)
+#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
+
+#define S_TXPKT_VLAN_VLD 60
+#define V_TXPKT_VLAN_VLD(x) ((__u64)(x) << S_TXPKT_VLAN_VLD)
+#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1ULL)
+
+#define S_TXPKT_IPCSUM_DIS 62
+#define V_TXPKT_IPCSUM_DIS(x) ((__u64)(x) << S_TXPKT_IPCSUM_DIS)
+#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1ULL)
+
+#define S_TXPKT_L4CSUM_DIS 63
+#define V_TXPKT_L4CSUM_DIS(x) ((__u64)(x) << S_TXPKT_L4CSUM_DIS)
+#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1ULL)
+
+struct cpl_tx_pkt_lso_core {
+ __be32 lso_ctrl;
+ __be16 ipid_ofst;
+ __be16 mss;
+ __be32 seqno_offset;
+ __be32 len;
+ /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+struct cpl_tx_pkt_lso {
+ WR_HDR;
+ struct cpl_tx_pkt_lso_core c;
+ /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+/* cpl_tx_pkt_lso_core.lso_ctrl fields */
+#define S_LSO_TCPHDR_LEN 0
+#define M_LSO_TCPHDR_LEN 0xF
+#define V_LSO_TCPHDR_LEN(x) ((x) << S_LSO_TCPHDR_LEN)
+#define G_LSO_TCPHDR_LEN(x) (((x) >> S_LSO_TCPHDR_LEN) & M_LSO_TCPHDR_LEN)
+
+#define S_LSO_IPHDR_LEN 4
+#define M_LSO_IPHDR_LEN 0xFFF
+#define V_LSO_IPHDR_LEN(x) ((x) << S_LSO_IPHDR_LEN)
+#define G_LSO_IPHDR_LEN(x) (((x) >> S_LSO_IPHDR_LEN) & M_LSO_IPHDR_LEN)
+
+#define S_LSO_ETHHDR_LEN 16
+#define M_LSO_ETHHDR_LEN 0xF
+#define V_LSO_ETHHDR_LEN(x) ((x) << S_LSO_ETHHDR_LEN)
+#define G_LSO_ETHHDR_LEN(x) (((x) >> S_LSO_ETHHDR_LEN) & M_LSO_ETHHDR_LEN)
+
+#define S_LSO_IPV6 20
+#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
+#define F_LSO_IPV6 V_LSO_IPV6(1U)
+
+#define S_LSO_LAST_SLICE 22
+#define V_LSO_LAST_SLICE(x) ((x) << S_LSO_LAST_SLICE)
+#define F_LSO_LAST_SLICE V_LSO_LAST_SLICE(1U)
+
+#define S_LSO_FIRST_SLICE 23
+#define V_LSO_FIRST_SLICE(x) ((x) << S_LSO_FIRST_SLICE)
+#define F_LSO_FIRST_SLICE V_LSO_FIRST_SLICE(1U)
+
+#define S_LSO_OPCODE 24
+#define M_LSO_OPCODE 0xFF
+#define V_LSO_OPCODE(x) ((x) << S_LSO_OPCODE)
+#define G_LSO_OPCODE(x) (((x) >> S_LSO_OPCODE) & M_LSO_OPCODE)
+
+#define S_LSO_T5_XFER_SIZE 0
+#define M_LSO_T5_XFER_SIZE 0xFFFFFFF
+#define V_LSO_T5_XFER_SIZE(x) ((x) << S_LSO_T5_XFER_SIZE)
+#define G_LSO_T5_XFER_SIZE(x) (((x) >> S_LSO_T5_XFER_SIZE) & M_LSO_T5_XFER_SIZE)
+
+struct cpl_rx_pkt {
+ RSS_HDR;
+ __u8 opcode;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ __u8 iff:4;
+ __u8 csum_calc:1;
+ __u8 ipmi_pkt:1;
+ __u8 vlan_ex:1;
+ __u8 ip_frag:1;
+#else
+ __u8 ip_frag:1;
+ __u8 vlan_ex:1;
+ __u8 ipmi_pkt:1;
+ __u8 csum_calc:1;
+ __u8 iff:4;
+#endif
+ __be16 csum;
+ __be16 vlan;
+ __be16 len;
+ __be32 l2info;
+ __be16 hdr_len;
+ __be16 err_vec;
+};
+
+/* rx_pkt.l2info fields */
+#define S_RXF_UDP 22
+#define V_RXF_UDP(x) ((x) << S_RXF_UDP)
+#define F_RXF_UDP V_RXF_UDP(1U)
+
+#define S_RXF_TCP 23
+#define V_RXF_TCP(x) ((x) << S_RXF_TCP)
+#define F_RXF_TCP V_RXF_TCP(1U)
+
+#define S_RXF_IP 24
+#define V_RXF_IP(x) ((x) << S_RXF_IP)
+#define F_RXF_IP V_RXF_IP(1U)
+
+#define S_RXF_IP6 25
+#define V_RXF_IP6(x) ((x) << S_RXF_IP6)
+#define F_RXF_IP6 V_RXF_IP6(1U)
+
+/* rx_pkt.err_vec fields */
+/* In T6, rx_pkt.err_vec indicates
+ * RxError Error vector (16b) or
+ * Encapsulating header length (8b),
+ * Outer encapsulation type (2b) and
+ * compressed error vector (6b) if CRxPktEnc is
+ * enabled in TP_OUT_CONFIG
+ */
+#define S_T6_COMPR_RXERR_VEC 0
+#define M_T6_COMPR_RXERR_VEC 0x3F
+#define V_T6_COMPR_RXERR_VEC(x) ((x) << S_T6_COMPR_RXERR_VEC)
+#define G_T6_COMPR_RXERR_VEC(x) \
+ (((x) >> S_T6_COMPR_RXERR_VEC) & M_T6_COMPR_RXERR_VEC)
+
+/* cpl_fw*.type values */
+enum {
+ FW_TYPE_RSSCPL = 4,
+};
+
+struct cpl_fw4_msg {
+ RSS_HDR;
+ u8 opcode;
+ u8 type;
+ __be16 rsvd0;
+ __be32 rsvd1;
+ __be64 data[2];
+};
+
+struct cpl_fw6_msg {
+ RSS_HDR;
+ u8 opcode;
+ u8 type;
+ __be16 rsvd0;
+ __be32 rsvd1;
+ __be64 data[4];
+};
+
+/* ULP_TX opcodes */
+enum {
+ ULP_TX_PKT = 4
+};
+
+enum {
+ ULP_TX_SC_NOOP = 0x80,
+ ULP_TX_SC_IMM = 0x81,
+ ULP_TX_SC_DSGL = 0x82,
+ ULP_TX_SC_ISGL = 0x83
+};
+
+#define S_ULPTX_CMD 24
+#define M_ULPTX_CMD 0xFF
+#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD)
+
+#define S_ULP_TX_SC_MORE 23
+#define V_ULP_TX_SC_MORE(x) ((x) << S_ULP_TX_SC_MORE)
+#define F_ULP_TX_SC_MORE V_ULP_TX_SC_MORE(1U)
+
+struct ulptx_sge_pair {
+ __be32 len[2];
+ __be64 addr[2];
+};
+
+struct ulptx_sgl {
+ __be32 cmd_nsge;
+ __be32 len0;
+ __be64 addr0;
+
+#if !(defined C99_NOT_SUPPORTED)
+ struct ulptx_sge_pair sge[0];
+#endif
+
+};
+
+struct ulptx_idata {
+ __be32 cmd_more;
+ __be32 len;
+};
+
+#define S_ULPTX_NSGE 0
+#define M_ULPTX_NSGE 0xFFFF
+#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
+
+struct ulp_txpkt {
+ __be32 cmd_dest;
+ __be32 len;
+};
+
+/* ulp_txpkt.cmd_dest fields */
+#define S_ULP_TXPKT_DEST 16
+#define M_ULP_TXPKT_DEST 0x3
+#define V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST)
+
+#define S_ULP_TXPKT_FID 4
+#define M_ULP_TXPKT_FID 0x7ff
+#define V_ULP_TXPKT_FID(x) ((x) << S_ULP_TXPKT_FID)
+
+#define S_ULP_TXPKT_RO 3
+#define V_ULP_TXPKT_RO(x) ((x) << S_ULP_TXPKT_RO)
+#define F_ULP_TXPKT_RO V_ULP_TXPKT_RO(1U)
+
+#endif /* T4_MSG_H */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_pci_id_tbl.h
new file mode 100644
index 00000000..5f5cbe04
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_pci_id_tbl.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4_PCI_ID_TBL_H__
+#define __T4_PCI_ID_TBL_H__
+
+/*
+ * The Os-Dependent code can defined cpp macros for creating a PCI Device ID
+ * Table. This is useful because it allows the PCI ID Table to be maintained
+ * in a single place and all supporting OSes to get new PCI Device IDs
+ * automatically.
+ *
+ * The macros are:
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ * -- Used to start the definition of the PCI ID Table.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION
+ * -- The PCI Function Number to use in the PCI Device ID Table. "0"
+ * -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4,
+ * -- "8" for drivers attaching to SR-IOV Virtual Functions, etc.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION2 [optional]
+ * -- If defined, create a PCI Device ID Table with both
+ * -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated.
+ *
+ * CH_PCI_ID_TABLE_ENTRY(DeviceID)
+ * -- Used for the individual PCI Device ID entries. Note that we will
+ * -- be adding a trailing comma (",") after all of the entries (and
+ * -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined).
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+ * -- Used to finish the definition of the PCI ID Table. Note that we
+ * -- will be adding a trailing semi-colon (";") here.
+ *
+ * CH_PCI_DEVICE_ID_BYPASS_SUPPORTED [optional]
+ * -- If defined, indicates that the OS Driver has support for Bypass
+ * -- Adapters.
+ */
+#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+
+/*
+ * Some sanity checks ...
+ */
+#ifndef CH_PCI_DEVICE_ID_FUNCTION
+#error CH_PCI_DEVICE_ID_FUNCTION not defined!
+#endif
+#ifndef CH_PCI_ID_TABLE_ENTRY
+#error CH_PCI_ID_TABLE_ENTRY not defined!
+#endif
+#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined!
+#endif
+
+/*
+ * T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where:
+ *
+ * V = "4" for T4; "5" for T5, etc.
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ * PP = adapter product designation
+ *
+ * We use this consistency in order to create the proper PCI Device IDs
+ * for the specified CH_PCI_DEVICE_ID_FUNCTION.
+ */
+#ifndef CH_PCI_DEVICE_ID_FUNCTION2
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION) << 8))
+#else
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION2) << 8))
+#endif
+
+CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ /*
+ * T5 adapters:
+ */
+ CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */
+ CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */
+ CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */
+ CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */
+ CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */
+#ifdef CH_PCI_DEVICE_ID_BYPASS_SUPPORTED
+ CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */
+ CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */
+#endif
+ CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
+ CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
+ CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5083), /* Custom T540-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5084), /* Custom T580-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5085), /* Custom 3x T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
+
+ /* T6 adapter */
+ CH_PCI_ID_TABLE_FENTRY(0x6001), /* T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6002), /* T6225-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6003), /* T6425-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6005), /* T6225-OCP */
+ CH_PCI_ID_TABLE_FENTRY(0x6007), /* T62100-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6008), /* T62100-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x600d), /* T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6011), /* T6225-LL-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6014), /* T61100-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x6080), /* Custom T6225-CR SFP28 */
+ CH_PCI_ID_TABLE_FENTRY(0x6081), /* Custom T62100-CR */
+CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
+
+#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
+
+#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs.h
new file mode 100644
index 00000000..6f872edc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs.h
@@ -0,0 +1,954 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#define MYPF_BASE 0x1b000
+#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
+
+#define PF0_BASE 0x1e000
+#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr))
+
+#define PF_STRIDE 0x400
+#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
+#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
+
+#define MYPORT_BASE 0x1c000
+#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
+
+#define PORT0_BASE 0x20000
+#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr))
+
+#define PORT_STRIDE 0x2000
+#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
+#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
+
+#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_PCIE_MEM_ACCESS_INSTANCES 8
+
+#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_PCIE_FW_INSTANCES 8
+
+#define T5_MYPORT_BASE 0x2c000
+#define T5_MYPORT_REG(reg_addr) (T5_MYPORT_BASE + (reg_addr))
+
+#define T5_PORT0_BASE 0x30000
+#define T5_PORT0_REG(reg_addr) (T5_PORT0_BASE + (reg_addr))
+
+#define T5_PORT_STRIDE 0x4000
+#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
+#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
+
+#define MPS_T5_CLS_SRAM_L(idx) (A_MPS_T5_CLS_SRAM_L + (idx) * 8)
+#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
+
+#define MPS_T5_CLS_SRAM_H(idx) (A_MPS_T5_CLS_SRAM_H + (idx) * 8)
+#define NUM_MPS_T5_CLS_SRAM_H_INSTANCES 512
+
+/* registers for module SGE */
+#define SGE_BASE_ADDR 0x1000
+
+#define A_SGE_PF_KDOORBELL 0x0
+#define A_SGE_VF_KDOORBELL 0x0
+
+#define S_QID 15
+#define M_QID 0x1ffffU
+#define V_QID(x) ((x) << S_QID)
+#define G_QID(x) (((x) >> S_QID) & M_QID)
+
+#define S_DBPRIO 14
+#define V_DBPRIO(x) ((x) << S_DBPRIO)
+#define F_DBPRIO V_DBPRIO(1U)
+
+#define S_PIDX 0
+#define M_PIDX 0x3fffU
+#define V_PIDX(x) ((x) << S_PIDX)
+#define G_PIDX(x) (((x) >> S_PIDX) & M_PIDX)
+
+#define S_DBTYPE 13
+#define V_DBTYPE(x) ((x) << S_DBTYPE)
+#define F_DBTYPE V_DBTYPE(1U)
+
+#define S_PIDX_T5 0
+#define M_PIDX_T5 0x1fffU
+#define V_PIDX_T5(x) ((x) << S_PIDX_T5)
+#define G_PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5)
+
+#define A_SGE_PF_GTS 0x4
+
+#define T4VF_SGE_BASE_ADDR 0x0000
+#define A_SGE_VF_GTS 0x4
+
+#define S_INGRESSQID 16
+#define M_INGRESSQID 0xffffU
+#define V_INGRESSQID(x) ((x) << S_INGRESSQID)
+#define G_INGRESSQID(x) (((x) >> S_INGRESSQID) & M_INGRESSQID)
+
+#define S_SEINTARM 12
+#define V_SEINTARM(x) ((x) << S_SEINTARM)
+#define F_SEINTARM V_SEINTARM(1U)
+
+#define S_CIDXINC 0
+#define M_CIDXINC 0xfffU
+#define V_CIDXINC(x) ((x) << S_CIDXINC)
+#define G_CIDXINC(x) (((x) >> S_CIDXINC) & M_CIDXINC)
+
+#define A_SGE_CONTROL 0x1008
+
+#define S_RXPKTCPLMODE 18
+#define V_RXPKTCPLMODE(x) ((x) << S_RXPKTCPLMODE)
+#define F_RXPKTCPLMODE V_RXPKTCPLMODE(1U)
+
+#define S_EGRSTATUSPAGESIZE 17
+#define V_EGRSTATUSPAGESIZE(x) ((x) << S_EGRSTATUSPAGESIZE)
+#define F_EGRSTATUSPAGESIZE V_EGRSTATUSPAGESIZE(1U)
+
+#define S_PKTSHIFT 10
+#define M_PKTSHIFT 0x7U
+#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
+#define G_PKTSHIFT(x) (((x) >> S_PKTSHIFT) & M_PKTSHIFT)
+
+#define S_INGPADBOUNDARY 4
+#define M_INGPADBOUNDARY 0x7U
+#define V_INGPADBOUNDARY(x) ((x) << S_INGPADBOUNDARY)
+#define G_INGPADBOUNDARY(x) (((x) >> S_INGPADBOUNDARY) & M_INGPADBOUNDARY)
+
+#define A_SGE_HOST_PAGE_SIZE 0x100c
+
+#define S_HOSTPAGESIZEPF7 28
+#define M_HOSTPAGESIZEPF7 0xfU
+#define V_HOSTPAGESIZEPF7(x) ((x) << S_HOSTPAGESIZEPF7)
+#define G_HOSTPAGESIZEPF7(x) (((x) >> S_HOSTPAGESIZEPF7) & M_HOSTPAGESIZEPF7)
+
+#define S_HOSTPAGESIZEPF6 24
+#define M_HOSTPAGESIZEPF6 0xfU
+#define V_HOSTPAGESIZEPF6(x) ((x) << S_HOSTPAGESIZEPF6)
+#define G_HOSTPAGESIZEPF6(x) (((x) >> S_HOSTPAGESIZEPF6) & M_HOSTPAGESIZEPF6)
+
+#define S_HOSTPAGESIZEPF5 20
+#define M_HOSTPAGESIZEPF5 0xfU
+#define V_HOSTPAGESIZEPF5(x) ((x) << S_HOSTPAGESIZEPF5)
+#define G_HOSTPAGESIZEPF5(x) (((x) >> S_HOSTPAGESIZEPF5) & M_HOSTPAGESIZEPF5)
+
+#define S_HOSTPAGESIZEPF4 16
+#define M_HOSTPAGESIZEPF4 0xfU
+#define V_HOSTPAGESIZEPF4(x) ((x) << S_HOSTPAGESIZEPF4)
+#define G_HOSTPAGESIZEPF4(x) (((x) >> S_HOSTPAGESIZEPF4) & M_HOSTPAGESIZEPF4)
+
+#define S_HOSTPAGESIZEPF3 12
+#define M_HOSTPAGESIZEPF3 0xfU
+#define V_HOSTPAGESIZEPF3(x) ((x) << S_HOSTPAGESIZEPF3)
+#define G_HOSTPAGESIZEPF3(x) (((x) >> S_HOSTPAGESIZEPF3) & M_HOSTPAGESIZEPF3)
+
+#define S_HOSTPAGESIZEPF2 8
+#define M_HOSTPAGESIZEPF2 0xfU
+#define V_HOSTPAGESIZEPF2(x) ((x) << S_HOSTPAGESIZEPF2)
+#define G_HOSTPAGESIZEPF2(x) (((x) >> S_HOSTPAGESIZEPF2) & M_HOSTPAGESIZEPF2)
+
+#define S_HOSTPAGESIZEPF1 4
+#define M_HOSTPAGESIZEPF1 0xfU
+#define V_HOSTPAGESIZEPF1(x) ((x) << S_HOSTPAGESIZEPF1)
+#define G_HOSTPAGESIZEPF1(x) (((x) >> S_HOSTPAGESIZEPF1) & M_HOSTPAGESIZEPF1)
+
+#define S_HOSTPAGESIZEPF0 0
+#define M_HOSTPAGESIZEPF0 0xfU
+#define V_HOSTPAGESIZEPF0(x) ((x) << S_HOSTPAGESIZEPF0)
+#define G_HOSTPAGESIZEPF0(x) (((x) >> S_HOSTPAGESIZEPF0) & M_HOSTPAGESIZEPF0)
+
+#define A_SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
+
+#define S_QUEUESPERPAGEPF1 4
+#define M_QUEUESPERPAGEPF1 0xfU
+#define V_QUEUESPERPAGEPF1(x) ((x) << S_QUEUESPERPAGEPF1)
+#define G_QUEUESPERPAGEPF1(x) (((x) >> S_QUEUESPERPAGEPF1) & M_QUEUESPERPAGEPF1)
+
+#define S_QUEUESPERPAGEPF0 0
+#define M_QUEUESPERPAGEPF0 0xfU
+#define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0)
+#define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0)
+
+#define A_SGE_EGRESS_QUEUES_PER_PAGE_VF 0x1014
+
+#define S_ERR_CPL_EXCEED_IQE_SIZE 22
+#define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE)
+#define F_ERR_CPL_EXCEED_IQE_SIZE V_ERR_CPL_EXCEED_IQE_SIZE(1U)
+
+#define S_ERR_INVALID_CIDX_INC 21
+#define V_ERR_INVALID_CIDX_INC(x) ((x) << S_ERR_INVALID_CIDX_INC)
+#define F_ERR_INVALID_CIDX_INC V_ERR_INVALID_CIDX_INC(1U)
+
+#define S_ERR_CPL_OPCODE_0 19
+#define V_ERR_CPL_OPCODE_0(x) ((x) << S_ERR_CPL_OPCODE_0)
+#define F_ERR_CPL_OPCODE_0 V_ERR_CPL_OPCODE_0(1U)
+
+#define S_ERR_DROPPED_DB 18
+#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB)
+#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U)
+
+#define S_ERR_DATA_CPL_ON_HIGH_QID1 17
+#define V_ERR_DATA_CPL_ON_HIGH_QID1(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID1)
+#define F_ERR_DATA_CPL_ON_HIGH_QID1 V_ERR_DATA_CPL_ON_HIGH_QID1(1U)
+
+#define S_ERR_DATA_CPL_ON_HIGH_QID0 16
+#define V_ERR_DATA_CPL_ON_HIGH_QID0(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID0)
+#define F_ERR_DATA_CPL_ON_HIGH_QID0 V_ERR_DATA_CPL_ON_HIGH_QID0(1U)
+
+#define S_ERR_BAD_DB_PIDX3 15
+#define V_ERR_BAD_DB_PIDX3(x) ((x) << S_ERR_BAD_DB_PIDX3)
+#define F_ERR_BAD_DB_PIDX3 V_ERR_BAD_DB_PIDX3(1U)
+
+#define S_ERR_BAD_DB_PIDX2 14
+#define V_ERR_BAD_DB_PIDX2(x) ((x) << S_ERR_BAD_DB_PIDX2)
+#define F_ERR_BAD_DB_PIDX2 V_ERR_BAD_DB_PIDX2(1U)
+
+#define S_ERR_BAD_DB_PIDX1 13
+#define V_ERR_BAD_DB_PIDX1(x) ((x) << S_ERR_BAD_DB_PIDX1)
+#define F_ERR_BAD_DB_PIDX1 V_ERR_BAD_DB_PIDX1(1U)
+
+#define S_ERR_BAD_DB_PIDX0 12
+#define V_ERR_BAD_DB_PIDX0(x) ((x) << S_ERR_BAD_DB_PIDX0)
+#define F_ERR_BAD_DB_PIDX0 V_ERR_BAD_DB_PIDX0(1U)
+
+#define S_ERR_ING_PCIE_CHAN 11
+#define V_ERR_ING_PCIE_CHAN(x) ((x) << S_ERR_ING_PCIE_CHAN)
+#define F_ERR_ING_PCIE_CHAN V_ERR_ING_PCIE_CHAN(1U)
+
+#define S_ERR_ING_CTXT_PRIO 10
+#define V_ERR_ING_CTXT_PRIO(x) ((x) << S_ERR_ING_CTXT_PRIO)
+#define F_ERR_ING_CTXT_PRIO V_ERR_ING_CTXT_PRIO(1U)
+
+#define S_ERR_EGR_CTXT_PRIO 9
+#define V_ERR_EGR_CTXT_PRIO(x) ((x) << S_ERR_EGR_CTXT_PRIO)
+#define F_ERR_EGR_CTXT_PRIO V_ERR_EGR_CTXT_PRIO(1U)
+
+#define S_DBFIFO_HP_INT 8
+#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT)
+#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U)
+
+#define S_DBFIFO_LP_INT 7
+#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT)
+#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U)
+
+#define S_INGRESS_SIZE_ERR 5
+#define V_INGRESS_SIZE_ERR(x) ((x) << S_INGRESS_SIZE_ERR)
+#define F_INGRESS_SIZE_ERR V_INGRESS_SIZE_ERR(1U)
+
+#define S_EGRESS_SIZE_ERR 4
+#define V_EGRESS_SIZE_ERR(x) ((x) << S_EGRESS_SIZE_ERR)
+#define F_EGRESS_SIZE_ERR V_EGRESS_SIZE_ERR(1U)
+
+#define A_SGE_INT_ENABLE3 0x1040
+
+#define A_SGE_FL_BUFFER_SIZE0 0x1044
+#define A_SGE_FL_BUFFER_SIZE1 0x1048
+#define A_SGE_FL_BUFFER_SIZE2 0x104c
+#define A_SGE_FL_BUFFER_SIZE3 0x1050
+
+#define A_SGE_FLM_CFG 0x1090
+
+#define S_CREDITCNT 4
+#define M_CREDITCNT 0x3U
+#define V_CREDITCNT(x) ((x) << S_CREDITCNT)
+#define G_CREDITCNT(x) (((x) >> S_CREDITCNT) & M_CREDITCNT)
+
+#define S_CREDITCNTPACKING 2
+#define M_CREDITCNTPACKING 0x3U
+#define V_CREDITCNTPACKING(x) ((x) << S_CREDITCNTPACKING)
+#define G_CREDITCNTPACKING(x) (((x) >> S_CREDITCNTPACKING) & M_CREDITCNTPACKING)
+
+#define A_SGE_CONM_CTRL 0x1094
+
+#define S_T6_EGRTHRESHOLDPACKING 16
+#define M_T6_EGRTHRESHOLDPACKING 0xffU
+#define G_T6_EGRTHRESHOLDPACKING(x) (((x) >> S_T6_EGRTHRESHOLDPACKING) & \
+ M_T6_EGRTHRESHOLDPACKING)
+
+#define S_EGRTHRESHOLD 8
+#define M_EGRTHRESHOLD 0x3fU
+#define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD)
+#define G_EGRTHRESHOLD(x) (((x) >> S_EGRTHRESHOLD) & M_EGRTHRESHOLD)
+
+#define S_EGRTHRESHOLDPACKING 14
+#define M_EGRTHRESHOLDPACKING 0x3fU
+#define V_EGRTHRESHOLDPACKING(x) ((x) << S_EGRTHRESHOLDPACKING)
+#define G_EGRTHRESHOLDPACKING(x) (((x) >> S_EGRTHRESHOLDPACKING) & \
+ M_EGRTHRESHOLDPACKING)
+
+#define S_INGTHRESHOLD 2
+#define M_INGTHRESHOLD 0x3fU
+#define V_INGTHRESHOLD(x) ((x) << S_INGTHRESHOLD)
+#define G_INGTHRESHOLD(x) (((x) >> S_INGTHRESHOLD) & M_INGTHRESHOLD)
+
+#define A_SGE_INGRESS_RX_THRESHOLD 0x10a0
+
+#define S_THRESHOLD_0 24
+#define M_THRESHOLD_0 0x3fU
+#define V_THRESHOLD_0(x) ((x) << S_THRESHOLD_0)
+#define G_THRESHOLD_0(x) (((x) >> S_THRESHOLD_0) & M_THRESHOLD_0)
+
+#define S_THRESHOLD_1 16
+#define M_THRESHOLD_1 0x3fU
+#define V_THRESHOLD_1(x) ((x) << S_THRESHOLD_1)
+#define G_THRESHOLD_1(x) (((x) >> S_THRESHOLD_1) & M_THRESHOLD_1)
+
+#define S_THRESHOLD_2 8
+#define M_THRESHOLD_2 0x3fU
+#define V_THRESHOLD_2(x) ((x) << S_THRESHOLD_2)
+#define G_THRESHOLD_2(x) (((x) >> S_THRESHOLD_2) & M_THRESHOLD_2)
+
+#define S_THRESHOLD_3 0
+#define M_THRESHOLD_3 0x3fU
+#define V_THRESHOLD_3(x) ((x) << S_THRESHOLD_3)
+#define G_THRESHOLD_3(x) (((x) >> S_THRESHOLD_3) & M_THRESHOLD_3)
+
+#define A_SGE_TIMER_VALUE_0_AND_1 0x10b8
+
+#define S_TIMERVALUE0 16
+#define M_TIMERVALUE0 0xffffU
+#define V_TIMERVALUE0(x) ((x) << S_TIMERVALUE0)
+#define G_TIMERVALUE0(x) (((x) >> S_TIMERVALUE0) & M_TIMERVALUE0)
+
+#define S_TIMERVALUE1 0
+#define M_TIMERVALUE1 0xffffU
+#define V_TIMERVALUE1(x) ((x) << S_TIMERVALUE1)
+#define G_TIMERVALUE1(x) (((x) >> S_TIMERVALUE1) & M_TIMERVALUE1)
+
+#define A_SGE_TIMER_VALUE_2_AND_3 0x10bc
+
+#define S_TIMERVALUE2 16
+#define M_TIMERVALUE2 0xffffU
+#define V_TIMERVALUE2(x) ((x) << S_TIMERVALUE2)
+#define G_TIMERVALUE2(x) (((x) >> S_TIMERVALUE2) & M_TIMERVALUE2)
+
+#define S_TIMERVALUE3 0
+#define M_TIMERVALUE3 0xffffU
+#define V_TIMERVALUE3(x) ((x) << S_TIMERVALUE3)
+#define G_TIMERVALUE3(x) (((x) >> S_TIMERVALUE3) & M_TIMERVALUE3)
+
+#define A_SGE_TIMER_VALUE_4_AND_5 0x10c0
+
+#define S_TIMERVALUE4 16
+#define M_TIMERVALUE4 0xffffU
+#define V_TIMERVALUE4(x) ((x) << S_TIMERVALUE4)
+#define G_TIMERVALUE4(x) (((x) >> S_TIMERVALUE4) & M_TIMERVALUE4)
+
+#define S_TIMERVALUE5 0
+#define M_TIMERVALUE5 0xffffU
+#define V_TIMERVALUE5(x) ((x) << S_TIMERVALUE5)
+#define G_TIMERVALUE5(x) (((x) >> S_TIMERVALUE5) & M_TIMERVALUE5)
+
+#define A_SGE_DEBUG_INDEX 0x10cc
+#define A_SGE_DEBUG_DATA_HIGH 0x10d0
+#define A_SGE_DEBUG_DATA_LOW 0x10d4
+#define A_SGE_STAT_CFG 0x10ec
+
+#define S_STATMODE 2
+#define M_STATMODE 0x3U
+#define V_STATMODE(x) ((x) << S_STATMODE)
+#define G_STATMODE(x) (((x) >> S_STATMODE) & M_STATMODE)
+
+#define S_STATSOURCE_T5 9
+#define M_STATSOURCE_T5 0xfU
+#define V_STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
+#define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5)
+
+#define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+#define A_SGE_INGRESS_QUEUES_PER_PAGE_VF 0x10f8
+
+#define A_SGE_CONTROL2 0x1124
+
+#define S_IDMAARBROUNDROBIN 19
+#define V_IDMAARBROUNDROBIN(x) ((x) << S_IDMAARBROUNDROBIN)
+#define F_IDMAARBROUNDROBIN V_IDMAARBROUNDROBIN(1U)
+
+#define S_INGPACKBOUNDARY 16
+#define M_INGPACKBOUNDARY 0x7U
+#define V_INGPACKBOUNDARY(x) ((x) << S_INGPACKBOUNDARY)
+#define G_INGPACKBOUNDARY(x) (((x) >> S_INGPACKBOUNDARY) & M_INGPACKBOUNDARY)
+
+#define S_BUSY 31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY V_BUSY(1U)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
+#define A_SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8
+#define A_SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
+
+/* registers for module PCIE */
+#define PCIE_BASE_ADDR 0x3000
+
+#define A_PCIE_MEM_ACCESS_BASE_WIN 0x3068
+
+#define S_PCIEOFST 10
+#define M_PCIEOFST 0x3fffffU
+#define V_PCIEOFST(x) ((x) << S_PCIEOFST)
+#define G_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
+
+#define S_BIR 8
+#define M_BIR 0x3U
+#define V_BIR(x) ((x) << S_BIR)
+#define G_BIR(x) (((x) >> S_BIR) & M_BIR)
+
+#define S_WINDOW 0
+#define M_WINDOW 0xffU
+#define V_WINDOW(x) ((x) << S_WINDOW)
+#define G_WINDOW(x) (((x) >> S_WINDOW) & M_WINDOW)
+
+#define A_PCIE_MEM_ACCESS_OFFSET 0x306c
+
+#define S_PFNUM 0
+#define M_PFNUM 0x7U
+#define V_PFNUM(x) ((x) << S_PFNUM)
+#define G_PFNUM(x) (((x) >> S_PFNUM) & M_PFNUM)
+
+#define A_PCIE_FW 0x30b8
+#define A_PCIE_FW_PF 0x30bc
+
+#define A_PCIE_CFG2 0x3018
+
+#define S_TOTMAXTAG 0
+#define M_TOTMAXTAG 0x3U
+#define V_TOTMAXTAG(x) ((x) << S_TOTMAXTAG)
+
+#define S_T6_TOTMAXTAG 0
+#define M_T6_TOTMAXTAG 0x7U
+#define V_T6_TOTMAXTAG(x) ((x) << S_T6_TOTMAXTAG)
+
+#define A_PCIE_CMD_CFG 0x5980
+
+#define S_MINTAG 0
+#define M_MINTAG 0xffU
+#define V_MINTAG(x) ((x) << S_MINTAG)
+
+#define S_T6_MINTAG 0
+#define M_T6_MINTAG 0xffU
+#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
+
+/* registers for module CIM */
+#define CIM_BASE_ADDR 0x7b00
+
+#define A_CIM_VF_EXT_MAILBOX_CTRL 0x0
+
+#define A_CIM_PF_MAILBOX_DATA 0x240
+#define A_CIM_PF_MAILBOX_CTRL 0x280
+
+#define S_MBMSGVALID 3
+#define V_MBMSGVALID(x) ((x) << S_MBMSGVALID)
+#define F_MBMSGVALID V_MBMSGVALID(1U)
+
+#define S_MBOWNER 0
+#define M_MBOWNER 0x3U
+#define V_MBOWNER(x) ((x) << S_MBOWNER)
+#define G_MBOWNER(x) (((x) >> S_MBOWNER) & M_MBOWNER)
+
+#define A_CIM_PF_MAILBOX_CTRL_SHADOW_COPY 0x290
+#define A_CIM_BOOT_CFG 0x7b00
+
+#define S_UPCRST 0
+#define V_UPCRST(x) ((x) << S_UPCRST)
+#define F_UPCRST V_UPCRST(1U)
+
+#define NUM_CIM_PF_MAILBOX_DATA_INSTANCES 16
+
+/* registers for module TP */
+#define A_TP_OUT_CONFIG 0x7d04
+
+#define S_CRXPKTENC 3
+#define V_CRXPKTENC(x) ((x) << S_CRXPKTENC)
+#define F_CRXPKTENC V_CRXPKTENC(1U)
+
+#define TP_BASE_ADDR 0x7d00
+#define A_TP_CMM_TCB_BASE 0x7d10
+
+#define A_TP_TIMER_RESOLUTION 0x7d90
+
+#define S_TIMERRESOLUTION 16
+#define M_TIMERRESOLUTION 0xffU
+#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
+#define G_TIMERRESOLUTION(x) (((x) >> S_TIMERRESOLUTION) & M_TIMERRESOLUTION)
+
+#define S_DELAYEDACKRESOLUTION 0
+#define M_DELAYEDACKRESOLUTION 0xffU
+#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
+#define G_DELAYEDACKRESOLUTION(x) (((x) >> S_DELAYEDACKRESOLUTION) & \
+ M_DELAYEDACKRESOLUTION)
+
+#define A_TP_CCTRL_TABLE 0x7ddc
+
+#define A_TP_MTU_TABLE 0x7de4
+
+#define S_MTUINDEX 24
+#define M_MTUINDEX 0xffU
+#define V_MTUINDEX(x) ((x) << S_MTUINDEX)
+#define G_MTUINDEX(x) (((x) >> S_MTUINDEX) & M_MTUINDEX)
+
+#define S_MTUWIDTH 16
+#define M_MTUWIDTH 0xfU
+#define V_MTUWIDTH(x) ((x) << S_MTUWIDTH)
+#define G_MTUWIDTH(x) (((x) >> S_MTUWIDTH) & M_MTUWIDTH)
+
+#define S_MTUVALUE 0
+#define M_MTUVALUE 0x3fffU
+#define V_MTUVALUE(x) ((x) << S_MTUVALUE)
+#define G_MTUVALUE(x) (((x) >> S_MTUVALUE) & M_MTUVALUE)
+
+#define A_TP_RSS_CONFIG_VRT 0x7e00
+
+#define S_KEYMODE 6
+#define M_KEYMODE 0x3U
+#define G_KEYMODE(x) (((x) >> S_KEYMODE) & M_KEYMODE)
+
+#define S_KEYWRADDR 0
+#define V_KEYWRADDR(x) ((x) << S_KEYWRADDR)
+
+#define S_KEYWREN 4
+#define V_KEYWREN(x) ((x) << S_KEYWREN)
+#define F_KEYWREN V_KEYWREN(1U)
+
+#define S_KEYWRADDRX 30
+#define V_KEYWRADDRX(x) ((x) << S_KEYWRADDRX)
+
+#define S_KEYEXTEND 26
+#define V_KEYEXTEND(x) ((x) << S_KEYEXTEND)
+#define F_KEYEXTEND V_KEYEXTEND(1U)
+
+#define S_T6_VFWRADDR 8
+#define V_T6_VFWRADDR(x) ((x) << S_T6_VFWRADDR)
+
+#define A_TP_PIO_ADDR 0x7e40
+#define A_TP_PIO_DATA 0x7e44
+
+#define A_TP_RSS_SECRET_KEY0 0x40
+
+#define A_TP_VLAN_PRI_MAP 0x140
+
+#define S_FRAGMENTATION 9
+#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
+#define F_FRAGMENTATION V_FRAGMENTATION(1U)
+
+#define S_MPSHITTYPE 8
+#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
+#define F_MPSHITTYPE V_MPSHITTYPE(1U)
+
+#define S_MACMATCH 7
+#define V_MACMATCH(x) ((x) << S_MACMATCH)
+#define F_MACMATCH V_MACMATCH(1U)
+
+#define S_ETHERTYPE 6
+#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
+#define F_ETHERTYPE V_ETHERTYPE(1U)
+
+#define S_PROTOCOL 5
+#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
+#define F_PROTOCOL V_PROTOCOL(1U)
+
+#define S_TOS 4
+#define V_TOS(x) ((x) << S_TOS)
+#define F_TOS V_TOS(1U)
+
+#define S_VLAN 3
+#define V_VLAN(x) ((x) << S_VLAN)
+#define F_VLAN V_VLAN(1U)
+
+#define S_VNIC_ID 2
+#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
+#define F_VNIC_ID V_VNIC_ID(1U)
+
+#define S_PORT 1
+#define V_PORT(x) ((x) << S_PORT)
+#define F_PORT V_PORT(1U)
+
+#define S_FCOE 0
+#define V_FCOE(x) ((x) << S_FCOE)
+#define F_FCOE V_FCOE(1U)
+
+#define A_TP_INGRESS_CONFIG 0x141
+
+#define S_VNIC 11
+#define V_VNIC(x) ((x) << S_VNIC)
+#define F_VNIC V_VNIC(1U)
+
+#define S_CSUM_HAS_PSEUDO_HDR 10
+#define V_CSUM_HAS_PSEUDO_HDR(x) ((x) << S_CSUM_HAS_PSEUDO_HDR)
+#define F_CSUM_HAS_PSEUDO_HDR V_CSUM_HAS_PSEUDO_HDR(1U)
+
+#define S_RM_OVLAN 9
+#define V_RM_OVLAN(x) ((x) << S_RM_OVLAN)
+
+/* registers for module MA */
+#define A_MA_EDRAM0_BAR 0x77c0
+
+#define S_EDRAM0_SIZE 0
+#define M_EDRAM0_SIZE 0xfffU
+#define V_EDRAM0_SIZE(x) ((x) << S_EDRAM0_SIZE)
+#define G_EDRAM0_SIZE(x) (((x) >> S_EDRAM0_SIZE) & M_EDRAM0_SIZE)
+
+#define A_MA_EXT_MEMORY0_BAR 0x77c8
+
+#define S_EXT_MEM0_SIZE 0
+#define M_EXT_MEM0_SIZE 0xfffU
+#define V_EXT_MEM0_SIZE(x) ((x) << S_EXT_MEM0_SIZE)
+#define G_EXT_MEM0_SIZE(x) (((x) >> S_EXT_MEM0_SIZE) & M_EXT_MEM0_SIZE)
+
+/* registers for module MPS */
+#define MPS_BASE_ADDR 0x9000
+#define T4VF_MPS_BASE_ADDR 0x0100
+
+#define S_REPLICATE 11
+#define V_REPLICATE(x) ((x) << S_REPLICATE)
+#define F_REPLICATE V_REPLICATE(1U)
+
+#define S_PF 8
+#define M_PF 0x7U
+#define V_PF(x) ((x) << S_PF)
+#define G_PF(x) (((x) >> S_PF) & M_PF)
+
+#define S_VF_VALID 7
+#define V_VF_VALID(x) ((x) << S_VF_VALID)
+#define F_VF_VALID V_VF_VALID(1U)
+
+#define S_VF 0
+#define M_VF 0x7fU
+#define V_VF(x) ((x) << S_VF)
+#define G_VF(x) (((x) >> S_VF) & M_VF)
+
+#define A_MPS_STAT_CTL 0x9600
+
+#define S_COUNTPAUSEMCRX 5
+#define V_COUNTPAUSEMCRX(x) ((x) << S_COUNTPAUSEMCRX)
+#define F_COUNTPAUSEMCRX V_COUNTPAUSEMCRX(1U)
+
+#define S_COUNTPAUSESTATRX 4
+#define V_COUNTPAUSESTATRX(x) ((x) << S_COUNTPAUSESTATRX)
+#define F_COUNTPAUSESTATRX V_COUNTPAUSESTATRX(1U)
+
+#define S_COUNTPAUSEMCTX 3
+#define V_COUNTPAUSEMCTX(x) ((x) << S_COUNTPAUSEMCTX)
+#define F_COUNTPAUSEMCTX V_COUNTPAUSEMCTX(1U)
+
+#define S_COUNTPAUSESTATTX 2
+#define V_COUNTPAUSESTATTX(x) ((x) << S_COUNTPAUSESTATTX)
+#define F_COUNTPAUSESTATTX V_COUNTPAUSESTATTX(1U)
+
+#define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
+#define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
+#define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
+#define A_MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c
+#define A_MPS_PORT_STAT_TX_PORT_BCAST_L 0x410
+#define A_MPS_PORT_STAT_TX_PORT_BCAST_H 0x414
+#define A_MPS_PORT_STAT_TX_PORT_MCAST_L 0x418
+#define A_MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c
+#define A_MPS_PORT_STAT_TX_PORT_UCAST_L 0x420
+#define A_MPS_PORT_STAT_TX_PORT_UCAST_H 0x424
+#define A_MPS_PORT_STAT_TX_PORT_ERROR_L 0x428
+#define A_MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c
+#define A_MPS_PORT_STAT_TX_PORT_64B_L 0x430
+#define A_MPS_PORT_STAT_TX_PORT_64B_H 0x434
+#define A_MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438
+#define A_MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c
+#define A_MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440
+#define A_MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444
+#define A_MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448
+#define A_MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c
+#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450
+#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454
+#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458
+#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c
+#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460
+#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464
+#define A_MPS_PORT_STAT_TX_PORT_DROP_L 0x468
+#define A_MPS_PORT_STAT_TX_PORT_DROP_H 0x46c
+#define A_MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470
+#define A_MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474
+#define A_MPS_PORT_STAT_TX_PORT_PPP0_L 0x478
+#define A_MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c
+#define A_MPS_PORT_STAT_TX_PORT_PPP1_L 0x480
+#define A_MPS_PORT_STAT_TX_PORT_PPP1_H 0x484
+#define A_MPS_PORT_STAT_TX_PORT_PPP2_L 0x488
+#define A_MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c
+#define A_MPS_PORT_STAT_TX_PORT_PPP3_L 0x490
+#define A_MPS_PORT_STAT_TX_PORT_PPP3_H 0x494
+#define A_MPS_PORT_STAT_TX_PORT_PPP4_L 0x498
+#define A_MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c
+#define A_MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0
+#define A_MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4
+#define A_MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8
+#define A_MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac
+#define A_MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0
+#define A_MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4
+#define A_MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0
+#define A_MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4
+#define A_MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8
+#define A_MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc
+#define A_MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0
+#define A_MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4
+#define A_MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8
+#define A_MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc
+#define A_MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0
+#define A_MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4
+#define A_MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8
+#define A_MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec
+#define A_MPS_PORT_STAT_LB_PORT_64B_L 0x4f0
+#define A_MPS_PORT_STAT_LB_PORT_64B_H 0x4f4
+#define A_MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8
+#define A_MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc
+#define A_MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500
+#define A_MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504
+#define A_MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508
+#define A_MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c
+#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510
+#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514
+#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518
+#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c
+#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
+#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
+#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528
+#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_H 0x52c
+#define A_MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
+#define A_MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
+#define A_MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
+#define A_MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c
+#define A_MPS_PORT_STAT_RX_PORT_BCAST_L 0x550
+#define A_MPS_PORT_STAT_RX_PORT_BCAST_H 0x554
+#define A_MPS_PORT_STAT_RX_PORT_MCAST_L 0x558
+#define A_MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c
+#define A_MPS_PORT_STAT_RX_PORT_UCAST_L 0x560
+#define A_MPS_PORT_STAT_RX_PORT_UCAST_H 0x564
+#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568
+#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c
+#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570
+#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574
+#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578
+#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c
+#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580
+#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584
+#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588
+#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c
+#define A_MPS_PORT_STAT_RX_PORT_64B_L 0x590
+#define A_MPS_PORT_STAT_RX_PORT_64B_H 0x594
+#define A_MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598
+#define A_MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c
+#define A_MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0
+#define A_MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4
+#define A_MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8
+#define A_MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac
+#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0
+#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4
+#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8
+#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc
+#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0
+#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4
+#define A_MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8
+#define A_MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc
+#define A_MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0
+#define A_MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4
+#define A_MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8
+#define A_MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc
+#define A_MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0
+#define A_MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4
+#define A_MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8
+#define A_MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec
+#define A_MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0
+#define A_MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4
+#define A_MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8
+#define A_MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc
+#define A_MPS_PORT_STAT_RX_PORT_PPP6_L 0x600
+#define A_MPS_PORT_STAT_RX_PORT_PPP6_H 0x604
+#define A_MPS_PORT_STAT_RX_PORT_PPP7_L 0x608
+#define A_MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
+#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
+#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
+#define A_MPS_CMN_CTL 0x9000
+
+#define S_NUMPORTS 0
+#define M_NUMPORTS 0x3U
+#define V_NUMPORTS(x) ((x) << S_NUMPORTS)
+#define G_NUMPORTS(x) (((x) >> S_NUMPORTS) & M_NUMPORTS)
+
+#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
+#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
+#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648
+#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c
+#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650
+#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654
+#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658
+#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c
+#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660
+#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664
+#define A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668
+#define A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c
+#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670
+#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674
+#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678
+#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c
+#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680
+#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684
+#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688
+#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c
+#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690
+#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694
+#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698
+#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c
+#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0
+#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4
+#define A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8
+#define A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac
+#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0
+#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
+#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
+#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
+
+#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L 0x80
+#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88
+#define A_MPS_VF_STAT_TX_VF_MCAST_BYTES_L 0x90
+#define A_MPS_VF_STAT_TX_VF_MCAST_FRAMES_L 0x98
+#define A_MPS_VF_STAT_TX_VF_UCAST_BYTES_L 0xa0
+#define A_MPS_VF_STAT_TX_VF_UCAST_FRAMES_L 0xa8
+#define A_MPS_VF_STAT_TX_VF_DROP_FRAMES_L 0xb0
+#define A_MPS_VF_STAT_RX_VF_BCAST_FRAMES_L 0xd0
+#define A_MPS_VF_STAT_RX_VF_MCAST_FRAMES_L 0xe0
+#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_L 0xf0
+#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_L 0xf8
+
+#define A_MPS_PORT0_RX_IVLAN 0x3011c
+
+#define S_IVLAN_ETYPE 0
+#define M_IVLAN_ETYPE 0xffffU
+#define V_IVLAN_ETYPE(x) ((x) << S_IVLAN_ETYPE)
+
+#define MPS_PORT_RX_IVLAN_STRIDE 0x4000
+#define MPS_PORT_RX_IVLAN(idx) \
+ (A_MPS_PORT0_RX_IVLAN + (idx) * MPS_PORT_RX_IVLAN_STRIDE)
+
+#define A_MPS_PORT0_RX_OVLAN0 0x30120
+
+#define S_OVLAN_MASK 16
+#define M_OVLAN_MASK 0xffffU
+#define V_OVLAN_MASK(x) ((x) << S_OVLAN_MASK)
+
+#define S_OVLAN_ETYPE 0
+#define M_OVLAN_ETYPE 0xffffU
+#define V_OVLAN_ETYPE(x) ((x) << S_OVLAN_ETYPE)
+
+#define MPS_PORT_RX_OVLAN_STRIDE 0x4000
+#define MPS_PORT_RX_OVLAN_BASE(idx) \
+(A_MPS_PORT0_RX_OVLAN0 + (idx) * MPS_PORT_RX_OVLAN_STRIDE)
+#define MPS_PORT_RX_OVLAN_REG(idx, reg) (MPS_PORT_RX_OVLAN_BASE(idx) + (reg))
+
+#define A_RX_OVLAN0 0x0
+#define A_RX_OVLAN1 0x4
+#define A_RX_OVLAN2 0x8
+
+#define A_MPS_PORT0_RX_CTL 0x30100
+
+#define S_OVLAN_EN0 0
+#define V_OVLAN_EN0(x) ((x) << S_OVLAN_EN0)
+#define F_OVLAN_EN0 V_OVLAN_EN0(1)
+
+#define S_OVLAN_EN1 1
+#define V_OVLAN_EN1(x) ((x) << S_OVLAN_EN1)
+#define F_OVLAN_EN1 V_OVLAN_EN1(1)
+
+#define S_OVLAN_EN2 2
+#define V_OVLAN_EN2(x) ((x) << S_OVLAN_EN2)
+#define F_OVLAN_EN2 V_OVLAN_EN2(1)
+
+#define S_IVLAN_EN 4
+#define V_IVLAN_EN(x) ((x) << S_IVLAN_EN)
+#define F_IVLAN_EN V_IVLAN_EN(1)
+
+#define MPS_PORT_RX_CTL_STRIDE 0x4000
+#define MPS_PORT_RX_CTL(idx) \
+ (A_MPS_PORT0_RX_CTL + (idx) * MPS_PORT_RX_CTL_STRIDE)
+
+/* registers for module ULP_RX */
+#define ULP_RX_BASE_ADDR 0x19150
+
+#define S_HPZ0 0
+#define M_HPZ0 0xfU
+#define V_HPZ0(x) ((x) << S_HPZ0)
+#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
+
+#define A_ULP_RX_TDDP_PSZ 0x19178
+
+/* registers for module SF */
+#define SF_BASE_ADDR 0x193f8
+
+#define A_SF_DATA 0x193f8
+#define A_SF_OP 0x193fc
+
+#define S_SF_LOCK 4
+#define V_SF_LOCK(x) ((x) << S_SF_LOCK)
+#define F_SF_LOCK V_SF_LOCK(1U)
+
+#define S_CONT 3
+#define V_CONT(x) ((x) << S_CONT)
+#define F_CONT V_CONT(1U)
+
+#define S_BYTECNT 1
+#define M_BYTECNT 0x3U
+#define V_BYTECNT(x) ((x) << S_BYTECNT)
+#define G_BYTECNT(x) (((x) >> S_BYTECNT) & M_BYTECNT)
+
+#define S_OP 0
+#define V_OP(x) ((x) << S_OP)
+#define F_OP V_OP(1U)
+
+/* registers for module PL */
+#define PL_BASE_ADDR 0x19400
+
+#define S_SOURCEPF 8
+#define M_SOURCEPF 0x7U
+#define V_SOURCEPF(x) ((x) << S_SOURCEPF)
+#define G_SOURCEPF(x) (((x) >> S_SOURCEPF) & M_SOURCEPF)
+
+#define S_T6_SOURCEPF 9
+#define M_T6_SOURCEPF 0x7U
+#define V_T6_SOURCEPF(x) ((x) << S_T6_SOURCEPF)
+#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF)
+
+#define A_PL_PF_INT_ENABLE 0x3c4
+
+#define S_PFSW 3
+#define V_PFSW(x) ((x) << S_PFSW)
+#define F_PFSW V_PFSW(1U)
+
+#define S_PFCIM 1
+#define V_PFCIM(x) ((x) << S_PFCIM)
+#define F_PFCIM V_PFCIM(1U)
+
+#define A_PL_WHOAMI 0x19400
+#define A_PL_VF_WHOAMI 0x0
+
+#define A_PL_RST 0x19428
+
+#define A_PL_INT_MAP0 0x19414
+
+#define S_PIORST 1
+#define V_PIORST(x) ((x) << S_PIORST)
+#define F_PIORST V_PIORST(1U)
+
+#define S_PIORSTMODE 0
+#define V_PIORSTMODE(x) ((x) << S_PIORSTMODE)
+#define F_PIORSTMODE V_PIORSTMODE(1U)
+
+#define A_PL_REV 0x1943c
+#define A_PL_VF_REV 0x4
+
+#define S_REV 0
+#define M_REV 0xfU
+#define V_REV(x) ((x) << S_REV)
+#define G_REV(x) (((x) >> S_REV) & M_REV)
+
+/* registers for module LE */
+#define A_LE_DB_CONFIG 0x19c04
+
+#define S_HASHEN 20
+#define V_HASHEN(x) ((x) << S_HASHEN)
+#define F_HASHEN V_HASHEN(1U)
+
+#define A_LE_DB_TID_HASHBASE 0x19df8
+
+#define LE_3_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eac
+#define LE_4_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eb0
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs_values.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs_values.h
new file mode 100644
index 00000000..a9414d20
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs_values.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4_REGS_VALUES_H__
+#define __T4_REGS_VALUES_H__
+
+/*
+ * This file contains definitions for various T4 register value hardware
+ * constants. The types of values encoded here are predominantly those for
+ * register fields which control "modal" behavior. For the most part, we do
+ * not include definitions for register fields which are simple numeric
+ * metrics, etc.
+ */
+
+/*
+ * SGE definitions.
+ * ================
+ */
+
+/*
+ * SGE register field values.
+ */
+
+/* CONTROL register */
+#define X_RXPKTCPLMODE_SPLIT 1
+#define X_INGPCIEBOUNDARY_32B 0
+#define X_INGPADBOUNDARY_SHIFT 5
+#define X_INGPADBOUNDARY_32B 0
+
+#define X_T6_INGPADBOUNDARY_SHIFT 3
+#define X_T6_INGPADBOUNDARY_8B 0
+
+/* CONTROL2 register */
+#define X_INGPACKBOUNDARY_SHIFT 5
+#define X_INGPACKBOUNDARY_16B 0
+#define X_INGPACKBOUNDARY_64B 1
+
+/* GTS register */
+#define X_TIMERREG_RESTART_COUNTER 6
+#define X_TIMERREG_UPDATE_CIDX 7
+
+/*
+ * Egress Context field values
+ */
+#define X_FETCHBURSTMIN_64B 2
+#define X_FETCHBURSTMIN_128B 3
+#define X_FETCHBURSTMAX_256B 2
+#define X_FETCHBURSTMAX_512B 3
+
+#define X_HOSTFCMODE_NONE 0
+
+/*
+ * Ingress Context field values
+ */
+#define X_UPDATEDELIVERY_STATUS_PAGE 2
+
+#define X_RSPD_TYPE_FLBUF 0
+#define X_RSPD_TYPE_CPL 1
+
+/*
+ * Context field definitions. This is by no means a complete list of SGE
+ * Context fields. In the vast majority of cases the firmware initializes
+ * things the way they need to be set up. But in a few small cases, we need
+ * to compute new values and ship them off to the firmware to be applied to
+ * the SGE Conexts ...
+ */
+
+/*
+ * Congestion Manager Definitions.
+ */
+#define S_CONMCTXT_CNGTPMODE 19
+#define M_CONMCTXT_CNGTPMODE 0x3
+#define V_CONMCTXT_CNGTPMODE(x) ((x) << S_CONMCTXT_CNGTPMODE)
+#define G_CONMCTXT_CNGTPMODE(x) \
+ (((x) >> S_CONMCTXT_CNGTPMODE) & M_CONMCTXT_CNGTPMODE)
+#define S_CONMCTXT_CNGCHMAP 0
+#define M_CONMCTXT_CNGCHMAP 0xffff
+#define V_CONMCTXT_CNGCHMAP(x) ((x) << S_CONMCTXT_CNGCHMAP)
+#define G_CONMCTXT_CNGCHMAP(x) \
+ (((x) >> S_CONMCTXT_CNGCHMAP) & M_CONMCTXT_CNGCHMAP)
+
+#define X_CONMCTXT_CNGTPMODE_QUEUE 1
+#define X_CONMCTXT_CNGTPMODE_CHANNEL 2
+
+/*
+ * T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
+ * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
+ * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
+ * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64. For Ingress Queues,
+ * we have a Going To Sleep register at offsets 8x+4.
+ *
+ * As noted above, we have many instances of the Simple Doorbell and Going To
+ * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a
+ * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
+ * avoid buffering of the writes to the Simple Doorbell and we want to use a
+ * non-contiguous offset for the Going To Sleep writes in order to avoid
+ * possible combining between them.
+ */
+#define SGE_UDB_SIZE 128
+#define SGE_UDB_KDOORBELL 8
+#define SGE_UDB_GTS 20
+
+/*
+ * CIM definitions.
+ * ================
+ */
+
+/*
+ * CIM register field values.
+ */
+#define X_MBOWNER_NONE 0
+#define X_MBOWNER_FW 1
+#define X_MBOWNER_PL 2
+
+/*
+ * PCI-E definitions.
+ * ==================
+ */
+#define X_WINDOW_SHIFT 10
+#define X_PCIEOFST_SHIFT 10
+
+/*
+ * TP definitions.
+ * ===============
+ */
+
+/*
+ * TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present. These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define W_FT_FCOE 1
+#define W_FT_PORT 3
+#define W_FT_VNIC_ID 17
+#define W_FT_VLAN 17
+#define W_FT_TOS 8
+#define W_FT_PROTOCOL 8
+#define W_FT_ETHERTYPE 16
+#define W_FT_MACMATCH 9
+#define W_FT_MPSHITTYPE 3
+#define W_FT_FRAGMENTATION 1
+
+#endif /* __T4_REGS_VALUES_H__ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_tcb.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_tcb.h
new file mode 100644
index 00000000..25435f9f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_tcb.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _T4_TCB_DEFS_H
+#define _T4_TCB_DEFS_H
+
+/* 105:96 */
+#define W_TCB_RSS_INFO 3
+#define S_TCB_RSS_INFO 0
+#define M_TCB_RSS_INFO 0x3ffULL
+#define V_TCB_RSS_INFO(x) ((x) << S_TCB_RSS_INFO)
+
+/* 191:160 */
+#define W_TCB_TIMESTAMP 5
+#define S_TCB_TIMESTAMP 0
+#define M_TCB_TIMESTAMP 0xffffffffULL
+#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
+
+/* 223:192 */
+#define S_TCB_T_RTT_TS_RECENT_AGE 0
+#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
+#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
+
+#endif /* _T4_TCB_DEFS_H */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4fw_interface.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4fw_interface.h
new file mode 100644
index 00000000..e80b58a3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4fw_interface.h
@@ -0,0 +1,2350 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _T4FW_INTERFACE_H_
+#define _T4FW_INTERFACE_H_
+
+/******************************************************************************
+ * R E T U R N V A L U E S
+ ********************************/
+
+enum fw_retval {
+ FW_SUCCESS = 0, /* completed successfully */
+ FW_EPERM = 1, /* operation not permitted */
+ FW_ENOENT = 2, /* no such file or directory */
+ FW_EIO = 5, /* input/output error; hw bad */
+ FW_ENOEXEC = 8, /* exec format error; inv microcode */
+ FW_EAGAIN = 11, /* try again */
+ FW_ENOMEM = 12, /* out of memory */
+ FW_EFAULT = 14, /* bad address; fw bad */
+ FW_EBUSY = 16, /* resource busy */
+ FW_EEXIST = 17, /* file exists */
+ FW_ENODEV = 19, /* no such device */
+ FW_EINVAL = 22, /* invalid argument */
+ FW_ENOSPC = 28, /* no space left on device */
+ FW_ENOSYS = 38, /* functionality not implemented */
+ FW_ENODATA = 61, /* no data available */
+ FW_EPROTO = 71, /* protocol error */
+ FW_EADDRINUSE = 98, /* address already in use */
+ FW_EADDRNOTAVAIL = 99, /* cannot assigned requested address */
+ FW_ENETDOWN = 100, /* network is down */
+ FW_ENETUNREACH = 101, /* network is unreachable */
+ FW_ENOBUFS = 105, /* no buffer space available */
+ FW_ETIMEDOUT = 110, /* timeout */
+ FW_EINPROGRESS = 115, /* fw internal */
+};
+
+/******************************************************************************
+ * M E M O R Y T Y P E s
+ ******************************/
+
+enum fw_memtype {
+ FW_MEMTYPE_EDC0 = 0x0,
+ FW_MEMTYPE_EDC1 = 0x1,
+ FW_MEMTYPE_EXTMEM = 0x2,
+ FW_MEMTYPE_FLASH = 0x4,
+ FW_MEMTYPE_INTERNAL = 0x5,
+ FW_MEMTYPE_EXTMEM1 = 0x6,
+};
+
+/******************************************************************************
+ * W O R K R E Q U E S T s
+ ********************************/
+
+enum fw_wr_opcodes {
+ FW_FILTER_WR = 0x02,
+ FW_ULPTX_WR = 0x04,
+ FW_TP_WR = 0x05,
+ FW_ETH_TX_PKT_WR = 0x08,
+ FW_ETH_TX_PKTS_WR = 0x09,
+ FW_ETH_TX_PKT_VM_WR = 0x11,
+ FW_ETH_TX_PKTS_VM_WR = 0x12,
+ FW_ETH_TX_PKTS2_WR = 0x78,
+};
+
+/*
+ * Generic work request header flit0
+ */
+struct fw_wr_hdr {
+ __be32 hi;
+ __be32 lo;
+};
+
+/* work request opcode (hi)
+ */
+#define S_FW_WR_OP 24
+#define M_FW_WR_OP 0xff
+#define V_FW_WR_OP(x) ((x) << S_FW_WR_OP)
+#define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP)
+
+/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER
+ */
+#define S_FW_WR_ATOMIC 23
+#define V_FW_WR_ATOMIC(x) ((x) << S_FW_WR_ATOMIC)
+
+/* work request immediate data length (hi)
+ */
+#define S_FW_WR_IMMDLEN 0
+#define M_FW_WR_IMMDLEN 0xff
+#define V_FW_WR_IMMDLEN(x) ((x) << S_FW_WR_IMMDLEN)
+#define G_FW_WR_IMMDLEN(x) \
+ (((x) >> S_FW_WR_IMMDLEN) & M_FW_WR_IMMDLEN)
+
+/* egress queue status update to egress queue status entry (lo)
+ */
+#define S_FW_WR_EQUEQ 30
+#define M_FW_WR_EQUEQ 0x1
+#define V_FW_WR_EQUEQ(x) ((x) << S_FW_WR_EQUEQ)
+#define G_FW_WR_EQUEQ(x) (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ)
+#define F_FW_WR_EQUEQ V_FW_WR_EQUEQ(1U)
+
+/* flow context identifier (lo)
+ */
+#define S_FW_WR_FLOWID 8
+#define V_FW_WR_FLOWID(x) ((x) << S_FW_WR_FLOWID)
+
+/* length in units of 16-bytes (lo)
+ */
+#define S_FW_WR_LEN16 0
+#define M_FW_WR_LEN16 0xff
+#define V_FW_WR_LEN16(x) ((x) << S_FW_WR_LEN16)
+#define G_FW_WR_LEN16(x) (((x) >> S_FW_WR_LEN16) & M_FW_WR_LEN16)
+
+struct fw_eth_tx_pkt_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be64 r3;
+};
+
+#define S_FW_ETH_TX_PKT_WR_IMMDLEN 0
+#define M_FW_ETH_TX_PKT_WR_IMMDLEN 0x1ff
+#define V_FW_ETH_TX_PKT_WR_IMMDLEN(x) ((x) << S_FW_ETH_TX_PKT_WR_IMMDLEN)
+#define G_FW_ETH_TX_PKT_WR_IMMDLEN(x) \
+ (((x) >> S_FW_ETH_TX_PKT_WR_IMMDLEN) & M_FW_ETH_TX_PKT_WR_IMMDLEN)
+
+struct fw_eth_tx_pkts_wr {
+ __be32 op_pkd;
+ __be32 equiq_to_len16;
+ __be32 r3;
+ __be16 plen;
+ __u8 npkt;
+ __u8 type;
+};
+
+struct fw_eth_tx_pkt_vm_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be32 r3[2];
+ __u8 ethmacdst[6];
+ __u8 ethmacsrc[6];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+struct fw_eth_tx_pkts_vm_wr {
+ __be32 op_pkd;
+ __be32 equiq_to_len16;
+ __be32 r3;
+ __be16 plen;
+ __u8 npkt;
+ __u8 r4;
+ __u8 ethmacdst[6];
+ __u8 ethmacsrc[6];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+/* filter wr reply code in cookie in CPL_SET_TCB_RPL */
+enum fw_filter_wr_cookie {
+ FW_FILTER_WR_SUCCESS,
+ FW_FILTER_WR_FLT_ADDED,
+ FW_FILTER_WR_FLT_DELETED,
+ FW_FILTER_WR_SMT_TBL_FULL,
+ FW_FILTER_WR_EINVAL,
+};
+
+struct fw_filter_wr {
+ __be32 op_pkd;
+ __be32 len16_pkd;
+ __be64 r3;
+ __be32 tid_to_iq;
+ __be32 del_filter_to_l2tix;
+ __be16 ethtype;
+ __be16 ethtypem;
+ __u8 frag_to_ovlan_vldm;
+ __u8 smac_sel;
+ __be16 rx_chan_rx_rpl_iq;
+ __be32 maci_to_matchtypem;
+ __u8 ptcl;
+ __u8 ptclm;
+ __u8 ttyp;
+ __u8 ttypm;
+ __be16 ivlan;
+ __be16 ivlanm;
+ __be16 ovlan;
+ __be16 ovlanm;
+ __u8 lip[16];
+ __u8 lipm[16];
+ __u8 fip[16];
+ __u8 fipm[16];
+ __be16 lp;
+ __be16 lpm;
+ __be16 fp;
+ __be16 fpm;
+ __be16 r7;
+ __u8 sma[6];
+};
+
+#define S_FW_FILTER_WR_TID 12
+#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID)
+
+#define S_FW_FILTER_WR_RQTYPE 11
+#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE)
+
+#define S_FW_FILTER_WR_NOREPLY 10
+#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY)
+
+#define S_FW_FILTER_WR_IQ 0
+#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ)
+
+#define S_FW_FILTER_WR_DEL_FILTER 31
+#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER)
+#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U)
+
+#define S_FW_FILTER_WR_RPTTID 25
+#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID)
+
+#define S_FW_FILTER_WR_DROP 24
+#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
+
+#define S_FW_FILTER_WR_DIRSTEER 23
+#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER)
+
+#define S_FW_FILTER_WR_MASKHASH 22
+#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH)
+
+#define S_FW_FILTER_WR_DIRSTEERHASH 21
+#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH)
+
+#define S_FW_FILTER_WR_LPBK 20
+#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK)
+
+#define S_FW_FILTER_WR_DMAC 19
+#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC)
+
+#define S_FW_FILTER_WR_INSVLAN 17
+#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN)
+
+#define S_FW_FILTER_WR_RMVLAN 16
+#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN)
+
+#define S_FW_FILTER_WR_HITCNTS 15
+#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
+
+#define S_FW_FILTER_WR_TXCHAN 13
+#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN)
+
+#define S_FW_FILTER_WR_PRIO 12
+#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO)
+
+#define S_FW_FILTER_WR_L2TIX 0
+#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)
+
+#define S_FW_FILTER_WR_FRAG 7
+#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG)
+
+#define S_FW_FILTER_WR_FRAGM 6
+#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM)
+
+#define S_FW_FILTER_WR_IVLAN_VLD 5
+#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD)
+
+#define S_FW_FILTER_WR_OVLAN_VLD 4
+#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
+
+#define S_FW_FILTER_WR_IVLAN_VLDM 3
+#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
+
+#define S_FW_FILTER_WR_OVLAN_VLDM 2
+#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
+
+#define S_FW_FILTER_WR_RX_CHAN 15
+#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
+
+#define S_FW_FILTER_WR_RX_RPL_IQ 0
+#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
+
+#define S_FW_FILTER_WR_MACI 23
+#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
+
+#define S_FW_FILTER_WR_MACIM 14
+#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
+
+#define S_FW_FILTER_WR_FCOE 13
+#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
+
+#define S_FW_FILTER_WR_FCOEM 12
+#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
+
+#define S_FW_FILTER_WR_PORT 9
+#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
+
+#define S_FW_FILTER_WR_PORTM 6
+#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
+
+#define S_FW_FILTER_WR_MATCHTYPE 3
+#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
+
+#define S_FW_FILTER_WR_MATCHTYPEM 0
+#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
+
+/******************************************************************************
+ * C O M M A N D s
+ *********************/
+
+/*
+ * The maximum length of time, in miliseconds, that we expect any firmware
+ * command to take to execute and return a reply to the host. The RESET
+ * and INITIALIZE commands can take a fair amount of time to execute but
+ * most execute in far less time than this maximum. This constant is used
+ * by host software to determine how long to wait for a firmware command
+ * reply before declaring the firmware as dead/unreachable ...
+ */
+#define FW_CMD_MAX_TIMEOUT 10000
+
+/*
+ * If a host driver does a HELLO and discovers that there's already a MASTER
+ * selected, we may have to wait for that MASTER to finish issuing RESET,
+ * configuration and INITIALIZE commands. Also, there's a possibility that
+ * our own HELLO may get lost if it happens right as the MASTER is issuign a
+ * RESET command, so we need to be willing to make a few retries of our HELLO.
+ */
+#define FW_CMD_HELLO_TIMEOUT (3 * FW_CMD_MAX_TIMEOUT)
+#define FW_CMD_HELLO_RETRIES 3
+
+enum fw_cmd_opcodes {
+ FW_LDST_CMD = 0x01,
+ FW_RESET_CMD = 0x03,
+ FW_HELLO_CMD = 0x04,
+ FW_BYE_CMD = 0x05,
+ FW_INITIALIZE_CMD = 0x06,
+ FW_CAPS_CONFIG_CMD = 0x07,
+ FW_PARAMS_CMD = 0x08,
+ FW_PFVF_CMD = 0x09,
+ FW_IQ_CMD = 0x10,
+ FW_EQ_ETH_CMD = 0x12,
+ FW_EQ_CTRL_CMD = 0x13,
+ FW_VI_CMD = 0x14,
+ FW_VI_MAC_CMD = 0x15,
+ FW_VI_RXMODE_CMD = 0x16,
+ FW_VI_ENABLE_CMD = 0x17,
+ FW_VI_STATS_CMD = 0x1a,
+ FW_PORT_CMD = 0x1b,
+ FW_RSS_IND_TBL_CMD = 0x20,
+ FW_RSS_GLB_CONFIG_CMD = 0x22,
+ FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_CLIP_CMD = 0x28,
+ FW_DEBUG_CMD = 0x81,
+};
+
+enum fw_cmd_cap {
+ FW_CMD_CAP_PORT = 0x04,
+};
+
+/*
+ * Generic command header flit0
+ */
+struct fw_cmd_hdr {
+ __be32 hi;
+ __be32 lo;
+};
+
+#define S_FW_CMD_OP 24
+#define M_FW_CMD_OP 0xff
+#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP)
+#define G_FW_CMD_OP(x) (((x) >> S_FW_CMD_OP) & M_FW_CMD_OP)
+
+#define S_FW_CMD_REQUEST 23
+#define M_FW_CMD_REQUEST 0x1
+#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST)
+#define G_FW_CMD_REQUEST(x) (((x) >> S_FW_CMD_REQUEST) & M_FW_CMD_REQUEST)
+#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U)
+
+#define S_FW_CMD_READ 22
+#define M_FW_CMD_READ 0x1
+#define V_FW_CMD_READ(x) ((x) << S_FW_CMD_READ)
+#define G_FW_CMD_READ(x) (((x) >> S_FW_CMD_READ) & M_FW_CMD_READ)
+#define F_FW_CMD_READ V_FW_CMD_READ(1U)
+
+#define S_FW_CMD_WRITE 21
+#define M_FW_CMD_WRITE 0x1
+#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE)
+#define G_FW_CMD_WRITE(x) (((x) >> S_FW_CMD_WRITE) & M_FW_CMD_WRITE)
+#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U)
+
+#define S_FW_CMD_EXEC 20
+#define M_FW_CMD_EXEC 0x1
+#define V_FW_CMD_EXEC(x) ((x) << S_FW_CMD_EXEC)
+#define G_FW_CMD_EXEC(x) (((x) >> S_FW_CMD_EXEC) & M_FW_CMD_EXEC)
+#define F_FW_CMD_EXEC V_FW_CMD_EXEC(1U)
+
+#define S_FW_CMD_RETVAL 8
+#define M_FW_CMD_RETVAL 0xff
+#define V_FW_CMD_RETVAL(x) ((x) << S_FW_CMD_RETVAL)
+#define G_FW_CMD_RETVAL(x) (((x) >> S_FW_CMD_RETVAL) & M_FW_CMD_RETVAL)
+
+#define S_FW_CMD_LEN16 0
+#define M_FW_CMD_LEN16 0xff
+#define V_FW_CMD_LEN16(x) ((x) << S_FW_CMD_LEN16)
+#define G_FW_CMD_LEN16(x) (((x) >> S_FW_CMD_LEN16) & M_FW_CMD_LEN16)
+
+#define FW_LEN16(fw_struct) V_FW_CMD_LEN16(sizeof(fw_struct) / 16)
+
+/* address spaces
+ */
+enum fw_ldst_addrspc {
+ FW_LDST_ADDRSPC_TP_PIO = 0x0010,
+};
+
+struct fw_ldst_cmd {
+ __be32 op_to_addrspace;
+ __be32 cycles_to_len16;
+ union fw_ldst {
+ struct fw_ldst_addrval {
+ __be32 addr;
+ __be32 val;
+ } addrval;
+ struct fw_ldst_idctxt {
+ __be32 physid;
+ __be32 msg_ctxtflush;
+ __be32 ctxt_data7;
+ __be32 ctxt_data6;
+ __be32 ctxt_data5;
+ __be32 ctxt_data4;
+ __be32 ctxt_data3;
+ __be32 ctxt_data2;
+ __be32 ctxt_data1;
+ __be32 ctxt_data0;
+ } idctxt;
+ struct fw_ldst_mdio {
+ __be16 paddr_mmd;
+ __be16 raddr;
+ __be16 vctl;
+ __be16 rval;
+ } mdio;
+ struct fw_ldst_mps {
+ __be16 fid_ctl;
+ __be16 rplcpf_pkd;
+ __be32 rplc127_96;
+ __be32 rplc95_64;
+ __be32 rplc63_32;
+ __be32 rplc31_0;
+ __be32 atrb;
+ __be16 vlan[16];
+ } mps;
+ struct fw_ldst_func {
+ __u8 access_ctl;
+ __u8 mod_index;
+ __be16 ctl_id;
+ __be32 offset;
+ __be64 data0;
+ __be64 data1;
+ } func;
+ struct fw_ldst_pcie {
+ __u8 ctrl_to_fn;
+ __u8 bnum;
+ __u8 r;
+ __u8 ext_r;
+ __u8 select_naccess;
+ __u8 pcie_fn;
+ __be16 nset_pkd;
+ __be32 data[12];
+ } pcie;
+ struct fw_ldst_i2c_deprecated {
+ __u8 pid_pkd;
+ __u8 base;
+ __u8 boffset;
+ __u8 data;
+ __be32 r9;
+ } i2c_deprecated;
+ struct fw_ldst_i2c {
+ __u8 pid;
+ __u8 did;
+ __u8 boffset;
+ __u8 blen;
+ __be32 r9;
+ __u8 data[48];
+ } i2c;
+ struct fw_ldst_le {
+ __be32 index;
+ __be32 r9;
+ __u8 val[33];
+ __u8 r11[7];
+ } le;
+ } u;
+};
+
+#define S_FW_LDST_CMD_ADDRSPACE 0
+#define M_FW_LDST_CMD_ADDRSPACE 0xff
+#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE)
+
+struct fw_reset_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 val;
+ __be32 halt_pkd;
+};
+
+#define S_FW_RESET_CMD_HALT 31
+#define M_FW_RESET_CMD_HALT 0x1
+#define V_FW_RESET_CMD_HALT(x) ((x) << S_FW_RESET_CMD_HALT)
+#define G_FW_RESET_CMD_HALT(x) \
+ (((x) >> S_FW_RESET_CMD_HALT) & M_FW_RESET_CMD_HALT)
+#define F_FW_RESET_CMD_HALT V_FW_RESET_CMD_HALT(1U)
+
+enum {
+ FW_HELLO_CMD_STAGE_OS = 0,
+};
+
+struct fw_hello_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 err_to_clearinit;
+ __be32 fwrev;
+};
+
+#define S_FW_HELLO_CMD_ERR 31
+#define M_FW_HELLO_CMD_ERR 0x1
+#define V_FW_HELLO_CMD_ERR(x) ((x) << S_FW_HELLO_CMD_ERR)
+#define G_FW_HELLO_CMD_ERR(x) \
+ (((x) >> S_FW_HELLO_CMD_ERR) & M_FW_HELLO_CMD_ERR)
+#define F_FW_HELLO_CMD_ERR V_FW_HELLO_CMD_ERR(1U)
+
+#define S_FW_HELLO_CMD_INIT 30
+#define M_FW_HELLO_CMD_INIT 0x1
+#define V_FW_HELLO_CMD_INIT(x) ((x) << S_FW_HELLO_CMD_INIT)
+#define G_FW_HELLO_CMD_INIT(x) \
+ (((x) >> S_FW_HELLO_CMD_INIT) & M_FW_HELLO_CMD_INIT)
+#define F_FW_HELLO_CMD_INIT V_FW_HELLO_CMD_INIT(1U)
+
+#define S_FW_HELLO_CMD_MASTERDIS 29
+#define M_FW_HELLO_CMD_MASTERDIS 0x1
+#define V_FW_HELLO_CMD_MASTERDIS(x) ((x) << S_FW_HELLO_CMD_MASTERDIS)
+#define G_FW_HELLO_CMD_MASTERDIS(x) \
+ (((x) >> S_FW_HELLO_CMD_MASTERDIS) & M_FW_HELLO_CMD_MASTERDIS)
+#define F_FW_HELLO_CMD_MASTERDIS V_FW_HELLO_CMD_MASTERDIS(1U)
+
+#define S_FW_HELLO_CMD_MASTERFORCE 28
+#define M_FW_HELLO_CMD_MASTERFORCE 0x1
+#define V_FW_HELLO_CMD_MASTERFORCE(x) ((x) << S_FW_HELLO_CMD_MASTERFORCE)
+#define G_FW_HELLO_CMD_MASTERFORCE(x) \
+ (((x) >> S_FW_HELLO_CMD_MASTERFORCE) & M_FW_HELLO_CMD_MASTERFORCE)
+#define F_FW_HELLO_CMD_MASTERFORCE V_FW_HELLO_CMD_MASTERFORCE(1U)
+
+#define S_FW_HELLO_CMD_MBMASTER 24
+#define M_FW_HELLO_CMD_MBMASTER 0xf
+#define V_FW_HELLO_CMD_MBMASTER(x) ((x) << S_FW_HELLO_CMD_MBMASTER)
+#define G_FW_HELLO_CMD_MBMASTER(x) \
+ (((x) >> S_FW_HELLO_CMD_MBMASTER) & M_FW_HELLO_CMD_MBMASTER)
+
+#define S_FW_HELLO_CMD_MBASYNCNOT 20
+#define M_FW_HELLO_CMD_MBASYNCNOT 0x7
+#define V_FW_HELLO_CMD_MBASYNCNOT(x) ((x) << S_FW_HELLO_CMD_MBASYNCNOT)
+#define G_FW_HELLO_CMD_MBASYNCNOT(x) \
+ (((x) >> S_FW_HELLO_CMD_MBASYNCNOT) & M_FW_HELLO_CMD_MBASYNCNOT)
+
+#define S_FW_HELLO_CMD_STAGE 17
+#define M_FW_HELLO_CMD_STAGE 0x7
+#define V_FW_HELLO_CMD_STAGE(x) ((x) << S_FW_HELLO_CMD_STAGE)
+#define G_FW_HELLO_CMD_STAGE(x) \
+ (((x) >> S_FW_HELLO_CMD_STAGE) & M_FW_HELLO_CMD_STAGE)
+
+#define S_FW_HELLO_CMD_CLEARINIT 16
+#define M_FW_HELLO_CMD_CLEARINIT 0x1
+#define V_FW_HELLO_CMD_CLEARINIT(x) ((x) << S_FW_HELLO_CMD_CLEARINIT)
+#define G_FW_HELLO_CMD_CLEARINIT(x) \
+ (((x) >> S_FW_HELLO_CMD_CLEARINIT) & M_FW_HELLO_CMD_CLEARINIT)
+#define F_FW_HELLO_CMD_CLEARINIT V_FW_HELLO_CMD_CLEARINIT(1U)
+
+struct fw_bye_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be64 r3;
+};
+
+struct fw_initialize_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be64 r3;
+};
+
+enum fw_caps_config_nic {
+ FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020,
+ FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040,
+};
+
+enum fw_memtype_cf {
+ FW_MEMTYPE_CF_FLASH = FW_MEMTYPE_FLASH,
+};
+
+struct fw_caps_config_cmd {
+ __be32 op_to_write;
+ __be32 cfvalid_to_len16;
+ __be32 r2;
+ __be32 hwmbitmap;
+ __be16 nbmcaps;
+ __be16 linkcaps;
+ __be16 switchcaps;
+ __be16 r3;
+ __be16 niccaps;
+ __be16 toecaps;
+ __be16 rdmacaps;
+ __be16 r4;
+ __be16 iscsicaps;
+ __be16 fcoecaps;
+ __be32 cfcsum;
+ __be32 finiver;
+ __be32 finicsum;
+};
+
+#define S_FW_CAPS_CONFIG_CMD_CFVALID 27
+#define M_FW_CAPS_CONFIG_CMD_CFVALID 0x1
+#define V_FW_CAPS_CONFIG_CMD_CFVALID(x) ((x) << S_FW_CAPS_CONFIG_CMD_CFVALID)
+#define G_FW_CAPS_CONFIG_CMD_CFVALID(x) \
+ (((x) >> S_FW_CAPS_CONFIG_CMD_CFVALID) & M_FW_CAPS_CONFIG_CMD_CFVALID)
+#define F_FW_CAPS_CONFIG_CMD_CFVALID V_FW_CAPS_CONFIG_CMD_CFVALID(1U)
+
+#define S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 24
+#define M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 0x7
+#define V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \
+ ((x) << S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF)
+#define G_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \
+ (((x) >> S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) & \
+ M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF)
+
+#define S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 16
+#define M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 0xff
+#define V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \
+ ((x) << S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF)
+#define G_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \
+ (((x) >> S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) & \
+ M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF)
+
+/*
+ * params command mnemonics
+ */
+enum fw_params_mnem {
+ FW_PARAMS_MNEM_DEV = 1, /* device params */
+ FW_PARAMS_MNEM_PFVF = 2, /* function params */
+ FW_PARAMS_MNEM_REG = 3, /* limited register access */
+ FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
+};
+
+/*
+ * device parameters
+ */
+enum fw_params_param_dev {
+ FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
+ FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
+ FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs
+ * allocated by the device's
+ * Lookup Engine
+ */
+ FW_PARAMS_PARAM_DEV_FWREV = 0x0B, /* fw version */
+ FW_PARAMS_PARAM_DEV_TPREV = 0x0C, /* tp version */
+ FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
+};
+
+/*
+ * physical and virtual function parameters
+ */
+enum fw_params_param_pfvf {
+ FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03,
+ FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
+ FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
+ FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
+ FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31,
+ FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A
+};
+
+/*
+ * dma queue parameters
+ */
+enum fw_params_param_dmaq {
+ FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01,
+ FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
+};
+
+#define S_FW_PARAMS_MNEM 24
+#define M_FW_PARAMS_MNEM 0xff
+#define V_FW_PARAMS_MNEM(x) ((x) << S_FW_PARAMS_MNEM)
+#define G_FW_PARAMS_MNEM(x) \
+ (((x) >> S_FW_PARAMS_MNEM) & M_FW_PARAMS_MNEM)
+
+#define S_FW_PARAMS_PARAM_X 16
+#define M_FW_PARAMS_PARAM_X 0xff
+#define V_FW_PARAMS_PARAM_X(x) ((x) << S_FW_PARAMS_PARAM_X)
+#define G_FW_PARAMS_PARAM_X(x) \
+ (((x) >> S_FW_PARAMS_PARAM_X) & M_FW_PARAMS_PARAM_X)
+
+#define S_FW_PARAMS_PARAM_Y 8
+#define M_FW_PARAMS_PARAM_Y 0xff
+#define V_FW_PARAMS_PARAM_Y(x) ((x) << S_FW_PARAMS_PARAM_Y)
+#define G_FW_PARAMS_PARAM_Y(x) \
+ (((x) >> S_FW_PARAMS_PARAM_Y) & M_FW_PARAMS_PARAM_Y)
+
+#define S_FW_PARAMS_PARAM_Z 0
+#define M_FW_PARAMS_PARAM_Z 0xff
+#define V_FW_PARAMS_PARAM_Z(x) ((x) << S_FW_PARAMS_PARAM_Z)
+#define G_FW_PARAMS_PARAM_Z(x) \
+ (((x) >> S_FW_PARAMS_PARAM_Z) & M_FW_PARAMS_PARAM_Z)
+
+#define S_FW_PARAMS_PARAM_YZ 0
+#define M_FW_PARAMS_PARAM_YZ 0xffff
+#define V_FW_PARAMS_PARAM_YZ(x) ((x) << S_FW_PARAMS_PARAM_YZ)
+#define G_FW_PARAMS_PARAM_YZ(x) \
+ (((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ)
+
+#define S_FW_PARAMS_PARAM_XYZ 0
+#define M_FW_PARAMS_PARAM_XYZ 0xffffff
+#define V_FW_PARAMS_PARAM_XYZ(x) ((x) << S_FW_PARAMS_PARAM_XYZ)
+
+struct fw_params_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ struct fw_params_param {
+ __be32 mnem;
+ __be32 val;
+ } param[7];
+};
+
+#define S_FW_PARAMS_CMD_PFN 8
+#define M_FW_PARAMS_CMD_PFN 0x7
+#define V_FW_PARAMS_CMD_PFN(x) ((x) << S_FW_PARAMS_CMD_PFN)
+#define G_FW_PARAMS_CMD_PFN(x) \
+ (((x) >> S_FW_PARAMS_CMD_PFN) & M_FW_PARAMS_CMD_PFN)
+
+#define S_FW_PARAMS_CMD_VFN 0
+#define M_FW_PARAMS_CMD_VFN 0xff
+#define V_FW_PARAMS_CMD_VFN(x) ((x) << S_FW_PARAMS_CMD_VFN)
+#define G_FW_PARAMS_CMD_VFN(x) \
+ (((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN)
+
+struct fw_pfvf_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ __be32 niqflint_niq;
+ __be32 type_to_neq;
+ __be32 tc_to_nexactf;
+ __be32 r_caps_to_nethctrl;
+ __be16 nricq;
+ __be16 nriqp;
+ __be32 r4;
+};
+
+#define S_FW_PFVF_CMD_PFN 8
+#define V_FW_PFVF_CMD_PFN(x) ((x) << S_FW_PFVF_CMD_PFN)
+
+#define S_FW_PFVF_CMD_VFN 0
+#define V_FW_PFVF_CMD_VFN(x) ((x) << S_FW_PFVF_CMD_VFN)
+
+#define S_FW_PFVF_CMD_NIQFLINT 20
+#define M_FW_PFVF_CMD_NIQFLINT 0xfff
+#define G_FW_PFVF_CMD_NIQFLINT(x) \
+ (((x) >> S_FW_PFVF_CMD_NIQFLINT) & M_FW_PFVF_CMD_NIQFLINT)
+
+#define S_FW_PFVF_CMD_NIQ 0
+#define M_FW_PFVF_CMD_NIQ 0xfffff
+#define G_FW_PFVF_CMD_NIQ(x) \
+ (((x) >> S_FW_PFVF_CMD_NIQ) & M_FW_PFVF_CMD_NIQ)
+
+#define S_FW_PFVF_CMD_PMASK 20
+#define M_FW_PFVF_CMD_PMASK 0xf
+#define G_FW_PFVF_CMD_PMASK(x) \
+ (((x) >> S_FW_PFVF_CMD_PMASK) & M_FW_PFVF_CMD_PMASK)
+
+#define S_FW_PFVF_CMD_NEQ 0
+#define M_FW_PFVF_CMD_NEQ 0xfffff
+#define G_FW_PFVF_CMD_NEQ(x) \
+ (((x) >> S_FW_PFVF_CMD_NEQ) & M_FW_PFVF_CMD_NEQ)
+
+#define S_FW_PFVF_CMD_TC 24
+#define M_FW_PFVF_CMD_TC 0xff
+#define G_FW_PFVF_CMD_TC(x) \
+ (((x) >> S_FW_PFVF_CMD_TC) & M_FW_PFVF_CMD_TC)
+
+#define S_FW_PFVF_CMD_NVI 16
+#define M_FW_PFVF_CMD_NVI 0xff
+#define G_FW_PFVF_CMD_NVI(x) \
+ (((x) >> S_FW_PFVF_CMD_NVI) & M_FW_PFVF_CMD_NVI)
+
+#define S_FW_PFVF_CMD_NEXACTF 0
+#define M_FW_PFVF_CMD_NEXACTF 0xffff
+#define G_FW_PFVF_CMD_NEXACTF(x) \
+ (((x) >> S_FW_PFVF_CMD_NEXACTF) & M_FW_PFVF_CMD_NEXACTF)
+
+#define S_FW_PFVF_CMD_R_CAPS 24
+#define M_FW_PFVF_CMD_R_CAPS 0xff
+#define G_FW_PFVF_CMD_R_CAPS(x) \
+ (((x) >> S_FW_PFVF_CMD_R_CAPS) & M_FW_PFVF_CMD_R_CAPS)
+
+#define S_FW_PFVF_CMD_WX_CAPS 16
+#define M_FW_PFVF_CMD_WX_CAPS 0xff
+#define G_FW_PFVF_CMD_WX_CAPS(x) \
+ (((x) >> S_FW_PFVF_CMD_WX_CAPS) & M_FW_PFVF_CMD_WX_CAPS)
+
+#define S_FW_PFVF_CMD_NETHCTRL 0
+#define M_FW_PFVF_CMD_NETHCTRL 0xffff
+#define G_FW_PFVF_CMD_NETHCTRL(x) \
+ (((x) >> S_FW_PFVF_CMD_NETHCTRL) & M_FW_PFVF_CMD_NETHCTRL)
+
+/*
+ * ingress queue type; the first 1K ingress queues can have associated 0,
+ * 1 or 2 free lists and an interrupt, all other ingress queues lack these
+ * capabilities
+ */
+enum fw_iq_type {
+ FW_IQ_TYPE_FL_INT_CAP,
+};
+
+enum fw_iq_iqtype {
+ FW_IQ_IQTYPE_NIC = 1,
+ FW_IQ_IQTYPE_OFLD,
+};
+
+struct fw_iq_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be16 physiqid;
+ __be16 iqid;
+ __be16 fl0id;
+ __be16 fl1id;
+ __be32 type_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_to_fl0congen;
+ __be16 fl0dcaen_to_fl0cidxfthresh;
+ __be16 fl0size;
+ __be64 fl0addr;
+ __be32 fl1cngchmap_to_fl1congen;
+ __be16 fl1dcaen_to_fl1cidxfthresh;
+ __be16 fl1size;
+ __be64 fl1addr;
+};
+
+#define S_FW_IQ_CMD_PFN 8
+#define M_FW_IQ_CMD_PFN 0x7
+#define V_FW_IQ_CMD_PFN(x) ((x) << S_FW_IQ_CMD_PFN)
+#define G_FW_IQ_CMD_PFN(x) (((x) >> S_FW_IQ_CMD_PFN) & M_FW_IQ_CMD_PFN)
+
+#define S_FW_IQ_CMD_VFN 0
+#define M_FW_IQ_CMD_VFN 0xff
+#define V_FW_IQ_CMD_VFN(x) ((x) << S_FW_IQ_CMD_VFN)
+#define G_FW_IQ_CMD_VFN(x) (((x) >> S_FW_IQ_CMD_VFN) & M_FW_IQ_CMD_VFN)
+
+#define S_FW_IQ_CMD_ALLOC 31
+#define M_FW_IQ_CMD_ALLOC 0x1
+#define V_FW_IQ_CMD_ALLOC(x) ((x) << S_FW_IQ_CMD_ALLOC)
+#define G_FW_IQ_CMD_ALLOC(x) \
+ (((x) >> S_FW_IQ_CMD_ALLOC) & M_FW_IQ_CMD_ALLOC)
+#define F_FW_IQ_CMD_ALLOC V_FW_IQ_CMD_ALLOC(1U)
+
+#define S_FW_IQ_CMD_FREE 30
+#define M_FW_IQ_CMD_FREE 0x1
+#define V_FW_IQ_CMD_FREE(x) ((x) << S_FW_IQ_CMD_FREE)
+#define G_FW_IQ_CMD_FREE(x) (((x) >> S_FW_IQ_CMD_FREE) & M_FW_IQ_CMD_FREE)
+#define F_FW_IQ_CMD_FREE V_FW_IQ_CMD_FREE(1U)
+
+#define S_FW_IQ_CMD_IQSTART 28
+#define M_FW_IQ_CMD_IQSTART 0x1
+#define V_FW_IQ_CMD_IQSTART(x) ((x) << S_FW_IQ_CMD_IQSTART)
+#define G_FW_IQ_CMD_IQSTART(x) \
+ (((x) >> S_FW_IQ_CMD_IQSTART) & M_FW_IQ_CMD_IQSTART)
+#define F_FW_IQ_CMD_IQSTART V_FW_IQ_CMD_IQSTART(1U)
+
+#define S_FW_IQ_CMD_IQSTOP 27
+#define M_FW_IQ_CMD_IQSTOP 0x1
+#define V_FW_IQ_CMD_IQSTOP(x) ((x) << S_FW_IQ_CMD_IQSTOP)
+#define G_FW_IQ_CMD_IQSTOP(x) \
+ (((x) >> S_FW_IQ_CMD_IQSTOP) & M_FW_IQ_CMD_IQSTOP)
+#define F_FW_IQ_CMD_IQSTOP V_FW_IQ_CMD_IQSTOP(1U)
+
+#define S_FW_IQ_CMD_TYPE 29
+#define M_FW_IQ_CMD_TYPE 0x7
+#define V_FW_IQ_CMD_TYPE(x) ((x) << S_FW_IQ_CMD_TYPE)
+#define G_FW_IQ_CMD_TYPE(x) (((x) >> S_FW_IQ_CMD_TYPE) & M_FW_IQ_CMD_TYPE)
+
+#define S_FW_IQ_CMD_IQASYNCH 28
+#define M_FW_IQ_CMD_IQASYNCH 0x1
+#define V_FW_IQ_CMD_IQASYNCH(x) ((x) << S_FW_IQ_CMD_IQASYNCH)
+#define G_FW_IQ_CMD_IQASYNCH(x) \
+ (((x) >> S_FW_IQ_CMD_IQASYNCH) & M_FW_IQ_CMD_IQASYNCH)
+#define F_FW_IQ_CMD_IQASYNCH V_FW_IQ_CMD_IQASYNCH(1U)
+
+#define S_FW_IQ_CMD_VIID 16
+#define M_FW_IQ_CMD_VIID 0xfff
+#define V_FW_IQ_CMD_VIID(x) ((x) << S_FW_IQ_CMD_VIID)
+#define G_FW_IQ_CMD_VIID(x) (((x) >> S_FW_IQ_CMD_VIID) & M_FW_IQ_CMD_VIID)
+
+#define S_FW_IQ_CMD_IQANDST 15
+#define M_FW_IQ_CMD_IQANDST 0x1
+#define V_FW_IQ_CMD_IQANDST(x) ((x) << S_FW_IQ_CMD_IQANDST)
+#define G_FW_IQ_CMD_IQANDST(x) \
+ (((x) >> S_FW_IQ_CMD_IQANDST) & M_FW_IQ_CMD_IQANDST)
+#define F_FW_IQ_CMD_IQANDST V_FW_IQ_CMD_IQANDST(1U)
+
+#define S_FW_IQ_CMD_IQANUD 12
+#define M_FW_IQ_CMD_IQANUD 0x3
+#define V_FW_IQ_CMD_IQANUD(x) ((x) << S_FW_IQ_CMD_IQANUD)
+#define G_FW_IQ_CMD_IQANUD(x) \
+ (((x) >> S_FW_IQ_CMD_IQANUD) & M_FW_IQ_CMD_IQANUD)
+
+#define S_FW_IQ_CMD_IQANDSTINDEX 0
+#define M_FW_IQ_CMD_IQANDSTINDEX 0xfff
+#define V_FW_IQ_CMD_IQANDSTINDEX(x) ((x) << S_FW_IQ_CMD_IQANDSTINDEX)
+#define G_FW_IQ_CMD_IQANDSTINDEX(x) \
+ (((x) >> S_FW_IQ_CMD_IQANDSTINDEX) & M_FW_IQ_CMD_IQANDSTINDEX)
+
+#define S_FW_IQ_CMD_IQGTSMODE 14
+#define M_FW_IQ_CMD_IQGTSMODE 0x1
+#define V_FW_IQ_CMD_IQGTSMODE(x) ((x) << S_FW_IQ_CMD_IQGTSMODE)
+#define G_FW_IQ_CMD_IQGTSMODE(x) \
+ (((x) >> S_FW_IQ_CMD_IQGTSMODE) & M_FW_IQ_CMD_IQGTSMODE)
+#define F_FW_IQ_CMD_IQGTSMODE V_FW_IQ_CMD_IQGTSMODE(1U)
+
+#define S_FW_IQ_CMD_IQPCIECH 12
+#define M_FW_IQ_CMD_IQPCIECH 0x3
+#define V_FW_IQ_CMD_IQPCIECH(x) ((x) << S_FW_IQ_CMD_IQPCIECH)
+#define G_FW_IQ_CMD_IQPCIECH(x) \
+ (((x) >> S_FW_IQ_CMD_IQPCIECH) & M_FW_IQ_CMD_IQPCIECH)
+
+#define S_FW_IQ_CMD_IQINTCNTTHRESH 4
+#define M_FW_IQ_CMD_IQINTCNTTHRESH 0x3
+#define V_FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << S_FW_IQ_CMD_IQINTCNTTHRESH)
+#define G_FW_IQ_CMD_IQINTCNTTHRESH(x) \
+ (((x) >> S_FW_IQ_CMD_IQINTCNTTHRESH) & M_FW_IQ_CMD_IQINTCNTTHRESH)
+
+#define S_FW_IQ_CMD_IQESIZE 0
+#define M_FW_IQ_CMD_IQESIZE 0x3
+#define V_FW_IQ_CMD_IQESIZE(x) ((x) << S_FW_IQ_CMD_IQESIZE)
+#define G_FW_IQ_CMD_IQESIZE(x) \
+ (((x) >> S_FW_IQ_CMD_IQESIZE) & M_FW_IQ_CMD_IQESIZE)
+
+#define S_FW_IQ_CMD_IQRO 30
+#define M_FW_IQ_CMD_IQRO 0x1
+#define V_FW_IQ_CMD_IQRO(x) ((x) << S_FW_IQ_CMD_IQRO)
+#define G_FW_IQ_CMD_IQRO(x) \
+ (((x) >> S_FW_IQ_CMD_IQRO) & M_FW_IQ_CMD_IQRO)
+#define F_FW_IQ_CMD_IQRO V_FW_IQ_CMD_IQRO(1U)
+
+#define S_FW_IQ_CMD_IQFLINTCONGEN 27
+#define M_FW_IQ_CMD_IQFLINTCONGEN 0x1
+#define V_FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << S_FW_IQ_CMD_IQFLINTCONGEN)
+#define G_FW_IQ_CMD_IQFLINTCONGEN(x) \
+ (((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN)
+#define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U)
+
+#define S_FW_IQ_CMD_IQTYPE 24
+#define V_FW_IQ_CMD_IQTYPE(x) ((x) << S_FW_IQ_CMD_IQTYPE)
+
+#define S_FW_IQ_CMD_FL0CNGCHMAP 20
+#define M_FW_IQ_CMD_FL0CNGCHMAP 0xf
+#define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP)
+#define G_FW_IQ_CMD_FL0CNGCHMAP(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CNGCHMAP) & M_FW_IQ_CMD_FL0CNGCHMAP)
+
+#define S_FW_IQ_CMD_FL0DATARO 12
+#define M_FW_IQ_CMD_FL0DATARO 0x1
+#define V_FW_IQ_CMD_FL0DATARO(x) ((x) << S_FW_IQ_CMD_FL0DATARO)
+#define G_FW_IQ_CMD_FL0DATARO(x) \
+ (((x) >> S_FW_IQ_CMD_FL0DATARO) & M_FW_IQ_CMD_FL0DATARO)
+#define F_FW_IQ_CMD_FL0DATARO V_FW_IQ_CMD_FL0DATARO(1U)
+
+#define S_FW_IQ_CMD_FL0CONGCIF 11
+#define M_FW_IQ_CMD_FL0CONGCIF 0x1
+#define V_FW_IQ_CMD_FL0CONGCIF(x) ((x) << S_FW_IQ_CMD_FL0CONGCIF)
+#define G_FW_IQ_CMD_FL0CONGCIF(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CONGCIF) & M_FW_IQ_CMD_FL0CONGCIF)
+#define F_FW_IQ_CMD_FL0CONGCIF V_FW_IQ_CMD_FL0CONGCIF(1U)
+
+#define S_FW_IQ_CMD_FL0FETCHRO 6
+#define M_FW_IQ_CMD_FL0FETCHRO 0x1
+#define V_FW_IQ_CMD_FL0FETCHRO(x) ((x) << S_FW_IQ_CMD_FL0FETCHRO)
+#define G_FW_IQ_CMD_FL0FETCHRO(x) \
+ (((x) >> S_FW_IQ_CMD_FL0FETCHRO) & M_FW_IQ_CMD_FL0FETCHRO)
+#define F_FW_IQ_CMD_FL0FETCHRO V_FW_IQ_CMD_FL0FETCHRO(1U)
+
+#define S_FW_IQ_CMD_FL0HOSTFCMODE 4
+#define M_FW_IQ_CMD_FL0HOSTFCMODE 0x3
+#define V_FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << S_FW_IQ_CMD_FL0HOSTFCMODE)
+#define G_FW_IQ_CMD_FL0HOSTFCMODE(x) \
+ (((x) >> S_FW_IQ_CMD_FL0HOSTFCMODE) & M_FW_IQ_CMD_FL0HOSTFCMODE)
+
+#define S_FW_IQ_CMD_FL0PADEN 2
+#define M_FW_IQ_CMD_FL0PADEN 0x1
+#define V_FW_IQ_CMD_FL0PADEN(x) ((x) << S_FW_IQ_CMD_FL0PADEN)
+#define G_FW_IQ_CMD_FL0PADEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0PADEN) & M_FW_IQ_CMD_FL0PADEN)
+#define F_FW_IQ_CMD_FL0PADEN V_FW_IQ_CMD_FL0PADEN(1U)
+
+#define S_FW_IQ_CMD_FL0PACKEN 1
+#define M_FW_IQ_CMD_FL0PACKEN 0x1
+#define V_FW_IQ_CMD_FL0PACKEN(x) ((x) << S_FW_IQ_CMD_FL0PACKEN)
+#define G_FW_IQ_CMD_FL0PACKEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0PACKEN) & M_FW_IQ_CMD_FL0PACKEN)
+#define F_FW_IQ_CMD_FL0PACKEN V_FW_IQ_CMD_FL0PACKEN(1U)
+
+#define S_FW_IQ_CMD_FL0CONGEN 0
+#define M_FW_IQ_CMD_FL0CONGEN 0x1
+#define V_FW_IQ_CMD_FL0CONGEN(x) ((x) << S_FW_IQ_CMD_FL0CONGEN)
+#define G_FW_IQ_CMD_FL0CONGEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CONGEN) & M_FW_IQ_CMD_FL0CONGEN)
+#define F_FW_IQ_CMD_FL0CONGEN V_FW_IQ_CMD_FL0CONGEN(1U)
+
+#define S_FW_IQ_CMD_FL0FBMIN 7
+#define M_FW_IQ_CMD_FL0FBMIN 0x7
+#define V_FW_IQ_CMD_FL0FBMIN(x) ((x) << S_FW_IQ_CMD_FL0FBMIN)
+#define G_FW_IQ_CMD_FL0FBMIN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0FBMIN) & M_FW_IQ_CMD_FL0FBMIN)
+
+#define S_FW_IQ_CMD_FL0FBMAX 4
+#define M_FW_IQ_CMD_FL0FBMAX 0x7
+#define V_FW_IQ_CMD_FL0FBMAX(x) ((x) << S_FW_IQ_CMD_FL0FBMAX)
+#define G_FW_IQ_CMD_FL0FBMAX(x) \
+ (((x) >> S_FW_IQ_CMD_FL0FBMAX) & M_FW_IQ_CMD_FL0FBMAX)
+
+struct fw_eq_eth_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 eqid_pkd;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ __be32 autoequiqe_to_viid;
+ __be32 r8_lo;
+ __be64 r9;
+};
+
+#define S_FW_EQ_ETH_CMD_PFN 8
+#define M_FW_EQ_ETH_CMD_PFN 0x7
+#define V_FW_EQ_ETH_CMD_PFN(x) ((x) << S_FW_EQ_ETH_CMD_PFN)
+#define G_FW_EQ_ETH_CMD_PFN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PFN) & M_FW_EQ_ETH_CMD_PFN)
+
+#define S_FW_EQ_ETH_CMD_VFN 0
+#define M_FW_EQ_ETH_CMD_VFN 0xff
+#define V_FW_EQ_ETH_CMD_VFN(x) ((x) << S_FW_EQ_ETH_CMD_VFN)
+#define G_FW_EQ_ETH_CMD_VFN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_VFN) & M_FW_EQ_ETH_CMD_VFN)
+
+#define S_FW_EQ_ETH_CMD_ALLOC 31
+#define M_FW_EQ_ETH_CMD_ALLOC 0x1
+#define V_FW_EQ_ETH_CMD_ALLOC(x) ((x) << S_FW_EQ_ETH_CMD_ALLOC)
+#define G_FW_EQ_ETH_CMD_ALLOC(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_ALLOC) & M_FW_EQ_ETH_CMD_ALLOC)
+#define F_FW_EQ_ETH_CMD_ALLOC V_FW_EQ_ETH_CMD_ALLOC(1U)
+
+#define S_FW_EQ_ETH_CMD_FREE 30
+#define M_FW_EQ_ETH_CMD_FREE 0x1
+#define V_FW_EQ_ETH_CMD_FREE(x) ((x) << S_FW_EQ_ETH_CMD_FREE)
+#define G_FW_EQ_ETH_CMD_FREE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FREE) & M_FW_EQ_ETH_CMD_FREE)
+#define F_FW_EQ_ETH_CMD_FREE V_FW_EQ_ETH_CMD_FREE(1U)
+
+#define S_FW_EQ_ETH_CMD_EQSTART 28
+#define M_FW_EQ_ETH_CMD_EQSTART 0x1
+#define V_FW_EQ_ETH_CMD_EQSTART(x) ((x) << S_FW_EQ_ETH_CMD_EQSTART)
+#define G_FW_EQ_ETH_CMD_EQSTART(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_EQSTART) & M_FW_EQ_ETH_CMD_EQSTART)
+#define F_FW_EQ_ETH_CMD_EQSTART V_FW_EQ_ETH_CMD_EQSTART(1U)
+
+#define S_FW_EQ_ETH_CMD_EQID 0
+#define M_FW_EQ_ETH_CMD_EQID 0xfffff
+#define V_FW_EQ_ETH_CMD_EQID(x) ((x) << S_FW_EQ_ETH_CMD_EQID)
+#define G_FW_EQ_ETH_CMD_EQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID)
+
+#define S_FW_EQ_ETH_CMD_PHYSEQID 0
+#define M_FW_EQ_ETH_CMD_PHYSEQID 0xfffff
+#define G_FW_EQ_ETH_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PHYSEQID) & M_FW_EQ_ETH_CMD_PHYSEQID)
+
+#define S_FW_EQ_ETH_CMD_FETCHRO 22
+#define M_FW_EQ_ETH_CMD_FETCHRO 0x1
+#define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO)
+#define G_FW_EQ_ETH_CMD_FETCHRO(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FETCHRO) & M_FW_EQ_ETH_CMD_FETCHRO)
+#define F_FW_EQ_ETH_CMD_FETCHRO V_FW_EQ_ETH_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_ETH_CMD_HOSTFCMODE 20
+#define M_FW_EQ_ETH_CMD_HOSTFCMODE 0x3
+#define V_FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_ETH_CMD_HOSTFCMODE)
+#define G_FW_EQ_ETH_CMD_HOSTFCMODE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_HOSTFCMODE) & M_FW_EQ_ETH_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_ETH_CMD_PCIECHN 16
+#define M_FW_EQ_ETH_CMD_PCIECHN 0x3
+#define V_FW_EQ_ETH_CMD_PCIECHN(x) ((x) << S_FW_EQ_ETH_CMD_PCIECHN)
+#define G_FW_EQ_ETH_CMD_PCIECHN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PCIECHN) & M_FW_EQ_ETH_CMD_PCIECHN)
+
+#define S_FW_EQ_ETH_CMD_IQID 0
+#define M_FW_EQ_ETH_CMD_IQID 0xffff
+#define V_FW_EQ_ETH_CMD_IQID(x) ((x) << S_FW_EQ_ETH_CMD_IQID)
+#define G_FW_EQ_ETH_CMD_IQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_IQID) & M_FW_EQ_ETH_CMD_IQID)
+
+#define S_FW_EQ_ETH_CMD_FBMIN 23
+#define M_FW_EQ_ETH_CMD_FBMIN 0x7
+#define V_FW_EQ_ETH_CMD_FBMIN(x) ((x) << S_FW_EQ_ETH_CMD_FBMIN)
+#define G_FW_EQ_ETH_CMD_FBMIN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FBMIN) & M_FW_EQ_ETH_CMD_FBMIN)
+
+#define S_FW_EQ_ETH_CMD_FBMAX 20
+#define M_FW_EQ_ETH_CMD_FBMAX 0x7
+#define V_FW_EQ_ETH_CMD_FBMAX(x) ((x) << S_FW_EQ_ETH_CMD_FBMAX)
+#define G_FW_EQ_ETH_CMD_FBMAX(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FBMAX) & M_FW_EQ_ETH_CMD_FBMAX)
+
+#define S_FW_EQ_ETH_CMD_CIDXFTHRESH 16
+#define M_FW_EQ_ETH_CMD_CIDXFTHRESH 0x7
+#define V_FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_ETH_CMD_CIDXFTHRESH)
+#define G_FW_EQ_ETH_CMD_CIDXFTHRESH(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_CIDXFTHRESH) & M_FW_EQ_ETH_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_ETH_CMD_EQSIZE 0
+#define M_FW_EQ_ETH_CMD_EQSIZE 0xffff
+#define V_FW_EQ_ETH_CMD_EQSIZE(x) ((x) << S_FW_EQ_ETH_CMD_EQSIZE)
+#define G_FW_EQ_ETH_CMD_EQSIZE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_EQSIZE) & M_FW_EQ_ETH_CMD_EQSIZE)
+
+#define S_FW_EQ_ETH_CMD_AUTOEQUEQE 30
+#define M_FW_EQ_ETH_CMD_AUTOEQUEQE 0x1
+#define V_FW_EQ_ETH_CMD_AUTOEQUEQE(x) ((x) << S_FW_EQ_ETH_CMD_AUTOEQUEQE)
+#define G_FW_EQ_ETH_CMD_AUTOEQUEQE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_AUTOEQUEQE) & M_FW_EQ_ETH_CMD_AUTOEQUEQE)
+#define F_FW_EQ_ETH_CMD_AUTOEQUEQE V_FW_EQ_ETH_CMD_AUTOEQUEQE(1U)
+
+#define S_FW_EQ_ETH_CMD_VIID 16
+#define M_FW_EQ_ETH_CMD_VIID 0xfff
+#define V_FW_EQ_ETH_CMD_VIID(x) ((x) << S_FW_EQ_ETH_CMD_VIID)
+#define G_FW_EQ_ETH_CMD_VIID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID)
+
+struct fw_eq_ctrl_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 cmpliqid_eqid;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+};
+
+#define S_FW_EQ_CTRL_CMD_PFN 8
+#define V_FW_EQ_CTRL_CMD_PFN(x) ((x) << S_FW_EQ_CTRL_CMD_PFN)
+
+#define S_FW_EQ_CTRL_CMD_VFN 0
+#define V_FW_EQ_CTRL_CMD_VFN(x) ((x) << S_FW_EQ_CTRL_CMD_VFN)
+
+#define S_FW_EQ_CTRL_CMD_ALLOC 31
+#define V_FW_EQ_CTRL_CMD_ALLOC(x) ((x) << S_FW_EQ_CTRL_CMD_ALLOC)
+#define F_FW_EQ_CTRL_CMD_ALLOC V_FW_EQ_CTRL_CMD_ALLOC(1U)
+
+#define S_FW_EQ_CTRL_CMD_FREE 30
+#define V_FW_EQ_CTRL_CMD_FREE(x) ((x) << S_FW_EQ_CTRL_CMD_FREE)
+#define F_FW_EQ_CTRL_CMD_FREE V_FW_EQ_CTRL_CMD_FREE(1U)
+
+#define S_FW_EQ_CTRL_CMD_EQSTART 28
+#define V_FW_EQ_CTRL_CMD_EQSTART(x) ((x) << S_FW_EQ_CTRL_CMD_EQSTART)
+#define F_FW_EQ_CTRL_CMD_EQSTART V_FW_EQ_CTRL_CMD_EQSTART(1U)
+
+#define S_FW_EQ_CTRL_CMD_CMPLIQID 20
+#define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID)
+
+#define S_FW_EQ_CTRL_CMD_EQID 0
+#define M_FW_EQ_CTRL_CMD_EQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_EQID(x) ((x) << S_FW_EQ_CTRL_CMD_EQID)
+#define G_FW_EQ_CTRL_CMD_EQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_EQID) & M_FW_EQ_CTRL_CMD_EQID)
+
+#define S_FW_EQ_CTRL_CMD_PHYSEQID 0
+#define M_FW_EQ_CTRL_CMD_PHYSEQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_PHYSEQID(x) ((x) << S_FW_EQ_CTRL_CMD_PHYSEQID)
+#define G_FW_EQ_CTRL_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_PHYSEQID) & M_FW_EQ_CTRL_CMD_PHYSEQID)
+
+#define S_FW_EQ_CTRL_CMD_FETCHRO 22
+#define V_FW_EQ_CTRL_CMD_FETCHRO(x) ((x) << S_FW_EQ_CTRL_CMD_FETCHRO)
+#define F_FW_EQ_CTRL_CMD_FETCHRO V_FW_EQ_CTRL_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_CTRL_CMD_HOSTFCMODE 20
+#define M_FW_EQ_CTRL_CMD_HOSTFCMODE 0x3
+#define V_FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_CTRL_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_CTRL_CMD_PCIECHN 16
+#define V_FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << S_FW_EQ_CTRL_CMD_PCIECHN)
+
+#define S_FW_EQ_CTRL_CMD_IQID 0
+#define V_FW_EQ_CTRL_CMD_IQID(x) ((x) << S_FW_EQ_CTRL_CMD_IQID)
+
+#define S_FW_EQ_CTRL_CMD_FBMIN 23
+#define V_FW_EQ_CTRL_CMD_FBMIN(x) ((x) << S_FW_EQ_CTRL_CMD_FBMIN)
+
+#define S_FW_EQ_CTRL_CMD_FBMAX 20
+#define V_FW_EQ_CTRL_CMD_FBMAX(x) ((x) << S_FW_EQ_CTRL_CMD_FBMAX)
+
+#define S_FW_EQ_CTRL_CMD_CIDXFTHRESH 16
+#define V_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_CTRL_CMD_EQSIZE 0
+#define V_FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << S_FW_EQ_CTRL_CMD_EQSIZE)
+
+enum fw_vi_func {
+ FW_VI_FUNC_ETH,
+};
+
+struct fw_vi_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be16 type_to_viid;
+ __u8 mac[6];
+ __u8 portid_pkd;
+ __u8 nmac;
+ __u8 nmac0[6];
+ __be16 norss_rsssize;
+ __u8 nmac1[6];
+ __be16 idsiiq_pkd;
+ __u8 nmac2[6];
+ __be16 idseiq_pkd;
+ __u8 nmac3[6];
+ __be64 r9;
+ __be64 r10;
+};
+
+#define S_FW_VI_CMD_PFN 8
+#define M_FW_VI_CMD_PFN 0x7
+#define V_FW_VI_CMD_PFN(x) ((x) << S_FW_VI_CMD_PFN)
+#define G_FW_VI_CMD_PFN(x) (((x) >> S_FW_VI_CMD_PFN) & M_FW_VI_CMD_PFN)
+
+#define S_FW_VI_CMD_VFN 0
+#define M_FW_VI_CMD_VFN 0xff
+#define V_FW_VI_CMD_VFN(x) ((x) << S_FW_VI_CMD_VFN)
+#define G_FW_VI_CMD_VFN(x) (((x) >> S_FW_VI_CMD_VFN) & M_FW_VI_CMD_VFN)
+
+#define S_FW_VI_CMD_ALLOC 31
+#define M_FW_VI_CMD_ALLOC 0x1
+#define V_FW_VI_CMD_ALLOC(x) ((x) << S_FW_VI_CMD_ALLOC)
+#define G_FW_VI_CMD_ALLOC(x) \
+ (((x) >> S_FW_VI_CMD_ALLOC) & M_FW_VI_CMD_ALLOC)
+#define F_FW_VI_CMD_ALLOC V_FW_VI_CMD_ALLOC(1U)
+
+#define S_FW_VI_CMD_FREE 30
+#define M_FW_VI_CMD_FREE 0x1
+#define V_FW_VI_CMD_FREE(x) ((x) << S_FW_VI_CMD_FREE)
+#define G_FW_VI_CMD_FREE(x) (((x) >> S_FW_VI_CMD_FREE) & M_FW_VI_CMD_FREE)
+#define F_FW_VI_CMD_FREE V_FW_VI_CMD_FREE(1U)
+
+#define S_FW_VI_CMD_TYPE 15
+#define M_FW_VI_CMD_TYPE 0x1
+#define V_FW_VI_CMD_TYPE(x) ((x) << S_FW_VI_CMD_TYPE)
+#define G_FW_VI_CMD_TYPE(x) (((x) >> S_FW_VI_CMD_TYPE) & M_FW_VI_CMD_TYPE)
+#define F_FW_VI_CMD_TYPE V_FW_VI_CMD_TYPE(1U)
+
+#define S_FW_VI_CMD_FUNC 12
+#define M_FW_VI_CMD_FUNC 0x7
+#define V_FW_VI_CMD_FUNC(x) ((x) << S_FW_VI_CMD_FUNC)
+#define G_FW_VI_CMD_FUNC(x) (((x) >> S_FW_VI_CMD_FUNC) & M_FW_VI_CMD_FUNC)
+
+#define S_FW_VI_CMD_VIID 0
+#define M_FW_VI_CMD_VIID 0xfff
+#define V_FW_VI_CMD_VIID(x) ((x) << S_FW_VI_CMD_VIID)
+#define G_FW_VI_CMD_VIID(x) (((x) >> S_FW_VI_CMD_VIID) & M_FW_VI_CMD_VIID)
+
+#define S_FW_VI_CMD_PORTID 4
+#define M_FW_VI_CMD_PORTID 0xf
+#define V_FW_VI_CMD_PORTID(x) ((x) << S_FW_VI_CMD_PORTID)
+#define G_FW_VI_CMD_PORTID(x) \
+ (((x) >> S_FW_VI_CMD_PORTID) & M_FW_VI_CMD_PORTID)
+
+#define S_FW_VI_CMD_RSSSIZE 0
+#define M_FW_VI_CMD_RSSSIZE 0x7ff
+#define V_FW_VI_CMD_RSSSIZE(x) ((x) << S_FW_VI_CMD_RSSSIZE)
+#define G_FW_VI_CMD_RSSSIZE(x) \
+ (((x) >> S_FW_VI_CMD_RSSSIZE) & M_FW_VI_CMD_RSSSIZE)
+
+/* Special VI_MAC command index ids */
+#define FW_VI_MAC_ADD_MAC 0x3FF
+#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
+
+enum fw_vi_mac_smac {
+ FW_VI_MAC_MPS_TCAM_ENTRY,
+ FW_VI_MAC_SMT_AND_MPSTCAM
+};
+
+struct fw_vi_mac_cmd {
+ __be32 op_to_viid;
+ __be32 freemacs_to_len16;
+ union fw_vi_mac {
+ struct fw_vi_mac_exact {
+ __be16 valid_to_idx;
+ __u8 macaddr[6];
+ } exact[7];
+ struct fw_vi_mac_hash {
+ __be64 hashvec;
+ } hash;
+ } u;
+};
+
+#define S_FW_VI_MAC_CMD_VIID 0
+#define M_FW_VI_MAC_CMD_VIID 0xfff
+#define V_FW_VI_MAC_CMD_VIID(x) ((x) << S_FW_VI_MAC_CMD_VIID)
+#define G_FW_VI_MAC_CMD_VIID(x) \
+ (((x) >> S_FW_VI_MAC_CMD_VIID) & M_FW_VI_MAC_CMD_VIID)
+
+#define S_FW_VI_MAC_CMD_VALID 15
+#define M_FW_VI_MAC_CMD_VALID 0x1
+#define V_FW_VI_MAC_CMD_VALID(x) ((x) << S_FW_VI_MAC_CMD_VALID)
+#define G_FW_VI_MAC_CMD_VALID(x) \
+ (((x) >> S_FW_VI_MAC_CMD_VALID) & M_FW_VI_MAC_CMD_VALID)
+#define F_FW_VI_MAC_CMD_VALID V_FW_VI_MAC_CMD_VALID(1U)
+
+#define S_FW_VI_MAC_CMD_SMAC_RESULT 10
+#define M_FW_VI_MAC_CMD_SMAC_RESULT 0x3
+#define V_FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << S_FW_VI_MAC_CMD_SMAC_RESULT)
+#define G_FW_VI_MAC_CMD_SMAC_RESULT(x) \
+ (((x) >> S_FW_VI_MAC_CMD_SMAC_RESULT) & M_FW_VI_MAC_CMD_SMAC_RESULT)
+
+#define S_FW_VI_MAC_CMD_IDX 0
+#define M_FW_VI_MAC_CMD_IDX 0x3ff
+#define V_FW_VI_MAC_CMD_IDX(x) ((x) << S_FW_VI_MAC_CMD_IDX)
+#define G_FW_VI_MAC_CMD_IDX(x) \
+ (((x) >> S_FW_VI_MAC_CMD_IDX) & M_FW_VI_MAC_CMD_IDX)
+
+struct fw_vi_rxmode_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ __be32 mtu_to_vlanexen;
+ __be32 r4_lo;
+};
+
+#define S_FW_VI_RXMODE_CMD_VIID 0
+#define M_FW_VI_RXMODE_CMD_VIID 0xfff
+#define V_FW_VI_RXMODE_CMD_VIID(x) ((x) << S_FW_VI_RXMODE_CMD_VIID)
+#define G_FW_VI_RXMODE_CMD_VIID(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_VIID) & M_FW_VI_RXMODE_CMD_VIID)
+
+#define S_FW_VI_RXMODE_CMD_MTU 16
+#define M_FW_VI_RXMODE_CMD_MTU 0xffff
+#define V_FW_VI_RXMODE_CMD_MTU(x) ((x) << S_FW_VI_RXMODE_CMD_MTU)
+#define G_FW_VI_RXMODE_CMD_MTU(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_MTU) & M_FW_VI_RXMODE_CMD_MTU)
+
+#define S_FW_VI_RXMODE_CMD_PROMISCEN 14
+#define M_FW_VI_RXMODE_CMD_PROMISCEN 0x3
+#define V_FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << S_FW_VI_RXMODE_CMD_PROMISCEN)
+#define G_FW_VI_RXMODE_CMD_PROMISCEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_PROMISCEN) & M_FW_VI_RXMODE_CMD_PROMISCEN)
+
+#define S_FW_VI_RXMODE_CMD_ALLMULTIEN 12
+#define M_FW_VI_RXMODE_CMD_ALLMULTIEN 0x3
+#define V_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \
+ ((x) << S_FW_VI_RXMODE_CMD_ALLMULTIEN)
+#define G_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_ALLMULTIEN) & M_FW_VI_RXMODE_CMD_ALLMULTIEN)
+
+#define S_FW_VI_RXMODE_CMD_BROADCASTEN 10
+#define M_FW_VI_RXMODE_CMD_BROADCASTEN 0x3
+#define V_FW_VI_RXMODE_CMD_BROADCASTEN(x) \
+ ((x) << S_FW_VI_RXMODE_CMD_BROADCASTEN)
+#define G_FW_VI_RXMODE_CMD_BROADCASTEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_BROADCASTEN) & \
+ M_FW_VI_RXMODE_CMD_BROADCASTEN)
+
+#define S_FW_VI_RXMODE_CMD_VLANEXEN 8
+#define M_FW_VI_RXMODE_CMD_VLANEXEN 0x3
+#define V_FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << S_FW_VI_RXMODE_CMD_VLANEXEN)
+#define G_FW_VI_RXMODE_CMD_VLANEXEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_VLANEXEN) & M_FW_VI_RXMODE_CMD_VLANEXEN)
+
+struct fw_vi_enable_cmd {
+ __be32 op_to_viid;
+ __be32 ien_to_len16;
+ __be16 blinkdur;
+ __be16 r3;
+ __be32 r4;
+};
+
+#define S_FW_VI_ENABLE_CMD_VIID 0
+#define M_FW_VI_ENABLE_CMD_VIID 0xfff
+#define V_FW_VI_ENABLE_CMD_VIID(x) ((x) << S_FW_VI_ENABLE_CMD_VIID)
+#define G_FW_VI_ENABLE_CMD_VIID(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_VIID) & M_FW_VI_ENABLE_CMD_VIID)
+
+#define S_FW_VI_ENABLE_CMD_IEN 31
+#define M_FW_VI_ENABLE_CMD_IEN 0x1
+#define V_FW_VI_ENABLE_CMD_IEN(x) ((x) << S_FW_VI_ENABLE_CMD_IEN)
+#define G_FW_VI_ENABLE_CMD_IEN(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_IEN) & M_FW_VI_ENABLE_CMD_IEN)
+#define F_FW_VI_ENABLE_CMD_IEN V_FW_VI_ENABLE_CMD_IEN(1U)
+
+#define S_FW_VI_ENABLE_CMD_EEN 30
+#define M_FW_VI_ENABLE_CMD_EEN 0x1
+#define V_FW_VI_ENABLE_CMD_EEN(x) ((x) << S_FW_VI_ENABLE_CMD_EEN)
+#define G_FW_VI_ENABLE_CMD_EEN(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_EEN) & M_FW_VI_ENABLE_CMD_EEN)
+#define F_FW_VI_ENABLE_CMD_EEN V_FW_VI_ENABLE_CMD_EEN(1U)
+
+#define S_FW_VI_ENABLE_CMD_DCB_INFO 28
+#define M_FW_VI_ENABLE_CMD_DCB_INFO 0x1
+#define V_FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << S_FW_VI_ENABLE_CMD_DCB_INFO)
+#define G_FW_VI_ENABLE_CMD_DCB_INFO(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_DCB_INFO) & M_FW_VI_ENABLE_CMD_DCB_INFO)
+#define F_FW_VI_ENABLE_CMD_DCB_INFO V_FW_VI_ENABLE_CMD_DCB_INFO(1U)
+
+/* VI VF stats offset definitions */
+#define VI_VF_NUM_STATS 16
+
+/* VI PF stats offset definitions */
+#define VI_PF_NUM_STATS 17
+enum fw_vi_stats_pf_index {
+ FW_VI_PF_STAT_TX_BCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_BCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_MCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_MCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_UCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_UCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_OFLD_BYTES_IX,
+ FW_VI_PF_STAT_TX_OFLD_FRAMES_IX,
+ FW_VI_PF_STAT_RX_BYTES_IX,
+ FW_VI_PF_STAT_RX_FRAMES_IX,
+ FW_VI_PF_STAT_RX_BCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_BCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_MCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_MCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_UCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_UCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_ERR_FRAMES_IX
+};
+
+struct fw_vi_stats_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ union fw_vi_stats {
+ struct fw_vi_stats_ctl {
+ __be16 nstats_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_vi_stats_pf {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_pf_bytes;
+ __be64 rx_pf_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } pf;
+ struct fw_vi_stats_vf {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_drop_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } vf;
+ } u;
+};
+
+#define S_FW_VI_STATS_CMD_VIID 0
+#define V_FW_VI_STATS_CMD_VIID(x) ((x) << S_FW_VI_STATS_CMD_VIID)
+
+#define S_FW_VI_STATS_CMD_NSTATS 12
+#define V_FW_VI_STATS_CMD_NSTATS(x) ((x) << S_FW_VI_STATS_CMD_NSTATS)
+
+#define S_FW_VI_STATS_CMD_IX 0
+#define V_FW_VI_STATS_CMD_IX(x) ((x) << S_FW_VI_STATS_CMD_IX)
+
+/* old 16-bit port capabilities bitmap */
+enum fw_port_cap {
+ FW_PORT_CAP_SPEED_100M = 0x0001,
+ FW_PORT_CAP_SPEED_1G = 0x0002,
+ FW_PORT_CAP_SPEED_25G = 0x0004,
+ FW_PORT_CAP_SPEED_10G = 0x0008,
+ FW_PORT_CAP_SPEED_40G = 0x0010,
+ FW_PORT_CAP_SPEED_100G = 0x0020,
+ FW_PORT_CAP_FC_RX = 0x0040,
+ FW_PORT_CAP_FC_TX = 0x0080,
+ FW_PORT_CAP_ANEG = 0x0100,
+ FW_PORT_CAP_MDIX = 0x0200,
+ FW_PORT_CAP_MDIAUTO = 0x0400,
+ FW_PORT_CAP_FEC_RS = 0x0800,
+ FW_PORT_CAP_FEC_BASER_RS = 0x1000,
+ FW_PORT_CAP_FEC_RESERVED = 0x2000,
+ FW_PORT_CAP_802_3_PAUSE = 0x4000,
+ FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
+};
+
+#define S_FW_PORT_CAP_SPEED 0
+#define M_FW_PORT_CAP_SPEED 0x3f
+#define V_FW_PORT_CAP_SPEED(x) ((x) << S_FW_PORT_CAP_SPEED)
+#define G_FW_PORT_CAP_SPEED(x) \
+ (((x) >> S_FW_PORT_CAP_SPEED) & M_FW_PORT_CAP_SPEED)
+
+enum fw_port_mdi {
+ FW_PORT_CAP_MDI_AUTO,
+};
+
+#define S_FW_PORT_CAP_MDI 9
+#define M_FW_PORT_CAP_MDI 3
+#define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI)
+#define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & M_FW_PORT_CAP_MDI)
+
+/* new 32-bit port capabilities bitmap (fw_port_cap32_t) */
+#define FW_PORT_CAP32_SPEED_100M 0x00000001UL
+#define FW_PORT_CAP32_SPEED_1G 0x00000002UL
+#define FW_PORT_CAP32_SPEED_10G 0x00000004UL
+#define FW_PORT_CAP32_SPEED_25G 0x00000008UL
+#define FW_PORT_CAP32_SPEED_40G 0x00000010UL
+#define FW_PORT_CAP32_SPEED_50G 0x00000020UL
+#define FW_PORT_CAP32_SPEED_100G 0x00000040UL
+#define FW_PORT_CAP32_FC_RX 0x00010000UL
+#define FW_PORT_CAP32_FC_TX 0x00020000UL
+#define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL
+#define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL
+#define FW_PORT_CAP32_ANEG 0x00100000UL
+#define FW_PORT_CAP32_MDIX 0x00200000UL
+#define FW_PORT_CAP32_MDIAUTO 0x00400000UL
+#define FW_PORT_CAP32_FEC_RS 0x00800000UL
+#define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL
+
+#define S_FW_PORT_CAP32_SPEED 0
+#define M_FW_PORT_CAP32_SPEED 0xfff
+#define V_FW_PORT_CAP32_SPEED(x) ((x) << S_FW_PORT_CAP32_SPEED)
+#define G_FW_PORT_CAP32_SPEED(x) \
+ (((x) >> S_FW_PORT_CAP32_SPEED) & M_FW_PORT_CAP32_SPEED)
+
+enum fw_port_mdi32 {
+ FW_PORT_CAP32_MDI_AUTO,
+};
+
+#define S_FW_PORT_CAP32_MDI 21
+#define M_FW_PORT_CAP32_MDI 3
+#define V_FW_PORT_CAP32_MDI(x) ((x) << S_FW_PORT_CAP32_MDI)
+#define G_FW_PORT_CAP32_MDI(x) \
+ (((x) >> S_FW_PORT_CAP32_MDI) & M_FW_PORT_CAP32_MDI)
+
+enum fw_port_action {
+ FW_PORT_ACTION_L1_CFG = 0x0001,
+ FW_PORT_ACTION_GET_PORT_INFO = 0x0003,
+ FW_PORT_ACTION_L1_CFG32 = 0x0009,
+ FW_PORT_ACTION_GET_PORT_INFO32 = 0x000a,
+};
+
+struct fw_port_cmd {
+ __be32 op_to_portid;
+ __be32 action_to_len16;
+ union fw_port {
+ struct fw_port_l1cfg {
+ __be32 rcap;
+ __be32 r;
+ } l1cfg;
+ struct fw_port_l2cfg {
+ __u8 ctlbf;
+ __u8 ovlan3_to_ivlan0;
+ __be16 ivlantype;
+ __be16 txipg_force_pinfo;
+ __be16 mtu;
+ __be16 ovlan0mask;
+ __be16 ovlan0type;
+ __be16 ovlan1mask;
+ __be16 ovlan1type;
+ __be16 ovlan2mask;
+ __be16 ovlan2type;
+ __be16 ovlan3mask;
+ __be16 ovlan3type;
+ } l2cfg;
+ struct fw_port_info {
+ __be32 lstatus_to_modtype;
+ __be16 pcap;
+ __be16 acap;
+ __be16 mtu;
+ __u8 cbllen;
+ __u8 auxlinfo;
+ __u8 dcbxdis_pkd;
+ __u8 r8_lo;
+ __be16 lpacap;
+ __be64 r9;
+ } info;
+ struct fw_port_diags {
+ __u8 diagop;
+ __u8 r[3];
+ __be32 diagval;
+ } diags;
+ union fw_port_dcb {
+ struct fw_port_dcb_pgid {
+ __u8 type;
+ __u8 apply_pkd;
+ __u8 r10_lo[2];
+ __be32 pgid;
+ __be64 r11;
+ } pgid;
+ struct fw_port_dcb_pgrate {
+ __u8 type;
+ __u8 apply_pkd;
+ __u8 r10_lo[5];
+ __u8 num_tcs_supported;
+ __u8 pgrate[8];
+ __u8 tsa[8];
+ } pgrate;
+ struct fw_port_dcb_priorate {
+ __u8 type;
+ __u8 apply_pkd;
+ __u8 r10_lo[6];
+ __u8 strict_priorate[8];
+ } priorate;
+ struct fw_port_dcb_pfc {
+ __u8 type;
+ __u8 pfcen;
+ __u8 r10[5];
+ __u8 max_pfc_tcs;
+ __be64 r11;
+ } pfc;
+ struct fw_port_app_priority {
+ __u8 type;
+ __u8 r10[2];
+ __u8 idx;
+ __u8 user_prio_map;
+ __u8 sel_field;
+ __be16 protocolid;
+ __be64 r12;
+ } app_priority;
+ struct fw_port_dcb_control {
+ __u8 type;
+ __u8 all_syncd_pkd;
+ __be16 dcb_version_to_app_state;
+ __be32 r11;
+ __be64 r12;
+ } control;
+ } dcb;
+ struct fw_port_l1cfg32 {
+ __be32 rcap32;
+ __be32 r;
+ } l1cfg32;
+ struct fw_port_info32 {
+ __be32 lstatus32_to_cbllen32;
+ __be32 auxlinfo32_mtu32;
+ __be32 linkattr32;
+ __be32 pcaps32;
+ __be32 acaps32;
+ __be32 lpacaps32;
+ } info32;
+ } u;
+};
+
+#define S_FW_PORT_CMD_PORTID 0
+#define M_FW_PORT_CMD_PORTID 0xf
+#define V_FW_PORT_CMD_PORTID(x) ((x) << S_FW_PORT_CMD_PORTID)
+#define G_FW_PORT_CMD_PORTID(x) \
+ (((x) >> S_FW_PORT_CMD_PORTID) & M_FW_PORT_CMD_PORTID)
+
+#define S_FW_PORT_CMD_ACTION 16
+#define M_FW_PORT_CMD_ACTION 0xffff
+#define V_FW_PORT_CMD_ACTION(x) ((x) << S_FW_PORT_CMD_ACTION)
+#define G_FW_PORT_CMD_ACTION(x) \
+ (((x) >> S_FW_PORT_CMD_ACTION) & M_FW_PORT_CMD_ACTION)
+
+#define S_FW_PORT_CMD_LSTATUS 31
+#define M_FW_PORT_CMD_LSTATUS 0x1
+#define V_FW_PORT_CMD_LSTATUS(x) ((x) << S_FW_PORT_CMD_LSTATUS)
+#define G_FW_PORT_CMD_LSTATUS(x) \
+ (((x) >> S_FW_PORT_CMD_LSTATUS) & M_FW_PORT_CMD_LSTATUS)
+#define F_FW_PORT_CMD_LSTATUS V_FW_PORT_CMD_LSTATUS(1U)
+
+#define S_FW_PORT_CMD_LSPEED 24
+#define M_FW_PORT_CMD_LSPEED 0x3f
+#define V_FW_PORT_CMD_LSPEED(x) ((x) << S_FW_PORT_CMD_LSPEED)
+#define G_FW_PORT_CMD_LSPEED(x) \
+ (((x) >> S_FW_PORT_CMD_LSPEED) & M_FW_PORT_CMD_LSPEED)
+
+#define S_FW_PORT_CMD_TXPAUSE 23
+#define M_FW_PORT_CMD_TXPAUSE 0x1
+#define V_FW_PORT_CMD_TXPAUSE(x) ((x) << S_FW_PORT_CMD_TXPAUSE)
+#define G_FW_PORT_CMD_TXPAUSE(x) \
+ (((x) >> S_FW_PORT_CMD_TXPAUSE) & M_FW_PORT_CMD_TXPAUSE)
+#define F_FW_PORT_CMD_TXPAUSE V_FW_PORT_CMD_TXPAUSE(1U)
+
+#define S_FW_PORT_CMD_RXPAUSE 22
+#define M_FW_PORT_CMD_RXPAUSE 0x1
+#define V_FW_PORT_CMD_RXPAUSE(x) ((x) << S_FW_PORT_CMD_RXPAUSE)
+#define G_FW_PORT_CMD_RXPAUSE(x) \
+ (((x) >> S_FW_PORT_CMD_RXPAUSE) & M_FW_PORT_CMD_RXPAUSE)
+#define F_FW_PORT_CMD_RXPAUSE V_FW_PORT_CMD_RXPAUSE(1U)
+
+#define S_FW_PORT_CMD_MDIOCAP 21
+#define M_FW_PORT_CMD_MDIOCAP 0x1
+#define V_FW_PORT_CMD_MDIOCAP(x) ((x) << S_FW_PORT_CMD_MDIOCAP)
+#define G_FW_PORT_CMD_MDIOCAP(x) \
+ (((x) >> S_FW_PORT_CMD_MDIOCAP) & M_FW_PORT_CMD_MDIOCAP)
+#define F_FW_PORT_CMD_MDIOCAP V_FW_PORT_CMD_MDIOCAP(1U)
+
+#define S_FW_PORT_CMD_MDIOADDR 16
+#define M_FW_PORT_CMD_MDIOADDR 0x1f
+#define V_FW_PORT_CMD_MDIOADDR(x) ((x) << S_FW_PORT_CMD_MDIOADDR)
+#define G_FW_PORT_CMD_MDIOADDR(x) \
+ (((x) >> S_FW_PORT_CMD_MDIOADDR) & M_FW_PORT_CMD_MDIOADDR)
+
+#define S_FW_PORT_CMD_PTYPE 8
+#define M_FW_PORT_CMD_PTYPE 0x1f
+#define V_FW_PORT_CMD_PTYPE(x) ((x) << S_FW_PORT_CMD_PTYPE)
+#define G_FW_PORT_CMD_PTYPE(x) \
+ (((x) >> S_FW_PORT_CMD_PTYPE) & M_FW_PORT_CMD_PTYPE)
+
+#define S_FW_PORT_CMD_LINKDNRC 5
+#define M_FW_PORT_CMD_LINKDNRC 0x7
+#define V_FW_PORT_CMD_LINKDNRC(x) ((x) << S_FW_PORT_CMD_LINKDNRC)
+#define G_FW_PORT_CMD_LINKDNRC(x) \
+ (((x) >> S_FW_PORT_CMD_LINKDNRC) & M_FW_PORT_CMD_LINKDNRC)
+
+#define S_FW_PORT_CMD_MODTYPE 0
+#define M_FW_PORT_CMD_MODTYPE 0x1f
+#define V_FW_PORT_CMD_MODTYPE(x) ((x) << S_FW_PORT_CMD_MODTYPE)
+#define G_FW_PORT_CMD_MODTYPE(x) \
+ (((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE)
+
+#define S_FW_PORT_CMD_LSTATUS32 31
+#define M_FW_PORT_CMD_LSTATUS32 0x1
+#define V_FW_PORT_CMD_LSTATUS32(x) ((x) << S_FW_PORT_CMD_LSTATUS32)
+#define F_FW_PORT_CMD_LSTATUS32 V_FW_PORT_CMD_LSTATUS32(1U)
+
+#define S_FW_PORT_CMD_LINKDNRC32 28
+#define M_FW_PORT_CMD_LINKDNRC32 0x7
+#define G_FW_PORT_CMD_LINKDNRC32(x) \
+ (((x) >> S_FW_PORT_CMD_LINKDNRC32) & M_FW_PORT_CMD_LINKDNRC32)
+
+#define S_FW_PORT_CMD_MDIOCAP32 26
+#define M_FW_PORT_CMD_MDIOCAP32 0x1
+#define V_FW_PORT_CMD_MDIOCAP32(x) ((x) << S_FW_PORT_CMD_MDIOCAP32)
+#define F_FW_PORT_CMD_MDIOCAP32 V_FW_PORT_CMD_MDIOCAP32(1U)
+
+#define S_FW_PORT_CMD_MDIOADDR32 21
+#define M_FW_PORT_CMD_MDIOADDR32 0x1f
+#define G_FW_PORT_CMD_MDIOADDR32(x) \
+ (((x) >> S_FW_PORT_CMD_MDIOADDR32) & M_FW_PORT_CMD_MDIOADDR32)
+
+#define S_FW_PORT_CMD_PORTTYPE32 13
+#define M_FW_PORT_CMD_PORTTYPE32 0xff
+#define G_FW_PORT_CMD_PORTTYPE32(x) \
+ (((x) >> S_FW_PORT_CMD_PORTTYPE32) & M_FW_PORT_CMD_PORTTYPE32)
+
+#define S_FW_PORT_CMD_MODTYPE32 8
+#define M_FW_PORT_CMD_MODTYPE32 0x1f
+#define G_FW_PORT_CMD_MODTYPE32(x) \
+ (((x) >> S_FW_PORT_CMD_MODTYPE32) & M_FW_PORT_CMD_MODTYPE32)
+
+/*
+ * These are configured into the VPD and hence tools that generate
+ * VPD may use this enumeration.
+ * extPHY #lanes T4_I2C extI2C BP_Eq BP_ANEG Speed
+ *
+ * REMEMBER:
+ * Update the Common Code t4_hw.c:t4_get_port_type_description()
+ * with any new Firmware Port Technology Types!
+ */
+enum fw_port_type {
+ FW_PORT_TYPE_FIBER_XFI = 0, /* Y, 1, N, Y, N, N, 10G */
+ FW_PORT_TYPE_FIBER_XAUI = 1, /* Y, 4, N, Y, N, N, 10G */
+ FW_PORT_TYPE_BT_SGMII = 2, /* Y, 1, No, No, No, No, 1G/100M */
+ FW_PORT_TYPE_BT_XFI = 3, /* Y, 1, No, No, No, No, 10G */
+ FW_PORT_TYPE_BT_XAUI = 4, /* Y, 4, No, No, No, No, 10G/1G/100M? */
+ FW_PORT_TYPE_KX4 = 5, /* No, 4, No, No, Yes, Yes, 10G */
+ FW_PORT_TYPE_CX4 = 6, /* No, 4, No, No, No, No, 10G */
+ FW_PORT_TYPE_KX = 7, /* No, 1, No, No, Yes, No, 1G */
+ FW_PORT_TYPE_KR = 8, /* No, 1, No, No, Yes, Yes, 10G */
+ FW_PORT_TYPE_SFP = 9, /* No, 1, Yes, No, No, No, 10G */
+ FW_PORT_TYPE_BP_AP = 10,
+ /* No, 1, No, No, Yes, Yes, 10G, BP ANGE */
+ FW_PORT_TYPE_BP4_AP = 11,
+ /* No, 4, No, No, Yes, Yes, 10G, BP ANGE */
+ FW_PORT_TYPE_QSFP_10G = 12, /* No, 1, Yes, No, No, No, 10G */
+ FW_PORT_TYPE_QSA = 13, /* No, 1, Yes, No, No, No, 10G */
+ FW_PORT_TYPE_QSFP = 14, /* No, 4, Yes, No, No, No, 40G */
+ FW_PORT_TYPE_BP40_BA = 15,
+ /* No, 4, No, No, Yes, Yes, 40G/10G/1G, BP ANGE */
+ FW_PORT_TYPE_KR4_100G = 16, /* No, 4, 100G/40G/25G, Backplane */
+ FW_PORT_TYPE_CR4_QSFP = 17, /* No, 4, 100G/40G/25G */
+ FW_PORT_TYPE_CR_QSFP = 18, /* No, 1, 25G Spider cable */
+ FW_PORT_TYPE_CR2_QSFP = 19, /* No, 2, 50G */
+ FW_PORT_TYPE_SFP28 = 20, /* No, 1, 25G/10G/1G */
+ FW_PORT_TYPE_KR_SFP28 = 21, /* No, 1, 25G/10G/1G using Backplane */
+ FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
+};
+
+/* These are read from module's EEPROM and determined once the
+ * module is inserted.
+ */
+enum fw_port_module_type {
+ FW_PORT_MOD_TYPE_NA = 0x0,
+ FW_PORT_MOD_TYPE_LR = 0x1,
+ FW_PORT_MOD_TYPE_SR = 0x2,
+ FW_PORT_MOD_TYPE_ER = 0x3,
+ FW_PORT_MOD_TYPE_TWINAX_PASSIVE = 0x4,
+ FW_PORT_MOD_TYPE_TWINAX_ACTIVE = 0x5,
+ FW_PORT_MOD_TYPE_LRM = 0x6,
+ FW_PORT_MOD_TYPE_ERROR = M_FW_PORT_CMD_MODTYPE - 3,
+ FW_PORT_MOD_TYPE_UNKNOWN = M_FW_PORT_CMD_MODTYPE - 2,
+ FW_PORT_MOD_TYPE_NOTSUPPORTED = M_FW_PORT_CMD_MODTYPE - 1,
+ FW_PORT_MOD_TYPE_NONE = M_FW_PORT_CMD_MODTYPE
+};
+
+/* used by FW and tools may use this to generate VPD */
+enum fw_port_mod_sub_type {
+ FW_PORT_MOD_SUB_TYPE_NA,
+ FW_PORT_MOD_SUB_TYPE_MV88E114X = 0x1,
+ FW_PORT_MOD_SUB_TYPE_TN8022 = 0x2,
+ FW_PORT_MOD_SUB_TYPE_AQ1202 = 0x3,
+ FW_PORT_MOD_SUB_TYPE_88x3120 = 0x4,
+ FW_PORT_MOD_SUB_TYPE_BCM84834 = 0x5,
+ FW_PORT_MOD_SUB_TYPE_BCM5482 = 0x6,
+ FW_PORT_MOD_SUB_TYPE_BCM84856 = 0x7,
+ FW_PORT_MOD_SUB_TYPE_BT_VSC8634 = 0x8,
+
+ /*
+ * The following will never been in the VPD. They are TWINAX cable
+ * lengths decoded from SFP+ module i2c PROMs. These should almost
+ * certainly go somewhere else ...
+ */
+ FW_PORT_MOD_SUB_TYPE_TWINAX_1 = 0x9,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_3 = 0xA,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_5 = 0xB,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
+};
+
+/* link down reason codes (3b) */
+enum fw_port_link_dn_rc {
+ FW_PORT_LINK_DN_RC_NONE,
+ FW_PORT_LINK_DN_RC_REMFLT, /* Remote fault detected */
+ FW_PORT_LINK_DN_ANEG_F, /* Auto-negotiation fault */
+ FW_PORT_LINK_DN_RESERVED3,
+ FW_PORT_LINK_DN_OVERHEAT, /* Port overheated */
+ FW_PORT_LINK_DN_UNKNOWN, /* Unable to determine reason */
+ FW_PORT_LINK_DN_RX_LOS, /* No RX signal detected */
+ FW_PORT_LINK_DN_RESERVED7
+};
+
+/* port stats */
+#define FW_NUM_PORT_STATS 50
+#define FW_NUM_PORT_TX_STATS 23
+#define FW_NUM_PORT_RX_STATS 27
+
+enum fw_port_stats_tx_index {
+ FW_STAT_TX_PORT_BYTES_IX,
+ FW_STAT_TX_PORT_FRAMES_IX,
+ FW_STAT_TX_PORT_BCAST_IX,
+ FW_STAT_TX_PORT_MCAST_IX,
+ FW_STAT_TX_PORT_UCAST_IX,
+ FW_STAT_TX_PORT_ERROR_IX,
+ FW_STAT_TX_PORT_64B_IX,
+ FW_STAT_TX_PORT_65B_127B_IX,
+ FW_STAT_TX_PORT_128B_255B_IX,
+ FW_STAT_TX_PORT_256B_511B_IX,
+ FW_STAT_TX_PORT_512B_1023B_IX,
+ FW_STAT_TX_PORT_1024B_1518B_IX,
+ FW_STAT_TX_PORT_1519B_MAX_IX,
+ FW_STAT_TX_PORT_DROP_IX,
+ FW_STAT_TX_PORT_PAUSE_IX,
+ FW_STAT_TX_PORT_PPP0_IX,
+ FW_STAT_TX_PORT_PPP1_IX,
+ FW_STAT_TX_PORT_PPP2_IX,
+ FW_STAT_TX_PORT_PPP3_IX,
+ FW_STAT_TX_PORT_PPP4_IX,
+ FW_STAT_TX_PORT_PPP5_IX,
+ FW_STAT_TX_PORT_PPP6_IX,
+ FW_STAT_TX_PORT_PPP7_IX
+};
+
+enum fw_port_stat_rx_index {
+ FW_STAT_RX_PORT_BYTES_IX,
+ FW_STAT_RX_PORT_FRAMES_IX,
+ FW_STAT_RX_PORT_BCAST_IX,
+ FW_STAT_RX_PORT_MCAST_IX,
+ FW_STAT_RX_PORT_UCAST_IX,
+ FW_STAT_RX_PORT_MTU_ERROR_IX,
+ FW_STAT_RX_PORT_MTU_CRC_ERROR_IX,
+ FW_STAT_RX_PORT_CRC_ERROR_IX,
+ FW_STAT_RX_PORT_LEN_ERROR_IX,
+ FW_STAT_RX_PORT_SYM_ERROR_IX,
+ FW_STAT_RX_PORT_64B_IX,
+ FW_STAT_RX_PORT_65B_127B_IX,
+ FW_STAT_RX_PORT_128B_255B_IX,
+ FW_STAT_RX_PORT_256B_511B_IX,
+ FW_STAT_RX_PORT_512B_1023B_IX,
+ FW_STAT_RX_PORT_1024B_1518B_IX,
+ FW_STAT_RX_PORT_1519B_MAX_IX,
+ FW_STAT_RX_PORT_PAUSE_IX,
+ FW_STAT_RX_PORT_PPP0_IX,
+ FW_STAT_RX_PORT_PPP1_IX,
+ FW_STAT_RX_PORT_PPP2_IX,
+ FW_STAT_RX_PORT_PPP3_IX,
+ FW_STAT_RX_PORT_PPP4_IX,
+ FW_STAT_RX_PORT_PPP5_IX,
+ FW_STAT_RX_PORT_PPP6_IX,
+ FW_STAT_RX_PORT_PPP7_IX,
+ FW_STAT_RX_PORT_LESS_64B_IX
+};
+
+struct fw_port_stats_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ union fw_port_stats {
+ struct fw_port_stats_ctl {
+ __u8 nstats_bg_bm;
+ __u8 tx_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_port_stats_all {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 tx_bcast;
+ __be64 tx_mcast;
+ __be64 tx_ucast;
+ __be64 tx_error;
+ __be64 tx_64b;
+ __be64 tx_65b_127b;
+ __be64 tx_128b_255b;
+ __be64 tx_256b_511b;
+ __be64 tx_512b_1023b;
+ __be64 tx_1024b_1518b;
+ __be64 tx_1519b_max;
+ __be64 tx_drop;
+ __be64 tx_pause;
+ __be64 tx_ppp0;
+ __be64 tx_ppp1;
+ __be64 tx_ppp2;
+ __be64 tx_ppp3;
+ __be64 tx_ppp4;
+ __be64 tx_ppp5;
+ __be64 tx_ppp6;
+ __be64 tx_ppp7;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be64 rx_bcast;
+ __be64 rx_mcast;
+ __be64 rx_ucast;
+ __be64 rx_mtu_error;
+ __be64 rx_mtu_crc_error;
+ __be64 rx_crc_error;
+ __be64 rx_len_error;
+ __be64 rx_sym_error;
+ __be64 rx_64b;
+ __be64 rx_65b_127b;
+ __be64 rx_128b_255b;
+ __be64 rx_256b_511b;
+ __be64 rx_512b_1023b;
+ __be64 rx_1024b_1518b;
+ __be64 rx_1519b_max;
+ __be64 rx_pause;
+ __be64 rx_ppp0;
+ __be64 rx_ppp1;
+ __be64 rx_ppp2;
+ __be64 rx_ppp3;
+ __be64 rx_ppp4;
+ __be64 rx_ppp5;
+ __be64 rx_ppp6;
+ __be64 rx_ppp7;
+ __be64 rx_less_64b;
+ __be64 rx_bg_drop;
+ __be64 rx_bg_trunc;
+ } all;
+ } u;
+};
+
+struct fw_rss_ind_tbl_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ __be16 niqid;
+ __be16 startidx;
+ __be32 r3;
+ __be32 iq0_to_iq2;
+ __be32 iq3_to_iq5;
+ __be32 iq6_to_iq8;
+ __be32 iq9_to_iq11;
+ __be32 iq12_to_iq14;
+ __be32 iq15_to_iq17;
+ __be32 iq18_to_iq20;
+ __be32 iq21_to_iq23;
+ __be32 iq24_to_iq26;
+ __be32 iq27_to_iq29;
+ __be32 iq30_iq31;
+ __be32 r15_lo;
+};
+
+#define S_FW_RSS_IND_TBL_CMD_VIID 0
+#define M_FW_RSS_IND_TBL_CMD_VIID 0xfff
+#define V_FW_RSS_IND_TBL_CMD_VIID(x) ((x) << S_FW_RSS_IND_TBL_CMD_VIID)
+#define G_FW_RSS_IND_TBL_CMD_VIID(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_VIID) & M_FW_RSS_IND_TBL_CMD_VIID)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ0 20
+#define M_FW_RSS_IND_TBL_CMD_IQ0 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ0)
+#define G_FW_RSS_IND_TBL_CMD_IQ0(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ0) & M_FW_RSS_IND_TBL_CMD_IQ0)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ1 10
+#define M_FW_RSS_IND_TBL_CMD_IQ1 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ1)
+#define G_FW_RSS_IND_TBL_CMD_IQ1(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ1) & M_FW_RSS_IND_TBL_CMD_IQ1)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ2 0
+#define M_FW_RSS_IND_TBL_CMD_IQ2 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ2)
+#define G_FW_RSS_IND_TBL_CMD_IQ2(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2)
+
+struct fw_rss_glb_config_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ union fw_rss_glb_config {
+ struct fw_rss_glb_config_manual {
+ __be32 mode_pkd;
+ __be32 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_glb_config_basicvirtual {
+ __be32 mode_keymode;
+ __be32 synmapen_to_hashtoeplitz;
+ __be64 r8;
+ __be64 r9;
+ } basicvirtual;
+ } u;
+};
+
+#define S_FW_RSS_GLB_CONFIG_CMD_MODE 28
+#define M_FW_RSS_GLB_CONFIG_CMD_MODE 0xf
+#define G_FW_RSS_GLB_CONFIG_CMD_MODE(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_MODE) & M_FW_RSS_GLB_CONFIG_CMD_MODE)
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 8
+#define V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 7
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 6
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 5
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 4
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 3
+#define V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 2
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 1
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP)
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP \
+ V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0
+#define V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ)
+#define F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ \
+ V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(1U)
+
+struct fw_rss_vi_config_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ union fw_rss_vi_config {
+ struct fw_rss_vi_config_manual {
+ __be64 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_vi_config_basicvirtual {
+ __be32 r6;
+ __be32 defaultq_to_udpen;
+ __be64 r9;
+ __be64 r10;
+ } basicvirtual;
+ } u;
+};
+
+#define S_FW_RSS_VI_CONFIG_CMD_VIID 0
+#define M_FW_RSS_VI_CONFIG_CMD_VIID 0xfff
+#define V_FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_VIID)
+#define G_FW_RSS_VI_CONFIG_CMD_VIID(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_VIID) & M_FW_RSS_VI_CONFIG_CMD_VIID)
+
+#define S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 16
+#define M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 0x3ff
+#define V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ)
+#define G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) & \
+ M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 4
+#define M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 3
+#define M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 2
+#define M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 1
+#define M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_UDPEN 0
+#define M_FW_RSS_VI_CONFIG_CMD_UDPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_UDPEN(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_UDPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_UDPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & M_FW_RSS_VI_CONFIG_CMD_UDPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_UDPEN V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U)
+
+struct fw_clip_cmd {
+ __be32 op_to_write;
+ __be32 alloc_to_len16;
+ __be64 ip_hi;
+ __be64 ip_lo;
+ __be32 r4[2];
+};
+
+#define S_FW_CLIP_CMD_ALLOC 31
+#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC)
+#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U)
+
+#define S_FW_CLIP_CMD_FREE 30
+#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE)
+#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U)
+
+/******************************************************************************
+ * D E B U G C O M M A N D s
+ ******************************************************/
+
+struct fw_debug_cmd {
+ __be32 op_type;
+ __be32 len16_pkd;
+ union fw_debug {
+ struct fw_debug_assert {
+ __be32 fcid;
+ __be32 line;
+ __be32 x;
+ __be32 y;
+ __u8 filename_0_7[8];
+ __u8 filename_8_15[8];
+ __be64 r3;
+ } assert;
+ struct fw_debug_prt {
+ __be16 dprtstridx;
+ __be16 r3[3];
+ __be32 dprtstrparam0;
+ __be32 dprtstrparam1;
+ __be32 dprtstrparam2;
+ __be32 dprtstrparam3;
+ } prt;
+ } u;
+};
+
+#define S_FW_DEBUG_CMD_TYPE 0
+#define M_FW_DEBUG_CMD_TYPE 0xff
+#define V_FW_DEBUG_CMD_TYPE(x) ((x) << S_FW_DEBUG_CMD_TYPE)
+#define G_FW_DEBUG_CMD_TYPE(x) \
+ (((x) >> S_FW_DEBUG_CMD_TYPE) & M_FW_DEBUG_CMD_TYPE)
+
+/******************************************************************************
+ * P C I E F W R E G I S T E R
+ **************************************/
+
+/*
+ * Register definitions for the PCIE_FW register which the firmware uses
+ * to retain status across RESETs. This register should be considered
+ * as a READ-ONLY register for Host Software and only to be used to
+ * track firmware initialization/error state, etc.
+ */
+#define S_PCIE_FW_ERR 31
+#define M_PCIE_FW_ERR 0x1
+#define V_PCIE_FW_ERR(x) ((x) << S_PCIE_FW_ERR)
+#define G_PCIE_FW_ERR(x) (((x) >> S_PCIE_FW_ERR) & M_PCIE_FW_ERR)
+#define F_PCIE_FW_ERR V_PCIE_FW_ERR(1U)
+
+#define S_PCIE_FW_INIT 30
+#define M_PCIE_FW_INIT 0x1
+#define V_PCIE_FW_INIT(x) ((x) << S_PCIE_FW_INIT)
+#define G_PCIE_FW_INIT(x) (((x) >> S_PCIE_FW_INIT) & M_PCIE_FW_INIT)
+#define F_PCIE_FW_INIT V_PCIE_FW_INIT(1U)
+
+#define S_PCIE_FW_HALT 29
+#define M_PCIE_FW_HALT 0x1
+#define V_PCIE_FW_HALT(x) ((x) << S_PCIE_FW_HALT)
+#define G_PCIE_FW_HALT(x) (((x) >> S_PCIE_FW_HALT) & M_PCIE_FW_HALT)
+#define F_PCIE_FW_HALT V_PCIE_FW_HALT(1U)
+
+#define S_PCIE_FW_EVAL 24
+#define M_PCIE_FW_EVAL 0x7
+#define V_PCIE_FW_EVAL(x) ((x) << S_PCIE_FW_EVAL)
+#define G_PCIE_FW_EVAL(x) (((x) >> S_PCIE_FW_EVAL) & M_PCIE_FW_EVAL)
+
+#define S_PCIE_FW_MASTER_VLD 15
+#define M_PCIE_FW_MASTER_VLD 0x1
+#define V_PCIE_FW_MASTER_VLD(x) ((x) << S_PCIE_FW_MASTER_VLD)
+#define G_PCIE_FW_MASTER_VLD(x) \
+ (((x) >> S_PCIE_FW_MASTER_VLD) & M_PCIE_FW_MASTER_VLD)
+#define F_PCIE_FW_MASTER_VLD V_PCIE_FW_MASTER_VLD(1U)
+
+#define S_PCIE_FW_MASTER 12
+#define M_PCIE_FW_MASTER 0x7
+#define V_PCIE_FW_MASTER(x) ((x) << S_PCIE_FW_MASTER)
+#define G_PCIE_FW_MASTER(x) (((x) >> S_PCIE_FW_MASTER) & M_PCIE_FW_MASTER)
+
+/******************************************************************************
+ * B I N A R Y H E A D E R F O R M A T
+ **********************************************/
+
+/*
+ * firmware binary header format
+ */
+struct fw_hdr {
+ __u8 ver;
+ __u8 chip; /* terminator chip family */
+ __be16 len512; /* bin length in units of 512-bytes */
+ __be32 fw_ver; /* firmware version */
+ __be32 tp_microcode_ver; /* tcp processor microcode version */
+ __u8 intfver_nic;
+ __u8 intfver_vnic;
+ __u8 intfver_ofld;
+ __u8 intfver_ri;
+ __u8 intfver_iscsipdu;
+ __u8 intfver_iscsi;
+ __u8 intfver_fcoepdu;
+ __u8 intfver_fcoe;
+ __u32 reserved2;
+ __u32 reserved3;
+ __u32 magic; /* runtime or bootstrap fw */
+ __be32 flags;
+ __be32 reserved6[23];
+};
+
+#define S_FW_HDR_FW_VER_MAJOR 24
+#define M_FW_HDR_FW_VER_MAJOR 0xff
+#define V_FW_HDR_FW_VER_MAJOR(x) \
+ ((x) << S_FW_HDR_FW_VER_MAJOR)
+#define G_FW_HDR_FW_VER_MAJOR(x) \
+ (((x) >> S_FW_HDR_FW_VER_MAJOR) & M_FW_HDR_FW_VER_MAJOR)
+
+#define S_FW_HDR_FW_VER_MINOR 16
+#define M_FW_HDR_FW_VER_MINOR 0xff
+#define V_FW_HDR_FW_VER_MINOR(x) \
+ ((x) << S_FW_HDR_FW_VER_MINOR)
+#define G_FW_HDR_FW_VER_MINOR(x) \
+ (((x) >> S_FW_HDR_FW_VER_MINOR) & M_FW_HDR_FW_VER_MINOR)
+
+#define S_FW_HDR_FW_VER_MICRO 8
+#define M_FW_HDR_FW_VER_MICRO 0xff
+#define V_FW_HDR_FW_VER_MICRO(x) \
+ ((x) << S_FW_HDR_FW_VER_MICRO)
+#define G_FW_HDR_FW_VER_MICRO(x) \
+ (((x) >> S_FW_HDR_FW_VER_MICRO) & M_FW_HDR_FW_VER_MICRO)
+
+#define S_FW_HDR_FW_VER_BUILD 0
+#define M_FW_HDR_FW_VER_BUILD 0xff
+#define V_FW_HDR_FW_VER_BUILD(x) \
+ ((x) << S_FW_HDR_FW_VER_BUILD)
+#define G_FW_HDR_FW_VER_BUILD(x) \
+ (((x) >> S_FW_HDR_FW_VER_BUILD) & M_FW_HDR_FW_VER_BUILD)
+
+#endif /* _T4FW_INTERFACE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.c b/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.c
new file mode 100644
index 00000000..d96456bb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.c
@@ -0,0 +1,880 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+
+#include "common.h"
+#include "t4_regs.h"
+
+/**
+ * t4vf_wait_dev_ready - wait till to reads of registers work
+ *
+ * Wait for the device to become ready (signified by our "who am I" register
+ * returning a value other than all 1's). Return an error if it doesn't
+ * become ready ...
+ */
+static int t4vf_wait_dev_ready(struct adapter *adapter)
+{
+ const u32 whoami = T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI;
+ const u32 notready1 = 0xffffffff;
+ const u32 notready2 = 0xeeeeeeee;
+ u32 val;
+
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+
+ msleep(500);
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+
+ dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
+ val);
+ return -EIO;
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+ u32 mbox_addr)
+{
+ for ( ; nflit; nflit--, mbox_addr += 8)
+ *rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
+}
+
+/**
+ * t4vf_wr_mbox_core - send a command to FW through the mailbox
+ * @adapter: the adapter
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the mailbox and waits for the
+ * FW to execute the command. If @rpl is not %NULL it is used to store
+ * the FW's reply to the command. The command and its optional reply
+ * are of the same length. FW can take up to 500 ms to respond.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed we use progressive backoff otherwise we spin.
+ *
+ * The return value is 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4vf_wr_mbox_core(struct adapter *adapter,
+ const void __attribute__((__may_alias__)) *cmd,
+ int size, void *rpl, bool sleep_ok)
+{
+ /*
+ * We delay in small increments at first in an effort to maintain
+ * responsiveness for simple, fast executing commands but then back
+ * off to larger delays to a maximum retry delay.
+ */
+ static const int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100
+ };
+
+
+ u32 mbox_ctl = T4VF_CIM_BASE_ADDR + A_CIM_VF_EXT_MAILBOX_CTRL;
+ __be64 cmd_rpl[MBOX_LEN / 8];
+ struct mbox_entry entry;
+ unsigned int delay_idx;
+ u32 v, mbox_data;
+ const __be64 *p;
+ int i, ret;
+ int ms;
+
+ /* In T6, mailbox size is changed to 128 bytes to avoid
+ * invalidating the entire prefetch buffer.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ mbox_data = T4VF_MBDATA_BASE_ADDR;
+ else
+ mbox_data = T6VF_MBDATA_BASE_ADDR;
+
+ /*
+ * Commands must be multiples of 16 bytes in length and may not be
+ * larger than the size of the Mailbox Data register array.
+ */
+ if ((size % 16) != 0 ||
+ size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
+ return -EINVAL;
+
+ /*
+ * Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ t4_os_atomic_add_tail(&entry, &adapter->mbox_list, &adapter->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /*
+ * If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+ * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ...
+ */
+ if (i > (2 * FW_CMD_MAX_TIMEOUT)) {
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+ ret = -EBUSY;
+ return ret;
+ }
+
+ /*
+ * If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (t4_os_list_first_entry(&adapter->mbox_list) == &entry)
+ break;
+
+ /*
+ * Delay for a bit before checking again ...
+ */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+ }
+
+ /*
+ * Loop trying to get ownership of the mailbox. Return an error
+ * if we can't gain ownership.
+ */
+ v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl));
+ for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
+ v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl));
+
+ if (v != X_MBOWNER_PL) {
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+ ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
+ return ret;
+ }
+
+ /*
+ * Write the command array into the Mailbox Data register array and
+ * transfer ownership of the mailbox to the firmware.
+ */
+ for (i = 0, p = cmd; i < size; i += 8)
+ t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+
+ t4_read_reg(adapter, mbox_data); /* flush write */
+ t4_write_reg(adapter, mbox_ctl,
+ F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
+ t4_read_reg(adapter, mbox_ctl); /* flush write */
+ delay_idx = 0;
+ ms = delay[0];
+
+ /*
+ * Spin waiting for firmware to acknowledge processing our command.
+ */
+ for (i = 0; i < FW_CMD_MAX_TIMEOUT; i++) {
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+
+ /*
+ * If we're the owner, see if this is the reply we wanted.
+ */
+ v = t4_read_reg(adapter, mbox_ctl);
+ if (G_MBOWNER(v) == X_MBOWNER_PL) {
+ /*
+ * If the Message Valid bit isn't on, revoke ownership
+ * of the mailbox and continue waiting for our reply.
+ */
+ if ((v & F_MBMSGVALID) == 0) {
+ t4_write_reg(adapter, mbox_ctl,
+ V_MBOWNER(X_MBOWNER_NONE));
+ continue;
+ }
+
+ /*
+ * We now have our reply. Extract the command return
+ * value, copy the reply back to our caller's buffer
+ * (if specified) and revoke ownership of the mailbox.
+ * We return the (negated) firmware command return
+ * code (this depends on FW_SUCCESS == 0). (Again we
+ * avoid clogging the log with FW_VI_STATS_CMD
+ * reply results.)
+ */
+
+ /*
+ * Retrieve the command reply and release the mailbox.
+ */
+ get_mbox_rpl(adapter, cmd_rpl, size / 8, mbox_data);
+ t4_write_reg(adapter, mbox_ctl,
+ V_MBOWNER(X_MBOWNER_NONE));
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+
+ /* return value in high-order host-endian word */
+ v = be64_to_cpu(cmd_rpl[0]);
+
+ if (rpl) {
+ /* request bit in high-order BE word */
+ WARN_ON((be32_to_cpu(*(const u32 *)cmd)
+ & F_FW_CMD_REQUEST) == 0);
+ memcpy(rpl, cmd_rpl, size);
+ }
+ return -((int)G_FW_CMD_RETVAL(v));
+ }
+ }
+
+ /*
+ * We timed out. Return the error ...
+ */
+ dev_err(adapter, "command %#x timed out\n",
+ *(const u8 *)cmd);
+ dev_err(adapter, " Control = %#x\n", t4_read_reg(adapter, mbox_ctl));
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list, &adapter->mbox_lock);
+ ret = -ETIMEDOUT;
+ return ret;
+}
+
+/**
+ * t4vf_fw_reset - issue a reset to FW
+ * @adapter: the adapter
+ *
+ * Issues a reset command to FW. For a Physical Function this would
+ * result in the Firmware resetting all of its state. For a Virtual
+ * Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+ struct fw_reset_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) |
+ F_FW_CMD_WRITE);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(FW_LEN16(cmd)));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
+ */
+int t4vf_prep_adapter(struct adapter *adapter)
+{
+ u32 pl_vf_rev;
+ int ret, ver;
+
+ ret = t4vf_wait_dev_ready(adapter);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Default port and clock for debugging in case we can't reach
+ * firmware.
+ */
+ adapter->params.nports = 1;
+ adapter->params.vfres.pmask = 1;
+ adapter->params.vpd.cclk = 50000;
+
+ pl_vf_rev = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+ adapter->params.pci.device_id = adapter->pdev->id.device_id;
+ adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
+
+ /*
+ * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
+ * ADAPTER (VERSION << 4 | REVISION)
+ */
+ ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
+ adapter->params.chip = 0;
+ switch (ver) {
+ case CHELSIO_T5:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5,
+ pl_vf_rev);
+ adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+ case CHELSIO_T6:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6,
+ pl_vf_rev);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+ default:
+ dev_err(adapter, "%s: Device %d is not supported\n",
+ __func__, adapter->params.pci.device_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * t4vf_query_params - query FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Reads the values of firmware or device parameters. Up to 7 parameters
+ * can be queried at once.
+ */
+int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, u32 *vals)
+{
+ struct fw_params_cmd cmd, rpl;
+ struct fw_params_param *p;
+ unsigned int i;
+ size_t len16;
+ int ret;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams]), 16);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
+ p->mnem = cpu_to_be32(*params++);
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret == 0)
+ for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
+ *vals++ = be32_to_cpu(p->val);
+ return ret;
+}
+
+/**
+ * t4vf_get_vpd_params - retrieve device VPD paremeters
+ * @adapter: the adapter
+ *
+ * Retrives various device Vital Product Data parameters. The parameters
+ * are stored in @adapter->params.vpd.
+ */
+int t4vf_get_vpd_params(struct adapter *adapter)
+{
+ struct vpd_params *vpd_params = &adapter->params.vpd;
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ v = t4vf_query_params(adapter, 1, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ vpd_params->cclk = vals[0];
+ dev_debug(adapter, "%s: vpd_params->cclk = %u\n",
+ __func__, vpd_params->cclk);
+ return 0;
+}
+
+/**
+ * t4vf_get_dev_params - retrieve device paremeters
+ * @adapter: the adapter
+ *
+ * Retrives fw and tp version.
+ */
+int t4vf_get_dev_params(struct adapter *adapter)
+{
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
+ v = t4vf_query_params(adapter, 2, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ adapter->params.fw_vers = vals[0];
+ adapter->params.tp_vers = vals[1];
+
+ dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
+
+ dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
+ return 0;
+}
+
+/**
+ * t4vf_set_params - sets FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Sets the values of firmware or device parameters. Up to 7 parameters
+ * can be specified at once.
+ */
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, const u32 *vals)
+{
+ struct fw_params_param *p;
+ struct fw_params_cmd cmd;
+ unsigned int i;
+ size_t len16;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams]), 16);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
+ p->mnem = cpu_to_be32(*params++);
+ p->val = cpu_to_be32(*vals++);
+ }
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_fl_pkt_align - return the fl packet alignment
+ * @adapter: the adapter
+ *
+ * T4 has a single field to specify the packing and padding boundary.
+ * T5 onwards has separate fields for this and hence the alignment for
+ * next packet offset is maximum of these two.
+ */
+int t4vf_fl_pkt_align(struct adapter *adapter, u32 sge_control,
+ u32 sge_control2)
+{
+ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ ingpad_shift = X_INGPADBOUNDARY_SHIFT;
+ else
+ ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
+
+ ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
+
+ fl_align = ingpadboundary;
+ if (!is_t4(adapter->params.chip)) {
+ ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
+ if (ingpackboundary == X_INGPACKBOUNDARY_16B)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ X_INGPACKBOUNDARY_SHIFT);
+
+ fl_align = max(ingpadboundary, ingpackboundary);
+ }
+ return fl_align;
+}
+
+unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
+{
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
+ return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami));
+}
+
+/**
+ * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
+ * @adapter: the adapter
+ *
+ * Retrieves global RSS mode and parameters with which we have to live
+ * and stores them in the @adapter's RSS parameters.
+ */
+int t4vf_get_rss_glb_config(struct adapter *adapter)
+{
+ struct rss_params *rss = &adapter->params.rss;
+ struct fw_rss_glb_config_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute an RSS Global Configuration read command to retrieve
+ * our RSS configuration.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Translate the big-endian RSS Global Configuration into our
+ * cpu-endian format based on the RSS mode. We also do first level
+ * filtering at this point to weed out modes which don't support
+ * VF Drivers ...
+ */
+ rss->mode = G_FW_RSS_GLB_CONFIG_CMD_MODE
+ (be32_to_cpu(rpl.u.manual.mode_pkd));
+ switch (rss->mode) {
+ case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+ u32 word = be32_to_cpu
+ (rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
+
+ rss->u.basicvirtual.synmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
+ rss->u.basicvirtual.syn4tupenipv6 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn2tupenipv6 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn4tupenipv4 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
+ rss->u.basicvirtual.syn2tupenipv4 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
+ rss->u.basicvirtual.ofdmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
+ rss->u.basicvirtual.tnlmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
+ rss->u.basicvirtual.tnlalllookup =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
+ rss->u.basicvirtual.hashtoeplitz =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
+
+ /* we need at least Tunnel Map Enable to be set */
+ if (!rss->u.basicvirtual.tnlmapen)
+ return -EINVAL;
+ break;
+ }
+
+ default:
+ /* all unknown/unsupported RSS modes result in an error */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * t4vf_get_vfres - retrieve VF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a virtual
+ * function. The results are stored in @adapter->vfres.
+ */
+int t4vf_get_vfres(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ u32 word;
+ int v;
+
+ /*
+ * Execute PFVF Read command to get VF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Extract VF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ vfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
+ vfres->niq = G_FW_PFVF_CMD_NIQ(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ vfres->neq = G_FW_PFVF_CMD_NEQ(word);
+ vfres->pmask = G_FW_PFVF_CMD_PMASK(word);
+
+ word = be32_to_cpu(rpl.tc_to_nexactf);
+ vfres->tc = G_FW_PFVF_CMD_TC(word);
+ vfres->nvi = G_FW_PFVF_CMD_NVI(word);
+ vfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);
+
+ word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+ vfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
+ vfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
+ vfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);
+ return 0;
+}
+
+/**
+ * t4vf_get_port_stats_fw - collect "port" statistics via Firmware
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @s: the stats structure to fill
+ *
+ * Collect statistics for the "port"'s Virtual Interface via Firmware
+ * commands.
+ */
+static int t4vf_get_port_stats_fw(struct adapter *adapter, int pidx,
+ struct port_stats *p)
+{
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+ unsigned int rem = VI_VF_NUM_STATS;
+ struct fw_vi_stats_vf fwstats;
+ __be64 *fwsp = (__be64 *)&fwstats;
+
+ /*
+ * Grab the Virtual Interface statistics a chunk at a time via mailbox
+ * commands. We could use a Work Request and get all of them at once
+ * but that's an asynchronous interface which is awkward to use.
+ */
+ while (rem) {
+ unsigned int ix = VI_VF_NUM_STATS - rem;
+ unsigned int nstats = min(6U, rem);
+ struct fw_vi_stats_cmd cmd, rpl;
+ size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
+ sizeof(struct fw_vi_stats_ctl));
+ size_t len16 = DIV_ROUND_UP(len, 16);
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_STATS_CMD) |
+ V_FW_VI_STATS_CMD_VIID(pi->viid) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ cmd.u.ctl.nstats_ix =
+ cpu_to_be16(V_FW_VI_STATS_CMD_IX(ix) |
+ V_FW_VI_STATS_CMD_NSTATS(nstats));
+ ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);
+
+ rem -= nstats;
+ fwsp += nstats;
+ }
+
+ /*
+ * Translate firmware statistics into host native statistics.
+ */
+ p->tx_octets = be64_to_cpu(fwstats.tx_bcast_bytes) +
+ be64_to_cpu(fwstats.tx_mcast_bytes) +
+ be64_to_cpu(fwstats.tx_ucast_bytes);
+ p->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
+ p->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
+ p->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
+ p->tx_drop = be64_to_cpu(fwstats.tx_drop_frames);
+
+ p->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
+ p->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
+ p->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
+ p->rx_len_err = be64_to_cpu(fwstats.rx_err_frames);
+
+ return 0;
+}
+
+/**
+ * t4vf_get_port_stats - collect "port" statistics
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @s: the stats structure to fill
+ *
+ * Collect statistics for the "port"'s Virtual Interface.
+ */
+void t4vf_get_port_stats(struct adapter *adapter, int pidx,
+ struct port_stats *p)
+{
+ /*
+ * If this is not the first Virtual Interface for our Virtual
+ * Function, we need to use Firmware commands to retrieve its
+ * MPS statistics.
+ */
+ if (pidx != 0)
+ t4vf_get_port_stats_fw(adapter, pidx, p);
+
+ /*
+ * But for the first VI, we can grab its statistics via the MPS
+ * register mapped into the VF register space.
+ */
+#define GET_STAT(name) \
+ t4_read_reg64(adapter, \
+ T4VF_MPS_BASE_ADDR + A_MPS_VF_STAT_##name##_L)
+ p->tx_octets = GET_STAT(TX_VF_BCAST_BYTES) +
+ GET_STAT(TX_VF_MCAST_BYTES) +
+ GET_STAT(TX_VF_UCAST_BYTES);
+ p->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
+ p->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
+ p->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
+ p->tx_drop = GET_STAT(TX_VF_DROP_FRAMES);
+
+ p->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
+ p->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
+ p->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
+
+ p->rx_len_err = GET_STAT(RX_VF_ERR_FRAMES);
+#undef GET_STAT
+}
+
+static int t4vf_alloc_vi(struct adapter *adapter, int port_id)
+{
+ struct fw_vi_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute a VI command to allocate Virtual Interface and return its
+ * VIID.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE |
+ F_FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+ F_FW_VI_CMD_ALLOC);
+ cmd.portid_pkd = V_FW_VI_CMD_PORTID(port_id);
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+ return G_FW_VI_CMD_VIID(be16_to_cpu(rpl.type_to_viid));
+}
+
+int t4vf_port_init(struct adapter *adapter)
+{
+ unsigned int fw_caps = adapter->params.fw_caps_support;
+ struct fw_port_cmd port_cmd, port_rpl;
+ struct fw_vi_cmd vi_cmd, vi_rpl;
+ fw_port_cap32_t pcaps, acaps;
+ enum fw_port_type port_type;
+ int mdio_addr;
+ int ret, i;
+
+ for_each_port(adapter, i) {
+ struct port_info *p = adap2pinfo(adapter, i);
+
+ /*
+ * If we haven't yet determined if we're talking to Firmware
+ * which knows the new 32-bit Port Caps, it's time to find
+ * out now. This will also tell new Firmware to send us Port
+ * Status Updates using the new 32-bit Port Capabilities
+ * version of the Port Information message.
+ */
+ if (fw_caps == FW_CAPS_UNKNOWN) {
+ u32 param, val;
+
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X
+ (FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
+ val = 1;
+ ret = t4vf_set_params(adapter, 1, &param, &val);
+ fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
+ adapter->params.fw_caps_support = fw_caps;
+ }
+
+ ret = t4vf_alloc_vi(adapter, p->port_id);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot allocate VI for port %d:"
+ " err=%d\n", p->port_id, ret);
+ return ret;
+ }
+ p->viid = ret;
+
+ /*
+ * Execute a VI Read command to get our Virtual Interface
+ * information like MAC address, etc.
+ */
+ memset(&vi_cmd, 0, sizeof(vi_cmd));
+ vi_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
+ vi_cmd.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(p->viid));
+ ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ p->rss_size = G_FW_VI_CMD_RSSSIZE
+ (be16_to_cpu(vi_rpl.norss_rsssize));
+ t4_os_set_hw_addr(adapter, i, vi_rpl.mac);
+
+ /*
+ * If we don't have read access to our port information, we're
+ * done now. Else, execute a PORT Read command to get it ...
+ */
+ if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
+ return 0;
+
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_portid = cpu_to_be32
+ (V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PORT_CMD_PORTID(p->port_id));
+ port_cmd.action_to_len16 = cpu_to_be32
+ (V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
+ FW_PORT_ACTION_GET_PORT_INFO :
+ FW_PORT_ACTION_GET_PORT_INFO32) |
+ FW_LEN16(port_cmd));
+ ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd),
+ &port_rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ /*
+ * Extract the various fields from the Port Information message.
+ */
+ if (fw_caps == FW_CAPS16) {
+ u32 lstatus = be32_to_cpu
+ (port_rpl.u.info.lstatus_to_modtype);
+
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP) ?
+ (int)G_FW_PORT_CMD_MDIOADDR(lstatus) :
+ -1);
+ pcaps = fwcaps16_to_caps32
+ (be16_to_cpu(port_rpl.u.info.pcap));
+ acaps = fwcaps16_to_caps32
+ (be16_to_cpu(port_rpl.u.info.acap));
+ } else {
+ u32 lstatus32 = be32_to_cpu
+ (port_rpl.u.info32.lstatus32_to_cbllen32);
+
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
+ (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
+ -1);
+ pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32);
+ acaps = be32_to_cpu(port_rpl.u.info32.acaps32);
+ }
+
+ p->port_type = port_type;
+ p->mdio_addr = mdio_addr;
+ p->mod_type = FW_PORT_MOD_TYPE_NA;
+ init_link_config(&p->link_cfg, pcaps, acaps);
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.h
new file mode 100644
index 00000000..55e436e7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4VF_HW_H
+#define __T4VF_HW_H
+
+#define T4VF_PL_BASE_ADDR 0x0200
+#define T4VF_CIM_BASE_ADDR 0x0300
+#define T4VF_MBDATA_BASE_ADDR 0x0240
+#define T6VF_MBDATA_BASE_ADDR 0x0280
+
+#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES NUM_CIM_PF_MAILBOX_DATA_INSTANCES
+#endif /* __T4VF_HW_H */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.c b/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.c
new file mode 100644
index 00000000..5e4dc527
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.c
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include "common.h"
+#include "clip_tbl.h"
+
+/**
+ * Allocate clip entry in HW with associated IPV4/IPv6 address
+ */
+static int clip6_get_mbox(const struct rte_eth_dev *dev, const u32 *lip)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct fw_clip_cmd c;
+ u64 hi = ((u64)lip[1]) << 32 | lip[0];
+ u64 lo = ((u64)lip[3]) << 32 | lip[2];
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
+ c.ip_hi = hi;
+ c.ip_lo = lo;
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+/**
+ * Delete clip entry in HW having the associated IPV4/IPV6 address
+ */
+static int clip6_release_mbox(const struct rte_eth_dev *dev, const u32 *lip)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct fw_clip_cmd c;
+ u64 hi = ((u64)lip[1]) << 32 | lip[0];
+ u64 lo = ((u64)lip[3]) << 32 | lip[2];
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
+ c.ip_hi = hi;
+ c.ip_lo = lo;
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+/**
+ * cxgbe_clip_release - Release associated CLIP entry
+ * @ce: clip entry to release
+ *
+ * Releases ref count and frees up a clip entry from CLIP table
+ */
+void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce)
+{
+ int ret;
+
+ t4_os_lock(&ce->lock);
+ if (rte_atomic32_dec_and_test(&ce->refcnt)) {
+ ret = clip6_release_mbox(dev, ce->addr);
+ if (ret)
+ dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret);
+ }
+ t4_os_unlock(&ce->lock);
+}
+
+/**
+ * find_or_alloc_clipe - Find/Allocate a free CLIP entry
+ * @c: CLIP table
+ * @lip: IPV4/IPV6 address to compare/add
+ * Returns pointer to the IPV4/IPV6 entry found/created
+ *
+ * Finds/Allocates an CLIP entry to be used for a filter rule.
+ */
+static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c,
+ const u32 *lip)
+{
+ struct clip_entry *end, *e;
+ struct clip_entry *first_free = NULL;
+ unsigned int clipt_size = c->clipt_size;
+
+ for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) {
+ if (rte_atomic32_read(&e->refcnt) == 0) {
+ if (!first_free)
+ first_free = e;
+ } else {
+ if (memcmp(lip, e->addr, sizeof(e->addr)) == 0)
+ goto exists;
+ }
+ }
+
+ if (first_free) {
+ e = first_free;
+ goto exists;
+ }
+
+ return NULL;
+
+exists:
+ return e;
+}
+
+static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
+ u32 *lip, u8 v6)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct clip_tbl *ctbl = adap->clipt;
+ struct clip_entry *ce;
+ int ret = 0;
+
+ if (!ctbl)
+ return NULL;
+
+ t4_os_write_lock(&ctbl->lock);
+ ce = find_or_alloc_clipe(ctbl, lip);
+ if (ce) {
+ t4_os_lock(&ce->lock);
+ if (!rte_atomic32_read(&ce->refcnt)) {
+ rte_memcpy(ce->addr, lip, sizeof(ce->addr));
+ if (v6) {
+ ce->type = FILTER_TYPE_IPV6;
+ rte_atomic32_set(&ce->refcnt, 1);
+ ret = clip6_get_mbox(dev, lip);
+ if (ret)
+ dev_debug(adap,
+ "CLIP FW ADD CMD failed: %d",
+ ret);
+ } else {
+ ce->type = FILTER_TYPE_IPV4;
+ }
+ } else {
+ rte_atomic32_inc(&ce->refcnt);
+ }
+ t4_os_unlock(&ce->lock);
+ }
+ t4_os_write_unlock(&ctbl->lock);
+
+ return ret ? NULL : ce;
+}
+
+/**
+ * cxgbe_clip_alloc - Allocate a IPV6 CLIP entry
+ * @dev: rte_eth_dev pointer
+ * @lip: IPV6 address to add
+ * Returns pointer to the CLIP entry created
+ *
+ * Allocates a IPV6 CLIP entry to be used for a filter rule.
+ */
+struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip)
+{
+ return t4_clip_alloc(dev, lip, FILTER_TYPE_IPV6);
+}
+
+/**
+ * Initialize CLIP Table
+ */
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+ unsigned int clipt_end)
+{
+ unsigned int clipt_size;
+ struct clip_tbl *ctbl;
+ unsigned int i;
+
+ if (clipt_start >= clipt_end)
+ return NULL;
+
+ clipt_size = clipt_end - clipt_start + 1;
+
+ ctbl = t4_os_alloc(sizeof(*ctbl) +
+ clipt_size * sizeof(struct clip_entry));
+ if (!ctbl)
+ return NULL;
+
+ ctbl->clipt_start = clipt_start;
+ ctbl->clipt_size = clipt_size;
+
+ t4_os_rwlock_init(&ctbl->lock);
+
+ for (i = 0; i < ctbl->clipt_size; i++) {
+ t4_os_lock_init(&ctbl->cl_list[i].lock);
+ rte_atomic32_set(&ctbl->cl_list[i].refcnt, 0);
+ }
+
+ return ctbl;
+}
+
+/**
+ * Cleanup CLIP Table
+ */
+void t4_cleanup_clip_tbl(struct adapter *adap)
+{
+ if (adap->clipt)
+ t4_os_free(adap->clipt);
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.h b/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.h
new file mode 100644
index 00000000..737ccc69
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_CLIP_H_
+#define _CXGBE_CLIP_H_
+
+/*
+ * State for the corresponding entry of the HW CLIP table.
+ */
+struct clip_entry {
+ enum filter_type type; /* entry type */
+ u32 addr[4]; /* IPV4 or IPV6 address */
+ rte_spinlock_t lock; /* entry lock */
+ rte_atomic32_t refcnt; /* entry reference count */
+};
+
+struct clip_tbl {
+ unsigned int clipt_start; /* start index of CLIP table */
+ unsigned int clipt_size; /* size of CLIP table */
+ rte_rwlock_t lock; /* table rw lock */
+ struct clip_entry cl_list[0]; /* MUST BE LAST */
+};
+
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+ unsigned int clipt_end);
+void t4_cleanup_clip_tbl(struct adapter *adap);
+struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip);
+void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce);
+#endif /* _CXGBE_CLIP_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe.h
new file mode 100644
index 00000000..5e6f5c98
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_H_
+#define _CXGBE_H_
+
+#include "common.h"
+#include "t4_regs.h"
+
+#define CXGBE_MIN_RING_DESC_SIZE 128 /* Min TX/RX descriptor ring size */
+#define CXGBE_MAX_RING_DESC_SIZE 4096 /* Max TX/RX descriptor ring size */
+
+#define CXGBE_DEFAULT_TX_DESC_SIZE 1024 /* Default TX ring size */
+#define CXGBE_DEFAULT_RX_DESC_SIZE 1024 /* Default RX ring size */
+
+#define CXGBE_MIN_RX_BUFSIZE ETHER_MIN_MTU /* min buf size */
+#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */
+
+/* Max poll time is 100 * 100msec = 10 sec */
+#define CXGBE_LINK_STATUS_POLL_MS 100 /* 100ms */
+#define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
+
+#define CXGBE_DEFAULT_RSS_KEY_LEN 40 /* 320-bits */
+#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_OTHER)
+#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_OTHER | \
+ ETH_RSS_IPV6_EX)
+#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_IPV6_TCP_EX)
+#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_IPV6_UDP_EX)
+#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+#define CXGBE_DEVARG_KEEP_OVLAN "keep_ovlan"
+#define CXGBE_DEVARG_FORCE_LINK_UP "force_link_up"
+
+bool force_linkup(struct adapter *adap);
+int cxgbe_probe(struct adapter *adapter);
+int cxgbevf_probe(struct adapter *adapter);
+void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps);
+int cxgbe_set_link_status(struct port_info *pi, bool status);
+int cxgbe_up(struct adapter *adap);
+int cxgbe_down(struct port_info *pi);
+void cxgbe_close(struct adapter *adapter);
+void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats);
+void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats);
+void cxgbe_stats_reset(struct port_info *pi);
+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt, struct t4_completion *c);
+int link_start(struct port_info *pi);
+void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us,
+ unsigned int cnt, unsigned int size, unsigned int iqe_size);
+int setup_sge_fwevtq(struct adapter *adapter);
+int setup_sge_ctrl_txq(struct adapter *adapter);
+void cfg_queues(struct rte_eth_dev *eth_dev);
+int cfg_queue_count(struct rte_eth_dev *eth_dev);
+int init_rss(struct adapter *adap);
+int setup_rss(struct port_info *pi);
+void cxgbe_enable_rx_queues(struct port_info *pi);
+void print_port_info(struct adapter *adap);
+void print_adapter_info(struct adapter *adap);
+int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key);
+void configure_max_ethqsets(struct adapter *adapter);
+
+#endif /* _CXGBE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_compat.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_compat.h
new file mode 100644
index 00000000..5d47c5f3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_compat.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_COMPAT_H_
+#define _CXGBE_COMPAT_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_memcpy.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#define dev_printf(level, fmt, args...) \
+ RTE_LOG(level, PMD, "rte_cxgbe_pmd: " fmt, ## args)
+
+#define dev_err(x, args...) dev_printf(ERR, args)
+#define dev_info(x, args...) dev_printf(INFO, args)
+#define dev_warn(x, args...) dev_printf(WARNING, args)
+
+#ifdef RTE_LIBRTE_CXGBE_DEBUG
+#define dev_debug(x, args...) dev_printf(DEBUG, args)
+#else
+#define dev_debug(x, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_CXGBE_DEBUG_REG
+#define CXGBE_DEBUG_REG(x, args...) dev_printf(DEBUG, "REG:" args)
+#else
+#define CXGBE_DEBUG_REG(x, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_CXGBE_DEBUG_MBOX
+#define CXGBE_DEBUG_MBOX(x, args...) dev_printf(DEBUG, "MBOX:" args)
+#else
+#define CXGBE_DEBUG_MBOX(x, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_CXGBE_DEBUG_TX
+#define CXGBE_DEBUG_TX(x, args...) dev_printf(DEBUG, "TX:" args)
+#else
+#define CXGBE_DEBUG_TX(x, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_CXGBE_DEBUG_RX
+#define CXGBE_DEBUG_RX(x, args...) dev_printf(DEBUG, "RX:" args)
+#else
+#define CXGBE_DEBUG_RX(x, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_CXGBE_DEBUG
+#define CXGBE_FUNC_TRACE() \
+ RTE_LOG(DEBUG, PMD, "CXGBE trace: %s\n", __func__)
+#else
+#define CXGBE_FUNC_TRACE() do { } while (0)
+#endif
+
+#define pr_err(y, args...) dev_err(0, y, ##args)
+#define pr_warn(y, args...) dev_warn(0, y, ##args)
+#define pr_info(y, args...) dev_info(0, y, ##args)
+#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__)
+
+#define ASSERT(x) do {\
+ if (!(x)) \
+ rte_panic("CXGBE: x"); \
+} while (0)
+#define BUG_ON(x) ASSERT(!(x))
+
+#ifndef WARN_ON
+#define WARN_ON(x) do { \
+ int ret = !!(x); \
+ if (unlikely(ret)) \
+ pr_warn("WARN_ON: \"" #x "\" at %s:%d\n", __func__, __LINE__); \
+} while (0)
+#endif
+
+#define __iomem
+
+#ifndef BIT
+#define BIT(n) (1 << (n))
+#endif
+
+#define L1_CACHE_SHIFT 6
+#define L1_CACHE_BYTES BIT(L1_CACHE_SHIFT)
+
+#define PAGE_SHIFT 12
+#define CXGBE_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+#define PTR_ALIGN(p, a) ((typeof(p))CXGBE_ALIGN((unsigned long)(p), (a)))
+
+#define VLAN_HLEN 4
+
+#define rmb() rte_rmb() /* dpdk rte provided rmb */
+#define wmb() rte_wmb() /* dpdk rte provided wmb */
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef int bool;
+typedef uint64_t dma_addr_t;
+
+#ifndef __le16
+#define __le16 uint16_t
+#endif
+#ifndef __le32
+#define __le32 uint32_t
+#endif
+#ifndef __le64
+#define __le64 uint64_t
+#endif
+#ifndef __be16
+#define __be16 uint16_t
+#endif
+#ifndef __be32
+#define __be32 uint32_t
+#endif
+#ifndef __be64
+#define __be64 uint64_t
+#endif
+#ifndef __u8
+#define __u8 uint8_t
+#endif
+#ifndef __u16
+#define __u16 uint16_t
+#endif
+#ifndef __u32
+#define __u32 uint32_t
+#endif
+#ifndef __u64
+#define __u64 uint64_t
+#endif
+
+#define FALSE 0
+#define TRUE 1
+#define false 0
+#define true 1
+
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+/*
+ * round up val _p to a power of 2 size _s
+ */
+#define cxgbe_roundup(_p, _s) (((unsigned long)(_p) + (_s - 1)) & ~(_s - 1))
+
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+#define cpu_to_be16(o) rte_cpu_to_be_16(o)
+#define cpu_to_be32(o) rte_cpu_to_be_32(o)
+#define cpu_to_be64(o) rte_cpu_to_be_64(o)
+#define cpu_to_le32(o) rte_cpu_to_le_32(o)
+#define be16_to_cpu(o) rte_be_to_cpu_16(o)
+#define be32_to_cpu(o) rte_be_to_cpu_32(o)
+#define be64_to_cpu(o) rte_be_to_cpu_64(o)
+#define le32_to_cpu(o) rte_le_to_cpu_32(o)
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define DELAY(x) rte_delay_us(x)
+#define udelay(x) DELAY(x)
+#define msleep(x) DELAY(1000 * (x))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+static inline uint8_t hweight32(uint32_t word32)
+{
+ uint32_t res = word32 - ((word32 >> 1) & 0x55555555);
+
+ res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+ res = (res + (res >> 4)) & 0x0F0F0F0F;
+ res = res + (res >> 8);
+ return (res + (res >> 16)) & 0x000000FF;
+
+} /* weight32 */
+
+/**
+ * cxgbe_fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note cxgbe_fls(0) = 0, cxgbe_fls(1) = 1, cxgbe_fls(0x80000000) = 32.
+ */
+static inline int cxgbe_fls(int x)
+{
+ return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
+}
+
+static inline unsigned long ilog2(unsigned long n)
+{
+ unsigned int e = 0;
+
+ while (n) {
+ if (n & ~((1 << 8) - 1)) {
+ e += 8;
+ n >>= 8;
+ continue;
+ }
+
+ if (n & ~((1 << 4) - 1)) {
+ e += 4;
+ n >>= 4;
+ }
+
+ for (;;) {
+ n >>= 1;
+ if (n == 0)
+ break;
+ e++;
+ }
+ }
+
+ return e;
+}
+
+static inline void writel(unsigned int val, volatile void __iomem *addr)
+{
+ rte_write32(val, addr);
+}
+
+static inline void writeq(u64 val, volatile void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, (void *)((uintptr_t)addr + 4));
+}
+
+static inline void writel_relaxed(unsigned int val, volatile void __iomem *addr)
+{
+ rte_write32_relaxed(val, addr);
+}
+
+/*
+ * Multiplies an integer by a fraction, while avoiding unnecessary
+ * overflow or loss of precision.
+ */
+#define mult_frac(x, numer, denom)( \
+{ \
+ typeof(x) quot = (x) / (denom); \
+ typeof(x) rem = (x) % (denom); \
+ (quot * (numer)) + ((rem * (numer)) / (denom)); \
+} \
+)
+
+#endif /* _CXGBE_COMPAT_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c
new file mode 100644
index 00000000..4dcad7a2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -0,0 +1,1239 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+
+#include "cxgbe.h"
+#include "cxgbe_pfvf.h"
+#include "cxgbe_flow.h"
+
+/*
+ * Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static const struct rte_pci_id cxgb4_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+
+#define PCI_VENDOR_ID_CHELSIO 0x1425
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+ { .vendor_id = 0, } \
+ }
+
+/*
+ *... and the PCI ID Table itself ...
+ */
+#include "t4_pci_id_tbl.h"
+
+#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT |\
+ DEV_TX_OFFLOAD_IPV4_CKSUM |\
+ DEV_TX_OFFLOAD_UDP_CKSUM |\
+ DEV_TX_OFFLOAD_TCP_CKSUM |\
+ DEV_TX_OFFLOAD_TCP_TSO)
+
+#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP |\
+ DEV_RX_OFFLOAD_CRC_STRIP |\
+ DEV_RX_OFFLOAD_IPV4_CKSUM |\
+ DEV_RX_OFFLOAD_JUMBO_FRAME |\
+ DEV_RX_OFFLOAD_UDP_CKSUM |\
+ DEV_RX_OFFLOAD_TCP_CKSUM)
+
+uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
+ uint16_t pkts_sent, pkts_remain;
+ uint16_t total_sent = 0;
+ int ret = 0;
+
+ CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
+ __func__, txq, tx_pkts, nb_pkts);
+
+ t4_os_lock(&txq->txq_lock);
+ /* free up desc from already completed tx */
+ reclaim_completed_tx(&txq->q);
+ while (total_sent < nb_pkts) {
+ pkts_remain = nb_pkts - total_sent;
+
+ for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
+ ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent],
+ nb_pkts);
+ if (ret < 0)
+ break;
+ }
+ if (!pkts_sent)
+ break;
+ total_sent += pkts_sent;
+ /* reclaim as much as possible */
+ reclaim_completed_tx(&txq->q);
+ }
+
+ t4_os_unlock(&txq->txq_lock);
+ return total_sent;
+}
+
+uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
+ unsigned int work_done;
+
+ CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
+ __func__, rxq->rspq.cntxt_id, nb_pkts);
+
+ if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
+ dev_err(adapter, "error in cxgbe poll\n");
+
+ CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
+ return work_done;
+}
+
+void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;
+
+ static const struct rte_eth_desc_lim cxgbe_desc_lim = {
+ .nb_max = CXGBE_MAX_RING_DESC_SIZE,
+ .nb_min = CXGBE_MIN_RING_DESC_SIZE,
+ .nb_align = 1,
+ };
+
+ device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
+ device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
+ device_info->max_rx_queues = max_queues;
+ device_info->max_tx_queues = max_queues;
+ device_info->max_mac_addrs = 1;
+ /* XXX: For now we support one MAC/port */
+ device_info->max_vfs = adapter->params.arch.vfcount;
+ device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
+
+ device_info->rx_queue_offload_capa = 0UL;
+ device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;
+
+ device_info->tx_queue_offload_capa = 0UL;
+ device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;
+
+ device_info->reta_size = pi->rss_size;
+ device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
+ device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;
+
+ device_info->rx_desc_lim = cxgbe_desc_lim;
+ device_info->tx_desc_lim = cxgbe_desc_lim;
+ cxgbe_get_speed_caps(pi, &device_info->speed_capa);
+}
+
+void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ 1, -1, 1, -1, false);
+}
+
+void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ 0, -1, 1, -1, false);
+}
+
+void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ /* TODO: address filters ?? */
+
+ t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ -1, 1, 1, -1, false);
+}
+
+void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ /* TODO: address filters ?? */
+
+ t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ -1, 0, 1, -1, false);
+}
+
+int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct rte_eth_link new_link = { 0 };
+ unsigned int i, work_done, budget = 32;
+ u8 old_link = pi->link_cfg.link_ok;
+
+ for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+ /* Exit if link status changed or always forced up */
+ if (pi->link_cfg.link_ok != old_link || force_linkup(adapter))
+ break;
+
+ if (!wait_to_complete)
+ break;
+
+ rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
+ }
+
+ new_link.link_status = force_linkup(adapter) ?
+ ETH_LINK_UP : pi->link_cfg.link_ok;
+ new_link.link_autoneg = pi->link_cfg.autoneg;
+ new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_speed = pi->link_cfg.speed;
+
+ return rte_eth_linkstatus_set(eth_dev, &new_link);
+}
+
+/**
+ * Set device link up.
+ */
+int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ unsigned int work_done, budget = 32;
+ struct sge *s = &adapter->sge;
+ int ret;
+
+ /* Flush all link events */
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+ /* If link already up, nothing to do */
+ if (pi->link_cfg.link_ok)
+ return 0;
+
+ ret = cxgbe_set_link_status(pi, true);
+ if (ret)
+ return ret;
+
+ cxgbe_dev_link_update(dev, 1);
+ return 0;
+}
+
+/**
+ * Set device link down.
+ */
+int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ unsigned int work_done, budget = 32;
+ struct sge *s = &adapter->sge;
+ int ret;
+
+ /* Flush all link events */
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+ /* If link already down, nothing to do */
+ if (!pi->link_cfg.link_ok)
+ return 0;
+
+ ret = cxgbe_set_link_status(pi, false);
+ if (ret)
+ return ret;
+
+ cxgbe_dev_link_update(dev, 0);
+ return 0;
+}
+
+int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct rte_eth_dev_info dev_info;
+ int err;
+ uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ cxgbe_dev_info_get(eth_dev, &dev_info);
+
+ /* Must accommodate at least ETHER_MIN_MTU */
+ if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* set to jumbo mode if needed */
+ if (new_mtu > ETHER_MAX_LEN)
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
+ -1, -1, true);
+ if (!err)
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;
+
+ return err;
+}
+
+/*
+ * Stop device.
+ */
+void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ CXGBE_FUNC_TRACE();
+
+ if (!(adapter->flags & FULL_INIT_DONE))
+ return;
+
+ cxgbe_down(pi);
+
+ /*
+ * We clear queues only if both tx and rx path of the port
+ * have been disabled
+ */
+ t4_sge_eth_clear_queues(pi);
+}
+
+/* Start the device.
+ * It returns 0 on success.
+ */
+int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int err = 0, i;
+
+ CXGBE_FUNC_TRACE();
+
+ /*
+ * If we don't have a connection to the firmware there's nothing we
+ * can do.
+ */
+ if (!(adapter->flags & FW_OK)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+ err = cxgbe_up(adapter);
+ if (err < 0)
+ goto out;
+ }
+
+ cxgbe_enable_rx_queues(pi);
+
+ err = setup_rss(pi);
+ if (err)
+ goto out;
+
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ err = cxgbe_dev_tx_queue_start(eth_dev, i);
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ err = cxgbe_dev_rx_queue_start(eth_dev, i);
+ if (err)
+ goto out;
+ }
+
+ err = link_start(pi);
+ if (err)
+ goto out;
+
+out:
+ return err;
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ CXGBE_FUNC_TRACE();
+
+ if (!(adapter->flags & FULL_INIT_DONE))
+ return;
+
+ cxgbe_down(pi);
+
+ /*
+ * We clear queues only if both tx and rx path of the port
+ * have been disabled
+ */
+ t4_sge_eth_clear_queues(pi);
+}
+
+int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ uint64_t configured_offloads;
+ int err;
+
+ CXGBE_FUNC_TRACE();
+ configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+
+ /* KEEP_CRC offload flag is not supported by PMD
+ * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
+ */
+ if (rte_eth_dev_must_keep_crc(configured_offloads)) {
+ dev_info(adapter, "can't disable hw crc strip\n");
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_CRC_STRIP;
+ }
+
+ if (!(adapter->flags & FW_QUEUE_BOUND)) {
+ err = setup_sge_fwevtq(adapter);
+ if (err)
+ return err;
+ adapter->flags |= FW_QUEUE_BOUND;
+ if (is_pf4(adapter)) {
+ err = setup_sge_ctrl_txq(adapter);
+ if (err)
+ return err;
+ }
+ }
+
+ err = cfg_queue_count(eth_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+ int ret;
+ struct sge_eth_txq *txq = (struct sge_eth_txq *)
+ (eth_dev->data->tx_queues[tx_queue_id]);
+
+ dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
+
+ ret = t4_sge_eth_txq_start(txq);
+ if (ret == 0)
+ eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return ret;
+}
+
+int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+ int ret;
+ struct sge_eth_txq *txq = (struct sge_eth_txq *)
+ (eth_dev->data->tx_queues[tx_queue_id]);
+
+ dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
+
+ ret = t4_sge_eth_txq_stop(txq);
+ if (ret == 0)
+ eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return ret;
+}
+
+int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
+ int err = 0;
+ unsigned int temp_nb_desc;
+
+ dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
+ __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
+ socket_id, pi->first_qset);
+
+ /* Free up the existing queue */
+ if (eth_dev->data->tx_queues[queue_idx]) {
+ cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
+ eth_dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ eth_dev->data->tx_queues[queue_idx] = (void *)txq;
+
+ /* Sanity Checking
+ *
+ * nb_desc should be > 1023 and <= CXGBE_MAX_RING_DESC_SIZE
+ */
+ temp_nb_desc = nb_desc;
+ if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
+ dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
+ __func__, CXGBE_MIN_RING_DESC_SIZE,
+ CXGBE_DEFAULT_TX_DESC_SIZE);
+ temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
+ } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
+ dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
+ __func__, CXGBE_MIN_RING_DESC_SIZE,
+ CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
+ return -(EINVAL);
+ }
+
+ txq->q.size = temp_nb_desc;
+
+ err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
+ s->fw_evtq.cntxt_id, socket_id);
+
+ dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
+ __func__, txq->q.cntxt_id, txq->q.abs_id, err);
+ return err;
+}
+
+void cxgbe_dev_tx_queue_release(void *q)
+{
+ struct sge_eth_txq *txq = (struct sge_eth_txq *)q;
+
+ if (txq) {
+ struct port_info *pi = (struct port_info *)
+ (txq->eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+
+ dev_debug(adapter, "%s: pi->port_id = %d; tx_queue_id = %d\n",
+ __func__, pi->port_id, txq->q.cntxt_id);
+
+ t4_sge_eth_txq_release(adap, txq);
+ }
+}
+
+int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+ int ret;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+ struct sge_rspq *q;
+
+ dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
+ __func__, pi->port_id, rx_queue_id);
+
+ q = eth_dev->data->rx_queues[rx_queue_id];
+
+ ret = t4_sge_eth_rxq_start(adap, q);
+ if (ret == 0)
+ eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return ret;
+}
+
+int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+ int ret;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+ struct sge_rspq *q;
+
+ dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
+ __func__, pi->port_id, rx_queue_id);
+
+ q = eth_dev->data->rx_queues[rx_queue_id];
+ ret = t4_sge_eth_rxq_stop(adap, q);
+ if (ret == 0)
+ eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return ret;
+}
+
+int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
+ int err = 0;
+ int msi_idx = 0;
+ unsigned int temp_nb_desc;
+ struct rte_eth_dev_info dev_info;
+ unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
+ __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
+ socket_id, mp);
+
+ cxgbe_dev_info_get(eth_dev, &dev_info);
+
+ /* Must accommodate at least ETHER_MIN_MTU */
+ if ((pkt_len < dev_info.min_rx_bufsize) ||
+ (pkt_len > dev_info.max_rx_pktlen)) {
+ dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
+ __func__, dev_info.min_rx_bufsize,
+ dev_info.max_rx_pktlen);
+ return -EINVAL;
+ }
+
+ /* Free up the existing queue */
+ if (eth_dev->data->rx_queues[queue_idx]) {
+ cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
+ eth_dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ eth_dev->data->rx_queues[queue_idx] = (void *)rxq;
+
+ /* Sanity Checking
+ *
+ * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE
+ */
+ temp_nb_desc = nb_desc;
+ if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
+ dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
+ __func__, CXGBE_MIN_RING_DESC_SIZE,
+ CXGBE_DEFAULT_RX_DESC_SIZE);
+ temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
+ } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
+ dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
+ __func__, CXGBE_MIN_RING_DESC_SIZE,
+ CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
+ return -(EINVAL);
+ }
+
+ rxq->rspq.size = temp_nb_desc;
+ if ((&rxq->fl) != NULL)
+ rxq->fl.size = temp_nb_desc;
+
+ /* Set to jumbo mode if necessary */
+ if (pkt_len > ETHER_MAX_LEN)
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
+ &rxq->fl, t4_ethrx_handler,
+ is_pf4(adapter) ?
+ t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
+ queue_idx, socket_id);
+
+ dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
+ __func__, err, pi->port_id, rxq->rspq.cntxt_id,
+ rxq->rspq.abs_id);
+ return err;
+}
+
+void cxgbe_dev_rx_queue_release(void *q)
+{
+ struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
+ struct sge_rspq *rq = &rxq->rspq;
+
+ if (rq) {
+ struct port_info *pi = (struct port_info *)
+ (rq->eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+
+ dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
+ __func__, pi->port_id, rxq->rspq.cntxt_id);
+
+ t4_sge_eth_rxq_release(adap, rxq);
+ }
+}
+
+/*
+ * Get port statistics.
+ */
+static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *eth_stats)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct port_stats ps;
+ unsigned int i;
+
+ cxgbe_stats_get(pi, &ps);
+
+ /* RX Stats */
+ eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 +
+ ps.rx_ovflow2 + ps.rx_ovflow3 +
+ ps.rx_trunc0 + ps.rx_trunc1 +
+ ps.rx_trunc2 + ps.rx_trunc3;
+ eth_stats->ierrors = ps.rx_symbol_err + ps.rx_fcs_err +
+ ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
+ ps.rx_len_err;
+
+ /* TX Stats */
+ eth_stats->opackets = ps.tx_frames;
+ eth_stats->obytes = ps.tx_octets;
+ eth_stats->oerrors = ps.tx_error_frames;
+
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ struct sge_eth_rxq *rxq =
+ &s->ethrxq[pi->first_qset + i];
+
+ eth_stats->q_ipackets[i] = rxq->stats.pkts;
+ eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
+ eth_stats->ipackets += eth_stats->q_ipackets[i];
+ eth_stats->ibytes += eth_stats->q_ibytes[i];
+ }
+
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ struct sge_eth_txq *txq =
+ &s->ethtxq[pi->first_qset + i];
+
+ eth_stats->q_opackets[i] = txq->stats.pkts;
+ eth_stats->q_obytes[i] = txq->stats.tx_bytes;
+ eth_stats->q_errors[i] = txq->stats.mapping_err;
+ }
+ return 0;
+}
+
+/*
+ * Reset port statistics.
+ */
+static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ unsigned int i;
+
+ cxgbe_stats_reset(pi);
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ struct sge_eth_rxq *rxq =
+ &s->ethrxq[pi->first_qset + i];
+
+ rxq->stats.pkts = 0;
+ rxq->stats.rx_bytes = 0;
+ }
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ struct sge_eth_txq *txq =
+ &s->ethtxq[pi->first_qset + i];
+
+ txq->stats.pkts = 0;
+ txq->stats.tx_bytes = 0;
+ txq->stats.mapping_err = 0;
+ }
+}
+
+static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct link_config *lc = &pi->link_cfg;
+ int rx_pause, tx_pause;
+
+ fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
+ rx_pause = lc->fc & PAUSE_RX;
+ tx_pause = lc->fc & PAUSE_TX;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+ return 0;
+}
+
+static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct link_config *lc = &pi->link_cfg;
+
+ if (lc->pcaps & FW_PORT_CAP32_ANEG) {
+ if (fc_conf->autoneg)
+ lc->requested_fc |= PAUSE_AUTONEG;
+ else
+ lc->requested_fc &= ~PAUSE_AUTONEG;
+ }
+
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_RX_PAUSE))
+ lc->requested_fc |= PAUSE_RX;
+ else
+ lc->requested_fc &= ~PAUSE_RX;
+
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_TX_PAUSE))
+ lc->requested_fc |= PAUSE_TX;
+ else
+ lc->requested_fc &= ~PAUSE_TX;
+
+ return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
+ &pi->link_cfg);
+}
+
+const uint32_t *
+cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
+ return ptypes;
+ return NULL;
+}
+
+/* Update RSS hash configuration
+ */
+static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int err;
+
+ err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
+ if (err)
+ return err;
+
+ pi->rss_hf = rss_conf->rss_hf;
+
+ if (rss_conf->rss_key) {
+ u32 key[10], mod_key[10];
+ int i, j;
+
+ memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);
+
+ for (i = 9, j = 0; i >= 0; i--, j++)
+ mod_key[j] = cpu_to_be32(key[i]);
+
+ t4_write_rss_key(adapter, mod_key, -1);
+ }
+
+ return 0;
+}
+
+/* Get RSS hash configuration
+ */
+static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ u64 rss_hf = 0;
+ u64 flags = 0;
+ int err;
+
+ err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ &flags, NULL);
+
+ if (err)
+ return err;
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
+ rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
+ rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
+ }
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ }
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ rss_hf |= CXGBE_RSS_HF_IPV4_MASK;
+
+ rss_conf->rss_hf = rss_hf;
+
+ if (rss_conf->rss_key) {
+ u32 key[10], mod_key[10];
+ int i, j;
+
+ t4_read_rss_key(adapter, key);
+
+ for (i = 9, j = 0; i >= 0; i--, j++)
+ mod_key[j] = be32_to_cpu(key[i]);
+
+ memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
+ }
+
+ return 0;
+}
+
+static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+ return EEPROMSIZE;
+}
+
+/**
+ * eeprom_ptov - translate a physical EEPROM address to virtual
+ * @phys_addr: the physical EEPROM address
+ * @fn: the PCI function number
+ * @sz: size of function-specific area
+ *
+ * Translate a physical EEPROM address to virtual. The first 1K is
+ * accessed through virtual addresses starting at 31K, the rest is
+ * accessed through virtual addresses starting at 0.
+ *
+ * The mapping is as follows:
+ * [0..1K) -> [31K..32K)
+ * [1K..1K+A) -> [31K-A..31K)
+ * [1K+A..ES) -> [0..ES-A-1K)
+ *
+ * where A = @fn * @sz, and ES = EEPROM size.
+ */
+static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
+{
+ fn *= sz;
+ if (phys_addr < 1024)
+ return phys_addr + (31 << 10);
+ if (phys_addr < 1024 + fn)
+ return fn + phys_addr - 1024;
+ if (phys_addr < EEPROMSIZE)
+ return phys_addr - 1024 - fn;
+ if (phys_addr < EEPROMVSIZE)
+ return phys_addr - 1024;
+ return -EINVAL;
+}
+
+/* The next two routines implement eeprom read/write from physical addresses.
+ */
+static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
+{
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
+
+ if (vaddr >= 0)
+ vaddr = t4_seeprom_read(adap, vaddr, v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
+{
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
+
+ if (vaddr >= 0)
+ vaddr = t4_seeprom_write(adap, vaddr, v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *e)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ u32 i, err = 0;
+ u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);
+
+ if (!buf)
+ return -ENOMEM;
+
+ e->magic = EEPROM_MAGIC;
+ for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
+ err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
+
+ if (!err)
+ rte_memcpy(e->data, buf + e->offset, e->length);
+ rte_free(buf);
+ return err;
+}
+
+static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ u8 *buf;
+ int err = 0;
+ u32 aligned_offset, aligned_len, *p;
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return -EINVAL;
+
+ aligned_offset = eeprom->offset & ~3;
+ aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;
+
+ if (adapter->pf > 0) {
+ u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
+
+ if (aligned_offset < start ||
+ aligned_offset + aligned_len > start + EEPROMPFSIZE)
+ return -EPERM;
+ }
+
+ if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
+ /* RMW possibly needed for first or last words.
+ */
+ buf = rte_zmalloc(NULL, aligned_len, 0);
+ if (!buf)
+ return -ENOMEM;
+ err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
+ if (!err && aligned_len > 4)
+ err = eeprom_rd_phys(adapter,
+ aligned_offset + aligned_len - 4,
+ (u32 *)&buf[aligned_len - 4]);
+ if (err)
+ goto out;
+ rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
+ eeprom->length);
+ } else {
+ buf = eeprom->data;
+ }
+
+ err = t4_seeprom_wp(adapter, false);
+ if (err)
+ goto out;
+
+ for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
+ err = eeprom_wr_phys(adapter, aligned_offset, *p);
+ aligned_offset += 4;
+ }
+
+ if (!err)
+ err = t4_seeprom_wp(adapter, true);
+out:
+ if (buf != eeprom->data)
+ rte_free(buf);
+ return err;
+}
+
+static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ return t4_get_regs_len(adapter) / sizeof(uint32_t);
+}
+
+static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
+ (CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
+ (1 << 16);
+
+ if (regs->data == NULL) {
+ regs->length = cxgbe_get_regs_len(eth_dev);
+ regs->width = sizeof(uint32_t);
+
+ return 0;
+ }
+
+ t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));
+
+ return 0;
+}
+
+int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
+ pi->xact_addr_filt, (u8 *)addr, true, true);
+ if (ret < 0) {
+ dev_err(adapter, "failed to set mac addr; err = %d\n",
+ ret);
+ return ret;
+ }
+ pi->xact_addr_filt = ret;
+ return 0;
+}
+
+static const struct eth_dev_ops cxgbe_eth_dev_ops = {
+ .dev_start = cxgbe_dev_start,
+ .dev_stop = cxgbe_dev_stop,
+ .dev_close = cxgbe_dev_close,
+ .promiscuous_enable = cxgbe_dev_promiscuous_enable,
+ .promiscuous_disable = cxgbe_dev_promiscuous_disable,
+ .allmulticast_enable = cxgbe_dev_allmulticast_enable,
+ .allmulticast_disable = cxgbe_dev_allmulticast_disable,
+ .dev_configure = cxgbe_dev_configure,
+ .dev_infos_get = cxgbe_dev_info_get,
+ .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
+ .link_update = cxgbe_dev_link_update,
+ .dev_set_link_up = cxgbe_dev_set_link_up,
+ .dev_set_link_down = cxgbe_dev_set_link_down,
+ .mtu_set = cxgbe_dev_mtu_set,
+ .tx_queue_setup = cxgbe_dev_tx_queue_setup,
+ .tx_queue_start = cxgbe_dev_tx_queue_start,
+ .tx_queue_stop = cxgbe_dev_tx_queue_stop,
+ .tx_queue_release = cxgbe_dev_tx_queue_release,
+ .rx_queue_setup = cxgbe_dev_rx_queue_setup,
+ .rx_queue_start = cxgbe_dev_rx_queue_start,
+ .rx_queue_stop = cxgbe_dev_rx_queue_stop,
+ .rx_queue_release = cxgbe_dev_rx_queue_release,
+ .filter_ctrl = cxgbe_dev_filter_ctrl,
+ .stats_get = cxgbe_dev_stats_get,
+ .stats_reset = cxgbe_dev_stats_reset,
+ .flow_ctrl_get = cxgbe_flow_ctrl_get,
+ .flow_ctrl_set = cxgbe_flow_ctrl_set,
+ .get_eeprom_length = cxgbe_get_eeprom_length,
+ .get_eeprom = cxgbe_get_eeprom,
+ .set_eeprom = cxgbe_set_eeprom,
+ .get_reg = cxgbe_get_regs,
+ .rss_hash_update = cxgbe_dev_rss_hash_update,
+ .rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get,
+ .mac_addr_set = cxgbe_mac_addr_set,
+};
+
+/*
+ * Initialize driver
+ * It returns 0 on success.
+ */
+static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = NULL;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ int err = 0;
+
+ CXGBE_FUNC_TRACE();
+
+ eth_dev->dev_ops = &cxgbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ /* for secondary processes, we attach to ethdevs allocated by primary
+ * and do minimal initialization.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ int i;
+
+ for (i = 1; i < MAX_NPORTS; i++) {
+ struct rte_eth_dev *rest_eth_dev;
+ char namei[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(namei, sizeof(namei), "%s_%d",
+ pci_dev->device.name, i);
+ rest_eth_dev = rte_eth_dev_attach_secondary(namei);
+ if (rest_eth_dev) {
+ rest_eth_dev->device = &pci_dev->device;
+ rest_eth_dev->dev_ops =
+ eth_dev->dev_ops;
+ rest_eth_dev->rx_pkt_burst =
+ eth_dev->rx_pkt_burst;
+ rest_eth_dev->tx_pkt_burst =
+ eth_dev->tx_pkt_burst;
+ rte_eth_dev_probing_finish(rest_eth_dev);
+ }
+ }
+ return 0;
+ }
+
+ snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
+ adapter = rte_zmalloc(name, sizeof(*adapter), 0);
+ if (!adapter)
+ return -1;
+
+ adapter->use_unpacked_mode = 1;
+ adapter->regs = (void *)pci_dev->mem_resource[0].addr;
+ if (!adapter->regs) {
+ dev_err(adapter, "%s: cannot map device registers\n", __func__);
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ adapter->pdev = pci_dev;
+ adapter->eth_dev = eth_dev;
+ pi->adapter = adapter;
+
+ err = cxgbe_probe(adapter);
+ if (err) {
+ dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
+ __func__, err);
+ goto out_free_adapter;
+ }
+
+ return 0;
+
+out_free_adapter:
+ rte_free(adapter);
+ return err;
+}
+
+static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+
+ /* Free up other ports and all resources */
+ cxgbe_close(adap);
+ return 0;
+}
+
+static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct port_info), eth_cxgbe_dev_init);
+}
+
+static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
+}
+
+static struct rte_pci_driver rte_cxgbe_pmd = {
+ .id_table = cxgb4_pci_tbl,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_cxgbe_pci_probe,
+ .remove = eth_cxgbe_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
+RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
+ CXGBE_DEVARG_KEEP_OVLAN "=<0|1> "
+ CXGBE_DEVARG_FORCE_LINK_UP "=<0|1> ");
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.c
new file mode 100644
index 00000000..7f0d3800
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.c
@@ -0,0 +1,1252 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#include <rte_net.h>
+#include "common.h"
+#include "t4_tcb.h"
+#include "t4_regs.h"
+#include "cxgbe_filter.h"
+#include "clip_tbl.h"
+
+/**
+ * Initialize Hash Filters
+ */
+int init_hash_filter(struct adapter *adap)
+{
+ unsigned int n_user_filters;
+ unsigned int user_filter_perc;
+ int ret;
+ u32 params[7], val[7];
+
+#define FW_PARAM_DEV(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
+ V_FW_PARAMS_PARAM_Y(0) | \
+ V_FW_PARAMS_PARAM_Z(0))
+
+ params[0] = FW_PARAM_DEV(NTID);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ params, val);
+ if (ret < 0)
+ return ret;
+ adap->tids.ntids = val[0];
+ adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+
+ user_filter_perc = 100;
+ n_user_filters = mult_frac(adap->tids.nftids,
+ user_filter_perc,
+ 100);
+
+ adap->tids.nftids = n_user_filters;
+ adap->params.hash_filter = 1;
+ return 0;
+}
+
+/**
+ * Validate if the requested filter specification can be set by checking
+ * if the requested features have been enabled
+ */
+int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
+{
+ u32 fconf;
+
+ /*
+ * Check for unconfigured fields being used.
+ */
+ fconf = adapter->params.tp.vlan_pri_map;
+
+#define S(_field) \
+ (fs->val._field || fs->mask._field)
+#define U(_mask, _field) \
+ (!(fconf & (_mask)) && S(_field))
+
+ if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto))
+ return -EOPNOTSUPP;
+
+#undef S
+#undef U
+
+ /*
+ * If the user is requesting that the filter action loop
+ * matching packets back out one of our ports, make sure that
+ * the egress port is in range.
+ */
+ if (fs->action == FILTER_SWITCH &&
+ fs->eport >= adapter->params.nports)
+ return -ERANGE;
+
+ /*
+ * Don't allow various trivially obvious bogus out-of-range
+ * values ...
+ */
+ if (fs->val.iport >= adapter->params.nports)
+ return -ERANGE;
+
+ return 0;
+}
+
+/**
+ * Get the queue to which the traffic must be steered to.
+ */
+static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
+ struct ch_filter_specification *fs)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int iq;
+
+ /*
+ * If the user has requested steering matching Ingress Packets
+ * to a specific Queue Set, we need to make sure it's in range
+ * for the port and map that into the Absolute Queue ID of the
+ * Queue Set's Response Queue.
+ */
+ if (!fs->dirsteer) {
+ iq = 0;
+ } else {
+ /*
+ * If the iq id is greater than the number of qsets,
+ * then assume it is an absolute qid.
+ */
+ if (fs->iq < pi->n_rx_qsets)
+ iq = adapter->sge.ethrxq[pi->first_qset +
+ fs->iq].rspq.abs_id;
+ else
+ iq = fs->iq;
+ }
+
+ return iq;
+}
+
+/* Return an error number if the indicated filter isn't writable ... */
+int writable_filter(struct filter_entry *f)
+{
+ if (f->locked)
+ return -EPERM;
+ if (f->pending)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * Send CPL_SET_TCB_FIELD message
+ */
+static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
+ u16 word, u64 mask, u64 val, int no_reply)
+{
+ struct rte_mbuf *mbuf;
+ struct cpl_set_tcb_field *req;
+ struct sge_ctrl_txq *ctrlq;
+
+ ctrlq = &adapter->sge.ctrlq[0];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ WARN_ON(!mbuf);
+
+ mbuf->data_len = sizeof(*req);
+ mbuf->pkt_len = mbuf->data_len;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
+ memset(req, 0, sizeof(*req));
+ INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
+ req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
+ V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
+ V_NO_REPLY(no_reply));
+ req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+
+ t4_mgmt_tx(ctrlq, mbuf);
+}
+
+/**
+ * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
+ */
+static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
+ struct cpl_set_tcb_field *req,
+ unsigned int word,
+ u64 mask, u64 val, u8 cookie,
+ int no_reply)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
+ req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
+ V_QUEUENO(0));
+ req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+ sc = (struct ulptx_idata *)(req + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Check if entry already filled.
+ */
+bool is_filter_set(struct tid_info *t, int fidx, int family)
+{
+ bool result = FALSE;
+ int i, max;
+
+ /* IPv6 requires four slots and IPv4 requires only 1 slot.
+ * Ensure, there's enough slots available.
+ */
+ max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;
+
+ t4_os_lock(&t->ftid_lock);
+ for (i = fidx; i <= max; i++) {
+ if (rte_bitmap_get(t->ftid_bmap, i)) {
+ result = TRUE;
+ break;
+ }
+ }
+ t4_os_unlock(&t->ftid_lock);
+ return result;
+}
+
+/**
+ * Allocate a available free entry
+ */
+int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
+{
+ struct tid_info *t = &adap->tids;
+ int pos;
+ int size = t->nftids;
+
+ t4_os_lock(&t->ftid_lock);
+ if (family == FILTER_TYPE_IPV6)
+ pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
+ else
+ pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
+ t4_os_unlock(&t->ftid_lock);
+
+ return pos < size ? pos : -1;
+}
+
+/**
+ * Construct hash filter ntuple.
+ */
+static u64 hash_filter_ntuple(const struct filter_entry *f)
+{
+ struct adapter *adap = ethdev2adap(f->dev);
+ struct tp_params *tp = &adap->params.tp;
+ u64 ntuple = 0;
+ u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
+
+ if (tp->port_shift >= 0)
+ ntuple |= (u64)f->fs.mask.iport << tp->port_shift;
+
+ if (tp->protocol_shift >= 0) {
+ if (!f->fs.val.proto)
+ ntuple |= (u64)tcp_proto << tp->protocol_shift;
+ else
+ ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
+ }
+
+ if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
+ ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
+
+ if (ntuple != tp->hash_filter_mask)
+ return 0;
+
+ return ntuple;
+}
+
+/**
+ * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
+ */
+static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
+ unsigned int tid)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*abort_req) -
+ sizeof(struct work_request_hdr));
+ OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
+ abort_req->rsvd0 = cpu_to_be32(0);
+ abort_req->rsvd1 = 0;
+ abort_req->cmd = CPL_ABORT_NO_RST;
+ sc = (struct ulptx_idata *)(abort_req + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
+ */
+static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
+ unsigned int tid)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*abort_rpl) -
+ sizeof(struct work_request_hdr));
+ OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ abort_rpl->rsvd0 = cpu_to_be32(0);
+ abort_rpl->rsvd1 = 0;
+ abort_rpl->cmd = CPL_ABORT_NO_RST;
+ sc = (struct ulptx_idata *)(abort_rpl + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Delete the specified hash filter.
+ */
+static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
+ unsigned int filter_id,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct tid_info *t = &adapter->tids;
+ struct filter_entry *f;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+ int ret;
+
+ if (filter_id > adapter->tids.ntids)
+ return -E2BIG;
+
+ f = lookup_tid(t, filter_id);
+ if (!f) {
+ dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
+ __func__, filter_id);
+ return -EINVAL;
+ }
+
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ unsigned int wrlen;
+ struct rte_mbuf *mbuf;
+ struct work_request_hdr *wr;
+ struct ulptx_idata *aligner;
+ struct cpl_set_tcb_field *req;
+ struct cpl_abort_req *abort_req;
+ struct cpl_abort_rpl *abort_rpl;
+
+ f->ctx = ctx;
+ f->pending = 1;
+
+ wrlen = cxgbe_roundup(sizeof(*wr) +
+ (sizeof(*req) + sizeof(*aligner)) +
+ sizeof(*abort_req) + sizeof(*abort_rpl),
+ 16);
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ dev_err(adapter, "%s: could not allocate skb ..\n",
+ __func__);
+ goto out_err;
+ }
+
+ mbuf->data_len = wrlen;
+ mbuf->pkt_len = mbuf->data_len;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
+ INIT_ULPTX_WR(req, wrlen, 0, 0);
+ wr = (struct work_request_hdr *)req;
+ wr++;
+ req = (struct cpl_set_tcb_field *)wr;
+ mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
+ V_TCB_RSS_INFO(M_TCB_RSS_INFO),
+ V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
+ 0, 1);
+ aligner = (struct ulptx_idata *)(req + 1);
+ abort_req = (struct cpl_abort_req *)(aligner + 1);
+ mk_abort_req_ulp(abort_req, f->tid);
+ abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
+ mk_abort_rpl_ulp(abort_rpl, f->tid);
+ t4_mgmt_tx(ctrlq, mbuf);
+ }
+ return 0;
+
+out_err:
+ return -ENOMEM;
+}
+
+/**
+ * Build a ACT_OPEN_REQ6 message for setting IPv6 hash filter.
+ */
+static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
+ unsigned int qid_filterid, struct adapter *adap)
+{
+ struct cpl_t6_act_open_req6 *req = NULL;
+ u64 local_lo, local_hi, peer_lo, peer_hi;
+ u32 *lip = (u32 *)f->fs.val.lip;
+ u32 *fip = (u32 *)f->fs.val.fip;
+
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T6:
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);
+
+ INIT_TP_WR(req, 0);
+ break;
+ default:
+ dev_err(adap, "%s: unsupported chip type!\n", __func__);
+ return;
+ }
+
+ local_hi = ((u64)lip[1]) << 32 | lip[0];
+ local_lo = ((u64)lip[3]) << 32 | lip[2];
+ peer_hi = ((u64)fip[1]) << 32 | fip[0];
+ peer_lo = ((u64)fip[3]) << 32 | fip[2];
+
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ qid_filterid));
+ req->local_port = cpu_to_be16(f->fs.val.lport);
+ req->peer_port = cpu_to_be16(f->fs.val.fport);
+ req->local_ip_hi = local_hi;
+ req->local_ip_lo = local_lo;
+ req->peer_ip_hi = peer_hi;
+ req->peer_ip_lo = peer_lo;
+ req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
+ V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
+ << 1) |
+ V_TX_CHAN(f->fs.eport) |
+ V_ULP_MODE(ULP_MODE_NONE) |
+ F_TCAM_BYPASS | F_NON_OFFLOAD);
+ req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
+ req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
+ V_RSS_QUEUE(f->fs.iq) |
+ F_T5_OPT_2_VALID |
+ F_RX_CHANNEL |
+ V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
+ (f->fs.dirsteer << 1)) |
+ V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
+}
+
+/**
+ * Build a ACT_OPEN_REQ message for setting IPv4 hash filter.
+ */
+static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
+ unsigned int qid_filterid, struct adapter *adap)
+{
+ struct cpl_t6_act_open_req *req = NULL;
+
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T6:
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
+
+ INIT_TP_WR(req, 0);
+ break;
+ default:
+ dev_err(adap, "%s: unsupported chip type!\n", __func__);
+ return;
+ }
+
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ qid_filterid));
+ req->local_port = cpu_to_be16(f->fs.val.lport);
+ req->peer_port = cpu_to_be16(f->fs.val.fport);
+ req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
+ f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
+ req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
+ f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+ req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
+ V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
+ << 1) |
+ V_TX_CHAN(f->fs.eport) |
+ V_ULP_MODE(ULP_MODE_NONE) |
+ F_TCAM_BYPASS | F_NON_OFFLOAD);
+ req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
+ req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
+ V_RSS_QUEUE(f->fs.iq) |
+ F_T5_OPT_2_VALID |
+ F_RX_CHANNEL |
+ V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
+ (f->fs.dirsteer << 1)) |
+ V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
+}
+
+/**
+ * Set the specified hash filter.
+ */
+static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ struct tid_info *t = &adapter->tids;
+ struct filter_entry *f;
+ struct rte_mbuf *mbuf;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int iq;
+ int atid, size;
+ int ret = 0;
+
+ ret = validate_filter(adapter, fs);
+ if (ret)
+ return ret;
+
+ iq = get_filter_steerq(dev, fs);
+
+ ctrlq = &adapter->sge.ctrlq[pi->port_id];
+
+ f = t4_os_alloc(sizeof(*f));
+ if (!f)
+ goto out_err;
+
+ f->fs = *fs;
+ f->ctx = ctx;
+ f->dev = dev;
+ f->fs.iq = iq;
+
+ atid = cxgbe_alloc_atid(t, f);
+ if (atid < 0)
+ goto out_err;
+
+ if (f->fs.type) {
+ /* IPv6 hash filter */
+ f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
+ if (!f->clipt)
+ goto free_atid;
+
+ size = sizeof(struct cpl_t6_act_open_req6);
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto free_clip;
+ }
+
+ mbuf->data_len = size;
+ mbuf->pkt_len = mbuf->data_len;
+
+ mk_act_open_req6(f, mbuf,
+ ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+ adapter);
+ } else {
+ /* IPv4 hash filter */
+ size = sizeof(struct cpl_t6_act_open_req);
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto free_atid;
+ }
+
+ mbuf->data_len = size;
+ mbuf->pkt_len = mbuf->data_len;
+
+ mk_act_open_req(f, mbuf,
+ ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+ adapter);
+ }
+
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+
+free_clip:
+ cxgbe_clip_release(f->dev, f->clipt);
+free_atid:
+ cxgbe_free_atid(t, atid);
+
+out_err:
+ t4_os_free(f);
+ return ret;
+}
+
+/**
+ * Clear a filter and release any of its resources that we own. This also
+ * clears the filter's "pending" status.
+ */
+void clear_filter(struct filter_entry *f)
+{
+ if (f->clipt)
+ cxgbe_clip_release(f->dev, f->clipt);
+
+ /*
+ * The zeroing of the filter rule below clears the filter valid,
+ * pending, locked flags etc. so it's all we need for
+ * this operation.
+ */
+ memset(f, 0, sizeof(*f));
+}
+
+/**
+ * t4_mk_filtdelwr - create a delete filter WR
+ * @ftid: the filter ID
+ * @wr: the filter work request to populate
+ * @qid: ingress queue to receive the delete notification
+ *
+ * Creates a filter work request to delete the supplied filter. If @qid is
+ * negative the delete notification is suppressed.
+ */
+static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
+{
+ memset(wr, 0, sizeof(*wr));
+ wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+ wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
+ wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_NOREPLY(qid < 0));
+ wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
+ if (qid >= 0)
+ wr->rx_chan_rx_rpl_iq =
+ cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+}
+
+/**
+ * Create FW work request to delete the filter at a specified index
+ */
+static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct rte_mbuf *mbuf;
+ struct fw_filter_wr *fwr;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf)
+ return -ENOMEM;
+
+ mbuf->data_len = sizeof(*fwr);
+ mbuf->pkt_len = mbuf->data_len;
+
+ fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
+ t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /*
+ * Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+}
+
+int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct rte_mbuf *mbuf;
+ struct fw_filter_wr *fwr;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+ int ret;
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mbuf->data_len = sizeof(*fwr);
+ mbuf->pkt_len = mbuf->data_len;
+
+ fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
+ memset(fwr, 0, sizeof(*fwr));
+
+ /*
+ * Construct the work request to set the filter.
+ */
+ fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+ fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
+ fwr->tid_to_iq =
+ cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
+ V_FW_FILTER_WR_RQTYPE(f->fs.type) |
+ V_FW_FILTER_WR_NOREPLY(0) |
+ V_FW_FILTER_WR_IQ(f->fs.iq));
+ fwr->del_filter_to_l2tix =
+ cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
+ V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
+ V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
+ V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
+ V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
+ V_FW_FILTER_WR_PRIO(f->fs.prio));
+ fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
+ fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
+ fwr->smac_sel = 0;
+ fwr->rx_chan_rx_rpl_iq =
+ cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
+ V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
+ ));
+ fwr->maci_to_matchtypem =
+ cpu_to_be32(V_FW_FILTER_WR_PORT(f->fs.val.iport) |
+ V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
+ fwr->ptcl = f->fs.val.proto;
+ fwr->ptclm = f->fs.mask.proto;
+ rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+ rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+ rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+ rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+ fwr->lp = cpu_to_be16(f->fs.val.lport);
+ fwr->lpm = cpu_to_be16(f->fs.mask.lport);
+ fwr->fp = cpu_to_be16(f->fs.val.fport);
+ fwr->fpm = cpu_to_be16(f->fs.mask.fport);
+
+ /*
+ * Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+
+out:
+ return ret;
+}
+
+/**
+ * Set the corresponding entry in the bitmap. 4 slots are
+ * marked for IPv6, whereas only 1 slot is marked for IPv4.
+ */
+static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
+{
+ t4_os_lock(&t->ftid_lock);
+ if (rte_bitmap_get(t->ftid_bmap, fidx)) {
+ t4_os_unlock(&t->ftid_lock);
+ return -EBUSY;
+ }
+
+ if (family == FILTER_TYPE_IPV4) {
+ rte_bitmap_set(t->ftid_bmap, fidx);
+ } else {
+ rte_bitmap_set(t->ftid_bmap, fidx);
+ rte_bitmap_set(t->ftid_bmap, fidx + 1);
+ rte_bitmap_set(t->ftid_bmap, fidx + 2);
+ rte_bitmap_set(t->ftid_bmap, fidx + 3);
+ }
+ t4_os_unlock(&t->ftid_lock);
+ return 0;
+}
+
+/**
+ * Clear the corresponding entry in the bitmap. 4 slots are
+ * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
+ */
+static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
+{
+ t4_os_lock(&t->ftid_lock);
+ if (family == FILTER_TYPE_IPV4) {
+ rte_bitmap_clear(t->ftid_bmap, fidx);
+ } else {
+ rte_bitmap_clear(t->ftid_bmap, fidx);
+ rte_bitmap_clear(t->ftid_bmap, fidx + 1);
+ rte_bitmap_clear(t->ftid_bmap, fidx + 2);
+ rte_bitmap_clear(t->ftid_bmap, fidx + 3);
+ }
+ t4_os_unlock(&t->ftid_lock);
+}
+
+/**
+ * Check a delete filter request for validity and send it to the hardware.
+ * Return 0 on success, an error number otherwise. We attach any provided
+ * filter operation context to the internal filter specification in order to
+ * facilitate signaling completion of the operation.
+ */
+int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct filter_entry *f;
+ unsigned int chip_ver;
+ int ret;
+
+ if (is_hashfilter(adapter) && fs->cap)
+ return cxgbe_del_hash_filter(dev, filter_id, ctx);
+
+ if (filter_id >= adapter->tids.nftids)
+ return -ERANGE;
+
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ ret = is_filter_set(&adapter->tids, filter_id, fs->type);
+ if (!ret) {
+ dev_warn(adap, "%s: could not find filter entry: %u\n",
+ __func__, filter_id);
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure filter id is aligned on the 2 slot boundary for T6,
+ * and 4 slot boundary for cards below T6.
+ */
+ if (fs->type) {
+ if (chip_ver < CHELSIO_T6)
+ filter_id &= ~(0x3);
+ else
+ filter_id &= ~(0x1);
+ }
+
+ f = &adapter->tids.ftid_tab[filter_id];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ f->ctx = ctx;
+ cxgbe_clear_ftid(&adapter->tids,
+ f->tid - adapter->tids.ftid_base,
+ f->fs.type ? FILTER_TYPE_IPV6 :
+ FILTER_TYPE_IPV4);
+ return del_filter_wr(dev, filter_id);
+ }
+
+ /*
+ * If the caller has passed in a Completion Context then we need to
+ * mark it as a successful completion so they don't stall waiting
+ * for it.
+ */
+ if (ctx) {
+ ctx->result = 0;
+ t4_complete(&ctx->completion);
+ }
+
+ return 0;
+}
+
+/**
+ * Check a Chelsio Filter Request for validity, convert it into our internal
+ * format and send it to the hardware. Return 0 on success, an error number
+ * otherwise. We attach any provided filter operation context to the internal
+ * filter specification in order to facilitate signaling completion of the
+ * operation.
+ */
+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int fidx, iq, fid_bit = 0;
+ struct filter_entry *f;
+ unsigned int chip_ver;
+ uint8_t bitoff[16] = {0};
+ int ret;
+
+ if (is_hashfilter(adapter) && fs->cap)
+ return cxgbe_set_hash_filter(dev, fs, ctx);
+
+ if (filter_id >= adapter->tids.nftids)
+ return -ERANGE;
+
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ ret = validate_filter(adapter, fs);
+ if (ret)
+ return ret;
+
+ /*
+ * Ensure filter id is aligned on the 4 slot boundary for IPv6
+ * maskfull filters.
+ */
+ if (fs->type)
+ filter_id &= ~(0x3);
+
+ ret = is_filter_set(&adapter->tids, filter_id, fs->type);
+ if (ret)
+ return -EBUSY;
+
+ iq = get_filter_steerq(dev, fs);
+
+ /*
+ * IPv6 filters occupy four slots and must be aligned on four-slot
+ * boundaries for T5. On T6, IPv6 filters occupy two-slots and
+ * must be aligned on two-slot boundaries.
+ *
+ * IPv4 filters only occupy a single slot and have no alignment
+ * requirements but writing a new IPv4 filter into the middle
+ * of an existing IPv6 filter requires clearing the old IPv6
+ * filter.
+ */
+ if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
+ /*
+ * For T6, If our IPv4 filter isn't being written to a
+ * multiple of two filter index and there's an IPv6
+ * filter at the multiple of 2 base slot, then we need
+ * to delete that IPv6 filter ...
+ * For adapters below T6, IPv6 filter occupies 4 entries.
+ */
+ if (chip_ver < CHELSIO_T6)
+ fidx = filter_id & ~0x3;
+ else
+ fidx = filter_id & ~0x1;
+
+ if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid)
+ return -EBUSY;
+ }
+ } else { /* IPv6 */
+ unsigned int max_filter_id;
+
+ if (chip_ver < CHELSIO_T6) {
+ /*
+ * Ensure that the IPv6 filter is aligned on a
+ * multiple of 4 boundary.
+ */
+ if (filter_id & 0x3)
+ return -EINVAL;
+
+ max_filter_id = filter_id + 4;
+ } else {
+ /*
+ * For T6, CLIP being enabled, IPv6 filter would occupy
+ * 2 entries.
+ */
+ if (filter_id & 0x1)
+ return -EINVAL;
+
+ max_filter_id = filter_id + 2;
+ }
+
+ /*
+ * Check all except the base overlapping IPv4 filter
+ * slots.
+ */
+ for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid)
+ return -EBUSY;
+ }
+ }
+
+ /*
+ * Check to make sure that provided filter index is not
+ * already in use by someone else
+ */
+ f = &adapter->tids.ftid_tab[filter_id];
+ if (f->valid)
+ return -EBUSY;
+
+ fidx = adapter->tids.ftid_base + filter_id;
+ fid_bit = filter_id;
+ ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
+ fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
+ if (ret)
+ return ret;
+
+ /*
+ * Check to make sure the filter requested is writable ...
+ */
+ ret = writable_filter(f);
+ if (ret) {
+ /* Clear the bits we have set above */
+ cxgbe_clear_ftid(&adapter->tids, fid_bit,
+ fs->type ? FILTER_TYPE_IPV6 :
+ FILTER_TYPE_IPV4);
+ return ret;
+ }
+
+ /*
+ * Allocate a clip table entry only if we have non-zero IPv6 address
+ */
+ if (chip_ver > CHELSIO_T5 && fs->type &&
+ memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
+ f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
+ if (!f->clipt)
+ goto free_tid;
+ }
+
+ /*
+ * Convert the filter specification into our internal format.
+ * We copy the PF/VF specification into the Outer VLAN field
+ * here so the rest of the code -- including the interface to
+ * the firmware -- doesn't have to constantly do these checks.
+ */
+ f->fs = *fs;
+ f->fs.iq = iq;
+ f->dev = dev;
+
+ /*
+ * Attempt to set the filter. If we don't succeed, we clear
+ * it and return the failure.
+ */
+ f->ctx = ctx;
+ f->tid = fidx; /* Save the actual tid */
+ ret = set_filter_wr(dev, filter_id);
+ if (ret) {
+ fid_bit = f->tid - adapter->tids.ftid_base;
+ goto free_tid;
+ }
+
+ return ret;
+
+free_tid:
+ cxgbe_clear_ftid(&adapter->tids, fid_bit,
+ fs->type ? FILTER_TYPE_IPV6 :
+ FILTER_TYPE_IPV4);
+ clear_filter(f);
+ return ret;
+}
+
+/**
+ * Handle a Hash filter write reply.
+ */
+void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
+{
+ struct tid_info *t = &adap->tids;
+ struct filter_entry *f;
+ struct filter_ctx *ctx = NULL;
+ unsigned int tid = GET_TID(rpl);
+ unsigned int ftid = G_TID_TID(G_AOPEN_ATID
+ (be32_to_cpu(rpl->atid_status)));
+ unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+
+ f = lookup_atid(t, ftid);
+ if (!f) {
+ dev_warn(adap, "%s: could not find filter entry: %d\n",
+ __func__, ftid);
+ return;
+ }
+
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ switch (status) {
+ case CPL_ERR_NONE: {
+ f->tid = tid;
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+
+ cxgbe_insert_tid(t, f, f->tid, 0);
+ cxgbe_free_atid(t, ftid);
+ if (ctx) {
+ ctx->tid = f->tid;
+ ctx->result = 0;
+ }
+ if (f->fs.hitcnts)
+ set_tcb_field(adap, tid,
+ W_TCB_TIMESTAMP,
+ V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
+ V_TCB_T_RTT_TS_RECENT_AGE
+ (M_TCB_T_RTT_TS_RECENT_AGE),
+ V_TCB_TIMESTAMP(0ULL) |
+ V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
+ 1);
+ break;
+ }
+ default:
+ dev_warn(adap, "%s: filter creation failed with status = %u\n",
+ __func__, status);
+
+ if (ctx) {
+ if (status == CPL_ERR_TCAM_FULL)
+ ctx->result = -EAGAIN;
+ else
+ ctx->result = -EINVAL;
+ }
+
+ cxgbe_free_atid(t, ftid);
+ t4_os_free(f);
+ }
+
+ if (ctx)
+ t4_complete(&ctx->completion);
+}
+
+/**
+ * Handle a LE-TCAM filter write/deletion reply.
+ */
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+ struct filter_entry *f = NULL;
+ unsigned int tid = GET_TID(rpl);
+ int idx, max_fidx = adap->tids.nftids;
+
+ /* Get the corresponding filter entry for this tid */
+ if (adap->tids.ftid_tab) {
+ /* Check this in normal filter region */
+ idx = tid - adap->tids.ftid_base;
+ if (idx >= max_fidx)
+ return;
+
+ f = &adap->tids.ftid_tab[idx];
+ if (f->tid != tid)
+ return;
+ }
+
+ /* We found the filter entry for this tid */
+ if (f) {
+ unsigned int ret = G_COOKIE(rpl->cookie);
+ struct filter_ctx *ctx;
+
+ /*
+ * Pull off any filter operation context attached to the
+ * filter.
+ */
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ if (ret == FW_FILTER_WR_FLT_ADDED) {
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->tid = f->tid;
+ ctx->result = 0;
+ }
+ } else if (ret == FW_FILTER_WR_FLT_DELETED) {
+ /*
+ * Clear the filter when we get confirmation from the
+ * hardware that the filter has been deleted.
+ */
+ clear_filter(f);
+ if (ctx)
+ ctx->result = 0;
+ } else {
+ /*
+ * Something went wrong. Issue a warning about the
+ * problem and clear everything out.
+ */
+ dev_warn(adap, "filter %u setup failed with error %u\n",
+ idx, ret);
+ clear_filter(f);
+ if (ctx)
+ ctx->result = -EINVAL;
+ }
+
+ if (ctx)
+ t4_complete(&ctx->completion);
+ }
+}
+
+/*
+ * Retrieve the packet count for the specified filter.
+ */
+int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
+ u64 *c, int hash, bool get_byte)
+{
+ struct filter_entry *f;
+ unsigned int tcb_base, tcbaddr;
+ int ret;
+
+ tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
+ if (is_hashfilter(adapter) && hash) {
+ if (fidx < adapter->tids.ntids) {
+ f = adapter->tids.tid_tab[fidx];
+ if (!f)
+ return -EINVAL;
+
+ if (is_t5(adapter->params.chip)) {
+ *c = 0;
+ return 0;
+ }
+ tcbaddr = tcb_base + (fidx * TCB_SIZE);
+ goto get_count;
+ } else {
+ return -ERANGE;
+ }
+ } else {
+ if (fidx >= adapter->tids.nftids)
+ return -ERANGE;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ if (!f->valid)
+ return -EINVAL;
+
+ tcbaddr = tcb_base + f->tid * TCB_SIZE;
+ }
+
+ f = &adapter->tids.ftid_tab[fidx];
+ if (!f->valid)
+ return -EINVAL;
+
+get_count:
+ if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
+ /*
+ * For T5, the Filter Packet Hit Count is maintained as a
+ * 32-bit Big Endian value in the TCB field {timestamp}.
+ * Similar to the craziness above, instead of the filter hit
+ * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
+ * sizeof(u32)), it actually shows up at offset 24. Whacky.
+ */
+ if (get_byte) {
+ unsigned int word_offset = 4;
+ __be64 be64_byte_count;
+
+ t4_os_lock(&adapter->win0_lock);
+ ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
+ tcbaddr +
+ (word_offset * sizeof(__be32)),
+ sizeof(be64_byte_count),
+ &be64_byte_count,
+ T4_MEMORY_READ);
+ t4_os_unlock(&adapter->win0_lock);
+ if (ret < 0)
+ return ret;
+ *c = be64_to_cpu(be64_byte_count);
+ } else {
+ unsigned int word_offset = 6;
+ __be32 be32_count;
+
+ t4_os_lock(&adapter->win0_lock);
+ ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
+ tcbaddr +
+ (word_offset * sizeof(__be32)),
+ sizeof(be32_count), &be32_count,
+ T4_MEMORY_READ);
+ t4_os_unlock(&adapter->win0_lock);
+ if (ret < 0)
+ return ret;
+ *c = (u64)be32_to_cpu(be32_count);
+ }
+ }
+ return 0;
+}
+
+/**
+ * Handle a Hash filter delete reply.
+ */
+void hash_del_filter_rpl(struct adapter *adap,
+ const struct cpl_abort_rpl_rss *rpl)
+{
+ struct tid_info *t = &adap->tids;
+ struct filter_entry *f;
+ struct filter_ctx *ctx = NULL;
+ unsigned int tid = GET_TID(rpl);
+
+ f = lookup_tid(t, tid);
+ if (!f) {
+ dev_warn(adap, "%s: could not find filter entry: %u\n",
+ __func__, tid);
+ return;
+ }
+
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ f->valid = 0;
+
+ if (f->clipt)
+ cxgbe_clip_release(f->dev, f->clipt);
+
+ cxgbe_remove_tid(t, 0, tid, 0);
+ t4_os_free(f);
+
+ if (ctx) {
+ ctx->result = 0;
+ t4_complete(&ctx->completion);
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.h
new file mode 100644
index 00000000..af8fa752
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_FILTER_H_
+#define _CXGBE_FILTER_H_
+
+#include "t4_msg.h"
+/*
+ * Defined bit width of user definable filter tuples
+ */
+#define ETHTYPE_BITWIDTH 16
+#define FRAG_BITWIDTH 1
+#define MACIDX_BITWIDTH 9
+#define FCOE_BITWIDTH 1
+#define IPORT_BITWIDTH 3
+#define MATCHTYPE_BITWIDTH 3
+#define PROTO_BITWIDTH 8
+#define TOS_BITWIDTH 8
+#define PF_BITWIDTH 8
+#define VF_BITWIDTH 8
+#define IVLAN_BITWIDTH 16
+#define OVLAN_BITWIDTH 16
+
+/*
+ * Filter matching rules. These consist of a set of ingress packet field
+ * (value, mask) tuples. The associated ingress packet field matches the
+ * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
+ * rule can be constructed by specifying a tuple of (0, 0).) A filter rule
+ * matches an ingress packet when all of the individual individual field
+ * matching rules are true.
+ *
+ * Partial field masks are always valid, however, while it may be easy to
+ * understand their meanings for some fields (e.g. IP address to match a
+ * subnet), for others making sensible partial masks is less intuitive (e.g.
+ * MPS match type) ...
+ */
+struct ch_filter_tuple {
+ /*
+ * Compressed header matching field rules. The TP_VLAN_PRI_MAP
+ * register selects which of these fields will participate in the
+ * filter match rules -- up to a maximum of 36 bits. Because
+ * TP_VLAN_PRI_MAP is a global register, all filters must use the same
+ * set of fields.
+ */
+ uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */
+ uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */
+ uint32_t ivlan_vld:1; /* inner VLAN valid */
+ uint32_t ovlan_vld:1; /* outer VLAN valid */
+ uint32_t pfvf_vld:1; /* PF/VF valid */
+ uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
+ uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
+ uint32_t iport:IPORT_BITWIDTH; /* ingress port */
+ uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */
+ uint32_t proto:PROTO_BITWIDTH; /* protocol type */
+ uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */
+ uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */
+ uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
+ uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
+ uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
+
+ /*
+ * Uncompressed header matching field rules. These are always
+ * available for field rules.
+ */
+ uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */
+ uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */
+ uint16_t lport; /* local port */
+ uint16_t fport; /* foreign port */
+
+ /* reservations for future additions */
+ uint8_t rsvd[12];
+};
+
+/*
+ * Filter specification
+ */
+struct ch_filter_specification {
+ /* Administrative fields for filter. */
+ uint32_t hitcnts:1; /* count filter hits in TCB */
+ uint32_t prio:1; /* filter has priority over active/server */
+
+ /*
+ * Fundamental filter typing. This is the one element of filter
+ * matching that doesn't exist as a (value, mask) tuple.
+ */
+ uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
+ uint32_t cap:1; /* 0 => LE-TCAM, 1 => Hash */
+
+ /*
+ * Packet dispatch information. Ingress packets which match the
+ * filter rules will be dropped, passed to the host or switched back
+ * out as egress packets.
+ */
+ uint32_t action:2; /* drop, pass, switch */
+
+ uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
+ uint32_t iq:10; /* ingress queue */
+
+ uint32_t eport:2; /* egress port to switch packet out */
+
+ /* Filter rule value/mask pairs. */
+ struct ch_filter_tuple val;
+ struct ch_filter_tuple mask;
+};
+
+enum {
+ FILTER_PASS = 0, /* default */
+ FILTER_DROP,
+ FILTER_SWITCH
+};
+
+enum filter_type {
+ FILTER_TYPE_IPV4 = 0,
+ FILTER_TYPE_IPV6,
+};
+
+struct t4_completion {
+ unsigned int done; /* completion done (0 - No, 1 - Yes) */
+ rte_spinlock_t lock; /* completion lock */
+};
+
+/*
+ * Filter operation context to allow callers to wait for
+ * an asynchronous completion.
+ */
+struct filter_ctx {
+ struct t4_completion completion; /* completion rendezvous */
+ int result; /* result of operation */
+ u32 tid; /* to store tid of hash filter */
+};
+
+/*
+ * Host shadow copy of ingress filter entry. This is in host native format
+ * and doesn't match the ordering or bit order, etc. of the hardware or the
+ * firmware command.
+ */
+struct filter_entry {
+ /*
+ * Administrative fields for filter.
+ */
+ u32 valid:1; /* filter allocated and valid */
+ u32 locked:1; /* filter is administratively locked */
+ u32 pending:1; /* filter action is pending FW reply */
+ struct filter_ctx *ctx; /* caller's completion hook */
+ struct clip_entry *clipt; /* CLIP Table entry for IPv6 */
+ struct rte_eth_dev *dev; /* Port's rte eth device */
+ void *private; /* For use by apps using filter_entry */
+
+ /* This will store the actual tid */
+ u32 tid;
+
+ /*
+ * The filter itself.
+ */
+ struct ch_filter_specification fs;
+};
+
+#define FILTER_ID_MAX (~0U)
+
+struct tid_info;
+struct adapter;
+
+/**
+ * Find first clear bit in the bitmap.
+ */
+static inline unsigned int cxgbe_find_first_zero_bit(struct rte_bitmap *bmap,
+ unsigned int size)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < size; idx++)
+ if (!rte_bitmap_get(bmap, idx))
+ break;
+
+ return idx;
+}
+
+/**
+ * Find a free region of 'num' consecutive entries.
+ */
+static inline unsigned int
+cxgbe_bitmap_find_free_region(struct rte_bitmap *bmap, unsigned int size,
+ unsigned int num)
+{
+ unsigned int idx, j, free = 0;
+
+ if (num > size)
+ return size;
+
+ for (idx = 0; idx < size; idx += num) {
+ for (j = 0; j < num; j++) {
+ if (!rte_bitmap_get(bmap, idx + j)) {
+ free++;
+ } else {
+ free = 0;
+ break;
+ }
+ }
+
+ /* Found the Region */
+ if (free == num)
+ break;
+
+ /* Reached the end and still no region found */
+ if ((idx + num) > size) {
+ idx = size;
+ break;
+ }
+ }
+
+ return idx;
+}
+
+bool is_filter_set(struct tid_info *, int fidx, int family);
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
+void clear_filter(struct filter_entry *f);
+int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx);
+int writable_filter(struct filter_entry *f);
+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family);
+int init_hash_filter(struct adapter *adap);
+void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl);
+void hash_del_filter_rpl(struct adapter *adap,
+ const struct cpl_abort_rpl_rss *rpl);
+int validate_filter(struct adapter *adap, struct ch_filter_specification *fs);
+int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
+ u64 *c, int hash, bool get_byte);
+#endif /* _CXGBE_FILTER_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.c
new file mode 100644
index 00000000..01c945f1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.c
@@ -0,0 +1,845 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#include "common.h"
+#include "cxgbe_flow.h"
+
+#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
+do { \
+ if (!((fs)->val.elem || (fs)->mask.elem)) { \
+ (fs)->val.elem = (__v); \
+ (fs)->mask.elem = (__m); \
+ } else { \
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
+ NULL, "a filter can be specified" \
+ " only once"); \
+ } \
+} while (0)
+
+#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
+do { \
+ memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
+ memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
+} while (0)
+
+#define CXGBE_FILL_FS(v, m, elem) \
+ __CXGBE_FILL_FS(v, m, fs, elem, e)
+
+#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
+ __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
+
+static int
+cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
+{
+ /* rte_flow specification does not allow it. */
+ if (!i->spec && (i->mask || i->last))
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "last or mask given without spec");
+ /*
+ * We don't support it.
+ * Although, we can support values in last as 0's or last == spec.
+ * But this will not provide user with any additional functionality
+ * and will only increase the complexity for us.
+ */
+ if (i->last)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "last is not supported by chelsio pmd");
+ return 0;
+}
+
+static void
+cxgbe_fill_filter_region(struct adapter *adap,
+ struct ch_filter_specification *fs)
+{
+ struct tp_params *tp = &adap->params.tp;
+ u64 hash_filter_mask = tp->hash_filter_mask;
+ u64 ntuple_mask = 0;
+
+ fs->cap = 0;
+
+ if (!is_hashfilter(adap))
+ return;
+
+ if (fs->type) {
+ uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff};
+ uint8_t bitoff[16] = {0};
+
+ if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
+ !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
+ memcmp(fs->mask.lip, biton, sizeof(biton)) ||
+ memcmp(fs->mask.fip, biton, sizeof(biton)))
+ return;
+ } else {
+ uint32_t biton = 0xffffffff;
+ uint32_t bitoff = 0x0U;
+
+ if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
+ !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
+ memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
+ memcmp(fs->mask.fip, &biton, sizeof(biton)))
+ return;
+ }
+
+ if (!fs->val.lport || fs->mask.lport != 0xffff)
+ return;
+ if (!fs->val.fport || fs->mask.fport != 0xffff)
+ return;
+
+ if (tp->protocol_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
+ if (tp->ethertype_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
+ if (tp->port_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
+
+ if (ntuple_mask != hash_filter_mask)
+ return;
+
+ fs->cap = 1; /* use hash region */
+}
+
+static int
+ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_phy_port *val = item->spec;
+ const struct rte_flow_item_phy_port *umask = item->mask;
+ const struct rte_flow_item_phy_port *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
+
+ if (val->index > 0x7)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "port index upto 0x7 is supported");
+
+ CXGBE_FILL_FS(val->index, mask->index, iport);
+
+ return 0;
+}
+
+static int
+ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_udp *val = item->spec;
+ const struct rte_flow_item_udp *umask = item->mask;
+ const struct rte_flow_item_udp *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;
+
+ if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "udp: only src/dst port supported");
+
+ CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
+ if (!val)
+ return 0;
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+ be16_to_cpu(mask->hdr.src_port), fport);
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+ be16_to_cpu(mask->hdr.dst_port), lport);
+ return 0;
+}
+
+static int
+ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_tcp *val = item->spec;
+ const struct rte_flow_item_tcp *umask = item->mask;
+ const struct rte_flow_item_tcp *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;
+
+ if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
+ mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
+ mask->hdr.tcp_urp)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "tcp: only src/dst port supported");
+
+ CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
+ if (!val)
+ return 0;
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+ be16_to_cpu(mask->hdr.src_port), fport);
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+ be16_to_cpu(mask->hdr.dst_port), lport);
+ return 0;
+}
+
+static int
+ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_ipv4 *val = item->spec;
+ const struct rte_flow_item_ipv4 *umask = item->mask;
+ const struct rte_flow_item_ipv4 *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
+
+ if (mask->hdr.time_to_live || mask->hdr.type_of_service)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "ttl/tos are not supported");
+
+ fs->type = FILTER_TYPE_IPV4;
+ CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
+ if (!val)
+ return 0; /* ipv4 wild card */
+
+ CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+
+ return 0;
+}
+
+static int
+ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_ipv6 *val = item->spec;
+ const struct rte_flow_item_ipv6 *umask = item->mask;
+ const struct rte_flow_item_ipv6 *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
+
+ if (mask->hdr.vtc_flow ||
+ mask->hdr.payload_len || mask->hdr.hop_limits)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "tc/flow/hop are not supported");
+
+ fs->type = FILTER_TYPE_IPV6;
+ CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
+ if (!val)
+ return 0; /* ipv6 wild card */
+
+ CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+
+ return 0;
+}
+
+static int
+cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
+ struct rte_flow_error *e)
+{
+ if (attr->egress)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
+ attr, "attribute:<egress> is"
+ " not supported !");
+ if (attr->group > 0)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
+ attr, "group parameter is"
+ " not supported.");
+
+ flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;
+
+ return 0;
+}
+
+static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+
+ if (rxq > pi->n_rx_qsets)
+ return -EINVAL;
+ return 0;
+}
+
+static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
+{
+ struct adapter *adap = ethdev2adap(f->dev);
+ struct ch_filter_specification fs = f->fs;
+
+ if (fidx >= adap->tids.nftids) {
+ dev_err(adap, "invalid flow index %d.\n", fidx);
+ return -EINVAL;
+ }
+ if (!is_filter_set(&adap->tids, fidx, fs.type)) {
+ dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
+ struct adapter *adap, unsigned int fidx)
+{
+ if (is_filter_set(&adap->tids, fidx, fs->type)) {
+ dev_err(adap, "filter index: %d is busy.\n", fidx);
+ return -EBUSY;
+ }
+ if (fidx >= adap->tids.nftids) {
+ dev_err(adap, "filter index (%u) >= max(%u)\n",
+ fidx, adap->tids.nftids);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
+{
+ if (flow->fs.cap)
+ return 0; /* Hash filters */
+ return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
+ cxgbe_validate_fidxonadd(&flow->fs,
+ ethdev2adap(flow->dev), fidx);
+}
+
+static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ struct adapter *adap = ethdev2adap(flow->dev);
+
+ /* For tcam get the next available slot, if default value specified */
+ if (flow->fidx == FILTER_ID_MAX) {
+ int idx;
+
+ idx = cxgbe_alloc_ftid(adap, fs->type);
+ if (idx < 0) {
+ dev_err(adap, "unable to get a filter index in tcam\n");
+ return -ENOMEM;
+ }
+ *fidx = (unsigned int)idx;
+ } else {
+ *fidx = flow->fidx;
+ }
+
+ return 0;
+}
+
+static int
+ch_rte_parse_atype_switch(const struct rte_flow_action *a,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_action_phy_port *port;
+
+ switch (a->type) {
+ case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+ port = (const struct rte_flow_action_phy_port *)a->conf;
+ fs->eport = port->index;
+ break;
+ default:
+ /* We are not supposed to come here */
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "Action not supported");
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_rtef_parse_actions(struct rte_flow *flow,
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ const struct rte_flow_action_queue *q;
+ const struct rte_flow_action *a;
+ char abit = 0;
+ int ret;
+
+ for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
+ switch (a->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ if (abit++)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "specify only 1 pass/drop");
+ fs->action = FILTER_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ q = (const struct rte_flow_action_queue *)a->conf;
+ if (!q)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, q,
+ "specify rx queue index");
+ if (check_rxq(flow->dev, q->index))
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, q,
+ "Invalid rx queue");
+ if (abit++)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "specify only 1 pass/drop");
+ fs->action = FILTER_PASS;
+ fs->dirsteer = 1;
+ fs->iq = q->index;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ fs->hitcnts = 1;
+ break;
+ case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+ /* We allow multiple switch actions, but switch is
+ * not compatible with either queue or drop
+ */
+ if (abit++ && fs->action != FILTER_SWITCH)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "overlapping action specified");
+ ret = ch_rte_parse_atype_switch(a, fs, e);
+ if (ret)
+ return ret;
+ fs->action = FILTER_SWITCH;
+ break;
+ default:
+ /* Not supported action : return error */
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ a, "Action not supported");
+ }
+ }
+
+ return 0;
+}
+
+struct chrte_fparse parseitem[] = {
+ [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
+ .fptr = ch_rte_parsetype_port,
+ .dmask = &(const struct rte_flow_item_phy_port){
+ .index = 0x7,
+ }
+ },
+
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .fptr = ch_rte_parsetype_ipv4,
+ .dmask = &rte_flow_item_ipv4_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .fptr = ch_rte_parsetype_ipv6,
+ .dmask = &rte_flow_item_ipv6_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .fptr = ch_rte_parsetype_udp,
+ .dmask = &rte_flow_item_udp_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .fptr = ch_rte_parsetype_tcp,
+ .dmask = &rte_flow_item_tcp_mask,
+ },
+};
+
+static int
+cxgbe_rtef_parse_items(struct rte_flow *flow,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(flow->dev);
+ const struct rte_flow_item *i;
+ char repeat[ARRAY_SIZE(parseitem)] = {0};
+
+ for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
+ struct chrte_fparse *idx = &flow->item_parser[i->type];
+ int ret;
+
+ if (i->type > ARRAY_SIZE(parseitem))
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "Item not supported");
+
+ switch (i->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ continue;
+ default:
+ /* check if item is repeated */
+ if (repeat[i->type])
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, i,
+ "parse items cannot be repeated (except void)");
+ repeat[i->type] = 1;
+
+ /* validate the item */
+ ret = cxgbe_validate_item(i, e);
+ if (ret)
+ return ret;
+
+ if (!idx || !idx->fptr) {
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, i,
+ "Item not supported");
+ } else {
+ ret = idx->fptr(idx->dmask, i, &flow->fs, e);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ cxgbe_fill_filter_region(adap, &flow->fs);
+
+ return 0;
+}
+
+static int
+cxgbe_flow_parse(struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ int ret;
+
+ /* parse user request into ch_filter_specification */
+ ret = cxgbe_rtef_parse_attr(flow, attr, e);
+ if (ret)
+ return ret;
+ ret = cxgbe_rtef_parse_items(flow, item, e);
+ if (ret)
+ return ret;
+ return cxgbe_rtef_parse_actions(flow, action, e);
+}
+
+static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ struct adapter *adap = ethdev2adap(dev);
+ struct tid_info *t = &adap->tids;
+ struct filter_ctx ctx;
+ unsigned int fidx;
+ int err;
+
+ if (cxgbe_get_fidx(flow, &fidx))
+ return -ENOMEM;
+ if (cxgbe_verify_fidx(flow, fidx, 0))
+ return -1;
+
+ t4_init_completion(&ctx.completion);
+ /* go create the filter */
+ err = cxgbe_set_filter(dev, fidx, fs, &ctx);
+ if (err) {
+ dev_err(adap, "Error %d while creating filter.\n", err);
+ return err;
+ }
+
+ /* Poll the FW for reply */
+ err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
+ CXGBE_FLOW_POLL_US,
+ CXGBE_FLOW_POLL_CNT,
+ &ctx.completion);
+ if (err) {
+ dev_err(adap, "Filter set operation timed out (%d)\n", err);
+ return err;
+ }
+ if (ctx.result) {
+ dev_err(adap, "Hardware error %d while creating the filter.\n",
+ ctx.result);
+ return ctx.result;
+ }
+
+ if (fs->cap) { /* to destroy the filter */
+ flow->fidx = ctx.tid;
+ flow->f = lookup_tid(t, ctx.tid);
+ } else {
+ flow->fidx = fidx;
+ flow->f = &adap->tids.ftid_tab[fidx];
+ }
+
+ return 0;
+}
+
+static struct rte_flow *
+cxgbe_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct rte_flow *flow;
+ int ret;
+
+ flow = t4_os_alloc(sizeof(struct rte_flow));
+ if (!flow) {
+ rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Unable to allocate memory for"
+ " filter_entry");
+ return NULL;
+ }
+
+ flow->item_parser = parseitem;
+ flow->dev = dev;
+
+ if (cxgbe_flow_parse(flow, attr, item, action, e)) {
+ t4_os_free(flow);
+ return NULL;
+ }
+
+ /* go, interact with cxgbe_filter */
+ ret = __cxgbe_flow_create(dev, flow);
+ if (ret) {
+ rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Unable to create flow rule");
+ t4_os_free(flow);
+ return NULL;
+ }
+
+ flow->f->private = flow; /* Will be used during flush */
+
+ return flow;
+}
+
+static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct filter_entry *f = flow->f;
+ struct ch_filter_specification *fs;
+ struct filter_ctx ctx;
+ int err;
+
+ fs = &f->fs;
+ if (cxgbe_verify_fidx(flow, flow->fidx, 1))
+ return -1;
+
+ t4_init_completion(&ctx.completion);
+ err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
+ if (err) {
+ dev_err(adap, "Error %d while deleting filter.\n", err);
+ return err;
+ }
+
+ /* Poll the FW for reply */
+ err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
+ CXGBE_FLOW_POLL_US,
+ CXGBE_FLOW_POLL_CNT,
+ &ctx.completion);
+ if (err) {
+ dev_err(adap, "Filter delete operation timed out (%d)\n", err);
+ return err;
+ }
+ if (ctx.result) {
+ dev_err(adap, "Hardware error %d while deleting the filter.\n",
+ ctx.result);
+ return ctx.result;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *e)
+{
+ int ret;
+
+ ret = __cxgbe_flow_destroy(dev, flow);
+ if (ret)
+ return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ flow, "error destroying filter.");
+ t4_os_free(flow);
+ return 0;
+}
+
+static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
+ u64 *byte_count)
+{
+ struct adapter *adap = ethdev2adap(flow->dev);
+ struct ch_filter_specification fs = flow->f->fs;
+ unsigned int fidx = flow->fidx;
+ int ret = 0;
+
+ ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
+ if (ret)
+ return ret;
+ return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
+}
+
+static int
+cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action, void *data,
+ struct rte_flow_error *e)
+{
+ struct ch_filter_specification fs;
+ struct rte_flow_query_count *c;
+ struct filter_entry *f;
+ int ret;
+
+ RTE_SET_USED(dev);
+
+ f = flow->f;
+ fs = f->fs;
+
+ if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "only count supported for query");
+
+ /*
+ * This is a valid operation, Since we are allowed to do chelsio
+ * specific operations in rte side of our code but not vise-versa
+ *
+ * So, fs can be queried/modified here BUT rte_flow_query_count
+ * cannot be worked on by the lower layer since we want to maintain
+ * it as rte_flow agnostic.
+ */
+ if (!fs.hitcnts)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ &fs, "filter hit counters were not"
+ " enabled during filter creation");
+
+ c = (struct rte_flow_query_count *)data;
+ ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
+ if (ret)
+ return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
+ f, "cxgbe pmd failed to"
+ " perform query");
+
+ /* Query was successful */
+ c->bytes_set = 1;
+ c->hits_set = 1;
+
+ return 0; /* success / partial_success */
+}
+
+static int
+cxgbe_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct rte_flow *flow;
+ unsigned int fidx;
+ int ret;
+
+ flow = t4_os_alloc(sizeof(struct rte_flow));
+ if (!flow)
+ return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Unable to allocate memory for filter_entry");
+
+ flow->item_parser = parseitem;
+ flow->dev = dev;
+
+ ret = cxgbe_flow_parse(flow, attr, item, action, e);
+ if (ret) {
+ t4_os_free(flow);
+ return ret;
+ }
+
+ if (validate_filter(adap, &flow->fs)) {
+ t4_os_free(flow);
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "validation failed. Check f/w config file.");
+ }
+
+ if (cxgbe_get_fidx(flow, &fidx)) {
+ t4_os_free(flow);
+ return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "no memory in tcam.");
+ }
+
+ if (cxgbe_verify_fidx(flow, fidx, 0)) {
+ t4_os_free(flow);
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "validation failed");
+ }
+
+ t4_os_free(flow);
+ return 0;
+}
+
+/*
+ * @ret : > 0 filter destroyed succsesfully
+ * < 0 error destroying filter
+ * == 1 filter not active / not found
+ */
+static int
+cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
+ struct rte_flow_error *e)
+{
+ if (f && (f->valid || f->pending) &&
+ f->dev == dev && /* Only if user has asked for this port */
+ f->private) /* We (rte_flow) created this filter */
+ return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
+ e);
+ return 1;
+}
+
+static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ unsigned int i;
+ int ret = 0;
+
+ if (adap->tids.ftid_tab) {
+ struct filter_entry *f = &adap->tids.ftid_tab[0];
+
+ for (i = 0; i < adap->tids.nftids; i++, f++) {
+ ret = cxgbe_check_n_destroy(f, dev, e);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ if (is_hashfilter(adap) && adap->tids.tid_tab) {
+ struct filter_entry *f;
+
+ for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
+ f = (struct filter_entry *)adap->tids.tid_tab[i];
+
+ ret = cxgbe_check_n_destroy(f, dev, e);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+out:
+ return ret >= 0 ? 0 : ret;
+}
+
+static const struct rte_flow_ops cxgbe_flow_ops = {
+ .validate = cxgbe_flow_validate,
+ .create = cxgbe_flow_create,
+ .destroy = cxgbe_flow_destroy,
+ .flush = cxgbe_flow_flush,
+ .query = cxgbe_flow_query,
+ .isolate = NULL,
+};
+
+int
+cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ RTE_SET_USED(dev);
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &cxgbe_flow_ops;
+ break;
+ default:
+ ret = -ENOTSUP;
+ break;
+ }
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.h
new file mode 100644
index 00000000..0f750474
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#ifndef _CXGBE_FLOW_H_
+#define _CXGBE_FLOW_H_
+
+#include <rte_flow_driver.h>
+#include "cxgbe_filter.h"
+#include "cxgbe.h"
+
+#define CXGBE_FLOW_POLL_US 10
+#define CXGBE_FLOW_POLL_CNT 10
+
+struct chrte_fparse {
+ int (*fptr)(const void *mask, /* currently supported mask */
+ const struct rte_flow_item *item, /* user input */
+ struct ch_filter_specification *fs, /* where to parse */
+ struct rte_flow_error *e);
+ const void *dmask; /* Specify what is supported by chelsio by default*/
+};
+
+struct rte_flow {
+ struct filter_entry *f;
+ struct ch_filter_specification fs; /* temp, to create filter */
+ struct chrte_fparse *item_parser;
+ /*
+ * filter_entry doesn't store user priority.
+ * Post creation of filter this will indicate the
+ * flow index (fidx) for both hash and tcam filters
+ */
+ unsigned int fidx;
+ struct rte_eth_dev *dev;
+};
+
+int
+cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+
+#endif /* _CXGBE_FLOW_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_main.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_main.c
new file mode 100644
index 00000000..c3938e8d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_main.c
@@ -0,0 +1,1903 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+#include <rte_kvargs.h>
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "cxgbe.h"
+#include "clip_tbl.h"
+
+/**
+ * Allocate a chunk of memory. The allocated memory is cleared.
+ */
+void *t4_alloc_mem(size_t size)
+{
+ return rte_zmalloc(NULL, size, 0);
+}
+
+/**
+ * Free memory allocated through t4_alloc_mem().
+ */
+void t4_free_mem(void *addr)
+{
+ rte_free(addr);
+}
+
+/*
+ * Response queue handler for the FW event queue.
+ */
+static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
+ __rte_unused const struct pkt_gl *gl)
+{
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+
+ rsp++; /* skip RSS header */
+
+ /*
+ * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
+ */
+ if (unlikely(opcode == CPL_FW4_MSG &&
+ ((const struct cpl_fw4_msg *)rsp)->type ==
+ FW_TYPE_RSSCPL)) {
+ rsp++;
+ opcode = ((const struct rss_header *)rsp)->opcode;
+ rsp++;
+ if (opcode != CPL_SGE_EGR_UPDATE) {
+ dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n",
+ opcode);
+ goto out;
+ }
+ }
+
+ if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
+ /* do nothing */
+ } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
+ const struct cpl_fw6_msg *msg = (const void *)rsp;
+
+ t4_handle_fw_rpl(q->adapter, msg->data);
+ } else if (opcode == CPL_ABORT_RPL_RSS) {
+ const struct cpl_abort_rpl_rss *p = (const void *)rsp;
+
+ hash_del_filter_rpl(q->adapter, p);
+ } else if (opcode == CPL_SET_TCB_RPL) {
+ const struct cpl_set_tcb_rpl *p = (const void *)rsp;
+
+ filter_rpl(q->adapter, p);
+ } else if (opcode == CPL_ACT_OPEN_RPL) {
+ const struct cpl_act_open_rpl *p = (const void *)rsp;
+
+ hash_filter_rpl(q->adapter, p);
+ } else {
+ dev_err(adapter, "unexpected CPL %#x on FW event queue\n",
+ opcode);
+ }
+out:
+ return 0;
+}
+
+/**
+ * Setup sge control queues to pass control information.
+ */
+int setup_sge_ctrl_txq(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int err = 0, i = 0;
+
+ for_each_port(adapter, i) {
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct sge_ctrl_txq *q = &s->ctrlq[i];
+
+ q->q.size = 1024;
+ err = t4_sge_alloc_ctrl_txq(adapter, q,
+ adapter->eth_dev, i,
+ s->fw_evtq.cntxt_id,
+ rte_socket_id());
+ if (err) {
+ dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
+ err);
+ goto out;
+ }
+ snprintf(name, sizeof(name), "cxgbe_ctrl_pool_%d", i);
+ q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
+ RTE_CACHE_LINE_SIZE,
+ RTE_MBUF_PRIV_ALIGN,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ SOCKET_ID_ANY);
+ if (!q->mb_pool) {
+ dev_err(adapter, "Can't create ctrl pool for port: %d",
+ i);
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+ return 0;
+out:
+ t4_free_sge_resources(adapter);
+ return err;
+}
+
+/**
+ * cxgbe_poll_for_completion: Poll rxq for completion
+ * @q: rxq to poll
+ * @us: microseconds to delay
+ * @cnt: number of times to poll
+ * @c: completion to check for 'done' status
+ *
+ * Polls the rxq for reples until completion is done or the count
+ * expires.
+ */
+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt, struct t4_completion *c)
+{
+ unsigned int i;
+ unsigned int work_done, budget = 4;
+
+ if (!c)
+ return -EINVAL;
+
+ for (i = 0; i < cnt; i++) {
+ cxgbe_poll(q, NULL, budget, &work_done);
+ t4_os_lock(&c->lock);
+ if (c->done) {
+ t4_os_unlock(&c->lock);
+ return 0;
+ }
+ t4_os_unlock(&c->lock);
+ udelay(us);
+ }
+ return -ETIMEDOUT;
+}
+
+int setup_sge_fwevtq(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int err = 0;
+ int msi_idx = 0;
+
+ err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev,
+ msi_idx, NULL, fwevtq_handler, -1, NULL, 0,
+ rte_socket_id());
+ return err;
+}
+
+static int closest_timer(const struct sge *s, int time)
+{
+ unsigned int i, match = 0;
+ int delta, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+ delta = time - s->timer_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+static int closest_thres(const struct sge *s, int thres)
+{
+ unsigned int i, match = 0;
+ int delta, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+ delta = thres - s->counter_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+/**
+ * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
+ * @q: the Rx queue
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Sets an Rx queue's interrupt hold-off time and packet count. At least
+ * one of the two needs to be enabled for the queue to generate interrupts.
+ */
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt)
+{
+ struct adapter *adap = q->adapter;
+ unsigned int timer_val;
+
+ if (cnt) {
+ int err;
+ u32 v, new_idx;
+
+ new_idx = closest_thres(&adap->sge, cnt);
+ if (q->desc && q->pktcnt_idx != new_idx) {
+ /* the queue has already been created, update it */
+ v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+ V_FW_PARAMS_PARAM_X(
+ FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+ V_FW_PARAMS_PARAM_YZ(q->cntxt_id);
+ err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+ &v, &new_idx);
+ if (err)
+ return err;
+ }
+ q->pktcnt_idx = new_idx;
+ }
+
+ timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER :
+ closest_timer(&adap->sge, us);
+
+ if ((us | cnt) == 0)
+ q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
+ else
+ q->intr_params = V_QINTR_TIMER_IDX(timer_val) |
+ V_QINTR_CNT_EN(cnt > 0);
+ return 0;
+}
+
+/**
+ * Allocate an active-open TID and set it to the supplied value.
+ */
+int cxgbe_alloc_atid(struct tid_info *t, void *data)
+{
+ int atid = -1;
+
+ t4_os_lock(&t->atid_lock);
+ if (t->afree) {
+ union aopen_entry *p = t->afree;
+
+ atid = p - t->atid_tab;
+ t->afree = p->next;
+ p->data = data;
+ t->atids_in_use++;
+ }
+ t4_os_unlock(&t->atid_lock);
+ return atid;
+}
+
+/**
+ * Release an active-open TID.
+ */
+void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
+{
+ union aopen_entry *p = &t->atid_tab[atid];
+
+ t4_os_lock(&t->atid_lock);
+ p->next = t->afree;
+ t->afree = p;
+ t->atids_in_use--;
+ t4_os_unlock(&t->atid_lock);
+}
+
+/**
+ * Populate a TID_RELEASE WR. Caller must properly size the skb.
+ */
+static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid)
+{
+ struct cpl_tid_release *req;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *);
+ INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
+}
+
+/**
+ * Release a TID and inform HW. If we are unable to allocate the release
+ * message we defer to a work queue.
+ */
+void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
+ unsigned short family)
+{
+ struct rte_mbuf *mbuf;
+ struct adapter *adap = container_of(t, struct adapter, tids);
+
+ WARN_ON(tid >= t->ntids);
+
+ if (t->tid_tab[tid]) {
+ t->tid_tab[tid] = NULL;
+ rte_atomic32_dec(&t->conns_in_use);
+ if (t->hash_base && tid >= t->hash_base) {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_dec(&t->hash_tids_in_use);
+ } else {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_dec(&t->tids_in_use);
+ }
+ }
+
+ mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool);
+ if (mbuf) {
+ mbuf->data_len = sizeof(struct cpl_tid_release);
+ mbuf->pkt_len = mbuf->data_len;
+ mk_tid_release(mbuf, tid);
+ t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf);
+ }
+}
+
+/**
+ * Insert a TID.
+ */
+void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
+ unsigned short family)
+{
+ t->tid_tab[tid] = data;
+ if (t->hash_base && tid >= t->hash_base) {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_inc(&t->hash_tids_in_use);
+ } else {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_inc(&t->tids_in_use);
+ }
+
+ rte_atomic32_inc(&t->conns_in_use);
+}
+
+/**
+ * Free TID tables.
+ */
+static void tid_free(struct tid_info *t)
+{
+ if (t->tid_tab) {
+ if (t->ftid_bmap)
+ rte_bitmap_free(t->ftid_bmap);
+
+ if (t->ftid_bmap_array)
+ t4_os_free(t->ftid_bmap_array);
+
+ t4_os_free(t->tid_tab);
+ }
+
+ memset(t, 0, sizeof(struct tid_info));
+}
+
+/**
+ * Allocate and initialize the TID tables. Returns 0 on success.
+ */
+static int tid_init(struct tid_info *t)
+{
+ size_t size;
+ unsigned int ftid_bmap_size;
+ unsigned int natids = t->natids;
+ unsigned int max_ftids = t->nftids;
+
+ ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
+ size = t->ntids * sizeof(*t->tid_tab) +
+ max_ftids * sizeof(*t->ftid_tab) +
+ natids * sizeof(*t->atid_tab);
+
+ t->tid_tab = t4_os_alloc(size);
+ if (!t->tid_tab)
+ return -ENOMEM;
+
+ t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
+ t->ftid_tab = (struct filter_entry *)&t->tid_tab[t->natids];
+ t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
+ if (!t->ftid_bmap_array) {
+ tid_free(t);
+ return -ENOMEM;
+ }
+
+ t4_os_lock_init(&t->atid_lock);
+ t4_os_lock_init(&t->ftid_lock);
+
+ t->afree = NULL;
+ t->atids_in_use = 0;
+ rte_atomic32_init(&t->tids_in_use);
+ rte_atomic32_set(&t->tids_in_use, 0);
+ rte_atomic32_init(&t->conns_in_use);
+ rte_atomic32_set(&t->conns_in_use, 0);
+
+ /* Setup the free list for atid_tab and clear the stid bitmap. */
+ if (natids) {
+ while (--natids)
+ t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+ t->afree = t->atid_tab;
+ }
+
+ t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
+ ftid_bmap_size);
+ if (!t->ftid_bmap) {
+ tid_free(t);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static inline bool is_x_1g_port(const struct link_config *lc)
+{
+ return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
+}
+
+static inline bool is_x_10g_port(const struct link_config *lc)
+{
+ unsigned int speeds, high_speeds;
+
+ speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps));
+ high_speeds = speeds &
+ ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
+
+ return high_speeds != 0;
+}
+
+inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
+ unsigned int us, unsigned int cnt,
+ unsigned int size, unsigned int iqe_size)
+{
+ q->adapter = adap;
+ cxgb4_set_rspq_intr_params(q, us, cnt);
+ q->iqe_len = iqe_size;
+ q->size = size;
+}
+
+int cfg_queue_count(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+ struct sge *s = &adap->sge;
+ unsigned int max_queues = s->max_ethqsets / adap->params.nports;
+
+ if ((eth_dev->data->nb_rx_queues < 1) ||
+ (eth_dev->data->nb_tx_queues < 1))
+ return -EINVAL;
+
+ if ((eth_dev->data->nb_rx_queues > max_queues) ||
+ (eth_dev->data->nb_tx_queues > max_queues))
+ return -EINVAL;
+
+ if (eth_dev->data->nb_rx_queues > pi->rss_size)
+ return -EINVAL;
+
+ /* We must configure RSS, since config has changed*/
+ pi->flags &= ~PORT_RSS_DONE;
+
+ pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
+ pi->n_tx_qsets = eth_dev->data->nb_tx_queues;
+
+ return 0;
+}
+
+void cfg_queues(struct rte_eth_dev *eth_dev)
+{
+ struct rte_config *config = rte_eal_get_configuration();
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+ struct sge *s = &adap->sge;
+ unsigned int i, nb_ports = 0, qidx = 0;
+ unsigned int q_per_port = 0;
+
+ if (!(adap->flags & CFG_QUEUES)) {
+ for_each_port(adap, i) {
+ struct port_info *tpi = adap2pinfo(adap, i);
+
+ nb_ports += (is_x_10g_port(&tpi->link_cfg)) ||
+ is_x_1g_port(&tpi->link_cfg) ? 1 : 0;
+ }
+
+ /*
+ * We default up to # of cores queues per 1G/10G port.
+ */
+ if (nb_ports)
+ q_per_port = (s->max_ethqsets -
+ (adap->params.nports - nb_ports)) /
+ nb_ports;
+
+ if (q_per_port > config->lcore_count)
+ q_per_port = config->lcore_count;
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->first_qset = qidx;
+
+ /* Initially n_rx_qsets == n_tx_qsets */
+ pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) ||
+ is_x_1g_port(&pi->link_cfg)) ?
+ q_per_port : 1;
+ pi->n_tx_qsets = pi->n_rx_qsets;
+
+ if (pi->n_rx_qsets > pi->rss_size)
+ pi->n_rx_qsets = pi->rss_size;
+
+ qidx += pi->n_rx_qsets;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
+ struct sge_eth_rxq *r = &s->ethrxq[i];
+
+ init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
+ r->usembufs = 1;
+ r->fl.size = (r->usembufs ? 1024 : 72);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
+ s->ethtxq[i].q.size = 1024;
+
+ init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
+ adap->flags |= CFG_QUEUES;
+ }
+}
+
+void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
+{
+ t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats,
+ &pi->stats_base);
+}
+
+void cxgbe_stats_reset(struct port_info *pi)
+{
+ t4_clr_port_stats(pi->adapter, pi->tx_chan);
+}
+
+static void setup_memwin(struct adapter *adap)
+{
+ u32 mem_win0_base;
+
+ /* For T5, only relative offset inside the PCIe BAR is passed */
+ mem_win0_base = MEMWIN0_BASE;
+
+ /*
+ * Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
+ MEMWIN_NIC),
+ mem_win0_base | V_BIR(0) |
+ V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
+ MEMWIN_NIC));
+}
+
+int init_rss(struct adapter *adap)
+{
+ unsigned int i;
+
+ if (is_pf4(adap)) {
+ int err;
+
+ err = t4_init_rss_mode(adap, adap->mbox);
+ if (err)
+ return err;
+ }
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
+ if (!pi->rss)
+ return -ENOMEM;
+
+ pi->rss_hf = CXGBE_RSS_HF_ALL;
+ }
+ return 0;
+}
+
+/**
+ * Dump basic information about the adapter.
+ */
+void print_adapter_info(struct adapter *adap)
+{
+ /**
+ * Hardware/Firmware/etc. Version/Revision IDs.
+ */
+ t4_dump_version_info(adap);
+}
+
+void print_port_info(struct adapter *adap)
+{
+ int i;
+ char buf[80];
+ struct rte_pci_addr *loc = &adap->pdev->addr;
+
+ for_each_port(adap, i) {
+ const struct port_info *pi = adap2pinfo(adap, i);
+ char *bufp = buf;
+
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
+ bufp += sprintf(bufp, "100M/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
+ bufp += sprintf(bufp, "1G/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
+ bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
+ bufp += sprintf(bufp, "25G/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
+ bufp += sprintf(bufp, "40G/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
+ bufp += sprintf(bufp, "50G/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
+ bufp += sprintf(bufp, "100G/");
+ if (bufp != buf)
+ --bufp;
+ sprintf(bufp, "BASE-%s",
+ t4_get_port_type_description(
+ (enum fw_port_type)pi->port_type));
+
+ dev_info(adap,
+ " " PCI_PRI_FMT " Chelsio rev %d %s %s\n",
+ loc->domain, loc->bus, loc->devid, loc->function,
+ CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
+ (adap->flags & USING_MSIX) ? " MSI-X" :
+ (adap->flags & USING_MSI) ? " MSI" : "");
+ }
+}
+
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+ __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+ struct rte_kvargs *kvlist;
+
+ if (!devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (!kvlist)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (rte_kvargs_process(kvlist, key,
+ check_devargs_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
+
+static void configure_vlan_types(struct adapter *adapter)
+{
+ struct rte_pci_device *pdev = adapter->pdev;
+ int i;
+
+ for_each_port(adapter, i) {
+ /* OVLAN Type 0x88a8 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x88a8));
+ /* OVLAN Type 0x9100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x9100));
+ /* OVLAN Type 0x8100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN2),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x8100));
+
+ /* IVLAN 0X8100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
+ V_IVLAN_ETYPE(M_IVLAN_ETYPE),
+ V_IVLAN_ETYPE(0x8100));
+
+ t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
+ F_OVLAN_EN0 | F_OVLAN_EN1 |
+ F_OVLAN_EN2 | F_IVLAN_EN,
+ F_OVLAN_EN0 | F_OVLAN_EN1 |
+ F_OVLAN_EN2 | F_IVLAN_EN);
+ }
+
+ if (cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN))
+ t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
+ V_RM_OVLAN(1), V_RM_OVLAN(0));
+}
+
+static void configure_pcie_ext_tag(struct adapter *adapter)
+{
+ u16 v;
+ int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
+
+ if (!pos)
+ return;
+
+ if (pos > 0) {
+ t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v);
+ v |= PCI_EXP_DEVCTL_EXT_TAG;
+ t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v);
+ if (is_t6(adapter->params.chip)) {
+ t4_set_reg_field(adapter, A_PCIE_CFG2,
+ V_T6_TOTMAXTAG(M_T6_TOTMAXTAG),
+ V_T6_TOTMAXTAG(7));
+ t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
+ V_T6_MINTAG(M_T6_MINTAG),
+ V_T6_MINTAG(8));
+ } else {
+ t4_set_reg_field(adapter, A_PCIE_CFG2,
+ V_TOTMAXTAG(M_TOTMAXTAG),
+ V_TOTMAXTAG(3));
+ t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
+ V_MINTAG(M_MINTAG),
+ V_MINTAG(8));
+ }
+ }
+}
+
+/* Figure out how many Queue Sets we can support */
+void configure_max_ethqsets(struct adapter *adapter)
+{
+ unsigned int ethqsets;
+
+ /*
+ * We need to reserve an Ingress Queue for the Asynchronous Firmware
+ * Event Queue.
+ *
+ * For each Queue Set, we'll need the ability to allocate two Egress
+ * Contexts -- one for the Ingress Queue Free List and one for the TX
+ * Ethernet Queue.
+ */
+ if (is_pf4(adapter)) {
+ struct pf_resources *pfres = &adapter->params.pfres;
+
+ ethqsets = pfres->niqflint - 1;
+ if (pfres->neq < ethqsets * 2)
+ ethqsets = pfres->neq / 2;
+ } else {
+ struct vf_resources *vfres = &adapter->params.vfres;
+
+ ethqsets = vfres->niqflint - 1;
+ if (vfres->nethctrl != ethqsets)
+ ethqsets = min(vfres->nethctrl, ethqsets);
+ if (vfres->neq < ethqsets * 2)
+ ethqsets = vfres->neq / 2;
+ }
+
+ if (ethqsets > MAX_ETH_QSETS)
+ ethqsets = MAX_ETH_QSETS;
+ adapter->sge.max_ethqsets = ethqsets;
+}
+
+/*
+ * Tweak configuration based on system architecture, etc. Most of these have
+ * defaults assigned to them by Firmware Configuration Files (if we're using
+ * them) but need to be explicitly set if we're using hard-coded
+ * initialization. So these are essentially common tweaks/settings for
+ * Configuration Files and hard-coded initialization ...
+ */
+static int adap_init0_tweaks(struct adapter *adapter)
+{
+ u8 rx_dma_offset;
+
+ /*
+ * Fix up various Host-Dependent Parameters like Page Size, Cache
+ * Line Size, etc. The firmware default is for a 4KB Page Size and
+ * 64B Cache Line Size ...
+ */
+ t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES,
+ T5_LAST_REV);
+
+ /*
+ * Keep the chip default offset to deliver Ingress packets into our
+ * DMA buffers to zero
+ */
+ rx_dma_offset = 0;
+ t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
+ V_PKTSHIFT(rx_dma_offset));
+
+ t4_set_reg_field(adapter, A_SGE_FLM_CFG,
+ V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
+ V_CREDITCNT(3) | V_CREDITCNTPACKING(1));
+
+ t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,
+ V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));
+
+ t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
+ V_IDMAARBROUNDROBIN(1U));
+
+ /*
+ * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
+ * adds the pseudo header itself.
+ */
+ t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
+ F_CSUM_HAS_PSEUDO_HDR, 0);
+
+ return 0;
+}
+
+/*
+ * Attempt to initialize the adapter via a Firmware Configuration File.
+ */
+static int adap_init0_config(struct adapter *adapter, int reset)
+{
+ struct fw_caps_config_cmd caps_cmd;
+ unsigned long mtype = 0, maddr = 0;
+ u32 finiver, finicsum, cfcsum;
+ int ret;
+ int config_issued = 0;
+ int cfg_addr;
+ char config_name[20];
+
+ /*
+ * Reset device if necessary.
+ */
+ if (reset) {
+ ret = t4_fw_reset(adapter, adapter->mbox,
+ F_PIORSTMODE | F_PIORST);
+ if (ret < 0) {
+ dev_warn(adapter, "Firmware reset failed, error %d\n",
+ -ret);
+ goto bye;
+ }
+ }
+
+ cfg_addr = t4_flash_cfg_addr(adapter);
+ if (cfg_addr < 0) {
+ ret = cfg_addr;
+ dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n",
+ -ret);
+ goto bye;
+ }
+
+ strcpy(config_name, "On Flash");
+ mtype = FW_MEMTYPE_CF_FLASH;
+ maddr = cfg_addr;
+
+ /*
+ * Issue a Capability Configuration command to the firmware to get it
+ * to parse the Configuration File. We don't use t4_fw_config_file()
+ * because we want the ability to modify various features after we've
+ * processed the configuration file ...
+ */
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ caps_cmd.cfvalid_to_len16 =
+ cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID |
+ V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
+ V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+ FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+ &caps_cmd);
+ /*
+ * If the CAPS_CONFIG failed with an ENOENT (for a Firmware
+ * Configuration File in FLASH), our last gasp effort is to use the
+ * Firmware Configuration File which is embedded in the firmware. A
+ * very few early versions of the firmware didn't have one embedded
+ * but we can ignore those.
+ */
+ if (ret == -ENOENT) {
+ dev_info(adapter, "%s: Going for embedded config in firmware..\n",
+ __func__);
+
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write =
+ cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
+ sizeof(caps_cmd), &caps_cmd);
+ strcpy(config_name, "Firmware Default");
+ }
+
+ config_issued = 1;
+ if (ret < 0)
+ goto bye;
+
+ finiver = be32_to_cpu(caps_cmd.finiver);
+ finicsum = be32_to_cpu(caps_cmd.finicsum);
+ cfcsum = be32_to_cpu(caps_cmd.cfcsum);
+ if (finicsum != cfcsum)
+ dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
+ finicsum, cfcsum);
+
+ /*
+ * If we're a pure NIC driver then disable all offloading facilities.
+ * This will allow the firmware to optimize aspects of the hardware
+ * configuration which will result in improved performance.
+ */
+ caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD);
+ caps_cmd.toecaps = 0;
+ caps_cmd.iscsicaps = 0;
+ caps_cmd.rdmacaps = 0;
+ caps_cmd.fcoecaps = 0;
+
+ /*
+ * And now tell the firmware to use the configuration we just loaded.
+ */
+ caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+ NULL);
+ if (ret < 0) {
+ dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n",
+ -ret);
+ goto bye;
+ }
+
+ /*
+ * Tweak configuration based on system architecture, etc.
+ */
+ ret = adap_init0_tweaks(adapter);
+ if (ret < 0) {
+ dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret);
+ goto bye;
+ }
+
+ /*
+ * And finally tell the firmware to initialize itself using the
+ * parameters from the Configuration File.
+ */
+ ret = t4_fw_initialize(adapter, adapter->mbox);
+ if (ret < 0) {
+ dev_warn(adapter, "Initializing Firmware failed, error %d\n",
+ -ret);
+ goto bye;
+ }
+
+ /*
+ * Return successfully and note that we're operating with parameters
+ * not supplied by the driver, rather than from hard-wired
+ * initialization constants buried in the driver.
+ */
+ dev_info(adapter,
+ "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
+ config_name, finiver, cfcsum);
+
+ return 0;
+
+ /*
+ * Something bad happened. Return the error ... (If the "error"
+ * is that there's no Configuration File on the adapter we don't
+ * want to issue a warning since this is fairly common.)
+ */
+bye:
+ if (config_issued && ret != -ENOENT)
+ dev_warn(adapter, "\"%s\" configuration file error %d\n",
+ config_name, -ret);
+
+ dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret);
+ return ret;
+}
+
+static int adap_init0(struct adapter *adap)
+{
+ struct fw_caps_config_cmd caps_cmd;
+ int ret = 0;
+ u32 v, port_vec;
+ enum dev_state state;
+ u32 params[7], val[7];
+ int reset = 1;
+ int mbox = adap->mbox;
+
+ /*
+ * Contact FW, advertising Master capability.
+ */
+ ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
+ if (ret < 0) {
+ dev_err(adap, "%s: could not connect to FW, error %d\n",
+ __func__, -ret);
+ goto bye;
+ }
+
+ CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__,
+ adap->mbox, ret);
+
+ if (ret == mbox)
+ adap->flags |= MASTER_PF;
+
+ if (state == DEV_STATE_INIT) {
+ /*
+ * Force halt and reset FW because a previous instance may have
+ * exited abnormally without properly shutting down
+ */
+ ret = t4_fw_halt(adap, adap->mbox, reset);
+ if (ret < 0) {
+ dev_err(adap, "Failed to halt. Exit.\n");
+ goto bye;
+ }
+
+ ret = t4_fw_restart(adap, adap->mbox, reset);
+ if (ret < 0) {
+ dev_err(adap, "Failed to restart. Exit.\n");
+ goto bye;
+ }
+ state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT);
+ }
+
+ t4_get_version_info(adap);
+
+ ret = t4_get_core_clock(adap, &adap->params.vpd);
+ if (ret < 0) {
+ dev_err(adap, "%s: could not get core clock, error %d\n",
+ __func__, -ret);
+ goto bye;
+ }
+
+ /*
+ * If the firmware is initialized already (and we're not forcing a
+ * master initialization), note that we're living with existing
+ * adapter parameters. Otherwise, it's time to try initializing the
+ * adapter ...
+ */
+ if (state == DEV_STATE_INIT) {
+ dev_info(adap, "Coming up as %s: Adapter already initialized\n",
+ adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
+ } else {
+ dev_info(adap, "Coming up as MASTER: Initializing adapter\n");
+
+ ret = adap_init0_config(adap, reset);
+ if (ret == -ENOENT) {
+ dev_err(adap,
+ "No Configuration File present on adapter. Using hard-wired configuration parameters.\n");
+ goto bye;
+ }
+ }
+ if (ret < 0) {
+ dev_err(adap, "could not initialize adapter, error %d\n", -ret);
+ goto bye;
+ }
+
+ /* Now that we've successfully configured and initialized the adapter
+ * (or found it already initialized), we can ask the Firmware what
+ * resources it has provisioned for us.
+ */
+ ret = t4_get_pfres(adap);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Unable to retrieve resource provisioning info\n");
+ goto bye;
+ }
+
+ /* Find out what ports are available to us. */
+ v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
+ if (ret < 0) {
+ dev_err(adap, "%s: failure in t4_query_params; error = %d\n",
+ __func__, ret);
+ goto bye;
+ }
+
+ adap->params.nports = hweight32(port_vec);
+ adap->params.portvec = port_vec;
+
+ dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
+ adap->params.nports);
+
+ /*
+ * Give the SGE code a chance to pull in anything that it needs ...
+ * Note that this must be called after we retrieve our VPD parameters
+ * in order to know how to convert core ticks to seconds, etc.
+ */
+ ret = t4_sge_init(adap);
+ if (ret < 0) {
+ dev_err(adap, "t4_sge_init failed with error %d\n",
+ -ret);
+ goto bye;
+ }
+
+ /*
+ * Grab some of our basic fundamental operating parameters.
+ */
+#define FW_PARAM_DEV(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
+ V_FW_PARAMS_PARAM_Y(0) | \
+ V_FW_PARAMS_PARAM_Z(0))
+
+ params[0] = FW_PARAM_PFVF(FILTER_START);
+ params[1] = FW_PARAM_PFVF(FILTER_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->tids.ftid_base = val[0];
+ adap->tids.nftids = val[1] - val[0] + 1;
+
+ params[0] = FW_PARAM_PFVF(CLIP_START);
+ params[1] = FW_PARAM_PFVF(CLIP_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->clipt_start = val[0];
+ adap->clipt_end = val[1];
+
+ /*
+ * Get device capabilities so we can determine what resources we need
+ * to manage.
+ */
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
+ &caps_cmd);
+ if (ret < 0)
+ goto bye;
+
+ if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) &&
+ is_t6(adap->params.chip)) {
+ if (init_hash_filter(adap) < 0)
+ goto bye;
+ }
+
+ /* query tid-related parameters */
+ params[0] = FW_PARAM_DEV(NTID);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ params, val);
+ if (ret < 0)
+ goto bye;
+ adap->tids.ntids = val[0];
+ adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+
+ /* If we're running on newer firmware, let it know that we're
+ * prepared to deal with encapsulated CPL messages. Older
+ * firmware won't understand this and we'll just get
+ * unencapsulated messages ...
+ */
+ params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
+ val[0] = 1;
+ (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
+
+ /*
+ * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
+ * capability. Earlier versions of the firmware didn't have the
+ * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
+ * permission to use ULPTX MEMWRITE DSGL.
+ */
+ if (is_t4(adap->params.chip)) {
+ adap->params.ulptx_memwrite_dsgl = false;
+ } else {
+ params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+ adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
+ }
+
+ /*
+ * The MTU/MSS Table is initialized by now, so load their values. If
+ * we're initializing the adapter, then we'll make any modifications
+ * we want to the MTU/MSS Table and also initialize the congestion
+ * parameters.
+ */
+ t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
+ if (state != DEV_STATE_INIT) {
+ int i;
+
+ /*
+ * The default MTU Table contains values 1492 and 1500.
+ * However, for TCP, it's better to have two values which are
+ * a multiple of 8 +/- 4 bytes apart near this popular MTU.
+ * This allows us to have a TCP Data Payload which is a
+ * multiple of 8 regardless of what combination of TCP Options
+ * are in use (always a multiple of 4 bytes) which is
+ * important for performance reasons. For instance, if no
+ * options are in use, then we have a 20-byte IP header and a
+ * 20-byte TCP header. In this case, a 1500-byte MSS would
+ * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
+ * which is not a multiple of 8. So using an MSS of 1488 in
+ * this case results in a TCP Data Payload of 1448 bytes which
+ * is a multiple of 8. On the other hand, if 12-byte TCP Time
+ * Stamps have been negotiated, then an MTU of 1500 bytes
+ * results in a TCP Data Payload of 1448 bytes which, as
+ * above, is a multiple of 8 bytes ...
+ */
+ for (i = 0; i < NMTUS; i++)
+ if (adap->params.mtus[i] == 1492) {
+ adap->params.mtus[i] = 1488;
+ break;
+ }
+
+ t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+ adap->params.b_wnd);
+ }
+ t4_init_sge_params(adap);
+ t4_init_tp_params(adap);
+ configure_pcie_ext_tag(adap);
+ configure_vlan_types(adap);
+ configure_max_ethqsets(adap);
+
+ adap->params.drv_memwin = MEMWIN_NIC;
+ adap->flags |= FW_OK;
+ dev_debug(adap, "%s: returning zero..\n", __func__);
+ return 0;
+
+ /*
+ * Something bad happened. If a command timed out or failed with EIO
+ * FW does not operate within its spec or something catastrophic
+ * happened to HW/FW, stop issuing commands.
+ */
+bye:
+ if (ret != -ETIMEDOUT && ret != -EIO)
+ t4_fw_bye(adap, adap->mbox);
+ return ret;
+}
+
+/**
+ * t4_os_portmod_changed - handle port module changes
+ * @adap: the adapter associated with the module change
+ * @port_id: the port index whose module status has changed
+ *
+ * This is the OS-dependent handler for port module changes. It is
+ * invoked when a port module is removed or inserted for any OS-specific
+ * processing.
+ */
+void t4_os_portmod_changed(const struct adapter *adap, int port_id)
+{
+ static const char * const mod_str[] = {
+ NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
+ };
+
+ const struct port_info *pi = adap2pinfo(adap, port_id);
+
+ if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+ dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
+ else if (pi->mod_type < ARRAY_SIZE(mod_str))
+ dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
+ mod_str[pi->mod_type]);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ dev_info(adap, "Port%d: unsupported port module inserted\n",
+ pi->port_id);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ dev_info(adap, "Port%d: unknown port module inserted\n",
+ pi->port_id);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ dev_info(adap, "Port%d: transceiver module error\n",
+ pi->port_id);
+ else
+ dev_info(adap, "Port%d: unknown module type %d inserted\n",
+ pi->port_id, pi->mod_type);
+}
+
+inline bool force_linkup(struct adapter *adap)
+{
+ struct rte_pci_device *pdev = adap->pdev;
+
+ if (is_pf4(adap))
+ return false; /* force_linkup not required for pf driver*/
+ if (!cxgbe_get_devargs(pdev->device.devargs,
+ CXGBE_DEVARG_FORCE_LINK_UP))
+ return false;
+ return true;
+}
+
+/**
+ * link_start - enable a port
+ * @dev: the port to enable
+ *
+ * Performs the MAC and PHY actions needed to enable a port.
+ */
+int link_start(struct port_info *pi)
+{
+ struct adapter *adapter = pi->adapter;
+ int ret;
+ unsigned int mtu;
+
+ mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ (ETHER_HDR_LEN + ETHER_CRC_LEN);
+
+ /*
+ * We do not set address filters and promiscuity here, the stack does
+ * that step explicitly.
+ */
+ ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1,
+ -1, 1, true);
+ if (ret == 0) {
+ ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
+ pi->xact_addr_filt,
+ (u8 *)&pi->eth_dev->data->mac_addrs[0],
+ true, true);
+ if (ret >= 0) {
+ pi->xact_addr_filt = ret;
+ ret = 0;
+ }
+ }
+ if (ret == 0 && is_pf4(adapter))
+ ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
+ &pi->link_cfg);
+ if (ret == 0) {
+ /*
+ * Enabling a Virtual Interface can result in an interrupt
+ * during the processing of the VI Enable command and, in some
+ * paths, result in an attempt to issue another command in the
+ * interrupt context. Thus, we disable interrupts during the
+ * course of the VI Enable command ...
+ */
+ ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
+ true, true, false);
+ }
+
+ if (ret == 0 && force_linkup(adapter))
+ pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ return ret;
+}
+
+/**
+ * cxgbe_write_rss_conf - flash the RSS configuration for a given port
+ * @pi: the port
+ * @rss_hf: Hash configuration to apply
+ */
+int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
+{
+ struct adapter *adapter = pi->adapter;
+ const struct sge_eth_rxq *rxq;
+ u64 flags = 0;
+ u16 rss;
+ int err;
+
+ /* Should never be called before setting up sge eth rx queues */
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+ dev_err(adap, "%s No RXQs available on port %d\n",
+ __func__, pi->port_id);
+ return -EINVAL;
+ }
+
+ /* Don't allow unsupported hash functions */
+ if (rss_hf & ~CXGBE_RSS_HF_ALL)
+ return -EINVAL;
+
+ if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_UDPEN;
+
+ if (rss_hf & CXGBE_RSS_HF_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
+
+ if (rss_hf & CXGBE_RSS_HF_TCP_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+
+ if (rss_hf & CXGBE_RSS_HF_UDP_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_UDPEN;
+
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
+ rss = rxq[0].rspq.abs_id;
+
+ /* If Tunnel All Lookup isn't specified in the global RSS
+ * Configuration, then we need to specify a default Ingress
+ * Queue for any ingress packets which aren't hashed. We'll
+ * use our first ingress queue ...
+ */
+ err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ flags, rss);
+ return err;
+}
+
+/**
+ * cxgbe_write_rss - write the RSS table for a given port
+ * @pi: the port
+ * @queues: array of queue indices for RSS
+ *
+ * Sets up the portion of the HW RSS table for the port's VI to distribute
+ * packets to the Rx queues in @queues.
+ */
+int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
+{
+ u16 *rss;
+ int i, err;
+ struct adapter *adapter = pi->adapter;
+ const struct sge_eth_rxq *rxq;
+
+ /* Should never be called before setting up sge eth rx queues */
+ BUG_ON(!(adapter->flags & FULL_INIT_DONE));
+
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
+ rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
+ if (!rss)
+ return -ENOMEM;
+
+ /* map the queue indices to queue ids */
+ for (i = 0; i < pi->rss_size; i++, queues++)
+ rss[i] = rxq[*queues].rspq.abs_id;
+
+ err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
+ pi->rss_size, rss, pi->rss_size);
+ rte_free(rss);
+ return err;
+}
+
+/**
+ * setup_rss - configure RSS
+ * @adapter: the adapter
+ *
+ * Sets up RSS to distribute packets to multiple receive queues. We
+ * configure the RSS CPU lookup table to distribute to the number of HW
+ * receive queues, and the response queue lookup table to narrow that
+ * down to the response queues actually configured for each port.
+ * We always configure the RSS mapping for all ports since the mapping
+ * table has plenty of entries.
+ */
+int setup_rss(struct port_info *pi)
+{
+ int j, err;
+ struct adapter *adapter = pi->adapter;
+
+ dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n",
+ __func__, pi->rss_size, pi->n_rx_qsets);
+
+ if (!(pi->flags & PORT_RSS_DONE)) {
+ if (adapter->flags & FULL_INIT_DONE) {
+ /* Fill default values with equal distribution */
+ for (j = 0; j < pi->rss_size; j++)
+ pi->rss[j] = j % pi->n_rx_qsets;
+
+ err = cxgbe_write_rss(pi, pi->rss);
+ if (err)
+ return err;
+
+ err = cxgbe_write_rss_conf(pi, pi->rss_hf);
+ if (err)
+ return err;
+ pi->flags |= PORT_RSS_DONE;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Enable NAPI scheduling and interrupt generation for all Rx queues.
+ */
+static void enable_rx(struct adapter *adap, struct sge_rspq *q)
+{
+ /* 0-increment GTS to start the timer and enable interrupts */
+ t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
+ T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
+ V_SEINTARM(q->intr_params) |
+ V_INGRESSQID(q->cntxt_id));
+}
+
+void cxgbe_enable_rx_queues(struct port_info *pi)
+{
+ struct adapter *adap = pi->adapter;
+ struct sge *s = &adap->sge;
+ unsigned int i;
+
+ for (i = 0; i < pi->n_rx_qsets; i++)
+ enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq);
+}
+
+/**
+ * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps.
+ * @port_type: Firmware Port Type
+ * @fw_caps: Firmware Port Capabilities
+ * @speed_caps: Device Info Speed Capabilities
+ *
+ * Translate a Firmware Port Capabilities specification to Device Info
+ * Speed Capabilities.
+ */
+static void fw_caps_to_speed_caps(enum fw_port_type port_type,
+ unsigned int fw_caps,
+ u32 *speed_caps)
+{
+#define SET_SPEED(__speed_name) \
+ do { \
+ *speed_caps |= ETH_LINK_ ## __speed_name; \
+ } while (0)
+
+#define FW_CAPS_TO_SPEED(__fw_name) \
+ do { \
+ if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
+ SET_SPEED(__fw_name); \
+ } while (0)
+
+ switch (port_type) {
+ case FW_PORT_TYPE_BT_SGMII:
+ case FW_PORT_TYPE_BT_XFI:
+ case FW_PORT_TYPE_BT_XAUI:
+ FW_CAPS_TO_SPEED(SPEED_100M);
+ FW_CAPS_TO_SPEED(SPEED_1G);
+ FW_CAPS_TO_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_KX4:
+ case FW_PORT_TYPE_KX:
+ case FW_PORT_TYPE_FIBER_XFI:
+ case FW_PORT_TYPE_FIBER_XAUI:
+ case FW_PORT_TYPE_SFP:
+ case FW_PORT_TYPE_QSFP_10G:
+ case FW_PORT_TYPE_QSA:
+ FW_CAPS_TO_SPEED(SPEED_1G);
+ FW_CAPS_TO_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_KR:
+ SET_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_BP_AP:
+ case FW_PORT_TYPE_BP4_AP:
+ SET_SPEED(SPEED_1G);
+ SET_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_BP40_BA:
+ case FW_PORT_TYPE_QSFP:
+ SET_SPEED(SPEED_40G);
+ break;
+
+ case FW_PORT_TYPE_CR_QSFP:
+ case FW_PORT_TYPE_SFP28:
+ case FW_PORT_TYPE_KR_SFP28:
+ FW_CAPS_TO_SPEED(SPEED_1G);
+ FW_CAPS_TO_SPEED(SPEED_10G);
+ FW_CAPS_TO_SPEED(SPEED_25G);
+ break;
+
+ case FW_PORT_TYPE_CR2_QSFP:
+ SET_SPEED(SPEED_50G);
+ break;
+
+ case FW_PORT_TYPE_KR4_100G:
+ case FW_PORT_TYPE_CR4_QSFP:
+ FW_CAPS_TO_SPEED(SPEED_25G);
+ FW_CAPS_TO_SPEED(SPEED_40G);
+ FW_CAPS_TO_SPEED(SPEED_50G);
+ FW_CAPS_TO_SPEED(SPEED_100G);
+ break;
+
+ default:
+ break;
+ }
+
+#undef FW_CAPS_TO_SPEED
+#undef SET_SPEED
+}
+
+/**
+ * cxgbe_get_speed_caps - Fetch supported speed capabilities
+ * @pi: Underlying port's info
+ * @speed_caps: Device Info speed capabilities
+ *
+ * Fetch supported speed capabilities of the underlying port.
+ */
+void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
+{
+ *speed_caps = 0;
+
+ fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps,
+ speed_caps);
+
+ if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
+ *speed_caps |= ETH_LINK_SPEED_FIXED;
+}
+
+/**
+ * cxgbe_set_link_status - Set device link up or down.
+ * @pi: Underlying port's info
+ * @status: 0 - down, 1 - up
+ *
+ * Set the device link up or down.
+ */
+int cxgbe_set_link_status(struct port_info *pi, bool status)
+{
+ struct adapter *adapter = pi->adapter;
+ int err = 0;
+
+ err = t4_enable_vi(adapter, adapter->mbox, pi->viid, status, status);
+ if (err) {
+ dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
+ return err;
+ }
+
+ if (!status)
+ t4_reset_link_config(adapter, pi->pidx);
+
+ return 0;
+}
+
+/**
+ * cxgb_up - enable the adapter
+ * @adap: adapter being enabled
+ *
+ * Called when the first port is enabled, this function performs the
+ * actions necessary to make an adapter operational, such as completing
+ * the initialization of HW modules, and enabling interrupts.
+ */
+int cxgbe_up(struct adapter *adap)
+{
+ enable_rx(adap, &adap->sge.fw_evtq);
+ t4_sge_tx_monitor_start(adap);
+ if (is_pf4(adap))
+ t4_intr_enable(adap);
+ adap->flags |= FULL_INIT_DONE;
+
+ /* TODO: deadman watchdog ?? */
+ return 0;
+}
+
+/*
+ * Close the port
+ */
+int cxgbe_down(struct port_info *pi)
+{
+ return cxgbe_set_link_status(pi, false);
+}
+
+/*
+ * Release resources when all the ports have been stopped.
+ */
+void cxgbe_close(struct adapter *adapter)
+{
+ struct port_info *pi;
+ int i;
+
+ if (adapter->flags & FULL_INIT_DONE) {
+ if (is_pf4(adapter))
+ t4_intr_disable(adapter);
+ tid_free(&adapter->tids);
+ t4_cleanup_clip_tbl(adapter);
+ t4_sge_tx_monitor_stop(adapter);
+ t4_free_sge_resources(adapter);
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox,
+ adapter->pf, 0, pi->viid);
+ rte_free(pi->eth_dev->data->mac_addrs);
+ /* Skip first port since it'll be freed by DPDK stack */
+ if (i) {
+ rte_free(pi->eth_dev->data->dev_private);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
+ }
+ adapter->flags &= ~FULL_INIT_DONE;
+ }
+
+ if (is_pf4(adapter) && (adapter->flags & FW_OK))
+ t4_fw_bye(adapter, adapter->mbox);
+}
+
+int cxgbe_probe(struct adapter *adapter)
+{
+ struct port_info *pi;
+ int chip;
+ int func, i;
+ int err = 0;
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ chip = t4_get_chip_type(adapter,
+ CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id));
+ if (chip < 0)
+ return chip;
+
+ func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
+
+ adapter->mbox = func;
+ adapter->pf = func;
+
+ t4_os_lock_init(&adapter->mbox_lock);
+ TAILQ_INIT(&adapter->mbox_list);
+ t4_os_lock_init(&adapter->win0_lock);
+
+ err = t4_prep_adapter(adapter);
+ if (err)
+ return err;
+
+ setup_memwin(adapter);
+ err = adap_init0(adapter);
+ if (err) {
+ dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
+ __func__, err);
+ goto out_free;
+ }
+
+ if (!is_t4(adapter->params.chip)) {
+ /*
+ * The userspace doorbell BAR is split evenly into doorbell
+ * regions, each associated with an egress queue. If this
+ * per-queue region is large enough (at least UDBS_SEG_SIZE)
+ * then it can be used to submit a tx work request with an
+ * implied doorbell. Enable write combining on the BAR if
+ * there is room for such work requests.
+ */
+ int s_qpp, qpp, num_seg;
+
+ s_qpp = (S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) *
+ adapter->pf);
+ qpp = 1 << ((t4_read_reg(adapter,
+ A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+ num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE;
+ if (qpp > num_seg)
+ dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");
+
+ adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
+ if (!adapter->bar2) {
+ dev_err(adapter, "cannot map device bar2 region\n");
+ err = -ENOMEM;
+ goto out_free;
+ }
+ t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) |
+ V_STATMODE(0));
+ }
+
+ for_each_port(adapter, i) {
+ const unsigned int numa_node = rte_socket_id();
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+
+ snprintf(name, sizeof(name), "%s_%d",
+ adapter->pdev->device.name, i);
+
+ if (i == 0) {
+ /* First port is already allocated by DPDK */
+ eth_dev = adapter->eth_dev;
+ goto allocate_mac;
+ }
+
+ /*
+ * now do all data allocation - for eth_dev structure,
+ * and internal (private) data for the remaining ports
+ */
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
+ goto out_free;
+
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(name, sizeof(struct port_info),
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (!eth_dev->data->dev_private)
+ goto out_free;
+
+allocate_mac:
+ pi = (struct port_info *)eth_dev->data->dev_private;
+ adapter->port[i] = pi;
+ pi->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->port_id = i;
+ pi->pidx = i;
+
+ pi->eth_dev->device = &adapter->pdev->device;
+ pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
+ pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
+ pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
+
+ rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
+
+ pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
+ ETHER_ADDR_LEN, 0);
+ if (!pi->eth_dev->data->mac_addrs) {
+ dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
+ __func__);
+ err = -1;
+ goto out_free;
+ }
+
+ if (i > 0) {
+ /* First port will be notified by upper layer */
+ rte_eth_dev_probing_finish(eth_dev);
+ }
+ }
+
+ if (adapter->flags & FW_OK) {
+ err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0);
+ if (err) {
+ dev_err(adapter, "%s: t4_port_init failed with err %d\n",
+ __func__, err);
+ goto out_free;
+ }
+ }
+
+ cfg_queues(adapter->eth_dev);
+
+ print_adapter_info(adapter);
+ print_port_info(adapter);
+
+ adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
+ adapter->clipt_end);
+ if (!adapter->clipt) {
+ /* We tolerate a lack of clip_table, giving up some
+ * functionality
+ */
+ dev_warn(adapter, "could not allocate CLIP. Continuing\n");
+ }
+
+ if (tid_init(&adapter->tids) < 0) {
+ /* Disable filtering support */
+ dev_warn(adapter, "could not allocate TID table, "
+ "filter support disabled. Continuing\n");
+ }
+
+ if (is_hashfilter(adapter)) {
+ if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) {
+ u32 hash_base, hash_reg;
+
+ hash_reg = A_LE_DB_TID_HASHBASE;
+ hash_base = t4_read_reg(adapter, hash_reg);
+ adapter->tids.hash_base = hash_base / 4;
+ }
+ } else {
+ /* Disable hash filtering support */
+ dev_warn(adapter,
+ "Maskless filter support disabled. Continuing\n");
+ }
+
+ err = init_rss(adapter);
+ if (err)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox, adapter->pf,
+ 0, pi->viid);
+ /* Skip first port since it'll be de-allocated by DPDK */
+ if (i == 0)
+ continue;
+ if (pi->eth_dev) {
+ if (pi->eth_dev->data->dev_private)
+ rte_free(pi->eth_dev->data->dev_private);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
+ }
+
+ if (adapter->flags & FW_OK)
+ t4_fw_bye(adapter, adapter->mbox);
+ return -err;
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ofld.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ofld.h
new file mode 100644
index 00000000..50931ed0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ofld.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_OFLD_H_
+#define _CXGBE_OFLD_H_
+
+#include <rte_bitmap.h>
+
+#include "cxgbe_filter.h"
+
+#define INIT_TP_WR(w, tid) do { \
+ (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_TP_WR) | \
+ V_FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
+ (w)->wr.wr_mid = cpu_to_be32( \
+ V_FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
+ V_FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+#define INIT_TP_WR_MIT_CPL(w, cpl, tid) do { \
+ INIT_TP_WR(w, tid); \
+ OPCODE_TID(w) = cpu_to_be32(MK_OPCODE_TID(cpl, tid)); \
+} while (0)
+
+#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
+ (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) | \
+ V_FW_WR_ATOMIC(atomic)); \
+ (w)->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
+ V_FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+/*
+ * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
+ */
+#define MAX_ATIDS 8192U
+
+union aopen_entry {
+ void *data;
+ union aopen_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc of filter TID.
+ * The tables themselves are allocated dynamically.
+ */
+struct tid_info {
+ void **tid_tab;
+ unsigned int ntids;
+ struct filter_entry *ftid_tab; /* Normal filters */
+ union aopen_entry *atid_tab;
+ struct rte_bitmap *ftid_bmap;
+ uint8_t *ftid_bmap_array;
+ unsigned int nftids, natids;
+ unsigned int ftid_base, hash_base;
+
+ union aopen_entry *afree;
+ unsigned int atids_in_use;
+
+ /* TIDs in the TCAM */
+ rte_atomic32_t tids_in_use;
+ /* TIDs in the HASH */
+ rte_atomic32_t hash_tids_in_use;
+ rte_atomic32_t conns_in_use;
+
+ rte_spinlock_t atid_lock __rte_cache_aligned;
+ rte_spinlock_t ftid_lock;
+};
+
+static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
+{
+ return tid < t->ntids ? t->tid_tab[tid] : NULL;
+}
+
+static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
+{
+ return atid < t->natids ? t->atid_tab[atid].data : NULL;
+}
+
+int cxgbe_alloc_atid(struct tid_info *t, void *data);
+void cxgbe_free_atid(struct tid_info *t, unsigned int atid);
+void cxgbe_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid,
+ unsigned short family);
+void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
+ unsigned short family);
+
+#endif /* _CXGBE_OFLD_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_pfvf.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_pfvf.h
new file mode 100644
index 00000000..8d0a105a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_pfvf.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_PFVF_H_
+#define _CXGBE_PFVF_H_
+
+void cxgbe_dev_rx_queue_release(void *q);
+void cxgbe_dev_tx_queue_release(void *q);
+void cxgbe_dev_stop(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_close(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info);
+void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev);
+int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr);
+int cxgbe_dev_configure(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id);
+int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id);
+int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id);
+int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id);
+int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
+int cxgbe_dev_start(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete);
+int cxgbe_dev_set_link_up(struct rte_eth_dev *dev);
+int cxgbe_dev_set_link_down(struct rte_eth_dev *dev);
+uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+const uint32_t *cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev);
+#endif /* _CXGBE_PFVF_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c
new file mode 100644
index 00000000..3b32ca9d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+
+#include "cxgbe.h"
+#include "cxgbe_pfvf.h"
+
+/*
+ * Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static const struct rte_pci_id cxgb4vf_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x8
+
+#define PCI_VENDOR_ID_CHELSIO 0x1425
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+ { .vendor_id = 0, } \
+ }
+
+/*
+ *... and the PCI ID Table itself ...
+ */
+#include "t4_pci_id_tbl.h"
+
+/*
+ * Get port statistics.
+ */
+static int cxgbevf_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *eth_stats)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct port_stats ps;
+ unsigned int i;
+
+ cxgbevf_stats_get(pi, &ps);
+
+ /* RX Stats */
+ eth_stats->ierrors = ps.rx_len_err;
+
+ /* TX Stats */
+ eth_stats->opackets = ps.tx_bcast_frames + ps.tx_mcast_frames +
+ ps.tx_ucast_frames;
+ eth_stats->obytes = ps.tx_octets;
+ eth_stats->oerrors = ps.tx_drop;
+
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ struct sge_eth_rxq *rxq =
+ &s->ethrxq[pi->first_qset + i];
+
+ eth_stats->q_ipackets[i] = rxq->stats.pkts;
+ eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
+ eth_stats->ipackets += eth_stats->q_ipackets[i];
+ eth_stats->ibytes += eth_stats->q_ibytes[i];
+ }
+
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ struct sge_eth_txq *txq =
+ &s->ethtxq[pi->first_qset + i];
+
+ eth_stats->q_opackets[i] = txq->stats.pkts;
+ eth_stats->q_obytes[i] = txq->stats.tx_bytes;
+ eth_stats->q_errors[i] = txq->stats.mapping_err;
+ }
+ return 0;
+}
+
+static const struct eth_dev_ops cxgbevf_eth_dev_ops = {
+ .dev_start = cxgbe_dev_start,
+ .dev_stop = cxgbe_dev_stop,
+ .dev_close = cxgbe_dev_close,
+ .promiscuous_enable = cxgbe_dev_promiscuous_enable,
+ .promiscuous_disable = cxgbe_dev_promiscuous_disable,
+ .allmulticast_enable = cxgbe_dev_allmulticast_enable,
+ .allmulticast_disable = cxgbe_dev_allmulticast_disable,
+ .dev_configure = cxgbe_dev_configure,
+ .dev_infos_get = cxgbe_dev_info_get,
+ .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
+ .link_update = cxgbe_dev_link_update,
+ .dev_set_link_up = cxgbe_dev_set_link_up,
+ .dev_set_link_down = cxgbe_dev_set_link_down,
+ .mtu_set = cxgbe_dev_mtu_set,
+ .tx_queue_setup = cxgbe_dev_tx_queue_setup,
+ .tx_queue_start = cxgbe_dev_tx_queue_start,
+ .tx_queue_stop = cxgbe_dev_tx_queue_stop,
+ .tx_queue_release = cxgbe_dev_tx_queue_release,
+ .rx_queue_setup = cxgbe_dev_rx_queue_setup,
+ .rx_queue_start = cxgbe_dev_rx_queue_start,
+ .rx_queue_stop = cxgbe_dev_rx_queue_stop,
+ .rx_queue_release = cxgbe_dev_rx_queue_release,
+ .stats_get = cxgbevf_dev_stats_get,
+ .mac_addr_set = cxgbe_mac_addr_set,
+};
+
+/*
+ * Initialize driver
+ * It returns 0 on success.
+ */
+static int eth_cxgbevf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct adapter *adapter = NULL;
+ int err = 0;
+
+ CXGBE_FUNC_TRACE();
+
+ eth_dev->dev_ops = &cxgbevf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ /* for secondary processes, we attach to ethdevs allocated by primary
+ * and do minimal initialization.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ int i;
+
+ for (i = 1; i < MAX_NPORTS; i++) {
+ struct rte_eth_dev *rest_eth_dev;
+ char namei[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(namei, sizeof(namei), "%s_%d",
+ pci_dev->device.name, i);
+ rest_eth_dev = rte_eth_dev_attach_secondary(namei);
+ if (rest_eth_dev) {
+ rest_eth_dev->device = &pci_dev->device;
+ rest_eth_dev->dev_ops =
+ eth_dev->dev_ops;
+ rest_eth_dev->rx_pkt_burst =
+ eth_dev->rx_pkt_burst;
+ rest_eth_dev->tx_pkt_burst =
+ eth_dev->tx_pkt_burst;
+ rte_eth_dev_probing_finish(rest_eth_dev);
+ }
+ }
+ return 0;
+ }
+
+ snprintf(name, sizeof(name), "cxgbevfadapter%d",
+ eth_dev->data->port_id);
+ adapter = rte_zmalloc(name, sizeof(*adapter), 0);
+ if (!adapter)
+ return -1;
+
+ adapter->use_unpacked_mode = 1;
+ adapter->regs = (void *)pci_dev->mem_resource[0].addr;
+ if (!adapter->regs) {
+ dev_err(adapter, "%s: cannot map device registers\n", __func__);
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ adapter->pdev = pci_dev;
+ adapter->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ err = cxgbevf_probe(adapter);
+ if (err) {
+ dev_err(adapter, "%s: cxgbevf probe failed with err %d\n",
+ __func__, err);
+ goto out_free_adapter;
+ }
+
+ return 0;
+
+out_free_adapter:
+ rte_free(adapter);
+ return err;
+}
+
+static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct port_info),
+ eth_cxgbevf_dev_init);
+}
+
+static int eth_cxgbevf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_cxgbevf_pmd = {
+ .id_table = cxgb4vf_pci_tbl,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_cxgbevf_pci_probe,
+ .remove = eth_cxgbevf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_cxgbevf, rte_cxgbevf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_cxgbevf, cxgb4vf_pci_tbl);
+RTE_PMD_REGISTER_KMOD_DEP(net_cxgbevf, "* igb_uio | vfio-pci");
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_main.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_main.c
new file mode 100644
index 00000000..4214d031
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_main.c
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "cxgbe.h"
+
+/*
+ * Figure out how many Ports and Queue Sets we can support. This depends on
+ * knowing our Virtual Function Resources and may be called a second time if
+ * we fall back from MSI-X to MSI Interrupt Mode.
+ */
+static void size_nports_qsets(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ unsigned int pmask_nports;
+
+ /*
+ * The number of "ports" which we support is equal to the number of
+ * Virtual Interfaces with which we've been provisioned.
+ */
+ adapter->params.nports = vfres->nvi;
+ if (adapter->params.nports > MAX_NPORTS) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
+ " allowed virtual interfaces\n", MAX_NPORTS,
+ adapter->params.nports);
+ adapter->params.nports = MAX_NPORTS;
+ }
+
+ /*
+ * We may have been provisioned with more VIs than the number of
+ * ports we're allowed to access (our Port Access Rights Mask).
+ * This is obviously a configuration conflict but we don't want to
+ * do anything silly just because of that.
+ */
+ pmask_nports = hweight32(adapter->params.vfres.pmask);
+ if (pmask_nports < adapter->params.nports) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d provissioned"
+ " virtual interfaces; limited by Port Access Rights"
+ " mask %#x\n", pmask_nports, adapter->params.nports,
+ adapter->params.vfres.pmask);
+ adapter->params.nports = pmask_nports;
+ }
+
+ configure_max_ethqsets(adapter);
+ if (adapter->sge.max_ethqsets < adapter->params.nports) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d available"
+ " virtual interfaces (too few Queue Sets)\n",
+ adapter->sge.max_ethqsets, adapter->params.nports);
+ adapter->params.nports = adapter->sge.max_ethqsets;
+ }
+}
+
+void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats)
+{
+ t4vf_get_port_stats(pi->adapter, pi->pidx, stats);
+}
+
+static int adap_init0vf(struct adapter *adapter)
+{
+ u32 param, val = 0;
+ int err;
+
+ err = t4vf_fw_reset(adapter);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Grab basic operational parameters. These will predominantly have
+ * been set up by the Physical Function Driver or will be hard coded
+ * into the adapter. We just have to live with them ... Note that
+ * we _must_ get our VPD parameters before our SGE parameters because
+ * we need to know the adapter's core clock from the VPD in order to
+ * properly decode the SGE Timer Values.
+ */
+ err = t4vf_get_dev_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " device parameters: err=%d\n", err);
+ return err;
+ }
+
+ err = t4vf_get_vpd_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " VPD parameters: err=%d\n", err);
+ return err;
+ }
+
+ adapter->pf = t4vf_get_pf_from_vf(adapter);
+ err = t4vf_sge_init(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "error in sge init\n");
+ return err;
+ }
+
+ err = t4vf_get_rss_glb_config(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " RSS parameters: err=%d\n", err);
+ return err;
+ }
+ if (adapter->params.rss.mode !=
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+ dev_err(adapter->pdev_dev, "unable to operate with global RSS"
+ " mode %d\n", adapter->params.rss.mode);
+ return -EINVAL;
+ }
+
+ /* If we're running on newer firmware, let it know that we're
+ * prepared to deal with encapsulated CPL messages. Older
+ * firmware won't understand this and we'll just get
+ * unencapsulated messages ...
+ */
+ param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
+ val = 1;
+ t4vf_set_params(adapter, 1, &param, &val);
+
+ /*
+ * Grab our Virtual Interface resource allocation, extract the
+ * features that we're interested in and do a bit of sanity testing on
+ * what we discover.
+ */
+ err = t4vf_get_vfres(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to get virtual interface"
+ " resources: err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Check for various parameter sanity issues.
+ */
+ if (adapter->params.vfres.pmask == 0) {
+ dev_err(adapter->pdev_dev, "no port access configured\n"
+ "usable!\n");
+ return -EINVAL;
+ }
+ if (adapter->params.vfres.nvi == 0) {
+ dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
+ "usable!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Initialize nports and max_ethqsets now that we have our Virtual
+ * Function Resources.
+ */
+ size_nports_qsets(adapter);
+ adapter->flags |= FW_OK;
+ return 0;
+}
+
+int cxgbevf_probe(struct adapter *adapter)
+{
+ struct port_info *pi;
+ unsigned int pmask;
+ int err = 0;
+ int i;
+
+ t4_os_lock_init(&adapter->mbox_lock);
+ TAILQ_INIT(&adapter->mbox_list);
+ err = t4vf_prep_adapter(adapter);
+ if (err)
+ return err;
+
+ if (!is_t4(adapter->params.chip)) {
+ adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
+ if (!adapter->bar2) {
+ dev_err(adapter, "cannot map device bar2 region\n");
+ err = -ENOMEM;
+ return err;
+ }
+ }
+
+ err = adap_init0vf(adapter);
+ if (err) {
+ dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
+ __func__, err);
+ goto out_free;
+ }
+
+ pmask = adapter->params.vfres.pmask;
+ for_each_port(adapter, i) {
+ const unsigned int numa_node = rte_socket_id();
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+ int port_id;
+
+ if (pmask == 0)
+ break;
+ port_id = ffs(pmask) - 1;
+ pmask &= ~(1 << port_id);
+
+ snprintf(name, sizeof(name), "%s_%d",
+ adapter->pdev->device.name, i);
+
+ if (i == 0) {
+ /* First port is already allocated by DPDK */
+ eth_dev = adapter->eth_dev;
+ goto allocate_mac;
+ }
+
+ /*
+ * now do all data allocation - for eth_dev structure,
+ * and internal (private) data for the remaining ports
+ */
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(name, sizeof(struct port_info),
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (!eth_dev->data->dev_private)
+ goto out_free;
+
+allocate_mac:
+ pi = (struct port_info *)eth_dev->data->dev_private;
+ adapter->port[i] = pi;
+ pi->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->port_id = port_id;
+ pi->pidx = i;
+
+ pi->eth_dev->device = &adapter->pdev->device;
+ pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
+ pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
+ pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
+
+ rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
+ pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
+ ETHER_ADDR_LEN, 0);
+ if (!pi->eth_dev->data->mac_addrs) {
+ dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
+ __func__);
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ if (i > 0) {
+ /* First port will be notified by upper layer */
+ rte_eth_dev_probing_finish(eth_dev);
+ }
+ }
+
+ if (adapter->flags & FW_OK) {
+ err = t4vf_port_init(adapter);
+ if (err) {
+ dev_err(adapter, "%s: t4_port_init failed with err %d\n",
+ __func__, err);
+ goto out_free;
+ }
+ }
+
+ cfg_queues(adapter->eth_dev);
+ print_adapter_info(adapter);
+ print_port_info(adapter);
+
+ err = init_rss(adapter);
+ if (err)
+ goto out_free;
+ return 0;
+
+out_free:
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox, adapter->pf,
+ 0, pi->viid);
+ /* Skip first port since it'll be de-allocated by DPDK */
+ if (i == 0)
+ continue;
+ if (pi->eth_dev) {
+ if (pi->eth_dev->data->dev_private)
+ rte_free(pi->eth_dev->data->dev_private);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
+ }
+ return -err;
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/meson.build b/src/spdk/dpdk/drivers/net/cxgbe/meson.build
new file mode 100644
index 00000000..7c69a34b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('cxgbe_ethdev.c',
+ 'cxgbe_main.c',
+ 'cxgbevf_ethdev.c',
+ 'cxgbevf_main.c',
+ 'sge.c',
+ 'cxgbe_filter.c',
+ 'cxgbe_flow.c',
+ 'clip_tbl.c',
+ 'base/t4_hw.c',
+ 'base/t4vf_hw.c')
+includes += include_directories('base')
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/rte_pmd_cxgbe_version.map b/src/spdk/dpdk/drivers/net/cxgbe/rte_pmd_cxgbe_version.map
new file mode 100644
index 00000000..bd8138a0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/rte_pmd_cxgbe_version.map
@@ -0,0 +1,4 @@
+DPDK_2.1 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/sge.c b/src/spdk/dpdk/drivers/net/cxgbe/sge.c
new file mode 100644
index 00000000..4ea40d19
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/sge.c
@@ -0,0 +1,2739 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "cxgbe.h"
+
+static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
+ struct sge_eth_txq *txq);
+
+/*
+ * Max number of Rx buffers we replenish at a time.
+ */
+#define MAX_RX_REFILL 64U
+
+#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 256
+
+/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
+/*
+ * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
+ * per mbuf buffer). We currently only support two sizes for 1500- and
+ * 9000-byte MTUs. We could easily support more but there doesn't seem to be
+ * much need for that ...
+ */
+#define FL_MTU_SMALL 1500
+#define FL_MTU_LARGE 9000
+
+static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
+ unsigned int mtu)
+{
+ struct sge *s = &adapter->sge;
+
+ return CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu,
+ s->fl_align);
+}
+
+#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
+#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
+
+/*
+ * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
+ * these to specify the buffer size as an index into the SGE Free List Buffer
+ * Size register array. We also use bit 4, when the buffer has been unmapped
+ * for DMA, but this is of course never sent to the hardware and is only used
+ * to prevent double unmappings. All of the above requires that the Free List
+ * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
+ * 32-byte or or a power of 2 greater in alignment. Since the SGE's minimal
+ * Free List Buffer alignment is 32 bytes, this works out for us ...
+ */
+enum {
+ RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
+ RX_BUF_SIZE = 0x0f, /* bottom three bits are for buf sizes */
+ RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
+
+ /*
+ * XXX We shouldn't depend on being able to use these indices.
+ * XXX Especially when some other Master PF has initialized the
+ * XXX adapter or we use the Firmware Configuration File. We
+ * XXX should really search through the Host Buffer Size register
+ * XXX array for the appropriately sized buffer indices.
+ */
+ RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
+ RX_LARGE_PG_BUF = 0x1, /* buffer large page buffer */
+
+ RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
+ RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
+};
+
+/**
+ * txq_avail - return the number of available slots in a Tx queue
+ * @q: the Tx queue
+ *
+ * Returns the number of descriptors in a Tx queue available to write new
+ * packets.
+ */
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr)
+{
+ struct rte_mbuf *m = mbuf;
+
+ for (; m; m = m->next, addr++) {
+ *addr = m->buf_iova + rte_pktmbuf_headroom(m);
+ if (*addr == 0)
+ goto out_err;
+ }
+ return 0;
+
+out_err:
+ return -ENOMEM;
+}
+
+/**
+ * free_tx_desc - reclaims Tx descriptors and their buffers
+ * @q: the Tx queue to reclaim descriptors from
+ * @n: the number of descriptors to reclaim
+ *
+ * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ * Tx buffers. Called with the Tx queue lock held.
+ */
+static void free_tx_desc(struct sge_txq *q, unsigned int n)
+{
+ struct tx_sw_desc *d;
+ unsigned int cidx = 0;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->mbuf) { /* an SGL is present */
+ rte_pktmbuf_free(d->mbuf);
+ d->mbuf = NULL;
+ }
+ if (d->coalesce.idx) {
+ int i;
+
+ for (i = 0; i < d->coalesce.idx; i++) {
+ rte_pktmbuf_free(d->coalesce.mbuf[i]);
+ d->coalesce.mbuf[i] = NULL;
+ }
+ d->coalesce.idx = 0;
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ RTE_MBUF_PREFETCH_TO_FREE(&q->sdesc->mbuf->pool);
+ }
+}
+
+static void reclaim_tx_desc(struct sge_txq *q, unsigned int n)
+{
+ struct tx_sw_desc *d;
+ unsigned int cidx = q->cidx;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->mbuf) { /* an SGL is present */
+ rte_pktmbuf_free(d->mbuf);
+ d->mbuf = NULL;
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ }
+ q->cidx = cidx;
+}
+
+/**
+ * fl_cap - return the capacity of a free-buffer list
+ * @fl: the FL
+ *
+ * Returns the capacity of a free-buffer list. The capacity is less than
+ * the size because one descriptor needs to be left unpopulated, otherwise
+ * HW will think the FL is empty.
+ */
+static inline unsigned int fl_cap(const struct sge_fl *fl)
+{
+ return fl->size - 8; /* 1 descriptor = 8 buffers */
+}
+
+/**
+ * fl_starving - return whether a Free List is starving.
+ * @adapter: pointer to the adapter
+ * @fl: the Free List
+ *
+ * Tests specified Free List to see whether the number of buffers
+ * available to the hardware has falled below our "starvation"
+ * threshold.
+ */
+static inline bool fl_starving(const struct adapter *adapter,
+ const struct sge_fl *fl)
+{
+ const struct sge *s = &adapter->sge;
+
+ return fl->avail - fl->pend_cred <= s->fl_starve_thres;
+}
+
+static inline unsigned int get_buf_size(struct adapter *adapter,
+ const struct rx_sw_desc *d)
+{
+ unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
+ unsigned int buf_size = 0;
+
+ switch (rx_buf_size_idx) {
+ case RX_SMALL_MTU_BUF:
+ buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
+ break;
+
+ case RX_LARGE_MTU_BUF:
+ buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
+ break;
+
+ default:
+ BUG_ON(1);
+ /* NOT REACHED */
+ }
+
+ return buf_size;
+}
+
+/**
+ * free_rx_bufs - free the Rx buffers on an SGE free list
+ * @q: the SGE free list to free buffers from
+ * @n: how many buffers to free
+ *
+ * Release the next @n buffers on an SGE free-buffer Rx queue. The
+ * buffers must be made inaccessible to HW before calling this function.
+ */
+static void free_rx_bufs(struct sge_fl *q, int n)
+{
+ unsigned int cidx = q->cidx;
+ struct rx_sw_desc *d;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->buf) {
+ rte_pktmbuf_free(d->buf);
+ d->buf = NULL;
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ q->avail--;
+ }
+ q->cidx = cidx;
+}
+
+/**
+ * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
+ * @q: the SGE free list
+ *
+ * Unmap the current buffer on an SGE free-buffer Rx queue. The
+ * buffer must be made inaccessible to HW before calling this function.
+ *
+ * This is similar to @free_rx_bufs above but does not free the buffer.
+ * Do note that the FL still loses any further access to the buffer.
+ */
+static void unmap_rx_buf(struct sge_fl *q)
+{
+ if (++q->cidx == q->size)
+ q->cidx = 0;
+ q->avail--;
+}
+
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+ if (q->pend_cred >= 64) {
+ u32 val = adap->params.arch.sge_fl_db;
+
+ if (is_t4(adap->params.chip))
+ val |= V_PIDX(q->pend_cred / 8);
+ else
+ val |= V_PIDX_T5(q->pend_cred / 8);
+
+ /*
+ * Make sure all memory writes to the Free List queue are
+ * committed before we tell the hardware about them.
+ */
+ wmb();
+
+ /*
+ * If we don't have access to the new User Doorbell (T5+), use
+ * the old doorbell mechanism; otherwise use the new BAR2
+ * mechanism.
+ */
+ if (unlikely(!q->bar2_addr)) {
+ u32 reg = is_pf4(adap) ? MYPF_REG(A_SGE_PF_KDOORBELL) :
+ T4VF_SGE_BASE_ADDR +
+ A_SGE_VF_KDOORBELL;
+
+ t4_write_reg_relaxed(adap, reg,
+ val | V_QID(q->cntxt_id));
+ } else {
+ writel_relaxed(val | V_QID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr +
+ SGE_UDB_KDOORBELL));
+
+ /*
+ * This Write memory Barrier will force the write to
+ * the User Doorbell area to be flushed.
+ */
+ wmb();
+ }
+ q->pend_cred &= 7;
+ }
+}
+
+static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf,
+ dma_addr_t mapping)
+{
+ sd->buf = buf;
+ sd->dma_addr = mapping; /* includes size low bits */
+}
+
+/**
+ * refill_fl_usembufs - refill an SGE Rx buffer ring with mbufs
+ * @adap: the adapter
+ * @q: the ring to refill
+ * @n: the number of new buffers to allocate
+ *
+ * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
+ * allocated with the supplied gfp flags. The caller must assure that
+ * @n does not exceed the queue's capacity. If afterwards the queue is
+ * found critically low mark it as starving in the bitmap of starving FLs.
+ *
+ * Returns the number of buffers allocated.
+ */
+static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
+ int n)
+{
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, fl);
+ unsigned int cred = q->avail;
+ __be64 *d = &q->desc[q->pidx];
+ struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+ unsigned int buf_size_idx = RX_SMALL_MTU_BUF;
+ struct rte_mbuf *buf_bulk[n];
+ int ret, i;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ /* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
+ mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
+ if (jumbo_en &&
+ ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
+ buf_size_idx = RX_LARGE_MTU_BUF;
+
+ ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
+ if (unlikely(ret != 0)) {
+ dev_debug(adap, "%s: failed to allocated fl entries in bulk ..\n",
+ __func__);
+ q->alloc_failed++;
+ rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
+ goto out;
+ }
+
+ for (i = 0; i < n; i++) {
+ struct rte_mbuf *mbuf = buf_bulk[i];
+ dma_addr_t mapping;
+
+ if (!mbuf) {
+ dev_debug(adap, "%s: mbuf alloc failed\n", __func__);
+ q->alloc_failed++;
+ rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
+ goto out;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->data_off =
+ (uint16_t)(RTE_PTR_ALIGN((char *)mbuf->buf_addr +
+ RTE_PKTMBUF_HEADROOM,
+ adap->sge.fl_align) -
+ (char *)mbuf->buf_addr);
+ mbuf->next = NULL;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->rspq.port_id;
+
+ mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_iova +
+ mbuf->data_off,
+ adap->sge.fl_align);
+ mapping |= buf_size_idx;
+ *d++ = cpu_to_be64(mapping);
+ set_rx_sw_desc(sd, mbuf, mapping);
+ sd++;
+
+ q->avail++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ sd = q->sdesc;
+ d = q->desc;
+ }
+ }
+
+out: cred = q->avail - cred;
+ q->pend_cred += cred;
+ ring_fl_db(adap, q);
+
+ if (unlikely(fl_starving(adap, q))) {
+ /*
+ * Make sure data has been written to free list
+ */
+ wmb();
+ q->low++;
+ }
+
+ return cred;
+}
+
+/**
+ * refill_fl - refill an SGE Rx buffer ring with mbufs
+ * @adap: the adapter
+ * @q: the ring to refill
+ * @n: the number of new buffers to allocate
+ *
+ * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
+ * allocated with the supplied gfp flags. The caller must assure that
+ * @n does not exceed the queue's capacity. Returns the number of buffers
+ * allocated.
+ */
+static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n)
+{
+ return refill_fl_usembufs(adap, q, n);
+}
+
+static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+{
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail));
+}
+
+/*
+ * Return the number of reclaimable descriptors in a Tx queue.
+ */
+static inline int reclaimable(const struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+
+ hw_cidx -= q->cidx;
+ if (hw_cidx < 0)
+ return hw_cidx + q->size;
+ return hw_cidx;
+}
+
+/**
+ * reclaim_completed_tx - reclaims completed Tx descriptors
+ * @q: the Tx queue to reclaim completed descriptors from
+ *
+ * Reclaims Tx descriptors that the SGE has indicated it has processed.
+ */
+void reclaim_completed_tx(struct sge_txq *q)
+{
+ unsigned int avail = reclaimable(q);
+
+ do {
+ /* reclaim as much as possible */
+ reclaim_tx_desc(q, avail);
+ q->in_use -= avail;
+ avail = reclaimable(q);
+ } while (avail);
+}
+
+/**
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ *
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ /*
+ * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
+ * addresses. The DSGL Work Request starts off with a 32-bit DSGL
+ * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
+ * repeated sequences of { Length[i], Length[i+1], Address[i],
+ * Address[i+1] } (this ensures that all addresses are on 64-bit
+ * boundaries). If N is even, then Length[N+1] should be set to 0 and
+ * Address[N+1] is omitted.
+ *
+ * The following calculation incorporates all of the above. It's
+ * somewhat hard to follow but, briefly: the "+2" accounts for the
+ * first two flits which include the DSGL header, Length0 and
+ * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
+ * flits for every pair of the remaining N) +1 if (n-1) is odd; and
+ * finally the "+((n-1)&1)" adds the one remaining flit needed if
+ * (n-1) is odd ...
+ */
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Returns the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ return DIV_ROUND_UP(n, 8);
+}
+
+/**
+ * is_eth_imm - can an Ethernet packet be sent as immediate data?
+ * @m: the packet
+ *
+ * Returns whether an Ethernet packet is small enough to fit as
+ * immediate data. Return value corresponds to the headroom required.
+ */
+static inline int is_eth_imm(const struct rte_mbuf *m)
+{
+ unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (m->pkt_len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+
+ return 0;
+}
+
+/**
+ * calc_tx_flits - calculate the number of flits for a packet Tx WR
+ * @m: the packet
+ * @adap: adapter structure pointer
+ *
+ * Returns the number of flits needed for a Tx WR for the given Ethernet
+ * packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_flits(const struct rte_mbuf *m,
+ struct adapter *adap)
+{
+ size_t wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkt_wr) :
+ sizeof(struct fw_eth_tx_pkt_vm_wr);
+ unsigned int flits;
+ int hdrlen;
+
+ /*
+ * If the mbuf is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the mbuf data in the Work Request.
+ */
+
+ hdrlen = is_eth_imm(m);
+ if (hdrlen)
+ return DIV_ROUND_UP(m->pkt_len + hdrlen, sizeof(__be64));
+
+ /*
+ * Otherwise, we're going to have to construct a Scatter gather list
+ * of the mbuf body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embedded TX Packet Write CPL message.
+ */
+ flits = sgl_len(m->nb_segs);
+ if (m->tso_segsz)
+ flits += (wr_size + sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ else
+ flits += (wr_size +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
+
+/**
+ * write_sgl - populate a scatter/gather list for a packet
+ * @mbuf: the packet
+ * @q: the Tx queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @start: start offset into mbuf main-body data to include in the SGL
+ * @addr: address of mapped region
+ *
+ * Generates a scatter/gather list for the buffers that make up a packet.
+ * The caller must provide adequate space for the SGL that will be written.
+ * The SGL includes all of the packet's page fragments and the data in its
+ * main body except for the first @start bytes. @sgl must be 16-byte
+ * aligned and within a Tx descriptor with available space. @end points
+ * write after the end of the SGL but does not account for any potential
+ * wrap around, i.e., @end > @sgl.
+ */
+static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
+{
+ unsigned int i, len;
+ struct ulptx_sge_pair *to;
+ struct rte_mbuf *m = mbuf;
+ unsigned int nfrags = m->nb_segs;
+ struct ulptx_sge_pair buf[nfrags / 2];
+
+ len = m->data_len - start;
+ sgl->len0 = htonl(len);
+ sgl->addr0 = rte_cpu_to_be_64(addr[0]);
+
+ sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
+ V_ULPTX_NSGE(nfrags));
+ if (likely(--nfrags == 0))
+ return;
+ /*
+ * Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+ for (i = 0; nfrags >= 2; nfrags -= 2, to++) {
+ m = m->next;
+ to->len[0] = rte_cpu_to_be_32(m->data_len);
+ to->addr[0] = rte_cpu_to_be_64(addr[++i]);
+ m = m->next;
+ to->len[1] = rte_cpu_to_be_32(m->data_len);
+ to->addr[1] = rte_cpu_to_be_64(addr[++i]);
+ }
+ if (nfrags) {
+ m = m->next;
+ to->len[0] = rte_cpu_to_be_32(m->data_len);
+ to->len[1] = rte_cpu_to_be_32(0);
+ to->addr[0] = rte_cpu_to_be_64(addr[i + 1]);
+ }
+ if (unlikely((u8 *)end > (u8 *)q->stat)) {
+ unsigned int part0 = RTE_PTR_DIFF((u8 *)q->stat,
+ (u8 *)sgl->sge);
+ unsigned int part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = RTE_PTR_DIFF((u8 *)end, (u8 *)q->stat);
+ rte_memcpy(q->desc, RTE_PTR_ADD((u8 *)buf, part0), part1);
+ end = RTE_PTR_ADD((void *)q->desc, part1);
+ }
+ if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
+ *(u64 *)end = 0;
+}
+
+#define IDXDIFF(head, tail, wrap) \
+ ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
+
+#define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size)
+#define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size)
+
+#define PIDXDIFF(head, tail, wrap) \
+ ((tail) >= (head) ? (tail) - (head) : (wrap) - (head) + (tail))
+#define P_IDXDIFF(q, idx) PIDXDIFF((q)->cidx, idx, (q)->size)
+
+/**
+ * ring_tx_db - ring a Tx queue's doorbell
+ * @adap: the adapter
+ * @q: the Tx queue
+ * @n: number of new descriptors to give to HW
+ *
+ * Ring the doorbel for a Tx queue.
+ */
+static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q)
+{
+ int n = Q_IDXDIFF(q, dbidx);
+
+ /*
+ * Make sure that all writes to the TX Descriptors are committed
+ * before we tell the hardware about them.
+ */
+ rte_wmb();
+
+ /*
+ * If we don't have access to the new User Doorbell (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(!q->bar2_addr)) {
+ u32 val = V_PIDX(n);
+
+ /*
+ * For T4 we need to participate in the Doorbell Recovery
+ * mechanism.
+ */
+ if (!q->db_disabled)
+ t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+ V_QID(q->cntxt_id) | val);
+ else
+ q->db_pidx_inc += n;
+ q->db_pidx = q->pidx;
+ } else {
+ u32 val = V_PIDX_T5(n);
+
+ /*
+ * T4 and later chips share the same PIDX field offset within
+ * the doorbell, but T5 and later shrank the field in order to
+ * gain a bit for Doorbell Priority. The field was absurdly
+ * large in the first place (14 bits) so we just use the T5
+ * and later limits and warn if a Queue ID is too large.
+ */
+ WARN_ON(val & F_DBPRIO);
+
+ writel(val | V_QID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr + SGE_UDB_KDOORBELL));
+
+ /*
+ * This Write Memory Barrier will force the write to the User
+ * Doorbell area to be flushed. This is needed to prevent
+ * writes on different CPUs for the same queue from hitting
+ * the adapter out of order. This is required when some Work
+ * Requests take the Write Combine Gather Buffer path (user
+ * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
+ * take the traditional path where we simply increment the
+ * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
+ * hardware DMA read the actual Work Request.
+ */
+ rte_wmb();
+ }
+ q->dbidx = q->pidx;
+}
+
+/*
+ * Figure out what HW csum a packet wants and return the appropriate control
+ * bits.
+ */
+static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m)
+{
+ int csum_type;
+
+ if (m->ol_flags & PKT_TX_IP_CKSUM) {
+ switch (m->ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ csum_type = TX_CSUM_TCPIP;
+ break;
+ case PKT_TX_UDP_CKSUM:
+ csum_type = TX_CSUM_UDPIP;
+ break;
+ default:
+ goto nocsum;
+ }
+ } else {
+ goto nocsum;
+ }
+
+ if (likely(csum_type >= TX_CSUM_TCPIP)) {
+ u64 hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
+ int eth_hdr_len = m->l2_len;
+
+ if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
+ hdr_len |= V_TXPKT_ETHHDR_LEN(eth_hdr_len);
+ else
+ hdr_len |= V_T6_TXPKT_ETHHDR_LEN(eth_hdr_len);
+ return V_TXPKT_CSUM_TYPE(csum_type) | hdr_len;
+ }
+nocsum:
+ /*
+ * unknown protocol, disable HW csum
+ * and hope a bad packet is detected
+ */
+ return F_TXPKT_L4CSUM_DIS;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+#define MAX_COALESCE_LEN 64000
+
+static inline int wraps_around(struct sge_txq *q, int ndesc)
+{
+ return (q->pidx + ndesc) > q->size ? 1 : 0;
+}
+
+static void tx_timer_cb(void *data)
+{
+ struct adapter *adap = (struct adapter *)data;
+ struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
+ int i;
+ unsigned int coal_idx;
+
+ /* monitor any pending tx */
+ for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) {
+ if (t4_os_trylock(&txq->txq_lock)) {
+ coal_idx = txq->q.coalesce.idx;
+ if (coal_idx) {
+ if (coal_idx == txq->q.last_coal_idx &&
+ txq->q.pidx == txq->q.last_pidx) {
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ } else {
+ txq->q.last_coal_idx = coal_idx;
+ txq->q.last_pidx = txq->q.pidx;
+ }
+ }
+ t4_os_unlock(&txq->txq_lock);
+ }
+ }
+ rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
+}
+
+/**
+ * ship_tx_pkt_coalesce_wr - finalizes and ships a coalesce WR
+ * @ adap: adapter structure
+ * @txq: tx queue
+ *
+ * writes the different fields of the pkts WR and sends it.
+ */
+static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
+ struct sge_eth_txq *txq)
+{
+ struct fw_eth_tx_pkts_vm_wr *vmwr;
+ const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
+ sizeof(vmwr->ethmacsrc) +
+ sizeof(vmwr->ethtype) +
+ sizeof(vmwr->vlantci));
+ struct fw_eth_tx_pkts_wr *wr;
+ struct sge_txq *q = &txq->q;
+ unsigned int ndesc;
+ u32 wr_mid;
+
+ /* fill the pkts WR header */
+ wr = (void *)&q->desc[q->pidx];
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ vmwr = (void *)&q->desc[q->pidx];
+
+ wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
+ ndesc = flits_to_desc(q->coalesce.flits);
+ wr->equiq_to_len16 = htonl(wr_mid);
+ wr->plen = cpu_to_be16(q->coalesce.len);
+ wr->npkt = q->coalesce.idx;
+ wr->r3 = 0;
+ if (is_pf4(adap)) {
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ wr->type = q->coalesce.type;
+ } else {
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
+ vmwr->r4 = 0;
+ memcpy((void *)vmwr->ethmacdst, (void *)q->coalesce.ethmacdst,
+ fw_hdr_copy_len);
+ }
+
+ /* zero out coalesce structure members */
+ memset((void *)&q->coalesce, 0, sizeof(struct eth_coalesce));
+
+ txq_advance(q, ndesc);
+ txq->stats.coal_wr++;
+ txq->stats.coal_pkts += wr->npkt;
+
+ if (Q_IDXDIFF(q, equeidx) >= q->size / 2) {
+ q->equeidx = q->pidx;
+ wr_mid |= F_FW_WR_EQUEQ;
+ wr->equiq_to_len16 = htonl(wr_mid);
+ }
+ ring_tx_db(adap, q);
+}
+
+/**
+ * should_tx_packet_coalesce - decides wether to coalesce an mbuf or not
+ * @txq: tx queue where the mbuf is sent
+ * @mbuf: mbuf to be sent
+ * @nflits: return value for number of flits needed
+ * @adap: adapter structure
+ *
+ * This function decides if a packet should be coalesced or not.
+ */
+static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
+ struct rte_mbuf *mbuf,
+ unsigned int *nflits,
+ struct adapter *adap)
+{
+ struct fw_eth_tx_pkts_vm_wr *wr;
+ const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
+ sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) +
+ sizeof(wr->vlantci));
+ struct sge_txq *q = &txq->q;
+ unsigned int flits, ndesc;
+ unsigned char type = 0;
+ int credits, wr_size;
+
+ /* use coal WR type 1 when no frags are present */
+ type = (mbuf->nb_segs == 1) ? 1 : 0;
+ if (!is_pf4(adap)) {
+ if (!type)
+ return 0;
+
+ if (q->coalesce.idx && memcmp((void *)q->coalesce.ethmacdst,
+ rte_pktmbuf_mtod(mbuf, void *),
+ fw_hdr_copy_len))
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ }
+
+ if (unlikely(type != q->coalesce.type && q->coalesce.idx))
+ ship_tx_pkt_coalesce_wr(adap, txq);
+
+ /* calculate the number of flits required for coalescing this packet
+ * without the 2 flits of the WR header. These are added further down
+ * if we are just starting in new PKTS WR. sgl_len doesn't account for
+ * the possible 16 bytes alignment ULP TX commands so we do it here.
+ */
+ flits = (sgl_len(mbuf->nb_segs) + 1) & ~1U;
+ if (type == 0)
+ flits += (sizeof(struct ulp_txpkt) +
+ sizeof(struct ulptx_idata)) / sizeof(__be64);
+ flits += sizeof(struct cpl_tx_pkt_core) / sizeof(__be64);
+ *nflits = flits;
+
+ /* If coalescing is on, the mbuf is added to a pkts WR */
+ if (q->coalesce.idx) {
+ ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
+ credits = txq_avail(q) - ndesc;
+
+ /* If we are wrapping or this is last mbuf then, send the
+ * already coalesced mbufs and let the non-coalesce pass
+ * handle the mbuf.
+ */
+ if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ return 0;
+ }
+
+ /* If the max coalesce len or the max WR len is reached
+ * ship the WR and keep coalescing on.
+ */
+ if (unlikely((q->coalesce.len + mbuf->pkt_len >
+ MAX_COALESCE_LEN) ||
+ (q->coalesce.flits + flits >
+ q->coalesce.max))) {
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ goto new;
+ }
+ return 1;
+ }
+
+new:
+ /* start a new pkts WR, the WR header is not filled below */
+ wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkts_wr) :
+ sizeof(struct fw_eth_tx_pkts_vm_wr);
+ flits += wr_size / sizeof(__be64);
+ ndesc = flits_to_desc(q->coalesce.flits + flits);
+ credits = txq_avail(q) - ndesc;
+
+ if (unlikely(credits < 0 || wraps_around(q, ndesc)))
+ return 0;
+ q->coalesce.flits += wr_size / sizeof(__be64);
+ q->coalesce.type = type;
+ q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
+ q->coalesce.flits * sizeof(__be64);
+ if (!is_pf4(adap))
+ memcpy((void *)q->coalesce.ethmacdst,
+ rte_pktmbuf_mtod(mbuf, void *), fw_hdr_copy_len);
+ return 1;
+}
+
+/**
+ * tx_do_packet_coalesce - add an mbuf to a coalesce WR
+ * @txq: sge_eth_txq used send the mbuf
+ * @mbuf: mbuf to be sent
+ * @flits: flits needed for this mbuf
+ * @adap: adapter structure
+ * @pi: port_info structure
+ * @addr: mapped address of the mbuf
+ *
+ * Adds an mbuf to be sent as part of a coalesce WR by filling a
+ * ulp_tx_pkt command, ulp_tx_sc_imm command, cpl message and
+ * ulp_tx_sc_dsgl command.
+ */
+static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
+ struct rte_mbuf *mbuf,
+ int flits, struct adapter *adap,
+ const struct port_info *pi,
+ dma_addr_t *addr, uint16_t nb_pkts)
+{
+ u64 cntrl, *end;
+ struct sge_txq *q = &txq->q;
+ struct ulp_txpkt *mc;
+ struct ulptx_idata *sc_imm;
+ struct cpl_tx_pkt_core *cpl;
+ struct tx_sw_desc *sd;
+ unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;
+ unsigned int max_coal_pkt_num = is_pf4(adap) ? ETH_COALESCE_PKT_NUM :
+ ETH_COALESCE_VF_PKT_NUM;
+
+#ifdef RTE_LIBRTE_CXGBE_TPUT
+ RTE_SET_USED(nb_pkts);
+#endif
+
+ if (q->coalesce.type == 0) {
+ mc = (struct ulp_txpkt *)q->coalesce.ptr;
+ mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |
+ V_ULP_TXPKT_FID(adap->sge.fw_evtq.cntxt_id) |
+ F_ULP_TXPKT_RO);
+ mc->len = htonl(DIV_ROUND_UP(flits, 2));
+ sc_imm = (struct ulptx_idata *)(mc + 1);
+ sc_imm->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ F_ULP_TX_SC_MORE);
+ sc_imm->len = htonl(sizeof(*cpl));
+ end = (u64 *)mc + flits;
+ cpl = (struct cpl_tx_pkt_core *)(sc_imm + 1);
+ } else {
+ end = (u64 *)q->coalesce.ptr + flits;
+ cpl = (struct cpl_tx_pkt_core *)q->coalesce.ptr;
+ }
+
+ /* update coalesce structure for this txq */
+ q->coalesce.flits += flits;
+ q->coalesce.ptr += flits * sizeof(__be64);
+ q->coalesce.len += mbuf->pkt_len;
+
+ /* fill the cpl message, same as in t4_eth_xmit, this should be kept
+ * similar to t4_eth_xmit
+ */
+ if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
+ cntrl = hwcsum(adap->params.chip, mbuf) |
+ F_TXPKT_IPCSUM_DIS;
+ txq->stats.tx_cso++;
+ } else {
+ cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
+ }
+
+ if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ txq->stats.vlan_ins++;
+ cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
+ }
+
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
+ if (is_pf4(adap))
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ else
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id));
+ cpl->pack = htons(0);
+ cpl->len = htons(len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+ write_sgl(mbuf, q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr);
+ txq->stats.pkts++;
+ txq->stats.tx_bytes += len;
+
+ sd = &q->sdesc[q->pidx + (idx >> 1)];
+ if (!(idx & 1)) {
+ if (sd->coalesce.idx) {
+ int i;
+
+ for (i = 0; i < sd->coalesce.idx; i++) {
+ rte_pktmbuf_free(sd->coalesce.mbuf[i]);
+ sd->coalesce.mbuf[i] = NULL;
+ }
+ }
+ }
+
+ /* store pointers to the mbuf and the sgl used in free_tx_desc.
+ * each tx desc can hold two pointers corresponding to the value
+ * of ETH_COALESCE_PKT_PER_DESC
+ */
+ sd->coalesce.mbuf[idx & 1] = mbuf;
+ sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1);
+ sd->coalesce.idx = (idx & 1) + 1;
+
+ /* send the coaelsced work request if max reached */
+ if (++q->coalesce.idx == max_coal_pkt_num
+#ifndef RTE_LIBRTE_CXGBE_TPUT
+ || q->coalesce.idx >= nb_pkts
+#endif
+ )
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ return 0;
+}
+
+/**
+ * t4_eth_xmit - add a packet to an Ethernet Tx queue
+ * @txq: the egress queue
+ * @mbuf: the packet
+ *
+ * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
+ */
+int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+ uint16_t nb_pkts)
+{
+ const struct port_info *pi;
+ struct cpl_tx_pkt_lso_core *lso;
+ struct adapter *adap;
+ struct rte_mbuf *m = mbuf;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct fw_eth_tx_pkt_vm_wr *vmwr;
+ struct cpl_tx_pkt_core *cpl;
+ struct tx_sw_desc *d;
+ dma_addr_t addr[m->nb_segs];
+ unsigned int flits, ndesc, cflits;
+ int l3hdr_len, l4hdr_len, eth_xtra_len;
+ int len, last_desc;
+ int credits;
+ u32 wr_mid;
+ u64 cntrl, *end;
+ bool v6;
+ u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ /* Reject xmit if queue is stopped */
+ if (unlikely(txq->flags & EQ_STOPPED))
+ return -(EBUSY);
+
+ /*
+ * The chip min packet length is 10 octets but play safe and reject
+ * anything shorter than an Ethernet header.
+ */
+ if (unlikely(m->pkt_len < ETHER_HDR_LEN)) {
+out_free:
+ rte_pktmbuf_free(m);
+ return 0;
+ }
+
+ if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
+ (unlikely(m->pkt_len > max_pkt_len)))
+ goto out_free;
+
+ pi = (struct port_info *)txq->data->dev_private;
+ adap = pi->adapter;
+
+ cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
+ /* align the end of coalesce WR to a 512 byte boundary */
+ txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
+
+ if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {
+ if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
+ if (unlikely(map_mbuf(mbuf, addr) < 0)) {
+ dev_warn(adap, "%s: mapping err for coalesce\n",
+ __func__);
+ txq->stats.mapping_err++;
+ goto out_free;
+ }
+ rte_prefetch0((volatile void *)addr);
+ return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
+ pi, addr, nb_pkts);
+ } else {
+ return -EBUSY;
+ }
+ }
+
+ if (txq->q.coalesce.idx)
+ ship_tx_pkt_coalesce_wr(adap, txq);
+
+ flits = calc_tx_flits(m, adap);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&txq->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ dev_debug(adap, "%s: Tx ring %u full; credits = %d\n",
+ __func__, txq->q.cntxt_id, credits);
+ return -EBUSY;
+ }
+
+ if (unlikely(map_mbuf(m, addr) < 0)) {
+ txq->stats.mapping_err++;
+ goto out_free;
+ }
+
+ wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
+ if (Q_IDXDIFF(&txq->q, equeidx) >= 64) {
+ txq->q.equeidx = txq->q.pidx;
+ wr_mid |= F_FW_WR_EQUEQ;
+ }
+
+ wr = (void *)&txq->q.desc[txq->q.pidx];
+ vmwr = (void *)&txq->q.desc[txq->q.pidx];
+ wr->equiq_to_len16 = htonl(wr_mid);
+ if (is_pf4(adap)) {
+ wr->r3 = rte_cpu_to_be_64(0);
+ end = (u64 *)wr + flits;
+ } else {
+ const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
+ sizeof(vmwr->ethmacsrc) +
+ sizeof(vmwr->ethtype) +
+ sizeof(vmwr->vlantci));
+
+ vmwr->r3[0] = rte_cpu_to_be_32(0);
+ vmwr->r3[1] = rte_cpu_to_be_32(0);
+ memcpy((void *)vmwr->ethmacdst, rte_pktmbuf_mtod(m, void *),
+ fw_hdr_copy_len);
+ end = (u64 *)vmwr + flits;
+ }
+
+ len = 0;
+ len += sizeof(*cpl);
+
+ /* Coalescing skipped and we send through normal path */
+ if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+ wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
+ FW_ETH_TX_PKT_WR :
+ FW_ETH_TX_PKT_VM_WR) |
+ V_FW_WR_IMMDLEN(len));
+ if (is_pf4(adap))
+ cpl = (void *)(wr + 1);
+ else
+ cpl = (void *)(vmwr + 1);
+ if (m->ol_flags & PKT_TX_IP_CKSUM) {
+ cntrl = hwcsum(adap->params.chip, m) |
+ F_TXPKT_IPCSUM_DIS;
+ txq->stats.tx_cso++;
+ }
+ } else {
+ if (is_pf4(adap))
+ lso = (void *)(wr + 1);
+ else
+ lso = (void *)(vmwr + 1);
+ v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
+ l3hdr_len = m->l3_len;
+ l4hdr_len = m->l4_len;
+ eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
+ len += sizeof(*lso);
+ wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
+ FW_ETH_TX_PKT_WR :
+ FW_ETH_TX_PKT_VM_WR) |
+ V_FW_WR_IMMDLEN(len));
+ lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
+ F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
+ V_LSO_IPV6(v6) |
+ V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
+ V_LSO_IPHDR_LEN(l3hdr_len / 4) |
+ V_LSO_TCPHDR_LEN(l4hdr_len / 4));
+ lso->ipid_ofst = htons(0);
+ lso->mss = htons(m->tso_segsz);
+ lso->seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->len = htonl(m->pkt_len);
+ else
+ lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
+ cpl = (void *)(lso + 1);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ cntrl = V_TXPKT_ETHHDR_LEN(eth_xtra_len);
+ else
+ cntrl = V_T6_TXPKT_ETHHDR_LEN(eth_xtra_len);
+
+ cntrl |= V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 :
+ TX_CSUM_TCPIP) |
+ V_TXPKT_IPHDR_LEN(l3hdr_len);
+ txq->stats.tso++;
+ txq->stats.tx_cso += m->tso_segsz;
+ }
+
+ if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ txq->stats.vlan_ins++;
+ cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
+ }
+
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
+ if (is_pf4(adap))
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ else
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id) |
+ V_TXPKT_PF(0));
+
+ cpl->pack = htons(0);
+ cpl->len = htons(m->pkt_len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ txq->stats.pkts++;
+ txq->stats.tx_bytes += m->pkt_len;
+ last_desc = txq->q.pidx + ndesc - 1;
+ if (last_desc >= (int)txq->q.size)
+ last_desc -= txq->q.size;
+
+ d = &txq->q.sdesc[last_desc];
+ if (d->coalesce.idx) {
+ int i;
+
+ for (i = 0; i < d->coalesce.idx; i++) {
+ rte_pktmbuf_free(d->coalesce.mbuf[i]);
+ d->coalesce.mbuf[i] = NULL;
+ }
+ d->coalesce.idx = 0;
+ }
+ write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
+ addr);
+ txq->q.sdesc[last_desc].mbuf = m;
+ txq->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
+ txq_advance(&txq->q, ndesc);
+ ring_tx_db(adap, &txq->q);
+ return 0;
+}
+
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any mbufs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+ int reclaim = hw_cidx - q->cidx;
+
+ if (reclaim < 0)
+ reclaim += q->size;
+
+ q->in_use -= reclaim;
+ q->cidx = hw_cidx;
+}
+
+/**
+ * is_imm - check whether a packet can be sent as immediate data
+ * @mbuf: the packet
+ *
+ * Returns true if a packet can be sent as a WR with immediate data.
+ */
+static inline int is_imm(const struct rte_mbuf *mbuf)
+{
+ return mbuf->pkt_len <= MAX_CTRL_WR_LEN;
+}
+
+/**
+ * inline_tx_mbuf: inline a packet's data into TX descriptors
+ * @q: the TX queue where the packet will be inlined
+ * @from: pointer to data portion of packet
+ * @to: pointer after cpl where data has to be inlined
+ * @len: length of data to inline
+ *
+ * Inline a packet's contents directly to TX descriptors, starting at
+ * the given position within the TX DMA ring.
+ * Most of the complexity of this operation is dealing with wrap arounds
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to,
+ int len)
+{
+ int left = RTE_PTR_DIFF(q->stat, *to);
+
+ if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) {
+ rte_memcpy(*to, from, len);
+ *to = RTE_PTR_ADD(*to, len);
+ } else {
+ rte_memcpy(*to, from, left);
+ from = RTE_PTR_ADD(from, left);
+ left = len - left;
+ rte_memcpy((void *)q->desc, from, left);
+ *to = RTE_PTR_ADD((void *)q->desc, left);
+ }
+}
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @q: the control queue
+ * @mbuf: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data.
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+ unsigned int ndesc;
+ struct fw_wr_hdr *wr;
+ caddr_t dst;
+
+ if (unlikely(!is_imm(mbuf))) {
+ WARN_ON(1);
+ rte_pktmbuf_free(mbuf);
+ return -1;
+ }
+
+ reclaim_completed_tx_imm(&q->q);
+ ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc));
+ t4_os_lock(&q->ctrlq_lock);
+
+ q->full = txq_avail(&q->q) < ndesc ? 1 : 0;
+ if (unlikely(q->full)) {
+ t4_os_unlock(&q->ctrlq_lock);
+ return -1;
+ }
+
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ dst = (void *)wr;
+ inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t),
+ &dst, mbuf->data_len);
+
+ txq_advance(&q->q, ndesc);
+ if (unlikely(txq_avail(&q->q) < 64))
+ wr->lo |= htonl(F_FW_WR_EQUEQ);
+
+ q->txp++;
+
+ ring_tx_db(q->adapter, &q->q);
+ t4_os_unlock(&q->ctrlq_lock);
+
+ rte_pktmbuf_free(mbuf);
+ return 0;
+}
+
+/**
+ * t4_mgmt_tx - send a management message
+ * @q: the control queue
+ * @mbuf: the packet containing the management message
+ *
+ * Send a management message through control queue.
+ */
+int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+ return ctrl_xmit(q, mbuf);
+}
+
+/**
+ * alloc_ring - allocate resources for an SGE descriptor ring
+ * @dev: the PCI device's core device
+ * @nelem: the number of descriptors
+ * @elem_size: the size of each descriptor
+ * @sw_size: the size of the SW state associated with each ring element
+ * @phys: the physical address of the allocated ring
+ * @metadata: address of the array holding the SW state for the ring
+ * @stat_size: extra space in HW ring for status information
+ * @node: preferred node for memory allocations
+ *
+ * Allocates resources for an SGE descriptor ring, such as Tx queues,
+ * free buffer lists, or response queues. Each SGE ring requires
+ * space for its HW descriptors plus, optionally, space for the SW state
+ * associated with each HW entry (the metadata). The function returns
+ * three values: the virtual address for the HW ring (the return value
+ * of the function), the bus address of the HW ring, and the address
+ * of the SW ring.
+ */
+static void *alloc_ring(size_t nelem, size_t elem_size,
+ size_t sw_size, dma_addr_t *phys, void *metadata,
+ size_t stat_size, __rte_unused uint16_t queue_id,
+ int socket_id, const char *z_name,
+ const char *z_name_sw)
+{
+ size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size;
+ const struct rte_memzone *tz;
+ void *s = NULL;
+
+ dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; "
+ "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;"
+ " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size,
+ stat_size, queue_id, socket_id, z_name, z_name_sw);
+
+ tz = rte_memzone_lookup(z_name);
+ if (tz) {
+ dev_debug(adapter, "%s: tz exists...returning existing..\n",
+ __func__);
+ goto alloc_sw_ring;
+ }
+
+ /*
+ * Allocate TX/RX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tz = rte_memzone_reserve_aligned(z_name, len, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, 4096);
+ if (!tz)
+ return NULL;
+
+alloc_sw_ring:
+ memset(tz->addr, 0, len);
+ if (sw_size) {
+ s = rte_zmalloc_socket(z_name_sw, nelem * sw_size,
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (!s) {
+ dev_err(adapter, "%s: failed to get sw_ring memory\n",
+ __func__);
+ return NULL;
+ }
+ }
+ if (metadata)
+ *(void **)metadata = s;
+
+ *phys = (uint64_t)tz->iova;
+ return tz->addr;
+}
+
+/**
+ * t4_pktgl_to_mbuf_usembufs - build an mbuf from a packet gather list
+ * @gl: the gather list
+ *
+ * Builds an mbuf from the given packet gather list. Returns the mbuf or
+ * %NULL if mbuf allocation failed.
+ */
+static struct rte_mbuf *t4_pktgl_to_mbuf_usembufs(const struct pkt_gl *gl)
+{
+ /*
+ * If there's only one mbuf fragment, just return that.
+ */
+ if (likely(gl->nfrags == 1))
+ return gl->mbufs[0];
+
+ return NULL;
+}
+
+/**
+ * t4_pktgl_to_mbuf - build an mbuf from a packet gather list
+ * @gl: the gather list
+ *
+ * Builds an mbuf from the given packet gather list. Returns the mbuf or
+ * %NULL if mbuf allocation failed.
+ */
+static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl)
+{
+ return t4_pktgl_to_mbuf_usembufs(gl);
+}
+
+/**
+ * t4_ethrx_handler - process an ingress ethernet packet
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the RX_PKT message
+ * @si: the gather list of packet fragments
+ *
+ * Process an ingress ethernet packet and deliver it to the stack.
+ */
+int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *si)
+{
+ struct rte_mbuf *mbuf;
+ const struct cpl_rx_pkt *pkt;
+ const struct rss_header *rss_hdr;
+ bool csum_ok;
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+ u16 err_vec;
+
+ rss_hdr = (const void *)rsp;
+ pkt = (const void *)&rsp[1];
+ /* Compressed error vector is enabled for T6 only */
+ if (q->adapter->params.tp.rx_pkt_encap)
+ err_vec = G_T6_COMPR_RXERR_VEC(ntohs(pkt->err_vec));
+ else
+ err_vec = ntohs(pkt->err_vec);
+ csum_ok = pkt->csum_calc && !err_vec;
+
+ mbuf = t4_pktgl_to_mbuf(si);
+ if (unlikely(!mbuf)) {
+ rxq->stats.rx_drops++;
+ return 0;
+ }
+
+ mbuf->port = pkt->iff;
+ if (pkt->l2info & htonl(F_RXF_IP)) {
+ mbuf->packet_type = RTE_PTYPE_L3_IPV4;
+ if (unlikely(!csum_ok))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else if (pkt->l2info & htonl(F_RXF_IP6)) {
+ mbuf->packet_type = RTE_PTYPE_L3_IPV6;
+ }
+
+ mbuf->port = pkt->iff;
+
+ if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
+ mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ mbuf->hash.rss = ntohl(rss_hdr->hash_val);
+ }
+
+ if (pkt->vlan_ex) {
+ mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->vlan_tci = ntohs(pkt->vlan);
+ }
+ rxq->stats.pkts++;
+ rxq->stats.rx_bytes += mbuf->pkt_len;
+
+ return 0;
+}
+
+#define CXGB4_MSG_AN ((void *)1)
+
+/**
+ * rspq_next - advance to the next entry in a response queue
+ * @q: the queue
+ *
+ * Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *q)
+{
+ q->cur_desc = (const __be64 *)((const char *)q->cur_desc + q->iqe_len);
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->gen ^= 1;
+ q->cur_desc = q->desc;
+ }
+}
+
+/**
+ * process_responses - process responses from an SGE response queue
+ * @q: the ingress queue to process
+ * @budget: how many responses can be processed in this round
+ * @rx_pkts: mbuf to put the pkts
+ *
+ * Process responses from an SGE response queue up to the supplied budget.
+ * Responses include received packets as well as control messages from FW
+ * or HW.
+ *
+ * Additionally choose the interrupt holdoff time for the next interrupt
+ * on this queue. If the system is under memory shortage use a fairly
+ * long delay to help recovery.
+ */
+static int process_responses(struct sge_rspq *q, int budget,
+ struct rte_mbuf **rx_pkts)
+{
+ int ret = 0, rsp_type;
+ int budget_left = budget;
+ const struct rsp_ctrl *rc;
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+
+ while (likely(budget_left)) {
+ if (q->cidx == ntohs(q->stat->pidx))
+ break;
+
+ rc = (const struct rsp_ctrl *)
+ ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));
+
+ /*
+ * Ensure response has been read
+ */
+ rmb();
+ rsp_type = G_RSPD_TYPE(rc->u.type_gen);
+
+ if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
+ struct sge *s = &q->adapter->sge;
+ unsigned int stat_pidx;
+ int stat_pidx_diff;
+
+ stat_pidx = ntohs(q->stat->pidx);
+ stat_pidx_diff = P_IDXDIFF(q, stat_pidx);
+ while (stat_pidx_diff && budget_left) {
+ const struct rx_sw_desc *rsd =
+ &rxq->fl.sdesc[rxq->fl.cidx];
+ const struct rss_header *rss_hdr =
+ (const void *)q->cur_desc;
+ const struct cpl_rx_pkt *cpl =
+ (const void *)&q->cur_desc[1];
+ struct rte_mbuf *pkt, *npkt;
+ u32 len, bufsz;
+ bool csum_ok;
+ u16 err_vec;
+
+ rc = (const struct rsp_ctrl *)
+ ((const char *)q->cur_desc +
+ (q->iqe_len - sizeof(*rc)));
+
+ rsp_type = G_RSPD_TYPE(rc->u.type_gen);
+ if (unlikely(rsp_type != X_RSPD_TYPE_FLBUF))
+ break;
+
+ len = ntohl(rc->pldbuflen_qid);
+ BUG_ON(!(len & F_RSPD_NEWBUF));
+ pkt = rsd->buf;
+ npkt = pkt;
+ len = G_RSPD_LEN(len);
+ pkt->pkt_len = len;
+
+ /* Compressed error vector is enabled for
+ * T6 only
+ */
+ if (q->adapter->params.tp.rx_pkt_encap)
+ err_vec = G_T6_COMPR_RXERR_VEC(
+ ntohs(cpl->err_vec));
+ else
+ err_vec = ntohs(cpl->err_vec);
+ csum_ok = cpl->csum_calc && !err_vec;
+
+ /* Chain mbufs into len if necessary */
+ while (len) {
+ struct rte_mbuf *new_pkt = rsd->buf;
+
+ bufsz = min(get_buf_size(q->adapter,
+ rsd), len);
+ new_pkt->data_len = bufsz;
+ unmap_rx_buf(&rxq->fl);
+ len -= bufsz;
+ npkt->next = new_pkt;
+ npkt = new_pkt;
+ pkt->nb_segs++;
+ rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+ }
+ npkt->next = NULL;
+ pkt->nb_segs--;
+
+ if (cpl->l2info & htonl(F_RXF_IP)) {
+ pkt->packet_type = RTE_PTYPE_L3_IPV4;
+ if (unlikely(!csum_ok))
+ pkt->ol_flags |=
+ PKT_RX_IP_CKSUM_BAD;
+
+ if ((cpl->l2info &
+ htonl(F_RXF_UDP | F_RXF_TCP)) &&
+ !csum_ok)
+ pkt->ol_flags |=
+ PKT_RX_L4_CKSUM_BAD;
+ } else if (cpl->l2info & htonl(F_RXF_IP6)) {
+ pkt->packet_type = RTE_PTYPE_L3_IPV6;
+ }
+
+ if (!rss_hdr->filter_tid &&
+ rss_hdr->hash_type) {
+ pkt->ol_flags |= PKT_RX_RSS_HASH;
+ pkt->hash.rss =
+ ntohl(rss_hdr->hash_val);
+ }
+
+ if (cpl->vlan_ex) {
+ pkt->ol_flags |= PKT_RX_VLAN |
+ PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci = ntohs(cpl->vlan);
+ }
+
+ rte_pktmbuf_adj(pkt, s->pktshift);
+ rxq->stats.pkts++;
+ rxq->stats.rx_bytes += pkt->pkt_len;
+ rx_pkts[budget - budget_left] = pkt;
+
+ rspq_next(q);
+ budget_left--;
+ stat_pidx_diff--;
+ }
+ continue;
+ } else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
+ ret = q->handler(q, q->cur_desc, NULL);
+ } else {
+ ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
+ }
+
+ if (unlikely(ret)) {
+ /* couldn't process descriptor, back off for recovery */
+ q->next_intr_params = V_QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+ break;
+ }
+
+ rspq_next(q);
+ budget_left--;
+ }
+
+ /*
+ * If this is a Response Queue with an associated Free List and
+ * there's room for another chunk of new Free List buffer pointers,
+ * refill the Free List.
+ */
+
+ if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
+ __refill_fl(q->adapter, &rxq->fl);
+
+ return budget - budget_left;
+}
+
+int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
+ unsigned int budget, unsigned int *work_done)
+{
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+ unsigned int cidx_inc;
+ unsigned int params;
+ u32 val;
+
+ *work_done = process_responses(q, budget, rx_pkts);
+
+ if (*work_done) {
+ cidx_inc = R_IDXDIFF(q, gts_idx);
+
+ if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
+ __refill_fl(q->adapter, &rxq->fl);
+
+ params = q->intr_params;
+ q->next_intr_params = params;
+ val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
+
+ if (unlikely(!q->bar2_addr)) {
+ u32 reg = is_pf4(q->adapter) ? MYPF_REG(A_SGE_PF_GTS) :
+ T4VF_SGE_BASE_ADDR +
+ A_SGE_VF_GTS;
+
+ t4_write_reg(q->adapter, reg,
+ val | V_INGRESSQID((u32)q->cntxt_id));
+ } else {
+ writel(val | V_INGRESSQID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr + SGE_UDB_GTS));
+ /* This Write memory Barrier will force the
+ * write to the User Doorbell area to be
+ * flushed.
+ */
+ wmb();
+ }
+ q->gts_idx = q->cidx;
+ }
+ return 0;
+}
+
+/**
+ * bar2_address - return the BAR2 address for an SGE Queue's Registers
+ * @adapter: the adapter
+ * @qid: the SGE Queue ID
+ * @qtype: the SGE Queue Type (Egress or Ingress)
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ * Returns the BAR2 address for the SGE Queue Registers associated with
+ * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
+ * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
+ * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
+ * Registers are supported (e.g. the Write Combining Doorbell Buffer).
+ */
+static void __iomem *bar2_address(struct adapter *adapter, unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ unsigned int *pbar2_qid)
+{
+ u64 bar2_qoffset;
+ int ret;
+
+ ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid);
+ if (ret)
+ return NULL;
+
+ return adapter->bar2 + bar2_qoffset;
+}
+
+int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq)
+{
+ struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
+ unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
+
+ return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
+ rq->cntxt_id, fl_id, 0xffff);
+}
+
+int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq)
+{
+ struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
+ unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
+
+ return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
+ rq->cntxt_id, fl_id, 0xffff);
+}
+
+/*
+ * @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
+ * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
+ */
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+ struct rte_eth_dev *eth_dev, int intr_idx,
+ struct sge_fl *fl, rspq_handler_t hnd, int cong,
+ struct rte_mempool *mp, int queue_id, int socket_id)
+{
+ int ret, flsz = 0;
+ struct fw_iq_cmd c;
+ struct sge *s = &adap->sge;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ char z_name_sw[RTE_MEMZONE_NAMESIZE];
+ unsigned int nb_refill;
+ u8 pciechan;
+
+ /* Size needs to be multiple of 16, including status entry. */
+ iq->size = cxgbe_roundup(iq->size, 16);
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ eth_dev->device->driver->name,
+ fwevtq ? "fwq_ring" : "rx_ring",
+ eth_dev->data->port_id, queue_id);
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
+ queue_id, socket_id, z_name, z_name_sw);
+ if (!iq->desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+
+ if (is_pf4(adap)) {
+ pciechan = pi->tx_chan;
+ c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |
+ V_FW_IQ_CMD_VFN(0));
+ if (cong >= 0)
+ c.iqns_to_fl0congen =
+ htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
+ V_FW_IQ_CMD_IQTYPE(cong ?
+ FW_IQ_IQTYPE_NIC :
+ FW_IQ_IQTYPE_OFLD) |
+ F_FW_IQ_CMD_IQRO);
+ } else {
+ pciechan = pi->port_id;
+ }
+
+ c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
+ (sizeof(c) / 16));
+ c.type_to_iqandstindex =
+ htonl(V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
+ V_FW_IQ_CMD_IQASYNCH(fwevtq) |
+ V_FW_IQ_CMD_VIID(pi->viid) |
+ V_FW_IQ_CMD_IQANDST(intr_idx < 0) |
+ V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_STATUS_PAGE) |
+ V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
+ -intr_idx - 1));
+ c.iqdroprss_to_iqesize =
+ htons(V_FW_IQ_CMD_IQPCIECH(pciechan) |
+ F_FW_IQ_CMD_IQGTSMODE |
+ V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
+ V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
+ c.iqsize = htons(iq->size);
+ c.iqaddr = cpu_to_be64(iq->phys_addr);
+
+ if (fl) {
+ struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
+ fl);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ /*
+ * Allocate the ring for the hardware free list (with space
+ * for its status page) along with the associated software
+ * descriptor ring. The free list size needs to be a multiple
+ * of the Egress Queue Unit and at least 2 Egress Units larger
+ * than the SGE's Egress Congrestion Threshold
+ * (fl_starve_thres - 1).
+ */
+ if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
+ fl->size = s->fl_starve_thres - 1 + 2 * 8;
+ fl->size = cxgbe_roundup(fl->size, 8);
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ eth_dev->device->driver->name,
+ fwevtq ? "fwq_ring" : "fl_ring",
+ eth_dev->data->port_id, queue_id);
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ fl->desc = alloc_ring(fl->size, sizeof(__be64),
+ sizeof(struct rx_sw_desc),
+ &fl->addr, &fl->sdesc, s->stat_len,
+ queue_id, socket_id, z_name, z_name_sw);
+
+ if (!fl->desc)
+ goto fl_nomem;
+
+ flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
+ c.iqns_to_fl0congen |=
+ htonl(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ (unlikely(rxq->usembufs) ?
+ 0 : F_FW_IQ_CMD_FL0PACKEN) |
+ F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
+ F_FW_IQ_CMD_FL0PADEN);
+ if (is_pf4(adap) && cong >= 0)
+ c.iqns_to_fl0congen |=
+ htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
+ F_FW_IQ_CMD_FL0CONGCIF |
+ F_FW_IQ_CMD_FL0CONGEN);
+
+ /* In T6, for egress queue type FL there is internal overhead
+ * of 16B for header going into FLM module.
+ * Hence maximum allowed burst size will be 448 bytes.
+ */
+ c.fl0dcaen_to_fl0cidxfthresh =
+ htons(V_FW_IQ_CMD_FL0FBMIN(chip_ver <= CHELSIO_T5 ?
+ X_FETCHBURSTMIN_128B :
+ X_FETCHBURSTMIN_64B) |
+ V_FW_IQ_CMD_FL0FBMAX(chip_ver <= CHELSIO_T5 ?
+ X_FETCHBURSTMAX_512B :
+ X_FETCHBURSTMAX_256B));
+ c.fl0size = htons(flsz);
+ c.fl0addr = cpu_to_be64(fl->addr);
+ }
+
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
+ if (ret)
+ goto err;
+
+ iq->cur_desc = iq->desc;
+ iq->cidx = 0;
+ iq->gts_idx = 0;
+ iq->gen = 1;
+ iq->next_intr_params = iq->intr_params;
+ iq->cntxt_id = ntohs(c.iqid);
+ iq->abs_id = ntohs(c.physiqid);
+ iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS,
+ &iq->bar2_qid);
+ iq->size--; /* subtract status entry */
+ iq->stat = (void *)&iq->desc[iq->size * 8];
+ iq->eth_dev = eth_dev;
+ iq->handler = hnd;
+ iq->port_id = pi->pidx;
+ iq->mb_pool = mp;
+
+ /* set offset to -1 to distinguish ingress queues without FL */
+ iq->offset = fl ? 0 : -1;
+
+ if (fl) {
+ fl->cntxt_id = ntohs(c.fl0id);
+ fl->avail = 0;
+ fl->pend_cred = 0;
+ fl->pidx = 0;
+ fl->cidx = 0;
+ fl->alloc_failed = 0;
+
+ /*
+ * Note, we must initialize the BAR2 Free List User Doorbell
+ * information before refilling the Free List!
+ */
+ fl->bar2_addr = bar2_address(adap, fl->cntxt_id,
+ T4_BAR2_QTYPE_EGRESS,
+ &fl->bar2_qid);
+
+ nb_refill = refill_fl(adap, fl, fl_cap(fl));
+ if (nb_refill != fl_cap(fl)) {
+ ret = -ENOMEM;
+ dev_err(adap, "%s: mbuf alloc failed with error: %d\n",
+ __func__, ret);
+ goto refill_fl_err;
+ }
+ }
+
+ /*
+ * For T5 and later we attempt to set up the Congestion Manager values
+ * of the new RX Ethernet Queue. This should really be handled by
+ * firmware because it's more complex than any host driver wants to
+ * get involved with and it's different per chip and this is almost
+ * certainly wrong. Formware would be wrong as well, but it would be
+ * a lot easier to fix in one place ... For now we do something very
+ * simple (and hopefully less wrong).
+ */
+ if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {
+ u32 param, val;
+ int i;
+
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
+ V_FW_PARAMS_PARAM_YZ(iq->cntxt_id));
+ if (cong == 0) {
+ val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_QUEUE);
+ } else {
+ val = V_CONMCTXT_CNGTPMODE(
+ X_CONMCTXT_CNGTPMODE_CHANNEL);
+ for (i = 0; i < 4; i++) {
+ if (cong & (1 << i))
+ val |= V_CONMCTXT_CNGCHMAP(1 <<
+ (i << 2));
+ }
+ }
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret)
+ dev_warn(adap->pdev_dev, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
+ iq->cntxt_id, -ret);
+ }
+
+ return 0;
+
+refill_fl_err:
+ t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
+ iq->cntxt_id, fl->cntxt_id, 0xffff);
+fl_nomem:
+ ret = -ENOMEM;
+err:
+ iq->cntxt_id = 0;
+ iq->abs_id = 0;
+ if (iq->desc)
+ iq->desc = NULL;
+
+ if (fl && fl->desc) {
+ rte_free(fl->sdesc);
+ fl->cntxt_id = 0;
+ fl->sdesc = NULL;
+ fl->desc = NULL;
+ }
+ return ret;
+}
+
+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id,
+ unsigned int abs_id)
+{
+ q->cntxt_id = id;
+ q->abs_id = abs_id;
+ q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,
+ &q->bar2_qid);
+ q->cidx = 0;
+ q->pidx = 0;
+ q->dbidx = 0;
+ q->in_use = 0;
+ q->equeidx = 0;
+ q->coalesce.idx = 0;
+ q->coalesce.len = 0;
+ q->coalesce.flits = 0;
+ q->last_coal_idx = 0;
+ q->last_pidx = 0;
+ q->stat = (void *)&q->desc[q->size];
+}
+
+int t4_sge_eth_txq_start(struct sge_eth_txq *txq)
+{
+ /*
+ * TODO: For flow-control, queue may be stopped waiting to reclaim
+ * credits.
+ * Ensure queue is in EQ_STOPPED state before starting it.
+ */
+ if (!(txq->flags & EQ_STOPPED))
+ return -(EBUSY);
+
+ txq->flags &= ~EQ_STOPPED;
+
+ return 0;
+}
+
+int t4_sge_eth_txq_stop(struct sge_eth_txq *txq)
+{
+ txq->flags |= EQ_STOPPED;
+
+ return 0;
+}
+
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id)
+{
+ int ret, nentries;
+ struct fw_eq_eth_cmd c;
+ struct sge *s = &adap->sge;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ char z_name_sw[RTE_MEMZONE_NAMESIZE];
+ u8 pciechan;
+
+ /* Add status entries */
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ eth_dev->device->driver->name, "tx_ring",
+ eth_dev->data->port_id, queue_id);
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
+ sizeof(struct tx_sw_desc), &txq->q.phys_addr,
+ &txq->q.sdesc, s->stat_len, queue_id,
+ socket_id, z_name, z_name_sw);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+ if (is_pf4(adap)) {
+ pciechan = pi->tx_chan;
+ c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) |
+ V_FW_EQ_ETH_CMD_VFN(0));
+ } else {
+ pciechan = pi->port_id;
+ }
+
+ c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |
+ F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));
+ c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
+ V_FW_EQ_ETH_CMD_VIID(pi->viid));
+ c.fetchszm_to_iqid =
+ htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ V_FW_EQ_ETH_CMD_PCIECHN(pciechan) |
+ F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));
+ c.dcaen_to_eqsize =
+ htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+ V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+ V_FW_EQ_ETH_CMD_EQSIZE(nentries));
+ c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);
+
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
+ if (ret) {
+ rte_free(txq->q.sdesc);
+ txq->q.sdesc = NULL;
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)),
+ G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd)));
+ txq->stats.tso = 0;
+ txq->stats.pkts = 0;
+ txq->stats.tx_cso = 0;
+ txq->stats.coal_wr = 0;
+ txq->stats.vlan_ins = 0;
+ txq->stats.tx_bytes = 0;
+ txq->stats.coal_pkts = 0;
+ txq->stats.mapping_err = 0;
+ txq->flags |= EQ_STOPPED;
+ txq->eth_dev = eth_dev;
+ txq->data = eth_dev->data;
+ t4_os_lock_init(&txq->txq_lock);
+ return 0;
+}
+
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id)
+{
+ int ret, nentries;
+ struct fw_eq_ctrl_cmd c;
+ struct sge *s = &adap->sge;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ char z_name_sw[RTE_MEMZONE_NAMESIZE];
+
+ /* Add status entries */
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ eth_dev->device->driver->name, "ctrl_tx_ring",
+ eth_dev->data->port_id, queue_id);
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
+ 0, &txq->q.phys_addr,
+ NULL, 0, queue_id,
+ socket_id, z_name, z_name_sw);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+ V_FW_EQ_CTRL_CMD_PFN(adap->pf) |
+ V_FW_EQ_CTRL_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC |
+ F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16));
+ c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0));
+ c.physeqid_pkd = htonl(0);
+ c.fetchszm_to_iqid =
+ htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+ F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid));
+ c.dcaen_to_eqsize =
+ htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+ V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+ V_FW_EQ_CTRL_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret) {
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)),
+ G_FW_EQ_CTRL_CMD_EQID(ntohl(c. physeqid_pkd)));
+ txq->adapter = adap;
+ txq->full = 0;
+ return 0;
+}
+
+static void free_txq(struct sge_txq *q)
+{
+ q->cntxt_id = 0;
+ q->sdesc = NULL;
+ q->desc = NULL;
+}
+
+static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
+ struct sge_fl *fl)
+{
+ unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
+
+ t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
+ rq->cntxt_id, fl_id, 0xffff);
+ rq->cntxt_id = 0;
+ rq->abs_id = 0;
+ rq->desc = NULL;
+
+ if (fl) {
+ free_rx_bufs(fl, fl->avail);
+ rte_free(fl->sdesc);
+ fl->sdesc = NULL;
+ fl->cntxt_id = 0;
+ fl->desc = NULL;
+ }
+}
+
+/*
+ * Clear all queues of the port
+ *
+ * Note: This function must only be called after rx and tx path
+ * of the port have been disabled.
+ */
+void t4_sge_eth_clear_queues(struct port_info *pi)
+{
+ int i;
+ struct adapter *adap = pi->adapter;
+ struct sge_eth_rxq *rxq = &adap->sge.ethrxq[pi->first_qset];
+ struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
+
+ for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
+ if (rxq->rspq.desc)
+ t4_sge_eth_rxq_stop(adap, &rxq->rspq);
+ }
+ for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
+ if (txq->q.desc) {
+ struct sge_txq *q = &txq->q;
+
+ t4_sge_eth_txq_stop(txq);
+ reclaim_completed_tx(q);
+ free_tx_desc(q, q->size);
+ q->equeidx = q->pidx;
+ }
+ }
+}
+
+void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq)
+{
+ if (rxq->rspq.desc) {
+ t4_sge_eth_rxq_stop(adap, &rxq->rspq);
+ free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL);
+ }
+}
+
+void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq)
+{
+ if (txq->q.desc) {
+ t4_sge_eth_txq_stop(txq);
+ reclaim_completed_tx(&txq->q);
+ t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id);
+ free_tx_desc(&txq->q, txq->q.size);
+ rte_free(txq->q.sdesc);
+ free_txq(&txq->q);
+ }
+}
+
+void t4_sge_tx_monitor_start(struct adapter *adap)
+{
+ rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
+}
+
+void t4_sge_tx_monitor_stop(struct adapter *adap)
+{
+ rte_eal_alarm_cancel(tx_timer_cb, (void *)adap);
+}
+
+/**
+ * t4_free_sge_resources - free SGE resources
+ * @adap: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void t4_free_sge_resources(struct adapter *adap)
+{
+ unsigned int i;
+ struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
+ struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
+
+ /* clean up Ethernet Tx/Rx queues */
+ for (i = 0; i < adap->sge.max_ethqsets; i++, rxq++, txq++) {
+ /* Free only the queues allocated */
+ if (rxq->rspq.desc) {
+ t4_sge_eth_rxq_release(adap, rxq);
+ rxq->rspq.eth_dev = NULL;
+ }
+ if (txq->q.desc) {
+ t4_sge_eth_txq_release(adap, txq);
+ txq->eth_dev = NULL;
+ }
+ }
+
+ /* clean up control Tx queues */
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+ struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+ if (cq->q.desc) {
+ reclaim_completed_tx_imm(&cq->q);
+ t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
+ cq->q.cntxt_id);
+ free_txq(&cq->q);
+ }
+ }
+
+ if (adap->sge.fw_evtq.desc)
+ free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+}
+
+/**
+ * t4_sge_init - initialize SGE
+ * @adap: the adapter
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queues here, instead the driver
+ * top-level must request those individually.
+ *
+ * Called in two different modes:
+ *
+ * 1. Perform actual hardware initialization and record hard-coded
+ * parameters which were used. This gets used when we're the
+ * Master PF and the Firmware Configuration File support didn't
+ * work for some reason.
+ *
+ * 2. We're not the Master PF or initialization was performed with
+ * a Firmware Configuration File. In this case we need to grab
+ * any of the SGE operating parameters that we need to have in
+ * order to do our job and make sure we can live with them ...
+ */
+static int t4_sge_init_soft(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
+ u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
+ u32 ingress_rx_threshold;
+
+ /*
+ * Verify that CPL messages are going to the Ingress Queue for
+ * process_responses() and that only packet data is going to the
+ * Free Lists.
+ */
+ if ((t4_read_reg(adap, A_SGE_CONTROL) & F_RXPKTCPLMODE) !=
+ V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+ dev_err(adap, "bad SGE CPL MODE\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Validate the Host Buffer Register Array indices that we want to
+ * use ...
+ *
+ * XXX Note that we should really read through the Host Buffer Size
+ * XXX register array and find the indices of the Buffer Sizes which
+ * XXX meet our needs!
+ */
+#define READ_FL_BUF(x) \
+ t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE0 + (x) * sizeof(u32))
+
+ fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
+ fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
+ fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
+ fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
+
+ /*
+ * We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+#undef READ_FL_BUF
+
+ /*
+ * The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != CXGBE_PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
+ dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n",
+ fl_small_pg, fl_large_pg);
+ return -EINVAL;
+ }
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+
+ if (adap->use_unpacked_mode) {
+ int err = 0;
+
+ if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap)) {
+ dev_err(adap, "bad SGE FL small MTU %d\n",
+ fl_small_mtu);
+ err = -EINVAL;
+ }
+ if (fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
+ dev_err(adap, "bad SGE FL large MTU %d\n",
+ fl_large_mtu);
+ err = -EINVAL;
+ }
+ if (err)
+ return err;
+ }
+
+ /*
+ * Retrieve our RX interrupt holdoff timer values and counter
+ * threshold values from the SGE parameters.
+ */
+ timer_value_0_and_1 = t4_read_reg(adap, A_SGE_TIMER_VALUE_0_AND_1);
+ timer_value_2_and_3 = t4_read_reg(adap, A_SGE_TIMER_VALUE_2_AND_3);
+ timer_value_4_and_5 = t4_read_reg(adap, A_SGE_TIMER_VALUE_4_AND_5);
+ s->timer_val[0] = core_ticks_to_us(adap,
+ G_TIMERVALUE0(timer_value_0_and_1));
+ s->timer_val[1] = core_ticks_to_us(adap,
+ G_TIMERVALUE1(timer_value_0_and_1));
+ s->timer_val[2] = core_ticks_to_us(adap,
+ G_TIMERVALUE2(timer_value_2_and_3));
+ s->timer_val[3] = core_ticks_to_us(adap,
+ G_TIMERVALUE3(timer_value_2_and_3));
+ s->timer_val[4] = core_ticks_to_us(adap,
+ G_TIMERVALUE4(timer_value_4_and_5));
+ s->timer_val[5] = core_ticks_to_us(adap,
+ G_TIMERVALUE5(timer_value_4_and_5));
+
+ ingress_rx_threshold = t4_read_reg(adap, A_SGE_INGRESS_RX_THRESHOLD);
+ s->counter_val[0] = G_THRESHOLD_0(ingress_rx_threshold);
+ s->counter_val[1] = G_THRESHOLD_1(ingress_rx_threshold);
+ s->counter_val[2] = G_THRESHOLD_2(ingress_rx_threshold);
+ s->counter_val[3] = G_THRESHOLD_3(ingress_rx_threshold);
+
+ return 0;
+}
+
+int t4_sge_init(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ u32 sge_control, sge_conm_ctrl;
+ int ret, egress_threshold;
+
+ /*
+ * Ingress Padding Boundary and Egress Status Page Size are set up by
+ * t4_fixup_host_params().
+ */
+ sge_control = t4_read_reg(adap, A_SGE_CONTROL);
+ s->pktshift = G_PKTSHIFT(sge_control);
+ s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;
+ s->fl_align = t4_fl_pkt_align(adap);
+ ret = t4_sge_init_soft(adap);
+ if (ret < 0) {
+ dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n",
+ __func__, -ret);
+ return ret;
+ }
+
+ /*
+ * A FL with <= fl_starve_thres buffers is starving and a periodic
+ * timer will attempt to refill it. This needs to be larger than the
+ * SGE's Egress Congestion Threshold. If it isn't, then we can get
+ * stuck waiting for new packets while the SGE is waiting for us to
+ * give it more Free List entries. (Note that the SGE's Egress
+ * Congestion Threshold is in units of 2 Free List pointers.) For T4,
+ * there was only a single field to control this. For T5 there's the
+ * original field which now only applies to Unpacked Mode Free List
+ * buffers and a new field which only applies to Packed Mode Free List
+ * buffers.
+ */
+ sge_conm_ctrl = t4_read_reg(adap, A_SGE_CONM_CTRL);
+ if (is_t4(adap->params.chip) || adap->use_unpacked_mode)
+ egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl);
+ else
+ egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl);
+ s->fl_starve_thres = 2 * egress_threshold + 1;
+
+ return 0;
+}
+
+int t4vf_sge_init(struct adapter *adap)
+{
+ struct sge_params *sge_params = &adap->params.sge;
+ u32 sge_ingress_queues_per_page;
+ u32 sge_egress_queues_per_page;
+ u32 sge_control, sge_control2;
+ u32 fl_small_pg, fl_large_pg;
+ u32 sge_ingress_rx_threshold;
+ u32 sge_timer_value_0_and_1;
+ u32 sge_timer_value_2_and_3;
+ u32 sge_timer_value_4_and_5;
+ u32 sge_congestion_control;
+ struct sge *s = &adap->sge;
+ unsigned int s_hps, s_qpp;
+ u32 sge_host_page_size;
+ u32 params[7], vals[7];
+ int v;
+
+ /* query basic params from fw */
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
+ params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0));
+ params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1));
+ params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
+ params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
+ params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
+ v = t4vf_query_params(adap, 7, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+
+ sge_control = vals[0];
+ sge_host_page_size = vals[1];
+ fl_small_pg = vals[2];
+ fl_large_pg = vals[3];
+ sge_timer_value_0_and_1 = vals[4];
+ sge_timer_value_2_and_3 = vals[5];
+ sge_timer_value_4_and_5 = vals[6];
+
+ /*
+ * Start by vetting the basic SGE parameters which have been set up by
+ * the Physical Function Driver.
+ */
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != CXGBE_PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
+ dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
+ fl_small_pg, fl_large_pg);
+ return -EINVAL;
+ }
+
+ if ((sge_control & F_RXPKTCPLMODE) !=
+ V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+ dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
+ return -EINVAL;
+ }
+
+
+ /* Grab ingress packing boundary from SGE_CONTROL2 for */
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
+ v = t4vf_query_params(adap, 1, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_err(adapter, "Unable to get SGE Control2; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_control2 = vals[0];
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
+ v = t4vf_query_params(adap, 2, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ sge_ingress_rx_threshold = vals[0];
+ sge_congestion_control = vals[1];
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
+ v = t4vf_query_params(adap, 2, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_warn(adap, "Unable to get VF SGE Queues/Page; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_egress_queues_per_page = vals[0];
+ sge_ingress_queues_per_page = vals[1];
+
+ /*
+ * We need the Queues/Page for our VF. This is based on the
+ * PF from which we're instantiated and is indexed in the
+ * register we just read.
+ */
+ s_hps = (S_HOSTPAGESIZEPF0 +
+ (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf);
+ sge_params->hps =
+ ((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0);
+
+ s_qpp = (S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf);
+ sge_params->eq_qpp =
+ ((sge_egress_queues_per_page >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+ sge_params->iq_qpp =
+ ((sge_ingress_queues_per_page >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+
+ /*
+ * Now translate the queried parameters into our internal forms.
+ */
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+ s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE)
+ ? 128 : 64);
+ s->pktshift = G_PKTSHIFT(sge_control);
+ s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2);
+
+ /*
+ * A FL with <= fl_starve_thres buffers is starving and a periodic
+ * timer will attempt to refill it. This needs to be larger than the
+ * SGE's Egress Congestion Threshold. If it isn't, then we can get
+ * stuck waiting for new packets while the SGE is waiting for us to
+ * give it more Free List entries. (Note that the SGE's Egress
+ * Congestion Threshold is in units of 2 Free List pointers.)
+ */
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T5:
+ s->fl_starve_thres =
+ G_EGRTHRESHOLDPACKING(sge_congestion_control);
+ break;
+ case CHELSIO_T6:
+ default:
+ s->fl_starve_thres =
+ G_T6_EGRTHRESHOLDPACKING(sge_congestion_control);
+ break;
+ }
+ s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
+
+ /*
+ * Save RX interrupt holdoff timer values and counter
+ * threshold values from the SGE parameters.
+ */
+ s->timer_val[0] = core_ticks_to_us(adap,
+ G_TIMERVALUE0(sge_timer_value_0_and_1));
+ s->timer_val[1] = core_ticks_to_us(adap,
+ G_TIMERVALUE1(sge_timer_value_0_and_1));
+ s->timer_val[2] = core_ticks_to_us(adap,
+ G_TIMERVALUE2(sge_timer_value_2_and_3));
+ s->timer_val[3] = core_ticks_to_us(adap,
+ G_TIMERVALUE3(sge_timer_value_2_and_3));
+ s->timer_val[4] = core_ticks_to_us(adap,
+ G_TIMERVALUE4(sge_timer_value_4_and_5));
+ s->timer_val[5] = core_ticks_to_us(adap,
+ G_TIMERVALUE5(sge_timer_value_4_and_5));
+ s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold);
+ s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold);
+ s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold);
+ s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold);
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/dpaa/Makefile b/src/spdk/dpdk/drivers/net/dpaa/Makefile
new file mode 100644
index 00000000..d7a0a50c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa/Makefile
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+RTE_SDK_DPAA=$(RTE_SDK)/drivers/net/dpaa
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa.a
+
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -I$(RTE_SDK_DPAA)/
+CFLAGS += -I$(RTE_SDK_DPAA)/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/base/qbman
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal/include
+
+EXPORT_MAP := rte_pmd_dpaa_version.map
+
+LIBABIVER := 1
+
+# depends on dpaa bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# Interfaces with DPDK
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c
+
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA_PMD)-include := rte_pmd_dpaa.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.c b/src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.c
new file mode 100644
index 00000000..7a950ac0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.c
@@ -0,0 +1,1500 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP
+ *
+ */
+/* System headers */
+#include <stdio.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+
+#include <rte_dpaa_bus.h>
+#include <rte_dpaa_logs.h>
+#include <dpaa_mempool.h>
+
+#include <dpaa_ethdev.h>
+#include <dpaa_rxtx.h>
+#include <rte_pmd_dpaa.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <fsl_fman.h>
+
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_MT_LOCKFREE |
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+/* Keep track of whether QMAN and BMAN have been globally initialized */
+static int is_global_init;
+static int default_q; /* use default queue - FMC is not executed*/
+/* At present we only allow up to 4 push mode queues as default - as each of
+ * this queue need dedicated portal and we are short of portals.
+ */
+#define DPAA_MAX_PUSH_MODE_QUEUE 8
+#define DPAA_DEFAULT_PUSH_MODE_QUEUE 4
+
+static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
+static int dpaa_push_queue_idx; /* Queue index which are in push mode*/
+
+
+/* Per FQ Taildrop in frame count */
+static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
+
+struct rte_dpaa_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint32_t offset;
+};
+
+static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
+ {"rx_align_err",
+ offsetof(struct dpaa_if_stats, raln)},
+ {"rx_valid_pause",
+ offsetof(struct dpaa_if_stats, rxpf)},
+ {"rx_fcs_err",
+ offsetof(struct dpaa_if_stats, rfcs)},
+ {"rx_vlan_frame",
+ offsetof(struct dpaa_if_stats, rvlan)},
+ {"rx_frame_err",
+ offsetof(struct dpaa_if_stats, rerr)},
+ {"rx_drop_err",
+ offsetof(struct dpaa_if_stats, rdrp)},
+ {"rx_undersized",
+ offsetof(struct dpaa_if_stats, rund)},
+ {"rx_oversize_err",
+ offsetof(struct dpaa_if_stats, rovr)},
+ {"rx_fragment_pkt",
+ offsetof(struct dpaa_if_stats, rfrg)},
+ {"tx_valid_pause",
+ offsetof(struct dpaa_if_stats, txpf)},
+ {"tx_fcs_err",
+ offsetof(struct dpaa_if_stats, terr)},
+ {"tx_vlan_frame",
+ offsetof(struct dpaa_if_stats, tvlan)},
+ {"rx_undersized",
+ offsetof(struct dpaa_if_stats, tund)},
+};
+
+static struct rte_dpaa_driver rte_dpaa_pmd;
+
+static void
+dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
+
+static inline void
+dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
+{
+ memset(opts, 0, sizeof(struct qm_mcc_initfq));
+ opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+ opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
+ QM_FQCTRL_PREFERINCACHE;
+ opts->fqd.context_a.stashing.exclusive = 0;
+ if (dpaa_svr_family != SVR_LS1046A_FAMILY)
+ opts->fqd.context_a.stashing.annotation_cl =
+ DPAA_IF_RX_ANNOTATION_STASH;
+ opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+ opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
+}
+
+static int
+dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + VLAN_TAG_SIZE;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
+ return -EINVAL;
+ if (frame_size > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.offloads &=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ fman_if_set_maxfrm(dpaa_intf->fif, frame_size);
+
+ return 0;
+}
+
+static int
+dpaa_eth_dev_configure(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+ uint64_t rx_offloads = eth_conf->rxmode.offloads;
+ uint64_t tx_offloads = eth_conf->txmode.offloads;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Rx offloads validation */
+ if (dev_rx_offloads_nodis & ~rx_offloads) {
+ DPAA_PMD_WARN(
+ "Rx offloads non configurable - requested 0x%" PRIx64
+ " ignored 0x%" PRIx64,
+ rx_offloads, dev_rx_offloads_nodis);
+ }
+
+ /* Tx offloads validation */
+ if (dev_tx_offloads_nodis & ~tx_offloads) {
+ DPAA_PMD_WARN(
+ "Tx offloads non configurable - requested 0x%" PRIx64
+ " ignored 0x%" PRIx64,
+ tx_offloads, dev_tx_offloads_nodis);
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+ DPAA_MAX_RX_PKT_LEN) {
+ fman_if_set_maxfrm(dpaa_intf->fif,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static const uint32_t *
+dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /*todo -= add more types */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_SCTP
+ };
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
+ return ptypes;
+ return NULL;
+}
+
+static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Change tx callback to the real one */
+ dev->tx_pkt_burst = dpaa_eth_queue_tx;
+ fman_if_enable_rx(dpaa_intf->fif);
+
+ return 0;
+}
+
+static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_disable_rx(dpaa_intf->fif);
+ dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+}
+
+static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ dpaa_eth_dev_stop(dev);
+}
+
+static int
+dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
+ char *fw_version,
+ size_t fw_size)
+{
+ int ret;
+ FILE *svr_file = NULL;
+ unsigned int svr_ver = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ svr_file = fopen(DPAA_SOC_ID_FILE, "r");
+ if (!svr_file) {
+ DPAA_PMD_ERR("Unable to open SoC device");
+ return -ENOTSUP; /* Not supported on this infra */
+ }
+ if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
+ dpaa_svr_family = svr_ver & SVR_MASK;
+ else
+ DPAA_PMD_ERR("Unable to read SoC device");
+
+ fclose(svr_file);
+
+ ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
+ svr_ver, fman_ip_rev);
+ ret += 1; /* add the size of '\0' */
+
+ if (fw_size < (uint32_t)ret)
+ return ret;
+ else
+ return 0;
+}
+
+static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
+ dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
+ dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
+ dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
+ dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
+ dev_info->max_hash_mac_addrs = 0;
+ dev_info->max_vfs = 0;
+ dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
+ dev_info->speed_capa = (ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G);
+ dev_info->rx_offload_capa = dev_rx_offloads_sup |
+ dev_rx_offloads_nodis;
+ dev_info->tx_offload_capa = dev_tx_offloads_sup |
+ dev_tx_offloads_nodis;
+ dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
+ dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
+}
+
+static int dpaa_eth_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_link *link = &dev->data->dev_link;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpaa_intf->fif->mac_type == fman_mac_1g)
+ link->link_speed = ETH_SPEED_NUM_1G;
+ else if (dpaa_intf->fif->mac_type == fman_mac_10g)
+ link->link_speed = ETH_SPEED_NUM_10G;
+ else
+ DPAA_PMD_ERR("invalid link_speed: %s, %d",
+ dpaa_intf->name, dpaa_intf->fif->mac_type);
+
+ link->link_status = dpaa_intf->valid;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_autoneg = ETH_LINK_AUTONEG;
+ return 0;
+}
+
+static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_stats_get(dpaa_intf->fif, stats);
+ return 0;
+}
+
+static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_stats_reset(dpaa_intf->fif);
+}
+
+static int
+dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
+ uint64_t values[sizeof(struct dpaa_if_stats) / 8];
+
+ if (n < num)
+ return num;
+
+ if (xstats == NULL)
+ return 0;
+
+ fman_if_stats_get_all(dpaa_intf->fif, values,
+ sizeof(struct dpaa_if_stats) / 8);
+
+ for (i = 0; i < num; i++) {
+ xstats[i].id = i;
+ xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
+ }
+ return i;
+}
+
+static int
+dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+
+ if (limit < stat_cnt)
+ return stat_cnt;
+
+ if (xstats_names != NULL)
+ for (i = 0; i < stat_cnt; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s",
+ dpaa_xstats_strings[i].name);
+
+ return stat_cnt;
+}
+
+static int
+dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+ uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
+
+ if (!ids) {
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ if (n < stat_cnt)
+ return stat_cnt;
+
+ if (!values)
+ return 0;
+
+ fman_if_stats_get_all(dpaa_intf->fif, values_copy,
+ sizeof(struct dpaa_if_stats) / 8);
+
+ for (i = 0; i < stat_cnt; i++)
+ values[i] =
+ values_copy[dpaa_xstats_strings[i].offset / 8];
+
+ return stat_cnt;
+ }
+
+ dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= stat_cnt) {
+ DPAA_PMD_ERR("id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return n;
+}
+
+static int
+dpaa_xstats_get_names_by_id(
+ struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+ struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
+
+ if (!ids)
+ return dpaa_xstats_get_names(dev, xstats_names, limit);
+
+ dpaa_xstats_get_names(dev, xstats_names_copy, limit);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= stat_cnt) {
+ DPAA_PMD_ERR("id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
+ }
+ return limit;
+}
+
+static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_promiscuous_enable(dpaa_intf->fif);
+}
+
+static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_promiscuous_disable(dpaa_intf->fif);
+}
+
+static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_set_mcast_filter_table(dpaa_intf->fif);
+}
+
+static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_reset_mcast_filter_table(dpaa_intf->fif);
+}
+
+static
+int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
+ struct qm_mcc_initfq opts = {0};
+ u32 flags = 0;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (queue_idx >= dev->data->nb_rx_queues) {
+ rte_errno = EOVERFLOW;
+ DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, queue_idx, dev->data->nb_rx_queues);
+ return -rte_errno;
+ }
+
+ DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
+ queue_idx, rxq->fqid);
+
+ if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
+ struct fman_if_ic_params icp;
+ uint32_t fd_offset;
+ uint32_t bp_size;
+
+ if (!mp->pool_data) {
+ DPAA_PMD_ERR("Not an offloaded buffer pool!");
+ return -1;
+ }
+ dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+ memset(&icp, 0, sizeof(icp));
+ /* set ICEOF for to the default value , which is 0*/
+ icp.iciof = DEFAULT_ICIOF;
+ icp.iceof = DEFAULT_RX_ICEOF;
+ icp.icsz = DEFAULT_ICSZ;
+ fman_if_set_ic_params(dpaa_intf->fif, &icp);
+
+ fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
+ fman_if_set_fdoff(dpaa_intf->fif, fd_offset);
+
+ /* Buffer pool size should be equal to Dataroom Size*/
+ bp_size = rte_pktmbuf_data_room_size(mp);
+ fman_if_set_bp(dpaa_intf->fif, mp->size,
+ dpaa_intf->bp_info->bpid, bp_size);
+ dpaa_intf->valid = 1;
+ DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
+ dpaa_intf->name, fd_offset,
+ fman_if_get_fdoff(dpaa_intf->fif));
+ }
+ /* checking if push mode only, no error check for now */
+ if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
+ dpaa_push_queue_idx++;
+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
+ QM_FQCTRL_CTXASTASHING |
+ QM_FQCTRL_PREFERINCACHE;
+ opts.fqd.context_a.stashing.exclusive = 0;
+ /* In muticore scenario stashing becomes a bottleneck on LS1046.
+ * So do not enable stashing in this case
+ */
+ if (dpaa_svr_family != SVR_LS1046A_FAMILY)
+ opts.fqd.context_a.stashing.annotation_cl =
+ DPAA_IF_RX_ANNOTATION_STASH;
+ opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+ opts.fqd.context_a.stashing.context_cl =
+ DPAA_IF_RX_CONTEXT_STASH;
+
+ /*Create a channel and associate given queue with the channel*/
+ qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
+ opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
+ opts.fqd.dest.channel = rxq->ch_id;
+ opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
+ flags = QMAN_INITFQ_FLAG_SCHED;
+
+ /* Configure tail drop */
+ if (dpaa_intf->cgr_rx) {
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+ ret = qman_init_fq(rxq, flags, &opts);
+ if (ret) {
+ DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
+ "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
+ return ret;
+ }
+ rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
+ rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
+ rxq->is_static = true;
+ }
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ /* configure the CGR size as per the desc size */
+ if (dpaa_intf->cgr_rx) {
+ struct qm_mcc_initcgr cgr_opts = {0};
+
+ /* Enable tail drop with cgr on this queue */
+ qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
+ ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
+ if (ret) {
+ DPAA_PMD_WARN(
+ "rx taildrop modify fail on fqid %d (ret=%d)",
+ rxq->fqid, ret);
+ }
+ }
+
+ return 0;
+}
+
+int
+dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
+ u16 ch_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ int ret;
+ u32 flags = 0;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
+ struct qm_mcc_initfq opts = {0};
+
+ if (dpaa_push_mode_max_queue)
+ DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.\n"
+ "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
+ dpaa_push_mode_max_queue);
+
+ dpaa_poll_queue_default_config(&opts);
+
+ switch (queue_conf->ev.sched_type) {
+ case RTE_SCHED_TYPE_ATOMIC:
+ opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+ /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
+ * configuration with HOLD_ACTIVE setting
+ */
+ opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
+ rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
+ break;
+ case RTE_SCHED_TYPE_ORDERED:
+ DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
+ return -1;
+ default:
+ opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
+ rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
+ break;
+ }
+
+ opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
+ opts.fqd.dest.channel = ch_id;
+ opts.fqd.dest.wq = queue_conf->ev.priority;
+
+ if (dpaa_intf->cgr_rx) {
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+
+ flags = QMAN_INITFQ_FLAG_SCHED;
+
+ ret = qman_init_fq(rxq, flags, &opts);
+ if (ret) {
+ DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
+ "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
+ return ret;
+ }
+
+ /* copy configuration which needs to be filled during dequeue */
+ memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
+ dev->data->rx_queues[eth_rx_queue_id] = rxq;
+
+ return ret;
+}
+
+int
+dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id)
+{
+ struct qm_mcc_initfq opts;
+ int ret;
+ u32 flags = 0;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
+
+ dpaa_poll_queue_default_config(&opts);
+
+ if (dpaa_intf->cgr_rx) {
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+
+ ret = qman_init_fq(rxq, flags, &opts);
+ if (ret) {
+ DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
+ rxq->fqid, ret);
+ }
+
+ rxq->cb.dqrr_dpdk_cb = NULL;
+ dev->data->rx_queues[eth_rx_queue_id] = NULL;
+
+ return 0;
+}
+
+static
+void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static
+int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (queue_idx >= dev->data->nb_tx_queues) {
+ rte_errno = EOVERFLOW;
+ DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, queue_idx, dev->data->nb_tx_queues);
+ return -rte_errno;
+ }
+
+ DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
+ queue_idx, dpaa_intf->tx_queues[queue_idx].fqid);
+ dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
+ return 0;
+}
+
+static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static uint32_t
+dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
+ u32 frm_cnt = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
+ RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
+ rx_queue_id, frm_cnt);
+ }
+ return frm_cnt;
+}
+
+static int dpaa_link_down(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ dpaa_eth_dev_stop(dev);
+ return 0;
+}
+
+static int dpaa_link_up(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ dpaa_eth_dev_start(dev);
+ return 0;
+}
+
+static int
+dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_fc_conf *net_fc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!(dpaa_intf->fc_conf)) {
+ dpaa_intf->fc_conf = rte_zmalloc(NULL,
+ sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
+ if (!dpaa_intf->fc_conf) {
+ DPAA_PMD_ERR("unable to save flow control info");
+ return -ENOMEM;
+ }
+ }
+ net_fc = dpaa_intf->fc_conf;
+
+ if (fc_conf->high_water < fc_conf->low_water) {
+ DPAA_PMD_ERR("Incorrect Flow Control Configuration");
+ return -EINVAL;
+ }
+
+ if (fc_conf->mode == RTE_FC_NONE) {
+ return 0;
+ } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
+ fc_conf->mode == RTE_FC_FULL) {
+ fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
+ fc_conf->low_water,
+ dpaa_intf->bp_info->bpid);
+ if (fc_conf->pause_time)
+ fman_if_set_fc_quanta(dpaa_intf->fif,
+ fc_conf->pause_time);
+ }
+
+ /* Save the information in dpaa device */
+ net_fc->pause_time = fc_conf->pause_time;
+ net_fc->high_water = fc_conf->high_water;
+ net_fc->low_water = fc_conf->low_water;
+ net_fc->send_xon = fc_conf->send_xon;
+ net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
+ net_fc->mode = fc_conf->mode;
+ net_fc->autoneg = fc_conf->autoneg;
+
+ return 0;
+}
+
+static int
+dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (net_fc) {
+ fc_conf->pause_time = net_fc->pause_time;
+ fc_conf->high_water = net_fc->high_water;
+ fc_conf->low_water = net_fc->low_water;
+ fc_conf->send_xon = net_fc->send_xon;
+ fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
+ fc_conf->mode = net_fc->mode;
+ fc_conf->autoneg = net_fc->autoneg;
+ return 0;
+ }
+ ret = fman_if_get_fc_threshold(dpaa_intf->fif);
+ if (ret) {
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
+ } else {
+ fc_conf->mode = RTE_FC_NONE;
+ }
+
+ return 0;
+}
+
+static int
+dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ int ret;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);
+
+ if (ret)
+ RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
+ " err = %d", ret);
+ return 0;
+}
+
+static void
+dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
+ uint32_t index)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_clear_mac_addr(dpaa_intf->fif, index);
+}
+
+static int
+dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ int ret;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
+ if (ret)
+ RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
+
+ return ret;
+}
+
+static struct eth_dev_ops dpaa_devops = {
+ .dev_configure = dpaa_eth_dev_configure,
+ .dev_start = dpaa_eth_dev_start,
+ .dev_stop = dpaa_eth_dev_stop,
+ .dev_close = dpaa_eth_dev_close,
+ .dev_infos_get = dpaa_eth_dev_info,
+ .dev_supported_ptypes_get = dpaa_supported_ptypes_get,
+
+ .rx_queue_setup = dpaa_eth_rx_queue_setup,
+ .tx_queue_setup = dpaa_eth_tx_queue_setup,
+ .rx_queue_release = dpaa_eth_rx_queue_release,
+ .tx_queue_release = dpaa_eth_tx_queue_release,
+ .rx_queue_count = dpaa_dev_rx_queue_count,
+
+ .flow_ctrl_get = dpaa_flow_ctrl_get,
+ .flow_ctrl_set = dpaa_flow_ctrl_set,
+
+ .link_update = dpaa_eth_link_update,
+ .stats_get = dpaa_eth_stats_get,
+ .xstats_get = dpaa_dev_xstats_get,
+ .xstats_get_by_id = dpaa_xstats_get_by_id,
+ .xstats_get_names_by_id = dpaa_xstats_get_names_by_id,
+ .xstats_get_names = dpaa_xstats_get_names,
+ .xstats_reset = dpaa_eth_stats_reset,
+ .stats_reset = dpaa_eth_stats_reset,
+ .promiscuous_enable = dpaa_eth_promiscuous_enable,
+ .promiscuous_disable = dpaa_eth_promiscuous_disable,
+ .allmulticast_enable = dpaa_eth_multicast_enable,
+ .allmulticast_disable = dpaa_eth_multicast_disable,
+ .mtu_set = dpaa_mtu_set,
+ .dev_set_link_down = dpaa_link_down,
+ .dev_set_link_up = dpaa_link_up,
+ .mac_addr_add = dpaa_dev_add_mac_addr,
+ .mac_addr_remove = dpaa_dev_remove_mac_addr,
+ .mac_addr_set = dpaa_dev_set_mac_addr,
+
+ .fw_version_get = dpaa_fw_version_get,
+};
+
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
+{
+ if (strcmp(dev->device->driver->name,
+ drv->driver.name))
+ return false;
+
+ return true;
+}
+
+static bool
+is_dpaa_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_dpaa_pmd);
+}
+
+int
+rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct dpaa_if *dpaa_intf;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_dpaa_supported(dev))
+ return -ENOTSUP;
+
+ dpaa_intf = dev->data->dev_private;
+
+ if (on)
+ fman_if_loopback_enable(dpaa_intf->fif);
+ else
+ fman_if_loopback_disable(dpaa_intf->fif);
+
+ return 0;
+}
+
+static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
+{
+ struct rte_eth_fc_conf *fc_conf;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!(dpaa_intf->fc_conf)) {
+ dpaa_intf->fc_conf = rte_zmalloc(NULL,
+ sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
+ if (!dpaa_intf->fc_conf) {
+ DPAA_PMD_ERR("unable to save flow control info");
+ return -ENOMEM;
+ }
+ }
+ fc_conf = dpaa_intf->fc_conf;
+ ret = fman_if_get_fc_threshold(dpaa_intf->fif);
+ if (ret) {
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
+ } else {
+ fc_conf->mode = RTE_FC_NONE;
+ }
+
+ return 0;
+}
+
+/* Initialise an Rx FQ */
+static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
+ uint32_t fqid)
+{
+ struct qm_mcc_initfq opts = {0};
+ int ret;
+ u32 flags = 0;
+ struct qm_mcc_initcgr cgr_opts = {
+ .we_mask = QM_CGR_WE_CS_THRES |
+ QM_CGR_WE_CSTD_EN |
+ QM_CGR_WE_MODE,
+ .cgr = {
+ .cstd_en = QM_CGR_EN,
+ .mode = QMAN_CGR_MODE_FRAME
+ }
+ };
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = qman_reserve_fqid(fqid);
+ if (ret) {
+ DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
+ fqid, ret);
+ return -EINVAL;
+ }
+
+ DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
+ ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+ if (ret) {
+ DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
+ fqid, ret);
+ return ret;
+ }
+ fq->is_static = false;
+
+ dpaa_poll_queue_default_config(&opts);
+
+ if (cgr_rx) {
+ /* Enable tail drop with cgr on this queue */
+ qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
+ cgr_rx->cb = NULL;
+ ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
+ &cgr_opts);
+ if (ret) {
+ DPAA_PMD_WARN(
+ "rx taildrop init fail on rx fqid 0x%x(ret=%d)",
+ fqid, ret);
+ goto without_cgr;
+ }
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = cgr_rx->cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+without_cgr:
+ ret = qman_init_fq(fq, flags, &opts);
+ if (ret)
+ DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
+ return ret;
+}
+
+/* Initialise a Tx FQ */
+static int dpaa_tx_queue_init(struct qman_fq *fq,
+ struct fman_if *fman_intf)
+{
+ struct qm_mcc_initfq opts = {0};
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
+ QMAN_FQ_FLAG_TO_DCPORTAL, fq);
+ if (ret) {
+ DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
+ return ret;
+ }
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.dest.channel = fman_intf->tx_channel_id;
+ opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
+ opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+ opts.fqd.context_b = 0;
+ /* no tx-confirmation */
+ opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
+ opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
+ DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
+ ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+ if (ret)
+ DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
+ return ret;
+}
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
+static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
+{
+ struct qm_mcc_initfq opts = {0};
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = qman_reserve_fqid(fqid);
+ if (ret) {
+ DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
+ fqid, ret);
+ return -EINVAL;
+ }
+ /* "map" this Rx FQ to one of the interfaces Tx FQID */
+ DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
+ ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+ if (ret) {
+ DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
+ fqid, ret);
+ return ret;
+ }
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
+ opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
+ ret = qman_init_fq(fq, 0, &opts);
+ if (ret)
+ DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
+ fqid, ret);
+ return ret;
+}
+#endif
+
+/* Initialise a network interface */
+static int
+dpaa_dev_init(struct rte_eth_dev *eth_dev)
+{
+ int num_cores, num_rx_fqs, fqid;
+ int loop, ret = 0;
+ int dev_id;
+ struct rte_dpaa_device *dpaa_device;
+ struct dpaa_if *dpaa_intf;
+ struct fm_eth_port_cfg *cfg;
+ struct fman_if *fman_intf;
+ struct fman_if_bpool *bp, *tmp_bp;
+ uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
+ dev_id = dpaa_device->id.dev_id;
+ dpaa_intf = eth_dev->data->dev_private;
+ cfg = &dpaa_netcfg->port_cfg[dev_id];
+ fman_intf = cfg->fman_if;
+
+ dpaa_intf->name = dpaa_device->name;
+
+ /* save fman_if & cfg in the interface struture */
+ dpaa_intf->fif = fman_intf;
+ dpaa_intf->ifid = dev_id;
+ dpaa_intf->cfg = cfg;
+
+ /* Initialize Rx FQ's */
+ if (default_q) {
+ num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
+ } else {
+ if (getenv("DPAA_NUM_RX_QUEUES"))
+ num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
+ else
+ num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
+ }
+
+
+ /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
+ * queues.
+ */
+ if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
+ DPAA_PMD_ERR("Invalid number of RX queues\n");
+ return -EINVAL;
+ }
+
+ dpaa_intf->rx_queues = rte_zmalloc(NULL,
+ sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
+ if (!dpaa_intf->rx_queues) {
+ DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
+ return -ENOMEM;
+ }
+
+ /* If congestion control is enabled globally*/
+ if (td_threshold) {
+ dpaa_intf->cgr_rx = rte_zmalloc(NULL,
+ sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
+ if (!dpaa_intf->cgr_rx) {
+ DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
+ ret = -ENOMEM;
+ goto free_rx;
+ }
+
+ ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
+ if (ret != num_rx_fqs) {
+ DPAA_PMD_WARN("insufficient CGRIDs available");
+ ret = -EINVAL;
+ goto free_rx;
+ }
+ } else {
+ dpaa_intf->cgr_rx = NULL;
+ }
+
+ for (loop = 0; loop < num_rx_fqs; loop++) {
+ if (default_q)
+ fqid = cfg->rx_def;
+ else
+ fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
+ DPAA_PCD_FQID_MULTIPLIER + loop;
+
+ if (dpaa_intf->cgr_rx)
+ dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
+
+ ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
+ dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
+ fqid);
+ if (ret)
+ goto free_rx;
+ dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
+ }
+ dpaa_intf->nb_rx_queues = num_rx_fqs;
+
+ /* Initialise Tx FQs.free_rx Have as many Tx FQ's as number of cores */
+ num_cores = rte_lcore_count();
+ dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
+ num_cores, MAX_CACHELINE);
+ if (!dpaa_intf->tx_queues) {
+ DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
+ ret = -ENOMEM;
+ goto free_rx;
+ }
+
+ for (loop = 0; loop < num_cores; loop++) {
+ ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
+ fman_intf);
+ if (ret)
+ goto free_tx;
+ dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
+ }
+ dpaa_intf->nb_tx_queues = num_cores;
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+ dpaa_debug_queue_init(&dpaa_intf->debug_queues[
+ DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
+ dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
+ dpaa_debug_queue_init(&dpaa_intf->debug_queues[
+ DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
+ dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
+#endif
+
+ DPAA_PMD_DEBUG("All frame queues created");
+
+ /* Get the initial configuration for flow control */
+ dpaa_fc_set_default(dpaa_intf);
+
+ /* reset bpool list, initialize bpool dynamically */
+ list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
+ list_del(&bp->node);
+ free(bp);
+ }
+
+ /* Populate ethdev structure */
+ eth_dev->dev_ops = &dpaa_devops;
+ eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
+ eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+ ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
+ "store MAC addresses",
+ ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
+ ret = -ENOMEM;
+ goto free_tx;
+ }
+
+ /* copy the primary mac address */
+ ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
+
+ RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dpaa_device->name,
+ fman_intf->mac_addr.addr_bytes[0],
+ fman_intf->mac_addr.addr_bytes[1],
+ fman_intf->mac_addr.addr_bytes[2],
+ fman_intf->mac_addr.addr_bytes[3],
+ fman_intf->mac_addr.addr_bytes[4],
+ fman_intf->mac_addr.addr_bytes[5]);
+
+ /* Disable RX mode */
+ fman_if_discard_rx_errors(fman_intf);
+ fman_if_disable_rx(fman_intf);
+ /* Disable promiscuous mode */
+ fman_if_promiscuous_disable(fman_intf);
+ /* Disable multicast */
+ fman_if_reset_mcast_filter_table(fman_intf);
+ /* Reset interface statistics */
+ fman_if_stats_reset(fman_intf);
+
+ return 0;
+
+free_tx:
+ rte_free(dpaa_intf->tx_queues);
+ dpaa_intf->tx_queues = NULL;
+ dpaa_intf->nb_tx_queues = 0;
+
+free_rx:
+ rte_free(dpaa_intf->cgr_rx);
+ rte_free(dpaa_intf->rx_queues);
+ dpaa_intf->rx_queues = NULL;
+ dpaa_intf->nb_rx_queues = 0;
+ return ret;
+}
+
+static int
+dpaa_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ int loop;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (!dpaa_intf) {
+ DPAA_PMD_WARN("Already closed or not started");
+ return -1;
+ }
+
+ dpaa_eth_dev_close(dev);
+
+ /* release configuration memory */
+ if (dpaa_intf->fc_conf)
+ rte_free(dpaa_intf->fc_conf);
+
+ /* Release RX congestion Groups */
+ if (dpaa_intf->cgr_rx) {
+ for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
+ qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
+
+ qman_release_cgrid_range(dpaa_intf->cgr_rx[loop].cgrid,
+ dpaa_intf->nb_rx_queues);
+ }
+
+ rte_free(dpaa_intf->cgr_rx);
+ dpaa_intf->cgr_rx = NULL;
+
+ rte_free(dpaa_intf->rx_queues);
+ dpaa_intf->rx_queues = NULL;
+
+ rte_free(dpaa_intf->tx_queues);
+ dpaa_intf->tx_queues = NULL;
+
+ /* free memory for storing MAC addresses */
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ return 0;
+}
+
+static int
+rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
+ struct rte_dpaa_device *dpaa_dev)
+{
+ int diag;
+ int ret;
+ struct rte_eth_dev *eth_dev;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* In case of secondary process, the device is already configured
+ * and no further action is required, except portal initialization
+ * and verifying secondary attachment to port name.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
+ if (!eth_dev)
+ return -ENOMEM;
+ eth_dev->device = &dpaa_dev->device;
+ eth_dev->dev_ops = &dpaa_devops;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ if (!is_global_init) {
+ /* One time load of Qman/Bman drivers */
+ ret = qman_global_init();
+ if (ret) {
+ DPAA_PMD_ERR("QMAN initialization failed: %d",
+ ret);
+ return ret;
+ }
+ ret = bman_global_init();
+ if (ret) {
+ DPAA_PMD_ERR("BMAN initialization failed: %d",
+ ret);
+ return ret;
+ }
+
+ if (access("/tmp/fmc.bin", F_OK) == -1) {
+ RTE_LOG(INFO, PMD,
+ "* FMC not configured.Enabling default mode\n");
+ default_q = 1;
+ }
+
+ /* disabling the default push mode for LS1043 */
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY)
+ dpaa_push_mode_max_queue = 0;
+
+ /* if push mode queues to be enabled. Currenly we are allowing
+ * only one queue per thread.
+ */
+ if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
+ dpaa_push_mode_max_queue =
+ atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
+ if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
+ dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+ }
+
+ is_global_init = 1;
+ }
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)1);
+ if (ret) {
+ DPAA_PMD_ERR("Unable to initialize portal");
+ return ret;
+ }
+ }
+
+ eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ eth_dev->data->dev_private = rte_zmalloc(
+ "ethdev private structure",
+ sizeof(struct dpaa_if),
+ RTE_CACHE_LINE_SIZE);
+ if (!eth_dev->data->dev_private) {
+ DPAA_PMD_ERR("Cannot allocate memzone for port data");
+ rte_eth_dev_release_port(eth_dev);
+ return -ENOMEM;
+ }
+
+ eth_dev->device = &dpaa_dev->device;
+ eth_dev->device->driver = &dpaa_drv->driver;
+ dpaa_dev->eth_dev = eth_dev;
+
+ /* Invoke PMD device initialization function */
+ diag = dpaa_dev_init(eth_dev);
+ if (diag == 0) {
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+
+ rte_eth_dev_release_port(eth_dev);
+ return diag;
+}
+
+static int
+rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
+{
+ struct rte_eth_dev *eth_dev;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev = dpaa_dev->eth_dev;
+ dpaa_dev_uninit(eth_dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_dpaa_driver rte_dpaa_pmd = {
+ .drv_type = FSL_DPAA_ETH,
+ .probe = rte_dpaa_probe,
+ .remove = rte_dpaa_remove,
+};
+
+RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
diff --git a/src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.h b/src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.h
new file mode 100644
index 00000000..c79b9f86
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP
+ *
+ */
+#ifndef __DPAA_ETHDEV_H__
+#define __DPAA_ETHDEV_H__
+
+/* System headers */
+#include <stdbool.h>
+#include <rte_ethdev_driver.h>
+#include <rte_event_eth_rx_adapter.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
+#define DPAA_MBUF_HW_ANNOTATION 64
+#define DPAA_FD_PTA_SIZE 64
+
+#if (DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
+#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
+#endif
+
+/* mbuf->seqn will be used to store event entry index for
+ * driver specific usage. For parallel mode queues, invalid
+ * index will be set and for atomic mode queues, valid value
+ * ranging from 1 to 16.
+ */
+#define DPAA_INVALID_MBUF_SEQN 0
+
+/* we will re-use the HEADROOM for annotation in RX */
+#define DPAA_HW_BUF_RESERVE 0
+#define DPAA_PACKET_LAYOUT_ALIGN 64
+
+/* Alignment to use for cpu-local structs to avoid coherency problems. */
+#define MAX_CACHELINE 64
+
+#define DPAA_MIN_RX_BUF_SIZE 512
+#define DPAA_MAX_RX_PKT_LEN 10240
+
+/* RX queue tail drop threshold (CGR Based) in frame count */
+#define CGR_RX_PERFQ_THRESH 256
+
+/*max mac filter for memac(8) including primary mac addr*/
+#define DPAA_MAX_MAC_FILTER (MEMAC_NUM_OF_PADDRS + 1)
+
+/*Maximum number of slots available in TX ring*/
+#define DPAA_TX_BURST_SIZE 7
+
+/* Optimal burst size for RX and TX as default */
+#define DPAA_DEF_RX_BURST_SIZE 7
+#define DPAA_DEF_TX_BURST_SIZE DPAA_TX_BURST_SIZE
+
+#ifndef VLAN_TAG_SIZE
+#define VLAN_TAG_SIZE 4 /** < Vlan Header Length */
+#endif
+
+/* PCD frame queues */
+#define DPAA_PCD_FQID_START 0x400
+#define DPAA_PCD_FQID_MULTIPLIER 0x100
+#define DPAA_DEFAULT_NUM_PCD_QUEUES 1
+#define DPAA_MAX_NUM_PCD_QUEUES 32
+
+#define DPAA_IF_TX_PRIORITY 3
+#define DPAA_IF_RX_PRIORITY 0
+#define DPAA_IF_DEBUG_PRIORITY 7
+
+#define DPAA_IF_RX_ANNOTATION_STASH 1
+#define DPAA_IF_RX_DATA_STASH 1
+#define DPAA_IF_RX_CONTEXT_STASH 0
+
+/* Each "debug" FQ is represented by one of these */
+#define DPAA_DEBUG_FQ_RX_ERROR 0
+#define DPAA_DEBUG_FQ_TX_ERROR 1
+
+#define DPAA_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IP | \
+ ETH_RSS_UDP | \
+ ETH_RSS_TCP | \
+ ETH_RSS_SCTP)
+
+#define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_CKSUM | \
+ PKT_TX_UDP_CKSUM)
+
+/* DPAA Frame descriptor macros */
+
+#define DPAA_FD_CMD_FCO 0x80000000
+/**< Frame queue Context Override */
+#define DPAA_FD_CMD_RPD 0x40000000
+/**< Read Prepended Data */
+#define DPAA_FD_CMD_UPD 0x20000000
+/**< Update Prepended Data */
+#define DPAA_FD_CMD_DTC 0x10000000
+/**< Do IP/TCP/UDP Checksum */
+#define DPAA_FD_CMD_DCL4C 0x10000000
+/**< Didn't calculate L4 Checksum */
+#define DPAA_FD_CMD_CFQ 0x00ffffff
+/**< Confirmation Frame Queue */
+
+/* Each network interface is represented by one of these */
+struct dpaa_if {
+ int valid;
+ char *name;
+ const struct fm_eth_port_cfg *cfg;
+ struct qman_fq *rx_queues;
+ struct qman_cgr *cgr_rx;
+ struct qman_fq *tx_queues;
+ struct qman_fq debug_queues[2];
+ uint16_t nb_rx_queues;
+ uint16_t nb_tx_queues;
+ uint32_t ifid;
+ struct fman_if *fif;
+ struct dpaa_bp_info *bp_info;
+ struct rte_eth_fc_conf *fc_conf;
+};
+
+struct dpaa_if_stats {
+ /* Rx Statistics Counter */
+ uint64_t reoct; /**<Rx Eth Octets Counter */
+ uint64_t roct; /**<Rx Octet Counters */
+ uint64_t raln; /**<Rx Alignment Error Counter */
+ uint64_t rxpf; /**<Rx valid Pause Frame */
+ uint64_t rfrm; /**<Rx Frame counter */
+ uint64_t rfcs; /**<Rx frame check seq error */
+ uint64_t rvlan; /**<Rx Vlan Frame Counter */
+ uint64_t rerr; /**<Rx Frame error */
+ uint64_t ruca; /**<Rx Unicast */
+ uint64_t rmca; /**<Rx Multicast */
+ uint64_t rbca; /**<Rx Broadcast */
+ uint64_t rdrp; /**<Rx Dropped Packet */
+ uint64_t rpkt; /**<Rx packet */
+ uint64_t rund; /**<Rx undersized packets */
+ uint32_t res_x[14];
+ uint64_t rovr; /**<Rx oversized but good */
+ uint64_t rjbr; /**<Rx oversized with bad csum */
+ uint64_t rfrg; /**<Rx fragment Packet */
+ uint64_t rcnp; /**<Rx control packets (0x8808 */
+ uint64_t rdrntp; /**<Rx dropped due to FIFO overflow */
+ uint32_t res01d0[12];
+ /* Tx Statistics Counter */
+ uint64_t teoct; /**<Tx eth octets */
+ uint64_t toct; /**<Tx Octets */
+ uint32_t res0210[2];
+ uint64_t txpf; /**<Tx valid pause frame */
+ uint64_t tfrm; /**<Tx frame counter */
+ uint64_t tfcs; /**<Tx FCS error */
+ uint64_t tvlan; /**<Tx Vlan Frame */
+ uint64_t terr; /**<Tx frame error */
+ uint64_t tuca; /**<Tx Unicast */
+ uint64_t tmca; /**<Tx Multicast */
+ uint64_t tbca; /**<Tx Broadcast */
+ uint32_t res0258[2];
+ uint64_t tpkt; /**<Tx Packet */
+ uint64_t tund; /**<Tx Undersized */
+};
+
+int
+dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
+ u16 ch_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+
+int
+dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id);
+
+enum qman_cb_dqrr_result
+dpaa_rx_cb_parallel(void *event,
+ struct qman_portal *qm __always_unused,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bufs);
+enum qman_cb_dqrr_result
+dpaa_rx_cb_atomic(void *event,
+ struct qman_portal *qm __always_unused,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bufs);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.c b/src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.c
new file mode 100644
index 00000000..168b77e4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.c
@@ -0,0 +1,960 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP
+ *
+ */
+
+/* System headers */
+#include <inttypes.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <limits.h>
+#include <sched.h>
+#include <pthread.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_net.h>
+#include <rte_eventdev.h>
+
+#include "dpaa_ethdev.h"
+#include "dpaa_rxtx.h"
+#include <rte_dpaa_bus.h>
+#include <dpaa_mempool.h>
+
+#include <qman.h>
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
+#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
+ do { \
+ (_fd)->cmd = 0; \
+ (_fd)->opaque_addr = 0; \
+ (_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
+ (_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
+ (_fd)->opaque |= (_mbuf)->pkt_len; \
+ (_fd)->addr = (_mbuf)->buf_iova; \
+ (_fd)->bpid = _bpid; \
+ } while (0)
+
+#if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER)
+static void dpaa_display_frame(const struct qm_fd *fd)
+{
+ int ii;
+ char *ptr;
+
+ printf("%s::bpid %x addr %08x%08x, format %d off %d, len %d stat %x\n",
+ __func__, fd->bpid, fd->addr_hi, fd->addr_lo, fd->format,
+ fd->offset, fd->length20, fd->status);
+
+ ptr = (char *)rte_dpaa_mem_ptov(fd->addr);
+ ptr += fd->offset;
+ printf("%02x ", *ptr);
+ for (ii = 1; ii < fd->length20; ii++) {
+ printf("%02x ", *ptr);
+ if ((ii % 16) == 0)
+ printf("\n");
+ ptr++;
+ }
+ printf("\n");
+}
+#else
+#define dpaa_display_frame(a)
+#endif
+
+static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
+ uint64_t prs __rte_unused)
+{
+ DPAA_DP_LOG(DEBUG, "Slow parsing");
+ /*TBD:XXX: to be implemented*/
+}
+
+static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
+{
+ struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
+ uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK;
+
+ DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
+
+ switch (prs) {
+ case DPAA_PKT_TYPE_IPV4:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4;
+ break;
+ case DPAA_PKT_TYPE_IPV6:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6;
+ break;
+ case DPAA_PKT_TYPE_ETHER:
+ m->packet_type = RTE_PTYPE_L2_ETHER;
+ break;
+ case DPAA_PKT_TYPE_IPV4_FRAG:
+ case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
+ case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
+ case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
+ break;
+ case DPAA_PKT_TYPE_IPV6_FRAG:
+ case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
+ case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
+ case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
+ break;
+ case DPAA_PKT_TYPE_IPV4_EXT:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT;
+ break;
+ case DPAA_PKT_TYPE_IPV6_EXT:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT;
+ break;
+ case DPAA_PKT_TYPE_IPV4_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA_PKT_TYPE_IPV4_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA_PKT_TYPE_IPV4_EXT_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_EXT_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA_PKT_TYPE_IPV4_EXT_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_EXT_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA_PKT_TYPE_IPV4_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
+ break;
+ case DPAA_PKT_TYPE_NONE:
+ m->packet_type = 0;
+ break;
+ /* More switch cases can be added */
+ default:
+ dpaa_slow_parsing(m, prs);
+ }
+
+ m->tx_offload = annot->parse.ip_off[0];
+ m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
+ << DPAA_PKT_L3_LEN_SHIFT;
+
+ /* Set the hash values */
+ m->hash.rss = (uint32_t)(annot->hash);
+ /* All packets with Bad checksum are dropped by interface (and
+ * corresponding notification issued to RX error queues).
+ */
+ m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_GOOD;
+
+ /* Check if Vlan is present */
+ if (prs & DPAA_PARSE_VLAN_MASK)
+ m->ol_flags |= PKT_RX_VLAN;
+ /* Packet received without stripping the vlan */
+}
+
+static inline void dpaa_checksum(struct rte_mbuf *mbuf)
+{
+ struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
+ struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+ struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+
+ DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);
+
+ if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
+ ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV4_EXT)) {
+ ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+ ipv4_hdr->hdr_checksum = 0;
+ ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+ } else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV6) ||
+ ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV6_EXT))
+ ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+
+ if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
+ struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
+ mbuf->l3_len);
+ tcp_hdr->cksum = 0;
+ if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
+ tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
+ tcp_hdr);
+ else /* assume ethertype == ETHER_TYPE_IPv6 */
+ tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
+ tcp_hdr);
+ } else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
+ RTE_PTYPE_L4_UDP) {
+ struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
+ mbuf->l3_len);
+ udp_hdr->dgram_cksum = 0;
+ if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
+ udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
+ udp_hdr);
+ else /* assume ethertype == ETHER_TYPE_IPv6 */
+ udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
+ udp_hdr);
+ }
+}
+
+static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
+ struct qm_fd *fd, char *prs_buf)
+{
+ struct dpaa_eth_parse_results_t *prs;
+
+ DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);
+
+ prs = GET_TX_PRS(prs_buf);
+ prs->l3r = 0;
+ prs->l4r = 0;
+ if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
+ ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV4_EXT))
+ prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
+ else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV6) ||
+ ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV6_EXT))
+ prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;
+
+ if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
+ prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
+ else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+ prs->l4r = DPAA_L4_PARSE_RESULT_UDP;
+
+ prs->ip_off[0] = mbuf->l2_len;
+ prs->l4_off = mbuf->l3_len + mbuf->l2_len;
+ /* Enable L3 (and L4, if TCP or UDP) HW checksum*/
+ fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
+}
+
+static inline void
+dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr)
+{
+ if (!mbuf->packet_type) {
+ struct rte_net_hdr_lens hdr_lens;
+
+ mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
+ RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
+ | RTE_PTYPE_L4_MASK);
+ mbuf->l2_len = hdr_lens.l2_len;
+ mbuf->l3_len = hdr_lens.l3_len;
+ }
+ if (mbuf->data_off < (DEFAULT_TX_ICEOF +
+ sizeof(struct dpaa_eth_parse_results_t))) {
+ DPAA_DP_LOG(DEBUG, "Checksum offload Err: "
+ "Not enough Headroom "
+ "space for correct Checksum offload."
+ "So Calculating checksum in Software.");
+ dpaa_checksum(mbuf);
+ } else {
+ dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
+ }
+}
+
+struct rte_mbuf *
+dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
+{
+ struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
+ struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
+ struct qm_sg_entry *sgt, *sg_temp;
+ void *vaddr, *sg_vaddr;
+ int i = 0;
+ uint8_t fd_offset = fd->offset;
+
+ DPAA_DP_LOG(DEBUG, "Received an SG frame");
+
+ vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
+ if (!vaddr) {
+ DPAA_PMD_ERR("unable to convert physical address");
+ return NULL;
+ }
+ sgt = vaddr + fd_offset;
+ sg_temp = &sgt[i++];
+ hw_sg_to_cpu(sg_temp);
+ temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
+ sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp));
+
+ first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
+ bp_info->meta_data_size);
+ first_seg->data_off = sg_temp->offset;
+ first_seg->data_len = sg_temp->length;
+ first_seg->pkt_len = sg_temp->length;
+ rte_mbuf_refcnt_set(first_seg, 1);
+
+ first_seg->port = ifid;
+ first_seg->nb_segs = 1;
+ first_seg->ol_flags = 0;
+ prev_seg = first_seg;
+ while (i < DPAA_SGT_MAX_ENTRIES) {
+ sg_temp = &sgt[i++];
+ hw_sg_to_cpu(sg_temp);
+ sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
+ qm_sg_entry_get64(sg_temp));
+ cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
+ bp_info->meta_data_size);
+ cur_seg->data_off = sg_temp->offset;
+ cur_seg->data_len = sg_temp->length;
+ first_seg->pkt_len += sg_temp->length;
+ first_seg->nb_segs += 1;
+ rte_mbuf_refcnt_set(cur_seg, 1);
+ prev_seg->next = cur_seg;
+ if (sg_temp->final) {
+ cur_seg->next = NULL;
+ break;
+ }
+ prev_seg = cur_seg;
+ }
+
+ dpaa_eth_packet_info(first_seg, vaddr);
+ rte_pktmbuf_free_seg(temp);
+
+ return first_seg;
+}
+
+static inline struct rte_mbuf *
+dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
+{
+ struct rte_mbuf *mbuf;
+ struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
+ void *ptr;
+ uint8_t format =
+ (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
+ uint16_t offset;
+ uint32_t length;
+
+ DPAA_DP_LOG(DEBUG, " FD--->MBUF");
+
+ if (unlikely(format == qm_fd_sg))
+ return dpaa_eth_sg_to_mbuf(fd, ifid);
+
+ ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
+
+ rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
+
+ offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
+ length = fd->opaque & DPAA_FD_LENGTH_MASK;
+
+ /* Ignoring case when format != qm_fd_contig */
+ dpaa_display_frame(fd);
+
+ mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
+
+ mbuf->data_off = offset;
+ mbuf->data_len = length;
+ mbuf->pkt_len = length;
+
+ mbuf->port = ifid;
+ mbuf->nb_segs = 1;
+ mbuf->ol_flags = 0;
+ mbuf->next = NULL;
+ rte_mbuf_refcnt_set(mbuf, 1);
+ dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
+
+ return mbuf;
+}
+
+void
+dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
+ void **bufs, int num_bufs)
+{
+ struct rte_mbuf *mbuf;
+ struct dpaa_bp_info *bp_info;
+ const struct qm_fd *fd;
+ void *ptr;
+ struct dpaa_if *dpaa_intf;
+ uint16_t offset, i;
+ uint32_t length;
+ uint8_t format;
+
+ if (dpaa_svr_family != SVR_LS1046A_FAMILY) {
+ bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid);
+ ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd));
+ rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
+ bufs[0] = (struct rte_mbuf *)((char *)ptr -
+ bp_info->meta_data_size);
+ }
+
+ for (i = 0; i < num_bufs; i++) {
+ if (dpaa_svr_family != SVR_LS1046A_FAMILY &&
+ i < num_bufs - 1) {
+ bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid);
+ ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd));
+ rte_prefetch0((void *)((uint8_t *)ptr +
+ DEFAULT_RX_ICEOF));
+ bufs[i + 1] = (struct rte_mbuf *)((char *)ptr -
+ bp_info->meta_data_size);
+ }
+
+ fd = &dqrr[i]->fd;
+ dpaa_intf = fq[0]->dpaa_intf;
+
+ format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
+ DPAA_FD_FORMAT_SHIFT;
+ if (unlikely(format == qm_fd_sg)) {
+ bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
+ continue;
+ }
+
+ offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
+ DPAA_FD_OFFSET_SHIFT;
+ length = fd->opaque & DPAA_FD_LENGTH_MASK;
+
+ mbuf = bufs[i];
+ mbuf->data_off = offset;
+ mbuf->data_len = length;
+ mbuf->pkt_len = length;
+ mbuf->port = dpaa_intf->ifid;
+
+ mbuf->nb_segs = 1;
+ mbuf->ol_flags = 0;
+ mbuf->next = NULL;
+ rte_mbuf_refcnt_set(mbuf, 1);
+ dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
+ }
+}
+
+void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)
+{
+ struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid);
+ void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd));
+
+ /* In case of LS1046, annotation stashing is disabled due to L2 cache
+ * being bottleneck in case of multicore scanario for this platform.
+ * So we prefetch the annoation beforehand, so that it is available
+ * in cache when accessed.
+ */
+ if (dpaa_svr_family == SVR_LS1046A_FAMILY)
+ rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
+
+ *bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
+}
+
+static uint16_t
+dpaa_eth_queue_portal_rx(struct qman_fq *fq,
+ struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ int ret;
+
+ if (unlikely(fq->qp == NULL)) {
+ ret = rte_dpaa_portal_fq_init((void *)0, fq);
+ if (ret) {
+ DPAA_PMD_ERR("Failure in affining portal %d", ret);
+ return 0;
+ }
+ }
+
+ return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp);
+}
+
+enum qman_cb_dqrr_result
+dpaa_rx_cb_parallel(void *event,
+ struct qman_portal *qm __always_unused,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bufs)
+{
+ u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
+ struct rte_mbuf *mbuf;
+ struct rte_event *ev = (struct rte_event *)event;
+
+ mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
+ ev->event_ptr = (void *)mbuf;
+ ev->flow_id = fq->ev.flow_id;
+ ev->sub_event_type = fq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = fq->ev.sched_type;
+ ev->queue_id = fq->ev.queue_id;
+ ev->priority = fq->ev.priority;
+ ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN;
+ mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+ *bufs = mbuf;
+
+ return qman_cb_dqrr_consume;
+}
+
+enum qman_cb_dqrr_result
+dpaa_rx_cb_atomic(void *event,
+ struct qman_portal *qm __always_unused,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bufs)
+{
+ u8 index;
+ u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
+ struct rte_mbuf *mbuf;
+ struct rte_event *ev = (struct rte_event *)event;
+
+ mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
+ ev->event_ptr = (void *)mbuf;
+ ev->flow_id = fq->ev.flow_id;
+ ev->sub_event_type = fq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = fq->ev.sched_type;
+ ev->queue_id = fq->ev.queue_id;
+ ev->priority = fq->ev.priority;
+
+ /* Save active dqrr entries */
+ index = DQRR_PTR2IDX(dqrr);
+ DPAA_PER_LCORE_DQRR_SIZE++;
+ DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
+ DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf;
+ ev->impl_opaque = index + 1;
+ mbuf->seqn = (uint32_t)index + 1;
+ *bufs = mbuf;
+
+ return qman_cb_dqrr_defer;
+}
+
+uint16_t dpaa_eth_queue_rx(void *q,
+ struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ struct qman_fq *fq = q;
+ struct qm_dqrr_entry *dq;
+ uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
+ int num_rx_bufs, ret;
+ uint32_t vdqcr_flags = 0;
+
+ if (likely(fq->is_static))
+ return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_PMD_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+
+ /* Until request for four buffers, we provide exact number of buffers.
+ * Otherwise we do not set the QM_VDQCR_EXACT flag.
+ * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
+ * requested, so we request two less in this case.
+ */
+ if (nb_bufs < 4) {
+ vdqcr_flags = QM_VDQCR_EXACT;
+ num_rx_bufs = nb_bufs;
+ } else {
+ num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+ (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2);
+ }
+ ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
+ if (ret)
+ return 0;
+
+ do {
+ dq = qman_dequeue(fq);
+ if (!dq)
+ continue;
+ bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
+ qman_dqrr_consume(fq, dq);
+ } while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+ return num_rx;
+}
+
+static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
+{
+ int ret;
+ size_t buf = 0;
+ struct bm_buffer bufs;
+
+ ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
+ if (ret <= 0) {
+ DPAA_PMD_WARN("Failed to allocate buffers %d", ret);
+ return (void *)buf;
+ }
+
+ DPAA_DP_LOG(DEBUG, "got buffer 0x%" PRIx64 " from pool %d",
+ (uint64_t)bufs.addr, bufs.bpid);
+
+ buf = (size_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr)
+ - bp_info->meta_data_size;
+ if (!buf)
+ goto out;
+
+out:
+ return (void *)buf;
+}
+
+static struct rte_mbuf *dpaa_get_dmable_mbuf(struct rte_mbuf *mbuf,
+ struct dpaa_if *dpaa_intf)
+{
+ struct rte_mbuf *dpaa_mbuf;
+
+ /* allocate pktbuffer on bpid for dpaa port */
+ dpaa_mbuf = dpaa_get_pktbuf(dpaa_intf->bp_info);
+ if (!dpaa_mbuf)
+ return NULL;
+
+ memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + RTE_PKTMBUF_HEADROOM, (void *)
+ ((uint8_t *)(mbuf->buf_addr) + mbuf->data_off), mbuf->pkt_len);
+
+ /* Copy only the required fields */
+ dpaa_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ dpaa_mbuf->pkt_len = mbuf->pkt_len;
+ dpaa_mbuf->ol_flags = mbuf->ol_flags;
+ dpaa_mbuf->packet_type = mbuf->packet_type;
+ dpaa_mbuf->tx_offload = mbuf->tx_offload;
+ rte_pktmbuf_free(mbuf);
+ return dpaa_mbuf;
+}
+
+int
+dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
+ struct qm_fd *fd,
+ uint32_t bpid)
+{
+ struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
+ struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(bpid);
+ struct rte_mbuf *temp, *mi;
+ struct qm_sg_entry *sg_temp, *sgt;
+ int i = 0;
+
+ DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");
+
+ temp = rte_pktmbuf_alloc(bp_info->mp);
+ if (!temp) {
+ DPAA_PMD_ERR("Failure in allocation of mbuf");
+ return -1;
+ }
+ if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
+ + temp->data_off)) {
+ DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
+ return -1;
+ }
+
+ fd->cmd = 0;
+ fd->opaque_addr = 0;
+
+ if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
+ if (!mbuf->packet_type) {
+ struct rte_net_hdr_lens hdr_lens;
+
+ mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
+ RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
+ | RTE_PTYPE_L4_MASK);
+ mbuf->l2_len = hdr_lens.l2_len;
+ mbuf->l3_len = hdr_lens.l3_len;
+ }
+ if (temp->data_off < DEFAULT_TX_ICEOF
+ + sizeof(struct dpaa_eth_parse_results_t))
+ temp->data_off = DEFAULT_TX_ICEOF
+ + sizeof(struct dpaa_eth_parse_results_t);
+ dcbz_64(temp->buf_addr);
+ dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
+ }
+
+ sgt = temp->buf_addr + temp->data_off;
+ fd->format = QM_FD_SG;
+ fd->addr = temp->buf_iova;
+ fd->offset = temp->data_off;
+ fd->bpid = bpid;
+ fd->length20 = mbuf->pkt_len;
+
+ while (i < DPAA_SGT_MAX_ENTRIES) {
+ sg_temp = &sgt[i++];
+ sg_temp->opaque = 0;
+ sg_temp->val = 0;
+ sg_temp->addr = cur_seg->buf_iova;
+ sg_temp->offset = cur_seg->data_off;
+ sg_temp->length = cur_seg->data_len;
+ if (RTE_MBUF_DIRECT(cur_seg)) {
+ if (rte_mbuf_refcnt_read(cur_seg) > 1) {
+ /*If refcnt > 1, invalid bpid is set to ensure
+ * buffer is not freed by HW.
+ */
+ sg_temp->bpid = 0xff;
+ rte_mbuf_refcnt_update(cur_seg, -1);
+ } else {
+ sg_temp->bpid =
+ DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
+ }
+ cur_seg = cur_seg->next;
+ } else {
+ /* Get owner MBUF from indirect buffer */
+ mi = rte_mbuf_from_indirect(cur_seg);
+ if (rte_mbuf_refcnt_read(mi) > 1) {
+ /*If refcnt > 1, invalid bpid is set to ensure
+ * owner buffer is not freed by HW.
+ */
+ sg_temp->bpid = 0xff;
+ } else {
+ sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
+ rte_mbuf_refcnt_update(mi, 1);
+ }
+ prev_seg = cur_seg;
+ cur_seg = cur_seg->next;
+ prev_seg->next = NULL;
+ rte_pktmbuf_free(prev_seg);
+ }
+ if (cur_seg == NULL) {
+ sg_temp->final = 1;
+ cpu_to_hw_sg(sg_temp);
+ break;
+ }
+ cpu_to_hw_sg(sg_temp);
+ }
+ return 0;
+}
+
+/* Handle mbufs which are not segmented (non SG) */
+static inline void
+tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
+ struct dpaa_bp_info *bp_info,
+ struct qm_fd *fd_arr)
+{
+ struct rte_mbuf *mi = NULL;
+
+ if (RTE_MBUF_DIRECT(mbuf)) {
+ if (rte_mbuf_refcnt_read(mbuf) > 1) {
+ /* In case of direct mbuf and mbuf being cloned,
+ * BMAN should _not_ release buffer.
+ */
+ DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
+ /* Buffer should be releasd by EAL */
+ rte_mbuf_refcnt_update(mbuf, -1);
+ } else {
+ /* In case of direct mbuf and no cloning, mbuf can be
+ * released by BMAN.
+ */
+ DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
+ }
+ } else {
+ /* This is data-containing core mbuf: 'mi' */
+ mi = rte_mbuf_from_indirect(mbuf);
+ if (rte_mbuf_refcnt_read(mi) > 1) {
+ /* In case of indirect mbuf, and mbuf being cloned,
+ * BMAN should _not_ release it and let EAL release
+ * it through pktmbuf_free below.
+ */
+ DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
+ } else {
+ /* In case of indirect mbuf, and no cloning, core mbuf
+ * should be released by BMAN.
+ * Increate refcnt of core mbuf so that when
+ * pktmbuf_free is called and mbuf is released, EAL
+ * doesn't try to release core mbuf which would have
+ * been released by BMAN.
+ */
+ rte_mbuf_refcnt_update(mi, 1);
+ DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
+ }
+ rte_pktmbuf_free(mbuf);
+ }
+
+ if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
+ dpaa_unsegmented_checksum(mbuf, fd_arr);
+}
+
+/* Handle all mbufs on dpaa BMAN managed pool */
+static inline uint16_t
+tx_on_dpaa_pool(struct rte_mbuf *mbuf,
+ struct dpaa_bp_info *bp_info,
+ struct qm_fd *fd_arr)
+{
+ DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);
+
+ if (mbuf->nb_segs == 1) {
+ /* Case for non-segmented buffers */
+ tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
+ } else if (mbuf->nb_segs > 1 &&
+ mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
+ if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info->bpid)) {
+ DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
+ return 1;
+ }
+ } else {
+ DPAA_PMD_DEBUG("Number of Segments not supported");
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Handle all mbufs on an external pool (non-dpaa) */
+static inline uint16_t
+tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,
+ struct qm_fd *fd_arr)
+{
+ struct dpaa_if *dpaa_intf = txq->dpaa_intf;
+ struct rte_mbuf *dmable_mbuf;
+
+ DPAA_DP_LOG(DEBUG, "Non-BMAN offloaded buffer."
+ "Allocating an offloaded buffer");
+ dmable_mbuf = dpaa_get_dmable_mbuf(mbuf, dpaa_intf);
+ if (!dmable_mbuf) {
+ DPAA_DP_LOG(DEBUG, "no dpaa buffers.");
+ return 1;
+ }
+
+ DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid);
+ if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
+ dpaa_unsegmented_checksum(mbuf, fd_arr);
+
+ return 0;
+}
+
+uint16_t
+dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ struct rte_mbuf *mbuf, *mi = NULL;
+ struct rte_mempool *mp;
+ struct dpaa_bp_info *bp_info;
+ struct qm_fd fd_arr[DPAA_TX_BURST_SIZE];
+ uint32_t frames_to_send, loop, sent = 0;
+ uint16_t state;
+ int ret;
+ uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_PMD_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+
+ DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);
+
+ while (nb_bufs) {
+ frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ?
+ DPAA_TX_BURST_SIZE : nb_bufs;
+ for (loop = 0; loop < frames_to_send; loop++) {
+ mbuf = *(bufs++);
+ if (likely(RTE_MBUF_DIRECT(mbuf))) {
+ mp = mbuf->pool;
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+ if (likely(mp->ops_index ==
+ bp_info->dpaa_ops_index &&
+ mbuf->nb_segs == 1 &&
+ rte_mbuf_refcnt_read(mbuf) == 1)) {
+ DPAA_MBUF_TO_CONTIG_FD(mbuf,
+ &fd_arr[loop], bp_info->bpid);
+ if (mbuf->ol_flags &
+ DPAA_TX_CKSUM_OFFLOAD_MASK)
+ dpaa_unsegmented_checksum(mbuf,
+ &fd_arr[loop]);
+ continue;
+ }
+ } else {
+ mi = rte_mbuf_from_indirect(mbuf);
+ mp = mi->pool;
+ }
+
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+ if (likely(mp->ops_index == bp_info->dpaa_ops_index)) {
+ state = tx_on_dpaa_pool(mbuf, bp_info,
+ &fd_arr[loop]);
+ if (unlikely(state)) {
+ /* Set frames_to_send & nb_bufs so
+ * that packets are transmitted till
+ * previous frame.
+ */
+ frames_to_send = loop;
+ nb_bufs = loop;
+ goto send_pkts;
+ }
+ } else {
+ state = tx_on_external_pool(q, mbuf,
+ &fd_arr[loop]);
+ if (unlikely(state)) {
+ /* Set frames_to_send & nb_bufs so
+ * that packets are transmitted till
+ * previous frame.
+ */
+ frames_to_send = loop;
+ nb_bufs = loop;
+ goto send_pkts;
+ }
+ }
+ seqn = mbuf->seqn;
+ if (seqn != DPAA_INVALID_MBUF_SEQN) {
+ index = seqn - 1;
+ if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
+ flags[loop] =
+ ((index & QM_EQCR_DCA_IDXMASK) << 8);
+ flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
+ DPAA_PER_LCORE_DQRR_SIZE--;
+ DPAA_PER_LCORE_DQRR_HELD &=
+ ~(1 << index);
+ }
+ }
+ }
+
+send_pkts:
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qman_enqueue_multi(q, &fd_arr[loop],
+ &flags[loop],
+ frames_to_send - loop);
+ }
+ nb_bufs -= frames_to_send;
+ sent += frames_to_send;
+ }
+
+ DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);
+
+ return sent;
+}
+
+uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
+ struct rte_mbuf **bufs __rte_unused,
+ uint16_t nb_bufs __rte_unused)
+{
+ DPAA_DP_LOG(DEBUG, "Drop all packets");
+
+ /* Drop all incoming packets. No need to free packets here
+ * because the rte_eth f/w frees up the packets through tx_buffer
+ * callback in case this functions returns count less than nb_bufs
+ */
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.h b/src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.h
new file mode 100644
index 00000000..d3e63516
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPDK_RXTX_H__
+#define __DPDK_RXTX_H__
+
+/* internal offset from where IC is copied to packet buffer*/
+#define DEFAULT_ICIOF 32
+/* IC transfer size */
+#define DEFAULT_ICSZ 48
+
+/* IC offsets from buffer header address */
+#define DEFAULT_RX_ICEOF 16
+#define DEFAULT_TX_ICEOF 16
+
+/*
+ * Values for the L3R field of the FM Parse Results
+ */
+/* L3 Type field: First IP Present IPv4 */
+#define DPAA_L3_PARSE_RESULT_IPV4 0x80
+/* L3 Type field: First IP Present IPv6 */
+#define DPAA_L3_PARSE_RESULT_IPV6 0x40
+/* Values for the L4R field of the FM Parse Results
+ * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
+ */
+/* L4 Type field: UDP */
+#define DPAA_L4_PARSE_RESULT_UDP 0x40
+/* L4 Type field: TCP */
+#define DPAA_L4_PARSE_RESULT_TCP 0x20
+
+#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+
+#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
+ /** <Maximum number of frames to be dequeued in a single rx call*/
+
+/* FD structure masks and offset */
+#define DPAA_FD_FORMAT_MASK 0xE0000000
+#define DPAA_FD_OFFSET_MASK 0x1FF00000
+#define DPAA_FD_LENGTH_MASK 0xFFFFF
+#define DPAA_FD_FORMAT_SHIFT 29
+#define DPAA_FD_OFFSET_SHIFT 20
+
+/* Parsing mask (Little Endian) - 0x00E044ED00800000
+ * Classification Plan ID 0x00
+ * L4R 0xE0 -
+ * 0x20 - TCP
+ * 0x40 - UDP
+ * 0x80 - SCTP
+ * L3R 0xEDC4 (in Big Endian) -
+ * 0x8000 - IPv4
+ * 0x4000 - IPv6
+ * 0x8140 - IPv4 Ext + Frag
+ * 0x8040 - IPv4 Frag
+ * 0x8100 - IPv4 Ext
+ * 0x4140 - IPv6 Ext + Frag
+ * 0x4040 - IPv6 Frag
+ * 0x4100 - IPv6 Ext
+ * L2R 0x8000 (in Big Endian) -
+ * 0x8000 - Ethernet type
+ * ShimR & Logical Port ID 0x0000
+ */
+#define DPAA_PARSE_MASK 0x00E044ED00800000
+#define DPAA_PARSE_VLAN_MASK 0x0000000000700000
+
+/* Parsed values (Little Endian) */
+#define DPAA_PKT_TYPE_NONE 0x0000000000000000
+#define DPAA_PKT_TYPE_ETHER 0x0000000000800000
+#define DPAA_PKT_TYPE_IPV4 \
+ (0x0000008000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_IPV6 \
+ (0x0000004000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_GRE \
+ (0x0000002000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_IPV4_FRAG \
+ (0x0000400000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_FRAG \
+ (0x0000400000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_EXT \
+ (0x0000000100000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_EXT \
+ (0x0000000100000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_SCTP \
+ (0x0080000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_SCTP \
+ (0x0080000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_FRAG_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_FRAG_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_FRAG_SCTP \
+ (0x0080000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_SCTP \
+ (0x0080000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_EXT_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV4_EXT)
+#define DPAA_PKT_TYPE_IPV6_EXT_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV6_EXT)
+#define DPAA_PKT_TYPE_IPV4_EXT_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV4_EXT)
+#define DPAA_PKT_TYPE_IPV6_EXT_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV6_EXT)
+#define DPAA_PKT_TYPE_TUNNEL_4_4 \
+ (0x0000000800000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6 \
+ (0x0000000400000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6 \
+ (0x0000000400000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_TUNNEL_6_4 \
+ (0x0000000800000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_TUNNEL_4_4_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_4_4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_6_6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_4_6)
+#define DPAA_PKT_TYPE_TUNNEL_6_4_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_6_4)
+#define DPAA_PKT_TYPE_TUNNEL_4_4_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_4_4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_6_6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_4_6)
+#define DPAA_PKT_TYPE_TUNNEL_6_4_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_6_4)
+#define DPAA_PKT_L3_LEN_SHIFT 7
+
+/**
+ * FMan parse result array
+ */
+struct dpaa_eth_parse_results_t {
+ uint8_t lpid; /**< Logical port id */
+ uint8_t shimr; /**< Shim header result */
+ union {
+ uint16_t l2r; /**< Layer 2 result */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint16_t ethernet:1;
+ uint16_t vlan:1;
+ uint16_t llc_snap:1;
+ uint16_t mpls:1;
+ uint16_t ppoe_ppp:1;
+ uint16_t unused_1:3;
+ uint16_t unknown_eth_proto:1;
+ uint16_t eth_frame_type:2;
+ uint16_t l2r_err:5;
+ /*00-unicast, 01-multicast, 11-broadcast*/
+#else
+ uint16_t l2r_err:5;
+ uint16_t eth_frame_type:2;
+ uint16_t unknown_eth_proto:1;
+ uint16_t unused_1:3;
+ uint16_t ppoe_ppp:1;
+ uint16_t mpls:1;
+ uint16_t llc_snap:1;
+ uint16_t vlan:1;
+ uint16_t ethernet:1;
+#endif
+ } __attribute__((__packed__));
+ } __attribute__((__packed__));
+ union {
+ uint16_t l3r; /**< Layer 3 result */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint16_t first_ipv4:1;
+ uint16_t first_ipv6:1;
+ uint16_t gre:1;
+ uint16_t min_enc:1;
+ uint16_t last_ipv4:1;
+ uint16_t last_ipv6:1;
+ uint16_t first_info_err:1;/*0 info, 1 error*/
+ uint16_t first_ip_err_code:5;
+ uint16_t last_info_err:1; /*0 info, 1 error*/
+ uint16_t last_ip_err_code:3;
+#else
+ uint16_t last_ip_err_code:3;
+ uint16_t last_info_err:1; /*0 info, 1 error*/
+ uint16_t first_ip_err_code:5;
+ uint16_t first_info_err:1;/*0 info, 1 error*/
+ uint16_t last_ipv6:1;
+ uint16_t last_ipv4:1;
+ uint16_t min_enc:1;
+ uint16_t gre:1;
+ uint16_t first_ipv6:1;
+ uint16_t first_ipv4:1;
+#endif
+ } __attribute__((__packed__));
+ } __attribute__((__packed__));
+ union {
+ uint8_t l4r; /**< Layer 4 result */
+ struct{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint8_t l4_type:3;
+ uint8_t l4_info_err:1;
+ uint8_t l4_result:4;
+ /* if type IPSec: 1 ESP, 2 AH */
+#else
+ uint8_t l4_result:4;
+ /* if type IPSec: 1 ESP, 2 AH */
+ uint8_t l4_info_err:1;
+ uint8_t l4_type:3;
+#endif
+ } __attribute__((__packed__));
+ } __attribute__((__packed__));
+ uint8_t cplan; /**< Classification plan id */
+ uint16_t nxthdr; /**< Next Header */
+ uint16_t cksum; /**< Checksum */
+ uint32_t lcv; /**< LCV */
+ uint8_t shim_off[3]; /**< Shim offset */
+ uint8_t eth_off; /**< ETH offset */
+ uint8_t llc_snap_off; /**< LLC_SNAP offset */
+ uint8_t vlan_off[2]; /**< VLAN offset */
+ uint8_t etype_off; /**< ETYPE offset */
+ uint8_t pppoe_off; /**< PPP offset */
+ uint8_t mpls_off[2]; /**< MPLS offset */
+ uint8_t ip_off[2]; /**< IP offset */
+ uint8_t gre_off; /**< GRE offset */
+ uint8_t l4_off; /**< Layer 4 offset */
+ uint8_t nxthdr_off; /**< Parser end point */
+} __attribute__ ((__packed__));
+
+/* The structure is the Prepended Data to the Frame which is used by FMAN */
+struct annotations_t {
+ uint8_t reserved[DEFAULT_RX_ICEOF];
+ struct dpaa_eth_parse_results_t parse; /**< Pointer to Parsed result*/
+ uint64_t reserved1;
+ uint64_t hash; /**< Hash Result */
+};
+
+#define GET_ANNOTATIONS(_buf) \
+ (struct annotations_t *)(_buf)
+
+#define GET_RX_PRS(_buf) \
+ (struct dpaa_eth_parse_results_t *)((uint8_t *)(_buf) + \
+ DEFAULT_RX_ICEOF)
+
+#define GET_TX_PRS(_buf) \
+ (struct dpaa_eth_parse_results_t *)((uint8_t *)(_buf) + \
+ DEFAULT_TX_ICEOF)
+
+uint16_t dpaa_eth_queue_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
+
+uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
+
+uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
+ struct rte_mbuf **bufs __rte_unused,
+ uint16_t nb_bufs __rte_unused);
+
+struct rte_mbuf *dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid);
+
+int dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
+ struct qm_fd *fd,
+ uint32_t bpid);
+
+void dpaa_rx_cb(struct qman_fq **fq,
+ struct qm_dqrr_entry **dqrr, void **bufs, int num_bufs);
+
+void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/dpaa/meson.build b/src/spdk/dpdk/drivers/net/dpaa/meson.build
new file mode 100644
index 00000000..62dec7b0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+deps += ['mempool_dpaa']
+
+sources = files('dpaa_ethdev.c',
+ 'dpaa_rxtx.c')
+
+allow_experimental_apis = true
+
+install_headers('rte_pmd_dpaa.h')
diff --git a/src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa.h b/src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa.h
new file mode 100644
index 00000000..37eea9b0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _PMD_DPAA_H_
+#define _PMD_DPAA_H_
+
+/**
+ * @file rte_pmd_dpaa.h
+ *
+ * NXP dpaa PMD specific functions.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ */
+
+#include <rte_ethdev_driver.h>
+
+/**
+ * Enable/Disable TX loopback
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Enable TX loopback.
+ * 0 - Disable TX loopback.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on);
+
+#endif /* _PMD_DPAA_H_ */
diff --git a/src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa_version.map b/src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa_version.map
new file mode 100644
index 00000000..8cb4500b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa_version.map
@@ -0,0 +1,12 @@
+DPDK_17.11 {
+
+ local: *;
+};
+
+DPDK_18.08 {
+ global:
+
+ dpaa_eth_eventq_attach;
+ dpaa_eth_eventq_detach;
+ rte_pmd_dpaa_set_tx_loopback;
+} DPDK_17.11;
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/Makefile b/src/spdk/dpdk/drivers/net/dpaa2/Makefile
new file mode 100644
index 00000000..9b0b1433
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/Makefile
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright 2016 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa2_version.map
+
+# library version
+LIBABIVER := 1
+
+# depends on fslmc bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpni.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpkg.c
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
new file mode 100644
index 00000000..713a41bf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+
+#include <dpaa2_pmd_logs.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+
+#include "../dpaa2_ethdev.h"
+
+static int
+dpaa2_distset_to_dpkg_profile_cfg(
+ uint64_t req_dist_set,
+ struct dpkg_profile_cfg *kg_cfg);
+
+int
+dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
+ uint64_t req_dist_set)
+{
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct dpni_rx_tc_dist_cfg tc_cfg;
+ struct dpkg_profile_cfg kg_cfg;
+ void *p_params;
+ int ret, tc_index = 0;
+
+ p_params = rte_malloc(
+ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+ if (!p_params) {
+ DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
+ return -ENOMEM;
+ }
+ memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+
+ ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported",
+ req_dist_set);
+ rte_free(p_params);
+ return ret;
+ }
+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+ tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
+ tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+ ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
+ if (ret) {
+ DPAA2_PMD_ERR("Unable to prepare extract parameters");
+ rte_free(p_params);
+ return ret;
+ }
+
+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
+ &tc_cfg);
+ rte_free(p_params);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Setting distribution for Rx failed with err: %d",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int dpaa2_remove_flow_dist(
+ struct rte_eth_dev *eth_dev,
+ uint8_t tc_index)
+{
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct dpni_rx_tc_dist_cfg tc_cfg;
+ struct dpkg_profile_cfg kg_cfg;
+ void *p_params;
+ int ret;
+
+ p_params = rte_malloc(
+ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+ if (!p_params) {
+ DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
+ return -ENOMEM;
+ }
+ memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+ kg_cfg.num_extracts = 0;
+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+ tc_cfg.dist_size = 0;
+ tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;
+
+ ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
+ if (ret) {
+ DPAA2_PMD_ERR("Unable to prepare extract parameters");
+ rte_free(p_params);
+ return ret;
+ }
+
+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
+ &tc_cfg);
+ rte_free(p_params);
+ if (ret)
+ DPAA2_PMD_ERR(
+ "Setting distribution for Rx failed with err: %d",
+ ret);
+ return ret;
+}
+
+static int
+dpaa2_distset_to_dpkg_profile_cfg(
+ uint64_t req_dist_set,
+ struct dpkg_profile_cfg *kg_cfg)
+{
+ uint32_t loop = 0, i = 0, dist_field = 0;
+ int l2_configured = 0, l3_configured = 0;
+ int l4_configured = 0, sctp_configured = 0;
+
+ memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
+ while (req_dist_set) {
+ if (req_dist_set % 2 != 0) {
+ dist_field = 1U << loop;
+ switch (dist_field) {
+ case ETH_RSS_L2_PAYLOAD:
+
+ if (l2_configured)
+ break;
+ l2_configured = 1;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_ETH;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_ETH_TYPE;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+ break;
+
+ case ETH_RSS_IPV4:
+ case ETH_RSS_FRAG_IPV4:
+ case ETH_RSS_NONFRAG_IPV4_OTHER:
+ case ETH_RSS_IPV6:
+ case ETH_RSS_FRAG_IPV6:
+ case ETH_RSS_NONFRAG_IPV6_OTHER:
+ case ETH_RSS_IPV6_EX:
+
+ if (l3_configured)
+ break;
+ l3_configured = 1;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_IP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_IP_SRC;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_IP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_IP_DST;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_IP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_IP_PROTO;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ kg_cfg->num_extracts++;
+ i++;
+ break;
+
+ case ETH_RSS_NONFRAG_IPV4_TCP:
+ case ETH_RSS_NONFRAG_IPV6_TCP:
+ case ETH_RSS_NONFRAG_IPV4_UDP:
+ case ETH_RSS_NONFRAG_IPV6_UDP:
+ case ETH_RSS_IPV6_TCP_EX:
+ case ETH_RSS_IPV6_UDP_EX:
+
+ if (l4_configured)
+ break;
+ l4_configured = 1;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_TCP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_TCP_PORT_SRC;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_TCP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_TCP_PORT_SRC;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+ break;
+
+ case ETH_RSS_NONFRAG_IPV4_SCTP:
+ case ETH_RSS_NONFRAG_IPV6_SCTP:
+
+ if (sctp_configured)
+ break;
+ sctp_configured = 1;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_SCTP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_SCTP_PORT_SRC;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_SCTP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_SCTP_PORT_DST;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+ break;
+
+ default:
+ DPAA2_PMD_WARN(
+ "Unsupported flow dist option %x",
+ dist_field);
+ return -EINVAL;
+ }
+ }
+ req_dist_set = req_dist_set >> 1;
+ loop++;
+ }
+ kg_cfg->num_extracts = i;
+ return 0;
+}
+
+int
+dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
+ void *blist)
+{
+ /* Function to attach a DPNI with a buffer pool list. Buffer pool list
+ * handle is passed in blist.
+ */
+ int32_t retcode;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct dpni_pools_cfg bpool_cfg;
+ struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
+ struct dpni_buffer_layout layout;
+ int tot_size;
+
+ /* ... rx buffer layout .
+ * Check alignment for buffer layouts first
+ */
+
+ /* ... rx buffer layout ... */
+ tot_size = RTE_PKTMBUF_HEADROOM;
+ tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN);
+
+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+ layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+
+ layout.pass_frame_status = 1;
+ layout.private_data_size = DPAA2_FD_PTA_SIZE;
+ layout.pass_parser_result = 1;
+ layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN;
+ layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE -
+ DPAA2_MBUF_HW_ANNOTATION;
+ retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_RX, &layout);
+ if (retcode) {
+ DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)",
+ retcode);
+ return retcode;
+ }
+
+ /*Attach buffer pool to the network interface as described by the user*/
+ bpool_cfg.num_dpbp = 1;
+ bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
+ bpool_cfg.pools[0].backup_pool = 0;
+ bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size,
+ DPAA2_PACKET_LAYOUT_ALIGN);
+ bpool_cfg.pools[0].priority_mask = 0;
+
+ retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
+ if (retcode != 0) {
+ DPAA2_PMD_ERR("Error configuring buffer pool on interface."
+ " bpid = %d error code = %d",
+ bpool_cfg.pools[0].dpbp_id, retcode);
+ return retcode;
+ }
+
+ priv->bp_list = bp_list;
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h b/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
new file mode 100644
index 00000000..779cdf2b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+/**
+ * @file
+ *
+ * DPNI packet parse results - implementation internal
+ */
+
+#ifndef _DPAA2_HW_DPNI_ANNOT_H_
+#define _DPAA2_HW_DPNI_ANNOT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Annotation valid bits in FD FRC */
+#define DPAA2_FD_FRC_FASV 0x8000
+#define DPAA2_FD_FRC_FAEADV 0x4000
+#define DPAA2_FD_FRC_FAPRV 0x2000
+#define DPAA2_FD_FRC_FAIADV 0x1000
+#define DPAA2_FD_FRC_FASWOV 0x0800
+#define DPAA2_FD_FRC_FAICFDV 0x0400
+
+/* Annotation bits in FD CTRL */
+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
+#define DPAA2_FD_CTRL_PTA 0x00800000
+#define DPAA2_FD_CTRL_PTV1 0x00400000
+
+/* Frame annotation status */
+struct dpaa2_fas {
+ uint8_t reserved;
+ uint8_t ppid;
+ __le16 ifpid;
+ __le32 status;
+} __attribute__((__packed__));
+
+/**
+ * HW Packet Annotation Register structures
+ */
+struct dpaa2_annot_hdr {
+ /**< word1: Frame Annotation Status (8 bytes)*/
+ uint64_t word1;
+
+ /**< word2: Time Stamp (8 bytes)*/
+ uint64_t word2;
+
+ /**< word3: Next Hdr + FAF Extension + FAF (2 + 2 + 4 bytes)*/
+ uint64_t word3;
+
+ /**< word4: Frame Annotation Flags-FAF (8 bytes) */
+ uint64_t word4;
+
+ /**< word5:
+ * ShimOffset_1 + ShimOffset_2 + IPPIDOffset + EthOffset +
+ * LLC+SNAPOffset + VLANTCIOffset_1 + VLANTCIOffset_n +
+ * LastETypeOffset (1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes)
+ */
+ uint64_t word5;
+
+ /**< word6:
+ * PPPoEOffset + MPLSOffset_1 + MPLSOffset_n + ARPorIPOffset_1
+ * + IPOffset_norMInEncapO + GREOffset + L4Offset +
+ * GTPorESPorIPSecOffset(1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes)
+ */
+ uint64_t word6;
+
+ /**< word7:
+ * RoutingHdrOfset1 + RoutingHdrOfset2 + NxtHdrOffset
+ * + IPv6FragOffset + GrossRunningSum
+ * + RunningSum(1 + 1 + 1 + 1 + 2 + 2 bytes)
+ */
+ uint64_t word7;
+
+ /**< word8:
+ * ParseErrorcode + Soft Parsing Context (1 + 7 bytes)
+ */
+ uint64_t word8;
+};
+
+/**
+ * Internal Macros to get/set Packet annotation header
+ */
+
+/** General Macro to define a particular bit position*/
+#define BIT_POS(x) ((uint64_t)1 << ((x)))
+/** Set a bit in the variable */
+#define BIT_SET_AT_POS(var, pos) ((var) |= (pos))
+/** Reset the bit in the variable */
+#define BIT_RESET_AT_POS(var, pos) ((var) &= ~(pos))
+/** Check the bit is set in the variable */
+#define BIT_ISSET_AT_POS(var, pos) (((var) & (pos)) ? 1 : 0)
+/**
+ * Macrso to define bit position in word3
+ */
+#define NEXT_HDR(var) ((uint64_t)(var) & 0xFFFF000000000000)
+#define FAF_EXTN_IPV6_ROUTE_HDR_PRESENT(var) BIT_POS(16)
+#define FAF_EXTN_RESERVED(var) ((uint64_t)(var) & 0x00007FFF00000000)
+#define FAF_USER_DEFINED_RESERVED(var) ((uint64_t)(var) & 0x00000000FF000000)
+#define SHIM_SHELL_SOFT_PARSING_ERRROR BIT_POS(23)
+#define PARSING_ERROR BIT_POS(22)
+#define L2_ETH_MAC_PRESENT BIT_POS(21)
+#define L2_ETH_MAC_UNICAST BIT_POS(20)
+#define L2_ETH_MAC_MULTICAST BIT_POS(19)
+#define L2_ETH_MAC_BROADCAST BIT_POS(18)
+#define L2_ETH_FRAME_IS_BPDU BIT_POS(17)
+#define L2_ETH_FCOE_PRESENT BIT_POS(16)
+#define L2_ETH_FIP_PRESENT BIT_POS(15)
+#define L2_ETH_PARSING_ERROR BIT_POS(14)
+#define L2_LLC_SNAP_PRESENT BIT_POS(13)
+#define L2_UNKNOWN_LLC_OUI BIT_POS(12)
+#define L2_LLC_SNAP_ERROR BIT_POS(11)
+#define L2_VLAN_1_PRESENT BIT_POS(10)
+#define L2_VLAN_N_PRESENT BIT_POS(9)
+#define L2_VLAN_CFI_BIT_PRESENT BIT_POS(8)
+#define L2_VLAN_PARSING_ERROR BIT_POS(7)
+#define L2_PPPOE_PPP_PRESENT BIT_POS(6)
+#define L2_PPPOE_PPP_PARSING_ERROR BIT_POS(5)
+#define L2_MPLS_1_PRESENT BIT_POS(4)
+#define L2_MPLS_N_PRESENT BIT_POS(3)
+#define L2_MPLS_PARSING_ERROR BIT_POS(2)
+#define L2_ARP_PRESENT BIT_POS(1)
+#define L2_ARP_PARSING_ERROR BIT_POS(0)
+/**
+ * Macrso to define bit position in word4
+ */
+#define L2_UNKNOWN_PROTOCOL BIT_POS(63)
+#define L2_SOFT_PARSING_ERROR BIT_POS(62)
+#define L3_IPV4_1_PRESENT BIT_POS(61)
+#define L3_IPV4_1_UNICAST BIT_POS(60)
+#define L3_IPV4_1_MULTICAST BIT_POS(59)
+#define L3_IPV4_1_BROADCAST BIT_POS(58)
+#define L3_IPV4_N_PRESENT BIT_POS(57)
+#define L3_IPV4_N_UNICAST BIT_POS(56)
+#define L3_IPV4_N_MULTICAST BIT_POS(55)
+#define L3_IPV4_N_BROADCAST BIT_POS(54)
+#define L3_IPV6_1_PRESENT BIT_POS(53)
+#define L3_IPV6_1_UNICAST BIT_POS(52)
+#define L3_IPV6_1_MULTICAST BIT_POS(51)
+#define L3_IPV6_N_PRESENT BIT_POS(50)
+#define L3_IPV6_N_UNICAST BIT_POS(49)
+#define L3_IPV6_N_MULTICAST BIT_POS(48)
+#define L3_IP_1_OPT_PRESENT BIT_POS(47)
+#define L3_IP_1_UNKNOWN_PROTOCOL BIT_POS(46)
+#define L3_IP_1_MORE_FRAGMENT BIT_POS(45)
+#define L3_IP_1_FIRST_FRAGMENT BIT_POS(44)
+#define L3_IP_1_PARSING_ERROR BIT_POS(43)
+#define L3_IP_N_OPT_PRESENT BIT_POS(42)
+#define L3_IP_N_UNKNOWN_PROTOCOL BIT_POS(41)
+#define L3_IP_N_MORE_FRAGMENT BIT_POS(40)
+#define L3_IP_N_FIRST_FRAGMENT BIT_POS(39)
+#define L3_PROTO_ICMP_PRESENT BIT_POS(38)
+#define L3_PROTO_IGMP_PRESENT BIT_POS(37)
+#define L3_PROTO_ICMPV6_PRESENT BIT_POS(36)
+#define L3_PROTO_UDP_LIGHT_PRESENT BIT_POS(35)
+#define L3_IP_N_PARSING_ERROR BIT_POS(34)
+#define L3_MIN_ENCAP_PRESENT BIT_POS(33)
+#define L3_MIN_ENCAP_SBIT_PRESENT BIT_POS(32)
+#define L3_MIN_ENCAP_PARSING_ERROR BIT_POS(31)
+#define L3_PROTO_GRE_PRESENT BIT_POS(30)
+#define L3_PROTO_GRE_RBIT_PRESENT BIT_POS(29)
+#define L3_PROTO_GRE_PARSING_ERROR BIT_POS(28)
+#define L3_IP_UNKNOWN_PROTOCOL BIT_POS(27)
+#define L3_SOFT_PARSING_ERROR BIT_POS(26)
+#define L3_PROTO_UDP_PRESENT BIT_POS(25)
+#define L3_PROTO_UDP_PARSING_ERROR BIT_POS(24)
+#define L3_PROTO_TCP_PRESENT BIT_POS(23)
+#define L3_PROTO_TCP_OPT_PRESENT BIT_POS(22)
+#define L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT BIT_POS(21)
+#define L3_PROTO_TCP_CTRL_BIT_3_TO_5_PRESENT BIT_POS(20)
+#define L3_PROTO_TCP_PARSING_ERROR BIT_POS(19)
+#define L3_PROTO_IPSEC_PRESENT BIT_POS(18)
+#define L3_PROTO_IPSEC_ESP_PRESENT BIT_POS(17)
+#define L3_PROTO_IPSEC_AH_PRESENT BIT_POS(16)
+#define L3_PROTO_IPSEC_PARSING_ERROR BIT_POS(15)
+#define L3_PROTO_SCTP_PRESENT BIT_POS(14)
+#define L3_PROTO_SCTP_PARSING_ERROR BIT_POS(13)
+#define L3_PROTO_DCCP_PRESENT BIT_POS(12)
+#define L3_PROTO_DCCP_PARSING_ERROR BIT_POS(11)
+#define L4_UNKNOWN_PROTOCOL BIT_POS(10)
+#define L4_SOFT_PARSING_ERROR BIT_POS(9)
+#define L3_PROTO_GTP_PRESENT BIT_POS(8)
+#define L3_PROTO_GTP_PARSING_ERROR BIT_POS(7)
+#define L3_PROTO_ESP_PRESENT BIT_POS(6)
+#define L3_PROTO_ESP_PARSING_ERROR BIT_POS(5)
+#define L3_PROTO_ISCSI_PRESENT BIT_POS(4)
+#define L3_PROTO_CAPWAN__CTRL_PRESENT BIT_POS(3)
+#define L3_PROTO_CAPWAN__DATA_PRESENT BIT_POS(2)
+#define L5_SOFT_PARSING_ERROR BIT_POS(1)
+#define L3_IPV6_ROUTE_HDR_PRESENT BIT_POS(0)
+
+#define DPAA2_L3_IPv4 (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \
+ L3_IP_1_UNKNOWN_PROTOCOL | L3_IP_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv6 (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \
+ L3_IP_1_UNKNOWN_PROTOCOL | L3_IP_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv4_TCP (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \
+ L3_PROTO_TCP_PRESENT | L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT | \
+ L4_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv4_UDP (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \
+ L3_PROTO_UDP_PRESENT | L4_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv6_TCP (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \
+ L3_PROTO_TCP_PRESENT | L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT | \
+ L4_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv6_UDP (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \
+ L3_PROTO_UDP_PRESENT | L4_UNKNOWN_PROTOCOL)
+
+/* Debug frame, otherwise supposed to be discarded */
+#define DPAA2_ETH_FAS_DISC 0x80000000
+/* MACSEC frame */
+#define DPAA2_ETH_FAS_MS 0x40000000
+#define DPAA2_ETH_FAS_PTP 0x08000000
+/* Ethernet multicast frame */
+#define DPAA2_ETH_FAS_MC 0x04000000
+/* Ethernet broadcast frame */
+#define DPAA2_ETH_FAS_BC 0x02000000
+#define DPAA2_ETH_FAS_KSE 0x00040000
+#define DPAA2_ETH_FAS_EOFHE 0x00020000
+#define DPAA2_ETH_FAS_MNLE 0x00010000
+#define DPAA2_ETH_FAS_TIDE 0x00008000
+#define DPAA2_ETH_FAS_PIEE 0x00004000
+/* Frame length error */
+#define DPAA2_ETH_FAS_FLE 0x00002000
+/* Frame physical error; our favourite pastime */
+#define DPAA2_ETH_FAS_FPE 0x00001000
+#define DPAA2_ETH_FAS_PTE 0x00000080
+#define DPAA2_ETH_FAS_ISP 0x00000040
+#define DPAA2_ETH_FAS_PHE 0x00000020
+#define DPAA2_ETH_FAS_BLE 0x00000010
+/* L3 csum validation performed */
+#define DPAA2_ETH_FAS_L3CV 0x00000008
+/* L3 csum error */
+#define DPAA2_ETH_FAS_L3CE 0x00000004
+/* L4 csum validation performed */
+#define DPAA2_ETH_FAS_L4CV 0x00000002
+/* L4 csum error */
+#define DPAA2_ETH_FAS_L4CE 0x00000001
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c
new file mode 100644
index 00000000..c5047367
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -0,0 +1,2061 @@
+/* * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_fslmc.h>
+
+#include "dpaa2_pmd_logs.h"
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+#include <dpaa2_hw_dpio.h>
+#include <mc/fsl_dpmng.h>
+#include "dpaa2_ethdev.h"
+#include <fsl_qbman_debug.h>
+
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_MT_LOCKFREE |
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+struct rte_dpaa2_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint8_t page_id; /* dpni statistics page id */
+ uint8_t stats_id; /* stats id in the given page */
+};
+
+static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
+ {"ingress_multicast_frames", 0, 2},
+ {"ingress_multicast_bytes", 0, 3},
+ {"ingress_broadcast_frames", 0, 4},
+ {"ingress_broadcast_bytes", 0, 5},
+ {"egress_multicast_frames", 1, 2},
+ {"egress_multicast_bytes", 1, 3},
+ {"egress_broadcast_frames", 1, 4},
+ {"egress_broadcast_bytes", 1, 5},
+ {"ingress_filtered_frames", 2, 0},
+ {"ingress_discarded_frames", 2, 1},
+ {"ingress_nobuffer_discards", 2, 2},
+ {"egress_discarded_frames", 2, 3},
+ {"egress_confirmed_frames", 2, 4},
+};
+
+static struct rte_dpaa2_driver rte_dpaa2_pmd;
+static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
+static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
+static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
+static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
+int dpaa2_logtype_pmd;
+
+static int
+dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return -1;
+ }
+
+ if (on)
+ ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
+ priv->token, vlan_id);
+ else
+ ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
+ priv->token, vlan_id);
+
+ if (ret < 0)
+ DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
+ ret, vlan_id, priv->hw_id);
+
+ return ret;
+}
+
+static int
+dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ /* VLAN Filter not avaialble */
+ if (!priv->max_vlan_filters) {
+ DPAA2_PMD_INFO("VLAN filter not available");
+ goto next_mask;
+ }
+
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
+ ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
+ priv->token, true);
+ else
+ ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
+ priv->token, false);
+ if (ret < 0)
+ DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
+ }
+next_mask:
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND)
+ DPAA2_PMD_INFO("VLAN extend offload not supported");
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_fw_version_get(struct rte_eth_dev *dev,
+ char *fw_version,
+ size_t fw_size)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct mc_soc_version mc_plat_info = {0};
+ struct mc_version mc_ver_info = {0};
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
+ DPAA2_PMD_WARN("\tmc_get_soc_version failed");
+
+ if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
+ DPAA2_PMD_WARN("\tmc_get_version failed");
+
+ ret = snprintf(fw_version, fw_size,
+ "%x-%d.%d.%d",
+ mc_plat_info.svr,
+ mc_ver_info.major,
+ mc_ver_info.minor,
+ mc_ver_info.revision);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (uint32_t)ret)
+ return ret;
+ else
+ return 0;
+}
+
+static void
+dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev_info->if_index = priv->hw_id;
+
+ dev_info->max_mac_addrs = priv->max_mac_filters;
+ dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
+ dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
+ dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
+ dev_info->rx_offload_capa = dev_rx_offloads_sup |
+ dev_rx_offloads_nodis;
+ dev_info->tx_offload_capa = dev_tx_offloads_sup |
+ dev_tx_offloads_nodis;
+ dev_info->speed_capa = ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G |
+ ETH_LINK_SPEED_10G;
+
+ dev_info->max_hash_mac_addrs = 0;
+ dev_info->max_vfs = 0;
+ dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
+}
+
+static int
+dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ uint16_t dist_idx;
+ uint32_t vq_id;
+ struct dpaa2_queue *mc_q, *mcq;
+ uint32_t tot_queues;
+ int i;
+ struct dpaa2_queue *dpaa2_q;
+
+ PMD_INIT_FUNC_TRACE();
+
+ tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
+ mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (!mc_q) {
+ DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
+ return -1;
+ }
+
+ for (i = 0; i < priv->nb_rx_queues; i++) {
+ mc_q->dev = dev;
+ priv->rx_vq[i] = mc_q++;
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
+ dpaa2_q->q_storage = rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!dpaa2_q->q_storage)
+ goto fail;
+
+ memset(dpaa2_q->q_storage, 0,
+ sizeof(struct queue_storage_info_t));
+ if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+ goto fail;
+ }
+
+ for (i = 0; i < priv->nb_tx_queues; i++) {
+ mc_q->dev = dev;
+ mc_q->flow_id = 0xffff;
+ priv->tx_vq[i] = mc_q++;
+ dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+ dpaa2_q->cscn = rte_malloc(NULL,
+ sizeof(struct qbman_result), 16);
+ if (!dpaa2_q->cscn)
+ goto fail_tx;
+ }
+
+ vq_id = 0;
+ for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
+ mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
+ mcq->tc_index = DPAA2_DEF_TC;
+ mcq->flow_id = dist_idx;
+ vq_id++;
+ }
+
+ return 0;
+fail_tx:
+ i -= 1;
+ while (i >= 0) {
+ dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+ rte_free(dpaa2_q->cscn);
+ priv->tx_vq[i--] = NULL;
+ }
+ i = priv->nb_rx_queues;
+fail:
+ i -= 1;
+ mc_q = priv->rx_vq[0];
+ while (i >= 0) {
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
+ dpaa2_free_dq_storage(dpaa2_q->q_storage);
+ rte_free(dpaa2_q->q_storage);
+ priv->rx_vq[i--] = NULL;
+ }
+ rte_free(mc_q);
+ return -1;
+}
+
+static int
+dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+ uint64_t rx_offloads = eth_conf->rxmode.offloads;
+ uint64_t tx_offloads = eth_conf->txmode.offloads;
+ int rx_l3_csum_offload = false;
+ int rx_l4_csum_offload = false;
+ int tx_l3_csum_offload = false;
+ int tx_l4_csum_offload = false;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Rx offloads validation */
+ if (dev_rx_offloads_nodis & ~rx_offloads) {
+ DPAA2_PMD_WARN(
+ "Rx offloads non configurable - requested 0x%" PRIx64
+ " ignored 0x%" PRIx64,
+ rx_offloads, dev_rx_offloads_nodis);
+ }
+
+ /* Tx offloads validation */
+ if (dev_tx_offloads_nodis & ~tx_offloads) {
+ DPAA2_PMD_WARN(
+ "Tx offloads non configurable - requested 0x%" PRIx64
+ " ignored 0x%" PRIx64,
+ tx_offloads, dev_tx_offloads_nodis);
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
+ ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
+ priv->token, eth_conf->rxmode.max_rx_pkt_len);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Unable to set mtu. check config");
+ return ret;
+ }
+ } else {
+ return -1;
+ }
+ }
+
+ if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ ret = dpaa2_setup_flow_dist(dev,
+ eth_conf->rx_adv_conf.rss_conf.rss_hf);
+ if (ret) {
+ DPAA2_PMD_ERR("Unable to set flow distribution."
+ "Check queue config");
+ return ret;
+ }
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ rx_l3_csum_offload = true;
+
+ if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
+ (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
+ rx_l4_csum_offload = true;
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
+ if (ret) {
+ DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
+ return ret;
+ }
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
+ if (ret) {
+ DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
+ return ret;
+ }
+
+ if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ tx_l3_csum_offload = true;
+
+ if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
+ (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
+ (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+ tx_l4_csum_offload = true;
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
+ if (ret) {
+ DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
+ return ret;
+ }
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
+ if (ret) {
+ DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
+ return ret;
+ }
+
+ /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
+ * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
+ * to 0 for LS2 in the hardware thus disabling data/annotation
+ * stashing. For LX2 this is fixed in hardware and thus hash result and
+ * parse results can be received in FD using this option.
+ */
+ if (dpaa2_svr_family == SVR_LX2160A) {
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_FLCTYPE_HASH, true);
+ if (ret) {
+ DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
+ return ret;
+ }
+ }
+
+ dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+
+ /* update the current status */
+ dpaa2_dev_link_update(dev, 0);
+
+ return 0;
+}
+
+/* Function to setup RX flow information. It contains traffic class ID,
+ * flow ID, destination configuration etc.
+ */
+static int
+dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct dpaa2_queue *dpaa2_q;
+ struct dpni_queue cfg;
+ uint8_t options = 0;
+ uint8_t flow_id;
+ uint32_t bpid;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
+ dev, rx_queue_id, mb_pool, rx_conf);
+
+ if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
+ bpid = mempool_to_bpid(mb_pool);
+ ret = dpaa2_attach_bp_list(priv,
+ rte_dpaa2_bpid_info[bpid].bp_list);
+ if (ret)
+ return ret;
+ }
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
+ dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
+
+ /*Get the flow id from given VQ id*/
+ flow_id = rx_queue_id % priv->nb_rx_queues;
+ memset(&cfg, 0, sizeof(struct dpni_queue));
+
+ options = options | DPNI_QUEUE_OPT_USER_CTX;
+ cfg.user_context = (size_t)(dpaa2_q);
+
+ /*if ls2088 or rev2 device, enable the stashing */
+
+ if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
+ options |= DPNI_QUEUE_OPT_FLC;
+ cfg.flc.stash_control = true;
+ cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
+ /* 00 00 00 - last 6 bit represent annotation, context stashing,
+ * data stashing setting 01 01 00 (0x14)
+ * (in following order ->DS AS CS)
+ * to enable 1 line data, 1 line annotation.
+ * For LX2, this setting should be 01 00 00 (0x10)
+ */
+ if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
+ cfg.flc.value |= 0x10;
+ else
+ cfg.flc.value |= 0x14;
+ }
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
+ dpaa2_q->tc_index, flow_id, options, &cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
+ return -1;
+ }
+
+ if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
+ struct dpni_taildrop taildrop;
+
+ taildrop.enable = 1;
+ /*enabling per rx queue congestion control */
+ taildrop.threshold = CONG_THRESHOLD_RX_Q;
+ taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
+ taildrop.oal = CONG_RX_OAL;
+ DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d",
+ rx_queue_id);
+ ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+ dpaa2_q->tc_index, flow_id, &taildrop);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
+ ret);
+ return -1;
+ }
+ }
+
+ dev->data->rx_queues[rx_queue_id] = dpaa2_q;
+ return 0;
+}
+
+static int
+dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
+ priv->tx_vq[tx_queue_id];
+ struct fsl_mc_io *dpni = priv->hw;
+ struct dpni_queue tx_conf_cfg;
+ struct dpni_queue tx_flow_cfg;
+ uint8_t options = 0, flow_id;
+ uint32_t tc_id;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if queue already configured */
+ if (dpaa2_q->flow_id != 0xffff) {
+ dev->data->tx_queues[tx_queue_id] = dpaa2_q;
+ return 0;
+ }
+
+ memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
+ memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
+
+ tc_id = tx_queue_id;
+ flow_id = 0;
+
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
+ tc_id, flow_id, options, &tx_flow_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in setting the tx flow: "
+ "tc_id=%d, flow=%d err=%d",
+ tc_id, flow_id, ret);
+ return -1;
+ }
+
+ dpaa2_q->flow_id = flow_id;
+
+ if (tx_queue_id == 0) {
+ /*Set tx-conf and error configuration*/
+ ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
+ priv->token,
+ DPNI_CONF_DISABLE);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in set tx conf mode settings: "
+ "err=%d", ret);
+ return -1;
+ }
+ }
+ dpaa2_q->tc_index = tc_id;
+
+ if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
+ struct dpni_congestion_notification_cfg cong_notif_cfg;
+
+ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+ cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
+ /* Notify that the queue is not congested when the data in
+ * the queue is below this thershold.
+ */
+ cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
+ cong_notif_cfg.message_ctx = 0;
+ cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn;
+ cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
+ cong_notif_cfg.notification_mode =
+ DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
+ DPNI_CONG_OPT_COHERENT_WRITE;
+
+ ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
+ priv->token,
+ DPNI_QUEUE_TX,
+ tc_id,
+ &cong_notif_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Error in setting tx congestion notification: "
+ "err=%d", ret);
+ return -ret;
+ }
+ }
+ dev->data->tx_queues[tx_queue_id] = dpaa2_q;
+ return 0;
+}
+
+static void
+dpaa2_dev_rx_queue_release(void *q __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static void
+dpaa2_dev_tx_queue_release(void *q __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static uint32_t
+dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ int32_t ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_queue *dpaa2_q;
+ struct qbman_swp *swp;
+ struct qbman_fq_query_np_rslt state;
+ uint32_t frame_cnt = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_PMD_ERR("Failure in affining portal");
+ return -EINVAL;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
+
+ if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
+ frame_cnt = qbman_fq_state_frame_count(&state);
+ DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
+ rx_queue_id, frame_cnt);
+ }
+ return frame_cnt;
+}
+
+static const uint32_t *
+dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /*todo -= add more types */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
+ return ptypes;
+ return NULL;
+}
+
+/**
+ * Dpaa2 link Interrupt handler
+ *
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) regsitered before.
+ *
+ * @return
+ * void
+ */
+static void
+dpaa2_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = param;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int ret;
+ int irq_index = DPNI_IRQ_INDEX;
+ unsigned int status = 0, clear = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return;
+ }
+
+ ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
+ irq_index, &status);
+ if (unlikely(ret)) {
+ DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
+ clear = 0xffffffff;
+ goto out;
+ }
+
+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
+ clear = DPNI_IRQ_EVENT_LINK_CHANGED;
+ dpaa2_dev_link_update(dev, 0);
+ /* calling all the apps registered for link status event */
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+out:
+ ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
+ irq_index, clear);
+ if (unlikely(ret))
+ DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
+}
+
+static int
+dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
+{
+ int err = 0;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int irq_index = DPNI_IRQ_INDEX;
+ unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;
+
+ PMD_INIT_FUNC_TRACE();
+
+ err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
+ irq_index, mask);
+ if (err < 0) {
+ DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
+ strerror(-err));
+ return err;
+ }
+
+ err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
+ irq_index, enable);
+ if (err < 0)
+ DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
+ strerror(-err));
+
+ return err;
+}
+
+static int
+dpaa2_dev_start(struct rte_eth_dev *dev)
+{
+ struct rte_device *rdev = dev->device;
+ struct rte_dpaa2_device *dpaa2_dev;
+ struct rte_eth_dev_data *data = dev->data;
+ struct dpaa2_dev_priv *priv = data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct dpni_queue cfg;
+ struct dpni_error_cfg err_cfg;
+ uint16_t qdid;
+ struct dpni_queue_id qid;
+ struct dpaa2_queue *dpaa2_q;
+ int ret, i;
+ struct rte_intr_handle *intr_handle;
+
+ dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
+ intr_handle = &dpaa2_dev->intr_handle;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
+ priv->hw_id, ret);
+ return ret;
+ }
+
+ /* Power up the phy. Needed to make the link go UP */
+ dpaa2_dev_set_link_up(dev);
+
+ ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX, &qdid);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
+ return ret;
+ }
+ priv->qdid = qdid;
+
+ for (i = 0; i < data->nb_rx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
+ ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_RX, dpaa2_q->tc_index,
+ dpaa2_q->flow_id, &cfg, &qid);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in getting flow information: "
+ "err=%d", ret);
+ return ret;
+ }
+ dpaa2_q->fqid = qid.fqid;
+ }
+
+ /*checksum errors, send them to normal path and set it in annotation */
+ err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
+
+ err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
+ err_cfg.set_frame_annotation = true;
+
+ ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
+ priv->token, &err_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
+ ret);
+ return ret;
+ }
+
+ /* if the interrupts were configured on this devices*/
+ if (intr_handle && (intr_handle->fd) &&
+ (dev->data->dev_conf.intr_conf.lsc != 0)) {
+ /* Registering LSC interrupt handler */
+ rte_intr_callback_register(intr_handle,
+ dpaa2_interrupt_handler,
+ (void *)dev);
+
+ /* enable vfio intr/eventfd mapping
+ * Interrupt index 0 is required, so we can not use
+ * rte_intr_enable.
+ */
+ rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);
+
+ /* enable dpni_irqs */
+ dpaa2_eth_setup_irqs(dev, 1);
+ }
+
+ return 0;
+}
+
+/**
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC.
+ */
+static void
+dpaa2_dev_stop(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int ret;
+ struct rte_eth_link link;
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* reset interrupt callback */
+ if (intr_handle && (intr_handle->fd) &&
+ (dev->data->dev_conf.intr_conf.lsc != 0)) {
+ /*disable dpni irqs */
+ dpaa2_eth_setup_irqs(dev, 0);
+
+ /* disable vfio intr before callback unregister */
+ rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);
+
+ /* Unregistering LSC interrupt handler */
+ rte_intr_callback_unregister(intr_handle,
+ dpaa2_interrupt_handler,
+ (void *)dev);
+ }
+
+ dpaa2_dev_set_link_down(dev);
+
+ ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
+ ret, priv->hw_id);
+ return;
+ }
+
+ /* clear the recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(dev, &link);
+}
+
+static void
+dpaa2_dev_close(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int i, ret;
+ struct rte_eth_link link;
+ struct dpaa2_queue *dpaa2_q;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
+ if (!dpaa2_q->cscn) {
+ rte_free(dpaa2_q->cscn);
+ dpaa2_q->cscn = NULL;
+ }
+ }
+
+ /* Clean the device first */
+ ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
+ return;
+ }
+
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(dev, &link);
+}
+
+static void
+dpaa2_dev_promiscuous_enable(
+ struct rte_eth_dev *dev)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return;
+ }
+
+ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
+ if (ret < 0)
+ DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);
+
+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
+ if (ret < 0)
+ DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
+}
+
+static void
+dpaa2_dev_promiscuous_disable(
+ struct rte_eth_dev *dev)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return;
+ }
+
+ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
+ if (ret < 0)
+ DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);
+
+ if (dev->data->all_multicast == 0) {
+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
+ priv->token, false);
+ if (ret < 0)
+ DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
+ ret);
+ }
+}
+
+static void
+dpaa2_dev_allmulticast_enable(
+ struct rte_eth_dev *dev)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return;
+ }
+
+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
+ if (ret < 0)
+ DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
+}
+
+static void
+dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return;
+ }
+
+ /* must remain on for all promiscuous */
+ if (dev->data->promiscuous == 1)
+ return;
+
+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
+ if (ret < 0)
+ DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
+}
+
+static int
+dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + VLAN_TAG_SIZE;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return -EINVAL;
+ }
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
+ return -EINVAL;
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.offloads &=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ /* Set the Max Rx frame length as 'mtu' +
+ * Maximum Ethernet header length
+ */
+ ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
+ frame_size);
+ if (ret) {
+ DPAA2_PMD_ERR("Setting the max frame length failed");
+ return -1;
+ }
+ DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
+ return 0;
+}
+
+static int
+dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return -1;
+ }
+
+ ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
+ priv->token, addr->addr_bytes);
+ if (ret)
+ DPAA2_PMD_ERR(
+ "error: Adding the MAC ADDR failed: err = %d", ret);
+ return 0;
+}
+
+static void
+dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
+ uint32_t index)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct rte_eth_dev_data *data = dev->data;
+ struct ether_addr *macaddr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ macaddr = &data->mac_addrs[index];
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return;
+ }
+
+ ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
+ priv->token, macaddr->addr_bytes);
+ if (ret)
+ DPAA2_PMD_ERR(
+ "error: Removing the MAC ADDR failed: err = %d", ret);
+}
+
+static int
+dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return -EINVAL;
+ }
+
+ ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
+ priv->token, addr->addr_bytes);
+
+ if (ret)
+ DPAA2_PMD_ERR(
+ "error: Setting the MAC ADDR failed %d", ret);
+
+ return ret;
+}
+
+static
+int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int32_t retcode;
+ uint8_t page0 = 0, page1 = 1, page2 = 2;
+ union dpni_statistics value;
+
+ memset(&value, 0, sizeof(union dpni_statistics));
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!dpni) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return -EINVAL;
+ }
+
+ if (!stats) {
+ DPAA2_PMD_ERR("stats is NULL");
+ return -EINVAL;
+ }
+
+ /*Get Counters from page_0*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page0, 0, &value);
+ if (retcode)
+ goto err;
+
+ stats->ipackets = value.page_0.ingress_all_frames;
+ stats->ibytes = value.page_0.ingress_all_bytes;
+
+ /*Get Counters from page_1*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page1, 0, &value);
+ if (retcode)
+ goto err;
+
+ stats->opackets = value.page_1.egress_all_frames;
+ stats->obytes = value.page_1.egress_all_bytes;
+
+ /*Get Counters from page_2*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page2, 0, &value);
+ if (retcode)
+ goto err;
+
+ /* Ingress drop frame count due to configured rules */
+ stats->ierrors = value.page_2.ingress_filtered_frames;
+ /* Ingress drop frame count due to error */
+ stats->ierrors += value.page_2.ingress_discarded_frames;
+
+ stats->oerrors = value.page_2.egress_discarded_frames;
+ stats->imissed = value.page_2.ingress_nobuffer_discards;
+
+ return 0;
+
+err:
+ DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
+ return retcode;
+};
+
+static int
+dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int32_t retcode;
+ union dpni_statistics value[3] = {};
+ unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
+
+ if (n < num)
+ return num;
+
+ if (xstats == NULL)
+ return 0;
+
+ /* Get Counters from page_0*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ 0, 0, &value[0]);
+ if (retcode)
+ goto err;
+
+ /* Get Counters from page_1*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ 1, 0, &value[1]);
+ if (retcode)
+ goto err;
+
+ /* Get Counters from page_2*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ 2, 0, &value[2]);
+ if (retcode)
+ goto err;
+
+ for (i = 0; i < num; i++) {
+ xstats[i].id = i;
+ xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
+ raw.counter[dpaa2_xstats_strings[i].stats_id];
+ }
+ return i;
+err:
+ DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
+ return retcode;
+}
+
+static int
+dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
+
+ if (limit < stat_cnt)
+ return stat_cnt;
+
+ if (xstats_names != NULL)
+ for (i = 0; i < stat_cnt; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s",
+ dpaa2_xstats_strings[i].name);
+
+ return stat_cnt;
+}
+
+static int
+dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
+ uint64_t values_copy[stat_cnt];
+
+ if (!ids) {
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int32_t retcode;
+ union dpni_statistics value[3] = {};
+
+ if (n < stat_cnt)
+ return stat_cnt;
+
+ if (!values)
+ return 0;
+
+ /* Get Counters from page_0*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ 0, 0, &value[0]);
+ if (retcode)
+ return 0;
+
+ /* Get Counters from page_1*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ 1, 0, &value[1]);
+ if (retcode)
+ return 0;
+
+ /* Get Counters from page_2*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ 2, 0, &value[2]);
+ if (retcode)
+ return 0;
+
+ for (i = 0; i < stat_cnt; i++) {
+ values[i] = value[dpaa2_xstats_strings[i].page_id].
+ raw.counter[dpaa2_xstats_strings[i].stats_id];
+ }
+ return stat_cnt;
+ }
+
+ dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= stat_cnt) {
+ DPAA2_PMD_ERR("xstats id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return n;
+}
+
+static int
+dpaa2_xstats_get_names_by_id(
+ struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
+ struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
+
+ if (!ids)
+ return dpaa2_xstats_get_names(dev, xstats_names, limit);
+
+ dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= stat_cnt) {
+ DPAA2_PMD_ERR("xstats id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
+ }
+ return limit;
+}
+
+static void
+dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int32_t retcode;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return;
+ }
+
+ retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
+ if (retcode)
+ goto error;
+
+ return;
+
+error:
+ DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
+ return;
+};
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+dpaa2_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct rte_eth_link link;
+ struct dpni_link_state state = {0};
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return 0;
+ }
+
+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
+ if (ret < 0) {
+ DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
+ return -1;
+ }
+
+ memset(&link, 0, sizeof(struct rte_eth_link));
+ link.link_status = state.up;
+ link.link_speed = state.rate;
+
+ if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ else
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ ret = rte_eth_linkstatus_set(dev, &link);
+ if (ret == -1)
+ DPAA2_PMD_DEBUG("No change in status");
+ else
+ DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
+ link.link_status ? "Up" : "Down");
+
+ return ret;
+}
+
+/**
+ * Toggle the DPNI to enable, if not already enabled.
+ * This is not strictly PHY up/down - it is more of logical toggling.
+ */
+static int
+dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ int ret = -EINVAL;
+ struct dpaa2_dev_priv *priv;
+ struct fsl_mc_io *dpni;
+ int en = 0;
+ struct dpni_link_state state = {0};
+
+ priv = dev->data->dev_private;
+ dpni = (struct fsl_mc_io *)priv->hw;
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return ret;
+ }
+
+ /* Check if DPNI is currently enabled */
+ ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
+ if (ret) {
+ /* Unable to obtain dpni status; Not continuing */
+ DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
+ return -EINVAL;
+ }
+
+ /* Enable link if not already enabled */
+ if (!en) {
+ ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
+ return -EINVAL;
+ }
+ }
+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
+ if (ret < 0) {
+ DPAA2_PMD_ERR("Unable to get link state (%d)", ret);
+ return -1;
+ }
+
+ /* changing tx burst function to start enqueues */
+ dev->tx_pkt_burst = dpaa2_dev_tx;
+ dev->data->dev_link.link_status = state.up;
+
+ if (state.up)
+ DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
+ else
+ DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
+ return ret;
+}
+
+/**
+ * Toggle the DPNI to disable, if not already disabled.
+ * This is not strictly PHY up/down - it is more of logical toggling.
+ */
+static int
+dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ int ret = -EINVAL;
+ struct dpaa2_dev_priv *priv;
+ struct fsl_mc_io *dpni;
+ int dpni_enabled = 0;
+ int retries = 10;
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = dev->data->dev_private;
+ dpni = (struct fsl_mc_io *)priv->hw;
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("Device has not yet been configured");
+ return ret;
+ }
+
+ /*changing tx burst function to avoid any more enqueues */
+ dev->tx_pkt_burst = dummy_dev_tx;
+
+ /* Loop while dpni_disable() attempts to drain the egress FQs
+ * and confirm them back to us.
+ */
+ do {
+ ret = dpni_disable(dpni, 0, priv->token);
+ if (ret) {
+ DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
+ return ret;
+ }
+ ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
+ if (ret) {
+ DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
+ return ret;
+ }
+ if (dpni_enabled)
+ /* Allow the MC some slack */
+ rte_delay_us(100 * 1000);
+ } while (dpni_enabled && --retries);
+
+ if (!retries) {
+ DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
+ /* todo- we may have to manually cleanup queues.
+ */
+ } else {
+ DPAA2_PMD_INFO("Port %d Link DOWN successful",
+ dev->data->port_id);
+ }
+
+ dev->data->dev_link.link_status = 0;
+
+ return ret;
+}
+
+static int
+dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ int ret = -EINVAL;
+ struct dpaa2_dev_priv *priv;
+ struct fsl_mc_io *dpni;
+ struct dpni_link_state state = {0};
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = dev->data->dev_private;
+ dpni = (struct fsl_mc_io *)priv->hw;
+
+ if (dpni == NULL || fc_conf == NULL) {
+ DPAA2_PMD_ERR("device not configured");
+ return ret;
+ }
+
+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
+ if (ret) {
+ DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
+ return ret;
+ }
+
+ memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+ if (state.options & DPNI_LINK_OPT_PAUSE) {
+ /* DPNI_LINK_OPT_PAUSE set
+ * if ASYM_PAUSE not set,
+ * RX Side flow control (handle received Pause frame)
+ * TX side flow control (send Pause frame)
+ * if ASYM_PAUSE set,
+ * RX Side flow control (handle received Pause frame)
+ * No TX side flow control (send Pause frame disabled)
+ */
+ if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
+ fc_conf->mode = RTE_FC_FULL;
+ else
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ } else {
+ /* DPNI_LINK_OPT_PAUSE not set
+ * if ASYM_PAUSE set,
+ * TX side flow control (send Pause frame)
+ * No RX side flow control (No action on pause frame rx)
+ * if ASYM_PAUSE not set,
+ * Flow control disabled
+ */
+ if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+ }
+
+ return ret;
+}
+
+static int
+dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ int ret = -EINVAL;
+ struct dpaa2_dev_priv *priv;
+ struct fsl_mc_io *dpni;
+ struct dpni_link_state state = {0};
+ struct dpni_link_cfg cfg = {0};
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = dev->data->dev_private;
+ dpni = (struct fsl_mc_io *)priv->hw;
+
+ if (dpni == NULL) {
+ DPAA2_PMD_ERR("dpni is NULL");
+ return ret;
+ }
+
+ /* It is necessary to obtain the current state before setting fc_conf
+ * as MC would return error in case rate, autoneg or duplex values are
+ * different.
+ */
+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
+ if (ret) {
+ DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
+ return -1;
+ }
+
+ /* Disable link before setting configuration */
+ dpaa2_dev_set_link_down(dev);
+
+ /* Based on fc_conf, update cfg */
+ cfg.rate = state.rate;
+ cfg.options = state.options;
+
+ /* update cfg with fc_conf */
+ switch (fc_conf->mode) {
+ case RTE_FC_FULL:
+ /* Full flow control;
+ * OPT_PAUSE set, ASYM_PAUSE not set
+ */
+ cfg.options |= DPNI_LINK_OPT_PAUSE;
+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+ break;
+ case RTE_FC_TX_PAUSE:
+ /* Enable RX flow control
+ * OPT_PAUSE not set;
+ * ASYM_PAUSE set;
+ */
+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+ break;
+ case RTE_FC_RX_PAUSE:
+ /* Enable TX Flow control
+ * OPT_PAUSE set
+ * ASYM_PAUSE set
+ */
+ cfg.options |= DPNI_LINK_OPT_PAUSE;
+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
+ break;
+ case RTE_FC_NONE:
+ /* Disable Flow control
+ * OPT_PAUSE not set
+ * ASYM_PAUSE not set
+ */
+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+ break;
+ default:
+ DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
+ fc_conf->mode);
+ return -1;
+ }
+
+ ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
+ if (ret)
+ DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
+ ret);
+
+ /* Enable link */
+ dpaa2_dev_set_link_up(dev);
+
+ return ret;
+}
+
+static int
+dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *eth_conf = &data->dev_conf;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rss_conf->rss_hf) {
+ ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
+ if (ret) {
+ DPAA2_PMD_ERR("Unable to set flow dist");
+ return ret;
+ }
+ } else {
+ ret = dpaa2_remove_flow_dist(dev, 0);
+ if (ret) {
+ DPAA2_PMD_ERR("Unable to remove flow dist");
+ return ret;
+ }
+ }
+ eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
+ return 0;
+}
+
+static int
+dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *eth_conf = &data->dev_conf;
+
+ /* dpaa2 does not support rss_key, so length should be 0*/
+ rss_conf->rss_key_len = 0;
+ rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
+ return 0;
+}
+
+int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
+ uint16_t dpcon_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
+ struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
+ uint8_t flow_id = dpaa2_ethq->flow_id;
+ struct dpni_queue cfg;
+ uint8_t options;
+ int ret;
+
+ if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
+ dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
+ else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
+ dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
+ else
+ return -EINVAL;
+
+ memset(&cfg, 0, sizeof(struct dpni_queue));
+ options = DPNI_QUEUE_OPT_DEST;
+ cfg.destination.type = DPNI_DEST_DPCON;
+ cfg.destination.id = dpcon_id;
+ cfg.destination.priority = queue_conf->ev.priority;
+
+ if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
+ cfg.destination.hold_active = 1;
+ }
+
+ options |= DPNI_QUEUE_OPT_USER_CTX;
+ cfg.user_context = (size_t)(dpaa2_ethq);
+
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
+ dpaa2_ethq->tc_index, flow_id, options, &cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
+ return ret;
+ }
+
+ memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
+
+ return 0;
+}
+
+int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id)
+{
+ struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
+ struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
+ uint8_t flow_id = dpaa2_ethq->flow_id;
+ struct dpni_queue cfg;
+ uint8_t options;
+ int ret;
+
+ memset(&cfg, 0, sizeof(struct dpni_queue));
+ options = DPNI_QUEUE_OPT_DEST;
+ cfg.destination.type = DPNI_DEST_NONE;
+
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
+ dpaa2_ethq->tc_index, flow_id, options, &cfg);
+ if (ret)
+ DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
+
+ return ret;
+}
+
+static struct eth_dev_ops dpaa2_ethdev_ops = {
+ .dev_configure = dpaa2_eth_dev_configure,
+ .dev_start = dpaa2_dev_start,
+ .dev_stop = dpaa2_dev_stop,
+ .dev_close = dpaa2_dev_close,
+ .promiscuous_enable = dpaa2_dev_promiscuous_enable,
+ .promiscuous_disable = dpaa2_dev_promiscuous_disable,
+ .allmulticast_enable = dpaa2_dev_allmulticast_enable,
+ .allmulticast_disable = dpaa2_dev_allmulticast_disable,
+ .dev_set_link_up = dpaa2_dev_set_link_up,
+ .dev_set_link_down = dpaa2_dev_set_link_down,
+ .link_update = dpaa2_dev_link_update,
+ .stats_get = dpaa2_dev_stats_get,
+ .xstats_get = dpaa2_dev_xstats_get,
+ .xstats_get_by_id = dpaa2_xstats_get_by_id,
+ .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
+ .xstats_get_names = dpaa2_xstats_get_names,
+ .stats_reset = dpaa2_dev_stats_reset,
+ .xstats_reset = dpaa2_dev_stats_reset,
+ .fw_version_get = dpaa2_fw_version_get,
+ .dev_infos_get = dpaa2_dev_info_get,
+ .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
+ .mtu_set = dpaa2_dev_mtu_set,
+ .vlan_filter_set = dpaa2_vlan_filter_set,
+ .vlan_offload_set = dpaa2_vlan_offload_set,
+ .rx_queue_setup = dpaa2_dev_rx_queue_setup,
+ .rx_queue_release = dpaa2_dev_rx_queue_release,
+ .tx_queue_setup = dpaa2_dev_tx_queue_setup,
+ .tx_queue_release = dpaa2_dev_tx_queue_release,
+ .rx_queue_count = dpaa2_dev_rx_queue_count,
+ .flow_ctrl_get = dpaa2_flow_ctrl_get,
+ .flow_ctrl_set = dpaa2_flow_ctrl_set,
+ .mac_addr_add = dpaa2_dev_add_mac_addr,
+ .mac_addr_remove = dpaa2_dev_remove_mac_addr,
+ .mac_addr_set = dpaa2_dev_set_mac_addr,
+ .rss_hash_update = dpaa2_dev_rss_hash_update,
+ .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
+};
+
+static int
+dpaa2_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_device *dev = eth_dev->device;
+ struct rte_dpaa2_device *dpaa2_dev;
+ struct fsl_mc_io *dpni_dev;
+ struct dpni_attr attr;
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct dpni_buffer_layout layout;
+ int ret, hw_id;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
+
+ hw_id = dpaa2_dev->object_id;
+
+ dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
+ if (!dpni_dev) {
+ DPAA2_PMD_ERR("Memory allocation failed for dpni device");
+ return -1;
+ }
+
+ dpni_dev->regs = rte_mcp_ptr_list[0];
+ ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Failure in opening dpni@%d with err code %d",
+ hw_id, ret);
+ rte_free(dpni_dev);
+ return -1;
+ }
+
+ /* Clean the device first */
+ ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
+ hw_id, ret);
+ goto init_err;
+ }
+
+ ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Failure in get dpni@%d attribute, err code %d",
+ hw_id, ret);
+ goto init_err;
+ }
+
+ priv->num_rx_tc = attr.num_rx_tcs;
+
+ /* Resetting the "num_rx_queues" to equal number of queues in first TC
+ * as only one TC is supported on Rx Side. Once Multiple TCs will be
+ * in use for Rx processing then this will be changed or removed.
+ */
+ priv->nb_rx_queues = attr.num_queues;
+
+ /* Using number of TX queues as number of TX TCs */
+ priv->nb_tx_queues = attr.num_tx_tcs;
+
+ DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
+ priv->num_rx_tc, priv->nb_rx_queues,
+ priv->nb_tx_queues);
+
+ priv->hw = dpni_dev;
+ priv->hw_id = hw_id;
+ priv->options = attr.options;
+ priv->max_mac_filters = attr.mac_filter_entries;
+ priv->max_vlan_filters = attr.vlan_filter_entries;
+ priv->flags = 0;
+
+ /* Allocate memory for hardware structure for queues */
+ ret = dpaa2_alloc_rx_tx_queues(eth_dev);
+ if (ret) {
+ DPAA2_PMD_ERR("Queue allocation Failed");
+ goto init_err;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("dpni",
+ ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ DPAA2_PMD_ERR(
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * attr.mac_filter_entries);
+ ret = -ENOMEM;
+ goto init_err;
+ }
+
+ ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
+ priv->token,
+ (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
+ if (ret) {
+ DPAA2_PMD_ERR("DPNI get mac address failed:Err Code = %d",
+ ret);
+ goto init_err;
+ }
+
+ /* ... tx buffer layout ... */
+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ layout.pass_frame_status = 1;
+ ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX, &layout);
+ if (ret) {
+ DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
+ goto init_err;
+ }
+
+ /* ... tx-conf and error buffer layout ... */
+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ layout.pass_frame_status = 1;
+ ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX_CONFIRM, &layout);
+ if (ret) {
+ DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
+ ret);
+ goto init_err;
+ }
+
+ eth_dev->dev_ops = &dpaa2_ethdev_ops;
+
+ eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
+ eth_dev->tx_pkt_burst = dpaa2_dev_tx;
+
+ RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
+ return 0;
+init_err:
+ dpaa2_dev_uninit(eth_dev);
+ return ret;
+}
+
+static int
+dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int i, ret;
+ struct dpaa2_queue *dpaa2_q;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (!dpni) {
+ DPAA2_PMD_WARN("Already closed or not started");
+ return -1;
+ }
+
+ dpaa2_dev_close(eth_dev);
+
+ if (priv->rx_vq[0]) {
+ /* cleaning up queue storage */
+ for (i = 0; i < priv->nb_rx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
+ if (dpaa2_q->q_storage)
+ rte_free(dpaa2_q->q_storage);
+ }
+ /*free the all queue memory */
+ rte_free(priv->rx_vq[0]);
+ priv->rx_vq[0] = NULL;
+ }
+
+ /* free memory for storing MAC addresses */
+ if (eth_dev->data->mac_addrs) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ }
+
+ /* Close the device at underlying layer*/
+ ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Failure closing dpni device with err code %d",
+ ret);
+ }
+
+ /* Free the allocated memory for ethernet private data and dpni*/
+ priv->hw = NULL;
+ rte_free(dpni);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
+ return 0;
+}
+
+static int
+rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int diag;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
+ if (!eth_dev)
+ return -ENODEV;
+ eth_dev->data->dev_private = rte_zmalloc(
+ "ethdev private structure",
+ sizeof(struct dpaa2_dev_priv),
+ RTE_CACHE_LINE_SIZE);
+ if (eth_dev->data->dev_private == NULL) {
+ DPAA2_PMD_CRIT(
+ "Unable to allocate memory for private data");
+ rte_eth_dev_release_port(eth_dev);
+ return -ENOMEM;
+ }
+ } else {
+ eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
+ if (!eth_dev)
+ return -ENODEV;
+ }
+
+ eth_dev->device = &dpaa2_dev->device;
+ eth_dev->device->driver = &dpaa2_drv->driver;
+
+ dpaa2_dev->eth_dev = eth_dev;
+ eth_dev->data->rx_mbuf_alloc_failed = 0;
+
+ if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+
+ /* Invoke PMD device initialization function */
+ diag = dpaa2_dev_init(eth_dev);
+ if (diag == 0) {
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+ rte_eth_dev_release_port(eth_dev);
+ return diag;
+}
+
+static int
+rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_eth_dev *eth_dev;
+
+ eth_dev = dpaa2_dev->eth_dev;
+ dpaa2_dev_uninit(eth_dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_dpaa2_driver rte_dpaa2_pmd = {
+ .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
+ .drv_type = DPAA2_ETH,
+ .probe = rte_dpaa2_probe,
+ .remove = rte_dpaa2_remove,
+};
+
+RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
+
+RTE_INIT(dpaa2_pmd_init_log)
+{
+ dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
+ if (dpaa2_logtype_pmd >= 0)
+ rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h
new file mode 100644
index 00000000..bd69f523
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _DPAA2_ETHDEV_H
+#define _DPAA2_ETHDEV_H
+
+#include <rte_event_eth_rx_adapter.h>
+
+#include <mc/fsl_dpni.h>
+#include <mc/fsl_mc_sys.h>
+
+#define DPAA2_MIN_RX_BUF_SIZE 512
+#define DPAA2_MAX_RX_PKT_LEN 10240 /*WRIOP support*/
+
+#define MAX_TCS DPNI_MAX_TC
+#define MAX_RX_QUEUES 16
+#define MAX_TX_QUEUES 16
+
+/*default tc to be used for ,congestion, distribution etc configuration. */
+#define DPAA2_DEF_TC 0
+
+/* Threshold for a Tx queue to *Enter* Congestion state.
+ */
+#define CONG_ENTER_TX_THRESHOLD 512
+
+/* Threshold for a queue to *Exit* Congestion state.
+ */
+#define CONG_EXIT_TX_THRESHOLD 480
+
+#define CONG_RETRY_COUNT 18000
+
+/* RX queue tail drop threshold
+ * currently considering 32 KB packets
+ */
+#define CONG_THRESHOLD_RX_Q (64 * 1024)
+#define CONG_RX_OAL 128
+
+/* Size of the input SMMU mapped memory required by MC */
+#define DIST_PARAM_IOVA_SIZE 256
+
+/* Enable TX Congestion control support
+ * default is disable
+ */
+#define DPAA2_TX_CGR_OFF 0x01
+
+/* Disable RX tail drop, default is enable */
+#define DPAA2_RX_TAILDROP_OFF 0x04
+
+#define DPAA2_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IP | \
+ ETH_RSS_UDP | \
+ ETH_RSS_TCP | \
+ ETH_RSS_SCTP)
+
+/* LX2 FRC Parsed values (Little Endian) */
+#define DPAA2_PKT_TYPE_ETHER 0x0060
+#define DPAA2_PKT_TYPE_IPV4 0x0000
+#define DPAA2_PKT_TYPE_IPV6 0x0020
+#define DPAA2_PKT_TYPE_IPV4_EXT \
+ (0x0001 | DPAA2_PKT_TYPE_IPV4)
+#define DPAA2_PKT_TYPE_IPV6_EXT \
+ (0x0001 | DPAA2_PKT_TYPE_IPV6)
+#define DPAA2_PKT_TYPE_IPV4_TCP \
+ (0x000e | DPAA2_PKT_TYPE_IPV4)
+#define DPAA2_PKT_TYPE_IPV6_TCP \
+ (0x000e | DPAA2_PKT_TYPE_IPV6)
+#define DPAA2_PKT_TYPE_IPV4_UDP \
+ (0x0010 | DPAA2_PKT_TYPE_IPV4)
+#define DPAA2_PKT_TYPE_IPV6_UDP \
+ (0x0010 | DPAA2_PKT_TYPE_IPV6)
+#define DPAA2_PKT_TYPE_IPV4_SCTP \
+ (0x000f | DPAA2_PKT_TYPE_IPV4)
+#define DPAA2_PKT_TYPE_IPV6_SCTP \
+ (0x000f | DPAA2_PKT_TYPE_IPV6)
+#define DPAA2_PKT_TYPE_IPV4_ICMP \
+ (0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
+#define DPAA2_PKT_TYPE_IPV6_ICMP \
+ (0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
+#define DPAA2_PKT_TYPE_VLAN_1 0x0160
+#define DPAA2_PKT_TYPE_VLAN_2 0x0260
+
+struct dpaa2_dev_priv {
+ void *hw;
+ int32_t hw_id;
+ int32_t qdid;
+ uint16_t token;
+ uint8_t nb_tx_queues;
+ uint8_t nb_rx_queues;
+ void *rx_vq[MAX_RX_QUEUES];
+ void *tx_vq[MAX_TX_QUEUES];
+
+ struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
+ uint32_t options;
+ uint8_t max_mac_filters;
+ uint8_t max_vlan_filters;
+ uint8_t num_rx_tc;
+ uint8_t flags; /*dpaa2 config flags */
+};
+
+int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
+ uint64_t req_dist_set);
+
+int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
+ uint8_t tc_index);
+
+int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);
+
+int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
+ uint16_t dpcon_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+
+int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id);
+
+uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts);
+void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev);
+void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev);
+uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+#endif /* _DPAA2_ETHDEV_H */
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h
new file mode 100644
index 00000000..c04babdb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h
@@ -0,0 +1,41 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef _DPAA2_PMD_LOGS_H_
+#define _DPAA2_PMD_LOGS_H_
+
+extern int dpaa2_logtype_pmd;
+
+#define DPAA2_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_pmd, "dpaa2_net: " \
+ fmt "\n", ##args)
+
+#define DPAA2_PMD_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_pmd, "dpaa2_net: %s(): "\
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA2_PMD_DEBUG(">>")
+
+#define DPAA2_PMD_CRIT(fmt, args...) \
+ DPAA2_PMD_LOG(CRIT, fmt, ## args)
+#define DPAA2_PMD_INFO(fmt, args...) \
+ DPAA2_PMD_LOG(INFO, fmt, ## args)
+#define DPAA2_PMD_ERR(fmt, args...) \
+ DPAA2_PMD_LOG(ERR, fmt, ## args)
+#define DPAA2_PMD_WARN(fmt, args...) \
+ DPAA2_PMD_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_PMD_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_PMD_DP_DEBUG(fmt, args...) \
+ DPAA2_PMD_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_PMD_DP_INFO(fmt, args...) \
+ DPAA2_PMD_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_PMD_DP_WARN(fmt, args...) \
+ DPAA2_PMD_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _DPAA2_PMD_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c
new file mode 100644
index 00000000..ef109a62
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -0,0 +1,842 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_dev.h>
+
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_dpio.h>
+#include <dpaa2_hw_mempool.h>
+
+#include "dpaa2_pmd_logs.h"
+#include "dpaa2_ethdev.h"
+#include "base/dpaa2_hw_dpni_annot.h"
+
+#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) do { \
+ DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
+ DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
+ DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
+ DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
+ DPAA2_SET_FD_ASAL(_fd, DPAA2_ASAL_VAL); \
+} while (0)
+
+static inline void __attribute__((hot))
+dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
+{
+ DPAA2_PMD_DP_DEBUG("frc = 0x%x\t", frc);
+
+ m->packet_type = RTE_PTYPE_UNKNOWN;
+ switch (frc) {
+ case DPAA2_PKT_TYPE_ETHER:
+ m->packet_type = RTE_PTYPE_L2_ETHER;
+ break;
+ case DPAA2_PKT_TYPE_IPV4:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4;
+ break;
+ case DPAA2_PKT_TYPE_IPV6:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6;
+ break;
+ case DPAA2_PKT_TYPE_IPV4_EXT:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT;
+ break;
+ case DPAA2_PKT_TYPE_IPV6_EXT:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT;
+ break;
+ case DPAA2_PKT_TYPE_IPV4_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA2_PKT_TYPE_IPV6_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA2_PKT_TYPE_IPV4_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA2_PKT_TYPE_IPV6_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA2_PKT_TYPE_IPV4_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
+ break;
+ case DPAA2_PKT_TYPE_IPV6_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
+ break;
+ case DPAA2_PKT_TYPE_IPV4_ICMP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
+ break;
+ case DPAA2_PKT_TYPE_IPV6_ICMP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
+ break;
+ case DPAA2_PKT_TYPE_VLAN_1:
+ case DPAA2_PKT_TYPE_VLAN_2:
+ m->ol_flags |= PKT_RX_VLAN;
+ break;
+ /* More switch cases can be added */
+ /* TODO: Add handling for checksum error check from FRC */
+ default:
+ m->packet_type = RTE_PTYPE_UNKNOWN;
+ }
+}
+
+static inline uint32_t __attribute__((hot))
+dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation)
+{
+ uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
+
+ DPAA2_PMD_DP_DEBUG("(slow parse) Annotation = 0x%" PRIx64 "\t",
+ annotation->word4);
+ if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
+ pkt_type = RTE_PTYPE_L2_ETHER_ARP;
+ goto parse_done;
+ } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
+ pkt_type = RTE_PTYPE_L2_ETHER;
+ } else {
+ goto parse_done;
+ }
+
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
+ L3_IPV4_N_PRESENT)) {
+ pkt_type |= RTE_PTYPE_L3_IPV4;
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
+ L3_IP_N_OPT_PRESENT))
+ pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
+
+ } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
+ L3_IPV6_N_PRESENT)) {
+ pkt_type |= RTE_PTYPE_L3_IPV6;
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
+ L3_IP_N_OPT_PRESENT))
+ pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
+ } else {
+ goto parse_done;
+ }
+
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
+ L3_IP_1_MORE_FRAGMENT |
+ L3_IP_N_FIRST_FRAGMENT |
+ L3_IP_N_MORE_FRAGMENT)) {
+ pkt_type |= RTE_PTYPE_L4_FRAG;
+ goto parse_done;
+ } else {
+ pkt_type |= RTE_PTYPE_L4_NONFRAG;
+ }
+
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
+ pkt_type |= RTE_PTYPE_L4_UDP;
+
+ else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
+ pkt_type |= RTE_PTYPE_L4_TCP;
+
+ else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
+ pkt_type |= RTE_PTYPE_L4_SCTP;
+
+ else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
+ pkt_type |= RTE_PTYPE_L4_ICMP;
+
+ else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
+ pkt_type |= RTE_PTYPE_UNKNOWN;
+
+parse_done:
+ return pkt_type;
+}
+
+static inline uint32_t __attribute__((hot))
+dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
+{
+ struct dpaa2_annot_hdr *annotation =
+ (struct dpaa2_annot_hdr *)hw_annot_addr;
+
+ DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
+ annotation->word4);
+
+ /* Check offloads first */
+ if (BIT_ISSET_AT_POS(annotation->word3,
+ L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
+ mbuf->ol_flags |= PKT_RX_VLAN;
+
+ if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+
+ /* Return some common types from parse processing */
+ switch (annotation->word4) {
+ case DPAA2_L3_IPv4:
+ return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
+ case DPAA2_L3_IPv6:
+ return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
+ case DPAA2_L3_IPv4_TCP:
+ return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_TCP;
+ case DPAA2_L3_IPv4_UDP:
+ return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_UDP;
+ case DPAA2_L3_IPv6_TCP:
+ return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_TCP;
+ case DPAA2_L3_IPv6_UDP:
+ return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_UDP;
+ default:
+ break;
+ }
+
+ return dpaa2_dev_rx_parse_slow(annotation);
+}
+
+static inline struct rte_mbuf *__attribute__((hot))
+eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
+{
+ struct qbman_sge *sgt, *sge;
+ size_t sg_addr, fd_addr;
+ int i = 0;
+ struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
+
+ fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+ /* Get Scatter gather table address */
+ sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
+
+ sge = &sgt[i++];
+ sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
+
+ /* First Scatter gather entry */
+ first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+ /* Prepare all the metadata for first segment */
+ first_seg->buf_addr = (uint8_t *)sg_addr;
+ first_seg->ol_flags = 0;
+ first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
+ first_seg->data_len = sge->length & 0x1FFFF;
+ first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
+ first_seg->nb_segs = 1;
+ first_seg->next = NULL;
+ if (dpaa2_svr_family == SVR_LX2160A)
+ dpaa2_dev_rx_parse_frc(first_seg,
+ DPAA2_GET_FD_FRC_PARSE_SUM(fd));
+ else
+ first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
+ (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_FD_PTA_SIZE));
+
+ rte_mbuf_refcnt_set(first_seg, 1);
+ cur_seg = first_seg;
+ while (!DPAA2_SG_IS_FINAL(sge)) {
+ sge = &sgt[i++];
+ sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FLE_ADDR(sge));
+ next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
+ rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
+ next_seg->buf_addr = (uint8_t *)sg_addr;
+ next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
+ next_seg->data_len = sge->length & 0x1FFFF;
+ first_seg->nb_segs += 1;
+ rte_mbuf_refcnt_set(next_seg, 1);
+ cur_seg->next = next_seg;
+ next_seg->next = NULL;
+ cur_seg = next_seg;
+ }
+ temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+ rte_mbuf_refcnt_set(temp, 1);
+ rte_pktmbuf_free_seg(temp);
+
+ return (void *)first_seg;
+}
+
+static inline struct rte_mbuf *__attribute__((hot))
+eth_fd_to_mbuf(const struct qbman_fd *fd)
+{
+ struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+
+ /* need to repopulated some of the fields,
+ * as they may have changed in last transmission
+ */
+ mbuf->nb_segs = 1;
+ mbuf->ol_flags = 0;
+ mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
+ mbuf->data_len = DPAA2_GET_FD_LEN(fd);
+ mbuf->pkt_len = mbuf->data_len;
+ mbuf->next = NULL;
+ rte_mbuf_refcnt_set(mbuf, 1);
+
+ /* Parse the packet */
+ /* parse results for LX2 are there in FRC field of FD.
+ * For other DPAA2 platforms , parse results are after
+ * the private - sw annotation area
+ */
+
+ if (dpaa2_svr_family == SVR_LX2160A)
+ dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
+ else
+ mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
+ (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_FD_PTA_SIZE));
+
+ DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
+ "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
+ mbuf, mbuf->buf_addr, mbuf->data_off,
+ DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
+
+ return mbuf;
+}
+
+static int __attribute__ ((noinline)) __attribute__((hot))
+eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
+ struct qbman_sge *sgt, *sge = NULL;
+ int i;
+
+ temp = rte_pktmbuf_alloc(mbuf->pool);
+ if (temp == NULL) {
+ DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
+ return -ENOMEM;
+ }
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
+ DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
+ DPAA2_SET_ONLY_FD_BPID(fd, bpid);
+ DPAA2_SET_FD_OFFSET(fd, temp->data_off);
+ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
+ DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
+ /*Set Scatter gather table and Scatter gather entries*/
+ sgt = (struct qbman_sge *)(
+ (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_GET_FD_OFFSET(fd));
+
+ for (i = 0; i < mbuf->nb_segs; i++) {
+ sge = &sgt[i];
+ /*Resetting the buffer pool id and offset field*/
+ sge->fin_bpid_offset = 0;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
+ DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
+ sge->length = cur_seg->data_len;
+ if (RTE_MBUF_DIRECT(cur_seg)) {
+ if (rte_mbuf_refcnt_read(cur_seg) > 1) {
+ /* If refcnt > 1, invalid bpid is set to ensure
+ * buffer is not freed by HW
+ */
+ DPAA2_SET_FLE_IVP(sge);
+ rte_mbuf_refcnt_update(cur_seg, -1);
+ } else
+ DPAA2_SET_FLE_BPID(sge,
+ mempool_to_bpid(cur_seg->pool));
+ cur_seg = cur_seg->next;
+ } else {
+ /* Get owner MBUF from indirect buffer */
+ mi = rte_mbuf_from_indirect(cur_seg);
+ if (rte_mbuf_refcnt_read(mi) > 1) {
+ /* If refcnt > 1, invalid bpid is set to ensure
+ * owner buffer is not freed by HW
+ */
+ DPAA2_SET_FLE_IVP(sge);
+ } else {
+ DPAA2_SET_FLE_BPID(sge,
+ mempool_to_bpid(mi->pool));
+ rte_mbuf_refcnt_update(mi, 1);
+ }
+ prev_seg = cur_seg;
+ cur_seg = cur_seg->next;
+ prev_seg->next = NULL;
+ rte_pktmbuf_free(prev_seg);
+ }
+ }
+ DPAA2_SG_SET_FINAL(sge, true);
+ return 0;
+}
+
+static void
+eth_mbuf_to_fd(struct rte_mbuf *mbuf,
+ struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));
+
+static void __attribute__ ((noinline)) __attribute__((hot))
+eth_mbuf_to_fd(struct rte_mbuf *mbuf,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
+
+ DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
+ "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
+ mbuf, mbuf->buf_addr, mbuf->data_off,
+ DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
+ if (RTE_MBUF_DIRECT(mbuf)) {
+ if (rte_mbuf_refcnt_read(mbuf) > 1) {
+ DPAA2_SET_FD_IVP(fd);
+ rte_mbuf_refcnt_update(mbuf, -1);
+ }
+ } else {
+ struct rte_mbuf *mi;
+
+ mi = rte_mbuf_from_indirect(mbuf);
+ if (rte_mbuf_refcnt_read(mi) > 1)
+ DPAA2_SET_FD_IVP(fd);
+ else
+ rte_mbuf_refcnt_update(mi, 1);
+ rte_pktmbuf_free(mbuf);
+ }
+}
+
+static inline int __attribute__((hot))
+eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_mbuf *m;
+ void *mb = NULL;
+
+ if (rte_dpaa2_mbuf_alloc_bulk(
+ rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
+ DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n");
+ return -1;
+ }
+ m = (struct rte_mbuf *)mb;
+ memcpy((char *)m->buf_addr + mbuf->data_off,
+ (void *)((char *)mbuf->buf_addr + mbuf->data_off),
+ mbuf->pkt_len);
+
+ /* Copy required fields */
+ m->data_off = mbuf->data_off;
+ m->ol_flags = mbuf->ol_flags;
+ m->packet_type = mbuf->packet_type;
+ m->tx_offload = mbuf->tx_offload;
+
+ DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
+
+ DPAA2_PMD_DP_DEBUG(
+ "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
+ " meta: %d, off: %d, len: %d\n",
+ (void *)mbuf,
+ mbuf->buf_addr,
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+
+return 0;
+}
+
+/* This function assumes that caller will be keep the same value for nb_pkts
+ * across calls per queue, if that is not the case, better use non-prefetch
+ * version of rx call.
+ * It will return the packets as requested in previous call without honoring
+ * the current nb_pkts or bufs space.
+ */
+uint16_t
+dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /* Function receive frames for a given device and VQ*/
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+ struct qbman_result *dq_storage, *dq_storage1 = NULL;
+ uint32_t fqid = dpaa2_q->fqid;
+ int ret, num_rx = 0, pull_size;
+ uint8_t pending, status;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd, *next_fd;
+ struct qbman_pull_desc pulldesc;
+ struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+ struct rte_eth_dev *dev = dpaa2_q->dev;
+
+ if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
+ ret = dpaa2_affine_qbman_ethrx_swp();
+ if (ret) {
+ DPAA2_PMD_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
+ pull_size = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
+ DPAA2_DQRR_RING_SIZE : nb_pkts;
+ if (unlikely(!q_storage->active_dqs)) {
+ q_storage->toggle = 0;
+ dq_storage = q_storage->dq_storage[q_storage->toggle];
+ q_storage->last_num_pkts = pull_size;
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc,
+ q_storage->last_num_pkts);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
+ while (!qbman_check_command_complete(
+ get_swp_active_dqs(
+ DPAA2_PER_LCORE_ETHRX_DPIO->index)))
+ ;
+ clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
+ }
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
+ " QBMAN is busy (1)\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+ q_storage->active_dqs = dq_storage;
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
+ dq_storage);
+ }
+
+ dq_storage = q_storage->active_dqs;
+ rte_prefetch0((void *)(size_t)(dq_storage));
+ rte_prefetch0((void *)(size_t)(dq_storage + 1));
+
+ /* Prepare next pull descriptor. This will give space for the
+ * prefething done on DQRR entries
+ */
+ q_storage->toggle ^= 1;
+ dq_storage1 = q_storage->dq_storage[q_storage->toggle];
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc, pull_size);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+
+ /* Check if the previous issued command is completed.
+ * Also seems like the SWP is shared between the Ethernet Driver
+ * and the SEC driver.
+ */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+ if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
+ clear_swp_active_dqs(q_storage->active_dpio_id);
+
+ pending = 1;
+
+ do {
+ /* Loop until the dq_storage is updated with
+ * new token by QBMAN
+ */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+ rte_prefetch0((void *)((size_t)(dq_storage + 2)));
+ /* Check whether Last Pull command is Expired and
+ * setting Condition for Loop termination
+ */
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ pending = 0;
+ /* Check for valid frame. */
+ status = qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
+ continue;
+ }
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ next_fd = qbman_result_DQ_fd(dq_storage + 1);
+ /* Prefetch Annotation address for the parse results */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd)
+ + DPAA2_FD_PTA_SIZE + 16));
+
+ if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
+ bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
+ else
+ bufs[num_rx] = eth_fd_to_mbuf(fd);
+ bufs[num_rx]->port = dev->data->port_id;
+
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rte_vlan_strip(bufs[num_rx]);
+
+ dq_storage++;
+ num_rx++;
+ } while (pending);
+
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
+ while (!qbman_check_command_complete(
+ get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
+ ;
+ clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
+ }
+ /* issue a volatile dequeue command for next pull */
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
+ "QBMAN is busy (2)\n");
+ continue;
+ }
+ break;
+ }
+ q_storage->active_dqs = dq_storage1;
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
+
+ dpaa2_q->rx_pkts += num_rx;
+
+ return num_rx;
+}
+
+void __attribute__((hot))
+dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
+ DPAA2_FD_PTA_SIZE + 16));
+
+ ev->flow_id = rxq->ev.flow_id;
+ ev->sub_event_type = rxq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = rxq->ev.sched_type;
+ ev->queue_id = rxq->ev.queue_id;
+ ev->priority = rxq->ev.priority;
+
+ ev->mbuf = eth_fd_to_mbuf(fd);
+
+ qbman_swp_dqrr_consume(swp, dq);
+}
+
+void __attribute__((hot))
+dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ uint8_t dqrr_index;
+
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
+ DPAA2_FD_PTA_SIZE + 16));
+
+ ev->flow_id = rxq->ev.flow_id;
+ ev->sub_event_type = rxq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = rxq->ev.sched_type;
+ ev->queue_id = rxq->ev.queue_id;
+ ev->priority = rxq->ev.priority;
+
+ ev->mbuf = eth_fd_to_mbuf(fd);
+
+ dqrr_index = qbman_get_dqrr_idx(dq);
+ ev->mbuf->seqn = dqrr_index + 1;
+ DPAA2_PER_LCORE_DQRR_SIZE++;
+ DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+ DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
+}
+
+/*
+ * Callback to handle sending packets through WRIOP based interface
+ */
+uint16_t
+dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /* Function to transmit the frames to given device and VQ*/
+ uint32_t loop, retry_count;
+ int32_t ret;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ struct rte_mbuf *mi;
+ uint32_t frames_to_send;
+ struct rte_mempool *mp;
+ struct qbman_eq_desc eqdesc;
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+ struct qbman_swp *swp;
+ uint16_t num_tx = 0;
+ uint16_t bpid;
+ struct rte_eth_dev *dev = dpaa2_q->dev;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ uint32_t flags[MAX_TX_RING_SLOTS] = {0};
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_PMD_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ DPAA2_PMD_DP_DEBUG("===> dev =%p, fqid =%d\n", dev, dpaa2_q->fqid);
+
+ /*Prepare enqueue descriptor*/
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+ qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
+ dpaa2_q->flow_id, dpaa2_q->tc_index);
+ /*Clear the unused FD fields before sending*/
+ while (nb_pkts) {
+ /*Check if the queue is congested*/
+ retry_count = 0;
+ while (qbman_result_SCN_state(dpaa2_q->cscn)) {
+ retry_count++;
+ /* Retry for some time before giving up */
+ if (retry_count > CONG_RETRY_COUNT)
+ goto skip_tx;
+ }
+
+ frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+ if ((*bufs)->seqn) {
+ uint8_t dqrr_index = (*bufs)->seqn - 1;
+
+ flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
+ dqrr_index;
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+ (*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
+ }
+
+ fd_arr[loop].simple.frc = 0;
+ DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
+ DPAA2_SET_FD_FLC((&fd_arr[loop]), (size_t)NULL);
+ if (likely(RTE_MBUF_DIRECT(*bufs))) {
+ mp = (*bufs)->pool;
+ /* Check the basic scenario and set
+ * the FD appropriately here itself.
+ */
+ if (likely(mp && mp->ops_index ==
+ priv->bp_list->dpaa2_ops_index &&
+ (*bufs)->nb_segs == 1 &&
+ rte_mbuf_refcnt_read((*bufs)) == 1)) {
+ if (unlikely(((*bufs)->ol_flags
+ & PKT_TX_VLAN_PKT) ||
+ (dev->data->dev_conf.txmode.offloads
+ & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+ ret = rte_vlan_insert(bufs);
+ if (ret)
+ goto send_n_return;
+ }
+ DPAA2_MBUF_TO_CONTIG_FD((*bufs),
+ &fd_arr[loop], mempool_to_bpid(mp));
+ bufs++;
+ continue;
+ }
+ } else {
+ mi = rte_mbuf_from_indirect(*bufs);
+ mp = mi->pool;
+ }
+ /* Not a hw_pkt pool allocated frame */
+ if (unlikely(!mp || !priv->bp_list)) {
+ DPAA2_PMD_ERR("Err: No buffer pool attached");
+ goto send_n_return;
+ }
+
+ if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
+ (dev->data->dev_conf.txmode.offloads
+ & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+ int ret = rte_vlan_insert(bufs);
+ if (ret)
+ goto send_n_return;
+ }
+ if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
+ DPAA2_PMD_WARN("Non DPAA2 buffer pool");
+ /* alloc should be from the default buffer pool
+ * attached to this interface
+ */
+ bpid = priv->bp_list->buf_pool.bpid;
+
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ DPAA2_PMD_ERR("S/G support not added"
+ " for non hw offload buffer");
+ goto send_n_return;
+ }
+ if (eth_copy_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid)) {
+ goto send_n_return;
+ }
+ /* free the original packet */
+ rte_pktmbuf_free(*bufs);
+ } else {
+ bpid = mempool_to_bpid(mp);
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ if (eth_mbuf_to_sg_fd(*bufs,
+ &fd_arr[loop], bpid))
+ goto send_n_return;
+ } else {
+ eth_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid);
+ }
+ }
+ bufs++;
+ }
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
+ &fd_arr[loop], &flags[loop],
+ frames_to_send - loop);
+ }
+
+ num_tx += frames_to_send;
+ nb_pkts -= frames_to_send;
+ }
+ dpaa2_q->tx_pkts += num_tx;
+ return num_tx;
+
+send_n_return:
+ /* send any already prepared fd */
+ if (loop) {
+ unsigned int i = 0;
+
+ while (i < loop) {
+ i += qbman_swp_enqueue_multiple(swp, &eqdesc,
+ &fd_arr[i],
+ &flags[loop],
+ loop - i);
+ }
+ num_tx += loop;
+ }
+skip_tx:
+ dpaa2_q->tx_pkts += num_tx;
+ return num_tx;
+}
+
+/**
+ * Dummy DPDK callback for TX.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ (void)queue;
+ (void)bufs;
+ (void)nb_pkts;
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/dpkg.c b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpkg.c
new file mode 100644
index 00000000..80f94f40
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpkg.c
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2017 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpkg.h>
+
+/**
+ * dpkg_prepare_key_cfg() - function prepare extract parameters
+ * @cfg: defining a full Key Generation profile (rule)
+ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
+ *
+ * This function has to be called before the following functions:
+ * - dpni_set_rx_tc_dist()
+ * - dpni_set_qos_table()
+ * - dpkg_prepare_key_cfg()
+ */
+int
+dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
+{
+ int i, j;
+ struct dpni_ext_set_rx_tc_dist *dpni_ext;
+ struct dpni_dist_extract *extr;
+
+ if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
+ return -EINVAL;
+
+ dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
+ dpni_ext->num_extracts = cfg->num_extracts;
+
+ for (i = 0; i < cfg->num_extracts; i++) {
+ extr = &dpni_ext->extracts[i];
+
+ switch (cfg->extracts[i].type) {
+ case DPKG_EXTRACT_FROM_HDR:
+ extr->prot = cfg->extracts[i].extract.from_hdr.prot;
+ dpkg_set_field(extr->efh_type, EFH_TYPE,
+ cfg->extracts[i].extract.from_hdr.type);
+ extr->size = cfg->extracts[i].extract.from_hdr.size;
+ extr->offset = cfg->extracts[i].extract.from_hdr.offset;
+ extr->field = cpu_to_le32(
+ cfg->extracts[i].extract.from_hdr.field);
+ extr->hdr_index =
+ cfg->extracts[i].extract.from_hdr.hdr_index;
+ break;
+ case DPKG_EXTRACT_FROM_DATA:
+ extr->size = cfg->extracts[i].extract.from_data.size;
+ extr->offset =
+ cfg->extracts[i].extract.from_data.offset;
+ break;
+ case DPKG_EXTRACT_FROM_PARSE:
+ extr->size = cfg->extracts[i].extract.from_parse.size;
+ extr->offset =
+ cfg->extracts[i].extract.from_parse.offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
+ dpkg_set_field(extr->extract_type, EXTRACT_TYPE,
+ cfg->extracts[i].type);
+
+ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
+ extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
+ extr->masks[j].offset =
+ cfg->extracts[i].masks[j].offset;
+ }
+ }
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/dpni.c b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpni.c
new file mode 100644
index 00000000..9f228169
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpni.c
@@ -0,0 +1,1943 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpni.h>
+#include <fsl_dpni_cmd.h>
+
+/**
+ * dpni_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpni_id: DPNI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpni_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpni_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_open *cmd_params;
+
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpni_cmd_open *)cmd.params;
+ cmd_params->dpni_id = cpu_to_le32(dpni_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpni_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_create() - Create the DPNI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPNI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpni_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct dpni_cmd_create *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpni_cmd_create *)cmd.params;
+ cmd_params->options = cpu_to_le32(cfg->options);
+ cmd_params->num_queues = cfg->num_queues;
+ cmd_params->num_tcs = cfg->num_tcs;
+ cmd_params->mac_filter_entries = cfg->mac_filter_entries;
+ cmd_params->vlan_filter_entries = cfg->vlan_filter_entries;
+ cmd_params->qos_entries = cfg->qos_entries;
+ cmd_params->fs_entries = cpu_to_le16(cfg->fs_entries);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpni_destroy() - Destroy the DPNI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct dpni_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ /* set object id to destroy */
+ cmd_params = (struct dpni_cmd_destroy *)cmd.params;
+ cmd_params->dpsw_id = cpu_to_le32(object_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_pools() - Set buffer pools configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Buffer pools configuration
+ *
+ * mandatory for DPNI operation
+ * warning:Allowed only when DPNI is disabled
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_pools_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_pools *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
+ cmd_params->num_dpbp = cfg->num_dpbp;
+ for (i = 0; i < cmd_params->num_dpbp; i++) {
+ cmd_params->pool[i].dpbp_id =
+ cpu_to_le16(cfg->pools[i].dpbp_id);
+ cmd_params->pool[i].priority_mask =
+ cfg->pools[i].priority_mask;
+ cmd_params->buffer_size[i] =
+ cpu_to_le16(cfg->pools[i].buffer_size);
+ cmd_params->backup_pool_mask |=
+ DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
+ }
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_is_enabled() - Check if the DPNI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_is_enabled *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_reset() - Reset the DPNI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state: - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable control's the
+ * overall interrupt state. if the interrupt is disabled no causes will cause
+ * an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_irq_enable() - Get overall interrupt state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t *en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_enable *cmd_params;
+ struct dpni_rsp_get_irq_enable *rsp_params;
+
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @mask: Event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t mask)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_irq_mask() - Get interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @mask: Returned event mask to trigger interrupt
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *mask)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_mask *cmd_params;
+ struct dpni_rsp_get_irq_mask *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
+ return 0;
+}
+
+/**
+ * dpni_get_irq_status() - Get the current status of any pending interrupts.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *status)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_status *cmd_params;
+ struct dpni_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dpni_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t status)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->status = cpu_to_le32(status);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_attributes() - Retrieve DPNI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_attr *rsp_params;
+
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->num_queues = rsp_params->num_queues;
+ attr->num_rx_tcs = rsp_params->num_rx_tcs;
+ attr->num_tx_tcs = rsp_params->num_tx_tcs;
+ attr->mac_filter_entries = rsp_params->mac_filter_entries;
+ attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
+ attr->qos_entries = rsp_params->qos_entries;
+ attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
+ attr->qos_key_size = rsp_params->qos_key_size;
+ attr->fs_key_size = rsp_params->fs_key_size;
+ attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
+
+ return 0;
+}
+
+/**
+ * dpni_set_errors_behavior() - Set errors behavior
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Errors configuration
+ *
+ * This function may be called numerous times with different
+ * error masks
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_error_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_errors_behavior *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
+ cmd_params->errors = cpu_to_le32(cfg->errors);
+ dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
+ dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to retrieve configuration for
+ * @layout: Returns buffer layout attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_buffer_layout *cmd_params;
+ struct dpni_rsp_get_buffer_layout *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
+ cmd_params->qtype = qtype;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
+ layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
+ layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
+ layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
+ layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
+ layout->data_align = le16_to_cpu(rsp_params->data_align);
+ layout->data_head_room = le16_to_cpu(rsp_params->head_room);
+ layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
+
+ return 0;
+}
+
+/**
+ * dpni_set_buffer_layout() - Set buffer layout configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue this configuration applies to
+ * @layout: Buffer layout configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_buffer_layout *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->options = cpu_to_le16(layout->options);
+ dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
+ dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
+ dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
+ cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
+ cmd_params->data_align = cpu_to_le16(layout->data_align);
+ cmd_params->head_room = cpu_to_le16(layout->data_head_room);
+ cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_offload() - Set DPNI offload configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @type: Type of DPNI offload
+ * @config: Offload configuration.
+ * For checksum offloads, non-zero value enables the offload
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_offload type,
+ uint32_t config)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_offload *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
+ cmd_params->dpni_offload = type;
+ cmd_params->config = cpu_to_le32(config);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_offload() - Get DPNI offload configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @type: Type of DPNI offload
+ * @config: Offload configuration.
+ * For checksum offloads, a value of 1 indicates that the
+ * offload is enabled.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_offload type,
+ uint32_t *config)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_offload *cmd_params;
+ struct dpni_rsp_get_offload *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
+ cmd_params->dpni_offload = type;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
+ *config = le32_to_cpu(rsp_params->config);
+
+ return 0;
+}
+
+/**
+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
+ * for enqueue operations
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to receive QDID for
+ * @qdid: Returned virtual QDID value that should be used as an argument
+ * in all enqueue operations
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint16_t *qdid)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_qdid *cmd_params;
+ struct dpni_rsp_get_qdid *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
+ cmd_params->qtype = qtype;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
+ *qdid = le16_to_cpu(rsp_params->qdid);
+
+ return 0;
+}
+
+/**
+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @data_offset: Tx data offset (from start of buffer)
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t *data_offset)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_tx_data_offset *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
+ *data_offset = le16_to_cpu(rsp_params->data_offset);
+
+ return 0;
+}
+
+/**
+ * dpni_set_link_cfg() - set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_link_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_link_cfg *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
+ cmd_params->rate = cpu_to_le32(cfg->rate);
+ cmd_params->options = cpu_to_le64(cfg->options);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_link_state() - Return the link state (either up or down)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @state: Returned link state;
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_link_state *state)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_link_state *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
+ state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
+ state->rate = le32_to_cpu(rsp_params->rate);
+ state->options = le64_to_cpu(rsp_params->options);
+
+ return 0;
+}
+
+/**
+ * dpni_set_max_frame_length() - Set the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in bytes);
+ * frame is discarded if its length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t max_frame_length)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_max_frame_length *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
+ cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_max_frame_length() - Get the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in bytes);
+ * frame is discarded if its length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t *max_frame_length)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_max_frame_length *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
+ *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
+
+ return 0;
+}
+
+/**
+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_multicast_promisc *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_multicast_promisc *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_unicast_promisc *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_unicast_promisc *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_set_primary_mac_addr() - Set the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to set as primary address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_primary_mac_addr *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_primary_mac_addr() - Get the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: Returned MAC address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_primary_mac_addr *rsp_params;
+ int i, err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ mac_addr[5 - i] = rsp_params->mac_addr[i];
+
+ return 0;
+}
+
+/**
+ * dpni_add_mac_addr() - Add MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to add
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_add_mac_addr *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_mac_addr() - Remove MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_remove_mac_addr *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @unicast: Set to '1' to clear unicast addresses
+ * @multicast: Set to '1' to clear multicast addresses
+ *
+ * The primary MAC address is not cleared by this operation.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int unicast,
+ int multicast)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_clear_mac_filters *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
+ dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
+ dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
+ * port the DPNI is attached to
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address of the physical port, if any, otherwise 0
+ *
+ * The primary MAC address is not cleared by this operation.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_port_mac_addr *rsp_params;
+ int i, err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ mac_addr[5 - i] = rsp_params->mac_addr[i];
+
+ return 0;
+}
+
+/**
+ * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en)
+{
+ struct dpni_cmd_enable_vlan_filter *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_enable_vlan_filter *)cmd.params;
+ dpni_set_field(cmd_params->en, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_vlan_id() - Add VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to add
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_vlan_id() - Remove VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_vlan_filters() - Clear all VLAN filters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Traffic class distribution configuration
+ *
+ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpkg_prepare_key_cfg()
+ * first to prepare the key_cfg_iova parameter
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_rx_tc_dist *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ cmd_params->tc_id = tc_id;
+ cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+ dpni_set_field(cmd_params->flags,
+ DIST_MODE,
+ cfg->dist_mode);
+ dpni_set_field(cmd_params->flags,
+ MISS_ACTION,
+ cfg->fs_cfg.miss_action);
+ dpni_set_field(cmd_params->keep_hash_key,
+ KEEP_HASH_KEY,
+ cfg->fs_cfg.keep_hash_key);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_tx_confirmation_mode() - Tx confirmation mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mode: Tx confirmation mode
+ *
+ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not
+ * selected at DPNI creation.
+ * Calling this function with 'mode' set to DPNI_CONF_DISABLE disables all
+ * transmit confirmation (including the private confirmation queues), regardless
+ * of previous settings; Note that in this case, Tx error frames are still
+ * enqueued to the general transmit errors queue.
+ * Calling this function with 'mode' set to DPNI_CONF_SINGLE switches all
+ * Tx confirmations to a shared Tx conf queue. 'index' field in dpni_get_queue
+ * command will be ignored.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_confirmation_mode mode)
+{
+ struct dpni_tx_confirmation_mode *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONFIRMATION_MODE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_tx_confirmation_mode *)cmd.params;
+ cmd_params->confirmation_mode = mode;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_congestion_notification() - Set traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ const struct dpni_congestion_notification_cfg *cfg)
+{
+ struct dpni_cmd_set_congestion_notification *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc_id;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+ dpni_set_field(cmd_params->type_units,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpni_set_field(cmd_params->type_units,
+ CONG_UNITS,
+ cfg->units);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_congestion_notification() - Get traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ struct dpni_congestion_notification_cfg *cfg)
+{
+ struct dpni_rsp_get_congestion_notification *rsp_params;
+ struct dpni_cmd_get_congestion_notification *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc_id;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
+ cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ cfg->dest_cfg.priority = rsp_params->dest_priority;
+ cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
+ DEST_TYPE);
+
+ return 0;
+}
+
+/**
+ * dpni_get_api_version() - Get Data Path Network Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path network interface API
+ * @minor_ver: Minor version of data path network interface API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct dpni_rsp_get_api_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpni_set_queue() - Set queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported, although
+ * the command is ignored for Tx
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated for the
+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
+ * @options: A combination of DPNI_QUEUE_OPT_ values that control what
+ * configuration options are set on the queue
+ * @queue: Queue structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ uint8_t options,
+ const struct dpni_queue *queue)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_queue *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+ cmd_params->options = options;
+ cmd_params->dest_id = cpu_to_le32(queue->destination.id);
+ cmd_params->dest_prio = queue->destination.priority;
+ dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
+ dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
+ dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
+ queue->destination.hold_active);
+ cmd_params->flc = cpu_to_le64(queue->flc.value);
+ cmd_params->user_context = cpu_to_le64(queue->user_context);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_queue() - Get queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated for the
+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
+ * @queue: Queue configuration structure
+ * @qid: Queue identification
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ struct dpni_queue *queue,
+ struct dpni_queue_id *qid)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_queue *cmd_params;
+ struct dpni_rsp_get_queue *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
+ queue->destination.id = le32_to_cpu(rsp_params->dest_id);
+ queue->destination.priority = rsp_params->dest_prio;
+ queue->destination.type = dpni_get_field(rsp_params->flags,
+ DEST_TYPE);
+ queue->flc.stash_control = dpni_get_field(rsp_params->flags,
+ STASH_CTRL);
+ queue->destination.hold_active = dpni_get_field(rsp_params->flags,
+ HOLD_ACTIVE);
+ queue->flc.value = le64_to_cpu(rsp_params->flc);
+ queue->user_context = le64_to_cpu(rsp_params->user_context);
+ qid->fqid = le32_to_cpu(rsp_params->fqid);
+ qid->qdbin = le16_to_cpu(rsp_params->qdbin);
+
+ return 0;
+}
+
+/**
+ * dpni_get_statistics() - Get DPNI statistics
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @page: Selects the statistics page to retrieve, see
+ * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
+ * @param: Custom parameter for some pages used to select
+ * a certain statistic source, for example the TC.
+ * @stat: Structure containing the statistics
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t page,
+ uint8_t param,
+ union dpni_statistics *stat)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_statistics *cmd_params;
+ struct dpni_rsp_get_statistics *rsp_params;
+ int i, err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
+ cmd_params->page_number = page;
+ cmd_params->param = param;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
+ for (i = 0; i < DPNI_STATISTICS_CNT; i++)
+ stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
+
+ return 0;
+}
+
+/**
+ * dpni_reset_statistics() - Clears DPNI statistics
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_taildrop() - Set taildrop per queue or TC
+ *
+ * Setting a per-TC taildrop (cg_point = DPNI_CP_GROUP) will reset any current
+ * congestion notification or early drop (WRED) configuration previously applied
+ * to the same TC.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point, DPNI_CP_QUEUE is only supported in
+ * combination with DPNI_QUEUE_RX.
+ * @q_type: Queue type, can be DPNI_QUEUE_RX or DPNI_QUEUE_TX.
+ * @tc: Traffic class to apply this taildrop to
+ * @q_index: Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution.
+ * Ignored if CONGESTION_POINT is not DPNI_CP_QUEUE.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ struct dpni_taildrop *taildrop)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_taildrop *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
+ cmd_params->congestion_point = cg_point;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+ cmd_params->units = taildrop->units;
+ cmd_params->threshold = cpu_to_le32(taildrop->threshold);
+ dpni_set_field(cmd_params->enable_oal_lo, ENABLE, taildrop->enable);
+ dpni_set_field(cmd_params->enable_oal_lo, OAL_LO, taildrop->oal);
+ dpni_set_field(cmd_params->oal_hi,
+ OAL_HI,
+ taildrop->oal >> DPNI_OAL_LO_SIZE);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_taildrop() - Get taildrop information
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point
+ * @q_type: Queue type on which the taildrop is configured.
+ * Only Rx queues are supported for now
+ * @tc: Traffic class to apply this taildrop to
+ * @q_index: Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ struct dpni_taildrop *taildrop)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_taildrop *cmd_params;
+ struct dpni_rsp_get_taildrop *rsp_params;
+ uint8_t oal_lo, oal_hi;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
+ cmd_params->congestion_point = cg_point;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
+ taildrop->enable = dpni_get_field(rsp_params->enable_oal_lo, ENABLE);
+ taildrop->units = rsp_params->units;
+ taildrop->threshold = le32_to_cpu(rsp_params->threshold);
+ oal_lo = dpni_get_field(rsp_params->enable_oal_lo, OAL_LO);
+ oal_hi = dpni_get_field(rsp_params->oal_hi, OAL_HI);
+ taildrop->oal = oal_hi << DPNI_OAL_LO_SIZE | oal_lo;
+
+ /* Fill the first 4 bits, 'oal' is a 2's complement value of 12 bits */
+ if (taildrop->oal >= 0x0800)
+ taildrop->oal |= 0xF000;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h
new file mode 100644
index 00000000..4de70f30
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef __FSL_DPKG_H_
+#define __FSL_DPKG_H_
+
+#include <fsl_net.h>
+
+/* Data Path Key Generator API
+ * Contains initialization APIs and runtime APIs for the Key Generator
+ */
+
+/** Key Generator properties */
+
+/**
+ * Number of masks per key extraction
+ */
+#define DPKG_NUM_OF_MASKS 4
+/**
+ * Number of extractions per key profile
+ */
+#define DPKG_MAX_NUM_OF_EXTRACTS 10
+
+/**
+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
+ * @DPKG_FULL_FIELD: Extract a full field
+ */
+enum dpkg_extract_from_hdr_type {
+ DPKG_FROM_HDR = 0,
+ DPKG_FROM_FIELD = 1,
+ DPKG_FULL_FIELD = 2
+};
+
+/**
+ * enum dpkg_extract_type - Enumeration for selecting extraction type
+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
+ * e.g. can be used to extract header existence;
+ * please refer to 'Parse Result definition' section in the parser BG
+ */
+enum dpkg_extract_type {
+ DPKG_EXTRACT_FROM_HDR = 0,
+ DPKG_EXTRACT_FROM_DATA = 1,
+ DPKG_EXTRACT_FROM_PARSE = 3
+};
+
+/**
+ * struct dpkg_mask - A structure for defining a single extraction mask
+ * @mask: Byte mask for the extracted content
+ * @offset: Offset within the extracted content
+ */
+struct dpkg_mask {
+ uint8_t mask;
+ uint8_t offset;
+};
+
+/* Macros for accessing command fields smaller than 1byte */
+#define DPKG_MASK(field) \
+ GENMASK(DPKG_##field##_SHIFT + DPKG_##field##_SIZE - 1, \
+ DPKG_##field##_SHIFT)
+#define dpkg_set_field(var, field, val) \
+ ((var) |= (((val) << DPKG_##field##_SHIFT) & DPKG_MASK(field)))
+#define dpkg_get_field(var, field) \
+ (((var) & DPKG_MASK(field)) >> DPKG_##field##_SHIFT)
+
+/**
+ * struct dpkg_extract - A structure for defining a single extraction
+ * @type: Determines how the union below is interpreted:
+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
+ * @extract: Selects extraction method
+ * @num_of_byte_masks: Defines the number of valid entries in the array below;
+ * This is also the number of bytes to be used as masks
+ * @masks: Masks parameters
+ */
+struct dpkg_extract {
+ enum dpkg_extract_type type;
+ /**
+ * union extract - Selects extraction method
+ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ */
+ union {
+ /**
+ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @prot: Any of the supported headers
+ * @type: Defines the type of header extraction:
+ * DPKG_FROM_HDR: use size & offset below;
+ * DPKG_FROM_FIELD: use field, size and offset below;
+ * DPKG_FULL_FIELD: use field below
+ * @field: One of the supported fields (NH_FLD_)
+ *
+ * @size: Size in bytes
+ * @offset: Byte offset
+ * @hdr_index: Clear for cases not listed below;
+ * Used for protocols that may have more than a single
+ * header, 0 indicates an outer header;
+ * Supported protocols (possible values):
+ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
+ * NET_PROT_IP(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
+ */
+
+ struct {
+ enum net_prot prot;
+ enum dpkg_extract_from_hdr_type type;
+ uint32_t field;
+ uint8_t size;
+ uint8_t offset;
+ uint8_t hdr_index;
+ } from_hdr;
+ /**
+ * struct from_data
+ * Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @size: Size in bytes
+ * @offset: Byte offset
+ */
+ struct {
+ uint8_t size;
+ uint8_t offset;
+ } from_data;
+
+ /**
+ * struct from_parse
+ * Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ * @size: Size in bytes
+ * @offset: Byte offset
+ */
+ struct {
+ uint8_t size;
+ uint8_t offset;
+ } from_parse;
+ } extract;
+
+ uint8_t num_of_byte_masks;
+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
+};
+
+/**
+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
+ * profile (rule)
+ * @num_extracts: Defines the number of valid entries in the array below
+ * @extracts: Array of required extractions
+ */
+struct dpkg_profile_cfg {
+ uint8_t num_extracts;
+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
+};
+
+/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
+ * key_cfg_iova)
+ */
+struct dpni_mask_cfg {
+ uint8_t mask;
+ uint8_t offset;
+};
+
+#define DPKG_EFH_TYPE_SHIFT 0
+#define DPKG_EFH_TYPE_SIZE 4
+#define DPKG_EXTRACT_TYPE_SHIFT 0
+#define DPKG_EXTRACT_TYPE_SIZE 4
+
+struct dpni_dist_extract {
+ /* word 0 */
+ uint8_t prot;
+ /* EFH type stored in the 4 least significant bits */
+ uint8_t efh_type;
+ uint8_t size;
+ uint8_t offset;
+ uint32_t field;
+ /* word 1 */
+ uint8_t hdr_index;
+ uint8_t constant;
+ uint8_t num_of_repeats;
+ uint8_t num_of_byte_masks;
+ /* Extraction type is stored in the 4 LSBs */
+ uint8_t extract_type;
+ uint8_t pad[3];
+ /* word 2 */
+ struct dpni_mask_cfg masks[4];
+};
+
+struct dpni_ext_set_rx_tc_dist {
+ /* extension word 0 */
+ uint8_t num_extracts;
+ uint8_t pad[7];
+ /* words 1..25 */
+ struct dpni_dist_extract extracts[10];
+};
+
+int dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
+ uint8_t *key_cfg_buf);
+
+#endif /* __FSL_DPKG_H_ */
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h
new file mode 100644
index 00000000..f0edcd27
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -0,0 +1,1135 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef __FSL_DPNI_H
+#define __FSL_DPNI_H
+
+#include <fsl_dpkg.h>
+
+struct fsl_mc_io;
+
+/**
+ * Data Path Network Interface API
+ * Contains initialization APIs and runtime control APIs for DPNI
+ */
+
+/** General DPNI macros */
+
+/**
+ * Maximum number of traffic classes
+ */
+#define DPNI_MAX_TC 8
+/**
+ * Maximum number of buffer pools per DPNI
+ */
+#define DPNI_MAX_DPBP 8
+/**
+ * Maximum number of storage-profiles per DPNI
+ */
+#define DPNI_MAX_SP 2
+
+/**
+ * All traffic classes considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TCS (uint8_t)(-1)
+/**
+ * All flows within traffic class considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TC_FLOWS (uint16_t)(-1)
+
+/**
+ * Tx traffic is always released to a buffer pool on transmit, there are no
+ * resources allocated to have the frames confirmed back to the source after
+ * transmission.
+ */
+#define DPNI_OPT_TX_FRM_RELEASE 0x000001
+/**
+ * Disables support for MAC address filtering for addresses other than primary
+ * MAC address. This affects both unicast and multicast. Promiscuous mode can
+ * still be enabled/disabled for both unicast and multicast. If promiscuous mode
+ * is disabled, only traffic matching the primary MAC address will be accepted.
+ */
+#define DPNI_OPT_NO_MAC_FILTER 0x000002
+/**
+ * Allocate policers for this DPNI. They can be used to rate-limit traffic per
+ * traffic class (TC) basis.
+ */
+#define DPNI_OPT_HAS_POLICING 0x000004
+/**
+ * Congestion can be managed in several ways, allowing the buffer pool to
+ * deplete on ingress, taildrop on each queue or use congestion groups for sets
+ * of queues. If set, it configures a single congestion groups across all TCs.
+ * If reset, a congestion group is allocated for each TC. Only relevant if the
+ * DPNI has multiple traffic classes.
+ */
+#define DPNI_OPT_SHARED_CONGESTION 0x000008
+/**
+ * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
+ * look-ups are exact match. Note that TCAM is not available on LS1088 and its
+ * variants. Setting this bit on these SoCs will trigger an error.
+ */
+#define DPNI_OPT_HAS_KEY_MASKING 0x000010
+/**
+ * Disables the flow steering table.
+ */
+#define DPNI_OPT_NO_FS 0x000020
+
+int dpni_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpni_id,
+ uint16_t *token);
+
+int dpni_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpni_cfg - Structure representing DPNI configuration
+ * @mac_addr: Primary MAC address
+ * @adv: Advanced parameters; default is all zeros;
+ * use this structure to change default settings
+ */
+struct dpni_cfg {
+ /**
+ * @options: Any combination of the following options:
+ * DPNI_OPT_TX_FRM_RELEASE
+ * DPNI_OPT_NO_MAC_FILTER
+ * DPNI_OPT_HAS_POLICING
+ * DPNI_OPT_SHARED_CONGESTION
+ * DPNI_OPT_HAS_KEY_MASKING
+ * DPNI_OPT_NO_FS
+ * @fs_entries: Number of entries in the flow steering table.
+ * This table is used to select the ingress queue for
+ * ingress traffic, targeting a GPP core or another.
+ * In addition it can be used to discard traffic that
+ * matches the set rule. It is either an exact match table
+ * or a TCAM table, depending on DPNI_OPT_ HAS_KEY_MASKING
+ * bit in OPTIONS field. This field is ignored if
+ * DPNI_OPT_NO_FS bit is set in OPTIONS field. Otherwise,
+ * value 0 defaults to 64. Maximum supported value is 1024.
+ * Note that the total number of entries is limited on the
+ * SoC to as low as 512 entries if TCAM is used.
+ * @vlan_filter_entries: Number of entries in the VLAN address filtering
+ * table. This is an exact match table used to filter
+ * ingress traffic based on VLAN IDs. Value 0 disables VLAN
+ * filtering. Maximum supported value is 16.
+ * @mac_filter_entries: Number of entries in the MAC address filtering
+ * table. This is an exact match table and allows both
+ * unicast and multicast entries. The primary MAC address
+ * of the network interface is not part of this table,
+ * this contains only entries in addition to it. This
+ * field is ignored if DPNI_OPT_ NO_MAC_FILTER is set in
+ * OPTIONS field. Otherwise, value 0 defaults to 80.
+ * Maximum supported value is 80.
+ * @num_queues: Number of Tx and Rx queues used for traffic
+ * distribution. This is orthogonal to QoS and is only
+ * used to distribute traffic to multiple GPP cores.
+ * This configuration affects the number of Tx queues
+ * (logical FQs, all associated with a single CEETM queue),
+ * Rx queues and Tx confirmation queues, if applicable.
+ * Value 0 defaults to one queue. Maximum supported value
+ * is 8.
+ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
+ * TCs can have different priority levels for the purpose
+ * of Tx scheduling (see DPNI_SET_TX_SELECTION), different
+ * BPs (DPNI_ SET_POOLS), policers. There are dedicated QM
+ * queues for traffic classes (including class queues on
+ * Tx). Value 0 defaults to one TC. Maximum supported value
+ * is 8.
+ * @qos_entries: Number of entries in the QoS classification table. This
+ * table is used to select the TC for ingress traffic. It
+ * is either an exact match or a TCAM table, depending on
+ * DPNI_OPT_ HAS_KEY_MASKING bit in OPTIONS field. This
+ * field is ignored if the DPNI has a single TC. Otherwise,
+ * a value of 0 defaults to 64. Maximum supported value
+ * is 64.
+ */
+ uint32_t options;
+ uint16_t fs_entries;
+ uint8_t vlan_filter_entries;
+ uint8_t mac_filter_entries;
+ uint8_t num_queues;
+ uint8_t num_tcs;
+ uint8_t qos_entries;
+};
+
+int dpni_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpni_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpni_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+/**
+ * struct dpni_pools_cfg - Structure representing buffer pools configuration
+ * @num_dpbp: Number of DPBPs
+ * @pools: Array of buffer pools parameters; The number of valid entries
+ * must match 'num_dpbp' value
+ */
+struct dpni_pools_cfg {
+ uint8_t num_dpbp;
+ /**
+ * struct pools - Buffer pools parameters
+ * @dpbp_id: DPBP object ID
+ * @priority: priority mask that indicates TC's used with this buffer.
+ * I set to 0x00 MC will assume value 0xff.
+ * @buffer_size: Buffer size
+ * @backup_pool: Backup pool
+ */
+ struct {
+ int dpbp_id;
+ uint8_t priority_mask;
+ uint16_t buffer_size;
+ int backup_pool;
+ } pools[DPNI_MAX_DPBP];
+};
+
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_pools_cfg *cfg);
+
+int dpni_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpni_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpni_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * DPNI IRQ Index and Events
+ */
+
+/**
+ * IRQ index
+ */
+#define DPNI_IRQ_INDEX 0
+/**
+ * IRQ event - indicates a change in link state
+ */
+#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
+
+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t en);
+
+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t *en);
+
+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t mask);
+
+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *mask);
+
+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *status);
+
+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t status);
+
+/**
+ * struct dpni_attr - Structure representing DPNI attributes
+ * @options: Any combination of the following options:
+ * DPNI_OPT_TX_FRM_RELEASE
+ * DPNI_OPT_NO_MAC_FILTER
+ * DPNI_OPT_HAS_POLICING
+ * DPNI_OPT_SHARED_CONGESTION
+ * DPNI_OPT_HAS_KEY_MASKING
+ * DPNI_OPT_NO_FS
+ * @num_queues: Number of Tx and Rx queues used for traffic distribution.
+ * @num_rx_tcs: Number of RX traffic classes (TCs), reserved for the DPNI.
+ * @num_tx_tcs: Number of TX traffic classes (TCs), reserved for the DPNI.
+ * @mac_filter_entries: Number of entries in the MAC address filtering
+ * table.
+ * @vlan_filter_entries: Number of entries in the VLAN address filtering
+ * table.
+ * @qos_entries: Number of entries in the QoS classification table.
+ * @fs_entries: Number of entries in the flow steering table.
+ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
+ * than this when adding QoS entries will result
+ * in an error.
+ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
+ * key larger than this when composing the hash + FS key
+ * will result in an error.
+ * @wriop_version: Version of WRIOP HW block.
+ * The 3 version values are stored on 6, 5, 5 bits
+ * respectively.
+ * Values returned:
+ * - 0x400 - WRIOP version 1.0.0, used on LS2080 and
+ * variants,
+ * - 0x421 - WRIOP version 1.1.1, used on LS2088 and
+ * variants,
+ * - 0x422 - WRIOP version 1.1.2, used on LS1088 and
+ * variants.
+ */
+struct dpni_attr {
+ uint32_t options;
+ uint8_t num_queues;
+ uint8_t num_rx_tcs;
+ uint8_t num_tx_tcs;
+ uint8_t mac_filter_entries;
+ uint8_t vlan_filter_entries;
+ uint8_t qos_entries;
+ uint16_t fs_entries;
+ uint8_t qos_key_size;
+ uint8_t fs_key_size;
+ uint16_t wriop_version;
+};
+
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_attr *attr);
+
+/**
+ * DPNI errors
+ */
+
+/**
+ * Extract out of frame header error
+ */
+#define DPNI_ERROR_EOFHE 0x00020000
+/**
+ * Frame length error
+ */
+#define DPNI_ERROR_FLE 0x00002000
+/**
+ * Frame physical error
+ */
+#define DPNI_ERROR_FPE 0x00001000
+/**
+ * Parsing header error
+ */
+#define DPNI_ERROR_PHE 0x00000020
+/**
+ * Parser L3 checksum error
+ */
+#define DPNI_ERROR_L3CE 0x00000004
+/**
+ * Parser L3 checksum error
+ */
+#define DPNI_ERROR_L4CE 0x00000001
+
+/**
+ * enum dpni_error_action - Defines DPNI behavior for errors
+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
+ */
+enum dpni_error_action {
+ DPNI_ERROR_ACTION_DISCARD = 0,
+ DPNI_ERROR_ACTION_CONTINUE = 1,
+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
+};
+
+/**
+ * struct dpni_error_cfg - Structure representing DPNI errors treatment
+ * @errors: Errors mask; use 'DPNI_ERROR__<X>
+ * @error_action: The desired action for the errors mask
+ * @set_frame_annotation: Set to '1' to mark the errors in frame
+ * annotation status (FAS); relevant only
+ * for the non-discard action
+ */
+struct dpni_error_cfg {
+ uint32_t errors;
+ enum dpni_error_action error_action;
+ int set_frame_annotation;
+};
+
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_error_cfg *cfg);
+
+/**
+ * DPNI buffer layout modification options
+ */
+
+/**
+ * Select to modify the time-stamp setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
+/**
+ * Select to modify the parser-result setting; not applicable for Tx
+ */
+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
+/**
+ * Select to modify the frame-status setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
+/**
+ * Select to modify the private-data-size setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
+/**
+ * Select to modify the data-alignment setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
+/**
+ * Select to modify the data-head-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
+/**
+ * Select to modify the data-tail-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
+
+/**
+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
+ * @options: Flags representing the suggested modifications to the
+ * buffer layout;
+ * Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
+ * @pass_timestamp: Pass timestamp value
+ * @pass_parser_result: Pass parser results
+ * @pass_frame_status: Pass frame status
+ * @private_data_size: Size kept for private data (in bytes)
+ * @data_align: Data alignment
+ * @data_head_room: Data head room
+ * @data_tail_room: Data tail room
+ */
+struct dpni_buffer_layout {
+ uint32_t options;
+ int pass_timestamp;
+ int pass_parser_result;
+ int pass_frame_status;
+ uint16_t private_data_size;
+ uint16_t data_align;
+ uint16_t data_head_room;
+ uint16_t data_tail_room;
+};
+
+/**
+ * enum dpni_queue_type - Identifies a type of queue targeted by the command
+ * @DPNI_QUEUE_RX: Rx queue
+ * @DPNI_QUEUE_TX: Tx queue
+ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
+ * @DPNI_QUEUE_RX_ERR: Rx error queue
+ */
+enum dpni_queue_type {
+ DPNI_QUEUE_RX,
+ DPNI_QUEUE_TX,
+ DPNI_QUEUE_TX_CONFIRM,
+ DPNI_QUEUE_RX_ERR,
+};
+
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout);
+
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout);
+
+/**
+ * enum dpni_offload - Identifies a type of offload targeted by the command
+ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
+ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
+ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
+ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
+ * @DPNI_OPT_FLCTYPE_HASH: flow context will be generated by WRIOP for AIOP or
+ * for CPU
+ */
+enum dpni_offload {
+ DPNI_OFF_RX_L3_CSUM,
+ DPNI_OFF_RX_L4_CSUM,
+ DPNI_OFF_TX_L3_CSUM,
+ DPNI_OFF_TX_L4_CSUM,
+ DPNI_FLCTYPE_HASH,
+};
+
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_offload type,
+ uint32_t config);
+
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_offload type,
+ uint32_t *config);
+
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint16_t *qdid);
+
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t *data_offset);
+
+#define DPNI_STATISTICS_CNT 7
+
+union dpni_statistics {
+ /**
+ * struct page_0 - Page_0 statistics structure
+ * @ingress_all_frames: Ingress frame count
+ * @ingress_all_bytes: Ingress byte count
+ * @ingress_multicast_frames: Ingress multicast frame count
+ * @ingress_multicast_bytes: Ingress multicast byte count
+ * @ingress_broadcast_frames: Ingress broadcast frame count
+ * @ingress_broadcast_bytes: Ingress broadcast byte count
+ */
+ struct {
+ uint64_t ingress_all_frames;
+ uint64_t ingress_all_bytes;
+ uint64_t ingress_multicast_frames;
+ uint64_t ingress_multicast_bytes;
+ uint64_t ingress_broadcast_frames;
+ uint64_t ingress_broadcast_bytes;
+ } page_0;
+ /**
+ * struct page_1 - Page_1 statistics structure
+ * @egress_all_frames: Egress frame count
+ * @egress_all_bytes: Egress byte count
+ * @egress_multicast_frames: Egress multicast frame count
+ * @egress_multicast_bytes: Egress multicast byte count
+ * @egress_broadcast_frames: Egress broadcast frame count
+ * @egress_broadcast_bytes: Egress broadcast byte count
+ */
+ struct {
+ uint64_t egress_all_frames;
+ uint64_t egress_all_bytes;
+ uint64_t egress_multicast_frames;
+ uint64_t egress_multicast_bytes;
+ uint64_t egress_broadcast_frames;
+ uint64_t egress_broadcast_bytes;
+ } page_1;
+ /**
+ * struct page_2 - Page_2 statistics structure
+ * @ingress_filtered_frames: Ingress filtered frame count
+ * @ingress_discarded_frames: Ingress discarded frame count
+ * @ingress_nobuffer_discards: Ingress discarded frame count due to
+ * lack of buffers
+ * @egress_discarded_frames: Egress discarded frame count
+ * @egress_confirmed_frames: Egress confirmed frame count
+ */
+ struct {
+ uint64_t ingress_filtered_frames;
+ uint64_t ingress_discarded_frames;
+ uint64_t ingress_nobuffer_discards;
+ uint64_t egress_discarded_frames;
+ uint64_t egress_confirmed_frames;
+ } page_2;
+ /**
+ * struct page_3 - Page_3 statistics structure with values for the
+ * selected TC
+ * @ceetm_dequeue_bytes: Cumulative count of the number of bytes
+ * dequeued
+ * @ceetm_dequeue_frames: Cumulative count of the number of frames
+ * dequeued
+ * @ceetm_reject_bytes: Cumulative count of the number of bytes in all
+ * frames whose enqueue was rejected
+ * @ceetm_reject_frames: Cumulative count of all frame enqueues rejected
+ */
+ struct {
+ uint64_t ceetm_dequeue_bytes;
+ uint64_t ceetm_dequeue_frames;
+ uint64_t ceetm_reject_bytes;
+ uint64_t ceetm_reject_frames;
+ } page_3;
+ /**
+ * struct raw - raw statistics structure, used to index counters
+ */
+ struct {
+ uint64_t counter[DPNI_STATISTICS_CNT];
+ } raw;
+};
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
+/**
+ * Enable half-duplex mode
+ */
+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
+/**
+ * Enable pause frames
+ */
+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
+/**
+ * Enable a-symmetric pause frames
+ */
+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+/**
+ * Enable priority flow control pause frames
+ */
+#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
+
+/**
+ * struct - Structure representing DPNI link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ */
+struct dpni_link_cfg {
+ uint32_t rate;
+ uint64_t options;
+};
+
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_link_cfg *cfg);
+
+/**
+ * struct dpni_link_state - Structure representing DPNI link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ * @up: Link state; '0' for down, '1' for up
+ */
+struct dpni_link_state {
+ uint32_t rate;
+ uint64_t options;
+ int up;
+};
+
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_link_state *state);
+
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t max_frame_length);
+
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t *max_frame_length);
+
+int dpni_set_mtu(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t mtu);
+
+int dpni_get_mtu(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t *mtu);
+
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en);
+
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en);
+
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
+
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6]);
+
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
+
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
+
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int unicast,
+ int multicast);
+
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6]);
+
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en);
+
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id);
+
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id);
+
+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * enum dpni_dist_mode - DPNI distribution mode
+ * @DPNI_DIST_MODE_NONE: No distribution
+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
+ */
+enum dpni_dist_mode {
+ DPNI_DIST_MODE_NONE = 0,
+ DPNI_DIST_MODE_HASH = 1,
+ DPNI_DIST_MODE_FS = 2
+};
+
+/**
+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
+ */
+enum dpni_fs_miss_action {
+ DPNI_FS_MISS_DROP = 0,
+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
+ DPNI_FS_MISS_HASH = 2
+};
+
+/**
+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
+ * @miss_action: Miss action selection
+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
+ */
+struct dpni_fs_tbl_cfg {
+ enum dpni_fs_miss_action miss_action;
+ uint16_t default_flow_id;
+ char keep_hash_key;
+};
+
+/**
+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
+ * @dist_size: Set the distribution size;
+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
+ * 112,128,192,224,256,384,448,512,768,896,1024
+ * @dist_mode: Distribution mode
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * the extractions to be used for the distribution key by calling
+ * dpkg_prepare_key_cfg() relevant only when
+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
+ * @fs_cfg: Flow Steering table configuration; only relevant if
+ * 'dist_mode = DPNI_DIST_MODE_FS'
+ */
+struct dpni_rx_tc_dist_cfg {
+ uint16_t dist_size;
+ enum dpni_dist_mode dist_mode;
+ uint64_t key_cfg_iova;
+ struct dpni_fs_tbl_cfg fs_cfg;
+};
+
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg);
+
+/**
+ * enum dpni_congestion_unit - DPNI congestion units
+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpni_congestion_unit {
+ DPNI_CONGESTION_UNIT_BYTES = 0,
+ DPNI_CONGESTION_UNIT_FRAMES
+};
+
+
+/**
+ * enum dpni_dest - DPNI destination types
+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
+ * does not generate FQDAN notifications; user is expected to
+ * dequeue from the queue based on polling or other user-defined
+ * method
+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON
+ * object; user is expected to dequeue from the DPCON channel
+ */
+enum dpni_dest {
+ DPNI_DEST_NONE = 0,
+ DPNI_DEST_DPIO = 1,
+ DPNI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPNI_DEST_NONE' option
+ */
+struct dpni_dest_cfg {
+ enum dpni_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/* DPNI congestion options */
+
+/**
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
+ */
+#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
+ */
+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
+/**
+ * This congestion will trigger flow control or priority flow control. This
+ * will have effect only if flow control is enabled with dpni_set_link_cfg()
+ */
+#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
+
+/**
+ * struct dpni_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: units type
+ * @threshold_entry: above this threshold we enter a congestion state.
+ * set it to '0' to disable it
+ * @threshold_exit: below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
+ * contained in 'options'
+ * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
+ */
+
+struct dpni_congestion_notification_cfg {
+ enum dpni_congestion_unit units;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+ uint64_t message_ctx;
+ uint64_t message_iova;
+ struct dpni_dest_cfg dest_cfg;
+ uint16_t notification_mode;
+};
+
+int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ const struct dpni_congestion_notification_cfg *cfg);
+
+int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ struct dpni_congestion_notification_cfg *cfg);
+
+/* DPNI FLC stash options */
+
+/**
+ * stashes the whole annotation area (up to 192 bytes)
+ */
+#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001
+
+/**
+ * struct dpni_queue - Queue structure
+ * @user_context: User data, presented to the user along with any frames
+ * from this queue. Not relevant for Tx queues.
+ */
+struct dpni_queue {
+ /**
+ * struct destination - Destination structure
+ * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
+ * Identifies either a DPIO or a DPCON object.
+ * Not relevant for Tx queues.
+ * @type: May be one of the following:
+ * 0 - No destination, queue can be manually
+ * queried, but will not push traffic or
+ * notifications to a DPIO;
+ * 1 - The destination is a DPIO. When traffic
+ * becomes available in the queue a FQDAN
+ * (FQ data available notification) will be
+ * generated to selected DPIO;
+ * 2 - The destination is a DPCON. The queue is
+ * associated with a DPCON object for the
+ * purpose of scheduling between multiple
+ * queues. The DPCON may be independently
+ * configured to generate notifications.
+ * Not relevant for Tx queues.
+ * @hold_active: Hold active, maintains a queue scheduled for longer
+ * in a DPIO during dequeue to reduce spread of traffic.
+ * Only relevant if queues are
+ * not affined to a single DPIO.
+ */
+ struct {
+ uint16_t id;
+ enum dpni_dest type;
+ char hold_active;
+ uint8_t priority;
+ } destination;
+ uint64_t user_context;
+ /**
+ * struct flc - FD FLow Context structure
+ * @value: Default FLC value for traffic dequeued from
+ * this queue. Please check description of FD
+ * structure for more information.
+ * Note that FLC values set using dpni_add_fs_entry,
+ * if any, take precedence over values per queue.
+ * @stash_control: Boolean, indicates whether the 6 lowest
+ * - significant bits are used for stash control.
+ * significant bits are used for stash control. If set, the 6
+ * least significant bits in value are interpreted as follows:
+ * - bits 0-1: indicates the number of 64 byte units of context
+ * that are stashed. FLC value is interpreted as a memory address
+ * in this case, excluding the 6 LS bits.
+ * - bits 2-3: indicates the number of 64 byte units of frame
+ * annotation to be stashed. Annotation is placed at FD[ADDR].
+ * - bits 4-5: indicates the number of 64 byte units of frame
+ * data to be stashed. Frame data is placed at FD[ADDR] +
+ * FD[OFFSET].
+ * For more details check the Frame Descriptor section in the
+ * hardware documentation.
+ */
+ struct {
+ uint64_t value;
+ char stash_control;
+ } flc;
+};
+
+/**
+ * struct dpni_queue_id - Queue identification, used for enqueue commands
+ * or queue control
+ * @fqid: FQID used for enqueueing to and/or configuration of this
+ * specific FQ
+ * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI.
+ * Only relevant for Tx queues.
+ */
+struct dpni_queue_id {
+ uint32_t fqid;
+ uint16_t qdbin;
+};
+
+/**
+ * enum dpni_confirmation_mode - Defines DPNI options supported for Tx
+ * confirmation
+ * @DPNI_CONF_AFFINE: For each Tx queue set associated with a sender there is
+ * an affine Tx Confirmation queue
+ * @DPNI_CONF_SINGLE: All Tx queues are associated with a single Tx
+ * confirmation queue
+ * @DPNI_CONF_DISABLE: Tx frames are not confirmed. This must be associated
+ * with proper FD set-up to have buffers release to a Buffer Pool, otherwise
+ * buffers will be leaked
+ */
+enum dpni_confirmation_mode {
+ DPNI_CONF_AFFINE,
+ DPNI_CONF_SINGLE,
+ DPNI_CONF_DISABLE,
+};
+
+int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_confirmation_mode mode);
+
+int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_confirmation_mode *mode);
+
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+/**
+ * Set User Context
+ */
+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Set queue destination configuration
+ */
+#define DPNI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Set FD[FLC] configuration for traffic on this queue. Note that FLC values
+ * set with dpni_add_fs_entry, if any, take precedence over values per queue.
+ */
+#define DPNI_QUEUE_OPT_FLC 0x00000004
+
+/**
+ * Set the queue to hold active mode. This prevents the queue from being
+ * rescheduled between DPIOs while it carries traffic and is active on one
+ * DPNI. Can help reduce reordering when servicing one queue on multiple
+ * CPUs, but the queue is also less likely to push data to multiple CPUs
+ * especially when congested.
+ */
+#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
+
+int dpni_set_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ uint8_t options,
+ const struct dpni_queue *queue);
+
+int dpni_get_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ struct dpni_queue *queue,
+ struct dpni_queue_id *qid);
+
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t page,
+ uint8_t param,
+ union dpni_statistics *stat);
+
+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * enum dpni_congestion_point - Structure representing congestion point
+ * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
+ * QUEUE_INDEX
+ * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used
+ * to define the DPNI this can be either per
+ * TC (default) or per interface
+ * (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
+ * QUEUE_INDEX is ignored if this type is used.
+ */
+enum dpni_congestion_point {
+ DPNI_CP_QUEUE,
+ DPNI_CP_GROUP,
+};
+
+/**
+ * struct dpni_taildrop - Structure representing the taildrop
+ * @enable: Indicates whether the taildrop is active or not.
+ * @units: Indicates the unit of THRESHOLD. Queue taildrop only
+ * supports byte units, this field is ignored and
+ * assumed = 0 if CONGESTION_POINT is 0.
+ * @threshold: Threshold value, in units identified by UNITS field. Value 0
+ * cannot be used as a valid taildrop threshold,
+ * THRESHOLD must be > 0 if the taildrop is
+ * enabled.
+ * @oal : Overhead Accounting Length, a 12-bit, 2's complement value
+ * with range (-2048 to +2047) representing a fixed per-frame
+ * overhead to be added to the actual length of a frame when
+ * performing WRED and tail drop calculations and threshold
+ * comparisons.
+ */
+struct dpni_taildrop {
+ char enable;
+ enum dpni_congestion_unit units;
+ uint32_t threshold;
+ int16_t oal;
+};
+
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ uint8_t tc,
+ uint8_t q_index,
+ struct dpni_taildrop *taildrop);
+
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ uint8_t tc,
+ uint8_t q_index,
+ struct dpni_taildrop *taildrop);
+#endif /* __FSL_DPNI_H */
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
new file mode 100644
index 00000000..eb3e9987
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -0,0 +1,605 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef _FSL_DPNI_CMD_H
+#define _FSL_DPNI_CMD_H
+
+/* DPNI Version */
+#define DPNI_VER_MAJOR 7
+#define DPNI_VER_MINOR 3
+
+#define DPNI_CMD_BASE_VERSION 1
+#define DPNI_CMD_VERSION_2 2
+#define DPNI_CMD_ID_OFFSET 4
+
+#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
+#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_2)
+
+/* Command IDs */
+#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
+#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
+#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
+#define DPNI_CMDID_DESTROY DPNI_CMD(0x981)
+#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
+
+#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
+#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
+#define DPNI_CMDID_GET_ATTR DPNI_CMD_V2(0x004)
+#define DPNI_CMDID_RESET DPNI_CMD(0x005)
+#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
+
+#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
+#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
+#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
+#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
+#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
+#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
+
+#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200)
+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
+
+#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
+#define DPNI_CMDID_GET_SP_INFO DPNI_CMD(0x211)
+#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
+#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
+#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
+
+#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
+#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
+#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
+#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
+#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
+#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
+#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
+#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
+#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
+
+#define DPNI_CMDID_ENABLE_VLAN_FILTER DPNI_CMD(0x230)
+#define DPNI_CMDID_ADD_VLAN_ID DPNI_CMD(0x231)
+#define DPNI_CMDID_REMOVE_VLAN_ID DPNI_CMD(0x232)
+#define DPNI_CMDID_CLR_VLAN_FILTERS DPNI_CMD(0x233)
+
+#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD_V2(0x235)
+
+#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V2(0x25D)
+#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
+#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
+#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
+#define DPNI_CMDID_GET_TAILDROP DPNI_CMD_V2(0x261)
+#define DPNI_CMDID_SET_TAILDROP DPNI_CMD_V2(0x262)
+
+#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
+
+#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
+#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
+
+#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
+#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
+#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD_V2(0x269)
+#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD_V2(0x26A)
+#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
+#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
+#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
+#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD(0x26D)
+
+/* Macros for accessing command fields smaller than 1byte */
+#define DPNI_MASK(field) \
+ GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
+ DPNI_##field##_SHIFT)
+#define dpni_set_field(var, field, val) \
+ ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
+#define dpni_get_field(var, field) \
+ (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
+
+#pragma pack(push, 1)
+struct dpni_cmd_open {
+ uint32_t dpni_id;
+};
+
+struct dpni_cmd_create {
+ uint32_t options;
+ uint8_t num_queues;
+ uint8_t num_tcs;
+ uint8_t mac_filter_entries;
+ uint8_t pad1;
+ uint8_t vlan_filter_entries;
+ uint8_t pad2;
+ uint8_t qos_entries;
+ uint8_t pad3;
+ uint16_t fs_entries;
+};
+
+struct dpni_cmd_destroy {
+ uint32_t dpsw_id;
+};
+
+#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
+
+struct dpni_cmd_pool {
+ uint16_t dpbp_id;
+ uint8_t priority_mask;
+ uint8_t pad;
+};
+
+struct dpni_cmd_set_pools {
+ uint8_t num_dpbp;
+ uint8_t backup_pool_mask;
+ uint16_t pad;
+ struct dpni_cmd_pool pool[8];
+ uint16_t buffer_size[8];
+};
+
+/* The enable indication is always the least significant bit */
+#define DPNI_ENABLE_SHIFT 0
+#define DPNI_ENABLE_SIZE 1
+
+struct dpni_rsp_is_enabled {
+ uint8_t enabled;
+};
+
+struct dpni_cmd_set_irq_enable {
+ uint8_t enable;
+ uint8_t pad[3];
+ uint8_t irq_index;
+};
+
+struct dpni_cmd_get_irq_enable {
+ uint32_t pad;
+ uint8_t irq_index;
+};
+
+struct dpni_rsp_get_irq_enable {
+ uint8_t enabled;
+};
+
+struct dpni_cmd_set_irq_mask {
+ uint32_t mask;
+ uint8_t irq_index;
+};
+
+struct dpni_cmd_get_irq_mask {
+ uint32_t pad;
+ uint8_t irq_index;
+};
+
+struct dpni_rsp_get_irq_mask {
+ uint32_t mask;
+};
+
+struct dpni_cmd_get_irq_status {
+ uint32_t status;
+ uint8_t irq_index;
+};
+
+struct dpni_rsp_get_irq_status {
+ uint32_t status;
+};
+
+struct dpni_cmd_clear_irq_status {
+ uint32_t status;
+ uint8_t irq_index;
+};
+
+struct dpni_rsp_get_attr {
+ /* response word 0 */
+ uint32_t options;
+ uint8_t num_queues;
+ uint8_t num_rx_tcs;
+ uint8_t mac_filter_entries;
+ uint8_t num_tx_tcs;
+ /* response word 1 */
+ uint8_t vlan_filter_entries;
+ uint8_t pad1;
+ uint8_t qos_entries;
+ uint8_t pad2;
+ uint16_t fs_entries;
+ uint16_t pad3;
+ /* response word 2 */
+ uint8_t qos_key_size;
+ uint8_t fs_key_size;
+ uint16_t wriop_version;
+};
+
+#define DPNI_ERROR_ACTION_SHIFT 0
+#define DPNI_ERROR_ACTION_SIZE 4
+#define DPNI_FRAME_ANN_SHIFT 4
+#define DPNI_FRAME_ANN_SIZE 1
+
+struct dpni_cmd_set_errors_behavior {
+ uint32_t errors;
+ /* from least significant bit: error_action:4, set_frame_annotation:1 */
+ uint8_t flags;
+};
+
+/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
+ * buffer layouts, but they all share the same parameters.
+ * If one of the functions changes, below structure needs to be split.
+ */
+
+#define DPNI_PASS_TS_SHIFT 0
+#define DPNI_PASS_TS_SIZE 1
+#define DPNI_PASS_PR_SHIFT 1
+#define DPNI_PASS_PR_SIZE 1
+#define DPNI_PASS_FS_SHIFT 2
+#define DPNI_PASS_FS_SIZE 1
+
+struct dpni_cmd_get_buffer_layout {
+ uint8_t qtype;
+};
+
+struct dpni_rsp_get_buffer_layout {
+ /* response word 0 */
+ uint8_t pad0[6];
+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
+ uint8_t flags;
+ uint8_t pad1;
+ /* response word 1 */
+ uint16_t private_data_size;
+ uint16_t data_align;
+ uint16_t head_room;
+ uint16_t tail_room;
+};
+
+struct dpni_cmd_set_buffer_layout {
+ /* cmd word 0 */
+ uint8_t qtype;
+ uint8_t pad0[3];
+ uint16_t options;
+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
+ uint8_t flags;
+ uint8_t pad1;
+ /* cmd word 1 */
+ uint16_t private_data_size;
+ uint16_t data_align;
+ uint16_t head_room;
+ uint16_t tail_room;
+};
+
+struct dpni_cmd_set_offload {
+ uint8_t pad[3];
+ uint8_t dpni_offload;
+ uint32_t config;
+};
+
+struct dpni_cmd_get_offload {
+ uint8_t pad[3];
+ uint8_t dpni_offload;
+};
+
+struct dpni_rsp_get_offload {
+ uint32_t pad;
+ uint32_t config;
+};
+
+struct dpni_cmd_get_qdid {
+ uint8_t qtype;
+};
+
+struct dpni_rsp_get_qdid {
+ uint16_t qdid;
+};
+
+struct dpni_rsp_get_sp_info {
+ uint16_t spids[2];
+};
+
+struct dpni_rsp_get_tx_data_offset {
+ uint16_t data_offset;
+};
+
+struct dpni_cmd_get_statistics {
+ uint8_t page_number;
+ uint8_t param;
+};
+
+struct dpni_rsp_get_statistics {
+ uint64_t counter[7];
+};
+
+struct dpni_cmd_set_link_cfg {
+ uint64_t pad0;
+ uint32_t rate;
+ uint32_t pad1;
+ uint64_t options;
+};
+
+#define DPNI_LINK_STATE_SHIFT 0
+#define DPNI_LINK_STATE_SIZE 1
+
+struct dpni_rsp_get_link_state {
+ uint32_t pad0;
+ /* from LSB: up:1 */
+ uint8_t flags;
+ uint8_t pad1[3];
+ uint32_t rate;
+ uint32_t pad2;
+ uint64_t options;
+};
+
+struct dpni_cmd_set_max_frame_length {
+ uint16_t max_frame_length;
+};
+
+struct dpni_rsp_get_max_frame_length {
+ uint16_t max_frame_length;
+};
+
+struct dpni_cmd_set_multicast_promisc {
+ uint8_t enable;
+};
+
+struct dpni_rsp_get_multicast_promisc {
+ uint8_t enabled;
+};
+
+struct dpni_cmd_set_unicast_promisc {
+ uint8_t enable;
+};
+
+struct dpni_rsp_get_unicast_promisc {
+ uint8_t enabled;
+};
+
+struct dpni_cmd_set_primary_mac_addr {
+ uint16_t pad;
+ uint8_t mac_addr[6];
+};
+
+struct dpni_rsp_get_primary_mac_addr {
+ uint16_t pad;
+ uint8_t mac_addr[6];
+};
+
+struct dpni_rsp_get_port_mac_addr {
+ uint16_t pad;
+ uint8_t mac_addr[6];
+};
+
+struct dpni_cmd_add_mac_addr {
+ uint16_t pad;
+ uint8_t mac_addr[6];
+};
+
+struct dpni_cmd_remove_mac_addr {
+ uint16_t pad;
+ uint8_t mac_addr[6];
+};
+
+#define DPNI_UNICAST_FILTERS_SHIFT 0
+#define DPNI_UNICAST_FILTERS_SIZE 1
+#define DPNI_MULTICAST_FILTERS_SHIFT 1
+#define DPNI_MULTICAST_FILTERS_SIZE 1
+
+struct dpni_cmd_clear_mac_filters {
+ /* from LSB: unicast:1, multicast:1 */
+ uint8_t flags;
+};
+
+struct dpni_cmd_enable_vlan_filter {
+ /* only the LSB */
+ uint8_t en;
+};
+
+struct dpni_cmd_vlan_id {
+ uint32_t pad;
+ uint16_t vlan_id;
+};
+
+#define DPNI_SEPARATE_GRP_SHIFT 0
+#define DPNI_SEPARATE_GRP_SIZE 1
+#define DPNI_MODE_1_SHIFT 0
+#define DPNI_MODE_1_SIZE 4
+#define DPNI_MODE_2_SHIFT 4
+#define DPNI_MODE_2_SIZE 4
+
+struct dpni_cmd_set_tx_priorities {
+ uint16_t flags;
+ uint8_t prio_group_A;
+ uint8_t prio_group_B;
+ uint32_t pad0;
+ uint8_t modes[4];
+ uint32_t pad1;
+ uint64_t pad2;
+ uint16_t delta_bandwidth[8];
+};
+
+#define DPNI_DIST_MODE_SHIFT 0
+#define DPNI_DIST_MODE_SIZE 4
+#define DPNI_MISS_ACTION_SHIFT 4
+#define DPNI_MISS_ACTION_SIZE 4
+#define DPNI_KEEP_HASH_KEY_SHIFT 7
+#define DPNI_KEEP_HASH_KEY_SIZE 1
+
+struct dpni_cmd_set_rx_tc_dist {
+ uint16_t dist_size;
+ uint8_t tc_id;
+ /* from LSB: dist_mode:4, miss_action:4 */
+ uint8_t flags;
+ uint8_t pad0;
+ /* only the LSB */
+ uint8_t keep_hash_key;
+ uint16_t default_flow_id;
+ uint64_t pad1[5];
+ uint64_t key_cfg_iova;
+};
+
+struct dpni_cmd_get_queue {
+ uint8_t qtype;
+ uint8_t tc;
+ uint8_t index;
+};
+
+#define DPNI_DEST_TYPE_SHIFT 0
+#define DPNI_DEST_TYPE_SIZE 4
+#define DPNI_STASH_CTRL_SHIFT 6
+#define DPNI_STASH_CTRL_SIZE 1
+#define DPNI_HOLD_ACTIVE_SHIFT 7
+#define DPNI_HOLD_ACTIVE_SIZE 1
+
+struct dpni_rsp_get_queue {
+ /* response word 0 */
+ uint64_t pad0;
+ /* response word 1 */
+ uint32_t dest_id;
+ uint16_t pad1;
+ uint8_t dest_prio;
+ /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
+ uint8_t flags;
+ /* response word 2 */
+ uint64_t flc;
+ /* response word 3 */
+ uint64_t user_context;
+ /* response word 4 */
+ uint32_t fqid;
+ uint16_t qdbin;
+};
+
+struct dpni_cmd_set_queue {
+ /* cmd word 0 */
+ uint8_t qtype;
+ uint8_t tc;
+ uint8_t index;
+ uint8_t options;
+ uint32_t pad0;
+ /* cmd word 1 */
+ uint32_t dest_id;
+ uint16_t pad1;
+ uint8_t dest_prio;
+ uint8_t flags;
+ /* cmd word 2 */
+ uint64_t flc;
+ /* cmd word 3 */
+ uint64_t user_context;
+};
+
+#define DPNI_DROP_ENABLE_SHIFT 0
+#define DPNI_DROP_ENABLE_SIZE 1
+#define DPNI_DROP_UNITS_SHIFT 2
+#define DPNI_DROP_UNITS_SIZE 2
+
+struct dpni_early_drop {
+ /* from LSB: enable:1 units:2 */
+ uint8_t flags;
+ uint8_t pad0[3];
+ uint32_t pad1;
+ uint8_t green_drop_probability;
+ uint8_t pad2[7];
+ uint64_t green_max_threshold;
+ uint64_t green_min_threshold;
+ uint64_t pad3;
+ uint8_t yellow_drop_probability;
+ uint8_t pad4[7];
+ uint64_t yellow_max_threshold;
+ uint64_t yellow_min_threshold;
+ uint64_t pad5;
+ uint8_t red_drop_probability;
+ uint8_t pad6[7];
+ uint64_t red_max_threshold;
+ uint64_t red_min_threshold;
+};
+
+struct dpni_cmd_early_drop {
+ uint8_t qtype;
+ uint8_t tc;
+ uint8_t pad[6];
+ uint64_t early_drop_iova;
+};
+
+struct dpni_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+struct dpni_cmd_get_taildrop {
+ uint8_t congestion_point;
+ uint8_t qtype;
+ uint8_t tc;
+ uint8_t index;
+};
+
+struct dpni_rsp_get_taildrop {
+ /* cmd word 0 */
+ uint64_t pad0;
+ /* cmd word 1 */
+ /* from LSB: enable:1 oal_lo:7 */
+ uint8_t enable_oal_lo;
+ /* from LSB: oal_hi:5 */
+ uint8_t oal_hi;
+ uint8_t units;
+ uint8_t pad2;
+ uint32_t threshold;
+};
+
+#define DPNI_OAL_LO_SHIFT 1
+#define DPNI_OAL_LO_SIZE 7
+#define DPNI_OAL_HI_SHIFT 0
+#define DPNI_OAL_HI_SIZE 5
+
+struct dpni_cmd_set_taildrop {
+ /* cmd word 0 */
+ uint8_t congestion_point;
+ uint8_t qtype;
+ uint8_t tc;
+ uint8_t index;
+ uint32_t pad0;
+ /* cmd word 1 */
+ /* from LSB: enable:1 oal_lo:7 */
+ uint8_t enable_oal_lo;
+ /* from LSB: oal_hi:5 */
+ uint8_t oal_hi;
+ uint8_t units;
+ uint8_t pad2;
+ uint32_t threshold;
+};
+
+struct dpni_tx_confirmation_mode {
+ uint32_t pad;
+ uint8_t confirmation_mode;
+};
+
+#define DPNI_DEST_TYPE_SHIFT 0
+#define DPNI_DEST_TYPE_SIZE 4
+#define DPNI_CONG_UNITS_SHIFT 4
+#define DPNI_CONG_UNITS_SIZE 2
+
+struct dpni_cmd_set_congestion_notification {
+ uint8_t qtype;
+ uint8_t tc;
+ uint8_t pad[6];
+ uint32_t dest_id;
+ uint16_t notification_mode;
+ uint8_t dest_priority;
+ /* from LSB: dest_type: 4 units:2 */
+ uint8_t type_units;
+ uint64_t message_iova;
+ uint64_t message_ctx;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+};
+
+struct dpni_cmd_get_congestion_notification {
+ uint8_t qtype;
+ uint8_t tc;
+};
+
+struct dpni_rsp_get_congestion_notification {
+ uint64_t pad;
+ uint32_t dest_id;
+ uint16_t notification_mode;
+ uint8_t dest_priority;
+ /* from LSB: dest_type: 4 units:2 */
+ uint8_t type_units;
+ uint64_t message_iova;
+ uint64_t message_ctx;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPNI_CMD_H */
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_net.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_net.h
new file mode 100644
index 00000000..964870ba
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_net.h
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ */
+#ifndef __FSL_NET_H
+#define __FSL_NET_H
+
+#define LAST_HDR_INDEX 0xFFFFFFFF
+
+/*****************************************************************************/
+/* Protocol fields */
+/*****************************************************************************/
+
+/************************* Ethernet fields *********************************/
+#define NH_FLD_ETH_DA (1)
+#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
+#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
+#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
+#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
+#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
+#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
+
+#define NH_FLD_ETH_ADDR_SIZE 6
+
+/*************************** VLAN fields ***********************************/
+#define NH_FLD_VLAN_VPRI (1)
+#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
+#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
+#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
+#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
+#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
+
+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
+ NH_FLD_VLAN_CFI | \
+ NH_FLD_VLAN_VID)
+
+/************************ IP (generic) fields ******************************/
+#define NH_FLD_IP_VER (1)
+#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
+#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
+#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
+#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
+#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
+#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
+#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
+#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
+
+#define NH_FLD_IP_PROTO_SIZE 1
+
+/***************************** IPV4 fields *********************************/
+#define NH_FLD_IPV4_VER (1)
+#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
+#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
+#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
+#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
+#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
+#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
+#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
+#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
+#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
+#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
+#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
+#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
+#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
+#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
+#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
+
+#define NH_FLD_IPV4_ADDR_SIZE 4
+#define NH_FLD_IPV4_PROTO_SIZE 1
+
+/***************************** IPV6 fields *********************************/
+#define NH_FLD_IPV6_VER (1)
+#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
+#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
+#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
+#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
+#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
+#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
+#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
+#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
+
+#define NH_FLD_IPV6_ADDR_SIZE 16
+#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
+
+/***************************** ICMP fields *********************************/
+#define NH_FLD_ICMP_TYPE (1)
+#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
+#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
+#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
+#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
+
+#define NH_FLD_ICMP_CODE_SIZE 1
+#define NH_FLD_ICMP_TYPE_SIZE 1
+
+/***************************** IGMP fields *********************************/
+#define NH_FLD_IGMP_VERSION (1)
+#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
+#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
+#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
+#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
+
+/***************************** TCP fields **********************************/
+#define NH_FLD_TCP_PORT_SRC (1)
+#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
+#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
+#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
+#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
+#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
+#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
+#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
+#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
+#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
+#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
+#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
+
+#define NH_FLD_TCP_PORT_SIZE 2
+
+/***************************** UDP fields **********************************/
+#define NH_FLD_UDP_PORT_SRC (1)
+#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
+#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
+#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
+#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
+
+#define NH_FLD_UDP_PORT_SIZE 2
+
+/*************************** UDP-lite fields *******************************/
+#define NH_FLD_UDP_LITE_PORT_SRC (1)
+#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
+#define NH_FLD_UDP_LITE_ALL_FIELDS \
+ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
+
+#define NH_FLD_UDP_LITE_PORT_SIZE 2
+
+/*************************** UDP-encap-ESP fields **************************/
+#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
+#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
+#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
+#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
+#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
+ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
+
+#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
+#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
+
+/***************************** SCTP fields *********************************/
+#define NH_FLD_SCTP_PORT_SRC (1)
+#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
+#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
+#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
+#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
+
+#define NH_FLD_SCTP_PORT_SIZE 2
+
+/***************************** DCCP fields *********************************/
+#define NH_FLD_DCCP_PORT_SRC (1)
+#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
+#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
+
+#define NH_FLD_DCCP_PORT_SIZE 2
+
+/***************************** IPHC fields *********************************/
+#define NH_FLD_IPHC_CID (1)
+#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
+#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
+#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
+#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
+
+/***************************** SCTP fields *********************************/
+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINNING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
+ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
+
+/*************************** L2TPV2 fields *********************************/
+#define NH_FLD_L2TPV2_TYPE_BIT (1)
+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
+#define NH_FLD_L2TPV2_ALL_FIELDS \
+ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
+
+/*************************** L2TPV3 fields *********************************/
+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
+ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
+
+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
+ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
+
+/**************************** PPP fields ***********************************/
+#define NH_FLD_PPP_PID (1)
+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
+
+/************************** PPPoE fields ***********************************/
+#define NH_FLD_PPPOE_VER (1)
+#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
+
+/************************* PPP-Mux fields **********************************/
+#define NH_FLD_PPPMUX_PID (1)
+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
+
+/*********************** PPP-Mux sub-frame fields **************************/
+#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
+ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
+
+/*************************** LLC fields ************************************/
+#define NH_FLD_LLC_DSAP (1)
+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
+
+/*************************** NLPID fields **********************************/
+#define NH_FLD_NLPID_NLPID (1)
+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
+
+/*************************** SNAP fields ***********************************/
+#define NH_FLD_SNAP_OUI (1)
+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
+
+/*************************** LLC SNAP fields *******************************/
+#define NH_FLD_LLC_SNAP_TYPE (1)
+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
+
+#define NH_FLD_ARP_HTYPE (1)
+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
+#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
+
+/*************************** RFC2684 fields ********************************/
+#define NH_FLD_RFC2684_LLC (1)
+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
+
+/*************************** User defined fields ***************************/
+#define NH_FLD_USER_DEFINED_SRCPORT (1)
+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
+#define NH_FLD_USER_DEFINED_ALL_FIELDS \
+ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
+
+/*************************** Payload fields ********************************/
+#define NH_FLD_PAYLOAD_BUFFER (1)
+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
+#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
+
+/*************************** GRE fields ************************************/
+#define NH_FLD_GRE_TYPE (1)
+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
+
+/*************************** MINENCAP fields *******************************/
+#define NH_FLD_MINENCAP_SRC_IP (1)
+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
+#define NH_FLD_MINENCAP_ALL_FIELDS \
+ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
+
+/*************************** IPSEC AH fields *******************************/
+#define NH_FLD_IPSEC_AH_SPI (1)
+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
+
+/*************************** IPSEC ESP fields ******************************/
+#define NH_FLD_IPSEC_ESP_SPI (1)
+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
+
+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
+
+/*************************** MPLS fields ***********************************/
+#define NH_FLD_MPLS_LABEL_STACK (1)
+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
+ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
+
+/*************************** MACSEC fields *********************************/
+#define NH_FLD_MACSEC_SECTAG (1)
+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
+
+/*************************** GTP fields ************************************/
+#define NH_FLD_GTP_TEID (1)
+
+/* Protocol options */
+
+/* Ethernet options */
+#define NH_OPT_ETH_BROADCAST 1
+#define NH_OPT_ETH_MULTICAST 2
+#define NH_OPT_ETH_UNICAST 3
+#define NH_OPT_ETH_BPDU 4
+
+#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
+/* also applicable for broadcast */
+
+/* VLAN options */
+#define NH_OPT_VLAN_CFI 1
+
+/* IPV4 options */
+#define NH_OPT_IPV4_UNICAST 1
+#define NH_OPT_IPV4_MULTICAST 2
+#define NH_OPT_IPV4_BROADCAST 3
+#define NH_OPT_IPV4_OPTION 4
+#define NH_OPT_IPV4_FRAG 5
+#define NH_OPT_IPV4_INITIAL_FRAG 6
+
+/* IPV6 options */
+#define NH_OPT_IPV6_UNICAST 1
+#define NH_OPT_IPV6_MULTICAST 2
+#define NH_OPT_IPV6_OPTION 3
+#define NH_OPT_IPV6_FRAG 4
+#define NH_OPT_IPV6_INITIAL_FRAG 5
+
+/* General IP options (may be used for any version) */
+#define NH_OPT_IP_FRAG 1
+#define NH_OPT_IP_INITIAL_FRAG 2
+#define NH_OPT_IP_OPTION 3
+
+/* Minenc. options */
+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
+
+/* GRE. options */
+#define NH_OPT_GRE_ROUTING_PRESENT 1
+
+/* TCP options */
+#define NH_OPT_TCP_OPTIONS 1
+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
+#define NH_OPT_TCP_CONTROL_LOW_BITS 3
+
+/* CAPWAP options */
+#define NH_OPT_CAPWAP_DTLS 1
+
+enum net_prot {
+ NET_PROT_NONE = 0,
+ NET_PROT_PAYLOAD,
+ NET_PROT_ETH,
+ NET_PROT_VLAN,
+ NET_PROT_IPV4,
+ NET_PROT_IPV6,
+ NET_PROT_IP,
+ NET_PROT_TCP,
+ NET_PROT_UDP,
+ NET_PROT_UDP_LITE,
+ NET_PROT_IPHC,
+ NET_PROT_SCTP,
+ NET_PROT_SCTP_CHUNK_DATA,
+ NET_PROT_PPPOE,
+ NET_PROT_PPP,
+ NET_PROT_PPPMUX,
+ NET_PROT_PPPMUX_SUBFRM,
+ NET_PROT_L2TPV2,
+ NET_PROT_L2TPV3_CTRL,
+ NET_PROT_L2TPV3_SESS,
+ NET_PROT_LLC,
+ NET_PROT_LLC_SNAP,
+ NET_PROT_NLPID,
+ NET_PROT_SNAP,
+ NET_PROT_MPLS,
+ NET_PROT_IPSEC_AH,
+ NET_PROT_IPSEC_ESP,
+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
+ NET_PROT_MACSEC,
+ NET_PROT_GRE,
+ NET_PROT_MINENCAP,
+ NET_PROT_DCCP,
+ NET_PROT_ICMP,
+ NET_PROT_IGMP,
+ NET_PROT_ARP,
+ NET_PROT_CAPWAP_DATA,
+ NET_PROT_CAPWAP_CTRL,
+ NET_PROT_RFC2684,
+ NET_PROT_ICMPV6,
+ NET_PROT_FCOE,
+ NET_PROT_FIP,
+ NET_PROT_ISCSI,
+ NET_PROT_GTP,
+ NET_PROT_USER_DEFINED_L2,
+ NET_PROT_USER_DEFINED_L3,
+ NET_PROT_USER_DEFINED_L4,
+ NET_PROT_USER_DEFINED_L5,
+ NET_PROT_USER_DEFINED_SHIM1,
+ NET_PROT_USER_DEFINED_SHIM2,
+
+ NET_PROT_DUMMY_LAST
+};
+
+/*! IEEE8021.Q */
+#define NH_IEEE8021Q_ETYPE 0x8100
+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
+ ((((uint32_t)(etype & 0xFFFF)) << 16) | \
+ (((uint32_t)(pcp & 0x07)) << 13) | \
+ (((uint32_t)(dei & 0x01)) << 12) | \
+ (((uint32_t)(vlan_id & 0xFFF))))
+
+#endif /* __FSL_NET_H */
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/meson.build b/src/spdk/dpdk/drivers/net/dpaa2/meson.build
new file mode 100644
index 00000000..213f0d72
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['mempool_dpaa2']
+sources = files('base/dpaa2_hw_dpni.c',
+ 'dpaa2_ethdev.c',
+ 'dpaa2_rxtx.c',
+ 'mc/dpkg.c',
+ 'mc/dpni.c')
+
+includes += include_directories('base', 'mc')
+
+# depends on fslmc bus which uses experimental API
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map b/src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
new file mode 100644
index 00000000..09f4364b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
@@ -0,0 +1,12 @@
+DPDK_17.05 {
+
+ local: *;
+};
+
+DPDK_17.11 {
+ global:
+
+ dpaa2_eth_eventq_attach;
+ dpaa2_eth_eventq_detach;
+
+} DPDK_17.05;
diff --git a/src/spdk/dpdk/drivers/net/e1000/Makefile b/src/spdk/dpdk/drivers/net/e1000/Makefile
new file mode 100644
index 00000000..9c87e883
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/Makefile
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2015 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_e1000.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+EXPORT_MAP := rte_pmd_e1000_version.map
+
+LIBABIVER := 1
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER = -diag-disable 177 -diag-disable 181
+CFLAGS_BASE_DRIVER += -diag-disable 869 -diag-disable 2259
+else
+#
+# CFLAGS for gcc/clang
+#
+CFLAGS_BASE_DRIVER = -Wno-uninitialized -Wno-unused-parameter
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-misleading-indentation
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
+endif
+endif
+endif
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings in them
+#
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_80003es2lan.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82540.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82541.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82542.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82543.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82571.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82575.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_i210.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_ich8lan.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_logs.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_manage.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_nvm.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_osdep.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_rxtx.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/README b/src/spdk/dpdk/drivers/net/e1000/base/README
new file mode 100644
index 00000000..de1ae4cf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/README
@@ -0,0 +1,65 @@
+..
+ BSD LICENSE
+
+ Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This directory contains source code of FreeBSD em & igb drivers of version
+cid-shared-code.2016.11.22 released by ND. The sub-directory of base/
+contains the original source package.
+
+This driver is valid for the product(s) listed below
+* Intel® Ethernet Controller 82540
+* Intel® Ethernet Controller 82545 Series
+* Intel® Ethernet Controller 82546 Series
+* Intel® Ethernet Controller 82571 Series
+* Intel® Ethernet Controller 82572 Series
+* Intel® Ethernet Controller 82573
+* Intel® Ethernet Controller 82574
+* Intel® Ethernet Controller 82583
+* Intel® Ethernet Controller I217 Series
+* Intel® Ethernet Controller I218 Series
+* Intel® Ethernet Controller I219 Series
+* Intel® Ethernet Controller 82576 Series
+* Intel® Ethernet Controller 82575 Series
+* Intel® Ethernet Controller 82580 Series
+* Intel® Ethernet Controller I350 Series
+* Intel® Ethernet Controller I210 Series
+* Intel® Ethernet Controller I211
+* Intel® Ethernet Controller I354 Series
+* Intel® Ethernet Controller DH89XXCC Series
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ e1000_osdep.c
+ e1000_osdep.h
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.c
new file mode 100644
index 00000000..5ac925e4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.c
@@ -0,0 +1,1525 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
+ * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw);
+STATIC void e1000_release_phy_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw);
+STATIC void e1000_release_nvm_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset,
+ u16 *data);
+STATIC s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset,
+ u16 data);
+STATIC s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+STATIC s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
+STATIC void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+STATIC s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
+STATIC s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+STATIC s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 data);
+STATIC void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
+STATIC void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+STATIC s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw);
+STATIC void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
+
+/* A table for the GG82563 cable length where the range is defined
+ * with a lower bound at "index" and the upper bound at
+ * "index + 5".
+ */
+STATIC const u16 e1000_gg82563_cable_length_table[] = {
+ 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+#define GG82563_CABLE_LENGTH_TABLE_SIZE \
+ (sizeof(e1000_gg82563_cable_length_table) / \
+ sizeof(e1000_gg82563_cable_length_table[0]))
+
+/**
+ * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_phy_params_80003es2lan");
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ return E1000_SUCCESS;
+ } else {
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
+ }
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+ phy->type = e1000_phy_gg82563;
+
+ phy->ops.acquire = e1000_acquire_phy_80003es2lan;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.check_reset_block = e1000_check_reset_block_generic;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_80003es2lan;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.release = e1000_release_phy_80003es2lan;
+ phy->ops.reset = e1000_phy_hw_reset_generic;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
+
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan;
+ phy->ops.get_cable_length = e1000_get_cable_length_80003es2lan;
+ phy->ops.read_reg = e1000_read_phy_reg_gg82563_80003es2lan;
+ phy->ops.write_reg = e1000_write_phy_reg_gg82563_80003es2lan;
+
+ phy->ops.cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan;
+
+ /* This can only be done after all function pointers are setup. */
+ ret_val = e1000_get_phy_id(hw);
+
+ /* Verify phy id */
+ if (phy->id != GG82563_E_PHY_ID)
+ return -E1000_ERR_PHY;
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u16 size;
+
+ DEBUGFUNC("e1000_init_nvm_params_80003es2lan");
+
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+ break;
+ }
+
+ nvm->type = e1000_nvm_eeprom_spi;
+
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+
+ /* Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* EEPROM access above 16k is unsupported */
+ if (size > 14)
+ size = 14;
+ nvm->word_size = 1 << size;
+
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_80003es2lan;
+ nvm->ops.read = e1000_read_nvm_eerd;
+ nvm->ops.release = e1000_release_nvm_80003es2lan;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_generic;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.write = e1000_write_nvm_80003es2lan;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_init_mac_params_80003es2lan");
+
+ /* Set media type and media-dependent function pointers */
+ switch (hw->device_id) {
+ case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ mac->ops.check_for_link = e1000_check_for_serdes_link_generic;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_generic;
+ break;
+ default:
+ hw->phy.media_type = e1000_media_type_copper;
+ mac->ops.check_for_link = e1000_check_for_copper_link_generic;
+ mac->ops.setup_physical_interface =
+ e1000_setup_copper_link_80003es2lan;
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are enabled. */
+ mac->arc_subsystem_valid = !!(E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_FWSM_MODE_MASK);
+ /* Adaptive IFS not supported */
+ mac->adaptive_ifs = false;
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_80003es2lan;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_80003es2lan;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_generic;
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_generic;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_80003es2lan;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* blink LED */
+ mac->ops.blink_led = e1000_blink_led_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_generic;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.led_off = e1000_led_off_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan;
+ /* link info */
+ mac->ops.get_link_up_info = e1000_get_link_up_info_80003es2lan;
+
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_80003es2lan - Init ESB2 func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_80003es2lan");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_80003es2lan;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_80003es2lan;
+ hw->phy.ops.init_params = e1000_init_phy_params_80003es2lan;
+}
+
+/**
+ * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to acquire access rights to the correct PHY.
+ **/
+STATIC s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ DEBUGFUNC("e1000_acquire_phy_80003es2lan");
+
+ mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+ return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_release_phy_80003es2lan - Release rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to release access rights to the correct PHY.
+ **/
+STATIC void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ DEBUGFUNC("e1000_release_phy_80003es2lan");
+
+ mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+ e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_acquire_mac_csr_80003es2lan - Acquire right to access Kumeran register
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the semaphore to access the Kumeran interface.
+ *
+ **/
+STATIC s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ DEBUGFUNC("e1000_acquire_mac_csr_80003es2lan");
+
+ mask = E1000_SWFW_CSR_SM;
+
+ return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_release_mac_csr_80003es2lan - Release right to access Kumeran Register
+ * @hw: pointer to the HW structure
+ *
+ * Release the semaphore used to access the Kumeran interface
+ **/
+STATIC void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ DEBUGFUNC("e1000_release_mac_csr_80003es2lan");
+
+ mask = E1000_SWFW_CSR_SM;
+
+ e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the semaphore to access the EEPROM.
+ **/
+STATIC s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_acquire_nvm_80003es2lan");
+
+ ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_acquire_nvm_generic(hw);
+
+ if (ret_val)
+ e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
+ * @hw: pointer to the HW structure
+ *
+ * Release the semaphore used to access the EEPROM.
+ **/
+STATIC void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_80003es2lan");
+
+ e1000_release_nvm_generic(hw);
+ e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+STATIC s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 i = 0;
+ s32 timeout = 50;
+
+ DEBUGFUNC("e1000_acquire_swfw_sync_80003es2lan");
+
+ while (i < timeout) {
+ if (e1000_get_hw_semaphore_generic(hw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /* Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000_put_hw_semaphore_generic(hw);
+ msec_delay_irq(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ return -E1000_ERR_SWFW_SYNC;
+ }
+
+ swfw_sync |= swmask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+STATIC void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ DEBUGFUNC("e1000_release_swfw_sync_80003es2lan");
+
+ while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
+ ; /* Empty */
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+}
+
+/**
+ * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the register to read
+ * @data: pointer to the data returned from the operation
+ *
+ * Read the GG82563 PHY register.
+ **/
+STATIC s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u32 page_select;
+ u16 temp;
+
+ DEBUGFUNC("e1000_read_phy_reg_gg82563_80003es2lan");
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Select Configuration Page */
+ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ page_select = GG82563_PHY_PAGE_SELECT;
+ } else {
+ /* Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ page_select = GG82563_PHY_PAGE_SELECT_ALT;
+ }
+
+ temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+ ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp);
+ if (ret_val) {
+ e1000_release_phy_80003es2lan(hw);
+ return ret_val;
+ }
+
+ if (hw->dev_spec._80003es2lan.mdic_wa_enable) {
+ /* The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ usec_delay(200);
+
+ /* ...and verify the command was successful. */
+ ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
+
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ e1000_release_phy_80003es2lan(hw);
+ return -E1000_ERR_PHY;
+ }
+
+ usec_delay(200);
+
+ ret_val = e1000_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ usec_delay(200);
+ } else {
+ ret_val = e1000_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ }
+
+ e1000_release_phy_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the register to read
+ * @data: value to write to the register
+ *
+ * Write to the GG82563 PHY register.
+ **/
+STATIC s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset, u16 data)
+{
+ s32 ret_val;
+ u32 page_select;
+ u16 temp;
+
+ DEBUGFUNC("e1000_write_phy_reg_gg82563_80003es2lan");
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Select Configuration Page */
+ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ page_select = GG82563_PHY_PAGE_SELECT;
+ } else {
+ /* Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ page_select = GG82563_PHY_PAGE_SELECT_ALT;
+ }
+
+ temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+ ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp);
+ if (ret_val) {
+ e1000_release_phy_80003es2lan(hw);
+ return ret_val;
+ }
+
+ if (hw->dev_spec._80003es2lan.mdic_wa_enable) {
+ /* The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ usec_delay(200);
+
+ /* ...and verify the command was successful. */
+ ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
+
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ e1000_release_phy_80003es2lan(hw);
+ return -E1000_ERR_PHY;
+ }
+
+ usec_delay(200);
+
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ usec_delay(200);
+ } else {
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ }
+
+ e1000_release_phy_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_80003es2lan - Write to ESB2 NVM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the register to read
+ * @words: number of words to write
+ * @data: buffer of data to write to the NVM
+ *
+ * Write "words" of data to the ESB2 NVM.
+ **/
+STATIC s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ DEBUGFUNC("e1000_write_nvm_80003es2lan");
+
+ return e1000_write_nvm_spi(hw, offset, words, data);
+}
+
+/**
+ * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
+ * @hw: pointer to the HW structure
+ *
+ * Wait a specific amount of time for manageability processes to complete.
+ * This is a function pointer entry point called by the phy module.
+ **/
+STATIC s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+ DEBUGFUNC("e1000_get_cfg_done_80003es2lan");
+
+ if (hw->bus.func == 1)
+ mask = E1000_NVM_CFG_DONE_PORT_1;
+
+ while (timeout) {
+ if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
+ break;
+ msec_delay(1);
+ timeout--;
+ }
+ if (!timeout) {
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
+ * @hw: pointer to the HW structure
+ *
+ * Force the speed and duplex settings onto the PHY. This is a
+ * function pointer entry point called by the phy module.
+ **/
+STATIC s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_80003es2lan");
+
+ if (!(hw->phy.ops.read_reg))
+ return E1000_SUCCESS;
+
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("GG82563 PSCR: %X\n", phy_data);
+
+ ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ /* Reset the phy to commit changes. */
+ phy_data |= MII_CR_RESET;
+
+ ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ usec_delay(1);
+
+ if (hw->phy.autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on GG82563 phy.\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ /* We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+ ret_val = e1000_phy_reset_dsp_generic(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Resetting the phy means we need to verify the TX_CLK corresponds
+ * to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
+ */
+ phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+ if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
+ phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
+ else
+ phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
+
+ /* In addition, we must re-enable CRS on Tx for both half and full
+ * duplex.
+ */
+ phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+ phy_data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_80003es2lan - Set approximate cable length
+ * @hw: pointer to the HW structure
+ *
+ * Find the approximate cable length as measured by the GG82563 PHY.
+ * This is a function pointer entry point called by the phy module.
+ **/
+STATIC s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, index;
+
+ DEBUGFUNC("e1000_get_cable_length_80003es2lan");
+
+ if (!(hw->phy.ops.read_reg))
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ index = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+ if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5)
+ return -E1000_ERR_PHY;
+
+ phy->min_cable_length = e1000_gg82563_cable_length_table[index];
+ phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_link_up_info_80003es2lan - Report speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to speed buffer
+ * @duplex: pointer to duplex buffer
+ *
+ * Retrieve the current speed and duplex configuration.
+ **/
+STATIC s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_get_link_up_info_80003es2lan");
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
+ duplex);
+ hw->phy.ops.cfg_on_link_up(hw);
+ } else {
+ ret_val = e1000_get_speed_and_duplex_fiber_serdes_generic(hw,
+ speed,
+ duplex);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_80003es2lan - Reset the ESB2 controller
+ * @hw: pointer to the HW structure
+ *
+ * Perform a global reset to the ESB2 controller.
+ **/
+STATIC s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 kum_reg_data;
+
+ DEBUGFUNC("e1000_reset_hw_80003es2lan");
+
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT("Issuing a global reset to MAC\n");
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+ e1000_release_phy_80003es2lan(hw);
+
+ /* Disable IBIST slave mode (far-end loopback) */
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM, &kum_reg_data);
+ if (!ret_val) {
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+ if (ret_val)
+ DEBUGOUT("Error disabling far-end loopback\n");
+ } else
+ DEBUGOUT("Error disabling far-end loopback\n");
+
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val)
+ /* We don't want to continue accessing MAC registers. */
+ return ret_val;
+
+ /* Clear any pending interrupt events. */
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ return e1000_check_alt_mac_addr_generic(hw);
+}
+
+/**
+ * e1000_init_hw_80003es2lan - Initialize the ESB2 controller
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
+ **/
+STATIC s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 reg_data;
+ s32 ret_val;
+ u16 kum_reg_data;
+ u16 i;
+
+ DEBUGFUNC("e1000_init_hw_80003es2lan");
+
+ e1000_initialize_hw_bits_80003es2lan(hw);
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
+ if (ret_val)
+ DEBUGOUT("Error initializing identification LED\n");
+
+ /* Disabling VLAN filtering */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address. */
+ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Disable IBIST slave mode (far-end loopback) */
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ if (!ret_val) {
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+ if (ret_val)
+ DEBUGOUT("Error disabling far-end loopback\n");
+ } else
+ DEBUGOUT("Error disabling far-end loopback\n");
+
+ /* Set the transmit descriptor write-back policy */
+ reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data);
+
+ /* ...for both queues. */
+ reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1));
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data);
+
+ /* Enable retransmit on late collisions */
+ reg_data = E1000_READ_REG(hw, E1000_TCTL);
+ reg_data |= E1000_TCTL_RTLC;
+ E1000_WRITE_REG(hw, E1000_TCTL, reg_data);
+
+ /* Configure Gigabit Carry Extend Padding */
+ reg_data = E1000_READ_REG(hw, E1000_TCTL_EXT);
+ reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+ reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
+ E1000_WRITE_REG(hw, E1000_TCTL_EXT, reg_data);
+
+ /* Configure Transmit Inter-Packet Gap */
+ reg_data = E1000_READ_REG(hw, E1000_TIPG);
+ reg_data &= ~E1000_TIPG_IPGT_MASK;
+ reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+ E1000_WRITE_REG(hw, E1000_TIPG, reg_data);
+
+ reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
+ reg_data &= ~0x00100000;
+ E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
+
+ /* default to true to enable the MDIC W/A */
+ hw->dev_spec._80003es2lan.mdic_wa_enable = true;
+
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >>
+ E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i);
+ if (!ret_val) {
+ if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
+ E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
+ hw->dev_spec._80003es2lan.mdic_wa_enable = false;
+ }
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Initializes required hardware-dependent bits needed for normal operation.
+ **/
+STATIC void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_initialize_hw_bits_80003es2lan");
+
+ /* Transmit Descriptor Control 0 */
+ reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
+
+ /* Transmit Descriptor Control 1 */
+ reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
+
+ /* Transmit Arbitration Control 0 */
+ reg = E1000_READ_REG(hw, E1000_TARC(0));
+ reg &= ~(0xF << 27); /* 30:27 */
+ if (hw->phy.media_type != e1000_media_type_copper)
+ reg &= ~(1 << 20);
+ E1000_WRITE_REG(hw, E1000_TARC(0), reg);
+
+ /* Transmit Arbitration Control 1 */
+ reg = E1000_READ_REG(hw, E1000_TARC(1));
+ if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
+ reg &= ~(1 << 28);
+ else
+ reg |= (1 << 28);
+ E1000_WRITE_REG(hw, E1000_TARC(1), reg);
+
+ /* Disable IPv6 extension header parsing because some malformed
+ * IPv6 headers can hang the Rx.
+ */
+ reg = E1000_READ_REG(hw, E1000_RFCTL);
+ reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+ E1000_WRITE_REG(hw, E1000_RFCTL, reg);
+
+ return;
+}
+
+/**
+ * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
+ * @hw: pointer to the HW structure
+ *
+ * Setup some GG82563 PHY registers for obtaining link
+ **/
+STATIC s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 reg;
+ u16 data;
+
+ DEBUGFUNC("e1000_copper_link_setup_gg82563_80003es2lan");
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+ /* Use 25MHz for both link down and 1000Base-T for Tx clock. */
+ data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
+
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+ switch (phy->mdix) {
+ case 1:
+ data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+ break;
+ case 2:
+ data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+ break;
+ case 0:
+ default:
+ data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+ if (phy->disable_polarity_correction)
+ data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ /* Bypass Rx and Tx FIFO's */
+ reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL;
+ data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+ E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
+ if (ret_val)
+ return ret_val;
+
+ reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data);
+ if (ret_val)
+ return ret_val;
+ data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL_2, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL_2, data);
+ if (ret_val)
+ return ret_val;
+
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* Do not init these registers when the HW is in IAMT mode, since the
+ * firmware will have already initialized them. We only initialize
+ * them if the HW is not in IAMT mode.
+ */
+ if (!hw->mac.ops.check_mng_mode(hw)) {
+ /* Enable Electrical Idle on the PHY */
+ data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Workaround: Disable padding in Kumeran interface in the MAC
+ * and in the PHY to avoid CRC errors.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_INBAND_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= GG82563_ICR_DIS_PADDING;
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_INBAND_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Essentially a wrapper for setting up all things "copper" related.
+ * This is a function pointer entry point called by the mac module.
+ **/
+STATIC s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 reg_data;
+
+ DEBUGFUNC("e1000_setup_copper_link_80003es2lan");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Set the mac to wait the maximum time between each
+ * iteration and increase the max iterations when
+ * polling the phy; this fixes erroneous timeouts at 10Mbps.
+ */
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
+ 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+ reg_data);
+ if (ret_val)
+ return ret_val;
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_setup_copper_link_generic(hw);
+}
+
+/**
+ * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
+ * @hw: pointer to the HW structure
+ * @duplex: current duplex setting
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
+ **/
+STATIC s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 speed;
+ u16 duplex;
+
+ DEBUGFUNC("e1000_configure_on_link_up");
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, &speed,
+ &duplex);
+ if (ret_val)
+ return ret_val;
+
+ if (speed == SPEED_1000)
+ ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
+ else
+ ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ * @hw: pointer to the HW structure
+ * @duplex: current duplex setting
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
+ **/
+STATIC s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+ s32 ret_val;
+ u32 tipg;
+ u32 i = 0;
+ u16 reg_data, reg_data2;
+
+ DEBUGFUNC("e1000_configure_kmrn_for_10_100");
+
+ reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = E1000_READ_REG(hw, E1000_TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+ E1000_WRITE_REG(hw, E1000_TIPG, tipg);
+
+ do {
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ &reg_data2);
+ if (ret_val)
+ return ret_val;
+ i++;
+ } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+ if (duplex == HALF_DUPLEX)
+ reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+ else
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ return hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+}
+
+/**
+ * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * gigabit operation.
+ **/
+STATIC s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 reg_data, reg_data2;
+ u32 tipg;
+ u32 i = 0;
+
+ DEBUGFUNC("e1000_configure_kmrn_for_1000");
+
+ reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = E1000_READ_REG(hw, E1000_TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+ E1000_WRITE_REG(hw, E1000_TIPG, tipg);
+
+ do {
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ &reg_data2);
+ if (ret_val)
+ return ret_val;
+ i++;
+ } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ return hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+}
+
+/**
+ * e1000_read_kmrn_reg_80003es2lan - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquire semaphore, then read the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release the semaphore before exiting.
+ **/
+STATIC s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ u32 kmrnctrlsta;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_kmrn_reg_80003es2lan");
+
+ ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(2);
+
+ kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
+ *data = (u16)kmrnctrlsta;
+
+ e1000_release_mac_csr_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_kmrn_reg_80003es2lan - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquire semaphore, then write the data to PHY register
+ * at the offset using the kumeran interface. Release semaphore
+ * before exiting.
+ **/
+STATIC s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 data)
+{
+ u32 kmrnctrlsta;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_write_kmrn_reg_80003es2lan");
+
+ ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | data;
+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(2);
+
+ e1000_release_mac_csr_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_mac_addr_80003es2lan - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_mac_addr_80003es2lan");
+
+ /* If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, remove the link.
+ **/
+STATIC void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(hw->mac.ops.check_mng_mode(hw) ||
+ hw->phy.ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_80003es2lan");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+
+ E1000_READ_REG(hw, E1000_IAC);
+ E1000_READ_REG(hw, E1000_ICRXOC);
+
+ E1000_READ_REG(hw, E1000_ICRXPTC);
+ E1000_READ_REG(hw, E1000_ICRXATC);
+ E1000_READ_REG(hw, E1000_ICTXPTC);
+ E1000_READ_REG(hw, E1000_ICTXATC);
+ E1000_READ_REG(hw, E1000_ICTXQEC);
+ E1000_READ_REG(hw, E1000_ICTXQMTC);
+ E1000_READ_REG(hw, E1000_ICRXDMTC);
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.h
new file mode 100644
index 00000000..93ec19be
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_80003ES2LAN_H_
+#define _E1000_80003ES2LAN_H_
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
+
+#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26)
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-100M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define GG82563_DSPD_CABLE_LENGTH 0x0007
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY 0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+/* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82540.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82540.c
new file mode 100644
index 00000000..7de7b7ba
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82540.c
@@ -0,0 +1,717 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/*
+ * 82540EM Gigabit Ethernet Controller
+ * 82540EP Gigabit Ethernet Controller
+ * 82545EM Gigabit Ethernet Controller (Copper)
+ * 82545EM Gigabit Ethernet Controller (Fiber)
+ * 82545GM Gigabit Ethernet Controller
+ * 82546EB Gigabit Ethernet Controller (Copper)
+ * 82546EB Gigabit Ethernet Controller (Fiber)
+ * 82546GB Gigabit Ethernet Controller
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_init_phy_params_82540(struct e1000_hw *hw);
+STATIC s32 e1000_init_nvm_params_82540(struct e1000_hw *hw);
+STATIC s32 e1000_init_mac_params_82540(struct e1000_hw *hw);
+STATIC s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw);
+STATIC void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_82540(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_82540(struct e1000_hw *hw);
+STATIC s32 e1000_set_phy_mode_82540(struct e1000_hw *hw);
+STATIC s32 e1000_set_vco_speed_82540(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_82540(struct e1000_hw *hw);
+STATIC s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw);
+STATIC void e1000_power_down_phy_copper_82540(struct e1000_hw *hw);
+STATIC s32 e1000_read_mac_addr_82540(struct e1000_hw *hw);
+
+/**
+ * e1000_init_phy_params_82540 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_82540(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 10000;
+ phy->type = e1000_phy_m88;
+
+ /* Function Pointers */
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
+ phy->ops.read_reg = e1000_read_phy_reg_m88;
+ phy->ops.reset = e1000_phy_hw_reset_generic;
+ phy->ops.write_reg = e1000_write_phy_reg_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82540;
+
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+
+ /* Verify phy id */
+ switch (hw->mac.type) {
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ if (phy->id == M88E1011_I_PHY_ID)
+ break;
+ /* Fall Through */
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ break;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82540 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_nvm_params_82540(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ DEBUGFUNC("e1000_init_nvm_params_82540");
+
+ nvm->type = e1000_nvm_eeprom_microwire;
+ nvm->delay_usec = 50;
+ nvm->opcode_bits = 3;
+ switch (nvm->override) {
+ case e1000_nvm_override_microwire_large:
+ nvm->address_bits = 8;
+ nvm->word_size = 256;
+ break;
+ case e1000_nvm_override_microwire_small:
+ nvm->address_bits = 6;
+ nvm->word_size = 64;
+ break;
+ default:
+ nvm->address_bits = eecd & E1000_EECD_SIZE ? 8 : 6;
+ nvm->word_size = eecd & E1000_EECD_SIZE ? 256 : 64;
+ break;
+ }
+
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_generic;
+ nvm->ops.read = e1000_read_nvm_microwire;
+ nvm->ops.release = e1000_release_nvm_generic;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_generic;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.write = e1000_write_nvm_microwire;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_82540 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_82540(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_init_mac_params_82540");
+
+ /* Set media type */
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82545EM_FIBER:
+ case E1000_DEV_ID_82545GM_FIBER:
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ hw->phy.media_type = e1000_media_type_fiber;
+ break;
+ case E1000_DEV_ID_82545GM_SERDES:
+ case E1000_DEV_ID_82546GB_SERDES:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ break;
+ default:
+ hw->phy.media_type = e1000_media_type_copper;
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pci_generic;
+ /* function id */
+ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_82540;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_82540;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_generic;
+ /* physical interface setup */
+ mac->ops.setup_physical_interface =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_setup_copper_link_82540
+ : e1000_setup_fiber_serdes_link_82540;
+ /* check for link */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ mac->ops.check_for_link = e1000_check_for_copper_link_generic;
+ break;
+ case e1000_media_type_fiber:
+ mac->ops.check_for_link = e1000_check_for_fiber_link_generic;
+ break;
+ case e1000_media_type_internal_serdes:
+ mac->ops.check_for_link = e1000_check_for_serdes_link_generic;
+ break;
+ default:
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ break;
+ }
+ /* link info */
+ mac->ops.get_link_up_info =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_get_speed_and_duplex_copper_generic
+ : e1000_get_speed_and_duplex_fiber_serdes_generic;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_82540;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_generic;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.led_off = e1000_led_off_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82540;
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_function_pointers_82540 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82540(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82540");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_82540;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82540;
+ hw->phy.ops.init_params = e1000_init_phy_params_82540;
+}
+
+/**
+ * e1000_reset_hw_82540 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+STATIC s32 e1000_reset_hw_82540(struct e1000_hw *hw)
+{
+ u32 ctrl, manc;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_reset_hw_82540");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ /*
+ * Delay to allow any outstanding PCI transactions to complete
+ * before resetting the device.
+ */
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGOUT("Issuing a global reset to 82540/82545/82546 MAC\n");
+ switch (hw->mac.type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ E1000_WRITE_REG(hw, E1000_CTRL_DUP, ctrl | E1000_CTRL_RST);
+ break;
+ default:
+ /*
+ * These controllers can't ack the 64-bit write when
+ * issuing the reset, so we use IO-mapping as a
+ * workaround to issue the reset.
+ */
+ E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+ break;
+ }
+
+ /* Wait for EEPROM reload */
+ msec_delay(5);
+
+ /* Disable HW ARPs on ASF enabled adapters */
+ manc = E1000_READ_REG(hw, E1000_MANC);
+ manc &= ~E1000_MANC_ARP_EN;
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_82540 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+STATIC s32 e1000_init_hw_82540(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 txdctl, ctrl_ext;
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_init_hw_82540");
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ if (ret_val) {
+ DEBUGOUT("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+ }
+
+ /* Disabling VLAN filtering */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ if (mac->type < e1000_82545_rev_3)
+ E1000_WRITE_REG(hw, E1000_VET, 0);
+
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address. */
+ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+ /*
+ * Avoid back to back register writes by adding the register
+ * read (flush). This is to protect against some strange
+ * bridge configurations that may issue Memory Write Block
+ * (MWB) to our register space. The *_rev_3 hardware at
+ * least doesn't respond correctly to every other dword in an
+ * MWB to our register space.
+ */
+ E1000_WRITE_FLUSH(hw);
+ }
+
+ if (mac->type < e1000_82545_rev_3)
+ e1000_pcix_mmrbc_workaround_generic(hw);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB;
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82540(hw);
+
+ if ((hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER) ||
+ (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3)) {
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /*
+ * Relaxed ordering must be disabled to avoid a parity
+ * error crash in a PCI slot.
+ */
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_82540 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex. Then we check for link, once link is established calls
+ * to configure collision distance and flow control are called. If link is
+ * not established, we return -E1000_ERR_PHY (-2).
+ **/
+STATIC s32 e1000_setup_copper_link_82540(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_setup_copper_link_82540");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ ret_val = e1000_set_phy_mode_82540(hw);
+ if (ret_val)
+ goto out;
+
+ if (hw->mac.type == e1000_82545_rev_3 ||
+ hw->mac.type == e1000_82546_rev_3) {
+ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ &data);
+ if (ret_val)
+ goto out;
+ data |= 0x00000008;
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ data);
+ if (ret_val)
+ goto out;
+ }
+
+ ret_val = e1000_copper_link_setup_m88(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_fiber_serdes_link_82540 - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Set the output amplitude to the value in the EEPROM and adjust the VCO
+ * speed to improve Bit Error Rate (BER) performance. Configures collision
+ * distance and flow control for fiber and serdes links. Upon successful
+ * setup, poll for link.
+ **/
+STATIC s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_setup_fiber_serdes_link_82540");
+
+ switch (mac->type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+ /*
+ * If we're on serdes media, adjust the output
+ * amplitude to value set in the EEPROM.
+ */
+ ret_val = e1000_adjust_serdes_amplitude_82540(hw);
+ if (ret_val)
+ goto out;
+ }
+ /* Adjust VCO speed to improve BER performance */
+ ret_val = e1000_set_vco_speed_82540(hw);
+ if (ret_val)
+ goto out;
+ default:
+ break;
+ }
+
+ ret_val = e1000_setup_fiber_serdes_link_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_adjust_serdes_amplitude_82540 - Adjust amplitude based on EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Adjust the SERDES output amplitude based on the EEPROM settings.
+ **/
+STATIC s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 nvm_data;
+
+ DEBUGFUNC("e1000_adjust_serdes_amplitude_82540");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_SERDES_AMPLITUDE, 1, &nvm_data);
+ if (ret_val)
+ goto out;
+
+ if (nvm_data != NVM_RESERVED_WORD) {
+ /* Adjust serdes output amplitude only. */
+ nvm_data &= NVM_SERDES_AMPLITUDE_MASK;
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_EXT_CTRL,
+ nvm_data);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_vco_speed_82540 - Set VCO speed for better performance
+ * @hw: pointer to the HW structure
+ *
+ * Set the VCO speed to improve Bit Error Rate (BER) performance.
+ **/
+STATIC s32 e1000_set_vco_speed_82540(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 default_page = 0;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_set_vco_speed_82540");
+
+ /* Set PHY register 30, page 5, bit 8 to 0 */
+
+ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Set PHY register 30, page 4, bit 11 to 1 */
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT,
+ default_page);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_phy_mode_82540 - Set PHY to class A mode
+ * @hw: pointer to the HW structure
+ *
+ * Sets the PHY to class A mode and assumes the following operations will
+ * follow to enable the new class mode:
+ * 1. Do a PHY soft reset.
+ * 2. Restart auto-negotiation or force link.
+ **/
+STATIC s32 e1000_set_phy_mode_82540(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 nvm_data;
+
+ DEBUGFUNC("e1000_set_phy_mode_82540");
+
+ if (hw->mac.type != e1000_82545_rev_3)
+ goto out;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PHY_CLASS_WORD, 1, &nvm_data);
+ if (ret_val) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ if ((nvm_data != NVM_RESERVED_WORD) && (nvm_data & NVM_PHY_CLASS_A)) {
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT,
+ 0x000B);
+ if (ret_val) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL,
+ 0x8104);
+ if (ret_val) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_82540 - Remove link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, remove the link.
+ **/
+STATIC void e1000_power_down_phy_copper_82540(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN))
+ e1000_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_82540 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82540");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+}
+
+/**
+ * e1000_read_mac_addr_82540 - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the EEPROM and stores the value.
+ * Since devices with two ports use the same EEPROM, we increment the
+ * last bit in the MAC address for the second port.
+ *
+ * This version is being used over generic because of customer issues
+ * with VmWare and Virtual Box when using generic. It seems in
+ * the emulated 82545, RAR[0] does NOT have a valid address after a
+ * reset, this older method works and using this breaks nothing for
+ * these legacy adapters.
+ **/
+s32 e1000_read_mac_addr_82540(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 offset, nvm_data, i;
+
+ DEBUGFUNC("e1000_read_mac_addr");
+
+ for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+ offset = i >> 1;
+ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+ hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
+ hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
+ }
+
+ /* Flip last bit of mac address if we're on second port */
+ if (hw->bus.func == E1000_FUNC_1)
+ hw->mac.perm_addr[5] ^= 1;
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+out:
+ return ret_val;
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.c
new file mode 100644
index 00000000..9cdb91c9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.c
@@ -0,0 +1,1268 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/*
+ * 82541EI Gigabit Ethernet Controller
+ * 82541ER Gigabit Ethernet Controller
+ * 82541GI Gigabit Ethernet Controller
+ * 82541PI Gigabit Ethernet Controller
+ * 82547EI Gigabit Ethernet Controller
+ * 82547GI Gigabit Ethernet Controller
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_init_phy_params_82541(struct e1000_hw *hw);
+STATIC s32 e1000_init_nvm_params_82541(struct e1000_hw *hw);
+STATIC s32 e1000_init_mac_params_82541(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_82541(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_82541(struct e1000_hw *hw);
+STATIC s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+STATIC s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_82541(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_link_82541(struct e1000_hw *hw);
+STATIC s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw);
+STATIC s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_setup_led_82541(struct e1000_hw *hw);
+STATIC s32 e1000_cleanup_led_82541(struct e1000_hw *hw);
+STATIC void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw);
+STATIC s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
+ bool link_up);
+STATIC s32 e1000_phy_init_script_82541(struct e1000_hw *hw);
+STATIC void e1000_power_down_phy_copper_82541(struct e1000_hw *hw);
+
+STATIC const u16 e1000_igp_cable_length_table[] = {
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 10, 10, 10, 10, 10,
+ 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, 25, 25, 25, 25, 30, 30, 30, 30,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 50, 50, 50, 50, 50, 50, 50, 60, 60,
+ 60, 60, 60, 60, 60, 60, 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80,
+ 80, 90, 90, 90, 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 100, 100, 110, 110, 110, 110, 110, 110,
+ 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 120, 120,
+ 120, 120, 120, 120, 120, 120, 120, 120};
+#define IGP01E1000_AGC_LENGTH_TABLE_SIZE \
+ (sizeof(e1000_igp_cable_length_table) / \
+ sizeof(e1000_igp_cable_length_table[0]))
+
+/**
+ * e1000_init_phy_params_82541 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_82541(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_phy_params_82541");
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 10000;
+ phy->type = e1000_phy_igp;
+
+ /* Function Pointers */
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+ phy->ops.get_cable_length = e1000_get_cable_length_igp_82541;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
+ phy->ops.get_info = e1000_get_phy_info_igp;
+ phy->ops.read_reg = e1000_read_phy_reg_igp;
+ phy->ops.reset = e1000_phy_hw_reset_82541;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82541;
+ phy->ops.write_reg = e1000_write_phy_reg_igp;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82541;
+
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+
+ /* Verify phy id */
+ if (phy->id != IGP01E1000_I_PHY_ID) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82541 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_nvm_params_82541(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = E1000_SUCCESS;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u16 size;
+
+ DEBUGFUNC("e1000_init_nvm_params_82541");
+
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->type = e1000_nvm_eeprom_spi;
+ eecd |= E1000_EECD_ADDR_BITS;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->type = e1000_nvm_eeprom_spi;
+ eecd &= ~E1000_EECD_ADDR_BITS;
+ break;
+ case e1000_nvm_override_microwire_large:
+ nvm->type = e1000_nvm_eeprom_microwire;
+ eecd |= E1000_EECD_SIZE;
+ break;
+ case e1000_nvm_override_microwire_small:
+ nvm->type = e1000_nvm_eeprom_microwire;
+ eecd &= ~E1000_EECD_SIZE;
+ break;
+ default:
+ nvm->type = eecd & E1000_EECD_TYPE ? e1000_nvm_eeprom_spi
+ : e1000_nvm_eeprom_microwire;
+ break;
+ }
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
+ nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 16 : 8;
+ nvm->delay_usec = 1;
+ nvm->opcode_bits = 8;
+ nvm->page_size = (eecd & E1000_EECD_ADDR_BITS) ? 32 : 8;
+
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_generic;
+ nvm->ops.read = e1000_read_nvm_spi;
+ nvm->ops.release = e1000_release_nvm_generic;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_generic;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.write = e1000_write_nvm_spi;
+
+ /*
+ * nvm->word_size must be discovered after the pointers
+ * are set so we can verify the size from the nvm image
+ * itself. Temporarily set it to a dummy value so the
+ * read will work.
+ */
+ nvm->word_size = 64;
+ ret_val = nvm->ops.read(hw, NVM_CFG, 1, &size);
+ if (ret_val)
+ goto out;
+ size = (size & NVM_SIZE_MASK) >> NVM_SIZE_SHIFT;
+ /*
+ * if size != 0, it can be added to a constant and become
+ * the left-shift value to set the word_size. Otherwise,
+ * word_size stays at 64.
+ */
+ if (size) {
+ size += NVM_WORD_SIZE_BASE_SHIFT_82541;
+ nvm->word_size = 1 << size;
+ }
+ } else {
+ nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 8 : 6;
+ nvm->delay_usec = 50;
+ nvm->opcode_bits = 3;
+ nvm->word_size = (eecd & E1000_EECD_ADDR_BITS) ? 256 : 64;
+
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_generic;
+ nvm->ops.read = e1000_read_nvm_microwire;
+ nvm->ops.release = e1000_release_nvm_generic;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_generic;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.write = e1000_write_nvm_microwire;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mac_params_82541 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_82541(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_init_mac_params_82541");
+
+ /* Set media type */
+ hw->phy.media_type = e1000_media_type_copper;
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+
+ /* Function Pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pci_generic;
+ /* function id */
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_82541;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_82541;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_generic;
+ /* physical interface link setup */
+ mac->ops.setup_physical_interface = e1000_setup_copper_link_82541;
+ /* check for link */
+ mac->ops.check_for_link = e1000_check_for_link_82541;
+ /* link info */
+ mac->ops.get_link_up_info = e1000_get_link_up_info_82541;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_82541;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_82541;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.led_off = e1000_led_off_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82541;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_82541 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82541(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82541");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_82541;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82541;
+ hw->phy.ops.init_params = e1000_init_phy_params_82541;
+}
+
+/**
+ * e1000_reset_hw_82541 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+STATIC s32 e1000_reset_hw_82541(struct e1000_hw *hw)
+{
+ u32 ledctl, ctrl, manc;
+
+ DEBUGFUNC("e1000_reset_hw_82541");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ /*
+ * Delay to allow any outstanding PCI transactions to complete
+ * before resetting the device.
+ */
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Must reset the Phy before resetting the MAC */
+ if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) {
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_PHY_RST));
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(5);
+ }
+
+ DEBUGOUT("Issuing a global reset to 82541/82547 MAC\n");
+ switch (hw->mac.type) {
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ /*
+ * These controllers can't ack the 64-bit write when
+ * issuing the reset, so we use IO-mapping as a
+ * workaround to issue the reset.
+ */
+ E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+ break;
+ default:
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+ break;
+ }
+
+ /* Wait for NVM reload */
+ msec_delay(20);
+
+ /* Disable HW ARPs on ASF enabled adapters */
+ manc = E1000_READ_REG(hw, E1000_MANC);
+ manc &= ~E1000_MANC_ARP_EN;
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+
+ if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) {
+ e1000_phy_init_script_82541(hw);
+
+ /* Configure activity LED after Phy reset */
+ ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+ ledctl &= IGP_ACTIVITY_LED_MASK;
+ ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+ }
+
+ /* Once again, mask the interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+
+ /* Clear any pending interrupt events. */
+ E1000_READ_REG(hw, E1000_ICR);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_hw_82541 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+STATIC s32 e1000_init_hw_82541(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+ u32 i, txdctl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_hw_82541");
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ if (ret_val) {
+ DEBUGOUT("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+ }
+
+ /* Storing the Speed Power Down value for later use */
+ ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO,
+ &dev_spec->spd_default);
+ if (ret_val)
+ goto out;
+
+ /* Disabling VLAN filtering */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address. */
+ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+ /*
+ * Avoid back to back register writes by adding the register
+ * read (flush). This is to protect against some strange
+ * bridge configurations that may issue Memory Write Block
+ * (MWB) to our register space.
+ */
+ E1000_WRITE_FLUSH(hw);
+ }
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB;
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82541(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_link_up_info_82541 - Report speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to speed buffer
+ * @duplex: pointer to duplex buffer
+ *
+ * Retrieve the current speed and duplex configuration.
+ **/
+STATIC s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_get_link_up_info_82541");
+
+ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
+ if (ret_val)
+ goto out;
+
+ if (!phy->speed_downgraded)
+ goto out;
+
+ /*
+ * IGP01 PHY may advertise full duplex operation after speed
+ * downgrade even if it is operating at half duplex.
+ * Here we set the duplex settings to match the duplex in the
+ * link partner's capabilities.
+ */
+ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_EXP, &data);
+ if (ret_val)
+ goto out;
+
+ if (!(data & NWAY_ER_LP_NWAY_CAPS)) {
+ *duplex = HALF_DUPLEX;
+ } else {
+ ret_val = phy->ops.read_reg(hw, PHY_LP_ABILITY, &data);
+ if (ret_val)
+ goto out;
+
+ if (*speed == SPEED_100) {
+ if (!(data & NWAY_LPAR_100TX_FD_CAPS))
+ *duplex = HALF_DUPLEX;
+ } else if (*speed == SPEED_10) {
+ if (!(data & NWAY_LPAR_10T_FD_CAPS))
+ *duplex = HALF_DUPLEX;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_82541 - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ **/
+STATIC s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 ledctl;
+
+ DEBUGFUNC("e1000_phy_hw_reset_82541");
+
+ ret_val = e1000_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ e1000_phy_init_script_82541(hw);
+
+ if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) {
+ /* Configure activity LED after PHY reset */
+ ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+ ledctl &= IGP_ACTIVITY_LED_MASK;
+ ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_82541 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex. Then we check for link, once link is established calls
+ * to configure collision distance and flow control are called. If link is
+ * not established, we return -E1000_ERR_PHY (-2).
+ **/
+STATIC s32 e1000_setup_copper_link_82541(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+ s32 ret_val;
+ u32 ctrl, ledctl;
+
+ DEBUGFUNC("e1000_setup_copper_link_82541");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+
+ /* Earlier revs of the IGP phy require us to force MDI. */
+ if (hw->mac.type == e1000_82541 || hw->mac.type == e1000_82547) {
+ dev_spec->dsp_config = e1000_dsp_config_disabled;
+ phy->mdix = 1;
+ } else {
+ dev_spec->dsp_config = e1000_dsp_config_enabled;
+ }
+
+ ret_val = e1000_copper_link_setup_igp(hw);
+ if (ret_val)
+ goto out;
+
+ if (hw->mac.autoneg) {
+ if (dev_spec->ffe_config == e1000_ffe_config_active)
+ dev_spec->ffe_config = e1000_ffe_config_enabled;
+ }
+
+ /* Configure activity LED after Phy reset */
+ ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+ ledctl &= IGP_ACTIVITY_LED_MASK;
+ ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+
+ ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_link_82541 - Check/Store link connection
+ * @hw: pointer to the HW structure
+ *
+ * This checks the link condition of the adapter and stores the
+ * results in the hw->mac structure.
+ **/
+STATIC s32 e1000_check_for_link_82541(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
+ DEBUGFUNC("e1000_check_for_link_82541");
+
+ /*
+ * We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ /*
+ * First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link) {
+ ret_val = e1000_config_dsp_after_link_change_82541(hw, false);
+ goto out; /* No link detected */
+ }
+
+ mac->get_link_status = false;
+
+ /*
+ * Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ e1000_check_downshift_generic(hw);
+
+ /*
+ * If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg) {
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ ret_val = e1000_config_dsp_after_link_change_82541(hw, true);
+
+ /*
+ * Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ mac->ops.config_collision_dist(hw);
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_config_dsp_after_link_change_82541 - Config DSP after link
+ * @hw: pointer to the HW structure
+ * @link_up: boolean flag for link up status
+ *
+ * Return E1000_ERR_PHY when failing to read/write the PHY, else E1000_SUCCESS
+ * at any other case.
+ *
+ * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved to improve link quality.
+ **/
+STATIC s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
+ bool link_up)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+ s32 ret_val;
+ u32 idle_errs = 0;
+ u16 phy_data, phy_saved_data, speed, duplex, i;
+ u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+ u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+ IGP01E1000_PHY_AGC_PARAM_A,
+ IGP01E1000_PHY_AGC_PARAM_B,
+ IGP01E1000_PHY_AGC_PARAM_C,
+ IGP01E1000_PHY_AGC_PARAM_D};
+
+ DEBUGFUNC("e1000_config_dsp_after_link_change_82541");
+
+ if (link_up) {
+ ret_val = hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ goto out;
+ }
+
+ if (speed != SPEED_1000) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ ret_val = phy->ops.get_cable_length(hw);
+ if (ret_val)
+ goto out;
+
+ if ((dev_spec->dsp_config == e1000_dsp_config_enabled) &&
+ phy->min_cable_length >= 50) {
+
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = phy->ops.read_reg(hw,
+ dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+ ret_val = phy->ops.write_reg(hw,
+ dsp_reg_array[i],
+ phy_data);
+ if (ret_val)
+ goto out;
+ }
+ dev_spec->dsp_config = e1000_dsp_config_activated;
+ }
+
+ if ((dev_spec->ffe_config != e1000_ffe_config_enabled) ||
+ (phy->min_cable_length >= 50)) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ /* clear previous idle error counts */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ goto out;
+
+ for (i = 0; i < ffe_idle_err_timeout; i++) {
+ usec_delay(1000);
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
+ if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+ dev_spec->ffe_config = e1000_ffe_config_active;
+
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_CM_CP);
+ if (ret_val)
+ goto out;
+ break;
+ }
+
+ if (idle_errs)
+ ffe_idle_err_timeout =
+ FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+ }
+ } else {
+ if (dev_spec->dsp_config == e1000_dsp_config_activated) {
+ /*
+ * Save off the current value of register 0x2F5B
+ * to be restored at the end of the routines.
+ */
+ ret_val = phy->ops.read_reg(hw, 0x2F5B,
+ &phy_saved_data);
+ if (ret_val)
+ goto out;
+
+ /* Disable the PHY transmitter */
+ ret_val = phy->ops.write_reg(hw, 0x2F5B, 0x0003);
+ if (ret_val)
+ goto out;
+
+ msec_delay_irq(20);
+
+ ret_val = phy->ops.write_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIG);
+ if (ret_val)
+ goto out;
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = phy->ops.read_reg(hw,
+ dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+ phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+ ret_val = phy->ops.write_reg(hw,
+ dsp_reg_array[i],
+ phy_data);
+ if (ret_val)
+ goto out;
+ }
+
+ ret_val = phy->ops.write_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ goto out;
+
+ msec_delay_irq(20);
+
+ /* Now enable the transmitter */
+ ret_val = phy->ops.write_reg(hw, 0x2F5B,
+ phy_saved_data);
+ if (ret_val)
+ goto out;
+
+ dev_spec->dsp_config = e1000_dsp_config_enabled;
+ }
+
+ if (dev_spec->ffe_config != e1000_ffe_config_active) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ /*
+ * Save off the current value of register 0x2F5B
+ * to be restored at the end of the routines.
+ */
+ ret_val = phy->ops.read_reg(hw, 0x2F5B, &phy_saved_data);
+ if (ret_val)
+ goto out;
+
+ /* Disable the PHY transmitter */
+ ret_val = phy->ops.write_reg(hw, 0x2F5B, 0x0003);
+ if (ret_val)
+ goto out;
+
+ msec_delay_irq(20);
+
+ ret_val = phy->ops.write_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIG);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_DEFAULT);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ goto out;
+
+ msec_delay_irq(20);
+
+ /* Now enable the transmitter */
+ ret_val = phy->ops.write_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (ret_val)
+ goto out;
+
+ dev_spec->ffe_config = e1000_ffe_config_enabled;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_igp_82541 - Determine cable length for igp PHY
+ * @hw: pointer to the HW structure
+ *
+ * The automatic gain control (agc) normalizes the amplitude of the
+ * received signal, adjusting for the attenuation produced by the
+ * cable. By reading the AGC registers, which represent the
+ * combination of coarse and fine gain value, the value can be put
+ * into a lookup table to obtain the approximate cable length
+ * for each channel.
+ **/
+STATIC s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 i, data;
+ u16 cur_agc_value, agc_value = 0;
+ u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+ u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {IGP01E1000_PHY_AGC_A,
+ IGP01E1000_PHY_AGC_B,
+ IGP01E1000_PHY_AGC_C,
+ IGP01E1000_PHY_AGC_D};
+
+ DEBUGFUNC("e1000_get_cable_length_igp_82541");
+
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &data);
+ if (ret_val)
+ goto out;
+
+ cur_agc_value = data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+ /* Bounds checking */
+ if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+ (cur_agc_value == 0)) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ agc_value += cur_agc_value;
+
+ if (min_agc_value > cur_agc_value)
+ min_agc_value = cur_agc_value;
+ }
+
+ /* Remove the minimal AGC result for length < 50m */
+ if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * 50) {
+ agc_value -= min_agc_value;
+ /* Average the three remaining channels for the length. */
+ agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+ } else {
+ /* Average the channels for the length. */
+ agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+ }
+
+ phy->min_cable_length = (e1000_igp_cable_length_table[agc_value] >
+ IGP01E1000_AGC_RANGE)
+ ? (e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE)
+ : 0;
+ phy->max_cable_length = e1000_igp_cable_length_table[agc_value] +
+ IGP01E1000_AGC_RANGE;
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82541 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+STATIC s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d3_lplu_state_82541");
+
+ switch (hw->mac.type) {
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ break;
+ default:
+ ret_val = e1000_set_d3_lplu_state_generic(hw, active);
+ goto out;
+ break;
+ }
+
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_GMII_FIFO, &data);
+ if (ret_val)
+ goto out;
+
+ if (!active) {
+ data &= ~IGP01E1000_GMII_FLEX_SPD;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_GMII_FIFO, data);
+ if (ret_val)
+ goto out;
+
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ }
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= IGP01E1000_GMII_FLEX_SPD;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_GMII_FIFO, data);
+ if (ret_val)
+ goto out;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_led_82541 - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use and saves the current state
+ * of the LED so it can be later restored.
+ **/
+STATIC s32 e1000_setup_led_82541(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_led_82541");
+
+ ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO,
+ &dev_spec->spd_default);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO,
+ (u16)(dev_spec->spd_default &
+ ~IGP01E1000_GMII_SPD));
+ if (ret_val)
+ goto out;
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_cleanup_led_82541 - Set LED config to default operation
+ * @hw: pointer to the HW structure
+ *
+ * Remove the current LED configuration and set the LED configuration
+ * to the default value, saved from the EEPROM.
+ **/
+STATIC s32 e1000_cleanup_led_82541(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_cleanup_led_82541");
+
+ ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO,
+ dev_spec->spd_default);
+ if (ret_val)
+ goto out;
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_phy_init_script_82541 - Initialize GbE PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the IGP PHY.
+ **/
+STATIC s32 e1000_phy_init_script_82541(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+ u32 ret_val;
+ u16 phy_saved_data;
+
+ DEBUGFUNC("e1000_phy_init_script_82541");
+
+ if (!dev_spec->phy_init_script) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ /* Delay after phy reset to enable NVM configuration to load */
+ msec_delay(20);
+
+ /*
+ * Save off the current value of register 0x2F5B to be restored at
+ * the end of this routine.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, 0x2F5B, &phy_saved_data);
+
+ /* Disabled the PHY transmitter */
+ hw->phy.ops.write_reg(hw, 0x2F5B, 0x0003);
+
+ msec_delay(20);
+
+ hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
+
+ msec_delay(5);
+
+ switch (hw->mac.type) {
+ case e1000_82541:
+ case e1000_82547:
+ hw->phy.ops.write_reg(hw, 0x1F95, 0x0001);
+
+ hw->phy.ops.write_reg(hw, 0x1F71, 0xBD21);
+
+ hw->phy.ops.write_reg(hw, 0x1F79, 0x0018);
+
+ hw->phy.ops.write_reg(hw, 0x1F30, 0x1600);
+
+ hw->phy.ops.write_reg(hw, 0x1F31, 0x0014);
+
+ hw->phy.ops.write_reg(hw, 0x1F32, 0x161C);
+
+ hw->phy.ops.write_reg(hw, 0x1F94, 0x0003);
+
+ hw->phy.ops.write_reg(hw, 0x1F96, 0x003F);
+
+ hw->phy.ops.write_reg(hw, 0x2010, 0x0008);
+ break;
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->phy.ops.write_reg(hw, 0x1F73, 0x0099);
+ break;
+ default:
+ break;
+ }
+
+ hw->phy.ops.write_reg(hw, 0x0000, 0x3300);
+
+ msec_delay(20);
+
+ /* Now enable the transmitter */
+ hw->phy.ops.write_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (hw->mac.type == e1000_82547) {
+ u16 fused, fine, coarse;
+
+ /* Move to analog registers page */
+ hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS,
+ &fused);
+
+ if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+ hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS,
+ &fused);
+
+ fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+ coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+ if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+ coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+ } else if (coarse ==
+ IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+ fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+ (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+ (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+ hw->phy.ops.write_reg(hw,
+ IGP01E1000_ANALOG_FUSE_CONTROL,
+ fused);
+ hw->phy.ops.write_reg(hw,
+ IGP01E1000_ANALOG_FUSE_BYPASS,
+ IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_script_state_82541 - Enable/Disable PHY init script
+ * @hw: pointer to the HW structure
+ * @state: boolean value used to enable/disable PHY init script
+ *
+ * Allows the driver to enable/disable the PHY init script, if the PHY is an
+ * IGP PHY.
+ **/
+void e1000_init_script_state_82541(struct e1000_hw *hw, bool state)
+{
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+
+ DEBUGFUNC("e1000_init_script_state_82541");
+
+ if (hw->phy.type != e1000_phy_igp) {
+ DEBUGOUT("Initialization script not necessary.\n");
+ goto out;
+ }
+
+ dev_spec->phy_init_script = state;
+
+out:
+ return;
+}
+
+/**
+ * e1000_power_down_phy_copper_82541 - Remove link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, remove the link.
+ **/
+STATIC void e1000_power_down_phy_copper_82541(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN))
+ e1000_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_82541 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82541");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.h
new file mode 100644
index 00000000..e0bee7ce
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.h
@@ -0,0 +1,91 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_82541_H_
+#define _E1000_82541_H_
+
+#define NVM_WORD_SIZE_BASE_SHIFT_82541 (NVM_WORD_SIZE_BASE_SHIFT + 1)
+
+#define IGP01E1000_PHY_CHANNEL_NUM 4
+
+#define IGP01E1000_PHY_AGC_A 0x1172
+#define IGP01E1000_PHY_AGC_B 0x1272
+#define IGP01E1000_PHY_AGC_C 0x1472
+#define IGP01E1000_PHY_AGC_D 0x1872
+
+#define IGP01E1000_PHY_AGC_PARAM_A 0x1171
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C 0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D 0x1871
+
+#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000
+
+#define IGP01E1000_PHY_DSP_RESET 0x1F33
+
+#define IGP01E1000_PHY_DSP_FFE 0x1F35
+#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A
+
+#define IGP01E1000_IEEE_FORCE_GIG 0x0140
+#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
+
+#define IGP01E1000_AGC_LENGTH_SHIFT 7
+#define IGP01E1000_AGC_RANGE 10
+
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100
+
+#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0
+#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1
+#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC
+#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE
+
+#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100
+#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80
+#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070
+#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040
+#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010
+#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080
+#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500
+#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000
+#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002
+
+#define IGP01E1000_MSE_CHANNEL_D 0x000F
+#define IGP01E1000_MSE_CHANNEL_C 0x00F0
+#define IGP01E1000_MSE_CHANNEL_B 0x0F00
+#define IGP01E1000_MSE_CHANNEL_A 0xF000
+
+
+void e1000_init_script_state_82541(struct e1000_hw *hw, bool state);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82542.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82542.c
new file mode 100644
index 00000000..4f1183af
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82542.c
@@ -0,0 +1,590 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/*
+ * 82542 Gigabit Ethernet Controller
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_init_phy_params_82542(struct e1000_hw *hw);
+STATIC s32 e1000_init_nvm_params_82542(struct e1000_hw *hw);
+STATIC s32 e1000_init_mac_params_82542(struct e1000_hw *hw);
+STATIC s32 e1000_get_bus_info_82542(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_82542(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_82542(struct e1000_hw *hw);
+STATIC s32 e1000_setup_link_82542(struct e1000_hw *hw);
+STATIC s32 e1000_led_on_82542(struct e1000_hw *hw);
+STATIC s32 e1000_led_off_82542(struct e1000_hw *hw);
+STATIC int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index);
+STATIC void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw);
+STATIC s32 e1000_read_mac_addr_82542(struct e1000_hw *hw);
+
+/**
+ * e1000_init_phy_params_82542 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_82542(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_init_phy_params_82542");
+
+ phy->type = e1000_phy_none;
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82542 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_nvm_params_82542(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+
+ DEBUGFUNC("e1000_init_nvm_params_82542");
+
+ nvm->address_bits = 6;
+ nvm->delay_usec = 50;
+ nvm->opcode_bits = 3;
+ nvm->type = e1000_nvm_eeprom_microwire;
+ nvm->word_size = 64;
+
+ /* Function Pointers */
+ nvm->ops.read = e1000_read_nvm_microwire;
+ nvm->ops.release = e1000_stop_nvm;
+ nvm->ops.write = e1000_write_nvm_microwire;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_82542 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_82542(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_init_mac_params_82542");
+
+ /* Set media type */
+ hw->phy.media_type = e1000_media_type_fiber;
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_82542;
+ /* function id */
+ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_82542;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_82542;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_82542;
+ /* phy/fiber/serdes setup */
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_generic;
+ /* check for link */
+ mac->ops.check_for_link = e1000_check_for_fiber_link_generic;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_82542;
+ /* set RAR */
+ mac->ops.rar_set = e1000_rar_set_82542;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_82542;
+ mac->ops.led_off = e1000_led_off_82542;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82542;
+ /* link info */
+ mac->ops.get_link_up_info =
+ e1000_get_speed_and_duplex_fiber_serdes_generic;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_82542 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82542(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82542");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_82542;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82542;
+ hw->phy.ops.init_params = e1000_init_phy_params_82542;
+}
+
+/**
+ * e1000_get_bus_info_82542 - Obtain bus information for adapter
+ * @hw: pointer to the HW structure
+ *
+ * This will obtain information about the HW bus for which the
+ * adapter is attached and stores it in the hw structure.
+ **/
+STATIC s32 e1000_get_bus_info_82542(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_get_bus_info_82542");
+
+ hw->bus.type = e1000_bus_type_pci;
+ hw->bus.speed = e1000_bus_speed_unknown;
+ hw->bus.width = e1000_bus_width_unknown;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_hw_82542 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+STATIC s32 e1000_reset_hw_82542(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_reset_hw_82542");
+
+ if (hw->revision_id == E1000_REVISION_2) {
+ DEBUGOUT("Disabling MWI on 82542 rev 2\n");
+ e1000_pci_clear_mwi(hw);
+ }
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ /*
+ * Delay to allow any outstanding PCI transactions to complete before
+ * resetting the device
+ */
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGOUT("Issuing a global reset to 82542/82543 MAC\n");
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+ hw->nvm.ops.reload(hw);
+ msec_delay(2);
+
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ if (hw->revision_id == E1000_REVISION_2) {
+ if (bus->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_82542 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+STATIC s32 e1000_init_hw_82542(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82542 *dev_spec = &hw->dev_spec._82542;
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl;
+ u16 i;
+
+ DEBUGFUNC("e1000_init_hw_82542");
+
+ /* Disabling VLAN filtering */
+ E1000_WRITE_REG(hw, E1000_VET, 0);
+ mac->ops.clear_vfta(hw);
+
+ /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+ if (hw->revision_id == E1000_REVISION_2) {
+ DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(5);
+ }
+
+ /* Setup the receive address. */
+ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+ /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+ if (hw->revision_id == E1000_REVISION_2) {
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+ if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /*
+ * Set the PCI priority bit correctly in the CTRL register. This
+ * determines if the adapter gives priority to receives, or if it
+ * gives equal priority to transmits and receives.
+ */
+ if (dev_spec->dma_fairness) {
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR);
+ }
+
+ /* Setup link and flow control */
+ ret_val = e1000_setup_link_82542(hw);
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82542(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_link_82542 - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+STATIC s32 e1000_setup_link_82542(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_link_82542");
+
+ ret_val = e1000_set_default_fc_generic(hw);
+ if (ret_val)
+ goto out;
+
+ hw->fc.requested_mode &= ~e1000_fc_tx_pause;
+
+ if (mac->report_tx_early)
+ hw->fc.requested_mode &= ~e1000_fc_rx_pause;
+
+ /*
+ * Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
+ hw->fc.current_mode);
+
+ /* Call the necessary subroutine to configure the link. */
+ ret_val = mac->ops.setup_physical_interface(hw);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ DEBUGOUT("Initializing Flow Control address, type and timer regs\n");
+
+ E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+ E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
+
+ E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+ ret_val = e1000_set_fc_watermarks_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_led_on_82542 - Turn on SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED on.
+ **/
+STATIC s32 e1000_led_on_82542(struct e1000_hw *hw)
+{
+ u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGFUNC("e1000_led_on_82542");
+
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off_82542 - Turn off SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED off.
+ **/
+STATIC s32 e1000_led_off_82542(struct e1000_hw *hw)
+{
+ u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGFUNC("e1000_led_off_82542");
+
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_rar_set_82542 - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
+ **/
+STATIC int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ DEBUGFUNC("e1000_rar_set_82542");
+
+ /*
+ * HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
+ E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_translate_register_82542 - Translate the proper register offset
+ * @reg: e1000 register to be read
+ *
+ * Registers in 82542 are located in different offsets than other adapters
+ * even though they function in the same manner. This function takes in
+ * the name of the register to read and returns the correct offset for
+ * 82542 silicon.
+ **/
+u32 e1000_translate_register_82542(u32 reg)
+{
+ /*
+ * Some of the 82542 registers are located at different
+ * offsets than they are in newer adapters.
+ * Despite the difference in location, the registers
+ * function in the same manner.
+ */
+ switch (reg) {
+ case E1000_RA:
+ reg = 0x00040;
+ break;
+ case E1000_RDTR:
+ reg = 0x00108;
+ break;
+ case E1000_RDBAL(0):
+ reg = 0x00110;
+ break;
+ case E1000_RDBAH(0):
+ reg = 0x00114;
+ break;
+ case E1000_RDLEN(0):
+ reg = 0x00118;
+ break;
+ case E1000_RDH(0):
+ reg = 0x00120;
+ break;
+ case E1000_RDT(0):
+ reg = 0x00128;
+ break;
+ case E1000_RDBAL(1):
+ reg = 0x00138;
+ break;
+ case E1000_RDBAH(1):
+ reg = 0x0013C;
+ break;
+ case E1000_RDLEN(1):
+ reg = 0x00140;
+ break;
+ case E1000_RDH(1):
+ reg = 0x00148;
+ break;
+ case E1000_RDT(1):
+ reg = 0x00150;
+ break;
+ case E1000_FCRTH:
+ reg = 0x00160;
+ break;
+ case E1000_FCRTL:
+ reg = 0x00168;
+ break;
+ case E1000_MTA:
+ reg = 0x00200;
+ break;
+ case E1000_TDBAL(0):
+ reg = 0x00420;
+ break;
+ case E1000_TDBAH(0):
+ reg = 0x00424;
+ break;
+ case E1000_TDLEN(0):
+ reg = 0x00428;
+ break;
+ case E1000_TDH(0):
+ reg = 0x00430;
+ break;
+ case E1000_TDT(0):
+ reg = 0x00438;
+ break;
+ case E1000_TIDV:
+ reg = 0x00440;
+ break;
+ case E1000_VFTA:
+ reg = 0x00600;
+ break;
+ case E1000_TDFH:
+ reg = 0x08010;
+ break;
+ case E1000_TDFT:
+ reg = 0x08018;
+ break;
+ default:
+ break;
+ }
+
+ return reg;
+}
+
+/**
+ * e1000_clear_hw_cntrs_82542 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82542");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+}
+
+/**
+ * e1000_read_mac_addr_82542 - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the EEPROM and stores the value.
+ **/
+s32 e1000_read_mac_addr_82542(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 offset, nvm_data, i;
+
+ DEBUGFUNC("e1000_read_mac_addr");
+
+ for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+ offset = i >> 1;
+ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+ hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
+ hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
+ }
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+out:
+ return ret_val;
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.c
new file mode 100644
index 00000000..fc96199d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.c
@@ -0,0 +1,1553 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/*
+ * 82543GC Gigabit Ethernet Controller (Fiber)
+ * 82543GC Gigabit Ethernet Controller (Copper)
+ * 82544EI Gigabit Ethernet Controller (Copper)
+ * 82544EI Gigabit Ethernet Controller (Fiber)
+ * 82544GC Gigabit Ethernet Controller (Copper)
+ * 82544GC Gigabit Ethernet Controller (LOM)
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_init_phy_params_82543(struct e1000_hw *hw);
+STATIC s32 e1000_init_nvm_params_82543(struct e1000_hw *hw);
+STATIC s32 e1000_init_mac_params_82543(struct e1000_hw *hw);
+STATIC s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+STATIC s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset,
+ u16 data);
+STATIC s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw);
+STATIC s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_82543(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_82543(struct e1000_hw *hw);
+STATIC s32 e1000_setup_link_82543(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_82543(struct e1000_hw *hw);
+STATIC s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw);
+STATIC s32 e1000_led_on_82543(struct e1000_hw *hw);
+STATIC s32 e1000_led_off_82543(struct e1000_hw *hw);
+STATIC void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset,
+ u32 value);
+STATIC void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw);
+STATIC s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw);
+STATIC bool e1000_init_phy_disabled_82543(struct e1000_hw *hw);
+STATIC void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl);
+STATIC s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw);
+STATIC void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl);
+STATIC u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw);
+STATIC void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
+ u16 count);
+STATIC bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw);
+STATIC void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state);
+
+/**
+ * e1000_init_phy_params_82543 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_82543(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_init_phy_params_82543");
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ goto out;
+ } else {
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper;
+ }
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 10000;
+ phy->type = e1000_phy_m88;
+
+ /* Function Pointers */
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82543;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
+ phy->ops.read_reg = (hw->mac.type == e1000_82543)
+ ? e1000_read_phy_reg_82543
+ : e1000_read_phy_reg_m88;
+ phy->ops.reset = (hw->mac.type == e1000_82543)
+ ? e1000_phy_hw_reset_82543
+ : e1000_phy_hw_reset_generic;
+ phy->ops.write_reg = (hw->mac.type == e1000_82543)
+ ? e1000_write_phy_reg_82543
+ : e1000_write_phy_reg_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+
+ /*
+ * The external PHY of the 82543 can be in a funky state.
+ * Resetting helps us read the PHY registers for acquiring
+ * the PHY ID.
+ */
+ if (!e1000_init_phy_disabled_82543(hw)) {
+ ret_val = phy->ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Resetting PHY during init failed.\n");
+ goto out;
+ }
+ msec_delay(20);
+ }
+
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+
+ /* Verify phy id */
+ switch (hw->mac.type) {
+ case e1000_82543:
+ if (phy->id != M88E1000_E_PHY_ID) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+ break;
+ case e1000_82544:
+ if (phy->id != M88E1000_I_PHY_ID) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ break;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82543 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_nvm_params_82543(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+
+ DEBUGFUNC("e1000_init_nvm_params_82543");
+
+ nvm->type = e1000_nvm_eeprom_microwire;
+ nvm->word_size = 64;
+ nvm->delay_usec = 50;
+ nvm->address_bits = 6;
+ nvm->opcode_bits = 3;
+
+ /* Function Pointers */
+ nvm->ops.read = e1000_read_nvm_microwire;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_generic;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.write = e1000_write_nvm_microwire;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_82543 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_82543(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_init_mac_params_82543");
+
+ /* Set media type */
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ hw->phy.media_type = e1000_media_type_fiber;
+ break;
+ default:
+ hw->phy.media_type = e1000_media_type_copper;
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pci_generic;
+ /* function id */
+ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_82543;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_82543;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_82543;
+ /* physical interface setup */
+ mac->ops.setup_physical_interface =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_setup_copper_link_82543 : e1000_setup_fiber_link_82543;
+ /* check for link */
+ mac->ops.check_for_link =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_check_for_copper_link_82543
+ : e1000_check_for_fiber_link_82543;
+ /* link info */
+ mac->ops.get_link_up_info =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_get_speed_and_duplex_copper_generic
+ : e1000_get_speed_and_duplex_fiber_serdes_generic;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_82543;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_82543;
+ mac->ops.led_off = e1000_led_off_82543;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82543;
+
+ /* Set tbi compatibility */
+ if ((hw->mac.type != e1000_82543) ||
+ (hw->phy.media_type == e1000_media_type_fiber))
+ e1000_set_tbi_compatibility_82543(hw, false);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_82543 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82543(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82543");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_82543;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82543;
+ hw->phy.ops.init_params = e1000_init_phy_params_82543;
+}
+
+/**
+ * e1000_tbi_compatibility_enabled_82543 - Returns TBI compat status
+ * @hw: pointer to the HW structure
+ *
+ * Returns the current status of 10-bit Interface (TBI) compatibility
+ * (enabled/disabled).
+ **/
+STATIC bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543;
+ bool state = false;
+
+ DEBUGFUNC("e1000_tbi_compatibility_enabled_82543");
+
+ if (hw->mac.type != e1000_82543) {
+ DEBUGOUT("TBI compatibility workaround for 82543 only.\n");
+ goto out;
+ }
+
+ state = !!(dev_spec->tbi_compatibility & TBI_COMPAT_ENABLED);
+
+out:
+ return state;
+}
+
+/**
+ * e1000_set_tbi_compatibility_82543 - Set TBI compatibility
+ * @hw: pointer to the HW structure
+ * @state: enable/disable TBI compatibility
+ *
+ * Enables or disabled 10-bit Interface (TBI) compatibility.
+ **/
+void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw, bool state)
+{
+ struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543;
+
+ DEBUGFUNC("e1000_set_tbi_compatibility_82543");
+
+ if (hw->mac.type != e1000_82543) {
+ DEBUGOUT("TBI compatibility workaround for 82543 only.\n");
+ goto out;
+ }
+
+ if (state)
+ dev_spec->tbi_compatibility |= TBI_COMPAT_ENABLED;
+ else
+ dev_spec->tbi_compatibility &= ~TBI_COMPAT_ENABLED;
+
+out:
+ return;
+}
+
+/**
+ * e1000_tbi_sbp_enabled_82543 - Returns TBI SBP status
+ * @hw: pointer to the HW structure
+ *
+ * Returns the current status of 10-bit Interface (TBI) store bad packet (SBP)
+ * (enabled/disabled).
+ **/
+bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543;
+ bool state = false;
+
+ DEBUGFUNC("e1000_tbi_sbp_enabled_82543");
+
+ if (hw->mac.type != e1000_82543) {
+ DEBUGOUT("TBI compatibility workaround for 82543 only.\n");
+ goto out;
+ }
+
+ state = !!(dev_spec->tbi_compatibility & TBI_SBP_ENABLED);
+
+out:
+ return state;
+}
+
+/**
+ * e1000_set_tbi_sbp_82543 - Set TBI SBP
+ * @hw: pointer to the HW structure
+ * @state: enable/disable TBI store bad packet
+ *
+ * Enables or disabled 10-bit Interface (TBI) store bad packet (SBP).
+ **/
+STATIC void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state)
+{
+ struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543;
+
+ DEBUGFUNC("e1000_set_tbi_sbp_82543");
+
+ if (state && e1000_tbi_compatibility_enabled_82543(hw))
+ dev_spec->tbi_compatibility |= TBI_SBP_ENABLED;
+ else
+ dev_spec->tbi_compatibility &= ~TBI_SBP_ENABLED;
+
+ return;
+}
+
+/**
+ * e1000_init_phy_disabled_82543 - Returns init PHY status
+ * @hw: pointer to the HW structure
+ *
+ * Returns the current status of whether PHY initialization is disabled.
+ * True if PHY initialization is disabled else false.
+ **/
+STATIC bool e1000_init_phy_disabled_82543(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543;
+ bool ret_val;
+
+ DEBUGFUNC("e1000_init_phy_disabled_82543");
+
+ if (hw->mac.type != e1000_82543) {
+ ret_val = false;
+ goto out;
+ }
+
+ ret_val = dev_spec->init_phy_disabled;
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_tbi_adjust_stats_82543 - Adjust stats when TBI enabled
+ * @hw: pointer to the HW structure
+ * @stats: Struct containing statistic register values
+ * @frame_len: The length of the frame in question
+ * @mac_addr: The Ethernet destination address of the frame in question
+ * @max_frame_size: The maximum frame size
+ *
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ **/
+void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
+ struct e1000_hw_stats *stats, u32 frame_len,
+ u8 *mac_addr, u32 max_frame_size)
+{
+ if (!(e1000_tbi_sbp_enabled_82543(hw)))
+ goto out;
+
+ /* First adjust the frame length. */
+ frame_len--;
+ /*
+ * We need to adjust the statistics counters, since the hardware
+ * counters overcount this packet as a CRC error and undercount
+ * the packet as a good packet
+ */
+ /* This packet should not be counted as a CRC error. */
+ stats->crcerrs--;
+ /* This packet does count as a Good Packet Received. */
+ stats->gprc++;
+
+ /* Adjust the Good Octets received counters */
+ stats->gorc += frame_len;
+
+ /*
+ * Is this a broadcast or multicast? Check broadcast first,
+ * since the test for a multicast frame will test positive on
+ * a broadcast frame.
+ */
+ if ((mac_addr[0] == 0xff) && (mac_addr[1] == 0xff))
+ /* Broadcast packet */
+ stats->bprc++;
+ else if (*mac_addr & 0x01)
+ /* Multicast packet */
+ stats->mprc++;
+
+ /*
+ * In this case, the hardware has over counted the number of
+ * oversize frames.
+ */
+ if ((frame_len == max_frame_size) && (stats->roc > 0))
+ stats->roc--;
+
+ /*
+ * Adjust the bin counters when the extra byte put the frame in the
+ * wrong bin. Remember that the frame_len was adjusted above.
+ */
+ if (frame_len == 64) {
+ stats->prc64++;
+ stats->prc127--;
+ } else if (frame_len == 127) {
+ stats->prc127++;
+ stats->prc255--;
+ } else if (frame_len == 255) {
+ stats->prc255++;
+ stats->prc511--;
+ } else if (frame_len == 511) {
+ stats->prc511++;
+ stats->prc1023--;
+ } else if (frame_len == 1023) {
+ stats->prc1023++;
+ stats->prc1522--;
+ } else if (frame_len == 1522) {
+ stats->prc1522++;
+ }
+
+out:
+ return;
+}
+
+/**
+ * e1000_read_phy_reg_82543 - Read PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY at offset and stores the information read to data.
+ **/
+STATIC s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ u32 mdic;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_read_phy_reg_82543");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ ret_val = -E1000_ERR_PARAM;
+ goto out;
+ }
+
+ /*
+ * We must first send a preamble through the MDIO pin to signal the
+ * beginning of an MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /*
+ * Now combine the next few fields that are required for a read
+ * operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine five different times. The format
+ * of an MII read instruction consists of a shift out of 14 bits and
+ * is defined as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Offset>
+ * followed by a shift in of 18 bits. This first two bits shifted in
+ * are TurnAround bits used to avoid contention on the MDIO pin when a
+ * READ operation is performed. These two bits are thrown away
+ * followed by a shift in of 16 bits which contains the desired data.
+ */
+ mdic = (offset | (hw->phy.addr << 5) |
+ (PHY_OP_READ << 10) | (PHY_SOF << 12));
+
+ e1000_shift_out_mdi_bits_82543(hw, mdic, 14);
+
+ /*
+ * Now that we've shifted out the read command to the MII, we need to
+ * "shift in" the 16-bit value (18 total bits) of the requested PHY
+ * register address.
+ */
+ *data = e1000_shift_in_mdi_bits_82543(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_82543 - Write PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be written
+ * @data: pointer to the data to be written at offset
+ *
+ * Writes data to the PHY at offset.
+ **/
+STATIC s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ u32 mdic;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_phy_reg_82543");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ ret_val = -E1000_ERR_PARAM;
+ goto out;
+ }
+
+ /*
+ * We'll need to use the SW defined pins to shift the write command
+ * out to the PHY. We first send a preamble to the PHY to signal the
+ * beginning of the MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /*
+ * Now combine the remaining required fields that will indicate a
+ * write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the command. The
+ * format of a MII write instruction is as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ */
+ mdic = ((PHY_TURNAROUND) | (offset << 2) | (hw->phy.addr << 7) |
+ (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+ mdic <<= 16;
+ mdic |= (u32)data;
+
+ e1000_shift_out_mdi_bits_82543(hw, mdic, 32);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_raise_mdi_clk_82543 - Raise Management Data Input clock
+ * @hw: pointer to the HW structure
+ * @ctrl: pointer to the control register
+ *
+ * Raise the management data input clock by setting the MDC bit in the control
+ * register.
+ **/
+STATIC void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl)
+{
+ /*
+ * Raise the clock input to the Management Data Clock (by setting the
+ * MDC bit), and then delay a sufficient amount of time.
+ */
+ E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl | E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(10);
+}
+
+/**
+ * e1000_lower_mdi_clk_82543 - Lower Management Data Input clock
+ * @hw: pointer to the HW structure
+ * @ctrl: pointer to the control register
+ *
+ * Lower the management data input clock by clearing the MDC bit in the
+ * control register.
+ **/
+STATIC void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl)
+{
+ /*
+ * Lower the clock input to the Management Data Clock (by clearing the
+ * MDC bit), and then delay a sufficient amount of time.
+ */
+ E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl & ~E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(10);
+}
+
+/**
+ * e1000_shift_out_mdi_bits_82543 - Shift data bits our to the PHY
+ * @hw: pointer to the HW structure
+ * @data: data to send to the PHY
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the PHY. So, the value in the
+ * "data" parameter will be shifted out to the PHY one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+STATIC void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
+ u16 count)
+{
+ u32 ctrl, mask;
+
+ /*
+ * We need to shift "count" number of bits out to the PHY. So, the
+ * value in the "data" parameter will be shifted out to the PHY one
+ * bit at a time. In order to do this, "data" must be broken down
+ * into bits.
+ */
+ mask = 0x01;
+ mask <<= (count - 1);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+ ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+ while (mask) {
+ /*
+ * A "1" is shifted out to the PHY by setting the MDIO bit to
+ * "1" and then raising and lowering the Management Data Clock.
+ * A "0" is shifted out to the PHY by setting the MDIO bit to
+ * "0" and then raising and lowering the clock.
+ */
+ if (data & mask)
+ ctrl |= E1000_CTRL_MDIO;
+ else
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(10);
+
+ e1000_raise_mdi_clk_82543(hw, &ctrl);
+ e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+ mask >>= 1;
+ }
+}
+
+/**
+ * e1000_shift_in_mdi_bits_82543 - Shift data bits in from the PHY
+ * @hw: pointer to the HW structure
+ *
+ * In order to read a register from the PHY, we need to shift 18 bits
+ * in from the PHY. Bits are "shifted in" by raising the clock input to
+ * the PHY (setting the MDC bit), and then reading the value of the data out
+ * MDIO bit.
+ **/
+STATIC u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u16 data = 0;
+ u8 i;
+
+ /*
+ * In order to read a register from the PHY, we need to shift in a
+ * total of 18 bits from the PHY. The first two bit (turnaround)
+ * times are used to avoid contention on the MDIO pin when a read
+ * operation is performed. These two bits are ignored by us and
+ * thrown away. Bits are "shifted in" by raising the input to the
+ * Management Data Clock (setting the MDC bit) and then reading the
+ * value of the MDIO bit.
+ */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /*
+ * Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as
+ * input.
+ */
+ ctrl &= ~E1000_CTRL_MDIO_DIR;
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+
+ /*
+ * Raise and lower the clock before reading in the data. This accounts
+ * for the turnaround bits. The first clock occurred when we clocked
+ * out the last bit of the Register Address.
+ */
+ e1000_raise_mdi_clk_82543(hw, &ctrl);
+ e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+ for (data = 0, i = 0; i < 16; i++) {
+ data <<= 1;
+ e1000_raise_mdi_clk_82543(hw, &ctrl);
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ /* Check to see if we shifted in a "1". */
+ if (ctrl & E1000_CTRL_MDIO)
+ data |= 1;
+ e1000_lower_mdi_clk_82543(hw, &ctrl);
+ }
+
+ e1000_raise_mdi_clk_82543(hw, &ctrl);
+ e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+ return data;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_82543 - Force speed/duplex for PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the function to force speed and duplex for the m88 PHY, and
+ * if the PHY is not auto-negotiating and the speed is forced to 10Mbit,
+ * then call the function for polarity reversal workaround.
+ **/
+STATIC s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_82543");
+
+ ret_val = e1000_phy_force_speed_duplex_m88(hw);
+ if (ret_val)
+ goto out;
+
+ if (!hw->mac.autoneg && (hw->mac.forced_speed_duplex &
+ E1000_ALL_10_SPEED))
+ ret_val = e1000_polarity_reversal_workaround_82543(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_polarity_reversal_workaround_82543 - Workaround polarity reversal
+ * @hw: pointer to the HW structure
+ *
+ * When forcing link to 10 Full or 10 Half, the PHY can reverse the polarity
+ * inadvertently. To workaround the issue, we disable the transmitter on
+ * the PHY until we have established the link partner's link parameters.
+ **/
+STATIC s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 mii_status_reg;
+ u16 i;
+ bool link;
+
+ if (!(hw->phy.ops.write_reg))
+ goto out;
+
+ /* Polarity reversal workaround for forced 10F/10H links. */
+
+ /* Disable the transmitter on the PHY */
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ goto out;
+
+ /*
+ * This loop will early-out if the NO link condition has been met.
+ * In other words, DO NOT use e1000_phy_has_link_generic() here.
+ */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /*
+ * Read the MII Status Register and wait for Link Status bit
+ * to be clear.
+ */
+
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ goto out;
+
+ if (!(mii_status_reg & ~MII_SR_LINK_STATUS))
+ break;
+ msec_delay_irq(100);
+ }
+
+ /* Recommended delay time after link has been lost */
+ msec_delay_irq(1000);
+
+ /* Now we will re-enable the transmitter on the PHY */
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ goto out;
+ msec_delay_irq(50);
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+ if (ret_val)
+ goto out;
+ msec_delay_irq(50);
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+ if (ret_val)
+ goto out;
+ msec_delay_irq(50);
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Read the MII Status Register and wait for Link Status bit
+ * to be set.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_TIME, 100000, &link);
+ if (ret_val)
+ goto out;
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_82543 - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Sets the PHY_RESET_DIR bit in the extended device control register
+ * to put the PHY into a reset and waits for completion. Once the reset
+ * has been accomplished, clear the PHY_RESET_DIR bit to take the PHY out
+ * of reset.
+ **/
+STATIC s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_phy_hw_reset_82543");
+
+ /*
+ * Read the Extended Device Control Register, assert the PHY_RESET_DIR
+ * bit to put the PHY into reset...
+ */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ /* ...then take it out of reset. */
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(150);
+
+ if (!(hw->phy.ops.get_cfg_done))
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.get_cfg_done(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_82543 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+STATIC s32 e1000_reset_hw_82543(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_reset_hw_82543");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ e1000_set_tbi_sbp_82543(hw, false);
+
+ /*
+ * Delay to allow any outstanding PCI transactions to complete before
+ * resetting the device
+ */
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGOUT("Issuing a global reset to 82543/82544 MAC\n");
+ if (hw->mac.type == e1000_82543) {
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+ } else {
+ /*
+ * The 82544 can't ACK the 64-bit write when issuing the
+ * reset, so use IO-mapping as a workaround.
+ */
+ E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+ }
+
+ /*
+ * After MAC reset, force reload of NVM to restore power-on
+ * settings to device.
+ */
+ hw->nvm.ops.reload(hw);
+ msec_delay(2);
+
+ /* Masking off and clearing any pending interrupts */
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_82543 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+STATIC s32 e1000_init_hw_82543(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543;
+ u32 ctrl;
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_init_hw_82543");
+
+ /* Disabling VLAN filtering */
+ E1000_WRITE_REG(hw, E1000_VET, 0);
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address. */
+ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+ E1000_WRITE_FLUSH(hw);
+ }
+
+ /*
+ * Set the PCI priority bit correctly in the CTRL register. This
+ * determines if the adapter gives priority to receives, or if it
+ * gives equal priority to transmits and receives.
+ */
+ if (hw->mac.type == e1000_82543 && dev_spec->dma_fairness) {
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR);
+ }
+
+ e1000_pcix_mmrbc_workaround_generic(hw);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82543(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_link_82543 - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Read the EEPROM to determine the initial polarity value and write the
+ * extended device control register with the information before calling
+ * the generic setup link function, which does the following:
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+STATIC s32 e1000_setup_link_82543(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_setup_link_82543");
+
+ /*
+ * Take the 4 bits from NVM word 0xF that determine the initial
+ * polarity value for the SW controlled pins, and setup the
+ * Extended Device Control reg with that info.
+ * This is needed because one of the SW controlled pins is used for
+ * signal detection. So this should be done before phy setup.
+ */
+ if (hw->mac.type == e1000_82543) {
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+ ctrl_ext = ((data & NVM_WORD0F_SWPDIO_EXT_MASK) <<
+ NVM_SWDPIO_EXT_SHIFT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ }
+
+ ret_val = e1000_setup_link_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_82543 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we check
+ * for link, once link is established calls to configure collision distance
+ * and flow control are called.
+ **/
+STATIC s32 e1000_setup_copper_link_82543(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ bool link;
+
+ DEBUGFUNC("e1000_setup_copper_link_82543");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL) | E1000_CTRL_SLU;
+ /*
+ * With 82543, we need to force speed and duplex on the MAC
+ * equal to what the PHY speed and duplex configuration is.
+ * In addition, we need to perform a hardware reset on the
+ * PHY to take it out of reset.
+ */
+ if (hw->mac.type == e1000_82543) {
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val)
+ goto out;
+ } else {
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ }
+
+ /* Set MDI/MDI-X, Polarity Reversal, and downshift settings */
+ ret_val = e1000_copper_link_setup_m88(hw);
+ if (ret_val)
+ goto out;
+
+ if (hw->mac.autoneg) {
+ /*
+ * Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = e1000_copper_link_autoneg(hw);
+ if (ret_val)
+ goto out;
+ } else {
+ /*
+ * PHY will be set to 10H, 10F, 100H or 100F
+ * depending on user settings.
+ */
+ DEBUGOUT("Forcing Speed and Duplex\n");
+ ret_val = e1000_phy_force_speed_duplex_82543(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Forcing Speed and Duplex\n");
+ goto out;
+ }
+ }
+
+ /*
+ * Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
+ &link);
+ if (ret_val)
+ goto out;
+
+
+ if (link) {
+ DEBUGOUT("Valid link established!!!\n");
+ /* Config the MAC and PHY after link is up */
+ if (hw->mac.type == e1000_82544) {
+ hw->mac.ops.config_collision_dist(hw);
+ } else {
+ ret_val = e1000_config_mac_to_phy_82543(hw);
+ if (ret_val)
+ goto out;
+ }
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ } else {
+ DEBUGOUT("Unable to establish link!!!\n");
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_fiber_link_82543 - Setup link for fiber
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber links. Upon
+ * successful setup, poll for link.
+ **/
+STATIC s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_fiber_link_82543");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Take the link out of reset */
+ ctrl &= ~E1000_CTRL_LRST;
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ ret_val = e1000_commit_fc_settings_generic(hw);
+ if (ret_val)
+ goto out;
+
+ DEBUGOUT("Auto-negotiation enabled\n");
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+
+ /*
+ * For these adapters, the SW definable pin 1 is cleared when the
+ * optics detect a signal. If we have a signal, then poll for a
+ * "Link-Up" indication.
+ */
+ if (!(E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1))
+ ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+ else
+ DEBUGOUT("No signal detected\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_copper_link_82543 - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks the phy for link, if link exists, do the following:
+ * - check for downshift
+ * - do polarity workaround (if necessary)
+ * - configure collision distance
+ * - configure flow control after link up
+ * - configure tbi compatibility
+ **/
+STATIC s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 icr, rctl;
+ s32 ret_val;
+ u16 speed, duplex;
+ bool link;
+
+ DEBUGFUNC("e1000_check_for_copper_link_82543");
+
+ if (!mac->get_link_status) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ goto out; /* No link detected */
+
+ mac->get_link_status = false;
+
+ e1000_check_downshift_generic(hw);
+
+ /*
+ * If we are forcing speed/duplex, then we can return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg) {
+ /*
+ * If speed and duplex are forced to 10H or 10F, then we will
+ * implement the polarity reversal workaround. We disable
+ * interrupts first, and upon returning, place the devices
+ * interrupt state to its previous value except for the link
+ * status change interrupt which will happened due to the
+ * execution of this workaround.
+ */
+ if (mac->forced_speed_duplex & E1000_ALL_10_SPEED) {
+ E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+ ret_val = e1000_polarity_reversal_workaround_82543(hw);
+ icr = E1000_READ_REG(hw, E1000_ICR);
+ E1000_WRITE_REG(hw, E1000_ICS, (icr & ~E1000_ICS_LSC));
+ E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
+ }
+
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * We have a M88E1000 PHY and Auto-Neg is enabled. If we
+ * have Si on board that is 82544 or newer, Auto
+ * Speed Detection takes care of MAC speed/duplex
+ * configuration. So we only need to configure Collision
+ * Distance in the MAC. Otherwise, we need to force
+ * speed/duplex on the MAC to the current PHY speed/duplex
+ * settings.
+ */
+ if (mac->type == e1000_82544)
+ hw->mac.ops.config_collision_dist(hw);
+ else {
+ ret_val = e1000_config_mac_to_phy_82543(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring MAC to PHY settings\n");
+ goto out;
+ }
+ }
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+
+ /*
+ * At this point we know that we are on copper and we have
+ * auto-negotiated link. These are conditions for checking the link
+ * partner capability register. We use the link speed to determine if
+ * TBI compatibility needs to be turned on or off. If the link is not
+ * at gigabit speed, then TBI compatibility is not needed. If we are
+ * at gigabit speed, we turn on TBI compatibility.
+ */
+ if (e1000_tbi_compatibility_enabled_82543(hw)) {
+ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+ if (speed != SPEED_1000) {
+ /*
+ * If link speed is not set to gigabit speed,
+ * we do not need to enable TBI compatibility.
+ */
+ if (e1000_tbi_sbp_enabled_82543(hw)) {
+ /*
+ * If we previously were in the mode,
+ * turn it off.
+ */
+ e1000_set_tbi_sbp_82543(hw, false);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= ~E1000_RCTL_SBP;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ }
+ } else {
+ /*
+ * If TBI compatibility is was previously off,
+ * turn it on. For compatibility with a TBI link
+ * partner, we will store bad packets. Some
+ * frames have an additional byte on the end and
+ * will look like CRC errors to to the hardware.
+ */
+ if (!e1000_tbi_sbp_enabled_82543(hw)) {
+ e1000_set_tbi_sbp_82543(hw, true);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_SBP;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ }
+ }
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_fiber_link_82543 - Check for link (Fiber)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+STATIC s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw, ctrl, status;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_check_for_fiber_link_82543");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+ /*
+ * If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), the cable is plugged in (we have signal),
+ * and our link partner is not trying to auto-negotiate with us (we
+ * are receiving idles or data), we need to force link up. We also
+ * need to give auto-negotiation time to complete, in case the cable
+ * was just plugged in. The autoneg_failed flag does this.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 0 == have signal */
+ if ((!(ctrl & E1000_CTRL_SWDPIN1)) &&
+ (!(status & E1000_STATUS_LU)) &&
+ (!(rxcw & E1000_RXCW_C))) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
+ ret_val = 0;
+ goto out;
+ }
+ DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ goto out;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /*
+ * If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_config_mac_to_phy_82543 - Configure MAC to PHY settings
+ * @hw: pointer to the HW structure
+ *
+ * For the 82543 silicon, we need to set the MAC to match the settings
+ * of the PHY, even if the PHY is auto-negotiating.
+ **/
+STATIC s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_config_mac_to_phy_82543");
+
+ if (!(hw->phy.ops.read_reg))
+ goto out;
+
+ /* Set the bits to force speed and duplex */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+ /*
+ * Set up duplex in the Device Control and Transmit Control
+ * registers depending on negotiated values.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ goto out;
+
+ ctrl &= ~E1000_CTRL_FD;
+ if (phy_data & M88E1000_PSSR_DPLX)
+ ctrl |= E1000_CTRL_FD;
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ /*
+ * Set up speed in the Device Control register depending on
+ * negotiated values.
+ */
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+ ctrl |= E1000_CTRL_SPD_1000;
+ else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+ ctrl |= E1000_CTRL_SPD_100;
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_vfta_82543 - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: the 32-bit offset in which to write the value to.
+ * @value: the 32-bit value to write at location offset.
+ *
+ * This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ * table.
+ **/
+STATIC void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ u32 temp;
+
+ DEBUGFUNC("e1000_write_vfta_82543");
+
+ if ((hw->mac.type == e1000_82544) && (offset & 1)) {
+ temp = E1000_READ_REG_ARRAY(hw, E1000_VFTA, offset - 1);
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset - 1, temp);
+ E1000_WRITE_FLUSH(hw);
+ } else {
+ e1000_write_vfta_generic(hw, offset, value);
+ }
+}
+
+/**
+ * e1000_led_on_82543 - Turn on SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED on.
+ **/
+STATIC s32 e1000_led_on_82543(struct e1000_hw *hw)
+{
+ u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGFUNC("e1000_led_on_82543");
+
+ if (hw->mac.type == e1000_82544 &&
+ hw->phy.media_type == e1000_media_type_copper) {
+ /* Clear SW-definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Fiber 82544 and all 82543 use this method */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off_82543 - Turn off SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED off.
+ **/
+STATIC s32 e1000_led_off_82543(struct e1000_hw *hw)
+{
+ u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGFUNC("e1000_led_off_82543");
+
+ if (hw->mac.type == e1000_82544 &&
+ hw->phy.media_type == e1000_media_type_copper) {
+ /* Set SW-definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clear_hw_cntrs_82543 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82543");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.h
new file mode 100644
index 00000000..4eb3f624
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.h
@@ -0,0 +1,56 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_82543_H_
+#define _E1000_82543_H_
+
+#define PHY_PREAMBLE 0xFFFFFFFF
+#define PHY_PREAMBLE_SIZE 32
+#define PHY_SOF 0x1
+#define PHY_OP_READ 0x2
+#define PHY_OP_WRITE 0x1
+#define PHY_TURNAROUND 0x2
+
+#define TBI_COMPAT_ENABLED 0x1 /* Global "knob" for the workaround */
+/* If TBI_COMPAT_ENABLED, then this is the current state (on/off) */
+#define TBI_SBP_ENABLED 0x2
+
+void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
+ struct e1000_hw_stats *stats,
+ u32 frame_len, u8 *mac_addr,
+ u32 max_frame_size);
+void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw,
+ bool state);
+bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.c
new file mode 100644
index 00000000..7c279dbb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.c
@@ -0,0 +1,2030 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/* 82571EB Gigabit Ethernet Controller
+ * 82571EB Gigabit Ethernet Controller (Copper)
+ * 82571EB Gigabit Ethernet Controller (Fiber)
+ * 82571EB Dual Port Gigabit Mezzanine Adapter
+ * 82571EB Quad Port Gigabit Mezzanine Adapter
+ * 82571PT Gigabit PT Quad Port Server ExpressModule
+ * 82572EI Gigabit Ethernet Controller (Copper)
+ * 82572EI Gigabit Ethernet Controller (Fiber)
+ * 82572EI Gigabit Ethernet Controller
+ * 82573V Gigabit Ethernet Controller (Copper)
+ * 82573E Gigabit Ethernet Controller (Copper)
+ * 82573L Gigabit Ethernet Controller
+ * 82574L Gigabit Network Connection
+ * 82583V Gigabit Network Connection
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_acquire_nvm_82571(struct e1000_hw *hw);
+STATIC void e1000_release_nvm_82571(struct e1000_hw *hw);
+STATIC s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw);
+STATIC s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw);
+STATIC s32 e1000_get_cfg_done_82571(struct e1000_hw *hw);
+STATIC s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_reset_hw_82571(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_82571(struct e1000_hw *hw);
+STATIC void e1000_clear_vfta_82571(struct e1000_hw *hw);
+STATIC bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
+STATIC s32 e1000_led_on_82574(struct e1000_hw *hw);
+STATIC s32 e1000_setup_link_82571(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
+STATIC s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
+STATIC s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data);
+STATIC void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
+STATIC s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw);
+STATIC s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
+STATIC s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
+STATIC void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
+STATIC void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
+STATIC s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
+STATIC void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+STATIC s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw,
+ bool active);
+STATIC void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
+STATIC s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 e1000_read_mac_addr_82571(struct e1000_hw *hw);
+STATIC void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+
+/**
+ * e1000_init_phy_params_82571 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_phy_params_82571");
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ return E1000_SUCCESS;
+ }
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+
+ phy->ops.check_reset_block = e1000_check_reset_block_generic;
+ phy->ops.reset = e1000_phy_hw_reset_generic;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82571;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82571;
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ phy->type = e1000_phy_igp_2;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_82571;
+ phy->ops.get_info = e1000_get_phy_info_igp;
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+ phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
+ phy->ops.read_reg = e1000_read_phy_reg_igp;
+ phy->ops.write_reg = e1000_write_phy_reg_igp;
+ phy->ops.acquire = e1000_get_hw_semaphore_82571;
+ phy->ops.release = e1000_put_hw_semaphore_82571;
+ break;
+ case e1000_82573:
+ phy->type = e1000_phy_m88;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.read_reg = e1000_read_phy_reg_m88;
+ phy->ops.write_reg = e1000_write_phy_reg_m88;
+ phy->ops.acquire = e1000_get_hw_semaphore_82571;
+ phy->ops.release = e1000_put_hw_semaphore_82571;
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ E1000_MUTEX_INIT(&hw->dev_spec._82571.swflag_mutex);
+
+ phy->type = e1000_phy_bm;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.read_reg = e1000_read_phy_reg_bm2;
+ phy->ops.write_reg = e1000_write_phy_reg_bm2;
+ phy->ops.acquire = e1000_get_hw_semaphore_82574;
+ phy->ops.release = e1000_put_hw_semaphore_82574;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ break;
+ }
+
+ /* This can only be done after all function pointers are setup. */
+ ret_val = e1000_get_phy_id_82571(hw);
+ if (ret_val) {
+ DEBUGOUT("Error getting PHY ID\n");
+ return ret_val;
+ }
+
+ /* Verify phy id */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ if (phy->id != IGP01E1000_I_PHY_ID)
+ ret_val = -E1000_ERR_PHY;
+ break;
+ case e1000_82573:
+ if (phy->id != M88E1111_I_PHY_ID)
+ ret_val = -E1000_ERR_PHY;
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ if (phy->id != BME1000_E_PHY_ID_R2)
+ ret_val = -E1000_ERR_PHY;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ if (ret_val)
+ DEBUGOUT1("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82571 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u16 size;
+
+ DEBUGFUNC("e1000_init_nvm_params_82571");
+
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+ break;
+ }
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (((eecd >> 15) & 0x3) == 0x3) {
+ nvm->type = e1000_nvm_flash_hw;
+ nvm->word_size = 2048;
+ /* Autonomous Flash update bit must be cleared due
+ * to Flash update issue.
+ */
+ eecd &= ~E1000_EECD_AUPDEN;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ break;
+ }
+ /* Fall Through */
+ default:
+ nvm->type = e1000_nvm_eeprom_spi;
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+ /* Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* EEPROM access above 16k is unsupported */
+ if (size > 14)
+ size = 14;
+ nvm->word_size = 1 << size;
+ break;
+ }
+
+ /* Function Pointers */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ nvm->ops.acquire = e1000_get_hw_semaphore_82574;
+ nvm->ops.release = e1000_put_hw_semaphore_82574;
+ break;
+ default:
+ nvm->ops.acquire = e1000_acquire_nvm_82571;
+ nvm->ops.release = e1000_release_nvm_82571;
+ break;
+ }
+ nvm->ops.read = e1000_read_nvm_eerd;
+ nvm->ops.update = e1000_update_nvm_checksum_82571;
+ nvm->ops.validate = e1000_validate_nvm_checksum_82571;
+ nvm->ops.valid_led_default = e1000_valid_led_default_82571;
+ nvm->ops.write = e1000_write_nvm_82571;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_82571 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 swsm = 0;
+ u32 swsm2 = 0;
+ bool force_clear_smbi = false;
+
+ DEBUGFUNC("e1000_init_mac_params_82571");
+
+ /* Set media type and media-dependent function pointers */
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82571EB_FIBER:
+ case E1000_DEV_ID_82572EI_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ hw->phy.media_type = e1000_media_type_fiber;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+ mac->ops.check_for_link = e1000_check_for_fiber_link_generic;
+ mac->ops.get_link_up_info =
+ e1000_get_speed_and_duplex_fiber_serdes_generic;
+ break;
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82571EB_SERDES_DUAL:
+ case E1000_DEV_ID_82571EB_SERDES_QUAD:
+ case E1000_DEV_ID_82572EI_SERDES:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+ mac->ops.check_for_link = e1000_check_for_serdes_link_82571;
+ mac->ops.get_link_up_info =
+ e1000_get_speed_and_duplex_fiber_serdes_generic;
+ break;
+ default:
+ hw->phy.media_type = e1000_media_type_copper;
+ mac->ops.setup_physical_interface =
+ e1000_setup_copper_link_82571;
+ mac->ops.check_for_link = e1000_check_for_copper_link_generic;
+ mac->ops.get_link_up_info =
+ e1000_get_speed_and_duplex_copper_generic;
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_82571;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_82571;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_82571;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_82571;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_82571;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_generic;
+ /* turn off LED */
+ mac->ops.led_off = e1000_led_off_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82571;
+
+ /* MAC-specific function pointers */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ mac->ops.check_mng_mode = e1000_check_mng_mode_generic;
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.blink_led = e1000_blink_led_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are
+ * enabled.
+ */
+ mac->arc_subsystem_valid = !!(E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_FWSM_MODE_MASK);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ mac->ops.check_mng_mode = e1000_check_mng_mode_82574;
+ mac->ops.led_on = e1000_led_on_82574;
+ break;
+ default:
+ mac->ops.check_mng_mode = e1000_check_mng_mode_generic;
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.blink_led = e1000_blink_led_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+ break;
+ }
+
+ /* Ensure that the inter-port SWSM.SMBI lock bit is clear before
+ * first NVM or PHY access. This should be done for single-port
+ * devices, and for one port only on dual-port devices so that
+ * for those devices we can still use the SMBI lock to synchronize
+ * inter-port accesses to the PHY & NVM.
+ */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ swsm2 = E1000_READ_REG(hw, E1000_SWSM2);
+
+ if (!(swsm2 & E1000_SWSM2_LOCK)) {
+ /* Only do this for the first interface on this card */
+ E1000_WRITE_REG(hw, E1000_SWSM2, swsm2 |
+ E1000_SWSM2_LOCK);
+ force_clear_smbi = true;
+ } else {
+ force_clear_smbi = false;
+ }
+ break;
+ default:
+ force_clear_smbi = true;
+ break;
+ }
+
+ if (force_clear_smbi) {
+ /* Make sure SWSM.SMBI is clear */
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (swsm & E1000_SWSM_SMBI) {
+ /* This bit should not be set on a first interface, and
+ * indicates that the bootagent or EFI code has
+ * improperly left this bit enabled
+ */
+ DEBUGOUT("Please update your 82571 Bootagent\n");
+ }
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_SMBI);
+ }
+
+ /* Initialze device specific counter of SMBI acquisition timeouts. */
+ hw->dev_spec._82571.smb_counter = 0;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_82571 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82571");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_82571;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82571;
+ hw->phy.ops.init_params = e1000_init_phy_params_82571;
+}
+
+/**
+ * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+STATIC s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_id = 0;
+
+ DEBUGFUNC("e1000_get_phy_id_82571");
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ /* The 82571 firmware may still be configuring the PHY.
+ * In this case, we cannot access the PHY until the
+ * configuration is done. So we explicitly set the
+ * PHY ID.
+ */
+ phy->id = IGP01E1000_I_PHY_ID;
+ break;
+ case e1000_82573:
+ return e1000_get_phy_id(hw);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id = (u32)(phy_id << 16);
+ usec_delay(20);
+ ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id |= (u32)(phy_id);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+STATIC s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 sw_timeout = hw->nvm.word_size + 1;
+ s32 fw_timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_82571");
+
+ /* If we have timedout 3 times on trying to acquire
+ * the inter-port SMBI semaphore, there is old code
+ * operating on the other port, and it is not
+ * releasing SMBI. Modify the number of times that
+ * we try for the semaphore to interwork with this
+ * older code.
+ */
+ if (hw->dev_spec._82571.smb_counter > 2)
+ sw_timeout = 1;
+
+ /* Get the SW semaphore */
+ while (i < sw_timeout) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ i++;
+ }
+
+ if (i == sw_timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ hw->dev_spec._82571.smb_counter++;
+ }
+ /* Get the FW semaphore. */
+ for (i = 0; i < fw_timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ if (i == fw_timeout) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_82571(hw);
+ DEBUGOUT("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_put_hw_semaphore_82571 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+STATIC void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("e1000_put_hw_semaphore_generic");
+
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+}
+
+/**
+ * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore during reset.
+ *
+ **/
+STATIC s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_82573");
+
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ do {
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+ break;
+
+ msec_delay(2);
+ i++;
+ } while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+ if (i == MDIO_OWNERSHIP_TIMEOUT) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_82573(hw);
+ DEBUGOUT("Driver can't access the PHY\n");
+ return -E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used during reset.
+ *
+ **/
+STATIC void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ DEBUGFUNC("e1000_put_hw_semaphore_82573");
+
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+}
+
+/**
+ * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM.
+ *
+ **/
+STATIC s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_82574");
+
+ E1000_MUTEX_LOCK(&hw->dev_spec._82571.swflag_mutex);
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ if (ret_val)
+ E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex);
+ return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82574 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ *
+ **/
+STATIC void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_put_hw_semaphore_82574");
+
+ e1000_put_hw_semaphore_82573(hw);
+ E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex);
+}
+
+/**
+ * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag.
+ * LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+STATIC s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+ u32 data = E1000_READ_REG(hw, E1000_POEMB);
+
+ DEBUGFUNC("e1000_set_d0_lplu_state_82574");
+
+ if (active)
+ data |= E1000_PHY_CTRL_D0A_LPLU;
+ else
+ data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+ E1000_WRITE_REG(hw, E1000_POEMB, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * when active is true, else clear lplu for D3. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+STATIC s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+ u32 data = E1000_READ_REG(hw, E1000_POEMB);
+
+ DEBUGFUNC("e1000_set_d3_lplu_state_82574");
+
+ if (!active) {
+ data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= E1000_PHY_CTRL_NOND0A_LPLU;
+ }
+
+ E1000_WRITE_REG(hw, E1000_POEMB, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_acquire_nvm_82571 - Request for access to the EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * To gain access to the EEPROM, first we must obtain a hardware semaphore.
+ * Then for non-82573 hardware, set the EEPROM access request bit and wait
+ * for EEPROM access grant bit. If the access grant bit is not set, release
+ * hardware semaphore.
+ **/
+STATIC s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_acquire_nvm_82571");
+
+ ret_val = e1000_get_hw_semaphore_82571(hw);
+ if (ret_val)
+ return ret_val;
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ break;
+ default:
+ ret_val = e1000_acquire_nvm_generic(hw);
+ break;
+ }
+
+ if (ret_val)
+ e1000_put_hw_semaphore_82571(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_82571 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+STATIC void e1000_release_nvm_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_82571");
+
+ e1000_release_nvm_generic(hw);
+ e1000_put_hw_semaphore_82571(hw);
+}
+
+/**
+ * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * For non-82573 silicon, write data to EEPROM at offset using SPI interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+STATIC s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_write_nvm_82571");
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ ret_val = e1000_write_nvm_spi(hw, offset, words, data);
+ break;
+ default:
+ ret_val = -E1000_ERR_NVM;
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_82571 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+STATIC s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
+{
+ u32 eecd;
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_82571");
+
+ ret_val = e1000_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* If our nvm is an EEPROM, then we're done
+ * otherwise, commit the checksum to the flash NVM.
+ */
+ if (hw->nvm.type != e1000_nvm_flash_hw)
+ return E1000_SUCCESS;
+
+ /* Check for pending operations. */
+ for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+ msec_delay(1);
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD))
+ break;
+ }
+
+ if (i == E1000_FLASH_UPDATES)
+ return -E1000_ERR_NVM;
+
+ /* Reset the firmware if using STM opcode. */
+ if ((E1000_READ_REG(hw, E1000_FLOP) & 0xFF00) == E1000_STM_OPCODE) {
+ /* The enabling of and the actual reset must be done
+ * in two write cycles.
+ */
+ E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET_ENABLE);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET);
+ }
+
+ /* Commit the write to flash */
+ eecd = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+
+ for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+ msec_delay(1);
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD))
+ break;
+ }
+
+ if (i == E1000_FLASH_UPDATES)
+ return -E1000_ERR_NVM;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+STATIC s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_validate_nvm_checksum_82571");
+
+ if (hw->nvm.type == e1000_nvm_flash_hw)
+ e1000_fix_nvm_checksum_82571(hw);
+
+ return e1000_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * After checking for invalid values, poll the EEPROM to ensure the previous
+ * command has completed before trying to write the next word. After write
+ * poll for completion.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+STATIC s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, eewr = 0;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_nvm_eewr_82571");
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ for (i = 0; i < words; i++) {
+ eewr = ((data[i] << E1000_NVM_RW_REG_DATA) |
+ ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
+ E1000_NVM_RW_REG_START);
+
+ ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+ if (ret_val)
+ break;
+
+ E1000_WRITE_REG(hw, E1000_EEWR, eewr);
+
+ ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+ if (ret_val)
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_cfg_done_82571 - Poll for configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Reads the management control register for the config done bit to be set.
+ **/
+STATIC s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+
+ DEBUGFUNC("e1000_get_cfg_done_82571");
+
+ while (timeout) {
+ if (E1000_READ_REG(hw, E1000_EEMNGCTL) &
+ E1000_NVM_CFG_DONE_PORT_0)
+ break;
+ msec_delay(1);
+ timeout--;
+ }
+ if (!timeout) {
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When activating LPLU
+ * this function also disables smart speed and vice versa. LPLU will not be
+ * activated unless the device autonegotiation advertisement meets standards
+ * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function
+ * pointer entry point only called by PHY setup routines.
+ **/
+STATIC s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d0_lplu_state_82571");
+
+ if (!(phy->ops.read_reg))
+ return E1000_SUCCESS;
+
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (active) {
+ data |= IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ data &= ~IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_hw_82571 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+STATIC s32 e1000_reset_hw_82571(struct e1000_hw *hw)
+{
+ u32 ctrl, ctrl_ext, eecd, tctl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_reset_hw_82571");
+
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ /* Must acquire the MDIO ownership before MAC reset.
+ * Ownership defaults to firmware after a reset.
+ */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ ret_val = e1000_get_hw_semaphore_82574(hw);
+ break;
+ default:
+ break;
+ }
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGOUT("Issuing a global reset to MAC\n");
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+ /* Must release MDIO ownership and mutex after MAC reset. */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ /* Release mutex only if the hw semaphore is acquired */
+ if (!ret_val)
+ e1000_put_hw_semaphore_82573(hw);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ /* Release mutex only if the hw semaphore is acquired */
+ if (!ret_val)
+ e1000_put_hw_semaphore_82574(hw);
+ break;
+ default:
+ break;
+ }
+
+ if (hw->nvm.type == e1000_nvm_flash_hw) {
+ usec_delay(10);
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+ }
+
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val)
+ /* We don't want to continue accessing MAC registers. */
+ return ret_val;
+
+ /* Phy configuration from NVM just starts after EECD_AUTO_RD is set.
+ * Need to wait for Phy configuration completion before accessing
+ * NVM and Phy.
+ */
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ /* REQ and GNT bits need to be cleared when using AUTO_RD
+ * to access the EEPROM.
+ */
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ break;
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ msec_delay(25);
+ break;
+ default:
+ break;
+ }
+
+ /* Clear any pending interrupt events. */
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ if (hw->mac.type == e1000_82571) {
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000_set_laa_state_82571(hw, true);
+ }
+
+ /* Reinitialize the 82571 serdes link state machine */
+ if (hw->phy.media_type == e1000_media_type_internal_serdes)
+ hw->mac.serdes_link_state = e1000_serdes_link_down;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_hw_82571 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+STATIC s32 e1000_init_hw_82571(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 reg_data;
+ s32 ret_val;
+ u16 i, rar_count = mac->rar_entry_count;
+
+ DEBUGFUNC("e1000_init_hw_82571");
+
+ e1000_initialize_hw_bits_82571(hw);
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
+ if (ret_val)
+ DEBUGOUT("Error initializing identification LED\n");
+
+ /* Disabling VLAN filtering */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address.
+ * If, however, a locally administered address was assigned to the
+ * 82571, we must reserve a RAR for it to work around an issue where
+ * resetting one port will reload the MAC on the other port.
+ */
+ if (e1000_get_laa_state_82571(hw))
+ rar_count--;
+ e1000_init_rx_addrs_generic(hw, rar_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ /* Set the transmit descriptor write-back policy */
+ reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data);
+
+ /* ...for both queues. */
+ switch (mac->type) {
+ case e1000_82573:
+ e1000_enable_tx_pkt_filtering_generic(hw);
+ /* fall through */
+ case e1000_82574:
+ case e1000_82583:
+ reg_data = E1000_READ_REG(hw, E1000_GCR);
+ reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+ E1000_WRITE_REG(hw, E1000_GCR, reg_data);
+ break;
+ default:
+ reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1));
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB |
+ E1000_TXDCTL_COUNT_DESC);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data);
+ break;
+ }
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82571(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits
+ * @hw: pointer to the HW structure
+ *
+ * Initializes required hardware-dependent bits needed for normal operation.
+ **/
+STATIC void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_initialize_hw_bits_82571");
+
+ /* Transmit Descriptor Control 0 */
+ reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
+
+ /* Transmit Descriptor Control 1 */
+ reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
+
+ /* Transmit Arbitration Control 0 */
+ reg = E1000_READ_REG(hw, E1000_TARC(0));
+ reg &= ~(0xF << 27); /* 30:27 */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ reg |= (1 << 26);
+ break;
+ default:
+ break;
+ }
+ E1000_WRITE_REG(hw, E1000_TARC(0), reg);
+
+ /* Transmit Arbitration Control 1 */
+ reg = E1000_READ_REG(hw, E1000_TARC(1));
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ reg &= ~((1 << 29) | (1 << 30));
+ reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+ if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
+ reg &= ~(1 << 28);
+ else
+ reg |= (1 << 28);
+ E1000_WRITE_REG(hw, E1000_TARC(1), reg);
+ break;
+ default:
+ break;
+ }
+
+ /* Device Control */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg &= ~(1 << 29);
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+ break;
+ default:
+ break;
+ }
+
+ /* Extended Device Control */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~(1 << 23);
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+ break;
+ default:
+ break;
+ }
+
+ if (hw->mac.type == e1000_82571) {
+ reg = E1000_READ_REG(hw, E1000_PBA_ECC);
+ reg |= E1000_PBA_ECC_CORR_EN;
+ E1000_WRITE_REG(hw, E1000_PBA_ECC, reg);
+ }
+
+ /* Workaround for hardware errata.
+ * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
+ */
+ if ((hw->mac.type == e1000_82571) ||
+ (hw->mac.type == e1000_82572)) {
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+ }
+
+ /* Disable IPv6 extension header parsing because some malformed
+ * IPv6 headers can hang the Rx.
+ */
+ if (hw->mac.type <= e1000_82573) {
+ reg = E1000_READ_REG(hw, E1000_RFCTL);
+ reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+ E1000_WRITE_REG(hw, E1000_RFCTL, reg);
+ }
+
+ /* PCI-Ex Control Registers */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ reg = E1000_READ_REG(hw, E1000_GCR);
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_GCR, reg);
+
+ /* Workaround for hardware errata.
+ * apply workaround for hardware errata documented in errata
+ * docs Fixes issue where some error prone or unreliable PCIe
+ * completions are occurring, particularly with ASPM enabled.
+ * Without fix, issue can cause Tx timeouts.
+ */
+ reg = E1000_READ_REG(hw, E1000_GCR2);
+ reg |= 1;
+ E1000_WRITE_REG(hw, E1000_GCR2, reg);
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
+/**
+ * e1000_clear_vfta_82571 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+STATIC void e1000_clear_vfta_82571(struct e1000_hw *hw)
+{
+ u32 offset;
+ u32 vfta_value = 0;
+ u32 vfta_offset = 0;
+ u32 vfta_bit_in_reg = 0;
+
+ DEBUGFUNC("e1000_clear_vfta_82571");
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (hw->mng_cookie.vlan_id != 0) {
+ /* The VFTA is a 4096b bit-field, each identifying
+ * a single VLAN ID. The following operations
+ * determine which 32b entry (i.e. offset) into the
+ * array we want to set the VLAN ID (i.e. bit) of
+ * the manageability unit.
+ */
+ vfta_offset = (hw->mng_cookie.vlan_id >>
+ E1000_VFTA_ENTRY_SHIFT) &
+ E1000_VFTA_ENTRY_MASK;
+ vfta_bit_in_reg =
+ 1 << (hw->mng_cookie.vlan_id &
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ }
+ break;
+ default:
+ break;
+ }
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ /* If the offset we want to clear is the same offset of the
+ * manageability VLAN ID, then clear all bits except that of
+ * the manageability unit.
+ */
+ vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/**
+ * e1000_check_mng_mode_82574 - Check manageability is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Reads the NVM Initialization Control Word 2 and returns true
+ * (>0) if any manageability is enabled, else false (0).
+ **/
+STATIC bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
+{
+ u16 data;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_check_mng_mode_82574");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+ if (ret_val)
+ return false;
+
+ return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
+}
+
+/**
+ * e1000_led_on_82574 - Turn LED on
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED on.
+ **/
+STATIC s32 e1000_led_on_82574(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 i;
+
+ DEBUGFUNC("e1000_led_on_82574");
+
+ ctrl = hw->mac.ledctl_mode2;
+ if (!(E1000_STATUS_LU & E1000_READ_REG(hw, E1000_STATUS))) {
+ /* If no link, then turn LED on by setting the invert bit
+ * for each LED that's "on" (0x0E) in ledctl_mode2.
+ */
+ for (i = 0; i < 4; i++)
+ if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+ E1000_LEDCTL_MODE_LED_ON)
+ ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
+ }
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_phy_82574 - check 82574 phy hung state
+ * @hw: pointer to the HW structure
+ *
+ * Returns whether phy is hung or not
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+ u16 status_1kbt = 0;
+ u16 receive_errors = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_check_phy_82574");
+
+ /* Read PHY Receive Error counter first, if its is max - all F's then
+ * read the Base1000T status register If both are max then PHY is hung.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, E1000_RECEIVE_ERROR_COUNTER,
+ &receive_errors);
+ if (ret_val)
+ return false;
+ if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
+ ret_val = hw->phy.ops.read_reg(hw, E1000_BASE1000T_STATUS,
+ &status_1kbt);
+ if (ret_val)
+ return false;
+ if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+ E1000_IDLE_ERROR_COUNT_MASK)
+ return true;
+ }
+
+ return false;
+}
+
+
+/**
+ * e1000_setup_link_82571 - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+STATIC s32 e1000_setup_link_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_setup_link_82571");
+
+ /* 82573 does not have a word in the NVM to determine
+ * the default flow control setting, so we explicitly
+ * set it to full.
+ */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (hw->fc.requested_mode == e1000_fc_default)
+ hw->fc.requested_mode = e1000_fc_full;
+ break;
+ default:
+ break;
+ }
+
+ return e1000_setup_link_generic(hw);
+}
+
+/**
+ * e1000_setup_copper_link_82571 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we check
+ * for link, once link is established calls to configure collision distance
+ * and flow control are called.
+ **/
+STATIC s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_copper_link_82571");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ switch (hw->phy.type) {
+ case e1000_phy_m88:
+ case e1000_phy_bm:
+ ret_val = e1000_copper_link_setup_m88(hw);
+ break;
+ case e1000_phy_igp_2:
+ ret_val = e1000_copper_link_setup_igp(hw);
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ break;
+ }
+
+ if (ret_val)
+ return ret_val;
+
+ return e1000_setup_copper_link_generic(hw);
+}
+
+/**
+ * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber and serdes links.
+ * Upon successful setup, poll for link.
+ **/
+STATIC s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_setup_fiber_serdes_link_82571");
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ /* If SerDes loopback mode is entered, there is no form
+ * of reset to take the adapter out of that mode. So we
+ * have to explicitly take the adapter out of loopback
+ * mode. This prevents drivers from twiddling their thumbs
+ * if another tool failed to take it out of loopback mode.
+ */
+ E1000_WRITE_REG(hw, E1000_SCTL,
+ E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+ break;
+ default:
+ break;
+ }
+
+ return e1000_setup_fiber_serdes_link_generic(hw);
+}
+
+/**
+ * e1000_check_for_serdes_link_82571 - Check for link (Serdes)
+ * @hw: pointer to the HW structure
+ *
+ * Reports the link state as up or down.
+ *
+ * If autonegotiation is supported by the link partner, the link state is
+ * determined by the result of autonegotiation. This is the most likely case.
+ * If autonegotiation is not supported by the link partner, and the link
+ * has a valid signal, force the link up.
+ *
+ * The link state is represented internally here by 4 states:
+ *
+ * 1) down
+ * 2) autoneg_progress
+ * 3) autoneg_complete (the link successfully autonegotiated)
+ * 4) forced_up (the link has been forced up, it did not autonegotiate)
+ *
+ **/
+STATIC s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ u32 txcw;
+ u32 i;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_check_for_serdes_link_82571");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ E1000_READ_REG(hw, E1000_RXCW);
+ /* SYNCH bit and IV bit are sticky */
+ usec_delay(10);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+ if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
+ /* Receiver is synchronized with no invalid bits. */
+ switch (mac->serdes_link_state) {
+ case e1000_serdes_link_autoneg_complete:
+ if (!(status & E1000_STATUS_LU)) {
+ /* We have lost link, retry autoneg before
+ * reporting link failure
+ */
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ DEBUGOUT("AN_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = true;
+ }
+ break;
+
+ case e1000_serdes_link_forced_up:
+ /* If we are receiving /C/ ordered sets, re-enable
+ * auto-negotiation in the TXCW register and disable
+ * forced link in the Device Control register in an
+ * attempt to auto-negotiate with our link partner.
+ */
+ if (rxcw & E1000_RXCW_C) {
+ /* Enable autoneg, and unforce link up */
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL,
+ (ctrl & ~E1000_CTRL_SLU));
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ DEBUGOUT("FORCED_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = true;
+ }
+ break;
+
+ case e1000_serdes_link_autoneg_progress:
+ if (rxcw & E1000_RXCW_C) {
+ /* We received /C/ ordered sets, meaning the
+ * link partner has autonegotiated, and we can
+ * trust the Link Up (LU) status bit.
+ */
+ if (status & E1000_STATUS_LU) {
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_complete;
+ DEBUGOUT("AN_PROG -> AN_UP\n");
+ mac->serdes_has_link = true;
+ } else {
+ /* Autoneg completed, but failed. */
+ mac->serdes_link_state =
+ e1000_serdes_link_down;
+ DEBUGOUT("AN_PROG -> DOWN\n");
+ }
+ } else {
+ /* The link partner did not autoneg.
+ * Force link up and full duplex, and change
+ * state to forced.
+ */
+ E1000_WRITE_REG(hw, E1000_TXCW,
+ (mac->txcw & ~E1000_TXCW_ANE));
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Configure Flow Control after link up. */
+ ret_val =
+ e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error config flow control\n");
+ break;
+ }
+ mac->serdes_link_state =
+ e1000_serdes_link_forced_up;
+ mac->serdes_has_link = true;
+ DEBUGOUT("AN_PROG -> FORCED_UP\n");
+ }
+ break;
+
+ case e1000_serdes_link_down:
+ default:
+ /* The link was down but the receiver has now gained
+ * valid sync, so lets see if we can bring the link
+ * up.
+ */
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl &
+ ~E1000_CTRL_SLU));
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ DEBUGOUT("DOWN -> AN_PROG\n");
+ break;
+ }
+ } else {
+ if (!(rxcw & E1000_RXCW_SYNCH)) {
+ mac->serdes_has_link = false;
+ mac->serdes_link_state = e1000_serdes_link_down;
+ DEBUGOUT("ANYSTATE -> DOWN\n");
+ } else {
+ /* Check several times, if SYNCH bit and CONFIG
+ * bit both are consistently 1 then simply ignore
+ * the IV bit and restart Autoneg
+ */
+ for (i = 0; i < AN_RETRY_COUNT; i++) {
+ usec_delay(10);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+ if ((rxcw & E1000_RXCW_SYNCH) &&
+ (rxcw & E1000_RXCW_C))
+ continue;
+
+ if (rxcw & E1000_RXCW_IV) {
+ mac->serdes_has_link = false;
+ mac->serdes_link_state =
+ e1000_serdes_link_down;
+ DEBUGOUT("ANYSTATE -> DOWN\n");
+ break;
+ }
+ }
+
+ if (i == AN_RETRY_COUNT) {
+ txcw = E1000_READ_REG(hw, E1000_TXCW);
+ txcw |= E1000_TXCW_ANE;
+ E1000_WRITE_REG(hw, E1000_TXCW, txcw);
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ DEBUGOUT("ANYSTATE -> AN_PROG\n");
+ }
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_valid_led_default_82571 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+STATIC s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_82571");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (*data == ID_LED_RESERVED_F746)
+ *data = ID_LED_DEFAULT_82573;
+ break;
+ default:
+ if (*data == ID_LED_RESERVED_0000 ||
+ *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT;
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_laa_state_82571 - Get locally administered address state
+ * @hw: pointer to the HW structure
+ *
+ * Retrieve and return the current locally administered address state.
+ **/
+bool e1000_get_laa_state_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_get_laa_state_82571");
+
+ if (hw->mac.type != e1000_82571)
+ return false;
+
+ return hw->dev_spec._82571.laa_is_present;
+}
+
+/**
+ * e1000_set_laa_state_82571 - Set locally administered address state
+ * @hw: pointer to the HW structure
+ * @state: enable/disable locally administered address
+ *
+ * Enable/Disable the current locally administered address state.
+ **/
+void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state)
+{
+ DEBUGFUNC("e1000_set_laa_state_82571");
+
+ if (hw->mac.type != e1000_82571)
+ return;
+
+ hw->dev_spec._82571.laa_is_present = state;
+
+ /* If workaround is activated... */
+ if (state)
+ /* Hold a copy of the LAA in RAR[14] This is done so that
+ * between the time RAR[0] gets clobbered and the time it
+ * gets fixed, the actual LAA is in one of the RARs and no
+ * incoming packets directed to this port are dropped.
+ * Eventually the LAA will be in RAR[0] and RAR[14].
+ */
+ hw->mac.ops.rar_set(hw, hw->mac.addr,
+ hw->mac.rar_entry_count - 1);
+ return;
+}
+
+/**
+ * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Verifies that the EEPROM has completed the update. After updating the
+ * EEPROM, we need to check bit 15 in work 0x23 for the checksum fix. If
+ * the checksum fix is not implemented, we need to set the bit and update
+ * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect,
+ * we need to return bad checksum.
+ **/
+STATIC s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_fix_nvm_checksum_82571");
+
+ if (nvm->type != e1000_nvm_flash_hw)
+ return E1000_SUCCESS;
+
+ /* Check bit 4 of word 10h. If it is 0, firmware is done updating
+ * 10h-12h. Checksum may need to be fixed.
+ */
+ ret_val = nvm->ops.read(hw, 0x10, 1, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(data & 0x10)) {
+ /* Read 0x23 and check bit 15. This bit is a 1
+ * when the checksum has already been fixed. If
+ * the checksum is still wrong and this bit is a
+ * 1, we need to return bad checksum. Otherwise,
+ * we need to set this bit to a 1 and update the
+ * checksum.
+ */
+ ret_val = nvm->ops.read(hw, 0x23, 1, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(data & 0x8000)) {
+ data |= 0x8000;
+ ret_val = nvm->ops.write(hw, 0x23, 1, &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = nvm->ops.update(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+
+/**
+ * e1000_read_mac_addr_82571 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_read_mac_addr_82571");
+
+ if (hw->mac.type == e1000_82571) {
+ s32 ret_val;
+
+ /* If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, remove the link.
+ **/
+STATIC void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ struct e1000_mac_info *mac = &hw->mac;
+
+ if (!phy->ops.check_reset_block)
+ return;
+
+ /* If the management interface is not enabled, then power down */
+ if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82571");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+
+ E1000_READ_REG(hw, E1000_IAC);
+ E1000_READ_REG(hw, E1000_ICRXOC);
+
+ E1000_READ_REG(hw, E1000_ICRXPTC);
+ E1000_READ_REG(hw, E1000_ICRXATC);
+ E1000_READ_REG(hw, E1000_ICTXPTC);
+ E1000_READ_REG(hw, E1000_ICTXATC);
+ E1000_READ_REG(hw, E1000_ICTXQEC);
+ E1000_READ_REG(hw, E1000_ICTXQMTC);
+ E1000_READ_REG(hw, E1000_ICRXDMTC);
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.h
new file mode 100644
index 00000000..c8037b61
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.h
@@ -0,0 +1,65 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_82571_H_
+#define _E1000_82571_H_
+
+#define ID_LED_RESERVED_F746 0xF746
+#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
+
+/* Intr Throttling - RW */
+#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n)))
+
+#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAC_MASK_82574 0x01F00000
+
+#define E1000_IVAR_INT_ALLOC_VALID 0x8
+
+/* Manageability Operation Mode mask */
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
+
+#define E1000_BASE1000T_STATUS 10
+#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
+#define E1000_RECEIVE_ERROR_COUNTER 21
+#define E1000_RECEIVE_ERROR_MAX 0xFFFF
+bool e1000_check_phy_82574(struct e1000_hw *hw);
+bool e1000_get_laa_state_82571(struct e1000_hw *hw);
+void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.c
new file mode 100644
index 00000000..da1a9a70
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.c
@@ -0,0 +1,3782 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/*
+ * 82575EB Gigabit Network Connection
+ * 82575EB Gigabit Backplane Connection
+ * 82575GB Gigabit Network Connection
+ * 82576 Gigabit Network Connection
+ * 82576 Quad Port Gigabit Mezzanine Adapter
+ * 82580 Gigabit Network Connection
+ * I350 Gigabit Network Connection
+ */
+
+#include "e1000_api.h"
+#include "e1000_i210.h"
+
+STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
+STATIC s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
+STATIC void e1000_release_phy_82575(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
+STATIC void e1000_release_nvm_82575(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_link_82575(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_link_media_swap(struct e1000_hw *hw);
+STATIC s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
+STATIC s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+STATIC s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
+STATIC s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+STATIC s32 e1000_reset_hw_82575(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_82580(struct e1000_hw *hw);
+STATIC s32 e1000_read_phy_reg_82580(struct e1000_hw *hw,
+ u32 offset, u16 *data);
+STATIC s32 e1000_write_phy_reg_82580(struct e1000_hw *hw,
+ u32 offset, u16 data);
+STATIC s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
+STATIC s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
+STATIC s32 e1000_get_media_type_82575(struct e1000_hw *hw);
+STATIC s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw);
+STATIC s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
+STATIC s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
+ u32 offset, u16 data);
+STATIC void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+STATIC s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+STATIC s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
+STATIC void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+STATIC bool e1000_sgmii_active_82575(struct e1000_hw *hw);
+STATIC s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
+STATIC s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
+STATIC void e1000_config_collision_dist_82575(struct e1000_hw *hw);
+STATIC void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
+STATIC void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
+STATIC void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
+STATIC s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
+STATIC s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
+STATIC s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
+STATIC s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
+STATIC s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
+ u16 offset);
+STATIC s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+ u16 offset);
+STATIC s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
+STATIC s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
+STATIC void e1000_clear_vfta_i350(struct e1000_hw *hw);
+
+STATIC void e1000_i2c_start(struct e1000_hw *hw);
+STATIC void e1000_i2c_stop(struct e1000_hw *hw);
+STATIC s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data);
+STATIC s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data);
+STATIC s32 e1000_get_i2c_ack(struct e1000_hw *hw);
+STATIC s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data);
+STATIC s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data);
+STATIC void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
+STATIC void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
+STATIC s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data);
+STATIC bool e1000_get_i2c_data(u32 *i2cctl);
+
+STATIC const u16 e1000_82580_rxpbs_table[] = {
+ 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
+#define E1000_82580_RXPBS_TABLE_SIZE \
+ (sizeof(e1000_82580_rxpbs_table) / \
+ sizeof(e1000_82580_rxpbs_table[0]))
+
+
+/**
+ * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ * @hw: pointer to the HW structure
+ *
+ * Called to determine if the I2C pins are being used for I2C or as an
+ * external MDIO interface since the two options are mutually exclusive.
+ **/
+STATIC bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+{
+ u32 reg = 0;
+ bool ext_mdio = false;
+
+ DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+ reg = E1000_READ_REG(hw, E1000_MDIC);
+ ext_mdio = !!(reg & E1000_MDIC_DEST);
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ reg = E1000_READ_REG(hw, E1000_MDICNFG);
+ ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
+ break;
+ default:
+ break;
+ }
+ return ext_mdio;
+}
+
+/**
+ * e1000_init_phy_params_82575 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext;
+
+ DEBUGFUNC("e1000_init_phy_params_82575");
+
+ phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic;
+ phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic;
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ goto out;
+ }
+
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82575;
+
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+
+ phy->ops.acquire = e1000_acquire_phy_82575;
+ phy->ops.check_reset_block = e1000_check_reset_block_generic;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
+ phy->ops.release = e1000_release_phy_82575;
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+ if (e1000_sgmii_active_82575(hw)) {
+ phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ } else {
+ phy->ops.reset = e1000_phy_hw_reset_generic;
+ ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+ }
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ e1000_reset_mdicnfg_82580(hw);
+
+ if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
+ phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
+ phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
+ } else {
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ phy->ops.read_reg = e1000_read_phy_reg_82580;
+ phy->ops.write_reg = e1000_write_phy_reg_82580;
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ phy->ops.read_reg = e1000_read_phy_reg_gs40g;
+ phy->ops.write_reg = e1000_write_phy_reg_gs40g;
+ break;
+ default:
+ phy->ops.read_reg = e1000_read_phy_reg_igp;
+ phy->ops.write_reg = e1000_write_phy_reg_igp;
+ }
+ }
+
+ /* Set phy->phy_addr and phy->id. */
+ ret_val = e1000_get_phy_id_82575(hw);
+
+ /* Verify phy id and set remaining function pointers */
+ switch (phy->id) {
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ phy->type = e1000_phy_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ if (phy->id == I347AT4_E_PHY_ID ||
+ phy->id == M88E1112_E_PHY_ID ||
+ phy->id == M88E1340M_E_PHY_ID)
+ phy->ops.get_cable_length =
+ e1000_get_cable_length_m88_gen2;
+ else if (phy->id == M88E1543_E_PHY_ID ||
+ phy->id == M88E1512_E_PHY_ID)
+ phy->ops.get_cable_length =
+ e1000_get_cable_length_m88_gen2;
+ else
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ /* Check if this PHY is confgured for media swap. */
+ if (phy->id == M88E1112_E_PHY_ID) {
+ u16 data;
+
+ ret_val = phy->ops.write_reg(hw,
+ E1000_M88E1112_PAGE_ADDR,
+ 2);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw,
+ E1000_M88E1112_MAC_CTRL_1,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+ E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+ if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+ data == E1000_M88E1112_AUTO_COPPER_BASEX)
+ hw->mac.ops.check_for_link =
+ e1000_check_for_link_media_swap;
+ }
+ if (phy->id == M88E1512_E_PHY_ID) {
+ ret_val = e1000_initialize_M88E1512_phy(hw);
+ if (ret_val)
+ goto out;
+ }
+ if (phy->id == M88E1543_E_PHY_ID) {
+ ret_val = e1000_initialize_M88E1543_phy(hw);
+ if (ret_val)
+ goto out;
+ }
+ break;
+ case IGP03E1000_E_PHY_ID:
+ case IGP04E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.get_info = e1000_get_phy_info_igp;
+ phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
+ break;
+ case I82580_I_PHY_ID:
+ case I350_I_PHY_ID:
+ phy->type = e1000_phy_82580;
+ phy->ops.check_polarity = e1000_check_polarity_82577;
+ phy->ops.force_speed_duplex =
+ e1000_phy_force_speed_duplex_82577;
+ phy->ops.get_cable_length = e1000_get_cable_length_82577;
+ phy->ops.get_info = e1000_get_phy_info_82577;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
+ break;
+ case I210_I_PHY_ID:
+ phy->type = e1000_phy_i210;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ break;
+ case BCM54616_E_PHY_ID:
+ phy->type = e1000_phy_none;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82575 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u16 size;
+
+ DEBUGFUNC("e1000_init_nvm_params_82575");
+
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+ /*
+ * Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* Just in case size is out of range, cap it to the largest
+ * EEPROM size supported
+ */
+ if (size > 15)
+ size = 15;
+
+ nvm->word_size = 1 << size;
+ if (hw->mac.type < e1000_i210) {
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+ 16 : 8;
+ break;
+ }
+ if (nvm->word_size == (1 << 15))
+ nvm->page_size = 128;
+
+ nvm->type = e1000_nvm_eeprom_spi;
+ } else {
+ nvm->type = e1000_nvm_flash_hw;
+ }
+
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_82575;
+ nvm->ops.release = e1000_release_nvm_82575;
+ if (nvm->word_size < (1 << 15))
+ nvm->ops.read = e1000_read_nvm_eerd;
+ else
+ nvm->ops.read = e1000_read_nvm_spi;
+
+ nvm->ops.write = e1000_write_nvm_spi;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_82575;
+
+ /* override generic family function pointers for specific descendants */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ nvm->ops.validate = e1000_validate_nvm_checksum_82580;
+ nvm->ops.update = e1000_update_nvm_checksum_82580;
+ break;
+ case e1000_i350:
+ case e1000_i354:
+ nvm->ops.validate = e1000_validate_nvm_checksum_i350;
+ nvm->ops.update = e1000_update_nvm_checksum_i350;
+ break;
+ default:
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_82575 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+
+ DEBUGFUNC("e1000_init_mac_params_82575");
+
+ /* Derives media type */
+ e1000_get_media_type_82575(hw);
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set uta register count */
+ mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+ if (mac->type == e1000_82576)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+ if (mac->type == e1000_82580)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ if (mac->type == e1000_i350 || mac->type == e1000_i354)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+
+ /* Enable EEE default settings for EEE supported devices */
+ if (mac->type >= e1000_i350)
+ dev_spec->eee_disable = false;
+
+ /* Allow a single clear of the SW semaphore on I210 and newer */
+ if (mac->type >= e1000_i210)
+ dev_spec->clear_semaphore_once = true;
+
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK);
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
+ /* reset */
+ if (mac->type >= e1000_82580)
+ mac->ops.reset_hw = e1000_reset_hw_82580;
+ else
+ mac->ops.reset_hw = e1000_reset_hw_82575;
+ /* hw initialization */
+ if ((mac->type == e1000_i210) || (mac->type == e1000_i211))
+ mac->ops.init_hw = e1000_init_hw_i210;
+ else
+ mac->ops.init_hw = e1000_init_hw_82575;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_generic;
+ /* physical interface link setup */
+ mac->ops.setup_physical_interface =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575;
+ /* physical interface shutdown */
+ mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
+ /* physical interface power up */
+ mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
+ /* check for link */
+ mac->ops.check_for_link = e1000_check_for_link_82575;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
+ /* configure collision distance */
+ mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) {
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_i350;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_i350;
+ } else {
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
+ }
+ if (hw->mac.type >= e1000_82580)
+ mac->ops.validate_mdi_setting =
+ e1000_validate_mdi_setting_crossover_generic;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* blink LED */
+ mac->ops.blink_led = e1000_blink_led_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_generic;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.led_off = e1000_led_off_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
+ /* link info */
+ mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
+ /* acquire SW_FW sync */
+ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575;
+ mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575;
+ if (mac->type >= e1000_i210) {
+ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210;
+ mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210;
+ }
+
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_82575 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82575");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_82575;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
+ hw->phy.ops.init_params = e1000_init_phy_params_82575;
+ hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
+}
+
+/**
+ * e1000_acquire_phy_82575 - Acquire rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * Acquire access rights to the correct PHY.
+ **/
+STATIC s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
+{
+ u16 mask = E1000_SWFW_PHY0_SM;
+
+ DEBUGFUNC("e1000_acquire_phy_82575");
+
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_SWFW_PHY1_SM;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_SWFW_PHY2_SM;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_SWFW_PHY3_SM;
+
+ return hw->mac.ops.acquire_swfw_sync(hw, mask);
+}
+
+/**
+ * e1000_release_phy_82575 - Release rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to release access rights to the correct PHY.
+ **/
+STATIC void e1000_release_phy_82575(struct e1000_hw *hw)
+{
+ u16 mask = E1000_SWFW_PHY0_SM;
+
+ DEBUGFUNC("e1000_release_phy_82575");
+
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_SWFW_PHY1_SM;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_SWFW_PHY2_SM;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_SWFW_PHY3_SM;
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
+/**
+ * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the serial gigabit media independent
+ * interface and stores the retrieved information in data.
+ **/
+STATIC s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ s32 ret_val = -E1000_ERR_PARAM;
+
+ DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
+
+ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+ DEBUGOUT1("PHY Address %u is out of range\n", offset);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset using the serial gigabit
+ * media independent interface.
+ **/
+STATIC s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 data)
+{
+ s32 ret_val = -E1000_ERR_PARAM;
+
+ DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
+
+ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_id_82575 - Retrieve PHY addr and id
+ * @hw: pointer to the HW structure
+ *
+ * Retrieves the PHY address and ID for both PHY's which do and do not use
+ * sgmi interface.
+ **/
+STATIC s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_id;
+ u32 ctrl_ext;
+ u32 mdic;
+
+ DEBUGFUNC("e1000_get_phy_id_82575");
+
+ /* some i354 devices need an extra read for phy id */
+ if (hw->mac.type == e1000_i354)
+ e1000_get_phy_id(hw);
+
+ /*
+ * For SGMII PHYs, we try the list of possible addresses until
+ * we find one that works. For non-SGMII PHYs
+ * (e.g. integrated copper PHYs), an address of 1 should
+ * work. The result of this function should mean phy->phy_addr
+ * and phy->id are set correctly.
+ */
+ if (!e1000_sgmii_active_82575(hw)) {
+ phy->addr = 1;
+ ret_val = e1000_get_phy_id(hw);
+ goto out;
+ }
+
+ if (e1000_sgmii_uses_mdio_82575(hw)) {
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ mdic &= E1000_MDIC_PHY_MASK;
+ phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ mdic = E1000_READ_REG(hw, E1000_MDICNFG);
+ mdic &= E1000_MDICNFG_PHY_MASK;
+ phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ break;
+ }
+ ret_val = e1000_get_phy_id(hw);
+ goto out;
+ }
+
+ /* Power on sgmii phy if it is disabled */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(300);
+
+ /*
+ * The address field in the I2CCMD register is 3 bits and 0 is invalid.
+ * Therefore, we need to test 1-7
+ */
+ for (phy->addr = 1; phy->addr < 8; phy->addr++) {
+ ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
+ if (ret_val == E1000_SUCCESS) {
+ DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
+ phy_id, phy->addr);
+ /*
+ * At the time of this writing, The M88 part is
+ * the only supported SGMII PHY product.
+ */
+ if (phy_id == M88_VENDOR)
+ break;
+ } else {
+ DEBUGOUT1("PHY address %u was unreadable\n",
+ phy->addr);
+ }
+ }
+
+ /* A valid PHY type couldn't be found. */
+ if (phy->addr == 8) {
+ phy->addr = 0;
+ ret_val = -E1000_ERR_PHY;
+ } else {
+ ret_val = e1000_get_phy_id(hw);
+ }
+
+ /* restore previous sfp cage power state */
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY using the serial gigabit media independent interface.
+ **/
+STATIC s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ struct e1000_phy_info *phy = &hw->phy;
+
+ DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
+
+ /*
+ * This isn't a true "hard" reset, but is the only reset
+ * available to us at this time.
+ */
+
+ DEBUGOUT("Soft resetting SGMII attached PHY...\n");
+
+ if (!(hw->phy.ops.write_reg))
+ goto out;
+
+ /*
+ * SFP documentation requires the following to configure the SPF module
+ * to work on SGMII. No further documentation is given.
+ */
+ ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val)
+ goto out;
+
+ if (phy->id == M88E1512_E_PHY_ID)
+ ret_val = e1000_initialize_M88E1512_phy(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+STATIC s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d0_lplu_state_82575");
+
+ if (!(hw->phy.ops.read_reg))
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ goto out;
+
+ if (active) {
+ data |= IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ } else {
+ data &= ~IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+STATIC s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 data;
+
+ DEBUGFUNC("e1000_set_d0_lplu_state_82580");
+
+ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+
+ if (active) {
+ data |= E1000_82580_PM_D0_LPLU;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ data &= ~E1000_82580_PM_SPD;
+ } else {
+ data &= ~E1000_82580_PM_D0_LPLU;
+
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on)
+ data |= E1000_82580_PM_SPD;
+ else if (phy->smart_speed == e1000_smart_speed_off)
+ data &= ~E1000_82580_PM_SPD;
+ }
+
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 data;
+
+ DEBUGFUNC("e1000_set_d3_lplu_state_82580");
+
+ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+
+ if (!active) {
+ data &= ~E1000_82580_PM_D3_LPLU;
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on)
+ data |= E1000_82580_PM_SPD;
+ else if (phy->smart_speed == e1000_smart_speed_off)
+ data &= ~E1000_82580_PM_SPD;
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= E1000_82580_PM_D3_LPLU;
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ data &= ~E1000_82580_PM_SPD;
+ }
+
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_acquire_nvm_82575 - Request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+STATIC s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_acquire_nvm_82575");
+
+ ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Check if there is some access
+ * error this access may hook on
+ */
+ if (hw->mac.type == e1000_i350) {
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
+ E1000_EECD_TIMEOUT)) {
+ /* Clear all access error flags */
+ E1000_WRITE_REG(hw, E1000_EECD, eecd |
+ E1000_EECD_ERROR_CLR);
+ DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
+ }
+ }
+
+ if (hw->mac.type == e1000_82580) {
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (eecd & E1000_EECD_BLOCKED) {
+ /* Clear access error flag */
+ E1000_WRITE_REG(hw, E1000_EECD, eecd |
+ E1000_EECD_BLOCKED);
+ DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
+ }
+ }
+
+ ret_val = e1000_acquire_nvm_generic(hw);
+ if (ret_val)
+ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_82575 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ **/
+STATIC void e1000_release_nvm_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_82575");
+
+ e1000_release_nvm_generic(hw);
+
+ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+STATIC s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 ret_val = E1000_SUCCESS;
+ s32 i = 0, timeout = 200;
+
+ DEBUGFUNC("e1000_acquire_swfw_sync_82575");
+
+ while (i < timeout) {
+ if (e1000_get_hw_semaphore_generic(hw)) {
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /*
+ * Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000_put_hw_semaphore_generic(hw);
+ msec_delay_irq(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+STATIC void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ DEBUGFUNC("e1000_release_swfw_sync_82575");
+
+ while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
+ ; /* Empty */
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+}
+
+/**
+ * e1000_get_cfg_done_82575 - Read config done bit
+ * @hw: pointer to the HW structure
+ *
+ * Read the management control register for the config done bit for
+ * completion status. NOTE: silicon which is EEPROM-less will fail trying
+ * to read the config done bit, so an error is *ONLY* logged and returns
+ * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
+ * would not be able to be reset or change link.
+ **/
+STATIC s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+ DEBUGFUNC("e1000_get_cfg_done_82575");
+
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_NVM_CFG_DONE_PORT_1;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_NVM_CFG_DONE_PORT_2;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_NVM_CFG_DONE_PORT_3;
+ while (timeout) {
+ if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
+ break;
+ msec_delay(1);
+ timeout--;
+ }
+ if (!timeout)
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
+
+ /* If EEPROM is not marked present, init the PHY manually */
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
+ (hw->phy.type == e1000_phy_igp_3))
+ e1000_phy_init_script_igp3(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_link_up_info_82575 - Get link speed/duplex info
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * This is a wrapper function, if using the serial gigabit media independent
+ * interface, use PCS to retrieve the link speed and duplex information.
+ * Otherwise, use the generic function to get the link speed and duplex info.
+ **/
+STATIC s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_get_link_up_info_82575");
+
+ if (hw->phy.media_type != e1000_media_type_copper)
+ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
+ duplex);
+ else
+ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
+ duplex);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_link_82575 - Check for link
+ * @hw: pointer to the HW structure
+ *
+ * If sgmii is enabled, then use the pcs register to determine link, otherwise
+ * use the generic interface for determining link.
+ **/
+STATIC s32 e1000_check_for_link_82575(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 speed, duplex;
+
+ DEBUGFUNC("e1000_check_for_link_82575");
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
+ &duplex);
+ /*
+ * Use this flag to determine if link needs to be checked or
+ * not. If we have link clear the flag so that we do not
+ * continue to check for link.
+ */
+ hw->mac.get_link_status = !hw->mac.serdes_has_link;
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+ } else {
+ ret_val = e1000_check_for_copper_link_generic(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_link_media_swap - Check which M88E1112 interface linked
+ * @hw: pointer to the HW structure
+ *
+ * Poll the M88E1112 interfaces to see which interface achieved link.
+ */
+STATIC s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ u8 port = 0;
+
+ DEBUGFUNC("e1000_check_for_link_media_swap");
+
+ /* Check for copper. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_COPPER;
+
+ /* Check for other. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_OTHER;
+
+ /* Determine if a swap needs to happen. */
+ if (port && (hw->dev_spec._82575.media_port != port)) {
+ hw->dev_spec._82575.media_port = port;
+ hw->dev_spec._82575.media_changed = true;
+ }
+
+ if (port == E1000_MEDIA_PORT_COPPER) {
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+ e1000_check_for_link_82575(hw);
+ } else {
+ e1000_check_for_link_82575(hw);
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ * @hw: pointer to the HW structure
+ **/
+STATIC void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_power_up_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
+ return;
+
+ /* Enable PCS to turn on link */
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_PCS_EN;
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+ /* Power up the laser */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+}
+
+/**
+ * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Using the physical coding sub-layer (PCS), retrieve the current speed and
+ * duplex, then store the values in the pointers provided.
+ **/
+STATIC s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 pcs;
+ u32 status;
+
+ DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
+
+ /*
+ * Read the PCS Status register for link state. For non-copper mode,
+ * the status register is not accurate. The PCS status register is
+ * used instead.
+ */
+ pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+
+ /*
+ * The link up bit determines when link is up on autoneg.
+ */
+ if (pcs & E1000_PCS_LSTS_LINK_OK) {
+ mac->serdes_has_link = true;
+
+ /* Detect and store PCS speed */
+ if (pcs & E1000_PCS_LSTS_SPEED_1000)
+ *speed = SPEED_1000;
+ else if (pcs & E1000_PCS_LSTS_SPEED_100)
+ *speed = SPEED_100;
+ else
+ *speed = SPEED_10;
+
+ /* Detect and store PCS duplex */
+ if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
+ *duplex = FULL_DUPLEX;
+ else
+ *duplex = HALF_DUPLEX;
+
+ /* Check if it is an I354 2.5Gb backplane connection. */
+ if (mac->type == e1000_i354) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ *speed = SPEED_2500;
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("2500 Mbs, ");
+ DEBUGOUT("Full Duplex\n");
+ }
+ }
+
+ } else {
+ mac->serdes_has_link = false;
+ *speed = 0;
+ *duplex = 0;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_shutdown_serdes_link_82575 - Remove link during power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of serdes shut down sfp and PCS on driver unload
+ * when management pass thru is not enabled.
+ **/
+void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_shutdown_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
+ return;
+
+ if (!e1000_enable_mng_pass_thru(hw)) {
+ /* Disable PCS to turn off link */
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg &= ~E1000_PCS_CFG_PCS_EN;
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+ /* shutdown the laser */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg |= E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+ }
+
+ return;
+}
+
+/**
+ * e1000_reset_hw_82575 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+STATIC s32 e1000_reset_hw_82575(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_reset_hw_82575");
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ /* set the completion timeout for interface */
+ ret_val = e1000_set_pcie_completion_timeout(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Set completion timeout has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGOUT("Issuing a global reset to MAC\n");
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ DEBUGOUT("Auto Read Done did not complete\n");
+ }
+
+ /* If EEPROM is not present, run manual init scripts */
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES))
+ e1000_reset_init_script_82575(hw);
+
+ /* Clear any pending interrupt events. */
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_82575 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+s32 e1000_init_hw_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ u16 i, rar_count = mac->rar_entry_count;
+
+ DEBUGFUNC("e1000_init_hw_82575");
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ if (ret_val) {
+ DEBUGOUT("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+ }
+
+ /* Disabling VLAN filtering */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address */
+ e1000_init_rx_addrs_generic(hw, rar_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* Zero out the Unicast HASH table */
+ DEBUGOUT("Zeroing the UTA\n");
+ for (i = 0; i < mac->uta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ /* Set the default MTU size */
+ hw->dev_spec._82575.mtu = 1500;
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82575(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_82575 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we check
+ * for link, once link is established calls to configure collision distance
+ * and flow control are called.
+ **/
+STATIC s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u32 phpm_reg;
+
+ DEBUGFUNC("e1000_setup_copper_link_82575");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Clear Go Link Disconnect bit on supported devices */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ break;
+ default:
+ break;
+ }
+
+ ret_val = e1000_setup_serdes_link_82575(hw);
+ if (ret_val)
+ goto out;
+
+ if (e1000_sgmii_active_82575(hw)) {
+ /* allow time for SFP cage time to power up phy */
+ msec_delay(300);
+
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error resetting the PHY.\n");
+ goto out;
+ }
+ }
+ switch (hw->phy.type) {
+ case e1000_phy_i210:
+ case e1000_phy_m88:
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case I210_I_PHY_ID:
+ ret_val = e1000_copper_link_setup_m88_gen2(hw);
+ break;
+ default:
+ ret_val = e1000_copper_link_setup_m88(hw);
+ break;
+ }
+ break;
+ case e1000_phy_igp_3:
+ ret_val = e1000_copper_link_setup_igp(hw);
+ break;
+ case e1000_phy_82580:
+ ret_val = e1000_copper_link_setup_82577(hw);
+ break;
+ case e1000_phy_none:
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_setup_copper_link_generic(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_serdes_link_82575 - Setup link for serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configure the physical coding sub-layer (PCS) link. The PCS link is
+ * used on copper connections where the serialized gigabit media independent
+ * interface (sgmii), or serdes fiber is being used. Configures the link
+ * for auto-negotiation or forces speed/duplex.
+ **/
+STATIC s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
+ bool pcs_autoneg;
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_setup_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
+ return ret_val;
+
+ /*
+ * On the 82575, SerDes loopback mode persists until it is
+ * explicitly turned off or a power cycle is performed. A read to
+ * the register does not indicate its status. Therefore, we ensure
+ * loopback mode is disabled during initialization.
+ */
+ E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+
+ /* power on the sfp cage if present */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl_reg |= E1000_CTRL_SLU;
+
+ /* set both sw defined pins on 82575/82576*/
+ if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
+ ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
+
+ reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+
+ /* default pcs_autoneg to the same setting as mac autoneg */
+ pcs_autoneg = hw->mac.autoneg;
+
+ switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ /* sgmii mode lets the phy handle forcing speed/duplex */
+ pcs_autoneg = true;
+ /* autoneg time out should be disabled for SGMII mode */
+ reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ /* disable PCS autoneg and support parallel detect only */
+ pcs_autoneg = false;
+ /* fall through to default case */
+ default:
+ if (hw->mac.type == e1000_82575 ||
+ hw->mac.type == e1000_82576) {
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
+ pcs_autoneg = false;
+ }
+
+ /*
+ * non-SGMII modes only supports a speed of 1000/Full for the
+ * link so it is best to just force the MAC and let the pcs
+ * link either autoneg or be forced to 1000/Full
+ */
+ ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
+ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+
+ /* set speed of 1000/Full if speed/duplex is forced */
+ reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
+ break;
+ }
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
+
+ /*
+ * New SerDes mode allows for forcing speed or autonegotiating speed
+ * at 1gb. Autoneg should be default set by most drivers. This is the
+ * mode that will be compatible with older link partners and switches.
+ * However, both are supported by the hardware and some drivers/tools.
+ */
+ reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
+ E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
+
+ if (pcs_autoneg) {
+ /* Set PCS register for autoneg */
+ reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
+ E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+
+ /* Disable force flow control for autoneg */
+ reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
+
+ /* Configure flow control advertisement for autoneg */
+ anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
+
+ switch (hw->fc.requested_mode) {
+ case e1000_fc_full:
+ case e1000_fc_rx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ anadv_reg |= E1000_TXCW_PAUSE;
+ break;
+ case e1000_fc_tx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ break;
+ default:
+ break;
+ }
+
+ E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg);
+
+ DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
+ } else {
+ /* Set PCS register for forced link */
+ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
+
+ /* Force flow control for forced link */
+ reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
+ DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
+ }
+
+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+
+ if (!pcs_autoneg && !e1000_sgmii_active_82575(hw))
+ e1000_force_mac_fc_generic(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_media_type_82575 - derives current media type.
+ * @hw: pointer to the HW structure
+ *
+ * The media type is chosen reflecting few settings.
+ * The following are taken into account:
+ * - link mode set in the current port Init Control Word #3
+ * - current link mode settings in CSR register
+ * - MDIO vs. I2C PHY control interface chosen
+ * - SFP module media type
+ **/
+STATIC s32 e1000_get_media_type_82575(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext = 0;
+ u32 link_mode = 0;
+
+ /* Set internal phy as default */
+ dev_spec->sgmii_active = false;
+ dev_spec->module_plugged = false;
+
+ /* Get CSR setting */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+ /* extract link mode setting */
+ link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
+
+ switch (link_mode) {
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_GMII:
+ hw->phy.media_type = e1000_media_type_copper;
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ /* Get phy control interface type set (MDIO vs. I2C)*/
+ if (e1000_sgmii_uses_mdio_82575(hw)) {
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = true;
+ break;
+ }
+ /* fall through for I2C based SGMII */
+ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+ /* read media type from SFP EEPROM */
+ ret_val = e1000_set_sfp_media_type_82575(hw);
+ if ((ret_val != E1000_SUCCESS) ||
+ (hw->phy.media_type == e1000_media_type_unknown)) {
+ /*
+ * If media type was not identified then return media
+ * type defined by the CTRL_EXT settings.
+ */
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+
+ if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = true;
+ }
+
+ break;
+ }
+
+ /* do not change link mode for 100BaseFX */
+ if (dev_spec->eth_flags.e100_base_fx)
+ break;
+
+ /* change current link mode setting */
+ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
+ else
+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_set_sfp_media_type_82575 - derives SFP module media type.
+ * @hw: pointer to the HW structure
+ *
+ * The media type is chosen based on SFP module.
+ * compatibility flags retrieved from SFP ID EEPROM.
+ **/
+STATIC s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_ERR_CONFIG;
+ u32 ctrl_ext = 0;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags;
+ u8 tranceiver_type = 0;
+ s32 timeout = 3;
+
+ /* Turn I2C interface ON and power on sfp cage */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
+
+ E1000_WRITE_FLUSH(hw);
+
+ /* Read SFP module data */
+ while (timeout) {
+ ret_val = e1000_read_sfp_data_byte(hw,
+ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
+ &tranceiver_type);
+ if (ret_val == E1000_SUCCESS)
+ break;
+ msec_delay(100);
+ timeout--;
+ }
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+
+ ret_val = e1000_read_sfp_data_byte(hw,
+ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
+ (u8 *)eth_flags);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+
+ /* Check if there is some SFP module plugged and powered */
+ if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
+ (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
+ dev_spec->module_plugged = true;
+ if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ } else if (eth_flags->e100_base_fx) {
+ dev_spec->sgmii_active = true;
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ } else if (eth_flags->e1000_base_t) {
+ dev_spec->sgmii_active = true;
+ hw->phy.media_type = e1000_media_type_copper;
+ } else {
+ hw->phy.media_type = e1000_media_type_unknown;
+ DEBUGOUT("PHY module has not been recognized\n");
+ goto out;
+ }
+ } else {
+ hw->phy.media_type = e1000_media_type_unknown;
+ }
+ ret_val = E1000_SUCCESS;
+out:
+ /* Restore I2C interface setting */
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ return ret_val;
+}
+
+/**
+ * e1000_valid_led_default_82575 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+STATIC s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_82575");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+ switch (hw->phy.media_type) {
+ case e1000_media_type_internal_serdes:
+ *data = ID_LED_DEFAULT_82575_SERDES;
+ break;
+ case e1000_media_type_copper:
+ default:
+ *data = ID_LED_DEFAULT;
+ break;
+ }
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_sgmii_active_82575 - Return sgmii state
+ * @hw: pointer to the HW structure
+ *
+ * 82575 silicon has a serialized gigabit media independent interface (sgmii)
+ * which can be enabled for use in the embedded applications. Simply
+ * return the current state of the sgmii interface.
+ **/
+STATIC bool e1000_sgmii_active_82575(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ return dev_spec->sgmii_active;
+}
+
+/**
+ * e1000_reset_init_script_82575 - Inits HW defaults after reset
+ * @hw: pointer to the HW structure
+ *
+ * Inits recommended HW defaults after a reset when there is no EEPROM
+ * detected. This is only for the 82575.
+ **/
+STATIC s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_reset_init_script_82575");
+
+ if (hw->mac.type == e1000_82575) {
+ DEBUGOUT("Running reset init script for 82575\n");
+ /* SerDes configuration via SERDESCTRL */
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
+
+ /* CCM configuration via CCMCTL register */
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
+
+ /* PCIe lanes configuration */
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
+
+ /* PCIe PLL Configuration */
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mac_addr_82575 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_mac_addr_82575");
+
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_config_collision_dist_82575 - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+STATIC void e1000_config_collision_dist_82575(struct e1000_hw *hw)
+{
+ u32 tctl_ext;
+
+ DEBUGFUNC("e1000_config_collision_dist_82575");
+
+ tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);
+
+ tctl_ext &= ~E1000_TCTL_EXT_COLD;
+ tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;
+
+ E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, remove the link.
+ **/
+STATIC void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+
+ if (!(phy->ops.check_reset_block))
+ return;
+
+ /* If the management interface is not enabled, then power down */
+ if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82575");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+
+ E1000_READ_REG(hw, E1000_IAC);
+ E1000_READ_REG(hw, E1000_ICRXOC);
+
+ E1000_READ_REG(hw, E1000_ICRXPTC);
+ E1000_READ_REG(hw, E1000_ICRXATC);
+ E1000_READ_REG(hw, E1000_ICTXPTC);
+ E1000_READ_REG(hw, E1000_ICTXATC);
+ E1000_READ_REG(hw, E1000_ICTXQEC);
+ E1000_READ_REG(hw, E1000_ICTXQMTC);
+ E1000_READ_REG(hw, E1000_ICRXDMTC);
+
+ E1000_READ_REG(hw, E1000_CBTMPC);
+ E1000_READ_REG(hw, E1000_HTDPMC);
+ E1000_READ_REG(hw, E1000_CBRMPC);
+ E1000_READ_REG(hw, E1000_RPTHC);
+ E1000_READ_REG(hw, E1000_HGPTC);
+ E1000_READ_REG(hw, E1000_HTCBDPC);
+ E1000_READ_REG(hw, E1000_HGORCL);
+ E1000_READ_REG(hw, E1000_HGORCH);
+ E1000_READ_REG(hw, E1000_HGOTCL);
+ E1000_READ_REG(hw, E1000_HGOTCH);
+ E1000_READ_REG(hw, E1000_LENERRS);
+
+ /* This register should not be read in copper configurations */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
+ e1000_sgmii_active_82575(hw))
+ E1000_READ_REG(hw, E1000_SCVPC);
+}
+
+/**
+ * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
+ * @hw: pointer to the HW structure
+ *
+ * After Rx enable, if manageability is enabled then there is likely some
+ * bad data at the start of the fifo and possibly in the DMA fifo. This
+ * function clears the fifos and flushes any packets that came in as rx was
+ * being enabled.
+ **/
+void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
+{
+ u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+ int i, ms_wait;
+
+ DEBUGFUNC("e1000_rx_fifo_flush_82575");
+
+ /* disable IPv6 options as per hardware errata */
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+ rfctl |= E1000_RFCTL_IPV6_EX_DIS;
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+
+ if (hw->mac.type != e1000_82575 ||
+ !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
+ return;
+
+ /* Disable all Rx queues */
+ for (i = 0; i < 4; i++) {
+ rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i),
+ rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
+ }
+ /* Poll all queues to verify they have shut down */
+ for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+ msec_delay(1);
+ rx_enabled = 0;
+ for (i = 0; i < 4; i++)
+ rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
+ if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
+ break;
+ }
+
+ if (ms_wait == 10)
+ DEBUGOUT("Queue disable timed out after 10ms\n");
+
+ /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+ * incoming packets are rejected. Set enable and wait 2ms so that
+ * any packet that was coming in as RCTL.EN was set is flushed
+ */
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
+
+ rlpml = E1000_READ_REG(hw, E1000_RLPML);
+ E1000_WRITE_REG(hw, E1000_RLPML, 0);
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
+ temp_rctl |= E1000_RCTL_LPE;
+
+ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(2);
+
+ /* Enable Rx queues that were previously enabled and restore our
+ * previous state
+ */
+ for (i = 0; i < 4; i++)
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+
+ E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+
+ /* Flush receive errors generated by workaround */
+ E1000_READ_REG(hw, E1000_ROC);
+ E1000_READ_REG(hw, E1000_RNBC);
+ E1000_READ_REG(hw, E1000_MPC);
+}
+
+/**
+ * e1000_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ *
+ * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
+ * however the hardware default for these parts is 500us to 1ms which is less
+ * than the 10ms recommended by the pci-e spec. To address this we need to
+ * increase the value to either 10ms to 200ms for capability version 1 config,
+ * or 16ms to 55ms for version 2.
+ **/
+STATIC s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
+{
+ u32 gcr = E1000_READ_REG(hw, E1000_GCR);
+ s32 ret_val = E1000_SUCCESS;
+ u16 pcie_devctl2;
+
+ /* only take action if timeout value is defaulted to 0 */
+ if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
+ goto out;
+
+ /*
+ * if capababilities version is type 1 we can write the
+ * timeout of 10ms to 200ms through the GCR register
+ */
+ if (!(gcr & E1000_GCR_CAP_VER2)) {
+ gcr |= E1000_GCR_CMPL_TMOUT_10ms;
+ goto out;
+ }
+
+ /*
+ * for version 2 capabilities we need to write the config space
+ * directly in order to set the completion timeout value for
+ * 16ms to 55ms
+ */
+ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+ if (ret_val)
+ goto out;
+
+ pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
+
+ ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+out:
+ /* disable completion timeout resend */
+ gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
+
+ E1000_WRITE_REG(hw, E1000_GCR, gcr);
+ return ret_val;
+}
+
+/**
+ * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ * @pf: Physical Function pool - do not set anti-spoofing for the PF
+ *
+ * enables/disables L2 switch anti-spoofing functionality.
+ **/
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
+{
+ u32 reg_val, reg_offset;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ reg_offset = E1000_DTXSWC;
+ break;
+ case e1000_i350:
+ case e1000_i354:
+ reg_offset = E1000_TXSWC;
+ break;
+ default:
+ return;
+ }
+
+ reg_val = E1000_READ_REG(hw, reg_offset);
+ if (enable) {
+ reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ /* The PF can spoof - it has to in order to
+ * support emulation mode NICs
+ */
+ reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+ } else {
+ reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ }
+ E1000_WRITE_REG(hw, reg_offset, reg_val);
+}
+
+/**
+ * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables L2 switch loopback functionality.
+ **/
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
+{
+ u32 dtxswc;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
+ if (enable)
+ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
+ break;
+ case e1000_i350:
+ case e1000_i354:
+ dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
+ if (enable)
+ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
+ break;
+ default:
+ /* Currently no other hardware supports loopback */
+ break;
+ }
+
+
+}
+
+/**
+ * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables replication of packets across multiple pools.
+ **/
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
+{
+ u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
+
+ if (enable)
+ vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
+ else
+ vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
+
+ E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
+}
+
+/**
+ * e1000_read_phy_reg_82580 - Read 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+STATIC s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_phy_reg_82580");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_82580 - Write 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+STATIC s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_write_phy_reg_82580");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ * @hw: pointer to the HW structure
+ *
+ * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * the values found in the EEPROM. This addresses an issue in which these
+ * bits are not restored from EEPROM after reset.
+ **/
+STATIC s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 mdicnfg;
+ u16 nvm_data = 0;
+
+ DEBUGFUNC("e1000_reset_mdicnfg_82580");
+
+ if (hw->mac.type != e1000_82580)
+ goto out;
+ if (!e1000_sgmii_active_82575(hw))
+ goto out;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+ NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+ &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
+ if (nvm_data & NVM_WORD24_EXT_MDIO)
+ mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+ if (nvm_data & NVM_WORD24_COM_MDIO)
+ mdicnfg |= E1000_MDICNFG_COM_MDIO;
+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_82580 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets function or entire device (all ports, etc.)
+ * to a known state.
+ **/
+STATIC s32 e1000_reset_hw_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ /* BH SW mailbox bit in SW_FW_SYNC */
+ u16 swmbsw_mask = E1000_SW_SYNCH_MB;
+ u32 ctrl;
+ bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+
+ DEBUGFUNC("e1000_reset_hw_82580");
+
+ hw->dev_spec._82575.global_device_reset = false;
+
+ /* 82580 does not reliably do global_device_reset due to hw errata */
+ if (hw->mac.type == e1000_82580)
+ global_device_reset = false;
+
+ /* Get current control state. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ /* Determine whether or not a global dev reset is requested */
+ if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw,
+ swmbsw_mask))
+ global_device_reset = false;
+
+ if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STAT_DEV_RST_SET))
+ ctrl |= E1000_CTRL_DEV_RST;
+ else
+ ctrl |= E1000_CTRL_RST;
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ break;
+ default:
+ E1000_WRITE_FLUSH(hw);
+ break;
+ }
+
+ /* Add delay to insure DEV_RST or RST has time to complete */
+ msec_delay(5);
+
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ DEBUGOUT("Auto Read Done did not complete\n");
+ }
+
+ /* clear global device reset status bit */
+ E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
+
+ /* Clear any pending interrupt events. */
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ ret_val = e1000_reset_mdicnfg_82580(hw);
+ if (ret_val)
+ DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");
+
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+ /* Release semaphore */
+ if (global_device_reset)
+ hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
+
+ return ret_val;
+}
+
+/**
+ * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size
+ * @data: data received by reading RXPBS register
+ *
+ * The 82580 uses a table based approach for packet buffer allocation sizes.
+ * This function converts the retrieved value into the correct table value
+ * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
+ * 0x0 36 72 144 1 2 4 8 16
+ * 0x8 35 70 140 rsv rsv rsv rsv rsv
+ */
+u16 e1000_rxpbs_adjust_82580(u32 data)
+{
+ u16 ret_val = 0;
+
+ if (data < E1000_82580_RXPBS_TABLE_SIZE)
+ ret_val = e1000_82580_rxpbs_table[data];
+
+ return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_with_offset - Validate EEPROM
+ * checksum
+ * @hw: pointer to the HW structure
+ * @offset: offset in words of the checksum protected region
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
+
+ for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16) NVM_SUM) {
+ DEBUGOUT("NVM Checksum Invalid\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_with_offset - Update EEPROM
+ * checksum
+ * @hw: pointer to the HW structure
+ * @offset: offset in words of the checksum protected region
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
+
+ for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
+ &checksum);
+ if (ret_val)
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM section checksum by reading/adding each word of
+ * the EEPROM and then verifies that the sum of the EEPROM is
+ * equal to 0xBABA.
+ **/
+STATIC s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 eeprom_regions_count = 1;
+ u16 j, nvm_data;
+ u16 nvm_offset;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_82580");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
+ /* if chekcsums compatibility bit is set validate checksums
+ * for all 4 ports. */
+ eeprom_regions_count = 4;
+ }
+
+ for (j = 0; j < eeprom_regions_count; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+ nvm_offset);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_82580 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM section checksums for all 4 ports by reading/adding
+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+ * checksum and writes the value to the EEPROM.
+ **/
+STATIC s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 j, nvm_data;
+ u16 nvm_offset;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_82580");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n");
+ goto out;
+ }
+
+ if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) {
+ /* set compatibility bit to validate checksums appropriately */
+ nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
+ ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
+ &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n");
+ goto out;
+ }
+ }
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM section checksum by reading/adding each word of
+ * the EEPROM and then verifies that the sum of the EEPROM is
+ * equal to 0xBABA.
+ **/
+STATIC s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 j;
+ u16 nvm_offset;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_i350");
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+ nvm_offset);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_i350 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM section checksums for all 4 ports by reading/adding
+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+ * checksum and writes the value to the EEPROM.
+ **/
+STATIC s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 j;
+ u16 nvm_offset;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_i350");
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * __e1000_access_emi_reg - Read/write EMI register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: pointer to value to read/write from/to the EMI address
+ * @read: boolean flag to indicate read or write
+ **/
+STATIC s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("__e1000_access_emi_reg");
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
+ else
+ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_emi_reg - Read Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be read from the EMI address
+ **/
+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+ DEBUGFUNC("e1000_read_emi_reg");
+
+ return __e1000_access_emi_reg(hw, addr, data, true);
+}
+
+/**
+ * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initialize Marvell 1512 to work correctly with Avoton.
+ **/
+s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_initialize_M88E1512_phy");
+
+ /* Check if this is correct PHY. */
+ if (phy->id != M88E1512_E_PHY_ID)
+ goto out;
+
+ /* Switch to PHY page 0xFF. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0xFB. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0x12. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+ if (ret_val)
+ goto out;
+
+ /* Change mode to SGMII-to-Copper */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ msec_delay(1000);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_M88E1543_phy - Initialize M88E1543 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initialize Marvell 1543 to work correctly with Avoton.
+ **/
+s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_initialize_M88E1543_phy");
+
+ /* Check if this is correct PHY. */
+ if (phy->id != M88E1543_E_PHY_ID)
+ goto out;
+
+ /* Switch to PHY page 0xFF. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0xFB. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0xC00D);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0x12. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+ if (ret_val)
+ goto out;
+
+ /* Change mode to SGMII-to-Copper */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 1. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1);
+ if (ret_val)
+ goto out;
+
+ /* Change mode to 1000BASE-X/SGMII and autoneg enable; reset */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ msec_delay(1000);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_eee_i350 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @adv1g: boolean flag enabling 1G EEE advertisement
+ * @adv100m: boolean flag enabling 100M EEE advertisement
+ *
+ * Enable/disable EEE based on setting in dev_spec structure.
+ *
+ **/
+s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
+{
+ u32 ipcnfg, eeer;
+
+ DEBUGFUNC("e1000_set_eee_i350");
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ goto out;
+ ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
+ eeer = E1000_READ_REG(hw, E1000_EEER);
+
+ /* enable or disable per user setting */
+ if (!(hw->dev_spec._82575.eee_disable)) {
+ u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
+
+ if (adv100M)
+ ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
+ else
+ ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
+
+ if (adv1G)
+ ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
+ else
+ ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
+
+ eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
+ E1000_EEER_LPI_FC);
+
+ /* This bit should not be set in normal operation. */
+ if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
+ DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
+ } else {
+ ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+ eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
+ E1000_EEER_LPI_FC);
+ }
+ E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
+ E1000_WRITE_REG(hw, E1000_EEER, eeer);
+ E1000_READ_REG(hw, E1000_IPCNFG);
+ E1000_READ_REG(hw, E1000_EEER);
+out:
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_eee_i354 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @adv1g: boolean flag enabling 1G EEE advertisement
+ * @adv100m: boolean flag enabling 100M EEE advertisement
+ *
+ * Enable/disable EEE legacy mode based on setting in dev_spec structure.
+ *
+ **/
+s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_set_eee_i354");
+
+ if ((hw->phy.media_type != e1000_media_type_copper) ||
+ ((phy->id != M88E1543_E_PHY_ID) &&
+ (phy->id != M88E1512_E_PHY_ID)))
+ goto out;
+
+ if (!hw->dev_spec._82575.eee_disable) {
+ /* Switch to PHY page 18. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+ phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ /* Turn on EEE advertisement. */
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ if (adv100M)
+ phy_data |= E1000_EEE_ADV_100_SUPPORTED;
+ else
+ phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
+
+ if (adv1G)
+ phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
+ else
+ phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
+
+ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ phy_data);
+ } else {
+ /* Turn off EEE advertisement. */
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
+ E1000_EEE_ADV_1000_SUPPORTED);
+ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ phy_data);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_eee_status_i354 - Get EEE status
+ * @hw: pointer to the HW structure
+ * @status: EEE status
+ *
+ * Get EEE status by guessing based on whether Tx or Rx LPI indications have
+ * been received.
+ **/
+s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_get_eee_status_i354");
+
+ /* Check if EEE is supported on this device. */
+ if ((hw->phy.media_type != e1000_media_type_copper) ||
+ ((phy->id != M88E1543_E_PHY_ID) &&
+ (phy->id != M88E1512_E_PHY_ID)))
+ goto out;
+
+ ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
+ E1000_PCS_STATUS_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
+ E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
+
+out:
+ return ret_val;
+}
+
+/* Due to a hw errata, if the host tries to configure the VFTA register
+ * while performing queries from the BMC or DMA, then the VFTA in some
+ * cases won't be written.
+ */
+
+/**
+ * e1000_clear_vfta_i350 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void e1000_clear_vfta_i350(struct e1000_hw *hw)
+{
+ u32 offset;
+ int i;
+
+ DEBUGFUNC("e1000_clear_vfta_350");
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ for (i = 0; i < 10; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/**
+ * e1000_write_vfta_i350 - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ int i;
+
+ DEBUGFUNC("e1000_write_vfta_350");
+
+ for (i = 0; i < 10; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+
+ E1000_WRITE_FLUSH(hw);
+}
+
+
+/**
+ * e1000_set_i2c_bb - Enable I2C bit-bang
+ * @hw: pointer to the HW structure
+ *
+ * Enable I2C bit-bang interface
+ *
+ **/
+s32 e1000_set_i2c_bb(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext, i2cparams;
+
+ DEBUGFUNC("e1000_set_i2c_bb");
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ i2cparams |= E1000_I2CBB_EN;
+ i2cparams |= E1000_I2C_DATA_OE_N;
+ i2cparams |= E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams);
+ E1000_WRITE_FLUSH(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+ * @data: value read
+ *
+ * Performs byte read operation over I2C interface at
+ * a specified device address.
+ **/
+s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u32 max_retry = 10;
+ u32 retry = 1;
+ u16 swfw_mask = 0;
+
+ bool nack = true;
+
+ DEBUGFUNC("e1000_read_i2c_byte_generic");
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
+ do {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+ != E1000_SUCCESS) {
+ status = E1000_ERR_SWFW_SYNC;
+ goto read_byte_out;
+ }
+
+ e1000_i2c_start(hw);
+
+ /* Device Address and write indication */
+ status = e1000_clock_out_i2c_byte(hw, dev_addr);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_byte(hw, byte_offset);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ e1000_i2c_start(hw);
+
+ /* Device Address and read indication */
+ status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1));
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_in_i2c_byte(hw, data);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_bit(hw, nack);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ e1000_i2c_stop(hw);
+ break;
+
+fail:
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(100);
+ e1000_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte read error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte read error.\n");
+
+ } while (retry < max_retry);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+read_byte_out:
+
+ return status;
+}
+
+/**
+ * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: value to write
+ *
+ * Performs byte write operation over I2C interface at
+ * a specified device address.
+ **/
+s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ s32 status = E1000_SUCCESS;
+ u32 max_retry = 1;
+ u32 retry = 0;
+ u16 swfw_mask = 0;
+
+ DEBUGFUNC("e1000_write_i2c_byte_generic");
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) {
+ status = E1000_ERR_SWFW_SYNC;
+ goto write_byte_out;
+ }
+
+ do {
+ e1000_i2c_start(hw);
+
+ status = e1000_clock_out_i2c_byte(hw, dev_addr);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_byte(hw, byte_offset);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_byte(hw, data);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ e1000_i2c_stop(hw);
+ break;
+
+fail:
+ e1000_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte write error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte write error.\n");
+ } while (retry < max_retry);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+write_byte_out:
+
+ return status;
+}
+
+/**
+ * e1000_i2c_start - Sets I2C start condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C start condition (High -> Low on SDA while SCL is High)
+ **/
+STATIC void e1000_i2c_start(struct e1000_hw *hw)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_i2c_start");
+
+ /* Start condition must begin with data and clock high */
+ e1000_set_i2c_data(hw, &i2cctl, 1);
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for start condition (4.7us) */
+ usec_delay(E1000_I2C_T_SU_STA);
+
+ e1000_set_i2c_data(hw, &i2cctl, 0);
+
+ /* Hold time for start condition (4us) */
+ usec_delay(E1000_I2C_T_HD_STA);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(E1000_I2C_T_LOW);
+
+}
+
+/**
+ * e1000_i2c_stop - Sets I2C stop condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ **/
+STATIC void e1000_i2c_stop(struct e1000_hw *hw)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_i2c_stop");
+
+ /* Stop condition must begin with data low and clock high */
+ e1000_set_i2c_data(hw, &i2cctl, 0);
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for stop condition (4us) */
+ usec_delay(E1000_I2C_T_SU_STO);
+
+ e1000_set_i2c_data(hw, &i2cctl, 1);
+
+ /* bus free time between stop and start (4.7us)*/
+ usec_delay(E1000_I2C_T_BUF);
+}
+
+/**
+ * e1000_clock_in_i2c_byte - Clocks in one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte to clock in
+ *
+ * Clocks in one byte data via I2C data/clock
+ **/
+STATIC s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data)
+{
+ s32 i;
+ bool bit = 0;
+
+ DEBUGFUNC("e1000_clock_in_i2c_byte");
+
+ *data = 0;
+ for (i = 7; i >= 0; i--) {
+ e1000_clock_in_i2c_bit(hw, &bit);
+ *data |= bit << i;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clock_out_i2c_byte - Clocks out one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte clocked out
+ *
+ * Clocks out one byte data via I2C data/clock
+ **/
+STATIC s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data)
+{
+ s32 status = E1000_SUCCESS;
+ s32 i;
+ u32 i2cctl;
+ bool bit = 0;
+
+ DEBUGFUNC("e1000_clock_out_i2c_byte");
+
+ for (i = 7; i >= 0; i--) {
+ bit = (data >> i) & 0x1;
+ status = e1000_clock_out_i2c_bit(hw, bit);
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ /* Release SDA line (set high) */
+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ i2cctl |= E1000_I2C_DATA_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ return status;
+}
+
+/**
+ * e1000_get_i2c_ack - Polls for I2C ACK
+ * @hw: pointer to hardware structure
+ *
+ * Clocks in/out one bit via I2C data/clock
+ **/
+STATIC s32 e1000_get_i2c_ack(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ u32 i = 0;
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ u32 timeout = 10;
+ bool ack = true;
+
+ DEBUGFUNC("e1000_get_i2c_ack");
+
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ /* Wait until SCL returns high */
+ for (i = 0; i < timeout; i++) {
+ usec_delay(1);
+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ if (i2cctl & E1000_I2C_CLK_IN)
+ break;
+ }
+ if (!(i2cctl & E1000_I2C_CLK_IN))
+ return E1000_ERR_I2C;
+
+ ack = e1000_get_i2c_data(&i2cctl);
+ if (ack) {
+ DEBUGOUT("I2C ack was not received.\n");
+ status = E1000_ERR_I2C;
+ }
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(E1000_I2C_T_LOW);
+
+ return status;
+}
+
+/**
+ * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: read data value
+ *
+ * Clocks in one bit via I2C data/clock
+ **/
+STATIC s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_clock_in_i2c_bit");
+
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ *data = e1000_get_i2c_data(&i2cctl);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(E1000_I2C_T_LOW);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: data value to write
+ *
+ * Clocks out one bit via I2C data/clock
+ **/
+STATIC s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data)
+{
+ s32 status;
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_clock_out_i2c_bit");
+
+ status = e1000_set_i2c_data(hw, &i2cctl, data);
+ if (status == E1000_SUCCESS) {
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us.
+ * This also takes care of the data hold time.
+ */
+ usec_delay(E1000_I2C_T_LOW);
+ } else {
+ status = E1000_ERR_I2C;
+ DEBUGOUT1("I2C data was not set to %X\n", data);
+ }
+
+ return status;
+}
+/**
+ * e1000_raise_i2c_clk - Raises the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Raises the I2C clock line '0'->'1'
+ **/
+STATIC void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
+{
+ DEBUGFUNC("e1000_raise_i2c_clk");
+
+ *i2cctl |= E1000_I2C_CLK_OUT;
+ *i2cctl &= ~E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* SCL rise time (1000ns) */
+ usec_delay(E1000_I2C_T_RISE);
+}
+
+/**
+ * e1000_lower_i2c_clk - Lowers the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Lowers the I2C clock line '1'->'0'
+ **/
+STATIC void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
+{
+
+ DEBUGFUNC("e1000_lower_i2c_clk");
+
+ *i2cctl &= ~E1000_I2C_CLK_OUT;
+ *i2cctl &= ~E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* SCL fall time (300ns) */
+ usec_delay(E1000_I2C_T_FALL);
+}
+
+/**
+ * e1000_set_i2c_data - Sets the I2C data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ * @data: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ **/
+STATIC s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data)
+{
+ s32 status = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_set_i2c_data");
+
+ if (data)
+ *i2cctl |= E1000_I2C_DATA_OUT;
+ else
+ *i2cctl &= ~E1000_I2C_DATA_OUT;
+
+ *i2cctl &= ~E1000_I2C_DATA_OE_N;
+ *i2cctl |= E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
+ usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA);
+
+ *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ if (data != e1000_get_i2c_data(i2cctl)) {
+ status = E1000_ERR_I2C;
+ DEBUGOUT1("Error - I2C data was not set to %X.\n", data);
+ }
+
+ return status;
+}
+
+/**
+ * e1000_get_i2c_data - Reads the I2C SDA data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Returns the I2C data bit value
+ **/
+STATIC bool e1000_get_i2c_data(u32 *i2cctl)
+{
+ bool data;
+
+ DEBUGFUNC("e1000_get_i2c_data");
+
+ if (*i2cctl & E1000_I2C_DATA_IN)
+ data = 1;
+ else
+ data = 0;
+
+ return data;
+}
+
+/**
+ * e1000_i2c_bus_clear - Clears the I2C bus
+ * @hw: pointer to hardware structure
+ *
+ * Clears the I2C bus by sending nine clock pulses.
+ * Used when data line is stuck low.
+ **/
+void e1000_i2c_bus_clear(struct e1000_hw *hw)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ u32 i;
+
+ DEBUGFUNC("e1000_i2c_bus_clear");
+
+ e1000_i2c_start(hw);
+
+ e1000_set_i2c_data(hw, &i2cctl, 1);
+
+ for (i = 0; i < 9; i++) {
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Min high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Min low period of clock is 4.7us*/
+ usec_delay(E1000_I2C_T_LOW);
+ }
+
+ e1000_i2c_start(hw);
+
+ /* Put the i2c bus back to default state */
+ e1000_i2c_stop(hw);
+}
+
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.h
new file mode 100644
index 00000000..4133cdd8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.h
@@ -0,0 +1,522 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_82575_H_
+#define _E1000_82575_H_
+
+#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
+/*
+ * Receive Address Register Count
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * These entries are also used for MAC-based filtering.
+ */
+/*
+ * For 82576, there are an additional set of RARs that begin at an offset
+ * separate from the first set of RARs.
+ */
+#define E1000_RAR_ENTRIES_82575 16
+#define E1000_RAR_ENTRIES_82576 24
+#define E1000_RAR_ENTRIES_82580 24
+#define E1000_RAR_ENTRIES_I350 32
+#define E1000_SW_SYNCH_MB 0x00000100
+#define E1000_STAT_DEV_RST_SET 0x00100000
+#define E1000_CTRL_DEV_RST 0x20000000
+
+#ifdef E1000_BIT_FIELDS
+struct e1000_adv_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ u32 data;
+ struct {
+ u32 datalen:16; /* Data buffer length */
+ u32 rsvd:4;
+ u32 dtyp:4; /* Descriptor type */
+ u32 dcmd:8; /* Descriptor command */
+ } config;
+ } lower;
+ union {
+ u32 data;
+ struct {
+ u32 status:4; /* Descriptor status */
+ u32 idx:4;
+ u32 popts:6; /* Packet Options */
+ u32 paylen:18; /* Payload length */
+ } options;
+ } upper;
+};
+
+#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */
+#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */
+#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */
+#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */
+#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */
+#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */
+#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */
+#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */
+#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADV_DCMD_RS 0x8 /* Report Status */
+#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */
+#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */
+/* Extended Device Control */
+#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */
+
+struct e1000_adv_context_desc {
+ union {
+ u32 ip_config;
+ struct {
+ u32 iplen:9;
+ u32 maclen:7;
+ u32 vlan_tag:16;
+ } fields;
+ } ip_setup;
+ u32 seq_num;
+ union {
+ u64 l4_config;
+ struct {
+ u32 mkrloc:9;
+ u32 tucmd:11;
+ u32 dtyp:4;
+ u32 adv:8;
+ u32 rsvd:4;
+ u32 idx:4;
+ u32 l4len:8;
+ u32 mss:16;
+ } fields;
+ } l4_setup;
+};
+#endif
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
+#define E1000_SRRCTL_TIMESTAMP 0x40000000
+#define E1000_SRRCTL_DROP_EN 0x80000000
+
+#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
+
+#define E1000_TX_HEAD_WB_ENABLE 0x1
+#define E1000_TX_SEQNUM_WB_ENABLE 0x2
+
+#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
+#define E1000_MRQC_ENABLE_VMDQ 0x00000003
+#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
+#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
+#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002
+
+#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8
+#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \
+ E1000_VMRCTL_MIRROR_PORT_SHIFT)
+#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0)
+#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1)
+#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
+
+#define E1000_EICR_TX_QUEUE ( \
+ E1000_EICR_TX_QUEUE0 | \
+ E1000_EICR_TX_QUEUE1 | \
+ E1000_EICR_TX_QUEUE2 | \
+ E1000_EICR_TX_QUEUE3)
+
+#define E1000_EICR_RX_QUEUE ( \
+ E1000_EICR_RX_QUEUE0 | \
+ E1000_EICR_RX_QUEUE1 | \
+ E1000_EICR_RX_QUEUE2 | \
+ E1000_EICR_RX_QUEUE3)
+
+#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
+#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
+
+#define EIMS_ENABLE_MASK ( \
+ E1000_EIMS_RX_QUEUE | \
+ E1000_EIMS_TX_QUEUE | \
+ E1000_EIMS_TCP_TIMER | \
+ E1000_EIMS_OTHER)
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
+#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
+#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
+#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
+#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
+#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
+#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
+#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
+#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
+
+/* Receive Descriptor - Advanced */
+union e1000_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /*RSS type, Pkt type*/
+ /* Split Header, header buffer len */
+ __le16 hdr_info;
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F
+#define E1000_RXDADV_RSSTYPE_SHIFT 12
+#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
+#define E1000_RXDADV_SPLITHEADER_EN 0x00001000
+#define E1000_RXDADV_SPH 0x8000
+#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define E1000_RXDADV_ERR_HBO 0x00800000
+
+/* RSS Hash results */
+#define E1000_RXDADV_RSSTYPE_NONE 0x00000000
+#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor */
+#define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0
+#define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00
+#define E1000_RXDADV_PKTTYPE_NONE 0x00000000
+#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
+#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+
+#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
+#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
+#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
+#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
+#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
+
+/* LinkSec results */
+/* Security Processing bit Indication */
+#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000
+#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
+#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
+#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
+#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
+
+#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
+
+/* Transmit Descriptor - Advanced */
+union e1000_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */
+#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */
+#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */
+#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+/* 1st & Last TSO-full iSCSI PDU*/
+#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800
+#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
+#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+
+/* Context descriptors */
+struct e1000_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
+/* IPSec Encrypt Enable for ESP */
+#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
+/* Req requires Markers and CRC */
+#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000
+#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+/* Adv ctxt IPSec SA IDX mask */
+#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF
+/* Adv ctxt IPSec ESP len mask */
+#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF
+
+/* Additional Transmit Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */
+/* Tx Queue Arbitration Priority 0=low, 1=high */
+#define E1000_TXDCTL_PRIORITY 0x08000000
+
+/* Additional Receive Descriptor Control definitions */
+#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
+#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */
+
+/* Direct Cache Access (DCA) definitions */
+#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
+#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
+#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
+#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
+#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
+
+/* Additional interrupt register bit definitions */
+#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */
+#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
+#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
+
+/* ETQF register bit definitions */
+#define E1000_ETQF_FILTER_ENABLE (1 << 26)
+#define E1000_ETQF_IMM_INT (1 << 29)
+#define E1000_ETQF_1588 (1 << 30)
+#define E1000_ETQF_QUEUE_ENABLE (1 << 31)
+/*
+ * ETQF filter list: one static filter per filter consumer. This is
+ * to avoid filter collisions later. Add new filters
+ * here!!
+ *
+ * Current filters:
+ * EAPOL 802.1x (0x888e): Filter 0
+ */
+#define E1000_ETQF_FILTER_EAPOL 0
+
+#define E1000_FTQF_VF_BP 0x00008000
+#define E1000_FTQF_1588_TIME_STAMP 0x08000000
+#define E1000_FTQF_MASK 0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP 0x10000000
+#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
+#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
+#define E1000_NVM_APME_82575 0x0400
+#define MAX_NUM_VFS 7
+
+#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */
+#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */
+#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
+#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
+#define E1000_DTXSWC_LLE_SHIFT 16
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+
+/* Easy defines for setting default pool, would normally be left a zero */
+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+
+/* Other useful VMD_CTL register defines */
+#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
+#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
+
+/* Per VM Offload register setup */
+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
+#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
+#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
+#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
+#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */
+#define E1000_VMOLR_UPE 0x20000000 /* Unicast promisuous enable */
+#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */
+#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
+#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */
+#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */
+
+#define E1000_VLVF_ARRAY_SIZE 32
+#define E1000_VLVF_VLANID_MASK 0x00000FFF
+#define E1000_VLVF_POOLSEL_SHIFT 12
+#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
+#define E1000_VLVF_LVLAN 0x00100000
+#define E1000_VLVF_VLANID_ENABLE 0x80000000
+
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
+#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+#define E1000_IOVCTL 0x05BBC
+#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+
+#define E1000_RPLOLR_STRVLAN 0x40000000
+#define E1000_RPLOLR_STRCRC 0x80000000
+
+#define E1000_TCTL_EXT_COLD 0x000FFC00
+#define E1000_TCTL_EXT_COLD_SHIFT 10
+
+#define E1000_DTXCTL_8023LL 0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN 0x0020
+#define E1000_DTXCTL_SPOOF_INT 0x0040
+
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
+
+#define ALL_QUEUES 0xFFFF
+
+/* Rx packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
+s32 e1000_init_hw_82575(struct e1000_hw *hw);
+
+enum e1000_promisc_type {
+ e1000_promisc_disabled = 0, /* all promisc modes disabled */
+ e1000_promisc_unicast = 1, /* unicast promiscuous enabled */
+ e1000_promisc_multicast = 2, /* multicast promiscuous enabled */
+ e1000_promisc_enabled = 3, /* both uni and multicast promisc */
+ e1000_num_promisc_types
+};
+
+void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
+void e1000_rlpml_set_vf(struct e1000_hw *, u16);
+s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type);
+void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
+u16 e1000_rxpbs_adjust_82580(u32 data);
+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
+s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M);
+s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M);
+s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
+s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw);
+s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw);
+
+/* I2C SDA and SCL timing parameters for standard mode */
+#define E1000_I2C_T_HD_STA 4
+#define E1000_I2C_T_LOW 5
+#define E1000_I2C_T_HIGH 4
+#define E1000_I2C_T_SU_STA 5
+#define E1000_I2C_T_HD_DATA 5
+#define E1000_I2C_T_SU_DATA 1
+#define E1000_I2C_T_RISE 1
+#define E1000_I2C_T_FALL 1
+#define E1000_I2C_T_SU_STO 4
+#define E1000_I2C_T_BUF 5
+
+s32 e1000_set_i2c_bb(struct e1000_hw *hw);
+s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+void e1000_i2c_bus_clear(struct e1000_hw *hw);
+#endif /* _E1000_82575_H_ */
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_api.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_api.c
new file mode 100644
index 00000000..f7cf83b6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_api.c
@@ -0,0 +1,1382 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_api.h"
+
+/**
+ * e1000_init_mac_params - Initialize MAC function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the MAC
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mac_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->mac.ops.init_params) {
+ ret_val = hw->mac.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("MAC Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("mac.init_mac_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the NVM
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_nvm_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->nvm.ops.init_params) {
+ ret_val = hw->nvm.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("NVM Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("nvm.init_nvm_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_phy_params - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the PHY
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_phy_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->phy.ops.init_params) {
+ ret_val = hw->phy.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("PHY Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("phy.init_phy_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mbx_params - Initialize mailbox function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the PHY
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mbx_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->mbx.ops.init_params) {
+ ret_val = hw->mbx.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("Mailbox Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("mbx.init_mbx_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * device ID stored in the hw structure.
+ * MUST BE FIRST FUNCTION CALLED (explicitly or through
+ * e1000_setup_init_funcs()).
+ **/
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_set_mac_type");
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82542:
+ mac->type = e1000_82542;
+ break;
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82543GC_COPPER:
+ mac->type = e1000_82543;
+ break;
+ case E1000_DEV_ID_82544EI_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82544GC_COPPER:
+ case E1000_DEV_ID_82544GC_LOM:
+ mac->type = e1000_82544;
+ break;
+ case E1000_DEV_ID_82540EM:
+ case E1000_DEV_ID_82540EM_LOM:
+ case E1000_DEV_ID_82540EP:
+ case E1000_DEV_ID_82540EP_LOM:
+ case E1000_DEV_ID_82540EP_LP:
+ mac->type = e1000_82540;
+ break;
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ mac->type = e1000_82545;
+ break;
+ case E1000_DEV_ID_82545GM_COPPER:
+ case E1000_DEV_ID_82545GM_FIBER:
+ case E1000_DEV_ID_82545GM_SERDES:
+ mac->type = e1000_82545_rev_3;
+ break;
+ case E1000_DEV_ID_82546EB_COPPER:
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ mac->type = e1000_82546;
+ break;
+ case E1000_DEV_ID_82546GB_COPPER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82546GB_SERDES:
+ case E1000_DEV_ID_82546GB_PCIE:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ mac->type = e1000_82546_rev_3;
+ break;
+ case E1000_DEV_ID_82541EI:
+ case E1000_DEV_ID_82541EI_MOBILE:
+ case E1000_DEV_ID_82541ER_LOM:
+ mac->type = e1000_82541;
+ break;
+ case E1000_DEV_ID_82541ER:
+ case E1000_DEV_ID_82541GI:
+ case E1000_DEV_ID_82541GI_LF:
+ case E1000_DEV_ID_82541GI_MOBILE:
+ mac->type = e1000_82541_rev_2;
+ break;
+ case E1000_DEV_ID_82547EI:
+ case E1000_DEV_ID_82547EI_MOBILE:
+ mac->type = e1000_82547;
+ break;
+ case E1000_DEV_ID_82547GI:
+ mac->type = e1000_82547_rev_2;
+ break;
+ case E1000_DEV_ID_82571EB_COPPER:
+ case E1000_DEV_ID_82571EB_FIBER:
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82571EB_SERDES_DUAL:
+ case E1000_DEV_ID_82571EB_SERDES_QUAD:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571PT_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+ mac->type = e1000_82571;
+ break;
+ case E1000_DEV_ID_82572EI:
+ case E1000_DEV_ID_82572EI_COPPER:
+ case E1000_DEV_ID_82572EI_FIBER:
+ case E1000_DEV_ID_82572EI_SERDES:
+ mac->type = e1000_82572;
+ break;
+ case E1000_DEV_ID_82573E:
+ case E1000_DEV_ID_82573E_IAMT:
+ case E1000_DEV_ID_82573L:
+ mac->type = e1000_82573;
+ break;
+ case E1000_DEV_ID_82574L:
+ case E1000_DEV_ID_82574LA:
+ mac->type = e1000_82574;
+ break;
+ case E1000_DEV_ID_82583V:
+ mac->type = e1000_82583;
+ break;
+ case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
+ case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+ case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
+ case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
+ mac->type = e1000_80003es2lan;
+ break;
+ case E1000_DEV_ID_ICH8_IFE:
+ case E1000_DEV_ID_ICH8_IFE_GT:
+ case E1000_DEV_ID_ICH8_IFE_G:
+ case E1000_DEV_ID_ICH8_IGP_M:
+ case E1000_DEV_ID_ICH8_IGP_M_AMT:
+ case E1000_DEV_ID_ICH8_IGP_AMT:
+ case E1000_DEV_ID_ICH8_IGP_C:
+ case E1000_DEV_ID_ICH8_82567V_3:
+ mac->type = e1000_ich8lan;
+ break;
+ case E1000_DEV_ID_ICH9_IFE:
+ case E1000_DEV_ID_ICH9_IFE_GT:
+ case E1000_DEV_ID_ICH9_IFE_G:
+ case E1000_DEV_ID_ICH9_IGP_M:
+ case E1000_DEV_ID_ICH9_IGP_M_AMT:
+ case E1000_DEV_ID_ICH9_IGP_M_V:
+ case E1000_DEV_ID_ICH9_IGP_AMT:
+ case E1000_DEV_ID_ICH9_BM:
+ case E1000_DEV_ID_ICH9_IGP_C:
+ case E1000_DEV_ID_ICH10_R_BM_LM:
+ case E1000_DEV_ID_ICH10_R_BM_LF:
+ case E1000_DEV_ID_ICH10_R_BM_V:
+ mac->type = e1000_ich9lan;
+ break;
+ case E1000_DEV_ID_ICH10_D_BM_LM:
+ case E1000_DEV_ID_ICH10_D_BM_LF:
+ case E1000_DEV_ID_ICH10_D_BM_V:
+ mac->type = e1000_ich10lan;
+ break;
+ case E1000_DEV_ID_PCH_D_HV_DM:
+ case E1000_DEV_ID_PCH_D_HV_DC:
+ case E1000_DEV_ID_PCH_M_HV_LM:
+ case E1000_DEV_ID_PCH_M_HV_LC:
+ mac->type = e1000_pchlan;
+ break;
+ case E1000_DEV_ID_PCH2_LV_LM:
+ case E1000_DEV_ID_PCH2_LV_V:
+ mac->type = e1000_pch2lan;
+ break;
+ case E1000_DEV_ID_PCH_LPT_I217_LM:
+ case E1000_DEV_ID_PCH_LPT_I217_V:
+ case E1000_DEV_ID_PCH_LPTLP_I218_LM:
+ case E1000_DEV_ID_PCH_LPTLP_I218_V:
+ case E1000_DEV_ID_PCH_I218_LM2:
+ case E1000_DEV_ID_PCH_I218_V2:
+ case E1000_DEV_ID_PCH_I218_LM3:
+ case E1000_DEV_ID_PCH_I218_V3:
+ mac->type = e1000_pch_lpt;
+ break;
+ case E1000_DEV_ID_PCH_SPT_I219_LM:
+ case E1000_DEV_ID_PCH_SPT_I219_V:
+ case E1000_DEV_ID_PCH_SPT_I219_LM2:
+ case E1000_DEV_ID_PCH_SPT_I219_V2:
+ case E1000_DEV_ID_PCH_LBG_I219_LM3:
+ case E1000_DEV_ID_PCH_SPT_I219_LM4:
+ case E1000_DEV_ID_PCH_SPT_I219_V4:
+ case E1000_DEV_ID_PCH_SPT_I219_LM5:
+ case E1000_DEV_ID_PCH_SPT_I219_V5:
+ mac->type = e1000_pch_spt;
+ break;
+ case E1000_DEV_ID_PCH_CNP_I219_LM6:
+ case E1000_DEV_ID_PCH_CNP_I219_V6:
+ case E1000_DEV_ID_PCH_CNP_I219_LM7:
+ case E1000_DEV_ID_PCH_CNP_I219_V7:
+ mac->type = e1000_pch_cnp;
+ break;
+ case E1000_DEV_ID_82575EB_COPPER:
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ mac->type = e1000_82575;
+ break;
+ case E1000_DEV_ID_82576:
+ case E1000_DEV_ID_82576_FIBER:
+ case E1000_DEV_ID_82576_SERDES:
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ case E1000_DEV_ID_82576_NS:
+ case E1000_DEV_ID_82576_NS_SERDES:
+ case E1000_DEV_ID_82576_SERDES_QUAD:
+ mac->type = e1000_82576;
+ break;
+ case E1000_DEV_ID_82580_COPPER:
+ case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_SERDES:
+ case E1000_DEV_ID_82580_SGMII:
+ case E1000_DEV_ID_82580_COPPER_DUAL:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ mac->type = e1000_82580;
+ break;
+ case E1000_DEV_ID_I350_COPPER:
+ case E1000_DEV_ID_I350_FIBER:
+ case E1000_DEV_ID_I350_SERDES:
+ case E1000_DEV_ID_I350_SGMII:
+ case E1000_DEV_ID_I350_DA4:
+ mac->type = e1000_i350;
+ break;
+ case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+ case E1000_DEV_ID_I210_SERDES_FLASHLESS:
+ case E1000_DEV_ID_I210_COPPER:
+ case E1000_DEV_ID_I210_COPPER_OEM1:
+ case E1000_DEV_ID_I210_COPPER_IT:
+ case E1000_DEV_ID_I210_FIBER:
+ case E1000_DEV_ID_I210_SERDES:
+ case E1000_DEV_ID_I210_SGMII:
+ mac->type = e1000_i210;
+ break;
+ case E1000_DEV_ID_I211_COPPER:
+ mac->type = e1000_i211;
+ break;
+ case E1000_DEV_ID_82576_VF:
+ case E1000_DEV_ID_82576_VF_HV:
+ mac->type = e1000_vfadapt;
+ break;
+ case E1000_DEV_ID_I350_VF:
+ case E1000_DEV_ID_I350_VF_HV:
+ mac->type = e1000_vfadapt_i350;
+ break;
+
+ case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
+ case E1000_DEV_ID_I354_SGMII:
+ case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
+ mac->type = e1000_i354;
+ break;
+ default:
+ /* Should never have loaded on this device */
+ ret_val = -E1000_ERR_MAC_INIT;
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_init_funcs - Initializes function pointers
+ * @hw: pointer to the HW structure
+ * @init_device: true will initialize the rest of the function pointers
+ * getting the device ready for use. false will only set
+ * MAC type and the function pointers for the other init
+ * functions. Passing false will not generate any hardware
+ * reads or writes.
+ *
+ * This function must be called by a driver in order to use the rest
+ * of the 'shared' code files. Called by drivers only.
+ **/
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
+{
+ s32 ret_val;
+
+ /* Can't do much good without knowing the MAC type. */
+ ret_val = e1000_set_mac_type(hw);
+ if (ret_val) {
+ DEBUGOUT("ERROR: MAC type could not be set properly.\n");
+ goto out;
+ }
+
+ if (!hw->hw_addr) {
+ DEBUGOUT("ERROR: Registers not mapped\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * Init function pointers to generic implementations. We do this first
+ * allowing a driver module to override it afterward.
+ */
+ e1000_init_mac_ops_generic(hw);
+ e1000_init_phy_ops_generic(hw);
+ e1000_init_nvm_ops_generic(hw);
+ e1000_init_mbx_ops_generic(hw);
+
+ /*
+ * Set up the init function pointers. These are functions within the
+ * adapter family file that sets up function pointers for the rest of
+ * the functions in that family.
+ */
+ switch (hw->mac.type) {
+ case e1000_82542:
+ e1000_init_function_pointers_82542(hw);
+ break;
+ case e1000_82543:
+ case e1000_82544:
+ e1000_init_function_pointers_82543(hw);
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ e1000_init_function_pointers_82540(hw);
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ e1000_init_function_pointers_82541(hw);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ e1000_init_function_pointers_82571(hw);
+ break;
+ case e1000_80003es2lan:
+ e1000_init_function_pointers_80003es2lan(hw);
+ break;
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
+ e1000_init_function_pointers_ich8lan(hw);
+ break;
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ e1000_init_function_pointers_82575(hw);
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ e1000_init_function_pointers_i210(hw);
+ break;
+ case e1000_vfadapt:
+ e1000_init_function_pointers_vf(hw);
+ break;
+ case e1000_vfadapt_i350:
+ e1000_init_function_pointers_vf(hw);
+ break;
+ default:
+ DEBUGOUT("Hardware not supported\n");
+ ret_val = -E1000_ERR_CONFIG;
+ break;
+ }
+
+ /*
+ * Initialize the rest of the function pointers. These require some
+ * register reads/writes in some cases.
+ */
+ if (!(ret_val) && init_device) {
+ ret_val = e1000_init_mac_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_init_nvm_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_init_phy_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_init_mbx_params(hw);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_bus_info - Obtain bus information for adapter
+ * @hw: pointer to the HW structure
+ *
+ * This will obtain information about the HW bus for which the
+ * adapter is attached and stores it in the hw structure. This is a
+ * function pointer entry point called by drivers.
+ **/
+s32 e1000_get_bus_info(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.get_bus_info)
+ return hw->mac.ops.get_bus_info(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * This clears the VLAN filter table on the adapter. This is a function
+ * pointer entry point called by drivers.
+ **/
+void e1000_clear_vfta(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.clear_vfta)
+ hw->mac.ops.clear_vfta(hw);
+}
+
+/**
+ * e1000_write_vfta - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: the 32-bit offset in which to write the value to.
+ * @value: the 32-bit value to write at location offset.
+ *
+ * This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ * table. This is a function pointer entry point called by drivers.
+ **/
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ if (hw->mac.ops.write_vfta)
+ hw->mac.ops.write_vfta(hw, offset, value);
+}
+
+/**
+ * e1000_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates the Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count)
+{
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
+ mc_addr_count);
+}
+
+/**
+ * e1000_force_mac_fc - Force MAC flow control
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Currently no func pointer exists
+ * and all implementations are handled in the generic version of this
+ * function.
+ **/
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+ return e1000_force_mac_fc_generic(hw);
+}
+
+/**
+ * e1000_check_for_link - Check/Store link connection
+ * @hw: pointer to the HW structure
+ *
+ * This checks the link condition of the adapter and stores the
+ * results in the hw->mac structure. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.check_for_link)
+ return hw->mac.ops.check_for_link(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_check_mng_mode - Check management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has manageability enabled.
+ * This is a function pointer entry point called by drivers.
+ **/
+bool e1000_check_mng_mode(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.check_mng_mode)
+ return hw->mac.ops.check_mng_mode(hw);
+
+ return false;
+}
+
+/**
+ * e1000_mng_write_dhcp_info - Writes DHCP info to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface
+ * @length: size of the buffer
+ *
+ * Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+ return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
+}
+
+/**
+ * e1000_reset_hw - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.reset_hw)
+ return hw->mac.ops.reset_hw(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_init_hw - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation. This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.init_hw)
+ return hw->mac.ops.init_hw(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_setup_link - Configures link and flow control
+ * @hw: pointer to the HW structure
+ *
+ * This configures link and flow control settings for the adapter. This
+ * is a function pointer entry point called by drivers. While modules can
+ * also call this, they probably call their own version of this function.
+ **/
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.setup_link)
+ return hw->mac.ops.setup_link(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_get_speed_and_duplex - Returns current speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to a 16-bit value to store the speed
+ * @duplex: pointer to a 16-bit value to store the duplex.
+ *
+ * This returns the speed and duplex of the adapter in the two 'out'
+ * variables passed in. This is a function pointer entry point called
+ * by drivers.
+ **/
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+ if (hw->mac.ops.get_link_up_info)
+ return hw->mac.ops.get_link_up_info(hw, speed, duplex);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_setup_led - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use and saves the current state
+ * of the LED so it can be later restored. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.setup_led)
+ return hw->mac.ops.setup_led(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_cleanup_led - Restores SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This restores the SW controllable LED to the value saved off by
+ * e1000_setup_led. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.cleanup_led)
+ return hw->mac.ops.cleanup_led(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_blink_led - Blink SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This starts the adapter LED blinking. Request the LED to be setup first
+ * and cleaned up after. This is a function pointer entry point called by
+ * drivers.
+ **/
+s32 e1000_blink_led(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.blink_led)
+ return hw->mac.ops.blink_led(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_id_led_init - store LED configurations in SW
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the LED config in SW. This is a function pointer entry point
+ * called by drivers.
+ **/
+s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.id_led_init)
+ return hw->mac.ops.id_led_init(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_on - Turn on SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED on. This is a function pointer entry point
+ * called by drivers.
+ **/
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.led_on)
+ return hw->mac.ops.led_on(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off - Turn off SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED off. This is a function pointer entry point
+ * called by drivers.
+ **/
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.led_off)
+ return hw->mac.ops.led_off(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_adaptive - Reset adaptive IFS
+ * @hw: pointer to the HW structure
+ *
+ * Resets the adaptive IFS. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+ e1000_reset_adaptive_generic(hw);
+}
+
+/**
+ * e1000_update_adaptive - Update adaptive IFS
+ * @hw: pointer to the HW structure
+ *
+ * Updates adapter IFS. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+ e1000_update_adaptive_generic(hw);
+}
+
+/**
+ * e1000_disable_pcie_master - Disable PCI-Express master access
+ * @hw: pointer to the HW structure
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests. Currently no func pointer exists and all implementations are
+ * handled in the generic version of this function.
+ **/
+s32 e1000_disable_pcie_master(struct e1000_hw *hw)
+{
+ return e1000_disable_pcie_master_generic(hw);
+}
+
+/**
+ * e1000_config_collision_dist - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.config_collision_dist)
+ hw->mac.ops.config_collision_dist(hw);
+}
+
+/**
+ * e1000_rar_set - Sets a receive address register
+ * @hw: pointer to the HW structure
+ * @addr: address to set the RAR to
+ * @index: the RAR to set
+ *
+ * Sets a Receive Address Register (RAR) to the specified address.
+ **/
+int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ if (hw->mac.ops.rar_set)
+ return hw->mac.ops.rar_set(hw, addr, index);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
+ * @hw: pointer to the HW structure
+ *
+ * Ensures that the MDI/MDIX SW state is valid.
+ **/
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.validate_mdi_setting)
+ return hw->mac.ops.validate_mdi_setting(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_hash_mc_addr - Determines address location in multicast table
+ * @hw: pointer to the HW structure
+ * @mc_addr: Multicast address to hash.
+ *
+ * This hashes an address to determine its location in the multicast
+ * table. Currently no func pointer exists and all implementations
+ * are handled in the generic version of this function.
+ **/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+ return e1000_hash_mc_addr_generic(hw, mc_addr);
+}
+
+/**
+ * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
+ * @hw: pointer to the HW structure
+ *
+ * Enables packet filtering on transmit packets if manageability is enabled
+ * and host interface is enabled.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+ return e1000_enable_tx_pkt_filtering_generic(hw);
+}
+
+/**
+ * e1000_mng_host_if_write - Writes to the manageability host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface buffer
+ * @length: size of the buffer
+ * @offset: location in the buffer to write to
+ * @sum: sum of the data (not checksum)
+ *
+ * This function writes the buffer content at the offset given on the host if.
+ * It also does alignment considerations to do the writes in most efficient
+ * way. Also fills up the sum of the buffer in *buffer parameter.
+ **/
+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum)
+{
+ return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum);
+}
+
+/**
+ * e1000_mng_write_cmd_header - Writes manageability command header
+ * @hw: pointer to the HW structure
+ * @hdr: pointer to the host interface command header
+ *
+ * Writes the command header after does the checksum calculation.
+ **/
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
+{
+ return e1000_mng_write_cmd_header_generic(hw, hdr);
+}
+
+/**
+ * e1000_mng_enable_host_if - Checks host interface is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ * This function checks whether the HOST IF is enabled for command operation
+ * and also checks whether the previous command is completed. It busy waits
+ * in case of previous command is not completed.
+ **/
+s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+ return e1000_mng_enable_host_if_generic(hw);
+}
+
+/**
+ * e1000_check_reset_block - Verifies PHY can be reset
+ * @hw: pointer to the HW structure
+ *
+ * Checks if the PHY is in a state that can be reset or if manageability
+ * has it tied up. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.check_reset_block)
+ return hw->phy.ops.check_reset_block(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_phy_reg - Reads PHY register
+ * @hw: pointer to the HW structure
+ * @offset: the register to read
+ * @data: the buffer to store the 16-bit read.
+ *
+ * Reads the PHY register and returns the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ if (hw->phy.ops.read_reg)
+ return hw->phy.ops.read_reg(hw, offset, data);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg - Writes PHY register
+ * @hw: pointer to the HW structure
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes the PHY register at offset with the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ if (hw->phy.ops.write_reg)
+ return hw->phy.ops.write_reg(hw, offset, data);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_release_phy - Generic release PHY
+ * @hw: pointer to the HW structure
+ *
+ * Return if silicon family does not require a semaphore when accessing the
+ * PHY.
+ **/
+void e1000_release_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.release)
+ hw->phy.ops.release(hw);
+}
+
+/**
+ * e1000_acquire_phy - Generic acquire PHY
+ * @hw: pointer to the HW structure
+ *
+ * Return success if silicon family does not require a semaphore when
+ * accessing the PHY.
+ **/
+s32 e1000_acquire_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.acquire)
+ return hw->phy.ops.acquire(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_cfg_on_link_up - Configure PHY upon link up
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_cfg_on_link_up(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.cfg_on_link_up)
+ return hw->phy.ops.cfg_on_link_up(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_kmrn_reg - Reads register using Kumeran interface
+ * @hw: pointer to the HW structure
+ * @offset: the register to read
+ * @data: the location to store the 16-bit value read.
+ *
+ * Reads a register out of the Kumeran interface. Currently no func pointer
+ * exists and all implementations are handled in the generic version of
+ * this function.
+ **/
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return e1000_read_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ * e1000_write_kmrn_reg - Writes register using Kumeran interface
+ * @hw: pointer to the HW structure
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes a register to the Kumeran interface. Currently no func pointer
+ * exists and all implementations are handled in the generic version of
+ * this function.
+ **/
+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return e1000_write_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ * e1000_get_cable_length - Retrieves cable length estimation
+ * @hw: pointer to the HW structure
+ *
+ * This function estimates the cable length and stores them in
+ * hw->phy.min_length and hw->phy.max_length. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000_get_cable_length(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.get_cable_length)
+ return hw->phy.ops.get_cable_length(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_info - Retrieves PHY information from registers
+ * @hw: pointer to the HW structure
+ *
+ * This function gets some information from various PHY registers and
+ * populates hw->phy values with it. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.get_info)
+ return hw->phy.ops.get_info(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_hw_reset - Hard PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Performs a hard PHY reset. This is a function pointer entry point called
+ * by drivers.
+ **/
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.reset)
+ return hw->phy.ops.reset(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_commit - Soft PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Performs a soft PHY reset on those that apply. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000_phy_commit(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.commit)
+ return hw->phy.ops.commit(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d0_lplu_state - Sets low power link up state for D0
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D0
+ * and SmartSpeed is disabled when active is true, else clear lplu for D0
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+ if (hw->phy.ops.set_d0_lplu_state)
+ return hw->phy.ops.set_d0_lplu_state(hw, active);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d3_lplu_state - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+ if (hw->phy.ops.set_d3_lplu_state)
+ return hw->phy.ops.set_d3_lplu_state(hw, active);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mac_addr - Reads MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MAC address out of the adapter and stores it in the HW structure.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.read_mac_addr)
+ return hw->mac.ops.read_mac_addr(hw);
+
+ return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ * e1000_read_pba_string - Read device part number string
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ return e1000_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ * e1000_read_pba_length - Read device part number string length
+ * @hw: pointer to the HW structure
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number length from the EEPROM and
+ * stores the value in pba_num.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
+{
+ return e1000_read_pba_length_generic(hw, pba_num_size);
+}
+
+/**
+ * e1000_read_pba_num - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
+{
+ return e1000_read_pba_num_generic(hw, pba_num);
+}
+
+/**
+ * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
+ * @hw: pointer to the HW structure
+ *
+ * Validates the NVM checksum is correct. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+ if (hw->nvm.ops.validate)
+ return hw->nvm.ops.validate(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the NVM checksum. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
+{
+ if (hw->nvm.ops.update)
+ return hw->nvm.ops.update(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_reload_nvm - Reloads EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
+ **/
+void e1000_reload_nvm(struct e1000_hw *hw)
+{
+ if (hw->nvm.ops.reload)
+ hw->nvm.ops.reload(hw);
+}
+
+/**
+ * e1000_read_nvm - Reads NVM (EEPROM)
+ * @hw: pointer to the HW structure
+ * @offset: the word offset to read
+ * @words: number of 16-bit words to read
+ * @data: pointer to the properly sized buffer for the data.
+ *
+ * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ if (hw->nvm.ops.read)
+ return hw->nvm.ops.read(hw, offset, words, data);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_write_nvm - Writes to NVM (EEPROM)
+ * @hw: pointer to the HW structure
+ * @offset: the word offset to read
+ * @words: number of 16-bit words to write
+ * @data: pointer to the properly sized buffer for the data.
+ *
+ * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ if (hw->nvm.ops.write)
+ return hw->nvm.ops.write(hw, offset, words, data);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_8bit_ctrl_reg - Writes 8bit Control register
+ * @hw: pointer to the HW structure
+ * @reg: 32bit register offset
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes the PHY register at offset with the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
+ u8 data)
+{
+ return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
+}
+
+/**
+ * e1000_power_up_phy - Restores link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * The phy may be powered down to save power, to turn off link when the
+ * driver is unloaded, or wake on lan is not enabled (among others).
+ **/
+void e1000_power_up_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.power_up)
+ hw->phy.ops.power_up(hw);
+
+ e1000_setup_link(hw);
+}
+
+/**
+ * e1000_power_down_phy - Power down PHY
+ * @hw: pointer to the HW structure
+ *
+ * The phy may be powered down to save power, to turn off link when the
+ * driver is unloaded, or wake on lan is not enabled (among others).
+ **/
+void e1000_power_down_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.power_down)
+ hw->phy.ops.power_down(hw);
+}
+
+/**
+ * e1000_power_up_fiber_serdes_link - Power up serdes link
+ * @hw: pointer to the HW structure
+ *
+ * Power on the optics and PCS.
+ **/
+void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.power_up_serdes)
+ hw->mac.ops.power_up_serdes(hw);
+}
+
+/**
+ * e1000_shutdown_fiber_serdes_link - Remove link during power down
+ * @hw: pointer to the HW structure
+ *
+ * Shutdown the optics and PCS on driver unload.
+ **/
+void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.shutdown_serdes)
+ hw->mac.ops.shutdown_serdes(hw);
+}
+
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_api.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_api.h
new file mode 100644
index 00000000..0bc471d9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_api.h
@@ -0,0 +1,167 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_API_H_
+#define _E1000_API_H_
+
+#include "e1000_hw.h"
+
+extern void e1000_init_function_pointers_82542(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_82543(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_82540(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_82571(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_82541(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
+extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
+extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_i210(struct e1000_hw *hw);
+
+s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr);
+s32 e1000_set_mac_type(struct e1000_hw *hw);
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
+s32 e1000_init_mac_params(struct e1000_hw *hw);
+s32 e1000_init_nvm_params(struct e1000_hw *hw);
+s32 e1000_init_phy_params(struct e1000_hw *hw);
+s32 e1000_init_mbx_params(struct e1000_hw *hw);
+s32 e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_clear_vfta(struct e1000_hw *hw);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+s32 e1000_force_mac_fc(struct e1000_hw *hw);
+s32 e1000_check_for_link(struct e1000_hw *hw);
+s32 e1000_reset_hw(struct e1000_hw *hw);
+s32 e1000_init_hw(struct e1000_hw *hw);
+s32 e1000_setup_link(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+s32 e1000_disable_pcie_master(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count);
+s32 e1000_setup_led(struct e1000_hw *hw);
+s32 e1000_cleanup_led(struct e1000_hw *hw);
+s32 e1000_check_reset_block(struct e1000_hw *hw);
+s32 e1000_blink_led(struct e1000_hw *hw);
+s32 e1000_led_on(struct e1000_hw *hw);
+s32 e1000_led_off(struct e1000_hw *hw);
+s32 e1000_id_led_init(struct e1000_hw *hw);
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+s32 e1000_get_cable_length(struct e1000_hw *hw);
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
+ u8 data);
+s32 e1000_get_phy_info(struct e1000_hw *hw);
+void e1000_release_phy(struct e1000_hw *hw);
+s32 e1000_acquire_phy(struct e1000_hw *hw);
+s32 e1000_cfg_on_link_up(struct e1000_hw *hw);
+s32 e1000_phy_hw_reset(struct e1000_hw *hw);
+s32 e1000_phy_commit(struct e1000_hw *hw);
+void e1000_power_up_phy(struct e1000_hw *hw);
+void e1000_power_down_phy(struct e1000_hw *hw);
+s32 e1000_read_mac_addr(struct e1000_hw *hw);
+s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num);
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size);
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
+void e1000_reload_nvm(struct e1000_hw *hw);
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum);
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr);
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+u32 e1000_translate_register_82542(u32 reg);
+
+
+
+/*
+ * TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ * a = a pointer to struct e1000_hw
+ * status = the 8 bit status field of the Rx descriptor with EOP set
+ * errors = the 8 bit error field of the Rx descriptor with EOP set
+ * length = the sum of all the length fields of the Rx descriptors that
+ * make up the current frame
+ * last_byte = the last byte of the frame DMAed by the hardware
+ * min_frame_size = the minimum frame length we want to accept.
+ * max_frame_size = the maximum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ * ...
+ * if (TBI_ACCEPT) {
+ * accept_frame = true;
+ * e1000_tbi_adjust_stats(adapter, MacAddress);
+ * frame_length--;
+ * } else {
+ * accept_frame = false;
+ * }
+ * ...
+ */
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION 0x0F
+
+#define TBI_ACCEPT(a, status, errors, length, last_byte, \
+ min_frame_size, max_frame_size) \
+ (e1000_tbi_sbp_enabled_82543(a) && \
+ (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+ ((last_byte) == CARRIER_EXTENSION) && \
+ (((status) & E1000_RXD_STAT_VP) ? \
+ (((length) > ((min_frame_size) - VLAN_TAG_SIZE)) && \
+ ((length) <= ((max_frame_size) + 1))) : \
+ (((length) > (min_frame_size)) && \
+ ((length) <= ((max_frame_size) + VLAN_TAG_SIZE + 1)))))
+
+#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b))
+#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */
+#endif /* _E1000_API_H_ */
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_defines.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_defines.h
new file mode 100644
index 00000000..e2101c17
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_defines.h
@@ -0,0 +1,1514 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC E1000_WUFC_LNKC
+#define E1000_WUS_MAG E1000_WUFC_MAG
+#define E1000_WUS_EX E1000_WUFC_EX
+#define E1000_WUS_MC E1000_WUFC_MC
+#define E1000_WUS_BC E1000_WUFC_BC
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */
+/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */
+#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */
+#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
+#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+/* Physical Func Reset Done Indication */
+#define E1000_CTRL_EXT_PFRSTD 0x00004000
+#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */
+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+/* Offset of the link mode field in Ctrl Ext register */
+#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_EIAME 0x01000000
+#define E1000_CTRL_EXT_IRCA 0x00000001
+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
+#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_LSECCK 0x00001000
+#define E1000_CTRL_EXT_PHYPDEN 0x00100000
+#define E1000_I2CCMD_REG_ADDR_SHIFT 16
+#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
+#define E1000_I2CCMD_OPCODE_READ 0x08000000
+#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
+#define E1000_I2CCMD_READY 0x20000000
+#define E1000_I2CCMD_ERROR 0x80000000
+#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a))
+#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a))
+#define E1000_MAX_SGMII_PHY_REG_ADDR 255
+#define E1000_I2CCMD_PHY_TIMEOUT 200
+#define E1000_IVAR_VALID 0x80
+#define E1000_GPIE_NSICR 0x00000001
+#define E1000_GPIE_MSIX_MODE 0x00000010
+#define E1000_GPIE_EIAME 0x40000000
+#define E1000_GPIE_PBA 0x80000000
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
+#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
+#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+
+#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
+#define E1000_RXDEXT_STATERR_LB 0x00040000
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_TCPE 0x20000000
+#define E1000_RXDEXT_STATERR_IPE 0x40000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+ E1000_RXD_ERR_CE | \
+ E1000_RXD_ERR_SE | \
+ E1000_RXD_ERR_SEQ | \
+ E1000_RXD_ERR_CXE | \
+ E1000_RXD_ERR_RXE)
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
+
+#if !defined(EXTERNAL_RELEASE) || defined(E1000E_MQ)
+#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001
+#endif /* !EXTERNAL_RELEASE || E1000E_MQ */
+#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST 0x00200000
+
+#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
+
+/* Receive Control */
+#define E1000_RCTL_RST 0x00000001 /* Software reset */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
+#define E1000_RCTL_RDMTS_HEX 0x00010000
+#define E1000_RCTL_RDMTS1_HEX E1000_RCTL_RDMTS_HEX
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
+#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ * E1000_PSRCTL_BSIZE0_MASK) |
+ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ * E1000_PSRCTL_BSIZE1_MASK) |
+ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ * E1000_PSRCTL_BSIZE2_MASK) |
+ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |;
+ * E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256], default=256
+ * value1 = [1024..64512], default=4096
+ * value2 = [0..64512], default=4096
+ * value3 = [0..64512], default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM 0x01
+#define E1000_SWFW_PHY0_SM 0x02
+#define E1000_SWFW_PHY1_SM 0x04
+#define E1000_SWFW_CSR_SM 0x08
+#define E1000_SWFW_PHY2_SM 0x20
+#define E1000_SWFW_PHY3_SM 0x40
+#define E1000_SWFW_SW_MNG_SM 0x400
+
+/* Device Control */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
+#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
+#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
+
+#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3
+
+#define E1000_CONNSW_ENRGSRC 0x4
+#define E1000_CONNSW_PHYSD 0x400
+#define E1000_CONNSW_PHY_PDN 0x800
+#define E1000_CONNSW_SERDESD 0x200
+#define E1000_CONNSW_AUTOSENSE_CONF 0x2
+#define E1000_CONNSW_AUTOSENSE_EN 0x1
+#define E1000_PCS_CFG_PCS_EN 8
+#define E1000_PCS_LCTL_FLV_LINK_UP 1
+#define E1000_PCS_LCTL_FSV_10 0
+#define E1000_PCS_LCTL_FSV_100 2
+#define E1000_PCS_LCTL_FSV_1000 4
+#define E1000_PCS_LCTL_FDV_FULL 8
+#define E1000_PCS_LCTL_FSD 0x10
+#define E1000_PCS_LCTL_FORCE_LINK 0x20
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+#define E1000_PCS_LCTL_AN_ENABLE 0x10000
+#define E1000_PCS_LCTL_AN_RESTART 0x20000
+#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
+#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
+
+#define E1000_PCS_LSTS_LINK_OK 1
+#define E1000_PCS_LSTS_SPEED_100 2
+#define E1000_PCS_LSTS_SPEED_1000 4
+#define E1000_PCS_LSTS_DUPLEX_FULL 8
+#define E1000_PCS_LSTS_SYNK_OK 0x10
+#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
+
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */
+#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
+#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
+#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
+#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */
+#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */
+#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus spd 50-66MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus spd 66-100MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus spd 100-133MHz*/
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+#define PHY_FORCE_TIME 20
+
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_PHY_LED0_MODE_MASK 0x00000007
+#define E1000_PHY_LED0_IVRT 0x00000008
+#define E1000_PHY_LED0_MASK 0x0000001F
+
+#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+#define E1000_LEDCTL_LED0_IVRT 0x00000040
+#define E1000_LEDCTL_LED0_BLINK 0x00000080
+
+#define E1000_LEDCTL_MODE_LINK_UP 0x2
+#define E1000_LEDCTL_MODE_LED_ON 0xE
+#define E1000_LEDCTL_MODE_LED_OFF 0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+
+/* Transmit Control */
+#define E1000_TCTL_EN 0x00000002 /* enable Tx */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
+
+/* Transmit Arbitration Count */
+#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
+#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+/* Header split receive */
+#define E1000_RFCTL_NFSW_DIS 0x00000040
+#define E1000_RFCTL_NFSR_DIS 0x00000080
+#define E1000_RFCTL_ACK_DIS 0x00001000
+#define E1000_RFCTL_EXTEN 0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+#define E1000_RFCTL_LEF 0x00040000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD 15
+#define E1000_CT_SHIFT 4
+#define E1000_COLLISION_DISTANCE 63
+#define E1000_COLD_SHIFT 12
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82542_TIPG_IPGT 10
+#define DEFAULT_82543_TIPG_IPGT_FIBER 9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK 0x000003FF
+
+#define DEFAULT_82542_TIPG_IPGR1 2
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT 10
+
+#define DEFAULT_82542_TIPG_IPGR2 10
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT 20
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+
+#define ETHERNET_FCS_SIZE 4
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+/* The datasheet maximum supported RX size is 9.5KB (9728 bytes) */
+#define MAX_RX_JUMBO_FRAME_SIZE 0x2600
+#define E1000_TX_PTR_GAP 0x1F
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
+#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
+
+#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
+
+#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+
+/* Low Power IDLE Control */
+#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */
+
+/* PBA constants */
+#define E1000_PBA_8K 0x0008 /* 8KB */
+#define E1000_PBA_10K 0x000A /* 10KB */
+#define E1000_PBA_12K 0x000C /* 12KB */
+#define E1000_PBA_14K 0x000E /* 14KB */
+#define E1000_PBA_16K 0x0010 /* 16KB */
+#define E1000_PBA_18K 0x0012
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_26K 0x001A
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_35K 0x0023
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030 /* 48KB */
+#define E1000_PBA_64K 0x0040 /* 64KB */
+
+#define E1000_PBA_RXA_MASK 0xFFFF
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
+#define E1000_PBECCSTS_ECC_ENABLE 0x00010000
+
+#define IFS_MAX 80
+#define IFS_MIN 40
+#define IFS_RATIO 4
+#define IFS_STEP 10
+#define MIN_NUM_XMITS 1000
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+
+#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXO 0x00000040 /* Rx overrun */
+#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
+#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
+#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW 0x00008000
+#define E1000_ICR_MNG 0x00040000 /* Manageability event */
+#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
+#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
+#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
+/* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_INT_ASSERTED 0x80000000
+#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
+#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
+#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
+#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
+#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
+#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
+#define E1000_ICR_FER 0x00400000 /* Fatal Error */
+
+#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/
+#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */
+
+/* PBA ECC Register */
+#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
+#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */
+#define E1000_PBA_ECC_CORR_EN 0x00000001 /* Enable ECC error correction */
+#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
+#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 on ECC error */
+
+/* Extended Interrupt Cause Read */
+#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
+#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+/* TCP Timer */
+#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */
+#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */
+#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */
+#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */
+#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
+#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
+#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
+#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
+#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
+#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
+#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
+#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */
+#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */
+#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */
+
+#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/
+#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */
+/* Extended Interrupt Mask Set */
+#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+
+/* Extended Interrupt Cause Set */
+#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
+
+#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
+/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
+#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
+#define E1000_EITR_INTERVAL 0x00007FFC
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+/* Enable the counting of descriptors still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
+
+/* 802.1q VLAN Packet Size */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots. However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES 15
+#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
+#define E1000_RAH_QUEUE_MASK_82575 0x000C0000
+#define E1000_RAH_POOL_1 0x00040000
+
+/* Error Codes */
+#define E1000_SUCCESS 0
+#define E1000_ERR_NVM 1
+#define E1000_ERR_PHY 2
+#define E1000_ERR_CONFIG 3
+#define E1000_ERR_PARAM 4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET 9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET 12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_MBX 15
+#define E1000_ERR_INVALID_ARGUMENT 16
+#define E1000_ERR_NO_SPACE 17
+#define E1000_ERR_NVM_PBA_SECTION 18
+#define E1000_ERR_I2C 19
+#define E1000_ERR_INVM_VALUE_NOT_FOUND 20
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define FIBER_LINK_UP_LIMIT 50
+#define COPPER_LINK_UP_LIMIT 10
+#define PHY_AUTO_NEG_LIMIT 45
+#define PHY_FORCE_LIMIT 20
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT 800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT 100
+/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
+#define MDIO_OWNERSHIP_TIMEOUT 10
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT 10
+
+/* Flow Control */
+#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
+#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
+#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
+#define E1000_RXCW_C 0x20000000 /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+
+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+
+/* HH Time Sync */
+#define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
+#define E1000_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */
+#define E1000_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */
+#define E1000_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
+
+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
+
+#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000
+
+#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+#define E1000_TIMINCA_INCPERIOD_SHIFT 24
+#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
+
+#define E1000_TSICR_TXTS 0x00000002
+#define E1000_TSIM_TXTS 0x00000002
+/* TUPLE Filtering Configuration */
+#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */
+#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */
+#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */
+/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL SHIFT */
+#define E1000_TTQF_PROTOCOL_TCP 0x0
+/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_UDP 0x1
+/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_SCTP 0x2
+#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */
+#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shfit */
+#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */
+#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */
+#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */
+#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */
+#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
+#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
+
+#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK 0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT 21
+
+#define E1000_MEDIA_PORT_COPPER 1
+#define E1000_MEDIA_PORT_OTHER 2
+#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2
+#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3
+#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */
+#define E1000_M88E1112_MAC_CTRL_1 0x10
+#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
+#define E1000_M88E1112_PAGE_ADDR 0x16
+#define E1000_M88E1112_STATUS 0x01
+
+#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */
+#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */
+#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */
+#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
+#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */
+
+/* I350 EEE defines */
+#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */
+#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */
+#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */
+#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */
+#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */
+/* EEE status */
+#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
+#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */
+#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */
+#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */
+#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
+#define E1000_M88E1543_EEE_CTRL_1 0x0
+#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+#define E1000_M88E1543_FIBER_CTRL 0x0 /* Fiber Control Register */
+#define E1000_EEE_ADV_DEV_I354 7
+#define E1000_EEE_ADV_ADDR_I354 60
+#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
+#define E1000_PCS_STATUS_DEV_I354 3
+#define E1000_PCS_STATUS_ADDR_I354 1
+#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400
+#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800
+#define E1000_M88E1512_CFG_REG_1 0x0010
+#define E1000_M88E1512_CFG_REG_2 0x0011
+#define E1000_M88E1512_CFG_REG_3 0x0007
+#define E1000_M88E1512_MODE 0x0014
+#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
+#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
+#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
+/* PCI Express Control */
+#define E1000_GCR_RXD_NO_SNOOP 0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
+#define E1000_GCR_TXD_NO_SNOOP 0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
+#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
+#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define E1000_GCR_CAP_VER2 0x00040000
+
+#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
+ E1000_GCR_RXDSCW_NO_SNOOP | \
+ E1000_GCR_RXDSCR_NO_SNOOP | \
+ E1000_GCR_TXD_NO_SNOOP | \
+ E1000_GCR_TXDSCW_NO_SNOOP | \
+ E1000_GCR_TXDSCR_NO_SNOOP)
+
+#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
+
+/* mPHY address control and data registers */
+#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */
+#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
+#define E1000_MPHY_DATA 0x0E10 /* Data Register */
+
+/* AFE CSR Offset for PCS CLK */
+#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004
+/* Override for near end digital loopback. */
+#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */
+#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */
+#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */
+#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
+#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD 0x0002 /* LP 10T Half Dplx Capable */
+#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP 10T Full Dplx Capable */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP 100TX Half Dplx Capable */
+#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP 100TX Full Dplx Capable */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+/* 1=Repeater/switch device port 0=DTE device */
+#define CR_1000T_REPEATER_DTE 0x0400
+/* 1=Configure PHY as Master 0=Configure PHY as Slave */
+#define CR_1000T_MS_VALUE 0x0800
+/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
+#define CR_1000T_MS_ENABLE 0x1000
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */
+#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */
+#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
+
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+
+#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
+
+/* NVM Control */
+#define E1000_EECD_SK 0x00000001 /* NVM Clock */
+#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI 0x00000004 /* NVM Data In */
+#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
+#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES 0x00000100 /* NVM Present */
+#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
+#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */
+#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */
+#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */
+#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */
+/* NVM Addressing bits based on type 0=small, 1=large */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */
+#ifndef E1000_NVM_GRANT_ATTEMPTS
+#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
+#endif
+#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT 11
+#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */
+#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
+#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
+#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
+#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */
+#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
+#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */
+#define E1000_FLUDONE_ATTEMPTS 20000
+#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
+#define E1000_I210_FIFO_SEL_RX 0x00
+#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i))
+#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
+#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
+#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
+
+#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */
+/* Secure FLASH mode requires removing MSb */
+#define E1000_I210_FW_PTR_MASK 0x7FFF
+/* Firmware code revision field word offset*/
+#define E1000_I210_FW_VER_OFFSET 328
+
+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */
+#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START 1 /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define E1000_FLASH_UPDATES 2000
+
+/* NVM Word Offsets */
+#define NVM_COMPAT 0x0003
+#define NVM_ID_LED_SETTINGS 0x0004
+#define NVM_VERSION 0x0005
+#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */
+#define NVM_PHY_CLASS_WORD 0x0007
+#define E1000_I210_NVM_FW_MODULE_PTR 0x0010
+#define E1000_I350_NVM_FW_MODULE_PTR 0x0051
+#define NVM_FUTURE_INIT_WORD1 0x0019
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_ETRACK_HIWORD 0x0043
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+
+/* NVM version defines */
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_IMAGE_ID_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
+#define NVM_ETRACK_VALID 0x8000
+#define NVM_NEW_DEC_MASK 0x0F00
+#define NVM_HEX_CONV 16
+#define NVM_HEX_TENS 10
+
+/* FW version defines */
+/* Offset of "Loader patch ptr" in Firmware Header */
+#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01
+/* Patch generation hour & minutes */
+#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04
+/* Patch generation month & day */
+#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05
+/* Patch generation year */
+#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06
+/* Patch major & minor numbers */
+#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07
+
+#define NVM_MAC_ADDR 0x0000
+#define NVM_SUB_DEV_ID 0x000B
+#define NVM_SUB_VEN_ID 0x000C
+#define NVM_DEV_ID 0x000D
+#define NVM_VEN_ID 0x000E
+#define NVM_INIT_CTRL_2 0x000F
+#define NVM_INIT_CTRL_4 0x0013
+#define NVM_LED_1_CFG 0x001C
+#define NVM_LED_0_2_CFG 0x001F
+
+#define NVM_COMPAT_VALID_CSUM 0x0001
+#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
+
+#define NVM_INIT_CONTROL2_REG 0x000F
+#define NVM_INIT_CONTROL3_PORT_B 0x0014
+#define NVM_INIT_3GIO_3 0x001A
+#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define NVM_INIT_CONTROL3_PORT_A 0x0024
+#define NVM_CFG 0x0012
+#define NVM_ALT_MAC_ADDR_PTR 0x0037
+#define NVM_CHECKSUM_REG 0x003F
+#define NVM_COMPATIBILITY_REG_3 0x0003
+#define NVM_COMPATIBILITY_BIT_MASK 0x8000
+
+#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */
+#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */
+
+#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0)
+
+/* Mask bits for fields in Word 0x24 of the NVM */
+#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */
+#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */
+/* Offset of Link Mode bits for 82575/82576 */
+#define NVM_WORD24_LNK_MODE_OFFSET 8
+/* Offset of Link Mode bits for 82580 up */
+#define NVM_WORD24_82580_LNK_MODE_OFFSET 4
+
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK 0x3000
+#define NVM_WORD0F_PAUSE 0x1000
+#define NVM_WORD0F_ASM_DIR 0x2000
+#define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK 0x000C
+
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM 0x0800
+
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH 11
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM 0xBABA
+
+/* PBA (printed board assembly) number words */
+#define NVM_PBA_OFFSET_0 8
+#define NVM_PBA_OFFSET_1 9
+#define NVM_PBA_PTR_GUARD 0xFAFA
+#define NVM_RESERVED_WORD 0xFFFF
+#define NVM_PHY_CLASS_A 0x8000
+#define NVM_SERDES_AMPLITUDE_MASK 0x000F
+#define NVM_SIZE_MASK 0x1C00
+#define NVM_SIZE_SHIFT 10
+#define NVM_WORD_SIZE_BASE_SHIFT 6
+#define NVM_SWDPIO_EXT_SHIFT 4
+
+/* NVM Commands - Microwire */
+#define NVM_READ_OPCODE_MICROWIRE 0x6 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_MICROWIRE 0x5 /* NVM write opcode */
+#define NVM_ERASE_OPCODE_MICROWIRE 0x7 /* NVM erase opcode */
+#define NVM_EWEN_OPCODE_MICROWIRE 0x13 /* NVM erase/write enable */
+#define NVM_EWDS_OPCODE_MICROWIRE 0x10 /* NVM erase/write disable */
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI 0x01
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2 0x1
+#define ID_LED_DEF1_ON2 0x2
+#define ID_LED_DEF1_OFF2 0x3
+#define ID_LED_ON1_DEF2 0x4
+#define ID_LED_ON1_ON2 0x5
+#define ID_LED_ON1_OFF2 0x6
+#define ID_LED_OFF1_DEF2 0x7
+#define ID_LED_OFF1_ON2 0x8
+#define ID_LED_OFF1_OFF2 0x9
+
+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE 0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCIX_COMMAND_REGISTER 0xE6
+#define PCIX_STATUS_REGISTER_LO 0xE8
+#define PCIX_STATUS_REGISTER_HI 0xEA
+#define PCI_HEADER_TYPE_REGISTER 0x0E
+#define PCIE_LINK_STATUS 0x12
+#define PCIE_DEVICE_CONTROL2 0x28
+
+#define PCIX_COMMAND_MMRBC_MASK 0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT 0x2
+#define PCIX_STATUS_HI_MMRBC_MASK 0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5
+#define PCIX_STATUS_HI_MMRBC_4K 0x3
+#define PCIX_STATUS_HI_MMRBC_2K 0x2
+#define PCIX_STATUS_LO_FUNC_MASK 0x7
+#define PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define PCIE_LINK_WIDTH_MASK 0x3F0
+#define PCIE_LINK_WIDTH_SHIFT 4
+#define PCIE_LINK_SPEED_MASK 0x0F
+#define PCIE_LINK_SPEED_2500 0x01
+#define PCIE_LINK_SPEED_5000 0x02
+#define PCIE_DEVICE_CONTROL2_16ms 0x0005
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs.
+ * I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID 0x01410C50
+#define M88E1000_I_PHY_ID 0x01410C30
+#define M88E1011_I_PHY_ID 0x01410C20
+#define IGP01E1000_I_PHY_ID 0x02A80380
+#define M88E1111_I_PHY_ID 0x01410CC0
+#define M88E1543_E_PHY_ID 0x01410EA0
+#define M88E1512_E_PHY_ID 0x01410DD0
+#define M88E1112_E_PHY_ID 0x01410C90
+#define I347AT4_E_PHY_ID 0x01410DC0
+#define M88E1340M_E_PHY_ID 0x01410DF0
+#define GG82563_E_PHY_ID 0x01410CA0
+#define IGP03E1000_E_PHY_ID 0x02A80390
+#define IFE_E_PHY_ID 0x02A80330
+#define IFE_PLUS_E_PHY_ID 0x02A80320
+#define IFE_C_E_PHY_ID 0x02A80310
+#define BME1000_E_PHY_ID 0x01410CB0
+#define BME1000_E_PHY_ID_R2 0x01410CB1
+#define I82577_E_PHY_ID 0x01540050
+#define I82578_E_PHY_ID 0x004DD040
+#define I82579_E_PHY_ID 0x01540090
+#define I217_E_PHY_ID 0x015400A0
+#define I82580_I_PHY_ID 0x015403A0
+#define I350_I_PHY_ID 0x015403B0
+#define I210_I_PHY_ID 0x01410C00
+#define IGP04E1000_E_PHY_ID 0x02A80391
+#define BCM54616_E_PHY_ID 0x03625D10
+#define M88_VENDOR 0x0141
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */
+#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */
+#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */
+#define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
+/* MDI Crossover Mode bits 6:5 Manual MDI configuration */
+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE 0x0060
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+/* 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define M88E1000_PSSR_CABLE_LENGTH 0x0380
+#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
+#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+
+/* Intel I347AT4 Registers */
+#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
+#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
+#define I347AT4_PAGE_SELECT 0x16
+
+/* I347AT4 Extended PHY Specific Control Register */
+
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
+#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
+#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
+#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
+#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
+#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
+#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
+#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
+#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
+#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
+
+/* I347AT4 PHY Cable Diagnostics Control */
+#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
+
+/* M88E1112 only registers */
+#define M88E1112_VCT_DSP_DISTANCE 0x001A
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+
+#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020
+#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C
+
+/* BME1000 PHY Specific Control Register */
+#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT 5
+#define GG82563_REG(page, reg) \
+ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG 30
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */
+#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */
+#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */
+
+/* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21)
+
+#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+/* Kumeran Mode Control */
+#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16)
+#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */
+
+/* MDI Control */
+#define E1000_MDIC_REG_MASK 0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK 0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE 0x04000000
+#define E1000_MDIC_OP_READ 0x08000000
+#define E1000_MDIC_READY 0x10000000
+#define E1000_MDIC_ERROR 0x40000000
+#define E1000_MDIC_DEST 0x80000000
+
+/* SerDes Control */
+#define E1000_GEN_CTL_READY 0x80000000
+#define E1000_GEN_CTL_ADDRESS_SHIFT 8
+#define E1000_GEN_POLL_TIMEOUT 640
+
+/* LinkSec register fields */
+#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000
+#define E1000_LSECTXCAP_SUM_SHIFT 16
+#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000
+#define E1000_LSECRXCAP_SUM_SHIFT 16
+
+#define E1000_LSECTXCTRL_EN_MASK 0x00000003
+#define E1000_LSECTXCTRL_DISABLE 0x0
+#define E1000_LSECTXCTRL_AUTH 0x1
+#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2
+#define E1000_LSECTXCTRL_AISCI 0x00000020
+#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
+#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8
+
+#define E1000_LSECRXCTRL_EN_MASK 0x0000000C
+#define E1000_LSECRXCTRL_EN_SHIFT 2
+#define E1000_LSECRXCTRL_DISABLE 0x0
+#define E1000_LSECRXCTRL_CHECK 0x1
+#define E1000_LSECRXCTRL_STRICT 0x2
+#define E1000_LSECRXCTRL_DROP 0x3
+#define E1000_LSECRXCTRL_PLSH 0x00000040
+#define E1000_LSECRXCTRL_RP 0x00000080
+#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33
+
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA 0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT 14
+#define E1000_RTTBCNRC_RF_INT_MASK \
+ (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+
+/* DMA Coalescing register fields */
+/* DMA Coalescing Watchdog Timer */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF
+/* DMA Coalescing Rx Threshold */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000
+#define E1000_DMACR_DMACTHR_SHIFT 16
+/* Lx when no PCIe transactions */
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000
+#define E1000_DMACR_DMAC_LX_SHIFT 28
+#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
+/* DMA Coalescing BMC-to-OS Watchdog Enable */
+#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
+
+/* DMA Coalescing Transmit Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF
+
+#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
+
+/* Rx Traffic Rate Threshold */
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF
+/* Rx packet rate in current window */
+#define E1000_DMCRTRH_LRPRCW 0x80000000
+
+/* DMA Coal Rx Traffic Current Count */
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF
+
+/* Flow ctrl Rx Threshold High val */
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0
+#define E1000_FCRTC_RTH_COAL_SHIFT 4
+/* Lx power decision based on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION 0x00000080
+
+#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
+#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */
+#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */
+#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+
+
+/* Proxy Filter Control */
+#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */
+#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */
+#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */
+#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */
+#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */
+#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */
+#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */
+#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */
+/* Proxy Status */
+#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */
+
+/* Firmware Status */
+#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */
+/* VF Control */
+#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */
+
+#define E1000_STATUS_LAN_ID_MASK 0x00000000C /* Mask for Lan ID field */
+/* Lan ID bit field offset in status register */
+#define E1000_STATUS_LAN_ID_OFFSET 2
+#define E1000_VFTA_ENTRIES 128
+#ifndef E1000_UNUSEDARG
+#define E1000_UNUSEDARG
+#endif /* E1000_UNUSEDARG */
+#ifndef ERROR_REPORT
+#define ERROR_REPORT(fmt) do { } while (0)
+#endif /* ERROR_REPORT */
+#endif /* _E1000_DEFINES_H_ */
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_hw.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_hw.h
new file mode 100644
index 00000000..d9de9fc1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_hw.h
@@ -0,0 +1,1049 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82542 0x1000
+#define E1000_DEV_ID_82543GC_FIBER 0x1001
+#define E1000_DEV_ID_82543GC_COPPER 0x1004
+#define E1000_DEV_ID_82544EI_COPPER 0x1008
+#define E1000_DEV_ID_82544EI_FIBER 0x1009
+#define E1000_DEV_ID_82544GC_COPPER 0x100C
+#define E1000_DEV_ID_82544GC_LOM 0x100D
+#define E1000_DEV_ID_82540EM 0x100E
+#define E1000_DEV_ID_82540EM_LOM 0x1015
+#define E1000_DEV_ID_82540EP_LOM 0x1016
+#define E1000_DEV_ID_82540EP 0x1017
+#define E1000_DEV_ID_82540EP_LP 0x101E
+#define E1000_DEV_ID_82545EM_COPPER 0x100F
+#define E1000_DEV_ID_82545EM_FIBER 0x1011
+#define E1000_DEV_ID_82545GM_COPPER 0x1026
+#define E1000_DEV_ID_82545GM_FIBER 0x1027
+#define E1000_DEV_ID_82545GM_SERDES 0x1028
+#define E1000_DEV_ID_82546EB_COPPER 0x1010
+#define E1000_DEV_ID_82546EB_FIBER 0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82546GB_COPPER 0x1079
+#define E1000_DEV_ID_82546GB_FIBER 0x107A
+#define E1000_DEV_ID_82546GB_SERDES 0x107B
+#define E1000_DEV_ID_82546GB_PCIE 0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_82541EI 0x1013
+#define E1000_DEV_ID_82541EI_MOBILE 0x1018
+#define E1000_DEV_ID_82541ER_LOM 0x1014
+#define E1000_DEV_ID_82541ER 0x1078
+#define E1000_DEV_ID_82541GI 0x1076
+#define E1000_DEV_ID_82541GI_LF 0x107C
+#define E1000_DEV_ID_82541GI_MOBILE 0x1077
+#define E1000_DEV_ID_82547EI 0x1019
+#define E1000_DEV_ID_82547EI_MOBILE 0x101A
+#define E1000_DEV_ID_82547GI 0x1075
+#define E1000_DEV_ID_82571EB_COPPER 0x105E
+#define E1000_DEV_ID_82571EB_FIBER 0x105F
+#define E1000_DEV_ID_82571EB_SERDES 0x1060
+#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC
+#define E1000_DEV_ID_82572EI_COPPER 0x107D
+#define E1000_DEV_ID_82572EI_FIBER 0x107E
+#define E1000_DEV_ID_82572EI_SERDES 0x107F
+#define E1000_DEV_ID_82572EI 0x10B9
+#define E1000_DEV_ID_82573E 0x108B
+#define E1000_DEV_ID_82573E_IAMT 0x108C
+#define E1000_DEV_ID_82573L 0x109A
+#define E1000_DEV_ID_82574L 0x10D3
+#define E1000_DEV_ID_82574LA 0x10F6
+#define E1000_DEV_ID_82583V 0x150C
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+#define E1000_DEV_ID_ICH8_82567V_3 0x1501
+#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
+#define E1000_DEV_ID_ICH8_IGP_C 0x104B
+#define E1000_DEV_ID_ICH8_IFE 0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M 0x104D
+#define E1000_DEV_ID_ICH9_IGP_M 0x10BF
+#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5
+#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB
+#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD
+#define E1000_DEV_ID_ICH9_BM 0x10E5
+#define E1000_DEV_ID_ICH9_IGP_C 0x294C
+#define E1000_DEV_ID_ICH9_IFE 0x10C0
+#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3
+#define E1000_DEV_ID_ICH9_IFE_G 0x10C2
+#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC
+#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD
+#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
+#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
+#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
+#define E1000_DEV_ID_ICH10_D_BM_V 0x1525
+#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
+#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
+#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
+#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
+#define E1000_DEV_ID_PCH2_LV_LM 0x1502
+#define E1000_DEV_ID_PCH2_LV_V 0x1503
+#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A
+#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
+#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
+#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
+#define E1000_DEV_ID_PCH_I218_LM2 0x15A0
+#define E1000_DEV_ID_PCH_I218_V2 0x15A1
+#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* Sunrise Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* Sunrise Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* Sunrise Point-H PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* Sunrise Point-H PCH */
+#define E1000_DEV_ID_PCH_LBG_I219_LM3 0x15B9 /* LEWISBURG PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM4 0x15D7
+#define E1000_DEV_ID_PCH_SPT_I219_V4 0x15D8
+#define E1000_DEV_ID_PCH_SPT_I219_LM5 0x15E3
+#define E1000_DEV_ID_PCH_SPT_I219_V5 0x15D6
+#define E1000_DEV_ID_PCH_CNP_I219_LM6 0x15BD
+#define E1000_DEV_ID_PCH_CNP_I219_V6 0x15BE
+#define E1000_DEV_ID_PCH_CNP_I219_LM7 0x15BB
+#define E1000_DEV_ID_PCH_CNP_I219_V7 0x15BC
+#define E1000_DEV_ID_82576 0x10C9
+#define E1000_DEV_ID_82576_FIBER 0x10E6
+#define E1000_DEV_ID_82576_SERDES 0x10E7
+#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
+#define E1000_DEV_ID_82576_NS 0x150A
+#define E1000_DEV_ID_82576_NS_SERDES 0x1518
+#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
+#define E1000_DEV_ID_82576_VF 0x10CA
+#define E1000_DEV_ID_82576_VF_HV 0x152D
+#define E1000_DEV_ID_I350_VF 0x1520
+#define E1000_DEV_ID_I350_VF_HV 0x152F
+#define E1000_DEV_ID_82575EB_COPPER 0x10A7
+#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
+#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
+#define E1000_DEV_ID_82580_COPPER 0x150E
+#define E1000_DEV_ID_82580_FIBER 0x150F
+#define E1000_DEV_ID_82580_SERDES 0x1510
+#define E1000_DEV_ID_82580_SGMII 0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
+#define E1000_DEV_ID_I350_COPPER 0x1521
+#define E1000_DEV_ID_I350_FIBER 0x1522
+#define E1000_DEV_ID_I350_SERDES 0x1523
+#define E1000_DEV_ID_I350_SGMII 0x1524
+#define E1000_DEV_ID_I350_DA4 0x1546
+#define E1000_DEV_ID_I210_COPPER 0x1533
+#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
+#define E1000_DEV_ID_I210_COPPER_IT 0x1535
+#define E1000_DEV_ID_I210_FIBER 0x1536
+#define E1000_DEV_ID_I210_SERDES 0x1537
+#define E1000_DEV_ID_I210_SGMII 0x1538
+#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
+#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
+#define E1000_DEV_ID_I211_COPPER 0x1539
+#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
+#define E1000_DEV_ID_I354_SGMII 0x1F41
+#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
+#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
+#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
+#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
+#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
+
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_0 0
+#define E1000_FUNC_1 1
+#define E1000_FUNC_2 2
+#define E1000_FUNC_3 3
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
+
+enum e1000_mac_type {
+ e1000_undefined = 0,
+ e1000_82542,
+ e1000_82543,
+ e1000_82544,
+ e1000_82540,
+ e1000_82545,
+ e1000_82545_rev_3,
+ e1000_82546,
+ e1000_82546_rev_3,
+ e1000_82541,
+ e1000_82541_rev_2,
+ e1000_82547,
+ e1000_82547_rev_2,
+ e1000_82571,
+ e1000_82572,
+ e1000_82573,
+ e1000_82574,
+ e1000_82583,
+ e1000_80003es2lan,
+ e1000_ich8lan,
+ e1000_ich9lan,
+ e1000_ich10lan,
+ e1000_pchlan,
+ e1000_pch2lan,
+ e1000_pch_lpt,
+ e1000_pch_spt,
+ e1000_pch_cnp,
+ e1000_82575,
+ e1000_82576,
+ e1000_82580,
+ e1000_i350,
+ e1000_i354,
+ e1000_i210,
+ e1000_i211,
+ e1000_vfadapt,
+ e1000_vfadapt_i350,
+ e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
+};
+
+enum e1000_media_type {
+ e1000_media_type_unknown = 0,
+ e1000_media_type_copper = 1,
+ e1000_media_type_fiber = 2,
+ e1000_media_type_internal_serdes = 3,
+ e1000_num_media_types
+};
+
+enum e1000_nvm_type {
+ e1000_nvm_unknown = 0,
+ e1000_nvm_none,
+ e1000_nvm_eeprom_spi,
+ e1000_nvm_eeprom_microwire,
+ e1000_nvm_flash_hw,
+ e1000_nvm_invm,
+ e1000_nvm_flash_sw
+};
+
+enum e1000_nvm_override {
+ e1000_nvm_override_none = 0,
+ e1000_nvm_override_spi_small,
+ e1000_nvm_override_spi_large,
+ e1000_nvm_override_microwire_small,
+ e1000_nvm_override_microwire_large
+};
+
+enum e1000_phy_type {
+ e1000_phy_unknown = 0,
+ e1000_phy_none,
+ e1000_phy_m88,
+ e1000_phy_igp,
+ e1000_phy_igp_2,
+ e1000_phy_gg82563,
+ e1000_phy_igp_3,
+ e1000_phy_ife,
+ e1000_phy_bm,
+ e1000_phy_82578,
+ e1000_phy_82577,
+ e1000_phy_82579,
+ e1000_phy_i217,
+ e1000_phy_82580,
+ e1000_phy_vf,
+ e1000_phy_i210,
+};
+
+enum e1000_bus_type {
+ e1000_bus_type_unknown = 0,
+ e1000_bus_type_pci,
+ e1000_bus_type_pcix,
+ e1000_bus_type_pci_express,
+ e1000_bus_type_reserved
+};
+
+enum e1000_bus_speed {
+ e1000_bus_speed_unknown = 0,
+ e1000_bus_speed_33,
+ e1000_bus_speed_66,
+ e1000_bus_speed_100,
+ e1000_bus_speed_120,
+ e1000_bus_speed_133,
+ e1000_bus_speed_2500,
+ e1000_bus_speed_5000,
+ e1000_bus_speed_reserved
+};
+
+enum e1000_bus_width {
+ e1000_bus_width_unknown = 0,
+ e1000_bus_width_pcie_x1,
+ e1000_bus_width_pcie_x2,
+ e1000_bus_width_pcie_x4 = 4,
+ e1000_bus_width_pcie_x8 = 8,
+ e1000_bus_width_32,
+ e1000_bus_width_64,
+ e1000_bus_width_reserved
+};
+
+enum e1000_1000t_rx_status {
+ e1000_1000t_rx_status_not_ok = 0,
+ e1000_1000t_rx_status_ok,
+ e1000_1000t_rx_status_undefined = 0xFF
+};
+
+enum e1000_rev_polarity {
+ e1000_rev_polarity_normal = 0,
+ e1000_rev_polarity_reversed,
+ e1000_rev_polarity_undefined = 0xFF
+};
+
+enum e1000_fc_mode {
+ e1000_fc_none = 0,
+ e1000_fc_rx_pause,
+ e1000_fc_tx_pause,
+ e1000_fc_full,
+ e1000_fc_default = 0xFF
+};
+
+enum e1000_ffe_config {
+ e1000_ffe_config_enabled = 0,
+ e1000_ffe_config_active,
+ e1000_ffe_config_blocked
+};
+
+enum e1000_dsp_config {
+ e1000_dsp_config_disabled = 0,
+ e1000_dsp_config_enabled,
+ e1000_dsp_config_activated,
+ e1000_dsp_config_undefined = 0xFF
+};
+
+enum e1000_ms_type {
+ e1000_ms_hw_default = 0,
+ e1000_ms_force_master,
+ e1000_ms_force_slave,
+ e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+ e1000_smart_speed_default = 0,
+ e1000_smart_speed_on,
+ e1000_smart_speed_off
+};
+
+enum e1000_serdes_link_state {
+ e1000_serdes_link_down = 0,
+ e1000_serdes_link_autoneg_progress,
+ e1000_serdes_link_autoneg_complete,
+ e1000_serdes_link_forced_up
+};
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+/* Receive Descriptor */
+struct e1000_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
+ __le16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+ struct {
+ __le64 buffer_addr;
+ __le64 reserved;
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length;
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+ struct {
+ /* one buffer for protocol header(s), three data buffers */
+ __le64 buffer_addr[MAX_PS_BUFFERS];
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length0; /* length of buffer 0 */
+ __le16 vlan; /* VLAN tag */
+ } middle;
+ struct {
+ __le16 header_status;
+ /* length of buffers 1-3 */
+ __le16 length[PS_PAGE_BUFFERS];
+ } upper;
+ __le64 reserved;
+ } wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+ union {
+ __le32 ip_config;
+ struct {
+ u8 ipcss; /* IP checksum start */
+ u8 ipcso; /* IP checksum offset */
+ __le16 ipcse; /* IP checksum end */
+ } ip_fields;
+ } lower_setup;
+ union {
+ __le32 tcp_config;
+ struct {
+ u8 tucss; /* TCP checksum start */
+ u8 tucso; /* TCP checksum offset */
+ __le16 tucse; /* TCP checksum end */
+ } tcp_fields;
+ } upper_setup;
+ __le32 cmd_and_length;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 hdr_len; /* Header length */
+ __le16 mss; /* Maximum segment size */
+ } fields;
+ } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's buffer address */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 typ_len_ext;
+ u8 cmd;
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 popts; /* Packet Options */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+ u64 crcerrs;
+ u64 algnerrc;
+ u64 symerrs;
+ u64 rxerrc;
+ u64 mpc;
+ u64 scc;
+ u64 ecol;
+ u64 mcc;
+ u64 latecol;
+ u64 colc;
+ u64 dc;
+ u64 tncrs;
+ u64 sec;
+ u64 cexterr;
+ u64 rlec;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 fcruc;
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 tor;
+ u64 tot;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 icrxptc;
+ u64 icrxatc;
+ u64 ictxptc;
+ u64 ictxatc;
+ u64 ictxqec;
+ u64 ictxqmtc;
+ u64 icrxdmtc;
+ u64 icrxoc;
+ u64 cbtmpc;
+ u64 htdpmc;
+ u64 cbrdpc;
+ u64 cbrmpc;
+ u64 rpthc;
+ u64 hgptc;
+ u64 htcbdpc;
+ u64 hgorc;
+ u64 hgotc;
+ u64 lenerrs;
+ u64 scvpc;
+ u64 hrmpc;
+ u64 doosync;
+ u64 o2bgptc;
+ u64 o2bspc;
+ u64 b2ospc;
+ u64 b2ogprc;
+};
+
+struct e1000_vf_stats {
+ u64 base_gprc;
+ u64 base_gptc;
+ u64 base_gorc;
+ u64 base_gotc;
+ u64 base_mprc;
+ u64 base_gotlbc;
+ u64 base_gptlbc;
+ u64 base_gorlbc;
+ u64 base_gprlbc;
+
+ u32 last_gprc;
+ u32 last_gptc;
+ u32 last_gorc;
+ u32 last_gotc;
+ u32 last_mprc;
+ u32 last_gotlbc;
+ u32 last_gptlbc;
+ u32 last_gorlbc;
+ u32 last_gprlbc;
+
+ u64 gprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 mprc;
+ u64 gotlbc;
+ u64 gptlbc;
+ u64 gorlbc;
+ u64 gprlbc;
+};
+
+struct e1000_phy_stats {
+ u32 idle_errors;
+ u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+ u32 signature;
+ u8 status;
+ u8 reserved0;
+ u16 vlan_id;
+ u32 reserved1;
+ u16 reserved2;
+ u8 reserved3;
+ u8 checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+ u8 command_id;
+ u8 command_length;
+ u8 command_options;
+ u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH 252
+struct e1000_host_command_info {
+ struct e1000_host_command_header command_header;
+ u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+ u8 command_id;
+ u8 checksum;
+ u16 reserved1;
+ u16 reserved2;
+ u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+ struct e1000_host_mng_command_header command_header;
+ u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_manage.h"
+#include "e1000_mbx.h"
+
+/* Function pointers for the MAC. */
+struct e1000_mac_operations {
+ s32 (*init_params)(struct e1000_hw *);
+ s32 (*id_led_init)(struct e1000_hw *);
+ s32 (*blink_led)(struct e1000_hw *);
+ bool (*check_mng_mode)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ s32 (*cleanup_led)(struct e1000_hw *);
+ void (*clear_hw_cntrs)(struct e1000_hw *);
+ void (*clear_vfta)(struct e1000_hw *);
+ s32 (*get_bus_info)(struct e1000_hw *);
+ void (*set_lan_id)(struct e1000_hw *);
+ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+ s32 (*led_on)(struct e1000_hw *);
+ s32 (*led_off)(struct e1000_hw *);
+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
+ void (*shutdown_serdes)(struct e1000_hw *);
+ void (*power_up_serdes)(struct e1000_hw *);
+ s32 (*setup_link)(struct e1000_hw *);
+ s32 (*setup_physical_interface)(struct e1000_hw *);
+ s32 (*setup_led)(struct e1000_hw *);
+ void (*write_vfta)(struct e1000_hw *, u32, u32);
+ void (*config_collision_dist)(struct e1000_hw *);
+ int (*rar_set)(struct e1000_hw *, u8*, u32);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ s32 (*validate_mdi_setting)(struct e1000_hw *);
+ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
+ void (*release_swfw_sync)(struct e1000_hw *, u16);
+};
+
+/* When to use various PHY register access functions:
+ *
+ * Func Caller
+ * Function Does Does When to use
+ * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * X_reg L,P,A n/a for simple PHY reg accesses
+ * X_reg_locked P,A L for multiple accesses of different regs
+ * on different pages
+ * X_reg_page A L,P for multiple accesses of different regs
+ * on the same page
+ *
+ * Where X=[read|write], L=locking, P=sets page, A=register access
+ *
+ */
+struct e1000_phy_operations {
+ s32 (*init_params)(struct e1000_hw *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*cfg_on_link_up)(struct e1000_hw *);
+ s32 (*check_polarity)(struct e1000_hw *);
+ s32 (*check_reset_block)(struct e1000_hw *);
+ s32 (*commit)(struct e1000_hw *);
+ s32 (*force_speed_duplex)(struct e1000_hw *);
+ s32 (*get_cfg_done)(struct e1000_hw *hw);
+ s32 (*get_cable_length)(struct e1000_hw *);
+ s32 (*get_info)(struct e1000_hw *);
+ s32 (*set_page)(struct e1000_hw *, u16);
+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *);
+ void (*release)(struct e1000_hw *);
+ s32 (*reset)(struct e1000_hw *);
+ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_page)(struct e1000_hw *, u32, u16);
+ void (*power_up)(struct e1000_hw *);
+ void (*power_down)(struct e1000_hw *);
+ s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
+ s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
+};
+
+/* Function pointers for the NVM. */
+struct e1000_nvm_operations {
+ s32 (*init_params)(struct e1000_hw *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ void (*release)(struct e1000_hw *);
+ void (*reload)(struct e1000_hw *);
+ s32 (*update)(struct e1000_hw *);
+ s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+ s32 (*validate)(struct e1000_hw *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+};
+
+struct e1000_mac_info {
+ struct e1000_mac_operations ops;
+ u8 addr[ETH_ADDR_LEN];
+ u8 perm_addr[ETH_ADDR_LEN];
+
+ enum e1000_mac_type type;
+
+ u32 collision_delta;
+ u32 ledctl_default;
+ u32 ledctl_mode1;
+ u32 ledctl_mode2;
+ u32 mc_filter_type;
+ u32 tx_packet_delta;
+ u32 txcw;
+
+ u16 current_ifs_val;
+ u16 ifs_max_val;
+ u16 ifs_min_val;
+ u16 ifs_ratio;
+ u16 ifs_step_size;
+ u16 mta_reg_count;
+ u16 uta_reg_count;
+
+ /* Maximum size of the MTA register table in all supported adapters */
+#define MAX_MTA_REG 128
+ u32 mta_shadow[MAX_MTA_REG];
+ u16 rar_entry_count;
+
+ u8 forced_speed_duplex;
+
+ bool adaptive_ifs;
+ bool has_fwsm;
+ bool arc_subsystem_valid;
+ bool asf_firmware_present;
+ bool autoneg;
+ bool autoneg_failed;
+ bool get_link_status;
+ bool in_ifs_mode;
+ bool report_tx_early;
+ enum e1000_serdes_link_state serdes_link_state;
+ bool serdes_has_link;
+ bool tx_pkt_filtering;
+};
+
+struct e1000_phy_info {
+ struct e1000_phy_operations ops;
+ enum e1000_phy_type type;
+
+ enum e1000_1000t_rx_status local_rx;
+ enum e1000_1000t_rx_status remote_rx;
+ enum e1000_ms_type ms_type;
+ enum e1000_ms_type original_ms_type;
+ enum e1000_rev_polarity cable_polarity;
+ enum e1000_smart_speed smart_speed;
+
+ u32 addr;
+ u32 id;
+ u32 reset_delay_us; /* in usec */
+ u32 revision;
+
+ enum e1000_media_type media_type;
+
+ u16 autoneg_advertised;
+ u16 autoneg_mask;
+ u16 cable_length;
+ u16 max_cable_length;
+ u16 min_cable_length;
+
+ u8 mdix;
+
+ bool disable_polarity_correction;
+ bool is_mdix;
+ bool polarity_correction;
+ bool speed_downgraded;
+ bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+ struct e1000_nvm_operations ops;
+ enum e1000_nvm_type type;
+ enum e1000_nvm_override override;
+
+ u32 flash_bank_size;
+ u32 flash_base_addr;
+
+ u16 word_size;
+ u16 delay_usec;
+ u16 address_bits;
+ u16 opcode_bits;
+ u16 page_size;
+};
+
+struct e1000_bus_info {
+ enum e1000_bus_type type;
+ enum e1000_bus_speed speed;
+ enum e1000_bus_width width;
+
+ u16 func;
+ u16 pci_cmd_word;
+};
+
+struct e1000_fc_info {
+ u32 high_water; /* Flow control high-water mark */
+ u32 low_water; /* Flow control low-water mark */
+ u16 pause_time; /* Flow control pause timer */
+ u16 refresh_time; /* Flow control refresh timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ enum e1000_fc_mode current_mode; /* FC mode in effect */
+ enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+struct e1000_mbx_operations {
+ s32 (*init_params)(struct e1000_hw *hw);
+ s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct e1000_hw *, u16);
+ s32 (*check_for_ack)(struct e1000_hw *, u16);
+ s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+struct e1000_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct e1000_mbx_info {
+ struct e1000_mbx_operations ops;
+ struct e1000_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u16 size;
+};
+
+struct e1000_dev_spec_82541 {
+ enum e1000_dsp_config dsp_config;
+ enum e1000_ffe_config ffe_config;
+ u16 spd_default;
+ bool phy_init_script;
+};
+
+struct e1000_dev_spec_82542 {
+ bool dma_fairness;
+};
+
+struct e1000_dev_spec_82543 {
+ u32 tbi_compatibility;
+ bool dma_fairness;
+ bool init_phy_disabled;
+};
+
+struct e1000_dev_spec_82571 {
+ bool laa_is_present;
+ u32 smb_counter;
+ E1000_MUTEX swflag_mutex;
+};
+
+struct e1000_dev_spec_80003es2lan {
+ bool mdic_wa_enable;
+};
+
+struct e1000_shadow_ram {
+ u16 value;
+ bool modified;
+};
+
+#define E1000_SHADOW_RAM_WORDS 2048
+
+#ifdef ULP_SUPPORT
+/* I218 PHY Ultra Low Power (ULP) states */
+enum e1000_ulp_state {
+ e1000_ulp_state_unknown,
+ e1000_ulp_state_off,
+ e1000_ulp_state_on,
+};
+
+#endif /* ULP_SUPPORT */
+struct e1000_dev_spec_ich8lan {
+ bool kmrn_lock_loss_workaround_enabled;
+ struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS];
+ E1000_MUTEX nvm_mutex;
+ E1000_MUTEX swflag_mutex;
+ bool nvm_k1_enabled;
+ bool disable_k1_off;
+ bool eee_disable;
+ u16 eee_lp_ability;
+#ifdef ULP_SUPPORT
+ enum e1000_ulp_state ulp_state;
+ bool ulp_capability_disabled;
+ bool during_suspend_flow;
+ bool during_dpg_exit;
+#endif /* ULP_SUPPORT */
+ u16 lat_enc;
+ u16 max_ltr_enc;
+ bool smbus_disable;
+};
+
+struct e1000_dev_spec_82575 {
+ bool sgmii_active;
+ bool global_device_reset;
+ bool eee_disable;
+ bool module_plugged;
+ bool clear_semaphore_once;
+ u32 mtu;
+ struct sfp_e1000_flags eth_flags;
+ u8 media_port;
+ bool media_changed;
+};
+
+struct e1000_dev_spec_vf {
+ u32 vf_number;
+ u32 v2p_mailbox;
+};
+
+struct e1000_hw {
+ void *back;
+
+ u8 *hw_addr;
+ u8 *flash_address;
+ unsigned long io_base;
+
+ struct e1000_mac_info mac;
+ struct e1000_fc_info fc;
+ struct e1000_phy_info phy;
+ struct e1000_nvm_info nvm;
+ struct e1000_bus_info bus;
+ struct e1000_mbx_info mbx;
+ struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+ union {
+ struct e1000_dev_spec_82541 _82541;
+ struct e1000_dev_spec_82542 _82542;
+ struct e1000_dev_spec_82543 _82543;
+ struct e1000_dev_spec_82571 _82571;
+ struct e1000_dev_spec_80003es2lan _80003es2lan;
+ struct e1000_dev_spec_ich8lan ich8lan;
+ struct e1000_dev_spec_82575 _82575;
+ struct e1000_dev_spec_vf vf;
+ } dev_spec;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+};
+
+#include "e1000_82541.h"
+#include "e1000_82543.h"
+#include "e1000_82571.h"
+#include "e1000_80003es2lan.h"
+#include "e1000_ich8lan.h"
+#include "e1000_82575.h"
+#include "e1000_i210.h"
+
+/* These functions must be implemented by drivers */
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.c
new file mode 100644
index 00000000..277331c4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.c
@@ -0,0 +1,1033 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_api.h"
+
+
+STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
+STATIC void e1000_release_nvm_i210(struct e1000_hw *hw);
+STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
+STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+STATIC s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
+STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+
+/**
+ * e1000_acquire_nvm_i210 - Request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_acquire_nvm_i210");
+
+ ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_i210 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ **/
+STATIC void e1000_release_nvm_i210(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_i210");
+
+ e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 ret_val = E1000_SUCCESS;
+ s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
+ DEBUGFUNC("e1000_acquire_swfw_sync_i210");
+
+ while (i < timeout) {
+ if (e1000_get_hw_semaphore_i210(hw)) {
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /*
+ * Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000_put_hw_semaphore_generic(hw);
+ msec_delay_irq(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_release_swfw_sync_i210 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ DEBUGFUNC("e1000_release_swfw_sync_i210");
+
+ while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
+ ; /* Empty */
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+}
+
+/**
+ * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_i210");
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+ /* In rare circumstances, the SW semaphore may already be held
+ * unintentionally. Clear the semaphore once before giving up.
+ */
+ if (hw->dev_spec._82575.clear_semaphore_once) {
+ hw->dev_spec._82575.clear_semaphore_once = false;
+ e1000_put_hw_semaphore_generic(hw);
+ for (i = 0; i < timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ }
+ }
+
+ /* If we do not have the semaphore here, we have to give up. */
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_NVM;
+ }
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_generic(hw);
+ DEBUGOUT("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the Shadow Ram to read
+ * @words: number of words to read
+ * @data: word read from the Shadow Ram
+ *
+ * Reads a 16 bit word from the Shadow Ram using the EERD register.
+ * Uses necessary synchronization semaphores.
+ **/
+s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("e1000_read_nvm_srrd_i210");
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of forceful takeover procedure. However it is more efficient
+ * to read in bursts than synchronizing access for each word. */
+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ status = e1000_read_nvm_eerd(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow RAM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ * Writes data to Shadow RAM at offset using EEWR register.
+ *
+ * If e1000_update_nvm_checksum is not called after this function , the
+ * data will not be committed to FLASH and also Shadow RAM will most likely
+ * contain an invalid checksum.
+ *
+ * If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ * partially written.
+ **/
+s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("e1000_write_nvm_srwr_i210");
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of forceful takeover procedure. However it is more efficient
+ * to write in bursts than synchronizing access for each word. */
+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ status = e1000_write_nvm_srwr(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow Ram to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow Ram
+ *
+ * Writes data to Shadow Ram at offset using EEWR register.
+ *
+ * If e1000_update_nvm_checksum is not called after this function , the
+ * Shadow Ram will most likely contain an invalid checksum.
+ **/
+STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, k, eewr = 0;
+ u32 attempts = 100000;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_nvm_srwr");
+
+ /*
+ * A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
+ (data[i] << E1000_NVM_RW_REG_DATA) |
+ E1000_NVM_RW_REG_START;
+
+ E1000_WRITE_REG(hw, E1000_SRWR, eewr);
+
+ for (k = 0; k < attempts; k++) {
+ if (E1000_NVM_RW_REG_DONE &
+ E1000_READ_REG(hw, E1000_SRWR)) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Shadow RAM write EEWR timed out\n");
+ break;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/** e1000_read_invm_word_i210 - Reads OTP
+ * @hw: pointer to the HW structure
+ * @address: the word address (aka eeprom offset) to read
+ * @data: pointer to the data read
+ *
+ * Reads 16-bit words from the OTP. Return error when the word is not
+ * stored in OTP.
+ **/
+STATIC s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
+{
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u32 invm_dword;
+ u16 i;
+ u8 record_type, word_address;
+
+ DEBUGFUNC("e1000_read_invm_word_i210");
+
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
+ /* Get record type */
+ record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
+ if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
+ break;
+ if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
+ i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
+ i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
+ word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
+ if (word_address == address) {
+ *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
+ DEBUGOUT2("Read INVM Word 0x%02x = %x",
+ address, *data);
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ }
+ if (status != E1000_SUCCESS)
+ DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
+ return status;
+}
+
+/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211
+ * @hw: pointer to the HW structure
+ * @address: the word address (aka eeprom offset) to read
+ * @data: pointer to the data read
+ *
+ * Wrapper function to return data formerly found in the NVM.
+ **/
+STATIC s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
+ u16 E1000_UNUSEDARG words, u16 *data)
+{
+ s32 ret_val = E1000_SUCCESS;
+ UNREFERENCED_1PARAMETER(words);
+
+ DEBUGFUNC("e1000_read_invm_i210");
+
+ /* Only the MAC addr is required to be present in the iNVM */
+ switch (offset) {
+ case NVM_MAC_ADDR:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
+ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
+ &data[1]);
+ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
+ &data[2]);
+ if (ret_val != E1000_SUCCESS)
+ DEBUGOUT("MAC Addr not found in iNVM\n");
+ break;
+ case NVM_INIT_CTRL_2:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_2_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_INIT_CTRL_4:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_4_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_LED_1_CFG:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_1_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_LED_0_2_CFG:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_0_2_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_ID_LED_SETTINGS:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = ID_LED_RESERVED_FFFF;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_SUB_DEV_ID:
+ *data = hw->subsystem_device_id;
+ break;
+ case NVM_SUB_VEN_ID:
+ *data = hw->subsystem_vendor_id;
+ break;
+ case NVM_DEV_ID:
+ *data = hw->device_id;
+ break;
+ case NVM_VEN_ID:
+ *data = hw->vendor_id;
+ break;
+ default:
+ DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
+ *data = NVM_RESERVED_WORD;
+ break;
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_read_invm_version - Reads iNVM version and image type
+ * @hw: pointer to the HW structure
+ * @invm_ver: version structure for the version read
+ *
+ * Reads iNVM version and image type.
+ **/
+s32 e1000_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver)
+{
+ u32 *record = NULL;
+ u32 *next_record = NULL;
+ u32 i = 0;
+ u32 invm_dword = 0;
+ u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
+ E1000_INVM_RECORD_SIZE_IN_BYTES);
+ u32 buffer[E1000_INVM_SIZE];
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u16 version = 0;
+
+ DEBUGFUNC("e1000_read_invm_version");
+
+ /* Read iNVM memory */
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
+ buffer[i] = invm_dword;
+ }
+
+ /* Read version number */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have first version location used */
+ if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
+ version = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have second version location used */
+ else if ((i == 1) &&
+ ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /*
+ * Check if we have odd version location
+ * used and it is the last one used
+ */
+ else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+ ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
+ (i != 1))) {
+ version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+ >> 13;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /*
+ * Check if we have even version location
+ * used and it is the last one used
+ */
+ else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+ ((*record & 0x3) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+
+ if (status == E1000_SUCCESS) {
+ invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+ >> E1000_INVM_MAJOR_SHIFT;
+ invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+ }
+ /* Read Image Type */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have image type in first location used */
+ if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
+ invm_ver->invm_img_type = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have image type in first location used */
+ else if ((((*record & 0x3) == 0) &&
+ ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
+ ((((*record & 0x3) != 0) && (i != 1)))) {
+ invm_ver->invm_img_type =
+ (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_i210");
+
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+
+ /*
+ * Replace the read function with semaphore grabbing with
+ * the one that skips this for a while.
+ * We have semaphore taken already here.
+ */
+ read_op_ptr = hw->nvm.ops.read;
+ hw->nvm.ops.read = e1000_read_nvm_eerd;
+
+ status = e1000_validate_nvm_checksum_generic(hw);
+
+ /* Revert original read operation. */
+ hw->nvm.ops.read = read_op_ptr;
+
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+
+/**
+ * e1000_update_nvm_checksum_i210 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM. Next commit EEPROM data onto the Flash.
+ **/
+s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_i210");
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("EEPROM read failed\n");
+ goto out;
+ }
+
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ /*
+ * Do not use hw->nvm.ops.write, hw->nvm.ops.read
+ * because we do not want to take the synchronization
+ * semaphores twice here.
+ */
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw->nvm.ops.release(hw);
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
+ &checksum);
+ if (ret_val != E1000_SUCCESS) {
+ hw->nvm.ops.release(hw);
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
+ goto out;
+ }
+
+ hw->nvm.ops.release(hw);
+
+ ret_val = e1000_update_flash_i210(hw);
+ } else {
+ ret_val = E1000_ERR_SWFW_SYNC;
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_flash_presence_i210 - Check if flash device is detected.
+ * @hw: pointer to the HW structure
+ *
+ **/
+bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
+{
+ u32 eec = 0;
+ bool ret_val = false;
+
+ DEBUGFUNC("e1000_get_flash_presence_i210");
+
+ eec = E1000_READ_REG(hw, E1000_EECD);
+
+ if (eec & E1000_EECD_FLASH_DETECTED_I210)
+ ret_val = true;
+
+ return ret_val;
+}
+
+/**
+ * e1000_update_flash_i210 - Commit EEPROM to the flash
+ * @hw: pointer to the HW structure
+ *
+ **/
+s32 e1000_update_flash_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 flup;
+
+ DEBUGFUNC("e1000_update_flash_i210");
+
+ ret_val = e1000_pool_flash_update_done_i210(hw);
+ if (ret_val == -E1000_ERR_NVM) {
+ DEBUGOUT("Flash update time out\n");
+ goto out;
+ }
+
+ flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
+ E1000_WRITE_REG(hw, E1000_EECD, flup);
+
+ ret_val = e1000_pool_flash_update_done_i210(hw);
+ if (ret_val == E1000_SUCCESS)
+ DEBUGOUT("Flash update complete\n");
+ else
+ DEBUGOUT("Flash update time out\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_pool_flash_update_done_i210 - Pool FLUDONE status.
+ * @hw: pointer to the HW structure
+ *
+ **/
+s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_NVM;
+ u32 i, reg;
+
+ DEBUGFUNC("e1000_pool_flash_update_done_i210");
+
+ for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
+ reg = E1000_READ_REG(hw, E1000_EECD);
+ if (reg & E1000_EECD_FLUDONE_I210) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the i210/i211 NVM parameters and function pointers.
+ **/
+STATIC s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+
+ DEBUGFUNC("e1000_init_nvm_params_i210");
+
+ ret_val = e1000_init_nvm_params_82575(hw);
+ nvm->ops.acquire = e1000_acquire_nvm_i210;
+ nvm->ops.release = e1000_release_nvm_i210;
+ nvm->ops.valid_led_default = e1000_valid_led_default_i210;
+ if (e1000_get_flash_presence_i210(hw)) {
+ hw->nvm.type = e1000_nvm_flash_hw;
+ nvm->ops.read = e1000_read_nvm_srrd_i210;
+ nvm->ops.write = e1000_write_nvm_srwr_i210;
+ nvm->ops.validate = e1000_validate_nvm_checksum_i210;
+ nvm->ops.update = e1000_update_nvm_checksum_i210;
+ } else {
+ hw->nvm.type = e1000_nvm_invm;
+ nvm->ops.read = e1000_read_invm_i210;
+ nvm->ops.write = e1000_null_write_nvm;
+ nvm->ops.validate = e1000_null_ops_generic;
+ nvm->ops.update = e1000_null_ops_generic;
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_init_function_pointers_i210 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_i210(struct e1000_hw *hw)
+{
+ e1000_init_function_pointers_82575(hw);
+ hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
+
+ return;
+}
+
+/**
+ * e1000_valid_led_default_i210 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_i210");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+ switch (hw->phy.media_type) {
+ case e1000_media_type_internal_serdes:
+ *data = ID_LED_DEFAULT_I210_SERDES;
+ break;
+ case e1000_media_type_copper:
+ default:
+ *data = ID_LED_DEFAULT_I210;
+ break;
+ }
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * __e1000_access_xmdio_reg - Read/write XMDIO register
+ * @hw: pointer to the HW structure
+ * @address: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: pointer to value to read/write from/to the XMDIO address
+ * @read: boolean flag to indicate read or write
+ **/
+STATIC s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
+ u8 dev_addr, u16 *data, bool read)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("__e1000_access_xmdio_reg");
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
+ dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
+ else
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
+ if (ret_val)
+ return ret_val;
+
+ /* Recalibrate the device back to 0 */
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
+ if (ret_val)
+ return ret_val;
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_xmdio_reg - Read XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be read from the EMI address
+ **/
+s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
+{
+ DEBUGFUNC("e1000_read_xmdio_reg");
+
+ return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true);
+}
+
+/**
+ * e1000_write_xmdio_reg - Write XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be written to the XMDIO address
+ **/
+s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
+{
+ DEBUGFUNC("e1000_read_xmdio_reg");
+
+ return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+}
+
+/**
+ * e1000_pll_workaround_i210
+ * @hw: pointer to the HW structure
+ *
+ * Works around an errata in the PLL circuit where it occasionally
+ * provides the wrong clock frequency after power up.
+ **/
+STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
+ u16 nvm_word, phy_word, pci_word, tmp_nvm;
+ int i;
+
+ /* Get and set needed register values */
+ wuc = E1000_READ_REG(hw, E1000_WUC);
+ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
+ reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
+ E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val);
+
+ /* Get data from NVM, or set default */
+ ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
+ &nvm_word);
+ if (ret_val != E1000_SUCCESS)
+ nvm_word = E1000_INVM_DEFAULT_AL;
+ tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+ for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+ /* check current state directly from internal PHY */
+ e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
+ E1000_PHY_PLL_FREQ_REG), &phy_word);
+ if ((phy_word & E1000_PHY_PLL_UNCONF)
+ != E1000_PHY_PLL_UNCONF) {
+ ret_val = E1000_SUCCESS;
+ break;
+ } else {
+ ret_val = -E1000_ERR_PHY;
+ }
+ /* directly reset the internal PHY */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
+ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);
+
+ e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ pci_word |= E1000_PCI_PMCSR_D3;
+ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ msec_delay(1);
+ pci_word &= ~E1000_PCI_PMCSR_D3;
+ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
+ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);
+
+ /* restore WUC register */
+ E1000_WRITE_REG(hw, E1000_WUC, wuc);
+ }
+ /* restore MDICNFG setting */
+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
+ return ret_val;
+}
+
+/**
+ * e1000_get_cfg_done_i210 - Read config done bit
+ * @hw: pointer to the HW structure
+ *
+ * Read the management control register for the config done bit for
+ * completion status. NOTE: silicon which is EEPROM-less will fail trying
+ * to read the config done bit, so an error is *ONLY* logged and returns
+ * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
+ * would not be able to be reset or change link.
+ **/
+STATIC s32 e1000_get_cfg_done_i210(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+ DEBUGFUNC("e1000_get_cfg_done_i210");
+
+ while (timeout) {
+ if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask)
+ break;
+ msec_delay(1);
+ timeout--;
+ }
+ if (!timeout)
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_hw_i210 - Init hw for I210/I211
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize hw for i210 hw family.
+ **/
+s32 e1000_init_hw_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_hw_i210");
+ if ((hw->mac.type >= e1000_i210) &&
+ !(e1000_get_flash_presence_i210(hw))) {
+ ret_val = e1000_pll_workaround_i210(hw);
+ if (ret_val != E1000_SUCCESS)
+ return ret_val;
+ }
+ hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210;
+ ret_val = e1000_init_hw_82575(hw);
+ return ret_val;
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.h
new file mode 100644
index 00000000..1a6f1dd4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.h
@@ -0,0 +1,110 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_I210_H_
+#define _E1000_I210_H_
+
+bool e1000_get_flash_presence_i210(struct e1000_hw *hw);
+s32 e1000_update_flash_i210(struct e1000_hw *hw);
+s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw);
+s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw);
+s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 e1000_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
+s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ u16 *data);
+s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ u16 data);
+s32 e1000_init_hw_i210(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE 0xDB00
+#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
+
+#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \
+ (u8)((invm_dword) & 0x7)
+#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \
+ (u8)(((invm_dword) & 0x0000FE00) >> 9)
+#define INVM_DWORD_TO_WORD_DATA(invm_dword) \
+ (u16)(((invm_dword) & 0xFFFF0000) >> 16)
+
+enum E1000_INVM_STRUCTURE_TYPE {
+ E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00,
+ E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01,
+ E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02,
+ E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03,
+ E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04,
+ E1000_INVM_INVALIDATED_STRUCTURE = 0x0F,
+};
+
+#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
+#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
+#define E1000_INVM_ULT_BYTES_SIZE 8
+#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
+#define E1000_INVM_VER_FIELD_ONE 0x1FF8
+#define E1000_INVM_VER_FIELD_TWO 0x7FE000
+#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
+
+#define E1000_INVM_MAJOR_MASK 0x3F0
+#define E1000_INVM_MINOR_MASK 0xF
+#define E1000_INVM_MAJOR_SHIFT 4
+
+#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_OFF2))
+#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
+
+/* NVM offset defaults for I211 devices */
+#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243
+#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
+#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
+#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
+
+/* PLL Defines */
+#define E1000_PCI_PMCSR 0x44
+#define E1000_PCI_PMCSR_D3 0x03
+#define E1000_MAX_PLL_TRIES 5
+#define E1000_PHY_PLL_UNCONF 0xFF
+#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
+#define E1000_PHY_PLL_FREQ_REG 0x000E
+#define E1000_INVM_DEFAULT_AL 0x202F
+#define E1000_INVM_AUTOLOAD 0x0A
+#define E1000_INVM_PLL_WO_VAL 0x0010
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.c
new file mode 100644
index 00000000..92ab6fc6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.c
@@ -0,0 +1,6125 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/* 82562G 10/100 Network Connection
+ * 82562G-2 10/100 Network Connection
+ * 82562GT 10/100 Network Connection
+ * 82562GT-2 10/100 Network Connection
+ * 82562V 10/100 Network Connection
+ * 82562V-2 10/100 Network Connection
+ * 82566DC-2 Gigabit Network Connection
+ * 82566DC Gigabit Network Connection
+ * 82566DM-2 Gigabit Network Connection
+ * 82566DM Gigabit Network Connection
+ * 82566MC Gigabit Network Connection
+ * 82566MM Gigabit Network Connection
+ * 82567LM Gigabit Network Connection
+ * 82567LF Gigabit Network Connection
+ * 82567V Gigabit Network Connection
+ * 82567LM-2 Gigabit Network Connection
+ * 82567LF-2 Gigabit Network Connection
+ * 82567V-2 Gigabit Network Connection
+ * 82567LF-3 Gigabit Network Connection
+ * 82567LM-3 Gigabit Network Connection
+ * 82567LM-4 Gigabit Network Connection
+ * 82577LM Gigabit Network Connection
+ * 82577LC Gigabit Network Connection
+ * 82578DM Gigabit Network Connection
+ * 82578DC Gigabit Network Connection
+ * 82579LM Gigabit Network Connection
+ * 82579V Gigabit Network Connection
+ * Ethernet Connection I217-LM
+ * Ethernet Connection I217-V
+ * Ethernet Connection I218-V
+ * Ethernet Connection I218-LM
+ * Ethernet Connection (2) I218-LM
+ * Ethernet Connection (2) I218-V
+ * Ethernet Connection (3) I218-LM
+ * Ethernet Connection (3) I218-V
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
+STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
+STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
+STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
+STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
+STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
+STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
+#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
+STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
+ u8 *mc_addr_list,
+ u32 mc_addr_count);
+#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
+STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
+STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
+STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
+ u16 *data);
+STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
+STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
+STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
+STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
+STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw);
+STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw);
+STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
+STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
+ u32 offset, u8 *data);
+STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 size, u16 *data);
+STATIC s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data);
+STATIC s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 *data);
+STATIC s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 data);
+STATIC s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 dword);
+STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
+ u32 offset, u16 *data);
+STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+ u32 offset, u8 byte);
+STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
+STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
+STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
+STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+
+/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+ struct ich8_hsfsts {
+ u16 flcdone:1; /* bit 0 Flash Cycle Done */
+ u16 flcerr:1; /* bit 1 Flash Cycle Error */
+ u16 dael:1; /* bit 2 Direct Access error Log */
+ u16 berasesz:2; /* bit 4:3 Sector Erase Size */
+ u16 flcinprog:1; /* bit 5 flash cycle in Progress */
+ u16 reserved1:2; /* bit 13:6 Reserved */
+ u16 reserved2:6; /* bit 13:6 Reserved */
+ u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
+ u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
+ } hsf_status;
+ u16 regval;
+};
+
+/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+ struct ich8_hsflctl {
+ u16 flcgo:1; /* 0 Flash Cycle Go */
+ u16 flcycle:2; /* 2:1 Flash Cycle */
+ u16 reserved:5; /* 7:3 Reserved */
+ u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
+ u16 flockdn:6; /* 15:10 Reserved */
+ } hsf_ctrl;
+ u16 regval;
+};
+
+/* ICH Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+ struct ich8_flracc {
+ u32 grra:8; /* 0:7 GbE region Read Access */
+ u32 grwa:8; /* 8:15 GbE region Write Access */
+ u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
+ u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
+ } hsf_flregacc;
+ u16 regval;
+};
+
+/**
+ * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
+ * @hw: pointer to the HW structure
+ *
+ * Test access to the PHY registers by reading the PHY ID registers. If
+ * the PHY ID is already known (e.g. resume path) compare it with known ID,
+ * otherwise assume the read PHY ID is correct if it is valid.
+ *
+ * Assumes the sw/fw/hw semaphore is already acquired.
+ **/
+STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
+{
+ u16 phy_reg = 0;
+ u32 phy_id = 0;
+ s32 ret_val = 0;
+ u16 retry_count;
+ u32 mac_reg = 0;
+
+ for (retry_count = 0; retry_count < 2; retry_count++) {
+ ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF))
+ continue;
+ phy_id = (u32)(phy_reg << 16);
+
+ ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF)) {
+ phy_id = 0;
+ continue;
+ }
+ phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+ break;
+ }
+
+ if (hw->phy.id) {
+ if (hw->phy.id == phy_id)
+ goto out;
+ } else if (phy_id) {
+ hw->phy.id = phy_id;
+ hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
+ goto out;
+ }
+
+ /* In case the PHY needs to be in mdio slow mode,
+ * set slow mode and try to get the PHY id again.
+ */
+ if (hw->mac.type < e1000_pch_lpt) {
+ hw->phy.ops.release(hw);
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (!ret_val)
+ ret_val = e1000_get_phy_id(hw);
+ hw->phy.ops.acquire(hw);
+ }
+
+ if (ret_val)
+ return false;
+out:
+ if (hw->mac.type >= e1000_pch_lpt) {
+ /* Only unforce SMBus if ME is not active */
+ if (!(E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_ICH_FWSM_FW_VALID)) {
+ /* Unforce SMBus mode in PHY */
+ hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+ }
+ }
+
+ return true;
+}
+
+/**
+ * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
+ * @hw: pointer to the HW structure
+ *
+ * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
+ * used to reset the PHY to a quiescent state when necessary.
+ **/
+STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+
+ DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
+
+ /* Set Phy Config Counter to 50msec */
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
+ mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+ mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
+
+ /* Toggle LANPHYPC Value bit */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL);
+ mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
+ E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+ mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
+ E1000_WRITE_FLUSH(hw);
+
+ if (hw->mac.type < e1000_pch_lpt) {
+ msec_delay(50);
+ } else {
+ u16 count = 20;
+
+ do {
+ msec_delay(5);
+ } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
+ E1000_CTRL_EXT_LPCD) && count--);
+
+ msec_delay(30);
+ }
+}
+
+/**
+ * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
+ * @hw: pointer to the HW structure
+ *
+ * Workarounds/flow necessary for PHY initialization during driver load
+ * and resume paths.
+ **/
+STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
+{
+ u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
+
+ /* Gate automatic PHY configuration by hardware on managed and
+ * non-managed 82579 and newer adapters.
+ */
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+#ifdef ULP_SUPPORT
+ /* It is not possible to be certain of the current state of ULP
+ * so forcibly disable it.
+ */
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
+
+#endif /* ULP_SUPPORT */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ DEBUGOUT("Failed to initialize PHY flow\n");
+ goto out;
+ }
+
+ /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
+ * inaccessible and resetting the PHY is not blocked, toggle the
+ * LANPHYPC Value bit to force the interconnect to PCIe mode.
+ */
+ switch (hw->mac.type) {
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ /* Before toggling LANPHYPC, see if PHY is accessible by
+ * forcing MAC to SMBus mode first.
+ */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+ /* Wait 50 milliseconds for MAC to finish any retries
+ * that it might be trying to perform from previous
+ * attempts to acknowledge any phy read requests.
+ */
+ msec_delay(50);
+
+ /* fall-through */
+ case e1000_pch2lan:
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ /* fall-through */
+ case e1000_pchlan:
+ if ((hw->mac.type == e1000_pchlan) &&
+ (fwsm & E1000_ICH_FWSM_FW_VALID))
+ break;
+
+ if (hw->phy.ops.check_reset_block(hw)) {
+ DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ /* Toggle LANPHYPC Value bit */
+ e1000_toggle_lanphypc_pch_lpt(hw);
+ if (hw->mac.type >= e1000_pch_lpt) {
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ /* Toggling LANPHYPC brings the PHY out of SMBus mode
+ * so ensure that the MAC is also out of SMBus mode
+ */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ ret_val = -E1000_ERR_PHY;
+ }
+ break;
+ default:
+ break;
+ }
+
+ hw->phy.ops.release(hw);
+ if (!ret_val) {
+
+ /* Check to see if able to reset PHY. Print error if not */
+ if (hw->phy.ops.check_reset_block(hw)) {
+ ERROR_REPORT("Reset blocked by ME\n");
+ goto out;
+ }
+
+ /* Reset the PHY before any access to it. Doing so, ensures
+ * that the PHY is in a known good state before we read/write
+ * PHY registers. The generic reset is sufficient here,
+ * because we haven't determined the PHY type yet.
+ */
+ ret_val = e1000_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ /* On a successful reset, possibly need to wait for the PHY
+ * to quiesce to an accessible state before returning control
+ * to the calling function. If the PHY does not quiesce, then
+ * return E1000E_BLK_PHY_RESET, as this is the condition that
+ * the PHY is in.
+ */
+ ret_val = hw->phy.ops.check_reset_block(hw);
+ if (ret_val)
+ ERROR_REPORT("ME blocked access to PHY after reset\n");
+ }
+
+out:
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+ msec_delay(10);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_phy_params_pchlan - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific PHY parameters and function pointers.
+ **/
+STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_phy_params_pchlan");
+
+ phy->addr = 1;
+ phy->reset_delay_us = 100;
+
+ phy->ops.acquire = e1000_acquire_swflag_ich8lan;
+ phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
+ phy->ops.set_page = e1000_set_page_igp;
+ phy->ops.read_reg = e1000_read_phy_reg_hv;
+ phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
+ phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
+ phy->ops.release = e1000_release_swflag_ich8lan;
+ phy->ops.reset = e1000_phy_hw_reset_ich8lan;
+ phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
+ phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
+ phy->ops.write_reg = e1000_write_phy_reg_hv;
+ phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
+ phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ phy->id = e1000_phy_unknown;
+
+ ret_val = e1000_init_phy_workarounds_pchlan(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->id == e1000_phy_unknown)
+ switch (hw->mac.type) {
+ default:
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ return ret_val;
+ if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
+ break;
+ /* fall-through */
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
+ /* In case the PHY needs to be in mdio slow mode,
+ * set slow mode and try to get the PHY id again.
+ */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+ phy->type = e1000_get_phy_type_from_id(phy->id);
+
+ switch (phy->type) {
+ case e1000_phy_82577:
+ case e1000_phy_82579:
+ case e1000_phy_i217:
+ phy->ops.check_polarity = e1000_check_polarity_82577;
+ phy->ops.force_speed_duplex =
+ e1000_phy_force_speed_duplex_82577;
+ phy->ops.get_cable_length = e1000_get_cable_length_82577;
+ phy->ops.get_info = e1000_get_phy_info_82577;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ break;
+ case e1000_phy_82578:
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific PHY parameters and function pointers.
+ **/
+STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 i = 0;
+
+ DEBUGFUNC("e1000_init_phy_params_ich8lan");
+
+ phy->addr = 1;
+ phy->reset_delay_us = 100;
+
+ phy->ops.acquire = e1000_acquire_swflag_ich8lan;
+ phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
+ phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
+ phy->ops.read_reg = e1000_read_phy_reg_igp;
+ phy->ops.release = e1000_release_swflag_ich8lan;
+ phy->ops.reset = e1000_phy_hw_reset_ich8lan;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
+ phy->ops.write_reg = e1000_write_phy_reg_igp;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
+
+ /* We may need to do this twice - once for IGP and if that fails,
+ * we'll set BM func pointers and try again
+ */
+ ret_val = e1000_determine_phy_address(hw);
+ if (ret_val) {
+ phy->ops.write_reg = e1000_write_phy_reg_bm;
+ phy->ops.read_reg = e1000_read_phy_reg_bm;
+ ret_val = e1000_determine_phy_address(hw);
+ if (ret_val) {
+ DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
+ return ret_val;
+ }
+ }
+
+ phy->id = 0;
+ while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
+ (i++ < 100)) {
+ msec_delay(1);
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Verify phy id */
+ switch (phy->id) {
+ case IGP03E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
+ phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
+ phy->ops.get_info = e1000_get_phy_info_igp;
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ phy->type = e1000_phy_ife;
+ phy->autoneg_mask = E1000_ALL_NOT_GIG;
+ phy->ops.get_info = e1000_get_phy_info_ife;
+ phy->ops.check_polarity = e1000_check_polarity_ife;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
+ break;
+ case BME1000_E_PHY_ID:
+ phy->type = e1000_phy_bm;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->ops.read_reg = e1000_read_phy_reg_bm;
+ phy->ops.write_reg = e1000_write_phy_reg_bm;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific NVM parameters and function
+ * pointers.
+ **/
+STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 gfpreg, sector_base_addr, sector_end_addr;
+ u16 i;
+ u32 nvm_size;
+
+ DEBUGFUNC("e1000_init_nvm_params_ich8lan");
+
+ nvm->type = e1000_nvm_flash_sw;
+
+ if (hw->mac.type >= e1000_pch_spt) {
+ /* in SPT, gfpreg doesn't exist. NVM size is taken from the
+ * STRAP register. This is because in SPT the GbE Flash region
+ * is no longer accessed through the flash registers. Instead,
+ * the mechanism has changed, and the Flash region access
+ * registers are now implemented in GbE memory space.
+ */
+ nvm->flash_base_addr = 0;
+ nvm_size =
+ (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
+ * NVM_SIZE_MULTIPLIER;
+ nvm->flash_bank_size = nvm_size / 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
+ /* Set the base address for flash register access */
+ hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
+ } else {
+ /* Can't read flash registers if register set isn't mapped. */
+ if (!hw->flash_address) {
+ DEBUGOUT("ERROR: Flash registers not mapped\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
+
+ /* sector_X_addr is a "sector"-aligned address (4096 bytes)
+ * Add 1 to sector_end_addr since this sector is included in
+ * the overall size.
+ */
+ sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
+ sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
+
+ /* flash_base_addr is byte-aligned */
+ nvm->flash_base_addr = sector_base_addr
+ << FLASH_SECTOR_ADDR_SHIFT;
+
+ /* find total size of the NVM, then cut in half since the total
+ * size represents two separate NVM banks.
+ */
+ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+ << FLASH_SECTOR_ADDR_SHIFT);
+ nvm->flash_bank_size /= 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
+ }
+
+ nvm->word_size = E1000_SHADOW_RAM_WORDS;
+
+ /* Clear shadow ram */
+ for (i = 0; i < nvm->word_size; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+ E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
+ E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
+
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
+ nvm->ops.release = e1000_release_nvm_ich8lan;
+ if (hw->mac.type >= e1000_pch_spt) {
+ nvm->ops.read = e1000_read_nvm_spt;
+ nvm->ops.update = e1000_update_nvm_checksum_spt;
+ } else {
+ nvm->ops.read = e1000_read_nvm_ich8lan;
+ nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
+ }
+ nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
+ nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
+ nvm->ops.write = e1000_write_nvm_ich8lan;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific MAC parameters and function
+ * pointers.
+ **/
+STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
+ u16 pci_cfg;
+#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
+
+ DEBUGFUNC("e1000_init_mac_params_ich8lan");
+
+ /* Set media type function pointer */
+ hw->phy.media_type = e1000_media_type_copper;
+
+ /* Set mta register count */
+ mac->mta_reg_count = 32;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
+ if (mac->type == e1000_ich8lan)
+ mac->rar_entry_count--;
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC subsystem not supported */
+ mac->arc_subsystem_valid = false;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
+ /* function id */
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_ich8lan;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_ich8lan;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_ich8lan;
+ /* physical interface setup */
+ mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
+ /* check for link */
+ mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
+ /* link info */
+ mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
+
+ /* LED and other operations */
+ switch (mac->type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* blink LED */
+ mac->ops.blink_led = e1000_blink_led_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_ich8lan;
+ mac->ops.led_off = e1000_led_off_ich8lan;
+ break;
+ case e1000_pch2lan:
+ mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
+ mac->ops.rar_set = e1000_rar_set_pch2lan;
+ /* fall-through */
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
+#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
+ /* multicast address update for pch2 */
+ mac->ops.update_mc_addr_list =
+ e1000_update_mc_addr_list_pch2lan;
+ /* fall-through */
+#endif
+ case e1000_pchlan:
+#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
+ /* save PCH revision_id */
+ e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
+ /* SPT uses full byte for revision ID,
+ * as opposed to previous generations
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ hw->revision_id = (u8)(pci_cfg &= 0x00FF);
+ else
+ hw->revision_id = (u8)(pci_cfg &= 0x000F);
+#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_pchlan;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_pchlan;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_pchlan;
+ mac->ops.led_off = e1000_led_off_pchlan;
+ break;
+ default:
+ break;
+ }
+
+ if (mac->type >= e1000_pch_lpt) {
+ mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
+ mac->ops.rar_set = e1000_rar_set_pch_lpt;
+ mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
+ }
+
+ /* Enable PCS Lock-loss workaround for ICH8 */
+ if (mac->type == e1000_ich8lan)
+ e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * __e1000_access_emi_reg_locked - Read/write EMI register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: pointer to value to read/write from/to the EMI address
+ * @read: boolean flag to indicate read or write
+ *
+ * This helper function assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("__e1000_access_emi_reg_locked");
+
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
+ data);
+ else
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
+ *data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_emi_reg_locked - Read Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be read from the EMI address
+ *
+ * Assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+ DEBUGFUNC("e1000_read_emi_reg_locked");
+
+ return __e1000_access_emi_reg_locked(hw, addr, data, true);
+}
+
+/**
+ * e1000_write_emi_reg_locked - Write Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be written to the EMI address
+ *
+ * Assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
+{
+ DEBUGFUNC("e1000_read_emi_reg_locked");
+
+ return __e1000_access_emi_reg_locked(hw, addr, &data, false);
+}
+
+/**
+ * e1000_set_eee_pchlan - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE based on setting in dev_spec structure, the duplex of
+ * the link and the EEE capabilities of the link partner. The LPI Control
+ * register bits will remain set only if/when link is up.
+ *
+ * EEE LPI must not be asserted earlier than one second after link is up.
+ * On 82579, EEE LPI should not be enabled until such time otherwise there
+ * can be link issues with some switches. Other devices can have EEE LPI
+ * enabled immediately upon link up since they have a timer in hardware which
+ * prevents LPI from being asserted too early.
+ **/
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ s32 ret_val;
+ u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
+
+ DEBUGFUNC("e1000_set_eee_pchlan");
+
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ lpa = I82579_EEE_LP_ABILITY;
+ pcs_status = I82579_EEE_PCS_STATUS;
+ adv_addr = I82579_EEE_ADVERTISEMENT;
+ break;
+ case e1000_phy_i217:
+ lpa = I217_EEE_LP_ABILITY;
+ pcs_status = I217_EEE_PCS_STATUS;
+ adv_addr = I217_EEE_ADVERTISEMENT;
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
+ if (ret_val)
+ goto release;
+
+ /* Clear bits that enable EEE in various speeds */
+ lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
+
+ /* Enable EEE if not disabled by user */
+ if (!dev_spec->eee_disable) {
+ /* Save off link partner's EEE ability */
+ ret_val = e1000_read_emi_reg_locked(hw, lpa,
+ &dev_spec->eee_lp_ability);
+ if (ret_val)
+ goto release;
+
+ /* Read EEE advertisement */
+ ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
+ if (ret_val)
+ goto release;
+
+ /* Enable EEE only for speeds in which the link partner is
+ * EEE capable and for which we advertise EEE.
+ */
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
+ lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
+ hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
+ if (data & NWAY_LPAR_100TX_FD_CAPS)
+ lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+ else
+ /* EEE is not supported in 100Half, so ignore
+ * partner's EEE in 100 ability if full-duplex
+ * is not advertised.
+ */
+ dev_spec->eee_lp_ability &=
+ ~I82579_EEE_100_SUPPORTED;
+ }
+ }
+
+ if (hw->phy.type == e1000_phy_82579) {
+ ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+ &data);
+ if (ret_val)
+ goto release;
+
+ data &= ~I82579_LPI_100_PLL_SHUT;
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+ data);
+ }
+
+ /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
+ if (ret_val)
+ goto release;
+
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
+ * @hw: pointer to the HW structure
+ * @link: link up bool flag
+ *
+ * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
+ * preventing further DMA write requests. Workaround the issue by disabling
+ * the de-assertion of the clock request when in 1Gpbs mode.
+ * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
+ * speeds in order to avoid Tx hangs.
+ **/
+STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
+{
+ u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
+ u32 status = E1000_READ_REG(hw, E1000_STATUS);
+ s32 ret_val = E1000_SUCCESS;
+ u16 reg;
+
+ if (link && (status & E1000_STATUS_SPEED_1000)) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+ &reg);
+ if (ret_val)
+ goto release;
+
+ ret_val =
+ e1000_write_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ reg &
+ ~E1000_KMRNCTRLSTA_K1_ENABLE);
+ if (ret_val)
+ goto release;
+
+ usec_delay(10);
+
+ E1000_WRITE_REG(hw, E1000_FEXTNVM6,
+ fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
+
+ ret_val =
+ e1000_write_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ reg);
+release:
+ hw->phy.ops.release(hw);
+ } else {
+ /* clear FEXTNVM6 bit 8 on link down or 10/100 */
+ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
+
+ if ((hw->phy.revision > 5) || !link ||
+ ((status & E1000_STATUS_SPEED_100) &&
+ (status & E1000_STATUS_FD)))
+ goto update_fextnvm6;
+
+ ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear link status transmit timeout */
+ reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
+
+ if (status & E1000_STATUS_SPEED_100) {
+ /* Set inband Tx timeout to 5x10us for 100Half */
+ reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Do not extend the K1 entry latency for 100Half */
+ fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ } else {
+ /* Set inband Tx timeout to 50x10us for 10Full/Half */
+ reg |= 50 <<
+ I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Extend the K1 entry latency for 10 Mbps */
+ fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ }
+
+ ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
+ if (ret_val)
+ return ret_val;
+
+update_fextnvm6:
+ E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
+ }
+
+ return ret_val;
+}
+
+#ifdef ULP_SUPPORT
+/**
+ * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @to_sx: boolean indicating a system power state transition to Sx
+ *
+ * When link is down, configure ULP mode to significantly reduce the power
+ * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
+ * ME firmware to start the ULP configuration. If not on an ME enabled
+ * system, configure the ULP mode by software.
+ */
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
+{
+ u32 mac_reg;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_reg;
+ u16 oem_reg = 0;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
+ return 0;
+
+ if (!to_sx) {
+ int i = 0;
+ /* Poll up to 5 seconds for Cable Disconnected indication */
+ while (!(E1000_READ_REG(hw, E1000_FEXT) &
+ E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
+ /* Bail if link is re-acquired */
+ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
+ return -E1000_ERR_PHY;
+ if (i++ == 100)
+ break;
+
+ msec_delay(50);
+ }
+ DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
+ (E1000_READ_REG(hw, E1000_FEXT) &
+ E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
+ i * 50);
+ if (!(E1000_READ_REG(hw, E1000_FEXT) &
+ E1000_FEXT_PHY_CABLE_DISCONNECTED))
+ return 0;
+ }
+
+ if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ /* Request ME configure ULP mode in the PHY */
+ mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+ mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
+ E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /* During S0 Idle keep the phy in PCI-E mode */
+ if (hw->dev_spec.ich8lan.smbus_disable)
+ goto skip_smbus;
+
+ /* Force SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Force SMBus mode in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+ /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
+ * LPLU and disable Gig speed when entering ULP
+ */
+ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ &oem_reg);
+ if (ret_val)
+ goto release;
+
+ phy_reg = oem_reg;
+ phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
+
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ phy_reg);
+
+ if (ret_val)
+ goto release;
+ }
+
+skip_smbus:
+ if (!to_sx) {
+ /* Change the 'Link Status Change' interrupt to trigger
+ * on 'Cable Status Change'
+ */
+ ret_val = e1000_read_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_OP_MODES,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
+ e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
+ phy_reg);
+ }
+
+ /* Set Inband ULP Exit, Reset to SMBus mode and
+ * Disable SMBus Release on PERST# in PHY
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ if (to_sx) {
+ if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
+ phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
+ else
+ phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
+
+ phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
+ phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
+ } else {
+ phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
+ phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
+ phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
+ }
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Set Disable SMBus Release on PERST# in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
+ mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
+
+ /* Commit ULP changes in PHY by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ if (!to_sx) {
+ /* Disable Tx so that the MAC doesn't send any (buffered)
+ * packets to the PHY.
+ */
+ mac_reg = E1000_READ_REG(hw, E1000_TCTL);
+ mac_reg &= ~E1000_TCTL_EN;
+ E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
+ }
+
+ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
+ to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ oem_reg);
+ if (ret_val)
+ goto release;
+ }
+
+release:
+ hw->phy.ops.release(hw);
+out:
+ if (ret_val)
+ DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
+ else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
+
+ return ret_val;
+}
+
+/**
+ * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @force: boolean indicating whether or not to force disabling ULP
+ *
+ * Un-configure ULP mode when link is up, the system is transitioned from
+ * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled
+ * system, poll for an indication from ME that ULP has been un-configured.
+ * If not on an ME enabled system, un-configure the ULP mode by software.
+ *
+ * During nominal operation, this function is called when link is acquired
+ * to disable ULP mode (force=false); otherwise, for example when unloading
+ * the driver or during Sx->S0 transitions, this is called with force=true
+ * to forcibly disable ULP.
+
+ * When the cable is plugged in while the device is in D0, a Cable Status
+ * Change interrupt is generated which causes this function to be called
+ * to partially disable ULP mode and restart autonegotiation. This function
+ * is then called again due to the resulting Link Status Change interrupt
+ * to finish cleaning up after the ULP flow.
+ */
+s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 mac_reg;
+ u16 phy_reg;
+ int i = 0;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
+ return 0;
+
+ if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (force) {
+ /* Request ME un-configure ULP mode in the PHY */
+ mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
+ E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+ }
+
+ /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
+ while (E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_FWSM_ULP_CFG_DONE) {
+ if (i++ == 30) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ msec_delay(10);
+ }
+ DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+
+ if (force) {
+ mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+ mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
+ E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+ } else {
+ /* Clear H2ME.ULP after ME ULP configuration */
+ mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+
+ /* Restore link speed advertisements and restart
+ * Auto-negotiation
+ */
+ if (hw->mac.autoneg) {
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val)
+ goto out;
+ } else {
+ ret_val = e1000_setup_copper_link_generic(hw);
+ if (ret_val)
+ goto out;
+ }
+ ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+ }
+
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /* Revert the change to the 'Link Status Change'
+ * interrupt to trigger on 'Cable Status Change'
+ */
+ ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
+ e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
+
+ if (force)
+ /* Toggle LANPHYPC Value bit */
+ e1000_toggle_lanphypc_pch_lpt(hw);
+
+ /* Unforce SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val) {
+ /* The MAC might be in PCIe mode, so temporarily force to
+ * SMBus mode in order to access the PHY.
+ */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+ msec_delay(50);
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ }
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+ /* When ULP mode was previously entered, K1 was disabled by the
+ * hardware. Re-Enable K1 in the PHY when exiting ULP.
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= HV_PM_CTRL_K1_ENABLE;
+ e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
+
+ /* Clear ULP enabled configuration */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ /* CSC interrupt received due to ULP Indication */
+ if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
+ phy_reg &= ~(I218_ULP_CONFIG1_IND |
+ I218_ULP_CONFIG1_STICKY_ULP |
+ I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_WOL_HOST |
+ I218_ULP_CONFIG1_INBAND_EXIT |
+ I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
+ I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Commit ULP changes by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Clear Disable SMBus Release on PERST# in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
+ mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
+
+ if (!force) {
+ hw->phy.ops.release(hw);
+
+ if (hw->mac.autoneg)
+ e1000_phy_setup_autoneg(hw);
+ else
+ e1000_setup_copper_link_generic(hw);
+
+ e1000_sw_lcd_config_ich8lan(hw);
+
+ e1000_oem_bits_config_ich8lan(hw, true);
+
+ /* Set ULP state to unknown and return non-zero to
+ * indicate no link (yet) and re-enter on the next LSC
+ * to finish disabling ULP flow.
+ */
+ hw->dev_spec.ich8lan.ulp_state =
+ e1000_ulp_state_unknown;
+
+ return 1;
+ }
+ }
+
+ /* Re-enable Tx */
+ mac_reg = E1000_READ_REG(hw, E1000_TCTL);
+ mac_reg |= E1000_TCTL_EN;
+ E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
+
+release:
+ hw->phy.ops.release(hw);
+ if (force) {
+ hw->phy.ops.reset(hw);
+ msec_delay(50);
+ }
+out:
+ if (ret_val)
+ DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
+ else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
+
+ return ret_val;
+}
+
+#endif /* ULP_SUPPORT */
+
+
+/**
+ * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see of the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val, tipg_reg = 0;
+ u16 emi_addr, emi_val = 0;
+ bool link = false;
+ u16 phy_reg;
+
+ DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
+
+ /* We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status)
+ return E1000_SUCCESS;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Check the MAC's STATUS register to determine link state
+ * since the PHY could be inaccessible while in ULP mode.
+ */
+ link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
+ if (link)
+ ret_val = e1000_disable_ulp_lpt_lp(hw, false);
+ else
+ ret_val = e1000_enable_ulp_lpt_lp(hw, false);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->mac.type == e1000_pchlan) {
+ ret_val = e1000_k1_gig_workaround_hv(hw, link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* When connected at 10Mbps half-duplex, some parts are excessively
+ * aggressive resulting in many collisions. To avoid this, increase
+ * the IPG and reduce Rx latency in the PHY.
+ */
+ if ((hw->mac.type >= e1000_pch2lan) && link) {
+ u16 speed, duplex;
+
+ e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
+ tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
+ tipg_reg &= ~E1000_TIPG_IPGT_MASK;
+
+ if (duplex == HALF_DUPLEX && speed == SPEED_10) {
+ tipg_reg |= 0xFF;
+ /* Reduce Rx latency in analog PHY */
+ emi_val = 0;
+ } else if (hw->mac.type >= e1000_pch_spt &&
+ duplex == FULL_DUPLEX && speed != SPEED_1000) {
+ tipg_reg |= 0xC;
+ emi_val = 1;
+ } else {
+ /* Roll back the default values */
+ tipg_reg |= 0x08;
+ emi_val = 1;
+ }
+
+ E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->mac.type == e1000_pch2lan)
+ emi_addr = I82579_RX_CONFIG;
+ else
+ emi_addr = I217_RX_CONFIG;
+ ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
+
+
+ if (hw->mac.type >= e1000_pch_lpt) {
+ u16 phy_reg;
+
+ hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
+ &phy_reg);
+ phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
+ if (speed == SPEED_100 || speed == SPEED_10)
+ phy_reg |= 0x3E8;
+ else
+ phy_reg |= 0xFA;
+ hw->phy.ops.write_reg_locked(hw,
+ I217_PLL_CLOCK_GATE_REG,
+ phy_reg);
+
+ if (speed == SPEED_1000) {
+ hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
+ &phy_reg);
+
+ phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
+
+ hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
+ phy_reg);
+ }
+ }
+ hw->phy.ops.release(hw);
+
+ if (ret_val)
+ return ret_val;
+
+ if (hw->mac.type >= e1000_pch_spt) {
+ u16 data;
+ u16 ptr_gap;
+
+ if (speed == SPEED_1000) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.read_reg_locked(hw,
+ PHY_REG(776, 20),
+ &data);
+ if (ret_val) {
+ hw->phy.ops.release(hw);
+ return ret_val;
+ }
+
+ ptr_gap = (data & (0x3FF << 2)) >> 2;
+ if (ptr_gap < 0x18) {
+ data &= ~(0x3FF << 2);
+ data |= (0x18 << 2);
+ ret_val =
+ hw->phy.ops.write_reg_locked(hw,
+ PHY_REG(776, 20), data);
+ }
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg_locked(hw,
+ PHY_REG(776, 20),
+ 0xC023);
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return ret_val;
+
+ }
+ }
+ }
+
+ /* I217 Packet Loss issue:
+ * ensure that FEXTNVM4 Beacon Duration is set correctly
+ * on power up.
+ * Set the Beacon Duration for I217 to 8 usec
+ */
+ if (hw->mac.type >= e1000_pch_lpt) {
+ u32 mac_reg;
+
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
+ }
+
+ /* Work-around I218 hang issue */
+ if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
+ ret_val = e1000_k1_workaround_lpt_lp(hw, link);
+ if (ret_val)
+ return ret_val;
+ }
+ /* Clear link partner's EEE ability */
+ hw->dev_spec.ich8lan.eee_lp_ability = 0;
+
+ /* Configure K0s minimum time */
+ if (hw->mac.type >= e1000_pch_lpt) {
+ e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
+ }
+
+ if (hw->mac.type >= e1000_pch_lpt) {
+ u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
+
+ if (hw->mac.type == e1000_pch_spt) {
+ /* FEXTNVM6 K1-off workaround - for SPT only */
+ u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
+
+ if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
+ fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
+ else
+ fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+ }
+
+ if (hw->dev_spec.ich8lan.disable_k1_off == true)
+ fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+
+ E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
+ }
+
+ if (!link)
+ return E1000_SUCCESS; /* No link detected */
+
+ mac->get_link_status = false;
+
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
+ ret_val = e1000_k1_workaround_lv(hw);
+ if (ret_val)
+ return ret_val;
+ /* fall-thru */
+ case e1000_pchlan:
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = e1000_link_stall_workaround_hv(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Workaround for PCHx parts in half-duplex:
+ * Set the number of preambles removed from the packet
+ * when it is passed from the PHY to the MAC to prevent
+ * the MAC from misinterpreting the packet type.
+ */
+ hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
+ phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
+
+ if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
+ E1000_STATUS_FD)
+ phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+
+ hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
+ break;
+ default:
+ break;
+ }
+
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ e1000_check_downshift_generic(hw);
+
+ /* Enable/Disable EEE after link up */
+ if (hw->phy.type > e1000_phy_82579) {
+ ret_val = e1000_set_eee_pchlan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg)
+ return -E1000_ERR_CONFIG;
+
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ mac->ops.config_collision_dist(hw);
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific function pointers for PHY, MAC, and NVM.
+ **/
+void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_ich8lan");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
+ break;
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
+ hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the mutex for performing NVM operations.
+ **/
+STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_acquire_nvm_ich8lan");
+
+ E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_release_nvm_ich8lan - Release NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Releases the mutex used while performing NVM operations.
+ **/
+STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_ich8lan");
+
+ E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
+
+ return;
+}
+
+/**
+ * e1000_acquire_swflag_ich8lan - Acquire software control flag
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the software control flag for performing PHY and select
+ * MAC CSR accesses.
+ **/
+STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_acquire_swflag_ich8lan");
+
+ E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
+
+ while (timeout) {
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
+ break;
+
+ msec_delay_irq(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("SW has already locked the resource.\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ timeout = SW_FLAG_TIMEOUT;
+
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+
+ while (timeout) {
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+ break;
+
+ msec_delay_irq(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
+ E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+out:
+ if (ret_val)
+ E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_swflag_ich8lan - Release software control flag
+ * @hw: pointer to the HW structure
+ *
+ * Releases the software control flag for performing PHY and select
+ * MAC CSR accesses.
+ **/
+STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ DEBUGFUNC("e1000_release_swflag_ich8lan");
+
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+ } else {
+ DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
+ }
+
+ E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
+
+ return;
+}
+
+/**
+ * e1000_check_mng_mode_ich8lan - Checks management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has any manageability enabled.
+ * This is a function pointer entry point only called by read/write
+ * routines for the PHY and NVM parts.
+ **/
+STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ DEBUGFUNC("e1000_check_mng_mode_ich8lan");
+
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ * e1000_check_mng_mode_pchlan - Checks management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has iAMT enabled.
+ * This is a function pointer entry point only called by read/write
+ * routines for the PHY and NVM parts.
+ **/
+STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ DEBUGFUNC("e1000_check_mng_mode_pchlan");
+
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ * e1000_rar_set_pch2lan - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr. For 82579, RAR[0] is the base address register that is to
+ * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
+ * Use SHRA[0-3] in place of those reserved for ME.
+ **/
+STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ DEBUGFUNC("e1000_rar_set_pch2lan");
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] |
+ ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ if (index == 0) {
+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+ E1000_WRITE_FLUSH(hw);
+ return E1000_SUCCESS;
+ }
+
+ /* RAR[1-6] are owned by manageability. Skip those and program the
+ * next address into the SHRA register array.
+ */
+ if (index < (u32) (hw->mac.rar_entry_count)) {
+ s32 ret_val;
+
+ ret_val = e1000_acquire_swflag_ich8lan(hw);
+ if (ret_val)
+ goto out;
+
+ E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
+ E1000_WRITE_FLUSH(hw);
+
+ e1000_release_swflag_ich8lan(hw);
+
+ /* verify the register updates */
+ if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
+ (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
+ return E1000_SUCCESS;
+
+ DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
+ (index - 1), E1000_READ_REG(hw, E1000_FWSM));
+ }
+
+out:
+ DEBUGOUT1("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_rar_set_pch_lpt - Set receive address registers
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address register array at index to the address passed
+ * in by addr. For LPT, RAR[0] is the base address register that is to
+ * contain the MAC address. SHRA[0-10] are the shared receive address
+ * registers that are shared between the Host and manageability engine (ME).
+ **/
+STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+ u32 wlock_mac;
+
+ DEBUGFUNC("e1000_rar_set_pch_lpt");
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ if (index == 0) {
+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+ E1000_WRITE_FLUSH(hw);
+ return E1000_SUCCESS;
+ }
+
+ /* The manageability engine (ME) can lock certain SHRAR registers that
+ * it is using - those registers are unavailable for use.
+ */
+ if (index < hw->mac.rar_entry_count) {
+ wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_FWSM_WLOCK_MAC_MASK;
+ wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+ /* Check if all SHRAR registers are locked */
+ if (wlock_mac == 1)
+ goto out;
+
+ if ((wlock_mac == 0) || (index <= wlock_mac)) {
+ s32 ret_val;
+
+ ret_val = e1000_acquire_swflag_ich8lan(hw);
+
+ if (ret_val)
+ goto out;
+
+ E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
+ rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
+ rar_high);
+ E1000_WRITE_FLUSH(hw);
+
+ e1000_release_swflag_ich8lan(hw);
+
+ /* verify the register updates */
+ if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
+ (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
+ return E1000_SUCCESS;
+ }
+ }
+
+out:
+ DEBUGOUT1("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
+}
+
+#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
+/**
+ * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
+ u8 *mc_addr_list,
+ u32 mc_addr_count)
+{
+ u16 phy_reg = 0;
+ int i;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
+
+ e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+ if (ret_val)
+ goto release;
+
+ for (i = 0; i < hw->mac.mta_reg_count; i++) {
+ hw->phy.ops.write_reg_page(hw, BM_MTA(i),
+ (u16)(hw->mac.mta_shadow[i] &
+ 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
+ (u16)((hw->mac.mta_shadow[i] >> 16) &
+ 0xFFFF));
+ }
+
+ e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+release:
+ hw->phy.ops.release(hw);
+}
+
+#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
+/**
+ * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Checks if firmware is blocking the reset of the PHY.
+ * This is a function pointer entry point only called by
+ * reset routines.
+ **/
+STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+ bool blocked = false;
+ int i = 0;
+
+ DEBUGFUNC("e1000_check_reset_block_ich8lan");
+
+ do {
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
+ blocked = true;
+ msec_delay(10);
+ continue;
+ }
+ blocked = false;
+ } while (blocked && (i++ < 30));
+ return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
+ * @hw: pointer to the HW structure
+ *
+ * Assumes semaphore already acquired.
+ *
+ **/
+STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
+{
+ u16 phy_data;
+ u32 strap = E1000_READ_REG(hw, E1000_STRAP);
+ u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
+ E1000_STRAP_SMT_FREQ_SHIFT;
+ s32 ret_val;
+
+ strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~HV_SMB_ADDR_MASK;
+ phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
+ phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+
+ if (hw->phy.type == e1000_phy_i217) {
+ /* Restore SMBus frequency */
+ if (freq--) {
+ phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
+ phy_data |= (freq & (1 << 0)) <<
+ HV_SMB_ADDR_FREQ_LOW_SHIFT;
+ phy_data |= (freq & (1 << 1)) <<
+ (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
+ } else {
+ DEBUGOUT("Unsupported SMB frequency in PHY\n");
+ }
+ }
+
+ return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
+}
+
+/**
+ * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ *
+ * SW should configure the LCD from the NVM extended configuration region
+ * as a workaround for certain parts.
+ **/
+STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
+ s32 ret_val = E1000_SUCCESS;
+ u16 word_addr, reg_data, reg_addr, phy_page = 0;
+
+ DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
+
+ /* Initialize the PHY from the NVM on ICH platforms. This
+ * is needed due to an issue where the NVM configuration is
+ * not properly autoloaded after power transitions.
+ * Therefore, after each PHY reset, we will load the
+ * configuration data out of the NVM manually.
+ */
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ if (phy->type != e1000_phy_igp_3)
+ return ret_val;
+
+ if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
+ (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ break;
+ }
+ /* Fall-thru */
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+ break;
+ default:
+ return ret_val;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ data = E1000_READ_REG(hw, E1000_FEXTNVM);
+ if (!(data & sw_cfg_mask))
+ goto release;
+
+ /* Make sure HW does not configure LCD from PHY
+ * extended configuration before SW configuration
+ */
+ data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if ((hw->mac.type < e1000_pch2lan) &&
+ (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
+ goto release;
+
+ cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
+ cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+ cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+ if (!cnf_size)
+ goto release;
+
+ cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+ cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+ if (((hw->mac.type == e1000_pchlan) &&
+ !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
+ (hw->mac.type > e1000_pchlan)) {
+ /* HW configures the SMBus address and LEDs when the
+ * OEM and LCD Write Enable bits are set in the NVM.
+ * When both NVM bits are cleared, SW will configure
+ * them instead.
+ */
+ ret_val = e1000_write_smbus_addr(hw);
+ if (ret_val)
+ goto release;
+
+ data = E1000_READ_REG(hw, E1000_LEDCTL);
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
+ (u16)data);
+ if (ret_val)
+ goto release;
+ }
+
+ /* Configure LCD from extended configuration region. */
+
+ /* cnf_base_addr is in DWORD */
+ word_addr = (u16)(cnf_base_addr << 1);
+
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
+ &reg_data);
+ if (ret_val)
+ goto release;
+
+ ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
+ 1, &reg_addr);
+ if (ret_val)
+ goto release;
+
+ /* Save off the PHY page for future writes. */
+ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+ phy_page = reg_data;
+ continue;
+ }
+
+ reg_addr &= PHY_REG_MASK;
+ reg_addr |= phy_page;
+
+ ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
+ reg_data);
+ if (ret_val)
+ goto release;
+ }
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_k1_gig_workaround_hv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ * @link: link up bool flag
+ *
+ * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
+ * from a lower speed. This workaround disables K1 whenever link is at 1Gig
+ * If link is down, the function will restore the default K1 setting located
+ * in the NVM.
+ **/
+STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 status_reg = 0;
+ bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
+
+ DEBUGFUNC("e1000_k1_gig_workaround_hv");
+
+ if (hw->mac.type != e1000_pchlan)
+ return E1000_SUCCESS;
+
+ /* Wrap the whole flow with the sw flag */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
+ if (link) {
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
+ &status_reg);
+ if (ret_val)
+ goto release;
+
+ status_reg &= (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
+
+ if (status_reg == (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
+ k1_enable = false;
+ }
+
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
+ &status_reg);
+ if (ret_val)
+ goto release;
+
+ status_reg &= (HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_MASK);
+
+ if (status_reg == (HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_1000))
+ k1_enable = false;
+ }
+
+ /* Link stall fix for link up */
+ ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
+ 0x0100);
+ if (ret_val)
+ goto release;
+
+ } else {
+ /* Link stall fix for link down */
+ ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
+ 0x4100);
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
+
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_configure_k1_ich8lan - Configure K1 power state
+ * @hw: pointer to the HW structure
+ * @enable: K1 state to configure
+ *
+ * Configure the K1 power state based on the provided parameter.
+ * Assumes semaphore already acquired.
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ **/
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
+{
+ s32 ret_val;
+ u32 ctrl_reg = 0;
+ u32 ctrl_ext = 0;
+ u32 reg = 0;
+ u16 kmrn_reg = 0;
+
+ DEBUGFUNC("e1000_configure_k1_ich8lan");
+
+ ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+ &kmrn_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (k1_enable)
+ kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
+ else
+ kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
+
+ ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+ kmrn_reg);
+ if (ret_val)
+ return ret_val;
+
+ usec_delay(20);
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+
+ reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ reg |= E1000_CTRL_FRCSPD;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(20);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(20);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ * @d0_state: boolean if entering d0 or d3 device state
+ *
+ * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
+ * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
+ * in NVM determines whether HW should configure LPLU and Gbe Disable.
+ **/
+STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
+{
+ s32 ret_val = 0;
+ u32 mac_reg;
+ u16 oem_reg;
+
+ DEBUGFUNC("e1000_oem_bits_config_ich8lan");
+
+ if (hw->mac.type < e1000_pchlan)
+ return ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->mac.type == e1000_pchlan) {
+ mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+ goto release;
+ }
+
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
+ if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
+ goto release;
+
+ mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
+
+ ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+ if (ret_val)
+ goto release;
+
+ oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
+
+ if (d0_state) {
+ if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
+ oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+ if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
+ oem_reg |= HV_OEM_BITS_LPLU;
+ } else {
+ if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
+ oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+ if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
+ E1000_PHY_CTRL_NOND0A_LPLU))
+ oem_reg |= HV_OEM_BITS_LPLU;
+ }
+
+ /* Set Restart auto-neg to activate the bits */
+ if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
+ !hw->phy.ops.check_reset_block(hw))
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
+
+ ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
+
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+
+/**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
+
+ ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= HV_KMRN_MDIO_SLOW;
+
+ ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
+ * done after every PHY reset.
+ **/
+STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
+
+ if (hw->mac.type != e1000_pchlan)
+ return E1000_SUCCESS;
+
+ /* Set MDIO slow mode before any other MDIO access */
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (((hw->phy.type == e1000_phy_82577) &&
+ ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
+ ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
+ /* Disable generation of early preamble */
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
+ if (ret_val)
+ return ret_val;
+
+ /* Preamble tuning for SSC */
+ ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
+ 0xA204);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->phy.type == e1000_phy_82578) {
+ /* Return registers to default by doing a soft reset then
+ * writing 0x3140 to the control register.
+ */
+ if (hw->phy.revision < 2) {
+ e1000_phy_sw_reset_generic(hw);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
+ 0x3140);
+ }
+ }
+
+ /* Select page 0 */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy.addr = 1;
+ ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure the K1 Si workaround during phy reset assuming there is
+ * link so that it disables K1 if link is in 1Gbps.
+ */
+ ret_val = e1000_k1_gig_workaround_hv(hw, true);
+ if (ret_val)
+ return ret_val;
+
+ /* Workaround for link disconnects on a busy hub in half duplex */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
+ phy_data & 0x00FF);
+ if (ret_val)
+ goto release;
+
+ /* set MSE higher to enable link to stay up when noise is high */
+ ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
+ * @hw: pointer to the HW structure
+ **/
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+ u16 i, phy_reg = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+ if (ret_val)
+ goto release;
+
+ /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
+ for (i = 0; i < (hw->mac.rar_entry_count); i++) {
+ mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
+ (u16)((mac_reg >> 16) & 0xFFFF));
+
+ mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
+ (u16)((mac_reg & E1000_RAH_AV)
+ >> 16));
+ }
+
+ e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+release:
+ hw->phy.ops.release(hw);
+}
+
+#ifndef CRC32_OS_SUPPORT
+STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
+{
+ u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
+ u32 i, j, mask, crc;
+
+ DEBUGFUNC("e1000_calc_rx_da_crc");
+
+ crc = 0xffffffff;
+ for (i = 0; i < 6; i++) {
+ crc = crc ^ mac[i];
+ for (j = 8; j > 0; j--) {
+ mask = (crc & 1) * (-1);
+ crc = (crc >> 1) ^ (poly & mask);
+ }
+ }
+ return ~crc;
+}
+
+#endif /* CRC32_OS_SUPPORT */
+/**
+ * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
+ * with 82579 PHY
+ * @hw: pointer to the HW structure
+ * @enable: flag to enable/disable workaround when enabling/disabling jumbos
+ **/
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_reg, data;
+ u32 mac_reg;
+ u16 i;
+
+ DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
+
+ if (hw->mac.type < e1000_pch2lan)
+ return E1000_SUCCESS;
+
+ /* disable Rx path while enabling/disabling workaround */
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
+ phy_reg | (1 << 14));
+ if (ret_val)
+ return ret_val;
+
+ if (enable) {
+ /* Write Rx addresses (rar_entry_count for RAL/H, and
+ * SHRAL/H) and initial CRC values to the MAC
+ */
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ u8 mac_addr[ETH_ADDR_LEN] = {0};
+ u32 addr_high, addr_low;
+
+ addr_high = E1000_READ_REG(hw, E1000_RAH(i));
+ if (!(addr_high & E1000_RAH_AV))
+ continue;
+ addr_low = E1000_READ_REG(hw, E1000_RAL(i));
+ mac_addr[0] = (addr_low & 0xFF);
+ mac_addr[1] = ((addr_low >> 8) & 0xFF);
+ mac_addr[2] = ((addr_low >> 16) & 0xFF);
+ mac_addr[3] = ((addr_low >> 24) & 0xFF);
+ mac_addr[4] = (addr_high & 0xFF);
+ mac_addr[5] = ((addr_high >> 8) & 0xFF);
+
+#ifndef CRC32_OS_SUPPORT
+ E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
+ e1000_calc_rx_da_crc(mac_addr));
+#else /* CRC32_OS_SUPPORT */
+ E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
+ E1000_CRC32(ETH_ADDR_LEN, mac_addr));
+#endif /* CRC32_OS_SUPPORT */
+ }
+
+ /* Write Rx addresses to the PHY */
+ e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+ /* Enable jumbo frame workaround in the MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
+ mac_reg &= ~(1 << 14);
+ mac_reg |= (7 << 15);
+ E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
+
+ mac_reg = E1000_READ_REG(hw, E1000_RCTL);
+ mac_reg |= E1000_RCTL_SECRC;
+ E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
+
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data | (1 << 0));
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ return ret_val;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* Enable jumbo frame workaround in the PHY */
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ data |= (0x37 << 5);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
+ data &= ~(1 << 13);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (E1000_TX_PTR_GAP << 2);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
+ ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
+ (1 << 10));
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Write MAC register values back to h/w defaults */
+ mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
+ mac_reg &= ~(0xF << 14);
+ E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
+
+ mac_reg = E1000_READ_REG(hw, E1000_RCTL);
+ mac_reg &= ~E1000_RCTL_SECRC;
+ E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
+
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data & ~(1 << 0));
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ return ret_val;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* Write PHY register values back to h/w defaults */
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
+ data |= (1 << 13);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (0x8 << 2);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
+ ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
+ ~(1 << 10));
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* re-enable Rx path after enabling/disabling workaround */
+ return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
+ ~(1 << 14));
+}
+
+/**
+ * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
+ * done after every PHY reset.
+ **/
+STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
+
+ if (hw->mac.type != e1000_pch2lan)
+ return E1000_SUCCESS;
+
+ /* Set MDIO slow mode before any other MDIO access */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ /* set MSE higher to enable link to stay up when noise is high */
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
+ if (ret_val)
+ goto release;
+ /* drop link after 5 times MSE threshold was reached */
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_k1_gig_workaround_lv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
+ * Disable K1 for 1000 and 100 speeds
+ **/
+STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 status_reg = 0;
+
+ DEBUGFUNC("e1000_k1_workaround_lv");
+
+ if (hw->mac.type != e1000_pch2lan)
+ return E1000_SUCCESS;
+
+ /* Set K1 beacon duration based on 10Mbs speed */
+ ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
+ == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
+ if (status_reg &
+ (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
+ u16 pm_phy_reg;
+
+ /* LV 1G/100 Packet drop issue wa */
+ ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
+ &pm_phy_reg);
+ if (ret_val)
+ return ret_val;
+ pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
+ ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
+ pm_phy_reg);
+ if (ret_val)
+ return ret_val;
+ } else {
+ u32 mac_reg;
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
+ * @hw: pointer to the HW structure
+ * @gate: boolean set to true to gate, false to ungate
+ *
+ * Gate/ungate the automatic PHY configuration via hardware; perform
+ * the configuration via software instead.
+ **/
+STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
+{
+ u32 extcnf_ctrl;
+
+ DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
+
+ if (hw->mac.type < e1000_pch2lan)
+ return;
+
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+
+ if (gate)
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ else
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+}
+
+/**
+ * e1000_lan_init_done_ich8lan - Check for PHY config completion
+ * @hw: pointer to the HW structure
+ *
+ * Check the appropriate indication the MAC has finished configuring the
+ * PHY after a software reset.
+ **/
+STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
+{
+ u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
+
+ DEBUGFUNC("e1000_lan_init_done_ich8lan");
+
+ /* Wait for basic configuration completes before proceeding */
+ do {
+ data = E1000_READ_REG(hw, E1000_STATUS);
+ data &= E1000_STATUS_LAN_INIT_DONE;
+ usec_delay(100);
+ } while ((!data) && --loop);
+
+ /* If basic configuration is incomplete before the above loop
+ * count reaches 0, loading the configuration from NVM will
+ * leave the PHY in a bad state possibly resulting in no link.
+ */
+ if (loop == 0)
+ DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
+
+ /* Clear the Init Done bit for the next init event */
+ data = E1000_READ_REG(hw, E1000_STATUS);
+ data &= ~E1000_STATUS_LAN_INIT_DONE;
+ E1000_WRITE_REG(hw, E1000_STATUS, data);
+}
+
+/**
+ * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 reg;
+
+ DEBUGFUNC("e1000_post_phy_reset_ich8lan");
+
+ if (hw->phy.ops.check_reset_block(hw))
+ return E1000_SUCCESS;
+
+ /* Allow time for h/w to get to quiescent state after reset */
+ msec_delay(10);
+
+ /* Perform any necessary post-reset workarounds */
+ switch (hw->mac.type) {
+ case e1000_pchlan:
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_pch2lan:
+ ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ break;
+ }
+
+ /* Clear the host wakeup bit after lcd reset */
+ if (hw->mac.type >= e1000_pchlan) {
+ hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
+ reg &= ~BM_WUC_HOST_WU_BIT;
+ hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
+ }
+
+ /* Configure the LCD with the extended configuration region in NVM */
+ ret_val = e1000_sw_lcd_config_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure the LCD with the OEM bits in NVM */
+ ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+
+ if (hw->mac.type == e1000_pch2lan) {
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if (!(E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_ICH_FWSM_FW_VALID)) {
+ msec_delay(10);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
+ /* Set EEE LPI Update Timer to 200usec */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_emi_reg_locked(hw,
+ I82579_LPI_UPDATE_TIMER,
+ 0x1387);
+ hw->phy.ops.release(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY
+ * This is a function pointer entry point called by drivers
+ * or other shared routines.
+ **/
+STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
+
+ /* Gate automatic PHY configuration by hardware on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+ ret_val = e1000_phy_hw_reset_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_post_phy_reset_ich8lan(hw);
+}
+
+/**
+ * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU state according to the active flag. For PCH, if OEM write
+ * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
+ * the phy speed. This function will manually set the LPLU bit and restart
+ * auto-neg as hw would do. D3 and D0 LPLU will call the same function
+ * since it configures the same bit.
+ **/
+STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
+{
+ s32 ret_val;
+ u16 oem_reg;
+
+ DEBUGFUNC("e1000_set_lplu_state_pchlan");
+ ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (active)
+ oem_reg |= HV_OEM_BITS_LPLU;
+ else
+ oem_reg &= ~HV_OEM_BITS_LPLU;
+
+ if (!hw->phy.ops.check_reset_block(hw))
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
+
+ return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
+}
+
+/**
+ * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 phy_ctrl;
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
+
+ if (phy->type == e1000_phy_ife)
+ return E1000_SUCCESS;
+
+ phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+
+ if (active) {
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* Call gig speed drop workaround on LPLU before accessing
+ * any PHY registers
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ e1000_gig_downshift_workaround_ich8lan(hw);
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D3 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 phy_ctrl;
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
+
+ phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+
+ if (!active) {
+ phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* Call gig speed drop workaround on LPLU before accessing
+ * any PHY registers
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ e1000_gig_downshift_workaround_ich8lan(hw);
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
+ * @hw: pointer to the HW structure
+ * @bank: pointer to the variable that returns the active bank
+ *
+ * Reads signature byte from the NVM using the flash access registers.
+ * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
+ **/
+STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
+{
+ u32 eecd;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
+ u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
+ u32 nvm_dword = 0;
+ u8 sig_byte = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
+
+ switch (hw->mac.type) {
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
+ bank1_offset = nvm->flash_bank_size;
+ act_offset = E1000_ICH_NVM_SIG_WORD;
+
+ /* set bank to 0 in case flash read fails */
+ *bank = 0;
+
+ /* Check bank 0 */
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
+ &nvm_dword);
+ if (ret_val)
+ return ret_val;
+ sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 0;
+ return E1000_SUCCESS;
+ }
+
+ /* Check bank 1 */
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
+ bank1_offset,
+ &nvm_dword);
+ if (ret_val)
+ return ret_val;
+ sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 1;
+ return E1000_SUCCESS;
+ }
+
+ DEBUGOUT("ERROR: No valid NVM bank present\n");
+ return -E1000_ERR_NVM;
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
+ E1000_EECD_SEC1VAL_VALID_MASK) {
+ if (eecd & E1000_EECD_SEC1VAL)
+ *bank = 1;
+ else
+ *bank = 0;
+
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
+ /* fall-thru */
+ default:
+ /* set bank to 0 in case flash read fails */
+ *bank = 0;
+
+ /* Check bank 0 */
+ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
+ &sig_byte);
+ if (ret_val)
+ return ret_val;
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 0;
+ return E1000_SUCCESS;
+ }
+
+ /* Check bank 1 */
+ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
+ bank1_offset,
+ &sig_byte);
+ if (ret_val)
+ return ret_val;
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 1;
+ return E1000_SUCCESS;
+ }
+
+ DEBUGOUT("ERROR: No valid NVM bank present\n");
+ return -E1000_ERR_NVM;
+ }
+}
+
+/**
+ * e1000_read_nvm_spt - NVM access for SPT
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to read.
+ * @words: Size of data to read in words.
+ * @data: pointer to the word(s) to read at offset.
+ *
+ * Reads a word(s) from the NVM
+ **/
+STATIC s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 act_offset;
+ s32 ret_val = E1000_SUCCESS;
+ u32 bank = 0;
+ u32 dword = 0;
+ u16 offset_to_read;
+ u16 i;
+
+ DEBUGFUNC("e1000_read_nvm_spt");
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ nvm->ops.acquire(hw);
+
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ act_offset = (bank) ? nvm->flash_bank_size : 0;
+ act_offset += offset;
+
+ ret_val = E1000_SUCCESS;
+
+ for (i = 0; i < words; i += 2) {
+ if (words - i == 1) {
+ if (dev_spec->shadow_ram[offset+i].modified) {
+ data[i] = dev_spec->shadow_ram[offset+i].value;
+ } else {
+ offset_to_read = act_offset + i -
+ ((act_offset + i) % 2);
+ ret_val =
+ e1000_read_flash_dword_ich8lan(hw,
+ offset_to_read,
+ &dword);
+ if (ret_val)
+ break;
+ if ((act_offset + i) % 2 == 0)
+ data[i] = (u16)(dword & 0xFFFF);
+ else
+ data[i] = (u16)((dword >> 16) & 0xFFFF);
+ }
+ } else {
+ offset_to_read = act_offset + i;
+ if (!(dev_spec->shadow_ram[offset+i].modified) ||
+ !(dev_spec->shadow_ram[offset+i+1].modified)) {
+ ret_val =
+ e1000_read_flash_dword_ich8lan(hw,
+ offset_to_read,
+ &dword);
+ if (ret_val)
+ break;
+ }
+ if (dev_spec->shadow_ram[offset+i].modified)
+ data[i] = dev_spec->shadow_ram[offset+i].value;
+ else
+ data[i] = (u16) (dword & 0xFFFF);
+ if (dev_spec->shadow_ram[offset+i].modified)
+ data[i+1] =
+ dev_spec->shadow_ram[offset+i+1].value;
+ else
+ data[i+1] = (u16) (dword >> 16 & 0xFFFF);
+ }
+ }
+
+ nvm->ops.release(hw);
+
+out:
+ if (ret_val)
+ DEBUGOUT1("NVM read error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_nvm_ich8lan - Read word(s) from the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to read.
+ * @words: Size of data to read in words
+ * @data: Pointer to the word(s) to read at offset.
+ *
+ * Reads a word(s) from the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 act_offset;
+ s32 ret_val = E1000_SUCCESS;
+ u32 bank = 0;
+ u16 i, word;
+
+ DEBUGFUNC("e1000_read_nvm_ich8lan");
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ nvm->ops.acquire(hw);
+
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ act_offset = (bank) ? nvm->flash_bank_size : 0;
+ act_offset += offset;
+
+ ret_val = E1000_SUCCESS;
+ for (i = 0; i < words; i++) {
+ if (dev_spec->shadow_ram[offset+i].modified) {
+ data[i] = dev_spec->shadow_ram[offset+i].value;
+ } else {
+ ret_val = e1000_read_flash_word_ich8lan(hw,
+ act_offset + i,
+ &word);
+ if (ret_val)
+ break;
+ data[i] = word;
+ }
+ }
+
+ nvm->ops.release(hw);
+
+out:
+ if (ret_val)
+ DEBUGOUT1("NVM read error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * e1000_flash_cycle_init_ich8lan - Initialize flash
+ * @hw: pointer to the HW structure
+ *
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ **/
+STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
+{
+ union ich8_hws_flash_status hsfsts;
+ s32 ret_val = -E1000_ERR_NVM;
+
+ DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
+
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+ /* Check if the flash descriptor is valid */
+ if (!hsfsts.hsf_status.fldesvalid) {
+ DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
+ return -E1000_ERR_NVM;
+ }
+
+ /* Clear FCERR and DAEL in hw status by writing 1 */
+ hsfsts.hsf_status.flcerr = 1;
+ hsfsts.hsf_status.dael = 1;
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval & 0xFFFF);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+
+ /* Either we should have a hardware SPI cycle in progress
+ * bit to check against, in order to start a new cycle or
+ * FDONE bit should be changed in the hardware so that it
+ * is 1 after hardware reset, which can then be used as an
+ * indication whether a cycle is in progress or has been
+ * completed.
+ */
+
+ if (!hsfsts.hsf_status.flcinprog) {
+ /* There is no cycle running at present,
+ * so we can start a cycle.
+ * Begin by setting Flash Cycle Done.
+ */
+ hsfsts.hsf_status.flcdone = 1;
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval & 0xFFFF);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval);
+ ret_val = E1000_SUCCESS;
+ } else {
+ s32 i;
+
+ /* Otherwise poll for sometime so the current
+ * cycle has a chance to end before giving up.
+ */
+ for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFSTS);
+ if (!hsfsts.hsf_status.flcinprog) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(1);
+ }
+ if (ret_val == E1000_SUCCESS) {
+ /* Successful in waiting for previous cycle to timeout,
+ * now set the Flash Cycle Done.
+ */
+ hsfsts.hsf_status.flcdone = 1;
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval & 0xFFFF);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval);
+ } else {
+ DEBUGOUT("Flash controller busy, cannot get access\n");
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
+ * @hw: pointer to the HW structure
+ * @timeout: maximum time to wait for completion
+ *
+ * This function starts a flash cycle and waits for its completion.
+ **/
+STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
+{
+ union ich8_hws_flash_ctrl hsflctl;
+ union ich8_hws_flash_status hsfsts;
+ u32 i = 0;
+
+ DEBUGFUNC("e1000_flash_cycle_ich8lan");
+
+ /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+ if (hw->mac.type >= e1000_pch_spt)
+ hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
+ else
+ hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcgo = 1;
+
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /* wait till FDONE bit is set to 1 */
+ do {
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcdone)
+ break;
+ usec_delay(1);
+ } while (i++ < timeout);
+
+ if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
+ return E1000_SUCCESS;
+
+ return -E1000_ERR_NVM;
+}
+
+/**
+ * e1000_read_flash_dword_ich8lan - Read dword from flash
+ * @hw: pointer to the HW structure
+ * @offset: offset to data location
+ * @data: pointer to the location for storing the data
+ *
+ * Reads the flash dword at offset into data. Offset is converted
+ * to bytes before read.
+ **/
+STATIC s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data)
+{
+ DEBUGFUNC("e1000_read_flash_dword_ich8lan");
+
+ if (!data)
+ return -E1000_ERR_NVM;
+
+ /* Must convert word offset into bytes. */
+ offset <<= 1;
+
+ return e1000_read_flash_data32_ich8lan(hw, offset, data);
+}
+
+/**
+ * e1000_read_flash_word_ich8lan - Read word from flash
+ * @hw: pointer to the HW structure
+ * @offset: offset to data location
+ * @data: pointer to the location for storing the data
+ *
+ * Reads the flash word at offset into data. Offset is converted
+ * to bytes before read.
+ **/
+STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ DEBUGFUNC("e1000_read_flash_word_ich8lan");
+
+ if (!data)
+ return -E1000_ERR_NVM;
+
+ /* Must convert offset into bytes. */
+ offset <<= 1;
+
+ return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
+}
+
+/**
+ * e1000_read_flash_byte_ich8lan - Read byte from flash
+ * @hw: pointer to the HW structure
+ * @offset: The offset of the byte to read.
+ * @data: Pointer to a byte to store the value read.
+ *
+ * Reads a single byte from the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 *data)
+{
+ s32 ret_val;
+ u16 word = 0;
+
+ /* In SPT, only 32 bits access is supported,
+ * so this function should not be called.
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ return -E1000_ERR_NVM;
+ else
+ ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+
+ if (ret_val)
+ return ret_val;
+
+ *data = (u8)word;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_flash_data_ich8lan - Read byte or word from NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the byte or word to read.
+ * @size: Size of data to read, 1=byte 2=word
+ * @data: Pointer to the word to store the value read.
+ *
+ * Reads a byte or word from the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 size, u16 *data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ u32 flash_data = 0;
+ s32 ret_val = -E1000_ERR_NVM;
+ u8 count = 0;
+
+ DEBUGFUNC("e1000_read_flash_data_ich8lan");
+
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+
+ do {
+ usec_delay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val != E1000_SUCCESS)
+ break;
+ hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+ ret_val = e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+ /* Check if FCERR is set to 1, if set to 1, clear it
+ * and try the whole sequence a few more times, else
+ * read in (shift in) the Flash Data0, the order is
+ * least significant byte first msb to lsb
+ */
+ if (ret_val == E1000_SUCCESS) {
+ flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
+ if (size == 1)
+ *data = (u8)(flash_data & 0x000000FF);
+ else if (size == 2)
+ *data = (u16)(flash_data & 0x0000FFFF);
+ break;
+ } else {
+ /* If we've gotten here, then things are probably
+ * completely hosed, but if the error condition is
+ * detected, it won't hurt to give it another try...
+ * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (!hsfsts.hsf_status.flcdone) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_flash_data32_ich8lan - Read dword from NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the dword to read.
+ * @data: Pointer to the dword to store the value read.
+ *
+ * Reads a byte or word from the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ s32 ret_val = -E1000_ERR_NVM;
+ u8 count = 0;
+
+ DEBUGFUNC("e1000_read_flash_data_ich8lan");
+
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
+ hw->mac.type < e1000_pch_spt)
+ return -E1000_ERR_NVM;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+
+ do {
+ usec_delay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val != E1000_SUCCESS)
+ break;
+ /* In SPT, This register is in Lan memory space, not flash.
+ * Therefore, only 32 bit access is supported
+ */
+ hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
+
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ /* In SPT, This register is in Lan memory space, not flash.
+ * Therefore, only 32 bit access is supported
+ */
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ (u32)hsflctl.regval << 16);
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+ ret_val = e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+ /* Check if FCERR is set to 1, if set to 1, clear it
+ * and try the whole sequence a few more times, else
+ * read in (shift in) the Flash Data0, the order is
+ * least significant byte first msb to lsb
+ */
+ if (ret_val == E1000_SUCCESS) {
+ *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
+ break;
+ } else {
+ /* If we've gotten here, then things are probably
+ * completely hosed, but if the error condition is
+ * detected, it won't hurt to give it another try...
+ * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (!hsfsts.hsf_status.flcdone) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_ich8lan - Write word(s) to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to write.
+ * @words: Size of data to write in words
+ * @data: Pointer to the word(s) to write at offset.
+ *
+ * Writes a byte or word to the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u16 i;
+
+ DEBUGFUNC("e1000_write_nvm_ich8lan");
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ nvm->ops.acquire(hw);
+
+ for (i = 0; i < words; i++) {
+ dev_spec->shadow_ram[offset+i].modified = true;
+ dev_spec->shadow_ram[offset+i].value = data[i];
+ }
+
+ nvm->ops.release(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_update_nvm_checksum_spt - Update the checksum for NVM
+ * @hw: pointer to the HW structure
+ *
+ * The NVM checksum is updated by calling the generic update_nvm_checksum,
+ * which writes the checksum to the shadow ram. The changes in the shadow
+ * ram are then committed to the EEPROM by processing each bank at a time
+ * checking for the modified bit and writing only the pending changes.
+ * After a successful commit, the shadow ram is cleared and is ready for
+ * future writes.
+ **/
+STATIC s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+ s32 ret_val;
+ u32 dword = 0;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_spt");
+
+ ret_val = e1000_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ goto out;
+
+ if (nvm->type != e1000_nvm_flash_sw)
+ goto out;
+
+ nvm->ops.acquire(hw);
+
+ /* We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written
+ */
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ if (bank == 0) {
+ new_bank_offset = nvm->flash_bank_size;
+ old_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+ if (ret_val)
+ goto release;
+ } else {
+ old_bank_offset = nvm->flash_bank_size;
+ new_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+ if (ret_val)
+ goto release;
+ }
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
+ /* Determine whether to write the value stored
+ * in the other NVM bank or a modified value stored
+ * in the shadow RAM
+ */
+ ret_val = e1000_read_flash_dword_ich8lan(hw,
+ i + old_bank_offset,
+ &dword);
+
+ if (dev_spec->shadow_ram[i].modified) {
+ dword &= 0xffff0000;
+ dword |= (dev_spec->shadow_ram[i].value & 0xffff);
+ }
+ if (dev_spec->shadow_ram[i + 1].modified) {
+ dword &= 0x0000ffff;
+ dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
+ << 16);
+ }
+ if (ret_val)
+ break;
+
+ /* If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress
+ */
+ if (i == E1000_ICH_NVM_SIG_WORD - 1)
+ dword |= E1000_ICH_NVM_SIG_MASK << 16;
+
+ /* Convert offset to bytes. */
+ act_offset = (i + new_bank_offset) << 1;
+
+ usec_delay(100);
+
+ /* Write the data to the new bank. Offset in words*/
+ act_offset = i + new_bank_offset;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
+ dword);
+ if (ret_val)
+ break;
+ }
+
+ /* Don't bother writing the segment valid bits if sector
+ * programming failed.
+ */
+ if (ret_val) {
+ DEBUGOUT("Flash commit failed.\n");
+ goto release;
+ }
+
+ /* Finally validate the new segment by setting bit 15:14
+ * to 10b in word 0x13 , this can be done without an
+ * erase as well since these bits are 11 to start with
+ * and we need to change bit 14 to 0b
+ */
+ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+
+ /*offset in words but we read dword*/
+ --act_offset;
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+ if (ret_val)
+ goto release;
+
+ dword &= 0xBFFFFFFF;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+ if (ret_val)
+ goto release;
+
+ /* And invalidate the previously valid segment by setting
+ * its signature word (0x13) high_byte to 0b. This can be
+ * done without an erase because flash erase sets all bits
+ * to 1's. We can write 1's to 0's without an erase
+ */
+ act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+
+ /* offset in words but we read dword*/
+ act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+ if (ret_val)
+ goto release;
+
+ dword &= 0x00FFFFFF;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+ if (ret_val)
+ goto release;
+
+ /* Great! Everything worked, we can now clear the cached entries. */
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ /* Reload the EEPROM, or else modifications will not appear
+ * until after the next adapter reset.
+ */
+ if (!ret_val) {
+ nvm->ops.reload(hw);
+ msec_delay(10);
+ }
+
+out:
+ if (ret_val)
+ DEBUGOUT1("NVM update error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ * @hw: pointer to the HW structure
+ *
+ * The NVM checksum is updated by calling the generic update_nvm_checksum,
+ * which writes the checksum to the shadow ram. The changes in the shadow
+ * ram are then committed to the EEPROM by processing each bank at a time
+ * checking for the modified bit and writing only the pending changes.
+ * After a successful commit, the shadow ram is cleared and is ready for
+ * future writes.
+ **/
+STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+ s32 ret_val;
+ u16 data = 0;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
+
+ ret_val = e1000_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ goto out;
+
+ if (nvm->type != e1000_nvm_flash_sw)
+ goto out;
+
+ nvm->ops.acquire(hw);
+
+ /* We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written
+ */
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ if (bank == 0) {
+ new_bank_offset = nvm->flash_bank_size;
+ old_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+ if (ret_val)
+ goto release;
+ } else {
+ old_bank_offset = nvm->flash_bank_size;
+ new_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+ if (ret_val)
+ goto release;
+ }
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ if (dev_spec->shadow_ram[i].modified) {
+ data = dev_spec->shadow_ram[i].value;
+ } else {
+ ret_val = e1000_read_flash_word_ich8lan(hw, i +
+ old_bank_offset,
+ &data);
+ if (ret_val)
+ break;
+ }
+ /* If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress
+ */
+ if (i == E1000_ICH_NVM_SIG_WORD)
+ data |= E1000_ICH_NVM_SIG_MASK;
+
+ /* Convert offset to bytes. */
+ act_offset = (i + new_bank_offset) << 1;
+
+ usec_delay(100);
+
+ /* Write the bytes to the new bank. */
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+ act_offset,
+ (u8)data);
+ if (ret_val)
+ break;
+
+ usec_delay(100);
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+ act_offset + 1,
+ (u8)(data >> 8));
+ if (ret_val)
+ break;
+ }
+
+ /* Don't bother writing the segment valid bits if sector
+ * programming failed.
+ */
+ if (ret_val) {
+ DEBUGOUT("Flash commit failed.\n");
+ goto release;
+ }
+
+ /* Finally validate the new segment by setting bit 15:14
+ * to 10b in word 0x13 , this can be done without an
+ * erase as well since these bits are 11 to start with
+ * and we need to change bit 14 to 0b
+ */
+ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+ ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
+ if (ret_val)
+ goto release;
+
+ data &= 0xBFFF;
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
+ (u8)(data >> 8));
+ if (ret_val)
+ goto release;
+
+ /* And invalidate the previously valid segment by setting
+ * its signature word (0x13) high_byte to 0b. This can be
+ * done without an erase because flash erase sets all bits
+ * to 1's. We can write 1's to 0's without an erase
+ */
+ act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
+
+ if (ret_val)
+ goto release;
+
+ /* Great! Everything worked, we can now clear the cached entries. */
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ /* Reload the EEPROM, or else modifications will not appear
+ * until after the next adapter reset.
+ */
+ if (!ret_val) {
+ nvm->ops.reload(hw);
+ msec_delay(10);
+ }
+
+out:
+ if (ret_val)
+ DEBUGOUT1("NVM update error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
+ * If the bit is 0, that the EEPROM had been modified, but the checksum was not
+ * calculated, in which case we need to calculate the checksum and set bit 6.
+ **/
+STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+ u16 word;
+ u16 valid_csum_mask;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
+
+ /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
+ * the checksum needs to be fixed. This bit is an indication that
+ * the NVM was prepared by OEM software and did not calculate
+ * the checksum...a likely scenario.
+ */
+ switch (hw->mac.type) {
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
+ word = NVM_COMPAT;
+ valid_csum_mask = NVM_COMPAT_VALID_CSUM;
+ break;
+ default:
+ word = NVM_FUTURE_INIT_WORD1;
+ valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
+ break;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, word, 1, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(data & valid_csum_mask)) {
+ data |= valid_csum_mask;
+ ret_val = hw->nvm.ops.write(hw, word, 1, &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->nvm.ops.update(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return e1000_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the byte/word to read.
+ * @size: Size of data to read, 1=byte 2=word
+ * @data: The byte(s) to write to the NVM.
+ *
+ * Writes one/two bytes to the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 size, u16 data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ u32 flash_data = 0;
+ s32 ret_val;
+ u8 count = 0;
+
+ DEBUGFUNC("e1000_write_ich8_data");
+
+ if (hw->mac.type >= e1000_pch_spt) {
+ if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ } else {
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ }
+
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+
+ do {
+ usec_delay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val != E1000_SUCCESS)
+ break;
+ /* In SPT, This register is in Lan memory space, not
+ * flash. Therefore, only 32 bit access is supported
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ hsflctl.regval =
+ E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
+ else
+ hsflctl.regval =
+ E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+ /* In SPT, This register is in Lan memory space,
+ * not flash. Therefore, only 32 bit access is
+ * supported
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
+ hsflctl.regval);
+
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+ if (size == 1)
+ flash_data = (u32)data & 0x00FF;
+ else
+ flash_data = (u32)data;
+
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
+
+ /* check if FCERR is set to 1 , if set to 1, clear it
+ * and try the whole sequence a few more times else done
+ */
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+ if (ret_val == E1000_SUCCESS)
+ break;
+
+ /* If we're here, then things are most likely
+ * completely hosed, but if the error condition
+ * is detected, it won't hurt to give it another
+ * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr)
+ /* Repeat for some time before giving up. */
+ continue;
+ if (!hsfsts.hsf_status.flcdone) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
+* e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
+* @hw: pointer to the HW structure
+* @offset: The offset (in bytes) of the dwords to read.
+* @data: The 4 bytes to write to the NVM.
+*
+* Writes one/two/four bytes to the NVM using the flash access registers.
+**/
+STATIC s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ s32 ret_val;
+ u8 count = 0;
+
+ DEBUGFUNC("e1000_write_flash_data32_ich8lan");
+
+ if (hw->mac.type >= e1000_pch_spt) {
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ }
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+ do {
+ usec_delay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val != E1000_SUCCESS)
+ break;
+
+ /* In SPT, This register is in Lan memory space, not
+ * flash. Therefore, only 32 bit access is supported
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ hsflctl.regval = E1000_READ_FLASH_REG(hw,
+ ICH_FLASH_HSFSTS)
+ >> 16;
+ else
+ hsflctl.regval = E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFCTL);
+
+ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+
+ /* In SPT, This register is in Lan memory space,
+ * not flash. Therefore, only 32 bit access is
+ * supported
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
+ hsflctl.regval);
+
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
+
+ /* check if FCERR is set to 1 , if set to 1, clear it
+ * and try the whole sequence a few more times else done
+ */
+ ret_val = e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+
+ if (ret_val == E1000_SUCCESS)
+ break;
+
+ /* If we're here, then things are most likely
+ * completely hosed, but if the error condition
+ * is detected, it won't hurt to give it another
+ * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+ if (hsfsts.hsf_status.flcerr)
+ /* Repeat for some time before giving up. */
+ continue;
+ if (!hsfsts.hsf_status.flcdone) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The index of the byte to read.
+ * @data: The byte to write to the NVM.
+ *
+ * Writes a single byte to the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 data)
+{
+ u16 word = (u16)data;
+
+ DEBUGFUNC("e1000_write_flash_byte_ich8lan");
+
+ return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
+}
+
+/**
+* e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
+* @hw: pointer to the HW structure
+* @offset: The offset of the word to write.
+* @dword: The dword to write to the NVM.
+*
+* Writes a single dword to the NVM using the flash access registers.
+* Goes through a retry algorithm before giving up.
+**/
+STATIC s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 dword)
+{
+ s32 ret_val;
+ u16 program_retries;
+
+ DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
+
+ /* Must convert word offset into bytes. */
+ offset <<= 1;
+
+ ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+
+ if (!ret_val)
+ return ret_val;
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ DEBUGOUT2("Retrying Byte %8.8X at offset %u\n", dword, offset);
+ usec_delay(100);
+ ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+ if (ret_val == E1000_SUCCESS)
+ break;
+ }
+ if (program_retries == 100)
+ return -E1000_ERR_NVM;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset of the byte to write.
+ * @byte: The byte to write to the NVM.
+ *
+ * Writes a single byte to the NVM using the flash access registers.
+ * Goes through a retry algorithm before giving up.
+ **/
+STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+ u32 offset, u8 byte)
+{
+ s32 ret_val;
+ u16 program_retries;
+
+ DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
+
+ ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+ if (!ret_val)
+ return ret_val;
+
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
+ usec_delay(100);
+ ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+ if (ret_val == E1000_SUCCESS)
+ break;
+ }
+ if (program_retries == 100)
+ return -E1000_ERR_NVM;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
+ * @hw: pointer to the HW structure
+ * @bank: 0 for first bank, 1 for second bank, etc.
+ *
+ * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
+ * bank N is 4096 * N + flash_reg_addr.
+ **/
+STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ /* bank size is in 16bit words - adjust to bytes */
+ u32 flash_bank_size = nvm->flash_bank_size * 2;
+ s32 ret_val;
+ s32 count = 0;
+ s32 j, iteration, sector_size;
+
+ DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
+
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+ /* Determine HW Sector size: Read BERASE bits of hw flash status
+ * register
+ * 00: The Hw sector is 256 bytes, hence we need to erase 16
+ * consecutive sectors. The start index for the nth Hw sector
+ * can be calculated as = bank * 4096 + n * 256
+ * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+ * The start index for the nth Hw sector can be calculated
+ * as = bank * 4096
+ * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
+ * (ich9 only, otherwise error condition)
+ * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
+ */
+ switch (hsfsts.hsf_status.berasesz) {
+ case 0:
+ /* Hw sector size 256 */
+ sector_size = ICH_FLASH_SEG_SIZE_256;
+ iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
+ break;
+ case 1:
+ sector_size = ICH_FLASH_SEG_SIZE_4K;
+ iteration = 1;
+ break;
+ case 2:
+ sector_size = ICH_FLASH_SEG_SIZE_8K;
+ iteration = 1;
+ break;
+ case 3:
+ sector_size = ICH_FLASH_SEG_SIZE_64K;
+ iteration = 1;
+ break;
+ default:
+ return -E1000_ERR_NVM;
+ }
+
+ /* Start with the base address, then add the sector offset. */
+ flash_linear_addr = hw->nvm.flash_base_addr;
+ flash_linear_addr += (bank) ? flash_bank_size : 0;
+
+ for (j = 0; j < iteration; j++) {
+ do {
+ u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
+
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Write a value 11 (block Erase) in Flash
+ * Cycle field in hw flash control
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ hsflctl.regval =
+ E1000_READ_FLASH_REG(hw,
+ ICH_FLASH_HSFSTS)>>16;
+ else
+ hsflctl.regval =
+ E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFCTL);
+
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
+ hsflctl.regval);
+
+ /* Write the last 24 bits of an index within the
+ * block into Flash Linear address field in Flash
+ * Address.
+ */
+ flash_linear_addr += (j * sector_size);
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
+ flash_linear_addr);
+
+ ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
+ if (ret_val == E1000_SUCCESS)
+ break;
+
+ /* Check if FCERR is set to 1. If 1,
+ * clear it and try the whole sequence
+ * a few more times else Done
+ */
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr)
+ /* repeat for some time before giving up */
+ continue;
+ else if (!hsfsts.hsf_status.flcdone)
+ return ret_val;
+ } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_valid_led_default_ich8lan - Set the default LED settings
+ * @hw: pointer to the HW structure
+ * @data: Pointer to the LED settings
+ *
+ * Reads the LED default settings from the NVM to data. If the NVM LED
+ * settings is all 0's or F's, set the LED default to a valid LED default
+ * setting.
+ **/
+STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_ich8lan");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT_ICH8LAN;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_id_led_init_pchlan - store LED configurations
+ * @hw: pointer to the HW structure
+ *
+ * PCH does not control LEDs via the LEDCTL register, rather it uses
+ * the PHY LED configuration register.
+ *
+ * PCH also does not have an "always on" or "always off" mode which
+ * complicates the ID feature. Instead of using the "on" mode to indicate
+ * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
+ * use "link_up" mode. The LEDs will still ID on request if there is no
+ * link based on logic in e1000_led_[on|off]_pchlan().
+ **/
+STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
+ u16 data, i, temp, shift;
+
+ DEBUGFUNC("e1000_id_led_init_pchlan");
+
+ /* Get default ID LED modes */
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+ return ret_val;
+
+ mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+ for (i = 0; i < 4; i++) {
+ temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
+ shift = (i * 5);
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_on << shift);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_on << shift);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
+ * @hw: pointer to the HW structure
+ *
+ * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
+ * register, so the bus width is hard coded.
+ **/
+STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_get_bus_info_ich8lan");
+
+ ret_val = e1000_get_bus_info_pcie_generic(hw);
+
+ /* ICH devices are "PCI Express"-ish. They have
+ * a configuration space, but do not contain
+ * PCI Express Capability registers, so bus width
+ * must be hardcoded.
+ */
+ if (bus->width == e1000_bus_width_unknown)
+ bus->width = e1000_bus_width_pcie_x1;
+
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_ich8lan - Reset the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Does a full reset of the hardware which includes a reset of the PHY and
+ * MAC.
+ **/
+STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u16 kum_cfg;
+ u32 ctrl, reg;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_reset_hw_ich8lan");
+
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ /* Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC
+ * with the global reset.
+ */
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ /* Workaround for ICH8 bit corruption issue in FIFO memory */
+ if (hw->mac.type == e1000_ich8lan) {
+ /* Set Tx and Rx buffer allocation to 8k apiece. */
+ E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
+ /* Set Packet Buffer Size to 16k. */
+ E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
+ }
+
+ if (hw->mac.type == e1000_pchlan) {
+ /* Save the NVM K1 bit setting*/
+ ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
+ if (ret_val)
+ return ret_val;
+
+ if (kum_cfg & E1000_NVM_K1_ENABLE)
+ dev_spec->nvm_k1_enabled = true;
+ else
+ dev_spec->nvm_k1_enabled = false;
+ }
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ if (!hw->phy.ops.check_reset_block(hw)) {
+ /* Full-chip reset requires MAC and PHY reset at the same
+ * time to make sure the interface between MAC and the
+ * external PHY is reset.
+ */
+ ctrl |= E1000_CTRL_PHY_RST;
+
+ /* Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+ }
+ ret_val = e1000_acquire_swflag_ich8lan(hw);
+ DEBUGOUT("Issuing a global reset to ich8lan\n");
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
+ /* cannot issue a flush here because it hangs the hardware */
+ msec_delay(20);
+
+ /* Set Phy Config Counter to 50msec */
+ if (hw->mac.type == e1000_pch2lan) {
+ reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
+ reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+ reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
+ }
+
+ if (!ret_val)
+ E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
+
+ if (ctrl & E1000_CTRL_PHY_RST) {
+ ret_val = hw->phy.ops.get_cfg_done(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* For PCH, this write will make sure that any noise
+ * will be detected as a CRC error and be dropped rather than show up
+ * as a bad packet to the DMA engine.
+ */
+ if (hw->mac.type == e1000_pchlan)
+ E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
+
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ reg = E1000_READ_REG(hw, E1000_KABGTXD);
+ reg |= E1000_KABGTXD_BGSQLBIAS;
+ E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_hw_ich8lan - Initialize the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Prepares the hardware for transmit and receive by doing the following:
+ * - initialize hardware bits
+ * - initialize LED identification
+ * - setup receive address registers
+ * - setup flow control
+ * - setup transmit descriptors
+ * - clear statistics
+ **/
+STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 ctrl_ext, txdctl, snoop;
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_init_hw_ich8lan");
+
+ e1000_initialize_hw_bits_ich8lan(hw);
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
+ if (ret_val)
+ DEBUGOUT("Error initializing identification LED\n");
+
+ /* Setup the receive address. */
+ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* The 82578 Rx buffer will stall if wakeup is enabled in host and
+ * the ME. Disable wakeup by clearing the host wakeup bit.
+ * Reset the phy after disabling host wakeup to reset the Rx buffer.
+ */
+ if (hw->phy.type == e1000_phy_82578) {
+ hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
+ i &= ~BM_WUC_HOST_WU_BIT;
+ hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
+ ret_val = e1000_phy_hw_reset_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ /* Set the transmit descriptor write-back policy for both queues */
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
+
+ /* ICH8 has opposite polarity of no_snoop bits.
+ * By default, we should use snoop behavior.
+ */
+ if (mac->type == e1000_ich8lan)
+ snoop = PCIE_ICH8_SNOOP_ALL;
+ else
+ snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
+ e1000_set_pcie_no_snoop_generic(hw, snoop);
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_ich8lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
+ * @hw: pointer to the HW structure
+ *
+ * Sets/Clears required hardware bits necessary for correctly setting up the
+ * hardware for transmit and receive.
+ **/
+STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
+
+ /* Extended Device Control */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg |= (1 << 22);
+ /* Enable PHY low-power state when MAC is at D3 w/o WoL */
+ if (hw->mac.type >= e1000_pchlan)
+ reg |= E1000_CTRL_EXT_PHYPDEN;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* Transmit Descriptor Control 0 */
+ reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
+
+ /* Transmit Descriptor Control 1 */
+ reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
+
+ /* Transmit Arbitration Control 0 */
+ reg = E1000_READ_REG(hw, E1000_TARC(0));
+ if (hw->mac.type == e1000_ich8lan)
+ reg |= (1 << 28) | (1 << 29);
+ reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+ E1000_WRITE_REG(hw, E1000_TARC(0), reg);
+
+ /* Transmit Arbitration Control 1 */
+ reg = E1000_READ_REG(hw, E1000_TARC(1));
+ if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
+ reg &= ~(1 << 28);
+ else
+ reg |= (1 << 28);
+ reg |= (1 << 24) | (1 << 26) | (1 << 30);
+ E1000_WRITE_REG(hw, E1000_TARC(1), reg);
+
+ /* Device Status */
+ if (hw->mac.type == e1000_ich8lan) {
+ reg = E1000_READ_REG(hw, E1000_STATUS);
+ reg &= ~(1 << 31);
+ E1000_WRITE_REG(hw, E1000_STATUS, reg);
+ }
+
+ /* work-around descriptor data corruption issue during nfs v2 udp
+ * traffic, just disable the nfs filtering capability
+ */
+ reg = E1000_READ_REG(hw, E1000_RFCTL);
+ reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
+
+ /* Disable IPv6 extension header parsing because some malformed
+ * IPv6 headers can hang the Rx.
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+ E1000_WRITE_REG(hw, E1000_RFCTL, reg);
+
+ /* Enable ECC on Lynxpoint */
+ if (hw->mac.type >= e1000_pch_lpt) {
+ reg = E1000_READ_REG(hw, E1000_PBECCSTS);
+ reg |= E1000_PBECCSTS_ECC_ENABLE;
+ E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
+
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg |= E1000_CTRL_MEHE;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+ }
+
+ return;
+}
+
+/**
+ * e1000_setup_link_ich8lan - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_link_ich8lan");
+
+ if (hw->phy.ops.check_reset_block(hw))
+ return E1000_SUCCESS;
+
+ /* ICH parts do not have a word in the NVM to determine
+ * the default flow control setting, so we explicitly
+ * set it to full.
+ */
+ if (hw->fc.requested_mode == e1000_fc_default)
+ hw->fc.requested_mode = e1000_fc_full;
+
+ /* Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
+ hw->fc.current_mode);
+
+ /* Continue to configure the copper link. */
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
+ if (ret_val)
+ return ret_val;
+
+ E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
+ (hw->phy.type == e1000_phy_i217) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
+
+ ret_val = hw->phy.ops.write_reg(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 27),
+ hw->fc.pause_time);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return e1000_set_fc_watermarks_generic(hw);
+}
+
+/**
+ * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
+ * @hw: pointer to the HW structure
+ *
+ * Configures the kumeran interface to the PHY to wait the appropriate time
+ * when polling the PHY, then call the generic setup_copper_link to finish
+ * configuring the copper link.
+ **/
+STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 reg_data;
+
+ DEBUGFUNC("e1000_setup_copper_link_ich8lan");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Set the mac to wait the maximum time between each iteration
+ * and increase the max iterations when polling the phy;
+ * this fixes erroneous timeouts at 10Mbps.
+ */
+ ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
+ 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ switch (hw->phy.type) {
+ case e1000_phy_igp_3:
+ ret_val = e1000_copper_link_setup_igp(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_bm:
+ case e1000_phy_82578:
+ ret_val = e1000_copper_link_setup_m88(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_82577:
+ case e1000_phy_82579:
+ ret_val = e1000_copper_link_setup_82577(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_ife:
+ ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ reg_data &= ~IFE_PMC_AUTO_MDIX;
+
+ switch (hw->phy.mdix) {
+ case 1:
+ reg_data &= ~IFE_PMC_FORCE_MDIX;
+ break;
+ case 2:
+ reg_data |= IFE_PMC_FORCE_MDIX;
+ break;
+ case 0:
+ default:
+ reg_data |= IFE_PMC_AUTO_MDIX;
+ break;
+ }
+ ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ break;
+ }
+
+ return e1000_setup_copper_link_generic(hw);
+}
+
+/**
+ * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY specific link setup function and then calls the
+ * generic setup_copper_link to finish configuring the link for
+ * Lynxpoint PCH devices
+ **/
+STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ ret_val = e1000_copper_link_setup_82577(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_setup_copper_link_generic(hw);
+}
+
+/**
+ * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to store current link speed
+ * @duplex: pointer to store the current link duplex
+ *
+ * Calls the generic get_speed_and_duplex to retrieve the current link
+ * information and then calls the Kumeran lock loss workaround for links at
+ * gigabit speeds.
+ **/
+STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_get_link_up_info_ich8lan");
+
+ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac.type == e1000_ich8lan) &&
+ (hw->phy.type == e1000_phy_igp_3) &&
+ (*speed == SPEED_1000)) {
+ ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
+ * @hw: pointer to the HW structure
+ *
+ * Work-around for 82566 Kumeran PCS lock loss:
+ * On link status change (i.e. PCI reset, speed change) and link is up and
+ * speed is gigabit-
+ * 0) if workaround is optionally disabled do nothing
+ * 1) wait 1ms for Kumeran link to come up
+ * 2) check Kumeran Diagnostic register PCS lock loss bit
+ * 3) if not set the link is locked (all is good), otherwise...
+ * 4) reset the PHY
+ * 5) repeat up to 10 times
+ * Note: this is only called for IGP3 copper when speed is 1gb.
+ **/
+STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 phy_ctrl;
+ s32 ret_val;
+ u16 i, data;
+ bool link;
+
+ DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
+
+ if (!dev_spec->kmrn_lock_loss_workaround_enabled)
+ return E1000_SUCCESS;
+
+ /* Make sure link is up before proceeding. If not just return.
+ * Attempting this while link is negotiating fouled up link
+ * stability
+ */
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (!link)
+ return E1000_SUCCESS;
+
+ for (i = 0; i < 10; i++) {
+ /* read once to clear */
+ ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
+ if (ret_val)
+ return ret_val;
+ /* and again to get new status */
+ ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* check for PCS lock */
+ if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+ return E1000_SUCCESS;
+
+ /* Issue PHY reset */
+ hw->phy.ops.reset(hw);
+ msec_delay_irq(5);
+ }
+ /* Disable GigE link negotiation */
+ phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+ phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ /* Call gig speed drop workaround on Gig disable before accessing
+ * any PHY registers
+ */
+ e1000_gig_downshift_workaround_ich8lan(hw);
+
+ /* unable to acquire PCS lock */
+ return -E1000_ERR_PHY;
+}
+
+/**
+ * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
+ * @hw: pointer to the HW structure
+ * @state: boolean value used to set the current Kumeran workaround state
+ *
+ * If ICH8, set the current Kumeran workaround state (enabled - true
+ * /disabled - false).
+ **/
+void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+
+ DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
+
+ if (hw->mac.type != e1000_ich8lan) {
+ DEBUGOUT("Workaround applies to ICH8 only.\n");
+ return;
+ }
+
+ dev_spec->kmrn_lock_loss_workaround_enabled = state;
+
+ return;
+}
+
+/**
+ * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ * @hw: pointer to the HW structure
+ *
+ * Workaround for 82566 power-down on D3 entry:
+ * 1) disable gigabit link
+ * 2) write VR power-down enable
+ * 3) read it back
+ * Continue if successful, else issue LCD reset and repeat
+ **/
+void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
+{
+ u32 reg;
+ u16 data;
+ u8 retry = 0;
+
+ DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
+
+ if (hw->phy.type != e1000_phy_igp_3)
+ return;
+
+ /* Try the workaround twice (if needed) */
+ do {
+ /* Disable link */
+ reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
+ reg |= (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
+
+ /* Call gig speed drop workaround on Gig disable before
+ * accessing any PHY registers
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ e1000_gig_downshift_workaround_ich8lan(hw);
+
+ /* Write VR power-down enable */
+ hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
+ data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+ hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
+ data | IGP3_VR_CTRL_MODE_SHUTDOWN);
+
+ /* Read it back and test */
+ hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
+ data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+ if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
+ break;
+
+ /* Issue PHY reset and repeat at most one more time */
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
+ retry++;
+ } while (retry);
+}
+
+/**
+ * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
+ * @hw: pointer to the HW structure
+ *
+ * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
+ * LPLU, Gig disable, MDIC PHY reset):
+ * 1) Set Kumeran Near-end loopback
+ * 2) Clear Kumeran Near-end loopback
+ * Should only be called for ICH8[m] devices with any 1G Phy.
+ **/
+void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 reg_data;
+
+ DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
+
+ if ((hw->mac.type != e1000_ich8lan) ||
+ (hw->phy.type == e1000_phy_ife))
+ return;
+
+ ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ &reg_data);
+ if (ret_val)
+ return;
+ reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ reg_data);
+ if (ret_val)
+ return;
+ reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
+ e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ reg_data);
+}
+
+/**
+ * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
+ * @hw: pointer to the HW structure
+ *
+ * During S0 to Sx transition, it is possible the link remains at gig
+ * instead of negotiating to a lower speed. Before going to Sx, set
+ * 'Gig Disable' to force link speed negotiation to a lower speed based on
+ * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
+ * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
+ * needs to be written.
+ * Parts that support (and are linked to a partner which support) EEE in
+ * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
+ * than 10Mbps w/o EEE.
+ **/
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 phy_ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
+
+ phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+ phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
+
+ if (hw->phy.type == e1000_phy_i217) {
+ u16 phy_reg, device_id = hw->device_id;
+
+ if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+ (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+ (device_id == E1000_DEV_ID_PCH_I218_V3) ||
+ (hw->mac.type >= e1000_pch_spt)) {
+ u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
+
+ E1000_WRITE_REG(hw, E1000_FEXTNVM6,
+ fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ if (!dev_spec->eee_disable) {
+ u16 eee_advert;
+
+ ret_val =
+ e1000_read_emi_reg_locked(hw,
+ I217_EEE_ADVERTISEMENT,
+ &eee_advert);
+ if (ret_val)
+ goto release;
+
+ /* Disable LPLU if both link partners support 100BaseT
+ * EEE and 100Full is advertised on both ends of the
+ * link, and enable Auto Enable LPI since there will
+ * be no driver to enable LPI while in Sx.
+ */
+ if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
+ (dev_spec->eee_lp_ability &
+ I82579_EEE_100_SUPPORTED) &&
+ (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
+ phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
+ E1000_PHY_CTRL_NOND0A_LPLU);
+
+ /* Set Auto Enable LPI after link up */
+ hw->phy.ops.read_reg_locked(hw,
+ I217_LPI_GPIO_CTRL,
+ &phy_reg);
+ phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+ hw->phy.ops.write_reg_locked(hw,
+ I217_LPI_GPIO_CTRL,
+ phy_reg);
+ }
+ }
+
+ /* For i217 Intel Rapid Start Technology support,
+ * when the system is going into Sx and no manageability engine
+ * is present, the driver must configure proxy to reset only on
+ * power good. LPI (Low Power Idle) state must also reset only
+ * on power good, as well as the MTA (Multicast table array).
+ * The SMBus release must also be disabled on LCD reset.
+ */
+ if (!(E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_ICH_FWSM_FW_VALID)) {
+ /* Enable proxy to reset only on power good. */
+ hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
+ &phy_reg);
+ phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
+ hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
+ phy_reg);
+
+ /* Set bit enable LPI (EEE) to reset only on
+ * power good.
+ */
+ hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
+ phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
+ hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
+
+ /* Disable the SMB release on LCD reset. */
+ hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
+ phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
+ hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
+ }
+
+ /* Enable MTA to reset for Intel Rapid Start Technology
+ * Support
+ */
+ hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
+ phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
+ hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
+
+release:
+ hw->phy.ops.release(hw);
+ }
+out:
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ if (hw->mac.type == e1000_ich8lan)
+ e1000_gig_downshift_workaround_ich8lan(hw);
+
+ if (hw->mac.type >= e1000_pchlan) {
+ e1000_oem_bits_config_ich8lan(hw, false);
+
+ /* Reset PHY to activate OEM bits on 82577/8 */
+ if (hw->mac.type == e1000_pchlan)
+ e1000_phy_hw_reset_generic(hw);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ e1000_write_smbus_addr(hw);
+ hw->phy.ops.release(hw);
+ }
+
+ return;
+}
+
+/**
+ * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
+ * @hw: pointer to the HW structure
+ *
+ * During Sx to S0 transitions on non-managed devices or managed devices
+ * on which PHY resets are not blocked, if the PHY registers cannot be
+ * accessed properly by the s/w toggle the LANPHYPC value to power cycle
+ * the PHY.
+ * On i217, setup Intel Rapid Start Technology.
+ **/
+u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_resume_workarounds_pchlan");
+ if (hw->mac.type < e1000_pch2lan)
+ return E1000_SUCCESS;
+
+ ret_val = e1000_init_phy_workarounds_pchlan(hw);
+ if (ret_val) {
+ DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
+ return ret_val;
+ }
+
+ /* For i217 Intel Rapid Start Technology support when the system
+ * is transitioning from Sx and no manageability engine is present
+ * configure SMBus to restore on reset, disable proxy, and enable
+ * the reset on MTA (Multicast table array).
+ */
+ if (hw->phy.type == e1000_phy_i217) {
+ u16 phy_reg;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ DEBUGOUT("Failed to setup iRST\n");
+ return ret_val;
+ }
+
+ /* Clear Auto Enable LPI after link up */
+ hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
+ phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+ hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
+
+ if (!(E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_ICH_FWSM_FW_VALID)) {
+ /* Restore clear on SMB if no manageability engine
+ * is present
+ */
+ ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
+ hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
+
+ /* Disable Proxy */
+ hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
+ }
+ /* Enable reset on MTA */
+ ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
+ hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
+release:
+ if (ret_val)
+ DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
+ hw->phy.ops.release(hw);
+ return ret_val;
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_cleanup_led_ich8lan - Restore the default LED operation
+ * @hw: pointer to the HW structure
+ *
+ * Return the LED back to the default configuration.
+ **/
+STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_cleanup_led_ich8lan");
+
+ if (hw->phy.type == e1000_phy_ife)
+ return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ 0);
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_on_ich8lan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_led_on_ich8lan");
+
+ if (hw->phy.type == e1000_phy_ife)
+ return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off_ich8lan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_led_off_ich8lan");
+
+ if (hw->phy.type == e1000_phy_ife)
+ return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_led_pchlan - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use.
+ **/
+STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_setup_led_pchlan");
+
+ return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
+ (u16)hw->mac.ledctl_mode1);
+}
+
+/**
+ * e1000_cleanup_led_pchlan - Restore the default LED operation
+ * @hw: pointer to the HW structure
+ *
+ * Return the LED back to the default configuration.
+ **/
+STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_cleanup_led_pchlan");
+
+ return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
+ (u16)hw->mac.ledctl_default);
+}
+
+/**
+ * e1000_led_on_pchlan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
+{
+ u16 data = (u16)hw->mac.ledctl_mode2;
+ u32 i, led;
+
+ DEBUGFUNC("e1000_led_on_pchlan");
+
+ /* If no link, then turn LED on by setting the invert bit
+ * for each LED that's mode is "link_up" in ledctl_mode2.
+ */
+ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ for (i = 0; i < 3; i++) {
+ led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+ if ((led & E1000_PHY_LED0_MODE_MASK) !=
+ E1000_LEDCTL_MODE_LINK_UP)
+ continue;
+ if (led & E1000_PHY_LED0_IVRT)
+ data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+ else
+ data |= (E1000_PHY_LED0_IVRT << (i * 5));
+ }
+ }
+
+ return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ * e1000_led_off_pchlan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
+{
+ u16 data = (u16)hw->mac.ledctl_mode1;
+ u32 i, led;
+
+ DEBUGFUNC("e1000_led_off_pchlan");
+
+ /* If no link, then turn LED off by clearing the invert bit
+ * for each LED that's mode is "link_up" in ledctl_mode1.
+ */
+ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ for (i = 0; i < 3; i++) {
+ led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+ if ((led & E1000_PHY_LED0_MODE_MASK) !=
+ E1000_LEDCTL_MODE_LINK_UP)
+ continue;
+ if (led & E1000_PHY_LED0_IVRT)
+ data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+ else
+ data |= (E1000_PHY_LED0_IVRT << (i * 5));
+ }
+ }
+
+ return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Read appropriate register for the config done bit for completion status
+ * and configure the PHY through s/w for EEPROM-less parts.
+ *
+ * NOTE: some silicon which is EEPROM-less will fail trying to read the
+ * config done bit, so only an error is logged and continues. If we were
+ * to return with error, EEPROM-less silicon would not be able to be reset
+ * or change link.
+ **/
+STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 bank = 0;
+ u32 status;
+
+ DEBUGFUNC("e1000_get_cfg_done_ich8lan");
+
+ e1000_get_cfg_done_generic(hw);
+
+ /* Wait for indication from h/w that it has completed basic config */
+ if (hw->mac.type >= e1000_ich10lan) {
+ e1000_lan_init_done_ich8lan(hw);
+ } else {
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+ /* When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ DEBUGOUT("Auto Read Done did not complete\n");
+ ret_val = E1000_SUCCESS;
+ }
+ }
+
+ /* Clear PHY Reset Asserted bit */
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_PHYRA)
+ E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
+ else
+ DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
+
+ /* If EEPROM is not marked present, init the IGP 3 PHY manually */
+ if (hw->mac.type <= e1000_ich9lan) {
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
+ (hw->phy.type == e1000_phy_igp_3)) {
+ e1000_phy_init_script_igp3(hw);
+ }
+ } else {
+ if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
+ /* Maybe we should do a basic PHY config */
+ DEBUGOUT("EEPROM not present\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, remove the link.
+ **/
+STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(hw->mac.ops.check_mng_mode(hw) ||
+ hw->phy.ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears hardware counters specific to the silicon family and calls
+ * clear_hw_cntrs_generic to clear all general purpose counters.
+ **/
+STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
+{
+ u16 phy_data;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+
+ E1000_READ_REG(hw, E1000_IAC);
+ E1000_READ_REG(hw, E1000_ICRXOC);
+
+ /* Clear PHY statistics registers */
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
+ (hw->phy.type == e1000_phy_i217) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ ret_val = hw->phy.ops.set_page(hw,
+ HV_STATS_PAGE << IGP_PAGE_SHIFT);
+ if (ret_val)
+ goto release;
+ hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
+release:
+ hw->phy.ops.release(hw);
+ }
+}
+
+/**
+ * e1000_configure_k0s_lpt - Configure K0s power state
+ * @hw: pointer to the HW structure
+ * @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3.
+ * 0 corresponds to 128ns, each value over 0 doubles the duration.
+ * @min_time: Minimum Tx idle period allowed - valid values are 0 to 4.
+ * 0 corresponds to 128ns, each value over 0 doubles the duration.
+ *
+ * Configure the K1 power state based on the provided parameter.
+ * Assumes semaphore already acquired.
+ *
+ * Success returns 0, Failure returns:
+ * -E1000_ERR_PHY (-2) in case of access error
+ * -E1000_ERR_PARAM (-4) in case of parameters error
+ **/
+s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time)
+{
+ s32 ret_val;
+ u16 kmrn_reg = 0;
+
+ DEBUGFUNC("e1000_configure_k0s_lpt");
+
+ if (entry_latency > 3 || min_time > 4)
+ return -E1000_ERR_PARAM;
+
+ ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
+ &kmrn_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* for now don't touch the latency */
+ kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK);
+ kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT));
+
+ ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
+ kmrn_reg);
+ if (ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.h
new file mode 100644
index 00000000..bc4ed1dd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.h
@@ -0,0 +1,339 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_ICH8LAN_H_
+#define _E1000_ICH8LAN_H_
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+
+/* Requires up to 10 seconds when MNG might be accessing part. */
+#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
+
+#define ICH_CYCLE_READ 0
+#define ICH_CYCLE_WRITE 2
+#define ICH_CYCLE_ERASE 3
+
+#define FLASH_GFPREG_BASE_MASK 0x1FFF
+#define FLASH_SECTOR_ADDR_SHIFT 12
+
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_8K 8192
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID 0x00008000
+#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
+
+#define E1000_ICH_MNG_IAMT_MODE 0x2
+
+#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
+#define E1000_FWSM_WLOCK_MAC_SHIFT 7
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
+#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
+
+/* Shared Receive Address Registers */
+#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
+#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
+
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
+#define E1000_H2ME 0x05B50 /* Host to ME */
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
+#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */
+#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */
+
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
+#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_OFF1_ON2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC000
+#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
+#define E1000_ICH_NVM_SIG_VALUE 0x80
+
+#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
+
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
+/* FEXT register bit definition */
+#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004
+
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
+#define E1000_FEXTNVM_SW_CONFIG 1
+#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */
+
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
+
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+
+#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
+#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
+#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000
+/* bit for disabling packet buffer read */
+#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000
+#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
+#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
+#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800
+#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000
+#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200
+#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000
+
+/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
+#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
+
+#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field*/
+#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs*/
+#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */
+#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
+#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
+
+#define E1000_ICH_RAR_ENTRIES 7
+#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
+#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+ ((reg) & MAX_PHY_REG_ADDRESS))
+#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
+#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
+
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
+#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
+#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
+
+/* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
+#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
+#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
+#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
+#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
+#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
+#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
+#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+
+#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
+#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
+#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
+#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
+#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
+#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
+#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
+
+#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
+#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
+#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
+#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
+#define HV_STATS_PAGE 778
+/* Half-duplex collision counts */
+#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */
+#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. */
+#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */
+#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */
+#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
+#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */
+#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
+
+#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+#define K1_ENTRY_LATENCY 0
+#define K1_MIN_TIME 1
+
+/* SMBus Control Phy Register */
+#define CV_SMB_CTRL PHY_REG(769, 23)
+#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
+
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
+/* I218 Ultra Low Power Configuration 1 Register */
+#define I218_ULP_CONFIG1 PHY_REG(779, 16)
+#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */
+#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */
+#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */
+#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */
+#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */
+#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */
+/* enable ULP even if when phy powered down via lanphypc */
+#define I218_ULP_CONFIG1_EN_ULP_LANPHYPC 0x0400
+/* disable clear of sticky ULP on PERST */
+#define I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST 0x0800
+#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */
+
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK 0x007F
+#define HV_SMB_ADDR_PEC_EN 0x0200
+#define HV_SMB_ADDR_VALID 0x0080
+#define HV_SMB_ADDR_FREQ_MASK 0x1100
+#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
+#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP 0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
+#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
+#define E1000_STRAP_SMT_FREQ_SHIFT 12
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
+
+/* PHY Power Management Control */
+#define HV_PM_CTRL PHY_REG(770, 17)
+#define HV_PM_CTRL_K1_CLK_REQ 0x200
+#define HV_PM_CTRL_K1_ENABLE 0x4000
+
+#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
+#define I217_PLL_CLOCK_GATE_MASK 0x07FF
+
+#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
+
+/* Inband Control */
+#define I217_INBAND_CTRL PHY_REG(770, 18)
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8
+
+/* Low Power Idle GPIO Control */
+#define I217_LPI_GPIO_CTRL PHY_REG(772, 18)
+#define I217_LPI_GPIO_CTRL_AUTO_EN_LPI 0x0800
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_100_ENABLE 0x2000
+#define I82579_LPI_CTRL_1000_ENABLE 0x4000
+#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+
+/* 82579 DFT Control */
+#define I82579_DFT_CTRL PHY_REG(769, 20)
+#define I82579_DFT_CTRL_GATE_PHY_RESET 0x0040 /* Gate PHY Reset on MAC Reset */
+
+/* Extended Management Interface (EMI) Registers */
+#define I82579_EMI_ADDR 0x10
+#define I82579_EMI_DATA 0x11
+#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
+#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */
+#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
+#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
+#define I82579_RX_CONFIG 0x3412 /* Receive configuration */
+#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */
+#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */
+#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
+#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
+#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
+#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
+#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
+#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */
+#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
+#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
+#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
+#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
+#define I217_RX_CONFIG 0xB20C /* Receive configuration */
+
+#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */
+#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */
+
+/* Intel Rapid Start Technology Support */
+#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
+#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
+#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
+#define I217_CGFREG PHY_REG(772, 29)
+#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
+#define I217_MEMPWR PHY_REG(772, 26)
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
+
+/* Receive Address Initial CRC Calculation */
+#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
+
+#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
+#define E1000_PCI_REVISION_ID_REG 0x08
+#endif /* defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) */
+void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state);
+void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time);
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw);
+#ifdef ULP_SUPPORT
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx);
+s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
+#endif /* ULP_SUPPORT */
+#endif /* _E1000_ICH8LAN_H_ */
+void e1000_demote_ltr(struct e1000_hw *hw, bool demote, bool link);
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.c
new file mode 100644
index 00000000..a0f3a999
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.c
@@ -0,0 +1,2249 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
+STATIC void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+STATIC void e1000_config_collision_dist_generic(struct e1000_hw *hw);
+STATIC int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+
+/**
+ * e1000_init_mac_ops_generic - Initialize MAC function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Setups up the function pointers to no-op functions
+ **/
+void e1000_init_mac_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ DEBUGFUNC("e1000_init_mac_ops_generic");
+
+ /* General Setup */
+ mac->ops.init_params = e1000_null_ops_generic;
+ mac->ops.init_hw = e1000_null_ops_generic;
+ mac->ops.reset_hw = e1000_null_ops_generic;
+ mac->ops.setup_physical_interface = e1000_null_ops_generic;
+ mac->ops.get_bus_info = e1000_null_ops_generic;
+ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
+ mac->ops.read_mac_addr = e1000_read_mac_addr_generic;
+ mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
+ mac->ops.clear_hw_cntrs = e1000_null_mac_generic;
+ /* LED */
+ mac->ops.cleanup_led = e1000_null_ops_generic;
+ mac->ops.setup_led = e1000_null_ops_generic;
+ mac->ops.blink_led = e1000_null_ops_generic;
+ mac->ops.led_on = e1000_null_ops_generic;
+ mac->ops.led_off = e1000_null_ops_generic;
+ /* LINK */
+ mac->ops.setup_link = e1000_null_ops_generic;
+ mac->ops.get_link_up_info = e1000_null_link_info;
+ mac->ops.check_for_link = e1000_null_ops_generic;
+ /* Management */
+ mac->ops.check_mng_mode = e1000_null_mng_mode;
+ /* VLAN, MC, etc. */
+ mac->ops.update_mc_addr_list = e1000_null_update_mc;
+ mac->ops.clear_vfta = e1000_null_mac_generic;
+ mac->ops.write_vfta = e1000_null_write_vfta;
+ mac->ops.rar_set = e1000_rar_set_generic;
+ mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
+}
+
+/**
+ * e1000_null_ops_generic - No-op function, returns 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_ops_generic");
+ UNREFERENCED_1PARAMETER(hw);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_mac_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_mac_generic");
+ UNREFERENCED_1PARAMETER(hw);
+ return;
+}
+
+/**
+ * e1000_null_link_info - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d)
+{
+ DEBUGFUNC("e1000_null_link_info");
+ UNREFERENCED_3PARAMETER(hw, s, d);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_mng_mode - No-op function, return false
+ * @hw: pointer to the HW structure
+ **/
+bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_mng_mode");
+ UNREFERENCED_1PARAMETER(hw);
+ return false;
+}
+
+/**
+ * e1000_null_update_mc - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
+{
+ DEBUGFUNC("e1000_null_update_mc");
+ UNREFERENCED_3PARAMETER(hw, h, a);
+ return;
+}
+
+/**
+ * e1000_null_write_vfta - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b)
+{
+ DEBUGFUNC("e1000_null_write_vfta");
+ UNREFERENCED_3PARAMETER(hw, a, b);
+ return;
+}
+
+/**
+ * e1000_null_rar_set - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
+{
+ DEBUGFUNC("e1000_null_rar_set");
+ UNREFERENCED_3PARAMETER(hw, h, a);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_bus_info_pci_generic - Get PCI(x) bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and stored:
+ * bus speed, bus width, type (PCI/PCIx), and PCI(-x) function.
+ **/
+s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_bus_info *bus = &hw->bus;
+ u32 status = E1000_READ_REG(hw, E1000_STATUS);
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_get_bus_info_pci_generic");
+
+ /* PCI or PCI-X? */
+ bus->type = (status & E1000_STATUS_PCIX_MODE)
+ ? e1000_bus_type_pcix
+ : e1000_bus_type_pci;
+
+ /* Bus speed */
+ if (bus->type == e1000_bus_type_pci) {
+ bus->speed = (status & E1000_STATUS_PCI66)
+ ? e1000_bus_speed_66
+ : e1000_bus_speed_33;
+ } else {
+ switch (status & E1000_STATUS_PCIX_SPEED) {
+ case E1000_STATUS_PCIX_SPEED_66:
+ bus->speed = e1000_bus_speed_66;
+ break;
+ case E1000_STATUS_PCIX_SPEED_100:
+ bus->speed = e1000_bus_speed_100;
+ break;
+ case E1000_STATUS_PCIX_SPEED_133:
+ bus->speed = e1000_bus_speed_133;
+ break;
+ default:
+ bus->speed = e1000_bus_speed_reserved;
+ break;
+ }
+ }
+
+ /* Bus width */
+ bus->width = (status & E1000_STATUS_BUS64)
+ ? e1000_bus_width_64
+ : e1000_bus_width_32;
+
+ /* Which PCI(-X) function? */
+ mac->ops.set_lan_id(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_bus_info_pcie_generic - Get PCIe bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and stored:
+ * bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_bus_info *bus = &hw->bus;
+ s32 ret_val;
+ u16 pcie_link_status;
+
+ DEBUGFUNC("e1000_get_bus_info_pcie_generic");
+
+ bus->type = e1000_bus_type_pci_express;
+
+ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS,
+ &pcie_link_status);
+ if (ret_val) {
+ bus->width = e1000_bus_width_unknown;
+ bus->speed = e1000_bus_speed_unknown;
+ } else {
+ switch (pcie_link_status & PCIE_LINK_SPEED_MASK) {
+ case PCIE_LINK_SPEED_2500:
+ bus->speed = e1000_bus_speed_2500;
+ break;
+ case PCIE_LINK_SPEED_5000:
+ bus->speed = e1000_bus_speed_5000;
+ break;
+ default:
+ bus->speed = e1000_bus_speed_unknown;
+ break;
+ }
+
+ bus->width = (enum e1000_bus_width)((pcie_link_status &
+ PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT);
+ }
+
+ mac->ops.set_lan_id(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+STATIC void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ /* The status register reports the correct function number
+ * for the device regardless of function swap state.
+ */
+ reg = E1000_READ_REG(hw, E1000_STATUS);
+ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ * e1000_set_lan_id_multi_port_pci - Set LAN id for PCI multiple port devices
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading PCI config space.
+ **/
+void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ u16 pci_header_type;
+ u32 status;
+
+ e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
+ if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ bus->func = (status & E1000_STATUS_FUNC_MASK)
+ >> E1000_STATUS_FUNC_SHIFT;
+ } else {
+ bus->func = 0;
+ }
+}
+
+/**
+ * e1000_set_lan_id_single_port - Set LAN id for a single port device
+ * @hw: pointer to the HW structure
+ *
+ * Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+
+ bus->func = 0;
+}
+
+/**
+ * e1000_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+ u32 offset;
+
+ DEBUGFUNC("e1000_clear_vfta_generic");
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/**
+ * e1000_write_vfta_generic - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ DEBUGFUNC("e1000_write_vfta_generic");
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_init_rx_addrs_generic - Initialize receive address's
+ * @hw: pointer to the HW structure
+ * @rar_count: receive address registers
+ *
+ * Setup the receive address registers by setting the base receive address
+ * register to the devices MAC address and clearing all the other receive
+ * address registers to 0.
+ **/
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
+{
+ u32 i;
+ u8 mac_addr[ETH_ADDR_LEN] = {0};
+
+ DEBUGFUNC("e1000_init_rx_addrs_generic");
+
+ /* Setup the receive address */
+ DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+ /* Zero out the other (rar_entry_count - 1) receive addresses */
+ DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
+ for (i = 1; i < rar_count; i++)
+ hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the nvm for an alternate MAC address. An alternate MAC address
+ * can be setup by pre-boot software and must be treated like a permanent
+ * address and must override the actual permanent MAC address. If an
+ * alternate MAC address is found it is programmed into RAR0, replacing
+ * the permanent address that was installed into RAR0 by the Si on reset.
+ * This function will return SUCCESS unless it encounters an error while
+ * reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 i;
+ s32 ret_val;
+ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+ u8 alt_mac_addr[ETH_ADDR_LEN];
+
+ DEBUGFUNC("e1000_check_alt_mac_addr_generic");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
+ if (ret_val)
+ return ret_val;
+
+ /* not supported on older hardware or 82573 */
+ if ((hw->mac.type < e1000_82571) || (hw->mac.type == e1000_82573))
+ return E1000_SUCCESS;
+
+ /* Alternate MAC address is handled by the option ROM for 82580
+ * and newer. SW support not required.
+ */
+ if (hw->mac.type >= e1000_82580)
+ return E1000_SUCCESS;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+ &nvm_alt_mac_addr_offset);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+ (nvm_alt_mac_addr_offset == 0x0000))
+ /* There is no Alternate MAC Address */
+ return E1000_SUCCESS;
+
+ if (hw->bus.func == E1000_FUNC_1)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+ if (hw->bus.func == E1000_FUNC_2)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
+
+ if (hw->bus.func == E1000_FUNC_3)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
+ for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+ offset = nvm_alt_mac_addr_offset + (i >> 1);
+ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+ alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+ }
+
+ /* if multicast bit is set, the alternate address will not be used */
+ if (alt_mac_addr[0] & 0x01) {
+ DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
+ return E1000_SUCCESS;
+ }
+
+ /* We have a valid alternate MAC address, and we want to treat it the
+ * same as the normal permanent MAC address stored by the HW into the
+ * RAR. Do this by mapping this address into RAR0.
+ */
+ hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_rar_set_generic - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
+ **/
+STATIC int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ DEBUGFUNC("e1000_rar_set_generic");
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ /* Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+ E1000_WRITE_FLUSH(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_hash_mc_addr_generic - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+ * the multicast filter table array address and new table value.
+ **/
+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
+ DEBUGFUNC("e1000_hash_mc_addr_generic");
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+ while (hash_mask >> bit_shift != 0xFF)
+ bit_shift++;
+
+ /* The portion of the address that is used for the hash table
+ * is determined by the mc_filter_type setting.
+ * The algorithm is such that there is a total of 8 bits of shifting.
+ * The bit_shift for a mc_filter_type of 0 represents the number of
+ * left-shifts where the MSB of mc_addr[5] would still fall within
+ * the hash_mask. Case 0 does this exactly. Since there are a total
+ * of 8 bits of shifting, then mc_addr[4] will shift right the
+ * remaining number of bits. Thus 8 - bit_shift. The rest of the
+ * cases are a variation of this algorithm...essentially raising the
+ * number of bits to shift mc_addr[5] left, while still keeping the
+ * 8-bit shifting total.
+ *
+ * For example, given the following Destination MAC Address and an
+ * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+ * we can see that the bit_shift for case 0 is 4. These are the hash
+ * values resulting from each mc_filter_type...
+ * [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ *
+ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+ * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+ */
+ switch (hw->mac.mc_filter_type) {
+ default:
+ case 0:
+ break;
+ case 1:
+ bit_shift += 1;
+ break;
+ case 2:
+ bit_shift += 2;
+ break;
+ case 3:
+ bit_shift += 4;
+ break;
+ }
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16) mc_addr[5]) << bit_shift)));
+
+ return hash_value;
+}
+
+/**
+ * e1000_update_mc_addr_list_generic - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
+
+ DEBUGFUNC("e1000_update_mc_addr_list_generic");
+
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32) i < mc_addr_count; i++) {
+ hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
+
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ mc_addr_list += (ETH_ADDR_LEN);
+ }
+
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value
+ * @hw: pointer to the HW structure
+ *
+ * In certain situations, a system BIOS may report that the PCIx maximum
+ * memory read byte count (MMRBC) value is higher than than the actual
+ * value. We check the PCIx command register with the current PCIx status
+ * register.
+ **/
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw)
+{
+ u16 cmd_mmrbc;
+ u16 pcix_cmd;
+ u16 pcix_stat_hi_word;
+ u16 stat_mmrbc;
+
+ DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic");
+
+ /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */
+ if (hw->bus.type != e1000_bus_type_pcix)
+ return;
+
+ e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+ e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word);
+ cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >>
+ PCIX_COMMAND_MMRBC_SHIFT;
+ stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
+ PCIX_STATUS_HI_MMRBC_SHIFT;
+ if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
+ stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
+ if (cmd_mmrbc > stat_mmrbc) {
+ pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK;
+ pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
+ e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+ }
+}
+
+/**
+ * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the base hardware counters by reading the counter registers.
+ **/
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
+
+ E1000_READ_REG(hw, E1000_CRCERRS);
+ E1000_READ_REG(hw, E1000_SYMERRS);
+ E1000_READ_REG(hw, E1000_MPC);
+ E1000_READ_REG(hw, E1000_SCC);
+ E1000_READ_REG(hw, E1000_ECOL);
+ E1000_READ_REG(hw, E1000_MCC);
+ E1000_READ_REG(hw, E1000_LATECOL);
+ E1000_READ_REG(hw, E1000_COLC);
+ E1000_READ_REG(hw, E1000_DC);
+ E1000_READ_REG(hw, E1000_SEC);
+ E1000_READ_REG(hw, E1000_RLEC);
+ E1000_READ_REG(hw, E1000_XONRXC);
+ E1000_READ_REG(hw, E1000_XONTXC);
+ E1000_READ_REG(hw, E1000_XOFFRXC);
+ E1000_READ_REG(hw, E1000_XOFFTXC);
+ E1000_READ_REG(hw, E1000_FCRUC);
+ E1000_READ_REG(hw, E1000_GPRC);
+ E1000_READ_REG(hw, E1000_BPRC);
+ E1000_READ_REG(hw, E1000_MPRC);
+ E1000_READ_REG(hw, E1000_GPTC);
+ E1000_READ_REG(hw, E1000_GORCL);
+ E1000_READ_REG(hw, E1000_GORCH);
+ E1000_READ_REG(hw, E1000_GOTCL);
+ E1000_READ_REG(hw, E1000_GOTCH);
+ E1000_READ_REG(hw, E1000_RNBC);
+ E1000_READ_REG(hw, E1000_RUC);
+ E1000_READ_REG(hw, E1000_RFC);
+ E1000_READ_REG(hw, E1000_ROC);
+ E1000_READ_REG(hw, E1000_RJC);
+ E1000_READ_REG(hw, E1000_TORL);
+ E1000_READ_REG(hw, E1000_TORH);
+ E1000_READ_REG(hw, E1000_TOTL);
+ E1000_READ_REG(hw, E1000_TOTH);
+ E1000_READ_REG(hw, E1000_TPR);
+ E1000_READ_REG(hw, E1000_TPT);
+ E1000_READ_REG(hw, E1000_MPTC);
+ E1000_READ_REG(hw, E1000_BPTC);
+}
+
+/**
+ * e1000_check_for_copper_link_generic - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see of the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
+ DEBUGFUNC("e1000_check_for_copper_link");
+
+ /* We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status)
+ return E1000_SUCCESS;
+
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ return E1000_SUCCESS; /* No link detected */
+
+ mac->get_link_status = false;
+
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ e1000_check_downshift_generic(hw);
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg)
+ return -E1000_ERR_CONFIG;
+
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ mac->ops.config_collision_dist(hw);
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_fiber_link_generic - Check for link (Fiber)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_check_for_fiber_link_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+ /* If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), the cable is plugged in (we have signal),
+ * and our link partner is not trying to auto-negotiate with us (we
+ * are receiving idles or data), we need to force link up. We also
+ * need to give auto-negotiation time to complete, in case the cable
+ * was just plugged in. The autoneg_failed flag does this.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) &&
+ !(rxcw & E1000_RXCW_C)) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /* If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_for_serdes_link_generic - Check for link (Serdes)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_check_for_serdes_link_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+ /* If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), and our link partner is not trying to
+ * auto-negotiate with us (we are receiving idles or data),
+ * we need to force link up. We also need to give auto-negotiation
+ * time to complete.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /* If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
+ /* If we force link for non-auto-negotiation switch, check
+ * link status based on MAC synchronization for internal
+ * serdes media type.
+ */
+ /* SYNCH bit and IV bit are sticky. */
+ usec_delay(10);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ mac->serdes_has_link = true;
+ DEBUGOUT("SERDES: Link up - forced.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - force failed.\n");
+ }
+ }
+
+ if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_LU) {
+ /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+ usec_delay(10);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ mac->serdes_has_link = true;
+ DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n");
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - no sync.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - autoneg failed\n");
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_default_fc_generic - Set flow control default values
+ * @hw: pointer to the HW structure
+ *
+ * Read the EEPROM for the default values for flow control and store the
+ * values.
+ **/
+s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 nvm_offset = 0;
+
+ DEBUGFUNC("e1000_set_default_fc_generic");
+
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
+ * that determine the hardware's default PAUSE (flow control) mode,
+ * a bit that determines whether the HW defaults to enabling or
+ * disabling auto-negotiation, and the direction of the
+ * SW defined pins. If there is no SW over-ride of the flow
+ * control setting, then the variable hw->fc will
+ * be initialized based on a value in the EEPROM.
+ */
+ if (hw->mac.type == e1000_i350) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
+ ret_val = hw->nvm.ops.read(hw,
+ NVM_INIT_CONTROL2_REG +
+ nvm_offset,
+ 1, &nvm_data);
+ } else {
+ ret_val = hw->nvm.ops.read(hw,
+ NVM_INIT_CONTROL2_REG,
+ 1, &nvm_data);
+ }
+
+
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
+ hw->fc.requested_mode = e1000_fc_none;
+ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+ NVM_WORD0F_ASM_DIR)
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ else
+ hw->fc.requested_mode = e1000_fc_full;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_link_generic - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+s32 e1000_setup_link_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_link_generic");
+
+ /* In the case of the phy reset being blocked, we already have a link.
+ * We do not need to set it up again.
+ */
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
+ return E1000_SUCCESS;
+
+ /* If requested flow control is set to default, set flow control
+ * based on the EEPROM flow control settings.
+ */
+ if (hw->fc.requested_mode == e1000_fc_default) {
+ ret_val = e1000_set_default_fc_generic(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
+ hw->fc.current_mode);
+
+ /* Call the necessary media_type subroutine to configure the link. */
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+ E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
+ E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+ E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+ return e1000_set_fc_watermarks_generic(hw);
+}
+
+/**
+ * e1000_commit_fc_settings_generic - Configure flow control
+ * @hw: pointer to the HW structure
+ *
+ * Write the flow control settings to the Transmit Config Word Register (TXCW)
+ * base on the flow control settings in e1000_mac_info.
+ **/
+s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 txcw;
+
+ DEBUGFUNC("e1000_commit_fc_settings_generic");
+
+ /* Check for a software override of the flow control settings, and
+ * setup the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Transmit Config Word Register (TXCW) and re-start auto-
+ * negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we
+ * do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ /* Flow control completely disabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case e1000_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is disabled
+ * by a software over-ride. Since there really isn't a way to
+ * advertise that we are capable of Rx Pause ONLY, we will
+ * advertise that we support both symmetric and asymmetric Rx
+ * PAUSE. Later, we will disable the adapter's ability to send
+ * PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case e1000_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case e1000_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ break;
+ }
+
+ E1000_WRITE_REG(hw, E1000_TXCW, txcw);
+ mac->txcw = txcw;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_poll_fiber_serdes_link_generic - Poll for link up
+ * @hw: pointer to the HW structure
+ *
+ * Polls for link up by reading the status register, if link fails to come
+ * up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 i, status;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
+
+ /* If we have a signal (the cable is plugged in, or assumed true for
+ * serdes media) then poll for a "Link-Up" indication in the Device
+ * Status Register. Time-out if a link isn't seen in 500 milliseconds
+ * seconds (Auto-negotiation should complete in less than 500
+ * milliseconds even if the other end is doing it in SW).
+ */
+ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+ msec_delay(10);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_LU)
+ break;
+ }
+ if (i == FIBER_LINK_UP_LIMIT) {
+ DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+ mac->autoneg_failed = true;
+ /* AutoNeg failed to achieve a link, so we'll call
+ * mac->check_for_link. This routine will force the
+ * link up if we detect a signal. This will allow us to
+ * communicate with non-autonegotiating link partners.
+ */
+ ret_val = mac->ops.check_for_link(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while checking for link\n");
+ return ret_val;
+ }
+ mac->autoneg_failed = false;
+ } else {
+ mac->autoneg_failed = false;
+ DEBUGOUT("Valid Link Found\n");
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber and serdes
+ * links. Upon successful setup, poll for link.
+ **/
+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Take the link out of reset */
+ ctrl &= ~E1000_CTRL_LRST;
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ ret_val = e1000_commit_fc_settings_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Since auto-negotiation is enabled, take the link out of reset (the
+ * link will be in reset, because we previously reset the chip). This
+ * will restart auto-negotiation. If auto-negotiation is successful
+ * then the link-up status bit will be set and the flow control enable
+ * bits (RFCE and TFCE) will be set according to their negotiated value.
+ */
+ DEBUGOUT("Auto-negotiation enabled\n");
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+
+ /* For these adapters, the SW definable pin 1 is set when the optics
+ * detect a signal. If we have a signal, then poll for a "Link-Up"
+ * indication.
+ */
+ if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+ (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
+ ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+ } else {
+ DEBUGOUT("No signal detected\n");
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_config_collision_dist_generic - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+STATIC void e1000_config_collision_dist_generic(struct e1000_hw *hw)
+{
+ u32 tctl;
+
+ DEBUGFUNC("e1000_config_collision_dist_generic");
+
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers. If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ **/
+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
+{
+ u32 fcrtl = 0, fcrth = 0;
+
+ DEBUGFUNC("e1000_set_fc_watermarks_generic");
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (hw->fc.current_mode & e1000_fc_tx_pause) {
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
+ */
+ fcrtl = hw->fc.low_water;
+ if (hw->fc.send_xon)
+ fcrtl |= E1000_FCRTL_XONE;
+
+ fcrth = hw->fc.high_water;
+ }
+ E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
+ E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_force_mac_fc_generic - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings. TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC. Software must
+ * also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_force_mac_fc_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit an
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disable flow control
+ * according to the "hw->fc.current_mode" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * frames but we do not receive pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) is enabled.
+ * other: No other values should be possible at this point.
+ */
+ DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case e1000_fc_rx_pause:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case e1000_fc_tx_pause:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case e1000_fc_full:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_fc_after_link_up_generic - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced. If the link needed to be forced, then
+ * flow control needs to be forced also. If auto-negotiation is enabled
+ * and did not fail, then we configure flow control based on our link
+ * partner.
+ **/
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+ u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
+ u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+ u16 speed, duplex;
+
+ DEBUGFUNC("e1000_config_fc_after_link_up_generic");
+
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (mac->autoneg_failed) {
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes)
+ ret_val = e1000_force_mac_fc_generic(hw);
+ } else {
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ret_val = e1000_force_mac_fc_generic(hw);
+ }
+
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (Address 4) and the Auto_Negotiation Base
+ * Page Ability Register (Address 5) to determine how
+ * flow control was negotiated.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | E1000_fc_full
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc.current_mode = e1000_fc_none;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = e1000_force_mac_fc_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Check for the case where we have SerDes media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
+ mac->autoneg) {
+ /* Read the PCS_LSTS and check to see if AutoNeg
+ * has completed.
+ */
+ pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+
+ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+ DEBUGOUT("PCS Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (PCS_ANADV) and the Auto_Negotiation Base
+ * Page Ability Register (PCS_LPAB) to determine how
+ * flow control was negotiated.
+ */
+ pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB);
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (PCS_ANADV) and two bits in the Auto Negotiation Base
+ * Page Ability Register (PCS_LPAB) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ }
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg);
+
+ ret_val = e1000_force_mac_fc_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Read the status register for the current speed/duplex and store the current
+ * speed and duplex for copper connections.
+ **/
+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ u32 status;
+
+ DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
+
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ DEBUGOUT("1000 Mbs, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ DEBUGOUT("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ DEBUGOUT("10 Mbs, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_speed_and_duplex_fiber_generic - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Sets the speed and duplex to gigabit full duplex (the only possible option)
+ * for fiber/serdes links.
+ **/
+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 *speed, u16 *duplex)
+{
+ DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
+ UNREFERENCED_1PARAMETER(hw);
+
+ *speed = SPEED_1000;
+ *duplex = FULL_DUPLEX;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_hw_semaphore_generic - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_generic");
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_NVM;
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_generic(hw);
+ DEBUGOUT("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_put_hw_semaphore_generic - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("e1000_put_hw_semaphore_generic");
+
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+}
+
+/**
+ * e1000_get_auto_rd_done_generic - Check for auto read completion
+ * @hw: pointer to the HW structure
+ *
+ * Check EEPROM for Auto Read done bit.
+ **/
+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
+{
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_auto_rd_done_generic");
+
+ while (i < AUTO_READ_DONE_TIMEOUT) {
+ if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
+ break;
+ msec_delay(1);
+ i++;
+ }
+
+ if (i == AUTO_READ_DONE_TIMEOUT) {
+ DEBUGOUT("Auto read by HW from NVM has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_valid_led_default_generic - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_generic");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_id_led_init_generic -
+ * @hw: pointer to the HW structure
+ *
+ **/
+s32 e1000_id_led_init_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_mask = 0x000000FF;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+ u16 data, i, temp;
+ const u16 led_mask = 0x0F;
+
+ DEBUGFUNC("e1000_id_led_init_generic");
+
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+ return ret_val;
+
+ mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+ for (i = 0; i < 4; i++) {
+ temp = (data >> (i << 2)) & led_mask;
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode1 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode1 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode2 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode2 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_led_generic - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use and saves the current state
+ * of the LED so it can be later restored.
+ **/
+s32 e1000_setup_led_generic(struct e1000_hw *hw)
+{
+ u32 ledctl;
+
+ DEBUGFUNC("e1000_setup_led_generic");
+
+ if (hw->mac.ops.setup_led != e1000_setup_led_generic)
+ return -E1000_ERR_CONFIG;
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+ hw->mac.ledctl_default = ledctl;
+ /* Turn off LED0 */
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+ E1000_LEDCTL_LED0_MODE_SHIFT);
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+ } else if (hw->phy.media_type == e1000_media_type_copper) {
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_cleanup_led_generic - Set LED config to default operation
+ * @hw: pointer to the HW structure
+ *
+ * Remove the current LED configuration and set the LED configuration
+ * to the default value, saved from the EEPROM.
+ **/
+s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_cleanup_led_generic");
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_blink_led_generic - Blink LED
+ * @hw: pointer to the HW structure
+ *
+ * Blink the LEDs which are set to be on.
+ **/
+s32 e1000_blink_led_generic(struct e1000_hw *hw)
+{
+ u32 ledctl_blink = 0;
+ u32 i;
+
+ DEBUGFUNC("e1000_blink_led_generic");
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ /* always blink LED0 for PCI-E fiber */
+ ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+ } else {
+ /* Set the blink bit for each LED that's "on" (0x0E)
+ * (or "off" if inverted) in ledctl_mode2. The blink
+ * logic in hardware only works when mode is set to "on"
+ * so it must be changed accordingly when the mode is
+ * "off" and inverted.
+ */
+ ledctl_blink = hw->mac.ledctl_mode2;
+ for (i = 0; i < 32; i += 8) {
+ u32 mode = (hw->mac.ledctl_mode2 >> i) &
+ E1000_LEDCTL_LED0_MODE_MASK;
+ u32 led_default = hw->mac.ledctl_default >> i;
+
+ if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_ON)) ||
+ ((led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_OFF))) {
+ ledctl_blink &=
+ ~(E1000_LEDCTL_LED0_MODE_MASK << i);
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_MODE_LED_ON) << i;
+ }
+ }
+ }
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_on_generic - Turn LED on
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED on.
+ **/
+s32 e1000_led_on_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_led_on_generic");
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_fiber:
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ break;
+ case e1000_media_type_copper:
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+ break;
+ default:
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off_generic - Turn LED off
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED off.
+ **/
+s32 e1000_led_off_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_led_off_generic");
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_fiber:
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ break;
+ case e1000_media_type_copper:
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+ break;
+ default:
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
+ * @hw: pointer to the HW structure
+ * @no_snoop: bitmap of snoop events
+ *
+ * Set the PCI-express register to snoop for events enabled in 'no_snoop'.
+ **/
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
+{
+ u32 gcr;
+
+ DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
+
+ if (hw->bus.type != e1000_bus_type_pci_express)
+ return;
+
+ if (no_snoop) {
+ gcr = E1000_READ_REG(hw, E1000_GCR);
+ gcr &= ~(PCIE_NO_SNOOP_ALL);
+ gcr |= no_snoop;
+ E1000_WRITE_REG(hw, E1000_GCR, gcr);
+ }
+}
+
+/**
+ * e1000_disable_pcie_master_generic - Disables PCI-express master access
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_SUCCESS if successful, else returns -10
+ * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
+ * the master requests to be disabled.
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests.
+ **/
+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 timeout = MASTER_DISABLE_TIMEOUT;
+
+ DEBUGFUNC("e1000_disable_pcie_master_generic");
+
+ if (hw->bus.type != e1000_bus_type_pci_express)
+ return E1000_SUCCESS;
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ while (timeout) {
+ if (!(E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STATUS_GIO_MASTER_ENABLE) ||
+ E1000_REMOVED(hw->hw_addr))
+ break;
+ usec_delay(100);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Master requests are pending.\n");
+ return -E1000_ERR_MASTER_REQUESTS_PENDING;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
+ * @hw: pointer to the HW structure
+ *
+ * Reset the Adaptive Interframe Spacing throttle to default values.
+ **/
+void e1000_reset_adaptive_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_reset_adaptive_generic");
+
+ if (!mac->adaptive_ifs) {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ return;
+ }
+
+ mac->current_ifs_val = 0;
+ mac->ifs_min_val = IFS_MIN;
+ mac->ifs_max_val = IFS_MAX;
+ mac->ifs_step_size = IFS_STEP;
+ mac->ifs_ratio = IFS_RATIO;
+
+ mac->in_ifs_mode = false;
+ E1000_WRITE_REG(hw, E1000_AIT, 0);
+}
+
+/**
+ * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
+ * @hw: pointer to the HW structure
+ *
+ * Update the Adaptive Interframe Spacing Throttle value based on the
+ * time between transmitted packets and time between collisions.
+ **/
+void e1000_update_adaptive_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_update_adaptive_generic");
+
+ if (!mac->adaptive_ifs) {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ return;
+ }
+
+ if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+ if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+ mac->in_ifs_mode = true;
+ if (mac->current_ifs_val < mac->ifs_max_val) {
+ if (!mac->current_ifs_val)
+ mac->current_ifs_val = mac->ifs_min_val;
+ else
+ mac->current_ifs_val +=
+ mac->ifs_step_size;
+ E1000_WRITE_REG(hw, E1000_AIT,
+ mac->current_ifs_val);
+ }
+ }
+ } else {
+ if (mac->in_ifs_mode &&
+ (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+ mac->current_ifs_val = 0;
+ mac->in_ifs_mode = false;
+ E1000_WRITE_REG(hw, E1000_AIT, 0);
+ }
+ }
+}
+
+/**
+ * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
+ * @hw: pointer to the HW structure
+ *
+ * Verify that when not using auto-negotiation that MDI/MDIx is correctly
+ * set, which is forced to MDI mode only.
+ **/
+STATIC s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_validate_mdi_setting_generic");
+
+ if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
+ DEBUGOUT("Invalid MDI setting detected\n");
+ hw->phy.mdix = 1;
+ return -E1000_ERR_CONFIG;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings
+ * @hw: pointer to the HW structure
+ *
+ * Validate the MDI/MDIx setting, allowing for auto-crossover during forced
+ * operation.
+ **/
+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic");
+ UNREFERENCED_1PARAMETER(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register
+ * @hw: pointer to the HW structure
+ * @reg: 32bit register offset such as E1000_SCTL
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes an address/data control type register. There are several of these
+ * and they all have the format address << 8 | data and bit 31 is polled for
+ * completion.
+ **/
+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+ u32 offset, u8 data)
+{
+ u32 i, regvalue = 0;
+
+ DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
+
+ /* Set up the address and data */
+ regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
+ E1000_WRITE_REG(hw, reg, regvalue);
+
+ /* Poll the ready bit to see if the MDI read completed */
+ for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+ usec_delay(5);
+ regvalue = E1000_READ_REG(hw, reg);
+ if (regvalue & E1000_GEN_CTL_READY)
+ break;
+ }
+ if (!(regvalue & E1000_GEN_CTL_READY)) {
+ DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
+ return -E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.h
new file mode 100644
index 00000000..96a260c3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.h
@@ -0,0 +1,95 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_MAC_H_
+#define _E1000_MAC_H_
+
+void e1000_init_mac_ops_generic(struct e1000_hw *hw);
+#ifndef E1000_REMOVED
+#define E1000_REMOVED(a) (0)
+#endif /* E1000_REMOVED */
+void e1000_null_mac_generic(struct e1000_hw *hw);
+s32 e1000_null_ops_generic(struct e1000_hw *hw);
+s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
+bool e1000_null_mng_mode(struct e1000_hw *hw);
+void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
+void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
+int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
+s32 e1000_blink_led_generic(struct e1000_hw *hw);
+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
+s32 e1000_cleanup_led_generic(struct e1000_hw *hw);
+s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw);
+s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw);
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw);
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw);
+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
+s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw);
+s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
+void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw);
+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+s32 e1000_id_led_init_generic(struct e1000_hw *hw);
+s32 e1000_led_on_generic(struct e1000_hw *hw);
+s32 e1000_led_off_generic(struct e1000_hw *hw);
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
+s32 e1000_set_default_fc_generic(struct e1000_hw *hw);
+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
+s32 e1000_setup_led_generic(struct e1000_hw *hw);
+s32 e1000_setup_link_generic(struct e1000_hw *hw);
+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+ u32 offset, u8 data);
+
+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
+
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000_reset_adaptive_generic(struct e1000_hw *hw);
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
+void e1000_update_adaptive_generic(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.c
new file mode 100644
index 00000000..8564a7f8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.c
@@ -0,0 +1,576 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_api.h"
+
+/**
+ * e1000_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to EEPROM
+ * @length: size of EEPROM to calculate a checksum for
+ *
+ * Calculates the checksum for some buffer on a specified length. The
+ * checksum calculated is returned.
+ **/
+u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ DEBUGFUNC("e1000_calculate_checksum");
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8) (0 - sum);
+}
+
+/**
+ * e1000_mng_enable_host_if_generic - Checks host interface is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ * This function checks whether the HOST IF is enabled for command operation
+ * and also checks whether the previous command is completed. It busy waits
+ * in case of previous command is not completed.
+ **/
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
+{
+ u32 hicr;
+ u8 i;
+
+ DEBUGFUNC("e1000_mng_enable_host_if_generic");
+
+ if (!hw->mac.arc_subsystem_valid) {
+ DEBUGOUT("ARC subsystem not valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+ /* check the previous command is completed */
+ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ msec_delay_irq(1);
+ }
+
+ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+ DEBUGOUT("Previous command timeout failed .\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_mng_mode_generic - Generic check management mode
+ * @hw: pointer to the HW structure
+ *
+ * Reads the firmware semaphore register and returns true (>0) if
+ * manageability is enabled, else false (0).
+ **/
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
+{
+ u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+ DEBUGFUNC("e1000_check_mng_mode_generic");
+
+
+ return (fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+}
+
+/**
+ * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx
+ * @hw: pointer to the HW structure
+ *
+ * Enables packet filtering on transmit packets if manageability is enabled
+ * and host interface is enabled.
+ **/
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
+{
+ struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+ u32 *buffer = (u32 *)&hw->mng_cookie;
+ u32 offset;
+ s32 ret_val, hdr_csum, csum;
+ u8 i, len;
+
+ DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
+
+ hw->mac.tx_pkt_filtering = true;
+
+ /* No manageability, no filtering */
+ if (!hw->mac.ops.check_mng_mode(hw)) {
+ hw->mac.tx_pkt_filtering = false;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* If we can't read from the host interface for whatever
+ * reason, disable filtering.
+ */
+ ret_val = e1000_mng_enable_host_if_generic(hw);
+ if (ret_val != E1000_SUCCESS) {
+ hw->mac.tx_pkt_filtering = false;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* Read in the header. Length and offset are in dwords. */
+ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+ offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+ for (i = 0; i < len; i++)
+ *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
+ offset + i);
+ hdr_csum = hdr->checksum;
+ hdr->checksum = 0;
+ csum = e1000_calculate_checksum((u8 *)hdr,
+ E1000_MNG_DHCP_COOKIE_LENGTH);
+ /* If either the checksums or signature don't match, then
+ * the cookie area isn't considered valid, in which case we
+ * take the safe route of assuming Tx filtering is enabled.
+ */
+ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
+ hw->mac.tx_pkt_filtering = true;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* Cookie area is valid, make the final check for filtering. */
+ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
+ hw->mac.tx_pkt_filtering = false;
+
+ return hw->mac.tx_pkt_filtering;
+}
+
+/**
+ * e1000_mng_write_cmd_header_generic - Writes manageability command header
+ * @hw: pointer to the HW structure
+ * @hdr: pointer to the host interface command header
+ *
+ * Writes the command header after does the checksum calculation.
+ **/
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
+{
+ u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+ DEBUGFUNC("e1000_mng_write_cmd_header_generic");
+
+ /* Write the whole command header structure with new checksum. */
+
+ hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+ length >>= 2;
+ /* Write the relevant command block into the ram area. */
+ for (i = 0; i < length; i++) {
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+ *((u32 *) hdr + i));
+ E1000_WRITE_FLUSH(hw);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_mng_host_if_write_generic - Write to the manageability host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface buffer
+ * @length: size of the buffer
+ * @offset: location in the buffer to write to
+ * @sum: sum of the data (not checksum)
+ *
+ * This function writes the buffer content at the offset given on the host if.
+ * It also does alignment considerations to do the writes in most efficient
+ * way. Also fills up the sum of the buffer in *buffer parameter.
+ **/
+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+ u16 length, u16 offset, u8 *sum)
+{
+ u8 *tmp;
+ u8 *bufptr = buffer;
+ u32 data = 0;
+ u16 remaining, i, j, prev_bytes;
+
+ DEBUGFUNC("e1000_mng_host_if_write_generic");
+
+ /* sum = only sum of the data and it is not checksum */
+
+ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
+ return -E1000_ERR_PARAM;
+
+ tmp = (u8 *)&data;
+ prev_bytes = offset & 0x3;
+ offset >>= 2;
+
+ if (prev_bytes) {
+ data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
+ for (j = prev_bytes; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
+ length -= j - prev_bytes;
+ offset++;
+ }
+
+ remaining = length & 0x3;
+ length -= remaining;
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant command block into the
+ * ram area.
+ */
+ for (i = 0; i < length; i++) {
+ for (j = 0; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
+ data);
+ }
+ if (remaining) {
+ for (j = 0; j < sizeof(u32); j++) {
+ if (j < remaining)
+ *(tmp + j) = *bufptr++;
+ else
+ *(tmp + j) = 0;
+
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
+ data);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface
+ * @length: size of the buffer
+ *
+ * Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
+ u16 length)
+{
+ struct e1000_host_mng_command_header hdr;
+ s32 ret_val;
+ u32 hicr;
+
+ DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
+
+ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+ hdr.command_length = length;
+ hdr.reserved1 = 0;
+ hdr.reserved2 = 0;
+ hdr.checksum = 0;
+
+ /* Enable the host interface */
+ ret_val = e1000_mng_enable_host_if_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Populate the host interface with the contents of "buffer". */
+ ret_val = e1000_mng_host_if_write_generic(hw, buffer, length,
+ sizeof(hdr), &(hdr.checksum));
+ if (ret_val)
+ return ret_val;
+
+ /* Write the manageability command header */
+ ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr);
+ if (ret_val)
+ return ret_val;
+
+ /* Tell the ARC a new command is pending. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_enable_mng_pass_thru - Check if management passthrough is needed
+ * @hw: pointer to the HW structure
+ *
+ * Verifies the hardware needs to leave interface enabled so that frames can
+ * be directed to and from the management interface.
+ **/
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+ u32 manc;
+ u32 fwsm, factps;
+
+ DEBUGFUNC("e1000_enable_mng_pass_thru");
+
+ if (!hw->mac.asf_firmware_present)
+ return false;
+
+ manc = E1000_READ_REG(hw, E1000_MANC);
+
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
+ return false;
+
+ if (hw->mac.has_fwsm) {
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ factps = E1000_READ_REG(hw, E1000_FACTPS);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)))
+ return true;
+ } else if ((hw->mac.type == e1000_82574) ||
+ (hw->mac.type == e1000_82583)) {
+ u16 data;
+ s32 ret_val;
+
+ factps = E1000_READ_REG(hw, E1000_FACTPS);
+ ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+ if (ret_val)
+ return false;
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
+ (e1000_mng_mode_pt << 13)))
+ return true;
+ } else if ((manc & E1000_MANC_SMBUS_EN) &&
+ !(manc & E1000_MANC_ASF_EN)) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * e1000_host_interface_command - Writes buffer to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: contains a command to write
+ * @length: the byte length of the buffer, must be multiple of 4 bytes
+ *
+ * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS
+ * else returns E1000_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length)
+{
+ u32 hicr, i;
+
+ DEBUGFUNC("e1000_host_interface_command");
+
+ if (!(hw->mac.arc_subsystem_valid)) {
+ DEBUGOUT("Hardware doesn't support host interface command.\n");
+ return E1000_SUCCESS;
+ }
+
+ if (!hw->mac.asf_firmware_present) {
+ DEBUGOUT("Firmware is not present.\n");
+ return E1000_SUCCESS;
+ }
+
+ if (length == 0 || length & 0x3 ||
+ length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) {
+ DEBUGOUT("Buffer length failure.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < length; i++)
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+ *((u32 *)buffer + i));
+
+ /* Setting this bit tells the ARC that a new command is pending. */
+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check command successful completion. */
+ if (i == E1000_HI_COMMAND_TIMEOUT ||
+ (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) {
+ DEBUGOUT("Command has failed with no status valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ for (i = 0; i < length; i++)
+ *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
+ E1000_HOST_IF,
+ i);
+
+ return E1000_SUCCESS;
+}
+/**
+ * e1000_load_firmware - Writes proxy FW code buffer to host interface
+ * and execute.
+ * @hw: pointer to the HW structure
+ * @buffer: contains a firmware to write
+ * @length: the byte length of the buffer, must be multiple of 4 bytes
+ *
+ * Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled
+ * in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length)
+{
+ u32 hicr, hibba, fwsm, icr, i;
+
+ DEBUGFUNC("e1000_load_firmware");
+
+ if (hw->mac.type < e1000_i210) {
+ DEBUGOUT("Hardware doesn't support loading FW by the driver\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_CONFIG;
+ }
+ if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) {
+ DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) {
+ DEBUGOUT("Buffer length failure.\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ /* Clear notification from ROM-FW by reading ICR register */
+ icr = E1000_READ_REG(hw, E1000_ICR_V2);
+
+ /* Reset ROM-FW */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ hicr |= E1000_HICR_FW_RESET_ENABLE;
+ E1000_WRITE_REG(hw, E1000_HICR, hicr);
+ hicr |= E1000_HICR_FW_RESET;
+ E1000_WRITE_REG(hw, E1000_HICR, hicr);
+ E1000_WRITE_FLUSH(hw);
+
+ /* Wait till MAC notifies about its readiness after ROM-FW reset */
+ for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) {
+ icr = E1000_READ_REG(hw, E1000_ICR_V2);
+ if (icr & E1000_ICR_MNG)
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for timeout */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("FW reset failed.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Wait till MAC is ready to accept new FW code */
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ if ((fwsm & E1000_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT ==
+ E1000_FWSM_HI_EN_ONLY_MODE))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for timeout */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("FW reset failed.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant FW code block
+ * into the ram area in DWORDs via 1kB ram addressing window.
+ */
+ for (i = 0; i < length; i++) {
+ if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) {
+ /* Point to correct 1kB ram window */
+ hibba = E1000_HI_FW_BASE_ADDRESS +
+ ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) *
+ (i / E1000_HI_FW_BLOCK_DWORD_LENGTH));
+
+ E1000_WRITE_REG(hw, E1000_HIBBA, hibba);
+ }
+
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
+ i % E1000_HI_FW_BLOCK_DWORD_LENGTH,
+ *((u32 *)buffer + i));
+ }
+
+ /* Setting this bit tells the ARC that a new FW is ready to execute. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for successful FW start. */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("New FW did not start within timeout period.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return E1000_SUCCESS;
+}
+
+
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.h
new file mode 100644
index 00000000..25be1156
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.h
@@ -0,0 +1,95 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_MANAGE_H_
+#define _E1000_MANAGE_H_
+
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+ u16 length, u16 offset, u8 *sum);
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr);
+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
+ u8 *buffer, u16 length);
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+u8 e1000_calculate_checksum(u8 *buffer, u32 length);
+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
+s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length);
+
+enum e1000_mng_mode {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG 0x20000000
+
+#define E1000_FWSM_MODE_MASK 0xE
+#define E1000_FWSM_MODE_SHIFT 1
+#define E1000_FWSM_FW_VALID 0x00008000
+#define E1000_FWSM_HI_EN_ONLY_MODE 0x4
+
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
+
+#define E1000_VFTA_ENTRY_SHIFT 5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */
+#define E1000_HI_FW_BASE_ADDRESS 0x10000
+#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */
+#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */
+#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */
+#define E1000_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C 0x02
+#define E1000_HICR_SV 0x04 /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET 0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE 0x544D4149
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.c
new file mode 100644
index 00000000..a92fd22e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.c
@@ -0,0 +1,791 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_mbx.h"
+
+/**
+ * e1000_null_mbx_check_for_flag - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ DEBUGFUNC("e1000_null_mbx_check_flag");
+ UNREFERENCED_2PARAMETER(hw, mbx_id);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_mbx_transact - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG *msg,
+ u16 E1000_UNUSEDARG size,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ DEBUGFUNC("e1000_null_mbx_rw_msg");
+ UNREFERENCED_4PARAMETER(hw, msg, size, mbx_id);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_read_mbx");
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_mbx");
+
+ if (size > mbx->size)
+ ret_val = -E1000_ERR_MBX;
+
+ else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_msg");
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_ack");
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_rst");
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+STATIC s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("e1000_poll_for_msg");
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ * e1000_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+STATIC s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("e1000_poll_for_ack");
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ * e1000_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_read_posted_mbx");
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = e1000_poll_for_msg(hw, mbx_id);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_write_posted_mbx");
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = e1000_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mbx_ops_generic - Initialize mbx function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets the function pointers to no-op functions
+ **/
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ mbx->ops.init_params = e1000_null_ops_generic;
+ mbx->ops.read = e1000_null_mbx_transact;
+ mbx->ops.write = e1000_null_mbx_transact;
+ mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag;
+ mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag;
+ mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag;
+ mbx->ops.read_posted = e1000_read_posted_mbx;
+ mbx->ops.write_posted = e1000_write_posted_mbx;
+}
+
+/**
+ * e1000_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+STATIC u32 e1000_read_v2p_mailbox(struct e1000_hw *hw)
+{
+ u32 v2p_mailbox = E1000_READ_REG(hw, E1000_V2PMAILBOX(0));
+
+ v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox;
+ hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * e1000_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+STATIC s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = e1000_read_v2p_mailbox(hw);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = E1000_SUCCESS;
+
+ hw->dev_spec.vf.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 e1000_check_for_msg_vf(struct e1000_hw *hw,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("e1000_check_for_msg_vf");
+
+ if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+STATIC s32 e1000_check_for_ack_vf(struct e1000_hw *hw,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("e1000_check_for_ack_vf");
+
+ if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns true if the PF has set the reset done bit or else false
+ **/
+STATIC s32 e1000_check_for_rst_vf(struct e1000_hw *hw,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("e1000_check_for_rst_vf");
+
+ if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
+ E1000_V2PMAILBOX_RSTI))) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+STATIC s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+ int count = 10;
+
+ DEBUGFUNC("e1000_obtain_mbx_lock_vf");
+
+ do {
+ /* Take ownership of the buffer */
+ E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(1000);
+ } while (count-- > 0);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+STATIC s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ s32 ret_val;
+ u16 i;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+
+ DEBUGFUNC("e1000_write_mbx_vf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = e1000_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ e1000_check_for_msg_vf(hw, 0);
+ e1000_check_for_ack_vf(hw, 0);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(0), i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * e1000_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+STATIC s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 i;
+
+ DEBUGFUNC("e1000_read_mbx_vf");
+ UNREFERENCED_1PARAMETER(mbx_id);
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = e1000_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(0), i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
+
+ mbx->size = E1000_VFMAILBOX_SIZE;
+
+ mbx->ops.read = e1000_read_mbx_vf;
+ mbx->ops.write = e1000_write_mbx_vf;
+ mbx->ops.read_posted = e1000_read_posted_mbx;
+ mbx->ops.write_posted = e1000_write_posted_mbx;
+ mbx->ops.check_for_msg = e1000_check_for_msg_vf;
+ mbx->ops.check_for_ack = e1000_check_for_ack_vf;
+ mbx->ops.check_for_rst = e1000_check_for_rst_vf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ return E1000_SUCCESS;
+}
+
+STATIC s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
+{
+ u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = E1000_SUCCESS;
+ E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_msg_pf");
+
+ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_ack_pf");
+
+ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_rst_pf");
+
+ if (vflre & (1 << vf_number)) {
+ ret_val = E1000_SUCCESS;
+ E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+STATIC s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+ u32 p2v_mailbox;
+ int count = 10;
+
+ DEBUGFUNC("e1000_obtain_mbx_lock_pf");
+
+ do {
+ /* Take ownership of the buffer */
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number),
+ E1000_P2VMAILBOX_PFU);
+
+ /* reserve mailbox for pf use */
+ p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
+ if (p2v_mailbox & E1000_P2VMAILBOX_PFU) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(1000);
+ } while (count-- > 0);
+
+ return ret_val;
+
+}
+
+/**
+ * e1000_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+STATIC s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_write_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ e1000_check_for_msg_pf(hw, vf_number);
+ e1000_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer*/
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+
+}
+
+/**
+ * e1000_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+STATIC s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_read_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ case e1000_i354:
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = E1000_VFMAILBOX_SIZE;
+
+ mbx->ops.read = e1000_read_mbx_pf;
+ mbx->ops.write = e1000_write_mbx_pf;
+ mbx->ops.read_posted = e1000_read_posted_mbx;
+ mbx->ops.write_posted = e1000_write_posted_mbx;
+ mbx->ops.check_for_msg = e1000_check_for_msg_pf;
+ mbx->ops.check_for_ack = e1000_check_for_ack_pf;
+ mbx->ops.check_for_rst = e1000_check_for_rst_pf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+ default:
+ return E1000_SUCCESS;
+ }
+}
+
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.h
new file mode 100644
index 00000000..563dcb9d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.h
@@ -0,0 +1,105 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_MBX_H_
+#define _E1000_MBX_H_
+
+#include "e1000_api.h"
+
+/* Define mailbox register bits */
+#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
+#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
+#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+
+/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is E1000_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+/* Msgs below or'd with this are the ACK */
+#define E1000_VT_MSGTYPE_ACK 0x80000000
+/* Msgs below or'd with this are the NACK */
+#define E1000_VT_MSGTYPE_NACK 0x40000000
+/* Indicates that VF is still clear to send requests */
+#define E1000_VT_MSGTYPE_CTS 0x20000000
+#define E1000_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET 0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/
+#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_check_for_msg(struct e1000_hw *, u16);
+s32 e1000_check_for_ack(struct e1000_hw *, u16);
+s32 e1000_check_for_rst(struct e1000_hw *, u16);
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
+s32 e1000_init_mbx_params_vf(struct e1000_hw *);
+s32 e1000_init_mbx_params_pf(struct e1000_hw *);
+
+#endif /* _E1000_MBX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.c
new file mode 100644
index 00000000..75c22827
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.c
@@ -0,0 +1,1385 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_api.h"
+
+STATIC void e1000_reload_nvm_generic(struct e1000_hw *hw);
+
+/**
+ * e1000_init_nvm_ops_generic - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Setups up the function pointers to no-op functions
+ **/
+void e1000_init_nvm_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ DEBUGFUNC("e1000_init_nvm_ops_generic");
+
+ /* Initialize function pointers */
+ nvm->ops.init_params = e1000_null_ops_generic;
+ nvm->ops.acquire = e1000_null_ops_generic;
+ nvm->ops.read = e1000_null_read_nvm;
+ nvm->ops.release = e1000_null_nvm_generic;
+ nvm->ops.reload = e1000_reload_nvm_generic;
+ nvm->ops.update = e1000_null_ops_generic;
+ nvm->ops.valid_led_default = e1000_null_led_default;
+ nvm->ops.validate = e1000_null_ops_generic;
+ nvm->ops.write = e1000_null_write_nvm;
+}
+
+/**
+ * e1000_null_nvm_read - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
+ u16 E1000_UNUSEDARG *c)
+{
+ DEBUGFUNC("e1000_null_read_nvm");
+ UNREFERENCED_4PARAMETER(hw, a, b, c);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_nvm_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_nvm_generic");
+ UNREFERENCED_1PARAMETER(hw);
+ return;
+}
+
+/**
+ * e1000_null_led_default - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG *data)
+{
+ DEBUGFUNC("e1000_null_led_default");
+ UNREFERENCED_2PARAMETER(hw, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_write_nvm - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
+ u16 E1000_UNUSEDARG *c)
+{
+ DEBUGFUNC("e1000_null_write_nvm");
+ UNREFERENCED_4PARAMETER(hw, a, b, c);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+STATIC void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd | E1000_EECD_SK;
+ E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+STATIC void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd & ~E1000_EECD_SK;
+ E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+STATIC void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u32 mask;
+
+ DEBUGFUNC("e1000_shift_out_eec_bits");
+
+ mask = 0x01 << (count - 1);
+ if (nvm->type == e1000_nvm_eeprom_microwire)
+ eecd &= ~E1000_EECD_DO;
+ else
+ if (nvm->type == e1000_nvm_eeprom_spi)
+ eecd |= E1000_EECD_DO;
+
+ do {
+ eecd &= ~E1000_EECD_DI;
+
+ if (data & mask)
+ eecd |= E1000_EECD_DI;
+
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(nvm->delay_usec);
+
+ e1000_raise_eec_clk(hw, &eecd);
+ e1000_lower_eec_clk(hw, &eecd);
+
+ mask >>= 1;
+ } while (mask);
+
+ eecd &= ~E1000_EECD_DI;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the data out
+ * "DO" bit. During this "shifting in" process the data in "DI" bit should
+ * always be clear.
+ **/
+STATIC u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+ u32 eecd;
+ u32 i;
+ u16 data;
+
+ DEBUGFUNC("e1000_shift_in_eec_bits");
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < count; i++) {
+ data <<= 1;
+ e1000_raise_eec_clk(hw, &eecd);
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ eecd &= ~E1000_EECD_DI;
+ if (eecd & E1000_EECD_DO)
+ data |= 1;
+
+ e1000_lower_eec_clk(hw, &eecd);
+ }
+
+ return data;
+}
+
+/**
+ * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ * @hw: pointer to the HW structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the EEPROM status bit for either read or write completion based
+ * upon the value of 'ee_reg'.
+ **/
+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+ u32 attempts = 100000;
+ u32 i, reg = 0;
+
+ DEBUGFUNC("e1000_poll_eerd_eewr_done");
+
+ for (i = 0; i < attempts; i++) {
+ if (ee_reg == E1000_NVM_POLL_READ)
+ reg = E1000_READ_REG(hw, E1000_EERD);
+ else
+ reg = E1000_READ_REG(hw, E1000_EEWR);
+
+ if (reg & E1000_NVM_RW_REG_DONE)
+ return E1000_SUCCESS;
+
+ usec_delay(5);
+ }
+
+ return -E1000_ERR_NVM;
+}
+
+/**
+ * e1000_acquire_nvm_generic - Generic request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
+{
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+
+ DEBUGFUNC("e1000_acquire_nvm_generic");
+
+ E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ while (timeout) {
+ if (eecd & E1000_EECD_GNT)
+ break;
+ usec_delay(5);
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ timeout--;
+ }
+
+ if (!timeout) {
+ eecd &= ~E1000_EECD_REQ;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ DEBUGOUT("Could not acquire NVM grant\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_standby_nvm - Return EEPROM to standby state
+ * @hw: pointer to the HW structure
+ *
+ * Return the EEPROM to a standby state.
+ **/
+STATIC void e1000_standby_nvm(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ DEBUGFUNC("e1000_standby_nvm");
+
+ if (nvm->type == e1000_nvm_eeprom_microwire) {
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
+
+ e1000_raise_eec_clk(hw, &eecd);
+
+ /* Select EEPROM */
+ eecd |= E1000_EECD_CS;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
+
+ e1000_lower_eec_clk(hw, &eecd);
+ } else if (nvm->type == e1000_nvm_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
+ }
+}
+
+/**
+ * e1000_stop_nvm - Terminate EEPROM command
+ * @hw: pointer to the HW structure
+ *
+ * Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+void e1000_stop_nvm(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ DEBUGFUNC("e1000_stop_nvm");
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+ /* Pull CS high */
+ eecd |= E1000_EECD_CS;
+ e1000_lower_eec_clk(hw, &eecd);
+ } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
+ /* CS on Microwire is active-high */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ e1000_raise_eec_clk(hw, &eecd);
+ e1000_lower_eec_clk(hw, &eecd);
+ }
+}
+
+/**
+ * e1000_release_nvm_generic - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000_release_nvm_generic(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ DEBUGFUNC("e1000_release_nvm_generic");
+
+ e1000_stop_nvm(hw);
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ eecd &= ~E1000_EECD_REQ;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ * @hw: pointer to the HW structure
+ *
+ * Setups the EEPROM for reading and writing.
+ **/
+STATIC s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u8 spi_stat_reg;
+
+ DEBUGFUNC("e1000_ready_nvm_eeprom");
+
+ if (nvm->type == e1000_nvm_eeprom_microwire) {
+ /* Clear SK and DI */
+ eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ /* Set CS */
+ eecd |= E1000_EECD_CS;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ } else if (nvm->type == e1000_nvm_eeprom_spi) {
+ u16 timeout = NVM_MAX_RETRY_SPI;
+
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(1);
+
+ /* Read "Status Register" repeatedly until the LSB is cleared.
+ * The EEPROM will signal that the command has been completed
+ * by clearing bit 0 of the internal status register. If it's
+ * not cleared within 'timeout', then error out.
+ */
+ while (timeout) {
+ e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+ hw->nvm.opcode_bits);
+ spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+ if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+ break;
+
+ usec_delay(5);
+ e1000_standby_nvm(hw);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("SPI NVM Status error\n");
+ return -E1000_ERR_NVM;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_nvm_spi - Read EEPROM's using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i = 0;
+ s32 ret_val;
+ u16 word_in;
+ u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+ DEBUGFUNC("e1000_read_nvm_spi");
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val)
+ goto release;
+
+ e1000_standby_nvm(hw);
+
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ read_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+ e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+ /* Read the data. SPI NVMs increment the address with each byte
+ * read and will roll over if reading beyond the end. This allows
+ * us to read the whole NVM from any offset
+ */
+ for (i = 0; i < words; i++) {
+ word_in = e1000_shift_in_eec_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_nvm_microwire - Reads EEPROM's using microwire
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i = 0;
+ s32 ret_val;
+ u8 read_opcode = NVM_READ_OPCODE_MICROWIRE;
+
+ DEBUGFUNC("e1000_read_nvm_microwire");
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val)
+ goto release;
+
+ for (i = 0; i < words; i++) {
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+ e1000_shift_out_eec_bits(hw, (u16)(offset + i),
+ nvm->address_bits);
+
+ /* Read the data. For microwire, each word requires the
+ * overhead of setup and tear-down.
+ */
+ data[i] = e1000_shift_in_eec_bits(hw, 16);
+ e1000_standby_nvm(hw);
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_nvm_eerd - Reads EEPROM using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, eerd = 0;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_read_nvm_eerd");
+
+ /* A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+ E1000_NVM_RW_REG_START;
+
+ E1000_WRITE_REG(hw, E1000_EERD, eerd);
+ ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+ if (ret_val)
+ break;
+
+ data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
+ E1000_NVM_RW_REG_DATA);
+ }
+
+ if (ret_val)
+ DEBUGOUT1("NVM read error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_spi - Write to EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using SPI interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function , the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = -E1000_ERR_NVM;
+ u16 widx = 0;
+
+ DEBUGFUNC("e1000_write_nvm_spi");
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ while (widx < words) {
+ u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
+
+ e1000_standby_nvm(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode) */
+ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+ nvm->opcode_bits);
+
+ e1000_standby_nvm(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ write_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+ e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+ nvm->address_bits);
+
+ /* Loop to allow for up to whole page write of eeprom */
+ while (widx < words) {
+ u16 word_out = data[widx];
+ word_out = (word_out >> 8) | (word_out << 8);
+ e1000_shift_out_eec_bits(hw, word_out, 16);
+ widx++;
+
+ if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+ e1000_standby_nvm(hw);
+ break;
+ }
+ }
+ msec_delay(10);
+ nvm->ops.release(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_microwire - Writes EEPROM using microwire
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using microwire interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function , the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val;
+ u32 eecd;
+ u16 words_written = 0;
+ u16 widx = 0;
+
+ DEBUGFUNC("e1000_write_nvm_microwire");
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val)
+ goto release;
+
+ e1000_shift_out_eec_bits(hw, NVM_EWEN_OPCODE_MICROWIRE,
+ (u16)(nvm->opcode_bits + 2));
+
+ e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
+
+ e1000_standby_nvm(hw);
+
+ while (words_written < words) {
+ e1000_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_MICROWIRE,
+ nvm->opcode_bits);
+
+ e1000_shift_out_eec_bits(hw, (u16)(offset + words_written),
+ nvm->address_bits);
+
+ e1000_shift_out_eec_bits(hw, data[words_written], 16);
+
+ e1000_standby_nvm(hw);
+
+ for (widx = 0; widx < 200; widx++) {
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (eecd & E1000_EECD_DO)
+ break;
+ usec_delay(50);
+ }
+
+ if (widx == 200) {
+ DEBUGOUT("NVM Write did not complete\n");
+ ret_val = -E1000_ERR_NVM;
+ goto release;
+ }
+
+ e1000_standby_nvm(hw);
+
+ words_written++;
+ }
+
+ e1000_shift_out_eec_bits(hw, NVM_EWDS_OPCODE_MICROWIRE,
+ (u16)(nvm->opcode_bits + 2));
+
+ e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
+
+release:
+ nvm->ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_pba_string_generic - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ DEBUGFUNC("e1000_read_pba_string_generic");
+
+ if ((hw->mac.type >= e1000_i210) &&
+ !e1000_get_flash_presence_i210(hw)) {
+ DEBUGOUT("Flashless no PBA string\n");
+ return -E1000_ERR_NVM_PBA_SECTION;
+ }
+
+ if (pba_num == NULL) {
+ DEBUGOUT("PBA string buffer was null\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /* if nvm_data is not ptr guard the PBA must be in legacy format which
+ * means pba_ptr is actually our second data word for the PBA number
+ * and we can decode it into an ascii string
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ DEBUGOUT("NVM PBA number is not stored as string\n");
+
+ /* make sure callers buffer is big enough to store the PBA */
+ if (pba_num_size < E1000_PBANUM_LENGTH) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return E1000_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (nvm_data >> 12) & 0xF;
+ pba_num[1] = (nvm_data >> 8) & 0xF;
+ pba_num[2] = (nvm_data >> 4) & 0xF;
+ pba_num[3] = nvm_data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return E1000_SUCCESS;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return -E1000_ERR_NVM_PBA_SECTION;
+ }
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return -E1000_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(nvm_data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_pba_length_generic - Read device part number length
+ * @hw: pointer to the HW structure
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number length from the EEPROM and
+ * stores the value in pba_num_size.
+ **/
+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 length;
+
+ DEBUGFUNC("e1000_read_pba_length_generic");
+
+ if (pba_num_size == NULL) {
+ DEBUGOUT("PBA buffer size was null\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /* if data is not ptr guard the PBA must be in legacy format */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ *pba_num_size = E1000_PBANUM_LENGTH;
+ return E1000_SUCCESS;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return -E1000_ERR_NVM_PBA_SECTION;
+ }
+
+ /* Convert from length in u16 values to u8 chars, add 1 for NULL,
+ * and subtract 2 because length field is included in length.
+ */
+ *pba_num_size = ((u32)length * 2) - 1;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_pba_num_generic - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ **/
+s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num)
+{
+ s32 ret_val;
+ u16 nvm_data;
+
+ DEBUGFUNC("e1000_read_pba_num_generic");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ } else if (nvm_data == NVM_PBA_PTR_GUARD) {
+ DEBUGOUT("NVM Not Supported\n");
+ return -E1000_NOT_IMPLEMENTED;
+ }
+ *pba_num = (u32)(nvm_data << 16);
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ *pba_num |= nvm_data;
+
+ return E1000_SUCCESS;
+}
+
+
+/**
+ * e1000_read_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @max_pba_block_size: PBA block size limit
+ * @pba: pointer to output PBA structure
+ *
+ * Reads PBA from EEPROM image when eeprom_buf is not NULL.
+ * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct e1000_pba *pba)
+{
+ s32 ret_val;
+ u16 pba_block_size;
+
+ if (pba == NULL)
+ return -E1000_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 2,
+ &pba->word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > NVM_PBA_OFFSET_1) {
+ pba->word[0] = eeprom_buf[NVM_PBA_OFFSET_0];
+ pba->word[1] = eeprom_buf[NVM_PBA_OFFSET_1];
+ } else {
+ return -E1000_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == NVM_PBA_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return -E1000_ERR_PARAM;
+
+ ret_val = e1000_get_pba_block_size(hw, eeprom_buf,
+ eeprom_buf_size,
+ &pba_block_size);
+ if (ret_val)
+ return ret_val;
+
+ if (pba_block_size > max_pba_block_size)
+ return -E1000_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = e1000_read_nvm(hw, pba->word[1],
+ pba_block_size,
+ pba->pba_block);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba_block_size)) {
+ memcpy(pba->pba_block,
+ &eeprom_buf[pba->word[1]],
+ pba_block_size * sizeof(u16));
+ } else {
+ return -E1000_ERR_PARAM;
+ }
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba: pointer to PBA structure
+ *
+ * Writes PBA to EEPROM image when eeprom_buf is not NULL.
+ * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 e1000_write_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct e1000_pba *pba)
+{
+ s32 ret_val;
+
+ if (pba == NULL)
+ return -E1000_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = e1000_write_nvm(hw, NVM_PBA_OFFSET_0, 2,
+ &pba->word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > NVM_PBA_OFFSET_1) {
+ eeprom_buf[NVM_PBA_OFFSET_0] = pba->word[0];
+ eeprom_buf[NVM_PBA_OFFSET_1] = pba->word[1];
+ } else {
+ return -E1000_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == NVM_PBA_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return -E1000_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = e1000_write_nvm(hw, pba->word[1],
+ pba->pba_block[0],
+ pba->pba_block);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba->pba_block[0])) {
+ memcpy(&eeprom_buf[pba->word[1]],
+ pba->pba_block,
+ pba->pba_block[0] * sizeof(u16));
+ } else {
+ return -E1000_ERR_PARAM;
+ }
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_pba_block_size
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba_data_size: pointer to output variable
+ *
+ * Returns the size of the PBA block in words. Function operates on EEPROM
+ * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
+ * EEPROM device.
+ *
+ **/
+s32 e1000_get_pba_block_size(struct e1000_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size)
+{
+ s32 ret_val;
+ u16 pba_word[2];
+ u16 length;
+
+ DEBUGFUNC("e1000_get_pba_block_size");
+
+ if (eeprom_buf == NULL) {
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 2, &pba_word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > NVM_PBA_OFFSET_1) {
+ pba_word[0] = eeprom_buf[NVM_PBA_OFFSET_0];
+ pba_word[1] = eeprom_buf[NVM_PBA_OFFSET_1];
+ } else {
+ return -E1000_ERR_PARAM;
+ }
+ }
+
+ if (pba_word[0] == NVM_PBA_PTR_GUARD) {
+ if (eeprom_buf == NULL) {
+ ret_val = e1000_read_nvm(hw, pba_word[1] + 0, 1,
+ &length);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > pba_word[1])
+ length = eeprom_buf[pba_word[1] + 0];
+ else
+ return -E1000_ERR_PARAM;
+ }
+
+ if (length == 0xFFFF || length == 0)
+ return -E1000_ERR_NVM_PBA_SECTION;
+ } else {
+ /* PBA number in legacy format, there is no PBA Block. */
+ length = 0;
+ }
+
+ if (pba_block_size != NULL)
+ *pba_block_size = length;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mac_addr_generic - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the EEPROM and stores the value.
+ * Since devices with two ports use the same EEPROM, we increment the
+ * last bit in the MAC address for the second port.
+ **/
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ rar_high = E1000_READ_REG(hw, E1000_RAH(0));
+ rar_low = E1000_READ_REG(hw, E1000_RAL(0));
+
+ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
+
+ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_generic");
+
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16) NVM_SUM) {
+ DEBUGOUT("NVM Checksum Invalid\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_update_nvm_checksum_generic - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_update_nvm_checksum");
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
+ if (ret_val)
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000_reload_nvm_generic - Reloads EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
+ **/
+STATIC void e1000_reload_nvm_generic(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+
+ DEBUGFUNC("e1000_reload_nvm_generic");
+
+ usec_delay(10);
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_get_fw_version - Get firmware version information
+ * @hw: pointer to the HW structure
+ * @fw_vers: pointer to output version structure
+ *
+ * unsupported/not present features return 0 in version structure
+ **/
+void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+{
+ u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
+ u8 q, hval, rem, result;
+ u16 comb_verh, comb_verl, comb_offset;
+
+ memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+
+ /* basic eeprom version numbers, bits used vary by part and by tool
+ * used to create the nvm images */
+ /* Check which data format we have */
+ switch (hw->mac.type) {
+ case e1000_i211:
+ e1000_read_invm_version(hw, fw_vers);
+ return;
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i354:
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ /* Use this format, unless EETRACK ID exists,
+ * then use alternate format
+ */
+ if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
+ goto etrack_id;
+ }
+ break;
+ case e1000_i210:
+ if (!(e1000_get_flash_presence_i210(hw))) {
+ e1000_read_invm_version(hw, fw_vers);
+ return;
+ }
+ /* fall through */
+ case e1000_i350:
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ /* find combo image version */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) &&
+ (comb_offset != NVM_VER_INVALID)) {
+
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ + 1), 1, &comb_verh);
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+ 1, &comb_verl);
+
+ /* get Option Rom version if it exists and is valid */
+ if ((comb_verh && comb_verl) &&
+ ((comb_verh != NVM_VER_INVALID) &&
+ (comb_verl != NVM_VER_INVALID))) {
+
+ fw_vers->or_valid = true;
+ fw_vers->or_major =
+ comb_verl >> NVM_COMB_VER_SHFT;
+ fw_vers->or_build =
+ (comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT);
+ fw_vers->or_patch =
+ comb_verh & NVM_COMB_VER_MASK;
+ }
+ }
+ break;
+ default:
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ return;
+ }
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+
+ /* check for old style version format in newer images*/
+ if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
+ eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
+ } else {
+ eeprom_verl = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ }
+ /* Convert minor value to hex before assigning to output struct
+ * Val to be converted will not be higher than 99, per tool output
+ */
+ q = eeprom_verl / NVM_HEX_CONV;
+ hval = q * NVM_HEX_TENS;
+ rem = eeprom_verl % NVM_HEX_CONV;
+ result = hval + rem;
+ fw_vers->eep_minor = result;
+
+etrack_id:
+ if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
+ | eeprom_verl;
+ } else if ((etrack_test & NVM_ETRACK_VALID) == 0) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) |
+ eeprom_verl;
+ }
+}
+
+
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.h
new file mode 100644
index 00000000..c400dc3a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.h
@@ -0,0 +1,98 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+struct e1000_pba {
+ u16 word[2];
+ u16 *pba_block;
+};
+
+struct e1000_fw_version {
+ u32 etrack_id;
+ u16 eep_major;
+ u16 eep_minor;
+ u16 eep_build;
+
+ u8 invm_major;
+ u8 invm_minor;
+ u8 invm_img_type;
+
+ bool or_valid;
+ u16 or_major;
+ u16 or_build;
+ u16 or_patch;
+};
+
+
+void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
+s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
+void e1000_null_nvm_generic(struct e1000_hw *hw);
+s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data);
+s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw);
+
+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num);
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
+s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct e1000_pba *pba);
+s32 e1000_write_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct e1000_pba *pba);
+s32 e1000_get_pba_block_size(struct e1000_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size);
+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000_stop_nvm(struct e1000_hw *hw);
+void e1000_release_nvm_generic(struct e1000_hw *hw);
+void e1000_get_fw_version(struct e1000_hw *hw,
+ struct e1000_fw_version *fw_vers);
+
+#define E1000_STM_OPCODE 0xDB00
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.c
new file mode 100644
index 00000000..7270edfa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.c
@@ -0,0 +1,83 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+/*
+ * NOTE: the following routines using the e1000
+ * naming style are provided to the shared
+ * code but are OS specific
+ */
+
+void
+e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ return;
+}
+
+void
+e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ *value = 0;
+ return;
+}
+
+void
+e1000_pci_set_mwi(struct e1000_hw *hw)
+{
+}
+
+void
+e1000_pci_clear_mwi(struct e1000_hw *hw)
+{
+}
+
+
+/*
+ * Read the PCI Express capabilities
+ */
+int32_t
+e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ return E1000_NOT_IMPLEMENTED;
+}
+
+/*
+ * Write the PCI Express capabilities
+ */
+int32_t
+e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ return E1000_NOT_IMPLEMENTED;
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.h
new file mode 100644
index 00000000..b8868049
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.h
@@ -0,0 +1,198 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+
+#include "../e1000_logs.h"
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define usec_delay_irq(x) DELAY(x)
+#define msec_delay(x) DELAY(1000*(x))
+#define msec_delay_irq(x) DELAY(1000*(x))
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n");
+#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args)
+#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args)
+
+#define UNREFERENCED_PARAMETER(_p)
+#define UNREFERENCED_1PARAMETER(_p)
+#define UNREFERENCED_2PARAMETER(_p, _q)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r)
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
+
+#define FALSE 0
+#define TRUE 1
+
+#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
+
+/* Mutex used in the shared code */
+#define E1000_MUTEX uintptr_t
+#define E1000_MUTEX_INIT(mutex) (*(mutex) = 0)
+#define E1000_MUTEX_LOCK(mutex) (*(mutex) = 1)
+#define E1000_MUTEX_UNLOCK(mutex) (*(mutex) = 0)
+
+typedef uint64_t u64;
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+typedef int64_t s64;
+typedef int32_t s32;
+typedef int16_t s16;
+typedef int8_t s8;
+typedef int bool;
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
+
+#define E1000_PCI_REG(reg) rte_read32(reg)
+
+#define E1000_PCI_REG16(reg) rte_read16(reg)
+
+#define E1000_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+
+#define E1000_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
+
+#define E1000_PCI_REG_WRITE16(reg, value) \
+ rte_write16((rte_cpu_to_le_16(value)), reg)
+
+#define E1000_PCI_REG_ADDR(hw, reg) \
+ ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
+
+#define E1000_PCI_REG_ARRAY_ADDR(hw, reg, index) \
+ E1000_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
+
+#define E1000_PCI_REG_FLASH_ADDR(hw, reg) \
+ ((volatile uint32_t *)((char *)(hw)->flash_address + (reg)))
+
+static inline uint32_t e1000_read_addr(volatile void *addr)
+{
+ return rte_le_to_cpu_32(E1000_PCI_REG(addr));
+}
+
+static inline uint16_t e1000_read_addr16(volatile void *addr)
+{
+ return rte_le_to_cpu_16(E1000_PCI_REG16(addr));
+}
+
+/* Necessary defines */
+#define E1000_MRQC_ENABLE_MASK 0x00000007
+#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+#define E1000_ALL_FULL_DUPLEX ( \
+ ADVERTISE_10_FULL | ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
+
+#define M88E1543_E_PHY_ID 0x01410EA0
+#define ULP_SUPPORT
+
+#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
+#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+
+/* Register READ/WRITE macros */
+
+#define E1000_READ_REG(hw, reg) \
+ e1000_read_addr(E1000_PCI_REG_ADDR((hw), (reg)))
+
+#define E1000_WRITE_REG(hw, reg, value) \
+ E1000_PCI_REG_WRITE(E1000_PCI_REG_ADDR((hw), (reg)), (value))
+
+#define E1000_READ_REG_ARRAY(hw, reg, index) \
+ E1000_PCI_REG(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index)))
+
+#define E1000_WRITE_REG_ARRAY(hw, reg, index, value) \
+ E1000_PCI_REG_WRITE(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_ACCESS_PANIC(x, hw, reg, value) \
+ rte_panic("%s:%u\t" RTE_STR(x) "(%p, 0x%x, 0x%x)", \
+ __FILE__, __LINE__, (hw), (reg), (unsigned int)(value))
+
+/*
+ * To be able to do IO write, we need to map IO BAR
+ * (bar 2/4 depending on device).
+ * Right now mapping multiple BARs is not supported by DPDK.
+ * Fortunatelly we need it only for legacy hw support.
+ */
+
+#define E1000_WRITE_REG_IO(hw, reg, value) \
+ E1000_WRITE_REG(hw, reg, value)
+
+/*
+ * Tested on I217/I218 chipset.
+ */
+
+#define E1000_READ_FLASH_REG(hw, reg) \
+ e1000_read_addr(E1000_PCI_REG_FLASH_ADDR((hw), (reg)))
+
+#define E1000_READ_FLASH_REG16(hw, reg) \
+ e1000_read_addr16(E1000_PCI_REG_FLASH_ADDR((hw), (reg)))
+
+#define E1000_WRITE_FLASH_REG(hw, reg, value) \
+ E1000_PCI_REG_WRITE(E1000_PCI_REG_FLASH_ADDR((hw), (reg)), (value))
+
+#define E1000_WRITE_FLASH_REG16(hw, reg, value) \
+ E1000_PCI_REG_WRITE16(E1000_PCI_REG_FLASH_ADDR((hw), (reg)), (value))
+
+#define STATIC static
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+#define false FALSE
+#define true TRUE
+
+#endif /* _E1000_OSDEP_H_ */
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.c
new file mode 100644
index 00000000..33f478b1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.c
@@ -0,0 +1,4260 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_wait_autoneg(struct e1000_hw *hw);
+STATIC s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read, bool page_set);
+STATIC u32 e1000_get_phy_addr_for_hv_page(u32 page);
+STATIC s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read);
+
+/* Cable length tables */
+STATIC const u16 e1000_m88_cable_length_table[] = {
+ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+ (sizeof(e1000_m88_cable_length_table) / \
+ sizeof(e1000_m88_cable_length_table[0]))
+
+STATIC const u16 e1000_igp_2_cable_length_table[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
+ 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
+ 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
+ 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
+ 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
+ 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
+ 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
+ 124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+ (sizeof(e1000_igp_2_cable_length_table) / \
+ sizeof(e1000_igp_2_cable_length_table[0]))
+
+/**
+ * e1000_init_phy_ops_generic - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Setups up the function pointers to no-op functions
+ **/
+void e1000_init_phy_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ DEBUGFUNC("e1000_init_phy_ops_generic");
+
+ /* Initialize function pointers */
+ phy->ops.init_params = e1000_null_ops_generic;
+ phy->ops.acquire = e1000_null_ops_generic;
+ phy->ops.check_polarity = e1000_null_ops_generic;
+ phy->ops.check_reset_block = e1000_null_ops_generic;
+ phy->ops.commit = e1000_null_ops_generic;
+ phy->ops.force_speed_duplex = e1000_null_ops_generic;
+ phy->ops.get_cfg_done = e1000_null_ops_generic;
+ phy->ops.get_cable_length = e1000_null_ops_generic;
+ phy->ops.get_info = e1000_null_ops_generic;
+ phy->ops.set_page = e1000_null_set_page;
+ phy->ops.read_reg = e1000_null_read_reg;
+ phy->ops.read_reg_locked = e1000_null_read_reg;
+ phy->ops.read_reg_page = e1000_null_read_reg;
+ phy->ops.release = e1000_null_phy_generic;
+ phy->ops.reset = e1000_null_ops_generic;
+ phy->ops.set_d0_lplu_state = e1000_null_lplu_state;
+ phy->ops.set_d3_lplu_state = e1000_null_lplu_state;
+ phy->ops.write_reg = e1000_null_write_reg;
+ phy->ops.write_reg_locked = e1000_null_write_reg;
+ phy->ops.write_reg_page = e1000_null_write_reg;
+ phy->ops.power_up = e1000_null_phy_generic;
+ phy->ops.power_down = e1000_null_phy_generic;
+ phy->ops.read_i2c_byte = e1000_read_i2c_byte_null;
+ phy->ops.write_i2c_byte = e1000_write_i2c_byte_null;
+ phy->ops.cfg_on_link_up = e1000_null_ops_generic;
+}
+
+/**
+ * e1000_null_set_page - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG data)
+{
+ DEBUGFUNC("e1000_null_set_page");
+ UNREFERENCED_2PARAMETER(hw, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_read_reg - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data)
+{
+ DEBUGFUNC("e1000_null_read_reg");
+ UNREFERENCED_3PARAMETER(hw, offset, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_phy_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_phy_generic");
+ UNREFERENCED_1PARAMETER(hw);
+ return;
+}
+
+/**
+ * e1000_null_lplu_state - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw,
+ bool E1000_UNUSEDARG active)
+{
+ DEBUGFUNC("e1000_null_lplu_state");
+ UNREFERENCED_2PARAMETER(hw, active);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_write_reg - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data)
+{
+ DEBUGFUNC("e1000_null_write_reg");
+ UNREFERENCED_3PARAMETER(hw, offset, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_i2c_byte_null - No-op function, return 0
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: data value read
+ *
+ **/
+s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG byte_offset,
+ u8 E1000_UNUSEDARG dev_addr,
+ u8 E1000_UNUSEDARG *data)
+{
+ DEBUGFUNC("e1000_read_i2c_byte_null");
+ UNREFERENCED_4PARAMETER(hw, byte_offset, dev_addr, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_i2c_byte_null - No-op function, return 0
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: data value to write
+ *
+ **/
+s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG byte_offset,
+ u8 E1000_UNUSEDARG dev_addr,
+ u8 E1000_UNUSEDARG data)
+{
+ DEBUGFUNC("e1000_write_i2c_byte_null");
+ UNREFERENCED_4PARAMETER(hw, byte_offset, dev_addr, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_reset_block_generic - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Read the PHY management control register and check whether a PHY reset
+ * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise
+ * return E1000_BLK_PHY_RESET (12).
+ **/
+s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
+{
+ u32 manc;
+
+ DEBUGFUNC("e1000_check_reset_block");
+
+ manc = E1000_READ_REG(hw, E1000_MANC);
+
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+ E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_id - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+s32 e1000_get_phy_id(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_id;
+ u16 retry_count = 0;
+
+ DEBUGFUNC("e1000_get_phy_id");
+
+ if (!phy->ops.read_reg)
+ return E1000_SUCCESS;
+
+ while (retry_count < 2) {
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id = (u32)(phy_id << 16);
+ usec_delay(20);
+ ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+ if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
+ return E1000_SUCCESS;
+
+ retry_count++;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_reset_dsp_generic - Reset PHY DSP
+ * @hw: pointer to the HW structure
+ *
+ * Reset the digital signal processor.
+ **/
+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_phy_reset_dsp_generic");
+
+ if (!hw->phy.ops.write_reg)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+ if (ret_val)
+ return ret_val;
+
+ return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+}
+
+/**
+ * e1000_read_phy_reg_mdic - Read MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+
+ DEBUGFUNC("e1000_read_phy_reg_mdic");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ usec_delay_irq(50);
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ DEBUGOUT("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
+ *data = (u16) mdic;
+
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ usec_delay_irq(100);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg_mdic - Write MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+
+ DEBUGFUNC("e1000_write_phy_reg_mdic");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = (((u32)data) |
+ (offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ usec_delay_irq(50);
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ DEBUGOUT("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
+
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ usec_delay_irq(100);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_phy_reg_i2c - Read PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the i2c interface and stores the
+ * retrieved information in data.
+ **/
+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, i2ccmd = 0;
+
+ DEBUGFUNC("e1000_read_phy_reg_i2c");
+
+ /* Set up Op-code, Phy Address, and register address in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+ (E1000_I2CCMD_OPCODE_READ));
+
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+
+ /* Need to byte-swap the 16-bit value. */
+ *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg_i2c - Write PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset using the i2c interface.
+ **/
+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, i2ccmd = 0;
+ u16 phy_data_swapped;
+
+ DEBUGFUNC("e1000_write_phy_reg_i2c");
+
+ /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/
+ if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) {
+ DEBUGOUT1("PHY I2C Address %d is out of range.\n",
+ hw->phy.addr);
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Swap the data bytes for the I2C interface */
+ phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+
+ /* Set up Op-code, Phy Address, and register address in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_WRITE |
+ phy_data_swapped);
+
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_sfp_data_byte - Reads SFP module data.
+ * @hw: pointer to the HW structure
+ * @offset: byte location offset to be read
+ * @data: read data buffer pointer
+ *
+ * Reads one byte from SFP module data stored
+ * in SFP resided EEPROM memory or SFP diagnostic area.
+ * Function should be called with
+ * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
+ * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
+ * access
+ **/
+s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
+{
+ u32 i = 0;
+ u32 i2ccmd = 0;
+ u32 data_local = 0;
+
+ DEBUGFUNC("e1000_read_sfp_data_byte");
+
+ if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
+ DEBUGOUT("I2CCMD command address exceeds upper limit\n");
+ return -E1000_ERR_PHY;
+ }
+
+ /* Set up Op-code, EEPROM Address,in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * EEPROM to retrieve the desired data.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_READ);
+
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ data_local = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (data_local & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(data_local & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (data_local & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+ *data = (u8) data_local & 0xFF;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_sfp_data_byte - Writes SFP module data.
+ * @hw: pointer to the HW structure
+ * @offset: byte location offset to write to
+ * @data: data to write
+ *
+ * Writes one byte to SFP module data stored
+ * in SFP resided EEPROM memory or SFP diagnostic area.
+ * Function should be called with
+ * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
+ * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
+ * access
+ **/
+s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
+{
+ u32 i = 0;
+ u32 i2ccmd = 0;
+ u32 data_local = 0;
+
+ DEBUGFUNC("e1000_write_sfp_data_byte");
+
+ if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
+ DEBUGOUT("I2CCMD command address exceeds upper limit\n");
+ return -E1000_ERR_PHY;
+ }
+ /* The programming interface is 16 bits wide
+ * so we need to read the whole word first
+ * then update appropriate byte lane and write
+ * the updated word back.
+ */
+ /* Set up Op-code, EEPROM Address,in the I2CCMD
+ * register. The MAC will take care of interfacing
+ * with an EEPROM to write the data given.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_READ);
+ /* Set a command to read single word */
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ /* Poll the ready bit to see if lastly
+ * launched I2C operation completed
+ */
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY) {
+ /* Check if this is READ or WRITE phase */
+ if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
+ E1000_I2CCMD_OPCODE_READ) {
+ /* Write the selected byte
+ * lane and update whole word
+ */
+ data_local = i2ccmd & 0xFF00;
+ data_local |= data;
+ i2ccmd = ((offset <<
+ E1000_I2CCMD_REG_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_WRITE | data_local);
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+ } else {
+ break;
+ }
+ }
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_phy_reg_m88 - Read m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and storing the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_phy_reg_m88");
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_m88 - Write m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_write_phy_reg_m88");
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_set_page_igp - Set page as on IGP-like PHY(s)
+ * @hw: pointer to the HW structure
+ * @page: page to set (shifted left when necessary)
+ *
+ * Sets PHY page required for PHY register access. Assumes semaphore is
+ * already acquired. Note, this function sets phy.addr to 1 so the caller
+ * must set it appropriately (if necessary) after this function returns.
+ **/
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
+{
+ DEBUGFUNC("e1000_set_page_igp");
+
+ DEBUGOUT1("Setting page 0x%x\n", page);
+
+ hw->phy.addr = 1;
+
+ return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
+}
+
+/**
+ * __e1000_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+STATIC s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("__e1000_read_phy_reg_igp");
+
+ if (!locked) {
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG)
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (!ret_val)
+ ret_val = e1000_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores the
+ * retrieved information in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000_read_phy_reg_igp_locked - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * e1000_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+STATIC s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_phy_reg_igp");
+
+ if (!locked) {
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG)
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (!ret_val)
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
+ offset,
+ data);
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000_write_phy_reg_igp_locked - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000_read_kmrn_reg - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then reads the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release any acquired semaphores before exiting.
+ **/
+STATIC s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
+{
+ u32 kmrnctrlsta;
+
+ DEBUGFUNC("__e1000_read_kmrn_reg");
+
+ if (!locked) {
+ s32 ret_val = E1000_SUCCESS;
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(2);
+
+ kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
+ *data = (u16)kmrnctrlsta;
+
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_kmrn_reg_generic - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset using the
+ * kumeran interface. The information retrieved is stored in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000_read_kmrn_reg_locked - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the kumeran interface. The
+ * information retrieved is stored in data.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * __e1000_write_kmrn_reg - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then write the data to PHY register
+ * at the offset using the kumeran interface. Release any acquired semaphores
+ * before exiting.
+ **/
+STATIC s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
+{
+ u32 kmrnctrlsta;
+
+ DEBUGFUNC("e1000_write_kmrn_reg_generic");
+
+ if (!locked) {
+ s32 ret_val = E1000_SUCCESS;
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | data;
+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(2);
+
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_kmrn_reg_generic - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to the PHY register at the offset
+ * using the kumeran interface. Release the acquired semaphore before exiting.
+ **/
+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000_write_kmrn_reg_locked - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Write the data to PHY register at the offset using the kumeran interface.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * e1000_set_master_slave_mode - Setup PHY for Master/slave mode
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Master/slave mode
+ **/
+STATIC s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ /* Resolve Master/Slave mode */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* load defaults for future use */
+ hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
+ ((phy_data & CR_1000T_MS_VALUE) ?
+ e1000_ms_force_master :
+ e1000_ms_force_slave) : e1000_ms_auto;
+
+ switch (hw->phy.ms_type) {
+ case e1000_ms_force_master:
+ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_force_slave:
+ phy_data |= CR_1000T_MS_ENABLE;
+ phy_data &= ~(CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_auto:
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ /* fall-through */
+ default:
+ break;
+ }
+
+ return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
+}
+
+/**
+ * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_setup_82577");
+
+ if (hw->phy.type == e1000_phy_82580) {
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error resetting the PHY.\n");
+ return ret_val;
+ }
+ }
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
+
+ /* Enable downshift */
+ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
+
+ ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set MDI/MDIX mode */
+ ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+ /* Options:
+ * 0 - Auto (default)
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ */
+ switch (hw->phy.mdix) {
+ case 1:
+ break;
+ case 2:
+ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+ break;
+ }
+ ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_set_master_slave_mode(hw);
+}
+
+/**
+ * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock
+ * and downshift values are set also.
+ **/
+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_setup_m88");
+
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* For BM PHY this bit is downshift enable */
+ if (phy->type != e1000_phy_bm)
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (phy->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (phy->disable_polarity_correction)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+ /* Enable downshift on BM (disabled by default) */
+ if (phy->type == e1000_phy_bm) {
+ /* For 82574/82583, first disable then enable downshift */
+ if (phy->id == BME1000_E_PHY_ID_R2) {
+ phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ /* Commit the changes. */
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+ }
+
+ phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
+ }
+
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((phy->type == e1000_phy_m88) &&
+ (phy->revision < E1000_REVISION_4) &&
+ (phy->id != BME1000_E_PHY_ID_R2)) {
+ /* Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if ((phy->revision == E1000_REVISION_2) &&
+ (phy->id == M88E1111_I_PHY_ID)) {
+ /* 82573L PHY - set the downshift counter to 5x. */
+ phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+ }
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) {
+ /* Set PHY page 0, register 29 to 0x0003 */
+ ret_val = phy->ops.write_reg(hw, 29, 0x0003);
+ if (ret_val)
+ return ret_val;
+
+ /* Set PHY page 0, register 30 to 0x0000 */
+ ret_val = phy->ops.write_reg(hw, 30, 0x0000);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Commit the changes. */
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ if (phy->type == e1000_phy_82578) {
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* 82578 PHY - set the downshift count to 1x. */
+ phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
+ phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's.
+ * Also enables and sets the downshift parameters.
+ **/
+s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_setup_m88_gen2");
+
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (phy->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ /* M88E1112 does not support this mode) */
+ if (phy->id != M88E1112_E_PHY_ID) {
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ }
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (phy->disable_polarity_correction)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+ /* Enable downshift and setting it to X6 */
+ if (phy->id == M88E1543_E_PHY_ID) {
+ phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE;
+ ret_val =
+ phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+ }
+
+ phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
+ phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
+ phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
+
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Commit the changes. */
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ ret_val = e1000_set_master_slave_mode(hw);
+ if (ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_setup_igp - Setup igp PHY's for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ * igp PHY's.
+ **/
+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_copper_link_setup_igp");
+
+
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error resetting the PHY.\n");
+ return ret_val;
+ }
+
+ /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+ * timeout issues when LFS is enabled.
+ */
+ msec_delay(100);
+
+ /* The NVM settings will configure LPLU in D3 for
+ * non-IGP1 PHYs.
+ */
+ if (phy->type == e1000_phy_igp) {
+ /* disable lplu d3 during driver init */
+ ret_val = hw->phy.ops.set_d3_lplu_state(hw, false);
+ if (ret_val) {
+ DEBUGOUT("Error Disabling LPLU D3\n");
+ return ret_val;
+ }
+ }
+
+ /* disable lplu d0 during driver init */
+ if (hw->phy.ops.set_d0_lplu_state) {
+ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
+ if (ret_val) {
+ DEBUGOUT("Error Disabling LPLU D0\n");
+ return ret_val;
+ }
+ }
+ /* Configure mdi-mdix settings */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+ switch (phy->mdix) {
+ case 1:
+ data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 2:
+ data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 0:
+ default:
+ data |= IGP01E1000_PSCR_AUTO_MDIX;
+ break;
+ }
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* set auto-master slave resolution settings */
+ if (hw->mac.autoneg) {
+ /* when autonegotiation advertisement is only 1000Mbps then we
+ * should disable SmartSpeed and enable Auto MasterSlave
+ * resolution as hardware default.
+ */
+ if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+ /* Disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set auto Master/Slave resolution process */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~CR_1000T_MS_ENABLE;
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_set_master_slave_mode(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MII auto-neg advertisement register and/or the 1000T control
+ * register and if the PHY is already setup for auto-negotiation, then
+ * return successful. Otherwise, setup advertisement and flow control to
+ * the appropriate values for the wanted auto-negotiation.
+ **/
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg = 0;
+
+ DEBUGFUNC("e1000_phy_setup_autoneg");
+
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
+ &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+ NWAY_AR_100TX_HD_CAPS |
+ NWAY_AR_10T_FD_CAPS |
+ NWAY_AR_10T_HD_CAPS);
+ mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+ DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+ DEBUGOUT("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+ DEBUGOUT("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+ DEBUGOUT("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+ DEBUGOUT("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+ DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+ DEBUGOUT("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ }
+
+ /* Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+ * negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ /* Flow control (Rx & Tx) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_rx_pause:
+ /* Rx Flow control is enabled, and Tx Flow control is
+ * disabled, by a software over-ride.
+ *
+ * Since there really isn't a way to advertise that we are
+ * capable of Rx Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric Rx PAUSE. Later
+ * (in e1000_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case e1000_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL)
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
+
+ return ret_val;
+}
+
+/**
+ * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Performs initial bounds checking on autoneg advertisement parameter, then
+ * configure to advertise the full capability. Setup the PHY to autoneg
+ * and restart the negotiation process between the link partner. If
+ * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
+ **/
+s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ DEBUGFUNC("e1000_copper_link_autoneg");
+
+ /* Perform some bounds checking on the autoneg advertisement
+ * parameter.
+ */
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code so we set to advertise full capability.
+ */
+ if (!phy->autoneg_advertised)
+ phy->autoneg_advertised = phy->autoneg_mask;
+
+ DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ DEBUGOUT("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, callback routine).
+ */
+ if (phy->autoneg_wait_to_complete) {
+ ret_val = e1000_wait_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while waiting for autoneg to complete\n");
+ return ret_val;
+ }
+ }
+
+ hw->mac.get_link_status = true;
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_generic - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex. Then we check for link, once link is established calls
+ * to configure collision distance and flow control are called. If link is
+ * not established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ bool link;
+
+ DEBUGFUNC("e1000_setup_copper_link_generic");
+
+ if (hw->mac.autoneg) {
+ /* Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = e1000_copper_link_autoneg(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* PHY will be set to 10H, 10F, 100H or 100F
+ * depending on user settings.
+ */
+ DEBUGOUT("Forcing Speed and Duplex\n");
+ ret_val = hw->phy.ops.force_speed_duplex(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Forcing Speed and Duplex\n");
+ return ret_val;
+ }
+ }
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
+ &link);
+ if (ret_val)
+ return ret_val;
+
+ if (link) {
+ DEBUGOUT("Valid link established!!!\n");
+ hw->mac.ops.config_collision_dist(hw);
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ } else {
+ DEBUGOUT("Unable to establish link!!!\n");
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Waits for link and returns
+ * successful if link up is successful, else -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("IGP PSCR: %X\n", phy_data);
+
+ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Resets the PHY to commit the
+ * changes. If time expires while waiting for link up, we reset the DSP.
+ * After reset, TX_CLK and CRS on Tx must be set. Return successful upon
+ * successful completion, else return corresponding error code.
+ **/
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
+
+ /* I210 and I211 devices support Auto-Crossover in forced operation. */
+ if (phy->type != e1000_phy_i210) {
+ /* Clear Auto-Crossover to force MDI manually. M88E1000
+ * requires MDI forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
+ }
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Reset the phy to commit changes. */
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ bool reset_dsp = true;
+
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case I210_I_PHY_ID:
+ reset_dsp = false;
+ break;
+ default:
+ if (hw->phy.type != e1000_phy_m88)
+ reset_dsp = false;
+ break;
+ }
+
+ if (!reset_dsp) {
+ DEBUGOUT("Link taking longer than expected.\n");
+ } else {
+ /* We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+ ret_val = phy->ops.write_reg(hw,
+ M88E1000_PHY_PAGE_SELECT,
+ 0x001d);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_phy_reset_dsp_generic(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->phy.type != e1000_phy_m88)
+ return E1000_SUCCESS;
+
+ if (hw->phy.id == I347AT4_E_PHY_ID ||
+ hw->phy.id == M88E1340M_E_PHY_ID ||
+ hw->phy.id == M88E1112_E_PHY_ID)
+ return E1000_SUCCESS;
+ if (hw->phy.id == I210_I_PHY_ID)
+ return E1000_SUCCESS;
+ if ((hw->phy.id == M88E1543_E_PHY_ID) ||
+ (hw->phy.id == M88E1512_E_PHY_ID))
+ return E1000_SUCCESS;
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Resetting the phy means we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock from
+ * the reset value of 2.5MHz.
+ */
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* In addition, we must re-enable CRS on Tx for both half and full
+ * duplex.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
+ * @hw: pointer to the HW structure
+ *
+ * Forces the speed and duplex settings of the PHY.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* Disable MDI-X support for 10/100 */
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IFE_PMC_AUTO_MDIX;
+ data &= ~IFE_PMC_FORCE_MDIX;
+
+ ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("IFE PMC: %X\n", data);
+
+ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ * @hw: pointer to the HW structure
+ * @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ * Forces speed and duplex on the PHY by doing the following: disable flow
+ * control, force speed/duplex on the MAC, disable auto speed detection,
+ * disable auto-negotiation, configure duplex, configure speed, configure
+ * the collision distance, write configuration to CTRL register. The
+ * caller must write to the PHY_CONTROL register for these settings to
+ * take affect.
+ **/
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
+
+ /* Turn off flow control when forcing speed/duplex */
+ hw->fc.current_mode = e1000_fc_none;
+
+ /* Force speed/duplex on the mac */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~E1000_CTRL_SPD_SEL;
+
+ /* Disable Auto Speed Detection */
+ ctrl &= ~E1000_CTRL_ASDE;
+
+ /* Disable autoneg on the phy */
+ *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+ /* Forcing Full or Half Duplex? */
+ if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+ ctrl &= ~E1000_CTRL_FD;
+ *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ } else {
+ ctrl |= E1000_CTRL_FD;
+ *phy_ctrl |= MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ }
+
+ /* Forcing 10mb or 100mb? */
+ if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+ ctrl |= E1000_CTRL_SPD_100;
+ *phy_ctrl |= MII_CR_SPEED_100;
+ *phy_ctrl &= ~MII_CR_SPEED_1000;
+ DEBUGOUT("Forcing 100mb\n");
+ } else {
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ DEBUGOUT("Forcing 10mb\n");
+ }
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+}
+
+/**
+ * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d3_lplu_state_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!active) {
+ data &= ~IGP02E1000_PM_D3_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ return ret_val;
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= IGP02E1000_PM_D3_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_downshift_generic - Checks whether a downshift in speed occurred
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * A downshift is detected by querying the PHY link health.
+ **/
+s32 e1000_check_downshift_generic(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
+ DEBUGFUNC("e1000_check_downshift_generic");
+
+ switch (phy->type) {
+ case e1000_phy_i210:
+ case e1000_phy_m88:
+ case e1000_phy_gg82563:
+ case e1000_phy_bm:
+ case e1000_phy_82578:
+ offset = M88E1000_PHY_SPEC_STATUS;
+ mask = M88E1000_PSSR_DOWNSHIFT;
+ break;
+ case e1000_phy_igp:
+ case e1000_phy_igp_2:
+ case e1000_phy_igp_3:
+ offset = IGP01E1000_PHY_LINK_HEALTH;
+ mask = IGP01E1000_PLHR_SS_DOWNGRADE;
+ break;
+ default:
+ /* speed downshift not supported */
+ phy->speed_downgraded = false;
+ return E1000_SUCCESS;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->speed_downgraded = !!(phy_data & mask);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_m88 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_check_polarity_m88");
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_igp - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY port status register, and the
+ * current speed (since there is no polarity at 100Mbps).
+ **/
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data, offset, mask;
+
+ DEBUGFUNC("e1000_check_polarity_igp");
+
+ /* Polarity is determined based on the speed of
+ * our connection.
+ */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ offset = IGP01E1000_PHY_PCS_INIT_REG;
+ mask = IGP01E1000_PHY_POLARITY_MASK;
+ } else {
+ /* This really only applies to 10Mbps since
+ * there is no polarity for 100Mbps (always 0).
+ */
+ offset = IGP01E1000_PHY_PORT_STATUS;
+ mask = IGP01E1000_PSSR_POLARITY_REVERSED;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ * @hw: pointer to the HW structure
+ *
+ * Polarity is determined on the polarity reversal feature being enabled.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
+ DEBUGFUNC("e1000_check_polarity_ife");
+
+ /* Polarity is determined based on the reversal feature being enabled.
+ */
+ if (phy->polarity_correction) {
+ offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+ mask = IFE_PESC_POLARITY_REVERSED;
+ } else {
+ offset = IFE_PHY_SPECIAL_CONTROL;
+ mask = IFE_PSC_FORCE_POLARITY;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((phy_data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_wait_autoneg - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, which ever happens first.
+ **/
+STATIC s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 i, phy_status;
+
+ DEBUGFUNC("e1000_wait_autoneg");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
+ /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_AUTONEG_COMPLETE)
+ break;
+ msec_delay(100);
+ }
+
+ /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+ * has completed.
+ */
+ return ret_val;
+}
+
+/**
+ * e1000_phy_has_link_generic - Polls PHY for link
+ * @hw: pointer to the HW structure
+ * @iterations: number of times to poll for link
+ * @usec_interval: delay between polling attempts
+ * @success: pointer to whether polling was successful or not
+ *
+ * Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 i, phy_status;
+
+ DEBUGFUNC("e1000_phy_has_link_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
+ for (i = 0; i < iterations; i++) {
+ /* Some PHYs require the PHY_STATUS register to be read
+ * twice due to the link bit being sticky. No harm doing
+ * it across the board.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val) {
+ /* If the first read fails, another entity may have
+ * ownership of the resources, wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+ if (usec_interval >= 1000)
+ msec_delay(usec_interval/1000);
+ else
+ usec_delay(usec_interval);
+ }
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_LINK_STATUS)
+ break;
+ if (usec_interval >= 1000)
+ msec_delay(usec_interval/1000);
+ else
+ usec_delay(usec_interval);
+ }
+
+ *success = (i < iterations);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_m88 - Determine cable length for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY specific status register to retrieve the cable length
+ * information. The cable length is determined by averaging the minimum and
+ * maximum values to get the "average" cable length. The m88 PHY has four
+ * possible cable length values, which are:
+ * Register Value Cable Length
+ * 0 < 50 meters
+ * 1 50 - 80 meters
+ * 2 80 - 110 meters
+ * 3 110 - 140 meters
+ * 4 > 140 meters
+ **/
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, index;
+
+ DEBUGFUNC("e1000_get_cable_length_m88");
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
+ return -E1000_ERR_PHY;
+
+ phy->min_cable_length = e1000_m88_cable_length_table[index];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+ return E1000_SUCCESS;
+}
+
+s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, phy_data2, is_cm;
+ u16 index, default_page;
+
+ DEBUGFUNC("e1000_get_cable_length_m88_gen2");
+
+ switch (hw->phy.id) {
+ case I210_I_PHY_ID:
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+ return ret_val;
+
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+ /* Populate the phy structure with cable length in meters */
+ phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+ break;
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case I347AT4_E_PHY_ID:
+ /* Remember the original page select and set it to 7 */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
+ if (ret_val)
+ return ret_val;
+
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+ return ret_val;
+
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+ /* Populate the phy structure with cable length in meters */
+ phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+
+ /* Reset the page select to its original value */
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+ default_page);
+ if (ret_val)
+ return ret_val;
+ break;
+
+ case M88E1112_E_PHY_ID:
+ /* Remember the original page select and set it to 5 */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
+ return -E1000_ERR_PHY;
+
+ phy->min_cable_length = e1000_m88_cable_length_table[index];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+ phy->cable_length = (phy->min_cable_length +
+ phy->max_cable_length) / 2;
+
+ /* Reset the page select to its original value */
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+ default_page);
+ if (ret_val)
+ return ret_val;
+
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ * @hw: pointer to the HW structure
+ *
+ * The automatic gain control (agc) normalizes the amplitude of the
+ * received signal, adjusting for the attenuation produced by the
+ * cable. By reading the AGC registers, which represent the
+ * combination of coarse and fine gain value, the value can be put
+ * into a lookup table to obtain the approximate cable length
+ * for each channel.
+ **/
+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, i, agc_value = 0;
+ u16 cur_agc_index, max_agc_index = 0;
+ u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+ static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
+ };
+
+ DEBUGFUNC("e1000_get_cable_length_igp_2");
+
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Getting bits 15:9, which represent the combination of
+ * coarse and fine gain values. The result is a number
+ * that can be put into the lookup table to obtain the
+ * approximate cable length.
+ */
+ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK);
+
+ /* Array index bound check. */
+ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+ (cur_agc_index == 0))
+ return -E1000_ERR_PHY;
+
+ /* Remove min & max AGC values from calculation. */
+ if (e1000_igp_2_cable_length_table[min_agc_index] >
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ min_agc_index = cur_agc_index;
+ if (e1000_igp_2_cable_length_table[max_agc_index] <
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ max_agc_index = cur_agc_index;
+
+ agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+ }
+
+ agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+ e1000_igp_2_cable_length_table[max_agc_index]);
+ agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+ /* Calculate cable length with the error range of +/- 10 meters. */
+ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0);
+ phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_info_m88 - Retrieve PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Valid for only copper links. Read the PHY status register (sticky read)
+ * to verify that link is up. Read the PHY special control register to
+ * determine the polarity and 10base-T extended distance. Read the PHY
+ * special status register to determine MDI/MDIx and current speed. If
+ * speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_m88");
+
+ if (phy->media_type != e1000_media_type_copper) {
+ DEBUGOUT("Phy info is only valid for copper media\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->polarity_correction = !!(phy_data &
+ M88E1000_PSCR_POLARITY_REVERSAL);
+
+ ret_val = e1000_check_polarity_m88(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ /* Set values to "undefined" */
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_igp - Retrieve igp PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine on the cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_igp");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = e1000_check_polarity_igp(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);
+
+ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ ret_val = phy->ops.get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ * @hw: pointer to the HW structure
+ *
+ * Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_ife");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+ phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
+
+ if (phy->polarity_correction) {
+ ret_val = e1000_check_polarity_ife(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Polarity is forced */
+ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+ }
+
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
+
+ /* The following parameters are undefined for 10/100 operation. */
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_sw_reset_generic - PHY software reset
+ * @hw: pointer to the HW structure
+ *
+ * Does a software reset of the PHY by reading the PHY control register and
+ * setting/write the control register reset bit to the PHY.
+ **/
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ DEBUGFUNC("e1000_phy_sw_reset_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ phy_ctrl |= MII_CR_RESET;
+ ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ usec_delay(1);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_generic - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ **/
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_phy_hw_reset_generic");
+
+ if (phy->ops.check_reset_block) {
+ ret_val = phy->ops.check_reset_block(hw);
+ if (ret_val)
+ return E1000_SUCCESS;
+ }
+
+ ret_val = phy->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(phy->reset_delay_us);
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(150);
+
+ phy->ops.release(hw);
+
+ return phy->ops.get_cfg_done(hw);
+}
+
+/**
+ * e1000_get_cfg_done_generic - Generic configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Generic function to wait 10 milli-seconds for configuration to complete
+ * and return success.
+ **/
+s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_get_cfg_done_generic");
+ UNREFERENCED_1PARAMETER(hw);
+
+ msec_delay_irq(10);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_init_script_igp3 - Inits the IGP3 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initializes a Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
+{
+ DEBUGOUT("Running IGP 3 PHY init script\n");
+
+ /* PHY init IGP 3 */
+ /* Enable rise/fall, 10-mode work in class-A */
+ hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
+ /* Remove all caps from Replica path filter */
+ hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
+ /* Bias trimming for ADC, AFE and Driver (Default) */
+ hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
+ /* Increase Hybrid poly bias */
+ hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
+ /* Add 4% to Tx amplitude in Gig mode */
+ hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
+ /* Disable trimming (TTT) */
+ hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
+ /* Poly DC correction to 94.6% + 2% for all channels */
+ hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
+ /* ABS DC correction to 95.9% */
+ hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
+ /* BG temp curve trim */
+ hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
+ /* Increasing ADC OPAMP stage 1 currents to max */
+ hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
+ /* Force 1000 ( required for enabling PHY regs configuration) */
+ hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
+ /* Set upd_freq to 6 */
+ hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
+ /* Disable NPDFE */
+ hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
+ /* Disable adaptive fixed FFE (Default) */
+ hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
+ /* Enable FFE hysteresis */
+ hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
+ /* Fixed FFE for short cable lengths */
+ hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
+ /* Fixed FFE for medium cable lengths */
+ hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
+ /* Fixed FFE for long cable lengths */
+ hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
+ /* Enable Adaptive Clip Threshold */
+ hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
+ /* AHT reset limit to 1 */
+ hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
+ /* Set AHT master delay to 127 msec */
+ hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
+ /* Set scan bits for AHT */
+ hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
+ /* Set AHT Preset bits */
+ hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
+ /* Change integ_factor of channel A to 3 */
+ hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
+ /* Change prop_factor of channels BCD to 8 */
+ hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
+ /* Change cg_icount + enable integbp for channels BCD */
+ hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
+ /* Change cg_icount + enable integbp + change prop_factor_master
+ * to 8 for channel A
+ */
+ hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
+ /* Disable AHT in Slave mode on channel A */
+ hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
+ /* Enable LPLU and disable AN to 1000 in non-D0a states,
+ * Enable SPD+B2B
+ */
+ hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
+ /* Enable restart AN on an1000_dis change */
+ hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
+ /* Enable wh_fifo read clock in 10/100 modes */
+ hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
+ /* Restart AN, Speed selection is 1000 */
+ hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_type_from_id - Get PHY type from id
+ * @phy_id: phy_id read from the phy
+ *
+ * Returns the phy type from the id.
+ **/
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
+{
+ enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+ switch (phy_id) {
+ case M88E1000_I_PHY_ID:
+ case M88E1000_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ case M88E1011_I_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ phy_type = e1000_phy_m88;
+ break;
+ case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+ phy_type = e1000_phy_igp_2;
+ break;
+ case GG82563_E_PHY_ID:
+ phy_type = e1000_phy_gg82563;
+ break;
+ case IGP03E1000_E_PHY_ID:
+ phy_type = e1000_phy_igp_3;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ phy_type = e1000_phy_ife;
+ break;
+ case BME1000_E_PHY_ID:
+ case BME1000_E_PHY_ID_R2:
+ phy_type = e1000_phy_bm;
+ break;
+ case I82578_E_PHY_ID:
+ phy_type = e1000_phy_82578;
+ break;
+ case I82577_E_PHY_ID:
+ phy_type = e1000_phy_82577;
+ break;
+ case I82579_E_PHY_ID:
+ phy_type = e1000_phy_82579;
+ break;
+ case I217_E_PHY_ID:
+ phy_type = e1000_phy_i217;
+ break;
+ case I82580_I_PHY_ID:
+ phy_type = e1000_phy_82580;
+ break;
+ case I210_I_PHY_ID:
+ phy_type = e1000_phy_i210;
+ break;
+ default:
+ phy_type = e1000_phy_unknown;
+ break;
+ }
+ return phy_type;
+}
+
+/**
+ * e1000_determine_phy_address - Determines PHY address.
+ * @hw: pointer to the HW structure
+ *
+ * This uses a trial and error method to loop through possible PHY
+ * addresses. It tests each by reading the PHY ID registers and
+ * checking for a match.
+ **/
+s32 e1000_determine_phy_address(struct e1000_hw *hw)
+{
+ u32 phy_addr = 0;
+ u32 i;
+ enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+ hw->phy.id = phy_type;
+
+ for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+ hw->phy.addr = phy_addr;
+ i = 0;
+
+ do {
+ e1000_get_phy_id(hw);
+ phy_type = e1000_get_phy_type_from_id(hw->phy.id);
+
+ /* If phy_type is valid, break - we found our
+ * PHY address
+ */
+ if (phy_type != e1000_phy_unknown)
+ return E1000_SUCCESS;
+
+ msec_delay(1);
+ i++;
+ } while (i < 10);
+ }
+
+ return -E1000_ERR_PHY_TYPE;
+}
+
+/**
+ * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
+ * @page: page to access
+ *
+ * Returns the phy address for the page requested.
+ **/
+STATIC u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
+{
+ u32 phy_addr = 2;
+
+ if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31))
+ phy_addr = 1;
+
+ return phy_addr;
+}
+
+/**
+ * e1000_write_phy_reg_bm - Write BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u32 page = offset >> IGP_PAGE_SHIFT;
+
+ DEBUGFUNC("e1000_write_phy_reg_bm");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false, false);
+ goto release;
+ }
+
+ hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
+ /* Page select is register 31 for phy address 1 and 22 for
+ * phy address 2 and 3. Page select is shifted only for
+ * phy address 1.
+ */
+ if (hw->phy.addr == 1) {
+ page_shift = IGP_PAGE_SHIFT;
+ page_select = IGP01E1000_PHY_PAGE_SELECT;
+ } else {
+ page_shift = 0;
+ page_select = BM_PHY_PAGE_SELECT;
+ }
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_write_phy_reg_mdic(hw, page_select,
+ (page << page_shift));
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_bm - Read BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and storing the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u32 page = offset >> IGP_PAGE_SHIFT;
+
+ DEBUGFUNC("e1000_read_phy_reg_bm");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, false);
+ goto release;
+ }
+
+ hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
+ /* Page select is register 31 for phy address 1 and 22 for
+ * phy address 2 and 3. Page select is shifted only for
+ * phy address 1.
+ */
+ if (hw->phy.addr == 1) {
+ page_shift = IGP_PAGE_SHIFT;
+ page_select = IGP01E1000_PHY_PAGE_SELECT;
+ } else {
+ page_shift = 0;
+ page_select = BM_PHY_PAGE_SELECT;
+ }
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_write_phy_reg_mdic(hw, page_select,
+ (page << page_shift));
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_bm2 - Read BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and storing the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+ DEBUGFUNC("e1000_read_phy_reg_bm2");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, false);
+ goto release;
+ }
+
+ hw->phy.addr = 1;
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+ page);
+
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_bm2 - Write BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+ DEBUGFUNC("e1000_write_phy_reg_bm2");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false, false);
+ goto release;
+ }
+
+ hw->phy.addr = 1;
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+ page);
+
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
+ * @hw: pointer to the HW structure
+ * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
+ *
+ * Assumes semaphore already acquired and phy_reg points to a valid memory
+ * address to store contents of the BM_WUC_ENABLE_REG register.
+ **/
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+ s32 ret_val;
+ u16 temp;
+
+ DEBUGFUNC("e1000_enable_phy_wakeup_reg_access_bm");
+
+ if (!phy_reg)
+ return -E1000_ERR_PARAM;
+
+ /* All page select, port ctrl and wakeup registers use phy address 1 */
+ hw->phy.addr = 1;
+
+ /* Select Port Control Registers page */
+ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ DEBUGOUT("Could not set Port Control page\n");
+ return ret_val;
+ }
+
+ ret_val = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+ if (ret_val) {
+ DEBUGOUT2("Could not read PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+ return ret_val;
+ }
+
+ /* Enable both PHY wakeup mode and Wakeup register page writes.
+ * Prevent a power state change by disabling ME and Host PHY wakeup.
+ */
+ temp = *phy_reg;
+ temp |= BM_WUC_ENABLE_BIT;
+ temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
+
+ ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp);
+ if (ret_val) {
+ DEBUGOUT2("Could not write PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+ return ret_val;
+ }
+
+ /* Select Host Wakeup Registers page - caller now able to write
+ * registers on the Wakeup registers page
+ */
+ return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
+}
+
+/**
+ * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
+ * @hw: pointer to the HW structure
+ * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
+ *
+ * Restore BM_WUC_ENABLE_REG to its original value.
+ *
+ * Assumes semaphore already acquired and *phy_reg is the contents of the
+ * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
+ * caller.
+ **/
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_disable_phy_wakeup_reg_access_bm");
+
+ if (!phy_reg)
+ return -E1000_ERR_PARAM;
+
+ /* Select Port Control Registers page */
+ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ DEBUGOUT("Could not set Port Control page\n");
+ return ret_val;
+ }
+
+ /* Restore 769.17 to its original value */
+ ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg);
+ if (ret_val)
+ DEBUGOUT2("Could not restore PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+
+ return ret_val;
+}
+
+/**
+ * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to read or write
+ * @read: determines if operation is read or write
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Read the PHY register at offset and store the retrieved information in
+ * data, or write data to PHY register at offset. Note the procedure to
+ * access the PHY wakeup registers is different than reading the other PHY
+ * registers. It works as such:
+ * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
+ * 2) Set page to 800 for host (801 if we were manageability)
+ * 3) Write the address using the address opcode (0x11)
+ * 4) Read or write the data using the data opcode (0x12)
+ * 5) Restore 769.17.2 to its original value
+ *
+ * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and
+ * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm().
+ *
+ * Assumes semaphore is already acquired. When page_set==true, assumes
+ * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
+ * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_bm()).
+ **/
+STATIC s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read, bool page_set)
+{
+ s32 ret_val;
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 phy_reg = 0;
+
+ DEBUGFUNC("e1000_access_phy_wakeup_reg_bm");
+
+ /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
+ if ((hw->mac.type == e1000_pchlan) &&
+ (!(E1000_READ_REG(hw, E1000_PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
+ DEBUGOUT1("Attempting to access page %d while gig enabled.\n",
+ page);
+
+ if (!page_set) {
+ /* Enable access to PHY wakeup registers */
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+ if (ret_val) {
+ DEBUGOUT("Could not enable PHY wakeup reg access\n");
+ return ret_val;
+ }
+ }
+
+ DEBUGOUT2("Accessing PHY page %d reg 0x%x\n", page, reg);
+
+ /* Write the Wakeup register page offset value using opcode 0x11 */
+ ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
+ if (ret_val) {
+ DEBUGOUT1("Could not write address opcode to page %d\n", page);
+ return ret_val;
+ }
+
+ if (read) {
+ /* Read the Wakeup register page value using opcode 0x12 */
+ ret_val = e1000_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+ data);
+ } else {
+ /* Write the Wakeup register page value using opcode 0x12 */
+ ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+ *data);
+ }
+
+ if (ret_val) {
+ DEBUGOUT2("Could not access PHY reg %d.%d\n", page, reg);
+ return ret_val;
+ }
+
+ if (!page_set)
+ ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+ return ret_val;
+}
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, restore the link to previous
+ * settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, restore the link to previous
+ * settings.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+ msec_delay(1);
+}
+
+/**
+ * __e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphore before exiting.
+ **/
+STATIC s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked, bool page_set)
+{
+ s32 ret_val;
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+ DEBUGFUNC("__e1000_read_phy_reg_hv");
+
+ if (!locked) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, page_set);
+ goto out;
+ }
+
+ if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+ ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+ data, true);
+ goto out;
+ }
+
+ if (!page_set) {
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
+
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_set_page_igp(hw,
+ (page << IGP_PAGE_SHIFT));
+
+ hw->phy.addr = phy_addr;
+
+ if (ret_val)
+ goto out;
+ }
+ }
+
+ DEBUGOUT3("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+ page << IGP_PAGE_SHIFT, reg);
+
+ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+ data);
+out:
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores
+ * the retrieved information in data. Release the acquired semaphore
+ * before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ * e1000_read_phy_reg_hv_locked - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ * e1000_read_phy_reg_page_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired and page already set.
+ **/
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ * __e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+STATIC s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked, bool page_set)
+{
+ s32 ret_val;
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+ DEBUGFUNC("__e1000_write_phy_reg_hv");
+
+ if (!locked) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false, page_set);
+ goto out;
+ }
+
+ if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+ ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+ &data, false);
+ goto out;
+ }
+
+ if (!page_set) {
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
+
+ /* Workaround MDIO accesses being disabled after entering IEEE
+ * Power Down (when bit 11 of the PHY Control register is set)
+ */
+ if ((hw->phy.type == e1000_phy_82578) &&
+ (hw->phy.revision >= 1) &&
+ (hw->phy.addr == 2) &&
+ !(MAX_PHY_REG_ADDRESS & reg) &&
+ (data & (1 << 11))) {
+ u16 data2 = 0x7EFF;
+ ret_val = e1000_access_phy_debug_regs_hv(hw,
+ (1 << 6) | 0x3,
+ &data2, false);
+ if (ret_val)
+ goto out;
+ }
+
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_set_page_igp(hw,
+ (page << IGP_PAGE_SHIFT));
+
+ hw->phy.addr = phy_addr;
+
+ if (ret_val)
+ goto out;
+ }
+ }
+
+ DEBUGOUT3("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+ page << IGP_PAGE_SHIFT, reg);
+
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+ data);
+
+out:
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register at the offset.
+ * Release the acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ * e1000_write_phy_reg_hv_locked - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset. Assumes semaphore
+ * already acquired.
+ **/
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ * e1000_write_phy_reg_page_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset. Assumes semaphore
+ * already acquired and page already set.
+ **/
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ * e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page
+ * @page: page to be accessed
+ **/
+STATIC u32 e1000_get_phy_addr_for_hv_page(u32 page)
+{
+ u32 phy_addr = 2;
+
+ if (page >= HV_INTC_FC_PAGE_START)
+ phy_addr = 1;
+
+ return phy_addr;
+}
+
+/**
+ * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to be read or written
+ * @read: determines if operation is read or write
+ *
+ * Reads the PHY register at offset and stores the retreived information
+ * in data. Assumes semaphore already acquired. Note that the procedure
+ * to access these regs uses the address port and data port to read/write.
+ * These accesses done with PHY address 2 and without using pages.
+ **/
+STATIC s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+ u32 addr_reg;
+ u32 data_reg;
+
+ DEBUGFUNC("e1000_access_phy_debug_regs_hv");
+
+ /* This takes care of the difference with desktop vs mobile phy */
+ addr_reg = ((hw->phy.type == e1000_phy_82578) ?
+ I82578_ADDR_REG : I82577_ADDR_REG);
+ data_reg = addr_reg + 1;
+
+ /* All operations in this function are phy address 2 */
+ hw->phy.addr = 2;
+
+ /* masking with 0x3F to remove the page from offset */
+ ret_val = e1000_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
+ if (ret_val) {
+ DEBUGOUT("Could not write the Address Offset port register\n");
+ return ret_val;
+ }
+
+ /* Read or write the data value next */
+ if (read)
+ ret_val = e1000_read_phy_reg_mdic(hw, data_reg, data);
+ else
+ ret_val = e1000_write_phy_reg_mdic(hw, data_reg, *data);
+
+ if (ret_val)
+ DEBUGOUT("Could not access the Data port register\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000_link_stall_workaround_hv - Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * This function works around a Si bug where the link partner can get
+ * a link up indication before the PHY does. If small packets are sent
+ * by the link partner they can be placed in the packet buffer without
+ * being properly accounted for by the PHY and will stall preventing
+ * further packets from being received. The workaround is to clear the
+ * packet buffer after the PHY detects link up.
+ **/
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_link_stall_workaround_hv");
+
+ if (hw->phy.type != e1000_phy_82578)
+ return E1000_SUCCESS;
+
+ /* Do not apply workaround if in PHY loopback bit 14 set */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
+ if (data & PHY_CONTROL_LB)
+ return E1000_SUCCESS;
+
+ /* check if link is up and at 1Gbps */
+ ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
+
+ if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
+ return E1000_SUCCESS;
+
+ msec_delay(200);
+
+ /* flush the packets in the fifo buffer */
+ ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
+ (HV_MUX_DATA_CTRL_GEN_TO_MAC |
+ HV_MUX_DATA_CTRL_FORCE_SPEED));
+ if (ret_val)
+ return ret_val;
+
+ return hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
+ HV_MUX_DATA_CTRL_GEN_TO_MAC);
+}
+
+/**
+ * e1000_check_polarity_82577 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_check_polarity_82577");
+
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex.
+ **/
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_82577");
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine on the cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_82577");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = e1000_check_polarity_82577(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
+
+ if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+ I82577_PHY_STATUS2_SPEED_1000MBPS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies result is valid before
+ * placing it in the phy_cable_length field.
+ **/
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, length;
+
+ DEBUGFUNC("e1000_get_cable_length_82577");
+
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+ I82577_DSTATUS_CABLE_LENGTH_SHIFT);
+
+ if (length == E1000_CABLE_LENGTH_UNDEFINED)
+ return -E1000_ERR_PHY;
+
+ phy->cable_length = length;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg_gs40g - Write GS40G PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u16 page = offset >> GS40G_PAGE_SHIFT;
+
+ DEBUGFUNC("e1000_write_phy_reg_gs40g");
+
+ offset = offset & GS40G_OFFSET_MASK;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+ if (ret_val)
+ goto release;
+ ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_gs40g - Read GS40G PHY register
+ * @hw: pointer to the HW structure
+ * @offset: lower half is register offset to read to
+ * upper half is page to use.
+ * @data: data to read at register offset
+ *
+ * Acquires semaphore, if necessary, then reads the data in the PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u16 page = offset >> GS40G_PAGE_SHIFT;
+
+ DEBUGFUNC("e1000_read_phy_reg_gs40g");
+
+ offset = offset & GS40G_OFFSET_MASK;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+ if (ret_val)
+ goto release;
+ ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_mphy - Read mPHY control register
+ * @hw: pointer to the HW structure
+ * @address: address to be read
+ * @data: pointer to the read data
+ *
+ * Reads the mPHY control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
+{
+ u32 mphy_ctrl = 0;
+ bool locked = false;
+ bool ready;
+
+ DEBUGFUNC("e1000_read_phy_reg_mphy");
+
+ /* Check if mPHY is ready to read/write operations */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Check if mPHY access is disabled and enable it if so */
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
+ locked = true;
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+ }
+
+ /* Set the address that we want to read */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* We mask address, because we want to use only current lane */
+ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK &
+ ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) |
+ (address & E1000_MPHY_ADDRESS_MASK);
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+
+ /* Read data from the address */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ *data = E1000_READ_REG(hw, E1000_MPHY_DATA);
+
+ /* Disable access to mPHY if it was originally disabled */
+ if (locked) {
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+ E1000_MPHY_DIS_ACCESS);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg_mphy - Write mPHY control register
+ * @hw: pointer to the HW structure
+ * @address: address to write to
+ * @data: data to write to register at offset
+ * @line_override: used when we want to use different line than default one
+ *
+ * Writes data to mPHY control register.
+ **/
+s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
+ bool line_override)
+{
+ u32 mphy_ctrl = 0;
+ bool locked = false;
+ bool ready;
+
+ DEBUGFUNC("e1000_write_phy_reg_mphy");
+
+ /* Check if mPHY is ready to read/write operations */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Check if mPHY access is disabled and enable it if so */
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
+ locked = true;
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+ }
+
+ /* Set the address that we want to read */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* We mask address, because we want to use only current lane */
+ if (line_override)
+ mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE;
+ else
+ mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE;
+ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) |
+ (address & E1000_MPHY_ADDRESS_MASK);
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+
+ /* Read data from the address */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
+
+ /* Disable access to mPHY if it was originally disabled */
+ if (locked) {
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+ E1000_MPHY_DIS_ACCESS);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_is_mphy_ready - Check if mPHY control register is not busy
+ * @hw: pointer to the HW structure
+ *
+ * Returns mPHY control register status.
+ **/
+bool e1000_is_mphy_ready(struct e1000_hw *hw)
+{
+ u16 retry_count = 0;
+ u32 mphy_ctrl = 0;
+ bool ready = false;
+
+ while (retry_count < 2) {
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_BUSY) {
+ usec_delay(20);
+ retry_count++;
+ continue;
+ }
+ ready = true;
+ break;
+ }
+
+ if (!ready)
+ DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n");
+
+ return ready;
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.h
new file mode 100644
index 00000000..2cd0e14b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.h
@@ -0,0 +1,341 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_PHY_H_
+#define _E1000_PHY_H_
+
+void e1000_init_phy_ops_generic(struct e1000_hw *hw);
+s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+void e1000_null_phy_generic(struct e1000_hw *hw);
+s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_null_set_page(struct e1000_hw *hw, u16 data);
+s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 e1000_check_downshift_generic(struct e1000_hw *hw);
+s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+s32 e1000_check_reset_block_generic(struct e1000_hw *hw);
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
+s32 e1000_copper_link_autoneg(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw);
+s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw);
+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw);
+s32 e1000_get_cfg_done_generic(struct e1000_hw *hw);
+s32 e1000_get_phy_id(struct e1000_hw *hw);
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw);
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw);
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw);
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw);
+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw);
+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw);
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
+s32 e1000_determine_phy_address(struct e1000_hw *hw);
+s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
+s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data);
+s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
+ bool line_override);
+bool e1000_is_mphy_ready(struct e1000_hw *hw);
+
+#define E1000_MAX_PHY_ADDR 8
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO */
+#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
+#define IGP_PAGE_SHIFT 5
+#define PHY_REG_MASK 0x1F
+
+/* GS40G - I210 PHY defines */
+#define GS40G_PAGE_SELECT 0x16
+#define GS40G_PAGE_SHIFT 16
+#define GS40G_OFFSET_MASK 0xFFFF
+#define GS40G_PAGE_2 0x20000
+#define GS40G_MAC_REG2 0x15
+#define GS40G_MAC_LB 0x4140
+#define GS40G_MAC_SPEED_1G 0X0006
+#define GS40G_COPPER_SPEC 0x0010
+
+/* BM/HV Specific Registers */
+#define BM_PORT_CTRL_PAGE 769
+#define BM_WUC_PAGE 800
+#define BM_WUC_ADDRESS_OPCODE 0x11
+#define BM_WUC_DATA_OPCODE 0x12
+#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE
+#define BM_WUC_ENABLE_REG 17
+#define BM_WUC_ENABLE_BIT (1 << 2)
+#define BM_WUC_HOST_WU_BIT (1 << 4)
+#define BM_WUC_ME_WU_BIT (1 << 5)
+
+#define PHY_UPPER_SHIFT 21
+#define BM_PHY_REG(page, reg) \
+ (((reg) & MAX_PHY_REG_ADDRESS) |\
+ (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
+ (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
+#define BM_PHY_REG_PAGE(offset) \
+ ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
+#define BM_PHY_REG_NUM(offset) \
+ ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
+ (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
+ ~MAX_PHY_REG_ADDRESS)))
+
+#define HV_INTC_FC_PAGE_START 768
+#define I82578_ADDR_REG 29
+#define I82577_ADDR_REG 16
+#define I82577_CFG_REG 22
+#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
+#define I82577_CTRL_REG 23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2 18
+#define I82577_PHY_LBK_CTRL 19
+#define I82577_PHY_STATUS_2 26
+#define I82577_PHY_DIAG_STATUS 31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
+#define I82577_PHY_STATUS2_MDIX 0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* 82580 PHY Power Management */
+#define E1000_82580_PHY_POWER_MGMT 0xE14
+#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
+#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
+#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
+
+#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */
+#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */
+#define E1000_MPHY_BUSY 0x00010000 /* busy bit */
+#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */
+#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */
+
+/* BM PHY Copper Specific Control 1 */
+#define BM_CS_CTRL1 16
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS 17
+#define BM_CS_STATUS_LINK_UP 0x0400
+#define BM_CS_STATUS_RESOLVED 0x0800
+#define BM_CS_STATUS_SPEED_MASK 0xC000
+#define BM_CS_STATUS_SPEED_1000 0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS 26
+#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
+#define HV_M_STATUS_SPEED_MASK 0x0300
+#define HV_M_STATUS_SPEED_1000 0x0200
+#define HV_M_STATUS_SPEED_100 0x0100
+#define HV_M_STATUS_LINK_UP 0x0040
+
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+
+/* Enable flexible speed on link-up */
+#define IGP01E1000_GMII_FLEX_SPD 0x0010
+#define IGP01E1000_GMII_SPD 0x0020 /* Enable SPD */
+
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+#define IGP02E1000_AGC_RANGE 15
+
+#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KMRNCTRLSTA_REN 0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */
+#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
+#define E1000_KMRNCTRLSTA_K0S_CTRL 0x1E /* Kumeran K0s Control */
+#define E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_SHIFT 0
+#define E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT 4
+#define E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_MASK \
+ (3 << E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_SHIFT)
+#define E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK \
+ (7 << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT)
+#define E1000_KMRNCTRLSTA_OP_MODES 0x1F /* Kumeran Modes of Operation */
+#define E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC 0x0002 /* change LSC to CSC */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED 0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
+#define IFE_PSC_FORCE_POLARITY 0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE 0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
+
+/* SFP modules ID memory locations */
+#define E1000_SFF_IDENTIFIER_OFFSET 0x00
+#define E1000_SFF_IDENTIFIER_SFF 0x02
+#define E1000_SFF_IDENTIFIER_SFP 0x03
+
+#define E1000_SFF_ETH_FLAGS_OFFSET 0x06
+/* Flags for SFP modules compatible with ETH up to 1Gb */
+struct sfp_e1000_flags {
+ u8 e1000_base_sx:1;
+ u8 e1000_base_lx:1;
+ u8 e1000_base_cx:1;
+ u8 e1000_base_t:1;
+ u8 e100_base_lx:1;
+ u8 e100_base_fx:1;
+ u8 e10_base_bx10:1;
+ u8 e10_base_px:1;
+};
+
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600
+#define E1000_SFF_VENDOR_OUI_FTL 0x00906500
+#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00
+#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100
+
+/* EEPROM byte offsets */
+#define IGB_SFF_8472_SWAP 0x5C
+#define IGB_SFF_8472_COMP 0x5E
+
+/* Bitmasks */
+#define IGB_SFF_ADDRESSING_MODE 0x4
+#define IGB_SFF_8472_UNSUP 0x00
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_regs.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_regs.h
new file mode 100644
index 00000000..364a7261
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_regs.h
@@ -0,0 +1,695 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_REGS_H_
+#define _E1000_REGS_H_
+
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */
+#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */
+#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */
+#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */
+#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */
+#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */
+#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */
+#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */
+#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */
+#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */
+#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */
+#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */
+#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
+#define E1000_FEXT 0x0002C /* Future Extended - RW */
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
+#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
+#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
+#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
+#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
+#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */
+#define E1000_SVCR 0x000F0
+#define E1000_SVT 0x000F4
+#define E1000_LPIC 0x000FC /* Low Power IDLE control */
+#define E1000_RCTL 0x00100 /* Rx Control - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
+#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */
+#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
+#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
+#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define E1000_TCTL 0x00400 /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
+#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */
+#define E1000_TBT 0x00448 /* Tx Burst Timer - RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_LEDMUX 0x08130 /* LED MUX Control */
+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
+#define E1000_IOSFPC 0x00F28 /* TX corrupted data */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEMNGCTL_I210 0x01010 /* i210 MNG EEprom Mode Control */
+#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
+#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL 0x01030 /* FLASH control register */
+#define E1000_FLSWDATA 0x01034 /* FLASH data register */
+#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
+#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */
+#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */
+#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */
+#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */
+#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */
+#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */
+#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */
+#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */
+#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */
+#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */
+#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
+#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
+#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */
+#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */
+#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */
+#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */
+#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */
+#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */
+#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */
+#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
+/* Split and Replication Rx Control - RW */
+#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */
+#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */
+#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */
+#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */
+#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */
+#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */
+#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */
+#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */
+#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
+#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
+#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */
+#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */
+#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
+#define E1000_I210_FLMNGCTL 0x12038
+#define E1000_I210_FLMNGDATA 0x1203C
+#define E1000_I210_FLMNGCNT 0x12040
+
+#define E1000_I210_FLSWCTL 0x12048
+#define E1000_I210_FLSWDATA 0x1204C
+#define E1000_I210_FLSWCNT 0x12050
+
+#define E1000_I210_FLA 0x1201C
+
+#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
+#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
+
+/* QAV Tx mode control register */
+#define E1000_I210_TQAVCTRL 0x3570
+
+/* QAV Tx mode control register bitfields masks */
+/* QAV enable */
+#define E1000_TQAVCTRL_MODE (1 << 0)
+/* Fetching arbitration type */
+#define E1000_TQAVCTRL_FETCH_ARB (1 << 4)
+/* Fetching timer enable */
+#define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5)
+/* Launch arbitration type */
+#define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8)
+/* Launch timer enable */
+#define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9)
+/* SP waits for SR enable */
+#define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10)
+/* Fetching timer correction */
+#define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16
+#define E1000_TQAVCTRL_FETCH_TIMER_DELTA \
+ (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET)
+
+/* High credit registers where _n can be 0 or 1. */
+#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n))
+
+/* Queues fetch arbitration priority control register */
+#define E1000_I210_TQAVARBCTRL 0x3574
+/* Queues priority masks where _n and _p can be 0-3. */
+#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n)))
+/* QAV Tx mode control registers where _n can be 0 or 1. */
+#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n))
+
+/* QAV Tx mode control register bitfields masks */
+#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */
+#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */
+#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */
+
+/* Good transmitted packets counter registers */
+#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
+
+/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */
+#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n)))
+
+#define E1000_MMDAC 13 /* MMD Access Control */
+#define E1000_MMDAAD 14 /* MMD Access Address/Data */
+
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL_REG(current_rx_queue)
+ */
+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+ (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+ (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+ (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+ (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+ (0x0C010 + ((_n) * 0x40)))
+#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+ (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+ (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+ (0x0C028 + ((_n) * 0x40)))
+#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
+ (0x0C030 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+ (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+ (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+ (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+ (0x0E010 + ((_n) * 0x40)))
+#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+ (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+ (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+ (0x0E028 + ((_n) * 0x40)))
+#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \
+ (0x0E038 + ((_n) * 0x40)))
+#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \
+ (0x0E03C + ((_n) * 0x40)))
+#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
+#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */
+#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
+#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */
+#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */
+#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+/* Same as TXPBS, renamed for newer Si - RW */
+#define E1000_ITPBS 0x03404
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */
+#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */
+#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */
+#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */
+#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */
+#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */
+#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
+#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
+/* DMA Tx Max Total Allow Size Reqs - RW */
+#define E1000_DTXMXSZRQ 0x03540
+#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */
+
+#define E1000_VFGPRC 0x00F10
+#define E1000_VFGORC 0x00F18
+#define E1000_VFMPRC 0x00F3C
+#define E1000_VFGPTC 0x00F14
+#define E1000_VFGOTC 0x00F34
+#define E1000_VFGOTLBC 0x00F50
+#define E1000_VFGPTLBC 0x00F44
+#define E1000_VFGORLBC 0x00F48
+#define E1000_VFGPRLBC 0x00F40
+/* Virtualization statistical counters */
+#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n)))
+#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n)))
+#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n)))
+#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n)))
+#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n)))
+#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n)))
+#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n)))
+#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n)))
+#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n)))
+
+/* LinkSec */
+#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */
+#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */
+#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */
+#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */
+#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */
+#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */
+#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */
+#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */
+#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */
+#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */
+#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */
+#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */
+#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */
+#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */
+#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */
+#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */
+#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */
+#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */
+#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */
+#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */
+#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */
+#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */
+#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */
+#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */
+#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */
+#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */
+#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */
+#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */
+#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */
+#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */
+/* LinkSec Tx 128-bit Key 0 - WO */
+#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n)))
+/* LinkSec Tx 128-bit Key 1 - WO */
+#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n)))
+#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */
+#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */
+/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit
+ * key - RW.
+ */
+#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m)))
+
+#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */
+#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */
+#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */
+#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */
+/* IPSec Rx IPv4/v6 Address - RW */
+#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n)))
+/* IPSec Rx 128-bit Key - RW */
+#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n)))
+#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */
+#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */
+/* IPSec Tx 128-bit Key - RW */
+#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n)))
+#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */
+#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */
+#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
+#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
+#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */
+#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
+#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */
+#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */
+#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
+#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */
+#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */
+#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
+#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
+#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
+#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
+#define E1000_LENERRS 0x04138 /* Length Errors Count */
+#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
+#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
+#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
+#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
+#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
+#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */
+#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
+#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
+#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */
+#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */
+#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */
+#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
+#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
+#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
+#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */
+/* Flexible Host Filter Table */
+#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100))
+/* Ext Flexible Host Filter Table */
+#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100))
+
+
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+/* Management Decision Filters */
+#define E1000_MDEF(_n) (0x05890 + (4 * (_n)))
+#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
+#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
+#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
+#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
+#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+/* Driver-only SW semaphore (not used by BOOT agents) */
+#define E1000_SWSM2 0x05B58
+#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
+#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
+#define E1000_UFUSE 0x05B78 /* UFUSE - RO */
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR 0x08F00 /* Host Interface Control */
+#define E1000_FWSTS 0x08F0C /* FW Status */
+
+/* RSS registers */
+#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
+#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/
+#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */
+#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */
+#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
+#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
+/* VT Registers */
+#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */
+#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
+#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
+#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
+#define E1000_VFRE 0x00C8C /* VF Receive Enables */
+#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
+#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
+#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
+#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
+#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
+#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
+#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
+#define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */
+#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */
+#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */
+#define E1000_MDFB 0x03558 /* Malicious Driver free block */
+#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */
+#define E1000_TXSWC 0x05ACC /* Tx Switch Control */
+#define E1000_SCCRL 0x05DB0 /* Storm Control Control */
+#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */
+#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */
+/* These act per VF so an array friendly macro is used */
+#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
+#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
+#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
+#define E1000_VFVMBMEM(_n) (0x00800 + (_n))
+#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
+/* VLAN Virtual Machine Filter - RW */
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n)))
+#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
+#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */
+#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
+#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
+#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */
+#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */
+#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_SYSSTMPL 0x0B648 /* HH Timesync system stamp low register */
+#define E1000_SYSSTMPH 0x0B64C /* HH Timesync system stamp hi register */
+#define E1000_PLTSTMPL 0x0B640 /* HH Timesync platform stamp low register */
+#define E1000_PLTSTMPH 0x0B644 /* HH Timesync platform stamp hi register */
+#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
+#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
+#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
+#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
+#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
+
+/* Filtering Registers */
+#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
+#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
+#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
+#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
+#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
+#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */
+#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */
+#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */
+#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */
+#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */
+/* Tx Desc plane TC Rate-scheduler config */
+#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4))
+/* Tx Packet plane TC Rate-Scheduler Config */
+#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler Config */
+#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4))
+/* Tx Desc Plane TC Rate-Scheduler Status */
+#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4))
+/* Tx Desc Plane TC Rate-Scheduler MMW */
+#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4))
+/* Tx Packet plane TC Rate-Scheduler Status */
+#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4))
+/* Tx Packet plane TC Rate-scheduler MMW */
+#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler Status */
+#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler MMW */
+#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4))
+/* Tx Desc plane VM Rate-Scheduler MMW*/
+#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4))
+/* Tx BCN Rate-Scheduler MMW */
+#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4))
+#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */
+#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */
+#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */
+#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */
+#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */
+#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */
+#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */
+#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */
+#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */
+#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */
+#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */
+#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */
+#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */
+#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */
+
+/* DMA Coalescing registers */
+#define E1000_DMACR 0x02508 /* Control Register */
+#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
+#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
+#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
+
+/* PCIe Parity Status Register */
+#define E1000_PCIEERRSTS 0x05BA8
+
+#define E1000_PROXYS 0x5F64 /* Proxying Status */
+#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */
+/* Thermal sensor configuration and status registers */
+#define E1000_THMJT 0x08100 /* Junction Temperature */
+#define E1000_THLOWTC 0x08104 /* Low Threshold Control */
+#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */
+#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
+#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
+
+/* Energy Efficient Ethernet "EEE" registers */
+#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
+#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
+#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/
+#define E1000_EEE_SU 0x0E34 /* EEE Setup */
+#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */
+#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
+
+
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.c
new file mode 100644
index 00000000..44ab0188
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.c
@@ -0,0 +1,589 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "e1000_api.h"
+
+
+STATIC s32 e1000_init_phy_params_vf(struct e1000_hw *hw);
+STATIC s32 e1000_init_nvm_params_vf(struct e1000_hw *hw);
+STATIC void e1000_release_vf(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_vf(struct e1000_hw *hw);
+STATIC s32 e1000_setup_link_vf(struct e1000_hw *hw);
+STATIC s32 e1000_get_bus_info_pcie_vf(struct e1000_hw *hw);
+STATIC s32 e1000_init_mac_params_vf(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_link_vf(struct e1000_hw *hw);
+STATIC s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+STATIC s32 e1000_init_hw_vf(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_vf(struct e1000_hw *hw);
+STATIC void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, u32);
+STATIC int e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
+STATIC s32 e1000_read_mac_addr_vf(struct e1000_hw *);
+
+/**
+ * e1000_init_phy_params_vf - Inits PHY params
+ * @hw: pointer to the HW structure
+ *
+ * Doesn't do much - there's no PHY available to the VF.
+ **/
+STATIC s32 e1000_init_phy_params_vf(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_phy_params_vf");
+ hw->phy.type = e1000_phy_vf;
+ hw->phy.ops.acquire = e1000_acquire_vf;
+ hw->phy.ops.release = e1000_release_vf;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_nvm_params_vf - Inits NVM params
+ * @hw: pointer to the HW structure
+ *
+ * Doesn't do much - there's no NVM available to the VF.
+ **/
+STATIC s32 e1000_init_nvm_params_vf(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_nvm_params_vf");
+ hw->nvm.type = e1000_nvm_none;
+ hw->nvm.ops.acquire = e1000_acquire_vf;
+ hw->nvm.ops.release = e1000_release_vf;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_vf - Inits MAC params
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_vf(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_init_mac_params_vf");
+
+ /* Set media type */
+ /*
+ * Virtual functions don't care what they're media type is as they
+ * have no direct access to the PHY, or the media. That is handled
+ * by the physical function driver.
+ */
+ hw->phy.media_type = e1000_media_type_unknown;
+
+ /* No ASF features for the VF driver */
+ mac->asf_firmware_present = false;
+ /* ARC subsystem not supported */
+ mac->arc_subsystem_valid = false;
+ /* Disable adaptive IFS mode so the generic funcs don't do anything */
+ mac->adaptive_ifs = false;
+ /* VF's have no MTA Registers - PF feature only */
+ mac->mta_reg_count = 128;
+ /* VF's have no access to RAR entries */
+ mac->rar_entry_count = 1;
+
+ /* Function pointers */
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_vf;
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pcie_vf;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_vf;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_vf;
+ /* check for link */
+ mac->ops.check_for_link = e1000_check_for_link_vf;
+ /* link info */
+ mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
+ /* set mac address */
+ mac->ops.rar_set = e1000_rar_set_vf;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
+
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_vf - Inits function pointers
+ * @hw: pointer to the HW structure
+ **/
+void e1000_init_function_pointers_vf(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_vf");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_vf;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_vf;
+ hw->phy.ops.init_params = e1000_init_phy_params_vf;
+ hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
+}
+
+/**
+ * e1000_acquire_vf - Acquire rights to access PHY or NVM.
+ * @hw: pointer to the HW structure
+ *
+ * There is no PHY or NVM so we want all attempts to acquire these to fail.
+ * In addition, the MAC registers to access PHY/NVM don't exist so we don't
+ * even want any SW to attempt to use them.
+ **/
+STATIC s32 e1000_acquire_vf(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return -E1000_ERR_PHY;
+}
+
+/**
+ * e1000_release_vf - Release PHY or NVM
+ * @hw: pointer to the HW structure
+ *
+ * There is no PHY or NVM so we want all attempts to acquire these to fail.
+ * In addition, the MAC registers to access PHY/NVM don't exist so we don't
+ * even want any SW to attempt to use them.
+ **/
+STATIC void e1000_release_vf(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return;
+}
+
+/**
+ * e1000_setup_link_vf - Sets up link.
+ * @hw: pointer to the HW structure
+ *
+ * Virtual functions cannot change link.
+ **/
+STATIC s32 e1000_setup_link_vf(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_setup_link_vf");
+ UNREFERENCED_1PARAMETER(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_bus_info_pcie_vf - Gets the bus info.
+ * @hw: pointer to the HW structure
+ *
+ * Virtual functions are not really on their own bus.
+ **/
+STATIC s32 e1000_get_bus_info_pcie_vf(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+
+ DEBUGFUNC("e1000_get_bus_info_pcie_vf");
+
+ /* Do not set type PCI-E because we don't want disable master to run */
+ bus->type = e1000_bus_type_reserved;
+ bus->speed = e1000_bus_speed_2500;
+
+ return 0;
+}
+
+/**
+ * e1000_get_link_up_info_vf - Gets link info.
+ * @hw: pointer to the HW structure
+ * @speed: pointer to 16 bit value to store link speed.
+ * @duplex: pointer to 16 bit value to store duplex.
+ *
+ * Since we cannot read the PHY and get accurate link info, we must rely upon
+ * the status register's data which is often stale and inaccurate.
+ **/
+STATIC s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 status;
+
+ DEBUGFUNC("e1000_get_link_up_info_vf");
+
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ DEBUGOUT("1000 Mbs, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ DEBUGOUT("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ DEBUGOUT("10 Mbs, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_hw_vf - Resets the HW
+ * @hw: pointer to the HW structure
+ *
+ * VF's provide a function level reset. This is done using bit 26 of ctrl_reg.
+ * This is all the reset we can perform on a VF.
+ **/
+STATIC s32 e1000_reset_hw_vf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 timeout = E1000_VF_INIT_TIMEOUT;
+ s32 ret_val = -E1000_ERR_MAC_INIT;
+ u32 ctrl, msgbuf[3];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ DEBUGFUNC("e1000_reset_hw_vf");
+
+ DEBUGOUT("Issuing a function level reset to MAC\n");
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
+ timeout--;
+ usec_delay(5);
+ }
+
+ if (timeout) {
+ /* mailbox timeout can now become active */
+ mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = E1000_VF_RESET;
+ mbx->ops.write_posted(hw, msgbuf, 1, 0);
+
+ msec_delay(10);
+
+ /* set our "perm_addr" based on info provided by PF */
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+ if (!ret_val) {
+ if (msgbuf[0] == (E1000_VF_RESET |
+ E1000_VT_MSGTYPE_ACK))
+ memcpy(hw->mac.perm_addr, addr, 6);
+ else
+ ret_val = -E1000_ERR_MAC_INIT;
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_vf - Inits the HW
+ * @hw: pointer to the HW structure
+ *
+ * Not much to do here except clear the PF Reset indication if there is one.
+ **/
+STATIC s32 e1000_init_hw_vf(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_hw_vf");
+
+ /* attempt to set and restore our mac address */
+ e1000_rar_set_vf(hw, hw->mac.addr, 0);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_rar_set_vf - set device MAC address
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index receive address array register
+ **/
+STATIC int e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr,
+ u32 E1000_UNUSEDARG index)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ UNREFERENCED_1PARAMETER(index);
+ memset(msgbuf, 0, 12);
+ msgbuf[0] = E1000_VF_SET_MAC_ADDR;
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+
+ msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
+ e1000_read_mac_addr_vf(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_hash_mc_addr_vf - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+ * the multicast filter table array address and new table value.
+ **/
+STATIC u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
+ DEBUGFUNC("e1000_hash_mc_addr_generic");
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+ /*
+ * The bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+ while (hash_mask >> bit_shift != 0xFF)
+ bit_shift++;
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16) mc_addr[5]) << bit_shift)));
+
+ return hash_value;
+}
+
+STATIC void e1000_write_msg_read_ack(struct e1000_hw *hw,
+ u32 *msg, u16 size)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 retmsg[E1000_VFMAILBOX_SIZE];
+ s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
+
+ if (!retval)
+ mbx->ops.read_posted(hw, retmsg, E1000_VFMAILBOX_SIZE, 0);
+}
+
+/**
+ * e1000_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates the Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 msgbuf[E1000_VFMAILBOX_SIZE];
+ u16 *hash_list = (u16 *)&msgbuf[1];
+ u32 hash_value;
+ u32 i;
+
+ DEBUGFUNC("e1000_update_mc_addr_list_vf");
+
+ /* Each entry in the list uses 1 16 bit word. We have 30
+ * 16 bit words available in our HW msg buffer (minus 1 for the
+ * msg type). That's 30 hash values if we pack 'em right. If
+ * there are more than 30 MC addresses to add then punt the
+ * extras for now and then add code to handle more than 30 later.
+ * It would be unusual for a server to request that many multi-cast
+ * addresses except for in large enterprise network environments.
+ */
+
+ DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+
+ msgbuf[0] = E1000_VF_SET_MULTICAST;
+
+ if (mc_addr_count > 30) {
+ msgbuf[0] |= E1000_VF_SET_MULTICAST_OVERFLOW;
+ mc_addr_count = 30;
+ }
+
+ msgbuf[0] |= mc_addr_count << E1000_VT_MSGINFO_SHIFT;
+
+ for (i = 0; i < mc_addr_count; i++) {
+ hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list);
+ DEBUGOUT1("Hash value = 0x%03X\n", hash_value);
+ hash_list[i] = hash_value & 0x0FFF;
+ mc_addr_list += ETH_ADDR_LEN;
+ }
+
+ e1000_write_msg_read_ack(hw, msgbuf, E1000_VFMAILBOX_SIZE);
+}
+
+/**
+ * e1000_vfta_set_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vid: determines the vfta register and bit to set/unset
+ * @set: if true then set bit, else clear bit
+ **/
+void e1000_vfta_set_vf(struct e1000_hw *hw, u16 vid, bool set)
+{
+ u32 msgbuf[2];
+
+ msgbuf[0] = E1000_VF_SET_VLAN;
+ msgbuf[1] = vid;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ if (set)
+ msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
+
+ e1000_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/** e1000_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ **/
+void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
+{
+ u32 msgbuf[2];
+
+ msgbuf[0] = E1000_VF_SET_LPE;
+ msgbuf[1] = max_size;
+
+ e1000_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/**
+ * e1000_promisc_set_vf - Set flags for Unicast or Multicast promisc
+ * @hw: pointer to the HW structure
+ * @uni: boolean indicating unicast promisc status
+ * @multi: boolean indicating multicast promisc status
+ **/
+s32 e1000_promisc_set_vf(struct e1000_hw *hw, enum e1000_promisc_type type)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf = E1000_VF_SET_PROMISC;
+ s32 ret_val;
+
+ switch (type) {
+ case e1000_promisc_multicast:
+ msgbuf |= E1000_VF_SET_PROMISC_MULTICAST;
+ break;
+ case e1000_promisc_enabled:
+ msgbuf |= E1000_VF_SET_PROMISC_MULTICAST;
+ case e1000_promisc_unicast:
+ msgbuf |= E1000_VF_SET_PROMISC_UNICAST;
+ case e1000_promisc_disabled:
+ break;
+ default:
+ return -E1000_ERR_MAC_INIT;
+ }
+
+ ret_val = mbx->ops.write_posted(hw, &msgbuf, 1, 0);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, &msgbuf, 1, 0);
+
+ if (!ret_val && !(msgbuf & E1000_VT_MSGTYPE_ACK))
+ ret_val = -E1000_ERR_MAC_INIT;
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_for_link_vf - Check for link for a virtual interface
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the underlying PF is still talking to the VF and
+ * if it is then it reports the link state to the hardware, otherwise
+ * it reports link down and returns an error.
+ **/
+STATIC s32 e1000_check_for_link_vf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+ u32 in_msg = 0;
+
+ DEBUGFUNC("e1000_check_for_link_vf");
+
+ /*
+ * We only want to run this if there has been a rst asserted.
+ * in this case that could mean a link change, device reset,
+ * or a virtual function reset
+ */
+
+ /* If we were hit with a reset or timeout drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
+ goto out;
+
+ /* if the read failed it could just be a mailbox collision, best wait
+ * until we are called again and don't report an error */
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ goto out;
+
+ /* if incoming message isn't clear to send we are waiting on response */
+ if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
+ /* message is not CTS and is NACK we have lost CTS status */
+ if (in_msg & E1000_VT_MSGTYPE_NACK)
+ ret_val = -E1000_ERR_MAC_INIT;
+ goto out;
+ }
+
+ /* at this point we know the PF is talking to us, check and see if
+ * we are still accepting timeout or if we had a timeout failure.
+ * if we failed then we will need to reinit */
+ if (!mbx->timeout) {
+ ret_val = -E1000_ERR_MAC_INIT;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link */
+ mac->get_link_status = false;
+
+out:
+ return ret_val;
+}
+
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.h
new file mode 100644
index 00000000..d6216dec
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.h
@@ -0,0 +1,295 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_VF_H_
+#define _E1000_VF_H_
+
+#include "e1000_osdep.h"
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82576_VF 0x10CA
+#define E1000_DEV_ID_I350_VF 0x1520
+
+#define E1000_VF_INIT_TIMEOUT 200 /* Num of retries to clear RSTI */
+
+/* Additional Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+ (0x0C00C + ((_n) * 0x40)))
+#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
+#define E1000_SRRCTL_DROP_EN 0x80000000
+
+#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
+
+/* Interrupt Defines */
+#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + ((_n) << 2))
+#define E1000_EICS 0x01520 /* Ext. Intr Cause Set -W0 */
+#define E1000_EIMS 0x01524 /* Ext. Intr Mask Set/Read -RW */
+#define E1000_EIMC 0x01528 /* Ext. Intr Mask Clear -WO */
+#define E1000_EIAC 0x0152C /* Ext. Intr Auto Clear -RW */
+#define E1000_EIAM 0x01530 /* Ext. Intr Ack Auto Clear Mask -RW */
+#define E1000_IVAR0 0x01700 /* Intr Vector Alloc (array) -RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes -RW */
+#define E1000_IVAR_VALID 0x80
+
+/* Receive Descriptor - Advanced */
+union e1000_adv_rx_desc {
+ struct {
+ u64 pkt_addr; /* Packet buffer address */
+ u64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ u32 data;
+ struct {
+ /* RSS type, Packet type */
+ u16 pkt_info;
+ /* Split Header, header buffer len */
+ u16 hdr_info;
+ } hs_rss;
+ } lo_dword;
+ union {
+ u32 rss; /* RSS Hash */
+ struct {
+ u16 ip_id; /* IP id */
+ u16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ u32 status_error; /* ext status/error */
+ u16 length; /* Packet length */
+ u16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
+
+/* Transmit Descriptor - Advanced */
+union e1000_adv_tx_desc {
+ struct {
+ u64 buffer_addr; /* Address of descriptor's data buf */
+ u32 cmd_type_len;
+ u32 olinfo_status;
+ } read;
+ struct {
+ u64 rsvd; /* Reserved */
+ u32 nxtseq_seed;
+ u32 status;
+ } wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+
+/* Context descriptors */
+struct e1000_adv_tx_context_desc {
+ u32 vlan_macip_lens;
+ u32 seqnum_seed;
+ u32 type_tucmd_mlhl;
+ u32 mss_l4len_idx;
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
+enum e1000_mac_type {
+ e1000_undefined = 0,
+ e1000_vfadapt,
+ e1000_vfadapt_i350,
+ e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
+};
+
+struct e1000_vf_stats {
+ u64 base_gprc;
+ u64 base_gptc;
+ u64 base_gorc;
+ u64 base_gotc;
+ u64 base_mprc;
+ u64 base_gotlbc;
+ u64 base_gptlbc;
+ u64 base_gorlbc;
+ u64 base_gprlbc;
+
+ u32 last_gprc;
+ u32 last_gptc;
+ u32 last_gorc;
+ u32 last_gotc;
+ u32 last_mprc;
+ u32 last_gotlbc;
+ u32 last_gptlbc;
+ u32 last_gorlbc;
+ u32 last_gprlbc;
+
+ u64 gprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 mprc;
+ u64 gotlbc;
+ u64 gptlbc;
+ u64 gorlbc;
+ u64 gprlbc;
+};
+
+#include "e1000_mbx.h"
+
+struct e1000_mac_operations {
+ /* Function pointers for the MAC. */
+ s32 (*init_params)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ void (*clear_vfta)(struct e1000_hw *);
+ s32 (*get_bus_info)(struct e1000_hw *);
+ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
+ s32 (*setup_link)(struct e1000_hw *);
+ void (*write_vfta)(struct e1000_hw *, u32, u32);
+ int (*rar_set)(struct e1000_hw *, u8*, u32);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+};
+
+struct e1000_mac_info {
+ struct e1000_mac_operations ops;
+ u8 addr[6];
+ u8 perm_addr[6];
+
+ enum e1000_mac_type type;
+
+ u16 mta_reg_count;
+ u16 rar_entry_count;
+
+ bool get_link_status;
+};
+
+struct e1000_mbx_operations {
+ s32 (*init_params)(struct e1000_hw *hw);
+ s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct e1000_hw *, u16);
+ s32 (*check_for_ack)(struct e1000_hw *, u16);
+ s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+struct e1000_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct e1000_mbx_info {
+ struct e1000_mbx_operations ops;
+ struct e1000_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u16 size;
+};
+
+struct e1000_dev_spec_vf {
+ u32 vf_number;
+ u32 v2p_mailbox;
+};
+
+struct e1000_hw {
+ void *back;
+
+ u8 *hw_addr;
+ u8 *flash_address;
+ unsigned long io_base;
+
+ struct e1000_mac_info mac;
+ struct e1000_mbx_info mbx;
+
+ union {
+ struct e1000_dev_spec_vf vf;
+ } dev_spec;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+};
+
+enum e1000_promisc_type {
+ e1000_promisc_disabled = 0, /* all promisc modes disabled */
+ e1000_promisc_unicast = 1, /* unicast promiscuous enabled */
+ e1000_promisc_multicast = 2, /* multicast promiscuous enabled */
+ e1000_promisc_enabled = 3, /* both uni and multicast promisc */
+ e1000_num_promisc_types
+};
+
+/* These functions must be implemented by drivers */
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
+void e1000_rlpml_set_vf(struct e1000_hw *, u16);
+s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type);
+#endif /* _E1000_VF_H_ */
diff --git a/src/spdk/dpdk/drivers/net/e1000/base/meson.build b/src/spdk/dpdk/drivers/net/e1000/base/meson.build
new file mode 100644
index 00000000..5e1716de
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/base/meson.build
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = [
+ 'e1000_80003es2lan.c',
+ 'e1000_82540.c',
+ 'e1000_82541.c',
+ 'e1000_82542.c',
+ 'e1000_82543.c',
+ 'e1000_82571.c',
+ 'e1000_82575.c',
+ 'e1000_api.c',
+ 'e1000_i210.c',
+ 'e1000_ich8lan.c',
+ 'e1000_mac.c',
+ 'e1000_manage.c',
+ 'e1000_mbx.c',
+ 'e1000_nvm.c',
+ 'e1000_osdep.c',
+ 'e1000_phy.c',
+ 'e1000_vf.c'
+]
+
+error_cflags = ['-Wno-uninitialized', '-Wno-unused-parameter',
+ '-Wno-unused-variable', '-Wno-misleading-indentation',
+ '-Wno-implicit-fallthrough']
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('e1000_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/src/spdk/dpdk/drivers/net/e1000/e1000_ethdev.h b/src/spdk/dpdk/drivers/net/e1000/e1000_ethdev.h
new file mode 100644
index 00000000..902001f3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/e1000_ethdev.h
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#ifndef _E1000_ETHDEV_H_
+#define _E1000_ETHDEV_H_
+
+#include <stdint.h>
+
+#include <rte_flow.h>
+#include <rte_time.h>
+#include <rte_pci.h>
+
+#define E1000_INTEL_VENDOR_ID 0x8086
+
+/* need update link, bit flag */
+#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
+#define E1000_FLAG_MAILBOX (uint32_t)(1 << 1)
+
+/*
+ * Defines that were not part of e1000_hw.h as they are not used by the FreeBSD
+ * driver.
+ */
+#define E1000_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */
+#define E1000_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */
+#define E1000_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE of Reserved */
+#define E1000_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */
+#define E1000_RXD_ERR_CKSUM_BIT 29
+#define E1000_RXD_ERR_CKSUM_MSK 3
+#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */
+#define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */
+#define IGB_VFTA_SIZE 128
+
+#define IGB_HKEY_MAX_INDEX 10
+#define IGB_MAX_RX_QUEUE_NUM 8
+#define IGB_MAX_RX_QUEUE_NUM_82576 16
+
+#define E1000_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */
+#define E1000_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */
+#define E1000_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field */
+#define E1000_RFCTL_SYNQFP 0x00080000 /* SYNQFP in RFCTL register */
+
+#define E1000_ETQF_ETHERTYPE 0x0000FFFF
+#define E1000_ETQF_QUEUE 0x00070000
+#define E1000_ETQF_QUEUE_SHIFT 16
+#define E1000_MAX_ETQF_FILTERS 8
+
+#define E1000_IMIR_DSTPORT 0x0000FFFF
+#define E1000_IMIR_PRIORITY 0xE0000000
+#define E1000_MAX_TTQF_FILTERS 8
+#define E1000_2TUPLE_MAX_PRI 7
+
+#define E1000_MAX_FLEX_FILTERS 8
+#define E1000_MAX_FHFT 4
+#define E1000_MAX_FHFT_EXT 4
+#define E1000_FHFT_SIZE_IN_DWD 64
+#define E1000_MAX_FLEX_FILTER_PRI 7
+#define E1000_MAX_FLEX_FILTER_LEN 128
+#define E1000_MAX_FLEX_FILTER_DWDS \
+ (E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
+#define E1000_FLEX_FILTERS_MASK_SIZE \
+ (E1000_MAX_FLEX_FILTER_DWDS / 2)
+#define E1000_FHFT_QUEUEING_LEN 0x0000007F
+#define E1000_FHFT_QUEUEING_QUEUE 0x00000700
+#define E1000_FHFT_QUEUEING_PRIO 0x00070000
+#define E1000_FHFT_QUEUEING_OFFSET 0xFC
+#define E1000_FHFT_QUEUEING_QUEUE_SHIFT 8
+#define E1000_FHFT_QUEUEING_PRIO_SHIFT 16
+#define E1000_WUFC_FLEX_HQ 0x00004000
+
+#define E1000_SPQF_SRCPORT 0x0000FFFF
+
+#define E1000_MAX_FTQF_FILTERS 8
+#define E1000_FTQF_PROTOCOL_MASK 0x000000FF
+#define E1000_FTQF_5TUPLE_MASK_SHIFT 28
+#define E1000_FTQF_QUEUE_MASK 0x03ff0000
+#define E1000_FTQF_QUEUE_SHIFT 16
+#define E1000_FTQF_QUEUE_ENABLE 0x00000100
+
+#define IGB_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_IPV6_EX | \
+ ETH_RSS_IPV6_TCP_EX | \
+ ETH_RSS_IPV6_UDP_EX)
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * desscriptors should meet the following condition:
+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define E1000_MIN_RING_DESC 32
+#define E1000_MAX_RING_DESC 4096
+
+/*
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
+ * This will also optimize cache line size effect.
+ * H/W supports up to cache line size 128.
+ */
+#define E1000_ALIGN 128
+
+#define IGB_RXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
+#define IGB_TXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_tx_desc))
+
+#define EM_RXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_rx_desc))
+#define EM_TXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_data_desc))
+
+#define E1000_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define E1000_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+#define IGB_TX_MAX_SEG UINT8_MAX
+#define IGB_TX_MAX_MTU_SEG UINT8_MAX
+#define EM_TX_MAX_SEG UINT8_MAX
+#define EM_TX_MAX_MTU_SEG UINT8_MAX
+
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350 &&\
+ (type) != e1000_82576 && (type) != e1000_i210 &&\
+ (type) != e1000_i211)\
+ return -ENOTSUP;\
+} while (0)
+
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350 &&\
+ (type) != e1000_i210 && (type) != e1000_i211)\
+ return -ENOTSUP; \
+} while (0)
+
+/* structure for interrupt relative data */
+struct e1000_interrupt {
+ uint32_t flags;
+ uint32_t mask;
+};
+
+/* local vfta copy */
+struct e1000_vfta {
+ uint32_t vfta[IGB_VFTA_SIZE];
+};
+
+/*
+ * VF data which used by PF host only
+ */
+#define E1000_MAX_VF_MC_ENTRIES 30
+struct e1000_vf_info {
+ uint8_t vf_mac_addresses[ETHER_ADDR_LEN];
+ uint16_t vf_mc_hashes[E1000_MAX_VF_MC_ENTRIES];
+ uint16_t num_vf_mc_hashes;
+ uint16_t default_vf_vlan_id;
+ uint16_t vlans_enabled;
+ uint16_t pf_qos;
+ uint16_t vlan_count;
+ uint16_t tx_rate;
+};
+
+TAILQ_HEAD(e1000_flex_filter_list, e1000_flex_filter);
+
+struct e1000_flex_filter_info {
+ uint16_t len;
+ uint32_t dwords[E1000_MAX_FLEX_FILTER_DWDS]; /* flex bytes in dword. */
+ /* if mask bit is 1b, do not compare corresponding byte in dwords. */
+ uint8_t mask[E1000_FLEX_FILTERS_MASK_SIZE];
+ uint8_t priority;
+};
+
+/* Flex filter structure */
+struct e1000_flex_filter {
+ TAILQ_ENTRY(e1000_flex_filter) entries;
+ uint16_t index; /* index of flex filter */
+ struct e1000_flex_filter_info filter_info;
+ uint16_t queue; /* rx queue assigned to */
+};
+
+TAILQ_HEAD(e1000_5tuple_filter_list, e1000_5tuple_filter);
+TAILQ_HEAD(e1000_2tuple_filter_list, e1000_2tuple_filter);
+
+struct e1000_5tuple_filter_info {
+ uint32_t dst_ip;
+ uint32_t src_ip;
+ uint16_t dst_port;
+ uint16_t src_port;
+ uint8_t proto; /* l4 protocol. */
+ /* the packet matched above 5tuple and contain any set bit will hit this filter. */
+ uint8_t tcp_flags;
+ uint8_t priority; /* seven levels (001b-111b), 111b is highest,
+ used when more than one filter matches. */
+ uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */
+ src_ip_mask:1, /* if mask is 1b, do not compare src ip. */
+ dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
+ src_port_mask:1, /* if mask is 1b, do not compare src port. */
+ proto_mask:1; /* if mask is 1b, do not compare protocol. */
+};
+
+struct e1000_2tuple_filter_info {
+ uint16_t dst_port;
+ uint8_t proto; /* l4 protocol. */
+ /* the packet matched above 2tuple and contain any set bit will hit this filter. */
+ uint8_t tcp_flags;
+ uint8_t priority; /* seven levels (001b-111b), 111b is highest,
+ used when more than one filter matches. */
+ uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */
+ src_ip_mask:1, /* if mask is 1b, do not compare src ip. */
+ dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
+ src_port_mask:1, /* if mask is 1b, do not compare src port. */
+ proto_mask:1; /* if mask is 1b, do not compare protocol. */
+};
+
+/* 5tuple filter structure */
+struct e1000_5tuple_filter {
+ TAILQ_ENTRY(e1000_5tuple_filter) entries;
+ uint16_t index; /* the index of 5tuple filter */
+ struct e1000_5tuple_filter_info filter_info;
+ uint16_t queue; /* rx queue assigned to */
+};
+
+/* 2tuple filter structure */
+struct e1000_2tuple_filter {
+ TAILQ_ENTRY(e1000_2tuple_filter) entries;
+ uint16_t index; /* the index of 2tuple filter */
+ struct e1000_2tuple_filter_info filter_info;
+ uint16_t queue; /* rx queue assigned to */
+};
+
+/* ethertype filter structure */
+struct igb_ethertype_filter {
+ uint16_t ethertype;
+ uint32_t etqf;
+};
+
+struct igb_rte_flow_rss_conf {
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+ uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
+ uint16_t queue[IGB_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
+};
+
+/*
+ * Structure to store filters'info.
+ */
+struct e1000_filter_info {
+ uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
+ /* store used ethertype filters*/
+ struct igb_ethertype_filter ethertype_filters[E1000_MAX_ETQF_FILTERS];
+ uint8_t flex_mask; /* Bit mask for every used flex filter */
+ struct e1000_flex_filter_list flex_list;
+ /* Bit mask for every used 5tuple filter */
+ uint8_t fivetuple_mask;
+ struct e1000_5tuple_filter_list fivetuple_list;
+ /* Bit mask for every used 2tuple filter */
+ uint8_t twotuple_mask;
+ struct e1000_2tuple_filter_list twotuple_list;
+ /* store the SYN filter info */
+ uint32_t syn_info;
+ /* store the rss filter info */
+ struct igb_rte_flow_rss_conf rss_info;
+};
+
+/*
+ * Structure to store private data for each driver instance (for each port).
+ */
+struct e1000_adapter {
+ struct e1000_hw hw;
+ struct e1000_hw_stats stats;
+ struct e1000_interrupt intr;
+ struct e1000_vfta shadow_vfta;
+ struct e1000_vf_info *vfdata;
+ struct e1000_filter_info filter;
+ bool stopped;
+ struct rte_timecounter systime_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct rte_timecounter tx_tstamp_tc;
+};
+
+#define E1000_DEV_PRIVATE(adapter) \
+ ((struct e1000_adapter *)adapter)
+
+#define E1000_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct e1000_adapter *)adapter)->hw)
+
+#define E1000_DEV_PRIVATE_TO_STATS(adapter) \
+ (&((struct e1000_adapter *)adapter)->stats)
+
+#define E1000_DEV_PRIVATE_TO_INTR(adapter) \
+ (&((struct e1000_adapter *)adapter)->intr)
+
+#define E1000_DEV_PRIVATE_TO_VFTA(adapter) \
+ (&((struct e1000_adapter *)adapter)->shadow_vfta)
+
+#define E1000_DEV_PRIVATE_TO_P_VFDATA(adapter) \
+ (&((struct e1000_adapter *)adapter)->vfdata)
+
+#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
+ (&((struct e1000_adapter *)adapter)->filter)
+
+struct rte_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+/* ntuple filter list structure */
+struct igb_ntuple_filter_ele {
+ TAILQ_ENTRY(igb_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+
+/* ethertype filter list structure */
+struct igb_ethertype_filter_ele {
+ TAILQ_ENTRY(igb_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+
+/* syn filter list structure */
+struct igb_eth_syn_filter_ele {
+ TAILQ_ENTRY(igb_eth_syn_filter_ele) entries;
+ struct rte_eth_syn_filter filter_info;
+};
+
+/* flex filter list structure */
+struct igb_flex_filter_ele {
+ TAILQ_ENTRY(igb_flex_filter_ele) entries;
+ struct rte_eth_flex_filter filter_info;
+};
+
+/* rss filter list structure */
+struct igb_rss_conf_ele {
+ TAILQ_ENTRY(igb_rss_conf_ele) entries;
+ struct igb_rte_flow_rss_conf filter_info;
+};
+
+/* igb_flow memory list structure */
+struct igb_flow_mem {
+ TAILQ_ENTRY(igb_flow_mem) entries;
+ struct rte_flow *flow;
+ struct rte_eth_dev *dev;
+};
+
+TAILQ_HEAD(igb_ntuple_filter_list, igb_ntuple_filter_ele);
+struct igb_ntuple_filter_list igb_filter_ntuple_list;
+TAILQ_HEAD(igb_ethertype_filter_list, igb_ethertype_filter_ele);
+struct igb_ethertype_filter_list igb_filter_ethertype_list;
+TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele);
+struct igb_syn_filter_list igb_filter_syn_list;
+TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele);
+struct igb_flex_filter_list igb_filter_flex_list;
+TAILQ_HEAD(igb_rss_filter_list, igb_rss_conf_ele);
+struct igb_rss_filter_list igb_filter_rss_list;
+TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);
+struct igb_flow_mem_list igb_flow_list;
+
+extern const struct rte_flow_ops igb_flow_ops;
+
+/*
+ * RX/TX IGB function prototypes
+ */
+void eth_igb_tx_queue_release(void *txq);
+void eth_igb_rx_queue_release(void *rxq);
+void igb_dev_clear_queues(struct rte_eth_dev *dev);
+void igb_dev_free_queues(struct rte_eth_dev *dev);
+
+uint64_t igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+
+int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+
+int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
+uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
+int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+int eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt);
+
+int eth_igb_rx_init(struct rte_eth_dev *dev);
+
+void eth_igb_tx_init(struct rte_eth_dev *dev);
+
+uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t eth_igb_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t eth_igb_recv_scattered_pkts(void *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+int eth_igb_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+int eth_igbvf_rx_init(struct rte_eth_dev *dev);
+
+void eth_igbvf_tx_init(struct rte_eth_dev *dev);
+
+/*
+ * misc function prototypes
+ */
+void igb_pf_host_init(struct rte_eth_dev *eth_dev);
+
+void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);
+
+int igb_pf_host_configure(struct rte_eth_dev *eth_dev);
+
+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+
+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+
+uint32_t em_get_max_pktlen(struct rte_eth_dev *dev);
+
+/*
+ * RX/TX EM function prototypes
+ */
+void eth_em_tx_queue_release(void *txq);
+void eth_em_rx_queue_release(void *rxq);
+
+void em_dev_clear_queues(struct rte_eth_dev *dev);
+void em_dev_free_queues(struct rte_eth_dev *dev);
+
+uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+
+int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+
+int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
+uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
+int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+int eth_em_rx_init(struct rte_eth_dev *dev);
+
+void eth_em_tx_init(struct rte_eth_dev *dev);
+
+uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t eth_em_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+
+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+
+void igb_pf_host_uninit(struct rte_eth_dev *dev);
+
+void igb_filterlist_flush(struct rte_eth_dev *dev);
+int igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct e1000_5tuple_filter *filter);
+int igb_delete_2tuple_filter(struct rte_eth_dev *dev,
+ struct e1000_2tuple_filter *filter);
+void igb_remove_flex_filter(struct rte_eth_dev *dev,
+ struct e1000_flex_filter *filter);
+int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
+ uint8_t idx);
+int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter, bool add);
+int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add);
+int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
+ struct rte_eth_flex_filter *filter,
+ bool add);
+int igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int igb_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
+int igb_config_rss_filter(struct rte_eth_dev *dev,
+ struct igb_rte_flow_rss_conf *conf,
+ bool add);
+
+#endif /* _E1000_ETHDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/e1000/e1000_logs.c b/src/spdk/dpdk/drivers/net/e1000/e1000_logs.c
new file mode 100644
index 00000000..22173939
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/e1000_logs.c
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "e1000_logs.h"
+
+/* declared as extern in e1000_logs.h */
+int e1000_logtype_init;
+int e1000_logtype_driver;
+
+/* avoids double registering of logs if EM and IGB drivers are in use */
+static int e1000_log_initialized;
+
+void
+e1000_igb_init_log(void)
+{
+ if (!e1000_log_initialized) {
+ e1000_logtype_init = rte_log_register("pmd.net.e1000.init");
+ if (e1000_logtype_init >= 0)
+ rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE);
+ e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver");
+ if (e1000_logtype_driver >= 0)
+ rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE);
+ e1000_log_initialized = 1;
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/e1000_logs.h b/src/spdk/dpdk/drivers/net/e1000/e1000_logs.h
new file mode 100644
index 00000000..69d3d311
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/e1000_logs.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _E1000_LOGS_H_
+#define _E1000_LOGS_H_
+
+#include <rte_log.h>
+
+extern int e1000_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, e1000_logtype_init, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_E1000_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_E1000_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_E1000_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+extern int e1000_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, e1000_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+
+/* log init function shared by e1000 and igb drivers */
+void e1000_igb_init_log(void);
+
+#endif /* _E1000_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/e1000/em_ethdev.c b/src/spdk/dpdk/drivers/net/e1000/em_ethdev.c
new file mode 100644
index 00000000..053e855b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/em_ethdev.c
@@ -0,0 +1,1829 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include "e1000_logs.h"
+#include "base/e1000_api.h"
+#include "e1000_ethdev.h"
+
+#define EM_EIAC 0x000DC
+
+#define PMD_ROUNDUP(x,y) (((x) + (y) - 1)/(y) * (y))
+
+
+static int eth_em_configure(struct rte_eth_dev *dev);
+static int eth_em_start(struct rte_eth_dev *dev);
+static void eth_em_stop(struct rte_eth_dev *dev);
+static void eth_em_close(struct rte_eth_dev *dev);
+static void eth_em_promiscuous_enable(struct rte_eth_dev *dev);
+static void eth_em_promiscuous_disable(struct rte_eth_dev *dev);
+static void eth_em_allmulticast_enable(struct rte_eth_dev *dev);
+static void eth_em_allmulticast_disable(struct rte_eth_dev *dev);
+static int eth_em_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int eth_em_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *rte_stats);
+static void eth_em_stats_reset(struct rte_eth_dev *dev);
+static void eth_em_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int eth_em_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_em_interrupt_get_status(struct rte_eth_dev *dev);
+static int eth_em_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *handle);
+static void eth_em_interrupt_handler(void *param);
+
+static int em_hw_init(struct e1000_hw *hw);
+static int em_hardware_init(struct e1000_hw *hw);
+static void em_hw_control_acquire(struct e1000_hw *hw);
+static void em_hw_control_release(struct e1000_hw *hw);
+static void em_init_manageability(struct e1000_hw *hw);
+static void em_release_manageability(struct e1000_hw *hw);
+
+static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
+static int eth_em_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static int eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void em_vlan_hw_filter_enable(struct rte_eth_dev *dev);
+static void em_vlan_hw_filter_disable(struct rte_eth_dev *dev);
+static void em_vlan_hw_strip_enable(struct rte_eth_dev *dev);
+static void em_vlan_hw_strip_disable(struct rte_eth_dev *dev);
+
+/*
+static void eth_em_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+*/
+
+static int eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+static int eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
+static void em_lsc_intr_disable(struct e1000_hw *hw);
+static void em_rxq_intr_enable(struct e1000_hw *hw);
+static void em_rxq_intr_disable(struct e1000_hw *hw);
+
+static int eth_em_led_on(struct rte_eth_dev *dev);
+static int eth_em_led_off(struct rte_eth_dev *dev);
+
+static int em_get_rx_buffer_size(struct e1000_hw *hw);
+static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
+static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr);
+
+static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+
+#define EM_FC_PAUSE_TIME 0x0680
+#define EM_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
+#define EM_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
+
+static enum e1000_fc_mode em_fc_setting = e1000_fc_full;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_em_map[] = {
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82540EM) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_FIBER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_FIBER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_QUAD_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_FIBER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_DUAL) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_QUAD) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571PT_QUAD_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_FIBER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER_LP) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_FIBER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82573L) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574L) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574LA) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82583V) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH2_LV_LM) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_LM) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_V) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_LM) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_V) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM2) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V2) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM3) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V3) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM2) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V2) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LBG_I219_LM3) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM4) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V4) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM5) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V5) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM6) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V6) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM7) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V7) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct eth_dev_ops eth_em_ops = {
+ .dev_configure = eth_em_configure,
+ .dev_start = eth_em_start,
+ .dev_stop = eth_em_stop,
+ .dev_close = eth_em_close,
+ .promiscuous_enable = eth_em_promiscuous_enable,
+ .promiscuous_disable = eth_em_promiscuous_disable,
+ .allmulticast_enable = eth_em_allmulticast_enable,
+ .allmulticast_disable = eth_em_allmulticast_disable,
+ .link_update = eth_em_link_update,
+ .stats_get = eth_em_stats_get,
+ .stats_reset = eth_em_stats_reset,
+ .dev_infos_get = eth_em_infos_get,
+ .mtu_set = eth_em_mtu_set,
+ .vlan_filter_set = eth_em_vlan_filter_set,
+ .vlan_offload_set = eth_em_vlan_offload_set,
+ .rx_queue_setup = eth_em_rx_queue_setup,
+ .rx_queue_release = eth_em_rx_queue_release,
+ .rx_queue_count = eth_em_rx_queue_count,
+ .rx_descriptor_done = eth_em_rx_descriptor_done,
+ .rx_descriptor_status = eth_em_rx_descriptor_status,
+ .tx_descriptor_status = eth_em_tx_descriptor_status,
+ .tx_queue_setup = eth_em_tx_queue_setup,
+ .tx_queue_release = eth_em_tx_queue_release,
+ .rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
+ .rx_queue_intr_disable = eth_em_rx_queue_intr_disable,
+ .dev_led_on = eth_em_led_on,
+ .dev_led_off = eth_em_led_off,
+ .flow_ctrl_get = eth_em_flow_ctrl_get,
+ .flow_ctrl_set = eth_em_flow_ctrl_set,
+ .mac_addr_set = eth_em_default_mac_addr_set,
+ .mac_addr_add = eth_em_rar_set,
+ .mac_addr_remove = eth_em_rar_clear,
+ .set_mc_addr_list = eth_em_set_mc_addr_list,
+ .rxq_info_get = em_rxq_info_get,
+ .txq_info_get = em_txq_info_get,
+};
+
+
+/**
+ * eth_em_dev_is_ich8 - Check for ICH8 device
+ * @hw: pointer to the HW structure
+ *
+ * return TRUE for ICH8, otherwise FALSE
+ **/
+static bool
+eth_em_dev_is_ich8(struct e1000_hw *hw)
+{
+ DEBUGFUNC("eth_em_dev_is_ich8");
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_PCH2_LV_LM:
+ case E1000_DEV_ID_PCH_LPT_I217_LM:
+ case E1000_DEV_ID_PCH_LPT_I217_V:
+ case E1000_DEV_ID_PCH_LPTLP_I218_LM:
+ case E1000_DEV_ID_PCH_LPTLP_I218_V:
+ case E1000_DEV_ID_PCH_I218_V2:
+ case E1000_DEV_ID_PCH_I218_LM2:
+ case E1000_DEV_ID_PCH_I218_V3:
+ case E1000_DEV_ID_PCH_I218_LM3:
+ case E1000_DEV_ID_PCH_SPT_I219_LM:
+ case E1000_DEV_ID_PCH_SPT_I219_V:
+ case E1000_DEV_ID_PCH_SPT_I219_LM2:
+ case E1000_DEV_ID_PCH_SPT_I219_V2:
+ case E1000_DEV_ID_PCH_LBG_I219_LM3:
+ case E1000_DEV_ID_PCH_SPT_I219_LM4:
+ case E1000_DEV_ID_PCH_SPT_I219_V4:
+ case E1000_DEV_ID_PCH_SPT_I219_LM5:
+ case E1000_DEV_ID_PCH_SPT_I219_V5:
+ case E1000_DEV_ID_PCH_CNP_I219_LM6:
+ case E1000_DEV_ID_PCH_CNP_I219_V6:
+ case E1000_DEV_ID_PCH_CNP_I219_LM7:
+ case E1000_DEV_ID_PCH_CNP_I219_V7:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int
+eth_em_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+
+ eth_dev->dev_ops = &eth_em_ops;
+ eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
+ eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
+ eth_dev->tx_pkt_prepare = (eth_tx_prep_t)&eth_em_prep_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst =
+ (eth_rx_burst_t)&eth_em_recv_scattered_pkts;
+ return 0;
+ }
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->device_id = pci_dev->id.device_id;
+ adapter->stopped = 0;
+
+ /* For ICH8 support we'll need to map the flash memory BAR */
+ if (eth_em_dev_is_ich8(hw))
+ hw->flash_address = (void *)pci_dev->mem_resource[1].addr;
+
+ if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS ||
+ em_hw_init(hw) != 0) {
+ PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: "
+ "failed to init HW",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+ return -ENODEV;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
+ hw->mac.rar_entry_count, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+ "store MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ return -ENOMEM;
+ }
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *) hw->mac.addr,
+ eth_dev->data->mac_addrs);
+
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ rte_intr_callback_register(intr_handle,
+ eth_em_interrupt_handler, eth_dev);
+
+ return 0;
+}
+
+static int
+eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (adapter->stopped == 0)
+ eth_em_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ eth_em_interrupt_handler, eth_dev);
+
+ return 0;
+}
+
+static int eth_em_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct e1000_adapter), eth_em_dev_init);
+}
+
+static int eth_em_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_em_dev_uninit);
+}
+
+static struct rte_pci_driver rte_em_pmd = {
+ .id_table = pci_id_em_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = eth_em_pci_probe,
+ .remove = eth_em_pci_remove,
+};
+
+static int
+em_hw_init(struct e1000_hw *hw)
+{
+ int diag;
+
+ diag = hw->mac.ops.init_params(hw);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "MAC Initialization Error");
+ return diag;
+ }
+ diag = hw->nvm.ops.init_params(hw);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "NVM Initialization Error");
+ return diag;
+ }
+ diag = hw->phy.ops.init_params(hw);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "PHY Initialization Error");
+ return diag;
+ }
+ (void) e1000_get_bus_info(hw);
+
+ hw->mac.autoneg = 1;
+ hw->phy.autoneg_wait_to_complete = 0;
+ hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+
+ e1000_init_script_state_82541(hw, TRUE);
+ e1000_set_tbi_compatibility_82543(hw, TRUE);
+
+ /* Copper options */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ hw->phy.mdix = 0; /* AUTO_ALL_MODES */
+ hw->phy.disable_polarity_correction = 0;
+ hw->phy.ms_type = e1000_ms_hw_default;
+ }
+
+ /*
+ * Start from a known state, this is important in reading the nvm
+ * and mac from that.
+ */
+ e1000_reset_hw(hw);
+
+ /* Make sure we have a good EEPROM before we read from it */
+ if (e1000_validate_nvm_checksum(hw) < 0) {
+ /*
+ * Some PCI-E parts fail the first check due to
+ * the link being in sleep state, call it again,
+ * if it fails a second time its a real issue.
+ */
+ diag = e1000_validate_nvm_checksum(hw);
+ if (diag < 0) {
+ PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
+ goto error;
+ }
+ }
+
+ /* Read the permanent MAC address out of the EEPROM */
+ diag = e1000_read_mac_addr(hw);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
+ goto error;
+ }
+
+ /* Now initialize the hardware */
+ diag = em_hardware_init(hw);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "Hardware initialization failed");
+ goto error;
+ }
+
+ hw->mac.get_link_status = 1;
+
+ /* Indicate SOL/IDER usage */
+ diag = e1000_check_reset_block(hw);
+ if (diag < 0) {
+ PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
+ "SOL/IDER session");
+ }
+ return 0;
+
+error:
+ em_hw_control_release(hw);
+ return diag;
+}
+
+static int
+eth_em_configure(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+static void
+em_set_pba(struct e1000_hw *hw)
+{
+ uint32_t pba;
+
+ /*
+ * Packet Buffer Allocation (PBA)
+ * Writing PBA sets the receive portion of the buffer
+ * the remainder is used for the transmit buffer.
+ * Devices before the 82547 had a Packet Buffer of 64K.
+ * After the 82547 the buffer was reduced to 40K.
+ */
+ switch (hw->mac.type) {
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ /* 82547: Total Packet Buffer is 40K */
+ pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
+ break;
+ case e1000_82573: /* 82573: Total Packet Buffer is 32K */
+ pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
+ break;
+ case e1000_ich8lan:
+ pba = E1000_PBA_8K;
+ break;
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ pba = E1000_PBA_10K;
+ break;
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
+ pba = E1000_PBA_26K;
+ break;
+ default:
+ pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
+ }
+
+ E1000_WRITE_REG(hw, E1000_PBA, pba);
+}
+
+static void
+eth_em_rxtx_control(struct rte_eth_dev *dev,
+ bool enable)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tctl, rctl;
+
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ if (enable) {
+ /* enable Tx/Rx */
+ tctl |= E1000_TCTL_EN;
+ rctl |= E1000_RCTL_EN;
+ } else {
+ /* disable Tx/Rx */
+ tctl &= ~E1000_TCTL_EN;
+ rctl &= ~E1000_RCTL_EN;
+ }
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+}
+
+static int
+eth_em_start(struct rte_eth_dev *dev)
+{
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int ret, mask;
+ uint32_t intr_vector = 0;
+ uint32_t *speeds;
+ int num_speeds;
+ bool autoneg;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_em_stop(dev);
+
+ e1000_power_up_phy(hw);
+
+ /* Set default PBA value */
+ em_set_pba(hw);
+
+ /* Put the address into the Receive Address Array */
+ e1000_rar_set(hw, hw->mac.addr, 0);
+
+ /*
+ * With the 82571 adapter, RAR[0] may be overwritten
+ * when the other port is reset, we make a duplicate
+ * in RAR[14] for that eventuality, this assures
+ * the interface continues to function.
+ */
+ if (hw->mac.type == e1000_82571) {
+ e1000_set_laa_state_82571(hw, TRUE);
+ e1000_rar_set(hw, hw->mac.addr, E1000_RAR_ENTRIES - 1);
+ }
+
+ /* Initialize the hardware */
+ if (em_hardware_init(hw)) {
+ PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
+ return -EIO;
+ }
+
+ E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
+
+ /* Configure for OS presence */
+ em_init_manageability(hw);
+
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle)) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+
+ /* enable rx interrupt */
+ em_rxq_intr_enable(hw);
+ }
+
+ eth_em_tx_init(dev);
+
+ ret = eth_em_rx_init(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ em_dev_clear_queues(dev);
+ return ret;
+ }
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ ETH_VLAN_EXTEND_MASK;
+ ret = eth_em_vlan_offload_set(dev, mask);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to update vlan offload");
+ em_dev_clear_queues(dev);
+ return ret;
+ }
+
+ /* Set Interrupt Throttling Rate to maximum allowed value. */
+ E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX);
+
+ /* Setup link speed and duplex */
+ speeds = &dev->data->dev_conf.link_speeds;
+ if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+ hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+ hw->mac.autoneg = 1;
+ } else {
+ num_speeds = 0;
+ autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+
+ /* Reset */
+ hw->phy.autoneg_advertised = 0;
+
+ if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
+ ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+ num_speeds = -1;
+ goto error_invalid_config;
+ }
+ if (*speeds & ETH_LINK_SPEED_10M_HD) {
+ hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
+ num_speeds++;
+ }
+ if (*speeds & ETH_LINK_SPEED_10M) {
+ hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
+ num_speeds++;
+ }
+ if (*speeds & ETH_LINK_SPEED_100M_HD) {
+ hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
+ num_speeds++;
+ }
+ if (*speeds & ETH_LINK_SPEED_100M) {
+ hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
+ num_speeds++;
+ }
+ if (*speeds & ETH_LINK_SPEED_1G) {
+ hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
+ num_speeds++;
+ }
+ if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
+ goto error_invalid_config;
+
+ /* Set/reset the mac.autoneg based on the link speed,
+ * fixed or not
+ */
+ if (!autoneg) {
+ hw->mac.autoneg = 0;
+ hw->mac.forced_speed_duplex =
+ hw->phy.autoneg_advertised;
+ } else {
+ hw->mac.autoneg = 1;
+ }
+ }
+
+ e1000_setup_link(hw);
+
+ if (rte_intr_allow_others(intr_handle)) {
+ /* check if lsc interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc != 0) {
+ ret = eth_em_interrupt_setup(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to setup interrupts");
+ em_dev_clear_queues(dev);
+ return ret;
+ }
+ }
+ } else {
+ rte_intr_callback_unregister(intr_handle,
+ eth_em_interrupt_handler,
+ (void *)dev);
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ PMD_INIT_LOG(INFO, "lsc won't enable because of"
+ " no intr multiplexn");
+ }
+ /* check if rxq interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ eth_em_rxq_interrupt_setup(dev);
+
+ rte_intr_enable(intr_handle);
+
+ adapter->stopped = 0;
+
+ eth_em_rxtx_control(dev, true);
+ eth_em_link_update(dev, 0);
+
+ PMD_INIT_LOG(DEBUG, "<<");
+
+ return 0;
+
+error_invalid_config:
+ PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
+ dev->data->dev_conf.link_speeds, dev->data->port_id);
+ em_dev_clear_queues(dev);
+ return -EINVAL;
+}
+
+/*********************************************************************
+ *
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC.
+ *
+ **********************************************************************/
+static void
+eth_em_stop(struct rte_eth_dev *dev)
+{
+ struct rte_eth_link link;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ eth_em_rxtx_control(dev, false);
+ em_rxq_intr_disable(hw);
+ em_lsc_intr_disable(hw);
+
+ e1000_reset_hw(hw);
+ if (hw->mac.type >= e1000_82544)
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+ /* Power down the phy. Needed to make the link go down */
+ e1000_power_down_phy(hw);
+
+ em_dev_clear_queues(dev);
+
+ /* clear the recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(dev, &link);
+
+ if (!rte_intr_allow_others(intr_handle))
+ /* resume to the default handler */
+ rte_intr_callback_register(intr_handle,
+ eth_em_interrupt_handler,
+ (void *)dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
+
+static void
+eth_em_close(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
+
+ eth_em_stop(dev);
+ adapter->stopped = 1;
+ em_dev_free_queues(dev);
+ e1000_phy_hw_reset(hw);
+ em_release_manageability(hw);
+ em_hw_control_release(hw);
+}
+
+static int
+em_get_rx_buffer_size(struct e1000_hw *hw)
+{
+ uint32_t rx_buf_size;
+
+ rx_buf_size = ((E1000_READ_REG(hw, E1000_PBA) & UINT16_MAX) << 10);
+ return rx_buf_size;
+}
+
+/*********************************************************************
+ *
+ * Initialize the hardware
+ *
+ **********************************************************************/
+static int
+em_hardware_init(struct e1000_hw *hw)
+{
+ uint32_t rx_buf_size;
+ int diag;
+
+ /* Issue a global reset */
+ e1000_reset_hw(hw);
+
+ /* Let the firmware know the OS is in control */
+ em_hw_control_acquire(hw);
+
+ /*
+ * These parameters control the automatic generation (Tx) and
+ * response (Rx) to Ethernet PAUSE frames.
+ * - High water mark should allow for at least two standard size (1518)
+ * frames to be received after sending an XOFF.
+ * - Low water mark works best when it is very near the high water mark.
+ * This allows the receiver to restart by sending XON when it has
+ * drained a bit. Here we use an arbitrary value of 1500 which will
+ * restart after one full frame is pulled from the buffer. There
+ * could be several smaller frames in the buffer and if so they will
+ * not trigger the XON until their total number reduces the buffer
+ * by 1500.
+ * - The pause time is fairly large at 1000 x 512ns = 512 usec.
+ */
+ rx_buf_size = em_get_rx_buffer_size(hw);
+
+ hw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024);
+ hw->fc.low_water = hw->fc.high_water - 1500;
+
+ if (hw->mac.type == e1000_80003es2lan)
+ hw->fc.pause_time = UINT16_MAX;
+ else
+ hw->fc.pause_time = EM_FC_PAUSE_TIME;
+
+ hw->fc.send_xon = 1;
+
+ /* Set Flow control, use the tunable location if sane */
+ if (em_fc_setting <= e1000_fc_full)
+ hw->fc.requested_mode = em_fc_setting;
+ else
+ hw->fc.requested_mode = e1000_fc_none;
+
+ /* Workaround: no TX flow ctrl for PCH */
+ if (hw->mac.type == e1000_pchlan)
+ hw->fc.requested_mode = e1000_fc_rx_pause;
+
+ /* Override - settings for PCH2LAN, ya its magic :) */
+ if (hw->mac.type == e1000_pch2lan) {
+ hw->fc.high_water = 0x5C20;
+ hw->fc.low_water = 0x5048;
+ hw->fc.pause_time = 0x0650;
+ hw->fc.refresh_time = 0x0400;
+ } else if (hw->mac.type == e1000_pch_lpt ||
+ hw->mac.type == e1000_pch_spt ||
+ hw->mac.type == e1000_pch_cnp) {
+ hw->fc.requested_mode = e1000_fc_full;
+ }
+
+ diag = e1000_init_hw(hw);
+ if (diag < 0)
+ return diag;
+ e1000_check_for_link(hw);
+ return 0;
+}
+
+/* This function is based on em_update_stats_counters() in e1000/if_em.c */
+static int
+eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_hw_stats *stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ int pause_frames;
+
+ if(hw->phy.media_type == e1000_media_type_copper ||
+ (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ stats->symerrs += E1000_READ_REG(hw,E1000_SYMERRS);
+ stats->sec += E1000_READ_REG(hw, E1000_SEC);
+ }
+
+ stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
+ stats->mpc += E1000_READ_REG(hw, E1000_MPC);
+ stats->scc += E1000_READ_REG(hw, E1000_SCC);
+ stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
+
+ stats->mcc += E1000_READ_REG(hw, E1000_MCC);
+ stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
+ stats->colc += E1000_READ_REG(hw, E1000_COLC);
+ stats->dc += E1000_READ_REG(hw, E1000_DC);
+ stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
+ stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
+ stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
+
+ /*
+ * For watchdog management we need to know if we have been
+ * paused during the last interval, so capture that here.
+ */
+ pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
+ stats->xoffrxc += pause_frames;
+ stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
+ stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
+ stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
+ stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
+ stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
+ stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
+ stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
+ stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
+ stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
+ stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
+ stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
+ stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
+
+ /*
+ * For the 64-bit byte counters the low dword must be read first.
+ * Both registers clear on the read of the high dword.
+ */
+
+ stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
+ stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
+ stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
+ stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
+
+ stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
+ stats->ruc += E1000_READ_REG(hw, E1000_RUC);
+ stats->rfc += E1000_READ_REG(hw, E1000_RFC);
+ stats->roc += E1000_READ_REG(hw, E1000_ROC);
+ stats->rjc += E1000_READ_REG(hw, E1000_RJC);
+
+ stats->tor += E1000_READ_REG(hw, E1000_TORH);
+ stats->tot += E1000_READ_REG(hw, E1000_TOTH);
+
+ stats->tpr += E1000_READ_REG(hw, E1000_TPR);
+ stats->tpt += E1000_READ_REG(hw, E1000_TPT);
+ stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
+ stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
+ stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
+ stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
+ stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
+ stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
+ stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
+ stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
+
+ /* Interrupt Counts */
+
+ if (hw->mac.type >= e1000_82571) {
+ stats->iac += E1000_READ_REG(hw, E1000_IAC);
+ stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
+ stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
+ stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
+ stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
+ stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
+ stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
+ stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
+ stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
+ }
+
+ if (hw->mac.type >= e1000_82543) {
+ stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
+ stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
+ stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
+ stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
+ stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
+ stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
+ }
+
+ if (rte_stats == NULL)
+ return -EINVAL;
+
+ /* Rx Errors */
+ rte_stats->imissed = stats->mpc;
+ rte_stats->ierrors = stats->crcerrs +
+ stats->rlec + stats->ruc + stats->roc +
+ stats->rxerrc + stats->algnerrc + stats->cexterr;
+
+ /* Tx Errors */
+ rte_stats->oerrors = stats->ecol + stats->latecol;
+
+ rte_stats->ipackets = stats->gprc;
+ rte_stats->opackets = stats->gptc;
+ rte_stats->ibytes = stats->gorc;
+ rte_stats->obytes = stats->gotc;
+ return 0;
+}
+
+static void
+eth_em_stats_reset(struct rte_eth_dev *dev)
+{
+ struct e1000_hw_stats *hw_stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* HW registers are cleared on read */
+ eth_em_stats_get(dev, NULL);
+
+ /* Reset software totals */
+ memset(hw_stats, 0, sizeof(*hw_stats));
+}
+
+static int
+eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ em_rxq_intr_enable(hw);
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static int
+eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ em_rxq_intr_disable(hw);
+
+ return 0;
+}
+
+uint32_t
+em_get_max_pktlen(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
+ case e1000_82574:
+ case e1000_80003es2lan: /* 9K Jumbo Frame size */
+ case e1000_82583:
+ return 0x2412;
+ case e1000_pchlan:
+ return 0x1000;
+ /* Adapters that do not support jumbo frames */
+ case e1000_ich8lan:
+ return ETHER_MAX_LEN;
+ default:
+ return MAX_JUMBO_FRAME_SIZE;
+ }
+}
+
+static void
+eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
+ dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
+ dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+
+ /*
+ * Starting with 631xESB hw supports 2 TX/RX queues per port.
+ * Unfortunatelly, all these nics have just one TX context.
+ * So we have few choises for TX:
+ * - Use just one TX queue.
+ * - Allow cksum offload only for one TX queue.
+ * - Don't allow TX cksum offload at all.
+ * For now, option #1 was chosen.
+ * To use second RX queue we have to use extended RX descriptor
+ * (Multiple Receive Queues are mutually exclusive with UDP
+ * fragmentation and are not supported when a legacy receive
+ * descriptor format is used).
+ * Which means separate RX routinies - as legacy nics (82540, 82545)
+ * don't support extended RXD.
+ * To avoid it we support just one RX queue for now (no RSS).
+ */
+
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = 1;
+
+ dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = E1000_MAX_RING_DESC,
+ .nb_min = E1000_MIN_RING_DESC,
+ .nb_align = EM_RXD_ALIGN,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = E1000_MAX_RING_DESC,
+ .nb_min = E1000_MIN_RING_DESC,
+ .nb_align = EM_TXD_ALIGN,
+ .nb_seg_max = EM_TX_MAX_SEG,
+ .nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
+ };
+
+ dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
+ ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G;
+
+ /* Preferred queue parameters */
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_txportconf.ring_size = 256;
+ dev_info->default_rxportconf.ring_size = 256;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link;
+ int link_check, count;
+
+ link_check = 0;
+ hw->mac.get_link_status = 1;
+
+ /* possible wait-to-complete in up to 9 seconds */
+ for (count = 0; count < EM_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
+ /* Read the real link status */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ /* Do the work to read phy */
+ e1000_check_for_link(hw);
+ link_check = !hw->mac.get_link_status;
+ break;
+
+ case e1000_media_type_fiber:
+ e1000_check_for_link(hw);
+ link_check = (E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STATUS_LU);
+ break;
+
+ case e1000_media_type_internal_serdes:
+ e1000_check_for_link(hw);
+ link_check = hw->mac.serdes_has_link;
+ break;
+
+ default:
+ break;
+ }
+ if (link_check || wait_to_complete == 0)
+ break;
+ rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
+ }
+ memset(&link, 0, sizeof(link));
+
+ /* Now we check if a transition has happened */
+ if (link_check && (link.link_status == ETH_LINK_DOWN)) {
+ uint16_t duplex, speed;
+ hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
+ link.link_duplex = (duplex == FULL_DUPLEX) ?
+ ETH_LINK_FULL_DUPLEX :
+ ETH_LINK_HALF_DUPLEX;
+ link.link_speed = speed;
+ link.link_status = ETH_LINK_UP;
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ } else if (!link_check && (link.link_status == ETH_LINK_UP)) {
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_status = ETH_LINK_DOWN;
+ link.link_autoneg = ETH_LINK_FIXED;
+ }
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+/*
+ * em_hw_control_acquire sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means
+ * that the driver is loaded. For AMT version type f/w
+ * this means that the network i/f is open.
+ */
+static void
+em_hw_control_acquire(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext, swsm;
+
+ /* Let firmware know the driver has taken over */
+ if (hw->mac.type == e1000_82573) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
+
+ } else {
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ }
+}
+
+/*
+ * em_hw_control_release resets {CTRL_EXTT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT versions of the
+ * f/w this means that the network i/f is closed.
+ */
+static void
+em_hw_control_release(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext, swsm;
+
+ /* Let firmware taken over control of h/w */
+ if (hw->mac.type == e1000_82573) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
+ } else {
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ }
+}
+
+/*
+ * Bit of a misnomer, what this really means is
+ * to enable OS management of the system... aka
+ * to disable special hardware management features.
+ */
+static void
+em_init_manageability(struct e1000_hw *hw)
+{
+ if (e1000_enable_mng_pass_thru(hw)) {
+ uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
+ uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
+
+ /* disable hardware interception of ARP */
+ manc &= ~(E1000_MANC_ARP_EN);
+
+ /* enable receiving management packets to the host */
+ manc |= E1000_MANC_EN_MNG2HOST;
+ manc2h |= 1 << 5; /* Mng Port 623 */
+ manc2h |= 1 << 6; /* Mng Port 664 */
+ E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+ }
+}
+
+/*
+ * Give control back to hardware management
+ * controller if there is one.
+ */
+static void
+em_release_manageability(struct e1000_hw *hw)
+{
+ uint32_t manc;
+
+ if (e1000_enable_mng_pass_thru(hw)) {
+ manc = E1000_READ_REG(hw, E1000_MANC);
+
+ /* re-enable hardware interception of ARP */
+ manc |= E1000_MANC_ARP_EN;
+ manc &= ~E1000_MANC_EN_MNG2HOST;
+
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+ }
+}
+
+static void
+eth_em_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_em_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP);
+ if (dev->data->all_multicast == 1)
+ rctl |= E1000_RCTL_MPE;
+ else
+ rctl &= (~E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_em_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_MPE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_em_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ if (dev->data->promiscuous == 1)
+ return; /* must remain in all_multicast mode */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= (~E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static int
+eth_em_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vfta;
+ uint32_t vid_idx;
+ uint32_t vid_bit;
+
+ vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
+ E1000_VFTA_ENTRY_MASK);
+ vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
+ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
+ if (on)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
+
+ /* update local VFTA copy */
+ shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
+
+static void
+em_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* Filter Table Disable */
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg &= ~E1000_RCTL_CFIEN;
+ reg &= ~E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+}
+
+static void
+em_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t reg;
+ int i;
+
+ /* Filter Table Enable, CFI not used for packet acceptance */
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg &= ~E1000_RCTL_CFIEN;
+ reg |= E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+
+ /* restore vfta from local copy */
+ for (i = 0; i < IGB_VFTA_SIZE; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
+}
+
+static void
+em_vlan_hw_strip_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* VLAN Mode Disable */
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg &= ~E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+}
+
+static void
+em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* VLAN Mode Enable */
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg |= E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+}
+
+static int
+eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+
+ rxmode = &dev->data->dev_conf.rxmode;
+ if(mask & ETH_VLAN_STRIP_MASK){
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ em_vlan_hw_strip_enable(dev);
+ else
+ em_vlan_hw_strip_disable(dev);
+ }
+
+ if(mask & ETH_VLAN_FILTER_MASK){
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ em_vlan_hw_filter_enable(dev);
+ else
+ em_vlan_hw_filter_disable(dev);
+ }
+
+ return 0;
+}
+
+/*
+ * It enables the interrupt mask and then enable the interrupt.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_em_interrupt_setup(struct rte_eth_dev *dev)
+{
+ uint32_t regval;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* clear interrupt */
+ E1000_READ_REG(hw, E1000_ICR);
+ regval = E1000_READ_REG(hw, E1000_IMS);
+ E1000_WRITE_REG(hw, E1000_IMS, regval | E1000_ICR_LSC);
+ return 0;
+}
+
+/*
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called once only during nic initialized.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ E1000_READ_REG(hw, E1000_ICR);
+ em_rxq_intr_enable(hw);
+ return 0;
+}
+
+/*
+ * It enable receive packet interrupt.
+ * @param hw
+ * Pointer to struct e1000_hw
+ *
+ * @return
+ */
+static void
+em_rxq_intr_enable(struct e1000_hw *hw)
+{
+ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_RXT0);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/*
+ * It disabled lsc interrupt.
+ * @param hw
+ * Pointer to struct e1000_hw
+ *
+ * @return
+ */
+static void
+em_lsc_intr_disable(struct e1000_hw *hw)
+{
+ E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/*
+ * It disabled receive packet interrupt.
+ * @param hw
+ * Pointer to struct e1000_hw
+ *
+ * @return
+ */
+static void
+em_rxq_intr_disable(struct e1000_hw *hw)
+{
+ E1000_READ_REG(hw, E1000_ICR);
+ E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/*
+ * It reads ICR and gets interrupt causes, check it and set a bit flag
+ * to update link status.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_em_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t icr;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ /* read-on-clear nic registers here */
+ icr = E1000_READ_REG(hw, E1000_ICR);
+ if (icr & E1000_ICR_LSC) {
+ intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+ }
+
+ return 0;
+}
+
+/*
+ * It executes link_update after knowing an interrupt is prsent.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_em_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct rte_eth_link link;
+ int ret;
+
+ if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
+ return -1;
+
+ intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
+ rte_intr_enable(intr_handle);
+
+ /* set get_link_status to check register later */
+ hw->mac.get_link_status = 1;
+ ret = eth_em_link_update(dev, 0);
+
+ /* check if link has changed */
+ if (ret < 0)
+ return 0;
+
+ rte_eth_linkstatus_get(dev, &link);
+
+ if (link.link_status) {
+ PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
+ dev->data->port_id, link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ } else {
+ PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
+ }
+ PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+
+ return 0;
+}
+
+/**
+ * Interrupt handler which shall be registered at first.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) regsitered before.
+ *
+ * @return
+ * void
+ */
+static void
+eth_em_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ eth_em_interrupt_get_status(dev);
+ eth_em_interrupt_action(dev, dev->intr_handle);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
+static int
+eth_em_led_on(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
+}
+
+static int
+eth_em_led_off(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
+}
+
+static int
+eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct e1000_hw *hw;
+ uint32_t ctrl;
+ int tx_pause;
+ int rx_pause;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ fc_conf->pause_time = hw->fc.pause_time;
+ fc_conf->high_water = hw->fc.high_water;
+ fc_conf->low_water = hw->fc.low_water;
+ fc_conf->send_xon = hw->fc.send_xon;
+ fc_conf->autoneg = hw->mac.autoneg;
+
+ /*
+ * Return rx_pause and tx_pause status according to actual setting of
+ * the TFCE and RFCE bits in the CTRL register.
+ */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ if (ctrl & E1000_CTRL_TFCE)
+ tx_pause = 1;
+ else
+ tx_pause = 0;
+
+ if (ctrl & E1000_CTRL_RFCE)
+ rx_pause = 1;
+ else
+ rx_pause = 0;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static int
+eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct e1000_hw *hw;
+ int err;
+ enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
+ e1000_fc_none,
+ e1000_fc_rx_pause,
+ e1000_fc_tx_pause,
+ e1000_fc_full
+ };
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ uint32_t rctl;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (fc_conf->autoneg != hw->mac.autoneg)
+ return -ENOTSUP;
+ rx_buf_size = em_get_rx_buffer_size(hw);
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+ /* At least reserve one Ethernet frame for watermark */
+ max_high_water = rx_buf_size - ETHER_MAX_LEN;
+ if ((fc_conf->high_water > max_high_water) ||
+ (fc_conf->high_water < fc_conf->low_water)) {
+ PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
+ PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
+ return -EINVAL;
+ }
+
+ hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
+ hw->fc.pause_time = fc_conf->pause_time;
+ hw->fc.high_water = fc_conf->high_water;
+ hw->fc.low_water = fc_conf->low_water;
+ hw->fc.send_xon = fc_conf->send_xon;
+
+ err = e1000_setup_link_generic(hw);
+ if (err == E1000_SUCCESS) {
+
+ /* check if we want to forward MAC frames - driver doesn't have native
+ * capability to do that, so we'll write the registers ourselves */
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* set or clear MFLCN.PMCF bit depending on configuration */
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ rctl |= E1000_RCTL_PMCF;
+ else
+ rctl &= ~E1000_RCTL_PMCF;
+
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+ }
+
+ PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
+ return -EIO;
+}
+
+static int
+eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, __rte_unused uint32_t pool)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ return e1000_rar_set(hw, mac_addr->addr_bytes, index);
+}
+
+static void
+eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
+{
+ uint8_t addr[ETHER_ADDR_LEN];
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ memset(addr, 0, sizeof(addr));
+
+ e1000_rar_set(hw, addr, index);
+}
+
+static int
+eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ eth_em_rar_clear(dev, 0);
+
+ return eth_em_rar_set(dev, (void *)addr, 0, 0);
+}
+
+static int
+eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct rte_eth_dev_info dev_info;
+ struct e1000_hw *hw;
+ uint32_t frame_size;
+ uint32_t rctl;
+
+ eth_em_infos_get(dev, &dev_info);
+ frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* refuse mtu that requires the support of scattered packets when this
+ * feature has not been enabled before. */
+ if (!dev->data->scattered_rx &&
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ return -EINVAL;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* switch to jumbo mode if needed */
+ if (frame_size > ETHER_MAX_LEN) {
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ rctl |= E1000_RCTL_LPE;
+ } else {
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ rctl &= ~E1000_RCTL_LPE;
+ }
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ return 0;
+}
+
+static int
+eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
+ return 0;
+}
+
+RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");
+
+/* see e1000_logs.c */
+RTE_INIT(igb_init_log)
+{
+ e1000_igb_init_log();
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/em_rxtx.c b/src/spdk/dpdk/drivers/net/e1000/em_rxtx.c
new file mode 100644
index 00000000..7d2ac4eb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/em_rxtx.c
@@ -0,0 +1,1999 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <sys/queue.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_prefetch.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_net.h>
+#include <rte_string_fns.h>
+
+#include "e1000_logs.h"
+#include "base/e1000_api.h"
+#include "e1000_ethdev.h"
+#include "base/e1000_osdep.h"
+
+#define E1000_TXD_VLAN_SHIFT 16
+
+#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
+
+#define E1000_TX_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_VLAN_PKT)
+
+#define E1000_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct em_rx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct em_tx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+ uint16_t next_id; /**< Index of next descriptor in ring. */
+ uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct em_rx_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ volatile struct e1000_rx_desc *rx_ring; /**< RX ring virtual address. */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+ volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
+ volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
+ struct em_rx_entry *sw_ring; /**< address of RX software ring. */
+ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+ uint64_t offloads; /**< Offloads of DEV_RX_OFFLOAD_* */
+ uint16_t nb_rx_desc; /**< number of RX descriptors. */
+ uint16_t rx_tail; /**< current value of RDT register. */
+ uint16_t nb_rx_hold; /**< number of held free RX desc. */
+ uint16_t rx_free_thresh; /**< max free RX desc to hold. */
+ uint16_t queue_id; /**< RX queue index. */
+ uint16_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold register. */
+ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
+};
+
+/**
+ * Hardware context number
+ */
+enum {
+ EM_CTX_0 = 0, /**< CTX0 */
+ EM_CTX_NUM = 1, /**< CTX NUM */
+};
+
+/** Offload features */
+union em_vlan_macip {
+ uint32_t data;
+ struct {
+ uint16_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint16_t vlan_tci;
+ /**< VLAN Tag Control Identifier (CPU order). */
+ } f;
+};
+
+/*
+ * Compare mask for vlan_macip_len.data,
+ * should be in sync with em_vlan_macip.f layout.
+ * */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
+/** MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
+/**
+ * Structure to check if new context need be built
+ */
+struct em_ctx_info {
+ uint64_t flags; /**< ol_flags related to context build. */
+ uint32_t cmp_mask; /**< compare mask */
+ union em_vlan_macip hdrlen; /**< L2 and L3 header lenghts */
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct em_tx_queue {
+ volatile struct e1000_data_desc *tx_ring; /**< TX ring address */
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
+ struct em_tx_entry *sw_ring; /**< virtual address of SW ring. */
+ volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
+ uint16_t nb_tx_desc; /**< number of TX descriptors. */
+ uint16_t tx_tail; /**< Current value of TDT register. */
+ /**< Start freeing TX buffers if there are less free descriptors than
+ this value. */
+ uint16_t tx_free_thresh;
+ /**< Number of TX descriptors to use before RS bit is set. */
+ uint16_t tx_rs_thresh;
+ /** Number of TX descriptors used since RS bit was set. */
+ uint16_t nb_tx_used;
+ /** Index to last TX descriptor to have been cleaned. */
+ uint16_t last_desc_cleaned;
+ /** Total number of TX descriptors ready to be allocated. */
+ uint16_t nb_tx_free;
+ uint16_t queue_id; /**< TX queue index. */
+ uint16_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold register. */
+ struct em_ctx_info ctx_cache;
+ /**< Hardware context history.*/
+ uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+};
+
+#if 1
+#define RTE_PMD_USE_PREFETCH
+#endif
+
+#ifdef RTE_PMD_USE_PREFETCH
+#define rte_em_prefetch(p) rte_prefetch0(p)
+#else
+#define rte_em_prefetch(p) do {} while(0)
+#endif
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while(0)
+#endif
+
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif /* DEFAULT_TX_FREE_THRESH */
+
+#ifndef DEFAULT_TX_RS_THRESH
+#define DEFAULT_TX_RS_THRESH 32
+#endif /* DEFAULT_TX_RS_THRESH */
+
+
+/*********************************************************************
+ *
+ * TX function
+ *
+ **********************************************************************/
+
+/*
+ * Populates TX context descriptor.
+ */
+static inline void
+em_set_xmit_ctx(struct em_tx_queue* txq,
+ volatile struct e1000_context_desc *ctx_txd,
+ uint64_t flags,
+ union em_vlan_macip hdrlen)
+{
+ uint32_t cmp_mask, cmd_len;
+ uint16_t ipcse, l2len;
+ struct e1000_context_desc ctx;
+
+ cmp_mask = 0;
+ cmd_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C;
+
+ l2len = hdrlen.f.l2_len;
+ ipcse = (uint16_t)(l2len + hdrlen.f.l3_len);
+
+ /* setup IPCS* fields */
+ ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len;
+ ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len +
+ offsetof(struct ipv4_hdr, hdr_checksum));
+
+ /*
+ * When doing checksum or TCP segmentation with IPv6 headers,
+ * IPCSE field should be set t0 0.
+ */
+ if (flags & PKT_TX_IP_CKSUM) {
+ ctx.lower_setup.ip_fields.ipcse =
+ (uint16_t)rte_cpu_to_le_16(ipcse - 1);
+ cmd_len |= E1000_TXD_CMD_IP;
+ cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+ } else {
+ ctx.lower_setup.ip_fields.ipcse = 0;
+ }
+
+ /* setup TUCS* fields */
+ ctx.upper_setup.tcp_fields.tucss = (uint8_t)ipcse;
+ ctx.upper_setup.tcp_fields.tucse = 0;
+
+ switch (flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
+ offsetof(struct udp_hdr, dgram_cksum));
+ cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+ break;
+ case PKT_TX_TCP_CKSUM:
+ ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
+ offsetof(struct tcp_hdr, cksum));
+ cmd_len |= E1000_TXD_CMD_TCP;
+ cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+ break;
+ default:
+ ctx.upper_setup.tcp_fields.tucso = 0;
+ }
+
+ ctx.cmd_and_length = rte_cpu_to_le_32(cmd_len);
+ ctx.tcp_seg_setup.data = 0;
+
+ *ctx_txd = ctx;
+
+ txq->ctx_cache.flags = flags;
+ txq->ctx_cache.cmp_mask = cmp_mask;
+ txq->ctx_cache.hdrlen = hdrlen;
+}
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ */
+static inline uint32_t
+what_ctx_update(struct em_tx_queue *txq, uint64_t flags,
+ union em_vlan_macip hdrlen)
+{
+ /* If match with the current context */
+ if (likely (txq->ctx_cache.flags == flags &&
+ ((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
+ txq->ctx_cache.cmp_mask) == 0))
+ return EM_CTX_0;
+
+ /* Mismatch */
+ return EM_CTX_NUM;
+}
+
+/* Reset transmit descriptors after they have been used */
+static inline int
+em_xmit_cleanup(struct em_tx_queue *txq)
+{
+ struct em_tx_entry *sw_ring = txq->sw_ring;
+ volatile struct e1000_data_desc *txr = txq->tx_ring;
+ uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+ uint16_t nb_tx_desc = txq->nb_tx_desc;
+ uint16_t desc_to_clean_to;
+ uint16_t nb_tx_to_clean;
+
+ /* Determine the last descriptor needing to be cleaned */
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
+ if (desc_to_clean_to >= nb_tx_desc)
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+ /* Check to make sure the last descriptor to clean is done */
+ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+ if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD))
+ {
+ PMD_TX_FREE_LOG(DEBUG,
+ "TX descriptor %4u is not done"
+ "(port=%d queue=%d)", desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+ /* Failed to clean any descriptors, better luck next time */
+ return -(1);
+ }
+
+ /* Figure out how many descriptors will be cleaned */
+ if (last_desc_cleaned > desc_to_clean_to)
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
+ else
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
+
+ PMD_TX_FREE_LOG(DEBUG,
+ "Cleaning %4u TX descriptors: %4u to %4u "
+ "(port=%d queue=%d)", nb_tx_to_clean,
+ last_desc_cleaned, desc_to_clean_to, txq->port_id,
+ txq->queue_id);
+
+ /*
+ * The last descriptor to clean is done, so that means all the
+ * descriptors from the last descriptor that was cleaned
+ * up to the last descriptor with the RS bit set
+ * are done. Only reset the threshold descriptor.
+ */
+ txr[desc_to_clean_to].upper.fields.status = 0;
+
+ /* Update the txq to reflect the last descriptor that was cleaned */
+ txq->last_desc_cleaned = desc_to_clean_to;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
+
+ /* No Error */
+ return 0;
+}
+
+static inline uint32_t
+tx_desc_cksum_flags_to_upper(uint64_t ol_flags)
+{
+ static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8};
+ static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8};
+ uint32_t tmp;
+
+ tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
+ tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
+ return tmp;
+}
+
+uint16_t
+eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct em_tx_queue *txq;
+ struct em_tx_entry *sw_ring;
+ struct em_tx_entry *txe, *txn;
+ volatile struct e1000_data_desc *txr;
+ volatile struct e1000_data_desc *txd;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint64_t buf_dma_addr;
+ uint32_t popts_spec;
+ uint32_t cmd_type_len;
+ uint16_t slen;
+ uint64_t ol_flags;
+ uint16_t tx_id;
+ uint16_t tx_last;
+ uint16_t nb_tx;
+ uint16_t nb_used;
+ uint64_t tx_ol_req;
+ uint32_t ctx;
+ uint32_t new_ctx;
+ union em_vlan_macip hdrlen;
+
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ /* Determine if the descriptor ring needs to be cleaned. */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ em_xmit_cleanup(txq);
+
+ /* TX loop */
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ new_ctx = 0;
+ tx_pkt = *tx_pkts++;
+
+ RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+ /*
+ * Determine how many (if any) context descriptors
+ * are needed for offload functionality.
+ */
+ ol_flags = tx_pkt->ol_flags;
+
+ /* If hardware offload required */
+ tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK));
+ if (tx_ol_req) {
+ hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
+ hdrlen.f.l2_len = tx_pkt->l2_len;
+ hdrlen.f.l3_len = tx_pkt->l3_len;
+ /* If new context to be built or reuse the exist ctx. */
+ ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
+
+ /* Only allocate context descriptor if required*/
+ new_ctx = (ctx == EM_CTX_NUM);
+ }
+
+ /*
+ * Keep track of how many descriptors are used this loop
+ * This will always be the number of segments + the number of
+ * Context descriptors required to transmit the packet
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
+
+ /*
+ * The number of descriptors that must be allocated for a
+ * packet is the number of segments of that packet, plus 1
+ * Context Descriptor for the hardware offload, if any.
+ * Determine the last TX descriptor to allocate in the TX ring
+ * for the packet, starting from the current position (tx_id)
+ * in the ring.
+ */
+ tx_last = (uint16_t) (tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+ " tx_first=%u tx_last=%u",
+ (unsigned) txq->port_id,
+ (unsigned) txq->queue_id,
+ (unsigned) tx_pkt->pkt_len,
+ (unsigned) tx_id,
+ (unsigned) tx_last);
+
+ /*
+ * Make sure there are enough TX descriptors available to
+ * transmit the entire packet.
+ * nb_used better be less than or equal to txq->tx_rs_thresh
+ */
+ while (unlikely (nb_used > txq->nb_tx_free)) {
+ PMD_TX_FREE_LOG(DEBUG, "Not enough free TX descriptors "
+ "nb_used=%4u nb_free=%4u "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->port_id, txq->queue_id);
+
+ if (em_xmit_cleanup(txq) != 0) {
+ /* Could not clean any descriptors */
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ }
+
+ /*
+ * By now there are enough free TX descriptors to transmit
+ * the packet.
+ */
+
+ /*
+ * Set common flags of all TX Data Descriptors.
+ *
+ * The following bits must be set in all Data Descriptors:
+ * - E1000_TXD_DTYP_DATA
+ * - E1000_TXD_DTYP_DEXT
+ *
+ * The following bits must be set in the first Data Descriptor
+ * and are ignored in the other ones:
+ * - E1000_TXD_POPTS_IXSM
+ * - E1000_TXD_POPTS_TXSM
+ *
+ * The following bits must be set in the last Data Descriptor
+ * and are ignored in the other ones:
+ * - E1000_TXD_CMD_VLE
+ * - E1000_TXD_CMD_IFCS
+ *
+ * The following bits must only be set in the last Data
+ * Descriptor:
+ * - E1000_TXD_CMD_EOP
+ *
+ * The following bits can be set in any Data Descriptor, but
+ * are only set in the last Data Descriptor:
+ * - E1000_TXD_CMD_RS
+ */
+ cmd_type_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+ E1000_TXD_CMD_IFCS;
+ popts_spec = 0;
+
+ /* Set VLAN Tag offload fields. */
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ cmd_type_len |= E1000_TXD_CMD_VLE;
+ popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
+ }
+
+ if (tx_ol_req) {
+ /*
+ * Setup the TX Context Descriptor if required
+ */
+ if (new_ctx) {
+ volatile struct e1000_context_desc *ctx_txd;
+
+ ctx_txd = (volatile struct e1000_context_desc *)
+ &txr[tx_id];
+
+ txn = &sw_ring[txe->next_id];
+ RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ em_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+ hdrlen);
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ /*
+ * Setup the TX Data Descriptor,
+ * This path will go through
+ * whatever new/reuse the context descriptor
+ */
+ popts_spec |= tx_desc_cksum_flags_to_upper(ol_flags);
+ }
+
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+
+ if (txe->mbuf != NULL)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /*
+ * Set up Transmit Data Descriptor.
+ */
+ slen = m_seg->data_len;
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+
+ txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+ txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
+ txd->upper.data = rte_cpu_to_le_32(popts_spec);
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg != NULL);
+
+ /*
+ * The last packet data descriptor needs End Of Packet (EOP)
+ */
+ cmd_type_len |= E1000_TXD_CMD_EOP;
+ txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
+
+ /* Set RS bit only on threshold packets' last descriptor */
+ if (txq->nb_tx_used >= txq->tx_rs_thresh) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "Setting RS bit on TXD id=%4u "
+ "(port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
+
+ cmd_type_len |= E1000_TXD_CMD_RS;
+
+ /* Update txq RS bit counters */
+ txq->nb_tx_used = 0;
+ }
+ txd->lower.data |= rte_cpu_to_le_32(cmd_type_len);
+ }
+end_of_tx:
+ rte_wmb();
+
+ /*
+ * Set the Transmit Descriptor Tail (TDT)
+ */
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (unsigned) txq->port_id, (unsigned) txq->queue_id,
+ (unsigned) tx_id, (unsigned) nb_tx);
+ E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
+ txq->tx_tail = tx_id;
+
+ return nb_tx;
+}
+
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+eth_em_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/*********************************************************************
+ *
+ * RX functions
+ *
+ **********************************************************************/
+
+static inline uint64_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status)
+{
+ uint64_t pkt_flags;
+
+ /* Check if VLAN present */
+ pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
+
+ return pkt_flags;
+}
+
+static inline uint64_t
+rx_desc_error_to_pkt_flags(uint32_t rx_error)
+{
+ uint64_t pkt_flags = 0;
+
+ if (rx_error & E1000_RXD_ERR_IPE)
+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ if (rx_error & E1000_RXD_ERR_TCPE)
+ pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ return pkt_flags;
+}
+
+uint16_t
+eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ volatile struct e1000_rx_desc *rx_ring;
+ volatile struct e1000_rx_desc *rxdp;
+ struct em_rx_queue *rxq;
+ struct em_rx_entry *sw_ring;
+ struct em_rx_entry *rxe;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ struct e1000_rx_desc rxd;
+ uint64_t dma_addr;
+ uint16_t pkt_len;
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint8_t status;
+
+ rxq = rx_queue;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+ while (nb_rx < nb_pkts) {
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ status = rxdp->status;
+ if (! (status & E1000_RXD_STAT_DD))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * End of packet.
+ *
+ * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
+ * likely to be invalid and to be dropped by the various
+ * validation checks performed by the network stack.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+ * This policy do not drop the packet received in the RX
+ * descriptor for which the allocation of a new mbuf failed.
+ * Thus, it allows that packet to be later retrieved if
+ * mbuf have been freed in the mean time.
+ * As a side effect, holding RX descriptors instead of
+ * systematically giving them back to the NIC may lead to
+ * RX ring exhaustion situations.
+ * However, the NIC can gracefully prevent such situations
+ * to happen by sending specific "back-pressure" flow control
+ * frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "status=0x%x pkt_len=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) status,
+ (unsigned) rte_le_to_cpu_16(rxd.length));
+
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u",
+ (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_em_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_em_prefetch(&rx_ring[rx_id]);
+ rte_em_prefetch(&sw_ring[rx_id]);
+ }
+
+ /* Rearm RXD: attach new mbuf and reset status to zero. */
+
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ rxdp->buffer_addr = dma_addr;
+ rxdp->status = 0;
+
+ /*
+ * Initialize the returned mbuf.
+ * 1) setup generic mbuf fields:
+ * - number of segments,
+ * - next segment,
+ * - packet length,
+ * - RX port identifier.
+ * 2) integrate hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
+ rxq->crc_len);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->port = rxq->port_id;
+
+ rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
+ rxm->ol_flags = rxm->ol_flags |
+ rx_desc_error_to_pkt_flags(rxd.errors);
+
+ /* Only valid if PKT_RX_VLAN set in pkt_flags */
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situtation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return nb_rx;
+}
+
+uint16_t
+eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct em_rx_queue *rxq;
+ volatile struct e1000_rx_desc *rx_ring;
+ volatile struct e1000_rx_desc *rxdp;
+ struct em_rx_entry *sw_ring;
+ struct em_rx_entry *rxe;
+ struct rte_mbuf *first_seg;
+ struct rte_mbuf *last_seg;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ struct e1000_rx_desc rxd;
+ uint64_t dma; /* Physical address of mbuf data buffer */
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint16_t data_len;
+ uint8_t status;
+
+ rxq = rx_queue;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+
+ /*
+ * Retrieve RX context of current packet, if any.
+ */
+ first_seg = rxq->pkt_first_seg;
+ last_seg = rxq->pkt_last_seg;
+
+ while (nb_rx < nb_pkts) {
+ next_desc:
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ status = rxdp->status;
+ if (! (status & E1000_RXD_STAT_DD))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * Descriptor done.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+ * This policy does not drop the packet received in the RX
+ * descriptor for which the allocation of a new mbuf failed.
+ * Thus, it allows that packet to be later retrieved if
+ * mbuf have been freed in the mean time.
+ * As a side effect, holding RX descriptors instead of
+ * systematically giving them back to the NIC may lead to
+ * RX ring exhaustion situations.
+ * However, the NIC can gracefully prevent such situations
+ * to happen by sending specific "back-pressure" flow control
+ * frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "status=0x%x data_len=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) status,
+ (unsigned) rte_le_to_cpu_16(rxd.length));
+
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_em_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_em_prefetch(&rx_ring[rx_id]);
+ rte_em_prefetch(&sw_ring[rx_id]);
+ }
+
+ /*
+ * Update RX descriptor with the physical address of the new
+ * data buffer of the new allocated mbuf.
+ */
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ rxdp->buffer_addr = dma;
+ rxdp->status = 0;
+
+ /*
+ * Set data length & data buffer address of mbuf.
+ */
+ data_len = rte_le_to_cpu_16(rxd.length);
+ rxm->data_len = data_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet and
+ * initialize its context.
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (first_seg == NULL) {
+ first_seg = rxm;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
+ } else {
+ first_seg->pkt_len += data_len;
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+
+ /*
+ * If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (! (status & E1000_RXD_STAT_EOP)) {
+ last_seg = rxm;
+ goto next_desc;
+ }
+
+ /*
+ * This is the last buffer of the received packet.
+ * If the CRC is not stripped by the hardware:
+ * - Subtract the CRC length from the total packet length.
+ * - If the last buffer only contains the whole CRC or a part
+ * of it, free the mbuf associated to the last buffer.
+ * If part of the CRC is also contained in the previous
+ * mbuf, subtract the length of that CRC part from the
+ * data length of the previous mbuf.
+ */
+ rxm->next = NULL;
+ if (unlikely(rxq->crc_len > 0)) {
+ first_seg->pkt_len -= ETHER_CRC_LEN;
+ if (data_len <= ETHER_CRC_LEN) {
+ rte_pktmbuf_free_seg(rxm);
+ first_seg->nb_segs--;
+ last_seg->data_len = (uint16_t)
+ (last_seg->data_len -
+ (ETHER_CRC_LEN - data_len));
+ last_seg->next = NULL;
+ } else
+ rxm->data_len =
+ (uint16_t) (data_len - ETHER_CRC_LEN);
+ }
+
+ /*
+ * Initialize the first mbuf of the returned packet:
+ * - RX port identifier,
+ * - hardware offload data, if any:
+ * - IP checksum flag,
+ * - error flags.
+ */
+ first_seg->port = rxq->port_id;
+
+ first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
+ first_seg->ol_flags = first_seg->ol_flags |
+ rx_desc_error_to_pkt_flags(rxd.errors);
+
+ /* Only valid if PKT_RX_VLAN set in pkt_flags */
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = first_seg;
+
+ /*
+ * Setup receipt context for a new packet.
+ */
+ first_seg = NULL;
+ }
+
+ /*
+ * Record index of the next RX descriptor to probe.
+ */
+ rxq->rx_tail = rx_id;
+
+ /*
+ * Save receive context.
+ */
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situtation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return nb_rx;
+}
+
+#define EM_MAX_BUF_SIZE 16384
+#define EM_RCTL_FLXBUF_STEP 1024
+
+static void
+em_tx_queue_release_mbufs(struct em_tx_queue *txq)
+{
+ unsigned i;
+
+ if (txq->sw_ring != NULL) {
+ for (i = 0; i != txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+static void
+em_tx_queue_release(struct em_tx_queue *txq)
+{
+ if (txq != NULL) {
+ em_tx_queue_release_mbufs(txq);
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ }
+}
+
+void
+eth_em_tx_queue_release(void *txq)
+{
+ em_tx_queue_release(txq);
+}
+
+/* (Re)set dynamic em_tx_queue fields to defaults */
+static void
+em_reset_tx_queue(struct em_tx_queue *txq)
+{
+ uint16_t i, nb_desc, prev;
+ static const struct e1000_data_desc txd_init = {
+ .upper.fields = {.status = E1000_TXD_STAT_DD},
+ };
+
+ nb_desc = txq->nb_tx_desc;
+
+ /* Initialize ring entries */
+
+ prev = (uint16_t) (nb_desc - 1);
+
+ for (i = 0; i < nb_desc; i++) {
+ txq->tx_ring[i] = txd_init;
+ txq->sw_ring[i].mbuf = NULL;
+ txq->sw_ring[i].last_id = i;
+ txq->sw_ring[prev].next_id = i;
+ prev = i;
+ }
+
+ /*
+ * Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->nb_tx_free = (uint16_t)(nb_desc - 1);
+ txq->last_desc_cleaned = (uint16_t)(nb_desc - 1);
+ txq->nb_tx_used = 0;
+ txq->tx_tail = 0;
+
+ memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
+}
+
+uint64_t
+em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+
+ RTE_SET_USED(dev);
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ return tx_offload_capa;
+}
+
+uint64_t
+em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_queue_offload_capa;
+
+ /*
+ * As only one Tx queue can be used, let per queue offloading
+ * capability be same to per port queue offloading capability
+ * for better convenience.
+ */
+ tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev);
+
+ return tx_queue_offload_capa;
+}
+
+int
+eth_em_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const struct rte_memzone *tz;
+ struct em_tx_queue *txq;
+ struct e1000_hw *hw;
+ uint32_t tsize;
+ uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ /*
+ * Validate number of transmit descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of E1000_ALIGN.
+ */
+ if (nb_desc % EM_TXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
+ return -(EINVAL);
+ }
+
+ tx_free_thresh = tx_conf->tx_free_thresh;
+ if (tx_free_thresh == 0)
+ tx_free_thresh = (uint16_t)RTE_MIN(nb_desc / 4,
+ DEFAULT_TX_FREE_THRESH);
+
+ tx_rs_thresh = tx_conf->tx_rs_thresh;
+ if (tx_rs_thresh == 0)
+ tx_rs_thresh = (uint16_t)RTE_MIN(tx_free_thresh,
+ DEFAULT_TX_RS_THRESH);
+
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
+ "number of TX descriptors minus 3. "
+ "(tx_free_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+ if (tx_rs_thresh > tx_free_thresh) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
+ "tx_free_thresh. (tx_free_thresh=%u "
+ "tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ /*
+ * If rs_bit_thresh is greater than 1, then TX WTHRESH should be
+ * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
+ * by the NIC and all descriptors are written back after the NIC
+ * accumulates WTHRESH descriptors.
+ */
+ if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
+ PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+ "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->tx_queues[queue_idx] != NULL) {
+ em_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /*
+ * Allocate TX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (tz == NULL)
+ return -ENOMEM;
+
+ /* Allocate the tx queue data structure. */
+ if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
+ RTE_CACHE_LINE_SIZE)) == NULL)
+ return -ENOMEM;
+
+ /* Allocate software ring */
+ if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
+ sizeof(txq->sw_ring[0]) * nb_desc,
+ RTE_CACHE_LINE_SIZE)) == NULL) {
+ em_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->tx_rs_thresh = tx_rs_thresh;
+ txq->pthresh = tx_conf->tx_thresh.pthresh;
+ txq->hthresh = tx_conf->tx_thresh.hthresh;
+ txq->wthresh = tx_conf->tx_thresh.wthresh;
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+
+ txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
+ txq->tx_ring_phys_addr = tz->iova;
+ txq->tx_ring = (struct e1000_data_desc *) tz->addr;
+
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+ em_reset_tx_queue(txq);
+
+ dev->data->tx_queues[queue_idx] = txq;
+ txq->offloads = offloads;
+ return 0;
+}
+
+static void
+em_rx_queue_release_mbufs(struct em_rx_queue *rxq)
+{
+ unsigned i;
+
+ if (rxq->sw_ring != NULL) {
+ for (i = 0; i != rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+static void
+em_rx_queue_release(struct em_rx_queue *rxq)
+{
+ if (rxq != NULL) {
+ em_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ }
+}
+
+void
+eth_em_rx_queue_release(void *rxq)
+{
+ em_rx_queue_release(rxq);
+}
+
+/* Reset dynamic em_rx_queue fields back to defaults */
+static void
+em_reset_rx_queue(struct em_rx_queue *rxq)
+{
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+}
+
+uint64_t
+em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_offload_capa;
+ uint32_t max_rx_pktlen;
+
+ max_rx_pktlen = em_get_max_pktlen(dev);
+
+ rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER;
+ if (max_rx_pktlen > ETHER_MAX_LEN)
+ rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ return rx_offload_capa;
+}
+
+uint64_t
+em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_queue_offload_capa;
+
+ /*
+ * As only one Rx queue can be used, let per queue offloading
+ * capability be same to per port queue offloading capability
+ * for better convenience.
+ */
+ rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev);
+
+ return rx_queue_offload_capa;
+}
+
+int
+eth_em_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ const struct rte_memzone *rz;
+ struct em_rx_queue *rxq;
+ struct e1000_hw *hw;
+ uint32_t rsize;
+ uint64_t offloads;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+ /*
+ * Validate number of receive descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of E1000_ALIGN.
+ */
+ if (nb_desc % EM_RXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
+ return -EINVAL;
+ }
+
+ /*
+ * EM devices don't support drop_en functionality
+ */
+ if (rx_conf->rx_drop_en) {
+ PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
+ "device");
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ em_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate RX ring for max possible mumber of hardware descriptors. */
+ rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC;
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rz == NULL)
+ return -ENOMEM;
+
+ /* Allocate the RX queue data structure. */
+ if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE)) == NULL)
+ return -ENOMEM;
+
+ /* Allocate software ring. */
+ if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
+ sizeof (rxq->sw_ring[0]) * nb_desc,
+ RTE_CACHE_LINE_SIZE)) == NULL) {
+ em_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->pthresh = rx_conf->rx_thresh.pthresh;
+ rxq->hthresh = rx_conf->rx_thresh.hthresh;
+ rxq->wthresh = rx_conf->rx_thresh.wthresh;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+
+ rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
+ rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
+ rxq->rx_ring_phys_addr = rz->iova;
+ rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
+
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
+
+ dev->data->rx_queues[queue_idx] = rxq;
+ em_reset_rx_queue(rxq);
+ rxq->offloads = offloads;
+
+ return 0;
+}
+
+uint32_t
+eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define EM_RXQ_SCAN_INTERVAL 4
+ volatile struct e1000_rx_desc *rxdp;
+ struct em_rx_queue *rxq;
+ uint32_t desc = 0;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+
+ while ((desc < rxq->nb_rx_desc) &&
+ (rxdp->status & E1000_RXD_STAT_DD)) {
+ desc += EM_RXQ_SCAN_INTERVAL;
+ rxdp += EM_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
+
+int
+eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile struct e1000_rx_desc *rxdp;
+ struct em_rx_queue *rxq = rx_queue;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return 0;
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ rxdp = &rxq->rx_ring[desc];
+ return !!(rxdp->status & E1000_RXD_STAT_DD);
+}
+
+int
+eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct em_rx_queue *rxq = rx_queue;
+ volatile uint8_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].status;
+ if (*status & E1000_RXD_STAT_DD)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct em_tx_queue *txq = tx_queue;
+ volatile uint8_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].upper.fields.status;
+ if (*status & E1000_TXD_STAT_DD)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+void
+em_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ struct em_tx_queue *txq;
+ struct em_rx_queue *rxq;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq != NULL) {
+ em_tx_queue_release_mbufs(txq);
+ em_reset_tx_queue(txq);
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq != NULL) {
+ em_rx_queue_release_mbufs(rxq);
+ em_reset_rx_queue(rxq);
+ }
+ }
+}
+
+void
+em_dev_free_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ eth_em_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ eth_em_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+/*
+ * Takes as input/output parameter RX buffer size.
+ * Returns (BSIZE | BSEX | FLXBUF) fields of RCTL register.
+ */
+static uint32_t
+em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz)
+{
+ /*
+ * For BSIZE & BSEX all configurable sizes are:
+ * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
+ * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
+ * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
+ * 2048: rctl |= E1000_RCTL_SZ_2048;
+ * 1024: rctl |= E1000_RCTL_SZ_1024;
+ * 512: rctl |= E1000_RCTL_SZ_512;
+ * 256: rctl |= E1000_RCTL_SZ_256;
+ */
+ static const struct {
+ uint32_t bufsz;
+ uint32_t rctl;
+ } bufsz_to_rctl[] = {
+ {16384, (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX)},
+ {8192, (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX)},
+ {4096, (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX)},
+ {2048, E1000_RCTL_SZ_2048},
+ {1024, E1000_RCTL_SZ_1024},
+ {512, E1000_RCTL_SZ_512},
+ {256, E1000_RCTL_SZ_256},
+ };
+
+ int i;
+ uint32_t rctl_bsize;
+
+ rctl_bsize = *bufsz;
+
+ /*
+ * Starting from 82571 it is possible to specify RX buffer size
+ * by RCTL.FLXBUF. When this field is different from zero, the
+ * RX buffer size = RCTL.FLXBUF * 1K
+ * (e.g. t is possible to specify RX buffer size 1,2,...,15KB).
+ * It is working ok on real HW, but by some reason doesn't work
+ * on VMware emulated 82574L.
+ * So for now, always use BSIZE/BSEX to setup RX buffer size.
+ * If you don't plan to use it on VMware emulated 82574L and
+ * would like to specify RX buffer size in 1K granularity,
+ * uncomment the following lines:
+ * ***************************************************************
+ * if (hwtyp >= e1000_82571 && hwtyp <= e1000_82574 &&
+ * rctl_bsize >= EM_RCTL_FLXBUF_STEP) {
+ * rctl_bsize /= EM_RCTL_FLXBUF_STEP;
+ * *bufsz = rctl_bsize;
+ * return (rctl_bsize << E1000_RCTL_FLXBUF_SHIFT &
+ * E1000_RCTL_FLXBUF_MASK);
+ * }
+ * ***************************************************************
+ */
+
+ for (i = 0; i != sizeof(bufsz_to_rctl) / sizeof(bufsz_to_rctl[0]);
+ i++) {
+ if (rctl_bsize >= bufsz_to_rctl[i].bufsz) {
+ *bufsz = bufsz_to_rctl[i].bufsz;
+ return bufsz_to_rctl[i].rctl;
+ }
+ }
+
+ /* Should never happen. */
+ return -EINVAL;
+}
+
+static int
+em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
+{
+ struct em_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ unsigned i;
+ static const struct e1000_rx_desc rxd_init = {
+ .buffer_addr = 0,
+ };
+
+ /* Initialize software ring entries */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile struct e1000_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
+ "queue_id=%hu", rxq->queue_id);
+ return -ENOMEM;
+ }
+
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ /* Clear HW ring memory */
+ rxq->rx_ring[i] = rxd_init;
+
+ rxd = &rxq->rx_ring[i];
+ rxd->buffer_addr = dma_addr;
+ rxe[i].mbuf = mbuf;
+ }
+
+ return 0;
+}
+
+/*********************************************************************
+ *
+ * Enable receive unit.
+ *
+ **********************************************************************/
+int
+eth_em_rx_init(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+ struct em_rx_queue *rxq;
+ struct rte_eth_rxmode *rxmode;
+ uint32_t rctl;
+ uint32_t rfctl;
+ uint32_t rxcsum;
+ uint32_t rctl_bsize;
+ uint16_t i;
+ int ret;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ rxmode = &dev->data->dev_conf.rxmode;
+
+ /*
+ * Make sure receives are disabled while setting
+ * up the descriptor ring.
+ */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+
+ /* Disable extended descriptor type. */
+ rfctl &= ~E1000_RFCTL_EXTEN;
+ /* Disable accelerated acknowledge */
+ if (hw->mac.type == e1000_82574)
+ rfctl |= E1000_RFCTL_ACK_DIS;
+
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+
+ /*
+ * XXX TEMPORARY WORKAROUND: on some systems with 82573
+ * long latencies are observed, like Lenovo X60. This
+ * change eliminates the problem, but since having positive
+ * values in RDTR is a known source of problems on other
+ * platforms another solution is being sought.
+ */
+ if (hw->mac.type == e1000_82573)
+ E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
+
+ dev->rx_pkt_burst = (eth_rx_burst_t)eth_em_recv_pkts;
+
+ /* Determine RX bufsize. */
+ rctl_bsize = EM_MAX_BUF_SIZE;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ uint32_t buf_size;
+
+ rxq = dev->data->rx_queues[i];
+ buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+ rctl_bsize = RTE_MIN(rctl_bsize, buf_size);
+ }
+
+ rctl |= em_rctl_bsize(hw->mac.type, &rctl_bsize);
+
+ /* Configure and enable each RX queue. */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ uint64_t bus_addr;
+ uint32_t rxdctl;
+
+ rxq = dev->data->rx_queues[i];
+
+ /* Allocate buffers for descriptor rings and setup queue */
+ ret = em_alloc_rx_queue_mbufs(rxq);
+ if (ret)
+ return ret;
+
+ /*
+ * Reset crc_len in case it was changed after queue setup by a
+ * call to configure
+ */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+
+ bus_addr = rxq->rx_ring_phys_addr;
+ E1000_WRITE_REG(hw, E1000_RDLEN(i),
+ rxq->nb_rx_desc *
+ sizeof(*rxq->rx_ring));
+ E1000_WRITE_REG(hw, E1000_RDBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
+
+ E1000_WRITE_REG(hw, E1000_RDH(i), 0);
+ E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
+
+ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
+ rxdctl &= 0xFE000000;
+ rxdctl |= rxq->pthresh & 0x3F;
+ rxdctl |= (rxq->hthresh & 0x3F) << 8;
+ rxdctl |= (rxq->wthresh & 0x3F) << 16;
+ rxdctl |= E1000_RXDCTL_GRAN;
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
+
+ /*
+ * Due to EM devices not having any sort of hardware
+ * limit for packet length, jumbo frame of any size
+ * can be accepted, thus we have to enable scattered
+ * rx if jumbo frames are enabled (or if buffer size
+ * is too small to accommodate non-jumbo packets)
+ * to avoid splitting packets that don't fit into
+ * one buffer.
+ */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
+ rctl_bsize < ETHER_MAX_LEN) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->rx_pkt_burst =
+ (eth_rx_burst_t)eth_em_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+ }
+
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+
+ /*
+ * Setup the Checksum Register.
+ * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
+ */
+ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ rxcsum |= E1000_RXCSUM_IPOFL;
+ else
+ rxcsum &= ~E1000_RXCSUM_IPOFL;
+ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+
+ /* No MRQ or RSS support for now */
+
+ /* Set early receive threshold on appropriate hw */
+ if ((hw->mac.type == e1000_ich9lan ||
+ hw->mac.type == e1000_pch2lan ||
+ hw->mac.type == e1000_ich10lan) &&
+ rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
+ E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
+ E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
+ }
+
+ if (hw->mac.type == e1000_pch2lan) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
+ else
+ e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
+ }
+
+ /* Setup the Receive Control Register. */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+ else
+ rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
+
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
+ E1000_RCTL_RDMTS_HALF |
+ (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ /* Make sure VLAN Filters are off. */
+ rctl &= ~E1000_RCTL_VFE;
+ /* Don't store bad packets. */
+ rctl &= ~E1000_RCTL_SBP;
+ /* Legacy descriptor type. */
+ rctl &= ~E1000_RCTL_DTYP_MASK;
+
+ /*
+ * Configure support of jumbo frames, if any.
+ */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ rctl |= E1000_RCTL_LPE;
+ else
+ rctl &= ~E1000_RCTL_LPE;
+
+ /* Enable Receives. */
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ return 0;
+}
+
+/*********************************************************************
+ *
+ * Enable transmit unit.
+ *
+ **********************************************************************/
+void
+eth_em_tx_init(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+ struct em_tx_queue *txq;
+ uint32_t tctl;
+ uint32_t txdctl;
+ uint16_t i;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Setup the Base and Length of the Tx Descriptor Rings. */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ uint64_t bus_addr;
+
+ txq = dev->data->tx_queues[i];
+ bus_addr = txq->tx_ring_phys_addr;
+ E1000_WRITE_REG(hw, E1000_TDLEN(i),
+ txq->nb_tx_desc *
+ sizeof(*txq->tx_ring));
+ E1000_WRITE_REG(hw, E1000_TDBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers. */
+ E1000_WRITE_REG(hw, E1000_TDT(i), 0);
+ E1000_WRITE_REG(hw, E1000_TDH(i), 0);
+
+ /* Setup Transmit threshold registers. */
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
+ /*
+ * bit 22 is reserved, on some models should always be 0,
+ * on others - always 1.
+ */
+ txdctl &= E1000_TXDCTL_COUNT_DESC;
+ txdctl |= txq->pthresh & 0x3F;
+ txdctl |= (txq->hthresh & 0x3F) << 8;
+ txdctl |= (txq->wthresh & 0x3F) << 16;
+ txdctl |= E1000_TXDCTL_GRAN;
+ E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
+ }
+
+ /* Program the Transmit Control Register. */
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
+
+ /* This write will effectively turn on the transmit unit. */
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+}
+
+void
+em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct em_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.offloads = rxq->offloads;
+}
+
+void
+em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct em_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.offloads = txq->offloads;
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/igb_ethdev.c b/src/spdk/dpdk/drivers/net/e1000/igb_ethdev.c
new file mode 100644
index 00000000..64dfe683
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/igb_ethdev.c
@@ -0,0 +1,5692 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include "e1000_logs.h"
+#include "base/e1000_api.h"
+#include "e1000_ethdev.h"
+#include "igb_regs.h"
+
+/*
+ * Default values for port configuration
+ */
+#define IGB_DEFAULT_RX_FREE_THRESH 32
+
+#define IGB_DEFAULT_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
+#define IGB_DEFAULT_RX_HTHRESH 8
+#define IGB_DEFAULT_RX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 4)
+
+#define IGB_DEFAULT_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
+#define IGB_DEFAULT_TX_HTHRESH 1
+#define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 16)
+
+/* Bit shift and mask */
+#define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
+#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
+#define IGB_8_BIT_WIDTH CHAR_BIT
+#define IGB_8_BIT_MASK UINT8_MAX
+
+/* Additional timesync values. */
+#define E1000_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+#define E1000_ETQF_FILTER_1588 3
+#define IGB_82576_TSYNC_SHIFT 16
+#define E1000_INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
+#define E1000_INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
+#define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000
+
+#define E1000_VTIVAR_MISC 0x01740
+#define E1000_VTIVAR_MISC_MASK 0xFF
+#define E1000_VTIVAR_VALID 0x80
+#define E1000_VTIVAR_MISC_MAILBOX 0
+#define E1000_VTIVAR_MISC_INTR_MASK 0x3
+
+/* External VLAN Enable bit mask */
+#define E1000_CTRL_EXT_EXT_VLAN (1 << 26)
+
+/* External VLAN Ether Type bit mask and shift */
+#define E1000_VET_VET_EXT 0xFFFF0000
+#define E1000_VET_VET_EXT_SHIFT 16
+
+static int eth_igb_configure(struct rte_eth_dev *dev);
+static int eth_igb_start(struct rte_eth_dev *dev);
+static void eth_igb_stop(struct rte_eth_dev *dev);
+static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
+static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
+static void eth_igb_close(struct rte_eth_dev *dev);
+static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
+static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
+static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
+static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
+static int eth_igb_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int eth_igb_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *rte_stats);
+static int eth_igb_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned n);
+static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev,
+ const uint64_t *ids,
+ uint64_t *values, unsigned int n);
+static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int size);
+static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+ unsigned int limit);
+static void eth_igb_stats_reset(struct rte_eth_dev *dev);
+static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
+static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
+ char *fw_version, size_t fw_size);
+static void eth_igb_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
+static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
+static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
+static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *handle);
+static void eth_igb_interrupt_handler(void *param);
+static int igb_hardware_init(struct e1000_hw *hw);
+static void igb_hw_control_acquire(struct e1000_hw *hw);
+static void igb_hw_control_release(struct e1000_hw *hw);
+static void igb_init_manageability(struct e1000_hw *hw);
+static void igb_release_manageability(struct e1000_hw *hw);
+
+static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
+static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid_id);
+static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+
+static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
+
+static int eth_igb_led_on(struct rte_eth_dev *dev);
+static int eth_igb_led_off(struct rte_eth_dev *dev);
+
+static void igb_intr_disable(struct e1000_hw *hw);
+static int igb_get_rx_buffer_size(struct e1000_hw *hw);
+static int eth_igb_rar_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
+static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr);
+
+static void igbvf_intr_disable(struct e1000_hw *hw);
+static int igbvf_dev_configure(struct rte_eth_dev *dev);
+static int igbvf_dev_start(struct rte_eth_dev *dev);
+static void igbvf_dev_stop(struct rte_eth_dev *dev);
+static void igbvf_dev_close(struct rte_eth_dev *dev);
+static void igbvf_promiscuous_enable(struct rte_eth_dev *dev);
+static void igbvf_promiscuous_disable(struct rte_eth_dev *dev);
+static void igbvf_allmulticast_enable(struct rte_eth_dev *dev);
+static void igbvf_allmulticast_disable(struct rte_eth_dev *dev);
+static int eth_igbvf_link_update(struct e1000_hw *hw);
+static int eth_igbvf_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *rte_stats);
+static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned n);
+static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit);
+static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
+static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
+static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr);
+static int igbvf_get_reg_length(struct rte_eth_dev *dev);
+static int igbvf_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs);
+
+static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+
+static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter);
+static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter);
+static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter);
+static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
+ struct rte_eth_flex_filter *filter);
+static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter);
+static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter);
+static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *filter);
+static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter);
+static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
+static int eth_igb_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs);
+static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
+static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
+static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
+static int eth_igb_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+static int igb_timesync_enable(struct rte_eth_dev *dev);
+static int igb_timesync_disable(struct rte_eth_dev *dev);
+static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int igb_timesync_read_time(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int igb_timesync_write_time(struct rte_eth_dev *dev,
+ const struct timespec *timestamp);
+static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector);
+static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
+ uint8_t index, uint8_t offset);
+static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
+static void eth_igbvf_interrupt_handler(void *param);
+static void igbvf_mbx_process(struct rte_eth_dev *dev);
+static int igb_filter_restore(struct rte_eth_dev *dev);
+
+/*
+ * Define VF Stats MACRO for Non "cleared on read" register
+ */
+#define UPDATE_VF_STAT(reg, last, cur) \
+{ \
+ u32 latest = E1000_READ_REG(hw, reg); \
+ cur += (latest - last) & UINT_MAX; \
+ last = latest; \
+}
+
+#define IGB_FC_PAUSE_TIME 0x0680
+#define IGB_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
+#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
+
+#define IGBVF_PMD_NAME "rte_igbvf_pmd" /* PMD name */
+
+static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_igb_map[] = {
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },
+
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },
+
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },
+
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+/*
+ * The set of PCI devices this driver supports (for 82576&I350 VF)
+ */
+static const struct rte_pci_id pci_id_igbvf_map[] = {
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct rte_eth_desc_lim rx_desc_lim = {
+ .nb_max = E1000_MAX_RING_DESC,
+ .nb_min = E1000_MIN_RING_DESC,
+ .nb_align = IGB_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+ .nb_max = E1000_MAX_RING_DESC,
+ .nb_min = E1000_MIN_RING_DESC,
+ .nb_align = IGB_RXD_ALIGN,
+ .nb_seg_max = IGB_TX_MAX_SEG,
+ .nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
+};
+
+static const struct eth_dev_ops eth_igb_ops = {
+ .dev_configure = eth_igb_configure,
+ .dev_start = eth_igb_start,
+ .dev_stop = eth_igb_stop,
+ .dev_set_link_up = eth_igb_dev_set_link_up,
+ .dev_set_link_down = eth_igb_dev_set_link_down,
+ .dev_close = eth_igb_close,
+ .promiscuous_enable = eth_igb_promiscuous_enable,
+ .promiscuous_disable = eth_igb_promiscuous_disable,
+ .allmulticast_enable = eth_igb_allmulticast_enable,
+ .allmulticast_disable = eth_igb_allmulticast_disable,
+ .link_update = eth_igb_link_update,
+ .stats_get = eth_igb_stats_get,
+ .xstats_get = eth_igb_xstats_get,
+ .xstats_get_by_id = eth_igb_xstats_get_by_id,
+ .xstats_get_names_by_id = eth_igb_xstats_get_names_by_id,
+ .xstats_get_names = eth_igb_xstats_get_names,
+ .stats_reset = eth_igb_stats_reset,
+ .xstats_reset = eth_igb_xstats_reset,
+ .fw_version_get = eth_igb_fw_version_get,
+ .dev_infos_get = eth_igb_infos_get,
+ .dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
+ .mtu_set = eth_igb_mtu_set,
+ .vlan_filter_set = eth_igb_vlan_filter_set,
+ .vlan_tpid_set = eth_igb_vlan_tpid_set,
+ .vlan_offload_set = eth_igb_vlan_offload_set,
+ .rx_queue_setup = eth_igb_rx_queue_setup,
+ .rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
+ .rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
+ .rx_queue_release = eth_igb_rx_queue_release,
+ .rx_queue_count = eth_igb_rx_queue_count,
+ .rx_descriptor_done = eth_igb_rx_descriptor_done,
+ .rx_descriptor_status = eth_igb_rx_descriptor_status,
+ .tx_descriptor_status = eth_igb_tx_descriptor_status,
+ .tx_queue_setup = eth_igb_tx_queue_setup,
+ .tx_queue_release = eth_igb_tx_queue_release,
+ .tx_done_cleanup = eth_igb_tx_done_cleanup,
+ .dev_led_on = eth_igb_led_on,
+ .dev_led_off = eth_igb_led_off,
+ .flow_ctrl_get = eth_igb_flow_ctrl_get,
+ .flow_ctrl_set = eth_igb_flow_ctrl_set,
+ .mac_addr_add = eth_igb_rar_set,
+ .mac_addr_remove = eth_igb_rar_clear,
+ .mac_addr_set = eth_igb_default_mac_addr_set,
+ .reta_update = eth_igb_rss_reta_update,
+ .reta_query = eth_igb_rss_reta_query,
+ .rss_hash_update = eth_igb_rss_hash_update,
+ .rss_hash_conf_get = eth_igb_rss_hash_conf_get,
+ .filter_ctrl = eth_igb_filter_ctrl,
+ .set_mc_addr_list = eth_igb_set_mc_addr_list,
+ .rxq_info_get = igb_rxq_info_get,
+ .txq_info_get = igb_txq_info_get,
+ .timesync_enable = igb_timesync_enable,
+ .timesync_disable = igb_timesync_disable,
+ .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
+ .get_reg = eth_igb_get_regs,
+ .get_eeprom_length = eth_igb_get_eeprom_length,
+ .get_eeprom = eth_igb_get_eeprom,
+ .set_eeprom = eth_igb_set_eeprom,
+ .get_module_info = eth_igb_get_module_info,
+ .get_module_eeprom = eth_igb_get_module_eeprom,
+ .timesync_adjust_time = igb_timesync_adjust_time,
+ .timesync_read_time = igb_timesync_read_time,
+ .timesync_write_time = igb_timesync_write_time,
+};
+
+/*
+ * dev_ops for virtual function, bare necessities for basic vf
+ * operation have been implemented
+ */
+static const struct eth_dev_ops igbvf_eth_dev_ops = {
+ .dev_configure = igbvf_dev_configure,
+ .dev_start = igbvf_dev_start,
+ .dev_stop = igbvf_dev_stop,
+ .dev_close = igbvf_dev_close,
+ .promiscuous_enable = igbvf_promiscuous_enable,
+ .promiscuous_disable = igbvf_promiscuous_disable,
+ .allmulticast_enable = igbvf_allmulticast_enable,
+ .allmulticast_disable = igbvf_allmulticast_disable,
+ .link_update = eth_igb_link_update,
+ .stats_get = eth_igbvf_stats_get,
+ .xstats_get = eth_igbvf_xstats_get,
+ .xstats_get_names = eth_igbvf_xstats_get_names,
+ .stats_reset = eth_igbvf_stats_reset,
+ .xstats_reset = eth_igbvf_stats_reset,
+ .vlan_filter_set = igbvf_vlan_filter_set,
+ .dev_infos_get = eth_igbvf_infos_get,
+ .dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
+ .rx_queue_setup = eth_igb_rx_queue_setup,
+ .rx_queue_release = eth_igb_rx_queue_release,
+ .rx_descriptor_done = eth_igb_rx_descriptor_done,
+ .rx_descriptor_status = eth_igb_rx_descriptor_status,
+ .tx_descriptor_status = eth_igb_tx_descriptor_status,
+ .tx_queue_setup = eth_igb_tx_queue_setup,
+ .tx_queue_release = eth_igb_tx_queue_release,
+ .set_mc_addr_list = eth_igb_set_mc_addr_list,
+ .rxq_info_get = igb_rxq_info_get,
+ .txq_info_get = igb_txq_info_get,
+ .mac_addr_set = igbvf_default_mac_addr_set,
+ .get_reg = igbvf_get_regs,
+};
+
+/* store statistics names and its offset in stats structure */
+struct rte_igb_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
+ {"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
+ {"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
+ {"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
+ {"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
+ {"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
+ {"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
+ {"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
+ ecol)},
+ {"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
+ {"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
+ {"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
+ {"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
+ {"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
+ {"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
+ {"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
+ {"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
+ {"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
+ {"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
+ {"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
+ fcruc)},
+ {"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
+ {"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
+ {"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
+ {"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
+ {"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
+ prc1023)},
+ {"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
+ prc1522)},
+ {"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
+ {"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
+ {"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
+ {"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
+ {"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
+ {"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
+ {"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
+ {"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
+ {"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
+ {"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
+ {"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
+ {"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
+ {"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
+ {"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
+ {"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
+ {"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
+ {"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
+ {"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
+ ptc1023)},
+ {"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats,
+ ptc1522)},
+ {"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
+ {"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
+ {"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
+ {"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
+ {"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
+ {"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
+ {"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},
+
+ {"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
+};
+
+#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
+ sizeof(rte_igb_stats_strings[0]))
+
+static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
+ {"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
+ {"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
+ {"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
+ {"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
+ {"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
+};
+
+#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
+ sizeof(rte_igbvf_stats_strings[0]))
+
+
+static inline void
+igb_intr_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
+ E1000_WRITE_FLUSH(hw);
+}
+
+static void
+igb_intr_disable(struct e1000_hw *hw)
+{
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+}
+
+static inline void
+igbvf_intr_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* only for mailbox */
+ E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
+ E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
+ E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/* only for mailbox now. If RX/TX needed, should extend this function. */
+static void
+igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
+{
+ uint32_t tmp = 0;
+
+ /* mailbox */
+ tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
+ tmp |= E1000_VTIVAR_VALID;
+ E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
+}
+
+static void
+eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Configure VF other cause ivar */
+ igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
+}
+
+static inline int32_t
+igb_pf_reset_hw(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext;
+ int32_t status;
+
+ status = e1000_reset_hw(hw);
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ return status;
+}
+
+static void
+igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+
+ e1000_set_mac_type(hw);
+
+ /* need to check if it is a vf device below */
+}
+
+static int
+igb_reset_swfw_lock(struct e1000_hw *hw)
+{
+ int ret_val;
+
+ /*
+ * Do mac ops initialization manually here, since we will need
+ * some function pointers set by this call.
+ */
+ ret_val = e1000_init_mac_params(hw);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * SMBI lock should not fail in this early stage. If this is the case,
+ * it is due to an improper exit of the application.
+ * So force the release of the faulty lock.
+ */
+ if (e1000_get_hw_semaphore_generic(hw) < 0) {
+ PMD_DRV_LOG(DEBUG, "SMBI lock released");
+ }
+ e1000_put_hw_semaphore_generic(hw);
+
+ if (hw->mac.ops.acquire_swfw_sync != NULL) {
+ uint16_t mask;
+
+ /*
+ * Phy lock should not fail in this early stage. If this is the case,
+ * it is due to an improper exit of the application.
+ * So force the release of the faulty lock.
+ */
+ mask = E1000_SWFW_PHY0_SM << hw->bus.func;
+ if (hw->bus.func > E1000_FUNC_1)
+ mask <<= 2;
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
+ PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
+ hw->bus.func);
+ }
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ /*
+ * This one is more tricky since it is common to all ports; but
+ * swfw_sync retries last long enough (1s) to be almost sure that if
+ * lock can not be taken it is due to an improper lock of the
+ * semaphore.
+ */
+ mask = E1000_SWFW_EEP_SM;
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
+ PMD_DRV_LOG(DEBUG, "SWFW common locks released");
+ }
+ hw->mac.ops.release_swfw_sync(hw, mask);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/* Remove all ntuple filters of the device */
+static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct e1000_5tuple_filter *p_5tuple;
+ struct e1000_2tuple_filter *p_2tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+ TAILQ_REMOVE(&filter_info->fivetuple_list,
+ p_5tuple, entries);
+ rte_free(p_5tuple);
+ }
+ filter_info->fivetuple_mask = 0;
+ while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) {
+ TAILQ_REMOVE(&filter_info->twotuple_list,
+ p_2tuple, entries);
+ rte_free(p_2tuple);
+ }
+ filter_info->twotuple_mask = 0;
+
+ return 0;
+}
+
+/* Remove all flex filters of the device */
+static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct e1000_flex_filter *p_flex;
+
+ while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
+ TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
+ rte_free(p_flex);
+ }
+ filter_info->flex_mask = 0;
+
+ return 0;
+}
+
+static int
+eth_igb_dev_init(struct rte_eth_dev *eth_dev)
+{
+ int error = 0;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+
+ uint32_t ctrl_ext;
+
+ eth_dev->dev_ops = &eth_igb_ops;
+ eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
+ eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
+ return 0;
+ }
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
+
+ igb_identify_hardware(eth_dev, pci_dev);
+ if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
+ error = -EIO;
+ goto err_late;
+ }
+
+ e1000_get_bus_info(hw);
+
+ /* Reset any pending lock */
+ if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
+ error = -EIO;
+ goto err_late;
+ }
+
+ /* Finish initialization */
+ if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
+ error = -EIO;
+ goto err_late;
+ }
+
+ hw->mac.autoneg = 1;
+ hw->phy.autoneg_wait_to_complete = 0;
+ hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+
+ /* Copper options */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ hw->phy.mdix = 0; /* AUTO_ALL_MODES */
+ hw->phy.disable_polarity_correction = 0;
+ hw->phy.ms_type = e1000_ms_hw_default;
+ }
+
+ /*
+ * Start from a known state, this is important in reading the nvm
+ * and mac from that.
+ */
+ igb_pf_reset_hw(hw);
+
+ /* Make sure we have a good EEPROM before we read from it */
+ if (e1000_validate_nvm_checksum(hw) < 0) {
+ /*
+ * Some PCI-E parts fail the first check due to
+ * the link being in sleep state, call it again,
+ * if it fails a second time its a real issue.
+ */
+ if (e1000_validate_nvm_checksum(hw) < 0) {
+ PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
+ error = -EIO;
+ goto err_late;
+ }
+ }
+
+ /* Read the permanent MAC address out of the EEPROM */
+ if (e1000_read_mac_addr(hw) != 0) {
+ PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
+ error = -EIO;
+ goto err_late;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("e1000",
+ ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+ "store MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ error = -ENOMEM;
+ goto err_late;
+ }
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
+
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ /* Now initialize the hardware */
+ if (igb_hardware_init(hw) != 0) {
+ PMD_INIT_LOG(ERR, "Hardware initialization failed");
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ error = -ENODEV;
+ goto err_late;
+ }
+ hw->mac.get_link_status = 1;
+ adapter->stopped = 0;
+
+ /* Indicate SOL/IDER usage */
+ if (e1000_check_reset_block(hw) < 0) {
+ PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
+ "SOL/IDER session");
+ }
+
+ /* initialize PF if max_vfs not zero */
+ igb_pf_host_init(eth_dev);
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ eth_igb_interrupt_handler,
+ (void *)eth_dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(&pci_dev->intr_handle);
+
+ /* enable support intr */
+ igb_intr_enable(eth_dev);
+
+ /* initialize filter info */
+ memset(filter_info, 0,
+ sizeof(struct e1000_filter_info));
+
+ TAILQ_INIT(&filter_info->flex_list);
+ TAILQ_INIT(&filter_info->twotuple_list);
+ TAILQ_INIT(&filter_info->fivetuple_list);
+
+ TAILQ_INIT(&igb_filter_ntuple_list);
+ TAILQ_INIT(&igb_filter_ethertype_list);
+ TAILQ_INIT(&igb_filter_syn_list);
+ TAILQ_INIT(&igb_filter_flex_list);
+ TAILQ_INIT(&igb_filter_rss_list);
+ TAILQ_INIT(&igb_flow_list);
+
+ return 0;
+
+err_late:
+ igb_hw_control_release(hw);
+
+ return error;
+}
+
+static int
+eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
+ struct e1000_hw *hw;
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ intr_handle = &pci_dev->intr_handle;
+
+ if (adapter->stopped == 0)
+ eth_igb_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ /* Reset any pending lock */
+ igb_reset_swfw_lock(hw);
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ /* uninitialize PF if max_vfs not zero */
+ igb_pf_host_uninit(eth_dev);
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ eth_igb_interrupt_handler, eth_dev);
+
+ /* clear the SYN filter info */
+ filter_info->syn_info = 0;
+
+ /* clear the ethertype filters info */
+ filter_info->ethertype_mask = 0;
+ memset(filter_info->ethertype_filters, 0,
+ E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));
+
+ /* clear the rss filter info */
+ memset(&filter_info->rss_info, 0,
+ sizeof(struct igb_rte_flow_rss_conf));
+
+ /* remove all ntuple filters of the device */
+ igb_ntuple_filter_uninit(eth_dev);
+
+ /* remove all flex filters of the device */
+ igb_flex_filter_uninit(eth_dev);
+
+ /* clear all the filters list */
+ igb_filterlist_flush(eth_dev);
+
+ return 0;
+}
+
+/*
+ * Virtual Function device init
+ */
+static int
+eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int diag;
+ struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &igbvf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
+ eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
+ return 0;
+ }
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ adapter->stopped = 0;
+
+ /* Initialize the shared code (base driver) */
+ diag = e1000_setup_init_funcs(hw, TRUE);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
+ diag);
+ return -EIO;
+ }
+
+ /* init_mailbox_params */
+ hw->mbx.ops.init_params(hw);
+
+ /* Disable the interrupts for VF */
+ igbvf_intr_disable(hw);
+
+ diag = hw->mac.ops.reset_hw(hw);
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
+ hw->mac.rar_entry_count, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC "
+ "addresses",
+ ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ return -ENOMEM;
+ }
+
+ /* Generate a random MAC address, if none was assigned by PF. */
+ if (is_zero_ether_addr(perm_addr)) {
+ eth_random_addr(perm_addr->addr_bytes);
+ PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
+ PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ perm_addr->addr_bytes[0],
+ perm_addr->addr_bytes[1],
+ perm_addr->addr_bytes[2],
+ perm_addr->addr_bytes[3],
+ perm_addr->addr_bytes[4],
+ perm_addr->addr_bytes[5]);
+ }
+
+ diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
+ if (diag) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ return diag;
+ }
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
+ "mac.type=%s",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id, "igb_mac_82576_vf");
+
+ intr_handle = &pci_dev->intr_handle;
+ rte_intr_callback_register(intr_handle,
+ eth_igbvf_interrupt_handler, eth_dev);
+
+ return 0;
+}
+
+static int
+eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (adapter->stopped == 0)
+ igbvf_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ eth_igbvf_interrupt_handler,
+ (void *)eth_dev);
+
+ return 0;
+}
+
+static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct e1000_adapter), eth_igb_dev_init);
+}
+
+static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit);
+}
+
+static struct rte_pci_driver rte_igb_pmd = {
+ .id_table = pci_id_igb_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = eth_igb_pci_probe,
+ .remove = eth_igb_pci_remove,
+};
+
+
+static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct e1000_adapter), eth_igbvf_dev_init);
+}
+
+static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit);
+}
+
+/*
+ * virtual function driver struct
+ */
+static struct rte_pci_driver rte_igbvf_pmd = {
+ .id_table = pci_id_igbvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = eth_igbvf_pci_probe,
+ .remove = eth_igbvf_pci_remove,
+};
+
+static void
+igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ /* RCTL: enable VLAN filter since VMDq always use VLAN filter */
+ uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static int
+igb_check_mq_mode(struct rte_eth_dev *dev)
+{
+ enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+
+ if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
+ tx_mq_mode == ETH_MQ_TX_DCB ||
+ tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ PMD_INIT_LOG(ERR, "DCB mode is not supported.");
+ return -EINVAL;
+ }
+ if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+ /* Check multi-queue mode.
+ * To no break software we accept ETH_MQ_RX_NONE as this might
+ * be used to turn off VLAN filter.
+ */
+
+ if (rx_mq_mode == ETH_MQ_RX_NONE ||
+ rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+ } else {
+ /* Only support one queue on VFs.
+ * RSS together with SRIOV is not supported.
+ */
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " wrong mq_mode rx %d.",
+ rx_mq_mode);
+ return -EINVAL;
+ }
+ /* TX mode is not used here, so mode might be ignored.*/
+ if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+ /* SRIOV only works in VMDq enable mode */
+ PMD_INIT_LOG(WARNING, "SRIOV is active,"
+ " TX mode %d is not supported. "
+ " Driver will behave as %d mode.",
+ tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
+ }
+
+ /* check valid queue number */
+ if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " only support one queue on VFs.");
+ return -EINVAL;
+ }
+ } else {
+ /* To no break software that set invalid mode, only display
+ * warning if invalid mode is used.
+ */
+ if (rx_mq_mode != ETH_MQ_RX_NONE &&
+ rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
+ rx_mq_mode != ETH_MQ_RX_RSS) {
+ /* RSS together with VMDq not supported*/
+ PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
+ rx_mq_mode);
+ return -EINVAL;
+ }
+
+ if (tx_mq_mode != ETH_MQ_TX_NONE &&
+ tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+ PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
+ " Due to txmode is meaningless in this"
+ " driver, just ignore.",
+ tx_mq_mode);
+ }
+ }
+ return 0;
+}
+
+static int
+eth_igb_configure(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* multipe queue mode checking */
+ ret = igb_check_mq_mode(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
+ ret);
+ return ret;
+ }
+
+ intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+static void
+eth_igb_rxtx_control(struct rte_eth_dev *dev,
+ bool enable)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tctl, rctl;
+
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ if (enable) {
+ /* enable Tx/Rx */
+ tctl |= E1000_TCTL_EN;
+ rctl |= E1000_RCTL_EN;
+ } else {
+ /* disable Tx/Rx */
+ tctl &= ~E1000_TCTL_EN;
+ rctl &= ~E1000_RCTL_EN;
+ }
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+}
+
+static int
+eth_igb_start(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int ret, mask;
+ uint32_t intr_vector = 0;
+ uint32_t ctrl_ext;
+ uint32_t *speeds;
+ int num_speeds;
+ bool autoneg;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* Power up the phy. Needed to make the link go Up */
+ eth_igb_dev_set_link_up(dev);
+
+ /*
+ * Packet Buffer Allocation (PBA)
+ * Writing PBA sets the receive portion of the buffer
+ * the remainder is used for the transmit buffer.
+ */
+ if (hw->mac.type == e1000_82575) {
+ uint32_t pba;
+
+ pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
+ E1000_WRITE_REG(hw, E1000_PBA, pba);
+ }
+
+ /* Put the address into the Receive Address Array */
+ e1000_rar_set(hw, hw->mac.addr, 0);
+
+ /* Initialize the hardware */
+ if (igb_hardware_init(hw)) {
+ PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
+ return -EIO;
+ }
+ adapter->stopped = 0;
+
+ E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ /* configure PF module if SRIOV enabled */
+ igb_pf_host_configure(dev);
+
+ /* check and configure queue intr-vector mapping */
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ /* confiugre msix for rx interrupt */
+ eth_igb_configure_msix_intr(dev);
+
+ /* Configure for OS presence */
+ igb_init_manageability(hw);
+
+ eth_igb_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ ret = eth_igb_rx_init(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ igb_dev_clear_queues(dev);
+ return ret;
+ }
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ /*
+ * VLAN Offload Settings
+ */
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ ETH_VLAN_EXTEND_MASK;
+ ret = eth_igb_vlan_offload_set(dev, mask);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to set vlan offload");
+ igb_dev_clear_queues(dev);
+ return ret;
+ }
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ /* Enable VLAN filter since VMDq always use VLAN filter */
+ igb_vmdq_vlan_hw_filter_enable(dev);
+ }
+
+ if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
+ (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) {
+ /* Configure EITR with the maximum possible value (0xFFFF) */
+ E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
+ }
+
+ /* Setup link speed and duplex */
+ speeds = &dev->data->dev_conf.link_speeds;
+ if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+ hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+ hw->mac.autoneg = 1;
+ } else {
+ num_speeds = 0;
+ autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+
+ /* Reset */
+ hw->phy.autoneg_advertised = 0;
+
+ if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
+ ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+ num_speeds = -1;
+ goto error_invalid_config;
+ }
+ if (*speeds & ETH_LINK_SPEED_10M_HD) {
+ hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
+ num_speeds++;
+ }
+ if (*speeds & ETH_LINK_SPEED_10M) {
+ hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
+ num_speeds++;
+ }
+ if (*speeds & ETH_LINK_SPEED_100M_HD) {
+ hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
+ num_speeds++;
+ }
+ if (*speeds & ETH_LINK_SPEED_100M) {
+ hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
+ num_speeds++;
+ }
+ if (*speeds & ETH_LINK_SPEED_1G) {
+ hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
+ num_speeds++;
+ }
+ if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
+ goto error_invalid_config;
+
+ /* Set/reset the mac.autoneg based on the link speed,
+ * fixed or not
+ */
+ if (!autoneg) {
+ hw->mac.autoneg = 0;
+ hw->mac.forced_speed_duplex =
+ hw->phy.autoneg_advertised;
+ } else {
+ hw->mac.autoneg = 1;
+ }
+ }
+
+ e1000_setup_link(hw);
+
+ if (rte_intr_allow_others(intr_handle)) {
+ /* check if lsc interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ eth_igb_lsc_interrupt_setup(dev, TRUE);
+ else
+ eth_igb_lsc_interrupt_setup(dev, FALSE);
+ } else {
+ rte_intr_callback_unregister(intr_handle,
+ eth_igb_interrupt_handler,
+ (void *)dev);
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ PMD_INIT_LOG(INFO, "lsc won't enable because of"
+ " no intr multiplex");
+ }
+
+ /* check if rxq interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.rxq != 0 &&
+ rte_intr_dp_is_en(intr_handle))
+ eth_igb_rxq_interrupt_setup(dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
+ /* resume enabled intr since hw reset */
+ igb_intr_enable(dev);
+
+ /* restore all types filter */
+ igb_filter_restore(dev);
+
+ eth_igb_rxtx_control(dev, true);
+ eth_igb_link_update(dev, 0);
+
+ PMD_INIT_LOG(DEBUG, "<<");
+
+ return 0;
+
+error_invalid_config:
+ PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
+ dev->data->dev_conf.link_speeds, dev->data->port_id);
+ igb_dev_clear_queues(dev);
+ return -EINVAL;
+}
+
+/*********************************************************************
+ *
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC.
+ *
+ **********************************************************************/
+static void
+eth_igb_stop(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_eth_link link;
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ eth_igb_rxtx_control(dev, false);
+
+ igb_intr_disable(hw);
+
+ /* disable intr eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ igb_pf_reset_hw(hw);
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+ /* Set bit for Go Link disconnect */
+ if (hw->mac.type >= e1000_82580) {
+ uint32_t phpm_reg;
+
+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+ phpm_reg |= E1000_82580_PM_GO_LINKD;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ }
+
+ /* Power down the phy. Needed to make the link go Down */
+ eth_igb_dev_set_link_down(dev);
+
+ igb_dev_clear_queues(dev);
+
+ /* clear the recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(dev, &link);
+
+ if (!rte_intr_allow_others(intr_handle))
+ /* resume to the default handler */
+ rte_intr_callback_register(intr_handle,
+ eth_igb_interrupt_handler,
+ (void *)dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
+
+static int
+eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->phy.media_type == e1000_media_type_copper)
+ e1000_power_up_phy(hw);
+ else
+ e1000_power_up_fiber_serdes_link(hw);
+
+ return 0;
+}
+
+static int
+eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->phy.media_type == e1000_media_type_copper)
+ e1000_power_down_phy(hw);
+ else
+ e1000_shutdown_fiber_serdes_link(hw);
+
+ return 0;
+}
+
+static void
+eth_igb_close(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
+ struct rte_eth_link link;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ eth_igb_stop(dev);
+ adapter->stopped = 1;
+
+ e1000_phy_hw_reset(hw);
+ igb_release_manageability(hw);
+ igb_hw_control_release(hw);
+
+ /* Clear bit for Go Link disconnect */
+ if (hw->mac.type >= e1000_82580) {
+ uint32_t phpm_reg;
+
+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ }
+
+ igb_dev_free_queues(dev);
+
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+igb_get_rx_buffer_size(struct e1000_hw *hw)
+{
+ uint32_t rx_buf_size;
+ if (hw->mac.type == e1000_82576) {
+ rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
+ } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
+ /* PBS needs to be translated according to a lookup table */
+ rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
+ rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
+ rx_buf_size = (rx_buf_size << 10);
+ } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
+ rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
+ } else {
+ rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
+ }
+
+ return rx_buf_size;
+}
+
+/*********************************************************************
+ *
+ * Initialize the hardware
+ *
+ **********************************************************************/
+static int
+igb_hardware_init(struct e1000_hw *hw)
+{
+ uint32_t rx_buf_size;
+ int diag;
+
+ /* Let the firmware know the OS is in control */
+ igb_hw_control_acquire(hw);
+
+ /*
+ * These parameters control the automatic generation (Tx) and
+ * response (Rx) to Ethernet PAUSE frames.
+ * - High water mark should allow for at least two standard size (1518)
+ * frames to be received after sending an XOFF.
+ * - Low water mark works best when it is very near the high water mark.
+ * This allows the receiver to restart by sending XON when it has
+ * drained a bit. Here we use an arbitrary value of 1500 which will
+ * restart after one full frame is pulled from the buffer. There
+ * could be several smaller frames in the buffer and if so they will
+ * not trigger the XON until their total number reduces the buffer
+ * by 1500.
+ * - The pause time is fairly large at 1000 x 512ns = 512 usec.
+ */
+ rx_buf_size = igb_get_rx_buffer_size(hw);
+
+ hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
+ hw->fc.low_water = hw->fc.high_water - 1500;
+ hw->fc.pause_time = IGB_FC_PAUSE_TIME;
+ hw->fc.send_xon = 1;
+
+ /* Set Flow control, use the tunable location if sane */
+ if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
+ hw->fc.requested_mode = igb_fc_setting;
+ else
+ hw->fc.requested_mode = e1000_fc_none;
+
+ /* Issue a global reset */
+ igb_pf_reset_hw(hw);
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+ diag = e1000_init_hw(hw);
+ if (diag < 0)
+ return diag;
+
+ E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
+ e1000_get_phy_info(hw);
+ e1000_check_for_link(hw);
+
+ return 0;
+}
+
+/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
+static void
+igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
+{
+ int pause_frames;
+
+ uint64_t old_gprc = stats->gprc;
+ uint64_t old_gptc = stats->gptc;
+ uint64_t old_tpr = stats->tpr;
+ uint64_t old_tpt = stats->tpt;
+ uint64_t old_rpthc = stats->rpthc;
+ uint64_t old_hgptc = stats->hgptc;
+
+ if(hw->phy.media_type == e1000_media_type_copper ||
+ (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ stats->symerrs +=
+ E1000_READ_REG(hw,E1000_SYMERRS);
+ stats->sec += E1000_READ_REG(hw, E1000_SEC);
+ }
+
+ stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
+ stats->mpc += E1000_READ_REG(hw, E1000_MPC);
+ stats->scc += E1000_READ_REG(hw, E1000_SCC);
+ stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
+
+ stats->mcc += E1000_READ_REG(hw, E1000_MCC);
+ stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
+ stats->colc += E1000_READ_REG(hw, E1000_COLC);
+ stats->dc += E1000_READ_REG(hw, E1000_DC);
+ stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
+ stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
+ stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
+ /*
+ ** For watchdog management we need to know if we have been
+ ** paused during the last interval, so capture that here.
+ */
+ pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
+ stats->xoffrxc += pause_frames;
+ stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
+ stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
+ stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
+ stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
+ stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
+ stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
+ stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
+ stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
+ stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
+ stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
+ stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
+ stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
+
+ /* For the 64-bit byte counters the low dword must be read first. */
+ /* Both registers clear on the read of the high dword */
+
+ /* Workaround CRC bytes included in size, take away 4 bytes/packet */
+ stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
+ stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
+ stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN;
+ stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
+ stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
+ stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN;
+
+ stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
+ stats->ruc += E1000_READ_REG(hw, E1000_RUC);
+ stats->rfc += E1000_READ_REG(hw, E1000_RFC);
+ stats->roc += E1000_READ_REG(hw, E1000_ROC);
+ stats->rjc += E1000_READ_REG(hw, E1000_RJC);
+
+ stats->tpr += E1000_READ_REG(hw, E1000_TPR);
+ stats->tpt += E1000_READ_REG(hw, E1000_TPT);
+
+ stats->tor += E1000_READ_REG(hw, E1000_TORL);
+ stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
+ stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN;
+ stats->tot += E1000_READ_REG(hw, E1000_TOTL);
+ stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
+ stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN;
+
+ stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
+ stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
+ stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
+ stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
+ stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
+ stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
+ stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
+ stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
+
+ /* Interrupt Counts */
+
+ stats->iac += E1000_READ_REG(hw, E1000_IAC);
+ stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
+ stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
+ stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
+ stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
+ stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
+ stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
+ stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
+ stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
+
+ /* Host to Card Statistics */
+
+ stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
+ stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
+ stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
+ stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
+ stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
+ stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
+ stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
+ stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
+ stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
+ stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN;
+ stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
+ stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
+ stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN;
+ stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
+ stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
+ stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
+
+ stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
+ stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
+ stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
+ stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
+ stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
+ stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
+}
+
+static int
+eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_hw_stats *stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ igb_read_stats_registers(hw, stats);
+
+ if (rte_stats == NULL)
+ return -EINVAL;
+
+ /* Rx Errors */
+ rte_stats->imissed = stats->mpc;
+ rte_stats->ierrors = stats->crcerrs +
+ stats->rlec + stats->ruc + stats->roc +
+ stats->rxerrc + stats->algnerrc + stats->cexterr;
+
+ /* Tx Errors */
+ rte_stats->oerrors = stats->ecol + stats->latecol;
+
+ rte_stats->ipackets = stats->gprc;
+ rte_stats->opackets = stats->gptc;
+ rte_stats->ibytes = stats->gorc;
+ rte_stats->obytes = stats->gotc;
+ return 0;
+}
+
+static void
+eth_igb_stats_reset(struct rte_eth_dev *dev)
+{
+ struct e1000_hw_stats *hw_stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* HW registers are cleared on read */
+ eth_igb_stats_get(dev, NULL);
+
+ /* Reset software totals */
+ memset(hw_stats, 0, sizeof(*hw_stats));
+}
+
+static void
+eth_igb_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct e1000_hw_stats *stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* HW registers are cleared on read */
+ eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS);
+
+ /* Reset software totals */
+ memset(stats, 0, sizeof(*stats));
+}
+
+static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int size)
+{
+ unsigned i;
+
+ if (xstats_names == NULL)
+ return IGB_NB_XSTATS;
+
+ /* Note: limit checked in rte_eth_xstats_names() */
+
+ for (i = 0; i < IGB_NB_XSTATS; i++) {
+ snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
+ "%s", rte_igb_stats_strings[i].name);
+ }
+
+ return IGB_NB_XSTATS;
+}
+
+static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+ unsigned int limit)
+{
+ unsigned int i;
+
+ if (!ids) {
+ if (xstats_names == NULL)
+ return IGB_NB_XSTATS;
+
+ for (i = 0; i < IGB_NB_XSTATS; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s", rte_igb_stats_strings[i].name);
+
+ return IGB_NB_XSTATS;
+
+ } else {
+ struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS];
+
+ eth_igb_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
+ IGB_NB_XSTATS);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= IGB_NB_XSTATS) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name,
+ xstats_names_copy[ids[i]].name);
+ }
+ return limit;
+ }
+}
+
+static int
+eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned n)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_hw_stats *hw_stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ unsigned i;
+
+ if (n < IGB_NB_XSTATS)
+ return IGB_NB_XSTATS;
+
+ igb_read_stats_registers(hw, hw_stats);
+
+ /* If this is a reset xstats is NULL, and we have cleared the
+ * registers by reading them.
+ */
+ if (!xstats)
+ return 0;
+
+ /* Extended stats */
+ for (i = 0; i < IGB_NB_XSTATS; i++) {
+ xstats[i].id = i;
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_igb_stats_strings[i].offset);
+ }
+
+ return IGB_NB_XSTATS;
+}
+
+static int
+eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ unsigned int i;
+
+ if (!ids) {
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_hw_stats *hw_stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ if (n < IGB_NB_XSTATS)
+ return IGB_NB_XSTATS;
+
+ igb_read_stats_registers(hw, hw_stats);
+
+ /* If this is a reset xstats is NULL, and we have cleared the
+ * registers by reading them.
+ */
+ if (!values)
+ return 0;
+
+ /* Extended stats */
+ for (i = 0; i < IGB_NB_XSTATS; i++)
+ values[i] = *(uint64_t *)(((char *)hw_stats) +
+ rte_igb_stats_strings[i].offset);
+
+ return IGB_NB_XSTATS;
+
+ } else {
+ uint64_t values_copy[IGB_NB_XSTATS];
+
+ eth_igb_xstats_get_by_id(dev, NULL, values_copy,
+ IGB_NB_XSTATS);
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= IGB_NB_XSTATS) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return n;
+ }
+}
+
+static void
+igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
+{
+ /* Good Rx packets, include VF loopback */
+ UPDATE_VF_STAT(E1000_VFGPRC,
+ hw_stats->last_gprc, hw_stats->gprc);
+
+ /* Good Rx octets, include VF loopback */
+ UPDATE_VF_STAT(E1000_VFGORC,
+ hw_stats->last_gorc, hw_stats->gorc);
+
+ /* Good Tx packets, include VF loopback */
+ UPDATE_VF_STAT(E1000_VFGPTC,
+ hw_stats->last_gptc, hw_stats->gptc);
+
+ /* Good Tx octets, include VF loopback */
+ UPDATE_VF_STAT(E1000_VFGOTC,
+ hw_stats->last_gotc, hw_stats->gotc);
+
+ /* Rx Multicst packets */
+ UPDATE_VF_STAT(E1000_VFMPRC,
+ hw_stats->last_mprc, hw_stats->mprc);
+
+ /* Good Rx loopback packets */
+ UPDATE_VF_STAT(E1000_VFGPRLBC,
+ hw_stats->last_gprlbc, hw_stats->gprlbc);
+
+ /* Good Rx loopback octets */
+ UPDATE_VF_STAT(E1000_VFGORLBC,
+ hw_stats->last_gorlbc, hw_stats->gorlbc);
+
+ /* Good Tx loopback packets */
+ UPDATE_VF_STAT(E1000_VFGPTLBC,
+ hw_stats->last_gptlbc, hw_stats->gptlbc);
+
+ /* Good Tx loopback octets */
+ UPDATE_VF_STAT(E1000_VFGOTLBC,
+ hw_stats->last_gotlbc, hw_stats->gotlbc);
+}
+
+static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned limit)
+{
+ unsigned i;
+
+ if (xstats_names != NULL)
+ for (i = 0; i < IGBVF_NB_XSTATS; i++) {
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name), "%s",
+ rte_igbvf_stats_strings[i].name);
+ }
+ return IGBVF_NB_XSTATS;
+}
+
+static int
+eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned n)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ unsigned i;
+
+ if (n < IGBVF_NB_XSTATS)
+ return IGBVF_NB_XSTATS;
+
+ igbvf_read_stats_registers(hw, hw_stats);
+
+ if (!xstats)
+ return 0;
+
+ for (i = 0; i < IGBVF_NB_XSTATS; i++) {
+ xstats[i].id = i;
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_igbvf_stats_strings[i].offset);
+ }
+
+ return IGBVF_NB_XSTATS;
+}
+
+static int
+eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ igbvf_read_stats_registers(hw, hw_stats);
+
+ if (rte_stats == NULL)
+ return -EINVAL;
+
+ rte_stats->ipackets = hw_stats->gprc;
+ rte_stats->ibytes = hw_stats->gorc;
+ rte_stats->opackets = hw_stats->gptc;
+ rte_stats->obytes = hw_stats->gotc;
+ return 0;
+}
+
+static void
+eth_igbvf_stats_reset(struct rte_eth_dev *dev)
+{
+ struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* Sync HW register to the last stats */
+ eth_igbvf_stats_get(dev, NULL);
+
+ /* reset HW current stats*/
+ memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
+ offsetof(struct e1000_vf_stats, gprc));
+}
+
+static int
+eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+ size_t fw_size)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_fw_version fw;
+ int ret;
+
+ e1000_get_fw_version(hw, &fw);
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ if (!(e1000_get_flash_presence_i210(hw))) {
+ ret = snprintf(fw_version, fw_size,
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
+ break;
+ }
+ /* fall through */
+ default:
+ /* if option rom is valid, display its version too */
+ if (fw.or_valid) {
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
+ /* no option rom */
+ } else {
+ if (fw.etrack_id != 0X0000) {
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor,
+ fw.etrack_id);
+ } else {
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor,
+ fw.eep_build);
+ }
+ }
+ break;
+ }
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
+static void
+eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
+ dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
+ dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+ dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ dev_info->max_rx_queues = 4;
+ dev_info->max_tx_queues = 4;
+ dev_info->max_vmdq_pools = 0;
+ break;
+
+ case e1000_82576:
+ dev_info->max_rx_queues = 16;
+ dev_info->max_tx_queues = 16;
+ dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->vmdq_queue_num = 16;
+ break;
+
+ case e1000_82580:
+ dev_info->max_rx_queues = 8;
+ dev_info->max_tx_queues = 8;
+ dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->vmdq_queue_num = 8;
+ break;
+
+ case e1000_i350:
+ dev_info->max_rx_queues = 8;
+ dev_info->max_tx_queues = 8;
+ dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->vmdq_queue_num = 8;
+ break;
+
+ case e1000_i354:
+ dev_info->max_rx_queues = 8;
+ dev_info->max_tx_queues = 8;
+ break;
+
+ case e1000_i210:
+ dev_info->max_rx_queues = 4;
+ dev_info->max_tx_queues = 4;
+ dev_info->max_vmdq_pools = 0;
+ break;
+
+ case e1000_i211:
+ dev_info->max_rx_queues = 2;
+ dev_info->max_tx_queues = 2;
+ dev_info->max_vmdq_pools = 0;
+ break;
+
+ default:
+ /* Should not happen */
+ break;
+ }
+ dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = IGB_DEFAULT_RX_PTHRESH,
+ .hthresh = IGB_DEFAULT_RX_HTHRESH,
+ .wthresh = IGB_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = IGB_DEFAULT_TX_PTHRESH,
+ .hthresh = IGB_DEFAULT_TX_HTHRESH,
+ .wthresh = IGB_DEFAULT_TX_WTHRESH,
+ },
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
+ dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
+ ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G;
+}
+
+static const uint32_t *
+eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to igb_rxd_pkt_info_to_pkt_type() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_TUNNEL_IP,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == eth_igb_recv_pkts ||
+ dev->rx_pkt_burst == eth_igb_recv_scattered_pkts)
+ return ptypes;
+ return NULL;
+}
+
+static void
+eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
+ dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
+ dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ switch (hw->mac.type) {
+ case e1000_vfadapt:
+ dev_info->max_rx_queues = 2;
+ dev_info->max_tx_queues = 2;
+ break;
+ case e1000_vfadapt_i350:
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = 1;
+ break;
+ default:
+ /* Should not happen */
+ break;
+ }
+
+ dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = IGB_DEFAULT_RX_PTHRESH,
+ .hthresh = IGB_DEFAULT_RX_HTHRESH,
+ .wthresh = IGB_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = IGB_DEFAULT_TX_PTHRESH,
+ .hthresh = IGB_DEFAULT_TX_HTHRESH,
+ .wthresh = IGB_DEFAULT_TX_WTHRESH,
+ },
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link;
+ int link_check, count;
+
+ link_check = 0;
+ hw->mac.get_link_status = 1;
+
+ /* possible wait-to-complete in up to 9 seconds */
+ for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
+ /* Read the real link status */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ /* Do the work to read phy */
+ e1000_check_for_link(hw);
+ link_check = !hw->mac.get_link_status;
+ break;
+
+ case e1000_media_type_fiber:
+ e1000_check_for_link(hw);
+ link_check = (E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STATUS_LU);
+ break;
+
+ case e1000_media_type_internal_serdes:
+ e1000_check_for_link(hw);
+ link_check = hw->mac.serdes_has_link;
+ break;
+
+ /* VF device is type_unknown */
+ case e1000_media_type_unknown:
+ eth_igbvf_link_update(hw);
+ link_check = !hw->mac.get_link_status;
+ break;
+
+ default:
+ break;
+ }
+ if (link_check || wait_to_complete == 0)
+ break;
+ rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
+ }
+ memset(&link, 0, sizeof(link));
+
+ /* Now we check if a transition has happened */
+ if (link_check) {
+ uint16_t duplex, speed;
+ hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
+ link.link_duplex = (duplex == FULL_DUPLEX) ?
+ ETH_LINK_FULL_DUPLEX :
+ ETH_LINK_HALF_DUPLEX;
+ link.link_speed = speed;
+ link.link_status = ETH_LINK_UP;
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ } else if (!link_check) {
+ link.link_speed = 0;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_status = ETH_LINK_DOWN;
+ link.link_autoneg = ETH_LINK_FIXED;
+ }
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+/*
+ * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means
+ * that the driver is loaded.
+ */
+static void
+igb_hw_control_acquire(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext;
+
+ /* Let firmware know the driver has taken over */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/*
+ * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
+ */
+static void
+igb_hw_control_release(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext;
+
+ /* Let firmware taken over control of h/w */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/*
+ * Bit of a misnomer, what this really means is
+ * to enable OS management of the system... aka
+ * to disable special hardware management features.
+ */
+static void
+igb_init_manageability(struct e1000_hw *hw)
+{
+ if (e1000_enable_mng_pass_thru(hw)) {
+ uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
+ uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
+
+ /* disable hardware interception of ARP */
+ manc &= ~(E1000_MANC_ARP_EN);
+
+ /* enable receiving management packets to the host */
+ manc |= E1000_MANC_EN_MNG2HOST;
+ manc2h |= 1 << 5; /* Mng Port 623 */
+ manc2h |= 1 << 6; /* Mng Port 664 */
+ E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+ }
+}
+
+static void
+igb_release_manageability(struct e1000_hw *hw)
+{
+ if (e1000_enable_mng_pass_thru(hw)) {
+ uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
+
+ manc |= E1000_MANC_ARP_EN;
+ manc &= ~E1000_MANC_EN_MNG2HOST;
+
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+ }
+}
+
+static void
+eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= (~E1000_RCTL_UPE);
+ if (dev->data->all_multicast == 1)
+ rctl |= E1000_RCTL_MPE;
+ else
+ rctl &= (~E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_MPE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ if (dev->data->promiscuous == 1)
+ return; /* must remain in all_multicast mode */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= (~E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static int
+eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vfta;
+ uint32_t vid_idx;
+ uint32_t vid_bit;
+
+ vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
+ E1000_VFTA_ENTRY_MASK);
+ vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
+ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
+ if (on)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
+
+ /* update local VFTA copy */
+ shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
+
+static int
+eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg, qinq;
+
+ qinq = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ qinq &= E1000_CTRL_EXT_EXT_VLAN;
+
+ /* only outer TPID of double VLAN can be configured*/
+ if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
+ reg = E1000_READ_REG(hw, E1000_VET);
+ reg = (reg & (~E1000_VET_VET_EXT)) |
+ ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
+ E1000_WRITE_REG(hw, E1000_VET, reg);
+
+ return 0;
+ }
+
+ /* all other TPID values are read-only*/
+ PMD_DRV_LOG(ERR, "Not supported");
+
+ return -ENOTSUP;
+}
+
+static void
+igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* Filter Table Disable */
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg &= ~E1000_RCTL_CFIEN;
+ reg &= ~E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+}
+
+static void
+igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t reg;
+ int i;
+
+ /* Filter Table Enable, CFI not used for packet acceptance */
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg &= ~E1000_RCTL_CFIEN;
+ reg |= E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+
+ /* restore VFTA table */
+ for (i = 0; i < IGB_VFTA_SIZE; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
+}
+
+static void
+igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* VLAN Mode Disable */
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg &= ~E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+}
+
+static void
+igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* VLAN Mode Enable */
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg |= E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+}
+
+static void
+igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* CTRL_EXT: Extended VLAN */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* Update maximum packet length */
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ VLAN_TAG_SIZE);
+}
+
+static void
+igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* CTRL_EXT: Extended VLAN */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg |= E1000_CTRL_EXT_EXTEND_VLAN;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* Update maximum packet length */
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * VLAN_TAG_SIZE);
+}
+
+static int
+eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+
+ rxmode = &dev->data->dev_conf.rxmode;
+ if(mask & ETH_VLAN_STRIP_MASK){
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ igb_vlan_hw_strip_enable(dev);
+ else
+ igb_vlan_hw_strip_disable(dev);
+ }
+
+ if(mask & ETH_VLAN_FILTER_MASK){
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ igb_vlan_hw_filter_enable(dev);
+ else
+ igb_vlan_hw_filter_disable(dev);
+ }
+
+ if(mask & ETH_VLAN_EXTEND_MASK){
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ igb_vlan_hw_extend_enable(dev);
+ else
+ igb_vlan_hw_extend_disable(dev);
+ }
+
+ return 0;
+}
+
+
+/**
+ * It enables the interrupt mask and then enable the interrupt.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ * @param on
+ * Enable or Disable
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (on)
+ intr->mask |= E1000_ICR_LSC;
+ else
+ intr->mask &= ~E1000_ICR_LSC;
+
+ return 0;
+}
+
+/* It clears the interrupt causes and enables the interrupt.
+ * It will be called once only during nic initialized.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+ uint32_t mask, regval;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_dev_info dev_info;
+
+ memset(&dev_info, 0, sizeof(dev_info));
+ eth_igb_infos_get(dev, &dev_info);
+
+ mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);
+ regval = E1000_READ_REG(hw, E1000_EIMS);
+ E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
+
+ return 0;
+}
+
+/*
+ * It reads ICR and gets interrupt causes, check it and set a bit flag
+ * to update link status.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t icr;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ igb_intr_disable(hw);
+
+ /* read-on-clear nic registers here */
+ icr = E1000_READ_REG(hw, E1000_ICR);
+
+ intr->flags = 0;
+ if (icr & E1000_ICR_LSC) {
+ intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+ }
+
+ if (icr & E1000_ICR_VMMB)
+ intr->flags |= E1000_FLAG_MAILBOX;
+
+ return 0;
+}
+
+/*
+ * It executes link_update after knowing an interrupt is prsent.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_eth_link link;
+ int ret;
+
+ if (intr->flags & E1000_FLAG_MAILBOX) {
+ igb_pf_mbx_process(dev);
+ intr->flags &= ~E1000_FLAG_MAILBOX;
+ }
+
+ igb_intr_enable(dev);
+ rte_intr_enable(intr_handle);
+
+ if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
+ intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
+
+ /* set get_link_status to check register later */
+ hw->mac.get_link_status = 1;
+ ret = eth_igb_link_update(dev, 0);
+
+ /* check if link has changed */
+ if (ret < 0)
+ return 0;
+
+ rte_eth_linkstatus_get(dev, &link);
+ if (link.link_status) {
+ PMD_INIT_LOG(INFO,
+ " Port %d: Link Up - speed %u Mbps - %s",
+ dev->data->port_id,
+ (unsigned)link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ } else {
+ PMD_INIT_LOG(INFO, " Port %d: Link Down",
+ dev->data->port_id);
+ }
+
+ PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
+ pci_dev->addr.domain,
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+
+ return 0;
+}
+
+/**
+ * Interrupt handler which shall be registered at first.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) regsitered before.
+ *
+ * @return
+ * void
+ */
+static void
+eth_igb_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ eth_igb_interrupt_get_status(dev);
+ eth_igb_interrupt_action(dev, dev->intr_handle);
+}
+
+static int
+eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t eicr;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ igbvf_intr_disable(hw);
+
+ /* read-on-clear nic registers here */
+ eicr = E1000_READ_REG(hw, E1000_EICR);
+ intr->flags = 0;
+
+ if (eicr == E1000_VTIVAR_MISC_MAILBOX)
+ intr->flags |= E1000_FLAG_MAILBOX;
+
+ return 0;
+}
+
+void igbvf_mbx_process(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 in_msg = 0;
+
+ /* peek the message first */
+ in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0));
+
+ /* PF reset VF event */
+ if (in_msg == E1000_PF_CONTROL_MSG) {
+ /* dummy mbx read to ack pf */
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ return;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ }
+}
+
+static int
+eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (intr->flags & E1000_FLAG_MAILBOX) {
+ igbvf_mbx_process(dev);
+ intr->flags &= ~E1000_FLAG_MAILBOX;
+ }
+
+ igbvf_intr_enable(dev);
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static void
+eth_igbvf_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ eth_igbvf_interrupt_get_status(dev);
+ eth_igbvf_interrupt_action(dev, dev->intr_handle);
+}
+
+static int
+eth_igb_led_on(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
+}
+
+static int
+eth_igb_led_off(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
+}
+
+static int
+eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct e1000_hw *hw;
+ uint32_t ctrl;
+ int tx_pause;
+ int rx_pause;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ fc_conf->pause_time = hw->fc.pause_time;
+ fc_conf->high_water = hw->fc.high_water;
+ fc_conf->low_water = hw->fc.low_water;
+ fc_conf->send_xon = hw->fc.send_xon;
+ fc_conf->autoneg = hw->mac.autoneg;
+
+ /*
+ * Return rx_pause and tx_pause status according to actual setting of
+ * the TFCE and RFCE bits in the CTRL register.
+ */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ if (ctrl & E1000_CTRL_TFCE)
+ tx_pause = 1;
+ else
+ tx_pause = 0;
+
+ if (ctrl & E1000_CTRL_RFCE)
+ rx_pause = 1;
+ else
+ rx_pause = 0;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static int
+eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct e1000_hw *hw;
+ int err;
+ enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
+ e1000_fc_none,
+ e1000_fc_rx_pause,
+ e1000_fc_tx_pause,
+ e1000_fc_full
+ };
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ uint32_t rctl;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (fc_conf->autoneg != hw->mac.autoneg)
+ return -ENOTSUP;
+ rx_buf_size = igb_get_rx_buffer_size(hw);
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+ /* At least reserve one Ethernet frame for watermark */
+ max_high_water = rx_buf_size - ETHER_MAX_LEN;
+ if ((fc_conf->high_water > max_high_water) ||
+ (fc_conf->high_water < fc_conf->low_water)) {
+ PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
+ PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
+ return -EINVAL;
+ }
+
+ hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
+ hw->fc.pause_time = fc_conf->pause_time;
+ hw->fc.high_water = fc_conf->high_water;
+ hw->fc.low_water = fc_conf->low_water;
+ hw->fc.send_xon = fc_conf->send_xon;
+
+ err = e1000_setup_link_generic(hw);
+ if (err == E1000_SUCCESS) {
+
+ /* check if we want to forward MAC frames - driver doesn't have native
+ * capability to do that, so we'll write the registers ourselves */
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* set or clear MFLCN.PMCF bit depending on configuration */
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ rctl |= E1000_RCTL_PMCF;
+ else
+ rctl &= ~E1000_RCTL_PMCF;
+
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+ }
+
+ PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
+ return -EIO;
+}
+
+#define E1000_RAH_POOLSEL_SHIFT (18)
+static int
+eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rah;
+
+ e1000_rar_set(hw, mac_addr->addr_bytes, index);
+ rah = E1000_READ_REG(hw, E1000_RAH(index));
+ rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
+ E1000_WRITE_REG(hw, E1000_RAH(index), rah);
+ return 0;
+}
+
+static void
+eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
+{
+ uint8_t addr[ETHER_ADDR_LEN];
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ memset(addr, 0, sizeof(addr));
+
+ e1000_rar_set(hw, addr, index);
+}
+
+static int
+eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ eth_igb_rar_clear(dev, 0);
+ eth_igb_rar_set(dev, (void *)addr, 0, 0);
+
+ return 0;
+}
+/*
+ * Virtual Function operations
+ */
+static void
+igbvf_intr_disable(struct e1000_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ /* Clear interrupt mask to stop from interrupts being generated */
+ E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
+
+ E1000_WRITE_FLUSH(hw);
+}
+
+static void
+igbvf_stop_adapter(struct rte_eth_dev *dev)
+{
+ u32 reg_val;
+ u16 i;
+ struct rte_eth_dev_info dev_info;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ memset(&dev_info, 0, sizeof(dev_info));
+ eth_igbvf_infos_get(dev, &dev_info);
+
+ /* Clear interrupt mask to stop from interrupts being generated */
+ igbvf_intr_disable(hw);
+
+ /* Clear any pending interrupts, flush previous writes */
+ E1000_READ_REG(hw, E1000_EICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < dev_info.max_tx_queues; i++)
+ E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < dev_info.max_rx_queues; i++) {
+ reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
+ reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
+ while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
+ ;
+ }
+
+ /* flush all queues disables */
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(2);
+}
+
+static int eth_igbvf_link_update(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ struct e1000_mac_info *mac = &hw->mac;
+ int ret_val = E1000_SUCCESS;
+
+ PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
+
+ /*
+ * We only want to run this if there has been a rst asserted.
+ * in this case that could mean a link change, device reset,
+ * or a virtual function reset
+ */
+
+ /* If we were hit with a reset or timeout drop the link */
+ if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = TRUE;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
+ goto out;
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link */
+ mac->get_link_status = FALSE;
+
+out:
+ return ret_val;
+}
+
+
+static int
+igbvf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf* conf = &dev->data->dev_conf;
+
+ PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
+ dev->data->port_id);
+
+ /*
+ * VF has no ability to enable/disable HW CRC
+ * Keep the persistent behavior the same as Host PF
+ */
+#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
+ if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
+ PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ }
+#else
+ if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
+ PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
+ }
+#endif
+
+ return 0;
+}
+
+static int
+igbvf_dev_start(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int ret;
+ uint32_t intr_vector = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->mac.ops.reset_hw(hw);
+ adapter->stopped = 0;
+
+ /* Set all vfta */
+ igbvf_set_vfta_all(dev,1);
+
+ eth_igbvf_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ ret = eth_igbvf_rx_init(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ igb_dev_clear_queues(dev);
+ return ret;
+ }
+
+ /* check and configure queue intr-vector mapping */
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
+ intr_vector = dev->data->nb_rx_queues;
+ ret = rte_intr_efd_enable(intr_handle, intr_vector);
+ if (ret)
+ return ret;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (!intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ eth_igbvf_configure_msix_intr(dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
+ /* resume enabled intr since hw reset */
+ igbvf_intr_enable(dev);
+
+ return 0;
+}
+
+static void
+igbvf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ PMD_INIT_FUNC_TRACE();
+
+ igbvf_stop_adapter(dev);
+
+ /*
+ * Clear what we set, but we still keep shadow_vfta to
+ * restore after device starts
+ */
+ igbvf_set_vfta_all(dev,0);
+
+ igb_dev_clear_queues(dev);
+
+ /* disable intr eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
+
+static void
+igbvf_dev_close(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
+ struct ether_addr addr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ e1000_reset_hw(hw);
+
+ igbvf_dev_stop(dev);
+ adapter->stopped = 1;
+ igb_dev_free_queues(dev);
+
+ /**
+ * reprogram the RAR with a zero mac address,
+ * to ensure that the VF traffic goes to the PF
+ * after stop, close and detach of the VF.
+ **/
+
+ memset(&addr, 0, sizeof(addr));
+ igbvf_default_mac_addr_set(dev, &addr);
+}
+
+static void
+igbvf_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Set both unicast and multicast promisc */
+ e1000_promisc_set_vf(hw, e1000_promisc_enabled);
+}
+
+static void
+igbvf_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* If in allmulticast mode leave multicast promisc */
+ if (dev->data->all_multicast == 1)
+ e1000_promisc_set_vf(hw, e1000_promisc_multicast);
+ else
+ e1000_promisc_set_vf(hw, e1000_promisc_disabled);
+}
+
+static void
+igbvf_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* In promiscuous mode multicast promisc already set */
+ if (dev->data->promiscuous == 0)
+ e1000_promisc_set_vf(hw, e1000_promisc_multicast);
+}
+
+static void
+igbvf_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* In promiscuous mode leave multicast promisc enabled */
+ if (dev->data->promiscuous == 0)
+ e1000_promisc_set_vf(hw, e1000_promisc_disabled);
+}
+
+static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ uint32_t msgbuf[2];
+ s32 err;
+
+ /* After set vlan, vlan strip will also be enabled in igb driver*/
+ msgbuf[0] = E1000_VF_SET_VLAN;
+ msgbuf[1] = vid;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ if (on)
+ msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
+
+ err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
+ if (err)
+ goto mbx_err;
+
+ err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
+ if (err)
+ goto mbx_err;
+
+ msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
+ if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))
+ err = -EINVAL;
+
+mbx_err:
+ return err;
+}
+
+static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ int i = 0, j = 0, vfta = 0, mask = 1;
+
+ for (i = 0; i < IGB_VFTA_SIZE; i++){
+ vfta = shadow_vfta->vfta[i];
+ if(vfta){
+ mask = 1;
+ for (j = 0; j < 32; j++){
+ if(vfta & mask)
+ igbvf_set_vfta(hw,
+ (uint16_t)((i<<5)+j), on);
+ mask<<=1;
+ }
+ }
+ }
+
+}
+
+static int
+igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vid_idx = 0;
+ uint32_t vid_bit = 0;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/
+ ret = igbvf_set_vfta(hw, vlan_id, !!on);
+ if(ret){
+ PMD_INIT_LOG(ERR, "Unable to set VF vlan");
+ return ret;
+ }
+ vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
+ vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
+
+ /*Save what we set and retore it after device reset*/
+ if (on)
+ shadow_vfta->vfta[vid_idx] |= vid_bit;
+ else
+ shadow_vfta->vfta[vid_idx] &= ~vid_bit;
+
+ return 0;
+}
+
+static int
+igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* index is not used by rar_set() */
+ hw->mac.ops.rar_set(hw, (void *)addr, 0);
+ return 0;
+}
+
+
+static int
+eth_igb_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint8_t i, j, mask;
+ uint32_t reta, r;
+ uint16_t idx, shift;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IGB_4_BIT_MASK);
+ if (!mask)
+ continue;
+ if (mask == IGB_4_BIT_MASK)
+ r = 0;
+ else
+ r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
+ for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ reta |= reta_conf[idx].reta[shift + j] <<
+ (CHAR_BIT * j);
+ else
+ reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
+ }
+ E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
+ }
+
+ return 0;
+}
+
+static int
+eth_igb_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint8_t i, j, mask;
+ uint32_t reta;
+ uint16_t idx, shift;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IGB_4_BIT_MASK);
+ if (!mask)
+ continue;
+ reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
+ for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ reta_conf[idx].reta[shift + j] =
+ ((reta >> (CHAR_BIT * j)) &
+ IGB_8_BIT_MASK);
+ }
+ }
+
+ return 0;
+}
+
+int
+eth_igb_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t synqf, rfctl;
+
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
+
+ if (add) {
+ if (synqf & E1000_SYN_FILTER_ENABLE)
+ return -EINVAL;
+
+ synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
+ E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
+
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+ if (filter->hig_pri)
+ rfctl |= E1000_RFCTL_SYNQFP;
+ else
+ rfctl &= ~E1000_RFCTL_SYNQFP;
+
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+ } else {
+ if (!(synqf & E1000_SYN_FILTER_ENABLE))
+ return -ENOENT;
+ synqf = 0;
+ }
+
+ filter_info->syn_info = synqf;
+ E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
+ E1000_WRITE_FLUSH(hw);
+ return 0;
+}
+
+static int
+eth_igb_syn_filter_get(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf, rfctl;
+
+ synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
+ if (synqf & E1000_SYN_FILTER_ENABLE) {
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+ filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
+ filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
+ E1000_SYN_FILTER_QUEUE_SHIFT);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int
+eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = eth_igb_syn_filter_set(dev,
+ (struct rte_eth_syn_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = eth_igb_syn_filter_set(dev,
+ (struct rte_eth_syn_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = eth_igb_syn_filter_get(dev,
+ (struct rte_eth_syn_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/
+static inline int
+ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
+ struct e1000_2tuple_filter_info *filter_info)
+{
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+ if (filter->priority > E1000_2TUPLE_MAX_PRI)
+ return -EINVAL; /* filter index is out of range. */
+ if (filter->tcp_flags > TCP_FLAG_ALL)
+ return -EINVAL; /* flags is invalid. */
+
+ switch (filter->dst_port_mask) {
+ case UINT16_MAX:
+ filter_info->dst_port_mask = 0;
+ filter_info->dst_port = filter->dst_port;
+ break;
+ case 0:
+ filter_info->dst_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->proto_mask) {
+ case UINT8_MAX:
+ filter_info->proto_mask = 0;
+ filter_info->proto = filter->proto;
+ break;
+ case 0:
+ filter_info->proto_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ filter_info->priority = (uint8_t)filter->priority;
+ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
+ filter_info->tcp_flags = filter->tcp_flags;
+ else
+ filter_info->tcp_flags = 0;
+
+ return 0;
+}
+
+static inline struct e1000_2tuple_filter *
+igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
+ struct e1000_2tuple_filter_info *key)
+{
+ struct e1000_2tuple_filter *it;
+
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->filter_info,
+ sizeof(struct e1000_2tuple_filter_info)) == 0) {
+ return it;
+ }
+ }
+ return NULL;
+}
+
+/* inject a igb 2tuple filter to HW */
+static inline void
+igb_inject_2uple_filter(struct rte_eth_dev *dev,
+ struct e1000_2tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
+ uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
+ int i;
+
+ i = filter->index;
+ imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
+ if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+
+ imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ ttqf |= E1000_TTQF_QUEUE_ENABLE;
+ ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
+ ttqf |= (uint32_t)(filter->filter_info.proto &
+ E1000_TTQF_PROTOCOL_MASK);
+ if (filter->filter_info.proto_mask == 0)
+ ttqf &= ~E1000_TTQF_MASK_ENABLE;
+
+ /* tcp flags bits setting. */
+ if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
+ if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_URG;
+ if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_ACK;
+ if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_PSH;
+ if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_RST;
+ if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_SYN;
+ if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_FIN;
+ } else {
+ imir_ext |= E1000_IMIREXT_CTRL_BP;
+ }
+ E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
+ E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+}
+
+/*
+ * igb_add_2tuple_filter - add a 2tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: ponter to the filter that will be added.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+igb_add_2tuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_2tuple_filter *filter;
+ int i, ret;
+
+ filter = rte_zmalloc("e1000_2tuple_filter",
+ sizeof(struct e1000_2tuple_filter), 0);
+ if (filter == NULL)
+ return -ENOMEM;
+
+ ret = ntuple_filter_to_2tuple(ntuple_filter,
+ &filter->filter_info);
+ if (ret < 0) {
+ rte_free(filter);
+ return ret;
+ }
+ if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
+ &filter->filter_info) != NULL) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ rte_free(filter);
+ return -EEXIST;
+ }
+ filter->queue = ntuple_filter->queue;
+
+ /*
+ * look for an unused 2tuple filter index,
+ * and insert the filter to list.
+ */
+ for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
+ if (!(filter_info->twotuple_mask & (1 << i))) {
+ filter_info->twotuple_mask |= 1 << i;
+ filter->index = i;
+ TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
+ filter,
+ entries);
+ break;
+ }
+ }
+ if (i >= E1000_MAX_TTQF_FILTERS) {
+ PMD_DRV_LOG(ERR, "2tuple filters are full.");
+ rte_free(filter);
+ return -ENOSYS;
+ }
+
+ igb_inject_2uple_filter(dev, filter);
+ return 0;
+}
+
+int
+igb_delete_2tuple_filter(struct rte_eth_dev *dev,
+ struct e1000_2tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ filter_info->twotuple_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
+ rte_free(filter);
+
+ E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
+ E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
+ return 0;
+}
+
+/*
+ * igb_remove_2tuple_filter - remove a 2tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: ponter to the filter that will be removed.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+igb_remove_2tuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_2tuple_filter_info filter_2tuple;
+ struct e1000_2tuple_filter *filter;
+ int ret;
+
+ memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
+ ret = ntuple_filter_to_2tuple(ntuple_filter,
+ &filter_2tuple);
+ if (ret < 0)
+ return ret;
+
+ filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
+ &filter_2tuple);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+
+ igb_delete_2tuple_filter(dev, filter);
+
+ return 0;
+}
+
+/* inject a igb flex filter to HW */
+static inline void
+igb_inject_flex_filter(struct rte_eth_dev *dev,
+ struct e1000_flex_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, queueing;
+ uint32_t reg_off;
+ uint8_t i, j = 0;
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ if (filter->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(filter->index);
+ else
+ reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
+
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
+ (E1000_WUFC_FLX0 << filter->index));
+ queueing = filter->filter_info.len |
+ (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
+ (filter->filter_info.priority <<
+ E1000_FHFT_QUEUEING_PRIO_SHIFT);
+ E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
+ queueing);
+
+ for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
+ E1000_WRITE_REG(hw, reg_off,
+ filter->filter_info.dwords[j]);
+ reg_off += sizeof(uint32_t);
+ E1000_WRITE_REG(hw, reg_off,
+ filter->filter_info.dwords[++j]);
+ reg_off += sizeof(uint32_t);
+ E1000_WRITE_REG(hw, reg_off,
+ (uint32_t)filter->filter_info.mask[i]);
+ reg_off += sizeof(uint32_t) * 2;
+ ++j;
+ }
+}
+
+static inline struct e1000_flex_filter *
+eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
+ struct e1000_flex_filter_info *key)
+{
+ struct e1000_flex_filter *it;
+
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->filter_info,
+ sizeof(struct e1000_flex_filter_info)) == 0)
+ return it;
+ }
+
+ return NULL;
+}
+
+/* remove a flex byte filter
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: the pointer of the filter will be removed.
+ */
+void
+igb_remove_flex_filter(struct rte_eth_dev *dev,
+ struct e1000_flex_filter *filter)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, i;
+ uint32_t reg_off;
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ if (filter->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(filter->index);
+ else
+ reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
+
+ for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
+ E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
+
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc &
+ (~(E1000_WUFC_FLX0 << filter->index)));
+
+ filter_info->flex_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->flex_list, filter, entries);
+ rte_free(filter);
+}
+
+int
+eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
+ struct rte_eth_flex_filter *filter,
+ bool add)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_flex_filter *flex_filter, *it;
+ uint32_t mask;
+ uint8_t shift, i;
+
+ flex_filter = rte_zmalloc("e1000_flex_filter",
+ sizeof(struct e1000_flex_filter), 0);
+ if (flex_filter == NULL)
+ return -ENOMEM;
+
+ flex_filter->filter_info.len = filter->len;
+ flex_filter->filter_info.priority = filter->priority;
+ memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
+ for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
+ mask = 0;
+ /* reverse bits in flex filter's mask*/
+ for (shift = 0; shift < CHAR_BIT; shift++) {
+ if (filter->mask[i] & (0x01 << shift))
+ mask |= (0x80 >> shift);
+ }
+ flex_filter->filter_info.mask[i] = mask;
+ }
+
+ it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
+ &flex_filter->filter_info);
+ if (it == NULL && !add) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ rte_free(flex_filter);
+ return -ENOENT;
+ }
+ if (it != NULL && add) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ rte_free(flex_filter);
+ return -EEXIST;
+ }
+
+ if (add) {
+ flex_filter->queue = filter->queue;
+ /*
+ * look for an unused flex filter index
+ * and insert the filter into the list.
+ */
+ for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
+ if (!(filter_info->flex_mask & (1 << i))) {
+ filter_info->flex_mask |= 1 << i;
+ flex_filter->index = i;
+ TAILQ_INSERT_TAIL(&filter_info->flex_list,
+ flex_filter,
+ entries);
+ break;
+ }
+ }
+ if (i >= E1000_MAX_FLEX_FILTERS) {
+ PMD_DRV_LOG(ERR, "flex filters are full.");
+ rte_free(flex_filter);
+ return -ENOSYS;
+ }
+
+ igb_inject_flex_filter(dev, flex_filter);
+
+ } else {
+ igb_remove_flex_filter(dev, it);
+ rte_free(flex_filter);
+ }
+
+ return 0;
+}
+
+static int
+eth_igb_get_flex_filter(struct rte_eth_dev *dev,
+ struct rte_eth_flex_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_flex_filter flex_filter, *it;
+ uint32_t wufc, queueing, wufc_en = 0;
+
+ memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
+ flex_filter.filter_info.len = filter->len;
+ flex_filter.filter_info.priority = filter->priority;
+ memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
+ memcpy(flex_filter.filter_info.mask, filter->mask,
+ RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT);
+
+ it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
+ &flex_filter.filter_info);
+ if (it == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
+
+ if ((wufc & wufc_en) == wufc_en) {
+ uint32_t reg_off = 0;
+ if (it->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(it->index);
+ else
+ reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
+
+ queueing = E1000_READ_REG(hw,
+ reg_off + E1000_FHFT_QUEUEING_OFFSET);
+ filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
+ filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
+ E1000_FHFT_QUEUEING_PRIO_SHIFT;
+ filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
+ E1000_FHFT_QUEUEING_QUEUE_SHIFT;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static int
+eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_flex_filter *filter;
+ int ret = 0;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return ret;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+ filter_op);
+ return -EINVAL;
+ }
+
+ filter = (struct rte_eth_flex_filter *)arg;
+ if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
+ || filter->len % sizeof(uint64_t) != 0) {
+ PMD_DRV_LOG(ERR, "filter's length is out of range");
+ return -EINVAL;
+ }
+ if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
+ PMD_DRV_LOG(ERR, "filter's priority is out of range");
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = eth_igb_get_flex_filter(dev, filter);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/
+static inline int
+ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
+ struct e1000_5tuple_filter_info *filter_info)
+{
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
+ return -EINVAL;
+ if (filter->priority > E1000_2TUPLE_MAX_PRI)
+ return -EINVAL; /* filter index is out of range. */
+ if (filter->tcp_flags > TCP_FLAG_ALL)
+ return -EINVAL; /* flags is invalid. */
+
+ switch (filter->dst_ip_mask) {
+ case UINT32_MAX:
+ filter_info->dst_ip_mask = 0;
+ filter_info->dst_ip = filter->dst_ip;
+ break;
+ case 0:
+ filter_info->dst_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_ip_mask) {
+ case UINT32_MAX:
+ filter_info->src_ip_mask = 0;
+ filter_info->src_ip = filter->src_ip;
+ break;
+ case 0:
+ filter_info->src_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->dst_port_mask) {
+ case UINT16_MAX:
+ filter_info->dst_port_mask = 0;
+ filter_info->dst_port = filter->dst_port;
+ break;
+ case 0:
+ filter_info->dst_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_port_mask) {
+ case UINT16_MAX:
+ filter_info->src_port_mask = 0;
+ filter_info->src_port = filter->src_port;
+ break;
+ case 0:
+ filter_info->src_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->proto_mask) {
+ case UINT8_MAX:
+ filter_info->proto_mask = 0;
+ filter_info->proto = filter->proto;
+ break;
+ case 0:
+ filter_info->proto_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ filter_info->priority = (uint8_t)filter->priority;
+ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
+ filter_info->tcp_flags = filter->tcp_flags;
+ else
+ filter_info->tcp_flags = 0;
+
+ return 0;
+}
+
+static inline struct e1000_5tuple_filter *
+igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
+ struct e1000_5tuple_filter_info *key)
+{
+ struct e1000_5tuple_filter *it;
+
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->filter_info,
+ sizeof(struct e1000_5tuple_filter_info)) == 0) {
+ return it;
+ }
+ }
+ return NULL;
+}
+
+/* inject a igb 5-tuple filter to HW */
+static inline void
+igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct e1000_5tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
+ uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
+ uint8_t i;
+
+ i = filter->index;
+ ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
+ if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
+ if (filter->filter_info.dst_ip_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
+ if (filter->filter_info.src_port_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+ if (filter->filter_info.proto_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
+ ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
+ E1000_FTQF_QUEUE_MASK;
+ ftqf |= E1000_FTQF_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
+ E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
+ E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
+
+ spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
+ E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
+
+ imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
+ if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+ imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ /* tcp flags bits setting. */
+ if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
+ if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_URG;
+ if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_ACK;
+ if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_PSH;
+ if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_RST;
+ if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_SYN;
+ if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_FIN;
+ } else {
+ imir_ext |= E1000_IMIREXT_CTRL_BP;
+ }
+ E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+}
+
+/*
+ * igb_add_5tuple_filter_82576 - add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: ponter to the filter that will be added.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter *filter;
+ uint8_t i;
+ int ret;
+
+ filter = rte_zmalloc("e1000_5tuple_filter",
+ sizeof(struct e1000_5tuple_filter), 0);
+ if (filter == NULL)
+ return -ENOMEM;
+
+ ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
+ &filter->filter_info);
+ if (ret < 0) {
+ rte_free(filter);
+ return ret;
+ }
+
+ if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
+ &filter->filter_info) != NULL) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ rte_free(filter);
+ return -EEXIST;
+ }
+ filter->queue = ntuple_filter->queue;
+
+ /*
+ * look for an unused 5tuple filter index,
+ * and insert the filter to list.
+ */
+ for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
+ if (!(filter_info->fivetuple_mask & (1 << i))) {
+ filter_info->fivetuple_mask |= 1 << i;
+ filter->index = i;
+ TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+ filter,
+ entries);
+ break;
+ }
+ }
+ if (i >= E1000_MAX_FTQF_FILTERS) {
+ PMD_DRV_LOG(ERR, "5tuple filters are full.");
+ rte_free(filter);
+ return -ENOSYS;
+ }
+
+ igb_inject_5tuple_filter_82576(dev, filter);
+ return 0;
+}
+
+int
+igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct e1000_5tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ filter_info->fivetuple_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+ rte_free(filter);
+
+ E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
+ E1000_FTQF_VF_BP | E1000_FTQF_MASK);
+ E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
+ return 0;
+}
+
+/*
+ * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: ponter to the filter that will be removed.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter_info filter_5tuple;
+ struct e1000_5tuple_filter *filter;
+ int ret;
+
+ memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
+ &filter_5tuple);
+ if (ret < 0)
+ return ret;
+
+ filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+
+ igb_delete_5tuple_filter_82576(dev, filter);
+
+ return 0;
+}
+
+static int
+eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ uint32_t rctl;
+ struct e1000_hw *hw;
+ struct rte_eth_dev_info dev_info;
+ uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
+ VLAN_TAG_SIZE);
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+#ifdef RTE_LIBRTE_82571_SUPPORT
+ /* XXX: not bigger than max_rx_pktlen */
+ if (hw->mac.type == e1000_82571)
+ return -ENOTSUP;
+#endif
+ eth_igb_infos_get(dev, &dev_info);
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) ||
+ (frame_size > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* refuse mtu that requires the support of scattered packets when this
+ * feature has not been enabled before. */
+ if (!dev->data->scattered_rx &&
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ return -EINVAL;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* switch to jumbo mode if needed */
+ if (frame_size > ETHER_MAX_LEN) {
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ rctl |= E1000_RCTL_LPE;
+ } else {
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ rctl &= ~E1000_RCTL_LPE;
+ }
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+ return 0;
+}
+
+/*
+ * igb_add_del_ntuple_filter - add or delete a ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add filter, if false, remove filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter,
+ bool add)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ switch (ntuple_filter->flags) {
+ case RTE_5TUPLE_FLAGS:
+ case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+ if (hw->mac.type != e1000_82576)
+ return -ENOTSUP;
+ if (add)
+ ret = igb_add_5tuple_filter_82576(dev,
+ ntuple_filter);
+ else
+ ret = igb_remove_5tuple_filter_82576(dev,
+ ntuple_filter);
+ break;
+ case RTE_2TUPLE_FLAGS:
+ case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+ if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 &&
+ hw->mac.type != e1000_i210 &&
+ hw->mac.type != e1000_i211)
+ return -ENOTSUP;
+ if (add)
+ ret = igb_add_2tuple_filter(dev, ntuple_filter);
+ else
+ ret = igb_remove_2tuple_filter(dev, ntuple_filter);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * igb_get_ntuple_filter - get a ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+igb_get_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter_info filter_5tuple;
+ struct e1000_2tuple_filter_info filter_2tuple;
+ struct e1000_5tuple_filter *p_5tuple_filter;
+ struct e1000_2tuple_filter *p_2tuple_filter;
+ int ret;
+
+ switch (ntuple_filter->flags) {
+ case RTE_5TUPLE_FLAGS:
+ case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+ if (hw->mac.type != e1000_82576)
+ return -ENOTSUP;
+ memset(&filter_5tuple,
+ 0,
+ sizeof(struct e1000_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
+ &filter_5tuple);
+ if (ret < 0)
+ return ret;
+ p_5tuple_filter = igb_5tuple_filter_lookup_82576(
+ &filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (p_5tuple_filter == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+ ntuple_filter->queue = p_5tuple_filter->queue;
+ break;
+ case RTE_2TUPLE_FLAGS:
+ case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+ if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
+ return -ENOTSUP;
+ memset(&filter_2tuple,
+ 0,
+ sizeof(struct e1000_2tuple_filter_info));
+ ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
+ if (ret < 0)
+ return ret;
+ p_2tuple_filter = igb_2tuple_filter_lookup(
+ &filter_info->twotuple_list,
+ &filter_2tuple);
+ if (p_2tuple_filter == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+ ntuple_filter->queue = p_2tuple_filter->queue;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * igb_ntuple_filter_handle - Handle operations for ntuple filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op:operation will be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+igb_ntuple_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = igb_add_del_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = igb_add_del_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = igb_get_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static inline int
+igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
+ uint16_t ethertype)
+{
+ int i;
+
+ for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_filters[i].ethertype == ethertype &&
+ (filter_info->ethertype_mask & (1 << i)))
+ return i;
+ }
+ return -1;
+}
+
+static inline int
+igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
+ uint16_t ethertype, uint32_t etqf)
+{
+ int i;
+
+ for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+ if (!(filter_info->ethertype_mask & (1 << i))) {
+ filter_info->ethertype_mask |= 1 << i;
+ filter_info->ethertype_filters[i].ethertype = ethertype;
+ filter_info->ethertype_filters[i].etqf = etqf;
+ return i;
+ }
+ }
+ return -1;
+}
+
+int
+igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
+ uint8_t idx)
+{
+ if (idx >= E1000_MAX_ETQF_FILTERS)
+ return -1;
+ filter_info->ethertype_mask &= ~(1 << idx);
+ filter_info->ethertype_filters[idx].ethertype = 0;
+ filter_info->ethertype_filters[idx].etqf = 0;
+ return idx;
+}
+
+
+int
+igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t etqf = 0;
+ int ret;
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+ " ethertype filter.", filter->ether_type);
+ return -EINVAL;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+ return -EINVAL;
+ }
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ PMD_DRV_LOG(ERR, "drop option is unsupported.");
+ return -EINVAL;
+ }
+
+ ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret >= 0 && add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+ filter->ether_type);
+ return -EEXIST;
+ }
+ if (ret < 0 && !add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ if (add) {
+ etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
+ etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
+ etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
+ ret = igb_ethertype_filter_insert(filter_info,
+ filter->ether_type, etqf);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype filters are full.");
+ return -ENOSYS;
+ }
+ } else {
+ ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
+ if (ret < 0)
+ return -ENOSYS;
+ }
+ E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+igb_get_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t etqf;
+ int ret;
+
+ ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
+ if (etqf & E1000_ETQF_FILTER_ENABLE) {
+ filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
+ filter->flags = 0;
+ filter->queue = (etqf & E1000_ETQF_QUEUE) >>
+ E1000_ETQF_QUEUE_SHIFT;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+/*
+ * igb_ethertype_filter_handle - Handle operations for ethertype filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op:operation will be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+igb_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = igb_add_del_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = igb_add_del_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = igb_get_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+eth_igb_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ret = igb_ntuple_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = igb_ethertype_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_SYN:
+ ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &igb_flow_ops;
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ break;
+ }
+
+ return ret;
+}
+
+static int
+eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
+ return 0;
+}
+
+static uint64_t
+igb_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t systime_cycles;
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ /*
+ * Need to read System Time Residue Register to be able
+ * to read the other two registers.
+ */
+ E1000_READ_REG(hw, E1000_SYSTIMR);
+ /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
+ systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
+ systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
+ * NSEC_PER_SEC;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ /*
+ * Need to read System Time Residue Register to be able
+ * to read the other two registers.
+ */
+ E1000_READ_REG(hw, E1000_SYSTIMR);
+ systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
+ /* Only the 8 LSB are valid. */
+ systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH)
+ & 0xff) << 32;
+ break;
+ default:
+ systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
+ systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
+ << 32;
+ break;
+ }
+
+ return systime_cycles;
+}
+
+static uint64_t
+igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rx_tstamp_cycles;
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ /* RXSTMPL stores ns and RXSTMPH stores seconds. */
+ rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
+ rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
+ * NSEC_PER_SEC;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
+ /* Only the 8 LSB are valid. */
+ rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH)
+ & 0xff) << 32;
+ break;
+ default:
+ rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
+ rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
+ << 32;
+ break;
+ }
+
+ return rx_tstamp_cycles;
+}
+
+static uint64_t
+igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t tx_tstamp_cycles;
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ /* RXSTMPL stores ns and RXSTMPH stores seconds. */
+ tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
+ tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
+ * NSEC_PER_SEC;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
+ /* Only the 8 LSB are valid. */
+ tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH)
+ & 0xff) << 32;
+ break;
+ default:
+ tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
+ tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
+ << 32;
+ break;
+ }
+
+ return tx_tstamp_cycles;
+}
+
+static void
+igb_start_timecounters(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ (struct e1000_adapter *)dev->data->dev_private;
+ uint32_t incval = 1;
+ uint32_t shift = 0;
+ uint64_t mask = E1000_CYCLECOUNTER_MASK;
+
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ /* 32 LSB bits + 8 MSB bits = 40 bits */
+ mask = (1ULL << 40) - 1;
+ /* fall-through */
+ case e1000_i210:
+ case e1000_i211:
+ /*
+ * Start incrementing the register
+ * used to timestamp PTP packets.
+ */
+ E1000_WRITE_REG(hw, E1000_TIMINCA, incval);
+ break;
+ case e1000_82576:
+ incval = E1000_INCVALUE_82576;
+ shift = IGB_82576_TSYNC_SHIFT;
+ E1000_WRITE_REG(hw, E1000_TIMINCA,
+ E1000_INCPERIOD_82576 | incval);
+ break;
+ default:
+ /* Not supported */
+ return;
+ }
+
+ memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ adapter->systime_tc.cc_mask = mask;
+ adapter->systime_tc.cc_shift = shift;
+ adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+ adapter->rx_tstamp_tc.cc_mask = mask;
+ adapter->rx_tstamp_tc.cc_shift = shift;
+ adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+ adapter->tx_tstamp_tc.cc_mask = mask;
+ adapter->tx_tstamp_tc.cc_shift = shift;
+ adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+}
+
+static int
+igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct e1000_adapter *adapter =
+ (struct e1000_adapter *)dev->data->dev_private;
+
+ adapter->systime_tc.nsec += delta;
+ adapter->rx_tstamp_tc.nsec += delta;
+ adapter->tx_tstamp_tc.nsec += delta;
+
+ return 0;
+}
+
+static int
+igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ uint64_t ns;
+ struct e1000_adapter *adapter =
+ (struct e1000_adapter *)dev->data->dev_private;
+
+ ns = rte_timespec_to_ns(ts);
+
+ /* Set the timecounters to a new value. */
+ adapter->systime_tc.nsec = ns;
+ adapter->rx_tstamp_tc.nsec = ns;
+ adapter->tx_tstamp_tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ uint64_t ns, systime_cycles;
+ struct e1000_adapter *adapter =
+ (struct e1000_adapter *)dev->data->dev_private;
+
+ systime_cycles = igb_read_systime_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+igb_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl;
+ uint32_t tsauxc;
+
+ /* Stop the timesync system time. */
+ E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0);
+ /* Reset the timesync system time value. */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
+ /* fall-through */
+ case e1000_82576:
+ E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
+ E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
+ break;
+ default:
+ /* Not supported. */
+ return -ENOTSUP;
+ }
+
+ /* Enable system time for it isn't on by default. */
+ tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
+ tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
+ E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);
+
+ igb_start_timecounters(dev);
+
+ /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
+ (ETHER_TYPE_1588 |
+ E1000_ETQF_FILTER_ENABLE |
+ E1000_ETQF_1588));
+
+ /* Enable timestamping of received PTP packets. */
+ tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
+
+ /* Enable Timestamping of transmitted PTP packets. */
+ tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
+ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
+
+ return 0;
+}
+
+static int
+igb_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl;
+
+ /* Disable timestamping of transmitted PTP packets. */
+ tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
+ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
+
+ /* Disable timestamping of received PTP packets. */
+ tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
+
+ /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
+
+ /* Stop incrementating the System Time registers. */
+ E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
+
+ return 0;
+}
+
+static int
+igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ (struct e1000_adapter *)dev->data->dev_private;
+ uint32_t tsync_rxctl;
+ uint64_t rx_tstamp_cycles;
+ uint64_t ns;
+
+ tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
+ return -EINVAL;
+
+ rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ (struct e1000_adapter *)dev->data->dev_private;
+ uint32_t tsync_txctl;
+ uint64_t tx_tstamp_cycles;
+ uint64_t ns;
+
+ tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
+ return -EINVAL;
+
+ tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+
+ while ((reg_group = igb_regs[g_ind++]))
+ count += igb_reg_group_count(reg_group);
+
+ return count;
+}
+
+static int
+igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+
+ while ((reg_group = igbvf_regs[g_ind++]))
+ count += igb_reg_group_count(reg_group);
+
+ return count;
+}
+
+static int
+eth_igb_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+
+ if (data == NULL) {
+ regs->length = eth_igb_get_reg_length(dev);
+ regs->width = sizeof(uint32_t);
+ return 0;
+ }
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = igb_regs[g_ind++]))
+ count += igb_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
+igbvf_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+
+ if (data == NULL) {
+ regs->length = igbvf_get_reg_length(dev);
+ regs->width = sizeof(uint32_t);
+ return 0;
+ }
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = igbvf_regs[g_ind++]))
+ count += igb_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
+eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Return unit is byte count */
+ return hw->nvm.word_size * 2;
+}
+
+static int
+eth_igb_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
+ if ((first >= hw->nvm.word_size) ||
+ ((first + length) >= hw->nvm.word_size))
+ return -EINVAL;
+
+ in_eeprom->magic = hw->vendor_id |
+ ((uint32_t)hw->device_id << 16);
+
+ if ((nvm->ops.read) == NULL)
+ return -ENOTSUP;
+
+ return nvm->ops.read(hw, first, length, data);
+}
+
+static int
+eth_igb_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
+ if ((first >= hw->nvm.word_size) ||
+ ((first + length) >= hw->nvm.word_size))
+ return -EINVAL;
+
+ in_eeprom->magic = (uint32_t)hw->vendor_id |
+ ((uint32_t)hw->device_id << 16);
+
+ if ((nvm->ops.write) == NULL)
+ return -ENOTSUP;
+ return nvm->ops.write(hw, first, length, data);
+}
+
+static int
+eth_igb_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ uint32_t status = 0;
+ uint16_t sff8472_rev, addr_mode;
+ bool page_swap = false;
+
+ if (hw->phy.media_type == e1000_media_type_copper ||
+ hw->phy.media_type == e1000_media_type_unknown)
+ return -EOPNOTSUPP;
+
+ /* Check whether we support SFF-8472 or not */
+ status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
+ if (status)
+ return -EIO;
+
+ /* addressing mode is not supported */
+ status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
+ if (status)
+ return -EIO;
+
+ /* addressing mode is not supported */
+ if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
+ PMD_DRV_LOG(ERR,
+ "Address change required to access page 0xA2, "
+ "but not supported. Please report the module "
+ "type to the driver maintainers.\n");
+ page_swap = true;
+ }
+
+ if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
+ /* We have an SFP, but it does not support SFF-8472 */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have an SFP which supports a revision of SFF-8472 */
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+
+ return 0;
+}
+
+static int
+eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ uint32_t status = 0;
+ uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1];
+ u16 first_word, last_word;
+ int i = 0;
+
+ if (info->length == 0)
+ return -EINVAL;
+
+ first_word = info->offset >> 1;
+ last_word = (info->offset + info->length - 1) >> 1;
+
+ /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2,
+ &dataword[i]);
+ if (status) {
+ /* Error occurred while reading module */
+ return -EIO;
+ }
+
+ dataword[i] = rte_be_to_cpu_16(dataword[i]);
+ }
+
+ memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length);
+
+ return 0;
+}
+
+static int
+eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t vec = E1000_MISC_VEC_ID;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = E1000_RX_VEC_START;
+
+ uint32_t mask = 1 << (queue_id + vec);
+
+ E1000_WRITE_REG(hw, E1000_EIMC, mask);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t vec = E1000_MISC_VEC_ID;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = E1000_RX_VEC_START;
+
+ uint32_t mask = 1 << (queue_id + vec);
+ uint32_t regval;
+
+ regval = E1000_READ_REG(hw, E1000_EIMS);
+ E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
+ E1000_WRITE_FLUSH(hw);
+
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static void
+eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
+ uint8_t index, uint8_t offset)
+{
+ uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+
+ /* clear bits */
+ val &= ~((uint32_t)0xFF << offset);
+
+ /* write vector and valid bit */
+ val |= (msix_vector | E1000_IVAR_VALID) << offset;
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
+}
+
+static void
+eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp = 0;
+
+ if (hw->mac.type == e1000_82575) {
+ if (direction == 0)
+ tmp = E1000_EICR_RX_QUEUE0 << queue;
+ else if (direction == 1)
+ tmp = E1000_EICR_TX_QUEUE0 << queue;
+ E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
+ } else if (hw->mac.type == e1000_82576) {
+ if ((direction == 0) || (direction == 1))
+ eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
+ ((queue & 0x8) << 1) +
+ 8 * direction);
+ } else if ((hw->mac.type == e1000_82580) ||
+ (hw->mac.type == e1000_i350) ||
+ (hw->mac.type == e1000_i354) ||
+ (hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) {
+ if ((direction == 0) || (direction == 1))
+ eth_igb_write_ivar(hw, msix_vector,
+ queue >> 1,
+ ((queue & 0x1) << 4) +
+ 8 * direction);
+ }
+}
+
+/* Sets up the hardware to generate MSI-X interrupts properly
+ * @hw
+ * board private structure
+ */
+static void
+eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
+{
+ int queue_id;
+ uint32_t tmpval, regval, intr_mask;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t vec = E1000_MISC_VEC_ID;
+ uint32_t base = E1000_MISC_VEC_ID;
+ uint32_t misc_shift = 0;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ /* won't configure msix register if no mapping is done
+ * between intr vector and event fd
+ */
+ if (!rte_intr_dp_is_en(intr_handle))
+ return;
+
+ if (rte_intr_allow_others(intr_handle)) {
+ vec = base = E1000_RX_VEC_START;
+ misc_shift = 1;
+ }
+
+ /* set interrupt vector for other causes */
+ if (hw->mac.type == e1000_82575) {
+ tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* enable MSI-X PBA support */
+ tmpval |= E1000_CTRL_EXT_PBA_CLR;
+
+ /* Auto-Mask interrupts upon ICR read */
+ tmpval |= E1000_CTRL_EXT_EIAME;
+ tmpval |= E1000_CTRL_EXT_IRCA;
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);
+
+ /* enable msix_other interrupt */
+ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
+ regval = E1000_READ_REG(hw, E1000_EIAC);
+ E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
+ regval = E1000_READ_REG(hw, E1000_EIAM);
+ E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
+ } else if ((hw->mac.type == e1000_82576) ||
+ (hw->mac.type == e1000_82580) ||
+ (hw->mac.type == e1000_i350) ||
+ (hw->mac.type == e1000_i354) ||
+ (hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) {
+ /* turn on MSI-X capability first */
+ E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
+ E1000_GPIE_PBA | E1000_GPIE_EIAME |
+ E1000_GPIE_NSICR);
+ intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
+ misc_shift;
+ regval = E1000_READ_REG(hw, E1000_EIAC);
+ E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
+
+ /* enable msix_other interrupt */
+ regval = E1000_READ_REG(hw, E1000_EIMS);
+ E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
+ tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
+ E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
+ }
+
+ /* use EIAM to auto-mask when MSI-X interrupt
+ * is asserted, this saves a register write for every interrupt
+ */
+ intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
+ misc_shift;
+ regval = E1000_READ_REG(hw, E1000_EIAM);
+ E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
+
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
+ eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+
+ E1000_WRITE_FLUSH(hw);
+}
+
+/* restore n-tuple filter */
+static inline void
+igb_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter *p_5tuple;
+ struct e1000_2tuple_filter *p_2tuple;
+
+ TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) {
+ igb_inject_5tuple_filter_82576(dev, p_5tuple);
+ }
+
+ TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) {
+ igb_inject_2uple_filter(dev, p_2tuple);
+ }
+}
+
+/* restore SYN filter */
+static inline void
+igb_syn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t synqf;
+
+ synqf = filter_info->syn_info;
+
+ if (synqf & E1000_SYN_FILTER_ENABLE) {
+ E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/* restore ethernet type filter */
+static inline void
+igb_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ E1000_WRITE_REG(hw, E1000_ETQF(i),
+ filter_info->ethertype_filters[i].etqf);
+ E1000_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* restore flex byte filter */
+static inline void
+igb_flex_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_flex_filter *flex_filter;
+
+ TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) {
+ igb_inject_flex_filter(dev, flex_filter);
+ }
+}
+
+/* restore rss filter */
+static inline void
+igb_rss_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->rss_info.conf.queue_num)
+ igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
+}
+
+/* restore all types filter */
+static int
+igb_filter_restore(struct rte_eth_dev *dev)
+{
+ igb_ntuple_filter_restore(dev);
+ igb_ethertype_filter_restore(dev);
+ igb_syn_filter_restore(dev);
+ igb_flex_filter_restore(dev);
+ igb_rss_filter_restore(dev);
+
+ return 0;
+}
+
+RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
+
+/* see e1000_logs.c */
+RTE_INIT(e1000_init_log)
+{
+ e1000_igb_init_log();
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/igb_flow.c b/src/spdk/dpdk/drivers/net/e1000/igb_flow.c
new file mode 100644
index 00000000..07385291
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/igb_flow.c
@@ -0,0 +1,1911 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "e1000_logs.h"
+#include "base/e1000_api.h"
+#include "e1000_ethdev.h"
+
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index) \
+ do { \
+ item = (pattern) + (index); \
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
+ (index)++; \
+ item = (pattern) + (index); \
+ } \
+ } while (0)
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = (actions) + (index); \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+ (index)++; \
+ act = (actions) + (index); \
+ } \
+ } while (0)
+
+#define IGB_FLEX_RAW_NUM 12
+
+/**
+ * Please aware there's an asumption for all the parsers.
+ * rte_flow_item is using big endian, rte_flow_attr and
+ * rte_flow_action are using CPU order.
+ * Because the pattern is used to describe the packets,
+ * normally the packets should use network order.
+ */
+
+/**
+ * Parse the rule to see if it is a n-tuple rule.
+ * And get the n-tuple filter info BTW.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP or SCTP
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * next_proto_id 17 0xFF
+ * UDP/TCP/ src_port 80 0xFFFF
+ * SCTP dst_port 80 0xFFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item can be MAC or IPv4 */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* if the first item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ /* get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ ipv4_mask = item->mask;
+ /**
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+
+ /* check if the next not void item is TCP or UDP or SCTP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* Not supported last point for range */
+ if (item->last) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* get the TCP/UDP/SCTP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ if (item->spec && item->mask) {
+ tcp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ if (tcp_mask->hdr.tcp_flags == 0xFF) {
+ filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else if (!tcp_mask->hdr.tcp_flags) {
+ filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ tcp_spec = item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+ }
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ if (item->spec && item->mask) {
+ udp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ filter->src_port_mask = udp_mask->hdr.src_port;
+
+ udp_spec = item->spec;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ filter->src_port = udp_spec->hdr.src_port;
+ }
+ } else {
+ if (item->spec && item->mask) {
+ sctp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = (const struct rte_flow_item_sctp *)
+ item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
+ }
+ }
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /**
+ * n-tuple only supports forwarding,
+ * check if the first not void action is QUEUE.
+ */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ item, "Not supported action.");
+ return -rte_errno;
+ }
+ filter->queue =
+ ((const struct rte_flow_action_queue *)act->conf)->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+ filter->priority = (uint16_t)attr->priority;
+
+ return 0;
+}
+
+/* a specific function for igb because the flags is specific */
+static int
+igb_parse_ntuple_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ /* Igb doesn't support many priorities. */
+ if (filter->priority > E1000_2TUPLE_MAX_PRI) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Priority not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if (hw->mac.type == e1000_82576) {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by ntuple filter");
+ return -rte_errno;
+ }
+ filter->flags |= RTE_5TUPLE_FLAGS;
+ } else {
+ if (filter->src_ip_mask || filter->dst_ip_mask ||
+ filter->src_port_mask) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "only two tuple are "
+ "supported by this filter");
+ return -rte_errno;
+ }
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by ntuple filter");
+ return -rte_errno;
+ }
+ filter->flags |= RTE_2TUPLE_FLAGS;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a ethertype rule.
+ * And get the ethertype filter info BTW.
+ * pattern:
+ * The first not void item can be ETH.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH type 0x0807 0xFFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* Parse pattern */
+ index = 0;
+
+ /* The first non-void item should be MAC. */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ether address mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ /* Check if the next non-void item is END. */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter.");
+ return -rte_errno;
+ }
+
+ /* Parse action */
+
+ index = 0;
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* Parse attr */
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+igb_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_ethertype_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ if (hw->mac.type == e1000_82576) {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
+ memset(filter, 0, sizeof(
+ struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not supported "
+ "by ethertype filter");
+ return -rte_errno;
+ }
+ } else {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(
+ struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not supported "
+ "by ethertype filter");
+ return -rte_errno;
+ }
+ }
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "IPv4/IPv6 not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "mac compare is unsupported");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "drop option is unsupported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a TCP SYN rule.
+ * And get the TCP SYN filter info BTW.
+ * pattern:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4 or IPV6.
+ * The third not void item must be TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * TCP tcp_flags 0x02 0xFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* if the item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN address mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is IPv4 or IPv6 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* if the item is IP, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is TCP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. Only support SYN. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+ if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+ tcp_mask->hdr.src_port ||
+ tcp_mask->hdr.dst_port ||
+ tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Support 2 priorities, the lowest or highest. */
+ if (!attr->priority) {
+ filter->hig_pri = 0;
+ } else if (attr->priority == (uint32_t)~0U) {
+ filter->hig_pri = 1;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+igb_parse_syn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_syn_filter(attr, pattern,
+ actions, filter, error);
+
+ if (hw->mac.type == e1000_82576) {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by syn filter");
+ return -rte_errno;
+ }
+ } else {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a flex byte rule.
+ * And get the flex byte filter info BTW.
+ * pattern:
+ * The first not void item must be RAW.
+ * The second not void item can be RAW or END.
+ * The third not void item can be RAW or END.
+ * The last not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * RAW relative 0 0x1
+ * offset 0 0xFFFFFFFF
+ * pattern {0x08, 0x06} {0xFF, 0xFF}
+ * RAW relative 1 0x1
+ * offset 100 0xFFFFFFFF
+ * pattern {0x11, 0x22, 0x33} {0xFF, 0xFF, 0xFF}
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_flex_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_flex_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_raw *raw_spec;
+ const struct rte_flow_item_raw *raw_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index, i, offset, total_offset;
+ uint32_t max_offset = 0;
+ int32_t shift, j, raw_index = 0;
+ int32_t relative[IGB_FLEX_RAW_NUM] = {0};
+ int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+item_loop:
+
+ /* the first not void item should be RAW */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ raw_spec = item->spec;
+ raw_mask = item->mask;
+
+ if (!raw_mask->length ||
+ !raw_mask->relative) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+
+ if (raw_mask->offset)
+ offset = raw_spec->offset;
+ else
+ offset = 0;
+
+ for (j = 0; j < raw_spec->length; j++) {
+ if (raw_mask->pattern[j] != 0xFF) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+ }
+
+ total_offset = 0;
+
+ if (raw_spec->relative) {
+ for (j = raw_index; j > 0; j--) {
+ total_offset += raw_offset[j - 1];
+ if (!relative[j - 1])
+ break;
+ }
+ if (total_offset + raw_spec->length + offset > max_offset)
+ max_offset = total_offset + raw_spec->length + offset;
+ } else {
+ if (raw_spec->length + offset > max_offset)
+ max_offset = raw_spec->length + offset;
+ }
+
+ if ((raw_spec->length + offset + total_offset) >
+ RTE_FLEX_FILTER_MAXLEN) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+
+ if (raw_spec->relative == 0) {
+ for (j = 0; j < raw_spec->length; j++)
+ filter->bytes[offset + j] =
+ raw_spec->pattern[j];
+ j = offset / CHAR_BIT;
+ shift = offset % CHAR_BIT;
+ } else {
+ for (j = 0; j < raw_spec->length; j++)
+ filter->bytes[total_offset + offset + j] =
+ raw_spec->pattern[j];
+ j = (total_offset + offset) / CHAR_BIT;
+ shift = (total_offset + offset) % CHAR_BIT;
+ }
+
+ i = 0;
+
+ for ( ; shift < CHAR_BIT; shift++) {
+ filter->mask[j] |= (0x80 >> shift);
+ i++;
+ if (i == raw_spec->length)
+ break;
+ if (shift == (CHAR_BIT - 1)) {
+ j++;
+ shift = -1;
+ }
+ }
+
+ relative[raw_index] = raw_spec->relative;
+ raw_offset[raw_index] = offset + raw_spec->length;
+ raw_index++;
+
+ /* check if the next not void item is RAW */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+
+ /* go back to parser */
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+ /* if the item is RAW, the content should be parse */
+ goto item_loop;
+ }
+
+ filter->len = RTE_ALIGN(max_offset, 8);
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+
+ filter->priority = (uint16_t)attr->priority;
+
+ return 0;
+}
+
+static int
+igb_parse_flex_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_flex_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ ret = cons_parse_flex_filter(attr, pattern,
+ actions, filter, error);
+
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not supported by flex filter");
+ return -rte_errno;
+ }
+
+ if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
+ filter->len % sizeof(uint64_t) != 0) {
+ PMD_DRV_LOG(ERR, "filter's length is out of range");
+ return -EINVAL;
+ }
+
+ if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
+ PMD_DRV_LOG(ERR, "filter's priority is out of range");
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+igb_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct igb_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ uint16_t n, index;
+
+ /**
+ * rss only supports forwarding,
+ * check if the first not void action is RSS.
+ */
+ index = 0;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rss = (const struct rte_flow_action_rss *)act->conf;
+
+ if (!rss || !rss->queue_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->queue_num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key must be exactly 40 bytes");
+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (igb_rss_conf_init(rss_conf, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rss_conf, 0, sizeof(struct rte_eth_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Create a flow rule.
+ * Theorically one rule can match more than one filters.
+ * We will let it use the filter which it hitt first.
+ * So, the sequence matters.
+ */
+static struct rte_flow *
+igb_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_flex_filter flex_filter;
+ struct igb_rte_flow_rss_conf rss_conf;
+ struct rte_flow *flow = NULL;
+ struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+ struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+ struct igb_eth_syn_filter_ele *syn_filter_ptr;
+ struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_rss_conf_ele *rss_filter_ptr;
+ struct igb_flow_mem *igb_flow_mem_ptr;
+
+ flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return (struct rte_flow *)flow;
+ }
+ igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
+ sizeof(struct igb_flow_mem), 0);
+ if (!igb_flow_mem_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ rte_free(flow);
+ return NULL;
+ }
+ igb_flow_mem_ptr->flow = flow;
+ igb_flow_mem_ptr->dev = dev;
+ TAILQ_INSERT_TAIL(&igb_flow_list,
+ igb_flow_mem_ptr, entries);
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = igb_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret) {
+ ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+ if (!ret) {
+ ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
+ sizeof(struct igb_ntuple_filter_ele), 0);
+ if (!ntuple_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+
+ rte_memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ flow->rule = ntuple_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = igb_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret) {
+ ret = igb_add_del_ethertype_filter(dev,
+ &ethertype_filter, TRUE);
+ if (!ret) {
+ ethertype_filter_ptr = rte_zmalloc(
+ "igb_ethertype_filter",
+ sizeof(struct igb_ethertype_filter_ele), 0);
+ if (!ethertype_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+
+ rte_memcpy(&ethertype_filter_ptr->filter_info,
+ &ethertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ flow->rule = ethertype_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = igb_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret) {
+ ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
+ if (!ret) {
+ syn_filter_ptr = rte_zmalloc("igb_syn_filter",
+ sizeof(struct igb_eth_syn_filter_ele), 0);
+ if (!syn_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+
+ rte_memcpy(&syn_filter_ptr->filter_info,
+ &syn_filter,
+ sizeof(struct rte_eth_syn_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ flow->rule = syn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_SYN;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+ ret = igb_parse_flex_filter(dev, attr, pattern,
+ actions, &flex_filter, error);
+ if (!ret) {
+ ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
+ if (!ret) {
+ flex_filter_ptr = rte_zmalloc("igb_flex_filter",
+ sizeof(struct igb_flex_filter_ele), 0);
+ if (!flex_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+
+ rte_memcpy(&flex_filter_ptr->filter_info,
+ &flex_filter,
+ sizeof(struct rte_eth_flex_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_flex_list,
+ flex_filter_ptr, entries);
+ flow->rule = flex_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
+ return flow;
+ }
+ }
+
+ memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ ret = igb_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
+ if (!ret) {
+ ret = igb_config_rss_filter(dev, &rss_conf, TRUE);
+ if (!ret) {
+ rss_filter_ptr = rte_zmalloc("igb_rss_filter",
+ sizeof(struct igb_rss_conf_ele), 0);
+ if (!rss_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ igb_rss_conf_init(&rss_filter_ptr->filter_info,
+ &rss_conf.conf);
+ TAILQ_INSERT_TAIL(&igb_filter_rss_list,
+ rss_filter_ptr, entries);
+ flow->rule = rss_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_HASH;
+ return flow;
+ }
+ }
+
+out:
+ TAILQ_REMOVE(&igb_flow_list,
+ igb_flow_mem_ptr, entries);
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(igb_flow_mem_ptr);
+ rte_free(flow);
+ return NULL;
+}
+
+/**
+ * Check if the flow rule is supported by igb.
+ * It only checkes the format. Don't guarantee the rule can be programmed into
+ * the HW. Because there can be no enough room for the rule.
+ */
+static int
+igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_flex_filter flex_filter;
+ struct igb_rte_flow_rss_conf rss_conf;
+ int ret;
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = igb_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = igb_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = igb_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+ ret = igb_parse_flex_filter(dev, attr, pattern,
+ actions, &flex_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ ret = igb_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
+
+ return ret;
+}
+
+/* Destroy a flow rule on igb. */
+static int
+igb_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *pmd_flow = flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+ struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+ struct igb_eth_syn_filter_ele *syn_filter_ptr;
+ struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_flow_mem *igb_flow_mem_ptr;
+ struct igb_rss_conf_ele *rss_filter_ptr;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
+ pmd_flow->rule;
+ ret = igb_add_del_ntuple_filter(dev,
+ &ntuple_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
+ pmd_flow->rule;
+ ret = igb_add_del_ethertype_filter(dev,
+ &ethertype_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ ret = eth_igb_syn_filter_set(dev,
+ &syn_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ flex_filter_ptr = (struct igb_flex_filter_ele *)
+ pmd_flow->rule;
+ ret = eth_igb_add_del_flex_filter(dev,
+ &flex_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_flex_list,
+ flex_filter_ptr, entries);
+ rte_free(flex_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_HASH:
+ rss_filter_ptr = (struct igb_rss_conf_ele *)
+ pmd_flow->rule;
+ ret = igb_config_rss_filter(dev,
+ &rss_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_rss_list,
+ rss_filter_ptr, entries);
+ rte_free(rss_filter_ptr);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to destroy flow");
+ return ret;
+ }
+
+ TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
+ if (igb_flow_mem_ptr->flow == pmd_flow) {
+ TAILQ_REMOVE(&igb_flow_list,
+ igb_flow_mem_ptr, entries);
+ rte_free(igb_flow_mem_ptr);
+ }
+ }
+ rte_free(flow);
+
+ return ret;
+}
+
+/* remove all the n-tuple filters */
+static void
+igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter *p_5tuple;
+ struct e1000_2tuple_filter *p_2tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+ igb_delete_5tuple_filter_82576(dev, p_5tuple);
+
+ while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
+ igb_delete_2tuple_filter(dev, p_2tuple);
+}
+
+/* remove all the ether type filters */
+static void
+igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ (void)igb_ethertype_filter_remove(filter_info,
+ (uint8_t)i);
+ E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
+ E1000_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* remove the SYN filter */
+static void
+igb_clear_syn_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
+ filter_info->syn_info = 0;
+ E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/* remove all the flex filters */
+static void
+igb_clear_all_flex_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_flex_filter *flex_filter;
+
+ while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
+ igb_remove_flex_filter(dev, flex_filter);
+}
+
+/* remove the rss filter */
+static void
+igb_clear_rss_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter->rss_info.conf.queue_num)
+ igb_config_rss_filter(dev, &filter->rss_info, FALSE);
+}
+
+void
+igb_filterlist_flush(struct rte_eth_dev *dev)
+{
+ struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+ struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+ struct igb_eth_syn_filter_ele *syn_filter_ptr;
+ struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_rss_conf_ele *rss_filter_ptr;
+ struct igb_flow_mem *igb_flow_mem_ptr;
+ enum rte_filter_type filter_type;
+ struct rte_flow *pmd_flow;
+
+ TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
+ if (igb_flow_mem_ptr->dev == dev) {
+ pmd_flow = igb_flow_mem_ptr->flow;
+ filter_type = pmd_flow->filter_type;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr =
+ (struct igb_ntuple_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr =
+ (struct igb_ethertype_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr =
+ (struct igb_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ flex_filter_ptr =
+ (struct igb_flex_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_flex_list,
+ flex_filter_ptr, entries);
+ rte_free(flex_filter_ptr);
+ break;
+ case RTE_ETH_FILTER_HASH:
+ rss_filter_ptr =
+ (struct igb_rss_conf_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_rss_list,
+ rss_filter_ptr, entries);
+ rte_free(rss_filter_ptr);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type"
+ "(%d) not supported", filter_type);
+ break;
+ }
+ TAILQ_REMOVE(&igb_flow_list,
+ igb_flow_mem_ptr,
+ entries);
+ rte_free(igb_flow_mem_ptr->flow);
+ rte_free(igb_flow_mem_ptr);
+ }
+ }
+}
+
+/* Destroy all flow rules associated with a port on igb. */
+static int
+igb_flow_flush(struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow_error *error)
+{
+ igb_clear_all_ntuple_filter(dev);
+ igb_clear_all_ethertype_filter(dev);
+ igb_clear_syn_filter(dev);
+ igb_clear_all_flex_filter(dev);
+ igb_clear_rss_filter(dev);
+ igb_filterlist_flush(dev);
+
+ return 0;
+}
+
+const struct rte_flow_ops igb_flow_ops = {
+ .validate = igb_flow_validate,
+ .create = igb_flow_create,
+ .destroy = igb_flow_destroy,
+ .flush = igb_flow_flush,
+};
diff --git a/src/spdk/dpdk/drivers/net/e1000/igb_pf.c b/src/spdk/dpdk/drivers/net/e1000/igb_pf.c
new file mode 100644
index 00000000..b9f2e539
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/igb_pf.c
@@ -0,0 +1,512 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_bus_pci.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_memcpy.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+
+#include "base/e1000_defines.h"
+#include "base/e1000_regs.h"
+#include "base/e1000_hw.h"
+#include "e1000_ethdev.h"
+
+static inline uint16_t
+dev_num_vf(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ return pci_dev->max_vfs;
+}
+
+static inline
+int igb_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
+{
+ unsigned char vf_mac_addr[ETHER_ADDR_LEN];
+ struct e1000_vf_info *vfinfo =
+ *E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ uint16_t vfn;
+
+ for (vfn = 0; vfn < vf_num; vfn++) {
+ eth_random_addr(vf_mac_addr);
+ /* keep the random address as default */
+ memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
+ ETHER_ADDR_LEN);
+ }
+
+ return 0;
+}
+
+static inline int
+igb_mb_intr_setup(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ intr->mask |= E1000_ICR_VMMB;
+
+ return 0;
+}
+
+void igb_pf_host_init(struct rte_eth_dev *eth_dev)
+{
+ struct e1000_vf_info **vfinfo =
+ E1000_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ uint16_t vf_num;
+ uint8_t nb_queue;
+
+ RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
+ if (0 == (vf_num = dev_num_vf(eth_dev)))
+ return;
+
+ if (hw->mac.type == e1000_i350)
+ nb_queue = 1;
+ else if(hw->mac.type == e1000_82576)
+ /* per datasheet, it should be 2, but 1 seems correct */
+ nb_queue = 1;
+ else
+ return;
+
+ *vfinfo = rte_zmalloc("vf_info", sizeof(struct e1000_vf_info) * vf_num, 0);
+ if (*vfinfo == NULL)
+ rte_panic("Cannot allocate memory for private VF data\n");
+
+ RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
+ RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
+
+ igb_vf_perm_addr_gen(eth_dev, vf_num);
+
+ /* set mb interrupt mask */
+ igb_mb_intr_setup(eth_dev);
+
+ return;
+}
+
+void igb_pf_host_uninit(struct rte_eth_dev *dev)
+{
+ struct e1000_vf_info **vfinfo;
+ uint16_t vf_num;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vfinfo = E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+
+ RTE_ETH_DEV_SRIOV(dev).active = 0;
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 0;
+ RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx = 0;
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 0;
+
+ vf_num = dev_num_vf(dev);
+ if (vf_num == 0)
+ return;
+
+ rte_free(*vfinfo);
+ *vfinfo = NULL;
+}
+
+#define E1000_RAH_POOLSEL_SHIFT (18)
+int igb_pf_host_configure(struct rte_eth_dev *eth_dev)
+{
+ uint32_t vtctl;
+ uint16_t vf_num;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ uint32_t vlanctrl;
+ int i;
+ uint32_t rah;
+
+ if (0 == (vf_num = dev_num_vf(eth_dev)))
+ return -1;
+
+ /* enable VMDq and set the default pool for PF */
+ vtctl = E1000_READ_REG(hw, E1000_VT_CTL);
+ vtctl &= ~E1000_VT_CTL_DEFAULT_POOL_MASK;
+ vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
+ << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+ vtctl |= E1000_VT_CTL_VM_REPL_EN;
+ E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl);
+
+ /* Enable pools reserved to PF only */
+ E1000_WRITE_REG(hw, E1000_VFRE, (~0U) << vf_num);
+ E1000_WRITE_REG(hw, E1000_VFTE, (~0U) << vf_num);
+
+ /* PFDMA Tx General Switch Control Enables VMDQ loopback */
+ if (hw->mac.type == e1000_i350)
+ E1000_WRITE_REG(hw, E1000_TXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN);
+ else
+ E1000_WRITE_REG(hw, E1000_DTXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN);
+
+ /* clear VMDq map to perment rar 0 */
+ rah = E1000_READ_REG(hw, E1000_RAH(0));
+ rah &= ~ (0xFF << E1000_RAH_POOLSEL_SHIFT);
+ E1000_WRITE_REG(hw, E1000_RAH(0), rah);
+
+ /* clear VMDq map to scan rar 32 */
+ rah = E1000_READ_REG(hw, E1000_RAH(hw->mac.rar_entry_count));
+ rah &= ~ (0xFF << E1000_RAH_POOLSEL_SHIFT);
+ E1000_WRITE_REG(hw, E1000_RAH(hw->mac.rar_entry_count), rah);
+
+ /* set VMDq map to default PF pool */
+ rah = E1000_READ_REG(hw, E1000_RAH(0));
+ rah |= (0x1 << (RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx +
+ E1000_RAH_POOLSEL_SHIFT));
+ E1000_WRITE_REG(hw, E1000_RAH(0), rah);
+
+ /*
+ * enable vlan filtering and allow all vlan tags through
+ */
+ vlanctrl = E1000_READ_REG(hw, E1000_RCTL);
+ vlanctrl |= E1000_RCTL_VFE ; /* enable vlan filters */
+ E1000_WRITE_REG(hw, E1000_RCTL, vlanctrl);
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < IGB_VFTA_SIZE; i++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, 0xFFFFFFFF);
+ }
+
+ /* Enable/Disable MAC Anti-Spoofing */
+ e1000_vmdq_set_anti_spoofing_pf(hw, FALSE, vf_num);
+
+ return 0;
+}
+
+static void
+set_rx_mode(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *dev_data = dev->data;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fctrl, vmolr = E1000_VMOLR_BAM | E1000_VMOLR_AUPE;
+ uint16_t vfn = dev_num_vf(dev);
+
+ /* Check for Promiscuous and All Multicast modes */
+ fctrl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* set all bits that we expect to always be set */
+ fctrl &= ~E1000_RCTL_SBP; /* disable store-bad-packets */
+ fctrl |= E1000_RCTL_BAM;
+
+ /* clear the bits we are changing the status of */
+ fctrl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+
+ if (dev_data->promiscuous) {
+ fctrl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+ } else {
+ if (dev_data->all_multicast) {
+ fctrl |= E1000_RCTL_MPE;
+ vmolr |= E1000_VMOLR_MPME;
+ } else {
+ vmolr |= E1000_VMOLR_ROMPE;
+ }
+ }
+
+ if ((hw->mac.type == e1000_82576) ||
+ (hw->mac.type == e1000_i350)) {
+ vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) &
+ ~(E1000_VMOLR_MPME | E1000_VMOLR_ROMPE |
+ E1000_VMOLR_ROPE);
+ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
+ }
+
+ E1000_WRITE_REG(hw, E1000_RCTL, fctrl);
+}
+
+static inline void
+igb_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_info *vfinfo =
+ *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ uint32_t vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));
+
+ vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE |
+ E1000_VMOLR_BAM | E1000_VMOLR_AUPE);
+ E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr);
+
+ E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0);
+
+ /* reset multicast table array for vf */
+ vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* reset rx mode */
+ set_rx_mode(dev);
+}
+
+static inline void
+igb_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* enable transmit and receive for vf */
+ reg = E1000_READ_REG(hw, E1000_VFTE);
+ reg |= (reg | (1 << vf));
+ E1000_WRITE_REG(hw, E1000_VFTE, reg);
+
+ reg = E1000_READ_REG(hw, E1000_VFRE);
+ reg |= (reg | (1 << vf));
+ E1000_WRITE_REG(hw, E1000_VFRE, reg);
+
+ igb_vf_reset_event(dev, vf);
+}
+
+static int
+igb_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_info *vfinfo =
+ *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
+ int rar_entry = hw->mac.rar_entry_count - (vf + 1);
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+ uint32_t rah;
+
+ igb_vf_reset_msg(dev, vf);
+
+ hw->mac.ops.rar_set(hw, vf_mac, rar_entry);
+ rah = E1000_READ_REG(hw, E1000_RAH(rar_entry));
+ rah |= (0x1 << (vf + E1000_RAH_POOLSEL_SHIFT));
+ E1000_WRITE_REG(hw, E1000_RAH(rar_entry), rah);
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
+ rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
+ e1000_write_mbx(hw, msgbuf, 3, vf);
+
+ return 0;
+}
+
+static int
+igb_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_info *vfinfo =
+ *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ int rar_entry = hw->mac.rar_entry_count - (vf + 1);
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+ int rah;
+
+ if (is_unicast_ether_addr((struct ether_addr *)new_mac)) {
+ if (!is_zero_ether_addr((struct ether_addr *)new_mac))
+ rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
+ sizeof(vfinfo[vf].vf_mac_addresses));
+ hw->mac.ops.rar_set(hw, new_mac, rar_entry);
+ rah = E1000_READ_REG(hw, E1000_RAH(rar_entry));
+ rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + vf));
+ E1000_WRITE_REG(hw, E1000_RAH(rar_entry), rah);
+ return 0;
+ }
+ return -1;
+}
+
+static int
+igb_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
+{
+ int i;
+ uint32_t vector_bit;
+ uint32_t vector_reg;
+ uint32_t mta_reg;
+ int entries = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >>
+ E1000_VT_MSGINFO_SHIFT;
+ uint16_t *hash_list = (uint16_t *)&msgbuf[1];
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_info *vfinfo =
+ *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+
+ /* only so many hash values supported */
+ entries = RTE_MIN(entries, E1000_MAX_VF_MC_ENTRIES);
+
+ /*
+ * salt away the number of multi cast addresses assigned
+ * to this VF for later use to restore when the PF multi cast
+ * list changes
+ */
+ vfinfo->num_vf_mc_hashes = (uint16_t)entries;
+
+ /*
+ * VFs are limited to using the MTA hash table for their multicast
+ * addresses
+ */
+ for (i = 0; i < entries; i++) {
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+ }
+
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
+ mta_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, vector_reg);
+ mta_reg |= (1 << vector_bit);
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, vector_reg, mta_reg);
+ }
+
+ return 0;
+}
+
+static int
+igb_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ int add, vid;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_info *vfinfo =
+ *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ uint32_t vid_idx, vid_bit, vfta;
+
+ add = (msgbuf[0] & E1000_VT_MSGINFO_MASK)
+ >> E1000_VT_MSGINFO_SHIFT;
+ vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
+
+ if (add)
+ vfinfo[vf].vlan_count++;
+ else if (vfinfo[vf].vlan_count)
+ vfinfo[vf].vlan_count--;
+
+ vid_idx = (uint32_t)((vid >> E1000_VFTA_ENTRY_SHIFT) &
+ E1000_VFTA_ENTRY_MASK);
+ vid_bit = (uint32_t)(1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
+ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
+ if (add)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+igb_vf_set_rlpml(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t rlpml = msgbuf[1] & E1000_VMOLR_RLPML_MASK;
+ uint32_t max_frame = rlpml + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t vmolr;
+
+ if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ return -1;
+
+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));
+
+ vmolr &= ~E1000_VMOLR_RLPML_MASK;
+ vmolr |= rlpml;
+
+ /* Enable Long Packet support */
+ vmolr |= E1000_VMOLR_LPE;
+
+ E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
+{
+ uint16_t mbx_size = E1000_VFMAILBOX_SIZE;
+ uint32_t msgbuf[E1000_VFMAILBOX_SIZE];
+ int32_t retval;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ retval = e1000_read_mbx(hw, msgbuf, mbx_size, vf);
+ if (retval) {
+ PMD_INIT_LOG(ERR, "Error mbx recv msg from VF %d", vf);
+ return retval;
+ }
+
+ /* do nothing with the message already processed */
+ if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
+ return retval;
+
+ /* flush the ack before we write any messages back */
+ E1000_WRITE_FLUSH(hw);
+
+ /* perform VF reset */
+ if (msgbuf[0] == E1000_VF_RESET) {
+ return igb_vf_reset(dev, vf, msgbuf);
+ }
+
+ /* check & process VF to PF mailbox message */
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case E1000_VF_SET_MAC_ADDR:
+ retval = igb_vf_set_mac_addr(dev, vf, msgbuf);
+ break;
+ case E1000_VF_SET_MULTICAST:
+ retval = igb_vf_set_multicast(dev, vf, msgbuf);
+ break;
+ case E1000_VF_SET_LPE:
+ retval = igb_vf_set_rlpml(dev, vf, msgbuf);
+ break;
+ case E1000_VF_SET_VLAN:
+ retval = igb_vf_set_vlan(dev, vf, msgbuf);
+ break;
+ default:
+ PMD_INIT_LOG(DEBUG, "Unhandled Msg %8.8x",
+ (unsigned) msgbuf[0]);
+ retval = E1000_ERR_MBX;
+ break;
+ }
+
+ /* response the VF according to the message process result */
+ if (retval)
+ msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
+
+ e1000_write_mbx(hw, msgbuf, 1, vf);
+
+ return retval;
+}
+
+static inline void
+igb_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
+{
+ uint32_t msg = E1000_VT_MSGTYPE_NACK;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ e1000_write_mbx(hw, &msg, 1, vf);
+}
+
+void igb_pf_mbx_process(struct rte_eth_dev *eth_dev)
+{
+ uint16_t vf;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
+ /* check & process vf function level reset */
+ if (!e1000_check_for_rst(hw, vf))
+ igb_vf_reset_event(eth_dev, vf);
+
+ /* check & process vf mailbox messages */
+ if (!e1000_check_for_msg(hw, vf))
+ igb_rcv_msg_from_vf(eth_dev, vf);
+
+ /* check & process acks from vf */
+ if (!e1000_check_for_ack(hw, vf))
+ igb_rcv_ack_from_vf(eth_dev, vf);
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/igb_regs.h b/src/spdk/dpdk/drivers/net/e1000/igb_regs.h
new file mode 100644
index 00000000..cacd49c7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/igb_regs.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Intel Corporation
+ */
+#ifndef _IGB_REGS_H_
+#define _IGB_REGS_H_
+
+#include "e1000_ethdev.h"
+
+struct reg_info {
+ uint32_t base_addr;
+ uint32_t count;
+ uint32_t stride;
+ const char *name;
+};
+
+static const struct reg_info igb_regs_general[] = {
+ {E1000_CTRL, 1, 1, "E1000_CTRL"},
+ {E1000_STATUS, 1, 1, "E1000_STATUS"},
+ {E1000_CTRL_EXT, 1, 1, "E1000_CTRL_EXT"},
+ {E1000_MDIC, 1, 1, "E1000_MDIC"},
+ {E1000_SCTL, 1, 1, "E1000_SCTL"},
+ {E1000_CONNSW, 1, 1, "E1000_CONNSW"},
+ {E1000_VET, 1, 1, "E1000_VET"},
+ {E1000_LEDCTL, 1, 1, "E1000_LEDCTL"},
+ {E1000_PBA, 1, 1, "E1000_PBA"},
+ {E1000_PBS, 1, 1, "E1000_PBS"},
+ {E1000_FRTIMER, 1, 1, "E1000_FRTIMER"},
+ {E1000_TCPTIMER, 1, 1, "E1000_TCPTIMER"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_nvm[] = {
+ {E1000_EECD, 1, 1, "E1000_EECD"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_interrupt[] = {
+ {E1000_EICS, 1, 1, "E1000_EICS"},
+ {E1000_EIMS, 1, 1, "E1000_EIMS"},
+ {E1000_EIMC, 1, 1, "E1000_EIMC"},
+ {E1000_EIAC, 1, 1, "E1000_EIAC"},
+ {E1000_EIAM, 1, 1, "E1000_EIAM"},
+ {E1000_ICS, 1, 1, "E1000_ICS"},
+ {E1000_IMS, 1, 1, "E1000_IMS"},
+ {E1000_IMC, 1, 1, "E1000_IMC"},
+ {E1000_IAC, 1, 1, "E1000_IAC"},
+ {E1000_IAM, 1, 1, "E1000_IAM"},
+ {E1000_IMIRVP, 1, 1, "E1000_IMIRVP"},
+ {E1000_EITR(0), 10, 4, "E1000_EITR"},
+ {E1000_IMIR(0), 8, 4, "E1000_IMIR"},
+ {E1000_IMIREXT(0), 8, 4, "E1000_IMIREXT"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_fctl[] = {
+ {E1000_FCAL, 1, 1, "E1000_FCAL"},
+ {E1000_FCAH, 1, 1, "E1000_FCAH"},
+ {E1000_FCTTV, 1, 1, "E1000_FCTTV"},
+ {E1000_FCRTL, 1, 1, "E1000_FCRTL"},
+ {E1000_FCRTH, 1, 1, "E1000_FCRTH"},
+ {E1000_FCRTV, 1, 1, "E1000_FCRTV"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_rxdma[] = {
+ {E1000_RDBAL(0), 4, 0x100, "E1000_RDBAL"},
+ {E1000_RDBAH(0), 4, 0x100, "E1000_RDBAH"},
+ {E1000_RDLEN(0), 4, 0x100, "E1000_RDLEN"},
+ {E1000_RDH(0), 4, 0x100, "E1000_RDH"},
+ {E1000_RDT(0), 4, 0x100, "E1000_RDT"},
+ {E1000_RXCTL(0), 4, 0x100, "E1000_RXCTL"},
+ {E1000_SRRCTL(0), 4, 0x100, "E1000_SRRCTL"},
+ {E1000_DCA_RXCTRL(0), 4, 0x100, "E1000_DCA_RXCTRL"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_rx[] = {
+ {E1000_RCTL, 1, 1, "E1000_RCTL"},
+ {E1000_RXCSUM, 1, 1, "E1000_RXCSUM"},
+ {E1000_RLPML, 1, 1, "E1000_RLPML"},
+ {E1000_RFCTL, 1, 1, "E1000_RFCTL"},
+ {E1000_MRQC, 1, 1, "E1000_MRQC"},
+ {E1000_VT_CTL, 1, 1, "E1000_VT_CTL"},
+ {E1000_RAL(0), 16, 8, "E1000_RAL"},
+ {E1000_RAH(0), 16, 8, "E1000_RAH"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_tx[] = {
+ {E1000_TCTL, 1, 1, "E1000_TCTL"},
+ {E1000_TCTL_EXT, 1, 1, "E1000_TCTL_EXT"},
+ {E1000_TIPG, 1, 1, "E1000_TIPG"},
+ {E1000_DTXCTL, 1, 1, "E1000_DTXCTL"},
+ {E1000_TDBAL(0), 4, 0x100, "E1000_TDBAL"},
+ {E1000_TDBAH(0), 4, 0x100, "E1000_TDBAH"},
+ {E1000_TDLEN(0), 4, 0x100, "E1000_TDLEN"},
+ {E1000_TDH(0), 4, 0x100, "E1000_TDLEN"},
+ {E1000_TDT(0), 4, 0x100, "E1000_TDT"},
+ {E1000_TXDCTL(0), 4, 0x100, "E1000_TXDCTL"},
+ {E1000_TDWBAL(0), 4, 0x100, "E1000_TDWBAL"},
+ {E1000_TDWBAH(0), 4, 0x100, "E1000_TDWBAH"},
+ {E1000_DCA_TXCTRL(0), 4, 0x100, "E1000_DCA_TXCTRL"},
+ {E1000_TDFH, 1, 1, "E1000_TDFH"},
+ {E1000_TDFT, 1, 1, "E1000_TDFT"},
+ {E1000_TDFHS, 1, 1, "E1000_TDFHS"},
+ {E1000_TDFPC, 1, 1, "E1000_TDFPC"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_wakeup[] = {
+ {E1000_WUC, 1, 1, "E1000_WUC"},
+ {E1000_WUFC, 1, 1, "E1000_WUFC"},
+ {E1000_WUS, 1, 1, "E1000_WUS"},
+ {E1000_IPAV, 1, 1, "E1000_IPAV"},
+ {E1000_WUPL, 1, 1, "E1000_WUPL"},
+ {E1000_IP4AT_REG(0), 4, 8, "E1000_IP4AT_REG"},
+ {E1000_IP6AT_REG(0), 4, 4, "E1000_IP6AT_REG"},
+ {E1000_WUPM_REG(0), 4, 4, "E1000_WUPM_REG"},
+ {E1000_FFMT_REG(0), 4, 8, "E1000_FFMT_REG"},
+ {E1000_FFVT_REG(0), 4, 8, "E1000_FFVT_REG"},
+ {E1000_FFLT_REG(0), 4, 8, "E1000_FFLT_REG"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_mac[] = {
+ {E1000_PCS_CFG0, 1, 1, "E1000_PCS_CFG0"},
+ {E1000_PCS_LCTL, 1, 1, "E1000_PCS_LCTL"},
+ {E1000_PCS_LSTAT, 1, 1, "E1000_PCS_LSTAT"},
+ {E1000_PCS_ANADV, 1, 1, "E1000_PCS_ANADV"},
+ {E1000_PCS_LPAB, 1, 1, "E1000_PCS_LPAB"},
+ {E1000_PCS_NPTX, 1, 1, "E1000_PCS_NPTX"},
+ {E1000_PCS_LPABNP, 1, 1, "E1000_PCS_LPABNP"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info *igb_regs[] = {
+ igb_regs_general,
+ igb_regs_nvm,
+ igb_regs_interrupt,
+ igb_regs_fctl,
+ igb_regs_rxdma,
+ igb_regs_rx,
+ igb_regs_tx,
+ igb_regs_wakeup,
+ igb_regs_mac,
+ NULL};
+
+/* FIXME: reading igb_regs_interrupt results side-effect which doesn't
+ * work with VFIO; re-install igb_regs_interrupt once issue is resolved.
+ */
+static const struct reg_info *igbvf_regs[] = {
+ igb_regs_general,
+ igb_regs_rxdma,
+ igb_regs_tx,
+ NULL};
+
+static inline int
+igb_read_regs(struct e1000_hw *hw, const struct reg_info *reg,
+ uint32_t *reg_buf)
+{
+ unsigned int i;
+
+ for (i = 0; i < reg->count; i++) {
+ reg_buf[i] = E1000_READ_REG(hw,
+ reg->base_addr + i * reg->stride);
+ }
+ return reg->count;
+};
+
+static inline int
+igb_reg_group_count(const struct reg_info *regs)
+{
+ int count = 0;
+ int i = 0;
+
+ while (regs[i].count)
+ count += regs[i++].count;
+ return count;
+};
+
+static inline int
+igb_read_regs_group(struct rte_eth_dev *dev, uint32_t *reg_buf,
+ const struct reg_info *regs)
+{
+ int count = 0;
+ int i = 0;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ while (regs[i].count)
+ count += igb_read_regs(hw, &regs[i++], &reg_buf[count]);
+ return count;
+};
+
+#endif /* _IGB_REGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/e1000/igb_rxtx.c b/src/spdk/dpdk/drivers/net/e1000/igb_rxtx.c
new file mode 100644
index 00000000..b955068a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/igb_rxtx.c
@@ -0,0 +1,2952 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <sys/queue.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_prefetch.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_net.h>
+#include <rte_string_fns.h>
+
+#include "e1000_logs.h"
+#include "base/e1000_api.h"
+#include "e1000_ethdev.h"
+
+#ifdef RTE_LIBRTE_IEEE1588
+#define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define IGB_TX_IEEE1588_TMST 0
+#endif
+/* Bit Mask to indicate what bits required for building TX context */
+#define IGB_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG | \
+ IGB_TX_IEEE1588_TMST)
+
+#define IGB_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct igb_rx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct igb_tx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+ uint16_t next_id; /**< Index of next descriptor in ring. */
+ uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * rx queue flags
+ */
+enum igb_rxq_flags {
+ IGB_RXQ_FLAG_LB_BSWAP_VLAN = 0x01,
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct igb_rx_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+ volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
+ volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
+ struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
+ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+ uint16_t nb_rx_desc; /**< number of RX descriptors. */
+ uint16_t rx_tail; /**< current value of RDT register. */
+ uint16_t nb_rx_hold; /**< number of held free RX desc. */
+ uint16_t rx_free_thresh; /**< max free RX desc to hold. */
+ uint16_t queue_id; /**< RX queue index. */
+ uint16_t reg_idx; /**< RX queue register index. */
+ uint16_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold register. */
+ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
+ uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
+ uint32_t flags; /**< RX flags. */
+ uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
+};
+
+/**
+ * Hardware context number
+ */
+enum igb_advctx_num {
+ IGB_CTX_0 = 0, /**< CTX0 */
+ IGB_CTX_1 = 1, /**< CTX1 */
+ IGB_CTX_NUM = 2, /**< CTX_NUM */
+};
+
+/** Offload features */
+union igb_tx_offload {
+ uint64_t data;
+ struct {
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier(CPU order). */
+ uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size. */
+
+ /* uint64_t unused:8; */
+ };
+};
+
+/*
+ * Compare mask for igb_tx_offload.data,
+ * should be in sync with igb_tx_offload layout.
+ * */
+#define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
+#define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
+#define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
+#define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
+/** Mac + IP + TCP + Mss mask. */
+#define TX_TSO_CMP_MASK \
+ (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
+
+/**
+ * Strucutre to check if new context need be built
+ */
+struct igb_advctx_info {
+ uint64_t flags; /**< ol_flags related to context build. */
+ /** tx offload: vlan, tso, l2-l3-l4 lengths. */
+ union igb_tx_offload tx_offload;
+ /** compare mask for tx offload. */
+ union igb_tx_offload tx_offload_mask;
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct igb_tx_queue {
+ volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
+ struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
+ volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
+ uint32_t txd_type; /**< Device-specific TXD type */
+ uint16_t nb_tx_desc; /**< number of TX descriptors. */
+ uint16_t tx_tail; /**< Current value of TDT register. */
+ uint16_t tx_head;
+ /**< Index of first used TX descriptor. */
+ uint16_t queue_id; /**< TX queue index. */
+ uint16_t reg_idx; /**< TX queue register index. */
+ uint16_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold register. */
+ uint32_t ctx_curr;
+ /**< Current used hardware descriptor. */
+ uint32_t ctx_start;
+ /**< Start context position for transmit queue. */
+ struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
+ /**< Hardware context history.*/
+ uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+};
+
+#if 1
+#define RTE_PMD_USE_PREFETCH
+#endif
+
+#ifdef RTE_PMD_USE_PREFETCH
+#define rte_igb_prefetch(p) rte_prefetch0(p)
+#else
+#define rte_igb_prefetch(p) do {} while(0)
+#endif
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while(0)
+#endif
+
+/*
+ * Macro for VMDq feature for 1 GbE NIC.
+ */
+#define E1000_VMOLR_SIZE (8)
+#define IGB_TSO_MAX_HDRLEN (512)
+#define IGB_TSO_MAX_MSS (9216)
+
+/*********************************************************************
+ *
+ * TX function
+ *
+ **********************************************************************/
+
+/*
+ *There're some limitations in hardware for TCP segmentation offload. We
+ *should check whether the parameters are valid.
+ */
+static inline uint64_t
+check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
+{
+ if (!(ol_req & PKT_TX_TCP_SEG))
+ return ol_req;
+ if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
+ ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
+ ol_req &= ~PKT_TX_TCP_SEG;
+ ol_req |= PKT_TX_TCP_CKSUM;
+ }
+ return ol_req;
+}
+
+/*
+ * Advanced context descriptor are almost same between igb/ixgbe
+ * This is a separate function, looking for optimization opportunity here
+ * Rework required to go with the pre-defined values.
+ */
+
+static inline void
+igbe_set_xmit_ctx(struct igb_tx_queue* txq,
+ volatile struct e1000_adv_tx_context_desc *ctx_txd,
+ uint64_t ol_flags, union igb_tx_offload tx_offload)
+{
+ uint32_t type_tucmd_mlhl;
+ uint32_t mss_l4len_idx;
+ uint32_t ctx_idx, ctx_curr;
+ uint32_t vlan_macip_lens;
+ union igb_tx_offload tx_offload_mask;
+
+ ctx_curr = txq->ctx_curr;
+ ctx_idx = ctx_curr + txq->ctx_start;
+
+ tx_offload_mask.data = 0;
+ type_tucmd_mlhl = 0;
+
+ /* Specify which HW CTX to upload. */
+ mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
+
+ if (ol_flags & PKT_TX_VLAN_PKT)
+ tx_offload_mask.data |= TX_VLAN_CMP_MASK;
+
+ /* check if TCP segmentation required for this packet */
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* implies IP cksum in IPv4 */
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
+ E1000_ADVTXD_TUCMD_L4T_TCP |
+ E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+ else
+ type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
+ E1000_ADVTXD_TUCMD_L4T_TCP |
+ E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+
+ tx_offload_mask.data |= TX_TSO_CMP_MASK;
+ mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
+ } else { /* no TSO, check if hardware checksum is needed */
+ if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
+ tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
+
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
+
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
+ E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+ case PKT_TX_TCP_CKSUM:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
+ E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
+ E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
+ E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+ break;
+ }
+ }
+
+ txq->ctx_cache[ctx_curr].flags = ol_flags;
+ txq->ctx_cache[ctx_curr].tx_offload.data =
+ tx_offload_mask.data & tx_offload.data;
+ txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
+
+ ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
+ vlan_macip_lens = (uint32_t)tx_offload.data;
+ ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
+ ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
+ ctx_txd->seqnum_seed = 0;
+}
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ */
+static inline uint32_t
+what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
+ union igb_tx_offload tx_offload)
+{
+ /* If match with the current context */
+ if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
+ return txq->ctx_curr;
+ }
+
+ /* If match with the second context */
+ txq->ctx_curr ^= 1;
+ if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
+ return txq->ctx_curr;
+ }
+
+ /* Mismatch, use the previous context */
+ return IGB_CTX_NUM;
+}
+
+static inline uint32_t
+tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
+{
+ static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
+ static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
+ uint32_t tmp;
+
+ tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
+ tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
+ tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
+ return tmp;
+}
+
+static inline uint32_t
+tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
+{
+ uint32_t cmdtype;
+ static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
+ static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
+ cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
+ cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
+ return cmdtype;
+}
+
+uint16_t
+eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_tx_queue *txq;
+ struct igb_tx_entry *sw_ring;
+ struct igb_tx_entry *txe, *txn;
+ volatile union e1000_adv_tx_desc *txr;
+ volatile union e1000_adv_tx_desc *txd;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint64_t buf_dma_addr;
+ uint32_t olinfo_status;
+ uint32_t cmd_type_len;
+ uint32_t pkt_len;
+ uint16_t slen;
+ uint64_t ol_flags;
+ uint16_t tx_end;
+ uint16_t tx_id;
+ uint16_t tx_last;
+ uint16_t nb_tx;
+ uint64_t tx_ol_req;
+ uint32_t new_ctx = 0;
+ uint32_t ctx = 0;
+ union igb_tx_offload tx_offload = {0};
+
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ tx_pkt = *tx_pkts++;
+ pkt_len = tx_pkt->pkt_len;
+
+ RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+ /*
+ * The number of descriptors that must be allocated for a
+ * packet is the number of segments of that packet, plus 1
+ * Context Descriptor for the VLAN Tag Identifier, if any.
+ * Determine the last TX descriptor to allocate in the TX ring
+ * for the packet, starting from the current position (tx_id)
+ * in the ring.
+ */
+ tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
+
+ ol_flags = tx_pkt->ol_flags;
+ tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
+
+ /* If a Context Descriptor need be built . */
+ if (tx_ol_req) {
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.vlan_tci = tx_pkt->vlan_tci;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+ tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
+
+ ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
+ /* Only allocate context descriptor if required*/
+ new_ctx = (ctx == IGB_CTX_NUM);
+ ctx = txq->ctx_curr + txq->ctx_start;
+ tx_last = (uint16_t) (tx_last + new_ctx);
+ }
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+ " tx_first=%u tx_last=%u",
+ (unsigned) txq->port_id,
+ (unsigned) txq->queue_id,
+ (unsigned) pkt_len,
+ (unsigned) tx_id,
+ (unsigned) tx_last);
+
+ /*
+ * Check if there are enough free descriptors in the TX ring
+ * to transmit the next packet.
+ * This operation is based on the two following rules:
+ *
+ * 1- Only check that the last needed TX descriptor can be
+ * allocated (by construction, if that descriptor is free,
+ * all intermediate ones are also free).
+ *
+ * For this purpose, the index of the last TX descriptor
+ * used for a packet (the "last descriptor" of a packet)
+ * is recorded in the TX entries (the last one included)
+ * that are associated with all TX descriptors allocated
+ * for that packet.
+ *
+ * 2- Avoid to allocate the last free TX descriptor of the
+ * ring, in order to never set the TDT register with the
+ * same value stored in parallel by the NIC in the TDH
+ * register, which makes the TX engine of the NIC enter
+ * in a deadlock situation.
+ *
+ * By extension, avoid to allocate a free descriptor that
+ * belongs to the last set of free descriptors allocated
+ * to the same packet previously transmitted.
+ */
+
+ /*
+ * The "last descriptor" of the previously sent packet, if any,
+ * which used the last descriptor to allocate.
+ */
+ tx_end = sw_ring[tx_last].last_id;
+
+ /*
+ * The next descriptor following that "last descriptor" in the
+ * ring.
+ */
+ tx_end = sw_ring[tx_end].next_id;
+
+ /*
+ * The "last descriptor" associated with that next descriptor.
+ */
+ tx_end = sw_ring[tx_end].last_id;
+
+ /*
+ * Check that this descriptor is free.
+ */
+ if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+
+ /*
+ * Set common flags of all TX Data Descriptors.
+ *
+ * The following bits must be set in all Data Descriptors:
+ * - E1000_ADVTXD_DTYP_DATA
+ * - E1000_ADVTXD_DCMD_DEXT
+ *
+ * The following bits must be set in the first Data Descriptor
+ * and are ignored in the other ones:
+ * - E1000_ADVTXD_DCMD_IFCS
+ * - E1000_ADVTXD_MAC_1588
+ * - E1000_ADVTXD_DCMD_VLE
+ *
+ * The following bits must only be set in the last Data
+ * Descriptor:
+ * - E1000_TXD_CMD_EOP
+ *
+ * The following bits can be set in any Data Descriptor, but
+ * are only set in the last Data Descriptor:
+ * - E1000_TXD_CMD_RS
+ */
+ cmd_type_len = txq->txd_type |
+ E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
+ if (tx_ol_req & PKT_TX_TCP_SEG)
+ pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
+ olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
+#if defined(RTE_LIBRTE_IEEE1588)
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
+#endif
+ if (tx_ol_req) {
+ /* Setup TX Advanced context descriptor if required */
+ if (new_ctx) {
+ volatile struct e1000_adv_tx_context_desc *
+ ctx_txd;
+
+ ctx_txd = (volatile struct
+ e1000_adv_tx_context_desc *)
+ &txr[tx_id];
+
+ txn = &sw_ring[txe->next_id];
+ RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ /* Setup the TX Advanced Data Descriptor */
+ cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
+ olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
+ olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
+ }
+
+ m_seg = tx_pkt;
+ do {
+ txn = &sw_ring[txe->next_id];
+ txd = &txr[tx_id];
+
+ if (txe->mbuf != NULL)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /*
+ * Set up transmit descriptor.
+ */
+ slen = (uint16_t) m_seg->data_len;
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+ txd->read.buffer_addr =
+ rte_cpu_to_le_64(buf_dma_addr);
+ txd->read.cmd_type_len =
+ rte_cpu_to_le_32(cmd_type_len | slen);
+ txd->read.olinfo_status =
+ rte_cpu_to_le_32(olinfo_status);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg != NULL);
+
+ /*
+ * The last packet data descriptor needs End Of Packet (EOP)
+ * and Report Status (RS).
+ */
+ txd->read.cmd_type_len |=
+ rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
+ }
+ end_of_tx:
+ rte_wmb();
+
+ /*
+ * Set the Transmit Descriptor Tail (TDT).
+ */
+ E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (unsigned) txq->port_id, (unsigned) txq->queue_id,
+ (unsigned) tx_id, (unsigned) nb_tx);
+ txq->tx_tail = tx_id;
+
+ return nb_tx;
+}
+
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ /* Check some limitations for TSO in hardware */
+ if (m->ol_flags & PKT_TX_TCP_SEG)
+ if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
+ (m->l2_len + m->l3_len + m->l4_len >
+ IGB_TSO_MAX_HDRLEN)) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/*********************************************************************
+ *
+ * RX functions
+ *
+ **********************************************************************/
+#define IGB_PACKET_TYPE_IPV4 0X01
+#define IGB_PACKET_TYPE_IPV4_TCP 0X11
+#define IGB_PACKET_TYPE_IPV4_UDP 0X21
+#define IGB_PACKET_TYPE_IPV4_SCTP 0X41
+#define IGB_PACKET_TYPE_IPV4_EXT 0X03
+#define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
+#define IGB_PACKET_TYPE_IPV6 0X04
+#define IGB_PACKET_TYPE_IPV6_TCP 0X14
+#define IGB_PACKET_TYPE_IPV6_UDP 0X24
+#define IGB_PACKET_TYPE_IPV6_EXT 0X0C
+#define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
+#define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
+#define IGB_PACKET_TYPE_IPV4_IPV6 0X05
+#define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
+#define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
+#define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
+#define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
+#define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
+#define IGB_PACKET_TYPE_MAX 0X80
+#define IGB_PACKET_TYPE_MASK 0X7F
+#define IGB_PACKET_TYPE_SHIFT 0X04
+static inline uint32_t
+igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
+{
+ static const uint32_t
+ ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
+ [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4,
+ [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT,
+ [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6,
+ [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT,
+ [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
+ [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
+ };
+ if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
+ return RTE_PTYPE_UNKNOWN;
+
+ pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
+
+ return ptype_table[pkt_info];
+}
+
+static inline uint64_t
+rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
+{
+ uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
+
+#if defined(RTE_LIBRTE_IEEE1588)
+ static uint32_t ip_pkt_etqf_map[8] = {
+ 0, 0, 0, PKT_RX_IEEE1588_PTP,
+ 0, 0, 0, 0,
+ };
+
+ struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
+
+ /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
+ if (hw->mac.type == e1000_i210)
+ pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
+ else
+ pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
+#else
+ RTE_SET_USED(rxq);
+#endif
+
+ return pkt_flags;
+}
+
+static inline uint64_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status)
+{
+ uint64_t pkt_flags;
+
+ /* Check if VLAN present */
+ pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
+
+#if defined(RTE_LIBRTE_IEEE1588)
+ if (rx_status & E1000_RXD_STAT_TMST)
+ pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+#endif
+ return pkt_flags;
+}
+
+static inline uint64_t
+rx_desc_error_to_pkt_flags(uint32_t rx_status)
+{
+ /*
+ * Bit 30: IPE, IPv4 checksum error
+ * Bit 29: L4I, L4I integrity error
+ */
+
+ static uint64_t error_to_pkt_flags_map[4] = {
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+ };
+ return error_to_pkt_flags_map[(rx_status >>
+ E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
+}
+
+uint16_t
+eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_rx_queue *rxq;
+ volatile union e1000_adv_rx_desc *rx_ring;
+ volatile union e1000_adv_rx_desc *rxdp;
+ struct igb_rx_entry *sw_ring;
+ struct igb_rx_entry *rxe;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ union e1000_adv_rx_desc rxd;
+ uint64_t dma_addr;
+ uint32_t staterr;
+ uint32_t hlen_type_rss;
+ uint16_t pkt_len;
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint64_t pkt_flags;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+ while (nb_rx < nb_pkts) {
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rxdp->wb.upper.status_error;
+ if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * End of packet.
+ *
+ * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
+ * likely to be invalid and to be dropped by the various
+ * validation checks performed by the network stack.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+ * This policy do not drop the packet received in the RX
+ * descriptor for which the allocation of a new mbuf failed.
+ * Thus, it allows that packet to be later retrieved if
+ * mbuf have been freed in the mean time.
+ * As a side effect, holding RX descriptors instead of
+ * systematically giving them back to the NIC may lead to
+ * RX ring exhaustion situations.
+ * However, the NIC can gracefully prevent such situations
+ * to happen by sending specific "back-pressure" flow control
+ * frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "staterr=0x%x pkt_len=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) staterr,
+ (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_igb_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_igb_prefetch(&rx_ring[rx_id]);
+ rte_igb_prefetch(&sw_ring[rx_id]);
+ }
+
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+
+ /*
+ * Initialize the returned mbuf.
+ * 1) setup generic mbuf fields:
+ * - number of segments,
+ * - next segment,
+ * - packet length,
+ * - RX port identifier.
+ * 2) integrate hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
+ rxq->crc_len);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->port = rxq->port_id;
+
+ rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+ hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+
+ /*
+ * The vlan_tci field is only valid when PKT_RX_VLAN is
+ * set in the pkt_flags field and must be in CPU byte order.
+ */
+ if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
+ (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
+ rxm->vlan_tci = rte_be_to_cpu_16(rxd.wb.upper.vlan);
+ } else {
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ }
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
+ pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
+ rxm->ol_flags = pkt_flags;
+ rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
+ lo_dword.hs_rss.pkt_info);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situtation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return nb_rx;
+}
+
+uint16_t
+eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_rx_queue *rxq;
+ volatile union e1000_adv_rx_desc *rx_ring;
+ volatile union e1000_adv_rx_desc *rxdp;
+ struct igb_rx_entry *sw_ring;
+ struct igb_rx_entry *rxe;
+ struct rte_mbuf *first_seg;
+ struct rte_mbuf *last_seg;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ union e1000_adv_rx_desc rxd;
+ uint64_t dma; /* Physical address of mbuf data buffer */
+ uint32_t staterr;
+ uint32_t hlen_type_rss;
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint16_t data_len;
+ uint64_t pkt_flags;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+
+ /*
+ * Retrieve RX context of current packet, if any.
+ */
+ first_seg = rxq->pkt_first_seg;
+ last_seg = rxq->pkt_last_seg;
+
+ while (nb_rx < nb_pkts) {
+ next_desc:
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rxdp->wb.upper.status_error;
+ if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * Descriptor done.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+ * This policy does not drop the packet received in the RX
+ * descriptor for which the allocation of a new mbuf failed.
+ * Thus, it allows that packet to be later retrieved if
+ * mbuf have been freed in the mean time.
+ * As a side effect, holding RX descriptors instead of
+ * systematically giving them back to the NIC may lead to
+ * RX ring exhaustion situations.
+ * However, the NIC can gracefully prevent such situations
+ * to happen by sending specific "back-pressure" flow control
+ * frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "staterr=0x%x data_len=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) staterr,
+ (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_igb_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_igb_prefetch(&rx_ring[rx_id]);
+ rte_igb_prefetch(&sw_ring[rx_id]);
+ }
+
+ /*
+ * Update RX descriptor with the physical address of the new
+ * data buffer of the new allocated mbuf.
+ */
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ rxdp->read.pkt_addr = dma;
+ rxdp->read.hdr_addr = 0;
+
+ /*
+ * Set data length & data buffer address of mbuf.
+ */
+ data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
+ rxm->data_len = data_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet and
+ * initialize its context.
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (first_seg == NULL) {
+ first_seg = rxm;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
+ } else {
+ first_seg->pkt_len += data_len;
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+
+ /*
+ * If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (! (staterr & E1000_RXD_STAT_EOP)) {
+ last_seg = rxm;
+ goto next_desc;
+ }
+
+ /*
+ * This is the last buffer of the received packet.
+ * If the CRC is not stripped by the hardware:
+ * - Subtract the CRC length from the total packet length.
+ * - If the last buffer only contains the whole CRC or a part
+ * of it, free the mbuf associated to the last buffer.
+ * If part of the CRC is also contained in the previous
+ * mbuf, subtract the length of that CRC part from the
+ * data length of the previous mbuf.
+ */
+ rxm->next = NULL;
+ if (unlikely(rxq->crc_len > 0)) {
+ first_seg->pkt_len -= ETHER_CRC_LEN;
+ if (data_len <= ETHER_CRC_LEN) {
+ rte_pktmbuf_free_seg(rxm);
+ first_seg->nb_segs--;
+ last_seg->data_len = (uint16_t)
+ (last_seg->data_len -
+ (ETHER_CRC_LEN - data_len));
+ last_seg->next = NULL;
+ } else
+ rxm->data_len =
+ (uint16_t) (data_len - ETHER_CRC_LEN);
+ }
+
+ /*
+ * Initialize the first mbuf of the returned packet:
+ * - RX port identifier,
+ * - hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ first_seg->port = rxq->port_id;
+ first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+
+ /*
+ * The vlan_tci field is only valid when PKT_RX_VLAN is
+ * set in the pkt_flags field and must be in CPU byte order.
+ */
+ if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
+ (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
+ first_seg->vlan_tci =
+ rte_be_to_cpu_16(rxd.wb.upper.vlan);
+ } else {
+ first_seg->vlan_tci =
+ rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ }
+ hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
+ pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
+ first_seg->ol_flags = pkt_flags;
+ first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
+ lower.lo_dword.hs_rss.pkt_info);
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = first_seg;
+
+ /*
+ * Setup receipt context for a new packet.
+ */
+ first_seg = NULL;
+ }
+
+ /*
+ * Record index of the next RX descriptor to probe.
+ */
+ rxq->rx_tail = rx_id;
+
+ /*
+ * Save receive context.
+ */
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situtation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return nb_rx;
+}
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring
+ * desscriptors should meet the following condition:
+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+
+static void
+igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+{
+ unsigned i;
+
+ if (txq->sw_ring != NULL) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+static void
+igb_tx_queue_release(struct igb_tx_queue *txq)
+{
+ if (txq != NULL) {
+ igb_tx_queue_release_mbufs(txq);
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ }
+}
+
+void
+eth_igb_tx_queue_release(void *txq)
+{
+ igb_tx_queue_release(txq);
+}
+
+static int
+igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
+{
+ struct igb_tx_entry *sw_ring;
+ volatile union e1000_adv_tx_desc *txr;
+ uint16_t tx_first; /* First segment analyzed. */
+ uint16_t tx_id; /* Current segment being processed. */
+ uint16_t tx_last; /* Last segment in the current packet. */
+ uint16_t tx_next; /* First segment of the next packet. */
+ int count;
+
+ if (txq != NULL) {
+ count = 0;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+
+ /*
+ * tx_tail is the last sent packet on the sw_ring. Goto the end
+ * of that packet (the last segment in the packet chain) and
+ * then the next segment will be the start of the oldest segment
+ * in the sw_ring. This is the first packet that will be
+ * attempted to be freed.
+ */
+
+ /* Get last segment in most recently added packet. */
+ tx_first = sw_ring[txq->tx_tail].last_id;
+
+ /* Get the next segment, which is the oldest segment in ring. */
+ tx_first = sw_ring[tx_first].next_id;
+
+ /* Set the current index to the first. */
+ tx_id = tx_first;
+
+ /*
+ * Loop through each packet. For each packet, verify that an
+ * mbuf exists and that the last segment is free. If so, free
+ * it and move on.
+ */
+ while (1) {
+ tx_last = sw_ring[tx_id].last_id;
+
+ if (sw_ring[tx_last].mbuf) {
+ if (txr[tx_last].wb.status &
+ E1000_TXD_STAT_DD) {
+ /*
+ * Increment the number of packets
+ * freed.
+ */
+ count++;
+
+ /* Get the start of the next packet. */
+ tx_next = sw_ring[tx_last].next_id;
+
+ /*
+ * Loop through all segments in a
+ * packet.
+ */
+ do {
+ rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+ sw_ring[tx_id].mbuf = NULL;
+ sw_ring[tx_id].last_id = tx_id;
+
+ /* Move to next segemnt. */
+ tx_id = sw_ring[tx_id].next_id;
+
+ } while (tx_id != tx_next);
+
+ if (unlikely(count == (int)free_cnt))
+ break;
+ } else
+ /*
+ * mbuf still in use, nothing left to
+ * free.
+ */
+ break;
+ } else {
+ /*
+ * There are multiple reasons to be here:
+ * 1) All the packets on the ring have been
+ * freed - tx_id is equal to tx_first
+ * and some packets have been freed.
+ * - Done, exit
+ * 2) Interfaces has not sent a rings worth of
+ * packets yet, so the segment after tail is
+ * still empty. Or a previous call to this
+ * function freed some of the segments but
+ * not all so there is a hole in the list.
+ * Hopefully this is a rare case.
+ * - Walk the list and find the next mbuf. If
+ * there isn't one, then done.
+ */
+ if (likely((tx_id == tx_first) && (count != 0)))
+ break;
+
+ /*
+ * Walk the list and find the next mbuf, if any.
+ */
+ do {
+ /* Move to next segemnt. */
+ tx_id = sw_ring[tx_id].next_id;
+
+ if (sw_ring[tx_id].mbuf)
+ break;
+
+ } while (tx_id != tx_first);
+
+ /*
+ * Determine why previous loop bailed. If there
+ * is not an mbuf, done.
+ */
+ if (sw_ring[tx_id].mbuf == NULL)
+ break;
+ }
+ }
+ } else
+ count = -ENODEV;
+
+ return count;
+}
+
+int
+eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ return igb_tx_done_cleanup(txq, free_cnt);
+}
+
+static void
+igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
+{
+ txq->tx_head = 0;
+ txq->tx_tail = 0;
+ txq->ctx_curr = 0;
+ memset((void*)&txq->ctx_cache, 0,
+ IGB_CTX_NUM * sizeof(struct igb_advctx_info));
+}
+
+static void
+igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
+{
+ static const union e1000_adv_tx_desc zeroed_desc = {{0}};
+ struct igb_tx_entry *txe = txq->sw_ring;
+ uint16_t i, prev;
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->tx_ring[i] = zeroed_desc;
+ }
+
+ /* Initialize ring entries */
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
+
+ txd->wb.status = E1000_TXD_STAT_DD;
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->txd_type = E1000_ADVTXD_DTYP_DATA;
+ /* 82575 specific, each tx queue will use 2 hw contexts */
+ if (hw->mac.type == e1000_82575)
+ txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
+
+ igb_reset_tx_queue_stat(txq);
+}
+
+uint64_t
+igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_offload_capa;
+
+ RTE_SET_USED(dev);
+ rx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ return rx_offload_capa;
+}
+
+uint64_t
+igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_queue_offload_capa;
+
+ rx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
+
+ return rx_queue_offload_capa;
+}
+
+int
+eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const struct rte_memzone *tz;
+ struct igb_tx_queue *txq;
+ struct e1000_hw *hw;
+ uint32_t size;
+ uint64_t offloads;
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Validate number of transmit descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of E1000_ALIGN.
+ */
+ if (nb_desc % IGB_TXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
+ return -EINVAL;
+ }
+
+ /*
+ * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
+ * driver.
+ */
+ if (tx_conf->tx_free_thresh != 0)
+ PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
+ "used for the 1G driver.");
+ if (tx_conf->tx_rs_thresh != 0)
+ PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
+ "used for the 1G driver.");
+ if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
+ PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
+ "consider setting the TX WTHRESH value to 4, 8, "
+ "or 16.");
+
+ /* Free memory prior to re-allocation if needed */
+ if (dev->data->tx_queues[queue_idx] != NULL) {
+ igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the tx queue data structure */
+ txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
+ RTE_CACHE_LINE_SIZE);
+ if (txq == NULL)
+ return -ENOMEM;
+
+ /*
+ * Allocate TX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
+ E1000_ALIGN, socket_id);
+ if (tz == NULL) {
+ igb_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->pthresh = tx_conf->tx_thresh.pthresh;
+ txq->hthresh = tx_conf->tx_thresh.hthresh;
+ txq->wthresh = tx_conf->tx_thresh.wthresh;
+ if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
+ txq->wthresh = 1;
+ txq->queue_id = queue_idx;
+ txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+ txq->port_id = dev->data->port_id;
+
+ txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
+ txq->tx_ring_phys_addr = tz->iova;
+
+ txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
+ /* Allocate software ring */
+ txq->sw_ring = rte_zmalloc("txq->sw_ring",
+ sizeof(struct igb_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+ if (txq->sw_ring == NULL) {
+ igb_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+ igb_reset_tx_queue(txq, dev);
+ dev->tx_pkt_burst = eth_igb_xmit_pkts;
+ dev->tx_pkt_prepare = &eth_igb_prep_pkts;
+ dev->data->tx_queues[queue_idx] = txq;
+ txq->offloads = offloads;
+
+ return 0;
+}
+
+static void
+igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+{
+ unsigned i;
+
+ if (rxq->sw_ring != NULL) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+static void
+igb_rx_queue_release(struct igb_rx_queue *rxq)
+{
+ if (rxq != NULL) {
+ igb_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ }
+}
+
+void
+eth_igb_rx_queue_release(void *rxq)
+{
+ igb_rx_queue_release(rxq);
+}
+
+static void
+igb_reset_rx_queue(struct igb_rx_queue *rxq)
+{
+ static const union e1000_adv_rx_desc zeroed_desc = {{0}};
+ unsigned i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ rxq->rx_ring[i] = zeroed_desc;
+ }
+
+ rxq->rx_tail = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+}
+
+uint64_t
+igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_offload_capa;
+
+ RTE_SET_USED(dev);
+ rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER;
+
+ return rx_offload_capa;
+}
+
+uint64_t
+igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rx_queue_offload_capa;
+
+ switch (hw->mac.type) {
+ case e1000_vfadapt_i350:
+ /*
+ * As only one Rx queue can be used, let per queue offloading
+ * capability be same to per port queue offloading capability
+ * for better convenience.
+ */
+ rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
+ break;
+ default:
+ rx_queue_offload_capa = 0;
+ }
+ return rx_queue_offload_capa;
+}
+
+int
+eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ const struct rte_memzone *rz;
+ struct igb_rx_queue *rxq;
+ struct e1000_hw *hw;
+ unsigned int size;
+ uint64_t offloads;
+
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Validate number of receive descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of E1000_ALIGN.
+ */
+ if (nb_desc % IGB_RXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the RX queue data structure. */
+ rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
+ RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL)
+ return -ENOMEM;
+ rxq->offloads = offloads;
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->pthresh = rx_conf->rx_thresh.pthresh;
+ rxq->hthresh = rx_conf->rx_thresh.hthresh;
+ rxq->wthresh = rx_conf->rx_thresh.wthresh;
+ if (rxq->wthresh > 0 &&
+ (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
+ rxq->wthresh = 1;
+ rxq->drop_en = rx_conf->rx_drop_en;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+ rxq->port_id = dev->data->port_id;
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+
+ /*
+ * Allocate RX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
+ E1000_ALIGN, socket_id);
+ if (rz == NULL) {
+ igb_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
+ rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
+ rxq->rx_ring_phys_addr = rz->iova;
+ rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
+
+ /* Allocate software ring. */
+ rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
+ sizeof(struct igb_rx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+ if (rxq->sw_ring == NULL) {
+ igb_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
+
+ dev->data->rx_queues[queue_idx] = rxq;
+ igb_reset_rx_queue(rxq);
+
+ return 0;
+}
+
+uint32_t
+eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define IGB_RXQ_SCAN_INTERVAL 4
+ volatile union e1000_adv_rx_desc *rxdp;
+ struct igb_rx_queue *rxq;
+ uint32_t desc = 0;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+
+ while ((desc < rxq->nb_rx_desc) &&
+ (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
+ desc += IGB_RXQ_SCAN_INTERVAL;
+ rxdp += IGB_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
+
+int
+eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile union e1000_adv_rx_desc *rxdp;
+ struct igb_rx_queue *rxq = rx_queue;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return 0;
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ rxdp = &rxq->rx_ring[desc];
+ return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
+}
+
+int
+eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct igb_rx_queue *rxq = rx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.upper.status_error;
+ if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct igb_tx_queue *txq = tx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+
+ status = &txq->tx_ring[desc].wb.status;
+ if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+void
+igb_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ struct igb_tx_queue *txq;
+ struct igb_rx_queue *rxq;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq != NULL) {
+ igb_tx_queue_release_mbufs(txq);
+ igb_reset_tx_queue(txq, dev);
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq != NULL) {
+ igb_rx_queue_release_mbufs(rxq);
+ igb_reset_rx_queue(rxq);
+ }
+ }
+}
+
+void
+igb_dev_free_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ eth_igb_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ eth_igb_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+/**
+ * Receive Side Scaling (RSS).
+ * See section 7.1.1.7 in the following document:
+ * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
+ *
+ * Principles:
+ * The source and destination IP addresses of the IP header and the source and
+ * destination ports of TCP/UDP headers, if any, of received packets are hashed
+ * against a configurable random key to compute a 32-bit RSS hash result.
+ * The seven (7) LSBs of the 32-bit hash result are used as an index into a
+ * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
+ * RSS output index which is used as the RX queue index where to store the
+ * received packets.
+ * The following output is supplied in the RX write-back descriptor:
+ * - 32-bit result of the Microsoft RSS hash function,
+ * - 4-bit RSS type field.
+ */
+
+/*
+ * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
+ * Used as the default key.
+ */
+static uint8_t rss_intel_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+static void
+igb_rss_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+ uint32_t mrqc;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mrqc = E1000_READ_REG(hw, E1000_MRQC);
+ mrqc &= ~E1000_MRQC_ENABLE_MASK;
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+}
+
+static void
+igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
+{
+ uint8_t *hash_key;
+ uint32_t rss_key;
+ uint32_t mrqc;
+ uint64_t rss_hf;
+ uint16_t i;
+
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL) {
+ /* Fill in RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = hash_key[(i * 4)];
+ rss_key |= hash_key[(i * 4) + 1] << 8;
+ rss_key |= hash_key[(i * 4) + 2] << 16;
+ rss_key |= hash_key[(i * 4) + 3] << 24;
+ E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
+ }
+ }
+
+ /* Set configured hashing protocols in MRQC register */
+ rss_hf = rss_conf->rss_hf;
+ mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
+ if (rss_hf & ETH_RSS_IPV4)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
+ if (rss_hf & ETH_RSS_IPV6)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
+ if (rss_hf & ETH_RSS_IPV6_EX)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
+ if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+ if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+}
+
+int
+eth_igb_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct e1000_hw *hw;
+ uint32_t mrqc;
+ uint64_t rss_hf;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Before changing anything, first check that the update RSS operation
+ * does not attempt to disable RSS, if RSS was enabled at
+ * initialization time, or does not attempt to enable RSS, if RSS was
+ * disabled at initialization time.
+ */
+ rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
+ mrqc = E1000_READ_REG(hw, E1000_MRQC);
+ if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
+ if (rss_hf != 0) /* Enable RSS */
+ return -(EINVAL);
+ return 0; /* Nothing to do */
+ }
+ /* RSS enabled */
+ if (rss_hf == 0) /* Disable RSS */
+ return -(EINVAL);
+ igb_hw_rss_hash_set(hw, rss_conf);
+ return 0;
+}
+
+int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct e1000_hw *hw;
+ uint8_t *hash_key;
+ uint32_t rss_key;
+ uint32_t mrqc;
+ uint64_t rss_hf;
+ uint16_t i;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL) {
+ /* Return RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
+ hash_key[(i * 4)] = rss_key & 0x000000FF;
+ hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
+ hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
+ hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
+ }
+ }
+
+ /* Get RSS functions configured in MRQC register */
+ mrqc = E1000_READ_REG(hw, E1000_MRQC);
+ if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
+ rss_conf->rss_hf = 0;
+ return 0;
+ }
+ rss_hf = 0;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
+ rss_hf |= ETH_RSS_IPV4;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
+ rss_hf |= ETH_RSS_IPV6;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
+ rss_hf |= ETH_RSS_IPV6_EX;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
+ rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
+ rss_hf |= ETH_RSS_IPV6_UDP_EX;
+ rss_conf->rss_hf = rss_hf;
+ return 0;
+}
+
+static void
+igb_rss_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rss_conf rss_conf;
+ struct e1000_hw *hw;
+ uint32_t shift;
+ uint16_t i;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Fill in redirection table. */
+ shift = (hw->mac.type == e1000_82575) ? 6 : 0;
+ for (i = 0; i < 128; i++) {
+ union e1000_reta {
+ uint32_t dword;
+ uint8_t bytes[4];
+ } reta;
+ uint8_t q_idx;
+
+ q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
+ i % dev->data->nb_rx_queues : 0);
+ reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
+ if ((i & 3) == 3)
+ E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
+ }
+
+ /*
+ * Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
+ igb_rss_disable(dev);
+ return;
+ }
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ igb_hw_rss_hash_set(hw, &rss_conf);
+}
+
+/*
+ * Check if the mac type support VMDq or not.
+ * Return 1 if it supports, otherwise, return 0.
+ */
+static int
+igb_is_vmdq_supported(const struct rte_eth_dev *dev)
+{
+ const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ return 1;
+ case e1000_82540:
+ case e1000_82541:
+ case e1000_82542:
+ case e1000_82543:
+ case e1000_82544:
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82547:
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ case e1000_i210:
+ case e1000_i211:
+ default:
+ PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
+ return 0;
+ }
+}
+
+static int
+igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_vmdq_rx_conf *cfg;
+ struct e1000_hw *hw;
+ uint32_t mrqc, vt_ctl, vmolr, rctl;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ /* Check if mac type can support VMDq, return value of 0 means NOT support */
+ if (igb_is_vmdq_supported(dev) == 0)
+ return -1;
+
+ igb_rss_disable(dev);
+
+ /* RCTL: eanble VLAN filter */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ /* MRQC: enable vmdq */
+ mrqc = E1000_READ_REG(hw, E1000_MRQC);
+ mrqc |= E1000_MRQC_ENABLE_VMDQ;
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+
+ /* VTCTL: pool selection according to VLAN tag */
+ vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
+ if (cfg->enable_default_pool)
+ vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
+ vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
+ E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
+
+ for (i = 0; i < E1000_VMOLR_SIZE; i++) {
+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
+ vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
+ E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
+ E1000_VMOLR_MPME);
+
+ if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
+ vmolr |= E1000_VMOLR_AUPE;
+ if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
+ vmolr |= E1000_VMOLR_ROMPE;
+ if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
+ vmolr |= E1000_VMOLR_ROPE;
+ if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
+ vmolr |= E1000_VMOLR_BAM;
+ if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
+ vmolr |= E1000_VMOLR_MPME;
+
+ E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
+ }
+
+ /*
+ * VMOLR: set STRVLAN as 1 if IGMAC in VTCTL is set as 1
+ * Both 82576 and 82580 support it
+ */
+ if (hw->mac.type != e1000_i350) {
+ for (i = 0; i < E1000_VMOLR_SIZE; i++) {
+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
+ vmolr |= E1000_VMOLR_STRVLAN;
+ E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
+ }
+ }
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < IGB_VFTA_SIZE; i++)
+ E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
+
+ /* VFRE: 8 pools enabling for rx, both 82576 and i350 support it */
+ if (hw->mac.type != e1000_82580)
+ E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
+
+ /*
+ * RAH/RAL - allow pools to read specific mac addresses
+ * In this case, all pools should be able to read from mac addr 0
+ */
+ E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
+ E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
+
+ /* VLVF: set up filters for vlan tags as configured */
+ for (i = 0; i < cfg->nb_pool_maps; i++) {
+ /* set vlan id in VF register and set the valid bit */
+ E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
+ (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
+ ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
+ E1000_VLVF_POOLSEL_MASK)));
+ }
+
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+
+/*********************************************************************
+ *
+ * Enable receive unit.
+ *
+ **********************************************************************/
+
+static int
+igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+{
+ struct igb_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ unsigned i;
+
+ /* Initialize software ring entries. */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile union e1000_adv_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
+ "queue_id=%hu", rxq->queue_id);
+ return -ENOMEM;
+ }
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ rxd = &rxq->rx_ring[i];
+ rxd->read.hdr_addr = 0;
+ rxd->read.pkt_addr = dma_addr;
+ rxe[i].mbuf = mbuf;
+ }
+
+ return 0;
+}
+
+#define E1000_MRQC_DEF_Q_SHIFT (3)
+static int
+igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mrqc;
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
+ /*
+ * SRIOV active scheme
+ * FIXME if support RSS together with VMDq & SRIOV
+ */
+ mrqc = E1000_MRQC_ENABLE_VMDQ;
+ /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
+ mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+ } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /*
+ * SRIOV inactive scheme
+ */
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ igb_rss_configure(dev);
+ break;
+ case ETH_MQ_RX_VMDQ_ONLY:
+ /*Configure general VMDQ only RX parameters*/
+ igb_vmdq_rx_hw_configure(dev);
+ break;
+ case ETH_MQ_RX_NONE:
+ /* if mq_mode is none, disable rss mode.*/
+ default:
+ igb_rss_disable(dev);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int
+eth_igb_rx_init(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rxmode *rxmode;
+ struct e1000_hw *hw;
+ struct igb_rx_queue *rxq;
+ uint32_t rctl;
+ uint32_t rxcsum;
+ uint32_t srrctl;
+ uint16_t buf_size;
+ uint16_t rctl_bsize;
+ uint16_t i;
+ int ret;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ srrctl = 0;
+
+ /*
+ * Make sure receives are disabled while setting
+ * up the descriptor ring.
+ */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+
+ rxmode = &dev->data->dev_conf.rxmode;
+
+ /*
+ * Configure support of jumbo frames, if any.
+ */
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ rctl |= E1000_RCTL_LPE;
+
+ /*
+ * Set maximum packet length by default, and might be updated
+ * together with enabling/disabling dual VLAN.
+ */
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ VLAN_TAG_SIZE);
+ } else
+ rctl &= ~E1000_RCTL_LPE;
+
+ /* Configure and enable each RX queue. */
+ rctl_bsize = 0;
+ dev->rx_pkt_burst = eth_igb_recv_pkts;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ uint64_t bus_addr;
+ uint32_t rxdctl;
+
+ rxq = dev->data->rx_queues[i];
+
+ rxq->flags = 0;
+ /*
+ * i350 and i354 vlan packets have vlan tags byte swapped.
+ */
+ if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i354) {
+ rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
+ PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
+ } else {
+ PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
+ }
+
+ /* Allocate buffers for descriptor rings and set up queue */
+ ret = igb_alloc_rx_queue_mbufs(rxq);
+ if (ret)
+ return ret;
+
+ /*
+ * Reset crc_len in case it was changed after queue setup by a
+ * call to configure
+ */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+
+ bus_addr = rxq->rx_ring_phys_addr;
+ E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
+ rxq->nb_rx_desc *
+ sizeof(union e1000_adv_rx_desc));
+ E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
+ (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
+
+ srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ /*
+ * Configure RX buffer size.
+ */
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
+ if (buf_size >= 1024) {
+ /*
+ * Configure the BSIZEPACKET field of the SRRCTL
+ * register of the queue.
+ * Value is in 1 KB resolution, from 1 KB to 127 KB.
+ * If this field is equal to 0b, then RCTL.BSIZE
+ * determines the RX packet buffer size.
+ */
+ srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
+ E1000_SRRCTL_BSIZEPKT_MASK);
+ buf_size = (uint16_t) ((srrctl &
+ E1000_SRRCTL_BSIZEPKT_MASK) <<
+ E1000_SRRCTL_BSIZEPKT_SHIFT);
+
+ /* It adds dual VLAN length for supporting dual VLAN */
+ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * VLAN_TAG_SIZE) > buf_size){
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG,
+ "forcing scatter mode");
+ dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+ } else {
+ /*
+ * Use BSIZE field of the device RCTL register.
+ */
+ if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
+ rctl_bsize = buf_size;
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+ E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
+
+ /* Enable this RX queue. */
+ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
+ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+ rxdctl &= 0xFFF00000;
+ rxdctl |= (rxq->pthresh & 0x1F);
+ rxdctl |= ((rxq->hthresh & 0x1F) << 8);
+ rxdctl |= ((rxq->wthresh & 0x1F) << 16);
+ E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
+ }
+
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+
+ /*
+ * Setup BSIZE field of RCTL register, if needed.
+ * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
+ * register, since the code above configures the SRRCTL register of
+ * the RX queue in such a case.
+ * All configurable sizes are:
+ * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
+ * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
+ * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
+ * 2048: rctl |= E1000_RCTL_SZ_2048;
+ * 1024: rctl |= E1000_RCTL_SZ_1024;
+ * 512: rctl |= E1000_RCTL_SZ_512;
+ * 256: rctl |= E1000_RCTL_SZ_256;
+ */
+ if (rctl_bsize > 0) {
+ if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
+ rctl |= E1000_RCTL_SZ_512;
+ else /* 256 <= buf_size < 512 - use 256 */
+ rctl |= E1000_RCTL_SZ_256;
+ }
+
+ /*
+ * Configure RSS if device configured with multiple RX queues.
+ */
+ igb_dev_mq_rx_configure(dev);
+
+ /* Update the rctl since igb_dev_mq_rx_configure may change its value */
+ rctl |= E1000_READ_REG(hw, E1000_RCTL);
+
+ /*
+ * Setup the Checksum Register.
+ * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
+ */
+ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+ rxcsum |= E1000_RXCSUM_PCSD;
+
+ /* Enable both L3/L4 rx checksum offload */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ rxcsum |= E1000_RXCSUM_IPOFL;
+ else
+ rxcsum &= ~E1000_RXCSUM_IPOFL;
+ if (rxmode->offloads &
+ (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+ rxcsum |= E1000_RXCSUM_TUOFL;
+ else
+ rxcsum &= ~E1000_RXCSUM_TUOFL;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ rxcsum |= E1000_RXCSUM_CRCOFL;
+ else
+ rxcsum &= ~E1000_RXCSUM_CRCOFL;
+
+ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+
+ /* Setup the Receive Control Register. */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) {
+ rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+
+ /* clear STRCRC bit in all queues */
+ if (hw->mac.type == e1000_i350 ||
+ hw->mac.type == e1000_i210 ||
+ hw->mac.type == e1000_i211 ||
+ hw->mac.type == e1000_i354) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ uint32_t dvmolr = E1000_READ_REG(hw,
+ E1000_DVMOLR(rxq->reg_idx));
+ dvmolr &= ~E1000_DVMOLR_STRCRC;
+ E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
+ }
+ }
+ } else {
+ rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
+
+ /* set STRCRC bit in all queues */
+ if (hw->mac.type == e1000_i350 ||
+ hw->mac.type == e1000_i210 ||
+ hw->mac.type == e1000_i211 ||
+ hw->mac.type == e1000_i354) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ uint32_t dvmolr = E1000_READ_REG(hw,
+ E1000_DVMOLR(rxq->reg_idx));
+ dvmolr |= E1000_DVMOLR_STRCRC;
+ E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
+ }
+ }
+ }
+
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
+ E1000_RCTL_RDMTS_HALF |
+ (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ /* Make sure VLAN Filters are off. */
+ if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
+ rctl &= ~E1000_RCTL_VFE;
+ /* Don't store bad packets. */
+ rctl &= ~E1000_RCTL_SBP;
+
+ /* Enable Receives. */
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers.
+ * This needs to be done after enable.
+ */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
+ E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ }
+
+ return 0;
+}
+
+/*********************************************************************
+ *
+ * Enable transmit unit.
+ *
+ **********************************************************************/
+void
+eth_igb_tx_init(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+ struct igb_tx_queue *txq;
+ uint32_t tctl;
+ uint32_t txdctl;
+ uint16_t i;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Setup the Base and Length of the Tx Descriptor Rings. */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ uint64_t bus_addr;
+ txq = dev->data->tx_queues[i];
+ bus_addr = txq->tx_ring_phys_addr;
+
+ E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
+ txq->nb_tx_desc *
+ sizeof(union e1000_adv_tx_desc));
+ E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
+ (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers. */
+ E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
+ E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
+
+ /* Setup Transmit threshold registers. */
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
+ txdctl |= txq->pthresh & 0x1F;
+ txdctl |= ((txq->hthresh & 0x1F) << 8);
+ txdctl |= ((txq->wthresh & 0x1F) << 16);
+ txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
+ }
+
+ /* Program the Transmit Control Register. */
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
+
+ e1000_config_collision_dist(hw);
+
+ /* This write will effectively turn on the transmit unit. */
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+}
+
+/*********************************************************************
+ *
+ * Enable VF receive unit.
+ *
+ **********************************************************************/
+int
+eth_igbvf_rx_init(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+ struct igb_rx_queue *rxq;
+ uint32_t srrctl;
+ uint16_t buf_size;
+ uint16_t rctl_bsize;
+ uint16_t i;
+ int ret;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* setup MTU */
+ e1000_rlpml_set_vf(hw,
+ (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ VLAN_TAG_SIZE));
+
+ /* Configure and enable each RX queue. */
+ rctl_bsize = 0;
+ dev->rx_pkt_burst = eth_igb_recv_pkts;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ uint64_t bus_addr;
+ uint32_t rxdctl;
+
+ rxq = dev->data->rx_queues[i];
+
+ rxq->flags = 0;
+ /*
+ * i350VF LB vlan packets have vlan tags byte swapped.
+ */
+ if (hw->mac.type == e1000_vfadapt_i350) {
+ rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
+ PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
+ } else {
+ PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
+ }
+
+ /* Allocate buffers for descriptor rings and set up queue */
+ ret = igb_alloc_rx_queue_mbufs(rxq);
+ if (ret)
+ return ret;
+
+ bus_addr = rxq->rx_ring_phys_addr;
+ E1000_WRITE_REG(hw, E1000_RDLEN(i),
+ rxq->nb_rx_desc *
+ sizeof(union e1000_adv_rx_desc));
+ E1000_WRITE_REG(hw, E1000_RDBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
+
+ srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ /*
+ * Configure RX buffer size.
+ */
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
+ if (buf_size >= 1024) {
+ /*
+ * Configure the BSIZEPACKET field of the SRRCTL
+ * register of the queue.
+ * Value is in 1 KB resolution, from 1 KB to 127 KB.
+ * If this field is equal to 0b, then RCTL.BSIZE
+ * determines the RX packet buffer size.
+ */
+ srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
+ E1000_SRRCTL_BSIZEPKT_MASK);
+ buf_size = (uint16_t) ((srrctl &
+ E1000_SRRCTL_BSIZEPKT_MASK) <<
+ E1000_SRRCTL_BSIZEPKT_SHIFT);
+
+ /* It adds dual VLAN length for supporting dual VLAN */
+ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * VLAN_TAG_SIZE) > buf_size){
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG,
+ "forcing scatter mode");
+ dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+ } else {
+ /*
+ * Use BSIZE field of the device RCTL register.
+ */
+ if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
+ rctl_bsize = buf_size;
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+ E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
+
+ /* Enable this RX queue. */
+ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
+ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+ rxdctl &= 0xFFF00000;
+ rxdctl |= (rxq->pthresh & 0x1F);
+ rxdctl |= ((rxq->hthresh & 0x1F) << 8);
+ if (hw->mac.type == e1000_vfadapt) {
+ /*
+ * Workaround of 82576 VF Erratum
+ * force set WTHRESH to 1
+ * to avoid Write-Back not triggered sometimes
+ */
+ rxdctl |= 0x10000;
+ PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
+ }
+ else
+ rxdctl |= ((rxq->wthresh & 0x1F) << 16);
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
+ }
+
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers.
+ * This needs to be done after enable.
+ */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ E1000_WRITE_REG(hw, E1000_RDH(i), 0);
+ E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
+ }
+
+ return 0;
+}
+
+/*********************************************************************
+ *
+ * Enable VF transmit unit.
+ *
+ **********************************************************************/
+void
+eth_igbvf_tx_init(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+ struct igb_tx_queue *txq;
+ uint32_t txdctl;
+ uint16_t i;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Setup the Base and Length of the Tx Descriptor Rings. */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ uint64_t bus_addr;
+
+ txq = dev->data->tx_queues[i];
+ bus_addr = txq->tx_ring_phys_addr;
+ E1000_WRITE_REG(hw, E1000_TDLEN(i),
+ txq->nb_tx_desc *
+ sizeof(union e1000_adv_tx_desc));
+ E1000_WRITE_REG(hw, E1000_TDBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers. */
+ E1000_WRITE_REG(hw, E1000_TDT(i), 0);
+ E1000_WRITE_REG(hw, E1000_TDH(i), 0);
+
+ /* Setup Transmit threshold registers. */
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
+ txdctl |= txq->pthresh & 0x1F;
+ txdctl |= ((txq->hthresh & 0x1F) << 8);
+ if (hw->mac.type == e1000_82576) {
+ /*
+ * Workaround of 82576 VF Erratum
+ * force set WTHRESH to 1
+ * to avoid Write-Back not triggered sometimes
+ */
+ txdctl |= 0x10000;
+ PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
+ }
+ else
+ txdctl |= ((txq->wthresh & 0x1F) << 16);
+ txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
+ }
+
+}
+
+void
+igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct igb_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.offloads = rxq->offloads;
+}
+
+void
+igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct igb_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+ qinfo->conf.offloads = txq->offloads;
+}
+
+int
+igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+igb_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
+int
+igb_config_rss_filter(struct rte_eth_dev *dev,
+ struct igb_rte_flow_rss_conf *conf, bool add)
+{
+ uint32_t shift;
+ uint16_t i, j;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!add) {
+ if (igb_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
+ igb_rss_disable(dev);
+ memset(&filter_info->rss_info, 0,
+ sizeof(struct igb_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (filter_info->rss_info.conf.queue_num)
+ return -EINVAL;
+
+ /* Fill in redirection table. */
+ shift = (hw->mac.type == e1000_82575) ? 6 : 0;
+ for (i = 0, j = 0; i < 128; i++, j++) {
+ union e1000_reta {
+ uint32_t dword;
+ uint8_t bytes[4];
+ } reta;
+ uint8_t q_idx;
+
+ if (j == conf->conf.queue_num)
+ j = 0;
+ q_idx = conf->conf.queue[j];
+ reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
+ if ((i & 3) == 3)
+ E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
+ }
+
+ /* Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
+ igb_rss_disable(dev);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ igb_hw_rss_hash_set(hw, &rss_conf);
+
+ if (igb_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/e1000/meson.build b/src/spdk/dpdk/drivers/net/e1000/meson.build
new file mode 100644
index 00000000..cf456995
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/meson.build
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'e1000_logs.c',
+ 'em_ethdev.c',
+ 'em_rxtx.c',
+ 'igb_ethdev.c',
+ 'igb_flow.c',
+ 'igb_pf.c',
+ 'igb_rxtx.c'
+)
+
+includes += include_directories('base')
diff --git a/src/spdk/dpdk/drivers/net/e1000/rte_pmd_e1000_version.map b/src/spdk/dpdk/drivers/net/e1000/rte_pmd_e1000_version.map
new file mode 100644
index 00000000..ef353984
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/e1000/rte_pmd_e1000_version.map
@@ -0,0 +1,4 @@
+DPDK_2.0 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/ena/Makefile b/src/spdk/dpdk/drivers/net/ena/Makefile
new file mode 100644
index 00000000..ff9ce315
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/Makefile
@@ -0,0 +1,63 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ena.a
+CFLAGS += $(WERROR_FLAGS) -O2
+INCLUDES :=-I$(SRCDIR) -I$(SRCDIR)/base/ena_defs -I$(SRCDIR)/base
+
+EXPORT_MAP := rte_pmd_ena_version.map
+LIBABIVER := 1
+
+# rte_fbarray is not yet part of stable API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+VPATH += $(SRCDIR)/base
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_com.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_eth_com.c
+
+CFLAGS += $(INCLUDES)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_timer
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_com.c b/src/spdk/dpdk/drivers/net/ena/base/ena_com.c
new file mode 100644
index 00000000..4abf1a28
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/base/ena_com.c
@@ -0,0 +1,2770 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "ena_com.h"
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/* Timeout in micro-sec */
+#define ADMIN_CMD_TIMEOUT_US (3000000)
+
+#define ENA_ASYNC_QUEUE_DEPTH 16
+#define ENA_ADMIN_QUEUE_DEPTH 32
+
+#ifdef ENA_EXTENDED_STATS
+
+#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
+#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
+#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
+
+#endif /* ENA_EXTENDED_STATS */
+
+#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
+ | (ENA_COMMON_SPEC_VERSION_MINOR))
+
+#define ENA_CTRL_MAJOR 0
+#define ENA_CTRL_MINOR 0
+#define ENA_CTRL_SUB_MINOR 1
+
+#define MIN_ENA_CTRL_VER \
+ (((ENA_CTRL_MAJOR) << \
+ (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
+ ((ENA_CTRL_MINOR) << \
+ (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
+ (ENA_CTRL_SUB_MINOR))
+
+#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
+#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
+
+#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
+#define ENA_POLL_MS 5
+
+/*****************************************************************************/
+/*****************************************************************************/
+/*****************************************************************************/
+
+enum ena_cmd_status {
+ ENA_CMD_SUBMITTED,
+ ENA_CMD_COMPLETED,
+ /* Abort - canceled by the driver */
+ ENA_CMD_ABORTED,
+};
+
+struct ena_comp_ctx {
+ ena_wait_event_t wait_event;
+ struct ena_admin_acq_entry *user_cqe;
+ u32 comp_size;
+ enum ena_cmd_status status;
+ /* status from the device */
+ u8 comp_status;
+ u8 cmd_opcode;
+ bool occupied;
+};
+
+struct ena_com_stats_ctx {
+ struct ena_admin_aq_get_stats_cmd get_cmd;
+ struct ena_admin_acq_get_stats_resp get_resp;
+};
+
+static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
+ struct ena_common_mem_addr *ena_addr,
+ dma_addr_t addr)
+{
+ if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
+ ena_trc_err("dma address has more bits that the device supports\n");
+ return ENA_COM_INVAL;
+ }
+
+ ena_addr->mem_addr_low = lower_32_bits(addr);
+ ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
+
+ return 0;
+}
+
+static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
+{
+ struct ena_com_admin_sq *sq = &queue->sq;
+ u16 size = ADMIN_SQ_SIZE(queue->q_depth);
+
+ ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
+ sq->mem_handle);
+
+ if (!sq->entries) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ sq->head = 0;
+ sq->tail = 0;
+ sq->phase = 1;
+
+ sq->db_addr = NULL;
+
+ return 0;
+}
+
+static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
+{
+ struct ena_com_admin_cq *cq = &queue->cq;
+ u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+
+ ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
+ cq->mem_handle);
+
+ if (!cq->entries) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ cq->head = 0;
+ cq->phase = 1;
+
+ return 0;
+}
+
+static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
+ struct ena_aenq_handlers *aenq_handlers)
+{
+ struct ena_com_aenq *aenq = &dev->aenq;
+ u32 addr_low, addr_high, aenq_caps;
+ u16 size;
+
+ dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+ size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
+ ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
+ aenq->entries,
+ aenq->dma_addr,
+ aenq->mem_handle);
+
+ if (!aenq->entries) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ aenq->head = aenq->q_depth;
+ aenq->phase = 1;
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
+
+ ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
+
+ aenq_caps = 0;
+ aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+ aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
+ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+
+ if (unlikely(!aenq_handlers)) {
+ ena_trc_err("aenq handlers pointer is NULL\n");
+ return ENA_COM_INVAL;
+ }
+
+ aenq->aenq_handlers = aenq_handlers;
+
+ return 0;
+}
+
+static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+ struct ena_comp_ctx *comp_ctx)
+{
+ comp_ctx->occupied = false;
+ ATOMIC32_DEC(&queue->outstanding_cmds);
+}
+
+static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+ u16 command_id, bool capture)
+{
+ if (unlikely(command_id >= queue->q_depth)) {
+ ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, queue->q_depth);
+ return NULL;
+ }
+
+ if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
+ ena_trc_err("Completion context is occupied\n");
+ return NULL;
+ }
+
+ if (capture) {
+ ATOMIC32_INC(&queue->outstanding_cmds);
+ queue->comp_ctx[command_id].occupied = true;
+ }
+
+ return &queue->comp_ctx[command_id];
+}
+
+static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ struct ena_comp_ctx *comp_ctx;
+ u16 tail_masked, cmd_id;
+ u16 queue_size_mask;
+ u16 cnt;
+
+ queue_size_mask = admin_queue->q_depth - 1;
+
+ tail_masked = admin_queue->sq.tail & queue_size_mask;
+
+ /* In case of queue FULL */
+ cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
+ if (cnt >= admin_queue->q_depth) {
+ ena_trc_dbg("admin queue is full.\n");
+ admin_queue->stats.out_of_space++;
+ return ERR_PTR(ENA_COM_NO_SPACE);
+ }
+
+ cmd_id = admin_queue->curr_cmd_id;
+
+ cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
+ ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+
+ cmd->aq_common_descriptor.command_id |= cmd_id &
+ ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
+ if (unlikely(!comp_ctx))
+ return ERR_PTR(ENA_COM_INVAL);
+
+ comp_ctx->status = ENA_CMD_SUBMITTED;
+ comp_ctx->comp_size = (u32)comp_size_in_bytes;
+ comp_ctx->user_cqe = comp;
+ comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
+
+ ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);
+
+ memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
+
+ admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
+ queue_size_mask;
+
+ admin_queue->sq.tail++;
+ admin_queue->stats.submitted_cmd++;
+
+ if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
+ admin_queue->sq.phase = !admin_queue->sq.phase;
+
+ ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
+ admin_queue->sq.db_addr);
+
+ return comp_ctx;
+}
+
+static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+{
+ size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
+ struct ena_comp_ctx *comp_ctx;
+ u16 i;
+
+ queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
+ if (unlikely(!queue->comp_ctx)) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ for (i = 0; i < queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(queue, i, false);
+ if (comp_ctx)
+ ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
+ }
+
+ return 0;
+}
+
+static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ unsigned long flags = 0;
+ struct ena_comp_ctx *comp_ctx;
+
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ if (unlikely(!admin_queue->running_state)) {
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ return ERR_PTR(ENA_COM_NO_DEVICE);
+ }
+ comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
+ cmd_size_in_bytes,
+ comp,
+ comp_size_in_bytes);
+ if (IS_ERR(comp_ctx))
+ admin_queue->running_state = false;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+ return comp_ctx;
+}
+
+static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx,
+ struct ena_com_io_sq *io_sq)
+{
+ size_t size;
+ int dev_node = 0;
+
+ memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
+
+ io_sq->desc_entry_size =
+ (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_desc) :
+ sizeof(struct ena_eth_io_rx_desc);
+
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+ ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle,
+ ctx->numa_node,
+ dev_node);
+ if (!io_sq->desc_addr.virt_addr) {
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle);
+ }
+ } else {
+ ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ ctx->numa_node,
+ dev_node);
+ if (!io_sq->desc_addr.virt_addr) {
+ io_sq->desc_addr.virt_addr =
+ ENA_MEM_ALLOC(ena_dev->dmadev, size);
+ }
+ }
+
+ if (!io_sq->desc_addr.virt_addr) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ io_sq->tail = 0;
+ io_sq->next_to_comp = 0;
+ io_sq->phase = 1;
+
+ return 0;
+}
+
+static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx,
+ struct ena_com_io_cq *io_cq)
+{
+ size_t size;
+ int prev_node = 0;
+
+ memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
+
+ /* Use the basic completion descriptor for Rx */
+ io_cq->cdesc_entry_size_in_bytes =
+ (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_cdesc) :
+ sizeof(struct ena_eth_io_rx_cdesc_base);
+
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+ ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle,
+ ctx->numa_node,
+ prev_node);
+ if (!io_cq->cdesc_addr.virt_addr) {
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle);
+ }
+
+ if (!io_cq->cdesc_addr.virt_addr) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ io_cq->phase = 1;
+ io_cq->head = 0;
+
+ return 0;
+}
+
+static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_acq_entry *cqe)
+{
+ struct ena_comp_ctx *comp_ctx;
+ u16 cmd_id;
+
+ cmd_id = cqe->acq_common_descriptor.command &
+ ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
+ if (unlikely(!comp_ctx)) {
+ ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
+ admin_queue->running_state = false;
+ return;
+ }
+
+ comp_ctx->status = ENA_CMD_COMPLETED;
+ comp_ctx->comp_status = cqe->acq_common_descriptor.status;
+
+ if (comp_ctx->user_cqe)
+ memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
+
+ if (!admin_queue->polling)
+ ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
+}
+
+static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
+{
+ struct ena_admin_acq_entry *cqe = NULL;
+ u16 comp_num = 0;
+ u16 head_masked;
+ u8 phase;
+
+ head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
+ phase = admin_queue->cq.phase;
+
+ cqe = &admin_queue->cq.entries[head_masked];
+
+ /* Go over all the completions */
+ while ((cqe->acq_common_descriptor.flags &
+ ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+ /* Do not read the rest of the completion entry before the
+ * phase bit was validated
+ */
+ rmb();
+ ena_com_handle_single_admin_completion(admin_queue, cqe);
+
+ head_masked++;
+ comp_num++;
+ if (unlikely(head_masked == admin_queue->q_depth)) {
+ head_masked = 0;
+ phase = !phase;
+ }
+
+ cqe = &admin_queue->cq.entries[head_masked];
+ }
+
+ admin_queue->cq.head += comp_num;
+ admin_queue->cq.phase = phase;
+ admin_queue->sq.head += comp_num;
+ admin_queue->stats.completed_cmd += comp_num;
+}
+
+static int ena_com_comp_status_to_errno(u8 comp_status)
+{
+ if (unlikely(comp_status != 0))
+ ena_trc_err("admin command failed[%u]\n", comp_status);
+
+ if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
+ return ENA_COM_INVAL;
+
+ switch (comp_status) {
+ case ENA_ADMIN_SUCCESS:
+ return 0;
+ case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
+ return ENA_COM_NO_MEM;
+ case ENA_ADMIN_UNSUPPORTED_OPCODE:
+ return ENA_COM_UNSUPPORTED;
+ case ENA_ADMIN_BAD_OPCODE:
+ case ENA_ADMIN_MALFORMED_REQUEST:
+ case ENA_ADMIN_ILLEGAL_PARAMETER:
+ case ENA_ADMIN_UNKNOWN_ERROR:
+ return ENA_COM_INVAL;
+ }
+
+ return 0;
+}
+
+static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ unsigned long flags = 0;
+ unsigned long timeout;
+ int ret;
+
+ timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
+
+ while (1) {
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+ if (comp_ctx->status != ENA_CMD_SUBMITTED)
+ break;
+
+ if (ENA_TIME_EXPIRE(timeout)) {
+ ena_trc_err("Wait for completion (polling) timeout\n");
+ /* ENA didn't have any completion */
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ admin_queue->stats.no_completion++;
+ admin_queue->running_state = false;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+ ret = ENA_COM_TIMER_EXPIRED;
+ goto err;
+ }
+
+ ENA_MSLEEP(ENA_POLL_MS);
+ }
+
+ if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
+ ena_trc_err("Command was aborted\n");
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ admin_queue->stats.aborted_cmd++;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ ret = ENA_COM_NO_DEVICE;
+ goto err;
+ }
+
+ ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
+ "Invalid comp status %d\n", comp_ctx->status);
+
+ ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+ comp_ctxt_release(admin_queue, comp_ctx);
+ return ret;
+}
+
+static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ unsigned long flags = 0;
+ int ret;
+
+ ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
+ admin_queue->completion_timeout);
+
+ /* In case the command wasn't completed find out the root cause.
+ * There might be 2 kinds of errors
+ * 1) No completion (timeout reached)
+ * 2) There is completion but the device didn't get any msi-x interrupt.
+ */
+ if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ admin_queue->stats.no_completion++;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+ if (comp_ctx->status == ENA_CMD_COMPLETED)
+ ena_trc_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
+ comp_ctx->cmd_opcode);
+ else
+ ena_trc_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
+ comp_ctx->cmd_opcode, comp_ctx->status);
+
+ admin_queue->running_state = false;
+ ret = ENA_COM_TIMER_EXPIRED;
+ goto err;
+ }
+
+ ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+ comp_ctxt_release(admin_queue, comp_ctx);
+ return ret;
+}
+
+/* This method read the hardware device register through posting writes
+ * and waiting for response
+ * On timeout the function will return ENA_MMIO_READ_TIMEOUT
+ */
+static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+ volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
+ mmio_read->read_resp;
+ u32 mmio_read_reg, ret, i;
+ unsigned long flags = 0;
+ u32 timeout = mmio_read->reg_read_to;
+
+ ENA_MIGHT_SLEEP();
+
+ if (timeout == 0)
+ timeout = ENA_REG_READ_TIMEOUT;
+
+ /* If readless is disabled, perform regular read */
+ if (!mmio_read->readless_supported)
+ return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
+
+ ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
+ mmio_read->seq_num++;
+
+ read_resp->req_id = mmio_read->seq_num + 0xDEAD;
+ mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
+ ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
+ mmio_read_reg |= mmio_read->seq_num &
+ ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
+
+ /* make sure read_resp->req_id get updated before the hw can write
+ * there
+ */
+ wmb();
+
+ ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+
+ for (i = 0; i < timeout; i++) {
+ if (read_resp->req_id == mmio_read->seq_num)
+ break;
+
+ ENA_UDELAY(1);
+ }
+
+ if (unlikely(i == timeout)) {
+ ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+ mmio_read->seq_num,
+ offset,
+ read_resp->req_id,
+ read_resp->reg_off);
+ ret = ENA_MMIO_READ_TIMEOUT;
+ goto err;
+ }
+
+ if (read_resp->reg_off != offset) {
+ ena_trc_err("Read failure: wrong offset provided");
+ ret = ENA_MMIO_READ_TIMEOUT;
+ } else {
+ ret = read_resp->reg_val;
+ }
+err:
+ ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
+
+ return ret;
+}
+
+/* There are two types to wait for completion.
+ * Polling mode - wait until the completion is available.
+ * Async mode - wait on wait queue until the completion is ready
+ * (or the timeout expired).
+ * It is expected that the IRQ called ena_com_handle_admin_completion
+ * to mark the completions.
+ */
+static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ if (admin_queue->polling)
+ return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
+ admin_queue);
+
+ return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
+ admin_queue);
+}
+
+static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
+ struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
+ u8 direction;
+ int ret;
+
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
+
+ if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ direction = ENA_ADMIN_SQ_DIRECTION_TX;
+ else
+ direction = ENA_ADMIN_SQ_DIRECTION_RX;
+
+ destroy_cmd.sq.sq_identity |= (direction <<
+ ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
+ ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+
+ destroy_cmd.sq.sq_idx = io_sq->idx;
+ destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
+ ena_trc_err("failed to destroy io sq error: %d\n", ret);
+
+ return ret;
+}
+
+static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_io_cq *io_cq)
+{
+ size_t size;
+
+ if (io_cq->cdesc_addr.virt_addr) {
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle);
+
+ io_cq->cdesc_addr.virt_addr = NULL;
+ }
+
+ if (io_sq->desc_addr.virt_addr) {
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle);
+ else
+ ENA_MEM_FREE(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+
+ io_sq->desc_addr.virt_addr = NULL;
+ }
+}
+
+static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
+ u16 exp_state)
+{
+ u32 val, i;
+
+ /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
+ timeout = (timeout * 100) / ENA_POLL_MS;
+
+ for (i = 0; i < timeout; i++) {
+ val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+ if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
+ ena_trc_err("Reg read timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
+ exp_state)
+ return 0;
+
+ ENA_MSLEEP(ENA_POLL_MS);
+ }
+
+ return ENA_COM_TIMER_EXPIRED;
+}
+
+static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
+ enum ena_admin_aq_feature_id feature_id)
+{
+ u32 feature_mask = 1 << feature_id;
+
+ /* Device attributes is always supported */
+ if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
+ !(ena_dev->supported_features & feature_mask))
+ return false;
+
+ return true;
+}
+
+static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *get_resp,
+ enum ena_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_get_feat_cmd get_cmd;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
+ ena_trc_dbg("Feature %d isn't supported\n", feature_id);
+ return ENA_COM_UNSUPPORTED;
+ }
+
+ memset(&get_cmd, 0x0, sizeof(get_cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
+
+ if (control_buff_size)
+ get_cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ else
+ get_cmd.aq_common_descriptor.flags = 0;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &get_cmd.control_buffer.address,
+ control_buf_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ get_cmd.control_buffer.length = control_buff_size;
+
+ get_cmd.feat_common.feature_id = feature_id;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)
+ &get_cmd,
+ sizeof(get_cmd),
+ (struct ena_admin_acq_entry *)
+ get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to submit get_feature command %d error: %d\n",
+ feature_id, ret);
+
+ return ret;
+}
+
+static int ena_com_get_feature(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *get_resp,
+ enum ena_admin_aq_feature_id feature_id)
+{
+ return ena_com_get_feature_ex(ena_dev,
+ get_resp,
+ feature_id,
+ 0,
+ 0);
+}
+
+static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ sizeof(*rss->hash_key),
+ rss->hash_key,
+ rss->hash_key_dma_addr,
+ rss->hash_key_mem_handle);
+
+ if (unlikely(!rss->hash_key))
+ return ENA_COM_NO_MEM;
+
+ return 0;
+}
+
+static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_key)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ sizeof(*rss->hash_key),
+ rss->hash_key,
+ rss->hash_key_dma_addr,
+ rss->hash_key_mem_handle);
+ rss->hash_key = NULL;
+}
+
+static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ sizeof(*rss->hash_ctrl),
+ rss->hash_ctrl,
+ rss->hash_ctrl_dma_addr,
+ rss->hash_ctrl_mem_handle);
+
+ if (unlikely(!rss->hash_ctrl))
+ return ENA_COM_NO_MEM;
+
+ return 0;
+}
+
+static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_ctrl)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ sizeof(*rss->hash_ctrl),
+ rss->hash_ctrl,
+ rss->hash_ctrl_dma_addr,
+ rss->hash_ctrl_mem_handle);
+ rss->hash_ctrl = NULL;
+}
+
+static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+ u16 log_size)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ size_t tbl_size;
+ int ret;
+
+ ret = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ if (unlikely(ret))
+ return ret;
+
+ if ((get_resp.u.ind_table.min_size > log_size) ||
+ (get_resp.u.ind_table.max_size < log_size)) {
+ ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
+ 1 << log_size,
+ 1 << get_resp.u.ind_table.min_size,
+ 1 << get_resp.u.ind_table.max_size);
+ return ENA_COM_INVAL;
+ }
+
+ tbl_size = (1ULL << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ tbl_size,
+ rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr,
+ rss->rss_ind_tbl_mem_handle);
+ if (unlikely(!rss->rss_ind_tbl))
+ goto mem_err1;
+
+ tbl_size = (1ULL << log_size) * sizeof(u16);
+ rss->host_rss_ind_tbl =
+ ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
+ if (unlikely(!rss->host_rss_ind_tbl))
+ goto mem_err2;
+
+ rss->tbl_log_size = log_size;
+
+ return 0;
+
+mem_err2:
+ tbl_size = (1ULL << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ tbl_size,
+ rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr,
+ rss->rss_ind_tbl_mem_handle);
+ rss->rss_ind_tbl = NULL;
+mem_err1:
+ rss->tbl_log_size = 0;
+ return ENA_COM_NO_MEM;
+}
+
+static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ size_t tbl_size = (1ULL << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ if (rss->rss_ind_tbl)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ tbl_size,
+ rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr,
+ rss->rss_ind_tbl_mem_handle);
+ rss->rss_ind_tbl = NULL;
+
+ if (rss->host_rss_ind_tbl)
+ ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
+ rss->host_rss_ind_tbl = NULL;
+}
+
+static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq, u16 cq_idx)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_create_sq_cmd create_cmd;
+ struct ena_admin_acq_create_sq_resp_desc cmd_completion;
+ u8 direction;
+ int ret;
+
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
+
+ create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
+
+ if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ direction = ENA_ADMIN_SQ_DIRECTION_TX;
+ else
+ direction = ENA_ADMIN_SQ_DIRECTION_RX;
+
+ create_cmd.sq_identity |= (direction <<
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+
+ create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+
+ create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+
+ create_cmd.sq_caps_3 |=
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+
+ create_cmd.cq_idx = cq_idx;
+ create_cmd.sq_depth = io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+ ret = ena_com_mem_addr_set(ena_dev,
+ &create_cmd.sq_ba,
+ io_sq->desc_addr.phys_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+ }
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
+ return ret;
+ }
+
+ io_sq->idx = cmd_completion.sq_idx;
+
+ io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ (uintptr_t)cmd_completion.sq_doorbell_offset);
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
+ + cmd_completion.llq_headers_offset);
+
+ io_sq->desc_addr.pbuf_dev_addr =
+ (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
+ cmd_completion.llq_descriptors_offset);
+ }
+
+ ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+
+ return ret;
+}
+
+static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_com_io_sq *io_sq;
+ u16 qid;
+ int i;
+
+ for (i = 0; i < 1 << rss->tbl_log_size; i++) {
+ qid = rss->host_rss_ind_tbl[i];
+ if (qid >= ENA_TOTAL_NUM_QUEUES)
+ return ENA_COM_INVAL;
+
+ io_sq = &ena_dev->io_sq_queues[qid];
+
+ if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
+ return ENA_COM_INVAL;
+
+ rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
+ }
+
+ return 0;
+}
+
+static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
+{
+ u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
+ struct ena_rss *rss = &ena_dev->rss;
+ u8 idx;
+ u16 i;
+
+ for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
+ dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
+
+ for (i = 0; i < 1 << rss->tbl_log_size; i++) {
+ if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
+ return ENA_COM_INVAL;
+ idx = (u8)rss->rss_ind_tbl[i].cq_idx;
+
+ if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
+ return ENA_COM_INVAL;
+
+ rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
+ }
+
+ return 0;
+}
+
+static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+{
+ size_t size;
+
+ size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
+
+ ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
+ if (!ena_dev->intr_moder_tbl)
+ return ENA_COM_NO_MEM;
+
+ ena_com_config_default_interrupt_moderation_table(ena_dev);
+
+ return 0;
+}
+
+static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
+ u16 intr_delay_resolution)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+ unsigned int i;
+
+ if (!intr_delay_resolution) {
+ ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
+ intr_delay_resolution = 1;
+ }
+ ena_dev->intr_delay_resolution = intr_delay_resolution;
+
+ /* update Rx */
+ for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
+ intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
+
+ /* update Tx */
+ ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
+}
+
+/*****************************************************************************/
+/******************************* API ******************************/
+/*****************************************************************************/
+
+int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size)
+{
+ struct ena_comp_ctx *comp_ctx;
+ int ret;
+
+ comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
+ comp, comp_size);
+ if (IS_ERR(comp_ctx)) {
+ if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
+ ena_trc_dbg("Failed to submit command [%ld]\n",
+ PTR_ERR(comp_ctx));
+ else
+ ena_trc_err("Failed to submit command [%ld]\n",
+ PTR_ERR(comp_ctx));
+
+ return PTR_ERR(comp_ctx);
+ }
+
+ ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
+ if (unlikely(ret)) {
+ if (admin_queue->running_state)
+ ena_trc_err("Failed to process command. ret = %d\n",
+ ret);
+ else
+ ena_trc_dbg("Failed to process command. ret = %d\n",
+ ret);
+ }
+ return ret;
+}
+
+int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_create_cq_cmd create_cmd;
+ struct ena_admin_acq_create_cq_resp_desc cmd_completion;
+ int ret;
+
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
+
+ create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
+
+ create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
+ ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ create_cmd.cq_caps_1 |=
+ ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+
+ create_cmd.msix_vector = io_cq->msix_vector;
+ create_cmd.cq_depth = io_cq->q_depth;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &create_cmd.cq_ba,
+ io_cq->cdesc_addr.phys_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
+ return ret;
+ }
+
+ io_cq->idx = cmd_completion.cq_idx;
+
+ io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.cq_interrupt_unmask_register_offset);
+
+ if (cmd_completion.cq_head_db_register_offset)
+ io_cq->cq_head_db_reg =
+ (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.cq_head_db_register_offset);
+
+ if (cmd_completion.numa_node_register_offset)
+ io_cq->numa_node_cfg_reg =
+ (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.numa_node_register_offset);
+
+ ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+
+ return ret;
+}
+
+int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+ struct ena_com_io_sq **io_sq,
+ struct ena_com_io_cq **io_cq)
+{
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+ ena_trc_err("Invalid queue number %d but the max is %d\n",
+ qid, ENA_TOTAL_NUM_QUEUES);
+ return ENA_COM_INVAL;
+ }
+
+ *io_sq = &ena_dev->io_sq_queues[qid];
+ *io_cq = &ena_dev->io_cq_queues[qid];
+
+ return 0;
+}
+
+void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_comp_ctx *comp_ctx;
+ u16 i;
+
+ if (!admin_queue->comp_ctx)
+ return;
+
+ for (i = 0; i < admin_queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(admin_queue, i, false);
+ if (unlikely(!comp_ctx))
+ break;
+
+ comp_ctx->status = ENA_CMD_ABORTED;
+
+ ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
+ }
+}
+
+void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ unsigned long flags = 0;
+
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ ENA_MSLEEP(ENA_POLL_MS);
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ }
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+}
+
+int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
+ struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
+ int ret;
+
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
+
+ destroy_cmd.cq_idx = io_cq->idx;
+ destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
+ ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
+
+ return ret;
+}
+
+bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->admin_queue.running_state;
+}
+
+void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ unsigned long flags = 0;
+
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ ena_dev->admin_queue.running_state = state;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+}
+
+void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
+{
+ u16 depth = ena_dev->aenq.q_depth;
+
+ ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
+
+ /* Init head_db to mark that all entries in the queue
+ * are initially available
+ */
+ ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+}
+
+int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ struct ena_admin_get_feat_resp get_resp;
+ int ret;
+
+ ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
+ if (ret) {
+ ena_trc_info("Can't get aenq configuration\n");
+ return ret;
+ }
+
+ if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
+ ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+ get_resp.u.aenq.supported_groups,
+ groups_flag);
+ return ENA_COM_UNSUPPORTED;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags = 0;
+ cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
+ cmd.u.aenq.enabled_groups = groups_flag;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to config AENQ ret: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
+{
+ u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+ int width;
+
+ if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
+ ena_trc_err("Reg read timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
+ ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
+
+ ena_trc_dbg("ENA dma width: %d\n", width);
+
+ if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
+ ena_trc_err("DMA width illegal value: %d\n", width);
+ return ENA_COM_INVAL;
+ }
+
+ ena_dev->dma_addr_bits = width;
+
+ return width;
+}
+
+int ena_com_validate_version(struct ena_com_dev *ena_dev)
+{
+ u32 ver;
+ u32 ctrl_ver;
+ u32 ctrl_ver_masked;
+
+ /* Make sure the ENA version and the controller version are at least
+ * as the driver expects
+ */
+ ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
+ ctrl_ver = ena_com_reg_bar_read32(ena_dev,
+ ENA_REGS_CONTROLLER_VERSION_OFF);
+
+ if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
+ (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+ ena_trc_err("Reg read timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ ena_trc_info("ena device version: %d.%d\n",
+ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
+
+ if (ver < MIN_ENA_VER) {
+ ena_trc_err("ENA version is lower than the minimal version the driver supports\n");
+ return -1;
+ }
+
+ ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
+ >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
+ >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
+
+ ctrl_ver_masked =
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
+
+ /* Validate the ctrl version without the implementation ID */
+ if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
+ ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_com_admin_cq *cq = &admin_queue->cq;
+ struct ena_com_admin_sq *sq = &admin_queue->sq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
+ u16 size;
+
+ ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
+ if (admin_queue->comp_ctx)
+ ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
+ admin_queue->comp_ctx = NULL;
+ size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+ if (sq->entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
+ sq->dma_addr, sq->mem_handle);
+ sq->entries = NULL;
+
+ size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+ if (cq->entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
+ cq->dma_addr, cq->mem_handle);
+ cq->entries = NULL;
+
+ size = ADMIN_AENQ_SIZE(aenq->q_depth);
+ if (ena_dev->aenq.entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
+ aenq->dma_addr, aenq->mem_handle);
+ aenq->entries = NULL;
+}
+
+void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
+{
+ u32 mask_value = 0;
+
+ if (polling)
+ mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+ ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
+ ena_dev->admin_queue.polling = polling;
+}
+
+int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ ENA_SPINLOCK_INIT(mmio_read->lock);
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ sizeof(*mmio_read->read_resp),
+ mmio_read->read_resp,
+ mmio_read->read_resp_dma_addr,
+ mmio_read->read_resp_mem_handle);
+ if (unlikely(!mmio_read->read_resp))
+ return ENA_COM_NO_MEM;
+
+ ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
+
+ mmio_read->read_resp->req_id = 0x0;
+ mmio_read->seq_num = 0x0;
+ mmio_read->readless_supported = true;
+
+ return 0;
+}
+
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ mmio_read->readless_supported = readless_supported;
+}
+
+void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ sizeof(*mmio_read->read_resp),
+ mmio_read->read_resp,
+ mmio_read->read_resp_dma_addr,
+ mmio_read->read_resp_mem_handle);
+
+ mmio_read->read_resp = NULL;
+}
+
+void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+ u32 addr_low, addr_high;
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
+
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+}
+
+int ena_com_admin_init(struct ena_com_dev *ena_dev,
+ struct ena_aenq_handlers *aenq_handlers,
+ bool init_spinlock)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
+ int ret;
+
+ dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+ if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
+ ena_trc_err("Reg read timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
+ ena_trc_err("Device isn't ready, abort com init\n");
+ return ENA_COM_NO_DEVICE;
+ }
+
+ admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
+
+ admin_queue->q_dmadev = ena_dev->dmadev;
+ admin_queue->polling = false;
+ admin_queue->curr_cmd_id = 0;
+
+ ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
+
+ if (init_spinlock)
+ ENA_SPINLOCK_INIT(admin_queue->q_lock);
+
+ ret = ena_com_init_comp_ctxt(admin_queue);
+ if (ret)
+ goto error;
+
+ ret = ena_com_admin_init_sq(admin_queue);
+ if (ret)
+ goto error;
+
+ ret = ena_com_admin_init_cq(admin_queue);
+ if (ret)
+ goto error;
+
+ admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ ENA_REGS_AQ_DB_OFF);
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
+
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
+
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
+
+ aq_caps = 0;
+ aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
+ aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
+ ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
+
+ acq_caps = 0;
+ acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
+ acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
+ ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
+
+ ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
+ ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
+ if (ret)
+ goto error;
+
+ admin_queue->running_state = true;
+
+ return 0;
+error:
+ ena_com_admin_destroy(ena_dev);
+
+ return ret;
+}
+
+int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx)
+{
+ struct ena_com_io_sq *io_sq;
+ struct ena_com_io_cq *io_cq;
+ int ret;
+
+ if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
+ ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
+ ctx->qid, ENA_TOTAL_NUM_QUEUES);
+ return ENA_COM_INVAL;
+ }
+
+ io_sq = &ena_dev->io_sq_queues[ctx->qid];
+ io_cq = &ena_dev->io_cq_queues[ctx->qid];
+
+ memset(io_sq, 0x0, sizeof(*io_sq));
+ memset(io_cq, 0x0, sizeof(*io_cq));
+
+ /* Init CQ */
+ io_cq->q_depth = ctx->queue_size;
+ io_cq->direction = ctx->direction;
+ io_cq->qid = ctx->qid;
+
+ io_cq->msix_vector = ctx->msix_vector;
+
+ io_sq->q_depth = ctx->queue_size;
+ io_sq->direction = ctx->direction;
+ io_sq->qid = ctx->qid;
+
+ io_sq->mem_queue_type = ctx->mem_queue_type;
+
+ if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ /* header length is limited to 8 bits */
+ io_sq->tx_max_header_size =
+ ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
+
+ ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
+ if (ret)
+ goto error;
+ ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
+ if (ret)
+ goto error;
+
+ ret = ena_com_create_io_cq(ena_dev, io_cq);
+ if (ret)
+ goto error;
+
+ ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
+ if (ret)
+ goto destroy_io_cq;
+
+ return 0;
+
+destroy_io_cq:
+ ena_com_destroy_io_cq(ena_dev, io_cq);
+error:
+ ena_com_io_queue_free(ena_dev, io_sq, io_cq);
+ return ret;
+}
+
+void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
+{
+ struct ena_com_io_sq *io_sq;
+ struct ena_com_io_cq *io_cq;
+
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+ ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
+ qid, ENA_TOTAL_NUM_QUEUES);
+ return;
+ }
+
+ io_sq = &ena_dev->io_sq_queues[qid];
+ io_cq = &ena_dev->io_cq_queues[qid];
+
+ ena_com_destroy_io_sq(ena_dev, io_sq);
+ ena_com_destroy_io_cq(ena_dev, io_cq);
+
+ ena_com_io_queue_free(ena_dev, io_sq, io_cq);
+}
+
+int ena_com_get_link_params(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *resp)
+{
+ return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
+}
+
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ struct ena_admin_get_feat_resp get_resp;
+ int rc;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_DEVICE_ATTRIBUTES);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
+ sizeof(get_resp.u.dev_attr));
+ ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_MAX_QUEUES_NUM);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
+ sizeof(get_resp.u.max_queue));
+ ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_AENQ_CONFIG);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
+ sizeof(get_resp.u.aenq));
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
+ sizeof(get_resp.u.offload));
+
+ /* Driver hints isn't mandatory admin command. So in case the
+ * command isn't supported set driver hints to 0
+ */
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
+
+ if (!rc)
+ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
+ sizeof(get_resp.u.hw_hints));
+ else if (rc == ENA_COM_UNSUPPORTED)
+ memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
+ else
+ return rc;
+
+ return 0;
+}
+
+void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
+{
+ ena_com_handle_admin_completion(&ena_dev->admin_queue);
+}
+
+/* ena_handle_specific_aenq_event:
+ * return the handler that is relevant to the specific event group
+ */
+static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
+ u16 group)
+{
+ struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
+
+ if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
+ return aenq_handlers->handlers[group];
+
+ return aenq_handlers->unimplemented_handler;
+}
+
+/* ena_aenq_intr_handler:
+ * handles the aenq incoming events.
+ * pop events from the queue and apply the specific handler
+ */
+void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
+{
+ struct ena_admin_aenq_entry *aenq_e;
+ struct ena_admin_aenq_common_desc *aenq_common;
+ struct ena_com_aenq *aenq = &dev->aenq;
+ ena_aenq_handler handler_cb;
+ unsigned long long timestamp;
+ u16 masked_head, processed = 0;
+ u8 phase;
+
+ masked_head = aenq->head & (aenq->q_depth - 1);
+ phase = aenq->phase;
+ aenq_e = &aenq->entries[masked_head]; /* Get first entry */
+ aenq_common = &aenq_e->aenq_common_desc;
+
+ /* Go over all the events */
+ while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
+ phase) {
+ timestamp = (unsigned long long)aenq_common->timestamp_low |
+ ((unsigned long long)aenq_common->timestamp_high << 32);
+ ENA_TOUCH(timestamp); /* In case debug is disabled */
+ ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
+ aenq_common->group,
+ aenq_common->syndrom,
+ timestamp);
+
+ /* Handle specific event*/
+ handler_cb = ena_com_get_specific_aenq_cb(dev,
+ aenq_common->group);
+ handler_cb(data, aenq_e); /* call the actual event handler*/
+
+ /* Get next event entry */
+ masked_head++;
+ processed++;
+
+ if (unlikely(masked_head == aenq->q_depth)) {
+ masked_head = 0;
+ phase = !phase;
+ }
+ aenq_e = &aenq->entries[masked_head];
+ aenq_common = &aenq_e->aenq_common_desc;
+ }
+
+ aenq->head += processed;
+ aenq->phase = phase;
+
+ /* Don't update aenq doorbell if there weren't any processed events */
+ if (!processed)
+ return;
+
+ /* write the aenq doorbell after all AENQ descriptors were read */
+ mb();
+ ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+}
+
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason)
+{
+ u32 stat, timeout, cap, reset_val;
+ int rc;
+
+ stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+ cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+
+ if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
+ (cap == ENA_MMIO_READ_TIMEOUT))) {
+ ena_trc_err("Reg read32 timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
+ ena_trc_err("Device isn't ready, can't reset device\n");
+ return ENA_COM_INVAL;
+ }
+
+ timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
+ ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
+ if (timeout == 0) {
+ ena_trc_err("Invalid timeout value\n");
+ return ENA_COM_INVAL;
+ }
+
+ /* start reset */
+ reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
+ reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
+ ENA_REGS_DEV_CTL_RESET_REASON_MASK;
+ ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+
+ /* Write again the MMIO read request address */
+ ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
+
+ rc = wait_for_reset_state(ena_dev, timeout,
+ ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
+ if (rc != 0) {
+ ena_trc_err("Reset indication didn't turn on\n");
+ return rc;
+ }
+
+ /* reset done */
+ ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+ rc = wait_for_reset_state(ena_dev, timeout, 0);
+ if (rc != 0) {
+ ena_trc_err("Reset indication didn't turn off\n");
+ return rc;
+ }
+
+ timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
+ ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+ if (timeout)
+ /* the resolution of timeout reg is 100ms */
+ ena_dev->admin_queue.completion_timeout = timeout * 100000;
+ else
+ ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
+
+ return 0;
+}
+
+static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+ struct ena_com_stats_ctx *ctx,
+ enum ena_admin_get_stats_type type)
+{
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
+ struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
+ struct ena_com_admin_queue *admin_queue;
+ int ret;
+
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
+ get_cmd->aq_common_descriptor.flags = 0;
+ get_cmd->type = type;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)get_cmd,
+ sizeof(*get_cmd),
+ (struct ena_admin_acq_entry *)get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to get stats. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_basic_stats *stats)
+{
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
+ if (likely(ret == 0))
+ memcpy(stats, &ctx.get_resp.basic_stats,
+ sizeof(ctx.get_resp.basic_stats));
+
+ return ret;
+}
+
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
+ ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ return ENA_COM_UNSUPPORTED;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags = 0;
+ cmd.feat_common.feature_id = ENA_ADMIN_MTU;
+ cmd.u.mtu.mtu = mtu;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
+
+ return ret;
+}
+
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_offload_desc *offload)
+{
+ int ret;
+ struct ena_admin_get_feat_resp resp;
+
+ ret = ena_com_get_feature(ena_dev, &resp,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to get offload capabilities %d\n", ret);
+ return ret;
+ }
+
+ memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
+
+ return 0;
+}
+
+int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ struct ena_admin_get_feat_resp get_resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_HASH_FUNCTION)) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ return ENA_COM_UNSUPPORTED;
+ }
+
+ /* Validate hash function is supported */
+ ret = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ if (unlikely(ret))
+ return ret;
+
+ if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
+ ena_trc_err("Func hash %d isn't supported by device, abort\n",
+ rss->hash_func);
+ return ENA_COM_UNSUPPORTED;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
+ cmd.u.flow_hash_func.init_val = rss->hash_init_val;
+ cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->hash_key_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.control_buffer.length = sizeof(*rss->hash_key);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to set hash function %d. error: %d\n",
+ rss->hash_func, ret);
+ return ENA_COM_INVAL;
+ }
+
+ return 0;
+}
+
+int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions func,
+ const u8 *key, u16 key_len, u32 init_val)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ rss->hash_key;
+ int rc;
+
+ /* Make sure size is a mult of DWs */
+ if (unlikely(key_len & 0x3))
+ return ENA_COM_INVAL;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION,
+ rss->hash_key_dma_addr,
+ sizeof(*rss->hash_key));
+ if (unlikely(rc))
+ return rc;
+
+ if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
+ ena_trc_err("Flow hash function %d isn't supported\n", func);
+ return ENA_COM_UNSUPPORTED;
+ }
+
+ switch (func) {
+ case ENA_ADMIN_TOEPLITZ:
+ if (key_len > sizeof(hash_key->key)) {
+ ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
+ key_len, sizeof(hash_key->key));
+ return ENA_COM_INVAL;
+ }
+
+ memcpy(hash_key->key, key, key_len);
+ rss->hash_init_val = init_val;
+ hash_key->keys_num = key_len >> 2;
+ break;
+ case ENA_ADMIN_CRC32:
+ rss->hash_init_val = init_val;
+ break;
+ default:
+ ena_trc_err("Invalid hash function (%d)\n", func);
+ return ENA_COM_INVAL;
+ }
+
+ rc = ena_com_set_hash_function(ena_dev);
+
+ /* Restore the old function */
+ if (unlikely(rc))
+ ena_com_get_hash_function(ena_dev, NULL, NULL);
+
+ return rc;
+}
+
+int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions *func,
+ u8 *key)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ rss->hash_key;
+ int rc;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION,
+ rss->hash_key_dma_addr,
+ sizeof(*rss->hash_key));
+ if (unlikely(rc))
+ return rc;
+
+ rss->hash_func = get_resp.u.flow_hash_func.selected_func;
+ if (func)
+ *func = rss->hash_func;
+
+ if (key)
+ memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
+
+ return 0;
+}
+
+int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 *fields)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ int rc;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_INPUT,
+ rss->hash_ctrl_dma_addr,
+ sizeof(*rss->hash_ctrl));
+ if (unlikely(rc))
+ return rc;
+
+ if (fields)
+ *fields = rss->hash_ctrl->selected_fields[proto].fields;
+
+ return 0;
+}
+
+int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_HASH_INPUT)) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_INPUT);
+ return ENA_COM_UNSUPPORTED;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
+ cmd.u.flow_hash_input.enabled_input_sort =
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->hash_ctrl_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+ cmd.control_buffer.length = sizeof(*hash_ctrl);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret))
+ ena_trc_err("Failed to set hash input. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl =
+ rss->hash_ctrl;
+ u16 available_fields = 0;
+ int rc, i;
+
+ /* Get the supported hash input */
+ rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+ if (unlikely(rc))
+ return rc;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
+ ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
+
+ for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
+ available_fields = hash_ctrl->selected_fields[i].fields &
+ hash_ctrl->supported_fields[i].fields;
+ if (available_fields != hash_ctrl->selected_fields[i].fields) {
+ ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
+ i, hash_ctrl->supported_fields[i].fields,
+ hash_ctrl->selected_fields[i].fields);
+ return ENA_COM_UNSUPPORTED;
+ }
+ }
+
+ rc = ena_com_set_hash_ctrl(ena_dev);
+
+ /* In case of failure, restore the old hash ctrl */
+ if (unlikely(rc))
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+
+ return rc;
+}
+
+int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 hash_fields)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
+ u16 supported_fields;
+ int rc;
+
+ if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
+ ena_trc_err("Invalid proto num (%u)\n", proto);
+ return ENA_COM_INVAL;
+ }
+
+ /* Get the ctrl table */
+ rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
+ if (unlikely(rc))
+ return rc;
+
+ /* Make sure all the fields are supported */
+ supported_fields = hash_ctrl->supported_fields[proto].fields;
+ if ((hash_fields & supported_fields) != hash_fields) {
+ ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
+ proto, hash_fields, supported_fields);
+ }
+
+ hash_ctrl->selected_fields[proto].fields = hash_fields;
+
+ rc = ena_com_set_hash_ctrl(ena_dev);
+
+ /* In case of failure, restore the old hash ctrl */
+ if (unlikely(rc))
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+
+ return 0;
+}
+
+int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
+ u16 entry_idx, u16 entry_value)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
+ return ENA_COM_INVAL;
+
+ if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
+ return ENA_COM_INVAL;
+
+ rss->host_rss_ind_tbl[entry_idx] = entry_value;
+
+ return 0;
+}
+
+int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ return ENA_COM_UNSUPPORTED;
+ }
+
+ ret = ena_com_ind_tbl_convert_to_device(ena_dev);
+ if (ret) {
+ ena_trc_err("Failed to convert host indirection table to device table\n");
+ return ret;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
+ cmd.u.ind_table.size = rss->tbl_log_size;
+ cmd.u.ind_table.inline_index = 0xFFFFFFFF;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->rss_ind_tbl_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to set indirect table. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ u32 tbl_size;
+ int i, rc;
+
+ tbl_size = (1ULL << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
+ rss->rss_ind_tbl_dma_addr,
+ tbl_size);
+ if (unlikely(rc))
+ return rc;
+
+ if (!ind_tbl)
+ return 0;
+
+ rc = ena_com_ind_tbl_convert_from_device(ena_dev);
+ if (unlikely(rc))
+ return rc;
+
+ for (i = 0; i < (1 << rss->tbl_log_size); i++)
+ ind_tbl[i] = rss->host_rss_ind_tbl[i];
+
+ return 0;
+}
+
+int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
+{
+ int rc;
+
+ memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
+
+ rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
+ if (unlikely(rc))
+ goto err_indr_tbl;
+
+ rc = ena_com_hash_key_allocate(ena_dev);
+ if (unlikely(rc))
+ goto err_hash_key;
+
+ rc = ena_com_hash_ctrl_init(ena_dev);
+ if (unlikely(rc))
+ goto err_hash_ctrl;
+
+ return 0;
+
+err_hash_ctrl:
+ ena_com_hash_key_destroy(ena_dev);
+err_hash_key:
+ ena_com_indirect_table_destroy(ena_dev);
+err_indr_tbl:
+
+ return rc;
+}
+
+void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
+{
+ ena_com_indirect_table_destroy(ena_dev);
+ ena_com_hash_key_destroy(ena_dev);
+ ena_com_hash_ctrl_destroy(ena_dev);
+
+ memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
+}
+
+int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ SZ_4K,
+ host_attr->host_info,
+ host_attr->host_info_dma_addr,
+ host_attr->host_info_dma_handle);
+ if (unlikely(!host_attr->host_info))
+ return ENA_COM_NO_MEM;
+
+ return 0;
+}
+
+int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
+ u32 debug_area_size)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ debug_area_size,
+ host_attr->debug_area_virt_addr,
+ host_attr->debug_area_dma_addr,
+ host_attr->debug_area_dma_handle);
+ if (unlikely(!host_attr->debug_area_virt_addr)) {
+ host_attr->debug_area_size = 0;
+ return ENA_COM_NO_MEM;
+ }
+
+ host_attr->debug_area_size = debug_area_size;
+
+ return 0;
+}
+
+void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ if (host_attr->host_info) {
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ SZ_4K,
+ host_attr->host_info,
+ host_attr->host_info_dma_addr,
+ host_attr->host_info_dma_handle);
+ host_attr->host_info = NULL;
+ }
+}
+
+void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ if (host_attr->debug_area_virt_addr) {
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ host_attr->debug_area_size,
+ host_attr->debug_area_virt_addr,
+ host_attr->debug_area_dma_addr,
+ host_attr->debug_area_dma_handle);
+ host_attr->debug_area_virt_addr = NULL;
+ }
+}
+
+int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+
+ int ret;
+
+ /* Host attribute config is called before ena_com_get_dev_attr_feat
+ * so ena_com can't check if the feature is supported.
+ */
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.u.host_attr.debug_ba,
+ host_attr->debug_area_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.u.host_attr.os_info_ba,
+ host_attr->host_info_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to set host attributes: %d\n", ret);
+
+ return ret;
+}
+
+/* Interrupt moderation */
+bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
+{
+ return ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_INTERRUPT_MODERATION);
+}
+
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs)
+{
+ if (!ena_dev->intr_delay_resolution) {
+ ena_trc_err("Illegal interrupt delay granularity value\n");
+ return ENA_COM_FAULT;
+ }
+
+ ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
+ ena_dev->intr_delay_resolution;
+
+ return 0;
+}
+
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs)
+{
+ if (!ena_dev->intr_delay_resolution) {
+ ena_trc_err("Illegal interrupt delay granularity value\n");
+ return ENA_COM_FAULT;
+ }
+
+ /* We use LOWEST entry of moderation table for storing
+ * nonadaptive interrupt coalescing values
+ */
+ ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
+ rx_coalesce_usecs / ena_dev->intr_delay_resolution;
+
+ return 0;
+}
+
+void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
+{
+ if (ena_dev->intr_moder_tbl)
+ ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
+ ena_dev->intr_moder_tbl = NULL;
+}
+
+int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_get_feat_resp get_resp;
+ u16 delay_resolution;
+ int rc;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_INTERRUPT_MODERATION);
+
+ if (rc) {
+ if (rc == ENA_COM_UNSUPPORTED) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_INTERRUPT_MODERATION);
+ rc = 0;
+ } else {
+ ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
+ rc);
+ }
+
+ /* no moderation supported, disable adaptive support */
+ ena_com_disable_adaptive_moderation(ena_dev);
+ return rc;
+ }
+
+ rc = ena_com_init_interrupt_moderation_table(ena_dev);
+ if (rc)
+ goto err;
+
+ /* if moderation is supported by device we set adaptive moderation */
+ delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
+ ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
+ ena_com_enable_adaptive_moderation(ena_dev);
+
+ return 0;
+err:
+ ena_com_destroy_interrupt_moderation(ena_dev);
+ return rc;
+}
+
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (!intr_moder_tbl)
+ return;
+
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
+ ENA_INTR_LOWEST_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
+ ENA_INTR_LOWEST_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
+ ENA_INTR_LOWEST_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
+ ENA_INTR_LOW_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
+ ENA_INTR_LOW_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
+ ENA_INTR_LOW_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
+ ENA_INTR_MID_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
+ ENA_INTR_MID_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
+ ENA_INTR_MID_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
+ ENA_INTR_HIGH_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
+ ENA_INTR_HIGH_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
+ ENA_INTR_HIGH_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
+ ENA_INTR_HIGHEST_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
+ ENA_INTR_HIGHEST_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
+ ENA_INTR_HIGHEST_BYTES;
+}
+
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->intr_moder_tx_interval;
+}
+
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (intr_moder_tbl)
+ return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
+
+ return 0;
+}
+
+void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
+ return;
+
+ intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
+ if (ena_dev->intr_delay_resolution)
+ intr_moder_tbl[level].intr_moder_interval /=
+ ena_dev->intr_delay_resolution;
+ intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
+
+ /* use hardcoded value until ethtool supports bytecount parameter */
+ if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
+ intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
+}
+
+void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
+ return;
+
+ entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
+ if (ena_dev->intr_delay_resolution)
+ entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
+ entry->pkts_per_interval =
+ intr_moder_tbl[level].pkts_per_interval;
+ entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
+}
diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_com.h b/src/spdk/dpdk/drivers/net/ena/base/ena_com.h
new file mode 100644
index 00000000..f58cd86a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/base/ena_com.h
@@ -0,0 +1,1054 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef ENA_COM
+#define ENA_COM
+
+#include "ena_plat.h"
+#include "ena_includes.h"
+
+#define ENA_MAX_NUM_IO_QUEUES 128U
+/* We need to queues for each IO (on for Tx and one for Rx) */
+#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
+
+#define ENA_MAX_HANDLERS 256
+
+#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48
+
+/* Unit in usec */
+#define ENA_REG_READ_TIMEOUT 200000
+
+#define ADMIN_SQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aq_entry))
+#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry))
+#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* ENA adaptive interrupt moderation settings */
+
+#define ENA_INTR_LOWEST_USECS (0)
+#define ENA_INTR_LOWEST_PKTS (3)
+#define ENA_INTR_LOWEST_BYTES (2 * 1524)
+
+#define ENA_INTR_LOW_USECS (32)
+#define ENA_INTR_LOW_PKTS (12)
+#define ENA_INTR_LOW_BYTES (16 * 1024)
+
+#define ENA_INTR_MID_USECS (80)
+#define ENA_INTR_MID_PKTS (48)
+#define ENA_INTR_MID_BYTES (64 * 1024)
+
+#define ENA_INTR_HIGH_USECS (128)
+#define ENA_INTR_HIGH_PKTS (96)
+#define ENA_INTR_HIGH_BYTES (128 * 1024)
+
+#define ENA_INTR_HIGHEST_USECS (192)
+#define ENA_INTR_HIGHEST_PKTS (128)
+#define ENA_INTR_HIGHEST_BYTES (192 * 1024)
+
+#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 4
+#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6
+#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4
+
+#define ENA_INTR_MODER_LEVEL_STRIDE 1
+#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF
+
+#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
+
+enum ena_intr_moder_level {
+ ENA_INTR_MODER_LOWEST = 0,
+ ENA_INTR_MODER_LOW,
+ ENA_INTR_MODER_MID,
+ ENA_INTR_MODER_HIGH,
+ ENA_INTR_MODER_HIGHEST,
+ ENA_INTR_MAX_NUM_OF_LEVELS,
+};
+
+struct ena_intr_moder_entry {
+ unsigned int intr_moder_interval;
+ unsigned int pkts_per_interval;
+ unsigned int bytes_per_interval;
+};
+
+enum queue_direction {
+ ENA_COM_IO_QUEUE_DIRECTION_TX,
+ ENA_COM_IO_QUEUE_DIRECTION_RX
+};
+
+struct ena_com_buf {
+ dma_addr_t paddr; /**< Buffer physical address */
+ u16 len; /**< Buffer length in bytes */
+};
+
+struct ena_com_rx_buf_info {
+ u16 len;
+ u16 req_id;
+};
+
+struct ena_com_io_desc_addr {
+ u8 __iomem *pbuf_dev_addr; /* LLQ address */
+ u8 *virt_addr;
+ dma_addr_t phys_addr;
+ ena_mem_handle_t mem_handle;
+};
+
+struct ena_com_tx_meta {
+ u16 mss;
+ u16 l3_hdr_len;
+ u16 l3_hdr_offset;
+ u16 l4_hdr_len; /* In words */
+};
+
+struct ena_com_io_cq {
+ struct ena_com_io_desc_addr cdesc_addr;
+ void *bus;
+
+ /* Interrupt unmask register */
+ u32 __iomem *unmask_reg;
+
+ /* The completion queue head doorbell register */
+ u32 __iomem *cq_head_db_reg;
+
+ /* numa configuration register (for TPH) */
+ u32 __iomem *numa_node_cfg_reg;
+
+ /* The value to write to the above register to unmask
+ * the interrupt of this queue
+ */
+ u32 msix_vector;
+
+ enum queue_direction direction;
+
+ /* holds the number of cdesc of the current packet */
+ u16 cur_rx_pkt_cdesc_count;
+ /* save the firt cdesc idx of the current packet */
+ u16 cur_rx_pkt_cdesc_start_idx;
+
+ u16 q_depth;
+ /* Caller qid */
+ u16 qid;
+
+ /* Device queue index */
+ u16 idx;
+ u16 head;
+ u16 last_head_update;
+ u8 phase;
+ u8 cdesc_entry_size_in_bytes;
+
+} ____cacheline_aligned;
+
+struct ena_com_io_sq {
+ struct ena_com_io_desc_addr desc_addr;
+ void *bus;
+
+ u32 __iomem *db_addr;
+ u8 __iomem *header_addr;
+
+ enum queue_direction direction;
+ enum ena_admin_placement_policy_type mem_queue_type;
+
+ u32 msix_vector;
+ struct ena_com_tx_meta cached_tx_meta;
+
+ u16 q_depth;
+ u16 qid;
+
+ u16 idx;
+ u16 tail;
+ u16 next_to_comp;
+ u32 tx_max_header_size;
+ u8 phase;
+ u8 desc_entry_size;
+ u8 dma_addr_bits;
+} ____cacheline_aligned;
+
+struct ena_com_admin_cq {
+ struct ena_admin_acq_entry *entries;
+ ena_mem_handle_t mem_handle;
+ dma_addr_t dma_addr;
+
+ u16 head;
+ u8 phase;
+};
+
+struct ena_com_admin_sq {
+ struct ena_admin_aq_entry *entries;
+ ena_mem_handle_t mem_handle;
+ dma_addr_t dma_addr;
+
+ u32 __iomem *db_addr;
+
+ u16 head;
+ u16 tail;
+ u8 phase;
+
+};
+
+struct ena_com_stats_admin {
+ u32 aborted_cmd;
+ u32 submitted_cmd;
+ u32 completed_cmd;
+ u32 out_of_space;
+ u32 no_completion;
+};
+
+struct ena_com_admin_queue {
+ void *q_dmadev;
+ void *bus;
+ ena_spinlock_t q_lock; /* spinlock for the admin queue */
+
+ struct ena_comp_ctx *comp_ctx;
+ u32 completion_timeout;
+ u16 q_depth;
+ struct ena_com_admin_cq cq;
+ struct ena_com_admin_sq sq;
+
+ /* Indicate if the admin queue should poll for completion */
+ bool polling;
+
+ u16 curr_cmd_id;
+
+ /* Indicate that the ena was initialized and can
+ * process new admin commands
+ */
+ bool running_state;
+
+ /* Count the number of outstanding admin commands */
+ ena_atomic32_t outstanding_cmds;
+
+ struct ena_com_stats_admin stats;
+};
+
+struct ena_aenq_handlers;
+
+struct ena_com_aenq {
+ u16 head;
+ u8 phase;
+ struct ena_admin_aenq_entry *entries;
+ dma_addr_t dma_addr;
+ ena_mem_handle_t mem_handle;
+ u16 q_depth;
+ struct ena_aenq_handlers *aenq_handlers;
+};
+
+struct ena_com_mmio_read {
+ struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
+ dma_addr_t read_resp_dma_addr;
+ ena_mem_handle_t read_resp_mem_handle;
+ u32 reg_read_to; /* in us */
+ u16 seq_num;
+ bool readless_supported;
+ /* spin lock to ensure a single outstanding read */
+ ena_spinlock_t lock;
+};
+
+struct ena_rss {
+ /* Indirect table */
+ u16 *host_rss_ind_tbl;
+ struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
+ dma_addr_t rss_ind_tbl_dma_addr;
+ ena_mem_handle_t rss_ind_tbl_mem_handle;
+ u16 tbl_log_size;
+
+ /* Hash key */
+ enum ena_admin_hash_functions hash_func;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key;
+ dma_addr_t hash_key_dma_addr;
+ ena_mem_handle_t hash_key_mem_handle;
+ u32 hash_init_val;
+
+ /* Flow Control */
+ struct ena_admin_feature_rss_hash_control *hash_ctrl;
+ dma_addr_t hash_ctrl_dma_addr;
+ ena_mem_handle_t hash_ctrl_mem_handle;
+
+};
+
+struct ena_host_attribute {
+ /* Debug area */
+ u8 *debug_area_virt_addr;
+ dma_addr_t debug_area_dma_addr;
+ ena_mem_handle_t debug_area_dma_handle;
+ u32 debug_area_size;
+
+ /* Host information */
+ struct ena_admin_host_info *host_info;
+ dma_addr_t host_info_dma_addr;
+ ena_mem_handle_t host_info_dma_handle;
+};
+
+/* Each ena_dev is a PCI function. */
+struct ena_com_dev {
+ struct ena_com_admin_queue admin_queue;
+ struct ena_com_aenq aenq;
+ struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
+ struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
+ u8 __iomem *reg_bar;
+ void __iomem *mem_bar;
+ void *dmadev;
+ void *bus;
+
+ enum ena_admin_placement_policy_type tx_mem_queue_type;
+ u32 tx_max_header_size;
+ u16 stats_func; /* Selected function for extended statistic dump */
+ u16 stats_queue; /* Selected queue for extended statistic dump */
+
+ struct ena_com_mmio_read mmio_read;
+
+ struct ena_rss rss;
+ u32 supported_features;
+ u32 dma_addr_bits;
+
+ struct ena_host_attribute host_attr;
+ bool adaptive_coalescing;
+ u16 intr_delay_resolution;
+ u32 intr_moder_tx_interval;
+ struct ena_intr_moder_entry *intr_moder_tbl;
+};
+
+struct ena_com_dev_get_features_ctx {
+ struct ena_admin_queue_feature_desc max_queues;
+ struct ena_admin_device_attr_feature_desc dev_attr;
+ struct ena_admin_feature_aenq_desc aenq;
+ struct ena_admin_feature_offload_desc offload;
+ struct ena_admin_ena_hw_hints hw_hints;
+};
+
+struct ena_com_create_io_ctx {
+ enum ena_admin_placement_policy_type mem_queue_type;
+ enum queue_direction direction;
+ int numa_node;
+ u32 msix_vector;
+ u16 queue_size;
+ u16 qid;
+};
+
+typedef void (*ena_aenq_handler)(void *data,
+ struct ena_admin_aenq_entry *aenq_e);
+
+/* Holds aenq handlers. Indexed by AENQ event group */
+struct ena_aenq_handlers {
+ ena_aenq_handler handlers[ENA_MAX_HANDLERS];
+ ena_aenq_handler unimplemented_handler;
+};
+
+/*****************************************************************************/
+/*****************************************************************************/
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ *
+ * Initialize the register read mechanism.
+ *
+ * @note: This method must be the first stage in the initialization sequence.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ * @readless_supported: readless mode (enable/disable)
+ */
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
+ bool readless_supported);
+
+/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return
+ * value physical address.
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);
+
+/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_admin_init - Init the admin and the async queues
+ * @ena_dev: ENA communication layer struct
+ * @aenq_handlers: Those handlers to be called upon event.
+ * @init_spinlock: Indicate if this method should init the admin spinlock or
+ * the spinlock was init before (for example, in a case of FLR).
+ *
+ * Initialize the admin submission and completion queues.
+ * Initialize the asynchronous events notification queues.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_admin_init(struct ena_com_dev *ena_dev,
+ struct ena_aenq_handlers *aenq_handlers,
+ bool init_spinlock);
+
+/* ena_com_admin_destroy - Destroy the admin and the async events queues.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @note: Before calling this method, the caller must validate that the device
+ * won't send any additional admin completions/aenq.
+ * To achieve that, a FLR is recommended.
+ */
+void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_dev_reset - Perform device FLR to the device.
+ * @ena_dev: ENA communication layer struct
+ * @reset_reason: Specify what is the trigger for the reset in case of an error.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason);
+
+/* ena_com_create_io_queue - Create io queue.
+ * @ena_dev: ENA communication layer struct
+ * @ctx - create context structure
+ *
+ * Create the submission and the completion queues.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx);
+
+/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
+ * @ena_dev: ENA communication layer struct
+ * @qid - the caller virtual queue id.
+ */
+void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
+
+/* ena_com_get_io_handlers - Return the io queue handlers
+ * @ena_dev: ENA communication layer struct
+ * @qid - the caller virtual queue id.
+ * @io_sq - IO submission queue handler
+ * @io_cq - IO completion queue handler.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+ struct ena_com_io_sq **io_sq,
+ struct ena_com_io_cq **io_cq);
+
+/* ena_com_admin_aenq_enable - ENAble asynchronous event notifications
+ * @ena_dev: ENA communication layer struct
+ *
+ * After this method, aenq event can be received via AENQ.
+ */
+void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_admin_running_state - Set the state of the admin queue
+ * @ena_dev: ENA communication layer struct
+ *
+ * Change the state of the admin queue (enable/disable)
+ */
+void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);
+
+/* ena_com_get_admin_running_state - Get the admin queue state
+ * @ena_dev: ENA communication layer struct
+ *
+ * Retrieve the state of the admin queue (enable/disable)
+ *
+ * @return - current polling mode (enable/disable)
+ */
+bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
+ * @ena_dev: ENA communication layer struct
+ * @polling: ENAble/Disable polling mode
+ *
+ * Set the admin completion mode.
+ */
+void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
+
+/* ena_com_set_admin_polling_mode - Get the admin completion queue polling mode
+ * @ena_dev: ENA communication layer struct
+ *
+ * Get the admin completion mode.
+ * If polling mode is on, ena_com_execute_admin_command will perform a
+ * polling on the admin completion queue for the commands completion,
+ * otherwise it will wait on wait event.
+ *
+ * @return state
+ */
+bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
+
+/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method go over the admin completion queue and wake up all the pending
+ * threads that wait on the commands wait event.
+ *
+ * @note: Should be called after MSI-X interrupt.
+ */
+void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
+
+/* ena_com_aenq_intr_handler - AENQ interrupt handler
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method go over the async event notification queue and call the proper
+ * aenq handler.
+ */
+void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
+
+/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method aborts all the outstanding admin commands.
+ * The caller should then call ena_com_wait_for_abort_completion to make sure
+ * all the commands were completed.
+ */
+void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
+
+/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method wait until all the outstanding admin commands will be completed.
+ */
+void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
+
+/* ena_com_validate_version - Validate the device parameters
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method validate the device parameters are the same as the saved
+ * parameters in ena_dev.
+ * This method is useful after device reset, to validate the device mac address
+ * and the device offloads are the same as before the reset.
+ *
+ * @return - 0 on success negative value otherwise.
+ */
+int ena_com_validate_version(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_link_params - Retrieve physical link parameters.
+ * @ena_dev: ENA communication layer struct
+ * @resp: Link parameters
+ *
+ * Retrieve the physical link parameters,
+ * like speed, auto-negotiation and full duplex support.
+ *
+ * @return - 0 on Success negative value otherwise.
+ */
+int ena_com_get_link_params(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *resp);
+
+/* ena_com_get_dma_width - Retrieve physical dma address width the device
+ * supports.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Retrieve the maximum physical address bits the device can handle.
+ *
+ * @return: > 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dma_width(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_aenq_config - Set aenq groups configurations
+ * @ena_dev: ENA communication layer struct
+ * @groups flag: bit fields flags of enum ena_admin_aenq_group.
+ *
+ * Configure which aenq event group the driver would like to receive.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
+
+/* ena_com_get_dev_attr_feat - Get device features
+ * @ena_dev: ENA communication layer struct
+ * @get_feat_ctx: returned context that contain the get features.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx);
+
+/* ena_com_get_dev_basic_stats - Get device basic statistics
+ * @ena_dev: ENA communication layer struct
+ * @stats: stats return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_basic_stats *stats);
+
+/* ena_com_set_dev_mtu - Configure the device mtu.
+ * @ena_dev: ENA communication layer struct
+ * @mtu: mtu value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
+
+/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
+ * @ena_dev: ENA communication layer struct
+ * @offlad: offload return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_offload_desc *offload);
+
+/* ena_com_rss_init - Init RSS
+ * @ena_dev: ENA communication layer struct
+ * @log_size: indirection log size
+ *
+ * Allocate RSS/RFS resources.
+ * The caller then can configure rss using ena_com_set_hash_function,
+ * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
+
+/* ena_com_rss_destroy - Destroy rss
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free all the RSS/RFS resources.
+ */
+void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_fill_hash_function - Fill RSS hash function
+ * @ena_dev: ENA communication layer struct
+ * @func: The hash function (Toeplitz or crc)
+ * @key: Hash key (for toeplitz hash)
+ * @key_len: key length (max length 10 DW)
+ * @init_val: initial value for the hash function
+ *
+ * Fill the ena_dev resources with the desire hash function, hash key, key_len
+ * and key initial value (if needed by the hash function).
+ * To flush the key into the device the caller should call
+ * ena_com_set_hash_function.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions func,
+ const u8 *key, u16 key_len, u32 init_val);
+
+/* ena_com_set_hash_function - Flush the hash function and it dependencies to
+ * the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the hash function and it dependencies (key, key length and
+ * initial value) if needed.
+ *
+ * @note: Prior to this method the caller should call ena_com_fill_hash_function
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_hash_function - Retrieve the hash function and the hash key
+ * from the device.
+ * @ena_dev: ENA communication layer struct
+ * @func: hash function
+ * @key: hash key
+ *
+ * Retrieve the hash function and the hash key from the device.
+ *
+ * @note: If the caller called ena_com_fill_hash_function but didn't flash
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions *func,
+ u8 *key);
+
+/* ena_com_fill_hash_ctrl - Fill RSS hash control
+ * @ena_dev: ENA communication layer struct.
+ * @proto: The protocol to configure.
+ * @hash_fields: bit mask of ena_admin_flow_hash_fields
+ *
+ * Fill the ena_dev resources with the desire hash control (the ethernet
+ * fields that take part of the hash) for a specific protocol.
+ * To flush the hash control to the device, the caller should call
+ * ena_com_set_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 hash_fields);
+
+/* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the hash control (the ethernet fields that take part of the hash)
+ *
+ * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
+ * @ena_dev: ENA communication layer struct
+ * @proto: The protocol to retrieve.
+ * @fields: bit mask of ena_admin_flow_hash_fields.
+ *
+ * Retrieve the hash control from the device.
+ *
+ * @note, If the caller called ena_com_fill_hash_ctrl but didn't flash
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 *fields);
+
+/* ena_com_set_default_hash_ctrl - Set the hash control to a default
+ * configuration.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Fill the ena_dev resources with the default hash control configuration.
+ * To flush the hash control to the device, the caller should call
+ * ena_com_set_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);
+
+/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
+ * indirection table
+ * @ena_dev: ENA communication layer struct.
+ * @entry_idx - indirection table entry.
+ * @entry_value - redirection value
+ *
+ * Fill a single entry of the RSS indirection table in the ena_dev resources.
+ * To flush the indirection table to the device, the called should call
+ * ena_com_indirect_table_set.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
+ u16 entry_idx, u16 entry_value);
+
+/* ena_com_indirect_table_set - Flush the indirection table to the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the indirection hash control to the device.
+ * Prior to this method the caller should call ena_com_indirect_table_fill_entry
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
+
+/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
+ * @ena_dev: ENA communication layer struct
+ * @ind_tbl: indirection table
+ *
+ * Retrieve the RSS indirection table from the device.
+ *
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flash
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);
+
+/* ena_com_allocate_host_info - Allocate host info resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
+
+/* ena_com_allocate_debug_area - Allocate debug area.
+ * @ena_dev: ENA communication layer struct
+ * @debug_area_size - debug area size.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
+ u32 debug_area_size);
+
+/* ena_com_delete_debug_area - Free the debug area resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocate debug area.
+ */
+void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
+
+/* ena_com_delete_host_info - Free the host info resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocate host info.
+ */
+void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_host_attributes - Update the device with the host
+ * attributes (debug area and host info) base address.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);
+
+/* ena_com_create_io_cq - Create io completion queue.
+ * @ena_dev: ENA communication layer struct
+ * @io_cq - io completion queue handler
+
+ * Create IO completion queue.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq);
+
+/* ena_com_destroy_io_cq - Destroy io completion queue.
+ * @ena_dev: ENA communication layer struct
+ * @io_cq - io completion queue handler
+
+ * Destroy IO completion queue.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq);
+
+/* ena_com_execute_admin_command - Execute admin command
+ * @admin_queue: admin queue.
+ * @cmd: the admin command to execute.
+ * @cmd_size: the command size.
+ * @cmd_completion: command completion return value.
+ * @cmd_comp_size: command completion size.
+
+ * Submit an admin command and then wait until the device will return a
+ * completion.
+ * The completion will be copyed into cmd_comp.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct ena_admin_acq_entry *cmd_comp,
+ size_t cmd_comp_size);
+
+/* ena_com_init_interrupt_moderation - Init interrupt moderation
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);
+
+/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev);
+
+/* ena_com_interrupt_moderation_supported - Return if interrupt moderation
+ * capability is supported by the device.
+ *
+ * @return - supported or not.
+ */
+bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
+
+/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt
+ * moderation table back to the default parameters.
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
+
+/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
+ * non-adaptive interval in Tx direction.
+ * @ena_dev: ENA communication layer struct
+ * @tx_coalesce_usecs: Interval in usec.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs);
+
+/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
+ * non-adaptive interval in Rx direction.
+ * @ena_dev: ENA communication layer struct
+ * @rx_coalesce_usecs: Interval in usec.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs);
+
+/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
+ * non-adaptive interval in Tx direction.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - interval in usec
+ */
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
+ * non-adaptive interval in Rx direction.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - interval in usec
+ */
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
+
+/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt
+ * moderation table.
+ * @ena_dev: ENA communication layer struct
+ * @level: Interrupt moderation table level
+ * @entry: Entry value
+ *
+ * Update a single entry in the interrupt moderation table.
+ */
+void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry);
+
+/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry.
+ * @ena_dev: ENA communication layer struct
+ * @level: Interrupt moderation table level
+ * @entry: Entry to fill.
+ *
+ * Initialize the entry according to the adaptive interrupt moderation table.
+ */
+void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry);
+
+static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->adaptive_coalescing;
+}
+
+static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
+{
+ ena_dev->adaptive_coalescing = true;
+}
+
+static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
+{
+ ena_dev->adaptive_coalescing = false;
+}
+
+/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay
+ * @ena_dev: ENA communication layer struct
+ * @pkts: Number of packets since the last update
+ * @bytes: Number of bytes received since the last update.
+ * @smoothed_interval: Returned interval
+ * @moder_tbl_idx: Current table level as input update new level as return
+ * value.
+ */
+static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
+ unsigned int pkts,
+ unsigned int bytes,
+ unsigned int *smoothed_interval,
+ unsigned int *moder_tbl_idx)
+{
+ enum ena_intr_moder_level curr_moder_idx, new_moder_idx;
+ struct ena_intr_moder_entry *curr_moder_entry;
+ struct ena_intr_moder_entry *pred_moder_entry;
+ struct ena_intr_moder_entry *new_moder_entry;
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+ unsigned int interval;
+
+ /* We apply adaptive moderation on Rx path only.
+ * Tx uses static interrupt moderation.
+ */
+ if (!pkts || !bytes)
+ /* Tx interrupt, or spurious interrupt,
+ * in both cases we just use same delay values
+ */
+ return;
+
+ curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx);
+ if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) {
+ ena_trc_err("Wrong moderation index %u\n", curr_moder_idx);
+ return;
+ }
+
+ curr_moder_entry = &intr_moder_tbl[curr_moder_idx];
+ new_moder_idx = curr_moder_idx;
+
+ if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
+ if ((pkts > curr_moder_entry->pkts_per_interval) ||
+ (bytes > curr_moder_entry->bytes_per_interval))
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
+ } else {
+ pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE];
+
+ if ((pkts <= pred_moder_entry->pkts_per_interval) ||
+ (bytes <= pred_moder_entry->bytes_per_interval))
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE);
+ else if ((pkts > curr_moder_entry->pkts_per_interval) ||
+ (bytes > curr_moder_entry->bytes_per_interval)) {
+ if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
+ }
+ }
+ new_moder_entry = &intr_moder_tbl[new_moder_idx];
+
+ interval = new_moder_entry->intr_moder_interval;
+ *smoothed_interval = (
+ (interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT +
+ ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) /
+ 10;
+
+ *moder_tbl_idx = new_moder_idx;
+}
+
+/* ena_com_update_intr_reg - Prepare interrupt register
+ * @intr_reg: interrupt register to update.
+ * @rx_delay_interval: Rx interval in usecs
+ * @tx_delay_interval: Tx interval in usecs
+ * @unmask: unask enable/disable
+ *
+ * Prepare interrupt update register with the supplied parameters.
+ */
+static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
+ u32 rx_delay_interval,
+ u32 tx_delay_interval,
+ bool unmask)
+{
+ intr_reg->intr_control = 0;
+ intr_reg->intr_control |= rx_delay_interval &
+ ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+
+ intr_reg->intr_control |=
+ (tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
+ & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+
+ if (unmask)
+ intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+}
+
+#if defined(__cplusplus)
+}
+#endif /* __cplusplus */
+#endif /* !(ENA_COM) */
diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_admin_defs.h b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_admin_defs.h
new file mode 100644
index 00000000..04d4e9a5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_admin_defs.h
@@ -0,0 +1,1412 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_ADMIN_H_
+#define _ENA_ADMIN_H_
+
+enum ena_admin_aq_opcode {
+ ENA_ADMIN_CREATE_SQ = 1,
+
+ ENA_ADMIN_DESTROY_SQ = 2,
+
+ ENA_ADMIN_CREATE_CQ = 3,
+
+ ENA_ADMIN_DESTROY_CQ = 4,
+
+ ENA_ADMIN_GET_FEATURE = 8,
+
+ ENA_ADMIN_SET_FEATURE = 9,
+
+ ENA_ADMIN_GET_STATS = 11,
+};
+
+enum ena_admin_aq_completion_status {
+ ENA_ADMIN_SUCCESS = 0,
+
+ ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+
+ ENA_ADMIN_BAD_OPCODE = 2,
+
+ ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
+
+ ENA_ADMIN_MALFORMED_REQUEST = 4,
+
+ /* Additional status is provided in ACQ entry extended_status */
+ ENA_ADMIN_ILLEGAL_PARAMETER = 5,
+
+ ENA_ADMIN_UNKNOWN_ERROR = 6,
+};
+
+enum ena_admin_aq_feature_id {
+ ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+
+ ENA_ADMIN_MAX_QUEUES_NUM = 2,
+
+ ENA_ADMIN_HW_HINTS = 3,
+
+ ENA_ADMIN_RSS_HASH_FUNCTION = 10,
+
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
+
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+
+ ENA_ADMIN_MTU = 14,
+
+ ENA_ADMIN_RSS_HASH_INPUT = 18,
+
+ ENA_ADMIN_INTERRUPT_MODERATION = 20,
+
+ ENA_ADMIN_AENQ_CONFIG = 26,
+
+ ENA_ADMIN_LINK_CONFIG = 27,
+
+ ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+};
+
+enum ena_admin_placement_policy_type {
+ /* descriptors and headers are in host memory */
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
+
+ /* descriptors and headers are in device memory (a.k.a Low Latency
+ * Queue)
+ */
+ ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
+};
+
+enum ena_admin_link_types {
+ ENA_ADMIN_LINK_SPEED_1G = 0x1,
+
+ ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
+
+ ENA_ADMIN_LINK_SPEED_5G = 0x4,
+
+ ENA_ADMIN_LINK_SPEED_10G = 0x8,
+
+ ENA_ADMIN_LINK_SPEED_25G = 0x10,
+
+ ENA_ADMIN_LINK_SPEED_40G = 0x20,
+
+ ENA_ADMIN_LINK_SPEED_50G = 0x40,
+
+ ENA_ADMIN_LINK_SPEED_100G = 0x80,
+
+ ENA_ADMIN_LINK_SPEED_200G = 0x100,
+
+ ENA_ADMIN_LINK_SPEED_400G = 0x200,
+};
+
+enum ena_admin_completion_policy_type {
+ /* completion queue entry for each sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
+
+ /* completion queue entry upon request in sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
+
+ /* current queue head pointer is updated in OS memory upon sq
+ * descriptor request
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
+
+ /* current queue head pointer is updated in OS memory for each sq
+ * descriptor
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
+};
+
+/* basic stats return ena_admin_basic_stats while extanded stats return a
+ * buffer (string format) with additional statistics per queue and per
+ * device id
+ */
+enum ena_admin_get_stats_type {
+ ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+};
+
+enum ena_admin_get_stats_scope {
+ ENA_ADMIN_SPECIFIC_QUEUE = 0,
+
+ ENA_ADMIN_ETH_TRAFFIC = 1,
+};
+
+struct ena_admin_aq_common_desc {
+ /* 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ uint16_t command_id;
+
+ /* as appears in ena_admin_aq_opcode */
+ uint8_t opcode;
+
+ /* 0 : phase
+ * 1 : ctrl_data - control buffer address valid
+ * 2 : ctrl_data_indirect - control buffer address
+ * points to list of pages with addresses of control
+ * buffers
+ * 7:3 : reserved3
+ */
+ uint8_t flags;
+};
+
+/* used in ena_admin_aq_entry. Can point directly to control data, or to a
+ * page list chunk. Used also at the end of indirect mode page list chunks,
+ * for chaining.
+ */
+struct ena_admin_ctrl_buff_info {
+ uint32_t length;
+
+ struct ena_common_mem_addr address;
+};
+
+struct ena_admin_sq {
+ uint16_t sq_idx;
+
+ /* 4:0 : reserved
+ * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx
+ */
+ uint8_t sq_identity;
+
+ uint8_t reserved1;
+};
+
+struct ena_admin_aq_entry {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ union {
+ uint32_t inline_data_w1[3];
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ uint32_t inline_data_w4[12];
+};
+
+struct ena_admin_acq_common_desc {
+ /* command identifier to associate it with the aq descriptor
+ * 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ uint16_t command;
+
+ uint8_t status;
+
+ /* 0 : phase
+ * 7:1 : reserved1
+ */
+ uint8_t flags;
+
+ uint16_t extended_status;
+
+ /* serves as a hint what AQ entries can be revoked */
+ uint16_t sq_head_indx;
+};
+
+struct ena_admin_acq_entry {
+ struct ena_admin_acq_common_desc acq_common_descriptor;
+
+ uint32_t response_specific_data[14];
+};
+
+struct ena_admin_aq_create_sq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* 4:0 : reserved0_w1
+ * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx
+ */
+ uint8_t sq_identity;
+
+ uint8_t reserved8_w1;
+
+ /* 3:0 : placement_policy - Describing where the SQ
+ * descriptor ring and the SQ packet headers reside:
+ * 0x1 - descriptors and headers are in OS memory,
+ * 0x3 - descriptors and headers in device memory
+ * (a.k.a Low Latency Queue)
+ * 6:4 : completion_policy - Describing what policy
+ * to use for generation completion entry (cqe) in
+ * the CQ associated with this SQ: 0x0 - cqe for each
+ * sq descriptor, 0x1 - cqe upon request in sq
+ * descriptor, 0x2 - current queue head pointer is
+ * updated in OS memory upon sq descriptor request
+ * 0x3 - current queue head pointer is updated in OS
+ * memory for each sq descriptor
+ * 7 : reserved15_w1
+ */
+ uint8_t sq_caps_2;
+
+ /* 0 : is_physically_contiguous - Described if the
+ * queue ring memory is allocated in physical
+ * contiguous pages or split.
+ * 7:1 : reserved17_w1
+ */
+ uint8_t sq_caps_3;
+
+ /* associated completion queue id. This CQ must be created prior to
+ * SQ creation
+ */
+ uint16_t cq_idx;
+
+ /* submission queue depth in entries */
+ uint16_t sq_depth;
+
+ /* SQ physical base address in OS memory. This field should not be
+ * used for Low Latency queues. Has to be page aligned.
+ */
+ struct ena_common_mem_addr sq_ba;
+
+ /* specifies queue head writeback location in OS memory. Valid if
+ * completion_policy is set to completion_policy_head_on_demand or
+ * completion_policy_head. Has to be cache aligned
+ */
+ struct ena_common_mem_addr sq_head_writeback;
+
+ uint32_t reserved0_w7;
+
+ uint32_t reserved0_w8;
+};
+
+enum ena_admin_sq_direction {
+ ENA_ADMIN_SQ_DIRECTION_TX = 1,
+
+ ENA_ADMIN_SQ_DIRECTION_RX = 2,
+};
+
+struct ena_admin_acq_create_sq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ uint16_t sq_idx;
+
+ uint16_t reserved;
+
+ /* queue doorbell address as an offset to PCIe MMIO REG BAR */
+ uint32_t sq_doorbell_offset;
+
+ /* low latency queue ring base address as an offset to PCIe MMIO
+ * LLQ_MEM BAR
+ */
+ uint32_t llq_descriptors_offset;
+
+ /* low latency queue headers' memory as an offset to PCIe MMIO
+ * LLQ_MEM BAR
+ */
+ uint32_t llq_headers_offset;
+};
+
+struct ena_admin_aq_destroy_sq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ struct ena_admin_sq sq;
+};
+
+struct ena_admin_acq_destroy_sq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+struct ena_admin_aq_create_cq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* 4:0 : reserved5
+ * 5 : interrupt_mode_enabled - if set, cq operates
+ * in interrupt mode, otherwise - polling
+ * 7:6 : reserved6
+ */
+ uint8_t cq_caps_1;
+
+ /* 4:0 : cq_entry_size_words - size of CQ entry in
+ * 32-bit words, valid values: 4, 8.
+ * 7:5 : reserved7
+ */
+ uint8_t cq_caps_2;
+
+ /* completion queue depth in # of entries. must be power of 2 */
+ uint16_t cq_depth;
+
+ /* msix vector assigned to this cq */
+ uint32_t msix_vector;
+
+ /* cq physical base address in OS memory. CQ must be physically
+ * contiguous
+ */
+ struct ena_common_mem_addr cq_ba;
+};
+
+struct ena_admin_acq_create_cq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ uint16_t cq_idx;
+
+ /* actual cq depth in number of entries */
+ uint16_t cq_actual_depth;
+
+ uint32_t numa_node_register_offset;
+
+ uint32_t cq_head_db_register_offset;
+
+ uint32_t cq_interrupt_unmask_register_offset;
+};
+
+struct ena_admin_aq_destroy_cq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ uint16_t cq_idx;
+
+ uint16_t reserved1;
+};
+
+struct ena_admin_acq_destroy_cq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* ENA AQ Get Statistics command. Extended statistics are placed in control
+ * buffer pointed by AQ entry
+ */
+struct ena_admin_aq_get_stats_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ union {
+ /* command specific inline data */
+ uint32_t inline_data_w1[3];
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ /* stats type as defined in enum ena_admin_get_stats_type */
+ uint8_t type;
+
+ /* stats scope defined in enum ena_admin_get_stats_scope */
+ uint8_t scope;
+
+ uint16_t reserved3;
+
+ /* queue id. used when scope is specific_queue */
+ uint16_t queue_idx;
+
+ /* device id, value 0xFFFF means mine. only privileged device can get
+ * stats of other device
+ */
+ uint16_t device_id;
+};
+
+/* Basic Statistics Command. */
+struct ena_admin_basic_stats {
+ uint32_t tx_bytes_low;
+
+ uint32_t tx_bytes_high;
+
+ uint32_t tx_pkts_low;
+
+ uint32_t tx_pkts_high;
+
+ uint32_t rx_bytes_low;
+
+ uint32_t rx_bytes_high;
+
+ uint32_t rx_pkts_low;
+
+ uint32_t rx_pkts_high;
+
+ uint32_t rx_drops_low;
+
+ uint32_t rx_drops_high;
+};
+
+struct ena_admin_acq_get_stats_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ struct ena_admin_basic_stats basic_stats;
+};
+
+struct ena_admin_get_set_feature_common_desc {
+ /* 1:0 : select - 0x1 - current value; 0x3 - default
+ * value
+ * 7:3 : reserved3
+ */
+ uint8_t flags;
+
+ /* as appears in ena_admin_aq_feature_id */
+ uint8_t feature_id;
+
+ uint16_t reserved16;
+};
+
+struct ena_admin_device_attr_feature_desc {
+ uint32_t impl_id;
+
+ uint32_t device_version;
+
+ /* bitmap of ena_admin_aq_feature_id */
+ uint32_t supported_features;
+
+ uint32_t reserved3;
+
+ /* Indicates how many bits are used physical address access. */
+ uint32_t phys_addr_width;
+
+ /* Indicates how many bits are used virtual address access. */
+ uint32_t virt_addr_width;
+
+ /* unicast MAC address (in Network byte order) */
+ uint8_t mac_addr[6];
+
+ uint8_t reserved7[2];
+
+ uint32_t max_mtu;
+};
+
+struct ena_admin_queue_feature_desc {
+ /* including LLQs */
+ uint32_t max_sq_num;
+
+ uint32_t max_sq_depth;
+
+ uint32_t max_cq_num;
+
+ uint32_t max_cq_depth;
+
+ uint32_t max_llq_num;
+
+ uint32_t max_llq_depth;
+
+ uint32_t max_header_size;
+
+ /* Maximum Descriptors number, including meta descriptor, allowed for
+ * a single Tx packet
+ */
+ uint16_t max_packet_tx_descs;
+
+ /* Maximum Descriptors number allowed for a single Rx packet */
+ uint16_t max_packet_rx_descs;
+};
+
+struct ena_admin_set_feature_mtu_desc {
+ /* exclude L2 */
+ uint32_t mtu;
+};
+
+struct ena_admin_set_feature_host_attr_desc {
+ /* host OS info base address in OS memory. host info is 4KB of
+ * physically contiguous
+ */
+ struct ena_common_mem_addr os_info_ba;
+
+ /* host debug area base address in OS memory. debug area must be
+ * physically contiguous
+ */
+ struct ena_common_mem_addr debug_ba;
+
+ /* debug area size */
+ uint32_t debug_area_size;
+};
+
+struct ena_admin_feature_intr_moder_desc {
+ /* interrupt delay granularity in usec */
+ uint16_t intr_delay_resolution;
+
+ uint16_t reserved;
+};
+
+struct ena_admin_get_feature_link_desc {
+ /* Link speed in Mb */
+ uint32_t speed;
+
+ /* bit field of enum ena_admin_link types */
+ uint32_t supported;
+
+ /* 0 : autoneg
+ * 1 : duplex - Full Duplex
+ * 31:2 : reserved2
+ */
+ uint32_t flags;
+};
+
+struct ena_admin_feature_aenq_desc {
+ /* bitmask for AENQ groups the device can report */
+ uint32_t supported_groups;
+
+ /* bitmask for AENQ groups to report */
+ uint32_t enabled_groups;
+};
+
+struct ena_admin_feature_offload_desc {
+ /* 0 : TX_L3_csum_ipv4
+ * 1 : TX_L4_ipv4_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 2 : TX_L4_ipv4_csum_full
+ * 3 : TX_L4_ipv6_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 4 : TX_L4_ipv6_csum_full
+ * 5 : tso_ipv4
+ * 6 : tso_ipv6
+ * 7 : tso_ecn
+ */
+ uint32_t tx;
+
+ /* Receive side supported stateless offload
+ * 0 : RX_L3_csum_ipv4 - IPv4 checksum
+ * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum
+ * 2 : RX_L4_ipv6_csum - TCP/UDP/IPv6 checksum
+ * 3 : RX_hash - Hash calculation
+ */
+ uint32_t rx_supported;
+
+ uint32_t rx_enabled;
+};
+
+enum ena_admin_hash_functions {
+ ENA_ADMIN_TOEPLITZ = 1,
+
+ ENA_ADMIN_CRC32 = 2,
+};
+
+struct ena_admin_feature_rss_flow_hash_control {
+ uint32_t keys_num;
+
+ uint32_t reserved;
+
+ uint32_t key[10];
+};
+
+struct ena_admin_feature_rss_flow_hash_function {
+ /* 7:0 : funcs - bitmask of ena_admin_hash_functions */
+ uint32_t supported_func;
+
+ /* 7:0 : selected_func - bitmask of
+ * ena_admin_hash_functions
+ */
+ uint32_t selected_func;
+
+ /* initial value */
+ uint32_t init_val;
+};
+
+/* RSS flow hash protocols */
+enum ena_admin_flow_hash_proto {
+ ENA_ADMIN_RSS_TCP4 = 0,
+
+ ENA_ADMIN_RSS_UDP4 = 1,
+
+ ENA_ADMIN_RSS_TCP6 = 2,
+
+ ENA_ADMIN_RSS_UDP6 = 3,
+
+ ENA_ADMIN_RSS_IP4 = 4,
+
+ ENA_ADMIN_RSS_IP6 = 5,
+
+ ENA_ADMIN_RSS_IP4_FRAG = 6,
+
+ ENA_ADMIN_RSS_NOT_IP = 7,
+
+ /* TCPv6 with extension header */
+ ENA_ADMIN_RSS_TCP6_EX = 8,
+
+ /* IPv6 with extension header */
+ ENA_ADMIN_RSS_IP6_EX = 9,
+
+ ENA_ADMIN_RSS_PROTO_NUM = 16,
+};
+
+/* RSS flow hash fields */
+enum ena_admin_flow_hash_fields {
+ /* Ethernet Dest Addr */
+ ENA_ADMIN_RSS_L2_DA = BIT(0),
+
+ /* Ethernet Src Addr */
+ ENA_ADMIN_RSS_L2_SA = BIT(1),
+
+ /* ipv4/6 Dest Addr */
+ ENA_ADMIN_RSS_L3_DA = BIT(2),
+
+ /* ipv4/6 Src Addr */
+ ENA_ADMIN_RSS_L3_SA = BIT(3),
+
+ /* tcp/udp Dest Port */
+ ENA_ADMIN_RSS_L4_DP = BIT(4),
+
+ /* tcp/udp Src Port */
+ ENA_ADMIN_RSS_L4_SP = BIT(5),
+};
+
+struct ena_admin_proto_input {
+ /* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */
+ uint16_t fields;
+
+ uint16_t reserved2;
+};
+
+struct ena_admin_feature_rss_hash_control {
+ struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM];
+};
+
+struct ena_admin_feature_rss_flow_hash_input {
+ /* supported hash input sorting
+ * 1 : L3_sort - support swap L3 addresses if DA is
+ * smaller than SA
+ * 2 : L4_sort - support swap L4 ports if DP smaller
+ * SP
+ */
+ uint16_t supported_input_sort;
+
+ /* enabled hash input sorting
+ * 1 : enable_L3_sort - enable swap L3 addresses if
+ * DA smaller than SA
+ * 2 : enable_L4_sort - enable swap L4 ports if DP
+ * smaller than SP
+ */
+ uint16_t enabled_input_sort;
+};
+
+enum ena_admin_os_type {
+ ENA_ADMIN_OS_LINUX = 1,
+
+ ENA_ADMIN_OS_WIN = 2,
+
+ ENA_ADMIN_OS_DPDK = 3,
+
+ ENA_ADMIN_OS_FREEBSD = 4,
+
+ ENA_ADMIN_OS_IPXE = 5,
+};
+
+struct ena_admin_host_info {
+ /* defined in enum ena_admin_os_type */
+ uint32_t os_type;
+
+ /* os distribution string format */
+ uint8_t os_dist_str[128];
+
+ /* OS distribution numeric format */
+ uint32_t os_dist;
+
+ /* kernel version string format */
+ uint8_t kernel_ver_str[32];
+
+ /* Kernel version numeric format */
+ uint32_t kernel_ver;
+
+ /* 7:0 : major
+ * 15:8 : minor
+ * 23:16 : sub_minor
+ */
+ uint32_t driver_version;
+
+ /* features bitmap */
+ uint32_t supported_network_features[4];
+};
+
+struct ena_admin_rss_ind_table_entry {
+ uint16_t cq_idx;
+
+ uint16_t reserved;
+};
+
+struct ena_admin_feature_rss_ind_table {
+ /* min supported table size (2^min_size) */
+ uint16_t min_size;
+
+ /* max supported table size (2^max_size) */
+ uint16_t max_size;
+
+ /* table size (2^size) */
+ uint16_t size;
+
+ uint16_t reserved;
+
+ /* index of the inline entry. 0xFFFFFFFF means invalid */
+ uint32_t inline_index;
+
+ /* used for updating single entry, ignored when setting the entire
+ * table through the control buffer.
+ */
+ struct ena_admin_rss_ind_table_entry inline_entry;
+};
+
+/* When hint value is 0, driver should use it's own predefined value */
+struct ena_admin_ena_hw_hints {
+ /* value in ms */
+ uint16_t mmio_read_timeout;
+
+ /* value in ms */
+ uint16_t driver_watchdog_timeout;
+
+ /* Per packet tx completion timeout. value in ms */
+ uint16_t missing_tx_completion_timeout;
+
+ uint16_t missed_tx_completion_count_threshold_to_reset;
+
+ /* value in ms */
+ uint16_t admin_completion_tx_timeout;
+
+ uint16_t netdev_wd_timeout;
+
+ uint16_t max_tx_sgl_size;
+
+ uint16_t max_rx_sgl_size;
+
+ uint16_t reserved[8];
+};
+
+struct ena_admin_get_feat_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+
+ struct ena_admin_get_set_feature_common_desc feat_common;
+
+ uint32_t raw[11];
+};
+
+struct ena_admin_get_feat_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ union {
+ uint32_t raw[14];
+
+ struct ena_admin_device_attr_feature_desc dev_attr;
+
+ struct ena_admin_queue_feature_desc max_queue;
+
+ struct ena_admin_feature_aenq_desc aenq;
+
+ struct ena_admin_get_feature_link_desc link;
+
+ struct ena_admin_feature_offload_desc offload;
+
+ struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+ struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+ struct ena_admin_feature_rss_ind_table ind_table;
+
+ struct ena_admin_feature_intr_moder_desc intr_moderation;
+
+ struct ena_admin_ena_hw_hints hw_hints;
+ } u;
+};
+
+struct ena_admin_set_feat_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+
+ struct ena_admin_get_set_feature_common_desc feat_common;
+
+ union {
+ uint32_t raw[11];
+
+ /* mtu size */
+ struct ena_admin_set_feature_mtu_desc mtu;
+
+ /* host attributes */
+ struct ena_admin_set_feature_host_attr_desc host_attr;
+
+ /* AENQ configuration */
+ struct ena_admin_feature_aenq_desc aenq;
+
+ /* rss flow hash function */
+ struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+ /* rss flow hash input */
+ struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+ /* rss indirection table */
+ struct ena_admin_feature_rss_ind_table ind_table;
+ } u;
+};
+
+struct ena_admin_set_feat_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ union {
+ uint32_t raw[14];
+ } u;
+};
+
+struct ena_admin_aenq_common_desc {
+ uint16_t group;
+
+ uint16_t syndrom;
+
+ /* 0 : phase */
+ uint8_t flags;
+
+ uint8_t reserved1[3];
+
+ uint32_t timestamp_low;
+
+ uint32_t timestamp_high;
+};
+
+/* asynchronous event notification groups */
+enum ena_admin_aenq_group {
+ ENA_ADMIN_LINK_CHANGE = 0,
+
+ ENA_ADMIN_FATAL_ERROR = 1,
+
+ ENA_ADMIN_WARNING = 2,
+
+ ENA_ADMIN_NOTIFICATION = 3,
+
+ ENA_ADMIN_KEEP_ALIVE = 4,
+
+ ENA_ADMIN_AENQ_GROUPS_NUM = 5,
+};
+
+enum ena_admin_aenq_notification_syndrom {
+ ENA_ADMIN_SUSPEND = 0,
+
+ ENA_ADMIN_RESUME = 1,
+
+ ENA_ADMIN_UPDATE_HINTS = 2,
+};
+
+struct ena_admin_aenq_entry {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ /* command specific inline data */
+ uint32_t inline_data_w4[12];
+};
+
+struct ena_admin_aenq_link_change_desc {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ /* 0 : link_status */
+ uint32_t flags;
+};
+
+struct ena_admin_aenq_keep_alive_desc {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ uint32_t rx_drops_low;
+
+ uint32_t rx_drops_high;
+};
+
+struct ena_admin_ena_mmio_req_read_less_resp {
+ uint16_t req_id;
+
+ uint16_t reg_off;
+
+ /* value is valid when poll is cleared */
+ uint32_t reg_val;
+};
+
+/* aq_common_desc */
+#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+
+/* sq */
+#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
+
+/* acq_common_desc */
+#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aq_create_sq_cmd */
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0)
+
+/* aq_create_cq_cmd */
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+
+/* get_set_feature_common_desc */
+#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+
+/* get_feature_link_desc */
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
+
+/* feature_offload_desc */
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
+
+/* feature_rss_flow_hash_function */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0)
+
+/* feature_rss_flow_hash_input */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2)
+
+/* host_info */
+#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
+#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+
+/* aenq_common_desc */
+#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aenq_link_change_desc */
+#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
+
+#if !defined(ENA_DEFS_LINUX_MAINLINE)
+static inline uint16_t get_ena_admin_aq_common_desc_command_id(const struct ena_admin_aq_common_desc *p)
+{
+ return p->command_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline void set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p, uint16_t val)
+{
+ p->command_id |= val & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p, uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data(const struct ena_admin_aq_common_desc *p)
+{
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT;
+}
+
+static inline void set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p, uint8_t val)
+{
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data_indirect(const struct ena_admin_aq_common_desc *p)
+{
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT;
+}
+
+static inline void set_ena_admin_aq_common_desc_ctrl_data_indirect(struct ena_admin_aq_common_desc *p, uint8_t val)
+{
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+}
+
+static inline uint8_t get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p)
+{
+ return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK) >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT;
+}
+
+static inline void set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val)
+{
+ p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+}
+
+static inline uint16_t get_ena_admin_acq_common_desc_command_id(const struct ena_admin_acq_common_desc *p)
+{
+ return p->command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline void set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p, uint16_t val)
+{
+ p->command |= val & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline uint8_t get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p, uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_sq_direction(const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_sq_direction(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+ p->sq_identity |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_placement_policy(const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_placement_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+ p->sq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_completion_policy(const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return (p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_completion_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+ p->sq_caps_2 |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return p->sq_caps_3 & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+}
+
+static inline void set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
+{
+ p->sq_caps_3 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(const struct ena_admin_aq_create_cq_cmd *p)
+{
+ return (p->cq_caps_1 & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK) >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT;
+}
+
+static inline void set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
+{
+ p->cq_caps_1 |= (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT) & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+}
+
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(const struct ena_admin_aq_create_cq_cmd *p)
+{
+ return p->cq_caps_2 & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+}
+
+static inline void set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
+{
+ p->cq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+}
+
+static inline uint8_t get_ena_admin_get_set_feature_common_desc_select(const struct ena_admin_get_set_feature_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
+}
+
+static inline void set_ena_admin_get_set_feature_common_desc_select(struct ena_admin_get_set_feature_common_desc *p, uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
+}
+
+static inline uint32_t get_ena_admin_get_feature_link_desc_autoneg(const struct ena_admin_get_feature_link_desc *p)
+{
+ return p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
+}
+
+static inline void set_ena_admin_get_feature_link_desc_autoneg(struct ena_admin_get_feature_link_desc *p, uint32_t val)
+{
+ p->flags |= val & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
+}
+
+static inline uint32_t get_ena_admin_get_feature_link_desc_duplex(const struct ena_admin_get_feature_link_desc *p)
+{
+ return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK) >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT;
+}
+
+static inline void set_ena_admin_get_feature_link_desc_duplex(struct ena_admin_get_feature_link_desc *p, uint32_t val)
+{
+ p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT) & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
+{
+ return p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv4(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_tso_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv6(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_tso_ipv6(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ecn(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_tso_ecn(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
+{
+ return p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->rx_supported |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_hash(const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT;
+}
+
+static inline void set_ena_admin_feature_offload_desc_RX_hash(struct ena_admin_feature_offload_desc *p, uint32_t val)
+{
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_funcs(const struct ena_admin_feature_rss_flow_hash_function *p)
+{
+ return p->supported_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_function_funcs(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
+{
+ p->supported_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+}
+
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_selected_func(const struct ena_admin_feature_rss_flow_hash_function *p)
+{
+ return p->selected_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_function_selected_func(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
+{
+ p->selected_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+ p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+ p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+ p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK;
+}
+
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT;
+}
+
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+{
+ p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_major(const struct ena_admin_host_info *p)
+{
+ return p->driver_version & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
+}
+
+static inline void set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_version |= val & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_minor(const struct ena_admin_host_info *p)
+{
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT;
+}
+
+static inline void set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_MINOR_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p)
+{
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT;
+}
+
+static inline void set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK;
+}
+
+static inline uint8_t get_ena_admin_aenq_common_desc_phase(const struct ena_admin_aenq_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_admin_aenq_common_desc_phase(struct ena_admin_aenq_common_desc *p, uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_admin_aenq_link_change_desc_link_status(const struct ena_admin_aenq_link_change_desc *p)
+{
+ return p->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+}
+
+static inline void set_ena_admin_aenq_link_change_desc_link_status(struct ena_admin_aenq_link_change_desc *p, uint32_t val)
+{
+ p->flags |= val & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+}
+
+#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
+#endif /*_ENA_ADMIN_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_common_defs.h b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_common_defs.h
new file mode 100644
index 00000000..072e6c1f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_common_defs.h
@@ -0,0 +1,50 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_COMMON_H_
+#define _ENA_COMMON_H_
+
+#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */
+#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */
+
+/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
+struct ena_common_mem_addr {
+ uint32_t mem_addr_low;
+
+ uint16_t mem_addr_high;
+
+ /* MBZ */
+ uint16_t reserved16;
+};
+
+#endif /*_ENA_COMMON_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
new file mode 100644
index 00000000..4cf0b205
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
@@ -0,0 +1,960 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_ETH_IO_H_
+#define _ENA_ETH_IO_H_
+
+enum ena_eth_io_l3_proto_index {
+ ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
+
+ ENA_ETH_IO_L3_PROTO_IPV4 = 8,
+
+ ENA_ETH_IO_L3_PROTO_IPV6 = 11,
+
+ ENA_ETH_IO_L3_PROTO_FCOE = 21,
+
+ ENA_ETH_IO_L3_PROTO_ROCE = 22,
+};
+
+enum ena_eth_io_l4_proto_index {
+ ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
+
+ ENA_ETH_IO_L4_PROTO_TCP = 12,
+
+ ENA_ETH_IO_L4_PROTO_UDP = 13,
+
+ ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
+};
+
+struct ena_eth_io_tx_desc {
+ /* 15:0 : length - Buffer length in bytes, must
+ * include any packet trailers that the ENA supposed
+ * to update like End-to-End CRC, Authentication GMAC
+ * etc. This length must not include the
+ * 'Push_Buffer' length. This length must not include
+ * the 4-byte added in the end for 802.3 Ethernet FCS
+ * 21:16 : req_id_hi - Request ID[15:10]
+ * 22 : reserved22 - MBZ
+ * 23 : meta_desc - MBZ
+ * 24 : phase
+ * 25 : reserved1 - MBZ
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : comp_req - Indicates whether completion
+ * should be posted, after packet is transmitted.
+ * Valid only for first descriptor
+ * 30:29 : reserved29 - MBZ
+ * 31 : reserved31 - MBZ
+ */
+ uint32_t len_ctrl;
+
+ /* 3:0 : l3_proto_idx - L3 protocol. This field
+ * required when l3_csum_en,l3_csum or tso_en are set.
+ * 4 : DF - IPv4 DF, must be 0 if packet is IPv4 and
+ * DF flags of the IPv4 header is 0. Otherwise must
+ * be set to 1
+ * 6:5 : reserved5
+ * 7 : tso_en - Enable TSO, For TCP only.
+ * 12:8 : l4_proto_idx - L4 protocol. This field need
+ * to be set when l4_csum_en or tso_en are set.
+ * 13 : l3_csum_en - enable IPv4 header checksum.
+ * 14 : l4_csum_en - enable TCP/UDP checksum.
+ * 15 : ethernet_fcs_dis - when set, the controller
+ * will not append the 802.3 Ethernet Frame Check
+ * Sequence to the packet
+ * 16 : reserved16
+ * 17 : l4_csum_partial - L4 partial checksum. when
+ * set to 0, the ENA calculates the L4 checksum,
+ * where the Destination Address required for the
+ * TCP/UDP pseudo-header is taken from the actual
+ * packet L3 header. when set to 1, the ENA doesn't
+ * calculate the sum of the pseudo-header, instead,
+ * the checksum field of the L4 is used instead. When
+ * TSO enabled, the checksum of the pseudo-header
+ * must not include the tcp length field. L4 partial
+ * checksum should be used for IPv6 packet that
+ * contains Routing Headers.
+ * 20:18 : reserved18 - MBZ
+ * 21 : reserved21 - MBZ
+ * 31:22 : req_id_lo - Request ID[9:0]
+ */
+ uint32_t meta_ctrl;
+
+ uint32_t buff_addr_lo;
+
+ /* address high and header size
+ * 15:0 : addr_hi - Buffer Pointer[47:32]
+ * 23:16 : reserved16_w2
+ * 31:24 : header_length - Header length. For Low
+ * Latency Queues, this fields indicates the number
+ * of bytes written to the headers' memory. For
+ * normal queues, if packet is TCP or UDP, and longer
+ * than max_header_size, then this field should be
+ * set to the sum of L4 header offset and L4 header
+ * size(without options), otherwise, this field
+ * should be set to 0. For both modes, this field
+ * must not exceed the max_header_size.
+ * max_header_size value is reported by the Max
+ * Queues Feature descriptor
+ */
+ uint32_t buff_addr_hi_hdr_sz;
+};
+
+struct ena_eth_io_tx_meta_desc {
+ /* 9:0 : req_id_lo - Request ID[9:0]
+ * 11:10 : reserved10 - MBZ
+ * 12 : reserved12 - MBZ
+ * 13 : reserved13 - MBZ
+ * 14 : ext_valid - if set, offset fields in Word2
+ * are valid Also MSS High in Word 0 and bits [31:24]
+ * in Word 3
+ * 15 : reserved15
+ * 19:16 : mss_hi
+ * 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1:
+ * Extended Metadata Descriptor
+ * 21 : meta_store - Store extended metadata in queue
+ * cache
+ * 22 : reserved22 - MBZ
+ * 23 : meta_desc - MBO
+ * 24 : phase
+ * 25 : reserved25 - MBZ
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : comp_req - Indicates whether completion
+ * should be posted, after packet is transmitted.
+ * Valid only for first descriptor
+ * 30:29 : reserved29 - MBZ
+ * 31 : reserved31 - MBZ
+ */
+ uint32_t len_ctrl;
+
+ /* 5:0 : req_id_hi
+ * 31:6 : reserved6 - MBZ
+ */
+ uint32_t word1;
+
+ /* 7:0 : l3_hdr_len
+ * 15:8 : l3_hdr_off
+ * 21:16 : l4_hdr_len_in_words - counts the L4 header
+ * length in words. there is an explicit assumption
+ * that L4 header appears right after L3 header and
+ * L4 offset is based on l3_hdr_off+l3_hdr_len
+ * 31:22 : mss_lo
+ */
+ uint32_t word2;
+
+ uint32_t reserved;
+};
+
+struct ena_eth_io_tx_cdesc {
+ /* Request ID[15:0] */
+ uint16_t req_id;
+
+ uint8_t status;
+
+ /* flags
+ * 0 : phase
+ * 7:1 : reserved1
+ */
+ uint8_t flags;
+
+ uint16_t sub_qid;
+
+ uint16_t sq_head_idx;
+};
+
+struct ena_eth_io_rx_desc {
+ /* In bytes. 0 means 64KB */
+ uint16_t length;
+
+ /* MBZ */
+ uint8_t reserved2;
+
+ /* 0 : phase
+ * 1 : reserved1 - MBZ
+ * 2 : first - Indicates first descriptor in
+ * transaction
+ * 3 : last - Indicates last descriptor in transaction
+ * 4 : comp_req
+ * 5 : reserved5 - MBO
+ * 7:6 : reserved6 - MBZ
+ */
+ uint8_t ctrl;
+
+ uint16_t req_id;
+
+ /* MBZ */
+ uint16_t reserved6;
+
+ uint32_t buff_addr_lo;
+
+ uint16_t buff_addr_hi;
+
+ /* MBZ */
+ uint16_t reserved16_w3;
+};
+
+/* 4-word format Note: all ethernet parsing information are valid only when
+ * last=1
+ */
+struct ena_eth_io_rx_cdesc_base {
+ /* 4:0 : l3_proto_idx
+ * 6:5 : src_vlan_cnt
+ * 7 : reserved7 - MBZ
+ * 12:8 : l4_proto_idx
+ * 13 : l3_csum_err - when set, either the L3
+ * checksum error detected, or, the controller didn't
+ * validate the checksum. This bit is valid only when
+ * l3_proto_idx indicates IPv4 packet
+ * 14 : l4_csum_err - when set, either the L4
+ * checksum error detected, or, the controller didn't
+ * validate the checksum. This bit is valid only when
+ * l4_proto_idx indicates TCP/UDP packet, and,
+ * ipv4_frag is not set
+ * 15 : ipv4_frag - Indicates IPv4 fragmented packet
+ * 23:16 : reserved16
+ * 24 : phase
+ * 25 : l3_csum2 - second checksum engine result
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 29:28 : reserved28
+ * 30 : buffer - 0: Metadata descriptor. 1: Buffer
+ * Descriptor was used
+ * 31 : reserved31
+ */
+ uint32_t status;
+
+ uint16_t length;
+
+ uint16_t req_id;
+
+ /* 32-bit hash result */
+ uint32_t hash;
+
+ uint16_t sub_qid;
+
+ uint16_t reserved;
+};
+
+/* 8-word format */
+struct ena_eth_io_rx_cdesc_ext {
+ struct ena_eth_io_rx_cdesc_base base;
+
+ uint32_t buff_addr_lo;
+
+ uint16_t buff_addr_hi;
+
+ uint16_t reserved16;
+
+ uint32_t reserved_w6;
+
+ uint32_t reserved_w7;
+};
+
+struct ena_eth_io_intr_reg {
+ /* 14:0 : rx_intr_delay
+ * 29:15 : tx_intr_delay
+ * 30 : intr_unmask
+ * 31 : reserved
+ */
+ uint32_t intr_control;
+};
+
+struct ena_eth_io_numa_node_cfg_reg {
+ /* 7:0 : numa
+ * 30:8 : reserved
+ * 31 : enabled
+ */
+ uint32_t numa_cfg;
+};
+
+/* tx_desc */
+#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
+#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
+#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
+#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
+#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
+
+/* tx_meta_desc */
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
+
+/* tx_cdesc */
+#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
+
+/* rx_desc */
+#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
+#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
+#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
+#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
+#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
+
+/* rx_cdesc_base */
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
+
+/* intr_reg */
+#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
+
+/* numa_node_cfg_reg */
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
+
+#if !defined(ENA_DEFS_LINUX_MAINLINE)
+static inline uint32_t get_ena_eth_io_tx_desc_length(const struct ena_eth_io_tx_desc *p)
+{
+ return p->len_ctrl & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_length(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= val & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_req_id_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_meta_desc(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_DESC_META_DESC_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_phase(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_phase(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_first(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_first(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_DESC_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_last(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK) >> ENA_ETH_IO_TX_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_last(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_DESC_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_comp_req(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_comp_req(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(const struct ena_eth_io_tx_desc *p)
+{
+ return p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_l3_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= val & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_DF(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK) >> ENA_ETH_IO_TX_DESC_DF_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_DF(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_DF_SHIFT) & ENA_ETH_IO_TX_DESC_DF_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_tso_en(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK) >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_tso_en(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) & ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l3_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK) >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT) & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_csum_partial(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_req_id_lo(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(const struct ena_eth_io_tx_desc *p)
+{
+ return p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_addr_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->buff_addr_hi_hdr_sz |= val & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_header_length(const struct ena_eth_io_tx_desc *p)
+{
+ return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK) >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_header_length(struct ena_eth_io_tx_desc *p, uint32_t val)
+{
+ p->buff_addr_hi_hdr_sz |= (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->len_ctrl & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK) >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_ext_valid(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT) & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_mss_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK) >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT) & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK) >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_meta_store(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_meta_desc(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_phase(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_first(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_first(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_last(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK) >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_last(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_META_DESC_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_comp_req(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->word1 & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->word1 |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->word2 |= val & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK) >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK) >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_mss_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+{
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_tx_cdesc_phase(const struct ena_eth_io_tx_cdesc *p)
+{
+ return p->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+}
+
+static inline void set_ena_eth_io_tx_cdesc_phase(struct ena_eth_io_tx_cdesc *p, uint8_t val)
+{
+ p->flags |= val & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_phase(const struct ena_eth_io_rx_desc *p)
+{
+ return p->ctrl & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_eth_io_rx_desc_phase(struct ena_eth_io_rx_desc *p, uint8_t val)
+{
+ p->ctrl |= val & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_first(const struct ena_eth_io_rx_desc *p)
+{
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK) >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_first(struct ena_eth_io_rx_desc *p, uint8_t val)
+{
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT) & ENA_ETH_IO_RX_DESC_FIRST_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_last(const struct ena_eth_io_rx_desc *p)
+{
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK) >> ENA_ETH_IO_RX_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_last(struct ena_eth_io_rx_desc *p, uint8_t val)
+{
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT) & ENA_ETH_IO_RX_DESC_LAST_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_comp_req(const struct ena_eth_io_rx_desc *p)
+{
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_comp_req(struct ena_eth_io_rx_desc *p, uint8_t val)
+{
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= val & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_phase(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_first(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_last(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_buffer(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+{
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(const struct ena_eth_io_intr_reg *p)
+{
+ return p->intr_control & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+}
+
+static inline void set_ena_eth_io_intr_reg_rx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
+{
+ p->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(const struct ena_eth_io_intr_reg *p)
+{
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK) >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT;
+}
+
+static inline void set_ena_eth_io_intr_reg_tx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
+{
+ p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(const struct ena_eth_io_intr_reg *p)
+{
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK) >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT;
+}
+
+static inline void set_ena_eth_io_intr_reg_intr_unmask(struct ena_eth_io_intr_reg *p, uint32_t val)
+{
+ p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT) & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa(const struct ena_eth_io_numa_node_cfg_reg *p)
+{
+ return p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
+}
+
+static inline void set_ena_eth_io_numa_node_cfg_reg_numa(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
+{
+ p->numa_cfg |= val & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled(const struct ena_eth_io_numa_node_cfg_reg *p)
+{
+ return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK) >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT;
+}
+
+static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
+{
+ p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT) & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
+}
+
+#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
+#endif /*_ENA_ETH_IO_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_gen_info.h b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_gen_info.h
new file mode 100644
index 00000000..e87bcfd8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_gen_info.h
@@ -0,0 +1,35 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define ENA_GEN_DATE "Sun Oct 23 12:27:32 IDT 2016"
+#define ENA_GEN_COMMIT "79d82fa"
diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_includes.h b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_includes.h
new file mode 100644
index 00000000..30a920a8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_includes.h
@@ -0,0 +1,37 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "ena_common_defs.h"
+#include "ena_regs_defs.h"
+#include "ena_admin_defs.h"
+#include "ena_eth_io_defs.h"
diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_regs_defs.h b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_regs_defs.h
new file mode 100644
index 00000000..b0870f25
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_regs_defs.h
@@ -0,0 +1,171 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_REGS_H_
+#define _ENA_REGS_H_
+
+enum ena_regs_reset_reason_types {
+ ENA_REGS_RESET_NORMAL = 0,
+
+ ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
+
+ ENA_REGS_RESET_ADMIN_TO = 2,
+
+ ENA_REGS_RESET_MISS_TX_CMPL = 3,
+
+ ENA_REGS_RESET_INV_RX_REQ_ID = 4,
+
+ ENA_REGS_RESET_INV_TX_REQ_ID = 5,
+
+ ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
+
+ ENA_REGS_RESET_INIT_ERR = 7,
+
+ ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
+
+ ENA_REGS_RESET_OS_TRIGGER = 9,
+
+ ENA_REGS_RESET_OS_NETDEV_WD = 10,
+
+ ENA_REGS_RESET_SHUTDOWN = 11,
+
+ ENA_REGS_RESET_USER_TRIGGER = 12,
+
+ ENA_REGS_RESET_GENERIC = 13,
+
+ ENA_REGS_RESET_MISS_INTERRUPT = 14,
+};
+
+/* ena_registers offsets */
+#define ENA_REGS_VERSION_OFF 0x0
+#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
+#define ENA_REGS_CAPS_OFF 0x8
+#define ENA_REGS_CAPS_EXT_OFF 0xc
+#define ENA_REGS_AQ_BASE_LO_OFF 0x10
+#define ENA_REGS_AQ_BASE_HI_OFF 0x14
+#define ENA_REGS_AQ_CAPS_OFF 0x18
+#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
+#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
+#define ENA_REGS_ACQ_CAPS_OFF 0x28
+#define ENA_REGS_AQ_DB_OFF 0x2c
+#define ENA_REGS_ACQ_TAIL_OFF 0x30
+#define ENA_REGS_AENQ_CAPS_OFF 0x34
+#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
+#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
+#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
+#define ENA_REGS_AENQ_TAIL_OFF 0x44
+#define ENA_REGS_INTR_MASK_OFF 0x4c
+#define ENA_REGS_DEV_CTL_OFF 0x54
+#define ENA_REGS_DEV_STS_OFF 0x58
+#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
+#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
+#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+
+/* version register */
+#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
+#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
+#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
+
+/* controller_version register */
+#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
+
+/* caps register */
+#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
+
+/* aq_caps register */
+#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* acq_caps register */
+#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* aenq_caps register */
+#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* dev_ctl register */
+#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
+#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
+#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
+#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
+#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
+#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
+
+/* dev_sts register */
+#define ENA_REGS_DEV_STS_READY_MASK 0x1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
+#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
+#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
+#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
+#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80
+
+/* mmio_reg_read register */
+#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
+
+/* rss_ind_entry_update register */
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+
+#endif /*_ENA_REGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.c b/src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.c
new file mode 100644
index 00000000..4c4989a3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.c
@@ -0,0 +1,526 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "ena_eth_com.h"
+
+static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+ u16 expected_phase, head_masked;
+ u16 desc_phase;
+
+ head_masked = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ + (head_masked * io_cq->cdesc_entry_size_in_bytes));
+
+ desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+
+ if (desc_phase != expected_phase)
+ return NULL;
+
+ return cdesc;
+}
+
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+ io_cq->head++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+ io_cq->phase ^= 1;
+}
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+ u16 tail_masked;
+ u32 offset;
+
+ tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+
+ offset = tail_masked * io_sq->desc_entry_size;
+
+ return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
+}
+
+static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
+{
+ u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+ u32 offset = tail_masked * io_sq->desc_entry_size;
+
+ /* In case this queue isn't a LLQ */
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return;
+
+ memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
+ io_sq->desc_addr.virt_addr + offset,
+ io_sq->desc_entry_size);
+}
+
+static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+ io_sq->tail++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+ io_sq->phase ^= 1;
+}
+
+static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
+ u8 *head_src, u16 header_len)
+{
+ u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+ u8 __iomem *dev_head_addr =
+ io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return 0;
+
+ if (unlikely(!io_sq->header_addr)) {
+ ena_trc_err("Push buffer header ptr is NULL\n");
+ return ENA_COM_INVAL;
+ }
+
+ memcpy_toio(dev_head_addr, head_src, header_len);
+
+ return 0;
+}
+
+static inline struct ena_eth_io_rx_cdesc_base *
+ ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
+{
+ idx &= (io_cq->q_depth - 1);
+ return (struct ena_eth_io_rx_cdesc_base *)
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ idx * io_cq->cdesc_entry_size_in_bytes);
+}
+
+static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+ u16 *first_cdesc_idx)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+ u16 count = 0, head_masked;
+ u32 last = 0;
+
+ do {
+ cdesc = ena_com_get_next_rx_cdesc(io_cq);
+ if (!cdesc)
+ break;
+
+ ena_com_cq_inc_head(io_cq);
+ count++;
+ last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ } while (!last);
+
+ if (last) {
+ *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
+ count += io_cq->cur_rx_pkt_cdesc_count;
+
+ head_masked = io_cq->head & (io_cq->q_depth - 1);
+
+ io_cq->cur_rx_pkt_cdesc_count = 0;
+ io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
+
+ ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
+ io_cq->qid, *first_cdesc_idx, count);
+ } else {
+ io_cq->cur_rx_pkt_cdesc_count += count;
+ count = 0;
+ }
+
+ return count;
+}
+
+static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ int rc;
+
+ if (ena_tx_ctx->meta_valid) {
+ rc = memcmp(&io_sq->cached_tx_meta,
+ &ena_tx_ctx->ena_meta,
+ sizeof(struct ena_com_tx_meta));
+
+ if (unlikely(rc != 0))
+ return true;
+ }
+
+ return false;
+}
+
+static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
+ struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
+
+ meta_desc = get_sq_desc(io_sq);
+ memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
+
+ /* bits 0-9 of the mss */
+ meta_desc->word2 |= (ena_meta->mss <<
+ ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+ /* bits 10-13 of the mss */
+ meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
+
+ /* Extended meta desc */
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+ meta_desc->len_ctrl |= (io_sq->phase <<
+ ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+ meta_desc->word2 |= ena_meta->l3_hdr_len &
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+ meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+
+ meta_desc->word2 |= (ena_meta->l4_hdr_len <<
+ ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+
+ /* Cached the meta desc */
+ memcpy(&io_sq->cached_tx_meta, ena_meta,
+ sizeof(struct ena_com_tx_meta));
+
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+ ena_com_sq_update_tail(io_sq);
+}
+
+static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+ struct ena_eth_io_rx_cdesc_base *cdesc)
+{
+ ena_rx_ctx->l3_proto = cdesc->status &
+ ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+ ena_rx_ctx->l4_proto =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+ ena_rx_ctx->l3_csum_err =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+ ena_rx_ctx->l4_csum_err =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+ ena_rx_ctx->hash = cdesc->hash;
+ ena_rx_ctx->frag =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+
+ ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
+ ena_rx_ctx->l3_proto,
+ ena_rx_ctx->l4_proto,
+ ena_rx_ctx->l3_csum_err,
+ ena_rx_ctx->l4_csum_err,
+ ena_rx_ctx->hash,
+ ena_rx_ctx->frag,
+ cdesc->status);
+}
+
+/*****************************************************************************/
+/***************************** API **********************************/
+/*****************************************************************************/
+
+int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ int *nb_hw_desc)
+{
+ struct ena_eth_io_tx_desc *desc = NULL;
+ struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
+ void *push_header = ena_tx_ctx->push_header;
+ u16 header_len = ena_tx_ctx->header_len;
+ u16 num_bufs = ena_tx_ctx->num_bufs;
+ int total_desc, i, rc;
+ bool have_meta;
+ u64 addr_hi;
+
+ ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
+ "wrong Q type");
+
+ /* num_bufs +1 for potential meta desc */
+ if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
+ ena_trc_err("Not enough space in the tx queue\n");
+ return ENA_COM_NO_MEM;
+ }
+
+ if (unlikely(header_len > io_sq->tx_max_header_size)) {
+ ena_trc_err("header size is too large %d max header: %d\n",
+ header_len, io_sq->tx_max_header_size);
+ return ENA_COM_INVAL;
+ }
+
+ /* start with pushing the header (if needed) */
+ rc = ena_com_write_header(io_sq, push_header, header_len);
+ if (unlikely(rc))
+ return rc;
+
+ have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
+ ena_tx_ctx);
+ if (have_meta)
+ ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+
+ /* If the caller doesn't want send packets */
+ if (unlikely(!num_bufs && !header_len)) {
+ *nb_hw_desc = have_meta ? 0 : 1;
+ return 0;
+ }
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+ /* Set first desc when we don't have meta descriptor */
+ if (!have_meta)
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
+
+ desc->buff_addr_hi_hdr_sz |= (header_len <<
+ ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
+ ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+ desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_DESC_PHASE_MASK;
+
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+
+ /* Bits 0-9 */
+ desc->meta_ctrl |= (ena_tx_ctx->req_id <<
+ ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
+ ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+
+ desc->meta_ctrl |= (ena_tx_ctx->df <<
+ ENA_ETH_IO_TX_DESC_DF_SHIFT) &
+ ENA_ETH_IO_TX_DESC_DF_MASK;
+
+ /* Bits 10-15 */
+ desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
+ ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
+ ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+
+ if (ena_tx_ctx->meta_valid) {
+ desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
+ ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+ desc->meta_ctrl |= ena_tx_ctx->l3_proto &
+ ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
+ ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
+ ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
+ ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
+ ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+ }
+
+ for (i = 0; i < num_bufs; i++) {
+ /* The first desc share the same desc as the header */
+ if (likely(i != 0)) {
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+ ena_com_sq_update_tail(io_sq);
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+ desc->len_ctrl |= (io_sq->phase <<
+ ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_DESC_PHASE_MASK;
+ }
+
+ desc->len_ctrl |= ena_bufs->len &
+ ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+
+ addr_hi = ((ena_bufs->paddr &
+ GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+ desc->buff_addr_lo = (u32)ena_bufs->paddr;
+ desc->buff_addr_hi_hdr_sz |= addr_hi &
+ ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+ ena_bufs++;
+ }
+
+ /* set the last desc indicator */
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
+
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+
+ ena_com_sq_update_tail(io_sq);
+
+ total_desc = ENA_MAX16(num_bufs, 1);
+ total_desc += have_meta ? 1 : 0;
+
+ *nb_hw_desc = total_desc;
+ return 0;
+}
+
+int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_rx_ctx *ena_rx_ctx)
+{
+ struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
+ struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
+ u16 cdesc_idx = 0;
+ u16 nb_hw_desc;
+ u16 i;
+
+ ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
+ "wrong Q type");
+
+ nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
+ if (nb_hw_desc == 0) {
+ ena_rx_ctx->descs = nb_hw_desc;
+ return 0;
+ }
+
+ ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
+ io_cq->qid, nb_hw_desc);
+
+ if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
+ ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
+ nb_hw_desc, ena_rx_ctx->max_bufs);
+ return ENA_COM_NO_SPACE;
+ }
+
+ for (i = 0; i < nb_hw_desc; i++) {
+ cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
+
+ ena_buf->len = cdesc->length;
+ ena_buf->req_id = cdesc->req_id;
+ ena_buf++;
+ }
+
+ /* Update SQ head ptr */
+ io_sq->next_to_comp += nb_hw_desc;
+
+ ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
+ io_sq->qid, io_sq->next_to_comp);
+
+ /* Get rx flags from the last pkt */
+ ena_com_rx_set_flags(ena_rx_ctx, cdesc);
+
+ ena_rx_ctx->descs = nb_hw_desc;
+ return 0;
+}
+
+int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_buf *ena_buf,
+ u16 req_id)
+{
+ struct ena_eth_io_rx_desc *desc;
+
+ ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
+ "wrong Q type");
+
+ if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+ return ENA_COM_NO_SPACE;
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
+
+ desc->length = ena_buf->len;
+
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
+ desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+
+ desc->req_id = req_id;
+
+ desc->buff_addr_lo = (u32)ena_buf->paddr;
+ desc->buff_addr_hi =
+ ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+ ena_com_sq_update_tail(io_sq);
+
+ return 0;
+}
+
+int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
+{
+ u8 expected_phase, cdesc_phase;
+ struct ena_eth_io_tx_cdesc *cdesc;
+ u16 masked_head;
+
+ masked_head = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_tx_cdesc *)
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ (masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+ /* When the current completion descriptor phase isn't the same as the
+ * expected, it mean that the device still didn't update
+ * this completion.
+ */
+ cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+ if (cdesc_phase != expected_phase)
+ return ENA_COM_TRY_AGAIN;
+
+ if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
+ ena_trc_err("Invalid req id %d\n", cdesc->req_id);
+ return ENA_COM_INVAL;
+ }
+
+ ena_com_cq_inc_head(io_cq);
+
+ *req_id = READ_ONCE(cdesc->req_id);
+
+ return 0;
+}
+
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+
+ cdesc = ena_com_get_next_rx_cdesc(io_cq);
+ if (cdesc)
+ return false;
+ else
+ return true;
+}
+
diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.h b/src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.h
new file mode 100644
index 00000000..56ea4ae6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.h
@@ -0,0 +1,169 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef ENA_ETH_COM_H_
+#define ENA_ETH_COM_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+#include "ena_com.h"
+
+/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
+#define ENA_COMP_HEAD_THRESH 4
+
+struct ena_com_tx_ctx {
+ struct ena_com_tx_meta ena_meta;
+ struct ena_com_buf *ena_bufs;
+ /* For LLQ, header buffer - pushed to the device mem space */
+ void *push_header;
+
+ enum ena_eth_io_l3_proto_index l3_proto;
+ enum ena_eth_io_l4_proto_index l4_proto;
+ u16 num_bufs;
+ u16 req_id;
+ /* For regular queue, indicate the size of the header
+ * For LLQ, indicate the size of the pushed buffer
+ */
+ u16 header_len;
+
+ u8 meta_valid;
+ u8 tso_enable;
+ u8 l3_csum_enable;
+ u8 l4_csum_enable;
+ u8 l4_csum_partial;
+ u8 df; /* Don't fragment */
+};
+
+struct ena_com_rx_ctx {
+ struct ena_com_rx_buf_info *ena_bufs;
+ enum ena_eth_io_l3_proto_index l3_proto;
+ enum ena_eth_io_l4_proto_index l4_proto;
+ bool l3_csum_err;
+ bool l4_csum_err;
+ /* fragmented packet */
+ bool frag;
+ u32 hash;
+ u16 descs;
+ int max_bufs;
+};
+
+int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ int *nb_hw_desc);
+
+int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_rx_ctx *ena_rx_ctx);
+
+int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_buf *ena_buf,
+ u16 req_id);
+
+int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
+
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
+
+static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
+ struct ena_eth_io_intr_reg *intr_reg)
+{
+ ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
+}
+
+static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+{
+ u16 tail, next_to_comp, cnt;
+
+ next_to_comp = io_sq->next_to_comp;
+ tail = io_sq->tail;
+ cnt = tail - next_to_comp;
+
+ return io_sq->q_depth - 1 - cnt;
+}
+
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+{
+ u16 tail;
+
+ tail = io_sq->tail;
+
+ ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
+ io_sq->qid, tail);
+
+ ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);
+
+ return 0;
+}
+
+static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
+{
+ u16 unreported_comp, head;
+ bool need_update;
+
+ head = io_cq->head;
+ unreported_comp = head - io_cq->last_head_update;
+ need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
+
+ if (io_cq->cq_head_db_reg && need_update) {
+ ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
+ io_cq->qid, head);
+ ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
+ io_cq->last_head_update = head;
+ }
+
+ return 0;
+}
+
+static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
+ u8 numa_node)
+{
+ struct ena_eth_io_numa_node_cfg_reg numa_cfg;
+
+ if (!io_cq->numa_node_cfg_reg)
+ return;
+
+ numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
+ | ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
+
+ ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
+}
+
+static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
+{
+ io_sq->next_to_comp += elem;
+}
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* ENA_ETH_COM_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_plat.h b/src/spdk/dpdk/drivers/net/ena/base/ena_plat.h
new file mode 100644
index 00000000..f829936b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/base/ena_plat.h
@@ -0,0 +1,57 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef ENA_PLAT_H_
+#define ENA_PLAT_H_
+
+#if defined(ENA_IPXE)
+#include "ena_plat_ipxe.h"
+#elif defined(__linux__)
+#if defined(__KERNEL__)
+#include "ena_plat_linux.h"
+#else
+#include "ena_plat_dpdk.h"
+#endif
+#elif defined(__FreeBSD__)
+#if defined(_KERNEL)
+#include "ena_plat_fbsd.h"
+#else
+#include "ena_plat_dpdk.h"
+#endif
+#elif defined(_WIN32)
+#include "ena_plat_windows.h"
+#else
+#error "Invalid platform"
+#endif
+
+#endif /* ENA_PLAT_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_plat_dpdk.h b/src/spdk/dpdk/drivers/net/ena/base/ena_plat_dpdk.h
new file mode 100644
index 00000000..900ba1a6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/base/ena_plat_dpdk.h
@@ -0,0 +1,286 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef DPDK_ENA_COM_ENA_PLAT_DPDK_H_
+#define DPDK_ENA_COM_ENA_PLAT_DPDK_H_
+
+#include <stdbool.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_spinlock.h>
+
+#include <sys/time.h>
+
+typedef uint64_t u64;
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+
+typedef uint64_t dma_addr_t;
+#ifndef ETIME
+#define ETIME ETIMEDOUT
+#endif
+
+#define ena_atomic32_t rte_atomic32_t
+#define ena_mem_handle_t const struct rte_memzone *
+
+#define SZ_256 (256U)
+#define SZ_4K (4096U)
+
+#define ENA_COM_OK 0
+#define ENA_COM_NO_MEM -ENOMEM
+#define ENA_COM_INVAL -EINVAL
+#define ENA_COM_NO_SPACE -ENOSPC
+#define ENA_COM_NO_DEVICE -ENODEV
+#define ENA_COM_TIMER_EXPIRED -ETIME
+#define ENA_COM_FAULT -EFAULT
+#define ENA_COM_TRY_AGAIN -EAGAIN
+#define ENA_COM_UNSUPPORTED -EOPNOTSUPP
+
+#define ____cacheline_aligned __rte_cache_aligned
+
+#define ENA_ABORT() abort()
+
+#define ENA_MSLEEP(x) rte_delay_ms(x)
+#define ENA_UDELAY(x) rte_delay_us(x)
+
+#define ENA_TOUCH(x) ((void)(x))
+#define memcpy_toio memcpy
+#define wmb rte_wmb
+#define rmb rte_wmb
+#define mb rte_mb
+#define __iomem
+
+#define US_PER_S 1000000
+#define ENA_GET_SYSTEM_USECS() \
+ (rte_get_timer_cycles() * US_PER_S / rte_get_timer_hz())
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+#define ENA_ASSERT(cond, format, arg...) \
+ do { \
+ if (unlikely(!(cond))) { \
+ RTE_LOG(ERR, PMD, format, ##arg); \
+ rte_panic("line %d\tassert \"" #cond "\"" \
+ "failed\n", __LINE__); \
+ } \
+ } while (0)
+#else
+#define ENA_ASSERT(cond, format, arg...) do {} while (0)
+#endif
+
+#define ENA_MAX32(x, y) RTE_MAX((x), (y))
+#define ENA_MAX16(x, y) RTE_MAX((x), (y))
+#define ENA_MAX8(x, y) RTE_MAX((x), (y))
+#define ENA_MIN32(x, y) RTE_MIN((x), (y))
+#define ENA_MIN16(x, y) RTE_MIN((x), (y))
+#define ENA_MIN8(x, y) RTE_MIN((x), (y))
+
+#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
+#define U64_C(x) x ## ULL
+#define BIT(nr) (1UL << (nr))
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#ifdef RTE_LIBRTE_ENA_COM_DEBUG
+#define ena_trc_dbg(format, arg...) \
+ RTE_LOG(DEBUG, PMD, "[ENA_COM: %s] " format, __func__, ##arg)
+#define ena_trc_info(format, arg...) \
+ RTE_LOG(INFO, PMD, "[ENA_COM: %s] " format, __func__, ##arg)
+#define ena_trc_warn(format, arg...) \
+ RTE_LOG(ERR, PMD, "[ENA_COM: %s] " format, __func__, ##arg)
+#define ena_trc_err(format, arg...) \
+ RTE_LOG(ERR, PMD, "[ENA_COM: %s] " format, __func__, ##arg)
+#else
+#define ena_trc_dbg(format, arg...) do { } while (0)
+#define ena_trc_info(format, arg...) do { } while (0)
+#define ena_trc_warn(format, arg...) do { } while (0)
+#define ena_trc_err(format, arg...) do { } while (0)
+#endif /* RTE_LIBRTE_ENA_COM_DEBUG */
+
+#define ENA_WARN(cond, format, arg...) \
+do { \
+ if (unlikely(cond)) { \
+ ena_trc_err( \
+ "Warn failed on %s:%s:%d:" format, \
+ __FILE__, __func__, __LINE__, ##arg); \
+ } \
+} while (0)
+
+/* Spinlock related methods */
+#define ena_spinlock_t rte_spinlock_t
+#define ENA_SPINLOCK_INIT(spinlock) rte_spinlock_init(&spinlock)
+#define ENA_SPINLOCK_LOCK(spinlock, flags) \
+ ({(void)flags; rte_spinlock_lock(&spinlock); })
+#define ENA_SPINLOCK_UNLOCK(spinlock, flags) \
+ ({(void)flags; rte_spinlock_unlock(&(spinlock)); })
+
+#define q_waitqueue_t \
+ struct { \
+ pthread_cond_t cond; \
+ pthread_mutex_t mutex; \
+ }
+
+#define ena_wait_queue_t q_waitqueue_t
+
+#define ENA_WAIT_EVENT_INIT(waitqueue) \
+ do { \
+ pthread_mutex_init(&(waitqueue).mutex, NULL); \
+ pthread_cond_init(&(waitqueue).cond, NULL); \
+ } while (0)
+
+#define ENA_WAIT_EVENT_WAIT(waitevent, timeout) \
+ do { \
+ struct timespec wait; \
+ struct timeval now; \
+ unsigned long timeout_us; \
+ gettimeofday(&now, NULL); \
+ wait.tv_sec = now.tv_sec + timeout / 1000000UL; \
+ timeout_us = timeout % 1000000UL; \
+ wait.tv_nsec = (now.tv_usec + timeout_us) * 1000UL; \
+ pthread_mutex_lock(&waitevent.mutex); \
+ pthread_cond_timedwait(&waitevent.cond, \
+ &waitevent.mutex, &wait); \
+ pthread_mutex_unlock(&waitevent.mutex); \
+ } while (0)
+#define ENA_WAIT_EVENT_SIGNAL(waitevent) pthread_cond_signal(&waitevent.cond)
+/* pthread condition doesn't need to be rearmed after usage */
+#define ENA_WAIT_EVENT_CLEAR(...)
+#define ENA_WAIT_EVENT_DESTROY(waitqueue) ((void)(waitqueue))
+
+#define ena_wait_event_t ena_wait_queue_t
+#define ENA_MIGHT_SLEEP()
+
+#define ENA_TIME_EXPIRE(timeout) (timeout < rte_get_timer_cycles())
+#define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \
+ (timeout_us * rte_get_timer_hz() / 1000000 + rte_get_timer_cycles())
+
+/*
+ * Each rte_memzone should have unique name.
+ * To satisfy it, count number of allocations and add it to name.
+ */
+extern uint32_t ena_alloc_cnt;
+
+#define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, handle) \
+ do { \
+ const struct rte_memzone *mz; \
+ char z_name[RTE_MEMZONE_NAMESIZE]; \
+ ENA_TOUCH(dmadev); ENA_TOUCH(handle); \
+ snprintf(z_name, sizeof(z_name), \
+ "ena_alloc_%d", ena_alloc_cnt++); \
+ mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, \
+ RTE_MEMZONE_IOVA_CONTIG); \
+ handle = mz; \
+ if (mz == NULL) { \
+ virt = NULL; \
+ phys = 0; \
+ } else { \
+ memset(mz->addr, 0, size); \
+ virt = mz->addr; \
+ phys = mz->iova; \
+ } \
+ } while (0)
+#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \
+ ({ ENA_TOUCH(size); ENA_TOUCH(phys); \
+ ENA_TOUCH(dmadev); \
+ rte_memzone_free(handle); })
+
+#define ENA_MEM_ALLOC_COHERENT_NODE( \
+ dmadev, size, virt, phys, mem_handle, node, dev_node) \
+ do { \
+ const struct rte_memzone *mz; \
+ char z_name[RTE_MEMZONE_NAMESIZE]; \
+ ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
+ snprintf(z_name, sizeof(z_name), \
+ "ena_alloc_%d", ena_alloc_cnt++); \
+ mz = rte_memzone_reserve(z_name, size, node, \
+ RTE_MEMZONE_IOVA_CONTIG); \
+ mem_handle = mz; \
+ if (mz == NULL) { \
+ virt = NULL; \
+ phys = 0; \
+ } else { \
+ memset(mz->addr, 0, size); \
+ virt = mz->addr; \
+ phys = mz->iova; \
+ } \
+ } while (0)
+
+#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \
+ do { \
+ ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
+ virt = rte_zmalloc_socket(NULL, size, 0, node); \
+ } while (0)
+
+#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)
+#define ENA_MEM_FREE(dmadev, ptr) ({ENA_TOUCH(dmadev); rte_free(ptr); })
+
+#define ENA_REG_WRITE32(bus, value, reg) \
+ ({ (void)(bus); rte_write32_relaxed((value), (reg)); })
+#define ENA_REG_READ32(bus, reg) \
+ ({ (void)(bus); rte_read32_relaxed((reg)); })
+
+#define ATOMIC32_INC(i32_ptr) rte_atomic32_inc(i32_ptr)
+#define ATOMIC32_DEC(i32_ptr) rte_atomic32_dec(i32_ptr)
+#define ATOMIC32_SET(i32_ptr, val) rte_atomic32_set(i32_ptr, val)
+#define ATOMIC32_READ(i32_ptr) rte_atomic32_read(i32_ptr)
+
+#define msleep(x) rte_delay_us(x * 1000)
+#define udelay(x) rte_delay_us(x)
+
+#define MAX_ERRNO 4095
+#define IS_ERR(x) (((unsigned long)x) >= (unsigned long)-MAX_ERRNO)
+#define ERR_PTR(error) ((void *)(long)error)
+#define PTR_ERR(error) ((long)(void *)error)
+#define might_sleep()
+
+#define lower_32_bits(x) ((uint32_t)(x))
+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
+
+#ifndef READ_ONCE
+#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
+#endif
+
+#endif /* DPDK_ENA_COM_ENA_PLAT_DPDK_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ena/ena_ethdev.c b/src/spdk/dpdk/drivers/net/ena/ena_ethdev.c
new file mode 100644
index 00000000..c255dc6d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/ena_ethdev.c
@@ -0,0 +1,2303 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_tcp.h>
+#include <rte_atomic.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_version.h>
+#include <rte_eal_memconfig.h>
+#include <rte_net.h>
+
+#include "ena_ethdev.h"
+#include "ena_logs.h"
+#include "ena_platform.h"
+#include "ena_com.h"
+#include "ena_eth_com.h"
+
+#include <ena_common_defs.h>
+#include <ena_regs_defs.h>
+#include <ena_admin_defs.h>
+#include <ena_eth_io_defs.h>
+
+#define DRV_MODULE_VER_MAJOR 1
+#define DRV_MODULE_VER_MINOR 1
+#define DRV_MODULE_VER_SUBMINOR 0
+
+#define ENA_IO_TXQ_IDX(q) (2 * (q))
+#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
+/*reverse version of ENA_IO_RXQ_IDX*/
+#define ENA_IO_RXQ_IDX_REV(q) ((q - 1) / 2)
+
+/* While processing submitted and completed descriptors (rx and tx path
+ * respectively) in a loop it is desired to:
+ * - perform batch submissions while populating sumbissmion queue
+ * - avoid blocking transmission of other packets during cleanup phase
+ * Hence the utilization ratio of 1/8 of a queue size.
+ */
+#define ENA_RING_DESCS_RATIO(ring_size) (ring_size / 8)
+
+#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
+#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))
+
+#define GET_L4_HDR_LEN(mbuf) \
+ ((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, \
+ mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
+
+#define ENA_RX_RSS_TABLE_LOG_SIZE 7
+#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE)
+#define ENA_HASH_KEY_SIZE 40
+#define ENA_ETH_SS_STATS 0xFF
+#define ETH_GSTRING_LEN 32
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+#define ENA_MAX_RING_DESC ENA_DEFAULT_RING_SIZE
+#define ENA_MIN_RING_DESC 128
+
+enum ethtool_stringset {
+ ETH_SS_TEST = 0,
+ ETH_SS_STATS,
+};
+
+struct ena_stats {
+ char name[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
+#define ENA_STAT_ENA_COM_ENTRY(stat) { \
+ .name = #stat, \
+ .stat_offset = offsetof(struct ena_com_stats_admin, stat) \
+}
+
+#define ENA_STAT_ENTRY(stat, stat_type) { \
+ .name = #stat, \
+ .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
+}
+
+#define ENA_STAT_RX_ENTRY(stat) \
+ ENA_STAT_ENTRY(stat, rx)
+
+#define ENA_STAT_TX_ENTRY(stat) \
+ ENA_STAT_ENTRY(stat, tx)
+
+#define ENA_STAT_GLOBAL_ENTRY(stat) \
+ ENA_STAT_ENTRY(stat, dev)
+
+/*
+ * Each rte_memzone should have unique name.
+ * To satisfy it, count number of allocation and add it to name.
+ */
+uint32_t ena_alloc_cnt;
+
+static const struct ena_stats ena_stats_global_strings[] = {
+ ENA_STAT_GLOBAL_ENTRY(tx_timeout),
+ ENA_STAT_GLOBAL_ENTRY(io_suspend),
+ ENA_STAT_GLOBAL_ENTRY(io_resume),
+ ENA_STAT_GLOBAL_ENTRY(wd_expired),
+ ENA_STAT_GLOBAL_ENTRY(interface_up),
+ ENA_STAT_GLOBAL_ENTRY(interface_down),
+ ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
+};
+
+static const struct ena_stats ena_stats_tx_strings[] = {
+ ENA_STAT_TX_ENTRY(cnt),
+ ENA_STAT_TX_ENTRY(bytes),
+ ENA_STAT_TX_ENTRY(queue_stop),
+ ENA_STAT_TX_ENTRY(queue_wakeup),
+ ENA_STAT_TX_ENTRY(dma_mapping_err),
+ ENA_STAT_TX_ENTRY(linearize),
+ ENA_STAT_TX_ENTRY(linearize_failed),
+ ENA_STAT_TX_ENTRY(tx_poll),
+ ENA_STAT_TX_ENTRY(doorbells),
+ ENA_STAT_TX_ENTRY(prepare_ctx_err),
+ ENA_STAT_TX_ENTRY(missing_tx_comp),
+ ENA_STAT_TX_ENTRY(bad_req_id),
+};
+
+static const struct ena_stats ena_stats_rx_strings[] = {
+ ENA_STAT_RX_ENTRY(cnt),
+ ENA_STAT_RX_ENTRY(bytes),
+ ENA_STAT_RX_ENTRY(refil_partial),
+ ENA_STAT_RX_ENTRY(bad_csum),
+ ENA_STAT_RX_ENTRY(page_alloc_fail),
+ ENA_STAT_RX_ENTRY(skb_alloc_fail),
+ ENA_STAT_RX_ENTRY(dma_mapping_err),
+ ENA_STAT_RX_ENTRY(bad_desc_num),
+ ENA_STAT_RX_ENTRY(small_copy_len_pkt),
+};
+
+static const struct ena_stats ena_stats_ena_com_strings[] = {
+ ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
+ ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
+ ENA_STAT_ENA_COM_ENTRY(completed_cmd),
+ ENA_STAT_ENA_COM_ENTRY(out_of_space),
+ ENA_STAT_ENA_COM_ENTRY(no_completion),
+};
+
+#define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings)
+#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
+#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
+#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
+
+#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
+ DEV_TX_OFFLOAD_UDP_CKSUM |\
+ DEV_TX_OFFLOAD_IPV4_CKSUM |\
+ DEV_TX_OFFLOAD_TCP_TSO)
+#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
+ PKT_TX_IP_CKSUM |\
+ PKT_TX_TCP_SEG)
+
+/** Vendor ID used by Amazon devices */
+#define PCI_VENDOR_ID_AMAZON 0x1D0F
+/** Amazon devices */
+#define PCI_DEVICE_ID_ENA_VF 0xEC20
+#define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21
+
+#define ENA_TX_OFFLOAD_MASK (\
+ PKT_TX_L4_MASK | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_SEG)
+
+#define ENA_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
+
+int ena_logtype_init;
+int ena_logtype_driver;
+
+static const struct rte_pci_id pci_id_ena_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
+ { .device_id = 0 },
+};
+
+static struct ena_aenq_handlers aenq_handlers;
+
+static int ena_device_init(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx,
+ bool *wd_state);
+static int ena_dev_configure(struct rte_eth_dev *dev);
+static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+static uint16_t eth_ena_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
+static void ena_init_rings(struct ena_adapter *adapter);
+static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int ena_start(struct rte_eth_dev *dev);
+static void ena_stop(struct rte_eth_dev *dev);
+static void ena_close(struct rte_eth_dev *dev);
+static int ena_dev_reset(struct rte_eth_dev *dev);
+static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
+static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
+static void ena_rx_queue_release(void *queue);
+static void ena_tx_queue_release(void *queue);
+static void ena_rx_queue_release_bufs(struct ena_ring *ring);
+static void ena_tx_queue_release_bufs(struct ena_ring *ring);
+static int ena_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int ena_queue_restart(struct ena_ring *ring);
+static int ena_queue_restart_all(struct rte_eth_dev *dev,
+ enum ena_ring_type ring_type);
+static void ena_stats_restart(struct rte_eth_dev *dev);
+static void ena_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int ena_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int ena_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
+static void ena_interrupt_handler_rte(void *cb_arg);
+static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
+
+static const struct eth_dev_ops ena_dev_ops = {
+ .dev_configure = ena_dev_configure,
+ .dev_infos_get = ena_infos_get,
+ .rx_queue_setup = ena_rx_queue_setup,
+ .tx_queue_setup = ena_tx_queue_setup,
+ .dev_start = ena_start,
+ .dev_stop = ena_stop,
+ .link_update = ena_link_update,
+ .stats_get = ena_stats_get,
+ .mtu_set = ena_mtu_set,
+ .rx_queue_release = ena_rx_queue_release,
+ .tx_queue_release = ena_tx_queue_release,
+ .dev_close = ena_close,
+ .dev_reset = ena_dev_reset,
+ .reta_update = ena_rss_reta_update,
+ .reta_query = ena_rss_reta_query,
+};
+
+#define NUMA_NO_NODE SOCKET_ID_ANY
+
+static inline int ena_cpu_to_node(int cpu)
+{
+ struct rte_config *config = rte_eal_get_configuration();
+ struct rte_fbarray *arr = &config->mem_config->memzones;
+ const struct rte_memzone *mz;
+
+ if (unlikely(cpu >= RTE_MAX_MEMZONE))
+ return NUMA_NO_NODE;
+
+ mz = rte_fbarray_get(arr, cpu);
+
+ return mz->socket_id;
+}
+
+static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
+ struct ena_com_rx_ctx *ena_rx_ctx)
+{
+ uint64_t ol_flags = 0;
+ uint32_t packet_type = 0;
+
+ if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
+ packet_type |= RTE_PTYPE_L4_TCP;
+ else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
+ packet_type |= RTE_PTYPE_L4_UDP;
+
+ if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
+ packet_type |= RTE_PTYPE_L3_IPV4;
+ else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
+ packet_type |= RTE_PTYPE_L3_IPV6;
+
+ if (unlikely(ena_rx_ctx->l4_csum_err))
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ if (unlikely(ena_rx_ctx->l3_csum_err))
+ ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ mbuf->ol_flags = ol_flags;
+ mbuf->packet_type = packet_type;
+}
+
+static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ uint64_t queue_offloads)
+{
+ struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
+
+ if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
+ (queue_offloads & QUEUE_OFFLOADS)) {
+ /* check if TSO is required */
+ if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
+ (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
+ ena_tx_ctx->tso_enable = true;
+
+ ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
+ }
+
+ /* check if L3 checksum is needed */
+ if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
+ (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
+ ena_tx_ctx->l3_csum_enable = true;
+
+ if (mbuf->ol_flags & PKT_TX_IPV6) {
+ ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
+ } else {
+ ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
+
+ /* set don't fragment (DF) flag */
+ if (mbuf->packet_type &
+ (RTE_PTYPE_L4_NONFRAG
+ | RTE_PTYPE_INNER_L4_NONFRAG))
+ ena_tx_ctx->df = true;
+ }
+
+ /* check if L4 checksum is needed */
+ if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
+ (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
+ ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
+ ena_tx_ctx->l4_csum_enable = true;
+ } else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
+ (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+ ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
+ ena_tx_ctx->l4_csum_enable = true;
+ } else {
+ ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
+ ena_tx_ctx->l4_csum_enable = false;
+ }
+
+ ena_meta->mss = mbuf->tso_segsz;
+ ena_meta->l3_hdr_len = mbuf->l3_len;
+ ena_meta->l3_hdr_offset = mbuf->l2_len;
+
+ ena_tx_ctx->meta_valid = true;
+ } else {
+ ena_tx_ctx->meta_valid = false;
+ }
+}
+
+static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
+{
+ if (likely(req_id < rx_ring->ring_size))
+ return 0;
+
+ RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id);
+
+ rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+ rx_ring->adapter->trigger_reset = true;
+
+ return -EFAULT;
+}
+
+static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
+{
+ struct ena_tx_buffer *tx_info = NULL;
+
+ if (likely(req_id < tx_ring->ring_size)) {
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->mbuf))
+ return 0;
+ }
+
+ if (tx_info)
+ RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n");
+ else
+ RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id);
+
+ /* Trigger device reset */
+ tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+ tx_ring->adapter->trigger_reset = true;
+ return -EFAULT;
+}
+
+static void ena_config_host_info(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_host_info *host_info;
+ int rc;
+
+ /* Allocate only the host info */
+ rc = ena_com_allocate_host_info(ena_dev);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "Cannot allocate host info\n");
+ return;
+ }
+
+ host_info = ena_dev->host_attr.host_info;
+
+ host_info->os_type = ENA_ADMIN_OS_DPDK;
+ host_info->kernel_ver = RTE_VERSION;
+ snprintf((char *)host_info->kernel_ver_str,
+ sizeof(host_info->kernel_ver_str),
+ "%s", rte_version());
+ host_info->os_dist = RTE_VERSION;
+ snprintf((char *)host_info->os_dist_str,
+ sizeof(host_info->os_dist_str),
+ "%s", rte_version());
+ host_info->driver_version =
+ (DRV_MODULE_VER_MAJOR) |
+ (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
+ (DRV_MODULE_VER_SUBMINOR <<
+ ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
+
+ rc = ena_com_set_host_attributes(ena_dev);
+ if (rc) {
+ if (rc == -ENA_COM_UNSUPPORTED)
+ RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+ else
+ RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+ goto err;
+ }
+
+ return;
+
+err:
+ ena_com_delete_host_info(ena_dev);
+}
+
+static int
+ena_get_sset_count(struct rte_eth_dev *dev, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ /* Workaround for clang:
+ * touch internal structures to prevent
+ * compiler error
+ */
+ ENA_TOUCH(ena_stats_global_strings);
+ ENA_TOUCH(ena_stats_tx_strings);
+ ENA_TOUCH(ena_stats_rx_strings);
+ ENA_TOUCH(ena_stats_ena_com_strings);
+
+ return dev->data->nb_tx_queues *
+ (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) +
+ ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+}
+
+static void ena_config_debug_area(struct ena_adapter *adapter)
+{
+ u32 debug_area_size;
+ int rc, ss_count;
+
+ ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS);
+ if (ss_count <= 0) {
+ RTE_LOG(ERR, PMD, "SS count is negative\n");
+ return;
+ }
+
+ /* allocate 32 bytes for each string and 64bit for the value */
+ debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
+
+ rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "Cannot allocate debug area\n");
+ return;
+ }
+
+ rc = ena_com_set_host_attributes(&adapter->ena_dev);
+ if (rc) {
+ if (rc == -ENA_COM_UNSUPPORTED)
+ RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+ else
+ RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+ goto err;
+ }
+
+ return;
+err:
+ ena_com_delete_debug_area(&adapter->ena_dev);
+}
+
+static void ena_close(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+
+ ena_stop(dev);
+ adapter->state = ENA_ADAPTER_STATE_CLOSED;
+
+ ena_rx_queue_release_all(dev);
+ ena_tx_queue_release_all(dev);
+}
+
+static int
+ena_dev_reset(struct rte_eth_dev *dev)
+{
+ struct rte_mempool *mb_pool_rx[ENA_MAX_NUM_QUEUES];
+ struct rte_eth_dev *eth_dev;
+ struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
+ struct ena_com_dev *ena_dev;
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ struct ena_adapter *adapter;
+ int nb_queues;
+ int rc, i;
+ bool wd_state;
+
+ adapter = (struct ena_adapter *)(dev->data->dev_private);
+ ena_dev = &adapter->ena_dev;
+ eth_dev = adapter->rte_dev;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ intr_handle = &pci_dev->intr_handle;
+ nb_queues = eth_dev->data->nb_rx_queues;
+
+ ena_com_set_admin_running_state(ena_dev, false);
+
+ rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
+ if (rc)
+ RTE_LOG(ERR, PMD, "Device reset failed\n");
+
+ for (i = 0; i < nb_queues; i++)
+ mb_pool_rx[i] = adapter->rx_ring[i].mb_pool;
+
+ ena_rx_queue_release_all(eth_dev);
+ ena_tx_queue_release_all(eth_dev);
+
+ rte_intr_disable(intr_handle);
+
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
+ ena_com_admin_destroy(ena_dev);
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
+ if (rc) {
+ PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
+ return rc;
+ }
+ adapter->wd_state = wd_state;
+
+ rte_intr_enable(intr_handle);
+ ena_com_set_admin_polling_mode(ena_dev, false);
+ ena_com_admin_aenq_enable(ena_dev);
+
+ for (i = 0; i < nb_queues; ++i)
+ ena_rx_queue_setup(eth_dev, i, adapter->rx_ring_size, 0, NULL,
+ mb_pool_rx[i]);
+
+ for (i = 0; i < nb_queues; ++i)
+ ena_tx_queue_setup(eth_dev, i, adapter->tx_ring_size, 0, NULL);
+
+ adapter->trigger_reset = false;
+
+ return 0;
+}
+
+static int ena_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+ int rc, i;
+ u16 entry_value;
+ int conf_idx;
+ int idx;
+
+ if ((reta_size == 0) || (reta_conf == NULL))
+ return -EINVAL;
+
+ if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
+ RTE_LOG(WARNING, PMD,
+ "indirection table %d is bigger than supported (%d)\n",
+ reta_size, ENA_RX_RSS_TABLE_SIZE);
+ return -EINVAL;
+ }
+
+ for (i = 0 ; i < reta_size ; i++) {
+ /* each reta_conf is for 64 entries.
+ * to support 128 we use 2 conf of 64
+ */
+ conf_idx = i / RTE_RETA_GROUP_SIZE;
+ idx = i % RTE_RETA_GROUP_SIZE;
+ if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
+ entry_value =
+ ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
+
+ rc = ena_com_indirect_table_fill_entry(ena_dev,
+ i,
+ entry_value);
+ if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
+ RTE_LOG(ERR, PMD,
+ "Cannot fill indirect table\n");
+ return rc;
+ }
+ }
+ }
+
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
+ RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
+ return rc;
+ }
+
+ RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
+ __func__, reta_size, adapter->rte_dev->data->port_id);
+
+ return 0;
+}
+
+/* Query redirection table. */
+static int ena_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+ int rc;
+ int i;
+ u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
+ int reta_conf_idx;
+ int reta_idx;
+
+ if (reta_size == 0 || reta_conf == NULL ||
+ (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
+ return -EINVAL;
+
+ rc = ena_com_indirect_table_get(ena_dev, indirect_table);
+ if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
+ RTE_LOG(ERR, PMD, "cannot get indirect table\n");
+ return -ENOTSUP;
+ }
+
+ for (i = 0 ; i < reta_size ; i++) {
+ reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
+ reta_idx = i % RTE_RETA_GROUP_SIZE;
+ if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
+ reta_conf[reta_conf_idx].reta[reta_idx] =
+ ENA_IO_RXQ_IDX_REV(indirect_table[i]);
+ }
+
+ return 0;
+}
+
+static int ena_rss_init_default(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+ uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;
+ int rc, i;
+ u32 val;
+
+ rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
+ if (unlikely(rc)) {
+ RTE_LOG(ERR, PMD, "Cannot init indirect table\n");
+ goto err_rss_init;
+ }
+
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
+ val = i % nb_rx_queues;
+ rc = ena_com_indirect_table_fill_entry(ena_dev, i,
+ ENA_IO_RXQ_IDX(val));
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
+ RTE_LOG(ERR, PMD, "Cannot fill indirect table\n");
+ goto err_fill_indir;
+ }
+ }
+
+ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
+ ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
+ RTE_LOG(INFO, PMD, "Cannot fill hash function\n");
+ goto err_fill_indir;
+ }
+
+ rc = ena_com_set_default_hash_ctrl(ena_dev);
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
+ RTE_LOG(INFO, PMD, "Cannot fill hash control\n");
+ goto err_fill_indir;
+ }
+
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
+ RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
+ goto err_fill_indir;
+ }
+ RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n",
+ adapter->rte_dev->data->port_id);
+
+ return 0;
+
+err_fill_indir:
+ ena_com_rss_destroy(ena_dev);
+err_rss_init:
+
+ return rc;
+}
+
+static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
+{
+ struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
+ int nb_queues = dev->data->nb_rx_queues;
+ int i;
+
+ for (i = 0; i < nb_queues; i++)
+ ena_rx_queue_release(queues[i]);
+}
+
+static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
+{
+ struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
+ int nb_queues = dev->data->nb_tx_queues;
+ int i;
+
+ for (i = 0; i < nb_queues; i++)
+ ena_tx_queue_release(queues[i]);
+}
+
+static void ena_rx_queue_release(void *queue)
+{
+ struct ena_ring *ring = (struct ena_ring *)queue;
+ struct ena_adapter *adapter = ring->adapter;
+ int ena_qid;
+
+ ena_assert_msg(ring->configured,
+ "API violation - releasing not configured queue");
+ ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
+ "API violation");
+
+ /* Destroy HW queue */
+ ena_qid = ENA_IO_RXQ_IDX(ring->id);
+ ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);
+
+ /* Free all bufs */
+ ena_rx_queue_release_bufs(ring);
+
+ /* Free ring resources */
+ if (ring->rx_buffer_info)
+ rte_free(ring->rx_buffer_info);
+ ring->rx_buffer_info = NULL;
+
+ if (ring->empty_rx_reqs)
+ rte_free(ring->empty_rx_reqs);
+ ring->empty_rx_reqs = NULL;
+
+ ring->configured = 0;
+
+ RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n",
+ ring->port_id, ring->id);
+}
+
+static void ena_tx_queue_release(void *queue)
+{
+ struct ena_ring *ring = (struct ena_ring *)queue;
+ struct ena_adapter *adapter = ring->adapter;
+ int ena_qid;
+
+ ena_assert_msg(ring->configured,
+ "API violation. Releasing not configured queue");
+ ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
+ "API violation");
+
+ /* Destroy HW queue */
+ ena_qid = ENA_IO_TXQ_IDX(ring->id);
+ ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);
+
+ /* Free all bufs */
+ ena_tx_queue_release_bufs(ring);
+
+ /* Free ring resources */
+ if (ring->tx_buffer_info)
+ rte_free(ring->tx_buffer_info);
+
+ if (ring->empty_tx_reqs)
+ rte_free(ring->empty_tx_reqs);
+
+ ring->empty_tx_reqs = NULL;
+ ring->tx_buffer_info = NULL;
+
+ ring->configured = 0;
+
+ RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n",
+ ring->port_id, ring->id);
+}
+
+static void ena_rx_queue_release_bufs(struct ena_ring *ring)
+{
+ unsigned int ring_mask = ring->ring_size - 1;
+
+ while (ring->next_to_clean != ring->next_to_use) {
+ struct rte_mbuf *m =
+ ring->rx_buffer_info[ring->next_to_clean & ring_mask];
+
+ if (m)
+ rte_mbuf_raw_free(m);
+
+ ring->next_to_clean++;
+ }
+}
+
+static void ena_tx_queue_release_bufs(struct ena_ring *ring)
+{
+ unsigned int i;
+
+ for (i = 0; i < ring->ring_size; ++i) {
+ struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];
+
+ if (tx_buf->mbuf)
+ rte_pktmbuf_free(tx_buf->mbuf);
+
+ ring->next_to_clean++;
+ }
+}
+
+static int ena_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct rte_eth_link *link = &dev->data->dev_link;
+ struct ena_adapter *adapter;
+
+ adapter = (struct ena_adapter *)(dev->data->dev_private);
+
+ link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+ link->link_speed = ETH_SPEED_NUM_NONE;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ return 0;
+}
+
+static int ena_queue_restart_all(struct rte_eth_dev *dev,
+ enum ena_ring_type ring_type)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ struct ena_ring *queues = NULL;
+ int nb_queues;
+ int i = 0;
+ int rc = 0;
+
+ if (ring_type == ENA_RING_TYPE_RX) {
+ queues = adapter->rx_ring;
+ nb_queues = dev->data->nb_rx_queues;
+ } else {
+ queues = adapter->tx_ring;
+ nb_queues = dev->data->nb_tx_queues;
+ }
+ for (i = 0; i < nb_queues; i++) {
+ if (queues[i].configured) {
+ if (ring_type == ENA_RING_TYPE_RX) {
+ ena_assert_msg(
+ dev->data->rx_queues[i] == &queues[i],
+ "Inconsistent state of rx queues\n");
+ } else {
+ ena_assert_msg(
+ dev->data->tx_queues[i] == &queues[i],
+ "Inconsistent state of tx queues\n");
+ }
+
+ rc = ena_queue_restart(&queues[i]);
+
+ if (rc) {
+ PMD_INIT_LOG(ERR,
+ "failed to restart queue %d type(%d)",
+ i, ring_type);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
+{
+ uint32_t max_frame_len = adapter->max_mtu;
+
+ if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_JUMBO_FRAME)
+ max_frame_len =
+ adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;
+
+ return max_frame_len;
+}
+
+static int ena_check_valid_conf(struct ena_adapter *adapter)
+{
+ uint32_t max_frame_len = ena_get_mtu_conf(adapter);
+
+ if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
+ PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
+ "max mtu: %d, min mtu: %d\n",
+ max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
+ return ENA_COM_UNSUPPORTED;
+ }
+
+ return 0;
+}
+
+static int
+ena_calc_queue_size(struct ena_com_dev *ena_dev,
+ u16 *max_tx_sgl_size,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ uint32_t queue_size = ENA_DEFAULT_RING_SIZE;
+
+ queue_size = RTE_MIN(queue_size,
+ get_feat_ctx->max_queues.max_cq_depth);
+ queue_size = RTE_MIN(queue_size,
+ get_feat_ctx->max_queues.max_sq_depth);
+
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ queue_size = RTE_MIN(queue_size,
+ get_feat_ctx->max_queues.max_llq_depth);
+
+ /* Round down to power of 2 */
+ if (!rte_is_power_of_2(queue_size))
+ queue_size = rte_align32pow2(queue_size >> 1);
+
+ if (unlikely(queue_size == 0)) {
+ PMD_INIT_LOG(ERR, "Invalid queue size");
+ return -EFAULT;
+ }
+
+ *max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
+ get_feat_ctx->max_queues.max_packet_tx_descs);
+
+ return queue_size;
+}
+
+static void ena_stats_restart(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+
+ rte_atomic64_init(&adapter->drv_stats->ierrors);
+ rte_atomic64_init(&adapter->drv_stats->oerrors);
+ rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
+}
+
+static int ena_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct ena_admin_basic_stats ena_stats;
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+ int rc;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -ENOTSUP;
+
+ memset(&ena_stats, 0, sizeof(ena_stats));
+ rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
+ if (unlikely(rc)) {
+ RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA");
+ return rc;
+ }
+
+ /* Set of basic statistics from ENA */
+ stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
+ ena_stats.rx_pkts_low);
+ stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
+ ena_stats.tx_pkts_low);
+ stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
+ ena_stats.rx_bytes_low);
+ stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
+ ena_stats.tx_bytes_low);
+ stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high,
+ ena_stats.rx_drops_low);
+
+ /* Driver related stats */
+ stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
+ stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
+ stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
+ return 0;
+}
+
+static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct ena_adapter *adapter;
+ struct ena_com_dev *ena_dev;
+ int rc = 0;
+
+ ena_assert_msg(dev->data != NULL, "Uninitialized device");
+ ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
+ adapter = (struct ena_adapter *)(dev->data->dev_private);
+
+ ena_dev = &adapter->ena_dev;
+ ena_assert_msg(ena_dev != NULL, "Uninitialized device");
+
+ if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
+ RTE_LOG(ERR, PMD,
+ "Invalid MTU setting. new_mtu: %d "
+ "max mtu: %d min mtu: %d\n",
+ mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
+ return -EINVAL;
+ }
+
+ rc = ena_com_set_dev_mtu(ena_dev, mtu);
+ if (rc)
+ RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu);
+ else
+ RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu);
+
+ return rc;
+}
+
+static int ena_start(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ uint64_t ticks;
+ int rc = 0;
+
+ rc = ena_check_valid_conf(adapter);
+ if (rc)
+ return rc;
+
+ rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX);
+ if (rc)
+ return rc;
+
+ rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX);
+ if (rc)
+ return rc;
+
+ if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
+ ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
+ rc = ena_rss_init_default(adapter);
+ if (rc)
+ return rc;
+ }
+
+ ena_stats_restart(dev);
+
+ adapter->timestamp_wd = rte_get_timer_cycles();
+ adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
+
+ ticks = rte_get_timer_hz();
+ rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
+ ena_timer_wd_callback, adapter);
+
+ adapter->state = ENA_ADAPTER_STATE_RUNNING;
+
+ return 0;
+}
+
+static void ena_stop(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+
+ rte_timer_stop_sync(&adapter->timer_wd);
+
+ adapter->state = ENA_ADAPTER_STATE_STOPPED;
+}
+
+static int ena_queue_restart(struct ena_ring *ring)
+{
+ int rc, bufs_num;
+
+ ena_assert_msg(ring->configured == 1,
+ "Trying to restart unconfigured queue\n");
+
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ if (ring->type == ENA_RING_TYPE_TX)
+ return 0;
+
+ bufs_num = ring->ring_size - 1;
+ rc = ena_populate_rx_queue(ring, bufs_num);
+ if (rc != bufs_num) {
+ PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
+ return ENA_COM_FAULT;
+ }
+
+ return 0;
+}
+
+static int ena_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ __rte_unused unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct ena_com_create_io_ctx ctx =
+ /* policy set to _HOST just to satisfy icc compiler */
+ { ENA_ADMIN_PLACEMENT_POLICY_HOST,
+ ENA_COM_IO_QUEUE_DIRECTION_TX, 0, 0, 0, 0 };
+ struct ena_ring *txq = NULL;
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ unsigned int i;
+ int ena_qid;
+ int rc;
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+
+ txq = &adapter->tx_ring[queue_idx];
+
+ if (txq->configured) {
+ RTE_LOG(CRIT, PMD,
+ "API violation. Queue %d is already configured\n",
+ queue_idx);
+ return ENA_COM_FAULT;
+ }
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ RTE_LOG(ERR, PMD,
+ "Unsupported size of TX queue: %d is not a power of 2.",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ if (nb_desc > adapter->tx_ring_size) {
+ RTE_LOG(ERR, PMD,
+ "Unsupported size of TX queue (max size: %d)\n",
+ adapter->tx_ring_size);
+ return -EINVAL;
+ }
+
+ ena_qid = ENA_IO_TXQ_IDX(queue_idx);
+
+ ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
+ ctx.qid = ena_qid;
+ ctx.msix_vector = -1; /* admin interrupts not used */
+ ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
+ ctx.queue_size = adapter->tx_ring_size;
+ ctx.numa_node = ena_cpu_to_node(queue_idx);
+
+ rc = ena_com_create_io_queue(ena_dev, &ctx);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "failed to create io TX queue #%d (qid:%d) rc: %d\n",
+ queue_idx, ena_qid, rc);
+ return rc;
+ }
+ txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
+ txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
+
+ rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+ &txq->ena_com_io_sq,
+ &txq->ena_com_io_cq);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
+ queue_idx, rc);
+ goto err_destroy_io_queue;
+ }
+
+ ena_com_update_numa_node(txq->ena_com_io_cq, ctx.numa_node);
+
+ txq->port_id = dev->data->port_id;
+ txq->next_to_clean = 0;
+ txq->next_to_use = 0;
+ txq->ring_size = nb_desc;
+
+ txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
+ sizeof(struct ena_tx_buffer) *
+ txq->ring_size,
+ RTE_CACHE_LINE_SIZE);
+ if (!txq->tx_buffer_info) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n");
+ rc = -ENOMEM;
+ goto err_destroy_io_queue;
+ }
+
+ txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
+ sizeof(u16) * txq->ring_size,
+ RTE_CACHE_LINE_SIZE);
+ if (!txq->empty_tx_reqs) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n");
+ rc = -ENOMEM;
+ goto err_free;
+ }
+
+ for (i = 0; i < txq->ring_size; i++)
+ txq->empty_tx_reqs[i] = i;
+
+ if (tx_conf != NULL) {
+ txq->offloads =
+ tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ }
+
+ /* Store pointer to this queue in upper layer */
+ txq->configured = 1;
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return 0;
+
+err_free:
+ rte_free(txq->tx_buffer_info);
+
+err_destroy_io_queue:
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return rc;
+}
+
+static int ena_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct ena_com_create_io_ctx ctx =
+ /* policy set to _HOST just to satisfy icc compiler */
+ { ENA_ADMIN_PLACEMENT_POLICY_HOST,
+ ENA_COM_IO_QUEUE_DIRECTION_RX, 0, 0, 0, 0 };
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ struct ena_ring *rxq = NULL;
+ uint16_t ena_qid = 0;
+ int i, rc = 0;
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+
+ rxq = &adapter->rx_ring[queue_idx];
+ if (rxq->configured) {
+ RTE_LOG(CRIT, PMD,
+ "API violation. Queue %d is already configured\n",
+ queue_idx);
+ return ENA_COM_FAULT;
+ }
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ RTE_LOG(ERR, PMD,
+ "Unsupported size of RX queue: %d is not a power of 2.",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ if (nb_desc > adapter->rx_ring_size) {
+ RTE_LOG(ERR, PMD,
+ "Unsupported size of RX queue (max size: %d)\n",
+ adapter->rx_ring_size);
+ return -EINVAL;
+ }
+
+ ena_qid = ENA_IO_RXQ_IDX(queue_idx);
+
+ ctx.qid = ena_qid;
+ ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
+ ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ ctx.msix_vector = -1; /* admin interrupts not used */
+ ctx.queue_size = adapter->rx_ring_size;
+ ctx.numa_node = ena_cpu_to_node(queue_idx);
+
+ rc = ena_com_create_io_queue(ena_dev, &ctx);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
+ queue_idx, rc);
+ return rc;
+ }
+
+ rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
+ rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
+
+ rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+ &rxq->ena_com_io_sq,
+ &rxq->ena_com_io_cq);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
+ queue_idx, rc);
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return rc;
+ }
+
+ rxq->port_id = dev->data->port_id;
+ rxq->next_to_clean = 0;
+ rxq->next_to_use = 0;
+ rxq->ring_size = nb_desc;
+ rxq->mb_pool = mp;
+
+ rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
+ sizeof(struct rte_mbuf *) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->rx_buffer_info) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return -ENOMEM;
+ }
+
+ rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
+ sizeof(uint16_t) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->empty_rx_reqs) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
+ rte_free(rxq->rx_buffer_info);
+ rxq->rx_buffer_info = NULL;
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nb_desc; i++)
+ rxq->empty_tx_reqs[i] = i;
+
+ /* Store pointer to this queue in upper layer */
+ rxq->configured = 1;
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ return rc;
+}
+
+static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
+{
+ unsigned int i;
+ int rc;
+ uint16_t ring_size = rxq->ring_size;
+ uint16_t ring_mask = ring_size - 1;
+ uint16_t next_to_use = rxq->next_to_use;
+ uint16_t in_use, req_id;
+ struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
+
+ if (unlikely(!count))
+ return 0;
+
+ in_use = rxq->next_to_use - rxq->next_to_clean;
+ ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
+
+ count = RTE_MIN(count,
+ (uint16_t)(ring_size - (next_to_use & ring_mask)));
+
+ /* get resources for incoming packets */
+ rc = rte_mempool_get_bulk(rxq->mb_pool,
+ (void **)(&mbufs[next_to_use & ring_mask]),
+ count);
+ if (unlikely(rc < 0)) {
+ rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
+ PMD_RX_LOG(DEBUG, "there are no enough free buffers");
+ return 0;
+ }
+
+ for (i = 0; i < count; i++) {
+ uint16_t next_to_use_masked = next_to_use & ring_mask;
+ struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
+ struct ena_com_buf ebuf;
+
+ rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
+
+ req_id = rxq->empty_rx_reqs[next_to_use_masked];
+ rc = validate_rx_req_id(rxq, req_id);
+ if (unlikely(rc < 0))
+ break;
+
+ /* prepare physical address for DMA transaction */
+ ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
+ ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
+ /* pass resource to device */
+ rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
+ &ebuf, req_id);
+ if (unlikely(rc)) {
+ rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
+ count - i);
+ RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
+ break;
+ }
+ next_to_use++;
+ }
+
+ if (unlikely(i < count))
+ RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
+ "buffers (from %d)\n", rxq->id, i, count);
+
+ /* When we submitted free recources to device... */
+ if (likely(i > 0)) {
+ /* ...let HW know that it can fill buffers with data
+ *
+ * Add memory barrier to make sure the desc were written before
+ * issue a doorbell
+ */
+ rte_wmb();
+ ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
+
+ rxq->next_to_use = next_to_use;
+ }
+
+ return i;
+}
+
+static int ena_device_init(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx,
+ bool *wd_state)
+{
+ uint32_t aenq_groups;
+ int rc;
+ bool readless_supported;
+
+ /* Initialize mmio registers */
+ rc = ena_com_mmio_reg_read_request_init(ena_dev);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "failed to init mmio read less\n");
+ return rc;
+ }
+
+ /* The PCIe configuration space revision id indicate if mmio reg
+ * read is disabled.
+ */
+ readless_supported =
+ !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
+ & ENA_MMIO_DISABLE_REG_READ);
+ ena_com_set_mmio_read_mode(ena_dev, readless_supported);
+
+ /* reset device */
+ rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "cannot reset device\n");
+ goto err_mmio_read_less;
+ }
+
+ /* check FW version */
+ rc = ena_com_validate_version(ena_dev);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "device version is too low\n");
+ goto err_mmio_read_less;
+ }
+
+ ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
+
+ /* ENA device administration layer init */
+ rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "cannot initialize ena admin queue with device\n");
+ goto err_mmio_read_less;
+ }
+
+ /* To enable the msix interrupts the driver needs to know the number
+ * of queues. So the driver uses polling mode to retrieve this
+ * information.
+ */
+ ena_com_set_admin_polling_mode(ena_dev, true);
+
+ ena_config_host_info(ena_dev);
+
+ /* Get Device Attributes and features */
+ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "cannot get attribute for ena device rc= %d\n", rc);
+ goto err_admin_init;
+ }
+
+ aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
+ BIT(ENA_ADMIN_NOTIFICATION) |
+ BIT(ENA_ADMIN_KEEP_ALIVE) |
+ BIT(ENA_ADMIN_FATAL_ERROR) |
+ BIT(ENA_ADMIN_WARNING);
+
+ aenq_groups &= get_feat_ctx->aenq.supported_groups;
+ rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "Cannot configure aenq groups rc: %d\n", rc);
+ goto err_admin_init;
+ }
+
+ *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
+
+ return 0;
+
+err_admin_init:
+ ena_com_admin_destroy(ena_dev);
+
+err_mmio_read_less:
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ return rc;
+}
+
+static void ena_interrupt_handler_rte(void *cb_arg)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)cb_arg;
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+
+ ena_com_admin_q_comp_intr_handler(ena_dev);
+ if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
+ ena_com_aenq_intr_handler(ena_dev, adapter);
+}
+
+static void check_for_missing_keep_alive(struct ena_adapter *adapter)
+{
+ if (!adapter->wd_state)
+ return;
+
+ if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ return;
+
+ if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
+ adapter->keep_alive_timeout)) {
+ RTE_LOG(ERR, PMD, "Keep alive timeout\n");
+ adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
+ adapter->trigger_reset = true;
+ }
+}
+
+/* Check if admin queue is enabled */
+static void check_for_admin_com_state(struct ena_adapter *adapter)
+{
+ if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
+ RTE_LOG(ERR, PMD, "ENA admin queue is not in running state!\n");
+ adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
+ adapter->trigger_reset = true;
+ }
+}
+
+static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
+ void *arg)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)arg;
+ struct rte_eth_dev *dev = adapter->rte_dev;
+
+ check_for_missing_keep_alive(adapter);
+ check_for_admin_com_state(adapter);
+
+ if (unlikely(adapter->trigger_reset)) {
+ RTE_LOG(ERR, PMD, "Trigger reset is on\n");
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ }
+}
+
+static int ena_calc_io_queue_num(__rte_unused struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ int io_sq_num, io_cq_num, io_queue_num;
+
+ io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+ io_cq_num = get_feat_ctx->max_queues.max_cq_num;
+
+ io_queue_num = RTE_MIN(io_sq_num, io_cq_num);
+
+ if (unlikely(io_queue_num == 0)) {
+ RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n");
+ return -EFAULT;
+ }
+
+ return io_queue_num;
+}
+
+static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(eth_dev->data->dev_private);
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ int queue_size, rc;
+ u16 tx_sgl_size = 0;
+
+ static int adapters_found;
+ bool wd_state;
+
+ memset(adapter, 0, sizeof(struct ena_adapter));
+ ena_dev = &adapter->ena_dev;
+
+ eth_dev->dev_ops = &ena_dev_ops;
+ eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
+ eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
+ adapter->rte_eth_dev_data = eth_dev->data;
+ adapter->rte_dev = eth_dev;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ adapter->pdev = pci_dev;
+
+ PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
+ pci_dev->addr.domain,
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ intr_handle = &pci_dev->intr_handle;
+
+ adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
+ adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
+
+ if (!adapter->regs) {
+ PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
+ ENA_REGS_BAR);
+ return -ENXIO;
+ }
+
+ ena_dev->reg_bar = adapter->regs;
+ ena_dev->dmadev = adapter->pdev;
+
+ adapter->id_number = adapters_found;
+
+ snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
+ adapter->id_number);
+
+ /* device specific initialization routine */
+ rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
+ if (rc) {
+ PMD_INIT_LOG(CRIT, "Failed to init ENA device");
+ goto err;
+ }
+ adapter->wd_state = wd_state;
+
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ adapter->num_queues = ena_calc_io_queue_num(ena_dev,
+ &get_feat_ctx);
+
+ queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx);
+ if (queue_size <= 0 || adapter->num_queues <= 0) {
+ rc = -EFAULT;
+ goto err_device_destroy;
+ }
+
+ adapter->tx_ring_size = queue_size;
+ adapter->rx_ring_size = queue_size;
+
+ adapter->max_tx_sgl_size = tx_sgl_size;
+
+ /* prepare ring structures */
+ ena_init_rings(adapter);
+
+ ena_config_debug_area(adapter);
+
+ /* Set max MTU for this device */
+ adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
+
+ /* set device support for TSO */
+ adapter->tso4_supported = get_feat_ctx.offload.tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+
+ /* Copy MAC address and point DPDK to it */
+ eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr;
+ ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr,
+ (struct ether_addr *)adapter->mac_addr);
+
+ adapter->drv_stats = rte_zmalloc("adapter stats",
+ sizeof(*adapter->drv_stats),
+ RTE_CACHE_LINE_SIZE);
+ if (!adapter->drv_stats) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n");
+ rc = -ENOMEM;
+ goto err_delete_debug_area;
+ }
+
+ rte_intr_callback_register(intr_handle,
+ ena_interrupt_handler_rte,
+ adapter);
+ rte_intr_enable(intr_handle);
+ ena_com_set_admin_polling_mode(ena_dev, false);
+ ena_com_admin_aenq_enable(ena_dev);
+
+ if (adapters_found == 0)
+ rte_timer_subsystem_init();
+ rte_timer_init(&adapter->timer_wd);
+
+ adapters_found++;
+ adapter->state = ENA_ADAPTER_STATE_INIT;
+
+ return 0;
+
+err_delete_debug_area:
+ ena_com_delete_debug_area(ena_dev);
+
+err_device_destroy:
+ ena_com_delete_host_info(ena_dev);
+ ena_com_admin_destroy(ena_dev);
+
+err:
+ return rc;
+}
+
+static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(eth_dev->data->dev_private);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
+ ena_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ eth_dev->tx_pkt_prepare = NULL;
+
+ rte_free(adapter->drv_stats);
+ adapter->drv_stats = NULL;
+
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ ena_interrupt_handler_rte,
+ adapter);
+
+ adapter->state = ENA_ADAPTER_STATE_FREE;
+
+ return 0;
+}
+
+static int ena_dev_configure(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+
+ adapter->state = ENA_ADAPTER_STATE_CONFIG;
+
+ adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
+ adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
+ return 0;
+}
+
+static void ena_init_rings(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ struct ena_ring *ring = &adapter->tx_ring[i];
+
+ ring->configured = 0;
+ ring->type = ENA_RING_TYPE_TX;
+ ring->adapter = adapter;
+ ring->id = i;
+ ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
+ ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
+ ring->sgl_size = adapter->max_tx_sgl_size;
+ }
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ struct ena_ring *ring = &adapter->rx_ring[i];
+
+ ring->configured = 0;
+ ring->type = ENA_RING_TYPE_RX;
+ ring->adapter = adapter;
+ ring->id = i;
+ }
+}
+
+static void ena_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ena_adapter *adapter;
+ struct ena_com_dev *ena_dev;
+ struct ena_com_dev_get_features_ctx feat;
+ uint64_t rx_feat = 0, tx_feat = 0;
+ int rc = 0;
+
+ ena_assert_msg(dev->data != NULL, "Uninitialized device");
+ ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
+ adapter = (struct ena_adapter *)(dev->data->dev_private);
+
+ ena_dev = &adapter->ena_dev;
+ ena_assert_msg(ena_dev != NULL, "Uninitialized device");
+
+ dev_info->speed_capa =
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G |
+ ETH_LINK_SPEED_5G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G |
+ ETH_LINK_SPEED_100G;
+
+ /* Get supported features from HW */
+ rc = ena_com_get_dev_attr_feat(ena_dev, &feat);
+ if (unlikely(rc)) {
+ RTE_LOG(ERR, PMD,
+ "Cannot get attribute for ena device rc= %d\n", rc);
+ return;
+ }
+
+ /* Set Tx & Rx features available for device */
+ if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
+ tx_feat |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ if (feat.offload.tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
+ tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if (feat.offload.rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
+ rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ /* Inform framework about available features */
+ dev_info->rx_offload_capa = rx_feat;
+ dev_info->rx_queue_offload_capa = rx_feat;
+ dev_info->tx_offload_capa = tx_feat;
+ dev_info->tx_queue_offload_capa = tx_feat;
+
+ dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
+ dev_info->max_rx_pktlen = adapter->max_mtu;
+ dev_info->max_mac_addrs = 1;
+
+ dev_info->max_rx_queues = adapter->num_queues;
+ dev_info->max_tx_queues = adapter->num_queues;
+ dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
+
+ adapter->tx_supported_offloads = tx_feat;
+ adapter->rx_supported_offloads = rx_feat;
+
+ dev_info->rx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+ dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+
+ dev_info->tx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+ dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+ dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+ feat.max_queues.max_packet_tx_descs);
+ dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+ feat.max_queues.max_packet_tx_descs);
+}
+
+static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
+ unsigned int ring_size = rx_ring->ring_size;
+ unsigned int ring_mask = ring_size - 1;
+ uint16_t next_to_clean = rx_ring->next_to_clean;
+ uint16_t desc_in_use = 0;
+ uint16_t req_id;
+ unsigned int recv_idx = 0;
+ struct rte_mbuf *mbuf = NULL;
+ struct rte_mbuf *mbuf_head = NULL;
+ struct rte_mbuf *mbuf_prev = NULL;
+ struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info;
+ unsigned int completed;
+
+ struct ena_com_rx_ctx ena_rx_ctx;
+ int rc = 0;
+
+ /* Check adapter state */
+ if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
+ RTE_LOG(ALERT, PMD,
+ "Trying to receive pkts while device is NOT running\n");
+ return 0;
+ }
+
+ desc_in_use = rx_ring->next_to_use - next_to_clean;
+ if (unlikely(nb_pkts > desc_in_use))
+ nb_pkts = desc_in_use;
+
+ for (completed = 0; completed < nb_pkts; completed++) {
+ int segments = 0;
+
+ ena_rx_ctx.max_bufs = rx_ring->ring_size;
+ ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
+ ena_rx_ctx.descs = 0;
+ /* receive packet context */
+ rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
+ rx_ring->ena_com_io_sq,
+ &ena_rx_ctx);
+ if (unlikely(rc)) {
+ RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
+ rx_ring->adapter->trigger_reset = true;
+ return 0;
+ }
+
+ if (unlikely(ena_rx_ctx.descs == 0))
+ break;
+
+ while (segments < ena_rx_ctx.descs) {
+ req_id = ena_rx_ctx.ena_bufs[segments].req_id;
+ rc = validate_rx_req_id(rx_ring, req_id);
+ if (unlikely(rc))
+ break;
+
+ mbuf = rx_buff_info[req_id];
+ mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->refcnt = 1;
+ mbuf->next = NULL;
+ if (unlikely(segments == 0)) {
+ mbuf->nb_segs = ena_rx_ctx.descs;
+ mbuf->port = rx_ring->port_id;
+ mbuf->pkt_len = 0;
+ mbuf_head = mbuf;
+ } else {
+ /* for multi-segment pkts create mbuf chain */
+ mbuf_prev->next = mbuf;
+ }
+ mbuf_head->pkt_len += mbuf->data_len;
+
+ mbuf_prev = mbuf;
+ rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
+ req_id;
+ segments++;
+ next_to_clean++;
+ }
+
+ /* fill mbuf attributes if any */
+ ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
+ mbuf_head->hash.rss = (uint32_t)rx_ring->id;
+
+ /* pass to DPDK application head mbuf */
+ rx_pkts[recv_idx] = mbuf_head;
+ recv_idx++;
+ }
+
+ rx_ring->next_to_clean = next_to_clean;
+
+ desc_in_use = desc_in_use - completed + 1;
+ /* Burst refill to save doorbells, memory barriers, const interval */
+ if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
+ ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
+
+ return recv_idx;
+}
+
+static uint16_t
+eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int32_t ret;
+ uint32_t i;
+ struct rte_mbuf *m;
+ struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
+ struct ipv4_hdr *ip_hdr;
+ uint64_t ol_flags;
+ uint16_t frag_field;
+
+ for (i = 0; i != nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ if (!(ol_flags & PKT_TX_IPV4))
+ continue;
+
+ /* If there was not L2 header length specified, assume it is
+ * length of the ethernet header.
+ */
+ if (unlikely(m->l2_len == 0))
+ m->l2_len = sizeof(struct ether_hdr);
+
+ ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ m->l2_len);
+ frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
+
+ if ((frag_field & IPV4_HDR_DF_FLAG) != 0) {
+ m->packet_type |= RTE_PTYPE_L4_NONFRAG;
+
+ /* If IPv4 header has DF flag enabled and TSO support is
+ * disabled, partial chcecksum should not be calculated.
+ */
+ if (!tx_ring->adapter->tso4_supported)
+ continue;
+ }
+
+ if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
+ (ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_SCTP_CKSUM) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+
+ /* In case we are supposed to TSO and have DF not set (DF=0)
+ * hardware must be provided with partial checksum, otherwise
+ * it will take care of necessary calculations.
+ */
+
+ ret = rte_net_intel_cksum_flags_prepare(m,
+ ol_flags & ~PKT_TX_TCP_SEG);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+static void ena_update_hints(struct ena_adapter *adapter,
+ struct ena_admin_ena_hw_hints *hints)
+{
+ if (hints->admin_completion_tx_timeout)
+ adapter->ena_dev.admin_queue.completion_timeout =
+ hints->admin_completion_tx_timeout * 1000;
+
+ if (hints->mmio_read_timeout)
+ /* convert to usec */
+ adapter->ena_dev.mmio_read.reg_read_to =
+ hints->mmio_read_timeout * 1000;
+
+ if (hints->driver_watchdog_timeout) {
+ if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
+ else
+ // Convert msecs to ticks
+ adapter->keep_alive_timeout =
+ (hints->driver_watchdog_timeout *
+ rte_get_timer_hz()) / 1000;
+ }
+}
+
+static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
+ struct rte_mbuf *mbuf)
+{
+ int num_segments, rc;
+
+ num_segments = mbuf->nb_segs;
+
+ if (likely(num_segments < tx_ring->sgl_size))
+ return 0;
+
+ rc = rte_pktmbuf_linearize(mbuf);
+ if (unlikely(rc))
+ RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n");
+
+ return rc;
+}
+
+static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
+ uint16_t next_to_use = tx_ring->next_to_use;
+ uint16_t next_to_clean = tx_ring->next_to_clean;
+ struct rte_mbuf *mbuf;
+ unsigned int ring_size = tx_ring->ring_size;
+ unsigned int ring_mask = ring_size - 1;
+ struct ena_com_tx_ctx ena_tx_ctx;
+ struct ena_tx_buffer *tx_info;
+ struct ena_com_buf *ebuf;
+ uint16_t rc, req_id, total_tx_descs = 0;
+ uint16_t sent_idx = 0, empty_tx_reqs;
+ int nb_hw_desc;
+
+ /* Check adapter state */
+ if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
+ RTE_LOG(ALERT, PMD,
+ "Trying to xmit pkts while device is NOT running\n");
+ return 0;
+ }
+
+ empty_tx_reqs = ring_size - (next_to_use - next_to_clean);
+ if (nb_pkts > empty_tx_reqs)
+ nb_pkts = empty_tx_reqs;
+
+ for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
+ mbuf = tx_pkts[sent_idx];
+
+ rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
+ if (unlikely(rc))
+ break;
+
+ req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ tx_info->mbuf = mbuf;
+ tx_info->num_of_bufs = 0;
+ ebuf = tx_info->bufs;
+
+ /* Prepare TX context */
+ memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
+ memset(&ena_tx_ctx.ena_meta, 0x0,
+ sizeof(struct ena_com_tx_meta));
+ ena_tx_ctx.ena_bufs = ebuf;
+ ena_tx_ctx.req_id = req_id;
+ if (tx_ring->tx_mem_queue_type ==
+ ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* prepare the push buffer with
+ * virtual address of the data
+ */
+ ena_tx_ctx.header_len =
+ RTE_MIN(mbuf->data_len,
+ tx_ring->tx_max_header_size);
+ ena_tx_ctx.push_header =
+ (void *)((char *)mbuf->buf_addr +
+ mbuf->data_off);
+ } /* there's no else as we take advantage of memset zeroing */
+
+ /* Set TX offloads flags, if applicable */
+ ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);
+
+ if (unlikely(mbuf->ol_flags &
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
+ rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
+
+ rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]);
+
+ /* Process first segment taking into
+ * consideration pushed header
+ */
+ if (mbuf->data_len > ena_tx_ctx.header_len) {
+ ebuf->paddr = mbuf->buf_iova +
+ mbuf->data_off +
+ ena_tx_ctx.header_len;
+ ebuf->len = mbuf->data_len - ena_tx_ctx.header_len;
+ ebuf++;
+ tx_info->num_of_bufs++;
+ }
+
+ while ((mbuf = mbuf->next) != NULL) {
+ ebuf->paddr = mbuf->buf_iova + mbuf->data_off;
+ ebuf->len = mbuf->data_len;
+ ebuf++;
+ tx_info->num_of_bufs++;
+ }
+
+ ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+
+ /* Write data to device */
+ rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
+ &ena_tx_ctx, &nb_hw_desc);
+ if (unlikely(rc))
+ break;
+
+ tx_info->tx_descs = nb_hw_desc;
+
+ next_to_use++;
+ }
+
+ /* If there are ready packets to be xmitted... */
+ if (sent_idx > 0) {
+ /* ...let HW do its best :-) */
+ rte_wmb();
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+
+ tx_ring->next_to_use = next_to_use;
+ }
+
+ /* Clear complete packets */
+ while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
+ rc = validate_tx_req_id(tx_ring, req_id);
+ if (rc)
+ break;
+
+ /* Get Tx info & store how many descs were processed */
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ total_tx_descs += tx_info->tx_descs;
+
+ /* Free whole mbuf chain */
+ mbuf = tx_info->mbuf;
+ rte_pktmbuf_free(mbuf);
+ tx_info->mbuf = NULL;
+
+ /* Put back descriptor to the ring for reuse */
+ tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
+ next_to_clean++;
+
+ /* If too many descs to clean, leave it for another run */
+ if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
+ break;
+ }
+
+ if (total_tx_descs > 0) {
+ /* acknowledge completion of sent packets */
+ ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
+ tx_ring->next_to_clean = next_to_clean;
+ }
+
+ return sent_idx;
+}
+
+/*********************************************************************
+ * PMD configuration
+ *********************************************************************/
+static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct ena_adapter), eth_ena_dev_init);
+}
+
+static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
+}
+
+static struct rte_pci_driver rte_ena_pmd = {
+ .id_table = pci_id_ena_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_WC_ACTIVATE,
+ .probe = eth_ena_pci_probe,
+ .remove = eth_ena_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(ena_init_log)
+{
+ ena_logtype_init = rte_log_register("pmd.net.ena.init");
+ if (ena_logtype_init >= 0)
+ rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE);
+ ena_logtype_driver = rte_log_register("pmd.net.ena.driver");
+ if (ena_logtype_driver >= 0)
+ rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
+}
+
+/******************************************************************************
+ ******************************** AENQ Handlers *******************************
+ *****************************************************************************/
+static void ena_update_on_link_change(void *adapter_data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct rte_eth_dev *eth_dev;
+ struct ena_adapter *adapter;
+ struct ena_admin_aenq_link_change_desc *aenq_link_desc;
+ uint32_t status;
+
+ adapter = (struct ena_adapter *)adapter_data;
+ aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
+ eth_dev = adapter->rte_dev;
+
+ status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
+ adapter->link_status = status;
+
+ ena_link_update(eth_dev, 0);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
+static void ena_notification(void *data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)data;
+ struct ena_admin_ena_hw_hints *hints;
+
+ if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
+ RTE_LOG(WARNING, PMD, "Invalid group(%x) expected %x\n",
+ aenq_e->aenq_common_desc.group,
+ ENA_ADMIN_NOTIFICATION);
+
+ switch (aenq_e->aenq_common_desc.syndrom) {
+ case ENA_ADMIN_UPDATE_HINTS:
+ hints = (struct ena_admin_ena_hw_hints *)
+ (&aenq_e->inline_data_w4);
+ ena_update_hints(adapter, hints);
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "Invalid aenq notification link state %d\n",
+ aenq_e->aenq_common_desc.syndrom);
+ }
+}
+
+static void ena_keep_alive(void *adapter_data,
+ __rte_unused struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+
+ adapter->timestamp_wd = rte_get_timer_cycles();
+}
+
+/**
+ * This handler will called for unknown event group or unimplemented handlers
+ **/
+static void unimplemented_aenq_handler(__rte_unused void *data,
+ __rte_unused struct ena_admin_aenq_entry *aenq_e)
+{
+ RTE_LOG(ERR, PMD, "Unknown event was received or event with "
+ "unimplemented handler\n");
+}
+
+static struct ena_aenq_handlers aenq_handlers = {
+ .handlers = {
+ [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
+ [ENA_ADMIN_NOTIFICATION] = ena_notification,
+ [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
+ },
+ .unimplemented_handler = unimplemented_aenq_handler
+};
diff --git a/src/spdk/dpdk/drivers/net/ena/ena_ethdev.h b/src/spdk/dpdk/drivers/net/ena/ena_ethdev.h
new file mode 100644
index 00000000..2dc8129e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/ena_ethdev.h
@@ -0,0 +1,211 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_ETHDEV_H_
+#define _ENA_ETHDEV_H_
+
+#include <rte_cycles.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_timer.h>
+
+#include "ena_com.h"
+
+#define ENA_REGS_BAR 0
+#define ENA_MEM_BAR 2
+
+#define ENA_MAX_NUM_QUEUES 128
+#define ENA_DEFAULT_RING_SIZE (1024)
+#define ENA_MIN_FRAME_LEN 64
+#define ENA_NAME_MAX_LEN 20
+#define ENA_PKT_MAX_BUFS 17
+
+#define ENA_MIN_MTU 128
+
+#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+
+#define ENA_WD_TIMEOUT_SEC 3
+#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())
+
+struct ena_adapter;
+
+enum ena_ring_type {
+ ENA_RING_TYPE_RX = 1,
+ ENA_RING_TYPE_TX = 2,
+};
+
+struct ena_tx_buffer {
+ struct rte_mbuf *mbuf;
+ unsigned int tx_descs;
+ unsigned int num_of_bufs;
+ struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
+};
+
+struct ena_ring {
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ enum ena_ring_type type;
+ enum ena_admin_placement_policy_type tx_mem_queue_type;
+ /* Holds the empty requests for TX/RX OOO completions */
+ union {
+ uint16_t *empty_tx_reqs;
+ uint16_t *empty_rx_reqs;
+ };
+
+ union {
+ struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */
+ struct rte_mbuf **rx_buffer_info; /* contex of rx packet */
+ };
+ unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
+
+ struct ena_com_io_cq *ena_com_io_cq;
+ struct ena_com_io_sq *ena_com_io_sq;
+
+ struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]
+ __rte_cache_aligned;
+
+ struct rte_mempool *mb_pool;
+ unsigned int port_id;
+ unsigned int id;
+ /* Max length PMD can push to device for LLQ */
+ uint8_t tx_max_header_size;
+ int configured;
+ struct ena_adapter *adapter;
+ uint64_t offloads;
+ u16 sgl_size;
+} __rte_cache_aligned;
+
+enum ena_adapter_state {
+ ENA_ADAPTER_STATE_FREE = 0,
+ ENA_ADAPTER_STATE_INIT = 1,
+ ENA_ADAPTER_STATE_RUNNING = 2,
+ ENA_ADAPTER_STATE_STOPPED = 3,
+ ENA_ADAPTER_STATE_CONFIG = 4,
+ ENA_ADAPTER_STATE_CLOSED = 5,
+};
+
+struct ena_driver_stats {
+ rte_atomic64_t ierrors;
+ rte_atomic64_t oerrors;
+ rte_atomic64_t rx_nombuf;
+};
+
+struct ena_stats_dev {
+ u64 tx_timeout;
+ u64 io_suspend;
+ u64 io_resume;
+ u64 wd_expired;
+ u64 interface_up;
+ u64 interface_down;
+ u64 admin_q_pause;
+};
+
+struct ena_stats_tx {
+ u64 cnt;
+ u64 bytes;
+ u64 queue_stop;
+ u64 prepare_ctx_err;
+ u64 queue_wakeup;
+ u64 dma_mapping_err;
+ u64 linearize;
+ u64 linearize_failed;
+ u64 tx_poll;
+ u64 doorbells;
+ u64 missing_tx_comp;
+ u64 bad_req_id;
+};
+
+struct ena_stats_rx {
+ u64 cnt;
+ u64 bytes;
+ u64 refil_partial;
+ u64 bad_csum;
+ u64 page_alloc_fail;
+ u64 skb_alloc_fail;
+ u64 dma_mapping_err;
+ u64 bad_desc_num;
+ u64 small_copy_len_pkt;
+};
+
+/* board specific private data structure */
+struct ena_adapter {
+ /* OS defined structs */
+ struct rte_pci_device *pdev;
+ struct rte_eth_dev_data *rte_eth_dev_data;
+ struct rte_eth_dev *rte_dev;
+
+ struct ena_com_dev ena_dev __rte_cache_aligned;
+
+ /* TX */
+ struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
+ int tx_ring_size;
+ u16 max_tx_sgl_size;
+
+ /* RX */
+ struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
+ int rx_ring_size;
+
+ u16 num_queues;
+ u16 max_mtu;
+ u8 tso4_supported;
+
+ int id_number;
+ char name[ENA_NAME_MAX_LEN];
+ u8 mac_addr[ETHER_ADDR_LEN];
+
+ void *regs;
+ void *dev_mem_base;
+
+ struct ena_driver_stats *drv_stats;
+ enum ena_adapter_state state;
+
+ uint64_t tx_supported_offloads;
+ uint64_t tx_selected_offloads;
+ uint64_t rx_supported_offloads;
+ uint64_t rx_selected_offloads;
+
+ bool link_status;
+
+ enum ena_regs_reset_reason_types reset_reason;
+
+ struct rte_timer timer_wd;
+ uint64_t timestamp_wd;
+ uint64_t keep_alive_timeout;
+
+ bool trigger_reset;
+
+ bool wd_state;
+};
+
+#endif /* _ENA_ETHDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ena/ena_logs.h b/src/spdk/dpdk/drivers/net/ena/ena_logs.h
new file mode 100644
index 00000000..2c0e91b6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/ena_logs.h
@@ -0,0 +1,68 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_LOGS_H_
+#define _ENA_LOGS_H_
+
+extern int ena_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ena_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#ifdef RTE_LIBRTE_ENA_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_ENA_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+extern int ena_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ena_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#endif /* _ENA_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ena/ena_platform.h b/src/spdk/dpdk/drivers/net/ena/ena_platform.h
new file mode 100644
index 00000000..a2239a92
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/ena_platform.h
@@ -0,0 +1,59 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef __ENA_PLATFORM_H__
+#define __ENA_PLATFORM_H__
+
+#define swap16_to_le(x) (x)
+
+#define swap32_to_le(x) (x)
+
+#define swap64_to_le(x) (x)
+
+#define swap16_from_le(x) (x)
+
+#define swap32_from_le(x) (x)
+
+#define swap64_from_le(x) (x)
+
+#define ena_assert_msg(cond, msg) \
+ do { \
+ if (unlikely(!(cond))) { \
+ rte_log(RTE_LOG_ERR, ena_logtype_driver, \
+ "Assert failed on %s:%s:%d: ", \
+ __FILE__, __func__, __LINE__); \
+ rte_panic(msg); \
+ } \
+ } while (0)
+
+#endif /* __ENA_PLATFORM_H__ */
diff --git a/src/spdk/dpdk/drivers/net/ena/meson.build b/src/spdk/dpdk/drivers/net/ena/meson.build
new file mode 100644
index 00000000..091ca6e3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+sources = files('ena_ethdev.c',
+ 'base/ena_com.c',
+ 'base/ena_eth_com.c')
+
+deps += ['timer']
+
+includes += include_directories('base', 'base/ena_defs')
diff --git a/src/spdk/dpdk/drivers/net/ena/rte_pmd_ena_version.map b/src/spdk/dpdk/drivers/net/ena/rte_pmd_ena_version.map
new file mode 100644
index 00000000..349c6e1c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ena/rte_pmd_ena_version.map
@@ -0,0 +1,4 @@
+DPDK_16.04 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/enic/Makefile b/src/spdk/dpdk/drivers/net/enic/Makefile
new file mode 100644
index 00000000..7c6c29cc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+# Copyright 2007 Nuova Systems, Inc. All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_enic.a
+
+EXPORT_MAP := rte_pmd_enic_version.map
+
+LIBABIVER := 1
+
+CFLAGS += -I$(SRCDIR)/base/
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -Wno-strict-aliasing
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+
+VPATH += $(SRCDIR)/src
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_wq.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_rq.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_rss.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/enic/base/cq_desc.h b/src/spdk/dpdk/drivers/net/enic/base/cq_desc.h
new file mode 100644
index 00000000..ae8847c6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/cq_desc.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _CQ_DESC_H_
+#define _CQ_DESC_H_
+
+/*
+ * Completion queue descriptor types
+ */
+enum cq_desc_types {
+ CQ_DESC_TYPE_WQ_ENET = 0,
+ CQ_DESC_TYPE_DESC_COPY = 1,
+ CQ_DESC_TYPE_WQ_EXCH = 2,
+ CQ_DESC_TYPE_RQ_ENET = 3,
+ CQ_DESC_TYPE_RQ_FCP = 4,
+ CQ_DESC_TYPE_IOMMU_MISS = 5,
+ CQ_DESC_TYPE_SGL = 6,
+ CQ_DESC_TYPE_CLASSIFIER = 7,
+ CQ_DESC_TYPE_TEST = 127,
+};
+
+/* Completion queue descriptor: 16B
+ *
+ * All completion queues have this basic layout. The
+ * type_specfic area is unique for each completion
+ * queue type.
+ */
+struct cq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 type_specfic[11];
+ u8 type_color;
+};
+
+#define CQ_DESC_TYPE_BITS 4
+#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
+#define CQ_DESC_COLOR_MASK 1
+#define CQ_DESC_COLOR_SHIFT 7
+#define CQ_DESC_COLOR_MASK_NOSHIFT 0x80
+#define CQ_DESC_Q_NUM_BITS 10
+#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
+#define CQ_DESC_COMP_NDX_BITS 12
+#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
+
+static inline void cq_color_enc(struct cq_desc *desc, const u8 color)
+{
+ if (color)
+ desc->type_color |= (1 << CQ_DESC_COLOR_SHIFT);
+ else
+ desc->type_color &= ~(1 << CQ_DESC_COLOR_SHIFT);
+}
+
+static inline void cq_desc_enc(struct cq_desc *desc,
+ const u8 type, const u8 color, const u16 q_number,
+ const u16 completed_index)
+{
+ desc->type_color = (type & CQ_DESC_TYPE_MASK) |
+ ((color & CQ_DESC_COLOR_MASK) << CQ_DESC_COLOR_SHIFT);
+ desc->q_number = cpu_to_le16(q_number & CQ_DESC_Q_NUM_MASK);
+ desc->completed_index = cpu_to_le16(completed_index &
+ CQ_DESC_COMP_NDX_MASK);
+}
+
+static inline void cq_desc_dec(const struct cq_desc *desc_arg,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
+
+static inline void cq_color_dec(const struct cq_desc *desc_arg, u8 *color)
+{
+ volatile const struct cq_desc *desc = desc_arg;
+
+ *color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+}
+
+#endif /* _CQ_DESC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/cq_enet_desc.h b/src/spdk/dpdk/drivers/net/enic/base/cq_enet_desc.h
new file mode 100644
index 00000000..5ced63cb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/cq_enet_desc.h
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _CQ_ENET_DESC_H_
+#define _CQ_ENET_DESC_H_
+
+#include "cq_desc.h"
+
+/* Ethernet completion queue descriptor: 16B */
+struct cq_enet_wq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 reserved[11];
+ u8 type_color;
+};
+
+static inline void cq_enet_wq_desc_enc(struct cq_enet_wq_desc *desc,
+ u8 type, u8 color, u16 q_number, u16 completed_index)
+{
+ cq_desc_enc((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+}
+
+static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ cq_desc_dec((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+}
+
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_desc {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 type_color;
+};
+
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_clsf_desc {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le16 filter_id;
+ __le16 lif;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 type_color;
+};
+
+#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
+#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
+#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
+ ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
+#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
+
+#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
+
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
+ ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS 12
+#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK \
+ ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_CFI_MASK (0x1 << 12)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS 3
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_MASK \
+ ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS) - 1)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_SHIFT 13
+
+#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 8
+#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
+ ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
+#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
+ ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
+
+#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
+#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
+
+static inline void cq_enet_rq_desc_enc(struct cq_enet_rq_desc *desc,
+ u8 type, u8 color, u16 q_number, u16 completed_index,
+ u8 ingress_port, u8 fcoe, u8 eop, u8 sop, u8 rss_type, u8 csum_not_calc,
+ u32 rss_hash, u16 bytes_written, u8 packet_error, u8 vlan_stripped,
+ u16 vlan, u16 checksum, u8 fcoe_sof, u8 fcoe_fc_crc_ok,
+ u8 fcoe_enc_error, u8 fcoe_eof, u8 tcp_udp_csum_ok, u8 udp, u8 tcp,
+ u8 ipv4_csum_ok, u8 ipv6, u8 ipv4, u8 ipv4_fragment, u8 fcs_ok)
+{
+ cq_desc_enc((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+
+ desc->completed_index_flags |= cpu_to_le16(
+ (ingress_port ? CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT : 0) |
+ (fcoe ? CQ_ENET_RQ_DESC_FLAGS_FCOE : 0) |
+ (eop ? CQ_ENET_RQ_DESC_FLAGS_EOP : 0) |
+ (sop ? CQ_ENET_RQ_DESC_FLAGS_SOP : 0));
+
+ desc->q_number_rss_type_flags |= cpu_to_le16(
+ ((rss_type & CQ_ENET_RQ_DESC_RSS_TYPE_MASK) <<
+ CQ_DESC_Q_NUM_BITS) |
+ (csum_not_calc ? CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC : 0));
+
+ desc->rss_hash = cpu_to_le32(rss_hash);
+
+ desc->bytes_written_flags = cpu_to_le16(
+ (bytes_written & CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK) |
+ (packet_error ? CQ_ENET_RQ_DESC_FLAGS_TRUNCATED : 0) |
+ (vlan_stripped ? CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED : 0));
+
+ desc->vlan = cpu_to_le16(vlan);
+
+ if (fcoe) {
+ desc->checksum_fcoe = cpu_to_le16(
+ (fcoe_sof & CQ_ENET_RQ_DESC_FCOE_SOF_MASK) |
+ ((fcoe_eof & CQ_ENET_RQ_DESC_FCOE_EOF_MASK) <<
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT));
+ } else {
+ desc->checksum_fcoe = cpu_to_le16(checksum);
+ }
+
+ desc->flags =
+ (tcp_udp_csum_ok ? CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK : 0) |
+ (udp ? CQ_ENET_RQ_DESC_FLAGS_UDP : 0) |
+ (tcp ? CQ_ENET_RQ_DESC_FLAGS_TCP : 0) |
+ (ipv4_csum_ok ? CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK : 0) |
+ (ipv6 ? CQ_ENET_RQ_DESC_FLAGS_IPV6 : 0) |
+ (ipv4 ? CQ_ENET_RQ_DESC_FLAGS_IPV4 : 0) |
+ (ipv4_fragment ? CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT : 0) |
+ (fcs_ok ? CQ_ENET_RQ_DESC_FLAGS_FCS_OK : 0) |
+ (fcoe_fc_crc_ok ? CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK : 0) |
+ (fcoe_enc_error ? CQ_ENET_RQ_DESC_FCOE_ENC_ERROR : 0);
+}
+
+static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
+ u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+ u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
+ u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
+ u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
+ u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
+ u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
+{
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
+
+ cq_desc_dec((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+ *ingress_port = (completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+ *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0;
+ *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+ 1 : 0;
+ *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+ 1 : 0;
+
+ *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+ *csum_not_calc = (q_number_rss_type_flags &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+ *rss_hash = le32_to_cpu(desc->rss_hash);
+
+ *bytes_written = bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_error = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+ *vlan_stripped = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+ /*
+ * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
+ */
+ *vlan_tci = le16_to_cpu(desc->vlan);
+
+ if (*fcoe) {
+ *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+ *fcoe_fc_crc_ok = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+ *fcoe_enc_error = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+ *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+ CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+ *checksum = 0;
+ } else {
+ *fcoe_sof = 0;
+ *fcoe_fc_crc_ok = 0;
+ *fcoe_enc_error = 0;
+ *fcoe_eof = 0;
+ *checksum = le16_to_cpu(desc->checksum_fcoe);
+ }
+
+ *tcp_udp_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+ *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+ *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+ *ipv4_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+ *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+ *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+ *ipv4_fragment =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+ *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+#endif /* _CQ_ENET_DESC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/rq_enet_desc.h b/src/spdk/dpdk/drivers/net/enic/base/rq_enet_desc.h
new file mode 100644
index 00000000..3585bf3a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/rq_enet_desc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _RQ_ENET_DESC_H_
+#define _RQ_ENET_DESC_H_
+
+/* Ethernet receive queue descriptor: 16B */
+struct rq_enet_desc {
+ __le64 address;
+ __le16 length_type;
+ u8 reserved[6];
+};
+
+enum rq_enet_type_types {
+ RQ_ENET_TYPE_ONLY_SOP = 0,
+ RQ_ENET_TYPE_NOT_SOP = 1,
+ RQ_ENET_TYPE_RESV2 = 2,
+ RQ_ENET_TYPE_RESV3 = 3,
+};
+
+#define RQ_ENET_ADDR_BITS 64
+#define RQ_ENET_LEN_BITS 14
+#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1)
+#define RQ_ENET_TYPE_BITS 2
+#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
+
+static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc,
+ u64 address, u8 type, u16 length)
+{
+ desc->address = cpu_to_le64(address);
+ desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
+ ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
+}
+
+static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
+ u64 *address, u8 *type, u16 *length)
+{
+ *address = le64_to_cpu(desc->address);
+ *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
+ *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
+ RQ_ENET_TYPE_MASK);
+}
+
+#endif /* _RQ_ENET_DESC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_cq.c b/src/spdk/dpdk/drivers/net/enic/base/vnic_cq.c
new file mode 100644
index 00000000..63a503f2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_cq.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+void vnic_cq_free(struct vnic_cq *cq)
+{
+ vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
+
+ cq->ctrl = NULL;
+}
+
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+ unsigned int socket_id,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+ char res_name[NAME_MAX];
+ static int instance;
+
+ cq->index = index;
+ cq->vdev = vdev;
+
+ cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
+ if (!cq->ctrl) {
+ pr_err("Failed to hook CQ[%u] resource\n", index);
+ return -EINVAL;
+ }
+
+ snprintf(res_name, sizeof(res_name), "%d-cq-%u", instance++, index);
+ err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size,
+ socket_id, res_name);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+ unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+ unsigned int cq_tail_color, unsigned int interrupt_enable,
+ unsigned int cq_entry_enable, unsigned int cq_message_enable,
+ unsigned int interrupt_offset, u64 cq_message_addr)
+{
+ u64 paddr;
+
+ paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &cq->ctrl->ring_base);
+ iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
+ iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
+ iowrite32(color_enable, &cq->ctrl->color_enable);
+ iowrite32(cq_head, &cq->ctrl->cq_head);
+ iowrite32(cq_tail, &cq->ctrl->cq_tail);
+ iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
+ iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
+ iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
+ iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
+ iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
+ writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
+
+ cq->interrupt_offset = interrupt_offset;
+}
+
+void vnic_cq_clean(struct vnic_cq *cq)
+{
+ cq->to_clean = 0;
+ cq->last_color = 0;
+
+ iowrite32(0, &cq->ctrl->cq_head);
+ iowrite32(0, &cq->ctrl->cq_tail);
+ iowrite32(1, &cq->ctrl->cq_tail_color);
+
+ vnic_dev_clear_desc_ring(&cq->ring);
+}
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_cq.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_cq.h
new file mode 100644
index 00000000..432219af
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_cq.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_CQ_H_
+#define _VNIC_CQ_H_
+
+#include <rte_mbuf.h>
+
+#include "cq_desc.h"
+#include "vnic_dev.h"
+
+/* Completion queue control */
+struct vnic_cq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 flow_control_enable; /* 0x10 */
+ u32 pad1;
+ u32 color_enable; /* 0x18 */
+ u32 pad2;
+ u32 cq_head; /* 0x20 */
+ u32 pad3;
+ u32 cq_tail; /* 0x28 */
+ u32 pad4;
+ u32 cq_tail_color; /* 0x30 */
+ u32 pad5;
+ u32 interrupt_enable; /* 0x38 */
+ u32 pad6;
+ u32 cq_entry_enable; /* 0x40 */
+ u32 pad7;
+ u32 cq_message_enable; /* 0x48 */
+ u32 pad8;
+ u32 interrupt_offset; /* 0x50 */
+ u32 pad9;
+ u64 cq_message_addr; /* 0x58 */
+ u32 pad10;
+};
+
+#ifdef ENIC_AIC
+struct vnic_rx_bytes_counter {
+ unsigned int small_pkt_bytes_cnt;
+ unsigned int large_pkt_bytes_cnt;
+};
+#endif
+
+struct vnic_cq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ unsigned int to_clean;
+ unsigned int last_color;
+ unsigned int interrupt_offset;
+#ifdef ENIC_AIC
+ struct vnic_rx_bytes_counter pkt_size_counter;
+ unsigned int cur_rx_coal_timeval;
+ unsigned int tobe_rx_coal_timeval;
+ ktime_t prev_ts;
+#endif
+};
+
+void vnic_cq_free(struct vnic_cq *cq);
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+ unsigned int socket_id,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+ unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+ unsigned int cq_tail_color, unsigned int interrupt_enable,
+ unsigned int cq_entry_enable, unsigned int message_enable,
+ unsigned int interrupt_offset, u64 message_addr);
+void vnic_cq_clean(struct vnic_cq *cq);
+int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count,
+ unsigned int desc_size);
+
+#endif /* _VNIC_CQ_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_dev.c b/src/spdk/dpdk/drivers/net/enic/base/vnic_dev.c
new file mode 100644
index 00000000..16e8814a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_dev.c
@@ -0,0 +1,1096 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <rte_memzone.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+
+#include "vnic_dev.h"
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+#include "vnic_nic.h"
+#include "vnic_stats.h"
+
+
+enum vnic_proxy_type {
+ PROXY_NONE,
+ PROXY_BY_BDF,
+ PROXY_BY_INDEX,
+};
+
+struct vnic_res {
+ void __iomem *vaddr;
+ dma_addr_t bus_addr;
+ unsigned int count;
+};
+
+struct vnic_intr_coal_timer_info {
+ u32 mul;
+ u32 div;
+ u32 max_usec;
+};
+
+struct vnic_dev {
+ void *priv;
+ struct rte_pci_device *pdev;
+ struct vnic_res res[RES_TYPE_MAX];
+ enum vnic_dev_intr_mode intr_mode;
+ struct vnic_devcmd __iomem *devcmd;
+ struct vnic_devcmd_notify *notify;
+ struct vnic_devcmd_notify notify_copy;
+ dma_addr_t notify_pa;
+ u32 notify_sz;
+ dma_addr_t linkstatus_pa;
+ struct vnic_stats *stats;
+ dma_addr_t stats_pa;
+ struct vnic_devcmd_fw_info *fw_info;
+ dma_addr_t fw_info_pa;
+ enum vnic_proxy_type proxy;
+ u32 proxy_index;
+ u64 args[VNIC_DEVCMD_NARGS];
+ int in_reset;
+ struct vnic_intr_coal_timer_info intr_coal_timer_info;
+ void *(*alloc_consistent)(void *priv, size_t size,
+ dma_addr_t *dma_handle, u8 *name);
+ void (*free_consistent)(void *priv,
+ size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+};
+
+#define VNIC_MAX_RES_HDR_SIZE \
+ (sizeof(struct vnic_resource_header) + \
+ sizeof(struct vnic_resource) * RES_TYPE_MAX)
+#define VNIC_RES_STRIDE 128
+
+void *vnic_dev_priv(struct vnic_dev *vdev)
+{
+ return vdev->priv;
+}
+
+void vnic_register_cbacks(struct vnic_dev *vdev,
+ void *(*alloc_consistent)(void *priv, size_t size,
+ dma_addr_t *dma_handle, u8 *name),
+ void (*free_consistent)(void *priv,
+ size_t size, void *vaddr,
+ dma_addr_t dma_handle))
+{
+ vdev->alloc_consistent = alloc_consistent;
+ vdev->free_consistent = free_consistent;
+}
+
+static int vnic_dev_discover_res(struct vnic_dev *vdev,
+ struct vnic_dev_bar *bar, unsigned int num_bars)
+{
+ struct vnic_resource_header __iomem *rh;
+ struct mgmt_barmap_hdr __iomem *mrh;
+ struct vnic_resource __iomem *r;
+ u8 type;
+
+ if (num_bars == 0)
+ return -EINVAL;
+
+ if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
+ pr_err("vNIC BAR0 res hdr length error\n");
+ return -EINVAL;
+ }
+
+ rh = bar->vaddr;
+ mrh = bar->vaddr;
+ if (!rh) {
+ pr_err("vNIC BAR0 res hdr not mem-mapped\n");
+ return -EINVAL;
+ }
+
+ /* Check for mgmt vnic in addition to normal vnic */
+ if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
+ (ioread32(&rh->version) != VNIC_RES_VERSION)) {
+ if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
+ (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
+ pr_err("vNIC BAR0 res magic/version error " \
+ "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
+ VNIC_RES_MAGIC, VNIC_RES_VERSION,
+ MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
+ ioread32(&rh->magic), ioread32(&rh->version));
+ return -EINVAL;
+ }
+ }
+
+ if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
+ r = (struct vnic_resource __iomem *)(mrh + 1);
+ else
+ r = (struct vnic_resource __iomem *)(rh + 1);
+
+
+ while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
+ u8 bar_num = ioread8(&r->bar);
+ u32 bar_offset = ioread32(&r->bar_offset);
+ u32 count = ioread32(&r->count);
+ u32 len;
+
+ r++;
+
+ if (bar_num >= num_bars)
+ continue;
+
+ if (!bar[bar_num].len || !bar[bar_num].vaddr)
+ continue;
+
+ switch (type) {
+ case RES_TYPE_WQ:
+ case RES_TYPE_RQ:
+ case RES_TYPE_CQ:
+ case RES_TYPE_INTR_CTRL:
+ /* each count is stride bytes long */
+ len = count * VNIC_RES_STRIDE;
+ if (len + bar_offset > bar[bar_num].len) {
+ pr_err("vNIC BAR0 resource %d " \
+ "out-of-bounds, offset 0x%x + " \
+ "size 0x%x > bar len 0x%lx\n",
+ type, bar_offset,
+ len,
+ bar[bar_num].len);
+ return -EINVAL;
+ }
+ break;
+ case RES_TYPE_INTR_PBA_LEGACY:
+ case RES_TYPE_DEVCMD:
+ len = count;
+ break;
+ default:
+ continue;
+ }
+
+ vdev->res[type].count = count;
+ vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
+ bar_offset;
+ vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
+ }
+
+ return 0;
+}
+
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+ enum vnic_res_type type)
+{
+ return vdev->res[type].count;
+}
+
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+ unsigned int index)
+{
+ if (!vdev->res[type].vaddr)
+ return NULL;
+
+ switch (type) {
+ case RES_TYPE_WQ:
+ case RES_TYPE_RQ:
+ case RES_TYPE_CQ:
+ case RES_TYPE_INTR_CTRL:
+ return (char __iomem *)vdev->res[type].vaddr +
+ index * VNIC_RES_STRIDE;
+ default:
+ return (char __iomem *)vdev->res[type].vaddr;
+ }
+}
+
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ /* The base address of the desc rings must be 512 byte aligned.
+ * Descriptor count is aligned to groups of 32 descriptors. A
+ * count of 0 means the maximum 4096 descriptors. Descriptor
+ * size is aligned to 16 bytes.
+ */
+
+ unsigned int count_align = 32;
+ unsigned int desc_align = 16;
+
+ ring->base_align = 512;
+
+ if (desc_count == 0)
+ desc_count = 4096;
+
+ ring->desc_count = VNIC_ALIGN(desc_count, count_align);
+
+ ring->desc_size = VNIC_ALIGN(desc_size, desc_align);
+
+ ring->size = ring->desc_count * ring->desc_size;
+ ring->size_unaligned = ring->size + ring->base_align;
+
+ return ring->size_unaligned;
+}
+
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
+{
+ memset(ring->descs, 0, ring->size);
+}
+
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
+ struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size,
+ __attribute__((unused)) unsigned int socket_id,
+ char *z_name)
+{
+ void *alloc_addr;
+ dma_addr_t alloc_pa = 0;
+
+ vnic_dev_desc_ring_size(ring, desc_count, desc_size);
+ alloc_addr = vdev->alloc_consistent(vdev->priv,
+ ring->size_unaligned,
+ &alloc_pa, (u8 *)z_name);
+ if (!alloc_addr) {
+ pr_err("Failed to allocate ring (size=%d), aborting\n",
+ (int)ring->size);
+ return -ENOMEM;
+ }
+ ring->descs_unaligned = alloc_addr;
+ if (!alloc_pa) {
+ pr_err("Failed to map allocated ring (size=%d), aborting\n",
+ (int)ring->size);
+ vdev->free_consistent(vdev->priv,
+ ring->size_unaligned,
+ alloc_addr,
+ alloc_pa);
+ return -ENOMEM;
+ }
+ ring->base_addr_unaligned = alloc_pa;
+
+ ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned,
+ ring->base_align);
+ ring->descs = (u8 *)ring->descs_unaligned +
+ (ring->base_addr - ring->base_addr_unaligned);
+
+ vnic_dev_clear_desc_ring(ring);
+
+ ring->desc_avail = ring->desc_count - 1;
+
+ return 0;
+}
+
+void vnic_dev_free_desc_ring(__attribute__((unused)) struct vnic_dev *vdev,
+ struct vnic_dev_ring *ring)
+{
+ if (ring->descs) {
+ vdev->free_consistent(vdev->priv,
+ ring->size_unaligned,
+ ring->descs_unaligned,
+ ring->base_addr_unaligned);
+ ring->descs = NULL;
+ }
+}
+
+static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait)
+{
+ struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
+ unsigned int i;
+ int delay;
+ u32 status;
+ int err;
+
+ status = ioread32(&devcmd->status);
+ if (status == 0xFFFFFFFF) {
+ /* PCI-e target device is gone */
+ return -ENODEV;
+ }
+ if (status & STAT_BUSY) {
+
+ pr_err("Busy devcmd %d\n", _CMD_N(cmd));
+ return -EBUSY;
+ }
+
+ if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ writeq(vdev->args[i], &devcmd->args[i]);
+ wmb(); /* complete all writes initiated till now */
+ }
+
+ iowrite32(cmd, &devcmd->cmd);
+
+ if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+ return 0;
+
+ for (delay = 0; delay < wait; delay++) {
+
+ udelay(100);
+
+ status = ioread32(&devcmd->status);
+ if (status == 0xFFFFFFFF) {
+ /* PCI-e target device is gone */
+ return -ENODEV;
+ }
+
+ if (!(status & STAT_BUSY)) {
+ if (status & STAT_ERROR) {
+ err = -(int)readq(&devcmd->args[0]);
+ if (cmd != CMD_CAPABILITY)
+ pr_err("Devcmd %d failed " \
+ "with error code %d\n",
+ _CMD_N(cmd), err);
+ return err;
+ }
+
+ if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
+ rmb();/* finish all reads initiated till now */
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ vdev->args[i] = readq(&devcmd->args[i]);
+ }
+
+ return 0;
+ }
+ }
+
+ pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
+ return -ETIMEDOUT;
+}
+
+static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
+ enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
+ u64 *args, int nargs, int wait)
+{
+ u32 status;
+ int err;
+
+ /*
+ * Proxy command consumes 2 arguments. One for proxy index,
+ * the other is for command to be proxied
+ */
+ if (nargs > VNIC_DEVCMD_NARGS - 2) {
+ pr_err("number of args %d exceeds the maximum\n", nargs);
+ return -EINVAL;
+ }
+ memset(vdev->args, 0, sizeof(vdev->args));
+
+ vdev->args[0] = vdev->proxy_index;
+ vdev->args[1] = cmd;
+ memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));
+
+ err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
+ if (err)
+ return err;
+
+ status = (u32)vdev->args[0];
+ if (status & STAT_ERROR) {
+ err = (int)vdev->args[1];
+ if (err != ERR_ECMDUNKNOWN ||
+ cmd != CMD_CAPABILITY)
+ pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
+ return err;
+ }
+
+ memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));
+
+ return 0;
+}
+
+static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
+ enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
+{
+ int err;
+
+ if (nargs > VNIC_DEVCMD_NARGS) {
+ pr_err("number of args %d exceeds the maximum\n", nargs);
+ return -EINVAL;
+ }
+ memset(vdev->args, 0, sizeof(vdev->args));
+ memcpy(vdev->args, args, nargs * sizeof(args[0]));
+
+ err = _vnic_dev_cmd(vdev, cmd, wait);
+
+ memcpy(args, vdev->args, nargs * sizeof(args[0]));
+
+ return err;
+}
+
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait)
+{
+ u64 args[2];
+ int err;
+
+ args[0] = *a0;
+ args[1] = *a1;
+ memset(vdev->args, 0, sizeof(vdev->args));
+
+ switch (vdev->proxy) {
+ case PROXY_BY_INDEX:
+ err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
+ args, ARRAY_SIZE(args), wait);
+ break;
+ case PROXY_BY_BDF:
+ err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
+ args, ARRAY_SIZE(args), wait);
+ break;
+ case PROXY_NONE:
+ default:
+ err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
+ break;
+ }
+
+ if (err == 0) {
+ *a0 = args[0];
+ *a1 = args[1];
+ }
+
+ return err;
+}
+
+int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *args, int nargs, int wait)
+{
+ switch (vdev->proxy) {
+ case PROXY_BY_INDEX:
+ return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
+ args, nargs, wait);
+ case PROXY_BY_BDF:
+ return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
+ args, nargs, wait);
+ case PROXY_NONE:
+ default:
+ return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
+ }
+}
+
+static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
+ int nargs)
+{
+ memset(args, 0, nargs * sizeof(*args));
+ args[0] = CMD_ADD_ADV_FILTER;
+ args[1] = FILTER_CAP_MODE_V1_FLAG;
+ return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
+}
+
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
+{
+ u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (err)
+ return 0;
+ return (a1 >= (u32)FILTER_DPDK_1);
+}
+
+/* Determine the "best" filtering mode VIC is capaible of. Returns one of 3
+ * value or 0 on error:
+ * FILTER_DPDK_1- advanced filters availabile
+ * FILTER_USNIC_IP_FLAG - advanced filters but with the restriction that
+ * the IP layer must explicitly specified. I.e. cannot have a UDP
+ * filter that matches both IPv4 and IPv6.
+ * FILTER_IPV4_5TUPLE - fallback if either of the 2 above aren't available.
+ * all other filter types are not available.
+ * Retrun true in filter_tags if supported
+ */
+int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
+ u8 *filter_actions)
+{
+ u64 args[4];
+ int err;
+ u32 max_level = 0;
+
+ err = vnic_dev_advanced_filters_cap(vdev, args, 4);
+
+ /* determine supported filter actions */
+ *filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
+ if (args[2] == FILTER_CAP_MODE_V1)
+ *filter_actions = args[3];
+
+ if (err || ((args[0] == 1) && (args[1] == 0))) {
+ /* Adv filter Command not supported or adv filters available but
+ * not enabled. Try the normal filter capability command.
+ */
+ args[0] = CMD_ADD_FILTER;
+ args[1] = 0;
+ err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
+ if (err)
+ return err;
+ max_level = args[1];
+ goto parse_max_level;
+ } else if (args[2] == FILTER_CAP_MODE_V1) {
+ /* parse filter capability mask in args[1] */
+ if (args[1] & FILTER_DPDK_1_FLAG)
+ *mode = FILTER_DPDK_1;
+ else if (args[1] & FILTER_USNIC_IP_FLAG)
+ *mode = FILTER_USNIC_IP;
+ else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
+ *mode = FILTER_IPV4_5TUPLE;
+ return 0;
+ }
+ max_level = args[1];
+parse_max_level:
+ if (max_level >= (u32)FILTER_USNIC_IP)
+ *mode = FILTER_USNIC_IP;
+ else
+ *mode = FILTER_IPV4_5TUPLE;
+ return 0;
+}
+
+void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
+ bool *weak)
+{
+ u64 a0 = CMD_NIC_CFG, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *cfg_chk = false;
+ *weak = false;
+ err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (err == 0 && a0 != 0 && a1 != 0) {
+ *cfg_chk = true;
+ *weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
+ }
+}
+
+int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
+{
+ u64 a0 = (u32)cmd, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+
+ return !(err || a0);
+}
+
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
+ void *value)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int err;
+
+ a0 = offset;
+ a1 = size;
+
+ err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
+
+ switch (size) {
+ case 1:
+ *(u8 *)value = (u8)a0;
+ break;
+ case 2:
+ *(u16 *)value = (u16)a0;
+ break;
+ case 4:
+ *(u32 *)value = (u32)a0;
+ break;
+ case 8:
+ *(u64 *)value = a0;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return err;
+}
+
+int vnic_dev_stats_clear(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
+}
+
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
+{
+ u64 a0, a1;
+ int wait = 1000;
+
+ if (!vdev->stats)
+ return -ENOMEM;
+
+ *stats = vdev->stats;
+ a0 = vdev->stats_pa;
+ a1 = sizeof(struct vnic_stats);
+
+ return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
+}
+
+int vnic_dev_close(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
+}
+
+int vnic_dev_enable_wait(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
+ return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
+ else
+ return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_disable(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_open(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
+}
+
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *done = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ *done = (a0 == 0);
+
+ return 0;
+}
+
+int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err, i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = ((u8 *)&a0)[i];
+
+ return 0;
+}
+
+int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+ int broadcast, int promisc, int allmulti)
+{
+ u64 a0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
+ (multicast ? CMD_PFILTER_MULTICAST : 0) |
+ (broadcast ? CMD_PFILTER_BROADCAST : 0) |
+ (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
+ (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+
+ err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
+ if (err)
+ pr_err("Can't set packet filter\n");
+
+ return err;
+}
+
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ((u8 *)&a0)[i] = addr[i];
+
+ err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ if (err)
+ pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+ err);
+
+ return err;
+}
+
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ((u8 *)&a0)[i] = addr[i];
+
+ err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+ if (err)
+ pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+ err);
+
+ return err;
+}
+
+int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
+ u8 ig_vlan_rewrite_mode)
+{
+ u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
+ int wait = 1000;
+
+ if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
+ return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
+ &a0, &a1, wait);
+ else
+ return 0;
+}
+
+void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
+{
+ vdev->in_reset = state;
+}
+
+static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
+{
+ return vdev->in_reset;
+}
+
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, dma_addr_t notify_pa, u16 intr)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int r;
+
+ memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
+ if (!vnic_dev_in_reset(vdev)) {
+ vdev->notify = notify_addr;
+ vdev->notify_pa = notify_pa;
+ }
+
+ a0 = (u64)notify_pa;
+ a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
+ a1 += sizeof(struct vnic_devcmd_notify);
+
+ r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+ if (!vnic_dev_in_reset(vdev))
+ vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
+
+ return r;
+}
+
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+ void *notify_addr = NULL;
+ dma_addr_t notify_pa = 0;
+ char name[NAME_MAX];
+ static u32 instance;
+
+ if (vdev->notify || vdev->notify_pa) {
+ return vnic_dev_notify_setcmd(vdev, vdev->notify,
+ vdev->notify_pa, intr);
+ }
+ if (!vnic_dev_in_reset(vdev)) {
+ snprintf((char *)name, sizeof(name),
+ "vnic_notify-%u", instance++);
+ notify_addr = vdev->alloc_consistent(vdev->priv,
+ sizeof(struct vnic_devcmd_notify),
+ &notify_pa, (u8 *)name);
+ if (!notify_addr)
+ return -ENOMEM;
+ }
+
+ return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
+}
+
+int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int err;
+
+ a0 = 0; /* paddr = 0 to unset notify buffer */
+ a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
+ a1 += sizeof(struct vnic_devcmd_notify);
+
+ err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+ if (!vnic_dev_in_reset(vdev)) {
+ vdev->notify = NULL;
+ vdev->notify_pa = 0;
+ vdev->notify_sz = 0;
+ }
+
+ return err;
+}
+
+int vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+ if (vdev->notify && !vnic_dev_in_reset(vdev)) {
+ vdev->free_consistent(vdev->priv,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify,
+ vdev->notify_pa);
+ }
+
+ return vnic_dev_notify_unsetcmd(vdev);
+}
+
+static int vnic_dev_notify_ready(struct vnic_dev *vdev)
+{
+ u32 *words;
+ unsigned int nwords = vdev->notify_sz / 4;
+ unsigned int i;
+ u32 csum;
+
+ if (!vdev->notify || !vdev->notify_sz)
+ return 0;
+
+ do {
+ csum = 0;
+ rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
+ words = (u32 *)&vdev->notify_copy;
+ for (i = 1; i < nwords; i++)
+ csum += words[i];
+ } while (csum != words[0]);
+
+ return 1;
+}
+
+int vnic_dev_init(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+ int r = 0;
+
+ if (vnic_dev_capable(vdev, CMD_INIT))
+ r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
+ else {
+ vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
+ if (a0 & CMD_INITF_DEFAULT_MAC) {
+ /* Emulate these for old CMD_INIT_v1 which
+ * didn't pass a0 so no CMD_INITF_*.
+ */
+ vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
+ vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ }
+ }
+ return r;
+}
+
+void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
+{
+ /* Default: hardware intr coal timer is in units of 1.5 usecs */
+ vdev->intr_coal_timer_info.mul = 2;
+ vdev->intr_coal_timer_info.div = 3;
+ vdev->intr_coal_timer_info.max_usec =
+ vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
+}
+
+int vnic_dev_link_status(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.link_state;
+}
+
+u32 vnic_dev_port_speed(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.port_speed;
+}
+
+u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
+{
+ return (usec * vdev->intr_coal_timer_info.mul) /
+ vdev->intr_coal_timer_info.div;
+}
+
+u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
+{
+ return (hw_cycles * vdev->intr_coal_timer_info.div) /
+ vdev->intr_coal_timer_info.mul;
+}
+
+u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
+{
+ return vdev->intr_coal_timer_info.max_usec;
+}
+
+int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
+{
+ char name[NAME_MAX];
+ static u32 instance;
+
+ snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
+ vdev->stats = vdev->alloc_consistent(vdev->priv,
+ sizeof(struct vnic_stats),
+ &vdev->stats_pa, (u8 *)name);
+ return vdev->stats == NULL ? -ENOMEM : 0;
+}
+
+void vnic_dev_unregister(struct vnic_dev *vdev)
+{
+ if (vdev) {
+ if (vdev->notify)
+ vdev->free_consistent(vdev->priv,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify,
+ vdev->notify_pa);
+ if (vdev->stats)
+ vdev->free_consistent(vdev->priv,
+ sizeof(struct vnic_stats),
+ vdev->stats, vdev->stats_pa);
+ if (vdev->fw_info)
+ vdev->free_consistent(vdev->priv,
+ sizeof(struct vnic_devcmd_fw_info),
+ vdev->fw_info, vdev->fw_info_pa);
+ rte_free(vdev);
+ }
+}
+
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+ void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
+ unsigned int num_bars)
+{
+ if (!vdev) {
+ char name[NAME_MAX];
+ snprintf((char *)name, sizeof(name), "%s-vnic",
+ pdev->device.name);
+ vdev = (struct vnic_dev *)rte_zmalloc_socket(name,
+ sizeof(struct vnic_dev),
+ RTE_CACHE_LINE_SIZE,
+ pdev->device.numa_node);
+ if (!vdev)
+ return NULL;
+ }
+
+ vdev->priv = priv;
+ vdev->pdev = pdev;
+
+ if (vnic_dev_discover_res(vdev, bar, num_bars))
+ goto err_out;
+
+ vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+ if (!vdev->devcmd)
+ goto err_out;
+
+ return vdev;
+
+err_out:
+ vnic_dev_unregister(vdev);
+ return NULL;
+}
+
+/*
+ * vnic_dev_classifier: Add/Delete classifier entries
+ * @vdev: vdev of the device
+ * @cmd: CLSF_ADD for Add filter
+ * CLSF_DEL for Delete filter
+ * @entry: In case of ADD filter, the caller passes the RQ number in this
+ * variable.
+ * This function stores the filter_id returned by the
+ * firmware in the same variable before return;
+ *
+ * In case of DEL filter, the caller passes the RQ number. Return
+ * value is irrelevant.
+ * @data: filter data
+ * @action: action data
+ */
+int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
+ struct filter_v2 *data, struct filter_action_v2 *action_v2)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ dma_addr_t tlv_pa;
+ int ret = -EINVAL;
+ struct filter_tlv *tlv, *tlv_va;
+ u64 tlv_size;
+ u32 filter_size, action_size;
+ static unsigned int unique_id;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ enum vnic_devcmd_cmd dev_cmd;
+
+ if (cmd == CLSF_ADD) {
+ dev_cmd = (data->type >= FILTER_DPDK_1) ?
+ CMD_ADD_ADV_FILTER : CMD_ADD_FILTER;
+
+ filter_size = vnic_filter_size(data);
+ action_size = vnic_action_size(action_v2);
+
+ tlv_size = filter_size + action_size +
+ 2*sizeof(struct filter_tlv);
+ snprintf((char *)z_name, sizeof(z_name),
+ "vnic_clsf_%u", unique_id++);
+ tlv_va = vdev->alloc_consistent(vdev->priv,
+ tlv_size, &tlv_pa, (u8 *)z_name);
+ if (!tlv_va)
+ return -ENOMEM;
+ tlv = tlv_va;
+ a0 = tlv_pa;
+ a1 = tlv_size;
+ memset(tlv, 0, tlv_size);
+ tlv->type = CLSF_TLV_FILTER;
+ tlv->length = filter_size;
+ memcpy(&tlv->val, (void *)data, filter_size);
+
+ tlv = (struct filter_tlv *)((char *)tlv +
+ sizeof(struct filter_tlv) +
+ filter_size);
+
+ tlv->type = CLSF_TLV_ACTION;
+ tlv->length = action_size;
+ memcpy(&tlv->val, (void *)action_v2, action_size);
+ ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
+ *entry = (u16)a0;
+ vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
+ } else if (cmd == CLSF_DEL) {
+ a0 = *entry;
+ ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
+ }
+
+ return ret;
+}
+
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
+{
+ u64 a0 = overlay;
+ u64 a1 = config;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
+}
+
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
+ u16 vxlan_udp_port_number)
+{
+ u64 a1 = vxlan_udp_port_number;
+ u64 a0 = overlay;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
+}
+
+int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
+{
+ u64 a0 = VIC_FEATURE_VXLAN;
+ u64 a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
+ /* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
+ return ret == 0 &&
+ (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
+ (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
+}
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_dev.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_dev.h
new file mode 100644
index 00000000..270a47bd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_dev.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_DEV_H_
+#define _VNIC_DEV_H_
+
+#include <stdbool.h>
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+#include "enic_compat.h"
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+
+#ifndef VNIC_PADDR_TARGET
+#define VNIC_PADDR_TARGET 0x0000000000000000ULL
+#endif
+
+#ifndef readq
+static inline u64 readq(void __iomem *reg)
+{
+ return ((u64)readl((char *)reg + 0x4UL) << 32) |
+ (u64)readl(reg);
+}
+
+static inline void writeq(u64 val, void __iomem *reg)
+{
+ writel(val & 0xffffffff, reg);
+ writel((u32)(val >> 32), (char *)reg + 0x4UL);
+}
+#endif
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+enum vnic_dev_intr_mode {
+ VNIC_DEV_INTR_MODE_UNKNOWN,
+ VNIC_DEV_INTR_MODE_INTX,
+ VNIC_DEV_INTR_MODE_MSI,
+ VNIC_DEV_INTR_MODE_MSIX,
+};
+
+struct vnic_dev_bar {
+ void __iomem *vaddr;
+ dma_addr_t bus_addr;
+ unsigned long len;
+};
+
+struct vnic_dev_ring {
+ void *descs;
+ size_t size;
+ dma_addr_t base_addr;
+ size_t base_align;
+ void *descs_unaligned;
+ size_t size_unaligned;
+ dma_addr_t base_addr_unaligned;
+ unsigned int desc_size;
+ unsigned int desc_count;
+ unsigned int desc_avail;
+};
+
+struct vnic_dev_iomap_info {
+ dma_addr_t bus_addr;
+ unsigned long len;
+ void __iomem *vaddr;
+};
+
+struct vnic_dev;
+struct vnic_stats;
+
+void *vnic_dev_priv(struct vnic_dev *vdev);
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+ enum vnic_res_type type);
+void vnic_register_cbacks(struct vnic_dev *vdev,
+ void *(*alloc_consistent)(void *priv, size_t size,
+ dma_addr_t *dma_handle, u8 *name),
+ void (*free_consistent)(void *priv,
+ size_t size, void *vaddr,
+ dma_addr_t dma_handle));
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+ unsigned int index);
+dma_addr_t vnic_dev_get_res_bus_addr(struct vnic_dev *vdev,
+ enum vnic_res_type type, unsigned int index);
+uint8_t vnic_dev_get_res_bar(struct vnic_dev *vdev,
+ enum vnic_res_type type);
+uint32_t vnic_dev_get_res_offset(struct vnic_dev *vdev,
+ enum vnic_res_type type, unsigned int index);
+unsigned long vnic_dev_get_res_type_len(struct vnic_dev *vdev,
+ enum vnic_res_type type);
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size, unsigned int socket_id,
+ char *z_name);
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
+ struct vnic_dev_ring *ring);
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait);
+int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *args, int nargs, int wait);
+void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index);
+void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
+void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+ struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
+int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd);
+int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
+ u8 *filter_actions);
+void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
+ bool *weak);
+int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
+ void *value);
+int vnic_dev_stats_clear(struct vnic_dev *vdev);
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
+int vnic_dev_hang_notify(struct vnic_dev *vdev);
+int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+ int broadcast, int promisc, int allmulti);
+int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
+ int multicast, int broadcast, int promisc, int allmulti);
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state);
+int vnic_dev_notify_unset(struct vnic_dev *vdev);
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, dma_addr_t notify_pa, u16 intr);
+int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
+int vnic_dev_link_status(struct vnic_dev *vdev);
+u32 vnic_dev_port_speed(struct vnic_dev *vdev);
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
+u32 vnic_dev_mtu(struct vnic_dev *vdev);
+u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
+u32 vnic_dev_notify_status(struct vnic_dev *vdev);
+u32 vnic_dev_uif(struct vnic_dev *vdev);
+int vnic_dev_close(struct vnic_dev *vdev);
+int vnic_dev_enable(struct vnic_dev *vdev);
+int vnic_dev_enable_wait(struct vnic_dev *vdev);
+int vnic_dev_disable(struct vnic_dev *vdev);
+int vnic_dev_open(struct vnic_dev *vdev, int arg);
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_init(struct vnic_dev *vdev, int arg);
+int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err);
+int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
+int vnic_dev_deinit(struct vnic_dev *vdev);
+void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev);
+int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev);
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+ enum vnic_dev_intr_mode intr_mode);
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
+u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec);
+u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles);
+u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev);
+void vnic_dev_unregister(struct vnic_dev *vdev);
+int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
+ u8 ig_vlan_rewrite_mode);
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+ void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
+ unsigned int num_bars);
+struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev);
+int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev);
+int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
+int vnic_dev_get_size(void);
+int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op);
+int vnic_dev_perbi(struct vnic_dev *vdev, u64 arg, u32 op);
+u32 vnic_dev_perbi_rebuild_cnt(struct vnic_dev *vdev);
+int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
+int vnic_dev_enable2(struct vnic_dev *vdev, int active);
+int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
+int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
+int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
+ struct filter_v2 *data, struct filter_action_v2 *action_v2);
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev,
+ u8 overlay, u8 config);
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
+ u16 vxlan_udp_port_number);
+int vnic_dev_capable_vxlan(struct vnic_dev *vdev);
+#endif /* _VNIC_DEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_devcmd.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_devcmd.h
new file mode 100644
index 00000000..a22d8a76
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_devcmd.h
@@ -0,0 +1,1125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_DEVCMD_H_
+#define _VNIC_DEVCMD_H_
+
+#define _CMD_NBITS 14
+#define _CMD_VTYPEBITS 10
+#define _CMD_FLAGSBITS 6
+#define _CMD_DIRBITS 2
+
+#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
+#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
+#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
+#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
+
+#define _CMD_NSHIFT 0
+#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
+#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
+#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
+
+/*
+ * Direction bits (from host perspective).
+ */
+#define _CMD_DIR_NONE 0U
+#define _CMD_DIR_WRITE 1U
+#define _CMD_DIR_READ 2U
+#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
+
+/*
+ * Flag bits.
+ */
+#define _CMD_FLAGS_NONE 0U
+#define _CMD_FLAGS_NOWAIT 1U
+
+/*
+ * vNIC type bits.
+ */
+#define _CMD_VTYPE_NONE 0U
+#define _CMD_VTYPE_ENET 1U
+#define _CMD_VTYPE_FC 2U
+#define _CMD_VTYPE_SCSI 4U
+#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
+
+/*
+ * Used to create cmds..
+ */
+#define _CMDCF(dir, flags, vtype, nr) \
+ (((dir) << _CMD_DIRSHIFT) | \
+ ((flags) << _CMD_FLAGSSHIFT) | \
+ ((vtype) << _CMD_VTYPESHIFT) | \
+ ((nr) << _CMD_NSHIFT))
+#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
+#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
+
+/*
+ * Used to decode cmds..
+ */
+#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
+#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
+#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
+#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+enum vnic_devcmd_cmd {
+ CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
+
+ /*
+ * mcpu fw info in mem:
+ * in:
+ * (u64)a0=paddr to struct vnic_devcmd_fw_info
+ * action:
+ * Fills in struct vnic_devcmd_fw_info (128 bytes)
+ * note:
+ * An old definition of CMD_MCPU_FW_INFO
+ */
+ CMD_MCPU_FW_INFO_OLD = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+
+ /*
+ * mcpu fw info in mem:
+ * in:
+ * (u64)a0=paddr to struct vnic_devcmd_fw_info
+ * (u16)a1=size of the structure
+ * out:
+ * (u16)a1=0 for in:a1 = 0,
+ * data size actually written for other values.
+ * action:
+ * Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0,
+ * first in:a1 bytes for 0 < in:a1 <= 132,
+ * 132 bytes for other values of in:a1.
+ * note:
+ * CMD_MCPU_FW_INFO and CMD_MCPU_FW_INFO_OLD have the same enum 1
+ * for source compatibility.
+ */
+ CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1),
+
+ /* dev-specific block member:
+ * in: (u16)a0=offset,(u8)a1=size
+ * out: a0=value
+ */
+ CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
+
+ /* stats clear */
+ CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
+
+ /* stats dump in mem: (u64)a0=paddr to stats area,
+ * (u16)a1=sizeof stats area */
+ CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
+
+ /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
+ CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
+
+ /* set Rx packet filter for all: (u32)a0=filters (see CMD_PFILTER_*) */
+ CMD_PACKET_FILTER_ALL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),
+
+ /* hang detection notification */
+ CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
+
+ /* MAC address in (u48)a0 */
+ CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+#define CMD_GET_MAC_ADDR CMD_MAC_ADDR /* some uses are aliased */
+
+ /* add addr from (u48)a0 */
+ CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
+
+ /* del addr from (u48)a0 */
+ CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
+
+ /* add VLAN id in (u16)a0 */
+ CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
+
+ /* del VLAN id in (u16)a0 */
+ CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
+
+ /*
+ * nic_cfg in (u32)a0
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if a1 is valid
+ * (u64) a1= (NIC_CFG bits supported) | (flags << 32)
+ * (flags are CMD_NIC_CFG_CAPF_xxx)
+ */
+ CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
+ /*
+ * nic_cfg_chk (same as nic_cfg, but may return error)
+ * in (u32)a0
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if a1 is valid
+ * (u64) a1= (NIC_CFG bits supported) | (flags << 32)
+ * (flags are CMD_NIC_CFG_CAPF_xxx)
+ */
+ CMD_NIC_CFG_CHK = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
+ /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
+ CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
+
+ /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
+ CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
+
+ /* initiate softreset */
+ CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
+
+ /* softreset status:
+ * out: a0=0 reset complete, a0=1 reset in progress */
+ CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
+
+ /* set struct vnic_devcmd_notify buffer in mem:
+ * in:
+ * (u64)a0=paddr to notify (set paddr=0 to unset)
+ * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+ * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+ * out:
+ * (u32)a1 = effective size
+ */
+ CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
+
+ /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
+ * (u8)a1=PXENV_UNDI_xxx */
+ CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
+
+ /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
+ CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
+
+ /* open status:
+ * out: a0=0 open complete, a0=1 open in progress */
+ CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
+
+ /* close vnic */
+ CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
+
+ /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+/***** Replaced by CMD_INIT *****/
+ CMD_INIT_v1 = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
+
+ /* variant of CMD_INIT, with provisioning info
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
+
+ /* enable virtual link */
+ CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+ /* enable virtual link, waiting variant. */
+ CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+ /* disable virtual link */
+ CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
+
+ /* stats dump sum of all vnic stats on same uplink in mem:
+ * (u64)a0=paddr
+ * (u16)a1=sizeof stats area */
+ CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
+
+ /* init status:
+ * out: a0=0 init complete, a0=1 init in progress
+ * if a0=0, a1=errno */
+ CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
+
+ /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u32)a1=INT13_CMD_xxx */
+ CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
+
+ /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
+ CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
+
+ /* undo initialize of virtual link */
+ CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+
+ /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+ CMD_INIT = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 35),
+
+ /* check fw capability of a cmd:
+ * in: (u32)a0=cmd
+ * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+ CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
+
+ /* persistent binding info
+ * in: (u64)a0=paddr of arg
+ * (u32)a1=CMD_PERBI_XXX */
+ CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
+
+ /* Interrupt Assert Register functionality
+ * in: (u16)a0=interrupt number to assert
+ */
+ CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+ /* initiate hangreset, like softreset after hang detected */
+ CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+ /* hangreset status:
+ * out: a0=0 reset complete, a0=1 reset in progress */
+ CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+ /*
+ * Set hw ingress packet vlan rewrite mode:
+ * in: (u32)a0=new vlan rewrite mode
+ * out: (u32)a0=old vlan rewrite mode */
+ CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+ /*
+ * in: (u16)a0=bdf of target vnic
+ * (u32)a1=cmd to proxy
+ * a2-a15=args to cmd in a1
+ * out: (u32)a0=status of proxied cmd
+ * a1-a15=out args of proxied cmd */
+ CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+ /*
+ * As for BY_BDF except a0 is index of hvnlink subordinate vnic
+ * or SR-IOV virtual vnic
+ */
+ CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+ /*
+ * For HPP toggle:
+ * adapter-info-get
+ * in: (u64)a0=phsical address of buffer passed in from caller.
+ * (u16)a1=size of buffer specified in a0.
+ * out: (u64)a0=phsical address of buffer passed in from caller.
+ * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+ * 0 if no VIF-CONFIG-INFO TLV was ever received. */
+ CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+
+ /*
+ * INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u32)a1=INT13_CMD_xxx
+ */
+ CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+ /*
+ * Set default vlan:
+ * in: (u16)a0=new default vlan
+ * (u16)a1=zero for overriding vlan with param a0,
+ * non-zero for resetting vlan to the default
+ * out: (u16)a0=old default vlan
+ */
+ CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46),
+
+ /* init_prov_info2:
+ * Variant of CMD_INIT_PROV_INFO, where it will not try to enable
+ * the vnic until CMD_ENABLE2 is issued.
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
+
+ /* enable2:
+ * (u32)a0=0 ==> standby
+ * =CMD_ENABLE2_ACTIVE ==> active
+ */
+ CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48),
+
+ /*
+ * cmd_status:
+ * Returns the status of the specified command
+ * Input:
+ * a0 = command for which status is being queried.
+ * Possible values are:
+ * CMD_SOFT_RESET
+ * CMD_HANG_RESET
+ * CMD_OPEN
+ * CMD_INIT
+ * CMD_INIT_PROV_INFO
+ * CMD_DEINIT
+ * CMD_INIT_PROV_INFO2
+ * CMD_ENABLE2
+ * Output:
+ * if status == STAT_ERROR
+ * a0 = ERR_ENOTSUPPORTED - status for command in a0 is
+ * not supported
+ * if status == STAT_NONE
+ * a0 = status of the devcmd specified in a0 as follows.
+ * ERR_SUCCESS - command in a0 completed successfully
+ * ERR_EINPROGRESS - command in a0 is still in progress
+ */
+ CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49),
+
+ /*
+ * Returns interrupt coalescing timer conversion factors.
+ * After calling this devcmd, ENIC driver can convert
+ * interrupt coalescing timer in usec into CPU cycles as follows:
+ *
+ * intr_timer_cycles = intr_timer_usec * multiplier / divisor
+ *
+ * Interrupt coalescing timer in usecs can be be converted/obtained
+ * from CPU cycles as follows:
+ *
+ * intr_timer_usec = intr_timer_cycles * divisor / multiplier
+ *
+ * in: none
+ * out: (u32)a0 = multiplier
+ * (u32)a1 = divisor
+ * (u32)a2 = maximum timer value in usec
+ */
+ CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
+
+ /*
+ * ISCSI DUMP API:
+ * in: (u64)a0=paddr of the param or param itself
+ * (u32)a1=ISCSI_CMD_xxx
+ */
+ CMD_ISCSI_DUMP_REQ = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 51),
+
+ /*
+ * ISCSI DUMP STATUS API:
+ * in: (u32)a0=cmd tag
+ * in: (u32)a1=ISCSI_CMD_xxx
+ * out: (u32)a0=cmd status
+ */
+ CMD_ISCSI_DUMP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 52),
+
+ /*
+ * Subvnic migration from MQ <--> VF.
+ * Enable the LIF migration from MQ to VF and vice versa. MQ and VF
+ * indexes are statically bound at the time of initialization.
+ * Based on the direction of migration, the resources of either MQ or
+ * the VF shall be attached to the LIF.
+ * in: (u32)a0=Direction of Migration
+ * 0=> Migrate to VF
+ * 1=> Migrate to MQ
+ * (u32)a1=VF index (MQ index)
+ */
+ CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53),
+
+ /*
+ * Register / Deregister the notification block for MQ subvnics
+ * in:
+ * (u64)a0=paddr to notify (set paddr=0 to unset)
+ * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+ * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+ * out:
+ * (u32)a1 = effective size
+ */
+ CMD_SUBVNIC_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 54),
+
+ /*
+ * Set the predefined mac address as default
+ * in:
+ * (u48)a0=mac addr
+ */
+ CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
+
+ /* Update the provisioning info of the given VIF
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+
+ /*
+ * Initialization for the devcmd2 interface.
+ * in: (u64) a0=host result buffer physical address
+ * in: (u16) a1=number of entries in result buffer
+ */
+ CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57),
+
+ /*
+ * Add a filter.
+ * in: (u64) a0= filter address
+ * (u32) a1= size of filter
+ * out: (u32) a0=filter identifier
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if capability query supported
+ * (u64) a1= MAX filter type supported
+ */
+ CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
+
+ /*
+ * Delete a filter.
+ * in: (u32) a0=filter identifier
+ */
+ CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59),
+
+ /*
+ * Enable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60),
+
+ /*
+ * Disable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61),
+
+ /*
+ * Stats dump Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u64) a1=host buffer addr for status dump
+ * (u32) a2=length of the buffer
+ */
+ CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62),
+
+ /*
+ * Clear stats for Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ */
+ CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
+
+ /*
+ * UEFI BOOT API: (u64)a0= UEFI FLS_CMD_xxx
+ * (ui64)a1= paddr for the info buffer
+ */
+ CMD_FC_REQ = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 64),
+
+ /*
+ * Return the iSCSI config details required by the EFI Option ROM
+ * in: (u32) a0=0 Get Boot Info for PXE eNIC as per pxe_boot_config_t
+ * a0=1 Get Boot info for iSCSI enic as per
+ * iscsi_boot_efi_cfg_t
+ * in: (u64) a1=Host address where iSCSI config info is returned
+ */
+ CMD_VNIC_BOOT_CONFIG_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 65),
+
+ /*
+ * Create a Queue Pair (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ * (u32) a1 = Remote QP
+ * (u32) a2 = RDMA-RQ
+ * (u16) a3 = RQ Res Group
+ * (u16) a4 = SQ Res Group
+ * (u32) a5 = Protection Domain
+ * (u64) a6 = Remote MAC
+ * (u32) a7 = start PSN
+ * (u16) a8 = MSS
+ * (u32) a9 = protocol version
+ */
+ CMD_RDMA_QP_CREATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 66),
+
+ /*
+ * Delete a Queue Pair (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ */
+ CMD_RDMA_QP_DELETE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 67),
+
+ /*
+ * Retrieve a Queue Pair's status information (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ * (u64) a1 = host buffer addr for QP status struct
+ * (u32) a2 = length of the buffer
+ */
+ CMD_RDMA_QP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 68),
+
+ /*
+ * Use this devcmd for agreeing on the highest common version supported
+ * by both driver and fw for by features who need such a facility.
+ * in: (u64) a0 = feature (driver requests for the supported versions
+ * on this feature)
+ * out: (u64) a0 = bitmap of all supported versions for that feature
+ */
+ CMD_GET_SUPP_FEATURE_VER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 69),
+
+ /*
+ * Initialize the RDMA notification work queue
+ * in: (u64) a0 = host buffer address
+ * in: (u16) a1 = number of entries in buffer
+ * in: (u16) a2 = resource group number
+ * in: (u16) a3 = CQ number to post completion
+ */
+ CMD_RDMA_INIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 70),
+
+ /*
+ * De-init the RDMA notification work queue
+ * in: (u64) a0=resource group number
+ */
+ CMD_RDMA_DEINIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 71),
+
+ /*
+ * Control (Enable/Disable) overlay offloads on the given vnic
+ * in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE
+ * a0 = OVERLAY_FEATURE_VXLAN : VxLAN
+ * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable or
+ * a1 = OVERLAY_OFFLOAD_DISABLE : Disable or
+ * a1 = OVERLAY_OFFLOAD_ENABLE_V2 : Enable with version 2
+ */
+ CMD_OVERLAY_OFFLOAD_CTRL =
+ _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72),
+
+ /*
+ * Configuration of overlay offloads feature on a given vNIC
+ * in: (u8) a0 = OVERLAY_CFG_VXLAN_PORT_UPDATE : VxLAN
+ * in: (u16) a1 = unsigned short int port information
+ */
+ CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+
+ /*
+ * Return the configured name for the device
+ * in: (u64) a0=Host address where the name is copied
+ * (u32) a1=Size of the buffer
+ */
+ CMD_GET_CONFIG_NAME = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 74),
+
+ /*
+ * Enable group interrupt for the VF
+ * in: (u32) a0 = GRPINTR_ENABLE : enable
+ * a0 = GRPINTR_DISABLE : disable
+ * a0 = GRPINTR_UPD_VECT: update group vector addr
+ * in: (u32) a1 = interrupt group count
+ * in: (u64) a2 = Start of host buffer address for DMAing group
+ * vector bitmap
+ * in: (u64) a3 = Stride between group vectors
+ */
+ CMD_CONFIG_GRPINTR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 75),
+
+ /*
+ * Set cq arrary base and size in a list of consective wqs and
+ * rqs for a device
+ * in: (u16) a0 = the wq relative index in the device.
+ * -1 indicates skipping wq configuration
+ * in: (u16) a1 = the wcq relative index in the device
+ * in: (u16) a2 = the rq relative index in the device
+ * -1 indicates skipping rq configuration
+ * in: (u16) a3 = the rcq relative index in the device
+ */
+ CMD_CONFIG_CQ_ARRAY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 76),
+
+ /*
+ * Add an advanced filter.
+ * in: (u64) a0= filter address
+ * (u32) a1= size of filter
+ * out: (u32) a0=filter identifier
+ *
+ * Capability query:
+ * in: (u64) a1= supported filter capability exchange modes
+ * out: (u64) a0= 1 if capability query supported
+ * if (u64) a1 = 0: a1 = MAX filter type supported
+ * if (u64) a1 & FILTER_CAP_MODE_V1_FLAG:
+ * a1 = bitmask of supported filters
+ * a2 = FILTER_CAP_MODE_V1
+ * a3 = bitmask of supported actions
+ */
+ CMD_ADD_ADV_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 77),
+};
+
+/* Modes for exchanging advanced filter capabilities. The modes supported by
+ * the driver are passed in the CMD_ADD_ADV_FILTER capability command and the
+ * mode selected is returned.
+ * V0: the maximum filter type supported is returned
+ * V1: bitmasks of supported filters and actions are returned
+ */
+enum filter_cap_mode {
+ FILTER_CAP_MODE_V0 = 0, /* Must always be 0 for legacy drivers */
+ FILTER_CAP_MODE_V1 = 1,
+};
+#define FILTER_CAP_MODE_V1_FLAG (1 << FILTER_CAP_MODE_V1)
+
+/* CMD_ENABLE2 flags */
+#define CMD_ENABLE2_STANDBY 0x0
+#define CMD_ENABLE2_ACTIVE 0x1
+
+/* flags for CMD_OPEN */
+#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
+#define CMD_OPENF_IG_DESCCACHE 0x2 /* Do not flush IG DESC cache */
+
+/* flags for CMD_INIT */
+#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
+
+/* flags for CMD_NIC_CFG */
+#define CMD_NIC_CFG_CAPF_UDP_WEAK (1ULL << 0) /* Bodega-style UDP RSS */
+
+/* flags for CMD_PACKET_FILTER */
+#define CMD_PFILTER_DIRECTED 0x01
+#define CMD_PFILTER_MULTICAST 0x02
+#define CMD_PFILTER_BROADCAST 0x04
+#define CMD_PFILTER_PROMISCUOUS 0x08
+#define CMD_PFILTER_ALL_MULTICAST 0x10
+
+/* Commands for CMD_QP_ENABLE/CM_QP_DISABLE */
+#define CMD_QP_RQWQ 0x0
+
+/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
+#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0
+#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1
+#define IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN 2
+#define IG_VLAN_REWRITE_MODE_PASS_THRU 3
+
+enum vnic_devcmd_status {
+ STAT_NONE = 0,
+ STAT_BUSY = 1 << 0, /* cmd in progress */
+ STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
+ STAT_FAILOVER = 1 << 2, /* always set on vnics in pci standby state
+ * if seen a failover to the standby happened
+ */
+};
+
+enum vnic_devcmd_error {
+ ERR_SUCCESS = 0,
+ ERR_EINVAL = 1,
+ ERR_EFAULT = 2,
+ ERR_EPERM = 3,
+ ERR_EBUSY = 4,
+ ERR_ECMDUNKNOWN = 5,
+ ERR_EBADSTATE = 6,
+ ERR_ENOMEM = 7,
+ ERR_ETIMEDOUT = 8,
+ ERR_ELINKDOWN = 9,
+ ERR_EMAXRES = 10,
+ ERR_ENOTSUPPORTED = 11,
+ ERR_EINPROGRESS = 12,
+ ERR_MAX
+};
+
+/*
+ * note: hw_version and asic_rev refer to the same thing,
+ * but have different formats. hw_version is
+ * a 32-byte string (e.g. "A2") and asic_rev is
+ * a 16-bit integer (e.g. 0xA2).
+ */
+struct vnic_devcmd_fw_info {
+ char fw_version[32];
+ char fw_build[32];
+ char hw_version[32];
+ char hw_serial_number[32];
+ u16 asic_type;
+ u16 asic_rev;
+};
+
+enum fwinfo_asic_type {
+ FWINFO_ASIC_TYPE_UNKNOWN,
+ FWINFO_ASIC_TYPE_PALO,
+ FWINFO_ASIC_TYPE_SERENO,
+ FWINFO_ASIC_TYPE_CRUZ,
+};
+
+struct vnic_devcmd_notify {
+ u32 csum; /* checksum over following words */
+
+ u32 link_state; /* link up == 1 */
+ u32 port_speed; /* effective port speed (rate limit) */
+ u32 mtu; /* MTU */
+ u32 msglvl; /* requested driver msg lvl */
+ u32 uif; /* uplink interface */
+ u32 status; /* status bits (see VNIC_STF_*) */
+ u32 error; /* error code (see ERR_*) for first ERR */
+ u32 link_down_cnt; /* running count of link down transitions */
+ u32 perbi_rebuild_cnt; /* running count of perbi rebuilds */
+};
+#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
+#define VNIC_STF_STD_PAUSE 0x0002 /* standard link-level pause on */
+#define VNIC_STF_PFC_PAUSE 0x0004 /* priority flow control pause on */
+/* all supported status flags */
+#define VNIC_STF_ALL (VNIC_STF_FATAL_ERR |\
+ VNIC_STF_STD_PAUSE |\
+ VNIC_STF_PFC_PAUSE |\
+ 0)
+
+struct vnic_devcmd_provinfo {
+ u8 oui[3];
+ u8 type;
+ u8 data[0];
+};
+
+/*
+ * These are used in flags field of different filters to denote
+ * valid fields used.
+ */
+#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
+
+#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4)
+
+#define FILTER_FIELDS_USNIC (FILTER_FIELD_USNIC_VLAN | \
+ FILTER_FIELD_USNIC_ETHTYPE | \
+ FILTER_FIELD_USNIC_PROTO | \
+ FILTER_FIELD_USNIC_ID)
+
+struct filter_usnic_id {
+ u32 flags;
+ u16 vlan;
+ u16 ethtype;
+ u8 proto_version;
+ u32 usnic_id;
+} __attribute__((packed));
+
+#define FILTER_FIELD_5TUP_PROTO FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
+
+#define FILTER_FIELDS_IPV4_5TUPLE (FILTER_FIELD_5TUP_PROTO | \
+ FILTER_FIELD_5TUP_SRC_AD | \
+ FILTER_FIELD_5TUP_DST_AD | \
+ FILTER_FIELD_5TUP_SRC_PT | \
+ FILTER_FIELD_5TUP_DST_PT)
+
+/* Enums for the protocol field. */
+enum protocol_e {
+ PROTO_UDP = 0,
+ PROTO_TCP = 1,
+ PROTO_IPV4 = 2,
+ PROTO_IPV6 = 3
+};
+
+struct filter_ipv4_5tuple {
+ u32 flags;
+ u32 protocol;
+ u32 src_addr;
+ u32 dst_addr;
+ u16 src_port;
+ u16 dst_port;
+} __attribute__((packed));
+
+#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2)
+
+#define FILTER_FIELDS_MAC_VLAN (FILTER_FIELD_VMQ_VLAN | \
+ FILTER_FIELD_VMQ_MAC)
+
+#define FILTER_FIELDS_NVGRE FILTER_FIELD_VMQ_MAC
+
+struct filter_mac_vlan {
+ u32 flags;
+ u16 vlan;
+ u8 mac_addr[6];
+} __attribute__((packed));
+
+#define FILTER_FIELD_VLAN_IP_3TUP_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_VLAN_IP_3TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_VLAN_IP_3TUP_DST_PT FILTER_FIELD_VALID(5)
+
+#define FILTER_FIELDS_VLAN_IP_3TUP (FILTER_FIELD_VLAN_IP_3TUP_VLAN | \
+ FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO | \
+ FILTER_FIELD_VLAN_IP_3TUP_DST_AD | \
+ FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO | \
+ FILTER_FIELD_VLAN_IP_3TUP_DST_PT)
+
+struct filter_vlan_ip_3tuple {
+ u32 flags;
+ u16 vlan;
+ u16 l3_protocol;
+ union {
+ u32 dst_addr_v4;
+ u8 dst_addr_v6[16];
+ } u;
+ u32 l4_protocol;
+ u16 dst_port;
+} __attribute__((packed));
+
+#define FILTER_GENERIC_1_BYTES 64
+
+enum filter_generic_1_layer {
+ FILTER_GENERIC_1_L2,
+ FILTER_GENERIC_1_L3,
+ FILTER_GENERIC_1_L4,
+ FILTER_GENERIC_1_L5,
+ FILTER_GENERIC_1_NUM_LAYERS
+};
+
+#define FILTER_GENERIC_1_IPV4 (1 << 0)
+#define FILTER_GENERIC_1_IPV6 (1 << 1)
+#define FILTER_GENERIC_1_UDP (1 << 2)
+#define FILTER_GENERIC_1_TCP (1 << 3)
+#define FILTER_GENERIC_1_TCP_OR_UDP (1 << 4)
+#define FILTER_GENERIC_1_IP4SUM_OK (1 << 5)
+#define FILTER_GENERIC_1_L4SUM_OK (1 << 6)
+#define FILTER_GENERIC_1_IPFRAG (1 << 7)
+
+#define FILTER_GENERIC_1_KEY_LEN 64
+
+/*
+ * Version 1 of generic filter specification
+ * position is only 16 bits, reserving positions > 64k to be used by firmware
+ */
+struct filter_generic_1 {
+ u16 position; /* lower position comes first */
+ u32 mask_flags;
+ u32 val_flags;
+ u16 mask_vlan;
+ u16 val_vlan;
+ struct {
+ u8 mask[FILTER_GENERIC_1_KEY_LEN]; /* 0 bit means "don't care"*/
+ u8 val[FILTER_GENERIC_1_KEY_LEN];
+ } __attribute__((packed)) layer[FILTER_GENERIC_1_NUM_LAYERS];
+} __attribute__((packed));
+
+/* Specifies the filter_action type. */
+enum {
+ FILTER_ACTION_RQ_STEERING = 0,
+ FILTER_ACTION_V2 = 1,
+ FILTER_ACTION_MAX
+};
+
+struct filter_action {
+ u32 type;
+ union {
+ u32 rq_idx;
+ } u;
+} __attribute__((packed));
+
+#define FILTER_ACTION_RQ_STEERING_FLAG (1 << 0)
+#define FILTER_ACTION_FILTER_ID_FLAG (1 << 1)
+#define FILTER_ACTION_DROP_FLAG (1 << 2)
+#define FILTER_ACTION_V2_ALL (FILTER_ACTION_RQ_STEERING_FLAG \
+ | FILTER_ACTION_DROP_FLAG \
+ | FILTER_ACTION_FILTER_ID_FLAG)
+
+/* Version 2 of filter action must be a strict extension of struct filter_action
+ * where the first fields exactly match in size and meaning.
+ */
+struct filter_action_v2 {
+ u32 type;
+ u32 rq_idx;
+ u32 flags; /* use FILTER_ACTION_XXX_FLAG defines */
+ u16 filter_id;
+ u_int8_t reserved[32]; /* for future expansion */
+} __attribute__((packed));
+
+/* Specifies the filter type. */
+enum filter_type {
+ FILTER_USNIC_ID = 0,
+ FILTER_IPV4_5TUPLE = 1,
+ FILTER_MAC_VLAN = 2,
+ FILTER_VLAN_IP_3TUPLE = 3,
+ FILTER_NVGRE_VMQ = 4,
+ FILTER_USNIC_IP = 5,
+ FILTER_DPDK_1 = 6,
+ FILTER_MAX
+};
+
+#define FILTER_USNIC_ID_FLAG (1 << FILTER_USNIC_ID)
+#define FILTER_IPV4_5TUPLE_FLAG (1 << FILTER_IPV4_5TUPLE)
+#define FILTER_MAC_VLAN_FLAG (1 << FILTER_MAC_VLAN)
+#define FILTER_VLAN_IP_3TUPLE_FLAG (1 << FILTER_VLAN_IP_3TUPLE)
+#define FILTER_NVGRE_VMQ_FLAG (1 << FILTER_NVGRE_VMQ)
+#define FILTER_USNIC_IP_FLAG (1 << FILTER_USNIC_IP)
+#define FILTER_DPDK_1_FLAG (1 << FILTER_DPDK_1)
+#define FILTER_V1_ALL (FILTER_USNIC_ID_FLAG | \
+ FILTER_IPV4_5TUPLE_FLAG | \
+ FILTER_MAC_VLAN_FLAG | \
+ FILTER_VLAN_IP_3TUPLE_FLAG | \
+ FILTER_NVGRE_VMQ_FLAG | \
+ FILTER_USNIC_IP_FLAG | \
+ FILTER_DPDK_1_FLAG)
+
+struct filter {
+ u32 type;
+ union {
+ struct filter_usnic_id usnic;
+ struct filter_ipv4_5tuple ipv4;
+ struct filter_mac_vlan mac_vlan;
+ struct filter_vlan_ip_3tuple vlan_3tuple;
+ } u;
+} __attribute__((packed));
+
+/*
+ * This is a strict superset of "struct filter" and exists only
+ * because many drivers use "sizeof (struct filter)" in deciding TLV size.
+ * This new, larger struct filter would cause any code that uses that method
+ * to not work with older firmware, so we add filter_v2 to hold the
+ * new filter types. Drivers should use vnic_filter_size() to determine
+ * the TLV size instead of sizeof (struct fiter_v2) to guard against future
+ * growth.
+ */
+struct filter_v2 {
+ u32 type;
+ union {
+ struct filter_usnic_id usnic;
+ struct filter_ipv4_5tuple ipv4;
+ struct filter_mac_vlan mac_vlan;
+ struct filter_vlan_ip_3tuple vlan_3tuple;
+ struct filter_generic_1 generic_1;
+ } u;
+} __attribute__((packed));
+
+enum {
+ CLSF_TLV_FILTER = 0,
+ CLSF_TLV_ACTION = 1,
+};
+
+struct filter_tlv {
+ u_int32_t type;
+ u_int32_t length;
+ u_int32_t val[0];
+};
+
+/* Data for CMD_ADD_FILTER is 2 TLV and filter + action structs */
+#define FILTER_MAX_BUF_SIZE 100
+#define FILTER_V2_MAX_BUF_SIZE (sizeof(struct filter_v2) + \
+ sizeof(struct filter_action_v2) + \
+ (2 * sizeof(struct filter_tlv)))
+
+/*
+ * Compute actual structure size given filter type. To be "future-proof,"
+ * drivers should use this instead of "sizeof (struct filter_v2)" when
+ * computing length for TLV.
+ */
+static inline u_int32_t
+vnic_filter_size(struct filter_v2 *fp)
+{
+ u_int32_t size;
+
+ switch (fp->type) {
+ case FILTER_USNIC_ID:
+ size = sizeof(fp->u.usnic);
+ break;
+ case FILTER_IPV4_5TUPLE:
+ size = sizeof(fp->u.ipv4);
+ break;
+ case FILTER_MAC_VLAN:
+ case FILTER_NVGRE_VMQ:
+ size = sizeof(fp->u.mac_vlan);
+ break;
+ case FILTER_VLAN_IP_3TUPLE:
+ size = sizeof(fp->u.vlan_3tuple);
+ break;
+ case FILTER_USNIC_IP:
+ case FILTER_DPDK_1:
+ size = sizeof(fp->u.generic_1);
+ break;
+ default:
+ size = sizeof(fp->u);
+ break;
+ }
+ size += sizeof(fp->type);
+ return size;
+}
+
+
+enum {
+ CLSF_ADD = 0,
+ CLSF_DEL = 1,
+};
+
+/*
+ * Get the action structure size given action type. To be "future-proof,"
+ * drivers should use this instead of "sizeof (struct filter_action_v2)"
+ * when computing length for TLV.
+ */
+static inline u_int32_t
+vnic_action_size(struct filter_action_v2 *fap)
+{
+ u_int32_t size;
+
+ switch (fap->type) {
+ case FILTER_ACTION_RQ_STEERING:
+ size = sizeof(struct filter_action);
+ break;
+ case FILTER_ACTION_V2:
+ size = sizeof(struct filter_action_v2);
+ break;
+ default:
+ size = sizeof(struct filter_action);
+ break;
+ }
+ return size;
+}
+
+/*
+ * Writing cmd register causes STAT_BUSY to get set in status register.
+ * When cmd completes, STAT_BUSY will be cleared.
+ *
+ * If cmd completed successfully STAT_ERROR will be clear
+ * and args registers contain cmd-specific results.
+ *
+ * If cmd error, STAT_ERROR will be set and args[0] contains error code.
+ *
+ * status register is read-only. While STAT_BUSY is set,
+ * all other register contents are read-only.
+ */
+
+/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
+#define VNIC_DEVCMD_NARGS 15
+struct vnic_devcmd {
+ u32 status; /* RO */
+ u32 cmd; /* RW */
+ u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
+};
+
+/*
+ * Version 2 of the interface.
+ *
+ * Some things are carried over, notably the vnic_devcmd_cmd enum.
+ */
+
+/*
+ * Flags for vnic_devcmd2.flags
+ */
+
+#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */
+
+#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS
+struct vnic_devcmd2 {
+ u16 pad;
+ u16 flags;
+ u32 cmd; /* same command #defines as original */
+ u64 args[VNIC_DEVCMD2_NARGS];
+};
+
+#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS
+struct devcmd2_result {
+ u64 results[VNIC_DEVCMD2_NRESULTS];
+ u32 pad;
+ u16 completed_index; /* into copy WQ */
+ u8 error; /* same error codes as original */
+ u8 color; /* 0 or 1 as with completion queues */
+};
+
+#define DEVCMD2_RING_SIZE 32
+#define DEVCMD2_DESC_SIZE 128
+
+#define DEVCMD2_RESULTS_SIZE_MAX ((1 << 16) - 1)
+
+/* Overlay related definitions */
+
+/*
+ * This enum lists the flag associated with each of the overlay features
+ */
+typedef enum {
+ OVERLAY_FEATURE_NVGRE = 1,
+ OVERLAY_FEATURE_VXLAN,
+ OVERLAY_FEATURE_MAX,
+} overlay_feature_t;
+
+#define OVERLAY_OFFLOAD_ENABLE 0
+#define OVERLAY_OFFLOAD_DISABLE 1
+#define OVERLAY_OFFLOAD_ENABLE_V2 2
+
+#define OVERLAY_CFG_VXLAN_PORT_UPDATE 0
+
+/*
+ * Use this enum to get the supported versions for each of these features
+ * If you need to use the devcmd_get_supported_feature_version(), add
+ * the new feature into this enum and install function handler in devcmd.c
+ */
+typedef enum {
+ VIC_FEATURE_VXLAN,
+ VIC_FEATURE_RDMA,
+ VIC_FEATURE_MAX,
+} vic_feature_t;
+
+/*
+ * These flags are used in args[1] of devcmd CMD_GET_SUPP_FEATURE_VER
+ * to indicate the host driver about the VxLAN and Multi WQ features
+ * supported
+ */
+#define FEATURE_VXLAN_IPV6_INNER (1 << 0)
+#define FEATURE_VXLAN_IPV6_OUTER (1 << 1)
+#define FEATURE_VXLAN_MULTI_WQ (1 << 2)
+
+#define FEATURE_VXLAN_IPV6 (FEATURE_VXLAN_IPV6_INNER | \
+ FEATURE_VXLAN_IPV6_OUTER)
+
+/*
+ * CMD_CONFIG_GRPINTR subcommands
+ */
+typedef enum {
+ GRPINTR_ENABLE = 1,
+ GRPINTR_DISABLE,
+ GRPINTR_UPD_VECT,
+} grpintr_subcmd_t;
+
+#endif /* _VNIC_DEVCMD_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_enet.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_enet.h
new file mode 100644
index 00000000..901f3b46
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_enet.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_ENIC_H_
+#define _VNIC_ENIC_H_
+
+/* Hardware intr coalesce timer is in units of 1.5us */
+#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2 / 3)
+#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3 / 2)
+
+/* Device-specific region: enet configuration */
+struct vnic_enet_config {
+ u32 flags;
+ u32 wq_desc_count;
+ u32 rq_desc_count;
+ u16 mtu;
+ u16 intr_timer_deprecated;
+ u8 intr_timer_type;
+ u8 intr_mode;
+ char devname[16];
+ u32 intr_timer_usec;
+ u16 loop_tag;
+ u16 vf_rq_count;
+ u16 num_arfs;
+ u64 mem_paddr;
+ u16 rdma_qp_id;
+ u16 rdma_qp_count;
+ u16 rdma_resgrp;
+ u32 rdma_mr_id;
+ u32 rdma_mr_count;
+ u32 max_pkt_size;
+};
+
+#define VENETF_TSO 0x1 /* TSO enabled */
+#define VENETF_LRO 0x2 /* LRO enabled */
+#define VENETF_RXCSUM 0x4 /* RX csum enabled */
+#define VENETF_TXCSUM 0x8 /* TX csum enabled */
+#define VENETF_RSS 0x10 /* RSS enabled */
+#define VENETF_RSSHASH_IPV4 0x20 /* Hash on IPv4 fields */
+#define VENETF_RSSHASH_TCPIPV4 0x40 /* Hash on TCP + IPv4 fields */
+#define VENETF_RSSHASH_IPV6 0x80 /* Hash on IPv6 fields */
+#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */
+#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */
+#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
+#define VENETF_LOOP 0x800 /* Loopback enabled */
+#define VENETF_FAILOVER 0x1000 /* Fabric failover enabled */
+#define VENETF_USPACE_NIC 0x2000 /* vHPC enabled */
+#define VENETF_VMQ 0x4000 /* VMQ enabled */
+#define VENETF_ARFS 0x8000 /* ARFS enabled */
+#define VENETF_VXLAN 0x10000 /* VxLAN offload */
+#define VENETF_NVGRE 0x20000 /* NVGRE offload */
+#define VENETF_GRPINTR 0x40000 /* group interrupt */
+#define VENETF_NICSWITCH 0x80000 /* NICSWITCH enabled */
+#define VENETF_RSSHASH_UDPIPV4 0x100000 /* Hash on UDP + IPv4 fields */
+#define VENETF_RSSHASH_UDPIPV6 0x200000 /* Hash on UDP + IPv6 fields */
+
+#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */
+#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */
+
+#define VENET_INTR_MODE_ANY 0 /* Try MSI-X, then MSI, then INTx */
+#define VENET_INTR_MODE_MSI 1 /* Try MSI then INTx */
+#define VENET_INTR_MODE_INTX 2 /* Try INTx only */
+
+#endif /* _VNIC_ENIC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_intr.c b/src/spdk/dpdk/drivers/net/enic/base/vnic_intr.c
new file mode 100644
index 00000000..4487c7c4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_intr.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+
+void vnic_intr_free(struct vnic_intr *intr)
+{
+ intr->ctrl = NULL;
+}
+
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+ unsigned int index)
+{
+ intr->index = index;
+ intr->vdev = vdev;
+
+ intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
+ if (!intr->ctrl) {
+ pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
+ unsigned int coalescing_type, unsigned int mask_on_assertion)
+{
+ vnic_intr_coalescing_timer_set(intr, coalescing_timer);
+ iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
+ iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
+ iowrite32(0, &intr->ctrl->int_credits);
+}
+
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ u32 coalescing_timer)
+{
+ iowrite32(vnic_dev_intr_coal_timer_usec_to_hw(intr->vdev,
+ coalescing_timer), &intr->ctrl->coalescing_timer);
+}
+
+void vnic_intr_clean(struct vnic_intr *intr)
+{
+ iowrite32(0, &intr->ctrl->int_credits);
+}
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_intr.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_intr.h
new file mode 100644
index 00000000..13637385
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_intr.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_INTR_H_
+#define _VNIC_INTR_H_
+
+
+#include "vnic_dev.h"
+
+#define VNIC_INTR_TIMER_TYPE_ABS 0
+#define VNIC_INTR_TIMER_TYPE_QUIET 1
+
+/* Interrupt control */
+struct vnic_intr_ctrl {
+ u32 coalescing_timer; /* 0x00 */
+ u32 pad0;
+ u32 coalescing_value; /* 0x08 */
+ u32 pad1;
+ u32 coalescing_type; /* 0x10 */
+ u32 pad2;
+ u32 mask_on_assertion; /* 0x18 */
+ u32 pad3;
+ u32 mask; /* 0x20 */
+ u32 pad4;
+ u32 int_credits; /* 0x28 */
+ u32 pad5;
+ u32 int_credit_return; /* 0x30 */
+ u32 pad6;
+};
+
+struct vnic_intr {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
+};
+
+static inline void vnic_intr_unmask(struct vnic_intr *intr)
+{
+ iowrite32(0, &intr->ctrl->mask);
+}
+
+static inline void vnic_intr_mask(struct vnic_intr *intr)
+{
+ iowrite32(1, &intr->ctrl->mask);
+}
+
+static inline int vnic_intr_masked(struct vnic_intr *intr)
+{
+ return ioread32(&intr->ctrl->mask);
+}
+
+static inline void vnic_intr_return_credits(struct vnic_intr *intr,
+ unsigned int credits, int unmask, int reset_timer)
+{
+#define VNIC_INTR_UNMASK_SHIFT 16
+#define VNIC_INTR_RESET_TIMER_SHIFT 17
+
+ u32 int_credit_return = (credits & 0xffff) |
+ (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
+ (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
+
+ iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
+}
+
+static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
+{
+ return ioread32(&intr->ctrl->int_credits);
+}
+
+static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
+{
+ unsigned int credits = vnic_intr_credits(intr);
+ int unmask = 1;
+ int reset_timer = 1;
+
+ vnic_intr_return_credits(intr, credits, unmask, reset_timer);
+}
+
+static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
+{
+ /* read PBA without clearing */
+ return ioread32(legacy_pba);
+}
+
+void vnic_intr_free(struct vnic_intr *intr);
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+ unsigned int index);
+void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
+ unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ u32 coalescing_timer);
+void vnic_intr_clean(struct vnic_intr *intr);
+
+#endif /* _VNIC_INTR_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_nic.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_nic.h
new file mode 100644
index 00000000..16040852
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_nic.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_NIC_H_
+#define _VNIC_NIC_H_
+
+#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
+#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
+#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
+#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
+#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
+#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
+#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
+#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
+#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
+#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
+#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
+#define NIC_CFG_RSS_ENABLE (1UL << 22)
+#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
+#define NIC_CFG_RSS_ENABLE_SHIFT 22
+#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
+#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
+#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
+#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
+#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
+#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
+
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 (1 << 0)
+#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
+#define NIC_CFG_RSS_HASH_TYPE_RSVD1 (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_RSVD2 (1 << 6)
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV6 (1 << 7)
+
+static inline void vnic_set_nic_cfg(u32 *nic_cfg,
+ u8 rss_default_cpu, u8 rss_hash_type,
+ u8 rss_hash_bits, u8 rss_base_cpu,
+ u8 rss_enable, u8 tso_ipid_split_en,
+ u8 ig_vlan_strip_en)
+{
+ *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
+ ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
+ << NIC_CFG_RSS_HASH_TYPE_SHIFT) |
+ ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
+ << NIC_CFG_RSS_HASH_BITS_SHIFT) |
+ ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
+ << NIC_CFG_RSS_BASE_CPU_SHIFT) |
+ ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
+ << NIC_CFG_RSS_ENABLE_SHIFT) |
+ ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
+ << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
+ ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
+ << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
+}
+
+#endif /* _VNIC_NIC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_resource.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_resource.h
new file mode 100644
index 00000000..184bfa74
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_resource.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_RESOURCE_H_
+#define _VNIC_RESOURCE_H_
+
+#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
+#define VNIC_RES_VERSION 0x00000000L
+#define MGMTVNIC_MAGIC 0x544d474dL /* 'MGMT' */
+#define MGMTVNIC_VERSION 0x00000000L
+
+/* The MAC address assigned to the CFG vNIC is fixed. */
+#define MGMTVNIC_MAC { 0x02, 0x00, 0x54, 0x4d, 0x47, 0x4d }
+
+/* vNIC resource types */
+enum vnic_res_type {
+ RES_TYPE_EOL, /* End-of-list */
+ RES_TYPE_WQ, /* Work queues */
+ RES_TYPE_RQ, /* Receive queues */
+ RES_TYPE_CQ, /* Completion queues */
+ RES_TYPE_MEM, /* Window to dev memory */
+ RES_TYPE_NIC_CFG, /* Enet NIC config registers */
+ RES_TYPE_RSS_KEY, /* Enet RSS secret key */
+ RES_TYPE_RSS_CPU, /* Enet RSS indirection table */
+ RES_TYPE_TX_STATS, /* Netblock Tx statistic regs */
+ RES_TYPE_RX_STATS, /* Netblock Rx statistic regs */
+ RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
+ RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
+ RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
+ RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
+ RES_TYPE_DEBUG, /* Debug-only info */
+ RES_TYPE_DEV, /* Device-specific region */
+ RES_TYPE_DEVCMD, /* Device command region */
+ RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
+ RES_TYPE_SUBVNIC, /* subvnic resource type */
+ RES_TYPE_MQ_WQ, /* MQ Work queues */
+ RES_TYPE_MQ_RQ, /* MQ Receive queues */
+ RES_TYPE_MQ_CQ, /* MQ Completion queues */
+ RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */
+ RES_TYPE_DEVCMD2, /* Device control region */
+ RES_TYPE_MAX, /* Count of resource types */
+};
+
+struct vnic_resource_header {
+ u32 magic;
+ u32 version;
+};
+
+struct mgmt_barmap_hdr {
+ u32 magic; /* magic number */
+ u32 version; /* header format version */
+ u16 lif; /* loopback lif for mgmt frames */
+ u16 pci_slot; /* installed pci slot */
+ char serial[16]; /* card serial number */
+};
+
+struct vnic_resource {
+ u8 type;
+ u8 bar;
+ u8 pad[2];
+ u32 bar_offset;
+ u32 count;
+};
+
+#endif /* _VNIC_RESOURCE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_rq.c b/src/spdk/dpdk/drivers/net/enic/base/vnic_rq.c
new file mode 100644
index 00000000..72fcef55
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_rq.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "vnic_dev.h"
+#include "vnic_rq.h"
+
+void vnic_rq_free(struct vnic_rq *rq)
+{
+ struct vnic_dev *vdev;
+
+ vdev = rq->vdev;
+
+ vnic_dev_free_desc_ring(vdev, &rq->ring);
+
+ rq->ctrl = NULL;
+}
+
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int rc;
+ char res_name[NAME_MAX];
+ static int instance;
+
+ rq->index = index;
+ rq->vdev = vdev;
+
+ rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
+ if (!rq->ctrl) {
+ pr_err("Failed to hook RQ[%u] resource\n", index);
+ return -EINVAL;
+ }
+
+ vnic_rq_disable(rq);
+
+ snprintf(res_name, sizeof(res_name), "%d-rq-%u", instance++, index);
+ rc = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size,
+ rq->socket_id, res_name);
+ return rc;
+}
+
+void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+ unsigned int count = rq->ring.desc_count;
+
+ paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &rq->ctrl->ring_base);
+ iowrite32(count, &rq->ctrl->ring_size);
+ iowrite32(cq_index, &rq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
+ iowrite32(0, &rq->ctrl->error_status);
+ iowrite32(fetch_index, &rq->ctrl->fetch_index);
+ iowrite32(posted_index, &rq->ctrl->posted_index);
+ if (rq->data_queue_enable)
+ iowrite32(((1 << 10) | rq->data_queue_idx),
+ &rq->ctrl->data_ring);
+ else
+ iowrite32(0, &rq->ctrl->data_ring);
+}
+
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u32 fetch_index = 0;
+
+ /* Use current fetch_index as the ring starting point */
+ fetch_index = ioread32(&rq->ctrl->fetch_index);
+
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ /* Hardware surprise removal: reset fetch_index */
+ fetch_index = 0;
+ }
+
+ vnic_rq_init_start(rq, cq_index,
+ fetch_index, fetch_index,
+ error_interrupt_enable,
+ error_interrupt_offset);
+ rq->rxst_idx = 0;
+ rq->tot_pkts = 0;
+ rq->pkt_first_seg = NULL;
+ rq->pkt_last_seg = NULL;
+}
+
+unsigned int vnic_rq_error_status(struct vnic_rq *rq)
+{
+ return ioread32(&rq->ctrl->error_status);
+}
+
+void vnic_rq_enable(struct vnic_rq *rq)
+{
+ iowrite32(1, &rq->ctrl->enable);
+}
+
+int vnic_rq_disable(struct vnic_rq *rq)
+{
+ unsigned int wait;
+
+ iowrite32(0, &rq->ctrl->enable);
+
+ /* Wait for HW to ACK disable request */
+ for (wait = 0; wait < 1000; wait++) {
+ if (!(ioread32(&rq->ctrl->running)))
+ return 0;
+ udelay(10);
+ }
+
+ pr_err("Failed to disable RQ[%d]\n", rq->index);
+
+ return -ETIMEDOUT;
+}
+
+void vnic_rq_clean(struct vnic_rq *rq,
+ void (*buf_clean)(struct rte_mbuf **buf))
+{
+ struct rte_mbuf **buf;
+ u32 fetch_index, i;
+ unsigned int count = rq->ring.desc_count;
+
+ buf = &rq->mbuf_ring[0];
+
+ for (i = 0; i < count; i++) {
+ (*buf_clean)(buf);
+ buf++;
+ }
+ rq->ring.desc_avail = count - 1;
+ rq->rx_nb_hold = 0;
+
+ /* Use current fetch_index as the ring starting point */
+ fetch_index = ioread32(&rq->ctrl->fetch_index);
+
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ /* Hardware surprise removal: reset fetch_index */
+ fetch_index = 0;
+ }
+
+ iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+ vnic_dev_clear_desc_ring(&rq->ring);
+}
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_rq.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_rq.h
new file mode 100644
index 00000000..d8e67f74
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_rq.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_RQ_H_
+#define _VNIC_RQ_H_
+
+#include <stdbool.h>
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/* Receive queue control */
+struct vnic_rq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 posted_index; /* 0x10 */
+ u32 pad1;
+ u32 cq_index; /* 0x18 */
+ u32 pad2;
+ u32 enable; /* 0x20 */
+ u32 pad3;
+ u32 running; /* 0x28 */
+ u32 pad4;
+ u32 fetch_index; /* 0x30 */
+ u32 pad5;
+ u32 error_interrupt_enable; /* 0x38 */
+ u32 pad6;
+ u32 error_interrupt_offset; /* 0x40 */
+ u32 pad7;
+ u32 error_status; /* 0x48 */
+ u32 pad8;
+ u32 tcp_sn; /* 0x50 */
+ u32 pad9;
+ u32 unused; /* 0x58 */
+ u32 pad10;
+ u32 dca_select; /* 0x60 */
+ u32 pad11;
+ u32 dca_value; /* 0x68 */
+ u32 pad12;
+ u32 data_ring; /* 0x70 */
+ u32 pad13;
+ u32 header_split; /* 0x78 */
+ u32 pad14;
+};
+
+struct vnic_rq {
+ unsigned int index;
+ unsigned int posted_index;
+ struct vnic_dev *vdev;
+ struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ struct rte_mbuf **free_mbufs; /* reserve of free mbufs */
+ int num_free_mbufs;
+ struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
+ unsigned int mbuf_next_idx; /* next mb to consume */
+ void *os_buf_head;
+ unsigned int pkts_outstanding;
+ uint16_t rx_nb_hold;
+ uint16_t rx_free_thresh;
+ unsigned int socket_id;
+ struct rte_mempool *mp;
+ uint16_t rxst_idx;
+ uint32_t tot_pkts;
+ uint16_t data_queue_idx;
+ uint8_t data_queue_enable;
+ uint8_t is_sop;
+ uint8_t in_use;
+ struct rte_mbuf *pkt_first_seg;
+ struct rte_mbuf *pkt_last_seg;
+ unsigned int max_mbufs_per_pkt;
+ uint16_t tot_nb_desc;
+ bool need_initial_post;
+};
+
+static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
+{
+ /* how many does SW own? */
+ return rq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
+{
+ /* how many does HW own? */
+ return rq->ring.desc_count - rq->ring.desc_avail - 1;
+}
+
+
+
+enum desc_return_options {
+ VNIC_RQ_RETURN_DESC,
+ VNIC_RQ_DEFER_RETURN_DESC,
+};
+
+static inline int vnic_rq_fill(struct vnic_rq *rq,
+ int (*buf_fill)(struct vnic_rq *rq))
+{
+ int err;
+
+ while (vnic_rq_desc_avail(rq) > 0) {
+
+ err = (*buf_fill)(rq);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static inline int vnic_rq_fill_count(struct vnic_rq *rq,
+ int (*buf_fill)(struct vnic_rq *rq), unsigned int count)
+{
+ int err;
+
+ while ((vnic_rq_desc_avail(rq) > 0) && (count--)) {
+
+ err = (*buf_fill)(rq);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq);
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error);
+unsigned int vnic_rq_error_status(struct vnic_rq *rq);
+void vnic_rq_enable(struct vnic_rq *rq);
+int vnic_rq_disable(struct vnic_rq *rq);
+void vnic_rq_clean(struct vnic_rq *rq,
+ void (*buf_clean)(struct rte_mbuf **buf));
+#endif /* _VNIC_RQ_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_rss.c b/src/spdk/dpdk/drivers/net/enic/base/vnic_rss.c
new file mode 100644
index 00000000..f41b8660
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_rss.c
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "enic_compat.h"
+#include "vnic_rss.h"
+
+void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key)
+{
+ u32 i;
+ u32 *p;
+ u16 *q;
+
+ for (i = 0; i < 4; ++i) {
+ p = (u32 *)(key + (10 * i));
+ iowrite32(*p++, &rss_key->key[i].b[0]);
+ iowrite32(*p++, &rss_key->key[i].b[4]);
+ q = (u16 *)p;
+ iowrite32(*q, &rss_key->key[i].b[8]);
+ }
+}
+
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_rss.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_rss.h
new file mode 100644
index 00000000..abd7b9f1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_rss.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_RSS_H_
+#define _VNIC_RSS_H_
+
+/* RSS key array */
+union vnic_rss_key {
+ struct {
+ u8 b[10];
+ u8 b_pad[6];
+ } key[4];
+ u64 raw[8];
+};
+
+/* RSS cpu array */
+union vnic_rss_cpu {
+ struct {
+ u8 b[4];
+ u8 b_pad[4];
+ } cpu[32];
+ u64 raw[32];
+};
+
+void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key);
+void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
+void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key);
+void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
+
+#endif /* _VNIC_RSS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_stats.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_stats.h
new file mode 100644
index 00000000..49429cc2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_stats.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_STATS_H_
+#define _VNIC_STATS_H_
+
+/* Tx statistics */
+struct vnic_tx_stats {
+ u64 tx_frames_ok;
+ u64 tx_unicast_frames_ok;
+ u64 tx_multicast_frames_ok;
+ u64 tx_broadcast_frames_ok;
+ u64 tx_bytes_ok;
+ u64 tx_unicast_bytes_ok;
+ u64 tx_multicast_bytes_ok;
+ u64 tx_broadcast_bytes_ok;
+ u64 tx_drops;
+ u64 tx_errors;
+ u64 tx_tso;
+ u64 rsvd[16];
+};
+
+/* Rx statistics */
+struct vnic_rx_stats {
+ u64 rx_frames_ok;
+ u64 rx_frames_total;
+ u64 rx_unicast_frames_ok;
+ u64 rx_multicast_frames_ok;
+ u64 rx_broadcast_frames_ok;
+ u64 rx_bytes_ok;
+ u64 rx_unicast_bytes_ok;
+ u64 rx_multicast_bytes_ok;
+ u64 rx_broadcast_bytes_ok;
+ u64 rx_drop;
+ u64 rx_no_bufs;
+ u64 rx_errors;
+ u64 rx_rss;
+ u64 rx_crc_errors;
+ u64 rx_frames_64;
+ u64 rx_frames_127;
+ u64 rx_frames_255;
+ u64 rx_frames_511;
+ u64 rx_frames_1023;
+ u64 rx_frames_1518;
+ u64 rx_frames_to_max;
+ u64 rsvd[16];
+};
+
+struct vnic_stats {
+ struct vnic_tx_stats tx;
+ struct vnic_rx_stats rx;
+};
+
+#endif /* _VNIC_STATS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_wq.c b/src/spdk/dpdk/drivers/net/enic/base/vnic_wq.c
new file mode 100644
index 00000000..c9bf3572
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_wq.c
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+
+static inline
+int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int index, enum vnic_res_type res_type)
+{
+ wq->ctrl = vnic_dev_get_res(vdev, res_type, index);
+ if (!wq->ctrl)
+ return -EINVAL;
+ return 0;
+}
+
+static inline
+int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ char res_name[NAME_MAX];
+ static int instance;
+
+ snprintf(res_name, sizeof(res_name), "%d-wq-%u", instance++, wq->index);
+ return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size,
+ wq->socket_id, res_name);
+}
+
+static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
+{
+ unsigned int count = wq->ring.desc_count;
+ /* Allocate the mbuf ring */
+ wq->bufs = (struct rte_mbuf **)rte_zmalloc_socket("wq->bufs",
+ sizeof(struct rte_mbuf *) * count,
+ RTE_CACHE_LINE_SIZE, wq->socket_id);
+ wq->head_idx = 0;
+ wq->tail_idx = 0;
+ if (wq->bufs == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void vnic_wq_free(struct vnic_wq *wq)
+{
+ struct vnic_dev *vdev;
+
+ vdev = wq->vdev;
+
+ vnic_dev_free_desc_ring(vdev, &wq->ring);
+
+ rte_free(wq->bufs);
+ wq->ctrl = NULL;
+}
+
+
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ wq->index = index;
+ wq->vdev = vdev;
+
+ err = vnic_wq_get_ctrl(vdev, wq, index, RES_TYPE_WQ);
+ if (err) {
+ pr_err("Failed to hook WQ[%d] resource, err %d\n", index, err);
+ return err;
+ }
+
+ vnic_wq_disable(wq);
+
+ err = vnic_wq_alloc_ring(vdev, wq, desc_count, desc_size);
+ if (err)
+ return err;
+
+ err = vnic_wq_alloc_bufs(wq);
+ if (err) {
+ vnic_wq_free(wq);
+ return err;
+ }
+
+ return 0;
+}
+
+void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+ unsigned int count = wq->ring.desc_count;
+
+ paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &wq->ctrl->ring_base);
+ iowrite32(count, &wq->ctrl->ring_size);
+ iowrite32(fetch_index, &wq->ctrl->fetch_index);
+ iowrite32(posted_index, &wq->ctrl->posted_index);
+ iowrite32(cq_index, &wq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+ iowrite32(0, &wq->ctrl->error_status);
+
+ wq->head_idx = fetch_index;
+ wq->tail_idx = wq->head_idx;
+}
+
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ vnic_wq_init_start(wq, cq_index, 0, 0,
+ error_interrupt_enable,
+ error_interrupt_offset);
+ wq->cq_pend = 0;
+ wq->last_completed_index = 0;
+}
+
+unsigned int vnic_wq_error_status(struct vnic_wq *wq)
+{
+ return ioread32(&wq->ctrl->error_status);
+}
+
+void vnic_wq_enable(struct vnic_wq *wq)
+{
+ iowrite32(1, &wq->ctrl->enable);
+}
+
+int vnic_wq_disable(struct vnic_wq *wq)
+{
+ unsigned int wait;
+
+ iowrite32(0, &wq->ctrl->enable);
+
+ /* Wait for HW to ACK disable request */
+ for (wait = 0; wait < 1000; wait++) {
+ if (!(ioread32(&wq->ctrl->running)))
+ return 0;
+ udelay(10);
+ }
+
+ pr_err("Failed to disable WQ[%d]\n", wq->index);
+
+ return -ETIMEDOUT;
+}
+
+void vnic_wq_clean(struct vnic_wq *wq,
+ void (*buf_clean)(struct rte_mbuf **buf))
+{
+ struct rte_mbuf **buf;
+ unsigned int to_clean = wq->tail_idx;
+
+ buf = &wq->bufs[to_clean];
+
+ while (vnic_wq_desc_used(wq) > 0) {
+
+ (*buf_clean)(buf);
+ to_clean = buf_idx_incr(wq->ring.desc_count, to_clean);
+
+ buf = &wq->bufs[to_clean];
+ wq->ring.desc_avail++;
+ }
+
+ wq->head_idx = 0;
+ wq->tail_idx = 0;
+ wq->last_completed_index = 0;
+ *((uint32_t *)wq->cqmsg_rz->addr) = 0;
+
+ iowrite32(0, &wq->ctrl->fetch_index);
+ iowrite32(0, &wq->ctrl->posted_index);
+ iowrite32(0, &wq->ctrl->error_status);
+
+ vnic_dev_clear_desc_ring(&wq->ring);
+}
diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_wq.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_wq.h
new file mode 100644
index 00000000..236cf696
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_wq.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_WQ_H_
+#define _VNIC_WQ_H_
+
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+#include <rte_memzone.h>
+
+/* Work queue control */
+struct vnic_wq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 posted_index; /* 0x10 */
+ u32 pad1;
+ u32 cq_index; /* 0x18 */
+ u32 pad2;
+ u32 enable; /* 0x20 */
+ u32 pad3;
+ u32 running; /* 0x28 */
+ u32 pad4;
+ u32 fetch_index; /* 0x30 */
+ u32 pad5;
+ u32 dca_value; /* 0x38 */
+ u32 pad6;
+ u32 error_interrupt_enable; /* 0x40 */
+ u32 pad7;
+ u32 error_interrupt_offset; /* 0x48 */
+ u32 pad8;
+ u32 error_status; /* 0x50 */
+ u32 pad9;
+};
+
+struct vnic_wq {
+ unsigned int index;
+ uint64_t tx_offload_notsup_mask;
+ struct vnic_dev *vdev;
+ struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ struct rte_mbuf **bufs;
+ unsigned int head_idx;
+ unsigned int cq_pend;
+ unsigned int tail_idx;
+ unsigned int socket_id;
+ const struct rte_memzone *cqmsg_rz;
+ uint16_t last_completed_index;
+ uint64_t offloads;
+};
+
+static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
+{
+ /* how many does SW own? */
+ return wq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
+{
+ /* how many does HW own? */
+ return wq->ring.desc_count - wq->ring.desc_avail - 1;
+}
+
+#define PI_LOG2_CACHE_LINE_SIZE 5
+#define PI_INDEX_BITS 12
+#define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1)
+#define PI_PREFETCH_LEN_MASK ((1U << PI_LOG2_CACHE_LINE_SIZE) - 1)
+#define PI_PREFETCH_LEN_OFF 16
+#define PI_PREFETCH_ADDR_BITS 43
+#define PI_PREFETCH_ADDR_MASK ((1ULL << PI_PREFETCH_ADDR_BITS) - 1)
+#define PI_PREFETCH_ADDR_OFF 21
+
+/** How many cache lines are touched by buffer (addr, len). */
+static inline unsigned int num_cache_lines_touched(dma_addr_t addr,
+ unsigned int len)
+{
+ const unsigned long mask = PI_PREFETCH_LEN_MASK;
+ const unsigned long laddr = (unsigned long)addr;
+ unsigned long lines, equiv_len;
+ /* A. If addr is aligned, our solution is just to round up len to the
+ next boundary.
+
+ e.g. addr = 0, len = 48
+ +--------------------+
+ |XXXXXXXXXXXXXXXXXXXX| 32-byte cacheline a
+ +--------------------+
+ |XXXXXXXXXX | cacheline b
+ +--------------------+
+
+ B. If addr is not aligned, however, we may use an extra
+ cacheline. e.g. addr = 12, len = 22
+
+ +--------------------+
+ | XXXXXXXXXXXXX|
+ +--------------------+
+ |XX |
+ +--------------------+
+
+ Our solution is to make the problem equivalent to case A
+ above by adding the empty space in the first cacheline to the length:
+ unsigned long len;
+
+ +--------------------+
+ |eeeeeeeXXXXXXXXXXXXX| "e" is empty space, which we add to len
+ +--------------------+
+ |XX |
+ +--------------------+
+
+ */
+ equiv_len = len + (laddr & mask);
+
+ /* Now we can just round up this len to the next 32-byte boundary. */
+ lines = (equiv_len + mask) & (~mask);
+
+ /* Scale bytes -> cachelines. */
+ return lines >> PI_LOG2_CACHE_LINE_SIZE;
+}
+
+static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len,
+ unsigned int index)
+{
+ unsigned int num_cache_lines = num_cache_lines_touched(addr, len);
+ /* Wish we could avoid a branch here. We could have separate
+ * vnic_wq_post() and vinc_wq_post_inline(), the latter
+ * only supporting < 1k (2^5 * 2^5) sends, I suppose. This would
+ * eliminate the if (eop) branch as well.
+ */
+ if (num_cache_lines > PI_PREFETCH_LEN_MASK)
+ num_cache_lines = 0;
+ return (index & PI_INDEX_MASK) |
+ ((num_cache_lines & PI_PREFETCH_LEN_MASK) << PI_PREFETCH_LEN_OFF) |
+ (((addr >> PI_LOG2_CACHE_LINE_SIZE) &
+ PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF);
+}
+
+static inline uint32_t
+buf_idx_incr(uint32_t n_descriptors, uint32_t idx)
+{
+ idx++;
+ if (unlikely(idx == n_descriptors))
+ idx = 0;
+ return idx;
+}
+
+void vnic_wq_free(struct vnic_wq *wq);
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error);
+unsigned int vnic_wq_error_status(struct vnic_wq *wq);
+void vnic_wq_enable(struct vnic_wq *wq);
+int vnic_wq_disable(struct vnic_wq *wq);
+void vnic_wq_clean(struct vnic_wq *wq,
+ void (*buf_clean)(struct rte_mbuf **buf));
+#endif /* _VNIC_WQ_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/base/wq_enet_desc.h b/src/spdk/dpdk/drivers/net/enic/base/wq_enet_desc.h
new file mode 100644
index 00000000..cdf22fff
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/base/wq_enet_desc.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _WQ_ENET_DESC_H_
+#define _WQ_ENET_DESC_H_
+
+/* Ethernet work queue descriptor: 16B */
+struct wq_enet_desc {
+ __le64 address;
+ __le16 length;
+ __le16 mss_loopback;
+ __le16 header_length_flags;
+ __le16 vlan_tag;
+};
+
+#define WQ_ENET_ADDR_BITS 64
+#define WQ_ENET_LEN_BITS 14
+#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
+#define WQ_ENET_MSS_BITS 14
+#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
+#define WQ_ENET_MSS_SHIFT 2
+#define WQ_ENET_LOOPBACK_SHIFT 1
+#define WQ_ENET_HDRLEN_BITS 10
+#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
+#define WQ_ENET_FLAGS_OM_BITS 2
+#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
+#define WQ_ENET_FLAGS_EOP_SHIFT 12
+#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
+#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
+#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
+
+#define WQ_ENET_OFFLOAD_MODE_CSUM 0
+#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
+#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
+#define WQ_ENET_OFFLOAD_MODE_TSO 3
+
+static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
+ u64 address, u16 length, u16 mss, u16 header_length,
+ u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
+ u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
+{
+ desc->address = cpu_to_le64(address);
+ desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
+ desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
+ WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
+ desc->header_length_flags = cpu_to_le16(
+ (header_length & WQ_ENET_HDRLEN_MASK) |
+ (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
+ (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
+ (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
+ (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
+ (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
+ desc->vlan_tag = cpu_to_le16(vlan_tag);
+}
+
+static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
+ u64 *address, u16 *length, u16 *mss, u16 *header_length,
+ u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
+ u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
+{
+ *address = le64_to_cpu(desc->address);
+ *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
+ *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
+ WQ_ENET_MSS_MASK;
+ *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
+ WQ_ENET_LOOPBACK_SHIFT) & 1);
+ *header_length = le16_to_cpu(desc->header_length_flags) &
+ WQ_ENET_HDRLEN_MASK;
+ *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
+ *eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_EOP_SHIFT) & 1);
+ *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
+ *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
+ *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
+ *vlan_tag = le16_to_cpu(desc->vlan_tag);
+}
+
+#endif /* _WQ_ENET_DESC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/enic.h b/src/spdk/dpdk/drivers/net/enic/enic.h
new file mode 100644
index 00000000..7c27bd51
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/enic.h
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _ENIC_H_
+#define _ENIC_H_
+
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "vnic_rss.h"
+#include "enic_res.h"
+#include "cq_enet_desc.h"
+#include <stdbool.h>
+#include <sys/queue.h>
+#include <rte_spinlock.h>
+
+#define DRV_NAME "enic_pmd"
+#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver"
+#define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc"
+
+#define ENIC_MAX_MAC_ADDR 64
+
+#define VLAN_ETH_HLEN 18
+
+#define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
+
+#define ENICPMD_BDF_LENGTH 13 /* 0000:00:00.0'\0' */
+#define ENIC_CALC_IP_CKSUM 1
+#define ENIC_CALC_TCP_UDP_CKSUM 2
+#define ENIC_MAX_MTU 9000
+#define ENIC_PAGE_SIZE 4096
+#define PAGE_ROUND_UP(x) \
+ ((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1)))
+
+#define ENICPMD_VFIO_PATH "/dev/vfio/vfio"
+/*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/
+
+#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
+#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
+
+/* Special Filter id for non-specific packet flagging. Don't change value */
+#define ENIC_MAGIC_FILTER_ID 0xffff
+
+#define ENICPMD_FDIR_MAX 64
+
+/* HW default VXLAN port */
+#define ENIC_DEFAULT_VXLAN_PORT 4789
+
+/*
+ * Interrupt 0: LSC and errors
+ * Interrupt 1: rx queue 0
+ * Interrupt 2: rx queue 1
+ * ...
+ */
+#define ENICPMD_LSC_INTR_OFFSET 0
+#define ENICPMD_RXQ_INTR_OFFSET 1
+
+struct enic_fdir_node {
+ struct rte_eth_fdir_filter filter;
+ u16 fltr_id;
+ u16 rq_index;
+};
+
+struct enic_fdir {
+ struct rte_eth_fdir_stats stats;
+ struct rte_hash *hash;
+ struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
+ u32 modes;
+ u32 types_mask;
+ void (*copy_fltr_fn)(struct filter_v2 *filt,
+ struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks);
+};
+
+struct enic_soft_stats {
+ rte_atomic64_t rx_nombuf;
+ rte_atomic64_t rx_packet_errors;
+ rte_atomic64_t tx_oversized;
+};
+
+struct enic_memzone_entry {
+ const struct rte_memzone *rz;
+ LIST_ENTRY(enic_memzone_entry) entries;
+};
+
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next;
+ u16 enic_filter_id;
+ struct filter_v2 enic_filter;
+};
+
+/* Per-instance private data structure */
+struct enic {
+ struct enic *next;
+ struct rte_pci_device *pdev;
+ struct vnic_enet_config config;
+ struct vnic_dev_bar bar0;
+ struct vnic_dev *vdev;
+
+ unsigned int port_id;
+ bool overlay_offload;
+ struct rte_eth_dev *rte_dev;
+ struct enic_fdir fdir;
+ char bdf_name[ENICPMD_BDF_LENGTH];
+ int dev_fd;
+ int iommu_group_fd;
+ int iommu_groupid;
+ int eventfd;
+ uint8_t mac_addr[ETH_ALEN];
+ pthread_t err_intr_thread;
+ int promisc;
+ int allmulti;
+ u8 ig_vlan_strip_en;
+ int link_status;
+ u8 hw_ip_checksum;
+ u16 max_mtu;
+ u8 adv_filters;
+ u32 flow_filter_mode;
+ u8 filter_actions; /* HW supported actions */
+ bool vxlan;
+ bool disable_overlay; /* devargs disable_overlay=1 */
+ bool nic_cfg_chk; /* NIC_CFG_CHK available */
+ bool udp_rss_weak; /* Bodega style UDP RSS */
+ uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */
+ uint16_t vxlan_port; /* current vxlan port pushed to NIC */
+
+ unsigned int flags;
+ unsigned int priv_flags;
+
+ /* work queue (len = conf_wq_count) */
+ struct vnic_wq *wq;
+ unsigned int wq_count; /* equals eth_dev nb_tx_queues */
+
+ /* receive queue (len = conf_rq_count) */
+ struct vnic_rq *rq;
+ unsigned int rq_count; /* equals eth_dev nb_rx_queues */
+
+ /* completion queue (len = conf_cq_count) */
+ struct vnic_cq *cq;
+ unsigned int cq_count; /* equals rq_count + wq_count */
+
+ /* interrupt vectors (len = conf_intr_count) */
+ struct vnic_intr *intr;
+ unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */
+
+ /* software counters */
+ struct enic_soft_stats soft_stats;
+
+ /* configured resources on vic */
+ unsigned int conf_rq_count;
+ unsigned int conf_wq_count;
+ unsigned int conf_cq_count;
+ unsigned int conf_intr_count;
+
+ /* linked list storing memory allocations */
+ LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
+ rte_spinlock_t memzone_list_lock;
+ rte_spinlock_t mtu_lock;
+
+ LIST_HEAD(enic_flows, rte_flow) flows;
+ rte_spinlock_t flows_lock;
+
+ /* RSS */
+ uint16_t reta_size;
+ uint8_t hash_key_size;
+ uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */
+ /*
+ * Keep a copy of current RSS config for queries, as we cannot retrieve
+ * it from the NIC.
+ */
+ uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
+ uint8_t rss_enable;
+ uint64_t rss_hf; /* ETH_RSS flags */
+ union vnic_rss_key rss_key;
+ union vnic_rss_cpu rss_cpu;
+
+ uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
+ uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
+ uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */
+ uint64_t tx_offload_mask; /* PKT_TX flags accepted */
+};
+
+/* Compute ethdev's max packet size from MTU */
+static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
+{
+ /* ethdev max size includes eth and crc whereas NIC MTU does not */
+ return mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+}
+
+/* Get the CQ index from a Start of Packet(SOP) RQ index */
+static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx)
+{
+ return sop_idx / 2;
+}
+
+/* Get the RTE RQ index from a Start of Packet(SOP) RQ index */
+static inline unsigned int enic_sop_rq_idx_to_rte_idx(unsigned int sop_idx)
+{
+ return sop_idx / 2;
+}
+
+/* Get the Start of Packet(SOP) RQ index from a RTE RQ index */
+static inline unsigned int enic_rte_rq_idx_to_sop_idx(unsigned int rte_idx)
+{
+ return rte_idx * 2;
+}
+
+/* Get the Data RQ index from a RTE RQ index */
+static inline unsigned int enic_rte_rq_idx_to_data_idx(unsigned int rte_idx)
+{
+ return rte_idx * 2 + 1;
+}
+
+static inline unsigned int enic_vnic_rq_count(struct enic *enic)
+{
+ return enic->rq_count * 2;
+}
+
+static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
+{
+ /* Scatter rx uses two receive queues together with one
+ * completion queue, so the completion queue number is no
+ * longer the same as the rq number.
+ */
+ return rq / 2;
+}
+
+static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
+{
+ return enic->rq_count + wq;
+}
+
+static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
+{
+ return (struct enic *)eth_dev->data->dev_private;
+}
+
+static inline uint32_t
+enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
+{
+ uint32_t d = i0 + i1;
+ d -= (d >= n_descriptors) ? n_descriptors : 0;
+ return d;
+}
+
+static inline uint32_t
+enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
+{
+ int32_t d = i1 - i0;
+ return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d);
+}
+
+static inline uint32_t
+enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
+{
+ idx++;
+ if (unlikely(idx == n_descriptors))
+ idx = 0;
+ return idx;
+}
+
+void enic_fdir_stats_get(struct enic *enic,
+ struct rte_eth_fdir_stats *stats);
+int enic_fdir_add_fltr(struct enic *enic,
+ struct rte_eth_fdir_filter *params);
+int enic_fdir_del_fltr(struct enic *enic,
+ struct rte_eth_fdir_filter *params);
+void enic_free_wq(void *txq);
+int enic_alloc_intr_resources(struct enic *enic);
+int enic_setup_finish(struct enic *enic);
+int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, uint16_t nb_desc);
+void enic_start_wq(struct enic *enic, uint16_t queue_idx);
+int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
+void enic_start_rq(struct enic *enic, uint16_t queue_idx);
+int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
+void enic_free_rq(void *rxq);
+int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, struct rte_mempool *mp,
+ uint16_t nb_desc, uint16_t free_thresh);
+int enic_set_vnic_res(struct enic *enic);
+int enic_init_rss_nic_cfg(struct enic *enic);
+int enic_set_rss_conf(struct enic *enic,
+ struct rte_eth_rss_conf *rss_conf);
+int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu);
+int enic_set_vlan_strip(struct enic *enic);
+int enic_enable(struct enic *enic);
+int enic_disable(struct enic *enic);
+void enic_remove(struct enic *enic);
+int enic_get_link_status(struct enic *enic);
+int enic_dev_stats_get(struct enic *enic,
+ struct rte_eth_stats *r_stats);
+void enic_dev_stats_clear(struct enic *enic);
+void enic_add_packet_filter(struct enic *enic);
+int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
+int enic_del_mac_address(struct enic *enic, int mac_index);
+unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
+void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
+ struct rte_mbuf *tx_pkt, unsigned short len,
+ uint8_t sop, uint8_t eop, uint8_t cq_entry,
+ uint16_t ol_flags, uint16_t vlan_tag);
+
+void enic_post_wq_index(struct vnic_wq *wq);
+int enic_probe(struct enic *enic);
+int enic_clsf_init(struct enic *enic);
+void enic_clsf_destroy(struct enic *enic);
+uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t enic_dummy_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
+int enic_link_update(struct enic *enic);
+void enic_fdir_info(struct enic *enic);
+void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
+void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks);
+void copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks);
+extern const struct rte_flow_ops enic_flow_ops;
+#endif /* _ENIC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/enic_clsf.c b/src/spdk/dpdk/drivers/net/enic/enic_clsf.c
new file mode 100644
index 00000000..9d95201e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/enic_clsf.c
@@ -0,0 +1,491 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <libgen.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_hash.h>
+#include <rte_byteorder.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_sctp.h>
+#include <rte_eth_ctrl.h>
+
+#include "enic_compat.h"
+#include "enic.h"
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_nic.h"
+
+#ifdef RTE_ARCH_X86
+#include <rte_hash_crc.h>
+#define DEFAULT_HASH_FUNC rte_hash_crc
+#else
+#include <rte_jhash.h>
+#define DEFAULT_HASH_FUNC rte_jhash
+#endif
+
+#define ENICPMD_CLSF_HASH_ENTRIES ENICPMD_FDIR_MAX
+
+void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
+{
+ *stats = enic->fdir.stats;
+}
+
+void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
+{
+ info->mode = (enum rte_fdir_mode)enic->fdir.modes;
+ info->flow_types_mask[0] = enic->fdir.types_mask;
+}
+
+void enic_fdir_info(struct enic *enic)
+{
+ enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
+ enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ if (enic->adv_filters) {
+ enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ enic->fdir.copy_fltr_fn = copy_fltr_v2;
+ } else {
+ enic->fdir.copy_fltr_fn = copy_fltr_v1;
+ }
+}
+
+static void
+enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
+ enum filter_generic_1_layer layer, void *mask, void *val,
+ unsigned int len)
+{
+ gp->mask_flags |= flag;
+ gp->val_flags |= gp->mask_flags;
+ memcpy(gp->layer[layer].mask, mask, len);
+ memcpy(gp->layer[layer].val, val, len);
+}
+
+/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
+ * without advanced filter support.
+ */
+void
+copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ __rte_unused struct rte_eth_fdir_masks *masks)
+{
+ fltr->type = FILTER_IPV4_5TUPLE;
+ fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
+ input->flow.ip4_flow.src_ip);
+ fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
+ input->flow.ip4_flow.dst_ip);
+ fltr->u.ipv4.src_port = rte_be_to_cpu_16(
+ input->flow.udp4_flow.src_port);
+ fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
+ input->flow.udp4_flow.dst_port);
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
+ fltr->u.ipv4.protocol = PROTO_TCP;
+ else
+ fltr->u.ipv4.protocol = PROTO_UDP;
+
+ fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+}
+
+/* Copy Flow Director filter to a VIC generic filter (requires advanced
+ * filter support.
+ */
+void
+copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks)
+{
+ struct filter_generic_1 *gp = &fltr->u.generic_1;
+
+ fltr->type = FILTER_DPDK_1;
+ memset(gp, 0, sizeof(*gp));
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ struct udp_hdr udp_mask, udp_val;
+ memset(&udp_mask, 0, sizeof(udp_mask));
+ memset(&udp_val, 0, sizeof(udp_val));
+
+ if (input->flow.udp4_flow.src_port) {
+ udp_mask.src_port = masks->src_port_mask;
+ udp_val.src_port = input->flow.udp4_flow.src_port;
+ }
+ if (input->flow.udp4_flow.dst_port) {
+ udp_mask.dst_port = masks->dst_port_mask;
+ udp_val.dst_port = input->flow.udp4_flow.dst_port;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
+ &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
+ struct tcp_hdr tcp_mask, tcp_val;
+ memset(&tcp_mask, 0, sizeof(tcp_mask));
+ memset(&tcp_val, 0, sizeof(tcp_val));
+
+ if (input->flow.tcp4_flow.src_port) {
+ tcp_mask.src_port = masks->src_port_mask;
+ tcp_val.src_port = input->flow.tcp4_flow.src_port;
+ }
+ if (input->flow.tcp4_flow.dst_port) {
+ tcp_mask.dst_port = masks->dst_port_mask;
+ tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
+ &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
+ struct sctp_hdr sctp_mask, sctp_val;
+ memset(&sctp_mask, 0, sizeof(sctp_mask));
+ memset(&sctp_val, 0, sizeof(sctp_val));
+
+ if (input->flow.sctp4_flow.src_port) {
+ sctp_mask.src_port = masks->src_port_mask;
+ sctp_val.src_port = input->flow.sctp4_flow.src_port;
+ }
+ if (input->flow.sctp4_flow.dst_port) {
+ sctp_mask.dst_port = masks->dst_port_mask;
+ sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
+ }
+ if (input->flow.sctp4_flow.verify_tag) {
+ sctp_mask.tag = 0xffffffff;
+ sctp_val.tag = input->flow.sctp4_flow.verify_tag;
+ }
+
+ /* v4 proto should be 132, override ip4_flow.proto */
+ input->flow.ip4_flow.proto = 132;
+
+ enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
+ &sctp_val, sizeof(struct sctp_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
+ struct ipv4_hdr ip4_mask, ip4_val;
+ memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
+ memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
+
+ if (input->flow.ip4_flow.tos) {
+ ip4_mask.type_of_service = masks->ipv4_mask.tos;
+ ip4_val.type_of_service = input->flow.ip4_flow.tos;
+ }
+ if (input->flow.ip4_flow.ttl) {
+ ip4_mask.time_to_live = masks->ipv4_mask.ttl;
+ ip4_val.time_to_live = input->flow.ip4_flow.ttl;
+ }
+ if (input->flow.ip4_flow.proto) {
+ ip4_mask.next_proto_id = masks->ipv4_mask.proto;
+ ip4_val.next_proto_id = input->flow.ip4_flow.proto;
+ }
+ if (input->flow.ip4_flow.src_ip) {
+ ip4_mask.src_addr = masks->ipv4_mask.src_ip;
+ ip4_val.src_addr = input->flow.ip4_flow.src_ip;
+ }
+ if (input->flow.ip4_flow.dst_ip) {
+ ip4_mask.dst_addr = masks->ipv4_mask.dst_ip;
+ ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
+ &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ struct udp_hdr udp_mask, udp_val;
+ memset(&udp_mask, 0, sizeof(udp_mask));
+ memset(&udp_val, 0, sizeof(udp_val));
+
+ if (input->flow.udp6_flow.src_port) {
+ udp_mask.src_port = masks->src_port_mask;
+ udp_val.src_port = input->flow.udp6_flow.src_port;
+ }
+ if (input->flow.udp6_flow.dst_port) {
+ udp_mask.dst_port = masks->dst_port_mask;
+ udp_val.dst_port = input->flow.udp6_flow.dst_port;
+ }
+ enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
+ &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
+ struct tcp_hdr tcp_mask, tcp_val;
+ memset(&tcp_mask, 0, sizeof(tcp_mask));
+ memset(&tcp_val, 0, sizeof(tcp_val));
+
+ if (input->flow.tcp6_flow.src_port) {
+ tcp_mask.src_port = masks->src_port_mask;
+ tcp_val.src_port = input->flow.tcp6_flow.src_port;
+ }
+ if (input->flow.tcp6_flow.dst_port) {
+ tcp_mask.dst_port = masks->dst_port_mask;
+ tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
+ }
+ enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
+ &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
+ struct sctp_hdr sctp_mask, sctp_val;
+ memset(&sctp_mask, 0, sizeof(sctp_mask));
+ memset(&sctp_val, 0, sizeof(sctp_val));
+
+ if (input->flow.sctp6_flow.src_port) {
+ sctp_mask.src_port = masks->src_port_mask;
+ sctp_val.src_port = input->flow.sctp6_flow.src_port;
+ }
+ if (input->flow.sctp6_flow.dst_port) {
+ sctp_mask.dst_port = masks->dst_port_mask;
+ sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
+ }
+ if (input->flow.sctp6_flow.verify_tag) {
+ sctp_mask.tag = 0xffffffff;
+ sctp_val.tag = input->flow.sctp6_flow.verify_tag;
+ }
+
+ /* v4 proto should be 132, override ipv6_flow.proto */
+ input->flow.ipv6_flow.proto = 132;
+
+ enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
+ &sctp_val, sizeof(struct sctp_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
+ struct ipv6_hdr ipv6_mask, ipv6_val;
+ memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
+ memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
+
+ if (input->flow.ipv6_flow.proto) {
+ ipv6_mask.proto = masks->ipv6_mask.proto;
+ ipv6_val.proto = input->flow.ipv6_flow.proto;
+ }
+ memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
+ sizeof(ipv6_mask.src_addr));
+ memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip,
+ sizeof(ipv6_val.src_addr));
+ memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip,
+ sizeof(ipv6_mask.dst_addr));
+ memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip,
+ sizeof(ipv6_val.dst_addr));
+ if (input->flow.ipv6_flow.tc) {
+ ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
+ ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
+ }
+ if (input->flow.ipv6_flow.hop_limits) {
+ ipv6_mask.hop_limits = masks->ipv6_mask.hop_limits;
+ ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
+ &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
+ }
+}
+
+int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
+{
+ int32_t pos;
+ struct enic_fdir_node *key;
+ /* See if the key is in the table */
+ pos = rte_hash_del_key(enic->fdir.hash, params);
+ switch (pos) {
+ case -EINVAL:
+ case -ENOENT:
+ enic->fdir.stats.f_remove++;
+ return -EINVAL;
+ default:
+ /* The entry is present in the table */
+ key = enic->fdir.nodes[pos];
+
+ /* Delete the filter */
+ vnic_dev_classifier(enic->vdev, CLSF_DEL,
+ &key->fltr_id, NULL, NULL);
+ rte_free(key);
+ enic->fdir.nodes[pos] = NULL;
+ enic->fdir.stats.free++;
+ enic->fdir.stats.remove++;
+ break;
+ }
+ return 0;
+}
+
+int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
+{
+ struct enic_fdir_node *key;
+ struct filter_v2 fltr;
+ int32_t pos;
+ u8 do_free = 0;
+ u16 old_fltr_id = 0;
+ u32 flowtype_supported;
+ u16 flex_bytes;
+ u16 queue;
+ struct filter_action_v2 action;
+
+ memset(&fltr, 0, sizeof(fltr));
+ memset(&action, 0, sizeof(action));
+ flowtype_supported = enic->fdir.types_mask
+ & (1 << params->input.flow_type);
+
+ flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
+ (params->input.flow_ext.flexbytes[0] & 0xFF));
+
+ if (!enic->fdir.hash ||
+ (params->input.flow_ext.vlan_tci & 0xFFF) ||
+ !flowtype_supported || flex_bytes ||
+ params->action.behavior /* drop */) {
+ enic->fdir.stats.f_add++;
+ return -ENOTSUP;
+ }
+
+ /* Get the enicpmd RQ from the DPDK Rx queue */
+ queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);
+
+ if (!enic->rq[queue].in_use)
+ return -EINVAL;
+
+ /* See if the key is already there in the table */
+ pos = rte_hash_del_key(enic->fdir.hash, params);
+ switch (pos) {
+ case -EINVAL:
+ enic->fdir.stats.f_add++;
+ return -EINVAL;
+ case -ENOENT:
+ /* Add a new classifier entry */
+ if (!enic->fdir.stats.free) {
+ enic->fdir.stats.f_add++;
+ return -ENOSPC;
+ }
+ key = rte_zmalloc("enic_fdir_node",
+ sizeof(struct enic_fdir_node), 0);
+ if (!key) {
+ enic->fdir.stats.f_add++;
+ return -ENOMEM;
+ }
+ break;
+ default:
+ /* The entry is already present in the table.
+ * Check if there is a change in queue
+ */
+ key = enic->fdir.nodes[pos];
+ enic->fdir.nodes[pos] = NULL;
+ if (unlikely(key->rq_index == queue)) {
+ /* Nothing to be done */
+ enic->fdir.stats.f_add++;
+ pos = rte_hash_add_key(enic->fdir.hash, params);
+ if (pos < 0) {
+ dev_err(enic, "Add hash key failed\n");
+ return pos;
+ }
+ enic->fdir.nodes[pos] = key;
+ dev_warning(enic,
+ "FDIR rule is already present\n");
+ return 0;
+ }
+
+ if (likely(enic->fdir.stats.free)) {
+ /* Add the filter and then delete the old one.
+ * This is to avoid packets from going into the
+ * default queue during the window between
+ * delete and add
+ */
+ do_free = 1;
+ old_fltr_id = key->fltr_id;
+ } else {
+ /* No free slots in the classifier.
+ * Delete the filter and add the modified one later
+ */
+ vnic_dev_classifier(enic->vdev, CLSF_DEL,
+ &key->fltr_id, NULL, NULL);
+ enic->fdir.stats.free++;
+ }
+
+ break;
+ }
+
+ key->filter = *params;
+ key->rq_index = queue;
+
+ enic->fdir.copy_fltr_fn(&fltr, &params->input,
+ &enic->rte_dev->data->dev_conf.fdir_conf.mask);
+ action.type = FILTER_ACTION_RQ_STEERING;
+ action.rq_idx = queue;
+
+ if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr,
+ &action)) {
+ key->fltr_id = queue;
+ } else {
+ dev_err(enic, "Add classifier entry failed\n");
+ enic->fdir.stats.f_add++;
+ rte_free(key);
+ return -1;
+ }
+
+ if (do_free)
+ vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL,
+ NULL);
+ else{
+ enic->fdir.stats.free--;
+ enic->fdir.stats.add++;
+ }
+
+ pos = rte_hash_add_key(enic->fdir.hash, params);
+ if (pos < 0) {
+ enic->fdir.stats.f_add++;
+ dev_err(enic, "Add hash key failed\n");
+ return pos;
+ }
+
+ enic->fdir.nodes[pos] = key;
+ return 0;
+}
+
+void enic_clsf_destroy(struct enic *enic)
+{
+ u32 index;
+ struct enic_fdir_node *key;
+ /* delete classifier entries */
+ for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
+ key = enic->fdir.nodes[index];
+ if (key) {
+ vnic_dev_classifier(enic->vdev, CLSF_DEL,
+ &key->fltr_id, NULL, NULL);
+ rte_free(key);
+ enic->fdir.nodes[index] = NULL;
+ }
+ }
+
+ if (enic->fdir.hash) {
+ rte_hash_free(enic->fdir.hash);
+ enic->fdir.hash = NULL;
+ }
+}
+
+int enic_clsf_init(struct enic *enic)
+{
+ char clsf_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters hash_params = {
+ .name = clsf_name,
+ .entries = ENICPMD_CLSF_HASH_ENTRIES,
+ .key_len = sizeof(struct rte_eth_fdir_filter),
+ .hash_func = DEFAULT_HASH_FUNC,
+ .hash_func_init_val = 0,
+ .socket_id = SOCKET_ID_ANY,
+ };
+ snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name);
+ enic->fdir.hash = rte_hash_create(&hash_params);
+ memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
+ enic->fdir.stats.free = ENICPMD_FDIR_MAX;
+ return NULL == enic->fdir.hash;
+}
diff --git a/src/spdk/dpdk/drivers/net/enic/enic_compat.h b/src/spdk/dpdk/drivers/net/enic/enic_compat.h
new file mode 100644
index 00000000..ceb1b096
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/enic_compat.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _ENIC_COMPAT_H_
+#define _ENIC_COMPAT_H_
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#define ENIC_PAGE_ALIGN 4096UL
+#define ENIC_ALIGN ENIC_PAGE_ALIGN
+#define NAME_MAX 255
+#define ETH_ALEN 6
+
+#define __iomem
+
+#define rmb() rte_rmb() /* dpdk rte provided rmb */
+#define wmb() rte_wmb() /* dpdk rte provided wmb */
+
+#define le16_to_cpu
+#define le32_to_cpu
+#define le64_to_cpu
+#define cpu_to_le16
+#define cpu_to_le32
+#define cpu_to_le64
+
+#ifndef offsetof
+#define offsetof(t, m) ((size_t) &((t *)0)->m)
+#endif
+
+#define pr_err(y, args...) dev_err(0, y, ##args)
+#define pr_warn(y, args...) dev_warning(0, y, ##args)
+#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__)
+
+#define VNIC_ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
+#define udelay usleep
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
+#define kzalloc(size, flags) calloc(1, size)
+#define kfree(x) free(x)
+
+#define dev_printk(level, fmt, args...) \
+ RTE_LOG(level, PMD, "rte_enic_pmd: " fmt, ## args)
+
+#define dev_err(x, args...) dev_printk(ERR, args)
+#define dev_info(x, args...) dev_printk(INFO, args)
+#define dev_warning(x, args...) dev_printk(WARNING, args)
+#define dev_debug(x, args...) dev_printk(DEBUG, args)
+
+extern int enicpmd_logtype_flow;
+extern int enicpmd_logtype_init;
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
+ "%s" fmt "\n", __func__, ##args)
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+typedef unsigned long long dma_addr_t;
+
+static inline uint32_t ioread32(volatile void *addr)
+{
+ return rte_read32(addr);
+}
+
+static inline uint8_t ioread8(volatile void *addr)
+{
+ return rte_read8(addr);
+}
+
+static inline void iowrite32(uint32_t val, volatile void *addr)
+{
+ rte_write32(val, addr);
+}
+
+static inline void iowrite32_relaxed(uint32_t val, volatile void *addr)
+{
+ rte_write32_relaxed(val, addr);
+}
+
+static inline unsigned int readl(volatile void __iomem *addr)
+{
+ return rte_read32(addr);
+}
+
+static inline void writel(unsigned int val, volatile void __iomem *addr)
+{
+ rte_write32(val, addr);
+}
+
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({ \
+ type __max1 = (x); \
+ type __max2 = (y); \
+ __max1 > __max2 ? __max1 : __max2; })
+
+#endif /* _ENIC_COMPAT_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/enic_ethdev.c b/src/spdk/dpdk/drivers/net/enic/enic_ethdev.c
new file mode 100644
index 00000000..b3d57771
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/enic_ethdev.c
@@ -0,0 +1,1057 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+
+#include <rte_dev.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_kvargs.h>
+#include <rte_string_fns.h>
+
+#include "vnic_intr.h"
+#include "vnic_cq.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_enet.h"
+#include "enic.h"
+
+int enicpmd_logtype_init;
+int enicpmd_logtype_flow;
+
+#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+/*
+ * The set of PCI devices this driver supports
+ */
+#define CISCO_PCI_VENDOR_ID 0x1137
+static const struct rte_pci_id pci_id_enic_map[] = {
+ { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) },
+ { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
+ {.vendor_id = 0, /* sentinel */},
+};
+
+#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
+#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
+
+RTE_INIT(enicpmd_init_log)
+{
+ enicpmd_logtype_init = rte_log_register("pmd.net.enic.init");
+ if (enicpmd_logtype_init >= 0)
+ rte_log_set_level(enicpmd_logtype_init, RTE_LOG_NOTICE);
+ enicpmd_logtype_flow = rte_log_register("pmd.net.enic.flow");
+ if (enicpmd_logtype_flow >= 0)
+ rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_NOTICE);
+}
+
+static int
+enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op, void *arg)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret = 0;
+
+ ENICPMD_FUNC_TRACE();
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
+ return -EINVAL;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ case RTE_ETH_FILTER_UPDATE:
+ ret = enic_fdir_add_fltr(enic,
+ (struct rte_eth_fdir_filter *)arg);
+ break;
+
+ case RTE_ETH_FILTER_DELETE:
+ ret = enic_fdir_del_fltr(enic,
+ (struct rte_eth_fdir_filter *)arg);
+ break;
+
+ case RTE_ETH_FILTER_STATS:
+ enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
+ break;
+
+ case RTE_ETH_FILTER_FLUSH:
+ dev_warning(enic, "unsupported operation %u", filter_op);
+ ret = -ENOTSUP;
+ break;
+ case RTE_ETH_FILTER_INFO:
+ enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
+ break;
+ default:
+ dev_err(enic, "unknown operation %u", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ ENICPMD_FUNC_TRACE();
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &enic_flow_ops;
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
+ break;
+ default:
+ dev_warning(enic, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static void enicpmd_dev_tx_queue_release(void *txq)
+{
+ ENICPMD_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ enic_free_wq(txq);
+}
+
+static int enicpmd_dev_setup_intr(struct enic *enic)
+{
+ int ret;
+ unsigned int index;
+
+ ENICPMD_FUNC_TRACE();
+
+ /* Are we done with the init of all the queues? */
+ for (index = 0; index < enic->cq_count; index++) {
+ if (!enic->cq[index].ctrl)
+ break;
+ }
+ if (enic->cq_count != index)
+ return 0;
+ for (index = 0; index < enic->wq_count; index++) {
+ if (!enic->wq[index].ctrl)
+ break;
+ }
+ if (enic->wq_count != index)
+ return 0;
+ /* check start of packet (SOP) RQs only in case scatter is disabled. */
+ for (index = 0; index < enic->rq_count; index++) {
+ if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
+ break;
+ }
+ if (enic->rq_count != index)
+ return 0;
+
+ ret = enic_alloc_intr_resources(enic);
+ if (ret) {
+ dev_err(enic, "alloc intr failed\n");
+ return ret;
+ }
+ enic_init_vnic_resources(enic);
+
+ ret = enic_setup_finish(enic);
+ if (ret)
+ dev_err(enic, "setup could not be finished\n");
+
+ return ret;
+}
+
+static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ int ret;
+ struct enic *enic = pmd_priv(eth_dev);
+ struct vnic_wq *wq;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
+ ENICPMD_FUNC_TRACE();
+ RTE_ASSERT(queue_idx < enic->conf_wq_count);
+ wq = &enic->wq[queue_idx];
+ wq->offloads = tx_conf->offloads |
+ eth_dev->data->dev_conf.txmode.offloads;
+ eth_dev->data->tx_queues[queue_idx] = (void *)wq;
+
+ ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
+ if (ret) {
+ dev_err(enic, "error in allocating wq\n");
+ return ret;
+ }
+
+ return enicpmd_dev_setup_intr(enic);
+}
+
+static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+
+ enic_start_wq(enic, queue_idx);
+
+ return 0;
+}
+
+static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx)
+{
+ int ret;
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+
+ ret = enic_stop_wq(enic, queue_idx);
+ if (ret)
+ dev_err(enic, "error in stopping wq %d\n", queue_idx);
+
+ return ret;
+}
+
+static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+
+ enic_start_rq(enic, queue_idx);
+
+ return 0;
+}
+
+static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx)
+{
+ int ret;
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+
+ ret = enic_stop_rq(enic, queue_idx);
+ if (ret)
+ dev_err(enic, "error in stopping rq %d\n", queue_idx);
+
+ return ret;
+}
+
+static void enicpmd_dev_rx_queue_release(void *rxq)
+{
+ ENICPMD_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ enic_free_rq(rxq);
+}
+
+static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id)
+{
+ struct enic *enic = pmd_priv(dev);
+ uint32_t queue_count = 0;
+ struct vnic_cq *cq;
+ uint32_t cq_tail;
+ uint16_t cq_idx;
+ int rq_num;
+
+ rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
+ cq = &enic->cq[enic_cq_rq(enic, rq_num)];
+ cq_idx = cq->to_clean;
+
+ cq_tail = ioread32(&cq->ctrl->cq_tail);
+
+ if (cq_tail < cq_idx)
+ cq_tail += cq->ring.desc_count;
+
+ queue_count = cq_tail - cq_idx;
+
+ return queue_count;
+}
+
+static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ int ret;
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+ RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
+ eth_dev->data->rx_queues[queue_idx] =
+ (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
+
+ ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
+ rx_conf->rx_free_thresh);
+ if (ret) {
+ dev_err(enic, "error in allocating rq\n");
+ return ret;
+ }
+
+ return enicpmd_dev_setup_intr(enic);
+}
+
+static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ uint64_t offloads;
+
+ ENICPMD_FUNC_TRACE();
+
+ offloads = eth_dev->data->dev_conf.rxmode.offloads;
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ enic->ig_vlan_strip_en = 1;
+ else
+ enic->ig_vlan_strip_en = 0;
+ }
+
+ if ((mask & ETH_VLAN_FILTER_MASK) &&
+ (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
+ dev_warning(enic,
+ "Configuration of VLAN filter is not supported\n");
+ }
+
+ if ((mask & ETH_VLAN_EXTEND_MASK) &&
+ (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) {
+ dev_warning(enic,
+ "Configuration of extended VLAN is not supported\n");
+ }
+
+ return enic_set_vlan_strip(enic);
+}
+
+static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ int ret;
+ int mask;
+ struct enic *enic = pmd_priv(eth_dev);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
+ ENICPMD_FUNC_TRACE();
+ ret = enic_set_vnic_res(enic);
+ if (ret) {
+ dev_err(enic, "Set vNIC resource num failed, aborting\n");
+ return ret;
+ }
+
+ enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CHECKSUM);
+ /* All vlan offload masks to apply the current settings */
+ mask = ETH_VLAN_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ret = enicpmd_vlan_offload_set(eth_dev, mask);
+ if (ret) {
+ dev_err(enic, "Failed to configure VLAN offloads\n");
+ return ret;
+ }
+ /*
+ * Initialize RSS with the default reta and key. If the user key is
+ * given (rx_adv_conf.rss_conf.rss_key), will use that instead of the
+ * default key.
+ */
+ return enic_init_rss_nic_cfg(enic);
+}
+
+/* Start the device.
+ * It returns 0 on success.
+ */
+static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
+ ENICPMD_FUNC_TRACE();
+ return enic_enable(enic);
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct rte_eth_link link;
+ struct enic *enic = pmd_priv(eth_dev);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ ENICPMD_FUNC_TRACE();
+ enic_disable(enic);
+
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(eth_dev, &link);
+}
+
+/*
+ * Stop device.
+ */
+static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ enic_remove(enic);
+}
+
+static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
+ __rte_unused int wait_to_complete)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ return enic_link_update(enic);
+}
+
+static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *stats)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ return enic_dev_stats_get(enic, stats);
+}
+
+static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ enic_dev_stats_clear(enic);
+}
+
+static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ /* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
+ device_info->max_rx_queues = enic->conf_rq_count / 2;
+ device_info->max_tx_queues = enic->conf_wq_count;
+ device_info->min_rx_bufsize = ENIC_MIN_MTU;
+ /* "Max" mtu is not a typo. HW receives packet sizes up to the
+ * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
+ * a hint to the driver to size receive buffers accordingly so that
+ * larger-than-vnic-mtu packets get truncated.. For DPDK, we let
+ * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
+ * ignoring vNIC mtu.
+ */
+ device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
+ device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
+ device_info->rx_offload_capa = enic->rx_offload_capa;
+ device_info->tx_offload_capa = enic->tx_offload_capa;
+ device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
+ device_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
+ };
+ device_info->reta_size = enic->reta_size;
+ device_info->hash_key_size = enic->hash_key_size;
+ device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
+ device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = enic->config.rq_desc_count,
+ .nb_min = ENIC_MIN_RQ_DESCS,
+ .nb_align = ENIC_ALIGN_DESCS,
+ };
+ device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = enic->config.wq_desc_count,
+ .nb_min = ENIC_MIN_WQ_DESCS,
+ .nb_align = ENIC_ALIGN_DESCS,
+ .nb_seg_max = ENIC_TX_XMIT_MAX,
+ .nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
+ };
+ device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
+ .burst_size = ENIC_DEFAULT_RX_BURST,
+ .ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
+ ENIC_DEFAULT_RX_RING_SIZE),
+ .nb_queues = ENIC_DEFAULT_RX_RINGS,
+ };
+ device_info->default_txportconf = (struct rte_eth_dev_portconf) {
+ .burst_size = ENIC_DEFAULT_TX_BURST,
+ .ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
+ ENIC_DEFAULT_TX_RING_SIZE),
+ .nb_queues = ENIC_DEFAULT_TX_RINGS,
+ };
+}
+
+static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == enic_recv_pkts ||
+ dev->rx_pkt_burst == enic_noscatter_recv_pkts)
+ return ptypes;
+ return NULL;
+}
+
+static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ ENICPMD_FUNC_TRACE();
+
+ enic->promisc = 1;
+ enic_add_packet_filter(enic);
+}
+
+static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ ENICPMD_FUNC_TRACE();
+ enic->promisc = 0;
+ enic_add_packet_filter(enic);
+}
+
+static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ ENICPMD_FUNC_TRACE();
+ enic->allmulti = 1;
+ enic_add_packet_filter(enic);
+}
+
+static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ ENICPMD_FUNC_TRACE();
+ enic->allmulti = 0;
+ enic_add_packet_filter(enic);
+}
+
+static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
+ struct ether_addr *mac_addr,
+ __rte_unused uint32_t index, __rte_unused uint32_t pool)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
+ ENICPMD_FUNC_TRACE();
+ return enic_set_mac_address(enic, mac_addr->addr_bytes);
+}
+
+static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ ENICPMD_FUNC_TRACE();
+ if (enic_del_mac_address(enic, index))
+ dev_err(enic, "del mac addr failed\n");
+}
+
+static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
+ struct ether_addr *addr)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
+ ENICPMD_FUNC_TRACE();
+ ret = enic_del_mac_address(enic, 0);
+ if (ret)
+ return ret;
+ return enic_set_mac_address(enic, addr->addr_bytes);
+}
+
+static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ return enic_set_mtu(enic, mtu);
+}
+
+static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64
+ *reta_conf,
+ uint16_t reta_size)
+{
+ struct enic *enic = pmd_priv(dev);
+ uint16_t i, idx, shift;
+
+ ENICPMD_FUNC_TRACE();
+ if (reta_size != ENIC_RSS_RETA_SIZE) {
+ dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
+ reta_size, ENIC_RSS_RETA_SIZE);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
+ enic->rss_cpu.cpu[i / 4].b[i % 4]);
+ }
+
+ return 0;
+}
+
+static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64
+ *reta_conf,
+ uint16_t reta_size)
+{
+ struct enic *enic = pmd_priv(dev);
+ union vnic_rss_cpu rss_cpu;
+ uint16_t i, idx, shift;
+
+ ENICPMD_FUNC_TRACE();
+ if (reta_size != ENIC_RSS_RETA_SIZE) {
+ dev_err(enic, "reta_update: wrong reta_size. given=%u"
+ " expected=%u\n",
+ reta_size, ENIC_RSS_RETA_SIZE);
+ return -EINVAL;
+ }
+ /*
+ * Start with the current reta and modify it per reta_conf, as we
+ * need to push the entire reta even if we only modify one entry.
+ */
+ rss_cpu = enic->rss_cpu;
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ rss_cpu.cpu[i / 4].b[i % 4] =
+ enic_rte_rq_idx_to_sop_idx(
+ reta_conf[idx].reta[shift]);
+ }
+ return enic_set_rss_reta(enic, &rss_cpu);
+}
+
+static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct enic *enic = pmd_priv(dev);
+
+ ENICPMD_FUNC_TRACE();
+ return enic_set_rss_conf(enic, rss_conf);
+}
+
+static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct enic *enic = pmd_priv(dev);
+
+ ENICPMD_FUNC_TRACE();
+ if (rss_conf == NULL)
+ return -EINVAL;
+ if (rss_conf->rss_key != NULL &&
+ rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
+ dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
+ " expected=%u+\n",
+ rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
+ return -EINVAL;
+ }
+ rss_conf->rss_hf = enic->rss_hf;
+ if (rss_conf->rss_key != NULL) {
+ int i;
+ for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
+ rss_conf->rss_key[i] =
+ enic->rss_key.key[i / 10].b[i % 10];
+ }
+ rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
+ }
+ return 0;
+}
+
+static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct enic *enic = pmd_priv(dev);
+ struct vnic_rq *rq_sop;
+ struct vnic_rq *rq_data;
+ struct rte_eth_rxconf *conf;
+ uint16_t sop_queue_idx;
+ uint16_t data_queue_idx;
+
+ ENICPMD_FUNC_TRACE();
+ sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
+ data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
+ rq_sop = &enic->rq[sop_queue_idx];
+ rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
+ qinfo->mp = rq_sop->mp;
+ qinfo->scattered_rx = rq_sop->data_queue_enable;
+ qinfo->nb_desc = rq_sop->ring.desc_count;
+ if (qinfo->scattered_rx)
+ qinfo->nb_desc += rq_data->ring.desc_count;
+ conf = &qinfo->conf;
+ memset(conf, 0, sizeof(*conf));
+ conf->rx_free_thresh = rq_sop->rx_free_thresh;
+ conf->rx_drop_en = 1;
+ /*
+ * Except VLAN stripping (port setting), all the checksum offloads
+ * are always enabled.
+ */
+ conf->offloads = enic->rx_offload_capa;
+ if (!enic->ig_vlan_strip_en)
+ conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ /* rx_thresh and other fields are not applicable for enic */
+}
+
+static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct enic *enic = pmd_priv(dev);
+ struct vnic_wq *wq = &enic->wq[tx_queue_id];
+
+ ENICPMD_FUNC_TRACE();
+ qinfo->nb_desc = wq->ring.desc_count;
+ memset(&qinfo->conf, 0, sizeof(qinfo->conf));
+ qinfo->conf.offloads = wq->offloads;
+ /* tx_thresh, and all the other fields are not applicable for enic */
+}
+
+static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
+ return 0;
+}
+
+static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
+ return 0;
+}
+
+static int udp_tunnel_common_check(struct enic *enic,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
+ return -ENOTSUP;
+ if (!enic->overlay_offload) {
+ PMD_INIT_LOG(DEBUG, " vxlan (overlay offload) is not "
+ "supported\n");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static int update_vxlan_port(struct enic *enic, uint16_t port)
+{
+ if (vnic_dev_overlay_offload_cfg(enic->vdev,
+ OVERLAY_CFG_VXLAN_PORT_UPDATE,
+ port)) {
+ PMD_INIT_LOG(DEBUG, " failed to update vxlan port\n");
+ return -EINVAL;
+ }
+ PMD_INIT_LOG(DEBUG, " updated vxlan port to %u\n", port);
+ enic->vxlan_port = port;
+ return 0;
+}
+
+static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ ret = udp_tunnel_common_check(enic, tnl);
+ if (ret)
+ return ret;
+ /*
+ * The NIC has 1 configurable VXLAN port number. "Adding" a new port
+ * number replaces it.
+ */
+ if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
+ PMD_INIT_LOG(DEBUG, " %u is already configured or invalid\n",
+ tnl->udp_port);
+ return -EINVAL;
+ }
+ return update_vxlan_port(enic, tnl->udp_port);
+}
+
+static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ ret = udp_tunnel_common_check(enic, tnl);
+ if (ret)
+ return ret;
+ /*
+ * Clear the previously set port number and restore the
+ * hardware default port number. Some drivers disable VXLAN
+ * offloads when there are no configured port numbers. But
+ * enic does not do that as VXLAN is part of overlay offload,
+ * which is tied to inner RSS and TSO.
+ */
+ if (tnl->udp_port != enic->vxlan_port) {
+ PMD_INIT_LOG(DEBUG, " %u is not a configured vxlan port\n",
+ tnl->udp_port);
+ return -EINVAL;
+ }
+ return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT);
+}
+
+static const struct eth_dev_ops enicpmd_eth_dev_ops = {
+ .dev_configure = enicpmd_dev_configure,
+ .dev_start = enicpmd_dev_start,
+ .dev_stop = enicpmd_dev_stop,
+ .dev_set_link_up = NULL,
+ .dev_set_link_down = NULL,
+ .dev_close = enicpmd_dev_close,
+ .promiscuous_enable = enicpmd_dev_promiscuous_enable,
+ .promiscuous_disable = enicpmd_dev_promiscuous_disable,
+ .allmulticast_enable = enicpmd_dev_allmulticast_enable,
+ .allmulticast_disable = enicpmd_dev_allmulticast_disable,
+ .link_update = enicpmd_dev_link_update,
+ .stats_get = enicpmd_dev_stats_get,
+ .stats_reset = enicpmd_dev_stats_reset,
+ .queue_stats_mapping_set = NULL,
+ .dev_infos_get = enicpmd_dev_info_get,
+ .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
+ .mtu_set = enicpmd_mtu_set,
+ .vlan_filter_set = NULL,
+ .vlan_tpid_set = NULL,
+ .vlan_offload_set = enicpmd_vlan_offload_set,
+ .vlan_strip_queue_set = NULL,
+ .rx_queue_start = enicpmd_dev_rx_queue_start,
+ .rx_queue_stop = enicpmd_dev_rx_queue_stop,
+ .tx_queue_start = enicpmd_dev_tx_queue_start,
+ .tx_queue_stop = enicpmd_dev_tx_queue_stop,
+ .rx_queue_setup = enicpmd_dev_rx_queue_setup,
+ .rx_queue_release = enicpmd_dev_rx_queue_release,
+ .rx_queue_count = enicpmd_dev_rx_queue_count,
+ .rx_descriptor_done = NULL,
+ .tx_queue_setup = enicpmd_dev_tx_queue_setup,
+ .tx_queue_release = enicpmd_dev_tx_queue_release,
+ .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
+ .rxq_info_get = enicpmd_dev_rxq_info_get,
+ .txq_info_get = enicpmd_dev_txq_info_get,
+ .dev_led_on = NULL,
+ .dev_led_off = NULL,
+ .flow_ctrl_get = NULL,
+ .flow_ctrl_set = NULL,
+ .priority_flow_ctrl_set = NULL,
+ .mac_addr_add = enicpmd_add_mac_addr,
+ .mac_addr_remove = enicpmd_remove_mac_addr,
+ .mac_addr_set = enicpmd_set_mac_addr,
+ .filter_ctrl = enicpmd_dev_filter_ctrl,
+ .reta_query = enicpmd_dev_rss_reta_query,
+ .reta_update = enicpmd_dev_rss_reta_update,
+ .rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
+ .rss_hash_update = enicpmd_dev_rss_hash_update,
+ .udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del,
+};
+
+static int enic_parse_disable_overlay(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct enic *enic;
+
+ enic = (struct enic *)opaque;
+ if (strcmp(value, "0") == 0) {
+ enic->disable_overlay = false;
+ } else if (strcmp(value, "1") == 0) {
+ enic->disable_overlay = true;
+ } else {
+ dev_err(enic, "Invalid value for " ENIC_DEVARG_DISABLE_OVERLAY
+ ": expected=0|1 given=%s\n", value);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct enic *enic;
+
+ enic = (struct enic *)opaque;
+ if (strcmp(value, "trunk") == 0) {
+ /* Trunk mode: always tag */
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
+ } else if (strcmp(value, "untag") == 0) {
+ /* Untag default VLAN mode: untag if VLAN = default VLAN */
+ enic->ig_vlan_rewrite_mode =
+ IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
+ } else if (strcmp(value, "priority") == 0) {
+ /*
+ * Priority-tag default VLAN mode: priority tag (VLAN header
+ * with ID=0) if VLAN = default
+ */
+ enic->ig_vlan_rewrite_mode =
+ IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
+ } else if (strcmp(value, "pass") == 0) {
+ /* Pass through mode: do not touch tags */
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
+ } else {
+ dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
+ ": expected=trunk|untag|priority|pass given=%s\n",
+ value);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int enic_check_devargs(struct rte_eth_dev *dev)
+{
+ static const char *const valid_keys[] = {
+ ENIC_DEVARG_DISABLE_OVERLAY,
+ ENIC_DEVARG_IG_VLAN_REWRITE,
+ NULL};
+ struct enic *enic = pmd_priv(dev);
+ struct rte_kvargs *kvlist;
+
+ ENICPMD_FUNC_TRACE();
+
+ enic->disable_overlay = false;
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
+ if (!dev->device->devargs)
+ return 0;
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+ if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
+ enic_parse_disable_overlay, enic) < 0 ||
+ rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
+ enic_parse_ig_vlan_rewrite, enic) < 0) {
+ rte_kvargs_free(kvlist);
+ return -EINVAL;
+ }
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
+struct enic *enicpmd_list_head = NULL;
+/* Initialize the driver
+ * It returns 0 on success.
+ */
+static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pdev;
+ struct rte_pci_addr *addr;
+ struct enic *enic = pmd_priv(eth_dev);
+ int err;
+
+ ENICPMD_FUNC_TRACE();
+
+ enic->port_id = eth_dev->data->port_id;
+ enic->rte_dev = eth_dev;
+ eth_dev->dev_ops = &enicpmd_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &enic_recv_pkts;
+ eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &enic_prep_pkts;
+
+ pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ rte_eth_copy_pci_info(eth_dev, pdev);
+ enic->pdev = pdev;
+ addr = &pdev->addr;
+
+ snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
+ addr->domain, addr->bus, addr->devid, addr->function);
+
+ err = enic_check_devargs(eth_dev);
+ if (err)
+ return err;
+ return enic_probe(enic);
+}
+
+static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
+ eth_enicpmd_dev_init);
+}
+
+static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_enic_pmd = {
+ .id_table = pci_id_enic_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_enic_pci_probe,
+ .remove = eth_enic_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_enic,
+ ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
+ ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
diff --git a/src/spdk/dpdk/drivers/net/enic/enic_flow.c b/src/spdk/dpdk/drivers/net/enic/enic_flow.c
new file mode 100644
index 00000000..0cf04aef
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/enic_flow.c
@@ -0,0 +1,1573 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ */
+
+#include <errno.h>
+#include <stdint.h>
+#include <rte_log.h>
+#include <rte_ethdev_driver.h>
+#include <rte_flow_driver.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+
+#include "enic_compat.h"
+#include "enic.h"
+#include "vnic_dev.h"
+#include "vnic_nic.h"
+
+#define FLOW_TRACE() \
+ rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
+ "%s()\n", __func__)
+#define FLOW_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
+ fmt "\n", ##args)
+
+/** Info about how to copy items into enic filters. */
+struct enic_items {
+ /** Function for copying and validating an item. */
+ int (*copy_item)(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst);
+ /** List of valid previous items. */
+ const enum rte_flow_item_type * const prev_items;
+ /** True if it's OK for this item to be the first item. For some NIC
+ * versions, it's invalid to start the stack above layer 3.
+ */
+ const u8 valid_start_item;
+};
+
+/** Filtering capabilities for various NIC and firmware versions. */
+struct enic_filter_cap {
+ /** list of valid items and their handlers and attributes. */
+ const struct enic_items *item_info;
+};
+
+/* functions for copying flow actions into enic actions */
+typedef int (copy_action_fn)(const struct rte_flow_action actions[],
+ struct filter_action_v2 *enic_action);
+
+/* functions for copying items into enic filters */
+typedef int(enic_copy_item_fn)(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst);
+
+/** Action capabilities for various NICs. */
+struct enic_action_cap {
+ /** list of valid actions */
+ const enum rte_flow_action_type *actions;
+ /** copy function for a particular NIC */
+ int (*copy_fn)(const struct rte_flow_action actions[],
+ struct filter_action_v2 *enic_action);
+};
+
+/* Forward declarations */
+static enic_copy_item_fn enic_copy_item_ipv4_v1;
+static enic_copy_item_fn enic_copy_item_udp_v1;
+static enic_copy_item_fn enic_copy_item_tcp_v1;
+static enic_copy_item_fn enic_copy_item_eth_v2;
+static enic_copy_item_fn enic_copy_item_vlan_v2;
+static enic_copy_item_fn enic_copy_item_ipv4_v2;
+static enic_copy_item_fn enic_copy_item_ipv6_v2;
+static enic_copy_item_fn enic_copy_item_udp_v2;
+static enic_copy_item_fn enic_copy_item_tcp_v2;
+static enic_copy_item_fn enic_copy_item_sctp_v2;
+static enic_copy_item_fn enic_copy_item_sctp_v2;
+static enic_copy_item_fn enic_copy_item_vxlan_v2;
+static copy_action_fn enic_copy_action_v1;
+static copy_action_fn enic_copy_action_v2;
+
+/**
+ * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
+ * is supported.
+ */
+static const struct enic_items enic_items_v1[] = {
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .copy_item = enic_copy_item_ipv4_v1,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .copy_item = enic_copy_item_udp_v1,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .copy_item = enic_copy_item_tcp_v1,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+};
+
+/**
+ * NICs have Advanced Filters capability but they are disabled. This means
+ * that layer 3 must be specified.
+ */
+static const struct enic_items enic_items_v2[] = {
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .copy_item = enic_copy_item_eth_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .copy_item = enic_copy_item_vlan_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .copy_item = enic_copy_item_ipv4_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .copy_item = enic_copy_item_ipv6_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .copy_item = enic_copy_item_udp_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .copy_item = enic_copy_item_tcp_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_SCTP] = {
+ .copy_item = enic_copy_item_sctp_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_VXLAN] = {
+ .copy_item = enic_copy_item_vxlan_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+};
+
+/** NICs with Advanced filters enabled */
+static const struct enic_items enic_items_v3[] = {
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .copy_item = enic_copy_item_eth_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .copy_item = enic_copy_item_vlan_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .copy_item = enic_copy_item_ipv4_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .copy_item = enic_copy_item_ipv6_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .copy_item = enic_copy_item_udp_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .copy_item = enic_copy_item_tcp_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_SCTP] = {
+ .copy_item = enic_copy_item_sctp_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_VXLAN] = {
+ .copy_item = enic_copy_item_vxlan_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+};
+
+/** Filtering capabilities indexed this NICs supported filter type. */
+static const struct enic_filter_cap enic_filter_cap[] = {
+ [FILTER_IPV4_5TUPLE] = {
+ .item_info = enic_items_v1,
+ },
+ [FILTER_USNIC_IP] = {
+ .item_info = enic_items_v2,
+ },
+ [FILTER_DPDK_1] = {
+ .item_info = enic_items_v3,
+ },
+};
+
+/** Supported actions for older NICs */
+static const enum rte_flow_action_type enic_supported_actions_v1[] = {
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
+/** Supported actions for newer NICs */
+static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_MARK,
+ RTE_FLOW_ACTION_TYPE_FLAG,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
+static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_MARK,
+ RTE_FLOW_ACTION_TYPE_FLAG,
+ RTE_FLOW_ACTION_TYPE_DROP,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
+/** Action capabilities indexed by NIC version information */
+static const struct enic_action_cap enic_action_cap[] = {
+ [FILTER_ACTION_RQ_STEERING_FLAG] = {
+ .actions = enic_supported_actions_v1,
+ .copy_fn = enic_copy_action_v1,
+ },
+ [FILTER_ACTION_FILTER_ID_FLAG] = {
+ .actions = enic_supported_actions_v2_id,
+ .copy_fn = enic_copy_action_v2,
+ },
+ [FILTER_ACTION_DROP_FLAG] = {
+ .actions = enic_supported_actions_v2_drop,
+ .copy_fn = enic_copy_action_v2,
+ },
+};
+
+static int
+mask_exact_match(const u8 *supported, const u8 *supplied,
+ unsigned int size)
+{
+ unsigned int i;
+ for (i = 0; i < size; i++) {
+ if (supported[i] != supplied[i])
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Copy IPv4 item into version 1 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Should always be 0 for version 1.
+ */
+static int
+enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
+ struct ipv4_hdr supported_mask = {
+ .src_addr = 0xffffffff,
+ .dst_addr = 0xffffffff,
+ };
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return ENOTSUP;
+
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+
+ /* This is an exact match filter, both fields must be set */
+ if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
+ FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
+ return ENOTSUP;
+ }
+
+ /* check that the suppied mask exactly matches capabilty */
+ if (!mask_exact_match((const u8 *)&supported_mask,
+ (const u8 *)item->mask, sizeof(*mask))) {
+ FLOW_LOG(ERR, "IPv4 exact match mask");
+ return ENOTSUP;
+ }
+
+ enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+ enic_5tup->src_addr = spec->hdr.src_addr;
+ enic_5tup->dst_addr = spec->hdr.dst_addr;
+
+ return 0;
+}
+
+/**
+ * Copy UDP item into version 1 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Should always be 0 for version 1.
+ */
+static int
+enic_copy_item_udp_v1(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
+ struct udp_hdr supported_mask = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ };
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return ENOTSUP;
+
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+
+ /* This is an exact match filter, both ports must be set */
+ if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
+ FLOW_LOG(ERR, "UDP exact match src/dst addr");
+ return ENOTSUP;
+ }
+
+ /* check that the suppied mask exactly matches capabilty */
+ if (!mask_exact_match((const u8 *)&supported_mask,
+ (const u8 *)item->mask, sizeof(*mask))) {
+ FLOW_LOG(ERR, "UDP exact match mask");
+ return ENOTSUP;
+ }
+
+ enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+ enic_5tup->src_port = spec->hdr.src_port;
+ enic_5tup->dst_port = spec->hdr.dst_port;
+ enic_5tup->protocol = PROTO_UDP;
+
+ return 0;
+}
+
+/**
+ * Copy TCP item into version 1 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Should always be 0 for version 1.
+ */
+static int
+enic_copy_item_tcp_v1(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
+ struct tcp_hdr supported_mask = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ };
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return ENOTSUP;
+
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+
+ /* This is an exact match filter, both ports must be set */
+ if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
+ FLOW_LOG(ERR, "TCPIPv4 exact match src/dst addr");
+ return ENOTSUP;
+ }
+
+ /* check that the suppied mask exactly matches capabilty */
+ if (!mask_exact_match((const u8 *)&supported_mask,
+ (const u8 *)item->mask, sizeof(*mask))) {
+ FLOW_LOG(ERR, "TCP exact match mask");
+ return ENOTSUP;
+ }
+
+ enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+ enic_5tup->src_port = spec->hdr.src_port;
+ enic_5tup->dst_port = spec->hdr.dst_port;
+ enic_5tup->protocol = PROTO_TCP;
+
+ return 0;
+}
+
+/**
+ * Copy ETH item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * If zero, this is an outer header. If non-zero, this is the offset into L5
+ * where the header begins.
+ */
+static int
+enic_copy_item_eth_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ struct ether_hdr enic_spec;
+ struct ether_hdr enic_mask;
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_eth_mask;
+
+ memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
+ ETHER_ADDR_LEN);
+ memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
+ ETHER_ADDR_LEN);
+
+ memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
+ ETHER_ADDR_LEN);
+ memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
+ ETHER_ADDR_LEN);
+ enic_spec.ether_type = spec->type;
+ enic_mask.ether_type = mask->type;
+
+ if (*inner_ofst == 0) {
+ /* outer header */
+ memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
+ sizeof(struct ether_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
+ sizeof(struct ether_hdr));
+ } else {
+ /* inner header */
+ if ((*inner_ofst + sizeof(struct ether_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ /* Offset into L5 where inner Ethernet header goes */
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ &enic_mask, sizeof(struct ether_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ &enic_spec, sizeof(struct ether_hdr));
+ *inner_ofst += sizeof(struct ether_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy VLAN item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * If zero, this is an outer header. If non-zero, this is the offset into L5
+ * where the header begins.
+ */
+static int
+enic_copy_item_vlan_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+
+ if (*inner_ofst == 0) {
+ struct ether_hdr *eth_mask =
+ (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
+ struct ether_hdr *eth_val =
+ (void *)gp->layer[FILTER_GENERIC_1_L2].val;
+
+ /* Outer TPID cannot be matched */
+ if (eth_mask->ether_type)
+ return ENOTSUP;
+ eth_mask->ether_type = mask->inner_type;
+ eth_val->ether_type = spec->inner_type;
+
+ /* Outer header. Use the vlan mask/val fields */
+ gp->mask_vlan = mask->tci;
+ gp->val_vlan = spec->tci;
+ } else {
+ /* Inner header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct vlan_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct vlan_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct vlan_hdr));
+ *inner_ofst += sizeof(struct vlan_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy IPv4 item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. Don't support inner IPv4 filtering.
+ */
+static int
+enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ if (*inner_ofst == 0) {
+ /* Match IPv4 */
+ gp->mask_flags |= FILTER_GENERIC_1_IPV4;
+ gp->val_flags |= FILTER_GENERIC_1_IPV4;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
+ sizeof(struct ipv4_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
+ sizeof(struct ipv4_hdr));
+ } else {
+ /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct ipv4_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct ipv4_hdr));
+ *inner_ofst += sizeof(struct ipv4_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy IPv6 item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. Don't support inner IPv6 filtering.
+ */
+static int
+enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match IPv6 */
+ gp->mask_flags |= FILTER_GENERIC_1_IPV6;
+ gp->val_flags |= FILTER_GENERIC_1_IPV6;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+
+ if (*inner_ofst == 0) {
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
+ sizeof(struct ipv6_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
+ sizeof(struct ipv6_hdr));
+ } else {
+ /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct ipv6_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct ipv6_hdr));
+ *inner_ofst += sizeof(struct ipv6_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy UDP item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. Don't support inner UDP filtering.
+ */
+static int
+enic_copy_item_udp_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match UDP */
+ gp->mask_flags |= FILTER_GENERIC_1_UDP;
+ gp->val_flags |= FILTER_GENERIC_1_UDP;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+
+ if (*inner_ofst == 0) {
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
+ sizeof(struct udp_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
+ sizeof(struct udp_hdr));
+ } else {
+ /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct udp_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct udp_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct udp_hdr));
+ *inner_ofst += sizeof(struct udp_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy TCP item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. Don't support inner TCP filtering.
+ */
+static int
+enic_copy_item_tcp_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match TCP */
+ gp->mask_flags |= FILTER_GENERIC_1_TCP;
+ gp->val_flags |= FILTER_GENERIC_1_TCP;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ return ENOTSUP;
+
+ if (*inner_ofst == 0) {
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
+ sizeof(struct tcp_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
+ sizeof(struct tcp_hdr));
+ } else {
+ /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct tcp_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct tcp_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct tcp_hdr));
+ *inner_ofst += sizeof(struct tcp_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy SCTP item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. Don't support inner SCTP filtering.
+ */
+static int
+enic_copy_item_sctp_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_sctp *spec = item->spec;
+ const struct rte_flow_item_sctp *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return ENOTSUP;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_sctp_mask;
+
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
+ sizeof(struct sctp_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
+ sizeof(struct sctp_hdr));
+ return 0;
+}
+
+/**
+ * Copy UDP item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. VxLAN headers always start at the beginning of L5.
+ */
+static int
+enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_vxlan *spec = item->spec;
+ const struct rte_flow_item_vxlan *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return EINVAL;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_vxlan_mask;
+
+ memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
+ sizeof(struct vxlan_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
+ sizeof(struct vxlan_hdr));
+
+ *inner_ofst = sizeof(struct vxlan_hdr);
+ return 0;
+}
+
+/**
+ * Return 1 if current item is valid on top of the previous one.
+ *
+ * @param prev_item[in]
+ * The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
+ * is the first item.
+ * @param item_info[in]
+ * Info about this item, like valid previous items.
+ * @param is_first[in]
+ * True if this the first item in the pattern.
+ */
+static int
+item_stacking_valid(enum rte_flow_item_type prev_item,
+ const struct enic_items *item_info, u8 is_first_item)
+{
+ enum rte_flow_item_type const *allowed_items = item_info->prev_items;
+
+ FLOW_TRACE();
+
+ for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
+ if (prev_item == *allowed_items)
+ return 1;
+ }
+
+ /* This is the first item in the stack. Check if that's cool */
+ if (is_first_item && item_info->valid_start_item)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Build the intenal enic filter structure from the provided pattern. The
+ * pattern is validated as the items are copied.
+ *
+ * @param pattern[in]
+ * @param items_info[in]
+ * Info about this NICs item support, like valid previous items.
+ * @param enic_filter[out]
+ * NIC specfilc filters derived from the pattern.
+ * @param error[out]
+ */
+static int
+enic_copy_filter(const struct rte_flow_item pattern[],
+ const struct enic_items *items_info,
+ struct filter_v2 *enic_filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ const struct rte_flow_item *item = pattern;
+ u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
+ enum rte_flow_item_type prev_item;
+ const struct enic_items *item_info;
+
+ u8 is_first_item = 1;
+
+ FLOW_TRACE();
+
+ prev_item = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ /* Get info about how to validate and copy the item. If NULL
+ * is returned the nic does not support the item.
+ */
+ if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+
+ item_info = &items_info[item->type];
+
+ /* check to see if item stacking is valid */
+ if (!item_stacking_valid(prev_item, item_info, is_first_item))
+ goto stacking_error;
+
+ ret = item_info->copy_item(item, enic_filter, &inner_ofst);
+ if (ret)
+ goto item_not_supported;
+ prev_item = item->type;
+ is_first_item = 0;
+ }
+ return 0;
+
+item_not_supported:
+ rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "enic type error");
+ return -rte_errno;
+
+stacking_error:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "stacking error");
+ return -rte_errno;
+}
+
+/**
+ * Build the intenal version 1 NIC action structure from the provided pattern.
+ * The pattern is validated as the items are copied.
+ *
+ * @param actions[in]
+ * @param enic_action[out]
+ * NIC specfilc actions derived from the actions.
+ * @param error[out]
+ */
+static int
+enic_copy_action_v1(const struct rte_flow_action actions[],
+ struct filter_action_v2 *enic_action)
+{
+ enum { FATE = 1, };
+ uint32_t overlap = 0;
+
+ FLOW_TRACE();
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE: {
+ const struct rte_flow_action_queue *queue =
+ (const struct rte_flow_action_queue *)
+ actions->conf;
+
+ if (overlap & FATE)
+ return ENOTSUP;
+ overlap |= FATE;
+ enic_action->rq_idx =
+ enic_rte_rq_idx_to_sop_idx(queue->index);
+ break;
+ }
+ default:
+ RTE_ASSERT(0);
+ break;
+ }
+ }
+ if (!(overlap & FATE))
+ return ENOTSUP;
+ enic_action->type = FILTER_ACTION_RQ_STEERING;
+ return 0;
+}
+
+/**
+ * Build the intenal version 2 NIC action structure from the provided pattern.
+ * The pattern is validated as the items are copied.
+ *
+ * @param actions[in]
+ * @param enic_action[out]
+ * NIC specfilc actions derived from the actions.
+ * @param error[out]
+ */
+static int
+enic_copy_action_v2(const struct rte_flow_action actions[],
+ struct filter_action_v2 *enic_action)
+{
+ enum { FATE = 1, MARK = 2, };
+ uint32_t overlap = 0;
+
+ FLOW_TRACE();
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE: {
+ const struct rte_flow_action_queue *queue =
+ (const struct rte_flow_action_queue *)
+ actions->conf;
+
+ if (overlap & FATE)
+ return ENOTSUP;
+ overlap |= FATE;
+ enic_action->rq_idx =
+ enic_rte_rq_idx_to_sop_idx(queue->index);
+ enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_MARK: {
+ const struct rte_flow_action_mark *mark =
+ (const struct rte_flow_action_mark *)
+ actions->conf;
+
+ if (overlap & MARK)
+ return ENOTSUP;
+ overlap |= MARK;
+ /* ENIC_MAGIC_FILTER_ID is reserved and is the highest
+ * in the range of allows mark ids.
+ */
+ if (mark->id >= ENIC_MAGIC_FILTER_ID)
+ return EINVAL;
+ enic_action->filter_id = mark->id;
+ enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_FLAG: {
+ if (overlap & MARK)
+ return ENOTSUP;
+ overlap |= MARK;
+ enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
+ enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_DROP: {
+ if (overlap & FATE)
+ return ENOTSUP;
+ overlap |= FATE;
+ enic_action->flags |= FILTER_ACTION_DROP_FLAG;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ default:
+ RTE_ASSERT(0);
+ break;
+ }
+ }
+ if (!(overlap & FATE))
+ return ENOTSUP;
+ enic_action->type = FILTER_ACTION_V2;
+ return 0;
+}
+
+/** Check if the action is supported */
+static int
+enic_match_action(const struct rte_flow_action *action,
+ const enum rte_flow_action_type *supported_actions)
+{
+ for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
+ supported_actions++) {
+ if (action->type == *supported_actions)
+ return 1;
+ }
+ return 0;
+}
+
+/** Get the NIC filter capabilties structure */
+static const struct enic_filter_cap *
+enic_get_filter_cap(struct enic *enic)
+{
+ if (enic->flow_filter_mode)
+ return &enic_filter_cap[enic->flow_filter_mode];
+
+ return NULL;
+}
+
+/** Get the actions for this NIC version. */
+static const struct enic_action_cap *
+enic_get_action_cap(struct enic *enic)
+{
+ const struct enic_action_cap *ea;
+ uint8_t actions;
+
+ actions = enic->filter_actions;
+ if (actions & FILTER_ACTION_DROP_FLAG)
+ ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
+ else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
+ ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
+ else
+ ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
+ return ea;
+}
+
+/* Debug function to dump internal NIC action structure. */
+static void
+enic_dump_actions(const struct filter_action_v2 *ea)
+{
+ if (ea->type == FILTER_ACTION_RQ_STEERING) {
+ FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
+ } else if (ea->type == FILTER_ACTION_V2) {
+ FLOW_LOG(INFO, "Actions(V2)\n");
+ if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
+ FLOW_LOG(INFO, "\tqueue: %u\n",
+ enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
+ if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
+ FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
+ }
+}
+
+/* Debug function to dump internal NIC filter structure. */
+static void
+enic_dump_filter(const struct filter_v2 *filt)
+{
+ const struct filter_generic_1 *gp;
+ int i, j, mbyte;
+ char buf[128], *bp;
+ char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
+ char l4csum[16], ipfrag[16];
+
+ switch (filt->type) {
+ case FILTER_IPV4_5TUPLE:
+ FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
+ break;
+ case FILTER_USNIC_IP:
+ case FILTER_DPDK_1:
+ /* FIXME: this should be a loop */
+ gp = &filt->u.generic_1;
+ FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
+ gp->val_vlan, gp->mask_vlan);
+
+ if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
+ sprintf(ip4, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_IPV4)
+ ? "ip4(y)" : "ip4(n)");
+ else
+ sprintf(ip4, "%s ", "ip4(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
+ sprintf(ip6, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_IPV4)
+ ? "ip6(y)" : "ip6(n)");
+ else
+ sprintf(ip6, "%s ", "ip6(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_UDP)
+ sprintf(udp, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_UDP)
+ ? "udp(y)" : "udp(n)");
+ else
+ sprintf(udp, "%s ", "udp(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_TCP)
+ sprintf(tcp, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_TCP)
+ ? "tcp(y)" : "tcp(n)");
+ else
+ sprintf(tcp, "%s ", "tcp(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
+ sprintf(tcpudp, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
+ ? "tcpudp(y)" : "tcpudp(n)");
+ else
+ sprintf(tcpudp, "%s ", "tcpudp(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
+ sprintf(ip4csum, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
+ ? "ip4csum(y)" : "ip4csum(n)");
+ else
+ sprintf(ip4csum, "%s ", "ip4csum(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
+ sprintf(l4csum, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
+ ? "l4csum(y)" : "l4csum(n)");
+ else
+ sprintf(l4csum, "%s ", "l4csum(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
+ sprintf(ipfrag, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
+ ? "ipfrag(y)" : "ipfrag(n)");
+ else
+ sprintf(ipfrag, "%s ", "ipfrag(x)");
+ FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
+ tcp, tcpudp, ip4csum, l4csum, ipfrag);
+
+ for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
+ mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
+ while (mbyte && !gp->layer[i].mask[mbyte])
+ mbyte--;
+ if (mbyte == 0)
+ continue;
+
+ bp = buf;
+ for (j = 0; j <= mbyte; j++) {
+ sprintf(bp, "%02x",
+ gp->layer[i].mask[j]);
+ bp += 2;
+ }
+ *bp = '\0';
+ FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
+ bp = buf;
+ for (j = 0; j <= mbyte; j++) {
+ sprintf(bp, "%02x",
+ gp->layer[i].val[j]);
+ bp += 2;
+ }
+ *bp = '\0';
+ FLOW_LOG(INFO, "\tL%u val: %s\n", i + 2, buf);
+ }
+ break;
+ default:
+ FLOW_LOG(INFO, "FILTER UNKNOWN\n");
+ break;
+ }
+}
+
+/* Debug function to dump internal NIC flow structures. */
+static void
+enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
+{
+ enic_dump_filter(filt);
+ enic_dump_actions(ea);
+}
+
+
+/**
+ * Internal flow parse/validate function.
+ *
+ * @param dev[in]
+ * This device pointer.
+ * @param pattern[in]
+ * @param actions[in]
+ * @param error[out]
+ * @param enic_filter[out]
+ * Internal NIC filter structure pointer.
+ * @param enic_action[out]
+ * Internal NIC action structure pointer.
+ */
+static int
+enic_flow_parse(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attrs,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct filter_v2 *enic_filter,
+ struct filter_action_v2 *enic_action)
+{
+ unsigned int ret = 0;
+ struct enic *enic = pmd_priv(dev);
+ const struct enic_filter_cap *enic_filter_cap;
+ const struct enic_action_cap *enic_action_cap;
+ const struct rte_flow_action *action;
+
+ FLOW_TRACE();
+
+ memset(enic_filter, 0, sizeof(*enic_filter));
+ memset(enic_action, 0, sizeof(*enic_action));
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No pattern specified");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "No action specified");
+ return -rte_errno;
+ }
+
+ if (attrs) {
+ if (attrs->group) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "priority groups are not supported");
+ return -rte_errno;
+ } else if (attrs->priority) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priorities are not supported");
+ return -rte_errno;
+ } else if (attrs->egress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "egress is not supported");
+ return -rte_errno;
+ } else if (attrs->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "transfer is not supported");
+ return -rte_errno;
+ } else if (!attrs->ingress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "only ingress is supported");
+ return -rte_errno;
+ }
+
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "No attribute specified");
+ return -rte_errno;
+ }
+
+ /* Verify Actions. */
+ enic_action_cap = enic_get_action_cap(enic);
+ for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
+ action++) {
+ if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+ else if (!enic_match_action(action, enic_action_cap->actions))
+ break;
+ }
+ if (action->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "Invalid action.");
+ return -rte_errno;
+ }
+ ret = enic_action_cap->copy_fn(actions, enic_action);
+ if (ret) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Unsupported action.");
+ return -rte_errno;
+ }
+
+ /* Verify Flow items. If copying the filter from flow format to enic
+ * format fails, the flow is not supported
+ */
+ enic_filter_cap = enic_get_filter_cap(enic);
+ if (enic_filter_cap == NULL) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Flow API not available");
+ return -rte_errno;
+ }
+ enic_filter->type = enic->flow_filter_mode;
+ ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
+ enic_filter, error);
+ return ret;
+}
+
+/**
+ * Push filter/action to the NIC.
+ *
+ * @param enic[in]
+ * Device structure pointer.
+ * @param enic_filter[in]
+ * Internal NIC filter structure pointer.
+ * @param enic_action[in]
+ * Internal NIC action structure pointer.
+ * @param error[out]
+ */
+static struct rte_flow *
+enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
+ struct filter_action_v2 *enic_action,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow;
+ int ret;
+ u16 entry;
+
+ FLOW_TRACE();
+
+ flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate flow memory");
+ return NULL;
+ }
+
+ /* entry[in] is the queue id, entry[out] is the filter Id for delete */
+ entry = enic_action->rq_idx;
+ ret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
+ enic_action);
+ if (!ret) {
+ flow->enic_filter_id = entry;
+ flow->enic_filter = *enic_filter;
+ } else {
+ rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "vnic_dev_classifier error");
+ rte_free(flow);
+ return NULL;
+ }
+ return flow;
+}
+
+/**
+ * Remove filter/action from the NIC.
+ *
+ * @param enic[in]
+ * Device structure pointer.
+ * @param filter_id[in]
+ * Id of NIC filter.
+ * @param enic_action[in]
+ * Internal NIC action structure pointer.
+ * @param error[out]
+ */
+static int
+enic_flow_del_filter(struct enic *enic, u16 filter_id,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ FLOW_TRACE();
+
+ ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
+ if (!ret)
+ rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "vnic_dev_classifier failed");
+ return ret;
+}
+
+/*
+ * The following functions are callbacks for Generic flow API.
+ */
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+static int
+enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct filter_v2 enic_filter;
+ struct filter_action_v2 enic_action;
+ int ret;
+
+ FLOW_TRACE();
+
+ ret = enic_flow_parse(dev, attrs, pattern, actions, error,
+ &enic_filter, &enic_action);
+ if (!ret)
+ enic_dump_flow(&enic_action, &enic_filter);
+ return ret;
+}
+
+/**
+ * Create a flow supported by the NIC.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+static struct rte_flow *
+enic_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attrs,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct filter_v2 enic_filter;
+ struct filter_action_v2 enic_action;
+ struct rte_flow *flow;
+ struct enic *enic = pmd_priv(dev);
+
+ FLOW_TRACE();
+
+ ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
+ &enic_action);
+ if (ret < 0)
+ return NULL;
+
+ rte_spinlock_lock(&enic->flows_lock);
+ flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
+ error);
+ if (flow)
+ LIST_INSERT_HEAD(&enic->flows, flow, next);
+ rte_spinlock_unlock(&enic->flows_lock);
+
+ return flow;
+}
+
+/**
+ * Destroy a flow supported by the NIC.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+static int
+enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ __rte_unused struct rte_flow_error *error)
+{
+ struct enic *enic = pmd_priv(dev);
+
+ FLOW_TRACE();
+
+ rte_spinlock_lock(&enic->flows_lock);
+ enic_flow_del_filter(enic, flow->enic_filter_id, error);
+ LIST_REMOVE(flow, next);
+ rte_spinlock_unlock(&enic->flows_lock);
+ return 0;
+}
+
+/**
+ * Flush all flows on the device.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+static int
+enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct rte_flow *flow;
+ struct enic *enic = pmd_priv(dev);
+
+ FLOW_TRACE();
+
+ rte_spinlock_lock(&enic->flows_lock);
+
+ while (!LIST_EMPTY(&enic->flows)) {
+ flow = LIST_FIRST(&enic->flows);
+ enic_flow_del_filter(enic, flow->enic_filter_id, error);
+ LIST_REMOVE(flow, next);
+ }
+ rte_spinlock_unlock(&enic->flows_lock);
+ return 0;
+}
+
+/**
+ * Flow callback registration.
+ *
+ * @see rte_flow_ops
+ */
+const struct rte_flow_ops enic_flow_ops = {
+ .validate = enic_flow_validate,
+ .create = enic_flow_create,
+ .destroy = enic_flow_destroy,
+ .flush = enic_flow_flush,
+};
diff --git a/src/spdk/dpdk/drivers/net/enic/enic_main.c b/src/spdk/dpdk/drivers/net/enic/enic_main.c
new file mode 100644
index 00000000..fd940c58
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/enic_main.c
@@ -0,0 +1,1772 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <stdio.h>
+
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <libgen.h>
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+#include <rte_ethdev_driver.h>
+
+#include "enic_compat.h"
+#include "enic.h"
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_nic.h"
+
+static inline int enic_is_sriov_vf(struct enic *enic)
+{
+ return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
+}
+
+static int is_zero_addr(uint8_t *addr)
+{
+ return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+}
+
+static int is_mcast_addr(uint8_t *addr)
+{
+ return addr[0] & 1;
+}
+
+static int is_eth_addr_valid(uint8_t *addr)
+{
+ return !is_mcast_addr(addr) && !is_zero_addr(addr);
+}
+
+static void
+enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
+{
+ uint16_t i;
+
+ if (!rq || !rq->mbuf_ring) {
+ dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < rq->ring.desc_count; i++) {
+ if (rq->mbuf_ring[i]) {
+ rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
+ rq->mbuf_ring[i] = NULL;
+ }
+ }
+}
+
+static void enic_free_wq_buf(struct rte_mbuf **buf)
+{
+ struct rte_mbuf *mbuf = *buf;
+
+ rte_pktmbuf_free_seg(mbuf);
+ *buf = NULL;
+}
+
+static void enic_log_q_error(struct enic *enic)
+{
+ unsigned int i;
+ u32 error_status;
+
+ for (i = 0; i < enic->wq_count; i++) {
+ error_status = vnic_wq_error_status(&enic->wq[i]);
+ if (error_status)
+ dev_err(enic, "WQ[%d] error_status %d\n", i,
+ error_status);
+ }
+
+ for (i = 0; i < enic_vnic_rq_count(enic); i++) {
+ if (!enic->rq[i].in_use)
+ continue;
+ error_status = vnic_rq_error_status(&enic->rq[i]);
+ if (error_status)
+ dev_err(enic, "RQ[%d] error_status %d\n", i,
+ error_status);
+ }
+}
+
+static void enic_clear_soft_stats(struct enic *enic)
+{
+ struct enic_soft_stats *soft_stats = &enic->soft_stats;
+ rte_atomic64_clear(&soft_stats->rx_nombuf);
+ rte_atomic64_clear(&soft_stats->rx_packet_errors);
+ rte_atomic64_clear(&soft_stats->tx_oversized);
+}
+
+static void enic_init_soft_stats(struct enic *enic)
+{
+ struct enic_soft_stats *soft_stats = &enic->soft_stats;
+ rte_atomic64_init(&soft_stats->rx_nombuf);
+ rte_atomic64_init(&soft_stats->rx_packet_errors);
+ rte_atomic64_init(&soft_stats->tx_oversized);
+ enic_clear_soft_stats(enic);
+}
+
+void enic_dev_stats_clear(struct enic *enic)
+{
+ if (vnic_dev_stats_clear(enic->vdev))
+ dev_err(enic, "Error in clearing stats\n");
+ enic_clear_soft_stats(enic);
+}
+
+int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
+{
+ struct vnic_stats *stats;
+ struct enic_soft_stats *soft_stats = &enic->soft_stats;
+ int64_t rx_truncated;
+ uint64_t rx_packet_errors;
+ int ret = vnic_dev_stats_dump(enic->vdev, &stats);
+
+ if (ret) {
+ dev_err(enic, "Error in getting stats\n");
+ return ret;
+ }
+
+ /* The number of truncated packets can only be calculated by
+ * subtracting a hardware counter from error packets received by
+ * the driver. Note: this causes transient inaccuracies in the
+ * ipackets count. Also, the length of truncated packets are
+ * counted in ibytes even though truncated packets are dropped
+ * which can make ibytes be slightly higher than it should be.
+ */
+ rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
+ rx_truncated = rx_packet_errors - stats->rx.rx_errors;
+
+ r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
+ r_stats->opackets = stats->tx.tx_frames_ok;
+
+ r_stats->ibytes = stats->rx.rx_bytes_ok;
+ r_stats->obytes = stats->tx.tx_bytes_ok;
+
+ r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
+ r_stats->oerrors = stats->tx.tx_errors
+ + rte_atomic64_read(&soft_stats->tx_oversized);
+
+ r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
+
+ r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
+ return 0;
+}
+
+int enic_del_mac_address(struct enic *enic, int mac_index)
+{
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+ uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;
+
+ return vnic_dev_del_addr(enic->vdev, mac_addr);
+}
+
+int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
+{
+ int err;
+
+ if (!is_eth_addr_valid(mac_addr)) {
+ dev_err(enic, "invalid mac address\n");
+ return -EINVAL;
+ }
+
+ err = vnic_dev_add_addr(enic->vdev, mac_addr);
+ if (err)
+ dev_err(enic, "add mac addr failed\n");
+ return err;
+}
+
+static void
+enic_free_rq_buf(struct rte_mbuf **mbuf)
+{
+ if (*mbuf == NULL)
+ return;
+
+ rte_pktmbuf_free(*mbuf);
+ *mbuf = NULL;
+}
+
+void enic_init_vnic_resources(struct enic *enic)
+{
+ unsigned int error_interrupt_enable = 1;
+ unsigned int error_interrupt_offset = 0;
+ unsigned int rxq_interrupt_enable = 0;
+ unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
+ unsigned int index = 0;
+ unsigned int cq_idx;
+ struct vnic_rq *data_rq;
+
+ if (enic->rte_dev->data->dev_conf.intr_conf.rxq)
+ rxq_interrupt_enable = 1;
+
+ for (index = 0; index < enic->rq_count; index++) {
+ cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
+
+ vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
+ cq_idx,
+ error_interrupt_enable,
+ error_interrupt_offset);
+
+ data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
+ if (data_rq->in_use)
+ vnic_rq_init(data_rq,
+ cq_idx,
+ error_interrupt_enable,
+ error_interrupt_offset);
+
+ vnic_cq_init(&enic->cq[cq_idx],
+ 0 /* flow_control_enable */,
+ 1 /* color_enable */,
+ 0 /* cq_head */,
+ 0 /* cq_tail */,
+ 1 /* cq_tail_color */,
+ rxq_interrupt_enable,
+ 1 /* cq_entry_enable */,
+ 0 /* cq_message_enable */,
+ rxq_interrupt_offset,
+ 0 /* cq_message_addr */);
+ if (rxq_interrupt_enable)
+ rxq_interrupt_offset++;
+ }
+
+ for (index = 0; index < enic->wq_count; index++) {
+ vnic_wq_init(&enic->wq[index],
+ enic_cq_wq(enic, index),
+ error_interrupt_enable,
+ error_interrupt_offset);
+ /* Compute unsupported ol flags for enic_prep_pkts() */
+ enic->wq[index].tx_offload_notsup_mask =
+ PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
+
+ cq_idx = enic_cq_wq(enic, index);
+ vnic_cq_init(&enic->cq[cq_idx],
+ 0 /* flow_control_enable */,
+ 1 /* color_enable */,
+ 0 /* cq_head */,
+ 0 /* cq_tail */,
+ 1 /* cq_tail_color */,
+ 0 /* interrupt_enable */,
+ 0 /* cq_entry_enable */,
+ 1 /* cq_message_enable */,
+ 0 /* interrupt offset */,
+ (u64)enic->wq[index].cqmsg_rz->iova);
+ }
+
+ for (index = 0; index < enic->intr_count; index++) {
+ vnic_intr_init(&enic->intr[index],
+ enic->config.intr_timer_usec,
+ enic->config.intr_timer_type,
+ /*mask_on_assertion*/1);
+ }
+}
+
+
+static int
+enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
+{
+ struct rte_mbuf *mb;
+ struct rq_enet_desc *rqd = rq->ring.descs;
+ unsigned i;
+ dma_addr_t dma_addr;
+ uint32_t max_rx_pkt_len;
+ uint16_t rq_buf_len;
+
+ if (!rq->in_use)
+ return 0;
+
+ dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
+ rq->ring.desc_count);
+
+ /*
+ * If *not* using scatter and the mbuf size is greater than the
+ * requested max packet size (max_rx_pkt_len), then reduce the
+ * posted buffer size to max_rx_pkt_len. HW still receives packets
+ * larger than max_rx_pkt_len, but they will be truncated, which we
+ * drop in the rx handler. Not ideal, but better than returning
+ * large packets when the user is not expecting them.
+ */
+ max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
+ if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
+ rq_buf_len = max_rx_pkt_len;
+ for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
+ mb = rte_mbuf_raw_alloc(rq->mp);
+ if (mb == NULL) {
+ dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
+ (unsigned)rq->index);
+ return -ENOMEM;
+ }
+
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ dma_addr = (dma_addr_t)(mb->buf_iova
+ + RTE_PKTMBUF_HEADROOM);
+ rq_enet_desc_enc(rqd, dma_addr,
+ (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
+ : RQ_ENET_TYPE_NOT_SOP),
+ rq_buf_len);
+ rq->mbuf_ring[i] = mb;
+ }
+ /*
+ * Do not post the buffers to the NIC until we enable the RQ via
+ * enic_start_rq().
+ */
+ rq->need_initial_post = true;
+ /* Initialize fetch index while RQ is disabled */
+ iowrite32(0, &rq->ctrl->fetch_index);
+ return 0;
+}
+
+/*
+ * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has
+ * allocated the buffers and filled the RQ descriptor ring. Just need to push
+ * the post index to the NIC.
+ */
+static void
+enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
+{
+ if (!rq->in_use || !rq->need_initial_post)
+ return;
+
+ /* make sure all prior writes are complete before doing the PIO write */
+ rte_rmb();
+
+ /* Post all but the last buffer to VIC. */
+ rq->posted_index = rq->ring.desc_count - 1;
+
+ rq->rx_nb_hold = 0;
+
+ dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
+ enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
+ iowrite32(rq->posted_index, &rq->ctrl->posted_index);
+ rte_rmb();
+ rq->need_initial_post = false;
+}
+
+static void *
+enic_alloc_consistent(void *priv, size_t size,
+ dma_addr_t *dma_handle, u8 *name)
+{
+ void *vaddr;
+ const struct rte_memzone *rz;
+ *dma_handle = 0;
+ struct enic *enic = (struct enic *)priv;
+ struct enic_memzone_entry *mze;
+
+ rz = rte_memzone_reserve_aligned((const char *)name, size,
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
+ if (!rz) {
+ pr_err("%s : Failed to allocate memory requested for %s\n",
+ __func__, name);
+ return NULL;
+ }
+
+ vaddr = rz->addr;
+ *dma_handle = (dma_addr_t)rz->iova;
+
+ mze = rte_malloc("enic memzone entry",
+ sizeof(struct enic_memzone_entry), 0);
+
+ if (!mze) {
+ pr_err("%s : Failed to allocate memory for memzone list\n",
+ __func__);
+ rte_memzone_free(rz);
+ return NULL;
+ }
+
+ mze->rz = rz;
+
+ rte_spinlock_lock(&enic->memzone_list_lock);
+ LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
+ rte_spinlock_unlock(&enic->memzone_list_lock);
+
+ return vaddr;
+}
+
+static void
+enic_free_consistent(void *priv,
+ __rte_unused size_t size,
+ void *vaddr,
+ dma_addr_t dma_handle)
+{
+ struct enic_memzone_entry *mze;
+ struct enic *enic = (struct enic *)priv;
+
+ rte_spinlock_lock(&enic->memzone_list_lock);
+ LIST_FOREACH(mze, &enic->memzone_list, entries) {
+ if (mze->rz->addr == vaddr &&
+ mze->rz->iova == dma_handle)
+ break;
+ }
+ if (mze == NULL) {
+ rte_spinlock_unlock(&enic->memzone_list_lock);
+ dev_warning(enic,
+ "Tried to free memory, but couldn't find it in the memzone list\n");
+ return;
+ }
+ LIST_REMOVE(mze, entries);
+ rte_spinlock_unlock(&enic->memzone_list_lock);
+ rte_memzone_free(mze->rz);
+ rte_free(mze);
+}
+
+int enic_link_update(struct enic *enic)
+{
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+ struct rte_eth_link link;
+
+ memset(&link, 0, sizeof(link));
+ link.link_status = enic_get_link_status(enic);
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = vnic_dev_port_speed(enic->vdev);
+
+ return rte_eth_linkstatus_set(eth_dev, &link);
+}
+
+static void
+enic_intr_handler(void *arg)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
+ struct enic *enic = pmd_priv(dev);
+
+ vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
+
+ enic_link_update(enic);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ enic_log_q_error(enic);
+}
+
+static int enic_rxq_intr_init(struct enic *enic)
+{
+ struct rte_intr_handle *intr_handle;
+ uint32_t rxq_intr_count, i;
+ int err;
+
+ intr_handle = enic->rte_dev->intr_handle;
+ if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
+ return 0;
+ /*
+ * Rx queue interrupts only work when we have MSI-X interrupts,
+ * one per queue. Sharing one interrupt is technically
+ * possible with VIC, but it is not worth the complications it brings.
+ */
+ if (!rte_intr_cap_multiple(intr_handle)) {
+ dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
+ " (vfio-pci driver)\n");
+ return -ENOTSUP;
+ }
+ rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
+ err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
+ if (err) {
+ dev_err(enic, "Failed to enable event fds for Rx queue"
+ " interrupts\n");
+ return err;
+ }
+ intr_handle->intr_vec = rte_zmalloc("enic_intr_vec",
+ rxq_intr_count * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ dev_err(enic, "Failed to allocate intr_vec\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < rxq_intr_count; i++)
+ intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET;
+ return 0;
+}
+
+static void enic_rxq_intr_deinit(struct enic *enic)
+{
+ struct rte_intr_handle *intr_handle;
+
+ intr_handle = enic->rte_dev->intr_handle;
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
+
+static void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
+{
+ struct wq_enet_desc *desc;
+ struct vnic_wq *wq;
+ unsigned int i;
+
+ /*
+ * Fill WQ descriptor fields that never change. Every descriptor is
+ * one packet, so set EOP. Also set CQ_ENTRY every ENIC_WQ_CQ_THRESH
+ * descriptors (i.e. request one completion update every 32 packets).
+ */
+ wq = &enic->wq[queue_idx];
+ desc = (struct wq_enet_desc *)wq->ring.descs;
+ for (i = 0; i < wq->ring.desc_count; i++, desc++) {
+ desc->header_length_flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT;
+ if (i % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1)
+ desc->header_length_flags |=
+ (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT);
+ }
+}
+
+static void pick_rx_handler(struct enic *enic)
+{
+ struct rte_eth_dev *eth_dev;
+
+ /* Use the non-scatter, simplified RX handler if possible. */
+ eth_dev = enic->rte_dev;
+ if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
+ PMD_INIT_LOG(DEBUG, " use the non-scatter Rx handler");
+ eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
+ } else {
+ PMD_INIT_LOG(DEBUG, " use the normal Rx handler");
+ eth_dev->rx_pkt_burst = &enic_recv_pkts;
+ }
+}
+
+int enic_enable(struct enic *enic)
+{
+ unsigned int index;
+ int err;
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+ eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
+ eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ /* vnic notification of link status has already been turned on in
+ * enic_dev_init() which is called during probe time. Here we are
+ * just turning on interrupt vector 0 if needed.
+ */
+ if (eth_dev->data->dev_conf.intr_conf.lsc)
+ vnic_dev_notify_set(enic->vdev, 0);
+
+ err = enic_rxq_intr_init(enic);
+ if (err)
+ return err;
+ if (enic_clsf_init(enic))
+ dev_warning(enic, "Init of hash table for clsf failed."\
+ "Flow director feature will not work\n");
+
+ for (index = 0; index < enic->rq_count; index++) {
+ err = enic_alloc_rx_queue_mbufs(enic,
+ &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
+ if (err) {
+ dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
+ return err;
+ }
+ err = enic_alloc_rx_queue_mbufs(enic,
+ &enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
+ if (err) {
+ /* release the allocated mbufs for the sop rq*/
+ enic_rxmbuf_queue_release(enic,
+ &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
+
+ dev_err(enic, "Failed to alloc data RX queue mbufs\n");
+ return err;
+ }
+ }
+
+ /*
+ * Use the simple TX handler if possible. All offloads must be
+ * disabled.
+ */
+ if (eth_dev->data->dev_conf.txmode.offloads == 0) {
+ PMD_INIT_LOG(DEBUG, " use the simple tx handler");
+ eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
+ for (index = 0; index < enic->wq_count; index++)
+ enic_prep_wq_for_simple_tx(enic, index);
+ } else {
+ PMD_INIT_LOG(DEBUG, " use the default tx handler");
+ eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+ }
+
+ pick_rx_handler(enic);
+
+ for (index = 0; index < enic->wq_count; index++)
+ enic_start_wq(enic, index);
+ for (index = 0; index < enic->rq_count; index++)
+ enic_start_rq(enic, index);
+
+ vnic_dev_add_addr(enic->vdev, enic->mac_addr);
+
+ vnic_dev_enable_wait(enic->vdev);
+
+ /* Register and enable error interrupt */
+ rte_intr_callback_register(&(enic->pdev->intr_handle),
+ enic_intr_handler, (void *)enic->rte_dev);
+
+ rte_intr_enable(&(enic->pdev->intr_handle));
+ /* Unmask LSC interrupt */
+ vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
+
+ return 0;
+}
+
+int enic_alloc_intr_resources(struct enic *enic)
+{
+ int err;
+ unsigned int i;
+
+ dev_info(enic, "vNIC resources used: "\
+ "wq %d rq %d cq %d intr %d\n",
+ enic->wq_count, enic_vnic_rq_count(enic),
+ enic->cq_count, enic->intr_count);
+
+ for (i = 0; i < enic->intr_count; i++) {
+ err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
+ if (err) {
+ enic_free_vnic_resources(enic);
+ return err;
+ }
+ }
+ return 0;
+}
+
+void enic_free_rq(void *rxq)
+{
+ struct vnic_rq *rq_sop, *rq_data;
+ struct enic *enic;
+
+ if (rxq == NULL)
+ return;
+
+ rq_sop = (struct vnic_rq *)rxq;
+ enic = vnic_dev_priv(rq_sop->vdev);
+ rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+ if (rq_sop->free_mbufs) {
+ struct rte_mbuf **mb;
+ int i;
+
+ mb = rq_sop->free_mbufs;
+ for (i = ENIC_RX_BURST_MAX - rq_sop->num_free_mbufs;
+ i < ENIC_RX_BURST_MAX; i++)
+ rte_pktmbuf_free(mb[i]);
+ rte_free(rq_sop->free_mbufs);
+ rq_sop->free_mbufs = NULL;
+ rq_sop->num_free_mbufs = 0;
+ }
+
+ enic_rxmbuf_queue_release(enic, rq_sop);
+ if (rq_data->in_use)
+ enic_rxmbuf_queue_release(enic, rq_data);
+
+ rte_free(rq_sop->mbuf_ring);
+ if (rq_data->in_use)
+ rte_free(rq_data->mbuf_ring);
+
+ rq_sop->mbuf_ring = NULL;
+ rq_data->mbuf_ring = NULL;
+
+ vnic_rq_free(rq_sop);
+ if (rq_data->in_use)
+ vnic_rq_free(rq_data);
+
+ vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);
+
+ rq_sop->in_use = 0;
+ rq_data->in_use = 0;
+}
+
+void enic_start_wq(struct enic *enic, uint16_t queue_idx)
+{
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+ vnic_wq_enable(&enic->wq[queue_idx]);
+ eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+}
+
+int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
+{
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+ int ret;
+
+ ret = vnic_wq_disable(&enic->wq[queue_idx]);
+ if (ret)
+ return ret;
+
+ eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+void enic_start_rq(struct enic *enic, uint16_t queue_idx)
+{
+ struct vnic_rq *rq_sop;
+ struct vnic_rq *rq_data;
+ rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
+ rq_data = &enic->rq[rq_sop->data_queue_idx];
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+ if (rq_data->in_use) {
+ vnic_rq_enable(rq_data);
+ enic_initial_post_rx(enic, rq_data);
+ }
+ rte_mb();
+ vnic_rq_enable(rq_sop);
+ enic_initial_post_rx(enic, rq_sop);
+ eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+}
+
+int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
+{
+ int ret1 = 0, ret2 = 0;
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+ struct vnic_rq *rq_sop;
+ struct vnic_rq *rq_data;
+ rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
+ rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+ ret2 = vnic_rq_disable(rq_sop);
+ rte_mb();
+ if (rq_data->in_use)
+ ret1 = vnic_rq_disable(rq_data);
+
+ if (ret2)
+ return ret2;
+ else if (ret1)
+ return ret1;
+
+ eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, struct rte_mempool *mp,
+ uint16_t nb_desc, uint16_t free_thresh)
+{
+ int rc;
+ uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
+ uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
+ struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
+ struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
+ unsigned int mbuf_size, mbufs_per_pkt;
+ unsigned int nb_sop_desc, nb_data_desc;
+ uint16_t min_sop, max_sop, min_data, max_data;
+ uint32_t max_rx_pkt_len;
+
+ rq_sop->is_sop = 1;
+ rq_sop->data_queue_idx = data_queue_idx;
+ rq_data->is_sop = 0;
+ rq_data->data_queue_idx = 0;
+ rq_sop->socket_id = socket_id;
+ rq_sop->mp = mp;
+ rq_data->socket_id = socket_id;
+ rq_data->mp = mp;
+ rq_sop->in_use = 1;
+ rq_sop->rx_free_thresh = free_thresh;
+ rq_data->rx_free_thresh = free_thresh;
+ dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
+ free_thresh);
+
+ mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
+ RTE_PKTMBUF_HEADROOM);
+ /* max_rx_pkt_len includes the ethernet header and CRC. */
+ max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ if (enic->rte_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SCATTER) {
+ dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
+ /* ceil((max pkt len)/mbuf_size) */
+ mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
+ } else {
+ dev_info(enic, "Scatter rx mode disabled\n");
+ mbufs_per_pkt = 1;
+ if (max_rx_pkt_len > mbuf_size) {
+ dev_warning(enic, "The maximum Rx packet size (%u) is"
+ " larger than the mbuf size (%u), and"
+ " scatter is disabled. Larger packets will"
+ " be truncated.\n",
+ max_rx_pkt_len, mbuf_size);
+ }
+ }
+
+ if (mbufs_per_pkt > 1) {
+ dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
+ rq_sop->data_queue_enable = 1;
+ rq_data->in_use = 1;
+ /*
+ * HW does not directly support rxmode.max_rx_pkt_len. HW always
+ * receives packet sizes up to the "max" MTU.
+ * If not using scatter, we can achieve the effect of dropping
+ * larger packets by reducing the size of posted buffers.
+ * See enic_alloc_rx_queue_mbufs().
+ */
+ if (max_rx_pkt_len <
+ enic_mtu_to_max_rx_pktlen(enic->max_mtu)) {
+ dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
+ " when scatter rx mode is in use.\n");
+ }
+ } else {
+ dev_info(enic, "Rq %u Scatter rx mode not being used\n",
+ queue_idx);
+ rq_sop->data_queue_enable = 0;
+ rq_data->in_use = 0;
+ }
+
+ /* number of descriptors have to be a multiple of 32 */
+ nb_sop_desc = (nb_desc / mbufs_per_pkt) & ENIC_ALIGN_DESCS_MASK;
+ nb_data_desc = (nb_desc - nb_sop_desc) & ENIC_ALIGN_DESCS_MASK;
+
+ rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
+ rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
+
+ if (mbufs_per_pkt > 1) {
+ min_sop = ENIC_RX_BURST_MAX;
+ max_sop = ((enic->config.rq_desc_count /
+ (mbufs_per_pkt - 1)) & ENIC_ALIGN_DESCS_MASK);
+ min_data = min_sop * (mbufs_per_pkt - 1);
+ max_data = enic->config.rq_desc_count;
+ } else {
+ min_sop = ENIC_RX_BURST_MAX;
+ max_sop = enic->config.rq_desc_count;
+ min_data = 0;
+ max_data = 0;
+ }
+
+ if (nb_desc < (min_sop + min_data)) {
+ dev_warning(enic,
+ "Number of rx descs too low, adjusting to minimum\n");
+ nb_sop_desc = min_sop;
+ nb_data_desc = min_data;
+ } else if (nb_desc > (max_sop + max_data)) {
+ dev_warning(enic,
+ "Number of rx_descs too high, adjusting to maximum\n");
+ nb_sop_desc = max_sop;
+ nb_data_desc = max_data;
+ }
+ if (mbufs_per_pkt > 1) {
+ dev_info(enic, "For max packet size %u and mbuf size %u valid"
+ " rx descriptor range is %u to %u\n",
+ max_rx_pkt_len, mbuf_size, min_sop + min_data,
+ max_sop + max_data);
+ }
+ dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
+ nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);
+
+ /* Allocate sop queue resources */
+ rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
+ nb_sop_desc, sizeof(struct rq_enet_desc));
+ if (rc) {
+ dev_err(enic, "error in allocation of sop rq\n");
+ goto err_exit;
+ }
+ nb_sop_desc = rq_sop->ring.desc_count;
+
+ if (rq_data->in_use) {
+ /* Allocate data queue resources */
+ rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
+ nb_data_desc,
+ sizeof(struct rq_enet_desc));
+ if (rc) {
+ dev_err(enic, "error in allocation of data rq\n");
+ goto err_free_rq_sop;
+ }
+ nb_data_desc = rq_data->ring.desc_count;
+ }
+ rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
+ socket_id, nb_sop_desc + nb_data_desc,
+ sizeof(struct cq_enet_rq_desc));
+ if (rc) {
+ dev_err(enic, "error in allocation of cq for rq\n");
+ goto err_free_rq_data;
+ }
+
+ /* Allocate the mbuf rings */
+ rq_sop->mbuf_ring = (struct rte_mbuf **)
+ rte_zmalloc_socket("rq->mbuf_ring",
+ sizeof(struct rte_mbuf *) * nb_sop_desc,
+ RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+ if (rq_sop->mbuf_ring == NULL)
+ goto err_free_cq;
+
+ if (rq_data->in_use) {
+ rq_data->mbuf_ring = (struct rte_mbuf **)
+ rte_zmalloc_socket("rq->mbuf_ring",
+ sizeof(struct rte_mbuf *) * nb_data_desc,
+ RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+ if (rq_data->mbuf_ring == NULL)
+ goto err_free_sop_mbuf;
+ }
+
+ rq_sop->free_mbufs = (struct rte_mbuf **)
+ rte_zmalloc_socket("rq->free_mbufs",
+ sizeof(struct rte_mbuf *) *
+ ENIC_RX_BURST_MAX,
+ RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+ if (rq_sop->free_mbufs == NULL)
+ goto err_free_data_mbuf;
+ rq_sop->num_free_mbufs = 0;
+
+ rq_sop->tot_nb_desc = nb_desc; /* squirl away for MTU update function */
+
+ return 0;
+
+err_free_data_mbuf:
+ rte_free(rq_data->mbuf_ring);
+err_free_sop_mbuf:
+ rte_free(rq_sop->mbuf_ring);
+err_free_cq:
+ /* cleanup on error */
+ vnic_cq_free(&enic->cq[queue_idx]);
+err_free_rq_data:
+ if (rq_data->in_use)
+ vnic_rq_free(rq_data);
+err_free_rq_sop:
+ vnic_rq_free(rq_sop);
+err_exit:
+ return -ENOMEM;
+}
+
+void enic_free_wq(void *txq)
+{
+ struct vnic_wq *wq;
+ struct enic *enic;
+
+ if (txq == NULL)
+ return;
+
+ wq = (struct vnic_wq *)txq;
+ enic = vnic_dev_priv(wq->vdev);
+ rte_memzone_free(wq->cqmsg_rz);
+ vnic_wq_free(wq);
+ vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
+}
+
+int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, uint16_t nb_desc)
+{
+ int err;
+ struct vnic_wq *wq = &enic->wq[queue_idx];
+ unsigned int cq_index = enic_cq_wq(enic, queue_idx);
+ char name[NAME_MAX];
+ static int instance;
+
+ wq->socket_id = socket_id;
+ /*
+ * rte_eth_tx_queue_setup() checks min, max, and alignment. So just
+ * print an info message for diagnostics.
+ */
+ dev_info(enic, "TX Queues - effective number of descs:%d\n", nb_desc);
+
+ /* Allocate queue resources */
+ err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
+ nb_desc,
+ sizeof(struct wq_enet_desc));
+ if (err) {
+ dev_err(enic, "error in allocation of wq\n");
+ return err;
+ }
+
+ err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
+ socket_id, nb_desc,
+ sizeof(struct cq_enet_wq_desc));
+ if (err) {
+ vnic_wq_free(wq);
+ dev_err(enic, "error in allocation of cq for wq\n");
+ }
+
+ /* setup up CQ message */
+ snprintf((char *)name, sizeof(name),
+ "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
+ instance++);
+
+ wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
+ sizeof(uint32_t), SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
+ if (!wq->cqmsg_rz)
+ return -ENOMEM;
+
+ return err;
+}
+
+int enic_disable(struct enic *enic)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < enic->intr_count; i++) {
+ vnic_intr_mask(&enic->intr[i]);
+ (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
+ }
+ enic_rxq_intr_deinit(enic);
+ rte_intr_disable(&enic->pdev->intr_handle);
+ rte_intr_callback_unregister(&enic->pdev->intr_handle,
+ enic_intr_handler,
+ (void *)enic->rte_dev);
+
+ vnic_dev_disable(enic->vdev);
+
+ enic_clsf_destroy(enic);
+
+ if (!enic_is_sriov_vf(enic))
+ vnic_dev_del_addr(enic->vdev, enic->mac_addr);
+
+ for (i = 0; i < enic->wq_count; i++) {
+ err = vnic_wq_disable(&enic->wq[i]);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < enic_vnic_rq_count(enic); i++) {
+ if (enic->rq[i].in_use) {
+ err = vnic_rq_disable(&enic->rq[i]);
+ if (err)
+ return err;
+ }
+ }
+
+ /* If we were using interrupts, set the interrupt vector to -1
+ * to disable interrupts. We are not disabling link notifcations,
+ * though, as we want the polling of link status to continue working.
+ */
+ if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
+ vnic_dev_notify_set(enic->vdev, -1);
+
+ vnic_dev_set_reset_flag(enic->vdev, 1);
+
+ for (i = 0; i < enic->wq_count; i++)
+ vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
+
+ for (i = 0; i < enic_vnic_rq_count(enic); i++)
+ if (enic->rq[i].in_use)
+ vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ for (i = 0; i < enic->cq_count; i++)
+ vnic_cq_clean(&enic->cq[i]);
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_clean(&enic->intr[i]);
+
+ return 0;
+}
+
+static int enic_dev_wait(struct vnic_dev *vdev,
+ int (*start)(struct vnic_dev *, int),
+ int (*finished)(struct vnic_dev *, int *),
+ int arg)
+{
+ int done;
+ int err;
+ int i;
+
+ err = start(vdev, arg);
+ if (err)
+ return err;
+
+ /* Wait for func to complete...2 seconds max */
+ for (i = 0; i < 2000; i++) {
+ err = finished(vdev, &done);
+ if (err)
+ return err;
+ if (done)
+ return 0;
+ usleep(1000);
+ }
+ return -ETIMEDOUT;
+}
+
+static int enic_dev_open(struct enic *enic)
+{
+ int err;
+ int flags = CMD_OPENF_IG_DESCCACHE;
+
+ err = enic_dev_wait(enic->vdev, vnic_dev_open,
+ vnic_dev_open_done, flags);
+ if (err)
+ dev_err(enic_get_dev(enic),
+ "vNIC device open failed, err %d\n", err);
+
+ return err;
+}
+
+static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
+{
+ dma_addr_t rss_key_buf_pa;
+ union vnic_rss_key *rss_key_buf_va = NULL;
+ int err, i;
+ u8 name[NAME_MAX];
+
+ RTE_ASSERT(user_key != NULL);
+ snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
+ rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
+ &rss_key_buf_pa, name);
+ if (!rss_key_buf_va)
+ return -ENOMEM;
+
+ for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)
+ rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i];
+
+ err = enic_set_rss_key(enic,
+ rss_key_buf_pa,
+ sizeof(union vnic_rss_key));
+
+ /* Save for later queries */
+ if (!err) {
+ rte_memcpy(&enic->rss_key, rss_key_buf_va,
+ sizeof(union vnic_rss_key));
+ }
+ enic_free_consistent(enic, sizeof(union vnic_rss_key),
+ rss_key_buf_va, rss_key_buf_pa);
+
+ return err;
+}
+
+int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
+{
+ dma_addr_t rss_cpu_buf_pa;
+ union vnic_rss_cpu *rss_cpu_buf_va = NULL;
+ int err;
+ u8 name[NAME_MAX];
+
+ snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
+ rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
+ &rss_cpu_buf_pa, name);
+ if (!rss_cpu_buf_va)
+ return -ENOMEM;
+
+ rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu));
+
+ err = enic_set_rss_cpu(enic,
+ rss_cpu_buf_pa,
+ sizeof(union vnic_rss_cpu));
+
+ enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
+ rss_cpu_buf_va, rss_cpu_buf_pa);
+
+ /* Save for later queries */
+ if (!err)
+ rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu));
+ return err;
+}
+
+static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
+ u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
+{
+ const u8 tso_ipid_split_en = 0;
+ int err;
+
+ err = enic_set_nic_cfg(enic,
+ rss_default_cpu, rss_hash_type,
+ rss_hash_bits, rss_base_cpu,
+ rss_enable, tso_ipid_split_en,
+ enic->ig_vlan_strip_en);
+
+ return err;
+}
+
+/* Initialize RSS with defaults, called from dev_configure */
+int enic_init_rss_nic_cfg(struct enic *enic)
+{
+ static uint8_t default_rss_key[] = {
+ 85, 67, 83, 97, 119, 101, 115, 111, 109, 101,
+ 80, 65, 76, 79, 117, 110, 105, 113, 117, 101,
+ 76, 73, 78, 85, 88, 114, 111, 99, 107, 115,
+ 69, 78, 73, 67, 105, 115, 99, 111, 111, 108,
+ };
+ struct rte_eth_rss_conf rss_conf;
+ union vnic_rss_cpu rss_cpu;
+ int ret, i;
+
+ rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ /*
+ * If setting key for the first time, and the user gives us none, then
+ * push the default key to NIC.
+ */
+ if (rss_conf.rss_key == NULL) {
+ rss_conf.rss_key = default_rss_key;
+ rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
+ }
+ ret = enic_set_rss_conf(enic, &rss_conf);
+ if (ret) {
+ dev_err(enic, "Failed to configure RSS\n");
+ return ret;
+ }
+ if (enic->rss_enable) {
+ /* If enabling RSS, use the default reta */
+ for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) {
+ rss_cpu.cpu[i / 4].b[i % 4] =
+ enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
+ }
+ ret = enic_set_rss_reta(enic, &rss_cpu);
+ if (ret)
+ dev_err(enic, "Failed to set RSS indirection table\n");
+ }
+ return ret;
+}
+
+int enic_setup_finish(struct enic *enic)
+{
+ enic_init_soft_stats(enic);
+
+ /* Default conf */
+ vnic_dev_packet_filter(enic->vdev,
+ 1 /* directed */,
+ 1 /* multicast */,
+ 1 /* broadcast */,
+ 0 /* promisc */,
+ 1 /* allmulti */);
+
+ enic->promisc = 0;
+ enic->allmulti = 1;
+
+ return 0;
+}
+
+static int enic_rss_conf_valid(struct enic *enic,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ /* RSS is disabled per VIC settings. Ignore rss_conf. */
+ if (enic->flow_type_rss_offloads == 0)
+ return 0;
+ if (rss_conf->rss_key != NULL &&
+ rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) {
+ dev_err(enic, "Given rss_key is %d bytes, it must be %d\n",
+ rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
+ return -EINVAL;
+ }
+ if (rss_conf->rss_hf != 0 &&
+ (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) {
+ dev_err(enic, "Given rss_hf contains none of the supported"
+ " types\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Set hash type and key according to rss_conf */
+int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev *eth_dev;
+ uint64_t rss_hf;
+ u8 rss_hash_type;
+ u8 rss_enable;
+ int ret;
+
+ RTE_ASSERT(rss_conf != NULL);
+ ret = enic_rss_conf_valid(enic, rss_conf);
+ if (ret) {
+ dev_err(enic, "RSS configuration (rss_conf) is invalid\n");
+ return ret;
+ }
+
+ eth_dev = enic->rte_dev;
+ rss_hash_type = 0;
+ rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
+ if (enic->rq_count > 1 &&
+ (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+ rss_hf != 0) {
+ rss_enable = 1;
+ if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
+ if (enic->udp_rss_weak) {
+ /*
+ * 'TCP' is not a typo. The "weak" version of
+ * UDP RSS requires both the TCP and UDP bits
+ * be set. It does enable TCP RSS as well.
+ */
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
+ }
+ }
+ if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
+ ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
+ if (enic->udp_rss_weak)
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ }
+ } else {
+ rss_enable = 0;
+ rss_hf = 0;
+ }
+
+ /* Set the hash key if provided */
+ if (rss_enable && rss_conf->rss_key) {
+ ret = enic_set_rsskey(enic, rss_conf->rss_key);
+ if (ret) {
+ dev_err(enic, "Failed to set RSS key\n");
+ return ret;
+ }
+ }
+
+ ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type,
+ ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
+ rss_enable);
+ if (!ret) {
+ enic->rss_hf = rss_hf;
+ enic->rss_hash_type = rss_hash_type;
+ enic->rss_enable = rss_enable;
+ } else {
+ dev_err(enic, "Failed to update RSS configurations."
+ " hash=0x%x\n", rss_hash_type);
+ }
+ return ret;
+}
+
+int enic_set_vlan_strip(struct enic *enic)
+{
+ /*
+ * Unfortunately, VLAN strip on/off and RSS on/off are configured
+ * together. So, re-do niccfg, preserving the current RSS settings.
+ */
+ return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type,
+ ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
+ enic->rss_enable);
+}
+
+void enic_add_packet_filter(struct enic *enic)
+{
+ /* Args -> directed, multicast, broadcast, promisc, allmulti */
+ vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
+ enic->promisc, enic->allmulti);
+}
+
+int enic_get_link_status(struct enic *enic)
+{
+ return vnic_dev_link_status(enic->vdev);
+}
+
+static void enic_dev_deinit(struct enic *enic)
+{
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+ /* stop link status checking */
+ vnic_dev_notify_unset(enic->vdev);
+
+ rte_free(eth_dev->data->mac_addrs);
+ rte_free(enic->cq);
+ rte_free(enic->intr);
+ rte_free(enic->rq);
+ rte_free(enic->wq);
+}
+
+
+int enic_set_vnic_res(struct enic *enic)
+{
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+ int rc = 0;
+ unsigned int required_rq, required_wq, required_cq, required_intr;
+
+ /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
+ required_rq = eth_dev->data->nb_rx_queues * 2;
+ required_wq = eth_dev->data->nb_tx_queues;
+ required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
+ required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ required_intr += eth_dev->data->nb_rx_queues;
+ }
+
+ if (enic->conf_rq_count < required_rq) {
+ dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
+ eth_dev->data->nb_rx_queues,
+ required_rq, enic->conf_rq_count);
+ rc = -EINVAL;
+ }
+ if (enic->conf_wq_count < required_wq) {
+ dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
+ eth_dev->data->nb_tx_queues, enic->conf_wq_count);
+ rc = -EINVAL;
+ }
+
+ if (enic->conf_cq_count < required_cq) {
+ dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
+ required_cq, enic->conf_cq_count);
+ rc = -EINVAL;
+ }
+ if (enic->conf_intr_count < required_intr) {
+ dev_err(dev, "Not enough Interrupts to support Rx queue"
+ " interrupts. Required:%u, Configured:%u\n",
+ required_intr, enic->conf_intr_count);
+ rc = -EINVAL;
+ }
+
+ if (rc == 0) {
+ enic->rq_count = eth_dev->data->nb_rx_queues;
+ enic->wq_count = eth_dev->data->nb_tx_queues;
+ enic->cq_count = enic->rq_count + enic->wq_count;
+ enic->intr_count = required_intr;
+ }
+
+ return rc;
+}
+
+/* Initialize the completion queue for an RQ */
+static int
+enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
+{
+ struct vnic_rq *sop_rq, *data_rq;
+ unsigned int cq_idx;
+ int rc = 0;
+
+ sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+ data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
+ cq_idx = rq_idx;
+
+ vnic_cq_clean(&enic->cq[cq_idx]);
+ vnic_cq_init(&enic->cq[cq_idx],
+ 0 /* flow_control_enable */,
+ 1 /* color_enable */,
+ 0 /* cq_head */,
+ 0 /* cq_tail */,
+ 1 /* cq_tail_color */,
+ 0 /* interrupt_enable */,
+ 1 /* cq_entry_enable */,
+ 0 /* cq_message_enable */,
+ 0 /* interrupt offset */,
+ 0 /* cq_message_addr */);
+
+
+ vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
+ enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
+ sop_rq->ring.desc_count - 1, 1, 0);
+ if (data_rq->in_use) {
+ vnic_rq_init_start(data_rq,
+ enic_cq_rq(enic,
+ enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
+ data_rq->ring.desc_count - 1, 1, 0);
+ }
+
+ rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
+ if (rc)
+ return rc;
+
+ if (data_rq->in_use) {
+ rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
+ if (rc) {
+ enic_rxmbuf_queue_release(enic, sop_rq);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/* The Cisco NIC can send and receive packets up to a max packet size
+ * determined by the NIC type and firmware. There is also an MTU
+ * configured into the NIC via the CIMC/UCSM management interface
+ * which can be overridden by this function (up to the max packet size).
+ * Depending on the network setup, doing so may cause packet drops
+ * and unexpected behavior.
+ */
+int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
+{
+ unsigned int rq_idx;
+ struct vnic_rq *rq;
+ int rc = 0;
+ uint16_t old_mtu; /* previous setting */
+ uint16_t config_mtu; /* Value configured into NIC via CIMC/UCSM */
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+ old_mtu = eth_dev->data->mtu;
+ config_mtu = enic->config.mtu;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
+ if (new_mtu > enic->max_mtu) {
+ dev_err(enic,
+ "MTU not updated: requested (%u) greater than max (%u)\n",
+ new_mtu, enic->max_mtu);
+ return -EINVAL;
+ }
+ if (new_mtu < ENIC_MIN_MTU) {
+ dev_info(enic,
+ "MTU not updated: requested (%u) less than min (%u)\n",
+ new_mtu, ENIC_MIN_MTU);
+ return -EINVAL;
+ }
+ if (new_mtu > config_mtu)
+ dev_warning(enic,
+ "MTU (%u) is greater than value configured in NIC (%u)\n",
+ new_mtu, config_mtu);
+
+ /* Update the MTU and maximum packet length */
+ eth_dev->data->mtu = new_mtu;
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
+ enic_mtu_to_max_rx_pktlen(new_mtu);
+
+ /*
+ * If the device has not started (enic_enable), nothing to do.
+ * Later, enic_enable() will set up RQs reflecting the new maximum
+ * packet length.
+ */
+ if (!eth_dev->data->dev_started)
+ goto set_mtu_done;
+
+ /*
+ * The device has started, re-do RQs on the fly. In the process, we
+ * pick up the new maximum packet length.
+ *
+ * Some applications rely on the ability to change MTU without stopping
+ * the device. So keep this behavior for now.
+ */
+ rte_spinlock_lock(&enic->mtu_lock);
+
+ /* Stop traffic on all RQs */
+ for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
+ rq = &enic->rq[rq_idx];
+ if (rq->is_sop && rq->in_use) {
+ rc = enic_stop_rq(enic,
+ enic_sop_rq_idx_to_rte_idx(rq_idx));
+ if (rc) {
+ dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
+ goto set_mtu_done;
+ }
+ }
+ }
+
+ /* replace Rx function with a no-op to avoid getting stale pkts */
+ eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
+ rte_mb();
+
+ /* Allow time for threads to exit the real Rx function. */
+ usleep(100000);
+
+ /* now it is safe to reconfigure the RQs */
+
+
+ /* free and reallocate RQs with the new MTU */
+ for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
+ rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+ if (!rq->in_use)
+ continue;
+
+ enic_free_rq(rq);
+ rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
+ rq->tot_nb_desc, rq->rx_free_thresh);
+ if (rc) {
+ dev_err(enic,
+ "Fatal MTU alloc error- No traffic will pass\n");
+ goto set_mtu_done;
+ }
+
+ rc = enic_reinit_rq(enic, rq_idx);
+ if (rc) {
+ dev_err(enic,
+ "Fatal MTU RQ reinit- No traffic will pass\n");
+ goto set_mtu_done;
+ }
+ }
+
+ /* put back the real receive function */
+ rte_mb();
+ pick_rx_handler(enic);
+ rte_mb();
+
+ /* restart Rx traffic */
+ for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
+ rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+ if (rq->is_sop && rq->in_use)
+ enic_start_rq(enic, rq_idx);
+ }
+
+set_mtu_done:
+ dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
+ rte_spinlock_unlock(&enic->mtu_lock);
+ return rc;
+}
+
+static int enic_dev_init(struct enic *enic)
+{
+ int err;
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+ vnic_dev_intr_coal_timer_info_default(enic->vdev);
+
+ /* Get vNIC configuration
+ */
+ err = enic_get_vnic_config(enic);
+ if (err) {
+ dev_err(dev, "Get vNIC configuration failed, aborting\n");
+ return err;
+ }
+
+ /* Get available resource counts */
+ enic_get_res_counts(enic);
+ if (enic->conf_rq_count == 1) {
+ dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
+ dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
+ dev_err(enic, "See the ENIC PMD guide for more information.\n");
+ return -EINVAL;
+ }
+ /* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
+ enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
+ enic->conf_cq_count, 8);
+ enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) *
+ enic->conf_intr_count, 8);
+ enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
+ enic->conf_rq_count, 8);
+ enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
+ enic->conf_wq_count, 8);
+ if (enic->conf_cq_count > 0 && enic->cq == NULL) {
+ dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
+ return -1;
+ }
+ if (enic->conf_intr_count > 0 && enic->intr == NULL) {
+ dev_err(enic, "failed to allocate vnic_intr, aborting.\n");
+ return -1;
+ }
+ if (enic->conf_rq_count > 0 && enic->rq == NULL) {
+ dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
+ return -1;
+ }
+ if (enic->conf_wq_count > 0 && enic->wq == NULL) {
+ dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
+ return -1;
+ }
+
+ /* Get the supported filters */
+ enic_fdir_info(enic);
+
+ eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
+ * ENIC_MAX_MAC_ADDR, 0);
+ if (!eth_dev->data->mac_addrs) {
+ dev_err(enic, "mac addr storage alloc failed, aborting.\n");
+ return -1;
+ }
+ ether_addr_copy((struct ether_addr *) enic->mac_addr,
+ eth_dev->data->mac_addrs);
+
+ vnic_dev_set_reset_flag(enic->vdev, 0);
+
+ LIST_INIT(&enic->flows);
+ rte_spinlock_init(&enic->flows_lock);
+
+ /* set up link status checking */
+ vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+
+ enic->overlay_offload = false;
+ if (!enic->disable_overlay && enic->vxlan &&
+ /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */
+ vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_VXLAN,
+ OVERLAY_OFFLOAD_ENABLE) == 0) {
+ enic->tx_offload_capa |=
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+ /*
+ * Do not add PKT_TX_OUTER_{IPV4,IPV6} as they are not
+ * 'offload' flags (i.e. not part of PKT_TX_OFFLOAD_MASK).
+ */
+ enic->tx_offload_mask |=
+ PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TUNNEL_MASK;
+ enic->overlay_offload = true;
+ enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
+ dev_info(enic, "Overlay offload is enabled\n");
+ /*
+ * Reset the vxlan port to the default, as the NIC firmware
+ * does not reset it automatically and keeps the old setting.
+ */
+ if (vnic_dev_overlay_offload_cfg(enic->vdev,
+ OVERLAY_CFG_VXLAN_PORT_UPDATE,
+ ENIC_DEFAULT_VXLAN_PORT)) {
+ dev_err(enic, "failed to update vxlan port\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+
+}
+
+int enic_probe(struct enic *enic)
+{
+ struct rte_pci_device *pdev = enic->pdev;
+ int err = -1;
+
+ dev_debug(enic, " Initializing ENIC PMD\n");
+
+ /* if this is a secondary process the hardware is already initialized */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
+ enic->bar0.len = pdev->mem_resource[0].len;
+
+ /* Register vNIC device */
+ enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
+ if (!enic->vdev) {
+ dev_err(enic, "vNIC registration failed, aborting\n");
+ goto err_out;
+ }
+
+ LIST_INIT(&enic->memzone_list);
+ rte_spinlock_init(&enic->memzone_list_lock);
+
+ vnic_register_cbacks(enic->vdev,
+ enic_alloc_consistent,
+ enic_free_consistent);
+
+ /*
+ * Allocate the consistent memory for stats upfront so both primary and
+ * secondary processes can dump stats.
+ */
+ err = vnic_dev_alloc_stats_mem(enic->vdev);
+ if (err) {
+ dev_err(enic, "Failed to allocate cmd memory, aborting\n");
+ goto err_out_unregister;
+ }
+ /* Issue device open to get device in known state */
+ err = enic_dev_open(enic);
+ if (err) {
+ dev_err(enic, "vNIC dev open failed, aborting\n");
+ goto err_out_unregister;
+ }
+
+ /* Set ingress vlan rewrite mode before vnic initialization */
+ dev_debug(enic, "Set ig_vlan_rewrite_mode=%u\n",
+ enic->ig_vlan_rewrite_mode);
+ err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
+ enic->ig_vlan_rewrite_mode);
+ if (err) {
+ dev_err(enic,
+ "Failed to set ingress vlan rewrite mode, aborting.\n");
+ goto err_out_dev_close;
+ }
+
+ /* Issue device init to initialize the vnic-to-switch link.
+ * We'll start with carrier off and wait for link UP
+ * notification later to turn on carrier. We don't need
+ * to wait here for the vnic-to-switch link initialization
+ * to complete; link UP notification is the indication that
+ * the process is complete.
+ */
+
+ err = vnic_dev_init(enic->vdev, 0);
+ if (err) {
+ dev_err(enic, "vNIC dev init failed, aborting\n");
+ goto err_out_dev_close;
+ }
+
+ err = enic_dev_init(enic);
+ if (err) {
+ dev_err(enic, "Device initialization failed, aborting\n");
+ goto err_out_dev_close;
+ }
+
+ return 0;
+
+err_out_dev_close:
+ vnic_dev_close(enic->vdev);
+err_out_unregister:
+ vnic_dev_unregister(enic->vdev);
+err_out:
+ return err;
+}
+
+void enic_remove(struct enic *enic)
+{
+ enic_dev_deinit(enic);
+ vnic_dev_close(enic->vdev);
+ vnic_dev_unregister(enic->vdev);
+}
diff --git a/src/spdk/dpdk/drivers/net/enic/enic_res.c b/src/spdk/dpdk/drivers/net/enic/enic_res.c
new file mode 100644
index 00000000..8d493ffe
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/enic_res.c
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "enic_compat.h"
+#include "rte_ethdev_driver.h"
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_resource.h"
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "vnic_rss.h"
+#include "enic_res.h"
+#include "enic.h"
+
+int enic_get_vnic_config(struct enic *enic)
+{
+ struct vnic_enet_config *c = &enic->config;
+ int err;
+
+ err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr);
+ if (err) {
+ dev_err(enic_get_dev(enic),
+ "Error getting MAC addr, %d\n", err);
+ return err;
+ }
+
+
+#define GET_CONFIG(m) \
+ do { \
+ err = vnic_dev_spec(enic->vdev, \
+ offsetof(struct vnic_enet_config, m), \
+ sizeof(c->m), &c->m); \
+ if (err) { \
+ dev_err(enic_get_dev(enic), \
+ "Error getting %s, %d\n", #m, err); \
+ return err; \
+ } \
+ } while (0)
+
+ GET_CONFIG(flags);
+ GET_CONFIG(wq_desc_count);
+ GET_CONFIG(rq_desc_count);
+ GET_CONFIG(mtu);
+ GET_CONFIG(intr_timer_type);
+ GET_CONFIG(intr_mode);
+ GET_CONFIG(intr_timer_usec);
+ GET_CONFIG(loop_tag);
+ GET_CONFIG(num_arfs);
+ GET_CONFIG(max_pkt_size);
+
+ /* max packet size is only defined in newer VIC firmware
+ * and will be 0 for legacy firmware and VICs
+ */
+ if (c->max_pkt_size > ENIC_DEFAULT_RX_MAX_PKT_SIZE)
+ enic->max_mtu = c->max_pkt_size - (ETHER_HDR_LEN + 4);
+ else
+ enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE
+ - (ETHER_HDR_LEN + 4);
+ if (c->mtu == 0)
+ c->mtu = 1500;
+
+ enic->rte_dev->data->mtu = min_t(u16, enic->max_mtu,
+ max_t(u16, ENIC_MIN_MTU, c->mtu));
+
+ enic->adv_filters = vnic_dev_capable_adv_filters(enic->vdev);
+ dev_info(enic, "Advanced Filters %savailable\n", ((enic->adv_filters)
+ ? "" : "not "));
+
+ err = vnic_dev_capable_filter_mode(enic->vdev, &enic->flow_filter_mode,
+ &enic->filter_actions);
+ if (err) {
+ dev_err(enic_get_dev(enic),
+ "Error getting filter modes, %d\n", err);
+ return err;
+ }
+ vnic_dev_capable_udp_rss_weak(enic->vdev, &enic->nic_cfg_chk,
+ &enic->udp_rss_weak);
+
+ dev_info(enic, "Flow api filter mode: %s Actions: %s%s%s\n",
+ ((enic->flow_filter_mode == FILTER_DPDK_1) ? "DPDK" :
+ ((enic->flow_filter_mode == FILTER_USNIC_IP) ? "USNIC" :
+ ((enic->flow_filter_mode == FILTER_IPV4_5TUPLE) ? "5TUPLE" :
+ "NONE"))),
+ ((enic->filter_actions & FILTER_ACTION_RQ_STEERING_FLAG) ?
+ "steer " : ""),
+ ((enic->filter_actions & FILTER_ACTION_FILTER_ID_FLAG) ?
+ "tag " : ""),
+ ((enic->filter_actions & FILTER_ACTION_DROP_FLAG) ?
+ "drop " : ""));
+
+ c->wq_desc_count =
+ min_t(u32, ENIC_MAX_WQ_DESCS,
+ max_t(u32, ENIC_MIN_WQ_DESCS,
+ c->wq_desc_count));
+ c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
+
+ c->rq_desc_count =
+ min_t(u32, ENIC_MAX_RQ_DESCS,
+ max_t(u32, ENIC_MIN_RQ_DESCS,
+ c->rq_desc_count));
+ c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
+
+ c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
+
+ dev_info(enic_get_dev(enic),
+ "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
+ "wq/rq %d/%d mtu %d, max mtu:%d\n",
+ enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
+ enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
+ c->wq_desc_count, c->rq_desc_count,
+ enic->rte_dev->data->mtu, enic->max_mtu);
+ dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
+ "rss %s intr mode %s type %s timer %d usec "
+ "loopback tag 0x%04x\n",
+ ENIC_SETTING(enic, TXCSUM) ? "yes" : "no",
+ ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
+ ENIC_SETTING(enic, RSS) ?
+ (ENIC_SETTING(enic, RSSHASH_UDPIPV4) ? "+UDP" :
+ ((enic->udp_rss_weak ? "+udp" :
+ "yes"))) : "no",
+ c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
+ c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
+ c->intr_mode == VENET_INTR_MODE_ANY ? "any" :
+ "unknown",
+ c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" :
+ c->intr_timer_type == VENET_INTR_TYPE_IDLE ? "idle" :
+ "unknown",
+ c->intr_timer_usec,
+ c->loop_tag);
+
+ /* RSS settings from vNIC */
+ enic->reta_size = ENIC_RSS_RETA_SIZE;
+ enic->hash_key_size = ENIC_RSS_HASH_KEY_SIZE;
+ enic->flow_type_rss_offloads = 0;
+ if (ENIC_SETTING(enic, RSSHASH_IPV4))
+ /*
+ * IPV4 hash type handles both non-frag and frag packet types.
+ * TCP/UDP is controlled via a separate flag below.
+ */
+ enic->flow_type_rss_offloads |= ETH_RSS_IPV4 |
+ ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER;
+ if (ENIC_SETTING(enic, RSSHASH_TCPIPV4))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (ENIC_SETTING(enic, RSSHASH_IPV6))
+ /*
+ * The VIC adapter can perform RSS on IPv6 packets with and
+ * without extension headers. An IPv6 "fragment" is an IPv6
+ * packet with the fragment extension header.
+ */
+ enic->flow_type_rss_offloads |= ETH_RSS_IPV6 |
+ ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER;
+ if (ENIC_SETTING(enic, RSSHASH_TCPIPV6))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_IPV6_TCP_EX;
+ if (enic->udp_rss_weak)
+ enic->flow_type_rss_offloads |=
+ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_IPV6_UDP_EX;
+ if (ENIC_SETTING(enic, RSSHASH_UDPIPV4))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP;
+ if (ENIC_SETTING(enic, RSSHASH_UDPIPV6))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_IPV6_UDP_EX;
+
+ /* Zero offloads if RSS is not enabled */
+ if (!ENIC_SETTING(enic, RSS))
+ enic->flow_type_rss_offloads = 0;
+
+ enic->vxlan = ENIC_SETTING(enic, VXLAN) &&
+ vnic_dev_capable_vxlan(enic->vdev);
+ /*
+ * Default hardware capabilities. enic_dev_init() may add additional
+ * flags if it enables overlay offloads.
+ */
+ enic->tx_queue_offload_capa = 0;
+ enic->tx_offload_capa =
+ enic->tx_queue_offload_capa |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ enic->rx_offload_capa =
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ enic->tx_offload_mask =
+ PKT_TX_VLAN_PKT |
+ PKT_TX_IP_CKSUM |
+ PKT_TX_L4_MASK |
+ PKT_TX_TCP_SEG;
+
+ return 0;
+}
+
+int enic_add_vlan(struct enic *enic, u16 vlanid)
+{
+ u64 a0 = vlanid, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
+ if (err)
+ dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err);
+
+ return err;
+}
+
+int enic_del_vlan(struct enic *enic, u16 vlanid)
+{
+ u64 a0 = vlanid, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
+ if (err)
+ dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err);
+
+ return err;
+}
+
+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
+ u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
+ u8 ig_vlan_strip_en)
+{
+ enum vnic_devcmd_cmd cmd;
+ u64 a0, a1;
+ u32 nic_cfg;
+ int wait = 1000;
+
+ vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
+ rss_hash_type, rss_hash_bits, rss_base_cpu,
+ rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
+
+ a0 = nic_cfg;
+ a1 = 0;
+ cmd = enic->nic_cfg_chk ? CMD_NIC_CFG_CHK : CMD_NIC_CFG;
+ return vnic_dev_cmd(enic->vdev, cmd, &a0, &a1, wait);
+}
+
+int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)
+{
+ u64 a0 = (u64)key_pa, a1 = len;
+ int wait = 1000;
+
+ return vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait);
+}
+
+int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len)
+{
+ u64 a0 = (u64)cpu_pa, a1 = len;
+ int wait = 1000;
+
+ return vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait);
+}
+
+void enic_free_vnic_resources(struct enic *enic)
+{
+ unsigned int i;
+
+ for (i = 0; i < enic->wq_count; i++)
+ vnic_wq_free(&enic->wq[i]);
+ for (i = 0; i < enic_vnic_rq_count(enic); i++)
+ if (enic->rq[i].in_use)
+ vnic_rq_free(&enic->rq[i]);
+ for (i = 0; i < enic->cq_count; i++)
+ vnic_cq_free(&enic->cq[i]);
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_free(&enic->intr[i]);
+}
+
+void enic_get_res_counts(struct enic *enic)
+{
+ enic->conf_wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
+ enic->conf_rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
+ enic->conf_cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
+ enic->conf_intr_count = vnic_dev_get_res_count(enic->vdev,
+ RES_TYPE_INTR_CTRL);
+
+ dev_info(enic_get_dev(enic),
+ "vNIC resources avail: wq %d rq %d cq %d intr %d\n",
+ enic->conf_wq_count, enic->conf_rq_count,
+ enic->conf_cq_count, enic->conf_intr_count);
+}
diff --git a/src/spdk/dpdk/drivers/net/enic/enic_res.h b/src/spdk/dpdk/drivers/net/enic/enic_res.h
new file mode 100644
index 00000000..3786bc0e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/enic_res.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _ENIC_RES_H_
+#define _ENIC_RES_H_
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+
+#define ENIC_MIN_WQ_DESCS 64
+#define ENIC_MAX_WQ_DESCS 4096
+#define ENIC_MIN_RQ_DESCS 64
+#define ENIC_MAX_RQ_DESCS 4096
+
+/* A descriptor ring has a multiple of 32 descriptors */
+#define ENIC_ALIGN_DESCS 32
+#define ENIC_ALIGN_DESCS_MASK ~(ENIC_ALIGN_DESCS - 1)
+
+/* Request a completion index every 32 buffers (roughly packets) */
+#define ENIC_WQ_CQ_THRESH 32
+
+#define ENIC_MIN_MTU 68
+
+/* Does not include (possible) inserted VLAN tag and FCS */
+#define ENIC_DEFAULT_RX_MAX_PKT_SIZE 9022
+
+/* Does not include (possible) inserted VLAN tag and FCS */
+#define ENIC_TX_MAX_PKT_SIZE 9208
+
+#define ENIC_MULTICAST_PERFECT_FILTERS 32
+#define ENIC_UNICAST_PERFECT_FILTERS 32
+
+#define ENIC_NON_TSO_MAX_DESC 16
+#define ENIC_DEFAULT_RX_FREE_THRESH 32
+#define ENIC_TX_XMIT_MAX 64
+#define ENIC_RX_BURST_MAX 64
+
+/* Defaults for dev_info.default_{rx,tx}portconf */
+#define ENIC_DEFAULT_RX_BURST 32
+#define ENIC_DEFAULT_RX_RINGS 1
+#define ENIC_DEFAULT_RX_RING_SIZE 512
+#define ENIC_DEFAULT_TX_BURST 32
+#define ENIC_DEFAULT_TX_RINGS 1
+#define ENIC_DEFAULT_TX_RING_SIZE 512
+
+#define ENIC_RSS_DEFAULT_CPU 0
+#define ENIC_RSS_BASE_CPU 0
+#define ENIC_RSS_HASH_BITS 7
+#define ENIC_RSS_RETA_SIZE (1 << ENIC_RSS_HASH_BITS)
+#define ENIC_RSS_HASH_KEY_SIZE 40
+
+#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
+
+
+struct enic;
+
+int enic_get_vnic_config(struct enic *);
+int enic_add_vlan(struct enic *enic, u16 vlanid);
+int enic_del_vlan(struct enic *enic, u16 vlanid);
+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
+ u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
+ u8 ig_vlan_strip_en);
+int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len);
+int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len);
+void enic_get_res_counts(struct enic *enic);
+void enic_init_vnic_resources(struct enic *enic);
+int enic_alloc_vnic_resources(struct enic *);
+void enic_free_vnic_resources(struct enic *);
+
+#endif /* _ENIC_RES_H_ */
diff --git a/src/spdk/dpdk/drivers/net/enic/enic_rxtx.c b/src/spdk/dpdk/drivers/net/enic/enic_rxtx.c
new file mode 100644
index 00000000..7129e121
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/enic_rxtx.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_net.h>
+#include <rte_prefetch.h>
+
+#include "enic_compat.h"
+#include "rq_enet_desc.h"
+#include "enic.h"
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+
+#define RTE_PMD_USE_PREFETCH
+
+#ifdef RTE_PMD_USE_PREFETCH
+/*Prefetch a cache line into all cache levels. */
+#define rte_enic_prefetch(p) rte_prefetch0(p)
+#else
+#define rte_enic_prefetch(p) do {} while (0)
+#endif
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while (0)
+#endif
+
+static inline uint16_t
+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
+{
+ return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
+}
+
+static inline uint16_t
+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
+{
+ return le16_to_cpu(crd->bytes_written_flags) &
+ ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_packet_error(uint16_t bwflags)
+{
+ return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_eop(uint16_t ciflags)
+{
+ return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
+ == CQ_ENET_RQ_DESC_FLAGS_EOP;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
+{
+ return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+ return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
+ CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+ return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
+ CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
+{
+ return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
+ CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+}
+
+static inline uint32_t
+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
+{
+ return le32_to_cpu(cqrd->rss_hash);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
+{
+ return le16_to_cpu(cqrd->vlan);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ return le16_to_cpu(cqrd->bytes_written_flags) &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+}
+
+
+static inline uint8_t
+enic_cq_rx_check_err(struct cq_desc *cqd)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ uint16_t bwflags;
+
+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
+ if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
+ return 1;
+ return 0;
+}
+
+/* Lookup table to translate RX CQ flags to mbuf flags. */
+static inline uint32_t
+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ uint8_t cqrd_flags = cqrd->flags;
+ /*
+ * Odd-numbered entries are for tunnel packets. All packet type info
+ * applies to the inner packet, and there is no info on the outer
+ * packet. The outer flags in these entries exist only to avoid
+ * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf
+ * afterwards.
+ *
+ * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
+ * RTE_PTYPE_TUNNEL_GRENAT..
+ */
+ static const uint32_t cq_type_table[128] __rte_cache_aligned = {
+ [0x00] = RTE_PTYPE_UNKNOWN,
+ [0x01] = RTE_PTYPE_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ /* All others reserved */
+ };
+ cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
+ | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
+ | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
+ return cq_type_table[cqrd_flags + tnl];
+}
+
+static inline void
+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ uint16_t bwflags, pkt_flags = 0, vlan_tci;
+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
+ vlan_tci = enic_cq_rx_desc_vlan(cqrd);
+
+ /* VLAN STRIPPED flag. The L2 packet type updated here also */
+ if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
+ pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
+ } else {
+ if (vlan_tci != 0)
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+ else
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
+ }
+ mbuf->vlan_tci = vlan_tci;
+
+ if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
+ struct cq_enet_rq_clsf_desc *clsf_cqd;
+ uint16_t filter_id;
+ clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
+ filter_id = clsf_cqd->filter_id;
+ if (filter_id) {
+ pkt_flags |= PKT_RX_FDIR;
+ if (filter_id != ENIC_MAGIC_FILTER_ID) {
+ mbuf->hash.fdir.hi = clsf_cqd->filter_id;
+ pkt_flags |= PKT_RX_FDIR_ID;
+ }
+ }
+ } else if (enic_cq_rx_desc_rss_type(cqrd)) {
+ /* RSS flag */
+ pkt_flags |= PKT_RX_RSS_HASH;
+ mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
+ }
+
+ /* checksum flags */
+ if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
+ if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
+ uint32_t l4_flags;
+ l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
+
+ /*
+ * When overlay offload is enabled, the NIC may
+ * set ipv4_csum_ok=1 if the inner packet is IPv6..
+ * So, explicitly check for IPv4 before checking
+ * ipv4_csum_ok.
+ */
+ if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
+ if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ }
+
+ if (l4_flags == RTE_PTYPE_L4_UDP ||
+ l4_flags == RTE_PTYPE_L4_TCP) {
+ if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ }
+
+ mbuf->ol_flags = pkt_flags;
+}
+
+/* dummy receive function to replace actual function in
+ * order to do safe reconfiguration operations.
+ */
+uint16_t
+enic_dummy_recv_pkts(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+uint16_t
+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct vnic_rq *sop_rq = rx_queue;
+ struct vnic_rq *data_rq;
+ struct vnic_rq *rq;
+ struct enic *enic = vnic_dev_priv(sop_rq->vdev);
+ uint16_t cq_idx;
+ uint16_t rq_idx, max_rx;
+ uint16_t rq_num;
+ struct rte_mbuf *nmb, *rxmb;
+ uint16_t nb_rx = 0;
+ struct vnic_cq *cq;
+ volatile struct cq_desc *cqd_ptr;
+ uint8_t color;
+ uint8_t tnl;
+ uint16_t seg_length;
+ struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
+ struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
+
+ cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
+ cq_idx = cq->to_clean; /* index of cqd, rqd, mbuf_table */
+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+ color = cq->last_color;
+
+ data_rq = &enic->rq[sop_rq->data_queue_idx];
+
+ /* Receive until the end of the ring, at most. */
+ max_rx = RTE_MIN(nb_pkts, cq->ring.desc_count - cq_idx);
+
+ while (max_rx) {
+ volatile struct rq_enet_desc *rqd_ptr;
+ struct cq_desc cqd;
+ uint8_t packet_error;
+ uint16_t ciflags;
+
+ max_rx--;
+
+ /* Check for pkts available */
+ if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
+ break;
+
+ /* Get the cq descriptor and extract rq info from it */
+ cqd = *cqd_ptr;
+ rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
+ rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;
+
+ rq = &enic->rq[rq_num];
+ rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;
+
+ /* allocate a new mbuf */
+ nmb = rte_mbuf_raw_alloc(rq->mp);
+ if (nmb == NULL) {
+ rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
+ break;
+ }
+
+ /* A packet error means descriptor and data are untrusted */
+ packet_error = enic_cq_rx_check_err(&cqd);
+
+ /* Get the mbuf to return and replace with one just allocated */
+ rxmb = rq->mbuf_ring[rq_idx];
+ rq->mbuf_ring[rq_idx] = nmb;
+ cq_idx++;
+
+ /* Prefetch next mbuf & desc while processing current one */
+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+ rte_enic_prefetch(cqd_ptr);
+
+ ciflags = enic_cq_rx_desc_ciflags(
+ (struct cq_enet_rq_desc *)&cqd);
+
+ /* Push descriptor for newly allocated mbuf */
+ nmb->data_off = RTE_PKTMBUF_HEADROOM;
+ /*
+ * Only the address needs to be refilled. length_type of the
+ * descriptor it set during initialization
+ * (enic_alloc_rx_queue_mbufs) and does not change.
+ */
+ rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
+ RTE_PKTMBUF_HEADROOM);
+
+ /* Fill in the rest of the mbuf */
+ seg_length = enic_cq_rx_desc_n_bytes(&cqd);
+
+ if (rq->is_sop) {
+ first_seg = rxmb;
+ first_seg->pkt_len = seg_length;
+ } else {
+ first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
+ + seg_length);
+ first_seg->nb_segs++;
+ last_seg->next = rxmb;
+ }
+
+ rxmb->port = enic->port_id;
+ rxmb->data_len = seg_length;
+
+ rq->rx_nb_hold++;
+
+ if (!(enic_cq_rx_desc_eop(ciflags))) {
+ last_seg = rxmb;
+ continue;
+ }
+
+ /*
+ * When overlay offload is enabled, CQ.fcoe indicates the
+ * packet is tunnelled.
+ */
+ tnl = enic->overlay_offload &&
+ (ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
+ /* cq rx flags are only valid if eop bit is set */
+ first_seg->packet_type =
+ enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
+ enic_cq_rx_to_pkt_flags(&cqd, first_seg);
+
+ /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
+ if (tnl) {
+ first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
+ }
+ if (unlikely(packet_error)) {
+ rte_pktmbuf_free(first_seg);
+ rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
+ continue;
+ }
+
+
+ /* prefetch mbuf data for caller */
+ rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
+ RTE_PKTMBUF_HEADROOM));
+
+ /* store the mbuf address into the next entry of the array */
+ rx_pkts[nb_rx++] = first_seg;
+ }
+ if (unlikely(cq_idx == cq->ring.desc_count)) {
+ cq_idx = 0;
+ cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
+ }
+
+ sop_rq->pkt_first_seg = first_seg;
+ sop_rq->pkt_last_seg = last_seg;
+
+ cq->to_clean = cq_idx;
+
+ if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
+ sop_rq->rx_free_thresh) {
+ if (data_rq->in_use) {
+ data_rq->posted_index =
+ enic_ring_add(data_rq->ring.desc_count,
+ data_rq->posted_index,
+ data_rq->rx_nb_hold);
+ data_rq->rx_nb_hold = 0;
+ }
+ sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
+ sop_rq->posted_index,
+ sop_rq->rx_nb_hold);
+ sop_rq->rx_nb_hold = 0;
+
+ rte_mb();
+ if (data_rq->in_use)
+ iowrite32_relaxed(data_rq->posted_index,
+ &data_rq->ctrl->posted_index);
+ rte_compiler_barrier();
+ iowrite32_relaxed(sop_rq->posted_index,
+ &sop_rq->ctrl->posted_index);
+ }
+
+
+ return nb_rx;
+}
+
+uint16_t
+enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *mb, **rx, **rxmb;
+ uint16_t cq_idx, nb_rx, max_rx;
+ struct cq_enet_rq_desc *cqd;
+ struct rq_enet_desc *rqd;
+ unsigned int port_id;
+ struct vnic_cq *cq;
+ struct vnic_rq *rq;
+ struct enic *enic;
+ uint8_t color;
+ bool overlay;
+ bool tnl;
+
+ rq = rx_queue;
+ enic = vnic_dev_priv(rq->vdev);
+ cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ cq_idx = cq->to_clean;
+
+ /*
+ * Fill up the reserve of free mbufs. Below, we restock the receive
+ * ring with these mbufs to avoid allocation failures.
+ */
+ if (rq->num_free_mbufs == 0) {
+ if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
+ ENIC_RX_BURST_MAX))
+ return 0;
+ rq->num_free_mbufs = ENIC_RX_BURST_MAX;
+ }
+
+ /* Receive until the end of the ring, at most. */
+ max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
+ max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);
+
+ cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
+ color = cq->last_color;
+ rxmb = rq->mbuf_ring + cq_idx;
+ port_id = enic->port_id;
+ overlay = enic->overlay_offload;
+
+ rx = rx_pkts;
+ while (max_rx) {
+ max_rx--;
+ if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
+ break;
+ if (unlikely(cqd->bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
+ rte_pktmbuf_free(*rxmb++);
+ rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
+ cqd++;
+ continue;
+ }
+
+ mb = *rxmb++;
+ /* prefetch mbuf data for caller */
+ rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
+ RTE_PKTMBUF_HEADROOM));
+ mb->data_len = cqd->bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ mb->pkt_len = mb->data_len;
+ mb->port = port_id;
+ tnl = overlay && (cqd->completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
+ mb->packet_type =
+ enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
+ tnl);
+ enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
+ /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
+ if (tnl) {
+ mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
+ }
+ cqd++;
+ *rx++ = mb;
+ }
+ /* Number of descriptors visited */
+ nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
+ if (nb_rx == 0)
+ return 0;
+ rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
+ rxmb = rq->mbuf_ring + cq_idx;
+ cq_idx += nb_rx;
+ rq->rx_nb_hold += nb_rx;
+ if (unlikely(cq_idx == cq->ring.desc_count)) {
+ cq_idx = 0;
+ cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
+ }
+ cq->to_clean = cq_idx;
+
+ memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
+ sizeof(struct rte_mbuf *) * nb_rx);
+ rq->num_free_mbufs -= nb_rx;
+ while (nb_rx) {
+ nb_rx--;
+ mb = *rxmb++;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+ rqd++;
+ }
+ if (rq->rx_nb_hold > rq->rx_free_thresh) {
+ rq->posted_index = enic_ring_add(rq->ring.desc_count,
+ rq->posted_index,
+ rq->rx_nb_hold);
+ rq->rx_nb_hold = 0;
+ rte_wmb();
+ iowrite32_relaxed(rq->posted_index,
+ &rq->ctrl->posted_index);
+ }
+
+ return rx - rx_pkts;
+}
+
+static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
+{
+ struct rte_mbuf *buf;
+ struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
+ unsigned int nb_to_free, nb_free = 0, i;
+ struct rte_mempool *pool;
+ unsigned int tail_idx;
+ unsigned int desc_count = wq->ring.desc_count;
+
+ nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
+ + 1;
+ tail_idx = wq->tail_idx;
+ pool = wq->bufs[tail_idx]->pool;
+ for (i = 0; i < nb_to_free; i++) {
+ buf = wq->bufs[tail_idx];
+ m = rte_pktmbuf_prefree_seg(buf);
+ if (unlikely(m == NULL)) {
+ tail_idx = enic_ring_incr(desc_count, tail_idx);
+ continue;
+ }
+
+ if (likely(m->pool == pool)) {
+ RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(pool, (void *)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ pool = m->pool;
+ }
+ tail_idx = enic_ring_incr(desc_count, tail_idx);
+ }
+
+ if (nb_free > 0)
+ rte_mempool_put_bulk(pool, (void **)free, nb_free);
+
+ wq->tail_idx = tail_idx;
+ wq->ring.desc_avail += nb_to_free;
+}
+
+unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
+{
+ u16 completed_index;
+
+ completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
+
+ if (wq->last_completed_index != completed_index) {
+ enic_free_wq_bufs(wq, completed_index);
+ wq->last_completed_index = completed_index;
+ }
+ return 0;
+}
+
+uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
+ int32_t ret;
+ uint16_t i;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i != nb_pkts; i++) {
+ m = tx_pkts[i];
+ if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+ rte_errno = EINVAL;
+ return i;
+ }
+ ol_flags = m->ol_flags;
+ if (ol_flags & wq->tx_offload_notsup_mask) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t index;
+ unsigned int pkt_len, data_len;
+ unsigned int nb_segs;
+ struct rte_mbuf *tx_pkt;
+ struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+ unsigned short vlan_id;
+ uint64_t ol_flags;
+ uint64_t ol_flags_mask;
+ unsigned int wq_desc_avail;
+ int head_idx;
+ unsigned int desc_count;
+ struct wq_enet_desc *descs, *desc_p, desc_tmp;
+ uint16_t mss;
+ uint8_t vlan_tag_insert;
+ uint8_t eop, cq;
+ uint64_t bus_addr;
+ uint8_t offload_mode;
+ uint16_t header_len;
+ uint64_t tso;
+ rte_atomic64_t *tx_oversized;
+
+ enic_cleanup_wq(enic, wq);
+ wq_desc_avail = vnic_wq_desc_avail(wq);
+ head_idx = wq->head_idx;
+ desc_count = wq->ring.desc_count;
+ ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
+ tx_oversized = &enic->soft_stats.tx_oversized;
+
+ nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
+
+ for (index = 0; index < nb_pkts; index++) {
+ tx_pkt = *tx_pkts++;
+ pkt_len = tx_pkt->pkt_len;
+ data_len = tx_pkt->data_len;
+ ol_flags = tx_pkt->ol_flags;
+ nb_segs = tx_pkt->nb_segs;
+ tso = ol_flags & PKT_TX_TCP_SEG;
+
+ /* drop packet if it's too big to send */
+ if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+ rte_pktmbuf_free(tx_pkt);
+ rte_atomic64_inc(tx_oversized);
+ continue;
+ }
+
+ if (nb_segs > wq_desc_avail) {
+ if (index > 0)
+ goto post;
+ goto done;
+ }
+
+ mss = 0;
+ vlan_id = tx_pkt->vlan_tci;
+ vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN_PKT);
+ bus_addr = (dma_addr_t)
+ (tx_pkt->buf_iova + tx_pkt->data_off);
+
+ descs = (struct wq_enet_desc *)wq->ring.descs;
+ desc_p = descs + head_idx;
+
+ eop = (data_len == pkt_len);
+ offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
+ header_len = 0;
+
+ if (tso) {
+ header_len = tx_pkt->l2_len + tx_pkt->l3_len +
+ tx_pkt->l4_len;
+
+ /* Drop if non-TCP packet or TSO seg size is too big */
+ if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
+ header_len) > ENIC_TX_MAX_PKT_SIZE))) {
+ rte_pktmbuf_free(tx_pkt);
+ rte_atomic64_inc(tx_oversized);
+ continue;
+ }
+
+ offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
+ mss = tx_pkt->tso_segsz;
+ /* For tunnel, need the size of outer+inner headers */
+ if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ header_len += tx_pkt->outer_l2_len +
+ tx_pkt->outer_l3_len;
+ }
+ }
+
+ if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ mss |= ENIC_CALC_IP_CKSUM;
+
+ /* Nic uses just 1 bit for UDP and TCP */
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ case PKT_TX_UDP_CKSUM:
+ mss |= ENIC_CALC_TCP_UDP_CKSUM;
+ break;
+ }
+ }
+ wq->cq_pend++;
+ cq = 0;
+ if (eop && wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
+ cq = 1;
+ wq->cq_pend = 0;
+ }
+ wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
+ offload_mode, eop, cq, 0, vlan_tag_insert,
+ vlan_id, 0);
+
+ *desc_p = desc_tmp;
+ wq->bufs[head_idx] = tx_pkt;
+ head_idx = enic_ring_incr(desc_count, head_idx);
+ wq_desc_avail--;
+
+ if (!eop) {
+ for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
+ tx_pkt->next) {
+ data_len = tx_pkt->data_len;
+
+ wq->cq_pend++;
+ cq = 0;
+ if (tx_pkt->next == NULL) {
+ eop = 1;
+ if (wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
+ cq = 1;
+ wq->cq_pend = 0;
+ }
+ }
+ desc_p = descs + head_idx;
+ bus_addr = (dma_addr_t)(tx_pkt->buf_iova
+ + tx_pkt->data_off);
+ wq_enet_desc_enc((struct wq_enet_desc *)
+ &desc_tmp, bus_addr, data_len,
+ mss, 0, offload_mode, eop, cq,
+ 0, vlan_tag_insert, vlan_id,
+ 0);
+
+ *desc_p = desc_tmp;
+ wq->bufs[head_idx] = tx_pkt;
+ head_idx = enic_ring_incr(desc_count, head_idx);
+ wq_desc_avail--;
+ }
+ }
+ }
+ post:
+ rte_wmb();
+ iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
+ done:
+ wq->ring.desc_avail = wq_desc_avail;
+ wq->head_idx = head_idx;
+
+ return index;
+}
+
+static void enqueue_simple_pkts(struct rte_mbuf **pkts,
+ struct wq_enet_desc *desc,
+ uint16_t n,
+ struct enic *enic)
+{
+ struct rte_mbuf *p;
+
+ while (n) {
+ n--;
+ p = *pkts++;
+ desc->address = p->buf_iova + p->data_off;
+ desc->length = p->pkt_len;
+ /*
+ * The app should not send oversized
+ * packets. tx_pkt_prepare includes a check as
+ * well. But some apps ignore the device max size and
+ * tx_pkt_prepare. Oversized packets cause WQ errrors
+ * and the NIC ends up disabling the whole WQ. So
+ * truncate packets..
+ */
+ if (unlikely(p->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+ desc->length = ENIC_TX_MAX_PKT_SIZE;
+ rte_atomic64_inc(&enic->soft_stats.tx_oversized);
+ }
+ desc++;
+ }
+}
+
+uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ unsigned int head_idx, desc_count;
+ struct wq_enet_desc *desc;
+ struct vnic_wq *wq;
+ struct enic *enic;
+ uint16_t rem, n;
+
+ wq = (struct vnic_wq *)tx_queue;
+ enic = vnic_dev_priv(wq->vdev);
+ enic_cleanup_wq(enic, wq);
+ /* Will enqueue this many packets in this call */
+ nb_pkts = RTE_MIN(nb_pkts, wq->ring.desc_avail);
+ if (nb_pkts == 0)
+ return 0;
+
+ head_idx = wq->head_idx;
+ desc_count = wq->ring.desc_count;
+
+ /* Descriptors until the end of the ring */
+ n = desc_count - head_idx;
+ n = RTE_MIN(nb_pkts, n);
+
+ /* Save mbuf pointers to free later */
+ memcpy(wq->bufs + head_idx, tx_pkts, sizeof(struct rte_mbuf *) * n);
+
+ /* Enqueue until the ring end */
+ rem = nb_pkts - n;
+ desc = ((struct wq_enet_desc *)wq->ring.descs) + head_idx;
+ enqueue_simple_pkts(tx_pkts, desc, n, enic);
+
+ /* Wrap to the start of the ring */
+ if (rem) {
+ tx_pkts += n;
+ memcpy(wq->bufs, tx_pkts, sizeof(struct rte_mbuf *) * rem);
+ desc = (struct wq_enet_desc *)wq->ring.descs;
+ enqueue_simple_pkts(tx_pkts, desc, rem, enic);
+ }
+ rte_wmb();
+
+ /* Update head_idx and desc_avail */
+ wq->ring.desc_avail -= nb_pkts;
+ head_idx += nb_pkts;
+ if (head_idx >= desc_count)
+ head_idx -= desc_count;
+ wq->head_idx = head_idx;
+ iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
+ return nb_pkts;
+}
diff --git a/src/spdk/dpdk/drivers/net/enic/meson.build b/src/spdk/dpdk/drivers/net/enic/meson.build
new file mode 100644
index 00000000..bfd4e237
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cisco Systems, Inc.
+
+sources = files(
+ 'base/vnic_cq.c',
+ 'base/vnic_dev.c',
+ 'base/vnic_intr.c',
+ 'base/vnic_rq.c',
+ 'base/vnic_rss.c',
+ 'base/vnic_wq.c',
+ 'enic_clsf.c',
+ 'enic_ethdev.c',
+ 'enic_flow.c',
+ 'enic_main.c',
+ 'enic_res.c',
+ 'enic_rxtx.c',
+ )
+deps += ['hash']
+includes += include_directories('base')
diff --git a/src/spdk/dpdk/drivers/net/enic/rte_pmd_enic_version.map b/src/spdk/dpdk/drivers/net/enic/rte_pmd_enic_version.map
new file mode 100644
index 00000000..ef353984
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enic/rte_pmd_enic_version.map
@@ -0,0 +1,4 @@
+DPDK_2.0 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/failsafe/Makefile b/src/spdk/dpdk/drivers/net/failsafe/Makefile
new file mode 100644
index 00000000..81802d09
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/failsafe/Makefile
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 6WIND S.A.
+# Copyright 2017 Mellanox Technologies, Ltd
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# Library name
+LIB = librte_pmd_failsafe.a
+
+EXPORT_MAP := rte_pmd_failsafe_version.map
+
+LIBABIVER := 1
+
+# Sources are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_args.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_eal.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_ether.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_intr.c
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
+CFLAGS += -DLINUX
+else
+CFLAGS += -DBSD
+endif
+
+# No exported include files
+
+# Basic CFLAGS:
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -std=gnu99 -Wextra
+CFLAGS += -O3
+CFLAGS += -I.
+CFLAGS += -D_DEFAULT_SOURCE
+CFLAGS += -D_XOPEN_SOURCE=700
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -Wno-strict-prototypes
+CFLAGS += -pedantic
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lpthread
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe.c
new file mode 100644
index 00000000..657919f9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe.c
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <rte_alarm.h>
+#include <rte_malloc.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+#include <rte_bus_vdev.h>
+
+#include "failsafe_private.h"
+
+int failsafe_logtype;
+
+const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME;
+static const struct rte_eth_link eth_link = {
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_UP,
+ .link_autoneg = ETH_LINK_AUTONEG,
+};
+
+static int
+fs_sub_device_alloc(struct rte_eth_dev *dev,
+ const char *params)
+{
+ uint8_t nb_subs;
+ int ret;
+ int i;
+
+ ret = failsafe_args_count_subdevice(dev, params);
+ if (ret)
+ return ret;
+ if (PRIV(dev)->subs_tail > FAILSAFE_MAX_ETHPORTS) {
+ ERROR("Cannot allocate more than %d ports",
+ FAILSAFE_MAX_ETHPORTS);
+ return -ENOSPC;
+ }
+ nb_subs = PRIV(dev)->subs_tail;
+ PRIV(dev)->subs = rte_zmalloc(NULL,
+ sizeof(struct sub_device) * nb_subs,
+ RTE_CACHE_LINE_SIZE);
+ if (PRIV(dev)->subs == NULL) {
+ ERROR("Could not allocate sub_devices");
+ return -ENOMEM;
+ }
+ /* Initiate static sub devices linked list. */
+ for (i = 1; i < nb_subs; i++)
+ PRIV(dev)->subs[i - 1].next = PRIV(dev)->subs + i;
+ PRIV(dev)->subs[i - 1].next = PRIV(dev)->subs;
+ return 0;
+}
+
+static void
+fs_sub_device_free(struct rte_eth_dev *dev)
+{
+ rte_free(PRIV(dev)->subs);
+}
+
+static void fs_hotplug_alarm(void *arg);
+
+int
+failsafe_hotplug_alarm_install(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ if (dev == NULL)
+ return -EINVAL;
+ if (PRIV(dev)->pending_alarm)
+ return 0;
+ ret = rte_eal_alarm_set(hotplug_poll * 1000,
+ fs_hotplug_alarm,
+ dev);
+ if (ret) {
+ ERROR("Could not set up plug-in event detection");
+ return ret;
+ }
+ PRIV(dev)->pending_alarm = 1;
+ return 0;
+}
+
+int
+failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev)
+{
+ int ret = 0;
+
+ rte_errno = 0;
+ rte_eal_alarm_cancel(fs_hotplug_alarm, dev);
+ if (rte_errno) {
+ ERROR("rte_eal_alarm_cancel failed (errno: %s)",
+ strerror(rte_errno));
+ ret = -rte_errno;
+ } else {
+ PRIV(dev)->pending_alarm = 0;
+ }
+ return ret;
+}
+
+static void
+fs_hotplug_alarm(void *arg)
+{
+ struct rte_eth_dev *dev = arg;
+ struct sub_device *sdev;
+ int ret;
+ uint8_t i;
+
+ if (!PRIV(dev)->pending_alarm)
+ return;
+ PRIV(dev)->pending_alarm = 0;
+ FOREACH_SUBDEV(sdev, i, dev)
+ if (sdev->state != PRIV(dev)->state)
+ break;
+ /* if we have non-probed device */
+ if (i != PRIV(dev)->subs_tail) {
+ if (fs_lock(dev, 1) != 0)
+ goto reinstall;
+ ret = failsafe_eth_dev_state_sync(dev);
+ fs_unlock(dev, 1);
+ if (ret)
+ ERROR("Unable to synchronize sub_device state");
+ }
+ failsafe_dev_remove(dev);
+reinstall:
+ ret = failsafe_hotplug_alarm_install(dev);
+ if (ret)
+ ERROR("Unable to set up next alarm");
+}
+
+static int
+fs_mutex_init(struct fs_priv *priv)
+{
+ int ret;
+ pthread_mutexattr_t attr;
+
+ ret = pthread_mutexattr_init(&attr);
+ if (ret) {
+ ERROR("Cannot initiate mutex attributes - %s", strerror(ret));
+ return ret;
+ }
+ /* Allow mutex relocks for the thread holding the mutex. */
+ ret = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ if (ret) {
+ ERROR("Cannot set mutex type - %s", strerror(ret));
+ return ret;
+ }
+ ret = pthread_mutex_init(&priv->hotplug_mutex, &attr);
+ if (ret) {
+ ERROR("Cannot initiate mutex - %s", strerror(ret));
+ return ret;
+ }
+ return 0;
+}
+
+static int
+fs_eth_dev_create(struct rte_vdev_device *vdev)
+{
+ struct rte_eth_dev *dev;
+ struct ether_addr *mac;
+ struct fs_priv *priv;
+ struct sub_device *sdev;
+ const char *params;
+ unsigned int socket_id;
+ uint8_t i;
+ int ret;
+
+ dev = NULL;
+ priv = NULL;
+ socket_id = rte_socket_id();
+ INFO("Creating fail-safe device on NUMA socket %u", socket_id);
+ params = rte_vdev_device_args(vdev);
+ if (params == NULL) {
+ ERROR("This PMD requires sub-devices, none provided");
+ return -1;
+ }
+ dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
+ if (dev == NULL) {
+ ERROR("Unable to allocate rte_eth_dev");
+ return -1;
+ }
+ priv = PRIV(dev);
+ priv->dev = dev;
+ dev->dev_ops = &failsafe_ops;
+ dev->data->mac_addrs = &PRIV(dev)->mac_addrs[0];
+ dev->data->dev_link = eth_link;
+ PRIV(dev)->nb_mac_addr = 1;
+ TAILQ_INIT(&PRIV(dev)->flow_list);
+ dev->rx_pkt_burst = (eth_rx_burst_t)&failsafe_rx_burst;
+ dev->tx_pkt_burst = (eth_tx_burst_t)&failsafe_tx_burst;
+ ret = fs_sub_device_alloc(dev, params);
+ if (ret) {
+ ERROR("Could not allocate sub_devices");
+ goto free_dev;
+ }
+ ret = failsafe_args_parse(dev, params);
+ if (ret)
+ goto free_subs;
+ ret = rte_eth_dev_owner_new(&priv->my_owner.id);
+ if (ret) {
+ ERROR("Failed to get unique owner identifier");
+ goto free_args;
+ }
+ snprintf(priv->my_owner.name, sizeof(priv->my_owner.name),
+ FAILSAFE_OWNER_NAME);
+ DEBUG("Failsafe port %u owner info: %s_%016"PRIX64, dev->data->port_id,
+ priv->my_owner.name, priv->my_owner.id);
+ ret = rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
+ failsafe_eth_new_event_callback,
+ dev);
+ if (ret) {
+ ERROR("Failed to register NEW callback");
+ goto free_args;
+ }
+ ret = failsafe_eal_init(dev);
+ if (ret)
+ goto unregister_new_callback;
+ ret = fs_mutex_init(priv);
+ if (ret)
+ goto unregister_new_callback;
+ ret = failsafe_hotplug_alarm_install(dev);
+ if (ret) {
+ ERROR("Could not set up plug-in event detection");
+ goto unregister_new_callback;
+ }
+ mac = &dev->data->mac_addrs[0];
+ if (mac_from_arg) {
+ /*
+ * If MAC address was provided as a parameter,
+ * apply to all probed slaves.
+ */
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev),
+ mac);
+ if (ret) {
+ ERROR("Failed to set default MAC address");
+ goto cancel_alarm;
+ }
+ }
+ } else {
+ /*
+ * Use the ether_addr from first probed
+ * device, either preferred or fallback.
+ */
+ FOREACH_SUBDEV(sdev, i, dev)
+ if (sdev->state >= DEV_PROBED) {
+ ether_addr_copy(&ETH(sdev)->data->mac_addrs[0],
+ mac);
+ break;
+ }
+ /*
+ * If no device has been probed and no ether_addr
+ * has been provided on the command line, use a random
+ * valid one.
+ * It will be applied during future slave state syncs to
+ * probed slaves.
+ */
+ if (i == priv->subs_tail)
+ eth_random_addr(&mac->addr_bytes[0]);
+ }
+ INFO("MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
+ mac->addr_bytes[0], mac->addr_bytes[1],
+ mac->addr_bytes[2], mac->addr_bytes[3],
+ mac->addr_bytes[4], mac->addr_bytes[5]);
+ dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ PRIV(dev)->intr_handle = (struct rte_intr_handle){
+ .fd = -1,
+ .type = RTE_INTR_HANDLE_EXT,
+ };
+ rte_eth_dev_probing_finish(dev);
+ return 0;
+cancel_alarm:
+ failsafe_hotplug_alarm_cancel(dev);
+unregister_new_callback:
+ rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
+ failsafe_eth_new_event_callback, dev);
+free_args:
+ failsafe_args_free(dev);
+free_subs:
+ fs_sub_device_free(dev);
+free_dev:
+ rte_free(PRIV(dev));
+ rte_eth_dev_release_port(dev);
+ return -1;
+}
+
+static int
+fs_rte_eth_free(const char *name)
+{
+ struct rte_eth_dev *dev;
+ int ret;
+
+ dev = rte_eth_dev_allocated(name);
+ if (dev == NULL)
+ return -ENODEV;
+ rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
+ failsafe_eth_new_event_callback, dev);
+ ret = failsafe_eal_uninit(dev);
+ if (ret)
+ ERROR("Error while uninitializing sub-EAL");
+ failsafe_args_free(dev);
+ fs_sub_device_free(dev);
+ ret = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
+ if (ret)
+ ERROR("Error while destroying hotplug mutex");
+ rte_free(PRIV(dev));
+ rte_eth_dev_release_port(dev);
+ return ret;
+}
+
+static int
+rte_pmd_failsafe_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ struct rte_eth_dev *eth_dev;
+
+ name = rte_vdev_device_name(vdev);
+ INFO("Initializing " FAILSAFE_DRIVER_NAME " for %s",
+ name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(vdev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ ERROR("Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &failsafe_ops;
+ eth_dev->device = &vdev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ return fs_eth_dev_create(vdev);
+}
+
+static int
+rte_pmd_failsafe_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ INFO("Uninitializing " FAILSAFE_DRIVER_NAME " for %s", name);
+ return fs_rte_eth_free(name);
+}
+
+static struct rte_vdev_driver failsafe_drv = {
+ .probe = rte_pmd_failsafe_probe,
+ .remove = rte_pmd_failsafe_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_failsafe, failsafe_drv);
+RTE_PMD_REGISTER_PARAM_STRING(net_failsafe, PMD_FAILSAFE_PARAM_STRING);
+
+RTE_INIT(failsafe_init_log)
+{
+ failsafe_logtype = rte_log_register("pmd.net.failsafe");
+ if (failsafe_logtype >= 0)
+ rte_log_set_level(failsafe_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_args.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_args.c
new file mode 100644
index 00000000..626883ce
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_args.c
@@ -0,0 +1,521 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <rte_debug.h>
+#include <rte_devargs.h>
+#include <rte_malloc.h>
+#include <rte_kvargs.h>
+#include <rte_string_fns.h>
+
+#include "failsafe_private.h"
+
+/* Callback used when a new device is found in devargs */
+typedef int (parse_cb)(struct rte_eth_dev *dev, const char *params,
+ uint8_t head);
+
+uint64_t hotplug_poll = FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS;
+int mac_from_arg = 0;
+
+const char *pmd_failsafe_init_parameters[] = {
+ PMD_FAILSAFE_HOTPLUG_POLL_KVARG,
+ PMD_FAILSAFE_MAC_KVARG,
+ NULL,
+};
+
+/*
+ * input: text.
+ * output: 0: if text[0] != '(',
+ * 0: if there are no corresponding ')'
+ * n: distance to corresponding ')' otherwise
+ */
+static size_t
+closing_paren(const char *text)
+{
+ int nb_open = 0;
+ size_t i = 0;
+
+ while (text[i] != '\0') {
+ if (text[i] == '(')
+ nb_open++;
+ if (text[i] == ')')
+ nb_open--;
+ if (nb_open == 0)
+ return i;
+ i++;
+ }
+ return 0;
+}
+
+static int
+fs_parse_device(struct sub_device *sdev, char *args)
+{
+ struct rte_devargs *d;
+ int ret;
+
+ d = &sdev->devargs;
+ DEBUG("%s", args);
+ ret = rte_devargs_parse(d, args);
+ if (ret) {
+ DEBUG("devargs parsing failed with code %d", ret);
+ return ret;
+ }
+ sdev->bus = d->bus;
+ sdev->state = DEV_PARSED;
+ return 0;
+}
+
+static void
+fs_sanitize_cmdline(char *args)
+{
+ char *nl;
+
+ nl = strrchr(args, '\n');
+ if (nl)
+ nl[0] = '\0';
+}
+
+static int
+fs_execute_cmd(struct sub_device *sdev, char *cmdline)
+{
+ FILE *fp;
+ /* store possible newline as well */
+ char output[DEVARGS_MAXLEN + 1];
+ size_t len;
+ int ret;
+
+ RTE_ASSERT(cmdline != NULL || sdev->cmdline != NULL);
+ if (sdev->cmdline == NULL) {
+ size_t i;
+
+ len = strlen(cmdline) + 1;
+ sdev->cmdline = calloc(1, len);
+ if (sdev->cmdline == NULL) {
+ ERROR("Command line allocation failed");
+ return -ENOMEM;
+ }
+ snprintf(sdev->cmdline, len, "%s", cmdline);
+ /* Replace all commas in the command line by spaces */
+ for (i = 0; i < len; i++)
+ if (sdev->cmdline[i] == ',')
+ sdev->cmdline[i] = ' ';
+ }
+ DEBUG("'%s'", sdev->cmdline);
+ fp = popen(sdev->cmdline, "r");
+ if (fp == NULL) {
+ ret = -errno;
+ ERROR("popen: %s", strerror(errno));
+ return ret;
+ }
+ /* We only read one line */
+ if (fgets(output, sizeof(output) - 1, fp) == NULL) {
+ DEBUG("Could not read command output");
+ ret = -ENODEV;
+ goto ret_pclose;
+ }
+ fs_sanitize_cmdline(output);
+ if (output[0] == '\0') {
+ ret = -ENODEV;
+ goto ret_pclose;
+ }
+ ret = fs_parse_device(sdev, output);
+ if (ret)
+ ERROR("Parsing device '%s' failed", output);
+ret_pclose:
+ if (pclose(fp) == -1)
+ ERROR("pclose: %s", strerror(errno));
+ return ret;
+}
+
+static int
+fs_read_fd(struct sub_device *sdev, char *fd_str)
+{
+ FILE *fp = NULL;
+ int fd = -1;
+ /* store possible newline as well */
+ char output[DEVARGS_MAXLEN + 1];
+ int err = -ENODEV;
+ int oflags;
+ int lcount;
+
+ RTE_ASSERT(fd_str != NULL || sdev->fd_str != NULL);
+ if (sdev->fd_str == NULL) {
+ sdev->fd_str = strdup(fd_str);
+ if (sdev->fd_str == NULL) {
+ ERROR("Command line allocation failed");
+ return -ENOMEM;
+ }
+ }
+ errno = 0;
+ fd = strtol(fd_str, &fd_str, 0);
+ if (errno || *fd_str || fd < 0) {
+ ERROR("Parsing FD number failed");
+ goto error;
+ }
+ /* Fiddle with copy of file descriptor */
+ fd = dup(fd);
+ if (fd == -1)
+ goto error;
+ oflags = fcntl(fd, F_GETFL);
+ if (oflags == -1)
+ goto error;
+ if (fcntl(fd, F_SETFL, oflags | O_NONBLOCK) == -1)
+ goto error;
+ fp = fdopen(fd, "r");
+ if (fp == NULL)
+ goto error;
+ fd = -1;
+ /* Only take the last line into account */
+ lcount = 0;
+ while (fgets(output, sizeof(output), fp))
+ ++lcount;
+ if (lcount == 0)
+ goto error;
+ else if (ferror(fp) && errno != EAGAIN)
+ goto error;
+ /* Line must end with a newline character */
+ fs_sanitize_cmdline(output);
+ if (output[0] == '\0')
+ goto error;
+ err = fs_parse_device(sdev, output);
+ if (err)
+ ERROR("Parsing device '%s' failed", output);
+error:
+ if (fp)
+ fclose(fp);
+ if (fd != -1)
+ close(fd);
+ return err;
+}
+
+static int
+fs_parse_device_param(struct rte_eth_dev *dev, const char *param,
+ uint8_t head)
+{
+ struct fs_priv *priv;
+ struct sub_device *sdev;
+ char *args = NULL;
+ size_t a, b;
+ int ret;
+
+ priv = PRIV(dev);
+ a = 0;
+ b = 0;
+ ret = 0;
+ while (param[b] != '(' &&
+ param[b] != '\0')
+ b++;
+ a = b;
+ b += closing_paren(&param[b]);
+ if (a == b) {
+ ERROR("Dangling parenthesis");
+ return -EINVAL;
+ }
+ a += 1;
+ args = strndup(&param[a], b - a);
+ if (args == NULL) {
+ ERROR("Not enough memory for parameter parsing");
+ return -ENOMEM;
+ }
+ sdev = &priv->subs[head];
+ if (strncmp(param, "dev", 3) == 0) {
+ ret = fs_parse_device(sdev, args);
+ if (ret)
+ goto free_args;
+ } else if (strncmp(param, "exec", 4) == 0) {
+ ret = fs_execute_cmd(sdev, args);
+ if (ret == -ENODEV) {
+ DEBUG("Reading device info from command line failed");
+ ret = 0;
+ }
+ if (ret)
+ goto free_args;
+ } else if (strncmp(param, "fd(", 3) == 0) {
+ ret = fs_read_fd(sdev, args);
+ if (ret == -ENODEV) {
+ DEBUG("Reading device info from FD failed");
+ ret = 0;
+ }
+ if (ret)
+ goto free_args;
+ } else {
+ ERROR("Unrecognized device type: %.*s", (int)b, param);
+ return -EINVAL;
+ }
+free_args:
+ free(args);
+ return ret;
+}
+
+static int
+fs_parse_sub_devices(parse_cb *cb,
+ struct rte_eth_dev *dev, const char *params)
+{
+ size_t a, b;
+ uint8_t head;
+ int ret;
+
+ a = 0;
+ head = 0;
+ ret = 0;
+ while (params[a] != '\0') {
+ b = a;
+ while (params[b] != '(' &&
+ params[b] != ',' &&
+ params[b] != '\0')
+ b++;
+ if (b == a) {
+ ERROR("Invalid parameter");
+ return -EINVAL;
+ }
+ if (params[b] == ',') {
+ a = b + 1;
+ continue;
+ }
+ if (params[b] == '(') {
+ size_t start = b;
+
+ b += closing_paren(&params[b]);
+ if (b == start) {
+ ERROR("Dangling parenthesis");
+ return -EINVAL;
+ }
+ ret = (*cb)(dev, &params[a], head);
+ if (ret)
+ return ret;
+ head += 1;
+ b += 1;
+ if (params[b] == '\0')
+ return 0;
+ }
+ a = b + 1;
+ }
+ return 0;
+}
+
+static int
+fs_remove_sub_devices_definition(char params[DEVARGS_MAXLEN])
+{
+ char buffer[DEVARGS_MAXLEN] = {0};
+ size_t a, b;
+ int i;
+
+ a = 0;
+ i = 0;
+ while (params[a] != '\0') {
+ b = a;
+ while (params[b] != '(' &&
+ params[b] != ',' &&
+ params[b] != '\0')
+ b++;
+ if (b == a) {
+ ERROR("Invalid parameter");
+ return -EINVAL;
+ }
+ if (params[b] == ',' || params[b] == '\0') {
+ size_t len = b - a;
+
+ if (i > 0)
+ len += 1;
+ snprintf(&buffer[i], len + 1, "%s%s",
+ i ? "," : "", &params[a]);
+ i += len;
+ } else if (params[b] == '(') {
+ size_t start = b;
+
+ b += closing_paren(&params[b]);
+ if (b == start)
+ return -EINVAL;
+ b += 1;
+ if (params[b] == '\0')
+ goto out;
+ }
+ a = b + 1;
+ }
+out:
+ strlcpy(params, buffer, DEVARGS_MAXLEN);
+ return 0;
+}
+
+static int
+fs_get_u64_arg(const char *key __rte_unused,
+ const char *value, void *out)
+{
+ uint64_t *u64 = out;
+ char *endptr = NULL;
+
+ if ((value == NULL) || (out == NULL))
+ return -EINVAL;
+ errno = 0;
+ *u64 = strtoull(value, &endptr, 0);
+ if (errno != 0)
+ return -errno;
+ if (endptr == value)
+ return -1;
+ return 0;
+}
+
+static int
+fs_get_mac_addr_arg(const char *key __rte_unused,
+ const char *value, void *out)
+{
+ struct ether_addr *ea = out;
+ int ret;
+
+ if ((value == NULL) || (out == NULL))
+ return -EINVAL;
+ ret = sscanf(value, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+ &ea->addr_bytes[0], &ea->addr_bytes[1],
+ &ea->addr_bytes[2], &ea->addr_bytes[3],
+ &ea->addr_bytes[4], &ea->addr_bytes[5]);
+ return ret != ETHER_ADDR_LEN;
+}
+
+int
+failsafe_args_parse(struct rte_eth_dev *dev, const char *params)
+{
+ struct fs_priv *priv;
+ char mut_params[DEVARGS_MAXLEN] = "";
+ struct rte_kvargs *kvlist = NULL;
+ unsigned int arg_count;
+ size_t n;
+ int ret;
+
+ priv = PRIV(dev);
+ ret = 0;
+ priv->subs_tx = FAILSAFE_MAX_ETHPORTS;
+ /* default parameters */
+ n = strlcpy(mut_params, params, sizeof(mut_params));
+ if (n >= sizeof(mut_params)) {
+ ERROR("Parameter string too long (>=%zu)",
+ sizeof(mut_params));
+ return -ENOMEM;
+ }
+ ret = fs_parse_sub_devices(fs_parse_device_param,
+ dev, params);
+ if (ret < 0)
+ return ret;
+ ret = fs_remove_sub_devices_definition(mut_params);
+ if (ret < 0)
+ return ret;
+ if (strnlen(mut_params, sizeof(mut_params)) > 0) {
+ kvlist = rte_kvargs_parse(mut_params,
+ pmd_failsafe_init_parameters);
+ if (kvlist == NULL) {
+ ERROR("Error parsing parameters, usage:\n"
+ PMD_FAILSAFE_PARAM_STRING);
+ return -1;
+ }
+ /* PLUG_IN event poll timer */
+ arg_count = rte_kvargs_count(kvlist,
+ PMD_FAILSAFE_HOTPLUG_POLL_KVARG);
+ if (arg_count == 1) {
+ ret = rte_kvargs_process(kvlist,
+ PMD_FAILSAFE_HOTPLUG_POLL_KVARG,
+ &fs_get_u64_arg, &hotplug_poll);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+ /* MAC addr */
+ arg_count = rte_kvargs_count(kvlist,
+ PMD_FAILSAFE_MAC_KVARG);
+ if (arg_count > 0) {
+ ret = rte_kvargs_process(kvlist,
+ PMD_FAILSAFE_MAC_KVARG,
+ &fs_get_mac_addr_arg,
+ &dev->data->mac_addrs[0]);
+ if (ret < 0)
+ goto free_kvlist;
+
+ mac_from_arg = 1;
+ }
+ }
+ PRIV(dev)->state = DEV_PARSED;
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+void
+failsafe_args_free(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ free(sdev->cmdline);
+ sdev->cmdline = NULL;
+ free(sdev->fd_str);
+ sdev->fd_str = NULL;
+ free(sdev->devargs.args);
+ sdev->devargs.args = NULL;
+ }
+}
+
+static int
+fs_count_device(struct rte_eth_dev *dev, const char *param,
+ uint8_t head __rte_unused)
+{
+ size_t b = 0;
+
+ while (param[b] != '(' &&
+ param[b] != '\0')
+ b++;
+ if (strncmp(param, "dev", b) != 0 &&
+ strncmp(param, "exec", b) != 0 &&
+ strncmp(param, "fd(", b) != 0) {
+ ERROR("Unrecognized device type: %.*s", (int)b, param);
+ return -EINVAL;
+ }
+ PRIV(dev)->subs_tail += 1;
+ return 0;
+}
+
+int
+failsafe_args_count_subdevice(struct rte_eth_dev *dev,
+ const char *params)
+{
+ return fs_parse_sub_devices(fs_count_device,
+ dev, params);
+}
+
+static int
+fs_parse_sub_device(struct sub_device *sdev)
+{
+ struct rte_devargs *da;
+ char devstr[DEVARGS_MAXLEN] = "";
+
+ da = &sdev->devargs;
+ snprintf(devstr, sizeof(devstr), "%s,%s", da->name, da->args);
+ return fs_parse_device(sdev, devstr);
+}
+
+int
+failsafe_args_parse_subs(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret = 0;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state >= DEV_PARSED)
+ continue;
+ if (sdev->cmdline)
+ ret = fs_execute_cmd(sdev, sdev->cmdline);
+ else if (sdev->fd_str)
+ ret = fs_read_fd(sdev, sdev->fd_str);
+ else
+ ret = fs_parse_sub_device(sdev);
+ if (ret == 0)
+ sdev->state = DEV_PARSED;
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_eal.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_eal.c
new file mode 100644
index 00000000..ce1633f1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_eal.c
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <rte_malloc.h>
+
+#include "failsafe_private.h"
+
+static int
+fs_ethdev_portid_get(const char *name, uint16_t *port_id)
+{
+ uint16_t pid;
+ size_t len;
+
+ if (name == NULL) {
+ DEBUG("Null pointer is specified\n");
+ return -EINVAL;
+ }
+ len = strlen(name);
+ for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
+ if (rte_eth_dev_is_valid_port(pid) &&
+ !strncmp(name, rte_eth_devices[pid].device->name, len)) {
+ *port_id = pid;
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+static int
+fs_bus_init(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ struct rte_devargs *da;
+ uint8_t i;
+ uint16_t pid;
+ int ret;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state != DEV_PARSED)
+ continue;
+ da = &sdev->devargs;
+ if (fs_ethdev_portid_get(da->name, &pid) != 0) {
+ struct rte_eth_dev_owner pid_owner;
+
+ ret = rte_eal_hotplug_add(da->bus->name,
+ da->name,
+ da->args);
+ if (ret) {
+ ERROR("sub_device %d probe failed %s%s%s", i,
+ rte_errno ? "(" : "",
+ rte_errno ? strerror(rte_errno) : "",
+ rte_errno ? ")" : "");
+ continue;
+ }
+ if (fs_ethdev_portid_get(da->name, &pid) != 0) {
+ ERROR("sub_device %d init went wrong", i);
+ return -ENODEV;
+ }
+ /*
+ * The NEW callback tried to take ownership, check
+ * whether it succeed or didn't.
+ */
+ rte_eth_dev_owner_get(pid, &pid_owner);
+ if (pid_owner.id != PRIV(dev)->my_owner.id) {
+ INFO("sub_device %d owner(%s_%016"PRIX64") is not my,"
+ " owner(%s_%016"PRIX64"), will try again later",
+ i, pid_owner.name, pid_owner.id,
+ PRIV(dev)->my_owner.name,
+ PRIV(dev)->my_owner.id);
+ continue;
+ }
+ } else {
+ /* The sub-device port was found. */
+ char devstr[DEVARGS_MAXLEN] = "";
+ struct rte_devargs *probed_da =
+ rte_eth_devices[pid].device->devargs;
+
+ /* Take control of probed device. */
+ free(da->args);
+ memset(da, 0, sizeof(*da));
+ if (probed_da != NULL)
+ snprintf(devstr, sizeof(devstr), "%s,%s",
+ probed_da->name, probed_da->args);
+ else
+ snprintf(devstr, sizeof(devstr), "%s",
+ rte_eth_devices[pid].device->name);
+ ret = rte_devargs_parse(da, devstr);
+ if (ret) {
+ ERROR("Probed devargs parsing failed with code"
+ " %d", ret);
+ return ret;
+ }
+ INFO("Taking control of a probed sub device"
+ " %d named %s", i, da->name);
+ ret = rte_eth_dev_owner_set(pid, &PRIV(dev)->my_owner);
+ if (ret < 0) {
+ INFO("sub_device %d owner set failed (%s), "
+ "will try again later", i, strerror(-ret));
+ continue;
+ } else if (strncmp(rte_eth_devices[pid].device->name,
+ da->name, strlen(da->name)) != 0) {
+ /*
+ * The device probably was removed and its port
+ * id was reallocated before ownership set.
+ */
+ rte_eth_dev_owner_unset(pid,
+ PRIV(dev)->my_owner.id);
+ INFO("sub_device %d was removed before taking"
+ " ownership, will try again later", i);
+ continue;
+ }
+ }
+ ETH(sdev) = &rte_eth_devices[pid];
+ SUB_ID(sdev) = i;
+ sdev->fs_dev = dev;
+ sdev->dev = ETH(sdev)->device;
+ sdev->state = DEV_PROBED;
+ }
+ return 0;
+}
+
+int
+failsafe_eal_init(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = fs_bus_init(dev);
+ if (ret)
+ return ret;
+ if (PRIV(dev)->state < DEV_PROBED)
+ PRIV(dev)->state = DEV_PROBED;
+ fs_switch_dev(dev, NULL);
+ return 0;
+}
+
+static int
+fs_bus_uninit(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev = NULL;
+ uint8_t i;
+ int sdev_ret;
+ int ret = 0;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ sdev_ret = rte_eal_hotplug_remove(sdev->bus->name,
+ sdev->dev->name);
+ if (sdev_ret) {
+ ERROR("Failed to remove requested device %s (err: %d)",
+ sdev->dev->name, sdev_ret);
+ continue;
+ }
+ sdev->state = DEV_PROBED - 1;
+ }
+ return ret;
+}
+
+int
+failsafe_eal_uninit(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = fs_bus_uninit(dev);
+ PRIV(dev)->state = DEV_PROBED - 1;
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_ether.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_ether.c
new file mode 100644
index 00000000..5b5cb3b4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_ether.c
@@ -0,0 +1,518 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <unistd.h>
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_cycles.h>
+
+#include "failsafe_private.h"
+
+/** Print a message out of a flow error. */
+static int
+fs_flow_complain(struct rte_flow_error *error)
+{
+ static const char *const errstrlist[] = {
+ [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
+ [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
+ [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
+ [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
+ [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
+ [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
+ [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
+ [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
+ [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
+ };
+ const char *errstr;
+ char buf[32];
+ int err = rte_errno;
+
+ if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
+ !errstrlist[error->type])
+ errstr = "unknown type";
+ else
+ errstr = errstrlist[error->type];
+ ERROR("Caught error type %d (%s): %s%s\n",
+ error->type, errstr,
+ error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
+ error->cause), buf) : "",
+ error->message ? error->message : "(no stated reason)");
+ return -err;
+}
+
+static int
+eth_dev_flow_isolate_set(struct rte_eth_dev *dev,
+ struct sub_device *sdev)
+{
+ struct rte_flow_error ferror;
+ int ret;
+
+ if (!PRIV(dev)->flow_isolated) {
+ DEBUG("Flow isolation already disabled");
+ } else {
+ DEBUG("Enabling flow isolation");
+ ret = rte_flow_isolate(PORT_ID(sdev),
+ PRIV(dev)->flow_isolated,
+ &ferror);
+ if (ret) {
+ fs_flow_complain(&ferror);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int
+fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
+ struct sub_device *sdev)
+{
+ struct rte_eth_dev *edev;
+ struct rte_vlan_filter_conf *vfc1;
+ struct rte_vlan_filter_conf *vfc2;
+ struct rte_flow *flow;
+ struct rte_flow_error ferror;
+ uint32_t i;
+ int ret;
+
+ edev = ETH(sdev);
+ /* RX queue setup */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct rxq *rxq;
+
+ rxq = dev->data->rx_queues[i];
+ ret = rte_eth_rx_queue_setup(PORT_ID(sdev), i,
+ rxq->info.nb_desc, rxq->socket_id,
+ &rxq->info.conf, rxq->info.mp);
+ if (ret) {
+ ERROR("rx_queue_setup failed");
+ return ret;
+ }
+ }
+ /* TX queue setup */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct txq *txq;
+
+ txq = dev->data->tx_queues[i];
+ ret = rte_eth_tx_queue_setup(PORT_ID(sdev), i,
+ txq->info.nb_desc, txq->socket_id,
+ &txq->info.conf);
+ if (ret) {
+ ERROR("tx_queue_setup failed");
+ return ret;
+ }
+ }
+ /* dev_link.link_status */
+ if (dev->data->dev_link.link_status !=
+ edev->data->dev_link.link_status) {
+ DEBUG("Configuring link_status");
+ if (dev->data->dev_link.link_status)
+ ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
+ else
+ ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
+ if (ret) {
+ ERROR("Failed to apply link_status");
+ return ret;
+ }
+ } else {
+ DEBUG("link_status already set");
+ }
+ /* promiscuous */
+ if (dev->data->promiscuous != edev->data->promiscuous) {
+ DEBUG("Configuring promiscuous");
+ if (dev->data->promiscuous)
+ rte_eth_promiscuous_enable(PORT_ID(sdev));
+ else
+ rte_eth_promiscuous_disable(PORT_ID(sdev));
+ } else {
+ DEBUG("promiscuous already set");
+ }
+ /* all_multicast */
+ if (dev->data->all_multicast != edev->data->all_multicast) {
+ DEBUG("Configuring all_multicast");
+ if (dev->data->all_multicast)
+ rte_eth_allmulticast_enable(PORT_ID(sdev));
+ else
+ rte_eth_allmulticast_disable(PORT_ID(sdev));
+ } else {
+ DEBUG("all_multicast already set");
+ }
+ /* MTU */
+ if (dev->data->mtu != edev->data->mtu) {
+ DEBUG("Configuring MTU");
+ ret = rte_eth_dev_set_mtu(PORT_ID(sdev), dev->data->mtu);
+ if (ret) {
+ ERROR("Failed to apply MTU");
+ return ret;
+ }
+ } else {
+ DEBUG("MTU already set");
+ }
+ /* default MAC */
+ DEBUG("Configuring default MAC address");
+ ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev),
+ &dev->data->mac_addrs[0]);
+ if (ret) {
+ ERROR("Setting default MAC address failed");
+ return ret;
+ }
+ /* additional MAC */
+ if (PRIV(dev)->nb_mac_addr > 1)
+ DEBUG("Configure additional MAC address%s",
+ (PRIV(dev)->nb_mac_addr > 2 ? "es" : ""));
+ for (i = 1; i < PRIV(dev)->nb_mac_addr; i++) {
+ struct ether_addr *ea;
+
+ ea = &dev->data->mac_addrs[i];
+ ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), ea,
+ PRIV(dev)->mac_addr_pool[i]);
+ if (ret) {
+ char ea_fmt[ETHER_ADDR_FMT_SIZE];
+
+ ether_format_addr(ea_fmt, ETHER_ADDR_FMT_SIZE, ea);
+ ERROR("Adding MAC address %s failed", ea_fmt);
+ return ret;
+ }
+ }
+ /* VLAN filter */
+ vfc1 = &dev->data->vlan_filter_conf;
+ vfc2 = &edev->data->vlan_filter_conf;
+ if (memcmp(vfc1, vfc2, sizeof(struct rte_vlan_filter_conf))) {
+ uint64_t vbit;
+ uint64_t ids;
+ size_t i;
+ uint16_t vlan_id;
+
+ DEBUG("Configuring VLAN filter");
+ for (i = 0; i < RTE_DIM(vfc1->ids); i++) {
+ if (vfc1->ids[i] == 0)
+ continue;
+ ids = vfc1->ids[i];
+ while (ids) {
+ vlan_id = 64 * i;
+ /* count trailing zeroes */
+ vbit = ~ids & (ids - 1);
+ /* clear least significant bit set */
+ ids ^= (ids ^ (ids - 1)) ^ vbit;
+ for (; vbit; vlan_id++)
+ vbit >>= 1;
+ ret = rte_eth_dev_vlan_filter(
+ PORT_ID(sdev), vlan_id, 1);
+ if (ret) {
+ ERROR("Failed to apply VLAN filter %hu",
+ vlan_id);
+ return ret;
+ }
+ }
+ }
+ } else {
+ DEBUG("VLAN filter already set");
+ }
+ /* rte_flow */
+ if (TAILQ_EMPTY(&PRIV(dev)->flow_list)) {
+ DEBUG("rte_flow already set");
+ } else {
+ DEBUG("Resetting rte_flow configuration");
+ ret = rte_flow_flush(PORT_ID(sdev), &ferror);
+ if (ret) {
+ fs_flow_complain(&ferror);
+ return ret;
+ }
+ i = 0;
+ rte_errno = 0;
+ DEBUG("Configuring rte_flow");
+ TAILQ_FOREACH(flow, &PRIV(dev)->flow_list, next) {
+ DEBUG("Creating flow #%" PRIu32, i++);
+ flow->flows[SUB_ID(sdev)] =
+ rte_flow_create(PORT_ID(sdev),
+ &flow->fd->attr,
+ flow->fd->items,
+ flow->fd->actions,
+ &ferror);
+ ret = rte_errno;
+ if (ret)
+ break;
+ }
+ if (ret) {
+ fs_flow_complain(&ferror);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void
+fs_dev_remove(struct sub_device *sdev)
+{
+ int ret;
+
+ if (sdev == NULL)
+ return;
+ switch (sdev->state) {
+ case DEV_STARTED:
+ failsafe_rx_intr_uninstall_subdevice(sdev);
+ rte_eth_dev_stop(PORT_ID(sdev));
+ sdev->state = DEV_ACTIVE;
+ /* fallthrough */
+ case DEV_ACTIVE:
+ failsafe_eth_dev_unregister_callbacks(sdev);
+ rte_eth_dev_close(PORT_ID(sdev));
+ sdev->state = DEV_PROBED;
+ /* fallthrough */
+ case DEV_PROBED:
+ ret = rte_eal_hotplug_remove(sdev->bus->name,
+ sdev->dev->name);
+ if (ret) {
+ ERROR("Bus detach failed for sub_device %u",
+ SUB_ID(sdev));
+ } else {
+ rte_eth_dev_release_port(ETH(sdev));
+ }
+ sdev->state = DEV_PARSED;
+ /* fallthrough */
+ case DEV_PARSED:
+ case DEV_UNDEFINED:
+ sdev->state = DEV_UNDEFINED;
+ /* the end */
+ break;
+ }
+ sdev->remove = 0;
+ failsafe_hotplug_alarm_install(sdev->fs_dev);
+}
+
+static void
+fs_dev_stats_save(struct sub_device *sdev)
+{
+ struct rte_eth_stats stats;
+ int err;
+
+ /* Attempt to read current stats. */
+ err = rte_eth_stats_get(PORT_ID(sdev), &stats);
+ if (err) {
+ uint64_t timestamp = sdev->stats_snapshot.timestamp;
+
+ WARN("Could not access latest statistics from sub-device %d.\n",
+ SUB_ID(sdev));
+ if (timestamp != 0)
+ WARN("Using latest snapshot taken before %"PRIu64" seconds.\n",
+ (rte_rdtsc() - timestamp) / rte_get_tsc_hz());
+ }
+ failsafe_stats_increment(&PRIV(sdev->fs_dev)->stats_accumulator,
+ err ? &sdev->stats_snapshot.stats : &stats);
+ memset(&sdev->stats_snapshot, 0, sizeof(sdev->stats_snapshot));
+}
+
+static inline int
+fs_rxtx_clean(struct sub_device *sdev)
+{
+ uint16_t i;
+
+ for (i = 0; i < ETH(sdev)->data->nb_rx_queues; i++)
+ if (FS_ATOMIC_RX(sdev, i))
+ return 0;
+ for (i = 0; i < ETH(sdev)->data->nb_tx_queues; i++)
+ if (FS_ATOMIC_TX(sdev, i))
+ return 0;
+ return 1;
+}
+
+void
+failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev)
+{
+ int ret;
+
+ if (sdev == NULL)
+ return;
+ if (sdev->rmv_callback) {
+ ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_RMV,
+ failsafe_eth_rmv_event_callback,
+ sdev);
+ if (ret)
+ WARN("Failed to unregister RMV callback for sub_device"
+ " %d", SUB_ID(sdev));
+ sdev->rmv_callback = 0;
+ }
+ if (sdev->lsc_callback) {
+ ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_LSC,
+ failsafe_eth_lsc_event_callback,
+ sdev);
+ if (ret)
+ WARN("Failed to unregister LSC callback for sub_device"
+ " %d", SUB_ID(sdev));
+ sdev->lsc_callback = 0;
+ }
+}
+
+void
+failsafe_dev_remove(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ if (sdev->remove && fs_rxtx_clean(sdev)) {
+ if (fs_lock(dev, 1) != 0)
+ return;
+ fs_dev_stats_save(sdev);
+ fs_dev_remove(sdev);
+ fs_unlock(dev, 1);
+ }
+}
+
+int
+failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint32_t inactive;
+ int ret;
+ uint8_t i;
+
+ if (PRIV(dev)->state < DEV_PARSED)
+ return 0;
+
+ ret = failsafe_args_parse_subs(dev);
+ if (ret)
+ goto err_remove;
+
+ if (PRIV(dev)->state < DEV_PROBED)
+ return 0;
+ ret = failsafe_eal_init(dev);
+ if (ret)
+ goto err_remove;
+ if (PRIV(dev)->state < DEV_ACTIVE)
+ return 0;
+ inactive = 0;
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state == DEV_PROBED) {
+ inactive |= UINT32_C(1) << i;
+ ret = eth_dev_flow_isolate_set(dev, sdev);
+ if (ret) {
+ ERROR("Could not apply configuration to sub_device %d",
+ i);
+ goto err_remove;
+ }
+ }
+ }
+ ret = dev->dev_ops->dev_configure(dev);
+ if (ret)
+ goto err_remove;
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (inactive & (UINT32_C(1) << i)) {
+ ret = fs_eth_dev_conf_apply(dev, sdev);
+ if (ret) {
+ ERROR("Could not apply configuration to sub_device %d",
+ i);
+ goto err_remove;
+ }
+ }
+ }
+ /*
+ * If new devices have been configured, check if
+ * the link state has changed.
+ */
+ if (inactive)
+ dev->dev_ops->link_update(dev, 1);
+ if (PRIV(dev)->state < DEV_STARTED)
+ return 0;
+ ret = dev->dev_ops->dev_start(dev);
+ if (ret)
+ goto err_remove;
+ return 0;
+err_remove:
+ FOREACH_SUBDEV(sdev, i, dev)
+ if (sdev->state != PRIV(dev)->state)
+ sdev->remove = 1;
+ return ret;
+}
+
+void
+failsafe_stats_increment(struct rte_eth_stats *to, struct rte_eth_stats *from)
+{
+ uint32_t i;
+
+ RTE_ASSERT(to != NULL && from != NULL);
+ to->ipackets += from->ipackets;
+ to->opackets += from->opackets;
+ to->ibytes += from->ibytes;
+ to->obytes += from->obytes;
+ to->imissed += from->imissed;
+ to->ierrors += from->ierrors;
+ to->oerrors += from->oerrors;
+ to->rx_nombuf += from->rx_nombuf;
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ to->q_ipackets[i] += from->q_ipackets[i];
+ to->q_opackets[i] += from->q_opackets[i];
+ to->q_ibytes[i] += from->q_ibytes[i];
+ to->q_obytes[i] += from->q_obytes[i];
+ to->q_errors[i] += from->q_errors[i];
+ }
+}
+
+int
+failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused,
+ enum rte_eth_event_type event __rte_unused,
+ void *cb_arg, void *out __rte_unused)
+{
+ struct sub_device *sdev = cb_arg;
+
+ fs_lock(sdev->fs_dev, 0);
+ /* Switch as soon as possible tx_dev. */
+ fs_switch_dev(sdev->fs_dev, sdev);
+ /* Use safe bursts in any case. */
+ set_burst_fn(sdev->fs_dev, 1);
+ /*
+ * Async removal, the sub-PMD will try to unregister
+ * the callback at the source of the current thread context.
+ */
+ sdev->remove = 1;
+ fs_unlock(sdev->fs_dev, 0);
+ return 0;
+}
+
+int
+failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused,
+ enum rte_eth_event_type event __rte_unused,
+ void *cb_arg, void *out __rte_unused)
+{
+ struct rte_eth_dev *dev = cb_arg;
+ int ret;
+
+ ret = dev->dev_ops->link_update(dev, 0);
+ /* We must pass on the LSC event */
+ if (ret)
+ return _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ else
+ return 0;
+}
+
+/* Take sub-device ownership before it becomes exposed to the application. */
+int
+failsafe_eth_new_event_callback(uint16_t port_id,
+ enum rte_eth_event_type event __rte_unused,
+ void *cb_arg, void *out __rte_unused)
+{
+ struct rte_eth_dev *fs_dev = cb_arg;
+ struct sub_device *sdev;
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, fs_dev, DEV_PARSED) {
+ if (sdev->state >= DEV_PROBED)
+ continue;
+ if (strcmp(sdev->devargs.name, dev->device->name) != 0)
+ continue;
+ rte_eth_dev_owner_set(port_id, &PRIV(fs_dev)->my_owner);
+ /* The actual owner will be checked after the port probing. */
+ break;
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_flow.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_flow.c
new file mode 100644
index 00000000..bfe42fce
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_flow.c
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <sys/queue.h>
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "failsafe_private.h"
+
+static struct rte_flow *
+fs_flow_allocate(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *items,
+ const struct rte_flow_action *actions)
+{
+ struct rte_flow *flow;
+ size_t fdsz;
+
+ fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
+ flow = rte_zmalloc(NULL,
+ sizeof(struct rte_flow) + fdsz,
+ RTE_CACHE_LINE_SIZE);
+ if (flow == NULL) {
+ ERROR("Could not allocate new flow");
+ return NULL;
+ }
+ flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
+ if (rte_flow_copy(flow->fd, fdsz, attr, items, actions) != fdsz) {
+ ERROR("Failed to copy flow description");
+ rte_free(flow);
+ return NULL;
+ }
+ return flow;
+}
+
+static void
+fs_flow_release(struct rte_flow **flow)
+{
+ rte_free(*flow);
+ *flow = NULL;
+}
+
+static int
+fs_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_flow_validate on sub_device %d", i);
+ ret = rte_flow_validate(PORT_ID(sdev),
+ attr, patterns, actions, error);
+ if ((ret = fs_err(sdev, ret))) {
+ ERROR("Operation rte_flow_validate failed for sub_device %d"
+ " with error %d", i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+static struct rte_flow *
+fs_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ struct rte_flow *flow;
+ uint8_t i;
+
+ fs_lock(dev, 0);
+ flow = fs_flow_allocate(attr, patterns, actions);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ flow->flows[i] = rte_flow_create(PORT_ID(sdev),
+ attr, patterns, actions, error);
+ if (flow->flows[i] == NULL && fs_err(sdev, -rte_errno)) {
+ ERROR("Failed to create flow on sub_device %d",
+ i);
+ goto err;
+ }
+ }
+ TAILQ_INSERT_TAIL(&PRIV(dev)->flow_list, flow, next);
+ fs_unlock(dev, 0);
+ return flow;
+err:
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (flow->flows[i] != NULL)
+ rte_flow_destroy(PORT_ID(sdev),
+ flow->flows[i], error);
+ }
+ fs_flow_release(&flow);
+ fs_unlock(dev, 0);
+ return NULL;
+}
+
+static int
+fs_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ if (flow == NULL) {
+ ERROR("Invalid flow");
+ return -EINVAL;
+ }
+ ret = 0;
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ int local_ret;
+
+ if (flow->flows[i] == NULL)
+ continue;
+ local_ret = rte_flow_destroy(PORT_ID(sdev),
+ flow->flows[i], error);
+ if ((local_ret = fs_err(sdev, local_ret))) {
+ ERROR("Failed to destroy flow on sub_device %d: %d",
+ i, local_ret);
+ if (ret == 0)
+ ret = local_ret;
+ }
+ }
+ TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
+ fs_flow_release(&flow);
+ fs_unlock(dev, 0);
+ return ret;
+}
+
+static int
+fs_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ struct rte_flow *flow;
+ void *tmp;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_flow_flush on sub_device %d", i);
+ ret = rte_flow_flush(PORT_ID(sdev), error);
+ if ((ret = fs_err(sdev, ret))) {
+ ERROR("Operation rte_flow_flush failed for sub_device %d"
+ " with error %d", i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ TAILQ_FOREACH_SAFE(flow, &PRIV(dev)->flow_list, next, tmp) {
+ TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
+ fs_flow_release(&flow);
+ }
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+static int
+fs_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *action,
+ void *arg,
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+
+ fs_lock(dev, 0);
+ sdev = TX_SUBDEV(dev);
+ if (sdev != NULL) {
+ int ret = rte_flow_query(PORT_ID(sdev),
+ flow->flows[SUB_ID(sdev)],
+ action, arg, error);
+
+ if ((ret = fs_err(sdev, ret))) {
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ fs_unlock(dev, 0);
+ WARN("No active sub_device to query about its flow");
+ return -1;
+}
+
+static int
+fs_flow_isolate(struct rte_eth_dev *dev,
+ int set,
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state < DEV_PROBED)
+ continue;
+ DEBUG("Calling rte_flow_isolate on sub_device %d", i);
+ if (PRIV(dev)->flow_isolated != sdev->flow_isolated)
+ WARN("flow isolation mode of sub_device %d in incoherent state.",
+ i);
+ ret = rte_flow_isolate(PORT_ID(sdev), set, error);
+ if ((ret = fs_err(sdev, ret))) {
+ ERROR("Operation rte_flow_isolate failed for sub_device %d"
+ " with error %d", i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ sdev->flow_isolated = set;
+ }
+ PRIV(dev)->flow_isolated = set;
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+const struct rte_flow_ops fs_flow_ops = {
+ .validate = fs_flow_validate,
+ .create = fs_flow_create,
+ .destroy = fs_flow_destroy,
+ .flush = fs_flow_flush,
+ .query = fs_flow_query,
+ .isolate = fs_flow_isolate,
+};
diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_intr.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_intr.c
new file mode 100644
index 00000000..fc6ec37f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_intr.c
@@ -0,0 +1,536 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+/**
+ * @file
+ * Interrupts handling for failsafe driver.
+ */
+
+#if defined(LINUX)
+#include <sys/epoll.h>
+#endif
+#include <unistd.h>
+
+#include <rte_alarm.h>
+#include <rte_config.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_interrupts.h>
+#include <rte_io.h>
+#include <rte_service_component.h>
+
+#include "failsafe_private.h"
+
+#define NUM_RX_PROXIES (FAILSAFE_MAX_ETHPORTS * RTE_MAX_RXTX_INTR_VEC_ID)
+
+
+/**
+ * Open an epoll file descriptor.
+ *
+ * @param flags
+ * Flags for defining epoll behavior.
+ * @return
+ * 0 on success, negative errno value otherwise.
+ */
+static int
+fs_epoll_create1(int flags)
+{
+#if defined(LINUX)
+ return epoll_create1(flags);
+#elif defined(BSD)
+ RTE_SET_USED(flags);
+ return -ENOTSUP;
+#endif
+}
+
+/**
+ * Install failsafe Rx event proxy service.
+ * The Rx event proxy is the service that listens to Rx events from the
+ * subdevices and triggers failsafe Rx events accordingly.
+ *
+ * @param priv
+ * Pointer to failsafe private structure.
+ * @return
+ * 0 on success, negative errno value otherwise.
+ */
+static int
+fs_rx_event_proxy_routine(void *data)
+{
+ struct fs_priv *priv;
+ struct rxq *rxq;
+ struct rte_epoll_event *events;
+ uint64_t u64;
+ int i, n;
+ int rc = 0;
+
+ u64 = 1;
+ priv = data;
+ events = priv->rxp.evec;
+ n = rte_epoll_wait(priv->rxp.efd, events, NUM_RX_PROXIES, -1);
+ for (i = 0; i < n; i++) {
+ rxq = events[i].epdata.data;
+ if (rxq->enable_events && rxq->event_fd != -1) {
+ if (write(rxq->event_fd, &u64, sizeof(u64)) !=
+ sizeof(u64)) {
+ ERROR("Failed to proxy Rx event to socket %d",
+ rxq->event_fd);
+ rc = -EIO;
+ }
+ }
+ }
+ return rc;
+}
+
+/**
+ * Uninstall failsafe Rx event proxy service.
+ *
+ * @param priv
+ * Pointer to failsafe private structure.
+ */
+static void
+fs_rx_event_proxy_service_uninstall(struct fs_priv *priv)
+{
+ /* Unregister the event service. */
+ switch (priv->rxp.sstate) {
+ case SS_RUNNING:
+ rte_service_map_lcore_set(priv->rxp.sid, priv->rxp.scid, 0);
+ /* fall through */
+ case SS_READY:
+ rte_service_runstate_set(priv->rxp.sid, 0);
+ rte_service_set_stats_enable(priv->rxp.sid, 0);
+ rte_service_component_runstate_set(priv->rxp.sid, 0);
+ /* fall through */
+ case SS_REGISTERED:
+ rte_service_component_unregister(priv->rxp.sid);
+ /* fall through */
+ default:
+ break;
+ }
+}
+
+/**
+ * Install the failsafe Rx event proxy service.
+ *
+ * @param priv
+ * Pointer to failsafe private structure.
+ * @return
+ * 0 on success, negative errno value otherwise.
+ */
+static int
+fs_rx_event_proxy_service_install(struct fs_priv *priv)
+{
+ struct rte_service_spec service;
+ int32_t num_service_cores;
+ int ret = 0;
+
+ num_service_cores = rte_service_lcore_count();
+ if (num_service_cores <= 0) {
+ ERROR("Failed to install Rx interrupts, "
+ "no service core found");
+ return -ENOTSUP;
+ }
+ /* prepare service info */
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ snprintf(service.name, sizeof(service.name), "%s_Rx_service",
+ priv->dev->data->name);
+ service.socket_id = priv->dev->data->numa_node;
+ service.callback = fs_rx_event_proxy_routine;
+ service.callback_userdata = priv;
+
+ if (priv->rxp.sstate == SS_NO_SERVICE) {
+ uint32_t service_core_list[num_service_cores];
+
+ /* get a service core to work with */
+ ret = rte_service_lcore_list(service_core_list,
+ num_service_cores);
+ if (ret <= 0) {
+ ERROR("Failed to install Rx interrupts, "
+ "service core list empty or corrupted");
+ return -ENOTSUP;
+ }
+ priv->rxp.scid = service_core_list[0];
+ ret = rte_service_lcore_add(priv->rxp.scid);
+ if (ret && ret != -EALREADY) {
+ ERROR("Failed adding service core");
+ return ret;
+ }
+ /* service core may be in "stopped" state, start it */
+ ret = rte_service_lcore_start(priv->rxp.scid);
+ if (ret && (ret != -EALREADY)) {
+ ERROR("Failed to install Rx interrupts, "
+ "service core not started");
+ return ret;
+ }
+ /* register our service */
+ int32_t ret = rte_service_component_register(&service,
+ &priv->rxp.sid);
+ if (ret) {
+ ERROR("service register() failed");
+ return -ENOEXEC;
+ }
+ priv->rxp.sstate = SS_REGISTERED;
+ /* run the service */
+ ret = rte_service_component_runstate_set(priv->rxp.sid, 1);
+ if (ret < 0) {
+ ERROR("Failed Setting component runstate\n");
+ return ret;
+ }
+ ret = rte_service_set_stats_enable(priv->rxp.sid, 1);
+ if (ret < 0) {
+ ERROR("Failed enabling stats\n");
+ return ret;
+ }
+ ret = rte_service_runstate_set(priv->rxp.sid, 1);
+ if (ret < 0) {
+ ERROR("Failed to run service\n");
+ return ret;
+ }
+ priv->rxp.sstate = SS_READY;
+ /* map the service with the service core */
+ ret = rte_service_map_lcore_set(priv->rxp.sid,
+ priv->rxp.scid, 1);
+ if (ret) {
+ ERROR("Failed to install Rx interrupts, "
+ "could not map service core");
+ return ret;
+ }
+ priv->rxp.sstate = SS_RUNNING;
+ }
+ return 0;
+}
+
+/**
+ * Install failsafe Rx event proxy subsystem.
+ * This is the way the failsafe PMD generates Rx events on behalf of its
+ * subdevices.
+ *
+ * @param priv
+ * Pointer to failsafe private structure.
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+fs_rx_event_proxy_install(struct fs_priv *priv)
+{
+ int rc = 0;
+
+ /*
+ * Create the epoll fd and event vector for the proxy service to
+ * wait on for Rx events generated by the subdevices.
+ */
+ priv->rxp.efd = fs_epoll_create1(0);
+ if (priv->rxp.efd < 0) {
+ rte_errno = errno;
+ ERROR("Failed to create epoll,"
+ " Rx interrupts will not be supported");
+ return -rte_errno;
+ }
+ priv->rxp.evec = calloc(NUM_RX_PROXIES, sizeof(*priv->rxp.evec));
+ if (priv->rxp.evec == NULL) {
+ ERROR("Failed to allocate memory for event vectors,"
+ " Rx interrupts will not be supported");
+ rc = -ENOMEM;
+ goto error;
+ }
+ rc = fs_rx_event_proxy_service_install(priv);
+ if (rc < 0)
+ goto error;
+ return 0;
+error:
+ if (priv->rxp.efd >= 0) {
+ close(priv->rxp.efd);
+ priv->rxp.efd = -1;
+ }
+ if (priv->rxp.evec != NULL) {
+ free(priv->rxp.evec);
+ priv->rxp.evec = NULL;
+ }
+ rte_errno = -rc;
+ return rc;
+}
+
+/**
+ * RX Interrupt control per subdevice.
+ *
+ * @param sdev
+ * Pointer to sub-device structure.
+ * @param op
+ * The operation be performed for the vector.
+ * Operation type of {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}.
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+failsafe_eth_rx_intr_ctl_subdevice(struct sub_device *sdev, int op)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev *fsdev;
+ int epfd;
+ uint16_t pid;
+ uint16_t qid;
+ struct rxq *fsrxq;
+ int rc;
+ int ret = 0;
+
+ if (sdev == NULL || (ETH(sdev) == NULL) ||
+ sdev->fs_dev == NULL || (PRIV(sdev->fs_dev) == NULL)) {
+ ERROR("Called with invalid arguments");
+ return -EINVAL;
+ }
+ dev = ETH(sdev);
+ fsdev = sdev->fs_dev;
+ epfd = PRIV(sdev->fs_dev)->rxp.efd;
+ pid = PORT_ID(sdev);
+
+ if (epfd <= 0) {
+ if (op == RTE_INTR_EVENT_ADD) {
+ ERROR("Proxy events are not initialized");
+ return -EBADF;
+ } else {
+ return 0;
+ }
+ }
+ if (dev->data->nb_rx_queues > fsdev->data->nb_rx_queues) {
+ ERROR("subdevice has too many queues,"
+ " Interrupts will not be enabled");
+ return -E2BIG;
+ }
+ for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
+ fsrxq = fsdev->data->rx_queues[qid];
+ rc = rte_eth_dev_rx_intr_ctl_q(pid, qid, epfd,
+ op, (void *)fsrxq);
+ if (rc) {
+ ERROR("rte_eth_dev_rx_intr_ctl_q failed for "
+ "port %d queue %d, epfd %d, error %d",
+ pid, qid, epfd, rc);
+ ret = rc;
+ }
+ }
+ return ret;
+}
+
+/**
+ * Install Rx interrupts subsystem for a subdevice.
+ * This is a support for dynamically adding subdevices.
+ *
+ * @param sdev
+ * Pointer to subdevice structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int failsafe_rx_intr_install_subdevice(struct sub_device *sdev)
+{
+ int rc;
+ int qid;
+ struct rte_eth_dev *fsdev;
+ struct rxq **rxq;
+ const struct rte_intr_conf *const intr_conf =
+ &ETH(sdev)->data->dev_conf.intr_conf;
+
+ fsdev = sdev->fs_dev;
+ rxq = (struct rxq **)fsdev->data->rx_queues;
+ if (intr_conf->rxq == 0)
+ return 0;
+ rc = failsafe_eth_rx_intr_ctl_subdevice(sdev, RTE_INTR_EVENT_ADD);
+ if (rc)
+ return rc;
+ /* enable interrupts on already-enabled queues */
+ for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) {
+ if (rxq[qid]->enable_events) {
+ int ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev),
+ qid);
+ if (ret && (ret != -ENOTSUP)) {
+ ERROR("Failed to enable interrupts on "
+ "port %d queue %d", PORT_ID(sdev), qid);
+ rc = ret;
+ }
+ }
+ }
+ return rc;
+}
+
+/**
+ * Uninstall Rx interrupts subsystem for a subdevice.
+ * This is a support for dynamically removing subdevices.
+ *
+ * @param sdev
+ * Pointer to subdevice structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+void failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev)
+{
+ int qid;
+ struct rte_eth_dev *fsdev;
+ struct rxq *fsrxq;
+
+ fsdev = sdev->fs_dev;
+ for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) {
+ if (qid < fsdev->data->nb_rx_queues) {
+ fsrxq = fsdev->data->rx_queues[qid];
+ if (fsrxq->enable_events)
+ rte_eth_dev_rx_intr_disable(PORT_ID(sdev),
+ qid);
+ }
+ }
+ failsafe_eth_rx_intr_ctl_subdevice(sdev, RTE_INTR_EVENT_DEL);
+}
+
+/**
+ * Uninstall failsafe Rx event proxy.
+ *
+ * @param priv
+ * Pointer to failsafe private structure.
+ */
+static void
+fs_rx_event_proxy_uninstall(struct fs_priv *priv)
+{
+ fs_rx_event_proxy_service_uninstall(priv);
+ if (priv->rxp.evec != NULL) {
+ free(priv->rxp.evec);
+ priv->rxp.evec = NULL;
+ }
+ if (priv->rxp.efd > 0) {
+ close(priv->rxp.efd);
+ priv->rxp.efd = -1;
+ }
+}
+
+/**
+ * Uninstall failsafe interrupt vector.
+ *
+ * @param priv
+ * Pointer to failsafe private structure.
+ */
+static void
+fs_rx_intr_vec_uninstall(struct fs_priv *priv)
+{
+ struct rte_intr_handle *intr_handle;
+
+ intr_handle = &priv->intr_handle;
+ if (intr_handle->intr_vec != NULL) {
+ free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+ intr_handle->nb_efd = 0;
+}
+
+/**
+ * Installs failsafe interrupt vector to be registered with EAL later on.
+ *
+ * @param priv
+ * Pointer to failsafe private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+fs_rx_intr_vec_install(struct fs_priv *priv)
+{
+ unsigned int i;
+ unsigned int rxqs_n;
+ unsigned int n;
+ unsigned int count;
+ struct rte_intr_handle *intr_handle;
+
+ rxqs_n = priv->dev->data->nb_rx_queues;
+ n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ count = 0;
+ intr_handle = &priv->intr_handle;
+ RTE_ASSERT(intr_handle->intr_vec == NULL);
+ /* Allocate the interrupt vector of the failsafe Rx proxy interrupts */
+ intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
+ if (intr_handle->intr_vec == NULL) {
+ fs_rx_intr_vec_uninstall(priv);
+ rte_errno = ENOMEM;
+ ERROR("Failed to allocate memory for interrupt vector,"
+ " Rx interrupts will not be supported");
+ return -rte_errno;
+ }
+ for (i = 0; i < n; i++) {
+ struct rxq *rxq = priv->dev->data->rx_queues[i];
+
+ /* Skip queues that cannot request interrupts. */
+ if (rxq == NULL || rxq->event_fd < 0) {
+ /* Use invalid intr_vec[] index to disable entry. */
+ intr_handle->intr_vec[i] =
+ RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID;
+ continue;
+ }
+ if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
+ rte_errno = E2BIG;
+ ERROR("Too many Rx queues for interrupt vector size"
+ " (%d), Rx interrupts cannot be enabled",
+ RTE_MAX_RXTX_INTR_VEC_ID);
+ fs_rx_intr_vec_uninstall(priv);
+ return -rte_errno;
+ }
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+ intr_handle->efds[count] = rxq->event_fd;
+ count++;
+ }
+ if (count == 0) {
+ fs_rx_intr_vec_uninstall(priv);
+ } else {
+ intr_handle->nb_efd = count;
+ intr_handle->efd_counter_size = sizeof(uint64_t);
+ }
+ return 0;
+}
+
+
+/**
+ * Uninstall failsafe Rx interrupts subsystem.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+void
+failsafe_rx_intr_uninstall(struct rte_eth_dev *dev)
+{
+ struct fs_priv *priv;
+ struct rte_intr_handle *intr_handle;
+
+ priv = PRIV(dev);
+ intr_handle = &priv->intr_handle;
+ rte_intr_free_epoll_fd(intr_handle);
+ fs_rx_event_proxy_uninstall(priv);
+ fs_rx_intr_vec_uninstall(priv);
+ dev->intr_handle = NULL;
+}
+
+/**
+ * Install failsafe Rx interrupts subsystem.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+failsafe_rx_intr_install(struct rte_eth_dev *dev)
+{
+ struct fs_priv *priv = PRIV(dev);
+ const struct rte_intr_conf *const intr_conf =
+ &priv->dev->data->dev_conf.intr_conf;
+
+ if (intr_conf->rxq == 0 || dev->intr_handle != NULL)
+ return 0;
+ if (fs_rx_intr_vec_install(priv) < 0)
+ return -rte_errno;
+ if (fs_rx_event_proxy_install(priv) < 0) {
+ fs_rx_intr_vec_uninstall(priv);
+ return -rte_errno;
+ }
+ dev->intr_handle = &priv->intr_handle;
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_ops.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_ops.c
new file mode 100644
index 00000000..24e91c93
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_ops.c
@@ -0,0 +1,1041 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_flow.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+
+#include "failsafe_private.h"
+
+static struct rte_eth_dev_info default_infos = {
+ /* Max possible number of elements */
+ .max_rx_pktlen = UINT32_MAX,
+ .max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
+ .max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
+ .max_mac_addrs = FAILSAFE_MAX_ETHADDR,
+ .max_hash_mac_addrs = UINT32_MAX,
+ .max_vfs = UINT16_MAX,
+ .max_vmdq_pools = UINT16_MAX,
+ .rx_desc_lim = {
+ .nb_max = UINT16_MAX,
+ .nb_min = 0,
+ .nb_align = 1,
+ .nb_seg_max = UINT16_MAX,
+ .nb_mtu_seg_max = UINT16_MAX,
+ },
+ .tx_desc_lim = {
+ .nb_max = UINT16_MAX,
+ .nb_min = 0,
+ .nb_align = 1,
+ .nb_seg_max = UINT16_MAX,
+ .nb_mtu_seg_max = UINT16_MAX,
+ },
+ /*
+ * Set of capabilities that can be verified upon
+ * configuring a sub-device.
+ */
+ .rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_MACSEC_STRIP |
+ DEV_RX_OFFLOAD_HEADER_SPLIT |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_TIMESTAMP |
+ DEV_RX_OFFLOAD_SECURITY,
+ .rx_queue_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_MACSEC_STRIP |
+ DEV_RX_OFFLOAD_HEADER_SPLIT |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_TIMESTAMP |
+ DEV_RX_OFFLOAD_SECURITY,
+ .tx_offload_capa =
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO,
+ .flow_type_rss_offloads =
+ ETH_RSS_IP |
+ ETH_RSS_UDP |
+ ETH_RSS_TCP,
+};
+
+static int
+fs_dev_configure(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV(sdev, i, dev) {
+ int rmv_interrupt = 0;
+ int lsc_interrupt = 0;
+ int lsc_enabled;
+
+ if (sdev->state != DEV_PROBED &&
+ !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
+ continue;
+
+ rmv_interrupt = ETH(sdev)->data->dev_flags &
+ RTE_ETH_DEV_INTR_RMV;
+ if (rmv_interrupt) {
+ DEBUG("Enabling RMV interrupts for sub_device %d", i);
+ dev->data->dev_conf.intr_conf.rmv = 1;
+ } else {
+ DEBUG("sub_device %d does not support RMV event", i);
+ }
+ lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
+ lsc_interrupt = lsc_enabled &&
+ (ETH(sdev)->data->dev_flags &
+ RTE_ETH_DEV_INTR_LSC);
+ if (lsc_interrupt) {
+ DEBUG("Enabling LSC interrupts for sub_device %d", i);
+ dev->data->dev_conf.intr_conf.lsc = 1;
+ } else if (lsc_enabled && !lsc_interrupt) {
+ DEBUG("Disabling LSC interrupts for sub_device %d", i);
+ dev->data->dev_conf.intr_conf.lsc = 0;
+ }
+ DEBUG("Configuring sub-device %d", i);
+ ret = rte_eth_dev_configure(PORT_ID(sdev),
+ dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues,
+ &dev->data->dev_conf);
+ if (ret) {
+ if (!fs_err(sdev, ret))
+ continue;
+ ERROR("Could not configure sub_device %d", i);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ if (rmv_interrupt && sdev->rmv_callback == 0) {
+ ret = rte_eth_dev_callback_register(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_RMV,
+ failsafe_eth_rmv_event_callback,
+ sdev);
+ if (ret)
+ WARN("Failed to register RMV callback for sub_device %d",
+ SUB_ID(sdev));
+ else
+ sdev->rmv_callback = 1;
+ }
+ dev->data->dev_conf.intr_conf.rmv = 0;
+ if (lsc_interrupt && sdev->lsc_callback == 0) {
+ ret = rte_eth_dev_callback_register(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_LSC,
+ failsafe_eth_lsc_event_callback,
+ dev);
+ if (ret)
+ WARN("Failed to register LSC callback for sub_device %d",
+ SUB_ID(sdev));
+ else
+ sdev->lsc_callback = 1;
+ }
+ dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
+ sdev->state = DEV_ACTIVE;
+ }
+ if (PRIV(dev)->state < DEV_ACTIVE)
+ PRIV(dev)->state = DEV_ACTIVE;
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+static int
+fs_dev_start(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ ret = failsafe_rx_intr_install(dev);
+ if (ret) {
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state != DEV_ACTIVE)
+ continue;
+ DEBUG("Starting sub_device %d", i);
+ ret = rte_eth_dev_start(PORT_ID(sdev));
+ if (ret) {
+ if (!fs_err(sdev, ret))
+ continue;
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ ret = failsafe_rx_intr_install_subdevice(sdev);
+ if (ret) {
+ if (!fs_err(sdev, ret))
+ continue;
+ rte_eth_dev_stop(PORT_ID(sdev));
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ sdev->state = DEV_STARTED;
+ }
+ if (PRIV(dev)->state < DEV_STARTED)
+ PRIV(dev)->state = DEV_STARTED;
+ fs_switch_dev(dev, NULL);
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+static void
+fs_dev_stop(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ fs_lock(dev, 0);
+ PRIV(dev)->state = DEV_STARTED - 1;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
+ rte_eth_dev_stop(PORT_ID(sdev));
+ failsafe_rx_intr_uninstall_subdevice(sdev);
+ sdev->state = DEV_STARTED - 1;
+ }
+ failsafe_rx_intr_uninstall(dev);
+ fs_unlock(dev, 0);
+}
+
+static int
+fs_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
+ ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
+ if ((ret = fs_err(sdev, ret))) {
+ ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
+ " with error %d", i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+static int
+fs_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
+ ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
+ if ((ret = fs_err(sdev, ret))) {
+ ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
+ " with error %d", i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+static void fs_dev_free_queues(struct rte_eth_dev *dev);
+static void
+fs_dev_close(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ fs_lock(dev, 0);
+ failsafe_hotplug_alarm_cancel(dev);
+ if (PRIV(dev)->state == DEV_STARTED)
+ dev->dev_ops->dev_stop(dev);
+ PRIV(dev)->state = DEV_ACTIVE - 1;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Closing sub_device %d", i);
+ failsafe_eth_dev_unregister_callbacks(sdev);
+ rte_eth_dev_close(PORT_ID(sdev));
+ sdev->state = DEV_ACTIVE - 1;
+ }
+ fs_dev_free_queues(dev);
+ fs_unlock(dev, 0);
+}
+
+static void
+fs_rx_queue_release(void *queue)
+{
+ struct rte_eth_dev *dev;
+ struct sub_device *sdev;
+ uint8_t i;
+ struct rxq *rxq;
+
+ if (queue == NULL)
+ return;
+ rxq = queue;
+ dev = rxq->priv->dev;
+ fs_lock(dev, 0);
+ if (rxq->event_fd > 0)
+ close(rxq->event_fd);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ SUBOPS(sdev, rx_queue_release)
+ (ETH(sdev)->data->rx_queues[rxq->qid]);
+ dev->data->rx_queues[rxq->qid] = NULL;
+ rte_free(rxq);
+ fs_unlock(dev, 0);
+}
+
+static int
+fs_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ /*
+ * FIXME: Add a proper interface in rte_eal_interrupts for
+ * allocating eventfd as an interrupt vector.
+ * For the time being, fake as if we are using MSIX interrupts,
+ * this will cause rte_intr_efd_enable to allocate an eventfd for us.
+ */
+ struct rte_intr_handle intr_handle = {
+ .type = RTE_INTR_HANDLE_VFIO_MSIX,
+ .efds = { -1, },
+ };
+ struct sub_device *sdev;
+ struct rxq *rxq;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ rxq = dev->data->rx_queues[rx_queue_id];
+ if (rxq != NULL) {
+ fs_rx_queue_release(rxq);
+ dev->data->rx_queues[rx_queue_id] = NULL;
+ }
+ rxq = rte_zmalloc(NULL,
+ sizeof(*rxq) +
+ sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
+ RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL) {
+ fs_unlock(dev, 0);
+ return -ENOMEM;
+ }
+ FOREACH_SUBDEV(sdev, i, dev)
+ rte_atomic64_init(&rxq->refcnt[i]);
+ rxq->qid = rx_queue_id;
+ rxq->socket_id = socket_id;
+ rxq->info.mp = mb_pool;
+ rxq->info.conf = *rx_conf;
+ rxq->info.nb_desc = nb_rx_desc;
+ rxq->priv = PRIV(dev);
+ rxq->sdev = PRIV(dev)->subs;
+ ret = rte_intr_efd_enable(&intr_handle, 1);
+ if (ret < 0) {
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ rxq->event_fd = intr_handle.efds[0];
+ dev->data->rx_queues[rx_queue_id] = rxq;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
+ rx_queue_id,
+ nb_rx_desc, socket_id,
+ rx_conf, mb_pool);
+ if ((ret = fs_err(sdev, ret))) {
+ ERROR("RX queue setup failed for sub_device %d", i);
+ goto free_rxq;
+ }
+ }
+ fs_unlock(dev, 0);
+ return 0;
+free_rxq:
+ fs_rx_queue_release(rxq);
+ fs_unlock(dev, 0);
+ return ret;
+}
+
+static int
+fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct rxq *rxq;
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+ int rc = 0;
+
+ fs_lock(dev, 0);
+ if (idx >= dev->data->nb_rx_queues) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+ rxq = dev->data->rx_queues[idx];
+ if (rxq == NULL || rxq->event_fd <= 0) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+ /* Fail if proxy service is nor running. */
+ if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
+ ERROR("failsafe interrupt services are not running");
+ rc = -EAGAIN;
+ goto unlock;
+ }
+ rxq->enable_events = 1;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
+ ret = fs_err(sdev, ret);
+ if (ret)
+ rc = ret;
+ }
+unlock:
+ fs_unlock(dev, 0);
+ if (rc)
+ rte_errno = -rc;
+ return rc;
+}
+
+static int
+fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct rxq *rxq;
+ struct sub_device *sdev;
+ uint64_t u64;
+ uint8_t i;
+ int rc = 0;
+ int ret;
+
+ fs_lock(dev, 0);
+ if (idx >= dev->data->nb_rx_queues) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+ rxq = dev->data->rx_queues[idx];
+ if (rxq == NULL || rxq->event_fd <= 0) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+ rxq->enable_events = 0;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
+ ret = fs_err(sdev, ret);
+ if (ret)
+ rc = ret;
+ }
+ /* Clear pending events */
+ while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
+ ;
+unlock:
+ fs_unlock(dev, 0);
+ if (rc)
+ rte_errno = -rc;
+ return rc;
+}
+
+static void
+fs_tx_queue_release(void *queue)
+{
+ struct rte_eth_dev *dev;
+ struct sub_device *sdev;
+ uint8_t i;
+ struct txq *txq;
+
+ if (queue == NULL)
+ return;
+ txq = queue;
+ dev = txq->priv->dev;
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ SUBOPS(sdev, tx_queue_release)
+ (ETH(sdev)->data->tx_queues[txq->qid]);
+ dev->data->tx_queues[txq->qid] = NULL;
+ rte_free(txq);
+ fs_unlock(dev, 0);
+}
+
+static int
+fs_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct sub_device *sdev;
+ struct txq *txq;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ txq = dev->data->tx_queues[tx_queue_id];
+ if (txq != NULL) {
+ fs_tx_queue_release(txq);
+ dev->data->tx_queues[tx_queue_id] = NULL;
+ }
+ txq = rte_zmalloc("ethdev TX queue",
+ sizeof(*txq) +
+ sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
+ RTE_CACHE_LINE_SIZE);
+ if (txq == NULL) {
+ fs_unlock(dev, 0);
+ return -ENOMEM;
+ }
+ FOREACH_SUBDEV(sdev, i, dev)
+ rte_atomic64_init(&txq->refcnt[i]);
+ txq->qid = tx_queue_id;
+ txq->socket_id = socket_id;
+ txq->info.conf = *tx_conf;
+ txq->info.nb_desc = nb_tx_desc;
+ txq->priv = PRIV(dev);
+ dev->data->tx_queues[tx_queue_id] = txq;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
+ tx_queue_id,
+ nb_tx_desc, socket_id,
+ tx_conf);
+ if ((ret = fs_err(sdev, ret))) {
+ ERROR("TX queue setup failed for sub_device %d", i);
+ goto free_txq;
+ }
+ }
+ fs_unlock(dev, 0);
+ return 0;
+free_txq:
+ fs_tx_queue_release(txq);
+ fs_unlock(dev, 0);
+ return ret;
+}
+
+static void
+fs_dev_free_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ fs_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ fs_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+static void
+fs_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_promiscuous_enable(PORT_ID(sdev));
+ fs_unlock(dev, 0);
+}
+
+static void
+fs_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_promiscuous_disable(PORT_ID(sdev));
+ fs_unlock(dev, 0);
+}
+
+static void
+fs_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_allmulticast_enable(PORT_ID(sdev));
+ fs_unlock(dev, 0);
+}
+
+static void
+fs_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_allmulticast_disable(PORT_ID(sdev));
+ fs_unlock(dev, 0);
+}
+
+static int
+fs_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling link_update on sub_device %d", i);
+ ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
+ if (ret && ret != -1 && sdev->remove == 0 &&
+ rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
+ ERROR("Link update failed for sub_device %d with error %d",
+ i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ if (TX_SUBDEV(dev)) {
+ struct rte_eth_link *l1;
+ struct rte_eth_link *l2;
+
+ l1 = &dev->data->dev_link;
+ l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
+ if (memcmp(l1, l2, sizeof(*l1))) {
+ *l1 = *l2;
+ fs_unlock(dev, 0);
+ return 0;
+ }
+ }
+ fs_unlock(dev, 0);
+ return -1;
+}
+
+static int
+fs_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct rte_eth_stats backup;
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
+ uint64_t *timestamp = &sdev->stats_snapshot.timestamp;
+
+ rte_memcpy(&backup, snapshot, sizeof(backup));
+ ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
+ if (ret) {
+ if (!fs_err(sdev, ret)) {
+ rte_memcpy(snapshot, &backup, sizeof(backup));
+ goto inc;
+ }
+ ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
+ i, ret);
+ *timestamp = 0;
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ *timestamp = rte_rdtsc();
+inc:
+ failsafe_stats_increment(stats, snapshot);
+ }
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+static void
+fs_stats_reset(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ rte_eth_stats_reset(PORT_ID(sdev));
+ memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
+ }
+ memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
+ fs_unlock(dev, 0);
+}
+
+/**
+ * Fail-safe dev_infos_get rules:
+ *
+ * No sub_device:
+ * Numerables:
+ * Use the maximum possible values for any field, so as not
+ * to impede any further configuration effort.
+ * Capabilities:
+ * Limits capabilities to those that are understood by the
+ * fail-safe PMD. This understanding stems from the fail-safe
+ * being capable of verifying that the related capability is
+ * expressed within the device configuration (struct rte_eth_conf).
+ *
+ * At least one probed sub_device:
+ * Numerables:
+ * Uses values from the active probed sub_device
+ * The rationale here is that if any sub_device is less capable
+ * (for example concerning the number of queues) than the active
+ * sub_device, then its subsequent configuration will fail.
+ * It is impossible to foresee this failure when the failing sub_device
+ * is supposed to be plugged-in later on, so the configuration process
+ * is the single point of failure and error reporting.
+ * Capabilities:
+ * Uses a logical AND of RX capabilities among
+ * all sub_devices and the default capabilities.
+ * Uses a logical AND of TX capabilities among
+ * the active probed sub_device and the default capabilities.
+ *
+ */
+static void
+fs_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *infos)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ sdev = TX_SUBDEV(dev);
+ if (sdev == NULL) {
+ DEBUG("No probed device, using default infos");
+ rte_memcpy(&PRIV(dev)->infos, &default_infos,
+ sizeof(default_infos));
+ } else {
+ uint64_t rx_offload_capa;
+ uint64_t rxq_offload_capa;
+ uint64_t rss_hf_offload_capa;
+
+ rx_offload_capa = default_infos.rx_offload_capa;
+ rxq_offload_capa = default_infos.rx_queue_offload_capa;
+ rss_hf_offload_capa = default_infos.flow_type_rss_offloads;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ rte_eth_dev_info_get(PORT_ID(sdev),
+ &PRIV(dev)->infos);
+ rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
+ rxq_offload_capa &=
+ PRIV(dev)->infos.rx_queue_offload_capa;
+ rss_hf_offload_capa &=
+ PRIV(dev)->infos.flow_type_rss_offloads;
+ }
+ sdev = TX_SUBDEV(dev);
+ rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
+ PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
+ PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
+ PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa;
+ PRIV(dev)->infos.tx_offload_capa &=
+ default_infos.tx_offload_capa;
+ PRIV(dev)->infos.tx_queue_offload_capa &=
+ default_infos.tx_queue_offload_capa;
+ }
+ rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
+}
+
+static const uint32_t *
+fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ struct rte_eth_dev *edev;
+ const uint32_t *ret;
+
+ fs_lock(dev, 0);
+ sdev = TX_SUBDEV(dev);
+ if (sdev == NULL) {
+ ret = NULL;
+ goto unlock;
+ }
+ edev = ETH(sdev);
+ /* ENOTSUP: counts as no supported ptypes */
+ if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
+ ret = NULL;
+ goto unlock;
+ }
+ /*
+ * The API does not permit to do a clean AND of all ptypes,
+ * It is also incomplete by design and we do not really care
+ * to have a best possible value in this context.
+ * We just return the ptypes of the device of highest
+ * priority, usually the PREFERRED device.
+ */
+ ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
+unlock:
+ fs_unlock(dev, 0);
+ return ret;
+}
+
+static int
+fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
+ ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
+ if ((ret = fs_err(sdev, ret))) {
+ ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
+ i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+static int
+fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
+ ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
+ if ((ret = fs_err(sdev, ret))) {
+ ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
+ " with error %d", i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+static int
+fs_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct sub_device *sdev;
+ int ret;
+
+ fs_lock(dev, 0);
+ sdev = TX_SUBDEV(dev);
+ if (sdev == NULL) {
+ ret = 0;
+ goto unlock;
+ }
+ if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
+ ret = -ENOTSUP;
+ goto unlock;
+ }
+ ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
+unlock:
+ fs_unlock(dev, 0);
+ return ret;
+}
+
+static int
+fs_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
+ ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
+ if ((ret = fs_err(sdev, ret))) {
+ ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
+ " with error %d", i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+static void
+fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ fs_lock(dev, 0);
+ /* No check: already done within the rte_eth_dev_mac_addr_remove
+ * call for the fail-safe device.
+ */
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
+ &dev->data->mac_addrs[index]);
+ PRIV(dev)->mac_addr_pool[index] = 0;
+ fs_unlock(dev, 0);
+}
+
+static int
+fs_mac_addr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t vmdq)
+{
+ struct sub_device *sdev;
+ int ret;
+ uint8_t i;
+
+ RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
+ if ((ret = fs_err(sdev, ret))) {
+ ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
+ PRIu8 " with error %d", i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ if (index >= PRIV(dev)->nb_mac_addr) {
+ DEBUG("Growing mac_addrs array");
+ PRIV(dev)->nb_mac_addr = index;
+ }
+ PRIV(dev)->mac_addr_pool[index] = vmdq;
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+static int
+fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
+ i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ fs_unlock(dev, 0);
+
+ return 0;
+}
+
+static int
+fs_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_rss_hash_update"
+ " failed for sub_device %d with error %d",
+ i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ fs_unlock(dev, 0);
+
+ return 0;
+}
+
+static int
+fs_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type type,
+ enum rte_filter_op op,
+ void *arg)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ if (type == RTE_ETH_FILTER_GENERIC &&
+ op == RTE_ETH_FILTER_GET) {
+ *(const void **)arg = &fs_flow_ops;
+ return 0;
+ }
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
+ ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
+ if ((ret = fs_err(sdev, ret))) {
+ ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
+ " with error %d", i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+const struct eth_dev_ops failsafe_ops = {
+ .dev_configure = fs_dev_configure,
+ .dev_start = fs_dev_start,
+ .dev_stop = fs_dev_stop,
+ .dev_set_link_down = fs_dev_set_link_down,
+ .dev_set_link_up = fs_dev_set_link_up,
+ .dev_close = fs_dev_close,
+ .promiscuous_enable = fs_promiscuous_enable,
+ .promiscuous_disable = fs_promiscuous_disable,
+ .allmulticast_enable = fs_allmulticast_enable,
+ .allmulticast_disable = fs_allmulticast_disable,
+ .link_update = fs_link_update,
+ .stats_get = fs_stats_get,
+ .stats_reset = fs_stats_reset,
+ .dev_infos_get = fs_dev_infos_get,
+ .dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
+ .mtu_set = fs_mtu_set,
+ .vlan_filter_set = fs_vlan_filter_set,
+ .rx_queue_setup = fs_rx_queue_setup,
+ .tx_queue_setup = fs_tx_queue_setup,
+ .rx_queue_release = fs_rx_queue_release,
+ .tx_queue_release = fs_tx_queue_release,
+ .rx_queue_intr_enable = fs_rx_intr_enable,
+ .rx_queue_intr_disable = fs_rx_intr_disable,
+ .flow_ctrl_get = fs_flow_ctrl_get,
+ .flow_ctrl_set = fs_flow_ctrl_set,
+ .mac_addr_remove = fs_mac_addr_remove,
+ .mac_addr_add = fs_mac_addr_add,
+ .mac_addr_set = fs_mac_addr_set,
+ .rss_hash_update = fs_rss_hash_update,
+ .filter_ctrl = fs_filter_ctrl,
+};
diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_private.h b/src/spdk/dpdk/drivers/net/failsafe/failsafe_private.h
new file mode 100644
index 00000000..886af861
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_private.h
@@ -0,0 +1,486 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_
+#define _RTE_ETH_FAILSAFE_PRIVATE_H_
+
+#include <sys/queue.h>
+#include <pthread.h>
+
+#include <rte_atomic.h>
+#include <rte_dev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_devargs.h>
+#include <rte_interrupts.h>
+
+#define FAILSAFE_DRIVER_NAME "Fail-safe PMD"
+#define FAILSAFE_OWNER_NAME "Fail-safe"
+
+#define PMD_FAILSAFE_MAC_KVARG "mac"
+#define PMD_FAILSAFE_HOTPLUG_POLL_KVARG "hotplug_poll"
+#define PMD_FAILSAFE_PARAM_STRING \
+ "dev(<ifc>)," \
+ "exec(<shell command>)," \
+ "fd(<fd number>)," \
+ "mac=mac_addr," \
+ "hotplug_poll=u64" \
+ ""
+
+#define FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS 2000
+
+#define FAILSAFE_MAX_ETHPORTS 2
+#define FAILSAFE_MAX_ETHADDR 128
+
+#define DEVARGS_MAXLEN 4096
+
+enum rxp_service_state {
+ SS_NO_SERVICE = 0,
+ SS_REGISTERED,
+ SS_READY,
+ SS_RUNNING,
+};
+
+/* TYPES */
+
+struct rx_proxy {
+ /* epoll file descriptor */
+ int efd;
+ /* event vector to be used by epoll */
+ struct rte_epoll_event *evec;
+ /* rte service id */
+ uint32_t sid;
+ /* service core id */
+ uint32_t scid;
+ enum rxp_service_state sstate;
+};
+
+struct rxq {
+ struct fs_priv *priv;
+ uint16_t qid;
+ /* next sub_device to poll */
+ struct sub_device *sdev;
+ unsigned int socket_id;
+ int event_fd;
+ unsigned int enable_events:1;
+ struct rte_eth_rxq_info info;
+ rte_atomic64_t refcnt[];
+};
+
+struct txq {
+ struct fs_priv *priv;
+ uint16_t qid;
+ unsigned int socket_id;
+ struct rte_eth_txq_info info;
+ rte_atomic64_t refcnt[];
+};
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next;
+ /* sub_flows */
+ struct rte_flow *flows[FAILSAFE_MAX_ETHPORTS];
+ /* flow description for synchronization */
+ struct rte_flow_desc *fd;
+};
+
+enum dev_state {
+ DEV_UNDEFINED,
+ DEV_PARSED,
+ DEV_PROBED,
+ DEV_ACTIVE,
+ DEV_STARTED,
+};
+
+struct fs_stats {
+ struct rte_eth_stats stats;
+ uint64_t timestamp;
+};
+
+struct sub_device {
+ /* Exhaustive DPDK device description */
+ struct sub_device *next;
+ struct rte_devargs devargs;
+ struct rte_bus *bus;
+ struct rte_device *dev;
+ struct rte_eth_dev *edev;
+ uint8_t sid;
+ /* Device state machine */
+ enum dev_state state;
+ /* Last stats snapshot passed to user */
+ struct fs_stats stats_snapshot;
+ /* Some device are defined as a command line */
+ char *cmdline;
+ /* Others are retrieved through a file descriptor */
+ char *fd_str;
+ /* fail-safe device backreference */
+ struct rte_eth_dev *fs_dev;
+ /* flag calling for recollection */
+ volatile unsigned int remove:1;
+ /* flow isolation state */
+ int flow_isolated:1;
+ /* RMV callback registration state */
+ unsigned int rmv_callback:1;
+ /* LSC callback registration state */
+ unsigned int lsc_callback:1;
+};
+
+struct fs_priv {
+ struct rte_eth_dev *dev;
+ /*
+ * Set of sub_devices.
+ * subs[0] is the preferred device
+ * any other is just another slave
+ */
+ struct sub_device *subs;
+ uint8_t subs_head; /* if head == tail, no subs */
+ uint8_t subs_tail; /* first invalid */
+ uint8_t subs_tx; /* current emitting device */
+ uint8_t current_probed;
+ /* flow mapping */
+ TAILQ_HEAD(sub_flows, rte_flow) flow_list;
+ /* current number of mac_addr slots allocated. */
+ uint32_t nb_mac_addr;
+ struct ether_addr mac_addrs[FAILSAFE_MAX_ETHADDR];
+ uint32_t mac_addr_pool[FAILSAFE_MAX_ETHADDR];
+ /* current capabilities */
+ struct rte_eth_dev_info infos;
+ struct rte_eth_dev_owner my_owner; /* Unique owner. */
+ struct rte_intr_handle intr_handle; /* Port interrupt handle. */
+ /*
+ * Fail-safe state machine.
+ * This level will be tracking state of the EAL and eth
+ * layer at large as defined by the user application.
+ * It will then steer the sub_devices toward the same
+ * synchronized state.
+ */
+ enum dev_state state;
+ struct rte_eth_stats stats_accumulator;
+ /*
+ * Rx interrupts/events proxy.
+ * The PMD issues Rx events to the EAL on behalf of its subdevices,
+ * it does that by registering an event-fd for each of its queues with
+ * the EAL. A PMD service thread listens to all the Rx events from the
+ * subdevices, when an Rx event is issued by a subdevice it will be
+ * caught by this service with will trigger an Rx event in the
+ * appropriate failsafe Rx queue.
+ */
+ struct rx_proxy rxp;
+ pthread_mutex_t hotplug_mutex;
+ /* Hot-plug mutex is locked by the alarm mechanism. */
+ volatile unsigned int alarm_lock:1;
+ unsigned int pending_alarm:1; /* An alarm is pending */
+ /* flow isolation state */
+ int flow_isolated:1;
+};
+
+/* FAILSAFE_INTR */
+
+int failsafe_rx_intr_install(struct rte_eth_dev *dev);
+void failsafe_rx_intr_uninstall(struct rte_eth_dev *dev);
+int failsafe_rx_intr_install_subdevice(struct sub_device *sdev);
+void failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev);
+
+/* MISC */
+
+int failsafe_hotplug_alarm_install(struct rte_eth_dev *dev);
+int failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev);
+
+/* RX / TX */
+
+void set_burst_fn(struct rte_eth_dev *dev, int force_safe);
+
+uint16_t failsafe_rx_burst(void *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t failsafe_tx_burst(void *txq,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
+uint16_t failsafe_rx_burst_fast(void *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t failsafe_tx_burst_fast(void *txq,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
+/* ARGS */
+
+int failsafe_args_parse(struct rte_eth_dev *dev, const char *params);
+void failsafe_args_free(struct rte_eth_dev *dev);
+int failsafe_args_count_subdevice(struct rte_eth_dev *dev, const char *params);
+int failsafe_args_parse_subs(struct rte_eth_dev *dev);
+
+/* EAL */
+
+int failsafe_eal_init(struct rte_eth_dev *dev);
+int failsafe_eal_uninit(struct rte_eth_dev *dev);
+
+/* ETH_DEV */
+
+int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev);
+void failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev);
+void failsafe_dev_remove(struct rte_eth_dev *dev);
+void failsafe_stats_increment(struct rte_eth_stats *to,
+ struct rte_eth_stats *from);
+int failsafe_eth_rmv_event_callback(uint16_t port_id,
+ enum rte_eth_event_type type,
+ void *arg, void *out);
+int failsafe_eth_lsc_event_callback(uint16_t port_id,
+ enum rte_eth_event_type event,
+ void *cb_arg, void *out);
+int failsafe_eth_new_event_callback(uint16_t port_id,
+ enum rte_eth_event_type event,
+ void *cb_arg, void *out);
+
+/* GLOBALS */
+
+extern const char pmd_failsafe_driver_name[];
+extern const struct eth_dev_ops failsafe_ops;
+extern const struct rte_flow_ops fs_flow_ops;
+extern uint64_t hotplug_poll;
+extern int mac_from_arg;
+
+/* HELPERS */
+
+/* dev: (struct rte_eth_dev *) fail-safe device */
+#define PRIV(dev) \
+ ((struct fs_priv *)(dev)->data->dev_private)
+
+/* sdev: (struct sub_device *) */
+#define ETH(sdev) \
+ ((sdev)->edev)
+
+/* sdev: (struct sub_device *) */
+#define PORT_ID(sdev) \
+ (ETH(sdev)->data->port_id)
+
+/* sdev: (struct sub_device *) */
+#define SUB_ID(sdev) \
+ ((sdev)->sid)
+
+/**
+ * Stateful iterator construct over fail-safe sub-devices:
+ * s: (struct sub_device *), iterator
+ * i: (uint8_t), increment
+ * dev: (struct rte_eth_dev *), fail-safe ethdev
+ * state: (enum dev_state), minimum acceptable device state
+ */
+#define FOREACH_SUBDEV_STATE(s, i, dev, state) \
+ for (s = fs_find_next((dev), 0, state, &i); \
+ s != NULL; \
+ s = fs_find_next((dev), i + 1, state, &i))
+
+/**
+ * Iterator construct over fail-safe sub-devices:
+ * s: (struct sub_device *), iterator
+ * i: (uint8_t), increment
+ * dev: (struct rte_eth_dev *), fail-safe ethdev
+ */
+#define FOREACH_SUBDEV(s, i, dev) \
+ FOREACH_SUBDEV_STATE(s, i, dev, DEV_UNDEFINED)
+
+/* dev: (struct rte_eth_dev *) fail-safe device */
+#define PREFERRED_SUBDEV(dev) \
+ (&PRIV(dev)->subs[0])
+
+/* dev: (struct rte_eth_dev *) fail-safe device */
+#define TX_SUBDEV(dev) \
+ (PRIV(dev)->subs_tx >= PRIV(dev)->subs_tail ? NULL \
+ : (PRIV(dev)->subs[PRIV(dev)->subs_tx].state < DEV_PROBED ? NULL \
+ : &PRIV(dev)->subs[PRIV(dev)->subs_tx]))
+
+/**
+ * s: (struct sub_device *)
+ * ops: (struct eth_dev_ops) member
+ */
+#define SUBOPS(s, ops) \
+ (ETH(s)->dev_ops->ops)
+
+/**
+ * Atomic guard
+ */
+
+/**
+ * a: (rte_atomic64_t)
+ */
+#define FS_ATOMIC_P(a) \
+ rte_atomic64_set(&(a), 1)
+
+/**
+ * a: (rte_atomic64_t)
+ */
+#define FS_ATOMIC_V(a) \
+ rte_atomic64_set(&(a), 0)
+
+/**
+ * s: (struct sub_device *)
+ * i: uint16_t qid
+ */
+#define FS_ATOMIC_RX(s, i) \
+ rte_atomic64_read( \
+ &((struct rxq *)((s)->fs_dev->data->rx_queues[i]))->refcnt[(s)->sid] \
+ )
+/**
+ * s: (struct sub_device *)
+ * i: uint16_t qid
+ */
+#define FS_ATOMIC_TX(s, i) \
+ rte_atomic64_read( \
+ &((struct txq *)((s)->fs_dev->data->tx_queues[i]))->refcnt[(s)->sid] \
+ )
+
+#ifdef RTE_EXEC_ENV_BSDAPP
+#define FS_THREADID_TYPE void*
+#define FS_THREADID_FMT "p"
+#else
+#define FS_THREADID_TYPE unsigned long
+#define FS_THREADID_FMT "lu"
+#endif
+
+extern int failsafe_logtype;
+
+#define LOG__(l, m, ...) \
+ rte_log(RTE_LOG_ ## l, failsafe_logtype, \
+ "net_failsafe: " m "%c", __VA_ARGS__)
+
+#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
+#define DEBUG(...) LOG_(DEBUG, __VA_ARGS__)
+#define INFO(...) LOG_(INFO, __VA_ARGS__)
+#define WARN(...) LOG_(WARNING, __VA_ARGS__)
+#define ERROR(...) LOG_(ERR, __VA_ARGS__)
+
+/* inlined functions */
+
+static inline struct sub_device *
+fs_find_next(struct rte_eth_dev *dev,
+ uint8_t sid,
+ enum dev_state min_state,
+ uint8_t *sid_out)
+{
+ struct sub_device *subs;
+ uint8_t tail;
+
+ subs = PRIV(dev)->subs;
+ tail = PRIV(dev)->subs_tail;
+ while (sid < tail) {
+ if (subs[sid].state >= min_state)
+ break;
+ sid++;
+ }
+ *sid_out = sid;
+ if (sid >= tail)
+ return NULL;
+ return &subs[sid];
+}
+
+/*
+ * Lock hot-plug mutex.
+ * is_alarm means that the caller is, for sure, the hot-plug alarm mechanism.
+ */
+static inline int
+fs_lock(struct rte_eth_dev *dev, unsigned int is_alarm)
+{
+ int ret;
+
+ if (is_alarm) {
+ ret = pthread_mutex_trylock(&PRIV(dev)->hotplug_mutex);
+ if (ret) {
+ DEBUG("Hot-plug mutex lock trying failed(%s), will try"
+ " again later...", strerror(ret));
+ return ret;
+ }
+ PRIV(dev)->alarm_lock = 1;
+ } else {
+ ret = pthread_mutex_lock(&PRIV(dev)->hotplug_mutex);
+ if (ret) {
+ ERROR("Cannot lock mutex(%s)", strerror(ret));
+ return ret;
+ }
+ }
+ DEBUG("Hot-plug mutex was locked by thread %" FS_THREADID_FMT "%s",
+ (FS_THREADID_TYPE)pthread_self(),
+ PRIV(dev)->alarm_lock ? " by the hot-plug alarm" : "");
+ return ret;
+}
+
+/*
+ * Unlock hot-plug mutex.
+ * is_alarm means that the caller is, for sure, the hot-plug alarm mechanism.
+ */
+static inline void
+fs_unlock(struct rte_eth_dev *dev, unsigned int is_alarm)
+{
+ int ret;
+ unsigned int prev_alarm_lock = PRIV(dev)->alarm_lock;
+
+ if (is_alarm) {
+ RTE_ASSERT(PRIV(dev)->alarm_lock == 1);
+ PRIV(dev)->alarm_lock = 0;
+ }
+ ret = pthread_mutex_unlock(&PRIV(dev)->hotplug_mutex);
+ if (ret)
+ ERROR("Cannot unlock hot-plug mutex(%s)", strerror(ret));
+ else
+ DEBUG("Hot-plug mutex was unlocked by thread %" FS_THREADID_FMT "%s",
+ (FS_THREADID_TYPE)pthread_self(),
+ prev_alarm_lock ? " by the hot-plug alarm" : "");
+}
+
+/*
+ * Switch emitting device.
+ * If banned is set, banned must not be considered for
+ * the role of emitting device.
+ */
+static inline void
+fs_switch_dev(struct rte_eth_dev *dev,
+ struct sub_device *banned)
+{
+ struct sub_device *txd;
+ enum dev_state req_state;
+
+ req_state = PRIV(dev)->state;
+ txd = TX_SUBDEV(dev);
+ if (PREFERRED_SUBDEV(dev)->state >= req_state &&
+ PREFERRED_SUBDEV(dev) != banned) {
+ if (txd != PREFERRED_SUBDEV(dev) &&
+ (txd == NULL ||
+ (req_state == DEV_STARTED) ||
+ (txd && txd->state < DEV_STARTED))) {
+ DEBUG("Switching tx_dev to preferred sub_device");
+ PRIV(dev)->subs_tx = 0;
+ }
+ } else if ((txd && txd->state < req_state) ||
+ txd == NULL ||
+ txd == banned) {
+ struct sub_device *sdev = NULL;
+ uint8_t i;
+
+ /* Using acceptable device */
+ FOREACH_SUBDEV_STATE(sdev, i, dev, req_state) {
+ if (sdev == banned)
+ continue;
+ DEBUG("Switching tx_dev to sub_device %d",
+ i);
+ PRIV(dev)->subs_tx = i;
+ break;
+ }
+ if (i >= PRIV(dev)->subs_tail || sdev == NULL) {
+ DEBUG("No device ready, deactivating tx_dev");
+ PRIV(dev)->subs_tx = PRIV(dev)->subs_tail;
+ }
+ } else {
+ return;
+ }
+ set_burst_fn(dev, 0);
+ rte_wmb();
+}
+
+/*
+ * Adjust error value and rte_errno to the fail-safe actual error value.
+ */
+static inline int
+fs_err(struct sub_device *sdev, int err)
+{
+ /* A device removal shouldn't be reported as an error. */
+ if (sdev->remove == 1 || err == -EIO)
+ return rte_errno = 0;
+ return err;
+}
+#endif /* _RTE_ETH_FAILSAFE_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_rxtx.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_rxtx.c
new file mode 100644
index 00000000..7bd0f963
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_rxtx.c
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <rte_atomic.h>
+#include <rte_debug.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+
+#include "failsafe_private.h"
+
+static inline int
+fs_rx_unsafe(struct sub_device *sdev)
+{
+ return (ETH(sdev) == NULL) ||
+ (ETH(sdev)->rx_pkt_burst == NULL) ||
+ (sdev->state != DEV_STARTED) ||
+ (sdev->remove != 0);
+}
+
+static inline int
+fs_tx_unsafe(struct sub_device *sdev)
+{
+ return (sdev == NULL) ||
+ (ETH(sdev) == NULL) ||
+ (ETH(sdev)->tx_pkt_burst == NULL) ||
+ (sdev->state != DEV_STARTED);
+}
+
+void
+set_burst_fn(struct rte_eth_dev *dev, int force_safe)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int need_safe;
+ int safe_set;
+
+ need_safe = force_safe;
+ FOREACH_SUBDEV(sdev, i, dev)
+ need_safe |= fs_rx_unsafe(sdev);
+ safe_set = (dev->rx_pkt_burst == &failsafe_rx_burst);
+ if (need_safe && !safe_set) {
+ DEBUG("Using safe RX bursts%s",
+ (force_safe ? " (forced)" : ""));
+ dev->rx_pkt_burst = &failsafe_rx_burst;
+ } else if (!need_safe && safe_set) {
+ DEBUG("Using fast RX bursts");
+ dev->rx_pkt_burst = &failsafe_rx_burst_fast;
+ }
+ need_safe = force_safe || fs_tx_unsafe(TX_SUBDEV(dev));
+ safe_set = (dev->tx_pkt_burst == &failsafe_tx_burst);
+ if (need_safe && !safe_set) {
+ DEBUG("Using safe TX bursts%s",
+ (force_safe ? " (forced)" : ""));
+ dev->tx_pkt_burst = &failsafe_tx_burst;
+ } else if (!need_safe && safe_set) {
+ DEBUG("Using fast TX bursts");
+ dev->tx_pkt_burst = &failsafe_tx_burst_fast;
+ }
+ rte_wmb();
+}
+
+uint16_t
+failsafe_rx_burst(void *queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sub_device *sdev;
+ struct rxq *rxq;
+ void *sub_rxq;
+ uint16_t nb_rx;
+
+ rxq = queue;
+ sdev = rxq->sdev;
+ do {
+ if (fs_rx_unsafe(sdev)) {
+ nb_rx = 0;
+ sdev = sdev->next;
+ continue;
+ }
+ sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
+ FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
+ nb_rx = ETH(sdev)->
+ rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
+ FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
+ sdev = sdev->next;
+ } while (nb_rx == 0 && sdev != rxq->sdev);
+ rxq->sdev = sdev;
+ return nb_rx;
+}
+
+uint16_t
+failsafe_rx_burst_fast(void *queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sub_device *sdev;
+ struct rxq *rxq;
+ void *sub_rxq;
+ uint16_t nb_rx;
+
+ rxq = queue;
+ sdev = rxq->sdev;
+ do {
+ RTE_ASSERT(!fs_rx_unsafe(sdev));
+ sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
+ FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
+ nb_rx = ETH(sdev)->
+ rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
+ FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
+ sdev = sdev->next;
+ } while (nb_rx == 0 && sdev != rxq->sdev);
+ rxq->sdev = sdev;
+ return nb_rx;
+}
+
+uint16_t
+failsafe_tx_burst(void *queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sub_device *sdev;
+ struct txq *txq;
+ void *sub_txq;
+ uint16_t nb_tx;
+
+ txq = queue;
+ sdev = TX_SUBDEV(txq->priv->dev);
+ if (unlikely(fs_tx_unsafe(sdev)))
+ return 0;
+ sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
+ FS_ATOMIC_P(txq->refcnt[sdev->sid]);
+ nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
+ FS_ATOMIC_V(txq->refcnt[sdev->sid]);
+ return nb_tx;
+}
+
+uint16_t
+failsafe_tx_burst_fast(void *queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sub_device *sdev;
+ struct txq *txq;
+ void *sub_txq;
+ uint16_t nb_tx;
+
+ txq = queue;
+ sdev = TX_SUBDEV(txq->priv->dev);
+ RTE_ASSERT(!fs_tx_unsafe(sdev));
+ sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
+ FS_ATOMIC_P(txq->refcnt[sdev->sid]);
+ nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
+ FS_ATOMIC_V(txq->refcnt[sdev->sid]);
+ return nb_tx;
+}
diff --git a/src/spdk/dpdk/drivers/net/failsafe/meson.build b/src/spdk/dpdk/drivers/net/failsafe/meson.build
new file mode 100644
index 00000000..a249ff4a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/failsafe/meson.build
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+cflags += '-std=gnu99'
+cflags += '-D_DEFAULT_SOURCE'
+cflags += '-D_XOPEN_SOURCE=700'
+cflags += '-pedantic'
+if host_machine.system() == 'linux'
+ cflags += '-DLINUX'
+else
+ cflags += '-DBSD'
+endif
+
+allow_experimental_apis = true
+
+sources = files('failsafe_args.c',
+ 'failsafe.c',
+ 'failsafe_eal.c',
+ 'failsafe_ether.c',
+ 'failsafe_flow.c',
+ 'failsafe_intr.c',
+ 'failsafe_ops.c',
+ 'failsafe_rxtx.c')
diff --git a/src/spdk/dpdk/drivers/net/failsafe/rte_pmd_failsafe_version.map b/src/spdk/dpdk/drivers/net/failsafe/rte_pmd_failsafe_version.map
new file mode 100644
index 00000000..b6d2840b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/failsafe/rte_pmd_failsafe_version.map
@@ -0,0 +1,4 @@
+DPDK_17.08 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/fm10k/Makefile b/src/spdk/dpdk/drivers/net/fm10k/Makefile
new file mode 100644
index 00000000..d657dff8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/Makefile
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2013-2015 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_fm10k.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_fm10k_version.map
+
+LIBABIVER := 1
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
+
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+
+#
+## CFLAGS for clang
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers
+
+else
+#
+# CFLAGS for gcc
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers
+
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
+CFLAGS += -Wno-deprecated
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
+endif
+endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+
+#
+# Add extra flags for base driver source files to disable warnings in them
+#
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+# base driver is based on the package of cid-fm10k.2017.01.24.tar.gz
+#
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_rxtx.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_tlv.c
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_mbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR) += fm10k_rxtx_vec.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.c
new file mode 100644
index 00000000..c49d20df
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.c
@@ -0,0 +1,363 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "fm10k_api.h"
+#include "fm10k_common.h"
+
+/**
+ * fm10k_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+s32 fm10k_set_mac_type(struct fm10k_hw *hw)
+{
+ s32 ret_val = FM10K_SUCCESS;
+
+ DEBUGFUNC("fm10k_set_mac_type");
+
+ if (hw->vendor_id != FM10K_INTEL_VENDOR_ID) {
+ ERROR_REPORT2(FM10K_ERROR_UNSUPPORTED,
+ "Unsupported vendor id: %x\n", hw->vendor_id);
+ return FM10K_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ switch (hw->device_id) {
+ case FM10K_DEV_ID_PF:
+#ifdef BOULDER_RAPIDS_HW
+ case FM10K_DEV_ID_SDI_FM10420_QDA2:
+#endif /* BOULDER_RAPIDS_HW */
+#ifdef ATWOOD_CHANNEL_HW
+ case FM10K_DEV_ID_SDI_FM10420_DA2:
+#endif /* ATWOOD_CHANNEL_HW */
+ hw->mac.type = fm10k_mac_pf;
+ break;
+ case FM10K_DEV_ID_VF:
+ hw->mac.type = fm10k_mac_vf;
+ break;
+ default:
+ ret_val = FM10K_ERR_DEVICE_NOT_SUPPORTED;
+ ERROR_REPORT2(FM10K_ERROR_UNSUPPORTED,
+ "Unsupported device id: %x\n",
+ hw->device_id);
+ break;
+ }
+
+ DEBUGOUT2("fm10k_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, ret_val);
+
+ return ret_val;
+}
+
+/**
+ * fm10k_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers and assign the MAC type and PHY code.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The fm10k_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+s32 fm10k_init_shared_code(struct fm10k_hw *hw)
+{
+ s32 status;
+
+ DEBUGFUNC("fm10k_init_shared_code");
+
+ /* Set the mac type */
+ fm10k_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case fm10k_mac_pf:
+ status = fm10k_init_ops_pf(hw);
+ break;
+ case fm10k_mac_vf:
+ status = fm10k_init_ops_vf(hw);
+ break;
+ default:
+ status = FM10K_ERR_DEVICE_NOT_SUPPORTED;
+ break;
+ }
+
+ return status;
+}
+
+#define fm10k_call_func(hw, func, params, error) \
+ ((func) ? (func params) : (error))
+
+/**
+ * fm10k_reset_hw - Reset the hardware to known good state
+ * @hw: pointer to hardware structure
+ *
+ * This function should return the hardware to a state similar to the
+ * one it is in after being powered on.
+ **/
+s32 fm10k_reset_hw(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.reset_hw, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_init_hw - Initialize the hardware
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting and then starting the hardware
+ **/
+s32 fm10k_init_hw(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.init_hw, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_stop_hw - Prepares hardware to shutdown Rx/Tx
+ * @hw: pointer to hardware structure
+ *
+ * Disables Rx/Tx queues and disables the DMA engine.
+ **/
+s32 fm10k_stop_hw(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.stop_hw, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_start_hw - Prepares hardware for Rx/Tx
+ * @hw: pointer to hardware structure
+ *
+ * This function sets the flags indicating that the hardware is ready to
+ * begin operation.
+ **/
+s32 fm10k_start_hw(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.start_hw, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_get_bus_info - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets the PCI bus info (speed, width, type) within the fm10k_hw structure
+ **/
+s32 fm10k_get_bus_info(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.get_bus_info, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+/**
+ * fm10k_is_slot_appropriate - Indicate appropriate slot for this SKU
+ * @hw: pointer to hardware structure
+ *
+ * Looks at the PCIe bus info to confirm whether or not this slot can support
+ * the necessary bandwidth for this device.
+ **/
+bool fm10k_is_slot_appropriate(struct fm10k_hw *hw)
+{
+ if (hw->mac.ops.is_slot_appropriate)
+ return hw->mac.ops.is_slot_appropriate(hw);
+ return true;
+}
+
+#endif
+/**
+ * fm10k_update_vlan - Clear VLAN ID to VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vid: VLAN ID to add to table
+ * @idx: Index indicating VF ID or PF ID in table
+ * @set: Indicates if this is a set or clear operation
+ *
+ * This function adds or removes the corresponding VLAN ID from the VLAN
+ * filter table for the corresponding function.
+ **/
+s32 fm10k_update_vlan(struct fm10k_hw *hw, u32 vid, u8 idx, bool set)
+{
+ return fm10k_call_func(hw, hw->mac.ops.update_vlan, (hw, vid, idx, set),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_read_mac_addr - Reads MAC address
+ * @hw: pointer to hardware structure
+ *
+ * Reads the MAC address out of the interface and stores it in the HW
+ * structures.
+ **/
+s32 fm10k_read_mac_addr(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.read_mac_addr, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_update_hw_stats - Update hw statistics
+ * @hw: pointer to hardware structure
+ *
+ * This function updates statistics that are related to hardware.
+ * */
+void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats)
+{
+ if (hw->mac.ops.update_hw_stats)
+ hw->mac.ops.update_hw_stats(hw, stats);
+}
+
+/**
+ * fm10k_rebind_hw_stats - Reset base for hw statistics
+ * @hw: pointer to hardware structure
+ *
+ * This function resets the base for statistics that are related to hardware.
+ * */
+void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats)
+{
+ if (hw->mac.ops.rebind_hw_stats)
+ hw->mac.ops.rebind_hw_stats(hw, stats);
+}
+
+/**
+ * fm10k_configure_dglort_map - Configures GLORT entry and queues
+ * @hw: pointer to hardware structure
+ * @dglort: pointer to dglort configuration structure
+ *
+ * Reads the configuration structure contained in dglort_cfg and uses
+ * that information to then populate a DGLORTMAP/DEC entry and the queues
+ * to which it has been assigned.
+ **/
+s32 fm10k_configure_dglort_map(struct fm10k_hw *hw,
+ struct fm10k_dglort_cfg *dglort)
+{
+ return fm10k_call_func(hw, hw->mac.ops.configure_dglort_map,
+ (hw, dglort), FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_set_dma_mask - Configures PhyAddrSpace to limit DMA to system
+ * @hw: pointer to hardware structure
+ * @dma_mask: 64 bit DMA mask required for platform
+ *
+ * This function configures the endpoint to limit the access to memory
+ * beyond what is physically in the system.
+ **/
+void fm10k_set_dma_mask(struct fm10k_hw *hw, u64 dma_mask)
+{
+ if (hw->mac.ops.set_dma_mask)
+ hw->mac.ops.set_dma_mask(hw, dma_mask);
+}
+
+/**
+ * fm10k_get_fault - Record a fault in one of the interface units
+ * @hw: pointer to hardware structure
+ * @type: pointer to fault type register offset
+ * @fault: pointer to memory location to record the fault
+ *
+ * Record the fault register contents to the fault data structure and
+ * clear the entry from the register.
+ *
+ * Returns ERR_PARAM if invalid register is specified or no error is present.
+ **/
+s32 fm10k_get_fault(struct fm10k_hw *hw, int type, struct fm10k_fault *fault)
+{
+ return fm10k_call_func(hw, hw->mac.ops.get_fault, (hw, type, fault),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_update_uc_addr - Update device unicast address
+ * @hw: pointer to the HW structure
+ * @lport: logical port ID to update - unused
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ * @flags: flags field to indicate add and secure - unused
+ *
+ * This function is used to add or remove unicast MAC addresses
+ **/
+s32 fm10k_update_uc_addr(struct fm10k_hw *hw, u16 lport,
+ const u8 *mac, u16 vid, bool add, u8 flags)
+{
+ return fm10k_call_func(hw, hw->mac.ops.update_uc_addr,
+ (hw, lport, mac, vid, add, flags),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_update_mc_addr - Update device multicast address
+ * @hw: pointer to the HW structure
+ * @lport: logical port ID to update - unused
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ *
+ * This function is used to add or remove multicast MAC addresses
+ **/
+s32 fm10k_update_mc_addr(struct fm10k_hw *hw, u16 lport,
+ const u8 *mac, u16 vid, bool add)
+{
+ return fm10k_call_func(hw, hw->mac.ops.update_mc_addr,
+ (hw, lport, mac, vid, add),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_adjust_systime - Adjust systime frequency
+ * @hw: pointer to hardware structure
+ * @ppb: adjustment rate in parts per billion
+ *
+ * This function is meant to update the frequency of the clock represented
+ * by the SYSTIME register.
+ **/
+s32 fm10k_adjust_systime(struct fm10k_hw *hw, s32 ppb)
+{
+ return fm10k_call_func(hw, hw->mac.ops.adjust_systime,
+ (hw, ppb), FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_notify_offset - Notify switch of change in PTP offset
+ * @hw: pointer to hardware structure
+ * @offset: 64bit unsigned offset from hardware SYSTIME value
+ *
+ * This function is meant to notify switch of change in the PTP offset for
+ * the hardware SYSTIME registers.
+ **/
+s32 fm10k_notify_offset(struct fm10k_hw *hw, u64 offset)
+{
+ return fm10k_call_func(hw, hw->mac.ops.notify_offset,
+ (hw, offset), FM10K_NOT_IMPLEMENTED);
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.h
new file mode 100644
index 00000000..2ab31496
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.h
@@ -0,0 +1,64 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_API_H_
+#define _FM10K_API_H_
+
+#include "fm10k_pf.h"
+#include "fm10k_vf.h"
+
+s32 fm10k_set_mac_type(struct fm10k_hw *hw);
+s32 fm10k_reset_hw(struct fm10k_hw *hw);
+s32 fm10k_init_hw(struct fm10k_hw *hw);
+s32 fm10k_stop_hw(struct fm10k_hw *hw);
+s32 fm10k_start_hw(struct fm10k_hw *hw);
+s32 fm10k_init_shared_code(struct fm10k_hw *hw);
+s32 fm10k_get_bus_info(struct fm10k_hw *hw);
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+bool fm10k_is_slot_appropriate(struct fm10k_hw *hw);
+#endif
+s32 fm10k_update_vlan(struct fm10k_hw *hw, u32 vid, u8 idx, bool set);
+s32 fm10k_read_mac_addr(struct fm10k_hw *hw);
+void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats);
+void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats);
+s32 fm10k_configure_dglort_map(struct fm10k_hw *hw,
+ struct fm10k_dglort_cfg *dglort);
+void fm10k_set_dma_mask(struct fm10k_hw *hw, u64 dma_mask);
+s32 fm10k_get_fault(struct fm10k_hw *hw, int type, struct fm10k_fault *fault);
+s32 fm10k_update_uc_addr(struct fm10k_hw *hw, u16 lport,
+ const u8 *mac, u16 vid, bool add, u8 flags);
+s32 fm10k_update_mc_addr(struct fm10k_hw *hw, u16 lport,
+ const u8 *mac, u16 vid, bool add);
+s32 fm10k_adjust_systime(struct fm10k_hw *hw, s32 ppb);
+s32 fm10k_notify_offset(struct fm10k_hw *hw, u64 offset);
+#endif /* _FM10K_API_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.c
new file mode 100644
index 00000000..29f35d7d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.c
@@ -0,0 +1,579 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "fm10k_common.h"
+
+/**
+ * fm10k_get_bus_info_generic - Generic set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Gets the PCI bus info (speed, width, type) then calls helper function to
+ * store this data within the fm10k_hw structure.
+ **/
+STATIC s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw)
+{
+ u16 link_cap, link_status, device_cap, device_control;
+
+ DEBUGFUNC("fm10k_get_bus_info_generic");
+
+ /* Get the maximum link width and speed from PCIe config space */
+ link_cap = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_LINK_CAP);
+
+ switch (link_cap & FM10K_PCIE_LINK_WIDTH) {
+ case FM10K_PCIE_LINK_WIDTH_1:
+ hw->bus_caps.width = fm10k_bus_width_pcie_x1;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_2:
+ hw->bus_caps.width = fm10k_bus_width_pcie_x2;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_4:
+ hw->bus_caps.width = fm10k_bus_width_pcie_x4;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_8:
+ hw->bus_caps.width = fm10k_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus_caps.width = fm10k_bus_width_unknown;
+ break;
+ }
+
+ switch (link_cap & FM10K_PCIE_LINK_SPEED) {
+ case FM10K_PCIE_LINK_SPEED_2500:
+ hw->bus_caps.speed = fm10k_bus_speed_2500;
+ break;
+ case FM10K_PCIE_LINK_SPEED_5000:
+ hw->bus_caps.speed = fm10k_bus_speed_5000;
+ break;
+ case FM10K_PCIE_LINK_SPEED_8000:
+ hw->bus_caps.speed = fm10k_bus_speed_8000;
+ break;
+ default:
+ hw->bus_caps.speed = fm10k_bus_speed_unknown;
+ break;
+ }
+
+ /* Get the PCIe maximum payload size for the PCIe function */
+ device_cap = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_DEV_CAP);
+
+ switch (device_cap & FM10K_PCIE_DEV_CAP_PAYLOAD) {
+ case FM10K_PCIE_DEV_CAP_PAYLOAD_128:
+ hw->bus_caps.payload = fm10k_bus_payload_128;
+ break;
+ case FM10K_PCIE_DEV_CAP_PAYLOAD_256:
+ hw->bus_caps.payload = fm10k_bus_payload_256;
+ break;
+ case FM10K_PCIE_DEV_CAP_PAYLOAD_512:
+ hw->bus_caps.payload = fm10k_bus_payload_512;
+ break;
+ default:
+ hw->bus_caps.payload = fm10k_bus_payload_unknown;
+ break;
+ }
+
+ /* Get the negotiated link width and speed from PCIe config space */
+ link_status = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_LINK_STATUS);
+
+ switch (link_status & FM10K_PCIE_LINK_WIDTH) {
+ case FM10K_PCIE_LINK_WIDTH_1:
+ hw->bus.width = fm10k_bus_width_pcie_x1;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_2:
+ hw->bus.width = fm10k_bus_width_pcie_x2;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_4:
+ hw->bus.width = fm10k_bus_width_pcie_x4;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_8:
+ hw->bus.width = fm10k_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = fm10k_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & FM10K_PCIE_LINK_SPEED) {
+ case FM10K_PCIE_LINK_SPEED_2500:
+ hw->bus.speed = fm10k_bus_speed_2500;
+ break;
+ case FM10K_PCIE_LINK_SPEED_5000:
+ hw->bus.speed = fm10k_bus_speed_5000;
+ break;
+ case FM10K_PCIE_LINK_SPEED_8000:
+ hw->bus.speed = fm10k_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = fm10k_bus_speed_unknown;
+ break;
+ }
+
+ /* Get the negotiated PCIe maximum payload size for the PCIe function */
+ device_control = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_DEV_CTRL);
+
+ switch (device_control & FM10K_PCIE_DEV_CTRL_PAYLOAD) {
+ case FM10K_PCIE_DEV_CTRL_PAYLOAD_128:
+ hw->bus.payload = fm10k_bus_payload_128;
+ break;
+ case FM10K_PCIE_DEV_CTRL_PAYLOAD_256:
+ hw->bus.payload = fm10k_bus_payload_256;
+ break;
+ case FM10K_PCIE_DEV_CTRL_PAYLOAD_512:
+ hw->bus.payload = fm10k_bus_payload_512;
+ break;
+ default:
+ hw->bus.payload = fm10k_bus_payload_unknown;
+ break;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw)
+{
+ u16 msix_count;
+
+ DEBUGFUNC("fm10k_get_pcie_msix_count_generic");
+
+ /* read in value from MSI-X capability register */
+ msix_count = FM10K_READ_PCI_WORD(hw, FM10K_PCI_MSIX_MSG_CTRL);
+ msix_count &= FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK;
+
+ /* MSI-X count is zero-based in HW */
+ msix_count++;
+
+ if (msix_count > FM10K_MAX_MSIX_VECTORS)
+ msix_count = FM10K_MAX_MSIX_VECTORS;
+
+ return msix_count;
+}
+
+/**
+ * fm10k_init_ops_generic - Inits function ptrs
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the function pointers.
+ **/
+s32 fm10k_init_ops_generic(struct fm10k_hw *hw)
+{
+ struct fm10k_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("fm10k_init_ops_generic");
+
+ /* MAC */
+ mac->ops.get_bus_info = &fm10k_get_bus_info_generic;
+
+ /* initialize GLORT state to avoid any false hits */
+ mac->dglort_map = FM10K_DGLORTMAP_NONE;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_start_hw_generic - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * This function sets the Tx ready flag to indicate that the Tx path has
+ * been initialized.
+ **/
+s32 fm10k_start_hw_generic(struct fm10k_hw *hw)
+{
+ DEBUGFUNC("fm10k_start_hw_generic");
+
+ /* set flag indicating we are beginning Tx */
+ hw->mac.tx_ready = true;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_disable_queues_generic - Stop Tx/Rx queues
+ * @hw: pointer to hardware structure
+ * @q_cnt: number of queues to be disabled
+ *
+ **/
+s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt)
+{
+ u32 reg;
+ u16 i, time;
+
+ DEBUGFUNC("fm10k_disable_queues_generic");
+
+ /* clear tx_ready to prevent any false hits for reset */
+ hw->mac.tx_ready = false;
+
+ if (FM10K_REMOVED(hw->hw_addr))
+ return FM10K_SUCCESS;
+
+ /* clear the enable bit for all rings */
+ for (i = 0; i < q_cnt; i++) {
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i));
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(i),
+ reg & ~FM10K_TXDCTL_ENABLE);
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i));
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(i),
+ reg & ~FM10K_RXQCTL_ENABLE);
+ }
+
+ FM10K_WRITE_FLUSH(hw);
+ usec_delay(1);
+
+ /* loop through all queues to verify that they are all disabled */
+ for (i = 0, time = FM10K_QUEUE_DISABLE_TIMEOUT; time;) {
+ /* if we are at end of rings all rings are disabled */
+ if (i == q_cnt)
+ return FM10K_SUCCESS;
+
+ /* if queue enables cleared, then move to next ring pair */
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i));
+ if (!~reg || !(reg & FM10K_TXDCTL_ENABLE)) {
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i));
+ if (!~reg || !(reg & FM10K_RXQCTL_ENABLE)) {
+ i++;
+ continue;
+ }
+ }
+
+ /* decrement time and wait 1 usec */
+ time--;
+ if (time)
+ usec_delay(1);
+ }
+
+ return FM10K_ERR_REQUESTS_PENDING;
+}
+
+/**
+ * fm10k_stop_hw_generic - Stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ **/
+s32 fm10k_stop_hw_generic(struct fm10k_hw *hw)
+{
+ DEBUGFUNC("fm10k_stop_hw_generic");
+
+ return fm10k_disable_queues_generic(hw, hw->mac.max_queues);
+}
+
+/**
+ * fm10k_read_hw_stats_32b - Reads value of 32-bit registers
+ * @hw: pointer to the hardware structure
+ * @addr: address of register containing a 32-bit value
+ *
+ * Function reads the content of the register and returns the delta
+ * between the base and the current value.
+ * **/
+u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
+ struct fm10k_hw_stat *stat)
+{
+ u32 delta = FM10K_READ_REG(hw, addr) - stat->base_l;
+
+ DEBUGFUNC("fm10k_read_hw_stats_32b");
+
+ if (FM10K_REMOVED(hw->hw_addr))
+ stat->base_h = 0;
+
+ return delta;
+}
+
+/**
+ * fm10k_read_hw_stats_48b - Reads value of 48-bit registers
+ * @hw: pointer to the hardware structure
+ * @addr: address of register containing the lower 32-bit value
+ *
+ * Function reads the content of 2 registers, combined to represent a 48-bit
+ * statistical value. Extra processing is required to handle overflowing.
+ * Finally, a delta value is returned representing the difference between the
+ * values stored in registers and values stored in the statistic counters.
+ * **/
+STATIC u64 fm10k_read_hw_stats_48b(struct fm10k_hw *hw, u32 addr,
+ struct fm10k_hw_stat *stat)
+{
+ u32 count_l;
+ u32 count_h;
+ u32 count_tmp;
+ u64 delta;
+
+ DEBUGFUNC("fm10k_read_hw_stats_48b");
+
+ count_h = FM10K_READ_REG(hw, addr + 1);
+
+ /* Check for overflow */
+ do {
+ count_tmp = count_h;
+ count_l = FM10K_READ_REG(hw, addr);
+ count_h = FM10K_READ_REG(hw, addr + 1);
+ } while (count_h != count_tmp);
+
+ delta = ((u64)(count_h - stat->base_h) << 32) + count_l;
+ delta -= stat->base_l;
+
+ return delta & FM10K_48_BIT_MASK;
+}
+
+/**
+ * fm10k_update_hw_base_48b - Updates 48-bit statistic base value
+ * @stat: pointer to the hardware statistic structure
+ * @delta: value to be updated into the hardware statistic structure
+ *
+ * Function receives a value and determines if an update is required based on
+ * a delta calculation. Only the base value will be updated.
+ **/
+STATIC void fm10k_update_hw_base_48b(struct fm10k_hw_stat *stat, u64 delta)
+{
+ DEBUGFUNC("fm10k_update_hw_base_48b");
+
+ if (!delta)
+ return;
+
+ /* update lower 32 bits */
+ delta += stat->base_l;
+ stat->base_l = (u32)delta;
+
+ /* update upper 32 bits */
+ stat->base_h += (u32)(delta >> 32);
+}
+
+/**
+ * fm10k_update_hw_stats_tx_q - Updates TX queue statistics counters
+ * @hw: pointer to the hardware structure
+ * @q: pointer to the ring of hardware statistics queue
+ * @idx: index pointing to the start of the ring iteration
+ *
+ * Function updates the TX queue statistics counters that are related to the
+ * hardware.
+ **/
+STATIC void fm10k_update_hw_stats_tx_q(struct fm10k_hw *hw,
+ struct fm10k_hw_stats_q *q,
+ u32 idx)
+{
+ u32 id_tx, id_tx_prev, tx_packets;
+ u64 tx_bytes = 0;
+
+ DEBUGFUNC("fm10k_update_hw_stats_tx_q");
+
+ /* Retrieve TX Owner Data */
+ id_tx = FM10K_READ_REG(hw, FM10K_TXQCTL(idx));
+
+ /* Process TX Ring */
+ do {
+ tx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPTC(idx),
+ &q->tx_packets);
+
+ if (tx_packets)
+ tx_bytes = fm10k_read_hw_stats_48b(hw,
+ FM10K_QBTC_L(idx),
+ &q->tx_bytes);
+
+ /* Re-Check Owner Data */
+ id_tx_prev = id_tx;
+ id_tx = FM10K_READ_REG(hw, FM10K_TXQCTL(idx));
+ } while ((id_tx ^ id_tx_prev) & FM10K_TXQCTL_ID_MASK);
+
+ /* drop non-ID bits and set VALID ID bit */
+ id_tx &= FM10K_TXQCTL_ID_MASK;
+ id_tx |= FM10K_STAT_VALID;
+
+ /* update packet counts */
+ if (q->tx_stats_idx == id_tx) {
+ q->tx_packets.count += tx_packets;
+ q->tx_bytes.count += tx_bytes;
+ }
+
+ /* update bases and record ID */
+ fm10k_update_hw_base_32b(&q->tx_packets, tx_packets);
+ fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes);
+
+ q->tx_stats_idx = id_tx;
+}
+
+/**
+ * fm10k_update_hw_stats_rx_q - Updates RX queue statistics counters
+ * @hw: pointer to the hardware structure
+ * @q: pointer to the ring of hardware statistics queue
+ * @idx: index pointing to the start of the ring iteration
+ *
+ * Function updates the RX queue statistics counters that are related to the
+ * hardware.
+ **/
+STATIC void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw,
+ struct fm10k_hw_stats_q *q,
+ u32 idx)
+{
+ u32 id_rx, id_rx_prev, rx_packets, rx_drops;
+ u64 rx_bytes = 0;
+
+ DEBUGFUNC("fm10k_update_hw_stats_rx_q");
+
+ /* Retrieve RX Owner Data */
+ id_rx = FM10K_READ_REG(hw, FM10K_RXQCTL(idx));
+
+ /* Process RX Ring */
+ do {
+ rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx),
+ &q->rx_drops);
+
+ rx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPRC(idx),
+ &q->rx_packets);
+
+ if (rx_packets)
+ rx_bytes = fm10k_read_hw_stats_48b(hw,
+ FM10K_QBRC_L(idx),
+ &q->rx_bytes);
+
+ /* Re-Check Owner Data */
+ id_rx_prev = id_rx;
+ id_rx = FM10K_READ_REG(hw, FM10K_RXQCTL(idx));
+ } while ((id_rx ^ id_rx_prev) & FM10K_RXQCTL_ID_MASK);
+
+ /* drop non-ID bits and set VALID ID bit */
+ id_rx &= FM10K_RXQCTL_ID_MASK;
+ id_rx |= FM10K_STAT_VALID;
+
+ /* update packet counts */
+ if (q->rx_stats_idx == id_rx) {
+ q->rx_drops.count += rx_drops;
+ q->rx_packets.count += rx_packets;
+ q->rx_bytes.count += rx_bytes;
+ }
+
+ /* update bases and record ID */
+ fm10k_update_hw_base_32b(&q->rx_drops, rx_drops);
+ fm10k_update_hw_base_32b(&q->rx_packets, rx_packets);
+ fm10k_update_hw_base_48b(&q->rx_bytes, rx_bytes);
+
+ q->rx_stats_idx = id_rx;
+}
+
+/**
+ * fm10k_update_hw_stats_q - Updates queue statistics counters
+ * @hw: pointer to the hardware structure
+ * @q: pointer to the ring of hardware statistics queue
+ * @idx: index pointing to the start of the ring iteration
+ * @count: number of queues to iterate over
+ *
+ * Function updates the queue statistics counters that are related to the
+ * hardware.
+ **/
+void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
+ u32 idx, u32 count)
+{
+ u32 i;
+
+ DEBUGFUNC("fm10k_update_hw_stats_q");
+
+ for (i = 0; i < count; i++, idx++, q++) {
+ fm10k_update_hw_stats_tx_q(hw, q, idx);
+ fm10k_update_hw_stats_rx_q(hw, q, idx);
+ }
+}
+
+/**
+ * fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues
+ * @hw: pointer to the hardware structure
+ * @q: pointer to the ring of hardware statistics queue
+ * @idx: index pointing to the start of the ring iteration
+ * @count: number of queues to iterate over
+ *
+ * Function invalidates the index values for the queues so any updates that
+ * may have happened are ignored and the base for the queue stats is reset.
+ **/
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
+{
+ u32 i;
+
+ for (i = 0; i < count; i++, idx++, q++) {
+ q->rx_stats_idx = 0;
+ q->tx_stats_idx = 0;
+ }
+}
+
+/**
+ * fm10k_get_host_state_generic - Returns the state of the host
+ * @hw: pointer to hardware structure
+ * @host_ready: pointer to boolean value that will record host state
+ *
+ * This function will check the health of the mailbox and Tx queue 0
+ * in order to determine if we should report that the link is up or not.
+ **/
+s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ struct fm10k_mac_info *mac = &hw->mac;
+ s32 ret_val = FM10K_SUCCESS;
+ u32 txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(0));
+
+ DEBUGFUNC("fm10k_get_host_state_generic");
+
+ /* process upstream mailbox in case interrupts were disabled */
+ mbx->ops.process(hw, mbx);
+
+ /* If Tx is no longer enabled link should come down */
+ if (!(~txdctl) || !(txdctl & FM10K_TXDCTL_ENABLE))
+ mac->get_host_state = true;
+
+ /* exit if not checking for link, or link cannot be changed */
+ if (!mac->get_host_state || !(~txdctl))
+ goto out;
+
+ /* if we somehow dropped the Tx enable we should reset */
+ if (mac->tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
+ ret_val = FM10K_ERR_RESET_REQUESTED;
+ goto out;
+ }
+
+ /* if Mailbox timed out we should request reset */
+ if (!mbx->timeout) {
+ ret_val = FM10K_ERR_RESET_REQUESTED;
+ goto out;
+ }
+
+ /* verify Mailbox is still valid */
+ if (!mbx->ops.tx_ready(mbx, FM10K_VFMBX_MSG_MTU))
+ goto out;
+
+ /* interface cannot receive traffic without logical ports */
+ if (mac->dglort_map == FM10K_DGLORTMAP_NONE) {
+ if (mac->ops.request_lport_map)
+ ret_val = mac->ops.request_lport_map(hw);
+
+ goto out;
+ }
+
+ /* if we passed all the tests above then the switch is ready and we no
+ * longer need to check for link
+ */
+ mac->get_host_state = false;
+
+out:
+ *host_ready = !mac->get_host_state;
+ return ret_val;
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.h
new file mode 100644
index 00000000..45fbbc0b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.h
@@ -0,0 +1,52 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_COMMON_H_
+#define _FM10K_COMMON_H_
+
+#include "fm10k_type.h"
+
+u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw);
+s32 fm10k_init_ops_generic(struct fm10k_hw *hw);
+s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt);
+s32 fm10k_start_hw_generic(struct fm10k_hw *hw);
+s32 fm10k_stop_hw_generic(struct fm10k_hw *hw);
+u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
+ struct fm10k_hw_stat *stat);
+#define fm10k_update_hw_base_32b(stat, delta) ((stat)->base_l += (delta))
+void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
+ u32 idx, u32 count);
+#define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0)
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count);
+s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready);
+#endif /* _FM10K_COMMON_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.c
new file mode 100644
index 00000000..e766e45c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.c
@@ -0,0 +1,2254 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "fm10k_common.h"
+
+/**
+ * fm10k_fifo_init - Initialize a message FIFO
+ * @fifo: pointer to FIFO
+ * @buffer: pointer to memory to be used to store FIFO
+ * @size: maximum message size to store in FIFO, must be 2^n - 1
+ **/
+STATIC void fm10k_fifo_init(struct fm10k_mbx_fifo *fifo, u32 *buffer, u16 size)
+{
+ fifo->buffer = buffer;
+ fifo->size = size;
+ fifo->head = 0;
+ fifo->tail = 0;
+}
+
+/**
+ * fm10k_fifo_used - Retrieve used space in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function returns the number of DWORDs used in the FIFO
+ **/
+STATIC u16 fm10k_fifo_used(struct fm10k_mbx_fifo *fifo)
+{
+ return fifo->tail - fifo->head;
+}
+
+/**
+ * fm10k_fifo_unused - Retrieve unused space in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function returns the number of unused DWORDs in the FIFO
+ **/
+STATIC u16 fm10k_fifo_unused(struct fm10k_mbx_fifo *fifo)
+{
+ return fifo->size + fifo->head - fifo->tail;
+}
+
+/**
+ * fm10k_fifo_empty - Test to verify if FIFO is empty
+ * @fifo: pointer to FIFO
+ *
+ * This function returns true if the FIFO is empty, else false
+ **/
+STATIC bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo)
+{
+ return fifo->head == fifo->tail;
+}
+
+/**
+ * fm10k_fifo_head_offset - returns indices of head with given offset
+ * @fifo: pointer to FIFO
+ * @offset: offset to add to head
+ *
+ * This function returns the indices into the FIFO based on head + offset
+ **/
+STATIC u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
+{
+ return (fifo->head + offset) & (fifo->size - 1);
+}
+
+/**
+ * fm10k_fifo_tail_offset - returns indices of tail with given offset
+ * @fifo: pointer to FIFO
+ * @offset: offset to add to tail
+ *
+ * This function returns the indices into the FIFO based on tail + offset
+ **/
+STATIC u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
+{
+ return (fifo->tail + offset) & (fifo->size - 1);
+}
+
+/**
+ * fm10k_fifo_head_len - Retrieve length of first message in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function returns the size of the first message in the FIFO
+ **/
+STATIC u16 fm10k_fifo_head_len(struct fm10k_mbx_fifo *fifo)
+{
+ u32 *head = fifo->buffer + fm10k_fifo_head_offset(fifo, 0);
+
+ /* verify there is at least 1 DWORD in the fifo so *head is valid */
+ if (fm10k_fifo_empty(fifo))
+ return 0;
+
+ /* retieve the message length */
+ return FM10K_TLV_DWORD_LEN(*head);
+}
+
+/**
+ * fm10k_fifo_head_drop - Drop the first message in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function returns the size of the message dropped from the FIFO
+ **/
+STATIC u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo)
+{
+ u16 len = fm10k_fifo_head_len(fifo);
+
+ /* update head so it is at the start of next frame */
+ fifo->head += len;
+
+ return len;
+}
+
+/**
+ * fm10k_fifo_drop_all - Drop all messages in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function resets the head pointer to drop all messages in the FIFO and
+ * ensure the FIFO is empty.
+ **/
+STATIC void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo)
+{
+ fifo->head = fifo->tail;
+}
+
+/**
+ * fm10k_mbx_index_len - Convert a head/tail index into a length value
+ * @mbx: pointer to mailbox
+ * @head: head index
+ * @tail: head index
+ *
+ * This function takes the head and tail index and determines the length
+ * of the data indicated by this pair.
+ **/
+STATIC u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail)
+{
+ u16 len = tail - head;
+
+ /* we wrapped so subtract 2, one for index 0, one for all 1s index */
+ if (len > tail)
+ len -= 2;
+
+ return len & ((mbx->mbmem_len << 1) - 1);
+}
+
+/**
+ * fm10k_mbx_tail_add - Determine new tail value with added offset
+ * @mbx: pointer to mailbox
+ * @offset: length to add to tail offset
+ *
+ * This function takes the local tail index and recomputes it for
+ * a given length added as an offset.
+ **/
+STATIC u16 fm10k_mbx_tail_add(struct fm10k_mbx_info *mbx, u16 offset)
+{
+ u16 tail = (mbx->tail + offset + 1) & ((mbx->mbmem_len << 1) - 1);
+
+ /* add/sub 1 because we cannot have offset 0 or all 1s */
+ return (tail > mbx->tail) ? --tail : ++tail;
+}
+
+/**
+ * fm10k_mbx_tail_sub - Determine new tail value with subtracted offset
+ * @mbx: pointer to mailbox
+ * @offset: length to add to tail offset
+ *
+ * This function takes the local tail index and recomputes it for
+ * a given length added as an offset.
+ **/
+STATIC u16 fm10k_mbx_tail_sub(struct fm10k_mbx_info *mbx, u16 offset)
+{
+ u16 tail = (mbx->tail - offset - 1) & ((mbx->mbmem_len << 1) - 1);
+
+ /* sub/add 1 because we cannot have offset 0 or all 1s */
+ return (tail < mbx->tail) ? ++tail : --tail;
+}
+
+/**
+ * fm10k_mbx_head_add - Determine new head value with added offset
+ * @mbx: pointer to mailbox
+ * @offset: length to add to head offset
+ *
+ * This function takes the local head index and recomputes it for
+ * a given length added as an offset.
+ **/
+STATIC u16 fm10k_mbx_head_add(struct fm10k_mbx_info *mbx, u16 offset)
+{
+ u16 head = (mbx->head + offset + 1) & ((mbx->mbmem_len << 1) - 1);
+
+ /* add/sub 1 because we cannot have offset 0 or all 1s */
+ return (head > mbx->head) ? --head : ++head;
+}
+
+/**
+ * fm10k_mbx_head_sub - Determine new head value with subtracted offset
+ * @mbx: pointer to mailbox
+ * @offset: length to add to head offset
+ *
+ * This function takes the local head index and recomputes it for
+ * a given length added as an offset.
+ **/
+STATIC u16 fm10k_mbx_head_sub(struct fm10k_mbx_info *mbx, u16 offset)
+{
+ u16 head = (mbx->head - offset - 1) & ((mbx->mbmem_len << 1) - 1);
+
+ /* sub/add 1 because we cannot have offset 0 or all 1s */
+ return (head < mbx->head) ? ++head : --head;
+}
+
+/**
+ * fm10k_mbx_pushed_tail_len - Retrieve the length of message being pushed
+ * @mbx: pointer to mailbox
+ *
+ * This function will return the length of the message currently being
+ * pushed onto the tail of the Rx queue.
+ **/
+STATIC u16 fm10k_mbx_pushed_tail_len(struct fm10k_mbx_info *mbx)
+{
+ u32 *tail = mbx->rx.buffer + fm10k_fifo_tail_offset(&mbx->rx, 0);
+
+ /* pushed tail is only valid if pushed is set */
+ if (!mbx->pushed)
+ return 0;
+
+ return FM10K_TLV_DWORD_LEN(*tail);
+}
+
+/**
+ * fm10k_fifo_write_copy - pulls data off of msg and places it in FIFO
+ * @fifo: pointer to FIFO
+ * @msg: message array to populate
+ * @tail_offset: additional offset to add to tail pointer
+ * @len: length of FIFO to copy into message header
+ *
+ * This function will take a message and copy it into a section of the
+ * FIFO. In order to get something into a location other than just
+ * the tail you can use tail_offset to adjust the pointer.
+ **/
+STATIC void fm10k_fifo_write_copy(struct fm10k_mbx_fifo *fifo,
+ const u32 *msg, u16 tail_offset, u16 len)
+{
+ u16 end = fm10k_fifo_tail_offset(fifo, tail_offset);
+ u32 *tail = fifo->buffer + end;
+
+ /* track when we should cross the end of the FIFO */
+ end = fifo->size - end;
+
+ /* copy end of message before start of message */
+ if (end < len)
+ memcpy(fifo->buffer, msg + end, (len - end) << 2);
+ else
+ end = len;
+
+ /* Copy remaining message into Tx FIFO */
+ memcpy(tail, msg, end << 2);
+}
+
+/**
+ * fm10k_fifo_enqueue - Enqueues the message to the tail of the FIFO
+ * @fifo: pointer to FIFO
+ * @msg: message array to read
+ *
+ * This function enqueues a message up to the size specified by the length
+ * contained in the first DWORD of the message and will place at the tail
+ * of the FIFO. It will return 0 on success, or a negative value on error.
+ **/
+STATIC s32 fm10k_fifo_enqueue(struct fm10k_mbx_fifo *fifo, const u32 *msg)
+{
+ u16 len = FM10K_TLV_DWORD_LEN(*msg);
+
+ DEBUGFUNC("fm10k_fifo_enqueue");
+
+ /* verify parameters */
+ if (len > fifo->size)
+ return FM10K_MBX_ERR_SIZE;
+
+ /* verify there is room for the message */
+ if (len > fm10k_fifo_unused(fifo))
+ return FM10K_MBX_ERR_NO_SPACE;
+
+ /* Copy message into FIFO */
+ fm10k_fifo_write_copy(fifo, msg, 0, len);
+
+ /* memory barrier to guarantee FIFO is written before tail update */
+ FM10K_WMB();
+
+ /* Update Tx FIFO tail */
+ fifo->tail += len;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_validate_msg_size - Validate incoming message based on size
+ * @mbx: pointer to mailbox
+ * @len: length of data pushed onto buffer
+ *
+ * This function analyzes the frame and will return a non-zero value when
+ * the start of a message larger than the mailbox is detected.
+ **/
+STATIC u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ u16 total_len = 0, msg_len;
+ u32 *msg;
+
+ DEBUGFUNC("fm10k_mbx_validate_msg_size");
+
+ /* length should include previous amounts pushed */
+ len += mbx->pushed;
+
+ /* offset in message is based off of current message size */
+ do {
+ msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len);
+ msg_len = FM10K_TLV_DWORD_LEN(*msg);
+ total_len += msg_len;
+ } while (total_len < len);
+
+ /* message extends out of pushed section, but fits in FIFO */
+ if ((len < total_len) && (msg_len <= mbx->max_size))
+ return 0;
+
+ /* return length of invalid section */
+ return (len < total_len) ? len : (len - total_len);
+}
+
+/**
+ * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will take a section of the Tx FIFO and copy it into the
+ * mailbox memory. The offset in mbmem is based on the lower bits of the
+ * tail and len determines the length to copy.
+ **/
+STATIC void fm10k_mbx_write_copy(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->tx;
+ u32 mbmem = mbx->mbmem_reg;
+ u32 *head = fifo->buffer;
+ u16 end, len, tail, mask;
+
+ DEBUGFUNC("fm10k_mbx_write_copy");
+
+ if (!mbx->tail_len)
+ return;
+
+ /* determine data length and mbmem tail index */
+ mask = mbx->mbmem_len - 1;
+ len = mbx->tail_len;
+ tail = fm10k_mbx_tail_sub(mbx, len);
+ if (tail > mask)
+ tail++;
+
+ /* determine offset in the ring */
+ end = fm10k_fifo_head_offset(fifo, mbx->pulled);
+ head += end;
+
+ /* memory barrier to guarantee data is ready to be read */
+ FM10K_RMB();
+
+ /* Copy message from Tx FIFO */
+ for (end = fifo->size - end; len; head = fifo->buffer) {
+ do {
+ /* adjust tail to match offset for FIFO */
+ tail &= mask;
+ if (!tail)
+ tail++;
+
+ mbx->tx_mbmem_pulled++;
+
+ /* write message to hardware FIFO */
+ FM10K_WRITE_MBX(hw, mbmem + tail++, *(head++));
+ } while (--len && --end);
+ }
+}
+
+/**
+ * fm10k_mbx_pull_head - Pulls data off of head of Tx FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @head: acknowledgement number last received
+ *
+ * This function will push the tail index forward based on the remote
+ * head index. It will then pull up to mbmem_len DWORDs off of the
+ * head of the FIFO and will place it in the MBMEM registers
+ * associated with the mailbox.
+ **/
+STATIC void fm10k_mbx_pull_head(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, u16 head)
+{
+ u16 mbmem_len, len, ack = fm10k_mbx_index_len(mbx, head, mbx->tail);
+ struct fm10k_mbx_fifo *fifo = &mbx->tx;
+
+ /* update number of bytes pulled and update bytes in transit */
+ mbx->pulled += mbx->tail_len - ack;
+
+ /* determine length of data to pull, reserve space for mbmem header */
+ mbmem_len = mbx->mbmem_len - 1;
+ len = fm10k_fifo_used(fifo) - mbx->pulled;
+ if (len > mbmem_len)
+ len = mbmem_len;
+
+ /* update tail and record number of bytes in transit */
+ mbx->tail = fm10k_mbx_tail_add(mbx, len - ack);
+ mbx->tail_len = len;
+
+ /* drop pulled messages from the FIFO */
+ for (len = fm10k_fifo_head_len(fifo);
+ len && (mbx->pulled >= len);
+ len = fm10k_fifo_head_len(fifo)) {
+ mbx->pulled -= fm10k_fifo_head_drop(fifo);
+ mbx->tx_messages++;
+ mbx->tx_dwords += len;
+ }
+
+ /* Copy message out from the Tx FIFO */
+ fm10k_mbx_write_copy(hw, mbx);
+}
+
+/**
+ * fm10k_mbx_read_copy - pulls data off of mbmem and places it in Rx FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will take a section of the mailbox memory and copy it
+ * into the Rx FIFO. The offset is based on the lower bits of the
+ * head and len determines the length to copy.
+ **/
+STATIC void fm10k_mbx_read_copy(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ u32 mbmem = mbx->mbmem_reg ^ mbx->mbmem_len;
+ u32 *tail = fifo->buffer;
+ u16 end, len, head;
+
+ DEBUGFUNC("fm10k_mbx_read_copy");
+
+ /* determine data length and mbmem head index */
+ len = mbx->head_len;
+ head = fm10k_mbx_head_sub(mbx, len);
+ if (head >= mbx->mbmem_len)
+ head++;
+
+ /* determine offset in the ring */
+ end = fm10k_fifo_tail_offset(fifo, mbx->pushed);
+ tail += end;
+
+ /* Copy message into Rx FIFO */
+ for (end = fifo->size - end; len; tail = fifo->buffer) {
+ do {
+ /* adjust head to match offset for FIFO */
+ head &= mbx->mbmem_len - 1;
+ if (!head)
+ head++;
+
+ mbx->rx_mbmem_pushed++;
+
+ /* read message from hardware FIFO */
+ *(tail++) = FM10K_READ_MBX(hw, mbmem + head++);
+ } while (--len && --end);
+ }
+
+ /* memory barrier to guarantee FIFO is written before tail update */
+ FM10K_WMB();
+}
+
+/**
+ * fm10k_mbx_push_tail - Pushes up to 15 DWORDs on to tail of FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @tail: tail index of message
+ *
+ * This function will first validate the tail index and size for the
+ * incoming message. It then updates the acknowledgment number and
+ * copies the data into the FIFO. It will return the number of messages
+ * dequeued on success and a negative value on error.
+ **/
+STATIC s32 fm10k_mbx_push_tail(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx,
+ u16 tail)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ u16 len, seq = fm10k_mbx_index_len(mbx, mbx->head, tail);
+
+ DEBUGFUNC("fm10k_mbx_push_tail");
+
+ /* determine length of data to push */
+ len = fm10k_fifo_unused(fifo) - mbx->pushed;
+ if (len > seq)
+ len = seq;
+
+ /* update head and record bytes received */
+ mbx->head = fm10k_mbx_head_add(mbx, len);
+ mbx->head_len = len;
+
+ /* nothing to do if there is no data */
+ if (!len)
+ return FM10K_SUCCESS;
+
+ /* Copy msg into Rx FIFO */
+ fm10k_mbx_read_copy(hw, mbx);
+
+ /* determine if there are any invalid lengths in message */
+ if (fm10k_mbx_validate_msg_size(mbx, len))
+ return FM10K_MBX_ERR_SIZE;
+
+ /* Update pushed */
+ mbx->pushed += len;
+
+ /* flush any completed messages */
+ for (len = fm10k_mbx_pushed_tail_len(mbx);
+ len && (mbx->pushed >= len);
+ len = fm10k_mbx_pushed_tail_len(mbx)) {
+ fifo->tail += len;
+ mbx->pushed -= len;
+ mbx->rx_messages++;
+ mbx->rx_dwords += len;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/* pre-generated data for generating the CRC based on the poly 0xAC9A. */
+static const u16 fm10k_crc_16b_table[256] = {
+ 0x0000, 0x7956, 0xF2AC, 0x8BFA, 0xBC6D, 0xC53B, 0x4EC1, 0x3797,
+ 0x21EF, 0x58B9, 0xD343, 0xAA15, 0x9D82, 0xE4D4, 0x6F2E, 0x1678,
+ 0x43DE, 0x3A88, 0xB172, 0xC824, 0xFFB3, 0x86E5, 0x0D1F, 0x7449,
+ 0x6231, 0x1B67, 0x909D, 0xE9CB, 0xDE5C, 0xA70A, 0x2CF0, 0x55A6,
+ 0x87BC, 0xFEEA, 0x7510, 0x0C46, 0x3BD1, 0x4287, 0xC97D, 0xB02B,
+ 0xA653, 0xDF05, 0x54FF, 0x2DA9, 0x1A3E, 0x6368, 0xE892, 0x91C4,
+ 0xC462, 0xBD34, 0x36CE, 0x4F98, 0x780F, 0x0159, 0x8AA3, 0xF3F5,
+ 0xE58D, 0x9CDB, 0x1721, 0x6E77, 0x59E0, 0x20B6, 0xAB4C, 0xD21A,
+ 0x564D, 0x2F1B, 0xA4E1, 0xDDB7, 0xEA20, 0x9376, 0x188C, 0x61DA,
+ 0x77A2, 0x0EF4, 0x850E, 0xFC58, 0xCBCF, 0xB299, 0x3963, 0x4035,
+ 0x1593, 0x6CC5, 0xE73F, 0x9E69, 0xA9FE, 0xD0A8, 0x5B52, 0x2204,
+ 0x347C, 0x4D2A, 0xC6D0, 0xBF86, 0x8811, 0xF147, 0x7ABD, 0x03EB,
+ 0xD1F1, 0xA8A7, 0x235D, 0x5A0B, 0x6D9C, 0x14CA, 0x9F30, 0xE666,
+ 0xF01E, 0x8948, 0x02B2, 0x7BE4, 0x4C73, 0x3525, 0xBEDF, 0xC789,
+ 0x922F, 0xEB79, 0x6083, 0x19D5, 0x2E42, 0x5714, 0xDCEE, 0xA5B8,
+ 0xB3C0, 0xCA96, 0x416C, 0x383A, 0x0FAD, 0x76FB, 0xFD01, 0x8457,
+ 0xAC9A, 0xD5CC, 0x5E36, 0x2760, 0x10F7, 0x69A1, 0xE25B, 0x9B0D,
+ 0x8D75, 0xF423, 0x7FD9, 0x068F, 0x3118, 0x484E, 0xC3B4, 0xBAE2,
+ 0xEF44, 0x9612, 0x1DE8, 0x64BE, 0x5329, 0x2A7F, 0xA185, 0xD8D3,
+ 0xCEAB, 0xB7FD, 0x3C07, 0x4551, 0x72C6, 0x0B90, 0x806A, 0xF93C,
+ 0x2B26, 0x5270, 0xD98A, 0xA0DC, 0x974B, 0xEE1D, 0x65E7, 0x1CB1,
+ 0x0AC9, 0x739F, 0xF865, 0x8133, 0xB6A4, 0xCFF2, 0x4408, 0x3D5E,
+ 0x68F8, 0x11AE, 0x9A54, 0xE302, 0xD495, 0xADC3, 0x2639, 0x5F6F,
+ 0x4917, 0x3041, 0xBBBB, 0xC2ED, 0xF57A, 0x8C2C, 0x07D6, 0x7E80,
+ 0xFAD7, 0x8381, 0x087B, 0x712D, 0x46BA, 0x3FEC, 0xB416, 0xCD40,
+ 0xDB38, 0xA26E, 0x2994, 0x50C2, 0x6755, 0x1E03, 0x95F9, 0xECAF,
+ 0xB909, 0xC05F, 0x4BA5, 0x32F3, 0x0564, 0x7C32, 0xF7C8, 0x8E9E,
+ 0x98E6, 0xE1B0, 0x6A4A, 0x131C, 0x248B, 0x5DDD, 0xD627, 0xAF71,
+ 0x7D6B, 0x043D, 0x8FC7, 0xF691, 0xC106, 0xB850, 0x33AA, 0x4AFC,
+ 0x5C84, 0x25D2, 0xAE28, 0xD77E, 0xE0E9, 0x99BF, 0x1245, 0x6B13,
+ 0x3EB5, 0x47E3, 0xCC19, 0xB54F, 0x82D8, 0xFB8E, 0x7074, 0x0922,
+ 0x1F5A, 0x660C, 0xEDF6, 0x94A0, 0xA337, 0xDA61, 0x519B, 0x28CD };
+
+/**
+ * fm10k_crc_16b - Generate a 16 bit CRC for a region of 16 bit data
+ * @data: pointer to data to process
+ * @seed: seed value for CRC
+ * @len: length measured in 16 bits words
+ *
+ * This function will generate a CRC based on the polynomial 0xAC9A and
+ * whatever value is stored in the seed variable. Note that this
+ * value inverts the local seed and the result in order to capture all
+ * leading and trailing zeros.
+ */
+STATIC u16 fm10k_crc_16b(const u32 *data, u16 seed, u16 len)
+{
+ u32 result = seed;
+
+ while (len--) {
+ result ^= *(data++);
+ result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF];
+ result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF];
+
+ if (!(len--))
+ break;
+
+ result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF];
+ result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF];
+ }
+
+ return (u16)result;
+}
+
+/**
+ * fm10k_fifo_crc - generate a CRC based off of FIFO data
+ * @fifo: pointer to FIFO
+ * @offset: offset point for start of FIFO
+ * @len: number of DWORDS words to process
+ * @seed: seed value for CRC
+ *
+ * This function generates a CRC for some region of the FIFO
+ **/
+STATIC u16 fm10k_fifo_crc(struct fm10k_mbx_fifo *fifo, u16 offset,
+ u16 len, u16 seed)
+{
+ u32 *data = fifo->buffer + offset;
+
+ /* track when we should cross the end of the FIFO */
+ offset = fifo->size - offset;
+
+ /* if we are in 2 blocks process the end of the FIFO first */
+ if (offset < len) {
+ seed = fm10k_crc_16b(data, seed, offset * 2);
+ data = fifo->buffer;
+ len -= offset;
+ }
+
+ /* process any remaining bits */
+ return fm10k_crc_16b(data, seed, len * 2);
+}
+
+/**
+ * fm10k_mbx_update_local_crc - Update the local CRC for outgoing data
+ * @mbx: pointer to mailbox
+ * @head: head index provided by remote mailbox
+ *
+ * This function will generate the CRC for all data from the end of the
+ * last head update to the current one. It uses the result of the
+ * previous CRC as the seed for this update. The result is stored in
+ * mbx->local.
+ **/
+STATIC void fm10k_mbx_update_local_crc(struct fm10k_mbx_info *mbx, u16 head)
+{
+ u16 len = mbx->tail_len - fm10k_mbx_index_len(mbx, head, mbx->tail);
+
+ /* determine the offset for the start of the region to be pulled */
+ head = fm10k_fifo_head_offset(&mbx->tx, mbx->pulled);
+
+ /* update local CRC to include all of the pulled data */
+ mbx->local = fm10k_fifo_crc(&mbx->tx, head, len, mbx->local);
+}
+
+/**
+ * fm10k_mbx_verify_remote_crc - Verify the CRC is correct for current data
+ * @mbx: pointer to mailbox
+ *
+ * This function will take all data that has been provided from the remote
+ * end and generate a CRC for it. This is stored in mbx->remote. The
+ * CRC for the header is then computed and if the result is non-zero this
+ * is an error and we signal an error dropping all data and resetting the
+ * connection.
+ */
+STATIC s32 fm10k_mbx_verify_remote_crc(struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ u16 len = mbx->head_len;
+ u16 offset = fm10k_fifo_tail_offset(fifo, mbx->pushed) - len;
+ u16 crc;
+
+ /* update the remote CRC if new data has been received */
+ if (len)
+ mbx->remote = fm10k_fifo_crc(fifo, offset, len, mbx->remote);
+
+ /* process the full header as we have to validate the CRC */
+ crc = fm10k_crc_16b(&mbx->mbx_hdr, mbx->remote, 1);
+
+ /* notify other end if we have a problem */
+ return crc ? FM10K_MBX_ERR_CRC : FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_rx_ready - Indicates that a message is ready in the Rx FIFO
+ * @mbx: pointer to mailbox
+ *
+ * This function returns true if there is a message in the Rx FIFO to dequeue.
+ **/
+STATIC bool fm10k_mbx_rx_ready(struct fm10k_mbx_info *mbx)
+{
+ u16 msg_size = fm10k_fifo_head_len(&mbx->rx);
+
+ return msg_size && (fm10k_fifo_used(&mbx->rx) >= msg_size);
+}
+
+/**
+ * fm10k_mbx_tx_ready - Indicates that the mailbox is in state ready for Tx
+ * @mbx: pointer to mailbox
+ * @len: verify free space is >= this value
+ *
+ * This function returns true if the mailbox is in a state ready to transmit.
+ **/
+STATIC bool fm10k_mbx_tx_ready(struct fm10k_mbx_info *mbx, u16 len)
+{
+ u16 fifo_unused = fm10k_fifo_unused(&mbx->tx);
+
+ return (mbx->state == FM10K_STATE_OPEN) && (fifo_unused >= len);
+}
+
+/**
+ * fm10k_mbx_tx_complete - Indicates that the Tx FIFO has been emptied
+ * @mbx: pointer to mailbox
+ *
+ * This function returns true if the Tx FIFO is empty.
+ **/
+STATIC bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx)
+{
+ return fm10k_fifo_empty(&mbx->tx);
+}
+
+/**
+ * fm10k_mbx_deqeueue_rx - Dequeues the message from the head in the Rx FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function dequeues messages and hands them off to the TLV parser.
+ * It will return the number of messages processed when called.
+ **/
+STATIC u16 fm10k_mbx_dequeue_rx(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ s32 err;
+ u16 cnt;
+
+ /* parse Rx messages out of the Rx FIFO to empty it */
+ for (cnt = 0; !fm10k_fifo_empty(fifo); cnt++) {
+ err = fm10k_tlv_msg_parse(hw, fifo->buffer + fifo->head,
+ mbx, mbx->msg_data);
+ if (err < 0)
+ mbx->rx_parse_err++;
+
+ fm10k_fifo_head_drop(fifo);
+ }
+
+ /* shift remaining bytes back to start of FIFO */
+ memmove(fifo->buffer, fifo->buffer + fifo->tail, mbx->pushed << 2);
+
+ /* shift head and tail based on the memory we moved */
+ fifo->tail -= fifo->head;
+ fifo->head = 0;
+
+ return cnt;
+}
+
+/**
+ * fm10k_mbx_enqueue_tx - Enqueues the message to the tail of the Tx FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @msg: message array to read
+ *
+ * This function enqueues a message up to the size specified by the length
+ * contained in the first DWORD of the message and will place at the tail
+ * of the FIFO. It will return 0 on success, or a negative value on error.
+ **/
+STATIC s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, const u32 *msg)
+{
+ u32 countdown = mbx->timeout;
+ s32 err;
+
+ switch (mbx->state) {
+ case FM10K_STATE_CLOSED:
+ case FM10K_STATE_DISCONNECT:
+ return FM10K_MBX_ERR_NO_MBX;
+ default:
+ break;
+ }
+
+ /* enqueue the message on the Tx FIFO */
+ err = fm10k_fifo_enqueue(&mbx->tx, msg);
+
+ /* if it failed give the FIFO a chance to drain */
+ while (err && countdown) {
+ countdown--;
+ usec_delay(mbx->usec_delay);
+ mbx->ops.process(hw, mbx);
+ err = fm10k_fifo_enqueue(&mbx->tx, msg);
+ }
+
+ /* if we failed treat the error */
+ if (err) {
+ mbx->timeout = 0;
+ mbx->tx_busy++;
+ }
+
+ /* begin processing message, ignore errors as this is just meant
+ * to start the mailbox flow so we are not concerned if there
+ * is a bad error, or the mailbox is already busy with a request
+ */
+ if (!mbx->tail_len)
+ mbx->ops.process(hw, mbx);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_read - Copies the mbmem to local message buffer
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function copies the message from the mbmem to the message array
+ **/
+STATIC s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
+{
+ DEBUGFUNC("fm10k_mbx_read");
+
+ /* only allow one reader in here at a time */
+ if (mbx->mbx_hdr)
+ return FM10K_MBX_ERR_BUSY;
+
+ /* read to capture initial interrupt bits */
+ if (FM10K_READ_MBX(hw, mbx->mbx_reg) & FM10K_MBX_REQ_INTERRUPT)
+ mbx->mbx_lock = FM10K_MBX_ACK;
+
+ /* write back interrupt bits to clear */
+ FM10K_WRITE_MBX(hw, mbx->mbx_reg,
+ FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT);
+
+ /* read remote header */
+ mbx->mbx_hdr = FM10K_READ_MBX(hw, mbx->mbmem_reg ^ mbx->mbmem_len);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_write - Copies the local message buffer to mbmem
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function copies the message from the message array to mbmem
+ **/
+STATIC void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
+{
+ u32 mbmem = mbx->mbmem_reg;
+
+ DEBUGFUNC("fm10k_mbx_write");
+
+ /* write new msg header to notify recipient of change */
+ FM10K_WRITE_MBX(hw, mbmem, mbx->mbx_hdr);
+
+ /* write mailbox to send interrupt */
+ if (mbx->mbx_lock)
+ FM10K_WRITE_MBX(hw, mbx->mbx_reg, mbx->mbx_lock);
+
+ /* we no longer are using the header so free it */
+ mbx->mbx_hdr = 0;
+ mbx->mbx_lock = 0;
+}
+
+/**
+ * fm10k_mbx_create_connect_hdr - Generate a connect mailbox header
+ * @mbx: pointer to mailbox
+ *
+ * This function returns a connection mailbox header
+ **/
+STATIC void fm10k_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx)
+{
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_CONNECT, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->rx.size - 1, CONNECT_SIZE);
+}
+
+/**
+ * fm10k_mbx_create_data_hdr - Generate a data mailbox header
+ * @mbx: pointer to mailbox
+ *
+ * This function returns a data mailbox header
+ **/
+STATIC void fm10k_mbx_create_data_hdr(struct fm10k_mbx_info *mbx)
+{
+ u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DATA, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD);
+ struct fm10k_mbx_fifo *fifo = &mbx->tx;
+ u16 crc;
+
+ if (mbx->tail_len)
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ /* generate CRC for data in flight and header */
+ crc = fm10k_fifo_crc(fifo, fm10k_fifo_head_offset(fifo, mbx->pulled),
+ mbx->tail_len, mbx->local);
+ crc = fm10k_crc_16b(&hdr, crc, 1);
+
+ /* load header to memory to be written */
+ mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC);
+}
+
+/**
+ * fm10k_mbx_create_disconnect_hdr - Generate a disconnect mailbox header
+ * @mbx: pointer to mailbox
+ *
+ * This function returns a disconnect mailbox header
+ **/
+STATIC void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx)
+{
+ u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD);
+ u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1);
+
+ mbx->mbx_lock |= FM10K_MBX_ACK;
+
+ /* load header to memory to be written */
+ mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC);
+}
+
+/**
+ * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mbox hdr
+ * @mbx: pointer to mailbox
+ *
+ * This function creates a fake disconnect header for loading into remote
+ * mailbox header. The primary purpose is to prevent errors on immediate
+ * start up after mbx->connect.
+ **/
+STATIC void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx)
+{
+ u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->tail, HEAD);
+ u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1);
+
+ mbx->mbx_lock |= FM10K_MBX_ACK;
+
+ /* load header to memory to be written */
+ mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC);
+}
+
+/**
+ * fm10k_mbx_create_error_msg - Generate an error message
+ * @mbx: pointer to mailbox
+ * @err: local error encountered
+ *
+ * This function will interpret the error provided by err, and based on
+ * that it may shift the message by 1 DWORD and then place an error header
+ * at the start of the message.
+ **/
+STATIC void fm10k_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err)
+{
+ /* only generate an error message for these types */
+ switch (err) {
+ case FM10K_MBX_ERR_TAIL:
+ case FM10K_MBX_ERR_HEAD:
+ case FM10K_MBX_ERR_TYPE:
+ case FM10K_MBX_ERR_SIZE:
+ case FM10K_MBX_ERR_RSVD0:
+ case FM10K_MBX_ERR_CRC:
+ break;
+ default:
+ return;
+ }
+
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_ERROR, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(err, ERR_NO) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD);
+}
+
+/**
+ * fm10k_mbx_validate_msg_hdr - Validate common fields in the message header
+ * @mbx: pointer to mailbox
+ *
+ * This function will parse up the fields in the mailbox header and return
+ * an error if the header contains any of a number of invalid configurations
+ * including unrecognized type, invalid route, or a malformed message.
+ **/
+STATIC s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx)
+{
+ u16 type, rsvd0, head, tail, size;
+ const u32 *hdr = &mbx->mbx_hdr;
+
+ DEBUGFUNC("fm10k_mbx_validate_msg_hdr");
+
+ type = FM10K_MSG_HDR_FIELD_GET(*hdr, TYPE);
+ rsvd0 = FM10K_MSG_HDR_FIELD_GET(*hdr, RSVD0);
+ tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL);
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+ size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE);
+
+ if (rsvd0)
+ return FM10K_MBX_ERR_RSVD0;
+
+ switch (type) {
+ case FM10K_MSG_DISCONNECT:
+ /* validate that all data has been received */
+ if (tail != mbx->head)
+ return FM10K_MBX_ERR_TAIL;
+
+ /* fall through */
+ case FM10K_MSG_DATA:
+ /* validate that head is moving correctly */
+ if (!head || (head == FM10K_MSG_HDR_MASK(HEAD)))
+ return FM10K_MBX_ERR_HEAD;
+ if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len)
+ return FM10K_MBX_ERR_HEAD;
+
+ /* validate that tail is moving correctly */
+ if (!tail || (tail == FM10K_MSG_HDR_MASK(TAIL)))
+ return FM10K_MBX_ERR_TAIL;
+ if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len)
+ break;
+
+ return FM10K_MBX_ERR_TAIL;
+ case FM10K_MSG_CONNECT:
+ /* validate size is in range and is power of 2 mask */
+ if ((size < FM10K_VFMBX_MSG_MTU) || (size & (size + 1)))
+ return FM10K_MBX_ERR_SIZE;
+
+ /* fall through */
+ case FM10K_MSG_ERROR:
+ if (!head || (head == FM10K_MSG_HDR_MASK(HEAD)))
+ return FM10K_MBX_ERR_HEAD;
+ /* neither create nor error include a tail offset */
+ if (tail)
+ return FM10K_MBX_ERR_TAIL;
+
+ break;
+ default:
+ return FM10K_MBX_ERR_TYPE;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_create_reply - Generate reply based on state and remote head
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @head: acknowledgement number
+ *
+ * This function will generate an outgoing message based on the current
+ * mailbox state and the remote FIFO head. It will return the length
+ * of the outgoing message excluding header on success, and a negative value
+ * on error.
+ **/
+STATIC s32 fm10k_mbx_create_reply(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, u16 head)
+{
+ switch (mbx->state) {
+ case FM10K_STATE_OPEN:
+ case FM10K_STATE_DISCONNECT:
+ /* update our checksum for the outgoing data */
+ fm10k_mbx_update_local_crc(mbx, head);
+
+ /* as long as other end recognizes us keep sending data */
+ fm10k_mbx_pull_head(hw, mbx, head);
+
+ /* generate new header based on data */
+ if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN))
+ fm10k_mbx_create_data_hdr(mbx);
+ else
+ fm10k_mbx_create_disconnect_hdr(mbx);
+ break;
+ case FM10K_STATE_CONNECT:
+ /* send disconnect even if we aren't connected */
+ fm10k_mbx_create_connect_hdr(mbx);
+ break;
+ case FM10K_STATE_CLOSED:
+ /* generate new header based on data */
+ fm10k_mbx_create_disconnect_hdr(mbx);
+ default:
+ break;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_reset_work- Reset internal pointers for any pending work
+ * @mbx: pointer to mailbox
+ *
+ * This function will reset all internal pointers so any work in progress
+ * is dropped. This call should occur every time we transition from the
+ * open state to the connect state.
+ **/
+STATIC void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx)
+{
+ u16 len, head, ack;
+
+ /* reset our outgoing max size back to Rx limits */
+ mbx->max_size = mbx->rx.size - 1;
+
+ /* update mbx->pulled to account for tail_len and ack */
+ head = FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, HEAD);
+ ack = fm10k_mbx_index_len(mbx, head, mbx->tail);
+ mbx->pulled += mbx->tail_len - ack;
+
+ /* now drop any messages which have started or finished transmitting */
+ while (fm10k_fifo_head_len(&mbx->tx) && mbx->pulled) {
+ len = fm10k_fifo_head_drop(&mbx->tx);
+ mbx->tx_dropped++;
+ if (mbx->pulled >= len)
+ mbx->pulled -= len;
+ else
+ mbx->pulled = 0;
+ }
+
+ /* just do a quick resysnc to start of message */
+ mbx->pushed = 0;
+ mbx->pulled = 0;
+ mbx->tail_len = 0;
+ mbx->head_len = 0;
+ mbx->rx.tail = 0;
+ mbx->rx.head = 0;
+}
+
+/**
+ * fm10k_mbx_update_max_size - Update the max_size and drop any large messages
+ * @mbx: pointer to mailbox
+ * @size: new value for max_size
+ *
+ * This function updates the max_size value and drops any outgoing messages
+ * at the head of the Tx FIFO if they are larger than max_size. It does not
+ * drop all messages, as this is too difficult to parse and remove them from
+ * the FIFO. Instead, rely on the checking to ensure that messages larger
+ * than max_size aren't pushed into the memory buffer.
+ **/
+STATIC void fm10k_mbx_update_max_size(struct fm10k_mbx_info *mbx, u16 size)
+{
+ u16 len;
+
+ DEBUGFUNC("fm10k_mbx_update_max_size");
+
+ mbx->max_size = size;
+
+ /* flush any oversized messages from the queue */
+ for (len = fm10k_fifo_head_len(&mbx->tx);
+ len > size;
+ len = fm10k_fifo_head_len(&mbx->tx)) {
+ fm10k_fifo_head_drop(&mbx->tx);
+ mbx->tx_dropped++;
+ }
+}
+
+/**
+ * fm10k_mbx_connect_reset - Reset following request for reset
+ * @mbx: pointer to mailbox
+ *
+ * This function resets the mailbox to either a disconnected state
+ * or a connect state depending on the current mailbox state
+ **/
+STATIC void fm10k_mbx_connect_reset(struct fm10k_mbx_info *mbx)
+{
+ /* just do a quick resysnc to start of frame */
+ fm10k_mbx_reset_work(mbx);
+
+ /* reset CRC seeds */
+ mbx->local = FM10K_MBX_CRC_SEED;
+ mbx->remote = FM10K_MBX_CRC_SEED;
+
+ /* we cannot exit connect until the size is good */
+ if (mbx->state == FM10K_STATE_OPEN)
+ mbx->state = FM10K_STATE_CONNECT;
+ else
+ mbx->state = FM10K_STATE_CLOSED;
+}
+
+/**
+ * fm10k_mbx_process_connect - Process connect header
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will read an incoming connect header and reply with the
+ * appropriate message. It will return a value indicating the number of
+ * data DWORDs on success, or will return a negative value on failure.
+ **/
+STATIC s32 fm10k_mbx_process_connect(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const enum fm10k_mbx_state state = mbx->state;
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 size, head;
+
+ /* we will need to pull all of the fields for verification */
+ size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE);
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+
+ switch (state) {
+ case FM10K_STATE_DISCONNECT:
+ case FM10K_STATE_OPEN:
+ /* reset any in-progress work */
+ fm10k_mbx_connect_reset(mbx);
+ break;
+ case FM10K_STATE_CONNECT:
+ /* we cannot exit connect until the size is good */
+ if (size > mbx->rx.size) {
+ mbx->max_size = mbx->rx.size - 1;
+ } else {
+ /* record the remote system requesting connection */
+ mbx->state = FM10K_STATE_OPEN;
+
+ fm10k_mbx_update_max_size(mbx, size);
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* align our tail index to remote head index */
+ mbx->tail = head;
+
+ return fm10k_mbx_create_reply(hw, mbx, head);
+}
+
+/**
+ * fm10k_mbx_process_data - Process data header
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will read an incoming data header and reply with the
+ * appropriate message. It will return a value indicating the number of
+ * data DWORDs on success, or will return a negative value on failure.
+ **/
+STATIC s32 fm10k_mbx_process_data(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 head, tail;
+ s32 err;
+
+ DEBUGFUNC("fm10k_mbx_process_data");
+
+ /* we will need to pull all of the fields for verification */
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+ tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL);
+
+ /* if we are in connect just update our data and go */
+ if (mbx->state == FM10K_STATE_CONNECT) {
+ mbx->tail = head;
+ mbx->state = FM10K_STATE_OPEN;
+ }
+
+ /* abort on message size errors */
+ err = fm10k_mbx_push_tail(hw, mbx, tail);
+ if (err < 0)
+ return err;
+
+ /* verify the checksum on the incoming data */
+ err = fm10k_mbx_verify_remote_crc(mbx);
+ if (err)
+ return err;
+
+ /* process messages if we have received any */
+ fm10k_mbx_dequeue_rx(hw, mbx);
+
+ return fm10k_mbx_create_reply(hw, mbx, head);
+}
+
+/**
+ * fm10k_mbx_process_disconnect - Process disconnect header
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will read an incoming disconnect header and reply with the
+ * appropriate message. It will return a value indicating the number of
+ * data DWORDs on success, or will return a negative value on failure.
+ **/
+STATIC s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const enum fm10k_mbx_state state = mbx->state;
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 head;
+ s32 err;
+
+ /* we will need to pull the header field for verification */
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+
+ /* We should not be receiving disconnect if Rx is incomplete */
+ if (mbx->pushed)
+ return FM10K_MBX_ERR_TAIL;
+
+ /* we have already verified mbx->head == tail so we know this is 0 */
+ mbx->head_len = 0;
+
+ /* verify the checksum on the incoming header is correct */
+ err = fm10k_mbx_verify_remote_crc(mbx);
+ if (err)
+ return err;
+
+ switch (state) {
+ case FM10K_STATE_DISCONNECT:
+ case FM10K_STATE_OPEN:
+ /* state doesn't change if we still have work to do */
+ if (!fm10k_mbx_tx_complete(mbx))
+ break;
+
+ /* verify the head indicates we completed all transmits */
+ if (head != mbx->tail)
+ return FM10K_MBX_ERR_HEAD;
+
+ /* reset any in-progress work */
+ fm10k_mbx_connect_reset(mbx);
+ break;
+ default:
+ break;
+ }
+
+ return fm10k_mbx_create_reply(hw, mbx, head);
+}
+
+/**
+ * fm10k_mbx_process_error - Process error header
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will read an incoming error header and reply with the
+ * appropriate message. It will return a value indicating the number of
+ * data DWORDs on success, or will return a negative value on failure.
+ **/
+STATIC s32 fm10k_mbx_process_error(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 head;
+
+ /* we will need to pull all of the fields for verification */
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+
+ switch (mbx->state) {
+ case FM10K_STATE_OPEN:
+ case FM10K_STATE_DISCONNECT:
+ /* flush any uncompleted work */
+ fm10k_mbx_reset_work(mbx);
+
+ /* reset CRC seeds */
+ mbx->local = FM10K_MBX_CRC_SEED;
+ mbx->remote = FM10K_MBX_CRC_SEED;
+
+ /* reset tail index and size to prepare for reconnect */
+ mbx->tail = head;
+
+ /* if open then reset max_size and go back to connect */
+ if (mbx->state == FM10K_STATE_OPEN) {
+ mbx->state = FM10K_STATE_CONNECT;
+ break;
+ }
+
+ /* send a connect message to get data flowing again */
+ fm10k_mbx_create_connect_hdr(mbx);
+ return FM10K_SUCCESS;
+ default:
+ break;
+ }
+
+ return fm10k_mbx_create_reply(hw, mbx, mbx->tail);
+}
+
+/**
+ * fm10k_mbx_process - Process mailbox interrupt
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will process incoming mailbox events and generate mailbox
+ * replies. It will return a value indicating the number of DWORDs
+ * transmitted excluding header on success or a negative value on error.
+ **/
+STATIC s32 fm10k_mbx_process(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ s32 err;
+
+ DEBUGFUNC("fm10k_mbx_process");
+
+ /* we do not read mailbox if closed */
+ if (mbx->state == FM10K_STATE_CLOSED)
+ return FM10K_SUCCESS;
+
+ /* copy data from mailbox */
+ err = fm10k_mbx_read(hw, mbx);
+ if (err)
+ return err;
+
+ /* validate type, source, and destination */
+ err = fm10k_mbx_validate_msg_hdr(mbx);
+ if (err < 0)
+ goto msg_err;
+
+ switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, TYPE)) {
+ case FM10K_MSG_CONNECT:
+ err = fm10k_mbx_process_connect(hw, mbx);
+ break;
+ case FM10K_MSG_DATA:
+ err = fm10k_mbx_process_data(hw, mbx);
+ break;
+ case FM10K_MSG_DISCONNECT:
+ err = fm10k_mbx_process_disconnect(hw, mbx);
+ break;
+ case FM10K_MSG_ERROR:
+ err = fm10k_mbx_process_error(hw, mbx);
+ break;
+ default:
+ err = FM10K_MBX_ERR_TYPE;
+ break;
+ }
+
+msg_err:
+ /* notify partner of errors on our end */
+ if (err < 0)
+ fm10k_mbx_create_error_msg(mbx, err);
+
+ /* copy data from mailbox */
+ fm10k_mbx_write(hw, mbx);
+
+ return err;
+}
+
+/**
+ * fm10k_mbx_disconnect - Shutdown mailbox connection
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will shut down the mailbox. It places the mailbox first
+ * in the disconnect state, it then allows up to a predefined timeout for
+ * the mailbox to transition to close on its own. If this does not occur
+ * then the mailbox will be forced into the closed state.
+ *
+ * Any mailbox transactions not completed before calling this function
+ * are not guaranteed to complete and may be dropped.
+ **/
+STATIC void fm10k_mbx_disconnect(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ int timeout = mbx->timeout ? FM10K_MBX_DISCONNECT_TIMEOUT : 0;
+
+ DEBUGFUNC("fm10k_mbx_disconnect");
+
+ /* Place mbx in ready to disconnect state */
+ mbx->state = FM10K_STATE_DISCONNECT;
+
+ /* trigger interrupt to start shutdown process */
+ FM10K_WRITE_MBX(hw, mbx->mbx_reg, FM10K_MBX_REQ |
+ FM10K_MBX_INTERRUPT_DISABLE);
+ do {
+ usec_delay(FM10K_MBX_POLL_DELAY);
+ mbx->ops.process(hw, mbx);
+ timeout -= FM10K_MBX_POLL_DELAY;
+ } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED));
+
+ /* in case we didn't close, just force the mailbox into shutdown and
+ * drop all left over messages in the FIFO.
+ */
+ fm10k_mbx_connect_reset(mbx);
+ fm10k_fifo_drop_all(&mbx->tx);
+
+ FM10K_WRITE_MBX(hw, mbx->mbmem_reg, 0);
+}
+
+/**
+ * fm10k_mbx_connect - Start mailbox connection
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will initiate a mailbox connection. It will populate the
+ * mailbox with a broadcast connect message and then initialize the lock.
+ * This is safe since the connect message is a single DWORD so the mailbox
+ * transaction is guaranteed to be atomic.
+ *
+ * This function will return an error if the mailbox has not been initiated
+ * or is currently in use.
+ **/
+STATIC s32 fm10k_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
+{
+ DEBUGFUNC("fm10k_mbx_connect");
+
+ /* we cannot connect an uninitialized mailbox */
+ if (!mbx->rx.buffer)
+ return FM10K_MBX_ERR_NO_SPACE;
+
+ /* we cannot connect an already connected mailbox */
+ if (mbx->state != FM10K_STATE_CLOSED)
+ return FM10K_MBX_ERR_BUSY;
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = FM10K_MBX_INIT_TIMEOUT;
+
+ /* Place mbx in ready to connect state */
+ mbx->state = FM10K_STATE_CONNECT;
+
+ fm10k_mbx_reset_work(mbx);
+
+ /* initialize header of remote mailbox */
+ fm10k_mbx_create_fake_disconnect_hdr(mbx);
+ FM10K_WRITE_MBX(hw, mbx->mbmem_reg ^ mbx->mbmem_len, mbx->mbx_hdr);
+
+ /* enable interrupt and notify other party of new message */
+ mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT |
+ FM10K_MBX_INTERRUPT_ENABLE;
+
+ /* generate and load connect header into mailbox */
+ fm10k_mbx_create_connect_hdr(mbx);
+ fm10k_mbx_write(hw, mbx);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_validate_handlers - Validate layout of message parsing data
+ * @msg_data: handlers for mailbox events
+ *
+ * This function validates the layout of the message parsing data. This
+ * should be mostly static, but it is important to catch any errors that
+ * are made when constructing the parsers.
+ **/
+STATIC s32 fm10k_mbx_validate_handlers(const struct fm10k_msg_data *msg_data)
+{
+ const struct fm10k_tlv_attr *attr;
+ unsigned int id;
+
+ DEBUGFUNC("fm10k_mbx_validate_handlers");
+
+ /* Allow NULL mailboxes that transmit but don't receive */
+ if (!msg_data)
+ return FM10K_SUCCESS;
+
+ while (msg_data->id != FM10K_TLV_ERROR) {
+ /* all messages should have a function handler */
+ if (!msg_data->func)
+ return FM10K_ERR_PARAM;
+
+ /* parser is optional */
+ attr = msg_data->attr;
+ if (attr) {
+ while (attr->id != FM10K_TLV_ERROR) {
+ id = attr->id;
+ attr++;
+ /* ID should always be increasing */
+ if (id >= attr->id)
+ return FM10K_ERR_PARAM;
+ /* ID should fit in results array */
+ if (id >= FM10K_TLV_RESULTS_MAX)
+ return FM10K_ERR_PARAM;
+ }
+
+ /* verify terminator is in the list */
+ if (attr->id != FM10K_TLV_ERROR)
+ return FM10K_ERR_PARAM;
+ }
+
+ id = msg_data->id;
+ msg_data++;
+ /* ID should always be increasing */
+ if (id >= msg_data->id)
+ return FM10K_ERR_PARAM;
+ }
+
+ /* verify terminator is in the list */
+ if ((msg_data->id != FM10K_TLV_ERROR) || !msg_data->func)
+ return FM10K_ERR_PARAM;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_register_handlers - Register a set of handler ops for mailbox
+ * @mbx: pointer to mailbox
+ * @msg_data: handlers for mailbox events
+ *
+ * This function associates a set of message handling ops with a mailbox.
+ **/
+STATIC s32 fm10k_mbx_register_handlers(struct fm10k_mbx_info *mbx,
+ const struct fm10k_msg_data *msg_data)
+{
+ DEBUGFUNC("fm10k_mbx_register_handlers");
+
+ /* validate layout of handlers before assigning them */
+ if (fm10k_mbx_validate_handlers(msg_data))
+ return FM10K_ERR_PARAM;
+
+ /* initialize the message handlers */
+ mbx->msg_data = msg_data;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_pfvf_mbx_init - Initialize mailbox memory for PF/VF mailbox
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @msg_data: handlers for mailbox events
+ * @id: ID reference for PF as it supports up to 64 PF/VF mailboxes
+ *
+ * This function initializes the mailbox for use. It will split the
+ * buffer provided and use that to populate both the Tx and Rx FIFO by
+ * evenly splitting it. In order to allow for easy masking of head/tail
+ * the value reported in size must be a power of 2 and is reported in
+ * DWORDs, not bytes. Any invalid values will cause the mailbox to return
+ * error.
+ **/
+s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
+ const struct fm10k_msg_data *msg_data, u8 id)
+{
+ DEBUGFUNC("fm10k_pfvf_mbx_init");
+
+ /* initialize registers */
+ switch (hw->mac.type) {
+ case fm10k_mac_vf:
+ mbx->mbx_reg = FM10K_VFMBX;
+ mbx->mbmem_reg = FM10K_VFMBMEM(FM10K_VFMBMEM_VF_XOR);
+ break;
+ case fm10k_mac_pf:
+ /* there are only 64 VF <-> PF mailboxes */
+ if (id < 64) {
+ mbx->mbx_reg = FM10K_MBX(id);
+ mbx->mbmem_reg = FM10K_MBMEM_VF(id, 0);
+ break;
+ }
+ /* fallthough */
+ default:
+ return FM10K_MBX_ERR_NO_MBX;
+ }
+
+ /* start out in closed state */
+ mbx->state = FM10K_STATE_CLOSED;
+
+ /* validate layout of handlers before assigning them */
+ if (fm10k_mbx_validate_handlers(msg_data))
+ return FM10K_ERR_PARAM;
+
+ /* initialize the message handlers */
+ mbx->msg_data = msg_data;
+
+ /* start mailbox as timed out and let the reset_hw call
+ * set the timeout value to begin communications
+ */
+ mbx->timeout = 0;
+ mbx->usec_delay = FM10K_MBX_INIT_DELAY;
+
+ /* initialize tail and head */
+ mbx->tail = 1;
+ mbx->head = 1;
+
+ /* initialize CRC seeds */
+ mbx->local = FM10K_MBX_CRC_SEED;
+ mbx->remote = FM10K_MBX_CRC_SEED;
+
+ /* Split buffer for use by Tx/Rx FIFOs */
+ mbx->max_size = FM10K_MBX_MSG_MAX_SIZE;
+ mbx->mbmem_len = FM10K_VFMBMEM_VF_XOR;
+
+ /* initialize the FIFOs, sizes are in 4 byte increments */
+ fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE);
+ fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE],
+ FM10K_MBX_RX_BUFFER_SIZE);
+
+ /* initialize function pointers */
+ mbx->ops.connect = fm10k_mbx_connect;
+ mbx->ops.disconnect = fm10k_mbx_disconnect;
+ mbx->ops.rx_ready = fm10k_mbx_rx_ready;
+ mbx->ops.tx_ready = fm10k_mbx_tx_ready;
+ mbx->ops.tx_complete = fm10k_mbx_tx_complete;
+ mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx;
+ mbx->ops.process = fm10k_mbx_process;
+ mbx->ops.register_handlers = fm10k_mbx_register_handlers;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_sm_mbx_create_data_hdr - Generate a mailbox header for local FIFO
+ * @mbx: pointer to mailbox
+ *
+ * This function returns a data mailbox header
+ **/
+STATIC void fm10k_sm_mbx_create_data_hdr(struct fm10k_mbx_info *mbx)
+{
+ if (mbx->tail_len)
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD);
+}
+
+/**
+ * fm10k_sm_mbx_create_connect_hdr - Generate a mailbox header for local FIFO
+ * @mbx: pointer to mailbox
+ * @err: error flags to report if any
+ *
+ * This function returns a connection mailbox header
+ **/
+STATIC void fm10k_sm_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx, u8 err)
+{
+ if (mbx->local)
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD) |
+ FM10K_MSG_HDR_FIELD_SET(err, SM_ERR);
+}
+
+/**
+ * fm10k_sm_mbx_connect_reset - Reset following request for reset
+ * @mbx: pointer to mailbox
+ *
+ * This function resets the mailbox to a just connected state
+ **/
+STATIC void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx)
+{
+ /* flush any uncompleted work */
+ fm10k_mbx_reset_work(mbx);
+
+ /* set local version to max and remote version to 0 */
+ mbx->local = FM10K_SM_MBX_VERSION;
+ mbx->remote = 0;
+
+ /* initialize tail and head */
+ mbx->tail = 1;
+ mbx->head = 1;
+
+ /* reset state back to connect */
+ mbx->state = FM10K_STATE_CONNECT;
+}
+
+/**
+ * fm10k_sm_mbx_connect - Start switch manager mailbox connection
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will initiate a mailbox connection with the switch
+ * manager. To do this it will first disconnect the mailbox, and then
+ * reconnect it in order to complete a reset of the mailbox.
+ *
+ * This function will return an error if the mailbox has not been initiated
+ * or is currently in use.
+ **/
+STATIC s32 fm10k_sm_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
+{
+ DEBUGFUNC("fm10k_sm_mbx_connect");
+
+ /* we cannot connect an uninitialized mailbox */
+ if (!mbx->rx.buffer)
+ return FM10K_MBX_ERR_NO_SPACE;
+
+ /* we cannot connect an already connected mailbox */
+ if (mbx->state != FM10K_STATE_CLOSED)
+ return FM10K_MBX_ERR_BUSY;
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = FM10K_MBX_INIT_TIMEOUT;
+
+ /* Place mbx in ready to connect state */
+ mbx->state = FM10K_STATE_CONNECT;
+ mbx->max_size = FM10K_MBX_MSG_MAX_SIZE;
+
+ /* reset interface back to connect */
+ fm10k_sm_mbx_connect_reset(mbx);
+
+ /* enable interrupt and notify other party of new message */
+ mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT |
+ FM10K_MBX_INTERRUPT_ENABLE;
+
+ /* generate and load connect header into mailbox */
+ fm10k_sm_mbx_create_connect_hdr(mbx, 0);
+ fm10k_mbx_write(hw, mbx);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_sm_mbx_disconnect - Shutdown mailbox connection
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will shut down the mailbox. It places the mailbox first
+ * in the disconnect state, it then allows up to a predefined timeout for
+ * the mailbox to transition to close on its own. If this does not occur
+ * then the mailbox will be forced into the closed state.
+ *
+ * Any mailbox transactions not completed before calling this function
+ * are not guaranteed to complete and may be dropped.
+ **/
+STATIC void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ int timeout = mbx->timeout ? FM10K_MBX_DISCONNECT_TIMEOUT : 0;
+
+ DEBUGFUNC("fm10k_sm_mbx_disconnect");
+
+ /* Place mbx in ready to disconnect state */
+ mbx->state = FM10K_STATE_DISCONNECT;
+
+ /* trigger interrupt to start shutdown process */
+ FM10K_WRITE_REG(hw, mbx->mbx_reg, FM10K_MBX_REQ |
+ FM10K_MBX_INTERRUPT_DISABLE);
+ do {
+ usec_delay(FM10K_MBX_POLL_DELAY);
+ mbx->ops.process(hw, mbx);
+ timeout -= FM10K_MBX_POLL_DELAY;
+ } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED));
+
+ /* in case we didn't close just force the mailbox into shutdown */
+ mbx->state = FM10K_STATE_CLOSED;
+ mbx->remote = 0;
+ fm10k_mbx_reset_work(mbx);
+ fm10k_fifo_drop_all(&mbx->tx);
+
+ FM10K_WRITE_REG(hw, mbx->mbmem_reg, 0);
+}
+
+/**
+ * fm10k_sm_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header
+ * @mbx: pointer to mailbox
+ *
+ * This function will parse up the fields in the mailbox header and return
+ * an error if the header contains any of a number of invalid configurations
+ * including unrecognized offsets or version numbers.
+ **/
+STATIC s32 fm10k_sm_mbx_validate_fifo_hdr(struct fm10k_mbx_info *mbx)
+{
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 tail, head, ver;
+
+ DEBUGFUNC("fm10k_sm_mbx_validate_fifo_hdr");
+
+ tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL);
+ ver = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_VER);
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD);
+
+ switch (ver) {
+ case 0:
+ break;
+ case FM10K_SM_MBX_VERSION:
+ if (!head || head > FM10K_SM_MBX_FIFO_LEN)
+ return FM10K_MBX_ERR_HEAD;
+ if (!tail || tail > FM10K_SM_MBX_FIFO_LEN)
+ return FM10K_MBX_ERR_TAIL;
+ if (mbx->tail < head)
+ head += mbx->mbmem_len - 1;
+ if (tail < mbx->head)
+ tail += mbx->mbmem_len - 1;
+ if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len)
+ return FM10K_MBX_ERR_HEAD;
+ if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len)
+ break;
+ return FM10K_MBX_ERR_TAIL;
+ default:
+ return FM10K_MBX_ERR_SRC;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_sm_mbx_process_error - Process header with error flag set
+ * @mbx: pointer to mailbox
+ *
+ * This function is meant to respond to a request where the error flag
+ * is set. As a result we will terminate a connection if one is present
+ * and fall back into the reset state with a connection header of version
+ * 0 (RESET).
+ **/
+STATIC void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx)
+{
+ const enum fm10k_mbx_state state = mbx->state;
+
+ switch (state) {
+ case FM10K_STATE_DISCONNECT:
+ /* if there is an error just disconnect */
+ mbx->remote = 0;
+ break;
+ case FM10K_STATE_OPEN:
+ /* flush any uncompleted work */
+ fm10k_sm_mbx_connect_reset(mbx);
+ break;
+ case FM10K_STATE_CONNECT:
+ /* try connnecting at lower version */
+ if (mbx->remote) {
+ while (mbx->local > 1)
+ mbx->local--;
+ mbx->remote = 0;
+ }
+ break;
+ default:
+ break;
+ }
+
+ fm10k_sm_mbx_create_connect_hdr(mbx, 0);
+}
+
+/**
+ * fm10k_sm_mbx_create_error_msg - Process an error in FIFO header
+ * @mbx: pointer to mailbox
+ * @err: local error encountered
+ *
+ * This function will interpret the error provided by err, and based on
+ * that it may set the error bit in the local message header
+ **/
+STATIC void fm10k_sm_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err)
+{
+ /* only generate an error message for these types */
+ switch (err) {
+ case FM10K_MBX_ERR_TAIL:
+ case FM10K_MBX_ERR_HEAD:
+ case FM10K_MBX_ERR_SRC:
+ case FM10K_MBX_ERR_SIZE:
+ case FM10K_MBX_ERR_RSVD0:
+ break;
+ default:
+ return;
+ }
+
+ /* process it as though we received an error, and send error reply */
+ fm10k_sm_mbx_process_error(mbx);
+ fm10k_sm_mbx_create_connect_hdr(mbx, 1);
+}
+
+/**
+ * fm10k_sm_mbx_receive - Take message from Rx mailbox FIFO and put it in Rx
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @tail: tail index of message
+ *
+ * This function will dequeue one message from the Rx switch manager mailbox
+ * FIFO and place it in the Rx mailbox FIFO for processing by software.
+ **/
+STATIC s32 fm10k_sm_mbx_receive(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx,
+ u16 tail)
+{
+ /* reduce length by 1 to convert to a mask */
+ u16 mbmem_len = mbx->mbmem_len - 1;
+ s32 err;
+
+ DEBUGFUNC("fm10k_sm_mbx_receive");
+
+ /* push tail in front of head */
+ if (tail < mbx->head)
+ tail += mbmem_len;
+
+ /* copy data to the Rx FIFO */
+ err = fm10k_mbx_push_tail(hw, mbx, tail);
+ if (err < 0)
+ return err;
+
+ /* process messages if we have received any */
+ fm10k_mbx_dequeue_rx(hw, mbx);
+
+ /* guarantee head aligns with the end of the last message */
+ mbx->head = fm10k_mbx_head_sub(mbx, mbx->pushed);
+ mbx->pushed = 0;
+
+ /* clear any extra bits left over since index adds 1 extra bit */
+ if (mbx->head > mbmem_len)
+ mbx->head -= mbmem_len;
+
+ return err;
+}
+
+/**
+ * fm10k_sm_mbx_transmit - Take message from Tx and put it in Tx mailbox FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @head: head index of message
+ *
+ * This function will dequeue one message from the Tx mailbox FIFO and place
+ * it in the Tx switch manager mailbox FIFO for processing by hardware.
+ **/
+STATIC void fm10k_sm_mbx_transmit(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, u16 head)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->tx;
+ /* reduce length by 1 to convert to a mask */
+ u16 mbmem_len = mbx->mbmem_len - 1;
+ u16 tail_len, len = 0;
+ u32 *msg;
+
+ DEBUGFUNC("fm10k_sm_mbx_transmit");
+
+ /* push head behind tail */
+ if (mbx->tail < head)
+ head += mbmem_len;
+
+ fm10k_mbx_pull_head(hw, mbx, head);
+
+ /* determine msg aligned offset for end of buffer */
+ do {
+ msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len);
+ tail_len = len;
+ len += FM10K_TLV_DWORD_LEN(*msg);
+ } while ((len <= mbx->tail_len) && (len < mbmem_len));
+
+ /* guarantee we stop on a message boundary */
+ if (mbx->tail_len > tail_len) {
+ mbx->tail = fm10k_mbx_tail_sub(mbx, mbx->tail_len - tail_len);
+ mbx->tail_len = tail_len;
+ }
+
+ /* clear any extra bits left over since index adds 1 extra bit */
+ if (mbx->tail > mbmem_len)
+ mbx->tail -= mbmem_len;
+}
+
+/**
+ * fm10k_sm_mbx_create_reply - Generate reply based on state and remote head
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @head: acknowledgement number
+ *
+ * This function will generate an outgoing message based on the current
+ * mailbox state and the remote FIFO head. It will return the length
+ * of the outgoing message excluding header on success, and a negative value
+ * on error.
+ **/
+STATIC void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, u16 head)
+{
+ switch (mbx->state) {
+ case FM10K_STATE_OPEN:
+ case FM10K_STATE_DISCONNECT:
+ /* flush out Tx data */
+ fm10k_sm_mbx_transmit(hw, mbx, head);
+
+ /* generate new header based on data */
+ if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN)) {
+ fm10k_sm_mbx_create_data_hdr(mbx);
+ } else {
+ mbx->remote = 0;
+ fm10k_sm_mbx_create_connect_hdr(mbx, 0);
+ }
+ break;
+ case FM10K_STATE_CONNECT:
+ case FM10K_STATE_CLOSED:
+ fm10k_sm_mbx_create_connect_hdr(mbx, 0);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * fm10k_sm_mbx_process_reset - Process header with version == 0 (RESET)
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function is meant to respond to a request where the version data
+ * is set to 0. As such we will either terminate the connection or go
+ * into the connect state in order to re-establish the connection. This
+ * function can also be used to respond to an error as the connection
+ * resetting would also be a means of dealing with errors.
+ **/
+STATIC s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ s32 err = FM10K_SUCCESS;
+ const enum fm10k_mbx_state state = mbx->state;
+
+ switch (state) {
+ case FM10K_STATE_DISCONNECT:
+ /* drop remote connections and disconnect */
+ mbx->state = FM10K_STATE_CLOSED;
+ mbx->remote = 0;
+ mbx->local = 0;
+ break;
+ case FM10K_STATE_OPEN:
+ /* flush any incomplete work */
+ fm10k_sm_mbx_connect_reset(mbx);
+ err = FM10K_ERR_RESET_REQUESTED;
+ break;
+ case FM10K_STATE_CONNECT:
+ /* Update remote value to match local value */
+ mbx->remote = mbx->local;
+ default:
+ break;
+ }
+
+ fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail);
+
+ return err;
+}
+
+/**
+ * fm10k_sm_mbx_process_version_1 - Process header with version == 1
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function is meant to process messages received when the remote
+ * mailbox is active.
+ **/
+STATIC s32 fm10k_sm_mbx_process_version_1(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 head, tail;
+ s32 len;
+
+ /* pull all fields needed for verification */
+ tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL);
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD);
+
+ /* if we are in connect and wanting version 1 then start up and go */
+ if (mbx->state == FM10K_STATE_CONNECT) {
+ if (!mbx->remote)
+ goto send_reply;
+ if (mbx->remote != 1)
+ return FM10K_MBX_ERR_SRC;
+
+ mbx->state = FM10K_STATE_OPEN;
+ }
+
+ do {
+ /* abort on message size errors */
+ len = fm10k_sm_mbx_receive(hw, mbx, tail);
+ if (len < 0)
+ return len;
+
+ /* continue until we have flushed the Rx FIFO */
+ } while (len);
+
+send_reply:
+ fm10k_sm_mbx_create_reply(hw, mbx, head);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_sm_mbx_process - Process switch manager mailbox interrupt
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will process incoming mailbox events and generate mailbox
+ * replies. It will return a value indicating the number of DWORDs
+ * transmitted excluding header on success or a negative value on error.
+ **/
+STATIC s32 fm10k_sm_mbx_process(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ s32 err;
+
+ DEBUGFUNC("fm10k_sm_mbx_process");
+
+ /* we do not read mailbox if closed */
+ if (mbx->state == FM10K_STATE_CLOSED)
+ return FM10K_SUCCESS;
+
+ /* retrieve data from switch manager */
+ err = fm10k_mbx_read(hw, mbx);
+ if (err)
+ return err;
+
+ err = fm10k_sm_mbx_validate_fifo_hdr(mbx);
+ if (err < 0)
+ goto fifo_err;
+
+ if (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_ERR)) {
+ fm10k_sm_mbx_process_error(mbx);
+ goto fifo_err;
+ }
+
+ switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) {
+ case 0:
+ err = fm10k_sm_mbx_process_reset(hw, mbx);
+ break;
+ case FM10K_SM_MBX_VERSION:
+ err = fm10k_sm_mbx_process_version_1(hw, mbx);
+ break;
+ }
+
+fifo_err:
+ if (err < 0)
+ fm10k_sm_mbx_create_error_msg(mbx, err);
+
+ /* report data to switch manager */
+ fm10k_mbx_write(hw, mbx);
+
+ return err;
+}
+
+/**
+ * fm10k_sm_mbx_init - Initialize mailbox memory for PF/SM mailbox
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @msg_data: handlers for mailbox events
+ *
+ * This function initializes the PF/SM mailbox for use. It will split the
+ * buffer provided and use that to populate both the Tx and Rx FIFO by
+ * evenly splitting it. In order to allow for easy masking of head/tail
+ * the value reported in size must be a power of 2 and is reported in
+ * DWORDs, not bytes. Any invalid values will cause the mailbox to return
+ * error.
+ **/
+s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
+ const struct fm10k_msg_data *msg_data)
+{
+ DEBUGFUNC("fm10k_sm_mbx_init");
+ UNREFERENCED_1PARAMETER(hw);
+
+ mbx->mbx_reg = FM10K_GMBX;
+ mbx->mbmem_reg = FM10K_MBMEM_PF(0);
+
+ /* start out in closed state */
+ mbx->state = FM10K_STATE_CLOSED;
+
+ /* validate layout of handlers before assigning them */
+ if (fm10k_mbx_validate_handlers(msg_data))
+ return FM10K_ERR_PARAM;
+
+ /* initialize the message handlers */
+ mbx->msg_data = msg_data;
+
+ /* start mailbox as timed out and let the reset_hw call
+ * set the timeout value to begin communications
+ */
+ mbx->timeout = 0;
+ mbx->usec_delay = FM10K_MBX_INIT_DELAY;
+
+ /* Split buffer for use by Tx/Rx FIFOs */
+ mbx->max_size = FM10K_MBX_MSG_MAX_SIZE;
+ mbx->mbmem_len = FM10K_MBMEM_PF_XOR;
+
+ /* initialize the FIFOs, sizes are in 4 byte increments */
+ fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE);
+ fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE],
+ FM10K_MBX_RX_BUFFER_SIZE);
+
+ /* initialize function pointers */
+ mbx->ops.connect = fm10k_sm_mbx_connect;
+ mbx->ops.disconnect = fm10k_sm_mbx_disconnect;
+ mbx->ops.rx_ready = fm10k_mbx_rx_ready;
+ mbx->ops.tx_ready = fm10k_mbx_tx_ready;
+ mbx->ops.tx_complete = fm10k_mbx_tx_complete;
+ mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx;
+ mbx->ops.process = fm10k_sm_mbx_process;
+ mbx->ops.register_handlers = fm10k_mbx_register_handlers;
+
+ return FM10K_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.h
new file mode 100644
index 00000000..2fac012c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.h
@@ -0,0 +1,326 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_MBX_H_
+#define _FM10K_MBX_H_
+
+/* forward declaration */
+struct fm10k_mbx_info;
+
+#include "fm10k_type.h"
+#include "fm10k_tlv.h"
+
+/* PF Mailbox Registers */
+#define FM10K_MBMEM(_n) ((_n) + 0x18000)
+#define FM10K_MBMEM_VF(_n, _m) (((_n) * 0x10) + (_m) + 0x18000)
+#define FM10K_MBMEM_SM(_n) ((_n) + 0x18400)
+#define FM10K_MBMEM_PF(_n) ((_n) + 0x18600)
+/* XOR provides means of switching from Tx to Rx FIFO */
+#define FM10K_MBMEM_PF_XOR (FM10K_MBMEM_SM(0) ^ FM10K_MBMEM_PF(0))
+#define FM10K_MBX(_n) ((_n) + 0x18800)
+#define FM10K_MBX_REQ 0x00000002
+#define FM10K_MBX_ACK 0x00000004
+#define FM10K_MBX_REQ_INTERRUPT 0x00000008
+#define FM10K_MBX_ACK_INTERRUPT 0x00000010
+#define FM10K_MBX_INTERRUPT_ENABLE 0x00000020
+#define FM10K_MBX_INTERRUPT_DISABLE 0x00000040
+#define FM10K_MBX_GLOBAL_REQ_INTERRUPT 0x00000200
+#define FM10K_MBX_GLOBAL_ACK_INTERRUPT 0x00000400
+#define FM10K_MBICR(_n) ((_n) + 0x18840)
+#define FM10K_GMBX 0x18842
+
+/* VF Mailbox Registers */
+#define FM10K_VFMBX 0x00010
+#define FM10K_VFMBMEM(_n) ((_n) + 0x00020)
+#define FM10K_VFMBMEM_LEN 16
+#define FM10K_VFMBMEM_VF_XOR (FM10K_VFMBMEM_LEN / 2)
+
+/* Delays/timeouts */
+#define FM10K_MBX_DISCONNECT_TIMEOUT 500
+#define FM10K_MBX_POLL_DELAY 19
+#define FM10K_MBX_INT_DELAY 20
+
+#define FM10K_WRITE_MBX(hw, reg, value) FM10K_WRITE_REG(hw, reg, value)
+
+/* PF/VF Mailbox state machine
+ *
+ * +----------+ connect() +----------+
+ * | CLOSED | --------------> | CONNECT |
+ * +----------+ +----------+
+ * ^ ^ |
+ * | rcv: rcv: | | rcv:
+ * | Connect Disconnect | | Connect
+ * | Disconnect Error | | Data
+ * | | |
+ * | | V
+ * +----------+ disconnect() +----------+
+ * |DISCONNECT| <-------------- | OPEN |
+ * +----------+ +----------+
+ *
+ * The diagram above describes the PF/VF mailbox state machine. There
+ * are four main states to this machine.
+ * Closed: This state represents a mailbox that is in a standby state
+ * with interrupts disabled. In this state the mailbox should not
+ * read the mailbox or write any data. The only means of exiting
+ * this state is for the system to make the connect() call for the
+ * mailbox, it will then transition to the connect state.
+ * Connect: In this state the mailbox is seeking a connection. It will
+ * post a connect message with no specified destination and will
+ * wait for a reply from the other side of the mailbox. This state
+ * is exited when either a connect with the local mailbox as the
+ * destination is received or when a data message is received with
+ * a valid sequence number.
+ * Open: In this state the mailbox is able to transfer data between the local
+ * entity and the remote. It will fall back to connect in the event of
+ * receiving either an error message, or a disconnect message. It will
+ * transition to disconnect on a call to disconnect();
+ * Disconnect: In this state the mailbox is attempting to gracefully terminate
+ * the connection. It will do so at the first point where it knows
+ * that the remote endpoint is either done sending, or when the
+ * remote endpoint has fallen back into connect.
+ */
+enum fm10k_mbx_state {
+ FM10K_STATE_CLOSED,
+ FM10K_STATE_CONNECT,
+ FM10K_STATE_OPEN,
+ FM10K_STATE_DISCONNECT,
+};
+
+/* PF/VF Mailbox header format
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Size/Err_no/CRC | Rsvd0 | Head | Tail | Type |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * The layout above describes the format for the header used in the PF/VF
+ * mailbox. The header is broken out into the following fields:
+ * Type: There are 4 supported message types
+ * 0x8: Data header - used to transport message data
+ * 0xC: Connect header - used to establish connection
+ * 0xD: Disconnect header - used to tear down a connection
+ * 0xE: Error header - used to address message exceptions
+ * Tail: Tail index for local FIFO
+ * Tail index actually consists of two parts. The MSB of
+ * the head is a loop tracker, it is 0 on an even numbered
+ * loop through the FIFO, and 1 on the odd numbered loops.
+ * To get the actual mailbox offset based on the tail it
+ * is necessary to add bit 3 to bit 0 and clear bit 3. This
+ * gives us a valid range of 0x1 - 0xE.
+ * Head: Head index for remote FIFO
+ * Head index follows the same format as the tail index.
+ * Rsvd0: Reserved 0 portion of the mailbox header
+ * CRC: Running CRC for all data since connect plus current message header
+ * Size: Maximum message size - Applies only to connect headers
+ * The maximum message size is provided during connect to avoid
+ * jamming the mailbox with messages that do not fit.
+ * Err_no: Error number - Applies only to error headers
+ * The error number provides an indication of the type of error
+ * experienced.
+ */
+
+/* macros for retrieving and setting header values */
+#define FM10K_MSG_HDR_MASK(name) \
+ ((0x1u << FM10K_MSG_##name##_SIZE) - 1)
+#define FM10K_MSG_HDR_FIELD_SET(value, name) \
+ (((u32)(value) & FM10K_MSG_HDR_MASK(name)) << FM10K_MSG_##name##_SHIFT)
+#define FM10K_MSG_HDR_FIELD_GET(value, name) \
+ ((u16)((value) >> FM10K_MSG_##name##_SHIFT) & FM10K_MSG_HDR_MASK(name))
+
+/* offsets shared between all headers */
+#define FM10K_MSG_TYPE_SHIFT 0
+#define FM10K_MSG_TYPE_SIZE 4
+#define FM10K_MSG_TAIL_SHIFT 4
+#define FM10K_MSG_TAIL_SIZE 4
+#define FM10K_MSG_HEAD_SHIFT 8
+#define FM10K_MSG_HEAD_SIZE 4
+#define FM10K_MSG_RSVD0_SHIFT 12
+#define FM10K_MSG_RSVD0_SIZE 4
+
+/* offsets for data/disconnect headers */
+#define FM10K_MSG_CRC_SHIFT 16
+#define FM10K_MSG_CRC_SIZE 16
+
+/* offsets for connect headers */
+#define FM10K_MSG_CONNECT_SIZE_SHIFT 16
+#define FM10K_MSG_CONNECT_SIZE_SIZE 16
+
+/* offsets for error headers */
+#define FM10K_MSG_ERR_NO_SHIFT 16
+#define FM10K_MSG_ERR_NO_SIZE 16
+
+enum fm10k_msg_type {
+ FM10K_MSG_DATA = 0x8,
+ FM10K_MSG_CONNECT = 0xC,
+ FM10K_MSG_DISCONNECT = 0xD,
+ FM10K_MSG_ERROR = 0xE,
+};
+
+/* HNI/SM Mailbox FIFO format
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-------+-----------------------+-------+-----------------------+
+ * | Error | Remote Head |Version| Local Tail |
+ * +-------+-----------------------+-------+-----------------------+
+ * | |
+ * . Local FIFO Data .
+ * . .
+ * +-------+-----------------------+-------+-----------------------+
+ *
+ * The layout above describes the format for the FIFOs used by the host
+ * network interface and the switch manager to communicate messages back
+ * and forth. Both the HNI and the switch maintain one such FIFO. The
+ * layout in memory has the switch manager FIFO followed immediately by
+ * the HNI FIFO. For this reason I am using just the pointer to the
+ * HNI FIFO in the mailbox ops as the offset between the two is fixed.
+ *
+ * The header for the FIFO is broken out into the following fields:
+ * Local Tail: Offset into FIFO region for next DWORD to write.
+ * Version: Version info for mailbox, only values of 0/1 are supported.
+ * Remote Head: Offset into remote FIFO to indicate how much we have read.
+ * Error: Error indication, values TBD.
+ */
+
+/* version number for switch manager mailboxes */
+#define FM10K_SM_MBX_VERSION 1
+#define FM10K_SM_MBX_FIFO_LEN (FM10K_MBMEM_PF_XOR - 1)
+
+/* offsets shared between all SM FIFO headers */
+#define FM10K_MSG_SM_TAIL_SHIFT 0
+#define FM10K_MSG_SM_TAIL_SIZE 12
+#define FM10K_MSG_SM_VER_SHIFT 12
+#define FM10K_MSG_SM_VER_SIZE 4
+#define FM10K_MSG_SM_HEAD_SHIFT 16
+#define FM10K_MSG_SM_HEAD_SIZE 12
+#define FM10K_MSG_SM_ERR_SHIFT 28
+#define FM10K_MSG_SM_ERR_SIZE 4
+
+/* All error messages returned by mailbox functions
+ * The value -511 is 0xFE01 in hex. The idea is to order the errors
+ * from 0xFE01 - 0xFEFF so error codes are easily visible in the mailbox
+ * messages. This also helps to avoid error number collisions as Linux
+ * doesn't appear to use error numbers 256 - 511.
+ */
+#define FM10K_MBX_ERR(_n) ((_n) - 512)
+#define FM10K_MBX_ERR_NO_MBX FM10K_MBX_ERR(0x01)
+#define FM10K_MBX_ERR_NO_SPACE FM10K_MBX_ERR(0x03)
+#define FM10K_MBX_ERR_TAIL FM10K_MBX_ERR(0x05)
+#define FM10K_MBX_ERR_HEAD FM10K_MBX_ERR(0x06)
+#define FM10K_MBX_ERR_SRC FM10K_MBX_ERR(0x08)
+#define FM10K_MBX_ERR_TYPE FM10K_MBX_ERR(0x09)
+#define FM10K_MBX_ERR_SIZE FM10K_MBX_ERR(0x0B)
+#define FM10K_MBX_ERR_BUSY FM10K_MBX_ERR(0x0C)
+#define FM10K_MBX_ERR_RSVD0 FM10K_MBX_ERR(0x0E)
+#define FM10K_MBX_ERR_CRC FM10K_MBX_ERR(0x0F)
+
+#define FM10K_MBX_CRC_SEED 0xFFFF
+
+struct fm10k_mbx_ops {
+ s32 (*connect)(struct fm10k_hw *, struct fm10k_mbx_info *);
+ void (*disconnect)(struct fm10k_hw *, struct fm10k_mbx_info *);
+ bool (*rx_ready)(struct fm10k_mbx_info *);
+ bool (*tx_ready)(struct fm10k_mbx_info *, u16);
+ bool (*tx_complete)(struct fm10k_mbx_info *);
+ s32 (*enqueue_tx)(struct fm10k_hw *, struct fm10k_mbx_info *,
+ const u32 *);
+ s32 (*process)(struct fm10k_hw *, struct fm10k_mbx_info *);
+ s32 (*register_handlers)(struct fm10k_mbx_info *,
+ const struct fm10k_msg_data *);
+};
+
+struct fm10k_mbx_fifo {
+ u32 *buffer;
+ u16 head;
+ u16 tail;
+ u16 size;
+};
+
+/* size of buffer to be stored in mailbox for FIFOs */
+#define FM10K_MBX_TX_BUFFER_SIZE 512
+#define FM10K_MBX_RX_BUFFER_SIZE 128
+#define FM10K_MBX_BUFFER_SIZE \
+ (FM10K_MBX_TX_BUFFER_SIZE + FM10K_MBX_RX_BUFFER_SIZE)
+
+/* minimum and maximum message size in dwords */
+#define FM10K_MBX_MSG_MAX_SIZE \
+ ((FM10K_MBX_TX_BUFFER_SIZE - 1) & (FM10K_MBX_RX_BUFFER_SIZE - 1))
+#define FM10K_VFMBX_MSG_MTU ((FM10K_VFMBMEM_LEN / 2) - 1)
+
+#define FM10K_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define FM10K_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+struct fm10k_mbx_info {
+ /* function pointers for mailbox operations */
+ struct fm10k_mbx_ops ops;
+ const struct fm10k_msg_data *msg_data;
+
+ /* message FIFOs */
+ struct fm10k_mbx_fifo rx;
+ struct fm10k_mbx_fifo tx;
+
+ /* delay for handling timeouts */
+ u32 timeout;
+ u32 usec_delay;
+
+ /* mailbox state info */
+ u32 mbx_reg, mbmem_reg, mbx_lock, mbx_hdr;
+ u16 max_size, mbmem_len;
+ u16 tail, tail_len, pulled;
+ u16 head, head_len, pushed;
+ u16 local, remote;
+ enum fm10k_mbx_state state;
+
+ /* result of last mailbox test */
+ s32 test_result;
+
+ /* statistics */
+ u64 tx_busy;
+ u64 tx_dropped;
+ u64 tx_messages;
+ u64 tx_dwords;
+ u64 tx_mbmem_pulled;
+ u64 rx_messages;
+ u64 rx_dwords;
+ u64 rx_mbmem_pushed;
+ u64 rx_parse_err;
+
+ /* Buffer to store messages */
+ u32 buffer[FM10K_MBX_BUFFER_SIZE];
+};
+
+s32 fm10k_pfvf_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *,
+ const struct fm10k_msg_data *, u8);
+s32 fm10k_sm_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *,
+ const struct fm10k_msg_data *);
+
+#endif /* _FM10K_MBX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_osdep.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_osdep.h
new file mode 100644
index 00000000..199ebd8e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_osdep.h
@@ -0,0 +1,174 @@
+/*******************************************************************************
+
+Copyright (c) 2013-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_OSDEP_H_
+#define _FM10K_OSDEP_H_
+
+#include <stdint.h>
+#include <string.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+
+#include "../fm10k_logs.h"
+
+/* TODO: this does not look like it should be used... */
+#define ERROR_REPORT2(v1, v2, v3) do { } while (0)
+
+#ifndef BOULDER_RAPIDS_HW
+#define BOULDER_RAPIDS_HW
+#endif
+
+#define STATIC static
+#define DEBUGFUNC(F) DEBUGOUT(F "\n");
+#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args)
+#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args)
+
+#define FALSE 0
+#define TRUE 1
+#ifndef false
+#define false FALSE
+#endif
+#ifndef true
+#define true TRUE
+#endif
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef int64_t s64;
+typedef uint64_t u64;
+typedef int bool;
+
+#ifndef __le16
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+#endif
+
+/* offsets are WORD offsets, not BYTE offsets */
+#define FM10K_WRITE_REG(hw, reg, val) \
+ rte_write32((val), ((hw)->hw_addr + (reg)))
+
+#define FM10K_READ_REG(hw, reg) rte_read32(((hw)->hw_addr + (reg)))
+
+#define FM10K_WRITE_FLUSH(a) FM10K_READ_REG(a, FM10K_CTRL)
+
+#define FM10K_PCI_REG(reg) rte_read32(reg)
+
+#define FM10K_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))
+
+/* not implemented */
+#define FM10K_READ_PCI_WORD(hw, reg) 0
+
+#define FM10K_WRITE_MBX(hw, reg, value) FM10K_WRITE_REG(hw, reg, value)
+#define FM10K_READ_MBX(hw, reg) FM10K_READ_REG(hw, reg)
+
+#define FM10K_LE16_TO_CPU rte_le_to_cpu_16
+#define FM10K_LE32_TO_CPU rte_le_to_cpu_32
+#define FM10K_CPU_TO_LE32 rte_cpu_to_le_32
+#define FM10K_CPU_TO_LE16 rte_cpu_to_le_16
+#define le16_to_cpu rte_le_to_cpu_16
+
+#define FM10K_RMB rte_rmb
+#define FM10K_WMB rte_wmb
+
+#define usec_delay rte_delay_us
+
+#define FM10K_REMOVED(hw_addr) (!(hw_addr))
+
+#ifndef FM10K_IS_ZERO_ETHER_ADDR
+/* make certain address is not 0 */
+#define FM10K_IS_ZERO_ETHER_ADDR(addr) \
+(!((addr)[0] | (addr)[1] | (addr)[2] | (addr)[3] | (addr)[4] | (addr)[5]))
+#endif
+
+#ifndef FM10K_IS_MULTICAST_ETHER_ADDR
+#define FM10K_IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1)
+#endif
+
+#ifndef FM10K_IS_VALID_ETHER_ADDR
+/* make certain address is not multicast or 0 */
+#define FM10K_IS_VALID_ETHER_ADDR(addr) \
+(!FM10K_IS_MULTICAST_ETHER_ADDR(addr) && !FM10K_IS_ZERO_ETHER_ADDR(addr))
+#endif
+
+#ifndef do_div
+#define do_div(n, base) ({\
+ (n) = (n) / (base);\
+})
+#endif /* do_div */
+
+/* DPDK can't access IOMEM directly */
+#ifndef FM10K_WRITE_SW_REG
+#define FM10K_WRITE_SW_REG(v1, v2, v3) do { } while (0)
+#endif
+
+#ifndef fm10k_read_reg
+#define fm10k_read_reg FM10K_READ_REG
+#endif
+
+#define FM10K_INTEL_VENDOR_ID 0x8086
+#define FM10K_DMA_CTRL_MINMSS_SHIFT 9
+#define FM10K_EICR_PCA_FAULT 0x00000001
+#define FM10K_EICR_THI_FAULT 0x00000004
+#define FM10K_EICR_FUM_FAULT 0x00000020
+#define FM10K_EICR_SRAMERROR 0x00000400
+#define FM10K_SRAM_IP 0x13003
+#define FM10K_RXINT_TIMER_SHIFT 8
+#define FM10K_TXINT_TIMER_SHIFT 8
+#define FM10K_RXD_PKTTYPE_MASK 0x03F0
+#define FM10K_RXD_PKTTYPE_SHIFT 4
+
+#define FM10K_RXD_STATUS_IPCS 0x0008 /* Indicates IPv4 csum */
+#define FM10K_RXD_STATUS_HBO 0x0400 /* header buffer overrun */
+
+#define FM10K_TSO_MINMSS \
+ (FM10K_DMA_CTRL_MINMSS_64 >> FM10K_DMA_CTRL_MINMSS_SHIFT)
+#define FM10K_TSO_MIN_HEADERLEN 54
+#define FM10K_TSO_MAX_HEADERLEN 192
+
+#endif /* _FM10K_OSDEP_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.c
new file mode 100644
index 00000000..db5f4912
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.c
@@ -0,0 +1,2128 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "fm10k_pf.h"
+#include "fm10k_vf.h"
+
+/**
+ * fm10k_reset_hw_pf - PF hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * This function should return the hardware to a state similar to the
+ * one it is in after being powered on.
+ **/
+STATIC s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
+{
+ s32 err;
+ u32 reg;
+ u16 i;
+
+ DEBUGFUNC("fm10k_reset_hw_pf");
+
+ /* Disable interrupts */
+ FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL));
+
+ /* Lock ITR2 reg 0 into itself and disable interrupt moderation */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0);
+ FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0);
+
+ /* We assume here Tx and Rx queue 0 are owned by the PF */
+
+ /* Shut off VF access to their queues forcing them to queue 0 */
+ for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0);
+ }
+
+ /* shut down all rings */
+ err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
+ if (err == FM10K_ERR_REQUESTS_PENDING) {
+ hw->mac.reset_while_pending++;
+ goto force_reset;
+ } else if (err) {
+ return err;
+ }
+
+ /* Verify that DMA is no longer active */
+ reg = FM10K_READ_REG(hw, FM10K_DMA_CTRL);
+ if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
+ return FM10K_ERR_DMA_PENDING;
+
+force_reset:
+ /* Inititate data path reset */
+ reg = FM10K_DMA_CTRL_DATAPATH_RESET;
+ FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, reg);
+
+ /* Flush write and allow 100us for reset to complete */
+ FM10K_WRITE_FLUSH(hw);
+ usec_delay(FM10K_RESET_TIMEOUT);
+
+ /* Verify we made it out of reset */
+ reg = FM10K_READ_REG(hw, FM10K_IP);
+ if (!(reg & FM10K_IP_NOTINRESET))
+ return FM10K_ERR_RESET_FAILED;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support
+ * @hw: pointer to hardware structure
+ *
+ * Looks at the ARI hierarchy bit to determine whether ARI is supported or not.
+ **/
+STATIC bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw)
+{
+ u16 sriov_ctrl = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_SRIOV_CTRL);
+
+ DEBUGFUNC("fm10k_is_ari_hierarchy_pf");
+
+ return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI);
+}
+
+/**
+ * fm10k_init_hw_pf - PF hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ **/
+STATIC s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
+{
+ u32 dma_ctrl, txqctl;
+ u16 i;
+
+ DEBUGFUNC("fm10k_init_hw_pf");
+
+ /* Establish default VSI as valid */
+ FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0);
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(fm10k_dglort_default),
+ FM10K_DGLORTMAP_ANY);
+
+ /* Invalidate all other GLORT entries */
+ for (i = 1; i < FM10K_DGLORT_COUNT; i++)
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE);
+
+ /* reset ITR2(0) to point to itself */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0);
+
+ /* reset VF ITR2(0) to point to 0 avoid PF registers */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0);
+
+ /* loop through all PF ITR2 registers pointing them to the previous */
+ for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++)
+ FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1);
+
+ /* Enable interrupt moderator if not already enabled */
+ FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
+
+ /* compute the default txqctl configuration */
+ txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW |
+ (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT);
+
+ for (i = 0; i < FM10K_MAX_QUEUES; i++) {
+ /* configure rings for 256 Queue / 32 Descriptor cache mode */
+ FM10K_WRITE_REG(hw, FM10K_TQDLOC(i),
+ (i * FM10K_TQDLOC_BASE_32_DESC) |
+ FM10K_TQDLOC_SIZE_32_DESC);
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl);
+
+ /* configure rings to provide TPH processing hints */
+ FM10K_WRITE_REG(hw, FM10K_TPH_TXCTRL(i),
+ FM10K_TPH_TXCTRL_DESC_TPHEN |
+ FM10K_TPH_TXCTRL_DESC_RROEN |
+ FM10K_TPH_TXCTRL_DESC_WROEN |
+ FM10K_TPH_TXCTRL_DATA_RROEN);
+ FM10K_WRITE_REG(hw, FM10K_TPH_RXCTRL(i),
+ FM10K_TPH_RXCTRL_DESC_TPHEN |
+ FM10K_TPH_RXCTRL_DESC_RROEN |
+ FM10K_TPH_RXCTRL_DATA_WROEN |
+ FM10K_TPH_RXCTRL_HDR_WROEN);
+ }
+
+ /* set max hold interval to align with 1.024 usec in all modes and
+ * store ITR scale
+ */
+ switch (hw->bus.speed) {
+ case fm10k_bus_speed_2500:
+ dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1;
+ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1;
+ break;
+ case fm10k_bus_speed_5000:
+ dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2;
+ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2;
+ break;
+ case fm10k_bus_speed_8000:
+ dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3;
+ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
+ break;
+ default:
+ dma_ctrl = 0;
+ /* just in case, assume Gen3 ITR scale */
+ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
+ break;
+ }
+
+ /* Configure TSO flags */
+ FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW);
+ FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI);
+
+ /* Enable DMA engine
+ * Set Rx Descriptor size to 32
+ * Set Minimum MSS to 64
+ * Set Maximum number of Rx queues to 256 / 32 Descriptor
+ */
+ dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE |
+ FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 |
+ FM10K_DMA_CTRL_32_DESC;
+
+ FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, dma_ctrl);
+
+ /* record maximum queue count, we limit ourselves to 128 */
+ hw->mac.max_queues = FM10K_MAX_QUEUES_PF;
+
+ /* We support either 64 VFs or 7 VFs depending on if we have ARI */
+ hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7;
+
+ return FM10K_SUCCESS;
+}
+
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+/**
+ * fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU
+ * @hw: pointer to hardware structure
+ *
+ * Looks at the PCIe bus info to confirm whether or not this slot can support
+ * the necessary bandwidth for this device.
+ **/
+STATIC bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw)
+{
+ DEBUGFUNC("fm10k_is_slot_appropriate_pf");
+
+ return (hw->bus.speed == hw->bus_caps.speed) &&
+ (hw->bus.width == hw->bus_caps.width);
+}
+
+#endif
+/**
+ * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vid: VLAN ID to add to table
+ * @vsi: Index indicating VF ID or PF ID in table
+ * @set: Indicates if this is a set or clear operation
+ *
+ * This function adds or removes the corresponding VLAN ID from the VLAN
+ * filter table for the corresponding function. In addition to the
+ * standard set/clear that supports one bit a multi-bit write is
+ * supported to set 64 bits at a time.
+ **/
+STATIC s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
+{
+ u32 vlan_table, reg, mask, bit, len;
+
+ /* verify the VSI index is valid */
+ if (vsi > FM10K_VLAN_TABLE_VSI_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* VLAN multi-bit write:
+ * The multi-bit write has several parts to it.
+ * 24 16 8 0
+ * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RSVD0 | Length |C|RSVD0| VLAN ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * VLAN ID: Vlan Starting value
+ * RSVD0: Reserved section, must be 0
+ * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message)
+ * Length: Number of times to repeat the bit being set
+ */
+ len = vid >> 16;
+ vid = (vid << 17) >> 17;
+
+ /* verify the reserved 0 fields are 0 */
+ if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* Loop through the table updating all required VLANs */
+ for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32;
+ len < FM10K_VLAN_TABLE_VID_MAX;
+ len -= 32 - bit, reg++, bit = 0) {
+ /* record the initial state of the register */
+ vlan_table = FM10K_READ_REG(hw, reg);
+
+ /* truncate mask if we are at the start or end of the run */
+ mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit;
+
+ /* make necessary modifications to the register */
+ mask &= set ? ~vlan_table : vlan_table;
+ if (mask)
+ FM10K_WRITE_REG(hw, reg, vlan_table ^ mask);
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_read_mac_addr_pf - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the SM_AREA and stores the value.
+ **/
+STATIC s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw)
+{
+ u8 perm_addr[ETH_ALEN];
+ u32 serial_num;
+
+ DEBUGFUNC("fm10k_read_mac_addr_pf");
+
+ serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(1));
+
+ /* last byte should be all 1's */
+ if ((~serial_num) << 24)
+ return FM10K_ERR_INVALID_MAC_ADDR;
+
+ perm_addr[0] = (u8)(serial_num >> 24);
+ perm_addr[1] = (u8)(serial_num >> 16);
+ perm_addr[2] = (u8)(serial_num >> 8);
+
+ serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(0));
+
+ /* first byte should be all 1's */
+ if ((~serial_num) >> 24)
+ return FM10K_ERR_INVALID_MAC_ADDR;
+
+ perm_addr[3] = (u8)(serial_num >> 16);
+ perm_addr[4] = (u8)(serial_num >> 8);
+ perm_addr[5] = (u8)(serial_num);
+
+ memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
+ memcpy(hw->mac.addr, perm_addr, ETH_ALEN);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_glort_valid_pf - Validate that the provided glort is valid
+ * @hw: pointer to the HW structure
+ * @glort: base glort to be validated
+ *
+ * This function will return an error if the provided glort is invalid
+ **/
+bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
+{
+ glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT;
+
+ return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE);
+}
+
+/**
+ * fm10k_update_xc_addr_pf - Update device addresses
+ * @hw: pointer to the HW structure
+ * @glort: base resource tag for this request
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ * @flags: flags field to indicate add and secure
+ *
+ * This function generates a message to the Switch API requesting
+ * that the given logical port add/remove the given L2 MAC/VLAN address.
+ **/
+STATIC s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add, u8 flags)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ struct fm10k_mac_update mac_update;
+ u32 msg[5];
+
+ DEBUGFUNC("fm10k_update_xc_addr_pf");
+
+ /* clear set bit from VLAN ID */
+ vid &= ~FM10K_VLAN_CLEAR;
+
+ /* if glort or VLAN are not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* record fields */
+ mac_update.mac_lower = FM10K_CPU_TO_LE32(((u32)mac[2] << 24) |
+ ((u32)mac[3] << 16) |
+ ((u32)mac[4] << 8) |
+ ((u32)mac[5]));
+ mac_update.mac_upper = FM10K_CPU_TO_LE16(((u16)mac[0] << 8) |
+ ((u16)mac[1]));
+ mac_update.vlan = FM10K_CPU_TO_LE16(vid);
+ mac_update.glort = FM10K_CPU_TO_LE16(glort);
+ mac_update.action = add ? 0 : 1;
+ mac_update.flags = flags;
+
+ /* populate mac_update fields */
+ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE);
+ fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE,
+ &mac_update, sizeof(mac_update));
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_uc_addr_pf - Update device unicast addresses
+ * @hw: pointer to the HW structure
+ * @glort: base resource tag for this request
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ * @flags: flags field to indicate add and secure
+ *
+ * This function is used to add or remove unicast addresses for
+ * the PF.
+ **/
+STATIC s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add, u8 flags)
+{
+ DEBUGFUNC("fm10k_update_uc_addr_pf");
+
+ /* verify MAC address is valid */
+ if (!IS_VALID_ETHER_ADDR(mac))
+ return FM10K_ERR_PARAM;
+
+ return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags);
+}
+
+/**
+ * fm10k_update_mc_addr_pf - Update device multicast addresses
+ * @hw: pointer to the HW structure
+ * @glort: base resource tag for this request
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ *
+ * This function is used to add or remove multicast MAC addresses for
+ * the PF.
+ **/
+STATIC s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add)
+{
+ DEBUGFUNC("fm10k_update_mc_addr_pf");
+
+ /* verify multicast address is valid */
+ if (!IS_MULTICAST_ETHER_ADDR(mac))
+ return FM10K_ERR_PARAM;
+
+ return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0);
+}
+
+/**
+ * fm10k_update_xcast_mode_pf - Request update of multicast mode
+ * @hw: pointer to hardware structure
+ * @glort: base resource tag for this request
+ * @mode: integer value indicating mode being requested
+ *
+ * This function will attempt to request a higher mode for the port
+ * so that it can enable either multicast, multicast promiscuous, or
+ * promiscuous mode of operation.
+ **/
+STATIC s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[3], xcast_mode;
+
+ DEBUGFUNC("fm10k_update_xcast_mode_pf");
+
+ if (mode > FM10K_XCAST_MODE_NONE)
+ return FM10K_ERR_PARAM;
+
+ /* if glort is not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort))
+ return FM10K_ERR_PARAM;
+
+ /* write xcast mode as a single u32 value,
+ * lower 16 bits: glort
+ * upper 16 bits: mode
+ */
+ xcast_mode = ((u32)mode << 16) | glort;
+
+ /* generate message requesting to change xcast mode */
+ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES);
+ fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_int_moderator_pf - Update interrupt moderator linked list
+ * @hw: pointer to hardware structure
+ *
+ * This function walks through the MSI-X vector table to determine the
+ * number of active interrupts and based on that information updates the
+ * interrupt moderator linked list.
+ **/
+STATIC void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
+{
+ u32 i;
+
+ /* Disable interrupt moderator */
+ FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0);
+
+ /* loop through PF from last to first looking enabled vectors */
+ for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) {
+ if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i)))
+ break;
+ }
+
+ /* always reset VFITR2[0] to point to last enabled PF vector */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);
+
+ /* reset ITR2[0] to point to last enabled PF vector */
+ if (!hw->iov.num_vfs)
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), i);
+
+ /* Enable interrupt moderator */
+ FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
+}
+
+/**
+ * fm10k_update_lport_state_pf - Notify the switch of a change in port state
+ * @hw: pointer to the HW structure
+ * @glort: base resource tag for this request
+ * @count: number of logical ports being updated
+ * @enable: boolean value indicating enable or disable
+ *
+ * This function is used to add/remove a logical port from the switch.
+ **/
+STATIC s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
+ u16 count, bool enable)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[3], lport_msg;
+
+ DEBUGFUNC("fm10k_lport_state_pf");
+
+ /* do nothing if we are being asked to create or destroy 0 ports */
+ if (!count)
+ return FM10K_SUCCESS;
+
+ /* if glort is not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort))
+ return FM10K_ERR_PARAM;
+
+ /* reset multicast mode if deleting lport */
+ if (!enable)
+ fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
+
+ /* construct the lport message from the 2 pieces of data we have */
+ lport_msg = ((u32)count << 16) | glort;
+
+ /* generate lport create/delete message */
+ fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE :
+ FM10K_PF_MSG_ID_LPORT_DELETE);
+ fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues
+ * @hw: pointer to hardware structure
+ * @dglort: pointer to dglort configuration structure
+ *
+ * Reads the configuration structure contained in dglort_cfg and uses
+ * that information to then populate a DGLORTMAP/DEC entry and the queues
+ * to which it has been assigned.
+ **/
+STATIC s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
+ struct fm10k_dglort_cfg *dglort)
+{
+ u16 glort, queue_count, vsi_count, pc_count;
+ u16 vsi, queue, pc, q_idx;
+ u32 txqctl, dglortdec, dglortmap;
+
+ /* verify the dglort pointer */
+ if (!dglort)
+ return FM10K_ERR_PARAM;
+
+ /* verify the dglort values */
+ if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) ||
+ (dglort->vsi_l > 6) || (dglort->vsi_b > 64) ||
+ (dglort->queue_l > 8) || (dglort->queue_b >= 256))
+ return FM10K_ERR_PARAM;
+
+ /* determine count of VSIs and queues */
+ queue_count = BIT(dglort->rss_l + dglort->pc_l);
+ vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
+ glort = dglort->glort;
+ q_idx = dglort->queue_b;
+
+ /* configure SGLORT for queues */
+ for (vsi = 0; vsi < vsi_count; vsi++, glort++) {
+ for (queue = 0; queue < queue_count; queue++, q_idx++) {
+ if (q_idx >= FM10K_MAX_QUEUES)
+ break;
+
+ FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(q_idx), glort);
+ FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(q_idx), glort);
+ }
+ }
+
+ /* determine count of PCs and queues */
+ queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
+ pc_count = BIT(dglort->pc_l);
+
+ /* configure PC for Tx queues */
+ for (pc = 0; pc < pc_count; pc++) {
+ q_idx = pc + dglort->queue_b;
+ for (queue = 0; queue < queue_count; queue++) {
+ if (q_idx >= FM10K_MAX_QUEUES)
+ break;
+
+ txqctl = FM10K_READ_REG(hw, FM10K_TXQCTL(q_idx));
+ txqctl &= ~FM10K_TXQCTL_PC_MASK;
+ txqctl |= pc << FM10K_TXQCTL_PC_SHIFT;
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(q_idx), txqctl);
+
+ q_idx += pc_count;
+ }
+ }
+
+ /* configure DGLORTDEC */
+ dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) |
+ ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) |
+ ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) |
+ ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) |
+ ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) |
+ ((u32)(dglort->queue_l));
+ if (dglort->inner_rss)
+ dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE;
+
+ /* configure DGLORTMAP */
+ dglortmap = (dglort->idx == fm10k_dglort_default) ?
+ FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO;
+ dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l;
+ dglortmap |= dglort->glort;
+
+ /* write values to hardware */
+ FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec);
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap);
+
+ return FM10K_SUCCESS;
+}
+
+u16 fm10k_queues_per_pool(struct fm10k_hw *hw)
+{
+ u16 num_pools = hw->iov.num_pools;
+
+ return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ?
+ 8 : FM10K_MAX_QUEUES_POOL;
+}
+
+u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx)
+{
+ u16 num_vfs = hw->iov.num_vfs;
+ u16 vf_q_idx = FM10K_MAX_QUEUES;
+
+ vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx);
+
+ return vf_q_idx;
+}
+
+STATIC u16 fm10k_vectors_per_pool(struct fm10k_hw *hw)
+{
+ u16 num_pools = hw->iov.num_pools;
+
+ return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 :
+ FM10K_MAX_VECTORS_POOL;
+}
+
+STATIC u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx)
+{
+ u16 vf_v_idx = FM10K_MAX_VECTORS_PF;
+
+ vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx;
+
+ return vf_v_idx;
+}
+
+/**
+ * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization
+ * @hw: pointer to the HW structure
+ * @num_vfs: number of VFs to be allocated
+ * @num_pools: number of virtualization pools to be allocated
+ *
+ * Allocates queues and traffic classes to virtualization entities to prepare
+ * the PF for SR-IOV and VMDq
+ **/
+STATIC s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
+ u16 num_pools)
+{
+ u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx;
+ u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT;
+ int i, j;
+
+ /* hardware only supports up to 64 pools */
+ if (num_pools > 64)
+ return FM10K_ERR_PARAM;
+
+ /* the number of VFs cannot exceed the number of pools */
+ if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs))
+ return FM10K_ERR_PARAM;
+
+ /* record number of virtualization entities */
+ hw->iov.num_vfs = num_vfs;
+ hw->iov.num_pools = num_pools;
+
+ /* determine qmap offsets and counts */
+ qmap_stride = (num_vfs > 8) ? 32 : 256;
+ qpp = fm10k_queues_per_pool(hw);
+ vpp = fm10k_vectors_per_pool(hw);
+
+ /* calculate starting index for queues */
+ vf_q_idx = fm10k_vf_queue_index(hw, 0);
+ qmap_idx = 0;
+
+ /* establish TCs with -1 credits and no quanta to prevent transmit */
+ for (i = 0; i < num_vfs; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_TC_RATE(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(i),
+ FM10K_TC_CREDIT_CREDIT_MASK);
+ }
+
+ /* zero out all mbmem registers */
+ for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;)
+ FM10K_WRITE_REG(hw, FM10K_MBMEM(i), 0);
+
+ /* clear event notification of VF FLR */
+ FM10K_WRITE_REG(hw, FM10K_PFVFLREC(0), ~0);
+ FM10K_WRITE_REG(hw, FM10K_PFVFLREC(1), ~0);
+
+ /* loop through unallocated rings assigning them back to PF */
+ for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF |
+ FM10K_TXQCTL_UNLIMITED_BW | vid);
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF);
+ }
+
+ /* PF should have already updated VFITR2[0] */
+
+ /* update all ITR registers to flow to VFITR2[0] */
+ for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) {
+ if (!(i & (vpp - 1)))
+ FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - vpp);
+ else
+ FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1);
+ }
+
+ /* update PF ITR2[0] to reference the last vector */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0),
+ fm10k_vf_vector_index(hw, num_vfs - 1));
+
+ /* loop through rings populating rings and TCs */
+ for (i = 0; i < num_vfs; i++) {
+ /* record index for VF queue 0 for use in end of loop */
+ vf_q_idx0 = vf_q_idx;
+
+ for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) {
+ /* assign VF and locked TC to queues */
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx),
+ (i << FM10K_TXQCTL_TC_SHIFT) | i |
+ FM10K_TXQCTL_VF | vid);
+ FM10K_WRITE_REG(hw, FM10K_RXDCTL(vf_q_idx),
+ FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
+ FM10K_RXDCTL_DROP_ON_EMPTY);
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(vf_q_idx),
+ (i << FM10K_RXQCTL_VF_SHIFT) |
+ FM10K_RXQCTL_VF);
+
+ /* map queue pair to VF */
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx);
+ }
+
+ /* repeat the first ring for all of the remaining VF rings */
+ for (; j < qmap_stride; j++, qmap_idx++) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0);
+ }
+ }
+
+ /* loop through remaining indexes assigning all to queue 0 */
+ while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), 0);
+ qmap_idx++;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_configure_tc_pf - Configure the shaping group for VF
+ * @hw: pointer to the HW structure
+ * @vf_idx: index of VF receiving GLORT
+ * @rate: Rate indicated in Mb/s
+ *
+ * Configured the TC for a given VF to allow only up to a given number
+ * of Mb/s of outgoing Tx throughput.
+ **/
+STATIC s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
+{
+ /* configure defaults */
+ u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
+ u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;
+
+ /* verify vf is in range */
+ if (vf_idx >= hw->iov.num_vfs)
+ return FM10K_ERR_PARAM;
+
+ /* set interval to align with 4.096 usec in all modes */
+ switch (hw->bus.speed) {
+ case fm10k_bus_speed_2500:
+ interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
+ break;
+ case fm10k_bus_speed_5000:
+ interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
+ break;
+ default:
+ break;
+ }
+
+ if (rate) {
+ if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
+ return FM10K_ERR_PARAM;
+
+ /* The quanta is measured in Bytes per 4.096 or 8.192 usec
+ * The rate is provided in Mbits per second
+ * To tralslate from rate to quanta we need to multiply the
+ * rate by 8.192 usec and divide by 8 bits/byte. To avoid
+ * dealing with floating point we can round the values up
+ * to the nearest whole number ratio which gives us 128 / 125.
+ */
+ tc_rate = (rate * 128) / 125;
+
+ /* try to keep the rate limiting accurate by increasing
+ * the number of credits and interval for rates less than 4Gb/s
+ */
+ if (rate < 4000)
+ interval <<= 1;
+ else
+ tc_rate >>= 1;
+ }
+
+ /* update rate limiter with new values */
+ FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
+ FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
+ FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
+ * @hw: pointer to the HW structure
+ * @vf_idx: index of VF receiving GLORT
+ *
+ * Update the interrupt moderator linked list to include any MSI-X
+ * interrupts which the VF has enabled in the MSI-X vector table.
+ **/
+STATIC s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
+{
+ u16 vf_v_idx, vf_v_limit, i;
+
+ /* verify vf is in range */
+ if (vf_idx >= hw->iov.num_vfs)
+ return FM10K_ERR_PARAM;
+
+ /* determine vector offset and count */
+ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
+ vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
+
+ /* search for first vector that is not masked */
+ for (i = vf_v_limit - 1; i > vf_v_idx; i--) {
+ if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i)))
+ break;
+ }
+
+ /* reset linked list so it now includes our active vectors */
+ if (vf_idx == (hw->iov.num_vfs - 1))
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), i);
+ else
+ FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), i);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF
+ * @hw: pointer to the HW structure
+ * @vf_info: pointer to VF information structure
+ *
+ * Assign a MAC address and default VLAN to a VF and notify it of the update
+ **/
+STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info)
+{
+ u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i;
+ u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0;
+ s32 err = FM10K_SUCCESS;
+ u16 vf_idx, vf_vid;
+
+ /* verify vf is in range */
+ if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs)
+ return FM10K_ERR_PARAM;
+
+ /* determine qmap offsets and counts */
+ qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
+ queues_per_pool = fm10k_queues_per_pool(hw);
+
+ /* calculate starting index for queues */
+ vf_idx = vf_info->vf_idx;
+ vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
+ qmap_idx = qmap_stride * vf_idx;
+
+ /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
+ * used here to indicate to the VF that it will not have privilege to
+ * write VLAN_TABLE. All policy is enforced on the PF but this allows
+ * the VF to correctly report errors to userspace rqeuests.
+ */
+ if (vf_info->pf_vid)
+ vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
+ else
+ vf_vid = vf_info->sw_vid;
+
+ /* generate MAC_ADDR request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
+ fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
+ vf_info->mac, vf_vid);
+
+ /* Configure Queue control register with new VLAN ID. The TXQCTL
+ * register is RO from the VF, so the PF must do this even in the
+ * case of notifying the VF of a new VID via the mailbox.
+ */
+ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
+ FM10K_TXQCTL_VID_MASK;
+ txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
+ FM10K_TXQCTL_VF | vf_idx;
+
+ for (i = 0; i < queues_per_pool; i++)
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
+
+ /* try loading a message onto outgoing mailbox first */
+ if (vf_info->mbx.ops.enqueue_tx) {
+ err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+ if (err != FM10K_MBX_ERR_NO_MBX)
+ return err;
+ err = FM10K_SUCCESS;
+ }
+
+ /* If we aren't connected to a mailbox, this is most likely because
+ * the VF driver is not running. It should thus be safe to re-map
+ * queues and use the registers to pass the MAC address so that the VF
+ * driver gets correct information during its initialization.
+ */
+
+ /* MAP Tx queue back to 0 temporarily, and disable it */
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
+
+ /* verify ring has disabled before modifying base address registers */
+ txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx));
+ for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) {
+ /* limit ourselves to a 1ms timeout */
+ if (timeout == 10) {
+ err = FM10K_ERR_DMA_PENDING;
+ goto err_out;
+ }
+
+ usec_delay(100);
+ txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx));
+ }
+
+ /* Update base address registers to contain MAC address */
+ if (IS_VALID_ETHER_ADDR(vf_info->mac)) {
+ tdbal = (((u32)vf_info->mac[3]) << 24) |
+ (((u32)vf_info->mac[4]) << 16) |
+ (((u32)vf_info->mac[5]) << 8);
+
+ tdbah = (((u32)0xFF) << 24) |
+ (((u32)vf_info->mac[0]) << 16) |
+ (((u32)vf_info->mac[1]) << 8) |
+ ((u32)vf_info->mac[2]);
+ }
+
+ /* Record the base address into queue 0 */
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx), tdbal);
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx), tdbah);
+
+ /* Provide the VF the ITR scale, using software-defined fields in TDLEN
+ * to pass the information during VF initialization. See definition of
+ * FM10K_TDLEN_ITR_SCALE_SHIFT for more details.
+ */
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale <<
+ FM10K_TDLEN_ITR_SCALE_SHIFT);
+
+err_out:
+ /* restore the queue back to VF ownership */
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
+ return err;
+}
+
+/**
+ * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF
+ * @hw: pointer to the HW structure
+ * @vf_info: pointer to VF information structure
+ *
+ * Reassign the interrupts and queues to a VF following an FLR
+ **/
+STATIC s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info)
+{
+ u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx;
+ u32 tdbal = 0, tdbah = 0, txqctl, rxqctl;
+ u16 vf_v_idx, vf_v_limit, vf_vid;
+ u8 vf_idx = vf_info->vf_idx;
+ int i;
+
+ /* verify vf is in range */
+ if (vf_idx >= hw->iov.num_vfs)
+ return FM10K_ERR_PARAM;
+
+ /* clear event notification of VF FLR */
+ FM10K_WRITE_REG(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));
+
+ /* force timeout and then disconnect the mailbox */
+ vf_info->mbx.timeout = 0;
+ if (vf_info->mbx.ops.disconnect)
+ vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);
+
+ /* determine vector offset and count */
+ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
+ vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
+
+ /* determine qmap offsets and counts */
+ qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
+ queues_per_pool = fm10k_queues_per_pool(hw);
+ qmap_idx = qmap_stride * vf_idx;
+
+ /* make all the queues inaccessible to the VF */
+ for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0);
+ }
+
+ /* calculate starting index for queues */
+ vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
+
+ /* determine correct default VLAN ID */
+ if (vf_info->pf_vid)
+ vf_vid = vf_info->pf_vid;
+ else
+ vf_vid = vf_info->sw_vid;
+
+ /* configure Queue control register */
+ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
+ (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
+ FM10K_TXQCTL_VF | vf_idx;
+ rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;
+
+ /* stop further DMA and reset queue ownership back to VF */
+ for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl);
+ FM10K_WRITE_REG(hw, FM10K_RXDCTL(i),
+ FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
+ FM10K_RXDCTL_DROP_ON_EMPTY);
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), rxqctl);
+ }
+
+ /* reset TC with -1 credits and no quanta to prevent transmit */
+ FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx),
+ FM10K_TC_CREDIT_CREDIT_MASK);
+
+ /* update our first entry in the table based on previous VF */
+ if (!vf_idx)
+ hw->mac.ops.update_int_moderator(hw);
+ else
+ hw->iov.ops.assign_int_moderator(hw, vf_idx - 1);
+
+ /* reset linked list so it now includes our active vectors */
+ if (vf_idx == (hw->iov.num_vfs - 1))
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), vf_v_idx);
+ else
+ FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), vf_v_idx);
+
+ /* link remaining vectors so that next points to previous */
+ for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++)
+ FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1);
+
+ /* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */
+ for (i = FM10K_VFMBMEM_LEN; i--;)
+ FM10K_WRITE_REG(hw, FM10K_MBMEM_VF(vf_idx, i), 0);
+ for (i = FM10K_VLAN_TABLE_SIZE; i--;)
+ FM10K_WRITE_REG(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0);
+ for (i = FM10K_RETA_SIZE; i--;)
+ FM10K_WRITE_REG(hw, FM10K_RETA(vf_info->vsi, i), 0);
+ for (i = FM10K_RSSRK_SIZE; i--;)
+ FM10K_WRITE_REG(hw, FM10K_RSSRK(vf_info->vsi, i), 0);
+ FM10K_WRITE_REG(hw, FM10K_MRQC(vf_info->vsi), 0);
+
+ /* Update base address registers to contain MAC address */
+ if (IS_VALID_ETHER_ADDR(vf_info->mac)) {
+ tdbal = (((u32)vf_info->mac[3]) << 24) |
+ (((u32)vf_info->mac[4]) << 16) |
+ (((u32)vf_info->mac[5]) << 8);
+ tdbah = (((u32)0xFF) << 24) |
+ (((u32)vf_info->mac[0]) << 16) |
+ (((u32)vf_info->mac[1]) << 8) |
+ ((u32)vf_info->mac[2]);
+ }
+
+ /* map queue pairs back to VF from last to first */
+ for (i = queues_per_pool; i--;) {
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
+ /* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an
+ * explanation of how TDLEN is used.
+ */
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx + i),
+ hw->mac.itr_scale <<
+ FM10K_TDLEN_ITR_SCALE_SHIFT);
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
+ }
+
+ /* repeat the first ring for all the remaining VF rings */
+ for (i = queues_per_pool; i < qmap_stride; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF
+ * @hw: pointer to hardware structure
+ * @vf_info: pointer to VF information structure
+ * @lport_idx: Logical port offset from the hardware glort
+ * @flags: Set of capability flags to extend port beyond basic functionality
+ *
+ * This function allows enabling a VF port by assigning it a GLORT and
+ * setting the flags so that it can enable an Rx mode.
+ **/
+STATIC s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info,
+ u16 lport_idx, u8 flags)
+{
+ u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE;
+
+ DEBUGFUNC("fm10k_iov_set_lport_state_pf");
+
+ /* if glort is not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort))
+ return FM10K_ERR_PARAM;
+
+ vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE;
+ vf_info->glort = glort;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF
+ * @hw: pointer to hardware structure
+ * @vf_info: pointer to VF information structure
+ *
+ * This function disables a VF port by stripping it of a GLORT and
+ * setting the flags so that it cannot enable any Rx mode.
+ **/
+STATIC void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info)
+{
+ u32 msg[1];
+
+ DEBUGFUNC("fm10k_iov_reset_lport_state_pf");
+
+ /* need to disable the port if it is already enabled */
+ if (FM10K_VF_FLAG_ENABLED(vf_info)) {
+ /* notify switch that this port has been disabled */
+ fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false);
+
+ /* generate port state response to notify VF it is not ready */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
+ vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+ }
+
+ /* clear flags and glort if it exists */
+ vf_info->vf_flags = 0;
+ vf_info->glort = 0;
+}
+
+/**
+ * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs
+ * @hw: pointer to hardware structure
+ * @q: stats for all queues of a VF
+ * @vf_idx: index of VF
+ *
+ * This function collects queue stats for VFs.
+ **/
+STATIC void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats_q *q,
+ u16 vf_idx)
+{
+ u32 idx, qpp;
+
+ /* get stats for all of the queues */
+ qpp = fm10k_queues_per_pool(hw);
+ idx = fm10k_vf_queue_index(hw, vf_idx);
+ fm10k_update_hw_stats_q(hw, q, idx, qpp);
+}
+
+/**
+ * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function is a default handler for MSI-X requests from the VF. The
+ * assumption is that in this case it is acceptable to just directly
+ * hand off the message from the VF to the underlying shared code.
+ **/
+s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
+ u8 vf_idx = vf_info->vf_idx;
+
+ UNREFERENCED_1PARAMETER(results);
+ DEBUGFUNC("fm10k_iov_msg_msix_pf");
+
+ return hw->iov.ops.assign_int_moderator(hw, vf_idx);
+}
+
+/**
+ * fm10k_iov_select_vid - Select correct default VLAN ID
+ * @hw: Pointer to hardware structure
+ * @vid: VLAN ID to correct
+ *
+ * Will report an error if the VLAN ID is out of range. For VID = 0, it will
+ * return either the pf_vid or sw_vid depending on which one is set.
+ */
+STATIC s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
+{
+ if (!vid)
+ return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
+ else if (vf_info->pf_vid && vid != vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ else
+ return vid;
+}
+
+/**
+ * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function is a default handler for MAC/VLAN requests from the VF.
+ * The assumption is that in this case it is acceptable to just directly
+ * hand off the message from the VF to the underlying shared code.
+ **/
+s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
+ u8 mac[ETH_ALEN];
+ u32 *result;
+ int err = FM10K_SUCCESS;
+ bool set;
+ u16 vlan;
+ u32 vid;
+
+ DEBUGFUNC("fm10k_iov_msg_mac_vlan_pf");
+
+ /* we shouldn't be updating rules on a disabled interface */
+ if (!FM10K_VF_FLAG_ENABLED(vf_info))
+ err = FM10K_ERR_PARAM;
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
+ result = results[FM10K_MAC_VLAN_MSG_VLAN];
+
+ /* record VLAN id requested */
+ err = fm10k_tlv_attr_get_u32(result, &vid);
+ if (err)
+ return err;
+
+ set = !(vid & FM10K_VLAN_CLEAR);
+ vid &= ~FM10K_VLAN_CLEAR;
+
+ /* if the length field has been set, this is a multi-bit
+ * update request. For multi-bit requests, simply disallow
+ * them when the pf_vid has been set. In this case, the PF
+ * should have already cleared the VLAN_TABLE, and if we
+ * allowed them, it could allow a rogue VF to receive traffic
+ * on a VLAN it was not assigned. In the single-bit case, we
+ * need to modify requests for VLAN 0 to use the default PF or
+ * SW vid when assigned.
+ */
+
+ if (vid >> 16) {
+ /* prevent multi-bit requests when PF has
+ * administratively set the VLAN for this VF
+ */
+ if (vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ } else {
+ err = fm10k_iov_select_vid(vf_info, (u16)vid);
+ if (err < 0)
+ return err;
+
+ vid = err;
+ }
+
+ /* update VSI info for VF in regards to VLAN table */
+ err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
+ }
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
+ result = results[FM10K_MAC_VLAN_MSG_MAC];
+
+ /* record unicast MAC address requested */
+ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
+ if (err)
+ return err;
+
+ /* block attempts to set MAC for a locked device */
+ if (IS_VALID_ETHER_ADDR(vf_info->mac) &&
+ memcmp(mac, vf_info->mac, ETH_ALEN))
+ return FM10K_ERR_PARAM;
+
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+
+ vlan = (u16)err;
+
+ /* notify switch of request for new unicast address */
+ err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
+ mac, vlan, set, 0);
+ }
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
+ result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
+
+ /* record multicast MAC address requested */
+ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
+ if (err)
+ return err;
+
+ /* verify that the VF is allowed to request multicast */
+ if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
+ return FM10K_ERR_PARAM;
+
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+
+ vlan = (u16)err;
+
+ /* notify switch of request for new multicast address */
+ err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
+ mac, vlan, set);
+ }
+
+ return err;
+}
+
+/**
+ * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
+ * @vf_info: VF info structure containing capability flags
+ * @mode: Requested xcast mode
+ *
+ * This function outputs the mode that most closely matches the requested
+ * mode. If not modes match it will request we disable the port
+ **/
+STATIC u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
+ u8 mode)
+{
+ u8 vf_flags = vf_info->vf_flags;
+
+ /* match up mode to capabilities as best as possible */
+ switch (mode) {
+ case FM10K_XCAST_MODE_PROMISC:
+ if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
+ return FM10K_XCAST_MODE_PROMISC;
+ /* fallthough */
+ case FM10K_XCAST_MODE_ALLMULTI:
+ if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
+ return FM10K_XCAST_MODE_ALLMULTI;
+ /* fallthough */
+ case FM10K_XCAST_MODE_MULTI:
+ if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
+ return FM10K_XCAST_MODE_MULTI;
+ /* fallthough */
+ case FM10K_XCAST_MODE_NONE:
+ if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
+ return FM10K_XCAST_MODE_NONE;
+ /* fallthough */
+ default:
+ break;
+ }
+
+ /* disable interface as it should not be able to request any */
+ return FM10K_XCAST_MODE_DISABLE;
+}
+
+/**
+ * fm10k_iov_msg_lport_state_pf - Message handler for port state requests
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function is a default handler for port state requests. The port
+ * state requests for now are basic and consist of enabling or disabling
+ * the port.
+ **/
+s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
+ u32 *result;
+ s32 err = FM10K_SUCCESS;
+ u32 msg[2];
+ u8 mode = 0;
+
+ DEBUGFUNC("fm10k_iov_msg_lport_state_pf");
+
+ /* verify VF is allowed to enable even minimal mode */
+ if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE))
+ return FM10K_ERR_PARAM;
+
+ if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
+ result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
+
+ /* XCAST mode update requested */
+ err = fm10k_tlv_attr_get_u8(result, &mode);
+ if (err)
+ return FM10K_ERR_PARAM;
+
+ /* prep for possible demotion depending on capabilities */
+ mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
+
+ /* if mode is not currently enabled, enable it */
+ if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
+ fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
+
+ /* swap mode back to a bit flag */
+ mode = FM10K_VF_FLAG_SET_MODE(mode);
+ } else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) {
+ /* need to disable the port if it is already enabled */
+ if (FM10K_VF_FLAG_ENABLED(vf_info))
+ err = fm10k_update_lport_state_pf(hw, vf_info->glort,
+ 1, false);
+
+ /* we need to clear VF_FLAG_ENABLED flags in order to ensure
+ * that we actually re-enable the LPORT state below. Note that
+ * this has no impact if the VF is already disabled, as the
+ * flags are already cleared.
+ */
+ if (!err)
+ vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);
+
+ /* when enabling the port we should reset the rate limiters */
+ hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);
+
+ /* set mode for minimal functionality */
+ mode = FM10K_VF_FLAG_SET_MODE_NONE;
+
+ /* generate port state response to notify VF it is ready */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
+ fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY);
+ mbx->ops.enqueue_tx(hw, mbx, msg);
+ }
+
+ /* if enable state toggled note the update */
+ if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode))
+ err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1,
+ !!mode);
+
+ /* if state change succeeded, then update our stored state */
+ mode |= FM10K_VF_FLAG_CAPABLE(vf_info);
+ if (!err)
+ vf_info->vf_flags = mode;
+
+ return err;
+}
+
+#ifndef NO_DEFAULT_SRIOV_MSG_HANDLERS
+const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = {
+ FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
+ FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
+ FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
+ FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
+ FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+};
+
+#endif
+/**
+ * fm10k_update_stats_hw_pf - Updates hardware related statistics of PF
+ * @hw: pointer to hardware structure
+ * @stats: pointer to the stats structure to update
+ *
+ * This function collects and aggregates global and per queue hardware
+ * statistics.
+ **/
+STATIC void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats)
+{
+ u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop;
+ u32 id, id_prev;
+
+ DEBUGFUNC("fm10k_update_hw_stats_pf");
+
+ /* Use Tx queue 0 as a canary to detect a reset */
+ id = FM10K_READ_REG(hw, FM10K_TXQCTL(0));
+
+ /* Read Global Statistics */
+ do {
+ timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT,
+ &stats->timeout);
+ ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur);
+ ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca);
+ um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um);
+ xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec);
+ vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP,
+ &stats->vlan_drop);
+ loopback_drop =
+ fm10k_read_hw_stats_32b(hw,
+ FM10K_STATS_LOOPBACK_DROP,
+ &stats->loopback_drop);
+ nodesc_drop = fm10k_read_hw_stats_32b(hw,
+ FM10K_STATS_NODESC_DROP,
+ &stats->nodesc_drop);
+
+ /* if value has not changed then we have consistent data */
+ id_prev = id;
+ id = FM10K_READ_REG(hw, FM10K_TXQCTL(0));
+ } while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK);
+
+ /* drop non-ID bits and set VALID ID bit */
+ id &= FM10K_TXQCTL_ID_MASK;
+ id |= FM10K_STAT_VALID;
+
+ /* Update Global Statistics */
+ if (stats->stats_idx == id) {
+ stats->timeout.count += timeout;
+ stats->ur.count += ur;
+ stats->ca.count += ca;
+ stats->um.count += um;
+ stats->xec.count += xec;
+ stats->vlan_drop.count += vlan_drop;
+ stats->loopback_drop.count += loopback_drop;
+ stats->nodesc_drop.count += nodesc_drop;
+ }
+
+ /* Update bases and record current PF id */
+ fm10k_update_hw_base_32b(&stats->timeout, timeout);
+ fm10k_update_hw_base_32b(&stats->ur, ur);
+ fm10k_update_hw_base_32b(&stats->ca, ca);
+ fm10k_update_hw_base_32b(&stats->um, um);
+ fm10k_update_hw_base_32b(&stats->xec, xec);
+ fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop);
+ fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop);
+ fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop);
+ stats->stats_idx = id;
+
+ /* Update Queue Statistics */
+ fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
+}
+
+/**
+ * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF
+ * @hw: pointer to hardware structure
+ * @stats: pointer to the stats structure to update
+ *
+ * This function resets the base for global and per queue hardware
+ * statistics.
+ **/
+STATIC void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats)
+{
+ DEBUGFUNC("fm10k_rebind_hw_stats_pf");
+
+ /* Unbind Global Statistics */
+ fm10k_unbind_hw_stats_32b(&stats->timeout);
+ fm10k_unbind_hw_stats_32b(&stats->ur);
+ fm10k_unbind_hw_stats_32b(&stats->ca);
+ fm10k_unbind_hw_stats_32b(&stats->um);
+ fm10k_unbind_hw_stats_32b(&stats->xec);
+ fm10k_unbind_hw_stats_32b(&stats->vlan_drop);
+ fm10k_unbind_hw_stats_32b(&stats->loopback_drop);
+ fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);
+
+ /* Unbind Queue Statistics */
+ fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+
+ /* Reinitialize bases for all stats */
+ fm10k_update_hw_stats_pf(hw, stats);
+}
+
+/**
+ * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system
+ * @hw: pointer to hardware structure
+ * @dma_mask: 64 bit DMA mask required for platform
+ *
+ * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order
+ * to limit the access to memory beyond what is physically in the system.
+ **/
+STATIC void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask)
+{
+ /* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */
+ u32 phyaddr = (u32)(dma_mask >> 32);
+
+ DEBUGFUNC("fm10k_set_dma_mask_pf");
+
+ FM10K_WRITE_REG(hw, FM10K_PHYADDR, phyaddr);
+}
+
+/**
+ * fm10k_get_fault_pf - Record a fault in one of the interface units
+ * @hw: pointer to hardware structure
+ * @type: pointer to fault type register offset
+ * @fault: pointer to memory location to record the fault
+ *
+ * Record the fault register contents to the fault data structure and
+ * clear the entry from the register.
+ *
+ * Returns ERR_PARAM if invalid register is specified or no error is present.
+ **/
+STATIC s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
+ struct fm10k_fault *fault)
+{
+ u32 func;
+
+ DEBUGFUNC("fm10k_get_fault_pf");
+
+ /* verify the fault register is in range and is aligned */
+ switch (type) {
+ case FM10K_PCA_FAULT:
+ case FM10K_THI_FAULT:
+ case FM10K_FUM_FAULT:
+ break;
+ default:
+ return FM10K_ERR_PARAM;
+ }
+
+ /* only service faults that are valid */
+ func = FM10K_READ_REG(hw, type + FM10K_FAULT_FUNC);
+ if (!(func & FM10K_FAULT_FUNC_VALID))
+ return FM10K_ERR_PARAM;
+
+ /* read remaining fields */
+ fault->address = FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_HI);
+ fault->address <<= 32;
+ fault->address = FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_LO);
+ fault->specinfo = FM10K_READ_REG(hw, type + FM10K_FAULT_SPECINFO);
+
+ /* clear valid bit to allow for next error */
+ FM10K_WRITE_REG(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID);
+
+ /* Record which function triggered the error */
+ if (func & FM10K_FAULT_FUNC_PF)
+ fault->func = 0;
+ else
+ fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
+ FM10K_FAULT_FUNC_VF_SHIFT);
+
+ /* record fault type */
+ fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_request_lport_map_pf - Request LPORT map from the switch API
+ * @hw: pointer to hardware structure
+ *
+ **/
+STATIC s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[1];
+
+ DEBUGFUNC("fm10k_request_lport_pf");
+
+ /* issue request asking for LPORT map */
+ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_get_host_state_pf - Returns the state of the switch and mailbox
+ * @hw: pointer to hardware structure
+ * @switch_ready: pointer to boolean value that will record switch state
+ *
+ * This function will check the DMA_CTRL2 register and mailbox in order
+ * to determine if the switch is ready for the PF to begin requesting
+ * addresses and mapping traffic to the local interface.
+ **/
+STATIC s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
+{
+ u32 dma_ctrl2;
+
+ DEBUGFUNC("fm10k_get_host_state_pf");
+
+ /* verify the switch is ready for interaction */
+ dma_ctrl2 = FM10K_READ_REG(hw, FM10K_DMA_CTRL2);
+ if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
+ return FM10K_SUCCESS;
+
+ /* retrieve generic host state info */
+ return fm10k_get_host_state_generic(hw, switch_ready);
+}
+
+/* This structure defines the attibutes to be parsed below */
+const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+ sizeof(struct fm10k_swapi_error)),
+ FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler configures the lport mapping based on the reply from the
+ * switch API.
+ **/
+s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u16 glort, mask;
+ u32 dglort_map;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_lport_map_pf");
+
+ err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP],
+ &dglort_map);
+ if (err)
+ return err;
+
+ /* extract values out of the header */
+ glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT);
+ mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK);
+
+ /* verify mask is set and none of the masked bits in glort are set */
+ if (!mask || (glort & ~mask))
+ return FM10K_ERR_PARAM;
+
+ /* verify the mask is contiguous, and that it is 1's followed by 0's */
+ if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE)
+ return FM10K_ERR_PARAM;
+
+ /* record the glort, mask, and port count */
+ hw->mac.dglort_map = dglort_map;
+
+ return FM10K_SUCCESS;
+}
+
+const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
+ FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler configures the default VLAN for the PF
+ **/
+static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u16 glort, pvid;
+ u32 pvid_update;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_update_pvid_pf");
+
+ err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
+ &pvid_update);
+ if (err)
+ return err;
+
+ /* extract values from the pvid update */
+ glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
+ pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);
+
+ /* if glort is not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort))
+ return FM10K_ERR_PARAM;
+
+ /* verify VLAN ID is valid */
+ if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* record the port VLAN ID value */
+ hw->mac.default_vid = pvid;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_record_global_table_data - Move global table data to swapi table info
+ * @from: pointer to source table data structure
+ * @to: pointer to destination table info structure
+ *
+ * This function is will copy table_data to the table_info contained in
+ * the hw struct.
+ **/
+static void fm10k_record_global_table_data(struct fm10k_global_table_data *from,
+ struct fm10k_swapi_table_info *to)
+{
+ /* convert from le32 struct to CPU byte ordered values */
+ to->used = FM10K_LE32_TO_CPU(from->used);
+ to->avail = FM10K_LE32_TO_CPU(from->avail);
+}
+
+const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+ sizeof(struct fm10k_swapi_error)),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_err_pf - Message handler for error reply
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler will capture the data for any error replies to previous
+ * messages that the PF has sent.
+ **/
+s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_swapi_error err_msg;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_err_pf");
+
+ /* extract structure from message */
+ err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR],
+ &err_msg, sizeof(err_msg));
+ if (err)
+ return err;
+
+ /* record table status */
+ fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac);
+ fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop);
+ fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu);
+
+ /* record SW API status value */
+ hw->swapi.status = FM10K_LE32_TO_CPU(err_msg.status);
+
+ return FM10K_SUCCESS;
+}
+
+/* currently there is no shared 1588 timestamp handler */
+
+const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
+ sizeof(struct fm10k_swapi_1588_timestamp)),
+ FM10K_TLV_ATTR_LAST
+};
+
+const struct fm10k_tlv_attr fm10k_1588_clock_owner_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_CLOCK_OWNER,
+ sizeof(struct fm10k_swapi_1588_clock_owner)),
+ FM10K_TLV_ATTR_LAST
+};
+
+const struct fm10k_tlv_attr fm10k_master_clk_offset_attr[] = {
+ FM10K_TLV_ATTR_U64(FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_iov_notify_offset_pf - Notify VF of change in PTP offset
+ * @hw: pointer to hardware structure
+ * @vf_info: pointer to the vf info structure
+ * @offset: 64bit unsigned offset from hardware SYSTIME
+ *
+ * This function sends a message to a given VF to notify it of PTP offset
+ * changes.
+ **/
+STATIC void fm10k_iov_notify_offset_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info,
+ u64 offset)
+{
+ u32 msg[4];
+
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588);
+ fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_CLK_OFFSET, offset);
+
+ if (vf_info->mbx.ops.enqueue_tx)
+ vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+}
+
+/**
+ * fm10k_msg_1588_clock_owner_pf - Message handler for clock ownership from SM
+ * @hw: pointer to hardware structure
+ * @results: pointer to array containing parsed data,
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler configures the FM10K_HW_FLAG_CLOCK_OWNER field for the PF
+ */
+s32 fm10k_msg_1588_clock_owner_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_swapi_1588_clock_owner msg;
+ u16 glort;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_1588_clock_owner");
+
+ err = fm10k_tlv_attr_get_le_struct(
+ results[FM10K_PF_ATTR_ID_1588_CLOCK_OWNER],
+ &msg, sizeof(msg));
+ if (err)
+ return err;
+
+ /* We own the clock iff the glort matches us and the enabled field is
+ * true. Otherwise, the clock must belong to some other port.
+ */
+ glort = le16_to_cpu(msg.glort);
+ if (fm10k_glort_valid_pf(hw, glort) && msg.enabled)
+ hw->flags |= FM10K_HW_FLAG_CLOCK_OWNER;
+ else
+ hw->flags &= ~FM10K_HW_FLAG_CLOCK_OWNER;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_adjust_systime_pf - Adjust systime frequency
+ * @hw: pointer to hardware structure
+ * @ppb: adjustment rate in parts per billion
+ *
+ * This function will adjust the SYSTIME_CFG register contained in BAR 4
+ * if this function is supported for BAR 4 access. The adjustment amount
+ * is based on the parts per billion value provided and adjusted to a
+ * value based on parts per 2^48 clock cycles.
+ *
+ * If adjustment is not supported or the requested value is too large
+ * we will return an error.
+ **/
+STATIC s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
+{
+ u64 systime_adjust;
+
+ DEBUGFUNC("fm10k_adjust_systime_pf");
+
+ /* ensure that we control the clock */
+ if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER))
+ return FM10K_ERR_DEVICE_NOT_SUPPORTED;
+
+ /* if sw_addr is not set we don't have switch register access */
+ if (!hw->sw_addr)
+ return ppb ? FM10K_ERR_PARAM : FM10K_SUCCESS;
+
+ /* we must convert the value from parts per billion to parts per
+ * 2^48 cycles. In addition I have opted to only use the 30 most
+ * significant bits of the adjustment value as the 8 least
+ * significant bits are located in another register and represent
+ * a value significantly less than a part per billion, the result
+ * of dropping the 8 least significant bits is that the adjustment
+ * value is effectively multiplied by 2^8 when we write it.
+ *
+ * As a result of all this the math for this breaks down as follows:
+ * ppb / 10^9 == adjust * 2^8 / 2^48
+ * If we solve this for adjust, and simplify it comes out as:
+ * ppb * 2^31 / 5^9 == adjust
+ */
+ systime_adjust = (ppb < 0) ? -ppb : ppb;
+ systime_adjust <<= 31;
+ do_div(systime_adjust, 1953125);
+
+ /* verify the requested adjustment value is in range */
+ if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
+ return FM10K_ERR_PARAM;
+
+ if (ppb > 0)
+ systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
+
+ FM10K_WRITE_SW_REG(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_notify_offset_pf - Notify switch of change in PTP offset
+ * @hw: pointer to hardware structure
+ * @offset: 64bit unsigned offset of SYSTIME
+ *
+ * This function sends a message to the switch to indicate a change in the
+ * offset of the hardware SYSTIME registers. The switch manager is
+ * responsible for transmitting this message to other hosts.
+ */
+STATIC s32 fm10k_notify_offset_pf(struct fm10k_hw *hw, u64 offset)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[4];
+
+ DEBUGFUNC("fm10k_notify_offset_pf");
+
+ /* ensure that we control the clock */
+ if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER))
+ return FM10K_ERR_DEVICE_NOT_SUPPORTED;
+
+ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_MASTER_CLK_OFFSET);
+ fm10k_tlv_attr_put_u64(msg, FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET, offset);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_read_systime_pf - Reads value of systime registers
+ * @hw: pointer to the hardware structure
+ *
+ * Function reads the content of 2 registers, combined to represent a 64 bit
+ * value measured in nanosecods. In order to guarantee the value is accurate
+ * we check the 32 most significant bits both before and after reading the
+ * 32 least significant bits to verify they didn't change as we were reading
+ * the registers.
+ **/
+static u64 fm10k_read_systime_pf(struct fm10k_hw *hw)
+{
+ u32 systime_l, systime_h, systime_tmp;
+
+ systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
+
+ do {
+ systime_tmp = systime_h;
+ systime_l = fm10k_read_reg(hw, FM10K_SYSTIME);
+ systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
+ } while (systime_tmp != systime_h);
+
+ return ((u64)systime_h << 32) | systime_l;
+}
+
+static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
+ FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
+ FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
+ FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
+ FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
+ FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
+ FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
+ FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(fm10k_msg_1588_clock_owner_pf),
+ FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+};
+
+/**
+ * fm10k_init_ops_pf - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for PF.
+ * Does not touch the hardware.
+ **/
+s32 fm10k_init_ops_pf(struct fm10k_hw *hw)
+{
+ struct fm10k_mac_info *mac = &hw->mac;
+ struct fm10k_iov_info *iov = &hw->iov;
+
+ DEBUGFUNC("fm10k_init_ops_pf");
+
+ fm10k_init_ops_generic(hw);
+
+ mac->ops.reset_hw = &fm10k_reset_hw_pf;
+ mac->ops.init_hw = &fm10k_init_hw_pf;
+ mac->ops.start_hw = &fm10k_start_hw_generic;
+ mac->ops.stop_hw = &fm10k_stop_hw_generic;
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+ mac->ops.is_slot_appropriate = &fm10k_is_slot_appropriate_pf;
+#endif
+ mac->ops.update_vlan = &fm10k_update_vlan_pf;
+ mac->ops.read_mac_addr = &fm10k_read_mac_addr_pf;
+ mac->ops.update_uc_addr = &fm10k_update_uc_addr_pf;
+ mac->ops.update_mc_addr = &fm10k_update_mc_addr_pf;
+ mac->ops.update_xcast_mode = &fm10k_update_xcast_mode_pf;
+ mac->ops.update_int_moderator = &fm10k_update_int_moderator_pf;
+ mac->ops.update_lport_state = &fm10k_update_lport_state_pf;
+ mac->ops.update_hw_stats = &fm10k_update_hw_stats_pf;
+ mac->ops.rebind_hw_stats = &fm10k_rebind_hw_stats_pf;
+ mac->ops.configure_dglort_map = &fm10k_configure_dglort_map_pf;
+ mac->ops.set_dma_mask = &fm10k_set_dma_mask_pf;
+ mac->ops.get_fault = &fm10k_get_fault_pf;
+ mac->ops.get_host_state = &fm10k_get_host_state_pf;
+ mac->ops.request_lport_map = &fm10k_request_lport_map_pf;
+ mac->ops.adjust_systime = &fm10k_adjust_systime_pf;
+ mac->ops.notify_offset = &fm10k_notify_offset_pf;
+ mac->ops.read_systime = &fm10k_read_systime_pf;
+
+ mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw);
+
+ iov->ops.assign_resources = &fm10k_iov_assign_resources_pf;
+ iov->ops.configure_tc = &fm10k_iov_configure_tc_pf;
+ iov->ops.assign_int_moderator = &fm10k_iov_assign_int_moderator_pf;
+ iov->ops.assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf;
+ iov->ops.reset_resources = &fm10k_iov_reset_resources_pf;
+ iov->ops.set_lport = &fm10k_iov_set_lport_pf;
+ iov->ops.reset_lport = &fm10k_iov_reset_lport_pf;
+ iov->ops.update_stats = &fm10k_iov_update_stats_pf;
+ iov->ops.notify_offset = &fm10k_iov_notify_offset_pf;
+
+ return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf);
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.h
new file mode 100644
index 00000000..ca125c27
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.h
@@ -0,0 +1,187 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_PF_H_
+#define _FM10K_PF_H_
+
+#include "fm10k_type.h"
+#include "fm10k_common.h"
+
+bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort);
+u16 fm10k_queues_per_pool(struct fm10k_hw *hw);
+u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx);
+
+enum fm10k_pf_tlv_msg_id_v1 {
+ FM10K_PF_MSG_ID_TEST = 0x000, /* msg ID reserved */
+ FM10K_PF_MSG_ID_XCAST_MODES = 0x001,
+ FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE = 0x002,
+ FM10K_PF_MSG_ID_LPORT_MAP = 0x100,
+ FM10K_PF_MSG_ID_LPORT_CREATE = 0x200,
+ FM10K_PF_MSG_ID_LPORT_DELETE = 0x201,
+ FM10K_PF_MSG_ID_CONFIG = 0x300,
+ FM10K_PF_MSG_ID_UPDATE_PVID = 0x400,
+ FM10K_PF_MSG_ID_CREATE_FLOW_TABLE = 0x501,
+ FM10K_PF_MSG_ID_DELETE_FLOW_TABLE = 0x502,
+ FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503,
+ FM10K_PF_MSG_ID_DELETE_FLOW = 0x504,
+ FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505,
+ FM10K_PF_MSG_ID_GET_1588_INFO = 0x506,
+ FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701,
+ FM10K_PF_MSG_ID_1588_CLOCK_OWNER = 0x702,
+ FM10K_PF_MSG_ID_MASTER_CLK_OFFSET = 0x703,
+};
+
+enum fm10k_pf_tlv_attr_id_v1 {
+ FM10K_PF_ATTR_ID_ERR = 0x00,
+ FM10K_PF_ATTR_ID_LPORT_MAP = 0x01,
+ FM10K_PF_ATTR_ID_XCAST_MODE = 0x02,
+ FM10K_PF_ATTR_ID_MAC_UPDATE = 0x03,
+ FM10K_PF_ATTR_ID_VLAN_UPDATE = 0x04,
+ FM10K_PF_ATTR_ID_CONFIG = 0x05,
+ FM10K_PF_ATTR_ID_CREATE_FLOW_TABLE = 0x06,
+ FM10K_PF_ATTR_ID_DELETE_FLOW_TABLE = 0x07,
+ FM10K_PF_ATTR_ID_UPDATE_FLOW = 0x08,
+ FM10K_PF_ATTR_ID_FLOW_STATE = 0x09,
+ FM10K_PF_ATTR_ID_FLOW_HANDLE = 0x0A,
+ FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B,
+ FM10K_PF_ATTR_ID_PORT = 0x0C,
+ FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D,
+ FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10,
+ FM10K_PF_ATTR_ID_1588_CLOCK_OWNER = 0x12,
+ FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET = 0x14,
+};
+
+#define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0
+#define FM10K_MSG_LPORT_MAP_GLORT_SIZE 16
+#define FM10K_MSG_LPORT_MAP_MASK_SHIFT 16
+#define FM10K_MSG_LPORT_MAP_MASK_SIZE 16
+
+#define FM10K_MSG_UPDATE_PVID_GLORT_SHIFT 0
+#define FM10K_MSG_UPDATE_PVID_GLORT_SIZE 16
+#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16
+#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16
+
+#define FM10K_MSG_ERR_PEP_NOT_SCHEDULED 280
+
+/* The following data structures are overlayed directly onto TLV mailbox
+ * messages, and must not break 4 byte alignment. Ensure the structures line
+ * up correctly as per their TLV definition.
+ */
+#ifdef C99
+#pragma pack(push, 4)
+#else
+#pragma pack(4)
+#endif /* C99 */
+
+struct fm10k_mac_update {
+ __le32 mac_lower;
+ __le16 mac_upper;
+ __le16 vlan;
+ __le16 glort;
+ u8 flags;
+ u8 action;
+};
+
+struct fm10k_global_table_data {
+ __le32 used;
+ __le32 avail;
+};
+
+struct fm10k_swapi_error {
+ __le32 status;
+ struct fm10k_global_table_data mac;
+ struct fm10k_global_table_data nexthop;
+ struct fm10k_global_table_data ffu;
+};
+
+struct fm10k_swapi_1588_timestamp {
+ __le64 egress;
+ __le64 ingress;
+ __le16 dglort;
+ __le16 sglort;
+};
+
+struct fm10k_swapi_1588_clock_owner {
+ __le16 glort;
+ __le16 enabled;
+};
+
+#ifdef C99
+#pragma pack(pop)
+#else
+#pragma pack()
+#endif /* C99 */
+
+s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
+#define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_MAP, \
+ fm10k_lport_map_msg_attr, func)
+extern const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[];
+#define FM10K_PF_MSG_UPDATE_PVID_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_UPDATE_PVID, \
+ fm10k_update_pvid_msg_attr, func)
+
+s32 fm10k_msg_err_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
+#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func)
+
+extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[];
+#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \
+ fm10k_1588_timestamp_msg_attr, func)
+
+s32 fm10k_msg_1588_clock_owner_pf(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_1588_clock_owner_attr[];
+#define FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_CLOCK_OWNER, \
+ fm10k_1588_clock_owner_attr, func)
+
+extern const struct fm10k_tlv_attr fm10k_master_clk_offset_attr[];
+#define FM10K_PF_MSG_MASTER_CLK_OFFSET_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_MASTER_CLK_OFFSET, \
+ fm10k_master_clk_offset_attr, func)
+
+s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+#ifndef NO_DEFAULT_SRIOV_MSG_HANDLERS
+extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[];
+#endif
+
+s32 fm10k_init_ops_pf(struct fm10k_hw *hw);
+#endif /* _FM10K_PF_H */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.c
new file mode 100644
index 00000000..0328ede2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.c
@@ -0,0 +1,916 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "fm10k_tlv.h"
+
+/**
+ * fm10k_tlv_msg_init - Initialize message block for TLV data storage
+ * @msg: Pointer to message block
+ * @msg_id: Message ID indicating message type
+ *
+ * This function return success if provided with a valid message pointer
+ **/
+s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id)
+{
+ DEBUGFUNC("fm10k_tlv_msg_init");
+
+ /* verify pointer is not NULL */
+ if (!msg)
+ return FM10K_ERR_PARAM;
+
+ *msg = (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT) | msg_id;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_null_string - Place null terminated string on message
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ * @string: Pointer to string to be stored in attribute
+ *
+ * This function will reorder a string to be CPU endian and store it in
+ * the attribute buffer. It will return success if provided with a valid
+ * pointers.
+ **/
+static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id,
+ const unsigned char *string)
+{
+ u32 attr_data = 0, len = 0;
+ u32 *attr;
+
+ DEBUGFUNC("fm10k_tlv_attr_put_null_string");
+
+ /* verify pointers are not NULL */
+ if (!string || !msg)
+ return FM10K_ERR_PARAM;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ /* copy string into local variable and then write to msg */
+ do {
+ /* write data to message */
+ if (len && !(len % 4)) {
+ attr[len / 4] = attr_data;
+ attr_data = 0;
+ }
+
+ /* record character to offset location */
+ attr_data |= (u32)(*string) << (8 * (len % 4));
+ len++;
+
+ /* test for NULL and then increment */
+ } while (*(string++));
+
+ /* write last piece of data to message */
+ attr[(len + 3) / 4] = attr_data;
+
+ /* record attribute header, update message length */
+ len <<= FM10K_TLV_LEN_SHIFT;
+ attr[0] = len | attr_id;
+
+ /* add header length to length */
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += FM10K_TLV_LEN_ALIGN(len);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_get_null_string - Get null terminated string from attribute
+ * @attr: Pointer to attribute
+ * @string: Pointer to location of destination string
+ *
+ * This function pulls the string back out of the attribute and will place
+ * it in the array pointed by by string. It will return success if provided
+ * with a valid pointers.
+ **/
+static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string)
+{
+ u32 len;
+
+ DEBUGFUNC("fm10k_tlv_attr_get_null_string");
+
+ /* verify pointers are not NULL */
+ if (!string || !attr)
+ return FM10K_ERR_PARAM;
+
+ len = *attr >> FM10K_TLV_LEN_SHIFT;
+ attr++;
+
+ while (len--)
+ string[len] = (u8)(attr[len / 4] >> (8 * (len % 4)));
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_mac_vlan - Store MAC/VLAN attribute in message
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ * @mac_addr: MAC address to be stored
+ *
+ * This function will reorder a MAC address to be CPU endian and store it
+ * in the attribute buffer. It will return success if provided with a
+ * valid pointers.
+ **/
+s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id,
+ const u8 *mac_addr, u16 vlan)
+{
+ u32 len = ETH_ALEN << FM10K_TLV_LEN_SHIFT;
+ u32 *attr;
+
+ DEBUGFUNC("fm10k_tlv_attr_put_mac_vlan");
+
+ /* verify pointers are not NULL */
+ if (!msg || !mac_addr)
+ return FM10K_ERR_PARAM;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ /* record attribute header, update message length */
+ attr[0] = len | attr_id;
+
+ /* copy value into local variable and then write to msg */
+ attr[1] = FM10K_LE32_TO_CPU(*(const __le32 *)&mac_addr[0]);
+ attr[2] = FM10K_LE16_TO_CPU(*(const __le16 *)&mac_addr[4]);
+ attr[2] |= (u32)vlan << 16;
+
+ /* add header length to length */
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += FM10K_TLV_LEN_ALIGN(len);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_get_mac_vlan - Get MAC/VLAN stored in attribute
+ * @attr: Pointer to attribute
+ * @attr_id: Attribute ID
+ * @mac_addr: location of buffer to store MAC address
+ *
+ * This function pulls the MAC address back out of the attribute and will
+ * place it in the array pointed by by mac_addr. It will return success
+ * if provided with a valid pointers.
+ **/
+s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan)
+{
+ DEBUGFUNC("fm10k_tlv_attr_get_mac_vlan");
+
+ /* verify pointers are not NULL */
+ if (!mac_addr || !attr)
+ return FM10K_ERR_PARAM;
+
+ *(__le32 *)&mac_addr[0] = FM10K_CPU_TO_LE32(attr[1]);
+ *(__le16 *)&mac_addr[4] = FM10K_CPU_TO_LE16((u16)(attr[2]));
+ *vlan = (u16)(attr[2] >> 16);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_bool - Add header indicating value "true"
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ *
+ * This function will simply add an attribute header, the fact
+ * that the header is here means the attribute value is true, else
+ * it is false. The function will return success if provided with a
+ * valid pointers.
+ **/
+s32 fm10k_tlv_attr_put_bool(u32 *msg, u16 attr_id)
+{
+ DEBUGFUNC("fm10k_tlv_attr_put_bool");
+
+ /* verify pointers are not NULL */
+ if (!msg)
+ return FM10K_ERR_PARAM;
+
+ /* record attribute header */
+ msg[FM10K_TLV_DWORD_LEN(*msg)] = attr_id;
+
+ /* add header length to length */
+ *msg += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_value - Store integer value attribute in message
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ * @value: Value to be written
+ * @len: Size of value
+ *
+ * This function will place an integer value of up to 8 bytes in size
+ * in a message attribute. The function will return success provided
+ * that msg is a valid pointer, and len is 1, 2, 4, or 8.
+ **/
+s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len)
+{
+ u32 *attr;
+
+ DEBUGFUNC("fm10k_tlv_attr_put_value");
+
+ /* verify non-null msg and len is 1, 2, 4, or 8 */
+ if (!msg || !len || len > 8 || (len & (len - 1)))
+ return FM10K_ERR_PARAM;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ if (len < 4) {
+ attr[1] = (u32)value & (BIT(8 * len) - 1);
+ } else {
+ attr[1] = (u32)value;
+ if (len > 4)
+ attr[2] = (u32)(value >> 32);
+ }
+
+ /* record attribute header, update message length */
+ len <<= FM10K_TLV_LEN_SHIFT;
+ attr[0] = len | attr_id;
+
+ /* add header length to length */
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += FM10K_TLV_LEN_ALIGN(len);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_get_value - Get integer value stored in attribute
+ * @attr: Pointer to attribute
+ * @value: Pointer to destination buffer
+ * @len: Size of value
+ *
+ * This function will place an integer value of up to 8 bytes in size
+ * in the offset pointed to by value. The function will return success
+ * provided that pointers are valid and the len value matches the
+ * attribute length.
+ **/
+s32 fm10k_tlv_attr_get_value(u32 *attr, void *value, u32 len)
+{
+ DEBUGFUNC("fm10k_tlv_attr_get_value");
+
+ /* verify pointers are not NULL */
+ if (!attr || !value)
+ return FM10K_ERR_PARAM;
+
+ if ((*attr >> FM10K_TLV_LEN_SHIFT) != len)
+ return FM10K_ERR_PARAM;
+
+ if (len == 8)
+ *(u64 *)value = ((u64)attr[2] << 32) | attr[1];
+ else if (len == 4)
+ *(u32 *)value = attr[1];
+ else if (len == 2)
+ *(u16 *)value = (u16)attr[1];
+ else
+ *(u8 *)value = (u8)attr[1];
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_le_struct - Store little endian structure in message
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ * @le_struct: Pointer to structure to be written
+ * @len: Size of le_struct
+ *
+ * This function will place a little endian structure value in a message
+ * attribute. The function will return success provided that all pointers
+ * are valid and length is a non-zero multiple of 4.
+ **/
+s32 fm10k_tlv_attr_put_le_struct(u32 *msg, u16 attr_id,
+ const void *le_struct, u32 len)
+{
+ const __le32 *le32_ptr = (const __le32 *)le_struct;
+ u32 *attr;
+ u32 i;
+
+ DEBUGFUNC("fm10k_tlv_attr_put_le_struct");
+
+ /* verify non-null msg and len is in 32 bit words */
+ if (!msg || !len || (len % 4))
+ return FM10K_ERR_PARAM;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ /* copy le32 structure into host byte order at 32b boundaries */
+ for (i = 0; i < (len / 4); i++)
+ attr[i + 1] = FM10K_LE32_TO_CPU(le32_ptr[i]);
+
+ /* record attribute header, update message length */
+ len <<= FM10K_TLV_LEN_SHIFT;
+ attr[0] = len | attr_id;
+
+ /* add header length to length */
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += FM10K_TLV_LEN_ALIGN(len);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_get_le_struct - Get little endian struct form attribute
+ * @attr: Pointer to attribute
+ * @le_struct: Pointer to structure to be written
+ * @len: Size of structure
+ *
+ * This function will place a little endian structure in the buffer
+ * pointed to by le_struct. The function will return success
+ * provided that pointers are valid and the len value matches the
+ * attribute length.
+ **/
+s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len)
+{
+ __le32 *le32_ptr = (__le32 *)le_struct;
+ u32 i;
+
+ DEBUGFUNC("fm10k_tlv_attr_get_le_struct");
+
+ /* verify pointers are not NULL */
+ if (!le_struct || !attr)
+ return FM10K_ERR_PARAM;
+
+ if ((*attr >> FM10K_TLV_LEN_SHIFT) != len)
+ return FM10K_ERR_PARAM;
+
+ attr++;
+
+ for (i = 0; len; i++, len -= 4)
+ le32_ptr[i] = FM10K_CPU_TO_LE32(attr[i]);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_nest_start - Start a set of nested attributes
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ *
+ * This function will mark off a new nested region for encapsulating
+ * a given set of attributes. The idea is if you wish to place a secondary
+ * structure within the message this mechanism allows for that. The
+ * function will return NULL on failure, and a pointer to the start
+ * of the nested attributes on success.
+ **/
+static u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id)
+{
+ u32 *attr;
+
+ DEBUGFUNC("fm10k_tlv_attr_nest_start");
+
+ /* verify pointer is not NULL */
+ if (!msg)
+ return NULL;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ attr[0] = attr_id;
+
+ /* return pointer to nest header */
+ return attr;
+}
+
+/**
+ * fm10k_tlv_attr_nest_stop - Stop a set of nested attributes
+ * @msg: Pointer to message block
+ *
+ * This function closes off an existing set of nested attributes. The
+ * message pointer should be pointing to the parent of the nest. So in
+ * the case of a nest within the nest this would be the outer nest pointer.
+ * This function will return success provided all pointers are valid.
+ **/
+static s32 fm10k_tlv_attr_nest_stop(u32 *msg)
+{
+ u32 *attr;
+ u32 len;
+
+ DEBUGFUNC("fm10k_tlv_attr_nest_stop");
+
+ /* verify pointer is not NULL */
+ if (!msg)
+ return FM10K_ERR_PARAM;
+
+ /* locate the nested header and retrieve its length */
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+ len = (attr[0] >> FM10K_TLV_LEN_SHIFT) << FM10K_TLV_LEN_SHIFT;
+
+ /* only include nest if data was added to it */
+ if (len) {
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += len;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_validate - Validate attribute metadata
+ * @attr: Pointer to attribute
+ * @tlv_attr: Type and length info for attribute
+ *
+ * This function does some basic validation of the input TLV. It
+ * verifies the length, and in the case of null terminated strings
+ * it verifies that the last byte is null. The function will
+ * return FM10K_ERR_PARAM if any attribute is malformed, otherwise
+ * it returns 0.
+ **/
+STATIC s32 fm10k_tlv_attr_validate(u32 *attr,
+ const struct fm10k_tlv_attr *tlv_attr)
+{
+ u32 attr_id = *attr & FM10K_TLV_ID_MASK;
+ u16 len = *attr >> FM10K_TLV_LEN_SHIFT;
+
+ DEBUGFUNC("fm10k_tlv_attr_validate");
+
+ /* verify this is an attribute and not a message */
+ if (*attr & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT))
+ return FM10K_ERR_PARAM;
+
+ /* search through the list of attributes to find a matching ID */
+ while (tlv_attr->id < attr_id)
+ tlv_attr++;
+
+ /* if didn't find a match then we should exit */
+ if (tlv_attr->id != attr_id)
+ return FM10K_NOT_IMPLEMENTED;
+
+ /* move to start of attribute data */
+ attr++;
+
+ switch (tlv_attr->type) {
+ case FM10K_TLV_NULL_STRING:
+ if (!len ||
+ (attr[(len - 1) / 4] & (0xFF << (8 * ((len - 1) % 4)))))
+ return FM10K_ERR_PARAM;
+ if (len > tlv_attr->len)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_MAC_ADDR:
+ if (len != ETH_ALEN)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_BOOL:
+ if (len)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_UNSIGNED:
+ case FM10K_TLV_SIGNED:
+ if (len != tlv_attr->len)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_LE_STRUCT:
+ /* struct must be 4 byte aligned */
+ if ((len % 4) || len != tlv_attr->len)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_NESTED:
+ /* nested attributes must be 4 byte aligned */
+ if (len % 4)
+ return FM10K_ERR_PARAM;
+ break;
+ default:
+ /* attribute id is mapped to bad value */
+ return FM10K_ERR_PARAM;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_parse - Parses stream of attribute data
+ * @attr: Pointer to attribute list
+ * @results: Pointer array to store pointers to attributes
+ * @tlv_attr: Type and length info for attributes
+ *
+ * This function validates a stream of attributes and parses them
+ * up into an array of pointers stored in results. The function will
+ * return FM10K_ERR_PARAM on any input or message error,
+ * FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array
+ * and 0 on success. Any attributes not found in tlv_attr will be silently
+ * ignored.
+ **/
+static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
+ const struct fm10k_tlv_attr *tlv_attr)
+{
+ u32 i, attr_id, offset = 0;
+ s32 err = 0;
+ u16 len;
+
+ DEBUGFUNC("fm10k_tlv_attr_parse");
+
+ /* verify pointers are not NULL */
+ if (!attr || !results)
+ return FM10K_ERR_PARAM;
+
+ /* initialize results to NULL */
+ for (i = 0; i < FM10K_TLV_RESULTS_MAX; i++)
+ results[i] = NULL;
+
+ /* pull length from the message header */
+ len = *attr >> FM10K_TLV_LEN_SHIFT;
+
+ /* no attributes to parse if there is no length */
+ if (!len)
+ return FM10K_SUCCESS;
+
+ /* no attributes to parse, just raw data, message becomes attribute */
+ if (!tlv_attr) {
+ results[0] = attr;
+ return FM10K_SUCCESS;
+ }
+
+ /* move to start of attribute data */
+ attr++;
+
+ /* run through list parsing all attributes */
+ while (offset < len) {
+ attr_id = *attr & FM10K_TLV_ID_MASK;
+
+ if (attr_id >= FM10K_TLV_RESULTS_MAX)
+ return FM10K_NOT_IMPLEMENTED;
+
+ err = fm10k_tlv_attr_validate(attr, tlv_attr);
+ if (err == FM10K_NOT_IMPLEMENTED)
+ ; /* silently ignore non-implemented attributes */
+ else if (err)
+ return err;
+ else
+ results[attr_id] = attr;
+
+ /* update offset */
+ offset += FM10K_TLV_DWORD_LEN(*attr) * 4;
+
+ /* move to next attribute */
+ attr = &attr[FM10K_TLV_DWORD_LEN(*attr)];
+ }
+
+ /* we should find ourselves at the end of the list */
+ if (offset != len)
+ return FM10K_ERR_PARAM;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_msg_parse - Parses message header and calls function handler
+ * @hw: Pointer to hardware structure
+ * @msg: Pointer to message
+ * @mbx: Pointer to mailbox information structure
+ * @func: Function array containing list of message handling functions
+ *
+ * This function should be the first function called upon receiving a
+ * message. The handler will identify the message type and call the correct
+ * handler for the given message. It will return the value from the function
+ * call on a recognized message type, otherwise it will return
+ * FM10K_NOT_IMPLEMENTED on an unrecognized type.
+ **/
+s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg,
+ struct fm10k_mbx_info *mbx,
+ const struct fm10k_msg_data *data)
+{
+ u32 *results[FM10K_TLV_RESULTS_MAX];
+ u32 msg_id;
+ s32 err;
+
+ DEBUGFUNC("fm10k_tlv_msg_parse");
+
+ /* verify pointer is not NULL */
+ if (!msg || !data)
+ return FM10K_ERR_PARAM;
+
+ /* verify this is a message and not an attribute */
+ if (!(*msg & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT)))
+ return FM10K_ERR_PARAM;
+
+ /* grab message ID */
+ msg_id = *msg & FM10K_TLV_ID_MASK;
+
+ while (data->id < msg_id)
+ data++;
+
+ /* if we didn't find it then pass it up as an error */
+ if (data->id != msg_id) {
+ while (data->id != FM10K_TLV_ERROR)
+ data++;
+ }
+
+ /* parse the attributes into the results list */
+ err = fm10k_tlv_attr_parse(msg, results, data->attr);
+ if (err < 0)
+ return err;
+
+ return data->func(hw, results, mbx);
+}
+
+/**
+ * fm10k_tlv_msg_error - Default handler for unrecognized TLV message IDs
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Unused mailbox pointer
+ *
+ * This function is a default handler for unrecognized messages. At a
+ * a minimum it just indicates that the message requested was
+ * unimplemented.
+ **/
+s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ UNREFERENCED_3PARAMETER(hw, results, mbx);
+ DEBUGOUT1("Unknown message ID %u\n", **results & FM10K_TLV_ID_MASK);
+ return FM10K_NOT_IMPLEMENTED;
+}
+
+STATIC const unsigned char test_str[] = "fm10k";
+STATIC const unsigned char test_mac[ETH_ALEN] = { 0x12, 0x34, 0x56,
+ 0x78, 0x9a, 0xbc };
+STATIC const u16 test_vlan = 0x0FED;
+STATIC const u64 test_u64 = 0xfedcba9876543210ull;
+STATIC const u32 test_u32 = 0x87654321;
+STATIC const u16 test_u16 = 0x8765;
+STATIC const u8 test_u8 = 0x87;
+STATIC const s64 test_s64 = -0x123456789abcdef0ll;
+STATIC const s32 test_s32 = -0x1235678;
+STATIC const s16 test_s16 = -0x1234;
+STATIC const s8 test_s8 = -0x12;
+STATIC const __le32 test_le[2] = { FM10K_CPU_TO_LE32(0x12345678),
+ FM10K_CPU_TO_LE32(0x9abcdef0)};
+
+/* The message below is meant to be used as a test message to demonstrate
+ * how to use the TLV interface and to test the types. Normally this code
+ * be compiled out by stripping the code wrapped in FM10K_TLV_TEST_MSG
+ */
+const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = {
+ FM10K_TLV_ATTR_NULL_STRING(FM10K_TEST_MSG_STRING, 80),
+ FM10K_TLV_ATTR_MAC_ADDR(FM10K_TEST_MSG_MAC_ADDR),
+ FM10K_TLV_ATTR_U8(FM10K_TEST_MSG_U8),
+ FM10K_TLV_ATTR_U16(FM10K_TEST_MSG_U16),
+ FM10K_TLV_ATTR_U32(FM10K_TEST_MSG_U32),
+ FM10K_TLV_ATTR_U64(FM10K_TEST_MSG_U64),
+ FM10K_TLV_ATTR_S8(FM10K_TEST_MSG_S8),
+ FM10K_TLV_ATTR_S16(FM10K_TEST_MSG_S16),
+ FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_S32),
+ FM10K_TLV_ATTR_S64(FM10K_TEST_MSG_S64),
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_TEST_MSG_LE_STRUCT, 8),
+ FM10K_TLV_ATTR_NESTED(FM10K_TEST_MSG_NESTED),
+ FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_RESULT),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_tlv_msg_test_generate_data - Stuff message with data
+ * @msg: Pointer to message
+ * @attr_flags: List of flags indicating what attributes to add
+ *
+ * This function is meant to load a message buffer with attribute data
+ **/
+STATIC void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags)
+{
+ DEBUGFUNC("fm10k_tlv_msg_test_generate_data");
+
+ if (attr_flags & BIT(FM10K_TEST_MSG_STRING))
+ fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING,
+ test_str);
+ if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR))
+ fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR,
+ test_mac, test_vlan);
+ if (attr_flags & BIT(FM10K_TEST_MSG_U8))
+ fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8);
+ if (attr_flags & BIT(FM10K_TEST_MSG_U16))
+ fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16);
+ if (attr_flags & BIT(FM10K_TEST_MSG_U32))
+ fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32);
+ if (attr_flags & BIT(FM10K_TEST_MSG_U64))
+ fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64);
+ if (attr_flags & BIT(FM10K_TEST_MSG_S8))
+ fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8);
+ if (attr_flags & BIT(FM10K_TEST_MSG_S16))
+ fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16);
+ if (attr_flags & BIT(FM10K_TEST_MSG_S32))
+ fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32);
+ if (attr_flags & BIT(FM10K_TEST_MSG_S64))
+ fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64);
+ if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT))
+ fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT,
+ test_le, 8);
+}
+
+/**
+ * fm10k_tlv_msg_test_create - Create a test message testing all attributes
+ * @msg: Pointer to message
+ * @attr_flags: List of flags indicating what attributes to add
+ *
+ * This function is meant to load a message buffer with all attribute types
+ * including a nested attribute.
+ **/
+void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags)
+{
+ u32 *nest = NULL;
+
+ DEBUGFUNC("fm10k_tlv_msg_test_create");
+
+ fm10k_tlv_msg_init(msg, FM10K_TLV_MSG_ID_TEST);
+
+ fm10k_tlv_msg_test_generate_data(msg, attr_flags);
+
+ /* check for nested attributes */
+ attr_flags >>= FM10K_TEST_MSG_NESTED;
+
+ if (attr_flags) {
+ nest = fm10k_tlv_attr_nest_start(msg, FM10K_TEST_MSG_NESTED);
+
+ fm10k_tlv_msg_test_generate_data(nest, attr_flags);
+
+ fm10k_tlv_attr_nest_stop(msg);
+ }
+}
+
+/**
+ * fm10k_tlv_msg_test - Validate all results on test message receive
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to attributes in the message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function does a check to verify all attributes match what the test
+ * message placed in the message buffer. It is the default handler
+ * for TLV test messages.
+ **/
+s32 fm10k_tlv_msg_test(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u32 *nest_results[FM10K_TLV_RESULTS_MAX];
+ unsigned char result_str[80];
+ unsigned char result_mac[ETH_ALEN];
+ s32 err = FM10K_SUCCESS;
+ __le32 result_le[2];
+ u16 result_vlan;
+ u64 result_u64;
+ u32 result_u32;
+ u16 result_u16;
+ u8 result_u8;
+ s64 result_s64;
+ s32 result_s32;
+ s16 result_s16;
+ s8 result_s8;
+ u32 reply[3];
+
+ DEBUGFUNC("fm10k_tlv_msg_test");
+
+ /* retrieve results of a previous test */
+ if (!!results[FM10K_TEST_MSG_RESULT])
+ return fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_RESULT],
+ &mbx->test_result);
+
+parse_nested:
+ if (!!results[FM10K_TEST_MSG_STRING]) {
+ err = fm10k_tlv_attr_get_null_string(
+ results[FM10K_TEST_MSG_STRING],
+ result_str);
+ if (!err && memcmp(test_str, result_str, sizeof(test_str)))
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_MAC_ADDR]) {
+ err = fm10k_tlv_attr_get_mac_vlan(
+ results[FM10K_TEST_MSG_MAC_ADDR],
+ result_mac, &result_vlan);
+ if (!err && memcmp(test_mac, result_mac, ETH_ALEN))
+ err = FM10K_ERR_INVALID_VALUE;
+ if (!err && test_vlan != result_vlan)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_U8]) {
+ err = fm10k_tlv_attr_get_u8(results[FM10K_TEST_MSG_U8],
+ &result_u8);
+ if (!err && test_u8 != result_u8)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_U16]) {
+ err = fm10k_tlv_attr_get_u16(results[FM10K_TEST_MSG_U16],
+ &result_u16);
+ if (!err && test_u16 != result_u16)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_U32]) {
+ err = fm10k_tlv_attr_get_u32(results[FM10K_TEST_MSG_U32],
+ &result_u32);
+ if (!err && test_u32 != result_u32)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_U64]) {
+ err = fm10k_tlv_attr_get_u64(results[FM10K_TEST_MSG_U64],
+ &result_u64);
+ if (!err && test_u64 != result_u64)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_S8]) {
+ err = fm10k_tlv_attr_get_s8(results[FM10K_TEST_MSG_S8],
+ &result_s8);
+ if (!err && test_s8 != result_s8)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_S16]) {
+ err = fm10k_tlv_attr_get_s16(results[FM10K_TEST_MSG_S16],
+ &result_s16);
+ if (!err && test_s16 != result_s16)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_S32]) {
+ err = fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_S32],
+ &result_s32);
+ if (!err && test_s32 != result_s32)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_S64]) {
+ err = fm10k_tlv_attr_get_s64(results[FM10K_TEST_MSG_S64],
+ &result_s64);
+ if (!err && test_s64 != result_s64)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_LE_STRUCT]) {
+ err = fm10k_tlv_attr_get_le_struct(
+ results[FM10K_TEST_MSG_LE_STRUCT],
+ result_le,
+ sizeof(result_le));
+ if (!err && memcmp(test_le, result_le, sizeof(test_le)))
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+
+ if (!!results[FM10K_TEST_MSG_NESTED]) {
+ /* clear any pointers */
+ memset(nest_results, 0, sizeof(nest_results));
+
+ /* parse the nested attributes into the nest results list */
+ err = fm10k_tlv_attr_parse(results[FM10K_TEST_MSG_NESTED],
+ nest_results,
+ fm10k_tlv_msg_test_attr);
+ if (err)
+ goto report_result;
+
+ /* loop back through to the start */
+ results = nest_results;
+ goto parse_nested;
+ }
+
+report_result:
+ /* generate reply with test result */
+ fm10k_tlv_msg_init(reply, FM10K_TLV_MSG_ID_TEST);
+ fm10k_tlv_attr_put_s32(reply, FM10K_TEST_MSG_RESULT, err);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, reply);
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.h
new file mode 100644
index 00000000..8f85fce3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.h
@@ -0,0 +1,194 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_TLV_H_
+#define _FM10K_TLV_H_
+
+/* forward declaration */
+struct fm10k_msg_data;
+
+#include "fm10k_type.h"
+
+/* Message / Argument header format
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Length | Flags | Type / ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * The message header format described here is used for messages that are
+ * passed between the PF and the VF. To allow for messages larger then
+ * mailbox size we will provide a message with the above header and it
+ * will be segmented and transported to the mailbox to the other side where
+ * it is reassembled. It contains the following fields:
+ * Length: Length of the message in bytes excluding the message header
+ * Flags: TBD
+ * Type/ID: These will be the message/argument types we pass
+ */
+/* message data header */
+#define FM10K_TLV_ID_SHIFT 0
+#define FM10K_TLV_ID_SIZE 16
+#define FM10K_TLV_ID_MASK ((1u << FM10K_TLV_ID_SIZE) - 1)
+#define FM10K_TLV_FLAGS_SHIFT 16
+#define FM10K_TLV_FLAGS_MSG 0x1
+#define FM10K_TLV_FLAGS_SIZE 4
+#define FM10K_TLV_LEN_SHIFT 20
+#define FM10K_TLV_LEN_SIZE 12
+
+#define FM10K_TLV_HDR_LEN 4ul
+#define FM10K_TLV_LEN_ALIGN_MASK \
+ ((FM10K_TLV_HDR_LEN - 1) << FM10K_TLV_LEN_SHIFT)
+#define FM10K_TLV_LEN_ALIGN(tlv) \
+ (((tlv) + FM10K_TLV_LEN_ALIGN_MASK) & ~FM10K_TLV_LEN_ALIGN_MASK)
+#define FM10K_TLV_DWORD_LEN(tlv) \
+ ((u16)((FM10K_TLV_LEN_ALIGN(tlv)) >> (FM10K_TLV_LEN_SHIFT + 2)) + 1)
+
+#define FM10K_TLV_RESULTS_MAX 32
+
+enum fm10k_tlv_type {
+ FM10K_TLV_NULL_STRING,
+ FM10K_TLV_MAC_ADDR,
+ FM10K_TLV_BOOL,
+ FM10K_TLV_UNSIGNED,
+ FM10K_TLV_SIGNED,
+ FM10K_TLV_LE_STRUCT,
+ FM10K_TLV_NESTED,
+ FM10K_TLV_MAX_TYPE
+};
+
+#define FM10K_TLV_ERROR (~0u)
+
+struct fm10k_tlv_attr {
+ unsigned int id;
+ enum fm10k_tlv_type type;
+ u16 len;
+};
+
+#define FM10K_TLV_ATTR_NULL_STRING(id, len) { id, FM10K_TLV_NULL_STRING, len }
+#define FM10K_TLV_ATTR_MAC_ADDR(id) { id, FM10K_TLV_MAC_ADDR, 6 }
+#define FM10K_TLV_ATTR_BOOL(id) { id, FM10K_TLV_BOOL, 0 }
+#define FM10K_TLV_ATTR_U8(id) { id, FM10K_TLV_UNSIGNED, 1 }
+#define FM10K_TLV_ATTR_U16(id) { id, FM10K_TLV_UNSIGNED, 2 }
+#define FM10K_TLV_ATTR_U32(id) { id, FM10K_TLV_UNSIGNED, 4 }
+#define FM10K_TLV_ATTR_U64(id) { id, FM10K_TLV_UNSIGNED, 8 }
+#define FM10K_TLV_ATTR_S8(id) { id, FM10K_TLV_SIGNED, 1 }
+#define FM10K_TLV_ATTR_S16(id) { id, FM10K_TLV_SIGNED, 2 }
+#define FM10K_TLV_ATTR_S32(id) { id, FM10K_TLV_SIGNED, 4 }
+#define FM10K_TLV_ATTR_S64(id) { id, FM10K_TLV_SIGNED, 8 }
+#define FM10K_TLV_ATTR_LE_STRUCT(id, len) { id, FM10K_TLV_LE_STRUCT, len }
+#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED }
+#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR }
+
+struct fm10k_msg_data {
+ unsigned int id;
+ const struct fm10k_tlv_attr *attr;
+ s32 (*func)(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+};
+
+#define FM10K_MSG_HANDLER(id, attr, func) { id, attr, func }
+
+s32 fm10k_tlv_msg_init(u32 *, u16);
+s32 fm10k_tlv_attr_put_mac_vlan(u32 *, u16, const u8 *, u16);
+s32 fm10k_tlv_attr_get_mac_vlan(u32 *, u8 *, u16 *);
+s32 fm10k_tlv_attr_put_bool(u32 *, u16);
+s32 fm10k_tlv_attr_put_value(u32 *, u16, s64, u32);
+#define fm10k_tlv_attr_put_u8(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 1)
+#define fm10k_tlv_attr_put_u16(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 2)
+#define fm10k_tlv_attr_put_u32(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 4)
+#define fm10k_tlv_attr_put_u64(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 8)
+#define fm10k_tlv_attr_put_s8(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 1)
+#define fm10k_tlv_attr_put_s16(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 2)
+#define fm10k_tlv_attr_put_s32(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 4)
+#define fm10k_tlv_attr_put_s64(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 8)
+s32 fm10k_tlv_attr_get_value(u32 *, void *, u32);
+#define fm10k_tlv_attr_get_u8(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(u8))
+#define fm10k_tlv_attr_get_u16(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(u16))
+#define fm10k_tlv_attr_get_u32(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(u32))
+#define fm10k_tlv_attr_get_u64(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(u64))
+#define fm10k_tlv_attr_get_s8(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(s8))
+#define fm10k_tlv_attr_get_s16(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(s16))
+#define fm10k_tlv_attr_get_s32(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(s32))
+#define fm10k_tlv_attr_get_s64(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(s64))
+s32 fm10k_tlv_attr_put_le_struct(u32 *, u16, const void *, u32);
+s32 fm10k_tlv_attr_get_le_struct(u32 *, void *, u32);
+s32 fm10k_tlv_msg_parse(struct fm10k_hw *, u32 *, struct fm10k_mbx_info *,
+ const struct fm10k_msg_data *);
+s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *);
+
+#define FM10K_TLV_MSG_ID_TEST 0
+
+enum fm10k_tlv_test_attr_id {
+ FM10K_TEST_MSG_UNSET,
+ FM10K_TEST_MSG_STRING,
+ FM10K_TEST_MSG_MAC_ADDR,
+ FM10K_TEST_MSG_U8,
+ FM10K_TEST_MSG_U16,
+ FM10K_TEST_MSG_U32,
+ FM10K_TEST_MSG_U64,
+ FM10K_TEST_MSG_S8,
+ FM10K_TEST_MSG_S16,
+ FM10K_TEST_MSG_S32,
+ FM10K_TEST_MSG_S64,
+ FM10K_TEST_MSG_LE_STRUCT,
+ FM10K_TEST_MSG_NESTED,
+ FM10K_TEST_MSG_RESULT,
+ FM10K_TEST_MSG_MAX
+};
+
+extern const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[];
+void fm10k_tlv_msg_test_create(u32 *, u32);
+s32 fm10k_tlv_msg_test(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+
+#define FM10K_TLV_MSG_TEST_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_TLV_MSG_ID_TEST, fm10k_tlv_msg_test_attr, func)
+#define FM10K_TLV_MSG_ERROR_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_TLV_ERROR, NULL, func)
+#endif /* _FM10K_MSG_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_type.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_type.h
new file mode 100644
index 00000000..1f38a02c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_type.h
@@ -0,0 +1,883 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_TYPE_H_
+#define _FM10K_TYPE_H_
+
+/* forward declaration */
+struct fm10k_hw;
+
+#include "fm10k_osdep.h"
+#include "fm10k_mbx.h"
+
+#define FM10K_INTEL_VENDOR_ID 0x8086
+#define FM10K_DEV_ID_PF 0x15A4
+#define FM10K_DEV_ID_VF 0x15A5
+#ifdef BOULDER_RAPIDS_HW
+#define FM10K_DEV_ID_SDI_FM10420_QDA2 0x15D0
+#endif /* BOULDER_RAPIDS_HW */
+#ifdef ATWOOD_CHANNEL_HW
+#define FM10K_DEV_ID_SDI_FM10420_DA2 0x15D5
+#endif /* ATWOOD_CHANNEL_HW */
+
+#ifndef LINUX_MACROS
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif
+#endif /* LINUX_MACROS */
+
+#define FM10K_MAX_QUEUES 256
+#define FM10K_MAX_QUEUES_PF 128
+#define FM10K_MAX_QUEUES_POOL 16
+
+#define FM10K_48_BIT_MASK 0x0000FFFFFFFFFFFFull
+#define FM10K_STAT_VALID 0x80000000
+
+/* PCI Bus Info */
+#define FM10K_PCIE_LINK_CAP 0x7C
+#define FM10K_PCIE_LINK_STATUS 0x82
+#define FM10K_PCIE_LINK_WIDTH 0x3F0
+#define FM10K_PCIE_LINK_WIDTH_1 0x10
+#define FM10K_PCIE_LINK_WIDTH_2 0x20
+#define FM10K_PCIE_LINK_WIDTH_4 0x40
+#define FM10K_PCIE_LINK_WIDTH_8 0x80
+#define FM10K_PCIE_LINK_SPEED 0xF
+#define FM10K_PCIE_LINK_SPEED_2500 0x1
+#define FM10K_PCIE_LINK_SPEED_5000 0x2
+#define FM10K_PCIE_LINK_SPEED_8000 0x3
+
+/* PCIe payload size */
+#define FM10K_PCIE_DEV_CAP 0x74
+#define FM10K_PCIE_DEV_CAP_PAYLOAD 0x07
+#define FM10K_PCIE_DEV_CAP_PAYLOAD_128 0x00
+#define FM10K_PCIE_DEV_CAP_PAYLOAD_256 0x01
+#define FM10K_PCIE_DEV_CAP_PAYLOAD_512 0x02
+#define FM10K_PCIE_DEV_CTRL 0x78
+#define FM10K_PCIE_DEV_CTRL_PAYLOAD 0xE0
+#define FM10K_PCIE_DEV_CTRL_PAYLOAD_128 0x00
+#define FM10K_PCIE_DEV_CTRL_PAYLOAD_256 0x20
+#define FM10K_PCIE_DEV_CTRL_PAYLOAD_512 0x40
+
+/* PCIe MSI-X Capability info */
+#define FM10K_PCI_MSIX_MSG_CTRL 0xB2
+#define FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK 0x7FF
+#define FM10K_MAX_MSIX_VECTORS 256
+#define FM10K_MAX_VECTORS_PF 256
+#define FM10K_MAX_VECTORS_POOL 32
+
+/* PCIe SR-IOV Info */
+#define FM10K_PCIE_SRIOV_CTRL 0x190
+#define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10
+
+#define FM10K_SUCCESS 0
+#define FM10K_ERR_DEVICE_NOT_SUPPORTED -1
+#define FM10K_ERR_PARAM -2
+#define FM10K_ERR_NO_RESOURCES -3
+#define FM10K_ERR_REQUESTS_PENDING -4
+#define FM10K_ERR_RESET_REQUESTED -5
+#define FM10K_ERR_DMA_PENDING -6
+#define FM10K_ERR_RESET_FAILED -7
+#define FM10K_ERR_INVALID_MAC_ADDR -8
+#define FM10K_ERR_INVALID_VALUE -9
+#define FM10K_NOT_IMPLEMENTED 0x7FFFFFFF
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_1PARAMETER(_p) (_p)
+#define UNREFERENCED_2PARAMETER(_p, _q) do { (_p); (_q); } while (0)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { (_p); (_q); (_r); } while (0)
+
+/* Start of PF registers */
+#define FM10K_CTRL 0x0000
+#define FM10K_CTRL_BAR4_ALLOWED 0x00000004
+
+#define FM10K_CTRL_EXT 0x0001
+#define FM10K_GCR 0x0003
+#define FM10K_GCR_EXT 0x0005
+
+/* Interrupt control registers */
+#define FM10K_EICR 0x0006
+#define FM10K_EICR_PCA_FAULT 0x00000001
+#define FM10K_EICR_THI_FAULT 0x00000004
+#define FM10K_EICR_FUM_FAULT 0x00000020
+#define FM10K_EICR_FAULT_MASK 0x0000003F
+#define FM10K_EICR_MAILBOX 0x00000040
+#define FM10K_EICR_SWITCHREADY 0x00000080
+#define FM10K_EICR_SWITCHNOTREADY 0x00000100
+#define FM10K_EICR_SWITCHINTERRUPT 0x00000200
+#define FM10K_EICR_SRAMERROR 0x00000400
+#define FM10K_EICR_VFLR 0x00000800
+#define FM10K_EICR_MAXHOLDTIME 0x00001000
+#define FM10K_EIMR 0x0007
+#define FM10K_EIMR_PCA_FAULT 0x00000001
+#define FM10K_EIMR_THI_FAULT 0x00000010
+#define FM10K_EIMR_FUM_FAULT 0x00000400
+#define FM10K_EIMR_MAILBOX 0x00001000
+#define FM10K_EIMR_SWITCHREADY 0x00004000
+#define FM10K_EIMR_SWITCHNOTREADY 0x00010000
+#define FM10K_EIMR_SWITCHINTERRUPT 0x00040000
+#define FM10K_EIMR_SRAMERROR 0x00100000
+#define FM10K_EIMR_VFLR 0x00400000
+#define FM10K_EIMR_MAXHOLDTIME 0x01000000
+#define FM10K_EIMR_ALL 0x55555555
+#define FM10K_EIMR_DISABLE(NAME) ((FM10K_EIMR_ ## NAME) << 0)
+#define FM10K_EIMR_ENABLE(NAME) ((FM10K_EIMR_ ## NAME) << 1)
+#define FM10K_FAULT_ADDR_LO 0x0
+#define FM10K_FAULT_ADDR_HI 0x1
+#define FM10K_FAULT_SPECINFO 0x2
+#define FM10K_FAULT_FUNC 0x3
+#define FM10K_FAULT_SIZE 0x4
+#define FM10K_FAULT_FUNC_VALID 0x00008000
+#define FM10K_FAULT_FUNC_PF 0x00004000
+#define FM10K_FAULT_FUNC_VF_MASK 0x00003F00
+#define FM10K_FAULT_FUNC_VF_SHIFT 8
+#define FM10K_FAULT_FUNC_TYPE_MASK 0x000000FF
+
+#define FM10K_PCA_FAULT 0x0008
+#define FM10K_THI_FAULT 0x0010
+#define FM10K_FUM_FAULT 0x001C
+
+/* Rx queue timeout indicator */
+#define FM10K_MAXHOLDQ(_n) ((_n) + 0x0020)
+
+/* Switch Manager info */
+#define FM10K_SM_AREA(_n) ((_n) + 0x0028)
+
+/* GLORT mapping registers */
+#define FM10K_DGLORTMAP(_n) ((_n) + 0x0030)
+#define FM10K_DGLORT_COUNT 8
+#define FM10K_DGLORTMAP_MASK_SHIFT 16
+#define FM10K_DGLORTMAP_ANY 0x00000000
+#define FM10K_DGLORTMAP_NONE 0x0000FFFF
+#define FM10K_DGLORTMAP_ZERO 0xFFFF0000
+#define FM10K_DGLORTDEC(_n) ((_n) + 0x0038)
+#define FM10K_DGLORTDEC_VSILENGTH_SHIFT 4
+#define FM10K_DGLORTDEC_VSIBASE_SHIFT 7
+#define FM10K_DGLORTDEC_PCLENGTH_SHIFT 14
+#define FM10K_DGLORTDEC_QBASE_SHIFT 16
+#define FM10K_DGLORTDEC_RSSLENGTH_SHIFT 24
+#define FM10K_DGLORTDEC_INNERRSS_ENABLE 0x08000000
+#define FM10K_TUNNEL_CFG 0x0040
+#define FM10K_TUNNEL_CFG_NVGRE_SHIFT 16
+#define FM10K_TUNNEL_CFG_GENEVE 0x0041
+#define FM10K_SWPRI_MAP(_n) ((_n) + 0x0050)
+#define FM10K_SWPRI_MAX 16
+#define FM10K_RSSRK(_n, _m) (((_n) * 0x10) + (_m) + 0x0800)
+#define FM10K_RSSRK_SIZE 10
+#define FM10K_RSSRK_ENTRIES_PER_REG 4
+#define FM10K_RETA(_n, _m) (((_n) * 0x20) + (_m) + 0x1000)
+#define FM10K_RETA_SIZE 32
+#define FM10K_RETA_ENTRIES_PER_REG 4
+#define FM10K_MAX_RSS_INDICES 128
+
+/* Rate limiting registers */
+#define FM10K_TC_CREDIT(_n) ((_n) + 0x2000)
+#define FM10K_TC_CREDIT_CREDIT_MASK 0x001FFFFF
+#define FM10K_TC_MAXCREDIT(_n) ((_n) + 0x2040)
+#define FM10K_TC_MAXCREDIT_64K 0x00010000
+#define FM10K_TC_RATE(_n) ((_n) + 0x2080)
+#define FM10K_TC_RATE_QUANTA_MASK 0x0000FFFF
+#define FM10K_TC_RATE_INTERVAL_4US_GEN1 0x00020000
+#define FM10K_TC_RATE_INTERVAL_4US_GEN2 0x00040000
+#define FM10K_TC_RATE_INTERVAL_4US_GEN3 0x00080000
+
+/* DMA control registers */
+#define FM10K_DMA_CTRL 0x20C3
+#define FM10K_DMA_CTRL_TX_ENABLE 0x00000001
+#define FM10K_DMA_CTRL_TX_ACTIVE 0x00000008
+#define FM10K_DMA_CTRL_RX_ENABLE 0x00000010
+#define FM10K_DMA_CTRL_RX_ACTIVE 0x00000080
+#define FM10K_DMA_CTRL_RX_DESC_SIZE 0x00000100
+#define FM10K_DMA_CTRL_MINMSS_SHIFT 9
+#define FM10K_DMA_CTRL_MINMSS_64 0x00008000
+#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3 0x04800000
+#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2 0x04000000
+#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1 0x03800000
+#define FM10K_DMA_CTRL_DATAPATH_RESET 0x20000000
+#define FM10K_DMA_CTRL_32_DESC 0x00000000
+
+#define FM10K_DMA_CTRL2 0x20C4
+#define FM10K_DMA_CTRL2_SWITCH_READY 0x00002000
+
+/* TSO flags configuration
+ * First packet contains all flags except for fin and psh
+ * Middle packet contains only urg and ack
+ * Last packet contains urg, ack, fin, and psh
+ */
+#define FM10K_TSO_FLAGS_LOW 0x00300FF6
+#define FM10K_TSO_FLAGS_HI 0x00000039
+#define FM10K_DTXTCPFLGL 0x20C5
+#define FM10K_DTXTCPFLGH 0x20C6
+
+#define FM10K_TPH_CTRL 0x20C7
+#define FM10K_MRQC(_n) ((_n) + 0x2100)
+#define FM10K_MRQC_TCP_IPV4 0x00000001
+#define FM10K_MRQC_IPV4 0x00000002
+#define FM10K_MRQC_IPV6 0x00000010
+#define FM10K_MRQC_TCP_IPV6 0x00000020
+#define FM10K_MRQC_UDP_IPV4 0x00000040
+#define FM10K_MRQC_UDP_IPV6 0x00000080
+
+#define FM10K_TQMAP(_n) ((_n) + 0x2800)
+#define FM10K_TQMAP_TABLE_SIZE 2048
+#define FM10K_RQMAP(_n) ((_n) + 0x3000)
+
+/* Hardware Statistics */
+#define FM10K_STATS_TIMEOUT 0x3800
+#define FM10K_STATS_UR 0x3801
+#define FM10K_STATS_CA 0x3802
+#define FM10K_STATS_UM 0x3803
+#define FM10K_STATS_XEC 0x3804
+#define FM10K_STATS_VLAN_DROP 0x3805
+#define FM10K_STATS_LOOPBACK_DROP 0x3806
+#define FM10K_STATS_NODESC_DROP 0x3807
+
+/* Timesync registers */
+#define FM10K_SYSTIME 0x3814
+#define FM10K_SYSTIME_CFG 0x3818
+#define FM10K_SYSTIME_CFG_STEP_MASK 0x0000000F
+
+/* PCIe state registers */
+#define FM10K_PHYADDR 0x381C
+
+/* Rx ring registers */
+#define FM10K_RDBAL(_n) ((0x40 * (_n)) + 0x4000)
+#define FM10K_RDBAH(_n) ((0x40 * (_n)) + 0x4001)
+#define FM10K_RDLEN(_n) ((0x40 * (_n)) + 0x4002)
+#define FM10K_TPH_RXCTRL(_n) ((0x40 * (_n)) + 0x4003)
+#define FM10K_TPH_RXCTRL_DESC_TPHEN 0x00000020
+#define FM10K_TPH_RXCTRL_DESC_RROEN 0x00000200
+#define FM10K_TPH_RXCTRL_DATA_WROEN 0x00002000
+#define FM10K_TPH_RXCTRL_HDR_WROEN 0x00008000
+#define FM10K_RDH(_n) ((0x40 * (_n)) + 0x4004)
+#define FM10K_RDT(_n) ((0x40 * (_n)) + 0x4005)
+#define FM10K_RXQCTL(_n) ((0x40 * (_n)) + 0x4006)
+#define FM10K_RXQCTL_ENABLE 0x00000001
+#define FM10K_RXQCTL_PF 0x000000FC
+#define FM10K_RXQCTL_VF_SHIFT 2
+#define FM10K_RXQCTL_VF 0x00000100
+#define FM10K_RXQCTL_ID_MASK (FM10K_RXQCTL_PF | FM10K_RXQCTL_VF)
+#define FM10K_RXDCTL(_n) ((0x40 * (_n)) + 0x4007)
+#define FM10K_RXDCTL_WRITE_BACK_MIN_DELAY 0x00000001
+#define FM10K_RXDCTL_DROP_ON_EMPTY 0x00000200
+#define FM10K_RXINT(_n) ((0x40 * (_n)) + 0x4008)
+#define FM10K_RXINT_TIMER_SHIFT 8
+#define FM10K_SRRCTL(_n) ((0x40 * (_n)) + 0x4009)
+#define FM10K_SRRCTL_BSIZEPKT_SHIFT 8 /* shift _right_ */
+#define FM10K_SRRCTL_LOOPBACK_SUPPRESS 0x40000000
+#define FM10K_SRRCTL_BUFFER_CHAINING_EN 0x80000000
+
+/* Rx Statistics */
+#define FM10K_QPRC(_n) ((0x40 * (_n)) + 0x400A)
+#define FM10K_QPRDC(_n) ((0x40 * (_n)) + 0x400B)
+#define FM10K_QBRC_L(_n) ((0x40 * (_n)) + 0x400C)
+#define FM10K_QBRC_H(_n) ((0x40 * (_n)) + 0x400D)
+
+/* Rx GLORT register */
+#define FM10K_RX_SGLORT(_n) ((0x40 * (_n)) + 0x400E)
+
+/* Tx ring registers */
+#define FM10K_TDBAL(_n) ((0x40 * (_n)) + 0x8000)
+#define FM10K_TDBAH(_n) ((0x40 * (_n)) + 0x8001)
+#define FM10K_TDLEN(_n) ((0x40 * (_n)) + 0x8002)
+/* When fist initialized, VFs need to know the Interrupt Throttle Rate (ITR)
+ * scale which is based on the PCIe speed but the speed information in the PCI
+ * configuration space may not be accurate. The PF already knows the ITR scale
+ * but there is no defined method to pass that information from the PF to the
+ * VF. This is accomplished during VF initialization by temporarily co-opting
+ * the yet-to-be-used TDLEN register to have the PF store the ITR shift for
+ * the VF to retrieve before the VF needs to use the TDLEN register for its
+ * intended purpose, i.e. before the Tx resources are allocated.
+ */
+#define FM10K_TDLEN_ITR_SCALE_SHIFT 9
+#define FM10K_TDLEN_ITR_SCALE_MASK 0x00000E00
+#define FM10K_TDLEN_ITR_SCALE_GEN1 2
+#define FM10K_TDLEN_ITR_SCALE_GEN2 1
+#define FM10K_TDLEN_ITR_SCALE_GEN3 0
+#define FM10K_TPH_TXCTRL(_n) ((0x40 * (_n)) + 0x8003)
+#define FM10K_TPH_TXCTRL_DESC_TPHEN 0x00000020
+#define FM10K_TPH_TXCTRL_DESC_RROEN 0x00000200
+#define FM10K_TPH_TXCTRL_DESC_WROEN 0x00000800
+#define FM10K_TPH_TXCTRL_DATA_RROEN 0x00002000
+#define FM10K_TDH(_n) ((0x40 * (_n)) + 0x8004)
+#define FM10K_TDT(_n) ((0x40 * (_n)) + 0x8005)
+#define FM10K_TXDCTL(_n) ((0x40 * (_n)) + 0x8006)
+#define FM10K_TXDCTL_ENABLE 0x00004000
+#define FM10K_TXDCTL_MAX_TIME_SHIFT 16
+#define FM10K_TXQCTL(_n) ((0x40 * (_n)) + 0x8007)
+#define FM10K_TXQCTL_PF 0x0000003F
+#define FM10K_TXQCTL_VF 0x00000040
+#define FM10K_TXQCTL_ID_MASK (FM10K_TXQCTL_PF | FM10K_TXQCTL_VF)
+#define FM10K_TXQCTL_PC_SHIFT 7
+#define FM10K_TXQCTL_PC_MASK 0x00000380
+#define FM10K_TXQCTL_TC_SHIFT 10
+#define FM10K_TXQCTL_VID_SHIFT 16
+#define FM10K_TXQCTL_VID_MASK 0x0FFF0000
+#define FM10K_TXQCTL_UNLIMITED_BW 0x10000000
+#define FM10K_TXINT(_n) ((0x40 * (_n)) + 0x8008)
+#define FM10K_TXINT_TIMER_SHIFT 8
+
+/* Tx Statistics */
+#define FM10K_QPTC(_n) ((0x40 * (_n)) + 0x8009)
+#define FM10K_QBTC_L(_n) ((0x40 * (_n)) + 0x800A)
+#define FM10K_QBTC_H(_n) ((0x40 * (_n)) + 0x800B)
+
+/* Tx Push registers */
+#define FM10K_TQDLOC(_n) ((0x40 * (_n)) + 0x800C)
+#define FM10K_TQDLOC_BASE_32_DESC 0x08
+#define FM10K_TQDLOC_SIZE_32_DESC 0x00050000
+
+/* Tx GLORT registers */
+#define FM10K_TX_SGLORT(_n) ((0x40 * (_n)) + 0x800D)
+#define FM10K_PFVTCTL(_n) ((0x40 * (_n)) + 0x800E)
+#define FM10K_PFVTCTL_FTAG_DESC_ENABLE 0x00000001
+
+/* Interrupt moderation and control registers */
+#define FM10K_INT_MAP(_n) ((_n) + 0x10080)
+#define FM10K_INT_MAP_TIMER0 0x00000000
+#define FM10K_INT_MAP_TIMER1 0x00000100
+#define FM10K_INT_MAP_IMMEDIATE 0x00000200
+#define FM10K_INT_MAP_DISABLE 0x00000300
+#define FM10K_MSIX_VECTOR_MASK(_n) ((0x4 * (_n)) + 0x11003)
+#define FM10K_INT_CTRL 0x12000
+#define FM10K_INT_CTRL_ENABLEMODERATOR 0x00000400
+#define FM10K_ITR(_n) ((_n) + 0x12400)
+#define FM10K_ITR_INTERVAL1_SHIFT 12
+#define FM10K_ITR_PENDING2 0x10000000
+#define FM10K_ITR_AUTOMASK 0x20000000
+#define FM10K_ITR_MASK_SET 0x40000000
+#define FM10K_ITR_MASK_CLEAR 0x80000000
+#define FM10K_ITR2(_n) ((0x2 * (_n)) + 0x12800)
+#define FM10K_ITR_REG_COUNT 768
+#define FM10K_ITR_REG_COUNT_PF 256
+
+/* Switch manager interrupt registers */
+#define FM10K_IP 0x13000
+#define FM10K_IP_NOTINRESET 0x00000100
+#define FM10K_SRAM_IP 0x13003
+
+/* VLAN registers */
+#define FM10K_VLAN_TABLE(_n, _m) ((0x80 * (_n)) + (_m) + 0x14000)
+#define FM10K_VLAN_TABLE_SIZE 128
+
+/* VLAN specific message offsets */
+#define FM10K_VLAN_TABLE_VID_MAX 4096
+#define FM10K_VLAN_TABLE_VSI_MAX 64
+#define FM10K_VLAN_LENGTH_SHIFT 16
+#define FM10K_VLAN_CLEAR BIT(15)
+#define FM10K_VLAN_OVERRIDE FM10K_VLAN_CLEAR
+#define FM10K_VLAN_ALL \
+ ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT)
+
+/* VF FLR event notification registers */
+#define FM10K_PFVFLRE(_n) ((0x1 * (_n)) + 0x18844)
+#define FM10K_PFVFLREC(_n) ((0x1 * (_n)) + 0x18846)
+
+/* Defines for size of uncacheable and write-combining memories */
+#define FM10K_UC_ADDR_START 0x000000 /* start of standard regs */
+#define FM10K_WC_ADDR_START 0x100000 /* start of Tx Desc Cache */
+#define FM10K_DBI_ADDR_START 0x200000 /* start of debug registers */
+#define FM10K_UC_ADDR_SIZE (FM10K_WC_ADDR_START - FM10K_UC_ADDR_START)
+#define FM10K_WC_ADDR_SIZE (FM10K_DBI_ADDR_START - FM10K_WC_ADDR_START)
+
+/* Define timeouts for resets and disables */
+#define FM10K_QUEUE_DISABLE_TIMEOUT 100
+#define FM10K_RESET_TIMEOUT 150
+
+/* Maximum supported combined inner and outer header length for encapsulation */
+#define FM10K_TUNNEL_HEADER_LENGTH 184
+
+/* VF registers */
+#define FM10K_VFCTRL 0x00000
+#define FM10K_VFCTRL_RST 0x00000008
+#define FM10K_VFINT_MAP 0x00030
+#define FM10K_VFSYSTIME 0x00040
+#define FM10K_VFITR(_n) ((_n) + 0x00060)
+
+/* Registers contained in BAR 4 for Switch management */
+#define FM10K_SW_SYSTIME_ADJUST 0x0224D
+#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF
+#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000
+#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252)
+
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif /* ETH_ALEN */
+
+#ifndef IS_ZERO_ETHER_ADDR
+/* make certain address is not 0 */
+#define IS_ZERO_ETHER_ADDR(addr) \
+(!((addr)[0] | (addr)[1] | (addr)[2] | (addr)[3] | (addr)[4] | (addr)[5]))
+#endif
+
+#ifndef IS_MULTICAST_ETHER_ADDR
+#define IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1)
+#endif
+
+#ifndef IS_VALID_ETHER_ADDR
+/* make certain address is not multicast or 0 */
+#define IS_VALID_ETHER_ADDR(addr) \
+(!IS_MULTICAST_ETHER_ADDR(addr) && !IS_ZERO_ETHER_ADDR(addr))
+#endif
+
+enum fm10k_int_source {
+ fm10k_int_mailbox = 0,
+ fm10k_int_pcie_fault = 1,
+ fm10k_int_switch_up_down = 2,
+ fm10k_int_switch_event = 3,
+ fm10k_int_sram = 4,
+ fm10k_int_vflr = 5,
+ fm10k_int_max_hold_time = 6,
+ fm10k_int_sources_max_pf
+};
+
+/* PCIe bus speeds */
+enum fm10k_bus_speed {
+ fm10k_bus_speed_unknown = 0,
+ fm10k_bus_speed_2500 = 2500,
+ fm10k_bus_speed_5000 = 5000,
+ fm10k_bus_speed_8000 = 8000,
+ fm10k_bus_speed_reserved
+};
+
+/* PCIe bus widths */
+enum fm10k_bus_width {
+ fm10k_bus_width_unknown = 0,
+ fm10k_bus_width_pcie_x1 = 1,
+ fm10k_bus_width_pcie_x2 = 2,
+ fm10k_bus_width_pcie_x4 = 4,
+ fm10k_bus_width_pcie_x8 = 8,
+ fm10k_bus_width_reserved
+};
+
+/* PCIe payload sizes */
+enum fm10k_bus_payload {
+ fm10k_bus_payload_unknown = 0,
+ fm10k_bus_payload_128 = 1,
+ fm10k_bus_payload_256 = 2,
+ fm10k_bus_payload_512 = 3,
+ fm10k_bus_payload_reserved
+};
+
+/* Bus parameters */
+struct fm10k_bus_info {
+ enum fm10k_bus_speed speed;
+ enum fm10k_bus_width width;
+ enum fm10k_bus_payload payload;
+};
+
+/* Statistics related declarations */
+struct fm10k_hw_stat {
+ u64 count;
+ u32 base_l;
+ u32 base_h;
+};
+
+struct fm10k_hw_stats_q {
+ struct fm10k_hw_stat tx_bytes;
+ struct fm10k_hw_stat tx_packets;
+#define tx_stats_idx tx_packets.base_h
+ struct fm10k_hw_stat rx_bytes;
+ struct fm10k_hw_stat rx_packets;
+#define rx_stats_idx rx_packets.base_h
+ struct fm10k_hw_stat rx_drops;
+};
+
+struct fm10k_hw_stats {
+ struct fm10k_hw_stat timeout;
+#define stats_idx timeout.base_h
+ struct fm10k_hw_stat ur;
+ struct fm10k_hw_stat ca;
+ struct fm10k_hw_stat um;
+ struct fm10k_hw_stat xec;
+ struct fm10k_hw_stat vlan_drop;
+ struct fm10k_hw_stat loopback_drop;
+ struct fm10k_hw_stat nodesc_drop;
+ struct fm10k_hw_stats_q q[FM10K_MAX_QUEUES_PF];
+};
+
+/* Establish DGLORT feature priority */
+enum fm10k_dglortdec_idx {
+ fm10k_dglort_default = 0,
+ fm10k_dglort_vf_rsvd0 = 1,
+ fm10k_dglort_vf_rss = 2,
+ fm10k_dglort_pf_rsvd0 = 3,
+ fm10k_dglort_pf_queue = 4,
+ fm10k_dglort_pf_vsi = 5,
+ fm10k_dglort_pf_rsvd1 = 6,
+ fm10k_dglort_pf_rss = 7
+};
+
+struct fm10k_dglort_cfg {
+ u16 glort; /* GLORT base */
+ u16 queue_b; /* Base value for queue */
+ u8 vsi_b; /* Base value for VSI */
+ u8 idx; /* index of DGLORTDEC entry */
+ u8 rss_l; /* RSS indices */
+ u8 pc_l; /* Priority Class indices */
+ u8 vsi_l; /* Number of bits from GLORT used to determine VSI */
+ u8 queue_l; /* Number of bits from GLORT used to determine queue */
+ u8 shared_l; /* Ignored bits from GLORT resulting in shared VSI */
+ u8 inner_rss; /* Boolean value if inner header is used for RSS */
+};
+
+enum fm10k_pca_fault {
+ PCA_NO_FAULT,
+ PCA_UNMAPPED_ADDR,
+ PCA_BAD_QACCESS_PF,
+ PCA_BAD_QACCESS_VF,
+ PCA_MALICIOUS_REQ,
+ PCA_POISONED_TLP,
+ PCA_TLP_ABORT,
+ __PCA_MAX
+};
+
+enum fm10k_thi_fault {
+ THI_NO_FAULT,
+ THI_MAL_DIS_Q_FAULT,
+ __THI_MAX
+};
+
+enum fm10k_fum_fault {
+ FUM_NO_FAULT,
+ FUM_UNMAPPED_ADDR,
+ FUM_POISONED_TLP,
+ FUM_BAD_VF_QACCESS,
+ FUM_ADD_DECODE_ERR,
+ FUM_RO_ERROR,
+ FUM_QPRC_CRC_ERROR,
+ FUM_CSR_TIMEOUT,
+ FUM_INVALID_TYPE,
+ FUM_INVALID_LENGTH,
+ FUM_INVALID_BE,
+ FUM_INVALID_ALIGN,
+ __FUM_MAX
+};
+
+struct fm10k_fault {
+ u64 address; /* Address at the time fault was detected */
+ u32 specinfo; /* Extra info on this fault (fault dependent) */
+ u8 type; /* Fault value dependent on subunit */
+ u8 func; /* Function number of the fault */
+};
+
+struct fm10k_mac_ops {
+ /* basic bring-up and tear-down */
+ s32 (*reset_hw)(struct fm10k_hw *);
+ s32 (*init_hw)(struct fm10k_hw *);
+ s32 (*start_hw)(struct fm10k_hw *);
+ s32 (*stop_hw)(struct fm10k_hw *);
+ s32 (*get_bus_info)(struct fm10k_hw *);
+ s32 (*get_host_state)(struct fm10k_hw *, bool *);
+ s32 (*request_lport_map)(struct fm10k_hw *);
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+ bool (*is_slot_appropriate)(struct fm10k_hw *);
+#endif
+ s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool);
+ s32 (*read_mac_addr)(struct fm10k_hw *);
+ s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *,
+ u16, bool, u8);
+ s32 (*update_mc_addr)(struct fm10k_hw *, u16, const u8 *, u16, bool);
+ s32 (*update_xcast_mode)(struct fm10k_hw *, u16, u8);
+ void (*update_int_moderator)(struct fm10k_hw *);
+ s32 (*update_lport_state)(struct fm10k_hw *, u16, u16, bool);
+ void (*update_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *);
+ void (*rebind_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *);
+ s32 (*configure_dglort_map)(struct fm10k_hw *,
+ struct fm10k_dglort_cfg *);
+ void (*set_dma_mask)(struct fm10k_hw *, u64);
+ s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *);
+ s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb);
+ s32 (*notify_offset)(struct fm10k_hw *, u64 offset);
+ u64 (*read_systime)(struct fm10k_hw *);
+};
+
+enum fm10k_mac_type {
+ fm10k_mac_unknown = 0,
+ fm10k_mac_pf,
+ fm10k_mac_vf,
+ fm10k_num_macs
+};
+
+struct fm10k_mac_info {
+ struct fm10k_mac_ops ops;
+ enum fm10k_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u16 default_vid;
+ u16 max_msix_vectors;
+ u16 max_queues;
+ bool vlan_override;
+ bool get_host_state;
+ bool tx_ready;
+ u32 dglort_map;
+ u8 itr_scale;
+ u64 reset_while_pending;
+};
+
+struct fm10k_swapi_table_info {
+ u32 used;
+ u32 avail;
+};
+
+struct fm10k_swapi_info {
+ u32 status;
+ struct fm10k_swapi_table_info mac;
+ struct fm10k_swapi_table_info nexthop;
+ struct fm10k_swapi_table_info ffu;
+};
+
+enum fm10k_xcast_modes {
+ FM10K_XCAST_MODE_ALLMULTI = 0,
+ FM10K_XCAST_MODE_MULTI = 1,
+ FM10K_XCAST_MODE_PROMISC = 2,
+ FM10K_XCAST_MODE_NONE = 3,
+ FM10K_XCAST_MODE_DISABLE = 4
+};
+
+enum fm10k_timestamp_modes {
+ FM10K_TIMESTAMP_MODE_NONE = 0,
+ FM10K_TIMESTAMP_MODE_PEP_TO_PEP = 1,
+ FM10K_TIMESTAMP_MODE_PEP_TO_ANY = 2,
+};
+
+#define FM10K_VF_TC_MAX 100000 /* 100,000 Mb/s aka 100Gb/s */
+#define FM10K_VF_TC_MIN 1 /* 1 Mb/s is the slowest rate */
+
+struct fm10k_vf_info {
+ /* mbx must be first field in struct unless all default IOV message
+ * handlers are redone as the assumption is that vf_info starts
+ * at the same offset as the mailbox
+ */
+ struct fm10k_mbx_info mbx; /* PF side of VF mailbox */
+ int rate; /* Tx BW cap as defined by OS */
+ u16 glort; /* resource tag for this VF */
+ u16 sw_vid; /* Switch API assigned VLAN */
+ u16 pf_vid; /* PF assigned Default VLAN */
+ u8 mac[ETH_ALEN]; /* PF Default MAC address */
+ u8 vsi; /* VSI identifier */
+ u8 vf_idx; /* which VF this is */
+ u8 vf_flags; /* flags indicating what modes
+ * are supported for the port
+ */
+#ifndef NO_FM10K_VF_TRUSTED_MODE
+ bool trusted; /* VF trust mode */
+#endif
+};
+
+#define FM10K_VF_FLAG_ALLMULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_ALLMULTI))
+#define FM10K_VF_FLAG_MULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_MULTI))
+#define FM10K_VF_FLAG_PROMISC_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_PROMISC))
+#define FM10K_VF_FLAG_NONE_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_NONE))
+#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF)
+#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4)
+#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode))
+#define FM10K_VF_FLAG_SET_MODE_NONE \
+ FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_NONE)
+#define FM10K_VF_FLAG_MULTI_ENABLED \
+ (FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_ALLMULTI) | \
+ FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_MULTI) | \
+ FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_PROMISC))
+
+struct fm10k_iov_ops {
+ /* IOV related bring-up and tear-down */
+ s32 (*assign_resources)(struct fm10k_hw *, u16, u16);
+ s32 (*configure_tc)(struct fm10k_hw *, u16, int);
+ s32 (*assign_int_moderator)(struct fm10k_hw *, u16);
+ s32 (*assign_default_mac_vlan)(struct fm10k_hw *,
+ struct fm10k_vf_info *);
+ s32 (*reset_resources)(struct fm10k_hw *,
+ struct fm10k_vf_info *);
+ s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8);
+ void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *);
+ void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16);
+ void (*notify_offset)(struct fm10k_hw *, struct fm10k_vf_info*, u64);
+};
+
+struct fm10k_iov_info {
+ struct fm10k_iov_ops ops;
+ u16 total_vfs;
+ u16 num_vfs;
+ u16 num_pools;
+};
+
+struct fm10k_hw {
+ u32 *hw_addr;
+ u32 *sw_addr;
+ void *back;
+ struct fm10k_mac_info mac;
+ struct fm10k_bus_info bus;
+ struct fm10k_bus_info bus_caps;
+ struct fm10k_iov_info iov;
+ struct fm10k_mbx_info mbx;
+ struct fm10k_swapi_info swapi;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u32 flags;
+#define FM10K_HW_FLAG_CLOCK_OWNER BIT(0)
+};
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define FM10K_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define FM10K_REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Transmit Descriptor */
+struct fm10k_tx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 buflen; /* Length of data to be DMAed */
+ __le16 vlan; /* VLAN_ID and VPRI to be inserted in FTAG */
+ __le16 mss; /* MSS for segmentation offload */
+ u8 hdrlen; /* Header size for segmentation offload */
+ u8 flags; /* Status and offload request flags */
+};
+
+/* Transmit Descriptor Cache Structure */
+struct fm10k_tx_desc_cache {
+ struct fm10k_tx_desc tx_desc[256];
+};
+
+#define FM10K_TXD_FLAG_INT 0x01
+#define FM10K_TXD_FLAG_TIME 0x02
+#define FM10K_TXD_FLAG_CSUM 0x04
+#define FM10K_TXD_FLAG_FTAG 0x10
+#define FM10K_TXD_FLAG_RS 0x20
+#define FM10K_TXD_FLAG_LAST 0x40
+#define FM10K_TXD_FLAG_DONE 0x80
+
+
+/* These macros are meant to enable optimal placement of the RS and INT
+ * bits. It will point us to the last descriptor in the cache for either the
+ * start of the packet, or the end of the packet. If the index is actually
+ * at the start of the FIFO it will point to the offset for the last index
+ * in the FIFO to prevent an unnecessary write.
+ */
+#define FM10K_TXD_WB_FIFO_SIZE 4
+
+/* Receive Descriptor - 32B */
+union fm10k_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ __le64 reserved; /* Empty space, RSS hash */
+ __le64 timestamp;
+ } q; /* Read, Writeback, 64b quad-words */
+ struct {
+ __le32 data; /* RSS and header data */
+ __le32 rss; /* RSS Hash */
+ __le32 staterr;
+ __le32 vlan_len;
+ __le32 glort; /* sglort/dglort */
+ } d; /* Writeback, 32b double-words */
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen, xC */
+ __le16 rss_lower;
+ __le16 rss_upper;
+ __le16 status; /* status/error */
+ __le16 csum_err; /* checksum or extended error value */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ __le16 dglort;
+ __le16 sglort;
+ } w; /* Writeback, 16b words */
+};
+
+#define FM10K_RXD_RSSTYPE_MASK 0x000F
+enum fm10k_rdesc_rss_type {
+ FM10K_RSSTYPE_NONE = 0x0,
+ FM10K_RSSTYPE_IPV4_TCP = 0x1,
+ FM10K_RSSTYPE_IPV4 = 0x2,
+ FM10K_RSSTYPE_IPV6_TCP = 0x3,
+ /* Reserved 0x4 */
+ FM10K_RSSTYPE_IPV6 = 0x5,
+ /* Reserved 0x6 */
+ FM10K_RSSTYPE_IPV4_UDP = 0x7,
+ FM10K_RSSTYPE_IPV6_UDP = 0x8
+ /* Reserved 0x9 - 0xF */
+};
+
+#define FM10K_RXD_PKTTYPE_MASK 0x03F0
+#define FM10K_RXD_PKTTYPE_SHIFT 4
+enum fm10k_rdesc_pkt_type {
+ /* L3 type */
+ FM10K_PKTTYPE_OTHER = 0x00,
+ FM10K_PKTTYPE_IPV4 = 0x01,
+ FM10K_PKTTYPE_IPV4_EX = 0x02,
+ FM10K_PKTTYPE_IPV6 = 0x03,
+ FM10K_PKTTYPE_IPV6_EX = 0x04,
+
+ /* L4 type */
+ FM10K_PKTTYPE_TCP = 0x08,
+ FM10K_PKTTYPE_UDP = 0x10,
+ FM10K_PKTTYPE_GRE = 0x18,
+ FM10K_PKTTYPE_VXLAN = 0x20,
+ FM10K_PKTTYPE_NVGRE = 0x28,
+ FM10K_PKTTYPE_GENEVE = 0x30
+};
+
+#define FM10K_RXD_HDR_INFO_XC_MASK 0x0006
+enum fm10k_rxdesc_xc {
+ FM10K_XC_UNICAST = 0x0,
+ FM10K_XC_MULTICAST = 0x4,
+ FM10K_XC_BROADCAST = 0x6
+};
+
+
+#define FM10K_RXD_STATUS_DD 0x0001 /* Descriptor done */
+#define FM10K_RXD_STATUS_EOP 0x0002 /* End of packet */
+#define FM10K_RXD_STATUS_IPCS 0x0008 /* Indicates IPv4 csum */
+#define FM10K_RXD_STATUS_L4CS 0x0010 /* Indicates an L4 csum */
+#define FM10K_RXD_STATUS_L4CS2 0x0040 /* Inner header L4 csum */
+#define FM10K_RXD_STATUS_L4E2 0x0800 /* Inner header L4 csum err */
+#define FM10K_RXD_STATUS_IPE2 0x1000 /* Inner header IPv4 csum err */
+#define FM10K_RXD_STATUS_RXE 0x2000 /* Generic Rx error */
+#define FM10K_RXD_STATUS_L4E 0x4000 /* L4 csum error */
+#define FM10K_RXD_STATUS_IPE 0x8000 /* IPv4 csum error */
+
+#define FM10K_RXD_ERR_SWITCH_ERROR 0x0001 /* Switch found bad packet */
+#define FM10K_RXD_ERR_NO_DESCRIPTOR 0x0002 /* No descriptor available */
+#define FM10K_RXD_ERR_PP_ERROR 0x0004 /* RAM error during processing */
+#define FM10K_RXD_ERR_SWITCH_READY 0x0008 /* Link transition mid-packet */
+#define FM10K_RXD_ERR_TOO_BIG 0x0010 /* Pkt too big for single buf */
+
+
+struct fm10k_ftag {
+ __be16 swpri_type_user;
+ __be16 vlan;
+ __be16 sglort;
+ __be16 dglort;
+};
+
+#endif /* _FM10K_TYPE_H */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.c
new file mode 100644
index 00000000..bd449773
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.c
@@ -0,0 +1,675 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "fm10k_vf.h"
+
+/**
+ * fm10k_stop_hw_vf - Stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ **/
+STATIC s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
+{
+ u8 *perm_addr = hw->mac.perm_addr;
+ u32 bal = 0, bah = 0, tdlen;
+ s32 err;
+ u16 i;
+
+ DEBUGFUNC("fm10k_stop_hw_vf");
+
+ /* we need to disable the queues before taking further steps */
+ err = fm10k_stop_hw_generic(hw);
+ if (err && err != FM10K_ERR_REQUESTS_PENDING)
+ return err;
+
+ /* If permanent address is set then we need to restore it */
+ if (IS_VALID_ETHER_ADDR(perm_addr)) {
+ bal = (((u32)perm_addr[3]) << 24) |
+ (((u32)perm_addr[4]) << 16) |
+ (((u32)perm_addr[5]) << 8);
+ bah = (((u32)0xFF) << 24) |
+ (((u32)perm_addr[0]) << 16) |
+ (((u32)perm_addr[1]) << 8) |
+ ((u32)perm_addr[2]);
+ }
+
+ /* restore default itr_scale for next VF initialization */
+ tdlen = hw->mac.itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT;
+
+ /* The queues have already been disabled so we just need to
+ * update their base address registers
+ */
+ for (i = 0; i < hw->mac.max_queues; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(i), bal);
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(i), bah);
+ FM10K_WRITE_REG(hw, FM10K_RDBAL(i), bal);
+ FM10K_WRITE_REG(hw, FM10K_RDBAH(i), bah);
+ /* Restore ITR scale in software-defined mechanism in TDLEN
+ * for next VF initialization. See definition of
+ * FM10K_TDLEN_ITR_SCALE_SHIFT for more details on the use of
+ * TDLEN here.
+ */
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(i), tdlen);
+ }
+
+ return err;
+}
+
+/**
+ * fm10k_reset_hw_vf - VF hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * This function should return the hardware to a state similar to the
+ * one it is in after just being initialized.
+ **/
+STATIC s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
+{
+ s32 err;
+
+ DEBUGFUNC("fm10k_reset_hw_vf");
+
+ /* shut down queues we own and reset DMA configuration */
+ err = fm10k_stop_hw_vf(hw);
+ if (err == FM10K_ERR_REQUESTS_PENDING)
+ hw->mac.reset_while_pending++;
+ else if (err)
+ return err;
+
+ /* Inititate VF reset */
+ FM10K_WRITE_REG(hw, FM10K_VFCTRL, FM10K_VFCTRL_RST);
+
+ /* Flush write and allow 100us for reset to complete */
+ FM10K_WRITE_FLUSH(hw);
+ usec_delay(FM10K_RESET_TIMEOUT);
+
+ /* Clear reset bit and verify it was cleared */
+ FM10K_WRITE_REG(hw, FM10K_VFCTRL, 0);
+ if (FM10K_READ_REG(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST)
+ return FM10K_ERR_RESET_FAILED;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_init_hw_vf - VF hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ **/
+STATIC s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
+{
+ u32 tqdloc, tqdloc0 = ~FM10K_READ_REG(hw, FM10K_TQDLOC(0));
+ s32 err;
+ u16 i;
+
+ DEBUGFUNC("fm10k_init_hw_vf");
+
+ /* verify we have at least 1 queue */
+ if (!~FM10K_READ_REG(hw, FM10K_TXQCTL(0)) ||
+ !~FM10K_READ_REG(hw, FM10K_RXQCTL(0))) {
+ err = FM10K_ERR_NO_RESOURCES;
+ goto reset_max_queues;
+ }
+
+ /* determine how many queues we have */
+ for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) {
+ /* verify the Descriptor cache offsets are increasing */
+ tqdloc = ~FM10K_READ_REG(hw, FM10K_TQDLOC(i));
+ if (!tqdloc || (tqdloc == tqdloc0))
+ break;
+
+ /* check to verify the PF doesn't own any of our queues */
+ if (!~FM10K_READ_REG(hw, FM10K_TXQCTL(i)) ||
+ !~FM10K_READ_REG(hw, FM10K_RXQCTL(i)))
+ break;
+ }
+
+ /* shut down queues we own and reset DMA configuration */
+ err = fm10k_disable_queues_generic(hw, i);
+ if (err)
+ goto reset_max_queues;
+
+ /* record maximum queue count */
+ hw->mac.max_queues = i;
+
+ /* fetch default VLAN and ITR scale */
+ hw->mac.default_vid = (FM10K_READ_REG(hw, FM10K_TXQCTL(0)) &
+ FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;
+ /* Read the ITR scale from TDLEN. See the definition of
+ * FM10K_TDLEN_ITR_SCALE_SHIFT for more information about how TDLEN is
+ * used here.
+ */
+ hw->mac.itr_scale = (FM10K_READ_REG(hw, FM10K_TDLEN(0)) &
+ FM10K_TDLEN_ITR_SCALE_MASK) >>
+ FM10K_TDLEN_ITR_SCALE_SHIFT;
+
+ return FM10K_SUCCESS;
+
+reset_max_queues:
+ hw->mac.max_queues = 0;
+
+ return err;
+}
+
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+/**
+ * fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU
+ * @hw: pointer to hardware structure
+ *
+ * Looks at the PCIe bus info to confirm whether or not this slot can support
+ * the necessary bandwidth for this device. Since the VF has no control over
+ * the "slot" it is in, always indicate that the slot is appropriate.
+ **/
+STATIC bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ DEBUGFUNC("fm10k_is_slot_appropriate_vf");
+
+ return TRUE;
+}
+
+#endif
+/* This structure defines the attibutes to be parsed below */
+const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = {
+ FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN),
+ FM10K_TLV_ATTR_BOOL(FM10K_MAC_VLAN_MSG_SET),
+ FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MAC),
+ FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_DEFAULT_MAC),
+ FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MULTICAST),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_update_vlan_vf - Update status of VLAN ID in VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vid: VLAN ID to add to table
+ * @vsi: Reserved, should always be 0
+ * @set: Indicates if this is a set or clear operation
+ *
+ * This function adds or removes the corresponding VLAN ID from the VLAN
+ * filter table for this VF.
+ **/
+STATIC s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[4];
+
+ /* verify the index is not set */
+ if (vsi)
+ return FM10K_ERR_PARAM;
+
+ /* clever trick to verify reserved bits in both vid and length */
+ if ((vid << 16 | vid) >> 28)
+ return FM10K_ERR_PARAM;
+
+ /* encode set bit into the VLAN ID */
+ if (!set)
+ vid |= FM10K_VLAN_CLEAR;
+
+ /* generate VLAN request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
+ fm10k_tlv_attr_put_u32(msg, FM10K_MAC_VLAN_MSG_VLAN, vid);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_msg_mac_vlan_vf - Read device MAC address from mailbox message
+ * @hw: pointer to the HW structure
+ * @results: Attributes for message
+ * @mbx: unused mailbox data
+ *
+ * This function should determine the MAC address for the VF
+ **/
+s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u8 perm_addr[ETH_ALEN];
+ u16 vid;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_mac_vlan_vf");
+
+ /* record MAC address requested */
+ err = fm10k_tlv_attr_get_mac_vlan(
+ results[FM10K_MAC_VLAN_MSG_DEFAULT_MAC],
+ perm_addr, &vid);
+ if (err)
+ return err;
+
+ memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
+ hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1);
+ hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_read_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * This function should determine the MAC address for the VF
+ **/
+STATIC s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw)
+{
+ u8 perm_addr[ETH_ALEN];
+ u32 base_addr;
+
+ DEBUGFUNC("fm10k_read_mac_addr_vf");
+
+ base_addr = FM10K_READ_REG(hw, FM10K_TDBAL(0));
+
+ /* last byte should be 0 */
+ if (base_addr << 24)
+ return FM10K_ERR_INVALID_MAC_ADDR;
+
+ perm_addr[3] = (u8)(base_addr >> 24);
+ perm_addr[4] = (u8)(base_addr >> 16);
+ perm_addr[5] = (u8)(base_addr >> 8);
+
+ base_addr = FM10K_READ_REG(hw, FM10K_TDBAH(0));
+
+ /* first byte should be all 1's */
+ if ((~base_addr) >> 24)
+ return FM10K_ERR_INVALID_MAC_ADDR;
+
+ perm_addr[0] = (u8)(base_addr >> 16);
+ perm_addr[1] = (u8)(base_addr >> 8);
+ perm_addr[2] = (u8)(base_addr);
+
+ memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
+ memcpy(hw->mac.addr, perm_addr, ETH_ALEN);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_update_uc_addr_vf - Update device unicast addresses
+ * @hw: pointer to the HW structure
+ * @glort: unused
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ * @flags: flags field to indicate add and secure - unused
+ *
+ * This function is used to add or remove unicast MAC addresses for
+ * the VF.
+ **/
+STATIC s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add, u8 flags)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[7];
+
+ DEBUGFUNC("fm10k_update_uc_addr_vf");
+
+ UNREFERENCED_2PARAMETER(glort, flags);
+
+ /* verify VLAN ID is valid */
+ if (vid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* verify MAC address is valid */
+ if (!IS_VALID_ETHER_ADDR(mac))
+ return FM10K_ERR_PARAM;
+
+ /* verify we are not locked down on the MAC address */
+ if (IS_VALID_ETHER_ADDR(hw->mac.perm_addr) &&
+ memcmp(hw->mac.perm_addr, mac, ETH_ALEN))
+ return FM10K_ERR_PARAM;
+
+ /* add bit to notify us if this is a set or clear operation */
+ if (!add)
+ vid |= FM10K_VLAN_CLEAR;
+
+ /* generate VLAN request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
+ fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MAC, mac, vid);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_mc_addr_vf - Update device multicast addresses
+ * @hw: pointer to the HW structure
+ * @glort: unused
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ *
+ * This function is used to add or remove multicast MAC addresses for
+ * the VF.
+ **/
+STATIC s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[7];
+
+ DEBUGFUNC("fm10k_update_uc_addr_vf");
+
+ UNREFERENCED_1PARAMETER(glort);
+
+ /* verify VLAN ID is valid */
+ if (vid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* verify multicast address is valid */
+ if (!IS_MULTICAST_ETHER_ADDR(mac))
+ return FM10K_ERR_PARAM;
+
+ /* add bit to notify us if this is a set or clear operation */
+ if (!add)
+ vid |= FM10K_VLAN_CLEAR;
+
+ /* generate VLAN request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
+ fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MULTICAST,
+ mac, vid);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_int_moderator_vf - Request update of interrupt moderator list
+ * @hw: pointer to hardware structure
+ *
+ * This function will issue a request to the PF to rescan our MSI-X table
+ * and to update the interrupt moderator linked list.
+ **/
+STATIC void fm10k_update_int_moderator_vf(struct fm10k_hw *hw)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[1];
+
+ /* generate MSI-X request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MSIX);
+
+ /* load onto outgoing mailbox */
+ mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/* This structure defines the attibutes to be parsed below */
+const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = {
+ FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_DISABLE),
+ FM10K_TLV_ATTR_U8(FM10K_LPORT_STATE_MSG_XCAST_MODE),
+ FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_READY),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_lport_state_vf - Message handler for lport_state message from PF
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler is meant to capture the indication from the PF that we
+ * are ready to bring up the interface.
+ **/
+s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_lport_state_vf");
+
+ hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ?
+ FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_update_lport_state_vf - Update device state in lower device
+ * @hw: pointer to the HW structure
+ * @glort: unused
+ * @count: number of logical ports to enable - unused (always 1)
+ * @enable: boolean value indicating if this is an enable or disable request
+ *
+ * Notify the lower device of a state change. If the lower device is
+ * enabled we can add filters, if it is disabled all filters for this
+ * logical port are flushed.
+ **/
+STATIC s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
+ u16 count, bool enable)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[2];
+
+ UNREFERENCED_2PARAMETER(glort, count);
+ DEBUGFUNC("fm10k_update_lport_state_vf");
+
+ /* reset glort mask 0 as we have to wait to be enabled */
+ hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
+
+ /* generate port state request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
+ if (!enable)
+ fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_DISABLE);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_xcast_mode_vf - Request update of multicast mode
+ * @hw: pointer to hardware structure
+ * @glort: unused
+ * @mode: integer value indicating mode being requested
+ *
+ * This function will attempt to request a higher mode for the port
+ * so that it can enable either multicast, multicast promiscuous, or
+ * promiscuous mode of operation.
+ **/
+STATIC s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[3];
+
+ UNREFERENCED_1PARAMETER(glort);
+ DEBUGFUNC("fm10k_update_xcast_mode_vf");
+
+ if (mode > FM10K_XCAST_MODE_NONE)
+ return FM10K_ERR_PARAM;
+
+ /* generate message requesting to change xcast mode */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
+ fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = {
+ FM10K_TLV_ATTR_U64(FM10K_1588_MSG_CLK_OFFSET),
+ FM10K_TLV_ATTR_LAST
+};
+
+/* currently there is no shared 1588 message handler */
+
+/**
+ * fm10k_update_hw_stats_vf - Updates hardware related statistics of VF
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ *
+ * This function collects and aggregates per queue hardware statistics.
+ **/
+STATIC void fm10k_update_hw_stats_vf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats)
+{
+ DEBUGFUNC("fm10k_update_hw_stats_vf");
+
+ fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
+}
+
+/**
+ * fm10k_rebind_hw_stats_vf - Resets base for hardware statistics of VF
+ * @hw: pointer to hardware structure
+ * @stats: pointer to the stats structure to update
+ *
+ * This function resets the base for queue hardware statistics.
+ **/
+STATIC void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats)
+{
+ DEBUGFUNC("fm10k_rebind_hw_stats_vf");
+
+ /* Unbind Queue Statistics */
+ fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+
+ /* Reinitialize bases for all stats */
+ fm10k_update_hw_stats_vf(hw, stats);
+}
+
+/**
+ * fm10k_configure_dglort_map_vf - Configures GLORT entry and queues
+ * @hw: pointer to hardware structure
+ * @dglort: pointer to dglort configuration structure
+ *
+ * Reads the configuration structure contained in dglort_cfg and uses
+ * that information to then populate a DGLORTMAP/DEC entry and the queues
+ * to which it has been assigned.
+ **/
+STATIC s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
+ struct fm10k_dglort_cfg *dglort)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ DEBUGFUNC("fm10k_configure_dglort_map_vf");
+
+ /* verify the dglort pointer */
+ if (!dglort)
+ return FM10K_ERR_PARAM;
+
+ /* stub for now until we determine correct message for this */
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_adjust_systime_vf - Adjust systime frequency
+ * @hw: pointer to hardware structure
+ * @ppb: adjustment rate in parts per billion
+ *
+ * This function takes an adjustment rate in parts per billion and will
+ * verify that this value is 0 as the VF cannot support adjusting the
+ * systime clock.
+ *
+ * If the ppb value is non-zero the return is ERR_PARAM else success
+ **/
+STATIC s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ DEBUGFUNC("fm10k_adjust_systime_vf");
+
+ /* The VF cannot adjust the clock frequency, however it should
+ * already have a syntonic clock with whichever host interface is
+ * running as the master for the host interface clock domain so
+ * there should be not frequency adjustment necessary.
+ */
+ return ppb ? FM10K_ERR_PARAM : FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_read_systime_vf - Reads value of systime registers
+ * @hw: pointer to the hardware structure
+ *
+ * Function reads the content of 2 registers, combined to represent a 64 bit
+ * value measured in nanoseconds. In order to guarantee the value is accurate
+ * we check the 32 most significant bits both before and after reading the
+ * 32 least significant bits to verify they didn't change as we were reading
+ * the registers.
+ **/
+static u64 fm10k_read_systime_vf(struct fm10k_hw *hw)
+{
+ u32 systime_l, systime_h, systime_tmp;
+
+ systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
+
+ do {
+ systime_tmp = systime_h;
+ systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME);
+ systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
+ } while (systime_tmp != systime_h);
+
+ return ((u64)systime_h << 32) | systime_l;
+}
+
+static const struct fm10k_msg_data fm10k_msg_data_vf[] = {
+ FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
+ FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
+ FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
+ FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+};
+
+/**
+ * fm10k_init_ops_vf - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for VF.
+ * Does not touch the hardware.
+ **/
+s32 fm10k_init_ops_vf(struct fm10k_hw *hw)
+{
+ struct fm10k_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("fm10k_init_ops_vf");
+
+ fm10k_init_ops_generic(hw);
+
+ mac->ops.reset_hw = &fm10k_reset_hw_vf;
+ mac->ops.init_hw = &fm10k_init_hw_vf;
+ mac->ops.start_hw = &fm10k_start_hw_generic;
+ mac->ops.stop_hw = &fm10k_stop_hw_vf;
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+ mac->ops.is_slot_appropriate = &fm10k_is_slot_appropriate_vf;
+#endif
+ mac->ops.update_vlan = &fm10k_update_vlan_vf;
+ mac->ops.read_mac_addr = &fm10k_read_mac_addr_vf;
+ mac->ops.update_uc_addr = &fm10k_update_uc_addr_vf;
+ mac->ops.update_mc_addr = &fm10k_update_mc_addr_vf;
+ mac->ops.update_xcast_mode = &fm10k_update_xcast_mode_vf;
+ mac->ops.update_int_moderator = &fm10k_update_int_moderator_vf;
+ mac->ops.update_lport_state = &fm10k_update_lport_state_vf;
+ mac->ops.update_hw_stats = &fm10k_update_hw_stats_vf;
+ mac->ops.rebind_hw_stats = &fm10k_rebind_hw_stats_vf;
+ mac->ops.configure_dglort_map = &fm10k_configure_dglort_map_vf;
+ mac->ops.get_host_state = &fm10k_get_host_state_generic;
+ mac->ops.adjust_systime = &fm10k_adjust_systime_vf;
+ mac->ops.read_systime = &fm10k_read_systime_vf;
+
+ mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw);
+
+ return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0);
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.h
new file mode 100644
index 00000000..116c56fc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.h
@@ -0,0 +1,92 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_VF_H_
+#define _FM10K_VF_H_
+
+#include "fm10k_type.h"
+#include "fm10k_common.h"
+
+enum fm10k_vf_tlv_msg_id {
+ FM10K_VF_MSG_ID_TEST = 0, /* msg ID reserved for testing */
+ FM10K_VF_MSG_ID_MSIX,
+ FM10K_VF_MSG_ID_MAC_VLAN,
+ FM10K_VF_MSG_ID_LPORT_STATE,
+ FM10K_VF_MSG_ID_1588,
+ FM10K_VF_MSG_ID_MAX,
+};
+
+enum fm10k_tlv_mac_vlan_attr_id {
+ FM10K_MAC_VLAN_MSG_VLAN,
+ FM10K_MAC_VLAN_MSG_SET,
+ FM10K_MAC_VLAN_MSG_MAC,
+ FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
+ FM10K_MAC_VLAN_MSG_MULTICAST,
+ FM10K_MAC_VLAN_MSG_ID_MAX
+};
+
+enum fm10k_tlv_lport_state_attr_id {
+ FM10K_LPORT_STATE_MSG_DISABLE,
+ FM10K_LPORT_STATE_MSG_XCAST_MODE,
+ FM10K_LPORT_STATE_MSG_READY,
+ FM10K_LPORT_STATE_MSG_MAX
+};
+
+enum fm10k_tlv_1588_attr_id {
+ FM10K_1588_MSG_TIMESTAMP = 0, /* deprecated */
+ FM10K_1588_MSG_CLK_OFFSET,
+ FM10K_1588_MSG_MAX
+};
+
+#define FM10K_VF_MSG_MSIX_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func)
+
+s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[];
+#define FM10K_VF_MSG_MAC_VLAN_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MAC_VLAN, \
+ fm10k_mac_vlan_msg_attr, func)
+
+s32 fm10k_msg_lport_state_vf(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[];
+#define FM10K_VF_MSG_LPORT_STATE_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \
+ fm10k_lport_state_msg_attr, func)
+
+extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[];
+#define FM10K_VF_MSG_1588_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func)
+
+s32 fm10k_init_ops_vf(struct fm10k_hw *hw);
+#endif /* _FM10K_VF_H */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/meson.build b/src/spdk/dpdk/drivers/net/fm10k/base/meson.build
new file mode 100644
index 00000000..a8fc5fa8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = [
+ 'fm10k_api.c',
+ 'fm10k_common.c',
+ 'fm10k_mbx.c',
+ 'fm10k_pf.c',
+ 'fm10k_tlv.c',
+ 'fm10k_vf.c'
+]
+
+error_cflags = ['-Wno-unused-parameter', '-Wno-unused-value',
+ '-Wno-strict-aliasing', '-Wno-format-extra-args',
+ '-Wno-unused-variable', '-Wno-missing-field-initializers'
+]
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('fm10k_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k.h b/src/spdk/dpdk/drivers/net/fm10k/fm10k.h
new file mode 100644
index 00000000..dc814855
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k.h
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2015 Intel Corporation
+ */
+
+#ifndef _FM10K_H_
+#define _FM10K_H_
+
+#include <stdint.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+#include "fm10k_logs.h"
+#include "base/fm10k_type.h"
+
+/* descriptor ring base addresses must be aligned to the following */
+#define FM10K_ALIGN_RX_DESC 128
+#define FM10K_ALIGN_TX_DESC 128
+
+/* The maximum packet size that FM10K supports */
+#define FM10K_MAX_PKT_SIZE (15 * 1024)
+
+/* Minimum size of RX buffer FM10K supported */
+#define FM10K_MIN_RX_BUF_SIZE 256
+
+/* The maximum of SRIOV VFs per port supported */
+#define FM10K_MAX_VF_NUM 64
+
+/* number of descriptors must be a multiple of the following */
+#define FM10K_MULT_RX_DESC FM10K_REQ_RX_DESCRIPTOR_MULTIPLE
+#define FM10K_MULT_TX_DESC FM10K_REQ_TX_DESCRIPTOR_MULTIPLE
+
+/* maximum size of descriptor rings */
+#define FM10K_MAX_RX_RING_SZ (512 * 1024)
+#define FM10K_MAX_TX_RING_SZ (512 * 1024)
+
+/* minimum and maximum number of descriptors in a ring */
+#define FM10K_MIN_RX_DESC 32
+#define FM10K_MIN_TX_DESC 32
+#define FM10K_MAX_RX_DESC (FM10K_MAX_RX_RING_SZ / sizeof(union fm10k_rx_desc))
+#define FM10K_MAX_TX_DESC (FM10K_MAX_TX_RING_SZ / sizeof(struct fm10k_tx_desc))
+
+#define FM10K_TX_MAX_SEG UINT8_MAX
+#define FM10K_TX_MAX_MTU_SEG UINT8_MAX
+
+/*
+ * byte aligment for HW RX data buffer
+ * Datasheet requires RX buffer addresses shall either be 512-byte aligned or
+ * be 8-byte aligned but without crossing host memory pages (4KB alignment
+ * boundaries). Satisfy first option.
+ */
+#define FM10K_RX_DATABUF_ALIGN 512
+
+/*
+ * threshold default, min, max, and divisor constraints
+ * the configured values must satisfy the following:
+ * MIN <= value <= MAX
+ * DIV % value == 0
+ */
+#define FM10K_RX_FREE_THRESH_DEFAULT(rxq) 32
+#define FM10K_RX_FREE_THRESH_MIN(rxq) 1
+#define FM10K_RX_FREE_THRESH_MAX(rxq) ((rxq)->nb_desc - 1)
+#define FM10K_RX_FREE_THRESH_DIV(rxq) ((rxq)->nb_desc)
+
+#define FM10K_TX_FREE_THRESH_DEFAULT(txq) 32
+#define FM10K_TX_FREE_THRESH_MIN(txq) 1
+#define FM10K_TX_FREE_THRESH_MAX(txq) ((txq)->nb_desc - 3)
+#define FM10K_TX_FREE_THRESH_DIV(txq) 0
+
+#define FM10K_DEFAULT_RX_PTHRESH 8
+#define FM10K_DEFAULT_RX_HTHRESH 8
+#define FM10K_DEFAULT_RX_WTHRESH 0
+
+#define FM10K_DEFAULT_TX_PTHRESH 32
+#define FM10K_DEFAULT_TX_HTHRESH 0
+#define FM10K_DEFAULT_TX_WTHRESH 0
+
+#define FM10K_TX_RS_THRESH_DEFAULT(txq) 32
+#define FM10K_TX_RS_THRESH_MIN(txq) 1
+#define FM10K_TX_RS_THRESH_MAX(txq) \
+ RTE_MIN(((txq)->nb_desc - 2), (txq)->free_thresh)
+#define FM10K_TX_RS_THRESH_DIV(txq) ((txq)->nb_desc)
+
+#define FM10K_VLAN_TAG_SIZE 4
+
+/* Maximum number of MAC addresses per PF/VF */
+#define FM10K_MAX_MACADDR_NUM 64
+
+#define FM10K_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t))
+#define FM10K_VFTA_SIZE (4096 / FM10K_UINT32_BIT_SIZE)
+
+/* vlan_id is a 12 bit number.
+ * The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
+ * 2^5 = 32. The val of lower 5 bits specifies the bit in the 32bit element.
+ * The higher 7 bit val specifies VFTA array index.
+ */
+#define FM10K_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F))
+#define FM10K_VFTA_IDX(vlan_id) ((vlan_id) >> 5)
+
+#define RTE_FM10K_RXQ_REARM_THRESH 32
+#define RTE_FM10K_VPMD_TX_BURST 32
+#define RTE_FM10K_MAX_RX_BURST RTE_FM10K_RXQ_REARM_THRESH
+#define RTE_FM10K_TX_MAX_FREE_BUF_SZ 64
+#define RTE_FM10K_DESCS_PER_LOOP 4
+
+#define FM10K_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define FM10K_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+struct fm10k_macvlan_filter_info {
+ uint16_t vlan_num; /* Total VLAN number */
+ uint16_t mac_num; /* Total mac number */
+ uint16_t nb_queue_pools; /* Active queue pools number */
+ /* VMDQ ID for each MAC address */
+ uint8_t mac_vmdq_id[FM10K_MAX_MACADDR_NUM];
+ uint32_t vfta[FM10K_VFTA_SIZE]; /* VLAN bitmap */
+};
+
+struct fm10k_dev_info {
+ volatile uint32_t enable;
+ volatile uint32_t glort;
+ /* Protect the mailbox to avoid race condition */
+ rte_spinlock_t mbx_lock;
+ struct fm10k_macvlan_filter_info macvlan;
+ /* Flag to indicate if RX vector conditions satisfied */
+ bool rx_vec_allowed;
+ bool sm_down;
+};
+
+/*
+ * Structure to store private data for each driver instance.
+ */
+struct fm10k_adapter {
+ struct fm10k_hw hw;
+ struct fm10k_hw_stats stats;
+ struct fm10k_dev_info info;
+};
+
+#define FM10K_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct fm10k_adapter *)adapter)->hw)
+
+#define FM10K_DEV_PRIVATE_TO_STATS(adapter) \
+ (&((struct fm10k_adapter *)adapter)->stats)
+
+#define FM10K_DEV_PRIVATE_TO_INFO(adapter) \
+ (&((struct fm10k_adapter *)adapter)->info)
+
+#define FM10K_DEV_PRIVATE_TO_MBXLOCK(adapter) \
+ (&(((struct fm10k_adapter *)adapter)->info.mbx_lock))
+
+#define FM10K_DEV_PRIVATE_TO_MACVLAN(adapter) \
+ (&(((struct fm10k_adapter *)adapter)->info.macvlan))
+
+struct fm10k_rx_queue {
+ struct rte_mempool *mp;
+ struct rte_mbuf **sw_ring;
+ volatile union fm10k_rx_desc *hw_ring;
+ struct rte_mbuf *pkt_first_seg; /* First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /* Last segment of current packet. */
+ uint64_t hw_ring_phys_addr;
+ uint64_t mbuf_initializer; /* value to init mbufs */
+ /* need to alloc dummy mbuf, for wraparound when scanning hw ring */
+ struct rte_mbuf fake_mbuf;
+ uint16_t next_dd;
+ uint16_t next_alloc;
+ uint16_t next_trigger;
+ uint16_t alloc_thresh;
+ volatile uint32_t *tail_ptr;
+ uint16_t nb_desc;
+ /* Number of faked desc added at the tail for Vector RX function */
+ uint16_t nb_fake_desc;
+ uint16_t queue_id;
+ /* Below 2 fields only valid in case vPMD is applied. */
+ uint16_t rxrearm_nb; /* number of remaining to be re-armed */
+ uint16_t rxrearm_start; /* the idx we start the re-arming from */
+ uint16_t rx_using_sse; /* indicates that vector RX is in use */
+ uint16_t port_id;
+ uint8_t drop_en;
+ uint8_t rx_deferred_start; /* don't start this queue in dev start. */
+ uint16_t rx_ftag_en; /* indicates FTAG RX supported */
+ uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */
+};
+
+/*
+ * a FIFO is used to track which descriptors have their RS bit set for Tx
+ * queues which are configured to allow multiple descriptors per packet
+ */
+struct fifo {
+ uint16_t *list;
+ uint16_t *head;
+ uint16_t *tail;
+ uint16_t *endp;
+};
+
+struct fm10k_txq_ops;
+
+struct fm10k_tx_queue {
+ struct rte_mbuf **sw_ring;
+ struct fm10k_tx_desc *hw_ring;
+ uint64_t hw_ring_phys_addr;
+ struct fifo rs_tracker;
+ const struct fm10k_txq_ops *ops; /* txq ops */
+ uint16_t last_free;
+ uint16_t next_free;
+ uint16_t nb_free;
+ uint16_t nb_used;
+ uint16_t free_thresh;
+ uint16_t rs_thresh;
+ /* Below 2 fields only valid in case vPMD is applied. */
+ uint16_t next_rs; /* Next pos to set RS flag */
+ uint16_t next_dd; /* Next pos to check DD flag */
+ volatile uint32_t *tail_ptr;
+ uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */
+ uint16_t nb_desc;
+ uint16_t port_id;
+ uint8_t tx_deferred_start; /** don't start this queue in dev start. */
+ uint16_t queue_id;
+ uint16_t tx_ftag_en; /* indicates FTAG TX supported */
+};
+
+struct fm10k_txq_ops {
+ void (*reset)(struct fm10k_tx_queue *txq);
+};
+
+#define MBUF_DMA_ADDR(mb) \
+ ((uint64_t) ((mb)->buf_iova + (mb)->data_off))
+
+/* enforce 512B alignment on default Rx DMA addresses */
+#define MBUF_DMA_ADDR_DEFAULT(mb) \
+ ((uint64_t) RTE_ALIGN(((mb)->buf_iova + RTE_PKTMBUF_HEADROOM),\
+ FM10K_RX_DATABUF_ALIGN))
+
+static inline void fifo_reset(struct fifo *fifo, uint32_t len)
+{
+ fifo->head = fifo->tail = fifo->list;
+ fifo->endp = fifo->list + len;
+}
+
+static inline void fifo_insert(struct fifo *fifo, uint16_t val)
+{
+ *fifo->head = val;
+ if (++fifo->head == fifo->endp)
+ fifo->head = fifo->list;
+}
+
+/* do not worry about list being empty since we only check it once we know
+ * we have used enough descriptors to set the RS bit at least once */
+static inline uint16_t fifo_peek(struct fifo *fifo)
+{
+ return *fifo->tail;
+}
+
+static inline uint16_t fifo_remove(struct fifo *fifo)
+{
+ uint16_t val;
+ val = *fifo->tail;
+ if (++fifo->tail == fifo->endp)
+ fifo->tail = fifo->list;
+ return val;
+}
+
+static inline void
+fm10k_pktmbuf_reset(struct rte_mbuf *mb, uint16_t in_port)
+{
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->next = NULL;
+ mb->nb_segs = 1;
+
+ /* enforce 512B alignment on default Rx virtual addresses */
+ mb->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb->buf_addr +
+ RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
+ - (char *)mb->buf_addr);
+ mb->port = in_port;
+}
+
+/*
+ * Verify Rx packet buffer alignment is valid.
+ *
+ * Hardware requires specific alignment for Rx packet buffers. At
+ * least one of the following two conditions must be satisfied.
+ * 1. Address is 512B aligned
+ * 2. Address is 8B aligned and buffer does not cross 4K boundary.
+ *
+ * Return 1 if buffer alignment satisfies at least one condition,
+ * otherwise return 0.
+ *
+ * Note: Alignment is checked by the driver when the Rx queue is reset. It
+ * is assumed that if an entire descriptor ring can be filled with
+ * buffers containing valid alignment, then all buffers in that mempool
+ * have valid address alignment. It is the responsibility of the user
+ * to ensure all buffers have valid alignment, as it is the user who
+ * creates the mempool.
+ * Note: It is assumed the buffer needs only to store a maximum size Ethernet
+ * frame.
+ */
+static inline int
+fm10k_addr_alignment_valid(struct rte_mbuf *mb)
+{
+ uint64_t addr = MBUF_DMA_ADDR_DEFAULT(mb);
+ uint64_t boundary1, boundary2;
+
+ /* 512B aligned? */
+ if (RTE_ALIGN(addr, FM10K_RX_DATABUF_ALIGN) == addr)
+ return 1;
+
+ /* 8B aligned, and max Ethernet frame would not cross a 4KB boundary? */
+ if (RTE_ALIGN(addr, 8) == addr) {
+ boundary1 = RTE_ALIGN_FLOOR(addr, 4096);
+ boundary2 = RTE_ALIGN_FLOOR(addr + ETHER_MAX_VLAN_FRAME_LEN,
+ 4096);
+ if (boundary1 == boundary2)
+ return 1;
+ }
+
+ PMD_INIT_LOG(ERR, "Error: Invalid buffer alignment!");
+
+ return 0;
+}
+
+/* Rx and Tx prototypes */
+uint16_t fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t fm10k_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+int
+fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int
+fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+
+int
+fm10k_dev_tx_descriptor_status(void *rx_queue, uint16_t offset);
+
+
+uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t fm10k_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+int fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq);
+int fm10k_rx_vec_condition_check(struct rte_eth_dev *);
+void fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq);
+uint16_t fm10k_recv_pkts_vec(void *, struct rte_mbuf **, uint16_t);
+uint16_t fm10k_recv_scattered_pkts_vec(void *, struct rte_mbuf **,
+ uint16_t);
+uint16_t fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+void fm10k_txq_vec_setup(struct fm10k_tx_queue *txq);
+int fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k_ethdev.c b/src/spdk/dpdk/drivers/net/fm10k/fm10k_ethdev.c
new file mode 100644
index 00000000..541a49b7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k_ethdev.c
@@ -0,0 +1,3294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2016 Intel Corporation
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_string_fns.h>
+#include <rte_dev.h>
+#include <rte_spinlock.h>
+#include <rte_kvargs.h>
+
+#include "fm10k.h"
+#include "base/fm10k_api.h"
+
+/* Default delay to acquire mailbox lock */
+#define FM10K_MBXLOCK_DELAY_US 20
+#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
+
+#define MAIN_VSI_POOL_NUMBER 0
+
+/* Max try times to acquire switch status */
+#define MAX_QUERY_SWITCH_STATE_TIMES 10
+/* Wait interval to get switch status */
+#define WAIT_SWITCH_MSG_US 100000
+/* A period of quiescence for switch */
+#define FM10K_SWITCH_QUIESCE_US 100000
+/* Number of chars per uint32 type */
+#define CHARS_PER_UINT32 (sizeof(uint32_t))
+#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
+
+/* default 1:1 map from queue ID to interrupt vector ID */
+#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
+
+/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
+#define MAX_LPORT_NUM 128
+#define GLORT_FD_Q_BASE 0x40
+#define GLORT_PF_MASK 0xFFC0
+#define GLORT_FD_MASK GLORT_PF_MASK
+#define GLORT_FD_INDEX GLORT_FD_Q_BASE
+
+int fm10k_logtype_init;
+int fm10k_logtype_driver;
+
+static void fm10k_close_mbx_service(struct fm10k_hw *hw);
+static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static inline int fm10k_glort_valid(struct fm10k_hw *hw);
+static int
+fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
+static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
+ const u8 *mac, bool add, uint32_t pool);
+static void fm10k_tx_queue_release(void *queue);
+static void fm10k_rx_queue_release(void *queue);
+static void fm10k_set_rx_function(struct rte_eth_dev *dev);
+static void fm10k_set_tx_function(struct rte_eth_dev *dev);
+static int fm10k_check_ftag(struct rte_devargs *devargs);
+static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+
+static void fm10k_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+
+struct fm10k_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
+ {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
+ {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
+ {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
+ {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
+ {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
+ {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
+ {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
+ {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
+ nodesc_drop)},
+};
+
+#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
+ sizeof(fm10k_hw_stats_strings[0]))
+
+struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
+ {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
+ {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
+ {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
+};
+
+#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
+ sizeof(fm10k_hw_stats_rx_q_strings[0]))
+
+struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
+ {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
+ {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
+};
+
+#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
+ sizeof(fm10k_hw_stats_tx_q_strings[0]))
+
+#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
+ (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
+static int
+fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
+
+static void
+fm10k_mbx_initlock(struct fm10k_hw *hw)
+{
+ rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
+}
+
+static void
+fm10k_mbx_lock(struct fm10k_hw *hw)
+{
+ while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
+ rte_delay_us(FM10K_MBXLOCK_DELAY_US);
+}
+
+static void
+fm10k_mbx_unlock(struct fm10k_hw *hw)
+{
+ rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
+}
+
+/* Stubs needed for linkage when vPMD is disabled */
+int __attribute__((weak))
+fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
+{
+ return -1;
+}
+
+uint16_t __attribute__((weak))
+fm10k_recv_pkts_vec(
+ __rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+fm10k_recv_scattered_pkts_vec(
+ __rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
+
+{
+ return -1;
+}
+
+void __attribute__((weak))
+fm10k_rx_queue_release_mbufs_vec(
+ __rte_unused struct fm10k_rx_queue *rxq)
+{
+ return;
+}
+
+void __attribute__((weak))
+fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
+{
+ return;
+}
+
+int __attribute__((weak))
+fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
+{
+ return -1;
+}
+
+uint16_t __attribute__((weak))
+fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+/*
+ * reset queue to initial state, allocate software buffers used when starting
+ * device.
+ * return 0 on success
+ * return -ENOMEM if buffers cannot be allocated
+ * return -EINVAL if buffers do not satisfy alignment condition
+ */
+static inline int
+rx_queue_reset(struct fm10k_rx_queue *q)
+{
+ static const union fm10k_rx_desc zero = {{0} };
+ uint64_t dma_addr;
+ int i, diag;
+ PMD_INIT_FUNC_TRACE();
+
+ diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
+ if (diag != 0)
+ return -ENOMEM;
+
+ for (i = 0; i < q->nb_desc; ++i) {
+ fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
+ if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
+ rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
+ q->nb_desc);
+ return -EINVAL;
+ }
+ dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
+ q->hw_ring[i].q.pkt_addr = dma_addr;
+ q->hw_ring[i].q.hdr_addr = dma_addr;
+ }
+
+ /* initialize extra software ring entries. Space for these extra
+ * entries is always allocated.
+ */
+ memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
+ for (i = 0; i < q->nb_fake_desc; ++i) {
+ q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
+ q->hw_ring[q->nb_desc + i] = zero;
+ }
+
+ q->next_dd = 0;
+ q->next_alloc = 0;
+ q->next_trigger = q->alloc_thresh - 1;
+ FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
+ q->rxrearm_start = 0;
+ q->rxrearm_nb = 0;
+
+ return 0;
+}
+
+/*
+ * clean queue, descriptor rings, free software buffers used when stopping
+ * device.
+ */
+static inline void
+rx_queue_clean(struct fm10k_rx_queue *q)
+{
+ union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
+ uint32_t i;
+ PMD_INIT_FUNC_TRACE();
+
+ /* zero descriptor rings */
+ for (i = 0; i < q->nb_desc; ++i)
+ q->hw_ring[i] = zero;
+
+ /* zero faked descriptors */
+ for (i = 0; i < q->nb_fake_desc; ++i)
+ q->hw_ring[q->nb_desc + i] = zero;
+
+ /* vPMD driver has a different way of releasing mbufs. */
+ if (q->rx_using_sse) {
+ fm10k_rx_queue_release_mbufs_vec(q);
+ return;
+ }
+
+ /* free software buffers */
+ for (i = 0; i < q->nb_desc; ++i) {
+ if (q->sw_ring[i]) {
+ rte_pktmbuf_free_seg(q->sw_ring[i]);
+ q->sw_ring[i] = NULL;
+ }
+ }
+}
+
+/*
+ * free all queue memory used when releasing the queue (i.e. configure)
+ */
+static inline void
+rx_queue_free(struct fm10k_rx_queue *q)
+{
+ PMD_INIT_FUNC_TRACE();
+ if (q) {
+ PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
+ rx_queue_clean(q);
+ if (q->sw_ring) {
+ rte_free(q->sw_ring);
+ q->sw_ring = NULL;
+ }
+ rte_free(q);
+ q = NULL;
+ }
+}
+
+/*
+ * disable RX queue, wait unitl HW finished necessary flush operation
+ */
+static inline int
+rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
+{
+ uint32_t reg, i;
+
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
+ reg & ~FM10K_RXQCTL_ENABLE);
+
+ /* Wait 100us at most */
+ for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
+ rte_delay_us(1);
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
+ if (!(reg & FM10K_RXQCTL_ENABLE))
+ break;
+ }
+
+ if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * reset queue to initial state, allocate software buffers used when starting
+ * device
+ */
+static inline void
+tx_queue_reset(struct fm10k_tx_queue *q)
+{
+ PMD_INIT_FUNC_TRACE();
+ q->last_free = 0;
+ q->next_free = 0;
+ q->nb_used = 0;
+ q->nb_free = q->nb_desc - 1;
+ fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
+ FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
+}
+
+/*
+ * clean queue, descriptor rings, free software buffers used when stopping
+ * device
+ */
+static inline void
+tx_queue_clean(struct fm10k_tx_queue *q)
+{
+ struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
+ uint32_t i;
+ PMD_INIT_FUNC_TRACE();
+
+ /* zero descriptor rings */
+ for (i = 0; i < q->nb_desc; ++i)
+ q->hw_ring[i] = zero;
+
+ /* free software buffers */
+ for (i = 0; i < q->nb_desc; ++i) {
+ if (q->sw_ring[i]) {
+ rte_pktmbuf_free_seg(q->sw_ring[i]);
+ q->sw_ring[i] = NULL;
+ }
+ }
+}
+
+/*
+ * free all queue memory used when releasing the queue (i.e. configure)
+ */
+static inline void
+tx_queue_free(struct fm10k_tx_queue *q)
+{
+ PMD_INIT_FUNC_TRACE();
+ if (q) {
+ PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
+ tx_queue_clean(q);
+ if (q->rs_tracker.list) {
+ rte_free(q->rs_tracker.list);
+ q->rs_tracker.list = NULL;
+ }
+ if (q->sw_ring) {
+ rte_free(q->sw_ring);
+ q->sw_ring = NULL;
+ }
+ rte_free(q);
+ q = NULL;
+ }
+}
+
+/*
+ * disable TX queue, wait unitl HW finished necessary flush operation
+ */
+static inline int
+tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
+{
+ uint32_t reg, i;
+
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
+ reg & ~FM10K_TXDCTL_ENABLE);
+
+ /* Wait 100us at most */
+ for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
+ rte_delay_us(1);
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
+ if (!(reg & FM10K_TXDCTL_ENABLE))
+ break;
+ }
+
+ if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
+ return -1;
+
+ return 0;
+}
+
+static int
+fm10k_check_mq_mode(struct rte_eth_dev *dev)
+{
+ enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+
+ vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ PMD_INIT_LOG(ERR, "DCB mode is not supported.");
+ return -EINVAL;
+ }
+
+ if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+ return 0;
+
+ if (hw->mac.type == fm10k_mac_vf) {
+ PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
+ return -EINVAL;
+ }
+
+ /* Check VMDQ queue pool number */
+ if (vmdq_conf->nb_queue_pools >
+ sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
+ vmdq_conf->nb_queue_pools > nb_rx_q) {
+ PMD_INIT_LOG(ERR, "Too many of queue pools: %d",
+ vmdq_conf->nb_queue_pools);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct fm10k_txq_ops def_txq_ops = {
+ .reset = tx_queue_reset,
+};
+
+static int
+fm10k_dev_configure(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* KEEP_CRC offload flag is not supported by PMD
+ * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
+ */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
+
+ /* multipe queue mode checking */
+ ret = fm10k_check_mq_mode(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
+ ret);
+ return ret;
+ }
+
+ dev->data->scattered_rx = 0;
+
+ return 0;
+}
+
+/* fls = find last set bit = 32 minus the number of leading zeros */
+#ifndef fls
+#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
+#endif
+
+static void
+fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ uint32_t i;
+
+ vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
+ if (!vmdq_conf->pool_map[i].pools)
+ continue;
+ fm10k_mbx_lock(hw);
+ fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
+ fm10k_mbx_unlock(hw);
+ }
+}
+
+static void
+fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Add default mac address */
+ fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+ MAIN_VSI_POOL_NUMBER);
+}
+
+static void
+fm10k_dev_rss_configure(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ uint32_t mrqc, *key, i, reta, j;
+ uint64_t hf;
+
+#define RSS_KEY_SIZE 40
+ static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+ };
+
+ if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+ dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
+ FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
+ return;
+ }
+
+ /* random key is rss_intel_key (default) or user provided (rss_key) */
+ if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
+ key = (uint32_t *)rss_intel_key;
+ else
+ key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
+
+ /* Now fill our hash function seeds, 4 bytes at a time */
+ for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
+ FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
+
+ /*
+ * Fill in redirection table
+ * The byte-swap is needed because NIC registers are in
+ * little-endian order.
+ */
+ reta = 0;
+ for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
+ if (j == dev->data->nb_rx_queues)
+ j = 0;
+ reta = (reta << CHAR_BIT) | j;
+ if ((i & 3) == 3)
+ FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
+ rte_bswap32(reta));
+ }
+
+ /*
+ * Generate RSS hash based on packet types, TCP/UDP
+ * port numbers and/or IPv4/v6 src and dst addresses
+ */
+ hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
+ mrqc = 0;
+ mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
+
+ if (mrqc == 0) {
+ PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
+ "supported", hf);
+ return;
+ }
+
+ FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
+}
+
+static void
+fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t i;
+
+ for (i = 0; i < nb_lport_new; i++) {
+ /* Set unicast mode by default. App can change
+ * to other mode in other API func.
+ */
+ fm10k_mbx_lock(hw);
+ hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
+ FM10K_XCAST_MODE_NONE);
+ fm10k_mbx_unlock(hw);
+ }
+}
+
+static void
+fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct fm10k_macvlan_filter_info *macvlan;
+ uint16_t nb_queue_pools = 0; /* pool number in configuration */
+ uint16_t nb_lport_new;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ fm10k_dev_rss_configure(dev);
+
+ /* only PF supports VMDQ */
+ if (hw->mac.type != fm10k_mac_pf)
+ return;
+
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ nb_queue_pools = vmdq_conf->nb_queue_pools;
+
+ /* no pool number change, no need to update logic port and VLAN/MAC */
+ if (macvlan->nb_queue_pools == nb_queue_pools)
+ return;
+
+ nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
+ fm10k_dev_logic_port_update(dev, nb_lport_new);
+
+ /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
+ memset(dev->data->mac_addrs, 0,
+ ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
+ ether_addr_copy((const struct ether_addr *)hw->mac.addr,
+ &dev->data->mac_addrs[0]);
+ memset(macvlan, 0, sizeof(*macvlan));
+ macvlan->nb_queue_pools = nb_queue_pools;
+
+ if (nb_queue_pools)
+ fm10k_dev_vmdq_rx_configure(dev);
+ else
+ fm10k_dev_pf_main_vsi_reset(dev);
+}
+
+static int
+fm10k_dev_tx_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i, ret;
+ struct fm10k_tx_queue *txq;
+ uint64_t base_addr;
+ uint32_t size;
+
+ /* Disable TXINT to avoid possible interrupt */
+ for (i = 0; i < hw->mac.max_queues; i++)
+ FM10K_WRITE_REG(hw, FM10K_TXINT(i),
+ 3 << FM10K_TXINT_TIMER_SHIFT);
+
+ /* Setup TX queue */
+ for (i = 0; i < dev->data->nb_tx_queues; ++i) {
+ txq = dev->data->tx_queues[i];
+ base_addr = txq->hw_ring_phys_addr;
+ size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
+
+ /* disable queue to avoid issues while updating state */
+ ret = tx_queue_disable(hw, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
+ return -1;
+ }
+ /* Enable use of FTAG bit in TX descriptor, PFVTCTL
+ * register is read-only for VF.
+ */
+ if (fm10k_check_ftag(dev->device->devargs)) {
+ if (hw->mac.type == fm10k_mac_pf) {
+ FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
+ FM10K_PFVTCTL_FTAG_DESC_ENABLE);
+ PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
+ } else {
+ PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
+ return -ENOTSUP;
+ }
+ }
+
+ /* set location and size for descriptor ring */
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
+ base_addr & UINT64_LOWER_32BITS_MASK);
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
+ base_addr >> (CHAR_BIT * sizeof(uint32_t)));
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
+
+ /* assign default SGLORT for each TX queue by PF */
+ if (hw->mac.type == fm10k_mac_pf)
+ FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
+ }
+
+ /* set up vector or scalar TX function as appropriate */
+ fm10k_set_tx_function(dev);
+
+ return 0;
+}
+
+static int
+fm10k_dev_rx_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_macvlan_filter_info *macvlan;
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+ int i, ret;
+ struct fm10k_rx_queue *rxq;
+ uint64_t base_addr;
+ uint32_t size;
+ uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
+ uint32_t logic_port = hw->mac.dglort_map;
+ uint16_t buf_size;
+ uint16_t queue_stride = 0;
+
+ /* enable RXINT for interrupt mode */
+ i = 0;
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (; i < dev->data->nb_rx_queues; i++) {
+ FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
+ if (hw->mac.type == fm10k_mac_pf)
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
+ FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ else
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
+ FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ }
+ }
+ /* Disable other RXINT to avoid possible interrupt */
+ for (; i < hw->mac.max_queues; i++)
+ FM10K_WRITE_REG(hw, FM10K_RXINT(i),
+ 3 << FM10K_RXINT_TIMER_SHIFT);
+
+ /* Setup RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ rxq = dev->data->rx_queues[i];
+ base_addr = rxq->hw_ring_phys_addr;
+ size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
+
+ /* disable queue to avoid issues while updating state */
+ ret = rx_queue_disable(hw, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
+ return -1;
+ }
+
+ /* Setup the Base and Length of the Rx Descriptor Ring */
+ FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
+ base_addr & UINT64_LOWER_32BITS_MASK);
+ FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
+ base_addr >> (CHAR_BIT * sizeof(uint32_t)));
+ FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
+
+ /* Configure the Rx buffer size for one buff without split */
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+ /* As RX buffer is aligned to 512B within mbuf, some bytes are
+ * reserved for this purpose, and the worst case could be 511B.
+ * But SRR reg assumes all buffers have the same size. In order
+ * to fill the gap, we'll have to consider the worst case and
+ * assume 512B is reserved. If we don't do so, it's possible
+ * for HW to overwrite data to next mbuf.
+ */
+ buf_size -= FM10K_RX_DATABUF_ALIGN;
+
+ FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
+ (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
+ FM10K_SRRCTL_LOOPBACK_SUPPRESS);
+
+ /* It adds dual VLAN length for supporting dual VLAN */
+ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
+ rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
+ uint32_t reg;
+ dev->data->scattered_rx = 1;
+ reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
+ reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
+ FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
+ }
+
+ /* Enable drop on empty, it's RO for VF */
+ if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
+ rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
+
+ FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
+ FM10K_WRITE_FLUSH(hw);
+ }
+
+ /* Configure VMDQ/RSS if applicable */
+ fm10k_dev_mq_rx_configure(dev);
+
+ /* Decide the best RX function */
+ fm10k_set_rx_function(dev);
+
+ /* update RX_SGLORT for loopback suppress*/
+ if (hw->mac.type != fm10k_mac_pf)
+ return 0;
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ if (macvlan->nb_queue_pools)
+ queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
+ for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ if (i && queue_stride && !(i % queue_stride))
+ logic_port++;
+ FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
+ }
+
+ return 0;
+}
+
+static int
+fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int err;
+ uint32_t reg;
+ struct fm10k_rx_queue *rxq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ err = rx_queue_reset(rxq);
+ if (err == -ENOMEM) {
+ PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
+ return err;
+ } else if (err == -EINVAL) {
+ PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
+ " %d", err);
+ return err;
+ }
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled on real
+ * hardware, but BEFORE the queue is enabled when using the
+ * emulation platform. Do it in both places for now and remove
+ * this comment and the following two register writes when the
+ * emulation platform is no longer being used.
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+
+ /* Set PF ownership flag for PF devices */
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
+ if (hw->mac.type == fm10k_mac_pf)
+ reg |= FM10K_RXQCTL_PF;
+ reg |= FM10K_RXQCTL_ENABLE;
+ /* enable RX queue */
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
+ FM10K_WRITE_FLUSH(hw);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Disable RX queue */
+ rx_queue_disable(hw, rx_queue_id);
+
+ /* Free mbuf and clean HW ring */
+ rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static int
+fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ /** @todo - this should be defined in the shared code */
+#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
+ uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
+ struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
+
+ PMD_INIT_FUNC_TRACE();
+
+ q->ops->reset(q);
+
+ /* reset head and tail pointers */
+ FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
+
+ /* enable TX queue */
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
+ FM10K_TXDCTL_ENABLE | txdctl);
+ FM10K_WRITE_FLUSH(hw);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ tx_queue_disable(hw, tx_queue_id);
+ tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static inline int fm10k_glort_valid(struct fm10k_hw *hw)
+{
+ return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
+ != FM10K_DGLORTMAP_NONE);
+}
+
+static void
+fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int status;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if it didn't acquire valid glort range */
+ if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
+ return;
+
+ fm10k_mbx_lock(hw);
+ status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ FM10K_XCAST_MODE_PROMISC);
+ fm10k_mbx_unlock(hw);
+
+ if (status != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
+}
+
+static void
+fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint8_t mode;
+ int status;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if it didn't acquire valid glort range */
+ if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
+ return;
+
+ if (dev->data->all_multicast == 1)
+ mode = FM10K_XCAST_MODE_ALLMULTI;
+ else
+ mode = FM10K_XCAST_MODE_NONE;
+
+ fm10k_mbx_lock(hw);
+ status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ mode);
+ fm10k_mbx_unlock(hw);
+
+ if (status != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
+}
+
+static void
+fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int status;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if it didn't acquire valid glort range */
+ if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
+ return;
+
+ /* If promiscuous mode is enabled, it doesn't make sense to enable
+ * allmulticast and disable promiscuous since fm10k only can select
+ * one of the modes.
+ */
+ if (dev->data->promiscuous) {
+ PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
+ "needn't enable allmulticast");
+ return;
+ }
+
+ fm10k_mbx_lock(hw);
+ status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ FM10K_XCAST_MODE_ALLMULTI);
+ fm10k_mbx_unlock(hw);
+
+ if (status != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
+}
+
+static void
+fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int status;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if it didn't acquire valid glort range */
+ if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
+ return;
+
+ if (dev->data->promiscuous) {
+ PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
+ "since promisc mode is enabled");
+ return;
+ }
+
+ fm10k_mbx_lock(hw);
+ /* Change mode to unicast mode */
+ status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ FM10K_XCAST_MODE_NONE);
+ fm10k_mbx_unlock(hw);
+
+ if (status != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
+}
+
+static void
+fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
+ uint16_t nb_queue_pools;
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ nb_queue_pools = macvlan->nb_queue_pools;
+ pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
+ rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
+
+ /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
+ dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
+ dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
+ hw->mac.dglort_map;
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
+ /* Configure VMDQ/RSS DGlort Decoder */
+ FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
+
+ /* Flow Director configurations, only queue number is valid. */
+ dglortdec = fls(dev->data->nb_rx_queues - 1);
+ dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
+ (hw->mac.dglort_map + GLORT_FD_Q_BASE);
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
+ FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
+
+ /* Invalidate all other GLORT entries */
+ for (i = 2; i < FM10K_DGLORT_COUNT; i++)
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
+ FM10K_DGLORTMAP_NONE);
+}
+
+#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
+static int
+fm10k_dev_start(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i, diag;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* stop, init, then start the hw */
+ diag = fm10k_stop_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_init_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_start_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_dev_tx_init(dev);
+ if (diag) {
+ PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
+ return diag;
+ }
+
+ if (fm10k_dev_rxq_interrupt_setup(dev))
+ return -EIO;
+
+ diag = fm10k_dev_rx_init(dev);
+ if (diag) {
+ PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
+ return diag;
+ }
+
+ if (hw->mac.type == fm10k_mac_pf)
+ fm10k_dev_dglort_map_configure(dev);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct fm10k_rx_queue *rxq;
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq->rx_deferred_start)
+ continue;
+ diag = fm10k_dev_rx_queue_start(dev, i);
+ if (diag != 0) {
+ int j;
+ for (j = 0; j < i; ++j)
+ rx_queue_clean(dev->data->rx_queues[j]);
+ return diag;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct fm10k_tx_queue *txq;
+ txq = dev->data->tx_queues[i];
+
+ if (txq->tx_deferred_start)
+ continue;
+ diag = fm10k_dev_tx_queue_start(dev, i);
+ if (diag != 0) {
+ int j;
+ for (j = 0; j < i; ++j)
+ tx_queue_clean(dev->data->tx_queues[j]);
+ for (j = 0; j < dev->data->nb_rx_queues; ++j)
+ rx_queue_clean(dev->data->rx_queues[j]);
+ return diag;
+ }
+ }
+
+ /* Update default vlan when not in VMDQ mode */
+ if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
+
+ fm10k_link_update(dev, 0);
+
+ return 0;
+}
+
+static void
+fm10k_dev_stop(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->tx_queues)
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ fm10k_dev_tx_queue_stop(dev, i);
+
+ if (dev->data->rx_queues)
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ fm10k_dev_rx_queue_stop(dev, i);
+
+ /* Disable datapath event */
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ FM10K_WRITE_REG(hw, FM10K_RXINT(i),
+ 3 << FM10K_RXINT_TIMER_SHIFT);
+ if (hw->mac.type == fm10k_mac_pf)
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
+ FM10K_ITR_MASK_SET);
+ else
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
+ FM10K_ITR_MASK_SET);
+ }
+ }
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+}
+
+static void
+fm10k_dev_queue_release(struct rte_eth_dev *dev)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->tx_queues) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
+
+ tx_queue_free(txq);
+ }
+ }
+
+ if (dev->data->rx_queues) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ fm10k_rx_queue_release(dev->data->rx_queues[i]);
+ }
+}
+
+static void
+fm10k_dev_close(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ fm10k_mbx_lock(hw);
+ hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+ MAX_LPORT_NUM, false);
+ fm10k_mbx_unlock(hw);
+
+ /* allow 100ms for device to quiesce */
+ rte_delay_us(FM10K_SWITCH_QUIESCE_US);
+
+ /* Stop mailbox service first */
+ fm10k_close_mbx_service(hw);
+ fm10k_dev_stop(dev);
+ fm10k_dev_queue_release(dev);
+ fm10k_stop_hw(hw);
+}
+
+static int
+fm10k_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+ PMD_INIT_FUNC_TRACE();
+
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G;
+ dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ dev->data->dev_link.link_status =
+ dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
+ dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+
+ return 0;
+}
+
+static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+{
+ unsigned i, q;
+ unsigned count = 0;
+
+ if (xstats_names != NULL) {
+ /* Note: limit checked in rte_eth_xstats_names() */
+
+ /* Global stats */
+ for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", fm10k_hw_stats_strings[count].name);
+ count++;
+ }
+
+ /* PF queue stats */
+ for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
+ for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", q,
+ fm10k_hw_stats_rx_q_strings[i].name);
+ count++;
+ }
+ for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", q,
+ fm10k_hw_stats_tx_q_strings[i].name);
+ count++;
+ }
+ }
+ }
+ return FM10K_NB_XSTATS;
+}
+
+static int
+fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned n)
+{
+ struct fm10k_hw_stats *hw_stats =
+ FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ unsigned i, q, count = 0;
+
+ if (n < FM10K_NB_XSTATS)
+ return FM10K_NB_XSTATS;
+
+ /* Global stats */
+ for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ fm10k_hw_stats_strings[count].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
+ /* PF queue stats */
+ for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
+ for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)&hw_stats->q[q]) +
+ fm10k_hw_stats_rx_q_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)&hw_stats->q[q]) +
+ fm10k_hw_stats_tx_q_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ return FM10K_NB_XSTATS;
+}
+
+static int
+fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ uint64_t ipackets, opackets, ibytes, obytes;
+ struct fm10k_hw *hw =
+ FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_hw_stats *hw_stats =
+ FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fm10k_update_hw_stats(hw, hw_stats);
+
+ ipackets = opackets = ibytes = obytes = 0;
+ for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
+ (i < hw->mac.max_queues); ++i) {
+ stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
+ stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
+ stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
+ stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
+ ipackets += stats->q_ipackets[i];
+ opackets += stats->q_opackets[i];
+ ibytes += stats->q_ibytes[i];
+ obytes += stats->q_obytes[i];
+ }
+ stats->ipackets = ipackets;
+ stats->opackets = opackets;
+ stats->ibytes = ibytes;
+ stats->obytes = obytes;
+ return 0;
+}
+
+static void
+fm10k_stats_reset(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_hw_stats *hw_stats =
+ FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(hw_stats, 0, sizeof(*hw_stats));
+ fm10k_rebind_hw_stats(hw, hw_stats);
+}
+
+static void
+fm10k_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
+ dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
+ dev_info->max_rx_queues = hw->mac.max_queues;
+ dev_info->max_tx_queues = hw->mac.max_queues;
+ dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
+ dev_info->max_hash_mac_addrs = 0;
+ dev_info->max_vfs = pdev->max_vfs;
+ dev_info->vmdq_pool_base = 0;
+ dev_info->vmdq_queue_base = 0;
+ dev_info->max_vmdq_pools = ETH_32_POOLS;
+ dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
+ dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
+
+ dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
+ dev_info->reta_size = FM10K_MAX_RSS_INDICES;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = FM10K_DEFAULT_RX_PTHRESH,
+ .hthresh = FM10K_DEFAULT_RX_HTHRESH,
+ .wthresh = FM10K_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = FM10K_DEFAULT_TX_PTHRESH,
+ .hthresh = FM10K_DEFAULT_TX_HTHRESH,
+ .wthresh = FM10K_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
+ .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = FM10K_MAX_RX_DESC,
+ .nb_min = FM10K_MIN_RX_DESC,
+ .nb_align = FM10K_MULT_RX_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = FM10K_MAX_TX_DESC,
+ .nb_min = FM10K_MIN_TX_DESC,
+ .nb_align = FM10K_MULT_TX_DESC,
+ .nb_seg_max = FM10K_TX_MAX_SEG,
+ .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
+ };
+
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
+ ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+}
+
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+static const uint32_t *
+fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ if (dev->rx_pkt_burst == fm10k_recv_pkts ||
+ dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
+ static uint32_t ptypes[] = {
+ /* refers to rx_desc_to_ol_flags() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes;
+ } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
+ dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
+ static uint32_t ptypes_vec[] = {
+ /* refers to fm10k_desc_to_pktype_v() */
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_GENEVE,
+ RTE_PTYPE_TUNNEL_NVGRE,
+ RTE_PTYPE_TUNNEL_VXLAN,
+ RTE_PTYPE_TUNNEL_GRE,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes_vec;
+ }
+
+ return NULL;
+}
+#else
+static const uint32_t *
+fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+ return NULL;
+}
+#endif
+
+static int
+fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ s32 result;
+ uint16_t mac_num = 0;
+ uint32_t vid_idx, vid_bit, mac_index;
+ struct fm10k_hw *hw;
+ struct fm10k_macvlan_filter_info *macvlan;
+ struct rte_eth_dev_data *data = dev->data;
+
+ hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+
+ if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
+ PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
+ return -EINVAL;
+ }
+
+ if (vlan_id > ETH_VLAN_ID_MAX) {
+ PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
+ return -EINVAL;
+ }
+
+ vid_idx = FM10K_VFTA_IDX(vlan_id);
+ vid_bit = FM10K_VFTA_BIT(vlan_id);
+ /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
+ if (on && (macvlan->vfta[vid_idx] & vid_bit))
+ return 0;
+ /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
+ if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
+ PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
+ "in the VLAN filter table");
+ return -EINVAL;
+ }
+
+ fm10k_mbx_lock(hw);
+ result = fm10k_update_vlan(hw, vlan_id, 0, on);
+ fm10k_mbx_unlock(hw);
+ if (result != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
+ return -EIO;
+ }
+
+ for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
+ (result == FM10K_SUCCESS); mac_index++) {
+ if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
+ continue;
+ if (mac_num > macvlan->mac_num - 1) {
+ PMD_INIT_LOG(ERR, "MAC address number "
+ "not match");
+ break;
+ }
+ fm10k_mbx_lock(hw);
+ result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
+ data->mac_addrs[mac_index].addr_bytes,
+ vlan_id, on, 0);
+ fm10k_mbx_unlock(hw);
+ mac_num++;
+ }
+ if (result != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
+ return -EIO;
+ }
+
+ if (on) {
+ macvlan->vlan_num++;
+ macvlan->vfta[vid_idx] |= vid_bit;
+ } else {
+ macvlan->vlan_num--;
+ macvlan->vfta[vid_idx] &= ~vid_bit;
+ }
+ return 0;
+}
+
+static int
+fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP))
+ PMD_INIT_LOG(ERR, "VLAN stripping is "
+ "always on in fm10k");
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND)
+ PMD_INIT_LOG(ERR, "VLAN QinQ is not "
+ "supported in fm10k");
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER))
+ PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
+ }
+
+ return 0;
+}
+
+/* Add/Remove a MAC address, and update filters to main VSI */
+static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
+ const u8 *mac, bool add, uint32_t pool)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_macvlan_filter_info *macvlan;
+ uint32_t i, j, k;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+
+ if (pool != MAIN_VSI_POOL_NUMBER) {
+ PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
+ "mac to pool %u", pool);
+ return;
+ }
+ for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
+ if (!macvlan->vfta[j])
+ continue;
+ for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
+ if (!(macvlan->vfta[j] & (1 << k)))
+ continue;
+ if (i + 1 > macvlan->vlan_num) {
+ PMD_INIT_LOG(ERR, "vlan number not match");
+ return;
+ }
+ fm10k_mbx_lock(hw);
+ fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
+ j * FM10K_UINT32_BIT_SIZE + k, add, 0);
+ fm10k_mbx_unlock(hw);
+ i++;
+ }
+ }
+}
+
+/* Add/Remove a MAC address, and update filters to VMDQ */
+static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
+ const u8 *mac, bool add, uint32_t pool)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_macvlan_filter_info *macvlan;
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ uint32_t i;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ if (pool > macvlan->nb_queue_pools) {
+ PMD_DRV_LOG(ERR, "Pool number %u invalid."
+ " Max pool is %u",
+ pool, macvlan->nb_queue_pools);
+ return;
+ }
+ for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
+ if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
+ continue;
+ fm10k_mbx_lock(hw);
+ fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
+ vmdq_conf->pool_map[i].vlan_id, add, 0);
+ fm10k_mbx_unlock(hw);
+ }
+}
+
+/* Add/Remove a MAC address, and update filters */
+static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
+ const u8 *mac, bool add, uint32_t pool)
+{
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+
+ if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
+ fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
+ else
+ fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
+
+ if (add)
+ macvlan->mac_num++;
+ else
+ macvlan->mac_num--;
+}
+
+/* Add a MAC address, and update filters */
+static int
+fm10k_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool)
+{
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
+ macvlan->mac_vmdq_id[index] = pool;
+ return 0;
+}
+
+/* Remove a MAC address, and update filters */
+static void
+fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
+ FALSE, macvlan->mac_vmdq_id[index]);
+ macvlan->mac_vmdq_id[index] = 0;
+}
+
+static inline int
+check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
+{
+ if ((request < min) || (request > max) || ((request % mult) != 0))
+ return -1;
+ else
+ return 0;
+}
+
+
+static inline int
+check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
+{
+ if ((request < min) || (request > max) || ((div % request) != 0))
+ return -1;
+ else
+ return 0;
+}
+
+static inline int
+handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
+{
+ uint16_t rx_free_thresh;
+
+ if (conf->rx_free_thresh == 0)
+ rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
+ else
+ rx_free_thresh = conf->rx_free_thresh;
+
+ /* make sure the requested threshold satisfies the constraints */
+ if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
+ FM10K_RX_FREE_THRESH_MAX(q),
+ FM10K_RX_FREE_THRESH_DIV(q),
+ rx_free_thresh)) {
+ PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
+ "less than or equal to %u, "
+ "greater than or equal to %u, "
+ "and a divisor of %u",
+ rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
+ FM10K_RX_FREE_THRESH_MIN(q),
+ FM10K_RX_FREE_THRESH_DIV(q));
+ return -EINVAL;
+ }
+
+ q->alloc_thresh = rx_free_thresh;
+ q->drop_en = conf->rx_drop_en;
+ q->rx_deferred_start = conf->rx_deferred_start;
+
+ return 0;
+}
+
+/*
+ * Hardware requires specific alignment for Rx packet buffers. At
+ * least one of the following two conditions must be satisfied.
+ * 1. Address is 512B aligned
+ * 2. Address is 8B aligned and buffer does not cross 4K boundary.
+ *
+ * As such, the driver may need to adjust the DMA address within the
+ * buffer by up to 512B.
+ *
+ * return 1 if the element size is valid, otherwise return 0.
+ */
+static int
+mempool_element_size_valid(struct rte_mempool *mp)
+{
+ uint32_t min_size;
+
+ /* elt_size includes mbuf header and headroom */
+ min_size = mp->elt_size - sizeof(struct rte_mbuf) -
+ RTE_PKTMBUF_HEADROOM;
+
+ /* account for up to 512B of alignment */
+ min_size -= FM10K_RX_DATABUF_ALIGN;
+
+ /* sanity check for overflow */
+ if (min_size > mp->elt_size)
+ return 0;
+
+ /* size is valid */
+ return 1;
+}
+
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+}
+
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_HEADER_SPLIT);
+}
+
+static int
+fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+ struct fm10k_rx_queue *q;
+ const struct rte_memzone *mz;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+ /* make sure the mempool element size can account for alignment. */
+ if (!mempool_element_size_valid(mp)) {
+ PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
+ return -EINVAL;
+ }
+
+ /* make sure a valid number of descriptors have been requested */
+ if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
+ FM10K_MULT_RX_DESC, nb_desc)) {
+ PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
+ "less than or equal to %"PRIu32", "
+ "greater than or equal to %u, "
+ "and a multiple of %u",
+ nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
+ FM10K_MULT_RX_DESC);
+ return -EINVAL;
+ }
+
+ /*
+ * if this queue existed already, free the associated memory. The
+ * queue cannot be reused in case we need to allocate memory on
+ * different socket than was previously used.
+ */
+ if (dev->data->rx_queues[queue_id] != NULL) {
+ rx_queue_free(dev->data->rx_queues[queue_id]);
+ dev->data->rx_queues[queue_id] = NULL;
+ }
+
+ /* allocate memory for the queue structure */
+ q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (q == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
+ return -ENOMEM;
+ }
+
+ /* setup queue */
+ q->mp = mp;
+ q->nb_desc = nb_desc;
+ q->nb_fake_desc = FM10K_MULT_RX_DESC;
+ q->port_id = dev->data->port_id;
+ q->queue_id = queue_id;
+ q->tail_ptr = (volatile uint32_t *)
+ &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
+ q->offloads = offloads;
+ if (handle_rxconf(q, conf))
+ return -EINVAL;
+
+ /* allocate memory for the software ring */
+ q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
+ (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (q->sw_ring == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate software ring");
+ rte_free(q);
+ return -ENOMEM;
+ }
+
+ /*
+ * allocate memory for the hardware descriptor ring. A memzone large
+ * enough to hold the maximum ring size is requested to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
+ FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
+ socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
+ rte_free(q->sw_ring);
+ rte_free(q);
+ return -ENOMEM;
+ }
+ q->hw_ring = mz->addr;
+ q->hw_ring_phys_addr = mz->iova;
+
+ /* Check if number of descs satisfied Vector requirement */
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
+ "preconditions - canceling the feature for "
+ "the whole port[%d]",
+ q->queue_id, q->port_id);
+ dev_info->rx_vec_allowed = false;
+ } else
+ fm10k_rxq_vec_setup(q);
+
+ dev->data->rx_queues[queue_id] = q;
+ return 0;
+}
+
+static void
+fm10k_rx_queue_release(void *queue)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rx_queue_free(queue);
+}
+
+static inline int
+handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
+{
+ uint16_t tx_free_thresh;
+ uint16_t tx_rs_thresh;
+
+ /* constraint MACROs require that tx_free_thresh is configured
+ * before tx_rs_thresh */
+ if (conf->tx_free_thresh == 0)
+ tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
+ else
+ tx_free_thresh = conf->tx_free_thresh;
+
+ /* make sure the requested threshold satisfies the constraints */
+ if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
+ FM10K_TX_FREE_THRESH_MAX(q),
+ FM10K_TX_FREE_THRESH_DIV(q),
+ tx_free_thresh)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
+ "less than or equal to %u, "
+ "greater than or equal to %u, "
+ "and a divisor of %u",
+ tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
+ FM10K_TX_FREE_THRESH_MIN(q),
+ FM10K_TX_FREE_THRESH_DIV(q));
+ return -EINVAL;
+ }
+
+ q->free_thresh = tx_free_thresh;
+
+ if (conf->tx_rs_thresh == 0)
+ tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
+ else
+ tx_rs_thresh = conf->tx_rs_thresh;
+
+ q->tx_deferred_start = conf->tx_deferred_start;
+
+ /* make sure the requested threshold satisfies the constraints */
+ if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
+ FM10K_TX_RS_THRESH_MAX(q),
+ FM10K_TX_RS_THRESH_DIV(q),
+ tx_rs_thresh)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
+ "less than or equal to %u, "
+ "greater than or equal to %u, "
+ "and a divisor of %u",
+ tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
+ FM10K_TX_RS_THRESH_MIN(q),
+ FM10K_TX_RS_THRESH_DIV(q));
+ return -EINVAL;
+ }
+
+ q->rs_thresh = tx_rs_thresh;
+
+ return 0;
+}
+
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO);
+}
+
+static int
+fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *conf)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_tx_queue *q;
+ const struct rte_memzone *mz;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+
+ offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ /* make sure a valid number of descriptors have been requested */
+ if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
+ FM10K_MULT_TX_DESC, nb_desc)) {
+ PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
+ "less than or equal to %"PRIu32", "
+ "greater than or equal to %u, "
+ "and a multiple of %u",
+ nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
+ FM10K_MULT_TX_DESC);
+ return -EINVAL;
+ }
+
+ /*
+ * if this queue existed already, free the associated memory. The
+ * queue cannot be reused in case we need to allocate memory on
+ * different socket than was previously used.
+ */
+ if (dev->data->tx_queues[queue_id] != NULL) {
+ struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
+
+ tx_queue_free(txq);
+ dev->data->tx_queues[queue_id] = NULL;
+ }
+
+ /* allocate memory for the queue structure */
+ q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (q == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
+ return -ENOMEM;
+ }
+
+ /* setup queue */
+ q->nb_desc = nb_desc;
+ q->port_id = dev->data->port_id;
+ q->queue_id = queue_id;
+ q->offloads = offloads;
+ q->ops = &def_txq_ops;
+ q->tail_ptr = (volatile uint32_t *)
+ &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
+ if (handle_txconf(q, conf))
+ return -EINVAL;
+
+ /* allocate memory for the software ring */
+ q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
+ nb_desc * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (q->sw_ring == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate software ring");
+ rte_free(q);
+ return -ENOMEM;
+ }
+
+ /*
+ * allocate memory for the hardware descriptor ring. A memzone large
+ * enough to hold the maximum ring size is requested to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
+ FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
+ socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
+ rte_free(q->sw_ring);
+ rte_free(q);
+ return -ENOMEM;
+ }
+ q->hw_ring = mz->addr;
+ q->hw_ring_phys_addr = mz->iova;
+
+ /*
+ * allocate memory for the RS bit tracker. Enough slots to hold the
+ * descriptor index for each RS bit needing to be set are required.
+ */
+ q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
+ ((nb_desc + 1) / q->rs_thresh) *
+ sizeof(uint16_t),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (q->rs_tracker.list == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
+ rte_free(q->sw_ring);
+ rte_free(q);
+ return -ENOMEM;
+ }
+
+ dev->data->tx_queues[queue_id] = q;
+ return 0;
+}
+
+static void
+fm10k_tx_queue_release(void *queue)
+{
+ struct fm10k_tx_queue *q = queue;
+ PMD_INIT_FUNC_TRACE();
+
+ tx_queue_free(q);
+}
+
+static int
+fm10k_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t i, j, idx, shift;
+ uint8_t mask;
+ uint32_t reta;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (reta_size > FM10K_MAX_RSS_INDICES) {
+ PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
+ return -EINVAL;
+ }
+
+ /*
+ * Update Redirection Table RETA[n], n=0..31. The redirection table has
+ * 128-entries in 32 registers
+ */
+ for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ BIT_MASK_PER_UINT32);
+ if (mask == 0)
+ continue;
+
+ reta = 0;
+ if (mask != BIT_MASK_PER_UINT32)
+ reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
+
+ for (j = 0; j < CHARS_PER_UINT32; j++) {
+ if (mask & (0x1 << j)) {
+ if (mask != 0xF)
+ reta &= ~(UINT8_MAX << CHAR_BIT * j);
+ reta |= reta_conf[idx].reta[shift + j] <<
+ (CHAR_BIT * j);
+ }
+ }
+ FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
+ }
+
+ return 0;
+}
+
+static int
+fm10k_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t i, j, idx, shift;
+ uint8_t mask;
+ uint32_t reta;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (reta_size < FM10K_MAX_RSS_INDICES) {
+ PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
+ return -EINVAL;
+ }
+
+ /*
+ * Read Redirection Table RETA[n], n=0..31. The redirection table has
+ * 128-entries in 32 registers
+ */
+ for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ BIT_MASK_PER_UINT32);
+ if (mask == 0)
+ continue;
+
+ reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
+ for (j = 0; j < CHARS_PER_UINT32; j++) {
+ if (mask & (0x1 << j))
+ reta_conf[idx].reta[shift + j] = ((reta >>
+ CHAR_BIT * j) & UINT8_MAX);
+ }
+ }
+
+ return 0;
+}
+
+static int
+fm10k_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *key = (uint32_t *)rss_conf->rss_key;
+ uint32_t mrqc;
+ uint64_t hf = rss_conf->rss_hf;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
+ FM10K_RSSRK_ENTRIES_PER_REG))
+ return -EINVAL;
+
+ if (hf == 0)
+ return -EINVAL;
+
+ mrqc = 0;
+ mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
+
+ /* If the mapping doesn't fit any supported, return */
+ if (mrqc == 0)
+ return -EINVAL;
+
+ if (key != NULL)
+ for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
+ FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
+
+ FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
+
+ return 0;
+}
+
+static int
+fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *key = (uint32_t *)rss_conf->rss_key;
+ uint32_t mrqc;
+ uint64_t hf;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
+ FM10K_RSSRK_ENTRIES_PER_REG))
+ return -EINVAL;
+
+ if (key != NULL)
+ for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
+ key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
+
+ mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
+ hf = 0;
+ hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
+ hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
+ hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
+
+ rss_conf->rss_hf = hf;
+
+ return 0;
+}
+
+static void
+fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
+
+ /* Bind all local non-queue interrupt to vector 0 */
+ int_map |= FM10K_MISC_VEC_ID;
+
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
+
+ /* Enable misc causes */
+ FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
+ FM10K_EIMR_ENABLE(THI_FAULT) |
+ FM10K_EIMR_ENABLE(FUM_FAULT) |
+ FM10K_EIMR_ENABLE(MAILBOX) |
+ FM10K_EIMR_ENABLE(SWITCHREADY) |
+ FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
+ FM10K_EIMR_ENABLE(SRAMERROR) |
+ FM10K_EIMR_ENABLE(VFLR));
+
+ /* Enable ITR 0 */
+ FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ FM10K_WRITE_FLUSH(hw);
+}
+
+static void
+fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t int_map = FM10K_INT_MAP_DISABLE;
+
+ int_map |= FM10K_MISC_VEC_ID;
+
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
+
+ /* Disable misc causes */
+ FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
+ FM10K_EIMR_DISABLE(THI_FAULT) |
+ FM10K_EIMR_DISABLE(FUM_FAULT) |
+ FM10K_EIMR_DISABLE(MAILBOX) |
+ FM10K_EIMR_DISABLE(SWITCHREADY) |
+ FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
+ FM10K_EIMR_DISABLE(SRAMERROR) |
+ FM10K_EIMR_DISABLE(VFLR));
+
+ /* Disable ITR 0 */
+ FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
+ FM10K_WRITE_FLUSH(hw);
+}
+
+static void
+fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
+
+ /* Bind all local non-queue interrupt to vector 0 */
+ int_map |= FM10K_MISC_VEC_ID;
+
+ /* Only INT 0 available, other 15 are reserved. */
+ FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
+
+ /* Enable ITR 0 */
+ FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ FM10K_WRITE_FLUSH(hw);
+}
+
+static void
+fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t int_map = FM10K_INT_MAP_DISABLE;
+
+ int_map |= FM10K_MISC_VEC_ID;
+
+ /* Only INT 0 available, other 15 are reserved. */
+ FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
+
+ /* Disable ITR 0 */
+ FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
+ FM10K_WRITE_FLUSH(hw);
+}
+
+static int
+fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+
+ /* Enable ITR */
+ if (hw->mac.type == fm10k_mac_pf)
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
+ FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
+ else
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
+ FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
+ rte_intr_enable(&pdev->intr_handle);
+ return 0;
+}
+
+static int
+fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+
+ /* Disable ITR */
+ if (hw->mac.type == fm10k_mac_pf)
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
+ FM10K_ITR_MASK_SET);
+ else
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
+ FM10K_ITR_MASK_SET);
+ return 0;
+}
+
+static int
+fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+ uint32_t intr_vector, vec;
+ uint16_t queue_id;
+ int result = 0;
+
+ /* fm10k needs one separate interrupt for mailbox,
+ * so only drivers which support multiple interrupt vectors
+ * e.g. vfio-pci can work for fm10k interrupt mode
+ */
+ if (!rte_intr_cap_multiple(intr_handle) ||
+ dev->data->dev_conf.intr_conf.rxq == 0)
+ return result;
+
+ intr_vector = dev->data->nb_rx_queues;
+
+ /* disable interrupt first */
+ rte_intr_disable(intr_handle);
+ if (hw->mac.type == fm10k_mac_pf)
+ fm10k_dev_disable_intr_pf(dev);
+ else
+ fm10k_dev_disable_intr_vf(dev);
+
+ if (rte_intr_efd_enable(intr_handle, intr_vector)) {
+ PMD_INIT_LOG(ERR, "Failed to init event fd");
+ result = -EIO;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !result) {
+ intr_handle->intr_vec = rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec) {
+ for (queue_id = 0, vec = FM10K_RX_VEC_START;
+ queue_id < dev->data->nb_rx_queues;
+ queue_id++) {
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < intr_handle->nb_efd - 1
+ + FM10K_RX_VEC_START)
+ vec++;
+ }
+ } else {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ rte_intr_efd_disable(intr_handle);
+ result = -ENOMEM;
+ }
+ }
+
+ if (hw->mac.type == fm10k_mac_pf)
+ fm10k_dev_enable_intr_pf(dev);
+ else
+ fm10k_dev_enable_intr_vf(dev);
+ rte_intr_enable(intr_handle);
+ hw->mac.ops.update_int_moderator(hw);
+ return result;
+}
+
+static int
+fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
+{
+ struct fm10k_fault fault;
+ int err;
+ const char *estr = "Unknown error";
+
+ /* Process PCA fault */
+ if (eicr & FM10K_EICR_PCA_FAULT) {
+ err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
+ if (err)
+ goto error;
+ switch (fault.type) {
+ case PCA_NO_FAULT:
+ estr = "PCA_NO_FAULT"; break;
+ case PCA_UNMAPPED_ADDR:
+ estr = "PCA_UNMAPPED_ADDR"; break;
+ case PCA_BAD_QACCESS_PF:
+ estr = "PCA_BAD_QACCESS_PF"; break;
+ case PCA_BAD_QACCESS_VF:
+ estr = "PCA_BAD_QACCESS_VF"; break;
+ case PCA_MALICIOUS_REQ:
+ estr = "PCA_MALICIOUS_REQ"; break;
+ case PCA_POISONED_TLP:
+ estr = "PCA_POISONED_TLP"; break;
+ case PCA_TLP_ABORT:
+ estr = "PCA_TLP_ABORT"; break;
+ default:
+ goto error;
+ }
+ PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
+ estr, fault.func ? "VF" : "PF", fault.func,
+ fault.address, fault.specinfo);
+ }
+
+ /* Process THI fault */
+ if (eicr & FM10K_EICR_THI_FAULT) {
+ err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
+ if (err)
+ goto error;
+ switch (fault.type) {
+ case THI_NO_FAULT:
+ estr = "THI_NO_FAULT"; break;
+ case THI_MAL_DIS_Q_FAULT:
+ estr = "THI_MAL_DIS_Q_FAULT"; break;
+ default:
+ goto error;
+ }
+ PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
+ estr, fault.func ? "VF" : "PF", fault.func,
+ fault.address, fault.specinfo);
+ }
+
+ /* Process FUM fault */
+ if (eicr & FM10K_EICR_FUM_FAULT) {
+ err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
+ if (err)
+ goto error;
+ switch (fault.type) {
+ case FUM_NO_FAULT:
+ estr = "FUM_NO_FAULT"; break;
+ case FUM_UNMAPPED_ADDR:
+ estr = "FUM_UNMAPPED_ADDR"; break;
+ case FUM_POISONED_TLP:
+ estr = "FUM_POISONED_TLP"; break;
+ case FUM_BAD_VF_QACCESS:
+ estr = "FUM_BAD_VF_QACCESS"; break;
+ case FUM_ADD_DECODE_ERR:
+ estr = "FUM_ADD_DECODE_ERR"; break;
+ case FUM_RO_ERROR:
+ estr = "FUM_RO_ERROR"; break;
+ case FUM_QPRC_CRC_ERROR:
+ estr = "FUM_QPRC_CRC_ERROR"; break;
+ case FUM_CSR_TIMEOUT:
+ estr = "FUM_CSR_TIMEOUT"; break;
+ case FUM_INVALID_TYPE:
+ estr = "FUM_INVALID_TYPE"; break;
+ case FUM_INVALID_LENGTH:
+ estr = "FUM_INVALID_LENGTH"; break;
+ case FUM_INVALID_BE:
+ estr = "FUM_INVALID_BE"; break;
+ case FUM_INVALID_ALIGN:
+ estr = "FUM_INVALID_ALIGN"; break;
+ default:
+ goto error;
+ }
+ PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
+ estr, fault.func ? "VF" : "PF", fault.func,
+ fault.address, fault.specinfo);
+ }
+
+ return 0;
+error:
+ PMD_INIT_LOG(ERR, "Failed to handle fault event.");
+ return err;
+}
+
+/**
+ * PF interrupt handler triggered by NIC for handling specific interrupt.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) regsitered before.
+ *
+ * @return
+ * void
+ */
+static void
+fm10k_dev_interrupt_handler_pf(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t cause, status;
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+ int status_mbx;
+ s32 err;
+
+ if (hw->mac.type != fm10k_mac_pf)
+ return;
+
+ cause = FM10K_READ_REG(hw, FM10K_EICR);
+
+ /* Handle PCI fault cases */
+ if (cause & FM10K_EICR_FAULT_MASK) {
+ PMD_INIT_LOG(ERR, "INT: find fault!");
+ fm10k_dev_handle_fault(hw, cause);
+ }
+
+ /* Handle switch up/down */
+ if (cause & FM10K_EICR_SWITCHNOTREADY)
+ PMD_INIT_LOG(ERR, "INT: Switch is not ready");
+
+ if (cause & FM10K_EICR_SWITCHREADY) {
+ PMD_INIT_LOG(INFO, "INT: Switch is ready");
+ if (dev_info->sm_down == 1) {
+ fm10k_mbx_lock(hw);
+
+ /* For recreating logical ports */
+ status_mbx = hw->mac.ops.update_lport_state(hw,
+ hw->mac.dglort_map, MAX_LPORT_NUM, 1);
+ if (status_mbx == FM10K_SUCCESS)
+ PMD_INIT_LOG(INFO,
+ "INT: Recreated Logical port");
+ else
+ PMD_INIT_LOG(INFO,
+ "INT: Logical ports weren't recreated");
+
+ status_mbx = hw->mac.ops.update_xcast_mode(hw,
+ hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
+ if (status_mbx != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
+
+ fm10k_mbx_unlock(hw);
+
+ /* first clear the internal SW recording structure */
+ if (!(dev->data->dev_conf.rxmode.mq_mode &
+ ETH_MQ_RX_VMDQ_FLAG))
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid,
+ false);
+
+ fm10k_MAC_filter_set(dev, hw->mac.addr, false,
+ MAIN_VSI_POOL_NUMBER);
+
+ /*
+ * Add default mac address and vlan for the logical
+ * ports that have been created, leave to the
+ * application to fully recover Rx filtering.
+ */
+ fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+ MAIN_VSI_POOL_NUMBER);
+
+ if (!(dev->data->dev_conf.rxmode.mq_mode &
+ ETH_MQ_RX_VMDQ_FLAG))
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid,
+ true);
+
+ dev_info->sm_down = 0;
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+ }
+
+ /* Handle mailbox message */
+ fm10k_mbx_lock(hw);
+ err = hw->mbx.ops.process(hw, &hw->mbx);
+ fm10k_mbx_unlock(hw);
+
+ if (err == FM10K_ERR_RESET_REQUESTED) {
+ PMD_INIT_LOG(INFO, "INT: Switch is down");
+ dev_info->sm_down = 1;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+
+ /* Handle SRAM error */
+ if (cause & FM10K_EICR_SRAMERROR) {
+ PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
+
+ status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
+ /* Write to clear pending bits */
+ FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
+
+ /* Todo: print out error message after shared code updates */
+ }
+
+ /* Clear these 3 events if having any */
+ cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
+ FM10K_EICR_SWITCHREADY;
+ if (cause)
+ FM10K_WRITE_REG(hw, FM10K_EICR, cause);
+
+ /* Re-enable interrupt from device side */
+ FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ /* Re-enable interrupt from host side */
+ rte_intr_enable(dev->intr_handle);
+}
+
+/**
+ * VF interrupt handler triggered by NIC for handling specific interrupt.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) regsitered before.
+ *
+ * @return
+ * void
+ */
+static void
+fm10k_dev_interrupt_handler_vf(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+ const enum fm10k_mbx_state state = mbx->state;
+ int status_mbx;
+
+ if (hw->mac.type != fm10k_mac_vf)
+ return;
+
+ /* Handle mailbox message if lock is acquired */
+ fm10k_mbx_lock(hw);
+ hw->mbx.ops.process(hw, &hw->mbx);
+ fm10k_mbx_unlock(hw);
+
+ if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
+ PMD_INIT_LOG(INFO, "INT: Switch has gone down");
+
+ fm10k_mbx_lock(hw);
+ hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+ MAX_LPORT_NUM, 1);
+ fm10k_mbx_unlock(hw);
+
+ /* Setting reset flag */
+ dev_info->sm_down = 1;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+
+ if (dev_info->sm_down == 1 &&
+ hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
+ PMD_INIT_LOG(INFO, "INT: Switch has gone up");
+ fm10k_mbx_lock(hw);
+ status_mbx = hw->mac.ops.update_xcast_mode(hw,
+ hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
+ if (status_mbx != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
+ fm10k_mbx_unlock(hw);
+
+ /* first clear the internal SW recording structure */
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
+ fm10k_MAC_filter_set(dev, hw->mac.addr, false,
+ MAIN_VSI_POOL_NUMBER);
+
+ /*
+ * Add default mac address and vlan for the logical ports that
+ * have been created, leave to the application to fully recover
+ * Rx filtering.
+ */
+ fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+ MAIN_VSI_POOL_NUMBER);
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
+
+ dev_info->sm_down = 0;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+
+ /* Re-enable interrupt from device side */
+ FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ /* Re-enable interrupt from host side */
+ rte_intr_enable(dev->intr_handle);
+}
+
+/* Mailbox message handler in VF */
+static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
+ FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
+ FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
+ FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
+ FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+};
+
+static int
+fm10k_setup_mbx_service(struct fm10k_hw *hw)
+{
+ int err = 0;
+
+ /* Initialize mailbox lock */
+ fm10k_mbx_initlock(hw);
+
+ /* Replace default message handler with new ones */
+ if (hw->mac.type == fm10k_mac_vf)
+ err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
+
+ if (err) {
+ PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
+ err);
+ return err;
+ }
+ /* Connect to SM for PF device or PF for VF device */
+ return hw->mbx.ops.connect(hw, &hw->mbx);
+}
+
+static void
+fm10k_close_mbx_service(struct fm10k_hw *hw)
+{
+ /* Disconnect from SM for PF device or PF for VF device */
+ hw->mbx.ops.disconnect(hw, &hw->mbx);
+}
+
+static const struct eth_dev_ops fm10k_eth_dev_ops = {
+ .dev_configure = fm10k_dev_configure,
+ .dev_start = fm10k_dev_start,
+ .dev_stop = fm10k_dev_stop,
+ .dev_close = fm10k_dev_close,
+ .promiscuous_enable = fm10k_dev_promiscuous_enable,
+ .promiscuous_disable = fm10k_dev_promiscuous_disable,
+ .allmulticast_enable = fm10k_dev_allmulticast_enable,
+ .allmulticast_disable = fm10k_dev_allmulticast_disable,
+ .stats_get = fm10k_stats_get,
+ .xstats_get = fm10k_xstats_get,
+ .xstats_get_names = fm10k_xstats_get_names,
+ .stats_reset = fm10k_stats_reset,
+ .xstats_reset = fm10k_stats_reset,
+ .link_update = fm10k_link_update,
+ .dev_infos_get = fm10k_dev_infos_get,
+ .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
+ .vlan_filter_set = fm10k_vlan_filter_set,
+ .vlan_offload_set = fm10k_vlan_offload_set,
+ .mac_addr_add = fm10k_macaddr_add,
+ .mac_addr_remove = fm10k_macaddr_remove,
+ .rx_queue_start = fm10k_dev_rx_queue_start,
+ .rx_queue_stop = fm10k_dev_rx_queue_stop,
+ .tx_queue_start = fm10k_dev_tx_queue_start,
+ .tx_queue_stop = fm10k_dev_tx_queue_stop,
+ .rx_queue_setup = fm10k_rx_queue_setup,
+ .rx_queue_release = fm10k_rx_queue_release,
+ .tx_queue_setup = fm10k_tx_queue_setup,
+ .tx_queue_release = fm10k_tx_queue_release,
+ .rx_descriptor_done = fm10k_dev_rx_descriptor_done,
+ .rx_descriptor_status = fm10k_dev_rx_descriptor_status,
+ .tx_descriptor_status = fm10k_dev_tx_descriptor_status,
+ .rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
+ .reta_update = fm10k_reta_update,
+ .reta_query = fm10k_reta_query,
+ .rss_hash_update = fm10k_rss_hash_update,
+ .rss_hash_conf_get = fm10k_rss_hash_conf_get,
+};
+
+static int ftag_check_handler(__rte_unused const char *key,
+ const char *value, __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+static int
+fm10k_check_ftag(struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ const char *ftag_key = "enable_ftag";
+
+ if (devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, ftag_key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ /* FTAG is enabled when there's key-value pair: enable_ftag=1 */
+ if (rte_kvargs_process(kvlist, ftag_key,
+ ftag_check_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
+
+static uint16_t
+fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+ ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
+static void __attribute__((cold))
+fm10k_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct fm10k_tx_queue *txq;
+ int i;
+ int use_sse = 1;
+ uint16_t tx_ftag_en = 0;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ /* primary process has set the ftag flag and offloads */
+ txq = dev->data->tx_queues[0];
+ if (fm10k_tx_vec_condition_check(txq)) {
+ dev->tx_pkt_burst = fm10k_xmit_pkts;
+ dev->tx_pkt_prepare = fm10k_prep_pkts;
+ PMD_INIT_LOG(DEBUG, "Use regular Tx func");
+ } else {
+ PMD_INIT_LOG(DEBUG, "Use vector Tx func");
+ dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
+ dev->tx_pkt_prepare = NULL;
+ }
+ return;
+ }
+
+ if (fm10k_check_ftag(dev->device->devargs))
+ tx_ftag_en = 1;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ txq->tx_ftag_en = tx_ftag_en;
+ /* Check if Vector Tx is satisfied */
+ if (fm10k_tx_vec_condition_check(txq))
+ use_sse = 0;
+ }
+
+ if (use_sse) {
+ PMD_INIT_LOG(DEBUG, "Use vector Tx func");
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ fm10k_txq_vec_setup(txq);
+ }
+ dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
+ dev->tx_pkt_prepare = NULL;
+ } else {
+ dev->tx_pkt_burst = fm10k_xmit_pkts;
+ dev->tx_pkt_prepare = fm10k_prep_pkts;
+ PMD_INIT_LOG(DEBUG, "Use regular Tx func");
+ }
+}
+
+static void __attribute__((cold))
+fm10k_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+ uint16_t i, rx_using_sse;
+ uint16_t rx_ftag_en = 0;
+
+ if (fm10k_check_ftag(dev->device->devargs))
+ rx_ftag_en = 1;
+
+ /* In order to allow Vector Rx there are a few configuration
+ * conditions to be met.
+ */
+ if (!fm10k_rx_vec_condition_check(dev) &&
+ dev_info->rx_vec_allowed && !rx_ftag_en) {
+ if (dev->data->scattered_rx)
+ dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
+ else
+ dev->rx_pkt_burst = fm10k_recv_pkts_vec;
+ } else if (dev->data->scattered_rx)
+ dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
+ else
+ dev->rx_pkt_burst = fm10k_recv_pkts;
+
+ rx_using_sse =
+ (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == fm10k_recv_pkts_vec);
+
+ if (rx_using_sse)
+ PMD_INIT_LOG(DEBUG, "Use vector Rx func");
+ else
+ PMD_INIT_LOG(DEBUG, "Use regular Rx func");
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
+
+ rxq->rx_using_sse = rx_using_sse;
+ rxq->rx_ftag_en = rx_ftag_en;
+ }
+}
+
+static void
+fm10k_params_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_dev_info *info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+
+ /* Inialize bus info. Normally we would call fm10k_get_bus_info(), but
+ * there is no way to get link status without reading BAR4. Until this
+ * works, assume we have maximum bandwidth.
+ * @todo - fix bus info
+ */
+ hw->bus_caps.speed = fm10k_bus_speed_8000;
+ hw->bus_caps.width = fm10k_bus_width_pcie_x8;
+ hw->bus_caps.payload = fm10k_bus_payload_512;
+ hw->bus.speed = fm10k_bus_speed_8000;
+ hw->bus.width = fm10k_bus_width_pcie_x8;
+ hw->bus.payload = fm10k_bus_payload_256;
+
+ info->rx_vec_allowed = true;
+}
+
+static int
+eth_fm10k_dev_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+ int diag, i;
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev->dev_ops = &fm10k_eth_dev_ops;
+ dev->rx_pkt_burst = &fm10k_recv_pkts;
+ dev->tx_pkt_burst = &fm10k_xmit_pkts;
+ dev->tx_pkt_prepare = &fm10k_prep_pkts;
+
+ /*
+ * Primary process does the whole initialization, for secondary
+ * processes, we just select the same Rx and Tx function as primary.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ fm10k_set_rx_function(dev);
+ fm10k_set_tx_function(dev);
+ return 0;
+ }
+
+ rte_eth_copy_pci_info(dev, pdev);
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ memset(macvlan, 0, sizeof(*macvlan));
+ /* Vendor and Device ID need to be set before init of shared code */
+ memset(hw, 0, sizeof(*hw));
+ hw->device_id = pdev->id.device_id;
+ hw->vendor_id = pdev->id.vendor_id;
+ hw->subsystem_device_id = pdev->id.subsystem_device_id;
+ hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
+ hw->revision_id = 0;
+ hw->hw_addr = (void *)pdev->mem_resource[0].addr;
+ if (hw->hw_addr == NULL) {
+ PMD_INIT_LOG(ERR, "Bad mem resource."
+ " Try to blacklist unused devices.");
+ return -EIO;
+ }
+
+ /* Store fm10k_adapter pointer */
+ hw->back = dev->data->dev_private;
+
+ /* Initialize the shared code */
+ diag = fm10k_init_shared_code(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
+ return -EIO;
+ }
+
+ /* Initialize parameters */
+ fm10k_params_init(dev);
+
+ /* Initialize the hw */
+ diag = fm10k_init_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
+ return -EIO;
+ }
+
+ /* Initialize MAC address(es) */
+ dev->data->mac_addrs = rte_zmalloc("fm10k",
+ ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
+ if (dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
+ return -ENOMEM;
+ }
+
+ diag = fm10k_read_mac_addr(hw);
+
+ ether_addr_copy((const struct ether_addr *)hw->mac.addr,
+ &dev->data->mac_addrs[0]);
+
+ if (diag != FM10K_SUCCESS ||
+ !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
+
+ /* Generate a random addr */
+ eth_random_addr(hw->mac.addr);
+ memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
+ ether_addr_copy((const struct ether_addr *)hw->mac.addr,
+ &dev->data->mac_addrs[0]);
+ }
+
+ /* Reset the hw statistics */
+ fm10k_stats_reset(dev);
+
+ /* Reset the hw */
+ diag = fm10k_reset_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
+ return -EIO;
+ }
+
+ /* Setup mailbox service */
+ diag = fm10k_setup_mbx_service(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
+ return -EIO;
+ }
+
+ /*PF/VF has different interrupt handling mechanism */
+ if (hw->mac.type == fm10k_mac_pf) {
+ /* register callback func to eal lib */
+ rte_intr_callback_register(intr_handle,
+ fm10k_dev_interrupt_handler_pf, (void *)dev);
+
+ /* enable MISC interrupt */
+ fm10k_dev_enable_intr_pf(dev);
+ } else { /* VF */
+ rte_intr_callback_register(intr_handle,
+ fm10k_dev_interrupt_handler_vf, (void *)dev);
+
+ fm10k_dev_enable_intr_vf(dev);
+ }
+
+ /* Enable intr after callback registered */
+ rte_intr_enable(intr_handle);
+
+ hw->mac.ops.update_int_moderator(hw);
+
+ /* Make sure Switch Manager is ready before going forward. */
+ if (hw->mac.type == fm10k_mac_pf) {
+ int switch_ready = 0;
+
+ for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
+ fm10k_mbx_lock(hw);
+ hw->mac.ops.get_host_state(hw, &switch_ready);
+ fm10k_mbx_unlock(hw);
+ if (switch_ready)
+ break;
+ /* Delay some time to acquire async LPORT_MAP info. */
+ rte_delay_us(WAIT_SWITCH_MSG_US);
+ }
+
+ if (switch_ready == 0) {
+ PMD_INIT_LOG(ERR, "switch is not ready");
+ return -1;
+ }
+ }
+
+ /*
+ * Below function will trigger operations on mailbox, acquire lock to
+ * avoid race condition from interrupt handler. Operations on mailbox
+ * FIFO will trigger interrupt to PF/SM, in which interrupt handler
+ * will handle and generate an interrupt to our side. Then, FIFO in
+ * mailbox will be touched.
+ */
+ fm10k_mbx_lock(hw);
+ /* Enable port first */
+ hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+ MAX_LPORT_NUM, 1);
+
+ /* Set unicast mode by default. App can change to other mode in other
+ * API func.
+ */
+ hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ FM10K_XCAST_MODE_NONE);
+
+ fm10k_mbx_unlock(hw);
+
+ /* Make sure default VID is ready before going forward. */
+ if (hw->mac.type == fm10k_mac_pf) {
+ for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
+ if (hw->mac.default_vid)
+ break;
+ /* Delay some time to acquire async port VLAN info. */
+ rte_delay_us(WAIT_SWITCH_MSG_US);
+ }
+
+ if (!hw->mac.default_vid) {
+ PMD_INIT_LOG(ERR, "default VID is not ready");
+ return -1;
+ }
+ }
+
+ /* Add default mac address */
+ fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+ MAIN_VSI_POOL_NUMBER);
+
+ return 0;
+}
+
+static int
+eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+ PMD_INIT_FUNC_TRACE();
+
+ /* only uninitialize in the primary process */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* safe to close dev here */
+ fm10k_dev_close(dev);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ /* disable uio/vfio intr */
+ rte_intr_disable(intr_handle);
+
+ /*PF/VF has different interrupt handling mechanism */
+ if (hw->mac.type == fm10k_mac_pf) {
+ /* disable interrupt */
+ fm10k_dev_disable_intr_pf(dev);
+
+ /* unregister callback func to eal lib */
+ rte_intr_callback_unregister(intr_handle,
+ fm10k_dev_interrupt_handler_pf, (void *)dev);
+ } else {
+ /* disable interrupt */
+ fm10k_dev_disable_intr_vf(dev);
+
+ rte_intr_callback_unregister(intr_handle,
+ fm10k_dev_interrupt_handler_vf, (void *)dev);
+ }
+
+ /* free mac memory */
+ if (dev->data->mac_addrs) {
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+ }
+
+ memset(hw, 0, sizeof(*hw));
+
+ return 0;
+}
+
+static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
+}
+
+static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
+}
+
+/*
+ * The set of PCI devices this driver supports. This driver will enable both PF
+ * and SRIOV-VF devices.
+ */
+static const struct rte_pci_id pci_id_fm10k_map[] = {
+ { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
+ { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
+ { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static struct rte_pci_driver rte_pmd_fm10k = {
+ .id_table = pci_id_fm10k_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = eth_fm10k_pci_probe,
+ .remove = eth_fm10k_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
+RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(fm10k_init_log)
+{
+ fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
+ if (fm10k_logtype_init >= 0)
+ rte_log_set_level(fm10k_logtype_init, RTE_LOG_NOTICE);
+ fm10k_logtype_driver = rte_log_register("pmd.net.fm10k.driver");
+ if (fm10k_logtype_driver >= 0)
+ rte_log_set_level(fm10k_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k_logs.h b/src/spdk/dpdk/drivers/net/fm10k/fm10k_logs.h
new file mode 100644
index 00000000..df56a51c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k_logs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2015 Intel Corporation
+ */
+
+#ifndef _FM10K_LOGS_H_
+#define _FM10K_LOGS_H_
+
+#include <rte_log.h>
+
+extern int fm10k_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, fm10k_logtype_init, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_FM10K_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_FM10K_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+extern int fm10k_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, fm10k_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _FM10K_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx.c b/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx.c
new file mode 100644
index 00000000..4a5b46ec
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx.c
@@ -0,0 +1,696 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2016 Intel Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_common.h>
+#include <rte_net.h>
+#include "fm10k.h"
+#include "base/fm10k_type.h"
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while (0)
+#endif
+
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+static inline void dump_rxd(union fm10k_rx_desc *rxd)
+{
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+ PMD_RX_LOG(DEBUG, "| GLORT | PKT HDR & TYPE |");
+ PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", rxd->d.glort,
+ rxd->d.data);
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+ PMD_RX_LOG(DEBUG, "| VLAN & LEN | STATUS |");
+ PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", rxd->d.vlan_len,
+ rxd->d.staterr);
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+ PMD_RX_LOG(DEBUG, "| RESERVED | RSS_HASH |");
+ PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", 0, rxd->d.rss);
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+ PMD_RX_LOG(DEBUG, "| TIME TAG |");
+ PMD_RX_LOG(DEBUG, "| 0x%016"PRIx64" |", rxd->q.timestamp);
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+}
+#endif
+
+#define FM10K_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define FM10K_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)
+
+/* @note: When this function is changed, make corresponding change to
+ * fm10k_dev_supported_ptypes_get()
+ */
+static inline void
+rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
+{
+ static const uint32_t
+ ptype_table[FM10K_RXD_PKTTYPE_MASK >> FM10K_RXD_PKTTYPE_SHIFT]
+ __rte_cache_aligned = {
+ [FM10K_PKTTYPE_OTHER] = RTE_PTYPE_L2_ETHER,
+ [FM10K_PKTTYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
+ [FM10K_PKTTYPE_IPV4_EX] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT,
+ [FM10K_PKTTYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
+ [FM10K_PKTTYPE_IPV6_EX] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT,
+ [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ };
+
+ m->packet_type = ptype_table[(d->w.pkt_info & FM10K_RXD_PKTTYPE_MASK)
+ >> FM10K_RXD_PKTTYPE_SHIFT];
+
+ if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
+ m->ol_flags |= PKT_RX_RSS_HASH;
+
+ if (unlikely((d->d.staterr &
+ (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
+ (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
+ m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (unlikely((d->d.staterr &
+ (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
+ (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
+ m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+}
+
+uint16_t
+fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *mbuf;
+ union fm10k_rx_desc desc;
+ struct fm10k_rx_queue *q = rx_queue;
+ uint16_t count = 0;
+ int alloc = 0;
+ uint16_t next_dd;
+ int ret;
+
+ next_dd = q->next_dd;
+
+ nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh);
+ for (count = 0; count < nb_pkts; ++count) {
+ if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
+ break;
+ mbuf = q->sw_ring[next_dd];
+ desc = q->hw_ring[next_dd];
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+ dump_rxd(&desc);
+#endif
+ rte_pktmbuf_pkt_len(mbuf) = desc.w.length;
+ rte_pktmbuf_data_len(mbuf) = desc.w.length;
+
+ mbuf->ol_flags = 0;
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+ rx_desc_to_ol_flags(mbuf, &desc);
+#endif
+
+ mbuf->hash.rss = desc.d.rss;
+ /**
+ * Packets in fm10k device always carry at least one VLAN tag.
+ * For those packets coming in without VLAN tag,
+ * the port default VLAN tag will be used.
+ * So, always PKT_RX_VLAN flag is set and vlan_tci
+ * is valid for each RX packet's mbuf.
+ */
+ mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->vlan_tci = desc.w.vlan;
+ /**
+ * mbuf->vlan_tci_outer is an idle field in fm10k driver,
+ * so it can be selected to store sglort value.
+ */
+ if (q->rx_ftag_en)
+ mbuf->vlan_tci_outer = rte_le_to_cpu_16(desc.w.sglort);
+
+ rx_pkts[count] = mbuf;
+ if (++next_dd == q->nb_desc) {
+ next_dd = 0;
+ alloc = 1;
+ }
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_prefetch0(q->sw_ring[next_dd]);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((next_dd & 0x3) == 0) {
+ rte_prefetch0(&q->hw_ring[next_dd]);
+ rte_prefetch0(&q->sw_ring[next_dd]);
+ }
+ }
+
+ q->next_dd = next_dd;
+
+ if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
+ ret = rte_mempool_get_bulk(q->mp,
+ (void **)&q->sw_ring[q->next_alloc],
+ q->alloc_thresh);
+
+ if (unlikely(ret != 0)) {
+ uint16_t port = q->port_id;
+ PMD_RX_LOG(ERR, "Failed to alloc mbuf");
+ /*
+ * Need to restore next_dd if we cannot allocate new
+ * buffers to replenish the old ones.
+ */
+ q->next_dd = (q->next_dd + q->nb_desc - count) %
+ q->nb_desc;
+ rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
+ return 0;
+ }
+
+ for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
+ mbuf = q->sw_ring[q->next_alloc];
+
+ /* setup static mbuf fields */
+ fm10k_pktmbuf_reset(mbuf, q->port_id);
+
+ /* write descriptor */
+ desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+ desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+ q->hw_ring[q->next_alloc] = desc;
+ }
+ FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
+ q->next_trigger += q->alloc_thresh;
+ if (q->next_trigger >= q->nb_desc) {
+ q->next_trigger = q->alloc_thresh - 1;
+ q->next_alloc = 0;
+ }
+ }
+
+ return count;
+}
+
+uint16_t
+fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *mbuf;
+ union fm10k_rx_desc desc;
+ struct fm10k_rx_queue *q = rx_queue;
+ uint16_t count = 0;
+ uint16_t nb_rcv, nb_seg;
+ int alloc = 0;
+ uint16_t next_dd;
+ struct rte_mbuf *first_seg = q->pkt_first_seg;
+ struct rte_mbuf *last_seg = q->pkt_last_seg;
+ int ret;
+
+ next_dd = q->next_dd;
+ nb_rcv = 0;
+
+ nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
+ for (count = 0; count < nb_seg; count++) {
+ if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
+ break;
+ mbuf = q->sw_ring[next_dd];
+ desc = q->hw_ring[next_dd];
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+ dump_rxd(&desc);
+#endif
+
+ if (++next_dd == q->nb_desc) {
+ next_dd = 0;
+ alloc = 1;
+ }
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_prefetch0(q->sw_ring[next_dd]);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((next_dd & 0x3) == 0) {
+ rte_prefetch0(&q->hw_ring[next_dd]);
+ rte_prefetch0(&q->sw_ring[next_dd]);
+ }
+
+ /* Fill data length */
+ rte_pktmbuf_data_len(mbuf) = desc.w.length;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet and
+ * initialize its context.
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (!first_seg) {
+ first_seg = mbuf;
+ first_seg->pkt_len = desc.w.length;
+ } else {
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len +
+ rte_pktmbuf_data_len(mbuf));
+ first_seg->nb_segs++;
+ last_seg->next = mbuf;
+ }
+
+ /*
+ * If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) {
+ last_seg = mbuf;
+ continue;
+ }
+
+ first_seg->ol_flags = 0;
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+ rx_desc_to_ol_flags(first_seg, &desc);
+#endif
+ first_seg->hash.rss = desc.d.rss;
+ /**
+ * Packets in fm10k device always carry at least one VLAN tag.
+ * For those packets coming in without VLAN tag,
+ * the port default VLAN tag will be used.
+ * So, always PKT_RX_VLAN flag is set and vlan_tci
+ * is valid for each RX packet's mbuf.
+ */
+ first_seg->ol_flags |= PKT_RX_VLAN;
+ first_seg->vlan_tci = desc.w.vlan;
+ /**
+ * mbuf->vlan_tci_outer is an idle field in fm10k driver,
+ * so it can be selected to store sglort value.
+ */
+ if (q->rx_ftag_en)
+ first_seg->vlan_tci_outer =
+ rte_le_to_cpu_16(desc.w.sglort);
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rcv++] = first_seg;
+
+ /*
+ * Setup receipt context for a new packet.
+ */
+ first_seg = NULL;
+ }
+
+ q->next_dd = next_dd;
+
+ if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
+ ret = rte_mempool_get_bulk(q->mp,
+ (void **)&q->sw_ring[q->next_alloc],
+ q->alloc_thresh);
+
+ if (unlikely(ret != 0)) {
+ uint16_t port = q->port_id;
+ PMD_RX_LOG(ERR, "Failed to alloc mbuf");
+ /*
+ * Need to restore next_dd if we cannot allocate new
+ * buffers to replenish the old ones.
+ */
+ q->next_dd = (q->next_dd + q->nb_desc - count) %
+ q->nb_desc;
+ rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
+ return 0;
+ }
+
+ for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
+ mbuf = q->sw_ring[q->next_alloc];
+
+ /* setup static mbuf fields */
+ fm10k_pktmbuf_reset(mbuf, q->port_id);
+
+ /* write descriptor */
+ desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+ desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+ q->hw_ring[q->next_alloc] = desc;
+ }
+ FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
+ q->next_trigger += q->alloc_thresh;
+ if (q->next_trigger >= q->nb_desc) {
+ q->next_trigger = q->alloc_thresh - 1;
+ q->next_alloc = 0;
+ }
+ }
+
+ q->pkt_first_seg = first_seg;
+ q->pkt_last_seg = last_seg;
+
+ return nb_rcv;
+}
+
+int
+fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile union fm10k_rx_desc *rxdp;
+ struct fm10k_rx_queue *rxq = rx_queue;
+ uint16_t desc;
+ int ret;
+
+ if (unlikely(offset >= rxq->nb_desc)) {
+ PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
+ return 0;
+ }
+
+ desc = rxq->next_dd + offset;
+ if (desc >= rxq->nb_desc)
+ desc -= rxq->nb_desc;
+
+ rxdp = &rxq->hw_ring[desc];
+
+ ret = !!(rxdp->w.status &
+ rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));
+
+ return ret;
+}
+
+int
+fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ volatile union fm10k_rx_desc *rxdp;
+ struct fm10k_rx_queue *rxq = rx_queue;
+ uint16_t nb_hold, trigger_last;
+ uint16_t desc;
+ int ret;
+
+ if (unlikely(offset >= rxq->nb_desc)) {
+ PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
+ return 0;
+ }
+
+ if (rxq->next_trigger < rxq->alloc_thresh)
+ trigger_last = rxq->next_trigger +
+ rxq->nb_desc - rxq->alloc_thresh;
+ else
+ trigger_last = rxq->next_trigger - rxq->alloc_thresh;
+
+ if (rxq->next_dd < trigger_last)
+ nb_hold = rxq->next_dd + rxq->nb_desc - trigger_last;
+ else
+ nb_hold = rxq->next_dd - trigger_last;
+
+ if (offset >= rxq->nb_desc - nb_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->next_dd + offset;
+ if (desc >= rxq->nb_desc)
+ desc -= rxq->nb_desc;
+
+ rxdp = &rxq->hw_ring[desc];
+
+ ret = !!(rxdp->w.status &
+ rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));
+
+ return ret;
+}
+
+int
+fm10k_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ volatile struct fm10k_tx_desc *txdp;
+ struct fm10k_tx_queue *txq = tx_queue;
+ uint16_t desc;
+ uint16_t next_rs = txq->nb_desc;
+ struct fifo rs_tracker = txq->rs_tracker;
+ struct fifo *r = &rs_tracker;
+
+ if (unlikely(offset >= txq->nb_desc))
+ return -EINVAL;
+
+ desc = txq->next_free + offset;
+ /* go to next desc that has the RS bit */
+ desc = (desc / txq->rs_thresh + 1) *
+ txq->rs_thresh - 1;
+
+ if (desc >= txq->nb_desc) {
+ desc -= txq->nb_desc;
+ if (desc >= txq->nb_desc)
+ desc -= txq->nb_desc;
+ }
+
+ r->head = r->list;
+ for ( ; r->head != r->endp; ) {
+ if (*r->head >= desc && *r->head < next_rs)
+ next_rs = *r->head;
+ ++r->head;
+ }
+
+ txdp = &txq->hw_ring[next_rs];
+ if (txdp->flags & FM10K_TXD_FLAG_DONE)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+/*
+ * Free multiple TX mbuf at a time if they are in the same pool
+ *
+ * @txep: software desc ring index that starts to free
+ * @num: number of descs to free
+ *
+ */
+static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
+{
+ struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
+ int i;
+ int nb_free = 0;
+
+ if (unlikely(num == 0))
+ return;
+
+ m = rte_pktmbuf_prefree_seg(txep[0]);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < num; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i]);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool))
+ free[nb_free++] = m;
+ else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ txep[i] = NULL;
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < num; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i]);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ txep[i] = NULL;
+ }
+ }
+}
+
+static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
+{
+ uint16_t next_rs, count = 0;
+
+ next_rs = fifo_peek(&q->rs_tracker);
+ if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE))
+ return;
+
+ /* the DONE flag is set on this descriptor so remove the ID
+ * from the RS bit tracker and free the buffers */
+ fifo_remove(&q->rs_tracker);
+
+ /* wrap around? if so, free buffers from last_free up to but NOT
+ * including nb_desc */
+ if (q->last_free > next_rs) {
+ count = q->nb_desc - q->last_free;
+ tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
+ q->last_free = 0;
+ }
+
+ /* adjust free descriptor count before the next loop */
+ q->nb_free += count + (next_rs + 1 - q->last_free);
+
+ /* free buffers from last_free, up to and including next_rs */
+ if (q->last_free <= next_rs) {
+ count = next_rs - q->last_free + 1;
+ tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
+ q->last_free += count;
+ }
+
+ if (q->last_free == q->nb_desc)
+ q->last_free = 0;
+}
+
+static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
+{
+ uint16_t last_id;
+ uint8_t flags, hdrlen;
+
+ /* always set the LAST flag on the last descriptor used to
+ * transmit the packet */
+ flags = FM10K_TXD_FLAG_LAST;
+ last_id = q->next_free + mb->nb_segs - 1;
+ if (last_id >= q->nb_desc)
+ last_id = last_id - q->nb_desc;
+
+ /* but only set the RS flag on the last descriptor if rs_thresh
+ * descriptors will be used since the RS flag was last set */
+ if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) {
+ flags |= FM10K_TXD_FLAG_RS;
+ fifo_insert(&q->rs_tracker, last_id);
+ q->nb_used = 0;
+ } else {
+ q->nb_used = q->nb_used + mb->nb_segs;
+ }
+
+ q->nb_free -= mb->nb_segs;
+
+ q->hw_ring[q->next_free].flags = 0;
+ if (q->tx_ftag_en)
+ q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_FTAG;
+ /* set checksum flags on first descriptor of packet. SCTP checksum
+ * offload is not supported, but we do not explicitly check for this
+ * case in favor of greatly simplified processing. */
+ if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
+ q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;
+
+ /* set vlan if requested */
+ if (mb->ol_flags & PKT_TX_VLAN_PKT)
+ q->hw_ring[q->next_free].vlan = mb->vlan_tci;
+
+ q->sw_ring[q->next_free] = mb;
+ q->hw_ring[q->next_free].buffer_addr =
+ rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
+ q->hw_ring[q->next_free].buflen =
+ rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
+
+ if (mb->ol_flags & PKT_TX_TCP_SEG) {
+ hdrlen = mb->outer_l2_len + mb->outer_l3_len + mb->l2_len +
+ mb->l3_len + mb->l4_len;
+ if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
+ hdrlen += sizeof(struct fm10k_ftag);
+
+ if (likely((hdrlen >= FM10K_TSO_MIN_HEADERLEN) &&
+ (hdrlen <= FM10K_TSO_MAX_HEADERLEN) &&
+ (mb->tso_segsz >= FM10K_TSO_MINMSS))) {
+ q->hw_ring[q->next_free].mss = mb->tso_segsz;
+ q->hw_ring[q->next_free].hdrlen = hdrlen;
+ }
+ }
+
+ if (++q->next_free == q->nb_desc)
+ q->next_free = 0;
+
+ /* fill up the rings */
+ for (mb = mb->next; mb != NULL; mb = mb->next) {
+ q->sw_ring[q->next_free] = mb;
+ q->hw_ring[q->next_free].buffer_addr =
+ rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
+ q->hw_ring[q->next_free].buflen =
+ rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
+ q->hw_ring[q->next_free].flags = 0;
+ if (++q->next_free == q->nb_desc)
+ q->next_free = 0;
+ }
+
+ q->hw_ring[last_id].flags |= flags;
+}
+
+uint16_t
+fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fm10k_tx_queue *q = tx_queue;
+ struct rte_mbuf *mb;
+ uint16_t count;
+
+ for (count = 0; count < nb_pkts; ++count) {
+ mb = tx_pkts[count];
+
+ /* running low on descriptors? try to free some... */
+ if (q->nb_free < q->free_thresh)
+ tx_free_descriptors(q);
+
+ /* make sure there are enough free descriptors to transmit the
+ * entire packet before doing anything */
+ if (q->nb_free < mb->nb_segs)
+ break;
+
+ /* sanity check to make sure the mbuf is valid */
+ if ((mb->nb_segs == 0) ||
+ ((mb->nb_segs > 1) && (mb->next == NULL)))
+ break;
+
+ /* process the packet */
+ tx_xmit_pkt(q, mb);
+ }
+
+ /* update the tail pointer if any packets were processed */
+ if (likely(count > 0))
+ FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_free);
+
+ return count;
+}
+
+uint16_t
+fm10k_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ if ((m->ol_flags & PKT_TX_TCP_SEG) &&
+ (m->tso_segsz < FM10K_TSO_MINMSS)) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (m->ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c b/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c
new file mode 100644
index 00000000..005fda63
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -0,0 +1,882 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2015 Intel Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_common.h>
+#include "fm10k.h"
+#include "base/fm10k_type.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static void
+fm10k_reset_tx_queue(struct fm10k_tx_queue *txq);
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when they are not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+
+/* Vlan present flag shift */
+#define VP_SHIFT (2)
+/* L3 type shift */
+#define L3TYPE_SHIFT (4)
+/* L4 type shift */
+#define L4TYPE_SHIFT (7)
+/* HBO flag shift */
+#define HBOFLAG_SHIFT (10)
+/* RXE flag shift */
+#define RXEFLAG_SHIFT (13)
+/* IPE/L4E flag shift */
+#define L3L4EFLAG_SHIFT (14)
+/* shift PKT_RX_L4_CKSUM_GOOD into one byte by 1 bit */
+#define CKSUM_SHIFT (1)
+
+static inline void
+fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i ptype0, ptype1, vtag0, vtag1, eflag0, eflag1, cksumflag;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ const __m128i pkttype_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ PKT_RX_VLAN, PKT_RX_VLAN,
+ PKT_RX_VLAN, PKT_RX_VLAN);
+
+ /* mask everything except rss type */
+ const __m128i rsstype_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x000F, 0x000F, 0x000F, 0x000F);
+
+ /* mask for HBO and RXE flag flags */
+ const __m128i rxe_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0001, 0x0001, 0x0001, 0x0001);
+
+ /* mask the lower byte of ol_flags */
+ const __m128i ol_flags_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x00FF, 0x00FF, 0x00FF, 0x00FF);
+
+ const __m128i l3l4cksum_flag = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
+ (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT);
+
+ const __m128i rxe_flag = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0);
+
+ /* map rss type to rss hash flag */
+ const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
+
+ /* Calculate RSS_hash and Vlan fields */
+ ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
+ ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
+ vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
+ vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
+
+ ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
+ ptype0 = _mm_and_si128(ptype0, rsstype_msk);
+ ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
+
+ vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
+ eflag0 = vtag1;
+ cksumflag = vtag1;
+ vtag1 = _mm_srli_epi16(vtag1, VP_SHIFT);
+ vtag1 = _mm_and_si128(vtag1, pkttype_msk);
+
+ vtag1 = _mm_or_si128(ptype0, vtag1);
+
+ /* Process err flags, simply set RECIP_ERR bit if HBO/IXE is set */
+ eflag1 = _mm_srli_epi16(eflag0, RXEFLAG_SHIFT);
+ eflag0 = _mm_srli_epi16(eflag0, HBOFLAG_SHIFT);
+ eflag0 = _mm_or_si128(eflag0, eflag1);
+ eflag0 = _mm_and_si128(eflag0, rxe_msk);
+ eflag0 = _mm_shuffle_epi8(rxe_flag, eflag0);
+
+ vtag1 = _mm_or_si128(eflag0, vtag1);
+
+ /* Process L4/L3 checksum error flags */
+ cksumflag = _mm_srli_epi16(cksumflag, L3L4EFLAG_SHIFT);
+ cksumflag = _mm_shuffle_epi8(l3l4cksum_flag, cksumflag);
+
+ /* clean the higher byte and shift back the flag bits */
+ cksumflag = _mm_and_si128(cksumflag, ol_flags_msk);
+ cksumflag = _mm_slli_epi16(cksumflag, CKSUM_SHIFT);
+ vtag1 = _mm_or_si128(cksumflag, vtag1);
+
+ vol.dword = _mm_cvtsi128_si64(vtag1);
+
+ rx_pkts[0]->ol_flags = vol.e[0];
+ rx_pkts[1]->ol_flags = vol.e[1];
+ rx_pkts[2]->ol_flags = vol.e[2];
+ rx_pkts[3]->ol_flags = vol.e[3];
+}
+
+/* @note: When this function is changed, make corresponding change to
+ * fm10k_dev_supported_ptypes_get().
+ */
+static inline void
+fm10k_desc_to_pktype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i l3l4type0, l3l4type1, l3type, l4type;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ /* L3 pkt type mask Bit4 to Bit6 */
+ const __m128i l3type_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0070, 0x0070, 0x0070, 0x0070);
+
+ /* L4 pkt type mask Bit7 to Bit9 */
+ const __m128i l4type_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0380, 0x0380, 0x0380, 0x0380);
+
+ /* convert RRC l3 type to mbuf format */
+ const __m128i l3type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L3_IPV6, RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV4, 0);
+
+ /* Convert RRC l4 type to mbuf format l4type_flags shift-left 8 bits
+ * to fill into8 bits length.
+ */
+ const __m128i l4type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
+ RTE_PTYPE_TUNNEL_GENEVE >> 8,
+ RTE_PTYPE_TUNNEL_NVGRE >> 8,
+ RTE_PTYPE_TUNNEL_VXLAN >> 8,
+ RTE_PTYPE_TUNNEL_GRE >> 8,
+ RTE_PTYPE_L4_UDP >> 8,
+ RTE_PTYPE_L4_TCP >> 8,
+ 0);
+
+ l3l4type0 = _mm_unpacklo_epi16(descs[0], descs[1]);
+ l3l4type1 = _mm_unpacklo_epi16(descs[2], descs[3]);
+ l3l4type0 = _mm_unpacklo_epi32(l3l4type0, l3l4type1);
+
+ l3type = _mm_and_si128(l3l4type0, l3type_msk);
+ l4type = _mm_and_si128(l3l4type0, l4type_msk);
+
+ l3type = _mm_srli_epi16(l3type, L3TYPE_SHIFT);
+ l4type = _mm_srli_epi16(l4type, L4TYPE_SHIFT);
+
+ l3type = _mm_shuffle_epi8(l3type_flags, l3type);
+ /* l4type_flags shift-left for 8 bits, need shift-right back */
+ l4type = _mm_shuffle_epi8(l4type_flags, l4type);
+
+ l4type = _mm_slli_epi16(l4type, 8);
+ l3l4type0 = _mm_or_si128(l3type, l4type);
+ vol.dword = _mm_cvtsi128_si64(l3l4type0);
+
+ rx_pkts[0]->packet_type = vol.e[0];
+ rx_pkts[1]->packet_type = vol.e[1];
+ rx_pkts[2]->packet_type = vol.e[2];
+ rx_pkts[3]->packet_type = vol.e[3];
+}
+#else
+#define fm10k_desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+#define fm10k_desc_to_pktype_v(desc, rx_pkts) do {} while (0)
+#endif
+
+int __attribute__((cold))
+fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
+ /* whithout rx ol_flags, no VP flag report */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /* no header split support */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
+
+int __attribute__((cold))
+fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ /* data_off will be ajusted after new mbuf allocated for 512-byte
+ * alignment.
+ */
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
+
+static inline void
+fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union fm10k_rx_desc *rxdp;
+ struct rte_mbuf **mb_alloc = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i head_off = _mm_set_epi64x(
+ RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1,
+ RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1);
+ __m128i dma_addr0, dma_addr1;
+ /* Rx buffer need to be aligned with 512 byte */
+ const __m128i hba_msk = _mm_set_epi64x(0,
+ UINT64_MAX - FM10K_RX_DATABUF_ALIGN + 1);
+
+ rxdp = rxq->hw_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)mb_alloc,
+ RTE_FM10K_RXQ_REARM_THRESH) < 0) {
+ dma_addr0 = _mm_setzero_si128();
+ /* Clean up all the HW/SW ring content */
+ for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i++) {
+ mb_alloc[i] = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].q,
+ dma_addr0);
+ }
+
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_FM10K_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i += 2, mb_alloc += 2) {
+ __m128i vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = mb_alloc[0];
+ mb1 = mb_alloc[1];
+
+ /* Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, head_off);
+ dma_addr1 = _mm_add_epi64(dma_addr1, head_off);
+
+ /* Do 512 byte alignment to satisfy HW requirement, in the
+ * meanwhile, set Header Buffer Address to zero.
+ */
+ dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
+ dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->q, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->q, dma_addr1);
+
+ /* enforce 512B alignment on default Rx virtual addresses */
+ mb0->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb0->buf_addr
+ + RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
+ - (char *)mb0->buf_addr);
+ mb1->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb1->buf_addr
+ + RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
+ - (char *)mb1->buf_addr);
+ }
+
+ rxq->rxrearm_start += RTE_FM10K_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_FM10K_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ FM10K_PCI_REG_WRITE(rxq->tail_ptr, rx_id);
+}
+
+void __attribute__((cold))
+fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq)
+{
+ const unsigned mask = rxq->nb_desc - 1;
+ unsigned i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ for (i = rxq->next_dd; i != rxq->rxrearm_start; i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ rxq->rxrearm_nb = rxq->nb_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_desc);
+}
+
+static inline uint16_t
+fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union fm10k_rx_desc *rxdp;
+ struct rte_mbuf **mbufp;
+ uint16_t nb_pkts_recd;
+ int pos;
+ struct fm10k_rx_queue *rxq = rx_queue;
+ uint64_t var;
+ __m128i shuf_msk;
+ __m128i dd_check, eop_check;
+ uint16_t next_dd;
+
+ next_dd = rxq->next_dd;
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->hw_ring + next_dd;
+
+ rte_prefetch0(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_FM10K_RXQ_REARM_THRESH)
+ fm10k_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD))
+ return 0;
+
+ /* Vecotr RX will process 4 packets at a time, strip the unaligned
+ * tails in case it's not multiple of 4.
+ */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP);
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 15, 14, /* octet 14~15, low 16 bits vlan_macip */
+ 13, 12, /* octet 12~13, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 13, 12, /* octet 12~13, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_type */
+ 0xFF, 0xFF /* Skip pkt_type field in shuffle operation */
+ );
+ /*
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ mbufp = &rxq->sw_ring[next_dd];
+
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_FM10K_DESCS_PER_LOOP,
+ rxdp += RTE_FM10K_DESCS_PER_LOOP) {
+ __m128i descs0[RTE_FM10K_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ __m128i mbp1;
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
+
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
+ mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);
+
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+ rte_compiler_barrier();
+
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf poitns */
+ mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
+#endif
+
+ descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ rte_compiler_barrier();
+ /* B.1 load 2 mbuf point */
+ descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ rte_compiler_barrier();
+ descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+#if defined(RTE_ARCH_X86_64)
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs0[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs0[2], shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs0[3], descs0[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = _mm_unpackhi_epi32(descs0[1], descs0[0]);
+
+ /* set ol_flags with vlan packet type */
+ fm10k_desc_to_olflags_v(descs0, &rx_pkts[pos]);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs0[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs0[0], shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+ /* the staterr values are not in order, as the count
+ * count of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += RTE_FM10K_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+
+ fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]);
+
+ /* C.4 calc avaialbe number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_FM10K_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->next_dd = (uint16_t)(rxq->next_dd + nb_pkts_recd);
+ rxq->next_dd = (uint16_t)(rxq->next_dd & (rxq->nb_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+/* vPMD receive routine
+ *
+ * Notice:
+ * - don't support ol_flags for rss and csum err
+ */
+uint16_t
+fm10k_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return fm10k_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+static inline uint16_t
+fm10k_reassemble_packets(struct fm10k_rx_queue *rxq,
+ struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[RTE_FM10K_MAX_RX_BURST]; /*finished pkts*/
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ start->packet_type = end->packet_type;
+#endif
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
+/*
+ * vPMD receive routine that reassembles scattered packets
+ *
+ * Notice:
+ * - don't support ol_flags for rss and csum err
+ * - nb_pkts > RTE_FM10K_MAX_RX_BURST, only scan RTE_FM10K_MAX_RX_BURST
+ * numbers of DD bit
+ */
+uint16_t
+fm10k_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fm10k_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_FM10K_MAX_RX_BURST] = {0};
+ unsigned i = 0;
+
+ /* Split_flags only can support max of RTE_FM10K_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_FM10K_MAX_RX_BURST);
+ /* get some new buffers */
+ uint16_t nb_bufs = fm10k_recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + fm10k_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static const struct fm10k_txq_ops vec_txq_ops = {
+ .reset = fm10k_reset_tx_queue,
+};
+
+void __attribute__((cold))
+fm10k_txq_vec_setup(struct fm10k_tx_queue *txq)
+{
+ txq->ops = &vec_txq_ops;
+}
+
+int __attribute__((cold))
+fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq)
+{
+ /* Vector TX can't offload any features yet */
+ if (txq->offloads != 0)
+ return -1;
+
+ if (txq->tx_ftag_en)
+ return -1;
+
+ return 0;
+}
+
+static inline void
+vtx1(volatile struct fm10k_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ __m128i descriptor = _mm_set_epi64x(flags << 56 |
+ pkt->vlan_tci << 16 | pkt->data_len,
+ MBUF_DMA_ADDR(pkt));
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct fm10k_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+static __rte_always_inline int
+fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
+{
+ struct rte_mbuf **txep;
+ uint8_t flags;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bit on threshold descriptor */
+ flags = txq->hw_ring[txq->next_dd].flags;
+ if (!(flags & FM10K_TXD_FLAG_DONE))
+ return 0;
+
+ n = txq->rs_thresh;
+
+ /* First buffer to free from S/W ring is at index
+ * next_dd - (rs_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->next_dd - (n - 1)];
+ m = rte_pktmbuf_prefree_seg(txep[0]);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i]);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool))
+ free[nb_free++] = m;
+ else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i]);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
+ txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
+ if (txq->next_dd >= txq->nb_desc)
+ txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+
+ return txq->rs_thresh;
+}
+
+static __rte_always_inline void
+tx_backlog_entry(struct rte_mbuf **txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i] = tx_pkts[i];
+}
+
+uint16_t
+fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
+ volatile struct fm10k_tx_desc *txdp;
+ struct rte_mbuf **txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = FM10K_TXD_FLAG_LAST;
+ uint64_t rs = FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_LAST;
+ int i;
+
+ /* cross rx_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);
+
+ if (txq->nb_free < txq->free_thresh)
+ fm10k_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->next_free;
+ txdp = &txq->hw_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+ /* avoid reach the end of ring */
+ txdp = &(txq->hw_ring[tx_id]);
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->next_rs) {
+ txq->hw_ring[txq->next_rs].flags |= FM10K_TXD_FLAG_RS;
+ txq->next_rs = (uint16_t)(txq->next_rs + txq->rs_thresh);
+ }
+
+ txq->next_free = tx_id;
+
+ FM10K_PCI_REG_WRITE(txq->tail_ptr, txq->next_free);
+
+ return nb_pkts;
+}
+
+static void __attribute__((cold))
+fm10k_reset_tx_queue(struct fm10k_tx_queue *txq)
+{
+ static const struct fm10k_tx_desc zeroed_desc = {0};
+ struct rte_mbuf **txe = txq->sw_ring;
+ uint16_t i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_desc; i++)
+ txq->hw_ring[i] = zeroed_desc;
+
+ /* Initialize SW ring entries */
+ for (i = 0; i < txq->nb_desc; i++)
+ txe[i] = NULL;
+
+ txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+ txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+ txq->next_free = 0;
+ txq->nb_used = 0;
+ /* Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->nb_free = (uint16_t)(txq->nb_desc - 1);
+ FM10K_PCI_REG_WRITE(txq->tail_ptr, 0);
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/meson.build b/src/spdk/dpdk/drivers/net/fm10k/meson.build
new file mode 100644
index 00000000..2772ea4d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/meson.build
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'fm10k_ethdev.c',
+ 'fm10k_rxtx.c',
+)
+if arch_subdir == 'x86'
+ dpdk_conf.set('RTE_LIBRTE_FM10K_INC_VECTOR', 1)
+ sources += files('fm10k_rxtx_vec.c')
+endif
+
+includes += include_directories('base')
diff --git a/src/spdk/dpdk/drivers/net/fm10k/rte_pmd_fm10k_version.map b/src/spdk/dpdk/drivers/net/fm10k/rte_pmd_fm10k_version.map
new file mode 100644
index 00000000..ef353984
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/rte_pmd_fm10k_version.map
@@ -0,0 +1,4 @@
+DPDK_2.0 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/i40e/Makefile b/src/spdk/dpdk/drivers/net/i40e/Makefile
new file mode 100644
index 00000000..3f869a8d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/Makefile
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_i40e.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -DPF_DRIVER -DVF_DRIVER -DINTEGRATED_VF
+CFLAGS += -DX722_A0_SUPPORT
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+
+EXPORT_MAP := rte_pmd_i40e_version.map
+
+LIBABIVER := 2
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings
+#
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS_BASE_DRIVER = -diag-disable 593
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+CFLAGS_BASE_DRIVER += -Wno-sign-compare
+CFLAGS_BASE_DRIVER += -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-unused-parameter
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing
+CFLAGS_BASE_DRIVER += -Wno-format
+CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers
+CFLAGS_BASE_DRIVER += -Wno-pointer-to-int-cast
+CFLAGS_BASE_DRIVER += -Wno-format-nonliteral
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+else
+CFLAGS_BASE_DRIVER = -Wno-sign-compare
+CFLAGS_BASE_DRIVER += -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-unused-parameter
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing
+CFLAGS_BASE_DRIVER += -Wno-format
+CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers
+CFLAGS_BASE_DRIVER += -Wno-pointer-to-int-cast
+CFLAGS_BASE_DRIVER += -Wno-format-nonliteral
+CFLAGS_BASE_DRIVER += -Wno-format-security
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+endif
+
+CFLAGS_i40e_lan_hmc.o += -Wno-error
+endif
+OBJS_BASE_DRIVER=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+# base driver is based on the package of dpdk-i40e.2016.04.18.12.tar.gz.
+#
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_adminq.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_diag.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_hmc.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_lan_hmc.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_nvm.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_dcb.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
+ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c
+else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_altivec.c
+else
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_sse.c
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_vf_representor.c
+
+ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
+ CC_AVX2_SUPPORT=1
+else
+ CC_AVX2_SUPPORT=\
+ $(shell $(CC) -march=core-avx2 -dM -E - </dev/null 2>&1 | \
+ grep -q AVX2 && echo 1)
+ ifeq ($(CC_AVX2_SUPPORT), 1)
+ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+ CFLAGS_i40e_rxtx_vec_avx2.o += -march=core-avx2
+ else
+ CFLAGS_i40e_rxtx_vec_avx2.o += -mavx2
+ endif
+ endif
+endif
+
+ifeq ($(CC_AVX2_SUPPORT), 1)
+ SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_avx2.c
+endif
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_I40E_PMD)-include := rte_pmd_i40e.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/README b/src/spdk/dpdk/drivers/net/i40e/base/README
new file mode 100644
index 00000000..247ba11d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/README
@@ -0,0 +1,59 @@
+..
+ BSD LICENSE
+
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Intel® I40E driver
+==================
+
+This directory contains source code of FreeBSD i40e driver of version
+cid-i40e.2018.01.02.tar.gz released by the team which develops
+basic drivers for any i40e NIC. The directory of base/ contains the
+original source package.
+This driver is valid for the product(s) listed below
+
+* Intel® Ethernet Converged Network Adapters X710
+* Intel® Ethernet Converged Network Adapters XL710
+* Intel® Ethernet Network Adapter XXV710
+* Intel® Ethernet Connection X722 for 10GBASE-T
+* Intel® Ethernet Connection X722 for 10GbE backplane
+* Intel® Ethernet Connection X722 for 10GbE SFP+
+* Intel® Ethernet Connection X722 for 1GbE
+* Intel® Ethernet Controller X710 and XL710 Family
+* Intel® Ethernet Controller XXV710 for 25GbE backplane
+* Intel® Ethernet Controller XXV710 for 25GbE SFP28
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ i40e_osdep.h
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.c
new file mode 100644
index 00000000..612be883
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.c
@@ -0,0 +1,1163 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (i40e_is_vf(hw)) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
+ hw->aq.asq.bal = I40E_VF_ATQBAL1;
+ hw->aq.asq.bah = I40E_VF_ATQBAH1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
+ hw->aq.arq.bal = I40E_VF_ARQBAL1;
+ hw->aq.arq.bah = I40E_VF_ARQBAH1;
+#ifdef PF_DRIVER
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
+ hw->aq.asq.bal = I40E_PF_ATQBAL;
+ hw->aq.asq.bah = I40E_PF_ATQBAH;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
+ hw->aq.arq.bal = I40E_PF_ARQBAL;
+ hw->aq.arq.bah = I40E_PF_ARQBAH;
+#endif
+ }
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with Admin queue design, there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
+ desc->params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+
+ /* set starting point */
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ if (!i40e_is_vf(hw))
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+ if (i40e_is_vf(hw))
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
+ wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
+ wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.asq.bal);
+ if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+
+ /* set starting point */
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ if (!i40e_is_vf(hw))
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+ if (i40e_is_vf(hw))
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
+ wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
+ wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.arq.bal);
+ if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+ wr32(hw, hw->aq.asq.bal, 0);
+ wr32(hw, hw->aq.asq.bah, 0);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+
+shutdown_asq_out:
+ i40e_release_spinlock(&hw->aq.asq_spinlock);
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+ wr32(hw, hw->aq.arq.bal, 0);
+ wr32(hw, hw->aq.arq.bah, 0);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+
+shutdown_arq_out:
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
+ return ret_code;
+}
+#ifdef PF_DRIVER
+
+/**
+ * i40e_resume_aq - resume AQ processing from 0
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void i40e_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
+#endif /* PF_DRIVER */
+
+/**
+ * i40e_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
+{
+#ifdef PF_DRIVER
+ u16 cfg_ptr, oem_hi, oem_lo;
+ u16 eetrack_lo, eetrack_hi;
+#endif
+ enum i40e_status_code ret_code;
+#ifdef PF_DRIVER
+ int retry = 0;
+#endif
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+ i40e_init_spinlock(&hw->aq.asq_spinlock);
+ i40e_init_spinlock(&hw->aq.arq_spinlock);
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* setup ASQ command write back timeout */
+ hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_destroy_spinlocks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_asq;
+
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ /* VF has no need of firmware */
+ if (i40e_is_vf(hw))
+ goto init_adminq_exit;
+#endif
+ /* There are some cases where the firmware may not be quite ready
+ * for AdminQ operations, so we retry the AdminQ setup a few times
+ * if we see timeouts in this first AQ call.
+ */
+ do {
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver,
+ &hw->aq.fw_min_ver,
+ &hw->aq.fw_build,
+ &hw->aq.api_maj_ver,
+ &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ break;
+ retry++;
+ i40e_msec_delay(100);
+ i40e_resume_aq(hw);
+ } while (retry < 10);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_arq;
+
+ /* get the NVM version info */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
+ &hw->nvm.version);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
+ hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+ i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+ &oem_hi);
+ i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+ &oem_lo);
+ hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
+
+ /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
+ if ((hw->aq.api_maj_ver > 1) ||
+ ((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver >= 7)))
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ }
+
+ /* Newer versions of firmware require lock when reading the NVM */
+ if ((hw->aq.api_maj_ver > 1) ||
+ ((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver >= 5)))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+ if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
+
+ /* pre-emptive resource lock release */
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ hw->nvm_release_on_done = false;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
+#endif /* PF_DRIVER */
+ ret_code = I40E_SUCCESS;
+
+ /* success! */
+ goto init_adminq_exit;
+
+#ifdef PF_DRIVER
+init_adminq_free_arq:
+ i40e_shutdown_arq(hw);
+#endif
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_spinlocks:
+ i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+ i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_queue_shutdown(hw, true);
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+ i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+ i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->nvm_buff.va)
+ i40e_free_virt_mem(hw, &hw->nvm_buff);
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * returns the number of free desc
+ **/
+u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_DMA);
+ cb_func(hw, &desc_cb);
+ }
+ i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
+ i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ * i40e_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+#ifdef VF_DRIVER
+bool i40e_asq_done(struct i40e_hw *hw)
+#else
+STATIC bool i40e_asq_done(struct i40e_hw *hw)
+#endif
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+
+}
+
+/**
+ * i40e_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc
+ **/
+enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+ u32 val = 0;
+
+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: head overrun at %d\n", val);
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ i40e_memcpy(details,
+ cmd_details,
+ sizeof(struct i40e_asq_cmd_details),
+ I40E_NONDMA_TO_NONDMA);
+
+ /* If the cmd_details are defined copy the cookie. The
+ * CPU_TO_LE32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
+ desc->cookie_low =
+ CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
+ }
+ } else {
+ i40e_memset(details, 0,
+ sizeof(struct i40e_asq_cmd_details),
+ I40E_NONDMA_MEM);
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~CPU_TO_LE16(details->flags_dis);
+ desc->flags |= CPU_TO_LE16(details->flags_ena);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
+ I40E_NONDMA_TO_DMA);
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ i40e_memcpy(dma_buff->va, buff, buff_size,
+ I40E_NONDMA_TO_DMA);
+ desc_on_ring->datalen = CPU_TO_LE16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (i40e_asq_done(hw))
+ break;
+ i40e_usec_delay(50);
+ total_delay += 50;
+ } while (total_delay < hw->aq.asq_cmd_timeout);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40e_asq_done(hw)) {
+ i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_NONDMA);
+ if (buff != NULL)
+ i40e_memcpy(buff, dma_buff->va, buff_size,
+ I40E_DMA_TO_NONDMA);
+ retval = LE16_TO_CPU(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = I40E_SUCCESS;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: desc and buffer writeback:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ i40e_memcpy(details->wb_desc, desc_on_ring,
+ sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+#ifdef PF_DRIVER
+ if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+#else
+ if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
+#endif
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+ }
+
+asq_send_command_error:
+ i40e_release_spinlock(&hw->aq.asq_spinlock);
+ return status;
+}
+
+/**
+ * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
+ I40E_NONDMA_MEM);
+ desc->opcode = CPU_TO_LE16(opcode);
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
+}
+
+/**
+ * i40e_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* pre-clean the event info */
+ i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);
+
+ /* take the lock before we start messing with the ring */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = I40E_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
+ /* set next_to_use to head */
+#ifdef INTEGRATED_VF
+ if (!i40e_is_vf(hw))
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+ else
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
+#else
+#ifdef PF_DRIVER
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
+#endif /* VF_DRIVER */
+#endif /* INTEGRATED_VF */
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
+ flags = LE16_TO_CPU(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ }
+
+ i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_NONDMA);
+ datalen = LE16_TO_CPU(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
+ i40e_memcpy(e->msg_buf,
+ hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_len, I40E_DMA_TO_NONDMA);
+
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
+
+ /* Restore the original datalen and buffer address in the desc,
+ * FW updates datalen to indicate the event message
+ * size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
+
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
+ desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+#ifdef PF_DRIVER
+ i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
+#endif /* PF_DRIVER */
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+clean_arq_element_err:
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
+
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.h
new file mode 100644
index 00000000..de4ab3f3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.h
@@ -0,0 +1,166 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_status.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+ struct i40e_aq_desc *wb_desc;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+ u32 asq_cmd_timeout; /* send queue cmd write back timeout*/
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
+ struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/**
+ * i40e_aq_rc_to_posix - convert errors to user-land codes
+ * aq_ret: AdminQ handler error code can override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
+ **/
+STATIC INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
+{
+ int aq_to_posix[] = {
+ 0, /* I40E_AQ_RC_OK */
+ -EPERM, /* I40E_AQ_RC_EPERM */
+ -ENOENT, /* I40E_AQ_RC_ENOENT */
+ -ESRCH, /* I40E_AQ_RC_ESRCH */
+ -EINTR, /* I40E_AQ_RC_EINTR */
+ -EIO, /* I40E_AQ_RC_EIO */
+ -ENXIO, /* I40E_AQ_RC_ENXIO */
+ -E2BIG, /* I40E_AQ_RC_E2BIG */
+ -EAGAIN, /* I40E_AQ_RC_EAGAIN */
+ -ENOMEM, /* I40E_AQ_RC_ENOMEM */
+ -EACCES, /* I40E_AQ_RC_EACCES */
+ -EFAULT, /* I40E_AQ_RC_EFAULT */
+ -EBUSY, /* I40E_AQ_RC_EBUSY */
+ -EEXIST, /* I40E_AQ_RC_EEXIST */
+ -EINVAL, /* I40E_AQ_RC_EINVAL */
+ -ENOTTY, /* I40E_AQ_RC_ENOTTY */
+ -ENOSPC, /* I40E_AQ_RC_ENOSPC */
+ -ENOSYS, /* I40E_AQ_RC_ENOSYS */
+ -ERANGE, /* I40E_AQ_RC_ERANGE */
+ -EPIPE, /* I40E_AQ_RC_EFLUSHED */
+ -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */
+ -EROFS, /* I40E_AQ_RC_EMODE */
+ -EFBIG, /* I40E_AQ_RC_EFBIG */
+ };
+
+ /* aq_rc is invalid if AQ timed out */
+ if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ return -EAGAIN;
+
+ if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
+ return -ERANGE;
+
+ return aq_to_posix[aq_rc];
+}
+
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
+
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h
new file mode 100644
index 00000000..801c0ff1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -0,0 +1,2822 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+ I40E_FW_API_VERSION_MINOR_X710 : \
+ I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets*/
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+ i40e_aqc_opc_clear_all_wol_filters = 0x025E,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+ i40e_aqc_opc_set_switch_config = 0x0205,
+ i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
+ i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
+ i40e_aqc_opc_replace_cloud_filters = 0x025F,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* Dynamic Device Personalization */
+ i40e_aqc_opc_write_personalization_profile = 0x0270,
+ i40e_aqc_opc_get_personalization_profile_list = 0x0271,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands*/
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+ i40e_aqc_opc_run_phy_activity = 0x0626,
+ i40e_aqc_opc_set_phy_register = 0x0628,
+ i40e_aqc_opc_get_phy_register = 0x0629,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_nvm_progress = 0x0706,
+ i40e_aqc_opc_oem_post_update = 0x0720,
+ i40e_aqc_opc_thermal_sensor = 0x0721,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+ i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
+ i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_ISCSI 0x0022
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0800
+#define I40E_AQ_ARP_UNSUP_CTL 0x1000
+#define I40E_AQ_ARP_ENA 0x2000
+#define I40E_AQ_ARP_ADD_IPV4 0x4000
+#define I40E_AQ_ARP_DEL_IPV4 0x8000
+ __le16 table_id;
+ __le32 enabled_offloads;
+#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
+#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
+ __le32 ip_addr;
+ u8 mac_addr[6];
+ u8 reserved[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0001
+#define I40E_AQ_NS_PROXY_DEL_0 0x0002
+#define I40E_AQ_NS_PROXY_ADD_1 0x0004
+#define I40E_AQ_NS_PROXY_DEL_1 0x0008
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
+#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
+#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_MC_MAG_EN_VALID 0x100
+#define I40E_AQC_WOL_PRESERVE_STATUS 0x200
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_MC_MAG_EN 0x0100
+#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+#define I40E_AQC_WRITE_TYPE_MASK 0xC000
+
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+ __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+ __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
+ __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+ u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an seid and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements are returned in the response buffer
+ * the first in the array is the header, remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+
+/* Set Switch Configuration (direct 0x0205) */
+struct i40e_aqc_set_switch_config {
+ __le16 flags;
+/* flags used for both fields below */
+#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
+#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
+#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004
+ __le16 valid_flags;
+ /* The ethertype in switch_tag is dropped on ingress and used
+ * internally by the switch. Set this to zero for the default
+ * of 0x88a8 (802.1ad). Should be zero for firmware API
+ * versions lower than 1.7.
+ */
+ __le16 switch_tag;
+ /* The ethertypes in first_tag and second_tag are used to
+ * match the outer and inner VLAN tags (respectively) when HW
+ * double VLAN tagging is enabled via the set port parameters
+ * AQ command. Otherwise these are both ignored. Set them to
+ * zero for their defaults of 0x8100 (802.1Q). Should be zero
+ * for firmware API versions lower than 1.7.
+ */
+ __le16 first_tag;
+ __le16 second_tag;
+ u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
+
+/* Read Receive control registers (direct 0x0206)
+ * Write Receive control registers (direct 0x0207)
+ * used for accessing Rx control registers that can be
+ * slow and need special handling when under high Rx load
+ */
+struct i40e_aqc_rx_ctl_reg_read_write {
+ __le32 reserved1;
+ __le32 address;
+ __le32 reserved2;
+ __le32 value;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write);
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+ /* first 96 byte are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */
+#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used for the command for most vlan commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_oveb_cloud,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 big_buffer_flag;
+#define I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER 1
+ u8 reserved2[3];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+/* 0x0010 to 0x0017 is for custom filters */
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
+ /* response section */
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
+
+/* i40e_aqc_add_rm_cloud_filt_elem_ext is used when
+ * I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set. refer to
+ * DCR288
+ */
+struct i40e_aqc_add_rm_cloud_filt_elem_ext {
+ struct i40e_aqc_add_remove_cloud_filters_element_data element;
+ u16 general_fields[32];
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Replace filter Command 0x025F
+ * uses the i40e_aqc_replace_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_filter_data {
+ u8 filter_type;
+ u8 input[3];
+};
+
+struct i40e_aqc_replace_cloud_filters_cmd {
+ u8 valid_flags;
+#define I40E_AQC_REPLACE_L1_FILTER 0x0
+#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
+#define I40E_AQC_GET_CLOUD_FILTERS 0x2
+#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
+#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
+ u8 old_filter_type;
+ u8 new_filter_type;
+ u8 tr_bit;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_replace_cloud_filters_cmd_buf {
+ u8 data[32];
+/* Filter type INPUT codes*/
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED (1 << 7UL)
+
+/* Field Vector offsets */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
+ struct i40e_filter_data filters[8];
+};
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ * take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
+/* Dynamic Device Personalization */
+struct i40e_aqc_write_personalization_profile {
+ u8 flags;
+ u8 reserved[3];
+ __le32 profile_track_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
+
+struct i40e_aqc_write_ddp_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_get_applied_profiles {
+ u8 flags;
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
+ u8 rsv[3];
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
+
+/* DCB 0x03xx*/
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
+};
+
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
+
+/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40,
+ i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+
+/* Configure Switching Component Bandwidth Allocation per Tc
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
+
+/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
+I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
+ I40E_PHY_TYPE_UNSUPPORTED = 0xF,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_25GBASE_KR = 0x1F,
+ I40E_PHY_TYPE_25GBASE_CR = 0x20,
+ I40E_PHY_TYPE_25GBASE_SR = 0x21,
+ I40E_PHY_TYPE_25GBASE_LR = 0x22,
+ I40E_PHY_TYPE_25GBASE_AOC = 0x23,
+ I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+ I40E_PHY_TYPE_MAX,
+ I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
+ I40E_PHY_TYPE_EMPTY = 0xFE,
+ I40E_PHY_TYPE_DEFAULT = 0xFF,
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = (1 << I40E_LINK_SPEED_25GB_SHIFT),
+};
+
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
+#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
+#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_ENABLE_FEC_KR 0x01
+#define I40E_AQ_ENABLE_FEC_RS 0x02
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+#define I40E_AQ_FEC
+#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
+
+ u8 ext_comp_code;
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 phy_type_ext;
+ u8 fec_config;
+#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define I40E_AQ_SET_FEC_AUTO BIT(4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
+ u8 reserved;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01 /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_LINK_UP_PORT 0x20
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
+/* 25G Error Codes */
+#define I40E_AQ_25G_NO_ERR 0X00
+#define I40E_AQ_25G_NOT_PRESENT 0X01
+#define I40E_AQ_25G_NVM_CRC_ERR 0X02
+#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03
+#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
+#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+/* Since firmware API 1.7 loopback field keeps power class info as well */
+#define I40E_AQ_LOOPBACK_MASK 0x07
+#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
+#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
+#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ union {
+ struct {
+ u8 power_desc;
+#define I40E_AQ_LINK_POWER_CLASS_1 0x00
+#define I40E_AQ_LINK_POWER_CLASS_2 0x01
+#define I40E_AQ_LINK_POWER_CLASS_3 0x02
+#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+#define I40E_AQ_PWR_CLASS_MASK 0x03
+ u8 reserved[4];
+ };
+ struct {
+ u8 link_type[4];
+ u8 link_type_ext;
+ };
+ };
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
+
+/* Set event mask command (direct 0x613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+/* Disable link manageability on a single port */
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+/* Disable link manageability on all ports needs both bits 4 and 5 */
+#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* Run PHY Activity (0x0626) */
+struct i40e_aqc_run_phy_activity {
+ __le16 activity_id;
+ u8 flags;
+ u8 reserved1;
+ __le32 control;
+ __le32 data;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+
+/* Set PHY Register command (0x0628) */
+/* Get PHY Register command (0x0629) */
+struct i40e_aqc_phy_register_access {
+ u8 phy_interface;
+#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
+ u8 dev_addres;
+ u8 reserved1[2];
+ __le32 reg_address;
+ __le32 reg_value;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
+#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ __le16 element_id_msw; /* MSWord of field ID */
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+ (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE 0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
+#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
+#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+ __le32 field_id;
+ __le32 field_value;
+ __le16 field_options;
+ __le16 reserved;
+};
+
+I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+struct i40e_aqc_nvm_oem_post_update {
+#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
+ u8 sel_data;
+ u8 reserved[7];
+};
+
+I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
+
+struct i40e_aqc_nvm_oem_post_update_buffer {
+ u8 str_len;
+ u8 dev_addr;
+ __le16 eeprom_addr;
+ u8 data[36];
+};
+
+I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+
+/* Thermal Sensor (indirect 0x0721)
+ * read or set thermal sensor configs and values
+ * takes a sensor and command specific data buffer, not detailed here
+ */
+struct i40e_aqc_thermal_sensor {
+ u8 sensor_action;
+#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0
+#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1
+#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor);
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+ u8 command;
+#define I40E_AQ_DCB_SET_AGENT 0x1
+#define I40E_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
+ */
+
+#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
+#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+
+#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct. Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
+struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3[2];
+ __le16 oper_app_prio;
+ u8 reserved4[2];
+ __le16 tlv_status;
+};
+
+I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct i40e_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+ __le32 tlv_status;
+ u8 reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
+ */
+struct i40e_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
+ u8 type;
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
+
+struct i40e_aqc_lldp_set_local_mib_resp {
+#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01
+ u8 status;
+ u8 reserved[15];
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_lldp_set_local_mib_resp);
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting specific LLDP agent. e.g. DCBx
+ */
+struct i40e_aqc_lldp_stop_start_specific_agent {
+#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
+#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
+ (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+ u8 command;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
+
+/* Add Udp Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11
+ u8 reserved1[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ __le16 param_value2;
+ u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* Initialize OCSD (0xFE02, direct) */
+struct i40e_aqc_opc_oem_ocsd_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocsd_memory_block_addr_high;
+ __le32 ocsd_memory_block_addr_low;
+ __le32 requested_update_interval;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB (0xFE03, direct) */
+struct i40e_aqc_opc_oem_ocbb_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocbb_memory_block_addr_high;
+ __le32 ocbb_memory_block_addr_low;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test more (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX 0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define I40E_AQ_CLUSTER_ID_TXSCHED 2
+#define I40E_AQ_CLUSTER_ID_HMC 3
+#define I40E_AQ_CLUSTER_ID_MAC0 4
+#define I40E_AQ_CLUSTER_ID_MAC1 5
+#define I40E_AQ_CLUSTER_ID_MAC2 6
+#define I40E_AQ_CLUSTER_ID_MAC3 7
+#define I40E_AQ_CLUSTER_ID_DCB 8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
+
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif /* _I40E_ADMINQ_CMD_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_alloc.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_alloc.h
new file mode 100644
index 00000000..38c2f655
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_alloc.h
@@ -0,0 +1,65 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+enum i40e_status_code i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+enum i40e_status_code i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+enum i40e_status_code i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+enum i40e_status_code i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_common.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_common.c
new file mode 100644
index 00000000..e0a5be14
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_common.c
@@ -0,0 +1,7814 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "virtchnl.h"
+
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
+enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
+#else
+STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
+#endif
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ DEBUGFUNC("i40e_set_mac_type\n");
+
+ if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+#ifdef X722_A0_SUPPORT
+ case I40E_DEV_ID_X722_A0:
+#endif
+ case I40E_DEV_ID_KX_X722:
+ case I40E_DEV_ID_QSFP_X722:
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
+ case I40E_DEV_ID_X722_VF:
+#ifdef X722_A0_SUPPORT
+ case I40E_DEV_ID_X722_A0_VF:
+#endif
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
+#endif /* INTEGRATED_VF || VF_DRIVER */
+#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
+ case I40E_DEV_ID_ADAPTIVE_VF:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+#endif
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ DEBUGOUT2("i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
+{
+ switch (stat_err) {
+ case I40E_SUCCESS:
+ return "OK";
+ case I40E_ERR_NVM:
+ return "I40E_ERR_NVM";
+ case I40E_ERR_NVM_CHECKSUM:
+ return "I40E_ERR_NVM_CHECKSUM";
+ case I40E_ERR_PHY:
+ return "I40E_ERR_PHY";
+ case I40E_ERR_CONFIG:
+ return "I40E_ERR_CONFIG";
+ case I40E_ERR_PARAM:
+ return "I40E_ERR_PARAM";
+ case I40E_ERR_MAC_TYPE:
+ return "I40E_ERR_MAC_TYPE";
+ case I40E_ERR_UNKNOWN_PHY:
+ return "I40E_ERR_UNKNOWN_PHY";
+ case I40E_ERR_LINK_SETUP:
+ return "I40E_ERR_LINK_SETUP";
+ case I40E_ERR_ADAPTER_STOPPED:
+ return "I40E_ERR_ADAPTER_STOPPED";
+ case I40E_ERR_INVALID_MAC_ADDR:
+ return "I40E_ERR_INVALID_MAC_ADDR";
+ case I40E_ERR_DEVICE_NOT_SUPPORTED:
+ return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+ case I40E_ERR_MASTER_REQUESTS_PENDING:
+ return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_INVALID_LINK_SETTINGS:
+ return "I40E_ERR_INVALID_LINK_SETTINGS";
+ case I40E_ERR_AUTONEG_NOT_COMPLETE:
+ return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+ case I40E_ERR_RESET_FAILED:
+ return "I40E_ERR_RESET_FAILED";
+ case I40E_ERR_SWFW_SYNC:
+ return "I40E_ERR_SWFW_SYNC";
+ case I40E_ERR_NO_AVAILABLE_VSI:
+ return "I40E_ERR_NO_AVAILABLE_VSI";
+ case I40E_ERR_NO_MEMORY:
+ return "I40E_ERR_NO_MEMORY";
+ case I40E_ERR_BAD_PTR:
+ return "I40E_ERR_BAD_PTR";
+ case I40E_ERR_RING_FULL:
+ return "I40E_ERR_RING_FULL";
+ case I40E_ERR_INVALID_PD_ID:
+ return "I40E_ERR_INVALID_PD_ID";
+ case I40E_ERR_INVALID_QP_ID:
+ return "I40E_ERR_INVALID_QP_ID";
+ case I40E_ERR_INVALID_CQ_ID:
+ return "I40E_ERR_INVALID_CQ_ID";
+ case I40E_ERR_INVALID_CEQ_ID:
+ return "I40E_ERR_INVALID_CEQ_ID";
+ case I40E_ERR_INVALID_AEQ_ID:
+ return "I40E_ERR_INVALID_AEQ_ID";
+ case I40E_ERR_INVALID_SIZE:
+ return "I40E_ERR_INVALID_SIZE";
+ case I40E_ERR_INVALID_ARP_INDEX:
+ return "I40E_ERR_INVALID_ARP_INDEX";
+ case I40E_ERR_INVALID_FPM_FUNC_ID:
+ return "I40E_ERR_INVALID_FPM_FUNC_ID";
+ case I40E_ERR_QP_INVALID_MSG_SIZE:
+ return "I40E_ERR_QP_INVALID_MSG_SIZE";
+ case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+ return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+ case I40E_ERR_INVALID_FRAG_COUNT:
+ return "I40E_ERR_INVALID_FRAG_COUNT";
+ case I40E_ERR_QUEUE_EMPTY:
+ return "I40E_ERR_QUEUE_EMPTY";
+ case I40E_ERR_INVALID_ALIGNMENT:
+ return "I40E_ERR_INVALID_ALIGNMENT";
+ case I40E_ERR_FLUSHED_QUEUE:
+ return "I40E_ERR_FLUSHED_QUEUE";
+ case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+ case I40E_ERR_INVALID_IMM_DATA_SIZE:
+ return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+ case I40E_ERR_TIMEOUT:
+ return "I40E_ERR_TIMEOUT";
+ case I40E_ERR_OPCODE_MISMATCH:
+ return "I40E_ERR_OPCODE_MISMATCH";
+ case I40E_ERR_CQP_COMPL_ERROR:
+ return "I40E_ERR_CQP_COMPL_ERROR";
+ case I40E_ERR_INVALID_VF_ID:
+ return "I40E_ERR_INVALID_VF_ID";
+ case I40E_ERR_INVALID_HMCFN_ID:
+ return "I40E_ERR_INVALID_HMCFN_ID";
+ case I40E_ERR_BACKING_PAGE_ERROR:
+ return "I40E_ERR_BACKING_PAGE_ERROR";
+ case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case I40E_ERR_INVALID_PBLE_INDEX:
+ return "I40E_ERR_INVALID_PBLE_INDEX";
+ case I40E_ERR_INVALID_SD_INDEX:
+ return "I40E_ERR_INVALID_SD_INDEX";
+ case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+ return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+ case I40E_ERR_INVALID_SD_TYPE:
+ return "I40E_ERR_INVALID_SD_TYPE";
+ case I40E_ERR_MEMCPY_FAILED:
+ return "I40E_ERR_MEMCPY_FAILED";
+ case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+ return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+ case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+ return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+ case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+ case I40E_ERR_SRQ_ENABLED:
+ return "I40E_ERR_SRQ_ENABLED";
+ case I40E_ERR_ADMIN_QUEUE_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_ERROR";
+ case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+ case I40E_ERR_BUF_TOO_SHORT:
+ return "I40E_ERR_BUF_TOO_SHORT";
+ case I40E_ERR_ADMIN_QUEUE_FULL:
+ return "I40E_ERR_ADMIN_QUEUE_FULL";
+ case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+ return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+ case I40E_ERR_BAD_IWARP_CQE:
+ return "I40E_ERR_BAD_IWARP_CQE";
+ case I40E_ERR_NVM_BLANK_MODE:
+ return "I40E_ERR_NVM_BLANK_MODE";
+ case I40E_ERR_NOT_IMPLEMENTED:
+ return "I40E_ERR_NOT_IMPLEMENTED";
+ case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+ case I40E_ERR_DIAG_TEST_FAILED:
+ return "I40E_ERR_DIAG_TEST_FAILED";
+ case I40E_ERR_NOT_READY:
+ return "I40E_ERR_NOT_READY";
+ case I40E_NOT_SUPPORTED:
+ return "I40E_NOT_SUPPORTED";
+ case I40E_ERR_FIRMWARE_API_VERSION:
+ return "I40E_ERR_FIRMWARE_API_VERSION";
+ case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+/**
+ * i40e_debug_aq
+ * @hw: debug mask related to admin queue
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer, u16 buf_len)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u8 *buf = (u8 *)buffer;
+ u16 len;
+ u16 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ len = LE16_TO_CPU(aq_desc->datalen);
+
+ i40e_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ LE16_TO_CPU(aq_desc->opcode),
+ LE16_TO_CPU(aq_desc->flags),
+ LE16_TO_CPU(aq_desc->datalen),
+ LE16_TO_CPU(aq_desc->retval));
+ i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->cookie_high),
+ LE32_TO_CPU(aq_desc->cookie_low));
+ i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.internal.param0),
+ LE32_TO_CPU(aq_desc->params.internal.param1));
+ i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.external.addr_high),
+ LE32_TO_CPU(aq_desc->params.external.addr_low));
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+ /* write the full 16-byte chunks */
+ for (i = 0; i < (len - 16); i += 16)
+ i40e_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i, buf[i], buf[i+1], buf[i+2], buf[i+3],
+ buf[i+4], buf[i+5], buf[i+6], buf[i+7],
+ buf[i+8], buf[i+9], buf[i+10], buf[i+11],
+ buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
+ /* the most we could have left is 16 bytes, pad with zeros */
+ if (i < len) {
+ char d_buf[16];
+ int j, i_sav;
+
+ i_sav = i;
+ memset(d_buf, 0, sizeof(d_buf));
+ for (j = 0; i < len; j++, i++)
+ d_buf[j] = buf[i];
+ i40e_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
+ d_buf[4], d_buf[5], d_buf[6], d_buf[7],
+ d_buf[8], d_buf[9], d_buf[10], d_buf[11],
+ d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
+ }
+ }
+}
+
+/**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if Queue is enabled else false.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+ if (hw->aq.asq.len)
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ if (!i40e_is_vf(hw))
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+#else
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+ if (i40e_is_vf(hw))
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_VF_ATQLEN1_ATQENABLE_MASK);
+#else
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_VF_ATQLEN1_ATQENABLE_MASK);
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
+ return false;
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+STATIC enum i40e_status_code i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get the RSS key per VSI
+ **/
+STATIC enum i40e_status_code i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ **/
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
+/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40e_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ I40E_RX_PTYPE_##OUTER_FRAG, \
+ I40E_RX_PTYPE_TUNNEL_##T, \
+ I40E_RX_PTYPE_TUNNEL_END_##TE, \
+ I40E_RX_PTYPE_##TEF, \
+ I40E_RX_PTYPE_INNER_PROT_##I, \
+ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
+ /* L2 Packet types */
+ I40E_PTT_UNUSED_ENTRY(0),
+ I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(4),
+ I40E_PTT_UNUSED_ENTRY(5),
+ I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(8),
+ I40E_PTT_UNUSED_ENTRY(9),
+ I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(25),
+ I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(32),
+ I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(39),
+ I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(47),
+ I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(54),
+ I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(62),
+ I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(69),
+ I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(77),
+ I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(84),
+ I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(91),
+ I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(98),
+ I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(105),
+ I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(113),
+ I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(120),
+ I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(128),
+ I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(135),
+ I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(143),
+ I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(150),
+ I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ I40E_PTT_UNUSED_ENTRY(154),
+ I40E_PTT_UNUSED_ENTRY(155),
+ I40E_PTT_UNUSED_ENTRY(156),
+ I40E_PTT_UNUSED_ENTRY(157),
+ I40E_PTT_UNUSED_ENTRY(158),
+ I40E_PTT_UNUSED_ENTRY(159),
+
+ I40E_PTT_UNUSED_ENTRY(160),
+ I40E_PTT_UNUSED_ENTRY(161),
+ I40E_PTT_UNUSED_ENTRY(162),
+ I40E_PTT_UNUSED_ENTRY(163),
+ I40E_PTT_UNUSED_ENTRY(164),
+ I40E_PTT_UNUSED_ENTRY(165),
+ I40E_PTT_UNUSED_ENTRY(166),
+ I40E_PTT_UNUSED_ENTRY(167),
+ I40E_PTT_UNUSED_ENTRY(168),
+ I40E_PTT_UNUSED_ENTRY(169),
+
+ I40E_PTT_UNUSED_ENTRY(170),
+ I40E_PTT_UNUSED_ENTRY(171),
+ I40E_PTT_UNUSED_ENTRY(172),
+ I40E_PTT_UNUSED_ENTRY(173),
+ I40E_PTT_UNUSED_ENTRY(174),
+ I40E_PTT_UNUSED_ENTRY(175),
+ I40E_PTT_UNUSED_ENTRY(176),
+ I40E_PTT_UNUSED_ENTRY(177),
+ I40E_PTT_UNUSED_ENTRY(178),
+ I40E_PTT_UNUSED_ENTRY(179),
+
+ I40E_PTT_UNUSED_ENTRY(180),
+ I40E_PTT_UNUSED_ENTRY(181),
+ I40E_PTT_UNUSED_ENTRY(182),
+ I40E_PTT_UNUSED_ENTRY(183),
+ I40E_PTT_UNUSED_ENTRY(184),
+ I40E_PTT_UNUSED_ENTRY(185),
+ I40E_PTT_UNUSED_ENTRY(186),
+ I40E_PTT_UNUSED_ENTRY(187),
+ I40E_PTT_UNUSED_ENTRY(188),
+ I40E_PTT_UNUSED_ENTRY(189),
+
+ I40E_PTT_UNUSED_ENTRY(190),
+ I40E_PTT_UNUSED_ENTRY(191),
+ I40E_PTT_UNUSED_ENTRY(192),
+ I40E_PTT_UNUSED_ENTRY(193),
+ I40E_PTT_UNUSED_ENTRY(194),
+ I40E_PTT_UNUSED_ENTRY(195),
+ I40E_PTT_UNUSED_ENTRY(196),
+ I40E_PTT_UNUSED_ENTRY(197),
+ I40E_PTT_UNUSED_ENTRY(198),
+ I40E_PTT_UNUSED_ENTRY(199),
+
+ I40E_PTT_UNUSED_ENTRY(200),
+ I40E_PTT_UNUSED_ENTRY(201),
+ I40E_PTT_UNUSED_ENTRY(202),
+ I40E_PTT_UNUSED_ENTRY(203),
+ I40E_PTT_UNUSED_ENTRY(204),
+ I40E_PTT_UNUSED_ENTRY(205),
+ I40E_PTT_UNUSED_ENTRY(206),
+ I40E_PTT_UNUSED_ENTRY(207),
+ I40E_PTT_UNUSED_ENTRY(208),
+ I40E_PTT_UNUSED_ENTRY(209),
+
+ I40E_PTT_UNUSED_ENTRY(210),
+ I40E_PTT_UNUSED_ENTRY(211),
+ I40E_PTT_UNUSED_ENTRY(212),
+ I40E_PTT_UNUSED_ENTRY(213),
+ I40E_PTT_UNUSED_ENTRY(214),
+ I40E_PTT_UNUSED_ENTRY(215),
+ I40E_PTT_UNUSED_ENTRY(216),
+ I40E_PTT_UNUSED_ENTRY(217),
+ I40E_PTT_UNUSED_ENTRY(218),
+ I40E_PTT_UNUSED_ENTRY(219),
+
+ I40E_PTT_UNUSED_ENTRY(220),
+ I40E_PTT_UNUSED_ENTRY(221),
+ I40E_PTT_UNUSED_ENTRY(222),
+ I40E_PTT_UNUSED_ENTRY(223),
+ I40E_PTT_UNUSED_ENTRY(224),
+ I40E_PTT_UNUSED_ENTRY(225),
+ I40E_PTT_UNUSED_ENTRY(226),
+ I40E_PTT_UNUSED_ENTRY(227),
+ I40E_PTT_UNUSED_ENTRY(228),
+ I40E_PTT_UNUSED_ENTRY(229),
+
+ I40E_PTT_UNUSED_ENTRY(230),
+ I40E_PTT_UNUSED_ENTRY(231),
+ I40E_PTT_UNUSED_ENTRY(232),
+ I40E_PTT_UNUSED_ENTRY(233),
+ I40E_PTT_UNUSED_ENTRY(234),
+ I40E_PTT_UNUSED_ENTRY(235),
+ I40E_PTT_UNUSED_ENTRY(236),
+ I40E_PTT_UNUSED_ENTRY(237),
+ I40E_PTT_UNUSED_ENTRY(238),
+ I40E_PTT_UNUSED_ENTRY(239),
+
+ I40E_PTT_UNUSED_ENTRY(240),
+ I40E_PTT_UNUSED_ENTRY(241),
+ I40E_PTT_UNUSED_ENTRY(242),
+ I40E_PTT_UNUSED_ENTRY(243),
+ I40E_PTT_UNUSED_ENTRY(244),
+ I40E_PTT_UNUSED_ENTRY(245),
+ I40E_PTT_UNUSED_ENTRY(246),
+ I40E_PTT_UNUSED_ENTRY(247),
+ I40E_PTT_UNUSED_ENTRY(248),
+ I40E_PTT_UNUSED_ENTRY(249),
+
+ I40E_PTT_UNUSED_ENTRY(250),
+ I40E_PTT_UNUSED_ENTRY(251),
+ I40E_PTT_UNUSED_ENTRY(252),
+ I40E_PTT_UNUSED_ENTRY(253),
+ I40E_PTT_UNUSED_ENTRY(254),
+ I40E_PTT_UNUSED_ENTRY(255)
+};
+
+
+/**
+ * i40e_validate_mac_addr - Validate unicast MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ DEBUGFUNC("i40e_validate_mac_addr");
+
+ /* Broadcast addresses ARE multicast addresses
+ * Make sure it is not a multicast address
+ * Reject the zero address
+ */
+ if (I40E_IS_MULTICAST(mac_addr) ||
+ (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+#ifdef PF_DRIVER
+
+/**
+ * i40e_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the MAC type and PHY code and inits the NVM.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The i40e_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u32 port, ari, func_rid;
+
+ DEBUGFUNC("i40e_init_shared_code");
+
+ i40e_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ case I40E_MAC_X722:
+ break;
+ default:
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw->phy.get_link_info = true;
+
+ /* Determine port number and PF number*/
+ port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
+ >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
+ hw->port = (u8)port;
+ ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
+ I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
+ func_rid = rd32(hw, I40E_PF_FUNC_RID);
+ if (ari)
+ hw->pf_id = (u8)(func_rid & 0xff);
+ else
+ hw->pf_id = (u8)(func_rid & 0x7);
+
+ if (hw->mac.type == I40E_MAC_X722)
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+ status = i40e_init_nvm(hw);
+ return status;
+}
+
+/**
+ * i40e_aq_mac_address_read - Retrieve the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: a return indicator of what addresses were added to the addr store
+ * @addrs: the requestor's mac addr store
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+STATIC enum i40e_status_code i40e_aq_mac_address_read(struct i40e_hw *hw,
+ u16 *flags,
+ struct i40e_aqc_mac_address_read_data *addrs,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_read *cmd_data =
+ (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, addrs,
+ sizeof(*addrs), cmd_details);
+ *flags = LE16_TO_CPU(cmd_data->command_flags);
+
+ return status;
+}
+
+/**
+ * i40e_aq_mac_address_write - Change the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: indicates which MAC to be written
+ * @mac_addr: address to write
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_write *cmd_data =
+ (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_mac_address_write);
+ cmd_data->command_flags = CPU_TO_LE16(flags);
+ cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]);
+ cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) |
+ ((u32)mac_addr[3] << 16) |
+ ((u32)mac_addr[4] << 8) |
+ mac_addr[5]);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_mac_addr - get MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to MAC address
+ *
+ * Reads the adapter's MAC address from register
+ **/
+enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+
+ if (flags & I40E_AQC_LAN_ADDR_VALID)
+ i40e_memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac),
+ I40E_NONDMA_TO_NONDMA);
+
+ return status;
+}
+
+/**
+ * i40e_get_port_mac_addr - get Port MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to Port MAC address
+ *
+ * Reads the adapter's Port MAC address
+ **/
+enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+ if (status)
+ return status;
+
+ if (flags & I40E_AQC_PORT_ADDR_VALID)
+ i40e_memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac),
+ I40E_NONDMA_TO_NONDMA);
+ else
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * i40e_pre_tx_queue_cfg - pre tx queue configure
+ * @hw: pointer to the HW structure
+ * @queue: target pf queue index
+ * @enable: state change request
+ *
+ * Handles hw requirement to indicate intention to enable
+ * or disable target queue.
+ **/
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+{
+ u32 abs_queue_idx = hw->func_caps.base_queue + queue;
+ u32 reg_block = 0;
+ u32 reg_val;
+
+ if (abs_queue_idx >= 128) {
+ reg_block = abs_queue_idx / 128;
+ abs_queue_idx %= 128;
+ }
+
+ reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+
+ if (enable)
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
+ else
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
+}
+
+/**
+ * i40e_get_san_mac_addr - get SAN MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to SAN MAC address
+ *
+ * Reads the adapter's SAN MAC address from NVM
+ **/
+enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw,
+ u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+ if (status)
+ return status;
+
+ if (flags & I40E_AQC_SAN_ADDR_VALID)
+ i40e_memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac),
+ I40E_NONDMA_TO_NONDMA);
+ else
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * i40e_read_pba_string - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u16 pba_word = 0;
+ u16 pba_size = 0;
+ u16 pba_ptr = 0;
+ u16 i = 0;
+
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
+ if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) {
+ DEBUGOUT("Failed to read PBA flags or flag is invalid.\n");
+ return status;
+ }
+
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
+ if (status != I40E_SUCCESS) {
+ DEBUGOUT("Failed to read PBA Block pointer.\n");
+ return status;
+ }
+
+ status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
+ if (status != I40E_SUCCESS) {
+ DEBUGOUT("Failed to read PBA Block size.\n");
+ return status;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ DEBUGOUT("Buffer to small for PBA data.\n");
+ return I40E_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
+ if (status != I40E_SUCCESS) {
+ DEBUGOUT1("Failed to read PBA Block word %d.\n", i);
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
+
+/**
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
+{
+ enum i40e_media_type media;
+
+ switch (hw->phy.link_info.phy_type) {
+ case I40E_PHY_TYPE_10GBASE_SR:
+ case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_1000BASE_SX:
+ case I40E_PHY_TYPE_1000BASE_LX:
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ case I40E_PHY_TYPE_25GBASE_SR:
+ media = I40E_MEDIA_TYPE_FIBER;
+ break;
+ case I40E_PHY_TYPE_100BASE_TX:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_10GBASE_T:
+ media = I40E_MEDIA_TYPE_BASET;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ case I40E_PHY_TYPE_40GBASE_AOC:
+ case I40E_PHY_TYPE_10GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_CR:
+ case I40E_PHY_TYPE_25GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_ACC:
+ media = I40E_MEDIA_TYPE_DA;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ case I40E_PHY_TYPE_20GBASE_KR2:
+ case I40E_PHY_TYPE_25GBASE_KR:
+ media = I40E_MEDIA_TYPE_BACKPLANE;
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ default:
+ media = I40E_MEDIA_TYPE_UNKNOWN;
+ break;
+ }
+
+ return media;
+}
+
+#define I40E_PF_RESET_WAIT_COUNT 200
+/**
+ * i40e_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * Assuming someone else has triggered a global reset,
+ * assure the global reset is complete and then reset the PF
+ **/
+enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
+{
+ u32 cnt = 0;
+ u32 cnt1 = 0;
+ u32 reg = 0;
+ u32 grst_del;
+
+ /* Poll for Global Reset steady state in case of recent GRST.
+ * The grst delay value is in 100ms units, and we'll wait a
+ * couple counts longer to be sure we don't just miss the end.
+ */
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+ I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+ I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+
+ grst_del = grst_del * 20;
+
+ for (cnt = 0; cnt < grst_del; cnt++) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ break;
+ i40e_msec_delay(100);
+ }
+ if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ DEBUGOUT("Global reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* Now Wait for the FW to be ready */
+ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+ reg = rd32(hw, I40E_GLNVM_ULD);
+ reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+ if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+ DEBUGOUT1("Core and Global modules ready %d\n", cnt1);
+ break;
+ }
+ i40e_msec_delay(10);
+ }
+ if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+ DEBUGOUT("wait for FW Reset complete timedout\n");
+ DEBUGOUT1("I40E_GLNVM_ULD = 0x%x\n", reg);
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* If there was a Global Reset in progress when we got here,
+ * we don't need to do the PF Reset
+ */
+ if (!cnt) {
+ u32 reg2 = 0;
+
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ wr32(hw, I40E_PFGEN_CTRL,
+ (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
+ break;
+ reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ DEBUGOUT("Core reset upcoming. Skipping PF reset request.\n");
+ DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2);
+ return I40E_ERR_NOT_READY;
+ }
+ i40e_msec_delay(1);
+ }
+ if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+ DEBUGOUT("PF reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+ }
+
+ i40e_clear_pxe_mode(hw);
+
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_clear_hw - clear out any left over hw state
+ * @hw: pointer to the hw struct
+ *
+ * Clear queues and interrupts, typically called at init time,
+ * but after the capabilities have been found so we know how many
+ * queues and msix vectors have been allocated.
+ **/
+void i40e_clear_hw(struct i40e_hw *hw)
+{
+ u32 num_queues, base_queue;
+ u32 num_pf_int;
+ u32 num_vf_int;
+ u32 num_vfs;
+ u32 i, j;
+ u32 val;
+ u32 eol = 0x7ff;
+
+ /* get number of interrupts, queues, and vfs */
+ val = rd32(hw, I40E_GLPCI_CNF2);
+ num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
+ I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
+ num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
+ I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+
+ val = rd32(hw, I40E_PFLAN_QALLOC);
+ base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
+ I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+ j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
+ I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+ if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+ num_queues = (j - base_queue) + 1;
+ else
+ num_queues = 0;
+
+ val = rd32(hw, I40E_PF_VT_PFALLOC);
+ i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
+ I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+ j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
+ I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+ if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+ num_vfs = (j - i) + 1;
+ else
+ num_vfs = 0;
+
+ /* stop all the interrupts */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+ val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+ for (i = 0; i < num_pf_int - 2; i++)
+ wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
+
+ /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
+ val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLST0, val);
+ for (i = 0; i < num_pf_int - 2; i++)
+ wr32(hw, I40E_PFINT_LNKLSTN(i), val);
+ val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ for (i = 0; i < num_vfs; i++)
+ wr32(hw, I40E_VPINT_LNKLST0(i), val);
+ for (i = 0; i < num_vf_int - 2; i++)
+ wr32(hw, I40E_VPINT_LNKLSTN(i), val);
+
+ /* warn the HW of the coming Tx disables */
+ for (i = 0; i < num_queues; i++) {
+ u32 abs_queue_idx = base_queue + i;
+ u32 reg_block = 0;
+
+ if (abs_queue_idx >= 128) {
+ reg_block = abs_queue_idx / 128;
+ abs_queue_idx %= 128;
+ }
+
+ val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+ val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
+ }
+ i40e_usec_delay(400);
+
+ /* stop all the queues */
+ for (i = 0; i < num_queues; i++) {
+ wr32(hw, I40E_QINT_TQCTL(i), 0);
+ wr32(hw, I40E_QTX_ENA(i), 0);
+ wr32(hw, I40E_QINT_RQCTL(i), 0);
+ wr32(hw, I40E_QRX_ENA(i), 0);
+ }
+
+ /* short wait for all queue disables to settle */
+ i40e_usec_delay(50);
+}
+
+/**
+ * i40e_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the hw struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ **/
+void i40e_clear_pxe_mode(struct i40e_hw *hw)
+{
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_clear_pxe_mode(hw, NULL);
+}
+
+/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ */
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+ u32 gpio_val = 0;
+ u32 port;
+
+ if (!hw->func_caps.led[idx])
+ return 0;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+ I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
+ * if it is not our port then ignore
+ */
+ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+ (port != hw->port))
+ return 0;
+
+ return gpio_val;
+}
+
+#define I40E_COMBINED_ACTIVITY 0xA
+#define I40E_FILTER_ACTIVITY 0xE
+#define I40E_LINK_ACTIVITY 0xC
+#define I40E_MAC_ACTIVITY 0xD
+#define I40E_LED0 22
+
+/**
+ * i40e_led_get - return current on/off mode
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the 'mode' field as defined in the
+ * GPIO register definitions: 0x0 = off, 0xf = on, and other
+ * values are variations of possible behaviors relating to
+ * blink, link, and wire.
+ **/
+u32 i40e_led_get(struct i40e_hw *hw)
+{
+ u32 current_mode = 0;
+ u32 mode = 0;
+ int i;
+
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
+
+ if (!gpio_val)
+ continue;
+
+ /* ignore gpio LED src mode entries related to the activity
+ * LEDs
+ */
+ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+ >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+ switch (current_mode) {
+ case I40E_COMBINED_ACTIVITY:
+ case I40E_FILTER_ACTIVITY:
+ case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
+ continue;
+ default:
+ break;
+ }
+
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+ break;
+ }
+
+ return mode;
+}
+
+/**
+ * i40e_led_set - set new on/off mode
+ * @hw: pointer to the hw struct
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: true if the LED should blink when on, false if steady
+ *
+ * if this function is used to turn on the blink it should
+ * be used to disable the blink when restoring the original state.
+ **/
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
+{
+ u32 current_mode = 0;
+ int i;
+
+ if (mode & 0xfffffff0)
+ DEBUGOUT1("invalid mode passed in %X\n", mode);
+
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
+
+ if (!gpio_val)
+ continue;
+
+ /* ignore gpio LED src mode entries related to the activity
+ * LEDs
+ */
+ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+ >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+ switch (current_mode) {
+ case I40E_COMBINED_ACTIVITY:
+ case I40E_FILTER_ACTIVITY:
+ case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
+ continue;
+ default:
+ break;
+ }
+
+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ /* this & is a bit of paranoia, but serves as a range check */
+ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
+ if (blink)
+ gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ else
+ gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+
+ wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ break;
+ }
+}
+
+/* Admin command wrappers */
+
+/**
+ * i40e_aq_get_phy_capabilities
+ * @hw: pointer to the hw struct
+ * @abilities: structure for PHY capabilities to be filled
+ * @qualified_modules: report Qualified Modules
+ * @report_init: report init capabilities (active are default)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the various PHY abilities supported on the Port.
+ **/
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+ u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
+ u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+
+ if (!abilities)
+ return I40E_ERR_PARAM;
+
+ do {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_abilities);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (abilities_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ if (qualified_modules)
+ desc.params.external.param0 |=
+ CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
+
+ if (report_init)
+ desc.params.external.param0 |=
+ CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
+
+ status = i40e_asq_send_command(hw, &desc, abilities,
+ abilities_size, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ break;
+
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
+ i40e_msec_delay(1);
+ total_delay++;
+ status = I40E_ERR_TIMEOUT;
+ }
+ } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
+ (total_delay < max_delay));
+
+ if (status != I40E_SUCCESS)
+ return status;
+
+ if (report_init) {
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ } else {
+ hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+ hw->phy.phy_types |=
+ ((u64)abilities->phy_type_ext << 32);
+ }
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_config
+ * @hw: pointer to the hw struct
+ * @config: structure with PHY configuration to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the various PHY configuration parameters
+ * supported on the Port.One or more of the Set PHY config parameters may be
+ * ignored in an MFP mode as the PF may not have the privilege to set some
+ * of the PHY Config parameters. This status will be indicated by the
+ * command response.
+ **/
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_set_phy_config *cmd =
+ (struct i40e_aq_set_phy_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (!config)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_config);
+
+ *cmd = *config;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to enable atomic link restart
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
+{
+ enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config;
+ enum i40e_status_code status;
+ u8 pause_mask = 0x0;
+
+ *aq_failures = 0x0;
+
+ switch (fc_mode) {
+ case I40E_FC_FULL:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+ break;
+ case I40E_FC_RX_PAUSE:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+ break;
+ case I40E_FC_TX_PAUSE:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+ break;
+ default:
+ break;
+ }
+
+ /* Get the current phy config */
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status) {
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
+ return status;
+ }
+
+ memset(&config, 0, sizeof(config));
+ /* clear the old pause settings */
+ config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+ ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+ /* set the new abilities */
+ config.abilities |= pause_mask;
+ /* If the abilities have changed, then set the new config */
+ if (config.abilities != abilities.abilities) {
+ /* Auto restart link so settings take effect */
+ if (atomic_restart)
+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ /* Copy over all the old settings */
+ config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
+ config.link_speed = abilities.link_speed;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
+ }
+ /* Update the link info */
+ status = i40e_update_link_info(hw);
+ if (status) {
+ /* Wait a little bit (on 40G cards it sometimes takes a really
+ * long time for link to come back from the atomic reset)
+ * and try once more
+ */
+ i40e_msec_delay(1000);
+ status = i40e_update_link_info(hw);
+ }
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_mac_config
+ * @hw: pointer to the hw struct
+ * @max_frame_size: Maximum Frame Size to be supported by the port
+ * @crc_en: Tell HW to append a CRC to outgoing frames
+ * @pacing: Pacing configurations
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Configure MAC settings for frame size, jumbo frame support and the
+ * addition of a CRC by the hardware.
+ **/
+enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
+ u16 max_frame_size,
+ bool crc_en, u16 pacing,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_set_mac_config *cmd =
+ (struct i40e_aq_set_mac_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (max_frame_size == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_mac_config);
+
+ cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
+ cmd->params = ((u8)pacing & 0x0F) << 3;
+ if (crc_en)
+ cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware that the driver is taking over from PXE
+ **/
+enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_clear_pxe *cmd =
+ (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_pxe_mode);
+
+ cmd->rx_cnt = 0x2;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_link_restart_an
+ * @hw: pointer to the hw struct
+ * @enable_link: if true: enable link, if false: disable link
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ **/
+enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_link_restart_an *cmd =
+ (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_link_restart_an);
+
+ cmd->command = I40E_AQ_PHY_RESTART_AN;
+ if (enable_link)
+ cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
+ else
+ cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the link status of the adapter.
+ **/
+enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_link_status *resp =
+ (struct i40e_aqc_get_link_status *)&desc.params.raw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ enum i40e_status_code status;
+ bool tx_pause, rx_pause;
+ u16 command_flags;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+
+ if (enable_lse)
+ command_flags = I40E_AQ_LSE_ENABLE;
+ else
+ command_flags = I40E_AQ_LSE_DISABLE;
+ resp->command_flags = CPU_TO_LE16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_link_info_exit;
+
+ /* save off old link status information */
+ i40e_memcpy(&hw->phy.link_info_old, hw_link_info,
+ sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA);
+
+ /* update link status */
+ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw->phy.media_type = i40e_get_media_type(hw);
+ hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
+ hw_link_info->link_info = resp->link_info;
+ hw_link_info->an_info = resp->an_info;
+ hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
+ I40E_AQ_CONFIG_FEC_RS_ENA);
+ hw_link_info->ext_info = resp->ext_info;
+ hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
+ hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
+ hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+ /* update fc info */
+ tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
+ rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
+ if (tx_pause & rx_pause)
+ hw->fc.current_mode = I40E_FC_FULL;
+ else if (tx_pause)
+ hw->fc.current_mode = I40E_FC_TX_PAUSE;
+ else if (rx_pause)
+ hw->fc.current_mode = I40E_FC_RX_PAUSE;
+ else
+ hw->fc.current_mode = I40E_FC_NONE;
+
+ if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+ hw_link_info->crc_enable = true;
+ else
+ hw_link_info->crc_enable = false;
+
+ if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED))
+ hw_link_info->lse_enable = true;
+ else
+ hw_link_info->lse_enable = false;
+
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+ hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= 7) {
+ __le32 tmp;
+
+ i40e_memcpy(&tmp, resp->link_type, sizeof(tmp),
+ I40E_NONDMA_TO_NONDMA);
+ hw->phy.phy_types = LE32_TO_CPU(tmp);
+ hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
+ }
+
+ /* save link status information */
+ if (link)
+ i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
+ I40E_NONDMA_TO_NONDMA);
+
+ /* flag cleared so helper functions don't call AQ again */
+ hw->phy.get_link_info = false;
+
+aq_get_link_info_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_int_mask
+ * @hw: pointer to the hw struct
+ * @mask: interrupt mask to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set link interrupt mask.
+ **/
+enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+ u16 mask,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_int_mask *cmd =
+ (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_int_mask);
+
+ cmd->event_mask = CPU_TO_LE16(mask);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the Local AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *resp =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_local_advt_reg);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_local_advt_reg_exit;
+
+ *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+ *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_local_advt_reg_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the Local AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *cmd =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_local_advt_reg);
+
+ cmd->local_an_reg0 = CPU_TO_LE32(I40E_LO_DWORD(advt_reg));
+ cmd->local_an_reg1 = CPU_TO_LE16(I40E_HI_DWORD(advt_reg));
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_partner_advt
+ * @hw: pointer to the hw struct
+ * @advt_reg: AN partner advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the link partner AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *resp =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_partner_advt);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_partner_advt_exit;
+
+ *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+ *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_partner_advt_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_lb_modes
+ * @hw: pointer to the hw struct
+ * @lb_modes: loopback mode to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets loopback modes.
+ **/
+enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw,
+ u16 lb_modes,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_lb_mode *cmd =
+ (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_lb_modes);
+
+ cmd->lb_mode = CPU_TO_LE16(lb_modes);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_debug
+ * @hw: pointer to the hw struct
+ * @cmd_flags: debug command flags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Reset the external PHY.
+ **/
+enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_debug *cmd =
+ (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_debug);
+
+ cmd->command_flags = cmd_flags;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware.
+**/
+enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_vsi);
+
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid);
+ cmd->connection_type = vsi_ctx->connection_type;
+ cmd->vf_id = vsi_ctx->vf_num;
+ cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_add_vsi_exit;
+
+ vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+ vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_add_vsi_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_clear_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = CPU_TO_LE16(0);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_unicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set unicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
+ **/
+enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set) {
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+ if (rx_only_promisc &&
+ (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1)))
+ flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+ }
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1))
+ cmd->valid_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_TX);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_multicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set multicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+* i40e_aq_set_vsi_full_promiscuous
+* @hw: pointer to the hw struct
+* @seid: VSI number
+* @set: set promiscuous enable/disable
+* @cmd_details: pointer to command details structure or NULL
+**/
+enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags = I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_bc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set broadcast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_broadcast
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set_filter: true to set filter, false to clear filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+ **/
+enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 seid, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set_filter)
+ cmd->promiscuous_flags
+ |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ else
+ cmd->promiscuous_flags
+ &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN);
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_vsi_params - get VSI configuration info
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ UNREFERENCED_1PARAMETER(cmd_details);
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_vsi_parameters);
+
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), NULL);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_vsi_params_exit;
+
+ vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+ vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_get_vsi_params_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_update_vsi_params
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update a VSI context.
+ **/
+enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_update_vsi_parameters);
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_switch_config
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of input buffer
+ * @start_seid: seid to start for the report, 0 == beginning
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Fill the buf with switch configuration returned from AdminQ command
+ **/
+enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *scfg =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_config);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ scfg->seid = CPU_TO_LE16(*start_seid);
+
+ status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
+ *start_seid = LE16_TO_CPU(scfg->seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_switch_config
+ * @hw: pointer to the hardware structure
+ * @flags: bit flag values to set
+ * @valid_flags: which bit flags to set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set switch configuration bits
+ **/
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags, u16 valid_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_switch_config *scfg =
+ (struct i40e_aqc_set_switch_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_switch_config);
+ scfg->flags = CPU_TO_LE16(flags);
+ scfg->valid_flags = CPU_TO_LE16(valid_flags);
+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ scfg->switch_tag = CPU_TO_LE16(hw->switch_tag);
+ scfg->first_tag = CPU_TO_LE16(hw->first_tag);
+ scfg->second_tag = CPU_TO_LE16(hw->second_tag);
+ }
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_firmware_version
+ * @hw: pointer to the hw struct
+ * @fw_major_version: firmware major version
+ * @fw_minor_version: firmware minor version
+ * @fw_build: firmware build number
+ * @api_major_version: major queue version
+ * @api_minor_version: minor queue version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the firmware version from the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_version *resp =
+ (struct i40e_aqc_get_version *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS) {
+ if (fw_major_version != NULL)
+ *fw_major_version = LE16_TO_CPU(resp->fw_major);
+ if (fw_minor_version != NULL)
+ *fw_minor_version = LE16_TO_CPU(resp->fw_minor);
+ if (fw_build != NULL)
+ *fw_build = LE32_TO_CPU(resp->fw_build);
+ if (api_major_version != NULL)
+ *api_major_version = LE16_TO_CPU(resp->api_major);
+ if (api_minor_version != NULL)
+ *api_minor_version = LE16_TO_CPU(resp->api_minor);
+
+ /* A workaround to fix the API version in SW */
+ if (api_major_version && api_minor_version &&
+ fw_major_version && fw_minor_version &&
+ ((*api_major_version == 1) && (*api_minor_version == 1)) &&
+ (((*fw_major_version == 4) && (*fw_minor_version >= 2)) ||
+ (*fw_major_version > 4)))
+ *api_minor_version = 2;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_driver_version
+ * @hw: pointer to the hw struct
+ * @dv: driver's major, minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Send the driver version to the firmware
+ **/
+enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_driver_version *cmd =
+ (struct i40e_aqc_driver_version *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 len;
+
+ if (dv == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ cmd->driver_major_ver = dv->major_version;
+ cmd->driver_minor_ver = dv->minor_version;
+ cmd->driver_build_ver = dv->build_version;
+ cmd->driver_subbuild_ver = dv->subbuild_version;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ (dv->driver_string[len] < 0x80) &&
+ dv->driver_string[len])
+ len++;
+ status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+ len, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_link_status - get status of the HW network link
+ * @hw: pointer to the hw struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * Variable link_up true if link is up, false if link is down.
+ * The variable link_up is invalid if returned value of status != I40E_SUCCESS
+ *
+ * Side effect: LinkStatusEvent reporting becomes enabled
+ **/
+enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_update_link_info(hw);
+
+ if (status != I40E_SUCCESS)
+ i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+ status);
+ }
+
+ *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+ return status;
+}
+
+/**
+ * i40e_updatelink_status - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ if (status)
+ return status;
+
+ /* extra checking needed to ensure link info to user is timely */
+ if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
+ !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
+ status = i40e_aq_get_phy_capabilities(hw, false, false,
+ &abilities, NULL);
+ if (status)
+ return status;
+
+ hw->phy.link_info.req_fec_info =
+ abilities.fec_cfg_curr_mod_ext_info &
+ (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+
+ i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
+ }
+ return status;
+}
+
+
+/**
+ * i40e_get_link_speed
+ * @hw: pointer to the hw struct
+ *
+ * Returns the link speed of the adapter.
+ **/
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw)
+{
+ enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+
+ if (status != I40E_SUCCESS)
+ goto i40e_link_speed_exit;
+ }
+
+ speed = hw->phy.link_info.link_speed;
+
+i40e_link_speed_exit:
+ return speed;
+}
+
+/**
+ * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
+ * @hw: pointer to the hw struct
+ * @uplink_seid: the MAC or other gizmo SEID
+ * @downlink_seid: the VSI SEID
+ * @enabled_tc: bitmap of TCs to be enabled
+ * @default_port: true for default port VSI, false for control port
+ * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @enable_stats: true to turn on VEB stats
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This asks the FW to add a VEB between the uplink and downlink
+ * elements. If the uplink SEID is 0, this will be a floating VEB.
+ **/
+enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *veb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_veb *cmd =
+ (struct i40e_aqc_add_veb *)&desc.params.raw;
+ struct i40e_aqc_add_veb_completion *resp =
+ (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 veb_flags = 0;
+
+ /* SEIDs need to either both be set or both be 0 for floating VEB */
+ if (!!uplink_seid != !!downlink_seid)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
+
+ cmd->uplink_seid = CPU_TO_LE16(uplink_seid);
+ cmd->downlink_seid = CPU_TO_LE16(downlink_seid);
+ cmd->enable_tcs = enabled_tc;
+ if (!uplink_seid)
+ veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
+ if (default_port)
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
+ else
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+ /* reverse logic here: set the bitflag to disable the stats */
+ if (!enable_stats)
+ veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
+
+ cmd->veb_flags = CPU_TO_LE16(veb_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && veb_seid)
+ *veb_seid = LE16_TO_CPU(resp->veb_seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_veb_parameters - Retrieve VEB parameters
+ * @hw: pointer to the hw struct
+ * @veb_seid: the SEID of the VEB to query
+ * @switch_id: the uplink switch id
+ * @floating: set to true if the VEB is floating
+ * @statistic_index: index of the stats counter block for this VEB
+ * @vebs_used: number of VEB's used by function
+ * @vebs_free: total VEB's not reserved by any function
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This retrieves the parameters for a particular VEB, specified by
+ * uplink_seid, and returns them to the caller.
+ **/
+enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id,
+ bool *floating, u16 *statistic_index,
+ u16 *vebs_used, u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+ (struct i40e_aqc_get_veb_parameters_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ if (veb_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_veb_parameters);
+ cmd_resp->seid = CPU_TO_LE16(veb_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status)
+ goto get_veb_exit;
+
+ if (switch_id)
+ *switch_id = LE16_TO_CPU(cmd_resp->switch_id);
+ if (statistic_index)
+ *statistic_index = LE16_TO_CPU(cmd_resp->statistic_index);
+ if (vebs_used)
+ *vebs_used = LE16_TO_CPU(cmd_resp->vebs_used);
+ if (vebs_free)
+ *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free);
+ if (floating) {
+ u16 flags = LE16_TO_CPU(cmd_resp->veb_flags);
+
+ if (flags & I40E_AQC_ADD_VEB_FLOATING)
+ *floating = true;
+ else
+ *floating = false;
+ }
+
+get_veb_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+ int i;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*mv_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ for (i = 0; i < count; i++)
+ if (I40E_IS_MULTICAST(mv_list[i].mac_addr))
+ mv_list[i].flags |=
+ CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*mv_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
+ * @hw: pointer to the hw struct
+ * @opcode: AQ opcode for add or delete mirror rule
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @id: Destination VSI SEID or Rule ID
+ * @count: length of the list
+ * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
+ * @cmd_details: pointer to command details structure or NULL
+ * @rule_id: Rule ID returned from FW
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
+ *
+ * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
+ * VEBs/VEPA elements only
+ **/
+static enum i40e_status_code i40e_mirrorrule_op(struct i40e_hw *hw,
+ u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
+ u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_delete_mirror_rule *cmd =
+ (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
+ struct i40e_aqc_add_delete_mirror_rule_completion *resp =
+ (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ buf_size = count * sizeof(*mr_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
+ cmd->seid = CPU_TO_LE16(sw_seid);
+ cmd->rule_type = CPU_TO_LE16(rule_type &
+ I40E_AQC_MIRROR_RULE_TYPE_MASK);
+ cmd->num_entries = CPU_TO_LE16(count);
+ /* Dest VSI for add, rule_id for delete */
+ cmd->destination = CPU_TO_LE16(id);
+ if (mr_list) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ }
+
+ status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
+ cmd_details);
+ if (status == I40E_SUCCESS ||
+ hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
+ if (rule_id)
+ *rule_id = LE16_TO_CPU(resp->rule_id);
+ if (rules_used)
+ *rules_used = LE16_TO_CPU(resp->mirror_rules_used);
+ if (rules_free)
+ *rules_free = LE16_TO_CPU(resp->mirror_rules_free);
+ }
+ return status;
+}
+
+/**
+ * i40e_aq_add_mirrorrule - add a mirror rule
+ * @hw: pointer to the hw struct
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @dest_vsi: SEID of VSI to which packets will be mirrored
+ * @count: length of the list
+ * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
+ * @cmd_details: pointer to command details structure or NULL
+ * @rule_id: Rule ID returned from FW
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
+ *
+ * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
+ **/
+enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
+{
+ if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
+ rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
+ if (count == 0 || !mr_list)
+ return I40E_ERR_PARAM;
+ }
+
+ return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
+ rule_type, dest_vsi, count, mr_list,
+ cmd_details, rule_id, rules_used, rules_free);
+}
+
+/**
+ * i40e_aq_delete_mirrorrule - delete a mirror rule
+ * @hw: pointer to the hw struct
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @count: length of the list
+ * @rule_id: Rule ID that is returned in the receive desc as part of
+ * add_mirrorrule.
+ * @mr_list: list of mirrored VLAN IDs to be removed
+ * @cmd_details: pointer to command details structure or NULL
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
+ *
+ * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
+ **/
+enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free)
+{
+ /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
+ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
+ /* count and mr_list shall be valid for rule_type INGRESS VLAN
+ * mirroring. For other rule_type, count and rule_type should
+ * not matter.
+ */
+ if (count == 0 || !mr_list)
+ return I40E_ERR_PARAM;
+ }
+
+ return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
+ rule_type, rule_id, count, mr_list,
+ cmd_details, NULL, rules_used, rules_free);
+}
+
+/**
+ * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of vlan filters to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*v_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*v_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: vf id to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * send msg to vf
+ **/
+enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pf_vf_message *cmd =
+ (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd->id = CPU_TO_LE32(vfid);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_debug_read_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the register using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_reg_read_write *cmd_resp =
+ (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reg_val == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
+
+ cmd_resp->address = CPU_TO_LE32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS) {
+ *reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) |
+ (u64)LE32_TO_CPU(cmd_resp->value_low);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_debug_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write to a register using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_reg_read_write *cmd =
+ (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+
+ cmd->address = CPU_TO_LE32(reg_addr);
+ cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32));
+ cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF));
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_request_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * requests common resource using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd_resp =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_request_resource");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+
+ cmd_resp->resource_id = CPU_TO_LE16(resource);
+ cmd_resp->access_type = CPU_TO_LE16(access);
+ cmd_resp->resource_number = CPU_TO_LE32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by someone else, the command completes with
+ * busy return value and the timeout field indicates the maximum time
+ * the current owner of the resource has to free it.
+ */
+ if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ *timeout = LE32_TO_CPU(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * i40e_aq_release_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @sdp_number: resource number
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * release common resource using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_release_resource");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+
+ cmd->resource_id = CPU_TO_LE16(resource);
+ cmd->resource_number = CPU_TO_LE32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_read_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_read_nvm");
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_read_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_read_nvm_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_read_nvm_config - read an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @field_id: field or feature id
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @element_count: pointer to count of elements read by FW
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, u32 field_id, void *data,
+ u16 buf_size, u16 *element_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_config_read *cmd =
+ (struct i40e_aqc_nvm_config_read *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+ cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id));
+ if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK)
+ cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16));
+ else
+ cmd->element_id_msw = 0;
+
+ status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+ if (!status && element_count)
+ *element_count = LE16_TO_CPU(cmd->element_count);
+
+ return status;
+}
+
+/**
+ * i40e_aq_write_nvm_config - write an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @element_count: count of elements to be written
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, void *data, u16 buf_size,
+ u16 element_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_config_write *cmd =
+ (struct i40e_aqc_nvm_config_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->element_count = CPU_TO_LE16(element_count);
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+ status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_oem_post_update - triggers an OEM specific flow after update
+ * @hw: pointer to the hw struct
+ * @buff: buffer for result
+ * @buff_size: buffer size
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ UNREFERENCED_2PARAMETER(buff, buff_size);
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH)
+ status = I40E_ERR_NOT_IMPLEMENTED;
+
+ return status;
+}
+
+/**
+ * i40e_aq_erase_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in the module (expressed in 4 KB from module's beginning)
+ * @length: length of the section to be erased (expressed in 4 KB)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_erase_nvm");
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_erase_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_erase_nvm_exit:
+ return status;
+}
+
+/**
+ * i40e_parse_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: pointer to a buffer containing device/function capability records
+ * @cap_count: number of capability records in the list
+ * @list_type_opc: type of capabilities list to parse
+ *
+ * Parse the device/function capabilities list.
+ **/
+STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+ u32 cap_count,
+ enum i40e_admin_queue_opc list_type_opc)
+{
+ struct i40e_aqc_list_capabilities_element_resp *cap;
+ u32 valid_functions, num_functions;
+ u32 number, logical_id, phys_id;
+ struct i40e_hw_capabilities *p;
+ u8 major_rev;
+ u32 i = 0;
+ u16 id;
+
+ cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+
+ if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+ else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->func_caps;
+ else
+ return;
+
+ for (i = 0; i < cap_count; i++, cap++) {
+ id = LE16_TO_CPU(cap->id);
+ number = LE32_TO_CPU(cap->number);
+ logical_id = LE32_TO_CPU(cap->logical_id);
+ phys_id = LE32_TO_CPU(cap->phys_id);
+ major_rev = cap->major_rev;
+
+ switch (id) {
+ case I40E_AQ_CAP_ID_SWITCH_MODE:
+ p->switch_mode = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Switch mode = %d\n",
+ p->switch_mode);
+ break;
+ case I40E_AQ_CAP_ID_MNG_MODE:
+ p->management_mode = number;
+ if (major_rev > 1) {
+ p->mng_protocols_over_mctp = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Protocols over MCTP = %d\n",
+ p->mng_protocols_over_mctp);
+ } else {
+ p->mng_protocols_over_mctp = 0;
+ }
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Management Mode = %d\n",
+ p->management_mode);
+ break;
+ case I40E_AQ_CAP_ID_NPAR_ACTIVE:
+ p->npar_enable = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: NPAR enable = %d\n",
+ p->npar_enable);
+ break;
+ case I40E_AQ_CAP_ID_OS2BMC_CAP:
+ p->os2bmc = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: OS2BMC = %d\n", p->os2bmc);
+ break;
+ case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
+ p->valid_functions = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Valid Functions = %d\n",
+ p->valid_functions);
+ break;
+ case I40E_AQ_CAP_ID_SRIOV:
+ if (number == 1)
+ p->sr_iov_1_1 = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: SR-IOV = %d\n",
+ p->sr_iov_1_1);
+ break;
+ case I40E_AQ_CAP_ID_VF:
+ p->num_vfs = number;
+ p->vf_base_id = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: VF count = %d\n",
+ p->num_vfs);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: VF base_id = %d\n",
+ p->vf_base_id);
+ break;
+ case I40E_AQ_CAP_ID_VMDQ:
+ if (number == 1)
+ p->vmdq = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: VMDQ = %d\n", p->vmdq);
+ break;
+ case I40E_AQ_CAP_ID_8021QBG:
+ if (number == 1)
+ p->evb_802_1_qbg = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: 802.1Qbg = %d\n", number);
+ break;
+ case I40E_AQ_CAP_ID_8021QBR:
+ if (number == 1)
+ p->evb_802_1_qbh = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: 802.1Qbh = %d\n", number);
+ break;
+ case I40E_AQ_CAP_ID_VSI:
+ p->num_vsis = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: VSI count = %d\n",
+ p->num_vsis);
+ break;
+ case I40E_AQ_CAP_ID_DCB:
+ if (number == 1) {
+ p->dcb = true;
+ p->enabled_tcmap = logical_id;
+ p->maxtc = phys_id;
+ }
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: DCB = %d\n", p->dcb);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: TC Mapping = %d\n",
+ logical_id);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: TC Max = %d\n", p->maxtc);
+ break;
+ case I40E_AQ_CAP_ID_FCOE:
+ if (number == 1)
+ p->fcoe = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: FCOE = %d\n", p->fcoe);
+ break;
+ case I40E_AQ_CAP_ID_ISCSI:
+ if (number == 1)
+ p->iscsi = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: iSCSI = %d\n", p->iscsi);
+ break;
+ case I40E_AQ_CAP_ID_RSS:
+ p->rss = true;
+ p->rss_table_size = number;
+ p->rss_table_entry_width = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: RSS = %d\n", p->rss);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: RSS table size = %d\n",
+ p->rss_table_size);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: RSS table width = %d\n",
+ p->rss_table_entry_width);
+ break;
+ case I40E_AQ_CAP_ID_RXQ:
+ p->num_rx_qp = number;
+ p->base_queue = phys_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Rx QP = %d\n", number);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: base_queue = %d\n",
+ p->base_queue);
+ break;
+ case I40E_AQ_CAP_ID_TXQ:
+ p->num_tx_qp = number;
+ p->base_queue = phys_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Tx QP = %d\n", number);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: base_queue = %d\n",
+ p->base_queue);
+ break;
+ case I40E_AQ_CAP_ID_MSIX:
+ p->num_msix_vectors = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MSIX vector count = %d\n",
+ p->num_msix_vectors);
+ break;
+ case I40E_AQ_CAP_ID_VF_MSIX:
+ p->num_msix_vectors_vf = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MSIX VF vector count = %d\n",
+ p->num_msix_vectors_vf);
+ break;
+ case I40E_AQ_CAP_ID_FLEX10:
+ if (major_rev == 1) {
+ if (number == 1) {
+ p->flex10_enable = true;
+ p->flex10_capable = true;
+ }
+ } else {
+ /* Capability revision >= 2 */
+ if (number & 1)
+ p->flex10_enable = true;
+ if (number & 2)
+ p->flex10_capable = true;
+ }
+ p->flex10_mode = logical_id;
+ p->flex10_status = phys_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Flex10 mode = %d\n",
+ p->flex10_mode);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Flex10 status = %d\n",
+ p->flex10_status);
+ break;
+ case I40E_AQ_CAP_ID_CEM:
+ if (number == 1)
+ p->mgmt_cem = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: CEM = %d\n", p->mgmt_cem);
+ break;
+ case I40E_AQ_CAP_ID_IWARP:
+ if (number == 1)
+ p->iwarp = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: iWARP = %d\n", p->iwarp);
+ break;
+ case I40E_AQ_CAP_ID_LED:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->led[phys_id] = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: LED - PIN %d\n", phys_id);
+ break;
+ case I40E_AQ_CAP_ID_SDP:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->sdp[phys_id] = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: SDP - PIN %d\n", phys_id);
+ break;
+ case I40E_AQ_CAP_ID_MDIO:
+ if (number == 1) {
+ p->mdio_port_num = phys_id;
+ p->mdio_port_mode = logical_id;
+ }
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MDIO port number = %d\n",
+ p->mdio_port_num);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MDIO port mode = %d\n",
+ p->mdio_port_mode);
+ break;
+ case I40E_AQ_CAP_ID_1588:
+ if (number == 1)
+ p->ieee_1588 = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: IEEE 1588 = %d\n",
+ p->ieee_1588);
+ break;
+ case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
+ p->fd = true;
+ p->fd_filters_guaranteed = number;
+ p->fd_filters_best_effort = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Flow Director = 1\n");
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Guaranteed FD filters = %d\n",
+ p->fd_filters_guaranteed);
+ break;
+ case I40E_AQ_CAP_ID_WSR_PROT:
+ p->wr_csr_prot = (u64)number;
+ p->wr_csr_prot |= (u64)logical_id << 32;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: wr_csr_prot = 0x%llX\n\n",
+ (p->wr_csr_prot & 0xffff));
+ break;
+ case I40E_AQ_CAP_ID_NVM_MGMT:
+ if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
+ p->sec_rev_disabled = true;
+ if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
+ p->update_disabled = true;
+ break;
+ case I40E_AQ_CAP_ID_WOL_AND_PROXY:
+ hw->num_wol_proxy_filters = (u16)number;
+ hw->wol_proxy_vsi_seid = (u16)logical_id;
+ p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK;
+ if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK)
+ p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK;
+ else
+ p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
+ p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: WOL proxy filters = %d\n",
+ hw->num_wol_proxy_filters);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (p->fcoe)
+ i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
+
+ /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
+ p->fcoe = false;
+
+ /* count the enabled ports (aka the "not disabled" ports) */
+ hw->num_ports = 0;
+ for (i = 0; i < 4; i++) {
+ u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
+ u64 port_cfg = 0;
+
+ /* use AQ read to get the physical register offset instead
+ * of the port relative offset
+ */
+ i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
+ if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
+ hw->num_ports++;
+ }
+
+ valid_functions = p->valid_functions;
+ num_functions = 0;
+ while (valid_functions) {
+ if (valid_functions & 1)
+ num_functions++;
+ valid_functions >>= 1;
+ }
+
+ /* partition id is 1-based, and functions are evenly spread
+ * across the ports as partitions
+ */
+ if (hw->num_ports != 0) {
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+ }
+
+ /* additional HW specific goodies that might
+ * someday be HW version specific
+ */
+ p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+}
+
+/**
+ * i40e_aq_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: a virtual buffer to hold the capabilities
+ * @buff_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
+ * @list_type_opc: capabilities type to discover - pass in the command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the device capabilities descriptions from the firmware
+ **/
+enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_list_capabilites *cmd;
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+
+ if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
+ list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
+ status = I40E_ERR_PARAM;
+ goto exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ *data_size = LE16_TO_CPU(desc.datalen);
+
+ if (status)
+ goto exit;
+
+ i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count),
+ list_type_opc);
+
+exit:
+ return status;
+}
+
+/**
+ * i40e_aq_update_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @preservation_flags: Preservation mode flags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_update_nvm");
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_update_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ }
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_update_nvm_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_nvm_progress
+ * @hw: pointer to the hw struct
+ * @progress: pointer to progress returned from AQ
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Gets progress of flash rearrangement process
+ **/
+enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+
+ DEBUGFUNC("i40e_aq_nvm_progress");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_progress);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ *progress = desc.params.raw[0];
+ return status;
+}
+
+/**
+ * i40e_aq_get_lldp_mib
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @local_len : length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet).
+ **/
+enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_get_mib *cmd =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *resp =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+ cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (local_len != NULL)
+ *local_len = LE16_TO_CPU(resp->local_len);
+ if (remote_len != NULL)
+ *remote_len = LE16_TO_CPU(resp->remote_len);
+ }
+
+ return status;
+}
+
+ /**
+ * i40e_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the hw struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB.
+ **/
+enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_set_local_mib *cmd =
+ (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_lldp_set_local_mib);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = mib_type;
+ cmd->length = CPU_TO_LE16(buff_size);
+ cmd->address_high = CPU_TO_LE32(I40E_HI_WORD((u64)buff));
+ cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff));
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_aq_cfg_lldp_mib_change_event
+ * @hw: pointer to the hw struct
+ * @enable_update: Enable or Disable event posting
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes
+ **/
+enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_mib *cmd =
+ (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+
+ if (!enable_update)
+ cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to add
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be added
+ * @mib_len: length of the LLDP MIB returned in response
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add the specified TLV to LLDP Local MIB for the given bridge type,
+ * it is responsibility of the caller to make sure that the TLV is not
+ * already present in the LLDPDU.
+ * In return firmware will write the complete LLDP MIB with the newly
+ * added TLV in the response buffer.
+ **/
+enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+ void *buff, u16 buff_size, u16 tlv_len,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_add_tlv *cmd =
+ (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff || tlv_len == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ cmd->len = CPU_TO_LE16(tlv_len);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_update_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to update
+ * @buff_size: size of the buffer holding original and updated TLVs
+ * @old_len: Length of the Original TLV
+ * @new_len: Length of the Updated TLV
+ * @offset: offset of the updated TLV in the buff
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the specified TLV to the LLDP Local MIB for the given bridge type.
+ * Firmware will place the complete LLDP MIB in response buffer with the
+ * updated TLV.
+ **/
+enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_tlv *cmd =
+ (struct i40e_aqc_lldp_update_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff || offset == 0 ||
+ old_len == 0 || new_len == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ cmd->old_len = CPU_TO_LE16(old_len);
+ cmd->new_offset = CPU_TO_LE16(offset);
+ cmd->new_len = CPU_TO_LE16(new_len);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_delete_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: pointer to a user supplied buffer that has the TLV
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be deleted
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Delete the specified TLV from LLDP Local MIB for the given bridge type.
+ * The firmware places the entire LLDP MIB in the response buffer.
+ **/
+enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 tlv_len, u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_add_tlv *cmd =
+ (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+ cmd->len = CPU_TO_LE16(tlv_len);
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_stop_lldp
+ * @hw: pointer to the hw struct
+ * @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent
+ **/
+enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_stop *cmd =
+ (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+
+ if (shutdown_agent)
+ cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_start_lldp
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports.
+ **/
+enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_start *cmd =
+ (struct i40e_aqc_lldp_start *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+
+ cmd->command = I40E_AQ_LLDP_AGENT_START;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_dcb_parameters
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ * @dcb_enable: True if DCB configuration needs to be applied
+ *
+ **/
+enum i40e_status_code
+i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_dcb_parameters *cmd =
+ (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_dcb_parameters);
+
+ if (dcb_enable) {
+ cmd->valid_flags = I40E_DCB_VALID;
+ cmd->command = I40E_AQ_DCB_SET_AGENT;
+ }
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_cee_dcb_config
+ * @hw: pointer to the hw struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @buff_size: size of the buffer passed
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware
+ **/
+enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW
+ * @hw: pointer to the hw struct
+ * @start_agent: True if DCBx Agent needs to be Started
+ * False if DCBx Agent needs to be Stopped
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start/Stop the embedded dcbx Agent
+ **/
+enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+ bool start_agent,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_stop_start_specific_agent *cmd =
+ (struct i40e_aqc_lldp_stop_start_specific_agent *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_lldp_stop_start_spec_agent);
+
+ if (start_agent)
+ cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add in Host byte order
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Note: Firmware expects the udp_port value to be in Little Endian format,
+ * and this function will call CPU_TO_LE16 to convert from Host byte order to
+ * Little Endian order.
+ **/
+enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_udp_tunnel *cmd =
+ (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp =
+ (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+ cmd->udp_port = CPU_TO_LE16(udp_port);
+ cmd->protocol_type = protocol_index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && filter_index)
+ *filter_index = resp->index;
+
+ return status;
+}
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_udp_tunnel *cmd =
+ (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+ cmd->index = index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_switch_resource_alloc (0x0204)
+ * @hw: pointer to the hw struct
+ * @num_entries: pointer to u8 to store the number of resource entries returned
+ * @buf: pointer to a user supplied buffer. This buffer must be large enough
+ * to store the resource information for all resource types. Each
+ * resource type is a i40e_aqc_switch_resource_alloc_data structure.
+ * @count: size, in bytes, of the buffer provided
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Query the resources allocated to a function.
+ **/
+enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+ u8 *num_entries,
+ struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+ u16 count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_switch_resource_alloc *cmd_resp =
+ (struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 length = count * sizeof(*buf);
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_resource_alloc);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+ if (!status && num_entries)
+ *num_entries = cmd_resp->num_entries;
+
+ return status;
+}
+
+/**
+ * i40e_aq_delete_element - Delete switch element
+ * @hw: pointer to the hw struct
+ * @seid: the SEID to delete from the switch
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes a switch element from the switch.
+ **/
+enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port
+ * @hw: pointer to the hw struct
+ * @flags: component flags
+ * @mac_seid: uplink seid (MAC SEID)
+ * @vsi_seid: connected vsi seid
+ * @ret_seid: seid of create pv component
+ *
+ * This instantiates an i40e port virtualizer with specified flags.
+ * Depending on specified flags the port virtualizer can act as a
+ * 802.1Qbr port virtualizer or a 802.1Qbg S-component.
+ */
+enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+ u16 mac_seid, u16 vsi_seid,
+ u16 *ret_seid)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_update_pv *cmd =
+ (struct i40e_aqc_add_update_pv *)&desc.params.raw;
+ struct i40e_aqc_add_update_pv_completion *resp =
+ (struct i40e_aqc_add_update_pv_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv);
+ cmd->command_flags = CPU_TO_LE16(flags);
+ cmd->uplink_seid = CPU_TO_LE16(mac_seid);
+ cmd->connected_seid = CPU_TO_LE16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ if (!status && ret_seid)
+ *ret_seid = LE16_TO_CPU(resp->pv_seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_tag - Add an S/E-tag
+ * @hw: pointer to the hw struct
+ * @direct_to_queue: should s-tag direct flow to a specific queue
+ * @vsi_seid: VSI SEID to use this tag
+ * @tag: value of the tag
+ * @queue_num: queue number, only valid is direct_to_queue is true
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates an S- or E-tag to a VSI in the switch complex. It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+ u16 vsi_seid, u16 tag, u16 queue_num,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_tag *cmd =
+ (struct i40e_aqc_add_tag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_tag_completion *resp =
+ (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->tag = CPU_TO_LE16(tag);
+ if (direct_to_queue) {
+ cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE);
+ cmd->queue_number = CPU_TO_LE16(queue_num);
+ }
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_tag - Remove an S- or E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID this tag is associated with
+ * @tag: value of the S-tag to delete
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an S- or E-tag from a VSI in the switch complex. It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 tag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_tag *cmd =
+ (struct i40e_aqc_remove_tag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_tag_completion *resp =
+ (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->tag = CPU_TO_LE16(tag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_mcast_etag - Add a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: Port Virtualizer of this SEID to associate E-tag with
+ * @etag: value of E-tag to add
+ * @num_tags_in_buf: number of unicast E-tags in indirect buffer
+ * @buf: address of indirect buffer
+ * @tags_used: return value, number of E-tags in use by this port
+ * @tags_free: return value, number of unallocated M-tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates a multicast E-tag to a port virtualizer. It will return
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ *
+ * The indirect buffer pointed to by buf is a list of 2-byte E-tags,
+ * num_tags_in_buf long.
+ **/
+enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+ u16 etag, u8 num_tags_in_buf, void *buf,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_mcast_etag *cmd =
+ (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+ (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 length = sizeof(u16) * num_tags_in_buf;
+
+ if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0))
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_multicast_etag);
+
+ cmd->pv_seid = CPU_TO_LE16(pv_seid);
+ cmd->etag = CPU_TO_LE16(etag);
+ cmd->num_unicast_etags = num_tags_in_buf;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_mcast_etag - Remove a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: Port Virtualizer SEID this M-tag is associated with
+ * @etag: value of the E-tag to remove
+ * @tags_used: return value, number of tags in use by this port
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an E-tag from the port virtualizer. It will return
+ * the number of tags allocated by the port, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+ u16 etag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_mcast_etag *cmd =
+ (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+ (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+
+ if (pv_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_multicast_etag);
+
+ cmd->pv_seid = CPU_TO_LE16(pv_seid);
+ cmd->etag = CPU_TO_LE16(etag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_update_tag - Update an S/E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID using this S-tag
+ * @old_tag: old tag value
+ * @new_tag: new tag value
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This updates the value of the tag currently attached to this VSI
+ * in the switch complex. It will return the number of tags allocated
+ * by the PF, and the number of unallocated tags available.
+ **/
+enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 old_tag, u16 new_tag, u16 *tags_used,
+ u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_update_tag *cmd =
+ (struct i40e_aqc_update_tag *)&desc.params.raw;
+ struct i40e_aqc_update_tag_completion *resp =
+ (struct i40e_aqc_update_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->old_tag = CPU_TO_LE16(old_tag);
+ cmd->new_tag = CPU_TO_LE16(new_tag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs
+ * @hw: pointer to the hw struct
+ * @tcmap: TC map for request/release any ignore PFC condition
+ * @request: request or release ignore PFC condition
+ * @tcmap_ret: return TCs for which PFC is currently ignored
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This sends out request/release to ignore PFC condition for a TC.
+ * It will return the TCs for which PFC is currently ignored.
+ **/
+enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap,
+ bool request, u8 *tcmap_ret,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pfc_ignore *cmd_resp =
+ (struct i40e_aqc_pfc_ignore *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc);
+
+ if (request)
+ cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET;
+
+ cmd_resp->tc_bitmap = tcmap;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tcmap_ret != NULL)
+ *tcmap_ret = cmd_resp->tc_bitmap;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_dcb_updated - DCB Updated Command
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * When LLDP is handled in PF this command is used by the PF
+ * to notify EMP that a DCB setting is modified.
+ * When LLDP is handled in EMP this command is used by the PF
+ * to notify EMP whenever one of the following parameters get
+ * modified:
+ * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA
+ * - PCIRTT in PRTDCB_GENC.PCIRTT
+ * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME.
+ * EMP will return when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * will be set to 0 when RPB is modified.
+ **/
+enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters.This command is used to
+ * allocate a set of smonVlanStats counters to a specific VLAN in a specific
+ * switch.
+ **/
+enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 *stat_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_statistics *cmd_resp =
+ (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if ((seid == 0) || (stat_index == NULL))
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics);
+
+ cmd_resp->seid = CPU_TO_LE16(seid);
+ cmd_resp->vlan = CPU_TO_LE16(vlan_id);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && stat_index)
+ *stat_index = LE16_TO_CPU(cmd_resp->stat_index);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_statistics - Remove a statistics block to a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters.This command is used to
+ * deallocate a set of smonVlanStats counters to a specific VLAN in a specific
+ * switch.
+ **/
+enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 stat_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_statistics *cmd =
+ (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_statistics);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan = CPU_TO_LE16(vlan_id);
+ cmd->stat_index = CPU_TO_LE16(stat_index);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_port_parameters - set physical port parameters.
+ * @hw: pointer to the hw struct
+ * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
+ * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI
+ * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded
+ * @double_vlan: if set double VLAN is enabled
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
+ u16 bad_frame_vsi, bool save_bad_pac,
+ bool pad_short_pac, bool double_vlan,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_set_port_parameters *cmd;
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u16 command_flags = 0;
+
+ cmd = (struct i40e_aqc_set_port_parameters *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_port_parameters);
+
+ cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
+ if (save_bad_pac)
+ command_flags |= I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS;
+ if (pad_short_pac)
+ command_flags |= I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS;
+ if (double_vlan)
+ command_flags |= I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA;
+ cmd->command_flags = CPU_TO_LE16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
+ * @hw: pointer to the hw struct
+ * @seid: seid for the physical port/switching component/vsi
+ * @buff: Indirect buffer to hold data parameters and response
+ * @buff_size: Indirect buffer size
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Generic command handler for Tx scheduler AQ commands
+ **/
+static enum i40e_status_code i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+ void *buff, u16 buff_size,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_tx_sched_ind *cmd =
+ (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ enum i40e_status_code status;
+ bool cmd_param_flag = false;
+
+ switch (opcode) {
+ case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
+ case i40e_aqc_opc_configure_vsi_tc_bw:
+ case i40e_aqc_opc_enable_switching_comp_ets:
+ case i40e_aqc_opc_modify_switching_comp_ets:
+ case i40e_aqc_opc_disable_switching_comp_ets:
+ case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
+ case i40e_aqc_opc_configure_switching_comp_bw_config:
+ cmd_param_flag = true;
+ break;
+ case i40e_aqc_opc_query_vsi_bw_config:
+ case i40e_aqc_opc_query_vsi_ets_sla_config:
+ case i40e_aqc_opc_query_switching_comp_ets_config:
+ case i40e_aqc_opc_query_port_ets_config:
+ case i40e_aqc_opc_query_switching_comp_bw_config:
+ cmd_param_flag = false;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (cmd_param_flag)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->vsi_seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_vsi_bw_limit);
+
+ cmd->vsi_seid = CPU_TO_LE16(seid);
+ cmd->credit = CPU_TO_LE16(credit);
+ cmd->max_credit = max_credit;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: switching component seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_bw: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_switching_comp_bw_limit *cmd =
+ (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_switching_comp_bw_limit);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->credit = CPU_TO_LE16(credit);
+ cmd->max_bw = max_bw;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_tc_bw,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component connected to Physical Port
+ * @ets_data: Buffer holding ETS parameters
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
+ sizeof(*ets_data), opcode, cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration per TC
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_ets_sla_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's per TC BW config
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI or switching component connected to Physical Port
+ * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_port_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_validate_filter_settings
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Check and validate the filter control settings passed.
+ * The function checks for the valid filter/context sizes being
+ * passed for FCoE and PE.
+ *
+ * Returns I40E_SUCCESS if the values passed are valid and within
+ * range else returns an error.
+ **/
+STATIC enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ u32 fcoe_cntx_size, fcoe_filt_size;
+ u32 pe_cntx_size, pe_filt_size;
+ u32 fcoe_fmax;
+
+ u32 val;
+
+ /* Validate FCoE settings passed */
+ switch (settings->fcoe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->fcoe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* Validate PE settings passed */
+ switch (settings->pe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ case I40E_HASH_FILTER_SIZE_64K:
+ case I40E_HASH_FILTER_SIZE_128K:
+ case I40E_HASH_FILTER_SIZE_256K:
+ case I40E_HASH_FILTER_SIZE_512K:
+ case I40E_HASH_FILTER_SIZE_1M:
+ pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ pe_filt_size <<= (u32)settings->pe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->pe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ case I40E_DMA_CNTX_SIZE_8K:
+ case I40E_DMA_CNTX_SIZE_16K:
+ case I40E_DMA_CNTX_SIZE_32K:
+ case I40E_DMA_CNTX_SIZE_64K:
+ case I40E_DMA_CNTX_SIZE_128K:
+ case I40E_DMA_CNTX_SIZE_256K:
+ pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ pe_cntx_size <<= (u32)settings->pe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
+ val = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
+ >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+ if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
+ return I40E_ERR_INVALID_SIZE;
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_set_filter_control
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Set the Queue Filters for PE/FCoE and enable filters required
+ * for a single PF. It is expected that these settings are programmed
+ * at the driver initialization time.
+ **/
+enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ u32 hash_lut_size = 0;
+ u32 val;
+
+ if (!settings)
+ return I40E_ERR_PARAM;
+
+ /* Validate the input settings */
+ ret = i40e_validate_filter_settings(hw, settings);
+ if (ret)
+ return ret;
+
+ /* Read the PF Queue Filter control register */
+ val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
+
+ /* Program required PE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ /* Program required PE contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
+ val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEDSIZE_MASK;
+
+ /* Program required FCoE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ val |= ((u32)settings->fcoe_filt_num <<
+ I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ /* Program required FCoE DDP contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+ val |= ((u32)settings->fcoe_cntx_num <<
+ I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+
+ /* Program Hash LUT size for the PF */
+ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+ if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
+ hash_lut_size = 1;
+ val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+
+ /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
+ if (settings->enable_fdir)
+ val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+ if (settings->enable_ethtype)
+ val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
+ if (settings->enable_macvlan)
+ val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
+
+ i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to use in the filter
+ * @ethtype: Ethertype to use in the filter
+ * @flags: Flags that needs to be applied to the filter
+ * @vsi_seid: seid of the control VSI
+ * @queue: VSI queue number to send the packet to
+ * @is_add: Add control packet filter if True else remove
+ * @stats: Structure to hold information on control filter counts
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This command will Add or Remove control packet filter for a control VSI.
+ * In return it will update the total number of perfect filter count in
+ * the stats member.
+ **/
+enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd =
+ (struct i40e_aqc_add_remove_control_packet_filter *)
+ &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+ (struct i40e_aqc_add_remove_control_packet_filter_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ if (is_add) {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_control_packet_filter);
+ cmd->queue = CPU_TO_LE16(queue);
+ } else {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_control_packet_filter);
+ }
+
+ if (mac_addr)
+ i40e_memcpy(cmd->mac, mac_addr, ETH_ALEN,
+ I40E_NONDMA_TO_NONDMA);
+
+ cmd->etype = CPU_TO_LE16(ethtype);
+ cmd->flags = CPU_TO_LE16(flags);
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && stats) {
+ stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used);
+ stats->etype_used = LE16_TO_CPU(resp->etype_used);
+ stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free);
+ stats->etype_free = LE16_TO_CPU(resp->etype_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid to add ethertype filter from
+ **/
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 seid)
+{
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
+ u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+ u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
+ enum i40e_status_code status;
+
+ status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
+ seid, 0, true, NULL,
+ NULL);
+ if (status)
+ DEBUGOUT("Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
+}
+
+/**
+ * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue
+ * @filters: list of cloud filters
+ * @filter_count: length of list
+ *
+ * There's an issue in the device where the Geneve VNI layout needs
+ * to be shifted 1 byte over from the VxLAN VNI
+ **/
+STATIC void i40e_fix_up_geneve_vni(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aqc_add_remove_cloud_filters_element_data *f = filters;
+ int i;
+
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (LE16_TO_CPU(f[i].flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = LE32_TO_CPU(f[i].tenant_id);
+ f[i].tenant_id = CPU_TO_LE32(ti << 8);
+ }
+ }
+}
+
+/**
+ * i40e_aq_add_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters from
+ * @filters: Buffer which contains the filters to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+
+ i40e_fix_up_geneve_vni(filters, filter_count);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_cloud_filters_big_buffer
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_rm_cloud_filt_elem_ext are filled in by the caller of
+ * the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER;
+
+ /* adjust Geneve VNI for HW issue */
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = LE32_TO_CPU(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+
+ i40e_fix_up_geneve_vni(filters, filter_count);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_cloud_filters_big_buffer
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_rm_cloud_filt_elem_ext are filled in by the caller of
+ * the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_remove_cloud_filters_big_buffer(
+ struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER;
+
+ /* adjust Geneve VNI for HW issue */
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = LE32_TO_CPU(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_replace_cloud_filters - Replace cloud filter command
+ * @hw: pointer to the hw struct
+ * @filters: pointer to the i40e_aqc_replace_cloud_filter_cmd struct
+ * @cmd_buf: pointer to the i40e_aqc_replace_cloud_filter_cmd_buf struct
+ *
+ **/
+enum
+i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
+ struct i40e_aqc_replace_cloud_filters_cmd *filters,
+ struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_replace_cloud_filters_cmd *cmd =
+ (struct i40e_aqc_replace_cloud_filters_cmd *)&desc.params.raw;
+ enum i40e_status_code status = I40E_SUCCESS;
+ int i = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_replace_cloud_filters);
+
+ desc.datalen = CPU_TO_LE16(32);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->old_filter_type = filters->old_filter_type;
+ cmd->new_filter_type = filters->new_filter_type;
+ cmd->valid_flags = filters->valid_flags;
+ cmd->tr_bit = filters->tr_bit;
+
+ status = i40e_asq_send_command(hw, &desc, cmd_buf,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf), NULL);
+
+ /* for get cloud filters command */
+ for (i = 0; i < 32; i += 4) {
+ cmd_buf->filters[i / 4].filter_type = cmd_buf->data[i];
+ cmd_buf->filters[i / 4].input[0] = cmd_buf->data[i + 1];
+ cmd_buf->filters[i / 4].input[1] = cmd_buf->data[i + 2];
+ cmd_buf->filters[i / 4].input[2] = cmd_buf->data[i + 3];
+ }
+
+ return status;
+}
+
+
+/**
+ * i40e_aq_alternate_write
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: value to be written under 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: value to be written under 'reg_addr1'
+ *
+ * Write one or two dwords to alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
+ u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write *cmd_resp =
+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write);
+ cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+ cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+ cmd_resp->data0 = CPU_TO_LE32(reg_val0);
+ cmd_resp->data1 = CPU_TO_LE32(reg_val1);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of a first register to be modified
+ * @dw_count: number of alternate structure fields to write
+ * @buffer: pointer to the command buffer
+ *
+ * Write 'dw_count' dwords from 'buffer' to alternate structure
+ * starting at 'addr'.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_ind_write *cmd_resp =
+ (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buffer == NULL)
+ return I40E_ERR_PARAM;
+
+ /* Indirect command */
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_write_indirect);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (dw_count > (I40E_AQ_LARGE_BUF/4))
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd_resp->address = CPU_TO_LE32(addr);
+ cmd_resp->length = CPU_TO_LE32(dw_count);
+
+ status = i40e_asq_send_command(hw, &desc, buffer,
+ I40E_LO_DWORD(4*dw_count), NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
+ * is not passed then only register at 'reg_addr0' is read.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write *cmd_resp =
+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reg_val0 == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+ cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+ cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ if (status == I40E_SUCCESS) {
+ *reg_val0 = LE32_TO_CPU(cmd_resp->data0);
+
+ if (reg_val1 != NULL)
+ *reg_val1 = LE32_TO_CPU(cmd_resp->data1);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_read_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of the alternate structure field
+ * @dw_count: number of alternate structure fields to read
+ * @buffer: pointer to the command buffer
+ *
+ * Read 'dw_count' dwords from alternate structure starting at 'addr' and
+ * place them in 'buffer'. The buffer should be allocated by caller.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_ind_write *cmd_resp =
+ (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buffer == NULL)
+ return I40E_ERR_PARAM;
+
+ /* Indirect command */
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_read_indirect);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (dw_count > (I40E_AQ_LARGE_BUF/4))
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd_resp->address = CPU_TO_LE32(addr);
+ cmd_resp->length = CPU_TO_LE32(dw_count);
+
+ status = i40e_asq_send_command(hw, &desc, buffer,
+ I40E_LO_DWORD(4*dw_count), NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_clear
+ * @hw: pointer to the HW structure.
+ *
+ * Clear the alternate structures of the port from which the function
+ * is called.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_clear_port);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write_done
+ * @hw: pointer to the HW structure.
+ * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ * @reset_needed: indicates the SW should trigger GLOBAL reset
+ *
+ * Indicates to the FW that alternate structures have been changed.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
+ u8 bios_mode, bool *reset_needed)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write_done *cmd =
+ (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reset_needed == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_write_done);
+
+ cmd->cmd_flags = CPU_TO_LE16(bios_mode);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ if (!status && reset_needed)
+ *reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) &
+ I40E_AQ_ALTERNATE_RESET_NEEDED) != 0);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_oem_mode
+ * @hw: pointer to the HW structure.
+ * @oem_mode: the OEM mode to be used
+ *
+ * Sets the device to a specific operating mode. Currently the only supported
+ * mode is no_clp, which causes FW to refrain from using Alternate RAM.
+ *
+ **/
+enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
+ u8 oem_mode)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write_done *cmd =
+ (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_set_mode);
+
+ cmd->cmd_flags = CPU_TO_LE16(oem_mode);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_resume_port_tx
+ * @hw: pointer to the hardware structure
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Resume port's Tx traffic
+ **/
+enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+ hw->bus.type = i40e_bus_type_pci_express;
+
+ switch (link_status & I40E_PCI_LINK_WIDTH) {
+ case I40E_PCI_LINK_WIDTH_1:
+ hw->bus.width = i40e_bus_width_pcie_x1;
+ break;
+ case I40E_PCI_LINK_WIDTH_2:
+ hw->bus.width = i40e_bus_width_pcie_x2;
+ break;
+ case I40E_PCI_LINK_WIDTH_4:
+ hw->bus.width = i40e_bus_width_pcie_x4;
+ break;
+ case I40E_PCI_LINK_WIDTH_8:
+ hw->bus.width = i40e_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = i40e_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & I40E_PCI_LINK_SPEED) {
+ case I40E_PCI_LINK_SPEED_2500:
+ hw->bus.speed = i40e_bus_speed_2500;
+ break;
+ case I40E_PCI_LINK_SPEED_5000:
+ hw->bus.speed = i40e_bus_speed_5000;
+ break;
+ case I40E_PCI_LINK_SPEED_8000:
+ hw->bus.speed = i40e_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = i40e_bus_speed_unknown;
+ break;
+ }
+}
+
+/**
+ * i40e_aq_debug_dump
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table id within cluster
+ * @start_index: index of line in the block to read
+ * @buff_size: dump buffer size
+ * @buff: dump buffer
+ * @ret_buff_size: actual buffer size returned
+ * @ret_next_table: next block to read
+ * @ret_next_index: next index to read
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Dump internal FW/HW data for debug purposes.
+ *
+ **/
+enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_dump_internals *cmd =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ struct i40e_aqc_debug_dump_internals *resp =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_debug_dump_internals);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->cluster_id = cluster_id;
+ cmd->table_id = table_id;
+ cmd->idx = CPU_TO_LE32(start_index);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (ret_buff_size != NULL)
+ *ret_buff_size = LE16_TO_CPU(desc.datalen);
+ if (ret_next_table != NULL)
+ *ret_next_table = resp->table_id;
+ if (ret_next_index != NULL)
+ *ret_next_index = LE32_TO_CPU(resp->idx);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_read_bw_from_alt_ram
+ * @hw: pointer to the hardware structure
+ * @max_bw: pointer for max_bw read
+ * @min_bw: pointer for min_bw read
+ * @min_valid: pointer for bool that is true if min_bw is a valid value
+ * @max_valid: pointer for bool that is true if max_bw is a valid value
+ *
+ * Read bw from the alternate ram for the given pf
+ **/
+enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw,
+ bool *min_valid, bool *max_valid)
+{
+ enum i40e_status_code status;
+ u32 max_bw_addr, min_bw_addr;
+
+ /* Calculate the address of the min/max bw registers */
+ max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MAX_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+ min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MIN_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+
+ /* Read the bandwidths from alt ram */
+ status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
+ min_bw_addr, min_bw);
+
+ if (*min_bw & I40E_ALT_BW_VALID_MASK)
+ *min_valid = true;
+ else
+ *min_valid = false;
+
+ if (*max_bw & I40E_ALT_BW_VALID_MASK)
+ *max_valid = true;
+ else
+ *max_valid = false;
+
+ return status;
+}
+
+/**
+ * i40e_aq_configure_partition_bw
+ * @hw: pointer to the hardware structure
+ * @bw_data: Buffer holding valid pfs and bw limits
+ * @cmd_details: pointer to command details
+ *
+ * Configure partitions guaranteed/max bw
+ **/
+enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u16 bwd_size = sizeof(*bw_data);
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_partition_bw);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ desc.datalen = CPU_TO_LE16(bwd_size);
+
+ status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ } else {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_write_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes specified PHY register value
+ **/
+enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register_clause45
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u32 command = 0;
+ u16 retry = 1000;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+
+ command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ goto phy_read_end;
+ }
+
+ command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ status = I40E_ERR_TIMEOUT;
+ retry = 1000;
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ if (!status) {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't read register value from external PHY.\n");
+ }
+
+phy_read_end:
+ return status;
+}
+
+/**
+ * i40e_write_phy_register_clause45
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u32 command = 0;
+ u16 retry = 1000;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+
+ command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ goto phy_write_end;
+ }
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ status = I40E_ERR_TIMEOUT;
+ retry = 1000;
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+phy_write_end:
+ return status;
+}
+
+/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
+{
+ enum i40e_status_code status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_write_phy_register_clause22(hw,
+ reg, phy_addr, value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_write_phy_register_clause45(hw,
+ page, reg, phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
+{
+ enum i40e_status_code status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
+ value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_read_phy_register_clause45(hw, page, reg,
+ phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_get_phy_address
+ * @hw: pointer to the HW structure
+ * @dev_num: PHY port num that address we want
+ *
+ * Gets PHY address for current port
+ **/
+u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
+{
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
+
+ return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
+}
+
+/**
+ * i40e_blink_phy_led
+ * @hw: pointer to the HW structure
+ * @time: time how long led will blinks in secs
+ * @interval: gap between LED on and off in msecs
+ *
+ * Blinks PHY link LED
+ **/
+enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u32 i;
+ u16 led_ctl = 0;
+ u16 gpio_led_port;
+ u16 led_reg;
+ u16 led_addr = I40E_PHY_LED_PROV_REG_1;
+ u8 phy_addr = 0;
+ u8 port_num;
+
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ led_addr++) {
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ &led_reg);
+ if (status)
+ goto phy_blinking_end;
+ led_ctl = led_reg;
+ if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
+ led_reg = 0;
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
+ if (status)
+ goto phy_blinking_end;
+ break;
+ }
+ }
+
+ if (time > 0 && interval > 0) {
+ for (i = 0; i < time * 1000; i += interval) {
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
+ if (status)
+ goto restore_config;
+ if (led_reg & I40E_PHY_LED_MANUAL_ON)
+ led_reg = 0;
+ else
+ led_reg = I40E_PHY_LED_MANUAL_ON;
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
+ if (status)
+ goto restore_config;
+ i40e_msec_delay(interval);
+ }
+ }
+
+restore_config:
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
+
+phy_blinking_end:
+ return status;
+}
+
+/**
+ * i40e_led_get_reg - read LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: read register value
+ **/
+static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 *reg_val)
+{
+ enum i40e_status_code status;
+ u8 phy_addr = 0;
+
+ *reg_val = 0;
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ reg_val, NULL);
+ } else {
+ phy_addr = i40e_get_phy_address(hw, hw->port);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16 *)reg_val);
+ }
+ return status;
+}
+
+/**
+ * i40e_led_set_reg - write LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: register value to write
+ **/
+static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 reg_val)
+{
+ enum i40e_status_code status;
+ u8 phy_addr = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ reg_val, NULL);
+ } else {
+ phy_addr = i40e_get_phy_address(hw, hw->port);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)reg_val);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_led_get_phy - return current on/off mode
+ * @hw: pointer to the hw struct
+ * @led_addr: address of led register to use
+ * @val: original value of register to use
+ *
+ **/
+enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u16 gpio_led_port;
+ u32 reg_val_aq;
+ u16 temp_addr;
+ u8 phy_addr = 0;
+ u16 reg_val;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &reg_val_aq, NULL);
+ if (status == I40E_SUCCESS)
+ *val = (u16)reg_val_aq;
+ return status;
+ }
+ temp_addr = I40E_PHY_LED_PROV_REG_1;
+ phy_addr = i40e_get_phy_address(hw, hw->port);
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ temp_addr++) {
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ temp_addr, phy_addr,
+ &reg_val);
+ if (status)
+ return status;
+ *val = reg_val;
+ if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
+ *led_addr = temp_addr;
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40e_led_set_phy
+ * @hw: pointer to the HW structure
+ * @on: true or false
+ * @led_addr: address of led register to use
+ * @mode: original val plus bit for set or ignore
+ *
+ * Set led's on or off when controlled by the PHY
+ *
+ **/
+enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u32 led_ctl = 0;
+ u32 led_reg = 0;
+
+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
+ if (status)
+ return status;
+ led_ctl = led_reg;
+ if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
+ led_reg = 0;
+ status = i40e_led_set_reg(hw, led_addr, led_reg);
+ if (status)
+ return status;
+ }
+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
+ if (status)
+ goto restore_config;
+ if (on)
+ led_reg = I40E_PHY_LED_MANUAL_ON;
+ else
+ led_reg = 0;
+ status = i40e_led_set_reg(hw, led_addr, led_reg);
+ if (status)
+ goto restore_config;
+ if (mode & I40E_PHY_LED_MODE_ORIG) {
+ led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
+ }
+ return status;
+
+restore_config:
+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
+ return status;
+}
+#endif /* PF_DRIVER */
+
+/**
+ * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: ptr to register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to read the Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
+ (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reg_val == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
+
+ cmd_resp->address = CPU_TO_LE32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS)
+ *reg_val = LE32_TO_CPU(cmd_resp->value);
+
+ return status;
+}
+
+/**
+ * i40e_read_rx_ctl - read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ **/
+u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ bool use_register;
+ int retry = 5;
+ u32 val = 0;
+
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
+ if (!use_register) {
+do_retry:
+ status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ i40e_msec_delay(1);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ val = rd32(hw, reg_addr);
+
+ return val;
+}
+
+/**
+ * i40e_aq_rx_ctl_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to write to an Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd =
+ (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
+
+ cmd->address = CPU_TO_LE32(reg_addr);
+ cmd->value = CPU_TO_LE32(reg_val);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_write_rx_ctl - write to an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ **/
+void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ bool use_register;
+ int retry = 5;
+
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
+ if (!use_register) {
+do_retry:
+ status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
+ reg_val, NULL);
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ i40e_msec_delay(1);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ wr32(hw, reg_addr, reg_val);
+}
+
+/**
+ * i40e_aq_set_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: new register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write the external PHY register.
+ **/
+enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = CPU_TO_LE32(reg_addr);
+ cmd->reg_value = CPU_TO_LE32(reg_val);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: read register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the external PHY register.
+ **/
+enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = CPU_TO_LE32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (!status)
+ *reg_val = LE32_TO_CPU(cmd->reg_value);
+
+ return status;
+}
+
+#ifdef VF_DRIVER
+
+/**
+ * i40e_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. i40e_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum i40e_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_asq_cmd_details details;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF
+ | I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ if (!cmd_details) {
+ i40e_memset(&details, 0, sizeof(details), I40E_NONDMA_MEM);
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = i40e_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg,
+ msglen, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct virtchnl_vf_resource *msg)
+{
+ struct virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.iwarp = (msg->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
+ i40e_memcpy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr,
+ ETH_ALEN,
+ I40E_NONDMA_TO_NONDMA);
+ i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ ETH_ALEN,
+ I40E_NONDMA_TO_NONDMA);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * i40e_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
+{
+ return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
+ I40E_SUCCESS, NULL, 0, NULL);
+}
+#endif /* VF_DRIVER */
+
+/**
+ * i40e_aq_set_arp_proxy_config
+ * @hw: pointer to the HW structure
+ * @proxy_config: pointer to proxy config command table struct
+ * @cmd_details: pointer to command details
+ *
+ * Set ARP offload parameters from pre-populated
+ * i40e_aqc_arp_proxy_data struct
+ **/
+enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+ struct i40e_aqc_arp_proxy_data *proxy_config,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ if (!proxy_config)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+ desc.params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
+ desc.params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data));
+
+ status = i40e_asq_send_command(hw, &desc, proxy_config,
+ sizeof(struct i40e_aqc_arp_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_opc_set_ns_proxy_table_entry
+ * @hw: pointer to the HW structure
+ * @ns_proxy_table_entry: pointer to NS table entry command struct
+ * @cmd_details: pointer to command details
+ *
+ * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters
+ * from pre-populated i40e_aqc_ns_proxy_data struct
+ **/
+enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+ struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ if (!ns_proxy_table_entry)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_ns_proxy_table_entry);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+ desc.params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
+ desc.params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data));
+
+ status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
+ sizeof(struct i40e_aqc_ns_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_clear_wol_filter
+ * @hw: pointer to the hw struct
+ * @filter_index: index of filter to modify (0-7)
+ * @filter: buffer containing filter to be set
+ * @set_filter: true to set filter, false to clear filter
+ * @no_wol_tco: if true, pass through packets cannot cause wake-up
+ * if false, pass through packets may cause wake-up
+ * @filter_valid: true if filter action is valid
+ * @no_wol_tco_valid: true if no WoL in TCO traffic action valid
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear WoL filter for port attached to the PF
+ **/
+enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+ u8 filter_index,
+ struct i40e_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_wol_filter *cmd =
+ (struct i40e_aqc_set_wol_filter *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 cmd_flags = 0;
+ u16 valid_flags = 0;
+ u16 buff_len = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter);
+
+ if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS)
+ return I40E_ERR_PARAM;
+ cmd->filter_index = CPU_TO_LE16(filter_index);
+
+ if (set_filter) {
+ if (!filter)
+ return I40E_ERR_PARAM;
+
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER;
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR;
+ }
+
+ if (no_wol_tco)
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+
+ if (filter_valid)
+ valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID;
+ if (no_wol_tco_valid)
+ valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
+ cmd->valid_flags = CPU_TO_LE16(valid_flags);
+
+ buff_len = sizeof(*filter);
+ desc.datalen = CPU_TO_LE16(buff_len);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
+ cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));
+
+ status = i40e_asq_send_command(hw, &desc, filter,
+ buff_len, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_wake_event_reason
+ * @hw: pointer to the hw struct
+ * @wake_reason: return value, index of matching filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get information for the reason of a Wake Up event
+ **/
+enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+ u16 *wake_reason,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_wake_reason_completion *resp =
+ (struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS)
+ *wake_reason = LE16_TO_CPU(resp->wake_reason);
+
+ return status;
+}
+
+/**
+* i40e_aq_clear_all_wol_filters
+* @hw: pointer to the hw struct
+* @cmd_details: pointer to command details structure or NULL
+*
+* Get information for the reason of a Wake Up event
+**/
+enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_all_wol_filters);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @track_id: package tracking id
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_write_personalization_profile *cmd =
+ (struct i40e_aqc_write_personalization_profile *)
+ &desc.params.raw;
+ struct i40e_aqc_write_ddp_resp *resp;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_write_personalization_profile);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->profile_track_id = CPU_TO_LE32(track_id);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @flags: AdminQ command flags
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_applied_profiles *cmd =
+ (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_personalization_profile_list);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->flags = flags;
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_find_segment_in_package
+ * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_hdr)
+{
+ struct i40e_generic_seg_header *segment;
+ u32 i;
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < pkg_hdr->segment_count; i++) {
+ segment =
+ (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
+ pkg_hdr->segment_offset[i]);
+
+ if (segment->type == segment_type)
+ return segment;
+ }
+
+ return NULL;
+}
+
+/* Get section table in profile */
+#define I40E_SECTION_TABLE(profile, sec_tbl) \
+ do { \
+ struct i40e_profile_segment *p = (profile); \
+ u32 count; \
+ u32 *nvm; \
+ count = p->device_table_count; \
+ nvm = (u32 *)&p->device_table[count]; \
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
+ } while (0)
+
+/* Get section header in profile */
+#define I40E_SECTION_HEADER(profile, offset) \
+ (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
+
+/**
+ * i40e_find_section_in_profile
+ * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
+ * @profile: pointer to the i40e segment header to be searched
+ *
+ * This function searches i40e segment for a particular section type. On
+ * success it returns a pointer to the section header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_section_table *sec_tbl;
+ u32 sec_off;
+ u32 i;
+
+ if (profile->header.type != SEGMENT_TYPE_I40E)
+ return NULL;
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (sec->section.type == section_type)
+ return sec;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
+ * @hw: pointer to the hw struct
+ * @aq: command buffer containing all data to execute AQ
+ **/
+STATIC enum
+i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+ struct i40e_profile_aq_section *aq)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u8 *msg = NULL;
+ u16 msglen;
+
+ i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
+ desc.flags |= CPU_TO_LE16(aq->flags);
+ i40e_memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw),
+ I40E_NONDMA_TO_NONDMA);
+
+ msglen = aq->datalen;
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ msg = &aq->data[0];
+ }
+
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
+
+ if (status != I40E_SUCCESS) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "unable to exec DDP AQ opcode %u, error %d\n",
+ aq->opcode, status);
+ return status;
+ }
+
+ /* copy returned desc to aq_buf */
+ i40e_memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw),
+ I40E_NONDMA_TO_NONDMA);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_validate_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be validated
+ * @track_id: package tracking id
+ * @rollback: flag if the profile is for rollback.
+ *
+ * Validates supported devices and profile's sections.
+ */
+STATIC enum i40e_status_code
+i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id, bool rollback)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_section_table *sec_tbl;
+ u32 vendor_dev_id;
+ u32 dev_cnt;
+ u32 sec_off;
+ u32 i;
+
+ if (track_id == I40E_DDP_TRACKID_INVALID) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == I40E_INTEL_VENDOR_ID &&
+ hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (dev_cnt && (i == dev_cnt)) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Device doesn't support DDP\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ /* Validate sections types */
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (rollback) {
+ if (sec->section.type == SECTION_TYPE_MMIO ||
+ sec->section.type == SECTION_TYPE_AQ ||
+ sec->section.type == SECTION_TYPE_RB_AQ) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not a roll-back package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ } else {
+ if (sec->section.type == SECTION_TYPE_RB_AQ ||
+ sec->section.type == SECTION_TYPE_RB_MMIO) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not an original package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ }
+ }
+
+ return status;
+}
+
+/**
+ * i40e_write_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be downloaded
+ * @track_id: package tracking id
+ *
+ * Handles the download of a complete package.
+ */
+enum i40e_status_code
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_section_table *sec_tbl;
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_aq_section *ddp_aq;
+ u32 section_size = 0;
+ u32 offset = 0, info = 0;
+ u32 sec_off;
+ u32 i;
+
+ status = i40e_validate_profile(hw, profile, track_id, false);
+ if (status)
+ return status;
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ /* Process generic admin command */
+ if (sec->section.type == SECTION_TYPE_AQ) {
+ ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
+ status = i40e_ddp_exec_aq_section(hw, ddp_aq);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to execute aq: section %d, opcode %u\n",
+ i, ddp_aq->opcode);
+ break;
+ }
+ sec->section.type = SECTION_TYPE_RB_AQ;
+ }
+
+ /* Skip any non-mmio sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write MMIO section */
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40e_rollback_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be removed
+ * @track_id: package tracking id
+ *
+ * Rolls back previously loaded package.
+ */
+enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_section_table *sec_tbl;
+ u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ u32 sec_off;
+ int i;
+
+ status = i40e_validate_profile(hw, profile, track_id, true);
+ if (status)
+ return status;
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ /* For rollback write sections in reverse */
+ for (i = sec_tbl->section_count - 1; i >= 0; i--) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+
+ /* Skip any non-rollback sections */
+ if (sec->section.type != SECTION_TYPE_RB_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write roll-back MMIO section */
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40e_add_pinfo_to_list
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+enum i40e_status_code
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ i40e_memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE,
+ I40E_NONDMA_TO_NONDMA);
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.c
new file mode 100644
index 00000000..7600c922
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.c
@@ -0,0 +1,1381 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_dcb.h"
+
+/**
+ * i40e_get_dcbx_status
+ * @hw: pointer to the hw struct
+ * @status: Embedded DCBX Engine Status
+ *
+ * Get the DCBX status from the Firmware
+ **/
+enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+{
+ u32 reg;
+
+ if (!status)
+ return I40E_ERR_PARAM;
+
+ reg = rd32(hw, I40E_PRTDCB_GENS);
+ *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
+ I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
+ I40E_IEEE_ETS_WILLING_SHIFT);
+ etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
+ I40E_IEEE_ETS_CBS_SHIFT);
+ etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
+ I40E_IEEE_ETS_MAXTC_SHIFT);
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* Move offset to priority table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
+ I40E_IEEE_PFC_WILLING_SHIFT);
+ dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
+ I40E_IEEE_PFC_MBC_SHIFT);
+ dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
+ I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.pfcenable = buf[1];
+}
+
+/**
+ * i40e_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ **/
+static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength;
+ u16 offset = 0;
+ u16 length;
+ int i = 0;
+ u8 *buf;
+
+ typelength = I40E_NTOHS(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ buf = tlv->tlvinfo;
+
+ /* The App priority table starts 5 octets after TLV header */
+ length -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < length) {
+ dcbcfg->app[i].priority = (u8)((buf[offset] &
+ I40E_IEEE_APP_PRIO_MASK) >>
+ I40E_IEEE_APP_PRIO_SHIFT);
+ dcbcfg->app[i].selector = (u8)((buf[offset] &
+ I40E_IEEE_APP_SEL_MASK) >>
+ I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ switch (subtype) {
+ case I40E_IEEE_SUBTYPE_ETS_CFG:
+ i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_ETS_REC:
+ i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_PFC_CFG:
+ i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_APP_PRI:
+ i40e_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ **/
+static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ etscfg = &dcbcfg->etscfg;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ etscfg->willing = 1;
+
+ etscfg->cbs = 0;
+ /* Priority Group Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* PG Percentage Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* Number of TCs supported (1 octet) */
+ etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * i40e_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ **/
+static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ dcbcfg->pfc.willing = 1;
+
+ /* ------------------------
+ * | PFC Enable | PFC TCs |
+ * ------------------------
+ * | 1 octet | 1 octet |
+ */
+ dcbcfg->pfc.pfcenable = buf[0];
+ dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * i40e_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ **/
+static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, typelength, offset = 0;
+ struct i40e_cee_app_prio *app;
+ u8 i;
+
+ typelength = I40E_NTOHS(tlv->hdr.typelen);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+
+ dcbcfg->numapps = length / sizeof(*app);
+ if (!dcbcfg->numapps)
+ return;
+ if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
+ dcbcfg->numapps = I40E_DCBX_MAX_APPS;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ u8 up, selector;
+
+ app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
+ for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
+ if (app->prio_map & BIT(up))
+ break;
+ }
+ dcbcfg->app[i].priority = up;
+
+ /* Get Selector from lower 2 bits, and convert to IEEE */
+ selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
+ switch (selector) {
+ case I40E_CEE_APP_SEL_ETHTYPE:
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ break;
+ case I40E_CEE_APP_SEL_TCPIP:
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ break;
+ default:
+ /* Keep selector as it is for unknown types */
+ dcbcfg->app[i].selector = selector;
+ }
+
+ dcbcfg->app[i].protocolid = I40E_NTOHS(app->protocol);
+ /* Move to next app */
+ offset += sizeof(*app);
+ }
+}
+
+/**
+ * i40e_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 len, tlvlen, sublen, typelength;
+ struct i40e_cee_feat_tlv *sub_tlv;
+ u8 subtype, feat_tlv_count = 0;
+ u32 ouisubtype;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ /* Return if not CEE DCBX */
+ if (subtype != I40E_CEE_DCBX_TYPE)
+ return;
+
+ typelength = I40E_NTOHS(tlv->typelength);
+ tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
+ sizeof(struct i40e_cee_ctrl_tlv);
+ /* Return if no CEE DCBX Feature TLVs */
+ if (tlvlen <= len)
+ return;
+
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
+ while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
+ typelength = I40E_NTOHS(sub_tlv->hdr.typelen);
+ sublen = (u16)((typelength &
+ I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ switch (subtype) {
+ case I40E_CEE_SUBTYPE_PG_CFG:
+ i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_PFC_CFG:
+ i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_APP_PRI:
+ i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
+ break;
+ default:
+ return; /* Invalid Sub-type return */
+ }
+ feat_tlv_count++;
+ /* Move to next sub TLV */
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
+ sizeof(sub_tlv->hdr.typelen) +
+ sublen);
+ }
+}
+
+/**
+ * i40e_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Currently only IEEE 802.1Qaz TLV is supported, all others
+ * will be returned
+ **/
+static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
+ I40E_LLDP_TLV_OUI_SHIFT);
+ switch (oui) {
+ case I40E_IEEE_8021QAZ_OUI:
+ i40e_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ case I40E_CEE_DCBX_OUI:
+ i40e_parse_cee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_lldp_to_dcb_config
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ **/
+enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 type;
+ u16 length;
+ u16 typelength;
+ u16 offset = 0;
+
+ if (!lldpmib || !dcbcfg)
+ return I40E_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += I40E_LLDP_MIB_HLEN;
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (1) {
+ typelength = I40E_NTOHS(tlv->typelength);
+ type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ offset += sizeof(typelength) + length;
+
+ /* END TLV or beyond LLDPDU size */
+ if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
+ break;
+
+ switch (type) {
+ case I40E_TLV_TYPE_ORG:
+ i40e_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) +
+ length);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_aq_get_dcb_config
+ * @hw: pointer to the hw struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_virt_mem mem;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
+ (void *)lldpmib, I40E_LLDPDU_SIZE,
+ NULL, NULL, NULL);
+ if (ret)
+ goto free_mem;
+
+ /* Parse LLDP MIB to get dcb configuration */
+ ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
+
+free_mem:
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_cee_to_dcb_v1_config
+ * @cee_cfg: pointer to CEE v1 response configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE v1 configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_v1_config(
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status);
+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+ u8 i, tc, err;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to ETS config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
+ I40E_AQC_CEE_APP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ /* Add APPs if Error is False */
+ if (!err) {
+ /* CEE operating configuration supports FCoE/iSCSI/FIP only */
+ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
+
+ /* FCoE APP */
+ dcbcfg->app[0].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ /* iSCSI APP */
+ dcbcfg->app[1].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+
+ /* FIP APP */
+ dcbcfg->app[2].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+ }
+}
+
+/**
+ * i40e_cee_to_dcb_config
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_config(
+ struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status);
+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+ u8 i, tc, err, sync, oper;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to ETS config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ i = 0;
+ status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
+ I40E_AQC_CEE_FCOE_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FCoE APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* FCoE APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
+ i++;
+ }
+
+ status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
+ I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add iSCSI APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* iSCSI APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
+ i++;
+ }
+
+ status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
+ I40E_AQC_CEE_FIP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FIP APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* FIP APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
+ i++;
+ }
+ dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_get_ieee_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get IEEE mode DCB configuration from the Firmware
+ **/
+STATIC enum i40e_status_code i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+
+ /* IEEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+ /* Get Local DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = I40E_SUCCESS;
+
+out:
+ return ret;
+}
+
+/**
+ * i40e_get_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
+
+ /* If Firmware version < v4.33 on X710/XL710, IEEE only */
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)))
+ return i40e_get_ieee_dcb_config(hw);
+
+ /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
+ sizeof(cee_v1_cfg), NULL);
+ if (ret == I40E_SUCCESS) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ LE16_TO_CPU(cee_v1_cfg.tlv_status);
+ i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
+ &hw->local_dcbx_config);
+ }
+ } else {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg,
+ sizeof(cee_cfg), NULL);
+ if (ret == I40E_SUCCESS) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ LE32_TO_CPU(cee_cfg.tlv_status);
+ i40e_cee_to_dcb_config(&cee_cfg,
+ &hw->local_dcbx_config);
+ }
+ }
+
+ /* CEE mode not enabled try querying IEEE data */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ return i40e_get_ieee_dcb_config(hw);
+
+ if (ret != I40E_SUCCESS)
+ goto out;
+
+ /* Get CEE DCB Desired Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->desired_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = I40E_SUCCESS;
+
+out:
+ return ret;
+}
+
+/**
+ * i40e_init_dcb
+ * @hw: pointer to the hw struct
+ *
+ * Update DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_variables lldp_cfg;
+ u8 adminstatus = 0;
+
+ if (!hw->func_caps.dcb)
+ return ret;
+
+ /* Read LLDP NVM area */
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (ret)
+ return ret;
+
+ /* Get the LLDP AdminStatus for the current port */
+ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
+ adminstatus &= 0xF;
+
+ /* LLDP agent disabled */
+ if (!adminstatus) {
+ hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
+ return ret;
+ }
+
+ /* Get DCBX status */
+ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+ if (ret)
+ return ret;
+
+ /* Check the DCBX Status */
+ switch (hw->dcbx_status) {
+ case I40E_DCBX_STATUS_DONE:
+ case I40E_DCBX_STATUS_IN_PROGRESS:
+ /* Get current DCBX configuration */
+ ret = i40e_get_dcb_config(hw);
+ if (ret)
+ return ret;
+ break;
+ case I40E_DCBX_STATUS_DISABLED:
+ return ret;
+ case I40E_DCBX_STATUS_NOT_STARTED:
+ case I40E_DCBX_STATUS_MULTIPLE_PEERS:
+ default:
+ break;
+ }
+
+ /* Configure the LLDP MIB change event */
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+/**
+ * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 priority0, priority1, maxtcwilling = 0;
+ struct i40e_dcb_ets_config *etscfg;
+ u16 offset = 0, typelength, i;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ if (etscfg->willing)
+ maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
+ maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
+ buf[offset] = maxtcwilling;
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etscfg->prioritytable[i * 2] & 0xF;
+ priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etsrec;
+ u16 offset = 0, typelength, i;
+ u8 priority0, priority1;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ etsrec = &dcbcfg->etsrec;
+ /* First Octet is reserved */
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etsrec->prioritytable[i * 2] & 0xF;
+ priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tsatable[i];
+}
+
+ /**
+ * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store to get PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelength;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_PFC_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ if (dcbcfg->pfc.willing)
+ buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT);
+
+ if (dcbcfg->pfc.mbc)
+ buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT);
+
+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcenable;
+}
+
+/**
+ * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store to get APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ **/
+static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength, length, offset = 0;
+ u8 priority, selector, i = 0;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ /* No APP TLVs then just return */
+ if (dcbcfg->numapps == 0)
+ return;
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_APP_PRI);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* Move offset to App Priority Table */
+ offset++;
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (i < dcbcfg->numapps) {
+ priority = dcbcfg->app[i].priority & 0x7;
+ selector = dcbcfg->app[i].selector & 0x7;
+ buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
+ buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+ /* length includes size of ouisubtype + 1 reserved + 3*numapps */
+ length = sizeof(tlv->ouisubtype) + 1 + (i*3);
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ (length & 0x1FF));
+ tlv->typelength = I40E_HTONS(typelength);
+}
+
+ /**
+ * i40e_add_dcb_tlv - Add all IEEE TLVs
+ * @tlv: pointer to org tlv
+ *
+ * add tlv information
+ **/
+static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg,
+ u16 tlvid)
+{
+ switch (tlvid) {
+ case I40E_IEEE_TLV_ID_ETS_CFG:
+ i40e_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_ETS_REC:
+ i40e_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_PFC_CFG:
+ i40e_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_APP_PRI:
+ i40e_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+ /**
+ * i40e_set_dcb_config - Set the local LLDP MIB to FW
+ * @hw: pointer to the hw struct
+ *
+ * Set DCB configuration to the Firmware
+ **/
+enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_dcbx_config *dcbcfg;
+ struct i40e_virt_mem mem;
+ u8 mib_type, *lldpmib;
+ u16 miblen;
+
+ /* update the hw local config */
+ dcbcfg = &hw->local_dcbx_config;
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB;
+ if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) {
+ mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS <<
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT;
+ }
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg);
+ ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL);
+
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_dcb_config_to_lldp - Convert Dcbconfig to MIB format
+ * @hw: pointer to the hw struct
+ * @dcbcfg: store for LLDPDU data
+ *
+ * send DCB configuration to FW
+ **/
+enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, offset = 0, tlvid = I40E_TLV_ID_START;
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 typelength;
+
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (1) {
+ i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+ typelength = I40E_NTOHS(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ if (length)
+ offset += length + 2;
+ /* END TLV or beyond LLDPDU size */
+ if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) ||
+ (offset > I40E_LLDPDU_SIZE))
+ break;
+ /* Move to next TLV */
+ if (length)
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) + length);
+ }
+ *miblen = offset;
+ return ret;
+}
+
+
+/**
+ * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ * @module: address of the module pointer
+ * @word_offset: offset of LLDP configuration
+ *
+ * Reads the LLDP configuration data from NVM using passed addresses
+ **/
+static enum i40e_status_code _i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg,
+ u8 module, u32 word_offset)
+{
+ u32 address, offset = (2 * word_offset);
+ enum i40e_status_code ret;
+ u16 mem;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(mem), &mem, true,
+ NULL);
+ i40e_release_nvm(hw);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ /* Check if this pointer needs to be read in word size or 4K sector
+ * units.
+ */
+ if (mem & I40E_PTR_TYPE)
+ address = (0x7FFF & mem) * 4096;
+ else
+ address = (0x7FFF & mem) * 2;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, module, offset, sizeof(mem), &mem, true,
+ NULL);
+ i40e_release_nvm(hw);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ offset = mem + word_offset;
+ offset *= 2;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, 0, address + offset,
+ sizeof(struct i40e_lldp_variables), lldp_cfg,
+ true, NULL);
+ i40e_release_nvm(hw);
+
+err_lldp_cfg:
+ return ret;
+}
+
+/**
+ * i40e_read_lldp_cfg - read LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ *
+ * Reads the LLDP configuration data from NVM
+ **/
+enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ u32 mem;
+
+ if (!lldp_cfg)
+ return I40E_ERR_PARAM;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem),
+ &mem, true, NULL);
+ i40e_release_nvm(hw);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ /* Read a bit that holds information whether we are running flat or
+ * structured NVM image. Flat image has LLDP configuration in shadow
+ * ram, so there is a need to pass different addresses for both cases.
+ */
+ if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) {
+ /* Flat NVM case */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR,
+ I40E_SR_LLDP_CFG_PTR);
+ } else {
+ /* Good old structured NVM image */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR,
+ I40E_NVM_LLDP_CFG_PTR);
+ }
+
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.h
new file mode 100644
index 00000000..3b709efd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.h
@@ -0,0 +1,223 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_DCB_H_
+#define _I40E_DCB_H_
+
+#include "i40e_type.h"
+
+#define I40E_DCBX_OFFLOAD_DISABLED 0
+#define I40E_DCBX_OFFLOAD_ENABLED 1
+
+#define I40E_DCBX_STATUS_NOT_STARTED 0
+#define I40E_DCBX_STATUS_IN_PROGRESS 1
+#define I40E_DCBX_STATUS_DONE 2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
+#define I40E_DCBX_STATUS_DISABLED 7
+
+#define I40E_TLV_TYPE_END 0
+#define I40E_TLV_TYPE_ORG 127
+
+#define I40E_IEEE_8021QAZ_OUI 0x0080C2
+#define I40E_IEEE_SUBTYPE_ETS_CFG 9
+#define I40E_IEEE_SUBTYPE_ETS_REC 10
+#define I40E_IEEE_SUBTYPE_PFC_CFG 11
+#define I40E_IEEE_SUBTYPE_APP_PRI 12
+
+#define I40E_CEE_DCBX_OUI 0x001b21
+#define I40E_CEE_DCBX_TYPE 2
+
+#define I40E_CEE_SUBTYPE_CTRL 1
+#define I40E_CEE_SUBTYPE_PG_CFG 2
+#define I40E_CEE_SUBTYPE_PFC_CFG 3
+#define I40E_CEE_SUBTYPE_APP_PRI 4
+
+#define I40E_CEE_MAX_FEAT_TYPE 3
+#define I40E_LLDP_ADMINSTATUS_DISABLED 0
+#define I40E_LLDP_ADMINSTATUS_ENABLED_RX 1
+#define I40E_LLDP_ADMINSTATUS_ENABLED_TX 2
+#define I40E_LLDP_ADMINSTATUS_ENABLED_RXTX 3
+
+/* Defines for LLDP TLV header */
+#define I40E_LLDP_MIB_HLEN 14
+#define I40E_LLDP_TLV_LEN_SHIFT 0
+#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
+#define I40E_LLDP_TLV_TYPE_SHIFT 9
+#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
+#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
+#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
+#define I40E_LLDP_TLV_OUI_SHIFT 8
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+
+/* Defines for IEEE ETS TLV */
+#define I40E_IEEE_ETS_MAXTC_SHIFT 0
+#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
+#define I40E_IEEE_ETS_CBS_SHIFT 6
+#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_WILLING_SHIFT 7
+#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
+#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
+#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
+#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_PRIO_0_SHIFT 0
+#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT)
+#define I40E_CEE_PGID_PRIO_1_SHIFT 4
+#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_STRICT 15
+
+/* Defines for IEEE TSA types */
+#define I40E_IEEE_TSA_STRICT 0
+#define I40E_IEEE_TSA_CBS 1
+#define I40E_IEEE_TSA_ETS 2
+#define I40E_IEEE_TSA_VENDOR 255
+
+/* Defines for IEEE PFC TLV */
+#define I40E_IEEE_PFC_CAP_SHIFT 0
+#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
+#define I40E_IEEE_PFC_MBC_SHIFT 6
+#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_WILLING_SHIFT 7
+#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT)
+
+/* Defines for IEEE APP TLV */
+#define I40E_IEEE_APP_SEL_SHIFT 0
+#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT)
+#define I40E_IEEE_APP_PRIO_SHIFT 5
+#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+
+/* TLV definitions for preparing MIB */
+#define I40E_TLV_ID_CHASSIS_ID 0
+#define I40E_TLV_ID_PORT_ID 1
+#define I40E_TLV_ID_TIME_TO_LIVE 2
+#define I40E_IEEE_TLV_ID_ETS_CFG 3
+#define I40E_IEEE_TLV_ID_ETS_REC 4
+#define I40E_IEEE_TLV_ID_PFC_CFG 5
+#define I40E_IEEE_TLV_ID_APP_PRI 6
+#define I40E_TLV_ID_END_OF_LLDPPDU 7
+#define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG
+
+#define I40E_IEEE_ETS_TLV_LENGTH 25
+#define I40E_IEEE_PFC_TLV_LENGTH 6
+#define I40E_IEEE_APP_TLV_LENGTH 11
+
+#pragma pack(1)
+
+/* IEEE 802.1AB LLDP TLV structure */
+struct i40e_lldp_generic_tlv {
+ __be16 typelength;
+ u8 tlvinfo[1];
+};
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct i40e_lldp_org_tlv {
+ __be16 typelength;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+};
+
+struct i40e_cee_tlv_hdr {
+ __be16 typelen;
+ u8 operver;
+ u8 maxver;
+};
+
+struct i40e_cee_ctrl_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ __be32 seqno;
+ __be32 ackno;
+};
+
+struct i40e_cee_feat_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80
+#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20
+ u8 subtype;
+ u8 tlvinfo[1];
+};
+
+struct i40e_cee_app_prio {
+ __be16 protocol;
+ u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define I40E_CEE_APP_SELECTOR_MASK 0x03
+ __be16 lower_oui;
+ u8 prio_map;
+};
+#pragma pack()
+
+/*
+ * TODO: The below structures related LLDP/DCBX variables
+ * and statistics are defined but need to find how to get
+ * the required information from the Firmware to use them
+ */
+
+/* IEEE 802.1AB LLDP Agent Statistics */
+struct i40e_lldp_stats {
+ u64 remtablelastchangetime;
+ u64 remtableinserts;
+ u64 remtabledeletes;
+ u64 remtabledrops;
+ u64 remtableageouts;
+ u64 txframestotal;
+ u64 rxframesdiscarded;
+ u64 rxportframeerrors;
+ u64 rxportframestotal;
+ u64 rxporttlvsdiscardedtotal;
+ u64 rxporttlvsunrecognizedtotal;
+ u64 remtoomanyneighbors;
+};
+
+/* IEEE 802.1Qaz DCBX variables */
+struct i40e_dcbx_variables {
+ u32 defmaxtrafficclasses;
+ u32 defprioritytcmapping;
+ u32 deftcbandwidth;
+ u32 deftsaassignment;
+};
+
+enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw);
+enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw);
+enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg);
+
+#endif /* _I40E_DCB_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_devids.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_devids.h
new file mode 100644
index 00000000..66ff1ccf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_devids.h
@@ -0,0 +1,82 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Vendor ID */
+#define I40E_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
+#define I40E_DEV_ID_20G_KR2_A 0x1588
+#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_25G_B 0x158A
+#define I40E_DEV_ID_25G_SFP28 0x158B
+#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_ADAPTIVE_VF 0x1889
+#endif /* VF_DRIVER */
+#ifdef X722_A0_SUPPORT
+#define I40E_DEV_ID_X722_A0 0x374C
+#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
+#define I40E_DEV_ID_X722_A0_VF 0x374D
+#endif
+#endif
+#define I40E_DEV_ID_KX_X722 0x37CE
+#define I40E_DEV_ID_QSFP_X722 0x37CF
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
+#define I40E_DEV_ID_X722_VF 0x37CD
+#endif /* VF_DRIVER */
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#define i40e_is_25G_device(d) ((d) == I40E_DEV_ID_25G_B || \
+ (d) == I40E_DEV_ID_25G_SFP28)
+
+#endif /* _I40E_DEVIDS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.c
new file mode 100644
index 00000000..c3c76a0c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.c
@@ -0,0 +1,175 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_diag.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_diag_set_loopback
+ * @hw: pointer to the hw struct
+ * @mode: loopback mode
+ *
+ * Set chosen loopback mode
+ **/
+enum i40e_status_code i40e_diag_set_loopback(struct i40e_hw *hw,
+ enum i40e_lb_mode mode)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_aq_set_lb_modes(hw, mode, NULL))
+ ret_code = I40E_ERR_DIAG_TEST_FAILED;
+
+ return ret_code;
+}
+
+/**
+ * i40e_diag_reg_pattern_test
+ * @hw: pointer to the hw struct
+ * @reg: reg to be tested
+ * @mask: bits to be touched
+ **/
+static enum i40e_status_code i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+ u32 reg, u32 mask)
+{
+ const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ u32 pat, val, orig_val;
+ int i;
+
+ orig_val = rd32(hw, reg);
+ for (i = 0; i < ARRAY_SIZE(patterns); i++) {
+ pat = patterns[i];
+ wr32(hw, reg, (pat & mask));
+ val = rd32(hw, reg);
+ if ((val & mask) != (pat & mask)) {
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+ }
+
+ wr32(hw, reg, orig_val);
+ val = rd32(hw, reg);
+ if (val != orig_val) {
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+
+ return I40E_SUCCESS;
+}
+
+struct i40e_diag_reg_test_info i40e_reg_list[] = {
+ /* offset mask elements stride */
+ {I40E_QTX_CTL(0), 0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
+ {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
+ { 0 }
+};
+
+/**
+ * i40e_diag_reg_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform registers diagnostic test
+ **/
+enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 reg, mask;
+ u32 i, j;
+
+ for (i = 0; i40e_reg_list[i].offset != 0 &&
+ ret_code == I40E_SUCCESS; i++) {
+
+ /* set actual reg range for dynamically allocated resources */
+ if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
+ hw->func_caps.num_tx_qp != 0)
+ i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+ if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
+ i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
+ i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
+ hw->func_caps.num_msix_vectors != 0)
+ i40e_reg_list[i].elements =
+ hw->func_caps.num_msix_vectors - 1;
+
+ /* test register access */
+ mask = i40e_reg_list[i].mask;
+ for (j = 0; j < i40e_reg_list[i].elements &&
+ ret_code == I40E_SUCCESS; j++) {
+ reg = i40e_reg_list[i].offset
+ + (j * i40e_reg_list[i].stride);
+ ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
+ }
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_diag_eeprom_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform EEPROM diagnostic test
+ **/
+enum i40e_status_code i40e_diag_eeprom_test(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ u16 reg_val;
+
+ /* read NVM control word and if NVM valid, validate EEPROM checksum*/
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
+ if ((ret_code == I40E_SUCCESS) &&
+ ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
+ BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+ return i40e_validate_nvm_checksum(hw, NULL);
+ else
+ return I40E_ERR_DIAG_TEST_FAILED;
+}
+
+/**
+ * i40e_diag_fw_alive_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform FW alive diagnostic test
+ **/
+enum i40e_status_code i40e_diag_fw_alive_test(struct i40e_hw *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return I40E_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.h
new file mode 100644
index 00000000..105b1191
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.h
@@ -0,0 +1,61 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_DIAG_H_
+#define _I40E_DIAG_H_
+
+#include "i40e_type.h"
+
+enum i40e_lb_mode {
+ I40E_LB_MODE_NONE = 0x0,
+ I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL,
+ I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE,
+ I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL,
+};
+
+struct i40e_diag_reg_test_info {
+ u32 offset; /* the base register */
+ u32 mask; /* bits that can be tested */
+ u32 elements; /* number of elements if array */
+ u32 stride; /* bytes between each element */
+};
+
+extern struct i40e_diag_reg_test_info i40e_reg_list[];
+
+enum i40e_status_code i40e_diag_set_loopback(struct i40e_hw *hw,
+ enum i40e_lb_mode mode);
+enum i40e_status_code i40e_diag_fw_alive_test(struct i40e_hw *hw);
+enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw);
+enum i40e_status_code i40e_diag_eeprom_test(struct i40e_hw *hw);
+
+#endif /* _I40E_DIAG_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.c
new file mode 100644
index 00000000..502407bd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.c
@@ -0,0 +1,369 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_status.h"
+#include "i40e_alloc.h"
+#include "i40e_hmc.h"
+#include "i40e_type.h"
+
+/**
+ * i40e_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ **/
+enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+ enum i40e_memory_type mem_type;
+ bool dma_mem_alloc_done = false;
+ struct i40e_dma_mem mem;
+ u64 alloc_len;
+
+ if (NULL == hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (sd_index >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
+ goto exit;
+ }
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (I40E_SD_TYPE_PAGED == type) {
+ mem_type = i40e_mem_pd;
+ alloc_len = I40E_HMC_PAGED_BP_SIZE;
+ } else {
+ mem_type = i40e_mem_bp_jumbo;
+ alloc_len = direct_mode_sz;
+ }
+
+ /* allocate a 4K pd page or 2M backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ dma_mem_alloc_done = true;
+ if (I40E_SD_TYPE_PAGED == type) {
+ ret_code = i40e_allocate_virt_mem(hw,
+ &sd_entry->u.pd_table.pd_entry_virt_mem,
+ sizeof(struct i40e_hmc_pd_entry) * 512);
+ if (ret_code)
+ goto exit;
+ sd_entry->u.pd_table.pd_entry =
+ (struct i40e_hmc_pd_entry *)
+ sd_entry->u.pd_table.pd_entry_virt_mem.va;
+ i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
+ &mem, sizeof(struct i40e_dma_mem),
+ I40E_NONDMA_TO_NONDMA);
+ } else {
+ i40e_memcpy(&sd_entry->u.bp.addr,
+ &mem, sizeof(struct i40e_dma_mem),
+ I40E_NONDMA_TO_NONDMA);
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+ /* initialize the sd entry */
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+ /* increment the ref count */
+ I40E_INC_SD_REFCNT(&hmc_info->sd_table);
+ }
+ /* Increment backing page reference count */
+ if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
+ I40E_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+ if (I40E_SUCCESS != ret_code)
+ if (dma_mem_alloc_done)
+ i40e_free_dma_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40e_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds pd_entry in the pd_table
+ * 3. Mark the entry valid in i40e_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for pd should be pinned down, physically contiguous and
+ * aligned on 4K boundary and zeroed memory.
+ * 2. It should be 4K in size.
+ **/
+enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_dma_mem mem;
+ struct i40e_dma_mem *page = &mem;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+ u64 page_desc;
+
+ if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
+ goto exit;
+ }
+
+ /* find corresponding sd */
+ sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
+ if (I40E_SD_TYPE_PAGED !=
+ hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ goto exit;
+
+ rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = true;
+ page = rsrc_pg;
+ } else {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ pd_entry->rsrc_pg = false;
+ }
+
+ i40e_memcpy(&pd_entry->bp.addr, page,
+ sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
+ /* Set page address and valid bit */
+ page_desc = page->pa | 0x1;
+
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+
+ /* Add the backing page physical address in the pd entry */
+ i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
+ I40E_NONDMA_TO_DMA);
+
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = true;
+ I40E_INC_PD_REFCNT(pd_table);
+ }
+ I40E_INC_BP_REFCNT(&pd_entry->bp);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in pd tabe (for paged address mode) or in sd table
+ * (for direct address mode) invalid.
+ * 2. Write to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrement the ref count for the pd _entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by backing storage after this
+ * function returns.
+ **/
+enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+
+ /* calculate index */
+ sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
+ goto exit;
+ }
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
+ goto exit;
+ }
+ /* get the entry and decrease its ref counter */
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ I40E_DEC_BP_REFCNT(&pd_entry->bp);
+ if (pd_entry->bp.ref_cnt)
+ goto exit;
+
+ /* mark the entry invalid */
+ pd_entry->valid = false;
+ I40E_DEC_PD_REFCNT(pd_table);
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
+ I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+
+ /* free memory here */
+ if (!pd_entry->rsrc_pg)
+ ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ if (!pd_table->ref_cnt)
+ i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ **/
+enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
+ if (sd_entry->u.bp.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: used to distinguish between VF and PF
+ **/
+enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+ return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
+}
+
+/**
+ * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ **/
+enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page_new - Removes a PD page from sd entry.
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ * @is_pf: used to distinguish between VF and PF
+ **/
+enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+ return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.h
new file mode 100644
index 00000000..343b251f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.h
@@ -0,0 +1,245 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40E_FIRST_VF_FPM_ID 16
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+ struct i40e_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool rsrc_pg;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+ struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+ /* equals to pci func num for PF and dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(I40E_HI_DWORD(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
+enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+
+enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
+enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c
new file mode 100644
index 00000000..f03f3813
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c
@@ -0,0 +1,1406 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_type.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_prototype.h"
+
+/* lan specific interface functions */
+
+/**
+ * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
+ * @offset: base address offset needing alignment
+ *
+ * Aligns the layer 2 function private memory so it's 512-byte aligned.
+ **/
+STATIC u64 i40e_align_l2obj_base(u64 offset)
+{
+ u64 aligned_offset = offset;
+
+ if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
+ aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
+ (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
+
+ return aligned_offset;
+}
+
+/**
+ * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * Calculates the maximum amount of memory for the function required, based
+ * on the number of resources it must provide context for.
+ **/
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num)
+{
+ u64 fpm_size = 0;
+
+ fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ return fpm_size;
+}
+
+/**
+ * i40e_init_lan_hmc - initialize i40e_hmc_info struct
+ * @hw: pointer to the HW structure
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * This function will be called once per physical function initialization.
+ * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
+ * the driver's provided input, as well as information from the HMC itself
+ * loaded from NVRAM.
+ *
+ * Assumptions:
+ * - HMC Resource Profile has been selected before calling this function.
+ **/
+enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num)
+{
+ struct i40e_hmc_obj_info *obj, *full_obj;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 l2fpm_size;
+ u32 size_exp;
+
+ hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
+ hw->hmc.hmc_fn_id = hw->pf_id;
+
+ /* allocate memory for hmc_obj */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
+ sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
+ hw->hmc.hmc_obj_virt_mem.va;
+
+ /* The full object will be used to create the LAN HMC SD */
+ full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
+ full_obj->max_cnt = 0;
+ full_obj->cnt = 0;
+ full_obj->base = 0;
+ full_obj->size = 0;
+
+ /* Tx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = txq_num;
+ obj->base = 0;
+ size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (txq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ txq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* Rx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = rxq_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (rxq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ rxq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
+ obj->cnt = fcoe_cntx_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_cntx_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_cntx_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE filter information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ obj->cnt = fcoe_filt_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_filt_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_filt_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ hw->hmc.first_sd_index = 0;
+ hw->hmc.sd_table.ref_cnt = 0;
+ l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
+ fcoe_filt_num);
+ if (NULL == hw->hmc.sd_table.sd_entry) {
+ hw->hmc.sd_table.sd_cnt = (u32)
+ (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
+ I40E_HMC_DIRECT_BP_SIZE;
+
+ /* allocate the sd_entry members in the sd_table */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
+ (sizeof(struct i40e_hmc_sd_entry) *
+ hw->hmc.sd_table.sd_cnt));
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.sd_table.sd_entry =
+ (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
+ }
+ /* store in the LAN full object for later */
+ full_obj->size = l2fpm_size;
+
+init_lan_hmc_out:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page - Remove a page from the page descriptor table
+ * @hw: pointer to the HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ *
+ * This function:
+ * 1. Marks the entry in pd table (for paged address mode) invalid
+ * 2. write to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrement the ref count for pd_entry
+ * assumptions:
+ * 1. caller can deallocate the memory used by pd after this function
+ * returns.
+ **/
+STATIC enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
+ ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp - remove a backing page from a segment descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in sd table (for direct address mode) invalid
+ * 2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set
+ * to 0) and PMSDDATAHIGH to invalidate the sd page
+ * 3. Decrement the ref count for the sd_entry
+ * assumptions:
+ * 1. caller can deallocate the memory used by backing storage after this
+ * function returns.
+ **/
+STATIC enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
+ ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_create_lan_hmc_object - allocate backing store for hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ **/
+enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 pd_idx = 0, pd_lmt = 0;
+ bool pd_error = false;
+ u32 sd_idx, sd_lmt;
+ u64 sd_size;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
+ goto exit;
+ }
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+ /* find pd index */
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ /* This is to cover for cases where you may not want to have an SD with
+ * the full 2M memory but something smaller. By not filling out any
+ * size, the function will default the SD size to be 2M.
+ */
+ if (info->direct_mode_sz == 0)
+ sd_size = I40E_HMC_DIRECT_BP_SIZE;
+ else
+ sd_size = info->direct_mode_sz;
+
+ /* check if all the sds are valid. If not, allocate a page and
+ * initialize it.
+ */
+ for (j = sd_idx; j < sd_lmt; j++) {
+ /* update the sd table entry */
+ ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
+ info->entry_type,
+ sd_size);
+ if (I40E_SUCCESS != ret_code)
+ goto exit_sd_error;
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ /* check if all the pds in this sd are valid. If not,
+ * allocate a page and initialize it.
+ */
+
+ /* find pd_idx and pd_lmt in this sd */
+ pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt,
+ ((j + 1) * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = i40e_add_pd_table_entry(hw,
+ info->hmc_info,
+ i, NULL);
+ if (I40E_SUCCESS != ret_code) {
+ pd_error = true;
+ break;
+ }
+ }
+ if (pd_error) {
+ /* remove the backing pages from pd_idx1 to i */
+ while (i && (i > pd_idx1)) {
+ i40e_remove_pd_bp(hw, info->hmc_info,
+ (i - 1));
+ i--;
+ }
+ }
+ }
+ if (!sd_entry->valid) {
+ sd_entry->valid = true;
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ I40E_SET_PF_SD_ENTRY(hw,
+ sd_entry->u.pd_table.pd_page_addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ goto exit;
+ }
+ }
+ }
+ goto exit;
+
+exit_sd_error:
+ /* cleanup for sd entries from j to sd_idx */
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx,
+ ((j - 1) * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++)
+ i40e_remove_pd_bp(hw, info->hmc_info, i);
+ i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ break;
+ }
+ j--;
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_configure_lan_hmc - prepare the HMC backing store
+ * @hw: pointer to the hw structure
+ * @model: the model for the layout of the SD/PD tables
+ *
+ * - This function will be called once per physical function initialization.
+ * - This function will be called after i40e_init_lan_hmc() and before
+ * any LAN/FCoE HMC objects can be created.
+ **/
+enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model)
+{
+ struct i40e_hmc_lan_create_obj_info info;
+ u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+ struct i40e_hmc_obj_info *obj;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ /* Initialize part of the create object info struct */
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
+
+ /* Build the SD entry for the LAN objects */
+ switch (model) {
+ case I40E_HMC_MODEL_DIRECT_PREFERRED:
+ case I40E_HMC_MODEL_DIRECT_ONLY:
+ info.entry_type = I40E_SD_TYPE_DIRECT;
+ /* Make one big object, a single SD */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+ goto try_type_paged;
+ else if (ret_code != I40E_SUCCESS)
+ goto configure_lan_hmc_out;
+ /* else clause falls through the break */
+ break;
+ case I40E_HMC_MODEL_PAGED_ONLY:
+try_type_paged:
+ info.entry_type = I40E_SD_TYPE_PAGED;
+ /* Make one big object in the PD table */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if (ret_code != I40E_SUCCESS)
+ goto configure_lan_hmc_out;
+ break;
+ default:
+ /* unsupported type */
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
+ ret_code);
+ goto configure_lan_hmc_out;
+ }
+
+ /* Configure and program the FPM registers so objects can be created */
+
+ /* Tx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
+
+ /* Rx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE filters */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
+
+configure_lan_hmc_out:
+ return ret_code;
+}
+
+/**
+ * i40e_delete_hmc_object - remove hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_delete_obj_info struct
+ *
+ * This will de-populate the SDs and PDs. It frees
+ * the memory for PDS and backing storage. After this function is returned,
+ * caller should deallocate memory allocated previously for
+ * book-keeping information about PDs and backing storage.
+ **/
+enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_table *pd_table;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 sd_idx, sd_lmt;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
+ goto exit;
+ }
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
+
+ if (I40E_SD_TYPE_PAGED !=
+ info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ continue;
+
+ rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
+
+ pd_table =
+ &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ }
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case I40E_SD_TYPE_DIRECT:
+ ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ break;
+ case I40E_SD_TYPE_PAGED:
+ ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ break;
+ default:
+ break;
+ }
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
+ * @hw: pointer to the hw structure
+ *
+ * This must be called by drivers as they are shutting down and being
+ * removed from the OS.
+ **/
+enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+{
+ struct i40e_hmc_lan_delete_obj_info info;
+ enum i40e_status_code ret_code;
+
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.count = 1;
+
+ /* delete the object */
+ ret_code = i40e_delete_lan_hmc_object(hw, &info);
+
+ /* free the SD table entry for LAN */
+ i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
+ hw->hmc.sd_table.sd_cnt = 0;
+ hw->hmc.sd_table.sd_entry = NULL;
+
+ /* free memory used for hmc_obj */
+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+ hw->hmc.hmc_obj = NULL;
+
+ return ret_code;
+}
+
+#define I40E_HMC_STORE(_struct, _ele) \
+ offsetof(struct _struct, _ele), \
+ FIELD_SIZEOF(struct _struct, _ele)
+
+struct i40e_context_ele {
+ u16 offset;
+ u16 size_of;
+ u16 width;
+ u16 lsb;
+};
+
+/* LAN Tx Queue Context */
+static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
+ /* Field Width LSB */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 },
+/* line 1 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 },
+/* line 7 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) },
+ { 0 }
+};
+
+/* LAN Rx Queue Context */
+static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
+ /* Field Width LSB */
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
+ { 0 }
+};
+
+/**
+ * i40e_write_byte - replace HMC context byte
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_byte(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u8 src_byte, dest_byte, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = (u8)(BIT(ce_info->width) - 1);
+
+ src_byte = *from;
+ src_byte &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_byte <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+ dest_byte &= ~mask; /* get the bits not changing */
+ dest_byte |= src_byte; /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_word - replace HMC context word
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_word(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u16 src_word, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le16 dest_word;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_word = *(u16 *)from;
+ src_word &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_word <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
+
+ dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
+ dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_dword - replace HMC context dword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_dword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u32 src_dword, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le32 dest_dword;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 32 on an x86 machine, then the shift
+ * operation will not work because the SHL instructions count is masked
+ * to 5 bits so the shift will do nothing
+ */
+ if (ce_info->width < 32)
+ mask = BIT(ce_info->width) - 1;
+ else
+ mask = ~(u32)0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_dword = *(u32 *)from;
+ src_dword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_dword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
+
+ dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
+ dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_qword - replace HMC context qword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_qword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u64 src_qword, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le64 dest_qword;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 64 on an x86 machine, then the shift
+ * operation will not work because the SHL instructions count is masked
+ * to 6 bits so the shift will do nothing
+ */
+ if (ce_info->width < 64)
+ mask = BIT_ULL(ce_info->width) - 1;
+ else
+ mask = ~(u64)0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_qword = *(u64 *)from;
+ src_qword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_qword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
+
+ dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
+ dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_byte - read HMC context byte into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_byte(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u8 dest_byte, mask;
+ u8 *src, *target;
+ u16 shift_width;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = (u8)(BIT(ce_info->width) - 1);
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+ dest_byte &= ~(mask);
+
+ dest_byte >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_word - read HMC context word into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_word(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u16 dest_word, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le16 src_word;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_word &= ~(CPU_TO_LE16(mask));
+
+ /* get the data back into host order before shifting */
+ dest_word = LE16_TO_CPU(src_word);
+
+ dest_word >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_dword - read HMC context dword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_dword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u32 dest_dword, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le32 src_dword;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 32 on an x86 machine, then the shift
+ * operation will not work because the SHL instructions count is masked
+ * to 5 bits so the shift will do nothing
+ */
+ if (ce_info->width < 32)
+ mask = BIT(ce_info->width) - 1;
+ else
+ mask = ~(u32)0;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_dword &= ~(CPU_TO_LE32(mask));
+
+ /* get the data back into host order before shifting */
+ dest_dword = LE32_TO_CPU(src_dword);
+
+ dest_dword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
+ I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_qword - read HMC context qword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_qword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u64 dest_qword, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le64 src_qword;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 64 on an x86 machine, then the shift
+ * operation will not work because the SHL instructions count is masked
+ * to 6 bits so the shift will do nothing
+ */
+ if (ce_info->width < 64)
+ mask = BIT_ULL(ce_info->width) - 1;
+ else
+ mask = ~(u64)0;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_qword &= ~(CPU_TO_LE64(mask));
+
+ /* get the data back into host order before shifting */
+ dest_qword = LE64_TO_CPU(src_qword);
+
+ dest_qword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
+ I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_get_hmc_context - extract HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+ switch (ce_info[f].size_of) {
+ case 1:
+ i40e_read_byte(context_bytes, &ce_info[f], dest);
+ break;
+ case 2:
+ i40e_read_word(context_bytes, &ce_info[f], dest);
+ break;
+ case 4:
+ i40e_read_dword(context_bytes, &ce_info[f], dest);
+ break;
+ case 8:
+ i40e_read_qword(context_bytes, &ce_info[f], dest);
+ break;
+ default:
+ /* nothing to do, just keep going */
+ break;
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_clear_hmc_context - zero out the HMC context bits
+ * @hw: the hardware struct
+ * @context_bytes: pointer to the context bit array (DMA memory)
+ * @hmc_type: the type of HMC resource
+ **/
+static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
+ u8 *context_bytes,
+ enum i40e_hmc_lan_rsrc_type hmc_type)
+{
+ /* clean the bit array */
+ i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
+ I40E_DMA_MEM);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_set_hmc_context - replace HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+
+ /* we have to deal with each element of the HMC using the
+ * correct size so that we are correct regardless of the
+ * endianness of the machine
+ */
+ switch (ce_info[f].size_of) {
+ case 1:
+ i40e_write_byte(context_bytes, &ce_info[f], dest);
+ break;
+ case 2:
+ i40e_write_word(context_bytes, &ce_info[f], dest);
+ break;
+ case 4:
+ i40e_write_dword(context_bytes, &ce_info[f], dest);
+ break;
+ case 8:
+ i40e_write_qword(context_bytes, &ce_info[f], dest);
+ break;
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_hmc_get_object_va - retrieves an object's virtual address
+ * @hw: pointer to the hw structure
+ * @object_base: pointer to u64 to get the va
+ * @rsrc_type: the hmc resource type
+ * @obj_idx: hmc object index
+ *
+ * This function retrieves the object's virtual address from the object
+ * base pointer. This function is used for LAN Queue contexts.
+ **/
+STATIC
+enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
+ u8 **object_base,
+ enum i40e_hmc_lan_rsrc_type rsrc_type,
+ u32 obj_idx)
+{
+ u32 obj_offset_in_sd, obj_offset_in_pd;
+ struct i40e_hmc_info *hmc_info = &hw->hmc;
+ struct i40e_hmc_sd_entry *sd_entry;
+ struct i40e_hmc_pd_entry *pd_entry;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 obj_offset_in_fpm;
+ u32 sd_idx, sd_lmt;
+
+ if (NULL == hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
+ goto exit;
+ }
+ if (NULL == object_base) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
+ goto exit;
+ }
+ if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
+ DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
+ ret_code);
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ goto exit;
+ }
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &sd_idx, &sd_lmt);
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
+ hmc_info->hmc_obj[rsrc_type].size * obj_idx;
+
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &pd_idx, &pd_lmt);
+ rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
+ obj_offset_in_pd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_PAGED_BP_SIZE);
+ *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
+ } else {
+ obj_offset_in_sd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_DIRECT_BP_SIZE);
+ *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_get_lan_tx_queue_context - return the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_get_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
+}
+
+/**
+ * i40e_set_lan_tx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_get_lan_rx_queue_context - return the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_get_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
+}
+
+/**
+ * i40e_set_lan_rx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.h
new file mode 100644
index 00000000..b2a43104
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.h
@@ -0,0 +1,200 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u16 cpuid; /* bigger than needed, see above for reason */
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u16 dbuff; /* bigger than needed, see above for reason */
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u16 hbuff; /* bigger than needed, see above for reason */
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u32 rxmax; /* bigger than needed, see above for reason */
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
+};
+
+/* Tx queue context data
+*
+* The sizes of the variables may be larger than needed due to crossing byte
+* boundaries. If we do not have the width of the variable set to the correct
+* size then we could end up shifting bits off the top of the variable when the
+* variable is at the top of a byte and crosses over into the next byte.
+*/
+struct i40e_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u8 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purpose only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num);
+enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info);
+enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info);
+
+#endif /* _I40E_LAN_HMC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_nvm.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_nvm.c
new file mode 100644
index 00000000..c77dac02
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_nvm.c
@@ -0,0 +1,1714 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_prototype.h"
+
+/**
+ * i40e_init_nvm_ops - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Setup the function pointers and the NVM info structure. Should be called
+ * once per NVM initialization, e.g. inside the i40e_init_shared_code().
+ * Please notice that the NVM term is used here (& in all methods covered
+ * in this file) as an equivalent of the FLASH part mapped into the SR.
+ * We are accessing FLASH always through the Shadow RAM.
+ **/
+enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
+{
+ struct i40e_nvm_info *nvm = &hw->nvm;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 fla, gens;
+ u8 sr_size;
+
+ DEBUGFUNC("i40e_init_nvm");
+
+ /* The SR size is stored regardless of the nvm programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens = rd32(hw, I40E_GLNVM_GENS);
+ sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
+ I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+ /* Switching to words (sr_size contains power of 2KB) */
+ nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = rd32(hw, I40E_GLNVM_FLA);
+ if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
+ /* Max NVM timeout */
+ nvm->timeout = I40E_MAX_NVM_TIMEOUT;
+ nvm->blank_nvm_mode = false;
+ } else { /* Blank programming mode */
+ nvm->blank_nvm_mode = true;
+ ret_code = I40E_ERR_NVM_BLANK_MODE;
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * This function will request NVM ownership for reading
+ * via the proper Admin Command.
+ **/
+enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 gtime, timeout;
+ u64 time_left = 0;
+
+ DEBUGFUNC("i40e_acquire_nvm");
+
+ if (hw->nvm.blank_nvm_mode)
+ goto i40e_i40e_acquire_nvm_exit;
+
+ ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
+ 0, &time_left, NULL);
+ /* Reading the Global Device Timer */
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+
+ /* Store the timeout */
+ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
+
+ if (ret_code)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
+ access, time_left, ret_code, hw->aq.asq_last_status);
+
+ if (ret_code && time_left) {
+ /* Poll until the current NVM owner timeouts */
+ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
+ while ((gtime < timeout) && time_left) {
+ i40e_msec_delay(10);
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ ret_code = i40e_aq_request_resource(hw,
+ I40E_NVM_RESOURCE_ID,
+ access, 0, &time_left,
+ NULL);
+ if (ret_code == I40E_SUCCESS) {
+ hw->nvm.hw_semaphore_timeout =
+ I40E_MS_TO_GTIME(time_left) + gtime;
+ break;
+ }
+ }
+ if (ret_code != I40E_SUCCESS) {
+ hw->nvm.hw_semaphore_timeout = 0;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
+ time_left, ret_code, hw->aq.asq_last_status);
+ }
+ }
+
+i40e_i40e_acquire_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * This function will release NVM resource via the proper Admin Command.
+ **/
+void i40e_release_nvm(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 total_delay = 0;
+
+ DEBUGFUNC("i40e_release_nvm");
+
+ if (hw->nvm.blank_nvm_mode)
+ return;
+
+ ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
+ /* there are some rare cases when trying to release the resource
+ * results in an admin Q timeout, so handle them correctly
+ */
+ while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
+ (total_delay < hw->aq.asq_cmd_timeout)) {
+ i40e_msec_delay(1);
+ ret_code = i40e_aq_release_resource(hw,
+ I40E_NVM_RESOURCE_ID, 0, NULL);
+ total_delay++;
+ }
+}
+
+/**
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
+ * @hw: pointer to the HW structure
+ *
+ * Polls the SRCTL Shadow RAM register done bit.
+ **/
+static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+ u32 srctl, wait_cnt;
+
+ DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
+
+ /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
+ for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
+ srctl = rd32(hw, I40E_GLNVM_SRCTL);
+ if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
+ ret_code = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(5);
+ }
+ if (ret_code == I40E_ERR_TIMEOUT)
+ i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+STATIC enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw,
+ u16 offset,
+ u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+ u32 sr_reg;
+
+ DEBUGFUNC("i40e_read_nvm_word_srctl");
+
+ if (offset >= hw->nvm.sr_size) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
+ offset, hw->nvm.sr_size);
+ ret_code = I40E_ERR_PARAM;
+ goto read_nvm_exit;
+ }
+
+ /* Poll the done bit first */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (ret_code == I40E_SUCCESS) {
+ /* Write the address and start reading */
+ sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ BIT(I40E_GLNVM_SRCTL_START_SHIFT);
+ wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
+
+ /* Poll I40E_GLNVM_SRCTL until the done bit is set */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (ret_code == I40E_SUCCESS) {
+ sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
+ *data = (u16)((sr_reg &
+ I40E_GLNVM_SRDATA_RDDATA_MASK)
+ >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+ }
+ }
+ if (ret_code != I40E_SUCCESS)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+ offset);
+
+read_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_aq - Read Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to write
+ * @data: buffer with words to write to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ **/
+STATIC enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data,
+ bool last_command)
+{
+ enum i40e_status_code ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ DEBUGFUNC("i40e_read_nvm_aq");
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can write only up to 4KB (one sector), in one AQ write */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write fail error: tried to write %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single write cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, &cmd_details);
+
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ
+ **/
+STATIC enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+
+ DEBUGFUNC("i40e_read_nvm_word_aq");
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
+ *data = LE16_TO_CPU(*(__le16 *)data);
+
+ return ret_code;
+}
+
+/**
+ * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM.
+ *
+ * Do not use this function except in cases where the nvm lock is already
+ * taken via i40e_acquire_nvm().
+ **/
+enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
+ u16 offset,
+ u16 *data)
+{
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ return i40e_read_nvm_word_aq(hw, offset, data);
+
+ return i40e_read_nvm_word_srctl(hw, offset, data);
+}
+
+/**
+ * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM.
+ **/
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+
+ if (ret_code)
+ return ret_code;
+ ret_code = __i40e_read_nvm_word(hw, offset, data);
+
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ i40e_release_nvm(hw);
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+STATIC enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 index, word;
+
+ DEBUGFUNC("i40e_read_nvm_buffer_srctl");
+
+ /* Loop through the selected region */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
+ if (ret_code != I40E_SUCCESS)
+ break;
+ }
+
+ /* Update the number of words read from the Shadow RAM */
+ *words = word;
+
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+STATIC enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code;
+ u16 read_size = *words;
+ bool last_cmd = false;
+ u16 words_read = 0;
+ u16 i = 0;
+
+ DEBUGFUNC("i40e_read_nvm_buffer_aq");
+
+ do {
+ /* Calculate number of bytes we should read in this step.
+ * FVL AQ do not allow to read more than one page at a time or
+ * to cross page boundaries.
+ */
+ if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
+ read_size = min(*words,
+ (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
+ (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
+ else
+ read_size = min((*words - words_read),
+ I40E_SR_SECTOR_SIZE_IN_WORDS);
+
+ /* Check if this is last command, if so set proper flag */
+ if ((words_read + read_size) >= *words)
+ last_cmd = true;
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
+ data + words_read, last_cmd);
+ if (ret_code != I40E_SUCCESS)
+ goto read_nvm_buffer_aq_exit;
+
+ /* Increment counter for words already read and move offset to
+ * new read location
+ */
+ words_read += read_size;
+ offset += read_size;
+ } while (words_read < *words);
+
+ for (i = 0; i < *words; i++)
+ data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
+
+read_nvm_buffer_aq_exit:
+ *words = words_read;
+ return ret_code;
+}
+
+/**
+ * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method.
+ **/
+enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
+ u16 offset,
+ u16 *words, u16 *data)
+{
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ return i40e_read_nvm_buffer_aq(hw, offset, words, data);
+
+ return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
+
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_write_nvm_aq - Writes Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to write
+ * @data: buffer with words to write to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ **/
+enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
+{
+ enum i40e_status_code ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ DEBUGFUNC("i40e_write_nvm_aq");
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can write only up to 4KB (one sector), in one AQ write */
+ DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single write cannot spread over two sectors */
+ DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
+ else
+ ret_code = i40e_aq_update_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, 0,
+ &cmd_details);
+
+ return ret_code;
+}
+
+/**
+ * __i40e_write_nvm_word - Writes Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to write
+ * @data: word to write to the Shadow RAM
+ *
+ * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
+ * NVM ownership have to be acquired and released (on ARQ completion event
+ * reception) by caller. To commit SR to NVM update checksum function
+ * should be called.
+ **/
+enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+ void *data)
+{
+ DEBUGFUNC("i40e_write_nvm_word");
+
+ *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
+
+ /* Value 0x00 below means that we treat SR as a flat mem */
+ return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
+}
+
+/**
+ * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset of the Shadow RAM buffer to write
+ * @words: number of words to write
+ * @data: words to write to the Shadow RAM
+ *
+ * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * on ARQ completion event reception by caller. To commit SR to NVM update
+ * checksum function should be called.
+ **/
+enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data)
+{
+ __le16 *le_word_ptr = (__le16 *)data;
+ u16 *word_ptr = (u16 *)data;
+ u32 i = 0;
+
+ DEBUGFUNC("i40e_write_nvm_buffer");
+
+ for (i = 0; i < words; i++)
+ le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
+
+ /* Here we will only write one buffer as the size of the modules
+ * mirrored in the Shadow RAM is always less than 4K.
+ */
+ return i40e_write_nvm_aq(hw, module_pointer, offset, words,
+ data, false);
+}
+
+/**
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
+ *
+ * This function calculates SW Checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
+ * is customer specific and unknown. Therefore, this function skips all maximum
+ * possible size of VPD (1kB).
+ **/
+enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_virt_mem vmem;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 vpd_module = 0;
+ u16 *data;
+ u16 i = 0;
+
+ DEBUGFUNC("i40e_calc_nvm_checksum");
+
+ ret_code = i40e_allocate_virt_mem(hw, &vmem,
+ I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
+ if (ret_code)
+ goto i40e_calc_nvm_checksum_exit;
+ data = (u16 *)vmem.va;
+
+ /* read pointer to VPD area */
+ ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* read pointer to PCIe Alt Auto-load module */
+ ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->nvm.sr_size; i++) {
+ /* Read SR page */
+ if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+ u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+
+ ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+ }
+
+ /* Skip Checksum word */
+ if (i == I40E_SR_SW_CHECKSUM_WORD)
+ continue;
+ /* Skip VPD module (convert byte size to word count) */
+ if ((i >= (u32)vpd_module) &&
+ (i < ((u32)vpd_module +
+ (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
+ continue;
+ }
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if ((i >= (u32)pcie_alt_module) &&
+ (i < ((u32)pcie_alt_module +
+ (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
+ continue;
+ }
+
+ checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
+ }
+
+ *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
+
+i40e_calc_nvm_checksum_exit:
+ i40e_free_virt_mem(hw, &vmem);
+ return ret_code;
+}
+
+/**
+ * i40e_update_nvm_checksum - Updates the NVM checksum
+ * @hw: pointer to hardware structure
+ *
+ * NVM ownership must be acquired before calling this function and released
+ * on ARQ completion event reception by caller.
+ * This function will commit SR to NVM.
+ **/
+enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 checksum;
+ __le16 le_sum;
+
+ DEBUGFUNC("i40e_update_nvm_checksum");
+
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+ le_sum = CPU_TO_LE16(checksum);
+ if (ret_code == I40E_SUCCESS)
+ ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
+ 1, &le_sum, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
+ *
+ * Performs checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need checksum, the value can be NULL.
+ **/
+enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 checksum_sr = 0;
+ u16 checksum_local = 0;
+
+ DEBUGFUNC("i40e_validate_nvm_checksum");
+
+ /* We must acquire the NVM lock in order to correctly synchronize the
+ * NVM accesses across multiple PFs. Without doing so it is possible
+ * for one of the PFs to read invalid data potentially indicating that
+ * the checksum is invalid.
+ */
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_code)
+ return ret_code;
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+ __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+ i40e_release_nvm(hw);
+ if (ret_code)
+ return ret_code;
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (checksum_local != checksum_sr)
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum)
+ *checksum = checksum_local;
+
+ return ret_code;
+}
+
+STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
+{
+ return (u8)(val & I40E_NVM_MOD_PNT_MASK);
+}
+STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
+{
+ return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
+}
+
+STATIC INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
+{
+ return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
+ I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+}
+
+STATIC const char *i40e_nvm_update_state_str[] = {
+ "I40E_NVMUPD_INVALID",
+ "I40E_NVMUPD_READ_CON",
+ "I40E_NVMUPD_READ_SNT",
+ "I40E_NVMUPD_READ_LCB",
+ "I40E_NVMUPD_READ_SA",
+ "I40E_NVMUPD_WRITE_ERA",
+ "I40E_NVMUPD_WRITE_CON",
+ "I40E_NVMUPD_WRITE_SNT",
+ "I40E_NVMUPD_WRITE_LCB",
+ "I40E_NVMUPD_WRITE_SA",
+ "I40E_NVMUPD_CSUM_CON",
+ "I40E_NVMUPD_CSUM_SA",
+ "I40E_NVMUPD_CSUM_LCB",
+ "I40E_NVMUPD_STATUS",
+ "I40E_NVMUPD_EXEC_AQ",
+ "I40E_NVMUPD_GET_AQ_RESULT",
+ "I40E_NVMUPD_GET_AQ_EVENT",
+};
+
+/**
+ * i40e_nvmupd_command - Process an NVM update command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * Dispatches command depending on what update state is current
+ **/
+enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_status_code status;
+ enum i40e_nvmupd_cmd upd_cmd;
+
+ DEBUGFUNC("i40e_nvmupd_command");
+
+ /* assume success */
+ *perrno = 0;
+
+ /* early check for status command and debug msgs */
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_nvm_update_state_str[upd_cmd],
+ hw->nvmupd_state,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
+ cmd->command, cmd->config, cmd->offset, cmd->data_size);
+
+ if (upd_cmd == I40E_NVMUPD_INVALID) {
+ *perrno = -EFAULT;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command returns %d errno %d\n",
+ upd_cmd, *perrno);
+ }
+
+ /* a status request returns immediately rather than
+ * going into the state machine
+ */
+ if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return I40E_ERR_BUF_TOO_SHORT;
+ }
+
+ bytes[0] = hw->nvmupd_state;
+
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
+
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
+ return I40E_SUCCESS;
+ }
+
+ /* Clear status even it is not read and log */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
+
+ /* Acquire lock to prevent race condition where adminq_task
+ * can execute after i40e_nvmupd_nvm_read/write but before state
+ * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
+ *
+ * During NVMUpdate, it is observed that lock could be held for
+ * ~5ms for most commands. However lock is held for ~60ms for
+ * NVMUPD_CSUM_LCB command.
+ */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT:
+ status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_STATE_READING:
+ status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_STATE_WRITING:
+ status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_clear_wait_state(hw);
+ status = I40E_SUCCESS;
+ break;
+ }
+
+ status = I40E_ERR_NOT_READY;
+ *perrno = -EBUSY;
+ break;
+
+ default:
+ /* invalid state, should never happen */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: no such state %d\n", hw->nvmupd_state);
+ status = I40E_NOT_SUPPORTED;
+ *perrno = -ESRCH;
+ break;
+ }
+
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
+ return status;
+}
+
+/**
+ * i40e_nvmupd_state_init - Handle NVM update state Init
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * Process legitimate commands of the Init state and conditionally set next
+ * state. Reject all other commands.
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ enum i40e_nvmupd_cmd upd_cmd;
+
+ DEBUGFUNC("i40e_nvmupd_state_init");
+
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+ switch (upd_cmd) {
+ case I40E_NVMUPD_READ_SA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+ i40e_release_nvm(hw);
+ }
+ break;
+
+ case I40E_NVMUPD_READ_SNT:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+ if (status)
+ i40e_release_nvm(hw);
+ else
+ hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_ERA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
+ if (status) {
+ i40e_release_nvm(hw);
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_SA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
+ i40e_release_nvm(hw);
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_SNT:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
+ i40e_release_nvm(hw);
+ } else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
+ }
+ break;
+
+ case I40E_NVMUPD_CSUM_SA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_update_nvm_checksum(hw);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ i40e_release_nvm(hw);
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ }
+ break;
+
+ case I40E_NVMUPD_EXEC_AQ:
+ status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_GET_AQ_RESULT:
+ status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_GET_AQ_EVENT:
+ status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
+ break;
+
+ default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in init state\n",
+ i40e_nvm_update_state_str[upd_cmd]);
+ status = I40E_ERR_NVM;
+ *perrno = -ESRCH;
+ break;
+ }
+ return status;
+}
+
+/**
+ * i40e_nvmupd_state_reading - Handle NVM update state Reading
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * NVM ownership is already held. Process legitimate commands and set any
+ * change in state; reject all other commands.
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ enum i40e_nvmupd_cmd upd_cmd;
+
+ DEBUGFUNC("i40e_nvmupd_state_reading");
+
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+ switch (upd_cmd) {
+ case I40E_NVMUPD_READ_SA:
+ case I40E_NVMUPD_READ_CON:
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_READ_LCB:
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+ i40e_release_nvm(hw);
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in reading state.\n",
+ i40e_nvm_update_state_str[upd_cmd]);
+ status = I40E_NOT_SUPPORTED;
+ *perrno = -ESRCH;
+ break;
+ }
+ return status;
+}
+
+/**
+ * i40e_nvmupd_state_writing - Handle NVM update state Writing
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * NVM ownership is already held. Process legitimate commands and set any
+ * change in state; reject all other commands
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ enum i40e_nvmupd_cmd upd_cmd;
+ bool retry_attempt = false;
+
+ DEBUGFUNC("i40e_nvmupd_state_writing");
+
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+retry:
+ switch (upd_cmd) {
+ case I40E_NVMUPD_WRITE_CON:
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (!status) {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_LCB:
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ break;
+
+ case I40E_NVMUPD_CSUM_CON:
+ /* Assumes the caller has acquired the nvm */
+ status = i40e_update_nvm_checksum(hw);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
+ break;
+
+ case I40E_NVMUPD_CSUM_LCB:
+ /* Assumes the caller has acquired the nvm */
+ status = i40e_update_nvm_checksum(hw);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ break;
+
+ default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in writing state.\n",
+ i40e_nvm_update_state_str[upd_cmd]);
+ status = I40E_NOT_SUPPORTED;
+ *perrno = -ESRCH;
+ break;
+ }
+
+ /* In some circumstances, a multi-write transaction takes longer
+ * than the default 3 minute timeout on the write semaphore. If
+ * the write failed with an EBUSY status, this is likely the problem,
+ * so here we try to reacquire the semaphore then retry the write.
+ * We only do one retry, then give up.
+ */
+ if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ !retry_attempt) {
+ enum i40e_status_code old_status = status;
+ u32 old_asq_status = hw->aq.asq_last_status;
+ u32 gtime;
+
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ if (gtime >= hw->nvm.hw_semaphore_timeout) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+ gtime, hw->nvm.hw_semaphore_timeout);
+ i40e_release_nvm(hw);
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+ hw->aq.asq_last_status);
+ status = old_status;
+ hw->aq.asq_last_status = old_asq_status;
+ } else {
+ retry_attempt = true;
+ goto retry;
+ }
+ }
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
+ * @hw: pointer to the hardware structure
+ **/
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
+{
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n",
+ hw->nvm_wait_opcode);
+
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
+ }
+ hw->nvm_wait_opcode = 0;
+
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ * @desc: AdminQ descriptor
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc)
+{
+ u32 aq_desc_len = sizeof(struct i40e_aq_desc);
+
+ if (opcode == hw->nvm_wait_opcode) {
+ i40e_memcpy(&hw->nvm_aq_event_desc, desc,
+ aq_desc_len, I40E_NONDMA_TO_NONDMA);
+ i40e_nvmupd_clear_wait_state(hw);
+ }
+}
+
+/**
+ * i40e_nvmupd_validate_command - Validate given command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @perrno: pointer to return error code
+ *
+ * Return one of the valid command types or I40E_NVMUPD_INVALID
+ **/
+STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
+{
+ enum i40e_nvmupd_cmd upd_cmd;
+ u8 module, transaction;
+
+ DEBUGFUNC("i40e_nvmupd_validate_command\n");
+
+ /* anything that doesn't match a recognized case is an error */
+ upd_cmd = I40E_NVMUPD_INVALID;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+
+ /* limits on data size */
+ if ((cmd->data_size < 1) ||
+ (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command data_size %d\n",
+ cmd->data_size);
+ *perrno = -EFAULT;
+ return I40E_NVMUPD_INVALID;
+ }
+
+ switch (cmd->command) {
+ case I40E_NVM_READ:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_READ_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_READ_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_READ_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_READ_SA;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0xf)
+ upd_cmd = I40E_NVMUPD_STATUS;
+ else if (module == 0)
+ upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+ break;
+ case I40E_NVM_AQE:
+ upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+ break;
+ }
+ break;
+
+ case I40E_NVM_WRITE:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_WRITE_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_WRITE_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_WRITE_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_WRITE_SA;
+ break;
+ case I40E_NVM_ERA:
+ upd_cmd = I40E_NVMUPD_WRITE_ERA;
+ break;
+ case I40E_NVM_CSUM:
+ upd_cmd = I40E_NVMUPD_CSUM_CON;
+ break;
+ case (I40E_NVM_CSUM|I40E_NVM_SA):
+ upd_cmd = I40E_NVMUPD_CSUM_SA;
+ break;
+ case (I40E_NVM_CSUM|I40E_NVM_LCB):
+ upd_cmd = I40E_NVMUPD_CSUM_LCB;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0)
+ upd_cmd = I40E_NVMUPD_EXEC_AQ;
+ break;
+ }
+ break;
+ }
+
+ return upd_cmd;
+}
+
+/**
+ * i40e_nvmupd_exec_aq - Run an AQ command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ enum i40e_status_code status;
+ struct i40e_aq_desc *aq_desc;
+ u32 buff_size = 0;
+ u8 *buff = NULL;
+ u32 aq_desc_len;
+ u32 aq_data_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ if (cmd->offset == 0xffff)
+ return I40E_SUCCESS;
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+
+ /* get the aq descriptor */
+ if (cmd->data_size < aq_desc_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
+ cmd->data_size, aq_desc_len);
+ *perrno = -EINVAL;
+ return I40E_ERR_PARAM;
+ }
+ aq_desc = (struct i40e_aq_desc *)bytes;
+
+ /* if data buffer needed, make sure it's ready */
+ aq_data_len = cmd->data_size - aq_desc_len;
+ buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
+ if (buff_size) {
+ if (!hw->nvm_buff.va) {
+ status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
+ hw->aq.asq_buf_size);
+ if (status)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
+ status);
+ }
+
+ if (hw->nvm_buff.va) {
+ buff = hw->nvm_buff.va;
+ i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
+ I40E_NONDMA_TO_NONDMA);
+ }
+ }
+
+ if (cmd->offset)
+ memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
+ /* and away we go! */
+ status = i40e_asq_send_command(hw, aq_desc, buff,
+ buff_size, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_exec_aq err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ return status;
+ }
+
+ /* should we wait for a followup event? */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+ int remainder;
+ u8 *buff;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
+
+ /* check offset range */
+ if (cmd->offset > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
+ __func__, cmd->offset, aq_total_len);
+ *perrno = -EINVAL;
+ return I40E_ERR_PARAM;
+ }
+
+ /* check copylength range */
+ if (cmd->data_size > (aq_total_len - cmd->offset)) {
+ int new_len = aq_total_len - cmd->offset;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, new_len);
+ cmd->data_size = new_len;
+ }
+
+ remainder = cmd->data_size;
+ if (cmd->offset < aq_desc_len) {
+ u32 len = aq_desc_len - cmd->offset;
+
+ len = min(len, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
+ __func__, cmd->offset, cmd->offset + len);
+
+ buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
+ i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
+
+ bytes += len;
+ remainder -= len;
+ buff = hw->nvm_buff.va;
+ } else {
+ buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ }
+
+ if (remainder > 0) {
+ int start_byte = buff - (u8 *)hw->nvm_buff.va;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
+ __func__, start_byte, start_byte + remainder);
+ i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
+ }
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
+
+ /* check copylength range */
+ if (cmd->data_size > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, aq_total_len);
+ cmd->data_size = aq_total_len;
+ }
+
+ i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
+ I40E_NONDMA_TO_NONDMA);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_nvmupd_nvm_read - Read NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ enum i40e_status_code status;
+ u8 module, transaction;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ bytes, last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_read status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_erase - Erase an NVM module
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_erase status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_write - Write NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ u8 preservation_flags;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_update_nvm(hw, module, cmd->offset,
+ (u16)cmd->data_size, bytes, last,
+ preservation_flags, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_osdep.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_osdep.h
new file mode 100644
index 00000000..8e5c593c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_osdep.h
@@ -0,0 +1,243 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_memcpy.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#include "../i40e_logs.h"
+
+#define INLINE inline
+#define STATIC static
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+
+typedef enum i40e_status_code i40e_status;
+#define __iomem
+#define hw_dbg(hw, S, A...) do {} while (0)
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+#define lower_32_bits(n) ((u32)(n))
+#define low_16_bits(x) ((x) & 0xFFFF)
+#define high_16_bits(x) (((x) & 0xFFFF0000) >> 16)
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+#ifndef __le16
+#define __le16 uint16_t
+#endif
+#ifndef __le32
+#define __le32 uint32_t
+#endif
+#ifndef __le64
+#define __le64 uint64_t
+#endif
+#ifndef __be16
+#define __be16 uint16_t
+#endif
+#ifndef __be32
+#define __be32 uint32_t
+#endif
+#ifndef __be64
+#define __be64 uint64_t
+#endif
+
+#define FALSE 0
+#define TRUE 1
+#define false 0
+#define true 1
+
+#define min(a,b) RTE_MIN(a,b)
+#define max(a,b) RTE_MAX(a,b)
+
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+
+#define DEBUGOUT(S) PMD_DRV_LOG_RAW(DEBUG, S)
+#define DEBUGOUT1(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A)
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT6 DEBUGOUT3
+#define DEBUGOUT7 DEBUGOUT6
+
+#define i40e_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ PMD_DRV_LOG_RAW(DEBUG, "i40e %02x.%x " s, \
+ (h)->bus.device, (h)->bus.func, \
+ ##__VA_ARGS__); \
+} while (0)
+
+/* AQ commands based interfaces of i40e_read_rx_ctl() and i40e_write_rx_ctl()
+ * are required for reading/writing below registers, as reading/writing it
+ * directly may not function correctly if the device is under heavy small
+ * packet traffic. Note that those interfaces are available from FVL5 and not
+ * suitable before the AdminQ is ready during initialization.
+ *
+ * I40E_PFQF_CTL_0
+ * I40E_PFQF_HENA
+ * I40E_PFQF_FDALLOC
+ * I40E_PFQF_HREGION
+ * I40E_PFLAN_QALLOC
+ * I40E_VPQF_CTL
+ * I40E_VFQF_HENA
+ * I40E_VFQF_HREGION
+ * I40E_VSIQF_CTL
+ * I40E_VSILAN_QBASE
+ * I40E_VSILAN_QTABLE
+ * I40E_VSIQF_TCREGION
+ * I40E_PFQF_HKEY
+ * I40E_VFQF_HKEY
+ * I40E_PRTQF_CTL_0
+ * I40E_GLFCOE_RCTL
+ * I40E_GLFCOE_RSOF
+ * I40E_GLQF_CTL
+ * I40E_GLQF_SWAP
+ * I40E_GLQF_HASH_MSK
+ * I40E_GLQF_HASH_INSET
+ * I40E_GLQF_HSYM
+ * I40E_GLQF_FC_MSK
+ * I40E_GLQF_FC_INSET
+ * I40E_GLQF_FD_MSK
+ * I40E_PRTQF_FD_INSET
+ * I40E_PRTQF_FD_FLXINSET
+ * I40E_PRTQF_FD_MSK
+ */
+
+#define I40E_PCI_REG(reg) rte_read32(reg)
+#define I40E_PCI_REG_ADDR(a, reg) \
+ ((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
+static inline uint32_t i40e_read_addr(volatile void *addr)
+{
+ return rte_le_to_cpu_32(I40E_PCI_REG(addr));
+}
+
+#define I40E_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+#define I40E_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
+
+#define I40E_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_GLGEN_STAT)
+#define I40EVF_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_VFGEN_RSTAT)
+
+#define I40E_READ_REG(hw, reg) i40e_read_addr(I40E_PCI_REG_ADDR((hw), (reg)))
+#define I40E_WRITE_REG(hw, reg, value) \
+ I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), (reg)), (value))
+
+#define rd32(a, reg) i40e_read_addr(I40E_PCI_REG_ADDR((a), (reg)))
+#define wr32(a, reg, value) \
+ I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((a), (reg)), (value))
+#define flush(a) i40e_read_addr(I40E_PCI_REG_ADDR((a), (I40E_GLGEN_STAT)))
+
+#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0]))
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+ void *va;
+ u64 pa;
+ u32 size;
+ const void *zone;
+} __attribute__((packed));
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+ i40e_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+} __attribute__((packed));
+
+#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
+
+#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
+#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
+#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
+#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
+#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
+#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
+
+#define cpu_to_le16(o) rte_cpu_to_le_16(o)
+#define cpu_to_le32(s) rte_cpu_to_le_32(s)
+#define cpu_to_le64(h) rte_cpu_to_le_64(h)
+#define le16_to_cpu(a) rte_le_to_cpu_16(a)
+#define le32_to_cpu(c) rte_le_to_cpu_32(c)
+#define le64_to_cpu(k) rte_le_to_cpu_64(k)
+
+/* SW spinlock */
+struct i40e_spinlock {
+ rte_spinlock_t spinlock;
+};
+
+#define i40e_init_spinlock(_sp) i40e_init_spinlock_d(_sp)
+#define i40e_acquire_spinlock(_sp) i40e_acquire_spinlock_d(_sp)
+#define i40e_release_spinlock(_sp) i40e_release_spinlock_d(_sp)
+#define i40e_destroy_spinlock(_sp) i40e_destroy_spinlock_d(_sp)
+
+#define I40E_NTOHS(a) rte_be_to_cpu_16(a)
+#define I40E_NTOHL(a) rte_be_to_cpu_32(a)
+#define I40E_HTONS(a) rte_cpu_to_be_16(a)
+#define I40E_HTONL(a) rte_cpu_to_be_32(a)
+
+#define i40e_memset(a, b, c, d) memset((a), (b), (c))
+#define i40e_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define DELAY(x) rte_delay_us(x)
+#define i40e_usec_delay(x) rte_delay_us(x)
+#define i40e_msec_delay(x) rte_delay_us(1000*(x))
+#define udelay(x) DELAY(x)
+#define msleep(x) DELAY(1000*(x))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#endif /* _I40E_OSDEP_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_prototype.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_prototype.h
new file mode 100644
index 00000000..c6ec2d76
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_prototype.h
@@ -0,0 +1,642 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These are
+ * mostly because they are needed even before the init
+ * has happened and will assist in the early SW and FW
+ * setup.
+ */
+
+/* adminq functions */
+enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_asq(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_arq(struct i40e_hw *hw);
+enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw);
+enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw);
+u16 i40e_clean_asq(struct i40e_hw *hw);
+void i40e_free_adminq_asq(struct i40e_hw *hw);
+void i40e_free_adminq_arq(struct i40e_hw *hw);
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+#ifdef VF_DRIVER
+bool i40e_asq_done(struct i40e_hw *hw);
+#endif
+
+/* debug function for adminq */
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
+
+#ifdef PF_DRIVER
+
+u32 i40e_led_get(struct i40e_hw *hw);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
+enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode);
+enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val);
+enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
+
+/* admin send queue commands */
+
+enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_reset);
+enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
+ u16 max_frame_size, bool crc_en, u16 pacing,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 vsi_id, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
+enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *pveb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id, bool *floating,
+ u16 *statistic_index, u16 *vebs_used,
+ u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free);
+enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free);
+
+enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags, u16 valid_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, u32 field_id, void *data,
+ u16 buf_size, u16 *element_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, void *data, u16 buf_size,
+ u16 element_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+ void *buff, u16 buff_size, u16 tlv_len,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 tlv_len, u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+ bool dcb_enable,
+ struct i40e_asq_cmd_details
+ *cmd_details);
+enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+ bool start_agent,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+ u8 *num_entries,
+ struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+ u16 count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+ u16 mac_seid, u16 vsi_seid,
+ u16 *ret_seid);
+enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+ u16 vsi_seid, u16 tag, u16 queue_num,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 tag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+ u16 etag, u8 num_tags_in_buf, void *buf,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+ u16 etag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 old_tag, u16 new_tag, u16 *tags_used,
+ u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 *stat_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 stat_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
+ u16 bad_frame_vsi, bool save_bad_pac,
+ bool pad_short_pac, bool double_vlan,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
+ u8 tcmap, bool request, u8 *tcmap_ret,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg);
+enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+ u16 vsi,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count);
+enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count);
+enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+ u16 vsi,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count);
+enum i40e_status_code i40e_aq_remove_cloud_filters_big_buffer(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count);
+enum i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
+ struct i40e_aqc_replace_cloud_filters_cmd *filters,
+ struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf);
+enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1);
+enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer);
+enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
+ u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1);
+enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer);
+enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
+ u8 bios_mode, bool *reset_needed);
+enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
+ u8 oem_mode);
+
+/* i40e_common */
+enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw);
+enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_hw(struct i40e_hw *hw);
+void i40e_clear_pxe_mode(struct i40e_hw *hw);
+enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw);
+enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
+enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
+enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
+/* prototype for functions used for NVM access */
+enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw);
+enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access);
+void i40e_release_nvm(struct i40e_hw *hw);
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module,
+ u32 offset, u16 words, void *data,
+ bool last_command);
+enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+ void *data);
+enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module,
+ u32 offset, u16 words, void *data);
+enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
+enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw);
+enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum);
+enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc);
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+#endif /* PF_DRIVER */
+
+#if defined(I40E_QV) || defined(VF_DRIVER)
+enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
+
+#endif
+extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+
+STATIC INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return i40e_ptype_lookup[ptype];
+}
+
+#ifdef PF_DRIVER
+/**
+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
+ * @link_speed: the speed to convert
+ *
+ * Returns the link_speed in terms of the virtchnl interface, for use in
+ * converting link_speed as reported by the AdminQ into the format used for
+ * talking to virtchnl devices. If we can't represent the link speed properly,
+ * report LINK_SPEED_UNKNOWN.
+ **/
+STATIC INLINE enum virtchnl_link_speed
+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+{
+ switch (link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ return VIRTCHNL_LINK_SPEED_100MB;
+ case I40E_LINK_SPEED_1GB:
+ return VIRTCHNL_LINK_SPEED_1GB;
+ case I40E_LINK_SPEED_10GB:
+ return VIRTCHNL_LINK_SPEED_10GB;
+ case I40E_LINK_SPEED_40GB:
+ return VIRTCHNL_LINK_SPEED_40GB;
+ case I40E_LINK_SPEED_20GB:
+ return VIRTCHNL_LINK_SPEED_20GB;
+ case I40E_LINK_SPEED_25GB:
+ return VIRTCHNL_LINK_SPEED_25GB;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
+ }
+}
+#endif /* PF_DRIVER */
+/* prototype for functions used for SW spinlocks */
+void i40e_init_spinlock(struct i40e_spinlock *sp);
+void i40e_acquire_spinlock(struct i40e_spinlock *sp);
+void i40e_release_spinlock(struct i40e_spinlock *sp);
+void i40e_destroy_spinlock(struct i40e_spinlock *sp);
+
+/* i40e_common for VF drivers*/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct virtchnl_vf_resource *msg);
+enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum i40e_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 vsi_seid);
+enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
+enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+
+enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+ struct i40e_aqc_arp_proxy_data *proxy_config,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+ struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+ u8 filter_index,
+ struct i40e_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+ u16 *wake_reason,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
+enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
+enum i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+enum i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_header);
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile);
+enum i40e_status_code
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_register.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_register.h
new file mode 100644
index 00000000..df66e76a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_register.h
@@ -0,0 +1,5368 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+
+#ifdef PF_DRIVER
+#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
+#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
+#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
+#define I40E_GL_ARQH_ARQH_SHIFT 0
+#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
+#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
+#define I40E_GL_ARQT_ARQT_SHIFT 0
+#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
+#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
+#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
+#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
+#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
+#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
+#define I40E_GL_ATQH_ATQH_SHIFT 0
+#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
+#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
+#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
+#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
+#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
+#define I40E_GL_ATQT_ATQT_SHIFT 0
+#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
+#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
+#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
+#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
+#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
+#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
+#endif /* PF_DRIVER */
+#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#ifdef PF_DRIVER
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
+#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
+#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
+#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
+#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
+#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
+#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
+#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
+#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
+#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
+#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
+#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
+#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
+#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GL_PRS_FVBM_MAX_INDEX 3
+#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0
+#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT)
+#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8
+#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT)
+#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31
+#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_GLQF_HKEY_MAX_INDEX 12
+#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
+#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
+#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
+#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
+#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
+#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
+#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
+#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_INSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_INSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1_L_MAX_INDEX 143
+#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
+#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR2_L_MAX_INDEX 143
+#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
+#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
+#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
+#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
+#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 25
+#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#endif /* PF_DRIVER */
+#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
+
+#ifdef PF_DRIVER
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_FD_MSK_MAX_INDEX 1
+#define I40E_GLQF_FD_MSK_MASK_SHIFT 0
+#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT)
+#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_INSET_MAX_INDEX 1
+#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0
+#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT)
+#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_MSK_MAX_INDEX 1
+#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0
+#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT)
+#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT)
+#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_ORT_MAX_INDEX 63
+#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0
+#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT)
+#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5
+#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT)
+#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
+#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
+#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */
+#define I40E_GLQF_PIT_MAX_INDEX 23
+#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
+#define I40E_GLQF_PIT_FSIZE_SHIFT 5
+#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT)
+#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10
+#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#endif /* PF_DRIVER */
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+
+#endif /* _I40E_REGISTER_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_status.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_status.h
new file mode 100644
index 00000000..49af2d9f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_status.h
@@ -0,0 +1,108 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_PHY = -3,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_MAC_TYPE = -6,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_LINK_SETUP = -8,
+ I40E_ERR_ADAPTER_STOPPED = -9,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_INVALID_LINK_SETTINGS = -13,
+ I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_SWFW_SYNC = -16,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_RING_FULL = -20,
+ I40E_ERR_INVALID_PD_ID = -21,
+ I40E_ERR_INVALID_QP_ID = -22,
+ I40E_ERR_INVALID_CQ_ID = -23,
+ I40E_ERR_INVALID_CEQ_ID = -24,
+ I40E_ERR_INVALID_AEQ_ID = -25,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_INVALID_ARP_INDEX = -27,
+ I40E_ERR_INVALID_FPM_FUNC_ID = -28,
+ I40E_ERR_QP_INVALID_MSG_SIZE = -29,
+ I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ I40E_ERR_INVALID_FRAG_COUNT = -31,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_INVALID_ALIGNMENT = -33,
+ I40E_ERR_FLUSHED_QUEUE = -34,
+ I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_OPCODE_MISMATCH = -38,
+ I40E_ERR_CQP_COMPL_ERROR = -39,
+ I40E_ERR_INVALID_VF_ID = -40,
+ I40E_ERR_INVALID_HMCFN_ID = -41,
+ I40E_ERR_BACKING_PAGE_ERROR = -42,
+ I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ I40E_ERR_INVALID_PBLE_INDEX = -44,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_MEMCPY_FAILED = -48,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ I40E_ERR_SRQ_ENABLED = -52,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_BAD_IWARP_CQE = -58,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+ I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
+};
+
+#endif /* _I40E_STATUS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_type.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_type.h
new file mode 100644
index 00000000..006a11a8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_type.h
@@ -0,0 +1,2024 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_devids.h"
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_1PARAMETER(_p) (_p);
+#define UNREFERENCED_2PARAMETER(_p, _q) (_p); (_q);
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) (_p); (_q); (_r);
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) (_p); (_q); (_r); (_s);
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) (_p); (_q); (_r); (_s); (_t);
+
+#ifndef LINUX_MACROS
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* BIT_ULL */
+#endif /* LINUX_MACROS */
+
+#ifndef I40E_MASK
+/* I40E_MASK is a macro used on 32 bit registers */
+#define I40E_MASK(mask, shift) (mask << shift)
+#endif
+
+#define I40E_MAX_PF 16
+#define I40E_MAX_PF_VSI 64
+#define I40E_MAX_PF_QP 128
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
+
+/* something less than 1 minute */
+#define I40E_HEARTBEAT_TIMEOUT (HZ * 50)
+
+/* Max default timeout in ms, */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Max timeout in ms for the phy to respond */
+#define I40E_MAX_PHY_TIMEOUT 500
+
+/* Check whether address is multicast. */
+#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define I40E_IS_BROADCAST(address) \
+ ((((u8 *)(address))[0] == ((u8)0xff)) && \
+ (((u8 *)(address))[1] == ((u8)0xff)))
+
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif
+/* Data type manipulation macros. */
+#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+
+#define I40E_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define I40E_LO_WORD(x) ((u16)((x) & 0xFFFF))
+
+#define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define I40E_LO_BYTE(x) ((u8)((x) & 0xFF))
+
+/* Number of Transmit Descriptors must be a multiple of 8. */
+#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Receive Descriptors must be a multiple of 32 if
+ * the number of descriptors is greater than 32.
+ */
+#define I40E_REQ_RX_DESCRIPTOR_MULTIPLE 32
+
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+ I40E_DEBUG_FD = 0x00001000,
+ I40E_DEBUG_PACKAGE = 0x00002000,
+
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* PCI Bus Info */
+#define I40E_PCI_LINK_STATUS 0xB2
+#define I40E_PCI_LINK_WIDTH 0x3F0
+#define I40E_PCI_LINK_WIDTH_1 0x10
+#define I40E_PCI_LINK_WIDTH_2 0x20
+#define I40E_PCI_LINK_WIDTH_4 0x40
+#define I40E_PCI_LINK_WIDTH_8 0x80
+#define I40E_PCI_LINK_SPEED 0xF
+#define I40E_PCI_LINK_SPEED_2500 0x1
+#define I40E_PCI_LINK_SPEED_5000 0x2
+#define I40E_PCI_LINK_SPEED_8000 0x3
+
+#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define I40E_PHY_COM_REG_PAGE 0x1E
+#define I40E_PHY_LED_LINK_MODE_MASK 0xF0
+#define I40E_PHY_LED_MANUAL_ON 0x100
+#define I40E_PHY_LED_PROV_REG_1 0xC430
+#define I40E_PHY_LED_MODE_MASK 0xFFFF
+#define I40E_PHY_LED_MODE_ORIG 0x80000000
+
+/* Memory types */
+enum i40e_memset_type {
+ I40E_NONDMA_MEM = 0,
+ I40E_DMA_MEM
+};
+
+/* Memcpy types */
+enum i40e_memcpy_type {
+ I40E_NONDMA_TO_NONDMA = 0,
+ I40E_NONDMA_TO_DMA,
+ I40E_DMA_TO_DMA,
+ I40E_DMA_TO_NONDMA
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_set_fc_aq_failures {
+ I40E_SET_FC_AQ_FAIL_NONE = 0,
+ I40E_SET_FC_AQ_FAIL_GET = 1,
+ I40E_SET_FC_AQ_FAIL_SET = 2,
+ I40E_SET_FC_AQ_FAIL_UPDATE = 4,
+ I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1 = 1,
+ I40E_VSI_VMDQ2 = 2,
+ I40E_VSI_CTRL = 3,
+ I40E_VSI_FCOE = 4,
+ I40E_VSI_MIRROR = 5,
+ I40E_VSI_SRIOV = 6,
+ I40E_VSI_FDIR = 7,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 req_fec_info;
+ u8 fec_info;
+ u8 ext_info;
+ u8 loopback;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
+ u8 requested_speeds;
+ u8 module_type[3];
+ /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP 0x03
+#define I40E_MODULE_TYPE_QSFP 0x0D
+ /* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE 0x01
+#define I40E_MODULE_TYPE_40G_LR4 0x02
+#define I40E_MODULE_TYPE_40G_SR4 0x04
+#define I40E_MODULE_TYPE_40G_CR4 0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR 0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR 0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER 0x80
+ /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX 0x01
+#define I40E_MODULE_TYPE_1000BASE_LX 0x02
+#define I40E_MODULE_TYPE_1000BASE_CX 0x04
+#define I40E_MODULE_TYPE_1000BASE_T 0x08
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+ /* all the phy types the NVM is capable of */
+ u64 phy_types;
+};
+
+#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
+#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
+#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
+#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
+#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
+#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
+#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
+#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
+#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
+#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
+#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
+#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
+#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
+#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
+#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
+/*
+ * Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some
+ * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
+ * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So,
+ * a shift is needed to adjust for this with values larger than 31. The
+ * only affected values are I40E_PHY_TYPE_25GBASE_*.
+ */
+#define I40E_PHY_TYPE_OFFSET 1
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_HW_CAP_MAX_GPIO 30
+#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
+#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
+
+enum i40e_acpi_programming_method {
+ I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
+ I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
+};
+
+#define I40E_WOL_SUPPORT_MASK 0x1
+#define I40E_ACPI_PROGRAMMING_METHOD_MASK 0x2
+#define I40E_PROXY_SUPPORT_MASK 0x4
+
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB 0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 mng_protocols_over_mctp;
+#define I40E_MNG_PROTOCOL_PLDM 0x2
+#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
+#define I40E_MNG_PROTOCOL_NCSI 0x8
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool iscsi; /* Indicates iSCSI enabled */
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN 0x0
+#define I40E_FLEX10_MODE_DCC 0x1
+#define I40E_FLEX10_MODE_DCI 0x2
+
+ u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
+#define I40E_FLEX10_STATUS_VC_MODE 0x2
+
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+ u64 wr_csr_prot;
+ bool apm_wol_support;
+ enum i40e_acpi_programming_method acpi_prog_method;
+ bool proxy_support;
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u8 port_addr[ETH_ALEN];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+ bool blank_nvm_mode; /* is NVM empty (no FW present)*/
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+ u32 oem_ver; /* OEM version info */
+};
+
+/* definitions used in NVM update support */
+
+enum i40e_nvmupd_cmd {
+ I40E_NVMUPD_INVALID,
+ I40E_NVMUPD_READ_CON,
+ I40E_NVMUPD_READ_SNT,
+ I40E_NVMUPD_READ_LCB,
+ I40E_NVMUPD_READ_SA,
+ I40E_NVMUPD_WRITE_ERA,
+ I40E_NVMUPD_WRITE_CON,
+ I40E_NVMUPD_WRITE_SNT,
+ I40E_NVMUPD_WRITE_LCB,
+ I40E_NVMUPD_WRITE_SA,
+ I40E_NVMUPD_CSUM_CON,
+ I40E_NVMUPD_CSUM_SA,
+ I40E_NVMUPD_CSUM_LCB,
+ I40E_NVMUPD_STATUS,
+ I40E_NVMUPD_EXEC_AQ,
+ I40E_NVMUPD_GET_AQ_RESULT,
+ I40E_NVMUPD_GET_AQ_EVENT,
+};
+
+enum i40e_nvmupd_state {
+ I40E_NVMUPD_STATE_INIT,
+ I40E_NVMUPD_STATE_READING,
+ I40E_NVMUPD_STATE_WRITING,
+ I40E_NVMUPD_STATE_INIT_WAIT,
+ I40E_NVMUPD_STATE_WRITE_WAIT,
+ I40E_NVMUPD_STATE_ERROR
+};
+
+/* nvm_access definition and its masks/shifts need to be accessible to
+ * application, core driver, and shared code. Where is the right file?
+ */
+#define I40E_NVM_READ 0xB
+#define I40E_NVM_WRITE 0xC
+
+#define I40E_NVM_MOD_PNT_MASK 0xFF
+
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_AQE 0xe
+#define I40E_NVM_EXEC 0xf
+
+#define I40E_NVM_ADAPT_SHIFT 16
+#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
+
+#define I40E_NVMUPD_MAX_DATA 4096
+#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
+
+struct i40e_nvm_access {
+ u32 command;
+ u32 config;
+ u32 offset; /* in bytes */
+ u32 data_size; /* in bytes */
+ u8 data[1];
+};
+
+/* (Q)SFP module access definitions */
+#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
+#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
+#define I40E_MODULE_TYPE_ADDR 0x00
+#define I40E_MODULE_REVISION_ADDR 0x01
+#define I40E_MODULE_SFF_8472_COMP 0x5E
+#define I40E_MODULE_SFF_8472_SWAP 0x5C
+#define I40E_MODULE_SFF_ADDR_MODE 0x04
+#define I40E_MODULE_SFF_DIAG_CAPAB 0x40
+#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
+#define I40E_MODULE_TYPE_QSFP28 0x11
+#define I40E_MODULE_QSFP_MAX_LEN 640
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+ u16 bus_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+#define I40E_TLV_STATUS_OPER 0x1
+#define I40E_TLV_STATUS_SYNC 0x2
+#define I40E_TLV_STATUS_ERR 0x4
+#define I40E_CEE_OPER_MAX_APPS 3
+#define I40E_APP_PROTOID_FCOE 0x8906
+#define I40E_APP_PROTOID_ISCSI 0x0cbc
+#define I40E_APP_PROTOID_FIP 0x8914
+#define I40E_APP_SEL_ETHTYPE 0x1
+#define I40E_APP_SEL_TCPIP 0x2
+#define I40E_CEE_APP_SEL_ETHTYPE 0x0
+#define I40E_CEE_APP_SEL_TCPIP 0x1
+
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct i40e_dcb_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct i40e_dcb_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct i40e_dcb_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u8 dcbx_mode;
+#define I40E_DCBX_MODE_CEE 0x1
+#define I40E_DCBX_MODE_IEEE 0x2
+ u8 app_mode;
+#define I40E_DCBX_APPS_NON_WILLING 0x1
+ u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
+ struct i40e_dcb_ets_config etscfg;
+ struct i40e_dcb_ets_config etsrec;
+ struct i40e_dcb_pfc_config pfc;
+ struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 *hw_addr;
+ void *back;
+
+ /* subsystem structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* for multi-function MACs */
+ u16 partition_id;
+ u16 num_partitions;
+ u16 num_ports;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* state of nvm update process */
+ enum i40e_nvmupd_state nvmupd_state;
+ struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_aq_desc nvm_aq_event_desc;
+ struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+
+ /* WoL and proxy support */
+ u16 num_wol_proxy_filters;
+ u16 wol_proxy_vsi_seid;
+
+#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+ u64 flags;
+
+ /* Used in set switch config AQ command */
+ u16 switch_tag;
+ u16 first_tag;
+ u16 second_tag;
+
+ /* debug mask */
+ u32 debug_mask;
+ char err_str[16];
+};
+
+STATIC INLINE bool i40e_is_vf(struct i40e_hw *hw)
+{
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
+}
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+ u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define I40E_RXD_QW0_MIRROR_STATUS_SHIFT 8
+#define I40E_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \
+ I40E_RXD_QW0_MIRROR_STATUS_SHIFT)
+#define I40E_RXD_QW0_FCOEINDX_SHIFT 0
+#define I40E_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \
+ I40E_RXD_QW0_FCOEINDX_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
+
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \
+ I40E_RXD_QW1_STATUS_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT I40E_RX_DESC_STATUS_UMBCAST
+#define I40E_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_UMBCAST_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_PACKET_TYPE_UNICAST 0
+#define I40E_RXD_PACKET_TYPE_MULTICAST 1
+#define I40E_RXD_PACKET_TYPE_BROADCAST 2
+#define I40E_RXD_PACKET_TYPE_MIRRORED 3
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RX_PTYPE_BIT_MASK 0x0FFFFFFF
+#define I40E_RX_PTYPE_SHIFT 56
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+
+#define I40E_RXD_QW1_NEXTP_SHIFT 38
+#define I40E_RXD_QW1_NEXTP_MASK (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT)
+
+#define I40E_RXD_QW2_EXT_STATUS_SHIFT 0
+#define I40E_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \
+ I40E_RXD_QW2_EXT_STATUS_SHIFT)
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+#define I40E_RXD_QW2_L2TAG2_SHIFT 0
+#define I40E_RXD_QW2_L2TAG2_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG2_SHIFT)
+
+#define I40E_RXD_QW2_L2TAG3_SHIFT 16
+#define I40E_RXD_QW2_L2TAG3_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG3_SHIFT)
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+#define I40E_TWO_BIT_MASK 0x3
+#define I40E_THREE_BIT_MASK 0x7
+#define I40E_FOUR_BIT_MASK 0xF
+#define I40E_EIGHTEEN_BIT_MASK 0x3FFFF
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_MACLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define I40E_TXD_QW1_IPLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define I40E_TXD_QW1_L4LEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define I40E_TXD_QW1_FCLEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
+#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
+struct i40e_nop_desc {
+ __le64 rsvd;
+ __le64 dtype_cmd;
+};
+
+#define I40E_TXD_NOP_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_NOP_QW1_DTYPE_MASK (0xFUL << I40E_TXD_NOP_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_NOP_QW1_CMD_SHIFT 4
+#define I40E_TXD_NOP_QW1_CMD_MASK (0x7FUL << I40E_TXD_NOP_QW1_CMD_SHIFT)
+
+enum i40e_tx_nop_desc_cmd_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_NOP_DESC_EOP_SHIFT = 0,
+ I40E_TX_NOP_DESC_RS_SHIFT = 1,
+ I40E_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */
+};
+
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+ /* Note: Values 0-28 are reserved for future use.
+ * Value 29, 30, 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-38 are reserved for future use.
+ * Value 39, 40, 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+ u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* Statistics collected per function for FCoE */
+struct i40e_fcoe_stats {
+ u64 rx_fcoe_packets; /* fcoeprc */
+ u64 rx_fcoe_dwords; /* focedwrc */
+ u64 rx_fcoe_dropped; /* fcoerpdc */
+ u64 tx_fcoe_packets; /* fcoeptc */
+ u64 tx_fcoe_dwords; /* focedwtc */
+ u64 fcoe_bad_fccrc; /* fcoecrc */
+ u64 fcoe_last_error; /* fcoelast */
+ u64 fcoe_ddp_count; /* fcoeddpc */
+};
+
+/* offset to per function FCoE statistics block */
+#define I40E_FCOE_VF_STAT_OFFSET 0
+#define I40E_FCOE_PF_STAT_OFFSET 128
+#define I40E_FCOE_STAT_MAX (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF)
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
+ /* EEE LPI */
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_SR_PCIE_ANALOG_CONFIG_PTR 0x03
+#define I40E_SR_PHY_ANALOG_CONFIG_PTR 0x04
+#define I40E_SR_OPTION_ROM_PTR 0x05
+#define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
+#define I40E_SR_AUTO_GENERATED_POINTERS_PTR 0x07
+#define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
+#define I40E_SR_EMP_GLOBAL_MODULE_PTR 0x09
+#define I40E_SR_RO_PCIE_LCB_PTR 0x0A
+#define I40E_SR_EMP_IMAGE_PTR 0x0B
+#define I40E_SR_PE_IMAGE_PTR 0x0C
+#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D
+#define I40E_SR_MNG_CONFIG_PTR 0x0E
+#define I40E_EMP_MODULE_PTR 0x0F
+#define I40E_SR_EMP_MODULE_PTR 0x48
+#define I40E_SR_PBA_FLAGS 0x15
+#define I40E_SR_PBA_BLOCK_PTR 0x16
+#define I40E_SR_BOOT_CONFIG_PTR 0x17
+#define I40E_NVM_OEM_VER_OFF 0x83
+#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
+#define I40E_SR_NVM_MAP_VERSION 0x29
+#define I40E_SR_NVM_IMAGE_VERSION 0x2A
+#define I40E_SR_NVM_STRUCTURE_VERSION 0x2B
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PXE_SETUP_PTR 0x30
+#define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
+#define I40E_SR_NVM_ORIGINAL_EETRACK_LO 0x34
+#define I40E_SR_NVM_ORIGINAL_EETRACK_HI 0x35
+#define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
+#define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38
+#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
+#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
+#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define I40E_SR_PHY_ACTIVITY_LIST_PTR 0x3D
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
+#define I40E_SR_4TH_FREE_PROVISION_AREA_PTR 0x42
+#define I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
+#define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
+#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
+#define I40E_SR_FEATURE_CONFIGURATION_PTR 0x49
+#define I40E_SR_CONFIGURATION_METADATA_PTR 0x4D
+#define I40E_SR_IMMEDIATE_VALUES_PTR 0x4E
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
+#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
+#define I40E_PTR_TYPE BIT(15)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_BUF_ALIGNMENT 4096
+#define I40E_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
+
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */
+
+enum i40E_fcoe_tx_ctx_desc_cmd_bits {
+ I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_RELOFF = 0x10,
+ I40E_FCOE_TX_CTX_DESC_CLRSEQ = 0x20,
+ I40E_FCOE_TX_CTX_DESC_DIFENA = 0x40,
+ I40E_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80
+};
+
+/* FCoE DIF/DIX Context descriptor */
+struct i40e_fcoe_difdix_context_desc {
+ __le64 flags_buff0_buff1_ref;
+ __le64 difapp_msk_bias;
+};
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT 0
+#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_MASK (0xFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT)
+
+enum i40e_fcoe_difdix_ctx_desc_flags_bits {
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_RSVD = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGCHK = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGNOTCHK = 0x0004,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_OPAQUE = 0x0000,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY = 0x0008,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPTAG = 0x0010,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPREFTAG = 0x0018,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_CNST = 0x0000,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_INC1BLK = 0x0020,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_APPTAG = 0x0040,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_RSVD = 0x0060,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIXMODE_XSUM = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIXMODE_CRC = 0x0080,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_UNTAG = 0x0000,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_BUF = 0x0100,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_RSVD = 0x0200,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_EMBDTAGS = 0x0300,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFLAN_UNTAG = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFLAN_TAG = 0x0400,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFBLK_512B = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFBLK_4K = 0x0800
+};
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT 12
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_MASK (0x3FFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT 22
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_MASK (0x3FFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT 32
+#define I40E_FCOE_DIFDIX_CTX_QW0_REF_MASK (0xFFFFFFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT 0
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MASK (0xFFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT 16
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_MASK (0xFFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT 32
+#define I40E_FCOE_DIFDIX_CTX_QW0_REF_BIAS_MASK (0xFFFFFFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT)
+
+/* FCoE DIF/DIX Buffers descriptor */
+struct i40e_fcoe_difdix_buffers_desc {
+ __le64 buff_addr0;
+ __le64 buff_addr1;
+};
+
+/* FCoE DDP Context descriptor */
+struct i40e_fcoe_ddp_context_desc {
+ __le64 rsvd;
+ __le64 type_cmd_foff_lsize;
+};
+
+#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \
+ I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT 4
+#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \
+ I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)
+
+enum i40e_fcoe_ddp_ctx_desc_cmd_bits {
+ I40E_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */
+ I40E_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */
+ I40E_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */
+ I40E_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */
+ I40E_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */
+ I40E_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */
+};
+
+#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16
+#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \
+ I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
+
+#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32
+#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \
+ I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
+
+/* FCoE DDP/DWO Queue Context descriptor */
+struct i40e_fcoe_queue_context_desc {
+ __le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */
+ __le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */
+};
+
+#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0
+#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \
+ I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12
+#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \
+ I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0
+#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \
+ I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13
+#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \
+ I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
+
+enum i40e_fcoe_queue_ctx_desc_tph_bits {
+ I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1,
+ I40E_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2
+};
+
+#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30
+#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \
+ I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
+
+/* FCoE DDP/DWO Filter Context descriptor */
+struct i40e_fcoe_filter_context_desc {
+ __le32 param;
+ __le16 seqn;
+
+ /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
+ __le16 rsvd_dmaindx;
+
+ /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
+ __le64 flags_rsvd_lanq;
+};
+
+#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4
+#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \
+ I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
+
+enum i40e_fcoe_filter_ctx_desc_flags_bits {
+ I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00,
+ I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01,
+ I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00,
+ I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02,
+ I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00,
+ I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04
+};
+
+#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0
+#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \
+ I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
+
+#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8
+#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \
+ I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
+
+#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53
+#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \
+ I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR 0x06
+#define I40E_SR_LLDP_CFG_PTR 0x31
+struct i40e_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
+/* Offsets into Alternate Ram */
+#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
+#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
+#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */
+#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */
+#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
+#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
+
+/* Alternate Ram Bandwidth Masks */
+#define I40E_ALT_BW_VALUE_MASK 0xFF
+#define I40E_ALT_BW_RELATIVE_MASK 0x40000000
+#define I40E_ALT_BW_VALID_MASK 0x80000000
+
+/* RSS Hash Table Size */
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_L3_SRC_SHIFT 47
+#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
+#define I40E_L3_V6_SRC_SHIFT 43
+#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT)
+#define I40E_L3_DST_SHIFT 35
+#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT)
+#define I40E_L3_V6_DST_SHIFT 35
+#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT)
+#define I40E_L4_SRC_SHIFT 34
+#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT)
+#define I40E_L4_DST_SHIFT 33
+#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
+#define I40E_VERIFY_TAG_SHIFT 31
+#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
+
+#define I40E_FLEX_50_SHIFT 13
+#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
+#define I40E_FLEX_51_SHIFT 12
+#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT)
+#define I40E_FLEX_52_SHIFT 11
+#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT)
+#define I40E_FLEX_53_SHIFT 10
+#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT)
+#define I40E_FLEX_54_SHIFT 9
+#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT)
+#define I40E_FLEX_55_SHIFT 8
+#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT)
+#define I40E_FLEX_56_SHIFT 7
+#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
+#define I40E_FLEX_57_SHIFT 6
+#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
+
+/* Version format for Dynamic Device Personalization(DDP) */
+struct i40e_ddp_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define I40E_DDP_NAME_SIZE 32
+
+/* Package header */
+struct i40e_package_header {
+ struct i40e_ddp_version version;
+ u32 segment_count;
+ u32 segment_offset[1];
+};
+
+/* Generic segment header */
+struct i40e_generic_seg_header {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_NOTES 0x00000002
+#define SEGMENT_TYPE_I40E 0x00000011
+#define SEGMENT_TYPE_X722 0x00000012
+ u32 type;
+ struct i40e_ddp_version version;
+ u32 size;
+ char name[I40E_DDP_NAME_SIZE];
+};
+
+struct i40e_metadata_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ddp_version version;
+#define I40E_DDP_TRACKID_RDONLY 0
+#define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF
+ u32 track_id;
+ char name[I40E_DDP_NAME_SIZE];
+};
+
+struct i40e_device_id_entry {
+ u32 vendor_dev_id;
+ u32 sub_vendor_dev_id;
+};
+
+struct i40e_profile_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
+ u32 device_table_count;
+ struct i40e_device_id_entry device_table[1];
+};
+
+struct i40e_section_table {
+ u32 section_count;
+ u32 section_offset[1];
+};
+
+struct i40e_profile_section_header {
+ u16 tbl_size;
+ u16 data_end;
+ struct {
+#define SECTION_TYPE_INFO 0x00000010
+#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_RB_MMIO 0x00001800
+#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_RB_AQ 0x00001801
+#define SECTION_TYPE_NOTE 0x80000000
+#define SECTION_TYPE_NAME 0x80000001
+#define SECTION_TYPE_PROTO 0x80000002
+#define SECTION_TYPE_PCTYPE 0x80000003
+#define SECTION_TYPE_PTYPE 0x80000004
+ u32 type;
+ u32 offset;
+ u32 size;
+ } section;
+};
+
+struct i40e_profile_tlv_section_record {
+ u8 rtype;
+ u8 type;
+ u16 len;
+ u8 data[12];
+};
+
+/* Generic AQ section in proflie */
+struct i40e_profile_aq_section {
+ u16 opcode;
+ u16 flags;
+ u8 param[16];
+ u16 datalen;
+ u8 data[1];
+};
+
+struct i40e_profile_info {
+ u32 track_id;
+ struct i40e_ddp_version version;
+ u8 op;
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
+ u8 reserved[7];
+ u8 name[I40E_DDP_NAME_SIZE];
+};
+#endif /* _I40E_TYPE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/meson.build b/src/spdk/dpdk/drivers/net/i40e/base/meson.build
new file mode 100644
index 00000000..401a1477
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/meson.build
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = [
+ 'i40e_adminq.c',
+ 'i40e_common.c',
+ 'i40e_dcb.c',
+ 'i40e_diag.c',
+ 'i40e_hmc.c',
+ 'i40e_lan_hmc.c',
+ 'i40e_nvm.c'
+]
+
+error_cflags = ['-Wno-sign-compare', '-Wno-unused-value',
+ '-Wno-format', '-Wno-unused-but-set-variable',
+ '-Wno-strict-aliasing'
+]
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('i40e_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/virtchnl.h b/src/spdk/dpdk/drivers/net/i40e/base/virtchnl.h
new file mode 100644
index 00000000..b2d5fe73
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/virtchnl.h
@@ -0,0 +1,772 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+ VIRTCHNL_STATUS_SUCCESS = 0,
+ VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+ VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+};
+
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+
+enum virtchnl_link_speed {
+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with AVF 1.0 */
+enum virtchnl_rx_hsplit {
+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
+};
+
+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of capabilities
+ * exchange and are not considered part of base mode feature set.
+ */
+ VIRTCHNL_OP_UNKNOWN = 0,
+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ VIRTCHNL_OP_RESET_VF = 2,
+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+ VIRTCHNL_OP_ADD_VLAN = 12,
+ VIRTCHNL_OP_DEL_VLAN = 13,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ VIRTCHNL_OP_GET_STATS = 15,
+ VIRTCHNL_OP_RSVD = 16,
+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+#ifdef VIRTCHNL_SOL_VF_SUPPORT
+ VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19,
+#endif
+#ifdef VIRTCHNL_IWARP
+ VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+#endif
+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ {virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0)}
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures.*/
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+ VIRTCHNL_VSI_TYPE_INVALID = 0,
+ VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum virtchnl_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF capability flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_cap_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u32 pad1;
+ u64 dma_ring_addr;
+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ u32 pad;
+ struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated. This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support. If the request is successful, PF will
+ * then reset the VF to institute required changes.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these message to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+ u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+
+#ifdef VIRTCHNL_SOL_VF_SUPPORT
+/* VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG
+ * VF sends this message to get the default MTU and list of additional ethernet
+ * addresses it is allowed to use.
+ * PF responds with an indirect message containing
+ * virtchnl_addnl_solaris_config with zero or more
+ * virtchnl_ether_addr structures.
+ *
+ * It is expected that this operation will only ever be needed for Solaris VFs
+ * running under a Solaris PF.
+ */
+struct virtchnl_addnl_solaris_config {
+ u16 default_mtu;
+ struct virtchnl_ether_addr_list al;
+};
+
+#endif
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC 0x00000001
+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct eth_stats in an external buffer.
+ */
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+ VIRTCHNL_EVENT_UNKNOWN = 0,
+ VIRTCHNL_EVENT_LINK_CHANGE,
+ VIRTCHNL_EVENT_RESET_IMPENDING,
+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO 0
+#define PF_EVENT_SEVERITY_ATTENTION 1
+#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct virtchnl_pf_event {
+ enum virtchnl_event_codes event;
+ union {
+ struct {
+ enum virtchnl_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
+
+#ifdef VIRTCHNL_IWARP
+
+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have an AEQ and CEQ attached to it although
+ * there is a single AEQ per VF IWARP instance in which case
+ * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define QUEUE_TYPE_PE_AEQ 0x80
+#define QUEUE_INVALID_IDX 0xFFFF
+
+struct virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+
+struct virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct virtchnl_iwarp_qv_info qv_info[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+
+#endif
+
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked
+ * will result in 3.
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
+
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format against struct for each opcode
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ int valid_len = 0;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(ver))
+ valid_len = sizeof(u32);
+ break;
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ virtchnl_queue_pair_info));
+ if (vqc->num_queue_pairs == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
+ valid_len += (vimi->num_vectors *
+ sizeof(struct virtchnl_vector_map));
+ if (vimi->num_vectors == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
+ valid_len += veal->num_elements *
+ sizeof(struct virtchnl_ether_addr);
+ if (veal->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ valid_len += vfl->num_elements * sizeof(u16);
+ if (vfl->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+#ifdef VIRTCHNL_IWARP
+ case VIRTCHNL_OP_IWARP:
+ /* These messages are opaque to us and will be validated in
+ * the RDMA client code. We just need to check for nonzero
+ * length. The firmware will enforce max length restrictions.
+ */
+ if (msglen)
+ valid_len = msglen;
+ else
+ err_msg_format = true;
+ break;
+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ break;
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_iwarp_qvlist_info *qv =
+ (struct virtchnl_iwarp_qvlist_info *)msg;
+ if (qv->num_vectors == 0) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += ((qv->num_vectors - 1) *
+ sizeof(struct virtchnl_iwarp_qv_info));
+ }
+ break;
+#endif
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct virtchnl_rss_hena);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ /* These are always errors coming from the VF. */
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ return VIRTCHNL_ERR_PARAM;
+ }
+ /* few more checks */
+ if (err_msg_format || valid_len != msglen)
+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+ return 0;
+}
+#endif /* _VIRTCHNL_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.c b/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.c
new file mode 100644
index 00000000..85a6a867
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.c
@@ -0,0 +1,12530 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_string_fns.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_alarm.h>
+#include <rte_dev.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_hash_crc.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_prototype.h"
+#include "base/i40e_adminq_cmd.h"
+#include "base/i40e_type.h"
+#include "base/i40e_register.h"
+#include "base/i40e_dcb.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_pf.h"
+#include "i40e_regs.h"
+#include "rte_pmd_i40e.h"
+
+#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
+#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
+#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
+#define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
+
+#define I40E_CLEAR_PXE_WAIT_MS 200
+
+/* Maximun number of capability elements */
+#define I40E_MAX_CAP_ELE_NUM 128
+
+/* Wait count and interval */
+#define I40E_CHK_Q_ENA_COUNT 1000
+#define I40E_CHK_Q_ENA_INTERVAL_US 1000
+
+/* Maximun number of VSI */
+#define I40E_MAX_NUM_VSIS (384UL)
+
+#define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
+
+/* Flow control default timer */
+#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
+
+/* Flow control enable fwd bit */
+#define I40E_PRTMAC_FWD_CTRL 0x00000001
+
+/* Receive Packet Buffer size */
+#define I40E_RXPBSIZE (968 * 1024)
+
+/* Kilobytes shift */
+#define I40E_KILOSHIFT 10
+
+/* Flow control default high water */
+#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
+
+/* Flow control default low water */
+#define I40E_DEFAULT_LOW_WATER (0xF2000 >> I40E_KILOSHIFT)
+
+/* Receive Average Packet Size in Byte*/
+#define I40E_PACKET_AVERAGE_SIZE 128
+
+/* Mask of PF interrupt causes */
+#define I40E_PFINT_ICR0_ENA_MASK ( \
+ I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
+ I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
+ I40E_PFINT_ICR0_ENA_GRST_MASK | \
+ I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
+ I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
+ I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
+ I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
+ I40E_PFINT_ICR0_ENA_VFLR_MASK | \
+ I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
+
+#define I40E_FLOW_TYPES ( \
+ (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+ (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
+ (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
+
+/* Additional timesync values. */
+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
+#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
+#define I40E_PRTTSYN_TSYNENA 0x80000000
+#define I40E_PRTTSYN_TSYNTYPE 0x0e000000
+#define I40E_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+
+/**
+ * Below are values for writing un-exposed registers suggested
+ * by silicon experts
+ */
+/* Destination MAC address */
+#define I40E_REG_INSET_L2_DMAC 0xE000000000000000ULL
+/* Source MAC address */
+#define I40E_REG_INSET_L2_SMAC 0x1C00000000000000ULL
+/* Outer (S-Tag) VLAN tag in the outer L2 header */
+#define I40E_REG_INSET_L2_OUTER_VLAN 0x0000000004000000ULL
+/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
+#define I40E_REG_INSET_L2_INNER_VLAN 0x0080000000000000ULL
+/* Single VLAN tag in the inner L2 header */
+#define I40E_REG_INSET_TUNNEL_VLAN 0x0100000000000000ULL
+/* Source IPv4 address */
+#define I40E_REG_INSET_L3_SRC_IP4 0x0001800000000000ULL
+/* Destination IPv4 address */
+#define I40E_REG_INSET_L3_DST_IP4 0x0000001800000000ULL
+/* Source IPv4 address for X722 */
+#define I40E_X722_REG_INSET_L3_SRC_IP4 0x0006000000000000ULL
+/* Destination IPv4 address for X722 */
+#define I40E_X722_REG_INSET_L3_DST_IP4 0x0000060000000000ULL
+/* IPv4 Protocol for X722 */
+#define I40E_X722_REG_INSET_L3_IP4_PROTO 0x0010000000000000ULL
+/* IPv4 Time to Live for X722 */
+#define I40E_X722_REG_INSET_L3_IP4_TTL 0x0010000000000000ULL
+/* IPv4 Type of Service (TOS) */
+#define I40E_REG_INSET_L3_IP4_TOS 0x0040000000000000ULL
+/* IPv4 Protocol */
+#define I40E_REG_INSET_L3_IP4_PROTO 0x0004000000000000ULL
+/* IPv4 Time to Live */
+#define I40E_REG_INSET_L3_IP4_TTL 0x0004000000000000ULL
+/* Source IPv6 address */
+#define I40E_REG_INSET_L3_SRC_IP6 0x0007F80000000000ULL
+/* Destination IPv6 address */
+#define I40E_REG_INSET_L3_DST_IP6 0x000007F800000000ULL
+/* IPv6 Traffic Class (TC) */
+#define I40E_REG_INSET_L3_IP6_TC 0x0040000000000000ULL
+/* IPv6 Next Header */
+#define I40E_REG_INSET_L3_IP6_NEXT_HDR 0x0008000000000000ULL
+/* IPv6 Hop Limit */
+#define I40E_REG_INSET_L3_IP6_HOP_LIMIT 0x0008000000000000ULL
+/* Source L4 port */
+#define I40E_REG_INSET_L4_SRC_PORT 0x0000000400000000ULL
+/* Destination L4 port */
+#define I40E_REG_INSET_L4_DST_PORT 0x0000000200000000ULL
+/* SCTP verification tag */
+#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG 0x0000000180000000ULL
+/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE)*/
+#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC 0x0000000001C00000ULL
+/* Source port of tunneling UDP */
+#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT 0x0000000000200000ULL
+/* Destination port of tunneling UDP */
+#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT 0x0000000000100000ULL
+/* UDP Tunneling ID, NVGRE/GRE key */
+#define I40E_REG_INSET_TUNNEL_ID 0x00000000000C0000ULL
+/* Last ether type */
+#define I40E_REG_INSET_LAST_ETHER_TYPE 0x0000000000004000ULL
+/* Tunneling outer destination IPv4 address */
+#define I40E_REG_INSET_TUNNEL_L3_DST_IP4 0x00000000000000C0ULL
+/* Tunneling outer destination IPv6 address */
+#define I40E_REG_INSET_TUNNEL_L3_DST_IP6 0x0000000000003FC0ULL
+/* 1st word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1 0x0000000000002000ULL
+/* 2nd word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2 0x0000000000001000ULL
+/* 3rd word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3 0x0000000000000800ULL
+/* 4th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4 0x0000000000000400ULL
+/* 5th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5 0x0000000000000200ULL
+/* 6th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6 0x0000000000000100ULL
+/* 7th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7 0x0000000000000080ULL
+/* 8th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8 0x0000000000000040ULL
+/* all 8 words flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS 0x0000000000003FC0ULL
+#define I40E_REG_INSET_MASK_DEFAULT 0x0000000000000000ULL
+
+#define I40E_TRANSLATE_INSET 0
+#define I40E_TRANSLATE_REG 1
+
+#define I40E_INSET_IPV4_TOS_MASK 0x0009FF00UL
+#define I40E_INSET_IPv4_TTL_MASK 0x000D00FFUL
+#define I40E_INSET_IPV4_PROTO_MASK 0x000DFF00UL
+#define I40E_INSET_IPV6_TC_MASK 0x0009F00FUL
+#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL
+#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL
+
+/* PCI offset for querying capability */
+#define PCI_DEV_CAP_REG 0xA4
+/* PCI offset for enabling/disabling Extended Tag */
+#define PCI_DEV_CTRL_REG 0xA8
+/* Bit mask of Extended Tag capability */
+#define PCI_DEV_CAP_EXT_TAG_MASK 0x20
+/* Bit shift of Extended Tag enable/disable */
+#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
+/* Bit mask of Extended Tag enable/disable */
+#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
+
+static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
+static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
+static int i40e_dev_configure(struct rte_eth_dev *dev);
+static int i40e_dev_start(struct rte_eth_dev *dev);
+static void i40e_dev_stop(struct rte_eth_dev *dev);
+static void i40e_dev_close(struct rte_eth_dev *dev);
+static int i40e_dev_reset(struct rte_eth_dev *dev);
+static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
+static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
+static int i40e_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned n);
+static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit);
+static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
+static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx);
+static int i40e_fw_version_get(struct rte_eth_dev *dev,
+ char *fw_version, size_t fw_size);
+static void i40e_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id,
+ int on);
+static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid);
+static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
+ uint16_t queue,
+ int on);
+static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
+static int i40e_dev_led_on(struct rte_eth_dev *dev);
+static int i40e_dev_led_off(struct rte_eth_dev *dev);
+static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_pfc_conf *pfc_conf);
+static int i40e_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool);
+static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
+static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+
+static int i40e_get_cap(struct i40e_hw *hw);
+static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
+static int i40e_pf_setup(struct i40e_pf *pf);
+static int i40e_dev_rxtx_init(struct i40e_pf *pf);
+static int i40e_vmdq_setup(struct rte_eth_dev *dev);
+static int i40e_dcb_setup(struct rte_eth_dev *dev);
+static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
+ bool offset_loaded, uint64_t *offset, uint64_t *stat);
+static void i40e_stat_update_48(struct i40e_hw *hw,
+ uint32_t hireg,
+ uint32_t loreg,
+ bool offset_loaded,
+ uint64_t *offset,
+ uint64_t *stat);
+static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
+static void i40e_dev_interrupt_handler(void *param);
+static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
+ uint32_t base, uint32_t num);
+static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
+static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
+ uint32_t base);
+static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
+ uint16_t num);
+static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
+static int i40e_veb_release(struct i40e_veb *veb);
+static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
+ struct i40e_vsi *vsi);
+static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
+static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
+static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num,
+ uint16_t vlan);
+static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
+static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel);
+static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel);
+static void i40e_filter_input_set_init(struct i40e_pf *pf);
+static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info);
+static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
+static void i40e_configure_registers(struct i40e_hw *hw);
+static void i40e_hw_init(struct rte_eth_dev *dev);
+static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
+static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
+ uint16_t seid,
+ uint16_t rule_type,
+ uint16_t *entries,
+ uint16_t count,
+ uint16_t rule_id);
+static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t sw_id, uint8_t on);
+static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
+
+static int i40e_timesync_enable(struct rte_eth_dev *dev);
+static int i40e_timesync_disable(struct rte_eth_dev *dev);
+static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
+
+static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+
+static int i40e_timesync_read_time(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int i40e_timesync_write_time(struct rte_eth_dev *dev,
+ const struct timespec *timestamp);
+
+static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+
+static int i40e_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs);
+
+static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
+
+static int i40e_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
+
+static int i40e_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+
+static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+
+static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
+static int i40e_ethertype_filter_convert(
+ const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter);
+static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+
+static int i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
+
+static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
+static void i40e_filter_restore(struct i40e_pf *pf);
+static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
+
+int i40e_logtype_init;
+int i40e_logtype_driver;
+
+static const char *const valid_keys[] = {
+ ETH_I40E_FLOATING_VEB_ARG,
+ ETH_I40E_FLOATING_VEB_LIST_ARG,
+ ETH_I40E_SUPPORT_MULTI_DRIVER,
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG,
+ NULL};
+
+static const struct rte_pci_id pci_id_i40e_map[] = {
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct eth_dev_ops i40e_eth_dev_ops = {
+ .dev_configure = i40e_dev_configure,
+ .dev_start = i40e_dev_start,
+ .dev_stop = i40e_dev_stop,
+ .dev_close = i40e_dev_close,
+ .dev_reset = i40e_dev_reset,
+ .promiscuous_enable = i40e_dev_promiscuous_enable,
+ .promiscuous_disable = i40e_dev_promiscuous_disable,
+ .allmulticast_enable = i40e_dev_allmulticast_enable,
+ .allmulticast_disable = i40e_dev_allmulticast_disable,
+ .dev_set_link_up = i40e_dev_set_link_up,
+ .dev_set_link_down = i40e_dev_set_link_down,
+ .link_update = i40e_dev_link_update,
+ .stats_get = i40e_dev_stats_get,
+ .xstats_get = i40e_dev_xstats_get,
+ .xstats_get_names = i40e_dev_xstats_get_names,
+ .stats_reset = i40e_dev_stats_reset,
+ .xstats_reset = i40e_dev_stats_reset,
+ .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
+ .fw_version_get = i40e_fw_version_get,
+ .dev_infos_get = i40e_dev_info_get,
+ .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
+ .vlan_filter_set = i40e_vlan_filter_set,
+ .vlan_tpid_set = i40e_vlan_tpid_set,
+ .vlan_offload_set = i40e_vlan_offload_set,
+ .vlan_strip_queue_set = i40e_vlan_strip_queue_set,
+ .vlan_pvid_set = i40e_vlan_pvid_set,
+ .rx_queue_start = i40e_dev_rx_queue_start,
+ .rx_queue_stop = i40e_dev_rx_queue_stop,
+ .tx_queue_start = i40e_dev_tx_queue_start,
+ .tx_queue_stop = i40e_dev_tx_queue_stop,
+ .rx_queue_setup = i40e_dev_rx_queue_setup,
+ .rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable,
+ .rx_queue_release = i40e_dev_rx_queue_release,
+ .rx_queue_count = i40e_dev_rx_queue_count,
+ .rx_descriptor_done = i40e_dev_rx_descriptor_done,
+ .rx_descriptor_status = i40e_dev_rx_descriptor_status,
+ .tx_descriptor_status = i40e_dev_tx_descriptor_status,
+ .tx_queue_setup = i40e_dev_tx_queue_setup,
+ .tx_queue_release = i40e_dev_tx_queue_release,
+ .dev_led_on = i40e_dev_led_on,
+ .dev_led_off = i40e_dev_led_off,
+ .flow_ctrl_get = i40e_flow_ctrl_get,
+ .flow_ctrl_set = i40e_flow_ctrl_set,
+ .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
+ .mac_addr_add = i40e_macaddr_add,
+ .mac_addr_remove = i40e_macaddr_remove,
+ .reta_update = i40e_dev_rss_reta_update,
+ .reta_query = i40e_dev_rss_reta_query,
+ .rss_hash_update = i40e_dev_rss_hash_update,
+ .rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
+ .udp_tunnel_port_add = i40e_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = i40e_dev_udp_tunnel_port_del,
+ .filter_ctrl = i40e_dev_filter_ctrl,
+ .rxq_info_get = i40e_rxq_info_get,
+ .txq_info_get = i40e_txq_info_get,
+ .mirror_rule_set = i40e_mirror_rule_set,
+ .mirror_rule_reset = i40e_mirror_rule_reset,
+ .timesync_enable = i40e_timesync_enable,
+ .timesync_disable = i40e_timesync_disable,
+ .timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp,
+ .get_dcb_info = i40e_dev_get_dcb_info,
+ .timesync_adjust_time = i40e_timesync_adjust_time,
+ .timesync_read_time = i40e_timesync_read_time,
+ .timesync_write_time = i40e_timesync_write_time,
+ .get_reg = i40e_get_regs,
+ .get_eeprom_length = i40e_get_eeprom_length,
+ .get_eeprom = i40e_get_eeprom,
+ .get_module_info = i40e_get_module_info,
+ .get_module_eeprom = i40e_get_module_eeprom,
+ .mac_addr_set = i40e_set_default_mac_addr,
+ .mtu_set = i40e_dev_mtu_set,
+ .tm_ops_get = i40e_tm_ops_get,
+};
+
+/* store statistics names and its offset in stats structure */
+struct rte_i40e_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
+ {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
+ {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
+ {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
+ {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
+ {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
+ rx_unknown_protocol)},
+ {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
+ {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
+ {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
+ {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
+};
+
+#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
+ sizeof(rte_i40e_stats_strings[0]))
+
+static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
+ {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
+ tx_dropped_link_down)},
+ {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
+ {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
+ illegal_bytes)},
+ {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
+ {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
+ mac_local_faults)},
+ {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
+ mac_remote_faults)},
+ {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
+ rx_length_errors)},
+ {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
+ {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
+ {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
+ {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
+ {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
+ {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_127)},
+ {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_255)},
+ {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_511)},
+ {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_1023)},
+ {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_1522)},
+ {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_big)},
+ {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
+ rx_undersize)},
+ {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
+ rx_oversize)},
+ {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
+ mac_short_packet_dropped)},
+ {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
+ rx_fragments)},
+ {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
+ {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
+ {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_127)},
+ {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_255)},
+ {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_511)},
+ {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_1023)},
+ {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_1522)},
+ {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_big)},
+ {"rx_flow_director_atr_match_packets",
+ offsetof(struct i40e_hw_port_stats, fd_atr_match)},
+ {"rx_flow_director_sb_match_packets",
+ offsetof(struct i40e_hw_port_stats, fd_sb_match)},
+ {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
+ tx_lpi_status)},
+ {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
+ rx_lpi_status)},
+ {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
+ tx_lpi_count)},
+ {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
+ rx_lpi_count)},
+};
+
+#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
+ sizeof(rte_i40e_hw_port_strings[0]))
+
+static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
+ {"xon_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xon_rx)},
+ {"xoff_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xoff_rx)},
+};
+
+#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
+ sizeof(rte_i40e_rxq_prio_strings[0]))
+
+static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
+ {"xon_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xon_tx)},
+ {"xoff_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xoff_tx)},
+ {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xon_2_xoff)},
+};
+
+#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
+ sizeof(rte_i40e_txq_prio_strings[0]))
+
+static int
+eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
+ int i, retval;
+
+ if (pci_dev->device.devargs) {
+ retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+ &eth_da);
+ if (retval)
+ return retval;
+ }
+
+ retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+ sizeof(struct i40e_adapter),
+ eth_dev_pci_specific_init, pci_dev,
+ eth_i40e_dev_init, NULL);
+
+ if (retval || eth_da.nb_representor_ports < 1)
+ return retval;
+
+ /* probe VF representor ports */
+ struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
+ pci_dev->device.name);
+
+ if (pf_ethdev == NULL)
+ return -ENODEV;
+
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ struct i40e_vf_representor representor = {
+ .vf_id = eth_da.representor_ports[i],
+ .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
+ pf_ethdev->data->dev_private)->switch_domain_id,
+ .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
+ pf_ethdev->data->dev_private)
+ };
+
+ /* representor port net_bdf_port */
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ pci_dev->device.name, eth_da.representor_ports[i]);
+
+ retval = rte_eth_dev_create(&pci_dev->device, name,
+ sizeof(struct i40e_vf_representor), NULL, NULL,
+ i40e_vf_representor_init, &representor);
+
+ if (retval)
+ PMD_DRV_LOG(ERR, "failed to create i40e vf "
+ "representor %s.", name);
+ }
+
+ return 0;
+}
+
+static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *ethdev;
+
+ ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!ethdev)
+ return -ENODEV;
+
+
+ if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ return rte_eth_dev_destroy(ethdev, i40e_vf_representor_uninit);
+ else
+ return rte_eth_dev_destroy(ethdev, eth_i40e_dev_uninit);
+}
+
+static struct rte_pci_driver rte_i40e_pmd = {
+ .id_table = pci_id_i40e_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = eth_i40e_pci_probe,
+ .remove = eth_i40e_pci_remove,
+};
+
+static inline void
+i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
+ uint32_t reg_val)
+{
+ uint32_t ori_reg_val;
+ struct rte_eth_dev *dev;
+
+ ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ i40e_write_rx_ctl(hw, reg_addr, reg_val);
+ if (ori_reg_val != reg_val)
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%08x, new: 0x%08x",
+ dev->device->name, reg_addr, ori_reg_val, reg_val);
+}
+
+RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
+
+#ifndef I40E_GLQF_ORT
+#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
+#endif
+#ifndef I40E_GLQF_PIT
+#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4))
+#endif
+#ifndef I40E_GLQF_L3_MAP
+#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
+#endif
+
+static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
+{
+ /*
+ * Initialize registers for parsing packet type of QinQ
+ * This should be removed from code once proper
+ * configuration API is added to avoid configuration conflicts
+ * between ports of the same device.
+ */
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
+}
+
+static inline void i40e_config_automask(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t val;
+
+ /* INTENA flag is not auto-cleared for interrupt */
+ val = I40E_READ_REG(hw, I40E_GLINT_CTL);
+ val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
+ I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+
+ /* If support multi-driver, PF will use INT0. */
+ if (!pf->support_multi_driver)
+ val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
+
+ I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
+}
+
+#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808
+
+/*
+ * Add a ethertype filter to drop all flow control frames transmitted
+ * from VSIs.
+*/
+static void
+i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+ int ret;
+
+ ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
+ I40E_FLOW_CONTROL_ETHERTYPE, flags,
+ pf->main_vsi_seid, 0,
+ TRUE, NULL, NULL);
+ if (ret)
+ PMD_INIT_LOG(ERR,
+ "Failed to add filter to drop flow control frames from VSIs.");
+}
+
+static int
+floating_veb_list_handler(__rte_unused const char *key,
+ const char *floating_veb_value,
+ void *opaque)
+{
+ int idx = 0;
+ unsigned int count = 0;
+ char *end = NULL;
+ int min, max;
+ bool *vf_floating_veb = opaque;
+
+ while (isblank(*floating_veb_value))
+ floating_veb_value++;
+
+ /* Reset floating VEB configuration for VFs */
+ for (idx = 0; idx < I40E_MAX_VF; idx++)
+ vf_floating_veb[idx] = false;
+
+ min = I40E_MAX_VF;
+ do {
+ while (isblank(*floating_veb_value))
+ floating_veb_value++;
+ if (*floating_veb_value == '\0')
+ return -1;
+ errno = 0;
+ idx = strtoul(floating_veb_value, &end, 10);
+ if (errno || end == NULL)
+ return -1;
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ min = idx;
+ } else if ((*end == ';') || (*end == '\0')) {
+ max = idx;
+ if (min == I40E_MAX_VF)
+ min = idx;
+ if (max >= I40E_MAX_VF)
+ max = I40E_MAX_VF - 1;
+ for (idx = min; idx <= max; idx++) {
+ vf_floating_veb[idx] = true;
+ count++;
+ }
+ min = I40E_MAX_VF;
+ } else {
+ return -1;
+ }
+ floating_veb_value = end + 1;
+ } while (*end != '\0');
+
+ if (count == 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+config_vf_floating_veb(struct rte_devargs *devargs,
+ uint16_t floating_veb,
+ bool *vf_floating_veb)
+{
+ struct rte_kvargs *kvlist;
+ int i;
+ const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
+
+ if (!floating_veb)
+ return;
+ /* All the VFs attach to the floating VEB by default
+ * when the floating VEB is enabled.
+ */
+ for (i = 0; i < I40E_MAX_VF; i++)
+ vf_floating_veb[i] = true;
+
+ if (devargs == NULL)
+ return;
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_keys);
+ if (kvlist == NULL)
+ return;
+
+ if (!rte_kvargs_count(kvlist, floating_veb_list)) {
+ rte_kvargs_free(kvlist);
+ return;
+ }
+ /* When the floating_veb_list parameter exists, all the VFs
+ * will attach to the legacy VEB firstly, then configure VFs
+ * to the floating VEB according to the floating_veb_list.
+ */
+ if (rte_kvargs_process(kvlist, floating_veb_list,
+ floating_veb_list_handler,
+ vf_floating_veb) < 0) {
+ rte_kvargs_free(kvlist);
+ return;
+ }
+ rte_kvargs_free(kvlist);
+}
+
+static int
+i40e_check_floating_handler(__rte_unused const char *key,
+ const char *value,
+ __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+static int
+is_floating_veb_supported(struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
+
+ if (devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_keys);
+ if (kvlist == NULL)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, floating_veb_key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ /* Floating VEB is enabled when there's key-value:
+ * enable_floating_veb=1
+ */
+ if (rte_kvargs_process(kvlist, floating_veb_key,
+ i40e_check_floating_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
+
+static void
+config_floating_veb(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
+
+ if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
+ pf->floating_veb =
+ is_floating_veb_supported(pci_dev->device.devargs);
+ config_vf_floating_veb(pci_dev->device.devargs,
+ pf->floating_veb,
+ pf->floating_veb_list);
+ } else {
+ pf->floating_veb = false;
+ }
+}
+
+#define I40E_L2_TAGS_S_TAG_SHIFT 1
+#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
+
+static int
+i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ char ethertype_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters ethertype_hash_params = {
+ .name = ethertype_hash_name,
+ .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
+ .key_len = sizeof(struct i40e_ethertype_filter_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ /* Initialize ethertype filter rule list and hash */
+ TAILQ_INIT(&ethertype_rule->ethertype_list);
+ snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
+ "ethertype_%s", dev->device->name);
+ ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
+ if (!ethertype_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
+ return -EINVAL;
+ }
+ ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
+ sizeof(struct i40e_ethertype_filter *) *
+ I40E_MAX_ETHERTYPE_FILTER_NUM,
+ 0);
+ if (!ethertype_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for ethertype hash map!");
+ ret = -ENOMEM;
+ goto err_ethertype_hash_map_alloc;
+ }
+
+ return 0;
+
+err_ethertype_hash_map_alloc:
+ rte_hash_free(ethertype_rule->hash_table);
+
+ return ret;
+}
+
+static int
+i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ char tunnel_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters tunnel_hash_params = {
+ .name = tunnel_hash_name,
+ .entries = I40E_MAX_TUNNEL_FILTER_NUM,
+ .key_len = sizeof(struct i40e_tunnel_filter_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ /* Initialize tunnel filter rule list and hash */
+ TAILQ_INIT(&tunnel_rule->tunnel_list);
+ snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
+ "tunnel_%s", dev->device->name);
+ tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
+ if (!tunnel_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
+ return -EINVAL;
+ }
+ tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
+ sizeof(struct i40e_tunnel_filter *) *
+ I40E_MAX_TUNNEL_FILTER_NUM,
+ 0);
+ if (!tunnel_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for tunnel hash map!");
+ ret = -ENOMEM;
+ goto err_tunnel_hash_map_alloc;
+ }
+
+ return 0;
+
+err_tunnel_hash_map_alloc:
+ rte_hash_free(tunnel_rule->hash_table);
+
+ return ret;
+}
+
+static int
+i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = I40E_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct i40e_fdir_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ /* Initialize flow director filter rule list and hash */
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", dev->device->name);
+ fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
+ sizeof(struct i40e_fdir_filter *) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ ret = -ENOMEM;
+ goto err_fdir_hash_map_alloc;
+ }
+ return 0;
+
+err_fdir_hash_map_alloc:
+ rte_hash_free(fdir_info->hash_table);
+
+ return ret;
+}
+
+static void
+i40e_init_customized_info(struct i40e_pf *pf)
+{
+ int i;
+
+ /* Initialize customized pctype */
+ for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
+ pf->customized_pctype[i].index = i;
+ pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
+ pf->customized_pctype[i].valid = false;
+ }
+
+ pf->gtp_support = false;
+}
+
+void
+i40e_init_queue_region_conf(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_queue_regions *info = &pf->queue_region;
+ uint16_t i;
+
+ for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
+
+ memset(info, 0, sizeof(struct i40e_queue_regions));
+}
+
+static int
+i40e_parse_multi_drv_handler(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct i40e_pf *pf;
+ unsigned long support_multi_driver;
+ char *end;
+
+ pf = (struct i40e_pf *)opaque;
+
+ errno = 0;
+ support_multi_driver = strtoul(value, &end, 10);
+ if (errno != 0 || end == value || *end != 0) {
+ PMD_DRV_LOG(WARNING, "Wrong global configuration");
+ return -(EINVAL);
+ }
+
+ if (support_multi_driver == 1 || support_multi_driver == 0)
+ pf->support_multi_driver = (bool)support_multi_driver;
+ else
+ PMD_DRV_LOG(WARNING, "%s must be 1 or 0,",
+ "enable global configuration by default."
+ ETH_I40E_SUPPORT_MULTI_DRIVER);
+ return 0;
+}
+
+static int
+i40e_support_multi_driver(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_kvargs *kvlist;
+ int kvargs_count;
+
+ /* Enable global configuration by default */
+ pf->support_multi_driver = false;
+
+ if (!dev->device->devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
+ if (!kvargs_count) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (kvargs_count > 1)
+ PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
+ "the first invalid or last valid one is used !",
+ ETH_I40E_SUPPORT_MULTI_DRIVER);
+
+ if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
+ i40e_parse_multi_drv_handler, pf) < 0) {
+ rte_kvargs_free(kvlist);
+ return -EINVAL;
+ }
+
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
+static int
+i40e_aq_debug_write_global_register(struct i40e_hw *hw,
+ uint32_t reg_addr, uint64_t reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ uint64_t ori_reg_val;
+ struct rte_eth_dev *dev;
+ int ret;
+
+ ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Fail to debug read from 0x%08x",
+ reg_addr);
+ return -EIO;
+ }
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+
+ if (ori_reg_val != reg_val)
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%"PRIx64", after: 0x%"PRIx64,
+ dev->device->name, reg_addr, ori_reg_val, reg_val);
+
+ return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
+}
+
+static int
+eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
+{
+ struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi;
+ int ret;
+ uint32_t len;
+ uint8_t aq_fail = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev->dev_ops = &i40e_eth_dev_ops;
+ dev->rx_pkt_burst = i40e_recv_pkts;
+ dev->tx_pkt_burst = i40e_xmit_pkts;
+ dev->tx_pkt_prepare = i40e_prep_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ i40e_set_rx_function(dev);
+ i40e_set_tx_function(dev);
+ return 0;
+ }
+ i40e_set_default_ptype_table(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ intr_handle = &pci_dev->intr_handle;
+
+ rte_eth_copy_pci_info(dev, pci_dev);
+
+ pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ pf->adapter->eth_dev = dev;
+ pf->dev_data = dev->data;
+
+ hw->back = I40E_PF_TO_ADAPTER(pf);
+ hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
+ if (!hw->hw_addr) {
+ PMD_INIT_LOG(ERR,
+ "Hardware is not available, as address is NULL");
+ return -ENODEV;
+ }
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->bus.device = pci_dev->addr.devid;
+ hw->bus.func = pci_dev->addr.function;
+ hw->adapter_stopped = 0;
+
+ /*
+ * Switch Tag value should not be identical to either the First Tag
+ * or Second Tag values. So set something other than common Ethertype
+ * for internal switching.
+ */
+ hw->switch_tag = 0xffff;
+
+ /* Check if need to support multi-driver */
+ i40e_support_multi_driver(dev);
+
+ /* Make sure all is clean before doing PF reset */
+ i40e_clear_hw(hw);
+
+ /* Initialize the hardware */
+ i40e_hw_init(dev);
+
+ /* Reset here to make sure all is clean for each PF */
+ ret = i40e_pf_reset(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
+ return ret;
+ }
+
+ /* Initialize the shared code (base driver) */
+ ret = i40e_init_shared_code(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
+ return ret;
+ }
+
+ i40e_config_automask(pf);
+
+ i40e_set_default_pctype_table(dev);
+
+ /*
+ * To work around the NVM issue, initialize registers
+ * for packet type of QinQ by software.
+ * It should be removed once issues are fixed in NVM.
+ */
+ if (!pf->support_multi_driver)
+ i40e_GLQF_reg_init(hw);
+
+ /* Initialize the input set for filters (hash and fd) to default value */
+ i40e_filter_input_set_init(pf);
+
+ /* Initialize the parameters for adminq */
+ i40e_init_adminq_parameter(hw);
+ ret = i40e_init_adminq(hw);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
+ return -EIO;
+ }
+ PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ ((hw->nvm.version >> 12) & 0xf),
+ ((hw->nvm.version >> 4) & 0xff),
+ (hw->nvm.version & 0xf), hw->nvm.eetrack);
+
+ /* initialise the L3_MAP register */
+ if (!pf->support_multi_driver) {
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GLQF_L3_MAP(40),
+ 0x00000028, NULL);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
+ ret);
+ PMD_INIT_LOG(DEBUG,
+ "Global register 0x%08x is changed with 0x28",
+ I40E_GLQF_L3_MAP(40));
+ }
+
+ /* Need the special FW version to support floating VEB */
+ config_floating_veb(dev);
+ /* Clear PXE mode */
+ i40e_clear_pxe_mode(hw);
+ i40e_dev_sync_phy_type(hw);
+
+ /*
+ * On X710, performance number is far from the expectation on recent
+ * firmware versions. The fix for this issue may not be integrated in
+ * the following firmware version. So the workaround in software driver
+ * is needed. It needs to modify the initial values of 3 internal only
+ * registers. Note that the workaround can be removed when it is fixed
+ * in firmware in the future.
+ */
+ i40e_configure_registers(hw);
+
+ /* Get hw capabilities */
+ ret = i40e_get_cap(hw);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
+ goto err_get_capabilities;
+ }
+
+ /* Initialize parameters for PF */
+ ret = i40e_pf_parameter_init(dev);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
+ goto err_parameter_init;
+ }
+
+ /* Initialize the queue management */
+ ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to init queue pool");
+ goto err_qp_pool_init;
+ }
+ ret = i40e_res_pool_init(&pf->msix_pool, 1,
+ hw->func_caps.num_msix_vectors - 1);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
+ goto err_msix_pool_init;
+ }
+
+ /* Initialize lan hmc */
+ ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
+ goto err_init_lan_hmc;
+ }
+
+ /* Configure lan hmc */
+ ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
+ goto err_configure_lan_hmc;
+ }
+
+ /* Get and check the mac address */
+ i40e_get_mac_addr(hw, hw->mac.addr);
+ if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "mac address is not valid");
+ ret = -EIO;
+ goto err_get_mac_addr;
+ }
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *) hw->mac.addr,
+ (struct ether_addr *) hw->mac.perm_addr);
+
+ /* Disable flow control */
+ hw->fc.requested_mode = I40E_FC_NONE;
+ i40e_set_fc(hw, &aq_fail, TRUE);
+
+ /* Set the global registers with default ether type value */
+ if (!pf->support_multi_driver) {
+ ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ ETHER_TYPE_VLAN);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR,
+ "Failed to set the default outer "
+ "VLAN ether type");
+ goto err_setup_pf_switch;
+ }
+ }
+
+ /* PF setup, which includes VSI setup */
+ ret = i40e_pf_setup(pf);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
+ goto err_setup_pf_switch;
+ }
+
+ /* reset all stats of the device, including pf and main vsi */
+ i40e_dev_stats_reset(dev);
+
+ vsi = pf->main_vsi;
+
+ /* Disable double vlan by default */
+ i40e_vsi_config_double_vlan(vsi, FALSE);
+
+ /* Disable S-TAG identification when floating_veb is disabled */
+ if (!pf->floating_veb) {
+ ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
+ if (ret & I40E_L2_TAGS_S_TAG_MASK) {
+ ret &= ~I40E_L2_TAGS_S_TAG_MASK;
+ I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
+ }
+ }
+
+ if (!vsi->max_macaddrs)
+ len = ETHER_ADDR_LEN;
+ else
+ len = ETHER_ADDR_LEN * vsi->max_macaddrs;
+
+ /* Should be after VSI initialized */
+ dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
+ if (!dev->data->mac_addrs) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocated memory for storing mac address");
+ goto err_mac_alloc;
+ }
+ ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
+ &dev->data->mac_addrs[0]);
+
+ /* Init dcb to sw mode by default */
+ ret = i40e_dcb_init_configure(dev, TRUE);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(INFO, "Failed to init dcb.");
+ pf->flags &= ~I40E_FLAG_DCB;
+ }
+ /* Update HW struct after DCB configuration */
+ i40e_get_cap(hw);
+
+ /* initialize pf host driver to setup SRIOV resource if applicable */
+ i40e_pf_host_init(dev);
+
+ /* register callback func to eal lib */
+ rte_intr_callback_register(intr_handle,
+ i40e_dev_interrupt_handler, dev);
+
+ /* configure and enable device interrupt */
+ i40e_pf_config_irq0(hw, TRUE);
+ i40e_pf_enable_irq0(hw);
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(intr_handle);
+
+ /* By default disable flexible payload in global configuration */
+ if (!pf->support_multi_driver)
+ i40e_flex_payload_reg_set_default(hw);
+
+ /*
+ * Add an ethertype filter to drop all flow control frames transmitted
+ * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
+ * frames to wire.
+ */
+ i40e_add_tx_flow_control_drop_filter(pf);
+
+ /* Set the max frame size to 0x2600 by default,
+ * in case other drivers changed the default value.
+ */
+ i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
+
+ /* initialize mirror rule list */
+ TAILQ_INIT(&pf->mirror_list);
+
+ /* initialize Traffic Manager configuration */
+ i40e_tm_conf_init(dev);
+
+ /* Initialize customized information */
+ i40e_init_customized_info(pf);
+
+ ret = i40e_init_ethtype_filter_list(dev);
+ if (ret < 0)
+ goto err_init_ethtype_filter_list;
+ ret = i40e_init_tunnel_filter_list(dev);
+ if (ret < 0)
+ goto err_init_tunnel_filter_list;
+ ret = i40e_init_fdir_filter_list(dev);
+ if (ret < 0)
+ goto err_init_fdir_filter_list;
+
+ /* initialize queue region configuration */
+ i40e_init_queue_region_conf(dev);
+
+ /* initialize rss configuration from rte_flow */
+ memset(&pf->rss_info, 0,
+ sizeof(struct i40e_rte_flow_rss_conf));
+
+ return 0;
+
+err_init_fdir_filter_list:
+ rte_free(pf->tunnel.hash_table);
+ rte_free(pf->tunnel.hash_map);
+err_init_tunnel_filter_list:
+ rte_free(pf->ethertype.hash_table);
+ rte_free(pf->ethertype.hash_map);
+err_init_ethtype_filter_list:
+ rte_free(dev->data->mac_addrs);
+err_mac_alloc:
+ i40e_vsi_release(pf->main_vsi);
+err_setup_pf_switch:
+err_get_mac_addr:
+err_configure_lan_hmc:
+ (void)i40e_shutdown_lan_hmc(hw);
+err_init_lan_hmc:
+ i40e_res_pool_destroy(&pf->msix_pool);
+err_msix_pool_init:
+ i40e_res_pool_destroy(&pf->qp_pool);
+err_qp_pool_init:
+err_parameter_init:
+err_get_capabilities:
+ (void)i40e_shutdown_adminq(hw);
+
+ return ret;
+}
+
+static void
+i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter *p_ethertype;
+ struct i40e_ethertype_rule *ethertype_rule;
+
+ ethertype_rule = &pf->ethertype;
+ /* Remove all ethertype filter rules and hash */
+ if (ethertype_rule->hash_map)
+ rte_free(ethertype_rule->hash_map);
+ if (ethertype_rule->hash_table)
+ rte_hash_free(ethertype_rule->hash_table);
+
+ while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
+ TAILQ_REMOVE(&ethertype_rule->ethertype_list,
+ p_ethertype, rules);
+ rte_free(p_ethertype);
+ }
+}
+
+static void
+i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter *p_tunnel;
+ struct i40e_tunnel_rule *tunnel_rule;
+
+ tunnel_rule = &pf->tunnel;
+ /* Remove all tunnel director rules and hash */
+ if (tunnel_rule->hash_map)
+ rte_free(tunnel_rule->hash_map);
+ if (tunnel_rule->hash_table)
+ rte_hash_free(tunnel_rule->hash_table);
+
+ while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
+ TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
+ rte_free(p_tunnel);
+ }
+}
+
+static void
+i40e_rm_fdir_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_fdir_filter *p_fdir;
+ struct i40e_fdir_info *fdir_info;
+
+ fdir_info = &pf->fdir;
+ /* Remove all flow director rules and hash */
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_table)
+ rte_hash_free(fdir_info->hash_table);
+
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+ rte_free(p_fdir);
+ }
+}
+
+void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
+{
+ /*
+ * Disable by default flexible payload
+ * for corresponding L2/L3/L4 layers.
+ */
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
+}
+
+static int
+eth_i40e_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf;
+ struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
+ struct i40e_hw *hw;
+ struct i40e_filter_control_settings settings;
+ struct rte_flow *p_flow;
+ int ret;
+ uint8_t aq_fail = 0;
+ int retries = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ intr_handle = &pci_dev->intr_handle;
+
+ ret = rte_eth_switch_domain_free(pf->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
+ if (hw->adapter_stopped == 0)
+ i40e_dev_close(dev);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ /* Clear PXE mode */
+ i40e_clear_pxe_mode(hw);
+
+ /* Unconfigure filter control */
+ memset(&settings, 0, sizeof(settings));
+ ret = i40e_set_filter_control(hw, &settings);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
+ ret);
+
+ /* Disable flow control */
+ hw->fc.requested_mode = I40E_FC_NONE;
+ i40e_set_fc(hw, &aq_fail, TRUE);
+
+ /* uninitialize pf host driver */
+ i40e_pf_host_uninit(dev);
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+
+ /* unregister callback func to eal lib */
+ do {
+ ret = rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler, dev);
+ if (ret >= 0) {
+ break;
+ } else if (ret != -EAGAIN) {
+ PMD_INIT_LOG(ERR,
+ "intr callback unregister failed: %d",
+ ret);
+ return ret;
+ }
+ i40e_msec_delay(500);
+ } while (retries++ < 5);
+
+ i40e_rm_ethtype_filter_list(pf);
+ i40e_rm_tunnel_filter_list(pf);
+ i40e_rm_fdir_filter_list(pf);
+
+ /* Remove all flows */
+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+ rte_free(p_flow);
+ }
+
+ /* Remove all Traffic Manager configuration */
+ i40e_tm_conf_uninit(dev);
+
+ return 0;
+}
+
+static int
+i40e_dev_configure(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ int i, ret;
+
+ ret = i40e_dev_sync_phy_type(hw);
+ if (ret)
+ return ret;
+
+ /* Initialize to TRUE. If any of Rx queues doesn't meet the
+ * bulk allocation or vector Rx preconditions we will reset it.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
+
+ if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
+ ret = i40e_fdir_setup(pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to setup flow director.");
+ return -ENOTSUP;
+ }
+ ret = i40e_fdir_configure(dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to configure fdir.");
+ goto err;
+ }
+ } else
+ i40e_fdir_teardown(pf);
+
+ ret = i40e_dev_init_vlan(dev);
+ if (ret < 0)
+ goto err;
+
+ /* VMDQ setup.
+ * Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and
+ * RSS setting have different requirements.
+ * General PMD driver call sequence are NIC init, configure,
+ * rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
+ * will try to lookup the VSI that specific queue belongs to if VMDQ
+ * applicable. So, VMDQ setting has to be done before
+ * rx/tx_queue_setup(). This function is good to place vmdq_setup.
+ * For RSS setting, it will try to calculate actual configured RX queue
+ * number, which will be available after rx_queue_setup(). dev_start()
+ * function is good to place RSS setup.
+ */
+ if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+ ret = i40e_vmdq_setup(dev);
+ if (ret)
+ goto err;
+ }
+
+ if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ ret = i40e_dcb_setup(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to configure DCB.");
+ goto err_dcb;
+ }
+ }
+
+ TAILQ_INIT(&pf->flow_list);
+
+ return 0;
+
+err_dcb:
+ /* need to release vmdq resource if exists */
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ i40e_vsi_release(pf->vmdq[i].vsi);
+ pf->vmdq[i].vsi = NULL;
+ }
+ rte_free(pf->vmdq);
+ pf->vmdq = NULL;
+err:
+ /* need to release fdir resource if exists */
+ i40e_fdir_teardown(pf);
+ return ret;
+}
+
+void
+i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t msix_vect = vsi->msix_intr;
+ uint16_t i;
+
+ for (i = 0; i < vsi->nb_qps; i++) {
+ I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
+ rte_wmb();
+ }
+
+ if (vsi->type != I40E_VSI_SRIOV) {
+ if (!rte_intr_allow_others(intr_handle)) {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+ I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
+ 0);
+ } else {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
+ I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
+ msix_vect - 1), 0);
+ }
+ } else {
+ uint32_t reg;
+ reg = (hw->func_caps.num_msix_vectors_vf - 1) *
+ vsi->user_param + (msix_vect - 1);
+
+ I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ }
+ I40E_WRITE_FLUSH(hw);
+}
+
+static void
+__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
+ int base_queue, int nb_queue,
+ uint16_t itr_idx)
+{
+ int i;
+ uint32_t val;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+
+ /* Bind all RX queues to allocated MSIX interrupt */
+ for (i = 0; i < nb_queue; i++) {
+ val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
+ ((base_queue + i + 1) <<
+ I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+
+ if (i == nb_queue - 1)
+ val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
+ }
+
+ /* Write first RX queue to Link list register as the head element */
+ if (vsi->type != I40E_VSI_SRIOV) {
+ uint16_t interval =
+ i40e_calc_itr_interval(1, pf->support_multi_driver);
+
+ if (msix_vect == I40E_MISC_VEC_ID) {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+ (base_queue <<
+ I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
+ interval);
+ } else {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
+ (base_queue <<
+ I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
+ msix_vect - 1),
+ interval);
+ }
+ } else {
+ uint32_t reg;
+
+ if (msix_vect == I40E_MISC_VEC_ID) {
+ I40E_WRITE_REG(hw,
+ I40E_VPINT_LNKLST0(vsi->user_param),
+ (base_queue <<
+ I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+ } else {
+ /* num_msix_vectors_vf needs to minus irq0 */
+ reg = (hw->func_caps.num_msix_vectors_vf - 1) *
+ vsi->user_param + (msix_vect - 1);
+
+ I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
+ (base_queue <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+ }
+ }
+
+ I40E_WRITE_FLUSH(hw);
+}
+
+void
+i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t msix_vect = vsi->msix_intr;
+ uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+ uint16_t queue_idx = 0;
+ int record = 0;
+ int i;
+
+ for (i = 0; i < vsi->nb_qps; i++) {
+ I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
+ }
+
+ /* VF bind interrupt */
+ if (vsi->type == I40E_VSI_SRIOV) {
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue, vsi->nb_qps,
+ itr_idx);
+ return;
+ }
+
+ /* PF & VMDq bind interrupt */
+ if (rte_intr_dp_is_en(intr_handle)) {
+ if (vsi->type == I40E_VSI_MAIN) {
+ queue_idx = 0;
+ record = 1;
+ } else if (vsi->type == I40E_VSI_VMDQ2) {
+ struct i40e_vsi *main_vsi =
+ I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
+ queue_idx = vsi->base_queue - main_vsi->nb_qps;
+ record = 1;
+ }
+ }
+
+ for (i = 0; i < vsi->nb_used_qps; i++) {
+ if (nb_msix <= 1) {
+ if (!rte_intr_allow_others(intr_handle))
+ /* allow to share MISC_VEC_ID */
+ msix_vect = I40E_MISC_VEC_ID;
+
+ /* no enough msix_vect, map all to one */
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue + i,
+ vsi->nb_used_qps - i,
+ itr_idx);
+ for (; !!record && i < vsi->nb_used_qps; i++)
+ intr_handle->intr_vec[queue_idx + i] =
+ msix_vect;
+ break;
+ }
+ /* 1:1 queue/msix_vect mapping */
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue + i, 1,
+ itr_idx);
+ if (!!record)
+ intr_handle->intr_vec[queue_idx + i] = msix_vect;
+
+ msix_vect++;
+ nb_msix--;
+ }
+}
+
+static void
+i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ uint16_t msix_intr, i;
+
+ if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
+ for (i = 0; i < vsi->nb_msix; i++) {
+ msix_intr = vsi->msix_intr + i;
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
+ I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
+ }
+ else
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
+
+ I40E_WRITE_FLUSH(hw);
+}
+
+static void
+i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ uint16_t msix_intr, i;
+
+ if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
+ for (i = 0; i < vsi->nb_msix; i++) {
+ msix_intr = vsi->msix_intr + i;
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
+ }
+ else
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
+
+ I40E_WRITE_FLUSH(hw);
+}
+
+static inline uint8_t
+i40e_parse_link_speeds(uint16_t link_speeds)
+{
+ uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
+
+ if (link_speeds & ETH_LINK_SPEED_40G)
+ link_speed |= I40E_LINK_SPEED_40GB;
+ if (link_speeds & ETH_LINK_SPEED_25G)
+ link_speed |= I40E_LINK_SPEED_25GB;
+ if (link_speeds & ETH_LINK_SPEED_20G)
+ link_speed |= I40E_LINK_SPEED_20GB;
+ if (link_speeds & ETH_LINK_SPEED_10G)
+ link_speed |= I40E_LINK_SPEED_10GB;
+ if (link_speeds & ETH_LINK_SPEED_1G)
+ link_speed |= I40E_LINK_SPEED_1GB;
+ if (link_speeds & ETH_LINK_SPEED_100M)
+ link_speed |= I40E_LINK_SPEED_100MB;
+
+ return link_speed;
+}
+
+static int
+i40e_phy_conf_link(struct i40e_hw *hw,
+ uint8_t abilities,
+ uint8_t force_speed,
+ bool is_up)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_get_phy_abilities_resp phy_ab;
+ struct i40e_aq_set_phy_config phy_conf;
+ enum i40e_aq_phy_type cnt;
+ uint8_t avail_speed;
+ uint32_t phy_type_mask = 0;
+
+ const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
+ I40E_AQ_PHY_FLAG_PAUSE_RX |
+ I40E_AQ_PHY_FLAG_PAUSE_RX |
+ I40E_AQ_PHY_FLAG_LOW_POWER;
+ int ret = -ENOTSUP;
+
+ /* To get phy capabilities of available speeds. */
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
+ NULL);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
+ status);
+ return ret;
+ }
+ avail_speed = phy_ab.link_speed;
+
+ /* To get the current phy config. */
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
+ NULL);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
+ status);
+ return ret;
+ }
+
+ /* If link needs to go up and it is in autoneg mode the speed is OK,
+ * no need to set up again.
+ */
+ if (is_up && phy_ab.phy_type != 0 &&
+ abilities & I40E_AQ_PHY_AN_ENABLED &&
+ phy_ab.link_speed != 0)
+ return I40E_SUCCESS;
+
+ memset(&phy_conf, 0, sizeof(phy_conf));
+
+ /* bits 0-2 use the values from get_phy_abilities_resp */
+ abilities &= ~mask;
+ abilities |= phy_ab.abilities & mask;
+
+ phy_conf.abilities = abilities;
+
+ /* If link needs to go up, but the force speed is not supported,
+ * Warn users and config the default available speeds.
+ */
+ if (is_up && !(force_speed & avail_speed)) {
+ PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
+ phy_conf.link_speed = avail_speed;
+ } else {
+ phy_conf.link_speed = is_up ? force_speed : avail_speed;
+ }
+
+ /* PHY type mask needs to include each type except PHY type extension */
+ for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
+ phy_type_mask |= 1 << cnt;
+
+ /* use get_phy_abilities_resp value for the rest */
+ phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
+ phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
+ I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
+ I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
+ phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
+ phy_conf.eee_capability = phy_ab.eee_capability;
+ phy_conf.eeer = phy_ab.eeer_val;
+ phy_conf.low_power_ctrl = phy_ab.d3_lpan;
+
+ PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
+ phy_ab.abilities, phy_ab.link_speed);
+ PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
+ phy_conf.abilities, phy_conf.link_speed);
+
+ status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
+ if (status)
+ return ret;
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_apply_link_speed(struct rte_eth_dev *dev)
+{
+ uint8_t speed;
+ uint8_t abilities = 0;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+
+ if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ conf->link_speeds = ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_20G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_100M;
+ }
+ speed = i40e_parse_link_speeds(conf->link_speeds);
+ abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
+ I40E_AQ_PHY_AN_ENABLED |
+ I40E_AQ_PHY_LINK_ENABLED;
+
+ return i40e_phy_conf_link(hw, abilities, speed, true);
+}
+
+static int
+i40e_dev_start(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ int ret, i;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
+ struct i40e_vsi *vsi;
+
+ hw->adapter_stopped = 0;
+
+ if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+ PMD_INIT_LOG(ERR,
+ "Invalid link_speeds for port %u, autonegotiation disabled",
+ dev->data->port_id);
+ return -EINVAL;
+ }
+
+ rte_intr_disable(intr_handle);
+
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ ret = rte_intr_efd_enable(intr_handle, intr_vector);
+ if (ret)
+ return ret;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int),
+ 0);
+ if (!intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d rx_queues intr_vec",
+ dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ /* Initialize VSI */
+ ret = i40e_dev_rxtx_init(pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
+ goto err_up;
+ }
+
+ /* Map queues with MSIX interrupt */
+ main_vsi->nb_used_qps = dev->data->nb_rx_queues -
+ pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+ i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
+ i40e_vsi_enable_queues_intr(main_vsi);
+
+ /* Map VMDQ VSI queues with MSIX interrupt */
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+ i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
+ I40E_ITR_INDEX_DEFAULT);
+ i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
+ }
+
+ /* enable FDIR MSIX interrupt */
+ if (pf->fdir.fdir_vsi) {
+ i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
+ I40E_ITR_INDEX_NONE);
+ i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+ }
+
+ /* Enable all queues which have been configured */
+ ret = i40e_dev_switch_queues(pf, TRUE);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to enable VSI");
+ goto err_up;
+ }
+
+ /* Enable receiving broadcast packets */
+ ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
+
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
+ true, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
+ }
+
+ /* Enable the VLAN promiscuous mode. */
+ if (pf->vfs) {
+ for (i = 0; i < pf->vf_num; i++) {
+ vsi = pf->vfs[i].vsi;
+ i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
+ true, NULL);
+ }
+ }
+
+ /* Enable mac loopback mode */
+ if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
+ dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
+ ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "fail to set loopback link");
+ goto err_up;
+ }
+ }
+
+ /* Apply link configure */
+ ret = i40e_apply_link_speed(dev);
+ if (I40E_SUCCESS != ret) {
+ PMD_DRV_LOG(ERR, "Fail to apply link setting");
+ goto err_up;
+ }
+
+ if (!rte_intr_allow_others(intr_handle)) {
+ rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler,
+ (void *)dev);
+ /* configure and enable device interrupt */
+ i40e_pf_config_irq0(hw, FALSE);
+ i40e_pf_enable_irq0(hw);
+
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ PMD_INIT_LOG(INFO,
+ "lsc won't enable because of no intr multiplex");
+ } else {
+ ret = i40e_aq_set_phy_int_mask(hw,
+ ~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL |
+ I40E_AQ_EVENT_MEDIA_NA), NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(WARNING, "Fail to set phy mask");
+
+ /* Call get_link_info aq commond to enable/disable LSE */
+ i40e_dev_link_update(dev, 0);
+ }
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(intr_handle);
+
+ i40e_filter_restore(pf);
+
+ if (pf->tm_conf.root && !pf->tm_conf.committed)
+ PMD_DRV_LOG(WARNING,
+ "please call hierarchy_commit() "
+ "before starting the port");
+
+ return I40E_SUCCESS;
+
+err_up:
+ i40e_dev_switch_queues(pf, FALSE);
+ i40e_dev_clear_queues(dev);
+
+ return ret;
+}
+
+static void
+i40e_dev_stop(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int i;
+
+ if (hw->adapter_stopped == 1)
+ return;
+ /* Disable all queues */
+ i40e_dev_switch_queues(pf, FALSE);
+
+ /* un-map queues with interrupt registers */
+ i40e_vsi_disable_queues_intr(main_vsi);
+ i40e_vsi_queues_unbind_intr(main_vsi);
+
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
+ i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
+ }
+
+ if (pf->fdir.fdir_vsi) {
+ i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
+ i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
+ }
+ /* Clear all queues and release memory */
+ i40e_dev_clear_queues(dev);
+
+ /* Set link down */
+ i40e_dev_set_link_down(dev);
+
+ if (!rte_intr_allow_others(intr_handle))
+ /* resume to the default handler */
+ rte_intr_callback_register(intr_handle,
+ i40e_dev_interrupt_handler,
+ (void *)dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+
+ /* reset hierarchy commit */
+ pf->tm_conf.committed = false;
+
+ hw->adapter_stopped = 1;
+}
+
+static void
+i40e_dev_close(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_mirror_rule *p_mirror;
+ uint32_t reg;
+ int i;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ i40e_dev_stop(dev);
+
+ /* Remove all mirror rules */
+ while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
+ ret = i40e_aq_del_mirror_rule(hw,
+ pf->main_vsi->veb->seid,
+ p_mirror->rule_type,
+ p_mirror->entries,
+ p_mirror->num_entries,
+ p_mirror->id);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
+ "status = %d, aq_err = %d.", ret,
+ hw->aq.asq_last_status);
+
+ /* remove mirror software resource anyway */
+ TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
+ rte_free(p_mirror);
+ pf->nb_mirror_rule--;
+ }
+
+ i40e_dev_free_queues(dev);
+
+ /* Disable interrupt */
+ i40e_pf_disable_irq0(hw);
+ rte_intr_disable(intr_handle);
+
+ i40e_fdir_teardown(pf);
+
+ /* shutdown and destroy the HMC */
+ i40e_shutdown_lan_hmc(hw);
+
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ i40e_vsi_release(pf->vmdq[i].vsi);
+ pf->vmdq[i].vsi = NULL;
+ }
+ rte_free(pf->vmdq);
+ pf->vmdq = NULL;
+
+ /* release all the existing VSIs and VEBs */
+ i40e_vsi_release(pf->main_vsi);
+
+ /* shutdown the adminq */
+ i40e_aq_queue_shutdown(hw, true);
+ i40e_shutdown_adminq(hw);
+
+ i40e_res_pool_destroy(&pf->qp_pool);
+ i40e_res_pool_destroy(&pf->msix_pool);
+
+ /* Disable flexible payload in global configuration */
+ if (!pf->support_multi_driver)
+ i40e_flex_payload_reg_set_default(hw);
+
+ /* force a PF reset to clean anything leftover */
+ reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
+ I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
+ (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ I40E_WRITE_FLUSH(hw);
+}
+
+/*
+ * Reset PF device only to re-initialize resources in PMD layer
+ */
+static int
+i40e_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ /* When a DPDK PMD PF begin to reset PF port, it should notify all
+ * its VF to make them align with it. The detailed notification
+ * mechanism is PMD specific. As to i40e PF, it is rather complex.
+ * To avoid unexpected behavior in VF, currently reset of PF with
+ * SR-IOV activation is not supported. It might be supported later.
+ */
+ if (dev->data->sriov.active)
+ return -ENOTSUP;
+
+ ret = eth_i40e_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_i40e_dev_init(dev, NULL);
+
+ return ret;
+}
+
+static void
+i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ int status;
+
+ status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ true, NULL, true);
+ if (status != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
+
+ status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ TRUE, NULL);
+ if (status != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
+
+}
+
+static void
+i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ int status;
+
+ status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ false, NULL, true);
+ if (status != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
+
+ status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ false, NULL);
+ if (status != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
+}
+
+static void
+i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ int ret;
+
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
+}
+
+static void
+i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ int ret;
+
+ if (dev->data->promiscuous == 1)
+ return; /* must remain in all_multicast mode */
+
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
+ vsi->seid, FALSE, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
+}
+
+/*
+ * Set device link up.
+ */
+static int
+i40e_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ /* re-apply link speed setting */
+ return i40e_apply_link_speed(dev);
+}
+
+/*
+ * Set device link down.
+ */
+static int
+i40e_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
+ uint8_t abilities = 0;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ return i40e_phy_conf_link(hw, abilities, speed, false);
+}
+
+static __rte_always_inline void
+update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
+{
+/* Link status registers and values*/
+#define I40E_PRTMAC_LINKSTA 0x001E2420
+#define I40E_REG_LINK_UP 0x40000080
+#define I40E_PRTMAC_MACC 0x001E24E0
+#define I40E_REG_MACC_25GB 0x00020000
+#define I40E_REG_SPEED_MASK 0x38000000
+#define I40E_REG_SPEED_100MB 0x00000000
+#define I40E_REG_SPEED_1GB 0x08000000
+#define I40E_REG_SPEED_10GB 0x10000000
+#define I40E_REG_SPEED_20GB 0x20000000
+#define I40E_REG_SPEED_25_40GB 0x18000000
+ uint32_t link_speed;
+ uint32_t reg_val;
+
+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
+ link_speed = reg_val & I40E_REG_SPEED_MASK;
+ reg_val &= I40E_REG_LINK_UP;
+ link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
+
+ if (unlikely(link->link_status == 0))
+ return;
+
+ /* Parse the link status */
+ switch (link_speed) {
+ case I40E_REG_SPEED_100MB:
+ link->link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case I40E_REG_SPEED_1GB:
+ link->link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case I40E_REG_SPEED_10GB:
+ link->link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case I40E_REG_SPEED_20GB:
+ link->link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case I40E_REG_SPEED_25_40GB:
+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
+
+ if (reg_val & I40E_REG_MACC_25GB)
+ link->link_speed = ETH_SPEED_NUM_25G;
+ else
+ link->link_speed = ETH_SPEED_NUM_40G;
+
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
+ break;
+ }
+}
+
+static __rte_always_inline void
+update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
+ bool enable_lse, int wait_to_complete)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
+ uint32_t rep_cnt = MAX_REPEAT_TIME;
+ struct i40e_link_status link_status;
+ int status;
+
+ memset(&link_status, 0, sizeof(link_status));
+
+ do {
+ memset(&link_status, 0, sizeof(link_status));
+
+ /* Get link status information from hardware */
+ status = i40e_aq_get_link_info(hw, enable_lse,
+ &link_status, NULL);
+ if (unlikely(status != I40E_SUCCESS)) {
+ link->link_speed = ETH_SPEED_NUM_100M;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ PMD_DRV_LOG(ERR, "Failed to get link info");
+ return;
+ }
+
+ link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
+ if (!wait_to_complete || link->link_status)
+ break;
+
+ rte_delay_ms(CHECK_INTERVAL);
+ } while (--rep_cnt);
+
+ /* Parse the link status */
+ switch (link_status.link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ link->link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ link->link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ link->link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ link->link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case I40E_LINK_SPEED_25GB:
+ link->link_speed = ETH_SPEED_NUM_25G;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ link->link_speed = ETH_SPEED_NUM_40G;
+ break;
+ default:
+ link->link_speed = ETH_SPEED_NUM_100M;
+ break;
+ }
+}
+
+int
+i40e_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link;
+ bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
+ int ret;
+
+ memset(&link, 0, sizeof(link));
+
+ /* i40e uses full duplex only */
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+
+ if (!wait_to_complete && !enable_lse)
+ update_link_reg(hw, &link);
+ else
+ update_link_aq(hw, &link, enable_lse, wait_to_complete);
+
+ ret = rte_eth_linkstatus_set(dev, &link);
+ i40e_notify_all_vfs_link_status(dev);
+
+ return ret;
+}
+
+/* Get all the statistics of a VSI */
+void
+i40e_update_vsi_stats(struct i40e_vsi *vsi)
+{
+ struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
+ struct i40e_eth_stats *nes = &vsi->eth_stats;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
+
+ i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
+ vsi->offset_loaded, &oes->rx_bytes,
+ &nes->rx_bytes);
+ i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
+ vsi->offset_loaded, &oes->rx_unicast,
+ &nes->rx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
+ vsi->offset_loaded, &oes->rx_multicast,
+ &nes->rx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
+ vsi->offset_loaded, &oes->rx_broadcast,
+ &nes->rx_broadcast);
+ /* exclude CRC bytes */
+ nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
+ nes->rx_broadcast) * ETHER_CRC_LEN;
+
+ i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
+ &oes->rx_discards, &nes->rx_discards);
+ /* GLV_REPC not supported */
+ /* GLV_RMPC not supported */
+ i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
+ &oes->rx_unknown_protocol,
+ &nes->rx_unknown_protocol);
+ i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
+ vsi->offset_loaded, &oes->tx_bytes,
+ &nes->tx_bytes);
+ i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
+ vsi->offset_loaded, &oes->tx_unicast,
+ &nes->tx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
+ vsi->offset_loaded, &oes->tx_multicast,
+ &nes->tx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
+ vsi->offset_loaded, &oes->tx_broadcast,
+ &nes->tx_broadcast);
+ /* GLV_TDPC not supported */
+ i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
+ &oes->tx_errors, &nes->tx_errors);
+ vsi->offset_loaded = true;
+
+ PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
+ vsi->vsi_id);
+ PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
+ PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
+ PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
+ PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
+ PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
+ PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
+ nes->rx_unknown_protocol);
+ PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
+ PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
+ PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
+ PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
+ PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
+ PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
+ PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
+ vsi->vsi_id);
+}
+
+static void
+i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
+{
+ unsigned int i;
+ struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
+ struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
+
+ /* Get rx/tx bytes of internal transfer packets */
+ i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
+ I40E_GLV_GORCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_bytes,
+ &pf->internal_stats.rx_bytes);
+
+ i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
+ I40E_GLV_GOTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.tx_bytes,
+ &pf->internal_stats.tx_bytes);
+ /* Get total internal rx packet count */
+ i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
+ I40E_GLV_UPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_unicast,
+ &pf->internal_stats.rx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
+ I40E_GLV_MPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_multicast,
+ &pf->internal_stats.rx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
+ I40E_GLV_BPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_broadcast,
+ &pf->internal_stats.rx_broadcast);
+ /* Get total internal tx packet count */
+ i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
+ I40E_GLV_UPTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.tx_unicast,
+ &pf->internal_stats.tx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
+ I40E_GLV_MPTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.tx_multicast,
+ &pf->internal_stats.tx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
+ I40E_GLV_BPTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.tx_broadcast,
+ &pf->internal_stats.tx_broadcast);
+
+ /* exclude CRC size */
+ pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
+ pf->internal_stats.rx_multicast +
+ pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
+
+ /* Get statistics of struct i40e_eth_stats */
+ i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->offset_loaded, &os->eth.rx_bytes,
+ &ns->eth.rx_bytes);
+ i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
+ I40E_GLPRT_UPRCL(hw->port),
+ pf->offset_loaded, &os->eth.rx_unicast,
+ &ns->eth.rx_unicast);
+ i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->offset_loaded, &os->eth.rx_multicast,
+ &ns->eth.rx_multicast);
+ i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
+ I40E_GLPRT_BPRCL(hw->port),
+ pf->offset_loaded, &os->eth.rx_broadcast,
+ &ns->eth.rx_broadcast);
+ /* Workaround: CRC size should not be included in byte statistics,
+ * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
+ */
+ ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
+ ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+
+ /* exclude internal rx bytes
+ * Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
+ * I40E_GLPRT_GORCH[H/L], so there is a small window that cause negative
+ * value.
+ * same to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L], I40E_GLV_BPRC[H/L].
+ */
+ if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
+ ns->eth.rx_bytes = 0;
+ else
+ ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
+
+ if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
+ ns->eth.rx_unicast = 0;
+ else
+ ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
+
+ if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
+ ns->eth.rx_multicast = 0;
+ else
+ ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
+
+ if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
+ ns->eth.rx_broadcast = 0;
+ else
+ ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
+
+ i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->offset_loaded, &os->eth.rx_discards,
+ &ns->eth.rx_discards);
+ /* GLPRT_REPC not supported */
+ /* GLPRT_RMPC not supported */
+ i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
+ pf->offset_loaded,
+ &os->eth.rx_unknown_protocol,
+ &ns->eth.rx_unknown_protocol);
+ i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->offset_loaded, &os->eth.tx_bytes,
+ &ns->eth.tx_bytes);
+ i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
+ I40E_GLPRT_UPTCL(hw->port),
+ pf->offset_loaded, &os->eth.tx_unicast,
+ &ns->eth.tx_unicast);
+ i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
+ I40E_GLPRT_MPTCL(hw->port),
+ pf->offset_loaded, &os->eth.tx_multicast,
+ &ns->eth.tx_multicast);
+ i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
+ I40E_GLPRT_BPTCL(hw->port),
+ pf->offset_loaded, &os->eth.tx_broadcast,
+ &ns->eth.tx_broadcast);
+ ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
+ ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+
+ /* exclude internal tx bytes
+ * Workaround: it is possible I40E_GLV_GOTCH[H/L] is updated before
+ * I40E_GLPRT_GOTCH[H/L], so there is a small window that cause negative
+ * value.
+ * same to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L], I40E_GLV_BPTC[H/L].
+ */
+ if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
+ ns->eth.tx_bytes = 0;
+ else
+ ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
+
+ if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
+ ns->eth.tx_unicast = 0;
+ else
+ ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
+
+ if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
+ ns->eth.tx_multicast = 0;
+ else
+ ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
+
+ if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
+ ns->eth.tx_broadcast = 0;
+ else
+ ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
+
+ /* GLPRT_TEPC not supported */
+
+ /* additional port specific stats */
+ i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->offset_loaded, &os->tx_dropped_link_down,
+ &ns->tx_dropped_link_down);
+ i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->offset_loaded, &os->crc_errors,
+ &ns->crc_errors);
+ i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->offset_loaded, &os->illegal_bytes,
+ &ns->illegal_bytes);
+ /* GLPRT_ERRBC not supported */
+ i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->offset_loaded, &os->mac_local_faults,
+ &ns->mac_local_faults);
+ i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->offset_loaded, &os->mac_remote_faults,
+ &ns->mac_remote_faults);
+ i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->offset_loaded, &os->rx_length_errors,
+ &ns->rx_length_errors);
+ i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->offset_loaded, &os->link_xon_rx,
+ &ns->link_xon_rx);
+ i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+ pf->offset_loaded, &os->link_xoff_rx,
+ &ns->link_xoff_rx);
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xon_rx[i],
+ &ns->priority_xon_rx[i]);
+ i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xoff_rx[i],
+ &ns->priority_xoff_rx[i]);
+ }
+ i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->offset_loaded, &os->link_xon_tx,
+ &ns->link_xon_tx);
+ i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->offset_loaded, &os->link_xoff_tx,
+ &ns->link_xoff_tx);
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xon_tx[i],
+ &ns->priority_xon_tx[i]);
+ i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xoff_tx[i],
+ &ns->priority_xoff_tx[i]);
+ i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xon_2_xoff[i],
+ &ns->priority_xon_2_xoff[i]);
+ }
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->offset_loaded, &os->rx_size_64,
+ &ns->rx_size_64);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->offset_loaded, &os->rx_size_127,
+ &ns->rx_size_127);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->offset_loaded, &os->rx_size_255,
+ &ns->rx_size_255);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->offset_loaded, &os->rx_size_511,
+ &ns->rx_size_511);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->offset_loaded, &os->rx_size_1023,
+ &ns->rx_size_1023);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->offset_loaded, &os->rx_size_1522,
+ &ns->rx_size_1522);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->offset_loaded, &os->rx_size_big,
+ &ns->rx_size_big);
+ i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->offset_loaded, &os->rx_undersize,
+ &ns->rx_undersize);
+ i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->offset_loaded, &os->rx_fragments,
+ &ns->rx_fragments);
+ i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->offset_loaded, &os->rx_oversize,
+ &ns->rx_oversize);
+ i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->offset_loaded, &os->rx_jabber,
+ &ns->rx_jabber);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->offset_loaded, &os->tx_size_64,
+ &ns->tx_size_64);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->offset_loaded, &os->tx_size_127,
+ &ns->tx_size_127);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->offset_loaded, &os->tx_size_255,
+ &ns->tx_size_255);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->offset_loaded, &os->tx_size_511,
+ &ns->tx_size_511);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->offset_loaded, &os->tx_size_1023,
+ &ns->tx_size_1023);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->offset_loaded, &os->tx_size_1522,
+ &ns->tx_size_1522);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->offset_loaded, &os->tx_size_big,
+ &ns->tx_size_big);
+ i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
+ pf->offset_loaded,
+ &os->fd_sb_match, &ns->fd_sb_match);
+ /* GLPRT_MSPDC not supported */
+ /* GLPRT_XEC not supported */
+
+ pf->offset_loaded = true;
+
+ if (pf->main_vsi)
+ i40e_update_vsi_stats(pf->main_vsi);
+}
+
+/* Get all statistics of a port */
+static int
+i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
+ unsigned i;
+
+ /* call read registers - updates values, now write them to struct */
+ i40e_read_stats_registers(pf, hw);
+
+ stats->ipackets = ns->eth.rx_unicast +
+ ns->eth.rx_multicast +
+ ns->eth.rx_broadcast -
+ ns->eth.rx_discards -
+ pf->main_vsi->eth_stats.rx_discards;
+ stats->opackets = ns->eth.tx_unicast +
+ ns->eth.tx_multicast +
+ ns->eth.tx_broadcast;
+ stats->ibytes = ns->eth.rx_bytes;
+ stats->obytes = ns->eth.tx_bytes;
+ stats->oerrors = ns->eth.tx_errors +
+ pf->main_vsi->eth_stats.tx_errors;
+
+ /* Rx Errors */
+ stats->imissed = ns->eth.rx_discards +
+ pf->main_vsi->eth_stats.rx_discards;
+ stats->ierrors = ns->crc_errors +
+ ns->rx_length_errors + ns->rx_undersize +
+ ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
+
+ PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
+ PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
+ PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
+ PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", ns->eth.rx_multicast);
+ PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", ns->eth.rx_broadcast);
+ PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", ns->eth.rx_discards);
+ PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
+ ns->eth.rx_unknown_protocol);
+ PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
+ PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
+ PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", ns->eth.tx_multicast);
+ PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", ns->eth.tx_broadcast);
+ PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", ns->eth.tx_discards);
+ PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);
+
+ PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
+ ns->tx_dropped_link_down);
+ PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
+ PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
+ ns->illegal_bytes);
+ PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
+ PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
+ ns->mac_local_faults);
+ PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
+ ns->mac_remote_faults);
+ PMD_DRV_LOG(DEBUG, "rx_length_errors: %"PRIu64"",
+ ns->rx_length_errors);
+ PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
+ PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
+ for (i = 0; i < 8; i++) {
+ PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %"PRIu64"",
+ i, ns->priority_xon_rx[i]);
+ PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %"PRIu64"",
+ i, ns->priority_xoff_rx[i]);
+ }
+ PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
+ PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
+ for (i = 0; i < 8; i++) {
+ PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %"PRIu64"",
+ i, ns->priority_xon_tx[i]);
+ PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %"PRIu64"",
+ i, ns->priority_xoff_tx[i]);
+ PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %"PRIu64"",
+ i, ns->priority_xon_2_xoff[i]);
+ }
+ PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
+ PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
+ PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
+ PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
+ PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
+ PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
+ PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
+ PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
+ PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
+ PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
+ PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
+ PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
+ PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
+ PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
+ PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
+ PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
+ PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
+ PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
+ PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
+ ns->mac_short_packet_dropped);
+ PMD_DRV_LOG(DEBUG, "checksum_error: %"PRIu64"",
+ ns->checksum_error);
+ PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match);
+ PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
+ return 0;
+}
+
+/* Reset the statistics */
+static void
+i40e_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Mark PF and VSI stats to update the offset, aka "reset" */
+ pf->offset_loaded = false;
+ if (pf->main_vsi)
+ pf->main_vsi->offset_loaded = false;
+
+ /* read the stats, reading current register values into offset */
+ i40e_read_stats_registers(pf, hw);
+}
+
+static uint32_t
+i40e_xstats_calc_num(void)
+{
+ return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
+ (I40E_NB_RXQ_PRIO_XSTATS * 8) +
+ (I40E_NB_TXQ_PRIO_XSTATS * 8);
+}
+
+static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned limit)
+{
+ unsigned count = 0;
+ unsigned i, prio;
+
+ if (xstats_names == NULL)
+ return i40e_xstats_calc_num();
+
+ /* Note: limit checked in rte_eth_xstats_names() */
+
+ /* Get stats from i40e_eth_stats struct */
+ for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", rte_i40e_stats_strings[i].name);
+ count++;
+ }
+
+ /* Get individiual stats from i40e_hw_port struct */
+ for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", rte_i40e_hw_port_strings[i].name);
+ count++;
+ }
+
+ for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
+ for (prio = 0; prio < 8; prio++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_priority%u_%s", prio,
+ rte_i40e_rxq_prio_strings[i].name);
+ count++;
+ }
+ }
+
+ for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
+ for (prio = 0; prio < 8; prio++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_priority%u_%s", prio,
+ rte_i40e_txq_prio_strings[i].name);
+ count++;
+ }
+ }
+ return count;
+}
+
+static int
+i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned n)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ unsigned i, count, prio;
+ struct i40e_hw_port_stats *hw_stats = &pf->stats;
+
+ count = i40e_xstats_calc_num();
+ if (n < count)
+ return count;
+
+ i40e_read_stats_registers(pf, hw);
+
+ if (xstats == NULL)
+ return 0;
+
+ count = 0;
+
+ /* Get stats from i40e_eth_stats struct */
+ for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
+ rte_i40e_stats_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
+ /* Get individiual stats from i40e_hw_port struct */
+ for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_i40e_hw_port_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
+ for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
+ for (prio = 0; prio < 8; prio++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_i40e_rxq_prio_strings[i].offset +
+ (sizeof(uint64_t) * prio));
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
+ for (prio = 0; prio < 8; prio++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_i40e_txq_prio_strings[i].offset +
+ (sizeof(uint64_t) * prio));
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static int
+i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_id,
+ __rte_unused uint8_t stat_idx,
+ __rte_unused uint8_t is_rx)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return -ENOSYS;
+}
+
+static int
+i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 full_ver;
+ u8 ver, patch;
+ u16 build;
+ int ret;
+
+ full_ver = hw->nvm.oem_ver;
+ ver = (u8)(full_ver >> 24);
+ build = (u16)((full_ver >> 8) & 0xffff);
+ patch = (u8)(full_ver & 0xff);
+
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d%d 0x%08x %d.%d.%d",
+ ((hw->nvm.version >> 12) & 0xf),
+ ((hw->nvm.version >> 4) & 0xff),
+ (hw->nvm.version & 0xf), hw->nvm.eetrack,
+ ver, build, patch);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
+static void
+i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ dev_info->max_rx_queues = vsi->nb_qps;
+ dev_info->max_tx_queues = vsi->nb_qps;
+ dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+ dev_info->max_mac_addrs = vsi->max_macaddrs;
+ dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ dev_info->tx_queue_offload_capa;
+ dev_info->dev_capa =
+ RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
+ dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ dev_info->reta_size = pf->hash_lut_size;
+ dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = I40E_DEFAULT_RX_PTHRESH,
+ .hthresh = I40E_DEFAULT_RX_HTHRESH,
+ .wthresh = I40E_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = I40E_DEFAULT_TX_PTHRESH,
+ .hthresh = I40E_DEFAULT_TX_HTHRESH,
+ .wthresh = I40E_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ .nb_seg_max = I40E_TX_MAX_SEG,
+ .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
+ };
+
+ if (pf->flags & I40E_FLAG_VMDQ) {
+ dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
+ dev_info->vmdq_queue_base = dev_info->max_rx_queues;
+ dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
+ pf->max_nb_vmdq_vsi;
+ dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
+ dev_info->max_rx_queues += dev_info->vmdq_queue_num;
+ dev_info->max_tx_queues += dev_info->vmdq_queue_num;
+ }
+
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
+ /* For XL710 */
+ dev_info->speed_capa = ETH_LINK_SPEED_40G;
+ dev_info->default_rxportconf.nb_queues = 2;
+ dev_info->default_txportconf.nb_queues = 2;
+ if (dev->data->nb_rx_queues == 1)
+ dev_info->default_rxportconf.ring_size = 2048;
+ else
+ dev_info->default_rxportconf.ring_size = 1024;
+ if (dev->data->nb_tx_queues == 1)
+ dev_info->default_txportconf.ring_size = 1024;
+ else
+ dev_info->default_txportconf.ring_size = 512;
+
+ } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
+ /* For XXV710 */
+ dev_info->speed_capa = ETH_LINK_SPEED_25G;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
+ } else {
+ /* For X710 */
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+ dev_info->default_rxportconf.ring_size = 512;
+ dev_info->default_txportconf.ring_size = 256;
+ } else {
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
+ }
+ }
+ dev_info->default_rxportconf.burst_size = 32;
+ dev_info->default_txportconf.burst_size = 32;
+}
+
+static int
+i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ PMD_INIT_FUNC_TRACE();
+
+ if (on)
+ return i40e_vsi_add_vlan(vsi, vlan_id);
+ else
+ return i40e_vsi_delete_vlan(vsi, vlan_id);
+}
+
+static int
+i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid, int qinq)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t reg_r = 0;
+ uint64_t reg_w = 0;
+ uint16_t reg_id = 3;
+ int ret;
+
+ if (qinq) {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ reg_id = 2;
+ }
+
+ ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ &reg_r, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
+ return -EIO;
+ }
+ PMD_DRV_LOG(DEBUG,
+ "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
+ reg_id, reg_r);
+
+ reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
+ reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
+ if (reg_r == reg_w) {
+ PMD_DRV_LOG(DEBUG, "No need to write");
+ return 0;
+ }
+
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GL_SWT_L2TAGCTRL(reg_id),
+ reg_w, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
+ return -EIO;
+ }
+ PMD_DRV_LOG(DEBUG,
+ "Global register 0x%08x is changed with value 0x%08x",
+ I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
+
+ return 0;
+}
+
+static int
+i40e_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
+ int ret = 0;
+
+ if ((vlan_type != ETH_VLAN_TYPE_INNER &&
+ vlan_type != ETH_VLAN_TYPE_OUTER) ||
+ (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+ PMD_DRV_LOG(ERR,
+ "Unsupported vlan type.");
+ return -EINVAL;
+ }
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
+ return -ENOTSUP;
+ }
+
+ /* 802.1ad frames ability is added in NVM API 1.7*/
+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ if (qinq) {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ hw->first_tag = rte_cpu_to_le_16(tpid);
+ else if (vlan_type == ETH_VLAN_TYPE_INNER)
+ hw->second_tag = rte_cpu_to_le_16(tpid);
+ } else {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ hw->second_tag = rte_cpu_to_le_16(tpid);
+ }
+ ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Set switch config failed aq_err: %d",
+ hw->aq.asq_last_status);
+ ret = -EIO;
+ }
+ } else
+ /* If NVM API < 1.7, keep the register setting */
+ ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
+ tpid, qinq);
+
+ return ret;
+}
+
+static int
+i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ i40e_vsi_config_vlan_filter(vsi, TRUE);
+ else
+ i40e_vsi_config_vlan_filter(vsi, FALSE);
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ i40e_vsi_config_vlan_stripping(vsi, TRUE);
+ else
+ i40e_vsi_config_vlan_stripping(vsi, FALSE);
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+ i40e_vsi_config_double_vlan(vsi, TRUE);
+ /* Set global registers with default ethertype. */
+ i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ ETHER_TYPE_VLAN);
+ i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+ ETHER_TYPE_VLAN);
+ }
+ else
+ i40e_vsi_config_double_vlan(vsi, FALSE);
+ }
+
+ return 0;
+}
+
+static void
+i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue,
+ __rte_unused int on)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
+ struct i40e_vsi_vlan_pvid_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.on = on;
+ if (info.on)
+ info.config.pvid = pvid;
+ else {
+ info.config.reject.tagged =
+ data->dev_conf.txmode.hw_vlan_reject_tagged;
+ info.config.reject.untagged =
+ data->dev_conf.txmode.hw_vlan_reject_untagged;
+ }
+
+ return i40e_vsi_vlan_pvid_set(vsi, &info);
+}
+
+static int
+i40e_dev_led_on(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mode = i40e_led_get(hw);
+
+ if (mode == 0)
+ i40e_led_set(hw, 0xf, true); /* 0xf means led always true */
+
+ return 0;
+}
+
+static int
+i40e_dev_led_off(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mode = i40e_led_get(hw);
+
+ if (mode != 0)
+ i40e_led_set(hw, 0, false);
+
+ return 0;
+}
+
+static int
+i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ fc_conf->pause_time = pf->fc_conf.pause_time;
+
+ /* read out from register, in case they are modified by other port */
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
+ I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
+ I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
+
+ fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
+ fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
+
+ /* Return current mode according to actual setting*/
+ switch (hw->fc.current_mode) {
+ case I40E_FC_FULL:
+ fc_conf->mode = RTE_FC_FULL;
+ break;
+ case I40E_FC_TX_PAUSE:
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ break;
+ case I40E_FC_RX_PAUSE:
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ break;
+ case I40E_FC_NONE:
+ default:
+ fc_conf->mode = RTE_FC_NONE;
+ };
+
+ return 0;
+}
+
+static int
+i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ uint32_t mflcn_reg, fctrl_reg, reg;
+ uint32_t max_high_water;
+ uint8_t i, aq_failure;
+ int err;
+ struct i40e_hw *hw;
+ struct i40e_pf *pf;
+ enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
+ [RTE_FC_NONE] = I40E_FC_NONE,
+ [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
+ [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
+ [RTE_FC_FULL] = I40E_FC_FULL
+ };
+
+ /* high_water field in the rte_eth_fc_conf using the kilobytes unit */
+
+ max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
+ if ((fc_conf->high_water > max_high_water) ||
+ (fc_conf->high_water < fc_conf->low_water)) {
+ PMD_INIT_LOG(ERR,
+ "Invalid high/low water setup value in KB, High_water must be <= %d.",
+ max_high_water);
+ return -EINVAL;
+ }
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
+
+ pf->fc_conf.pause_time = fc_conf->pause_time;
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* All the link flow control related enable/disable register
+ * configuration is handle by the F/W
+ */
+ err = i40e_set_fc(hw, &aq_failure, true);
+ if (err < 0)
+ return -ENOSYS;
+
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
+ /* Configure flow control refresh threshold,
+ * the value for stat_tx_pause_refresh_timer[8]
+ * is used for global pause operation.
+ */
+
+ I40E_WRITE_REG(hw,
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
+ pf->fc_conf.pause_time);
+
+ /* configure the timer value included in transmitted pause
+ * frame,
+ * the value for stat_tx_pause_quanta[8] is used for global
+ * pause operation
+ */
+ I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
+ pf->fc_conf.pause_time);
+
+ fctrl_reg = I40E_READ_REG(hw,
+ I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
+
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
+ else
+ fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
+
+ I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
+ fctrl_reg);
+ } else {
+ /* Configure pause time (2 TCs per register) */
+ reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
+ I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
+ pf->fc_conf.pause_time / 2);
+
+ mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
+
+ /* set or clear MFLCN.PMCF & MFLCN.DPF bits
+ *depending on configuration
+ */
+ if (fc_conf->mac_ctrl_frame_fwd != 0) {
+ mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
+ mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
+ } else {
+ mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
+ mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
+ }
+
+ I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
+ }
+
+ if (!pf->support_multi_driver) {
+ /* config water marker both based on the packets and bytes */
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
+ (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
+ (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT);
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT);
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Water marker configuration is not supported.");
+ }
+
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct rte_eth_pfc_conf *pfc_conf)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return -ENOSYS;
+}
+
+/* Add a MAC address, and update filters */
+static int
+i40e_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ __rte_unused uint32_t index,
+ uint32_t pool)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_mac_filter_info mac_filter;
+ struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ int ret;
+
+ /* If VMDQ not enabled or configured, return */
+ if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
+ !pf->nb_cfg_vmdq_vsi)) {
+ PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
+ pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
+ pool);
+ return -ENOTSUP;
+ }
+
+ if (pool > pf->nb_cfg_vmdq_vsi) {
+ PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
+ pool, pf->nb_cfg_vmdq_vsi);
+ return -EINVAL;
+ }
+
+ rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ else
+ mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
+
+ if (pool == 0)
+ vsi = pf->main_vsi;
+ else
+ vsi = pf->vmdq[pool - 1].vsi;
+
+ ret = i40e_vsi_add_mac(vsi, &mac_filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/* Remove a MAC address, and update filters */
+static void
+i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi;
+ struct rte_eth_dev_data *data = dev->data;
+ struct ether_addr *macaddr;
+ int ret;
+ uint32_t i;
+ uint64_t pool_sel;
+
+ macaddr = &(data->mac_addrs[index]);
+
+ pool_sel = dev->data->mac_pool_sel[index];
+
+ for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
+ if (pool_sel & (1ULL << i)) {
+ if (i == 0)
+ vsi = pf->main_vsi;
+ else {
+ /* No VMDQ pool enabled or configured */
+ if (!(pf->flags & I40E_FLAG_VMDQ) ||
+ (i > pf->nb_cfg_vmdq_vsi)) {
+ PMD_DRV_LOG(ERR,
+ "No VMDQ pool enabled/configured");
+ return;
+ }
+ vsi = pf->vmdq[i - 1].vsi;
+ }
+ ret = i40e_vsi_delete_mac(vsi, macaddr);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
+ return;
+ }
+ }
+ }
+}
+
+/* Set perfect match or hash match of MAC and VLAN for a VF */
+static int
+i40e_vf_mac_filter_set(struct i40e_pf *pf,
+ struct rte_eth_mac_filter *filter,
+ bool add)
+{
+ struct i40e_hw *hw;
+ struct i40e_mac_filter_info mac_filter;
+ struct ether_addr old_mac;
+ struct ether_addr *new_mac;
+ struct i40e_pf_vf *vf = NULL;
+ uint16_t vf_id;
+ int ret;
+
+ if (pf == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid PF argument.");
+ return -EINVAL;
+ }
+ hw = I40E_PF_TO_HW(pf);
+
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
+ return -EINVAL;
+ }
+
+ new_mac = &filter->mac_addr;
+
+ if (is_zero_ether_addr(new_mac)) {
+ PMD_DRV_LOG(ERR, "Invalid ethernet address.");
+ return -EINVAL;
+ }
+
+ vf_id = filter->dst_id;
+
+ if (vf_id > pf->vf_num - 1 || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+ vf = &pf->vfs[vf_id];
+
+ if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
+ PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
+ return -EINVAL;
+ }
+
+ if (add) {
+ rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
+ rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
+ ETHER_ADDR_LEN);
+ rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
+ ETHER_ADDR_LEN);
+
+ mac_filter.filter_type = filter->filter_type;
+ ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
+ return -1;
+ }
+ ether_addr_copy(new_mac, &pf->dev_addr);
+ } else {
+ rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
+ ETHER_ADDR_LEN);
+ ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
+ return -1;
+ }
+
+ /* Clear device address as it has been removed */
+ if (is_same_ether_addr(&(pf->dev_addr), new_mac))
+ memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
+ }
+
+ return 0;
+}
+
+/* MAC filter handle */
+static int
+i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_mac_filter *filter;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ int ret = I40E_NOT_SUPPORTED;
+
+ filter = (struct rte_eth_mac_filter *)(arg);
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ ret = I40E_SUCCESS;
+ break;
+ case RTE_ETH_FILTER_ADD:
+ i40e_pf_disable_irq0(hw);
+ if (filter->is_vf)
+ ret = i40e_vf_mac_filter_set(pf, filter, 1);
+ i40e_pf_enable_irq0(hw);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ i40e_pf_disable_irq0(hw);
+ if (filter->is_vf)
+ ret = i40e_vf_mac_filter_set(pf, filter, 0);
+ i40e_pf_enable_irq0(hw);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+ ret = I40E_ERR_PARAM;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint32_t reg;
+ int ret;
+
+ if (!lut)
+ return -EINVAL;
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
+ lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
+ return ret;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= lut_size_dw; i++) {
+ reg = I40E_VFQF_HLUT1(i, vsi->user_param);
+ lut_dw[i] = i40e_read_rx_ctl(hw, reg);
+ }
+ } else {
+ for (i = 0; i < lut_size_dw; i++)
+ lut_dw[i] = I40E_READ_REG(hw,
+ I40E_PFQF_HLUT(i));
+ }
+ }
+
+ return 0;
+}
+
+int
+i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ int ret;
+
+ if (!vsi || !lut)
+ return -EINVAL;
+
+ pf = I40E_VSI_TO_PF(vsi);
+ hw = I40E_VSI_TO_HW(vsi);
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
+ lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
+ return ret;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i < lut_size_dw; i++)
+ I40E_WRITE_REG(
+ hw,
+ I40E_VFQF_HLUT1(i, vsi->user_param),
+ lut_dw[i]);
+ } else {
+ for (i = 0; i < lut_size_dw; i++)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
+ lut_dw[i]);
+ }
+ I40E_WRITE_FLUSH(hw);
+ }
+
+ return 0;
+}
+
+static int
+i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ uint16_t i, lut_size = pf->hash_lut_size;
+ uint16_t idx, shift;
+ uint8_t *lut;
+ int ret;
+
+ if (reta_size != lut_size ||
+ reta_size > ETH_RSS_RETA_SIZE_512) {
+ PMD_DRV_LOG(ERR,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
+ reta_size, lut_size);
+ return -EINVAL;
+ }
+
+ lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+ ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
+ if (ret)
+ goto out;
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ lut[i] = reta_conf[idx].reta[shift];
+ }
+ ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
+
+out:
+ rte_free(lut);
+
+ return ret;
+}
+
+static int
+i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ uint16_t i, lut_size = pf->hash_lut_size;
+ uint16_t idx, shift;
+ uint8_t *lut;
+ int ret;
+
+ if (reta_size != lut_size ||
+ reta_size > ETH_RSS_RETA_SIZE_512) {
+ PMD_DRV_LOG(ERR,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
+ reta_size, lut_size);
+ return -EINVAL;
+ }
+
+ lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+
+ ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
+ if (ret)
+ goto out;
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = lut[i];
+ }
+
+out:
+ rte_free(lut);
+
+ return ret;
+}
+
+/**
+ * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
+ * @hw: pointer to the HW structure
+ * @mem: pointer to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+enum i40e_status_code
+i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ u64 size,
+ u32 alignment)
+{
+ const struct rte_memzone *mz = NULL;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
+ if (!mz)
+ return I40E_ERR_NO_MEMORY;
+
+ mem->size = size;
+ mem->va = mz->addr;
+ mem->pa = mz->iova;
+ mem->zone = (const void *)mz;
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s allocated with physical address: %"PRIu64,
+ mz->name, mem->pa);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+enum i40e_status_code
+i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+ struct i40e_dma_mem *mem)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s to be freed with physical address: %"PRIu64,
+ ((const struct rte_memzone *)mem->zone)->name, mem->pa);
+ rte_memzone_free((const struct rte_memzone *)mem->zone);
+ mem->zone = NULL;
+ mem->va = NULL;
+ mem->pa = (u64)0;
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
+ * @hw: pointer to the HW structure
+ * @mem: pointer to mem struct to fill out
+ * @size: size of memory requested
+ **/
+enum i40e_status_code
+i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = rte_zmalloc("i40e", size, 0);
+
+ if (mem->va)
+ return I40E_SUCCESS;
+ else
+ return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
+ * @hw: pointer to the HW structure
+ * @mem: pointer to mem struct to free
+ **/
+enum i40e_status_code
+i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+ struct i40e_virt_mem *mem)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ rte_free(mem->va);
+ mem->va = NULL;
+
+ return I40E_SUCCESS;
+}
+
+void
+i40e_init_spinlock_d(struct i40e_spinlock *sp)
+{
+ rte_spinlock_init(&sp->spinlock);
+}
+
+void
+i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
+{
+ rte_spinlock_lock(&sp->spinlock);
+}
+
+void
+i40e_release_spinlock_d(struct i40e_spinlock *sp)
+{
+ rte_spinlock_unlock(&sp->spinlock);
+}
+
+void
+i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
+{
+ return;
+}
+
+/**
+ * Get the hardware capabilities, which will be parsed
+ * and saved into struct i40e_hw.
+ */
+static int
+i40e_get_cap(struct i40e_hw *hw)
+{
+ struct i40e_aqc_list_capabilities_element_resp *buf;
+ uint16_t len, size = 0;
+ int ret;
+
+ /* Calculate a huge enough buff for saving response data temporarily */
+ len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
+ I40E_MAX_CAP_ELE_NUM;
+ buf = rte_zmalloc("i40e", len, 0);
+ if (!buf) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ /* Get, parse the capabilities and save it to hw */
+ ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
+ i40e_aqc_opc_list_func_capabilities, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to discover capabilities");
+
+ /* Free the temporary buffer after being used */
+ rte_free(buf);
+
+ return ret;
+}
+
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4
+
+static int i40e_pf_parse_vf_queue_number_handler(const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct i40e_pf *pf;
+ unsigned long num;
+ char *end;
+
+ pf = (struct i40e_pf *)opaque;
+ RTE_SET_USED(key);
+
+ errno = 0;
+ num = strtoul(value, &end, 0);
+ if (errno != 0 || end == value || *end != 0) {
+ PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is "
+ "kept the value = %hu", value, pf->vf_nb_qp_max);
+ return -(EINVAL);
+ }
+
+ if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
+ pf->vf_nb_qp_max = (uint16_t)num;
+ else
+ /* here return 0 to make next valid same argument work */
+ PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu, it must be "
+ "power of 2 and equal or less than 16 !, Now it is "
+ "kept the value = %hu", num, pf->vf_nb_qp_max);
+
+ return 0;
+}
+
+static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_kvargs *kvlist;
+ int kvargs_count;
+
+ /* set default queue number per VF as 4 */
+ pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+
+ if (dev->device->devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (kvlist == NULL)
+ return -(EINVAL);
+
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
+ if (!kvargs_count) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (kvargs_count > 1)
+ PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
+ "the first invalid or last valid one is used !",
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG);
+
+ rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
+ i40e_pf_parse_vf_queue_number_handler, pf);
+
+ rte_kvargs_free(kvlist);
+
+ return 0;
+}
+
+static int
+i40e_pf_parameter_init(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint16_t qp_count = 0, vsi_count = 0;
+
+ if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
+ PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
+ return -EINVAL;
+ }
+
+ i40e_pf_config_vf_rxq_number(dev);
+
+ /* Add the parameter init for LFC */
+ pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
+
+ pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
+ pf->max_num_vsi = hw->func_caps.num_vsis;
+ pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
+ pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+
+ /* FDir queue/VSI allocation */
+ pf->fdir_qp_offset = 0;
+ if (hw->func_caps.fd) {
+ pf->flags |= I40E_FLAG_FDIR;
+ pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
+ } else {
+ pf->fdir_nb_qps = 0;
+ }
+ qp_count += pf->fdir_nb_qps;
+ vsi_count += 1;
+
+ /* LAN queue/VSI allocation */
+ pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
+ if (!hw->func_caps.rss) {
+ pf->lan_nb_qps = 1;
+ } else {
+ pf->flags |= I40E_FLAG_RSS;
+ if (hw->mac.type == I40E_MAC_X722)
+ pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
+ pf->lan_nb_qps = pf->lan_nb_qp_max;
+ }
+ qp_count += pf->lan_nb_qps;
+ vsi_count += 1;
+
+ /* VF queue/VSI allocation */
+ pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
+ if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
+ pf->flags |= I40E_FLAG_SRIOV;
+ pf->vf_nb_qps = pf->vf_nb_qp_max;
+ pf->vf_num = pci_dev->max_vfs;
+ PMD_DRV_LOG(DEBUG,
+ "%u VF VSIs, %u queues per VF VSI, in total %u queues",
+ pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
+ } else {
+ pf->vf_nb_qps = 0;
+ pf->vf_num = 0;
+ }
+ qp_count += pf->vf_nb_qps * pf->vf_num;
+ vsi_count += pf->vf_num;
+
+ /* VMDq queue/VSI allocation */
+ pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
+ pf->vmdq_nb_qps = 0;
+ pf->max_nb_vmdq_vsi = 0;
+ if (hw->func_caps.vmdq) {
+ if (qp_count < hw->func_caps.num_tx_qp &&
+ vsi_count < hw->func_caps.num_vsis) {
+ pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
+ qp_count) / pf->vmdq_nb_qp_max;
+
+ /* Limit the maximum number of VMDq vsi to the maximum
+ * ethdev can support
+ */
+ pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
+ hw->func_caps.num_vsis - vsi_count);
+ pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
+ ETH_64_POOLS);
+ if (pf->max_nb_vmdq_vsi) {
+ pf->flags |= I40E_FLAG_VMDQ;
+ pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
+ PMD_DRV_LOG(DEBUG,
+ "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
+ pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
+ pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
+ } else {
+ PMD_DRV_LOG(INFO,
+ "No enough queues left for VMDq");
+ }
+ } else {
+ PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
+ }
+ }
+ qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
+ vsi_count += pf->max_nb_vmdq_vsi;
+
+ if (hw->func_caps.dcb)
+ pf->flags |= I40E_FLAG_DCB;
+
+ if (qp_count > hw->func_caps.num_tx_qp) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate %u queues, which exceeds the hardware maximum %u",
+ qp_count, hw->func_caps.num_tx_qp);
+ return -EINVAL;
+ }
+ if (vsi_count > hw->func_caps.num_vsis) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
+ vsi_count, hw->func_caps.num_vsis);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+i40e_pf_get_switch_config(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_aqc_get_switch_config_resp *switch_config;
+ struct i40e_aqc_switch_config_element_resp *element;
+ uint16_t start_seid = 0, num_reported;
+ int ret;
+
+ switch_config = (struct i40e_aqc_get_switch_config_resp *)\
+ rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
+ if (!switch_config) {
+ PMD_DRV_LOG(ERR, "Failed to allocated memory");
+ return -ENOMEM;
+ }
+
+ /* Get the switch configurations */
+ ret = i40e_aq_get_switch_config(hw, switch_config,
+ I40E_AQ_LARGE_BUF, &start_seid, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to get switch configurations");
+ goto fail;
+ }
+ num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
+ if (num_reported != 1) { /* The number should be 1 */
+ PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
+ goto fail;
+ }
+
+ /* Parse the switch configuration elements */
+ element = &(switch_config->element[0]);
+ if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
+ pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
+ pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
+ } else
+ PMD_DRV_LOG(INFO, "Unknown element type");
+
+fail:
+ rte_free(switch_config);
+
+ return ret;
+}
+
+static int
+i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
+ uint32_t num)
+{
+ struct pool_entry *entry;
+
+ if (pool == NULL || num == 0)
+ return -EINVAL;
+
+ entry = rte_zmalloc("i40e", sizeof(*entry), 0);
+ if (entry == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
+ return -ENOMEM;
+ }
+
+ /* queue heap initialize */
+ pool->num_free = num;
+ pool->num_alloc = 0;
+ pool->base = base;
+ LIST_INIT(&pool->alloc_list);
+ LIST_INIT(&pool->free_list);
+
+ /* Initialize element */
+ entry->base = 0;
+ entry->len = num;
+
+ LIST_INSERT_HEAD(&pool->free_list, entry, next);
+ return 0;
+}
+
+static void
+i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
+{
+ struct pool_entry *entry, *next_entry;
+
+ if (pool == NULL)
+ return;
+
+ for (entry = LIST_FIRST(&pool->alloc_list);
+ entry && (next_entry = LIST_NEXT(entry, next), 1);
+ entry = next_entry) {
+ LIST_REMOVE(entry, next);
+ rte_free(entry);
+ }
+
+ for (entry = LIST_FIRST(&pool->free_list);
+ entry && (next_entry = LIST_NEXT(entry, next), 1);
+ entry = next_entry) {
+ LIST_REMOVE(entry, next);
+ rte_free(entry);
+ }
+
+ pool->num_free = 0;
+ pool->num_alloc = 0;
+ pool->base = 0;
+ LIST_INIT(&pool->alloc_list);
+ LIST_INIT(&pool->free_list);
+}
+
+static int
+i40e_res_pool_free(struct i40e_res_pool_info *pool,
+ uint32_t base)
+{
+ struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
+ uint32_t pool_offset;
+ int insert;
+
+ if (pool == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid parameter");
+ return -EINVAL;
+ }
+
+ pool_offset = base - pool->base;
+ /* Lookup in alloc list */
+ LIST_FOREACH(entry, &pool->alloc_list, next) {
+ if (entry->base == pool_offset) {
+ valid_entry = entry;
+ LIST_REMOVE(entry, next);
+ break;
+ }
+ }
+
+ /* Not find, return */
+ if (valid_entry == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to find entry");
+ return -EINVAL;
+ }
+
+ /**
+ * Found it, move it to free list and try to merge.
+ * In order to make merge easier, always sort it by qbase.
+ * Find adjacent prev and last entries.
+ */
+ prev = next = NULL;
+ LIST_FOREACH(entry, &pool->free_list, next) {
+ if (entry->base > valid_entry->base) {
+ next = entry;
+ break;
+ }
+ prev = entry;
+ }
+
+ insert = 0;
+ /* Try to merge with next one*/
+ if (next != NULL) {
+ /* Merge with next one */
+ if (valid_entry->base + valid_entry->len == next->base) {
+ next->base = valid_entry->base;
+ next->len += valid_entry->len;
+ rte_free(valid_entry);
+ valid_entry = next;
+ insert = 1;
+ }
+ }
+
+ if (prev != NULL) {
+ /* Merge with previous one */
+ if (prev->base + prev->len == valid_entry->base) {
+ prev->len += valid_entry->len;
+ /* If it merge with next one, remove next node */
+ if (insert == 1) {
+ LIST_REMOVE(valid_entry, next);
+ rte_free(valid_entry);
+ } else {
+ rte_free(valid_entry);
+ insert = 1;
+ }
+ }
+ }
+
+ /* Not find any entry to merge, insert */
+ if (insert == 0) {
+ if (prev != NULL)
+ LIST_INSERT_AFTER(prev, valid_entry, next);
+ else if (next != NULL)
+ LIST_INSERT_BEFORE(next, valid_entry, next);
+ else /* It's empty list, insert to head */
+ LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
+ }
+
+ pool->num_free += valid_entry->len;
+ pool->num_alloc -= valid_entry->len;
+
+ return 0;
+}
+
+static int
+i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
+ uint16_t num)
+{
+ struct pool_entry *entry, *valid_entry;
+
+ if (pool == NULL || num == 0) {
+ PMD_DRV_LOG(ERR, "Invalid parameter");
+ return -EINVAL;
+ }
+
+ if (pool->num_free < num) {
+ PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
+ num, pool->num_free);
+ return -ENOMEM;
+ }
+
+ valid_entry = NULL;
+ /* Lookup in free list and find most fit one */
+ LIST_FOREACH(entry, &pool->free_list, next) {
+ if (entry->len >= num) {
+ /* Find best one */
+ if (entry->len == num) {
+ valid_entry = entry;
+ break;
+ }
+ if (valid_entry == NULL || valid_entry->len > entry->len)
+ valid_entry = entry;
+ }
+ }
+
+ /* Not find one to satisfy the request, return */
+ if (valid_entry == NULL) {
+ PMD_DRV_LOG(ERR, "No valid entry found");
+ return -ENOMEM;
+ }
+ /**
+ * The entry have equal queue number as requested,
+ * remove it from alloc_list.
+ */
+ if (valid_entry->len == num) {
+ LIST_REMOVE(valid_entry, next);
+ } else {
+ /**
+ * The entry have more numbers than requested,
+ * create a new entry for alloc_list and minus its
+ * queue base and number in free_list.
+ */
+ entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
+ if (entry == NULL) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate memory for resource pool");
+ return -ENOMEM;
+ }
+ entry->base = valid_entry->base;
+ entry->len = num;
+ valid_entry->base += num;
+ valid_entry->len -= num;
+ valid_entry = entry;
+ }
+
+ /* Insert it into alloc list, not sorted */
+ LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
+
+ pool->num_free -= valid_entry->len;
+ pool->num_alloc += valid_entry->len;
+
+ return valid_entry->base + pool->base;
+}
+
+/**
+ * bitmap_is_subset - Check whether src2 is subset of src1
+ **/
+static inline int
+bitmap_is_subset(uint8_t src1, uint8_t src2)
+{
+ return !((src1 ^ src2) & src2);
+}
+
+static enum i40e_status_code
+validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+
+ /* If DCB is not supported, only default TC is supported */
+ if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
+ PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
+ PMD_DRV_LOG(ERR,
+ "Enabled TC map 0x%x not applicable to HW support 0x%x",
+ hw->func_caps.enabled_tcmap, enabled_tcmap);
+ return I40E_NOT_SUPPORTED;
+ }
+ return I40E_SUCCESS;
+}
+
+int
+i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
+ struct i40e_vsi_vlan_pvid_info *info)
+{
+ struct i40e_hw *hw;
+ struct i40e_vsi_context ctxt;
+ uint8_t vlan_flags = 0;
+ int ret;
+
+ if (vsi == NULL || info == NULL) {
+ PMD_DRV_LOG(ERR, "invalid parameters");
+ return I40E_ERR_PARAM;
+ }
+
+ if (info->on) {
+ vsi->info.pvid = info->config.pvid;
+ /**
+ * If insert pvid is enabled, only tagged pkts are
+ * allowed to be sent out.
+ */
+ vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+ } else {
+ vsi->info.pvid = 0;
+ if (info->config.reject.tagged == 0)
+ vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+
+ if (info->config.reject.untagged == 0)
+ vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ }
+ vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ I40E_AQ_VSI_PVLAN_MODE_MASK);
+ vsi->info.port_vlan_flags |= vlan_flags;
+ vsi->info.valid_sections =
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ memset(&ctxt, 0, sizeof(ctxt));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+
+ return ret;
+}
+
+static int
+i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int i, ret;
+ struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
+
+ ret = validate_tcmap_parameter(vsi, enabled_tcmap);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ if (!vsi->seid) {
+ PMD_DRV_LOG(ERR, "seid not valid");
+ return -EINVAL;
+ }
+
+ memset(&tc_bw_data, 0, sizeof(tc_bw_data));
+ tc_bw_data.tc_valid_bits = enabled_tcmap;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ tc_bw_data.tc_bw_credits[i] =
+ (enabled_tcmap & (1 << i)) ? 1 : 0;
+
+ ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure TC BW");
+ return ret;
+ }
+
+ rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
+ sizeof(vsi->info.qs_handle));
+ return I40E_SUCCESS;
+}
+
+static enum i40e_status_code
+i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
+ struct i40e_aqc_vsi_properties_data *info,
+ uint8_t enabled_tcmap)
+{
+ enum i40e_status_code ret;
+ int i, total_tc = 0;
+ uint16_t qpnum_per_tc, bsf, qp_idx;
+
+ ret = validate_tcmap_parameter(vsi, enabled_tcmap);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ if (enabled_tcmap & (1 << i))
+ total_tc++;
+ if (total_tc == 0)
+ total_tc = 1;
+ vsi->enabled_tc = enabled_tcmap;
+
+ /* Number of queues per enabled TC */
+ qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
+ qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
+ bsf = rte_bsf32(qpnum_per_tc);
+
+ /* Adjust the queue number to actual queues that can be applied */
+ if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
+ vsi->nb_qps = qpnum_per_tc * total_tc;
+
+ /**
+ * Configure TC and queue mapping parameters, for enabled TC,
+ * allocate qpnum_per_tc queues to this traffic. For disabled TC,
+ * default queue will serve it.
+ */
+ qp_idx = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & (1 << i)) {
+ info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+ qp_idx += qpnum_per_tc;
+ } else
+ info->tc_mapping[i] = 0;
+ }
+
+ /* Associate queue number with VSI */
+ if (vsi->type == I40E_VSI_SRIOV) {
+ info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+ for (i = 0; i < vsi->nb_qps; i++)
+ info->queue_mapping[i] =
+ rte_cpu_to_le_16(vsi->base_queue + i);
+ } else {
+ info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
+ }
+ info->valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_veb_release(struct i40e_veb *veb)
+{
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+
+ if (veb == NULL)
+ return -EINVAL;
+
+ if (!TAILQ_EMPTY(&veb->head)) {
+ PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
+ return -EACCES;
+ }
+ /* associate_vsi field is NULL for floating VEB */
+ if (veb->associate_vsi != NULL) {
+ vsi = veb->associate_vsi;
+ hw = I40E_VSI_TO_HW(vsi);
+
+ vsi->uplink_seid = veb->uplink_seid;
+ vsi->veb = NULL;
+ } else {
+ veb->associate_pf->main_vsi->floating_veb = NULL;
+ hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
+ }
+
+ i40e_aq_delete_element(hw, veb->seid, NULL);
+ rte_free(veb);
+ return I40E_SUCCESS;
+}
+
+/* Setup a veb */
+static struct i40e_veb *
+i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
+{
+ struct i40e_veb *veb;
+ int ret;
+ struct i40e_hw *hw;
+
+ if (pf == NULL) {
+ PMD_DRV_LOG(ERR,
+ "veb setup failed, associated PF shouldn't null");
+ return NULL;
+ }
+ hw = I40E_PF_TO_HW(pf);
+
+ veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
+ if (!veb) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
+ goto fail;
+ }
+
+ veb->associate_vsi = vsi;
+ veb->associate_pf = pf;
+ TAILQ_INIT(&veb->head);
+ veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
+
+ /* create floating veb if vsi is NULL */
+ if (vsi != NULL) {
+ ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
+ I40E_DEFAULT_TCMAP, false,
+ &veb->seid, false, NULL);
+ } else {
+ ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
+ true, &veb->seid, false, NULL);
+ }
+
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
+ hw->aq.asq_last_status);
+ goto fail;
+ }
+ veb->enabled_tc = I40E_DEFAULT_TCMAP;
+
+ /* get statistics index */
+ ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
+ &veb->stats_idx, NULL, NULL, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
+ hw->aq.asq_last_status);
+ goto fail;
+ }
+ /* Get VEB bandwidth, to be implemented */
+ /* Now associated vsi binding to the VEB, set uplink to this VEB */
+ if (vsi)
+ vsi->uplink_seid = veb->seid;
+
+ return veb;
+fail:
+ rte_free(veb);
+ return NULL;
+}
+
+int
+i40e_vsi_release(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi_list *vsi_list;
+ void *temp;
+ int ret;
+ struct i40e_mac_filter *f;
+ uint16_t user_param;
+
+ if (!vsi)
+ return I40E_SUCCESS;
+
+ if (!vsi->adapter)
+ return -EFAULT;
+
+ user_param = vsi->user_param;
+
+ pf = I40E_VSI_TO_PF(vsi);
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* VSI has child to attach, release child first */
+ if (vsi->veb) {
+ TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
+ if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
+ return -1;
+ }
+ i40e_veb_release(vsi->veb);
+ }
+
+ if (vsi->floating_veb) {
+ TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
+ if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
+ return -1;
+ }
+ }
+
+ /* Remove all macvlan filters of the VSI */
+ i40e_vsi_remove_all_macvlan_filter(vsi);
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
+ rte_free(f);
+
+ if (vsi->type != I40E_VSI_MAIN &&
+ ((vsi->type != I40E_VSI_SRIOV) ||
+ !pf->floating_veb_list[user_param])) {
+ /* Remove vsi from parent's sibling list */
+ if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
+ PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
+ return I40E_ERR_PARAM;
+ }
+ TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
+ &vsi->sib_vsi_list, list);
+
+ /* Remove all switch element of the VSI */
+ ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to delete element");
+ }
+
+ if ((vsi->type == I40E_VSI_SRIOV) &&
+ pf->floating_veb_list[user_param]) {
+ /* Remove vsi from parent's sibling list */
+ if (vsi->parent_vsi == NULL ||
+ vsi->parent_vsi->floating_veb == NULL) {
+ PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
+ return I40E_ERR_PARAM;
+ }
+ TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
+ &vsi->sib_vsi_list, list);
+
+ /* Remove all switch element of the VSI */
+ ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to delete element");
+ }
+
+ i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
+
+ if (vsi->type != I40E_VSI_SRIOV)
+ i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
+ rte_free(vsi);
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_update_default_filter_setting(struct i40e_vsi *vsi)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_remove_macvlan_element_data def_filter;
+ struct i40e_mac_filter_info filter;
+ int ret;
+
+ if (vsi->type != I40E_VSI_MAIN)
+ return I40E_ERR_CONFIG;
+ memset(&def_filter, 0, sizeof(def_filter));
+ rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
+ ETH_ADDR_LEN);
+ def_filter.vlan_tag = 0;
+ def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
+ if (ret != I40E_SUCCESS) {
+ struct i40e_mac_filter *f;
+ struct ether_addr *mac;
+
+ PMD_DRV_LOG(DEBUG,
+ "Cannot remove the default macvlan filter");
+ /* It needs to add the permanent mac into mac list */
+ f = rte_zmalloc("macv_filter", sizeof(*f), 0);
+ if (f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+ mac = &f->mac_info.mac_addr;
+ rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
+ ETH_ADDR_LEN);
+ f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
+ vsi->mac_num++;
+
+ return ret;
+ }
+ rte_memcpy(&filter.mac_addr,
+ (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
+ filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ return i40e_vsi_add_mac(vsi, &filter);
+}
+
+/*
+ * i40e_vsi_get_bw_config - Query VSI BW Information
+ * @vsi: the VSI to be queried
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static enum i40e_status_code
+i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
+{
+ struct i40e_aqc_query_vsi_bw_config_resp bw_config;
+ struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
+ struct i40e_hw *hw = &vsi->adapter->hw;
+ i40e_status ret;
+ int i;
+ uint32_t bw_max;
+
+ memset(&bw_config, 0, sizeof(bw_config));
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
+ hw->aq.asq_last_status);
+ return ret;
+ }
+
+ memset(&ets_sla_config, 0, sizeof(ets_sla_config));
+ ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
+ &ets_sla_config, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "VSI failed to get TC bandwdith configuration %u",
+ hw->aq.asq_last_status);
+ return ret;
+ }
+
+ /* store and print out BW info */
+ vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
+ vsi->bw_info.bw_max = bw_config.max_bw;
+ PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
+ PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
+ bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
+ (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
+ I40E_16_BIT_WIDTH);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ vsi->bw_info.bw_ets_share_credits[i] =
+ ets_sla_config.share_credits[i];
+ vsi->bw_info.bw_ets_credits[i] =
+ rte_le_to_cpu_16(ets_sla_config.credits[i]);
+ /* 4 bits per TC, 4th bit is reserved */
+ vsi->bw_info.bw_ets_max[i] =
+ (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
+ RTE_LEN2MASK(3, uint8_t));
+ PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
+ vsi->bw_info.bw_ets_share_credits[i]);
+ PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
+ vsi->bw_info.bw_ets_credits[i]);
+ PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
+ vsi->bw_info.bw_ets_max[i]);
+ }
+
+ return I40E_SUCCESS;
+}
+
+/* i40e_enable_pf_lb
+ * @pf: pointer to the pf structure
+ *
+ * allow loopback on pf
+ */
+static inline void
+i40e_enable_pf_lb(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ /* Use the FW API if FW >= v5.0 */
+ if (hw->aq.fw_maj_ver < 5) {
+ PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
+ return;
+ }
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = hw->pf_id;
+ ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
+ ret, hw->aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections =
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret)
+ PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
+ hw->aq.asq_last_status);
+}
+
+/* Setup a VSI */
+struct i40e_vsi *
+i40e_vsi_setup(struct i40e_pf *pf,
+ enum i40e_vsi_type type,
+ struct i40e_vsi *uplink_vsi,
+ uint16_t user_param)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_mac_filter_info filter;
+ int ret;
+ struct i40e_vsi_context ctxt;
+ struct ether_addr broadcast =
+ {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
+
+ if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
+ uplink_vsi == NULL) {
+ PMD_DRV_LOG(ERR,
+ "VSI setup failed, VSI link shouldn't be NULL");
+ return NULL;
+ }
+
+ if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
+ PMD_DRV_LOG(ERR,
+ "VSI setup failed, MAIN VSI uplink VSI should be NULL");
+ return NULL;
+ }
+
+ /* two situations
+ * 1.type is not MAIN and uplink vsi is not NULL
+ * If uplink vsi didn't setup VEB, create one first under veb field
+ * 2.type is SRIOV and the uplink is NULL
+ * If floating VEB is NULL, create one veb under floating veb field
+ */
+
+ if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
+ uplink_vsi->veb == NULL) {
+ uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
+
+ if (uplink_vsi->veb == NULL) {
+ PMD_DRV_LOG(ERR, "VEB setup failed");
+ return NULL;
+ }
+ /* set ALLOWLOOPBACk on pf, when veb is created */
+ i40e_enable_pf_lb(pf);
+ }
+
+ if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
+ pf->main_vsi->floating_veb == NULL) {
+ pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
+
+ if (pf->main_vsi->floating_veb == NULL) {
+ PMD_DRV_LOG(ERR, "VEB setup failed");
+ return NULL;
+ }
+ }
+
+ vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
+ return NULL;
+ }
+ TAILQ_INIT(&vsi->mac_list);
+ vsi->type = type;
+ vsi->adapter = I40E_PF_TO_ADAPTER(pf);
+ vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
+ vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
+ vsi->user_param = user_param;
+ vsi->vlan_anti_spoof_on = 0;
+ vsi->vlan_filter_on = 0;
+ /* Allocate queues */
+ switch (vsi->type) {
+ case I40E_VSI_MAIN :
+ vsi->nb_qps = pf->lan_nb_qps;
+ break;
+ case I40E_VSI_SRIOV :
+ vsi->nb_qps = pf->vf_nb_qps;
+ break;
+ case I40E_VSI_VMDQ2:
+ vsi->nb_qps = pf->vmdq_nb_qps;
+ break;
+ case I40E_VSI_FDIR:
+ vsi->nb_qps = pf->fdir_nb_qps;
+ break;
+ default:
+ goto fail_mem;
+ }
+ /*
+ * The filter status descriptor is reported in rx queue 0,
+ * while the tx queue for fdir filter programming has no
+ * such constraints, can be non-zero queues.
+ * To simplify it, choose FDIR vsi use queue 0 pair.
+ * To make sure it will use queue 0 pair, queue allocation
+ * need be done before this function is called
+ */
+ if (type != I40E_VSI_FDIR) {
+ ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
+ vsi->seid, ret);
+ goto fail_mem;
+ }
+ vsi->base_queue = ret;
+ } else
+ vsi->base_queue = I40E_FDIR_QUEUE_ID;
+
+ /* VF has MSIX interrupt in VF range, don't allocate here */
+ if (type == I40E_VSI_MAIN) {
+ if (pf->support_multi_driver) {
+ /* If support multi-driver, need to use INT0 instead of
+ * allocating from msix pool. The Msix pool is init from
+ * INT1, so it's OK just set msix_intr to 0 and nb_msix
+ * to 1 without calling i40e_res_pool_alloc.
+ */
+ vsi->msix_intr = 0;
+ vsi->nb_msix = 1;
+ } else {
+ ret = i40e_res_pool_alloc(&pf->msix_pool,
+ RTE_MIN(vsi->nb_qps,
+ RTE_MAX_RXTX_INTR_VEC_ID));
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "VSI MAIN %d get heap failed %d",
+ vsi->seid, ret);
+ goto fail_queue_alloc;
+ }
+ vsi->msix_intr = ret;
+ vsi->nb_msix = RTE_MIN(vsi->nb_qps,
+ RTE_MAX_RXTX_INTR_VEC_ID);
+ }
+ } else if (type != I40E_VSI_SRIOV) {
+ ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
+ goto fail_queue_alloc;
+ }
+ vsi->msix_intr = ret;
+ vsi->nb_msix = 1;
+ } else {
+ vsi->msix_intr = 0;
+ vsi->nb_msix = 0;
+ }
+
+ /* Add VSI */
+ if (type == I40E_VSI_MAIN) {
+ /* For main VSI, no need to add since it's default one */
+ vsi->uplink_seid = pf->mac_seid;
+ vsi->seid = pf->main_vsi_seid;
+ /* Bind queues with specific MSIX interrupt */
+ /**
+ * Needs 2 interrupt at least, one for misc cause which will
+ * enabled from OS side, Another for queues binding the
+ * interrupt from device side only.
+ */
+
+ /* Get default VSI parameters from hardware */
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.vf_num = 0;
+ ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to get VSI params");
+ goto fail_msix_alloc;
+ }
+ rte_memcpy(&vsi->info, &ctxt.info,
+ sizeof(struct i40e_aqc_vsi_properties_data));
+ vsi->vsi_id = ctxt.vsi_number;
+ vsi->info.valid_sections = 0;
+
+ /* Configure tc, enabled TC0 only */
+ if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
+ I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
+ goto fail_msix_alloc;
+ }
+
+ /* TC, queue mapping */
+ memset(&ctxt, 0, sizeof(ctxt));
+ vsi->info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ rte_memcpy(&ctxt.info, &vsi->info,
+ sizeof(struct i40e_aqc_vsi_properties_data));
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ I40E_DEFAULT_TCMAP);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
+ goto fail_msix_alloc;
+ }
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.vf_num = 0;
+
+ /* Update VSI parameters */
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ goto fail_msix_alloc;
+ }
+
+ rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+ rte_memcpy(&vsi->info.queue_mapping,
+ &ctxt.info.queue_mapping,
+ sizeof(vsi->info.queue_mapping));
+ vsi->info.mapping_flags = ctxt.info.mapping_flags;
+ vsi->info.valid_sections = 0;
+
+ rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
+ ETH_ADDR_LEN);
+
+ /**
+ * Updating default filter settings are necessary to prevent
+ * reception of tagged packets.
+ * Some old firmware configurations load a default macvlan
+ * filter which accepts both tagged and untagged packets.
+ * The updating is to use a normal filter instead if needed.
+ * For NVM 4.2.2 or after, the updating is not needed anymore.
+ * The firmware with correct configurations load the default
+ * macvlan filter which is expected and cannot be removed.
+ */
+ i40e_update_default_filter_setting(vsi);
+ i40e_config_qinq(hw, vsi);
+ } else if (type == I40E_VSI_SRIOV) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ /**
+ * For other VSI, the uplink_seid equals to uplink VSI's
+ * uplink_seid since they share same VEB
+ */
+ if (uplink_vsi == NULL)
+ vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
+ else
+ vsi->uplink_seid = uplink_vsi->uplink_seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1;
+ ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+
+ /* Use the VEB configuration if FW >= v5.0 */
+ if (hw->aq.fw_maj_ver >= 5) {
+ /* Configure switch ID */
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
+
+ /* Configure port/vlan */
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ hw->func_caps.enabled_tcmap);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
+ goto fail_msix_alloc;
+ }
+
+ ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+ /**
+ * Since VSI is not created yet, only configure parameter,
+ * will add vsi below.
+ */
+
+ i40e_config_qinq(hw, vsi);
+ } else if (type == I40E_VSI_VMDQ2) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ /*
+ * For other VSI, the uplink_seid equals to uplink VSI's
+ * uplink_seid since they share same VEB
+ */
+ vsi->uplink_seid = uplink_vsi->uplink_seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1;
+ ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
+
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ /* user_param carries flag to enable loop back */
+ if (user_param) {
+ ctxt.info.switch_id =
+ rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+ ctxt.info.switch_id |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
+
+ /* Configure port/vlan */
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ I40E_DEFAULT_TCMAP);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
+ goto fail_msix_alloc;
+ }
+ ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+ } else if (type == I40E_VSI_FDIR) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ vsi->uplink_seid = uplink_vsi->uplink_seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ I40E_DEFAULT_TCMAP);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping.");
+ goto fail_msix_alloc;
+ }
+ ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+ } else {
+ PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
+ goto fail_msix_alloc;
+ }
+
+ if (vsi->type != I40E_VSI_MAIN) {
+ ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
+ hw->aq.asq_last_status);
+ goto fail_msix_alloc;
+ }
+ memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info.valid_sections = 0;
+ vsi->seid = ctxt.seid;
+ vsi->vsi_id = ctxt.vsi_number;
+ vsi->sib_vsi_list.vsi = vsi;
+ if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
+ TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
+ &vsi->sib_vsi_list, list);
+ } else {
+ TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
+ &vsi->sib_vsi_list, list);
+ }
+ }
+
+ /* MAC/VLAN configuration */
+ rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+ filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+
+ ret = i40e_vsi_add_mac(vsi, &filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
+ goto fail_msix_alloc;
+ }
+
+ /* Get VSI BW information */
+ i40e_vsi_get_bw_config(vsi);
+ return vsi;
+fail_msix_alloc:
+ i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
+fail_queue_alloc:
+ i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
+fail_mem:
+ rte_free(vsi);
+ return NULL;
+}
+
+/* Configure vlan filter on or off */
+int
+i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
+{
+ int i, num;
+ struct i40e_mac_filter *f;
+ void *temp;
+ struct i40e_mac_filter_info *mac_filter;
+ enum rte_mac_filter_type desired_filter;
+ int ret = I40E_SUCCESS;
+
+ if (on) {
+ /* Filter to match MAC and VLAN */
+ desired_filter = RTE_MACVLAN_PERFECT_MATCH;
+ } else {
+ /* Filter to match only MAC */
+ desired_filter = RTE_MAC_PERFECT_MATCH;
+ }
+
+ num = vsi->mac_num;
+
+ mac_filter = rte_zmalloc("mac_filter_info_data",
+ num * sizeof(*mac_filter), 0);
+ if (mac_filter == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ i = 0;
+
+ /* Remove all existing mac */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+ mac_filter[i] = f->mac_info;
+ ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
+ on ? "enable" : "disable");
+ goto DONE;
+ }
+ i++;
+ }
+
+ /* Override with new filter */
+ for (i = 0; i < num; i++) {
+ mac_filter[i].filter_type = desired_filter;
+ ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
+ on ? "enable" : "disable");
+ goto DONE;
+ }
+ }
+
+DONE:
+ rte_free(mac_filter);
+ return ret;
+}
+
+/* Configure vlan stripping on or off */
+int
+i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_vsi_context ctxt;
+ uint8_t vlan_flags;
+ int ret = I40E_SUCCESS;
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
+ if (on) {
+ if ((vsi->info.port_vlan_flags &
+ I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.port_vlan_flags &
+ I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
+ I40E_AQ_VSI_PVLAN_EMOD_MASK)
+ return 0; /* already off */
+ }
+ }
+
+ if (on)
+ vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ else
+ vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+ vsi->info.valid_sections =
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
+ vsi->info.port_vlan_flags |= vlan_flags;
+ ctxt.seid = vsi->seid;
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret)
+ PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
+ on ? "enable" : "disable");
+
+ return ret;
+}
+
+static int
+i40e_dev_init_vlan(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ int ret;
+ int mask = 0;
+
+ /* Apply vlan offload setting */
+ mask = ETH_VLAN_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ret = i40e_vlan_offload_set(dev, mask);
+ if (ret) {
+ PMD_DRV_LOG(INFO, "Failed to update vlan offload");
+ return ret;
+ }
+
+ /* Apply pvid setting */
+ ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
+ data->dev_conf.txmode.hw_vlan_insert_pvid);
+ if (ret)
+ PMD_DRV_LOG(INFO, "Failed to update VSI params");
+
+ return ret;
+}
+
+static int
+i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+
+ return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
+}
+
+static int
+i40e_update_flow_control(struct i40e_hw *hw)
+{
+#define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
+ struct i40e_link_status link_status;
+ uint32_t rxfc = 0, txfc = 0, reg;
+ uint8_t an_info;
+ int ret;
+
+ memset(&link_status, 0, sizeof(link_status));
+ ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to get link status information");
+ goto write_reg; /* Disable flow control */
+ }
+
+ an_info = hw->phy.link_info.an_info;
+ if (!(an_info & I40E_AQ_AN_COMPLETED)) {
+ PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
+ ret = I40E_ERR_NOT_READY;
+ goto write_reg; /* Disable flow control */
+ }
+ /**
+ * If link auto negotiation is enabled, flow control needs to
+ * be configured according to it
+ */
+ switch (an_info & I40E_LINK_PAUSE_RXTX) {
+ case I40E_LINK_PAUSE_RXTX:
+ rxfc = 1;
+ txfc = 1;
+ hw->fc.current_mode = I40E_FC_FULL;
+ break;
+ case I40E_AQ_LINK_PAUSE_RX:
+ rxfc = 1;
+ hw->fc.current_mode = I40E_FC_RX_PAUSE;
+ break;
+ case I40E_AQ_LINK_PAUSE_TX:
+ txfc = 1;
+ hw->fc.current_mode = I40E_FC_TX_PAUSE;
+ break;
+ default:
+ hw->fc.current_mode = I40E_FC_NONE;
+ break;
+ }
+
+write_reg:
+ I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
+ txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
+ reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
+ reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+ reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
+ I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
+
+ return ret;
+}
+
+/* PF setup */
+static int
+i40e_pf_setup(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_filter_control_settings settings;
+ struct i40e_vsi *vsi;
+ int ret;
+
+ /* Clear all stats counters */
+ pf->offset_loaded = FALSE;
+ memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
+ memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
+ memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
+ memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
+
+ ret = i40e_pf_get_switch_config(pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
+ return ret;
+ }
+
+ ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING,
+ "failed to allocate switch domain for device %d", ret);
+
+ if (pf->flags & I40E_FLAG_FDIR) {
+ /* make queue allocated first, let FDIR use queue pair 0*/
+ ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
+ if (ret != I40E_FDIR_QUEUE_ID) {
+ PMD_DRV_LOG(ERR,
+ "queue allocation fails for FDIR: ret =%d",
+ ret);
+ pf->flags &= ~I40E_FLAG_FDIR;
+ }
+ }
+ /* main VSI setup */
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Setup of main vsi failed");
+ return I40E_ERR_NOT_READY;
+ }
+ pf->main_vsi = vsi;
+
+ /* Configure filter control */
+ memset(&settings, 0, sizeof(settings));
+ if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+ settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
+ else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+ settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
+ else {
+ PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
+ hw->func_caps.rss_table_size);
+ return I40E_ERR_PARAM;
+ }
+ PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
+ hw->func_caps.rss_table_size);
+ pf->hash_lut_size = hw->func_caps.rss_table_size;
+
+ /* Enable ethtype and macvlan filters */
+ settings.enable_ethtype = TRUE;
+ settings.enable_macvlan = TRUE;
+ ret = i40e_set_filter_control(hw, &settings);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
+ ret);
+
+ /* Update flow control according to the auto negotiation */
+ i40e_update_flow_control(hw);
+
+ return I40E_SUCCESS;
+}
+
+int
+i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
+{
+ uint32_t reg;
+ uint16_t j;
+
+ /**
+ * Set or clear TX Queue Disable flags,
+ * which is required by hardware.
+ */
+ i40e_pre_tx_queue_cfg(hw, q_idx, on);
+ rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
+
+ /* Wait until the request is finished */
+ for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
+ rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
+ reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
+ if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
+ ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
+ & 0x1))) {
+ break;
+ }
+ }
+ if (on) {
+ if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
+ return I40E_SUCCESS; /* already on, skip next steps */
+
+ I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
+ reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+ } else {
+ if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ return I40E_SUCCESS; /* already off, skip next steps */
+ reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ }
+ /* Write the register */
+ I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
+ /* Check the result */
+ for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
+ rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
+ reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
+ if (on) {
+ if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
+ (reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ } else {
+ if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
+ !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ }
+ }
+ /* Check if it is timeout */
+ if (j >= I40E_CHK_Q_ENA_COUNT) {
+ PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
+ (on ? "enable" : "disable"), q_idx);
+ return I40E_ERR_TIMEOUT;
+ }
+
+ return I40E_SUCCESS;
+}
+
+/* Swith on or off the tx queues */
+static int
+i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
+{
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
+ struct i40e_tx_queue *txq;
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ uint16_t i;
+ int ret;
+
+ for (i = 0; i < dev_data->nb_tx_queues; i++) {
+ txq = dev_data->tx_queues[i];
+ /* Don't operate the queue if not configured or
+ * if starting only per queue */
+ if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
+ continue;
+ if (on)
+ ret = i40e_dev_tx_queue_start(dev, i);
+ else
+ ret = i40e_dev_tx_queue_stop(dev, i);
+ if ( ret != I40E_SUCCESS)
+ return ret;
+ }
+
+ return I40E_SUCCESS;
+}
+
+int
+i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
+{
+ uint32_t reg;
+ uint16_t j;
+
+ /* Wait until the request is finished */
+ for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
+ rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
+ reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
+ if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
+ ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
+ break;
+ }
+
+ if (on) {
+ if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
+ return I40E_SUCCESS; /* Already on, skip next steps */
+ reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+ } else {
+ if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ return I40E_SUCCESS; /* Already off, skip next steps */
+ reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+ }
+
+ /* Write the register */
+ I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
+ /* Check the result */
+ for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
+ rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
+ reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
+ if (on) {
+ if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
+ (reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ } else {
+ if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
+ !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ }
+ }
+
+ /* Check if it is timeout */
+ if (j >= I40E_CHK_Q_ENA_COUNT) {
+ PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
+ (on ? "enable" : "disable"), q_idx);
+ return I40E_ERR_TIMEOUT;
+ }
+
+ return I40E_SUCCESS;
+}
+/* Switch on or off the rx queues */
+static int
+i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
+{
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
+ struct i40e_rx_queue *rxq;
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ uint16_t i;
+ int ret;
+
+ for (i = 0; i < dev_data->nb_rx_queues; i++) {
+ rxq = dev_data->rx_queues[i];
+ /* Don't operate the queue if not configured or
+ * if starting only per queue */
+ if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
+ continue;
+ if (on)
+ ret = i40e_dev_rx_queue_start(dev, i);
+ else
+ ret = i40e_dev_rx_queue_stop(dev, i);
+ if (ret != I40E_SUCCESS)
+ return ret;
+ }
+
+ return I40E_SUCCESS;
+}
+
+/* Switch on or off all the rx/tx queues */
+int
+i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
+{
+ int ret;
+
+ if (on) {
+ /* enable rx queues before enabling tx queues */
+ ret = i40e_dev_switch_rx_queues(pf, on);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to switch rx queues");
+ return ret;
+ }
+ ret = i40e_dev_switch_tx_queues(pf, on);
+ } else {
+ /* Stop tx queues before stopping rx queues */
+ ret = i40e_dev_switch_tx_queues(pf, on);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to switch tx queues");
+ return ret;
+ }
+ ret = i40e_dev_switch_rx_queues(pf, on);
+ }
+
+ return ret;
+}
+
+/* Initialize VSI for TX */
+static int
+i40e_dev_tx_init(struct i40e_pf *pf)
+{
+ struct rte_eth_dev_data *data = pf->dev_data;
+ uint16_t i;
+ uint32_t ret = I40E_SUCCESS;
+ struct i40e_tx_queue *txq;
+
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ txq = data->tx_queues[i];
+ if (!txq || !txq->q_set)
+ continue;
+ ret = i40e_tx_queue_init(txq);
+ if (ret != I40E_SUCCESS)
+ break;
+ }
+ if (ret == I40E_SUCCESS)
+ i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
+ ->eth_dev);
+
+ return ret;
+}
+
+/* Initialize VSI for RX */
+static int
+i40e_dev_rx_init(struct i40e_pf *pf)
+{
+ struct rte_eth_dev_data *data = pf->dev_data;
+ int ret = I40E_SUCCESS;
+ uint16_t i;
+ struct i40e_rx_queue *rxq;
+
+ i40e_pf_config_mq_rx(pf);
+ for (i = 0; i < data->nb_rx_queues; i++) {
+ rxq = data->rx_queues[i];
+ if (!rxq || !rxq->q_set)
+ continue;
+
+ ret = i40e_rx_queue_init(rxq);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to do RX queue initialization");
+ break;
+ }
+ }
+ if (ret == I40E_SUCCESS)
+ i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
+ ->eth_dev);
+
+ return ret;
+}
+
+static int
+i40e_dev_rxtx_init(struct i40e_pf *pf)
+{
+ int err;
+
+ err = i40e_dev_tx_init(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do TX initialization");
+ return err;
+ }
+ err = i40e_dev_rx_init(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do RX initialization");
+ return err;
+ }
+
+ return err;
+}
+
+static int
+i40e_vmdq_setup(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int i, err, conf_vsis, j, loop;
+ struct i40e_vsi *vsi;
+ struct i40e_vmdq_info *vmdq_info;
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ /*
+ * Disable interrupt to avoid message from VF. Furthermore, it will
+ * avoid race condition in VSI creation/destroy.
+ */
+ i40e_pf_disable_irq0(hw);
+
+ if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
+ PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
+ return -ENOTSUP;
+ }
+
+ conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
+ if (conf_vsis > pf->max_nb_vmdq_vsi) {
+ PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
+ conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
+ pf->max_nb_vmdq_vsi);
+ return -ENOTSUP;
+ }
+
+ if (pf->vmdq != NULL) {
+ PMD_INIT_LOG(INFO, "VMDQ already configured");
+ return 0;
+ }
+
+ pf->vmdq = rte_zmalloc("vmdq_info_struct",
+ sizeof(*vmdq_info) * conf_vsis, 0);
+
+ if (pf->vmdq == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory");
+ return -ENOMEM;
+ }
+
+ vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
+
+ /* Create VMDQ VSI */
+ for (i = 0; i < conf_vsis; i++) {
+ vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
+ vmdq_conf->enable_loop_back);
+ if (vsi == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
+ err = -1;
+ goto err_vsi_setup;
+ }
+ vmdq_info = &pf->vmdq[i];
+ vmdq_info->pf = pf;
+ vmdq_info->vsi = vsi;
+ }
+ pf->nb_cfg_vmdq_vsi = conf_vsis;
+
+ /* Configure Vlan */
+ loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
+ for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
+ for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
+ if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
+ PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
+ vmdq_conf->pool_map[i].vlan_id, j);
+
+ err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
+ vmdq_conf->pool_map[i].vlan_id);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Failed to add vlan");
+ err = -1;
+ goto err_vsi_setup;
+ }
+ }
+ }
+ }
+
+ i40e_pf_enable_irq0(hw);
+
+ return 0;
+
+err_vsi_setup:
+ for (i = 0; i < conf_vsis; i++)
+ if (pf->vmdq[i].vsi == NULL)
+ break;
+ else
+ i40e_vsi_release(pf->vmdq[i].vsi);
+
+ rte_free(pf->vmdq);
+ pf->vmdq = NULL;
+ i40e_pf_enable_irq0(hw);
+ return err;
+}
+
+static void
+i40e_stat_update_32(struct i40e_hw *hw,
+ uint32_t reg,
+ bool offset_loaded,
+ uint64_t *offset,
+ uint64_t *stat)
+{
+ uint64_t new_data;
+
+ new_data = (uint64_t)I40E_READ_REG(hw, reg);
+ if (!offset_loaded)
+ *offset = new_data;
+
+ if (new_data >= *offset)
+ *stat = (uint64_t)(new_data - *offset);
+ else
+ *stat = (uint64_t)((new_data +
+ ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
+}
+
+static void
+i40e_stat_update_48(struct i40e_hw *hw,
+ uint32_t hireg,
+ uint32_t loreg,
+ bool offset_loaded,
+ uint64_t *offset,
+ uint64_t *stat)
+{
+ uint64_t new_data;
+
+ new_data = (uint64_t)I40E_READ_REG(hw, loreg);
+ new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
+ I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
+
+ if (!offset_loaded)
+ *offset = new_data;
+
+ if (new_data >= *offset)
+ *stat = new_data - *offset;
+ else
+ *stat = (uint64_t)((new_data +
+ ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
+
+ *stat &= I40E_48_BIT_MASK;
+}
+
+/* Disable IRQ0 */
+void
+i40e_pf_disable_irq0(struct i40e_hw *hw)
+{
+ /* Disable all interrupt types */
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
+ I40E_WRITE_FLUSH(hw);
+}
+
+/* Enable IRQ0 */
+void
+i40e_pf_enable_irq0(struct i40e_hw *hw)
+{
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
+ I40E_WRITE_FLUSH(hw);
+}
+
+static void
+i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
+{
+ /* read pending request and disable first */
+ i40e_pf_disable_irq0(hw);
+ I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
+ I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
+ I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
+
+ if (no_queue)
+ /* Link no queues with irq0 */
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+ I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
+}
+
+static void
+i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int i;
+ uint16_t abs_vf_id;
+ uint32_t index, offset, val;
+
+ if (!pf->vfs)
+ return;
+ /**
+ * Try to find which VF trigger a reset, use absolute VF id to access
+ * since the reg is global register.
+ */
+ for (i = 0; i < pf->vf_num; i++) {
+ abs_vf_id = hw->func_caps.vf_base_id + i;
+ index = abs_vf_id / I40E_UINT32_BIT_SIZE;
+ offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
+ val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
+ /* VFR event occurred */
+ if (val & (0x1 << offset)) {
+ int ret;
+
+ /* Clear the event first */
+ I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
+ (0x1 << offset));
+ PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
+ /**
+ * Only notify a VF reset event occurred,
+ * don't trigger another SW reset
+ */
+ ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to do VF reset");
+ }
+ }
+}
+
+static void
+i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < pf->vf_num; i++)
+ i40e_notify_vf_link_status(dev, &pf->vfs[i]);
+}
+
+static void
+i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_arq_event_info info;
+ uint16_t pending, opcode;
+ int ret;
+
+ info.buf_len = I40E_AQ_BUF_SZ;
+ info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
+ if (!info.msg_buf) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mem");
+ return;
+ }
+
+ pending = 1;
+ while (pending) {
+ ret = i40e_clean_arq_element(hw, &info, &pending);
+
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO,
+ "Failed to read msg from AdminQ, aq_err: %u",
+ hw->aq.asq_last_status);
+ break;
+ }
+ opcode = rte_le_to_cpu_16(info.desc.opcode);
+
+ switch (opcode) {
+ case i40e_aqc_opc_send_msg_to_pf:
+ /* Refer to i40e_aq_send_msg_to_pf() for argument layout*/
+ i40e_pf_host_handle_vf_msg(dev,
+ rte_le_to_cpu_16(info.desc.retval),
+ rte_le_to_cpu_32(info.desc.cookie_high),
+ rte_le_to_cpu_32(info.desc.cookie_low),
+ info.msg_buf,
+ info.msg_len);
+ break;
+ case i40e_aqc_opc_get_link_status:
+ ret = i40e_dev_link_update(dev, 0);
+ if (!ret)
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
+ opcode);
+ break;
+ }
+ }
+ rte_free(info.msg_buf);
+}
+
+/**
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) regsitered before.
+ *
+ * @return
+ * void
+ */
+static void
+i40e_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t icr0;
+
+ /* Disable interrupt */
+ i40e_pf_disable_irq0(hw);
+
+ /* read out interrupt causes */
+ icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
+
+ /* No interrupt event indicated */
+ if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
+ PMD_DRV_LOG(INFO, "No interrupt event");
+ goto done;
+ }
+ if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
+ if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: global reset requested");
+ if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
+ if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: HMC error");
+ if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
+
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+ PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
+ i40e_dev_handle_vfr_event(dev);
+ }
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ PMD_DRV_LOG(INFO, "ICR0: adminq event");
+ i40e_dev_handle_aq_msg(dev);
+ }
+
+done:
+ /* Enable interrupt */
+ i40e_pf_enable_irq0(hw);
+ rte_intr_enable(dev->intr_handle);
+}
+
+int
+i40e_add_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total)
+{
+ int ele_num, ele_buff_size;
+ int num, actual_num, i;
+ uint16_t flags;
+ int ret = I40E_SUCCESS;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_add_macvlan_element_data *req_list;
+
+ if (filter == NULL || total == 0)
+ return I40E_ERR_PARAM;
+ ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
+ ele_buff_size = hw->aq.asq_buf_size;
+
+ req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
+ if (req_list == NULL) {
+ PMD_DRV_LOG(ERR, "Fail to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ num = 0;
+ do {
+ actual_num = (num + ele_num > total) ? (total - num) : ele_num;
+ memset(req_list, 0, ele_buff_size);
+
+ for (i = 0; i < actual_num; i++) {
+ rte_memcpy(req_list[i].mac_addr,
+ &filter[num + i].macaddr, ETH_ADDR_LEN);
+ req_list[i].vlan_tag =
+ rte_cpu_to_le_16(filter[num + i].vlan_id);
+
+ switch (filter[num + i].filter_type) {
+ case RTE_MAC_PERFECT_MATCH:
+ flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ break;
+ case RTE_MACVLAN_PERFECT_MATCH:
+ flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+ break;
+ case RTE_MAC_HASH_MATCH:
+ flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
+ I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ break;
+ case RTE_MACVLAN_HASH_MATCH:
+ flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid MAC match type");
+ ret = I40E_ERR_PARAM;
+ goto DONE;
+ }
+
+ req_list[i].queue_number = 0;
+
+ req_list[i].flags = rte_cpu_to_le_16(flags);
+ }
+
+ ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
+ actual_num, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
+ goto DONE;
+ }
+ num += actual_num;
+ } while (num < total);
+
+DONE:
+ rte_free(req_list);
+ return ret;
+}
+
+int
+i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total)
+{
+ int ele_num, ele_buff_size;
+ int num, actual_num, i;
+ uint16_t flags;
+ int ret = I40E_SUCCESS;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_remove_macvlan_element_data *req_list;
+
+ if (filter == NULL || total == 0)
+ return I40E_ERR_PARAM;
+
+ ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
+ ele_buff_size = hw->aq.asq_buf_size;
+
+ req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
+ if (req_list == NULL) {
+ PMD_DRV_LOG(ERR, "Fail to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ num = 0;
+ do {
+ actual_num = (num + ele_num > total) ? (total - num) : ele_num;
+ memset(req_list, 0, ele_buff_size);
+
+ for (i = 0; i < actual_num; i++) {
+ rte_memcpy(req_list[i].mac_addr,
+ &filter[num + i].macaddr, ETH_ADDR_LEN);
+ req_list[i].vlan_tag =
+ rte_cpu_to_le_16(filter[num + i].vlan_id);
+
+ switch (filter[num + i].filter_type) {
+ case RTE_MAC_PERFECT_MATCH:
+ flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ break;
+ case RTE_MACVLAN_PERFECT_MATCH:
+ flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ break;
+ case RTE_MAC_HASH_MATCH:
+ flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ break;
+ case RTE_MACVLAN_HASH_MATCH:
+ flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid MAC filter type");
+ ret = I40E_ERR_PARAM;
+ goto DONE;
+ }
+ req_list[i].flags = rte_cpu_to_le_16(flags);
+ }
+
+ ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
+ actual_num, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
+ goto DONE;
+ }
+ num += actual_num;
+ } while (num < total);
+
+DONE:
+ rte_free(req_list);
+ return ret;
+}
+
+/* Find out specific MAC filter */
+static struct i40e_mac_filter *
+i40e_find_mac_filter(struct i40e_vsi *vsi,
+ struct ether_addr *macaddr)
+{
+ struct i40e_mac_filter *f;
+
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
+ return f;
+ }
+
+ return NULL;
+}
+
+static bool
+i40e_find_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id)
+{
+ uint32_t vid_idx, vid_bit;
+
+ if (vlan_id > ETH_VLAN_ID_MAX)
+ return 0;
+
+ vid_idx = I40E_VFTA_IDX(vlan_id);
+ vid_bit = I40E_VFTA_BIT(vlan_id);
+
+ if (vsi->vfta[vid_idx] & vid_bit)
+ return 1;
+ else
+ return 0;
+}
+
+static void
+i40e_store_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id, bool on)
+{
+ uint32_t vid_idx, vid_bit;
+
+ vid_idx = I40E_VFTA_IDX(vlan_id);
+ vid_bit = I40E_VFTA_BIT(vlan_id);
+
+ if (on)
+ vsi->vfta[vid_idx] |= vid_bit;
+ else
+ vsi->vfta[vid_idx] &= ~vid_bit;
+}
+
+void
+i40e_set_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id, bool on)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
+ int ret;
+
+ if (vlan_id > ETH_VLAN_ID_MAX)
+ return;
+
+ i40e_store_vlan_filter(vsi, vlan_id, on);
+
+ if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
+ return;
+
+ vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
+
+ if (on) {
+ ret = i40e_aq_add_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to add vlan filter");
+ } else {
+ ret = i40e_aq_remove_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR,
+ "Failed to remove vlan filter");
+ }
+}
+
+/**
+ * Find all vlan options for specific mac addr,
+ * return with actual vlan found.
+ */
+int
+i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num, struct ether_addr *addr)
+{
+ int i;
+ uint32_t j, k;
+
+ /**
+ * Not to use i40e_find_vlan_filter to decrease the loop time,
+ * although the code looks complex.
+ */
+ if (num < vsi->vlan_num)
+ return I40E_ERR_PARAM;
+
+ i = 0;
+ for (j = 0; j < I40E_VFTA_SIZE; j++) {
+ if (vsi->vfta[j]) {
+ for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+ if (vsi->vfta[j] & (1 << k)) {
+ if (i > num - 1) {
+ PMD_DRV_LOG(ERR,
+ "vlan number doesn't match");
+ return I40E_ERR_PARAM;
+ }
+ rte_memcpy(&mv_f[i].macaddr,
+ addr, ETH_ADDR_LEN);
+ mv_f[i].vlan_id =
+ j * I40E_UINT32_BIT_SIZE + k;
+ i++;
+ }
+ }
+ }
+ }
+ return I40E_SUCCESS;
+}
+
+static inline int
+i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num,
+ uint16_t vlan)
+{
+ int i = 0;
+ struct i40e_mac_filter *f;
+
+ if (num < vsi->mac_num)
+ return I40E_ERR_PARAM;
+
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ if (i > num - 1) {
+ PMD_DRV_LOG(ERR, "buffer number not match");
+ return I40E_ERR_PARAM;
+ }
+ rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ mv_f[i].vlan_id = vlan;
+ mv_f[i].filter_type = f->mac_info.filter_type;
+ i++;
+ }
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
+{
+ int i, j, num;
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int ret = I40E_SUCCESS;
+
+ if (vsi == NULL || vsi->mac_num == 0)
+ return I40E_ERR_PARAM;
+
+ /* Case that no vlan is set */
+ if (vsi->vlan_num == 0)
+ num = vsi->mac_num;
+ else
+ num = vsi->mac_num * vsi->vlan_num;
+
+ mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ i = 0;
+ if (vsi->vlan_num == 0) {
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ rte_memcpy(&mv_f[i].macaddr,
+ &f->mac_info.mac_addr, ETH_ADDR_LEN);
+ mv_f[i].filter_type = f->mac_info.filter_type;
+ mv_f[i].vlan_id = 0;
+ i++;
+ }
+ } else {
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
+ vsi->vlan_num, &f->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+ for (j = i; j < i + vsi->vlan_num; j++)
+ mv_f[j].filter_type = f->mac_info.filter_type;
+ i += vsi->vlan_num;
+ }
+ }
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
+DONE:
+ rte_free(mv_f);
+
+ return ret;
+}
+
+int
+i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
+{
+ struct i40e_macvlan_filter *mv_f;
+ int mac_num;
+ int ret = I40E_SUCCESS;
+
+ if (!vsi || vlan > ETHER_MAX_VLAN_ID)
+ return I40E_ERR_PARAM;
+
+ /* If it's already set, just return */
+ if (i40e_find_vlan_filter(vsi,vlan))
+ return I40E_SUCCESS;
+
+ mac_num = vsi->mac_num;
+
+ if (mac_num == 0) {
+ PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
+ return I40E_ERR_PARAM;
+ }
+
+ mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
+
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
+
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
+
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ i40e_set_vlan_filter(vsi, vlan, 1);
+
+ vsi->vlan_num++;
+ ret = I40E_SUCCESS;
+DONE:
+ rte_free(mv_f);
+ return ret;
+}
+
+int
+i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
+{
+ struct i40e_macvlan_filter *mv_f;
+ int mac_num;
+ int ret = I40E_SUCCESS;
+
+ /**
+ * Vlan 0 is the generic filter for untagged packets
+ * and can't be removed.
+ */
+ if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
+ return I40E_ERR_PARAM;
+
+ /* If can't find it, just return */
+ if (!i40e_find_vlan_filter(vsi, vlan))
+ return I40E_ERR_PARAM;
+
+ mac_num = vsi->mac_num;
+
+ if (mac_num == 0) {
+ PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
+ return I40E_ERR_PARAM;
+ }
+
+ mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
+
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
+
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
+
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ /* This is last vlan to remove, replace all mac filter with vlan 0 */
+ if (vsi->vlan_num == 1) {
+ ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+ }
+
+ i40e_set_vlan_filter(vsi, vlan, 0);
+
+ vsi->vlan_num--;
+ ret = I40E_SUCCESS;
+DONE:
+ rte_free(mv_f);
+ return ret;
+}
+
+int
+i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num = 0;
+ int ret = I40E_SUCCESS;
+
+ /* If it's add and we've config it, return */
+ f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
+ if (f != NULL)
+ return I40E_SUCCESS;
+ if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
+ (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
+
+ /**
+ * If vlan_num is 0, that's the first time to add mac,
+ * set mask for vlan_id 0.
+ */
+ if (vsi->vlan_num == 0) {
+ i40e_set_vlan_filter(vsi, 0, 1);
+ vsi->vlan_num = 1;
+ }
+ vlan_num = vsi->vlan_num;
+ } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
+ (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = mac_filter->filter_type;
+ rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
+ ETH_ADDR_LEN);
+ }
+
+ if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+ &mac_filter->mac_addr);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+ }
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ /* Add the mac addr into mac list */
+ f = rte_zmalloc("macv_filter", sizeof(*f), 0);
+ if (f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ ret = I40E_ERR_NO_MEMORY;
+ goto DONE;
+ }
+ rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
+ ETH_ADDR_LEN);
+ f->mac_info.filter_type = mac_filter->filter_type;
+ TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
+ vsi->mac_num++;
+
+ ret = I40E_SUCCESS;
+DONE:
+ rte_free(mv_f);
+
+ return ret;
+}
+
+int
+i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num;
+ enum rte_mac_filter_type filter_type;
+ int ret = I40E_SUCCESS;
+
+ /* Can't find it, return an error */
+ f = i40e_find_mac_filter(vsi, addr);
+ if (f == NULL)
+ return I40E_ERR_PARAM;
+
+ vlan_num = vsi->vlan_num;
+ filter_type = f->mac_info.filter_type;
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ if (vlan_num == 0) {
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
+ return I40E_ERR_PARAM;
+ }
+ } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
+ filter_type == RTE_MAC_HASH_MATCH)
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = filter_type;
+ rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ }
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+ }
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ /* Remove the mac addr into mac list */
+ TAILQ_REMOVE(&vsi->mac_list, f, next);
+ rte_free(f);
+ vsi->mac_num--;
+
+ ret = I40E_SUCCESS;
+DONE:
+ rte_free(mv_f);
+ return ret;
+}
+
+/* Configure hash enable flags for RSS */
+uint64_t
+i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
+{
+ uint64_t hena = 0;
+ int i;
+
+ if (!flags)
+ return hena;
+
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
+ if (flags & (1ULL << i))
+ hena |= adapter->pctypes_tbl[i];
+ }
+
+ return hena;
+}
+
+/* Parse the hash enable flags */
+uint64_t
+i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
+{
+ uint64_t rss_hf = 0;
+
+ if (!flags)
+ return rss_hf;
+ int i;
+
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
+ if (flags & adapter->pctypes_tbl[i])
+ rss_hf |= (1ULL << i);
+ }
+ return rss_hf;
+}
+
+/* Disable RSS */
+static void
+i40e_pf_disable_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
+ I40E_WRITE_FLUSH(hw);
+}
+
+int
+i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
+{
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
+ I40E_VFQF_HKEY_MAX_INDEX :
+ I40E_PFQF_HKEY_MAX_INDEX;
+ int ret = 0;
+
+ if (!key || key_len == 0) {
+ PMD_DRV_LOG(DEBUG, "No key to be configured");
+ return 0;
+ } else if (key_len != (key_idx + 1) *
+ sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
+ return -EINVAL;
+ }
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ struct i40e_aqc_get_set_rss_key_data *key_dw =
+ (struct i40e_aqc_get_set_rss_key_data *)key;
+
+ ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
+ } else {
+ uint32_t *hash_key = (uint32_t *)key;
+ uint16_t i;
+
+ if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ I40E_WRITE_REG(
+ hw,
+ I40E_VFQF_HKEY1(i, vsi->user_param),
+ hash_key[i]);
+
+ } else {
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
+ hash_key[i]);
+ }
+ I40E_WRITE_FLUSH(hw);
+ }
+
+ return ret;
+}
+
+static int
+i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
+{
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint32_t reg;
+ int ret;
+
+ if (!key || !key_len)
+ return -EINVAL;
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
+ (struct i40e_aqc_get_set_rss_key_data *)key);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
+ return ret;
+ }
+ } else {
+ uint32_t *key_dw = (uint32_t *)key;
+ uint16_t i;
+
+ if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
+ reg = I40E_VFQF_HKEY1(i, vsi->user_param);
+ key_dw[i] = i40e_read_rx_ctl(hw, reg);
+ }
+ *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ } else {
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
+ reg = I40E_PFQF_HKEY(i);
+ key_dw[i] = i40e_read_rx_ctl(hw, reg);
+ }
+ *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
+ }
+ return 0;
+}
+
+static int
+i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint64_t hena;
+ int ret;
+
+ ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+ if (ret)
+ return ret;
+
+ hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
+ uint64_t hena;
+
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+
+ if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
+ if (rss_hf != 0) /* Enable RSS */
+ return -EINVAL;
+ return 0; /* Nothing to do */
+ }
+ /* RSS enabled */
+ if (rss_hf == 0) /* Disable RSS */
+ return -EINVAL;
+
+ return i40e_hw_rss_hash_set(pf, rss_conf);
+}
+
+static int
+i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t hena;
+
+ i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
+ &rss_conf->rss_key_len);
+
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+ rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
+
+ return 0;
+}
+
+static int
+i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
+{
+ switch (filter_type) {
+ case RTE_TUNNEL_FILTER_IMAC_IVLAN:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+ break;
+ case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+ break;
+ case RTE_TUNNEL_FILTER_IMAC_TENID:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
+ break;
+ case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
+ break;
+ case ETH_TUNNEL_FILTER_IMAC:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
+ break;
+ case ETH_TUNNEL_FILTER_OIP:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
+ break;
+ case ETH_TUNNEL_FILTER_IIP:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid tunnel filter type");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Convert tunnel filter structure */
+static int
+i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
+ (struct ether_addr *)&tunnel_filter->input.outer_mac);
+ ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
+ (struct ether_addr *)&tunnel_filter->input.inner_mac);
+ tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
+ if ((rte_le_to_cpu_16(cld_filter->element.flags) &
+ I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
+ I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
+ tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ else
+ tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ tunnel_filter->input.flags = cld_filter->element.flags;
+ tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
+ tunnel_filter->queue = cld_filter->element.queue_number;
+ rte_memcpy(tunnel_filter->input.general_fields,
+ cld_filter->general_fields,
+ sizeof(cld_filter->general_fields));
+
+ return 0;
+}
+
+/* Check if there exists the tunnel filter */
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return tunnel_rule->hash_map[ret];
+}
+
+/* Add a tunnel filter into the SW list */
+static int
+i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = tunnel_filter;
+
+ TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
+
+ return 0;
+}
+
+/* Delete a tunnel filter from the SW list */
+int
+i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel_filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ tunnel_filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
+ rte_free(tunnel_filter);
+
+ return 0;
+}
+
+int
+i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add)
+{
+ uint16_t ip_type;
+ uint32_t ipv4_addr, ipv4_addr_le;
+ uint8_t i, tun_type = 0;
+ /* internal varialbe to convert ipv6 byte order */
+ uint32_t convert_ipv6[4];
+ int val, ret = 0;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
+
+ cld_filter = rte_zmalloc("tunnel_filter",
+ sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
+ 0);
+
+ if (NULL == cld_filter) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -ENOMEM;
+ }
+ pfilter = cld_filter;
+
+ ether_addr_copy(&tunnel_filter->outer_mac,
+ (struct ether_addr *)&pfilter->element.outer_mac);
+ ether_addr_copy(&tunnel_filter->inner_mac,
+ (struct ether_addr *)&pfilter->element.inner_mac);
+
+ pfilter->element.inner_vlan =
+ rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+ if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+ ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+ ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
+ rte_memcpy(&pfilter->element.ipaddr.v4.data,
+ &ipv4_addr_le,
+ sizeof(pfilter->element.ipaddr.v4.data));
+ } else {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+ for (i = 0; i < 4; i++) {
+ convert_ipv6[i] =
+ rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
+ }
+ rte_memcpy(&pfilter->element.ipaddr.v6.data,
+ &convert_ipv6,
+ sizeof(pfilter->element.ipaddr.v6.data));
+ }
+
+ /* check tunneled type */
+ switch (tunnel_filter->tunnel_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
+ break;
+ case RTE_TUNNEL_TYPE_NVGRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
+ break;
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
+ break;
+ default:
+ /* Other tunnel types is not supported. */
+ PMD_DRV_LOG(ERR, "tunnel type is not supported.");
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
+ &pfilter->element.flags);
+ if (val < 0) {
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ pfilter->element.flags |= rte_cpu_to_le_16(
+ I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
+ ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
+ pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->element.queue_number =
+ rte_cpu_to_le_16(tunnel_filter->queue_id);
+
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ if (add) {
+ ret = i40e_aq_add_cloud_filters(hw,
+ vsi->seid, &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ rte_free(cld_filter);
+ return -ENOTSUP;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ if (tunnel == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ rte_free(cld_filter);
+ return -ENOMEM;
+ }
+
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ if (ret < 0)
+ rte_free(tunnel);
+ } else {
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ rte_free(cld_filter);
+ return -ENOTSUP;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
+
+ rte_free(cld_filter);
+ return ret;
+}
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
+#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4
+#define I40E_TR_GENEVE_KEY_MASK 0x8
+#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40
+#define I40E_TR_GRE_KEY_MASK 0x400
+#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800
+#define I40E_TR_GRE_NO_KEY_MASK 0x8000
+
+static enum
+i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
+ filter_replace.tr_bit = 0;
+
+ /* Prepare the buffer, 3 entries */
+ filter_replace_buf.data[0] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[2] = 0xFF;
+ filter_replace_buf.data[3] = 0xFF;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[7] = 0xF0;
+ filter_replace_buf.data[8]
+ = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
+ filter_replace_buf.data[8] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
+ I40E_TR_GENEVE_KEY_MASK |
+ I40E_TR_GENERIC_UDP_TUNNEL_MASK;
+ filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
+ I40E_TR_GRE_KEY_WITH_XSUM_MASK |
+ I40E_TR_GRE_NO_KEY_MASK) >> 8;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ return status;
+}
+
+static enum
+i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ /* For MPLSoUDP */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
+ I40E_AQC_MIRROR_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (status < 0)
+ return status;
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ /* For MPLSoGRE */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
+ I40E_AQC_MIRROR_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ return status;
+}
+
+static enum i40e_status_code
+i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ /* For GTP-C */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
+ filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[2] = 0xFF;
+ filter_replace_buf.data[3] = 0xFF;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[6] = 0xFF;
+ filter_replace_buf.data[7] = 0xFF;
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (status < 0)
+ return status;
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ /* for GTP-U */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
+ filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[2] = 0xFF;
+ filter_replace_buf.data[3] = 0xFF;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[6] = 0xFF;
+ filter_replace_buf.data[7] = 0xFF;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ return status;
+}
+
+static enum
+i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ /* for GTP-C */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (status < 0)
+ return status;
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ /* for GTP-U */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.old_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ return status;
+}
+
+int
+i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_conf *tunnel_filter,
+ uint8_t add)
+{
+ uint16_t ip_type;
+ uint32_t ipv4_addr, ipv4_addr_le;
+ uint8_t i, tun_type = 0;
+ /* internal variable to convert ipv6 byte order */
+ uint32_t convert_ipv6[4];
+ int val, ret = 0;
+ struct i40e_pf_vf *vf = NULL;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
+ uint32_t teid_le;
+ bool big_buffer = 0;
+
+ cld_filter = rte_zmalloc("tunnel_filter",
+ sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
+ 0);
+
+ if (cld_filter == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -ENOMEM;
+ }
+ pfilter = cld_filter;
+
+ ether_addr_copy(&tunnel_filter->outer_mac,
+ (struct ether_addr *)&pfilter->element.outer_mac);
+ ether_addr_copy(&tunnel_filter->inner_mac,
+ (struct ether_addr *)&pfilter->element.inner_mac);
+
+ pfilter->element.inner_vlan =
+ rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+ if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+ ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+ ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
+ rte_memcpy(&pfilter->element.ipaddr.v4.data,
+ &ipv4_addr_le,
+ sizeof(pfilter->element.ipaddr.v4.data));
+ } else {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+ for (i = 0; i < 4; i++) {
+ convert_ipv6[i] =
+ rte_cpu_to_le_32(rte_be_to_cpu_32(
+ tunnel_filter->ip_addr.ipv6_addr[i]));
+ }
+ rte_memcpy(&pfilter->element.ipaddr.v6.data,
+ &convert_ipv6,
+ sizeof(pfilter->element.ipaddr.v6.data));
+ }
+
+ /* check tunneled type */
+ switch (tunnel_filter->tunnel_type) {
+ case I40E_TUNNEL_TYPE_VXLAN:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
+ break;
+ case I40E_TUNNEL_TYPE_NVGRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
+ break;
+ case I40E_TUNNEL_TYPE_IP_IN_GRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
+ break;
+ case I40E_TUNNEL_TYPE_MPLSoUDP:
+ if (!pf->mpls_replace_flag) {
+ i40e_replace_mpls_l1_filter(pf);
+ i40e_replace_mpls_cloud_filter(pf);
+ pf->mpls_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
+ teid_le >> 4;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ (teid_le & 0xF) << 12;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
+ 0x40;
+ big_buffer = 1;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
+ break;
+ case I40E_TUNNEL_TYPE_MPLSoGRE:
+ if (!pf->mpls_replace_flag) {
+ i40e_replace_mpls_l1_filter(pf);
+ i40e_replace_mpls_cloud_filter(pf);
+ pf->mpls_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
+ teid_le >> 4;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ (teid_le & 0xF) << 12;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
+ 0x0;
+ big_buffer = 1;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
+ break;
+ case I40E_TUNNEL_TYPE_GTPC:
+ if (!pf->gtp_replace_flag) {
+ i40e_replace_gtp_l1_filter(pf);
+ i40e_replace_gtp_cloud_filter(pf);
+ pf->gtp_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
+ (teid_le >> 16) & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
+ teid_le & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
+ 0x0;
+ big_buffer = 1;
+ break;
+ case I40E_TUNNEL_TYPE_GTPU:
+ if (!pf->gtp_replace_flag) {
+ i40e_replace_gtp_l1_filter(pf);
+ i40e_replace_gtp_cloud_filter(pf);
+ pf->gtp_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
+ (teid_le >> 16) & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
+ teid_le & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
+ 0x0;
+ big_buffer = 1;
+ break;
+ case I40E_TUNNEL_TYPE_QINQ:
+ if (!pf->qinq_replace_flag) {
+ ret = i40e_cloud_filter_qinq_create(pf);
+ if (ret < 0)
+ PMD_DRV_LOG(DEBUG,
+ "QinQ tunnel filter already created.");
+ pf->qinq_replace_flag = 1;
+ }
+ /* Add in the General fields the values of
+ * the Outer and Inner VLAN
+ * Big Buffer should be set, see changes in
+ * i40e_aq_add_cloud_filters
+ */
+ pfilter->general_fields[0] = tunnel_filter->inner_vlan;
+ pfilter->general_fields[1] = tunnel_filter->outer_vlan;
+ big_buffer = 1;
+ break;
+ default:
+ /* Other tunnel types is not supported. */
+ PMD_DRV_LOG(ERR, "tunnel type is not supported.");
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
+ pfilter->element.flags |=
+ I40E_AQC_ADD_CLOUD_FILTER_0X10;
+ else {
+ val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
+ &pfilter->element.flags);
+ if (val < 0) {
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+ }
+
+ pfilter->element.flags |= rte_cpu_to_le_16(
+ I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
+ ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
+ pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->element.queue_number =
+ rte_cpu_to_le_16(tunnel_filter->queue_id);
+
+ if (!tunnel_filter->is_to_vf)
+ vsi = pf->main_vsi;
+ else {
+ if (tunnel_filter->vf_id >= pf->vf_num) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+ vf = &pf->vfs[tunnel_filter->vf_id];
+ vsi = vf->vsi;
+ }
+
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ check_filter.is_to_vf = tunnel_filter->is_to_vf;
+ check_filter.vf_id = tunnel_filter->vf_id;
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ if (add) {
+ if (big_buffer)
+ ret = i40e_aq_add_cloud_filters_big_buffer(hw,
+ vsi->seid, cld_filter, 1);
+ else
+ ret = i40e_aq_add_cloud_filters(hw,
+ vsi->seid, &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ rte_free(cld_filter);
+ return -ENOTSUP;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ if (tunnel == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ rte_free(cld_filter);
+ return -ENOMEM;
+ }
+
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ if (ret < 0)
+ rte_free(tunnel);
+ } else {
+ if (big_buffer)
+ ret = i40e_aq_remove_cloud_filters_big_buffer(
+ hw, vsi->seid, cld_filter, 1);
+ else
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ rte_free(cld_filter);
+ return -ENOTSUP;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
+
+ rte_free(cld_filter);
+ return ret;
+}
+
+static int
+i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
+{
+ uint8_t i;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->vxlan_ports[i] == port)
+ return i;
+ }
+
+ return -1;
+}
+
+static int
+i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
+{
+ int idx, ret;
+ uint8_t filter_idx;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx >= 0) {
+ PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
+ return -EINVAL;
+ }
+
+ /* Now check if there is space to add the new port */
+ idx = i40e_get_vxlan_port_idx(pf, 0);
+ if (idx < 0) {
+ PMD_DRV_LOG(ERR,
+ "Maximum number of UDP ports reached, not adding port %d",
+ port);
+ return -ENOSPC;
+ }
+
+ ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
+ &filter_idx, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
+ return -1;
+ }
+
+ PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
+ port, filter_idx);
+
+ /* New port: add it and mark its index in the bitmap */
+ pf->vxlan_ports[idx] = port;
+ pf->vxlan_bitmap |= (1 << idx);
+
+ if (!(pf->flags & I40E_FLAG_VXLAN))
+ pf->flags |= I40E_FLAG_VXLAN;
+
+ return 0;
+}
+
+static int
+i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
+{
+ int idx;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ if (!(pf->flags & I40E_FLAG_VXLAN)) {
+ PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
+ return -EINVAL;
+ }
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ if (idx < 0) {
+ PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
+ return -EINVAL;
+ }
+
+ if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
+ return -1;
+ }
+
+ PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
+ port, idx);
+
+ pf->vxlan_ports[idx] = 0;
+ pf->vxlan_bitmap &= ~(1 << idx);
+
+ if (!pf->vxlan_bitmap)
+ pf->flags &= ~I40E_FLAG_VXLAN;
+
+ return 0;
+}
+
+/* Add UDP tunneling port */
+static int
+i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ int ret = 0;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (udp_tunnel == NULL)
+ return -EINVAL;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
+ break;
+
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+ ret = -1;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/* Remove UDP tunneling port */
+static int
+i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ int ret = 0;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (udp_tunnel == NULL)
+ return -EINVAL;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+ ret = -1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/* Calculate the maximum number of contiguous PF queues that are configured */
+static int
+i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
+{
+ struct rte_eth_dev_data *data = pf->dev_data;
+ int i, num;
+ struct i40e_rx_queue *rxq;
+
+ num = 0;
+ for (i = 0; i < pf->lan_nb_qps; i++) {
+ rxq = data->rx_queues[i];
+ if (rxq && rxq->q_set)
+ num++;
+ else
+ break;
+ }
+
+ return num;
+}
+
+/* Configure RSS */
+static int
+i40e_pf_config_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_rss_conf rss_conf;
+ uint32_t i, lut = 0;
+ uint16_t j, num;
+
+ /*
+ * If both VMDQ and RSS enabled, not all of PF queues are configured.
+ * It's necessary to calculate the actual PF queues that are configured.
+ */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ num = i40e_pf_calc_configured_queues_num(pf);
+ else
+ num = pf->dev_data->nb_rx_queues;
+
+ num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
+ PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
+ num);
+
+ if (num == 0) {
+ PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
+ return -ENOTSUP;
+ }
+
+ for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == num)
+ j = 0;
+ lut = (lut << 8) | (j & ((0x1 <<
+ hw->func_caps.rss_table_entry_width) - 1));
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+
+ rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
+ i40e_pf_disable_rss(pf);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+ (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Random default keys */
+ static uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+ rss_conf.rss_key = (uint8_t *)rss_key_default;
+ rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
+
+ return i40e_hw_rss_hash_set(pf, &rss_conf);
+}
+
+static int
+i40e_tunnel_filter_param_check(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ if (pf == NULL || filter == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid parameter");
+ return -EINVAL;
+ }
+
+ if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue ID");
+ return -EINVAL;
+ }
+
+ if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
+ PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
+ return -EINVAL;
+ }
+
+ if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
+ (is_zero_ether_addr(&filter->outer_mac))) {
+ PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
+ return -EINVAL;
+ }
+
+ if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
+ (is_zero_ether_addr(&filter->inner_mac))) {
+ PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
+#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4))
+static int
+i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
+{
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
+ uint32_t val, reg;
+ int ret = -EINVAL;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
+ return -ENOTSUP;
+ }
+
+ val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
+ PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
+
+ if (len == 3) {
+ reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
+ } else if (len == 4) {
+ reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
+ } else {
+ PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
+ return ret;
+ }
+
+ if (reg != val) {
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GL_PRS_FVBM(2),
+ reg, NULL);
+ if (ret != 0)
+ return ret;
+ PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
+ "with value 0x%08x",
+ I40E_GL_PRS_FVBM(2), reg);
+ } else {
+ ret = 0;
+ }
+ PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
+ I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
+
+ return ret;
+}
+
+static int
+i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
+{
+ int ret = -EINVAL;
+
+ if (!hw || !cfg)
+ return -EINVAL;
+
+ switch (cfg->cfg_type) {
+ case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
+ ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = I40E_ERR_PARAM;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_SET:
+ ret = i40e_dev_global_config_set(hw,
+ (struct rte_eth_global_cfg *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct rte_eth_tunnel_filter_conf *filter;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret = I40E_SUCCESS;
+
+ filter = (struct rte_eth_tunnel_filter_conf *)(arg);
+
+ if (i40e_tunnel_filter_param_check(pf, filter) < 0)
+ return I40E_ERR_PARAM;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ if (!(pf->flags & I40E_FLAG_VXLAN))
+ ret = I40E_NOT_SUPPORTED;
+ break;
+ case RTE_ETH_FILTER_ADD:
+ ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+ ret = I40E_ERR_PARAM;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_pf_config_mq_rx(struct i40e_pf *pf)
+{
+ int ret = 0;
+ enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
+
+ /* RSS setup */
+ if (mq_mode & ETH_MQ_RX_RSS_FLAG)
+ ret = i40e_pf_config_rss(pf);
+ else
+ i40e_pf_disable_rss(pf);
+
+ return ret;
+}
+
+/* Get the symmetric hash enable configurations per port */
+static void
+i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
+{
+ uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
+
+ *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
+}
+
+/* Set the symmetric hash enable configurations per port */
+static void
+i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
+{
+ uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
+
+ if (enable > 0) {
+ if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
+ PMD_DRV_LOG(INFO,
+ "Symmetric hash has already been enabled");
+ return;
+ }
+ reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
+ } else {
+ if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
+ PMD_DRV_LOG(INFO,
+ "Symmetric hash has already been disabled");
+ return;
+ }
+ reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
+ }
+ i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
+ I40E_WRITE_FLUSH(hw);
+}
+
+/*
+ * Get global configurations of hash function type and symmetric hash enable
+ * per flow type (pctype). Note that global configuration means it affects all
+ * the ports on the same NIC.
+ */
+static int
+i40e_get_hash_filter_global_config(struct i40e_hw *hw,
+ struct rte_eth_hash_global_conf *g_cfg)
+{
+ struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
+ uint32_t reg;
+ uint16_t i, j;
+
+ memset(g_cfg, 0, sizeof(*g_cfg));
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+ if (reg & I40E_GLQF_CTL_HTOEP_MASK)
+ g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+ else
+ g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+ PMD_DRV_LOG(DEBUG, "Hash function is %s",
+ (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
+
+ /*
+ * As i40e supports less than 64 flow types, only first 64 bits need to
+ * be checked.
+ */
+ for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
+ g_cfg->valid_bit_mask[i] = 0ULL;
+ g_cfg->sym_hash_enable_mask[i] = 0ULL;
+ }
+
+ g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
+
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
+ if (!adapter->pctypes_tbl[i])
+ continue;
+ for (j = I40E_FILTER_PCTYPE_INVALID + 1;
+ j < I40E_FILTER_PCTYPE_MAX; j++) {
+ if (adapter->pctypes_tbl[i] & (1ULL << j)) {
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
+ if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
+ g_cfg->sym_hash_enable_mask[0] |=
+ (1ULL << i);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_hash_global_config_check(const struct i40e_adapter *adapter,
+ const struct rte_eth_hash_global_conf *g_cfg)
+{
+ uint32_t i;
+ uint64_t mask0, i40e_mask = adapter->flow_types_mask;
+
+ if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
+ g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
+ g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
+ PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
+ g_cfg->hash_func);
+ return -EINVAL;
+ }
+
+ /*
+ * As i40e supports less than 64 flow types, only first 64 bits need to
+ * be checked.
+ */
+ mask0 = g_cfg->valid_bit_mask[0];
+ for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
+ if (i == 0) {
+ /* Check if any unsupported flow type configured */
+ if ((mask0 | i40e_mask) ^ i40e_mask)
+ goto mask_err;
+ } else {
+ if (g_cfg->valid_bit_mask[i])
+ goto mask_err;
+ }
+ }
+
+ return 0;
+
+mask_err:
+ PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
+
+ return -EINVAL;
+}
+
+/*
+ * Set global configurations of hash function type and symmetric hash enable
+ * per flow type (pctype). Note any modifying global configuration will affect
+ * all the ports on the same NIC.
+ */
+static int
+i40e_set_hash_filter_global_config(struct i40e_hw *hw,
+ struct rte_eth_hash_global_conf *g_cfg)
+{
+ struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
+ int ret;
+ uint16_t i, j;
+ uint32_t reg;
+ uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
+ return -ENOTSUP;
+ }
+
+ /* Check the input parameters */
+ ret = i40e_hash_global_config_check(adapter, g_cfg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * As i40e supports less than 64 flow types, only first 64 bits need to
+ * be configured.
+ */
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
+ if (mask0 & (1UL << i)) {
+ reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
+ I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
+
+ for (j = I40E_FILTER_PCTYPE_INVALID + 1;
+ j < I40E_FILTER_PCTYPE_MAX; j++) {
+ if (adapter->pctypes_tbl[i] & (1ULL << j))
+ i40e_write_global_rx_ctl(hw,
+ I40E_GLQF_HSYM(j),
+ reg);
+ }
+ }
+ }
+
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+ if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
+ /* Toeplitz */
+ if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
+ PMD_DRV_LOG(DEBUG,
+ "Hash function already set to Toeplitz");
+ goto out;
+ }
+ reg |= I40E_GLQF_CTL_HTOEP_MASK;
+ } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+ /* Simple XOR */
+ if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
+ PMD_DRV_LOG(DEBUG,
+ "Hash function already set to Simple XOR");
+ goto out;
+ }
+ reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
+ } else
+ /* Use the default, and keep it as it is */
+ goto out;
+
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
+
+out:
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * Valid input sets for hash and flow director filters per PCTYPE
+ */
+static uint64_t
+i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
+ enum rte_filter_type filter)
+{
+ uint64_t valid;
+
+ static const uint64_t valid_hash_inset_table[] = {
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
+ I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
+ I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
+ I40E_INSET_FLEX_PAYLOAD,
+ };
+
+ /**
+ * Flow director supports only fields defined in
+ * union rte_eth_fdir_flow.
+ */
+ static const uint64_t valid_fdir_inset_table[] = {
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
+ I40E_INSET_IPV4_TTL,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
+ I40E_INSET_IPV4_TTL,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT,
+ [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_LAST_ETHER_TYPE,
+ };
+
+ if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
+ return 0;
+ if (filter == RTE_ETH_FILTER_HASH)
+ valid = valid_hash_inset_table[pctype];
+ else
+ valid = valid_fdir_inset_table[pctype];
+
+ return valid;
+}
+
+/**
+ * Validate if the input set is allowed for a specific PCTYPE
+ */
+int
+i40e_validate_input_set(enum i40e_filter_pctype pctype,
+ enum rte_filter_type filter, uint64_t inset)
+{
+ uint64_t valid;
+
+ valid = i40e_get_valid_input_set(pctype, filter);
+ if (inset & (~valid))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* default input set fields combination per pctype */
+uint64_t
+i40e_get_default_input_set(uint16_t pctype)
+{
+ static const uint64_t default_inset_table[] = {
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
+ [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
+ I40E_INSET_LAST_ETHER_TYPE,
+ };
+
+ if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
+ return 0;
+
+ return default_inset_table[pctype];
+}
+
+/**
+ * Parse the input set from index to logical bit masks
+ */
+static int
+i40e_parse_input_set(uint64_t *inset,
+ enum i40e_filter_pctype pctype,
+ enum rte_eth_input_set_field *field,
+ uint16_t size)
+{
+ uint16_t i, j;
+ int ret = -EINVAL;
+
+ static const struct {
+ enum rte_eth_input_set_field field;
+ uint64_t inset;
+ } inset_convert_table[] = {
+ {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
+ {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
+ {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
+ {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
+ {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
+ {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
+ {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
+ {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
+ {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
+ {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
+ {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
+ {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
+ {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
+ {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
+ {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
+ I40E_INSET_IPV6_NEXT_HDR},
+ {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
+ I40E_INSET_IPV6_HOP_LIMIT},
+ {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
+ {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
+ {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
+ {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
+ {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
+ {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
+ {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
+ I40E_INSET_SCTP_VT},
+ {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
+ I40E_INSET_TUNNEL_DMAC},
+ {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
+ I40E_INSET_VLAN_TUNNEL},
+ {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
+ I40E_INSET_TUNNEL_ID},
+ {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W1},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W2},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W3},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W4},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W5},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W6},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W7},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W8},
+ };
+
+ if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
+ return ret;
+
+ /* Only one item allowed for default or all */
+ if (size == 1) {
+ if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
+ *inset = i40e_get_default_input_set(pctype);
+ return 0;
+ } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
+ *inset = I40E_INSET_NONE;
+ return 0;
+ }
+ }
+
+ for (i = 0, *inset = 0; i < size; i++) {
+ for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
+ if (field[i] == inset_convert_table[j].field) {
+ *inset |= inset_convert_table[j].inset;
+ break;
+ }
+ }
+
+ /* It contains unsupported input set, return immediately */
+ if (j == RTE_DIM(inset_convert_table))
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * Translate the input set from bit masks to register aware bit masks
+ * and vice versa
+ */
+uint64_t
+i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
+{
+ uint64_t val = 0;
+ uint16_t i;
+
+ struct inset_map {
+ uint64_t inset;
+ uint64_t inset_reg;
+ };
+
+ static const struct inset_map inset_map_common[] = {
+ {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
+ {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
+ {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
+ {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
+ {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
+ {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
+ {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
+ {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
+ {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
+ {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
+ {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
+ {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
+ {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
+ {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
+ {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
+ {I40E_INSET_TUNNEL_DMAC,
+ I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
+ {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
+ {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
+ {I40E_INSET_TUNNEL_SRC_PORT,
+ I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
+ {I40E_INSET_TUNNEL_DST_PORT,
+ I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
+ {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
+ {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
+ {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
+ {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
+ {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
+ {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
+ {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
+ {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
+ {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
+ };
+
+ /* some different registers map in x722*/
+ static const struct inset_map inset_map_diff_x722[] = {
+ {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
+ {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
+ {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
+ {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
+ };
+
+ static const struct inset_map inset_map_diff_not_x722[] = {
+ {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
+ {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
+ {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
+ {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
+ };
+
+ if (input == 0)
+ return val;
+
+ /* Translate input set to register aware inset */
+ if (type == I40E_MAC_X722) {
+ for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
+ if (input & inset_map_diff_x722[i].inset)
+ val |= inset_map_diff_x722[i].inset_reg;
+ }
+ } else {
+ for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
+ if (input & inset_map_diff_not_x722[i].inset)
+ val |= inset_map_diff_not_x722[i].inset_reg;
+ }
+ }
+
+ for (i = 0; i < RTE_DIM(inset_map_common); i++) {
+ if (input & inset_map_common[i].inset)
+ val |= inset_map_common[i].inset_reg;
+ }
+
+ return val;
+}
+
+int
+i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
+{
+ uint8_t i, idx = 0;
+ uint64_t inset_need_mask = inset;
+
+ static const struct {
+ uint64_t inset;
+ uint32_t mask;
+ } inset_mask_map[] = {
+ {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
+ {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
+ {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
+ {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
+ {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
+ {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
+ {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
+ {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
+ };
+
+ if (!inset || !mask || !nb_elem)
+ return 0;
+
+ for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
+ /* Clear the inset bit, if no MASK is required,
+ * for example proto + ttl
+ */
+ if ((inset & inset_mask_map[i].inset) ==
+ inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
+ inset_need_mask &= ~inset_mask_map[i].inset;
+ if (!inset_need_mask)
+ return 0;
+ }
+ for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
+ if ((inset_need_mask & inset_mask_map[i].inset) ==
+ inset_mask_map[i].inset) {
+ if (idx >= nb_elem) {
+ PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
+ return -EINVAL;
+ }
+ mask[idx] = inset_mask_map[i].mask;
+ idx++;
+ }
+ }
+
+ return idx;
+}
+
+void
+i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
+{
+ uint32_t reg = i40e_read_rx_ctl(hw, addr);
+
+ PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
+ if (reg != val)
+ i40e_write_rx_ctl(hw, addr, val);
+ PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
+ (uint32_t)i40e_read_rx_ctl(hw, addr));
+}
+
+void
+i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
+{
+ uint32_t reg = i40e_read_rx_ctl(hw, addr);
+ struct rte_eth_dev *dev;
+
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ if (reg != val) {
+ i40e_write_rx_ctl(hw, addr, val);
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%08x, new: 0x%08x",
+ dev->device->name, addr, reg,
+ (uint32_t)i40e_read_rx_ctl(hw, addr));
+ }
+}
+
+static void
+i40e_filter_input_set_init(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set, inset_reg;
+ uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
+ int num, i;
+ uint16_t flow_type;
+
+ for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
+ flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
+
+ if (flow_type == RTE_ETH_FLOW_UNKNOWN)
+ continue;
+
+ input_set = i40e_get_default_input_set(pctype);
+
+ num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+ I40E_INSET_MASK_NUM_REG);
+ if (num < 0)
+ return;
+ if (pf->support_multi_driver && num > 0) {
+ PMD_DRV_LOG(ERR, "Input set setting is not supported.");
+ return;
+ }
+ inset_reg = i40e_translate_input_set_reg(hw->mac.type,
+ input_set);
+
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+ if (!pf->support_multi_driver) {
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+ for (i = 0; i < num; i++) {
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
+ }
+ /*clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ 0);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_MSK(i, pctype),
+ 0);
+ }
+ } else {
+ PMD_DRV_LOG(ERR, "Input set setting is not supported.");
+ }
+ I40E_WRITE_FLUSH(hw);
+
+ /* store the default input set */
+ if (!pf->support_multi_driver)
+ pf->hash_input_set[pctype] = input_set;
+ pf->fdir.input_set[pctype] = input_set;
+ }
+}
+
+int
+i40e_hash_filter_inset_select(struct i40e_hw *hw,
+ struct rte_eth_input_set_conf *conf)
+{
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set, inset_reg = 0;
+ uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
+ int ret, i, num;
+
+ if (!conf) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+ if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
+ conf->op != RTE_ETH_INPUT_SET_ADD) {
+ PMD_DRV_LOG(ERR, "Unsupported input set operation");
+ return -EINVAL;
+ }
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
+ return -ENOTSUP;
+ }
+
+ pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
+ if (pctype == I40E_FILTER_PCTYPE_INVALID) {
+ PMD_DRV_LOG(ERR, "invalid flow_type input.");
+ return -EINVAL;
+ }
+
+ if (hw->mac.type == I40E_MAC_X722) {
+ /* get translated pctype value in fd pctype register */
+ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
+ I40E_GLQF_FD_PCTYPES((int)pctype));
+ }
+
+ ret = i40e_parse_input_set(&input_set, pctype, conf->field,
+ conf->inset_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to parse input set");
+ return -EINVAL;
+ }
+
+ if (conf->op == RTE_ETH_INPUT_SET_ADD) {
+ /* get inset value in register */
+ inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
+ inset_reg <<= I40E_32_BIT_WIDTH;
+ inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
+ input_set |= pf->hash_input_set[pctype];
+ }
+ num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+ I40E_INSET_MASK_NUM_REG);
+ if (num < 0)
+ return -EINVAL;
+
+ inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
+
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+ for (i = 0; i < num; i++)
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
+ /*clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ 0);
+ I40E_WRITE_FLUSH(hw);
+
+ pf->hash_input_set[pctype] = input_set;
+ return 0;
+}
+
+int
+i40e_fdir_filter_inset_select(struct i40e_pf *pf,
+ struct rte_eth_input_set_conf *conf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set, inset_reg = 0;
+ uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
+ int ret, i, num;
+
+ if (!hw || !conf) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+ if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
+ conf->op != RTE_ETH_INPUT_SET_ADD) {
+ PMD_DRV_LOG(ERR, "Unsupported input set operation");
+ return -EINVAL;
+ }
+
+ pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
+
+ if (pctype == I40E_FILTER_PCTYPE_INVALID) {
+ PMD_DRV_LOG(ERR, "invalid flow_type input.");
+ return -EINVAL;
+ }
+
+ ret = i40e_parse_input_set(&input_set, pctype, conf->field,
+ conf->inset_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to parse input set");
+ return -EINVAL;
+ }
+
+ /* get inset value in register */
+ inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
+ inset_reg <<= I40E_32_BIT_WIDTH;
+ inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
+
+ /* Can not change the inset reg for flex payload for fdir,
+ * it is done by writing I40E_PRTQF_FD_FLXINSET
+ * in i40e_set_flex_mask_on_pctype.
+ */
+ if (conf->op == RTE_ETH_INPUT_SET_SELECT)
+ inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
+ else
+ input_set |= pf->fdir.input_set[pctype];
+ num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+ I40E_INSET_MASK_NUM_REG);
+ if (num < 0)
+ return -EINVAL;
+ if (pf->support_multi_driver && num > 0) {
+ PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
+ return -ENOTSUP;
+ }
+
+ inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
+
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+ if (!pf->support_multi_driver) {
+ for (i = 0; i < num; i++)
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+ /*clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ 0);
+ } else {
+ PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
+ }
+ I40E_WRITE_FLUSH(hw);
+
+ pf->fdir.input_set[pctype] = input_set;
+ return 0;
+}
+
+static int
+i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
+{
+ int ret = 0;
+
+ if (!hw || !info) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+
+ switch (info->info_type) {
+ case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+ i40e_get_symmetric_hash_enable_per_port(hw,
+ &(info->info.enable));
+ break;
+ case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+ ret = i40e_get_hash_filter_global_config(hw,
+ &(info->info.global_conf));
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
+ info->info_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
+{
+ int ret = 0;
+
+ if (!hw || !info) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+
+ switch (info->info_type) {
+ case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+ i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
+ break;
+ case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+ ret = i40e_set_hash_filter_global_config(hw,
+ &(info->info.global_conf));
+ break;
+ case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
+ ret = i40e_hash_filter_inset_select(hw,
+ &(info->info.input_set_conf));
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
+ info->info_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Operations for hash function */
+static int
+i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = 0;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = i40e_hash_filter_get(hw,
+ (struct rte_eth_hash_filter_info *)arg);
+ break;
+ case RTE_ETH_FILTER_SET:
+ ret = i40e_hash_filter_set(hw,
+ (struct rte_eth_hash_filter_info *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
+ filter_op);
+ ret = -ENOTSUP;
+ break;
+ }
+
+ return ret;
+}
+
+/* Convert ethertype filter structure */
+static int
+i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter)
+{
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ filter->input.ether_type = input->ether_type;
+ filter->flags = input->flags;
+ filter->queue = input->queue;
+
+ return 0;
+}
+
+/* Check if there exists the ehtertype filter */
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return ethertype_rule->hash_map[ret];
+}
+
+/* Add ethertype filter in SW list */
+static int
+i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete ethertype filter in SW list */
+int
+i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ struct i40e_ethertype_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
+/*
+ * Configure ethertype filter, which can director packet by filtering
+ * with mac address and ether_type or only ether_type
+ */
+int
+i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *ethertype_filter, *node;
+ struct i40e_ethertype_filter check_filter;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret;
+
+ if (filter->queue >= pf->dev_data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue ID");
+ return -EINVAL;
+ }
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ PMD_DRV_LOG(ERR,
+ "unsupported ether_type(0x%04x) in control packet filter.",
+ filter->ether_type);
+ return -EINVAL;
+ }
+ if (filter->ether_type == ETHER_TYPE_VLAN)
+ PMD_DRV_LOG(WARNING,
+ "filter vlan ether_type in first tag is not supported.");
+
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_ethertype_filter_convert(filter, &check_filter);
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
+ &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
+ return -EINVAL;
+ }
+
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->mac_addr.addr_bytes,
+ filter->ether_type, flags,
+ pf->main_vsi->seid,
+ filter->queue, add, &stats, NULL);
+
+ PMD_DRV_LOG(INFO,
+ "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
+ ret, stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
+ if (ret < 0)
+ return -ENOSYS;
+
+ /* Add or delete a filter in SW list */
+ if (add) {
+ ethertype_filter = rte_zmalloc("ethertype_filter",
+ sizeof(*ethertype_filter), 0);
+ if (ethertype_filter == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -ENOMEM;
+ }
+
+ rte_memcpy(ethertype_filter, &check_filter,
+ sizeof(check_filter));
+ ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ if (ret < 0)
+ rte_free(ethertype_filter);
+ } else {
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+ }
+
+ return ret;
+}
+
+/*
+ * Handle operations for ethertype filter.
+ */
+static int
+i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret = 0;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return ret;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = i40e_ethertype_filter_set(pf,
+ (struct rte_eth_ethertype_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = i40e_ethertype_filter_set(pf,
+ (struct rte_eth_ethertype_filter *)arg,
+ FALSE);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
+ ret = -ENOSYS;
+ break;
+ }
+ return ret;
+}
+
+static int
+i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NONE:
+ /* For global configuration */
+ ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_MACVLAN:
+ ret = i40e_mac_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &i40e_flow_ops;
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Check and enable Extended Tag.
+ * Enabling Extended Tag is important for 40G performance.
+ */
+static void
+i40e_enable_extended_tag(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint32_t buf = 0;
+ int ret;
+
+ ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
+ PCI_DEV_CAP_REG);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
+ PCI_DEV_CAP_REG);
+ return;
+ }
+ if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
+ PMD_DRV_LOG(ERR, "Does not support Extended Tag");
+ return;
+ }
+
+ buf = 0;
+ ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
+ PCI_DEV_CTRL_REG);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
+ PCI_DEV_CTRL_REG);
+ return;
+ }
+ if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
+ PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
+ return;
+ }
+ buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
+ ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
+ PCI_DEV_CTRL_REG);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
+ PCI_DEV_CTRL_REG);
+ return;
+ }
+}
+
+/*
+ * As some registers wouldn't be reset unless a global hardware reset,
+ * hardware initialization is needed to put those registers into an
+ * expected initial state.
+ */
+static void
+i40e_hw_init(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ i40e_enable_extended_tag(dev);
+
+ /* clear the PF Queue Filter control register */
+ i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
+
+ /* Disable symmetric hash per port */
+ i40e_set_symmetric_hash_enable_per_port(hw, 0);
+}
+
+/*
+ * For X722 it is possible to have multiple pctypes mapped to the same flowtype
+ * however this function will return only one highest pctype index,
+ * which is not quite correct. This is known problem of i40e driver
+ * and needs to be fixed later.
+ */
+enum i40e_filter_pctype
+i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
+{
+ int i;
+ uint64_t pctype_mask;
+
+ if (flow_type < I40E_FLOW_TYPE_MAX) {
+ pctype_mask = adapter->pctypes_tbl[flow_type];
+ for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
+ if (pctype_mask & (1ULL << i))
+ return (enum i40e_filter_pctype)i;
+ }
+ }
+ return I40E_FILTER_PCTYPE_INVALID;
+}
+
+uint16_t
+i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
+ enum i40e_filter_pctype pctype)
+{
+ uint16_t flowtype;
+ uint64_t pctype_mask = 1ULL << pctype;
+
+ for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
+ flowtype++) {
+ if (adapter->pctypes_tbl[flowtype] & pctype_mask)
+ return flowtype;
+ }
+
+ return RTE_ETH_FLOW_UNKNOWN;
+}
+
+/*
+ * On X710, performance number is far from the expectation on recent firmware
+ * versions; on XL710, performance number is also far from the expectation on
+ * recent firmware versions, if promiscuous mode is disabled, or promiscuous
+ * mode is enabled and port MAC address is equal to the packet destination MAC
+ * address. The fix for this issue may not be integrated in the following
+ * firmware version. So the workaround in software driver is needed. It needs
+ * to modify the initial values of 3 internal only registers for both X710 and
+ * XL710. Note that the values for X710 or XL710 could be different, and the
+ * workaround can be removed when it is fixed in firmware in the future.
+ */
+
+/* For both X710 and XL710 */
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x203F0200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
+
+#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
+#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
+
+/* For X722 */
+#define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
+#define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
+
+/* For X710 */
+#define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303
+/* For XL710 */
+#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
+#define I40E_GL_SWR_PM_UP_THR 0x269FBC
+
+/*
+ * GL_SWR_PM_UP_THR:
+ * The value is not impacted from the link speed, its value is set according
+ * to the total number of ports for a better pipe-monitor configuration.
+ */
+static bool
+i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
+{
+#define I40E_GL_SWR_PM_EF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
+
+#define I40E_GL_SWR_PM_SF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
+
+ static const struct {
+ uint16_t device_id;
+ uint32_t val;
+ } swr_pm_table[] = {
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
+
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
+ };
+ uint32_t i;
+
+ if (value == NULL) {
+ PMD_DRV_LOG(ERR, "value is NULL");
+ return false;
+ }
+
+ for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
+ if (hw->device_id == swr_pm_table[i].device_id) {
+ *value = swr_pm_table[i].val;
+
+ PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
+ "value - 0x%08x",
+ hw->device_id, *value);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int
+i40e_dev_sync_phy_type(struct i40e_hw *hw)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_get_phy_abilities_resp phy_ab;
+ int ret = -ENOTSUP;
+ int retries = 0;
+
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
+ NULL);
+
+ while (status) {
+ PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
+ status);
+ retries++;
+ rte_delay_us(100000);
+ if (retries < 5)
+ status = i40e_aq_get_phy_capabilities(hw, false,
+ true, &phy_ab, NULL);
+ else
+ return ret;
+ }
+ return 0;
+}
+
+static void
+i40e_configure_registers(struct i40e_hw *hw)
+{
+ static struct {
+ uint32_t addr;
+ uint64_t val;
+ } reg_table[] = {
+ {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
+ {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
+ {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
+ };
+ uint64_t reg;
+ uint32_t i;
+ int ret;
+
+ for (i = 0; i < RTE_DIM(reg_table); i++) {
+ if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
+ if (hw->mac.type == I40E_MAC_X722) /* For X722 */
+ reg_table[i].val =
+ I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
+ else /* For X710/XL710/XXV710 */
+ if (hw->aq.fw_maj_ver < 6)
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
+ else
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
+ }
+
+ if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
+ if (hw->mac.type == I40E_MAC_X722) /* For X722 */
+ reg_table[i].val =
+ I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
+ else /* For X710/XL710/XXV710 */
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
+ }
+
+ if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
+ uint32_t cfg_val;
+
+ if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
+ PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
+ "GL_SWR_PM_UP_THR value fixup",
+ hw->device_id);
+ continue;
+ }
+
+ reg_table[i].val = cfg_val;
+ }
+
+ ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
+ &reg, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
+ reg_table[i].addr);
+ break;
+ }
+ PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
+ reg_table[i].addr, reg);
+ if (reg == reg_table[i].val)
+ continue;
+
+ ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
+ reg_table[i].val, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
+ reg_table[i].val, reg_table[i].addr);
+ break;
+ }
+ PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
+ "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
+ }
+}
+
+#define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4))
+#define I40E_VSI_TSR_QINQ_CONFIG 0xc030
+#define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4))
+#define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
+static int
+i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
+{
+ uint32_t reg;
+ int ret;
+
+ if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
+ PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
+ return -EINVAL;
+ }
+
+ /* Configure for double VLAN RX stripping */
+ reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
+ if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
+ reg |= I40E_VSI_TSR_QINQ_CONFIG;
+ ret = i40e_aq_debug_write_register(hw,
+ I40E_VSI_TSR(vsi->vsi_id),
+ reg, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
+ vsi->vsi_id);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ /* Configure for double VLAN TX insertion */
+ reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
+ if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
+ reg = I40E_VSI_L2TAGSTXVALID_QINQ;
+ ret = i40e_aq_debug_write_register(hw,
+ I40E_VSI_L2TAGSTXVALID(
+ vsi->vsi_id), reg, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to update VSI_L2TAGSTXVALID[%d]",
+ vsi->vsi_id);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_aq_add_mirror_rule
+ * @hw: pointer to the hardware structure
+ * @seid: VEB seid to add mirror rule to
+ * @dst_id: destination vsi seid
+ * @entries: Buffer which contains the entities to be mirrored
+ * @count: number of entities contained in the buffer
+ * @rule_id:the rule_id of the rule to be added
+ *
+ * Add a mirror rule for a given veb.
+ *
+ **/
+static enum i40e_status_code
+i40e_aq_add_mirror_rule(struct i40e_hw *hw,
+ uint16_t seid, uint16_t dst_id,
+ uint16_t rule_type, uint16_t *entries,
+ uint16_t count, uint16_t *rule_id)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_delete_mirror_rule cmd;
+ struct i40e_aqc_add_delete_mirror_rule_completion *resp =
+ (struct i40e_aqc_add_delete_mirror_rule_completion *)
+ &desc.params.raw;
+ uint16_t buff_len;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_mirror_rule);
+ memset(&cmd, 0, sizeof(cmd));
+
+ buff_len = sizeof(uint16_t) * count;
+ desc.datalen = rte_cpu_to_le_16(buff_len);
+ if (buff_len > 0)
+ desc.flags |= rte_cpu_to_le_16(
+ (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd.rule_type = rte_cpu_to_le_16(rule_type <<
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
+ cmd.num_entries = rte_cpu_to_le_16(count);
+ cmd.seid = rte_cpu_to_le_16(seid);
+ cmd.destination = rte_cpu_to_le_16(dst_id);
+
+ rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
+ status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
+ PMD_DRV_LOG(INFO,
+ "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
+ hw->aq.asq_last_status, resp->rule_id,
+ resp->mirror_rules_used, resp->mirror_rules_free);
+ *rule_id = rte_le_to_cpu_16(resp->rule_id);
+
+ return status;
+}
+
+/**
+ * i40e_aq_del_mirror_rule
+ * @hw: pointer to the hardware structure
+ * @seid: VEB seid to add mirror rule to
+ * @entries: Buffer which contains the entities to be mirrored
+ * @count: number of entities contained in the buffer
+ * @rule_id:the rule_id of the rule to be delete
+ *
+ * Delete a mirror rule for a given veb.
+ *
+ **/
+static enum i40e_status_code
+i40e_aq_del_mirror_rule(struct i40e_hw *hw,
+ uint16_t seid, uint16_t rule_type, uint16_t *entries,
+ uint16_t count, uint16_t rule_id)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_delete_mirror_rule cmd;
+ uint16_t buff_len = 0;
+ enum i40e_status_code status;
+ void *buff = NULL;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_delete_mirror_rule);
+ memset(&cmd, 0, sizeof(cmd));
+ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
+ desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ cmd.num_entries = count;
+ buff_len = sizeof(uint16_t) * count;
+ desc.datalen = rte_cpu_to_le_16(buff_len);
+ buff = (void *)entries;
+ } else
+ /* rule id is filled in destination field for deleting mirror rule */
+ cmd.destination = rte_cpu_to_le_16(rule_id);
+
+ cmd.rule_type = rte_cpu_to_le_16(rule_type <<
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
+ cmd.seid = rte_cpu_to_le_16(seid);
+
+ rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
+ status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_mirror_rule_set
+ * @dev: pointer to the hardware structure
+ * @mirror_conf: mirror rule info
+ * @sw_id: mirror rule's sw_id
+ * @on: enable/disable
+ *
+ * set a mirror rule.
+ *
+ **/
+static int
+i40e_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t sw_id, uint8_t on)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_mirror_rule *it, *mirr_rule = NULL;
+ struct i40e_mirror_rule *parent = NULL;
+ uint16_t seid, dst_seid, rule_id;
+ uint16_t i, j = 0;
+ int ret;
+
+ PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
+
+ if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
+ PMD_DRV_LOG(ERR,
+ "mirror rule can not be configured without veb or vfs.");
+ return -ENOSYS;
+ }
+ if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
+ PMD_DRV_LOG(ERR, "mirror table is full.");
+ return -ENOSPC;
+ }
+ if (mirror_conf->dst_pool > pf->vf_num) {
+ PMD_DRV_LOG(ERR, "invalid destination pool %u.",
+ mirror_conf->dst_pool);
+ return -EINVAL;
+ }
+
+ seid = pf->main_vsi->veb->seid;
+
+ TAILQ_FOREACH(it, &pf->mirror_list, rules) {
+ if (sw_id <= it->index) {
+ mirr_rule = it;
+ break;
+ }
+ parent = it;
+ }
+ if (mirr_rule && sw_id == mirr_rule->index) {
+ if (on) {
+ PMD_DRV_LOG(ERR, "mirror rule exists.");
+ return -EEXIST;
+ } else {
+ ret = i40e_aq_del_mirror_rule(hw, seid,
+ mirr_rule->rule_type,
+ mirr_rule->entries,
+ mirr_rule->num_entries, mirr_rule->id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "failed to remove mirror rule: ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
+ return -ENOSYS;
+ }
+ TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
+ rte_free(mirr_rule);
+ pf->nb_mirror_rule--;
+ return 0;
+ }
+ } else if (!on) {
+ PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
+ return -ENOENT;
+ }
+
+ mirr_rule = rte_zmalloc("i40e_mirror_rule",
+ sizeof(struct i40e_mirror_rule) , 0);
+ if (!mirr_rule) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+ switch (mirror_conf->rule_type) {
+ case ETH_MIRROR_VLAN:
+ for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
+ if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+ mirr_rule->entries[j] =
+ mirror_conf->vlan.vlan_id[i];
+ j++;
+ }
+ }
+ if (j == 0) {
+ PMD_DRV_LOG(ERR, "vlan is not specified.");
+ rte_free(mirr_rule);
+ return -EINVAL;
+ }
+ mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
+ break;
+ case ETH_MIRROR_VIRTUAL_POOL_UP:
+ case ETH_MIRROR_VIRTUAL_POOL_DOWN:
+ /* check if the specified pool bit is out of range */
+ if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
+ PMD_DRV_LOG(ERR, "pool mask is out of range.");
+ rte_free(mirr_rule);
+ return -EINVAL;
+ }
+ for (i = 0, j = 0; i < pf->vf_num; i++) {
+ if (mirror_conf->pool_mask & (1ULL << i)) {
+ mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
+ j++;
+ }
+ }
+ if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
+ /* add pf vsi to entries */
+ mirr_rule->entries[j] = pf->main_vsi_seid;
+ j++;
+ }
+ if (j == 0) {
+ PMD_DRV_LOG(ERR, "pool is not specified.");
+ rte_free(mirr_rule);
+ return -EINVAL;
+ }
+ /* egress and ingress in aq commands means from switch but not port */
+ mirr_rule->rule_type =
+ (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
+ I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
+ I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
+ break;
+ case ETH_MIRROR_UPLINK_PORT:
+ /* egress and ingress in aq commands means from switch but not port*/
+ mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
+ break;
+ case ETH_MIRROR_DOWNLINK_PORT:
+ mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
+ mirror_conf->rule_type);
+ rte_free(mirr_rule);
+ return -EINVAL;
+ }
+
+ /* If the dst_pool is equal to vf_num, consider it as PF */
+ if (mirror_conf->dst_pool == pf->vf_num)
+ dst_seid = pf->main_vsi_seid;
+ else
+ dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
+
+ ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
+ mirr_rule->rule_type, mirr_rule->entries,
+ j, &rule_id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "failed to add mirror rule: ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
+ rte_free(mirr_rule);
+ return -ENOSYS;
+ }
+
+ mirr_rule->index = sw_id;
+ mirr_rule->num_entries = j;
+ mirr_rule->id = rule_id;
+ mirr_rule->dst_vsi_seid = dst_seid;
+
+ if (parent)
+ TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
+ else
+ TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
+
+ pf->nb_mirror_rule++;
+ return 0;
+}
+
+/**
+ * i40e_mirror_rule_reset
+ * @dev: pointer to the device
+ * @sw_id: mirror rule's sw_id
+ *
+ * reset a mirror rule.
+ *
+ **/
+static int
+i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_mirror_rule *it, *mirr_rule = NULL;
+ uint16_t seid;
+ int ret;
+
+ PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
+
+ seid = pf->main_vsi->veb->seid;
+
+ TAILQ_FOREACH(it, &pf->mirror_list, rules) {
+ if (sw_id == it->index) {
+ mirr_rule = it;
+ break;
+ }
+ }
+ if (mirr_rule) {
+ ret = i40e_aq_del_mirror_rule(hw, seid,
+ mirr_rule->rule_type,
+ mirr_rule->entries,
+ mirr_rule->num_entries, mirr_rule->id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "failed to remove mirror rule: status = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
+ return -ENOSYS;
+ }
+ TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
+ rte_free(mirr_rule);
+ pf->nb_mirror_rule--;
+ } else {
+ PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
+ return -ENOENT;
+ }
+ return 0;
+}
+
+static uint64_t
+i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t systim_cycles;
+
+ systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
+ systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
+ << 32;
+
+ return systim_cycles;
+}
+
+static uint64_t
+i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rx_tstamp;
+
+ rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
+ rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
+ << 32;
+
+ return rx_tstamp;
+}
+
+static uint64_t
+i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t tx_tstamp;
+
+ tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
+ tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
+ << 32;
+
+ return tx_tstamp;
+}
+
+static void
+i40e_start_timecounters(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+ struct rte_eth_link link;
+ uint32_t tsync_inc_l;
+ uint32_t tsync_inc_h;
+
+ /* Get current link speed. */
+ i40e_dev_link_update(dev, 1);
+ rte_eth_linkstatus_get(dev, &link);
+
+ switch (link.link_speed) {
+ case ETH_SPEED_NUM_40G:
+ tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
+ tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
+ break;
+ case ETH_SPEED_NUM_10G:
+ tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
+ tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
+ break;
+ case ETH_SPEED_NUM_1G:
+ tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
+ tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
+ break;
+ default:
+ tsync_inc_l = 0x0;
+ tsync_inc_h = 0x0;
+ }
+
+ /* Set the timesync increment value. */
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
+
+ memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
+ adapter->systime_tc.cc_shift = 0;
+ adapter->systime_tc.nsec_mask = 0;
+
+ adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
+ adapter->rx_tstamp_tc.cc_shift = 0;
+ adapter->rx_tstamp_tc.nsec_mask = 0;
+
+ adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
+ adapter->tx_tstamp_tc.cc_shift = 0;
+ adapter->tx_tstamp_tc.nsec_mask = 0;
+}
+
+static int
+i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ adapter->systime_tc.nsec += delta;
+ adapter->rx_tstamp_tc.nsec += delta;
+ adapter->tx_tstamp_tc.nsec += delta;
+
+ return 0;
+}
+
+static int
+i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ uint64_t ns;
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ ns = rte_timespec_to_ns(ts);
+
+ /* Set the timecounters to a new value. */
+ adapter->systime_tc.nsec = ns;
+ adapter->rx_tstamp_tc.nsec = ns;
+ adapter->tx_tstamp_tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ uint64_t ns, systime_cycles;
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ systime_cycles = i40e_read_systime_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+i40e_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl_l;
+ uint32_t tsync_ctl_h;
+
+ /* Stop the timesync system time. */
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
+ /* Reset the timesync system time value. */
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
+
+ i40e_start_timecounters(dev);
+
+ /* Clear timesync registers. */
+ I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
+ I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
+ I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
+ I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
+ I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
+ I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
+
+ /* Enable timestamping of PTP packets. */
+ tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
+ tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
+
+ tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
+ tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
+ tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
+
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
+
+ return 0;
+}
+
+static int
+i40e_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl_l;
+ uint32_t tsync_ctl_h;
+
+ /* Disable timestamping of transmitted PTP packets. */
+ tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
+ tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
+
+ tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
+ tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
+
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
+
+ /* Reset the timesync increment value. */
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
+
+ return 0;
+}
+
+static int
+i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp, uint32_t flags)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ uint32_t sync_status;
+ uint32_t index = flags & 0x03;
+ uint64_t rx_tstamp_cycles;
+ uint64_t ns;
+
+ sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
+ if ((sync_status & (1 << index)) == 0)
+ return -EINVAL;
+
+ rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
+ ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ uint32_t sync_status;
+ uint64_t tx_tstamp_cycles;
+ uint64_t ns;
+
+ sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
+ if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
+ return -EINVAL;
+
+ tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+/*
+ * i40e_parse_dcb_configure - parse dcb configure from user
+ * @dev: the device being configured
+ * @dcb_cfg: pointer of the result of parse
+ * @*tc_map: bit map of enabled traffic classes
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int
+i40e_parse_dcb_configure(struct rte_eth_dev *dev,
+ struct i40e_dcbx_config *dcb_cfg,
+ uint8_t *tc_map)
+{
+ struct rte_eth_dcb_rx_conf *dcb_rx_conf;
+ uint8_t i, tc_bw, bw_lf;
+
+ memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
+
+ dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
+ PMD_INIT_LOG(ERR, "number of tc exceeds max.");
+ return -EINVAL;
+ }
+
+ /* assume each tc has the same bw */
+ tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
+ for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
+ dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
+ /* to ensure the sum of tcbw is equal to 100 */
+ bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
+ for (i = 0; i < bw_lf; i++)
+ dcb_cfg->etscfg.tcbwtable[i]++;
+
+ /* assume each tc has the same Transmission Selection Algorithm */
+ for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
+ dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ dcb_cfg->etscfg.prioritytable[i] =
+ dcb_rx_conf->dcb_tc[i];
+
+ /* FW needs one App to configure HW */
+ dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
+ dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
+ dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ if (dcb_rx_conf->nb_tcs == 0)
+ *tc_map = 1; /* tc0 only */
+ else
+ *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
+
+ if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ dcb_cfg->pfc.willing = 0;
+ dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ dcb_cfg->pfc.pfcenable = *tc_map;
+ }
+ return 0;
+}
+
+
+static enum i40e_status_code
+i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
+ struct i40e_aqc_vsi_properties_data *info,
+ uint8_t enabled_tcmap)
+{
+ enum i40e_status_code ret;
+ int i, total_tc = 0;
+ uint16_t qpnum_per_tc, bsf, qp_idx;
+ struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ uint16_t used_queues;
+
+ ret = validate_tcmap_parameter(vsi, enabled_tcmap);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tcmap & (1 << i))
+ total_tc++;
+ }
+ if (total_tc == 0)
+ total_tc = 1;
+ vsi->enabled_tc = enabled_tcmap;
+
+ /* different VSI has different queues assigned */
+ if (vsi->type == I40E_VSI_MAIN)
+ used_queues = dev_data->nb_rx_queues -
+ pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+ else if (vsi->type == I40E_VSI_VMDQ2)
+ used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+ else {
+ PMD_INIT_LOG(ERR, "unsupported VSI type.");
+ return I40E_ERR_NO_AVAILABLE_VSI;
+ }
+
+ qpnum_per_tc = used_queues / total_tc;
+ /* Number of queues per enabled TC */
+ if (qpnum_per_tc == 0) {
+ PMD_INIT_LOG(ERR, " number of queues is less that tcs.");
+ return I40E_ERR_INVALID_QP_ID;
+ }
+ qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
+ I40E_MAX_Q_PER_TC);
+ bsf = rte_bsf32(qpnum_per_tc);
+
+ /**
+ * Configure TC and queue mapping parameters, for enabled TC,
+ * allocate qpnum_per_tc queues to this traffic. For disabled TC,
+ * default queue will serve it.
+ */
+ qp_idx = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & (1 << i)) {
+ info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+ qp_idx += qpnum_per_tc;
+ } else
+ info->tc_mapping[i] = 0;
+ }
+
+ /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
+ if (vsi->type == I40E_VSI_SRIOV) {
+ info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+ for (i = 0; i < vsi->nb_qps; i++)
+ info->queue_mapping[i] =
+ rte_cpu_to_le_16(vsi->base_queue + i);
+ } else {
+ info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
+ }
+ info->valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+
+ return I40E_SUCCESS;
+}
+
+/*
+ * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
+ * @veb: VEB to be configured
+ * @tc_map: enabled TC bitmap
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static enum i40e_status_code
+i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
+{
+ struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
+ struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
+ struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
+ enum i40e_status_code ret = I40E_SUCCESS;
+ int i;
+ uint32_t bw_max;
+
+ /* Check if enabled_tc is same as existing or new TCs */
+ if (veb->enabled_tc == tc_map)
+ return ret;
+
+ /* configure tc bandwidth */
+ memset(&veb_bw, 0, sizeof(veb_bw));
+ veb_bw.tc_valid_bits = tc_map;
+ /* Enable ETS TCs with equal BW Share for now across all VSIs */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (tc_map & BIT_ULL(i))
+ veb_bw.tc_bw_share_credits[i] = 1;
+ }
+ ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
+ &veb_bw, NULL);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "AQ command Config switch_comp BW allocation per TC failed = %d",
+ hw->aq.asq_last_status);
+ return ret;
+ }
+
+ memset(&ets_query, 0, sizeof(ets_query));
+ ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
+ &ets_query, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to get switch_comp ETS configuration %u",
+ hw->aq.asq_last_status);
+ return ret;
+ }
+ memset(&bw_query, 0, sizeof(bw_query));
+ ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
+ &bw_query, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to get switch_comp bandwidth configuration %u",
+ hw->aq.asq_last_status);
+ return ret;
+ }
+
+ /* store and print out BW info */
+ veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
+ veb->bw_info.bw_max = ets_query.tc_bw_max;
+ PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
+ PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
+ bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
+ (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
+ I40E_16_BIT_WIDTH);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ veb->bw_info.bw_ets_share_credits[i] =
+ bw_query.tc_bw_share_credits[i];
+ veb->bw_info.bw_ets_credits[i] =
+ rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
+ /* 4 bits per TC, 4th bit is reserved */
+ veb->bw_info.bw_ets_max[i] =
+ (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
+ RTE_LEN2MASK(3, uint8_t));
+ PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
+ veb->bw_info.bw_ets_share_credits[i]);
+ PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
+ veb->bw_info.bw_ets_credits[i]);
+ PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
+ veb->bw_info.bw_ets_max[i]);
+ }
+
+ veb->enabled_tc = tc_map;
+
+ return ret;
+}
+
+
+/*
+ * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
+ * @vsi: VSI to be configured
+ * @tc_map: enabled TC bitmap
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static enum i40e_status_code
+i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
+{
+ struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+ struct i40e_vsi_context ctxt;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ enum i40e_status_code ret = I40E_SUCCESS;
+ int i;
+
+ /* Check if enabled_tc is same as existing or new TCs */
+ if (vsi->enabled_tc == tc_map)
+ return ret;
+
+ /* configure tc bandwidth */
+ memset(&bw_data, 0, sizeof(bw_data));
+ bw_data.tc_valid_bits = tc_map;
+ /* Enable ETS TCs with equal BW Share for now across all VSIs */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (tc_map & BIT_ULL(i))
+ bw_data.tc_bw_credits[i] = 1;
+ }
+ ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "AQ command Config VSI BW allocation per TC failed = %d",
+ hw->aq.asq_last_status);
+ goto out;
+ }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ vsi->info.qs_handle[i] = bw_data.qs_handles[i];
+
+ /* Update Queue Pairs Mapping for currently enabled UPs */
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.info = vsi->info;
+ i40e_get_cap(hw);
+ ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
+ if (ret)
+ goto out;
+
+ /* Update the VSI after updating the VSI queue-mapping information */
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
+ hw->aq.asq_last_status);
+ goto out;
+ }
+ /* update the local VSI info with updated queue map */
+ rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+ rte_memcpy(&vsi->info.queue_mapping,
+ &ctxt.info.queue_mapping,
+ sizeof(vsi->info.queue_mapping));
+ vsi->info.mapping_flags = ctxt.info.mapping_flags;
+ vsi->info.valid_sections = 0;
+
+ /* query and update current VSI BW information */
+ ret = i40e_vsi_get_bw_config(vsi);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "Failed updating vsi bw info, err %s aq_err %s",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ goto out;
+ }
+
+ vsi->enabled_tc = tc_map;
+
+out:
+ return ret;
+}
+
+/*
+ * i40e_dcb_hw_configure - program the dcb setting to hw
+ * @pf: pf the configuration is taken on
+ * @new_cfg: new configuration
+ * @tc_map: enabled TC bitmap
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static enum i40e_status_code
+i40e_dcb_hw_configure(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg,
+ uint8_t tc_map)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ struct i40e_vsi_list *vsi_list;
+ enum i40e_status_code ret;
+ int i;
+ uint32_t val;
+
+ /* Use the FW API if FW > v4.4*/
+ if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
+ (hw->aq.fw_maj_ver >= 5))) {
+ PMD_INIT_LOG(ERR,
+ "FW < v4.4, can not use FW LLDP API to configure DCB");
+ return I40E_ERR_FIRMWARE_API_VERSION;
+ }
+
+ /* Check if need reconfiguration */
+ if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
+ PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
+ return I40E_SUCCESS;
+ }
+
+ /* Copy the new config to the current config */
+ *old_cfg = *new_cfg;
+ old_cfg->etsrec = old_cfg->etscfg;
+ ret = i40e_set_dcb_config(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ /* set receive Arbiter to RR mode and ETS scheme by default */
+ for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
+ val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
+ val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
+ I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
+ I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
+ val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
+ I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
+ I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
+ val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
+ I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
+ val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
+ I40E_PRTDCB_RETSTCC_ETSTC_MASK;
+ I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
+ }
+ /* get local mib to check whether it is configured correctly */
+ /* IEEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+ /* Get Local DCB Config */
+ i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+
+ /* if Veb is created, need to update TC of it at first */
+ if (main_vsi->veb) {
+ ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
+ if (ret)
+ PMD_INIT_LOG(WARNING,
+ "Failed configuring TC for VEB seid=%d",
+ main_vsi->veb->seid);
+ }
+ /* Update each VSI */
+ i40e_vsi_config_tc(main_vsi, tc_map);
+ if (main_vsi->veb) {
+ TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
+ /* Beside main VSI and VMDQ VSIs, only enable default
+ * TC for other VSIs
+ */
+ if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
+ ret = i40e_vsi_config_tc(vsi_list->vsi,
+ tc_map);
+ else
+ ret = i40e_vsi_config_tc(vsi_list->vsi,
+ I40E_DEFAULT_TCMAP);
+ if (ret)
+ PMD_INIT_LOG(WARNING,
+ "Failed configuring TC for VSI seid=%d",
+ vsi_list->vsi->seid);
+ /* continue */
+ }
+ }
+ return I40E_SUCCESS;
+}
+
+/*
+ * i40e_dcb_init_configure - initial dcb config
+ * @dev: device being configured
+ * @sw_dcb: indicate whether dcb is sw configured or hw offload
+ *
+ * Returns 0 on success, negative value on failure
+ */
+int
+i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i, ret = 0;
+
+ if ((pf->flags & I40E_FLAG_DCB) == 0) {
+ PMD_INIT_LOG(ERR, "HW doesn't support DCB");
+ return -ENOTSUP;
+ }
+
+ /* DCB initialization:
+ * Update DCB configuration from the Firmware and configure
+ * LLDP MIB change event.
+ */
+ if (sw_dcb == TRUE) {
+ ret = i40e_init_dcb(hw);
+ /* If lldp agent is stopped, the return value from
+ * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
+ * adminq status. Otherwise, it should return success.
+ */
+ if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
+ hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
+ memset(&hw->local_dcbx_config, 0,
+ sizeof(struct i40e_dcbx_config));
+ /* set dcb default configuration */
+ hw->local_dcbx_config.etscfg.willing = 0;
+ hw->local_dcbx_config.etscfg.maxtcs = 0;
+ hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
+ hw->local_dcbx_config.etscfg.tsatable[0] =
+ I40E_IEEE_TSA_ETS;
+ /* all UPs mapping to TC0 */
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
+ hw->local_dcbx_config.etsrec =
+ hw->local_dcbx_config.etscfg;
+ hw->local_dcbx_config.pfc.willing = 0;
+ hw->local_dcbx_config.pfc.pfccap =
+ I40E_MAX_TRAFFIC_CLASS;
+ /* FW needs one App to configure HW */
+ hw->local_dcbx_config.numapps = 1;
+ hw->local_dcbx_config.app[0].selector =
+ I40E_APP_SEL_ETHTYPE;
+ hw->local_dcbx_config.app[0].priority = 3;
+ hw->local_dcbx_config.app[0].protocolid =
+ I40E_APP_PROTOID_FCOE;
+ ret = i40e_set_dcb_config(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "default dcb config fails. err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
+ return -ENOSYS;
+ }
+ } else {
+ PMD_INIT_LOG(ERR,
+ "DCB initialization in FW fails, err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
+ return -ENOTSUP;
+ }
+ } else {
+ ret = i40e_aq_start_lldp(hw, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_INIT_LOG(DEBUG, "Failed to start lldp");
+
+ ret = i40e_init_dcb(hw);
+ if (!ret) {
+ if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
+ PMD_INIT_LOG(ERR,
+ "HW doesn't support DCBX offload.");
+ return -ENOTSUP;
+ }
+ } else {
+ PMD_INIT_LOG(ERR,
+ "DCBX configuration failed, err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
+ return -ENOTSUP;
+ }
+ }
+ return 0;
+}
+
+/*
+ * i40e_dcb_setup - setup dcb related config
+ * @dev: device being configured
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int
+i40e_dcb_setup(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_dcbx_config dcb_cfg;
+ uint8_t tc_map = 0;
+ int ret = 0;
+
+ if ((pf->flags & I40E_FLAG_DCB) == 0) {
+ PMD_INIT_LOG(ERR, "HW doesn't support DCB");
+ return -ENOTSUP;
+ }
+
+ if (pf->vf_num != 0)
+ PMD_INIT_LOG(DEBUG, " DCB only works on pf and vmdq vsis.");
+
+ ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "invalid dcb config");
+ return -EINVAL;
+ }
+ ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "dcb sw configure fails");
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+static int
+i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
+ uint16_t bsf, tc_mapping;
+ int i, j = 0;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
+ else
+ dcb_info->nb_tcs = 1;
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
+ for (i = 0; i < dcb_info->nb_tcs; i++)
+ dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
+
+ /* get queue mapping if vmdq is disabled */
+ if (!pf->nb_cfg_vmdq_vsi) {
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->enabled_tc & (1 << i)))
+ continue;
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+ dcb_info->tc_queue.tc_rxq[j][i].base =
+ (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ dcb_info->tc_queue.tc_txq[j][i].base =
+ dcb_info->tc_queue.tc_rxq[j][i].base;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+ dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
+ dcb_info->tc_queue.tc_txq[j][i].nb_queue =
+ dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
+ }
+ return 0;
+ }
+
+ /* get queue mapping if vmdq is enabled */
+ do {
+ vsi = pf->vmdq[j].vsi;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->enabled_tc & (1 << i)))
+ continue;
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+ dcb_info->tc_queue.tc_rxq[j][i].base =
+ (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ dcb_info->tc_queue.tc_txq[j][i].base =
+ dcb_info->tc_queue.tc_rxq[j][i].base;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+ dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
+ dcb_info->tc_queue.tc_txq[j][i].nb_queue =
+ dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
+ }
+ j++;
+ } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
+ return 0;
+}
+
+static int
+i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t msix_intr;
+
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
+ else
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_DYN_CTLN(msix_intr -
+ I40E_RX_VEC_START),
+ I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
+
+ I40E_WRITE_FLUSH(hw);
+ rte_intr_enable(&pci_dev->intr_handle);
+
+ return 0;
+}
+
+static int
+i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t msix_intr;
+
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
+ else
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_DYN_CTLN(msix_intr -
+ I40E_RX_VEC_START),
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int i40e_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *ptr_data = regs->data;
+ uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
+ const struct i40e_reg_info *reg_info;
+
+ if (ptr_data == NULL) {
+ regs->length = I40E_GLGEN_STAT_CLEAR + 4;
+ regs->width = sizeof(uint32_t);
+ return 0;
+ }
+
+ /* The first few registers have to be read using AQ operations */
+ reg_idx = 0;
+ while (i40e_regs_adminq[reg_idx].name) {
+ reg_info = &i40e_regs_adminq[reg_idx++];
+ for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
+ for (arr_idx2 = 0;
+ arr_idx2 <= reg_info->count2;
+ arr_idx2++) {
+ reg_offset = arr_idx * reg_info->stride1 +
+ arr_idx2 * reg_info->stride2;
+ reg_offset += reg_info->base_addr;
+ ptr_data[reg_offset >> 2] =
+ i40e_read_rx_ctl(hw, reg_offset);
+ }
+ }
+
+ /* The remaining registers can be read using primitives */
+ reg_idx = 0;
+ while (i40e_regs_others[reg_idx].name) {
+ reg_info = &i40e_regs_others[reg_idx++];
+ for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
+ for (arr_idx2 = 0;
+ arr_idx2 <= reg_info->count2;
+ arr_idx2++) {
+ reg_offset = arr_idx * reg_info->stride1 +
+ arr_idx2 * reg_info->stride2;
+ reg_offset += reg_info->base_addr;
+ ptr_data[reg_offset >> 2] =
+ I40E_READ_REG(hw, reg_offset);
+ }
+ }
+
+ return 0;
+}
+
+static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Convert word count to byte count */
+ return hw->nvm.sr_size << 1;
+}
+
+static int i40e_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t *data = eeprom->data;
+ uint16_t offset, length, cnt_words;
+ int ret_code;
+
+ offset = eeprom->offset >> 1;
+ length = eeprom->length >> 1;
+ cnt_words = length;
+
+ if (offset > hw->nvm.sr_size ||
+ offset + length > hw->nvm.sr_size) {
+ PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
+ return -EINVAL;
+ }
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
+ if (ret_code != I40E_SUCCESS || cnt_words != length) {
+ PMD_DRV_LOG(ERR, "EEPROM read failed.");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int i40e_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t sff8472_comp = 0;
+ uint32_t sff8472_swap = 0;
+ uint32_t sff8636_rev = 0;
+ i40e_status status;
+ uint32_t type = 0;
+
+ /* Check if firmware supports reading module EEPROM. */
+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
+ PMD_DRV_LOG(ERR,
+ "Module EEPROM memory read not supported. "
+ "Please update the NVM image.\n");
+ return -EINVAL;
+ }
+
+ status = i40e_update_link_info(hw);
+ if (status)
+ return -EIO;
+
+ if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
+ PMD_DRV_LOG(ERR,
+ "Cannot read module EEPROM memory. "
+ "No module connected.\n");
+ return -EINVAL;
+ }
+
+ type = hw->phy.link_info.module_type[0];
+
+ switch (type) {
+ case I40E_MODULE_TYPE_SFP:
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_MODULE_SFF_8472_COMP,
+ &sff8472_comp, NULL);
+ if (status)
+ return -EIO;
+
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_MODULE_SFF_8472_SWAP,
+ &sff8472_swap, NULL);
+ if (status)
+ return -EIO;
+
+ /* Check if the module requires address swap to access
+ * the other EEPROM memory page.
+ */
+ if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
+ PMD_DRV_LOG(WARNING,
+ "Module address swap to access "
+ "page 0xA2 is not supported.\n");
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else if (sff8472_comp == 0x00) {
+ /* Module is not SFF-8472 compliant */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP_PLUS:
+ /* Read from memory page 0. */
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ 0,
+ I40E_MODULE_REVISION_ADDR,
+ &sff8636_rev, NULL);
+ if (status)
+ return -EIO;
+ /* Determine revision compliance byte */
+ if (sff8636_rev > 0x02) {
+ /* Module is SFF-8636 compliant */
+ modinfo->type = RTE_ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ } else {
+ modinfo->type = RTE_ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP28:
+ modinfo->type = RTE_ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Module type unrecognized\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ bool is_sfp = false;
+ i40e_status status;
+ uint8_t *data = info->data;
+ uint32_t value = 0;
+ uint32_t i;
+
+ if (!info || !info->length || !data)
+ return -EINVAL;
+
+ if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
+ is_sfp = true;
+
+ for (i = 0; i < info->length; i++) {
+ u32 offset = i + info->offset;
+ u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
+
+ /* Check if we need to access the other memory page */
+ if (is_sfp) {
+ if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
+ offset -= RTE_ETH_MODULE_SFF_8079_LEN;
+ addr = I40E_I2C_EEPROM_DEV_ADDR2;
+ }
+ } else {
+ while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
+ /* Compute memory page number and offset. */
+ offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
+ addr++;
+ }
+ }
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ addr, offset, &value, NULL);
+ if (status)
+ return -EIO;
+ data[i] = (uint8_t)value;
+ }
+ return 0;
+}
+
+static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_mac_filter_info mac_filter;
+ struct i40e_mac_filter *f;
+ int ret;
+
+ if (!is_valid_assigned_ether_addr(mac_addr)) {
+ PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
+ return -EINVAL;
+ }
+
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
+ break;
+ }
+
+ if (f == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
+ return -EIO;
+ }
+
+ mac_filter = f->mac_info;
+ ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to delete mac filter");
+ return -EIO;
+ }
+ memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
+ ret = i40e_vsi_add_mac(vsi, &mac_filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add mac filter");
+ return -EIO;
+ }
+ memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
+
+ ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+ mac_addr->addr_bytes, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to change mac");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
+ uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
+ int ret = 0;
+
+ /* check if mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ return -EINVAL;
+
+ /* mtu setting is forbidden if port is start */
+ if (dev_data->dev_started) {
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+ dev_data->port_id);
+ return -EBUSY;
+ }
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return ret;
+}
+
+/* Restore ethertype filter */
+static void
+i40e_ethertype_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags;
+
+ TAILQ_FOREACH(f, ethertype_list, rules) {
+ flags = 0;
+ if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ i40e_aq_add_rem_control_packet_filter(hw,
+ f->input.mac_addr.addr_bytes,
+ f->input.ether_type,
+ flags, pf->main_vsi->seid,
+ f->queue, 1, &stats, NULL);
+ }
+ PMD_DRV_LOG(INFO, "Ethertype filter:"
+ " mac_etype_used = %u, etype_used = %u,"
+ " mac_etype_free = %u, etype_free = %u",
+ stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
+}
+
+/* Restore tunnel filter */
+static void
+i40e_tunnel_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_pf_vf *vf;
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+ bool big_buffer = 0;
+
+ TAILQ_FOREACH(f, tunnel_list, rules) {
+ if (!f->is_to_vf)
+ vsi = pf->main_vsi;
+ else {
+ vf = &pf->vfs[f->vf_id];
+ vsi = vf->vsi;
+ }
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
+ (struct ether_addr *)&cld_filter.element.outer_mac);
+ ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
+ (struct ether_addr *)&cld_filter.element.inner_mac);
+ cld_filter.element.inner_vlan = f->input.inner_vlan;
+ cld_filter.element.flags = f->input.flags;
+ cld_filter.element.tenant_id = f->input.tenant_id;
+ cld_filter.element.queue_number = f->queue;
+ rte_memcpy(cld_filter.general_fields,
+ f->input.general_fields,
+ sizeof(f->input.general_fields));
+
+ if (((f->input.flags &
+ I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
+ ((f->input.flags &
+ I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
+ ((f->input.flags &
+ I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X10))
+ big_buffer = 1;
+
+ if (big_buffer)
+ i40e_aq_add_cloud_filters_big_buffer(hw,
+ vsi->seid, &cld_filter, 1);
+ else
+ i40e_aq_add_cloud_filters(hw, vsi->seid,
+ &cld_filter.element, 1);
+ }
+}
+
+/* Restore rss filter */
+static inline void
+i40e_rss_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_rte_flow_rss_conf *conf =
+ &pf->rss_info;
+ if (conf->conf.queue_num)
+ i40e_config_rss_filter(pf, conf, TRUE);
+}
+
+static void
+i40e_filter_restore(struct i40e_pf *pf)
+{
+ i40e_ethertype_filter_restore(pf);
+ i40e_tunnel_filter_restore(pf);
+ i40e_fdir_filter_restore(pf);
+ i40e_rss_filter_restore(pf);
+}
+
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
+{
+ if (strcmp(dev->device->driver->name, drv->driver.name))
+ return false;
+
+ return true;
+}
+
+bool
+is_i40e_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_i40e_pmd);
+}
+
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
+{
+ int i;
+
+ for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
+ if (pf->customized_pctype[i].index == index)
+ return &pf->customized_pctype[i];
+ }
+ return NULL;
+}
+
+static int
+i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
+ uint32_t pkg_size, uint32_t proto_num,
+ struct rte_pmd_i40e_proto_info *proto,
+ enum rte_pmd_i40e_package_op op)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ uint32_t pctype_num;
+ struct rte_pmd_i40e_ptype_info *pctype;
+ uint32_t buff_size;
+ struct i40e_customized_pctype *new_pctype = NULL;
+ uint8_t proto_id;
+ uint8_t pctype_value;
+ char name[64];
+ uint32_t i, j, n;
+ int ret;
+
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return -1;
+ }
+
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&pctype_num, sizeof(pctype_num),
+ RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get pctype number");
+ return -1;
+ }
+ if (!pctype_num) {
+ PMD_DRV_LOG(INFO, "No new pctype added");
+ return -1;
+ }
+
+ buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
+ pctype = rte_zmalloc("new_pctype", buff_size, 0);
+ if (!pctype) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return -1;
+ }
+ /* get information about new pctype list */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)pctype, buff_size,
+ RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get pctype list");
+ rte_free(pctype);
+ return -1;
+ }
+
+ /* Update customized pctype. */
+ for (i = 0; i < pctype_num; i++) {
+ pctype_value = pctype[i].ptype_id;
+ memset(name, 0, sizeof(name));
+ for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+ proto_id = pctype[i].protocols[j];
+ if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+ continue;
+ for (n = 0; n < proto_num; n++) {
+ if (proto[n].proto_id != proto_id)
+ continue;
+ strcat(name, proto[n].name);
+ strcat(name, "_");
+ break;
+ }
+ }
+ name[strlen(name) - 1] = '\0';
+ if (!strcmp(name, "GTPC"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPC);
+ else if (!strcmp(name, "GTPU_IPV4"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU_IPV4);
+ else if (!strcmp(name, "GTPU_IPV6"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU_IPV6);
+ else if (!strcmp(name, "GTPU"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU);
+ if (new_pctype) {
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
+ new_pctype->pctype = pctype_value;
+ new_pctype->valid = true;
+ } else {
+ new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
+ new_pctype->valid = false;
+ }
+ }
+ }
+
+ rte_free(pctype);
+ return 0;
+}
+
+static int
+i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
+ uint32_t pkg_size, uint32_t proto_num,
+ struct rte_pmd_i40e_proto_info *proto,
+ enum rte_pmd_i40e_package_op op)
+{
+ struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
+ uint16_t port_id = dev->data->port_id;
+ uint32_t ptype_num;
+ struct rte_pmd_i40e_ptype_info *ptype;
+ uint32_t buff_size;
+ uint8_t proto_id;
+ char name[RTE_PMD_I40E_DDP_NAME_SIZE];
+ uint32_t i, j, n;
+ bool in_tunnel;
+ int ret;
+
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return -1;
+ }
+
+ if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ rte_pmd_i40e_ptype_mapping_reset(port_id);
+ return 0;
+ }
+
+ /* get information about new ptype num */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&ptype_num, sizeof(ptype_num),
+ RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get ptype number");
+ return ret;
+ }
+ if (!ptype_num) {
+ PMD_DRV_LOG(INFO, "No new ptype added");
+ return -1;
+ }
+
+ buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
+ ptype = rte_zmalloc("new_ptype", buff_size, 0);
+ if (!ptype) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return -1;
+ }
+
+ /* get information about new ptype list */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)ptype, buff_size,
+ RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get ptype list");
+ rte_free(ptype);
+ return ret;
+ }
+
+ buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
+ ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
+ if (!ptype_mapping) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ rte_free(ptype);
+ return -1;
+ }
+
+ /* Update ptype mapping table. */
+ for (i = 0; i < ptype_num; i++) {
+ ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
+ ptype_mapping[i].sw_ptype = 0;
+ in_tunnel = false;
+ for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+ proto_id = ptype[i].protocols[j];
+ if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+ continue;
+ for (n = 0; n < proto_num; n++) {
+ if (proto[n].proto_id != proto_id)
+ continue;
+ memset(name, 0, sizeof(name));
+ strcpy(name, proto[n].name);
+ if (!strncasecmp(name, "PPPOE", 5))
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L2_ETHER_PPPOE;
+ else if (!strncasecmp(name, "IPV4FRAG", 8) &&
+ !in_tunnel) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_FRAG;
+ } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
+ in_tunnel) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_FRAG;
+ } else if (!strncasecmp(name, "OIPV4", 5)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "IPV4", 4) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (!strncasecmp(name, "IPV4", 4) &&
+ in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ else if (!strncasecmp(name, "IPV6FRAG", 8) &&
+ !in_tunnel) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_FRAG;
+ } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
+ in_tunnel) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_FRAG;
+ } else if (!strncasecmp(name, "OIPV6", 5)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "IPV6", 4) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ else if (!strncasecmp(name, "IPV6", 4) &&
+ in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ else if (!strncasecmp(name, "UDP", 3) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_UDP;
+ else if (!strncasecmp(name, "UDP", 3) &&
+ in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_UDP;
+ else if (!strncasecmp(name, "TCP", 3) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_TCP;
+ else if (!strncasecmp(name, "TCP", 3) &&
+ in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_TCP;
+ else if (!strncasecmp(name, "SCTP", 4) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_SCTP;
+ else if (!strncasecmp(name, "SCTP", 4) &&
+ in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_SCTP;
+ else if ((!strncasecmp(name, "ICMP", 4) ||
+ !strncasecmp(name, "ICMPV6", 6)) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_ICMP;
+ else if ((!strncasecmp(name, "ICMP", 4) ||
+ !strncasecmp(name, "ICMPV6", 6)) &&
+ in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_ICMP;
+ else if (!strncasecmp(name, "GTPC", 4)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_GTPC;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "GTPU", 4)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_GTPU;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "GRENAT", 6)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_GRENAT;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
+ !strncasecmp(name, "L2TPV2", 6)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_L2TP;
+ in_tunnel = true;
+ }
+
+ break;
+ }
+ }
+ }
+
+ ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
+ ptype_num, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to update mapping table.");
+
+ rte_free(ptype_mapping);
+ rte_free(ptype);
+ return ret;
+}
+
+void
+i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+ uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ uint32_t proto_num;
+ struct rte_pmd_i40e_proto_info *proto;
+ uint32_t buff_size;
+ uint32_t i;
+ int ret;
+
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return;
+ }
+
+ /* get information about protocol number */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&proto_num, sizeof(proto_num),
+ RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get protocol number");
+ return;
+ }
+ if (!proto_num) {
+ PMD_DRV_LOG(INFO, "No new protocol added");
+ return;
+ }
+
+ buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
+ proto = rte_zmalloc("new_proto", buff_size, 0);
+ if (!proto) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return;
+ }
+
+ /* get information about protocol list */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)proto, buff_size,
+ RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get protocol list");
+ rte_free(proto);
+ return;
+ }
+
+ /* Check if GTP is supported. */
+ for (i = 0; i < proto_num; i++) {
+ if (!strncmp(proto[i].name, "GTP", 3)) {
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ pf->gtp_support = true;
+ else
+ pf->gtp_support = false;
+ break;
+ }
+ }
+
+ /* Update customized pctype info */
+ ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
+ proto_num, proto, op);
+ if (ret)
+ PMD_DRV_LOG(INFO, "No pctype is updated.");
+
+ /* Update customized ptype info */
+ ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
+ proto_num, proto, op);
+ if (ret)
+ PMD_DRV_LOG(INFO, "No ptype is updated.");
+
+ rte_free(proto);
+}
+
+/* Create a QinQ cloud filter
+ *
+ * The Fortville NIC has limited resources for tunnel filters,
+ * so we can only reuse existing filters.
+ *
+ * In step 1 we define which Field Vector fields can be used for
+ * filter types.
+ * As we do not have the inner tag defined as a field,
+ * we have to define it first, by reusing one of L1 entries.
+ *
+ * In step 2 we are replacing one of existing filter types with
+ * a new one for QinQ.
+ * As we reusing L1 and replacing L2, some of the default filter
+ * types will disappear,which depends on L1 and L2 entries we reuse.
+ *
+ * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
+ *
+ * 1. Create L1 filter of outer vlan (12b) which will be in use
+ * later when we define the cloud filter.
+ * a. Valid_flags.replace_cloud = 0
+ * b. Old_filter = 10 (Stag_Inner_Vlan)
+ * c. New_filter = 0x10
+ * d. TR bit = 0xff (optional, not used here)
+ * e. Buffer – 2 entries:
+ * i. Byte 0 = 8 (outer vlan FV index).
+ * Byte 1 = 0 (rsv)
+ * Byte 2-3 = 0x0fff
+ * ii. Byte 0 = 37 (inner vlan FV index).
+ * Byte 1 =0 (rsv)
+ * Byte 2-3 = 0x0fff
+ *
+ * Step 2:
+ * 2. Create cloud filter using two L1 filters entries: stag and
+ * new filter(outer vlan+ inner vlan)
+ * a. Valid_flags.replace_cloud = 1
+ * b. Old_filter = 1 (instead of outer IP)
+ * c. New_filter = 0x10
+ * d. Buffer – 2 entries:
+ * i. Byte 0 = 0x80 | 7 (valid | Stag).
+ * Byte 1-3 = 0 (rsv)
+ * ii. Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
+ * Byte 9-11 = 0 (rsv)
+ */
+static int
+i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
+{
+ int ret = -ENOTSUP;
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
+ return ret;
+ }
+
+ /* Init */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
+ filter_replace.tr_bit = 0;
+
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Field Vector 12b mask */
+ filter_replace_buf.data[2] = 0xff;
+ filter_replace_buf.data[3] = 0x0f;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Field Vector 12b mask */
+ filter_replace_buf.data[6] = 0xff;
+ filter_replace_buf.data[7] = 0x0f;
+ ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ /* Apply the second L2 cloud filter */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L2 filter, input for L2 filter will be L1 filter */
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
+
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (!ret && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ return ret;
+}
+
+int
+i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+i40e_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
+int
+i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t i, lut = 0;
+ uint16_t j, num;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+
+ if (!add) {
+ if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
+ i40e_pf_disable_rss(pf);
+ memset(rss_info, 0,
+ sizeof(struct i40e_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (rss_info->conf.queue_num)
+ return -EINVAL;
+
+ /* If both VMDQ and RSS enabled, not all of PF queues are configured.
+ * It's necessary to calculate the actual PF queues that are configured.
+ */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ num = i40e_pf_calc_configured_queues_num(pf);
+ else
+ num = pf->dev_data->nb_rx_queues;
+
+ num = RTE_MIN(num, conf->conf.queue_num);
+ PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
+ num);
+
+ if (num == 0) {
+ PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
+ return -ENOTSUP;
+ }
+
+ /* Fill in redirection table */
+ for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == num)
+ j = 0;
+ lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
+ hw->func_caps.rss_table_entry_width) - 1));
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+
+ if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
+ i40e_pf_disable_rss(pf);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+ (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Random default keys */
+ static uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+ rss_conf.rss_key = (uint8_t *)rss_key_default;
+ rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
+
+ i40e_hw_rss_hash_set(pf, &rss_conf);
+
+ if (i40e_rss_conf_init(rss_info, &conf->conf))
+ return -EINVAL;
+
+ return 0;
+}
+
+RTE_INIT(i40e_init_log)
+{
+ i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
+ if (i40e_logtype_init >= 0)
+ rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
+ i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
+ if (i40e_logtype_driver >= 0)
+ rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
+}
+
+RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
+ ETH_I40E_FLOATING_VEB_ARG "=1"
+ ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
+ ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.h b/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.h
new file mode 100644
index 00000000..3fffe5a5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.h
@@ -0,0 +1,1398 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#ifndef _I40E_ETHDEV_H_
+#define _I40E_ETHDEV_H_
+
+#include <stdint.h>
+
+#include <rte_eth_ctrl.h>
+#include <rte_time.h>
+#include <rte_kvargs.h>
+#include <rte_hash.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_tm_driver.h>
+#include "rte_pmd_i40e.h"
+
+#include "base/i40e_register.h"
+
+#define I40E_VLAN_TAG_SIZE 4
+
+#define I40E_AQ_LEN 32
+#define I40E_AQ_BUF_SZ 4096
+/* Number of queues per TC should be one of 1, 2, 4, 8, 16, 32, 64 */
+#define I40E_MAX_Q_PER_TC 64
+#define I40E_NUM_DESC_DEFAULT 512
+#define I40E_NUM_DESC_ALIGN 32
+#define I40E_BUF_SIZE_MIN 1024
+#define I40E_FRAME_SIZE_MAX 9728
+#define I40E_TSO_FRAME_SIZE_MAX 262144
+#define I40E_QUEUE_BASE_ADDR_UNIT 128
+/* number of VSIs and queue default setting */
+#define I40E_MAX_QP_NUM_PER_VF 16
+#define I40E_DEFAULT_QP_NUM_FDIR 1
+#define I40E_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t))
+#define I40E_VFTA_SIZE (4096 / I40E_UINT32_BIT_SIZE)
+/* Maximun number of MAC addresses */
+#define I40E_NUM_MACADDR_MAX 64
+/* Maximum number of VFs */
+#define I40E_MAX_VF 128
+/*flag of no loopback*/
+#define I40E_AQ_LB_MODE_NONE 0x0
+/*
+ * vlan_id is a 12 bit number.
+ * The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
+ * 2^5 = 32. The val of lower 5 bits specifies the bit in the 32bit element.
+ * The higher 7 bit val specifies VFTA array index.
+ */
+#define I40E_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F))
+#define I40E_VFTA_IDX(vlan_id) ((vlan_id) >> 5)
+
+/* Default TC traffic in case DCB is not enabled */
+#define I40E_DEFAULT_TCMAP 0x1
+#define I40E_FDIR_QUEUE_ID 0
+
+/* Always assign pool 0 to main VSI, VMDQ will start from 1 */
+#define I40E_VMDQ_POOL_BASE 1
+
+#define I40E_DEFAULT_RX_FREE_THRESH 32
+#define I40E_DEFAULT_RX_PTHRESH 8
+#define I40E_DEFAULT_RX_HTHRESH 8
+#define I40E_DEFAULT_RX_WTHRESH 0
+
+#define I40E_DEFAULT_TX_FREE_THRESH 32
+#define I40E_DEFAULT_TX_PTHRESH 32
+#define I40E_DEFAULT_TX_HTHRESH 0
+#define I40E_DEFAULT_TX_WTHRESH 0
+#define I40E_DEFAULT_TX_RSBIT_THRESH 32
+
+/* Bit shift and mask */
+#define I40E_4_BIT_WIDTH (CHAR_BIT / 2)
+#define I40E_4_BIT_MASK RTE_LEN2MASK(I40E_4_BIT_WIDTH, uint8_t)
+#define I40E_8_BIT_WIDTH CHAR_BIT
+#define I40E_8_BIT_MASK UINT8_MAX
+#define I40E_16_BIT_WIDTH (CHAR_BIT * 2)
+#define I40E_16_BIT_MASK UINT16_MAX
+#define I40E_32_BIT_WIDTH (CHAR_BIT * 4)
+#define I40E_32_BIT_MASK UINT32_MAX
+#define I40E_48_BIT_WIDTH (CHAR_BIT * 6)
+#define I40E_48_BIT_MASK RTE_LEN2MASK(I40E_48_BIT_WIDTH, uint64_t)
+
+/* Linux PF host with virtchnl version 1.1 */
+#define PF_IS_V11(vf) \
+ (((vf)->version_major == VIRTCHNL_VERSION_MAJOR) && \
+ ((vf)->version_minor == 1))
+
+#define I40E_WRITE_GLB_REG(hw, reg, value) \
+ do { \
+ uint32_t ori_val; \
+ struct rte_eth_dev *dev; \
+ ori_val = I40E_READ_REG((hw), (reg)); \
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev; \
+ I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), \
+ (reg)), (value)); \
+ if (ori_val != value) \
+ PMD_DRV_LOG(WARNING, \
+ "i40e device %s changed global " \
+ "register [0x%08x]. original: 0x%08x, " \
+ "new: 0x%08x ", \
+ (dev->device->name), (reg), \
+ (ori_val), (value)); \
+ } while (0)
+
+/* index flex payload per layer */
+enum i40e_flxpld_layer_idx {
+ I40E_FLXPLD_L2_IDX = 0,
+ I40E_FLXPLD_L3_IDX = 1,
+ I40E_FLXPLD_L4_IDX = 2,
+ I40E_MAX_FLXPLD_LAYER = 3,
+};
+#define I40E_MAX_FLXPLD_FIED 3 /* max number of flex payload fields */
+#define I40E_FDIR_BITMASK_NUM_WORD 2 /* max number of bitmask words */
+#define I40E_FDIR_MAX_FLEXWORD_NUM 8 /* max number of flexpayload words */
+#define I40E_FDIR_MAX_FLEX_LEN 16 /* len in bytes of flex payload */
+#define I40E_INSET_MASK_NUM_REG 2 /* number of input set mask registers */
+
+/* i40e flags */
+#define I40E_FLAG_RSS (1ULL << 0)
+#define I40E_FLAG_DCB (1ULL << 1)
+#define I40E_FLAG_VMDQ (1ULL << 2)
+#define I40E_FLAG_SRIOV (1ULL << 3)
+#define I40E_FLAG_HEADER_SPLIT_DISABLED (1ULL << 4)
+#define I40E_FLAG_HEADER_SPLIT_ENABLED (1ULL << 5)
+#define I40E_FLAG_FDIR (1ULL << 6)
+#define I40E_FLAG_VXLAN (1ULL << 7)
+#define I40E_FLAG_RSS_AQ_CAPABLE (1ULL << 8)
+#define I40E_FLAG_VF_MAC_BY_PF (1ULL << 9)
+#define I40E_FLAG_ALL (I40E_FLAG_RSS | \
+ I40E_FLAG_DCB | \
+ I40E_FLAG_VMDQ | \
+ I40E_FLAG_SRIOV | \
+ I40E_FLAG_HEADER_SPLIT_DISABLED | \
+ I40E_FLAG_HEADER_SPLIT_ENABLED | \
+ I40E_FLAG_FDIR | \
+ I40E_FLAG_VXLAN | \
+ I40E_FLAG_RSS_AQ_CAPABLE | \
+ I40E_FLAG_VF_MAC_BY_PF)
+
+#define I40E_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV4_SCTP | \
+ ETH_RSS_NONFRAG_IPV4_OTHER | \
+ ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_NONFRAG_IPV6_SCTP | \
+ ETH_RSS_NONFRAG_IPV6_OTHER | \
+ ETH_RSS_L2_PAYLOAD)
+
+/* All bits of RSS hash enable for X722*/
+#define I40E_RSS_HENA_ALL_X722 ( \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ I40E_RSS_HENA_ALL)
+
+/* All bits of RSS hash enable */
+#define I40E_RSS_HENA_ALL ( \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ (1ULL << I40E_FILTER_PCTYPE_FCOE_OX) | \
+ (1ULL << I40E_FILTER_PCTYPE_FCOE_RX) | \
+ (1ULL << I40E_FILTER_PCTYPE_FCOE_OTHER) | \
+ (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define I40E_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+/* Default queue interrupt throttling time in microseconds */
+#define I40E_ITR_INDEX_DEFAULT 0
+#define I40E_ITR_INDEX_NONE 3
+#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
+#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+/* Special FW support this floating VEB feature */
+#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
+#define FLOATING_VEB_SUPPORTED_FW_MIN 0
+
+#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
+ I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
+
+#define I40E_INSET_NONE 0x00000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define I40E_INSET_DMAC 0x0000000000000001ULL
+#define I40E_INSET_SMAC 0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
+#define I40E_INSET_DST_PORT 0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+ (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+ I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+ I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+
+/* The max bandwidth of i40e is 40Gbps. */
+#define I40E_QOS_BW_MAX 40000
+/* The bandwidth should be the multiple of 50Mbps. */
+#define I40E_QOS_BW_GRANULARITY 50
+/* The min bandwidth weight is 1. */
+#define I40E_QOS_BW_WEIGHT_MIN 1
+/* The max bandwidth weight is 127. */
+#define I40E_QOS_BW_WEIGHT_MAX 127
+/* The max queue region index is 7. */
+#define I40E_REGION_MAX_INDEX 7
+
+#define I40E_MAX_PERCENT 100
+#define I40E_DEFAULT_DCB_APP_NUM 1
+#define I40E_DEFAULT_DCB_APP_PRIO 3
+
+/**
+ * The overhead from MTU to max frame size.
+ * Considering QinQ packet, the VLAN tag needs to be counted twice.
+ */
+#define I40E_ETH_OVERHEAD \
+ (ETHER_HDR_LEN + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2)
+
+struct i40e_adapter;
+
+/**
+ * MAC filter structure
+ */
+struct i40e_mac_filter_info {
+ enum rte_mac_filter_type filter_type;
+ struct ether_addr mac_addr;
+};
+
+TAILQ_HEAD(i40e_mac_filter_list, i40e_mac_filter);
+
+/* MAC filter list structure */
+struct i40e_mac_filter {
+ TAILQ_ENTRY(i40e_mac_filter) next;
+ struct i40e_mac_filter_info mac_info;
+};
+
+TAILQ_HEAD(i40e_vsi_list_head, i40e_vsi_list);
+
+struct i40e_vsi;
+
+/* VSI list structure */
+struct i40e_vsi_list {
+ TAILQ_ENTRY(i40e_vsi_list) list;
+ struct i40e_vsi *vsi;
+};
+
+struct i40e_rx_queue;
+struct i40e_tx_queue;
+
+/* Bandwidth limit information */
+struct i40e_bw_info {
+ uint16_t bw_limit; /* BW Limit (0 = disabled) */
+ uint8_t bw_max; /* Max BW limit if enabled */
+
+ /* Relative credits within same TC with respect to other VSIs or Comps */
+ uint8_t bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* Bandwidth limit per TC */
+ uint16_t bw_ets_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* Max bandwidth limit per TC */
+ uint8_t bw_ets_max[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* Structure that defines a VEB */
+struct i40e_veb {
+ struct i40e_vsi_list_head head;
+ struct i40e_vsi *associate_vsi; /* Associate VSI who owns the VEB */
+ struct i40e_pf *associate_pf; /* Associate PF who owns the VEB */
+ uint16_t seid; /* The seid of VEB itself */
+ uint16_t uplink_seid; /* The uplink seid of this VEB */
+ uint16_t stats_idx;
+ struct i40e_eth_stats stats;
+ uint8_t enabled_tc; /* The traffic class enabled */
+ uint8_t strict_prio_tc; /* bit map of TCs set to strict priority mode */
+ struct i40e_bw_info bw_info; /* VEB bandwidth information */
+};
+
+/* i40e MACVLAN filter structure */
+struct i40e_macvlan_filter {
+ struct ether_addr macaddr;
+ enum rte_mac_filter_type filter_type;
+ uint16_t vlan_id;
+};
+
+/*
+ * Structure that defines a VSI, associated with a adapter.
+ */
+struct i40e_vsi {
+ struct i40e_adapter *adapter; /* Backreference to associated adapter */
+ struct i40e_aqc_vsi_properties_data info; /* VSI properties */
+
+ struct i40e_eth_stats eth_stats_offset;
+ struct i40e_eth_stats eth_stats;
+ /*
+ * When drivers loaded, only a default main VSI exists. In case new VSI
+ * needs to add, HW needs to know the layout that VSIs are organized.
+ * Besides that, VSI isan element and can't switch packets, which needs
+ * to add new component VEB to perform switching. So, a new VSI needs
+ * to specify the uplink VSI (Parent VSI) before created. The
+ * uplink VSI will check whether it had a VEB to switch packets. If no,
+ * it will try to create one. Then, uplink VSI will move the new VSI
+ * into its' sib_vsi_list to manage all the downlink VSI.
+ * sib_vsi_list: the VSI list that shared the same uplink VSI.
+ * parent_vsi : the uplink VSI. It's NULL for main VSI.
+ * veb : the VEB associates with the VSI.
+ */
+ struct i40e_vsi_list sib_vsi_list; /* sibling vsi list */
+ struct i40e_vsi *parent_vsi;
+ struct i40e_veb *veb; /* Associated veb, could be null */
+ struct i40e_veb *floating_veb; /* Associated floating veb */
+ bool offset_loaded;
+ enum i40e_vsi_type type; /* VSI types */
+ uint16_t vlan_num; /* Total VLAN number */
+ uint16_t mac_num; /* Total mac number */
+ uint32_t vfta[I40E_VFTA_SIZE]; /* VLAN bitmap */
+ struct i40e_mac_filter_list mac_list; /* macvlan filter list */
+ /* specific VSI-defined parameters, SRIOV stored the vf_id */
+ uint32_t user_param;
+ uint16_t seid; /* The seid of VSI itself */
+ uint16_t uplink_seid; /* The uplink seid of this VSI */
+ uint16_t nb_qps; /* Number of queue pairs VSI can occupy */
+ uint16_t nb_used_qps; /* Number of queue pairs VSI uses */
+ uint16_t max_macaddrs; /* Maximum number of MAC addresses */
+ uint16_t base_queue; /* The first queue index of this VSI */
+ /*
+ * The offset to visit VSI related register, assigned by HW when
+ * creating VSI
+ */
+ uint16_t vsi_id;
+ uint16_t msix_intr; /* The MSIX interrupt binds to VSI */
+ uint16_t nb_msix; /* The max number of msix vector */
+ uint8_t enabled_tc; /* The traffic class enabled */
+ uint8_t vlan_anti_spoof_on; /* The VLAN anti-spoofing enabled */
+ uint8_t vlan_filter_on; /* The VLAN filter enabled */
+ struct i40e_bw_info bw_info; /* VSI bandwidth information */
+};
+
+struct pool_entry {
+ LIST_ENTRY(pool_entry) next;
+ uint16_t base;
+ uint16_t len;
+};
+
+LIST_HEAD(res_list, pool_entry);
+
+struct i40e_res_pool_info {
+ uint32_t base; /* Resource start index */
+ uint32_t num_alloc; /* Allocated resource number */
+ uint32_t num_free; /* Total available resource number */
+ struct res_list alloc_list; /* Allocated resource list */
+ struct res_list free_list; /* Available resource list */
+};
+
+enum I40E_VF_STATE {
+ I40E_VF_INACTIVE = 0,
+ I40E_VF_INRESET,
+ I40E_VF_ININIT,
+ I40E_VF_ACTIVE,
+};
+
+/*
+ * Structure to store private data for PF host.
+ */
+struct i40e_pf_vf {
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ enum I40E_VF_STATE state; /* The number of queue pairs available */
+ uint16_t vf_idx; /* VF index in pf->vfs */
+ uint16_t lan_nb_qps; /* Actual queues allocated */
+ uint16_t reset_cnt; /* Total vf reset times */
+ struct ether_addr mac_addr; /* Default MAC address */
+ /* version of the virtchnl from VF */
+ struct virtchnl_version_info version;
+ uint32_t request_caps; /* offload caps requested from VF */
+};
+
+/*
+ * Structure to store private data for flow control.
+ */
+struct i40e_fc_conf {
+ uint16_t pause_time; /* Flow control pause timer */
+ /* FC high water 0-7 for pfc and 8 for lfc unit:kilobytes */
+ uint32_t high_water[I40E_MAX_TRAFFIC_CLASS + 1];
+ /* FC low water 0-7 for pfc and 8 for lfc unit:kilobytes */
+ uint32_t low_water[I40E_MAX_TRAFFIC_CLASS + 1];
+};
+
+/*
+ * Structure to store private data for VMDQ instance
+ */
+struct i40e_vmdq_info {
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+};
+
+#define I40E_FDIR_MAX_FLEXLEN 16 /**< Max length of flexbytes. */
+#define I40E_MAX_FLX_SOURCE_OFF 480
+#define NONUSE_FLX_PIT_DEST_OFF 63
+#define NONUSE_FLX_PIT_FSIZE 1
+#define I40E_FLX_OFFSET_IN_FIELD_VECTOR 50
+#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \
+ (((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \
+ (((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \
+ ((((dst_offset) == NONUSE_FLX_PIT_DEST_OFF ? \
+ NONUSE_FLX_PIT_DEST_OFF : \
+ ((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR)) << \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
+#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF))
+#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
+#define I40E_FDIR_IPv6_TC_OFFSET 20
+
+/* A structure used to define the input for GTP flow */
+struct i40e_gtp_flow {
+ struct rte_eth_udpv4_flow udp; /* IPv4 UDP fields to match. */
+ uint8_t msg_type; /* Message type. */
+ uint32_t teid; /* TEID in big endian. */
+};
+
+/* A structure used to define the input for GTP IPV4 flow */
+struct i40e_gtp_ipv4_flow {
+ struct i40e_gtp_flow gtp;
+ struct rte_eth_ipv4_flow ip4;
+};
+
+/* A structure used to define the input for GTP IPV6 flow */
+struct i40e_gtp_ipv6_flow {
+ struct i40e_gtp_flow gtp;
+ struct rte_eth_ipv6_flow ip6;
+};
+
+/* A structure used to define the input for raw type flow */
+struct i40e_raw_flow {
+ uint16_t pctype;
+ void *packet;
+ uint32_t length;
+};
+
+/*
+ * A union contains the inputs for all types of flow
+ * items in flows need to be in big endian
+ */
+union i40e_fdir_flow {
+ struct rte_eth_l2_flow l2_flow;
+ struct rte_eth_udpv4_flow udp4_flow;
+ struct rte_eth_tcpv4_flow tcp4_flow;
+ struct rte_eth_sctpv4_flow sctp4_flow;
+ struct rte_eth_ipv4_flow ip4_flow;
+ struct rte_eth_udpv6_flow udp6_flow;
+ struct rte_eth_tcpv6_flow tcp6_flow;
+ struct rte_eth_sctpv6_flow sctp6_flow;
+ struct rte_eth_ipv6_flow ipv6_flow;
+ struct i40e_gtp_flow gtp_flow;
+ struct i40e_gtp_ipv4_flow gtp_ipv4_flow;
+ struct i40e_gtp_ipv6_flow gtp_ipv6_flow;
+ struct i40e_raw_flow raw_flow;
+};
+
+enum i40e_fdir_ip_type {
+ I40E_FDIR_IPTYPE_IPV4,
+ I40E_FDIR_IPTYPE_IPV6,
+};
+
+/* A structure used to contain extend input of flow */
+struct i40e_fdir_flow_ext {
+ uint16_t vlan_tci;
+ uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
+ /* It is filled by the flexible payload to match. */
+ uint8_t is_vf; /* 1 for VF, 0 for port dev */
+ uint16_t dst_id; /* VF ID, available when is_vf is 1*/
+ bool inner_ip; /* If there is inner ip */
+ enum i40e_fdir_ip_type iip_type; /* ip type for inner ip */
+ bool customized_pctype; /* If customized pctype is used */
+ bool pkt_template; /* If raw packet template is used */
+};
+
+/* A structure used to define the input for a flow director filter entry */
+struct i40e_fdir_input {
+ enum i40e_filter_pctype pctype;
+ union i40e_fdir_flow flow;
+ /* Flow fields to match, dependent on flow_type */
+ struct i40e_fdir_flow_ext flow_ext;
+ /* Additional fields to match */
+};
+
+/* Behavior will be taken if FDIR match */
+enum i40e_fdir_behavior {
+ I40E_FDIR_ACCEPT = 0,
+ I40E_FDIR_REJECT,
+ I40E_FDIR_PASSTHRU,
+};
+
+/* Flow director report status
+ * It defines what will be reported if FDIR entry is matched.
+ */
+enum i40e_fdir_status {
+ I40E_FDIR_NO_REPORT_STATUS = 0, /* Report nothing. */
+ I40E_FDIR_REPORT_ID, /* Only report FD ID. */
+ I40E_FDIR_REPORT_ID_FLEX_4, /* Report FD ID and 4 flex bytes. */
+ I40E_FDIR_REPORT_FLEX_8, /* Report 8 flex bytes. */
+};
+
+/* A structure used to define an action when match FDIR packet filter. */
+struct i40e_fdir_action {
+ uint16_t rx_queue; /* Queue assigned to if FDIR match. */
+ enum i40e_fdir_behavior behavior; /* Behavior will be taken */
+ enum i40e_fdir_status report_status; /* Status report option */
+ /* If report_status is I40E_FDIR_REPORT_ID_FLEX_4 or
+ * I40E_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
+ * flex bytes start from in flexible payload.
+ */
+ uint8_t flex_off;
+};
+
+/* A structure used to define the flow director filter entry by filter_ctrl API
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and
+ * RTE_ETH_FILTER_DELETE operations.
+ */
+struct i40e_fdir_filter_conf {
+ uint32_t soft_id;
+ /* ID, an unique value is required when deal with FDIR entry */
+ struct i40e_fdir_input input; /* Input set */
+ struct i40e_fdir_action action; /* Action taken when match */
+};
+
+/*
+ * Structure to store flex pit for flow diretor.
+ */
+struct i40e_fdir_flex_pit {
+ uint8_t src_offset; /* offset in words from the beginning of payload */
+ uint8_t size; /* size in words */
+ uint8_t dst_offset; /* offset in words of flexible payload */
+};
+
+struct i40e_fdir_flex_mask {
+ uint8_t word_mask; /**< Bit i enables word i of flexible payload */
+ uint8_t nb_bitmask;
+ struct {
+ uint8_t offset;
+ uint16_t mask;
+ } bitmask[I40E_FDIR_BITMASK_NUM_WORD];
+};
+
+#define I40E_FILTER_PCTYPE_INVALID 0
+#define I40E_FILTER_PCTYPE_MAX 64
+#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
+
+struct i40e_fdir_filter {
+ TAILQ_ENTRY(i40e_fdir_filter) rules;
+ struct i40e_fdir_filter_conf fdir;
+};
+
+TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
+/*
+ * A structure used to define fields of a FDIR related info.
+ */
+struct i40e_fdir_info {
+ struct i40e_vsi *fdir_vsi; /* pointer to fdir VSI structure */
+ uint16_t match_counter_index; /* Statistic counter index used for fdir*/
+ struct i40e_tx_queue *txq;
+ struct i40e_rx_queue *rxq;
+ void *prg_pkt; /* memory for fdir program packet */
+ uint64_t dma_addr; /* physic address of packet memory*/
+ /* input set bits for each pctype */
+ uint64_t input_set[I40E_FILTER_PCTYPE_MAX];
+ /*
+ * the rule how bytes stream is extracted as flexible payload
+ * for each payload layer, the setting can up to three elements
+ */
+ struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
+ struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+
+ struct i40e_fdir_filter_list fdir_list;
+ struct i40e_fdir_filter **hash_map;
+ struct rte_hash *hash_table;
+
+ /* Mark if flex pit and mask is set */
+ bool flex_pit_flag[I40E_MAX_FLXPLD_LAYER];
+ bool flex_mask_flag[I40E_FILTER_PCTYPE_MAX];
+
+ bool inset_flag[I40E_FILTER_PCTYPE_MAX]; /* Mark if input set is set */
+};
+
+/* Ethertype filter number HW supports */
+#define I40E_MAX_ETHERTYPE_FILTER_NUM 768
+
+/* Ethertype filter struct */
+struct i40e_ethertype_filter_input {
+ struct ether_addr mac_addr; /* Mac address to match */
+ uint16_t ether_type; /* Ether type to match */
+};
+
+struct i40e_ethertype_filter {
+ TAILQ_ENTRY(i40e_ethertype_filter) rules;
+ struct i40e_ethertype_filter_input input;
+ uint16_t flags; /* Flags from RTE_ETHTYPE_FLAGS_* */
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_ethertype_filter_list, i40e_ethertype_filter);
+
+struct i40e_ethertype_rule {
+ struct i40e_ethertype_filter_list ethertype_list;
+ struct i40e_ethertype_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
+/* queue region info */
+struct i40e_queue_region_info {
+ /* the region id for this configuration */
+ uint8_t region_id;
+ /* the start queue index for this region */
+ uint8_t queue_start_index;
+ /* the total queue number of this queue region */
+ uint8_t queue_num;
+ /* the total number of user priority for this region */
+ uint8_t user_priority_num;
+ /* the packet's user priority for this region */
+ uint8_t user_priority[I40E_MAX_USER_PRIORITY];
+ /* the total number of flowtype for this region */
+ uint8_t flowtype_num;
+ /**
+ * the pctype or hardware flowtype of packet,
+ * the specific index for each type has been defined
+ * in file i40e_type.h as enum i40e_filter_pctype.
+ */
+ uint8_t hw_flowtype[I40E_FILTER_PCTYPE_MAX];
+};
+
+struct i40e_queue_regions {
+ /* the total number of queue region for this port */
+ uint16_t queue_region_number;
+ struct i40e_queue_region_info region[I40E_REGION_MAX_INDEX + 1];
+};
+
+/* Tunnel filter number HW supports */
+#define I40E_MAX_TUNNEL_FILTER_NUM 400
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP 8
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE 9
+#define I40E_AQC_ADD_CLOUD_FILTER_0X10 0x10
+#define I40E_AQC_ADD_CLOUD_FILTER_0X11 0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_0X12 0x12
+#define I40E_AQC_ADD_L1_FILTER_0X11 0x11
+#define I40E_AQC_ADD_L1_FILTER_0X12 0x12
+#define I40E_AQC_ADD_L1_FILTER_0X13 0x13
+#define I40E_AQC_NEW_TR_21 21
+#define I40E_AQC_NEW_TR_22 22
+
+enum i40e_tunnel_iptype {
+ I40E_TUNNEL_IPTYPE_IPV4,
+ I40E_TUNNEL_IPTYPE_IPV6,
+};
+
+/* Tunnel filter struct */
+struct i40e_tunnel_filter_input {
+ uint8_t outer_mac[6]; /* Outer mac address to match */
+ uint8_t inner_mac[6]; /* Inner mac address to match */
+ uint16_t inner_vlan; /* Inner vlan address to match */
+ enum i40e_tunnel_iptype ip_type;
+ uint16_t flags; /* Filter type flag */
+ uint32_t tenant_id; /* Tenant id to match */
+ uint16_t general_fields[32]; /* Big buffer */
+};
+
+struct i40e_tunnel_filter {
+ TAILQ_ENTRY(i40e_tunnel_filter) rules;
+ struct i40e_tunnel_filter_input input;
+ uint8_t is_to_vf; /* 0 - to PF, 1 - to VF */
+ uint16_t vf_id; /* VF id, avaiblable when is_to_vf is 1. */
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter);
+
+struct i40e_tunnel_rule {
+ struct i40e_tunnel_filter_list tunnel_list;
+ struct i40e_tunnel_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
+/**
+ * Tunnel type.
+ */
+enum i40e_tunnel_type {
+ I40E_TUNNEL_TYPE_NONE = 0,
+ I40E_TUNNEL_TYPE_VXLAN,
+ I40E_TUNNEL_TYPE_GENEVE,
+ I40E_TUNNEL_TYPE_TEREDO,
+ I40E_TUNNEL_TYPE_NVGRE,
+ I40E_TUNNEL_TYPE_IP_IN_GRE,
+ I40E_L2_TUNNEL_TYPE_E_TAG,
+ I40E_TUNNEL_TYPE_MPLSoUDP,
+ I40E_TUNNEL_TYPE_MPLSoGRE,
+ I40E_TUNNEL_TYPE_QINQ,
+ I40E_TUNNEL_TYPE_GTPC,
+ I40E_TUNNEL_TYPE_GTPU,
+ I40E_TUNNEL_TYPE_MAX,
+};
+
+/**
+ * Tunneling Packet filter configuration.
+ */
+struct i40e_tunnel_filter_conf {
+ struct ether_addr outer_mac; /**< Outer MAC address to match. */
+ struct ether_addr inner_mac; /**< Inner MAC address to match. */
+ uint16_t inner_vlan; /**< Inner VLAN to match. */
+ uint32_t outer_vlan; /**< Outer VLAN to match */
+ enum i40e_tunnel_iptype ip_type; /**< IP address type. */
+ /**
+ * Outer destination IP address to match if ETH_TUNNEL_FILTER_OIP
+ * is set in filter_type, or inner destination IP address to match
+ * if ETH_TUNNEL_FILTER_IIP is set in filter_type.
+ */
+ union {
+ uint32_t ipv4_addr; /**< IPv4 address in big endian. */
+ uint32_t ipv6_addr[4]; /**< IPv6 address in big endian. */
+ } ip_addr;
+ /** Flags from ETH_TUNNEL_FILTER_XX - see above. */
+ uint16_t filter_type;
+ enum i40e_tunnel_type tunnel_type; /**< Tunnel Type. */
+ uint32_t tenant_id; /**< Tenant ID to match. VNI, GRE key... */
+ uint16_t queue_id; /**< Queue assigned to if match. */
+ uint8_t is_to_vf; /**< 0 - to PF, 1 - to VF */
+ uint16_t vf_id; /**< VF id, avaiblable when is_to_vf is 1. */
+};
+
+#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
+#define I40E_MAX_MIRROR_RULES 64
+/*
+ * Mirror rule structure
+ */
+struct i40e_mirror_rule {
+ TAILQ_ENTRY(i40e_mirror_rule) rules;
+ uint8_t rule_type;
+ uint16_t index; /* the sw index of mirror rule */
+ uint16_t id; /* the rule id assigned by firmware */
+ uint16_t dst_vsi_seid; /* destination vsi for this mirror rule. */
+ uint16_t num_entries;
+ /* the info stores depend on the rule type.
+ If type is I40E_MIRROR_TYPE_VLAN, vlan ids are stored here.
+ If type is I40E_MIRROR_TYPE_VPORT_*, vsi's seid are stored.
+ */
+ uint16_t entries[I40E_MIRROR_MAX_ENTRIES_PER_RULE];
+};
+
+TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
+
+/*
+ * Struct to store flow created.
+ */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) node;
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+TAILQ_HEAD(i40e_flow_list, rte_flow);
+
+/* Struct to store Traffic Manager shaper profile. */
+struct i40e_tm_shaper_profile {
+ TAILQ_ENTRY(i40e_tm_shaper_profile) node;
+ uint32_t shaper_profile_id;
+ uint32_t reference_count;
+ struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(i40e_shaper_profile_list, i40e_tm_shaper_profile);
+
+/* node type of Traffic Manager */
+enum i40e_tm_node_type {
+ I40E_TM_NODE_TYPE_PORT,
+ I40E_TM_NODE_TYPE_TC,
+ I40E_TM_NODE_TYPE_QUEUE,
+ I40E_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store Traffic Manager node configuration. */
+struct i40e_tm_node {
+ TAILQ_ENTRY(i40e_tm_node) node;
+ uint32_t id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t reference_count;
+ struct i40e_tm_node *parent;
+ struct i40e_tm_shaper_profile *shaper_profile;
+ struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(i40e_tm_node_list, i40e_tm_node);
+
+/* Struct to store all the Traffic Manager configuration. */
+struct i40e_tm_conf {
+ struct i40e_shaper_profile_list shaper_profile_list;
+ struct i40e_tm_node *root; /* root node - port */
+ struct i40e_tm_node_list tc_list; /* node list for all the TCs */
+ struct i40e_tm_node_list queue_list; /* node list for all the queues */
+ /**
+ * The number of added TC nodes.
+ * It should be no more than the TC number of this port.
+ */
+ uint32_t nb_tc_node;
+ /**
+ * The number of added queue nodes.
+ * It should be no more than the queue number of this port.
+ */
+ uint32_t nb_queue_node;
+ /**
+ * This flag is used to check if APP can change the TM node
+ * configuration.
+ * When it's true, means the configuration is applied to HW,
+ * APP should not change the configuration.
+ * As we don't support on-the-fly configuration, when starting
+ * the port, APP should call the hierarchy_commit API to set this
+ * flag to true. When stopping the port, this flag should be set
+ * to false.
+ */
+ bool committed;
+};
+
+enum i40e_new_pctype {
+ I40E_CUSTOMIZED_GTPC = 0,
+ I40E_CUSTOMIZED_GTPU_IPV4,
+ I40E_CUSTOMIZED_GTPU_IPV6,
+ I40E_CUSTOMIZED_GTPU,
+ I40E_CUSTOMIZED_MAX,
+};
+
+#define I40E_FILTER_PCTYPE_INVALID 0
+struct i40e_customized_pctype {
+ enum i40e_new_pctype index; /* Indicate which customized pctype */
+ uint8_t pctype; /* New pctype value */
+ bool valid; /* Check if it's valid */
+};
+
+struct i40e_rte_flow_rss_conf {
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+ uint16_t queue_region_conf; /**< Queue region config flag */
+ uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
+ I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t)]; /* Hash key. */
+ uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
+};
+
+/*
+ * Structure to store private data specific for PF instance.
+ */
+struct i40e_pf {
+ struct i40e_adapter *adapter; /* The adapter this PF associate to */
+ struct i40e_vsi *main_vsi; /* pointer to main VSI structure */
+ uint16_t mac_seid; /* The seid of the MAC of this PF */
+ uint16_t main_vsi_seid; /* The seid of the main VSI */
+ uint16_t max_num_vsi;
+ struct i40e_res_pool_info qp_pool; /*Queue pair pool */
+ struct i40e_res_pool_info msix_pool; /* MSIX interrupt pool */
+
+ struct i40e_hw_port_stats stats_offset;
+ struct i40e_hw_port_stats stats;
+ /* internal packet statistics, it should be excluded from the total */
+ struct i40e_eth_stats internal_stats_offset;
+ struct i40e_eth_stats internal_stats;
+ bool offset_loaded;
+
+ struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
+ struct ether_addr dev_addr; /* PF device mac address */
+ uint64_t flags; /* PF feature flags */
+ /* All kinds of queue pair setting for different VSIs */
+ struct i40e_pf_vf *vfs;
+ uint16_t vf_num;
+ /* Each of below queue pairs should be power of 2 since it's the
+ precondition after TC configuration applied */
+ uint16_t lan_nb_qp_max;
+ uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+ uint16_t lan_qp_offset;
+ uint16_t vmdq_nb_qp_max;
+ uint16_t vmdq_nb_qps; /* The number of queue pairs of VMDq */
+ uint16_t vmdq_qp_offset;
+ uint16_t vf_nb_qp_max;
+ uint16_t vf_nb_qps; /* The number of queue pairs of VF */
+ uint16_t vf_qp_offset;
+ uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
+ uint16_t fdir_qp_offset;
+
+ uint16_t hash_lut_size; /* The size of hash lookup table */
+ /* input set bits for each pctype */
+ uint64_t hash_input_set[I40E_FILTER_PCTYPE_MAX];
+ /* store VXLAN UDP ports */
+ uint16_t vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+ uint16_t vxlan_bitmap; /* Vxlan bit mask */
+
+ /* VMDQ related info */
+ uint16_t max_nb_vmdq_vsi; /* Max number of VMDQ VSIs supported */
+ uint16_t nb_cfg_vmdq_vsi; /* number of VMDQ VSIs configured */
+ struct i40e_vmdq_info *vmdq;
+
+ struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
+ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
+ struct i40e_rte_flow_rss_conf rss_info; /* rss info */
+ struct i40e_queue_regions queue_region; /* queue region info */
+ struct i40e_fc_conf fc_conf; /* Flow control conf */
+ struct i40e_mirror_rule_list mirror_list;
+ uint16_t nb_mirror_rule; /* The number of mirror rules */
+ bool floating_veb; /* The flag to use the floating VEB */
+ /* The floating enable flag for the specific VF */
+ bool floating_veb_list[I40E_MAX_VF];
+ struct i40e_flow_list flow_list;
+ bool mpls_replace_flag; /* 1 - MPLS filter replace is done */
+ bool gtp_replace_flag; /* 1 - GTP-C/U filter replace is done */
+ bool qinq_replace_flag; /* QINQ filter replace is done */
+ struct i40e_tm_conf tm_conf;
+ bool support_multi_driver; /* 1 - support multiple driver */
+
+ /* Dynamic Device Personalization */
+ bool gtp_support; /* 1 - support GTP-C and GTP-U */
+ /* customer customized pctype */
+ struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX];
+ /* Switch Domain Id */
+ uint16_t switch_domain_id;
+};
+
+enum pending_msg {
+ PFMSG_LINK_CHANGE = 0x1,
+ PFMSG_RESET_IMPENDING = 0x2,
+ PFMSG_DRIVER_CLOSE = 0x4,
+};
+
+struct i40e_vsi_vlan_pvid_info {
+ uint16_t on; /* Enable or disable pvid */
+ union {
+ uint16_t pvid; /* Valid in case 'on' is set to set pvid */
+ struct {
+ /* Valid in case 'on' is cleared. 'tagged' will reject tagged packets,
+ * while 'untagged' will reject untagged packets.
+ */
+ uint8_t tagged;
+ uint8_t untagged;
+ } reject;
+ } config;
+};
+
+struct i40e_vf_rx_queues {
+ uint64_t rx_dma_addr;
+ uint32_t rx_ring_len;
+ uint32_t buff_size;
+};
+
+struct i40e_vf_tx_queues {
+ uint64_t tx_dma_addr;
+ uint32_t tx_ring_len;
+};
+
+/*
+ * Structure to store private data specific for VF instance.
+ */
+struct i40e_vf {
+ struct i40e_adapter *adapter; /* The adapter this VF associate to */
+ struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
+ uint16_t num_queue_pairs;
+ uint16_t max_pkt_len; /* Maximum packet length */
+ bool promisc_unicast_enabled;
+ bool promisc_multicast_enabled;
+
+ uint32_t version_major; /* Major version number */
+ uint32_t version_minor; /* Minor version number */
+ uint16_t promisc_flags; /* Promiscuous setting */
+ uint32_t vlan[I40E_VFTA_SIZE]; /* VLAN bit map */
+
+ struct ether_addr mc_addrs[I40E_NUM_MACADDR_MAX]; /* Multicast addrs */
+ uint16_t mc_addrs_num; /* Multicast mac addresses number */
+
+ /* Event from pf */
+ bool dev_closed;
+ bool link_up;
+ enum virtchnl_link_speed link_speed;
+ bool vf_reset;
+ volatile uint32_t pend_cmd; /* pending command not finished yet */
+ int32_t cmd_retval; /* return value of the cmd response from PF */
+ u16 pend_msg; /* flags indicates events from pf not handled yet */
+ uint8_t *aq_resp; /* buffer to store the adminq response from PF */
+
+ /* VSI info */
+ struct virtchnl_vf_resource *vf_res; /* All VSIs */
+ struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
+ struct i40e_vsi vsi;
+ uint64_t flags;
+};
+
+#define I40E_MAX_PKT_TYPE 256
+#define I40E_FLOW_TYPE_MAX 64
+
+/*
+ * Structure to store private data for each PF/VF instance.
+ */
+struct i40e_adapter {
+ /* Common for both PF and VF */
+ struct i40e_hw hw;
+ struct rte_eth_dev *eth_dev;
+
+ /* Specific for PF or VF */
+ union {
+ struct i40e_pf pf;
+ struct i40e_vf vf;
+ };
+
+ /* For vector PMD */
+ bool rx_bulk_alloc_allowed;
+ bool rx_vec_allowed;
+ bool tx_simple_allowed;
+ bool tx_vec_allowed;
+
+ /* For PTP */
+ struct rte_timecounter systime_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct rte_timecounter tx_tstamp_tc;
+
+ /* ptype mapping table */
+ uint32_t ptype_tbl[I40E_MAX_PKT_TYPE] __rte_cache_min_aligned;
+ /* flow type to pctype mapping table */
+ uint64_t pctypes_tbl[I40E_FLOW_TYPE_MAX] __rte_cache_min_aligned;
+ uint64_t flow_types_mask;
+ uint64_t pctypes_mask;
+};
+
+/**
+ * Strucute to store private data for each VF representor instance
+ */
+struct i40e_vf_representor {
+ uint16_t switch_domain_id;
+ /**< Virtual Function ID */
+ uint16_t vf_id;
+ /**< Virtual Function ID */
+ struct i40e_adapter *adapter;
+ /**< Private data store of assocaiated physical function */
+ struct i40e_eth_stats stats_offset;
+ /**< Zero-point of VF statistics*/
+};
+
+extern const struct rte_flow_ops i40e_flow_ops;
+
+union i40e_filter_t {
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct i40e_fdir_filter_conf fdir_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
+ struct i40e_tunnel_filter_conf consistent_tunnel_filter;
+ struct i40e_rte_flow_rss_conf rss_conf;
+};
+
+typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+struct i40e_valid_pattern {
+ enum rte_flow_item_type *items;
+ parse_filter_t parse_filter;
+};
+
+int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
+int i40e_vsi_release(struct i40e_vsi *vsi);
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
+ enum i40e_vsi_type type,
+ struct i40e_vsi *uplink_vsi,
+ uint16_t user_param);
+int i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on);
+int i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on);
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan);
+int i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan);
+int i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *filter);
+int i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr);
+void i40e_update_vsi_stats(struct i40e_vsi *vsi);
+void i40e_pf_disable_irq0(struct i40e_hw *hw);
+void i40e_pf_enable_irq0(struct i40e_hw *hw);
+int i40e_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx);
+void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi);
+int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
+ struct i40e_vsi_vlan_pvid_info *info);
+int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on);
+int i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on);
+uint64_t i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags);
+uint64_t i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags);
+enum i40e_status_code i40e_fdir_setup_tx_resources(struct i40e_pf *pf);
+enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf);
+int i40e_fdir_setup(struct i40e_pf *pf);
+const struct rte_memzone *i40e_memzone_reserve(const char *name,
+ uint32_t len,
+ int socket_id);
+int i40e_fdir_configure(struct rte_eth_dev *dev);
+void i40e_fdir_teardown(struct i40e_pf *pf);
+enum i40e_filter_pctype
+ i40e_flowtype_to_pctype(const struct i40e_adapter *adapter,
+ uint16_t flow_type);
+uint16_t i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
+ enum i40e_filter_pctype pctype);
+int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+int i40e_select_filter_input_set(struct i40e_hw *hw,
+ struct rte_eth_input_set_conf *conf,
+ enum rte_filter_type filter);
+void i40e_fdir_filter_restore(struct i40e_pf *pf);
+int i40e_hash_filter_inset_select(struct i40e_hw *hw,
+ struct rte_eth_input_set_conf *conf);
+int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
+ struct rte_eth_input_set_conf *conf);
+int i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf, uint32_t opcode,
+ uint32_t retval, uint8_t *msg,
+ uint16_t msglen);
+void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input);
+int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input);
+int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
+ struct i40e_fdir_input *input);
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input);
+int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input);
+uint64_t i40e_get_default_input_set(uint16_t pctype);
+int i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct i40e_fdir_filter_conf *filter,
+ bool add);
+int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
+int i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
+int i40e_fdir_flush(struct rte_eth_dev *dev);
+int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num, struct ether_addr *addr);
+int i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total);
+void i40e_set_vlan_filter(struct i40e_vsi *vsi, uint16_t vlan_id, bool on);
+int i40e_add_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total);
+bool is_i40e_supported(struct rte_eth_dev *dev);
+
+int i40e_validate_input_set(enum i40e_filter_pctype pctype,
+ enum rte_filter_type filter, uint64_t inset);
+int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask,
+ uint8_t nb_elem);
+uint64_t i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input);
+void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
+void i40e_check_write_global_reg(struct i40e_hw *hw,
+ uint32_t addr, uint32_t val);
+
+int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
+void i40e_tm_conf_init(struct rte_eth_dev *dev);
+void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index);
+void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+ uint32_t pkg_size,
+ enum rte_pmd_i40e_package_op op);
+int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
+int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
+ struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
+void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
+void i40e_flex_payload_reg_set_default(struct i40e_hw *hw);
+int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len);
+int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size);
+int i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int i40e_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
+int i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add);
+int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
+int i40e_vf_representor_uninit(struct rte_eth_dev *ethdev);
+
+#define I40E_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
+
+/* I40E_DEV_PRIVATE_TO */
+#define I40E_DEV_PRIVATE_TO_PF(adapter) \
+ (&((struct i40e_adapter *)adapter)->pf)
+#define I40E_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct i40e_adapter *)adapter)->hw)
+#define I40E_DEV_PRIVATE_TO_ADAPTER(adapter) \
+ ((struct i40e_adapter *)adapter)
+
+/* I40EVF_DEV_PRIVATE_TO */
+#define I40EVF_DEV_PRIVATE_TO_VF(adapter) \
+ (&((struct i40e_adapter *)adapter)->vf)
+
+static inline struct i40e_vsi *
+i40e_get_vsi_from_adapter(struct i40e_adapter *adapter)
+{
+ struct i40e_hw *hw;
+
+ if (!adapter)
+ return NULL;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(adapter);
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(adapter);
+ return &vf->vsi;
+ } else {
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(adapter);
+ return pf->main_vsi;
+ }
+}
+#define I40E_DEV_PRIVATE_TO_MAIN_VSI(adapter) \
+ i40e_get_vsi_from_adapter((struct i40e_adapter *)adapter)
+
+/* I40E_VSI_TO */
+#define I40E_VSI_TO_HW(vsi) \
+ (&(((struct i40e_vsi *)vsi)->adapter->hw))
+#define I40E_VSI_TO_PF(vsi) \
+ (&(((struct i40e_vsi *)vsi)->adapter->pf))
+#define I40E_VSI_TO_VF(vsi) \
+ (&(((struct i40e_vsi *)vsi)->adapter->vf))
+#define I40E_VSI_TO_DEV_DATA(vsi) \
+ (((struct i40e_vsi *)vsi)->adapter->pf.dev_data)
+#define I40E_VSI_TO_ETH_DEV(vsi) \
+ (((struct i40e_vsi *)vsi)->adapter->eth_dev)
+
+/* I40E_PF_TO */
+#define I40E_PF_TO_HW(pf) \
+ (&(((struct i40e_pf *)pf)->adapter->hw))
+#define I40E_PF_TO_ADAPTER(pf) \
+ ((struct i40e_adapter *)pf->adapter)
+
+/* I40E_VF_TO */
+#define I40E_VF_TO_HW(vf) \
+ (&(((struct i40e_vf *)vf)->adapter->hw))
+
+static inline void
+i40e_init_adminq_parameter(struct i40e_hw *hw)
+{
+ hw->aq.num_arq_entries = I40E_AQ_LEN;
+ hw->aq.num_asq_entries = I40E_AQ_LEN;
+ hw->aq.arq_buf_size = I40E_AQ_BUF_SZ;
+ hw->aq.asq_buf_size = I40E_AQ_BUF_SZ;
+}
+
+static inline int
+i40e_align_floor(int n)
+{
+ if (n == 0)
+ return 0;
+ return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n));
+}
+
+static inline uint16_t
+i40e_calc_itr_interval(bool is_pf, bool is_multi_drv)
+{
+ uint16_t interval = 0;
+
+ if (is_multi_drv) {
+ interval = I40E_QUEUE_ITR_INTERVAL_MAX;
+ } else {
+ if (is_pf)
+ interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+ else
+ interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT;
+ }
+
+ /* Convert to hardware count, as writing each 1 represents 2 us */
+ return interval / 2;
+}
+
+#define I40E_VALID_FLOW(flow_type) \
+ ((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER || \
+ (flow_type) == RTE_ETH_FLOW_FRAG_IPV6 || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER || \
+ (flow_type) == RTE_ETH_FLOW_L2_PAYLOAD)
+
+#define I40E_VALID_PCTYPE_X722(pctype) \
+ ((pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || \
+ (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV6 || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \
+ (pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD)
+
+#define I40E_VALID_PCTYPE(pctype) \
+ ((pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || \
+ (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV6 || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \
+ (pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD)
+
+#define I40E_PHY_TYPE_SUPPORT_40G(phy_type) \
+ (((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_KR4) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_AOC) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_CR4) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_SR4) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_LR4))
+
+#define I40E_PHY_TYPE_SUPPORT_25G(phy_type) \
+ (((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_KR) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_CR) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_SR) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_LR))
+
+#endif /* _I40E_ETHDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev_vf.c b/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev_vf.c
new file mode 100644
index 00000000..001c301b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev_vf.c
@@ -0,0 +1,2758 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_prototype.h"
+#include "base/i40e_adminq_cmd.h"
+#include "base/i40e_type.h"
+
+#include "i40e_rxtx.h"
+#include "i40e_ethdev.h"
+#include "i40e_pf.h"
+
+/* busy wait delay in msec */
+#define I40EVF_BUSY_WAIT_DELAY 10
+#define I40EVF_BUSY_WAIT_COUNT 50
+#define MAX_RESET_WAIT_CNT 20
+
+#define I40EVF_ALARM_INTERVAL 50000 /* us */
+
+struct i40evf_arq_msg_info {
+ enum virtchnl_ops ops;
+ enum i40e_status_code result;
+ uint16_t buf_len;
+ uint16_t msg_len;
+ uint8_t *msg;
+};
+
+struct vf_cmd_info {
+ enum virtchnl_ops ops;
+ uint8_t *in_args;
+ uint32_t in_args_size;
+ uint8_t *out_buffer;
+ /* Input & output type. pass in buffer size and pass out
+ * actual return result
+ */
+ uint32_t out_size;
+};
+
+enum i40evf_aq_result {
+ I40EVF_MSG_ERR = -1, /* Meet error when accessing admin queue */
+ I40EVF_MSG_NON, /* Read nothing from admin queue */
+ I40EVF_MSG_SYS, /* Read system msg from admin queue */
+ I40EVF_MSG_CMD, /* Read async command result */
+};
+
+static int i40evf_dev_configure(struct rte_eth_dev *dev);
+static int i40evf_dev_start(struct rte_eth_dev *dev);
+static void i40evf_dev_stop(struct rte_eth_dev *dev);
+static void i40evf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int i40evf_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int i40evf_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned n);
+static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit);
+static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
+static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static int i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void i40evf_dev_close(struct rte_eth_dev *dev);
+static int i40evf_dev_reset(struct rte_eth_dev *dev);
+static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int i40evf_init_vlan(struct rte_eth_dev *dev);
+static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id);
+static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id);
+static int i40evf_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ uint32_t index,
+ uint32_t pool);
+static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int i40evf_config_rss(struct i40e_vf *vf);
+static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+static int
+i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+static int
+i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
+static void i40evf_handle_pf_event(struct rte_eth_dev *dev,
+ uint8_t *msg,
+ uint16_t msglen);
+
+static int
+i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr, bool add);
+static int
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+
+/* Default hash key buffer for RSS */
+static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
+
+struct rte_i40evf_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
+ {"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)},
+ {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
+ {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
+ {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
+ {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
+ {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
+ rx_unknown_protocol)},
+ {"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
+ {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
+ {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
+ {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
+ {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
+ {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)},
+};
+
+#define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
+ sizeof(rte_i40evf_stats_strings[0]))
+
+static const struct eth_dev_ops i40evf_eth_dev_ops = {
+ .dev_configure = i40evf_dev_configure,
+ .dev_start = i40evf_dev_start,
+ .dev_stop = i40evf_dev_stop,
+ .promiscuous_enable = i40evf_dev_promiscuous_enable,
+ .promiscuous_disable = i40evf_dev_promiscuous_disable,
+ .allmulticast_enable = i40evf_dev_allmulticast_enable,
+ .allmulticast_disable = i40evf_dev_allmulticast_disable,
+ .link_update = i40evf_dev_link_update,
+ .stats_get = i40evf_dev_stats_get,
+ .stats_reset = i40evf_dev_xstats_reset,
+ .xstats_get = i40evf_dev_xstats_get,
+ .xstats_get_names = i40evf_dev_xstats_get_names,
+ .xstats_reset = i40evf_dev_xstats_reset,
+ .dev_close = i40evf_dev_close,
+ .dev_reset = i40evf_dev_reset,
+ .dev_infos_get = i40evf_dev_info_get,
+ .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
+ .vlan_filter_set = i40evf_vlan_filter_set,
+ .vlan_offload_set = i40evf_vlan_offload_set,
+ .rx_queue_start = i40evf_dev_rx_queue_start,
+ .rx_queue_stop = i40evf_dev_rx_queue_stop,
+ .tx_queue_start = i40evf_dev_tx_queue_start,
+ .tx_queue_stop = i40evf_dev_tx_queue_stop,
+ .rx_queue_setup = i40e_dev_rx_queue_setup,
+ .rx_queue_release = i40e_dev_rx_queue_release,
+ .rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
+ .rx_descriptor_done = i40e_dev_rx_descriptor_done,
+ .rx_descriptor_status = i40e_dev_rx_descriptor_status,
+ .tx_descriptor_status = i40e_dev_tx_descriptor_status,
+ .tx_queue_setup = i40e_dev_tx_queue_setup,
+ .tx_queue_release = i40e_dev_tx_queue_release,
+ .rx_queue_count = i40e_dev_rx_queue_count,
+ .rxq_info_get = i40e_rxq_info_get,
+ .txq_info_get = i40e_txq_info_get,
+ .mac_addr_add = i40evf_add_mac_addr,
+ .mac_addr_remove = i40evf_del_mac_addr,
+ .set_mc_addr_list = i40evf_set_mc_addr_list,
+ .reta_update = i40evf_dev_rss_reta_update,
+ .reta_query = i40evf_dev_rss_reta_query,
+ .rss_hash_update = i40evf_dev_rss_hash_update,
+ .rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
+ .mtu_set = i40evf_dev_mtu_set,
+ .mac_addr_set = i40evf_set_default_mac_addr,
+};
+
+/*
+ * Read data in admin queue to get msg from pf driver
+ */
+static enum i40evf_aq_result
+i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_arq_event_info event;
+ enum virtchnl_ops opcode;
+ enum i40e_status_code retval;
+ int ret;
+ enum i40evf_aq_result result = I40EVF_MSG_NON;
+
+ event.buf_len = data->buf_len;
+ event.msg_buf = data->msg;
+ ret = i40e_clean_arq_element(hw, &event, NULL);
+ /* Can't read any msg from adminQ */
+ if (ret) {
+ if (ret != I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ result = I40EVF_MSG_ERR;
+ return result;
+ }
+
+ opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
+ retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low);
+ /* pf sys event */
+ if (opcode == VIRTCHNL_OP_EVENT) {
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)event.msg_buf;
+
+ result = I40EVF_MSG_SYS;
+ switch (vpe->event) {
+ case VIRTCHNL_EVENT_LINK_CHANGE:
+ vf->link_up =
+ vpe->event_data.link_event.link_status;
+ vf->link_speed =
+ vpe->event_data.link_event.link_speed;
+ vf->pend_msg |= PFMSG_LINK_CHANGE;
+ PMD_DRV_LOG(INFO, "Link status update:%s",
+ vf->link_up ? "up" : "down");
+ break;
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
+ vf->vf_reset = true;
+ vf->pend_msg |= PFMSG_RESET_IMPENDING;
+ PMD_DRV_LOG(INFO, "vf is reseting");
+ break;
+ case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ vf->dev_closed = true;
+ vf->pend_msg |= PFMSG_DRIVER_CLOSE;
+ PMD_DRV_LOG(INFO, "PF driver closed");
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
+ __func__, vpe->event);
+ }
+ } else {
+ /* async reply msg on command issued by vf previously */
+ result = I40EVF_MSG_CMD;
+ /* Actual data length read from PF */
+ data->msg_len = event.msg_len;
+ }
+
+ data->result = retval;
+ data->ops = opcode;
+
+ return result;
+}
+
+/**
+ * clear current command. Only call in case execute
+ * _atomic_set_cmd successfully.
+ */
+static inline void
+_clear_cmd(struct i40e_vf *vf)
+{
+ rte_wmb();
+ vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;
+}
+
+/*
+ * Check there is pending cmd in execution. If none, set new command.
+ */
+static inline int
+_atomic_set_cmd(struct i40e_vf *vf, enum virtchnl_ops ops)
+{
+ int ret = rte_atomic32_cmpset(&vf->pend_cmd,
+ VIRTCHNL_OP_UNKNOWN, ops);
+
+ if (!ret)
+ PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+ return !ret;
+}
+
+#define MAX_TRY_TIMES 200
+#define ASQ_DELAY_MS 10
+
+static int
+i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40evf_arq_msg_info info;
+ enum i40evf_aq_result ret;
+ int err, i = 0;
+
+ if (_atomic_set_cmd(vf, args->ops))
+ return -1;
+
+ info.msg = args->out_buffer;
+ info.buf_len = args->out_size;
+ info.ops = VIRTCHNL_OP_UNKNOWN;
+ info.result = I40E_SUCCESS;
+
+ err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
+ args->in_args, args->in_args_size, NULL);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
+ _clear_cmd(vf);
+ return err;
+ }
+
+ switch (args->ops) {
+ case VIRTCHNL_OP_RESET_VF:
+ /*no need to process in this function */
+ err = 0;
+ break;
+ case VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ /* for init adminq commands, need to poll the response */
+ err = -1;
+ do {
+ ret = i40evf_read_pfmsg(dev, &info);
+ vf->cmd_retval = info.result;
+ if (ret == I40EVF_MSG_CMD) {
+ err = 0;
+ break;
+ } else if (ret == I40EVF_MSG_ERR)
+ break;
+ rte_delay_ms(ASQ_DELAY_MS);
+ /* If don't read msg or read sys event, continue */
+ } while (i++ < MAX_TRY_TIMES);
+ _clear_cmd(vf);
+ break;
+
+ default:
+ /* for other adminq in running time, waiting the cmd done flag */
+ err = -1;
+ do {
+ if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) {
+ err = 0;
+ break;
+ }
+ rte_delay_ms(ASQ_DELAY_MS);
+ /* If don't read msg or read sys event, continue */
+ } while (i++ < MAX_TRY_TIMES);
+ /* If there's no response is received, clear command */
+ if (i >= MAX_TRY_TIMES) {
+ PMD_DRV_LOG(WARNING, "No response for %d", args->ops);
+ _clear_cmd(vf);
+ }
+ break;
+ }
+
+ return err | vf->cmd_retval;
+}
+
+/*
+ * Check API version with sync wait until version read or fail from admin queue
+ */
+static int
+i40evf_check_api_version(struct rte_eth_dev *dev)
+{
+ struct virtchnl_version_info version, *pver;
+ int err;
+ struct vf_cmd_info args;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ version.major = VIRTCHNL_VERSION_MAJOR;
+ version.minor = VIRTCHNL_VERSION_MINOR;
+
+ args.ops = VIRTCHNL_OP_VERSION;
+ args.in_args = (uint8_t *)&version;
+ args.in_args_size = sizeof(version);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
+ return err;
+ }
+
+ pver = (struct virtchnl_version_info *)args.out_buffer;
+ vf->version_major = pver->major;
+ vf->version_minor = pver->minor;
+ if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
+ (vf->version_minor <= VIRTCHNL_VERSION_MINOR))
+ PMD_DRV_LOG(INFO, "Peer is Linux PF host");
+ else {
+ PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
+ vf->version_major, vf->version_minor,
+ VIRTCHNL_VERSION_MAJOR,
+ VIRTCHNL_VERSION_MINOR);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_get_vf_resource(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+ struct vf_cmd_info args;
+ uint32_t caps, len;
+
+ args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ if (PF_IS_V11(vf)) {
+ caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN |
+ VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ args.in_args = (uint8_t *)&caps;
+ args.in_args_size = sizeof(caps);
+ } else {
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ }
+ err = i40evf_execute_vf_cmd(dev, &args);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
+ return err;
+ }
+
+ len = sizeof(struct virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
+
+ rte_memcpy(vf->vf_res, args.out_buffer,
+ RTE_MIN(args.out_size, len));
+ i40e_vf_parse_hw_config(hw, vf->vf_res);
+
+ return 0;
+}
+
+static int
+i40evf_config_promisc(struct rte_eth_dev *dev,
+ bool enable_unicast,
+ bool enable_multicast)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+ struct vf_cmd_info args;
+ struct virtchnl_promisc_info promisc;
+
+ promisc.flags = 0;
+ promisc.vsi_id = vf->vsi_res->vsi_id;
+
+ if (enable_unicast)
+ promisc.flags |= FLAG_VF_UNICAST_PROMISC;
+
+ if (enable_multicast)
+ promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
+
+ args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ args.in_args = (uint8_t *)&promisc;
+ args.in_args_size = sizeof(promisc);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command "
+ "CONFIG_PROMISCUOUS_MODE");
+ return err;
+}
+
+static int
+i40evf_enable_vlan_strip(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct vf_cmd_info args;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING");
+
+ return ret;
+}
+
+static int
+i40evf_disable_vlan_strip(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct vf_cmd_info args;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING");
+
+ return ret;
+}
+
+static void
+i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info,
+ uint16_t vsi_id,
+ uint16_t queue_id,
+ uint16_t nb_txq,
+ struct i40e_tx_queue *txq)
+{
+ txq_info->vsi_id = vsi_id;
+ txq_info->queue_id = queue_id;
+ if (queue_id < nb_txq) {
+ txq_info->ring_len = txq->nb_tx_desc;
+ txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
+ }
+}
+
+static void
+i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info,
+ uint16_t vsi_id,
+ uint16_t queue_id,
+ uint16_t nb_rxq,
+ uint32_t max_pkt_size,
+ struct i40e_rx_queue *rxq)
+{
+ rxq_info->vsi_id = vsi_id;
+ rxq_info->queue_id = queue_id;
+ rxq_info->max_pkt_size = max_pkt_size;
+ if (queue_id < nb_rxq) {
+ rxq_info->ring_len = rxq->nb_rx_desc;
+ rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
+ rxq_info->databuffer_size =
+ (rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+ }
+}
+
+static int
+i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_rx_queue **rxq =
+ (struct i40e_rx_queue **)dev->data->rx_queues;
+ struct i40e_tx_queue **txq =
+ (struct i40e_tx_queue **)dev->data->tx_queues;
+ struct virtchnl_vsi_queue_config_info *vc_vqci;
+ struct virtchnl_queue_pair_info *vc_qpi;
+ struct vf_cmd_info args;
+ uint16_t i, nb_qp = vf->num_queue_pairs;
+ const uint32_t size =
+ I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
+ uint8_t buff[size];
+ int ret;
+
+ memset(buff, 0, sizeof(buff));
+ vc_vqci = (struct virtchnl_vsi_queue_config_info *)buff;
+ vc_vqci->vsi_id = vf->vsi_res->vsi_id;
+ vc_vqci->num_queue_pairs = nb_qp;
+
+ for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
+ i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
+ vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
+ i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
+ vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
+ vf->max_pkt_len, rxq[i]);
+ }
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ args.in_args = (uint8_t *)vc_vqci;
+ args.in_args_size = size;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+
+ return ret;
+}
+
+static int
+i40evf_config_irq_map(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct vf_cmd_info args;
+ uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
+ sizeof(struct virtchnl_vector_map)];
+ struct virtchnl_irq_map_info *map_info;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t vector_id;
+ int i, err;
+
+ if (dev->data->dev_conf.intr_conf.rxq != 0 &&
+ rte_intr_allow_others(intr_handle))
+ vector_id = I40E_RX_VEC_START;
+ else
+ vector_id = I40E_MISC_VEC_ID;
+
+ map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
+ map_info->num_vectors = 1;
+ map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
+ map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
+ /* Alway use default dynamic MSIX interrupt */
+ map_info->vecmap[0].vector_id = vector_id;
+ /* Don't map any tx queue */
+ map_info->vecmap[0].txq_map = 0;
+ map_info->vecmap[0].rxq_map = 0;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ map_info->vecmap[0].rxq_map |= 1 << i;
+ if (rte_intr_dp_is_en(intr_handle))
+ intr_handle->intr_vec[i] = vector_id;
+ }
+
+ args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+ args.in_args = (u8 *)cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
+
+ return err;
+}
+
+static int
+i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
+ bool on)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct virtchnl_queue_select queue_select;
+ int err;
+ struct vf_cmd_info args;
+ memset(&queue_select, 0, sizeof(queue_select));
+ queue_select.vsi_id = vf->vsi_res->vsi_id;
+
+ if (isrx)
+ queue_select.rx_queues |= 1 << qid;
+ else
+ queue_select.tx_queues |= 1 << qid;
+
+ if (on)
+ args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
+ else
+ args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
+ args.in_args = (u8 *)&queue_select;
+ args.in_args_size = sizeof(queue_select);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
+ isrx ? "RX" : "TX", qid, on ? "on" : "off");
+
+ return err;
+}
+
+static int
+i40evf_start_queues(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *dev_data = dev->data;
+ int i;
+ struct i40e_rx_queue *rxq;
+ struct i40e_tx_queue *txq;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev_data->rx_queues[i];
+ if (rxq->rx_deferred_start)
+ continue;
+ if (i40evf_dev_rx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ return -1;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev_data->tx_queues[i];
+ if (txq->tx_deferred_start)
+ continue;
+ if (i40evf_dev_tx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40evf_stop_queues(struct rte_eth_dev *dev)
+{
+ int i;
+
+ /* Stop TX queues first */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
+ return -1;
+ }
+ }
+
+ /* Then stop RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40evf_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
+ sizeof(struct virtchnl_ether_addr)];
+ int err;
+ struct vf_cmd_info args;
+
+ if (is_zero_ether_addr(addr)) {
+ PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
+ return I40E_ERR_INVALID_MAC_ADDR;
+ }
+
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = 1;
+ rte_memcpy(list->list[0].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+
+ args.ops = VIRTCHNL_OP_ADD_ETH_ADDR;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command "
+ "OP_ADD_ETHER_ADDRESS");
+ else
+ vf->vsi.mac_num++;
+
+ return err;
+}
+
+static void
+i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
+ sizeof(struct virtchnl_ether_addr)];
+ int err;
+ struct vf_cmd_info args;
+
+ if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
+ return;
+ }
+
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = 1;
+ rte_memcpy(list->list[0].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+
+ args.ops = VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command "
+ "OP_DEL_ETHER_ADDRESS");
+ else
+ vf->vsi.mac_num--;
+ return;
+}
+
+static void
+i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct ether_addr *addr;
+
+ addr = &data->mac_addrs[index];
+
+ i40evf_del_mac_addr_by_addr(dev, addr);
+}
+
+static int
+i40evf_query_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct virtchnl_queue_select q_stats;
+ int err;
+ struct vf_cmd_info args;
+
+ memset(&q_stats, 0, sizeof(q_stats));
+ q_stats.vsi_id = vf->vsi_res->vsi_id;
+ args.ops = VIRTCHNL_OP_GET_STATS;
+ args.in_args = (u8 *)&q_stats;
+ args.in_args_size = sizeof(q_stats);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
+ *pstats = NULL;
+ return err;
+ }
+ *pstats = (struct i40e_eth_stats *)args.out_buffer;
+ return 0;
+}
+
+static void
+i40evf_stat_update_48(uint64_t *offset,
+ uint64_t *stat)
+{
+ if (*stat >= *offset)
+ *stat = *stat - *offset;
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
+
+ *stat &= I40E_48_BIT_MASK;
+}
+
+static void
+i40evf_stat_update_32(uint64_t *offset,
+ uint64_t *stat)
+{
+ if (*stat >= *offset)
+ *stat = (uint64_t)(*stat - *offset);
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
+}
+
+static void
+i40evf_update_stats(struct i40e_vsi *vsi,
+ struct i40e_eth_stats *nes)
+{
+ struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
+
+ i40evf_stat_update_48(&oes->rx_bytes,
+ &nes->rx_bytes);
+ i40evf_stat_update_48(&oes->rx_unicast,
+ &nes->rx_unicast);
+ i40evf_stat_update_48(&oes->rx_multicast,
+ &nes->rx_multicast);
+ i40evf_stat_update_48(&oes->rx_broadcast,
+ &nes->rx_broadcast);
+ i40evf_stat_update_32(&oes->rx_discards,
+ &nes->rx_discards);
+ i40evf_stat_update_32(&oes->rx_unknown_protocol,
+ &nes->rx_unknown_protocol);
+ i40evf_stat_update_48(&oes->tx_bytes,
+ &nes->tx_bytes);
+ i40evf_stat_update_48(&oes->tx_unicast,
+ &nes->tx_unicast);
+ i40evf_stat_update_48(&oes->tx_multicast,
+ &nes->tx_multicast);
+ i40evf_stat_update_48(&oes->tx_broadcast,
+ &nes->tx_broadcast);
+ i40evf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
+ i40evf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
+}
+
+static void
+i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_eth_stats *pstats = NULL;
+
+ /* read stat values to clear hardware registers */
+ ret = i40evf_query_stats(dev, &pstats);
+
+ /* set stats offset base on current values */
+ if (ret == 0)
+ vf->vsi.eth_stats_offset = *pstats;
+}
+
+static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned limit)
+{
+ unsigned i;
+
+ if (xstats_names != NULL)
+ for (i = 0; i < I40EVF_NB_XSTATS; i++) {
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s", rte_i40evf_stats_strings[i].name);
+ }
+ return I40EVF_NB_XSTATS;
+}
+
+static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned n)
+{
+ int ret;
+ unsigned i;
+ struct i40e_eth_stats *pstats = NULL;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_vsi *vsi = &vf->vsi;
+
+ if (n < I40EVF_NB_XSTATS)
+ return I40EVF_NB_XSTATS;
+
+ ret = i40evf_query_stats(dev, &pstats);
+ if (ret != 0)
+ return 0;
+
+ if (!xstats)
+ return 0;
+
+ i40evf_update_stats(vsi, pstats);
+
+ /* loop over xstats array and values from pstats */
+ for (i = 0; i < I40EVF_NB_XSTATS; i++) {
+ xstats[i].id = i;
+ xstats[i].value = *(uint64_t *)(((char *)pstats) +
+ rte_i40evf_stats_strings[i].offset);
+ }
+
+ return I40EVF_NB_XSTATS;
+}
+
+static int
+i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct virtchnl_vlan_filter_list *vlan_list;
+ uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
+ sizeof(uint16_t)];
+ int err;
+ struct vf_cmd_info args;
+
+ vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list->vsi_id = vf->vsi_res->vsi_id;
+ vlan_list->num_elements = 1;
+ vlan_list->vlan_id[0] = vlanid;
+
+ args.ops = VIRTCHNL_OP_ADD_VLAN;
+ args.in_args = (u8 *)&cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
+
+ return err;
+}
+
+static int
+i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct virtchnl_vlan_filter_list *vlan_list;
+ uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
+ sizeof(uint16_t)];
+ int err;
+ struct vf_cmd_info args;
+
+ vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list->vsi_id = vf->vsi_res->vsi_id;
+ vlan_list->num_elements = 1;
+ vlan_list->vlan_id[0] = vlanid;
+
+ args.ops = VIRTCHNL_OP_DEL_VLAN;
+ args.in_args = (u8 *)&cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
+
+ return err;
+}
+
+static const struct rte_pci_id pci_id_i40evf_map[] = {
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+/* Disable IRQ0 */
+static inline void
+i40evf_disable_irq0(struct i40e_hw *hw)
+{
+ /* Disable all interrupt types */
+ I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, 0);
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ I40EVF_WRITE_FLUSH(hw);
+}
+
+/* Enable IRQ0 */
+static inline void
+i40evf_enable_irq0(struct i40e_hw *hw)
+{
+ /* Enable admin queue interrupt trigger */
+ uint32_t val;
+
+ i40evf_disable_irq0(hw);
+ val = I40E_READ_REG(hw, I40E_VFINT_ICR0_ENA1);
+ val |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK |
+ I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK;
+ I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, val);
+
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+
+ I40EVF_WRITE_FLUSH(hw);
+}
+
+static int
+i40evf_check_vf_reset_done(struct i40e_hw *hw)
+{
+ int i, reset;
+
+ for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
+ reset = I40E_READ_REG(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
+ if (reset == VIRTCHNL_VFR_VFACTIVE ||
+ reset == VIRTCHNL_VFR_COMPLETED)
+ break;
+ rte_delay_ms(50);
+ }
+
+ if (i >= MAX_RESET_WAIT_CNT)
+ return -1;
+
+ return 0;
+}
+static int
+i40evf_reset_vf(struct i40e_hw *hw)
+{
+ int ret;
+
+ if (i40e_vf_reset(hw) != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Reset VF NIC failed");
+ return -1;
+ }
+ /**
+ * After issuing vf reset command to pf, pf won't necessarily
+ * reset vf, it depends on what state it exactly is. If it's not
+ * initialized yet, it won't have vf reset since it's in a certain
+ * state. If not, it will try to reset. Even vf is reset, pf will
+ * set I40E_VFGEN_RSTAT to COMPLETE first, then wait 10ms and set
+ * it to ACTIVE. In this duration, vf may not catch the moment that
+ * COMPLETE is set. So, for vf, we'll try to wait a long time.
+ */
+ rte_delay_ms(200);
+
+ ret = i40evf_check_vf_reset_done(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "VF is still resetting");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_init_vf(struct rte_eth_dev *dev)
+{
+ int i, err, bufsz;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint16_t interval =
+ i40e_calc_itr_interval(0, 0);
+
+ vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ vf->dev_data = dev->data;
+ err = i40e_set_mac_type(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
+ goto err;
+ }
+
+ err = i40evf_check_vf_reset_done(hw);
+ if (err)
+ goto err;
+
+ i40e_init_adminq_parameter(hw);
+ err = i40e_init_adminq(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
+ goto err;
+ }
+
+ /* Reset VF and wait until it's complete */
+ if (i40evf_reset_vf(hw)) {
+ PMD_INIT_LOG(ERR, "reset NIC failed");
+ goto err_aq;
+ }
+
+ /* VF reset, shutdown admin queue and initialize again */
+ if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
+ goto err;
+ }
+
+ i40e_init_adminq_parameter(hw);
+ if (i40e_init_adminq(hw) != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "init_adminq failed");
+ goto err;
+ }
+
+ vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0);
+ if (!vf->aq_resp) {
+ PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
+ goto err_aq;
+ }
+ if (i40evf_check_api_version(dev) != 0) {
+ PMD_INIT_LOG(ERR, "check_api version failed");
+ goto err_api;
+ }
+ bufsz = sizeof(struct virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
+ vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
+ if (!vf->vf_res) {
+ PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
+ goto err_api;
+ }
+
+ if (i40evf_get_vf_resource(dev) != 0) {
+ PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
+ goto err_alloc;
+ }
+
+ /* got VF config message back from PF, now we can parse it */
+ for (i = 0; i < vf->vf_res->num_vsis; i++) {
+ if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
+ vf->vsi_res = &vf->vf_res->vsi_res[i];
+ }
+
+ if (!vf->vsi_res) {
+ PMD_INIT_LOG(ERR, "no LAN VSI found");
+ goto err_alloc;
+ }
+
+ if (hw->mac.type == I40E_MAC_X722_VF)
+ vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
+ vf->vsi.vsi_id = vf->vsi_res->vsi_id;
+
+ switch (vf->vsi_res->vsi_type) {
+ case VIRTCHNL_VSI_SRIOV:
+ vf->vsi.type = I40E_VSI_SRIOV;
+ break;
+ default:
+ vf->vsi.type = I40E_VSI_TYPE_UNKNOWN;
+ break;
+ }
+ vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
+ vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Store the MAC address configured by host, or generate random one */
+ if (is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
+ vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
+ else
+ eth_random_addr(hw->mac.addr); /* Generate a random one */
+
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+ (I40E_ITR_INDEX_DEFAULT <<
+ I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT));
+ I40EVF_WRITE_FLUSH(hw);
+
+ return 0;
+
+err_alloc:
+ rte_free(vf->vf_res);
+ vf->vsi_res = NULL;
+err_api:
+ rte_free(vf->aq_resp);
+err_aq:
+ i40e_shutdown_adminq(hw); /* ignore error */
+err:
+ return -1;
+}
+
+static int
+i40evf_uninit_vf(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->adapter_stopped == 0)
+ i40evf_dev_close(dev);
+ rte_free(vf->vf_res);
+ vf->vf_res = NULL;
+ rte_free(vf->aq_resp);
+ vf->aq_resp = NULL;
+
+ return 0;
+}
+
+static void
+i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
+ __rte_unused uint16_t msglen)
+{
+ struct virtchnl_pf_event *pf_msg =
+ (struct virtchnl_pf_event *)msg;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ switch (pf_msg->event) {
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ break;
+ case VIRTCHNL_EVENT_LINK_CHANGE:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
+ vf->link_up = pf_msg->event_data.link_event.link_status;
+ vf->link_speed = pf_msg->event_data.link_event.link_speed;
+ break;
+ case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
+ break;
+ default:
+ PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
+ break;
+ }
+}
+
+static void
+i40evf_handle_aq_msg(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_arq_event_info info;
+ uint16_t pending, aq_opc;
+ enum virtchnl_ops msg_opc;
+ enum i40e_status_code msg_ret;
+ int ret;
+
+ info.buf_len = I40E_AQ_BUF_SZ;
+ if (!vf->aq_resp) {
+ PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
+ return;
+ }
+ info.msg_buf = vf->aq_resp;
+
+ pending = 1;
+ while (pending) {
+ ret = i40e_clean_arq_element(hw, &info, &pending);
+
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ,"
+ "ret: %d", ret);
+ break;
+ }
+ aq_opc = rte_le_to_cpu_16(info.desc.opcode);
+ /* For the message sent from pf to vf, opcode is stored in
+ * cookie_high of struct i40e_aq_desc, while return error code
+ * are stored in cookie_low, Which is done by
+ * i40e_aq_send_msg_to_vf in PF driver.*/
+ msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
+ info.desc.cookie_high);
+ msg_ret = (enum i40e_status_code)rte_le_to_cpu_32(
+ info.desc.cookie_low);
+ switch (aq_opc) {
+ case i40e_aqc_opc_send_msg_to_vf:
+ if (msg_opc == VIRTCHNL_OP_EVENT)
+ /* process event*/
+ i40evf_handle_pf_event(dev, info.msg_buf,
+ info.msg_len);
+ else {
+ /* read message and it's expected one */
+ if (msg_opc == vf->pend_cmd) {
+ vf->cmd_retval = msg_ret;
+ /* prevent compiler reordering */
+ rte_compiler_barrier();
+ _clear_cmd(vf);
+ } else
+ PMD_DRV_LOG(ERR, "command mismatch,"
+ "expect %u, get %u",
+ vf->pend_cmd, msg_opc);
+ PMD_DRV_LOG(DEBUG, "adminq response is received,"
+ " opcode = %d", msg_opc);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+ aq_opc);
+ break;
+ }
+ }
+}
+
+/**
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt. Only adminq interrupt is processed in VF.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) regsitered before.
+ *
+ * @return
+ * void
+ */
+static void
+i40evf_dev_alarm_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t icr0;
+
+ i40evf_disable_irq0(hw);
+
+ /* read out interrupt causes */
+ icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01);
+
+ /* No interrupt event indicated */
+ if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
+ PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
+ goto done;
+ }
+
+ if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
+ PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
+ i40evf_handle_aq_msg(dev);
+ }
+
+ /* Link Status Change interrupt */
+ if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
+ PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"
+ " do nothing");
+
+done:
+ i40evf_enable_irq0(hw);
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, dev);
+}
+
+static int
+i40evf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct i40e_hw *hw
+ = I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* assign ops func pointer */
+ eth_dev->dev_ops = &i40evf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &i40e_recv_pkts;
+ eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ i40e_set_rx_function(eth_dev);
+ i40e_set_tx_function(eth_dev);
+ return 0;
+ }
+ i40e_set_default_ptype_table(eth_dev);
+ i40e_set_default_pctype_table(eth_dev);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->bus.device = pci_dev->addr.devid;
+ hw->bus.func = pci_dev->addr.function;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->adapter_stopped = 0;
+
+ if(i40evf_init_vf(eth_dev) != 0) {
+ PMD_INIT_LOG(ERR, "Init vf failed");
+ return -1;
+ }
+
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, eth_dev);
+
+ /* configure and enable device interrupt */
+ i40evf_enable_irq0(hw);
+
+ /* copy mac addr */
+ eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
+ ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
+ 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
+ " store MAC addresses",
+ ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
+ return -ENOMEM;
+ }
+ ether_addr_copy((struct ether_addr *)hw->mac.addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ return 0;
+}
+
+static int
+i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ if (i40evf_uninit_vf(eth_dev) != 0) {
+ PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
+ return -1;
+ }
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
+static int eth_i40evf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct i40e_adapter), i40evf_dev_init);
+}
+
+static int eth_i40evf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, i40evf_dev_uninit);
+}
+
+/*
+ * virtual function driver struct
+ */
+static struct rte_pci_driver rte_i40evf_pmd = {
+ .id_table = pci_id_i40evf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = eth_i40evf_pci_probe,
+ .remove = eth_i40evf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio-pci");
+
+static int
+i40evf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+ struct i40e_vf *vf;
+
+ /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
+ * allocation or vector Rx preconditions we will reset it.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
+
+ /* For non-DPDK PF drivers, VF has no ability to disable HW
+ * CRC strip, and is implicitly enabled by the PF.
+ */
+ if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
+ vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
+ (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
+ /* Peer is running non-DPDK PF driver. */
+ PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");
+ return -EINVAL;
+ }
+ }
+
+ return i40evf_init_vlan(dev);
+}
+
+static int
+i40evf_init_vlan(struct rte_eth_dev *dev)
+{
+ /* Apply vlan offload setting */
+ i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+
+ return 0;
+}
+
+static int
+i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ return -ENOTSUP;
+
+ /* Vlan stripping setting */
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ i40evf_enable_vlan_strip(dev);
+ else
+ i40evf_disable_vlan_strip(dev);
+ }
+
+ return 0;
+}
+
+static int
+i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
+
+ rte_wmb();
+
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ I40EVF_WRITE_FLUSH(hw);
+
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ return err;
+ }
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
+ }
+
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static int
+i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct i40e_tx_queue *txq;
+ int err;
+
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
+ }
+
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static int
+i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ int ret;
+
+ if (on)
+ ret = i40evf_add_vlan(dev, vlan_id);
+ else
+ ret = i40evf_del_vlan(dev,vlan_id);
+
+ return ret;
+}
+
+static int
+i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = dev->data;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint16_t buf_size, len;
+
+ rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id);
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ I40EVF_WRITE_FLUSH(hw);
+
+ /* Calculate the maximum packet length allowed */
+ mbp_priv = rte_mempool_get_priv(rxq->mp);
+ buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+ rxq->hs_mode = i40e_header_split_none;
+ rxq->rx_hdr_len = 0;
+ rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+ len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS;
+ rxq->max_pkt_len = RTE_MIN(len,
+ dev_data->dev_conf.rxmode.max_rx_pkt_len);
+
+ /**
+ * Check if the jumbo frame and maximum packet length are set correctly
+ */
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, as jumbo "
+ "frame is enabled", (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)I40E_FRAME_SIZE_MAX);
+ return I40E_ERR_CONFIG;
+ }
+ } else {
+ if (rxq->max_pkt_len < ETHER_MIN_LEN ||
+ rxq->max_pkt_len > ETHER_MAX_LEN) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, as jumbo "
+ "frame is disabled", (uint32_t)ETHER_MIN_LEN,
+ (uint32_t)ETHER_MAX_LEN);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+ dev_data->scattered_rx = 1;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_rx_init(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint16_t i;
+ int ret = I40E_SUCCESS;
+ struct i40e_rx_queue **rxq =
+ (struct i40e_rx_queue **)dev->data->rx_queues;
+
+ i40evf_config_rss(vf);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!rxq[i] || !rxq[i]->q_set)
+ continue;
+ ret = i40evf_rxq_init(dev, rxq[i]);
+ if (ret != I40E_SUCCESS)
+ break;
+ }
+ if (ret == I40E_SUCCESS)
+ i40e_set_rx_function(dev);
+
+ return ret;
+}
+
+static void
+i40evf_tx_init(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ struct i40e_tx_queue **txq =
+ (struct i40e_tx_queue **)dev->data->tx_queues;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+
+ i40e_set_tx_function(dev);
+}
+
+static inline void
+i40evf_enable_queues_intr(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ if (!rte_intr_allow_others(intr_handle)) {
+ I40E_WRITE_REG(hw,
+ I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ I40EVF_WRITE_FLUSH(hw);
+ return;
+ }
+
+ I40EVF_WRITE_FLUSH(hw);
+}
+
+static inline void
+i40evf_disable_queues_intr(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ if (!rte_intr_allow_others(intr_handle)) {
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ I40EVF_WRITE_FLUSH(hw);
+ return;
+ }
+
+ I40EVF_WRITE_FLUSH(hw);
+}
+
+static int
+i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t interval =
+ i40e_calc_itr_interval(0, 0);
+ uint16_t msix_intr;
+
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
+ (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT));
+ else
+ I40E_WRITE_REG(hw,
+ I40E_VFINT_DYN_CTLN1(msix_intr -
+ I40E_RX_VEC_START),
+ I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
+
+ I40EVF_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t msix_intr;
+
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
+ else
+ I40E_WRITE_REG(hw,
+ I40E_VFINT_DYN_CTLN1(msix_intr -
+ I40E_RX_VEC_START),
+ 0);
+
+ I40EVF_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static void
+i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err, i, j;
+ int next_begin = 0;
+ int begin = 0;
+ uint32_t len;
+ struct ether_addr *addr;
+ struct vf_cmd_info args;
+
+ do {
+ j = 0;
+ len = sizeof(struct virtchnl_ether_addr_list);
+ for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) {
+ if (is_zero_ether_addr(&dev->data->mac_addrs[i]))
+ continue;
+ len += sizeof(struct virtchnl_ether_addr);
+ if (len >= I40E_AQ_BUF_SZ) {
+ next_begin = i + 1;
+ break;
+ }
+ }
+
+ list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
+ if (!list) {
+ PMD_DRV_LOG(ERR, "fail to allocate memory");
+ return;
+ }
+
+ for (i = begin; i < next_begin; i++) {
+ addr = &dev->data->mac_addrs[i];
+ if (is_zero_ether_addr(addr))
+ continue;
+ rte_memcpy(list->list[j].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+ PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
+ j++;
+ }
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = j;
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
+ VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.in_args = (uint8_t *)list;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_ETHER_ADDRESS" :
+ "OP_DEL_ETHER_ADDRESS");
+ } else {
+ if (add)
+ vf->vsi.mac_num++;
+ else
+ vf->vsi.mac_num--;
+ }
+ rte_free(list);
+ begin = next_begin;
+ } while (begin < I40E_NUM_MACADDR_MAX);
+}
+
+static int
+i40evf_dev_start(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->adapter_stopped = 0;
+
+ vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
+
+ /* check and configure queue intr-vector mapping */
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (!intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ if (i40evf_rx_init(dev) != 0){
+ PMD_DRV_LOG(ERR, "failed to do RX init");
+ return -1;
+ }
+
+ i40evf_tx_init(dev);
+
+ if (i40evf_configure_vsi_queues(dev) != 0) {
+ PMD_DRV_LOG(ERR, "configure queues failed");
+ goto err_queue;
+ }
+ if (i40evf_config_irq_map(dev)) {
+ PMD_DRV_LOG(ERR, "config_irq_map failed");
+ goto err_queue;
+ }
+
+ /* Set all mac addrs */
+ i40evf_add_del_all_mac_addr(dev, TRUE);
+ /* Set all multicast addresses */
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ TRUE);
+
+ if (i40evf_start_queues(dev) != 0) {
+ PMD_DRV_LOG(ERR, "enable queues failed");
+ goto err_mac;
+ }
+
+ /* only enable interrupt in rx interrupt mode */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ rte_intr_enable(intr_handle);
+
+ i40evf_enable_queues_intr(dev);
+
+ return 0;
+
+err_mac:
+ i40evf_add_del_all_mac_addr(dev, FALSE);
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
+err_queue:
+ return -1;
+}
+
+static void
+i40evf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ rte_intr_disable(intr_handle);
+
+ if (hw->adapter_stopped == 1)
+ return;
+ i40evf_stop_queues(dev);
+ i40evf_disable_queues_intr(dev);
+ i40e_dev_clear_queues(dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+ /* remove all mac addrs */
+ i40evf_add_del_all_mac_addr(dev, FALSE);
+ /* remove all multicast addresses */
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
+ hw->adapter_stopped = 1;
+
+}
+
+static int
+i40evf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct rte_eth_link new_link;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ /*
+ * DPDK pf host provide interfacet to acquire link status
+ * while Linux driver does not
+ */
+
+ memset(&new_link, 0, sizeof(new_link));
+ /* Linux driver PF host */
+ switch (vf->link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ new_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ new_link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ new_link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ new_link.link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case I40E_LINK_SPEED_25GB:
+ new_link.link_speed = ETH_SPEED_NUM_25G;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ new_link.link_speed = ETH_SPEED_NUM_40G;
+ break;
+ default:
+ new_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ }
+ /* full duplex only */
+ new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = vf->link_up ? ETH_LINK_UP :
+ ETH_LINK_DOWN;
+ new_link.link_autoneg =
+ !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+
+ return rte_eth_linkstatus_set(dev, &new_link);
+}
+
+static void
+i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
+ /* If enabled, just return */
+ if (vf->promisc_unicast_enabled)
+ return;
+
+ ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
+ if (ret == 0)
+ vf->promisc_unicast_enabled = TRUE;
+}
+
+static void
+i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
+ /* If disabled, just return */
+ if (!vf->promisc_unicast_enabled)
+ return;
+
+ ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
+ if (ret == 0)
+ vf->promisc_unicast_enabled = FALSE;
+}
+
+static void
+i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
+ /* If enabled, just return */
+ if (vf->promisc_multicast_enabled)
+ return;
+
+ ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
+ if (ret == 0)
+ vf->promisc_multicast_enabled = TRUE;
+}
+
+static void
+i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
+ /* If enabled, just return */
+ if (!vf->promisc_multicast_enabled)
+ return;
+
+ ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
+ if (ret == 0)
+ vf->promisc_multicast_enabled = FALSE;
+}
+
+static void
+i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+ dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+ dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
+ dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ dev_info->tx_queue_offload_capa = 0;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = I40E_DEFAULT_RX_PTHRESH,
+ .hthresh = I40E_DEFAULT_RX_HTHRESH,
+ .wthresh = I40E_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = I40E_DEFAULT_TX_PTHRESH,
+ .hthresh = I40E_DEFAULT_TX_HTHRESH,
+ .wthresh = I40E_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+}
+
+static int
+i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ int ret;
+ struct i40e_eth_stats *pstats = NULL;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_vsi *vsi = &vf->vsi;
+
+ ret = i40evf_query_stats(dev, &pstats);
+ if (ret == 0) {
+ i40evf_update_stats(vsi, pstats);
+
+ stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
+ pstats->rx_broadcast;
+ stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
+ pstats->tx_unicast;
+ stats->imissed = pstats->rx_discards;
+ stats->oerrors = pstats->tx_errors + pstats->tx_discards;
+ stats->ibytes = pstats->rx_bytes;
+ stats->obytes = pstats->tx_bytes;
+ } else {
+ PMD_DRV_LOG(ERR, "Get statistics failed");
+ }
+ return ret;
+}
+
+static void
+i40evf_dev_close(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
+ i40evf_dev_stop(dev);
+ i40e_dev_free_queues(dev);
+ /*
+ * disable promiscuous mode before reset vf
+ * it is a workaround solution when work with kernel driver
+ * and it is not the normal way
+ */
+ i40evf_dev_promiscuous_disable(dev);
+ i40evf_dev_allmulticast_disable(dev);
+
+ i40evf_reset_vf(hw);
+ i40e_shutdown_adminq(hw);
+ i40evf_disable_irq0(hw);
+}
+
+/*
+ * Reset VF device only to re-initialize resources in PMD layer
+ */
+static int
+i40evf_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = i40evf_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = i40evf_dev_init(dev);
+
+ return ret;
+}
+
+static int
+i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!lut)
+ return -EINVAL;
+
+ if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE,
+ lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
+ return ret;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ for (i = 0; i < lut_size_dw; i++)
+ lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i));
+ }
+
+ return 0;
+}
+
+static int
+i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct i40e_vf *vf;
+ struct i40e_hw *hw;
+ int ret;
+
+ if (!vsi || !lut)
+ return -EINVAL;
+
+ vf = I40E_VSI_TO_VF(vsi);
+ hw = I40E_VSI_TO_HW(vsi);
+
+ if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE,
+ lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
+ return ret;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ for (i = 0; i < lut_size_dw; i++)
+ I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
+ I40EVF_WRITE_FLUSH(hw);
+ }
+
+ return 0;
+}
+
+static int
+i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint8_t *lut;
+ uint16_t i, idx, shift;
+ int ret;
+
+ if (reta_size != ETH_RSS_RETA_SIZE_64) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number of hardware can "
+ "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
+ return -EINVAL;
+ }
+
+ lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+ ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
+ if (ret)
+ goto out;
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ lut[i] = reta_conf[idx].reta[shift];
+ }
+ ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size);
+
+out:
+ rte_free(lut);
+
+ return ret;
+}
+
+static int
+i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint16_t i, idx, shift;
+ uint8_t *lut;
+ int ret;
+
+ if (reta_size != ETH_RSS_RETA_SIZE_64) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number of hardware can "
+ "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
+ return -EINVAL;
+ }
+
+ lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+
+ ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
+ if (ret)
+ goto out;
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = lut[i];
+ }
+
+out:
+ rte_free(lut);
+
+ return ret;
+}
+
+static int
+i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
+{
+ struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret = 0;
+
+ if (!key || key_len == 0) {
+ PMD_DRV_LOG(DEBUG, "No key to be configured");
+ return 0;
+ } else if (key_len != (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
+ return -EINVAL;
+ }
+
+ if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ struct i40e_aqc_get_set_rss_key_data *key_dw =
+ (struct i40e_aqc_get_set_rss_key_data *)key;
+
+ ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to configure RSS key "
+ "via AQ");
+ } else {
+ uint32_t *hash_key = (uint32_t *)key;
+ uint16_t i;
+
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_VFQF_HKEY(i), hash_key[i]);
+ I40EVF_WRITE_FLUSH(hw);
+ }
+
+ return ret;
+}
+
+static int
+i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
+{
+ struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!key || !key_len)
+ return -EINVAL;
+
+ if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
+ (struct i40e_aqc_get_set_rss_key_data *)key);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
+ return ret;
+ }
+ } else {
+ uint32_t *key_dw = (uint32_t *)key;
+ uint16_t i;
+
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ key_dw[i] = i40e_read_rx_ctl(hw, I40E_VFQF_HKEY(i));
+ }
+ *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
+
+ return 0;
+}
+
+static int
+i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = I40E_VF_TO_HW(vf);
+ uint64_t hena;
+ int ret;
+
+ ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+ if (ret)
+ return ret;
+
+ hena = i40e_config_hena(vf->adapter, rss_conf->rss_hf);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40EVF_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static void
+i40evf_disable_rss(struct i40e_vf *vf)
+{
+ struct i40e_hw *hw = I40E_VF_TO_HW(vf);
+
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), 0);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), 0);
+ I40EVF_WRITE_FLUSH(hw);
+}
+
+static int
+i40evf_config_rss(struct i40e_vf *vf)
+{
+ struct i40e_hw *hw = I40E_VF_TO_HW(vf);
+ struct rte_eth_rss_conf rss_conf;
+ uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+ uint16_t num;
+
+ if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ i40evf_disable_rss(vf);
+ PMD_DRV_LOG(DEBUG, "RSS not configured");
+ return 0;
+ }
+
+ num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
+ /* Fill out the look up table */
+ for (i = 0, j = 0; i < nb_q; i++, j++) {
+ if (j >= num)
+ j = 0;
+ lut = (lut << 8) | j;
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ }
+
+ rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & vf->adapter->flow_types_mask) == 0) {
+ i40evf_disable_rss(vf);
+ PMD_DRV_LOG(DEBUG, "No hash flag is set");
+ return 0;
+ }
+
+ if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+ (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Calculate the default hash key */
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ rss_key_default[i] = (uint32_t)rte_rand();
+ rss_conf.rss_key = (uint8_t *)rss_key_default;
+ rss_conf.rss_key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
+
+ return i40evf_hw_rss_hash_set(vf, &rss_conf);
+}
+
+static int
+i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rss_hf = rss_conf->rss_hf & vf->adapter->flow_types_mask;
+ uint64_t hena;
+
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
+
+ if (!(hena & vf->adapter->pctypes_mask)) { /* RSS disabled */
+ if (rss_hf != 0) /* Enable RSS */
+ return -EINVAL;
+ return 0;
+ }
+
+ /* RSS enabled */
+ if (rss_hf == 0) /* Disable RSS */
+ return -EINVAL;
+
+ return i40evf_hw_rss_hash_set(vf, rss_conf);
+}
+
+static int
+i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t hena;
+
+ i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key,
+ &rss_conf->rss_key_len);
+
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
+ rss_conf->rss_hf = i40e_parse_hena(vf->adapter, hena);
+
+ return 0;
+}
+
+static int
+i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = vf->dev_data;
+ uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
+ int ret = 0;
+
+ /* check if mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ return -EINVAL;
+
+ /* mtu setting is forbidden if port is start */
+ if (dev_data->dev_started) {
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+ dev_data->port_id);
+ return -EBUSY;
+ }
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return ret;
+}
+
+static int
+i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!is_valid_assigned_ether_addr(mac_addr)) {
+ PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
+ return -EINVAL;
+ }
+
+ if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
+ return -EPERM;
+
+ i40evf_del_mac_addr_by_addr(dev, (struct ether_addr *)hw->mac.addr);
+
+ if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0)
+ return -EIO;
+
+ ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
+ return 0;
+}
+
+static int
+i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num, bool add)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) +
+ (I40E_NUM_MACADDR_MAX * sizeof(struct virtchnl_ether_addr))];
+ uint32_t i;
+ int err;
+ struct vf_cmd_info args;
+
+ if (mc_addrs == NULL || mc_addrs_num == 0)
+ return 0;
+
+ if (mc_addrs_num > I40E_NUM_MACADDR_MAX)
+ return -EINVAL;
+
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = mc_addrs_num;
+
+ for (i = 0; i < mc_addrs_num; i++) {
+ if (!I40E_IS_MULTICAST(mc_addrs[i].addr_bytes)) {
+ PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
+ mc_addrs[i].addr_bytes[0],
+ mc_addrs[i].addr_bytes[1],
+ mc_addrs[i].addr_bytes[2],
+ mc_addrs[i].addr_bytes[3],
+ mc_addrs[i].addr_bytes[4],
+ mc_addrs[i].addr_bytes[5]);
+ return -EINVAL;
+ }
+
+ memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
+ sizeof(list->list[i].addr));
+ }
+
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(struct virtchnl_ether_addr_list) +
+ i * sizeof(struct virtchnl_ether_addr);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+
+ /* flush previous addresses */
+ err = i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
+ if (err)
+ return err;
+
+ vf->mc_addrs_num = 0;
+
+ /* add new ones */
+ err = i40evf_add_del_mc_addr_list(dev, mc_addrs, mc_addrs_num,
+ TRUE);
+ if (err)
+ return err;
+
+ vf->mc_addrs_num = mc_addrs_num;
+ memcpy(vf->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_fdir.c b/src/spdk/dpdk/drivers/net/i40e/i40e_fdir.c
new file mode 100644
index 00000000..d41601a1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_fdir.c
@@ -0,0 +1,2182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_arp.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_hash_crc.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_type.h"
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+#define I40E_FDIR_MZ_NAME "FDIR_MEMZONE"
+#ifndef IPV6_ADDR_LEN
+#define IPV6_ADDR_LEN 16
+#endif
+
+#define I40E_FDIR_PKT_LEN 512
+#define I40E_FDIR_IP_DEFAULT_LEN 420
+#define I40E_FDIR_IP_DEFAULT_TTL 0x40
+#define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
+#define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
+#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000
+
+#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
+#define I40E_FDIR_IPv6_PAYLOAD_LEN 380
+#define I40E_FDIR_UDP_DEFAULT_LEN 400
+#define I40E_FDIR_GTP_DEFAULT_LEN 384
+#define I40E_FDIR_INNER_IP_DEFAULT_LEN 384
+#define I40E_FDIR_INNER_IPV6_DEFAULT_LEN 344
+
+#define I40E_FDIR_GTPC_DST_PORT 2123
+#define I40E_FDIR_GTPU_DST_PORT 2152
+#define I40E_FDIR_GTP_VER_FLAG_0X30 0x30
+#define I40E_FDIR_GTP_VER_FLAG_0X32 0x32
+#define I40E_FDIR_GTP_MSG_TYPE_0X01 0x01
+#define I40E_FDIR_GTP_MSG_TYPE_0XFF 0xFF
+
+/* Wait time for fdir filter programming */
+#define I40E_FDIR_MAX_WAIT_US 10000
+
+/* Wait count and interval for fdir filter flush */
+#define I40E_FDIR_FLUSH_RETRY 50
+#define I40E_FDIR_FLUSH_INTERVAL_MS 5
+
+#define I40E_COUNTER_PF 2
+/* Statistic counter index for one pf */
+#define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
+
+#define I40E_FDIR_FLOWS ( \
+ (1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+ (1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
+ (1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
+
+static int i40e_fdir_filter_programming(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
+ struct i40e_fdir_filter *filter);
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct i40e_fdir_input *input);
+static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct i40e_fdir_filter_conf *filter,
+ bool add);
+
+static int
+i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
+ struct i40e_hmc_obj_rxq rx_ctx;
+ int err = I40E_SUCCESS;
+
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ /* Init the RX queue in hardware */
+ rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.hbuff = 0;
+ rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ rx_ctx.qlen = rxq->nb_rx_desc;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ rx_ctx.dsize = 1;
+#endif
+ rx_ctx.dtype = i40e_header_split_none;
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
+ rx_ctx.rxmax = ETHER_MAX_LEN;
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = 0;
+ rx_ctx.l2tsel = 1;
+ rx_ctx.showiv = 0;
+ rx_ctx.prefena = 1;
+
+ err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
+ return err;
+ }
+ err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
+ return err;
+ }
+ rxq->qrx_tail = hw->hw_addr +
+ I40E_QRX_TAIL(rxq->vsi->base_queue);
+
+ rte_wmb();
+ /* Init the RX tail regieter. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+ return err;
+}
+
+/*
+ * i40e_fdir_setup - reserve and initialize the Flow Director resources
+ * @pf: board private structure
+ */
+int
+i40e_fdir_setup(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ int err = I40E_SUCCESS;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz = NULL;
+ struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+
+ if ((pf->flags & I40E_FLAG_FDIR) == 0) {
+ PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
+ " num_filters_best_effort = %u.",
+ hw->func_caps.fd_filters_guaranteed,
+ hw->func_caps.fd_filters_best_effort);
+
+ vsi = pf->fdir.fdir_vsi;
+ if (vsi) {
+ PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
+ return I40E_SUCCESS;
+ }
+ /* make new FDIR VSI */
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
+ return I40E_ERR_NO_AVAILABLE_VSI;
+ }
+ pf->fdir.fdir_vsi = vsi;
+
+ /*Fdir tx queue setup*/
+ err = i40e_fdir_setup_tx_resources(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
+ goto fail_setup_tx;
+ }
+
+ /*Fdir rx queue setup*/
+ err = i40e_fdir_setup_rx_resources(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
+ goto fail_setup_rx;
+ }
+
+ err = i40e_tx_queue_init(pf->fdir.txq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
+ goto fail_mem;
+ }
+
+ /* need switch on before dev start*/
+ err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
+ goto fail_mem;
+ }
+
+ /* Init the rx queue in hardware */
+ err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
+ goto fail_mem;
+ }
+
+ /* switch on rx queue */
+ err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
+ goto fail_mem;
+ }
+
+ /* reserve memory for the fdir programming packet */
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d",
+ eth_dev->device->driver->name,
+ I40E_FDIR_MZ_NAME,
+ eth_dev->data->port_id);
+ mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
+ if (!mz) {
+ PMD_DRV_LOG(ERR, "Cannot init memzone for "
+ "flow director program packet.");
+ err = I40E_ERR_NO_MEMORY;
+ goto fail_mem;
+ }
+ pf->fdir.prg_pkt = mz->addr;
+ pf->fdir.dma_addr = mz->iova;
+
+ pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
+ PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
+ vsi->base_queue);
+ return I40E_SUCCESS;
+
+fail_mem:
+ i40e_dev_rx_queue_release(pf->fdir.rxq);
+ pf->fdir.rxq = NULL;
+fail_setup_rx:
+ i40e_dev_tx_queue_release(pf->fdir.txq);
+ pf->fdir.txq = NULL;
+fail_setup_tx:
+ i40e_vsi_release(vsi);
+ pf->fdir.fdir_vsi = NULL;
+ return err;
+}
+
+/*
+ * i40e_fdir_teardown - release the Flow Director resources
+ * @pf: board private structure
+ */
+void
+i40e_fdir_teardown(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+
+ vsi = pf->fdir.fdir_vsi;
+ if (!vsi)
+ return;
+ int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
+ if (err)
+ PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
+ err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
+ if (err)
+ PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
+ i40e_dev_rx_queue_release(pf->fdir.rxq);
+ pf->fdir.rxq = NULL;
+ i40e_dev_tx_queue_release(pf->fdir.txq);
+ pf->fdir.txq = NULL;
+ i40e_vsi_release(vsi);
+ pf->fdir.fdir_vsi = NULL;
+}
+
+/* check whether the flow director table in empty */
+static inline int
+i40e_fdir_empty(struct i40e_hw *hw)
+{
+ uint32_t guarant_cnt, best_cnt;
+
+ guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
+ I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
+ I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ if (best_cnt + guarant_cnt > 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Initialize the configuration about bytes stream extracted as flexible payload
+ * and mask setting
+ */
+static inline void
+i40e_init_flx_pld(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint8_t pctype;
+ int i, index;
+ uint16_t flow_type;
+
+ /*
+ * Define the bytes stream extracted as flexible payload in
+ * field vector. By default, select 8 words from the beginning
+ * of payload as flexible payload.
+ */
+ for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
+ index = i * I40E_MAX_FLXPLD_FIED;
+ pf->fdir.flex_set[index].src_offset = 0;
+ pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
+ pf->fdir.flex_set[index].dst_offset = 0;
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
+ I40E_WRITE_REG(hw,
+ I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
+ I40E_WRITE_REG(hw,
+ I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
+ }
+
+ /* initialize the masks */
+ for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
+ flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
+
+ if (flow_type == RTE_ETH_FLOW_UNKNOWN)
+ continue;
+ pf->fdir.flex_mask[pctype].word_mask = 0;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
+ for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
+ pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
+ pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
+ }
+ }
+}
+
+#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
+ if ((flex_pit2).src_offset < \
+ (flex_pit1).src_offset + (flex_pit1).size) { \
+ PMD_DRV_LOG(ERR, "src_offset should be not" \
+ " less than than previous offset" \
+ " + previous FSIZE."); \
+ return -EINVAL; \
+ } \
+} while (0)
+
+/*
+ * i40e_srcoff_to_flx_pit - transform the src_offset into flex_pit structure,
+ * and the flex_pit will be sorted by it's src_offset value
+ */
+static inline uint16_t
+i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
+ struct i40e_fdir_flex_pit *flex_pit)
+{
+ uint16_t src_tmp, size, num = 0;
+ uint16_t i, k, j = 0;
+
+ while (j < I40E_FDIR_MAX_FLEX_LEN) {
+ size = 1;
+ for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
+ if (src_offset[j + 1] == src_offset[j] + 1)
+ size++;
+ else
+ break;
+ }
+ src_tmp = src_offset[j] + 1 - size;
+ /* the flex_pit need to be sort by src_offset */
+ for (i = 0; i < num; i++) {
+ if (src_tmp < flex_pit[i].src_offset)
+ break;
+ }
+ /* if insert required, move backward */
+ for (k = num; k > i; k--)
+ flex_pit[k] = flex_pit[k - 1];
+ /* insert */
+ flex_pit[i].dst_offset = j + 1 - size;
+ flex_pit[i].src_offset = src_tmp;
+ flex_pit[i].size = size;
+ j++;
+ num++;
+ }
+ return num;
+}
+
+/* i40e_check_fdir_flex_payload -check flex payload configuration arguments */
+static inline int
+i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
+{
+ struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
+ uint16_t num, i;
+
+ for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
+ if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
+ PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
+ return -EINVAL;
+ }
+ }
+
+ memset(flex_pit, 0, sizeof(flex_pit));
+ num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
+ if (num > I40E_MAX_FLXPLD_FIED) {
+ PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
+ return -EINVAL;
+ }
+ for (i = 0; i < num; i++) {
+ if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
+ flex_pit[i].src_offset & 0x01) {
+ PMD_DRV_LOG(ERR, "flexpayload should be measured"
+ " in word");
+ return -EINVAL;
+ }
+ if (i != num - 1)
+ I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
+ }
+ return 0;
+}
+
+/*
+ * i40e_check_fdir_flex_conf -check if the flex payload and mask configuration
+ * arguments are valid
+ */
+static int
+i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
+ const struct rte_eth_fdir_flex_conf *conf)
+{
+ const struct rte_eth_flex_payload_cfg *flex_cfg;
+ const struct rte_eth_fdir_flex_mask *flex_mask;
+ uint16_t mask_tmp;
+ uint8_t nb_bitmask;
+ uint16_t i, j;
+ int ret = 0;
+ enum i40e_filter_pctype pctype;
+
+ if (conf == NULL) {
+ PMD_DRV_LOG(INFO, "NULL pointer.");
+ return -EINVAL;
+ }
+ /* check flexible payload setting configuration */
+ if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
+ PMD_DRV_LOG(ERR, "invalid number of payload setting.");
+ return -EINVAL;
+ }
+ for (i = 0; i < conf->nb_payloads; i++) {
+ flex_cfg = &conf->flex_set[i];
+ if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
+ PMD_DRV_LOG(ERR, "invalid payload type.");
+ return -EINVAL;
+ }
+ ret = i40e_check_fdir_flex_payload(flex_cfg);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
+ return -EINVAL;
+ }
+ }
+
+ /* check flex mask setting configuration */
+ if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
+ PMD_DRV_LOG(ERR, "invalid number of flex masks.");
+ return -EINVAL;
+ }
+ for (i = 0; i < conf->nb_flexmasks; i++) {
+ flex_mask = &conf->flex_mask[i];
+ pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
+ if (pctype == I40E_FILTER_PCTYPE_INVALID) {
+ PMD_DRV_LOG(WARNING, "invalid flow type.");
+ return -EINVAL;
+ }
+ nb_bitmask = 0;
+ for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
+ mask_tmp = I40E_WORD(flex_mask->mask[j],
+ flex_mask->mask[j + 1]);
+ if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
+ nb_bitmask++;
+ if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
+ PMD_DRV_LOG(ERR, " exceed maximal"
+ " number of bitmasks.");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * i40e_set_flx_pld_cfg -configure the rule how bytes stream is extracted as flexible payload
+ * @pf: board private structure
+ * @cfg: the rule how bytes stream is extracted as flexible payload
+ */
+static void
+i40e_set_flx_pld_cfg(struct i40e_pf *pf,
+ const struct rte_eth_flex_payload_cfg *cfg)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
+ uint32_t flx_pit, flx_ort;
+ uint16_t num, min_next_off; /* in words */
+ uint8_t field_idx = 0;
+ uint8_t layer_idx = 0;
+ uint16_t i;
+
+ if (cfg->type == RTE_ETH_L2_PAYLOAD)
+ layer_idx = I40E_FLXPLD_L2_IDX;
+ else if (cfg->type == RTE_ETH_L3_PAYLOAD)
+ layer_idx = I40E_FLXPLD_L3_IDX;
+ else if (cfg->type == RTE_ETH_L4_PAYLOAD)
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
+ memset(flex_pit, 0, sizeof(flex_pit));
+ num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit),
+ RTE_DIM(flex_pit));
+
+ if (num) {
+ flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
+ (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
+ (layer_idx * I40E_MAX_FLXPLD_FIED);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+ }
+
+ for (i = 0; i < num; i++) {
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+ /* record the info in fdir structure */
+ pf->fdir.flex_set[field_idx].src_offset =
+ flex_pit[i].src_offset / sizeof(uint16_t);
+ pf->fdir.flex_set[field_idx].size =
+ flex_pit[i].size / sizeof(uint16_t);
+ pf->fdir.flex_set[field_idx].dst_offset =
+ flex_pit[i].dst_offset / sizeof(uint16_t);
+ flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
+ pf->fdir.flex_set[field_idx].size,
+ pf->fdir.flex_set[field_idx].dst_offset);
+
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+ }
+ min_next_off = pf->fdir.flex_set[field_idx].src_offset +
+ pf->fdir.flex_set[field_idx].size;
+
+ for (; i < I40E_MAX_FLXPLD_FIED; i++) {
+ /* set the non-used register obeying register's constrain */
+ flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
+ NONUSE_FLX_PIT_DEST_OFF);
+ I40E_WRITE_REG(hw,
+ I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
+ flx_pit);
+ min_next_off++;
+ }
+}
+
+/*
+ * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
+ * @pf: board private structure
+ * @pctype: packet classify type
+ * @flex_masks: mask for flexible payload
+ */
+static void
+i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct rte_eth_fdir_flex_mask *mask_cfg)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_fdir_flex_mask *flex_mask;
+ uint32_t flxinset, fd_mask;
+ uint16_t mask_tmp;
+ uint8_t i, nb_bitmask = 0;
+
+ flex_mask = &pf->fdir.flex_mask[pctype];
+ memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
+ for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
+ mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
+ if (mask_tmp != 0x0) {
+ flex_mask->word_mask |=
+ I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
+ if (mask_tmp != UINT16_MAX) {
+ /* set bit mask */
+ flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
+ flex_mask->bitmask[nb_bitmask].offset =
+ i / sizeof(uint16_t);
+ nb_bitmask++;
+ }
+ }
+ }
+ /* write mask to hw */
+ flxinset = (flex_mask->word_mask <<
+ I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
+ I40E_PRTQF_FD_FLXINSET_INSET_MASK;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
+
+ for (i = 0; i < nb_bitmask; i++) {
+ fd_mask = (flex_mask->bitmask[i].mask <<
+ I40E_PRTQF_FD_MSK_MASK_SHIFT) &
+ I40E_PRTQF_FD_MSK_MASK_MASK;
+ fd_mask |= ((flex_mask->bitmask[i].offset +
+ I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
+ I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
+ I40E_PRTQF_FD_MSK_OFFSET_MASK;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
+ }
+}
+
+/*
+ * Configure flow director related setting
+ */
+int
+i40e_fdir_configure(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_fdir_flex_conf *conf;
+ enum i40e_filter_pctype pctype;
+ uint32_t val;
+ uint8_t i;
+ int ret = 0;
+
+ /*
+ * configuration need to be done before
+ * flow director filters are added
+ * If filters exist, flush them.
+ */
+ if (i40e_fdir_empty(hw) < 0) {
+ ret = i40e_fdir_flush(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to flush fdir table.");
+ return ret;
+ }
+ }
+
+ /* enable FDIR filter */
+ val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
+ val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+ i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
+
+ i40e_init_flx_pld(pf); /* set flex config to default value */
+
+ conf = &dev->data->dev_conf.fdir_conf.flex_conf;
+ ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, " invalid configuration arguments.");
+ return -EINVAL;
+ }
+
+ if (!pf->support_multi_driver) {
+ /* configure flex payload */
+ for (i = 0; i < conf->nb_payloads; i++)
+ i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
+ /* configure flex mask*/
+ for (i = 0; i < conf->nb_flexmasks; i++) {
+ if (hw->mac.type == I40E_MAC_X722) {
+ /* get pctype value in fd pctype register */
+ pctype = (enum i40e_filter_pctype)
+ i40e_read_rx_ctl(hw,
+ I40E_GLQF_FD_PCTYPES(
+ (int)i40e_flowtype_to_pctype(
+ pf->adapter,
+ conf->flex_mask[i].flow_type)));
+ } else {
+ pctype = i40e_flowtype_to_pctype(pf->adapter,
+ conf->flex_mask[i].flow_type);
+ }
+
+ i40e_set_flex_mask_on_pctype(pf, pctype,
+ &conf->flex_mask[i]);
+ }
+ } else {
+ PMD_DRV_LOG(ERR, "Not support flexible payload.");
+ }
+
+ return ret;
+}
+
+static inline int
+i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
+ unsigned char *raw_pkt,
+ bool vlan)
+{
+ static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+ uint16_t *ether_type;
+ uint8_t len = 2 * sizeof(struct ether_addr);
+ struct ipv4_hdr *ip;
+ struct ipv6_hdr *ip6;
+ static const uint8_t next_proto[] = {
+ [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
+ [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
+ };
+
+ raw_pkt += 2 * sizeof(struct ether_addr);
+ if (vlan && fdir_input->flow_ext.vlan_tci) {
+ rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+ rte_memcpy(raw_pkt + sizeof(uint16_t),
+ &fdir_input->flow_ext.vlan_tci,
+ sizeof(uint16_t));
+ raw_pkt += sizeof(vlan_frame);
+ len += sizeof(vlan_frame);
+ }
+ ether_type = (uint16_t *)raw_pkt;
+ raw_pkt += sizeof(uint16_t);
+ len += sizeof(uint16_t);
+
+ switch (fdir_input->flow_type) {
+ case RTE_ETH_FLOW_L2_PAYLOAD:
+ *ether_type = fdir_input->flow.l2_flow.ether_type;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ case RTE_ETH_FLOW_FRAG_IPV4:
+ ip = (struct ipv4_hdr *)raw_pkt;
+
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+ /* set len to by default */
+ ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+ ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+ fdir_input->flow.ip4_flow.proto :
+ next_proto[fdir_input->flow_type];
+ ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+ fdir_input->flow.ip4_flow.ttl :
+ I40E_FDIR_IP_DEFAULT_TTL;
+ ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+ ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+ len += sizeof(struct ipv4_hdr);
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ case RTE_ETH_FLOW_FRAG_IPV6:
+ ip6 = (struct ipv6_hdr *)raw_pkt;
+
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ ip6->vtc_flow =
+ rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+ (fdir_input->flow.ipv6_flow.tc <<
+ I40E_FDIR_IPv6_TC_OFFSET));
+ ip6->payload_len =
+ rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+ fdir_input->flow.ipv6_flow.proto :
+ next_proto[fdir_input->flow_type];
+ ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
+ fdir_input->flow.ipv6_flow.hop_limits :
+ I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ rte_memcpy(&(ip6->src_addr),
+ &(fdir_input->flow.ipv6_flow.dst_ip),
+ IPV6_ADDR_LEN);
+ rte_memcpy(&(ip6->dst_addr),
+ &(fdir_input->flow.ipv6_flow.src_ip),
+ IPV6_ADDR_LEN);
+ len += sizeof(struct ipv6_hdr);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown flow type %u.",
+ fdir_input->flow_type);
+ return -1;
+ }
+ return len;
+}
+
+
+/*
+ * i40e_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_fdir_construct_pkt(struct i40e_pf *pf,
+ const struct rte_eth_fdir_input *fdir_input,
+ unsigned char *raw_pkt)
+{
+ unsigned char *payload, *ptr;
+ struct udp_hdr *udp;
+ struct tcp_hdr *tcp;
+ struct sctp_hdr *sctp;
+ uint8_t size, dst = 0;
+ uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
+ int len;
+
+ /* fill the ethernet and IP head */
+ len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+ !!fdir_input->flow_ext.vlan_tci);
+ if (len < 0)
+ return -EINVAL;
+
+ /* fill the L4 head */
+ switch (fdir_input->flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+ udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+ udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
+ tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
+ tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ sctp = (struct sctp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
+ sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
+ sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ case RTE_ETH_FLOW_FRAG_IPV4:
+ payload = raw_pkt + len;
+ set_idx = I40E_FLXPLD_L3_IDX;
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+ udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+ udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+ tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
+ tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ sctp = (struct sctp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
+ sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
+ sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ case RTE_ETH_FLOW_FRAG_IPV6:
+ payload = raw_pkt + len;
+ set_idx = I40E_FLXPLD_L3_IDX;
+ break;
+ case RTE_ETH_FLOW_L2_PAYLOAD:
+ payload = raw_pkt + len;
+ /*
+ * ARP packet is a special case on which the payload
+ * starts after the whole ARP header
+ */
+ if (fdir_input->flow.l2_flow.ether_type ==
+ rte_cpu_to_be_16(ETHER_TYPE_ARP))
+ payload += sizeof(struct arp_hdr);
+ set_idx = I40E_FLXPLD_L2_IDX;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
+ return -EINVAL;
+ }
+
+ /* fill the flexbytes to payload */
+ for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+ pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
+ size = pf->fdir.flex_set[pit_idx].size;
+ if (size == 0)
+ continue;
+ dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
+ ptr = payload +
+ pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
+ rte_memcpy(ptr,
+ &fdir_input->flow_ext.flexbytes[dst],
+ size * sizeof(uint16_t));
+ }
+
+ return 0;
+}
+
+static struct i40e_customized_pctype *
+i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
+{
+ struct i40e_customized_pctype *cus_pctype;
+ enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
+
+ for (; i < I40E_CUSTOMIZED_MAX; i++) {
+ cus_pctype = &pf->customized_pctype[i];
+ if (pctype == cus_pctype->pctype)
+ return cus_pctype;
+ }
+ return NULL;
+}
+
+static inline int
+i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
+ const struct i40e_fdir_input *fdir_input,
+ unsigned char *raw_pkt,
+ bool vlan)
+{
+ struct i40e_customized_pctype *cus_pctype = NULL;
+ static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+ uint16_t *ether_type;
+ uint8_t len = 2 * sizeof(struct ether_addr);
+ struct ipv4_hdr *ip;
+ struct ipv6_hdr *ip6;
+ uint8_t pctype = fdir_input->pctype;
+ bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
+ static const uint8_t next_proto[] = {
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
+ };
+
+ raw_pkt += 2 * sizeof(struct ether_addr);
+ if (vlan && fdir_input->flow_ext.vlan_tci) {
+ rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+ rte_memcpy(raw_pkt + sizeof(uint16_t),
+ &fdir_input->flow_ext.vlan_tci,
+ sizeof(uint16_t));
+ raw_pkt += sizeof(vlan_frame);
+ len += sizeof(vlan_frame);
+ }
+ ether_type = (uint16_t *)raw_pkt;
+ raw_pkt += sizeof(uint16_t);
+ len += sizeof(uint16_t);
+
+ if (is_customized_pctype) {
+ cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+ if (!cus_pctype) {
+ PMD_DRV_LOG(ERR, "unknown pctype %u.",
+ fdir_input->pctype);
+ return -1;
+ }
+ }
+
+ if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
+ *ether_type = fdir_input->flow.l2_flow.ether_type;
+ else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+ pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
+ is_customized_pctype) {
+ ip = (struct ipv4_hdr *)raw_pkt;
+
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+ /* set len to by default */
+ ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+ ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+ fdir_input->flow.ip4_flow.ttl :
+ I40E_FDIR_IP_DEFAULT_TTL;
+ ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+ ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+
+ if (!is_customized_pctype)
+ ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+ fdir_input->flow.ip4_flow.proto :
+ next_proto[fdir_input->pctype];
+ else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+ cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+ cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+ cus_pctype->index == I40E_CUSTOMIZED_GTPU)
+ ip->next_proto_id = IPPROTO_UDP;
+ len += sizeof(struct ipv4_hdr);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+ pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
+ ip6 = (struct ipv6_hdr *)raw_pkt;
+
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ ip6->vtc_flow =
+ rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+ (fdir_input->flow.ipv6_flow.tc <<
+ I40E_FDIR_IPv6_TC_OFFSET));
+ ip6->payload_len =
+ rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+ fdir_input->flow.ipv6_flow.proto :
+ next_proto[fdir_input->pctype];
+ ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
+ fdir_input->flow.ipv6_flow.hop_limits :
+ I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ rte_memcpy(&ip6->src_addr,
+ &fdir_input->flow.ipv6_flow.dst_ip,
+ IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr,
+ &fdir_input->flow.ipv6_flow.src_ip,
+ IPV6_ADDR_LEN);
+ len += sizeof(struct ipv6_hdr);
+ } else {
+ PMD_DRV_LOG(ERR, "unknown pctype %u.",
+ fdir_input->pctype);
+ return -1;
+ }
+
+ return len;
+}
+
+/**
+ * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
+ const struct i40e_fdir_input *fdir_input,
+ unsigned char *raw_pkt)
+{
+ unsigned char *payload = NULL;
+ unsigned char *ptr;
+ struct udp_hdr *udp;
+ struct tcp_hdr *tcp;
+ struct sctp_hdr *sctp;
+ struct rte_flow_item_gtp *gtp;
+ struct ipv4_hdr *gtp_ipv4;
+ struct ipv6_hdr *gtp_ipv6;
+ uint8_t size, dst = 0;
+ uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
+ int len;
+ uint8_t pctype = fdir_input->pctype;
+ struct i40e_customized_pctype *cus_pctype;
+
+ /* raw pcket template - just copy contents of the raw packet */
+ if (fdir_input->flow_ext.pkt_template) {
+ memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
+ fdir_input->flow.raw_flow.length);
+ return 0;
+ }
+
+ /* fill the ethernet and IP head */
+ len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
+ !!fdir_input->flow_ext.vlan_tci);
+ if (len < 0)
+ return -EINVAL;
+
+ /* fill the L4 head */
+ if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+ udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+ udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
+ tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
+ tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
+ sctp = (struct sctp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
+ sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
+ sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+ pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
+ payload = raw_pkt + len;
+ set_idx = I40E_FLXPLD_L3_IDX;
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+ udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+ udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+ tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
+ tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
+ sctp = (struct sctp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
+ sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
+ sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+ pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
+ payload = raw_pkt + len;
+ set_idx = I40E_FLXPLD_L3_IDX;
+ } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+ payload = raw_pkt + len;
+ /**
+ * ARP packet is a special case on which the payload
+ * starts after the whole ARP header
+ */
+ if (fdir_input->flow.l2_flow.ether_type ==
+ rte_cpu_to_be_16(ETHER_TYPE_ARP))
+ payload += sizeof(struct arp_hdr);
+ set_idx = I40E_FLXPLD_L2_IDX;
+ } else if (fdir_input->flow_ext.customized_pctype) {
+ /* If customized pctype is used */
+ cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+ if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+ cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+ cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+ cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->dgram_len =
+ rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+
+ gtp = (struct rte_flow_item_gtp *)
+ ((unsigned char *)udp + sizeof(struct udp_hdr));
+ gtp->msg_len =
+ rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
+ gtp->teid = fdir_input->flow.gtp_flow.teid;
+ gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
+
+ /* GTP-C message type is not supported. */
+ if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
+ udp->dst_port =
+ rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
+ gtp->v_pt_rsv_flags =
+ I40E_FDIR_GTP_VER_FLAG_0X32;
+ } else {
+ udp->dst_port =
+ rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
+ gtp->v_pt_rsv_flags =
+ I40E_FDIR_GTP_VER_FLAG_0X30;
+ }
+
+ if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
+ gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
+ gtp_ipv4 = (struct ipv4_hdr *)
+ ((unsigned char *)gtp +
+ sizeof(struct rte_flow_item_gtp));
+ gtp_ipv4->version_ihl =
+ I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+ gtp_ipv4->next_proto_id = IPPROTO_IP;
+ gtp_ipv4->total_length =
+ rte_cpu_to_be_16(
+ I40E_FDIR_INNER_IP_DEFAULT_LEN);
+ payload = (unsigned char *)gtp_ipv4 +
+ sizeof(struct ipv4_hdr);
+ } else if (cus_pctype->index ==
+ I40E_CUSTOMIZED_GTPU_IPV6) {
+ gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
+ gtp_ipv6 = (struct ipv6_hdr *)
+ ((unsigned char *)gtp +
+ sizeof(struct rte_flow_item_gtp));
+ gtp_ipv6->vtc_flow =
+ rte_cpu_to_be_32(
+ I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+ (0 << I40E_FDIR_IPv6_TC_OFFSET));
+ gtp_ipv6->proto = IPPROTO_NONE;
+ gtp_ipv6->payload_len =
+ rte_cpu_to_be_16(
+ I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
+ gtp_ipv6->hop_limits =
+ I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+ payload = (unsigned char *)gtp_ipv6 +
+ sizeof(struct ipv6_hdr);
+ } else
+ payload = (unsigned char *)gtp +
+ sizeof(struct rte_flow_item_gtp);
+ }
+ } else {
+ PMD_DRV_LOG(ERR, "unknown pctype %u.",
+ fdir_input->pctype);
+ return -1;
+ }
+
+ /* fill the flexbytes to payload */
+ for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+ pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
+ size = pf->fdir.flex_set[pit_idx].size;
+ if (size == 0)
+ continue;
+ dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
+ ptr = payload +
+ pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
+ (void)rte_memcpy(ptr,
+ &fdir_input->flow_ext.flexbytes[dst],
+ size * sizeof(uint16_t));
+ }
+
+ return 0;
+}
+
+/* Construct the tx flags */
+static inline uint64_t
+i40e_build_ctob(uint32_t td_cmd,
+ uint32_t td_offset,
+ unsigned int size,
+ uint32_t td_tag)
+{
+ return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+/*
+ * check the programming status descriptor in rx queue.
+ * done after Programming Flow Director is programmed on
+ * tx queue
+ */
+static inline int
+i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
+{
+ volatile union i40e_rx_desc *rxdp;
+ uint64_t qword1;
+ uint32_t rx_status;
+ uint32_t len, id;
+ uint32_t error;
+ int ret = 0;
+
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
+ if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
+ id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
+
+ if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
+ id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
+ error = (qword1 &
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+ if (error == (0x1 <<
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+ PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
+ " (FD_ID %u): programming status"
+ " reported.",
+ rxdp->wb.qword0.hi_dword.fd_id);
+ ret = -1;
+ } else if (error == (0x1 <<
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+ PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
+ " (FD_ID %u): programming status"
+ " reported.",
+ rxdp->wb.qword0.hi_dword.fd_id);
+ ret = -1;
+ } else
+ PMD_DRV_LOG(ERR, "invalid programming status"
+ " reported, error = %u.", error);
+ } else
+ PMD_DRV_LOG(INFO, "unknown programming status"
+ " reported, len = %d, id = %u.", len, id);
+ rxdp->wb.qword1.status_error_len = 0;
+ rxq->rx_tail++;
+ if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
+ rxq->rx_tail = 0;
+ if (rxq->rx_tail == 0)
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ else
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
+ }
+
+ return ret;
+}
+
+static int
+i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
+ struct i40e_fdir_filter *filter)
+{
+ rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
+ if (input->input.flow_ext.pkt_template) {
+ filter->fdir.input.flow.raw_flow.packet = NULL;
+ filter->fdir.input.flow.raw_flow.length =
+ rte_hash_crc(input->input.flow.raw_flow.packet,
+ input->input.flow.raw_flow.length,
+ input->input.flow.raw_flow.pctype);
+ }
+ return 0;
+}
+
+/* Check if there exists the flow director filter */
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct i40e_fdir_input *input)
+{
+ int ret;
+
+ if (input->flow_ext.pkt_template)
+ ret = rte_hash_lookup_with_hash(fdir_info->hash_table,
+ (const void *)input,
+ input->flow.raw_flow.length);
+ else
+ ret = rte_hash_lookup(fdir_info->hash_table,
+ (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+/* Add a flow director filter into the SW list */
+static int
+i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret;
+
+ if (filter->fdir.input.flow_ext.pkt_template)
+ ret = rte_hash_add_key_with_hash(fdir_info->hash_table,
+ &filter->fdir.input,
+ filter->fdir.input.flow.raw_flow.length);
+ else
+ ret = rte_hash_add_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ fdir_info->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete a flow director filter from the SW list */
+int
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *filter;
+ int ret;
+
+ if (input->flow_ext.pkt_template)
+ ret = rte_hash_del_key_with_hash(fdir_info->hash_table,
+ input,
+ input->flow.raw_flow.length);
+ else
+ ret = rte_hash_del_key(fdir_info->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
+/*
+ * i40e_add_del_fdir_filter - add or remove a flow director filter.
+ * @pf: board private structure
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+int
+i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+ enum i40e_filter_pctype pctype;
+ int ret = 0;
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+ PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
+ " check the mode in fdir_conf.");
+ return -ENOTSUP;
+ }
+
+ pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type);
+ if (pctype == I40E_FILTER_PCTYPE_INVALID) {
+ PMD_DRV_LOG(ERR, "invalid flow_type input.");
+ return -EINVAL;
+ }
+ if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue ID");
+ return -EINVAL;
+ }
+ if (filter->input.flow_ext.is_vf &&
+ filter->input.flow_ext.dst_id >= pf->vf_num) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID");
+ return -EINVAL;
+ }
+
+ memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+ ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
+ return ret;
+ }
+
+ if (hw->mac.type == I40E_MAC_X722) {
+ /* get translated pctype value in fd pctype register */
+ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+ hw, I40E_GLQF_FD_PCTYPES((int)pctype));
+ }
+
+ ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+ pctype);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
+ * @pf: board private structure
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+int
+i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct i40e_fdir_filter_conf *filter,
+ bool add)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+ enum i40e_filter_pctype pctype;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter, *node;
+ struct i40e_fdir_filter check_filter; /* Check if the filter exists */
+ int ret = 0;
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+ PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf.");
+ return -ENOTSUP;
+ }
+
+ if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue ID");
+ return -EINVAL;
+ }
+ if (filter->input.flow_ext.is_vf &&
+ filter->input.flow_ext.dst_id >= pf->vf_num) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID");
+ return -EINVAL;
+ }
+ if (filter->input.flow_ext.pkt_template) {
+ if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
+ !filter->input.flow.raw_flow.packet) {
+ PMD_DRV_LOG(ERR, "Invalid raw packet template"
+ " flow filter parameters!");
+ return -EINVAL;
+ }
+ pctype = filter->input.flow.raw_flow.pctype;
+ } else {
+ pctype = filter->input.pctype;
+ }
+
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_fdir_filter_convert(filter, &check_filter);
+ node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR,
+ "Conflict with existing flow director rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR,
+ "There's no corresponding flow firector filter!");
+ return -EINVAL;
+ }
+
+ memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+ ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
+ return ret;
+ }
+
+ if (hw->mac.type == I40E_MAC_X722) {
+ /* get translated pctype value in fd pctype register */
+ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+ hw, I40E_GLQF_FD_PCTYPES((int)pctype));
+ }
+
+ ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+ pctype);
+ return ret;
+ }
+
+ if (add) {
+ fdir_filter = rte_zmalloc("fdir_filter",
+ sizeof(*fdir_filter), 0);
+ if (fdir_filter == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -ENOMEM;
+ }
+
+ rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+ if (ret < 0)
+ rte_free(fdir_filter);
+ } else {
+ ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
+ }
+
+ return ret;
+}
+
+/*
+ * i40e_fdir_filter_programming - Program a flow director filter rule.
+ * Is done by Flow Director Programming Descriptor followed by packet
+ * structure that contains the filter fields need to match.
+ * @pf: board private structure
+ * @pctype: pctype
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_fdir_filter_programming(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct rte_eth_fdir_filter *filter,
+ bool add)
+{
+ struct i40e_tx_queue *txq = pf->fdir.txq;
+ struct i40e_rx_queue *rxq = pf->fdir.rxq;
+ const struct rte_eth_fdir_action *fdir_action = &filter->action;
+ volatile struct i40e_tx_desc *txdp;
+ volatile struct i40e_filter_program_desc *fdirdp;
+ uint32_t td_cmd;
+ uint16_t vsi_id, i;
+ uint8_t dest;
+
+ PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
+ fdirdp = (volatile struct i40e_filter_program_desc *)
+ (&(txq->tx_ring[txq->tx_tail]));
+
+ fdirdp->qindex_flex_ptype_vsi =
+ rte_cpu_to_le_32((fdir_action->rx_queue <<
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32((fdir_action->flex_off <<
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+ I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32((pctype <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+ I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+ if (filter->input.flow_ext.is_vf)
+ vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
+ else
+ /* Use LAN VSI Id by default */
+ vsi_id = pf->main_vsi->vsi_id;
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32(((uint32_t)vsi_id <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+ I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+ fdirdp->dtype_cmd_cntindex =
+ rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+ if (add)
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ else
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+ if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+ else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+ else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
+ else {
+ PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
+ " unsupported fdir behavior.");
+ return -EINVAL;
+ }
+
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
+ I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+ I40E_TXD_FLTR_QW1_DEST_MASK);
+
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32((fdir_action->report_status<<
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+ I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32(
+ ((uint32_t)pf->fdir.match_counter_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+
+ fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
+
+ PMD_DRV_LOG(INFO, "filling transmit descriptor.");
+ txdp = &(txq->tx_ring[txq->tx_tail + 1]);
+ txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+ td_cmd = I40E_TX_DESC_CMD_EOP |
+ I40E_TX_DESC_CMD_RS |
+ I40E_TX_DESC_CMD_DUMMY;
+
+ txdp->cmd_type_offset_bsz =
+ i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
+
+ txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+ /* Update the tx tail register */
+ rte_wmb();
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
+ if ((txdp->cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ break;
+ rte_delay_us(1);
+ }
+ if (i >= I40E_FDIR_MAX_WAIT_US) {
+ PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
+ " time out to get DD on tx queue.");
+ return -ETIMEDOUT;
+ }
+ /* totally delay 10 ms to check programming status*/
+ for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
+ if (i40e_check_fdir_programming_status(rxq) >= 0)
+ return 0;
+ rte_delay_us(1);
+ }
+ PMD_DRV_LOG(ERR,
+ "Failed to program FDIR filter: programming status reported.");
+ return -ETIMEDOUT;
+}
+
+/*
+ * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
+ * Is done by Flow Director Programming Descriptor followed by packet
+ * structure that contains the filter fields need to match.
+ * @pf: board private structure
+ * @pctype: pctype
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct i40e_fdir_filter_conf *filter,
+ bool add)
+{
+ struct i40e_tx_queue *txq = pf->fdir.txq;
+ struct i40e_rx_queue *rxq = pf->fdir.rxq;
+ const struct i40e_fdir_action *fdir_action = &filter->action;
+ volatile struct i40e_tx_desc *txdp;
+ volatile struct i40e_filter_program_desc *fdirdp;
+ uint32_t td_cmd;
+ uint16_t vsi_id, i;
+ uint8_t dest;
+
+ PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
+ fdirdp = (volatile struct i40e_filter_program_desc *)
+ (&txq->tx_ring[txq->tx_tail]);
+
+ fdirdp->qindex_flex_ptype_vsi =
+ rte_cpu_to_le_32((fdir_action->rx_queue <<
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32((fdir_action->flex_off <<
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+ I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32((pctype <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+ I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+ if (filter->input.flow_ext.is_vf)
+ vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
+ else
+ /* Use LAN VSI Id by default */
+ vsi_id = pf->main_vsi->vsi_id;
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32(((uint32_t)vsi_id <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+ I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+ fdirdp->dtype_cmd_cntindex =
+ rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+ if (add)
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ else
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+ if (fdir_action->behavior == I40E_FDIR_REJECT)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+ else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+ else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
+ else {
+ PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
+ return -EINVAL;
+ }
+
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
+ I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+ I40E_TXD_FLTR_QW1_DEST_MASK);
+
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32((fdir_action->report_status <<
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+ I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32(
+ ((uint32_t)pf->fdir.match_counter_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+
+ fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
+
+ PMD_DRV_LOG(INFO, "filling transmit descriptor.");
+ txdp = &txq->tx_ring[txq->tx_tail + 1];
+ txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+ td_cmd = I40E_TX_DESC_CMD_EOP |
+ I40E_TX_DESC_CMD_RS |
+ I40E_TX_DESC_CMD_DUMMY;
+
+ txdp->cmd_type_offset_bsz =
+ i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
+
+ txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+ /* Update the tx tail register */
+ rte_wmb();
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
+ if ((txdp->cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ break;
+ rte_delay_us(1);
+ }
+ if (i >= I40E_FDIR_MAX_WAIT_US) {
+ PMD_DRV_LOG(ERR,
+ "Failed to program FDIR filter: time out to get DD on tx queue.");
+ return -ETIMEDOUT;
+ }
+ /* totally delay 10 ms to check programming status*/
+ rte_delay_us(I40E_FDIR_MAX_WAIT_US);
+ if (i40e_check_fdir_programming_status(rxq) < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to program FDIR filter: programming status reported.");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * i40e_fdir_flush - clear all filters of Flow Director table
+ * @pf: board private structure
+ */
+int
+i40e_fdir_flush(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t reg;
+ uint16_t guarant_cnt, best_cnt;
+ uint16_t i;
+
+ I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+ I40E_WRITE_FLUSH(hw);
+
+ for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
+ rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
+ reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
+ if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
+ break;
+ }
+ if (i >= I40E_FDIR_FLUSH_RETRY) {
+ PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
+ return -ETIMEDOUT;
+ }
+ guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
+ I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
+ I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ if (guarant_cnt != 0 || best_cnt != 0) {
+ PMD_DRV_LOG(ERR, "Failed to flush FD table.");
+ return -ENOSYS;
+ } else
+ PMD_DRV_LOG(INFO, "FD table Flush success.");
+ return 0;
+}
+
+static inline void
+i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
+ struct rte_eth_flex_payload_cfg *flex_set,
+ uint16_t *num)
+{
+ struct i40e_fdir_flex_pit *flex_pit;
+ struct rte_eth_flex_payload_cfg *ptr = flex_set;
+ uint16_t src, dst, size, j, k;
+ uint8_t i, layer_idx;
+
+ for (layer_idx = I40E_FLXPLD_L2_IDX;
+ layer_idx <= I40E_FLXPLD_L4_IDX;
+ layer_idx++) {
+ if (layer_idx == I40E_FLXPLD_L2_IDX)
+ ptr->type = RTE_ETH_L2_PAYLOAD;
+ else if (layer_idx == I40E_FLXPLD_L3_IDX)
+ ptr->type = RTE_ETH_L3_PAYLOAD;
+ else if (layer_idx == I40E_FLXPLD_L4_IDX)
+ ptr->type = RTE_ETH_L4_PAYLOAD;
+
+ for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+ flex_pit = &pf->fdir.flex_set[layer_idx *
+ I40E_MAX_FLXPLD_FIED + i];
+ if (flex_pit->size == 0)
+ continue;
+ src = flex_pit->src_offset * sizeof(uint16_t);
+ dst = flex_pit->dst_offset * sizeof(uint16_t);
+ size = flex_pit->size * sizeof(uint16_t);
+ for (j = src, k = dst; j < src + size; j++, k++)
+ ptr->src_offset[k] = j;
+ }
+ (*num)++;
+ ptr++;
+ }
+}
+
+static inline void
+i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
+ struct rte_eth_fdir_flex_mask *flex_mask,
+ uint16_t *num)
+{
+ struct i40e_fdir_flex_mask *mask;
+ struct rte_eth_fdir_flex_mask *ptr = flex_mask;
+ uint16_t flow_type;
+ uint8_t i, j;
+ uint16_t off_bytes, mask_tmp;
+
+ for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
+ i++) {
+ mask = &pf->fdir.flex_mask[i];
+ flow_type = i40e_pctype_to_flowtype(pf->adapter,
+ (enum i40e_filter_pctype)i);
+ if (flow_type == RTE_ETH_FLOW_UNKNOWN)
+ continue;
+
+ for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
+ if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
+ ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
+ ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
+ } else {
+ ptr->mask[j * sizeof(uint16_t)] = 0x0;
+ ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
+ }
+ }
+ for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
+ off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
+ mask_tmp = ~mask->bitmask[j].mask;
+ ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
+ ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
+ }
+ ptr->flow_type = flow_type;
+ ptr++;
+ (*num)++;
+ }
+}
+
+/*
+ * i40e_fdir_info_get - get information of Flow Director
+ * @pf: ethernet device to get info from
+ * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
+ * the flow director information.
+ */
+static void
+i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint16_t num_flex_set = 0;
+ uint16_t num_flex_mask = 0;
+ uint16_t i;
+
+ if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+ fdir->mode = RTE_FDIR_MODE_PERFECT;
+ else
+ fdir->mode = RTE_FDIR_MODE_NONE;
+
+ fdir->guarant_spc =
+ (uint32_t)hw->func_caps.fd_filters_guaranteed;
+ fdir->best_spc =
+ (uint32_t)hw->func_caps.fd_filters_best_effort;
+ fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
+ fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
+ for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
+ fdir->flow_types_mask[i] = 0ULL;
+ fdir->flex_payload_unit = sizeof(uint16_t);
+ fdir->flex_bitmask_unit = sizeof(uint16_t);
+ fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
+ fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
+ fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
+
+ i40e_fdir_info_get_flex_set(pf,
+ fdir->flex_conf.flex_set,
+ &num_flex_set);
+ i40e_fdir_info_get_flex_mask(pf,
+ fdir->flex_conf.flex_mask,
+ &num_flex_mask);
+
+ fdir->flex_conf.nb_payloads = num_flex_set;
+ fdir->flex_conf.nb_flexmasks = num_flex_mask;
+}
+
+/*
+ * i40e_fdir_stat_get - get statistics of Flow Director
+ * @pf: ethernet device to get info from
+ * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
+ * the flow director statistics.
+ */
+static void
+i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t fdstat;
+
+ fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+ stat->guarant_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ stat->best_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+}
+
+static int
+i40e_fdir_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_fdir_filter_info *info)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret = 0;
+
+ if (!info) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+
+ switch (info->info_type) {
+ case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
+ ret = i40e_fdir_filter_inset_select(pf,
+ &(info->info.input_set_conf));
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
+ info->info_type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * i40e_fdir_ctrl_func - deal with all operations on flow director.
+ * @pf: board private structure
+ * @filter_op:operation will be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+int
+i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret = 0;
+
+ if ((pf->flags & I40E_FLAG_FDIR) == 0)
+ return -ENOTSUP;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
+ return -EINVAL;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = i40e_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = i40e_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ ret = i40e_fdir_flush(dev);
+ break;
+ case RTE_ETH_FILTER_INFO:
+ i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
+ break;
+ case RTE_ETH_FILTER_SET:
+ ret = i40e_fdir_filter_set(dev,
+ (struct rte_eth_fdir_filter_info *)arg);
+ break;
+ case RTE_ETH_FILTER_STATS:
+ i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+/* Restore flow director filter */
+void
+i40e_fdir_filter_restore(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
+ struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
+ struct i40e_fdir_filter *f;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t fdstat;
+ uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
+ uint32_t best_cnt; /**< Number of filters in best effort spaces. */
+
+ TAILQ_FOREACH(f, fdir_list, rules)
+ i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
+
+ fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+ guarant_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+
+ PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
+ guarant_cnt, best_cnt);
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_flow.c b/src/spdk/dpdk/drivers/net/i40e/i40e_flow.c
new file mode 100644
index 00000000..c67b264d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_flow.c
@@ -0,0 +1,4976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_type.h"
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+
+#define I40E_IPV6_TC_MASK (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
+#define I40E_IPV6_FRAG_HEADER 44
+#define I40E_TENANT_ARRAY_NUM 3
+#define I40E_TCI_MASK 0xFFFF
+
+static int i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static int i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+static int i40e_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_fdir_filter_conf *filter);
+static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct i40e_fdir_filter_conf *filter);
+static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter);
+static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter);
+static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
+static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
+static int
+i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int
+i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter);
+
+const struct rte_flow_ops i40e_flow_ops = {
+ .validate = i40e_flow_validate,
+ .create = i40e_flow_create,
+ .destroy = i40e_flow_destroy,
+ .flush = i40e_flow_flush,
+};
+
+union i40e_filter_t cons_filter;
+enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
+
+/* Pattern matched ethertype filter */
+static enum rte_flow_item_type pattern_ethertype[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched flow director filter */
+static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPC,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPC,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched tunnel filter */
+static enum rte_flow_item_type pattern_vxlan_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_GRE,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_GRE,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_qinq_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct i40e_valid_pattern i40e_supported_patterns[] = {
+ /* Ethertype */
+ { pattern_ethertype, i40e_flow_parse_ethertype_filter },
+ /* FDIR - support default flow type without flexible payload*/
+ { pattern_ethertype, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
+ /* FDIR - support default flow type with flexible payload */
+ { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ /* FDIR - support single vlan input set */
+ { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ /* FDIR - support VF item */
+ { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ /* VXLAN */
+ { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
+ { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
+ { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
+ { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
+ /* NVGRE */
+ { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
+ /* MPLSoUDP & MPLSoGRE */
+ { pattern_mpls_1, i40e_flow_parse_mpls_filter },
+ { pattern_mpls_2, i40e_flow_parse_mpls_filter },
+ { pattern_mpls_3, i40e_flow_parse_mpls_filter },
+ { pattern_mpls_4, i40e_flow_parse_mpls_filter },
+ /* GTP-C & GTP-U */
+ { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
+ { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
+ { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
+ { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
+ /* QINQ */
+ { pattern_qinq_1, i40e_flow_parse_qinq_filter },
+};
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+i40e_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = i40e_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = i40e_find_first_item(pb + 1, true);
+
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+ pb = pe;
+ break;
+ }
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+i40e_match_pattern(enum rte_flow_item_type *item_array,
+ struct rte_flow_item *pattern)
+{
+ struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+/* Find if there's parse filter function matched */
+static parse_filter_t
+i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
+{
+ parse_filter_t parse_filter = NULL;
+ uint8_t i = *idx;
+
+ for (; i < RTE_DIM(i40e_supported_patterns); i++) {
+ if (i40e_match_pattern(i40e_supported_patterns[i].items,
+ pattern)) {
+ parse_filter = i40e_supported_patterns[i].parse_filter;
+ break;
+ }
+ }
+
+ *idx = ++i;
+
+ return parse_filter;
+}
+
+/* Parse attributes */
+static int
+i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static uint16_t
+i40e_get_outer_vlan(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
+ uint64_t reg_r = 0;
+ uint16_t reg_id;
+ uint16_t tpid;
+
+ if (qinq)
+ reg_id = 2;
+ else
+ reg_id = 3;
+
+ i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ &reg_r, NULL);
+
+ tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
+
+ return tpid;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
+ * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
+ * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
+ * FF:FF:FF:FF:FF:FF
+ * 5. Ether_type mask should be 0xFFFF.
+ */
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ enum rte_flow_item_type item_type;
+ uint16_t outer_tpid;
+
+ outer_tpid = i40e_get_outer_vlan(dev);
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+ /* Get the MAC info. */
+ if (!eth_spec || !eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL ETH spec/mask");
+ return -rte_errno;
+ }
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6 ||
+ filter->ether_type == ETHER_TYPE_LLDP ||
+ filter->ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type in"
+ " control packet filter.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* Ethertype action only supports QUEUE or DROP. */
+static int
+i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for"
+ " ethertype_filter.");
+ return -rte_errno;
+ }
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_ethertype_filter *ethertype_filter =
+ &filter->ethertype_filter;
+ int ret;
+
+ ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_ethertype_action(dev, actions, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
+
+ return ret;
+}
+
+static int
+i40e_flow_check_raw_item(const struct rte_flow_item *item,
+ const struct rte_flow_item_raw *raw_spec,
+ struct rte_flow_error *error)
+{
+ if (!raw_spec->relative) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Relative should be 1.");
+ return -rte_errno;
+ }
+
+ if (raw_spec->offset % sizeof(uint16_t)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Offset should be even.");
+ return -rte_errno;
+ }
+
+ if (raw_spec->search || raw_spec->limit) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "search or limit is not supported.");
+ return -rte_errno;
+ }
+
+ if (raw_spec->offset < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Offset should be non-negative.");
+ return -rte_errno;
+ }
+ return 0;
+}
+
+static int
+i40e_flow_store_flex_pit(struct i40e_pf *pf,
+ struct i40e_fdir_flex_pit *flex_pit,
+ enum i40e_flxpld_layer_idx layer_idx,
+ uint8_t raw_id)
+{
+ uint8_t field_idx;
+
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
+ /* Check if the configuration is conflicted */
+ if (pf->fdir.flex_pit_flag[layer_idx] &&
+ (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
+ pf->fdir.flex_set[field_idx].size != flex_pit->size ||
+ pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
+ return -1;
+
+ /* Check if the configuration exists. */
+ if (pf->fdir.flex_pit_flag[layer_idx] &&
+ (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
+ pf->fdir.flex_set[field_idx].size == flex_pit->size &&
+ pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
+ return 1;
+
+ pf->fdir.flex_set[field_idx].src_offset =
+ flex_pit->src_offset;
+ pf->fdir.flex_set[field_idx].size =
+ flex_pit->size;
+ pf->fdir.flex_set[field_idx].dst_offset =
+ flex_pit->dst_offset;
+
+ return 0;
+}
+
+static int
+i40e_flow_store_flex_mask(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ uint8_t *mask)
+{
+ struct i40e_fdir_flex_mask flex_mask;
+ uint16_t mask_tmp;
+ uint8_t i, nb_bitmask = 0;
+
+ memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
+ for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
+ mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
+ if (mask_tmp) {
+ flex_mask.word_mask |=
+ I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
+ if (mask_tmp != UINT16_MAX) {
+ flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
+ flex_mask.bitmask[nb_bitmask].offset =
+ i / sizeof(uint16_t);
+ nb_bitmask++;
+ if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
+ return -1;
+ }
+ }
+ }
+ flex_mask.nb_bitmask = nb_bitmask;
+
+ if (pf->fdir.flex_mask_flag[pctype] &&
+ (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
+ sizeof(struct i40e_fdir_flex_mask))))
+ return -2;
+ else if (pf->fdir.flex_mask_flag[pctype] &&
+ !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
+ sizeof(struct i40e_fdir_flex_mask))))
+ return 1;
+
+ memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
+ sizeof(struct i40e_fdir_flex_mask));
+ return 0;
+}
+
+static void
+i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
+ enum i40e_flxpld_layer_idx layer_idx,
+ uint8_t raw_id)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t flx_pit, flx_ort;
+ uint8_t field_idx;
+ uint16_t min_next_off = 0; /* in words */
+ uint8_t i;
+
+ if (raw_id) {
+ flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
+ (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
+ (layer_idx * I40E_MAX_FLXPLD_FIED);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+ }
+
+ /* Set flex pit */
+ for (i = 0; i < raw_id; i++) {
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+ flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
+ pf->fdir.flex_set[field_idx].size,
+ pf->fdir.flex_set[field_idx].dst_offset);
+
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+ min_next_off = pf->fdir.flex_set[field_idx].src_offset +
+ pf->fdir.flex_set[field_idx].size;
+ }
+
+ for (; i < I40E_MAX_FLXPLD_FIED; i++) {
+ /* set the non-used register obeying register's constrain */
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+ flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
+ NONUSE_FLX_PIT_DEST_OFF);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+ min_next_off++;
+ }
+
+ pf->fdir.flex_pit_flag[layer_idx] = 1;
+}
+
+static void
+i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_fdir_flex_mask *flex_mask;
+ uint32_t flxinset, fd_mask;
+ uint8_t i;
+
+ /* Set flex mask */
+ flex_mask = &pf->fdir.flex_mask[pctype];
+ flxinset = (flex_mask->word_mask <<
+ I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
+ I40E_PRTQF_FD_FLXINSET_INSET_MASK;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
+
+ for (i = 0; i < flex_mask->nb_bitmask; i++) {
+ fd_mask = (flex_mask->bitmask[i].mask <<
+ I40E_PRTQF_FD_MSK_MASK_SHIFT) &
+ I40E_PRTQF_FD_MSK_MASK_MASK;
+ fd_mask |= ((flex_mask->bitmask[i].offset +
+ I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
+ I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
+ I40E_PRTQF_FD_MSK_OFFSET_MASK;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
+ }
+
+ pf->fdir.flex_mask_flag[pctype] = 1;
+}
+
+static int
+i40e_flow_set_fdir_inset(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ uint64_t input_set)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint64_t inset_reg = 0;
+ uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
+ int i, num;
+
+ /* Check if the input set is valid */
+ if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
+ input_set) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid input set");
+ return -EINVAL;
+ }
+
+ /* Check if the configuration is conflicted */
+ if (pf->fdir.inset_flag[pctype] &&
+ memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
+ return -1;
+
+ if (pf->fdir.inset_flag[pctype] &&
+ !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
+ return 0;
+
+ num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+ I40E_INSET_MASK_NUM_REG);
+ if (num < 0)
+ return -EINVAL;
+
+ inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
+
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+ for (i = 0; i < num; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+
+ /*clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
+ I40E_WRITE_FLUSH(hw);
+
+ pf->fdir.input_set[pctype] = input_set;
+ pf->fdir.inset_flag[pctype] = 1;
+ return 0;
+}
+
+static uint8_t
+i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
+ enum rte_flow_item_type item_type,
+ struct i40e_fdir_filter_conf *filter)
+{
+ struct i40e_customized_pctype *cus_pctype = NULL;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_GTPC:
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPC);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTPU:
+ if (!filter->input.flow_ext.inner_ip)
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU);
+ else if (filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4)
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU_IPV4);
+ else if (filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV6)
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU_IPV6);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported item type");
+ break;
+ }
+
+ if (cus_pctype && cus_pctype->valid)
+ return cus_pctype->pctype;
+
+ return I40E_FILTER_PCTYPE_INVALID;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported patterns: refer to array i40e_supported_patterns.
+ * 3. Default supported flow type and input set: refer to array
+ * valid_fdir_inset_table in i40e_ethdev.c.
+ * 4. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 5. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ * 6. GTP profile supports GTPv1 only.
+ * 7. GTP-C response message ('source_port' = 2123) is not supported.
+ */
+static int
+i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_fdir_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+ const struct rte_flow_item_raw *raw_spec, *raw_mask;
+ const struct rte_flow_item_vf *vf_spec;
+
+ uint8_t pctype = 0;
+ uint64_t input_set = I40E_INSET_NONE;
+ uint16_t frag_off;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
+ uint32_t i, j;
+ uint8_t ipv6_addr_mask[16] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
+ uint8_t raw_id = 0;
+ int32_t off_arr[I40E_MAX_FLXPLD_FIED];
+ uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
+ struct i40e_fdir_flex_pit flex_pit;
+ uint8_t next_dst_off = 0;
+ uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
+ uint16_t flex_size;
+ bool cfg_flex_pit = true;
+ bool cfg_flex_msk = true;
+ uint16_t outer_tpid;
+ uint16_t ether_type;
+ uint32_t vtc_flow_cpu;
+ bool outer_ip = true;
+ int ret;
+
+ memset(off_arr, 0, sizeof(off_arr));
+ memset(len_arr, 0, sizeof(len_arr));
+ memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
+ outer_tpid = i40e_get_outer_vlan(dev);
+ filter->input.flow_ext.customized_pctype = false;
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ if (eth_spec && eth_mask) {
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ !is_zero_ether_addr(&eth_mask->dst)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask.");
+ return -rte_errno;
+ }
+ }
+ if (eth_spec && eth_mask && eth_mask->type) {
+ enum rte_flow_item_type next = (item + 1)->type;
+
+ if (eth_mask->type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid type mask.");
+ return -rte_errno;
+ }
+
+ ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ if (next == RTE_FLOW_ITEM_TYPE_VLAN ||
+ ether_type == ETHER_TYPE_IPv4 ||
+ ether_type == ETHER_TYPE_IPv6 ||
+ ether_type == ETHER_TYPE_ARP ||
+ ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type.");
+ return -rte_errno;
+ }
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ filter->input.flow.l2_flow.ether_type =
+ eth_spec->type;
+ }
+
+ pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
+ layer_idx = I40E_FLXPLD_L2_IDX;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+
+ RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) {
+ input_set |= I40E_INSET_VLAN_INNER;
+ filter->input.flow_ext.vlan_tci =
+ vlan_spec->tci;
+ }
+ }
+ if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
+ if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid inner_type"
+ " mask.");
+ return -rte_errno;
+ }
+
+ ether_type =
+ rte_be_to_cpu_16(vlan_spec->inner_type);
+
+ if (ether_type == ETHER_TYPE_IPv4 ||
+ ether_type == ETHER_TYPE_IPv6 ||
+ ether_type == ETHER_TYPE_ARP ||
+ ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported inner_type.");
+ return -rte_errno;
+ }
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ filter->input.flow.l2_flow.ether_type =
+ vlan_spec->inner_type;
+ }
+
+ pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
+ layer_idx = I40E_FLXPLD_L2_IDX;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+ pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ layer_idx = I40E_FLXPLD_L3_IDX;
+
+ if (ipv4_spec && ipv4_mask && outer_ip) {
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_PROTO;
+
+ /* Check if it is fragment. */
+ frag_off = ipv4_spec->hdr.fragment_offset;
+ frag_off = rte_be_to_cpu_16(frag_off);
+ if (frag_off & IPV4_HDR_OFFSET_MASK ||
+ frag_off & IPV4_HDR_MF_FLAG)
+ pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
+
+ /* Get the filter info */
+ filter->input.flow.ip4_flow.proto =
+ ipv4_spec->hdr.next_proto_id;
+ filter->input.flow.ip4_flow.tos =
+ ipv4_spec->hdr.type_of_service;
+ filter->input.flow.ip4_flow.ttl =
+ ipv4_spec->hdr.time_to_live;
+ filter->input.flow.ip4_flow.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.ip4_flow.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+ } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
+ filter->input.flow_ext.inner_ip = true;
+ filter->input.flow_ext.iip_type =
+ I40E_FDIR_IPTYPE_IPV4;
+ } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid inner IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (outer_ip)
+ outer_ip = false;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
+ pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ layer_idx = I40E_FLXPLD_L3_IDX;
+
+ if (ipv6_spec && ipv6_mask && outer_ip) {
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+
+ if (!memcmp(ipv6_mask->hdr.src_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.src_addr)))
+ input_set |= I40E_INSET_IPV6_SRC;
+ if (!memcmp(ipv6_mask->hdr.dst_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
+ == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+ /* Get filter info */
+ vtc_flow_cpu =
+ rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
+ filter->input.flow.ipv6_flow.tc =
+ (uint8_t)(vtc_flow_cpu >>
+ I40E_FDIR_IPv6_TC_OFFSET);
+ filter->input.flow.ipv6_flow.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.flow.ipv6_flow.hop_limits =
+ ipv6_spec->hdr.hop_limits;
+
+ rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /* Check if it is fragment. */
+ if (ipv6_spec->hdr.proto ==
+ I40E_IPV6_FRAG_HEADER)
+ pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
+ } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
+ filter->input.flow_ext.inner_ip = true;
+ filter->input.flow_ext.iip_type =
+ I40E_FDIR_IPTYPE_IPV6;
+ } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid inner IPv6 mask");
+ return -rte_errno;
+ }
+
+ if (outer_ip)
+ outer_ip = false;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ if (tcp_spec && tcp_mask) {
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (tcp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (tcp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.tcp4_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp4_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.tcp6_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp6_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ }
+
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+
+ if (udp_spec && udp_mask) {
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (udp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (udp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.udp4_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp4_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.udp6_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp6_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ }
+
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTPC:
+ case RTE_FLOW_ITEM_TYPE_GTPU:
+ if (!pf->gtp_support) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported protocol");
+ return -rte_errno;
+ }
+
+ gtp_spec = item->spec;
+ gtp_mask = item->mask;
+
+ if (gtp_spec && gtp_mask) {
+ if (gtp_mask->v_pt_rsv_flags ||
+ gtp_mask->msg_type ||
+ gtp_mask->msg_len ||
+ gtp_mask->teid != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GTP mask");
+ return -rte_errno;
+ }
+
+ filter->input.flow.gtp_flow.teid =
+ gtp_spec->teid;
+ filter->input.flow_ext.customized_pctype = true;
+ cus_proto = item_type;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec = item->spec;
+ sctp_mask = item->mask;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+
+ if (sctp_spec && sctp_mask) {
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (sctp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_DST_PORT;
+ if (sctp_mask->hdr.tag == UINT32_MAX)
+ input_set |= I40E_INSET_SCTP_VT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.sctp4_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp4_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp4_flow.verify_tag
+ = sctp_spec->hdr.tag;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.sctp6_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp6_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp6_flow.verify_tag
+ = sctp_spec->hdr.tag;
+ }
+ }
+
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_RAW:
+ raw_spec = item->spec;
+ raw_mask = item->mask;
+
+ if (!raw_spec || !raw_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL RAW spec/mask");
+ return -rte_errno;
+ }
+
+ if (pf->support_multi_driver) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported flexible payload.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_check_raw_item(item, raw_spec, error);
+ if (ret < 0)
+ return ret;
+
+ off_arr[raw_id] = raw_spec->offset;
+ len_arr[raw_id] = raw_spec->length;
+
+ flex_size = 0;
+ memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
+ flex_pit.size =
+ raw_spec->length / sizeof(uint16_t);
+ flex_pit.dst_offset =
+ next_dst_off / sizeof(uint16_t);
+
+ for (i = 0; i <= raw_id; i++) {
+ if (i == raw_id)
+ flex_pit.src_offset +=
+ raw_spec->offset /
+ sizeof(uint16_t);
+ else
+ flex_pit.src_offset +=
+ (off_arr[i] + len_arr[i]) /
+ sizeof(uint16_t);
+ flex_size += len_arr[i];
+ }
+ if (((flex_pit.src_offset + flex_pit.size) >=
+ I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
+ flex_size > I40E_FDIR_MAX_FLEXLEN) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Exceeds maxmial payload limit.");
+ return -rte_errno;
+ }
+
+ /* Store flex pit to SW */
+ ret = i40e_flow_store_flex_pit(pf, &flex_pit,
+ layer_idx, raw_id);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Conflict with the first flexible rule.");
+ return -rte_errno;
+ } else if (ret > 0)
+ cfg_flex_pit = false;
+
+ for (i = 0; i < raw_spec->length; i++) {
+ j = i + next_dst_off;
+ filter->input.flow_ext.flexbytes[j] =
+ raw_spec->pattern[i];
+ flex_mask[j] = raw_mask->pattern[i];
+ }
+
+ next_dst_off += raw_spec->length;
+ raw_id++;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VF:
+ vf_spec = item->spec;
+ if (!attr->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Matching VF traffic"
+ " without affecting it"
+ " (transfer attribute)"
+ " is unsupported");
+ return -rte_errno;
+ }
+ filter->input.flow_ext.is_vf = 1;
+ filter->input.flow_ext.dst_id = vf_spec->id;
+ if (filter->input.flow_ext.is_vf &&
+ filter->input.flow_ext.dst_id >= pf->vf_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VF ID for FDIR.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Get customized pctype value */
+ if (filter->input.flow_ext.customized_pctype) {
+ pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
+ if (pctype == I40E_FILTER_PCTYPE_INVALID) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported pctype");
+ return -rte_errno;
+ }
+ }
+
+ /* If customized pctype is not used, set fdir configuration.*/
+ if (!filter->input.flow_ext.customized_pctype) {
+ ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+ if (ret == -1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Conflict with the first rule's input set.");
+ return -rte_errno;
+ } else if (ret == -EINVAL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid pattern mask.");
+ return -rte_errno;
+ }
+
+ /* Store flex mask to SW */
+ ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+ if (ret == -1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Exceed maximal number of bitmasks");
+ return -rte_errno;
+ } else if (ret == -2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Conflict with the first flexible rule");
+ return -rte_errno;
+ } else if (ret > 0)
+ cfg_flex_msk = false;
+
+ if (cfg_flex_pit)
+ i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+
+ if (cfg_flex_msk)
+ i40e_flow_set_fdir_flex_msk(pf, pctype);
+ }
+
+ filter->input.pctype = pctype;
+
+ return 0;
+}
+
+/* Parse to get the action info of a FDIR filter.
+ * FDIR action supports QUEUE or (QUEUE + MARK).
+ */
+static int
+i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct i40e_fdir_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark_spec;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ act_q = act->conf;
+ filter->action.rx_queue = act_q->index;
+ if ((!filter->input.flow_ext.is_vf &&
+ filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
+ (filter->input.flow_ext.is_vf &&
+ filter->action.rx_queue >= pf->vf_nb_qps)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue ID for FDIR.");
+ return -rte_errno;
+ }
+ filter->action.behavior = I40E_FDIR_ACCEPT;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ filter->action.behavior = I40E_FDIR_REJECT;
+ break;
+ case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+ filter->action.behavior = I40E_FDIR_PASSTHRU;
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid action.");
+ return -rte_errno;
+ }
+
+ /* Check if the next non-void item is MARK or FLAG or END. */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ mark_spec = act->conf;
+ filter->action.report_status = I40E_FDIR_REPORT_ID;
+ filter->soft_id = mark_spec->id;
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
+ break;
+ case RTE_FLOW_ACTION_TYPE_END:
+ return 0;
+ default:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_fdir_filter_conf *fdir_filter =
+ &filter->fdir_filter;
+ int ret;
+
+ ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
+ fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_FDIR;
+
+ if (dev->data->dev_conf.fdir_conf.mode !=
+ RTE_FDIR_MODE_PERFECT) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Check the mode in fdir_conf.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* Parse to get the action info of a tunnel filter
+ * Tunnel action only supports PF, VF and QUEUE.
+ */
+static int
+i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_vf *act_vf;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is PF or VF. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
+ act->type != RTE_FLOW_ACTION_TYPE_VF) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
+ act_vf = act->conf;
+ filter->vf_id = act_vf->id;
+ filter->is_to_vf = 1;
+ if (filter->vf_id >= pf->vf_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid VF ID for tunnel filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Check if the next non-void item is QUEUE */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = act->conf;
+ filter->queue_id = act_q->index;
+ if ((!filter->is_to_vf) &&
+ (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for tunnel filter");
+ return -rte_errno;
+ } else if (filter->is_to_vf &&
+ (filter->queue_id >= pf->vf_nb_qps)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for tunnel filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static uint16_t i40e_supported_tunnel_filter_types[] = {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
+ ETH_TUNNEL_FILTER_IVLAN,
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
+ ETH_TUNNEL_FILTER_IMAC,
+ ETH_TUNNEL_FILTER_IMAC,
+};
+
+static int
+i40e_check_tunnel_filter_type(uint8_t filter_type)
+{
+ uint8_t i;
+
+ for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
+ if (filter_type == i40e_supported_tunnel_filter_types[i])
+ return 0;
+ }
+
+ return -1;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ uint8_t filter_type = 0;
+ bool is_vni_masked = 0;
+ uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
+ enum rte_flow_item_type item_type;
+ bool vxlan_flag = 0;
+ uint32_t tenant_id_be = 0;
+ int ret;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ /* Check if ETH item is used for place holder.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+ !is_zero_ether_addr(&eth_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!vxlan_flag) {
+ rte_memcpy(&filter->outer_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ } else {
+ rte_memcpy(&filter->inner_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK))
+ filter->inner_vlan =
+ rte_be_to_cpu_16(vlan_spec->tci) &
+ I40E_TCI_MASK;
+ filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* UDP is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec = item->spec;
+ vxlan_mask = item->mask;
+ /* Check if VXLAN item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_spec && vxlan_mask) {
+ is_vni_masked =
+ !!memcmp(vxlan_mask->vni, vni_mask,
+ RTE_DIM(vni_mask));
+ if (is_vni_masked) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VNI mask");
+ return -rte_errno;
+ }
+
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter_type |= ETH_TUNNEL_FILTER_TENID;
+ }
+
+ vxlan_flag = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ ret = i40e_check_tunnel_filter_type(filter_type);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ filter->filter_type = filter_type;
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ enum rte_flow_item_type item_type;
+ uint8_t filter_type = 0;
+ bool is_tni_masked = 0;
+ uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
+ bool nvgre_flag = 0;
+ uint32_t tenant_id_be = 0;
+ int ret;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ /* Check if ETH item is used for place holder.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+ !is_zero_ether_addr(&eth_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!nvgre_flag) {
+ rte_memcpy(&filter->outer_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ } else {
+ rte_memcpy(&filter->inner_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ }
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK))
+ filter->inner_vlan =
+ rte_be_to_cpu_16(vlan_spec->tci) &
+ I40E_TCI_MASK;
+ filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ nvgre_spec = item->spec;
+ nvgre_mask = item->mask;
+ /* Check if NVGRE item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!nvgre_spec && nvgre_mask) ||
+ (nvgre_spec && !nvgre_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec && nvgre_mask) {
+ is_tni_masked =
+ !!memcmp(nvgre_mask->tni, tni_mask,
+ RTE_DIM(tni_mask));
+ if (is_tni_masked) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TNI mask");
+ return -rte_errno;
+ }
+ if (nvgre_mask->protocol &&
+ nvgre_mask->protocol != 0xFFFF) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+ if (nvgre_mask->c_k_s_rsvd0_ver &&
+ nvgre_mask->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0xFFFF)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+ if (nvgre_spec->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0x2000) &&
+ nvgre_mask->c_k_s_rsvd0_ver) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+ if (nvgre_mask->protocol &&
+ nvgre_spec->protocol !=
+ rte_cpu_to_be_16(0x6558)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ nvgre_spec->tni, 3);
+ filter->tenant_id =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter_type |= ETH_TUNNEL_FILTER_TENID;
+ }
+
+ nvgre_flag = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ ret = i40e_check_tunnel_filter_type(filter_type);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ filter->filter_type = filter_type;
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: MPLS label.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_mpls *mpls_spec;
+ const struct rte_flow_item_mpls *mpls_mask;
+ enum rte_flow_item_type item_type;
+ bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
+ const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
+ uint32_t label_be = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* UDP is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP item");
+ return -rte_errno;
+ }
+ is_mplsoudp = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ /* GRE is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GRE item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ mpls_spec = item->spec;
+ mpls_mask = item->mask;
+
+ if (!mpls_spec || !mpls_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MPLS item");
+ return -rte_errno;
+ }
+
+ if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MPLS label mask");
+ return -rte_errno;
+ }
+ rte_memcpy(((uint8_t *)&label_be + 1),
+ mpls_spec->label_tc_s, 3);
+ filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (is_mplsoudp)
+ filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
+ else
+ filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_mpls_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: GTP TEID.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ * 5. GTP profile supports GTPv1 only.
+ * 6. GTP-C response message ('source_port' = 2123) is not supported.
+ */
+static int
+i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_gtp *gtp_spec;
+ const struct rte_flow_item_gtp *gtp_mask;
+ enum rte_flow_item_type item_type;
+
+ if (!pf->gtp_support) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "GTP is not supported by default.");
+ return -rte_errno;
+ }
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTPC:
+ case RTE_FLOW_ITEM_TYPE_GTPU:
+ gtp_spec = item->spec;
+ gtp_mask = item->mask;
+
+ if (!gtp_spec || !gtp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GTP item");
+ return -rte_errno;
+ }
+
+ if (gtp_mask->v_pt_rsv_flags ||
+ gtp_mask->msg_type ||
+ gtp_mask->msg_len ||
+ gtp_mask->teid != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GTP mask");
+ return -rte_errno;
+ }
+
+ if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
+ filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
+ else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
+ filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
+
+ filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_gtp_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: QINQ.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ const struct rte_flow_item_vlan *i_vlan_spec = NULL;
+ const struct rte_flow_item_vlan *i_vlan_mask = NULL;
+ const struct rte_flow_item_vlan *o_vlan_spec = NULL;
+ const struct rte_flow_item_vlan *o_vlan_mask = NULL;
+
+ enum rte_flow_item_type item_type;
+ bool vlan_flag = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+
+ if (!vlan_flag) {
+ o_vlan_spec = vlan_spec;
+ o_vlan_mask = vlan_mask;
+ vlan_flag = 1;
+ } else {
+ i_vlan_spec = vlan_spec;
+ i_vlan_mask = vlan_mask;
+ vlan_flag = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /* Get filter specification */
+ if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) &&
+ (i_vlan_mask != NULL) &&
+ (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
+ & I40E_TCI_MASK;
+ filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
+ & I40E_TCI_MASK;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
+ return 0;
+}
+
+static int
+i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_qinq_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/**
+ * This function is used to do configuration i40e existing RSS with rte_flow.
+ * It also enable queue region configuration using flow API for i40e.
+ * pattern can be used indicate what parameters will be include in flow,
+ * like user_priority or flowtype for queue region or HASH function for RSS.
+ * Action is used to transmit parameter like queue index and HASH
+ * function for RSS, or flowtype for queue region configuration.
+ * For example:
+ * pattern:
+ * Case 1: only ETH, indicate flowtype for queue region will be parsed.
+ * Case 2: only VLAN, indicate user_priority for queue region will be parsed.
+ * Case 3: none, indicate RSS related will be parsed in action.
+ * Any pattern other the ETH or VLAN will be treated as invalid except END.
+ * So, pattern choice is depened on the purpose of configuration of
+ * that flow.
+ * action:
+ * action RSS will be uaed to transmit valid parameter with
+ * struct rte_flow_action_rss for all the 3 case.
+ */
+static int
+i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ uint8_t *action_flag,
+ struct i40e_queue_regions *info)
+{
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ return 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ *action_flag = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) {
+ info->region[0].user_priority[0] =
+ (rte_be_to_cpu_16(
+ vlan_spec->tci) >> 13) & 0x7;
+ info->region[0].user_priority_num = 1;
+ info->queue_region_number = 1;
+ *action_flag = 0;
+ }
+ }
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * This function is used to parse rss queue index, total queue number and
+ * hash functions, If the purpose of this configuration is for queue region
+ * configuration, it will set queue_region_conf flag to TRUE, else to FALSE.
+ * In queue region configuration, it also need to parse hardware flowtype
+ * and user_priority from configuration, it will also cheeck the validity
+ * of these parameters. For example, The queue region sizes should
+ * be any of the following values: 1, 2, 4, 8, 16, 32, 64, the
+ * hw_flowtype or PCTYPE max index should be 63, the user priority
+ * max index should be 7, and so on. And also, queue index should be
+ * continuous sequence and queue region index should be part of rss
+ * queue index for this port.
+ */
+static int
+i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ uint8_t action_flag,
+ struct i40e_queue_regions *conf_info,
+ union i40e_filter_t *filter)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_queue_regions *info = &pf->queue_region;
+ struct i40e_rte_flow_rss_conf *rss_config =
+ &filter->rss_conf;
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ uint16_t i, j, n, tmp;
+ uint32_t index = 0;
+ uint64_t hf_bit = 1;
+
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ rss = act->conf;
+
+ /**
+ * rss only supports forwarding,
+ * check if the first not void action is RSS.
+ */
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (action_flag) {
+ for (n = 0; n < 64; n++) {
+ if (rss->types & (hf_bit << n)) {
+ conf_info->region[0].hw_flowtype[0] = n;
+ conf_info->region[0].flowtype_num = 1;
+ conf_info->queue_region_number = 1;
+ break;
+ }
+ }
+ }
+
+ /**
+ * Do some queue region related parameters check
+ * in order to keep queue index for queue region to be
+ * continuous sequence and also to be part of RSS
+ * queue index for this port.
+ */
+ if (conf_info->queue_region_number) {
+ for (i = 0; i < rss->queue_num; i++) {
+ for (j = 0; j < rss_info->conf.queue_num; j++) {
+ if (rss->queue[i] == rss_info->conf.queue[j])
+ break;
+ }
+ if (j == rss_info->conf.queue_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+ }
+
+ for (i = 0; i < rss->queue_num - 1; i++) {
+ if (rss->queue[i + 1] != rss->queue[i] + 1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+ }
+ }
+
+ /* Parse queue region related parameters from configuration */
+ for (n = 0; n < conf_info->queue_region_number; n++) {
+ if (conf_info->region[n].user_priority_num ||
+ conf_info->region[n].flowtype_num) {
+ if (!((rte_is_power_of_2(rss->queue_num)) &&
+ rss->queue_num <= 64)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues do not exceed the VSI allocation");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].user_priority[n] >=
+ I40E_MAX_USER_PRIORITY) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "the user priority max index is 7");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].hw_flowtype[n] >=
+ I40E_FILTER_PCTYPE_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "the hw_flowtype or PCTYPE max index is 63");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ if (info->region[i].queue_num ==
+ rss->queue_num &&
+ info->region[i].queue_start_index ==
+ rss->queue[0])
+ break;
+ }
+
+ if (i == info->queue_region_number) {
+ if (i > I40E_REGION_MAX_INDEX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "the queue region max index is 7");
+ return -rte_errno;
+ }
+
+ info->region[i].queue_num =
+ rss->queue_num;
+ info->region[i].queue_start_index =
+ rss->queue[0];
+ info->region[i].region_id =
+ info->queue_region_number;
+
+ j = info->region[i].user_priority_num;
+ tmp = conf_info->region[n].user_priority[0];
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] = tmp;
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ tmp = conf_info->region[n].hw_flowtype[0];
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] = tmp;
+ info->region[i].flowtype_num++;
+ }
+ info->queue_region_number++;
+ } else {
+ j = info->region[i].user_priority_num;
+ tmp = conf_info->region[n].user_priority[0];
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] = tmp;
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ tmp = conf_info->region[n].hw_flowtype[0];
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] = tmp;
+ info->region[i].flowtype_num++;
+ }
+ }
+ }
+
+ rss_config->queue_region_conf = TRUE;
+ }
+
+ /**
+ * Return function if this flow is used for queue region configuration
+ */
+ if (rss_config->queue_region_conf)
+ return 0;
+
+ if (!rss || !rss->queue_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->queue_num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+
+ /* Parse RSS related parameters from configuration */
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key too large");
+ if (rss->queue_num > RTE_DIM(rss_config->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (i40e_rss_conf_init(rss_config, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
+
+ index++;
+
+ /* check if the next not void action is END */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+ rss_config->queue_region_conf = FALSE;
+
+ return 0;
+}
+
+static int
+i40e_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ union i40e_filter_t *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct i40e_queue_regions info;
+ uint8_t action_flag = 0;
+
+ memset(&info, 0, sizeof(struct i40e_queue_regions));
+
+ ret = i40e_flow_parse_rss_pattern(dev, pattern,
+ error, &action_flag, &info);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_rss_action(dev, actions, error,
+ action_flag, &info, filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_HASH;
+
+ return 0;
+}
+
+static int
+i40e_config_rss_filter_set(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ if (conf->queue_region_conf) {
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ conf->queue_region_conf = 0;
+ } else {
+ ret = i40e_config_rss_filter(pf, conf, 1);
+ }
+ return ret;
+}
+
+static int
+i40e_config_rss_filter_del(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+ i40e_config_rss_filter(pf, conf, 0);
+ return 0;
+}
+
+static int
+i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow_item *items; /* internal pattern w/o VOID items */
+ parse_filter_t parse_filter;
+ uint32_t item_num = 0; /* non-void item number of pattern*/
+ uint32_t i = 0;
+ bool flag = false;
+ int ret = I40E_NOT_SUPPORTED;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ memset(&cons_filter, 0, sizeof(cons_filter));
+
+ /* Get the non-void item of action */
+ while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ i++;
+
+ if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ ret = i40e_parse_rss_filter(dev, attr, pattern,
+ actions, &cons_filter, error);
+ return ret;
+ }
+
+ i = 0;
+ /* Get the non-void item number of pattern */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
+ item_num++;
+
+ items = rte_zmalloc("i40e_pattern",
+ item_num * sizeof(struct rte_flow_item), 0);
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for PMD internal items.");
+ return -ENOMEM;
+ }
+
+ i40e_pattern_skip_void_item(items, pattern);
+
+ i = 0;
+ do {
+ parse_filter = i40e_find_parse_filter_func(items, &i);
+ if (!parse_filter && !flag) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ rte_free(items);
+ return -rte_errno;
+ }
+ if (parse_filter)
+ ret = parse_filter(dev, attr, items, actions,
+ error, &cons_filter);
+ flag = true;
+ } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
+
+ rte_free(items);
+
+ return ret;
+}
+
+static struct rte_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_flow *flow;
+ int ret;
+
+ flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return flow;
+ }
+
+ ret = i40e_flow_validate(dev, attr, pattern, actions, error);
+ if (ret < 0)
+ return NULL;
+
+ switch (cons_filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_set(pf,
+ &cons_filter.ethertype_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+ i40e_ethertype_filter_list);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_flow_add_del_fdir_filter(dev,
+ &cons_filter.fdir_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+ i40e_fdir_filter_list);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_consistent_tunnel_filter_set(pf,
+ &cons_filter.consistent_tunnel_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+ i40e_tunnel_filter_list);
+ break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_set(dev,
+ &cons_filter.rss_conf);
+ if (ret)
+ goto free_flow;
+ flow->rule = &pf->rss_info;
+ break;
+ default:
+ goto free_flow;
+ }
+
+ flow->filter_type = cons_filter_type;
+ TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+ return flow;
+
+free_flow:
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ return NULL;
+}
+
+static int
+i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum rte_filter_type filter_type = flow->filter_type;
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_flow_destroy_ethertype_filter(pf,
+ (struct i40e_ethertype_filter *)flow->rule);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_flow_destroy_tunnel_filter(pf,
+ (struct i40e_tunnel_filter *)flow->rule);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_flow_add_del_fdir_filter(dev,
+ &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
+ break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_del(dev,
+ (struct i40e_rte_flow_rss_conf *)flow->rule);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ } else
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+
+ return ret;
+}
+
+static int
+i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *node;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret = 0;
+
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->input.mac_addr.addr_bytes,
+ filter->input.ether_type,
+ flags, pf->main_vsi->seid,
+ filter->queue, 0, &stats, NULL);
+ if (ret < 0)
+ return ret;
+
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+
+ return ret;
+}
+
+static int
+i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_pf_vf *vf;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *node;
+ bool big_buffer = 0;
+ int ret = 0;
+
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
+ (struct ether_addr *)&cld_filter.element.outer_mac);
+ ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
+ (struct ether_addr *)&cld_filter.element.inner_mac);
+ cld_filter.element.inner_vlan = filter->input.inner_vlan;
+ cld_filter.element.flags = filter->input.flags;
+ cld_filter.element.tenant_id = filter->input.tenant_id;
+ cld_filter.element.queue_number = filter->queue;
+ rte_memcpy(cld_filter.general_fields,
+ filter->input.general_fields,
+ sizeof(cld_filter.general_fields));
+
+ if (!filter->is_to_vf)
+ vsi = pf->main_vsi;
+ else {
+ vf = &pf->vfs[filter->vf_id];
+ vsi = vf->vsi;
+ }
+
+ if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
+ ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
+ ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X10))
+ big_buffer = 1;
+
+ if (big_buffer)
+ ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
+ &cld_filter, 1);
+ else
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter.element, 1);
+ if (ret < 0)
+ return -ENOTSUP;
+
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+
+ return ret;
+}
+
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret;
+
+ ret = i40e_flow_flush_fdir_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush FDIR flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_ethertype_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to ethertype flush flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_tunnel_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush tunnel flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_rss_filter(dev);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush rss flows.");
+ return -rte_errno;
+ }
+
+ return ret;
+}
+
+static int
+i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter;
+ enum i40e_filter_pctype pctype;
+ struct rte_flow *flow;
+ void *temp;
+ int ret;
+
+ ret = i40e_fdir_flush(dev);
+ if (!ret) {
+ /* Delete FDIR filters in FDIR list. */
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ ret = i40e_sw_fdir_filter_del(pf,
+ &fdir_filter->fdir.input);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Delete FDIR flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
+ pf->fdir.inset_flag[pctype] = 0;
+ }
+
+ return ret;
+}
+
+/* Flush all ethertype filters */
+static int
+i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(ethertype_list))) {
+ ret = i40e_flow_destroy_ethertype_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete ethertype flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
+
+/* Flush all tunnel filters */
+static int
+i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(tunnel_list))) {
+ ret = i40e_flow_destroy_tunnel_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete tunnel flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
+
+/* remove the rss filter */
+static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int32_t ret = -EINVAL;
+
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+ if (rss_info->conf.queue_num)
+ ret = i40e_config_rss_filter(pf, rss_info, FALSE);
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_logs.h b/src/spdk/dpdk/drivers/net/i40e/i40e_logs.h
new file mode 100644
index 00000000..b1049699
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_logs.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#ifndef _I40E_LOGS_H_
+#define _I40E_LOGS_H_
+
+extern int i40e_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, i40e_logtype_init, "%s(): " fmt "\n", \
+ __func__, ##args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+extern int i40e_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, i40e_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _I40E_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_pf.c b/src/spdk/dpdk/drivers/net/i40e/i40e_pf.c
new file mode 100644
index 00000000..dd3962d3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_pf.c
@@ -0,0 +1,1450 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_string_fns.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_prototype.h"
+#include "base/i40e_adminq_cmd.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_pf.h"
+#include "rte_pmd_i40e.h"
+
+#define I40E_CFG_CRCSTRIP_DEFAULT 1
+
+static int
+i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
+ struct virtchnl_queue_select *qsel,
+ bool on);
+
+/**
+ * Bind PF queues with VSI and VF.
+ **/
+static int
+i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
+{
+ int i;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t vsi_id = vf->vsi->vsi_id;
+ uint16_t vf_id = vf->vf_idx;
+ uint16_t nb_qps = vf->vsi->nb_qps;
+ uint16_t qbase = vf->vsi->base_queue;
+ uint16_t q1, q2;
+ uint32_t val;
+
+ /*
+ * VF should use scatter range queues. So, it needn't
+ * to set QBASE in this register.
+ */
+ i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id),
+ I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+ /* Set to enable VFLAN_QTABLE[] registers valid */
+ I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
+ I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
+
+ /* map PF queues to VF */
+ for (i = 0; i < nb_qps; i++) {
+ val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
+ I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
+ }
+
+ /* map PF queues to VSI */
+ for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
+ if (2 * i > nb_qps - 1)
+ q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
+ else
+ q1 = qbase + 2 * i;
+
+ if (2 * i + 1 > nb_qps - 1)
+ q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
+ else
+ q2 = qbase + 2 * i + 1;
+
+ val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
+ i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
+ }
+ I40E_WRITE_FLUSH(hw);
+
+ return I40E_SUCCESS;
+}
+
+
+/**
+ * Proceed VF reset operation.
+ */
+int
+i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
+{
+ uint32_t val, i;
+ struct i40e_hw *hw;
+ struct i40e_pf *pf;
+ uint16_t vf_id, abs_vf_id, vf_msix_num;
+ int ret;
+ struct virtchnl_queue_select qsel;
+
+ if (vf == NULL)
+ return -EINVAL;
+
+ pf = vf->pf;
+ hw = I40E_PF_TO_HW(vf->pf);
+ vf_id = vf->vf_idx;
+ abs_vf_id = vf_id + hw->func_caps.vf_base_id;
+
+ /* Notify VF that we are in VFR progress */
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_INPROGRESS);
+
+ /*
+ * If require a SW VF reset, a VFLR interrupt will be generated,
+ * this function will be called again. To avoid it,
+ * disable interrupt first.
+ */
+ if (do_hw_reset) {
+ vf->state = I40E_VF_INRESET;
+ val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
+ val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
+ I40E_WRITE_FLUSH(hw);
+ }
+
+#define VFRESET_MAX_WAIT_CNT 100
+ /* Wait until VF reset is done */
+ for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
+ rte_delay_us(10);
+ val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
+ if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
+ break;
+ }
+
+ if (i >= VFRESET_MAX_WAIT_CNT) {
+ PMD_DRV_LOG(ERR, "VF reset timeout");
+ return -ETIMEDOUT;
+ }
+ /* This is not first time to do reset, do cleanup job first */
+ if (vf->vsi) {
+ /* Disable queues */
+ memset(&qsel, 0, sizeof(qsel));
+ for (i = 0; i < vf->vsi->nb_qps; i++)
+ qsel.rx_queues |= 1 << i;
+ qsel.tx_queues = qsel.rx_queues;
+ ret = i40e_pf_host_switch_queues(vf, &qsel, false);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Disable VF queues failed");
+ return -EFAULT;
+ }
+
+ /* Disable VF interrupt setting */
+ vf_msix_num = hw->func_caps.num_msix_vectors_vf;
+ for (i = 0; i < vf_msix_num; i++) {
+ if (!i)
+ val = I40E_VFINT_DYN_CTL0(vf_id);
+ else
+ val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
+ (vf_id)) + (i - 1));
+ I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ }
+ I40E_WRITE_FLUSH(hw);
+
+ /* remove VSI */
+ ret = i40e_vsi_release(vf->vsi);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Release VSI failed");
+ return -EFAULT;
+ }
+ }
+
+#define I40E_VF_PCI_ADDR 0xAA
+#define I40E_VF_PEND_MASK 0x20
+ /* Check the pending transactions of this VF */
+ /* Use absolute VF id, refer to datasheet for details */
+ I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
+ (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+ for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
+ rte_delay_us(1);
+ val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
+ if ((val & I40E_VF_PEND_MASK) == 0)
+ break;
+ }
+
+ if (i >= VFRESET_MAX_WAIT_CNT) {
+ PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout");
+ return -ETIMEDOUT;
+ }
+
+ /* Reset done, Set COMPLETE flag and clear reset bit */
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_COMPLETED);
+ val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
+ val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
+ vf->reset_cnt++;
+ I40E_WRITE_FLUSH(hw);
+
+ /* Allocate resource again */
+ if (pf->floating_veb && pf->floating_veb_list[vf_id]) {
+ vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
+ NULL, vf->vf_idx);
+ } else {
+ vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
+ vf->pf->main_vsi, vf->vf_idx);
+ }
+
+ if (vf->vsi == NULL) {
+ PMD_DRV_LOG(ERR, "Add vsi failed");
+ return -EFAULT;
+ }
+
+ ret = i40e_pf_vf_queues_mapping(vf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "queue mapping error");
+ i40e_vsi_release(vf->vsi);
+ return -EFAULT;
+ }
+
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_VFACTIVE);
+
+ return ret;
+}
+
+int
+i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
+ uint32_t opcode,
+ uint32_t retval,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;
+ int ret;
+
+ ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
+ msg, msglen, NULL);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Fail to send message to VF, err %u",
+ hw->aq.asq_last_status);
+ }
+
+ return ret;
+}
+
+static void
+i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, uint8_t *msg,
+ bool b_op)
+{
+ struct virtchnl_version_info info;
+
+ /* VF and PF drivers need to follow the Virtchnl definition, No matter
+ * it's DPDK or other kernel drivers.
+ * The original DPDK host specific feature
+ * like CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT will not available.
+ */
+
+ info.major = VIRTCHNL_VERSION_MAJOR;
+ vf->version = *(struct virtchnl_version_info *)msg;
+ if (VF_IS_V10(&vf->version))
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+ else
+ info.minor = VIRTCHNL_VERSION_MINOR;
+
+ if (b_op)
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
+ I40E_SUCCESS,
+ (uint8_t *)&info,
+ sizeof(info));
+ else
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
+ I40E_NOT_SUPPORTED,
+ (uint8_t *)&info,
+ sizeof(info));
+}
+
+static int
+i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
+{
+ i40e_pf_host_vf_reset(vf, 1);
+
+ /* No feedback will be sent to VF for VFLR */
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, uint8_t *msg,
+ bool b_op)
+{
+ struct virtchnl_vf_resource *vf_res = NULL;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint32_t len = 0;
+ uint64_t default_hena = I40E_RSS_HENA_ALL;
+ int ret = I40E_SUCCESS;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(vf,
+ VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ /* only have 1 VSI by default */
+ len = sizeof(struct virtchnl_vf_resource) +
+ I40E_DEFAULT_VF_VSI_NUM *
+ sizeof(struct virtchnl_vsi_resource);
+
+ vf_res = rte_zmalloc("i40e_vf_res", len, 0);
+ if (vf_res == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate mem");
+ ret = I40E_ERR_NO_MEMORY;
+ vf_res = NULL;
+ len = 0;
+ goto send_msg;
+ }
+
+ if (VF_IS_V10(&vf->version)) /* doesn't support offload negotiate */
+ vf->request_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
+ else
+ vf->request_caps = *(uint32_t *)msg;
+
+ /* enable all RSS by default,
+ * doesn't support hena setting by virtchnnl yet.
+ */
+ if (vf->request_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ I40E_WRITE_REG(hw, I40E_VFQF_HENA1(0, vf->vf_idx),
+ (uint32_t)default_hena);
+ I40E_WRITE_REG(hw, I40E_VFQF_HENA1(1, vf->vf_idx),
+ (uint32_t)(default_hena >> 32));
+ I40E_WRITE_FLUSH(hw);
+ }
+
+ vf_res->vf_cap_flags = vf->request_caps &
+ I40E_VIRTCHNL_OFFLOAD_CAPS;
+ /* For X722, it supports write back on ITR
+ * without binding queue to interrupt vector.
+ */
+ if (hw->mac.type == I40E_MAC_X722)
+ vf_res->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+ vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
+ vf_res->num_queue_pairs = vf->vsi->nb_qps;
+ vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;
+ vf_res->rss_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * 4;
+ vf_res->rss_lut_size = (I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4;
+
+ /* Change below setting if PF host can support more VSIs for VF */
+ vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+ vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
+ vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
+ ether_addr_copy(&vf->mac_addr,
+ (struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
+ ret, (uint8_t *)vf_res, len);
+ rte_free(vf_res);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
+ struct i40e_pf_vf *vf,
+ struct virtchnl_rxq_info *rxq,
+ uint8_t crcstrip)
+{
+ int err = I40E_SUCCESS;
+ struct i40e_hmc_obj_rxq rx_ctx;
+ uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id;
+
+ /* Clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+ rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ rx_ctx.qlen = rxq->ring_len;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ rx_ctx.dsize = 1;
+#endif
+
+ if (rxq->splithdr_enabled) {
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
+ rx_ctx.dtype = i40e_header_split_enabled;
+ } else {
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
+ rx_ctx.dtype = i40e_header_split_none;
+ }
+ rx_ctx.rxmax = rxq->max_pkt_size;
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = crcstrip;
+ rx_ctx.l2tsel = 1;
+ rx_ctx.prefena = 1;
+
+ err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id);
+ if (err != I40E_SUCCESS)
+ return err;
+ err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx);
+
+ return err;
+}
+
+static inline uint8_t
+i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi,
+ uint16_t queue_id)
+{
+ struct i40e_aqc_vsi_properties_data *info = &vsi->info;
+ uint16_t bsf, qp_idx;
+ uint8_t i;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & (1 << i)) {
+ qp_idx = rte_le_to_cpu_16((info->tc_mapping[i] &
+ I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT);
+ bsf = rte_le_to_cpu_16((info->tc_mapping[i] &
+ I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+ if (queue_id >= qp_idx && queue_id < qp_idx + (1 << bsf))
+ return i;
+ }
+ }
+ return 0;
+}
+
+static int
+i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
+ struct i40e_pf_vf *vf,
+ struct virtchnl_txq_info *txq)
+{
+ int err = I40E_SUCCESS;
+ struct i40e_hmc_obj_txq tx_ctx;
+ struct i40e_vsi *vsi = vf->vsi;
+ uint32_t qtx_ctl;
+ uint16_t abs_queue_id = vsi->base_queue + txq->queue_id;
+ uint8_t dcb_tc;
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(tx_ctx));
+ tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ tx_ctx.qlen = txq->ring_len;
+ dcb_tc = i40e_vsi_get_tc_of_queue(vsi, txq->queue_id);
+ tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[dcb_tc]);
+ tx_ctx.head_wb_ena = txq->headwb_enabled;
+ tx_ctx.head_wb_addr = txq->dma_headwb_addr;
+
+ err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
+ if (err != I40E_SUCCESS)
+ return err;
+
+ err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
+ if (err != I40E_SUCCESS)
+ return err;
+
+ /* bind queue with VF function, since TX/QX will appear in pair,
+ * so only has QTX_CTL to set.
+ */
+ qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
+ ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK) |
+ (((vf->vf_idx + hw->func_caps.vf_base_id) <<
+ I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+ I40E_QTX_CTL_VFVM_INDX_MASK);
+ I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
+ I40E_WRITE_FLUSH(hw);
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen,
+ bool b_op)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ struct i40e_vsi *vsi = vf->vsi;
+ struct virtchnl_vsi_queue_config_info *vc_vqci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *vc_qpi;
+ int i, ret = I40E_SUCCESS;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(vf,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
+ vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
+ msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
+ vc_vqci->num_queue_pairs)) {
+ PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ vc_qpi = vc_vqci->qpair;
+ for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
+ if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
+ vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ /*
+ * Apply VF RX queue setting to HMC.
+ * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ * then the extra information of
+ * 'struct virtchnl_queue_pair_extra_info' is needed,
+ * otherwise set the last parameter to NULL.
+ */
+ if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
+ I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ /* Apply VF TX queue setting to HMC */
+ if (i40e_pf_host_hmc_config_txq(hw, vf,
+ &vc_qpi[i].txq) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static void
+i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
+ struct virtchnl_vector_map *vvm)
+{
+#define BITS_PER_CHAR 8
+ uint64_t linklistmap = 0, tempmap;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t qid;
+ bool b_first_q = true;
+ enum i40e_queue_type qtype;
+ uint16_t vector_id;
+ uint32_t reg, reg_idx;
+ uint16_t itr_idx = 0, i;
+
+ vector_id = vvm->vector_id;
+ /* setup the head */
+ if (!vector_id)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_idx);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(
+ ((hw->func_caps.num_msix_vectors_vf - 1) * vf->vf_idx)
+ + (vector_id - 1));
+
+ if (vvm->rxq_map == 0 && vvm->txq_map == 0) {
+ I40E_WRITE_REG(hw, reg_idx,
+ I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
+ goto cfg_irq_done;
+ }
+
+ /* sort all rx and tx queues */
+ tempmap = vvm->rxq_map;
+ for (i = 0; i < sizeof(vvm->rxq_map) * BITS_PER_CHAR; i++) {
+ if (tempmap & 0x1)
+ linklistmap |= (1 << (2 * i));
+ tempmap >>= 1;
+ }
+
+ tempmap = vvm->txq_map;
+ for (i = 0; i < sizeof(vvm->txq_map) * BITS_PER_CHAR; i++) {
+ if (tempmap & 0x1)
+ linklistmap |= (1 << (2 * i + 1));
+ tempmap >>= 1;
+ }
+
+ /* Link all rx and tx queues into a chained list */
+ tempmap = linklistmap;
+ i = 0;
+ b_first_q = true;
+ do {
+ if (tempmap & 0x1) {
+ qtype = (enum i40e_queue_type)(i % 2);
+ qid = vf->vsi->base_queue + i / 2;
+ if (b_first_q) {
+ /* This is header */
+ b_first_q = false;
+ reg = ((qtype <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+ | qid);
+ } else {
+ /* element in the link list */
+ reg = (vector_id) |
+ (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (qid << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ }
+ I40E_WRITE_REG(hw, reg_idx, reg);
+ /* find next register to program */
+ switch (qtype) {
+ case I40E_QUEUE_TYPE_RX:
+ reg_idx = I40E_QINT_RQCTL(qid);
+ itr_idx = vvm->rxitr_idx;
+ break;
+ case I40E_QUEUE_TYPE_TX:
+ reg_idx = I40E_QINT_TQCTL(qid);
+ itr_idx = vvm->txitr_idx;
+ break;
+ default:
+ break;
+ }
+ }
+ i++;
+ tempmap >>= 1;
+ } while (tempmap);
+
+ /* Terminate the link list */
+ reg = (vector_id) |
+ (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ I40E_WRITE_REG(hw, reg_idx, reg);
+
+cfg_irq_done:
+ I40E_WRITE_FLUSH(hw);
+}
+
+static int
+i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
+ uint8_t *msg, uint16_t msglen,
+ bool b_op)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ struct virtchnl_irq_map_info *irqmap =
+ (struct virtchnl_irq_map_info *)msg;
+ struct virtchnl_vector_map *map;
+ int i;
+ uint16_t vector_id, itr_idx;
+ unsigned long qbit_max;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ if (msg == NULL || msglen < sizeof(struct virtchnl_irq_map_info)) {
+ PMD_DRV_LOG(ERR, "buffer too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ /* PF host will support both DPDK VF or Linux VF driver, identify by
+ * number of vectors requested.
+ */
+
+ /* DPDK VF only requires single vector */
+ if (irqmap->num_vectors == 1) {
+ /* This MSIX intr store the intr in VF range */
+ vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
+ vf->vsi->nb_msix = irqmap->num_vectors;
+ vf->vsi->nb_used_qps = vf->vsi->nb_qps;
+ itr_idx = irqmap->vecmap[0].rxitr_idx;
+
+ /* Don't care how the TX/RX queue mapping with this vector.
+ * Link all VF RX queues together. Only did mapping work.
+ * VF can disable/enable the intr by itself.
+ */
+ i40e_vsi_queues_bind_intr(vf->vsi, itr_idx);
+ goto send_msg;
+ }
+
+ /* Then, it's Linux VF driver */
+ qbit_max = 1 << pf->vf_nb_qp_max;
+ for (i = 0; i < irqmap->num_vectors; i++) {
+ map = &irqmap->vecmap[i];
+
+ vector_id = map->vector_id;
+ /* validate msg params */
+ if (vector_id >= hw->func_caps.num_msix_vectors_vf) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {
+ i40e_pf_config_irq_link_list(vf, map);
+ } else {
+ /* configured queue size excceed limit */
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
+ struct virtchnl_queue_select *qsel,
+ bool on)
+{
+ int ret = I40E_SUCCESS;
+ int i;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t baseq = vf->vsi->base_queue;
+
+ if (qsel->rx_queues + qsel->tx_queues == 0)
+ return I40E_ERR_PARAM;
+
+ /* always enable RX first and disable last */
+ /* Enable RX if it's enable */
+ if (on) {
+ for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
+ if (qsel->rx_queues & (1 << i)) {
+ ret = i40e_switch_rx_queue(hw, baseq + i, on);
+ if (ret != I40E_SUCCESS)
+ return ret;
+ }
+ }
+
+ /* Enable/Disable TX */
+ for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
+ if (qsel->tx_queues & (1 << i)) {
+ ret = i40e_switch_tx_queue(hw, baseq + i, on);
+ if (ret != I40E_SUCCESS)
+ return ret;
+ }
+
+ /* disable RX last if it's disable */
+ if (!on) {
+ /* disable RX */
+ for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
+ if (qsel->rx_queues & (1 << i)) {
+ ret = i40e_switch_rx_queue(hw, baseq + i, on);
+ if (ret != I40E_SUCCESS)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct virtchnl_queue_select *q_sel =
+ (struct virtchnl_queue_select *)msg;
+
+ if (msg == NULL || msglen != sizeof(*q_sel)) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ ret = i40e_pf_host_switch_queues(vf, q_sel, true);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen,
+ bool b_op)
+{
+ int ret = I40E_SUCCESS;
+ struct virtchnl_queue_select *q_sel =
+ (struct virtchnl_queue_select *)msg;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ if (msg == NULL || msglen != sizeof(*q_sel)) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ ret = i40e_pf_host_switch_queues(vf, q_sel, false);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+
+static int
+i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen,
+ bool b_op)
+{
+ int ret = I40E_SUCCESS;
+ struct virtchnl_ether_addr_list *addr_list =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct i40e_mac_filter_info filter;
+ int i;
+ struct ether_addr *mac;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_ADD_ETH_ADDR,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ memset(&filter, 0 , sizeof(struct i40e_mac_filter_info));
+
+ if (msg == NULL || msglen <= sizeof(*addr_list)) {
+ PMD_DRV_LOG(ERR, "add_ether_address argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ mac = (struct ether_addr *)(addr_list->list[i].addr);
+ rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
+ filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ if (is_zero_ether_addr(mac) ||
+ i40e_vsi_add_mac(vf->vsi, &filter)) {
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ goto send_msg;
+ }
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen,
+ bool b_op)
+{
+ int ret = I40E_SUCCESS;
+ struct virtchnl_ether_addr_list *addr_list =
+ (struct virtchnl_ether_addr_list *)msg;
+ int i;
+ struct ether_addr *mac;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_DEL_ETH_ADDR,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ if (msg == NULL || msglen <= sizeof(*addr_list)) {
+ PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ mac = (struct ether_addr *)(addr_list->list[i].addr);
+ if(is_zero_ether_addr(mac) ||
+ i40e_vsi_delete_mac(vf->vsi, mac)) {
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ goto send_msg;
+ }
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
+ uint8_t *msg, uint16_t msglen,
+ bool b_op)
+{
+ int ret = I40E_SUCCESS;
+ struct virtchnl_vlan_filter_list *vlan_filter_list =
+ (struct virtchnl_vlan_filter_list *)msg;
+ int i;
+ uint16_t *vid;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_ADD_VLAN,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
+ PMD_DRV_LOG(ERR, "add_vlan argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ vid = vlan_filter_list->vlan_id;
+
+ for (i = 0; i < vlan_filter_list->num_elements; i++) {
+ ret = i40e_vsi_add_vlan(vf->vsi, vid[i]);
+ if(ret != I40E_SUCCESS)
+ goto send_msg;
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen,
+ bool b_op)
+{
+ int ret = I40E_SUCCESS;
+ struct virtchnl_vlan_filter_list *vlan_filter_list =
+ (struct virtchnl_vlan_filter_list *)msg;
+ int i;
+ uint16_t *vid;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_DEL_VLAN,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
+ PMD_DRV_LOG(ERR, "delete_vlan argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ vid = vlan_filter_list->vlan_id;
+ for (i = 0; i < vlan_filter_list->num_elements; i++) {
+ ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]);
+ if(ret != I40E_SUCCESS)
+ goto send_msg;
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_config_promisc_mode(
+ struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen,
+ bool b_op)
+{
+ int ret = I40E_SUCCESS;
+ struct virtchnl_promisc_info *promisc =
+ (struct virtchnl_promisc_info *)msg;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ bool unicast = FALSE, multicast = FALSE;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ if (msg == NULL || msglen != sizeof(*promisc)) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ if (promisc->flags & FLAG_VF_UNICAST_PROMISC)
+ unicast = TRUE;
+ ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
+ vf->vsi->seid, unicast, NULL, true);
+ if (ret != I40E_SUCCESS)
+ goto send_msg;
+
+ if (promisc->flags & FLAG_VF_MULTICAST_PROMISC)
+ multicast = TRUE;
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
+ multicast, NULL);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
+{
+ i40e_update_vsi_stats(vf->vsi);
+
+ if (b_op)
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
+ I40E_SUCCESS,
+ (uint8_t *)&vf->vsi->eth_stats,
+ sizeof(vf->vsi->eth_stats));
+ else
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
+ I40E_NOT_SUPPORTED,
+ (uint8_t *)&vf->vsi->eth_stats,
+ sizeof(vf->vsi->eth_stats));
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_pf_host_process_cmd_enable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
+{
+ int ret = I40E_SUCCESS;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ ret = i40e_vsi_config_vlan_stripping(vf->vsi, TRUE);
+ if (ret != 0)
+ PMD_DRV_LOG(ERR, "Failed to enable vlan stripping");
+
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
+{
+ int ret = I40E_SUCCESS;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ ret = i40e_vsi_config_vlan_stripping(vf->vsi, FALSE);
+ if (ret != 0)
+ PMD_DRV_LOG(ERR, "Failed to disable vlan stripping");
+
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_set_rss_lut(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen,
+ bool b_op)
+{
+ struct virtchnl_rss_lut *rss_lut = (struct virtchnl_rss_lut *)msg;
+ uint16_t valid_len;
+ int ret = I40E_SUCCESS;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_CONFIG_RSS_LUT,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ if (!msg || msglen <= sizeof(struct virtchnl_rss_lut)) {
+ PMD_DRV_LOG(ERR, "set_rss_lut argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ valid_len = sizeof(struct virtchnl_rss_lut) + rss_lut->lut_entries - 1;
+ if (msglen < valid_len) {
+ PMD_DRV_LOG(ERR, "set_rss_lut length mismatch");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ ret = i40e_set_rss_lut(vf->vsi, rss_lut->lut, rss_lut->lut_entries);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_set_rss_key(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen,
+ bool b_op)
+{
+ struct virtchnl_rss_key *rss_key = (struct virtchnl_rss_key *)msg;
+ uint16_t valid_len;
+ int ret = I40E_SUCCESS;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_DEL_VLAN,
+ VIRTCHNL_OP_CONFIG_RSS_KEY, NULL, 0);
+ return ret;
+ }
+
+ if (!msg || msglen <= sizeof(struct virtchnl_rss_key)) {
+ PMD_DRV_LOG(ERR, "set_rss_key argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ valid_len = sizeof(struct virtchnl_rss_key) + rss_key->key_len - 1;
+ if (msglen < valid_len) {
+ PMD_DRV_LOG(ERR, "set_rss_key length mismatch");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ ret = i40e_set_rss_key(vf->vsi, rss_key->key, rss_key->key_len);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+void
+i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ struct virtchnl_pf_event event;
+ uint16_t vf_id = vf->vf_idx;
+ uint32_t tval, rval;
+
+ event.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ event.event_data.link_event.link_status =
+ dev->data->dev_link.link_status;
+
+ /* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
+ switch (dev->data->dev_link.link_speed) {
+ case ETH_SPEED_NUM_100M:
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
+ break;
+ case ETH_SPEED_NUM_1G:
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
+ break;
+ case ETH_SPEED_NUM_10G:
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
+ break;
+ case ETH_SPEED_NUM_20G:
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
+ break;
+ case ETH_SPEED_NUM_25G:
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
+ break;
+ case ETH_SPEED_NUM_40G:
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
+ break;
+ default:
+ event.event_data.link_event.link_speed =
+ VIRTCHNL_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ tval = I40E_READ_REG(hw, I40E_VF_ATQLEN(vf_id));
+ rval = I40E_READ_REG(hw, I40E_VF_ARQLEN(vf_id));
+
+ if (tval & I40E_VF_ATQLEN_ATQLEN_MASK ||
+ tval & I40E_VF_ATQLEN_ATQENABLE_MASK ||
+ rval & I40E_VF_ARQLEN_ARQLEN_MASK ||
+ rval & I40E_VF_ARQLEN_ARQENABLE_MASK)
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT,
+ I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+}
+
+void
+i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
+ uint16_t abs_vf_id, uint32_t opcode,
+ __rte_unused uint32_t retval,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf_vf *vf;
+ /* AdminQ will pass absolute VF id, transfer to internal vf id */
+ uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
+ struct rte_pmd_i40e_mb_event_param ret_param;
+ bool b_op = TRUE;
+
+ if (vf_id > pf->vf_num - 1 || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "invalid argument");
+ return;
+ }
+
+ vf = &pf->vfs[vf_id];
+ if (!vf->vsi) {
+ PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
+ i40e_pf_host_send_msg_to_vf(vf, opcode,
+ I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
+ return;
+ }
+
+ /**
+ * initialise structure to send to user application
+ * will return response from user in retval field
+ */
+ ret_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
+ ret_param.vfid = vf_id;
+ ret_param.msg_type = opcode;
+ ret_param.msg = (void *)msg;
+ ret_param.msglen = msglen;
+
+ /**
+ * Ask user application if we're allowed to perform those functions.
+ * If we get ret_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED,
+ * then business as usual.
+ * If RTE_PMD_I40E_MB_EVENT_NOOP_ACK or RTE_PMD_I40E_MB_EVENT_NOOP_NACK,
+ * do nothing and send not_supported to VF. As PF must send a response
+ * to VF and ACK/NACK is not defined.
+ */
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);
+ if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
+ PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
+ opcode);
+ b_op = FALSE;
+ }
+
+ switch (opcode) {
+ case VIRTCHNL_OP_VERSION:
+ PMD_DRV_LOG(INFO, "OP_VERSION received");
+ i40e_pf_host_process_cmd_version(vf, msg, b_op);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ PMD_DRV_LOG(INFO, "OP_RESET_VF received");
+ i40e_pf_host_process_cmd_reset_vf(vf);
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
+ i40e_pf_host_process_cmd_get_vf_resource(vf, msg, b_op);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
+ i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
+ msglen, b_op);
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
+ i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
+ if (b_op) {
+ i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
+ i40e_notify_vf_link_status(dev, vf);
+ } else {
+ i40e_pf_host_send_msg_to_vf(
+ vf, VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ }
+ break;
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
+ i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
+ i40e_pf_host_process_cmd_add_ether_address(vf, msg,
+ msglen, b_op);
+ break;
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
+ i40e_pf_host_process_cmd_del_ether_address(vf, msg,
+ msglen, b_op);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
+ i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN:
+ PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
+ i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
+ i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
+ msglen, b_op);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ PMD_DRV_LOG(INFO, "OP_GET_STATS received");
+ i40e_pf_host_process_cmd_get_stats(vf, b_op);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ PMD_DRV_LOG(INFO, "OP_ENABLE_VLAN_STRIPPING received");
+ i40e_pf_host_process_cmd_enable_vlan_strip(vf, b_op);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received");
+ i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_RSS_LUT received");
+ i40e_pf_host_process_cmd_set_rss_lut(vf, msg, msglen, b_op);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_RSS_KEY received");
+ i40e_pf_host_process_cmd_set_rss_key(vf, msg, msglen, b_op);
+ break;
+ /* Don't add command supported below, which will
+ * return an error code.
+ */
+ default:
+ PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
+ i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
+ NULL, 0);
+ break;
+ }
+}
+
+int
+i40e_pf_host_init(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ int ret, i;
+ uint32_t val;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /**
+ * return if SRIOV not enabled, VF number not configured or
+ * no queue assigned.
+ */
+ if(!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0)
+ return I40E_SUCCESS;
+
+ /* Allocate memory to store VF structure */
+ pf->vfs = rte_zmalloc("i40e_pf_vf",sizeof(*pf->vfs) * pf->vf_num, 0);
+ if(pf->vfs == NULL)
+ return -ENOMEM;
+
+ /* Disable irq0 for VFR event */
+ i40e_pf_disable_irq0(hw);
+
+ /* Disable VF link status interrupt */
+ val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
+ val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
+ I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
+ I40E_WRITE_FLUSH(hw);
+
+ for (i = 0; i < pf->vf_num; i++) {
+ pf->vfs[i].pf = pf;
+ pf->vfs[i].state = I40E_VF_INACTIVE;
+ pf->vfs[i].vf_idx = i;
+ ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
+ if (ret != I40E_SUCCESS)
+ goto fail;
+ }
+
+ RTE_ETH_DEV_SRIOV(dev).active = pf->vf_num;
+ /* restore irq0 */
+ i40e_pf_enable_irq0(hw);
+
+ return I40E_SUCCESS;
+
+fail:
+ rte_free(pf->vfs);
+ i40e_pf_enable_irq0(hw);
+
+ return ret;
+}
+
+int
+i40e_pf_host_uninit(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t val;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /**
+ * return if SRIOV not enabled, VF number not configured or
+ * no queue assigned.
+ */
+ if ((!hw->func_caps.sr_iov_1_1) ||
+ (pf->vf_num == 0) ||
+ (pf->vf_nb_qps == 0))
+ return I40E_SUCCESS;
+
+ /* free memory to store VF structure */
+ rte_free(pf->vfs);
+ pf->vfs = NULL;
+
+ /* Disable irq0 for VFR event */
+ i40e_pf_disable_irq0(hw);
+
+ /* Disable VF link status interrupt */
+ val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
+ val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
+ I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
+ I40E_WRITE_FLUSH(hw);
+
+ return I40E_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_pf.h b/src/spdk/dpdk/drivers/net/i40e/i40e_pf.h
new file mode 100644
index 00000000..1809ba4d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_pf.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#ifndef _I40E_PF_H_
+#define _I40E_PF_H_
+
+/* Default setting on number of VSIs that VF can contain */
+#define I40E_DEFAULT_VF_VSI_NUM 1
+
+#define I40E_VIRTCHNL_OFFLOAD_CAPS ( \
+ VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF | \
+ VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+
+struct virtchnl_vlan_offload_info {
+ uint16_t vsi_id;
+ uint8_t enable_vlan_strip;
+ uint8_t reserved;
+};
+
+/*
+ * Macro to calculate the memory size for configuring VSI queues
+ * via virtual channel.
+ */
+#define I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(x, n) \
+ (sizeof(*(x)) + sizeof((x)->qpair[0]) * (n))
+
+int i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset);
+void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
+ uint16_t abs_vf_id, uint32_t opcode,
+ uint32_t retval,
+ uint8_t *msg, uint16_t msglen);
+int i40e_pf_host_init(struct rte_eth_dev *dev);
+int i40e_pf_host_uninit(struct rte_eth_dev *dev);
+void i40e_notify_vf_link_status(struct rte_eth_dev *dev,
+ struct i40e_pf_vf *vf);
+
+#endif /* _I40E_PF_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_regs.h b/src/spdk/dpdk/drivers/net/i40e/i40e_regs.h
new file mode 100644
index 00000000..b19bb1d5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_regs.h
@@ -0,0 +1,968 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+struct i40e_reg_info {
+ uint32_t base_addr;
+ uint32_t count1;
+ uint32_t stride1;
+ uint32_t count2;
+ uint32_t stride2;
+ const char *name;
+};
+
+static const struct i40e_reg_info i40e_regs_adminq[] = {
+ {I40E_VFQF_HENA(0), 1, 4, 0, 0, "VFQF_HENA"},
+ {I40E_VFQF_HKEY(0), 12, 4, 0, 0, "VFQF_HKEY"},
+ {I40E_VFQF_HREGION(0), 7, 4, 0, 0, "VFQF_HREGION"},
+ {I40E_VPQF_CTL(0), 127, 4, 0, 0, "VPQF_CTL"},
+ {I40E_PFLAN_QALLOC, 0, 0, 0, 0, "PFLAN_QALLOC"},
+ {I40E_PFQF_CTL_0, 0, 0, 0, 0, "PFQF_CTL_0"},
+ {I40E_VSILAN_QTABLE(0, 0), 7, 2048, 383, 4, "VSILAN_QTABLE"},
+ {I40E_VSIQF_TCREGION(0, 0), 3, 2048, 383, 4, "VSIQF_TCREGION"},
+ {I40E_VSILAN_QBASE(0), 383, 4, 0, 0, "VSILAN_QBASE"},
+ {I40E_VSIQF_CTL(0), 383, 4, 0, 0, "VSIQF_CTL"},
+ {I40E_PFQF_HKEY(0), 12, 128, 0, 0, "PFQF_HKEY"},
+ {I40E_PFQF_HREGION(0), 7, 128, 0, 0, "PFQF_HREGION"},
+ {I40E_PFQF_HENA(0), 1, 128, 0, 0, "PFQF_HENA"},
+ {I40E_PFQF_FDALLOC, 0, 0, 0, 0, "PFQF_FDALLOC"},
+ {I40E_PRTQF_FD_INSET(0, 0), 63, 64, 1, 32, "PRTQF_FD_INSET"},
+ {I40E_PRTQF_FD_INSET(0, 0), 63, 64, 1, 32, "PRTQF_FD_INSET"},
+ {I40E_PRTQF_FD_MSK(0, 0), 63, 64, 1, 32, "PRTQF_FD_MSK"},
+ {I40E_PRTQF_FD_FLXINSET(0), 63, 32, 0, 0, "PRTQF_FD_FLXINSET"},
+ {I40E_PRTQF_CTL_0, 0, 0, 0, 0, "PRTQF_CTL_0"},
+ {I40E_GLQF_FD_MSK(0, 0), 1, 4, 63, 8, "GLQF_FD_MSK"},
+ {I40E_GLQF_HASH_INSET(0, 0), 1, 4, 63, 8, "GLQF_HASH_INSET"},
+ {I40E_GLQF_HASH_MSK(0, 0), 1, 4, 63, 8, "GLQF_HASH_MSK"},
+ {I40E_GLQF_SWAP(0, 0), 1, 4, 63, 8, "GLQF_SWAP"},
+ {I40E_GLFCOE_RCTL, 0, 0, 0, 0, "GLFCOE_RCTL"},
+ {I40E_GLQF_CTL, 0, 0, 0, 0, "GLQF_CTL"},
+ {I40E_GLQF_HSYM(0), 63, 4, 0, 0, "GLQF_HSYM"},
+ {0, 0, 0, 0, 0, NULL}
+};
+
+static const struct i40e_reg_info i40e_regs_others[] = {
+ {I40E_QTX_TAIL1(0), 15, 4, 0, 0, "QTX_TAIL1"},
+ {I40E_VFPE_CQPDB(0), 127, 4, 0, 0, "VFPE_CQPDB"},
+ {I40E_VFPE_CQPTAIL(0), 127, 4, 0, 0, "VFPE_CQPTAIL"},
+ {I40E_VFPE_CCQPSTATUS(0), 127, 4, 0, 0, "VFPE_CCQPSTATUS"},
+ {I40E_VFPE_CCQPLOW(0), 127, 4, 0, 0, "VFPE_CCQPLOW"},
+ {I40E_VFPE_CCQPHIGH(0), 127, 4, 0, 0, "VFPE_CCQPHIGH"},
+ {I40E_VFPE_IPCONFIG0(0), 127, 4, 0, 0, "VFPE_IPCONFIG0"},
+ {I40E_VFPE_CQPERRCODES(0), 127, 4, 0, 0, "VFPE_CQPERRCODES"},
+ {I40E_QRX_TAIL1(0), 15, 4, 0, 0, "QRX_TAIL1"},
+ {I40E_VFINT_ITRN1(0, 0), 2, 64, 15, 4, "VFINT_ITRN1"},
+ {I40E_VFPE_TCPNOWTIMER(0), 127, 4, 0, 0, "VFPE_TCPNOWTIMER"},
+ {I40E_VFPE_MRTEIDXMASK(0), 127, 4, 0, 0, "VFPE_MRTEIDXMASK"},
+ {I40E_VFPE_RCVUNEXPECTEDERROR(0), 127, 4, 0, 0,
+ "VFPE_RCVUNEXPECTEDERROR"},
+ {I40E_VFINT_DYN_CTLN1(0), 15, 4, 0, 0, "VFINT_DYN_CTLN1"},
+ {I40E_VFINT_ICR01, 0, 0, 0, 0, "VFINT_ICR01"},
+ {I40E_VFINT_ITR01(0), 2, 4, 0, 0, "VFINT_ITR01"},
+ {I40E_VFINT_ICR0_ENA1, 0, 0, 0, 0, "VFINT_ICR0_ENA1"},
+ {I40E_VFINT_STAT_CTL01, 0, 0, 0, 0, "VFINT_STAT_CTL01"},
+ {I40E_VFINT_DYN_CTL01, 0, 0, 0, 0, "VFINT_DYN_CTL01"},
+ {I40E_VF_ARQBAH1, 0, 0, 0, 0, "VF_ARQBAH1"},
+ {I40E_VF_ATQH1, 0, 0, 0, 0, "VF_ATQH1"},
+ {I40E_VF_ATQLEN1, 0, 0, 0, 0, "VF_ATQLEN1"},
+ {I40E_VF_ARQBAL1, 0, 0, 0, 0, "VF_ARQBAL1"},
+ {I40E_VF_ARQT1, 0, 0, 0, 0, "VF_ARQT1"},
+ {I40E_VF_ARQH1, 0, 0, 0, 0, "VF_ARQH1"},
+ {I40E_VF_ATQBAH1, 0, 0, 0, 0, "VF_ATQBAH1"},
+ {I40E_VF_ATQBAL1, 0, 0, 0, 0, "VF_ATQBAL1"},
+ {I40E_VF_ARQLEN1, 0, 0, 0, 0, "VF_ARQLEN1"},
+ {I40E_PFPE_CQPDB, 0, 0, 0, 0, "PFPE_CQPDB"},
+ {I40E_PFPE_CQPTAIL, 0, 0, 0, 0, "PFPE_CQPTAIL"},
+ {I40E_PFPE_CCQPSTATUS, 0, 0, 0, 0, "PFPE_CCQPSTATUS"},
+ {I40E_PFPE_CCQPLOW, 0, 0, 0, 0, "PFPE_CCQPLOW"},
+ {I40E_PFPE_CCQPHIGH, 0, 0, 0, 0, "PFPE_CCQPHIGH"},
+ {I40E_PFPE_IPCONFIG0, 0, 0, 0, 0, "PFPE_IPCONFIG0"},
+ {I40E_VF_ATQT1, 0, 0, 0, 0, "VF_ATQT1"},
+ {I40E_PFPE_TCPNOWTIMER, 0, 0, 0, 0, "PFPE_TCPNOWTIMER"},
+ {I40E_PFPE_MRTEIDXMASK, 0, 0, 0, 0, "PFPE_MRTEIDXMASK"},
+ {I40E_PFPE_RCVUNEXPECTEDERROR, 0, 0, 0, 0, "PFPE_RCVUNEXPECTEDERROR"},
+ {I40E_PFPE_UDACTRL, 0, 0, 0, 0, "PFPE_UDACTRL"},
+ {I40E_PFPE_UDAUCFBQPN, 0, 0, 0, 0, "PFPE_UDAUCFBQPN"},
+ {I40E_VFGEN_RSTAT, 0, 0, 0, 0, "VFGEN_RSTAT"},
+ {I40E_PFPE_CQPERRCODES, 0, 0, 0, 0, "PFPE_CQPERRCODES"},
+ {I40E_PFPE_FLMXMITALLOCERR, 0, 0, 0, 0, "PFPE_FLMXMITALLOCERR"},
+ {I40E_PFPE_FLMQ1ALLOCERR, 0, 0, 0, 0, "PFPE_FLMQ1ALLOCERR"},
+ {I40E_VFPE_IPCONFIG01, 0, 0, 0, 0, "VFPE_IPCONFIG01"},
+ {I40E_VFPE_MRTEIDXMASK1, 0, 0, 0, 0, "VFPE_MRTEIDXMASK1"},
+ {I40E_VFPE_RCVUNEXPECTEDERROR1, 0, 0, 0, 0, "VFPE_RCVUNEXPECTEDERROR1"},
+ {I40E_VFPE_CCQPHIGH1, 0, 0, 0, 0, "VFPE_CCQPHIGH1"},
+ {I40E_VFPE_CQPERRCODES1, 0, 0, 0, 0, "VFPE_CQPERRCODES1"},
+ {I40E_VFPE_CQPTAIL1, 0, 0, 0, 0, "VFPE_CQPTAIL1"},
+ {I40E_VFPE_AEQALLOC1, 0, 0, 0, 0, "VFPE_AEQALLOC1"},
+ {I40E_VFPE_TCPNOWTIMER1, 0, 0, 0, 0, "VFPE_TCPNOWTIMER1"},
+ {I40E_VFPE_CCQPLOW1, 0, 0, 0, 0, "VFPE_CCQPLOW1"},
+ {I40E_VFPE_CQACK1, 0, 0, 0, 0, "VFPE_CQACK1"},
+ {I40E_VFPE_CQARM1, 0, 0, 0, 0, "VFPE_CQARM1"},
+ {I40E_VFPE_CCQPSTATUS1, 0, 0, 0, 0, "VFPE_CCQPSTATUS1"},
+ {I40E_VFPE_CQPDB1, 0, 0, 0, 0, "VFPE_CQPDB1"},
+ {I40E_GLPE_VFUDACTRL(0), 31, 4, 0, 0, "GLPE_VFUDACTRL"},
+ {I40E_VFPE_WQEALLOC1, 0, 0, 0, 0, "VFPE_WQEALLOC1"},
+ {I40E_GLPE_VFUDAUCFBQPN(0), 31, 4, 0, 0, "GLPE_VFUDAUCFBQPN"},
+ {I40E_GLPE_VFFLMXMITALLOCERR(0), 31, 4, 0, 0, "GLPE_VFFLMXMITALLOCERR"},
+ {I40E_GLPE_VFFLMQ1ALLOCERR(0), 31, 4, 0, 0, "GLPE_VFFLMQ1ALLOCERR"},
+ {I40E_VFQF_HLUT(0), 15, 4, 0, 0, "VFQF_HLUT"},
+ {I40E_GLPE_CPUSTATUS0, 0, 0, 0, 0, "GLPE_CPUSTATUS0"},
+ {I40E_GLPE_CPUSTATUS1, 0, 0, 0, 0, "GLPE_CPUSTATUS1"},
+ {I40E_GLPE_CPUSTATUS2, 0, 0, 0, 0, "GLPE_CPUSTATUS2"},
+ {I40E_GLPE_CPUTRIG0, 0, 0, 0, 0, "GLPE_CPUTRIG0"},
+ {I40E_GLPE_VFFLMOBJCTRL(0), 31, 4, 0, 0, "GLPE_VFFLMOBJCTRL"},
+ {I40E_VFCM_PE_ERRINFO, 0, 0, 0, 0, "VFCM_PE_ERRINFO"},
+ {I40E_GLPE_RUPM_GCTL, 0, 0, 0, 0, "GLPE_RUPM_GCTL"},
+ {I40E_GLPE_DUAL40_RUPM, 0, 0, 0, 0, "GLPE_DUAL40_RUPM"},
+ {I40E_GLPE_RUPM_TXHOST_EN, 0, 0, 0, 0, "GLPE_RUPM_TXHOST_EN"},
+ {I40E_PRTPE_RUPM_THRES, 0, 0, 0, 0, "PRTPE_RUPM_THRES"},
+ {I40E_PRTPE_RUPM_CTL, 0, 0, 0, 0, "PRTPE_RUPM_CTL"},
+ {I40E_PRTPE_RUPM_PFCCTL, 0, 0, 0, 0, "PRTPE_RUPM_PFCCTL"},
+ {I40E_PRTPE_RUPM_PFCPC, 0, 0, 0, 0, "PRTPE_RUPM_PFCPC"},
+ {I40E_PRTPE_RUPM_PFCTCC, 0, 0, 0, 0, "PRTPE_RUPM_PFCTCC"},
+ {I40E_GLPE_RUPM_PUSHPOOL, 0, 0, 0, 0, "GLPE_RUPM_PUSHPOOL"},
+ {I40E_GLPE_RUPM_FLRPOOL, 0, 0, 0, 0, "GLPE_RUPM_FLRPOOL"},
+ {I40E_GLPE_RUPM_PTXPOOL, 0, 0, 0, 0, "GLPE_RUPM_PTXPOOL"},
+ {I40E_GLPE_RUPM_CQPPOOL, 0, 0, 0, 0, "GLPE_RUPM_CQPPOOL"},
+ {I40E_PRTE_RUPM_TCCNTR03, 0, 0, 0, 0, "PRTE_RUPM_TCCNTR03"},
+ {I40E_PRTPE_RUPM_TCCNTR47, 0, 0, 0, 0, "PRTPE_RUPM_TCCNTR47"},
+ {I40E_PRTPE_RUPM_CNTR, 0, 0, 0, 0, "PRTPE_RUPM_CNTR"},
+ {I40E_PRTPE_RUPM_PTXTCCNTR03, 0, 0, 0, 0, "PRTPE_RUPM_PTXTCCNTR03"},
+ {I40E_PRTPE_RUPM_PTCTCCNTR47, 0, 0, 0, 0, "PRTPE_RUPM_PTCTCCNTR47"},
+ {I40E_VFCM_PE_ERRDATA, 0, 0, 0, 0, "VFCM_PE_ERRDATA"},
+ {I40E_PFPCI_VF_FLUSH_DONE, 0, 0, 0, 0, "PFPCI_VF_FLUSH_DONE"},
+ {I40E_GLPES_PFRXVLANERR(0), 15, 4, 0, 0, "GLPES_PFRXVLANERR"},
+ {I40E_GLPES_PFIP4RXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXOCTSLO"},
+ {I40E_GLPES_PFIP4RXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXOCTSHI"},
+ {I40E_GLPES_PFIP4RXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXPKTSLO"},
+ {I40E_GLPES_PFIP4RXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXPKTSHI"},
+ {I40E_GLPES_PFIP4RXDISCARD(0), 15, 4, 0, 0, "GLPES_PFIP4RXDISCARD"},
+ {I40E_GLPES_PFIP4RXTRUNC(0), 15, 4, 0, 0, "GLPES_PFIP4RXTRUNC"},
+ {I40E_GLPES_PFIP4RXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXFRAGSLO"},
+ {I40E_GLPES_PFIP4RXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXFRAGSHI"},
+ {I40E_GLPES_PFIP4RXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCOCTSLO"},
+ {I40E_GLPES_PFIP4RXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCOCTSHI"},
+ {I40E_GLPES_PFIP4RXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCPKTSLO"},
+ {I40E_GLPES_PFIP4RXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCPKTSHI"},
+ {I40E_GLPES_PFIP6RXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXOCTSLO"},
+ {I40E_GLPES_PFIP6RXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXOCTSHI"},
+ {I40E_GLPES_PFIP6RXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXPKTSLO"},
+ {I40E_GLPES_PFIP6RXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXPKTSHI"},
+ {I40E_GLPES_PFIP6RXDISCARD(0), 15, 4, 0, 0, "GLPES_PFIP6RXDISCARD"},
+ {I40E_GLPES_PFIP6RXTRUNC(0), 15, 4, 0, 0, "GLPES_PFIP6RXTRUNC"},
+ {I40E_GLPES_PFIP6RXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXFRAGSLO"},
+ {I40E_GLPES_PFIP6RXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXFRAGSHI"},
+ {I40E_GLPES_PFIP6RXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCOCTSLO"},
+ {I40E_GLPES_PFIP6RXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCOCTSHI"},
+ {I40E_GLPES_PFIP6RXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCPKTSLO"},
+ {I40E_GLPES_PFIP6RXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCPKTSHI"},
+ {I40E_GLPES_PFIP4TXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXOCTSLO"},
+ {I40E_GLPES_PFIP4TXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXOCTSHI"},
+ {I40E_GLPES_PFIP4TXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXPKTSLO"},
+ {I40E_GLPES_PFIP4TXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXPKTSHI"},
+ {I40E_GLPES_PFIP4TXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXFRAGSLO"},
+ {I40E_GLPES_PFIP4TXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXFRAGSHI"},
+ {I40E_GLPES_PFIP4TXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCOCTSLO"},
+ {I40E_GLPES_PFIP4TXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCOCTSHI"},
+ {I40E_GLPES_PFIP4TXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCPKTSLO"},
+ {I40E_GLPES_PFIP4TXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCPKTSHI"},
+ {I40E_GLPES_PFIP6TXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXOCTSLO"},
+ {I40E_GLPES_PFIP6TXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXOCTSHI"},
+ {I40E_GLPES_PFIP6TXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXPKTSLO"},
+ {I40E_GLPES_PFIP6TXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXPKTSHI"},
+ {I40E_GLPES_PFIP6TXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXFRAGSLO"},
+ {I40E_GLPES_PFIP6TXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXFRAGSHI"},
+ {I40E_GLPES_PFIP6TXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCOCTSLO"},
+ {I40E_GLPES_PFIP6TXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCOCTSHI"},
+ {I40E_GLPES_PFIP6TXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCPKTSLO"},
+ {I40E_GLPES_PFIP6TXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCPKTSHI"},
+ {I40E_GLPES_PFIP4TXNOROUTE(0), 15, 4, 0, 0, "GLPES_PFIP4TXNOROUTE"},
+ {I40E_GLPES_PFIP6TXNOROUTE(0), 15, 4, 0, 0, "GLPES_PFIP6TXNOROUTE"},
+ {I40E_GLPES_PFTCPRXSEGSLO(0), 15, 8, 0, 0, "GLPES_PFTCPRXSEGSLO"},
+ {I40E_GLPES_PFTCPRXSEGSHI(0), 15, 8, 0, 0, "GLPES_PFTCPRXSEGSHI"},
+ {I40E_GLPES_PFTCPRXOPTERR(0), 15, 4, 0, 0, "GLPES_PFTCPRXOPTERR"},
+ {I40E_GLPES_PFTCPRXPROTOERR(0), 15, 4, 0, 0, "GLPES_PFTCPRXPROTOERR"},
+ {I40E_GLPES_PFTCPTXSEGLO(0), 15, 8, 0, 0, "GLPES_PFTCPTXSEGLO"},
+ {I40E_GLPES_PFTCPTXSEGHI(0), 15, 8, 0, 0, "GLPES_PFTCPTXSEGHI"},
+ {I40E_GLPES_PFTCPRTXSEG(0), 15, 4, 0, 0, "GLPES_PFTCPRTXSEG"},
+ {I40E_GLPES_PFUDPRXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFUDPRXPKTSLO"},
+ {I40E_GLPES_PFUDPRXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFUDPRXPKTSHI"},
+ {I40E_GLPES_PFUDPTXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFUDPTXPKTSLO"},
+ {I40E_GLPES_PFUDPTXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFUDPTXPKTSHI"},
+ {I40E_GLPES_PFRDMARXWRSLO(0), 15, 8, 0, 0, "GLPES_PFRDMARXWRSLO"},
+ {I40E_GLPES_PFRDMARXWRSHI(0), 15, 8, 0, 0, "GLPES_PFRDMARXWRSHI"},
+ {I40E_GLPES_PFRDMARXRDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMARXRDSLO"},
+ {I40E_GLPES_PFRDMARXRDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMARXRDSHI"},
+ {I40E_GLPES_PFRDMARXSNDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMARXSNDSLO"},
+ {I40E_GLPES_PFRDMARXSNDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMARXSNDSHI"},
+ {I40E_GLPES_PFRDMATXWRSLO(0), 15, 8, 0, 0, "GLPES_PFRDMATXWRSLO"},
+ {I40E_GLPES_PFRDMATXWRSHI(0), 15, 8, 0, 0, "GLPES_PFRDMATXWRSHI"},
+ {I40E_GLPES_PFRDMATXRDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMATXRDSLO"},
+ {I40E_GLPES_PFRDMATXRDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMATXRDSHI"},
+ {I40E_GLPES_PFRDMATXSNDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMATXSNDSLO"},
+ {I40E_GLPES_PFRDMATXSNDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMATXSNDSHI"},
+ {I40E_GLPES_PFRDMAVBNDLO(0), 15, 8, 0, 0, "GLPES_PFRDMAVBNDLO"},
+ {I40E_GLPES_PFRDMAVBNDHI(0), 15, 8, 0, 0, "GLPES_PFRDMAVBNDHI"},
+ {I40E_GLPES_PFRDMAVINVLO(0), 15, 8, 0, 0, "GLPES_PFRDMAVINVLO"},
+ {I40E_GLPES_PFRDMAVINVHI(0), 15, 8, 0, 0, "GLPES_PFRDMAVINVHI"},
+ {I40E_GLPES_VFRXVLANERR(0), 31, 4, 0, 0, "GLPES_VFRXVLANERR"},
+ {I40E_GLPES_VFIP4RXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXOCTSLO"},
+ {I40E_GLPES_VFIP4RXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXOCTSHI"},
+ {I40E_GLPES_VFIP4RXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXPKTSLO"},
+ {I40E_GLPES_VFIP4RXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXPKTSHI"},
+ {I40E_GLPES_VFIP4RXDISCARD(0), 31, 4, 0, 0, "GLPES_VFIP4RXDISCARD"},
+ {I40E_GLPES_VFIP4RXTRUNC(0), 31, 4, 0, 0, "GLPES_VFIP4RXTRUNC"},
+ {I40E_GLPES_VFIP4RXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXFRAGSLO"},
+ {I40E_GLPES_VFIP4RXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXFRAGSHI"},
+ {I40E_GLPES_VFIP4RXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCOCTSLO"},
+ {I40E_GLPES_VFIP4RXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCOCTSHI"},
+ {I40E_GLPES_VFIP4RXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCPKTSLO"},
+ {I40E_GLPES_VFIP4RXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCPKTSHI"},
+ {I40E_GLPES_VFIP6RXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXOCTSLO"},
+ {I40E_GLPES_VFIP6RXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXOCTSHI"},
+ {I40E_GLPES_VFIP6RXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXPKTSLO"},
+ {I40E_GLPES_VFIP6RXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXPKTSHI"},
+ {I40E_GLPES_VFIP6RXDISCARD(0), 31, 4, 0, 0, "GLPES_VFIP6RXDISCARD"},
+ {I40E_GLPES_VFIP6RXTRUNC(0), 31, 4, 0, 0, "GLPES_VFIP6RXTRUNC"},
+ {I40E_GLPES_VFIP6RXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXFRAGSLO"},
+ {I40E_GLPES_VFIP6RXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXFRAGSHI"},
+ {I40E_GLPES_VFIP6RXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCOCTSLO"},
+ {I40E_GLPES_VFIP6RXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCOCTSHI"},
+ {I40E_GLPES_VFIP6RXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCPKTSLO"},
+ {I40E_GLPES_VFIP6RXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCPKTSHI"},
+ {I40E_GLPES_VFIP4TXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXOCTSLO"},
+ {I40E_GLPES_VFIP4TXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXOCTSHI"},
+ {I40E_GLPES_VFIP4TXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXPKTSLO"},
+ {I40E_GLPES_VFIP4TXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXPKTSHI"},
+ {I40E_GLPES_VFIP4TXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXFRAGSLO"},
+ {I40E_GLPES_VFIP4TXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXFRAGSHI"},
+ {I40E_GLPES_VFIP4TXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCOCTSLO"},
+ {I40E_GLPES_VFIP4TXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCOCTSHI"},
+ {I40E_GLPES_VFIP4TXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCPKTSLO"},
+ {I40E_GLPES_VFIP4TXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCPKTSHI"},
+ {I40E_GLPES_VFIP6TXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXOCTSLO"},
+ {I40E_GLPES_VFIP6TXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXOCTSHI"},
+ {I40E_GLPES_VFIP6TXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXPKTSLO"},
+ {I40E_GLPES_VFIP6TXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXPKTSHI"},
+ {I40E_GLPES_VFIP6TXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXFRAGSLO"},
+ {I40E_GLPES_VFIP6TXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXFRAGSHI"},
+ {I40E_GLPES_VFIP6TXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCOCTSLO"},
+ {I40E_GLPES_VFIP6TXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCOCTSHI"},
+ {I40E_GLPES_VFIP6TXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCPKTSLO"},
+ {I40E_GLPES_VFIP6TXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCPKTSHI"},
+ {I40E_GLPES_VFIP4TXNOROUTE(0), 31, 4, 0, 0, "GLPES_VFIP4TXNOROUTE"},
+ {I40E_GLPES_VFIP6TXNOROUTE(0), 31, 4, 0, 0, "GLPES_VFIP6TXNOROUTE"},
+ {I40E_GLPES_VFTCPRXSEGSLO(0), 31, 8, 0, 0, "GLPES_VFTCPRXSEGSLO"},
+ {I40E_GLPES_VFTCPRXSEGSHI(0), 31, 8, 0, 0, "GLPES_VFTCPRXSEGSHI"},
+ {I40E_GLPES_VFTCPRXOPTERR(0), 31, 4, 0, 0, "GLPES_VFTCPRXOPTERR"},
+ {I40E_GLPES_VFTCPRXPROTOERR(0), 31, 4, 0, 0, "GLPES_VFTCPRXPROTOERR"},
+ {I40E_GLPES_VFTCPTXSEGLO(0), 31, 8, 0, 0, "GLPES_VFTCPTXSEGLO"},
+ {I40E_GLPES_VFTCPTXSEGHI(0), 31, 8, 0, 0, "GLPES_VFTCPTXSEGHI"},
+ {I40E_GLPES_VFTCPRTXSEG(0), 31, 4, 0, 0, "GLPES_VFTCPRTXSEG"},
+ {I40E_GLPES_VFUDPRXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFUDPRXPKTSLO"},
+ {I40E_GLPES_VFUDPRXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFUDPRXPKTSHI"},
+ {I40E_GLPES_VFUDPTXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFUDPTXPKTSLO"},
+ {I40E_GLPES_VFUDPTXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFUDPTXPKTSHI"},
+ {I40E_GLPES_VFRDMARXWRSLO(0), 31, 8, 0, 0, "GLPES_VFRDMARXWRSLO"},
+ {I40E_GLPES_VFRDMARXWRSHI(0), 31, 8, 0, 0, "GLPES_VFRDMARXWRSHI"},
+ {I40E_GLPES_VFRDMARXRDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMARXRDSLO"},
+ {I40E_GLPES_VFRDMARXRDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMARXRDSHI"},
+ {I40E_GLPES_VFRDMARXSNDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMARXSNDSLO"},
+ {I40E_GLPES_VFRDMARXSNDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMARXSNDSHI"},
+ {I40E_GLPES_VFRDMATXWRSLO(0), 31, 8, 0, 0, "GLPES_VFRDMATXWRSLO"},
+ {I40E_GLPES_VFRDMATXWRSHI(0), 31, 8, 0, 0, "GLPES_VFRDMATXWRSHI"},
+ {I40E_GLPES_VFRDMATXRDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMATXRDSLO"},
+ {I40E_GLPES_VFRDMATXRDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMATXRDSHI"},
+ {I40E_GLPES_VFRDMATXSNDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMATXSNDSLO"},
+ {I40E_GLPES_VFRDMATXSNDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMATXSNDSHI"},
+ {I40E_GLPES_VFRDMAVBNDLO(0), 31, 8, 0, 0, "GLPES_VFRDMAVBNDLO"},
+ {I40E_GLPES_VFRDMAVBNDHI(0), 31, 8, 0, 0, "GLPES_VFRDMAVBNDHI"},
+ {I40E_GLPES_VFRDMAVINVLO(0), 31, 8, 0, 0, "GLPES_VFRDMAVINVLO"},
+ {I40E_GLPES_VFRDMAVINVHI(0), 31, 8, 0, 0, "GLPES_VFRDMAVINVHI"},
+ {I40E_GLPES_RDMARXUNALIGN, 0, 0, 0, 0, "GLPES_RDMARXUNALIGN"},
+ {I40E_GLPES_RDMARXOOONOMARK, 0, 0, 0, 0, "GLPES_RDMARXOOONOMARK"},
+ {I40E_GLPES_RDMARXMULTFPDUSLO, 0, 0, 0, 0, "GLPES_RDMARXMULTFPDUSLO"},
+ {I40E_GLPES_RDMARXMULTFPDUSHI, 0, 0, 0, 0, "GLPES_RDMARXMULTFPDUSHI"},
+ {I40E_GLPES_RDMARXOOODDPLO, 0, 0, 0, 0, "GLPES_RDMARXOOODDPLO"},
+ {I40E_GLPES_RDMARXOOODDPHI, 0, 0, 0, 0, "GLPES_RDMARXOOODDPHI"},
+ {I40E_GLPES_TCPRXPUREACKSLO, 0, 0, 0, 0, "GLPES_TCPRXPUREACKSLO"},
+ {I40E_GLPES_TCPRXPUREACKHI, 0, 0, 0, 0, "GLPES_TCPRXPUREACKHI"},
+ {I40E_GLPES_TCPRXONEHOLELO, 0, 0, 0, 0, "GLPES_TCPRXONEHOLELO"},
+ {I40E_GLPES_TCPRXONEHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXONEHOLEHI"},
+ {I40E_GLPES_TCPRXTWOHOLELO, 0, 0, 0, 0, "GLPES_TCPRXTWOHOLELO"},
+ {I40E_GLPES_TCPRXTWOHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXTWOHOLEHI"},
+ {I40E_GLPES_TCPRXTHREEHOLELO, 0, 0, 0, 0, "GLPES_TCPRXTHREEHOLELO"},
+ {I40E_GLPES_TCPRXTHREEHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXTHREEHOLEHI"},
+ {I40E_GLPES_TCPRXFOURHOLELO, 0, 0, 0, 0, "GLPES_TCPRXFOURHOLELO"},
+ {I40E_GLPES_TCPRXFOURHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXFOURHOLEHI"},
+ {I40E_GLPES_TCPTXRETRANSFASTLO, 0, 0, 0, 0, "GLPES_TCPTXRETRANSFASTLO"},
+ {I40E_GLPES_TCPTXRETRANSFASTHI, 0, 0, 0, 0, "GLPES_TCPTXRETRANSFASTHI"},
+ {I40E_GLPES_TCPTXTOUTSFASTLO, 0, 0, 0, 0, "GLPES_TCPTXTOUTSFASTLO"},
+ {I40E_GLPES_TCPTXTOUTSFASTHI, 0, 0, 0, 0, "GLPES_TCPTXTOUTSFASTHI"},
+ {I40E_GLPES_TCPTXTOUTSLO, 0, 0, 0, 0, "GLPES_TCPTXTOUTSLO"},
+ {I40E_GLPES_TCPTXTOUTSHI, 0, 0, 0, 0, "GLPES_TCPTXTOUTSHI"},
+ {I40E_PRTDCB_TCMSTC_RLPM(0), 7, 32, 0, 0, "PRTDCB_TCMSTC_RLPM"},
+ {I40E_PRTDCB_RLPMC, 0, 0, 0, 0, "PRTDCB_RLPMC"},
+ {I40E_PRTDCB_TCPMC_RLPM, 0, 0, 0, 0, "PRTDCB_TCPMC_RLPM"},
+ {I40E_VFINT_ITRN(0, 0), 2, 2048, 511, 4, "VFINT_ITRN"},
+ {I40E_VFINT_DYN_CTLN(0), 511, 4, 0, 0, "VFINT_DYN_CTLN"},
+ {I40E_VPINT_LNKLSTN(0), 511, 4, 0, 0, "VPINT_LNKLSTN"},
+ {I40E_VPINT_RATEN(0), 511, 4, 0, 0, "VPINT_RATEN"},
+ {I40E_VPINT_CEQCTL(0), 511, 4, 0, 0, "VPINT_CEQCTL"},
+ {I40E_VFINT_ITR0(0, 0), 2, 1024, 127, 4, "VFINT_ITR0"},
+ {I40E_VFINT_STAT_CTL0(0), 127, 4, 0, 0, "VFINT_STAT_CTL0"},
+ {I40E_VFINT_DYN_CTL0(0), 127, 4, 0, 0, "VFINT_DYN_CTL0"},
+ {I40E_VPINT_LNKLST0(0), 127, 4, 0, 0, "VPINT_LNKLST0"},
+ {I40E_VPINT_RATE0(0), 127, 4, 0, 0, "VPINT_RATE0"},
+ {I40E_VPINT_AEQCTL(0), 127, 4, 0, 0, "VPINT_AEQCTL"},
+ {I40E_VFINT_ICR0(0), 127, 4, 0, 0, "VFINT_ICR0"},
+ {I40E_VFINT_ICR0_ENA(0), 127, 4, 0, 0, "VFINT_ICR0_ENA"},
+ {I40E_PFINT_ITRN(0, 0), 2, 2048, 511, 4, "PFINT_ITRN"},
+ {I40E_PFINT_DYN_CTLN(0), 511, 4, 0, 0, "PFINT_DYN_CTLN"},
+ {I40E_PFINT_LNKLSTN(0), 511, 4, 0, 0, "PFINT_LNKLSTN"},
+ {I40E_PFINT_RATEN(0), 511, 4, 0, 0, "PFINT_RATEN"},
+ {I40E_PFINT_CEQCTL(0), 511, 4, 0, 0, "PFINT_CEQCTL"},
+ {I40E_PFINT_ITR0(0), 2, 128, 0, 0, "PFINT_ITR0"},
+ {I40E_PFINT_STAT_CTL0, 0, 0, 0, 0, "PFINT_STAT_CTL0"},
+ {I40E_PFINT_DYN_CTL0, 0, 0, 0, 0, "PFINT_DYN_CTL0"},
+ {I40E_PFINT_LNKLST0, 0, 0, 0, 0, "PFINT_LNKLST0"},
+ {I40E_PFINT_RATE0, 0, 0, 0, 0, "PFINT_RATE0"},
+ {I40E_PFINT_AEQCTL, 0, 0, 0, 0, "PFINT_AEQCTL"},
+ {I40E_PFINT_ICR0, 0, 0, 0, 0, "PFINT_ICR0"},
+ {I40E_PFINT_ICR0_ENA, 0, 0, 0, 0, "PFINT_ICR0_ENA"},
+ {I40E_QINT_RQCTL(0), 1535, 4, 0, 0, "QINT_RQCTL"},
+ {I40E_QINT_TQCTL(0), 1535, 4, 0, 0, "QINT_TQCTL"},
+ {I40E_PFGEN_PORTMDIO_NUM, 0, 0, 0, 0, "PFGEN_PORTMDIO_NUM"},
+ {I40E_GLINT_CTL, 0, 0, 0, 0, "GLINT_CTL"},
+ {I40E_GLLAN_TSOMSK_F, 0, 0, 0, 0, "GLLAN_TSOMSK_F"},
+ {I40E_GLLAN_TSOMSK_M, 0, 0, 0, 0, "GLLAN_TSOMSK_M"},
+ {I40E_GLLAN_TSOMSK_L, 0, 0, 0, 0, "GLLAN_TSOMSK_L"},
+ {I40E_GL_RDPU_CNTRL, 0, 0, 0, 0, "GL_RDPU_CNTRL"},
+ {I40E_PFPM_FHFT_LENGTH(0), 7, 128, 0, 0, "PFPM_FHFT_LENGTH"},
+ {I40E_PFPM_WUC, 0, 0, 0, 0, "PFPM_WUC"},
+ {I40E_PFPM_WUFC, 0, 0, 0, 0, "PFPM_WUFC"},
+ {I40E_PFPM_WUS, 0, 0, 0, 0, "PFPM_WUS"},
+ {I40E_PRTPM_FHFHR, 0, 0, 0, 0, "PRTPM_FHFHR"},
+ {I40E_GLPM_WUMC, 0, 0, 0, 0, "GLPM_WUMC"},
+ {I40E_VPLAN_QTABLE(0, 0), 15, 1024, 127, 4, "VPLAN_QTABLE"},
+ {I40E_VPLAN_MAPENA(0), 127, 4, 0, 0, "VPLAN_MAPENA"},
+ {I40E_VFGEN_RSTAT1(0), 127, 4, 0, 0, "VFGEN_RSTAT1"},
+ {I40E_VPLAN_QBASE(0), 127, 4, 0, 0, "VPLAN_QBASE"},
+ {I40E_PF_ATQBAL, 0, 0, 0, 0, "PF_ATQBAL"},
+ {I40E_GL_ATQBAL, 0, 0, 0, 0, "GL_ATQBAL"},
+ {I40E_PF_ARQBAL, 0, 0, 0, 0, "PF_ARQBAL"},
+ {I40E_GL_ARQBAL, 0, 0, 0, 0, "GL_ARQBAL"},
+ {I40E_PF_ATQBAH, 0, 0, 0, 0, "PF_ATQBAH"},
+ {I40E_GL_ATQBAH, 0, 0, 0, 0, "GL_ATQBAH"},
+ {I40E_PF_ARQBAH, 0, 0, 0, 0, "PF_ARQBAH"},
+ {I40E_GL_ARQBAH, 0, 0, 0, 0, "GL_ARQBAH"},
+ {I40E_PF_ATQLEN, 0, 0, 0, 0, "PF_ATQLEN"},
+ {I40E_GL_ATQLEN, 0, 0, 0, 0, "GL_ATQLEN"},
+ {I40E_PF_ARQLEN, 0, 0, 0, 0, "PF_ARQLEN"},
+ {I40E_PF_ATQH, 0, 0, 0, 0, "PF_ATQH"},
+ {I40E_GL_ATQH, 0, 0, 0, 0, "GL_ATQH"},
+ {I40E_PF_ARQH, 0, 0, 0, 0, "PF_ARQH"},
+ {I40E_GL_ARQH, 0, 0, 0, 0, "GL_ARQH"},
+ {I40E_PF_ATQT, 0, 0, 0, 0, "PF_ATQT"},
+ {I40E_GL_ATQT, 0, 0, 0, 0, "GL_ATQT"},
+ {I40E_PF_ARQT, 0, 0, 0, 0, "PF_ARQT"},
+ {I40E_GL_ARQT, 0, 0, 0, 0, "GL_ARQT"},
+ {I40E_VF_ATQBAL(0), 127, 4, 0, 0, "VF_ATQBAL"},
+ {I40E_VF_ARQBAL(0), 127, 4, 0, 0, "VF_ARQBAL"},
+ {I40E_VF_ATQBAH(0), 127, 4, 0, 0, "VF_ATQBAH"},
+ {I40E_VF_ARQBAH(0), 127, 4, 0, 0, "VF_ARQBAH"},
+ {I40E_VF_ATQLEN(0), 127, 4, 0, 0, "VF_ATQLEN"},
+ {I40E_VF_ARQLEN(0), 127, 4, 0, 0, "VF_ARQLEN"},
+ {I40E_VF_ATQH(0), 127, 4, 0, 0, "VF_ATQH"},
+ {I40E_VF_ARQH(0), 127, 4, 0, 0, "VF_ARQH"},
+ {I40E_VF_ATQT(0), 127, 4, 0, 0, "VF_ATQT"},
+ {I40E_VF_ARQT(0), 127, 4, 0, 0, "VF_ARQT"},
+ {I40E_PRTDCB_GENC, 0, 0, 0, 0, "PRTDCB_GENC"},
+ {I40E_PRTDCB_GENS, 0, 0, 0, 0, "PRTDCB_GENS"},
+ {I40E_GLDCB_GENC, 0, 0, 0, 0, "GLDCB_GENC"},
+ {I40E_GL_FWSTS, 0, 0, 0, 0, "GL_FWSTS"},
+ {I40E_GL_FWRESETCNT, 0, 0, 0, 0, "GL_FWRESETCNT"},
+ {I40E_GL_VF_CTRL_TX(0), 127, 4, 0, 0, "GL_VF_CTRL_TX"},
+ {I40E_GL_VF_CTRL_RX(0), 127, 4, 0, 0, "GL_VF_CTRL_RX"},
+ {I40E_PRTTSYN_CTL1, 0, 0, 0, 0, "PRTTSYN_CTL1"},
+ {I40E_PRTTSYN_RXTIME_H(0), 3, 32, 0, 0, "PRTTSYN_RXTIME_H"},
+ {I40E_PRTTSYN_RXTIME_L(0), 3, 32, 0, 0, "PRTTSYN_RXTIME_L"},
+ {I40E_PRTTSYN_STAT_1, 0, 0, 0, 0, "PRTTSYN_STAT_1"},
+ {I40E_PRT_MNG_FTFT_MASK(0), 7, 32, 0, 0, "PRT_MNG_FTFT_MASK"},
+ {I40E_PRT_MNG_FTFT_LENGTH, 0, 0, 0, 0, "PRT_MNG_FTFT_LENGTH"},
+ {I40E_PRT_MNG_FTFT_DATA(0), 31, 32, 0, 0, "PRT_MNG_FTFT_DATA"},
+ {I40E_GL_PPRS_SPARE, 0, 0, 0, 0, "GL_PPRS_SPARE"},
+ {I40E_PFGEN_STATE, 0, 0, 0, 0, "PFGEN_STATE"},
+ {I40E_PFINT_GPIO_ENA, 0, 0, 0, 0, "PFINT_GPIO_ENA"},
+ {I40E_GLGEN_MISC_SPARE, 0, 0, 0, 0, "GLGEN_MISC_SPARE"},
+ {I40E_GLGEN_GPIO_CTL(0), 29, 4, 0, 0, "GLGEN_GPIO_CTL"},
+ {I40E_GLGEN_LED_CTL, 0, 0, 0, 0, "GLGEN_LED_CTL"},
+ {I40E_GLGEN_GPIO_STAT, 0, 0, 0, 0, "GLGEN_GPIO_STAT"},
+ {I40E_GLGEN_GPIO_TRANSIT, 0, 0, 0, 0, "GLGEN_GPIO_TRANSIT"},
+ {I40E_GLGEN_GPIO_SET, 0, 0, 0, 0, "GLGEN_GPIO_SET"},
+ {I40E_EMPINT_GPIO_ENA, 0, 0, 0, 0, "EMPINT_GPIO_ENA"},
+ {I40E_GLGEN_MSCA(0), 3, 4, 0, 0, "GLGEN_MSCA"},
+ {I40E_GLGEN_MSRWD(0), 3, 4, 0, 0, "GLGEN_MSRWD"},
+ {I40E_GLGEN_I2CPARAMS(0), 3, 4, 0, 0, "GLGEN_I2CPARAMS"},
+ {I40E_GLVFGEN_TIMER, 0, 0, 0, 0, "GLVFGEN_TIMER"},
+ {I40E_GLGEN_MDIO_I2C_SEL(0), 3, 4, 0, 0, "GLGEN_MDIO_I2C_SEL"},
+ {I40E_GLGEN_MDIO_CTRL(0), 3, 4, 0, 0, "GLGEN_MDIO_CTRL"},
+ {I40E_GLGEN_I2CCMD(0), 3, 4, 0, 0, "GLGEN_I2CCMD"},
+ {I40E_PRTMAC_PCS_XAUI_SWAP_A, 0, 0, 0, 0, "PRTMAC_PCS_XAUI_SWAP_A"},
+ {I40E_PRTMAC_PCS_XAUI_SWAP_B, 0, 0, 0, 0, "PRTMAC_PCS_XAUI_SWAP_B"},
+ {I40E_VSIGEN_RTRIG(0), 383, 4, 0, 0, "VSIGEN_RTRIG"},
+ {I40E_VSIGEN_RSTAT(0), 383, 4, 0, 0, "VSIGEN_RSTAT"},
+ {I40E_VPGEN_VFRTRIG(0), 127, 4, 0, 0, "VPGEN_VFRTRIG"},
+ {I40E_VPGEN_VFRSTAT(0), 127, 4, 0, 0, "VPGEN_VFRSTAT"},
+ {I40E_PFGEN_CTRL, 0, 0, 0, 0, "PFGEN_CTRL"},
+ {I40E_PFGEN_DRUN, 0, 0, 0, 0, "PFGEN_DRUN"},
+ {I40E_GLGEN_VFLRSTAT(0), 3, 4, 0, 0, "GLGEN_VFLRSTAT"},
+ {I40E_GL_UFUSE, 0, 0, 0, 0, "GL_UFUSE"},
+ {I40E_GL_GP_FUSE(0), 28, 4, 0, 0, "GL_GP_FUSE"},
+ {I40E_PRTDCB_TETSC_TPB, 0, 0, 0, 0, "PRTDCB_TETSC_TPB"},
+ {I40E_PF_FUNC_RID, 0, 0, 0, 0, "PF_FUNC_RID"},
+ {I40E_PF_PCI_CIAA, 0, 0, 0, 0, "PF_PCI_CIAA"},
+ {I40E_PF_PCI_CIAD, 0, 0, 0, 0, "PF_PCI_CIAD"},
+ {I40E_PFPCI_FACTPS, 0, 0, 0, 0, "PFPCI_FACTPS"},
+ {I40E_PFPCI_ICAUSE, 0, 0, 0, 0, "PFPCI_ICAUSE"},
+ {I40E_PFPCI_IENA, 0, 0, 0, 0, "PFPCI_IENA"},
+ {I40E_PFPCI_VMINDEX, 0, 0, 0, 0, "PFPCI_VMINDEX"},
+ {I40E_PFPCI_VMPEND, 0, 0, 0, 0, "PFPCI_VMPEND"},
+ {I40E_GLPCI_DREVID, 0, 0, 0, 0, "GLPCI_DREVID"},
+ {I40E_GLPCI_BYTCTH, 0, 0, 0, 0, "GLPCI_BYTCTH"},
+ {I40E_GLPCI_BYTCTL, 0, 0, 0, 0, "GLPCI_BYTCTL"},
+ {I40E_GLPCI_GSCL_1, 0, 0, 0, 0, "GLPCI_GSCL_1"},
+ {I40E_GLPCI_GSCL_2, 0, 0, 0, 0, "GLPCI_GSCL_2"},
+ {I40E_GLPCI_GSCL_5_8(0), 3, 4, 0, 0, "GLPCI_GSCL_5_8"},
+ {I40E_GLPCI_GSCN_0_3(0), 3, 4, 0, 0, "GLPCI_GSCN_0_3"},
+ {I40E_GLPCI_PKTCT, 0, 0, 0, 0, "GLPCI_PKTCT"},
+ {I40E_GLPCI_PQ_MAX_USED_SPC, 0, 0, 0, 0, "GLPCI_PQ_MAX_USED_SPC"},
+ {I40E_GLPCI_PM_MUX_PFB, 0, 0, 0, 0, "GLPCI_PM_MUX_PFB"},
+ {I40E_GLPCI_PM_MUX_NPQ, 0, 0, 0, 0, "GLPCI_PM_MUX_NPQ"},
+ {I40E_GLPCI_SPARE_BITS_0, 0, 0, 0, 0, "GLPCI_SPARE_BITS_0"},
+ {I40E_GLPCI_SPARE_BITS_1, 0, 0, 0, 0, "GLPCI_SPARE_BITS_1"},
+ {I40E_GLPCI_CUR_RLAN_ALWD, 0, 0, 0, 0, "GLPCI_CUR_RLAN_ALWD"},
+ {I40E_GLPCI_CUR_TLAN_ALWD, 0, 0, 0, 0, "GLPCI_CUR_TLAN_ALWD"},
+ {I40E_GLPCI_CUR_RXPE_ALWD, 0, 0, 0, 0, "GLPCI_CUR_RXPE_ALWD"},
+ {I40E_GLPCI_CUR_TXPE_ALWD, 0, 0, 0, 0, "GLPCI_CUR_TXPE_ALWD"},
+ {I40E_GLPCI_CUR_PMAT_ALWD, 0, 0, 0, 0, "GLPCI_CUR_PMAT_ALWD"},
+ {I40E_GLPCI_CUR_MNG_ALWD, 0, 0, 0, 0, "GLPCI_CUR_MNG_ALWD"},
+ {I40E_GLPCI_CUR_TDPU_ALWD, 0, 0, 0, 0, "GLPCI_CUR_TDPU_ALWD"},
+ {I40E_GLPCI_CUR_RLAN_RSVD, 0, 0, 0, 0, "GLPCI_CUR_RLAN_RSVD"},
+ {I40E_GLPCI_CUR_TLAN_RSVD, 0, 0, 0, 0, "GLPCI_CUR_TLAN_RSVD"},
+ {I40E_GLPCI_CUR_RXPE_RSVD, 0, 0, 0, 0, "GLPCI_CUR_RXPE_RSVD"},
+ {I40E_GLPCI_CUR_TXPE_RSVD, 0, 0, 0, 0, "GLPCI_CUR_TXPE_RSVD"},
+ {I40E_GLPCI_CUR_PMAT_RSVD, 0, 0, 0, 0, "GLPCI_CUR_PMAT_RSVD"},
+ {I40E_GLPCI_CUR_MNG_RSVD, 0, 0, 0, 0, "GLPCI_CUR_MNG_RSVD"},
+ {I40E_GLPCI_CUR_TDPU_RSVD, 0, 0, 0, 0, "GLPCI_CUR_TDPU_RSVD"},
+ {I40E_PFPCI_VF_FLUSH_DONE1(0), 127, 4, 0, 0, "PFPCI_VF_FLUSH_DONE1"},
+ {I40E_PFPCI_PF_FLUSH_DONE, 0, 0, 0, 0, "PFPCI_PF_FLUSH_DONE"},
+ {I40E_PFPCI_VM_FLUSH_DONE, 0, 0, 0, 0, "PFPCI_VM_FLUSH_DONE"},
+ {I40E_GLPCI_NPQ_CFG, 0, 0, 0, 0, "GLPCI_NPQ_CFG"},
+ {I40E_GLPCI_CUR_CLNT_COMMON, 0, 0, 0, 0, "GLPCI_CUR_CLNT_COMMON"},
+ {I40E_GLPCI_CUR_CLNT_PIPEMON, 0, 0, 0, 0, "GLPCI_CUR_CLNT_PIPEMON"},
+ {I40E_GLPCI_CUR_WATMK_CLNT_COMMON, 0, 0, 0, 0,
+ "GLPCI_CUR_WATMK_CLNT_COMMON"},
+ {I40E_GLPCI_WATMK_CLNT_PIPEMON, 0, 0, 0, 0,
+ "GLPCI_WATMK_CLNT_PIPEMON"},
+ {I40E_GLPCI_WATMK_RLAN_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_RLAN_ALWD"},
+ {I40E_GLPCI_WATMK_TLAN_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_TLAN_ALWD"},
+ {I40E_GLPCI_WATMK_RXPE_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_RXPE_ALWD"},
+ {I40E_GLPCI_WATMK_TXPE_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_TXPE_ALWD"},
+ {I40E_GLPCI_WATMK_PMAT_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_PMAT_ALWD"},
+ {I40E_GLPCI_WATMK_MNG_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_MNG_ALWD"},
+ {I40E_GLPCI_WATMK_TPDU_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_TPDU_ALWD"},
+ {I40E_PRTDCB_TCMSTC(0), 7, 32, 0, 0, "PRTDCB_TCMSTC"},
+ {I40E_PRTDCB_TFMSTC(0), 7, 32, 0, 0, "PRTDCB_TFMSTC"},
+ {I40E_PRTDCB_TDPMC, 0, 0, 0, 0, "PRTDCB_TDPMC"},
+ {I40E_PRTDCB_TCWSTC(0), 7, 32, 0, 0, "PRTDCB_TCWSTC"},
+ {I40E_PRTDCB_TCPMC, 0, 0, 0, 0, "PRTDCB_TCPMC"},
+ {I40E_GL_TUPM_SPARE, 0, 0, 0, 0, "GL_TUPM_SPARE"},
+ {I40E_GLPEOC_CACHESIZE, 0, 0, 0, 0, "GLPEOC_CACHESIZE"},
+ {I40E_GLPBLOC_CACHESIZE, 0, 0, 0, 0, "GLPBLOC_CACHESIZE"},
+ {I40E_GLFOC_CACHESIZE, 0, 0, 0, 0, "GLFOC_CACHESIZE"},
+ {I40E_PRTRPB_DHW(0), 7, 32, 0, 0, "PRTRPB_DHW"},
+ {I40E_PRTRPB_DLW(0), 7, 32, 0, 0, "PRTRPB_DLW"},
+ {I40E_PRTRPB_DPS(0), 7, 32, 0, 0, "PRTRPB_DPS"},
+ {I40E_PRTRPB_SHT(0), 7, 32, 0, 0, "PRTRPB_SHT"},
+ {I40E_PRTRPB_SHW, 0, 0, 0, 0, "PRTRPB_SHW"},
+ {I40E_PRTRPB_SLT(0), 7, 32, 0, 0, "PRTRPB_SLT"},
+ {I40E_PRTRPB_SLW, 0, 0, 0, 0, "PRTRPB_SLW"},
+ {I40E_PRTRPB_SPS, 0, 0, 0, 0, "PRTRPB_SPS"},
+ {I40E_GLRPB_DPSS, 0, 0, 0, 0, "GLRPB_DPSS"},
+ {I40E_GLRPB_GHW, 0, 0, 0, 0, "GLRPB_GHW"},
+ {I40E_GLRPB_GLW, 0, 0, 0, 0, "GLRPB_GLW"},
+ {I40E_GLRPB_PHW, 0, 0, 0, 0, "GLRPB_PHW"},
+ {I40E_GLRPB_PLW, 0, 0, 0, 0, "GLRPB_PLW"},
+ {I40E_PRTDCB_TETSC_TCB, 0, 0, 0, 0, "PRTDCB_TETSC_TCB"},
+ {I40E_GLNVM_ULD, 0, 0, 0, 0, "GLNVM_ULD"},
+ {I40E_GLNVM_ULD, 0, 0, 0, 0, "GLNVM_ULD"},
+ {I40E_GLNVM_PROTCSR(0), 59, 4, 0, 0, "GLNVM_PROTCSR"},
+ {I40E_GLNVM_GENS, 0, 0, 0, 0, "GLNVM_GENS"},
+ {I40E_GLNVM_FLASHID, 0, 0, 0, 0, "GLNVM_FLASHID"},
+ {I40E_GLNVM_FLA, 0, 0, 0, 0, "GLNVM_FLA"},
+ {I40E_GLNVM_FLA, 0, 0, 0, 0, "GLNVM_FLA"},
+ {I40E_GLNVM_SRCTL, 0, 0, 0, 0, "GLNVM_SRCTL"},
+ {I40E_GLNVM_SRDATA, 0, 0, 0, 0, "GLNVM_SRDATA"},
+ {I40E_GLGEN_STAT, 0, 0, 0, 0, "GLGEN_STAT"},
+ {I40E_GL_MNG_HWARB_CTRL, 0, 0, 0, 0, "GL_MNG_HWARB_CTRL"},
+ {I40E_GL_MNG_FWSM, 0, 0, 0, 0, "GL_MNG_FWSM"},
+ {I40E_GLNVM_ALTIMERS, 0, 0, 0, 0, "GLNVM_ALTIMERS"},
+ {I40E_GLNVM_ULT, 0, 0, 0, 0, "GLNVM_ULT"},
+ {I40E_MEM_INIT_DONE_STAT, 0, 0, 0, 0, "MEM_INIT_DONE_STAT"},
+ {I40E_GLNVM_AL_REQ, 0, 0, 0, 0, "GLNVM_AL_REQ"},
+ {I40E_MNGSB_MSGCTL, 0, 0, 0, 0, "MNGSB_MSGCTL"},
+ {I40E_MNGSB_RSPCTL, 0, 0, 0, 0, "MNGSB_RSPCTL"},
+ {I40E_MNGSB_DADD, 0, 0, 0, 0, "MNGSB_DADD"},
+ {I40E_MNGSB_DCNT, 0, 0, 0, 0, "MNGSB_DCNT"},
+ {I40E_MNGSB_FDCS, 0, 0, 0, 0, "MNGSB_FDCS"},
+ {I40E_MNGSB_FDS, 0, 0, 0, 0, "MNGSB_FDS"},
+ {I40E_MNGSB_FDCRC, 0, 0, 0, 0, "MNGSB_FDCRC"},
+ {I40E_MNGSB_WHDR0, 0, 0, 0, 0, "MNGSB_WHDR0"},
+ {I40E_MNGSB_WHDR1, 0, 0, 0, 0, "MNGSB_WHDR1"},
+ {I40E_MNGSB_WHDR2, 0, 0, 0, 0, "MNGSB_WHDR2"},
+ {I40E_MNGSB_WDATA, 0, 0, 0, 0, "MNGSB_WDATA"},
+ {I40E_MNGSB_RHDR0, 0, 0, 0, 0, "MNGSB_RHDR0"},
+ {I40E_MNGSB_RDATA, 0, 0, 0, 0, "MNGSB_RDATA"},
+ {I40E_PFPM_APM, 0, 0, 0, 0, "PFPM_APM"},
+ {I40E_PRTGEN_STATUS, 0, 0, 0, 0, "PRTGEN_STATUS"},
+ {I40E_PRTGEN_CNF, 0, 0, 0, 0, "PRTGEN_CNF"},
+ {I40E_PRTPM_GC, 0, 0, 0, 0, "PRTPM_GC"},
+ {I40E_PRTGEN_CNF2, 0, 0, 0, 0, "PRTGEN_CNF2"},
+ {I40E_GLGEN_RSTCTL, 0, 0, 0, 0, "GLGEN_RSTCTL"},
+ {I40E_GLGEN_CLKSTAT, 0, 0, 0, 0, "GLGEN_CLKSTAT"},
+ {I40E_GLGEN_RSTAT, 0, 0, 0, 0, "GLGEN_RSTAT"},
+ {I40E_GLGEN_RTRIG, 0, 0, 0, 0, "GLGEN_RTRIG"},
+ {I40E_GLGEN_PME_TO, 0, 0, 0, 0, "GLGEN_PME_TO"},
+ {I40E_GLGEN_CAR_DEBUG, 0, 0, 0, 0, "GLGEN_CAR_DEBUG"},
+ {I40E_PFPCI_CNF, 0, 0, 0, 0, "PFPCI_CNF"},
+ {I40E_PFPCI_DEVID, 0, 0, 0, 0, "PFPCI_DEVID"},
+ {I40E_PFPCI_SUBSYSID, 0, 0, 0, 0, "PFPCI_SUBSYSID"},
+ {I40E_PFPCI_FUNC2, 0, 0, 0, 0, "PFPCI_FUNC2"},
+ {I40E_PFPCI_FUNC, 0, 0, 0, 0, "PFPCI_FUNC"},
+ {I40E_PFPCI_STATUS1, 0, 0, 0, 0, "PFPCI_STATUS1"},
+ {I40E_PFPCI_PM, 0, 0, 0, 0, "PFPCI_PM"},
+ {I40E_PFPCI_CLASS, 0, 0, 0, 0, "PFPCI_CLASS"},
+ {I40E_GLTPH_CTRL, 0, 0, 0, 0, "GLTPH_CTRL"},
+ {I40E_GLPCI_LBARCTRL, 0, 0, 0, 0, "GLPCI_LBARCTRL"},
+ {I40E_GLPCI_SUBVENID, 0, 0, 0, 0, "GLPCI_SUBVENID"},
+ {I40E_GLPCI_PWRDATA, 0, 0, 0, 0, "GLPCI_PWRDATA"},
+ {I40E_GLPCI_CNF2, 0, 0, 0, 0, "GLPCI_CNF2"},
+ {I40E_GLPCI_SERL, 0, 0, 0, 0, "GLPCI_SERL"},
+ {I40E_GLPCI_SERH, 0, 0, 0, 0, "GLPCI_SERH"},
+ {I40E_GLPCI_CAPCTRL, 0, 0, 0, 0, "GLPCI_CAPCTRL"},
+ {I40E_GLPCI_CAPSUP, 0, 0, 0, 0, "GLPCI_CAPSUP"},
+ {I40E_GLPCI_LINKCAP, 0, 0, 0, 0, "GLPCI_LINKCAP"},
+ {I40E_GLPCI_PMSUP, 0, 0, 0, 0, "GLPCI_PMSUP"},
+ {I40E_GLPCI_REVID, 0, 0, 0, 0, "GLPCI_REVID"},
+ {I40E_GLPCI_VFSUP, 0, 0, 0, 0, "GLPCI_VFSUP"},
+ {I40E_GLPCI_CNF, 0, 0, 0, 0, "GLPCI_CNF"},
+ {I40E_GLPCI_UPADD, 0, 0, 0, 0, "GLPCI_UPADD"},
+ {I40E_GLPCI_PCIERR, 0, 0, 0, 0, "GLPCI_PCIERR"},
+ {I40E_GLPCI_VENDORID, 0, 0, 0, 0, "GLPCI_VENDORID"},
+ {I40E_GL_UFUSE_SOC, 0, 0, 0, 0, "GL_UFUSE_SOC"},
+ {I40E_PFHMC_SDCMD, 0, 0, 0, 0, "PFHMC_SDCMD"},
+ {I40E_PFHMC_SDDATALOW, 0, 0, 0, 0, "PFHMC_SDDATALOW"},
+ {I40E_PFHMC_SDDATAHIGH, 0, 0, 0, 0, "PFHMC_SDDATAHIGH"},
+ {I40E_PFHMC_PDINV, 0, 0, 0, 0, "PFHMC_PDINV"},
+ {I40E_PFHMC_ERRORINFO, 0, 0, 0, 0, "PFHMC_ERRORINFO"},
+ {I40E_PFHMC_ERRORDATA, 0, 0, 0, 0, "PFHMC_ERRORDATA"},
+ {I40E_GLHMC_SDPART(0), 15, 4, 0, 0, "GLHMC_SDPART"},
+ {I40E_GLHMC_PFPESDPART(0), 15, 4, 0, 0, "GLHMC_PFPESDPART"},
+ {I40E_GLHMC_PFASSIGN(0), 15, 4, 0, 0, "GLHMC_PFASSIGN"},
+ {I40E_GLHMC_LANTXOBJSZ, 0, 0, 0, 0, "GLHMC_LANTXOBJSZ"},
+ {I40E_GLHMC_LANQMAX, 0, 0, 0, 0, "GLHMC_LANQMAX"},
+ {I40E_GLHMC_LANRXOBJSZ, 0, 0, 0, 0, "GLHMC_LANRXOBJSZ"},
+ {I40E_GLHMC_FCOEDDPOBJSZ, 0, 0, 0, 0, "GLHMC_FCOEDDPOBJSZ"},
+ {I40E_GLHMC_FCOEMAX, 0, 0, 0, 0, "GLHMC_FCOEMAX"},
+ {I40E_GLHMC_FCOEFOBJSZ, 0, 0, 0, 0, "GLHMC_FCOEFOBJSZ"},
+ {I40E_GLHMC_PEQPOBJSZ, 0, 0, 0, 0, "GLHMC_PEQPOBJSZ"},
+ {I40E_GLHMC_PECQOBJSZ, 0, 0, 0, 0, "GLHMC_PECQOBJSZ"},
+ {I40E_GLHMC_PESRQOBJSZ, 0, 0, 0, 0, "GLHMC_PESRQOBJSZ"},
+ {I40E_GLHMC_PESRQMAX, 0, 0, 0, 0, "GLHMC_PESRQMAX"},
+ {I40E_GLHMC_PEHTEOBJSZ, 0, 0, 0, 0, "GLHMC_PEHTEOBJSZ"},
+ {I40E_GLHMC_PEHTMAX, 0, 0, 0, 0, "GLHMC_PEHTMAX"},
+ {I40E_GLHMC_PEARPOBJSZ, 0, 0, 0, 0, "GLHMC_PEARPOBJSZ"},
+ {I40E_GLHMC_PEARPMAX, 0, 0, 0, 0, "GLHMC_PEARPMAX"},
+ {I40E_GLHMC_PEMROBJSZ, 0, 0, 0, 0, "GLHMC_PEMROBJSZ"},
+ {I40E_GLHMC_PEMRMAX, 0, 0, 0, 0, "GLHMC_PEMRMAX"},
+ {I40E_GLHMC_PEXFOBJSZ, 0, 0, 0, 0, "GLHMC_PEXFOBJSZ"},
+ {I40E_GLHMC_PEXFMAX, 0, 0, 0, 0, "GLHMC_PEXFMAX"},
+ {I40E_GLHMC_PEXFFLMAX, 0, 0, 0, 0, "GLHMC_PEXFFLMAX"},
+ {I40E_GLHMC_PEQ1OBJSZ, 0, 0, 0, 0, "GLHMC_PEQ1OBJSZ"},
+ {I40E_GLHMC_PEQ1MAX, 0, 0, 0, 0, "GLHMC_PEQ1MAX"},
+ {I40E_GLHMC_PEQ1FLMAX, 0, 0, 0, 0, "GLHMC_PEQ1FLMAX"},
+ {I40E_GLHMC_FSIMCOBJSZ, 0, 0, 0, 0, "GLHMC_FSIMCOBJSZ"},
+ {I40E_GLHMC_FSIMCMAX, 0, 0, 0, 0, "GLHMC_FSIMCMAX"},
+ {I40E_GLHMC_FSIAVOBJSZ, 0, 0, 0, 0, "GLHMC_FSIAVOBJSZ"},
+ {I40E_GLHMC_FSIAVMAX, 0, 0, 0, 0, "GLHMC_FSIAVMAX"},
+ {I40E_GLHMC_PEPBLMAX, 0, 0, 0, 0, "GLHMC_PEPBLMAX"},
+ {I40E_GLHMC_PETIMEROBJSZ, 0, 0, 0, 0, "GLHMC_PETIMEROBJSZ"},
+ {I40E_GLHMC_PETIMERMAX, 0, 0, 0, 0, "GLHMC_PETIMERMAX"},
+ {I40E_GLHMC_FCOEFMAX, 0, 0, 0, 0, "GLHMC_FCOEFMAX"},
+ {I40E_GLHMC_PEPFFIRSTSD, 0, 0, 0, 0, "GLHMC_PEPFFIRSTSD"},
+ {I40E_GLHMC_DBQPMAX, 0, 0, 0, 0, "GLHMC_DBQPMAX"},
+ {I40E_GLHMC_DBCQMAX, 0, 0, 0, 0, "GLHMC_DBCQMAX"},
+ {I40E_GLHMC_PEQPBASE(0), 15, 4, 0, 0, "GLHMC_PEQPBASE"},
+ {I40E_GLHMC_PEQPCNT(0), 15, 4, 0, 0, "GLHMC_PEQPCNT"},
+ {I40E_GLHMC_PECQBASE(0), 15, 4, 0, 0, "GLHMC_PECQBASE"},
+ {I40E_GLHMC_PECQCNT(0), 15, 4, 0, 0, "GLHMC_PECQCNT"},
+ {I40E_GLHMC_PESRQBASE(0), 15, 4, 0, 0, "GLHMC_PESRQBASE"},
+ {I40E_GLHMC_PESRQCNT(0), 15, 4, 0, 0, "GLHMC_PESRQCNT"},
+ {I40E_GLHMC_PEHTEBASE(0), 15, 4, 0, 0, "GLHMC_PEHTEBASE"},
+ {I40E_GLHMC_PEHTCNT(0), 15, 4, 0, 0, "GLHMC_PEHTCNT"},
+ {I40E_GLHMC_PEARPBASE(0), 15, 4, 0, 0, "GLHMC_PEARPBASE"},
+ {I40E_GLHMC_PEARPCNT(0), 15, 4, 0, 0, "GLHMC_PEARPCNT"},
+ {I40E_GLHMC_APBVTINUSEBASE(0), 15, 4, 0, 0, "GLHMC_APBVTINUSEBASE"},
+ {I40E_GLHMC_PEMRBASE(0), 15, 4, 0, 0, "GLHMC_PEMRBASE"},
+ {I40E_GLHMC_PEMRCNT(0), 15, 4, 0, 0, "GLHMC_PEMRCNT"},
+ {I40E_GLHMC_PEXFBASE(0), 15, 4, 0, 0, "GLHMC_PEXFBASE"},
+ {I40E_GLHMC_PEXFCNT(0), 15, 4, 0, 0, "GLHMC_PEXFCNT"},
+ {I40E_GLHMC_PEXFFLBASE(0), 15, 4, 0, 0, "GLHMC_PEXFFLBASE"},
+ {I40E_GLHMC_PEQ1BASE(0), 15, 4, 0, 0, "GLHMC_PEQ1BASE"},
+ {I40E_GLHMC_PEQ1CNT(0), 15, 4, 0, 0, "GLHMC_PEQ1CNT"},
+ {I40E_GLHMC_PEQ1FLBASE(0), 15, 4, 0, 0, "GLHMC_PEQ1FLBASE"},
+ {I40E_GLHMC_FSIAVBASE(0), 15, 4, 0, 0, "GLHMC_FSIAVBASE"},
+ {I40E_GLHMC_FSIAVCNT(0), 15, 4, 0, 0, "GLHMC_FSIAVCNT"},
+ {I40E_GLHMC_PEPBLBASE(0), 15, 4, 0, 0, "GLHMC_PEPBLBASE"},
+ {I40E_GLHMC_PEPBLCNT(0), 15, 4, 0, 0, "GLHMC_PEPBLCNT"},
+ {I40E_GLHMC_PETIMERBASE(0), 15, 4, 0, 0, "GLHMC_PETIMERBASE"},
+ {I40E_GLHMC_PETIMERCNT(0), 15, 4, 0, 0, "GLHMC_PETIMERCNT"},
+ {I40E_GLHMC_FSIMCBASE(0), 15, 4, 0, 0, "GLHMC_FSIMCBASE"},
+ {I40E_GLHMC_FSIMCCNT(0), 15, 4, 0, 0, "GLHMC_FSIMCCNT"},
+ {I40E_GLHMC_LANTXBASE(0), 15, 4, 0, 0, "GLHMC_LANTXBASE"},
+ {I40E_GLHMC_LANTXCNT(0), 15, 4, 0, 0, "GLHMC_LANTXCNT"},
+ {I40E_GLHMC_LANRXBASE(0), 15, 4, 0, 0, "GLHMC_LANRXBASE"},
+ {I40E_GLHMC_LANRXCNT(0), 15, 4, 0, 0, "GLHMC_LANRXCNT"},
+ {I40E_GLHMC_FCOEDDPBASE(0), 15, 4, 0, 0, "GLHMC_FCOEDDPBASE"},
+ {I40E_GLHMC_FCOEDDPCNT(0), 15, 4, 0, 0, "GLHMC_FCOEDDPCNT"},
+ {I40E_GLHMC_FCOEFBASE(0), 15, 4, 0, 0, "GLHMC_FCOEFBASE"},
+ {I40E_GLHMC_FCOEFCNT(0), 15, 4, 0, 0, "GLHMC_FCOEFCNT"},
+ {I40E_GLHMC_VFPDINV(0), 31, 4, 0, 0, "GLHMC_VFPDINV"},
+ {I40E_GLHMC_VFSDPART(0), 31, 4, 0, 0, "GLHMC_VFSDPART"},
+ {I40E_GLHMC_VFPEQPBASE(0), 31, 4, 0, 0, "GLHMC_VFPEQPBASE"},
+ {I40E_GLHMC_VFPEQPCNT(0), 31, 4, 0, 0, "GLHMC_VFPEQPCNT"},
+ {I40E_GLHMC_VFPECQBASE(0), 31, 4, 0, 0, "GLHMC_VFPECQBASE"},
+ {I40E_GLHMC_VFPECQCNT(0), 31, 4, 0, 0, "GLHMC_VFPECQCNT"},
+ {I40E_GLHMC_VFPESRQBASE(0), 31, 4, 0, 0, "GLHMC_VFPESRQBASE"},
+ {I40E_GLHMC_VFPESRQCNT(0), 31, 4, 0, 0, "GLHMC_VFPESRQCNT"},
+ {I40E_GLHMC_VFPEHTEBASE(0), 31, 4, 0, 0, "GLHMC_VFPEHTEBASE"},
+ {I40E_GLHMC_VFPEHTCNT(0), 31, 4, 0, 0, "GLHMC_VFPEHTCNT"},
+ {I40E_GLHMC_VFPEARPBASE(0), 31, 4, 0, 0, "GLHMC_VFPEARPBASE"},
+ {I40E_GLHMC_VFPEARPCNT(0), 31, 4, 0, 0, "GLHMC_VFPEARPCNT"},
+ {I40E_GLHMC_VFAPBVTINUSEBASE(0), 31, 4, 0, 0, "GLHMC_VFAPBVTINUSEBASE"},
+ {I40E_GLHMC_VFPEMRBASE(0), 31, 4, 0, 0, "GLHMC_VFPEMRBASE"},
+ {I40E_GLHMC_VFPEMRCNT(0), 31, 4, 0, 0, "GLHMC_VFPEMRCNT"},
+ {I40E_GLHMC_VFPEXFBASE(0), 31, 4, 0, 0, "GLHMC_VFPEXFBASE"},
+ {I40E_GLHMC_VFPEXFCNT(0), 31, 4, 0, 0, "GLHMC_VFPEXFCNT"},
+ {I40E_GLHMC_VFPEXFFLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEXFFLBASE"},
+ {I40E_GLHMC_VFPEQ1BASE(0), 31, 4, 0, 0, "GLHMC_VFPEQ1BASE"},
+ {I40E_GLHMC_VFPEQ1CNT(0), 31, 4, 0, 0, "GLHMC_VFPEQ1CNT"},
+ {I40E_GLHMC_VFPEQ1FLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEQ1FLBASE"},
+ {I40E_GLHMC_VFFSIAVBASE(0), 31, 4, 0, 0, "GLHMC_VFFSIAVBASE"},
+ {I40E_GLHMC_VFFSIAVCNT(0), 31, 4, 0, 0, "GLHMC_VFFSIAVCNT"},
+ {I40E_GLHMC_VFPEPBLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEPBLBASE"},
+ {I40E_GLHMC_VFPEPBLCNT(0), 31, 4, 0, 0, "GLHMC_VFPEPBLCNT"},
+ {I40E_GLHMC_VFPETIMERBASE(0), 31, 4, 0, 0, "GLHMC_VFPETIMERBASE"},
+ {I40E_GLHMC_VFPETIMERCNT(0), 31, 4, 0, 0, "GLHMC_VFPETIMERCNT"},
+ {I40E_GLPDOC_CACHESIZE, 0, 0, 0, 0, "GLPDOC_CACHESIZE"},
+ {I40E_QTX_HEAD(0), 1535, 4, 0, 0, "QTX_HEAD"},
+ {I40E_VP_MDET_TX(0), 127, 4, 0, 0, "VP_MDET_TX"},
+ {I40E_PF_MDET_TX, 0, 0, 0, 0, "PF_MDET_TX"},
+ {I40E_GL_MDET_TX, 0, 0, 0, 0, "GL_MDET_TX"},
+ {I40E_GL_TLAN_SPARE, 0, 0, 0, 0, "GL_TLAN_SPARE"},
+ {I40E_GLLAN_TXPRE_QDIS(0), 11, 4, 0, 0, "GLLAN_TXPRE_QDIS"},
+ {I40E_QTX_ENA(0), 1535, 4, 0, 0, "QTX_ENA"},
+ {I40E_QTX_CTL(0), 1535, 4, 0, 0, "QTX_CTL"},
+ {I40E_QTX_TAIL(0), 1535, 4, 0, 0, "QTX_TAIL"},
+ {I40E_PFCM_LAN_ERRINFO, 0, 0, 0, 0, "PFCM_LAN_ERRINFO"},
+ {I40E_PFCM_LAN_ERRDATA, 0, 0, 0, 0, "PFCM_LAN_ERRDATA"},
+ {I40E_PFCM_LANCTXDATA(0), 3, 128, 0, 0, "PFCM_LANCTXDATA"},
+ {I40E_PFCM_LANCTXCTL, 0, 0, 0, 0, "PFCM_LANCTXCTL"},
+ {I40E_PFCM_LANCTXSTAT, 0, 0, 0, 0, "PFCM_LANCTXSTAT"},
+ {I40E_GLCM_LAN_CACHESIZE, 0, 0, 0, 0, "GLCM_LAN_CACHESIZE"},
+ {I40E_QRX_ENA(0), 1535, 4, 0, 0, "QRX_ENA"},
+ {I40E_PRTDCB_RETSTCC(0), 7, 32, 0, 0, "PRTDCB_RETSTCC"},
+ {I40E_PRTDCB_RPPMC, 0, 0, 0, 0, "PRTDCB_RPPMC"},
+ {I40E_PRTDCB_RETSC, 0, 0, 0, 0, "PRTDCB_RETSC"},
+ {I40E_PRTDCB_RUPTQ(0), 7, 32, 0, 0, "PRTDCB_RUPTQ"},
+ {I40E_GLDCB_RUPTI, 0, 0, 0, 0, "GLDCB_RUPTI"},
+ {I40E_QRX_TAIL(0), 1535, 4, 0, 0, "QRX_TAIL"},
+ {I40E_VP_MDET_RX(0), 127, 4, 0, 0, "VP_MDET_RX"},
+ {I40E_PF_MDET_RX, 0, 0, 0, 0, "PF_MDET_RX"},
+ {I40E_GLLAN_RCTL_0, 0, 0, 0, 0, "GLLAN_RCTL_0"},
+ {I40E_GL_MDET_RX, 0, 0, 0, 0, "GL_MDET_RX"},
+ {I40E_VFPE_CQARM(0), 127, 4, 0, 0, "VFPE_CQARM"},
+ {I40E_VFPE_CQACK(0), 127, 4, 0, 0, "VFPE_CQACK"},
+ {I40E_VFPE_AEQALLOC(0), 127, 4, 0, 0, "VFPE_AEQALLOC"},
+ {I40E_PFPE_CQARM, 0, 0, 0, 0, "PFPE_CQARM"},
+ {I40E_PFPE_CQACK, 0, 0, 0, 0, "PFPE_CQACK"},
+ {I40E_PFPE_AEQALLOC, 0, 0, 0, 0, "PFPE_AEQALLOC"},
+ {I40E_GLHMC_DBCQPART(0), 15, 4, 0, 0, "GLHMC_DBCQPART"},
+ {I40E_GLHMC_CEQPART(0), 15, 4, 0, 0, "GLHMC_CEQPART"},
+ {I40E_GLPE_PFCQEDROPCNT(0), 15, 4, 0, 0, "GLPE_PFCQEDROPCNT"},
+ {I40E_GLPE_PFCEQEDROPCNT(0), 15, 4, 0, 0, "GLPE_PFCEQEDROPCNT"},
+ {I40E_GLPE_PFAEQEDROPCNT(0), 15, 4, 0, 0, "GLPE_PFAEQEDROPCNT"},
+ {I40E_GLHMC_VFDBCQPART(0), 31, 4, 0, 0, "GLHMC_VFDBCQPART"},
+ {I40E_GLHMC_VFCEQPART(0), 31, 4, 0, 0, "GLHMC_VFCEQPART"},
+ {I40E_GLPE_VFCQEDROPCNT(0), 31, 4, 0, 0, "GLPE_VFCQEDROPCNT"},
+ {I40E_GLPE_VFCEQEDROPCNT(0), 31, 4, 0, 0, "GLPE_VFCEQEDROPCNT"},
+ {I40E_GLPE_VFAEQEDROPCNT(0), 31, 4, 0, 0, "GLPE_VFAEQEDROPCNT"},
+ {I40E_VFPE_WQEALLOC(0), 127, 4, 0, 0, "VFPE_WQEALLOC"},
+ {I40E_VFCM_PE_ERRINFO1(0), 127, 4, 0, 0, "VFCM_PE_ERRINFO1"},
+ {I40E_VFCM_PE_ERRDATA1(0), 127, 4, 0, 0, "VFCM_PE_ERRDATA1"},
+ {I40E_PFPE_WQEALLOC, 0, 0, 0, 0, "PFPE_WQEALLOC"},
+ {I40E_PFCM_PE_ERRINFO, 0, 0, 0, 0, "PFCM_PE_ERRINFO"},
+ {I40E_PFCM_PE_ERRDATA, 0, 0, 0, 0, "PFCM_PE_ERRDATA"},
+ {I40E_GLHMC_DBQPPART(0), 15, 4, 0, 0, "GLHMC_DBQPPART"},
+ {I40E_GLHMC_VFDBQPPART(0), 31, 4, 0, 0, "GLHMC_VFDBQPPART"},
+ {I40E_GLCM_PE_CACHESIZE, 0, 0, 0, 0, "GLCM_PE_CACHESIZE"},
+ {I40E_PFGEN_PORTNUM, 0, 0, 0, 0, "PFGEN_PORTNUM"},
+ {I40E_PF_VT_PFALLOC, 0, 0, 0, 0, "PF_VT_PFALLOC"},
+ {I40E_PRTDCB_TC2PFC, 0, 0, 0, 0, "PRTDCB_TC2PFC"},
+ {I40E_PRTDCB_RUP2TC, 0, 0, 0, 0, "PRTDCB_RUP2TC"},
+ {I40E_GLGEN_PCIFCNCNT, 0, 0, 0, 0, "GLGEN_PCIFCNCNT"},
+ {I40E_PRTDCB_RUP, 0, 0, 0, 0, "PRTDCB_RUP"},
+ {I40E_PRT_L2TAGSEN, 0, 0, 0, 0, "PRT_L2TAGSEN"},
+ {I40E_PRTGL_SAL, 0, 0, 0, 0, "PRTGL_SAL"},
+ {I40E_PRTGL_SAH, 0, 0, 0, 0, "PRTGL_SAH"},
+ {I40E_PRTDCB_MFLCN, 0, 0, 0, 0, "PRTDCB_MFLCN"},
+ {I40E_PRTMAC_LINK_DOWN_COUNTER, 0, 0, 0, 0,
+ "PRTMAC_LINK_DOWN_COUNTER"},
+ {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE"},
+ {I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE"},
+ {I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_ENABLE_GCP"},
+ {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1"},
+ {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2"},
+ {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1"},
+ {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2"},
+ {I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_ENABLE_GPP"},
+ {I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_ENABLE_PPP"},
+ {I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL"},
+ {I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(0), 8, 16, 0, 0,
+ "PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA"},
+ {I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(0), 8, 16, 0, 0,
+ "PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER"},
+ {I40E_PRTMAC_HSEC_CTL_TX_SA_PART1, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_TX_SA_PART1"},
+ {I40E_PRTMAC_HSEC_CTL_TX_SA_PART2, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_TX_SA_PART2"},
+ {I40E_PRTTSYN_INC_L, 0, 0, 0, 0, "PRTTSYN_INC_L"},
+ {I40E_PRTTSYN_INC_H, 0, 0, 0, 0, "PRTTSYN_INC_H"},
+ {I40E_PRTTSYN_EVNT_L(0), 1, 32, 0, 0, "PRTTSYN_EVNT_L"},
+ {I40E_PRTTSYN_EVNT_H(0), 1, 32, 0, 0, "PRTTSYN_EVNT_H"},
+ {I40E_PRTTSYN_TIME_L, 0, 0, 0, 0, "PRTTSYN_TIME_L"},
+ {I40E_PRTTSYN_TIME_H, 0, 0, 0, 0, "PRTTSYN_TIME_H"},
+ {I40E_PRTTSYN_TGT_L(0), 1, 32, 0, 0, "PRTTSYN_TGT_L"},
+ {I40E_PRTTSYN_TGT_H(0), 1, 32, 0, 0, "PRTTSYN_TGT_H"},
+ {I40E_PRTTSYN_TXTIME_L, 0, 0, 0, 0, "PRTTSYN_TXTIME_L"},
+ {I40E_PRTTSYN_TXTIME_H, 0, 0, 0, 0, "PRTTSYN_TXTIME_H"},
+ {I40E_PRTTSYN_CTL0, 0, 0, 0, 0, "PRTTSYN_CTL0"},
+ {I40E_PRTTSYN_STAT_0, 0, 0, 0, 0, "PRTTSYN_STAT_0"},
+ {I40E_PRTTSYN_CLKO(0), 1, 32, 0, 0, "PRTTSYN_CLKO"},
+ {I40E_PRTTSYN_ADJ, 0, 0, 0, 0, "PRTTSYN_ADJ"},
+ {I40E_PRTTSYN_AUX_0(0), 1, 32, 0, 0, "PRTTSYN_AUX_0"},
+ {I40E_PRTTSYN_AUX_1(0), 1, 32, 0, 0, "PRTTSYN_AUX_1"},
+ {I40E_PRTPM_EEE_STAT, 0, 0, 0, 0, "PRTPM_EEE_STAT"},
+ {I40E_PRTPM_EEER, 0, 0, 0, 0, "PRTPM_EEER"},
+ {I40E_PRTPM_EEEC, 0, 0, 0, 0, "PRTPM_EEEC"},
+ {I40E_PRTPM_RLPIC, 0, 0, 0, 0, "PRTPM_RLPIC"},
+ {I40E_PRTPM_TLPIC, 0, 0, 0, 0, "PRTPM_TLPIC"},
+ {I40E_PRTPM_EEETXC, 0, 0, 0, 0, "PRTPM_EEETXC"},
+ {I40E_PRTPM_EEEFWD, 0, 0, 0, 0, "PRTPM_EEEFWD"},
+ {I40E_PRTPM_SAL(0), 3, 32, 0, 0, "PRTPM_SAL"},
+ {I40E_PRTPM_SAH(0), 3, 32, 0, 0, "PRTPM_SAH"},
+ {I40E_PRTDCB_TFCS, 0, 0, 0, 0, "PRTDCB_TFCS"},
+ {I40E_PRTDCB_FCTTVN(0), 3, 32, 0, 0, "PRTDCB_FCTTVN"},
+ {I40E_PRTDCB_FCRTV, 0, 0, 0, 0, "PRTDCB_FCRTV"},
+ {I40E_PRTDCB_FCCFG, 0, 0, 0, 0, "PRTDCB_FCCFG"},
+ {I40E_PRTDCB_TPFCTS(0), 7, 32, 0, 0, "PRTDCB_TPFCTS"},
+ {I40E_VFQF_HLUT1(0, 0), 15, 1024, 127, 4, "VFQF_HLUT1"},
+ {I40E_VSIQF_HLUT(0, 0), 15, 2048, 383, 4, "VSIQF_HLUT"},
+ {I40E_VFQF_HKEY1(0, 0), 12, 1024, 127, 4, "VFQF_HKEY1"},
+ {I40E_VFQF_HREGION1(0, 0), 7, 1024, 127, 4, "VFQF_HREGION1"},
+ {I40E_VFQF_HENA1(0, 0), 1, 1024, 127, 4, "VFQF_HENA1"},
+ {I40E_PFQF_HLUT(0), 127, 128, 0, 0, "PFQF_HLUT"},
+ {I40E_X722_PFQF_HLUT(0), 127, 128, 0, 0, "X722_PFQF_HLUT"},
+ {I40E_PFQF_CTL_1, 0, 0, 0, 0, "PFQF_CTL_1"},
+ {I40E_PFQF_FDSTAT, 0, 0, 0, 0, "PFQF_FDSTAT"},
+ {I40E_PRT_MNG_MIPAF6(0), 15, 32, 0, 0, "PRT_MNG_MIPAF6"},
+ {I40E_PRT_MNG_MFUTP(0), 15, 32, 0, 0, "PRT_MNG_MFUTP"},
+ {I40E_PRTQF_FLX_PIT(0), 8, 32, 0, 0, "PRTQF_FLX_PIT"},
+ {I40E_PRT_MNG_MAVTV(0), 7, 32, 0, 0, "PRT_MNG_MAVTV"},
+ {I40E_PRT_MNG_MDEF(0), 7, 32, 0, 0, "PRT_MNG_MDEF"},
+ {I40E_PRT_MNG_MDEF_EXT(0), 7, 32, 0, 0, "PRT_MNG_MDEF_EXT"},
+ {I40E_PRT_MNG_MIPAF4(0), 3, 32, 0, 0, "PRT_MNG_MIPAF4"},
+ {I40E_PRT_MNG_MMAH(0), 3, 32, 0, 0, "PRT_MNG_MMAH"},
+ {I40E_PRT_MNG_MMAL(0), 3, 32, 0, 0, "PRT_MNG_MMAL"},
+ {I40E_PRT_MNG_MDEFVSI(0), 3, 32, 0, 0, "PRT_MNG_MDEFVSI"},
+ {I40E_PRT_MNG_METF(0), 3, 32, 0, 0, "PRT_MNG_METF"},
+ {I40E_PRT_MNG_MANC, 0, 0, 0, 0, "PRT_MNG_MANC"},
+ {I40E_PRT_MNG_MNGONLY, 0, 0, 0, 0, "PRT_MNG_MNGONLY"},
+ {I40E_PRT_MNG_MSFM, 0, 0, 0, 0, "PRT_MNG_MSFM"},
+ {I40E_GLQF_APBVT(0), 2047, 4, 0, 0, "GLQF_APBVT"},
+ {I40E_GLQF_PCNT(0), 511, 4, 0, 0, "GLQF_PCNT"},
+ {I40E_GLQF_FD_PCTYPES(0), 63, 4, 0, 0, "GLQF_FD_PCTYPES"},
+ {I40E_GLQF_ORT(0), 63, 4, 0, 0, "GLQF_ORT"},
+ {I40E_GLQF_PIT(0), 23, 4, 0, 0, "GLQF_PIT"},
+ {I40E_GL_PRS_FVBM(0), 3, 4, 0, 0, "GL_PRS_FVBM"},
+ {I40E_GLQF_FDCNT_0, 0, 0, 0, 0, "GLQF_FDCNT_0"},
+ {I40E_GL_MTG_FLU_MSK_H, 0, 0, 0, 0, "GL_MTG_FLU_MSK_H"},
+ {I40E_GL_SWR_DEF_ACT_EN(0), 1, 4, 0, 0, "GL_SWR_DEF_ACT_EN"},
+ {I40E_GLQF_HKEY(0), 12, 4, 0, 0, "GLQF_HKEY"},
+ {I40E_GL_SWR_DEF_ACT(0), 35, 4, 0, 0, "GL_SWR_DEF_ACT"},
+ {I40E_GLQF_FDEVICTFLAG, 0, 0, 0, 0, "GLQF_FDEVICTFLAG"},
+ {I40E_PFQF_CTL_2, 0, 0, 0, 0, "PFQF_CTL_2"},
+ {I40E_GLQF_FDEVICTENA(0), 1, 4, 0, 0, "GLQF_FDEVICTENA"},
+ {I40E_VSIQF_HKEY(0, 0), 12, 2048, 383, 4, "VSIQF_HKEY"},
+ {I40E_GLPRT_GORCL(0), 3, 8, 0, 0, "GLPRT_GORCL"},
+ {I40E_GLPRT_GORCH(0), 3, 8, 0, 0, "GLPRT_GORCH"},
+ {I40E_GLPRT_MLFC(0), 3, 8, 0, 0, "GLPRT_MLFC"},
+ {I40E_GLPRT_MRFC(0), 3, 8, 0, 0, "GLPRT_MRFC"},
+ {I40E_GLPRT_CRCERRS(0), 3, 8, 0, 0, "GLPRT_CRCERRS"},
+ {I40E_GLPRT_RLEC(0), 3, 8, 0, 0, "GLPRT_RLEC"},
+ {I40E_GLPRT_ILLERRC(0), 3, 8, 0, 0, "GLPRT_ILLERRC"},
+ {I40E_GLPRT_RUC(0), 3, 8, 0, 0, "GLPRT_RUC"},
+ {I40E_GLPRT_ROC(0), 3, 8, 0, 0, "GLPRT_ROC"},
+ {I40E_GLPRT_LXONRXC(0), 3, 8, 0, 0, "GLPRT_LXONRXC"},
+ {I40E_GLPRT_LXOFFRXC(0), 3, 8, 0, 0, "GLPRT_LXOFFRXC"},
+ {I40E_GLPRT_PXONRXC(0, 0), 3, 8, 7, 32, "GLPRT_PXONRXC"},
+ {I40E_GLPRT_PXOFFRXC(0, 0), 3, 8, 7, 32, "GLPRT_PXOFFRXC"},
+ {I40E_GLPRT_RXON2OFFCNT(0, 0), 3, 8, 7, 32, "GLPRT_RXON2OFFCNT"},
+ {I40E_GLPRT_PRC64L(0), 3, 8, 0, 0, "GLPRT_PRC64L"},
+ {I40E_GLPRT_PRC64H(0), 3, 8, 0, 0, "GLPRT_PRC64H"},
+ {I40E_GLPRT_PRC127L(0), 3, 8, 0, 0, "GLPRT_PRC127L"},
+ {I40E_GLPRT_PRC127H(0), 3, 8, 0, 0, "GLPRT_PRC127H"},
+ {I40E_GLPRT_PRC255L(0), 3, 8, 0, 0, "GLPRT_PRC255L"},
+ {I40E_GLPRT_PRC255H(0), 3, 8, 0, 0, "GLPRT_PRC255H"},
+ {I40E_GLPRT_PRC511L(0), 3, 8, 0, 0, "GLPRT_PRC511L"},
+ {I40E_GLPRT_PRC511H(0), 3, 8, 0, 0, "GLPRT_PRC511H"},
+ {I40E_GLPRT_PRC1023L(0), 3, 8, 0, 0, "GLPRT_PRC1023L"},
+ {I40E_GLPRT_PRC1023H(0), 3, 8, 0, 0, "GLPRT_PRC1023H"},
+ {I40E_GLPRT_PRC1522L(0), 3, 8, 0, 0, "GLPRT_PRC1522L"},
+ {I40E_GLPRT_PRC1522H(0), 3, 8, 0, 0, "GLPRT_PRC1522H"},
+ {I40E_GLPRT_PRC9522L(0), 3, 8, 0, 0, "GLPRT_PRC9522L"},
+ {I40E_GLPRT_PRC9522H(0), 3, 8, 0, 0, "GLPRT_PRC9522H"},
+ {I40E_GLPRT_RFC(0), 3, 8, 0, 0, "GLPRT_RFC"},
+ {I40E_GLPRT_RJC(0), 3, 8, 0, 0, "GLPRT_RJC"},
+ {I40E_GLPRT_UPRCL(0), 3, 8, 0, 0, "GLPRT_UPRCL"},
+ {I40E_GLPRT_UPRCH(0), 3, 8, 0, 0, "GLPRT_UPRCH"},
+ {I40E_GLPRT_MPRCL(0), 3, 8, 0, 0, "GLPRT_MPRCL"},
+ {I40E_GLPRT_MPRCH(0), 3, 8, 0, 0, "GLPRT_MPRCH"},
+ {I40E_GLPRT_BPRCL(0), 3, 8, 0, 0, "GLPRT_BPRCL"},
+ {I40E_GLPRT_BPRCH(0), 3, 8, 0, 0, "GLPRT_BPRCH"},
+ {I40E_GLPRT_RDPC(0), 3, 8, 0, 0, "GLPRT_RDPC"},
+ {I40E_GLPRT_LDPC(0), 3, 8, 0, 0, "GLPRT_LDPC"},
+ {I40E_GLPRT_RUPP(0), 3, 8, 0, 0, "GLPRT_RUPP"},
+ {I40E_GLPRT_GOTCL(0), 3, 8, 0, 0, "GLPRT_GOTCL"},
+ {I40E_GLPRT_GOTCH(0), 3, 8, 0, 0, "GLPRT_GOTCH"},
+ {I40E_GLPRT_PTC64L(0), 3, 8, 0, 0, "GLPRT_PTC64L"},
+ {I40E_GLPRT_PTC64H(0), 3, 8, 0, 0, "GLPRT_PTC64H"},
+ {I40E_GLPRT_PTC127L(0), 3, 8, 0, 0, "GLPRT_PTC127L"},
+ {I40E_GLPRT_PTC127H(0), 3, 8, 0, 0, "GLPRT_PTC127H"},
+ {I40E_GLPRT_PTC255L(0), 3, 8, 0, 0, "GLPRT_PTC255L"},
+ {I40E_GLPRT_PTC255H(0), 3, 8, 0, 0, "GLPRT_PTC255H"},
+ {I40E_GLPRT_PTC511L(0), 3, 8, 0, 0, "GLPRT_PTC511L"},
+ {I40E_GLPRT_PTC511H(0), 3, 8, 0, 0, "GLPRT_PTC511H"},
+ {I40E_GLPRT_PTC1023L(0), 3, 8, 0, 0, "GLPRT_PTC1023L"},
+ {I40E_GLPRT_PTC1023H(0), 3, 8, 0, 0, "GLPRT_PTC1023H"},
+ {I40E_GLPRT_PTC1522L(0), 3, 8, 0, 0, "GLPRT_PTC1522L"},
+ {I40E_GLPRT_PTC1522H(0), 3, 8, 0, 0, "GLPRT_PTC1522H"},
+ {I40E_GLPRT_PTC9522L(0), 3, 8, 0, 0, "GLPRT_PTC9522L"},
+ {I40E_GLPRT_PTC9522H(0), 3, 8, 0, 0, "GLPRT_PTC9522H"},
+ {I40E_GLPRT_PXONTXC(0, 0), 3, 8, 7, 32, "GLPRT_PXONTXC"},
+ {I40E_GLPRT_PXOFFTXC(0, 0), 3, 8, 7, 32, "GLPRT_PXOFFTXC"},
+ {I40E_GLPRT_LXONTXC(0), 3, 8, 0, 0, "GLPRT_LXONTXC"},
+ {I40E_GLPRT_LXOFFTXC(0), 3, 8, 0, 0, "GLPRT_LXOFFTXC"},
+ {I40E_GLPRT_UPTCL(0), 3, 8, 0, 0, "GLPRT_UPTCL"},
+ {I40E_GLPRT_UPTCH(0), 3, 8, 0, 0, "GLPRT_UPTCH"},
+ {I40E_GLPRT_MPTCL(0), 3, 8, 0, 0, "GLPRT_MPTCL"},
+ {I40E_GLPRT_MPTCH(0), 3, 8, 0, 0, "GLPRT_MPTCH"},
+ {I40E_GLPRT_BPTCL(0), 3, 8, 0, 0, "GLPRT_BPTCL"},
+ {I40E_GLPRT_BPTCH(0), 3, 8, 0, 0, "GLPRT_BPTCH"},
+ {I40E_GLPRT_TDOLD(0), 3, 8, 0, 0, "GLPRT_TDOLD"},
+ {I40E_GLV_RDPC(0), 383, 8, 0, 0, "GLV_RDPC"},
+ {I40E_GL_FCOELAST(0), 143, 8, 0, 0, "GL_FCOELAST"},
+ {I40E_GL_FCOEDDPC(0), 143, 8, 0, 0, "GL_FCOEDDPC"},
+ {I40E_GL_FCOECRC(0), 143, 8, 0, 0, "GL_FCOECRC"},
+ {I40E_GL_FCOEPRC(0), 143, 8, 0, 0, "GL_FCOEPRC"},
+ {I40E_GL_RXERR1_L(0), 143, 8, 0, 0, "GL_RXERR1_L"},
+ {I40E_GL_FCOEDIFEC(0), 143, 8, 0, 0, "GL_FCOEDIFEC"},
+ {I40E_GL_RXERR2_L(0), 143, 8, 0, 0, "GL_RXERR2_L"},
+ {I40E_GL_FCOEDWRCL(0), 143, 8, 0, 0, "GL_FCOEDWRCL"},
+ {I40E_GL_FCOEDWRCH(0), 143, 8, 0, 0, "GL_FCOEDWRCH"},
+ {I40E_GL_FCOERPDC(0), 143, 8, 0, 0, "GL_FCOERPDC"},
+ {I40E_GLV_GOTCL(0), 383, 8, 0, 0, "GLV_GOTCL"},
+ {I40E_GLV_GOTCH(0), 383, 8, 0, 0, "GLV_GOTCH"},
+ {I40E_GLSW_GOTCL(0), 15, 8, 0, 0, "GLSW_GOTCL"},
+ {I40E_GLSW_GOTCH(0), 15, 8, 0, 0, "GLSW_GOTCH"},
+ {I40E_GLVEBVL_GOTCL(0), 127, 8, 0, 0, "GLVEBVL_GOTCL"},
+ {I40E_GLVEBVL_GOTCH(0), 127, 8, 0, 0, "GLVEBVL_GOTCH"},
+ {I40E_GLVEBTC_TBCL(0, 0), 7, 8, 15, 64, "GLVEBTC_TBCL"},
+ {I40E_GLVEBTC_TBCH(0, 0), 7, 8, 15, 64, "GLVEBTC_TBCH"},
+ {I40E_GLVEBTC_TPCL(0, 0), 7, 8, 15, 64, "GLVEBTC_TPCL"},
+ {I40E_GLVEBTC_TPCH(0, 0), 7, 8, 15, 64, "GLVEBTC_TPCH"},
+ {I40E_GLV_UPTCL(0), 383, 8, 0, 0, "GLV_UPTCL"},
+ {I40E_GLV_UPTCH(0), 383, 8, 0, 0, "GLV_UPTCH"},
+ {I40E_GLV_MPTCL(0), 383, 8, 0, 0, "GLV_MPTCL"},
+ {I40E_GLV_MPTCH(0), 383, 8, 0, 0, "GLV_MPTCH"},
+ {I40E_GLV_BPTCL(0), 383, 8, 0, 0, "GLV_BPTCL"},
+ {I40E_GLV_BPTCH(0), 383, 8, 0, 0, "GLV_BPTCH"},
+ {I40E_GLSW_UPTCL(0), 15, 8, 0, 0, "GLSW_UPTCL"},
+ {I40E_GLSW_UPTCH(0), 15, 8, 0, 0, "GLSW_UPTCH"},
+ {I40E_GLSW_MPTCL(0), 15, 8, 0, 0, "GLSW_MPTCL"},
+ {I40E_GLSW_MPTCH(0), 15, 8, 0, 0, "GLSW_MPTCH"},
+ {I40E_GLSW_BPTCL(0), 15, 8, 0, 0, "GLSW_BPTCL"},
+ {I40E_GLSW_BPTCH(0), 15, 8, 0, 0, "GLSW_BPTCH"},
+ {I40E_GLV_TEPC(0), 383, 4, 0, 0, "GLV_TEPC"},
+ {I40E_GL_FCOEPTC(0), 143, 8, 0, 0, "GL_FCOEPTC"},
+ {I40E_GLSW_TDPC(0), 15, 8, 0, 0, "GLSW_TDPC"},
+ {I40E_GL_FCOEDWTCL(0), 143, 8, 0, 0, "GL_FCOEDWTCL"},
+ {I40E_GL_FCOEDWTCH(0), 143, 8, 0, 0, "GL_FCOEDWTCH"},
+ {I40E_GL_FCOEDIXEC(0), 143, 8, 0, 0, "GL_FCOEDIXEC"},
+ {I40E_GL_FCOEDIXVC(0), 143, 8, 0, 0, "GL_FCOEDIXVC"},
+ {I40E_GL_FCOEDIFTCL(0), 143, 8, 0, 0, "GL_FCOEDIFTCL"},
+ {I40E_GLV_GORCL(0), 383, 8, 0, 0, "GLV_GORCL"},
+ {I40E_GLV_GORCH(0), 383, 8, 0, 0, "GLV_GORCH"},
+ {I40E_GLSW_GORCL(0), 15, 8, 0, 0, "GLSW_GORCL"},
+ {I40E_GLSW_GORCH(0), 15, 8, 0, 0, "GLSW_GORCH"},
+ {I40E_GLVEBVL_GORCL(0), 127, 8, 0, 0, "GLVEBVL_GORCL"},
+ {I40E_GLVEBVL_GORCH(0), 127, 8, 0, 0, "GLVEBVL_GORCH"},
+ {I40E_GLVEBTC_RBCL(0, 0), 7, 8, 15, 64, "GLVEBTC_RBCL"},
+ {I40E_GLVEBTC_RBCH(0, 0), 7, 8, 15, 64, "GLVEBTC_RBCH"},
+ {I40E_GLVEBTC_RPCL(0, 0), 7, 8, 15, 64, "GLVEBTC_RPCL"},
+ {I40E_GLVEBTC_RPCH(0, 0), 7, 8, 15, 64, "GLVEBTC_RPCH"},
+ {I40E_GLV_UPRCL(0), 383, 8, 0, 0, "GLV_UPRCL"},
+ {I40E_GLV_UPRCH(0), 383, 8, 0, 0, "GLV_UPRCH"},
+ {I40E_GLV_MPRCL(0), 383, 8, 0, 0, "GLV_MPRCL"},
+ {I40E_GLV_MPRCH(0), 383, 8, 0, 0, "GLV_MPRCH"},
+ {I40E_GLV_BPRCL(0), 383, 8, 0, 0, "GLV_BPRCL"},
+ {I40E_GLV_BPRCH(0), 383, 8, 0, 0, "GLV_BPRCH"},
+ {I40E_GLV_RUPP(0), 383, 8, 0, 0, "GLV_RUPP"},
+ {I40E_GLSW_UPRCL(0), 15, 8, 0, 0, "GLSW_UPRCL"},
+ {I40E_GLSW_UPRCH(0), 15, 8, 0, 0, "GLSW_UPRCH"},
+ {I40E_GLSW_MPRCL(0), 15, 8, 0, 0, "GLSW_MPRCL"},
+ {I40E_GLSW_MPRCH(0), 15, 8, 0, 0, "GLSW_MPRCH"},
+ {I40E_GLSW_BPRCL(0), 15, 8, 0, 0, "GLSW_BPRCL"},
+ {I40E_GLSW_BPRCH(0), 15, 8, 0, 0, "GLSW_BPRCH"},
+ {I40E_GLSW_RUPP(0), 15, 8, 0, 0, "GLSW_RUPP"},
+ {I40E_GLVEBVL_UPCL(0), 127, 8, 0, 0, "GLVEBVL_UPCL"},
+ {I40E_GLVEBVL_UPCH(0), 127, 8, 0, 0, "GLVEBVL_UPCH"},
+ {I40E_GLVEBVL_MPCL(0), 127, 8, 0, 0, "GLVEBVL_MPCL"},
+ {I40E_GLVEBVL_MPCH(0), 127, 8, 0, 0, "GLVEBVL_MPCH"},
+ {I40E_GLVEBVL_BPCL(0), 127, 8, 0, 0, "GLVEBVL_BPCL"},
+ {I40E_GLVEBVL_BPCH(0), 127, 8, 0, 0, "GLVEBVL_BPCH"},
+ {I40E_GLGEN_STAT_HALT, 0, 0, 0, 0, "GLGEN_STAT_HALT"},
+ {I40E_GLGEN_STAT_CLEAR, 0, 0, 0, 0, "GLGEN_STAT_CLEAR"},
+ {0, 0, 0, 0, 0, NULL}
+};
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.c b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.c
new file mode 100644
index 00000000..2a28ee34
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.c
@@ -0,0 +1,3241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_udp.h>
+#include <rte_ip.h>
+#include <rte_net.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+#define DEFAULT_TX_RS_THRESH 32
+#define DEFAULT_TX_FREE_THRESH 32
+
+#define I40E_TX_MAX_BURST 32
+
+#define I40E_DMA_MEM_ALIGN 4096
+
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define I40E_RING_BASE_ALIGN 128
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+
+#ifdef RTE_LIBRTE_IEEE1588
+#define I40E_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define I40E_TX_IEEE1588_TMST 0
+#endif
+
+#define I40E_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG | \
+ PKT_TX_OUTER_IP_CKSUM)
+
+#define I40E_TX_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG | \
+ PKT_TX_QINQ_PKT | \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_TUNNEL_MASK | \
+ I40E_TX_IEEE1588_TMST)
+
+#define I40E_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
+
+static inline void
+i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
+{
+ if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+ mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->vlan_tci =
+ rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
+ PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
+ rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1));
+ } else {
+ mb->vlan_tci = 0;
+ }
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
+ (1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
+ mb->ol_flags |= PKT_RX_QINQ_STRIPPED;
+ mb->vlan_tci_outer = mb->vlan_tci;
+ mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
+ PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+ rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1),
+ rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2));
+ } else {
+ mb->vlan_tci_outer = 0;
+ }
+#endif
+ PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
+ mb->vlan_tci, mb->vlan_tci_outer);
+}
+
+/* Translate the rx descriptor status to pkt flags */
+static inline uint64_t
+i40e_rxd_status_to_pkt_flags(uint64_t qword)
+{
+ uint64_t flags;
+
+ /* Check if RSS_HASH */
+ flags = (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+ I40E_RX_DESC_FLTSTAT_RSS_HASH) ==
+ I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
+
+ /* Check if FDIR Match */
+ flags |= (qword & (1 << I40E_RX_DESC_STATUS_FLM_SHIFT) ?
+ PKT_RX_FDIR : 0);
+
+ return flags;
+}
+
+static inline uint64_t
+i40e_rxd_error_to_pkt_flags(uint64_t qword)
+{
+ uint64_t flags = 0;
+ uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT);
+
+#define I40E_RX_ERR_BITS 0x3f
+ if (likely((error_bits & I40E_RX_ERR_BITS) == 0)) {
+ flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ return flags;
+ }
+
+ if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
+ flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
+ flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
+ flags |= PKT_RX_EIP_CKSUM_BAD;
+
+ return flags;
+}
+
+/* Function to check and set the ieee1588 timesync index and get the
+ * appropriate flags.
+ */
+#ifdef RTE_LIBRTE_IEEE1588
+static inline uint64_t
+i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
+{
+ uint64_t pkt_flags = 0;
+ uint16_t tsyn = (qword & (I40E_RXD_QW1_STATUS_TSYNVALID_MASK
+ | I40E_RXD_QW1_STATUS_TSYNINDX_MASK))
+ >> I40E_RX_DESC_STATUS_TSYNINDX_SHIFT;
+
+ if ((mb->packet_type & RTE_PTYPE_L2_MASK)
+ == RTE_PTYPE_L2_ETHER_TIMESYNC)
+ pkt_flags = PKT_RX_IEEE1588_PTP;
+ if (tsyn & 0x04) {
+ pkt_flags |= PKT_RX_IEEE1588_TMST;
+ mb->timesync = tsyn & 0x03;
+ }
+
+ return pkt_flags;
+}
+#endif
+
+#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03
+#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01
+#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02
+#define I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK 0x03
+#define I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX 0x01
+
+static inline uint64_t
+i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
+{
+ uint64_t flags = 0;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ uint16_t flexbh, flexbl;
+
+ flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK;
+ flexbl = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT) &
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK;
+
+
+ if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
+ flags |= PKT_RX_FDIR_ID;
+ } else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) {
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi);
+ flags |= PKT_RX_FDIR_FLX;
+ }
+ if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) {
+ mb->hash.fdir.lo =
+ rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo);
+ flags |= PKT_RX_FDIR_FLX;
+ }
+#else
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
+ flags |= PKT_RX_FDIR_ID;
+#endif
+ return flags;
+}
+
+static inline void
+i40e_parse_tunneling_params(uint64_t ol_flags,
+ union i40e_tx_offload tx_offload,
+ uint32_t *cd_tunneling)
+{
+ /* EIPT: External (outer) IP header type */
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ else if (ol_flags & PKT_TX_OUTER_IPV4)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ else if (ol_flags & PKT_TX_OUTER_IPV6)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+
+ /* EIPLEN: External (outer) IP header length, in DWords */
+ *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* L4TUNT: L4 Tunneling Type */
+ switch (ol_flags & PKT_TX_TUNNEL_MASK) {
+ case PKT_TX_TUNNEL_IPIP:
+ /* for non UDP / GRE tunneling, set to 00b */
+ break;
+ case PKT_TX_TUNNEL_VXLAN:
+ case PKT_TX_TUNNEL_GENEVE:
+ *cd_tunneling |= I40E_TXD_CTX_UDP_TUNNELING;
+ break;
+ case PKT_TX_TUNNEL_GRE:
+ *cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
+ break;
+ default:
+ PMD_TX_LOG(ERR, "Tunnel type not supported");
+ return;
+ }
+
+ /* L4TUNLEN: L4 Tunneling Length, in Words
+ *
+ * We depend on app to set rte_mbuf.l2_len correctly.
+ * For IP in GRE it should be set to the length of the GRE
+ * header;
+ * for MAC in GRE or MAC in UDP it should be set to the length
+ * of the GRE or UDP headers plus the inner MAC up to including
+ * its last Ethertype.
+ */
+ *cd_tunneling |= (tx_offload.l2_len >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+}
+
+static inline void
+i40e_txd_enable_checksum(uint64_t ol_flags,
+ uint32_t *td_cmd,
+ uint32_t *td_offset,
+ union i40e_tx_offload tx_offload)
+{
+ /* Set MACLEN */
+ if (ol_flags & PKT_TX_TUNNEL_MASK)
+ *td_offset |= (tx_offload.outer_l2_len >> 1)
+ << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+ else
+ *td_offset |= (tx_offload.l2_len >> 1)
+ << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* Enable L3 checksum offloads */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ *td_offset |= (tx_offload.l3_len >> 2)
+ << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (ol_flags & PKT_TX_IPV4) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ *td_offset |= (tx_offload.l3_len >> 2)
+ << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (ol_flags & PKT_TX_IPV6) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ *td_offset |= (tx_offload.l3_len >> 2)
+ << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (tx_offload.l4_len >> 2)
+ << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ return;
+ }
+
+ /* Enable L4 checksum offloads */
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case PKT_TX_UDP_CKSUM:
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
+
+/* Construct the tx flags */
+static inline uint64_t
+i40e_build_ctob(uint32_t td_cmd,
+ uint32_t td_offset,
+ unsigned int size,
+ uint32_t td_tag)
+{
+ return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+static inline int
+i40e_xmit_cleanup(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *sw_ring = txq->sw_ring;
+ volatile struct i40e_tx_desc *txd = txq->tx_ring;
+ uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+ uint16_t nb_tx_desc = txq->nb_tx_desc;
+ uint16_t desc_to_clean_to;
+ uint16_t nb_tx_to_clean;
+
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
+ if (desc_to_clean_to >= nb_tx_desc)
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+ if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) {
+ PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
+ "(port=%d queue=%d)", desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+ return -1;
+ }
+
+ if (last_desc_cleaned > desc_to_clean_to)
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
+ else
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
+
+ txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
+
+ txq->last_desc_cleaned = desc_to_clean_to;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
+
+ return 0;
+}
+
+static inline int
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+check_rx_burst_bulk_alloc_preconditions(struct i40e_rx_queue *rxq)
+#else
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
+#endif
+{
+ int ret = 0;
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ if (!(rxq->rx_free_thresh >= RTE_PMD_I40E_RX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "RTE_PMD_I40E_RX_MAX_BURST=%d",
+ rxq->rx_free_thresh, RTE_PMD_I40E_RX_MAX_BURST);
+ ret = -EINVAL;
+ } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "rxq->nb_rx_desc=%d",
+ rxq->rx_free_thresh, rxq->nb_rx_desc);
+ ret = -EINVAL;
+ } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "rxq->rx_free_thresh=%d",
+ rxq->nb_rx_desc, rxq->rx_free_thresh);
+ ret = -EINVAL;
+ }
+#else
+ ret = -EINVAL;
+#endif
+
+ return ret;
+}
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+#define I40E_LOOK_AHEAD 8
+#if (I40E_LOOK_AHEAD != 8)
+#error "PMD I40E: I40E_LOOK_AHEAD must be 8\n"
+#endif
+static inline int
+i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t pkt_len;
+ uint64_t qword1;
+ uint32_t rx_status;
+ int32_t s[I40E_LOOK_AHEAD], nb_dd;
+ int32_t i, j, nb_rx = 0;
+ uint64_t pkt_flags;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxep = &rxq->sw_ring[rxq->rx_tail];
+
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+
+ /* Make sure there is at least 1 packet to receive */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /**
+ * Scan LOOK_AHEAD descriptors at a time to determine which
+ * descriptors reference packets that are ready to be received.
+ */
+ for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; i+=I40E_LOOK_AHEAD,
+ rxdp += I40E_LOOK_AHEAD, rxep += I40E_LOOK_AHEAD) {
+ /* Read desc statuses backwards to avoid race condition */
+ for (j = I40E_LOOK_AHEAD - 1; j >= 0; j--) {
+ qword1 = rte_le_to_cpu_64(\
+ rxdp[j].wb.qword1.status_error_len);
+ s[j] = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ }
+
+ rte_smp_rmb();
+
+ /* Compute how many status bits were set */
+ for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++)
+ nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
+
+ nb_rx += nb_dd;
+
+ /* Translate descriptor info to mbuf parameters */
+ for (j = 0; j < nb_dd; j++) {
+ mb = rxep[j].mbuf;
+ qword1 = rte_le_to_cpu_64(\
+ rxdp[j].wb.qword1.status_error_len);
+ pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->ol_flags = 0;
+ i40e_rxd_to_vlan_tci(mb, &rxdp[j]);
+ pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
+ pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
+ mb->packet_type =
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT)];
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ mb->hash.rss = rte_le_to_cpu_32(\
+ rxdp[j].wb.qword0.hi_dword.rss);
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb);
+
+#ifdef RTE_LIBRTE_IEEE1588
+ pkt_flags |= i40e_get_iee15888_flags(mb, qword1);
+#endif
+ mb->ol_flags |= pkt_flags;
+
+ }
+
+ for (j = 0; j < I40E_LOOK_AHEAD; j++)
+ rxq->rx_stage[i + j] = rxep[j].mbuf;
+
+ if (nb_dd != I40E_LOOK_AHEAD)
+ break;
+ }
+
+ /* Clear software ring entries */
+ for (i = 0; i < nb_rx; i++)
+ rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
+
+ return nb_rx;
+}
+
+static inline uint16_t
+i40e_rx_fill_from_stage(struct i40e_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t i;
+ struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+
+ nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+ for (i = 0; i < nb_pkts; i++)
+ rx_pkts[i] = stage[i];
+
+ rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+ rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
+
+ return nb_pkts;
+}
+
+static inline int
+i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t alloc_idx, i;
+ uint64_t dma_addr;
+ int diag;
+
+ /* Allocate buffers in bulk */
+ alloc_idx = (uint16_t)(rxq->rx_free_trigger -
+ (rxq->rx_free_thresh - 1));
+ rxep = &(rxq->sw_ring[alloc_idx]);
+ diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
+ rxq->rx_free_thresh);
+ if (unlikely(diag != 0)) {
+ PMD_DRV_LOG(ERR, "Failed to get mbufs in bulk");
+ return -ENOMEM;
+ }
+
+ rxdp = &rxq->rx_ring[alloc_idx];
+ for (i = 0; i < rxq->rx_free_thresh; i++) {
+ if (likely(i < (rxq->rx_free_thresh - 1)))
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxep[i + 1].mbuf);
+
+ mb = rxep[i].mbuf;
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->next = NULL;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(\
+ rte_mbuf_data_iova_default(mb));
+ rxdp[i].read.hdr_addr = 0;
+ rxdp[i].read.pkt_addr = dma_addr;
+ }
+
+ /* Update rx tail regsiter */
+ rte_wmb();
+ I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
+
+ rxq->rx_free_trigger =
+ (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
+ if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+
+ return 0;
+}
+
+static inline uint16_t
+rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
+ struct rte_eth_dev *dev;
+ uint16_t nb_rx = 0;
+
+ if (!nb_pkts)
+ return 0;
+
+ if (rxq->rx_nb_avail)
+ return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ nb_rx = (uint16_t)i40e_rx_scan_hw_ring(rxq);
+ rxq->rx_next_avail = 0;
+ rxq->rx_nb_avail = nb_rx;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+
+ if (rxq->rx_tail > rxq->rx_free_trigger) {
+ if (i40e_rx_alloc_bufs(rxq) != 0) {
+ uint16_t i, j;
+
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+
+ rxq->rx_nb_avail = 0;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
+ for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
+ rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
+
+ return 0;
+ }
+ }
+
+ if (rxq->rx_tail >= rxq->nb_rx_desc)
+ rxq->rx_tail = 0;
+
+ if (rxq->rx_nb_avail)
+ return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ return 0;
+}
+
+static uint16_t
+i40e_recv_pkts_bulk_alloc(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_rx = 0, n, count;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ if (likely(nb_pkts <= RTE_PMD_I40E_RX_MAX_BURST))
+ return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+ while (nb_pkts) {
+ n = RTE_MIN(nb_pkts, RTE_PMD_I40E_RX_MAX_BURST);
+ count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+ nb_rx = (uint16_t)(nb_rx + count);
+ nb_pkts = (uint16_t)(nb_pkts - count);
+ if (count < n)
+ break;
+ }
+
+ return nb_rx;
+}
+#else
+static uint16_t
+i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+
+uint16_t
+i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq;
+ volatile union i40e_rx_desc *rx_ring;
+ volatile union i40e_rx_desc *rxdp;
+ union i40e_rx_desc rxd;
+ struct i40e_rx_entry *sw_ring;
+ struct i40e_rx_entry *rxe;
+ struct rte_eth_dev *dev;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ uint16_t nb_rx;
+ uint32_t rx_status;
+ uint64_t qword1;
+ uint16_t rx_packet_len;
+ uint16_t rx_id, nb_hold;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+ uint32_t *ptype_tbl;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+ ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ while (nb_rx < nb_pkts) {
+ rxdp = &rx_ring[rx_id];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
+ /* Check the DD bit first */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ rxd = *rxdp;
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (unlikely(rx_id == rxq->nb_rx_desc))
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(sw_ring[rx_id].mbuf);
+
+ /**
+ * When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(&sw_ring[rx_id]);
+ }
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+
+ rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = rx_packet_len;
+ rxm->data_len = rx_packet_len;
+ rxm->port = rxq->port_id;
+ rxm->ol_flags = 0;
+ i40e_rxd_to_vlan_tci(rxm, &rxd);
+ pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
+ pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
+ rxm->packet_type =
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ rxm->hash.rss =
+ rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
+
+#ifdef RTE_LIBRTE_IEEE1588
+ pkt_flags |= i40e_get_iee15888_flags(rxm, qword1);
+#endif
+ rxm->ol_flags |= pkt_flags;
+
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /**
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the receive tail register of queue.
+ * Update that register with the value of the last processed RX
+ * descriptor minus 1.
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+
+ return nb_rx;
+}
+
+uint16_t
+i40e_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ volatile union i40e_rx_desc *rx_ring = rxq->rx_ring;
+ volatile union i40e_rx_desc *rxdp;
+ union i40e_rx_desc rxd;
+ struct i40e_rx_entry *sw_ring = rxq->sw_ring;
+ struct i40e_rx_entry *rxe;
+ struct rte_mbuf *first_seg = rxq->pkt_first_seg;
+ struct rte_mbuf *last_seg = rxq->pkt_last_seg;
+ struct rte_mbuf *nmb, *rxm;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ struct rte_eth_dev *dev;
+ uint32_t rx_status;
+ uint64_t qword1;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ while (nb_rx < nb_pkts) {
+ rxdp = &rx_ring[rx_id];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+
+ /* Check the DD bit */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ rxd = *rxdp;
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(sw_ring[rx_id].mbuf);
+
+ /**
+ * When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(&sw_ring[rx_id]);
+ }
+
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+
+ /* Set data buffer address and data length of the mbuf */
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+ rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rxm->data_len = rx_packet_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /**
+ * If this is the first buffer of the received packet, set the
+ * pointer to the first mbuf of the packet and initialize its
+ * context. Otherwise, update the total length and the number
+ * of segments of the current scattered packet, and update the
+ * pointer to the last mbuf of the current packet.
+ */
+ if (!first_seg) {
+ first_seg = rxm;
+ first_seg->nb_segs = 1;
+ first_seg->pkt_len = rx_packet_len;
+ } else {
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len +
+ rx_packet_len);
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+
+ /**
+ * If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT))) {
+ last_seg = rxm;
+ continue;
+ }
+
+ /**
+ * This is the last buffer of the received packet. If the CRC
+ * is not stripped by the hardware:
+ * - Subtract the CRC length from the total packet length.
+ * - If the last buffer only contains the whole CRC or a part
+ * of it, free the mbuf associated to the last buffer. If part
+ * of the CRC is also contained in the previous mbuf, subtract
+ * the length of that CRC part from the data length of the
+ * previous mbuf.
+ */
+ rxm->next = NULL;
+ if (unlikely(rxq->crc_len > 0)) {
+ first_seg->pkt_len -= ETHER_CRC_LEN;
+ if (rx_packet_len <= ETHER_CRC_LEN) {
+ rte_pktmbuf_free_seg(rxm);
+ first_seg->nb_segs--;
+ last_seg->data_len =
+ (uint16_t)(last_seg->data_len -
+ (ETHER_CRC_LEN - rx_packet_len));
+ last_seg->next = NULL;
+ } else
+ rxm->data_len = (uint16_t)(rx_packet_len -
+ ETHER_CRC_LEN);
+ }
+
+ first_seg->port = rxq->port_id;
+ first_seg->ol_flags = 0;
+ i40e_rxd_to_vlan_tci(first_seg, &rxd);
+ pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
+ pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
+ first_seg->packet_type =
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ first_seg->hash.rss =
+ rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= i40e_rxd_build_fdir(&rxd, first_seg);
+
+#ifdef RTE_LIBRTE_IEEE1588
+ pkt_flags |= i40e_get_iee15888_flags(first_seg, qword1);
+#endif
+ first_seg->ol_flags |= pkt_flags;
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+ first_seg->data_off));
+ rx_pkts[nb_rx++] = first_seg;
+ first_seg = NULL;
+ }
+
+ /* Record index of the next RX descriptor to probe. */
+ rxq->rx_tail = rx_id;
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ /**
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register. Update the RDT with the value of the last processed RX
+ * descriptor minus 1, to guarantee that the RDT register is never
+ * equal to the RDH register, which creates a "full" ring situtation
+ * from the hardware point of view.
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ rx_id = (uint16_t)(rx_id == 0 ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+
+ return nb_rx;
+}
+
+/* Check if the context descriptor is needed for TX offloading */
+static inline uint16_t
+i40e_calc_context_desc(uint64_t flags)
+{
+ static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TCP_SEG |
+ PKT_TX_QINQ_PKT |
+ PKT_TX_TUNNEL_MASK;
+
+#ifdef RTE_LIBRTE_IEEE1588
+ mask |= PKT_TX_IEEE1588_TMST;
+#endif
+
+ return (flags & mask) ? 1 : 0;
+}
+
+/* set i40e TSO context descriptor */
+static inline uint64_t
+i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
+{
+ uint64_t ctx_desc = 0;
+ uint32_t cd_cmd, hdr_len, cd_tso_len;
+
+ if (!tx_offload.l4_len) {
+ PMD_DRV_LOG(DEBUG, "L4 length set to 0");
+ return ctx_desc;
+ }
+
+ /**
+ * in case of non tunneling packet, the outer_l2_len and
+ * outer_l3_len must be 0.
+ */
+ hdr_len = tx_offload.outer_l2_len +
+ tx_offload.outer_l3_len +
+ tx_offload.l2_len +
+ tx_offload.l3_len +
+ tx_offload.l4_len;
+
+ cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_tso_len = mbuf->pkt_len - hdr_len;
+ ctx_desc |= ((uint64_t)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((uint64_t)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((uint64_t)mbuf->tso_segsz <<
+ I40E_TXD_CTX_QW1_MSS_SHIFT);
+
+ return ctx_desc;
+}
+
+uint16_t
+i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq;
+ struct i40e_tx_entry *sw_ring;
+ struct i40e_tx_entry *txe, *txn;
+ volatile struct i40e_tx_desc *txd;
+ volatile struct i40e_tx_desc *txr;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint32_t cd_tunneling_params;
+ uint16_t tx_id;
+ uint16_t nb_tx;
+ uint32_t td_cmd;
+ uint32_t td_offset;
+ uint32_t td_tag;
+ uint64_t ol_flags;
+ uint16_t nb_used;
+ uint16_t nb_ctx;
+ uint16_t tx_last;
+ uint16_t slen;
+ uint64_t buf_dma_addr;
+ union i40e_tx_offload tx_offload = {0};
+
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ /* Check if the descriptor ring needs to be cleaned. */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_xmit_cleanup(txq);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ td_cmd = 0;
+ td_tag = 0;
+ td_offset = 0;
+
+ tx_pkt = *tx_pkts++;
+ RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+ ol_flags = tx_pkt->ol_flags;
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+ tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+
+ /* Calculate the number of context descriptors needed. */
+ nb_ctx = i40e_calc_context_desc(ol_flags);
+
+ /**
+ * The number of descriptors that must be allocated for
+ * a packet equals to the number of the segments of that
+ * packet plus 1 context descriptor if needed.
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+ tx_last = (uint16_t)(tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+
+ if (nb_used > txq->nb_tx_free) {
+ if (i40e_xmit_cleanup(txq) != 0) {
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ if (unlikely(nb_used > txq->tx_rs_thresh)) {
+ while (nb_used > txq->nb_tx_free) {
+ if (i40e_xmit_cleanup(txq) != 0) {
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ }
+ }
+ }
+
+ /* Descriptor based VLAN insertion */
+ if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+ td_tag = tx_pkt->vlan_tci;
+ }
+
+ /* Always enable CRC offload insertion */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+ /* Fill in tunneling parameters if necessary */
+ cd_tunneling_params = 0;
+ if (ol_flags & PKT_TX_TUNNEL_MASK)
+ i40e_parse_tunneling_params(ol_flags, tx_offload,
+ &cd_tunneling_params);
+ /* Enable checksum offloading */
+ if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK)
+ i40e_txd_enable_checksum(ol_flags, &td_cmd,
+ &td_offset, tx_offload);
+
+ if (nb_ctx) {
+ /* Setup TX context descriptor if required */
+ volatile struct i40e_tx_context_desc *ctx_txd =
+ (volatile struct i40e_tx_context_desc *)\
+ &txr[tx_id];
+ uint16_t cd_l2tag2 = 0;
+ uint64_t cd_type_cmd_tso_mss =
+ I40E_TX_DESC_DTYPE_CONTEXT;
+
+ txn = &sw_ring[txe->next_id];
+ RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ /* TSO enabled means no timestamp */
+ if (ol_flags & PKT_TX_TCP_SEG)
+ cd_type_cmd_tso_mss |=
+ i40e_set_tso_ctx(tx_pkt, tx_offload);
+ else {
+#ifdef RTE_LIBRTE_IEEE1588
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cd_type_cmd_tso_mss |=
+ ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
+ I40E_TXD_CTX_QW1_CMD_SHIFT);
+#endif
+ }
+
+ ctx_txd->tunneling_params =
+ rte_cpu_to_le_32(cd_tunneling_params);
+ if (ol_flags & PKT_TX_QINQ_PKT) {
+ cd_l2tag2 = tx_pkt->vlan_tci_outer;
+ cd_type_cmd_tso_mss |=
+ ((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 <<
+ I40E_TXD_CTX_QW1_CMD_SHIFT);
+ }
+ ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
+ ctx_txd->type_cmd_tso_mss =
+ rte_cpu_to_le_64(cd_type_cmd_tso_mss);
+
+ PMD_TX_LOG(DEBUG, "mbuf: %p, TCD[%u]:\n"
+ "tunneling_params: %#x;\n"
+ "l2tag2: %#hx;\n"
+ "rsvd: %#hx;\n"
+ "type_cmd_tso_mss: %#"PRIx64";\n",
+ tx_pkt, tx_id,
+ ctx_txd->tunneling_params,
+ ctx_txd->l2tag2,
+ ctx_txd->rsvd,
+ ctx_txd->type_cmd_tso_mss);
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+
+ if (txe->mbuf)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /* Setup TX Descriptor */
+ slen = m_seg->data_len;
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+
+ PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
+ "buf_dma_addr: %#"PRIx64";\n"
+ "td_cmd: %#x;\n"
+ "td_offset: %#x;\n"
+ "td_len: %u;\n"
+ "td_tag: %#x;\n",
+ tx_pkt, tx_id, buf_dma_addr,
+ td_cmd, td_offset, slen, td_tag);
+
+ txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+ txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
+ td_offset, slen, td_tag);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg != NULL);
+
+ /* The last packet data descriptor needs End Of Packet (EOP) */
+ td_cmd |= I40E_TX_DESC_CMD_EOP;
+ txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
+
+ if (txq->nb_tx_used >= txq->tx_rs_thresh) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "Setting RS bit on TXD id="
+ "%4u (port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
+
+ td_cmd |= I40E_TX_DESC_CMD_RS;
+
+ /* Update txq RS bit counters */
+ txq->nb_tx_used = 0;
+ }
+
+ txd->cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)td_cmd) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ }
+
+end_of_tx:
+ rte_wmb();
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (unsigned) txq->port_id, (unsigned) txq->queue_id,
+ (unsigned) tx_id, (unsigned) nb_tx);
+
+ I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
+ txq->tx_tail = tx_id;
+
+ return nb_tx;
+}
+
+static __rte_always_inline int
+i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txep;
+ uint16_t i;
+
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
+
+ for (i = 0; i < txq->tx_rs_thresh; i++)
+ rte_prefetch0((txep + i)->mbuf);
+
+ if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+ for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+ rte_mempool_put(txep->mbuf->pool, txep->mbuf);
+ txep->mbuf = NULL;
+ }
+ } else {
+ for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+ rte_pktmbuf_free_seg(txep->mbuf);
+ txep->mbuf = NULL;
+ }
+ }
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+/* Populate 4 descriptors with data from 4 mbufs */
+static inline void
+tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t dma_addr;
+ uint32_t i;
+
+ for (i = 0; i < 4; i++, txdp++, pkts++) {
+ dma_addr = rte_mbuf_data_iova(*pkts);
+ txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
+ txdp->cmd_type_offset_bsz =
+ i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
+ (*pkts)->data_len, 0);
+ }
+}
+
+/* Populate 1 descriptor with data from 1 mbuf */
+static inline void
+tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t dma_addr;
+
+ dma_addr = rte_mbuf_data_iova(*pkts);
+ txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
+ txdp->cmd_type_offset_bsz =
+ i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
+ (*pkts)->data_len, 0);
+}
+
+/* Fill hardware descriptor ring with mbuf data */
+static inline void
+i40e_tx_fill_hw_ring(struct i40e_tx_queue *txq,
+ struct rte_mbuf **pkts,
+ uint16_t nb_pkts)
+{
+ volatile struct i40e_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
+ struct i40e_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ const int N_PER_LOOP = 4;
+ const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
+ int mainpart, leftover;
+ int i, j;
+
+ mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
+ leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
+ for (i = 0; i < mainpart; i += N_PER_LOOP) {
+ for (j = 0; j < N_PER_LOOP; ++j) {
+ (txep + i + j)->mbuf = *(pkts + i + j);
+ }
+ tx4(txdp + i, pkts + i);
+ }
+ if (unlikely(leftover > 0)) {
+ for (i = 0; i < leftover; ++i) {
+ (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
+ tx1(txdp + mainpart + i, pkts + mainpart + i);
+ }
+ }
+}
+
+static inline uint16_t
+tx_xmit_pkts(struct i40e_tx_queue *txq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ volatile struct i40e_tx_desc *txr = txq->tx_ring;
+ uint16_t n = 0;
+
+ /**
+ * Begin scanning the H/W ring for done descriptors when the number
+ * of available descriptors drops below tx_free_thresh. For each done
+ * descriptor, free the associated buffer.
+ */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ /* Use available descriptor only */
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(!nb_pkts))
+ return 0;
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+ if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
+ n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
+ i40e_tx_fill_hw_ring(txq, tx_pkts, n);
+ txr[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_tail = 0;
+ }
+
+ /* Fill hardware descriptor ring with mbuf data */
+ i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+ txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
+
+ /* Determin if RS bit needs to be set */
+ if (txq->tx_tail > txq->tx_next_rs) {
+ txr[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ if (txq->tx_next_rs >= txq->nb_tx_desc)
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+ }
+
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+
+ /* Update the tx tail register */
+ rte_wmb();
+ I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+static uint16_t
+i40e_xmit_pkts_simple(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+
+ if (likely(nb_pkts <= I40E_TX_MAX_BURST))
+ return tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
+ tx_pkts, nb_pkts);
+
+ while (nb_pkts) {
+ uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
+ I40E_TX_MAX_BURST);
+
+ ret = tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
+ &tx_pkts[nb_tx], num);
+ nb_tx = (uint16_t)(nb_tx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
+static uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = i40e_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /* Check for m->nb_segs to not exceed the limits. */
+ if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
+ m->pkt_len > I40E_FRAME_SIZE_MAX) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+ } else if (m->nb_segs > I40E_TX_MAX_SEG ||
+ m->tso_segsz < I40E_MIN_TSO_MSS ||
+ m->tso_segsz > I40E_MAX_TSO_MSS ||
+ m->pkt_len > I40E_TSO_FRAME_SIZE_MAX) {
+ /* MSS outside the range (256B - 9674B) are considered
+ * malicious
+ */
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+ /* check the size of packet */
+ if (m->pkt_len < I40E_TX_MIN_PKT_LEN) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+ return i;
+}
+
+/*
+ * Find the VSI the queue belongs to. 'queue_idx' is the queue index
+ * application used, which assume having sequential ones. But from driver's
+ * perspective, it's different. For example, q0 belongs to FDIR VSI, q1-q64
+ * to MAIN VSI, , q65-96 to SRIOV VSIs, q97-128 to VMDQ VSIs. For application
+ * running on host, q1-64 and q97-128 can be used, total 96 queues. They can
+ * use queue_idx from 0 to 95 to access queues, while real queue would be
+ * different. This function will do a queue mapping to find VSI the queue
+ * belongs to.
+ */
+static struct i40e_vsi*
+i40e_pf_get_vsi_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
+{
+ /* the queue in MAIN VSI range */
+ if (queue_idx < pf->main_vsi->nb_qps)
+ return pf->main_vsi;
+
+ queue_idx -= pf->main_vsi->nb_qps;
+
+ /* queue_idx is greater than VMDQ VSIs range */
+ if (queue_idx > pf->nb_cfg_vmdq_vsi * pf->vmdq_nb_qps - 1) {
+ PMD_INIT_LOG(ERR, "queue_idx out of range. VMDQ configured?");
+ return NULL;
+ }
+
+ return pf->vmdq[queue_idx / pf->vmdq_nb_qps].vsi;
+}
+
+static uint16_t
+i40e_get_queue_offset_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
+{
+ /* the queue in MAIN VSI range */
+ if (queue_idx < pf->main_vsi->nb_qps)
+ return queue_idx;
+
+ /* It's VMDQ queues */
+ queue_idx -= pf->main_vsi->nb_qps;
+
+ if (pf->nb_cfg_vmdq_vsi)
+ return queue_idx % pf->vmdq_nb_qps;
+ else {
+ PMD_INIT_LOG(ERR, "Fail to get queue offset");
+ return (uint16_t)(-1);
+ }
+}
+
+int
+i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
+
+ rte_wmb();
+
+ /* Init the RX tail regieter. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ return err;
+ }
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+int
+i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ /*
+ * rx_queue_id is queue id application refers to, while
+ * rxq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
+ }
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+int
+i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ int err;
+ struct i40e_tx_queue *txq;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /*
+ * tx_queue_id is queue id application refers to, while
+ * rxq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+int
+i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct i40e_tx_queue *txq;
+ int err;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /*
+ * tx_queue_id is queue id application refers to, while
+ * txq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
+ tx_queue_id);
+ return err;
+ }
+
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+const uint32_t *
+i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to i40e_rxd_pkt_type_mapping() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_TIMESYNC,
+ RTE_PTYPE_L2_ETHER_LLDP,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_GRENAT,
+ RTE_PTYPE_TUNNEL_IP,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_INNER_L4_SCTP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == i40e_recv_pkts ||
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
+#endif
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts ||
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2)
+ return ptypes;
+ return NULL;
+}
+
+static int
+i40e_dev_first_queue(uint16_t idx, void **queues, int num)
+{
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ if (i != idx && queues[i])
+ return 0;
+ }
+
+ return 1;
+}
+
+static int
+i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
+ struct i40e_rx_queue *rxq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int use_def_burst_func =
+ check_rx_burst_bulk_alloc_preconditions(rxq);
+ uint16_t buf_size =
+ (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+ int use_scattered_rx =
+ ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size);
+
+ if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to do RX queue initialization");
+ return -EINVAL;
+ }
+
+ if (i40e_dev_first_queue(rxq->queue_id,
+ dev->data->rx_queues,
+ dev->data->nb_rx_queues)) {
+ /**
+ * If it is the first queue to setup,
+ * set all flags to default and call
+ * i40e_set_rx_function.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ dev->data->scattered_rx = use_scattered_rx;
+ if (use_def_burst_func)
+ ad->rx_bulk_alloc_allowed = false;
+ i40e_set_rx_function(dev);
+ return 0;
+ }
+
+ /* check bulk alloc conflict */
+ if (ad->rx_bulk_alloc_allowed && use_def_burst_func) {
+ PMD_DRV_LOG(ERR, "Can't use default burst.");
+ return -EINVAL;
+ }
+ /* check scatterred conflict */
+ if (!dev->data->scattered_rx && use_scattered_rx) {
+ PMD_DRV_LOG(ERR, "Scattered rx is required.");
+ return -EINVAL;
+ }
+ /* check vector conflict */
+ if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) {
+ PMD_DRV_LOG(ERR, "Failed vector rx setup.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf = NULL;
+ struct i40e_vf *vf = NULL;
+ struct i40e_rx_queue *rxq;
+ const struct rte_memzone *rz;
+ uint32_t ring_size;
+ uint16_t len, i;
+ uint16_t reg_idx, base, bsf, tc_mapping;
+ int q_offset, use_def_burst_func = 1;
+ uint64_t offloads;
+
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
+ vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ vsi = &vf->vsi;
+ if (!vsi)
+ return -EINVAL;
+ reg_idx = queue_idx;
+ } else {
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+ if (!vsi)
+ return -EINVAL;
+ q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+ if (q_offset < 0)
+ return -EINVAL;
+ reg_idx = vsi->base_queue + q_offset;
+ }
+
+ if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
+ PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
+ "invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("i40e rx queue",
+ sizeof(struct i40e_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "rx queue data structure");
+ return -ENOMEM;
+ }
+ rxq->mp = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ rxq->reg_idx = reg_idx;
+ rxq->port_id = dev->data->port_id;
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+ rxq->drop_en = rx_conf->rx_drop_en;
+ rxq->vsi = vsi;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->offloads = offloads;
+
+ /* Allocate the maximun number of RX ring hardware descriptor. */
+ len = I40E_MAX_RING_DESC;
+
+ /**
+ * Allocating a little more memory because vectorized/bulk_alloc Rx
+ * functions doesn't check boundaries each time.
+ */
+ len += RTE_PMD_I40E_RX_MAX_BURST;
+
+ ring_size = RTE_ALIGN(len * sizeof(union i40e_rx_desc),
+ I40E_DMA_MEM_ALIGN);
+
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ ring_size, I40E_RING_BASE_ALIGN, socket_id);
+ if (!rz) {
+ i40e_dev_rx_queue_release(rxq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
+ return -ENOMEM;
+ }
+
+ /* Zero all the descriptors in the ring. */
+ memset(rz->addr, 0, ring_size);
+
+ rxq->rx_ring_phys_addr = rz->iova;
+ rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
+
+ len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
+
+ /* Allocate the software ring. */
+ rxq->sw_ring =
+ rte_zmalloc_socket("i40e rx sw ring",
+ sizeof(struct i40e_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq->sw_ring) {
+ i40e_dev_rx_queue_release(rxq);
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
+ return -ENOMEM;
+ }
+
+ i40e_reset_rx_queue(rxq);
+ rxq->q_set = TRUE;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->enabled_tc & (1 << i)))
+ continue;
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+ base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+
+ if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
+ rxq->dcb_tc = i;
+ }
+
+ if (dev->data->dev_started) {
+ if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
+ i40e_dev_rx_queue_release(rxq);
+ return -EINVAL;
+ }
+ } else {
+ use_def_burst_func =
+ check_rx_burst_bulk_alloc_preconditions(rxq);
+ if (!use_def_burst_func) {
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ PMD_INIT_LOG(DEBUG,
+ "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+ } else {
+ PMD_INIT_LOG(DEBUG,
+ "Rx Burst Bulk Alloc Preconditions are "
+ "not satisfied, Scattered Rx is requested, "
+ "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+ "not enabled on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ ad->rx_bulk_alloc_allowed = false;
+ }
+ }
+
+ dev->data->rx_queues[queue_idx] = rxq;
+ return 0;
+}
+
+void
+i40e_dev_rx_queue_release(void *rxq)
+{
+ struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
+
+ if (!q) {
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
+ return;
+ }
+
+ i40e_rx_queue_release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_free(q);
+}
+
+uint32_t
+i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define I40E_RXQ_SCAN_INTERVAL 4
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_queue *rxq;
+ uint16_t desc = 0;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+ while ((desc < rxq->nb_rx_desc) &&
+ ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
+ (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ /**
+ * Check the DD bit of a rx descriptor of each 4 in a group,
+ * to avoid checking too frequently and downgrading performance
+ * too much.
+ */
+ desc += I40E_RXQ_SCAN_INTERVAL;
+ rxdp += I40E_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
+
+int
+i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint16_t desc;
+ int ret;
+
+ if (unlikely(offset >= rxq->nb_rx_desc)) {
+ PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset);
+ return 0;
+ }
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ rxdp = &(rxq->rx_ring[desc]);
+
+ ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
+ (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
+
+ return ret;
+}
+
+int
+i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ volatile uint64_t *status;
+ uint64_t mask;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+ mask = rte_le_to_cpu_64((1ULL << I40E_RX_DESC_STATUS_DD_SHIFT)
+ << I40E_RXD_QW1_STATUS_SHIFT);
+ if (*status & mask)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct i40e_tx_queue *txq = tx_queue;
+ volatile uint64_t *status;
+ uint64_t mask, expect;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ mask = rte_le_to_cpu_64(I40E_TXD_QW1_DTYPE_MASK);
+ expect = rte_cpu_to_le_64(
+ I40E_TX_DESC_DTYPE_DESC_DONE << I40E_TXD_QW1_DTYPE_SHIFT);
+ if ((*status & mask) == expect)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+static int
+i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
+ struct i40e_tx_queue *txq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (i40e_tx_queue_init(txq) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to do TX queue initialization");
+ return -EINVAL;
+ }
+
+ if (i40e_dev_first_queue(txq->queue_id,
+ dev->data->tx_queues,
+ dev->data->nb_tx_queues)) {
+ /**
+ * If it is the first queue to setup,
+ * set all flags and call
+ * i40e_set_tx_function.
+ */
+ i40e_set_tx_function_flag(dev, txq);
+ i40e_set_tx_function(dev);
+ return 0;
+ }
+
+ /* check vector conflict */
+ if (ad->tx_vec_allowed) {
+ if (txq->tx_rs_thresh > RTE_I40E_TX_MAX_FREE_BUF_SZ ||
+ i40e_txq_vec_setup(txq)) {
+ PMD_DRV_LOG(ERR, "Failed vector tx setup.");
+ return -EINVAL;
+ }
+ }
+ /* check simple tx conflict */
+ if (ad->tx_simple_allowed) {
+ if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
+ txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
+ PMD_DRV_LOG(ERR, "No-simple tx is required.");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int
+i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf = NULL;
+ struct i40e_vf *vf = NULL;
+ struct i40e_tx_queue *txq;
+ const struct rte_memzone *tz;
+ uint32_t ring_size;
+ uint16_t tx_rs_thresh, tx_free_thresh;
+ uint16_t reg_idx, i, base, bsf, tc_mapping;
+ int q_offset;
+ uint64_t offloads;
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
+ vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ vsi = &vf->vsi;
+ if (!vsi)
+ return -EINVAL;
+ reg_idx = queue_idx;
+ } else {
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+ if (!vsi)
+ return -EINVAL;
+ q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+ if (q_offset < 0)
+ return -EINVAL;
+ reg_idx = vsi->base_queue + q_offset;
+ }
+
+ if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
+ PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
+ "invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /**
+ * The following two parameters control the setting of the RS bit on
+ * transmit descriptors. TX descriptors will have their RS bit set
+ * after txq->tx_rs_thresh descriptors have been used. The TX
+ * descriptor ring will be cleaned after txq->tx_free_thresh
+ * descriptors are used or if the number of descriptors required to
+ * transmit a packet is greater than the number of free TX descriptors.
+ *
+ * The following constraints must be satisfied:
+ * - tx_rs_thresh must be greater than 0.
+ * - tx_rs_thresh must be less than the size of the ring minus 2.
+ * - tx_rs_thresh must be less than or equal to tx_free_thresh.
+ * - tx_rs_thresh must be a divisor of the ring size.
+ * - tx_free_thresh must be greater than 0.
+ * - tx_free_thresh must be less than the size of the ring minus 3.
+ *
+ * One descriptor in the TX ring is used as a sentinel to avoid a H/W
+ * race condition, hence the maximum threshold constraints. When set
+ * to zero use default values.
+ */
+ tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+ tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+ if (tx_rs_thresh >= (nb_desc - 2)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+ "number of TX descriptors minus 2. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
+ "number of TX descriptors minus 3. "
+ "(tx_free_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+ if (tx_rs_thresh > tx_free_thresh) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
+ "equal to tx_free_thresh. (tx_free_thresh=%u"
+ " tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+ if ((nb_desc % tx_rs_thresh) != 0) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
+ "number of TX descriptors. (tx_rs_thresh=%u"
+ " port=%d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+ if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
+ PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+ "tx_rs_thresh is greater than 1. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("i40e tx queue",
+ sizeof(struct i40e_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "tx queue structure");
+ return -ENOMEM;
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
+ ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ ring_size, I40E_RING_BASE_ALIGN, socket_id);
+ if (!tz) {
+ i40e_dev_tx_queue_release(txq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->tx_rs_thresh = tx_rs_thresh;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->pthresh = tx_conf->tx_thresh.pthresh;
+ txq->hthresh = tx_conf->tx_thresh.hthresh;
+ txq->wthresh = tx_conf->tx_thresh.wthresh;
+ txq->queue_id = queue_idx;
+ txq->reg_idx = reg_idx;
+ txq->port_id = dev->data->port_id;
+ txq->offloads = offloads;
+ txq->vsi = vsi;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+ txq->tx_ring_phys_addr = tz->iova;
+ txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+
+ /* Allocate software ring */
+ txq->sw_ring =
+ rte_zmalloc_socket("i40e tx sw ring",
+ sizeof(struct i40e_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq->sw_ring) {
+ i40e_dev_tx_queue_release(txq);
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
+ return -ENOMEM;
+ }
+
+ i40e_reset_tx_queue(txq);
+ txq->q_set = TRUE;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->enabled_tc & (1 << i)))
+ continue;
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+ base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+
+ if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
+ txq->dcb_tc = i;
+ }
+
+ if (dev->data->dev_started) {
+ if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
+ i40e_dev_tx_queue_release(txq);
+ return -EINVAL;
+ }
+ } else {
+ /**
+ * Use a simple TX queue without offloads or
+ * multi segs if possible
+ */
+ i40e_set_tx_function_flag(dev, txq);
+ }
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return 0;
+}
+
+void
+i40e_dev_tx_queue_release(void *txq)
+{
+ struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
+
+ if (!q) {
+ PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
+ return;
+ }
+
+ i40e_tx_queue_release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_free(q);
+}
+
+const struct rte_memzone *
+i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(name);
+ if (mz)
+ return mz;
+
+ mz = rte_memzone_reserve_aligned(name, len, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, I40E_RING_BASE_ALIGN);
+ return mz;
+}
+
+void
+i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
+{
+ uint16_t i;
+
+ /* SSE Vector driver has a different way of releasing mbufs. */
+ if (rxq->rx_using_sse) {
+ i40e_rx_queue_release_mbufs_vec(rxq);
+ return;
+ }
+
+ if (!rxq->sw_ring) {
+ PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->sw_ring[i].mbuf = NULL;
+ }
+ }
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ if (rxq->rx_nb_avail == 0)
+ return;
+ for (i = 0; i < rxq->rx_nb_avail; i++) {
+ struct rte_mbuf *mbuf;
+
+ mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
+ rte_pktmbuf_free_seg(mbuf);
+ }
+ rxq->rx_nb_avail = 0;
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+}
+
+void
+i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
+{
+ unsigned i;
+ uint16_t len;
+
+ if (!rxq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
+ return;
+ }
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
+ len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_I40E_RX_MAX_BURST);
+ else
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+ len = rxq->nb_rx_desc;
+
+ for (i = 0; i < len * sizeof(union i40e_rx_desc); i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+ for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i)
+ rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ rxq->rx_nb_avail = 0;
+ rxq->rx_next_avail = 0;
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
+}
+
+void
+i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
+{
+ struct rte_eth_dev *dev;
+ uint16_t i;
+
+ dev = &rte_eth_devices[txq->port_id];
+
+ if (!txq || !txq->sw_ring) {
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+ return;
+ }
+
+ /**
+ * vPMD tx will not set sw_ring's mbuf to NULL after free,
+ * so need to free remains more carefully.
+ */
+ if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx2 ||
+ dev->tx_pkt_burst == i40e_xmit_pkts_vec) {
+ i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
+ if (txq->tx_tail < i) {
+ for (; i < txq->nb_tx_desc; i++) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ i = 0;
+ }
+ for (; i < txq->tx_tail; i++) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ } else {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+void
+i40e_reset_tx_queue(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txe;
+ uint16_t i, prev, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ txe = txq->sw_ring;
+ size = sizeof(struct i40e_tx_desc) * txq->nb_tx_desc;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->tx_ring)[i] = 0;
+
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile struct i40e_tx_desc *txd = &txq->tx_ring[i];
+
+ txd->cmd_type_offset_bsz =
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ txq->tx_tail = 0;
+ txq->nb_tx_used = 0;
+
+ txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+}
+
+/* Init the TX queue in hardware */
+int
+i40e_tx_queue_init(struct i40e_tx_queue *txq)
+{
+ enum i40e_status_code err = I40E_SUCCESS;
+ struct i40e_vsi *vsi = txq->vsi;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t pf_q = txq->reg_idx;
+ struct i40e_hmc_obj_txq tx_ctx;
+ uint32_t qtx_ctl;
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(tx_ctx));
+ tx_ctx.new_context = 1;
+ tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ tx_ctx.qlen = txq->nb_tx_desc;
+
+#ifdef RTE_LIBRTE_IEEE1588
+ tx_ctx.timesync_ena = 1;
+#endif
+ tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[txq->dcb_tc]);
+ if (vsi->type == I40E_VSI_FDIR)
+ tx_ctx.fd_ena = TRUE;
+
+ err = i40e_clear_lan_tx_queue_context(hw, pf_q);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failure of clean lan tx queue context");
+ return err;
+ }
+
+ err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failure of set lan tx queue context");
+ return err;
+ }
+
+ /* Now associate this queue with this PCI function */
+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK);
+ I40E_WRITE_REG(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
+ I40E_WRITE_FLUSH(hw);
+
+ txq->qtx_tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
+
+ return err;
+}
+
+int
+i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
+{
+ struct i40e_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile union i40e_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
+
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &rxq->rx_ring[i];
+ rxd->read.pkt_addr = dma_addr;
+ rxd->read.hdr_addr = 0;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ rxd->read.rsvd1 = 0;
+ rxd->read.rsvd2 = 0;
+#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
+
+ rxe[i].mbuf = mbuf;
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate the buffer length, and check the jumbo frame
+ * and maximum packet length.
+ */
+static int
+i40e_rx_queue_config(struct i40e_rx_queue *rxq)
+{
+ struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
+ struct rte_eth_dev_data *data = pf->dev_data;
+ uint16_t buf_size, len;
+
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+
+ switch (pf->flags & (I40E_FLAG_HEADER_SPLIT_DISABLED |
+ I40E_FLAG_HEADER_SPLIT_ENABLED)) {
+ case I40E_FLAG_HEADER_SPLIT_ENABLED: /* Not supported */
+ rxq->rx_hdr_len = RTE_ALIGN(I40E_RXBUF_SZ_1024,
+ (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+ rxq->rx_buf_len = RTE_ALIGN(I40E_RXBUF_SZ_2048,
+ (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+ rxq->hs_mode = i40e_header_split_enabled;
+ break;
+ case I40E_FLAG_HEADER_SPLIT_DISABLED:
+ default:
+ rxq->rx_hdr_len = 0;
+ rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size,
+ (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+ rxq->hs_mode = i40e_header_split_none;
+ break;
+ }
+
+ len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
+ rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must "
+ "be larger than %u and smaller than %u,"
+ "as jumbo frame is enabled",
+ (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)I40E_FRAME_SIZE_MAX);
+ return I40E_ERR_CONFIG;
+ }
+ } else {
+ if (rxq->max_pkt_len < ETHER_MIN_LEN ||
+ rxq->max_pkt_len > ETHER_MAX_LEN) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, "
+ "as jumbo frame is disabled",
+ (uint32_t)ETHER_MIN_LEN,
+ (uint32_t)ETHER_MAX_LEN);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ return 0;
+}
+
+/* Init the RX queue in hardware */
+int
+i40e_rx_queue_init(struct i40e_rx_queue *rxq)
+{
+ int err = I40E_SUCCESS;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
+ struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
+ uint16_t pf_q = rxq->reg_idx;
+ uint16_t buf_size;
+ struct i40e_hmc_obj_rxq rx_ctx;
+
+ err = i40e_rx_queue_config(rxq);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "Failed to config RX queue");
+ return err;
+ }
+
+ /* Clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ rx_ctx.dbuff = rxq->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.hbuff = rxq->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+ rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ rx_ctx.qlen = rxq->nb_rx_desc;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ rx_ctx.dsize = 1;
+#endif
+ rx_ctx.dtype = rxq->hs_mode;
+ if (rxq->hs_mode)
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
+ else
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
+ rx_ctx.rxmax = rxq->max_pkt_len;
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
+ rx_ctx.l2tsel = 1;
+ /* showiv indicates if inner VLAN is stripped inside of tunnel
+ * packet. When set it to 1, vlan information is stripped from
+ * the inner header, but the hardware does not put it in the
+ * descriptor. So set it zero by default.
+ */
+ rx_ctx.showiv = 0;
+ rx_ctx.prefena = 1;
+
+ err = i40e_clear_lan_rx_queue_context(hw, pf_q);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to clear LAN RX queue context");
+ return err;
+ }
+ err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to set LAN RX queue context");
+ return err;
+ }
+
+ rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
+
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+
+ /* Check if scattered RX needs to be used. */
+ if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+ dev_data->scattered_rx = 1;
+ }
+
+ /* Init the RX tail regieter. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+ return 0;
+}
+
+void
+i40e_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
+ i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]);
+ i40e_reset_tx_queue(dev->data->tx_queues[i]);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
+ i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]);
+ i40e_reset_rx_queue(dev->data->rx_queues[i]);
+ }
+}
+
+void
+i40e_dev_free_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
+ i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
+ i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+#define I40E_FDIR_NUM_TX_DESC I40E_MIN_RING_DESC
+#define I40E_FDIR_NUM_RX_DESC I40E_MIN_RING_DESC
+
+enum i40e_status_code
+i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
+{
+ struct i40e_tx_queue *txq;
+ const struct rte_memzone *tz = NULL;
+ uint32_t ring_size;
+ struct rte_eth_dev *dev;
+
+ if (!pf) {
+ PMD_DRV_LOG(ERR, "PF is not available");
+ return I40E_ERR_BAD_PTR;
+ }
+
+ dev = pf->adapter->eth_dev;
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("i40e fdir tx queue",
+ sizeof(struct i40e_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "tx queue structure.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
+ ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+
+ tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
+ if (!tz) {
+ i40e_dev_tx_queue_release(txq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
+ txq->queue_id = I40E_FDIR_QUEUE_ID;
+ txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+ txq->vsi = pf->fdir.fdir_vsi;
+
+ txq->tx_ring_phys_addr = tz->iova;
+ txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+ /*
+ * don't need to allocate software ring and reset for the fdir
+ * program queue just set the queue has been configured.
+ */
+ txq->q_set = TRUE;
+ pf->fdir.txq = txq;
+
+ return I40E_SUCCESS;
+}
+
+enum i40e_status_code
+i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
+{
+ struct i40e_rx_queue *rxq;
+ const struct rte_memzone *rz = NULL;
+ uint32_t ring_size;
+ struct rte_eth_dev *dev;
+
+ if (!pf) {
+ PMD_DRV_LOG(ERR, "PF is not available");
+ return I40E_ERR_BAD_PTR;
+ }
+
+ dev = pf->adapter->eth_dev;
+
+ /* Allocate the RX queue data structure. */
+ rxq = rte_zmalloc_socket("i40e fdir rx queue",
+ sizeof(struct i40e_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "rx queue structure.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ /* Allocate RX hardware ring descriptors. */
+ ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
+ ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+
+ rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
+ if (!rz) {
+ i40e_dev_rx_queue_release(rxq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC;
+ rxq->queue_id = I40E_FDIR_QUEUE_ID;
+ rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+ rxq->vsi = pf->fdir.fdir_vsi;
+
+ rxq->rx_ring_phys_addr = rz->iova;
+ memset(rz->addr, 0, I40E_FDIR_NUM_RX_DESC * sizeof(union i40e_rx_desc));
+ rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
+
+ /*
+ * Don't need to allocate software ring and reset for the fdir
+ * rx queue, just set the queue has been configured.
+ */
+ rxq->q_set = TRUE;
+ pf->fdir.rxq = rxq;
+
+ return I40E_SUCCESS;
+}
+
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct i40e_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mp;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
+}
+
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct i40e_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+ qinfo->conf.offloads = txq->offloads;
+}
+
+void __attribute__((cold))
+i40e_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint16_t rx_using_sse, i;
+ /* In order to allow Vector Rx there are a few configuration
+ * conditions to be met and Rx Bulk Allocation should be allowed.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (i40e_rx_vec_dev_conf_condition_check(dev) ||
+ !ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
+ " Vector Rx preconditions",
+ dev->data->port_id);
+
+ ad->rx_vec_allowed = false;
+ }
+ if (ad->rx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq =
+ dev->data->rx_queues[i];
+
+ if (rxq && i40e_rxq_vec_setup(rxq)) {
+ ad->rx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (dev->data->scattered_rx) {
+ /* Set the non-LRO scattered callback: there are Vector and
+ * single allocation versions.
+ */
+ if (ad->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+ "callback (port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+#ifdef RTE_ARCH_X86
+ /*
+ * since AVX frequency can be different to base
+ * frequency, limit use of AVX2 version to later
+ * plaforms, not all those that could theoretically
+ * run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ dev->rx_pkt_burst =
+ i40e_recv_scattered_pkts_vec_avx2;
+#endif
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
+ "allocation callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ }
+ /* If parameters allow we are going to choose between the following
+ * callbacks:
+ * - Vector
+ * - Bulk Allocation
+ * - Single buffer allocation (the simplest one)
+ */
+ } else if (ad->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+ "burst size no less than %d (port=%d).",
+ RTE_I40E_DESCS_PER_LOOP,
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_vec;
+#ifdef RTE_ARCH_X86
+ /*
+ * since AVX frequency can be different to base
+ * frequency, limit use of AVX2 version to later
+ * plaforms, not all those that could theoretically
+ * run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ dev->rx_pkt_burst = i40e_recv_pkts_vec_avx2;
+#endif
+ } else if (ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts;
+ }
+
+ /* Propagate information about RX function choice through all queues. */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rx_using_sse =
+ (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (rxq)
+ rxq->rx_using_sse = rx_using_sse;
+ }
+ }
+}
+
+void __attribute__((cold))
+i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Use a simple Tx queue if possible (only fast free is allowed) */
+ ad->tx_simple_allowed =
+ (txq->offloads ==
+ (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
+ ad->tx_vec_allowed = (ad->tx_simple_allowed &&
+ txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
+
+ if (ad->tx_vec_allowed)
+ PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on Tx queue %u.",
+ txq->queue_id);
+ else if (ad->tx_simple_allowed)
+ PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
+ txq->queue_id);
+ else
+ PMD_INIT_LOG(DEBUG,
+ "Neither simple nor vector Tx enabled on Tx queue %u\n",
+ txq->queue_id);
+}
+
+void __attribute__((cold))
+i40e_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int i;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (ad->tx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct i40e_tx_queue *txq =
+ dev->data->tx_queues[i];
+
+ if (txq && i40e_txq_vec_setup(txq)) {
+ ad->tx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (ad->tx_simple_allowed) {
+ if (ad->tx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+#ifdef RTE_ARCH_X86
+ /*
+ * since AVX frequency can be different to base
+ * frequency, limit use of AVX2 version to later
+ * plaforms, not all those that could theoretically
+ * run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx2;
+#endif
+ } else {
+ PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+ }
+ dev->tx_pkt_prepare = NULL;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts;
+ dev->tx_pkt_prepare = i40e_prep_pkts;
+ }
+}
+
+void __attribute__((cold))
+i40e_set_default_ptype_table(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
+ ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
+}
+
+void __attribute__((cold))
+i40e_set_default_pctype_table(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
+ ad->pctypes_tbl[i] = 0ULL;
+ ad->flow_types_mask = 0ULL;
+ ad->pctypes_mask = 0ULL;
+
+ ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV4] =
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV6] =
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ ad->pctypes_tbl[RTE_ETH_FLOW_L2_PAYLOAD] =
+ (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);
+
+ if (hw->mac.type == I40E_MAC_X722) {
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+ }
+
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
+ if (ad->pctypes_tbl[i])
+ ad->flow_types_mask |= (1ULL << i);
+ ad->pctypes_mask |= ad->pctypes_tbl[i];
+ }
+}
+
+/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+ return -1;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_scattered_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
+{
+ return -1;
+}
+
+int __attribute__((weak))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return -1;
+}
+
+void __attribute__((weak))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
+{
+ return;
+}
+
+uint16_t __attribute__((weak))
+i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.h b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.h
new file mode 100644
index 00000000..3fc619af
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.h
@@ -0,0 +1,803 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#ifndef _I40E_RXTX_H_
+#define _I40E_RXTX_H_
+
+#define RTE_PMD_I40E_RX_MAX_BURST 32
+#define RTE_PMD_I40E_TX_MAX_BURST 32
+
+#define RTE_I40E_VPMD_RX_BURST 32
+#define RTE_I40E_VPMD_TX_BURST 32
+#define RTE_I40E_RXQ_REARM_THRESH 32
+#define RTE_I40E_MAX_RX_BURST RTE_I40E_RXQ_REARM_THRESH
+#define RTE_I40E_TX_MAX_FREE_BUF_SZ 64
+#define RTE_I40E_DESCS_PER_LOOP 4
+
+#define I40E_RXBUF_SZ_1024 1024
+#define I40E_RXBUF_SZ_2048 2048
+
+/* In none-PXE mode QLEN must be whole number of 32 descriptors. */
+#define I40E_ALIGN_RING_DESC 32
+
+#define I40E_MIN_RING_DESC 64
+#define I40E_MAX_RING_DESC 4096
+
+#define I40E_MIN_TSO_MSS 256
+#define I40E_MAX_TSO_MSS 9674
+
+#define I40E_TX_MAX_SEG UINT8_MAX
+#define I40E_TX_MAX_MTU_SEG 8
+
+#define I40E_TX_MIN_PKT_LEN 17
+
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+
+#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
+ I40E_TX_DESC_CMD_EOP)
+
+enum i40e_header_split_mode {
+ i40e_header_split_none = 0,
+ i40e_header_split_enabled = 1,
+ i40e_header_split_always = 2,
+ i40e_header_split_reserved
+};
+
+#define I40E_HEADER_SPLIT_NONE ((uint8_t)0)
+#define I40E_HEADER_SPLIT_L2 ((uint8_t)(1 << 0))
+#define I40E_HEADER_SPLIT_IP ((uint8_t)(1 << 1))
+#define I40E_HEADER_SPLIT_UDP_TCP ((uint8_t)(1 << 2))
+#define I40E_HEADER_SPLIT_SCTP ((uint8_t)(1 << 3))
+#define I40E_HEADER_SPLIT_ALL (I40E_HEADER_SPLIT_L2 | \
+ I40E_HEADER_SPLIT_IP | \
+ I40E_HEADER_SPLIT_UDP_TCP | \
+ I40E_HEADER_SPLIT_SCTP)
+
+/* HW desc structure, both 16-byte and 32-byte types are supported */
+#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#define i40e_rx_desc i40e_16byte_rx_desc
+#else
+#define i40e_rx_desc i40e_32byte_rx_desc
+#endif
+
+struct i40e_rx_entry {
+ struct rte_mbuf *mbuf;
+};
+
+/*
+ * Structure associated with each RX queue.
+ */
+struct i40e_rx_queue {
+ struct rte_mempool *mp; /**< mbuf pool to populate RX ring */
+ volatile union i40e_rx_desc *rx_ring;/**< RX ring virtual address */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address */
+ struct i40e_rx_entry *sw_ring; /**< address of RX soft ring */
+ uint16_t nb_rx_desc; /**< number of RX descriptors */
+ uint16_t rx_free_thresh; /**< max free RX desc to hold */
+ uint16_t rx_tail; /**< current value of tail */
+ uint16_t nb_rx_hold; /**< number of held free RX desc */
+ struct rte_mbuf *pkt_first_seg; /**< first segment of current packet */
+ struct rte_mbuf *pkt_last_seg; /**< last segment of current packet */
+ struct rte_mbuf fake_mbuf; /**< dummy mbuf */
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ uint16_t rx_nb_avail; /**< number of staged packets ready */
+ uint16_t rx_next_avail; /**< index of next staged packets */
+ uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
+ struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2];
+#endif
+
+ uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
+ uint16_t rxrearm_start; /**< the idx we start the re-arming from */
+ uint64_t mbuf_initializer; /**< value to init mbufs */
+
+ uint16_t port_id; /**< device port ID */
+ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise */
+ uint16_t queue_id; /**< RX queue index */
+ uint16_t reg_idx; /**< RX queue register index */
+ uint8_t drop_en; /**< if not 0, set register bit */
+ volatile uint8_t *qrx_tail; /**< register address of tail */
+ struct i40e_vsi *vsi; /**< the VSI this queue belongs to */
+ uint16_t rx_buf_len; /* The packet buffer size */
+ uint16_t rx_hdr_len; /* The header buffer size */
+ uint16_t max_pkt_len; /* Maximum packet length */
+ uint8_t hs_mode; /* Header Split mode */
+ bool q_set; /**< indicate if rx queue has been configured */
+ bool rx_deferred_start; /**< don't start this queue in dev start */
+ uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
+ uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
+};
+
+struct i40e_tx_entry {
+ struct rte_mbuf *mbuf;
+ uint16_t next_id;
+ uint16_t last_id;
+};
+
+/*
+ * Structure associated with each TX queue.
+ */
+struct i40e_tx_queue {
+ uint16_t nb_tx_desc; /**< number of TX descriptors */
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address */
+ volatile struct i40e_tx_desc *tx_ring; /**< TX ring virtual address */
+ struct i40e_tx_entry *sw_ring; /**< virtual address of SW ring */
+ uint16_t tx_tail; /**< current value of tail register */
+ volatile uint8_t *qtx_tail; /**< register address of tail */
+ uint16_t nb_tx_used; /**< number of TX desc used since RS bit set */
+ /**< index to last TX descriptor to have been cleaned */
+ uint16_t last_desc_cleaned;
+ /**< Total number of TX descriptors ready to be allocated. */
+ uint16_t nb_tx_free;
+ /**< Start freeing TX buffers if there are less free descriptors than
+ this value. */
+ uint16_t tx_free_thresh;
+ /** Number of TX descriptors to use before RS bit is set. */
+ uint16_t tx_rs_thresh;
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold reg. */
+ uint16_t port_id; /**< Device port identifier. */
+ uint16_t queue_id; /**< TX queue index. */
+ uint16_t reg_idx;
+ struct i40e_vsi *vsi; /**< the VSI this queue belongs to */
+ uint16_t tx_next_dd;
+ uint16_t tx_next_rs;
+ bool q_set; /**< indicate if tx queue has been configured */
+ bool tx_deferred_start; /**< don't start this queue in dev start */
+ uint8_t dcb_tc; /**< Traffic class of tx queue */
+ uint64_t offloads; /**< Tx offload flags of DEV_RX_OFFLOAD_* */
+};
+
+/** Offload features */
+union i40e_tx_offload {
+ uint64_t data;
+ struct {
+ uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l4_len:8; /**< L4 Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size */
+ uint64_t outer_l2_len:8; /**< outer L2 Header Length */
+ uint64_t outer_l3_len:16; /**< outer L3 Header Length */
+ };
+};
+
+int i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+const uint32_t *i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+int i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void i40e_dev_rx_queue_release(void *rxq);
+void i40e_dev_tx_queue_release(void *txq);
+uint16_t i40e_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t i40e_xmit_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t i40e_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int i40e_tx_queue_init(struct i40e_tx_queue *txq);
+int i40e_rx_queue_init(struct i40e_rx_queue *rxq);
+void i40e_free_tx_resources(struct i40e_tx_queue *txq);
+void i40e_free_rx_resources(struct i40e_rx_queue *rxq);
+void i40e_dev_clear_queues(struct rte_eth_dev *dev);
+void i40e_dev_free_queues(struct rte_eth_dev *dev);
+void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
+void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
+void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
+int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
+void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
+
+uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
+uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
+int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
+int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
+void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
+uint16_t i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+void i40e_set_rx_function(struct rte_eth_dev *dev);
+void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
+ struct i40e_tx_queue *txq);
+void i40e_set_tx_function(struct rte_eth_dev *dev);
+void i40e_set_default_ptype_table(struct rte_eth_dev *dev);
+void i40e_set_default_pctype_table(struct rte_eth_dev *dev);
+uint16_t i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts_vec_avx2(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+/* For each value it means, datasheet of hardware can tell more details
+ *
+ * @note: fix i40e_dev_supported_ptypes_get() if any change here.
+ */
+static inline uint32_t
+i40e_get_default_pkt_type(uint8_t ptype)
+{
+ static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
+ /* L2 types */
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
+ /* [3] - [5] reserved */
+ [6] = RTE_PTYPE_L2_ETHER_LLDP,
+ /* [7] - [10] reserved */
+ [11] = RTE_PTYPE_L2_ETHER_ARP,
+ /* [12] - [21] reserved */
+
+ /* Non tunneled IPv4 */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* IPv4 --> IPv4 */
+ [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [32] reserved */
+ [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> IPv6 */
+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [39] reserved */
+ [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN */
+ [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
+ [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [47] reserved */
+ [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
+ [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [54] reserved */
+ [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
+ [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [62] reserved */
+ [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [69] reserved */
+ [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
+ [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
+ [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [77] reserved */
+ [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
+ [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [84] reserved */
+ [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* Non tunneled IPv6 */
+ [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [91] reserved */
+ [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* IPv6 --> IPv4 */
+ [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [98] reserved */
+ [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> IPv6 */
+ [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [105] reserved */
+ [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN */
+ [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
+ [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [113] reserved */
+ [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
+ [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [120] reserved */
+ [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
+ [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [128] reserved */
+ [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [135] reserved */
+ [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
+ [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
+ [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [143] reserved */
+ [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
+ [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [150] reserved */
+ [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* L2 NSH packet type */
+ [154] = RTE_PTYPE_L2_ETHER_NSH,
+ [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* All others reserved */
+ };
+
+ return type_table[ptype];
+}
+
+#endif /* _I40E_RXTX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c
new file mode 100644
index 00000000..f3fc8267
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -0,0 +1,645 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 IBM Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <altivec.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+
+ vector unsigned long hdr_room = (vector unsigned long){
+ RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM};
+ vector unsigned long dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ dma_addr0 = (vector unsigned long){};
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ vec_st(dma_addr0, 0,
+ (vector unsigned long *)&rxdp[i].read);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ vector unsigned long vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ * Though, RX will overwrite ol_flags that are coming next
+ * anyway. So overwrite whole 8 bytes with one load:
+ * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+ */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
+ vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = vec_mergel(vaddr0, vaddr0);
+ dma_addr1 = vec_mergel(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = vec_add(dma_addr0, hdr_room);
+ dma_addr1 = vec_add(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read);
+ vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+static inline void
+desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+{
+ vector unsigned int vlan0, vlan1, rss, l3_l4e;
+
+ /* mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication.
+ */
+ const vector unsigned int rss_vlan_msk = (vector unsigned int){
+ (int32_t)0x1c03804, (int32_t)0x1c03804,
+ (int32_t)0x1c03804, (int32_t)0x1c03804};
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const vector unsigned char vlan_flags = (vector unsigned char){
+ 0, 0, 0, 0,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const vector unsigned char rss_flags = (vector unsigned char){
+ 0, PKT_RX_FDIR, 0, 0,
+ 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const vector unsigned char l3_l4e_flags = (vector unsigned char){
+ 0,
+ PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+ | PKT_RX_IP_CKSUM_BAD,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);
+ vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]);
+ vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1);
+
+ vlan1 = vec_and(vlan0, rss_vlan_msk);
+ vlan0 = (vector unsigned int)vec_perm(vlan_flags,
+ (vector unsigned char){},
+ *(vector unsigned char *)&vlan1);
+
+ rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11});
+ rss = (vector unsigned int)vec_perm(rss_flags, (vector unsigned char){},
+ *(vector unsigned char *)&rss);
+
+ l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22});
+ l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags,
+ (vector unsigned char){},
+ *(vector unsigned char *)&l3_l4e);
+
+ vlan0 = vec_or(vlan0, rss);
+ vlan0 = vec_or(vlan0, l3_l4e);
+
+ rx_pkts[0]->ol_flags = (uint64_t)vlan0[2];
+ rx_pkts[1]->ol_flags = (uint64_t)vlan0[3];
+ rx_pkts[2]->ol_flags = (uint64_t)vlan0[0];
+ rx_pkts[3]->ol_flags = (uint64_t)vlan0[1];
+}
+
+#define PKTLEN_SHIFT 10
+
+static inline void
+desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts,
+ uint32_t *ptype_tbl)
+{
+ vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
+ vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
+
+ ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
+ ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});
+
+ rx_pkts[0]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype0)[0]];
+ rx_pkts[1]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype0)[8]];
+ rx_pkts[2]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype1)[0]];
+ rx_pkts[3]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype1)[8]];
+}
+
+ /* Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ vector unsigned char shuf_msk;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ vector unsigned short crc_adjust = (vector unsigned short){
+ 0, 0, /* ignore pkt_type field */
+ rxq->crc_len, /* sub crc on pkt_len */
+ 0, /* ignore high-16bits of pkt_len */
+ rxq->crc_len, /* sub crc on data_len */
+ 0, 0, 0 /* ignore non-length fields */
+ };
+ vector unsigned long dd_check, eop_check;
+
+ /* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch0(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = (vector unsigned long){0x0000000100000001ULL,
+ 0x0000000100000001ULL};
+
+ /* 4 packets EOP mask */
+ eop_check = (vector unsigned long){0x0000000200000002ULL,
+ 0x0000000200000002ULL};
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = (vector unsigned char){
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 14, 15, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 14, 15, /* octet 15~14, 16 bits data_len */
+ 2, 3, /* octet 2~3, low 16 bits vlan_macip */
+ 4, 5, 6, 7 /* octet 4~7, 32bits rss */
+ };
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
+ vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
+ vector unsigned long mbp1, mbp2; /* two mbuf pointer
+ * in one XMM reg.
+ */
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = *(vector unsigned long *)&sw_ring[pos];
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = *(vector unsigned long *)(rxdp + 3);
+ rte_compiler_barrier();
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ *(vector unsigned long *)&rx_pkts[pos] = mbp1;
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = *(vector unsigned long *)&sw_ring[pos + 2];
+
+ descs[2] = *(vector unsigned long *)(rxdp + 2);
+ rte_compiler_barrier();
+ /* B.1 load 2 mbuf point */
+ descs[1] = *(vector unsigned long *)(rxdp + 1);
+ rte_compiler_barrier();
+ descs[0] = *(vector unsigned long *)(rxdp);
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ *(vector unsigned long *)&rx_pkts[pos + 2] = mbp2;
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+ const vector unsigned int len3 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[3]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ const vector unsigned int len2 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[2]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ /* merge the now-aligned packet length fields back in */
+ descs[3] = (vector unsigned long)len3;
+ descs[2] = (vector unsigned long)len2;
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = vec_perm((vector unsigned char)descs[3],
+ (vector unsigned char){}, shuf_msk);
+ pkt_mb3 = vec_perm((vector unsigned char)descs[2],
+ (vector unsigned char){}, shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = vec_mergel((vector unsigned short)descs[3],
+ (vector unsigned short)descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = vec_mergel((vector unsigned short)descs[1],
+ (vector unsigned short)descs[0]);
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb4, crc_adjust);
+ pkt_mb3 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb3, crc_adjust);
+
+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
+ const vector unsigned int len1 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[1]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+ const vector unsigned int len0 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[0]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ /* merge the now-aligned packet length fields back in */
+ descs[1] = (vector unsigned long)len1;
+ descs[0] = (vector unsigned long)len0;
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = vec_perm((vector unsigned char)descs[1],
+ (vector unsigned char){}, shuf_msk);
+ pkt_mb1 = vec_perm((vector unsigned char)descs[0],
+ (vector unsigned char){}, shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ staterr = (vector unsigned short)vec_mergeh(
+ sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ vec_st(pkt_mb4, 0,
+ (vector unsigned char *)&rx_pkts[pos + 3]
+ ->rx_descriptor_fields1
+ );
+ vec_st(pkt_mb3, 0,
+ (vector unsigned char *)&rx_pkts[pos + 2]
+ ->rx_descriptor_fields1
+ );
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb2 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb2, crc_adjust);
+ pkt_mb1 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ vector unsigned char eop_shuf_mask =
+ (vector unsigned char){
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ };
+
+ /* and with mask to extract bits, flipping 1-0 */
+ vector unsigned char eop_bits = vec_and(
+ (vector unsigned char)vec_nor(staterr, staterr),
+ (vector unsigned char)eop_check);
+ /* the staterr values are not in order, as the count
+ * count of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = vec_perm(eop_bits, (vector unsigned char){},
+ eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *split_packet = (vec_ld(0,
+ (vector unsigned int *)&eop_bits))[0];
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = vec_and(staterr, (vector unsigned short)dd_check);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ vec_st(pkt_mb2, 0,
+ (vector unsigned char *)&rx_pkts[pos + 1]
+ ->rx_descriptor_fields1
+ );
+ vec_st(pkt_mb1, 0,
+ (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
+ );
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+ desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+ /* C.4 calc avaialbe number of desc */
+ var = __builtin_popcountll((vec_ld(0,
+ (vector unsigned long *)&staterr)[0]));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /* Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ unsigned int i = 0;
+
+ if (!rxq->pkt_first_seg) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ vector unsigned long descriptor = (vector unsigned long){
+ pkt->buf_iova + pkt->data_off, high_qw};
+ *(vector unsigned long *)txdp = descriptor;
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+ /* cross rx_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ nb_commit = nb_pkts;
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reach the end of ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ _i40e_rx_queue_release_mbufs_vec(rxq);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ return i40e_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused * txq)
+{
+ return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+ return i40e_rx_vec_dev_conf_condition_check_default(dev);
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c
new file mode 100644
index 00000000..23179b3b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c
@@ -0,0 +1,792 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <x86intrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ __m128i dma_addr0;
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ struct rte_mbuf *mb0, *mb1;
+ __m128i dma_addr0, dma_addr1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ __m128i vaddr0, vaddr1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+#else
+ struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
+ __m256i dma_addr0_1, dma_addr2_3;
+ __m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
+ /* Initialize the mbufs in vector, process 4 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH;
+ i += 4, rxep += 4, rxdp += 4) {
+ __m128i vaddr0, vaddr1, vaddr2, vaddr3;
+ __m256i vaddr0_1, vaddr2_3;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+ mb2 = rxep[2].mbuf;
+ mb3 = rxep[3].mbuf;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+ vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
+ vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
+
+ /*
+ * merge 0 & 1, by casting 0 to 256-bit and inserting 1
+ * into the high lanes. Similarly for 2 & 3
+ */
+ vaddr0_1 = _mm256_inserti128_si256(
+ _mm256_castsi128_si256(vaddr0), vaddr1, 1);
+ vaddr2_3 = _mm256_inserti128_si256(
+ _mm256_castsi128_si256(vaddr2), vaddr3, 1);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
+ dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);
+
+ /* add headroom to pa values */
+ dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
+ dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
+ _mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
+ }
+
+#endif
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+#define PKTLEN_SHIFT 10
+
+static inline uint16_t
+_recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+#define RTE_I40E_DESCS_PER_LOOP_AVX 8
+
+ const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
+ 0, rxq->mbuf_initializer);
+ struct i40e_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
+ volatile union i40e_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
+ const int avx_aligned = ((rxq->rx_tail & 1) == 0);
+ rte_prefetch0(rxdp);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP_AVX */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP_AVX);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* constants used in processing loop */
+ const __m256i crc_adjust = _mm256_set_epi16(
+ /* first descriptor */
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0, /* ignore pkt_type field */
+ /* second descriptor */
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+
+ /* 8 packets DD mask, LSB in each 32-bit value */
+ const __m256i dd_check = _mm256_set1_epi32(1);
+
+ /* 8 packets EOP mask, second-LSB in each 32-bit value */
+ const __m256i eop_check = _mm256_slli_epi32(dd_check,
+ I40E_RX_DESC_STATUS_EOF_SHIFT);
+
+ /* mask to shuffle from desc. to mbuf (2 descriptors)*/
+ const __m256i shuf_msk = _mm256_set_epi8(
+ /* first descriptor */
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /*pkt_type set as unknown */
+ /* second descriptor */
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF /*pkt_type set as unknown */
+ );
+ /*
+ * compile-time check the above crc and shuffle layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi
+ * calls above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+ /* Status/Error flag masks */
+ /*
+ * mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication. Bits 3-5 of error
+ * field (bits 22-24) are for IP/L4 checksum errors
+ */
+ const __m256i flags_mask = _mm256_set1_epi32(
+ (1 << 2) | (1 << 11) | (3 << 12) | (7 << 22));
+ /*
+ * data to be shuffled by result of flag mask. If VLAN bit is set,
+ * (bit 2), then position 4 in this array will be used in the
+ * destination
+ */
+ const __m256i vlan_flags_shuf = _mm256_set_epi32(
+ 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
+ 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
+ /*
+ * data to be shuffled by result of flag mask, shifted down 11.
+ * If RSS/FDIR bits are set, shuffle moves appropriate flags in
+ * place.
+ */
+ const __m256i rss_flags_shuf = _mm256_set_epi8(
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
+ 0, 0, PKT_RX_FDIR, 0, /* end up 128-bits */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
+ 0, 0, PKT_RX_FDIR, 0);
+
+ /*
+ * data to be shuffled by the result of the flags mask shifted by 22
+ * bits. This gives use the l3_l4 flags.
+ */
+ const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ /* shift right 1 bit to make sure it not exceed 255 */
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
+ /* second 128-bits */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+
+ const __m256i cksum_mask = _mm256_set1_epi32(
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD);
+
+ RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */
+
+ uint16_t i, received;
+ for (i = 0, received = 0; i < nb_pkts;
+ i += RTE_I40E_DESCS_PER_LOOP_AVX,
+ rxdp += RTE_I40E_DESCS_PER_LOOP_AVX) {
+ /* step 1, copy over 8 mbuf pointers to rx_pkts array */
+ _mm256_storeu_si256((void *)&rx_pkts[i],
+ _mm256_loadu_si256((void *)&sw_ring[i]));
+#ifdef RTE_ARCH_X86_64
+ _mm256_storeu_si256((void *)&rx_pkts[i + 4],
+ _mm256_loadu_si256((void *)&sw_ring[i + 4]));
+#endif
+
+ __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
+#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ /* for AVX we need alignment otherwise loads are not atomic */
+ if (avx_aligned) {
+ /* load in descriptors, 2 at a time, in reverse order */
+ raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
+ rte_compiler_barrier();
+ raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
+ rte_compiler_barrier();
+ raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
+ rte_compiler_barrier();
+ raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
+ } else
+#endif
+ do {
+ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7));
+ rte_compiler_barrier();
+ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6));
+ rte_compiler_barrier();
+ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5));
+ rte_compiler_barrier();
+ const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4));
+ rte_compiler_barrier();
+ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3));
+ rte_compiler_barrier();
+ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2));
+ rte_compiler_barrier();
+ const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1));
+ rte_compiler_barrier();
+ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0));
+
+ raw_desc6_7 = _mm256_inserti128_si256(
+ _mm256_castsi128_si256(raw_desc6), raw_desc7, 1);
+ raw_desc4_5 = _mm256_inserti128_si256(
+ _mm256_castsi128_si256(raw_desc4), raw_desc5, 1);
+ raw_desc2_3 = _mm256_inserti128_si256(
+ _mm256_castsi128_si256(raw_desc2), raw_desc3, 1);
+ raw_desc0_1 = _mm256_inserti128_si256(
+ _mm256_castsi128_si256(raw_desc0), raw_desc1, 1);
+ } while (0);
+
+ if (split_packet) {
+ int j;
+ for (j = 0; j < RTE_I40E_DESCS_PER_LOOP_AVX; j++)
+ rte_mbuf_prefetch_part2(rx_pkts[i + j]);
+ }
+
+ /*
+ * convert descriptors 4-7 into mbufs, adjusting length and
+ * re-arranging fields. Then write into the mbuf
+ */
+ const __m256i len6_7 = _mm256_slli_epi32(raw_desc6_7, PKTLEN_SHIFT);
+ const __m256i len4_5 = _mm256_slli_epi32(raw_desc4_5, PKTLEN_SHIFT);
+ const __m256i desc6_7 = _mm256_blend_epi16(raw_desc6_7, len6_7, 0x80);
+ const __m256i desc4_5 = _mm256_blend_epi16(raw_desc4_5, len4_5, 0x80);
+ __m256i mb6_7 = _mm256_shuffle_epi8(desc6_7, shuf_msk);
+ __m256i mb4_5 = _mm256_shuffle_epi8(desc4_5, shuf_msk);
+ mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
+ mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
+ /*
+ * to get packet types, shift 64-bit values down 30 bits
+ * and so ptype is in lower 8-bits in each
+ */
+ const __m256i ptypes6_7 = _mm256_srli_epi64(desc6_7, 30);
+ const __m256i ptypes4_5 = _mm256_srli_epi64(desc4_5, 30);
+ const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
+ const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
+ const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
+ const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);
+ mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4);
+ mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0);
+ mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4);
+ mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0);
+ /* merge the status bits into one register */
+ const __m256i status4_7 = _mm256_unpackhi_epi32(desc6_7,
+ desc4_5);
+
+ /*
+ * convert descriptors 0-3 into mbufs, adjusting length and
+ * re-arranging fields. Then write into the mbuf
+ */
+ const __m256i len2_3 = _mm256_slli_epi32(raw_desc2_3, PKTLEN_SHIFT);
+ const __m256i len0_1 = _mm256_slli_epi32(raw_desc0_1, PKTLEN_SHIFT);
+ const __m256i desc2_3 = _mm256_blend_epi16(raw_desc2_3, len2_3, 0x80);
+ const __m256i desc0_1 = _mm256_blend_epi16(raw_desc0_1, len0_1, 0x80);
+ __m256i mb2_3 = _mm256_shuffle_epi8(desc2_3, shuf_msk);
+ __m256i mb0_1 = _mm256_shuffle_epi8(desc0_1, shuf_msk);
+ mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
+ mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
+ /* get the packet types */
+ const __m256i ptypes2_3 = _mm256_srli_epi64(desc2_3, 30);
+ const __m256i ptypes0_1 = _mm256_srli_epi64(desc0_1, 30);
+ const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
+ const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
+ const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
+ const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);
+ mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4);
+ mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0);
+ mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4);
+ mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0);
+ /* merge the status bits into one register */
+ const __m256i status0_3 = _mm256_unpackhi_epi32(desc2_3,
+ desc0_1);
+
+ /*
+ * take the two sets of status bits and merge to one
+ * After merge, the packets status flags are in the
+ * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
+ */
+ __m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
+ status0_3);
+
+ /* now do flag manipulation */
+
+ /* get only flag/error bits we want */
+ const __m256i flag_bits = _mm256_and_si256(
+ status0_7, flags_mask);
+ /* set vlan and rss flags */
+ const __m256i vlan_flags = _mm256_shuffle_epi8(
+ vlan_flags_shuf, flag_bits);
+ const __m256i rss_flags = _mm256_shuffle_epi8(
+ rss_flags_shuf, _mm256_srli_epi32(flag_bits, 11));
+ /*
+ * l3_l4_error flags, shuffle, then shift to correct adjustment
+ * of flags in flags_shuf, and finally mask out extra bits
+ */
+ __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
+ _mm256_srli_epi32(flag_bits, 22));
+ l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
+ l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+
+ /* merge flags */
+ const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+ _mm256_or_si256(rss_flags, vlan_flags));
+ /*
+ * At this point, we have the 8 sets of flags in the low 16-bits
+ * of each 32-bit value in vlan0.
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write. However, we can also
+ * add in the previously computed rx_descriptor fields to
+ * make a single 256-bit write per mbuf
+ */
+ /* check the structure matches expectations */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+ /* build up data and do writes */
+ __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
+ rearm6, rearm7;
+ rearm6 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(mbuf_flags, 8), 0x04);
+ rearm4 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(mbuf_flags, 4), 0x04);
+ rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
+ rearm0 = _mm256_blend_epi32(mbuf_init, _mm256_srli_si256(mbuf_flags, 4), 0x04);
+ /* permute to add in the rx_descriptor e.g. rss fields */
+ rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
+ rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
+ rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
+ rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
+ /* write to mbuf */
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, rearm6);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, rearm4);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, rearm2);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, rearm0);
+
+ /* repeat for the odd mbufs */
+ const __m256i odd_flags = _mm256_castsi128_si256(
+ _mm256_extracti128_si256(mbuf_flags, 1));
+ rearm7 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(odd_flags, 8), 0x04);
+ rearm5 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(odd_flags, 4), 0x04);
+ rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
+ rearm1 = _mm256_blend_epi32(mbuf_init, _mm256_srli_si256(odd_flags, 4), 0x04);
+ /* since odd mbufs are already in hi 128-bits use blend */
+ rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
+ rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
+ rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
+ rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
+ /* again write to mbufs */
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, rearm7);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, rearm5);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, rearm3);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, rearm1);
+
+ /* extract and record EOP bit */
+ if (split_packet) {
+ const __m128i eop_mask = _mm_set1_epi16(
+ 1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
+ const __m256i eop_bits256 = _mm256_and_si256(status0_7,
+ eop_check);
+ /* pack status bits into a single 128-bit register */
+ const __m128i eop_bits = _mm_packus_epi32(
+ _mm256_castsi256_si128(eop_bits256),
+ _mm256_extractf128_si256(eop_bits256, 1));
+ /*
+ * flip bits, and mask out the EOP bit, which is now
+ * a split-packet bit i.e. !EOP, rather than EOP one.
+ */
+ __m128i split_bits = _mm_andnot_si128(eop_bits,
+ eop_mask);
+ /*
+ * eop bits are out of order, so we need to shuffle them
+ * back into order again. In doing so, only use low 8
+ * bits, which acts like another pack instruction
+ * The original order is (hi->lo): 1,3,5,7,0,2,4,6
+ * [Since we use epi8, the 16-bit positions are
+ * multiplied by 2 in the eop_shuffle value.]
+ */
+ __m128i eop_shuffle = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF, /* zero hi 64b */
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 8, 0, 10, 2, /* move values to lo 64b */
+ 12, 4, 14, 6);
+ split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
+ *(uint64_t *)split_packet = _mm_cvtsi128_si64(split_bits);
+ split_packet += RTE_I40E_DESCS_PER_LOOP_AVX;
+ }
+
+ /* perform dd_check */
+ status0_7 = _mm256_and_si256(status0_7, dd_check);
+ status0_7 = _mm256_packs_epi32(status0_7,
+ _mm256_setzero_si256());
+
+ uint64_t burst = __builtin_popcountll(_mm_cvtsi128_si64(
+ _mm256_extracti128_si256(status0_7, 1)));
+ burst += __builtin_popcountll(_mm_cvtsi128_si64(
+ _mm256_castsi256_si128(status0_7)));
+ received += burst;
+ if (burst != RTE_I40E_DESCS_PER_LOOP_AVX)
+ break;
+ }
+
+ /* update tail pointers */
+ rxq->rx_tail += received;
+ rxq->rx_tail &= (rxq->nb_rx_desc - 1);
+ if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
+ rxq->rx_tail--;
+ received--;
+ }
+ rxq->rxrearm_nb += received;
+ return received;
+}
+
+/*
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+/*
+ * vPMD receive routine that reassembles single burst of 32 scattered packets
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ */
+static uint16_t
+i40e_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ unsigned int i = 0;
+
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+/*
+ * vPMD receive routine that reassembles scattered packets.
+ * Main receive routine that can handle arbitrary burst sizes
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t retval = 0;
+ while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
+ uint16_t burst = i40e_recv_scattered_burst_vec_avx2(rx_queue,
+ rx_pkts + retval, RTE_I40E_VPMD_RX_BURST);
+ retval += burst;
+ nb_pkts -= burst;
+ if (burst < RTE_I40E_VPMD_RX_BURST)
+ return retval;
+ }
+ return retval + i40e_recv_scattered_burst_vec_avx2(rx_queue,
+ rx_pkts + retval, nb_pkts);
+}
+
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ __m128i descriptor = _mm_set_epi64x(high_qw,
+ pkt->buf_physaddr + pkt->data_off);
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ const uint64_t hi_qw_tmpl = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT));
+
+ /* if unaligned on 32-bit boundary, do one to align */
+ if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
+ vtx1(txdp, *pkt, flags);
+ nb_pkts--, txdp++, pkt++;
+ }
+
+ /* do two at a time while possible, in bursts */
+ for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
+ uint64_t hi_qw3 = hi_qw_tmpl |
+ ((uint64_t)pkt[3]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
+ uint64_t hi_qw2 = hi_qw_tmpl |
+ ((uint64_t)pkt[2]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
+ uint64_t hi_qw1 = hi_qw_tmpl |
+ ((uint64_t)pkt[1]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
+ uint64_t hi_qw0 = hi_qw_tmpl |
+ ((uint64_t)pkt[0]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
+
+ __m256i desc2_3 = _mm256_set_epi64x(
+ hi_qw3, pkt[3]->buf_physaddr + pkt[3]->data_off,
+ hi_qw2, pkt[2]->buf_physaddr + pkt[2]->data_off);
+ __m256i desc0_1 = _mm256_set_epi64x(
+ hi_qw1, pkt[1]->buf_physaddr + pkt[1]->data_off,
+ hi_qw0, pkt[0]->buf_physaddr + pkt[0]->data_off);
+ _mm256_store_si256((void *)(txdp + 2), desc2_3);
+ _mm256_store_si256((void *)txdp, desc0_1);
+ }
+
+ /* do any last ones */
+ while (nb_pkts) {
+ vtx1(txdp, *pkt, flags);
+ txdp++, pkt++, nb_pkts--;
+ }
+}
+
+static inline uint16_t
+i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+
+ /* cross rx_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ vtx(txdp, tx_pkts, n - 1, flags);
+ tx_pkts += (n - 1);
+ txdp += (n - 1);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reach the end of ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+uint16_t
+i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = i40e_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h
new file mode 100644
index 00000000..63cb1774
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#ifndef _I40E_RXTX_VEC_COMMON_H_
+#define _I40E_RXTX_VEC_COMMON_H_
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+static inline uint16_t
+reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len)
+ end->data_len -= rxq->crc_len;
+ else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ start->nb_segs--;
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ }
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
+static __rte_always_inline int
+i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txep;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bits on threshold descriptor */
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /* first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+ m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool)) {
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free,
+ nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+static __rte_always_inline void
+tx_backlog_entry(struct i40e_tx_entry *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+static inline void
+_i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ const unsigned mask = rxq->nb_rx_desc - 1;
+ unsigned i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ if (rxq->rxrearm_nb == 0) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ }
+ } else {
+ for (i = rxq->rx_tail;
+ i != rxq->rxrearm_start;
+ i = (i + 1) & mask) {
+ if (rxq->sw_ring[i].mbuf != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ }
+ }
+
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static inline int
+i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
+
+static inline int
+i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /* - no csum error report support
+ * - no header split support
+ */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+ return -1;
+
+ /* no QinQ support */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
+#endif
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c
new file mode 100644
index 00000000..83572ef8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -0,0 +1,597 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation.
+ * Copyright(c) 2016-2018, Linaro Limited.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <arm_neon.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ uint64x2_t dma_addr0, dma_addr1;
+ uint64x2_t zero = vdupq_n_u64(0);
+ uint64_t paddr;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (unlikely(rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0)) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ vst1q_u64((uint64_t *)&rxdp[i].read, zero);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
+ dma_addr0 = vdupq_n_u64(paddr);
+
+ /* flush desc with pa dma_addr */
+ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
+
+ paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
+ dma_addr1 = vdupq_n_u64(paddr);
+ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+static inline void
+desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4],
+ struct rte_mbuf **rx_pkts)
+{
+ uint32x4_t vlan0, vlan1, rss, l3_l4e;
+ const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0};
+ uint64x2_t rearm0, rearm1, rearm2, rearm3;
+
+ /* mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication.
+ */
+ const uint32x4_t rss_vlan_msk = {
+ 0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};
+
+ const uint32x4_t cksum_mask = {
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD};
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const uint8x16_t vlan_flags = {
+ 0, 0, 0, 0,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const uint8x16_t rss_flags = {
+ 0, PKT_RX_FDIR, 0, 0,
+ 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const uint8x16_t l3_l4e_flags = {
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
+ vreinterpretq_u32_u64(descs[2])).val[1];
+ vlan1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
+ vreinterpretq_u32_u64(descs[3])).val[1];
+ vlan0 = vzipq_u32(vlan0, vlan1).val[0];
+
+ vlan1 = vandq_u32(vlan0, rss_vlan_msk);
+ vlan0 = vreinterpretq_u32_u8(vqtbl1q_u8(vlan_flags,
+ vreinterpretq_u8_u32(vlan1)));
+
+ rss = vshrq_n_u32(vlan1, 11);
+ rss = vreinterpretq_u32_u8(vqtbl1q_u8(rss_flags,
+ vreinterpretq_u8_u32(rss)));
+
+ l3_l4e = vshrq_n_u32(vlan1, 22);
+ l3_l4e = vreinterpretq_u32_u8(vqtbl1q_u8(l3_l4e_flags,
+ vreinterpretq_u8_u32(l3_l4e)));
+ /* then we shift left 1 bit */
+ l3_l4e = vshlq_n_u32(l3_l4e, 1);
+ /* we need to mask out the reduntant bits */
+ l3_l4e = vandq_u32(l3_l4e, cksum_mask);
+
+ vlan0 = vorrq_u32(vlan0, rss);
+ vlan0 = vorrq_u32(vlan0, l3_l4e);
+
+ rearm0 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 0), mbuf_init, 1);
+ rearm1 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 1), mbuf_init, 1);
+ rearm2 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 2), mbuf_init, 1);
+ rearm3 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 3), mbuf_init, 1);
+
+ vst1q_u64((uint64_t *)&rx_pkts[0]->rearm_data, rearm0);
+ vst1q_u64((uint64_t *)&rx_pkts[1]->rearm_data, rearm1);
+ vst1q_u64((uint64_t *)&rx_pkts[2]->rearm_data, rearm2);
+ vst1q_u64((uint64_t *)&rx_pkts[3]->rearm_data, rearm3);
+}
+
+#define PKTLEN_SHIFT 10
+#define I40E_UINT16_BIT (CHAR_BIT * sizeof(uint16_t))
+
+static inline void
+desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts,
+ uint32_t *ptype_tbl)
+{
+ int i;
+ uint8_t ptype;
+ uint8x16_t tmp;
+
+ for (i = 0; i < 4; i++) {
+ tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
+ ptype = vgetq_lane_u8(tmp, 8);
+ rx_pkts[i]->packet_type = ptype_tbl[ptype];
+ }
+
+}
+
+ /*
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ /* mask to shuffle from desc. to mbuf */
+ uint8x16_t shuf_msk = {
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 14, 15, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 14, 15, /* octet 15~14, 16 bits data_len */
+ 2, 3, /* octet 2~3, low 16 bits vlan_macip */
+ 4, 5, 6, 7 /* octet 4~7, 32bits rss */
+ };
+
+ uint8x16_t eop_check = {
+ 0x02, 0x00, 0x02, 0x00,
+ 0x02, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+ };
+
+ uint16x8_t crc_adjust = {
+ 0, 0, /* ignore pkt_type field */
+ rxq->crc_len, /* sub crc on pkt_len */
+ 0, /* ignore high-16bits of pkt_len */
+ rxq->crc_len, /* sub crc on data_len */
+ 0, 0, 0 /* ignore non-length fields */
+ };
+
+ /* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch_non_temporal(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ uint64x2_t descs[RTE_I40E_DESCS_PER_LOOP];
+ uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ uint16x8x2_t sterr_tmp1, sterr_tmp2;
+ uint64x2_t mbp1, mbp2;
+ uint16x8_t staterr;
+ uint16x8_t tmp;
+ uint64_t stat;
+
+ int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT};
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
+ rte_rmb();
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
+
+ descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
+ /* B.1 load 2 mbuf point */
+ descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
+ descs[0] = vld1q_u64((uint64_t *)(rxdp));
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+ uint32x4_t len3 = vshlq_u32(vreinterpretq_u32_u64(descs[3]),
+ len_shl);
+ descs[3] = vreinterpretq_u64_u32(len3);
+ uint32x4_t len2 = vshlq_u32(vreinterpretq_u32_u64(descs[2]),
+ len_shl);
+ descs[2] = vreinterpretq_u64_u32(len2);
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
+ pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = vzipq_u16(vreinterpretq_u16_u64(descs[1]),
+ vreinterpretq_u16_u64(descs[3]));
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = vzipq_u16(vreinterpretq_u16_u64(descs[0]),
+ vreinterpretq_u16_u64(descs[2]));
+
+ /* C.2 get 4 pkts staterr value */
+ staterr = vzipq_u16(sterr_tmp1.val[1],
+ sterr_tmp2.val[1]).val[0];
+
+ desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
+ pkt_mb4 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
+ pkt_mb3 = vreinterpretq_u8_u16(tmp);
+
+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
+ uint32x4_t len1 = vshlq_u32(vreinterpretq_u32_u64(descs[1]),
+ len_shl);
+ descs[1] = vreinterpretq_u64_u32(len1);
+ uint32x4_t len0 = vshlq_u32(vreinterpretq_u32_u64(descs[0]),
+ len_shl);
+ descs[0] = vreinterpretq_u64_u32(len0);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
+ pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+ pkt_mb4);
+ vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
+ pkt_mb2 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
+ pkt_mb1 = vreinterpretq_u8_u16(tmp);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ uint8x16_t eop_shuf_mask = {
+ 0x00, 0x02, 0x04, 0x06,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF};
+ uint8x16_t eop_bits;
+
+ /* and with mask to extract bits, flipping 1-0 */
+ eop_bits = vmvnq_u8(vreinterpretq_u8_u16(staterr));
+ eop_bits = vandq_u8(eop_bits, eop_check);
+ /* the staterr values are not in order, as the count
+ * count of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = vqtbl1q_u8(eop_bits, eop_shuf_mask);
+
+ /* store the resulting 32-bit value */
+ vst1q_lane_u32((uint32_t *)split_packet,
+ vreinterpretq_u32_u8(eop_bits), 0);
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ staterr = vshlq_n_u16(staterr, I40E_UINT16_BIT - 1);
+ staterr = vreinterpretq_u16_s16(
+ vshrq_n_s16(vreinterpretq_s16_u16(staterr),
+ I40E_UINT16_BIT - 1));
+ stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
+
+ rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+ pkt_mb2);
+ vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+ /* C.4 calc avaialbe number of desc */
+ if (unlikely(stat == 0)) {
+ nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP;
+ } else {
+ nb_pkts_recd += __builtin_ctzl(stat) / I40E_UINT16_BIT;
+ break;
+ }
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /*
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ unsigned i = 0;
+
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ uint64x2_t descriptor = {pkt->buf_iova + pkt->data_off, high_qw};
+ vst1q_u64((uint64_t *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+ /* cross rx_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reach the end of ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ _i40e_rx_queue_release_mbufs_vec(rxq);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ return i40e_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+ return i40e_rx_vec_dev_conf_condition_check_default(dev);
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c
new file mode 100644
index 00000000..3b22588c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -0,0 +1,626 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ __m128i dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ __m128i vaddr0, vaddr1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+static inline void
+desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4],
+ struct rte_mbuf **rx_pkts)
+{
+ const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+ __m128i rearm0, rearm1, rearm2, rearm3;
+
+ __m128i vlan0, vlan1, rss, l3_l4e;
+
+ /* mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication.
+ */
+ const __m128i rss_vlan_msk = _mm_set_epi32(
+ 0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
+
+ const __m128i cksum_mask = _mm_set_epi32(
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD);
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ 0, 0, 0, 0);
+
+ const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
+ 0, 0, PKT_RX_FDIR, 0);
+
+ const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ /* shift right 1 bit to make sure it not exceed 255 */
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+
+ vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
+ vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
+ vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);
+
+ vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
+ vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
+
+ rss = _mm_srli_epi32(vlan1, 11);
+ rss = _mm_shuffle_epi8(rss_flags, rss);
+
+ l3_l4e = _mm_srli_epi32(vlan1, 22);
+ l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
+ /* then we shift left 1 bit */
+ l3_l4e = _mm_slli_epi32(l3_l4e, 1);
+ /* we need to mask out the reduntant bits */
+ l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
+
+ vlan0 = _mm_or_si128(vlan0, rss);
+ vlan0 = _mm_or_si128(vlan0, l3_l4e);
+
+ /*
+ * At this point, we have the 4 sets of flags in the low 16-bits
+ * of each 32-bit value in vlan0.
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single 16-byte write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write.
+ */
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
+ rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);
+
+ /* write the rearm data and the olflags in one write */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+ _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+}
+
+#define PKTLEN_SHIFT 10
+
+static inline void
+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
+ uint32_t *ptype_tbl)
+{
+ __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
+ __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
+
+ ptype0 = _mm_srli_epi64(ptype0, 30);
+ ptype1 = _mm_srli_epi64(ptype1, 30);
+
+ rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 0)];
+ rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 8)];
+ rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 0)];
+ rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 8)];
+}
+
+ /*
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ __m128i shuf_msk;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ __m128i crc_adjust = _mm_set_epi16(
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+ /*
+ * compile-time check the above crc_adjust layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi16
+ * call above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ __m128i dd_check, eop_check;
+
+ /* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch0(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF /*pkt_type set as unknown */
+ );
+ /*
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ __m128i descs[RTE_I40E_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
+
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
+ mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+ rte_compiler_barrier();
+
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
+ mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
+
+ descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ rte_compiler_barrier();
+ /* B.1 load 2 mbuf point */
+ descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ rte_compiler_barrier();
+ descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+#if defined(RTE_ARCH_X86_64)
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+ const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT);
+ const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT);
+
+ /* merge the now-aligned packet length fields back in */
+ descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
+ descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+ desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
+ pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
+ const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT);
+ const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT);
+
+ /* merge the now-aligned packet length fields back in */
+ descs[1] = _mm_blend_epi16(descs[1], len1, 0x80);
+ descs[0] = _mm_blend_epi16(descs[0], len0, 0x80);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+ pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+ /* the staterr values are not in order, as the count
+ * count of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+ /* C.4 calc avaialbe number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /*
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ unsigned i = 0;
+
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ __m128i descriptor = _mm_set_epi64x(high_qw,
+ pkt->buf_iova + pkt->data_off);
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+ /* cross rx_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reach the end of ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ _i40e_rx_queue_release_mbufs_vec(rxq);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ return i40e_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+ return i40e_rx_vec_dev_conf_condition_check_default(dev);
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_tm.c b/src/spdk/dpdk/drivers/net/i40e/i40e_tm.c
new file mode 100644
index 00000000..c76760c9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_tm.c
@@ -0,0 +1,971 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+
+static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error);
+static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
+static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+static int i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error);
+static int i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error);
+static int i40e_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error);
+static int i40e_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
+static int i40e_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
+
+const struct rte_tm_ops i40e_tm_ops = {
+ .capabilities_get = i40e_tm_capabilities_get,
+ .shaper_profile_add = i40e_shaper_profile_add,
+ .shaper_profile_delete = i40e_shaper_profile_del,
+ .node_add = i40e_node_add,
+ .node_delete = i40e_node_delete,
+ .node_type_get = i40e_node_type_get,
+ .level_capabilities_get = i40e_level_capabilities_get,
+ .node_capabilities_get = i40e_node_capabilities_get,
+ .hierarchy_commit = i40e_hierarchy_commit,
+};
+
+int
+i40e_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+ void *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ *(const void **)arg = &i40e_tm_ops;
+
+ return 0;
+}
+
+void
+i40e_tm_conf_init(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ /* initialize shaper profile list */
+ TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
+
+ /* initialize node configuration */
+ pf->tm_conf.root = NULL;
+ TAILQ_INIT(&pf->tm_conf.tc_list);
+ TAILQ_INIT(&pf->tm_conf.queue_list);
+ pf->tm_conf.nb_tc_node = 0;
+ pf->tm_conf.nb_queue_node = 0;
+ pf->tm_conf.committed = false;
+}
+
+void
+i40e_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_shaper_profile *shaper_profile;
+ struct i40e_tm_node *tm_node;
+
+ /* clear node configuration */
+ while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ pf->tm_conf.nb_queue_node = 0;
+ while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ pf->tm_conf.nb_tc_node = 0;
+ if (pf->tm_conf.root) {
+ rte_free(pf->tm_conf.root);
+ pf->tm_conf.root = NULL;
+ }
+
+ /* Remove all shaper profiles */
+ while ((shaper_profile =
+ TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
+ shaper_profile, node);
+ rte_free(shaper_profile);
+ }
+}
+
+static inline uint16_t
+i40e_tc_nb_get(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ uint16_t sum = 0;
+ int i;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (main_vsi->enabled_tc & BIT_ULL(i))
+ sum++;
+ }
+
+ return sum;
+}
+
+static int
+i40e_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t tc_nb = i40e_tc_nb_get(dev);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (tc_nb > hw->func_caps.num_tx_qp)
+ return -EINVAL;
+
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+ /**
+ * support port + TCs + queues
+ * here shows the max capability not the current configuration.
+ */
+ cap->n_nodes_max = 1 + I40E_MAX_TRAFFIC_CLASS + hw->func_caps.num_tx_qp;
+ cap->n_levels_max = 3; /* port, TC, queue */
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+ cap->shaper_n_max = cap->n_nodes_max;
+ cap->shaper_private_n_max = cap->n_nodes_max;
+ cap->shaper_private_dual_rate_n_max = 0;
+ cap->shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps */
+ cap->shaper_private_rate_max = 5000000000ull;
+ cap->shaper_shared_n_max = 0;
+ cap->shaper_shared_n_nodes_per_shaper_max = 0;
+ cap->shaper_shared_n_shapers_per_node_max = 0;
+ cap->shaper_shared_dual_rate_n_max = 0;
+ cap->shaper_shared_rate_min = 0;
+ cap->shaper_shared_rate_max = 0;
+ cap->sched_n_children_max = hw->func_caps.num_tx_qp;
+ /**
+ * HW supports SP. But no plan to support it now.
+ * So, all the nodes should have the same priority.
+ */
+ cap->sched_sp_n_priorities_max = 1;
+ cap->sched_wfq_n_children_per_group_max = 0;
+ cap->sched_wfq_n_groups_max = 0;
+ /**
+ * SW only supports fair round robin now.
+ * So, all the nodes should have the same weight.
+ */
+ cap->sched_wfq_weight_max = 1;
+ cap->cman_head_drop_supported = 0;
+ cap->dynamic_update_mask = 0;
+ cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
+ cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+ cap->cman_wred_context_n_max = 0;
+ cap->cman_wred_context_private_n_max = 0;
+ cap->cman_wred_context_shared_n_max = 0;
+ cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+ cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static inline struct i40e_tm_shaper_profile *
+i40e_shaper_profile_search(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_shaper_profile_list *shaper_profile_list =
+ &pf->tm_conf.shaper_profile_list;
+ struct i40e_tm_shaper_profile *shaper_profile;
+
+ TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+ if (shaper_profile_id == shaper_profile->shaper_profile_id)
+ return shaper_profile;
+ }
+
+ return NULL;
+}
+
+static int
+i40e_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ /* min rate not supported */
+ if (profile->committed.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ error->message = "committed rate not supported";
+ return -EINVAL;
+ }
+ /* min bucket size not supported */
+ if (profile->committed.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ error->message = "committed bucket size not supported";
+ return -EINVAL;
+ }
+ /* max bucket size not supported */
+ if (profile->peak.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ error->message = "peak bucket size not supported";
+ return -EINVAL;
+ }
+ /* length adjustment not supported */
+ if (profile->pkt_length_adjust) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ error->message = "packet length adjustment not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+i40e_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_shaper_profile *shaper_profile;
+ int ret;
+
+ if (!profile || !error)
+ return -EINVAL;
+
+ ret = i40e_shaper_profile_param_check(profile, error);
+ if (ret)
+ return ret;
+
+ shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);
+
+ if (shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID exist";
+ return -EINVAL;
+ }
+
+ shaper_profile = rte_zmalloc("i40e_tm_shaper_profile",
+ sizeof(struct i40e_tm_shaper_profile),
+ 0);
+ if (!shaper_profile)
+ return -ENOMEM;
+ shaper_profile->shaper_profile_id = shaper_profile_id;
+ rte_memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
+ TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
+ shaper_profile, node);
+
+ return 0;
+}
+
+static int
+i40e_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_shaper_profile *shaper_profile;
+
+ if (!error)
+ return -EINVAL;
+
+ shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);
+
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID not exist";
+ return -EINVAL;
+ }
+
+ /* don't delete a profile if it's used by one or several nodes */
+ if (shaper_profile->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "profile in use";
+ return -EINVAL;
+ }
+
+ TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
+ rte_free(shaper_profile);
+
+ return 0;
+}
+
+static inline struct i40e_tm_node *
+i40e_tm_node_search(struct rte_eth_dev *dev,
+ uint32_t node_id, enum i40e_tm_node_type *node_type)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+ struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+ struct i40e_tm_node *tm_node;
+
+ if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
+ *node_type = I40E_TM_NODE_TYPE_PORT;
+ return pf->tm_conf.root;
+ }
+
+ TAILQ_FOREACH(tm_node, tc_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = I40E_TM_NODE_TYPE_TC;
+ return tm_node;
+ }
+ }
+
+ TAILQ_FOREACH(tm_node, queue_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = I40E_TM_NODE_TYPE_QUEUE;
+ return tm_node;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+i40e_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t priority, uint32_t weight,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ if (priority) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "priority should be 0";
+ return -EINVAL;
+ }
+
+ if (weight != 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+ error->message = "weight must be 1";
+ return -EINVAL;
+ }
+
+ /* not support shared shaper */
+ if (params->shared_shaper_id) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+ if (params->n_shared_shapers) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+
+ /* for non-leaf node */
+ if (node_id >= hw->func_caps.num_tx_qp) {
+ if (params->nonleaf.wfq_weight_mode) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFQ not supported";
+ return -EINVAL;
+ }
+ if (params->nonleaf.n_sp_priorities != 1) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+ error->message = "SP priority not supported";
+ return -EINVAL;
+ } else if (params->nonleaf.wfq_weight_mode &&
+ !(*params->nonleaf.wfq_weight_mode)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFP should be byte mode";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* for leaf node */
+ if (params->leaf.cman) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+ error->message = "Congestion management not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.wred_profile_id !=
+ RTE_TM_WRED_PROFILE_ID_NONE) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.shared_wred_context_id) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.n_shared_wred_contexts) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Now the TC and queue configuration is controlled by DCB.
+ * We need check if the node configuration follows the DCB configuration.
+ * In the future, we may use TM to cover DCB.
+ */
+static int
+i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
+ enum i40e_tm_node_type parent_node_type = I40E_TM_NODE_TYPE_MAX;
+ struct i40e_tm_shaper_profile *shaper_profile = NULL;
+ struct i40e_tm_node *tm_node;
+ struct i40e_tm_node *parent_node;
+ uint16_t tc_nb = 0;
+ int ret;
+
+ if (!params || !error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (pf->tm_conf.committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ ret = i40e_node_param_check(dev, node_id, priority, weight,
+ params, error);
+ if (ret)
+ return ret;
+
+ /* check if the node ID is already used */
+ if (i40e_tm_node_search(dev, node_id, &node_type)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "node id already used";
+ return -EINVAL;
+ }
+
+ /* check the shaper profile id */
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+ shaper_profile = i40e_shaper_profile_search(
+ dev, params->shaper_profile_id);
+ if (!shaper_profile) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+ error->message = "shaper profile not exist";
+ return -EINVAL;
+ }
+ }
+
+ /* root node if not have a parent */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id > I40E_TM_NODE_TYPE_PORT) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* obviously no more than one root */
+ if (pf->tm_conf.root) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "already have a root";
+ return -EINVAL;
+ }
+
+ /* add the root node */
+ tm_node = rte_zmalloc("i40e_tm_node",
+ sizeof(struct i40e_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = NULL;
+ tm_node->shaper_profile = shaper_profile;
+ rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ pf->tm_conf.root = tm_node;
+
+ /* increase the reference counter of the shaper profile */
+ if (shaper_profile)
+ shaper_profile->reference_count++;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ /* check the parent node */
+ parent_node = i40e_tm_node_search(dev, parent_node_id,
+ &parent_node_type);
+ if (!parent_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent not exist";
+ return -EINVAL;
+ }
+ if (parent_node_type != I40E_TM_NODE_TYPE_PORT &&
+ parent_node_type != I40E_TM_NODE_TYPE_TC) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent is not port or TC";
+ return -EINVAL;
+ }
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id != parent_node_type + 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* check the node number */
+ if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
+ /* check the TC number */
+ tc_nb = i40e_tc_nb_get(dev);
+ if (pf->tm_conf.nb_tc_node >= tc_nb) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many TCs";
+ return -EINVAL;
+ }
+ } else {
+ /* check the queue number */
+ if (pf->tm_conf.nb_queue_node >= hw->func_caps.num_tx_qp) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues";
+ return -EINVAL;
+ }
+
+ /**
+ * check the node id.
+ * For queue, the node id means queue id.
+ */
+ if (node_id >= hw->func_caps.num_tx_qp) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too large queue id";
+ return -EINVAL;
+ }
+ }
+
+ /* add the TC or queue node */
+ tm_node = rte_zmalloc("i40e_tm_node",
+ sizeof(struct i40e_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = parent_node;
+ tm_node->shaper_profile = shaper_profile;
+ rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
+ TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
+ tm_node, node);
+ pf->tm_conf.nb_tc_node++;
+ } else {
+ TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
+ tm_node, node);
+ pf->tm_conf.nb_queue_node++;
+ }
+ tm_node->parent->reference_count++;
+
+ /* increase the reference counter of the shaper profile */
+ if (shaper_profile)
+ shaper_profile->reference_count++;
+
+ return 0;
+}
+
+static int
+i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
+ struct i40e_tm_node *tm_node;
+
+ if (!error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (pf->tm_conf.committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* the node should have no child */
+ if (tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message =
+ "cannot delete a node which has children";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (node_type == I40E_TM_NODE_TYPE_PORT) {
+ if (tm_node->shaper_profile)
+ tm_node->shaper_profile->reference_count--;
+ rte_free(tm_node);
+ pf->tm_conf.root = NULL;
+ return 0;
+ }
+
+ /* TC or queue node */
+ if (tm_node->shaper_profile)
+ tm_node->shaper_profile->reference_count--;
+ tm_node->parent->reference_count--;
+ if (node_type == I40E_TM_NODE_TYPE_TC) {
+ TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+ pf->tm_conf.nb_tc_node--;
+ } else {
+ TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+ pf->tm_conf.nb_queue_node--;
+ }
+ rte_free(tm_node);
+
+ return 0;
+}
+
+static int
+i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
+ struct i40e_tm_node *tm_node;
+
+ if (!is_leaf || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (node_type == I40E_TM_NODE_TYPE_QUEUE)
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+
+ return 0;
+}
+
+static int
+i40e_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (level_id >= I40E_TM_NODE_TYPE_MAX) {
+ error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+ error->message = "too deep level";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (level_id == I40E_TM_NODE_TYPE_PORT) {
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->n_nodes_leaf_max = 0;
+ } else if (level_id == I40E_TM_NODE_TYPE_TC) {
+ /* TC */
+ cap->n_nodes_max = I40E_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_nonleaf_max = I40E_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_leaf_max = 0;
+ } else {
+ /* queue */
+ cap->n_nodes_max = hw->func_caps.num_tx_qp;
+ cap->n_nodes_nonleaf_max = 0;
+ cap->n_nodes_leaf_max = hw->func_caps.num_tx_qp;
+ }
+
+ cap->non_leaf_nodes_identical = true;
+ cap->leaf_nodes_identical = true;
+
+ if (level_id != I40E_TM_NODE_TYPE_QUEUE) {
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = false;
+ cap->nonleaf.shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps */
+ cap->nonleaf.shaper_private_rate_max = 5000000000ull;
+ cap->nonleaf.shaper_shared_n_max = 0;
+ if (level_id == I40E_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ I40E_MAX_TRAFFIC_CLASS;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->func_caps.num_tx_qp;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ cap->nonleaf.stats_mask = 0;
+
+ return 0;
+ }
+
+ /* queue node */
+ cap->leaf.shaper_private_supported = true;
+ cap->leaf.shaper_private_dual_rate_supported = false;
+ cap->leaf.shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps */
+ cap->leaf.shaper_private_rate_max = 5000000000ull;
+ cap->leaf.shaper_shared_n_max = 0;
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ cap->leaf.stats_mask = 0;
+
+ return 0;
+}
+
+static int
+i40e_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ enum i40e_tm_node_type node_type;
+ struct i40e_tm_node *tm_node;
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ cap->shaper_private_supported = true;
+ cap->shaper_private_dual_rate_supported = false;
+ cap->shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps */
+ cap->shaper_private_rate_max = 5000000000ull;
+ cap->shaper_shared_n_max = 0;
+
+ if (node_type == I40E_TM_NODE_TYPE_QUEUE) {
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ } else {
+ if (node_type == I40E_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ I40E_MAX_TRAFFIC_CLASS;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->func_caps.num_tx_qp;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ }
+
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static int
+i40e_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+ struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+ struct i40e_tm_node *tm_node;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
+ uint64_t bw;
+ uint8_t tc_map;
+ int ret;
+ int i;
+
+ if (!error)
+ return -EINVAL;
+
+ /* check the setting */
+ if (!pf->tm_conf.root)
+ goto done;
+
+ vsi = pf->main_vsi;
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /**
+ * Don't support bandwidth control for port and TCs in parallel.
+ * If the port has a max bandwidth, the TCs should have none.
+ */
+ /* port */
+ if (pf->tm_conf.root->shaper_profile)
+ bw = pf->tm_conf.root->shaper_profile->profile.peak.rate;
+ else
+ bw = 0;
+ if (bw) {
+ /* check if any TC has a max bandwidth */
+ TAILQ_FOREACH(tm_node, tc_list, node) {
+ if (tm_node->shaper_profile &&
+ tm_node->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no port and TC max bandwidth"
+ " in parallel";
+ goto fail_clear;
+ }
+ }
+
+ /* interpret Bps to 50Mbps */
+ bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;
+
+ /* set the max bandwidth */
+ ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid,
+ (uint16_t)bw, 0, NULL);
+ if (ret) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "fail to set port max bandwidth";
+ goto fail_clear;
+ }
+
+ goto done;
+ }
+
+ /* TC */
+ memset(&tc_bw, 0, sizeof(tc_bw));
+ tc_bw.tc_valid_bits = vsi->enabled_tc;
+ tc_map = vsi->enabled_tc;
+ TAILQ_FOREACH(tm_node, tc_list, node) {
+ if (!tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "TC without queue assigned";
+ goto fail_clear;
+ }
+
+ i = 0;
+ while (i < I40E_MAX_TRAFFIC_CLASS && !(tc_map & BIT_ULL(i)))
+ i++;
+ if (i >= I40E_MAX_TRAFFIC_CLASS) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "cannot find the TC";
+ goto fail_clear;
+ }
+ tc_map &= ~BIT_ULL(i);
+
+ if (tm_node->shaper_profile)
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ else
+ bw = 0;
+ if (!bw)
+ continue;
+
+ /* interpret Bps to 50Mbps */
+ bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;
+
+ tc_bw.tc_bw_credits[i] = rte_cpu_to_le_16((uint16_t)bw);
+ }
+
+ TAILQ_FOREACH(tm_node, queue_list, node) {
+ if (tm_node->shaper_profile)
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ else
+ bw = 0;
+ if (bw) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "not support queue QoS";
+ goto fail_clear;
+ }
+ }
+
+ ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
+ if (ret) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "fail to set TC max bandwidth";
+ goto fail_clear;
+ }
+
+done:
+ pf->tm_conf.committed = true;
+ return 0;
+
+fail_clear:
+ /* clear all the traffic manager configuration */
+ if (clear_on_fail) {
+ i40e_tm_conf_uninit(dev);
+ i40e_tm_conf_init(dev);
+ }
+ return -EINVAL;
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_vf_representor.c b/src/spdk/dpdk/drivers/net/i40e/i40e_vf_representor.c
new file mode 100644
index 00000000..f9f13161
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/i40e_vf_representor.c
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_type.h"
+#include "base/virtchnl.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "rte_pmd_i40e.h"
+
+static int
+i40e_vf_representor_link_update(struct rte_eth_dev *ethdev,
+ int wait_to_complete)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ return i40e_dev_link_update(representor->adapter->eth_dev,
+ wait_to_complete);
+}
+static void
+i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ /* get dev info for the vdev */
+ dev_info->device = ethdev->device;
+
+ dev_info->max_rx_queues = ethdev->data->nb_rx_queues;
+ dev_info->max_tx_queues = ethdev->data->nb_tx_queues;
+
+ dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+ dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+ dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
+ dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = I40E_DEFAULT_RX_PTHRESH,
+ .hthresh = I40E_DEFAULT_RX_HTHRESH,
+ .wthresh = I40E_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = I40E_DEFAULT_TX_PTHRESH,
+ .hthresh = I40E_DEFAULT_TX_HTHRESH,
+ .wthresh = I40E_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->switch_info.name =
+ representor->adapter->eth_dev->device->name;
+ dev_info->switch_info.domain_id = representor->switch_domain_id;
+ dev_info->switch_info.port_id = representor->vf_id;
+}
+
+static int
+i40e_vf_representor_dev_configure(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int
+i40e_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static void
+i40e_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
+{
+}
+
+static int
+i40e_vf_representor_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mb_pool)
+{
+ return 0;
+}
+
+static int
+i40e_vf_representor_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ return 0;
+}
+
+static void
+i40evf_stat_update_48(uint64_t *offset,
+ uint64_t *stat)
+{
+ if (*stat >= *offset)
+ *stat = *stat - *offset;
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
+
+ *stat &= I40E_48_BIT_MASK;
+}
+
+static void
+i40evf_stat_update_32(uint64_t *offset,
+ uint64_t *stat)
+{
+ if (*stat >= *offset)
+ *stat = (uint64_t)(*stat - *offset);
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
+}
+
+static int
+rte_pmd_i40e_get_vf_native_stats(uint16_t port,
+ uint16_t vf_id,
+ struct i40e_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ i40e_update_vsi_stats(vsi);
+ memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats));
+
+ return 0;
+}
+
+static int
+i40e_vf_representor_stats_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_stats *stats)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+ struct i40e_eth_stats native_stats;
+ int ret;
+
+ ret = rte_pmd_i40e_get_vf_native_stats(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, &native_stats);
+ if (ret == 0) {
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_bytes,
+ &native_stats.rx_bytes);
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_unicast,
+ &native_stats.rx_unicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_multicast,
+ &native_stats.rx_multicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_broadcast,
+ &native_stats.rx_broadcast);
+ i40evf_stat_update_32(
+ &representor->stats_offset.rx_discards,
+ &native_stats.rx_discards);
+ i40evf_stat_update_32(
+ &representor->stats_offset.rx_unknown_protocol,
+ &native_stats.rx_unknown_protocol);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_bytes,
+ &native_stats.tx_bytes);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_unicast,
+ &native_stats.tx_unicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_multicast,
+ &native_stats.tx_multicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_broadcast,
+ &native_stats.tx_broadcast);
+ i40evf_stat_update_32(
+ &representor->stats_offset.tx_errors,
+ &native_stats.tx_errors);
+ i40evf_stat_update_32(
+ &representor->stats_offset.tx_discards,
+ &native_stats.tx_discards);
+
+ stats->ipackets = native_stats.rx_unicast +
+ native_stats.rx_multicast +
+ native_stats.rx_broadcast;
+ stats->opackets = native_stats.tx_unicast +
+ native_stats.tx_multicast +
+ native_stats.tx_broadcast;
+ stats->ibytes = native_stats.rx_bytes;
+ stats->obytes = native_stats.tx_bytes;
+ stats->ierrors = native_stats.rx_discards;
+ stats->oerrors = native_stats.tx_errors + native_stats.tx_discards;
+ }
+ return ret;
+}
+
+static void
+i40e_vf_representor_stats_reset(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_get_vf_native_stats(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, &representor->stats_offset);
+}
+
+static void
+i40e_vf_representor_promiscuous_enable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_unicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 1);
+}
+
+static void
+i40e_vf_representor_promiscuous_disable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_unicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 0);
+}
+
+static void
+i40e_vf_representor_allmulticast_enable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_multicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 1);
+}
+
+static void
+i40e_vf_representor_allmulticast_disable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_multicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 0);
+}
+
+static void
+i40e_vf_representor_mac_addr_remove(struct rte_eth_dev *ethdev, uint32_t index)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_remove_vf_mac_addr(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, &ethdev->data->mac_addrs[index]);
+}
+
+static int
+i40e_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ return rte_pmd_i40e_set_vf_mac_addr(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, mac_addr);
+}
+
+static int
+i40e_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
+ uint16_t vlan_id, int on)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+ uint64_t vf_mask = 1ULL << representor->vf_id;
+
+ return rte_pmd_i40e_set_vf_vlan_filter(
+ representor->adapter->eth_dev->data->port_id,
+ vlan_id, vf_mask, on);
+}
+
+static int
+i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+ struct rte_eth_dev *pdev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+ uint32_t vfid;
+
+ pdev = representor->adapter->eth_dev;
+ vfid = representor->vf_id;
+
+ if (!is_i40e_supported(pdev)) {
+ PMD_DRV_LOG(ERR, "Invalid PF dev.");
+ return -EINVAL;
+ }
+
+ pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private);
+
+ if (vfid >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vf = &pf->vfs[vfid];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ /* Enable or disable VLAN filtering offload */
+ if (ethdev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
+ return i40e_vsi_config_vlan_filter(vsi, TRUE);
+ else
+ return i40e_vsi_config_vlan_filter(vsi, FALSE);
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping offload */
+ if (ethdev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP)
+ return i40e_vsi_config_vlan_stripping(vsi, TRUE);
+ else
+ return i40e_vsi_config_vlan_stripping(vsi, FALSE);
+ }
+
+ return -EINVAL;
+}
+
+static void
+i40e_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
+ __rte_unused uint16_t rx_queue_id, int on)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_vlan_stripq(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, on);
+}
+
+static int
+i40e_vf_representor_vlan_pvid_set(struct rte_eth_dev *ethdev, uint16_t vlan_id,
+ __rte_unused int on)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ return rte_pmd_i40e_set_vf_vlan_insert(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, vlan_id);
+}
+
+struct eth_dev_ops i40e_representor_dev_ops = {
+ .dev_infos_get = i40e_vf_representor_dev_infos_get,
+
+ .dev_start = i40e_vf_representor_dev_start,
+ .dev_configure = i40e_vf_representor_dev_configure,
+ .dev_stop = i40e_vf_representor_dev_stop,
+
+ .rx_queue_setup = i40e_vf_representor_rx_queue_setup,
+ .tx_queue_setup = i40e_vf_representor_tx_queue_setup,
+
+ .link_update = i40e_vf_representor_link_update,
+
+ .stats_get = i40e_vf_representor_stats_get,
+ .stats_reset = i40e_vf_representor_stats_reset,
+
+ .promiscuous_enable = i40e_vf_representor_promiscuous_enable,
+ .promiscuous_disable = i40e_vf_representor_promiscuous_disable,
+
+ .allmulticast_enable = i40e_vf_representor_allmulticast_enable,
+ .allmulticast_disable = i40e_vf_representor_allmulticast_disable,
+
+ .mac_addr_remove = i40e_vf_representor_mac_addr_remove,
+ .mac_addr_set = i40e_vf_representor_mac_addr_set,
+
+ .vlan_filter_set = i40e_vf_representor_vlan_filter_set,
+ .vlan_offload_set = i40e_vf_representor_vlan_offload_set,
+ .vlan_strip_queue_set = i40e_vf_representor_vlan_strip_queue_set,
+ .vlan_pvid_set = i40e_vf_representor_vlan_pvid_set
+
+};
+
+static uint16_t
+i40e_vf_representor_rx_burst(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+i40e_vf_representor_tx_burst(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int
+i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ struct i40e_pf *pf;
+ struct i40e_pf_vf *vf;
+ struct rte_eth_link *link;
+
+ representor->vf_id =
+ ((struct i40e_vf_representor *)init_params)->vf_id;
+ representor->switch_domain_id =
+ ((struct i40e_vf_representor *)init_params)->switch_domain_id;
+ representor->adapter =
+ ((struct i40e_vf_representor *)init_params)->adapter;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(
+ representor->adapter->eth_dev->data->dev_private);
+
+ if (representor->vf_id >= pf->vf_num)
+ return -ENODEV;
+
+ /** representor shares the same driver as it's PF device */
+ ethdev->device->driver = representor->adapter->eth_dev->device->driver;
+
+ /* Set representor device ops */
+ ethdev->dev_ops = &i40e_representor_dev_ops;
+
+ /* No data-path, but need stub Rx/Tx functions to avoid crash
+ * when testing with the likes of testpmd.
+ */
+ ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst;
+ ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst;
+
+ vf = &pf->vfs[representor->vf_id];
+
+ if (!vf->vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -ENODEV;
+ }
+
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+ /* Setting the number queues allocated to the VF */
+ ethdev->data->nb_rx_queues = vf->vsi->nb_qps;
+ ethdev->data->nb_tx_queues = vf->vsi->nb_qps;
+
+ ethdev->data->mac_addrs = &vf->mac_addr;
+
+ /* Link state. Inherited from PF */
+ link = &representor->adapter->eth_dev->data->dev_link;
+
+ ethdev->data->dev_link.link_speed = link->link_speed;
+ ethdev->data->dev_link.link_duplex = link->link_duplex;
+ ethdev->data->dev_link.link_status = link->link_status;
+ ethdev->data->dev_link.link_autoneg = link->link_autoneg;
+
+ return 0;
+}
+
+int
+i40e_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused)
+{
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/meson.build b/src/spdk/dpdk/drivers/net/i40e/meson.build
new file mode 100644
index 00000000..d783f362
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/meson.build
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+version = 2
+
+cflags += ['-DPF_DRIVER',
+ '-DVF_DRIVER',
+ '-DINTEGRATED_VF',
+ '-DX722_A0_SUPPORT',
+ '-DALLOW_EXPERIMENTAL_API']
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'i40e_ethdev.c',
+ 'i40e_rxtx.c',
+ 'i40e_ethdev_vf.c',
+ 'i40e_pf.c',
+ 'i40e_fdir.c',
+ 'i40e_flow.c',
+ 'i40e_tm.c',
+ 'i40e_vf_representor.c',
+ 'rte_pmd_i40e.c'
+ )
+
+deps += ['hash']
+includes += include_directories('base')
+
+if arch_subdir == 'x86'
+ dpdk_conf.set('RTE_LIBRTE_I40E_INC_VECTOR', 1)
+ sources += files('i40e_rxtx_vec_sse.c')
+
+ # compile AVX2 version if either:
+ # a. we have AVX supported in minimum instruction set baseline
+ # b. it's not minimum instruction set, but supported by compiler
+ if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX2')
+ sources += files('i40e_rxtx_vec_avx2.c')
+ elif cc.has_argument('-mavx2')
+ i40e_avx2_lib = static_library('i40e_avx2_lib',
+ 'i40e_rxtx_vec_avx2.c',
+ dependencies: [static_rte_ethdev,
+ static_rte_kvargs, static_rte_hash],
+ include_directories: includes,
+ c_args: [cflags, '-mavx2'])
+ objs += i40e_avx2_lib.extract_objects('i40e_rxtx_vec_avx2.c')
+ endif
+endif
+
+install_headers('rte_pmd_i40e.h')
diff --git a/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.c b/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.c
new file mode 100644
index 00000000..bba62b1c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.c
@@ -0,0 +1,3192 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_dcb.h"
+#include "i40e_ethdev.h"
+#include "i40e_pf.h"
+#include "i40e_rxtx.h"
+#include "rte_pmd_i40e.h"
+
+int
+rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
+ if (on) {
+ if ((vsi->info.sec_flags &
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.sec_flags &
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
+ return 0; /* already off */
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (on)
+ vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+ else
+ vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+static int
+i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
+{
+ uint32_t j, k;
+ uint16_t vlan_id;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
+ int ret;
+
+ for (j = 0; j < I40E_VFTA_SIZE; j++) {
+ if (!vsi->vfta[j])
+ continue;
+
+ for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+ if (!(vsi->vfta[j] & (1 << k)))
+ continue;
+
+ vlan_id = j * I40E_UINT32_BIT_SIZE + k;
+ if (!vlan_id)
+ continue;
+
+ vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
+ if (add)
+ ret = i40e_aq_add_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ else
+ ret = i40e_aq_remove_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to add/rm vlan filter");
+ return ret;
+ }
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
+int
+rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->vlan_anti_spoof_on == on)
+ return 0; /* already on or off */
+
+ vsi->vlan_anti_spoof_on = on;
+ if (!vsi->vlan_filter_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, on);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
+ return -ENOTSUP;
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (on)
+ vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
+ else
+ vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num;
+ enum rte_mac_filter_type filter_type;
+ int ret = I40E_SUCCESS;
+ void *temp;
+
+ /* remove all the MACs */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+ vlan_num = vsi->vlan_num;
+ filter_type = f->mac_info.filter_type;
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ if (vlan_num == 0) {
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
+ return I40E_ERR_PARAM;
+ }
+ } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
+ filter_type == RTE_MAC_HASH_MATCH)
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (!mv_f) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = filter_type;
+ rte_memcpy(&mv_f[i].macaddr,
+ &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ }
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+ &f->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+ }
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+
+ rte_free(mv_f);
+ ret = I40E_SUCCESS;
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num = 0;
+ int ret = I40E_SUCCESS;
+ void *temp;
+
+ /* restore all the MACs */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+ if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
+ (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
+ /**
+ * If vlan_num is 0, that's the first time to add mac,
+ * set mask for vlan_id 0.
+ */
+ if (vsi->vlan_num == 0) {
+ i40e_set_vlan_filter(vsi, 0, 1);
+ vsi->vlan_num = 1;
+ }
+ vlan_num = vsi->vlan_num;
+ } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
+ (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (!mv_f) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = f->mac_info.filter_type;
+ rte_memcpy(&mv_f[i].macaddr,
+ &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ }
+
+ if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+ &f->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+ }
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+
+ rte_free(mv_f);
+ ret = I40E_SUCCESS;
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
+{
+ struct i40e_vsi_context ctxt;
+ struct i40e_hw *hw;
+ int ret;
+
+ if (!vsi)
+ return -EINVAL;
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* Use the FW API if FW >= v5.0 */
+ if (hw->aq.fw_maj_ver < 5) {
+ PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
+ return -ENOTSUP;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
+ if (on) {
+ if ((vsi->info.switch_id &
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.switch_id &
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
+ return 0; /* already off */
+ }
+ }
+
+ /* remove all the MAC and VLAN first */
+ ret = i40e_vsi_rm_mac_filter(vsi);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
+ return ret;
+ }
+ if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, 0);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
+ return ret;
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ if (on)
+ vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
+ else
+ vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ return ret;
+ }
+
+ /* add all the MAC and VLAN back */
+ ret = i40e_vsi_restore_mac_filter(vsi);
+ if (ret)
+ return ret;
+ if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, 1);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ uint16_t vf_id;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ /* setup PF TX loopback */
+ vsi = pf->main_vsi;
+ ret = i40e_vsi_set_tx_loopback(vsi, on);
+ if (ret)
+ return -ENOTSUP;
+
+ /* setup TX loopback for all the VFs */
+ if (!pf->vfs) {
+ /* if no VF, do nothing. */
+ return 0;
+ }
+
+ for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+
+ ret = i40e_vsi_set_tx_loopback(vsi, on);
+ if (ret)
+ return -ENOTSUP;
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ on, NULL, true);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ on, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_mac_filter *f;
+ struct rte_eth_dev *dev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+ void *temp;
+
+ if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs)
+ return -EINVAL;
+
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ ether_addr_copy(mac_addr, &vf->mac_addr);
+
+ /* Remove all existing mac */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
+ if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
+ != I40E_SUCCESS)
+ PMD_DRV_LOG(WARNING, "Delete MAC failed");
+
+ return 0;
+}
+
+static const struct ether_addr null_mac_addr;
+
+int
+rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+
+ if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs)
+ return -EINVAL;
+
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (is_same_ether_addr(mac_addr, &vf->mac_addr))
+ /* Reset the mac with NULL address */
+ ether_addr_copy(&null_mac_addr, &vf->mac_addr);
+
+ /* Remove the mac */
+ i40e_vsi_delete_mac(vsi, mac_addr);
+
+ return 0;
+}
+
+/* Set vlan strip on/off for specific VF from host */
+int
+rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+
+ if (!vsi)
+ return -EINVAL;
+
+ ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
+ uint16_t vlan_id)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (vlan_id > ETHER_MAX_VLAN_ID) {
+ PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0)
+ return -ENODEV;
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.pvid = vlan_id;
+ if (vlan_id > 0)
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
+ else
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
+ uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_mac_filter_info filter;
+ struct ether_addr broadcast = {
+ .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (on) {
+ rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+ filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ ret = i40e_vsi_add_mac(vsi, &filter);
+ } else {
+ ret = i40e_vsi_delete_mac(vsi, &broadcast);
+ }
+
+ if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ if (on) {
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ } else {
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+ }
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+static int
+i40e_vlan_filter_count(struct i40e_vsi *vsi)
+{
+ uint32_t j, k;
+ uint16_t vlan_id;
+ int count = 0;
+
+ for (j = 0; j < I40E_VFTA_SIZE; j++) {
+ if (!vsi->vfta[j])
+ continue;
+
+ for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+ if (!(vsi->vfta[j] & (1 << k)))
+ continue;
+
+ vlan_id = j * I40E_UINT32_BIT_SIZE + k;
+ if (!vlan_id)
+ continue;
+
+ count++;
+ }
+ }
+
+ return count;
+}
+
+int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
+ uint64_t vf_mask, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ uint16_t vf_idx;
+ int ret = I40E_SUCCESS;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
+ PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+ return -EINVAL;
+ }
+
+ if (vf_mask == 0) {
+ PMD_DRV_LOG(ERR, "No VF.");
+ return -EINVAL;
+ }
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
+ if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+ vsi = pf->vfs[vf_idx].vsi;
+ if (on) {
+ if (!vsi->vlan_filter_on) {
+ vsi->vlan_filter_on = true;
+ i40e_aq_set_vsi_vlan_promisc(hw,
+ vsi->seid,
+ false,
+ NULL);
+ if (!vsi->vlan_anti_spoof_on)
+ i40e_add_rm_all_vlan_filter(
+ vsi, true);
+ }
+ ret = i40e_vsi_add_vlan(vsi, vlan_id);
+ } else {
+ ret = i40e_vsi_delete_vlan(vsi, vlan_id);
+
+ if (!i40e_vlan_filter_count(vsi)) {
+ vsi->vlan_filter_on = false;
+ i40e_aq_set_vsi_vlan_promisc(hw,
+ vsi->seid,
+ true,
+ NULL);
+ }
+ }
+ }
+ }
+
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_get_vf_stats(uint16_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ i40e_update_vsi_stats(vsi);
+
+ stats->ipackets = vsi->eth_stats.rx_unicast +
+ vsi->eth_stats.rx_multicast +
+ vsi->eth_stats.rx_broadcast;
+ stats->opackets = vsi->eth_stats.tx_unicast +
+ vsi->eth_stats.tx_multicast +
+ vsi->eth_stats.tx_broadcast;
+ stats->ibytes = vsi->eth_stats.rx_bytes;
+ stats->obytes = vsi->eth_stats.tx_bytes;
+ stats->ierrors = vsi->eth_stats.rx_discards;
+ stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_reset_vf_stats(uint16_t port,
+ uint16_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->offset_loaded = false;
+ i40e_update_vsi_stats(vsi);
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret = 0;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (bw > I40E_QOS_BW_MAX) {
+ PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
+ I40E_QOS_BW_MAX);
+ return -EINVAL;
+ }
+
+ if (bw % I40E_QOS_BW_GRANULARITY) {
+ PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
+ I40E_QOS_BW_GRANULARITY);
+ return -EINVAL;
+ }
+
+ bw /= I40E_QOS_BW_GRANULARITY;
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* No change. */
+ if (bw == vsi->bw_info.bw_limit) {
+ PMD_DRV_LOG(INFO,
+ "No change for VF max bandwidth. Nothing to do.");
+ return 0;
+ }
+
+ /**
+ * VF bandwidth limitation and TC bandwidth limitation cannot be
+ * enabled in parallel, quit if TC bandwidth limitation is enabled.
+ *
+ * If bw is 0, means disable bandwidth limitation. Then no need to
+ * check TC bandwidth limitation.
+ */
+ if (bw) {
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if ((vsi->enabled_tc & BIT_ULL(i)) &&
+ vsi->bw_info.bw_ets_credits[i])
+ break;
+ }
+ if (i != I40E_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR,
+ "TC max bandwidth has been set on this VF,"
+ " please disable it first.");
+ return -EINVAL;
+ }
+ }
+
+ ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set VF %d bandwidth, err(%d).",
+ vf_id, ret);
+ return -EINVAL;
+ }
+
+ /* Store the configuration. */
+ vsi->bw_info.bw_limit = (uint16_t)bw;
+ vsi->bw_info.bw_max = 0;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
+ uint8_t tc_num, uint8_t *bw_weight)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
+ int ret = 0;
+ int i, j;
+ uint16_t sum;
+ bool b_change = false;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
+ I40E_MAX_TRAFFIC_CLASS);
+ return -EINVAL;
+ }
+
+ sum = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i))
+ sum++;
+ }
+ if (sum != tc_num) {
+ PMD_DRV_LOG(ERR,
+ "Weight should be set for all %d enabled TCs.",
+ sum);
+ return -EINVAL;
+ }
+
+ sum = 0;
+ for (i = 0; i < tc_num; i++) {
+ if (!bw_weight[i]) {
+ PMD_DRV_LOG(ERR,
+ "The weight should be 1 at least.");
+ return -EINVAL;
+ }
+ sum += bw_weight[i];
+ }
+ if (sum != 100) {
+ PMD_DRV_LOG(ERR,
+ "The summary of the TC weight should be 100.");
+ return -EINVAL;
+ }
+
+ /**
+ * Create the configuration for all the TCs.
+ */
+ memset(&tc_bw, 0, sizeof(tc_bw));
+ tc_bw.tc_valid_bits = vsi->enabled_tc;
+ j = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i)) {
+ if (bw_weight[j] !=
+ vsi->bw_info.bw_ets_share_credits[i])
+ b_change = true;
+
+ tc_bw.tc_bw_credits[i] = bw_weight[j];
+ j++;
+ }
+ }
+
+ /* No change. */
+ if (!b_change) {
+ PMD_DRV_LOG(INFO,
+ "No change for TC allocated bandwidth."
+ " Nothing to do.");
+ return 0;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set VF %d TC bandwidth weight, err(%d).",
+ vf_id, ret);
+ return -EINVAL;
+ }
+
+ /* Store the configuration. */
+ j = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i)) {
+ vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
+ j++;
+ }
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
+ uint8_t tc_no, uint32_t bw)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
+ int ret = 0;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (bw > I40E_QOS_BW_MAX) {
+ PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
+ I40E_QOS_BW_MAX);
+ return -EINVAL;
+ }
+
+ if (bw % I40E_QOS_BW_GRANULARITY) {
+ PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
+ I40E_QOS_BW_GRANULARITY);
+ return -EINVAL;
+ }
+
+ bw /= I40E_QOS_BW_GRANULARITY;
+
+ if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
+ I40E_MAX_TRAFFIC_CLASS);
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
+ PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
+ vf_id, tc_no);
+ return -EINVAL;
+ }
+
+ /* No change. */
+ if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
+ PMD_DRV_LOG(INFO,
+ "No change for TC max bandwidth. Nothing to do.");
+ return 0;
+ }
+
+ /**
+ * VF bandwidth limitation and TC bandwidth limitation cannot be
+ * enabled in parallel, disable VF bandwidth limitation if it's
+ * enabled.
+ * If bw is 0, means disable bandwidth limitation. Then no need to
+ * care about VF bandwidth limitation configuration.
+ */
+ if (bw && vsi->bw_info.bw_limit) {
+ ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to disable VF(%d)"
+ " bandwidth limitation, err(%d).",
+ vf_id, ret);
+ return -EINVAL;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "VF max bandwidth is disabled according"
+ " to TC max bandwidth setting.");
+ }
+
+ /**
+ * Get all the TCs' info to create a whole picture.
+ * Because the incremental change isn't permitted.
+ */
+ memset(&tc_bw, 0, sizeof(tc_bw));
+ tc_bw.tc_valid_bits = vsi->enabled_tc;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i)) {
+ tc_bw.tc_bw_credits[i] =
+ rte_cpu_to_le_16(
+ vsi->bw_info.bw_ets_credits[i]);
+ }
+ }
+ tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
+
+ ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set VF %d TC %d max bandwidth, err(%d).",
+ vf_id, tc_no, ret);
+ return -EINVAL;
+ }
+
+ /* Store the configuration. */
+ vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_switching_comp_ets_data ets_data;
+ int i;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ vsi = pf->main_vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ veb = vsi->veb;
+ if (!veb) {
+ PMD_DRV_LOG(ERR, "Invalid VEB.");
+ return -EINVAL;
+ }
+
+ if ((tc_map & veb->enabled_tc) != tc_map) {
+ PMD_DRV_LOG(ERR,
+ "TC bitmap isn't the subset of enabled TCs 0x%x.",
+ veb->enabled_tc);
+ return -EINVAL;
+ }
+
+ if (tc_map == veb->strict_prio_tc) {
+ PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
+ return 0;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* Disable DCBx if it's the first time to set strict priority. */
+ if (!veb->strict_prio_tc) {
+ ret = i40e_aq_stop_lldp(hw, true, NULL);
+ if (ret)
+ PMD_DRV_LOG(INFO,
+ "Failed to disable DCBx as it's already"
+ " disabled.");
+ else
+ PMD_DRV_LOG(INFO,
+ "DCBx is disabled according to strict"
+ " priority setting.");
+ }
+
+ memset(&ets_data, 0, sizeof(ets_data));
+ ets_data.tc_valid_bits = veb->enabled_tc;
+ ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
+ ets_data.tc_strict_priority_flags = tc_map;
+ /* Get all TCs' bandwidth. */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (veb->enabled_tc & BIT_ULL(i)) {
+ /* For rubust, if bandwidth is 0, use 1 instead. */
+ if (veb->bw_info.bw_ets_share_credits[i])
+ ets_data.tc_bw_share_credits[i] =
+ veb->bw_info.bw_ets_share_credits[i];
+ else
+ ets_data.tc_bw_share_credits[i] =
+ I40E_QOS_BW_WEIGHT_MIN;
+ }
+ }
+
+ if (!veb->strict_prio_tc)
+ ret = i40e_aq_config_switch_comp_ets(
+ hw, veb->uplink_seid,
+ &ets_data, i40e_aqc_opc_enable_switching_comp_ets,
+ NULL);
+ else if (tc_map)
+ ret = i40e_aq_config_switch_comp_ets(
+ hw, veb->uplink_seid,
+ &ets_data, i40e_aqc_opc_modify_switching_comp_ets,
+ NULL);
+ else
+ ret = i40e_aq_config_switch_comp_ets(
+ hw, veb->uplink_seid,
+ &ets_data, i40e_aqc_opc_disable_switching_comp_ets,
+ NULL);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set TCs' strict priority mode."
+ " err (%d)", ret);
+ return -EINVAL;
+ }
+
+ veb->strict_prio_tc = tc_map;
+
+ /* Enable DCBx again, if all the TCs' strict priority disabled. */
+ if (!tc_map) {
+ ret = i40e_aq_start_lldp(hw, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to enable DCBx, err(%d).", ret);
+ return -EINVAL;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "DCBx is enabled again according to strict"
+ " priority setting.");
+ }
+
+ return ret;
+}
+
+#define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
+#define I40E_MAX_PROFILE_NUM 16
+
+static void
+i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
+ uint32_t track_id, uint8_t *profile_info_sec,
+ bool add)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
+ memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
+ if (add)
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ else
+ pinfo->op = I40E_DDP_REMOVE_TRACKID;
+}
+
+static enum i40e_status_code
+i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_profile_section_header *sec;
+ uint32_t track_id;
+ uint32_t offset = 0;
+ uint32_t info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ track_id = ((struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset))->track_id;
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ if (status)
+ PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
+ "offset %d, info %d",
+ offset, info);
+
+ return status;
+}
+
+/* Check if the profile info exists */
+static int
+i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port];
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint8_t *buff;
+ struct rte_pmd_i40e_profile_list *p_list;
+ struct rte_pmd_i40e_profile_info *pinfo, *p;
+ uint32_t i;
+ int ret;
+ static const uint32_t group_mask = 0x00ff0000;
+
+ pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
+ sizeof(struct i40e_profile_section_header));
+ if (pinfo->track_id == 0) {
+ PMD_DRV_LOG(INFO, "Read-only profile.");
+ return 0;
+ }
+ buff = rte_zmalloc("pinfo_list",
+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
+ 0);
+ if (!buff) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return -1;
+ }
+
+ ret = i40e_aq_get_ddp_list(
+ hw, (void *)buff,
+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
+ 0, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get profile info list.");
+ rte_free(buff);
+ return -1;
+ }
+ p_list = (struct rte_pmd_i40e_profile_list *)buff;
+ for (i = 0; i < p_list->p_count; i++) {
+ p = &p_list->p_info[i];
+ if (pinfo->track_id == p->track_id) {
+ PMD_DRV_LOG(INFO, "Profile exists.");
+ rte_free(buff);
+ return 1;
+ }
+ }
+ /* profile with group id 0xff is compatible with any other profile */
+ if ((pinfo->track_id & group_mask) == group_mask) {
+ rte_free(buff);
+ return 0;
+ }
+ for (i = 0; i < p_list->p_count; i++) {
+ p = &p_list->p_info[i];
+ if ((p->track_id & group_mask) == 0) {
+ PMD_DRV_LOG(INFO, "Profile of the group 0 exists.");
+ rte_free(buff);
+ return 2;
+ }
+ }
+ for (i = 0; i < p_list->p_count; i++) {
+ p = &p_list->p_info[i];
+ if ((p->track_id & group_mask) == group_mask)
+ continue;
+ if ((pinfo->track_id & group_mask) !=
+ (p->track_id & group_mask)) {
+ PMD_DRV_LOG(INFO, "Profile of different group exists.");
+ rte_free(buff);
+ return 3;
+ }
+ }
+
+ rte_free(buff);
+ return 0;
+}
+
+int
+rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
+ uint32_t size,
+ enum rte_pmd_i40e_package_op op)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_hw *hw;
+ struct i40e_package_header *pkg_hdr;
+ struct i40e_generic_seg_header *profile_seg_hdr;
+ struct i40e_generic_seg_header *metadata_seg_hdr;
+ uint32_t track_id;
+ uint8_t *profile_info_sec;
+ int is_exist;
+ enum i40e_status_code status = I40E_SUCCESS;
+ static const uint32_t type_mask = 0xff000000;
+
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Operation not supported.");
+ return -ENOTSUP;
+ }
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) +
+ sizeof(uint32_t) * 2)) {
+ PMD_DRV_LOG(ERR, "Buff is invalid.");
+ return -EINVAL;
+ }
+
+ pkg_hdr = (struct i40e_package_header *)buff;
+
+ if (!pkg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to fill the package structure");
+ return -EINVAL;
+ }
+
+ if (pkg_hdr->segment_count < 2) {
+ PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
+ return -EINVAL;
+ }
+
+ /* Find metadata segment */
+ metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
+ pkg_hdr);
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+ track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+ if (track_id == I40E_DDP_TRACKID_INVALID) {
+ PMD_DRV_LOG(ERR, "Invalid track_id");
+ return -EINVAL;
+ }
+
+ /* force read-only track_id for type 0 */
+ if ((track_id & type_mask) == 0)
+ track_id = 0;
+
+ /* Find profile segment */
+ profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
+ pkg_hdr);
+ if (!profile_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find profile segment header");
+ return -EINVAL;
+ }
+
+ profile_info_sec = rte_zmalloc(
+ "i40e_profile_info",
+ sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info),
+ 0);
+ if (!profile_info_sec) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return -EINVAL;
+ }
+
+ /* Check if the profile already loaded */
+ i40e_generate_profile_info_sec(
+ ((struct i40e_profile_segment *)profile_seg_hdr)->name,
+ &((struct i40e_profile_segment *)profile_seg_hdr)->version,
+ track_id, profile_info_sec,
+ op == RTE_PMD_I40E_PKG_OP_WR_ADD);
+ is_exist = i40e_check_profile_info(port, profile_info_sec);
+ if (is_exist < 0) {
+ PMD_DRV_LOG(ERR, "Failed to check profile.");
+ rte_free(profile_info_sec);
+ return -EINVAL;
+ }
+
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
+ if (is_exist) {
+ if (is_exist == 1)
+ PMD_DRV_LOG(ERR, "Profile already exists.");
+ else if (is_exist == 2)
+ PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
+ else if (is_exist == 3)
+ PMD_DRV_LOG(ERR, "Profile of different group already exists");
+ i40e_update_customized_info(dev, buff, size, op);
+ rte_free(profile_info_sec);
+ return -EEXIST;
+ }
+ } else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ if (is_exist != 1) {
+ PMD_DRV_LOG(ERR, "Profile does not exist.");
+ rte_free(profile_info_sec);
+ return -EACCES;
+ }
+ }
+
+ if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ status = i40e_rollback_profile(
+ hw,
+ (struct i40e_profile_segment *)profile_seg_hdr,
+ track_id);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
+ rte_free(profile_info_sec);
+ return status;
+ }
+ } else {
+ status = i40e_write_profile(
+ hw,
+ (struct i40e_profile_segment *)profile_seg_hdr,
+ track_id);
+ if (status) {
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ PMD_DRV_LOG(ERR, "Failed to write profile for add.");
+ else
+ PMD_DRV_LOG(ERR, "Failed to write profile.");
+ rte_free(profile_info_sec);
+ return status;
+ }
+ }
+
+ if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
+ /* Modify loaded profiles info list */
+ status = i40e_add_rm_profile_info(hw, profile_info_sec);
+ if (status) {
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
+ else
+ PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
+ }
+ }
+
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD ||
+ op == RTE_PMD_I40E_PKG_OP_WR_DEL)
+ i40e_update_customized_info(dev, buff, size, op);
+
+ rte_free(profile_info_sec);
+ return status;
+}
+
+/* Get number of tvl records in the section */
+static unsigned int
+i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
+{
+ unsigned int i, nb_rec, nb_tlv = 0;
+ struct i40e_profile_tlv_section_record *tlv;
+
+ if (!sec)
+ return nb_tlv;
+
+ /* get number of records in the section */
+ nb_rec = sec->section.size /
+ sizeof(struct i40e_profile_tlv_section_record);
+ for (i = 0; i < nb_rec; ) {
+ tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
+ i += tlv->len;
+ nb_tlv++;
+ }
+ return nb_tlv;
+}
+
+int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
+ uint8_t *info_buff, uint32_t info_size,
+ enum rte_pmd_i40e_package_info type)
+{
+ uint32_t ret_size;
+ struct i40e_package_header *pkg_hdr;
+ struct i40e_generic_seg_header *i40e_seg_hdr;
+ struct i40e_generic_seg_header *note_seg_hdr;
+ struct i40e_generic_seg_header *metadata_seg_hdr;
+
+ if (!info_buff) {
+ PMD_DRV_LOG(ERR, "Output info buff is invalid.");
+ return -EINVAL;
+ }
+
+ if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) +
+ sizeof(uint32_t) * 2)) {
+ PMD_DRV_LOG(ERR, "Package buff is invalid.");
+ return -EINVAL;
+ }
+
+ pkg_hdr = (struct i40e_package_header *)pkg_buff;
+ if (pkg_hdr->segment_count < 2) {
+ PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
+ return -EINVAL;
+ }
+
+ /* Find metadata segment */
+ metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
+ pkg_hdr);
+
+ /* Find global notes segment */
+ note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
+ pkg_hdr);
+
+ /* Find i40e profile segment */
+ i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+
+ /* get global header info */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
+ struct rte_pmd_i40e_profile_info *info =
+ (struct rte_pmd_i40e_profile_info *)info_buff;
+
+ if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
+ PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
+ return -EINVAL;
+ }
+
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+
+ memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
+ info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
+ info->track_id =
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+ memcpy(info->name,
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
+ I40E_DDP_NAME_SIZE);
+ memcpy(&info->version,
+ &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
+ sizeof(struct i40e_ddp_version));
+ return I40E_SUCCESS;
+ }
+
+ /* get global note size */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ if (note_seg_hdr == NULL)
+ ret_size = 0;
+ else
+ ret_size = note_seg_hdr->size;
+ *(uint32_t *)info_buff = ret_size;
+ return I40E_SUCCESS;
+ }
+
+ /* get global note */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
+ if (note_seg_hdr == NULL)
+ return -ENOTSUP;
+ if (info_size < note_seg_hdr->size) {
+ PMD_DRV_LOG(ERR, "Information buffer size is too small");
+ return -EINVAL;
+ }
+ memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
+ return I40E_SUCCESS;
+ }
+
+ /* get i40e segment header info */
+ if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
+ struct rte_pmd_i40e_profile_info *info =
+ (struct rte_pmd_i40e_profile_info *)info_buff;
+
+ if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
+ PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
+ return -EINVAL;
+ }
+
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+
+ if (!i40e_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
+ return -EINVAL;
+ }
+
+ memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
+ info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
+ info->track_id =
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+ memcpy(info->name,
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
+ I40E_DDP_NAME_SIZE);
+ memcpy(&info->version,
+ &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
+ sizeof(struct i40e_ddp_version));
+ return I40E_SUCCESS;
+ }
+
+ /* get number of devices */
+ if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ *(uint32_t *)info_buff =
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
+ return I40E_SUCCESS;
+ }
+
+ /* get list of devices */
+ if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
+ uint32_t dev_num;
+ dev_num =
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
+ if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ memcpy(info_buff,
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
+ sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
+ return I40E_SUCCESS;
+ }
+
+ /* get number of protocols */
+ if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
+ struct i40e_profile_section_header *proto;
+
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
+ return I40E_SUCCESS;
+ }
+
+ /* get list of protocols */
+ if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
+ uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
+ struct rte_pmd_i40e_proto_info *pinfo;
+ struct i40e_profile_section_header *proto;
+ struct i40e_profile_tlv_section_record *tlv;
+
+ pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
+ nb_proto_info = info_size /
+ sizeof(struct rte_pmd_i40e_proto_info);
+ for (i = 0; i < nb_proto_info; i++) {
+ pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
+ memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
+ }
+ proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ nb_tlv = i40e_get_tlv_section_size(proto);
+ if (nb_tlv == 0)
+ return I40E_SUCCESS;
+ if (nb_proto_info < nb_tlv) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ /* get number of records in the section */
+ nb_rec = proto->section.size /
+ sizeof(struct i40e_profile_tlv_section_record);
+ tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
+ for (i = j = 0; i < nb_rec; j++) {
+ pinfo[j].proto_id = tlv->data[0];
+ snprintf(pinfo[j].name, I40E_DDP_NAME_SIZE, "%s",
+ (const char *)&tlv->data[1]);
+ i += tlv->len;
+ tlv = &tlv[tlv->len];
+ }
+ return I40E_SUCCESS;
+ }
+
+ /* get number of packet classification types */
+ if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
+ struct i40e_profile_section_header *pctype;
+
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
+ return I40E_SUCCESS;
+ }
+
+ /* get list of packet classification types */
+ if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
+ uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
+ struct rte_pmd_i40e_ptype_info *pinfo;
+ struct i40e_profile_section_header *pctype;
+ struct i40e_profile_tlv_section_record *tlv;
+
+ pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
+ nb_proto_info = info_size /
+ sizeof(struct rte_pmd_i40e_ptype_info);
+ for (i = 0; i < nb_proto_info; i++)
+ memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
+ sizeof(struct rte_pmd_i40e_ptype_info));
+ pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ nb_tlv = i40e_get_tlv_section_size(pctype);
+ if (nb_tlv == 0)
+ return I40E_SUCCESS;
+ if (nb_proto_info < nb_tlv) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+
+ /* get number of records in the section */
+ nb_rec = pctype->section.size /
+ sizeof(struct i40e_profile_tlv_section_record);
+ tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
+ for (i = j = 0; i < nb_rec; j++) {
+ memcpy(&pinfo[j], tlv->data,
+ sizeof(struct rte_pmd_i40e_ptype_info));
+ i += tlv->len;
+ tlv = &tlv[tlv->len];
+ }
+ return I40E_SUCCESS;
+ }
+
+ /* get number of packet types */
+ if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
+ struct i40e_profile_section_header *ptype;
+
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
+ return I40E_SUCCESS;
+ }
+
+ /* get list of packet types */
+ if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
+ uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
+ struct rte_pmd_i40e_ptype_info *pinfo;
+ struct i40e_profile_section_header *ptype;
+ struct i40e_profile_tlv_section_record *tlv;
+
+ pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
+ nb_proto_info = info_size /
+ sizeof(struct rte_pmd_i40e_ptype_info);
+ for (i = 0; i < nb_proto_info; i++)
+ memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
+ sizeof(struct rte_pmd_i40e_ptype_info));
+ ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ nb_tlv = i40e_get_tlv_section_size(ptype);
+ if (nb_tlv == 0)
+ return I40E_SUCCESS;
+ if (nb_proto_info < nb_tlv) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ /* get number of records in the section */
+ nb_rec = ptype->section.size /
+ sizeof(struct i40e_profile_tlv_section_record);
+ for (i = j = 0; i < nb_rec; j++) {
+ tlv = (struct i40e_profile_tlv_section_record *)
+ &ptype[1 + i];
+ memcpy(&pinfo[j], tlv->data,
+ sizeof(struct rte_pmd_i40e_ptype_info));
+ i += tlv->len;
+ }
+ return I40E_SUCCESS;
+ }
+
+ PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
+ return -EINVAL;
+}
+
+int
+rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_hw *hw;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
+ return -EINVAL;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ status = i40e_aq_get_ddp_list(hw, (void *)buff,
+ size, 0, NULL);
+
+ return status;
+}
+
+static int check_invalid_pkt_type(uint32_t pkt_type)
+{
+ uint32_t l2, l3, l4, tnl, il2, il3, il4;
+
+ l2 = pkt_type & RTE_PTYPE_L2_MASK;
+ l3 = pkt_type & RTE_PTYPE_L3_MASK;
+ l4 = pkt_type & RTE_PTYPE_L4_MASK;
+ tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
+ il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
+ il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
+ il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
+
+ if (l2 &&
+ l2 != RTE_PTYPE_L2_ETHER &&
+ l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
+ l2 != RTE_PTYPE_L2_ETHER_ARP &&
+ l2 != RTE_PTYPE_L2_ETHER_LLDP &&
+ l2 != RTE_PTYPE_L2_ETHER_NSH &&
+ l2 != RTE_PTYPE_L2_ETHER_VLAN &&
+ l2 != RTE_PTYPE_L2_ETHER_QINQ &&
+ l2 != RTE_PTYPE_L2_ETHER_PPPOE)
+ return -1;
+
+ if (l3 &&
+ l3 != RTE_PTYPE_L3_IPV4 &&
+ l3 != RTE_PTYPE_L3_IPV4_EXT &&
+ l3 != RTE_PTYPE_L3_IPV6 &&
+ l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
+ l3 != RTE_PTYPE_L3_IPV6_EXT &&
+ l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
+ return -1;
+
+ if (l4 &&
+ l4 != RTE_PTYPE_L4_TCP &&
+ l4 != RTE_PTYPE_L4_UDP &&
+ l4 != RTE_PTYPE_L4_FRAG &&
+ l4 != RTE_PTYPE_L4_SCTP &&
+ l4 != RTE_PTYPE_L4_ICMP &&
+ l4 != RTE_PTYPE_L4_NONFRAG)
+ return -1;
+
+ if (tnl &&
+ tnl != RTE_PTYPE_TUNNEL_IP &&
+ tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+ tnl != RTE_PTYPE_TUNNEL_VXLAN &&
+ tnl != RTE_PTYPE_TUNNEL_NVGRE &&
+ tnl != RTE_PTYPE_TUNNEL_GENEVE &&
+ tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+ tnl != RTE_PTYPE_TUNNEL_GTPC &&
+ tnl != RTE_PTYPE_TUNNEL_GTPU &&
+ tnl != RTE_PTYPE_TUNNEL_L2TP)
+ return -1;
+
+ if (il2 &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
+ return -1;
+
+ if (il3 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
+ return -1;
+
+ if (il4 &&
+ il4 != RTE_PTYPE_INNER_L4_TCP &&
+ il4 != RTE_PTYPE_INNER_L4_UDP &&
+ il4 != RTE_PTYPE_INNER_L4_FRAG &&
+ il4 != RTE_PTYPE_INNER_L4_SCTP &&
+ il4 != RTE_PTYPE_INNER_L4_ICMP &&
+ il4 != RTE_PTYPE_INNER_L4_NONFRAG)
+ return -1;
+
+ return 0;
+}
+
+static int check_invalid_ptype_mapping(
+ struct rte_pmd_i40e_ptype_mapping *mapping_table,
+ uint16_t count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ uint16_t ptype = mapping_table[i].hw_ptype;
+ uint32_t pkt_type = mapping_table[i].sw_ptype;
+
+ if (ptype >= I40E_MAX_PKT_TYPE)
+ return -1;
+
+ if (pkt_type == RTE_PTYPE_UNKNOWN)
+ continue;
+
+ if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
+ continue;
+
+ if (check_invalid_pkt_type(pkt_type))
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_ptype_mapping_update(
+ uint16_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (count > I40E_MAX_PKT_TYPE)
+ return -EINVAL;
+
+ if (check_invalid_ptype_mapping(mapping_items, count))
+ return -EINVAL;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (exclusive) {
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
+ ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
+ }
+
+ for (i = 0; i < count; i++)
+ ad->ptype_tbl[mapping_items[i].hw_ptype]
+ = mapping_items[i].sw_ptype;
+
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ i40e_set_default_ptype_table(dev);
+
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_get(
+ uint16_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t size,
+ uint16_t *count,
+ uint8_t valid_only)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ int n = 0;
+ uint16_t i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
+ if (n >= size)
+ break;
+ if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
+ continue;
+ mapping_items[n].hw_ptype = i;
+ mapping_items[n].sw_ptype = ad->ptype_tbl[i];
+ n++;
+ }
+
+ *count = n;
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
+ uint32_t target,
+ uint8_t mask,
+ uint32_t pkt_type)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ uint16_t i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (!mask && check_invalid_pkt_type(target))
+ return -EINVAL;
+
+ if (check_invalid_pkt_type(pkt_type))
+ return -EINVAL;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
+ if (mask) {
+ if ((target | ad->ptype_tbl[i]) == target &&
+ (target & ad->ptype_tbl[i]))
+ ad->ptype_tbl[i] = pkt_type;
+ } else {
+ if (ad->ptype_tbl[i] == target)
+ ad->ptype_tbl[i] = pkt_type;
+ }
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+ struct i40e_mac_filter_info mac_filter;
+ int ret;
+
+ if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs)
+ return -EINVAL;
+
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ ether_addr_copy(mac_addr, &mac_filter.mac_addr);
+ ret = i40e_vsi_add_mac(vsi, &mac_filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
+ return -1;
+ }
+
+ return 0;
+}
+
+int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ i40e_set_default_pctype_table(dev);
+
+ return 0;
+}
+
+int rte_pmd_i40e_flow_type_mapping_get(
+ uint16_t port,
+ struct rte_pmd_i40e_flow_type_mapping *mapping_items)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ uint16_t i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
+ mapping_items[i].flow_type = i;
+ mapping_items[i].pctype = ad->pctypes_tbl[i];
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_flow_type_mapping_update(
+ uint16_t port,
+ struct rte_pmd_i40e_flow_type_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (count > I40E_FLOW_TYPE_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++)
+ if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
+ mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
+ (mapping_items[i].pctype &
+ (1ULL << I40E_FILTER_PCTYPE_INVALID)))
+ return -EINVAL;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (exclusive) {
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
+ ad->pctypes_tbl[i] = 0ULL;
+ ad->flow_types_mask = 0ULL;
+ }
+
+ for (i = 0; i < count; i++) {
+ ad->pctypes_tbl[mapping_items[i].flow_type] =
+ mapping_items[i].pctype;
+ if (mapping_items[i].pctype)
+ ad->flow_types_mask |=
+ (1ULL << mapping_items[i].flow_type);
+ else
+ ad->flow_types_mask &=
+ ~(1ULL << mapping_items[i].flow_type);
+ }
+
+ for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
+ ad->pctypes_mask |= ad->pctypes_tbl[i];
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
+{
+ struct rte_eth_dev *dev;
+ struct ether_addr *mac;
+ struct i40e_pf *pf;
+ int vf_id;
+ struct i40e_pf_vf *vf;
+ uint16_t vf_num;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ vf_num = pf->vf_num;
+
+ for (vf_id = 0; vf_id < vf_num; vf_id++) {
+ vf = &pf->vfs[vf_id];
+ mac = &vf->mac_addr;
+
+ if (is_same_ether_addr(mac, vf_mac))
+ return vf_id;
+ }
+
+ return -EINVAL;
+}
+
+static int
+i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
+ struct i40e_pf *pf)
+{
+ uint16_t i;
+ struct i40e_vsi *vsi = pf->main_vsi;
+ uint16_t queue_offset, bsf, tc_index;
+ struct i40e_vsi_context ctxt;
+ struct i40e_aqc_vsi_properties_data *vsi_info;
+ struct i40e_queue_regions *region_info =
+ &pf->queue_region;
+ int32_t ret = -EINVAL;
+
+ if (!region_info->queue_region_number) {
+ PMD_INIT_LOG(ERR, "there is no that region id been set before");
+ return ret;
+ }
+
+ memset(&ctxt, 0, sizeof(struct i40e_vsi_context));
+
+ /* Update Queue Pairs Mapping for currently enabled UPs */
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.info = vsi->info;
+ vsi_info = &ctxt.info;
+
+ memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
+ memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);
+
+ /* Configure queue region and queue mapping parameters,
+ * for enabled queue region, allocate queues to this region.
+ */
+
+ for (i = 0; i < region_info->queue_region_number; i++) {
+ tc_index = region_info->region[i].region_id;
+ bsf = rte_bsf32(region_info->region[i].queue_num);
+ queue_offset = region_info->region[i].queue_start_index;
+ vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
+ (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+ }
+
+ /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
+ vsi_info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
+ vsi_info->valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+
+ /* Update the VSI after updating the VSI queue-mapping information */
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
+ hw->aq.asq_last_status);
+ return ret;
+ }
+ /* update the local VSI info with updated queue map */
+ rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+ rte_memcpy(&vsi->info.queue_mapping,
+ &ctxt.info.queue_mapping,
+ sizeof(vsi->info.queue_mapping));
+ vsi->info.mapping_flags = ctxt.info.mapping_flags;
+ vsi->info.valid_sections = 0;
+
+ return 0;
+}
+
+
+static int
+i40e_queue_region_set_region(struct i40e_pf *pf,
+ struct rte_pmd_i40e_queue_region_conf *conf_ptr)
+{
+ uint16_t i;
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ struct i40e_queue_regions *info = &pf->queue_region;
+ int32_t ret = -EINVAL;
+
+ if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
+ conf_ptr->queue_num <= 64)) {
+ PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues do not exceed the VSI allocation");
+ return ret;
+ }
+
+ if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return ret;
+ }
+
+ if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
+ > main_vsi->nb_used_qps) {
+ PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
+ return ret;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++)
+ if (conf_ptr->region_id == info->region[i].region_id)
+ break;
+
+ if (i == info->queue_region_number &&
+ i <= I40E_REGION_MAX_INDEX) {
+ info->region[i].region_id = conf_ptr->region_id;
+ info->region[i].queue_num = conf_ptr->queue_num;
+ info->region[i].queue_start_index =
+ conf_ptr->queue_start_index;
+ info->queue_region_number++;
+ } else {
+ PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i40e_queue_region_set_flowtype(struct i40e_pf *pf,
+ struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
+{
+ int32_t ret = -EINVAL;
+ struct i40e_queue_regions *info = &pf->queue_region;
+ uint16_t i, j;
+ uint16_t region_index, flowtype_index;
+
+ /* For the pctype or hardware flowtype of packet,
+ * the specific index for each type has been defined
+ * in file i40e_type.h as enum i40e_filter_pctype.
+ */
+
+ if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return ret;
+ }
+
+ if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
+ PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
+ return ret;
+ }
+
+
+ for (i = 0; i < info->queue_region_number; i++)
+ if (rss_region_conf->region_id == info->region[i].region_id)
+ break;
+
+ if (i == info->queue_region_number) {
+ PMD_DRV_LOG(ERR, "that region id has not been set before");
+ ret = -EINVAL;
+ return ret;
+ }
+ region_index = i;
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ for (j = 0; j < info->region[i].flowtype_num; j++) {
+ if (rss_region_conf->hw_flowtype ==
+ info->region[i].hw_flowtype[j]) {
+ PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
+ return 0;
+ }
+ }
+ }
+
+ flowtype_index = info->region[region_index].flowtype_num;
+ info->region[region_index].hw_flowtype[flowtype_index] =
+ rss_region_conf->hw_flowtype;
+ info->region[region_index].flowtype_num++;
+
+ return 0;
+}
+
+static void
+i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
+ struct i40e_pf *pf)
+{
+ uint8_t hw_flowtype;
+ uint32_t pfqf_hregion;
+ uint16_t i, j, index;
+ struct i40e_queue_regions *info = &pf->queue_region;
+
+ /* For the pctype or hardware flowtype of packet,
+ * the specific index for each type has been defined
+ * in file i40e_type.h as enum i40e_filter_pctype.
+ */
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ for (j = 0; j < info->region[i].flowtype_num; j++) {
+ hw_flowtype = info->region[i].hw_flowtype[j];
+ index = hw_flowtype >> 3;
+ pfqf_hregion =
+ i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
+
+ if ((hw_flowtype & 0x7) == 0) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_0_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 1) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_1_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 2) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_2_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 3) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_3_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 4) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_4_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 5) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_5_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 6) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_6_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
+ } else {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_7_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
+ }
+
+ i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
+ pfqf_hregion);
+ }
+ }
+}
+
+static int
+i40e_queue_region_set_user_priority(struct i40e_pf *pf,
+ struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
+{
+ struct i40e_queue_regions *info = &pf->queue_region;
+ int32_t ret = -EINVAL;
+ uint16_t i, j, region_index;
+
+ if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return ret;
+ }
+
+ if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the region_id max index is 7");
+ return ret;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++)
+ if (rss_region_conf->region_id == info->region[i].region_id)
+ break;
+
+ if (i == info->queue_region_number) {
+ PMD_DRV_LOG(ERR, "that region id has not been set before");
+ ret = -EINVAL;
+ return ret;
+ }
+
+ region_index = i;
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ for (j = 0; j < info->region[i].user_priority_num; j++) {
+ if (info->region[i].user_priority[j] ==
+ rss_region_conf->user_priority) {
+ PMD_DRV_LOG(ERR, "that user priority has been set before");
+ return 0;
+ }
+ }
+ }
+
+ j = info->region[region_index].user_priority_num;
+ info->region[region_index].user_priority[j] =
+ rss_region_conf->user_priority;
+ info->region[region_index].user_priority_num++;
+
+ return 0;
+}
+
+static int
+i40e_queue_region_dcb_configure(struct i40e_hw *hw,
+ struct i40e_pf *pf)
+{
+ struct i40e_dcbx_config dcb_cfg_local;
+ struct i40e_dcbx_config *dcb_cfg;
+ struct i40e_queue_regions *info = &pf->queue_region;
+ struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
+ int32_t ret = -EINVAL;
+ uint16_t i, j, prio_index, region_index;
+ uint8_t tc_map, tc_bw, bw_lf;
+
+ if (!info->queue_region_number) {
+ PMD_DRV_LOG(ERR, "No queue region been set before");
+ return ret;
+ }
+
+ dcb_cfg = &dcb_cfg_local;
+ memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
+
+ /* assume each tc has the same bw */
+ tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
+ for (i = 0; i < info->queue_region_number; i++)
+ dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
+ /* to ensure the sum of tcbw is equal to 100 */
+ bw_lf = I40E_MAX_PERCENT % info->queue_region_number;
+ for (i = 0; i < bw_lf; i++)
+ dcb_cfg->etscfg.tcbwtable[i]++;
+
+ /* assume each tc has the same Transmission Selection Algorithm */
+ for (i = 0; i < info->queue_region_number; i++)
+ dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ for (j = 0; j < info->region[i].user_priority_num; j++) {
+ prio_index = info->region[i].user_priority[j];
+ region_index = info->region[i].region_id;
+ dcb_cfg->etscfg.prioritytable[prio_index] =
+ region_index;
+ }
+ }
+
+ /* FW needs one App to configure HW */
+ dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
+ dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
+ dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);
+
+ dcb_cfg->pfc.willing = 0;
+ dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ dcb_cfg->pfc.pfcenable = tc_map;
+
+ /* Copy the new config to the current config */
+ *old_cfg = *dcb_cfg;
+ old_cfg->etsrec = old_cfg->etscfg;
+ ret = i40e_set_dcb_config(hw);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
+ struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
+{
+ int32_t ret = -EINVAL;
+ struct i40e_queue_regions *info = &pf->queue_region;
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+
+ if (on) {
+ i40e_queue_region_pf_flowtype_conf(hw, pf);
+
+ ret = i40e_vsi_update_queue_region_mapping(hw, pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
+ return ret;
+ }
+
+ ret = i40e_queue_region_dcb_configure(hw, pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to flush dcb.");
+ return ret;
+ }
+
+ return 0;
+ }
+
+ if (info->queue_region_number) {
+ info->queue_region_number = 1;
+ info->region[0].queue_num = main_vsi->nb_used_qps;
+ info->region[0].queue_start_index = 0;
+
+ ret = i40e_vsi_update_queue_region_mapping(hw, pf);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
+
+ ret = i40e_dcb_init_configure(dev, TRUE);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to flush dcb.");
+ pf->flags &= ~I40E_FLAG_DCB;
+ }
+
+ i40e_init_queue_region_conf(dev);
+ }
+ return 0;
+}
+
+static int
+i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint64_t hena;
+
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+
+ if (!hena)
+ return -ENOTSUP;
+
+ return 0;
+}
+
+static int
+i40e_queue_region_get_all_info(struct i40e_pf *pf,
+ struct i40e_queue_regions *regions_ptr)
+{
+ struct i40e_queue_regions *info = &pf->queue_region;
+
+ rte_memcpy(regions_ptr, info,
+ sizeof(struct i40e_queue_regions));
+
+ return 0;
+}
+
+int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
+ enum rte_pmd_i40e_queue_region_op op_type, void *arg)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int32_t ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (!(!i40e_queue_region_pf_check_rss(pf)))
+ return -ENOTSUP;
+
+ /* This queue region feature only support pf by now. It should
+ * be called after dev_start, and will be clear after dev_stop.
+ * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
+ * is just an enable function which server for other configuration,
+ * it is for all configuration about queue region from up layer,
+ * at first will only keep in DPDK softwarestored in driver,
+ * only after "FLUSH_ON", it commit all configuration to HW.
+ * Because PMD had to set hardware configuration at a time, so
+ * it will record all up layer command at first.
+ * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
+ * just clean all configuration about queue region just now,
+ * and restore all to DPDK i40e driver default
+ * config when start up.
+ */
+
+ switch (op_type) {
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
+ ret = i40e_queue_region_set_region(pf,
+ (struct rte_pmd_i40e_queue_region_conf *)arg);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
+ ret = i40e_queue_region_set_flowtype(pf,
+ (struct rte_pmd_i40e_queue_region_conf *)arg);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
+ ret = i40e_queue_region_set_user_priority(pf,
+ (struct rte_pmd_i40e_queue_region_conf *)arg);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
+ ret = i40e_queue_region_get_all_info(pf,
+ (struct i40e_queue_regions *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "op type (%d) not supported",
+ op_type);
+ ret = -EINVAL;
+ }
+
+ I40E_WRITE_FLUSH(hw);
+
+ return ret;
+}
+
+int rte_pmd_i40e_flow_add_del_packet_template(
+ uint16_t port,
+ const struct rte_pmd_i40e_pkt_template_conf *conf,
+ uint8_t add)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port];
+ struct i40e_fdir_filter_conf filter_conf;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ memset(&filter_conf, 0, sizeof(filter_conf));
+ filter_conf.soft_id = conf->soft_id;
+ filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
+ filter_conf.input.flow.raw_flow.packet = conf->input.packet;
+ filter_conf.input.flow.raw_flow.length = conf->input.length;
+ filter_conf.input.flow_ext.pkt_template = true;
+
+ filter_conf.action.rx_queue = conf->action.rx_queue;
+ filter_conf.action.behavior =
+ (enum i40e_fdir_behavior)conf->action.behavior;
+ filter_conf.action.report_status =
+ (enum i40e_fdir_status)conf->action.report_status;
+ filter_conf.action.flex_off = conf->action.flex_off;
+
+ return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
+}
+
+int
+rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype,
+ struct rte_pmd_i40e_inset *inset,
+ enum rte_pmd_i40e_inset_type inset_type)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_hw *hw;
+ uint64_t inset_reg;
+ uint32_t mask_reg[2];
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (pctype > 63)
+ return -EINVAL;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ memset(inset, 0, sizeof(struct rte_pmd_i40e_inset));
+
+ switch (inset_type) {
+ case INSET_HASH:
+ /* Get input set */
+ inset_reg =
+ i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
+ inset_reg <<= I40E_32_BIT_WIDTH;
+ inset_reg |=
+ i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
+ /* Get field mask */
+ mask_reg[0] =
+ i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(0, pctype));
+ mask_reg[1] =
+ i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(1, pctype));
+ break;
+ case INSET_FDIR:
+ inset_reg =
+ i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
+ inset_reg <<= I40E_32_BIT_WIDTH;
+ inset_reg |=
+ i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
+ mask_reg[0] =
+ i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(0, pctype));
+ mask_reg[1] =
+ i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(1, pctype));
+ break;
+ case INSET_FDIR_FLX:
+ inset_reg =
+ i40e_read_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype));
+ mask_reg[0] =
+ i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 0));
+ mask_reg[1] =
+ i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 1));
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported input set type.");
+ return -EINVAL;
+ }
+
+ inset->inset = inset_reg;
+
+ for (i = 0; i < 2; i++) {
+ inset->mask[i].field_idx = ((mask_reg[i] >> 16) & 0x3F);
+ inset->mask[i].mask = mask_reg[i] & 0xFFFF;
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
+ struct rte_pmd_i40e_inset *inset,
+ enum rte_pmd_i40e_inset_type inset_type)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_hw *hw;
+ struct i40e_pf *pf;
+ uint64_t inset_reg;
+ uint32_t mask_reg[2];
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (pctype > 63)
+ return -EINVAL;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Input set configuration is not supported.");
+ return -ENOTSUP;
+ }
+
+ inset_reg = inset->inset;
+ for (i = 0; i < 2; i++)
+ mask_reg[i] = (inset->mask[i].field_idx << 16) |
+ inset->mask[i].mask;
+
+ switch (inset_type) {
+ case INSET_HASH:
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+ for (i = 0; i < 2; i++)
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
+ break;
+ case INSET_FDIR:
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+ for (i = 0; i < 2; i++)
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+ break;
+ case INSET_FDIR_FLX:
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ for (i = 0; i < 2; i++)
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_MSK(pctype, i),
+ mask_reg[i]);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported input set type.");
+ return -EINVAL;
+ }
+
+ I40E_WRITE_FLUSH(hw);
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.h b/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.h
new file mode 100644
index 00000000..be4a6024
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.h
@@ -0,0 +1,1064 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _PMD_I40E_H_
+#define _PMD_I40E_H_
+
+/**
+ * @file rte_pmd_i40e.h
+ *
+ * i40e PMD specific functions.
+ *
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ */
+
+#include <rte_ethdev_driver.h>
+
+/**
+ * Response sent back to i40e driver from user app after callback
+ */
+enum rte_pmd_i40e_mb_event_rsp {
+ RTE_PMD_I40E_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */
+ RTE_PMD_I40E_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */
+ RTE_PMD_I40E_MB_EVENT_PROCEED, /**< proceed with mbox request */
+ RTE_PMD_I40E_MB_EVENT_MAX /**< max value of this enum */
+};
+
+/**
+ * Data sent to the user application when the callback is executed.
+ */
+struct rte_pmd_i40e_mb_event_param {
+ uint16_t vfid; /**< Virtual Function number */
+ uint16_t msg_type; /**< VF to PF message type, see virtchnl_ops */
+ uint16_t retval; /**< return value */
+ void *msg; /**< pointer to message */
+ uint16_t msglen; /**< length of the message */
+};
+
+/**
+ * Option of package processing.
+ */
+enum rte_pmd_i40e_package_op {
+ RTE_PMD_I40E_PKG_OP_UNDEFINED = 0,
+ RTE_PMD_I40E_PKG_OP_WR_ADD, /**< load package and add to info list */
+ RTE_PMD_I40E_PKG_OP_WR_DEL, /**< load package and delete from info list */
+ RTE_PMD_I40E_PKG_OP_WR_ONLY, /**< load package without modifying info list */
+ RTE_PMD_I40E_PKG_OP_MAX = 32
+};
+
+/**
+ * Types of package information.
+ */
+enum rte_pmd_i40e_package_info {
+ RTE_PMD_I40E_PKG_INFO_UNDEFINED = 0,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_MAX = 1024,
+ RTE_PMD_I40E_PKG_INFO_HEADER,
+ RTE_PMD_I40E_PKG_INFO_DEVID_NUM,
+ RTE_PMD_I40E_PKG_INFO_DEVID_LIST,
+ RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM,
+ RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST,
+ RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM,
+ RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST,
+ RTE_PMD_I40E_PKG_INFO_PTYPE_NUM,
+ RTE_PMD_I40E_PKG_INFO_PTYPE_LIST,
+ RTE_PMD_I40E_PKG_INFO_MAX = (int)0xFFFFFFFF
+};
+
+/**
+ * Option types of queue region.
+ */
+enum rte_pmd_i40e_queue_region_op {
+ RTE_PMD_I40E_RSS_QUEUE_REGION_UNDEFINED,
+ /** add queue region set */
+ RTE_PMD_I40E_RSS_QUEUE_REGION_SET,
+ /** add PF region pctype set */
+ RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET,
+ /** add queue region user priority set */
+ RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET,
+ /**
+ * ALL configuration about queue region from up layer
+ * at first will only keep in DPDK software stored in driver,
+ * only after " FLUSH_ON ", it commit all configuration to HW.
+ * Because PMD had to set hardware configuration at a time, so
+ * it will record all up layer command at first.
+ */
+ RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON,
+ /**
+ * "FLUSH_OFF " is just clean all configuration about queue
+ * region just now, and restore all to DPDK i40e driver default
+ * config when start up.
+ */
+ RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF,
+ RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET,
+ RTE_PMD_I40E_RSS_QUEUE_REGION_OP_MAX
+};
+
+#define RTE_PMD_I40E_DDP_NAME_SIZE 32
+#define RTE_PMD_I40E_PCTYPE_MAX 64
+#define RTE_PMD_I40E_REGION_MAX_NUM 8
+#define RTE_PMD_I40E_MAX_USER_PRIORITY 8
+
+/**
+ * Version for dynamic device personalization.
+ * Version in "major.minor.update.draft" format.
+ */
+struct rte_pmd_i40e_ddp_version {
+ uint8_t major;
+ uint8_t minor;
+ uint8_t update;
+ uint8_t draft;
+};
+
+/**
+ * Device ID for dynamic device personalization.
+ */
+struct rte_pmd_i40e_ddp_device_id {
+ uint32_t vendor_dev_id;
+ uint32_t sub_vendor_dev_id;
+};
+
+/**
+ * Profile information in profile info list.
+ */
+struct rte_pmd_i40e_profile_info {
+ uint32_t track_id;
+ struct rte_pmd_i40e_ddp_version version;
+ uint8_t owner;
+ uint8_t reserved[7];
+ uint8_t name[RTE_PMD_I40E_DDP_NAME_SIZE];
+};
+
+#define RTE_PMD_I40E_DDP_OWNER_UNKNOWN 0xFF
+
+/**
+ * Profile information list returned from HW.
+ */
+struct rte_pmd_i40e_profile_list {
+ uint32_t p_count;
+ struct rte_pmd_i40e_profile_info p_info[1];
+};
+
+#define RTE_PMD_I40E_PROTO_NUM 6
+#define RTE_PMD_I40E_PROTO_UNUSED 0xFF
+
+/**
+ * Protocols information stored in profile
+ */
+struct rte_pmd_i40e_proto_info {
+ uint8_t proto_id;
+ char name[RTE_PMD_I40E_DDP_NAME_SIZE];
+};
+
+/**
+ * Packet classification/ packet type information stored in profile
+ */
+struct rte_pmd_i40e_ptype_info {
+ uint8_t ptype_id;
+ uint8_t protocols[RTE_PMD_I40E_PROTO_NUM];
+};
+
+/**
+ * ptype mapping table only accept RTE_PTYPE_XXX or "user defined" ptype.
+ * A ptype with MSB set will be regarded as a user defined ptype.
+ * Below macro help to create a user defined ptype.
+ */
+#define RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK 0x80000000
+
+struct rte_pmd_i40e_ptype_mapping {
+ uint16_t hw_ptype; /**< hardware defined packet type*/
+ uint32_t sw_ptype; /**< software defined packet type */
+};
+
+/**
+ * Queue region related information.
+ */
+struct rte_pmd_i40e_queue_region_conf {
+ /** the region id for this configuration */
+ uint8_t region_id;
+ /** the pctype or hardware flowtype of packet,
+ * the specific index for each type has been defined
+ * in file i40e_type.h as enum i40e_filter_pctype.
+ */
+ uint8_t hw_flowtype;
+ /** the start queue index for this region */
+ uint8_t queue_start_index;
+ /** the total queue number of this queue region */
+ uint8_t queue_num;
+ /** the packet's user priority for this region */
+ uint8_t user_priority;
+};
+
+/* queue region info */
+struct rte_pmd_i40e_queue_region_info {
+ /** the region id for this configuration */
+ uint8_t region_id;
+ /** the start queue index for this region */
+ uint8_t queue_start_index;
+ /** the total queue number of this queue region */
+ uint8_t queue_num;
+ /** the total number of user priority for this region */
+ uint8_t user_priority_num;
+ /** the packet's user priority for this region */
+ uint8_t user_priority[RTE_PMD_I40E_MAX_USER_PRIORITY];
+ /** the total number of flowtype for this region */
+ uint8_t flowtype_num;
+ /**
+ * the pctype or hardware flowtype of packet,
+ * the specific index for each type has been defined
+ * in file i40e_type.h as enum i40e_filter_pctype.
+ */
+ uint8_t hw_flowtype[RTE_PMD_I40E_PCTYPE_MAX];
+};
+
+struct rte_pmd_i40e_queue_regions {
+ /** the total number of queue region for this port */
+ uint16_t queue_region_number;
+ struct rte_pmd_i40e_queue_region_info
+ region[RTE_PMD_I40E_REGION_MAX_NUM];
+};
+
+/**
+ * Behavior will be taken if raw packet template is matched.
+ */
+enum rte_pmd_i40e_pkt_template_behavior {
+ RTE_PMD_I40E_PKT_TEMPLATE_ACCEPT,
+ RTE_PMD_I40E_PKT_TEMPLATE_REJECT,
+ RTE_PMD_I40E_PKT_TEMPLATE_PASSTHRU,
+};
+
+/**
+ * Flow director report status
+ * It defines what will be reported if raw packet template is matched.
+ */
+enum rte_pmd_i40e_pkt_template_status {
+ /** report nothing */
+ RTE_PMD_I40E_PKT_TEMPLATE_NO_REPORT_STATUS,
+ /** only report FD ID */
+ RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID,
+ /** report FD ID and 4 flex bytes */
+ RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID_FLEX_4,
+ /** report 8 flex bytes */
+ RTE_PMD_I40E_PKT_TEMPLATE_REPORT_FLEX_8,
+};
+
+/**
+ * A structure used to define an action when raw packet template is matched.
+ */
+struct rte_pmd_i40e_pkt_template_action {
+ /** queue assigned to if raw packet template match */
+ uint16_t rx_queue;
+ /** behavior will be taken */
+ enum rte_pmd_i40e_pkt_template_behavior behavior;
+ /** status report option */
+ enum rte_pmd_i40e_pkt_template_status report_status;
+ /**
+ * If report_status is RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID_FLEX_4 or
+ * RTE_PMD_I40E_PKT_TEMPLATE_REPORT_FLEX_8, flex_off specifies
+ * where the reported flex bytes start from in flexible payload.
+ */
+ uint8_t flex_off;
+};
+
+/**
+ * A structure used to define the input for raw packet template.
+ */
+struct rte_pmd_i40e_pkt_template_input {
+ /** the pctype used for raw packet template */
+ uint16_t pctype;
+ /** the buffer conatining raw packet template */
+ void *packet;
+ /** the length of buffer with raw packet template */
+ uint32_t length;
+};
+
+/**
+ * A structure used to define the configuration parameters
+ * for raw packet template.
+ */
+struct rte_pmd_i40e_pkt_template_conf {
+ /** the input for raw packet template. */
+ struct rte_pmd_i40e_pkt_template_input input;
+ /** the action to be taken when raw packet template is matched */
+ struct rte_pmd_i40e_pkt_template_action action;
+ /** ID, an unique software index for the raw packet template filter */
+ uint32_t soft_id;
+};
+
+enum rte_pmd_i40e_inset_type {
+ INSET_NONE = 0,
+ INSET_HASH,
+ INSET_FDIR,
+ INSET_FDIR_FLX,
+};
+
+struct rte_pmd_i40e_inset_mask {
+ uint8_t field_idx;
+ uint16_t mask;
+};
+
+struct rte_pmd_i40e_inset {
+ uint64_t inset;
+ struct rte_pmd_i40e_inset_mask mask[2];
+};
+
+/**
+ * Add or remove raw packet template filter to Flow Director.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param conf
+ * Specifies configuration parameters of raw packet template filter.
+ * @param add
+ * Speicifes an action to be taken - add or remove raw packet template filter.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *conf* invalid.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_flow_add_del_packet_template(
+ uint16_t port,
+ const struct rte_pmd_i40e_pkt_template_conf *conf,
+ uint8_t add);
+
+/**
+ * Notify VF when PF link status changes.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* invalid.
+ */
+int rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf);
+
+/**
+ * Enable/Disable VF MAC anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set MAC anti spoofing.
+ * @param on
+ * 1 - Enable VFs MAC anti spoofing.
+ * 0 - Disable VFs MAC anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set VLAN anti spoofing.
+ * @param on
+ * 1 - Enable VFs VLAN anti spoofing.
+ * 0 - Disable VFs VLAN anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable TX loopback on all the PF and VFs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Enable TX loopback.
+ * 0 - Disable TX loopback.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_tx_loopback(uint16_t port,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF unicast promiscuous mode.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set.
+ * @param on
+ * 1 - Enable.
+ * 0 - Disable.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF multicast promiscuous mode.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set.
+ * @param on
+ * 1 - Enable.
+ * 0 - Disable.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Set the VF MAC address.
+ *
+ * PF should set MAC address before VF initialized, if PF sets the MAC
+ * address after VF initialized, new MAC address won't be effective until
+ * VF reinitialize.
+ *
+ * This will remove all existing MAC filters.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr);
+
+/**
+ * Remove the VF MAC address.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int
+rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr);
+
+/**
+ * Enable/Disable vf vlan strip for all queues in a pool
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - Enable VF's vlan strip on RX queues.
+ * 0 - Disable VF's vlan strip on RX queues.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable vf vlan insert
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param vlan_id
+ * 0 - Disable VF's vlan insert.
+ * n - Enable; n is inserted as the vlan id.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
+ uint16_t vlan_id);
+
+/**
+ * Enable/Disable vf broadcast mode
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param on
+ * 0 - Disable broadcast.
+ * 1 - Enable broadcast.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable vf vlan tag
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param on
+ * 0 - Disable VF's vlan tag.
+ * n - Enable VF's vlan tag.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN filter
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vlan_id
+ * ID specifying VLAN
+ * @param vf_mask
+ * Mask to filter VF's
+ * @param on
+ * 0 - Disable VF's VLAN filter.
+ * 1 - Enable VF's VLAN filter.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
+ uint64_t vf_mask, uint8_t on);
+
+/**
+ * Get VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to get.
+ * @param stats
+ * A pointer to a structure of type *rte_eth_stats* to be filled with
+ * the values of device counters for the following set of statistics:
+ * - *ipackets* with the total of successfully received packets.
+ * - *opackets* with the total of successfully transmitted packets.
+ * - *ibytes* with the total of successfully received bytes.
+ * - *obytes* with the total of successfully transmitted bytes.
+ * - *ierrors* with the total of erroneous received packets.
+ * - *oerrors* with the total of failed transmitted packets.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+
+int rte_pmd_i40e_get_vf_stats(uint16_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats);
+
+/**
+ * Clear VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to get.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_reset_vf_stats(uint16_t port,
+ uint16_t vf_id);
+
+/**
+ * Set VF's max bandwidth.
+ *
+ * Per VF bandwidth limitation and per TC bandwidth limitation cannot
+ * be enabled in parallel. If per TC bandwidth is enabled, this function
+ * will disable it.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param bw
+ * Bandwidth for this VF.
+ * The value should be an absolute bandwidth in Mbps.
+ * The bandwidth is a L2 bandwidth counting the bytes of ethernet packets.
+ * Not count the bytes added by physical layer.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_max_bw(uint16_t port,
+ uint16_t vf_id,
+ uint32_t bw);
+
+/**
+ * Set all the TCs' bandwidth weight on a specific VF.
+ *
+ * The bw_weight means the percentage occupied by the TC.
+ * It can be taken as the relative min bandwidth setting.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param tc_num
+ * Number of TCs.
+ * @param bw_weight
+ * An array of relative bandwidth weight for all the TCs.
+ * The summary of the bw_weight should be 100.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port,
+ uint16_t vf_id,
+ uint8_t tc_num,
+ uint8_t *bw_weight);
+
+/**
+ * Set a specific TC's max bandwidth on a specific VF.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param tc_no
+ * Number specifying TC.
+ * @param bw
+ * Max bandwidth for this TC.
+ * The value should be an absolute bandwidth in Mbps.
+ * The bandwidth is a L2 bandwidth counting the bytes of ethernet packets.
+ * Not count the bytes added by physical layer.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port,
+ uint16_t vf_id,
+ uint8_t tc_no,
+ uint32_t bw);
+
+/**
+ * Set some TCs to strict priority mode on a physical port.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param tc_map
+ * A bit map for the TCs.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map);
+
+/**
+ * Load/Unload a ddp package
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param buff
+ * buffer of package.
+ * @param size
+ * size of buffer.
+ * @param op
+ * Operation of package processing
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-EEXIST) if profile exists.
+ * - (-EACCES) if profile does not exist.
+ * - (-ENOTSUP) if operation not supported.
+ */
+int rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
+ uint32_t size,
+ enum rte_pmd_i40e_package_op op);
+
+/**
+ * rte_pmd_i40e_get_ddp_info - Get profile's info
+ * @param pkg
+ * buffer of package.
+ * @param pkg_size
+ * package buffer size
+ * @param info
+ * buffer for response
+ * @param size
+ * response buffer size
+ * @param type
+ * type of information requested
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if information type not supported by the profile.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_get_ddp_info(uint8_t *pkg, uint32_t pkg_size,
+ uint8_t *info, uint32_t size,
+ enum rte_pmd_i40e_package_info type);
+
+/**
+ * rte_pmd_i40e_get_ddp_list - Get loaded profile list
+ * @param port
+ * port id
+ * @param buff
+ * buffer for response
+ * @param size
+ * buffer size
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size);
+
+/**
+ * Update hardware defined ptype to software defined packet type
+ * mapping table.
+ *
+ * @param port
+ * pointer to port identifier of the device.
+ * @param mapping_items
+ * the base address of the mapping items array.
+ * @param count
+ * number of mapping items.
+ * @param exclusive
+ * the flag indicate different ptype mapping update method.
+ * -(0) only overwrite referred PTYPE mapping,
+ * keep other PTYPEs mapping unchanged.
+ * -(!0) overwrite referred PTYPE mapping,
+ * set other PTYPEs maps to PTYPE_UNKNOWN.
+ */
+int rte_pmd_i40e_ptype_mapping_update(
+ uint16_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive);
+
+/**
+ * Reset hardware defined ptype to software defined ptype
+ * mapping table to default.
+ *
+ * @param port
+ * pointer to port identifier of the device
+ */
+int rte_pmd_i40e_ptype_mapping_reset(uint16_t port);
+
+/**
+ * Get hardware defined ptype to software defined ptype
+ * mapping items.
+ *
+ * @param port
+ * pointer to port identifier of the device.
+ * @param mapping_items
+ * the base address of the array to store returned items.
+ * @param size
+ * the size of the input array.
+ * @param count
+ * the place to store the number of returned items.
+ * @param valid_only
+ * -(0) return full mapping table.
+ * -(!0) only return mapping items which packet_type != RTE_PTYPE_UNKNOWN.
+ */
+int rte_pmd_i40e_ptype_mapping_get(
+ uint16_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t size,
+ uint16_t *count,
+ uint8_t valid_only);
+
+/**
+ * Replace a specific or a group of software defined ptypes
+ * with a new one
+ *
+ * @param port
+ * pointer to port identifier of the device
+ * @param target
+ * the packet type to be replaced
+ * @param mask
+ * -(0) target represent a specific software defined ptype.
+ * -(!0) target is a mask to represent a group of software defined ptypes.
+ * @param pkt_type
+ * the new packet type to overwrite
+ */
+int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
+ uint32_t target,
+ uint8_t mask,
+ uint32_t pkt_type);
+
+/**
+ * Add a VF MAC address.
+ *
+ * Add more MAC address for VF. The existing MAC addresses
+ * are still effective.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr);
+
+#define RTE_PMD_I40E_PCTYPE_MAX 64
+#define RTE_PMD_I40E_FLOW_TYPE_MAX 64
+
+struct rte_pmd_i40e_flow_type_mapping {
+ uint16_t flow_type; /**< software defined flow type*/
+ uint64_t pctype; /**< hardware defined pctype */
+};
+
+/**
+ * Update hardware defined pctype to software defined flow type
+ * mapping table.
+ *
+ * @param port
+ * pointer to port identifier of the device.
+ * @param mapping_items
+ * the base address of the mapping items array.
+ * @param count
+ * number of mapping items.
+ * @param exclusive
+ * the flag indicate different pctype mapping update method.
+ * -(0) only overwrite referred PCTYPE mapping,
+ * keep other PCTYPEs mapping unchanged.
+ * -(!0) overwrite referred PCTYPE mapping,
+ * set other PCTYPEs maps to PCTYPE_INVALID.
+ */
+int rte_pmd_i40e_flow_type_mapping_update(
+ uint16_t port,
+ struct rte_pmd_i40e_flow_type_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive);
+
+/**
+ * Get software defined flow type to hardware defined pctype
+ * mapping items.
+ *
+ * @param port
+ * pointer to port identifier of the device.
+ * @param mapping_items
+ * the base address of the array to store returned items.
+ * array should be allocated by caller with minimum size of
+ * RTE_PMD_I40E_FLOW_TYPE_MAX items
+ */
+int rte_pmd_i40e_flow_type_mapping_get(
+ uint16_t port,
+ struct rte_pmd_i40e_flow_type_mapping *mapping_items);
+
+/**
+ * Reset hardware defined pctype to software defined flow type
+ * mapping table to default.
+ *
+ * @param port
+ * pointer to port identifier of the device
+ */
+int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port);
+
+/**
+ * On the PF, find VF index based on VF MAC address
+ *
+ * @param port
+ * pointer to port identifier of the device
+ * @param vf_mac
+ * the mac address of the vf to determine index of
+ * @return
+ * The index of vfid If successful.
+ * -EINVAL: vf mac address does not exist for this port
+ * -ENOTSUP: i40e not supported for this port.
+ */
+int rte_pmd_i40e_query_vfid_by_mac(uint16_t port,
+ const struct ether_addr *vf_mac);
+
+/**
+ * Do RSS queue region configuration for that port as
+ * the command option type
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param op_type
+ * Queue region operation type
+ * @param arg
+ * Queue region operation type specific data
+ */
+int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
+ enum rte_pmd_i40e_queue_region_op op_type, void *arg);
+
+int rte_pmd_i40e_cfg_hash_inset(uint16_t port,
+ uint64_t pctype, uint64_t inset);
+
+/**
+ * Get input set
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param pctype
+ * HW pctype.
+ * @param inset
+ * Buffer for input set info.
+ * @param inset_type
+ * Type of input set.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) if operation not supported.
+ */
+int rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype,
+ struct rte_pmd_i40e_inset *inset,
+ enum rte_pmd_i40e_inset_type inset_type);
+
+/**
+ * Set input set
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param pctype
+ * HW pctype.
+ * @param inset
+ * Input set info.
+ * @param inset_type
+ * Type of input set.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) if operation not supported.
+ */
+int rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
+ struct rte_pmd_i40e_inset *inset,
+ enum rte_pmd_i40e_inset_type inset_type);
+
+/**
+ * Get bit value for some field index
+ *
+ * @param inset
+ * Input set value.
+ * @param field_idx
+ * Field index for input set.
+ * @return
+ * - (1) if set.
+ * - (0) if cleared.
+ */
+static inline int
+rte_pmd_i40e_inset_field_get(uint64_t inset, uint8_t field_idx)
+{
+ uint8_t bit_idx;
+
+ if (field_idx > 63)
+ return 0;
+
+ bit_idx = 63 - field_idx;
+ if (inset & (1ULL << bit_idx))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Set bit value for some field index
+ *
+ * @param inset
+ * Input set value.
+ * @param field_idx
+ * Field index for input set.
+ * @return
+ * - (-1) if failed.
+ * - (0) if success.
+ */
+static inline int
+rte_pmd_i40e_inset_field_set(uint64_t *inset, uint8_t field_idx)
+{
+ uint8_t bit_idx;
+
+ if (field_idx > 63)
+ return -1;
+
+ bit_idx = 63 - field_idx;
+ *inset = *inset | (1ULL << bit_idx);
+
+ return 0;
+}
+
+/**
+ * Clear bit value for some field index
+ *
+ * @param inset
+ * Input set value.
+ * @param field_idx
+ * Field index for input set.
+ * @return
+ * - (-1) if failed.
+ * - (0) if success.
+ */
+static inline int
+rte_pmd_i40e_inset_field_clear(uint64_t *inset, uint8_t field_idx)
+{
+ uint8_t bit_idx;
+
+ if (field_idx > 63)
+ return -1;
+
+ bit_idx = 63 - field_idx;
+ *inset = *inset & ~(1ULL << bit_idx);
+
+ return 0;
+}
+
+#endif /* _PMD_I40E_H_ */
diff --git a/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e_version.map b/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e_version.map
new file mode 100644
index 00000000..cccd5768
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e_version.map
@@ -0,0 +1,67 @@
+DPDK_2.0 {
+
+ local: *;
+};
+
+DPDK_17.02 {
+ global:
+
+ rte_pmd_i40e_get_vf_stats;
+ rte_pmd_i40e_ping_vfs;
+ rte_pmd_i40e_ptype_mapping_get;
+ rte_pmd_i40e_ptype_mapping_replace;
+ rte_pmd_i40e_ptype_mapping_reset;
+ rte_pmd_i40e_ptype_mapping_update;
+ rte_pmd_i40e_reset_vf_stats;
+ rte_pmd_i40e_set_tx_loopback;
+ rte_pmd_i40e_set_vf_broadcast;
+ rte_pmd_i40e_set_vf_mac_addr;
+ rte_pmd_i40e_set_vf_mac_anti_spoof;
+ rte_pmd_i40e_set_vf_multicast_promisc;
+ rte_pmd_i40e_set_vf_unicast_promisc;
+ rte_pmd_i40e_set_vf_vlan_anti_spoof;
+ rte_pmd_i40e_set_vf_vlan_filter;
+ rte_pmd_i40e_set_vf_vlan_insert;
+ rte_pmd_i40e_set_vf_vlan_stripq;
+ rte_pmd_i40e_set_vf_vlan_tag;
+
+} DPDK_2.0;
+
+DPDK_17.05 {
+ global:
+
+ rte_pmd_i40e_set_tc_strict_prio;
+ rte_pmd_i40e_set_vf_max_bw;
+ rte_pmd_i40e_set_vf_tc_bw_alloc;
+ rte_pmd_i40e_set_vf_tc_max_bw;
+ rte_pmd_i40e_process_ddp_package;
+ rte_pmd_i40e_get_ddp_list;
+
+} DPDK_17.02;
+
+DPDK_17.08 {
+ global:
+
+ rte_pmd_i40e_get_ddp_info;
+
+} DPDK_17.05;
+
+DPDK_17.11 {
+ global:
+
+ rte_pmd_i40e_add_vf_mac_addr;
+ rte_pmd_i40e_flow_add_del_packet_template;
+ rte_pmd_i40e_flow_type_mapping_update;
+ rte_pmd_i40e_flow_type_mapping_get;
+ rte_pmd_i40e_flow_type_mapping_reset;
+ rte_pmd_i40e_query_vfid_by_mac;
+ rte_pmd_i40e_rss_queue_region_conf;
+
+} DPDK_17.08;
+
+DPDK_18.02 {
+ global:
+
+ rte_pmd_i40e_inset_get;
+ rte_pmd_i40e_inset_set;
+} DPDK_17.11; \ No newline at end of file
diff --git a/src/spdk/dpdk/drivers/net/ifc/Makefile b/src/spdk/dpdk/drivers/net/ifc/Makefile
new file mode 100644
index 00000000..39b36ae5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ifc/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ifc.a
+
+LDLIBS += -lpthread
+LDLIBS += -lrte_eal -lrte_pci -lrte_vhost -lrte_bus_pci
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+#
+# Add extra flags for base driver source files to disable warnings in them
+#
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+
+VPATH += $(SRCDIR)/base
+
+EXPORT_MAP := rte_pmd_ifc_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_IFC_PMD) += ifcvf_vdpa.c
+SRCS-$(CONFIG_RTE_LIBRTE_IFC_PMD) += ifcvf.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/ifc/base/ifcvf.c b/src/spdk/dpdk/drivers/net/ifc/base/ifcvf.c
new file mode 100644
index 00000000..4b22d9ed
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ifc/base/ifcvf.c
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "ifcvf.h"
+#include "ifcvf_osdep.h"
+
+STATIC void *
+get_cap_addr(struct ifcvf_hw *hw, struct ifcvf_pci_cap *cap)
+{
+ u8 bar = cap->bar;
+ u32 length = cap->length;
+ u32 offset = cap->offset;
+
+ if (bar > IFCVF_PCI_MAX_RESOURCE - 1) {
+ DEBUGOUT("invalid bar: %u\n", bar);
+ return NULL;
+ }
+
+ if (offset + length < offset) {
+ DEBUGOUT("offset(%u) + length(%u) overflows\n",
+ offset, length);
+ return NULL;
+ }
+
+ if (offset + length > hw->mem_resource[cap->bar].len) {
+ DEBUGOUT("offset(%u) + length(%u) overflows bar length(%u)",
+ offset, length, (u32)hw->mem_resource[cap->bar].len);
+ return NULL;
+ }
+
+ return hw->mem_resource[bar].addr + offset;
+}
+
+int
+ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev)
+{
+ int ret;
+ u8 pos;
+ struct ifcvf_pci_cap cap;
+
+ ret = PCI_READ_CONFIG_BYTE(dev, &pos, PCI_CAPABILITY_LIST);
+ if (ret < 0) {
+ DEBUGOUT("failed to read pci capability list\n");
+ return -1;
+ }
+
+ while (pos) {
+ ret = PCI_READ_CONFIG_RANGE(dev, (u32 *)&cap,
+ sizeof(cap), pos);
+ if (ret < 0) {
+ DEBUGOUT("failed to read cap at pos: %x", pos);
+ break;
+ }
+
+ if (cap.cap_vndr != PCI_CAP_ID_VNDR)
+ goto next;
+
+ DEBUGOUT("cfg type: %u, bar: %u, offset: %u, "
+ "len: %u\n", cap.cfg_type, cap.bar,
+ cap.offset, cap.length);
+
+ switch (cap.cfg_type) {
+ case IFCVF_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cap_addr(hw, &cap);
+ break;
+ case IFCVF_PCI_CAP_NOTIFY_CFG:
+ PCI_READ_CONFIG_DWORD(dev, &hw->notify_off_multiplier,
+ pos + sizeof(cap));
+ hw->notify_base = get_cap_addr(hw, &cap);
+ hw->notify_region = cap.bar;
+ break;
+ case IFCVF_PCI_CAP_ISR_CFG:
+ hw->isr = get_cap_addr(hw, &cap);
+ break;
+ case IFCVF_PCI_CAP_DEVICE_CFG:
+ hw->dev_cfg = get_cap_addr(hw, &cap);
+ break;
+ }
+next:
+ pos = cap.cap_next;
+ }
+
+ hw->lm_cfg = hw->mem_resource[4].addr;
+
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->isr == NULL || hw->dev_cfg == NULL) {
+ DEBUGOUT("capability incomplete\n");
+ return -1;
+ }
+
+ DEBUGOUT("capability mapping:\ncommon cfg: %p\n"
+ "notify base: %p\nisr cfg: %p\ndevice cfg: %p\n"
+ "multiplier: %u\n",
+ hw->common_cfg, hw->dev_cfg,
+ hw->isr, hw->notify_base,
+ hw->notify_off_multiplier);
+
+ return 0;
+}
+
+STATIC u8
+ifcvf_get_status(struct ifcvf_hw *hw)
+{
+ return IFCVF_READ_REG8(&hw->common_cfg->device_status);
+}
+
+STATIC void
+ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
+{
+ IFCVF_WRITE_REG8(status, &hw->common_cfg->device_status);
+}
+
+STATIC void
+ifcvf_reset(struct ifcvf_hw *hw)
+{
+ ifcvf_set_status(hw, 0);
+
+ /* flush status write */
+ while (ifcvf_get_status(hw))
+ msec_delay(1);
+}
+
+STATIC void
+ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
+{
+ if (status != 0)
+ status |= ifcvf_get_status(hw);
+
+ ifcvf_set_status(hw, status);
+ ifcvf_get_status(hw);
+}
+
+u64
+ifcvf_get_features(struct ifcvf_hw *hw)
+{
+ u32 features_lo, features_hi;
+ struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
+
+ IFCVF_WRITE_REG32(0, &cfg->device_feature_select);
+ features_lo = IFCVF_READ_REG32(&cfg->device_feature);
+
+ IFCVF_WRITE_REG32(1, &cfg->device_feature_select);
+ features_hi = IFCVF_READ_REG32(&cfg->device_feature);
+
+ return ((u64)features_hi << 32) | features_lo;
+}
+
+STATIC void
+ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
+{
+ struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
+
+ IFCVF_WRITE_REG32(0, &cfg->guest_feature_select);
+ IFCVF_WRITE_REG32(features & ((1ULL << 32) - 1), &cfg->guest_feature);
+
+ IFCVF_WRITE_REG32(1, &cfg->guest_feature_select);
+ IFCVF_WRITE_REG32(features >> 32, &cfg->guest_feature);
+}
+
+STATIC int
+ifcvf_config_features(struct ifcvf_hw *hw)
+{
+ u64 host_features;
+
+ host_features = ifcvf_get_features(hw);
+ hw->req_features &= host_features;
+
+ ifcvf_set_features(hw, hw->req_features);
+ ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_FEATURES_OK);
+
+ if (!(ifcvf_get_status(hw) & IFCVF_CONFIG_STATUS_FEATURES_OK)) {
+ DEBUGOUT("failed to set FEATURES_OK status\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+STATIC void
+io_write64_twopart(u64 val, u32 *lo, u32 *hi)
+{
+ IFCVF_WRITE_REG32(val & ((1ULL << 32) - 1), lo);
+ IFCVF_WRITE_REG32(val >> 32, hi);
+}
+
+STATIC int
+ifcvf_hw_enable(struct ifcvf_hw *hw)
+{
+ struct ifcvf_pci_common_cfg *cfg;
+ u8 *lm_cfg;
+ u32 i;
+ u16 notify_off;
+
+ cfg = hw->common_cfg;
+ lm_cfg = hw->lm_cfg;
+
+ IFCVF_WRITE_REG16(0, &cfg->msix_config);
+ if (IFCVF_READ_REG16(&cfg->msix_config) == IFCVF_MSI_NO_VECTOR) {
+ DEBUGOUT("msix vec alloc failed for device config\n");
+ return -1;
+ }
+
+ for (i = 0; i < hw->nr_vring; i++) {
+ IFCVF_WRITE_REG16(i, &cfg->queue_select);
+ io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
+ &cfg->queue_desc_hi);
+ io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
+ &cfg->queue_avail_hi);
+ io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
+ &cfg->queue_used_hi);
+ IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size);
+
+ *(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+ (i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4) =
+ (u32)hw->vring[i].last_avail_idx |
+ ((u32)hw->vring[i].last_used_idx << 16);
+
+ IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector);
+ if (IFCVF_READ_REG16(&cfg->queue_msix_vector) ==
+ IFCVF_MSI_NO_VECTOR) {
+ DEBUGOUT("queue %u, msix vec alloc failed\n",
+ i);
+ return -1;
+ }
+
+ notify_off = IFCVF_READ_REG16(&cfg->queue_notify_off);
+ hw->notify_addr[i] = (void *)((u8 *)hw->notify_base +
+ notify_off * hw->notify_off_multiplier);
+ IFCVF_WRITE_REG16(1, &cfg->queue_enable);
+ }
+
+ return 0;
+}
+
+STATIC void
+ifcvf_hw_disable(struct ifcvf_hw *hw)
+{
+ u32 i;
+ struct ifcvf_pci_common_cfg *cfg;
+ u32 ring_state;
+
+ cfg = hw->common_cfg;
+
+ IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->msix_config);
+ for (i = 0; i < hw->nr_vring; i++) {
+ IFCVF_WRITE_REG16(i, &cfg->queue_select);
+ IFCVF_WRITE_REG16(0, &cfg->queue_enable);
+ IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->queue_msix_vector);
+ ring_state = *(u32 *)(hw->lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+ (i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4);
+ hw->vring[i].last_avail_idx = (u16)ring_state;
+ hw->vring[i].last_used_idx = (u16)(ring_state >> 16);
+ }
+}
+
+int
+ifcvf_start_hw(struct ifcvf_hw *hw)
+{
+ ifcvf_reset(hw);
+ ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_ACK);
+ ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER);
+
+ if (ifcvf_config_features(hw) < 0)
+ return -1;
+
+ if (ifcvf_hw_enable(hw) < 0)
+ return -1;
+
+ ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER_OK);
+ return 0;
+}
+
+void
+ifcvf_stop_hw(struct ifcvf_hw *hw)
+{
+ ifcvf_hw_disable(hw);
+ ifcvf_reset(hw);
+}
+
+void
+ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
+{
+ IFCVF_WRITE_REG16(qid, hw->notify_addr[qid]);
+}
+
+u8
+ifcvf_get_notify_region(struct ifcvf_hw *hw)
+{
+ return hw->notify_region;
+}
+
+u64
+ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid)
+{
+ return (u8 *)hw->notify_addr[qid] -
+ (u8 *)hw->mem_resource[hw->notify_region].addr;
+}
diff --git a/src/spdk/dpdk/drivers/net/ifc/base/ifcvf.h b/src/spdk/dpdk/drivers/net/ifc/base/ifcvf.h
new file mode 100644
index 00000000..badacb61
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ifc/base/ifcvf.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _IFCVF_H_
+#define _IFCVF_H_
+
+#include "ifcvf_osdep.h"
+
+#define IFCVF_VENDOR_ID 0x1AF4
+#define IFCVF_DEVICE_ID 0x1041
+#define IFCVF_SUBSYS_VENDOR_ID 0x8086
+#define IFCVF_SUBSYS_DEVICE_ID 0x001A
+
+#define IFCVF_MAX_QUEUES 1
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+/* Common configuration */
+#define IFCVF_PCI_CAP_COMMON_CFG 1
+/* Notifications */
+#define IFCVF_PCI_CAP_NOTIFY_CFG 2
+/* ISR Status */
+#define IFCVF_PCI_CAP_ISR_CFG 3
+/* Device specific configuration */
+#define IFCVF_PCI_CAP_DEVICE_CFG 4
+/* PCI configuration access */
+#define IFCVF_PCI_CAP_PCI_CFG 5
+
+#define IFCVF_CONFIG_STATUS_RESET 0x00
+#define IFCVF_CONFIG_STATUS_ACK 0x01
+#define IFCVF_CONFIG_STATUS_DRIVER 0x02
+#define IFCVF_CONFIG_STATUS_DRIVER_OK 0x04
+#define IFCVF_CONFIG_STATUS_FEATURES_OK 0x08
+#define IFCVF_CONFIG_STATUS_FAILED 0x80
+
+#define IFCVF_MSI_NO_VECTOR 0xffff
+#define IFCVF_PCI_MAX_RESOURCE 6
+
+#define IFCVF_LM_CFG_SIZE 0x40
+#define IFCVF_LM_RING_STATE_OFFSET 0x20
+
+#define IFCVF_LM_LOGGING_CTRL 0x0
+
+#define IFCVF_LM_BASE_ADDR_LOW 0x10
+#define IFCVF_LM_BASE_ADDR_HIGH 0x14
+#define IFCVF_LM_END_ADDR_LOW 0x18
+#define IFCVF_LM_END_ADDR_HIGH 0x1c
+
+#define IFCVF_LM_DISABLE 0x0
+#define IFCVF_LM_ENABLE_VF 0x1
+#define IFCVF_LM_ENABLE_PF 0x3
+
+#define IFCVF_32_BIT_MASK 0xffffffff
+
+
+struct ifcvf_pci_cap {
+ u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ u8 cap_next; /* Generic PCI field: next ptr. */
+ u8 cap_len; /* Generic PCI field: capability length */
+ u8 cfg_type; /* Identifies the structure. */
+ u8 bar; /* Where to find it. */
+ u8 padding[3]; /* Pad to full dword. */
+ u32 offset; /* Offset within bar. */
+ u32 length; /* Length of the structure, in bytes. */
+};
+
+struct ifcvf_pci_notify_cap {
+ struct ifcvf_pci_cap cap;
+ u32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+struct ifcvf_pci_common_cfg {
+ /* About the whole device. */
+ u32 device_feature_select;
+ u32 device_feature;
+ u32 guest_feature_select;
+ u32 guest_feature;
+ u16 msix_config;
+ u16 num_queues;
+ u8 device_status;
+ u8 config_generation;
+
+ /* About a specific virtqueue. */
+ u16 queue_select;
+ u16 queue_size;
+ u16 queue_msix_vector;
+ u16 queue_enable;
+ u16 queue_notify_off;
+ u32 queue_desc_lo;
+ u32 queue_desc_hi;
+ u32 queue_avail_lo;
+ u32 queue_avail_hi;
+ u32 queue_used_lo;
+ u32 queue_used_hi;
+};
+
+struct ifcvf_net_config {
+ u8 mac[6];
+ u16 status;
+ u16 max_virtqueue_pairs;
+} __attribute__((packed));
+
+struct ifcvf_pci_mem_resource {
+ u64 phys_addr; /**< Physical address, 0 if not resource. */
+ u64 len; /**< Length of the resource. */
+ u8 *addr; /**< Virtual address, NULL when not mapped. */
+};
+
+struct vring_info {
+ u64 desc;
+ u64 avail;
+ u64 used;
+ u16 size;
+ u16 last_avail_idx;
+ u16 last_used_idx;
+};
+
+struct ifcvf_hw {
+ u64 req_features;
+ u8 notify_region;
+ u32 notify_off_multiplier;
+ struct ifcvf_pci_common_cfg *common_cfg;
+ struct ifcvf_net_device_config *dev_cfg;
+ u8 *isr;
+ u16 *notify_base;
+ u16 *notify_addr[IFCVF_MAX_QUEUES * 2];
+ u8 *lm_cfg;
+ struct vring_info vring[IFCVF_MAX_QUEUES * 2];
+ u8 nr_vring;
+ struct ifcvf_pci_mem_resource mem_resource[IFCVF_PCI_MAX_RESOURCE];
+};
+
+int
+ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev);
+
+u64
+ifcvf_get_features(struct ifcvf_hw *hw);
+
+int
+ifcvf_start_hw(struct ifcvf_hw *hw);
+
+void
+ifcvf_stop_hw(struct ifcvf_hw *hw);
+
+void
+ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
+
+u8
+ifcvf_get_notify_region(struct ifcvf_hw *hw);
+
+u64
+ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid);
+
+#endif /* _IFCVF_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ifc/base/ifcvf_osdep.h b/src/spdk/dpdk/drivers/net/ifc/base/ifcvf_osdep.h
new file mode 100644
index 00000000..cf151ef5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ifc/base/ifcvf_osdep.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _IFCVF_OSDEP_H_
+#define _IFCVF_OSDEP_H_
+
+#include <stdint.h>
+#include <linux/pci_regs.h>
+
+#include <rte_cycles.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#define DEBUGOUT(S, args...) RTE_LOG(DEBUG, PMD, S, ##args)
+#define STATIC static
+
+#define msec_delay rte_delay_ms
+
+#define IFCVF_READ_REG8(reg) rte_read8(reg)
+#define IFCVF_WRITE_REG8(val, reg) rte_write8((val), (reg))
+#define IFCVF_READ_REG16(reg) rte_read16(reg)
+#define IFCVF_WRITE_REG16(val, reg) rte_write16((val), (reg))
+#define IFCVF_READ_REG32(reg) rte_read32(reg)
+#define IFCVF_WRITE_REG32(val, reg) rte_write32((val), (reg))
+
+typedef struct rte_pci_device PCI_DEV;
+
+#define PCI_READ_CONFIG_BYTE(dev, val, where) \
+ rte_pci_read_config(dev, val, 1, where)
+
+#define PCI_READ_CONFIG_DWORD(dev, val, where) \
+ rte_pci_read_config(dev, val, 4, where)
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef int64_t s64;
+typedef uint64_t u64;
+
+static inline int
+PCI_READ_CONFIG_RANGE(PCI_DEV *dev, uint32_t *val, int size, int where)
+{
+ return rte_pci_read_config(dev, val, size, where);
+}
+
+#endif /* _IFCVF_OSDEP_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ifc/ifcvf_vdpa.c b/src/spdk/dpdk/drivers/net/ifc/ifcvf_vdpa.c
new file mode 100644
index 00000000..88d81403
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ifc/ifcvf_vdpa.c
@@ -0,0 +1,793 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <unistd.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "base/ifcvf.h"
+
+#define DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+static int ifcvf_vdpa_logtype;
+
+struct ifcvf_internal {
+ struct rte_vdpa_dev_addr dev_addr;
+ struct rte_pci_device *pdev;
+ struct ifcvf_hw hw;
+ int vfio_container_fd;
+ int vfio_group_fd;
+ int vfio_dev_fd;
+ pthread_t tid; /* thread for notify relay */
+ int epfd;
+ int vid;
+ int did;
+ uint16_t max_queues;
+ uint64_t features;
+ rte_atomic32_t started;
+ rte_atomic32_t dev_attached;
+ rte_atomic32_t running;
+ rte_spinlock_t lock;
+};
+
+struct internal_list {
+ TAILQ_ENTRY(internal_list) next;
+ struct ifcvf_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+static struct internal_list_head internal_list =
+ TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static struct internal_list *
+find_internal_resource_by_did(int did)
+{
+ int found = 0;
+ struct internal_list *list;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ if (did == list->internal->did) {
+ found = 1;
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&internal_list_lock);
+
+ if (!found)
+ return NULL;
+
+ return list;
+}
+
+static struct internal_list *
+find_internal_resource_by_dev(struct rte_pci_device *pdev)
+{
+ int found = 0;
+ struct internal_list *list;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ if (pdev == list->internal->pdev) {
+ found = 1;
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&internal_list_lock);
+
+ if (!found)
+ return NULL;
+
+ return list;
+}
+
+static int
+ifcvf_vfio_setup(struct ifcvf_internal *internal)
+{
+ struct rte_pci_device *dev = internal->pdev;
+ char devname[RTE_DEV_NAME_MAX_LEN] = {0};
+ int iommu_group_num;
+ int ret = 0;
+ int i;
+
+ internal->vfio_dev_fd = -1;
+ internal->vfio_group_fd = -1;
+ internal->vfio_container_fd = -1;
+
+ rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+ rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+ &iommu_group_num);
+
+ internal->vfio_container_fd = rte_vfio_container_create();
+ if (internal->vfio_container_fd < 0)
+ return -1;
+
+ internal->vfio_group_fd = rte_vfio_container_group_bind(
+ internal->vfio_container_fd, iommu_group_num);
+ if (internal->vfio_group_fd < 0)
+ goto err;
+
+ if (rte_pci_map_device(dev))
+ goto err;
+
+ internal->vfio_dev_fd = dev->intr_handle.vfio_dev_fd;
+
+ for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE);
+ i++) {
+ internal->hw.mem_resource[i].addr =
+ internal->pdev->mem_resource[i].addr;
+ internal->hw.mem_resource[i].phys_addr =
+ internal->pdev->mem_resource[i].phys_addr;
+ internal->hw.mem_resource[i].len =
+ internal->pdev->mem_resource[i].len;
+ }
+ ret = ifcvf_init_hw(&internal->hw, internal->pdev);
+
+ return ret;
+
+err:
+ rte_vfio_container_destroy(internal->vfio_container_fd);
+ return -1;
+}
+
+static int
+ifcvf_dma_map(struct ifcvf_internal *internal, int do_map)
+{
+ uint32_t i;
+ int ret;
+ struct rte_vhost_memory *mem = NULL;
+ int vfio_container_fd;
+
+ ret = rte_vhost_get_mem_table(internal->vid, &mem);
+ if (ret < 0) {
+ DRV_LOG(ERR, "failed to get VM memory layout.");
+ goto exit;
+ }
+
+ vfio_container_fd = internal->vfio_container_fd;
+
+ for (i = 0; i < mem->nregions; i++) {
+ struct rte_vhost_mem_region *reg;
+
+ reg = &mem->regions[i];
+ DRV_LOG(INFO, "%s, region %u: HVA 0x%" PRIx64 ", "
+ "GPA 0x%" PRIx64 ", size 0x%" PRIx64 ".",
+ do_map ? "DMA map" : "DMA unmap", i,
+ reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+ if (do_map) {
+ ret = rte_vfio_container_dma_map(vfio_container_fd,
+ reg->host_user_addr, reg->guest_phys_addr,
+ reg->size);
+ if (ret < 0) {
+ DRV_LOG(ERR, "DMA map failed.");
+ goto exit;
+ }
+ } else {
+ ret = rte_vfio_container_dma_unmap(vfio_container_fd,
+ reg->host_user_addr, reg->guest_phys_addr,
+ reg->size);
+ if (ret < 0) {
+ DRV_LOG(ERR, "DMA unmap failed.");
+ goto exit;
+ }
+ }
+ }
+
+exit:
+ if (mem)
+ free(mem);
+ return ret;
+}
+
+static uint64_t
+qva_to_gpa(int vid, uint64_t qva)
+{
+ struct rte_vhost_memory *mem = NULL;
+ struct rte_vhost_mem_region *reg;
+ uint32_t i;
+ uint64_t gpa = 0;
+
+ if (rte_vhost_get_mem_table(vid, &mem) < 0)
+ goto exit;
+
+ for (i = 0; i < mem->nregions; i++) {
+ reg = &mem->regions[i];
+
+ if (qva >= reg->host_user_addr &&
+ qva < reg->host_user_addr + reg->size) {
+ gpa = qva - reg->host_user_addr + reg->guest_phys_addr;
+ break;
+ }
+ }
+
+exit:
+ if (mem)
+ free(mem);
+ return gpa;
+}
+
+static int
+vdpa_ifcvf_start(struct ifcvf_internal *internal)
+{
+ struct ifcvf_hw *hw = &internal->hw;
+ int i, nr_vring;
+ int vid;
+ struct rte_vhost_vring vq;
+ uint64_t gpa;
+
+ vid = internal->vid;
+ nr_vring = rte_vhost_get_vring_num(vid);
+ rte_vhost_get_negotiated_features(vid, &hw->req_features);
+
+ for (i = 0; i < nr_vring; i++) {
+ rte_vhost_get_vhost_vring(vid, i, &vq);
+ gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+ if (gpa == 0) {
+ DRV_LOG(ERR, "Fail to get GPA for descriptor ring.");
+ return -1;
+ }
+ hw->vring[i].desc = gpa;
+
+ gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+ if (gpa == 0) {
+ DRV_LOG(ERR, "Fail to get GPA for available ring.");
+ return -1;
+ }
+ hw->vring[i].avail = gpa;
+
+ gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+ if (gpa == 0) {
+ DRV_LOG(ERR, "Fail to get GPA for used ring.");
+ return -1;
+ }
+ hw->vring[i].used = gpa;
+
+ hw->vring[i].size = vq.size;
+ rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
+ &hw->vring[i].last_used_idx);
+ }
+ hw->nr_vring = i;
+
+ return ifcvf_start_hw(&internal->hw);
+}
+
+static void
+vdpa_ifcvf_stop(struct ifcvf_internal *internal)
+{
+ struct ifcvf_hw *hw = &internal->hw;
+ uint32_t i;
+ int vid;
+
+ vid = internal->vid;
+ ifcvf_stop_hw(hw);
+
+ for (i = 0; i < hw->nr_vring; i++)
+ rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+ hw->vring[i].last_used_idx);
+}
+
+#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
+ sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1))
+static int
+vdpa_enable_vfio_intr(struct ifcvf_internal *internal)
+{
+ int ret;
+ uint32_t i, nr_vring;
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int *fd_ptr;
+ struct rte_vhost_vring vring;
+
+ nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = sizeof(irq_set_buf);
+ irq_set->count = nr_vring + 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = 0;
+ fd_ptr = (int *)&irq_set->data;
+ fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd;
+
+ for (i = 0; i < nr_vring; i++) {
+ rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+ fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+ }
+
+ ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret) {
+ DRV_LOG(ERR, "Error enabling MSI-X interrupts: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
+{
+ int ret;
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = sizeof(irq_set_buf);
+ irq_set->count = 0;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = 0;
+
+ ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret) {
+ DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static void *
+notify_relay(void *arg)
+{
+ int i, kickfd, epfd, nfds = 0;
+ uint32_t qid, q_num;
+ struct epoll_event events[IFCVF_MAX_QUEUES * 2];
+ struct epoll_event ev;
+ uint64_t buf;
+ int nbytes;
+ struct rte_vhost_vring vring;
+ struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
+ struct ifcvf_hw *hw = &internal->hw;
+
+ q_num = rte_vhost_get_vring_num(internal->vid);
+
+ epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
+ if (epfd < 0) {
+ DRV_LOG(ERR, "failed to create epoll instance.");
+ return NULL;
+ }
+ internal->epfd = epfd;
+
+ for (qid = 0; qid < q_num; qid++) {
+ ev.events = EPOLLIN | EPOLLPRI;
+ rte_vhost_get_vhost_vring(internal->vid, qid, &vring);
+ ev.data.u64 = qid | (uint64_t)vring.kickfd << 32;
+ if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
+ DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
+ return NULL;
+ }
+ }
+
+ for (;;) {
+ nfds = epoll_wait(epfd, events, q_num, -1);
+ if (nfds < 0) {
+ if (errno == EINTR)
+ continue;
+ DRV_LOG(ERR, "epoll_wait return fail\n");
+ return NULL;
+ }
+
+ for (i = 0; i < nfds; i++) {
+ qid = events[i].data.u32;
+ kickfd = (uint32_t)(events[i].data.u64 >> 32);
+ do {
+ nbytes = read(kickfd, &buf, 8);
+ if (nbytes < 0) {
+ if (errno == EINTR ||
+ errno == EWOULDBLOCK ||
+ errno == EAGAIN)
+ continue;
+ DRV_LOG(INFO, "Error reading "
+ "kickfd: %s",
+ strerror(errno));
+ }
+ break;
+ } while (1);
+
+ ifcvf_notify_queue(hw, qid);
+ }
+ }
+
+ return NULL;
+}
+
+static int
+setup_notify_relay(struct ifcvf_internal *internal)
+{
+ int ret;
+
+ ret = pthread_create(&internal->tid, NULL, notify_relay,
+ (void *)internal);
+ if (ret) {
+ DRV_LOG(ERR, "failed to create notify relay pthread.");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+unset_notify_relay(struct ifcvf_internal *internal)
+{
+ void *status;
+
+ if (internal->tid) {
+ pthread_cancel(internal->tid);
+ pthread_join(internal->tid, &status);
+ }
+ internal->tid = 0;
+
+ if (internal->epfd >= 0)
+ close(internal->epfd);
+ internal->epfd = -1;
+
+ return 0;
+}
+
+static int
+update_datapath(struct ifcvf_internal *internal)
+{
+ int ret;
+
+ rte_spinlock_lock(&internal->lock);
+
+ if (!rte_atomic32_read(&internal->running) &&
+ (rte_atomic32_read(&internal->started) &&
+ rte_atomic32_read(&internal->dev_attached))) {
+ ret = ifcvf_dma_map(internal, 1);
+ if (ret)
+ goto err;
+
+ ret = vdpa_enable_vfio_intr(internal);
+ if (ret)
+ goto err;
+
+ ret = setup_notify_relay(internal);
+ if (ret)
+ goto err;
+
+ ret = vdpa_ifcvf_start(internal);
+ if (ret)
+ goto err;
+
+ rte_atomic32_set(&internal->running, 1);
+ } else if (rte_atomic32_read(&internal->running) &&
+ (!rte_atomic32_read(&internal->started) ||
+ !rte_atomic32_read(&internal->dev_attached))) {
+ vdpa_ifcvf_stop(internal);
+
+ ret = unset_notify_relay(internal);
+ if (ret)
+ goto err;
+
+ ret = vdpa_disable_vfio_intr(internal);
+ if (ret)
+ goto err;
+
+ ret = ifcvf_dma_map(internal, 0);
+ if (ret)
+ goto err;
+
+ rte_atomic32_set(&internal->running, 0);
+ }
+
+ rte_spinlock_unlock(&internal->lock);
+ return 0;
+err:
+ rte_spinlock_unlock(&internal->lock);
+ return ret;
+}
+
+static int
+ifcvf_dev_config(int vid)
+{
+ int did;
+ struct internal_list *list;
+ struct ifcvf_internal *internal;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ internal = list->internal;
+ internal->vid = vid;
+ rte_atomic32_set(&internal->dev_attached, 1);
+ update_datapath(internal);
+
+ return 0;
+}
+
+static int
+ifcvf_dev_close(int vid)
+{
+ int did;
+ struct internal_list *list;
+ struct ifcvf_internal *internal;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ internal = list->internal;
+ rte_atomic32_set(&internal->dev_attached, 0);
+ update_datapath(internal);
+
+ return 0;
+}
+
+static int
+ifcvf_get_vfio_group_fd(int vid)
+{
+ int did;
+ struct internal_list *list;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ return list->internal->vfio_group_fd;
+}
+
+static int
+ifcvf_get_vfio_device_fd(int vid)
+{
+ int did;
+ struct internal_list *list;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ return list->internal->vfio_dev_fd;
+}
+
+static int
+ifcvf_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
+{
+ int did;
+ struct internal_list *list;
+ struct ifcvf_internal *internal;
+ struct vfio_region_info reg = { .argsz = sizeof(reg) };
+ int ret;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ internal = list->internal;
+
+ reg.index = ifcvf_get_notify_region(&internal->hw);
+ ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
+ if (ret) {
+ DRV_LOG(ERR, "Get not get device region info: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ *offset = ifcvf_get_queue_notify_off(&internal->hw, qid) + reg.offset;
+ *size = 0x1000;
+
+ return 0;
+}
+
+static int
+ifcvf_get_queue_num(int did, uint32_t *queue_num)
+{
+ struct internal_list *list;
+
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ *queue_num = list->internal->max_queues;
+
+ return 0;
+}
+
+static int
+ifcvf_get_vdpa_features(int did, uint64_t *features)
+{
+ struct internal_list *list;
+
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ *features = list->internal->features;
+
+ return 0;
+}
+
+#define VDPA_SUPPORTED_PROTOCOL_FEATURES \
+ (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
+ 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \
+ 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \
+ 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \
+ 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD)
+static int
+ifcvf_get_protocol_features(int did __rte_unused, uint64_t *features)
+{
+ *features = VDPA_SUPPORTED_PROTOCOL_FEATURES;
+ return 0;
+}
+
+struct rte_vdpa_dev_ops ifcvf_ops = {
+ .get_queue_num = ifcvf_get_queue_num,
+ .get_features = ifcvf_get_vdpa_features,
+ .get_protocol_features = ifcvf_get_protocol_features,
+ .dev_conf = ifcvf_dev_config,
+ .dev_close = ifcvf_dev_close,
+ .set_vring_state = NULL,
+ .set_features = NULL,
+ .migration_done = NULL,
+ .get_vfio_group_fd = ifcvf_get_vfio_group_fd,
+ .get_vfio_device_fd = ifcvf_get_vfio_device_fd,
+ .get_notify_area = ifcvf_get_notify_area,
+};
+
+static int
+ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ uint64_t features;
+ struct ifcvf_internal *internal = NULL;
+ struct internal_list *list = NULL;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ list = rte_zmalloc("ifcvf", sizeof(*list), 0);
+ if (list == NULL)
+ goto error;
+
+ internal = rte_zmalloc("ifcvf", sizeof(*internal), 0);
+ if (internal == NULL)
+ goto error;
+
+ internal->pdev = pci_dev;
+ rte_spinlock_init(&internal->lock);
+ if (ifcvf_vfio_setup(internal) < 0)
+ return -1;
+
+ internal->max_queues = IFCVF_MAX_QUEUES;
+ features = ifcvf_get_features(&internal->hw);
+ internal->features = (features &
+ ~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
+ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
+
+ internal->dev_addr.pci_addr = pci_dev->addr;
+ internal->dev_addr.type = PCI_ADDR;
+ list->internal = internal;
+
+ pthread_mutex_lock(&internal_list_lock);
+ TAILQ_INSERT_TAIL(&internal_list, list, next);
+ pthread_mutex_unlock(&internal_list_lock);
+
+ internal->did = rte_vdpa_register_device(&internal->dev_addr,
+ &ifcvf_ops);
+ if (internal->did < 0)
+ goto error;
+
+ rte_atomic32_set(&internal->started, 1);
+ update_datapath(internal);
+
+ return 0;
+
+error:
+ rte_free(list);
+ rte_free(internal);
+ return -1;
+}
+
+static int
+ifcvf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct ifcvf_internal *internal;
+ struct internal_list *list;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ list = find_internal_resource_by_dev(pci_dev);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device: %s", pci_dev->name);
+ return -1;
+ }
+
+ internal = list->internal;
+ rte_atomic32_set(&internal->started, 0);
+ update_datapath(internal);
+
+ rte_pci_unmap_device(internal->pdev);
+ rte_vfio_container_destroy(internal->vfio_container_fd);
+ rte_vdpa_unregister_device(internal->did);
+
+ pthread_mutex_lock(&internal_list_lock);
+ TAILQ_REMOVE(&internal_list, list, next);
+ pthread_mutex_unlock(&internal_list_lock);
+
+ rte_free(list);
+ rte_free(internal);
+
+ return 0;
+}
+
+/*
+ * IFCVF has the same vendor ID and device ID as virtio net PCI
+ * device, with its specific subsystem vendor ID and device ID.
+ */
+static const struct rte_pci_id pci_id_ifcvf_map[] = {
+ { .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = IFCVF_VENDOR_ID,
+ .device_id = IFCVF_DEVICE_ID,
+ .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
+ .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID,
+ },
+
+ { .vendor_id = 0, /* sentinel */
+ },
+};
+
+static struct rte_pci_driver rte_ifcvf_vdpa = {
+ .id_table = pci_id_ifcvf_map,
+ .drv_flags = 0,
+ .probe = ifcvf_pci_probe,
+ .remove = ifcvf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci");
+
+RTE_INIT(ifcvf_vdpa_init_log)
+{
+ ifcvf_vdpa_logtype = rte_log_register("pmd.net.ifcvf_vdpa");
+ if (ifcvf_vdpa_logtype >= 0)
+ rte_log_set_level(ifcvf_vdpa_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/ifc/meson.build b/src/spdk/dpdk/drivers/net/ifc/meson.build
new file mode 100644
index 00000000..72df070a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ifc/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+build = dpdk_conf.has('RTE_LIBRTE_VHOST')
+allow_experimental_apis = true
+sources = files('ifcvf_vdpa.c', 'base/ifcvf.c')
+includes += include_directories('base')
+deps += 'vhost'
diff --git a/src/spdk/dpdk/drivers/net/ifc/rte_pmd_ifc_version.map b/src/spdk/dpdk/drivers/net/ifc/rte_pmd_ifc_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ifc/rte_pmd_ifc_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/Makefile b/src/spdk/dpdk/drivers/net/ixgbe/Makefile
new file mode 100644
index 00000000..7b6af353
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/Makefile
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ixgbe.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_ixgbe_version.map
+
+LIBABIVER := 2
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
+
+CFLAGS_ixgbe_rxtx.o += -diag-disable 3656
+
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+#
+# CFLAGS for clang
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+else
+#
+# CFLAGS for gcc
+#
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
+CFLAGS += -Wno-deprecated
+CFLAGS_ixgbe_common.o += -Wno-unused-but-set-variable
+CFLAGS_ixgbe_x550.o += -Wno-unused-but-set-variable
+endif
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_ixgbe_x550.o += -Wno-maybe-uninitialized
+endif
+
+ifeq ($(shell test $(GCC_VERSION) -ge 50 && echo 1), 1)
+CFLAGS_ixgbe_common.o += -Wno-logical-not-parentheses
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
+endif
+
+endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings in them
+#
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82598.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x540.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x550.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_hv_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82599.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82598.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_mbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_flow.c
+ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
+SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_neon.c
+else
+SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_sse.c
+endif
+ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_BYPASS),y)
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
+endif
+ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf_representor.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/README b/src/spdk/dpdk/drivers/net/ixgbe/base/README
new file mode 100644
index 00000000..70fdfe7c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/README
@@ -0,0 +1,62 @@
+..
+ BSD LICENSE
+
+ Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Intel® IXGBE driver
+===================
+
+This directory contains source code of FreeBSD ixgbe driver of version
+cid-ixgbe.2018.01.02.tar.gz released by the team which develop
+basic drivers for any ixgbe NIC. The sub-directory of base/
+contains the original source package.
+This driver is valid for the product(s) listed below
+
+* Intel® 10 Gigabit AF DA Dual Port Server Adapter
+* Intel® 10 Gigabit AT Server Adapter
+* Intel® 10 Gigabit AT2 Server Adapter
+* Intel® 10 Gigabit CX4 Dual Port Server Adapter
+* Intel® 10 Gigabit XF LR Server Adapter
+* Intel® 10 Gigabit XF SR Dual Port Server Adapter
+* Intel® 10 Gigabit XF SR Server Adapter
+* Intel® 82598 10 Gigabit Ethernet Controller
+* Intel® 82599 10 Gigabit Ethernet Controller
+* Intel® Ethernet Controller X540-AT2
+* Intel® Ethernet Server Adapter X520 Series
+* Intel® Ethernet Server Adapter X520-T2
+* Intel® Ethernet Controller X550 Series
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ ixgbe_osdep.h
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.c
new file mode 100644
index 00000000..ee7ce2e9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.c
@@ -0,0 +1,1440 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_82598.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+#define IXGBE_82598_MAX_TX_QUEUES 32
+#define IXGBE_82598_MAX_RX_QUEUES 64
+#define IXGBE_82598_RAR_ENTRIES 16
+#define IXGBE_82598_MC_TBL_SIZE 128
+#define IXGBE_82598_VFT_TBL_SIZE 128
+#define IXGBE_82598_RX_PB_SIZE 512
+
+STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete);
+STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete);
+STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
+STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy);
+STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
+/**
+ * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ *
+ * The defaults for 82598 should be in the range of 50us to 50ms,
+ * however the hardware default for these parts is 500us to 1ms which is less
+ * than the 10ms recommended by the pci-e spec. To address this we need to
+ * increase the value to either 10ms to 250ms for capability version 1 config,
+ * or 16ms to 55ms for version 2.
+ **/
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
+{
+ u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
+ u16 pcie_devctl2;
+
+ /* only take action if timeout value is defaulted to 0 */
+ if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
+ goto out;
+
+ /*
+ * if capababilities version is type 1 we can write the
+ * timeout of 10ms to 250ms through the GCR register
+ */
+ if (!(gcr & IXGBE_GCR_CAP_VER2)) {
+ gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
+ goto out;
+ }
+
+ /*
+ * for version 2 capabilities we need to write the config space
+ * directly in order to set the completion timeout value for
+ * 16ms to 55ms
+ */
+ pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
+ pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
+ IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+out:
+ /* disable completion timeout resend */
+ gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
+}
+
+/**
+ * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for 82598.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_82598");
+
+ ret_val = ixgbe_init_phy_ops_generic(hw);
+ ret_val = ixgbe_init_ops_generic(hw);
+
+ /* PHY */
+ phy->ops.init = ixgbe_init_phy_ops_82598;
+
+ /* MAC */
+ mac->ops.start_hw = ixgbe_start_hw_82598;
+ mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
+ mac->ops.reset_hw = ixgbe_reset_hw_82598;
+ mac->ops.get_media_type = ixgbe_get_media_type_82598;
+ mac->ops.get_supported_physical_layer =
+ ixgbe_get_supported_physical_layer_82598;
+ mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
+ mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
+ mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
+ mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
+ mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
+ mac->ops.set_vfta = ixgbe_set_vfta_82598;
+ mac->ops.set_vlvf = NULL;
+ mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
+
+ /* Flow Control */
+ mac->ops.fc_enable = ixgbe_fc_enable_82598;
+
+ mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
+ mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ /* SFP+ Module */
+ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
+ phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
+
+ /* Link */
+ mac->ops.check_link = ixgbe_check_mac_link_82598;
+ mac->ops.setup_link = ixgbe_setup_mac_link_82598;
+ mac->ops.flap_tx_laser = NULL;
+ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
+ mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
+
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = NULL;
+
+ mac->ops.get_rtrup2tc = NULL;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ *
+ **/
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val = IXGBE_SUCCESS;
+ u16 list_offset, data_offset;
+
+ DEBUGFUNC("ixgbe_init_phy_ops_82598");
+
+ /* Identify the PHY */
+ phy->ops.identify(hw);
+
+ /* Overwrite the link function pointers if copper PHY */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ mac->ops.setup_link = ixgbe_setup_copper_link_82598;
+ mac->ops.get_link_capabilities =
+ ixgbe_get_copper_link_capabilities_generic;
+ }
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
+ phy->ops.check_link = ixgbe_check_phy_link_tnx;
+ phy->ops.get_firmware_version =
+ ixgbe_get_phy_firmware_version_tnx;
+ break;
+ case ixgbe_phy_nl:
+ phy->ops.reset = ixgbe_reset_phy_nl;
+
+ /* Call SFP+ identify routine to get the SFP+ module type */
+ ret_val = phy->ops.identify_sfp(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+ else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
+ ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Check to see if SFP+ module is supported */
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
+ &list_offset,
+ &data_offset);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function.
+ * Disables relaxed ordering Then set pcie completion timeout
+ *
+ **/
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+{
+ u32 regval;
+ u32 i;
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_start_hw_82598");
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Disable relaxed ordering */
+ for (i = 0; ((i < hw->mac.max_tx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+ regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+ }
+
+ for (i = 0; ((i < hw->mac.max_rx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+ /* set the completion timeout for interface */
+ ixgbe_set_pcie_completion_timeout(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_link_capabilities_82598 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 autoc = 0;
+
+ DEBUGFUNC("ixgbe_get_link_capabilities_82598");
+
+ /*
+ * Determine link capabilities based on the stored value of AUTOC,
+ * which represents EEPROM defaults. If AUTOC value has not been
+ * stored, use the current register value.
+ */
+ if (hw->mac.orig_link_settings_stored)
+ autoc = hw->mac.orig_autoc;
+ else
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_1G_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_KX4_AN:
+ case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ default:
+ status = IXGBE_ERR_LINK_SETUP;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_media_type_82598 - Determines media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ DEBUGFUNC("ixgbe_get_media_type_82598");
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ media_type = ixgbe_media_type_copper;
+ goto out;
+ default:
+ break;
+ }
+
+ /* Media type for I82598 is based on device ID */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ case IXGBE_DEV_ID_82598_BX:
+ /* Default device ID is mezzanine card KX/KX4 */
+ media_type = ixgbe_media_type_backplane;
+ break;
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ case IXGBE_DEV_ID_82598EB_SFP_LOM:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ media_type = ixgbe_media_type_cx4;
+ break;
+ case IXGBE_DEV_ID_82598AT:
+ case IXGBE_DEV_ID_82598AT2:
+ media_type = ixgbe_media_type_copper;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+out:
+ return media_type;
+}
+
+/**
+ * ixgbe_fc_enable_82598 - Enable flow control
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to the current settings.
+ **/
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 fctrl_reg;
+ u32 rmcs_reg;
+ u32 reg;
+ u32 fcrtl, fcrth;
+ u32 link_speed = 0;
+ int i;
+ bool link_up;
+
+ DEBUGFUNC("ixgbe_fc_enable_82598");
+
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ DEBUGOUT("Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * On 82598 having Rx FC on causes resets while doing 1G
+ * so if it's on turn it off once we know link_speed. For
+ * more details see 82598 Specification update.
+ */
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ hw->fc.requested_mode = ixgbe_fc_tx_pause;
+ break;
+ case ixgbe_fc_rx_pause:
+ hw->fc.requested_mode = ixgbe_fc_none;
+ break;
+ default:
+ /* no change */
+ break;
+ }
+ }
+
+ /* Negotiate the fc mode to use */
+ ixgbe_fc_autoneg(hw);
+
+ /* Disable any previous flow control settings */
+ fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+
+ rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
+
+ /*
+ * The possible values of fc.current_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.current_mode) {
+ case ixgbe_fc_none:
+ /*
+ * Flow control is disabled by software override or autoneg.
+ * The code below will actually disable it in the HW.
+ */
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ fctrl_reg |= IXGBE_FCTRL_RFCE;
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ fctrl_reg |= IXGBE_FCTRL_RFCE;
+ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ break;
+ }
+
+ /* Set 802.3x based flow control settings. */
+ fctrl_reg |= IXGBE_FCTRL_DPF;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
+
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+ fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+ }
+
+ }
+
+ /* Configure pause time (2 TCs per register) */
+ reg = hw->fc.pause_time * 0x00010001;
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_start_mac_link_82598 - Configures MAC link settings
+ * @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Configures link settings based on values in the ixgbe_hw struct.
+ * Restarts the link. Performs autonegotiation if needed.
+ **/
+STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete)
+{
+ u32 autoc_reg;
+ u32 links_reg;
+ u32 i;
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_start_mac_link_82598");
+
+ /* Restart link */
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (autoneg_wait_to_complete) {
+ if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_AN ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ links_reg = 0; /* Just in case Autoneg time = 0 */
+ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+ break;
+ msec_delay(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ DEBUGOUT("Autonegotiation did not complete.\n");
+ }
+ }
+ }
+
+ /* Add delay to filter out noises during initial link setup */
+ msec_delay(50);
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_link_ready - Function looks for phy link
+ * @hw: pointer to hardware structure
+ *
+ * Function indicates success when phy link is available. If phy is not ready
+ * within 5 seconds of MAC indicating link, the function returns error.
+ **/
+STATIC s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+{
+ u32 timeout;
+ u16 an_reg;
+
+ if (hw->device_id != IXGBE_DEV_ID_82598AT2)
+ return IXGBE_SUCCESS;
+
+ for (timeout = 0;
+ timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
+
+ if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
+ (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
+ break;
+
+ msec_delay(100);
+ }
+
+ if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
+ DEBUGOUT("Link was indicated but link is down\n");
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_check_mac_link_82598 - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true is link is up, false otherwise
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete)
+{
+ u32 links_reg;
+ u32 i;
+ u16 link_reg, adapt_comp_reg;
+
+ DEBUGFUNC("ixgbe_check_mac_link_82598");
+
+ /*
+ * SERDES PHY requires us to read link status from undocumented
+ * register 0xC79F. Bit 0 set indicates link is up/ready; clear
+ * indicates link down. OxC00C is read to check that the XAUI lanes
+ * are active. Bit 0 clear indicates active; set indicates inactive.
+ */
+ if (hw->phy.type == ixgbe_phy_nl) {
+ hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
+ &adapt_comp_reg);
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ if ((link_reg & 1) &&
+ ((adapt_comp_reg & 1) == 0)) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msec_delay(100);
+ hw->phy.ops.read_reg(hw, 0xC79F,
+ IXGBE_TWINAX_DEV,
+ &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC00C,
+ IXGBE_TWINAX_DEV,
+ &adapt_comp_reg);
+ }
+ } else {
+ if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if (*link_up == false)
+ goto out;
+ }
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ if (links_reg & IXGBE_LINKS_UP) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msec_delay(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ }
+ } else {
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if (links_reg & IXGBE_LINKS_SPEED)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
+ (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
+ *link_up = false;
+
+out:
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_setup_mac_link_82598 - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ bool autoneg = false;
+ s32 status = IXGBE_SUCCESS;
+ ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc = curr_autoc;
+ u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+
+ DEBUGFUNC("ixgbe_setup_mac_link_82598");
+
+ /* Check to see if speed passed in is supported. */
+ ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
+ speed &= link_capabilities;
+
+ if (speed == IXGBE_LINK_SPEED_UNKNOWN)
+ status = IXGBE_ERR_LINK_SETUP;
+
+ /* Set KX4/KX support according to speed requested */
+ else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ autoc |= IXGBE_AUTOC_KX4_SUPP;
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ autoc |= IXGBE_AUTOC_KX_SUPP;
+ if (autoc != curr_autoc)
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ }
+
+ if (status == IXGBE_SUCCESS) {
+ /*
+ * Setup and restart the link based on the new values in
+ * ixgbe_hw This will write the AUTOC register based on the new
+ * stored values
+ */
+ status = ixgbe_start_mac_link_82598(hw,
+ autoneg_wait_to_complete);
+ }
+
+ return status;
+}
+
+
+/**
+ * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ * Sets the link speed in the AUTOC register in the MAC and restarts link.
+ **/
+STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_setup_copper_link_82598");
+
+ /* Setup the PHY according to input speed */
+ status = hw->phy.ops.setup_link_speed(hw, speed,
+ autoneg_wait_to_complete);
+ /* Set up MAC */
+ ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_hw_82598 - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts, performing a PHY reset, and performing a link (MAC)
+ * reset.
+ **/
+STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ s32 phy_status = IXGBE_SUCCESS;
+ u32 ctrl;
+ u32 gheccr;
+ u32 i;
+ u32 autoc;
+ u8 analog_val;
+
+ DEBUGFUNC("ixgbe_reset_hw_82598");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+
+ /*
+ * Power up the Atlas Tx lanes if they are currently powered down.
+ * Atlas Tx lanes are powered down for MAC loopback tests, but
+ * they are not automatically restored on reset.
+ */
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+ if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
+ /* Enable Tx Atlas so packets can be transmitted again */
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+ analog_val);
+ }
+
+ /* Reset PHY */
+ if (hw->phy.reset_disable == false) {
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Init PHY and function pointers, perform SFP setup */
+ phy_status = hw->phy.ops.init(hw);
+ if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+ if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto mac_reset_top;
+
+ hw->phy.ops.reset(hw);
+ }
+
+mac_reset_top:
+ /*
+ * Issue global reset to the MAC. This needs to be a SW reset.
+ * If link reset is used, it might reset the MAC when mng is using it
+ */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST))
+ break;
+ }
+ if (ctrl & IXGBE_CTRL_RST) {
+ status = IXGBE_ERR_RESET_FAILED;
+ DEBUGOUT("Reset polling failed to complete.\n");
+ }
+
+ msec_delay(50);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
+ gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+ IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
+
+ /*
+ * Store the original AUTOC value if it has not been
+ * stored off yet. Otherwise restore the stored original
+ * AUTOC value since the reset operation sets back to deaults.
+ */
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ if (hw->mac.orig_link_settings_stored == false) {
+ hw->mac.orig_autoc = autoc;
+ hw->mac.orig_link_settings_stored = true;
+ } else if (autoc != hw->mac.orig_autoc) {
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table
+ */
+ hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+ if (phy_status != IXGBE_SUCCESS)
+ status = phy_status;
+
+ return status;
+}
+
+/**
+ * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq set index
+ **/
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("ixgbe_set_vmdq_82598");
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+ rar_high &= ~IXGBE_RAH_VIND_MASK;
+ rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
+ **/
+STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ UNREFERENCED_1PARAMETER(vmdq);
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+ if (rar_high & IXGBE_RAH_VIND_MASK) {
+ rar_high &= ~IXGBE_RAH_VIND_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_vfta_82598 - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFTA
+ * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ * @vlvf_bypass: boolean flag - unused
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass)
+{
+ u32 regindex;
+ u32 bitindex;
+ u32 bits;
+ u32 vftabyte;
+
+ UNREFERENCED_1PARAMETER(vlvf_bypass);
+
+ DEBUGFUNC("ixgbe_set_vfta_82598");
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /* Determine 32-bit word position in array */
+ regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
+
+ /* Determine the location of the (VMD) queue index */
+ vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
+ bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
+
+ /* Set the nibble for VMD queue index */
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
+ bits &= (~(0x0F << bitindex));
+ bits |= (vind << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
+
+ /* Determine the location of the bit for this VLAN id */
+ bitindex = vlan & 0x1F; /* lower five bits */
+
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+ if (vlan_on)
+ /* Turn on this VLAN id */
+ bits |= (1 << bitindex);
+ else
+ /* Turn off this VLAN id */
+ bits &= ~(1 << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clear_vfta_82598 - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filer table, and the VMDq index associated with the filter
+ **/
+STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+{
+ u32 offset;
+ u32 vlanbyte;
+
+ DEBUGFUNC("ixgbe_clear_vfta_82598");
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+ for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
+ 0);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation to Atlas analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ u32 atlas_ctl;
+
+ DEBUGFUNC("ixgbe_read_analog_reg8_82598");
+
+ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+ IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(10);
+ atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+ *val = (u8)atlas_ctl;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: atlas register to write
+ * @val: value to write
+ *
+ * Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ u32 atlas_ctl;
+
+ DEBUGFUNC("ixgbe_write_analog_reg8_82598");
+
+ atlas_ctl = (reg << 8) | val;
+ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(10);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @dev_addr: address to read from
+ * @byte_offset: byte offset to read from dev_addr
+ * @eeprom_data: value read
+ *
+ * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+STATIC s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+ u8 byte_offset, u8 *eeprom_data)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 sfp_addr = 0;
+ u16 sfp_data = 0;
+ u16 sfp_stat = 0;
+ u16 gssr;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_read_i2c_phy_82598");
+
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ gssr = IXGBE_GSSR_PHY1_SM;
+ else
+ gssr = IXGBE_GSSR_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ if (hw->phy.type == ixgbe_phy_nl) {
+ /*
+ * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
+ * 0xC30D. These registers are used to talk to the SFP+
+ * module's EEPROM through the SDA/SCL (I2C) interface.
+ */
+ sfp_addr = (dev_addr << 8) + byte_offset;
+ sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
+ hw->phy.ops.write_reg_mdi(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ sfp_addr);
+
+ /* Poll status */
+ for (i = 0; i < 100; i++) {
+ hw->phy.ops.read_reg_mdi(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &sfp_stat);
+ sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
+ if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
+ break;
+ msec_delay(10);
+ }
+
+ if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
+ DEBUGOUT("EEPROM read did not pass.\n");
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ /* Read data */
+ hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
+
+ *eeprom_data = (u8)(sfp_data >> 8);
+ } else {
+ status = IXGBE_ERR_PHY;
+ }
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return status;
+}
+
+/**
+ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, eeprom_data);
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
+ byte_offset, sff8472_data);
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+{
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u16 ext_ability = 0;
+
+ DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
+
+ hw->phy.ops.identify(hw);
+
+ /* Copper PHY must be checked before AUTOC LMS to determine correct
+ * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_cu_unknown:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ goto out;
+ default:
+ break;
+ }
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_AN:
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ else
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+ break;
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+ else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ else /* XAUI */
+ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ break;
+ case IXGBE_AUTOC_LMS_KX4_AN:
+ case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ break;
+ default:
+ break;
+ }
+
+ if (hw->phy.type == ixgbe_phy_nl) {
+ hw->phy.ops.identify_sfp(hw);
+
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_da_cu:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case ixgbe_sfp_type_sr:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ break;
+ case ixgbe_sfp_type_lr:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ default:
+ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ break;
+ }
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ break;
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ default:
+ break;
+ }
+
+out:
+ return physical_layer;
+}
+
+/**
+ * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
+ * port devices.
+ * @hw: pointer to the HW structure
+ *
+ * Calls common function and corrects issue with some single port devices
+ * that enable LAN1 but not LAN0.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bus_info *bus = &hw->bus;
+ u16 pci_gen = 0;
+ u16 pci_ctrl2 = 0;
+
+ DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
+
+ ixgbe_set_lan_id_multi_port_pcie(hw);
+
+ /* check if LAN0 is disabled */
+ hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
+ if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
+
+ hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
+
+ /* if LAN0 is completely disabled force function to 0 */
+ if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
+ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
+ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
+
+ bus->func = 0;
+ }
+ }
+}
+
+/**
+ * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
+{
+ u32 regval;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
+
+ /* Enable relaxed ordering */
+ for (i = 0; ((i < hw->mac.max_tx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+ regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+ }
+
+ for (i = 0; ((i < hw->mac.max_rx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+}
+
+/**
+ * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy)
+{
+ u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
+ u8 i = 0;
+ UNREFERENCED_1PARAMETER(headroom);
+
+ if (!num_pb)
+ return;
+
+ /* Setup Rx packet buffer sizes */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+ /* Setup the first four at 80KB */
+ rxpktsize = IXGBE_RXPBSIZE_80KB;
+ for (; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Setup the last four at 48KB...don't re-init i */
+ rxpktsize = IXGBE_RXPBSIZE_48KB;
+ /* Fall Through */
+ case PBA_STRATEGY_EQUAL:
+ default:
+ /* Divide the remaining Rx packet buffer evenly among the TCs */
+ for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ }
+
+ /* Setup Tx packet buffer sizes */
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
+}
+
+/**
+ * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit
+ **/
+s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
+{
+ DEBUGFUNC("ixgbe_enable_rx_dma_82598");
+
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+
+ return IXGBE_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.h
new file mode 100644
index 00000000..20aab9fc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.h
@@ -0,0 +1,53 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_82598_H_
+#define _IXGBE_82598_H_
+
+u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
+void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
+ bool vlvf_bypass);
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval);
+#endif /* _IXGBE_82598_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c
new file mode 100644
index 00000000..26217212
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -0,0 +1,2640 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_82599.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+#define IXGBE_82599_MAX_TX_QUEUES 128
+#define IXGBE_82599_MAX_RX_QUEUES 128
+#define IXGBE_82599_RAR_ENTRIES 128
+#define IXGBE_82599_MC_TBL_SIZE 128
+#define IXGBE_82599_VFT_TBL_SIZE 128
+#define IXGBE_82599_RX_PB_SIZE 512
+
+STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+ u16 offset, u16 *data);
+STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
+
+ /*
+ * enable the laser control functions for SFP+ fiber
+ * and MNG not enabled
+ */
+ if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ !ixgbe_mng_enabled(hw)) {
+ mac->ops.disable_tx_laser =
+ ixgbe_disable_tx_laser_multispeed_fiber;
+ mac->ops.enable_tx_laser =
+ ixgbe_enable_tx_laser_multispeed_fiber;
+ mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
+
+ } else {
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
+ mac->ops.flap_tx_laser = NULL;
+ }
+
+ if (hw->phy.multispeed_fiber) {
+ /* Set up dual speed SFP+ support */
+ mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
+ mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
+ mac->ops.set_rate_select_speed =
+ ixgbe_set_hard_rate_select_speed;
+ if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
+ mac->ops.set_rate_select_speed =
+ ixgbe_set_soft_rate_select_speed;
+ } else {
+ if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
+ (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
+ hw->phy.smart_speed == ixgbe_smart_speed_on) &&
+ !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
+ } else {
+ mac->ops.setup_link = ixgbe_setup_mac_link_82599;
+ }
+ }
+}
+
+/**
+ * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ *
+ **/
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 esdp;
+
+ DEBUGFUNC("ixgbe_init_phy_ops_82599");
+
+ if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
+ /* Store flag indicating I2C bus access control unit. */
+ hw->phy.qsfp_shared_i2c_bus = TRUE;
+
+ /* Initialize access to QSFP+ I2C bus */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0_DIR;
+ esdp &= ~IXGBE_ESDP_SDP1_DIR;
+ esdp &= ~IXGBE_ESDP_SDP0;
+ esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
+ esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
+ phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
+ }
+ /* Identify the PHY or SFP module */
+ ret_val = phy->ops.identify(hw);
+ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto init_phy_ops_out;
+
+ /* Setup function pointers based on detected SFP module and speeds */
+ ixgbe_init_mac_link_ops_82599(hw);
+ if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
+ hw->phy.ops.reset = NULL;
+
+ /* If copper media, overwrite with copper function pointers */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ mac->ops.setup_link = ixgbe_setup_copper_link_82599;
+ mac->ops.get_link_capabilities =
+ ixgbe_get_copper_link_capabilities_generic;
+ }
+
+ /* Set necessary function pointers based on PHY type */
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
+ phy->ops.check_link = ixgbe_check_phy_link_tnx;
+ phy->ops.get_firmware_version =
+ ixgbe_get_phy_firmware_version_tnx;
+ break;
+ default:
+ break;
+ }
+init_phy_ops_out:
+ return ret_val;
+}
+
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u16 list_offset, data_offset, data_value;
+
+ DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
+
+ if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
+ ixgbe_init_mac_link_ops_82599(hw);
+
+ hw->phy.ops.reset = NULL;
+
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+ &data_offset);
+ if (ret_val != IXGBE_SUCCESS)
+ goto setup_sfp_out;
+
+ /* PHY config will finish before releasing the semaphore */
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto setup_sfp_out;
+ }
+
+ if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+ goto setup_sfp_err;
+ while (data_value != 0xffff) {
+ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
+ IXGBE_WRITE_FLUSH(hw);
+ if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+ goto setup_sfp_err;
+ }
+
+ /* Release the semaphore */
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ /* Delay obtaining semaphore again to allow FW access
+ * prot_autoc_write uses the semaphore too.
+ */
+ msec_delay(hw->eeprom.semaphore_delay);
+
+ /* Restart DSP and set SFI mode */
+ ret_val = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
+ false);
+
+ if (ret_val) {
+ DEBUGOUT("sfp module setup not complete\n");
+ ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ goto setup_sfp_out;
+ }
+
+ }
+
+setup_sfp_out:
+ return ret_val;
+
+setup_sfp_err:
+ /* Release the semaphore */
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ /* Delay obtaining semaphore again to allow FW access */
+ msec_delay(hw->eeprom.semaphore_delay);
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", data_offset);
+ return IXGBE_ERR_PHY;
+}
+
+/**
+ * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @locked: Return the if we locked for this read.
+ * @reg_val: Value we read from AUTOC
+ *
+ * For this part (82599) we need to wrap read-modify-writes with a possible
+ * FW/SW lock. It is assumed this lock will be freed with the next
+ * prot_autoc_write_82599().
+ */
+s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+ s32 ret_val;
+
+ *locked = false;
+ /* If LESM is on then we need to hold the SW/FW semaphore. */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ *locked = true;
+ }
+
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @autoc: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous proc_autoc_read_82599.
+ *
+ * This part (82599) may need to hold the SW/FW lock around all writes to
+ * AUTOC. Likewise after a write we need to do a pipeline reset.
+ */
+s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
+ /* We only need to get the lock if:
+ * - We didn't do it already (in the read part of a read-modify-write)
+ * - LESM is enabled.
+ */
+ if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ locked = true;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ ret_val = ixgbe_reset_pipeline_82599(hw);
+
+out:
+ /* Free the SW/FW semaphore as we either grabbed it here or
+ * already had it when this function was called.
+ */
+ if (locked)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for 82599.
+ * Does not touch the hardware.
+ **/
+
+s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_82599");
+
+ ixgbe_init_phy_ops_generic(hw);
+ ret_val = ixgbe_init_ops_generic(hw);
+
+ /* PHY */
+ phy->ops.identify = ixgbe_identify_phy_82599;
+ phy->ops.init = ixgbe_init_phy_ops_82599;
+
+ /* MAC */
+ mac->ops.reset_hw = ixgbe_reset_hw_82599;
+ mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
+ mac->ops.get_media_type = ixgbe_get_media_type_82599;
+ mac->ops.get_supported_physical_layer =
+ ixgbe_get_supported_physical_layer_82599;
+ mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
+ mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
+ mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
+ mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
+ mac->ops.start_hw = ixgbe_start_hw_82599;
+ mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
+ mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
+ mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
+ mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
+ mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
+ mac->ops.prot_autoc_read = prot_autoc_read_82599;
+ mac->ops.prot_autoc_write = prot_autoc_write_82599;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
+ mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
+ mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
+ mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
+ mac->rar_highwater = 1;
+ mac->ops.set_vfta = ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
+ mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
+ mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
+ mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
+ mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
+ mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
+
+ /* Link */
+ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
+ mac->ops.check_link = ixgbe_check_mac_link_generic;
+ mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
+ ixgbe_init_mac_link_ops_82599(hw);
+
+ mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
+ mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
+ & IXGBE_FWSM_MODE_MASK);
+
+ hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+
+ /* EEPROM */
+ eeprom->ops.read = ixgbe_read_eeprom_82599;
+ eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;
+
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
+
+ mac->ops.get_thermal_sensor_data =
+ ixgbe_get_thermal_sensor_data_generic;
+ mac->ops.init_thermal_sensor_thresh =
+ ixgbe_init_thermal_sensor_thresh_generic;
+
+ mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_link_capabilities_82599 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 autoc = 0;
+
+ DEBUGFUNC("ixgbe_get_link_capabilities_82599");
+
+
+ /* Check if 1G SFP module. */
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ goto out;
+ }
+
+ /*
+ * Determine link capabilities based on the stored value of AUTOC,
+ * which represents EEPROM defaults. If AUTOC value has not
+ * been stored, use the current register values.
+ */
+ if (hw->mac.orig_link_settings_stored)
+ autoc = hw->mac.orig_autoc;
+ else
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_1G_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_10G_SERIAL:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_KX4_KX_KR:
+ case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_SGMII_1G_100M:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
+ *autoneg = false;
+ break;
+
+ default:
+ status = IXGBE_ERR_LINK_SETUP;
+ goto out;
+ break;
+ }
+
+ if (hw->phy.multispeed_fiber) {
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* QSFP must not enable full auto-negotiation
+ * Limited autoneg is enabled at 1G
+ */
+ if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
+ *autoneg = false;
+ else
+ *autoneg = true;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_get_media_type_82599 - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ DEBUGFUNC("ixgbe_get_media_type_82599");
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ media_type = ixgbe_media_type_copper;
+ goto out;
+ default:
+ break;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_KX4_MEZZ:
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_KR:
+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+ case IXGBE_DEV_ID_82599_XAUI_LOM:
+ /* Default device ID is mezzanine card KX/KX4 */
+ media_type = ixgbe_media_type_backplane;
+ break;
+ case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599EN_SFP:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ case IXGBE_DEV_ID_82599_CX4:
+ media_type = ixgbe_media_type_cx4;
+ break;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ media_type = ixgbe_media_type_copper;
+ break;
+ case IXGBE_DEV_ID_82599_LS:
+ media_type = ixgbe_media_type_fiber_lco;
+ break;
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ media_type = ixgbe_media_type_fiber_qsfp;
+ break;
+ case IXGBE_DEV_ID_82599_BYPASS:
+ media_type = ixgbe_media_type_fiber_fixed;
+ hw->phy.multispeed_fiber = true;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+out:
+ return media_type;
+}
+
+/**
+ * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
+ * @hw: pointer to hardware structure
+ *
+ * Disables link during D3 power down sequence.
+ *
+ **/
+void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
+{
+ u32 autoc2_reg;
+ u16 ee_ctrl_2 = 0;
+
+ DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
+ ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
+
+ if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
+ ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
+ autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+ }
+}
+
+/**
+ * ixgbe_start_mac_link_82599 - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Configures link settings based on values in the ixgbe_hw struct.
+ * Restarts the link. Performs autonegotiation if needed.
+ **/
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete)
+{
+ u32 autoc_reg;
+ u32 links_reg;
+ u32 i;
+ s32 status = IXGBE_SUCCESS;
+ bool got_lock = false;
+
+ DEBUGFUNC("ixgbe_start_mac_link_82599");
+
+
+ /* reset_pipeline requires us to hold this lock as it writes to
+ * AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ got_lock = true;
+ }
+
+ /* Restart link */
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (autoneg_wait_to_complete) {
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ links_reg = 0; /* Just in case Autoneg time = 0 */
+ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+ break;
+ msec_delay(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ DEBUGOUT("Autoneg did not complete.\n");
+ }
+ }
+ }
+
+ /* Add delay to filter out noises during initial link setup */
+ msec_delay(50);
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively shutting down the Tx
+ * laser on the PHY, effectively halting physical link.
+ **/
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
+ /* Disable Tx laser; allow 100us to go dark per spec */
+ esdp_reg |= IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(100);
+}
+
+/**
+ * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively turning on the Tx
+ * laser on the PHY, effectively starting physical link.
+ **/
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Enable Tx laser; allow 100ms to light up */
+ esdp_reg &= ~IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(100);
+}
+
+/**
+ * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * When the driver changes the link speeds that it can support,
+ * it sets autotry_restart to true to indicate that we need to
+ * initiate a new autotry session with the link partner. To do
+ * so, we set the speed then disable and re-enable the Tx laser, to
+ * alert the link partner that it also needs to restart autotry on its
+ * end. This is consistent with true clause 37 autoneg, which also
+ * involves a loss of signal.
+ **/
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
+ if (hw->mac.autotry_restart) {
+ ixgbe_disable_tx_laser_multispeed_fiber(hw);
+ ixgbe_enable_tx_laser_multispeed_fiber(hw);
+ hw->mac.autotry_restart = false;
+ }
+}
+
+/**
+ * ixgbe_set_hard_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * Set module link speed via RS0/RS1 rate select pins.
+ */
+void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ esdp_reg &= ~IXGBE_ESDP_SDP5;
+ esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+ break;
+ default:
+ DEBUGOUT("Invalid fixed module speed\n");
+ return;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Implements the Intel SmartSpeed algorithm.
+ **/
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = IXGBE_SUCCESS;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ s32 i, j;
+ bool link_up = false;
+ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
+
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+ /*
+ * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
+ * autoneg advertisement if link is unable to be established at the
+ * highest negotiated rate. This can sometimes happen due to integrity
+ * issues with the physical media connection.
+ */
+
+ /* First, try to get link with full advertisement */
+ hw->phy.smart_speed_active = false;
+ for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
+ status = ixgbe_setup_mac_link_82599(hw, speed,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
+ * Table 9 in the AN MAS.
+ */
+ for (i = 0; i < 5; i++) {
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up,
+ false);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ /*
+ * We didn't get link. If we advertised KR plus one of KX4/KX
+ * (or BX4/BX), then disable KR and try again.
+ */
+ if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
+ ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
+ goto out;
+
+ /* Turn SmartSpeed on to disable KR support */
+ hw->phy.smart_speed_active = true;
+ status = ixgbe_setup_mac_link_82599(hw, speed,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. 600ms will allow for
+ * the AN link_fail_inhibit_timer as well for multiple cycles of
+ * parallel detect, both 10g and 1g. This allows for the maximum
+ * connect attempts as defined in the AN MAS table 73-7.
+ */
+ for (i = 0; i < 6; i++) {
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if (link_up)
+ goto out;
+ }
+
+ /* We didn't get link. Turn SmartSpeed back off. */
+ hw->phy.smart_speed_active = false;
+ status = ixgbe_setup_mac_link_82599(hw, speed,
+ autoneg_wait_to_complete);
+
+out:
+ if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+ DEBUGOUT("Smartspeed has downgraded the link speed "
+ "from the maximum advertised\n");
+ return status;
+}
+
+/**
+ * ixgbe_setup_mac_link_82599 - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ bool autoneg = false;
+ s32 status = IXGBE_SUCCESS;
+ u32 pma_pmd_1g, link_mode;
+ u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */
+ u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
+ u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+ u32 links_reg;
+ u32 i;
+ ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+
+ DEBUGFUNC("ixgbe_setup_mac_link_82599");
+
+ /* Check to see if speed passed in is supported. */
+ status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
+ if (status)
+ goto out;
+
+ speed &= link_capabilities;
+
+ if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
+ status = IXGBE_ERR_LINK_SETUP;
+ goto out;
+ }
+
+ /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
+ if (hw->mac.orig_link_settings_stored)
+ orig_autoc = hw->mac.orig_autoc;
+ else
+ orig_autoc = autoc;
+
+ link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+ pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+
+ if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ /* Set KX4/KX/KR support according to speed requested */
+ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+ autoc |= IXGBE_AUTOC_KX4_SUPP;
+ if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
+ (hw->phy.smart_speed_active == false))
+ autoc |= IXGBE_AUTOC_KR_SUPP;
+ }
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ autoc |= IXGBE_AUTOC_KX_SUPP;
+ } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
+ (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+ link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+ /* Switch from 1G SFI to 10G SFI if requested */
+ if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
+ (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
+ autoc &= ~IXGBE_AUTOC_LMS_MASK;
+ autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
+ }
+ } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
+ (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+ /* Switch from 10G SFI to 1G SFI if requested */
+ if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
+ (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
+ autoc &= ~IXGBE_AUTOC_LMS_MASK;
+ if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
+ autoc |= IXGBE_AUTOC_LMS_1G_AN;
+ else
+ autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
+ }
+ }
+
+ if (autoc != current_autoc) {
+ /* Restart link */
+ status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (autoneg_wait_to_complete) {
+ if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ links_reg = 0; /*Just in case Autoneg time=0*/
+ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+ links_reg =
+ IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+ break;
+ msec_delay(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status =
+ IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ DEBUGOUT("Autoneg did not complete.\n");
+ }
+ }
+ }
+
+ /* Add delay to filter out noises during initial link setup */
+ msec_delay(50);
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ * Restarts link on PHY and MAC based on settings passed in.
+ **/
+STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_setup_copper_link_82599");
+
+ /* Setup the PHY according to input speed */
+ status = hw->phy.ops.setup_link_speed(hw, speed,
+ autoneg_wait_to_complete);
+ /* Set up MAC */
+ ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_hw_82599 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
+ * reset.
+ **/
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+{
+ ixgbe_link_speed link_speed;
+ s32 status;
+ u32 ctrl = 0;
+ u32 i, autoc, autoc2;
+ u32 curr_lms;
+ bool link_up = false;
+
+ DEBUGFUNC("ixgbe_reset_hw_82599");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Identify PHY and related function pointers */
+ status = hw->phy.ops.init(hw);
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+
+ /* Setup SFP module if there is one present. */
+ if (hw->phy.sfp_setup_needed) {
+ status = hw->mac.ops.setup_sfp(hw);
+ hw->phy.sfp_setup_needed = false;
+ }
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+
+ /* Reset PHY */
+ if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
+ hw->phy.ops.reset(hw);
+
+ /* remember AUTOC from before we reset */
+ curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
+
+mac_reset_top:
+ /*
+ * Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
+ */
+ ctrl = IXGBE_CTRL_LNK_RST;
+ if (!hw->force_full_reset) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up)
+ ctrl = IXGBE_CTRL_RST;
+ }
+
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear meaning reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ DEBUGOUT("Reset polling failed to complete.\n");
+ }
+
+ msec_delay(50);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to
+ * allow time for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /*
+ * Store the original AUTOC/AUTOC2 values if they have not been
+ * stored off yet. Otherwise restore the stored original
+ * values since the reset operation sets back to defaults.
+ */
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+
+ /* Enable link if disabled in NVM */
+ if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+ autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ if (hw->mac.orig_link_settings_stored == false) {
+ hw->mac.orig_autoc = autoc;
+ hw->mac.orig_autoc2 = autoc2;
+ hw->mac.orig_link_settings_stored = true;
+ } else {
+
+ /* If MNG FW is running on a multi-speed device that
+ * doesn't autoneg with out driver support we need to
+ * leave LMS in the state it was before we MAC reset.
+ * Likewise if we support WoL we don't want change the
+ * LMS state.
+ */
+ if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
+ hw->wol_enabled)
+ hw->mac.orig_autoc =
+ (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
+ curr_lms;
+
+ if (autoc != hw->mac.orig_autoc) {
+ status = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc,
+ false);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+ }
+
+ if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
+ (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
+ autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
+ autoc2 |= (hw->mac.orig_autoc2 &
+ IXGBE_AUTOC2_UPPER_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+ }
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
+ hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
+ IXGBE_CLEAR_VMDQ_ALL);
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
+reset_hw_out:
+ return status;
+}
+
+/**
+ * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
+ * @hw: pointer to hardware structure
+ * @fdircmd: current value of FDIRCMD register
+ */
+STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+ *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+ if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+ return IXGBE_SUCCESS;
+ usec_delay(10);
+ }
+
+ return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
+
+/**
+ * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+{
+ s32 err;
+ int i;
+ u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ u32 fdircmd;
+ fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
+
+ DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
+
+ /*
+ * Before starting reinitialization process,
+ * FDIRCMD.CMD must be zero.
+ */
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
+ return err;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
+ IXGBE_WRITE_FLUSH(hw);
+ /*
+ * 82599 adapters flow director init flow cannot be restarted,
+ * Workaround 82599 silicon errata by performing the following steps
+ * before re-writing the FDIRCTRL control register with the same value.
+ * - write 1 to bit 8 of FDIRCMD register &
+ * - write 0 to bit 8 of FDIRCMD register
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+ IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ ~IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ /*
+ * Clear FDIR Hash register to clear any leftover hashes
+ * waiting to be programmed.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll init-done after we write FDIRCTRL register */
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ msec_delay(1);
+ }
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+ DEBUGOUT("Flow Director Signature poll time exceeded!\n");
+ return IXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
+ /* Clear FDIR statistics registers (read to clear) */
+ IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+ IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
+ IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register
+ **/
+STATIC void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+ int i;
+
+ DEBUGFUNC("ixgbe_fdir_enable_82599");
+
+ /* Prime the keys for hashing */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ /*
+ * Poll init-done after we write the register. Estimated times:
+ * 10G: PBALLOC = 11b, timing is 60us
+ * 1G: PBALLOC = 11b, timing is 600us
+ * 100M: PBALLOC = 11b, timing is 6ms
+ *
+ * Multiple these timings by 4 if under full Rx load
+ *
+ * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+ * 1 msec per poll time. If we're at line rate and drop to 100M, then
+ * this might not finish in our poll time, but we can live with that
+ * for now.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ msec_delay(1);
+ }
+
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+ DEBUGOUT("Flow Director poll time exceeded!\n");
+}
+
+/**
+ * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
+ **/
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+ DEBUGFUNC("ixgbe_init_fdir_signature_82599");
+
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 filters are left
+ */
+ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
+ * @cloud_mode: true - cloud mode, false - other mode
+ **/
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
+ bool cloud_mode)
+{
+ UNREFERENCED_1PARAMETER(cloud_mode);
+ DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
+
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Turn perfect match filtering on
+ * Report hash in RSS field of Rx wb descriptor
+ * Initialize the drop queue to queue 127
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 (0x4 * 16) filters are left
+ */
+ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
+ IXGBE_FDIRCTRL_REPORT_STATUS |
+ (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
+ (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+ if (cloud_mode)
+ fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
+ IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
+ * @hw: pointer to hardware structure
+ * @dropqueue: Rx queue index used for the dropped packets
+ **/
+void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
+{
+ u32 fdirctrl;
+
+ DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
+ /* Clear init done bit and drop queue field */
+ fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);
+
+ /* Set drop queue */
+ fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+ if ((hw->mac.type == ixgbe_mac_X550) ||
+ (hw->mac.type == ixgbe_mac_X550EM_x) ||
+ (hw->mac.type == ixgbe_mac_X550EM_a))
+ fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+ IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ ~IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
+}
+
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+ (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ common_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ sig_hash ^= lo_hash_dword << (16 - n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ common_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0)
+
+/**
+ * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ * @input: input bitstream to compute the hash on
+ * @common: compressed common input dword
+ *
+ * This function is almost identical to the function above but contains
+ * several optimizations such as unwinding all of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing two keys at once since the hashed dword stream
+ * will be the same for both keys.
+ **/
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common)
+{
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = IXGBE_NTOHL(input.dword);
+
+ /* generate common hash dword */
+ hi_hash_dword = IXGBE_NTOHL(common.dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the VLAN until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process remaining 30 bit of the key */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+ /* combine common_hash result with signature and bucket hashes */
+ bucket_hash ^= common_hash;
+ bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+ sig_hash ^= common_hash << 16;
+ sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+ /* return completed signature hash */
+ return sig_hash ^ bucket_hash;
+}
+
+/**
+ * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @input: unique input dword
+ * @common: compressed common input dword
+ * @queue: queue index to direct traffic to
+ *
+ * Note that the tunnel bit in input must not be set when the hardware
+ * tunneling support does not exist.
+ **/
+void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue)
+{
+ u64 fdirhashcmd;
+ u8 flow_type;
+ bool tunnel;
+ u32 fdircmd;
+
+ DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
+
+ /*
+ * Get the flow_type in order to program FDIRCMD properly
+ * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
+ * fifth is FDIRCMD.TUNNEL_FILTER
+ */
+ tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
+ flow_type = input.formatted.flow_type &
+ (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
+ switch (flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case IXGBE_ATR_FLOW_TYPE_TCPV6:
+ case IXGBE_ATR_FLOW_TYPE_UDPV6:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV6:
+ break;
+ default:
+ DEBUGOUT(" Error on flow type input\n");
+ return;
+ }
+
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ if (tunnel)
+ fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
+
+ /*
+ * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+ * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ */
+ fdirhashcmd = (u64)fdircmd << 32;
+ fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
+ IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+
+ DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
+ return;
+}
+
+#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+} while (0)
+
+/**
+ * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
+ * @input: input bitstream to compute the hash on
+ * @input_mask: mask for the input bitstream
+ *
+ * This function serves two main purposes. First it applies the input_mask
+ * to the atr_input resulting in a cleaned up atr_input data stream.
+ * Secondly it computes the hash and stores it in the bkt_hash field at
+ * the end of the input byte stream. This way it will be available for
+ * future use without needing to recompute the hash.
+ **/
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *input_mask)
+{
+
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 bucket_hash = 0;
+ u32 hi_dword = 0;
+ u32 i = 0;
+
+ /* Apply masks to input data */
+ for (i = 0; i < 14; i++)
+ input->dword_stream[i] &= input_mask->dword_stream[i];
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
+
+ /* generate common hash dword */
+ for (i = 1; i <= 13; i++)
+ hi_dword ^= input->dword_stream[i];
+ hi_hash_dword = IXGBE_NTOHL(hi_dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the VLAN until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process remaining 30 bit of the key */
+ for (i = 1; i <= 15; i++)
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
+
+ /*
+ * Limit hash to 13 bits since max bucket count is 8K.
+ * Store result at the end of the input stream.
+ */
+ input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+}
+
+/**
+ * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
+ * @input_mask: mask to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit swapped
+ * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to
+ * generate a correctly swapped value we need to bit swap the mask and that
+ * is what is accomplished by this function.
+ **/
+STATIC u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
+{
+ u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
+ mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+ mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
+
+/*
+ * These two macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian. As a result on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+ (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+ (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+ IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+ IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask, bool cloud_mode)
+{
+ /* mask IPv6 since it is currently not supported */
+ u32 fdirm = IXGBE_FDIRM_DIPv6;
+ u32 fdirtcpm;
+ u32 fdirip6m;
+ UNREFERENCED_1PARAMETER(cloud_mode);
+ DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+
+ /* verify bucket hash is cleared on hash generation */
+ if (input_mask->formatted.bkt_hash)
+ DEBUGOUT(" bucket hash should always be 0 in mask\n");
+
+ /* Program FDIRM and verify partial masks */
+ switch (input_mask->formatted.vm_pool & 0x7F) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_POOL;
+ case 0x7F:
+ break;
+ default:
+ DEBUGOUT(" Error on vm pool mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_L4P;
+ if (input_mask->formatted.dst_port ||
+ input_mask->formatted.src_port) {
+ DEBUGOUT(" Error on src/dst port mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ case IXGBE_ATR_L4TYPE_MASK:
+ break;
+ default:
+ DEBUGOUT(" Error on flow type mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
+ case 0x0000:
+ /* mask VLAN ID */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ /* fall through */
+ case 0x0FFF:
+ /* mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ break;
+ case 0xE000:
+ /* mask VLAN ID only */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ /* fall through */
+ case 0xEFFF:
+ /* no VLAN fields masked */
+ break;
+ default:
+ DEBUGOUT(" Error on VLAN mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+ case 0x0000:
+ /* Mask Flex Bytes */
+ fdirm |= IXGBE_FDIRM_FLEX;
+ /* fall through */
+ case 0xFFFF:
+ break;
+ default:
+ DEBUGOUT(" Error on flexible byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ if (cloud_mode) {
+ fdirm |= IXGBE_FDIRM_L3P;
+ fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
+ fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
+
+ switch (input_mask->formatted.inner_mac[0] & 0xFF) {
+ case 0x00:
+ /* Mask inner MAC, fall through */
+ fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
+ case 0xFF:
+ break;
+ default:
+ DEBUGOUT(" Error on inner_mac byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
+ case 0x0:
+ /* Mask vxlan id */
+ fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
+ break;
+ case 0x00FFFFFF:
+ fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
+ break;
+ case 0xFFFFFFFF:
+ break;
+ default:
+ DEBUGOUT(" Error on TNI/VNI byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.tunnel_type & 0xFFFF) {
+ case 0x0:
+ /* Mask turnnel type, fall through */
+ fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
+ case 0xFFFF:
+ break;
+ default:
+ DEBUGOUT(" Error on tunnel type byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
+
+ /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
+ * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
+ * L3/L3 packets to tunnel.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+ if (!cloud_mode) {
+ /* store the TCP/UDP port masks, bit reversed from port
+ * layout */
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+ /* also use it for SCTP */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
+ break;
+ default:
+ break;
+ }
+
+ /* store source and destination IP masks (big-enian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+ ~input_mask->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+ ~input_mask->formatted.dst_ip[0]);
+ }
+ return IXGBE_SUCCESS;
+}
+
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue, bool cloud_mode)
+{
+ u32 fdirport, fdirvlan, fdirhash, fdircmd;
+ u32 addr_low, addr_high;
+ u32 cloud_type = 0;
+ s32 err;
+ UNREFERENCED_1PARAMETER(cloud_mode);
+
+ DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
+ if (!cloud_mode) {
+ /* currently IPv6 is not supported, must be programmed with 0 */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+ input->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+ input->formatted.src_ip[1]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+ input->formatted.src_ip[2]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
+ input->formatted.src_ip[0]);
+
+ /* record the first 32 bits of the destination address
+ * (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
+ input->formatted.dst_ip[0]);
+
+ /* record source and destination port (little-endian)*/
+ fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+ }
+
+ /* record VLAN (little-endian) and flex_bytes(big-endian) */
+ fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+ if (cloud_mode) {
+ if (input->formatted.tunnel_type != 0)
+ cloud_type = 0x80000000;
+
+ addr_low = ((u32)input->formatted.inner_mac[0] |
+ ((u32)input->formatted.inner_mac[1] << 8) |
+ ((u32)input->formatted.inner_mac[2] << 16) |
+ ((u32)input->formatted.inner_mac[3] << 24));
+ addr_high = ((u32)input->formatted.inner_mac[4] |
+ ((u32)input->formatted.inner_mac[5] << 8));
+ cloud_type |= addr_high;
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
+ }
+
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /*
+ * flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ if (queue == IXGBE_FDIR_DROP_QUEUE)
+ fdircmd |= IXGBE_FDIRCMD_DROP;
+ if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
+ fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director command did not complete!\n");
+ return err;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id)
+{
+ u32 fdirhash;
+ u32 fdircmd;
+ s32 err;
+
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /* flush hash to HW */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Query if filter is present */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director command did not complete!\n");
+ return err;
+ }
+
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
+ * @hw: pointer to hardware structure
+ * @input: input bitstream
+ * @input_mask: mask for the input bitstream
+ * @soft_id: software index for the filters
+ * @queue: queue index to direct traffic to
+ * @cloud_mode: unused
+ *
+ * Note that the caller to this function must lock before calling, since the
+ * hardware writes must be protected from one another.
+ **/
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ union ixgbe_atr_input *input_mask,
+ u16 soft_id, u8 queue, bool cloud_mode)
+{
+ s32 err = IXGBE_ERR_CONFIG;
+ UNREFERENCED_1PARAMETER(cloud_mode);
+
+ DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
+
+ /*
+ * Check flow_type formatting, and bail out before we touch the hardware
+ * if there's a configuration issue
+ */
+ switch (input->formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_IPV4:
+ case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
+ input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
+ if (input->formatted.dst_port || input->formatted.src_port) {
+ DEBUGOUT(" Error on src/dst port\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ break;
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
+ if (input->formatted.dst_port || input->formatted.src_port) {
+ DEBUGOUT(" Error on src/dst port\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ /* fall through */
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
+ input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+ break;
+ default:
+ DEBUGOUT(" Error on flow type input\n");
+ return err;
+ }
+
+ /* program input mask into the HW */
+ err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
+ if (err)
+ return err;
+
+ /* apply mask and compute/store hash */
+ ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
+
+ /* program filters to filter memory */
+ return ixgbe_fdir_write_perfect_filter_82599(hw, input,
+ soft_id, queue, cloud_mode);
+}
+
+/**
+ * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation to Omer analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ u32 core_ctl;
+
+ DEBUGFUNC("ixgbe_read_analog_reg8_82599");
+
+ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
+ (reg << 8));
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(10);
+ core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
+ *val = (u8)core_ctl;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
+ * @hw: pointer to hardware structure
+ * @reg: atlas register to write
+ * @val: value to write
+ *
+ * Performs write operation to Omer analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ u32 core_ctl;
+
+ DEBUGFUNC("ixgbe_write_analog_reg8_82599");
+
+ core_ctl = (reg << 8) | val;
+ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(10);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function
+ * and the generation start_hw function.
+ * Then performs revision-specific operations, if any.
+ **/
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_start_hw_82599");
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ ret_val = ixgbe_start_hw_gen2(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ /* We need to run link autotry after the driver loads */
+ hw->mac.autotry_restart = true;
+
+ if (ret_val == IXGBE_SUCCESS)
+ ret_val = ixgbe_verify_fw_version_82599(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_identify_phy_82599 - Get physical layer module
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ * If PHY already detected, maintains current PHY type in hw struct,
+ * otherwise executes the PHY detection routine.
+ **/
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_identify_phy_82599");
+
+ /* Detect PHY if not unknown - returns success if already detected. */
+ status = ixgbe_identify_phy_generic(hw);
+ if (status != IXGBE_SUCCESS) {
+ /* 82599 10GBASE-T requires an external PHY */
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
+ return status;
+ else
+ status = ixgbe_identify_module_generic(hw);
+ }
+
+ /* Set PHY type none if no PHY detected */
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.type = ixgbe_phy_none;
+ return IXGBE_SUCCESS;
+ }
+
+ /* Return error if SFP module has been detected but is not supported */
+ if (hw->phy.type == ixgbe_phy_sfp_unsupported)
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ return status;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
+{
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+ u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u16 ext_ability = 0;
+
+ DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
+
+ hw->phy.ops.identify(hw);
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_cu_unknown:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ goto out;
+ default:
+ break;
+ }
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_AN:
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
+ IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+ goto out;
+ } else
+ /* SFI mode so read SFP module */
+ goto sfp_check;
+ break;
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+ else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
+ goto out;
+ break;
+ case IXGBE_AUTOC_LMS_10G_SERIAL:
+ if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ goto out;
+ } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
+ goto sfp_check;
+ break;
+ case IXGBE_AUTOC_LMS_KX4_KX_KR:
+ case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ goto out;
+ break;
+ default:
+ goto out;
+ break;
+ }
+
+sfp_check:
+ /* SFP check must be done last since DA modules are sometimes used to
+ * test KR mode - we need to id KR mode correctly before SFP module.
+ * Call identify_sfp because the pluggable module may have changed */
+ physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
+out:
+ return physical_layer;
+}
+
+/**
+ * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit for 82599
+ **/
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+{
+
+ DEBUGFUNC("ixgbe_enable_rx_dma_82599");
+
+ /*
+ * Workaround for 82599 silicon errata when enabling the Rx datapath.
+ * If traffic is incoming before we enable the Rx unit, it could hang
+ * the Rx DMA unit. Therefore, make sure the security engine is
+ * completely disabled prior to enabling the Rx unit.
+ */
+
+ hw->mac.ops.disable_sec_rx_path(hw);
+
+ if (regval & IXGBE_RXCTRL_RXEN)
+ ixgbe_enable_rx(hw);
+ else
+ ixgbe_disable_rx(hw);
+
+ hw->mac.ops.enable_sec_rx_path(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_verify_fw_version_82599 - verify FW version for 82599
+ * @hw: pointer to hardware structure
+ *
+ * Verifies that installed the firmware version is 0.6 or higher
+ * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
+ *
+ * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
+ * if the FW version is not supported.
+ **/
+STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM_VERSION;
+ u16 fw_offset, fw_ptp_cfg_offset;
+ u16 fw_version;
+
+ DEBUGFUNC("ixgbe_verify_fw_version_82599");
+
+ /* firmware check is only necessary for SFI devices */
+ if (hw->phy.media_type != ixgbe_media_type_fiber) {
+ status = IXGBE_SUCCESS;
+ goto fw_version_out;
+ }
+
+ /* get the offset to the Firmware Module block */
+ if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", IXGBE_FW_PTR);
+ return IXGBE_ERR_EEPROM_VERSION;
+ }
+
+ if ((fw_offset == 0) || (fw_offset == 0xFFFF))
+ goto fw_version_out;
+
+ /* get the offset to the Pass Through Patch Configuration block */
+ if (hw->eeprom.ops.read(hw, (fw_offset +
+ IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
+ &fw_ptp_cfg_offset)) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed",
+ fw_offset +
+ IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
+ return IXGBE_ERR_EEPROM_VERSION;
+ }
+
+ if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
+ goto fw_version_out;
+
+ /* get the firmware version */
+ if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
+ IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed",
+ fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
+ return IXGBE_ERR_EEPROM_VERSION;
+ }
+
+ if (fw_version > 0x5)
+ status = IXGBE_SUCCESS;
+
+fw_version_out:
+ return status;
+}
+
+/**
+ * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if the LESM FW module is present and enabled. Otherwise
+ * returns false. Smart Speed must be disabled if LESM FW module is enabled.
+ **/
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+{
+ bool lesm_enabled = false;
+ u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
+
+ /* get the offset to the Firmware Module block */
+ status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+ if ((status != IXGBE_SUCCESS) ||
+ (fw_offset == 0) || (fw_offset == 0xFFFF))
+ goto out;
+
+ /* get the offset to the LESM Parameters block */
+ status = hw->eeprom.ops.read(hw, (fw_offset +
+ IXGBE_FW_LESM_PARAMETERS_PTR),
+ &fw_lesm_param_offset);
+
+ if ((status != IXGBE_SUCCESS) ||
+ (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
+ goto out;
+
+ /* get the LESM state word */
+ status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
+ IXGBE_FW_LESM_STATE_1),
+ &fw_lesm_state);
+
+ if ((status == IXGBE_SUCCESS) &&
+ (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
+ lesm_enabled = true;
+
+out:
+ return lesm_enabled;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Retrieves 16 bit word(s) read from EEPROM
+ **/
+STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
+
+ /*
+ * If EEPROM is detected and can be addressed using 14 bits,
+ * use EERD otherwise use bit bang
+ */
+ if ((eeprom->type == ixgbe_eeprom_spi) &&
+ (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
+ ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
+ data);
+ else
+ ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
+ words,
+ data);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_eeprom_82599 - Read EEPROM word using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM
+ **/
+STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+ u16 offset, u16 *data)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ DEBUGFUNC("ixgbe_read_eeprom_82599");
+
+ /*
+ * If EEPROM is detected and can be addressed using 14 bits,
+ * use EERD otherwise use bit bang
+ */
+ if ((eeprom->type == ixgbe_eeprom_spi) &&
+ (offset <= IXGBE_EERD_MAX_ADDR))
+ ret_val = ixgbe_read_eerd_generic(hw, offset, data);
+ else
+ ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_reset_pipeline_82599 - perform pipeline reset
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Reset pipeline by asserting Restart_AN together with LMS change to ensure
+ * full pipeline reset. This function assumes the SW/FW lock is held.
+ **/
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+{
+ s32 ret_val;
+ u32 anlp1_reg = 0;
+ u32 i, autoc_reg, autoc2_reg;
+
+ /* Enable link if disabled in NVM */
+ autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+ autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
+ autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
+ /* Wait for AN to leave state 0 */
+ for (i = 0; i < 10; i++) {
+ msec_delay(4);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
+ break;
+ }
+
+ if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+ DEBUGOUT("auto negotiation not completed\n");
+ ret_val = IXGBE_ERR_RESET_FAILED;
+ goto reset_pipeline_out;
+ }
+
+ ret_val = IXGBE_SUCCESS;
+
+reset_pipeline_out:
+ /* Write AUTOC register with original LMS field and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: address to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+
+ DEBUGFUNC("ixgbe_read_i2c_byte_82599");
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ msec_delay(5);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Driver can't access resource,"
+ " acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: address to read from
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+
+ DEBUGFUNC("ixgbe_write_i2c_byte_82599");
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ msec_delay(5);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Driver can't access resource,"
+ " acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.h
new file mode 100644
index 00000000..d555dbce
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.h
@@ -0,0 +1,64 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_82599_H_
+#define _IXGBE_82599_H_
+
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed);
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
+s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val);
+s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+#endif /* _IXGBE_82599_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.c
new file mode 100644
index 00000000..e50c1045
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.c
@@ -0,0 +1,1708 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+
+#define IXGBE_EMPTY_PARAM
+
+static const u32 ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(IXGBE_EMPTY_PARAM)
+};
+
+static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(_X540)
+};
+
+static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(_X550)
+};
+
+static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(_X550EM_x)
+};
+
+static const u32 ixgbe_mvals_X550EM_a[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(_X550EM_a)
+};
+
+/**
+ * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg
+ * @hw: pointer to hardware structure
+ * @map: pointer to u8 arr for returning map
+ *
+ * Read the rtrup2tc HW register and resolve its content into map
+ **/
+void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
+{
+ if (hw->mac.ops.get_rtrup2tc)
+ hw->mac.ops.get_rtrup2tc(hw, map);
+}
+
+/**
+ * ixgbe_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers and assign the MAC type and PHY code.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The ixgbe_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_init_shared_code");
+
+ /*
+ * Set the mac type
+ */
+ ixgbe_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ status = ixgbe_init_ops_82598(hw);
+ break;
+ case ixgbe_mac_82599EB:
+ status = ixgbe_init_ops_82599(hw);
+ break;
+ case ixgbe_mac_X540:
+ status = ixgbe_init_ops_X540(hw);
+ break;
+ case ixgbe_mac_X550:
+ status = ixgbe_init_ops_X550(hw);
+ break;
+ case ixgbe_mac_X550EM_x:
+ status = ixgbe_init_ops_X550EM_x(hw);
+ break;
+ case ixgbe_mac_X550EM_a:
+ status = ixgbe_init_ops_X550EM_a(hw);
+ break;
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ status = ixgbe_init_ops_vf(hw);
+ break;
+ default:
+ status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ break;
+ }
+ hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
+
+ return status;
+}
+
+/**
+ * ixgbe_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_set_mac_type\n");
+
+ if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) {
+ ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+ "Unsupported vendor id: %x", hw->vendor_id);
+ return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw->mvals = ixgbe_mvals_base;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ case IXGBE_DEV_ID_82598_BX:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AT:
+ case IXGBE_DEV_ID_82598AT2:
+ case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ case IXGBE_DEV_ID_82598EB_SFP_LOM:
+ hw->mac.type = ixgbe_mac_82598EB;
+ break;
+ case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_KX4_MEZZ:
+ case IXGBE_DEV_ID_82599_XAUI_LOM:
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_KR:
+ case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ case IXGBE_DEV_ID_82599EN_SFP:
+ case IXGBE_DEV_ID_82599_CX4:
+ case IXGBE_DEV_ID_82599_LS:
+ case IXGBE_DEV_ID_82599_BYPASS:
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ hw->mac.type = ixgbe_mac_82599EB;
+ break;
+ case IXGBE_DEV_ID_82599_VF:
+ case IXGBE_DEV_ID_82599_VF_HV:
+ hw->mac.type = ixgbe_mac_82599_vf;
+ break;
+ case IXGBE_DEV_ID_X540_VF:
+ case IXGBE_DEV_ID_X540_VF_HV:
+ hw->mac.type = ixgbe_mac_X540_vf;
+ hw->mvals = ixgbe_mvals_X540;
+ break;
+ case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_X540_BYPASS:
+ hw->mac.type = ixgbe_mac_X540;
+ hw->mvals = ixgbe_mvals_X540;
+ break;
+ case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550T1:
+ hw->mac.type = ixgbe_mac_X550;
+ hw->mvals = ixgbe_mvals_X550;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->mac.type = ixgbe_mac_X550EM_x;
+ hw->mvals = ixgbe_mvals_X550EM_x;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_QSFP:
+ case IXGBE_DEV_ID_X550EM_A_QSFP_N:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ hw->mac.type = ixgbe_mac_X550EM_a;
+ hw->mvals = ixgbe_mvals_X550EM_a;
+ break;
+ case IXGBE_DEV_ID_X550_VF:
+ case IXGBE_DEV_ID_X550_VF_HV:
+ hw->mac.type = ixgbe_mac_X550_vf;
+ hw->mvals = ixgbe_mvals_X550;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_VF:
+ case IXGBE_DEV_ID_X550EM_X_VF_HV:
+ hw->mac.type = ixgbe_mac_X550EM_x_vf;
+ hw->mvals = ixgbe_mvals_X550EM_x;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_VF:
+ case IXGBE_DEV_ID_X550EM_A_VF_HV:
+ hw->mac.type = ixgbe_mac_X550EM_a_vf;
+ hw->mvals = ixgbe_mvals_X550EM_a;
+ break;
+ default:
+ ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+ "Unsupported device id: %x",
+ hw->device_id);
+ break;
+ }
+
+ DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, ret_val);
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_hw - Initialize the hardware
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting and then starting the hardware
+ **/
+s32 ixgbe_init_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_reset_hw - Performs a hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts, performs a PHY reset, and performs a MAC reset
+ **/
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_start_hw - Prepares hardware for Rx/Tx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type,
+ * clears all on chip counters, initializes receive address registers,
+ * multicast table, VLAN filter table, calls routine to setup link and
+ * flow control settings, and leaves transmit and receive units disabled
+ * and uninitialized.
+ **/
+s32 ixgbe_start_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_enable_relaxed_ordering - Enables tx relaxed ordering,
+ * which is disabled by default in ixgbe_start_hw();
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Enable relaxed ordering;
+ **/
+void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.enable_relaxed_ordering)
+ hw->mac.ops.enable_relaxed_ordering(hw);
+}
+
+/**
+ * ixgbe_clear_hw_cntrs - Clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware
+ * Statistics counters are clear on read.
+ **/
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_media_type - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
+ ixgbe_media_type_unknown);
+}
+
+/**
+ * ixgbe_get_mac_addr - Get MAC address
+ * @hw: pointer to hardware structure
+ * @mac_addr: Adapter MAC address
+ *
+ * Reads the adapter's MAC address from the first Receive Address Register
+ * (RAR0) A reset of the adapter must have been performed prior to calling
+ * this function in order for the MAC address to have been loaded from the
+ * EEPROM into RAR0
+ **/
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
+ (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_san_mac_addr - Get SAN MAC address
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Reads the SAN MAC address from the EEPROM, if it's available. This is
+ * per-port, so set_lan_id() must be called before reading the addresses.
+ **/
+s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr,
+ (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_san_mac_addr - Write a SAN MAC address
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Writes A SAN MAC address to the EEPROM.
+ **/
+s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr,
+ (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_device_caps - Get additional device capabilities
+ * @hw: pointer to hardware structure
+ * @device_caps: the EEPROM word for device capabilities
+ *
+ * Reads the extra device capabilities from the EEPROM
+ **/
+s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_device_caps,
+ (hw, device_caps), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function will read the EEPROM from the alternative SAN MAC address
+ * block to check the support for the alternative WWNN/WWPN prefix support.
+ **/
+s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix,
+ (hw, wwnn_prefix, wwpn_prefix),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM
+ * @hw: pointer to hardware structure
+ * @bs: the fcoe boot status
+ *
+ * This function will read the FCOE boot status from the iSCSI FCOE block
+ **/
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status,
+ (hw, bs),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_bus_info - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_num_of_tx_queues - Get Tx queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of transmit queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw)
+{
+ return hw->mac.max_tx_queues;
+}
+
+/**
+ * ixgbe_get_num_of_rx_queues - Get Rx queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of receive queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw)
+{
+ return hw->mac.max_rx_queues;
+}
+
+/**
+ * ixgbe_stop_adapter - Disable Rx/Tx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_pba_string - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ * ixgbe_read_pba_num - Reads part number from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number from the EEPROM
+ *
+ * Reads the part number from the EEPROM.
+ **/
+s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num)
+{
+ return ixgbe_read_pba_num_generic(hw, pba_num);
+}
+
+/**
+ * ixgbe_identify_phy - Get PHY type
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ **/
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_phy - Perform a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS)
+ status = IXGBE_ERR_PHY;
+ }
+
+ if (status == IXGBE_SUCCESS) {
+ status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version -
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to firmware version
+ **/
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version,
+ (hw, firmware_version),
+ IXGBE_NOT_IMPLEMENTED);
+ return status;
+}
+
+/**
+ * ixgbe_read_phy_reg - Read PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: type of device you want to communicate with
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register
+ **/
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
+{
+ if (hw->phy.id == 0)
+ ixgbe_identify_phy(hw);
+
+ return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_phy_reg - Write PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: type of device you want to communicate with
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register
+ **/
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data)
+{
+ if (hw->phy.id == 0)
+ ixgbe_identify_phy(hw);
+
+ return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_phy_link - Restart PHY autoneg
+ * @hw: pointer to hardware structure
+ *
+ * Restart autonegotiation and PHY and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_internal_phy - Configure integrated PHY
+ * @hw: pointer to hardware structure
+ *
+ * Reconfigure the integrated PHY in order to enable talk to the external PHY.
+ * Returns success if not implemented, since nothing needs to be done in this
+ * case.
+ */
+s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw),
+ IXGBE_SUCCESS);
+}
+
+/**
+ * ixgbe_check_phy_link - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: link speed
+ * @link_up: true when link is up
+ *
+ * Reads a PHY register to determine if link is up and the current speed for
+ * the PHY.
+ **/
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed,
+ link_up), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_phy_link_speed - Set auto advertise
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Sets the auto advertised capabilities
+ **/
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
+ autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_phy_power - Control the phy power state
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ */
+s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_check_link - Get link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed,
+ link_up, link_up_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_tx_laser - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * If the driver needs to disable the laser on SFI optics.
+ **/
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.disable_tx_laser)
+ hw->mac.ops.disable_tx_laser(hw);
+}
+
+/**
+ * ixgbe_enable_tx_laser - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * If the driver needs to enable the laser on SFI optics.
+ **/
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.enable_tx_laser)
+ hw->mac.ops.enable_tx_laser(hw);
+}
+
+/**
+ * ixgbe_flap_tx_laser - flap Tx laser to start autotry process
+ * @hw: pointer to hardware structure
+ *
+ * When the driver changes the link speeds that it can support then
+ * flap the tx laser to alert the link partner to start autotry
+ * process on its end.
+ **/
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.flap_tx_laser)
+ hw->mac.ops.flap_tx_laser(hw);
+}
+
+/**
+ * ixgbe_setup_link - Set link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Configures link settings. Restarts the link.
+ * Performs autonegotiation if needed.
+ **/
+s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
+ autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_mac_link - Set link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Configures link settings. Restarts the link.
+ * Performs autonegotiation if needed.
+ **/
+s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_mac_link, (hw, speed,
+ autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_link_capabilities - Returns link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: link speed capabilities
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determines the link capabilities of the current configuration.
+ **/
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw,
+ speed, autoneg), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_led_on - Turn on LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to turn on
+ *
+ * Turns on the software controllable LEDs.
+ **/
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_led_off - Turn off LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to turn off
+ *
+ * Turns off the software controllable LEDs.
+ **/
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_blink_led_start - Blink LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ *
+ * Blink LED based on index.
+ **/
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_blink_led_stop - Stop blinking LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to stop
+ *
+ * Stop blinking LED based on index.
+ **/
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_eeprom_params - Initialize EEPROM parameters
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/**
+ * ixgbe_write_eeprom - Write word to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word to be written to the EEPROM
+ *
+ * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not
+ * called after this function, the EEPROM will most likely contain an
+ * invalid checksum.
+ **/
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_eeprom_buffer - Write word(s) to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word(s) to be written to the EEPROM
+ * @words: number of words
+ *
+ * Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not
+ * called after this function, the EEPROM will most likely contain an
+ * invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer,
+ (hw, offset, words, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_eeprom - Read word from EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit value from EEPROM
+ *
+ * Reads 16 bit value from EEPROM
+ **/
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_eeprom_buffer - Read word(s) from EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit word(s) from EEPROM
+ * @words: number of words
+ *
+ * Reads 16 bit word(s) from EEPROM
+ **/
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer,
+ (hw, offset, words, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum
+ **/
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
+ (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_eeprom_update_checksum - Updates the EEPROM checksum
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_insert_mac_addr - Find a RAR for this mac address
+ * @hw: pointer to hardware structure
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq pool to assign
+ *
+ * Puts an ethernet address into a receive address register, or
+ * finds the rar that it is aleady in; adds to the pool list
+ **/
+s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr,
+ (hw, addr, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_rar - Set Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set"
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
+ enable_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_rar - Clear Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_vmdq - Associate a VMDq index with a receive address
+ * @hw: pointer to hardware structure
+ * @rar: receive address register index to associate with VMDq index
+ * @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+
+}
+
+/**
+ * ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address
+ * @hw: pointer to hardware structure
+ * @vmdq: VMDq default pool index
+ **/
+s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac,
+ (hw, vmdq), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
+ * @hw: pointer to hardware structure
+ * @rar: receive address register index to disassociate with VMDq index
+ * @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_rx_addrs - Initializes receive address filters.
+ * @hw: pointer to hardware structure
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ **/
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_num_rx_addrs - Returns the number of RAR entries.
+ * @hw: pointer to hardware structure
+ **/
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw)
+{
+ return hw->mac.num_rar_entries;
+}
+
+/**
+ * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses
+ * @hw: pointer to hardware structure
+ * @addr_list: the list of new multicast addresses
+ * @addr_count: number of addresses
+ * @func: iterator function to walk the multicast address list
+ *
+ * The given list replaces any existing list. Clears the secondary addrs from
+ * receive address registers. Uses unused receive address registers for the
+ * first secondary addresses, and falls back to promiscuous mode as needed.
+ **/
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw,
+ addr_list, addr_count, func),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses
+ * @hw: pointer to hardware structure
+ * @mc_addr_list: the list of new multicast addresses
+ * @mc_addr_count: number of addresses
+ * @func: iterator function to walk the multicast address list
+ * @clear: flag, when set clears the table beforehand
+ *
+ * The given list replaces any existing list. Clears the MC addrs from receive
+ * address registers and the multicast table. Uses unused receive address
+ * registers for the first multicast addresses, and hashes the rest into the
+ * multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr func,
+ bool clear)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw,
+ mc_addr_list, mc_addr_count, func, clear),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_enable_mc - Enable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_mc - Disable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filer table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_vfta - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN
+ * @vlvf_bypass: boolean flag indicating updating the default pool is okay
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
+ bool vlvf_bypass)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
+ vlan_on, vlvf_bypass), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_vlvf - Set VLAN Pool Filter
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_delta: pointer to the difference between the current value of VFTA
+ * and the desired value
+ * @vfta: the desired value of the VFTA
+ * @vlvf_bypass: boolean flag indicating updating the default pool is okay
+ *
+ * Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
+ u32 *vfta_delta, u32 vfta, bool vlvf_bypass)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind,
+ vlan_on, vfta_delta, vfta, vlvf_bypass),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_fc_enable - Enable flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configures the flow control settings based on SW configuration.
+ **/
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_fc - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_fw_drv_ver - Try to send the driver version number FW
+ * @hw: pointer to hardware structure
+ * @maj: driver major number to be sent to firmware
+ * @min: driver minor number to be sent to firmware
+ * @build: driver build number to be sent to firmware
+ * @ver: driver version number to be sent to firmware
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ **/
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+ u8 ver, u16 len, char *driver_ver)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min,
+ build, ver, len, driver_ver),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/**
+ * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ **/
+s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_thermal_sensor_data, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Inits the thermal sensor thresholds according to the NVM map
+ **/
+s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_thermal_sensor_thresh, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_dmac_config - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing. If enabling dmac, dmac is activated.
+ * When disabling dmac, dmac enable dmac bit is cleared.
+ **/
+s32 ixgbe_dmac_config(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_dmac_update_tcs - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Disables dmac, updates per TC settings, and then enable dmac.
+ **/
+s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_dmac_config_tcs - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing threshold per TC and set high priority bit for
+ * FCOE TC. The dmac enable bit must be cleared before configuring.
+ **/
+s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_eee - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enable/disable EEE based on enable_ee flag.
+ * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
+ * are modified.
+ *
+ **/
+s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_source_address_pruning - Enable/Disable source address pruning
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable source address pruning
+ * @pool: Rx pool - Rx pool to toggle source address pruning
+ **/
+void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
+ unsigned int pool)
+{
+ if (hw->mac.ops.set_source_address_pruning)
+ hw->mac.ops.set_source_address_pruning(hw, enable, pool);
+}
+
+/**
+ * ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for Ethertype anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
+ *
+ **/
+void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+ if (hw->mac.ops.set_ethertype_anti_spoofing)
+ hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf);
+}
+
+/**
+ * ixgbe_read_iosf_sb_reg - Read 32 bit PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: type of device you want to communicate with
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register
+ **/
+s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *phy_data)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: type of device you want to communicate with
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register
+ **/
+s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 phy_data)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_mdd - Disable malicious driver detection
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_disable_mdd(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.disable_mdd)
+ hw->mac.ops.disable_mdd(hw);
+}
+
+/**
+ * ixgbe_enable_mdd - Enable malicious driver detection
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_enable_mdd(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.enable_mdd)
+ hw->mac.ops.enable_mdd(hw);
+}
+
+/**
+ * ixgbe_mdd_event - Handle malicious driver detection event
+ * @hw: pointer to hardware structure
+ * @vf_bitmap: vf bitmap of malicious vfs
+ *
+ **/
+void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap)
+{
+ if (hw->mac.ops.mdd_event)
+ hw->mac.ops.mdd_event(hw, vf_bitmap);
+}
+
+/**
+ * ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver
+ * detection event
+ * @hw: pointer to hardware structure
+ * @vf: vf index
+ *
+ **/
+void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf)
+{
+ if (hw->mac.ops.restore_mdd_vf)
+ hw->mac.ops.restore_mdd_vf(hw, vf);
+}
+
+/**
+ * ixgbe_enter_lplu - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0).
+ **/
+s32 ixgbe_enter_lplu(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.enter_lplu, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_handle_lasi - Handle external Base T PHY interrupt
+ * @hw: pointer to hardware structure
+ *
+ * Handle external Base T PHY interrupt. If high temperature
+ * failure alarm then return error, else if link status change
+ * then setup internal/external PHY link
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ */
+s32 ixgbe_handle_lasi(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.handle_lasi, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_analog_reg8 - Reads 8 bit analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs write operation to analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg,
+ val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_analog_reg8 - Writes 8 bit analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to write
+ * @val: value to write
+ *
+ * Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg,
+ val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the Unicast Table Arrays to zero on device load. This
+ * is part of the Rx init addr execution path.
+ **/
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: I2C bus address to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 *data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset,
+ dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_i2c_byte_unlocked - Reads 8 bit word via I2C from device address
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: I2C bus address to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte_unlocked,
+ (hw, byte_offset, dev_addr, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_link - Perform read operation on link device
+ * @hw: pointer to the hardware structure
+ * @addr: bus address to read from
+ * @reg: device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val)
+{
+ return ixgbe_call_func(hw, hw->link.ops.read_link, (hw, addr,
+ reg, val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_link_unlocked - Perform read operation on link device
+ * @hw: pointer to the hardware structure
+ * @addr: bus address to read from
+ * @reg: device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ **/
+s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val)
+{
+ return ixgbe_call_func(hw, hw->link.ops.read_link_unlocked,
+ (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_i2c_byte - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: I2C bus address to write to
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface
+ * at a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset,
+ dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_i2c_byte_unlocked - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: I2C bus address to write to
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface
+ * at a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte_unlocked,
+ (hw, byte_offset, dev_addr, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_link - Perform write operation on link device
+ * @hw: pointer to the hardware structure
+ * @addr: bus address to write to
+ * @reg: device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_call_func(hw, hw->link.ops.write_link,
+ (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_link_unlocked - Perform write operation on link device
+ * @hw: pointer to the hardware structure
+ * @addr: bus address to write to
+ * @reg: device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ **/
+s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_call_func(hw, hw->link.ops.write_link_unlocked,
+ (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw,
+ u8 byte_offset, u8 eeprom_data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom,
+ (hw, byte_offset, eeprom_data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom,
+ (hw, byte_offset, eeprom_data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_supported_physical_layer - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
+ (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
+}
+
+/**
+ * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics
+ * @hw: pointer to hardware structure
+ * @regval: bitfield to write to the Rx DMA register
+ *
+ * Enables the Rx DMA unit of the device.
+ **/
+s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma,
+ (hw, regval), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_sec_rx_path - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path.
+ **/
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path,
+ (hw), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_enable_sec_rx_path - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path,
+ (hw), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
+ (hw, mask), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_release_swfw_semaphore - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through SW_FW_SYNC register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
+{
+ if (hw->mac.ops.release_swfw_sync)
+ hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
+/**
+ * ixgbe_init_swfw_semaphore - Clean up SWFW semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Attempts to acquire the SWFW semaphore through SW_FW_SYNC register.
+ * Regardless of whether is succeeds or not it then release the semaphore.
+ * This is function is called to recover from catastrophic failures that
+ * may have left the semaphore locked.
+ **/
+void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+}
+
+
+void ixgbe_disable_rx(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.disable_rx)
+ hw->mac.ops.disable_rx(hw);
+}
+
+void ixgbe_enable_rx(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.enable_rx)
+ hw->mac.ops.enable_rx(hw);
+}
+
+/**
+ * ixgbe_set_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * Set module link speed via the rate select.
+ */
+void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
+{
+ if (hw->mac.ops.set_rate_select_speed)
+ hw->mac.ops.set_rate_select_speed(hw, speed);
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.h
new file mode 100644
index 00000000..2f532aa8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.h
@@ -0,0 +1,225 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_API_H_
+#define _IXGBE_API_H_
+
+#include "ixgbe_type.h"
+
+void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
+
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
+
+extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
+
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw(struct ixgbe_hw *hw);
+void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data);
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data);
+
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
+s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw);
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up);
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_set_phy_power(struct ixgbe_hw *, bool on);
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *autoneg);
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq);
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr func,
+ bool clear);
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq);
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on, bool vlvf_bypass);
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, u32 *vfta_delta, u32 vfta,
+ bool vlvf_bypass);
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+ u8 ver, u16 len, char *driver_ver);
+s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw);
+s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw);
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
+u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
+ bool cloud_mode);
+void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask, bool cloud_mode);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue, bool cloud_mode);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id);
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask,
+ u16 soft_id,
+ u8 queue,
+ bool cloud_mode);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask);
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 *data);
+s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val);
+s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val);
+s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 data);
+void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue);
+s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val);
+s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val);
+s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
+s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw);
+s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix);
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
+s32 ixgbe_dmac_config(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw);
+s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee);
+void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
+ unsigned int vf);
+void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable,
+ int vf);
+s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *phy_data);
+s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 phy_data);
+void ixgbe_disable_mdd(struct ixgbe_hw *hw);
+void ixgbe_enable_mdd(struct ixgbe_hw *hw);
+void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap);
+void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf);
+s32 ixgbe_enter_lplu(struct ixgbe_hw *hw);
+s32 ixgbe_handle_lasi(struct ixgbe_hw *hw);
+void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed);
+void ixgbe_disable_rx(struct ixgbe_hw *hw);
+void ixgbe_enable_rx(struct ixgbe_hw *hw);
+s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
+
+#endif /* _IXGBE_API_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.c
new file mode 100644
index 00000000..e7e9256e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.c
@@ -0,0 +1,5445 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82599.h"
+#include "ixgbe_api.h"
+
+STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
+STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+ u16 count);
+STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
+STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
+
+STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset);
+STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+ u16 offset);
+
+/**
+ * ixgbe_init_ops_generic - Inits function ptrs
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the function pointers.
+ **/
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
+
+ DEBUGFUNC("ixgbe_init_ops_generic");
+
+ /* EEPROM */
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
+ /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
+ if (eec & IXGBE_EEC_PRES) {
+ eeprom->ops.read = ixgbe_read_eerd_generic;
+ eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
+ } else {
+ eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
+ eeprom->ops.read_buffer =
+ ixgbe_read_eeprom_buffer_bit_bang_generic;
+ }
+ eeprom->ops.write = ixgbe_write_eeprom_generic;
+ eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
+ eeprom->ops.validate_checksum =
+ ixgbe_validate_eeprom_checksum_generic;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
+
+ /* MAC */
+ mac->ops.init_hw = ixgbe_init_hw_generic;
+ mac->ops.reset_hw = NULL;
+ mac->ops.start_hw = ixgbe_start_hw_generic;
+ mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
+ mac->ops.get_media_type = NULL;
+ mac->ops.get_supported_physical_layer = NULL;
+ mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
+ mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
+ mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
+ mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
+ mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
+ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
+ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
+ mac->ops.prot_autoc_read = prot_autoc_read_generic;
+ mac->ops.prot_autoc_write = prot_autoc_write_generic;
+
+ /* LEDs */
+ mac->ops.led_on = ixgbe_led_on_generic;
+ mac->ops.led_off = ixgbe_led_off_generic;
+ mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
+ mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
+ mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_rar = ixgbe_set_rar_generic;
+ mac->ops.clear_rar = ixgbe_clear_rar_generic;
+ mac->ops.insert_mac_addr = NULL;
+ mac->ops.set_vmdq = NULL;
+ mac->ops.clear_vmdq = NULL;
+ mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
+ mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
+ mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
+ mac->ops.enable_mc = ixgbe_enable_mc_generic;
+ mac->ops.disable_mc = ixgbe_disable_mc_generic;
+ mac->ops.clear_vfta = NULL;
+ mac->ops.set_vfta = NULL;
+ mac->ops.set_vlvf = NULL;
+ mac->ops.init_uta_tables = NULL;
+ mac->ops.enable_rx = ixgbe_enable_rx_generic;
+ mac->ops.disable_rx = ixgbe_disable_rx_generic;
+
+ /* Flow Control */
+ mac->ops.fc_enable = ixgbe_fc_enable_generic;
+ mac->ops.setup_fc = ixgbe_setup_fc_generic;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg;
+
+ /* Link */
+ mac->ops.get_link_capabilities = NULL;
+ mac->ops.setup_link = NULL;
+ mac->ops.check_link = NULL;
+ mac->ops.dmac_config = NULL;
+ mac->ops.dmac_update_tcs = NULL;
+ mac->ops.dmac_config_tcs = NULL;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
+ * of flow control
+ * @hw: pointer to hardware structure
+ *
+ * This function returns true if the device supports flow control
+ * autonegotiation, and false if it does not.
+ *
+ **/
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+ bool supported = false;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
+
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
+ case ixgbe_media_type_fiber_qsfp:
+ case ixgbe_media_type_fiber:
+ /* flow control autoneg black list */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_X550EM_A_QSFP:
+ case IXGBE_DEV_ID_X550EM_A_QSFP_N:
+ supported = false;
+ break;
+ default:
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ /* if link is down, assume supported */
+ if (link_up)
+ supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
+ true : false;
+ else
+ supported = true;
+ }
+
+ break;
+ case ixgbe_media_type_backplane:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
+ supported = false;
+ else
+ supported = true;
+ break;
+ case ixgbe_media_type_copper:
+ /* only some copper devices support flow control autoneg */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_X540_BYPASS:
+ case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550T1:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ supported = true;
+ break;
+ default:
+ supported = false;
+ }
+ default:
+ break;
+ }
+
+ if (!supported)
+ ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+ "Device %x does not support flow control autoneg",
+ hw->device_id);
+ return supported;
+}
+
+/**
+ * ixgbe_setup_fc_generic - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 reg = 0, reg_bp = 0;
+ u16 reg_cu = 0;
+ bool locked = false;
+
+ DEBUGFUNC("ixgbe_setup_fc_generic");
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /*
+ * Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do fc autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
+ */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_backplane:
+ /* some MAC's need RMW protection on AUTOC */
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ /* fall through - only backplane uses autoc */
+ case ixgbe_media_type_fiber_fixed:
+ case ixgbe_media_type_fiber_qsfp:
+ case ixgbe_media_type_fiber:
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+
+ break;
+ case ixgbe_media_type_copper:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ reg |= IXGBE_PCS1GANA_ASM_PAUSE;
+ reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
+ reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
+ } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+ reg_cu |= IXGBE_TAF_ASM_PAUSE;
+ reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
+ }
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE;
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
+ break;
+ default:
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+ "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ break;
+ }
+
+ if (hw->mac.type < ixgbe_mac_X540) {
+ /*
+ * Enable auto-negotiation between the MAC & PHY;
+ * the MAC will advertise clause 37 flow control.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+
+ /* Disable AN timeout */
+ if (hw->fc.strict_ieee)
+ reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+ DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+ }
+
+ /*
+ * AUTOC restart handles negotiation of 1G and 10G on backplane
+ * and copper. There is no need to set the PCS1GCTL register.
+ *
+ */
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_AN_RESTART;
+ ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
+ if (ret_val)
+ goto out;
+ } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+ (ixgbe_device_supports_autoneg_fc(hw))) {
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
+ }
+
+ DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type, clears
+ * all on chip counters, initializes receive address registers, multicast
+ * table, VLAN filter table, calls routine to set up link and flow control
+ * settings, and leaves transmit and receive units disabled and uninitialized
+ **/
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
+{
+ s32 ret_val;
+ u32 ctrl_ext;
+ u16 device_caps;
+
+ DEBUGFUNC("ixgbe_start_hw_generic");
+
+ /* Set the media type */
+ hw->phy.media_type = hw->mac.ops.get_media_type(hw);
+
+ /* PHY ops initialization must be done in reset_hw() */
+
+ /* Clear the VLAN filter table */
+ hw->mac.ops.clear_vfta(hw);
+
+ /* Clear statistics registers */
+ hw->mac.ops.clear_hw_cntrs(hw);
+
+ /* Set No Snoop Disable */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Setup flow control */
+ ret_val = ixgbe_setup_fc(hw);
+ if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
+ DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
+ return ret_val;
+ }
+
+ /* Cache bit indicating need for crosstalk fix */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ hw->mac.ops.get_device_caps(hw, &device_caps);
+ if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+ hw->need_crosstalk_fix = false;
+ else
+ hw->need_crosstalk_fix = true;
+ break;
+ default:
+ hw->need_crosstalk_fix = false;
+ break;
+ }
+
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_start_hw_gen2 - Init sequence for common device family
+ * @hw: pointer to hw structure
+ *
+ * Performs the init sequence common to the second generation
+ * of 10 GbE devices.
+ * Devices in the second generation:
+ * 82599
+ * X540
+ **/
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 regval;
+
+ /* Clear the rate limiters */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
+ }
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Disable relaxed ordering */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+ regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+ }
+
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_hw_generic - Generic hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware, filling the bus info
+ * structure and media type, clears all on chip counters, initializes receive
+ * address registers, multicast table, VLAN filter table, calls routine to set
+ * up link and flow control settings, and leaves transmit and receive units
+ * disabled and uninitialized
+ **/
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_init_hw_generic");
+
+ /* Reset the hardware */
+ status = hw->mac.ops.reset_hw(hw);
+
+ if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
+ /* Start the HW */
+ status = hw->mac.ops.start_hw(hw);
+ }
+
+ /* Initialize the LED link active for LED blink support */
+ if (hw->mac.ops.init_led_link_act)
+ hw->mac.ops.init_led_link_act(hw);
+
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
+
+ return status;
+}
+
+/**
+ * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware
+ * Statistics counters are clear on read.
+ **/
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
+{
+ u16 i = 0;
+
+ DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
+
+ IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+ IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+ IXGBE_READ_REG(hw, IXGBE_ERRBC);
+ IXGBE_READ_REG(hw, IXGBE_MSPDC);
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
+ IXGBE_READ_REG(hw, IXGBE_MLFC);
+ IXGBE_READ_REG(hw, IXGBE_MRFC);
+ IXGBE_READ_REG(hw, IXGBE_RLEC);
+ IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ }
+
+ for (i = 0; i < 8; i++) {
+ IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ }
+ }
+ if (hw->mac.type >= ixgbe_mac_82599EB)
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+ IXGBE_READ_REG(hw, IXGBE_PRC64);
+ IXGBE_READ_REG(hw, IXGBE_PRC127);
+ IXGBE_READ_REG(hw, IXGBE_PRC255);
+ IXGBE_READ_REG(hw, IXGBE_PRC511);
+ IXGBE_READ_REG(hw, IXGBE_PRC1023);
+ IXGBE_READ_REG(hw, IXGBE_PRC1522);
+ IXGBE_READ_REG(hw, IXGBE_GPRC);
+ IXGBE_READ_REG(hw, IXGBE_BPRC);
+ IXGBE_READ_REG(hw, IXGBE_MPRC);
+ IXGBE_READ_REG(hw, IXGBE_GPTC);
+ IXGBE_READ_REG(hw, IXGBE_GORCL);
+ IXGBE_READ_REG(hw, IXGBE_GORCH);
+ IXGBE_READ_REG(hw, IXGBE_GOTCL);
+ IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ IXGBE_READ_REG(hw, IXGBE_RUC);
+ IXGBE_READ_REG(hw, IXGBE_RFC);
+ IXGBE_READ_REG(hw, IXGBE_ROC);
+ IXGBE_READ_REG(hw, IXGBE_RJC);
+ IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+ IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+ IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+ IXGBE_READ_REG(hw, IXGBE_TORL);
+ IXGBE_READ_REG(hw, IXGBE_TORH);
+ IXGBE_READ_REG(hw, IXGBE_TPR);
+ IXGBE_READ_REG(hw, IXGBE_TPT);
+ IXGBE_READ_REG(hw, IXGBE_PTC64);
+ IXGBE_READ_REG(hw, IXGBE_PTC127);
+ IXGBE_READ_REG(hw, IXGBE_PTC255);
+ IXGBE_READ_REG(hw, IXGBE_PTC511);
+ IXGBE_READ_REG(hw, IXGBE_PTC1023);
+ IXGBE_READ_REG(hw, IXGBE_PTC1522);
+ IXGBE_READ_REG(hw, IXGBE_MPTC);
+ IXGBE_READ_REG(hw, IXGBE_BPTC);
+ for (i = 0; i < 16; i++) {
+ IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ }
+ }
+
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+ if (hw->phy.id == 0)
+ ixgbe_identify_phy(hw);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ DEBUGFUNC("ixgbe_read_pba_string_generic");
+
+ if (pba_num == NULL) {
+ DEBUGOUT("PBA string buffer was null\n");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /*
+ * if data is not ptr guard the PBA must be in legacy format which
+ * means pba_ptr is actually our second data word for the PBA number
+ * and we can decode it into an ascii string
+ */
+ if (data != IXGBE_PBANUM_PTR_GUARD) {
+ DEBUGOUT("NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (data >> 12) & 0xF;
+ pba_num[1] = (data >> 8) & 0xF;
+ pba_num[2] = (data >> 4) & 0xF;
+ pba_num[3] = data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return IXGBE_SUCCESS;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return IXGBE_ERR_PBA_SECTION;
+ }
+
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_pba_num_generic - Reads part number from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number from the EEPROM
+ *
+ * Reads the part number from the EEPROM.
+ **/
+s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
+{
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("ixgbe_read_pba_num_generic");
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ } else if (data == IXGBE_PBANUM_PTR_GUARD) {
+ DEBUGOUT("NVM Not supported\n");
+ return IXGBE_NOT_IMPLEMENTED;
+ }
+ *pba_num = (u32)(data << 16);
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ *pba_num |= data;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @max_pba_block_size: PBA block size limit
+ * @pba: pointer to output PBA structure
+ *
+ * Reads PBA from EEPROM image when eeprom_buf is not NULL.
+ * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct ixgbe_pba *pba)
+{
+ s32 ret_val;
+ u16 pba_block_size;
+
+ if (pba == NULL)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+ &pba->word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+ pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+ pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return IXGBE_ERR_PARAM;
+
+ ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
+ eeprom_buf_size,
+ &pba_block_size);
+ if (ret_val)
+ return ret_val;
+
+ if (pba_block_size > max_pba_block_size)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
+ pba_block_size,
+ pba->pba_block);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba_block_size)) {
+ memcpy(pba->pba_block,
+ &eeprom_buf[pba->word[1]],
+ pba_block_size * sizeof(u16));
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba: pointer to PBA structure
+ *
+ * Writes PBA to EEPROM image when eeprom_buf is not NULL.
+ * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct ixgbe_pba *pba)
+{
+ s32 ret_val;
+
+ if (pba == NULL)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+ &pba->word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+ eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
+ eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
+ pba->pba_block[0],
+ pba->pba_block);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba->pba_block[0])) {
+ memcpy(&eeprom_buf[pba->word[1]],
+ pba->pba_block,
+ pba->pba_block[0] * sizeof(u16));
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_pba_block_size
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba_data_size: pointer to output variable
+ *
+ * Returns the size of the PBA block in words. Function operates on EEPROM
+ * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
+ * EEPROM device.
+ *
+ **/
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size)
+{
+ s32 ret_val;
+ u16 pba_word[2];
+ u16 length;
+
+ DEBUGFUNC("ixgbe_get_pba_block_size");
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+ &pba_word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+ pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+ pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
+ &length);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > pba_word[1])
+ length = eeprom_buf[pba_word[1] + 0];
+ else
+ return IXGBE_ERR_PARAM;
+ }
+
+ if (length == 0xFFFF || length == 0)
+ return IXGBE_ERR_PBA_SECTION;
+ } else {
+ /* PBA number in legacy format, there is no PBA Block. */
+ length = 0;
+ }
+
+ if (pba_block_size != NULL)
+ *pba_block_size = length;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_mac_addr_generic - Generic get MAC address
+ * @hw: pointer to hardware structure
+ * @mac_addr: Adapter MAC address
+ *
+ * Reads the adapter's MAC address from first Receive Address Register (RAR0)
+ * A reset of the adapter must be performed prior to calling this function
+ * in order for the MAC address to have been loaded from the EEPROM into RAR0
+ **/
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_get_mac_addr_generic");
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
+ rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
+
+ for (i = 0; i < 4; i++)
+ mac_addr[i] = (u8)(rar_low >> (i*8));
+
+ for (i = 0; i < 2; i++)
+ mac_addr[i+4] = (u8)(rar_high >> (i*8));
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status returned by the PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ if (hw->bus.type == ixgbe_bus_type_unknown)
+ hw->bus.type = ixgbe_bus_type_pci_express;
+
+ switch (link_status & IXGBE_PCI_LINK_WIDTH) {
+ case IXGBE_PCI_LINK_WIDTH_1:
+ hw->bus.width = ixgbe_bus_width_pcie_x1;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_2:
+ hw->bus.width = ixgbe_bus_width_pcie_x2;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_4:
+ hw->bus.width = ixgbe_bus_width_pcie_x4;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_8:
+ hw->bus.width = ixgbe_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = ixgbe_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & IXGBE_PCI_LINK_SPEED) {
+ case IXGBE_PCI_LINK_SPEED_2500:
+ hw->bus.speed = ixgbe_bus_speed_2500;
+ break;
+ case IXGBE_PCI_LINK_SPEED_5000:
+ hw->bus.speed = ixgbe_bus_speed_5000;
+ break;
+ case IXGBE_PCI_LINK_SPEED_8000:
+ hw->bus.speed = ixgbe_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = ixgbe_bus_speed_unknown;
+ break;
+ }
+
+ mac->ops.set_lan_id(hw);
+}
+
+/**
+ * ixgbe_get_bus_info_generic - Generic set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Gets the PCI bus info (speed, width, type) then calls helper function to
+ * store this data within the ixgbe_hw structure.
+ **/
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+{
+ u16 link_status;
+
+ DEBUGFUNC("ixgbe_get_bus_info_generic");
+
+ /* Get the negotiated link width and speed from PCI config space */
+ link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+
+ ixgbe_set_pci_config_data_generic(hw, link_status);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers and swaps
+ * the port value if requested, and set MAC instance for devices that share
+ * CS4227.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bus_info *bus = &hw->bus;
+ u32 reg;
+ u16 ee_ctrl_4;
+
+ DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
+
+ reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
+ bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
+ bus->lan_id = (u8)bus->func;
+
+ /* check for a port swap */
+ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
+ if (reg & IXGBE_FACTPS_LFS)
+ bus->func ^= 0x1;
+
+ /* Get MAC instance from EEPROM for configuring CS4227 */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
+ bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
+ IXGBE_EE_CTRL_4_INST_ID_SHIFT;
+ }
+}
+
+/**
+ * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
+{
+ u32 reg_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_stop_adapter_generic");
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+ /* Disable the receive unit */
+ ixgbe_disable_rx(hw);
+
+ /* Clear interrupt mask to stop interrupts from being generated */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts, flush previous writes */
+ IXGBE_READ_REG(hw, IXGBE_EICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < hw->mac.max_tx_queues; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ reg_val |= IXGBE_RXDCTL_SWFLSH;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
+ }
+
+ /* flush all queues disables */
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(2);
+
+ /*
+ * Prevent the PCI-E bus from hanging by disabling PCI-E master
+ * access and verify no pending requests
+ */
+ return ixgbe_disable_pcie_master(hw);
+}
+
+/**
+ * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
+ * @hw: pointer to hardware structure
+ *
+ * Store the index for the link active LED. This will be used to support
+ * blinking the LED.
+ **/
+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 led_reg, led_mode;
+ u8 i;
+
+ led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ /* Get LED link active from the LEDCTL register */
+ for (i = 0; i < 4; i++) {
+ led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
+
+ if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
+ IXGBE_LED_LINK_ACTIVE) {
+ mac->led_link_act = i;
+ return IXGBE_SUCCESS;
+ }
+ }
+
+ /*
+ * If LEDCTL register does not have the LED link active set, then use
+ * known MAC defaults.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_X550EM_x:
+ mac->led_link_act = 1;
+ break;
+ default:
+ mac->led_link_act = 2;
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_led_on_generic - Turns on the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn on
+ **/
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ DEBUGFUNC("ixgbe_led_on_generic");
+
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
+ /* To turn on the LED, set mode to ON. */
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_led_off_generic - Turns off the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn off
+ **/
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ DEBUGFUNC("ixgbe_led_off_generic");
+
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
+ /* To turn off the LED, set mode to OFF. */
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ DEBUGFUNC("ixgbe_init_eeprom_params_generic");
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->type = ixgbe_eeprom_none;
+ /* Set default semaphore delay to 10ms which is a well
+ * tested value */
+ eeprom->semaphore_delay = 10;
+ /* Clear EEPROM page size, it will be initialized as needed */
+ eeprom->word_page_size = 0;
+
+ /*
+ * Check for EEPROM present first.
+ * If not present leave as none
+ */
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
+ if (eec & IXGBE_EEC_PRES) {
+ eeprom->type = ixgbe_eeprom_spi;
+
+ /*
+ * SPI EEPROM is assumed here. This code would need to
+ * change if a future EEPROM is not SPI.
+ */
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ }
+
+ if (eec & IXGBE_EEC_ADDR_SIZE)
+ eeprom->address_bits = 16;
+ else
+ eeprom->address_bits = 8;
+ DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
+ "%d\n", eeprom->type, eeprom->word_size,
+ eeprom->address_bits);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to write
+ * @words: number of word(s)
+ * @data: 16 bit word(s) to write to EEPROM
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset + words > hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ /*
+ * The EEPROM page size cannot be queried from the chip. We do lazy
+ * initialization. It is worth to do that when we write large buffer.
+ */
+ if ((hw->eeprom.word_page_size == 0) &&
+ (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
+ ixgbe_detect_eeprom_page_size_generic(hw, offset);
+
+ /*
+ * We cannot hold synchronization semaphores for too long
+ * to avoid other entity starvation. However it is more efficient
+ * to read in bursts than synchronizing access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != IXGBE_SUCCESS)
+ break;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of word(s)
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * If ixgbe_eeprom_update_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+ u16 word;
+ u16 page_size;
+ u16 i;
+ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
+
+ DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
+
+ /* Prepare the EEPROM for writing */
+ status = ixgbe_acquire_eeprom(hw);
+
+ if (status == IXGBE_SUCCESS) {
+ if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
+ ixgbe_release_eeprom(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ }
+
+ if (status == IXGBE_SUCCESS) {
+ for (i = 0; i < words; i++) {
+ ixgbe_standby_eeprom(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode ) */
+ ixgbe_shift_out_eeprom_bits(hw,
+ IXGBE_EEPROM_WREN_OPCODE_SPI,
+ IXGBE_EEPROM_OPCODE_BITS);
+
+ ixgbe_standby_eeprom(hw);
+
+ /*
+ * Some SPI eeproms use the 8th address bit embedded
+ * in the opcode
+ */
+ if ((hw->eeprom.address_bits == 8) &&
+ ((offset + i) >= 128))
+ write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ ixgbe_shift_out_eeprom_bits(hw, write_opcode,
+ IXGBE_EEPROM_OPCODE_BITS);
+ ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+ hw->eeprom.address_bits);
+
+ page_size = hw->eeprom.word_page_size;
+
+ /* Send the data in burst via SPI*/
+ do {
+ word = data[i];
+ word = (word >> 8) | (word << 8);
+ ixgbe_shift_out_eeprom_bits(hw, word, 16);
+
+ if (page_size == 0)
+ break;
+
+ /* do not wrap around page */
+ if (((offset + i) & (page_size - 1)) ==
+ (page_size - 1))
+ break;
+ } while (++i < words);
+
+ ixgbe_standby_eeprom(hw);
+ msec_delay(10);
+ }
+ /* Done with writing - release the EEPROM */
+ ixgbe_release_eeprom(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word to be written to the EEPROM
+ *
+ * If ixgbe_eeprom_update_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_write_eeprom_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit words(s) from EEPROM
+ * @words: number of word(s)
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset + words > hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ /*
+ * We cannot hold synchronization semaphores for too long
+ * to avoid other entity starvation. However it is more efficient
+ * to read in bursts than synchronizing access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != IXGBE_SUCCESS)
+ break;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @words: number of word(s)
+ * @data: read 16 bit word(s) from EEPROM
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+ u16 word_in;
+ u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
+
+ /* Prepare the EEPROM for reading */
+ status = ixgbe_acquire_eeprom(hw);
+
+ if (status == IXGBE_SUCCESS) {
+ if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
+ ixgbe_release_eeprom(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ }
+
+ if (status == IXGBE_SUCCESS) {
+ for (i = 0; i < words; i++) {
+ ixgbe_standby_eeprom(hw);
+ /*
+ * Some SPI eeproms use the 8th address bit embedded
+ * in the opcode
+ */
+ if ((hw->eeprom.address_bits == 8) &&
+ ((offset + i) >= 128))
+ read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ ixgbe_shift_out_eeprom_bits(hw, read_opcode,
+ IXGBE_EEPROM_OPCODE_BITS);
+ ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+ hw->eeprom.address_bits);
+
+ /* Read the data. */
+ word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+
+ /* End this read operation */
+ ixgbe_release_eeprom(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit value from EEPROM
+ *
+ * Reads 16 bit value from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 *data)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of word(s)
+ * @data: 16 bit word(s) from the EEPROM
+ *
+ * Reads a 16 bit word(s) from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ u32 eerd;
+ s32 status = IXGBE_SUCCESS;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
+ goto out;
+ }
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+ IXGBE_EEPROM_RW_REG_START;
+
+ IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
+
+ if (status == IXGBE_SUCCESS) {
+ data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
+ IXGBE_EEPROM_RW_REG_DATA);
+ } else {
+ DEBUGOUT("Eeprom read timed out\n");
+ goto out;
+ }
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be used as a scratch pad
+ *
+ * Discover EEPROM page size by writing marching data at given offset.
+ * This function is called only when we are writing a new large buffer
+ * at given offset so the data would be overwritten anyway.
+ **/
+STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+ u16 offset)
+{
+ u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
+ s32 status = IXGBE_SUCCESS;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
+
+ for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
+ data[i] = i;
+
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
+ IXGBE_EEPROM_PAGE_SIZE_MAX, data);
+ hw->eeprom.word_page_size = 0;
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ /*
+ * When writing in burst more than the actual page size
+ * EEPROM address wraps around current page.
+ */
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
+
+ DEBUGOUT1("Detected EEPROM page size = %d words.",
+ hw->eeprom.word_page_size);
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eerd_generic - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
+}
+
+/**
+ * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of word(s)
+ * @data: word(s) write to the EEPROM
+ *
+ * Write a 16 bit word(s) to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ u32 eewr;
+ s32 status = IXGBE_SUCCESS;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_write_eewr_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
+ goto out;
+ }
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+ (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
+ IXGBE_EEPROM_RW_REG_START;
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Eeprom write EEWR timed out\n");
+ goto out;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Eeprom write EEWR timed out\n");
+ goto out;
+ }
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
+}
+
+/**
+ * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
+ * @hw: pointer to hardware structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
+ * read or write is done respectively.
+ **/
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+{
+ u32 i;
+ u32 reg;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
+
+ for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
+ if (ee_reg == IXGBE_NVM_POLL_READ)
+ reg = IXGBE_READ_REG(hw, IXGBE_EERD);
+ else
+ reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
+
+ if (reg & IXGBE_EEPROM_RW_REG_DONE) {
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+
+ if (i == IXGBE_EERD_EEWR_ATTEMPTS)
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "EEPROM read/write done polling timed out");
+
+ return status;
+}
+
+/**
+ * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ *
+ * Prepares EEPROM for access using bit-bang method. This function should
+ * be called before issuing a command to the EEPROM.
+ **/
+STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 eec;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_acquire_eeprom");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
+ != IXGBE_SUCCESS)
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ if (status == IXGBE_SUCCESS) {
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
+
+ /* Request EEPROM Access */
+ eec |= IXGBE_EEC_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
+
+ for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
+ if (eec & IXGBE_EEC_GNT)
+ break;
+ usec_delay(5);
+ }
+
+ /* Release if grant not acquired */
+ if (!(eec & IXGBE_EEC_GNT)) {
+ eec &= ~IXGBE_EEC_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
+ DEBUGOUT("Could not acquire EEPROM grant\n");
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ status = IXGBE_ERR_EEPROM;
+ }
+
+ /* Setup EEPROM for Read/Write */
+ if (status == IXGBE_SUCCESS) {
+ /* Clear CS and SK */
+ eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(1);
+ }
+ }
+ return status;
+}
+
+/**
+ * ixgbe_get_eeprom_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
+ **/
+STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM;
+ u32 timeout = 2000;
+ u32 i;
+ u32 swsm;
+
+ DEBUGFUNC("ixgbe_get_eeprom_semaphore");
+
+
+ /* Get SMBI software semaphore between device drivers first */
+ for (i = 0; i < timeout; i++) {
+ /*
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
+ if (!(swsm & IXGBE_SWSM_SMBI)) {
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
+ "not granted.\n");
+ /*
+ * this release is particularly important because our attempts
+ * above to get the semaphore may have succeeded, and if there
+ * was a timeout, we should unconditionally clear the semaphore
+ * bits to free the driver to make progress
+ */
+ ixgbe_release_eeprom_semaphore(hw);
+
+ usec_delay(50);
+ /*
+ * one last try
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
+ if (!(swsm & IXGBE_SWSM_SMBI))
+ status = IXGBE_SUCCESS;
+ }
+
+ /* Now get the semaphore between SW/FW through the SWESMBI bit */
+ if (status == IXGBE_SUCCESS) {
+ for (i = 0; i < timeout; i++) {
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
+
+ /* Set the SW EEPROM semaphore bit to request access */
+ swsm |= IXGBE_SWSM_SWESMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
+
+ /*
+ * If we set the bit successfully then we got the
+ * semaphore.
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
+ if (swsm & IXGBE_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ /*
+ * Release semaphores and return error if SW EEPROM semaphore
+ * was not granted because we don't have access to the EEPROM
+ */
+ if (i >= timeout) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "SWESMBI Software EEPROM semaphore not granted.\n");
+ ixgbe_release_eeprom_semaphore(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ } else {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Software semaphore SMBI between device drivers "
+ "not granted.\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_eeprom_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("ixgbe_release_eeprom_semaphore");
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+ /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
+ swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ready_eeprom - Polls for EEPROM ready
+ * @hw: pointer to hardware structure
+ **/
+STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 i;
+ u8 spi_stat_reg;
+
+ DEBUGFUNC("ixgbe_ready_eeprom");
+
+ /*
+ * Read "Status Register" repeatedly until the LSB is cleared. The
+ * EEPROM will signal that the command has been completed by clearing
+ * bit 0 of the internal status register. If it's not cleared within
+ * 5 milliseconds, then error out.
+ */
+ for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
+ ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
+ IXGBE_EEPROM_OPCODE_BITS);
+ spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
+ if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
+ break;
+
+ usec_delay(5);
+ ixgbe_standby_eeprom(hw);
+ };
+
+ /*
+ * On some parts, SPI write time could vary from 0-20mSec on 3.3V
+ * devices (and only 0-5mSec on 5V devices)
+ */
+ if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
+ DEBUGOUT("SPI EEPROM Status error\n");
+ status = IXGBE_ERR_EEPROM;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
+ * @hw: pointer to hardware structure
+ **/
+STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
+{
+ u32 eec;
+
+ DEBUGFUNC("ixgbe_standby_eeprom");
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
+
+ /* Toggle CS to flush commands */
+ eec |= IXGBE_EEC_CS;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(1);
+ eec &= ~IXGBE_EEC_CS;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(1);
+}
+
+/**
+ * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
+ * @hw: pointer to hardware structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ **/
+STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+ u16 count)
+{
+ u32 eec;
+ u32 mask;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
+
+ /*
+ * Mask is used to shift "count" bits of "data" out to the EEPROM
+ * one bit at a time. Determine the starting bit based on count
+ */
+ mask = 0x01 << (count - 1);
+
+ for (i = 0; i < count; i++) {
+ /*
+ * A "1" is shifted out to the EEPROM by setting bit "DI" to a
+ * "1", and then raising and then lowering the clock (the SK
+ * bit controls the clock input to the EEPROM). A "0" is
+ * shifted out to the EEPROM by setting "DI" to "0" and then
+ * raising and then lowering the clock.
+ */
+ if (data & mask)
+ eec |= IXGBE_EEC_DI;
+ else
+ eec &= ~IXGBE_EEC_DI;
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
+ IXGBE_WRITE_FLUSH(hw);
+
+ usec_delay(1);
+
+ ixgbe_raise_eeprom_clk(hw, &eec);
+ ixgbe_lower_eeprom_clk(hw, &eec);
+
+ /*
+ * Shift mask to signify next bit of data to shift in to the
+ * EEPROM
+ */
+ mask = mask >> 1;
+ };
+
+ /* We leave the "DI" bit set to "0" when we leave this routine. */
+ eec &= ~IXGBE_EEC_DI;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to hardware structure
+ * @count: number of bits to shift
+ **/
+STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
+{
+ u32 eec;
+ u32 i;
+ u16 data = 0;
+
+ DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
+
+ /*
+ * In order to read a register from the EEPROM, we need to shift
+ * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
+ * the clock input to the EEPROM (setting the SK bit), and then reading
+ * the value of the "DO" bit. During this "shifting in" process the
+ * "DI" bit should always be clear.
+ */
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
+
+ eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
+
+ for (i = 0; i < count; i++) {
+ data = data << 1;
+ ixgbe_raise_eeprom_clk(hw, &eec);
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
+
+ eec &= ~(IXGBE_EEC_DI);
+ if (eec & IXGBE_EEC_DO)
+ data |= 1;
+
+ ixgbe_lower_eeprom_clk(hw, &eec);
+ }
+
+ return data;
+}
+
+/**
+ * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
+ * @hw: pointer to hardware structure
+ * @eec: EEC register's current value
+ **/
+STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+ DEBUGFUNC("ixgbe_raise_eeprom_clk");
+
+ /*
+ * Raise the clock input to the EEPROM
+ * (setting the SK bit), then delay
+ */
+ *eec = *eec | IXGBE_EEC_SK;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(1);
+}
+
+/**
+ * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
+ * @hw: pointer to hardware structure
+ * @eec: EEC's current value
+ **/
+STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+ DEBUGFUNC("ixgbe_lower_eeprom_clk");
+
+ /*
+ * Lower the clock input to the EEPROM (clearing the SK bit), then
+ * delay
+ */
+ *eec = *eec & ~IXGBE_EEC_SK;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(1);
+}
+
+/**
+ * ixgbe_release_eeprom - Release EEPROM, release semaphores
+ * @hw: pointer to hardware structure
+ **/
+STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
+{
+ u32 eec;
+
+ DEBUGFUNC("ixgbe_release_eeprom");
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
+
+ eec |= IXGBE_EEC_CS; /* Pull CS high */
+ eec &= ~IXGBE_EEC_SK; /* Lower SCK */
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
+ IXGBE_WRITE_FLUSH(hw);
+
+ usec_delay(1);
+
+ /* Stop requesting EEPROM access */
+ eec &= ~IXGBE_EEC_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ /* Delay before attempt to obtain semaphore again to allow FW access */
+ msec_delay(hw->eeprom.semaphore_delay);
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+ u16 i;
+ u16 j;
+ u16 checksum = 0;
+ u16 length = 0;
+ u16 pointer = 0;
+ u16 word = 0;
+
+ DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
+
+ /* Include 0x0-0x3F in the checksum */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+ if (hw->eeprom.ops.read(hw, i, &word)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+ checksum += word;
+ }
+
+ /* Include all data from pointers except for the fw pointer */
+ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ if (hw->eeprom.ops.read(hw, i, &pointer)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+
+ /* If the pointer seems invalid */
+ if (pointer == 0xFFFF || pointer == 0)
+ continue;
+
+ if (hw->eeprom.ops.read(hw, pointer, &length)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+
+ if (length == 0xFFFF || length == 0)
+ continue;
+
+ for (j = pointer + 1; j <= pointer + length; j++) {
+ if (hw->eeprom.ops.read(hw, j, &word)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+ checksum += word;
+ }
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return (s32)checksum;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+ u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum)
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
+ return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum;
+
+ DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_mac_addr - Validate MAC address
+ * @mac_addr: pointer to MAC address.
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address.
+ **/
+s32 ixgbe_validate_mac_addr(u8 *mac_addr)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_validate_mac_addr");
+
+ /* Make sure it is not a multicast address */
+ if (IXGBE_IS_MULTICAST(mac_addr)) {
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+ /* Not a broadcast address */
+ } else if (IXGBE_IS_BROADCAST(mac_addr)) {
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+ /* Reject the zero address */
+ } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+ }
+ return status;
+}
+
+/**
+ * ixgbe_set_rar_generic - Set Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ u32 rar_low, rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("ixgbe_set_rar_generic");
+
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+ "RAR index %d is out of range.\n", index);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ /* setup VMDq pool selection before this RAR gets enabled */
+ hw->mac.ops.set_vmdq(hw, index, vmdq);
+
+ /*
+ * HW expects these in little endian so we reverse the byte
+ * order from network order (big endian) to little endian
+ */
+ rar_low = ((u32)addr[0] |
+ ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) |
+ ((u32)addr[3] << 24));
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+ rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+
+ if (enable_addr != 0)
+ rar_high |= IXGBE_RAH_AV;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clear_rar_generic - Remove Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ *
+ * Clears an ethernet address from a receive address register.
+ **/
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("ixgbe_clear_rar_generic");
+
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+ "RAR index %d is out of range.\n", index);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
+ * @hw: pointer to hardware structure
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ **/
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("ixgbe_init_rx_addrs_generic");
+
+ /*
+ * If the current mac address is valid, assume it is a software override
+ * to the permanent address.
+ * Otherwise, use the permanent address from the eeprom.
+ */
+ if (ixgbe_validate_mac_addr(hw->mac.addr) ==
+ IXGBE_ERR_INVALID_MAC_ADDR) {
+ /* Get the MAC address from the RAR0 for later reference */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
+ hw->mac.addr[0], hw->mac.addr[1],
+ hw->mac.addr[2]);
+ DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
+ hw->mac.addr[4], hw->mac.addr[5]);
+ } else {
+ /* Setup the receive address. */
+ DEBUGOUT("Overriding MAC Address in RAR[0]\n");
+ DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
+ hw->mac.addr[0], hw->mac.addr[1],
+ hw->mac.addr[2]);
+ DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
+ hw->mac.addr[4], hw->mac.addr[5]);
+
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ }
+
+ /* clear VMDq pool/queue selection for RAR 0 */
+ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+
+ hw->addr_ctrl.overflow_promisc = 0;
+
+ hw->addr_ctrl.rar_used_count = 1;
+
+ /* Zero out the other receive addresses. */
+ DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
+ for (i = 1; i < rar_entries; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+ }
+
+ /* Clear the MTA */
+ hw->addr_ctrl.mta_in_use = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+ DEBUGOUT(" Clearing MTA\n");
+ for (i = 0; i < hw->mac.mcft_size; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+
+ ixgbe_init_uta_tables(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_add_uc_addr - Adds a secondary unicast address.
+ * @hw: pointer to hardware structure
+ * @addr: new address
+ * @vmdq: VMDq "set" or "pool" index
+ *
+ * Adds it to unused receive address register or goes into promiscuous mode.
+ **/
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+ u32 rar_entries = hw->mac.num_rar_entries;
+ u32 rar;
+
+ DEBUGFUNC("ixgbe_add_uc_addr");
+
+ DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+ /*
+ * Place this address in the RAR if there is room,
+ * else put the controller into promiscuous mode
+ */
+ if (hw->addr_ctrl.rar_used_count < rar_entries) {
+ rar = hw->addr_ctrl.rar_used_count;
+ hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+ DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
+ hw->addr_ctrl.rar_used_count++;
+ } else {
+ hw->addr_ctrl.overflow_promisc++;
+ }
+
+ DEBUGOUT("ixgbe_add_uc_addr Complete\n");
+}
+
+/**
+ * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
+ * @hw: pointer to hardware structure
+ * @addr_list: the list of new addresses
+ * @addr_count: number of addresses
+ * @next: iterator function to walk the address list
+ *
+ * The given list replaces any existing list. Clears the secondary addrs from
+ * receive address registers. Uses unused receive address registers for the
+ * first secondary addresses, and falls back to promiscuous mode as needed.
+ *
+ * Drivers using secondary unicast addresses must set user_set_promisc when
+ * manually putting the device into promiscuous mode.
+ **/
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr next)
+{
+ u8 *addr;
+ u32 i;
+ u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
+ u32 uc_addr_in_use;
+ u32 fctrl;
+ u32 vmdq;
+
+ DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
+
+ /*
+ * Clear accounting of old secondary address list,
+ * don't count RAR[0]
+ */
+ uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
+ hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
+ hw->addr_ctrl.overflow_promisc = 0;
+
+ /* Zero out the other receive addresses */
+ DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
+ for (i = 0; i < uc_addr_in_use; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
+ }
+
+ /* Add the new addresses */
+ for (i = 0; i < addr_count; i++) {
+ DEBUGOUT(" Adding the secondary addresses:\n");
+ addr = next(hw, &addr_list, &vmdq);
+ ixgbe_add_uc_addr(hw, addr, vmdq);
+ }
+
+ if (hw->addr_ctrl.overflow_promisc) {
+ /* enable promisc if not already in overflow or set by user */
+ if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+ DEBUGOUT(" Entering address overflow promisc mode\n");
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_UPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ }
+ } else {
+ /* only disable if set by overflow, not by user */
+ if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+ DEBUGOUT(" Leaving address overflow promisc mode\n");
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl &= ~IXGBE_FCTRL_UPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ }
+ }
+
+ DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts the 12 bits, from a multicast address, to determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits, from
+ * incoming rx multicast addresses, to determine the bit-vector to check in
+ * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
+ * by the MO field of the MCSTCTRL. The MO field is set during initialization
+ * to mc_filter_type.
+ **/
+STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ DEBUGFUNC("ixgbe_mta_vector");
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ DEBUGOUT("MC filter type param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+/**
+ * ixgbe_set_mta - Set bit-vector in multicast table
+ * @hw: pointer to hardware structure
+ * @mc_addr: Multicast address
+ *
+ * Sets the bit-vector in the multicast table.
+ **/
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector;
+ u32 vector_bit;
+ u32 vector_reg;
+
+ DEBUGFUNC("ixgbe_set_mta");
+
+ hw->addr_ctrl.mta_in_use++;
+
+ vector = ixgbe_mta_vector(hw, mc_addr);
+ DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
+
+ /*
+ * The MTA is a register array of 128 32-bit registers. It is treated
+ * like an array of 4096 bits. We want to set bit
+ * BitArray[vector_value]. So we figure out what register the bit is
+ * in, read it, OR in the new bit, then write back the new value. The
+ * register is determined by the upper 7 bits of the vector value and
+ * the bit within that register are determined by the lower 5 bits of
+ * the value.
+ */
+ vector_reg = (vector >> 5) & 0x7F;
+ vector_bit = vector & 0x1F;
+ hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
+/**
+ * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
+ * @hw: pointer to hardware structure
+ * @mc_addr_list: the list of new multicast addresses
+ * @mc_addr_count: number of addresses
+ * @next: iterator function to walk the multicast address list
+ * @clear: flag, when set clears the table beforehand
+ *
+ * When the clear flag is set, the given list replaces any existing list.
+ * Hashes the given addresses into the multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr next,
+ bool clear)
+{
+ u32 i;
+ u32 vmdq;
+
+ DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
+
+ /*
+ * Set the new number of MC addresses that we are being requested to
+ * use.
+ */
+ hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+ hw->addr_ctrl.mta_in_use = 0;
+
+ /* Clear mta_shadow */
+ if (clear) {
+ DEBUGOUT(" Clearing MTA\n");
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+ }
+
+ /* Update mta_shadow */
+ for (i = 0; i < mc_addr_count; i++) {
+ DEBUGOUT(" Adding the multicast addresses:\n");
+ ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+ }
+
+ /* Enable mta */
+ for (i = 0; i < hw->mac.mcft_size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
+ hw->mac.mta_shadow[i]);
+
+ if (hw->addr_ctrl.mta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+
+ DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_mc_generic - Enable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+ DEBUGFUNC("ixgbe_enable_mc_generic");
+
+ if (a->mta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
+ hw->mac.mc_filter_type);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_disable_mc_generic - Disable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+ DEBUGFUNC("ixgbe_disable_mc_generic");
+
+ if (a->mta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_fc_enable_generic - Enable flow control
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to the current settings.
+ **/
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 mflcn_reg, fccfg_reg;
+ u32 reg;
+ u32 fcrtl, fcrth;
+ int i;
+
+ DEBUGFUNC("ixgbe_fc_enable_generic");
+
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ DEBUGOUT("Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
+ /* Negotiate the fc mode to use */
+ hw->mac.ops.fc_autoneg(hw);
+
+ /* Disable any previous flow control settings */
+ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
+
+ fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
+
+ /*
+ * The possible values of fc.current_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.current_mode) {
+ case ixgbe_fc_none:
+ /*
+ * Flow control is disabled by software override or autoneg.
+ * The code below will actually disable it in the HW.
+ */
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ mflcn_reg |= IXGBE_MFLCN_RFCE;
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ mflcn_reg |= IXGBE_MFLCN_RFCE;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
+ break;
+ default:
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+ "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ break;
+ }
+
+ /* Set 802.3x based flow control settings. */
+ mflcn_reg |= IXGBE_MFLCN_DPF;
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
+
+
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+ fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+ /*
+ * In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
+ */
+ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
+ }
+
+ /* Configure pause time (2 TCs per register) */
+ reg = hw->fc.pause_time * 0x00010001;
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_negotiate_fc - Negotiate flow control
+ * @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ * Find the intersection between advertised settings and link partner's
+ * advertised settings
+ **/
+s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+ if ((!(adv_reg)) || (!(lp_reg))) {
+ ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
+ "Local or link partner's advertised flow control "
+ "settings are NULL. Local: %x, link partner: %x\n",
+ adv_reg, lp_reg);
+ return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ }
+
+ if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
+ /*
+ * Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_full) {
+ hw->fc.current_mode = ixgbe_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control=RX PAUSE frames only\n");
+ }
+ } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
+ DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+ } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according on 1 gig fiber.
+ **/
+STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+{
+ u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+ /*
+ * On multispeed fiber at 1g, bail out if
+ * - link is up but AN did not complete, or if
+ * - link is up and AN completed but timed out
+ */
+
+ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+ (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+ DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
+ goto out;
+ }
+
+ pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+
+ ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
+ pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE,
+ IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+{
+ u32 links2, anlp1_reg, autoc_reg, links;
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+ /*
+ * On backplane, bail out if
+ * - backplane autoneg was not completed, or if
+ * - we are 82599 and link partner is not AN enabled
+ */
+ links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ goto out;
+ }
+
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+ if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+ DEBUGOUT("Link partner is not AN enabled\n");
+ goto out;
+ }
+ }
+ /*
+ * Read the 10g AN autoc and LP ability registers and resolve
+ * local flow control settings accordingly
+ */
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+
+ ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
+ anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
+ IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+{
+ u16 technology_ability_reg = 0;
+ u16 lp_technology_ability_reg = 0;
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &technology_ability_reg);
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &lp_technology_ability_reg);
+
+ return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
+ (u32)lp_technology_ability_reg,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ * ixgbe_fc_autoneg - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Compares our advertised flow control capabilities to those advertised by
+ * our link partner, and determines the proper flow control mode to use.
+ **/
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ DEBUGFUNC("ixgbe_fc_autoneg");
+
+ /*
+ * AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "Flow control autoneg is disabled");
+ goto out;
+ }
+
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up) {
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
+ goto out;
+ }
+
+ switch (hw->phy.media_type) {
+ /* Autoneg flow control on fiber adapters */
+ case ixgbe_media_type_fiber_fixed:
+ case ixgbe_media_type_fiber_qsfp:
+ case ixgbe_media_type_fiber:
+ if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+ ret_val = ixgbe_fc_autoneg_fiber(hw);
+ break;
+
+ /* Autoneg flow control on backplane adapters */
+ case ixgbe_media_type_backplane:
+ ret_val = ixgbe_fc_autoneg_backplane(hw);
+ break;
+
+ /* Autoneg flow control on copper adapters */
+ case ixgbe_media_type_copper:
+ if (ixgbe_device_supports_autoneg_fc(hw))
+ ret_val = ixgbe_fc_autoneg_copper(hw);
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ if (ret_val == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/*
+ * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
+ * @hw: pointer to hardware structure
+ *
+ * System-wide timeout range is encoded in PCIe Device Control2 register.
+ *
+ * Add 10% to specified maximum and return the number of times to poll for
+ * completion timeout, in units of 100 microsec. Never return less than
+ * 800 = 80 millisec.
+ */
+STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
+{
+ s16 devctl2;
+ u32 pollcnt;
+
+ devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
+ devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
+
+ switch (devctl2) {
+ case IXGBE_PCIDEVCTRL2_65_130ms:
+ pollcnt = 1300; /* 130 millisec */
+ break;
+ case IXGBE_PCIDEVCTRL2_260_520ms:
+ pollcnt = 5200; /* 520 millisec */
+ break;
+ case IXGBE_PCIDEVCTRL2_1_2s:
+ pollcnt = 20000; /* 2 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_4_8s:
+ pollcnt = 80000; /* 8 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_17_34s:
+ pollcnt = 34000; /* 34 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
+ case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
+ case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
+ case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
+ default:
+ pollcnt = 800; /* 80 millisec minimum */
+ break;
+ }
+
+ /* add 10% to spec maximum */
+ return (pollcnt * 11) / 10;
+}
+
+/**
+ * ixgbe_disable_pcie_master - Disable PCI-express master access
+ * @hw: pointer to hardware structure
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
+ * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
+ * is returned signifying master requests disabled.
+ **/
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 i, poll;
+ u16 value;
+
+ DEBUGFUNC("ixgbe_disable_pcie_master");
+
+ /* Always set this bit to ensure any future transactions are blocked */
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
+
+ /* Exit if master requests are blocked */
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
+ IXGBE_REMOVED(hw->hw_addr))
+ goto out;
+
+ /* Poll for master request bit to clear */
+ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ usec_delay(100);
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ goto out;
+ }
+
+ /*
+ * Two consecutive resets are required via CTRL.RST per datasheet
+ * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new master requests from
+ * being issued by our device. We then must wait 1usec or more for any
+ * remaining completions from the PCIe bus to trickle in, and then reset
+ * again to clear out any effects they may have had on our device.
+ */
+ DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
+ hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+
+ if (hw->mac.type >= ixgbe_mac_X550)
+ goto out;
+
+ /*
+ * Before proceeding, make sure that the PCIe block does not have
+ * transactions pending.
+ */
+ poll = ixgbe_pcie_timeout_poll(hw);
+ for (i = 0; i < poll; i++) {
+ usec_delay(100);
+ value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (IXGBE_REMOVED(hw->hw_addr))
+ goto out;
+ if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+ goto out;
+ }
+
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "PCIe transaction pending bit also did not clear.\n");
+ status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the GSSR register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 gssr = 0;
+ u32 swmask = mask;
+ u32 fwmask = mask << 5;
+ u32 timeout = 200;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_acquire_swfw_sync");
+
+ for (i = 0; i < timeout; i++) {
+ /*
+ * SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
+ */
+ if (ixgbe_get_eeprom_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+ if (!(gssr & (fwmask | swmask))) {
+ gssr |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+ ixgbe_release_eeprom_semaphore(hw);
+ return IXGBE_SUCCESS;
+ } else {
+ /* Resource is currently in use by FW or SW */
+ ixgbe_release_eeprom_semaphore(hw);
+ msec_delay(5);
+ }
+ }
+
+ /* If time expired clear the bits holding the lock and retry */
+ if (gssr & (fwmask | swmask))
+ ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
+
+ msec_delay(5);
+ return IXGBE_ERR_SWFW_SYNC;
+}
+
+/**
+ * ixgbe_release_swfw_sync - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the GSSR register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 gssr;
+ u32 swmask = mask;
+
+ DEBUGFUNC("ixgbe_release_swfw_sync");
+
+ ixgbe_get_eeprom_semaphore(hw);
+
+ gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+ gssr &= ~swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+
+ ixgbe_release_eeprom_semaphore(hw);
+}
+
+/**
+ * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path and waits for the HW to internally empty
+ * the Rx security block
+ **/
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECRX_POLL 4000
+
+ int i;
+ int secrxreg;
+
+ DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
+
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+ if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
+ break;
+ else
+ /* Use interrupt-safe sleep just in case */
+ usec_delay(10);
+ }
+
+ /* For informational purposes only */
+ if (i >= IXGBE_MAX_SECRX_POLL)
+ DEBUGOUT("Rx unit being enabled before security "
+ "path fully disabled. Continuing with init.\n");
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @locked: bool to indicate whether the SW/FW lock was taken
+ * @reg_val: Value we read from AUTOC
+ *
+ * The default case requires no protection so just to the register read.
+ */
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+ *locked = false;
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous read.
+ *
+ * The default case requires no protection so just to the register write.
+ */
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+{
+ UNREFERENCED_1PARAMETER(locked);
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
+{
+ u32 secrxreg;
+
+ DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit
+ **/
+s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
+{
+ DEBUGFUNC("ixgbe_enable_rx_dma_generic");
+
+ if (regval & IXGBE_RXCTRL_RXEN)
+ ixgbe_enable_rx(hw);
+ else
+ ixgbe_disable_rx(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_blink_led_start_generic - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ **/
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+{
+ ixgbe_link_speed speed = 0;
+ bool link_up = 0;
+ u32 autoc_reg = 0;
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = IXGBE_SUCCESS;
+ bool locked = false;
+
+ DEBUGFUNC("ixgbe_blink_led_start_generic");
+
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
+ /*
+ * Link must be up to auto-blink the LEDs;
+ * Force it if link is down.
+ */
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+
+ if (!link_up) {
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ autoc_reg |= IXGBE_AUTOC_FLU;
+
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(10);
+ }
+
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg |= IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ **/
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 autoc_reg = 0;
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = IXGBE_SUCCESS;
+ bool locked = false;
+
+ DEBUGFUNC("ixgbe_blink_led_stop_generic");
+
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
+
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ autoc_reg &= ~IXGBE_AUTOC_FLU;
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg &= ~IXGBE_LED_BLINK(index);
+ led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_offset: SAN MAC address offset
+ *
+ * This function will read the EEPROM location for the SAN MAC address
+ * pointer, and returns the value at that location. This is used in both
+ * get and set mac_addr routines.
+ **/
+STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
+
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available.
+ */
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
+ san_mac_offset);
+ if (ret_val) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom at offset %d failed",
+ IXGBE_SAN_MAC_ADDR_PTR);
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Reads the SAN MAC address from the EEPROM, if it's available. This is
+ * per-port, so set_lan_id() must be called before reading the addresses.
+ * set_lan_id() is called by identify_sfp(), but this cannot be relied
+ * upon for non-SFP connections, so we must call it here.
+ **/
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ u16 san_mac_data, san_mac_offset;
+ u8 i;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
+
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available. If they're not, no point in calling set_lan_id() here.
+ */
+ ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+ if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+ goto san_mac_addr_out;
+
+ /* make sure we know which port we need to program */
+ hw->mac.ops.set_lan_id(hw);
+ /* apply the port offset to the address offset */
+ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ for (i = 0; i < 3; i++) {
+ ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
+ &san_mac_data);
+ if (ret_val) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed",
+ san_mac_offset);
+ goto san_mac_addr_out;
+ }
+ san_mac_addr[i * 2] = (u8)(san_mac_data);
+ san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+ san_mac_offset++;
+ }
+ return IXGBE_SUCCESS;
+
+san_mac_addr_out:
+ /*
+ * No addresses available in this EEPROM. It's not an
+ * error though, so just wipe the local address and return.
+ */
+ for (i = 0; i < 6; i++)
+ san_mac_addr[i] = 0xFF;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Write a SAN MAC address to the EEPROM.
+ **/
+s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ s32 ret_val;
+ u16 san_mac_data, san_mac_offset;
+ u8 i;
+
+ DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
+
+ /* Look for SAN mac address pointer. If not defined, return */
+ ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+ if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+ return IXGBE_ERR_NO_SAN_ADDR_PTR;
+
+ /* Make sure we know which port we need to write */
+ hw->mac.ops.set_lan_id(hw);
+ /* Apply the port offset to the address offset */
+ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+
+ for (i = 0; i < 3; i++) {
+ san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
+ san_mac_data |= (u16)(san_mac_addr[i * 2]);
+ hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
+ san_mac_offset++;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
+ * @hw: pointer to hardware structure
+ *
+ * Read PCIe configuration space, and get the MSI-X vector count from
+ * the capabilities table.
+ **/
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+{
+ u16 msix_count = 1;
+ u16 max_msix_count;
+ u16 pcie_offset;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
+ default:
+ return msix_count;
+ }
+
+ DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
+ msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
+ if (IXGBE_REMOVED(hw->hw_addr))
+ msix_count = 0;
+ msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+ /* MSI-X count is zero-based in HW */
+ msix_count++;
+
+ if (msix_count > max_msix_count)
+ msix_count = max_msix_count;
+
+ return msix_count;
+}
+
+/**
+ * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
+ * @hw: pointer to hardware structure
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq pool to assign
+ *
+ * Puts an ethernet address into a receive address register, or
+ * finds the rar that it is aleady in; adds to the pool list
+ **/
+s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+ static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
+ u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
+ u32 rar;
+ u32 rar_low, rar_high;
+ u32 addr_low, addr_high;
+
+ DEBUGFUNC("ixgbe_insert_mac_addr_generic");
+
+ /* swap bytes for HW little endian */
+ addr_low = addr[0] | (addr[1] << 8)
+ | (addr[2] << 16)
+ | (addr[3] << 24);
+ addr_high = addr[4] | (addr[5] << 8);
+
+ /*
+ * Either find the mac_id in rar or find the first empty space.
+ * rar_highwater points to just after the highest currently used
+ * rar in order to shorten the search. It grows when we add a new
+ * rar to the top.
+ */
+ for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+
+ if (((IXGBE_RAH_AV & rar_high) == 0)
+ && first_empty_rar == NO_EMPTY_RAR_FOUND) {
+ first_empty_rar = rar;
+ } else if ((rar_high & 0xFFFF) == addr_high) {
+ rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
+ if (rar_low == addr_low)
+ break; /* found it already in the rars */
+ }
+ }
+
+ if (rar < hw->mac.rar_highwater) {
+ /* already there so just add to the pool bits */
+ ixgbe_set_vmdq(hw, rar, vmdq);
+ } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
+ /* stick it into first empty RAR slot we found */
+ rar = first_empty_rar;
+ ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+ } else if (rar == hw->mac.rar_highwater) {
+ /* add it to the top of the list and inc the highwater mark */
+ ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+ hw->mac.rar_highwater++;
+ } else if (rar >= hw->mac.num_rar_entries) {
+ return IXGBE_ERR_INVALID_MAC_ADDR;
+ }
+
+ /*
+ * If we found rar[0], make sure the default pool bit (we use pool 0)
+ * remains cleared to be sure default pool packets will get delivered
+ */
+ if (rar == 0)
+ ixgbe_clear_vmdq(hw, rar, 0);
+
+ return rar;
+}
+
+/**
+ * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to disassociate
+ * @vmdq: VMDq pool index to remove from the rar
+ **/
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar_lo, mpsar_hi;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("ixgbe_clear_vmdq_generic");
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+ "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+
+ if (IXGBE_REMOVED(hw->hw_addr))
+ goto done;
+
+ if (!mpsar_lo && !mpsar_hi)
+ goto done;
+
+ if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+ if (mpsar_lo) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ mpsar_lo = 0;
+ }
+ if (mpsar_hi) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ mpsar_hi = 0;
+ }
+ } else if (vmdq < 32) {
+ mpsar_lo &= ~(1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
+ } else {
+ mpsar_hi &= ~(1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
+ }
+
+ /* was that the last pool using this rar? */
+ if (mpsar_lo == 0 && mpsar_hi == 0 &&
+ rar != 0 && rar != hw->mac.san_mac_rar_index)
+ hw->mac.ops.clear_rar(hw, rar);
+done:
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("ixgbe_set_vmdq_generic");
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+ "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ if (vmdq < 32) {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar |= 1 << vmdq;
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+ } else {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ mpsar |= 1 << (vmdq - 32);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * This function should only be involved in the IOV mode.
+ * In IOV mode, Default pool is next pool after the number of
+ * VFs advertized and not 0.
+ * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
+ *
+ * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+{
+ u32 rar = hw->mac.san_mac_rar_index;
+
+ DEBUGFUNC("ixgbe_set_vmdq_san_mac");
+
+ if (vmdq < 32) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+{
+ int i;
+
+ DEBUGFUNC("ixgbe_init_uta_tables_generic");
+ DEBUGOUT(" Clearing UTA\n");
+
+ for (i = 0; i < 128; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
+ * vlanid not found
+ *
+ *
+ * return the VLVF index where this VLAN id should be placed
+ *
+ **/
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
+{
+ s32 regindex, first_empty_slot;
+ u32 bits;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* if vlvf_bypass is set we don't want to use an empty slot, we
+ * will simply bypass the VLVF if there are no entries present in the
+ * VLVF that contain our VLAN
+ */
+ first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
+
+ /* add VLAN enable bit for comparison */
+ vlan |= IXGBE_VLVF_VIEN;
+
+ /* Search for the vlan id in the VLVF entries. Save off the first empty
+ * slot found along the way.
+ *
+ * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
+ */
+ for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+ if (bits == vlan)
+ return regindex;
+ if (!first_empty_slot && !bits)
+ first_empty_slot = regindex;
+ }
+
+ /* If we are here then we didn't find the VLAN. Return first empty
+ * slot we found during our search, else error.
+ */
+ if (!first_empty_slot)
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
+
+ return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
+}
+
+/**
+ * ixgbe_set_vfta_generic - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass)
+{
+ u32 regidx, vfta_delta, vfta;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_set_vfta_generic");
+
+ if (vlan > 4095 || vind > 63)
+ return IXGBE_ERR_PARAM;
+
+ /*
+ * this is a 2 part operation - first the VFTA, then the
+ * VLVF and VLVFB if VT Mode is set
+ * We don't write the VFTA until we know the VLVF part succeeded.
+ */
+
+ /* Part 1
+ * The VFTA is a bitstring made up of 128 32-bit registers
+ * that enable the particular VLAN id, much like the MTA:
+ * bits[11-5]: which register
+ * bits[4-0]: which bit in the register
+ */
+ regidx = vlan / 32;
+ vfta_delta = 1 << (vlan % 32);
+ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
+
+ /*
+ * vfta_delta represents the difference between the current value
+ * of vfta and the value we want in the register. Since the diff
+ * is an XOR mask we can just update the vfta using an XOR
+ */
+ vfta_delta &= vlan_on ? ~vfta : vfta;
+ vfta ^= vfta_delta;
+
+ /* Part 2
+ * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
+ */
+ ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
+ vfta, vlvf_bypass);
+ if (ret_val != IXGBE_SUCCESS) {
+ if (vlvf_bypass)
+ goto vfta_update;
+ return ret_val;
+ }
+
+vfta_update:
+ /* Update VFTA now that we are ready for traffic */
+ if (vfta_delta)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_delta: pointer to the difference between the current value of VFTA
+ * and the desired value
+ * @vfta: the desired value of the VFTA
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
+ *
+ * Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, u32 *vfta_delta, u32 vfta,
+ bool vlvf_bypass)
+{
+ u32 bits;
+ s32 vlvf_index;
+
+ DEBUGFUNC("ixgbe_set_vlvf_generic");
+
+ if (vlan > 4095 || vind > 63)
+ return IXGBE_ERR_PARAM;
+
+ /* If VT Mode is set
+ * Either vlan_on
+ * make sure the vlan is in VLVF
+ * set the vind bit in the matching VLVFB
+ * Or !vlan_on
+ * clear the pool bit and possibly the vind
+ */
+ if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
+ return IXGBE_SUCCESS;
+
+ vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
+ if (vlvf_index < 0)
+ return vlvf_index;
+
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
+
+ /* set the pool bit */
+ bits |= 1 << (vind % 32);
+ if (vlan_on)
+ goto vlvf_update;
+
+ /* clear the pool bit */
+ bits ^= 1 << (vind % 32);
+
+ if (!bits &&
+ !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
+ /* Clear VFTA first, then disable VLVF. Otherwise
+ * we run the risk of stray packets leaking into
+ * the PF via the default pool
+ */
+ if (*vfta_delta)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
+
+ /* disable VLVF and clear remaining bit from pool */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
+
+ return IXGBE_SUCCESS;
+ }
+
+ /* If there are still bits set in the VLVFB registers
+ * for the VLAN ID indicated we need to see if the
+ * caller is requesting that we clear the VFTA entry bit.
+ * If the caller has requested that we clear the VFTA
+ * entry bit but there are still pools/VFs using this VLAN
+ * ID entry then ignore the request. We're not worried
+ * about the case where we're turning the VFTA VLAN ID
+ * entry bit on, only when requested to turn it off as
+ * there may be multiple pools and/or VFs using the
+ * VLAN ID entry. In that case we cannot clear the
+ * VFTA bit until all pools/VFs using that VLAN ID have also
+ * been cleared. This will be indicated by "bits" being
+ * zero.
+ */
+ *vfta_delta = 0;
+
+vlvf_update:
+ /* record pool change and enable VLAN ID if not already enabled */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filer table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+{
+ u32 offset;
+
+ DEBUGFUNC("ixgbe_clear_vfta_generic");
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+ for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
+ * @hw: pointer to hardware structure
+ *
+ * Contains the logic to identify if we need to verify link for the
+ * crosstalk fix
+ **/
+static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
+{
+
+ /* Does FW say we need the fix */
+ if (!hw->need_crosstalk_fix)
+ return false;
+
+ /* Only consider SFP+ PHYs i.e. media type fiber */
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_fiber_qsfp:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ixgbe_check_mac_link_generic - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ u32 links_reg, links_orig;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_check_mac_link_generic");
+
+ /* If Crosstalk fix enabled do the sanity check of making sure
+ * the SFP+ cage is full.
+ */
+ if (ixgbe_need_crosstalk_fix(hw)) {
+ u32 sfp_cage_full;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP2;
+ break;
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP0;
+ break;
+ default:
+ /* sanity check - No SFP+ devices here */
+ sfp_cage_full = false;
+ break;
+ }
+
+ if (!sfp_cage_full) {
+ *link_up = false;
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ return IXGBE_SUCCESS;
+ }
+ }
+
+ /* clear the old state */
+ links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+ if (links_orig != links_reg) {
+ DEBUGOUT2("LINKS changed from %08X to %08X\n",
+ links_orig, links_reg);
+ }
+
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ if (links_reg & IXGBE_LINKS_UP) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msec_delay(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ }
+ } else {
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+#ifdef PREBOOT_SUPPORT
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+#else
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+#endif /* PREBOOT_SUPPORT */
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ * the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function will read the EEPROM from the alternative SAN MAC address
+ * block to check the support for the alternative WWNN/WWPN prefix support.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ u16 offset, caps;
+ u16 alt_san_mac_blk_offset;
+
+ DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
+
+ /* clear output first */
+ *wwnn_prefix = 0xFFFF;
+ *wwpn_prefix = 0xFFFF;
+
+ /* check if alternative SAN MAC is supported */
+ offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
+ if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
+ goto wwn_prefix_err;
+
+ if ((alt_san_mac_blk_offset == 0) ||
+ (alt_san_mac_blk_offset == 0xFFFF))
+ goto wwn_prefix_out;
+
+ /* check capability in alternative san mac address block */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+ if (hw->eeprom.ops.read(hw, offset, &caps))
+ goto wwn_prefix_err;
+ if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+ goto wwn_prefix_out;
+
+ /* get the corresponding prefix for WWNN/WWPN */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+ if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", offset);
+ }
+
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+ if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
+ goto wwn_prefix_err;
+
+wwn_prefix_out:
+ return IXGBE_SUCCESS;
+
+wwn_prefix_err:
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", offset);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
+ * @hw: pointer to hardware structure
+ * @bs: the fcoe boot status
+ *
+ * This function will read the FCOE boot status from the iSCSI FCOE block
+ **/
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
+{
+ u16 offset, caps, flags;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
+
+ /* clear output first */
+ *bs = ixgbe_fcoe_bootstatus_unavailable;
+
+ /* check if FCOE IBA block is present */
+ offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
+ status = hw->eeprom.ops.read(hw, offset, &caps);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
+ goto out;
+
+ /* check if iSCSI FCOE block is populated */
+ status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if ((offset == 0) || (offset == 0xFFFF))
+ goto out;
+
+ /* read fcoe flags in iSCSI FCOE block */
+ offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
+ status = hw->eeprom.ops.read(hw, offset, &flags);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
+ *bs = ixgbe_fcoe_bootstatus_enabled;
+ else
+ *bs = ixgbe_fcoe_bootstatus_disabled;
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for MAC anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
+ *
+ **/
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8;
+ u32 pfvfspoof;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
+/**
+ * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for VLAN anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ *
+ **/
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
+ u32 pfvfspoof;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
+/**
+ * ixgbe_get_device_caps_generic - Get additional device capabilities
+ * @hw: pointer to hardware structure
+ * @device_caps: the EEPROM word with the extra device capabilities
+ *
+ * This function will read the EEPROM location for the device capabilities,
+ * and return the word through device_caps.
+ **/
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
+{
+ DEBUGFUNC("ixgbe_get_device_caps_generic");
+
+ hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
+{
+ u32 regval;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
+
+ /* Enable relaxed ordering */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+ regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+ }
+
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+}
+
+/**
+ * ixgbe_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to EEPROM
+ * @length: size of EEPROM to calculate a checksum for
+ * Calculates the checksum for some buffer on a specified length. The
+ * checksum calculated is returned.
+ **/
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ DEBUGFUNC("ixgbe_calculate_checksum");
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8) (0 - sum);
+}
+
+/**
+ * ixgbe_hic_unlocked - Issue command to manageability block unlocked
+ * @hw: pointer to the HW structure
+ * @buffer: command to write and where the return status will be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ *
+ * Communicates with the manageability block. On success return IXGBE_SUCCESS
+ * else returns semaphore error when encountering an error acquiring
+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ *
+ * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
+ * by the caller.
+ **/
+s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+ u32 timeout)
+{
+ u32 hicr, i, fwsts;
+ u16 dword_len;
+
+ DEBUGFUNC("ixgbe_hic_unlocked");
+
+ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Set bit 9 of FWSTS clearing FW reset indication */
+ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
+ IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
+
+ /* Check that the host interface is enabled. */
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if (!(hicr & IXGBE_HICR_EN)) {
+ DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs. We must be DWORD aligned */
+ if (length % sizeof(u32)) {
+ DEBUGOUT("Buffer length failure, not aligned to dword");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ dword_len = length >> 2;
+
+ /* The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < dword_len; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ i, IXGBE_CPU_TO_LE32(buffer[i]));
+
+ /* Setting this bit tells the ARC that a new command is pending. */
+ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
+
+ for (i = 0; i < timeout; i++) {
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if (!(hicr & IXGBE_HICR_C))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check command completion */
+ if ((timeout && i == timeout) ||
+ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
+ ERROR_REPORT1(IXGBE_ERROR_CAUTION,
+ "Command has failed with no status valid.\n");
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_host_interface_command - Issue command to manageability block
+ * @hw: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (true) or not (false)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on command. Decoding
+ * is not easily understood without making a table of commands.
+ * So we will leave this up to the caller to read back the data
+ * in these cases.
+ *
+ * Communicates with the manageability block. On success return IXGBE_SUCCESS
+ * else returns semaphore error when encountering an error acquiring
+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
+{
+ u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
+ u16 buf_len;
+ s32 status;
+ u32 bi;
+ u32 dword_len;
+
+ DEBUGFUNC("ixgbe_host_interface_command");
+
+ if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Take management host interface semaphore */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+ if (status)
+ return status;
+
+ status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
+ if (status)
+ goto rel_out;
+
+ if (!return_data)
+ goto rel_out;
+
+ /* Calculate length in DWORDs */
+ dword_len = hdr_size >> 2;
+
+ /* first pull in the header so we know the buffer length */
+ for (bi = 0; bi < dword_len; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
+ }
+
+ /*
+ * If there is any thing in data position pull it in
+ * Read Flash command requires reading buffer length from
+ * two byes instead of one byte
+ */
+ if (resp->cmd == 0x30) {
+ for (; bi < dword_len + 2; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ bi);
+ IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ }
+ buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
+ & 0xF00) | resp->buf_len;
+ hdr_size += (2 << 2);
+ } else {
+ buf_len = resp->buf_len;
+ }
+ if (!buf_len)
+ goto rel_out;
+
+ if (length < buf_len + hdr_size) {
+ DEBUGOUT("Buffer not large enough for reply message.\n");
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
+ }
+
+ /* Calculate length in DWORDs, add 3 for odd lengths */
+ dword_len = (buf_len + 3) >> 2;
+
+ /* Pull in the rest of the buffer (bi is where we left off) */
+ for (; bi <= dword_len; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
+ }
+
+rel_out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+
+ return status;
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: unused
+ * @driver_ver: unused
+ *
+ * Sends driver version number to firmware through the manageability
+ * block. On success return IXGBE_SUCCESS
+ * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver)
+{
+ struct ixgbe_hic_drv_info fw_cmd;
+ int i;
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
+ UNREFERENCED_2PARAMETER(len, driver_ver);
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ fw_cmd.pad = 0;
+ fw_cmd.pad2 = 0;
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(fw_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (ret_val != IXGBE_SUCCESS)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = IXGBE_SUCCESS;
+ else
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy)
+{
+ u32 pbsize = hw->mac.rx_pb_size;
+ int i = 0;
+ u32 rxpktsize, txpktsize, txpbthresh;
+
+ /* Reserve headroom */
+ pbsize -= headroom;
+
+ if (!num_pb)
+ num_pb = 1;
+
+ /* Divide remaining packet buffer space amongst the number of packet
+ * buffers requested using supplied strategy.
+ */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+ /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
+ * buffer with 5/8 of the packet buffer space.
+ */
+ rxpktsize = (pbsize * 5) / (num_pb * 4);
+ pbsize -= rxpktsize * (num_pb / 2);
+ rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
+ for (; i < (num_pb / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* fall through - configure remaining packet buffers */
+ case PBA_STRATEGY_EQUAL:
+ rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
+ for (; i < num_pb; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ default:
+ break;
+ }
+
+ /* Only support an equally distributed Tx packet buffer strategy. */
+ txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
+ txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < num_pb; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+ }
+
+ /* Clear unused TCs, if any, to zero buffer size*/
+ for (; i < IXGBE_MAX_PB; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+ }
+}
+
+/**
+ * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
+ * @hw: pointer to the hardware structure
+ *
+ * The 82599 and x540 MACs can experience issues if TX work is still pending
+ * when a reset occurs. This function prevents this by flushing the PCIe
+ * buffers on the system.
+ **/
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
+{
+ u32 gcr_ext, hlreg0, i, poll;
+ u16 value;
+
+ /*
+ * If double reset is not requested then all transactions should
+ * already be clear and as such there is no work to do
+ */
+ if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
+ return;
+
+ /*
+ * Set loopback enable to prevent any transmits from being sent
+ * should the link come up. This assumes that the RXCTRL.RXEN bit
+ * has already been cleared.
+ */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+
+ /* Wait for a last completion before clearing buffers */
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(3);
+
+ /*
+ * Before proceeding, make sure that the PCIe block does not have
+ * transactions pending.
+ */
+ poll = ixgbe_pcie_timeout_poll(hw);
+ for (i = 0; i < poll; i++) {
+ usec_delay(100);
+ value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (IXGBE_REMOVED(hw->hw_addr))
+ goto out;
+ if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+ goto out;
+ }
+
+out:
+ /* initiate cleaning flow for buffers in the PCIe transaction layer */
+ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
+ gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
+
+ /* Flush all writes and allow 20usec for all transactions to clear */
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(20);
+
+ /* restore previous register values */
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+}
+
+STATIC const u8 ixgbe_emc_temp_data[4] = {
+ IXGBE_EMC_INTERNAL_DATA,
+ IXGBE_EMC_DIODE1_DATA,
+ IXGBE_EMC_DIODE2_DATA,
+ IXGBE_EMC_DIODE3_DATA
+};
+STATIC const u8 ixgbe_emc_therm_limit[4] = {
+ IXGBE_EMC_INTERNAL_THERM_LIMIT,
+ IXGBE_EMC_DIODE1_THERM_LIMIT,
+ IXGBE_EMC_DIODE2_THERM_LIMIT,
+ IXGBE_EMC_DIODE3_THERM_LIMIT
+};
+
+/**
+ * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Returns the thermal sensor data structure
+ **/
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 i;
+ struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");
+
+ /* Only support thermal sensors attached to 82599 physical port 0 */
+ if ((hw->mac.type != ixgbe_mac_82599EB) ||
+ (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
+ status = IXGBE_NOT_IMPLEMENTED;
+ goto out;
+ }
+
+ status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
+ if (status)
+ goto out;
+
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
+ status = IXGBE_NOT_IMPLEMENTED;
+ goto out;
+ }
+
+ status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
+ if (status)
+ goto out;
+
+ if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
+ != IXGBE_ETS_TYPE_EMC) {
+ status = IXGBE_NOT_IMPLEMENTED;
+ goto out;
+ }
+
+ num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+ if (num_sensors > IXGBE_MAX_SENSORS)
+ num_sensors = IXGBE_MAX_SENSORS;
+
+ for (i = 0; i < num_sensors; i++) {
+ status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
+ &ets_sensor);
+ if (status)
+ goto out;
+
+ sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+ IXGBE_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+ IXGBE_ETS_DATA_LOC_SHIFT);
+
+ if (sensor_location != 0) {
+ status = hw->phy.ops.read_i2c_byte(hw,
+ ixgbe_emc_temp_data[sensor_index],
+ IXGBE_I2C_THERMAL_SENSOR_ADDR,
+ &data->sensor[i].temp);
+ if (status)
+ goto out;
+ }
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Inits the thermal sensor thresholds according to the NVM map
+ * and save off the threshold and location values into mac.thermal_sensor_data
+ **/
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 offset;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 low_thresh_delta;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 therm_limit;
+ u8 i;
+ struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");
+
+ memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
+
+ /* Only support thermal sensors attached to 82599 physical port 0 */
+ if ((hw->mac.type != ixgbe_mac_82599EB) ||
+ (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+ return IXGBE_NOT_IMPLEMENTED;
+
+ offset = IXGBE_ETS_CFG;
+ if (hw->eeprom.ops.read(hw, offset, &ets_offset))
+ goto eeprom_err;
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+ return IXGBE_NOT_IMPLEMENTED;
+
+ offset = ets_offset;
+ if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
+ goto eeprom_err;
+ if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
+ != IXGBE_ETS_TYPE_EMC)
+ return IXGBE_NOT_IMPLEMENTED;
+
+ low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
+ IXGBE_ETS_LTHRES_DELTA_SHIFT);
+ num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+
+ for (i = 0; i < num_sensors; i++) {
+ offset = ets_offset + 1 + i;
+ if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed",
+ offset);
+ continue;
+ }
+ sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+ IXGBE_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+ IXGBE_ETS_DATA_LOC_SHIFT);
+ therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
+
+ hw->phy.ops.write_i2c_byte(hw,
+ ixgbe_emc_therm_limit[sensor_index],
+ IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
+
+ if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
+ data->sensor[i].location = sensor_location;
+ data->sensor[i].caution_thresh = therm_limit;
+ data->sensor[i].max_op_thresh = therm_limit -
+ low_thresh_delta;
+ }
+ }
+ return status;
+
+eeprom_err:
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", offset);
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+/**
+ * ixgbe_get_orom_version - Return option ROM from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * if valid option ROM version, nvm_ver->or_valid set to true
+ * else nvm_ver->or_valid is false.
+ **/
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
+
+ nvm_ver->or_valid = false;
+ /* Option Rom may or may not be present. Start with pointer */
+ hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
+
+ /* make sure offset is valid */
+ if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
+
+ /* option rom exists and is valid */
+ if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
+ eeprom_cfg_blkl == NVM_VER_INVALID ||
+ eeprom_cfg_blkh == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->or_valid = true;
+ nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
+ nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
+ (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
+ nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
+}
+
+/**
+ * ixgbe_get_oem_prod_version - Return OEM Product version
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * if valid OEM product version, nvm_ver->oem_valid set to true
+ * else nvm_ver->oem_valid is false.
+ **/
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 rel_num, prod_ver, mod_len, cap, offset;
+
+ nvm_ver->oem_valid = false;
+ hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
+
+ /* Return is offset to OEM Product Version block is invalid */
+ if (offset == 0x0 && offset == NVM_INVALID_PTR)
+ return;
+
+ /* Read product version block */
+ hw->eeprom.ops.read(hw, offset, &mod_len);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
+
+ /* Return if OEM product version block is invalid */
+ if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
+ (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
+
+ /* Return if version is invalid */
+ if ((rel_num | prod_ver) == 0x0 ||
+ rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
+ nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
+ nvm_ver->oem_release = rel_num;
+ nvm_ver->oem_valid = true;
+}
+
+/**
+ * ixgbe_get_etk_id - Return Etrack ID from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * word read errors will return 0xFFFF
+ **/
+void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 etk_id_l, etk_id_h;
+
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
+ etk_id_l = NVM_VER_INVALID;
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
+ etk_id_h = NVM_VER_INVALID;
+
+ /* The word order for the version format is determined by high order
+ * word bit 15.
+ */
+ if ((etk_id_h & NVM_ETK_VALID) == 0) {
+ nvm_ver->etk_id = etk_id_h;
+ nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
+ } else {
+ nvm_ver->etk_id = etk_id_l;
+ nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
+ }
+}
+
+
+/**
+ * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
+ * @hw: pointer to hardware structure
+ * @map: pointer to u8 arr for returning map
+ *
+ * Read the rtrup2tc HW register and resolve its content into map
+ **/
+void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
+{
+ u32 reg, i;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+ for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+ map[i] = IXGBE_RTRUP2TC_UP_MASK &
+ (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
+ return;
+}
+
+void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
+{
+ u32 pfdtxgswc;
+ u32 rxctrl;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+ }
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+}
+
+void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
+{
+ u32 pfdtxgswc;
+ u32 rxctrl;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ if (hw->mac.set_lben) {
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = false;
+ }
+ }
+}
+
+/**
+ * ixgbe_mng_present - returns true when management capability is present
+ * @hw: pointer to hardware structure
+ */
+bool ixgbe_mng_present(struct ixgbe_hw *hw)
+{
+ u32 fwsm;
+
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return false;
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
+
+ return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
+}
+
+/**
+ * ixgbe_mng_enabled - Is the manageability engine enabled?
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if the manageability engine is enabled.
+ **/
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
+{
+ u32 fwsm, manc, factps;
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
+ if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
+ return false;
+
+ manc = IXGBE_READ_REG(hw, IXGBE_MANC);
+ if (!(manc & IXGBE_MANC_RCV_TCO_EN))
+ return false;
+
+ if (hw->mac.type <= ixgbe_mac_X540) {
+ factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
+ if (factps & IXGBE_FACTPS_MNGCG)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the MAC and/or PHY register and restarts link.
+ **/
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ s32 status = IXGBE_SUCCESS;
+ u32 speedcnt = 0;
+ u32 i = 0;
+ bool autoneg, link_up = false;
+
+ DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
+
+ /* Mask off requested but non-supported speeds */
+ status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ speed &= link_speed;
+
+ /* Try each speed one by one, highest priority first. We do this in
+ * software because 10Gb fiber doesn't support speed autonegotiation.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ speedcnt++;
+ highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
+ case ixgbe_media_type_fiber:
+ ixgbe_set_rate_select_speed(hw,
+ IXGBE_LINK_SPEED_10GB_FULL);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects MAC link speed */
+ break;
+ default:
+ DEBUGOUT("Unexpected media type.\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (1G->10G) */
+ msec_delay(40);
+
+ status = ixgbe_setup_mac_link(hw,
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Flap the Tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /* Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted. 82599 uses the same timing for 10g SFI.
+ */
+ for (i = 0; i < 5; i++) {
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed,
+ &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ speedcnt++;
+ if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
+ case ixgbe_media_type_fiber:
+ ixgbe_set_rate_select_speed(hw,
+ IXGBE_LINK_SPEED_1GB_FULL);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects link speed */
+ break;
+ default:
+ DEBUGOUT("Unexpected media type.\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (10G->1G) */
+ msec_delay(40);
+
+ status = ixgbe_setup_mac_link(hw,
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Flap the Tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+
+ /* We didn't get link. Configure back to the highest speed we tried,
+ * (if there was more than one). We call ourselves back with just the
+ * single highest speed that the user requested.
+ */
+ if (speedcnt > 1)
+ status = ixgbe_setup_mac_link_multispeed_fiber(hw,
+ highest_link_speed,
+ autoneg_wait_to_complete);
+
+out:
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ return status;
+}
+
+/**
+ * ixgbe_set_soft_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * Set module link speed via the soft rate select.
+ */
+void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u8 rs, eeprom_data;
+
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ /* one bit mask same as setting on */
+ rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+ break;
+ default:
+ DEBUGOUT("Invalid fixed module speed\n");
+ return;
+ }
+
+ /* Set RS0 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to read Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to write Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ /* Set RS1 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to read Rx Rate Select RS1\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to write Rx Rate Select RS1\n");
+ goto out;
+ }
+out:
+ return;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.h
new file mode 100644
index 00000000..fd35dcc4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.h
@@ -0,0 +1,199 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_COMMON_H_
+#define _IXGBE_COMMON_H_
+
+#include "ixgbe_type.h"
+#define IXGBE_WRITE_REG64(hw, reg, value) \
+ do { \
+ IXGBE_WRITE_REG(hw, reg, (u32) value); \
+ IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
+ } while (0)
+#define IXGBE_REMOVED(a) (0)
+struct ixgbe_pba {
+ u16 word[2];
+ u16 *pba_block;
+};
+
+void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map);
+
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct ixgbe_pba *pba);
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct ixgbe_pba *pba);
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size);
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status);
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 *data);
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+ u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
+
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count,
+ ixgbe_mc_addr_itr func, bool clear);
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_validate_mac_addr(u8 *mac_addr);
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on, bool vlvf_bypass);
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, u32 *vfta_delta, u32 vfta,
+ bool vlvf_bypass);
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass);
+
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix);
+
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy);
+void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 ver, u16 len, const char *str);
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+ u32 length, u32 timeout, bool return_data);
+s32 ixgbe_hic_unlocked(struct ixgbe_hw *, u32 *buffer, u32 length, u32 timeout);
+s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *);
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *, u16 activity,
+ u32 (*data)[FW_PHY_ACT_DATA_COUNT]);
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+
+extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
+bool ixgbe_mng_present(struct ixgbe_hw *hw);
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
+
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
+#define IXGBE_EMC_INTERNAL_DATA 0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
+#define IXGBE_EMC_DIODE1_DATA 0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19
+#define IXGBE_EMC_DIODE2_DATA 0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
+#define IXGBE_EMC_DIODE3_DATA 0x2A
+#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30
+
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+
+void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
+void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed);
+#endif /* IXGBE_COMMON */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.c
new file mode 100644
index 00000000..2877f22b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.c
@@ -0,0 +1,733 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82598.h"
+#include "ixgbe_dcb_82599.h"
+
+/**
+ * ixgbe_dcb_calculate_tc_credits - This calculates the ieee traffic class
+ * credits from the configured bandwidth percentages. Credits
+ * are the smallest unit programmable into the underlying
+ * hardware. The IEEE 802.1Qaz specification do not use bandwidth
+ * groups so this is much simplified from the CEE case.
+ * @bw: bandwidth index by traffic class
+ * @refill: refill credits index by traffic class
+ * @max: max credits by traffic class
+ * @max_frame_size: maximum frame size
+ */
+s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max,
+ int max_frame_size)
+{
+ int min_percent = 100;
+ int min_credit, multiplier;
+ int i;
+
+ min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
+ IXGBE_DCB_CREDIT_QUANTUM;
+
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if (bw[i] < min_percent && bw[i])
+ min_percent = bw[i];
+ }
+
+ multiplier = (min_credit / min_percent) + 1;
+
+ /* Find out the hw credits for each TC */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL);
+
+ if (val < min_credit)
+ val = min_credit;
+ refill[i] = (u16)val;
+
+ max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits
+ * @hw: pointer to hardware structure
+ * @dcb_config: Struct containing DCB settings
+ * @max_frame_size: Maximum frame size
+ * @direction: Configuring either Tx or Rx
+ *
+ * This function calculates the credits allocated to each traffic class.
+ * It should be called only after the rules are checked by
+ * ixgbe_dcb_check_config_cee().
+ */
+s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config,
+ u32 max_frame_size, u8 direction)
+{
+ struct ixgbe_dcb_tc_path *p;
+ u32 min_multiplier = 0;
+ u16 min_percent = 100;
+ s32 ret_val = IXGBE_SUCCESS;
+ /* Initialization values default for Tx settings */
+ u32 min_credit = 0;
+ u32 credit_refill = 0;
+ u32 credit_max = 0;
+ u16 link_percentage = 0;
+ u8 bw_percent = 0;
+ u8 i;
+
+ if (dcb_config == NULL) {
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
+ IXGBE_DCB_CREDIT_QUANTUM;
+
+ /* Find smallest link percentage */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ p = &dcb_config->tc_config[i].path[direction];
+ bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+ link_percentage = p->bwg_percent;
+
+ link_percentage = (link_percentage * bw_percent) / 100;
+
+ if (link_percentage && link_percentage < min_percent)
+ min_percent = link_percentage;
+ }
+
+ /*
+ * The ratio between traffic classes will control the bandwidth
+ * percentages seen on the wire. To calculate this ratio we use
+ * a multiplier. It is required that the refill credits must be
+ * larger than the max frame size so here we find the smallest
+ * multiplier that will allow all bandwidth percentages to be
+ * greater than the max frame size.
+ */
+ min_multiplier = (min_credit / min_percent) + 1;
+
+ /* Find out the link percentage for each TC first */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ p = &dcb_config->tc_config[i].path[direction];
+ bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+
+ link_percentage = p->bwg_percent;
+ /* Must be careful of integer division for very small nums */
+ link_percentage = (link_percentage * bw_percent) / 100;
+ if (p->bwg_percent > 0 && link_percentage == 0)
+ link_percentage = 1;
+
+ /* Save link_percentage for reference */
+ p->link_percent = (u8)link_percentage;
+
+ /* Calculate credit refill ratio using multiplier */
+ credit_refill = min(link_percentage * min_multiplier,
+ (u32)IXGBE_DCB_MAX_CREDIT_REFILL);
+
+ /* Refill at least minimum credit */
+ if (credit_refill < min_credit)
+ credit_refill = min_credit;
+
+ p->data_credits_refill = (u16)credit_refill;
+
+ /* Calculate maximum credit for the TC */
+ credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100;
+
+ /*
+ * Adjustment based on rule checking, if the percentage
+ * of a TC is too small, the maximum credit may not be
+ * enough to send out a jumbo frame in data plane arbitration.
+ */
+ if (credit_max < min_credit)
+ credit_max = min_credit;
+
+ if (direction == IXGBE_DCB_TX_CONFIG) {
+ /*
+ * Adjustment based on rule checking, if the
+ * percentage of a TC is too small, the maximum
+ * credit may not be enough to send out a TSO
+ * packet in descriptor plane arbitration.
+ */
+ if (credit_max && (credit_max <
+ IXGBE_DCB_MIN_TSO_CREDIT)
+ && (hw->mac.type == ixgbe_mac_82598EB))
+ credit_max = IXGBE_DCB_MIN_TSO_CREDIT;
+
+ dcb_config->tc_config[i].desc_credits_max =
+ (u16)credit_max;
+ }
+
+ p->data_credits_max = (u16)credit_max;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info
+ * @cfg: dcb configuration to unpack into hardware consumable fields
+ * @map: user priority to traffic class map
+ * @pfc_up: u8 to store user priority PFC bitmask
+ *
+ * This unpacks the dcb configuration PFC info which is stored per
+ * traffic class into a 8bit user priority bitmask that can be
+ * consumed by hardware routines. The priority to tc map must be
+ * updated before calling this routine to use current up-to maps.
+ */
+void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up)
+{
+ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int up;
+
+ /*
+ * If the TC for this user priority has PFC enabled then set the
+ * matching bit in 'pfc_up' to reflect that PFC is enabled.
+ */
+ for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) {
+ if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled)
+ *pfc_up |= 1 << up;
+ }
+}
+
+void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction,
+ u16 *refill)
+{
+ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+ refill[tc] = tc_config[tc].path[direction].data_credits_refill;
+}
+
+void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max)
+{
+ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+ max[tc] = tc_config[tc].desc_credits_max;
+}
+
+void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *bwgid)
+{
+ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+ bwgid[tc] = tc_config[tc].path[direction].bwg_id;
+}
+
+void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *tsa)
+{
+ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+ tsa[tc] = tc_config[tc].path[direction].tsa;
+}
+
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
+{
+ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ u8 prio_mask = 1 << up;
+ u8 tc = cfg->num_tcs.pg_tcs;
+
+ /* If tc is 0 then DCB is likely not enabled or supported */
+ if (!tc)
+ goto out;
+
+ /*
+ * Test from maximum TC to 1 and report the first match we find. If
+ * we find no match we can assume that the TC is 0 since the TC must
+ * be set for all user priorities
+ */
+ for (tc--; tc; tc--) {
+ if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
+ break;
+ }
+out:
+ return tc;
+}
+
+void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *map)
+{
+ u8 up;
+
+ for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++)
+ map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);
+}
+
+/**
+ * ixgbe_dcb_config - Struct containing DCB settings.
+ * @dcb_config: Pointer to DCB config structure
+ *
+ * This function checks DCB rules for DCB settings.
+ * The following rules are checked:
+ * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
+ * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
+ * Group must total 100.
+ * 3. A Traffic Class should not be set to both Link Strict Priority
+ * and Group Strict Priority.
+ * 4. Link strict Bandwidth Groups can only have link strict traffic classes
+ * with zero bandwidth.
+ */
+s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config)
+{
+ struct ixgbe_dcb_tc_path *p;
+ s32 ret_val = IXGBE_SUCCESS;
+ u8 i, j, bw = 0, bw_id;
+ u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP];
+ bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP];
+
+ memset(bw_sum, 0, sizeof(bw_sum));
+ memset(link_strict, 0, sizeof(link_strict));
+
+ /* First Tx, then Rx */
+ for (i = 0; i < 2; i++) {
+ /* Check each traffic class for rule violation */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ p = &dcb_config->tc_config[j].path[i];
+
+ bw = p->bwg_percent;
+ bw_id = p->bwg_id;
+
+ if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) {
+ ret_val = IXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ if (p->tsa == ixgbe_dcb_tsa_strict) {
+ link_strict[i][bw_id] = true;
+ /* Link strict should have zero bandwidth */
+ if (bw) {
+ ret_val = IXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ } else if (!bw) {
+ /*
+ * Traffic classes without link strict
+ * should have non-zero bandwidth.
+ */
+ ret_val = IXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ bw_sum[i][bw_id] += bw;
+ }
+
+ bw = 0;
+
+ /* Check each bandwidth group for rule violation */
+ for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) {
+ bw += dcb_config->bw_percentage[i][j];
+ /*
+ * Sum of bandwidth percentages of all traffic classes
+ * within a Bandwidth Group must total 100 except for
+ * link strict group (zero bandwidth).
+ */
+ if (link_strict[i][j]) {
+ if (bw_sum[i][j]) {
+ /*
+ * Link strict group should have zero
+ * bandwidth.
+ */
+ ret_val = IXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ } else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT &&
+ bw_sum[i][j] != 0) {
+ ret_val = IXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ }
+
+ if (bw != IXGBE_DCB_BW_PERCENT) {
+ ret_val = IXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ }
+
+err_config:
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of elements in bwg_array.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of elements in bwg_array.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Rx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
+ u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
+ u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+ u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
+ u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
+
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid,
+ tsa, map);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+ bwgid, tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+ bwgid, tsa);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+ u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+ bwgid, tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+ bwgid, tsa,
+ map);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_cee - Config priority flow control
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Priority Flow Control for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ u8 pfc_en;
+ u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+
+ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+ ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_config_tc_stats - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers, all queues belonging to same traffic
+ * class uses a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_tc_stats_82598(hw);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_hw_config_cee - Config and enable DCB
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ u8 pfc_en;
+ u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+ u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+ /* Unpack CEE standard containers */
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed,
+ refill, max, bwgid, tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ ixgbe_dcb_config_82599(hw, dcb_config);
+ ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed,
+ refill, max, bwgid,
+ tsa, map);
+
+ ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
+ break;
+ default:
+ break;
+ }
+
+ if (!ret && dcb_config->pfc_mode_enable) {
+ ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+ ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
+ }
+
+ return ret;
+}
+
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+ int ret = IXGBE_ERR_PARAM;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
+ u8 *bwg_id, u8 *tsa, u8 *map)
+{
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
+ tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
+ tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+ tsa, map);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
+ tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
+ tsa, map);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.h
new file mode 100644
index 00000000..41208049
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.h
@@ -0,0 +1,174 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_DCB_H_
+#define _IXGBE_DCB_H_
+
+#include "ixgbe_type.h"
+
+/* DCB defines */
+/* DCB credit calculation defines */
+#define IXGBE_DCB_CREDIT_QUANTUM 64
+#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */
+#define IXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/
+#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL)
+
+/* 513 for 32KB TSO packet */
+#define IXGBE_DCB_MIN_TSO_CREDIT \
+ ((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1)
+
+/* DCB configuration defines */
+#define IXGBE_DCB_MAX_USER_PRIORITY 8
+#define IXGBE_DCB_MAX_BW_GROUP 8
+#define IXGBE_DCB_BW_PERCENT 100
+
+#define IXGBE_DCB_TX_CONFIG 0
+#define IXGBE_DCB_RX_CONFIG 1
+
+/* DCB capability defines */
+#define IXGBE_DCB_PG_SUPPORT 0x00000001
+#define IXGBE_DCB_PFC_SUPPORT 0x00000002
+#define IXGBE_DCB_BCN_SUPPORT 0x00000004
+#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008
+#define IXGBE_DCB_GSP_SUPPORT 0x00000010
+
+struct ixgbe_dcb_support {
+ u32 capabilities; /* DCB capabilities */
+
+ /* Each bit represents a number of TCs configurable in the hw.
+ * If 8 traffic classes can be configured, the value is 0x80. */
+ u8 traffic_classes;
+ u8 pfc_traffic_classes;
+};
+
+enum ixgbe_dcb_tsa {
+ ixgbe_dcb_tsa_ets = 0,
+ ixgbe_dcb_tsa_group_strict_cee,
+ ixgbe_dcb_tsa_strict
+};
+
+/* Traffic class bandwidth allocation per direction */
+struct ixgbe_dcb_tc_path {
+ u8 bwg_id; /* Bandwidth Group (BWG) ID */
+ u8 bwg_percent; /* % of BWG's bandwidth */
+ u8 link_percent; /* % of link bandwidth */
+ u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
+ u16 data_credits_refill; /* Credit refill amount in 64B granularity */
+ u16 data_credits_max; /* Max credits for a configured packet buffer
+ * in 64B granularity.*/
+ enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
+};
+
+enum ixgbe_dcb_pfc {
+ ixgbe_dcb_pfc_disabled = 0,
+ ixgbe_dcb_pfc_enabled,
+ ixgbe_dcb_pfc_enabled_txonly,
+ ixgbe_dcb_pfc_enabled_rxonly
+};
+
+/* Traffic class configuration */
+struct ixgbe_dcb_tc_config {
+ struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
+ enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */
+
+ u16 desc_credits_max; /* For Tx Descriptor arbitration */
+ u8 tc; /* Traffic class (TC) */
+};
+
+enum ixgbe_dcb_pba {
+ /* PBA[0-7] each use 64KB FIFO */
+ ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
+ /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
+ ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
+};
+
+struct ixgbe_dcb_num_tcs {
+ u8 pg_tcs;
+ u8 pfc_tcs;
+};
+
+struct ixgbe_dcb_config {
+ struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ struct ixgbe_dcb_support support;
+ struct ixgbe_dcb_num_tcs num_tcs;
+ u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */
+ bool pfc_mode_enable;
+ bool round_robin_enable;
+
+ enum ixgbe_dcb_pba rx_pba_cfg;
+
+ u32 dcb_cfg_version; /* Not used...OS-specific? */
+ u32 link_speed; /* For bandwidth allocation validation purpose */
+ bool vt_mode;
+};
+
+/* DCB driver APIs */
+
+/* DCB rule checking */
+s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *);
+
+/* DCB credits calculation */
+s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int);
+s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *, u32, u8);
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *);
+s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
+s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *);
+
+/* DCB unpack routines */
+void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *);
+void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *);
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
+
+/* DCB initialization */
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *);
+s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+#endif /* _IXGBE_DCB_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.c
new file mode 100644
index 00000000..3ed8337b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.c
@@ -0,0 +1,372 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82598.h"
+
+/**
+ * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of elements in bwg_array.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
+ struct ixgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ int tc;
+
+ DEBUGFUNC("dcb_get_tc_stats");
+
+ if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+ return IXGBE_ERR_PARAM;
+
+ /* Statistics pertaining to each traffic class */
+ for (tc = 0; tc < tc_count; tc++) {
+ /* Transmitted Packets */
+ stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
+ /* Transmitted Bytes */
+ stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
+ /* Received Packets */
+ stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
+ /* Received Bytes */
+ stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
+
+#if 0
+ /* Can we get rid of these?? Consequently, getting rid
+ * of the tc_stats structure.
+ */
+ tc_stats_array[up]->in_overflow_discards = 0;
+ tc_stats_array[up]->out_overflow_discards = 0;
+#endif
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of elements in bwg_array.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
+ struct ixgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ int tc;
+
+ DEBUGFUNC("dcb_get_pfc_stats");
+
+ if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+ return IXGBE_ERR_PARAM;
+
+ for (tc = 0; tc < tc_count; tc++) {
+ /* Priority XOFF Transmitted */
+ stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
+ /* Priority XOFF Received */
+ stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ *
+ * Configure Rx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *tsa)
+{
+ u32 reg = 0;
+ u32 credit_refill = 0;
+ u32 credit_max = 0;
+ u8 i = 0;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
+ IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ /* Enable Arbiter */
+ reg &= ~IXGBE_RMCS_ARBDIS;
+ /* Enable Receive Recycle within the BWG */
+ reg |= IXGBE_RMCS_RRM;
+ /* Enable Deficit Fixed Priority arbitration*/
+ reg |= IXGBE_RMCS_DFP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ credit_refill = refill[i];
+ credit_max = max[i];
+
+ reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
+
+ if (tsa[i] == ixgbe_dcb_tsa_strict)
+ reg |= IXGBE_RT2CR_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
+ }
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ reg |= IXGBE_RDRXCTL_RDMTS_1_2;
+ reg |= IXGBE_RDRXCTL_MPBEN;
+ reg |= IXGBE_RDRXCTL_MCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ /* Make sure there is enough descriptors before arbitration */
+ reg &= ~IXGBE_RXCTRL_DMBYPS;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill, u16 *max, u8 *bwg_id,
+ u8 *tsa)
+{
+ u32 reg, max_credits;
+ u8 i;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+
+ /* Enable arbiter */
+ reg &= ~IXGBE_DPMCS_ARBDIS;
+ reg |= IXGBE_DPMCS_TSOEF;
+
+ /* Configure Max TSO packet size 34KB including payload and headers */
+ reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
+
+ IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ max_credits = max[i];
+ reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
+ reg |= refill[i];
+ reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
+
+ if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+ reg |= IXGBE_TDTQ2TCCR_GSP;
+
+ if (tsa[i] == ixgbe_dcb_tsa_strict)
+ reg |= IXGBE_TDTQ2TCCR_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ *
+ * Configure Tx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill, u16 *max, u8 *bwg_id,
+ u8 *tsa)
+{
+ u32 reg;
+ u8 i;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
+ /* Enable Data Plane Arbiter */
+ reg &= ~IXGBE_PDPMCS_ARBDIS;
+ /* Enable DFP and Transmit Recycle Mode */
+ reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ reg = refill[i];
+ reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
+
+ if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+ reg |= IXGBE_TDPT2TCCR_GSP;
+
+ if (tsa[i] == ixgbe_dcb_tsa_strict)
+ reg |= IXGBE_TDPT2TCCR_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
+ }
+
+ /* Enable Tx packet buffer division */
+ reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
+ reg |= IXGBE_DTXCTL_ENDBUBD;
+ IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_82598 - Config priority flow control
+ * @hw: pointer to hardware structure
+ * @pfc_en: enabled pfc bitmask
+ *
+ * Configure Priority Flow Control for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
+{
+ u32 fcrtl, reg;
+ u8 i;
+
+ /* Enable Transmit Priority Flow Control */
+ reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ reg &= ~IXGBE_RMCS_TFCE_802_3X;
+ reg |= IXGBE_RMCS_TFCE_PRIORITY;
+ IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
+
+ /* Enable Receive Priority Flow Control */
+ reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
+
+ if (pfc_en)
+ reg |= IXGBE_FCTRL_RPFCE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
+
+ /* Configure PFC Tx thresholds per TC */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if (!(pfc_en & (1 << i))) {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+ continue;
+ }
+
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+ reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
+ }
+
+ /* Configure pause time */
+ reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers, all queues belonging to same traffic
+ * class uses a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+{
+ u32 reg = 0;
+ u8 i = 0;
+ u8 j = 0;
+
+ /* Receive Queues stats setting - 8 queues per statistics reg */
+ for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
+ reg |= ((0x1010101) * j);
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
+ reg |= ((0x1010101) * j);
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
+ }
+ /* Transmit Queues stats setting - 4 queues per statistics reg*/
+ for (i = 0; i < 8; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
+ reg |= ((0x1010101) * i);
+ IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_hw_config_82598 - Config and enable DCB
+ * @hw: pointer to hardware structure
+ * @link_speed: unused
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed,
+ u16 *refill, u16 *max, u8 *bwg_id,
+ u8 *tsa)
+{
+ UNREFERENCED_1PARAMETER(link_speed);
+
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
+ tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
+ tsa);
+ ixgbe_dcb_config_tc_stats_82598(hw);
+
+
+ return IXGBE_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.h
new file mode 100644
index 00000000..eb88b3d3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.h
@@ -0,0 +1,99 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_DCB_82598_H_
+#define _IXGBE_DCB_82598_H_
+
+/* DCB register definitions */
+
+#define IXGBE_DPMCS_MTSOS_SHIFT 16
+#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin,
+ * 1 DFP - Deficit Fixed Priority */
+#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */
+#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */
+#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
+
+#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */
+
+#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
+#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */
+
+#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
+ * buffers enable */
+#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
+ * (RSS) enable */
+
+#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12
+#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9
+#define IXGBE_TDTQ2TCCR_GSP 0x40000000
+#define IXGBE_TDTQ2TCCR_LSP 0x80000000
+
+#define IXGBE_TDPT2TCCR_MCL_SHIFT 12
+#define IXGBE_TDPT2TCCR_BWG_SHIFT 9
+#define IXGBE_TDPT2TCCR_GSP 0x40000000
+#define IXGBE_TDPT2TCCR_LSP 0x80000000
+
+#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin,
+ * 1 DFP - Deficit Fixed Priority */
+#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */
+#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */
+
+#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */
+
+#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
+
+/* DCB driver APIs */
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *);
+s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *,
+ struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *,
+ struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
+ u8 *, u8 *);
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
+ u8 *, u8 *);
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, u8 *);
+
+/* DCB initialization */
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, u8 *);
+#endif /* _IXGBE_DCB_82958_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.c
new file mode 100644
index 00000000..8f9e1590
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.c
@@ -0,0 +1,610 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82599.h"
+
+/**
+ * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of elements in bwg_array.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
+ struct ixgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ int tc;
+
+ DEBUGFUNC("dcb_get_tc_stats");
+
+ if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+ return IXGBE_ERR_PARAM;
+
+ /* Statistics pertaining to each traffic class */
+ for (tc = 0; tc < tc_count; tc++) {
+ /* Transmitted Packets */
+ stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
+ /* Transmitted Bytes (read low first to prevent missed carry) */
+ stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
+ stats->qbtc[tc] +=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
+ /* Received Packets */
+ stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
+ /* Received Bytes (read low first to prevent missed carry) */
+ stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
+ stats->qbrc[tc] +=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);
+
+ /* Received Dropped Packet */
+ stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of elements in bwg_array.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
+ struct ixgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ int tc;
+
+ DEBUGFUNC("dcb_get_pfc_stats");
+
+ if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+ return IXGBE_ERR_PARAM;
+
+ for (tc = 0; tc < tc_count; tc++) {
+ /* Priority XOFF Transmitted */
+ stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
+ /* Priority XOFF Received */
+ stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Rx Packet Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa,
+ u8 *map)
+{
+ u32 reg = 0;
+ u32 credit_refill = 0;
+ u32 credit_max = 0;
+ u8 i = 0;
+
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; WSP)
+ */
+ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+ /*
+ * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding
+ * bits sets for the UPs that needs to be mappped to that TC.
+ * e.g if priorities 6 and 7 are to be mapped to a TC then the
+ * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+ */
+ reg = 0;
+ for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+ reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ credit_refill = refill[i];
+ credit_max = max[i];
+ reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
+
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
+
+ if (tsa[i] == ixgbe_dcb_tsa_strict)
+ reg |= IXGBE_RTRPT4C_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
+ }
+
+ /*
+ * Configure Rx packet plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa)
+{
+ u32 reg, max_credits;
+ u8 i;
+
+ /* Clear the per-Tx queue credits; we use per-TC instead */
+ for (i = 0; i < 128; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
+ }
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ max_credits = max[i];
+ reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
+ reg |= refill[i];
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
+
+ if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+ reg |= IXGBE_RTTDT2C_GSP;
+
+ if (tsa[i] == ixgbe_dcb_tsa_strict)
+ reg |= IXGBE_RTTDT2C_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
+ }
+
+ /*
+ * Configure Tx descriptor plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Tx Packet Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa,
+ u8 *map)
+{
+ u32 reg;
+ u8 i;
+
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; SP; arb delay)
+ */
+ reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
+ (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
+ IXGBE_RTTPCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
+
+ /*
+ * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding
+ * bits sets for the UPs that needs to be mappped to that TC.
+ * e.g if priorities 6 and 7 are to be mapped to a TC then the
+ * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+ */
+ reg = 0;
+ for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+ reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ reg = refill[i];
+ reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
+
+ if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+ reg |= IXGBE_RTTPT2C_GSP;
+
+ if (tsa[i] == ixgbe_dcb_tsa_strict)
+ reg |= IXGBE_RTTPT2C_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
+ }
+
+ /*
+ * Configure Tx packet plane (recycle mode; SP; arb delay) and
+ * enable arbiter
+ */
+ reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
+ (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
+ * @hw: pointer to hardware structure
+ * @pfc_en: enabled pfc bitmask
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Priority Flow Control (PFC) for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+ u32 i, j, fcrtl, reg;
+ u8 max_tc = 0;
+
+ /* Enable Transmit Priority Flow Control */
+ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);
+
+ /* Enable Receive Priority Flow Control */
+ reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ reg |= IXGBE_MFLCN_DPF;
+
+ /*
+ * X540 supports per TC Rx priority flow control. So
+ * clear all TCs and only enable those that should be
+ * enabled.
+ */
+ reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
+
+ if (hw->mac.type >= ixgbe_mac_X540)
+ reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
+
+ if (pfc_en)
+ reg |= IXGBE_MFLCN_RPFCE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+
+ for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
+ if (map[i] > max_tc)
+ max_tc = map[i];
+ }
+
+
+ /* Configure PFC Tx thresholds per TC */
+ for (i = 0; i <= max_tc; i++) {
+ int enabled = 0;
+
+ for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
+ if ((map[j] == i) && (pfc_en & (1 << j))) {
+ enabled = 1;
+ break;
+ }
+ }
+
+ if (enabled) {
+ reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+ } else {
+ /*
+ * In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
+ }
+
+ for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
+ }
+
+ /* Configure pause time (2 TCs per register) */
+ reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure queue statistics registers, all queues belonging to same traffic
+ * class uses a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ u32 reg = 0;
+ u8 i = 0;
+ u8 tc_count = 8;
+ bool vt_mode = false;
+
+ if (dcb_config != NULL) {
+ tc_count = dcb_config->num_tcs.pg_tcs;
+ vt_mode = dcb_config->vt_mode;
+ }
+
+ if (!((tc_count == 8 && vt_mode == false) || tc_count == 4))
+ return IXGBE_ERR_PARAM;
+
+ if (tc_count == 8 && vt_mode == false) {
+ /*
+ * Receive Queues stats setting
+ * 32 RQSMR registers, each configuring 4 queues.
+ *
+ * Set all 16 queues of each TC to the same stat
+ * with TC 'n' going to stat 'n'.
+ */
+ for (i = 0; i < 32; i++) {
+ reg = 0x01010101 * (i / 4);
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+ }
+ /*
+ * Transmit Queues stats setting
+ * 32 TQSM registers, each controlling 4 queues.
+ *
+ * Set all queues of each TC to the same stat
+ * with TC 'n' going to stat 'n'.
+ * Tx queues are allocated non-uniformly to TCs:
+ * 32, 32, 16, 16, 8, 8, 8, 8.
+ */
+ for (i = 0; i < 32; i++) {
+ if (i < 8)
+ reg = 0x00000000;
+ else if (i < 16)
+ reg = 0x01010101;
+ else if (i < 20)
+ reg = 0x02020202;
+ else if (i < 24)
+ reg = 0x03030303;
+ else if (i < 26)
+ reg = 0x04040404;
+ else if (i < 28)
+ reg = 0x05050505;
+ else if (i < 30)
+ reg = 0x06060606;
+ else
+ reg = 0x07070707;
+ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
+ }
+ } else if (tc_count == 4 && vt_mode == false) {
+ /*
+ * Receive Queues stats setting
+ * 32 RQSMR registers, each configuring 4 queues.
+ *
+ * Set all 16 queues of each TC to the same stat
+ * with TC 'n' going to stat 'n'.
+ */
+ for (i = 0; i < 32; i++) {
+ if (i % 8 > 3)
+ /* In 4 TC mode, odd 16-queue ranges are
+ * not used.
+ */
+ continue;
+ reg = 0x01010101 * (i / 8);
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+ }
+ /*
+ * Transmit Queues stats setting
+ * 32 TQSM registers, each controlling 4 queues.
+ *
+ * Set all queues of each TC to the same stat
+ * with TC 'n' going to stat 'n'.
+ * Tx queues are allocated non-uniformly to TCs:
+ * 64, 32, 16, 16.
+ */
+ for (i = 0; i < 32; i++) {
+ if (i < 16)
+ reg = 0x00000000;
+ else if (i < 24)
+ reg = 0x01010101;
+ else if (i < 28)
+ reg = 0x02020202;
+ else
+ reg = 0x03030303;
+ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
+ }
+ } else if (tc_count == 4 && vt_mode == true) {
+ /*
+ * Receive Queues stats setting
+ * 32 RQSMR registers, each configuring 4 queues.
+ *
+ * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
+ * pool. Set all 32 queues of each TC across pools to the same
+ * stat with TC 'n' going to stat 'n'.
+ */
+ for (i = 0; i < 32; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
+ /*
+ * Transmit Queues stats setting
+ * 32 TQSM registers, each controlling 4 queues.
+ *
+ * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
+ * pool. Set all 32 queues of each TC across pools to the same
+ * stat with TC 'n' going to stat 'n'.
+ */
+ for (i = 0; i < 32; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_82599 - Configure general DCB parameters
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure general DCB parameters.
+ */
+s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ u32 reg;
+ u32 q;
+
+ /* Disable the Tx desc arbiter so that MTQC can be changed */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ if (dcb_config->num_tcs.pg_tcs == 8) {
+ /* Enable DCB for Rx with 8 TCs */
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case 0:
+ case IXGBE_MRQC_RT4TCEN:
+ /* RSS disabled cases */
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RT8TCEN;
+ break;
+ case IXGBE_MRQC_RSSEN:
+ case IXGBE_MRQC_RTRSS4TCEN:
+ /* RSS enabled cases */
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RTRSS8TCEN;
+ break;
+ default:
+ /*
+ * Unsupported value, assume stale data,
+ * overwrite no RSS
+ */
+ ASSERT(0);
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RT8TCEN;
+ }
+ }
+ if (dcb_config->num_tcs.pg_tcs == 4) {
+ /* We support both VT-on and VT-off with 4 TCs. */
+ if (dcb_config->vt_mode)
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_VMDQRT4TCEN;
+ else
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RTRSS4TCEN;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+
+ /* Enable DCB for Tx with 8 TCs */
+ if (dcb_config->num_tcs.pg_tcs == 8)
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ else {
+ /* We support both VT-on and VT-off with 4 TCs. */
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ if (dcb_config->vt_mode)
+ reg |= IXGBE_MTQC_VT_ENA;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+ /* Disable drop for all queues */
+ for (q = 0; q < 128; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+
+ /* Enable the Tx desc arbiter */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ /* Enable Security TX Buffer IFG for DCB */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
+ * @hw: pointer to hardware structure
+ * @link_speed: unused
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
+ u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
+ u8 *map)
+{
+ UNREFERENCED_1PARAMETER(link_speed);
+
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
+ map);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
+ tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
+ tsa, map);
+
+ return IXGBE_SUCCESS;
+}
+
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.h
new file mode 100644
index 00000000..dc0fb284
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.h
@@ -0,0 +1,153 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_DCB_82599_H_
+#define _IXGBE_DCB_82599_H_
+
+/* DCB register definitions */
+#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin,
+ * 1 WSP - Weighted Strict Priority
+ */
+#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin,
+ * 1 WRR - Weighted Round Robin
+ */
+#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */
+#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */
+#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must
+ * clear!
+ */
+#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */
+
+/* Receive UP2TC mapping */
+#define IXGBE_RTRUP2TC_UP_SHIFT 3
+#define IXGBE_RTRUP2TC_UP_MASK 7
+/* Transmit UP2TC mapping */
+#define IXGBE_RTTUP2TC_UP_SHIFT 3
+
+#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
+#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */
+#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */
+#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */
+
+#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
+ * buffers enable
+ */
+#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
+ * (RSS) enable
+ */
+
+/* RTRPCS Bit Masks */
+#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */
+/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+#define IXGBE_RTRPCS_RAC 0x00000004
+#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */
+
+/* RTTDT2C Bit Masks */
+#define IXGBE_RTTDT2C_MCL_SHIFT 12
+#define IXGBE_RTTDT2C_BWG_SHIFT 9
+#define IXGBE_RTTDT2C_GSP 0x40000000
+#define IXGBE_RTTDT2C_LSP 0x80000000
+
+#define IXGBE_RTTPT2C_MCL_SHIFT 12
+#define IXGBE_RTTPT2C_BWG_SHIFT 9
+#define IXGBE_RTTPT2C_GSP 0x40000000
+#define IXGBE_RTTPT2C_LSP 0x80000000
+
+/* RTTPCS Bit Masks */
+#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin,
+ * 1 SP - Strict Priority
+ */
+#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */
+#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */
+#define IXGBE_RTTPCS_ARBD_SHIFT 22
+#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */
+
+#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */
+
+/* SECTXMINIFG DCB */
+#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer SEC IFG */
+
+/* BCN register definitions */
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
+#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
+
+#define IXGBE_RTTBCNCR_MNG_CMTGI 0x00000001
+#define IXGBE_RTTBCNCR_MGN_BCNA_MODE 0x00000002
+#define IXGBE_RTTBCNCR_RSV7_11_SHIFT 5
+#define IXGBE_RTTBCNCR_G 0x00000400
+#define IXGBE_RTTBCNCR_I 0x00000800
+#define IXGBE_RTTBCNCR_H 0x00001000
+#define IXGBE_RTTBCNCR_VER_SHIFT 14
+#define IXGBE_RTTBCNCR_CMT_ETH_SHIFT 16
+
+#define IXGBE_RTTBCNACL_SMAC_L_SHIFT 16
+
+#define IXGBE_RTTBCNTG_BCNA_MODE 0x80000000
+
+#define IXGBE_RTTBCNRTT_TS_SHIFT 3
+#define IXGBE_RTTBCNRTT_TXQ_IDX_SHIFT 16
+
+#define IXGBE_RTTBCNRD_BCN_CLEAR_ALL 0x00000002
+#define IXGBE_RTTBCNRD_DRIFT_FAC_SHIFT 2
+#define IXGBE_RTTBCNRD_DRIFT_INT_SHIFT 16
+#define IXGBE_RTTBCNRD_DRIFT_ENA 0x80000000
+
+
+/* DCB driver APIs */
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *, u8, u8 *);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *,
+ struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *,
+ struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
+ u8 *, u8 *);
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
+ u8 *, u8 *, u8 *);
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, u8 *,
+ u8 *, u8 *);
+
+/* DCB initialization */
+s32 ixgbe_dcb_config_82599(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *);
+
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *, int, u16 *, u16 *, u8 *,
+ u8 *, u8 *);
+#endif /* _IXGBE_DCB_82959_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.c
new file mode 100644
index 00000000..40dad775
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.c
@@ -0,0 +1,257 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_vf.h"
+#include "ixgbe_hv_vf.h"
+
+/**
+ * Hyper-V variant - just a stub.
+ * @hw: unused
+ * @mc_addr_list: unused
+ * @mc_addr_count: unused
+ * @next: unused
+ * @clear: unused
+ */
+static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr next,
+ bool clear)
+{
+ UNREFERENCED_5PARAMETER(hw, mc_addr_list, mc_addr_count, next, clear);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ * @hw: unused
+ * @xcast_mode: unused
+ */
+static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+ UNREFERENCED_2PARAMETER(hw, xcast_mode);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ * @hw: unused
+ * @vlan: unused
+ * @vind: unused
+ * @vlan_on: unused
+ * @vlvf_bypass: unused
+ */
+static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass)
+{
+ UNREFERENCED_5PARAMETER(hw, vlan, vind, vlan_on, vlvf_bypass);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ UNREFERENCED_3PARAMETER(hw, index, addr);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ UNREFERENCED_PARAMETER(hw);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vlan, u32 vind)
+{
+ UNREFERENCED_5PARAMETER(hw, index, addr, vlan, vind);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant; there is no mailbox communication.
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true is link is up, false otherwise
+ * @autoneg_wait_to_complete: unused
+ *
+ */
+static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 links_reg;
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ DELAY(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* Reserved for pre-x550 devices */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ * Hyper-V variant.
+ **/
+static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+ u32 reg;
+
+ /* If we are on Hyper-V, we implement this functionality
+ * differently.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
+ /* CRC == 4 */
+ reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ * Hyper-V version - only ixgbe_mbox_api_10 supported.
+ **/
+static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
+{
+ UNREFERENCED_1PARAMETER(hw);
+
+ /* Hyper-V only supports api version ixgbe_mbox_api_10 */
+ if (api != ixgbe_mbox_api_10)
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_hv_init_ops_vf - Initialize the pointers for vf
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers, adapter-specific functions can
+ * override the assignment of generic function pointers by assigning
+ * their own adapter-specific function pointers.
+ * Does not touch the hardware.
+ **/
+s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw)
+{
+ /* Set defaults for VF then override applicable Hyper-V
+ * specific functions
+ */
+ ixgbe_init_ops_vf(hw);
+
+ hw->mac.ops.reset_hw = ixgbevf_hv_reset_hw_vf;
+ hw->mac.ops.check_link = ixgbevf_hv_check_mac_link_vf;
+ hw->mac.ops.negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf;
+ hw->mac.ops.set_rar = ixgbevf_hv_set_rar_vf;
+ hw->mac.ops.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf;
+ hw->mac.ops.update_xcast_mode = ixgbevf_hv_update_xcast_mode;
+ hw->mac.ops.set_uc_addr = ixgbevf_hv_set_uc_addr_vf;
+ hw->mac.ops.set_vfta = ixgbevf_hv_set_vfta_vf;
+ hw->mac.ops.set_rlpml = ixgbevf_hv_set_rlpml_vf;
+
+ return IXGBE_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.h
new file mode 100644
index 00000000..9119f29f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.h
@@ -0,0 +1,41 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2016, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_HV_VF_H_
+#define _IXGBE_HV_VF_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_HV_VF_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.c
new file mode 100644
index 00000000..2785bbad
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.c
@@ -0,0 +1,769 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_mbx.h"
+
+/**
+ * ixgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_read_mbx");
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_write_mbx");
+
+ if (size > mbx->size) {
+ ret_val = IXGBE_ERR_MBX;
+ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+ "Invalid mailbox message size %d", size);
+ } else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_msg");
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_ack");
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_rst");
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+STATIC s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("ixgbe_poll_for_msg");
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+ if (countdown == 0)
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Polling for VF%d mailbox message timedout", mbx_id);
+
+out:
+ return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+STATIC s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("ixgbe_poll_for_ack");
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+ if (countdown == 0)
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Polling for VF%d mailbox ack timedout", mbx_id);
+
+out:
+ return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_read_posted_mbx");
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_write_posted_mbx");
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbe_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Setups up the mailbox read and write message function pointers
+ **/
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+}
+
+/**
+ * ixgbe_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+STATIC u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+ u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+ v2p_mailbox |= hw->mbx.v2p_mailbox;
+ hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * ixgbe_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+STATIC s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw);
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = IXGBE_SUCCESS;
+
+ hw->mbx.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_check_for_msg_vf");
+
+ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_check_for_ack_vf");
+
+ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns true if the PF has set the reset done bit or else false
+ **/
+STATIC s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_check_for_rst_vf");
+
+ if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+ IXGBE_VFMAILBOX_RSTI))) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+STATIC s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+ ret_val = IXGBE_SUCCESS;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ s32 ret_val;
+ u16 i;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+
+ DEBUGFUNC("ixgbe_write_mbx_vf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_vf(hw, 0);
+ ixgbe_check_for_ack_vf(hw, 0);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_mbx_vf");
+ UNREFERENCED_1PARAMETER(mbx_id);
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->ops.read = ixgbe_read_mbx_vf;
+ mbx->ops.write = ixgbe_write_mbx_vf;
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+ mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
+ mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
+ mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
+
+STATIC s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+{
+ u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = IXGBE_SUCCESS;
+ IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ DEBUGFUNC("ixgbe_check_for_msg_pf");
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+ index)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ DEBUGFUNC("ixgbe_check_for_ack_pf");
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ index)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ u32 reg_offset = (vf_number < 32) ? 0 : 1;
+ u32 vf_shift = vf_number % 32;
+ u32 vflre = 0;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_rst_pf");
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+ break;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_X540:
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
+ break;
+ default:
+ break;
+ }
+
+ if (vflre & (1 << vf_shift)) {
+ ret_val = IXGBE_SUCCESS;
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+STATIC s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ u32 p2v_mailbox;
+
+ DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
+ if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+ ret_val = IXGBE_SUCCESS;
+ else
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Failed to obtain mailbox lock for VF%d", vf_number);
+
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_write_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_pf(hw, vf_number);
+ ixgbe_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer*/
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+
+}
+
+/**
+ * ixgbe_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+STATIC s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a &&
+ hw->mac.type != ixgbe_mac_X540)
+ return;
+
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->ops.read = ixgbe_read_mbx_pf;
+ mbx->ops.write = ixgbe_write_mbx_pf;
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+ mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
+ mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
+ mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.h
new file mode 100644
index 00000000..bde50a51
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.h
@@ -0,0 +1,165 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+ ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
+ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
+ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
+ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
+ /* This value should always be last */
+ ixgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+
+/* mailbox API, version 1.2 VF requests */
+#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
+#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
+
+/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */
+enum ixgbevf_xcast_modes {
+ IXGBEVF_XCAST_MODE_NONE = 0,
+ IXGBEVF_XCAST_MODE_MULTI,
+ IXGBEVF_XCAST_MODE_ALLMULTI,
+ IXGBEVF_XCAST_MODE_PROMISC,
+};
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+/* mailbox API, version 2.0 VF requests */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+#define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */
+#define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */
+#define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */
+#define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */
+#define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */
+#define IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */
+
+/* mailbox API, version 2.0 PF requests */
+#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h
new file mode 100644
index 00000000..bb5dfd2a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -0,0 +1,172 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXGBE_OS_H_
+#define _IXGBE_OS_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+
+#include "../ixgbe_logs.h"
+#include "../ixgbe_bypass_defines.h"
+
+#define ASSERT(x) if(!(x)) rte_panic("IXGBE: x")
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define msec_delay(x) DELAY(1000*(x))
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n");
+#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args)
+#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args)
+
+#define ERROR_REPORT1(e, S, args...) DEBUGOUT(S, ##args)
+#define ERROR_REPORT2(e, S, args...) DEBUGOUT(S, ##args)
+#define ERROR_REPORT3(e, S, args...) DEBUGOUT(S, ##args)
+
+#define FALSE 0
+#define TRUE 1
+
+#define false 0
+#define true 1
+#define min(a,b) RTE_MIN(a,b)
+
+#define EWARN(hw, S, args...) DEBUGOUT1(S, ##args)
+
+/* Bunch of defines for shared code bogosity */
+#define UNREFERENCED_PARAMETER(_p)
+#define UNREFERENCED_1PARAMETER(_p)
+#define UNREFERENCED_2PARAMETER(_p, _q)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r)
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t)
+
+/* Shared code error reporting */
+enum {
+ IXGBE_ERROR_SOFTWARE,
+ IXGBE_ERROR_POLLING,
+ IXGBE_ERROR_INVALID_STATE,
+ IXGBE_ERROR_UNSUPPORTED,
+ IXGBE_ERROR_ARGUMENT,
+ IXGBE_ERROR_CAUTION,
+};
+
+#define STATIC static
+#define IXGBE_NTOHL(_i) rte_be_to_cpu_32(_i)
+#define IXGBE_NTOHS(_i) rte_be_to_cpu_16(_i)
+#define IXGBE_CPU_TO_LE16(_i) rte_cpu_to_le_16(_i)
+#define IXGBE_CPU_TO_LE32(_i) rte_cpu_to_le_32(_i)
+#define IXGBE_LE32_TO_CPU(_i) rte_le_to_cpu_32(_i)
+#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i)
+#define IXGBE_CPU_TO_BE16(_i) rte_cpu_to_be_16(_i)
+#define IXGBE_CPU_TO_BE32(_i) rte_cpu_to_be_32(_i)
+#define IXGBE_BE32_TO_CPU(_i) rte_be_to_cpu_32(_i)
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+#ifndef __cplusplus
+typedef int bool;
+#endif
+
+#define mb() rte_mb()
+#define wmb() rte_wmb()
+#define rmb() rte_rmb()
+
+#define IOMEM
+
+#define prefetch(x) rte_prefetch0(x)
+
+#define IXGBE_PCI_REG(reg) rte_read32(reg)
+
+static inline uint32_t ixgbe_read_addr(volatile void* addr)
+{
+ return rte_le_to_cpu_32(IXGBE_PCI_REG(addr));
+}
+
+#define IXGBE_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+
+#define IXGBE_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
+
+#define IXGBE_PCI_REG_ADDR(hw, reg) \
+ ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
+
+#define IXGBE_PCI_REG_ARRAY_ADDR(hw, reg, index) \
+ IXGBE_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
+
+/* Not implemented !! */
+#define IXGBE_READ_PCIE_WORD(hw, reg) 0
+#define IXGBE_WRITE_PCIE_WORD(hw, reg, value) do { } while(0)
+
+#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+
+#define IXGBE_READ_REG(hw, reg) \
+ ixgbe_read_addr(IXGBE_PCI_REG_ADDR((hw), (reg)))
+
+#define IXGBE_WRITE_REG(hw, reg, value) \
+ IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ADDR((hw), (reg)), (value))
+
+#define IXGBE_READ_REG_ARRAY(hw, reg, index) \
+ IXGBE_PCI_REG(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)))
+
+#define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \
+ IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
+
+#define IXGBE_WRITE_REG_THEN_POLL_MASK(hw, reg, val, mask, poll_ms) \
+do { \
+ uint32_t cnt = poll_ms; \
+ IXGBE_WRITE_REG(hw, (reg), (val)); \
+ while (((IXGBE_READ_REG(hw, (reg))) & (mask)) && (cnt--)) \
+ rte_delay_ms(1); \
+} while (0)
+
+#endif /* _IXGBE_OS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c
new file mode 100644
index 00000000..2df068ee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -0,0 +1,2713 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw);
+STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
+STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
+STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
+STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
+STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
+STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
+STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
+
+/**
+ * ixgbe_out_i2c_byte_ack - Send I2C byte with ack
+ * @hw: pointer to the hardware structure
+ * @byte: byte to send
+ *
+ * Returns an error code on error.
+ */
+STATIC s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
+{
+ s32 status;
+
+ status = ixgbe_clock_out_i2c_byte(hw, byte);
+ if (status)
+ return status;
+ return ixgbe_get_i2c_ack(hw);
+}
+
+/**
+ * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack
+ * @hw: pointer to the hardware structure
+ * @byte: pointer to a u8 to receive the byte
+ *
+ * Returns an error code on error.
+ */
+STATIC s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
+{
+ s32 status;
+
+ status = ixgbe_clock_in_i2c_byte(hw, byte);
+ if (status)
+ return status;
+ /* ACK */
+ return ixgbe_clock_out_i2c_bit(hw, false);
+}
+
+/**
+ * ixgbe_ones_comp_byte_add - Perform one's complement addition
+ * @add1: addend 1
+ * @add2: addend 2
+ *
+ * Returns one's complement 8-bit sum.
+ */
+STATIC u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
+{
+ u16 sum = add1 + add2;
+
+ sum = (sum & 0xFF) + (sum >> 8);
+ return sum & 0xFF;
+}
+
+/**
+ * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ * @lock: true if to take and release semaphore
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg,
+ u16 *val, bool lock)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ int max_retry = 3;
+ int retry = 0;
+ u8 csum_byte;
+ u8 high_bits;
+ u8 low_bits;
+ u8 reg_high;
+ u8 csum;
+
+ reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */
+ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
+ csum = ~csum;
+ do {
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
+ ixgbe_i2c_start(hw);
+ /* Device Address and write indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr))
+ goto fail;
+ /* Write bits 14:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+ goto fail;
+ /* Write bits 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+ goto fail;
+ /* Write csum */
+ if (ixgbe_out_i2c_byte_ack(hw, csum))
+ goto fail;
+ /* Re-start condition */
+ ixgbe_i2c_start(hw);
+ /* Device Address and read indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
+ goto fail;
+ /* Get upper bits */
+ if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
+ goto fail;
+ /* Get low bits */
+ if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
+ goto fail;
+ /* Get csum */
+ if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
+ goto fail;
+ /* NACK */
+ if (ixgbe_clock_out_i2c_bit(hw, false))
+ goto fail;
+ ixgbe_i2c_stop(hw);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ *val = (high_bits << 8) | low_bits;
+ return 0;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte read combined error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte read combined error.\n");
+ } while (retry < max_retry);
+
+ return IXGBE_ERR_I2C;
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ * @lock: true if to take and release semaphore
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg,
+ u16 val, bool lock)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ int max_retry = 1;
+ int retry = 0;
+ u8 reg_high;
+ u8 csum;
+
+ reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */
+ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
+ csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
+ csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
+ csum = ~csum;
+ do {
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
+ ixgbe_i2c_start(hw);
+ /* Device Address and write indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr))
+ goto fail;
+ /* Write bits 14:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+ goto fail;
+ /* Write bits 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+ goto fail;
+ /* Write data 15:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
+ goto fail;
+ /* Write data 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
+ goto fail;
+ /* Write csum */
+ if (ixgbe_out_i2c_byte_ack(hw, csum))
+ goto fail;
+ ixgbe_i2c_stop(hw);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return 0;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte write combined error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte write combined error.\n");
+ } while (retry < max_retry);
+
+ return IXGBE_ERR_I2C;
+}
+
+/**
+ * ixgbe_init_phy_ops_generic - Inits PHY function ptrs
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the function pointers.
+ **/
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ DEBUGFUNC("ixgbe_init_phy_ops_generic");
+
+ /* PHY */
+ phy->ops.identify = ixgbe_identify_phy_generic;
+ phy->ops.reset = ixgbe_reset_phy_generic;
+ phy->ops.read_reg = ixgbe_read_phy_reg_generic;
+ phy->ops.write_reg = ixgbe_write_phy_reg_generic;
+ phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi;
+ phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi;
+ phy->ops.setup_link = ixgbe_setup_phy_link_generic;
+ phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic;
+ phy->ops.check_link = NULL;
+ phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
+ phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic;
+ phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic;
+ phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic;
+ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic;
+ phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic;
+ phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear;
+ phy->ops.identify_sfp = ixgbe_identify_module_generic;
+ phy->sfp_type = ixgbe_sfp_type_unknown;
+ phy->ops.read_i2c_byte_unlocked = ixgbe_read_i2c_byte_generic_unlocked;
+ phy->ops.write_i2c_byte_unlocked =
+ ixgbe_write_i2c_byte_generic_unlocked;
+ phy->ops.check_overtemp = ixgbe_tn_check_overtemp;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_probe_phy - Probe a single address for a PHY
+ * @hw: pointer to hardware structure
+ * @phy_addr: PHY address to probe
+ *
+ * Returns true if PHY found
+ */
+static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
+{
+ u16 ext_ability = 0;
+
+ if (!ixgbe_validate_phy_addr(hw, phy_addr)) {
+ DEBUGOUT1("Unable to validate PHY address 0x%04X\n",
+ phy_addr);
+ return false;
+ }
+
+ if (ixgbe_get_phy_id(hw))
+ return false;
+
+ hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+ if (ext_ability &
+ (IXGBE_MDIO_PHY_10GBASET_ABILITY |
+ IXGBE_MDIO_PHY_1000BASET_ABILITY))
+ hw->phy.type = ixgbe_phy_cu_unknown;
+ else
+ hw->phy.type = ixgbe_phy_generic;
+ }
+
+ return true;
+}
+
+/**
+ * ixgbe_identify_phy_generic - Get physical layer module
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ **/
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u16 phy_addr;
+
+ DEBUGFUNC("ixgbe_identify_phy_generic");
+
+ if (!hw->phy.phy_semaphore_mask) {
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+ }
+
+ if (hw->phy.type != ixgbe_phy_unknown)
+ return IXGBE_SUCCESS;
+
+ if (hw->phy.nw_mng_if_sel) {
+ phy_addr = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ if (ixgbe_probe_phy(hw, phy_addr))
+ return IXGBE_SUCCESS;
+ else
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+ }
+
+ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+ if (ixgbe_probe_phy(hw, phy_addr)) {
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ }
+
+ /* Certain media types do not have a phy so an address will not
+ * be found and the code will take this path. Caller has to
+ * decide if it is an error or not.
+ */
+ if (status != IXGBE_SUCCESS)
+ hw->phy.addr = 0;
+
+ return status;
+}
+
+/**
+ * ixgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the MMNGC.MNG_VETO bit to see if there are
+ * any constraints on link from manageability. For MAC's that don't
+ * have this bit just return faluse since the link can not be blocked
+ * via this method.
+ **/
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
+{
+ u32 mmngc;
+
+ DEBUGFUNC("ixgbe_check_reset_blocked");
+
+ /* If we don't have this bit, it can't be blocking */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return false;
+
+ mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
+ if (mmngc & IXGBE_MMNGC_MNG_VETO) {
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
+ "MNG_VETO bit detected.\n");
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_validate_phy_addr - Determines phy address is valid
+ * @hw: pointer to hardware structure
+ * @phy_addr: PHY address
+ *
+ **/
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
+{
+ u16 phy_id = 0;
+ bool valid = false;
+
+ DEBUGFUNC("ixgbe_validate_phy_addr");
+
+ hw->phy.addr = phy_addr;
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
+
+ if (phy_id != 0xFFFF && phy_id != 0x0)
+ valid = true;
+
+ DEBUGOUT1("PHY ID HIGH is 0x%04X\n", phy_id);
+
+ return valid;
+}
+
+/**
+ * ixgbe_get_phy_id - Get the phy type
+ * @hw: pointer to hardware structure
+ *
+ **/
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+{
+ u32 status;
+ u16 phy_id_high = 0;
+ u16 phy_id_low = 0;
+
+ DEBUGFUNC("ixgbe_get_phy_id");
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &phy_id_high);
+
+ if (status == IXGBE_SUCCESS) {
+ hw->phy.id = (u32)(phy_id_high << 16);
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &phy_id_low);
+ hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
+ hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
+ }
+ DEBUGOUT2("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n",
+ phy_id_high, phy_id_low);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_type_from_id - Get the phy type
+ * @phy_id: PHY ID information
+ *
+ **/
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
+{
+ enum ixgbe_phy_type phy_type;
+
+ DEBUGFUNC("ixgbe_get_phy_type_from_id");
+
+ switch (phy_id) {
+ case TN1010_PHY_ID:
+ phy_type = ixgbe_phy_tn;
+ break;
+ case X550_PHY_ID2:
+ case X550_PHY_ID3:
+ case X540_PHY_ID:
+ phy_type = ixgbe_phy_aq;
+ break;
+ case QT2022_PHY_ID:
+ phy_type = ixgbe_phy_qt;
+ break;
+ case ATH_PHY_ID:
+ phy_type = ixgbe_phy_nl;
+ break;
+ case X557_PHY_ID:
+ case X557_PHY_ID2:
+ phy_type = ixgbe_phy_x550em_ext_t;
+ break;
+ case IXGBE_M88E1500_E_PHY_ID:
+ case IXGBE_M88E1543_E_PHY_ID:
+ phy_type = ixgbe_phy_ext_1g_t;
+ break;
+ default:
+ phy_type = ixgbe_phy_unknown;
+ break;
+ }
+ return phy_type;
+}
+
+/**
+ * ixgbe_reset_phy_generic - Performs a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u16 ctrl = 0;
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_reset_phy_generic");
+
+ if (hw->phy.type == ixgbe_phy_unknown)
+ status = ixgbe_identify_phy_generic(hw);
+
+ if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none)
+ goto out;
+
+ /* Don't reset PHY if it's shut down due to overtemp. */
+ if (!hw->phy.reset_if_overtemp &&
+ (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
+ goto out;
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
+ /*
+ * Perform soft PHY reset to the PHY_XS.
+ * This will cause a soft reset to the PHY
+ */
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ IXGBE_MDIO_PHY_XS_RESET);
+
+ /*
+ * Poll for reset bit to self-clear indicating reset is complete.
+ * Some PHYs could take up to 3 seconds to complete and need about
+ * 1.7 usec delay after the reset is complete.
+ */
+ for (i = 0; i < 30; i++) {
+ msec_delay(100);
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &ctrl);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+ usec_delay(2);
+ break;
+ }
+ } else {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ &ctrl);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
+ usec_delay(2);
+ break;
+ }
+ }
+ }
+
+ if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
+ status = IXGBE_ERR_RESET_FAILED;
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "PHY reset polling failed to complete.\n");
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without
+ * the SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
+{
+ u32 i, data, command;
+
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n");
+ DEBUGOUT("PHY address command did not complete, returning IXGBE_ERR_PHY\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /*
+ * Address cycle complete, setup and write the read
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n");
+ DEBUGOUT("PHY read command didn't complete, returning IXGBE_ERR_PHY\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /*
+ * Read operation is complete. Get the data
+ * from MSRWD
+ */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)(data);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ * using the SWFW lock - this function is needed in most cases
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ s32 status;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ DEBUGFUNC("ixgbe_read_phy_reg_generic");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
+ * without SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 i, command;
+
+ /* Put the data in the MDI single read and write data register*/
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /*
+ * Address cycle complete, setup and write the write
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY write cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ * using SWFW lock- this function is needed in most cases
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ s32 status;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ DEBUGFUNC("ixgbe_write_phy_reg_generic");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
+ status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_link_generic - Set and restart auto-neg
+ * @hw: pointer to hardware structure
+ *
+ * Restart auto-negotiation and PHY and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+ bool autoneg = false;
+ ixgbe_link_speed speed;
+
+ DEBUGFUNC("ixgbe_setup_phy_link_generic");
+
+ ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_10GB_FULL))
+ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ if (hw->mac.type == ixgbe_mac_X550) {
+ /* Set or unset auto-negotiation 5G advertisement */
+ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_5GB_FULL))
+ autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
+
+ /* Set or unset auto-negotiation 2.5G advertisement */
+ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_2_5GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
+ autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
+ }
+
+ /* Set or unset auto-negotiation 1G advertisement */
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_1GB_FULL))
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
+ IXGBE_MII_100BASE_T_ADVERTISE_HALF);
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
+ (speed & IXGBE_LINK_SPEED_100_FULL))
+ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
+ /* Restart PHY auto-negotiation. */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+
+ autoneg_reg |= IXGBE_MII_RESTART;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: unused
+ **/
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+ DEBUGFUNC("ixgbe_setup_phy_link_speed_generic");
+
+ /*
+ * Clear autoneg_advertised and set new values based on input link
+ * speed.
+ */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_5GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_10_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
+
+ /* Setup link based on the new speed settings */
+ ixgbe_setup_phy_link(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_copper_speeds_supported - Get copper link speeds from phy
+ * @hw: pointer to hardware structure
+ *
+ * Determines the supported link capabilities by reading the PHY auto
+ * negotiation register.
+ **/
+static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 speed_ability;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &speed_ability);
+ if (status)
+ return status;
+
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ **/
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic");
+
+ *autoneg = true;
+ if (!hw->phy.speeds_supported)
+ status = ixgbe_get_copper_speeds_supported(hw);
+
+ *speed = hw->phy.speeds_supported;
+ return status;
+}
+
+/**
+ * ixgbe_check_phy_link_tnx - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: current link speed
+ * @link_up: true is link is up, false otherwise
+ *
+ * Reads the VS1 register to determine if link is up and the current speed for
+ * the PHY.
+ **/
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 time_out;
+ u32 max_time_out = 10;
+ u16 phy_link = 0;
+ u16 phy_speed = 0;
+ u16 phy_data = 0;
+
+ DEBUGFUNC("ixgbe_check_phy_link_tnx");
+
+ /* Initialize speed and link to default case */
+ *link_up = false;
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /*
+ * Check current speed and link status of the PHY register.
+ * This is a vendor specific register and may have to
+ * be changed for other copper PHYs.
+ */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ usec_delay(10);
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &phy_data);
+ phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
+ phy_speed = phy_data &
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
+ if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
+ *link_up = true;
+ if (phy_speed ==
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_link_tnx - Set and restart auto-neg
+ * @hw: pointer to hardware structure
+ *
+ * Restart auto-negotiation and PHY and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+ bool autoneg = false;
+ ixgbe_link_speed speed;
+
+ DEBUGFUNC("ixgbe_setup_phy_link_tnx");
+
+ ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ /* Set or unset auto-negotiation 1G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL) {
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
+ /* Restart PHY auto-negotiation. */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+
+ autoneg_reg |= IXGBE_MII_RESTART;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx");
+
+ status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ firmware_version);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_get_phy_firmware_version_generic");
+
+ status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ firmware_version);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_phy_nl - Performs a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+{
+ u16 phy_offset, control, eword, edata, block_crc;
+ bool end_data = false;
+ u16 list_offset, data_offset;
+ u16 phy_data = 0;
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_reset_phy_nl");
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+
+ /* reset the PHY and poll for completion */
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ (phy_data | IXGBE_MDIO_PHY_XS_RESET));
+
+ for (i = 0; i < 100; i++) {
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+ if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
+ break;
+ msec_delay(10);
+ }
+
+ if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
+ DEBUGOUT("PHY reset did not complete.\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+
+ /* Get init offsets */
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+ &data_offset);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
+ data_offset++;
+ while (!end_data) {
+ /*
+ * Read control word from PHY init contents offset
+ */
+ ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
+ if (ret_val)
+ goto err_eeprom;
+ control = (eword & IXGBE_CONTROL_MASK_NL) >>
+ IXGBE_CONTROL_SHIFT_NL;
+ edata = eword & IXGBE_DATA_MASK_NL;
+ switch (control) {
+ case IXGBE_DELAY_NL:
+ data_offset++;
+ DEBUGOUT1("DELAY: %d MS\n", edata);
+ msec_delay(edata);
+ break;
+ case IXGBE_DATA_NL:
+ DEBUGOUT("DATA:\n");
+ data_offset++;
+ ret_val = hw->eeprom.ops.read(hw, data_offset,
+ &phy_offset);
+ if (ret_val)
+ goto err_eeprom;
+ data_offset++;
+ for (i = 0; i < edata; i++) {
+ ret_val = hw->eeprom.ops.read(hw, data_offset,
+ &eword);
+ if (ret_val)
+ goto err_eeprom;
+ hw->phy.ops.write_reg(hw, phy_offset,
+ IXGBE_TWINAX_DEV, eword);
+ DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword,
+ phy_offset);
+ data_offset++;
+ phy_offset++;
+ }
+ break;
+ case IXGBE_CONTROL_NL:
+ data_offset++;
+ DEBUGOUT("CONTROL:\n");
+ if (edata == IXGBE_CONTROL_EOL_NL) {
+ DEBUGOUT("EOL\n");
+ end_data = true;
+ } else if (edata == IXGBE_CONTROL_SOL_NL) {
+ DEBUGOUT("SOL\n");
+ } else {
+ DEBUGOUT("Bad control value\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+ break;
+ default:
+ DEBUGOUT("Bad control type\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+
+err_eeprom:
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", data_offset);
+ return IXGBE_ERR_PHY;
+}
+
+/**
+ * ixgbe_identify_module_generic - Identifies module type
+ * @hw: pointer to hardware structure
+ *
+ * Determines HW type and calls appropriate function.
+ **/
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
+
+ DEBUGFUNC("ixgbe_identify_module_generic");
+
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ status = ixgbe_identify_sfp_module_generic(hw);
+ break;
+
+ case ixgbe_media_type_fiber_qsfp:
+ status = ixgbe_identify_qsfp_module_generic(hw);
+ break;
+
+ default:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the SFP module and assigns appropriate PHY type.
+ **/
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 vendor_oui = 0;
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+ u8 identifier = 0;
+ u8 comp_codes_1g = 0;
+ u8 comp_codes_10g = 0;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u8 cable_tech = 0;
+ u8 cable_spec = 0;
+ u16 enforce_sfp = 0;
+
+ DEBUGFUNC("ixgbe_identify_sfp_module_generic");
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ /* LAN ID is needed for I2C access */
+ hw->mac.ops.set_lan_id(hw);
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_IDENTIFIER,
+ &identifier);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ } else {
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES,
+ &comp_codes_1g);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_10GBE_COMP_CODES,
+ &comp_codes_10g);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_CABLE_TECHNOLOGY,
+ &cable_tech);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ /* ID Module
+ * =========
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ * 3 SFP_DA_CORE0 - 82599-specific
+ * 4 SFP_DA_CORE1 - 82599-specific
+ * 5 SFP_SR/LR_CORE0 - 82599-specific
+ * 6 SFP_SR/LR_CORE1 - 82599-specific
+ * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
+ * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
+ * 9 SFP_1g_cu_CORE0 - 82599-specific
+ * 10 SFP_1g_cu_CORE1 - 82599-specific
+ * 11 SFP_1g_sx_CORE0 - 82599-specific
+ * 12 SFP_1g_sx_CORE1 - 82599-specific
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ } else {
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_cu_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_cu_core1;
+ } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
+ hw->phy.ops.read_i2c_eeprom(
+ hw, IXGBE_SFF_CABLE_SPEC_COMP,
+ &cable_spec);
+ if (cable_spec &
+ IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_unknown;
+ }
+ } else if (comp_codes_10g &
+ (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_srlr_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_srlr_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_cu_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_cu_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_lx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_lx_core1;
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ }
+ }
+
+ if (hw->phy.sfp_type != stored_sfp_type)
+ hw->phy.sfp_setup_needed = true;
+
+ /* Determine if the SFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = false;
+ if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+ ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+ hw->phy.multispeed_fiber = true;
+
+ /* Determine PHY vendor */
+ if (hw->phy.type != ixgbe_phy_nl) {
+ hw->phy.id = identifier;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ vendor_oui =
+ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+ switch (vendor_oui) {
+ case IXGBE_SFF_VENDOR_OUI_TYCO:
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_tyco;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_FTL:
+ if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_ftl_active;
+ else
+ hw->phy.type = ixgbe_phy_sfp_ftl;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_AVAGO:
+ hw->phy.type = ixgbe_phy_sfp_avago;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_INTEL:
+ hw->phy.type = ixgbe_phy_sfp_intel;
+ break;
+ default:
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_unknown;
+ else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_active_unknown;
+ else
+ hw->phy.type = ixgbe_phy_sfp_unknown;
+ break;
+ }
+ }
+
+ /* Allow any DA cable vendor */
+ if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
+ IXGBE_SFF_DA_ACTIVE_CABLE)) {
+ status = IXGBE_SUCCESS;
+ goto out;
+ }
+
+ /* Verify supported 1G SFP modules */
+ if (comp_codes_10g == 0 &&
+ !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Anything else 82598-based is supported */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ status = IXGBE_SUCCESS;
+ goto out;
+ }
+
+ ixgbe_get_device_caps(hw, &enforce_sfp);
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
+ !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ /* Make sure we're a supported PHY type */
+ if (hw->phy.type == ixgbe_phy_sfp_intel) {
+ status = IXGBE_SUCCESS;
+ } else {
+ if (hw->allow_unsupported_sfp == true) {
+ EWARN(hw,
+ "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
+ "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+ "Intel Corporation is not responsible for any harm caused by using untested modules.\n");
+ status = IXGBE_SUCCESS;
+ } else {
+ DEBUGOUT("SFP+ module not supported\n");
+ hw->phy.type =
+ ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+ }
+ } else {
+ status = IXGBE_SUCCESS;
+ }
+ }
+
+out:
+ return status;
+
+err_read_i2c_eeprom:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ if (hw->phy.type != ixgbe_phy_nl) {
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+ }
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+}
+
+/**
+ * ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current SFP.
+ */
+u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
+{
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u8 comp_codes_10g = 0;
+ u8 comp_codes_1g = 0;
+
+ DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic");
+
+ hw->phy.ops.identify_sfp(hw);
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ return physical_layer;
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_qsfp_passive_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case ixgbe_phy_sfp_ftl_active:
+ case ixgbe_phy_sfp_active_unknown:
+ case ixgbe_phy_qsfp_active_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
+ break;
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
+ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
+ break;
+ case ixgbe_phy_qsfp_intel:
+ case ixgbe_phy_qsfp_unknown:
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
+ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ default:
+ break;
+ }
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the QSFP module and assigns appropriate PHY type
+ **/
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 vendor_oui = 0;
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+ u8 identifier = 0;
+ u8 comp_codes_1g = 0;
+ u8 comp_codes_10g = 0;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u16 enforce_sfp = 0;
+ u8 connector = 0;
+ u8 cable_length = 0;
+ u8 device_tech = 0;
+ bool active_cable = false;
+
+ DEBUGFUNC("ixgbe_identify_qsfp_module_generic");
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ /* LAN ID is needed for I2C access */
+ hw->mac.ops.set_lan_id(hw);
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+ &identifier);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ hw->phy.id = identifier;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
+ &comp_codes_10g);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
+ &comp_codes_1g);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
+ hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
+ } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
+ } else {
+ if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
+ active_cable = true;
+
+ if (!active_cable) {
+ /* check for active DA cables that pre-date
+ * SFF-8436 v3.6 */
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_CONNECTOR,
+ &connector);
+
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_CABLE_LENGTH,
+ &cable_length);
+
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_DEVICE_TECH,
+ &device_tech);
+
+ if ((connector ==
+ IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
+ (cable_length > 0) &&
+ ((device_tech >> 4) ==
+ IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
+ active_cable = true;
+ }
+
+ if (active_cable) {
+ hw->phy.type = ixgbe_phy_qsfp_active_unknown;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ /* unsupported module type */
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+ }
+
+ if (hw->phy.sfp_type != stored_sfp_type)
+ hw->phy.sfp_setup_needed = true;
+
+ /* Determine if the QSFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = false;
+ if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+ ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+ hw->phy.multispeed_fiber = true;
+
+ /* Determine PHY vendor for optical modules */
+ if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ vendor_oui =
+ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+ if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
+ hw->phy.type = ixgbe_phy_qsfp_intel;
+ else
+ hw->phy.type = ixgbe_phy_qsfp_unknown;
+
+ ixgbe_get_device_caps(hw, &enforce_sfp);
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
+ /* Make sure we're a supported PHY type */
+ if (hw->phy.type == ixgbe_phy_qsfp_intel) {
+ status = IXGBE_SUCCESS;
+ } else {
+ if (hw->allow_unsupported_sfp == true) {
+ EWARN(hw,
+ "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
+ "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+ "Intel Corporation is not responsible for any harm caused by using untested modules.\n");
+ status = IXGBE_SUCCESS;
+ } else {
+ DEBUGOUT("QSFP module not supported\n");
+ hw->phy.type =
+ ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+ }
+ } else {
+ status = IXGBE_SUCCESS;
+ }
+ }
+
+out:
+ return status;
+
+err_read_i2c_eeprom:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+}
+
+/**
+ * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
+ * @hw: pointer to hardware structure
+ * @list_offset: offset to the SFP ID list
+ * @data_offset: offset to the SFP data block
+ *
+ * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
+ * so it returns the offsets to the phy init sequence block.
+ **/
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ u16 *list_offset,
+ u16 *data_offset)
+{
+ u16 sfp_id;
+ u16 sfp_type = hw->phy.sfp_type;
+
+ DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets");
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+
+ if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
+ (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ /*
+ * Limiting active cables and 1G Phys must be initialized as
+ * SR modules
+ */
+ if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core0)
+ sfp_type = ixgbe_sfp_type_srlr_core0;
+ else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core1)
+ sfp_type = ixgbe_sfp_type_srlr_core1;
+
+ /* Read offset to PHY init contents */
+ if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed",
+ IXGBE_PHY_INIT_OFFSET_NL);
+ return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+ }
+
+ if ((!*list_offset) || (*list_offset == 0xFFFF))
+ return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+
+ /* Shift offset to first ID word */
+ (*list_offset)++;
+
+ /*
+ * Find the matching SFP ID in the EEPROM
+ * and program the init sequence
+ */
+ if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+ goto err_phy;
+
+ while (sfp_id != IXGBE_PHY_INIT_END_NL) {
+ if (sfp_id == sfp_type) {
+ (*list_offset)++;
+ if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
+ goto err_phy;
+ if ((!*data_offset) || (*data_offset == 0xFFFF)) {
+ DEBUGOUT("SFP+ module not supported\n");
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ } else {
+ break;
+ }
+ } else {
+ (*list_offset) += 2;
+ if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+ goto err_phy;
+ }
+ }
+
+ if (sfp_id == IXGBE_PHY_INIT_END_NL) {
+ DEBUGOUT("No matching SFP+ module found\n");
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ return IXGBE_SUCCESS;
+
+err_phy:
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", *list_offset);
+ return IXGBE_ERR_PHY;
+}
+
+/**
+ * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ DEBUGFUNC("ixgbe_read_i2c_eeprom_generic");
+
+ return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ sff8472_data);
+}
+
+/**
+ * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data)
+{
+ DEBUGFUNC("ixgbe_write_i2c_eeprom_generic");
+
+ return hw->phy.ops.write_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
+}
+
+/**
+ * ixgbe_is_sfp_probe - Returns true if SFP is being detected
+ * @hw: pointer to hardware structure
+ * @offset: eeprom offset to be read
+ * @addr: I2C address to be read
+ */
+STATIC bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
+{
+ if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
+ offset == IXGBE_SFF_IDENTIFIER &&
+ hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ return true;
+ return false;
+}
+
+/**
+ * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: address to read from
+ * @data: value read
+ * @lock: true if to take and release semaphore
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+STATIC s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data, bool lock)
+{
+ s32 status;
+ u32 max_retry = 10;
+ u32 retry = 0;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ bool nack = 1;
+ *data = 0;
+
+ DEBUGFUNC("ixgbe_read_i2c_byte_generic");
+
+ if (hw->mac.type >= ixgbe_mac_X550)
+ max_retry = 3;
+ if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
+ max_retry = IXGBE_SFP_DETECT_RETRIES;
+
+ do {
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ ixgbe_i2c_start(hw);
+
+ /* Device Address and write indication */
+ status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ ixgbe_i2c_start(hw);
+
+ /* Device Address and read indication */
+ status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_clock_in_i2c_byte(hw, data);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_bit(hw, nack);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ ixgbe_i2c_stop(hw);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return IXGBE_SUCCESS;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ if (lock) {
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(100);
+ }
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte read error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte read error.\n");
+
+ } while (retry < max_retry);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: address to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, true);
+}
+
+/**
+ * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: address to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, false);
+}
+
+/**
+ * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: address to write to
+ * @data: value to write
+ * @lock: true if to take and release semaphore
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+STATIC s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data, bool lock)
+{
+ s32 status;
+ u32 max_retry = 1;
+ u32 retry = 0;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+
+ DEBUGFUNC("ixgbe_write_i2c_byte_generic");
+
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) !=
+ IXGBE_SUCCESS)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ do {
+ ixgbe_i2c_start(hw);
+
+ status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_byte(hw, data);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ ixgbe_i2c_stop(hw);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return IXGBE_SUCCESS;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte write error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte write error.\n");
+ } while (retry < max_retry);
+
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: address to write to
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, true);
+}
+
+/**
+ * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: address to write to
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, false);
+}
+
+/**
+ * ixgbe_i2c_start - Sets I2C start condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C start condition (High -> Low on SDA while SCL is High)
+ * Set bit-bang mode on X550 hardware.
+ **/
+STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+
+ DEBUGFUNC("ixgbe_i2c_start");
+
+ i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw);
+
+ /* Start condition must begin with data and clock high */
+ ixgbe_set_i2c_data(hw, &i2cctl, 1);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for start condition (4.7us) */
+ usec_delay(IXGBE_I2C_T_SU_STA);
+
+ ixgbe_set_i2c_data(hw, &i2cctl, 0);
+
+ /* Hold time for start condition (4us) */
+ usec_delay(IXGBE_I2C_T_HD_STA);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(IXGBE_I2C_T_LOW);
+
+}
+
+/**
+ * ixgbe_i2c_stop - Sets I2C stop condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ * Disables bit-bang mode and negates data output enable on X550
+ * hardware.
+ **/
+STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+ u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
+ u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw);
+
+ DEBUGFUNC("ixgbe_i2c_stop");
+
+ /* Stop condition must begin with data low and clock high */
+ ixgbe_set_i2c_data(hw, &i2cctl, 0);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for stop condition (4us) */
+ usec_delay(IXGBE_I2C_T_SU_STO);
+
+ ixgbe_set_i2c_data(hw, &i2cctl, 1);
+
+ /* bus free time between stop and start (4.7us)*/
+ usec_delay(IXGBE_I2C_T_BUF);
+
+ if (bb_en_bit || data_oe_bit || clk_oe_bit) {
+ i2cctl &= ~bb_en_bit;
+ i2cctl |= data_oe_bit | clk_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
+
+/**
+ * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte to clock in
+ *
+ * Clocks in one byte data via I2C data/clock
+ **/
+STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
+{
+ s32 i;
+ bool bit = 0;
+
+ DEBUGFUNC("ixgbe_clock_in_i2c_byte");
+
+ *data = 0;
+ for (i = 7; i >= 0; i--) {
+ ixgbe_clock_in_i2c_bit(hw, &bit);
+ *data |= bit << i;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte clocked out
+ *
+ * Clocks out one byte data via I2C data/clock
+ **/
+STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
+{
+ s32 status = IXGBE_SUCCESS;
+ s32 i;
+ u32 i2cctl;
+ bool bit;
+
+ DEBUGFUNC("ixgbe_clock_out_i2c_byte");
+
+ for (i = 7; i >= 0; i--) {
+ bit = (data >> i) & 0x1;
+ status = ixgbe_clock_out_i2c_bit(hw, bit);
+
+ if (status != IXGBE_SUCCESS)
+ break;
+ }
+
+ /* Release SDA line (set high) */
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_i2c_ack - Polls for I2C ACK
+ * @hw: pointer to hardware structure
+ *
+ * Clocks in/out one bit via I2C data/clock
+ **/
+STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
+{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+ s32 status = IXGBE_SUCCESS;
+ u32 i = 0;
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 timeout = 10;
+ bool ack = 1;
+
+ DEBUGFUNC("ixgbe_get_i2c_ack");
+
+ if (data_oe_bit) {
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(IXGBE_I2C_T_HIGH);
+
+ /* Poll for ACK. Note that ACK in I2C spec is
+ * transition from 1 to 0 */
+ for (i = 0; i < timeout; i++) {
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ ack = ixgbe_get_i2c_data(hw, &i2cctl);
+
+ usec_delay(1);
+ if (!ack)
+ break;
+ }
+
+ if (ack) {
+ DEBUGOUT("I2C ack was not received.\n");
+ status = IXGBE_ERR_I2C;
+ }
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(IXGBE_I2C_T_LOW);
+
+ return status;
+}
+
+/**
+ * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: read data value
+ *
+ * Clocks in one bit via I2C data/clock
+ **/
+STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+
+ DEBUGFUNC("ixgbe_clock_in_i2c_bit");
+
+ if (data_oe_bit) {
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(IXGBE_I2C_T_HIGH);
+
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ *data = ixgbe_get_i2c_data(hw, &i2cctl);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(IXGBE_I2C_T_LOW);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: data value to write
+ *
+ * Clocks out one bit via I2C data/clock
+ **/
+STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
+{
+ s32 status;
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+
+ DEBUGFUNC("ixgbe_clock_out_i2c_bit");
+
+ status = ixgbe_set_i2c_data(hw, &i2cctl, data);
+ if (status == IXGBE_SUCCESS) {
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(IXGBE_I2C_T_HIGH);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us.
+ * This also takes care of the data hold time.
+ */
+ usec_delay(IXGBE_I2C_T_LOW);
+ } else {
+ status = IXGBE_ERR_I2C;
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "I2C data was not set to %X\n", data);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Raises the I2C clock line '0'->'1'
+ * Negates the I2C clock output enable on X550 hardware.
+ **/
+STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+ u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
+ u32 i = 0;
+ u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
+ u32 i2cctl_r = 0;
+
+ DEBUGFUNC("ixgbe_raise_i2c_clk");
+
+ if (clk_oe_bit) {
+ *i2cctl |= clk_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ }
+
+ for (i = 0; i < timeout; i++) {
+ *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ /* SCL rise time (1000ns) */
+ usec_delay(IXGBE_I2C_T_RISE);
+
+ i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
+ break;
+ }
+}
+
+/**
+ * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Lowers the I2C clock line '1'->'0'
+ * Asserts the I2C clock output enable on X550 hardware.
+ **/
+STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+ DEBUGFUNC("ixgbe_lower_i2c_clk");
+
+ *i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw));
+ *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* SCL fall time (300ns) */
+ usec_delay(IXGBE_I2C_T_FALL);
+}
+
+/**
+ * ixgbe_set_i2c_data - Sets the I2C data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ * @data: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ * Asserts the I2C data output enable on X550 hardware.
+ **/
+STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
+{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_set_i2c_data");
+
+ if (data)
+ *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ else
+ *i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw));
+ *i2cctl &= ~data_oe_bit;
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
+ usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
+
+ if (!data) /* Can't verify data in this case */
+ return IXGBE_SUCCESS;
+ if (data_oe_bit) {
+ *i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ /* Verify data was set correctly */
+ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
+ status = IXGBE_ERR_I2C;
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "Error - I2C data was not set to %X.\n",
+ data);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_i2c_data - Reads the I2C SDA data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Returns the I2C data bit value
+ * Negates the I2C data output enable on X550 hardware.
+ **/
+STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+ bool data;
+ UNREFERENCED_1PARAMETER(hw);
+
+ DEBUGFUNC("ixgbe_get_i2c_data");
+
+ if (data_oe_bit) {
+ *i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(IXGBE_I2C_T_FALL);
+ }
+
+ if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
+ data = 1;
+ else
+ data = 0;
+
+ return data;
+}
+
+/**
+ * ixgbe_i2c_bus_clear - Clears the I2C bus
+ * @hw: pointer to hardware structure
+ *
+ * Clears the I2C bus by sending nine clock pulses.
+ * Used when data line is stuck low.
+ **/
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
+{
+ u32 i2cctl;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_i2c_bus_clear");
+
+ ixgbe_i2c_start(hw);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+
+ ixgbe_set_i2c_data(hw, &i2cctl, 1);
+
+ for (i = 0; i < 9; i++) {
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Min high period of clock is 4us */
+ usec_delay(IXGBE_I2C_T_HIGH);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Min low period of clock is 4.7us*/
+ usec_delay(IXGBE_I2C_T_LOW);
+ }
+
+ ixgbe_i2c_start(hw);
+
+ /* Put the i2c bus back to default state */
+ ixgbe_i2c_stop(hw);
+}
+
+/**
+ * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
+ * @hw: pointer to hardware structure
+ *
+ * Checks if the LASI temp alarm status was triggered due to overtemp
+ **/
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 phy_data = 0;
+
+ DEBUGFUNC("ixgbe_tn_check_overtemp");
+
+ if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
+ goto out;
+
+ /* Check that the LASI temp alarm status was triggered */
+ hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
+
+ if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
+ goto out;
+
+ status = IXGBE_ERR_OVERTEMP;
+ ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature");
+out:
+ return status;
+}
+
+/**
+ * ixgbe_set_copper_phy_power - Control power for copper phy
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ */
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
+{
+ u32 status;
+ u16 reg;
+
+ if (!on && ixgbe_mng_present(hw))
+ return 0;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ if (on) {
+ reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+ } else {
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+ reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+ }
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+ return status;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.h
new file mode 100644
index 00000000..cf8cadd9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.h
@@ -0,0 +1,218 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_PHY_H_
+#define _IXGBE_PHY_H_
+
+#include "ixgbe_type.h"
+#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
+#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
+#define IXGBE_I2C_EEPROM_BANK_LEN 0xFF
+
+/* EEPROM byte offsets */
+#define IXGBE_SFF_IDENTIFIER 0x0
+#define IXGBE_SFF_IDENTIFIER_SFP 0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
+#define IXGBE_SFF_1GBE_COMP_CODES 0x6
+#define IXGBE_SFF_10GBE_COMP_CODES 0x3
+#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
+#define IXGBE_SFF_SFF_8472_SWAP 0x5C
+#define IXGBE_SFF_SFF_8472_COMP 0x5E
+#define IXGBE_SFF_SFF_8472_OSCB 0x6E
+#define IXGBE_SFF_SFF_8472_ESCB 0x76
+#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7
+#define IXGBE_SFF_QSFP_CONNECTOR 0x82
+#define IXGBE_SFF_QSFP_10GBE_COMP 0x83
+#define IXGBE_SFF_QSFP_1GBE_COMP 0x86
+#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92
+#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
+
+/* Bitmasks */
+#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
+#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
+#define IXGBE_SFF_ADDRESSING_MODE 0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
+#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
+#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
+#define IXGBE_I2C_EEPROM_READ_MASK 0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+
+#define IXGBE_CS4227 0xBE /* CS4227 address */
+#define IXGBE_CS4227_GLOBAL_ID_LSB 0
+#define IXGBE_CS4227_GLOBAL_ID_MSB 1
+#define IXGBE_CS4227_SCRATCH 2
+#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5
+#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F
+#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */
+#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */
+#define IXGBE_CS4227_RESET_PENDING 0x1357
+#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
+#define IXGBE_CS4227_RETRIES 15
+#define IXGBE_CS4227_EFUSE_STATUS 0x0181
+#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */
+#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */
+#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */
+#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */
+#define IXGBE_CS4227_EEPROM_STATUS 0x5001
+#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001
+#define IXGBE_CS4227_SPEED_1G 0x8000
+#define IXGBE_CS4227_SPEED_10G 0
+#define IXGBE_CS4227_EDC_MODE_CX1 0x0002
+#define IXGBE_CS4227_EDC_MODE_SR 0x0004
+#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008
+#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */
+#define IXGBE_CS4227_RESET_DELAY 450 /* milliseconds */
+#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */
+#define IXGBE_PE 0xE0 /* Port expander address */
+#define IXGBE_PE_OUTPUT 1 /* Output register offset */
+#define IXGBE_PE_CONFIG 3 /* Config register offset */
+#define IXGBE_PE_BIT1 (1 << 1)
+
+/* Flow control defines */
+#define IXGBE_TAF_SYM_PAUSE 0x400
+#define IXGBE_TAF_ASM_PAUSE 0x800
+
+/* Bit-shift macros */
+#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
+#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
+#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8
+
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
+#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
+#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
+#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100
+
+/* I2C SDA and SCL timing parameters for standard mode */
+#define IXGBE_I2C_T_HD_STA 4
+#define IXGBE_I2C_T_LOW 5
+#define IXGBE_I2C_T_HIGH 4
+#define IXGBE_I2C_T_SU_STA 5
+#define IXGBE_I2C_T_HD_DATA 5
+#define IXGBE_I2C_T_SU_DATA 1
+#define IXGBE_I2C_T_RISE 1
+#define IXGBE_I2C_T_FALL 1
+#define IXGBE_I2C_T_SU_STO 4
+#define IXGBE_I2C_T_BUF 5
+
+#ifndef IXGBE_SFP_DETECT_RETRIES
+#define IXGBE_SFP_DETECT_RETRIES 10
+
+#endif /* IXGBE_SFP_DETECT_RETRIES */
+#define IXGBE_TN_LASI_STATUS_REG 0x9005
+#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+
+/* SFP+ SFF-8472 Compliance */
+#define IXGBE_SFF_SFF_8472_UNSUP 0x00
+
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data);
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data);
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data);
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
+
+/* PHY specific */
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up);
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ u16 *list_offset,
+ u16 *data_offset);
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data);
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 *val, bool lock);
+s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 val, bool lock);
+#endif /* _IXGBE_PHY_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_type.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_type.h
new file mode 100644
index 00000000..6e03089e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_type.h
@@ -0,0 +1,4390 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_TYPE_H_
+#define _IXGBE_TYPE_H_
+
+/*
+ * The following is a brief description of the error categories used by the
+ * ERROR_REPORT* macros.
+ *
+ * - IXGBE_ERROR_INVALID_STATE
+ * This category is for errors which represent a serious failure state that is
+ * unexpected, and could be potentially harmful to device operation. It should
+ * not be used for errors relating to issues that can be worked around or
+ * ignored.
+ *
+ * - IXGBE_ERROR_POLLING
+ * This category is for errors related to polling/timeout issues and should be
+ * used in any case where the timeout occured, or a failure to obtain a lock, or
+ * failure to receive data within the time limit.
+ *
+ * - IXGBE_ERROR_CAUTION
+ * This category should be used for reporting issues that may be the cause of
+ * other errors, such as temperature warnings. It should indicate an event which
+ * could be serious, but hasn't necessarily caused problems yet.
+ *
+ * - IXGBE_ERROR_SOFTWARE
+ * This category is intended for errors due to software state preventing
+ * something. The category is not intended for errors due to bad arguments, or
+ * due to unsupported features. It should be used when a state occurs which
+ * prevents action but is not a serious issue.
+ *
+ * - IXGBE_ERROR_ARGUMENT
+ * This category is for when a bad or invalid argument is passed. It should be
+ * used whenever a function is called and error checking has detected the
+ * argument is wrong or incorrect.
+ *
+ * - IXGBE_ERROR_UNSUPPORTED
+ * This category is for errors which are due to unsupported circumstances or
+ * configuration issues. It should not be used when the issue is due to an
+ * invalid argument, but for when something has occurred that is unsupported
+ * (Ex: Flow control autonegotiation or an unsupported SFP+ module.)
+ */
+
+#include "ixgbe_osdep.h"
+
+/* Override this by setting IOMEM in your ixgbe_osdep.h header */
+
+/* Vendor ID */
+#define IXGBE_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82598 0x10B6
+#define IXGBE_DEV_ID_82598_BX 0x1508
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
+#define IXGBE_DEV_ID_82598AT 0x10C8
+#define IXGBE_DEV_ID_82598AT2 0x150B
+#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
+#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
+#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
+#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
+#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
+#define IXGBE_DEV_ID_82599_KX4 0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
+#define IXGBE_DEV_ID_82599_KR 0x1517
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+#define IXGBE_DEV_ID_82599_CX4 0x10F9
+#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071
+#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
+#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
+#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
+#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159
+#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D
+#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
+#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
+#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
+#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
+#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
+#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
+#define IXGBE_DEV_ID_82599EN_SFP 0x1557
+#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001
+#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
+#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_82599_VF_HV 0x152E
+#define IXGBE_DEV_ID_82599_LS 0x154F
+#define IXGBE_DEV_ID_82599_BYPASS 0x155D
+#define IXGBE_DEV_ID_X540T 0x1528
+#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_X540_VF_HV 0x1530
+#define IXGBE_DEV_ID_X540_BYPASS 0x155C
+#define IXGBE_DEV_ID_X540T1 0x1560
+#define IXGBE_DEV_ID_X550T 0x1563
+#define IXGBE_DEV_ID_X550T1 0x15D1
+/* Placeholder value, pending official value. */
+#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
+#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
+#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
+#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6
+#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
+#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8
+#define IXGBE_DEV_ID_X550EM_A_QSFP 0x15CA
+#define IXGBE_DEV_ID_X550EM_A_QSFP_N 0x15CC
+#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
+#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4
+#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5
+#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
+#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
+#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
+#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
+#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
+#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
+#define IXGBE_DEV_ID_X550_VF_HV 0x1564
+#define IXGBE_DEV_ID_X550_VF 0x1565
+#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
+#define IXGBE_DEV_ID_X550EM_A_VF_HV 0x15B4
+#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+
+#define IXGBE_CAT(r, m) IXGBE_##r##m
+
+#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, _IDX)])
+
+/* General Registers */
+#define IXGBE_CTRL 0x00000
+#define IXGBE_STATUS 0x00008
+#define IXGBE_CTRL_EXT 0x00018
+#define IXGBE_ESDP 0x00020
+#define IXGBE_EODSDP 0x00028
+#define IXGBE_I2CCTL_82599 0x00028
+#define IXGBE_I2CCTL IXGBE_I2CCTL_82599
+#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_82599
+#define IXGBE_I2CCTL_X550 0x15F5C
+#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550
+#define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550
+#define IXGBE_I2CCTL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2CCTL)
+#define IXGBE_PHY_GPIO 0x00028
+#define IXGBE_MAC_GPIO 0x00030
+#define IXGBE_PHYINT_STATUS0 0x00100
+#define IXGBE_PHYINT_STATUS1 0x00104
+#define IXGBE_PHYINT_STATUS2 0x00108
+#define IXGBE_LEDCTL 0x00200
+#define IXGBE_FRTIMER 0x00048
+#define IXGBE_TCPTIMER 0x0004C
+#define IXGBE_CORESPARE 0x00600
+#define IXGBE_EXVET 0x05078
+
+/* NVM Registers */
+#define IXGBE_EEC 0x10010
+#define IXGBE_EEC_X540 IXGBE_EEC
+#define IXGBE_EEC_X550 IXGBE_EEC
+#define IXGBE_EEC_X550EM_x IXGBE_EEC
+#define IXGBE_EEC_X550EM_a 0x15FF8
+#define IXGBE_EEC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EEC)
+
+#define IXGBE_EERD 0x10014
+#define IXGBE_EEWR 0x10018
+
+#define IXGBE_FLA 0x1001C
+#define IXGBE_FLA_X540 IXGBE_FLA
+#define IXGBE_FLA_X550 IXGBE_FLA
+#define IXGBE_FLA_X550EM_x IXGBE_FLA
+#define IXGBE_FLA_X550EM_a 0x15F68
+#define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA)
+
+#define IXGBE_EEMNGCTL 0x10110
+#define IXGBE_EEMNGDATA 0x10114
+#define IXGBE_FLMNGCTL 0x10118
+#define IXGBE_FLMNGDATA 0x1011C
+#define IXGBE_FLMNGCNT 0x10120
+#define IXGBE_FLOP 0x1013C
+
+#define IXGBE_GRC 0x10200
+#define IXGBE_GRC_X540 IXGBE_GRC
+#define IXGBE_GRC_X550 IXGBE_GRC
+#define IXGBE_GRC_X550EM_x IXGBE_GRC
+#define IXGBE_GRC_X550EM_a 0x15F64
+#define IXGBE_GRC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), GRC)
+
+#define IXGBE_SRAMREL 0x10210
+#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL
+#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL
+#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL
+#define IXGBE_SRAMREL_X550EM_a 0x15F6C
+#define IXGBE_SRAMREL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SRAMREL)
+
+#define IXGBE_PHYDBG 0x10218
+
+/* General Receive Control */
+#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
+#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
+
+#define IXGBE_VPDDIAG0 0x10204
+#define IXGBE_VPDDIAG1 0x10208
+
+/* I2CCTL Bit Masks */
+#define IXGBE_I2C_CLK_IN 0x00000001
+#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN
+#define IXGBE_I2C_CLK_IN_X550 0x00004000
+#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550
+#define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550
+#define IXGBE_I2C_CLK_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN)
+
+#define IXGBE_I2C_CLK_OUT 0x00000002
+#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT
+#define IXGBE_I2C_CLK_OUT_X550 0x00000200
+#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550
+#define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550
+#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT)
+
+#define IXGBE_I2C_DATA_IN 0x00000004
+#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN
+#define IXGBE_I2C_DATA_IN_X550 0x00001000
+#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550
+#define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550
+#define IXGBE_I2C_DATA_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN)
+
+#define IXGBE_I2C_DATA_OUT 0x00000008
+#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT
+#define IXGBE_I2C_DATA_OUT_X550 0x00000400
+#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550
+#define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550
+#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT)
+
+#define IXGBE_I2C_DATA_OE_N_EN 0
+#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN
+#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800
+#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550
+#define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550
+#define IXGBE_I2C_DATA_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN)
+
+#define IXGBE_I2C_BB_EN 0
+#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN
+#define IXGBE_I2C_BB_EN_X550 0x00000100
+#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550
+#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550
+#define IXGBE_I2C_BB_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN)
+
+#define IXGBE_I2C_CLK_OE_N_EN 0
+#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN
+#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000
+#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550
+#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550
+#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN)
+#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
+
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
+#define IXGBE_EMC_INTERNAL_DATA 0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
+#define IXGBE_EMC_DIODE1_DATA 0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19
+#define IXGBE_EMC_DIODE2_DATA 0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
+
+#define IXGBE_MAX_SENSORS 3
+
+struct ixgbe_thermal_diode_data {
+ u8 location;
+ u8 temp;
+ u8 caution_thresh;
+ u8 max_op_thresh;
+};
+
+struct ixgbe_thermal_sensor_data {
+ struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
+};
+
+
+#define NVM_OROM_OFFSET 0x17
+#define NVM_OROM_BLK_LOW 0x83
+#define NVM_OROM_BLK_HI 0x84
+#define NVM_OROM_PATCH_MASK 0xFF
+#define NVM_OROM_SHIFT 8
+
+#define NVM_VER_MASK 0x00FF /* version mask */
+#define NVM_VER_SHIFT 8 /* version bit shift */
+#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */
+#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
+#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */
+#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */
+#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */
+#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
+#define NVM_ETK_OFF_LOW 0x2D /* version low order word */
+#define NVM_ETK_OFF_HI 0x2E /* version high order word */
+#define NVM_ETK_SHIFT 16 /* high version word shift */
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETK_VALID 0x8000
+#define NVM_INVALID_PTR 0xFFFF
+#define NVM_VER_SIZE 32 /* version sting size */
+
+struct ixgbe_nvm_version {
+ u32 etk_id;
+ u8 nvm_major;
+ u16 nvm_minor;
+ u8 nvm_id;
+
+ bool oem_valid;
+ u8 oem_major;
+ u8 oem_minor;
+ u16 oem_release;
+
+ bool or_valid;
+ u8 or_major;
+ u16 or_build;
+ u8 or_patch;
+
+};
+
+/* Interrupt Registers */
+#define IXGBE_EICR 0x00800
+#define IXGBE_EICS 0x00808
+#define IXGBE_EIMS 0x00880
+#define IXGBE_EIMC 0x00888
+#define IXGBE_EIAC 0x00810
+#define IXGBE_EIAM 0x00890
+#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
+#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
+#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
+#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
+/* 82599 EITR is only 12 bits, with the lower 3 always zero */
+/*
+ * 82598 EITR is 16 bits but set the limits based on the max
+ * supported by all ixgbe hardware
+ */
+#define IXGBE_MAX_INT_RATE 488281
+#define IXGBE_MIN_INT_RATE 956
+#define IXGBE_MAX_EITR 0x00000FF8
+#define IXGBE_MIN_EITR 8
+#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
+ (0x012300 + (((_i) - 24) * 4)))
+#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
+#define IXGBE_EITR_LLI_MOD 0x00008000
+#define IXGBE_EITR_CNT_WDIS 0x80000000
+#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
+#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */
+#define IXGBE_EITRSEL 0x00894
+#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
+#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
+#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
+#define IXGBE_GPIE 0x00898
+
+/* Flow Control Registers */
+#define IXGBE_FCADBUL 0x03210
+#define IXGBE_FCADBUH 0x03214
+#define IXGBE_FCAMACL 0x04328
+#define IXGBE_FCAMACH 0x0432C
+#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_PFCTOP 0x03008
+#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTV 0x032A0
+#define IXGBE_FCCFG 0x03D00
+#define IXGBE_TFCS 0x0CE00
+
+/* Receive DMA Registers */
+#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+ (0x0D000 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+ (0x0D004 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+ (0x0D008 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+ (0x0D010 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+ (0x0D018 + (((_i) - 64) * 0x40)))
+#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+ (0x0D028 + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+ (0x0D02C + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCDBU 0x03028
+#define IXGBE_RDDCC 0x02F20
+#define IXGBE_RXMEMWRAP 0x03190
+#define IXGBE_STARCTRL 0x03024
+/*
+ * Split and Replication Receive Control Registers
+ * 00-15 : 0x02100 + n*4
+ * 16-64 : 0x01014 + n*0x40
+ * 64-127: 0x0D014 + (n-64)*0x40
+ */
+#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
+ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+ (0x0D014 + (((_i) - 64) * 0x40))))
+/*
+ * Rx DCA Control Register:
+ * 00-15 : 0x02200 + n*4
+ * 16-64 : 0x0100C + n*0x40
+ * 64-127: 0x0D00C + (n-64)*0x40
+ */
+#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
+ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+ (0x0D00C + (((_i) - 64) * 0x40))))
+#define IXGBE_RDRXCTL 0x02F00
+/* 8 of these 0x03C00 - 0x03C1C */
+#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
+#define IXGBE_RXCTRL 0x03000
+#define IXGBE_DROPEN 0x03D04
+#define IXGBE_RXPBSIZE_SHIFT 10
+#define IXGBE_RXPBSIZE_MASK 0x000FFC00
+
+/* Receive Registers */
+#define IXGBE_RXCSUM 0x05000
+#define IXGBE_RFCTL 0x05008
+#define IXGBE_DRECCCTL 0x02F08
+#define IXGBE_DRECCCTL_DISABLE 0
+#define IXGBE_DRECCCTL2 0x02F8C
+
+/* Multicast Table Array - 128 entries */
+#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
+#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x0A204 + ((_i) * 8)))
+#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
+#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
+/* Packet split receive type */
+#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
+ (0x0EA00 + ((_i) * 4)))
+/* array of 4096 1-bit vlan filters */
+#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
+/*array of 4096 4-bit vlan vmdq indices */
+#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+#define IXGBE_FCTRL 0x05080
+#define IXGBE_VLNCTRL 0x05088
+#define IXGBE_MCSTCTRL 0x05090
+#define IXGBE_MRQC 0x05818
+#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
+#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
+#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
+#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
+#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */
+#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
+#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */
+#define IXGBE_RQTC 0x0EC70
+#define IXGBE_MTQC 0x08120
+#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_PFFLPL 0x050B0
+#define IXGBE_PFFLPH 0x050B4
+#define IXGBE_VT_CTL 0x051B0
+#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
+/* 64 Mailboxes, 16 DW each */
+#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i)))
+#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */
+#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
+#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
+#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
+#define IXGBE_QDE 0x2F04
+#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */
+#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
+#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
+#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
+#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
+#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_LVMMC_RX 0x2FA8
+#define IXGBE_LVMMC_TX 0x8108
+#define IXGBE_LMVM_RX 0x2FA4
+#define IXGBE_LMVM_TX 0x8124
+#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */
+#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */
+#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
+#define IXGBE_RXFECCERR0 0x051B8
+#define IXGBE_LLITHRESH 0x0EC90
+#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIRVP 0x05AC0
+#define IXGBE_VMD_CTL 0x0581C
+#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
+#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */
+#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+
+/* Registers for setting up RSS on X550 with SRIOV
+ * _p - pool number (0..63)
+ * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA)
+ */
+#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4))
+#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40))
+#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40))
+
+/* Flow Director registers */
+#define IXGBE_FDIRCTRL 0x0EE00
+#define IXGBE_FDIRHKEY 0x0EE68
+#define IXGBE_FDIRSKEY 0x0EE6C
+#define IXGBE_FDIRDIP4M 0x0EE3C
+#define IXGBE_FDIRSIP4M 0x0EE40
+#define IXGBE_FDIRTCPM 0x0EE44
+#define IXGBE_FDIRUDPM 0x0EE48
+#define IXGBE_FDIRSCTPM 0x0EE78
+#define IXGBE_FDIRIP6M 0x0EE74
+#define IXGBE_FDIRM 0x0EE70
+
+/* Flow Director Stats registers */
+#define IXGBE_FDIRFREE 0x0EE38
+#define IXGBE_FDIRLEN 0x0EE4C
+#define IXGBE_FDIRUSTAT 0x0EE50
+#define IXGBE_FDIRFSTAT 0x0EE54
+#define IXGBE_FDIRMATCH 0x0EE58
+#define IXGBE_FDIRMISS 0x0EE5C
+
+/* Flow Director Programming registers */
+#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
+#define IXGBE_FDIRIPSA 0x0EE18
+#define IXGBE_FDIRIPDA 0x0EE1C
+#define IXGBE_FDIRPORT 0x0EE20
+#define IXGBE_FDIRVLAN 0x0EE24
+#define IXGBE_FDIRHASH 0x0EE28
+#define IXGBE_FDIRCMD 0x0EE2C
+
+/* Transmit DMA registers */
+#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of them (0-31)*/
+#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
+#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40))
+#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
+#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
+#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
+#define IXGBE_DTXCTL 0x07E00
+
+#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_PFDTXGSWC 0x08220
+#define IXGBE_DTXMXSZRQ 0x08100
+#define IXGBE_DTXTCPFLGL 0x04A88
+#define IXGBE_DTXTCPFLGH 0x04A8C
+#define IXGBE_LBDRPEN 0x0CA00
+#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
+
+#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
+#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
+#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */
+#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */
+#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
+
+/* Anti-spoofing defines */
+#define IXGBE_SPOOF_MACAS_MASK 0xFF
+#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
+#define IXGBE_SPOOF_VLANAS_SHIFT 8
+#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000
+#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16
+#define IXGBE_PFVFSPOOF_REG_COUNT 8
+/* 16 of these (0-15) */
+#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4))
+/* Tx DCA Control register : 128 of these (0-127) */
+#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
+#define IXGBE_TIPG 0x0CB00
+#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_MNGTXMAP 0x0CD10
+#define IXGBE_TIPG_FIBER_DEFAULT 3
+#define IXGBE_TXPBSIZE_SHIFT 10
+
+/* Wake up registers */
+#define IXGBE_WUC 0x05800
+#define IXGBE_WUFC 0x05808
+#define IXGBE_WUS 0x05810
+#define IXGBE_IPAV 0x05838
+#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
+#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
+
+#define IXGBE_WUPL 0x05900
+#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_PROXYS 0x05F60 /* Proxying Status Register */
+#define IXGBE_PROXYFC 0x05F64 /* Proxying Filter Control Register */
+#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */
+
+/* masks for accessing VXLAN and GENEVE UDP ports */
+#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */
+#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */
+#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */
+#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16
+
+#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
+/* Ext Flexible Host Filter Table */
+#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100))
+#define IXGBE_FHFT_EXT_X550(_n) (0x09600 + ((_n) * 0x100))
+
+/* Four Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
+/* Six Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6
+/* Eight Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_8 8
+#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128
+#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
+#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
+#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
+
+/* Wake Up Filter Control */
+#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+#define IXGBE_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */
+#define IXGBE_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */
+#define IXGBE_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */
+/* Mask for Ext. flex filters */
+#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000
+#define IXGBE_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */
+#define IXGBE_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */
+#define IXGBE_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */
+#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+
+/* Wake Up Status */
+#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC
+#define IXGBE_WUS_MAG IXGBE_WUFC_MAG
+#define IXGBE_WUS_EX IXGBE_WUFC_EX
+#define IXGBE_WUS_MC IXGBE_WUFC_MC
+#define IXGBE_WUS_BC IXGBE_WUFC_BC
+#define IXGBE_WUS_ARP IXGBE_WUFC_ARP
+#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4
+#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6
+#define IXGBE_WUS_MNG IXGBE_WUFC_MNG
+#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0
+#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1
+#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2
+#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3
+#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4
+#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
+#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
+#define IXGBE_WUS_FW_RST_WK IXGBE_WUFC_FW_RST_WK
+/* Proxy Status */
+#define IXGBE_PROXYS_EX 0x00000004 /* Exact packet received */
+#define IXGBE_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */
+#define IXGBE_PROXYS_NS 0x00000200 /* IPV6 NS received */
+#define IXGBE_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */
+#define IXGBE_PROXYS_ARP 0x00000800 /* ARP request packet received */
+#define IXGBE_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */
+
+/* Proxying Filter Control */
+#define IXGBE_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */
+#define IXGBE_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */
+#define IXGBE_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */
+#define IXGBE_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define IXGBE_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */
+#define IXGBE_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */
+#define IXGBE_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */
+
+#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
+
+/* DCB registers */
+#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8
+#define IXGBE_RMCS 0x03D00
+#define IXGBE_DPMCS 0x07F40
+#define IXGBE_PDPMCS 0x0CD00
+#define IXGBE_RUPPBMR 0x050A0
+#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+
+/* Power Management */
+/* DMA Coalescing configuration */
+struct ixgbe_dmac_config {
+ u16 watchdog_timer; /* usec units */
+ bool fcoe_en;
+ u32 link_speed;
+ u8 fcoe_tc;
+ u8 num_tcs;
+};
+
+/*
+ * DMA Coalescing threshold Rx PB TC[n] value in Kilobyte by link speed.
+ * DMACRXT = 10Gbps = 10,000 bits / usec = 1250 bytes / usec 70 * 1250 ==
+ * 87500 bytes [85KB]
+ */
+#define IXGBE_DMACRXT_10G 0x55
+#define IXGBE_DMACRXT_1G 0x09
+#define IXGBE_DMACRXT_100M 0x01
+
+/* DMA Coalescing registers */
+#define IXGBE_DMCMNGTH 0x15F20 /* Management Threshold */
+#define IXGBE_DMACR 0x02400 /* Control register */
+#define IXGBE_DMCTH(_i) (0x03300 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_DMCTLX 0x02404 /* Time to Lx request */
+/* DMA Coalescing register fields */
+#define IXGBE_DMCMNGTH_DMCMNGTH_MASK 0x000FFFF0 /* Mng Threshold mask */
+#define IXGBE_DMCMNGTH_DMCMNGTH_SHIFT 4 /* Management Threshold shift */
+#define IXGBE_DMACR_DMACWT_MASK 0x0000FFFF /* Watchdog Timer mask */
+#define IXGBE_DMACR_HIGH_PRI_TC_MASK 0x00FF0000
+#define IXGBE_DMACR_HIGH_PRI_TC_SHIFT 16
+#define IXGBE_DMACR_EN_MNG_IND 0x10000000 /* Enable Mng Indications */
+#define IXGBE_DMACR_LX_COAL_IND 0x40000000 /* Lx Coalescing indicate */
+#define IXGBE_DMACR_DMAC_EN 0x80000000 /* DMA Coalescing Enable */
+#define IXGBE_DMCTH_DMACRXT_MASK 0x000001FF /* Receive Threshold mask */
+#define IXGBE_DMCTLX_TTLX_MASK 0x00000FFF /* Time to Lx request mask */
+
+/* EEE registers */
+#define IXGBE_EEER 0x043A0 /* EEE register */
+#define IXGBE_EEE_STAT 0x04398 /* EEE Status */
+#define IXGBE_EEE_SU 0x04380 /* EEE Set up */
+#define IXGBE_EEE_SU_TEEE_DLY_SHIFT 26
+#define IXGBE_TLPIC 0x041F4 /* EEE Tx LPI count */
+#define IXGBE_RLPIC 0x041F8 /* EEE Rx LPI count */
+
+/* EEE register fields */
+#define IXGBE_EEER_TX_LPI_EN 0x00010000 /* Enable EEE LPI TX path */
+#define IXGBE_EEER_RX_LPI_EN 0x00020000 /* Enable EEE LPI RX path */
+#define IXGBE_EEE_STAT_NEG 0x20000000 /* EEE support neg on link */
+#define IXGBE_EEE_RX_LPI_STATUS 0x40000000 /* RX Link in LPI status */
+#define IXGBE_EEE_TX_LPI_STATUS 0x80000000 /* TX Link in LPI status */
+
+/* Security Control Registers */
+#define IXGBE_SECTXCTRL 0x08800
+#define IXGBE_SECTXSTAT 0x08804
+#define IXGBE_SECTXBUFFAF 0x08808
+#define IXGBE_SECTXMINIFG 0x08810
+#define IXGBE_SECRXCTRL 0x08D00
+#define IXGBE_SECRXSTAT 0x08D04
+
+/* Security Bit Fields and Masks */
+#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001
+#define IXGBE_SECTXCTRL_TX_DIS 0x00000002
+#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004
+
+#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001
+#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002
+
+#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001
+#define IXGBE_SECRXCTRL_RX_DIS 0x00000002
+
+#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001
+#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002
+
+/* LinkSec (MacSec) Registers */
+#define IXGBE_LSECTXCAP 0x08A00
+#define IXGBE_LSECRXCAP 0x08F00
+#define IXGBE_LSECTXCTRL 0x08A04
+#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */
+#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */
+#define IXGBE_LSECTXSA 0x08A10
+#define IXGBE_LSECTXPN0 0x08A14
+#define IXGBE_LSECTXPN1 0x08A18
+#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECRXCTRL 0x08F04
+#define IXGBE_LSECRXSCL 0x08F08
+#define IXGBE_LSECRXSCH 0x08F0C
+#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
+#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */
+#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */
+#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */
+#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */
+#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */
+#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */
+#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */
+#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */
+#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */
+#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */
+#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */
+#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */
+#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */
+#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */
+#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */
+#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
+#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
+#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */
+#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */
+
+/* LinkSec (MacSec) Bit Fields and Masks */
+#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000
+#define IXGBE_LSECTXCAP_SUM_SHIFT 16
+#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000
+#define IXGBE_LSECRXCAP_SUM_SHIFT 16
+
+#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003
+#define IXGBE_LSECTXCTRL_DISABLE 0x0
+#define IXGBE_LSECTXCTRL_AUTH 0x1
+#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2
+#define IXGBE_LSECTXCTRL_AISCI 0x00000020
+#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
+#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8
+
+#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C
+#define IXGBE_LSECRXCTRL_EN_SHIFT 2
+#define IXGBE_LSECRXCTRL_DISABLE 0x0
+#define IXGBE_LSECRXCTRL_CHECK 0x1
+#define IXGBE_LSECRXCTRL_STRICT 0x2
+#define IXGBE_LSECRXCTRL_DROP 0x3
+#define IXGBE_LSECRXCTRL_PLSH 0x00000040
+#define IXGBE_LSECRXCTRL_RP 0x00000080
+#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33
+
+/* IpSec Registers */
+#define IXGBE_IPSTXIDX 0x08900
+#define IXGBE_IPSTXSALT 0x08904
+#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXIDX 0x08E00
+#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSPI 0x08E14
+#define IXGBE_IPSRXIPIDX 0x08E18
+#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSALT 0x08E2C
+#define IXGBE_IPSRXMOD 0x08E30
+
+#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
+
+/* DCB registers */
+#define IXGBE_RTRPCS 0x02430
+#define IXGBE_RTTDCS 0x04900
+#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
+#define IXGBE_RTTPCS 0x0CD00
+#define IXGBE_RTRUP2TC 0x03020
+#define IXGBE_RTTUP2TC 0x0C800
+#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDQSEL 0x04904
+#define IXGBE_RTTDT1C 0x04908
+#define IXGBE_RTTDT1S 0x0490C
+#define IXGBE_RTTDTECC 0x04990
+#define IXGBE_RTTDTECC_NO_BCN 0x00000100
+
+#define IXGBE_RTTBCNRC 0x04984
+#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
+#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
+#define IXGBE_RTTBCNRC_RF_INT_MASK \
+ (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
+#define IXGBE_RTTBCNRM 0x04980
+
+/* BCN (for DCB) Registers */
+#define IXGBE_RTTBCNRS 0x04988
+#define IXGBE_RTTBCNCR 0x08B00
+#define IXGBE_RTTBCNACH 0x08B04
+#define IXGBE_RTTBCNACL 0x08B08
+#define IXGBE_RTTBCNTG 0x04A90
+#define IXGBE_RTTBCNIDX 0x08B0C
+#define IXGBE_RTTBCNCP 0x08B10
+#define IXGBE_RTFRTIMER 0x08B14
+#define IXGBE_RTTBCNRTT 0x05150
+#define IXGBE_RTTBCNRD 0x0498C
+
+/* FCoE DMA Context Registers */
+/* FCoE Direct DMA Context */
+#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10))
+#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
+#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */
+#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
+#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
+#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
+#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
+#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
+#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
+#define IXGBE_FCBUFF_OFFSET_SHIFT 16
+#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
+#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
+#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
+#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
+#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
+/* FCoE SOF/EOF */
+#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */
+#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
+#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
+#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
+/* FCoE Filter Context Registers */
+#define IXGBE_FCD_ID 0x05114 /* FCoE D_ID */
+#define IXGBE_FCSMAC 0x0510C /* FCoE Source MAC */
+#define IXGBE_FCFLTRW_SMAC_HIGH_SHIFT 16
+/* FCoE Direct Filter Context */
+#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10))
+#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4))
+#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
+#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
+#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
+#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
+#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
+#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
+#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
+#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
+/* FCoE Receive Control */
+#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
+#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
+#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
+/* FCoE Redirection */
+#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */
+#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */
+#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
+#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */
+#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */
+#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
+#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
+#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */
+/* Higher 7 bits for the queue index */
+#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000
+#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16
+
+/* Stats registers */
+#define IXGBE_CRCERRS 0x04000
+#define IXGBE_ILLERRC 0x04004
+#define IXGBE_ERRBC 0x04008
+#define IXGBE_MSPDC 0x04010
+#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
+#define IXGBE_MLFC 0x04034
+#define IXGBE_MRFC 0x04038
+#define IXGBE_RLEC 0x04040
+#define IXGBE_LXONTXC 0x03F60
+#define IXGBE_LXONRXC 0x0CF60
+#define IXGBE_LXOFFTXC 0x03F68
+#define IXGBE_LXOFFRXC 0x0CF68
+#define IXGBE_LXONRXCNT 0x041A4
+#define IXGBE_LXOFFRXCNT 0x041A8
+#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
+#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
+#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
+#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
+#define IXGBE_PRC64 0x0405C
+#define IXGBE_PRC127 0x04060
+#define IXGBE_PRC255 0x04064
+#define IXGBE_PRC511 0x04068
+#define IXGBE_PRC1023 0x0406C
+#define IXGBE_PRC1522 0x04070
+#define IXGBE_GPRC 0x04074
+#define IXGBE_BPRC 0x04078
+#define IXGBE_MPRC 0x0407C
+#define IXGBE_GPTC 0x04080
+#define IXGBE_GORCL 0x04088
+#define IXGBE_GORCH 0x0408C
+#define IXGBE_GOTCL 0x04090
+#define IXGBE_GOTCH 0x04094
+#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
+#define IXGBE_RUC 0x040A4
+#define IXGBE_RFC 0x040A8
+#define IXGBE_ROC 0x040AC
+#define IXGBE_RJC 0x040B0
+#define IXGBE_MNGPRC 0x040B4
+#define IXGBE_MNGPDC 0x040B8
+#define IXGBE_MNGPTC 0x0CF90
+#define IXGBE_TORL 0x040C0
+#define IXGBE_TORH 0x040C4
+#define IXGBE_TPR 0x040D0
+#define IXGBE_TPT 0x040D4
+#define IXGBE_PTC64 0x040D8
+#define IXGBE_PTC127 0x040DC
+#define IXGBE_PTC255 0x040E0
+#define IXGBE_PTC511 0x040E4
+#define IXGBE_PTC1023 0x040E8
+#define IXGBE_PTC1522 0x040EC
+#define IXGBE_MPTC 0x040F0
+#define IXGBE_BPTC 0x040F4
+#define IXGBE_XEC 0x04120
+#define IXGBE_SSVPC 0x08780
+
+#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
+#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
+ (0x08600 + ((_i) * 4)))
+#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
+
+#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_FCCRC 0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */
+#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */
+#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */
+#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */
+#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
+#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
+#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
+#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */
+#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */
+#define IXGBE_O2BGPTC 0x041C4
+#define IXGBE_O2BSPC 0x087B0
+#define IXGBE_B2OSPC 0x041C0
+#define IXGBE_B2OGPRC 0x02F90
+#define IXGBE_BUPRC 0x04180
+#define IXGBE_BMPRC 0x04184
+#define IXGBE_BBPRC 0x04188
+#define IXGBE_BUPTC 0x0418C
+#define IXGBE_BMPTC 0x04190
+#define IXGBE_BBPTC 0x04194
+#define IXGBE_BCRCERRS 0x04198
+#define IXGBE_BXONRXC 0x0419C
+#define IXGBE_BXOFFRXC 0x041E0
+#define IXGBE_BXONTXC 0x041E4
+#define IXGBE_BXOFFTXC 0x041E8
+
+/* Management */
+#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MANC 0x05820
+#define IXGBE_MFVAL 0x05824
+#define IXGBE_MANC2H 0x05860
+#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MIPAF 0x058B0
+#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */
+#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_LSWFW 0x15F14
+#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */
+#define IXGBE_BMCIPVAL 0x05060
+#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001
+#define IXGBE_BMCIP_IPADDR_VALID 0x00000002
+
+/* Management Bit Fields and Masks */
+#define IXGBE_MANC_MPROXYE 0x40000000 /* Management Proxy Enable */
+#define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */
+#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */
+#define IXGBE_MANC_EN_BMC2OS_SHIFT 28
+
+/* Firmware Semaphore Register */
+#define IXGBE_FWSM_MODE_MASK 0xE
+#define IXGBE_FWSM_TS_ENABLED 0x1
+#define IXGBE_FWSM_FW_MODE_PT 0x4
+
+/* ARC Subsystem registers */
+#define IXGBE_HICR 0x15F00
+#define IXGBE_FWSTS 0x15F0C
+#define IXGBE_HSMC0R 0x15F04
+#define IXGBE_HSMC1R 0x15F08
+#define IXGBE_SWSR 0x15F10
+#define IXGBE_HFDR 0x15FE8
+#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */
+
+#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define IXGBE_HICR_C 0x02
+#define IXGBE_HICR_SV 0x04 /* Status Validity */
+#define IXGBE_HICR_FW_RESET_ENABLE 0x40
+#define IXGBE_HICR_FW_RESET 0x80
+
+/* PCI-E registers */
+#define IXGBE_GCR 0x11000
+#define IXGBE_GTV 0x11004
+#define IXGBE_FUNCTAG 0x11008
+#define IXGBE_GLT 0x1100C
+#define IXGBE_PCIEPIPEADR 0x11004
+#define IXGBE_PCIEPIPEDAT 0x11008
+#define IXGBE_GSCL_1 0x11010
+#define IXGBE_GSCL_2 0x11014
+#define IXGBE_GSCL_1_X540 IXGBE_GSCL_1
+#define IXGBE_GSCL_2_X540 IXGBE_GSCL_2
+#define IXGBE_GSCL_3 0x11018
+#define IXGBE_GSCL_4 0x1101C
+#define IXGBE_GSCN_0 0x11020
+#define IXGBE_GSCN_1 0x11024
+#define IXGBE_GSCN_2 0x11028
+#define IXGBE_GSCN_3 0x1102C
+#define IXGBE_GSCN_0_X540 IXGBE_GSCN_0
+#define IXGBE_GSCN_1_X540 IXGBE_GSCN_1
+#define IXGBE_GSCN_2_X540 IXGBE_GSCN_2
+#define IXGBE_GSCN_3_X540 IXGBE_GSCN_3
+#define IXGBE_FACTPS 0x10150
+#define IXGBE_FACTPS_X540 IXGBE_FACTPS
+#define IXGBE_GSCL_1_X550 0x11800
+#define IXGBE_GSCL_2_X550 0x11804
+#define IXGBE_GSCL_1_X550EM_x IXGBE_GSCL_1_X550
+#define IXGBE_GSCL_2_X550EM_x IXGBE_GSCL_2_X550
+#define IXGBE_GSCN_0_X550 0x11820
+#define IXGBE_GSCN_1_X550 0x11824
+#define IXGBE_GSCN_2_X550 0x11828
+#define IXGBE_GSCN_3_X550 0x1182C
+#define IXGBE_GSCN_0_X550EM_x IXGBE_GSCN_0_X550
+#define IXGBE_GSCN_1_X550EM_x IXGBE_GSCN_1_X550
+#define IXGBE_GSCN_2_X550EM_x IXGBE_GSCN_2_X550
+#define IXGBE_GSCN_3_X550EM_x IXGBE_GSCN_3_X550
+#define IXGBE_FACTPS_X550 IXGBE_FACTPS
+#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS
+#define IXGBE_GSCL_1_X550EM_a IXGBE_GSCL_1_X550
+#define IXGBE_GSCL_2_X550EM_a IXGBE_GSCL_2_X550
+#define IXGBE_GSCN_0_X550EM_a IXGBE_GSCN_0_X550
+#define IXGBE_GSCN_1_X550EM_a IXGBE_GSCN_1_X550
+#define IXGBE_GSCN_2_X550EM_a IXGBE_GSCN_2_X550
+#define IXGBE_GSCN_3_X550EM_a IXGBE_GSCN_3_X550
+#define IXGBE_FACTPS_X550EM_a 0x15FEC
+#define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FACTPS)
+
+#define IXGBE_PCIEANACTL 0x11040
+#define IXGBE_SWSM 0x10140
+#define IXGBE_SWSM_X540 IXGBE_SWSM
+#define IXGBE_SWSM_X550 IXGBE_SWSM
+#define IXGBE_SWSM_X550EM_x IXGBE_SWSM
+#define IXGBE_SWSM_X550EM_a 0x15F70
+#define IXGBE_SWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWSM)
+
+#define IXGBE_FWSM 0x10148
+#define IXGBE_FWSM_X540 IXGBE_FWSM
+#define IXGBE_FWSM_X550 IXGBE_FWSM
+#define IXGBE_FWSM_X550EM_x IXGBE_FWSM
+#define IXGBE_FWSM_X550EM_a 0x15F74
+#define IXGBE_FWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FWSM)
+
+#define IXGBE_SWFW_SYNC IXGBE_GSSR
+#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC
+#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC
+#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC
+#define IXGBE_SWFW_SYNC_X550EM_a 0x15F78
+#define IXGBE_SWFW_SYNC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC)
+
+#define IXGBE_GSSR 0x10160
+#define IXGBE_MREVID 0x11064
+#define IXGBE_DCA_ID 0x11070
+#define IXGBE_DCA_CTRL 0x11074
+
+/* PCI-E registers 82599-Specific */
+#define IXGBE_GCR_EXT 0x11050
+#define IXGBE_GSCL_5_82599 0x11030
+#define IXGBE_GSCL_6_82599 0x11034
+#define IXGBE_GSCL_7_82599 0x11038
+#define IXGBE_GSCL_8_82599 0x1103C
+#define IXGBE_GSCL_5_X540 IXGBE_GSCL_5_82599
+#define IXGBE_GSCL_6_X540 IXGBE_GSCL_6_82599
+#define IXGBE_GSCL_7_X540 IXGBE_GSCL_7_82599
+#define IXGBE_GSCL_8_X540 IXGBE_GSCL_8_82599
+#define IXGBE_PHYADR_82599 0x11040
+#define IXGBE_PHYDAT_82599 0x11044
+#define IXGBE_PHYCTL_82599 0x11048
+#define IXGBE_PBACLR_82599 0x11068
+#define IXGBE_CIAA 0x11088
+#define IXGBE_CIAD 0x1108C
+#define IXGBE_CIAA_82599 IXGBE_CIAA
+#define IXGBE_CIAD_82599 IXGBE_CIAD
+#define IXGBE_CIAA_X540 IXGBE_CIAA
+#define IXGBE_CIAD_X540 IXGBE_CIAD
+#define IXGBE_GSCL_5_X550 0x11810
+#define IXGBE_GSCL_6_X550 0x11814
+#define IXGBE_GSCL_7_X550 0x11818
+#define IXGBE_GSCL_8_X550 0x1181C
+#define IXGBE_GSCL_5_X550EM_x IXGBE_GSCL_5_X550
+#define IXGBE_GSCL_6_X550EM_x IXGBE_GSCL_6_X550
+#define IXGBE_GSCL_7_X550EM_x IXGBE_GSCL_7_X550
+#define IXGBE_GSCL_8_X550EM_x IXGBE_GSCL_8_X550
+#define IXGBE_CIAA_X550 0x11508
+#define IXGBE_CIAD_X550 0x11510
+#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550
+#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550
+#define IXGBE_GSCL_5_X550EM_a IXGBE_GSCL_5_X550
+#define IXGBE_GSCL_6_X550EM_a IXGBE_GSCL_6_X550
+#define IXGBE_GSCL_7_X550EM_a IXGBE_GSCL_7_X550
+#define IXGBE_GSCL_8_X550EM_a IXGBE_GSCL_8_X550
+#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550
+#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550
+#define IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA)
+#define IXGBE_CIAD_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAD)
+#define IXGBE_PICAUSE 0x110B0
+#define IXGBE_PIENA 0x110B8
+#define IXGBE_CDQ_MBR_82599 0x110B4
+#define IXGBE_PCIESPARE 0x110BC
+#define IXGBE_MISC_REG_82599 0x110F0
+#define IXGBE_ECC_CTRL_0_82599 0x11100
+#define IXGBE_ECC_CTRL_1_82599 0x11104
+#define IXGBE_ECC_STATUS_82599 0x110E0
+#define IXGBE_BAR_CTRL_82599 0x110F4
+
+/* PCI Express Control */
+#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
+#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define IXGBE_GCR_CAP_VER2 0x00040000
+
+#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
+#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000
+#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
+ IXGBE_GCR_EXT_VT_MODE_64)
+#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003
+/* Time Sync Registers */
+#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
+#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
+#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */
+#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */
+#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */
+#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */
+#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */
+#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */
+#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */
+#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
+#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
+#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */
+#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
+#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */
+#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */
+#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */
+#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */
+#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
+#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
+#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
+#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */
+#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */
+#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
+#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
+#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
+#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
+#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
+#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
+#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */
+#define IXGBE_TSICR 0x08C60 /* TimeSync Interrupt Cause Register - WO */
+#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */
+
+/* Diagnostic Registers */
+#define IXGBE_RDSTATCTL 0x02C20
+#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
+#define IXGBE_RDHMPN 0x02F08
+#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
+#define IXGBE_RDPROBE 0x02F20
+#define IXGBE_RDMAM 0x02F30
+#define IXGBE_RDMAD 0x02F34
+#define IXGBE_TDHMPN 0x07F08
+#define IXGBE_TDHMPN2 0x082FC
+#define IXGBE_TXDESCIC 0x082CC
+#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
+#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
+#define IXGBE_TDPROBE 0x07F20
+#define IXGBE_TXBUFCTRL 0x0C600
+#define IXGBE_TXBUFDATA0 0x0C610
+#define IXGBE_TXBUFDATA1 0x0C614
+#define IXGBE_TXBUFDATA2 0x0C618
+#define IXGBE_TXBUFDATA3 0x0C61C
+#define IXGBE_RXBUFCTRL 0x03600
+#define IXGBE_RXBUFDATA0 0x03610
+#define IXGBE_RXBUFDATA1 0x03614
+#define IXGBE_RXBUFDATA2 0x03618
+#define IXGBE_RXBUFDATA3 0x0361C
+#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_RFVAL 0x050A4
+#define IXGBE_MDFTC1 0x042B8
+#define IXGBE_MDFTC2 0x042C0
+#define IXGBE_MDFTFIFO1 0x042C4
+#define IXGBE_MDFTFIFO2 0x042C8
+#define IXGBE_MDFTS 0x042CC
+#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
+#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
+#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
+#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
+#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
+#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
+#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
+#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
+#define IXGBE_PCIEECCCTL 0x1106C
+#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
+#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
+#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
+#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/
+#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
+#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
+#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
+#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/
+#define IXGBE_PCIEECCCTL0 0x11100
+#define IXGBE_PCIEECCCTL1 0x11104
+#define IXGBE_RXDBUECC 0x03F70
+#define IXGBE_TXDBUECC 0x0CF70
+#define IXGBE_RXDBUEST 0x03F74
+#define IXGBE_TXDBUEST 0x0CF74
+#define IXGBE_PBTXECC 0x0C300
+#define IXGBE_PBRXECC 0x03300
+#define IXGBE_GHECCR 0x110B0
+
+/* MAC Registers */
+#define IXGBE_PCS1GCFIG 0x04200
+#define IXGBE_PCS1GLCTL 0x04208
+#define IXGBE_PCS1GLSTA 0x0420C
+#define IXGBE_PCS1GDBG0 0x04210
+#define IXGBE_PCS1GDBG1 0x04214
+#define IXGBE_PCS1GANA 0x04218
+#define IXGBE_PCS1GANLP 0x0421C
+#define IXGBE_PCS1GANNP 0x04220
+#define IXGBE_PCS1GANLPNP 0x04224
+#define IXGBE_HLREG0 0x04240
+#define IXGBE_HLREG1 0x04244
+#define IXGBE_PAP 0x04248
+#define IXGBE_MACA 0x0424C
+#define IXGBE_APAE 0x04250
+#define IXGBE_ARD 0x04254
+#define IXGBE_AIS 0x04258
+#define IXGBE_MSCA 0x0425C
+#define IXGBE_MSRWD 0x04260
+#define IXGBE_MLADD 0x04264
+#define IXGBE_MHADD 0x04268
+#define IXGBE_MAXFRS 0x04268
+#define IXGBE_TREG 0x0426C
+#define IXGBE_PCSS1 0x04288
+#define IXGBE_PCSS2 0x0428C
+#define IXGBE_XPCSS 0x04290
+#define IXGBE_MFLCN 0x04294
+#define IXGBE_SERDESC 0x04298
+#define IXGBE_MAC_SGMII_BUSY 0x04298
+#define IXGBE_MACS 0x0429C
+#define IXGBE_AUTOC 0x042A0
+#define IXGBE_LINKS 0x042A4
+#define IXGBE_LINKS2 0x04324
+#define IXGBE_AUTOC2 0x042A8
+#define IXGBE_AUTOC3 0x042AC
+#define IXGBE_ANLP1 0x042B0
+#define IXGBE_ANLP2 0x042B4
+#define IXGBE_MACC 0x04330
+#define IXGBE_ATLASCTL 0x04800
+#define IXGBE_MMNGC 0x042D0
+#define IXGBE_ANLPNP1 0x042D4
+#define IXGBE_ANLPNP2 0x042D8
+#define IXGBE_KRPCSFC 0x042E0
+#define IXGBE_KRPCSS 0x042E4
+#define IXGBE_FECS1 0x042E8
+#define IXGBE_FECS2 0x042EC
+#define IXGBE_SMADARCTL 0x14F10
+#define IXGBE_MPVC 0x04318
+#define IXGBE_SGMIIC 0x04314
+
+/* Statistics Registers */
+#define IXGBE_RXNFGPC 0x041B0
+#define IXGBE_RXNFGBCL 0x041B4
+#define IXGBE_RXNFGBCH 0x041B8
+#define IXGBE_RXDGPC 0x02F50
+#define IXGBE_RXDGBCL 0x02F54
+#define IXGBE_RXDGBCH 0x02F58
+#define IXGBE_RXDDGPC 0x02F5C
+#define IXGBE_RXDDGBCL 0x02F60
+#define IXGBE_RXDDGBCH 0x02F64
+#define IXGBE_RXLPBKGPC 0x02F68
+#define IXGBE_RXLPBKGBCL 0x02F6C
+#define IXGBE_RXLPBKGBCH 0x02F70
+#define IXGBE_RXDLPBKGPC 0x02F74
+#define IXGBE_RXDLPBKGBCL 0x02F78
+#define IXGBE_RXDLPBKGBCH 0x02F7C
+#define IXGBE_TXDGPC 0x087A0
+#define IXGBE_TXDGBCL 0x087A4
+#define IXGBE_TXDGBCH 0x087A8
+
+#define IXGBE_RXDSTATCTRL 0x02F40
+
+/* Copper Pond 2 link timeout */
+#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
+
+/* Omer CORECTL */
+#define IXGBE_CORECTL 0x014F00
+/* BARCTRL */
+#define IXGBE_BARCTRL 0x110F4
+#define IXGBE_BARCTRL_FLSIZE 0x0700
+#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
+#define IXGBE_BARCTRL_CSRSIZE 0x2000
+
+/* RSCCTL Bit Masks */
+#define IXGBE_RSCCTL_RSCEN 0x01
+#define IXGBE_RSCCTL_MAXDESC_1 0x00
+#define IXGBE_RSCCTL_MAXDESC_4 0x04
+#define IXGBE_RSCCTL_MAXDESC_8 0x08
+#define IXGBE_RSCCTL_MAXDESC_16 0x0C
+#define IXGBE_RSCCTL_TS_DIS 0x02
+
+/* RSCDBU Bit Masks */
+#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
+#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
+
+/* RDRXCTL Bit Masks */
+#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */
+#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */
+#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad Small Packet */
+#define IXGBE_RDRXCTL_MVMEN 0x00000020
+#define IXGBE_RDRXCTL_RSC_PUSH_DIS 0x00000020
+#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
+#define IXGBE_RDRXCTL_RSC_PUSH 0x00000080
+#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
+#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
+#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/
+#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */
+#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */
+#define IXGBE_RDRXCTL_MBINTEN 0x10000000
+#define IXGBE_RDRXCTL_MDP_EN 0x20000000
+
+/* RQTC Bit Masks and Shifts */
+#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
+#define IXGBE_RQTC_TC0_MASK (0x7 << 0)
+#define IXGBE_RQTC_TC1_MASK (0x7 << 4)
+#define IXGBE_RQTC_TC2_MASK (0x7 << 8)
+#define IXGBE_RQTC_TC3_MASK (0x7 << 12)
+#define IXGBE_RQTC_TC4_MASK (0x7 << 16)
+#define IXGBE_RQTC_TC5_MASK (0x7 << 20)
+#define IXGBE_RQTC_TC6_MASK (0x7 << 24)
+#define IXGBE_RQTC_TC7_MASK (0x7 << 28)
+
+/* PSRTYPE.RQPL Bit masks and shift */
+#define IXGBE_PSRTYPE_RQPL_MASK 0x7
+#define IXGBE_PSRTYPE_RQPL_SHIFT 29
+
+/* CTRL Bit Masks */
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
+
+/* FACTPS */
+#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageblility Clock Gated */
+#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
+
+/* MHADD Bit Masks */
+#define IXGBE_MHADD_MFS_MASK 0xFFFF0000
+#define IXGBE_MHADD_MFS_SHIFT 16
+
+/* Extended Device Control */
+#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */
+#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
+#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+
+/* Direct Cache Access (DCA) definitions */
+#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
+#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
+
+/* MSCA Bit Masks */
+#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Addr (new prot) */
+#define IXGBE_MSCA_NP_ADDR_SHIFT 0
+#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Dev Type (new prot) */
+#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old prot */
+#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */
+#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/
+#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */
+#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
+#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
+#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (wr) */
+#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (rd) */
+#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (rd auto inc)*/
+#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
+#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
+#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new prot) */
+#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old prot) */
+#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */
+#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress ena */
+
+/* MSRWD bit masks */
+#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
+#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT 16
+
+/* Atlas registers */
+#define IXGBE_ATLAS_PDN_LPBK 0x24
+#define IXGBE_ATLAS_PDN_10G 0xB
+#define IXGBE_ATLAS_PDN_1G 0xC
+#define IXGBE_ATLAS_PDN_AN 0xD
+
+/* Atlas bit masks */
+#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000
+#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10
+#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
+
+/* Omer bit masks */
+#define IXGBE_CORECTL_WRITE_CMD 0x00010000
+
+/* Device Type definitions for new protocol MDIO commands */
+#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0
+#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
+#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
+#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
+#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
+#define IXGBE_TWINAX_DEV 1
+
+#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
+
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Ctrl Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
+
+#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
+#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */
+#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */
+#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
+#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
+#define IXGBE_AUTO_NEG_10GBASE_EEE_ADVT 0x8 /* AUTO NEG EEE 10GBaseT Advt */
+#define IXGBE_AUTO_NEG_1000BASE_EEE_ADVT 0x4 /* AUTO NEG EEE 1000BaseT Advt */
+#define IXGBE_AUTO_NEG_100BASE_EEE_ADVT 0x2 /* AUTO NEG EEE 100BaseT Advt */
+#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
+#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
+#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
+#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
+#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
+#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
+#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
+#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */
+#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */
+#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
+#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
+#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
+#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
+#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */
+#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT Cap */
+#define IXGBE_AUTO_NEG_LP_10GBASE_CAP 0x0800 /* AUTO NEG Rx LP 10GBaseT Cap */
+#define IXGBE_AUTO_NEG_10GBASET_STAT 0x0021 /* AUTO NEG 10G BaseT Stat */
+
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
+#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
+#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */
+#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */
+#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */
+#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */
+#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */
+#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* Global Fault Message */
+#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */
+#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */
+#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */
+#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */
+#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */
+#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */
+#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */
+#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /* int dev fault enable */
+#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */
+#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */
+#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */
+
+#define IXGBE_PCRC8ECL 0x0E810 /* PCR CRC-8 Error Count Lo */
+#define IXGBE_PCRC8ECH 0x0E811 /* PCR CRC-8 Error Count Hi */
+#define IXGBE_PCRC8ECH_MASK 0x1F
+#define IXGBE_LDPCECL 0x0E820 /* PCR Uncorrected Error Count Lo */
+#define IXGBE_LDPCECH 0x0E821 /* PCR Uncorrected Error Count Hi */
+
+/* MII clause 22/28 definitions */
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
+
+#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register*/
+#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */
+
+#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */
+
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */
+#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */
+
+#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
+#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */
+#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400
+#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800
+#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
+#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */
+#define IXGBE_MII_RESTART 0x200
+#define IXGBE_MII_AUTONEG_COMPLETE 0x20
+#define IXGBE_MII_AUTONEG_LINK_UP 0x04
+#define IXGBE_MII_AUTONEG_REG 0x0
+
+#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
+#define IXGBE_MAX_PHY_ADDR 32
+
+/* PHY IDs*/
+#define TN1010_PHY_ID 0x00A19410
+#define TNX_FW_REV 0xB
+#define X540_PHY_ID 0x01540200
+#define X550_PHY_ID2 0x01540223
+#define X550_PHY_ID3 0x01540221
+#define X557_PHY_ID 0x01540240
+#define X557_PHY_ID2 0x01540250
+#define AQ_FW_REV 0x20
+#define QT2022_PHY_ID 0x0043A400
+#define ATH_PHY_ID 0x03429050
+
+/* PHY Types */
+#define IXGBE_M88E1500_E_PHY_ID 0x01410DD0
+#define IXGBE_M88E1543_E_PHY_ID 0x01410EA0
+
+/* Special PHY Init Routine */
+#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
+#define IXGBE_PHY_INIT_END_NL 0xFFFF
+#define IXGBE_CONTROL_MASK_NL 0xF000
+#define IXGBE_DATA_MASK_NL 0x0FFF
+#define IXGBE_CONTROL_SHIFT_NL 12
+#define IXGBE_DELAY_NL 0
+#define IXGBE_DATA_NL 1
+#define IXGBE_CONTROL_NL 0x000F
+#define IXGBE_CONTROL_EOL_NL 0x0FFF
+#define IXGBE_CONTROL_SOL_NL 0x0000
+
+/* General purpose Interrupt Enable */
+#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
+#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
+#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */
+#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */
+#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */
+#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN)
+#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN)
+#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN)
+
+#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
+#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
+#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
+#define IXGBE_GPIE_EIAME 0x40000000
+#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
+#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
+#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
+#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
+#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
+#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */
+
+/* Packet Buffer Initialization */
+#define IXGBE_MAX_PACKET_BUFFERS 8
+
+#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */
+#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
+#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
+#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */
+#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */
+
+#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
+#define IXGBE_MAX_PB 8
+
+/* Packet buffer allocation strategies */
+enum {
+ PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */
+#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL
+ PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */
+#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED
+};
+
+/* Transmit Flow Control status */
+#define IXGBE_TFCS_TXOFF 0x00000001
+#define IXGBE_TFCS_TXOFF0 0x00000100
+#define IXGBE_TFCS_TXOFF1 0x00000200
+#define IXGBE_TFCS_TXOFF2 0x00000400
+#define IXGBE_TFCS_TXOFF3 0x00000800
+#define IXGBE_TFCS_TXOFF4 0x00001000
+#define IXGBE_TFCS_TXOFF5 0x00002000
+#define IXGBE_TFCS_TXOFF6 0x00004000
+#define IXGBE_TFCS_TXOFF7 0x00008000
+
+/* TCP Timer */
+#define IXGBE_TCPTIMER_KS 0x00000100
+#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200
+#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400
+#define IXGBE_TCPTIMER_LOOP 0x00000800
+#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
+
+/* HLREG0 Bit Masks */
+#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */
+#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */
+#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */
+#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */
+#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */
+#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */
+#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */
+#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */
+#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */
+#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */
+#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */
+#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */
+#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */
+#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */
+#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */
+
+/* VMD_CTL bitmasks */
+#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001
+#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
+
+/* VT_CTL bitmasks */
+#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */
+#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */
+#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */
+#define IXGBE_VT_CTL_POOL_SHIFT 7
+#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
+
+/* VMOLR bitmasks */
+#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */
+#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */
+#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
+#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
+#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
+#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */
+#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */
+
+/* VFRE bitmask */
+#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
+
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* RDHMPN and TDHMPN bitmasks */
+#define IXGBE_RDHMPN_RDICADDR 0x007FF800
+#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
+#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
+#define IXGBE_TDHMPN_TDICADDR 0x003FF800
+#define IXGBE_TDHMPN_TDICRDREQ 0x00800000
+#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
+
+#define IXGBE_RDMAM_MEM_SEL_SHIFT 13
+#define IXGBE_RDMAM_DWORD_SHIFT 9
+#define IXGBE_RDMAM_DESC_COMP_FIFO 1
+#define IXGBE_RDMAM_DFC_CMD_FIFO 2
+#define IXGBE_RDMAM_RSC_HEADER_ADDR 3
+#define IXGBE_RDMAM_TCN_STATUS_RAM 4
+#define IXGBE_RDMAM_WB_COLL_FIFO 5
+#define IXGBE_RDMAM_QSC_CNT_RAM 6
+#define IXGBE_RDMAM_QSC_FCOE_RAM 7
+#define IXGBE_RDMAM_QSC_QUEUE_CNT 8
+#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA
+#define IXGBE_RDMAM_QSC_RSC_RAM 0xB
+#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135
+#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4
+#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48
+#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4
+#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256
+#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9
+#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8
+#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4
+#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64
+#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4
+#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512
+#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8
+#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32
+#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8
+
+#define IXGBE_TXDESCIC_READY 0x80000000
+
+/* Receive Checksum Control */
+#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+/* FCRTL Bit Masks */
+#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */
+#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */
+
+/* PAP bit masks*/
+#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
+
+/* RMCS Bit Masks */
+#define IXGBE_RMCS_RRM 0x00000002 /* Rx Recycle Mode enable */
+/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+#define IXGBE_RMCS_RAC 0x00000004
+/* Deficit Fixed Prio ena */
+#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC
+#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */
+#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */
+#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
+
+/* FCCFG Bit Masks */
+#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */
+#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */
+#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */
+#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
+#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
+#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
+#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
+#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
+#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0)
+#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1)
+#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2)
+
+#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
+#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
+#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
+#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */
+#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
+#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EICS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
+#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
+#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
+#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
+#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
+#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
+#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
+#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
+#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_LSC | \
+ IXGBE_EIMS_TCP_TIMER | \
+ IXGBE_EIMS_OTHER)
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
+#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
+#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */
+#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */
+#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass chk of ctrl bits */
+#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */
+#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */
+#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */
+#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */
+#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */
+
+#define IXGBE_MAX_FTQF_FILTERS 128
+#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003
+#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000
+#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001
+#define IXGBE_FTQF_PROTOCOL_SCTP 2
+#define IXGBE_FTQF_PRIORITY_MASK 0x00000007
+#define IXGBE_FTQF_PRIORITY_SHIFT 2
+#define IXGBE_FTQF_POOL_MASK 0x0000003F
+#define IXGBE_FTQF_POOL_SHIFT 8
+#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F
+#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25
+#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E
+#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D
+#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B
+#define IXGBE_FTQF_DEST_PORT_MASK 0x17
+#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F
+#define IXGBE_FTQF_POOL_MASK_EN 0x40000000
+#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000
+
+/* Interrupt clear mask */
+#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_REG_NUM 25
+#define IXGBE_IVAR_REG_NUM_82599 64
+#define IXGBE_IVAR_TXRX_ENTRY 96
+#define IXGBE_IVAR_RX_ENTRY 64
+#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i))
+#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i))
+#define IXGBE_IVAR_TX_ENTRY 32
+
+#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */
+#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */
+
+#define IXGBE_MSIX_VECTOR(_i) (0 + (_i))
+
+#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+
+/* ETYPE Queue Filter/Select Bit Masks */
+#define IXGBE_MAX_ETQF_FILTERS 8
+#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
+#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
+#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */
+#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
+#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
+#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_SHIFT 20
+
+#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
+#define IXGBE_ETQS_RX_QUEUE_SHIFT 16
+#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */
+#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */
+
+/*
+ * ETQF filter list: one static filter per filter consumer. This is
+ * to avoid filter collisions later. Add new filters
+ * here!!
+ *
+ * Current filters:
+ * EAPOL 802.1x (0x888e): Filter 0
+ * FCoE (0x8906): Filter 2
+ * 1588 (0x88f7): Filter 3
+ * FIP (0x8914): Filter 4
+ * LLDP (0x88CC): Filter 5
+ * LACP (0x8809): Filter 6
+ * FC (0x8808): Filter 7
+ */
+#define IXGBE_ETQF_FILTER_EAPOL 0
+#define IXGBE_ETQF_FILTER_FCOE 2
+#define IXGBE_ETQF_FILTER_1588 3
+#define IXGBE_ETQF_FILTER_FIP 4
+#define IXGBE_ETQF_FILTER_LLDP 5
+#define IXGBE_ETQF_FILTER_LACP 6
+#define IXGBE_ETQF_FILTER_FC 7
+/* VLAN Control Bit Masks */
+#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
+#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
+#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */
+#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
+#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
+
+/* VLAN pool filtering masks */
+#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
+#define IXGBE_VLVF_ENTRIES 64
+#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
+/* Per VF Port VLAN insertion rules */
+#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
+#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
+
+/* STATUS Bit Masks */
+#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
+#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */
+
+#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
+#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
+
+/* ESDP Bit Masks */
+#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
+#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
+#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
+#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
+#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
+#define IXGBE_ESDP_SDP7 0x00000080 /* SDP7 Data Value */
+#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
+#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
+#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP1 IO direction */
+#define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */
+#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */
+#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
+#define IXGBE_ESDP_SDP6_DIR 0x00004000 /* SDP6 IO direction */
+#define IXGBE_ESDP_SDP7_DIR 0x00008000 /* SDP7 IO direction */
+#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */
+#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
+
+
+/* LEDCTL Bit Masks */
+#define IXGBE_LED_IVRT_BASE 0x00000040
+#define IXGBE_LED_BLINK_BASE 0x00000080
+#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
+#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
+#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
+#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
+#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
+#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+#define IXGBE_X557_LED_MANUAL_SET_MASK (1 << 8)
+#define IXGBE_X557_MAX_LED_INDEX 3
+#define IXGBE_X557_LED_PROVISIONING 0xC430
+
+/* LED modes */
+#define IXGBE_LED_LINK_UP 0x0
+#define IXGBE_LED_LINK_10G 0x1
+#define IXGBE_LED_MAC 0x2
+#define IXGBE_LED_FILTER 0x3
+#define IXGBE_LED_LINK_ACTIVE 0x4
+#define IXGBE_LED_LINK_1G 0x5
+#define IXGBE_LED_ON 0xE
+#define IXGBE_LED_OFF 0xF
+
+/* AUTOC Bit Masks */
+#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
+#define IXGBE_AUTOC_KX4_SUPP 0x80000000
+#define IXGBE_AUTOC_KX_SUPP 0x40000000
+#define IXGBE_AUTOC_PAUSE 0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
+#define IXGBE_AUTOC_RF 0x08000000
+#define IXGBE_AUTOC_PD_TMR 0x06000000
+#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
+#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
+#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
+#define IXGBE_AUTOC_FECA 0x00040000
+#define IXGBE_AUTOC_FECR 0x00020000
+#define IXGBE_AUTOC_KR_SUPP 0x00010000
+#define IXGBE_AUTOC_AN_RESTART 0x00001000
+#define IXGBE_AUTOC_FLU 0x00000001
+#define IXGBE_AUTOC_LMS_SHIFT 13
+#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200
+#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
+#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
+#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
+#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
+#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000
+#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
+
+#define IXGBE_MACC_FLU 0x00000001
+#define IXGBE_MACC_FSV_10G 0x00030000
+#define IXGBE_MACC_FS 0x00040000
+#define IXGBE_MAC_RX2TX_LPBK 0x00000002
+
+/* Veto Bit definiton */
+#define IXGBE_MMNGC_MNG_VETO 0x00000001
+
+/* LINKS Bit Masks */
+#define IXGBE_LINKS_KX_AN_COMP 0x80000000
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED 0x20000000
+#define IXGBE_LINKS_MODE 0x18000000
+#define IXGBE_LINKS_RX_MODE 0x06000000
+#define IXGBE_LINKS_TX_MODE 0x01800000
+#define IXGBE_LINKS_XGXS_EN 0x00400000
+#define IXGBE_LINKS_SGMII_EN 0x02000000
+#define IXGBE_LINKS_PCS_1G_EN 0x00200000
+#define IXGBE_LINKS_1G_AN_EN 0x00100000
+#define IXGBE_LINKS_KX_AN_IDLE 0x00080000
+#define IXGBE_LINKS_1G_SYNC 0x00040000
+#define IXGBE_LINKS_10G_ALIGN 0x00020000
+#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000
+#define IXGBE_LINKS_TL_FAULT 0x00001000
+#define IXGBE_LINKS_SIGNAL 0x00000F00
+
+#define IXGBE_LINKS_SPEED_NON_STD 0x08000000
+#define IXGBE_LINKS_SPEED_82599 0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
+#define IXGBE_LINKS_SPEED_100_82599 0x10000000
+#define IXGBE_LINKS_SPEED_10_X550EM_A 0x00000000
+#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
+#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+
+#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
+
+/* PCS1GLSTA Bit Masks */
+#define IXGBE_PCS1GLSTA_LINK_OK 1
+#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
+#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000
+#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000
+#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000
+#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
+#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000
+
+#define IXGBE_PCS1GANA_SYM_PAUSE 0x80
+#define IXGBE_PCS1GANA_ASM_PAUSE 0x100
+
+/* PCS1GLCTL Bit Masks */
+#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */
+#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1
+#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20
+#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40
+#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
+#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
+
+/* ANLP1 Bit Masks */
+#define IXGBE_ANLP1_PAUSE 0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE 0x0400
+#define IXGBE_ANLP1_ASM_PAUSE 0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
+
+/* SW Semaphore Register bitmasks */
+#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
+
+/* SW_FW_SYNC/GSSR definitions */
+#define IXGBE_GSSR_EEP_SM 0x0001
+#define IXGBE_GSSR_PHY0_SM 0x0002
+#define IXGBE_GSSR_PHY1_SM 0x0004
+#define IXGBE_GSSR_MAC_CSR_SM 0x0008
+#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200
+#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */
+#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */
+#define IXGBE_GSSR_I2C_MASK 0x1800
+#define IXGBE_GSSR_NVM_PHY_MASK 0xF
+
+/* FW Status register bitmask */
+#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
+
+/* EEC Register */
+#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */
+#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */
+#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */
+#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */
+#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */
+#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */
+#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */
+#define IXGBE_EEC_FWE_SHIFT 4
+#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */
+#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
+#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
+#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
+#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
+#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
+/* EEPROM Addressing bits based on type (0-small, 1-large) */
+#define IXGBE_EEC_ADDR_SIZE 0x00000400
+#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
+#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. */
+
+#define IXGBE_EEC_SIZE_SHIFT 11
+#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
+#define IXGBE_EEPROM_OPCODE_BITS 8
+
+/* FLA Register */
+#define IXGBE_FLA_LOCKED 0x00000040
+
+/* Part Number String Length */
+#define IXGBE_PBANUM_LENGTH 11
+
+/* Checksum and EEPROM pointers */
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define IXGBE_EEPROM_CHECKSUM 0x3F
+#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_EEPROM_CTRL_4 0x45
+#define IXGBE_EE_CTRL_4_INST_ID 0x10
+#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4
+#define IXGBE_PCIE_ANALOG_PTR 0x03
+#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR 0x04
+#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR 0x05
+#define IXGBE_PCIE_GENERAL_PTR 0x06
+#define IXGBE_PCIE_CONFIG0_PTR 0x07
+#define IXGBE_PCIE_CONFIG1_PTR 0x08
+#define IXGBE_CORE0_PTR 0x09
+#define IXGBE_CORE1_PTR 0x0A
+#define IXGBE_MAC0_PTR 0x0B
+#define IXGBE_MAC1_PTR 0x0C
+#define IXGBE_CSR0_CONFIG_PTR 0x0D
+#define IXGBE_CSR1_CONFIG_PTR 0x0E
+#define IXGBE_PCIE_ANALOG_PTR_X550 0x02
+#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000
+#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24
+#define IXGBE_PCIE_CONFIG_SIZE 0x08
+#define IXGBE_EEPROM_LAST_WORD 0x41
+#define IXGBE_FW_PTR 0x0F
+#define IXGBE_PBANUM0_PTR 0x15
+#define IXGBE_PBANUM1_PTR 0x16
+#define IXGBE_ALT_MAC_ADDR_PTR 0x37
+#define IXGBE_FREE_SPACE_PTR 0X3E
+
+/* External Thermal Sensor Config */
+#define IXGBE_ETS_CFG 0x26
+#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0
+#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6
+#define IXGBE_ETS_TYPE_MASK 0x0038
+#define IXGBE_ETS_TYPE_SHIFT 3
+#define IXGBE_ETS_TYPE_EMC 0x000
+#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007
+#define IXGBE_ETS_DATA_LOC_MASK 0x3C00
+#define IXGBE_ETS_DATA_LOC_SHIFT 10
+#define IXGBE_ETS_DATA_INDEX_MASK 0x0300
+#define IXGBE_ETS_DATA_INDEX_SHIFT 8
+#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF
+
+#define IXGBE_SAN_MAC_ADDR_PTR 0x28
+#define IXGBE_DEVICE_CAPS 0x2C
+#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04
+
+#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
+#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
+#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
+#define IXGBE_MAX_MSIX_VECTORS_82598 0x13
+
+/* MSI-X capability fields masks */
+#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
+
+/* Legacy EEPROM word offsets */
+#define IXGBE_ISCSI_BOOT_CAPS 0x0033
+#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
+#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
+
+/* EEPROM Commands - SPI */
+#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
+#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
+#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
+#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
+#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
+/* EEPROM reset Write Enable latch */
+#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
+#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
+#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
+#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
+#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
+#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
+
+/* EEPROM Read Register */
+#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
+#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
+#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */
+#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */
+
+#define NVM_INIT_CTRL_3 0x38
+#define NVM_INIT_CTRL_3_LPLU 0x8
+#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40
+#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100
+
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+
+#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
+#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */
+#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */
+#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */
+#define IXGBE_EEPROM_CCD_BIT 2
+
+#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
+#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */
+#endif
+
+/* Number of 5 microseconds we wait for EERD read and
+ * EERW write to complete */
+#define IXGBE_EERD_EEWR_ATTEMPTS 100000
+
+/* # attempts we wait for flush update to complete */
+#define IXGBE_FLUDONE_ATTEMPTS 20000
+
+#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
+#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
+#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */
+#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */
+
+#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
+#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
+#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
+#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR (1 << 7)
+#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
+#define IXGBE_FW_LESM_STATE_1 0x1
+#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
+#define IXGBE_FW_LESM_2_STATES_ENABLED_MASK 0x1F
+#define IXGBE_FW_LESM_2_STATES_ENABLED 0x12
+#define IXGBE_FW_LESM_STATE0_10G_ENABLED 0x6FFF
+#define IXGBE_FW_LESM_STATE1_10G_ENABLED 0x4FFF
+#define IXGBE_FW_LESM_STATE0_10G_DISABLED 0x0FFF
+#define IXGBE_FW_LESM_STATE1_10G_DISABLED 0x2FFF
+#define IXGBE_FW_LESM_PORT0_STATE0_OFFSET 0x2
+#define IXGBE_FW_LESM_PORT0_STATE1_OFFSET 0x3
+#define IXGBE_FW_LESM_PORT1_STATE0_OFFSET 0x6
+#define IXGBE_FW_LESM_PORT1_STATE1_OFFSET 0x7
+#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
+#define IXGBE_FW_PATCH_VERSION_4 0x7
+#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
+#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
+#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */
+
+/* FW header offset */
+#define IXGBE_X540_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
+#define IXGBE_X540_FW_MODULE_MASK 0x7FFF
+/* 4KB multiplier */
+#define IXGBE_X540_FW_MODULE_LENGTH 0x1000
+/* version word 2 (month & day) */
+#define IXGBE_X540_FW_PATCH_VERSION_2 0x5
+/* version word 3 (silicon compatibility & year) */
+#define IXGBE_X540_FW_PATCH_VERSION_3 0x6
+/* version word 4 (major & minor numbers) */
+#define IXGBE_X540_FW_PATCH_VERSION_4 0x7
+
+#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */
+#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */
+#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */
+
+/* PCI Bus Info */
+#define IXGBE_PCI_DEVICE_STATUS 0xAA
+#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
+#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
+#define IXGBE_PCI_LINK_WIDTH 0x3F0
+#define IXGBE_PCI_LINK_WIDTH_1 0x10
+#define IXGBE_PCI_LINK_WIDTH_2 0x20
+#define IXGBE_PCI_LINK_WIDTH_4 0x40
+#define IXGBE_PCI_LINK_WIDTH_8 0x80
+#define IXGBE_PCI_LINK_SPEED 0xF
+#define IXGBE_PCI_LINK_SPEED_2500 0x1
+#define IXGBE_PCI_LINK_SPEED_5000 0x2
+#define IXGBE_PCI_LINK_SPEED_8000 0x3
+#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
+#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
+
+#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf
+#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0
+#define IXGBE_PCIDEVCTRL2_50_100us 0x1
+#define IXGBE_PCIDEVCTRL2_1_2ms 0x2
+#define IXGBE_PCIDEVCTRL2_16_32ms 0x5
+#define IXGBE_PCIDEVCTRL2_65_130ms 0x6
+#define IXGBE_PCIDEVCTRL2_260_520ms 0x9
+#define IXGBE_PCIDEVCTRL2_1_2s 0xa
+#define IXGBE_PCIDEVCTRL2_4_8s 0xd
+#define IXGBE_PCIDEVCTRL2_17_34s 0xe
+
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+
+/* Check whether address is multicast. This is little-endian specific check.*/
+#define IXGBE_IS_MULTICAST(Address) \
+ (bool)(((u8 *)(Address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define IXGBE_IS_BROADCAST(Address) \
+ ((((u8 *)(Address))[0] == ((u8)0xff)) && \
+ (((u8 *)(Address))[1] == ((u8)0xff)))
+
+/* RAH */
+#define IXGBE_RAH_VIND_MASK 0x003C0000
+#define IXGBE_RAH_VIND_SHIFT 18
+#define IXGBE_RAH_AV 0x80000000
+#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
+
+/* Header split receive */
+#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
+#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
+#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
+#define IXGBE_RFCTL_RSC_DIS 0x00000020
+#define IXGBE_RFCTL_NFSW_DIS 0x00000040
+#define IXGBE_RFCTL_NFSR_DIS 0x00000080
+#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
+#define IXGBE_RFCTL_NFS_VER_SHIFT 8
+#define IXGBE_RFCTL_NFS_VER_2 0
+#define IXGBE_RFCTL_NFS_VER_3 1
+#define IXGBE_RFCTL_NFS_VER_4 2
+#define IXGBE_RFCTL_IPV6_DIS 0x00000400
+#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800
+#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000
+#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000
+#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
+/* Enable short packet padding to 64 bytes */
+#define IXGBE_TX_PAD_ENABLE 0x00000400
+#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
+/* This allows for 16K packets + 4k for vlan */
+#define IXGBE_MAX_FRAME_SZ 0x40040000
+
+#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
+#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Ena specific Rx Queue */
+#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc wr-bk flushing */
+#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* X540 supported only */
+#define IXGBE_RXDCTL_RLPML_EN 0x00008000
+#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+
+#define IXGBE_TSAUXC_EN_CLK 0x00000004
+#define IXGBE_TSAUXC_SYNCLK 0x00000008
+#define IXGBE_TSAUXC_SDP0_INT 0x00000040
+#define IXGBE_TSAUXC_EN_TT0 0x00000001
+#define IXGBE_TSAUXC_EN_TT1 0x00000002
+#define IXGBE_TSAUXC_ST0 0x00000010
+#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000
+
+#define IXGBE_TSSDP_TS_SDP0_SEL_MASK 0x000000C0
+#define IXGBE_TSSDP_TS_SDP0_CLK0 0x00000080
+#define IXGBE_TSSDP_TS_SDP0_EN 0x00000100
+
+#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */
+
+#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define IXGBE_TSYNCRXCTL_TYPE_ALL 0x08
+#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */
+#define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */
+#define IXGBE_TSYNCRXCTL_TSIP_UP_MASK 0xFF000000 /* Rx Timestamp UP Mask */
+
+#define IXGBE_TSIM_SYS_WRAP 0x00000001
+#define IXGBE_TSIM_TXTS 0x00000002
+#define IXGBE_TSIM_TADJ 0x00000080
+
+#define IXGBE_TSICR_SYS_WRAP IXGBE_TSIM_SYS_WRAP
+#define IXGBE_TSICR_TXTS IXGBE_TSIM_TXTS
+#define IXGBE_TSICR_TADJ IXGBE_TSIM_TADJ
+
+#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF
+#define IXGBE_RXMTRL_V1_SYNC_MSG 0x00
+#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01
+#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02
+#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03
+#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04
+
+#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00
+#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000
+#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100
+#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200
+#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300
+#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800
+#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900
+#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00
+#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00
+#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00
+#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00
+
+#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
+#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
+#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
+#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
+#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
+/* Receive Priority Flow Control Enable */
+#define IXGBE_FCTRL_RPFCE 0x00004000
+#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
+#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */
+#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */
+#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
+#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
+#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */
+#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */
+
+/* Multiple Receive Queue Control */
+#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */
+#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */
+#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */
+#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */
+#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */
+#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */
+#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */
+#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */
+#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */
+#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Enable L3/L4 Tx switch */
+#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000
+#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
+
+/* Queue Drop Enable */
+#define IXGBE_QDE_ENABLE 0x00000001
+#define IXGBE_QDE_HIDE_VLAN 0x00000002
+#define IXGBE_QDE_IDX_MASK 0x00007F00
+#define IXGBE_QDE_IDX_SHIFT 8
+#define IXGBE_QDE_WRITE 0x00010000
+#define IXGBE_QDE_READ 0x00020000
+
+#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+/* Multiple Transmit Queue Command Register */
+#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
+#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */
+#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
+#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
+#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */
+#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */
+#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */
+#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */
+#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */
+#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCEOFe/IPE */
+#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
+#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
+#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */
+#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT 13
+#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT 12
+
+#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
+#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */
+#define IXGBE_RXDADV_STAT_TSIP 0x00008000 /* Time Stamp in packet buffer */
+
+/* PSRTYPE bit definitions */
+#define IXGBE_PSRTYPE_TCPHDR 0x00000010
+#define IXGBE_PSRTYPE_UDPHDR 0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* 64byte resolution (>> 6)
+ * + at bit 8 offset (<< 8)
+ * = (<< 2)
+ */
+#define IXGBE_SRRCTL_RDMTS_SHIFT 22
+#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
+#define IXGBE_SRRCTL_DROP_EN 0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
+
+#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
+#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
+#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
+#define IXGBE_RXDADV_SPH 0x8000
+
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor. */
+#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_GENEVE 0x00000800 /* GENEVE hdr present */
+#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */
+#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
+#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
+#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
+
+/* Security Processing bit Indication */
+#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
+
+/* Masks to determine if packets should be dropped due to frame errors */
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599 IXGBE_RXDADV_ERR_RXE
+
+/* Multicast bit mask */
+#define IXGBE_MCSTCTRL_MFE 0x4
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+
+/* Vlan-specific macros */
+#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
+#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+
+/* SR-IOV specific macros */
+#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
+#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
+#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
+/* Translated register #defines */
+#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P)))
+#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P)))
+#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P)))
+#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P)))
+#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P)))
+#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P)))
+#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P)))
+#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P)))
+#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P)))
+#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P)))
+#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P)))
+#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P)))
+#define IXGBE_PVTEITR(P) (((P) < 24) ? (0x00820 + ((P) * 4)) : \
+ (0x012300 + (((P) - 24) * 4)))
+#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P)))
+#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P)))
+#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P)))
+#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P)))
+#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \
+ : (0x0D000 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * (P))) \
+ : (0x0D004 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \
+ : (0x0D008 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \
+ : (0x0D010 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \
+ : (0x0D018 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \
+ : (0x0D028 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * (P))) \
+ : (0x0D014 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P)))
+#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P)))
+#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P)))
+#define IXGBE_PVFTDLEN(P) (0x06008 + (0x40 * (P)))
+#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
+#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
+#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? (0x0100C + (0x40 * (P))) \
+ : (0x0D00C + (0x40 * ((P) - 64))))
+#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P)))
+#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x)))
+#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x)))
+#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x)))
+#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x)))
+#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x)))
+#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x)))
+#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x)))
+
+#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
+
+#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index)))
+
+/* Little Endian defines */
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+
+#endif
+#ifndef __be16
+/* Big Endian defines */
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+
+#endif
+enum ixgbe_fdir_pballoc_type {
+ IXGBE_FDIR_PBALLOC_NONE = 0,
+ IXGBE_FDIR_PBALLOC_64K = 1,
+ IXGBE_FDIR_PBALLOC_128K = 2,
+ IXGBE_FDIR_PBALLOC_256K = 3,
+};
+
+/* Flow Director register values */
+#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001
+#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002
+#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003
+#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008
+#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010
+#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020
+#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
+#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
+#define IXGBE_FDIRCTRL_DROP_Q_MASK 0x00007F00
+#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
+#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000
+#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21
+#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */
+#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */
+#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
+#define IXGBE_FDIRCTRL_FILTERMODE_MASK 0x00E00000
+#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
+#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
+#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28
+
+#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRIP6M_DIPM_SHIFT 16
+#define IXGBE_FDIRM_VLANID 0x00000001
+#define IXGBE_FDIRM_VLANP 0x00000002
+#define IXGBE_FDIRM_POOL 0x00000004
+#define IXGBE_FDIRM_L4P 0x00000008
+#define IXGBE_FDIRM_FLEX 0x00000010
+#define IXGBE_FDIRM_DIPv6 0x00000020
+#define IXGBE_FDIRM_L3P 0x00000040
+
+#define IXGBE_FDIRIP6M_INNER_MAC 0x03F0 /* bit 9:4 */
+#define IXGBE_FDIRIP6M_TUNNEL_TYPE 0x0800 /* bit 11 */
+#define IXGBE_FDIRIP6M_TNI_VNI 0xF000 /* bit 15:12 */
+#define IXGBE_FDIRIP6M_TNI_VNI_24 0x1000 /* bit 12 */
+#define IXGBE_FDIRIP6M_ALWAYS_MASK 0x040F /* bit 10, 3:0 */
+
+#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
+#define IXGBE_FDIRFREE_FREE_SHIFT 0
+#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000
+#define IXGBE_FDIRFREE_COLL_SHIFT 16
+#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F
+#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0
+#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000
+#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16
+#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF
+#define IXGBE_FDIRUSTAT_ADD_SHIFT 0
+#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000
+#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16
+#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF
+#define IXGBE_FDIRFSTAT_FADD_SHIFT 0
+#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00
+#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8
+#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16
+#define IXGBE_FDIRVLAN_FLEX_SHIFT 16
+#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15
+#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16
+
+#define IXGBE_FDIRCMD_CMD_MASK 0x00000003
+#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
+#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
+#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004
+#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
+#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
+#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
+#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040
+#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060
+#define IXGBE_FDIRCMD_IPV6 0x00000080
+#define IXGBE_FDIRCMD_CLEARHT 0x00000100
+#define IXGBE_FDIRCMD_DROP 0x00000200
+#define IXGBE_FDIRCMD_INT 0x00000400
+#define IXGBE_FDIRCMD_LAST 0x00000800
+#define IXGBE_FDIRCMD_COLLISION 0x00001000
+#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
+#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
+#define IXGBE_FDIRCMD_TUNNEL_FILTER_SHIFT 23
+#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
+#define IXGBE_FDIR_INIT_DONE_POLL 10
+#define IXGBE_FDIRCMD_CMD_POLL 10
+#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000
+#define IXGBE_FDIR_DROP_QUEUE 127
+
+
+/* Manageablility Host Interface defines */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */
+#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */
+#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */
+#define IXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_DRIVER_INFO 0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0X0
+#define FW_CEM_UNUSED_VER 0x0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */
+#define FW_READ_SHADOW_RAM_CMD 0x31
+#define FW_READ_SHADOW_RAM_LEN 0x6
+#define FW_WRITE_SHADOW_RAM_CMD 0x33
+#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */
+#define FW_SHADOW_RAM_DUMP_CMD 0x36
+#define FW_SHADOW_RAM_DUMP_LEN 0
+#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
+#define FW_NVM_DATA_OFFSET 3
+#define FW_MAX_READ_BUFFER_SIZE 1024
+#define FW_DISABLE_RXEN_CMD 0xDE
+#define FW_DISABLE_RXEN_LEN 0x1
+#define FW_PHY_MGMT_REQ_CMD 0x20
+#define FW_PHY_TOKEN_REQ_CMD 0xA
+#define FW_PHY_TOKEN_REQ_LEN 2
+#define FW_PHY_TOKEN_REQ 0
+#define FW_PHY_TOKEN_REL 1
+#define FW_PHY_TOKEN_OK 1
+#define FW_PHY_TOKEN_RETRY 0x80
+#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */
+#define FW_PHY_TOKEN_WAIT 5 /* seconds */
+#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY)
+#define FW_INT_PHY_REQ_CMD 0xB
+#define FW_INT_PHY_REQ_LEN 10
+#define FW_INT_PHY_REQ_READ 0
+#define FW_INT_PHY_REQ_WRITE 1
+#define FW_PHY_ACT_REQ_CMD 5
+#define FW_PHY_ACT_DATA_COUNT 4
+#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT)
+#define FW_PHY_ACT_INIT_PHY 1
+#define FW_PHY_ACT_SETUP_LINK 2
+#define FW_PHY_ACT_LINK_SPEED_10 (1u << 0)
+#define FW_PHY_ACT_LINK_SPEED_100 (1u << 1)
+#define FW_PHY_ACT_LINK_SPEED_1G (1u << 2)
+#define FW_PHY_ACT_LINK_SPEED_2_5G (1u << 3)
+#define FW_PHY_ACT_LINK_SPEED_5G (1u << 4)
+#define FW_PHY_ACT_LINK_SPEED_10G (1u << 5)
+#define FW_PHY_ACT_LINK_SPEED_20G (1u << 6)
+#define FW_PHY_ACT_LINK_SPEED_25G (1u << 7)
+#define FW_PHY_ACT_LINK_SPEED_40G (1u << 8)
+#define FW_PHY_ACT_LINK_SPEED_50G (1u << 9)
+#define FW_PHY_ACT_LINK_SPEED_100G (1u << 10)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3u << \
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u
+#define FW_PHY_ACT_SETUP_LINK_LP (1u << 18)
+#define FW_PHY_ACT_SETUP_LINK_HP (1u << 19)
+#define FW_PHY_ACT_SETUP_LINK_EEE (1u << 20)
+#define FW_PHY_ACT_SETUP_LINK_AN (1u << 22)
+#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN (1u << 0)
+#define FW_PHY_ACT_GET_LINK_INFO 3
+#define FW_PHY_ACT_GET_LINK_INFO_EEE (1u << 19)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_TX (1u << 20)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_RX (1u << 21)
+#define FW_PHY_ACT_GET_LINK_INFO_POWER (1u << 22)
+#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE (1u << 24)
+#define FW_PHY_ACT_GET_LINK_INFO_TEMP (1u << 25)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX (1u << 28)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX (1u << 29)
+#define FW_PHY_ACT_FORCE_LINK_DOWN 4
+#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF (1u << 0)
+#define FW_PHY_ACT_PHY_SW_RESET 5
+#define FW_PHY_ACT_PHY_HW_RESET 6
+#define FW_PHY_ACT_GET_PHY_INFO 7
+#define FW_PHY_ACT_UD_2 0x1002
+#define FW_PHY_ACT_UD_2_10G_KR_EEE (1u << 6)
+#define FW_PHY_ACT_UD_2_10G_KX4_EEE (1u << 5)
+#define FW_PHY_ACT_UD_2_1G_KX_EEE (1u << 4)
+#define FW_PHY_ACT_UD_2_10G_T_EEE (1u << 3)
+#define FW_PHY_ACT_UD_2_1G_T_EEE (1u << 2)
+#define FW_PHY_ACT_UD_2_100M_TX_EEE (1u << 1)
+#define FW_PHY_ACT_RETRIES 50
+#define FW_PHY_INFO_SPEED_MASK 0xFFFu
+#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u
+#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu
+
+/* Host Interface Command Structures */
+
+#ifdef C99
+#pragma pack(push, 1)
+#else
+#pragma pack (1)
+#endif /* C99 */
+
+struct ixgbe_hic_hdr {
+ u8 cmd;
+ u8 buf_len;
+ union {
+ u8 cmd_resv;
+ u8 ret_status;
+ } cmd_or_resp;
+ u8 checksum;
+};
+
+struct ixgbe_hic_hdr2_req {
+ u8 cmd;
+ u8 buf_lenh;
+ u8 buf_lenl;
+ u8 checksum;
+};
+
+struct ixgbe_hic_hdr2_rsp {
+ u8 cmd;
+ u8 buf_lenl;
+ u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
+ u8 checksum;
+};
+
+union ixgbe_hic_hdr2 {
+ struct ixgbe_hic_hdr2_req req;
+ struct ixgbe_hic_hdr2_rsp rsp;
+};
+
+struct ixgbe_hic_drv_info {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ u8 pad; /* end spacing to ensure length is mult. of dword */
+ u16 pad2; /* end spacing to ensure length is mult. of dword2 */
+};
+
+struct ixgbe_hic_drv_info2 {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ char driver_string[FW_CEM_DRIVER_VERSION_SIZE];
+};
+
+/* These need to be dword aligned */
+struct ixgbe_hic_read_shadow_ram {
+ union ixgbe_hic_hdr2 hdr;
+ u32 address;
+ u16 length;
+ u16 pad2;
+ u16 data;
+ u16 pad3;
+};
+
+struct ixgbe_hic_write_shadow_ram {
+ union ixgbe_hic_hdr2 hdr;
+ u32 address;
+ u16 length;
+ u16 pad2;
+ u16 data;
+ u16 pad3;
+};
+
+struct ixgbe_hic_disable_rxen {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 pad2;
+ u16 pad3;
+};
+
+struct ixgbe_hic_phy_token_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 command_type;
+ u16 pad;
+};
+
+struct ixgbe_hic_internal_phy_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 command_type;
+ __be16 address;
+ u16 rsv1;
+ __be32 write_data;
+ u16 pad;
+};
+
+struct ixgbe_hic_internal_phy_resp {
+ struct ixgbe_hic_hdr hdr;
+ __be32 read_data;
+};
+
+struct ixgbe_hic_phy_activity_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 pad;
+ __le16 activity_id;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
+struct ixgbe_hic_phy_activity_resp {
+ struct ixgbe_hic_hdr hdr;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
+#ifdef C99
+#pragma pack(pop)
+#else
+#pragma pack()
+#endif /* C99 */
+
+/* Transmit Descriptor - Legacy */
+struct ixgbe_legacy_tx_desc {
+ u64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 vlan;
+ } fields;
+ } upper;
+};
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor - Legacy */
+struct ixgbe_legacy_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
+ __le16 vlan;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
+#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */
+#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 time stamp */
+#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
+#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Adv Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Adv Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */
+#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
+#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
+#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+/* 1st&Last TSO-full iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800
+#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */
+#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* req Markers and CRC */
+#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
+#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
+#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */
+#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
+#define IXGBE_ADVTXD_OUTER_IPLEN 16 /* Adv ctxt OUTERIPLEN shift */
+#define IXGBE_ADVTXD_TUNNEL_LEN 24 /* Adv ctxt TUNNELLEN shift */
+#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT 16 /* Adv Tx Desc Tunnel Type shift */
+#define IXGBE_ADVTXD_OUTERIPCS_SHIFT 17 /* Adv Tx Desc OUTERIPCS Shift */
+#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE 1 /* Adv Tx Desc Tunnel Type NVGRE */
+/* Adv Tx Desc OUTERIPCS Shift for X550EM_a */
+#define IXGBE_ADVTXD_OUTERIPCS_SHIFT_X550EM_a 26
+/* Autonegotiation advertised speeds */
+typedef u32 ixgbe_autoneg_advertised;
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_UNKNOWN 0
+#define IXGBE_LINK_SPEED_10_FULL 0x0002
+#define IXGBE_LINK_SPEED_100_FULL 0x0008
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400
+#define IXGBE_LINK_SPEED_5GB_FULL 0x0800
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
+#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
+ IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
+
+/* Physical layer type */
+typedef u64 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
+#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
+
+/* Flow Control Data Sheet defined values
+ * Calculation and defines taken from 802.1bb Annex O
+ */
+
+/* BitTimes (BT) conversion */
+#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
+#define IXGBE_B2BT(BT) (BT * 8)
+
+/* Calculate Delay to respond to PFC */
+#define IXGBE_PFC_D 672
+
+/* Calculate Cable Delay */
+#define IXGBE_CABLE_DC 5556 /* Delay Copper */
+#define IXGBE_CABLE_DO 5000 /* Delay Optical */
+
+/* Calculate Interface Delay X540 */
+#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */
+#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */
+#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */
+
+#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC)
+
+/* Calculate Interface Delay 82598, 82599 */
+#define IXGBE_PHY_D 12800
+#define IXGBE_MAC_D 4096
+#define IXGBE_XAUI_D (2 * 1024)
+
+#define IXGBE_ID (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D)
+
+/* Calculate Delay incurred from higher layer */
+#define IXGBE_HD 6144
+
+/* Calculate PCI Bus delay for low thresholds */
+#define IXGBE_PCI_DELAY 10000
+
+/* Calculate X540 delay value in bit times */
+#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID_X540) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
+
+/* Calculate 82599, 82598 delay value in bit times */
+#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
+
+/* Calculate low threshold delay values */
+#define IXGBE_LOW_DV_X540(_max_frame_tc) \
+ (2 * IXGBE_B2BT(_max_frame_tc) + \
+ (36 * IXGBE_PCI_DELAY / 25) + 1)
+#define IXGBE_LOW_DV(_max_frame_tc) \
+ (2 * IXGBE_LOW_DV_X540(_max_frame_tc))
+
+/* Software ATR hash keys */
+#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK 0x7fff
+#define IXGBE_ATR_L4TYPE_MASK 0x3
+#define IXGBE_ATR_L4TYPE_UDP 0x1
+#define IXGBE_ATR_L4TYPE_TCP 0x2
+#define IXGBE_ATR_L4TYPE_SCTP 0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10
+enum ixgbe_atr_flow_type {
+ IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17,
+};
+
+/* Flow Director ATR input struct. */
+union ixgbe_atr_input {
+ /*
+ * Byte layout in order, all values with MSB first:
+ *
+ * vm_pool - 1 byte
+ * flow_type - 1 byte
+ * vlan_id - 2 bytes
+ * src_ip - 16 bytes
+ * inner_mac - 6 bytes
+ * cloud_mode - 2 bytes
+ * tni_vni - 4 bytes
+ * dst_ip - 16 bytes
+ * src_port - 2 bytes
+ * dst_port - 2 bytes
+ * flex_bytes - 2 bytes
+ * bkt_hash - 2 bytes
+ */
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ u8 inner_mac[6];
+ __be16 tunnel_type;
+ __be32 tni_vni;
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ __be16 bkt_hash;
+ } formatted;
+ __be32 dword_stream[14];
+};
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ } formatted;
+ __be32 ip;
+ struct {
+ __be16 src;
+ __be16 dst;
+ } port;
+ __be16 flex_bytes;
+ __be32 dword;
+};
+
+
+#define IXGBE_MVALS_INIT(m) \
+ IXGBE_CAT(EEC, m), \
+ IXGBE_CAT(FLA, m), \
+ IXGBE_CAT(GRC, m), \
+ IXGBE_CAT(SRAMREL, m), \
+ IXGBE_CAT(FACTPS, m), \
+ IXGBE_CAT(SWSM, m), \
+ IXGBE_CAT(SWFW_SYNC, m), \
+ IXGBE_CAT(FWSM, m), \
+ IXGBE_CAT(SDP0_GPIEN, m), \
+ IXGBE_CAT(SDP1_GPIEN, m), \
+ IXGBE_CAT(SDP2_GPIEN, m), \
+ IXGBE_CAT(EICR_GPI_SDP0, m), \
+ IXGBE_CAT(EICR_GPI_SDP1, m), \
+ IXGBE_CAT(EICR_GPI_SDP2, m), \
+ IXGBE_CAT(CIAA, m), \
+ IXGBE_CAT(CIAD, m), \
+ IXGBE_CAT(I2C_CLK_IN, m), \
+ IXGBE_CAT(I2C_CLK_OUT, m), \
+ IXGBE_CAT(I2C_DATA_IN, m), \
+ IXGBE_CAT(I2C_DATA_OUT, m), \
+ IXGBE_CAT(I2C_DATA_OE_N_EN, m), \
+ IXGBE_CAT(I2C_BB_EN, m), \
+ IXGBE_CAT(I2C_CLK_OE_N_EN, m), \
+ IXGBE_CAT(I2CCTL, m)
+
+enum ixgbe_mvals {
+ IXGBE_MVALS_INIT(_IDX),
+ IXGBE_MVALS_IDX_LIMIT
+};
+
+/*
+ * Unavailable: The FCoE Boot Option ROM is not present in the flash.
+ * Disabled: Present; boot order is not set for any targets on the port.
+ * Enabled: Present; boot order is set for at least one target on the port.
+ */
+enum ixgbe_fcoe_boot_status {
+ ixgbe_fcoe_bootstatus_disabled = 0,
+ ixgbe_fcoe_bootstatus_enabled = 1,
+ ixgbe_fcoe_bootstatus_unavailable = 0xFFFF
+};
+
+enum ixgbe_eeprom_type {
+ ixgbe_eeprom_uninitialized = 0,
+ ixgbe_eeprom_spi,
+ ixgbe_flash,
+ ixgbe_eeprom_none /* No NVM support */
+};
+
+enum ixgbe_mac_type {
+ ixgbe_mac_unknown = 0,
+ ixgbe_mac_82598EB,
+ ixgbe_mac_82599EB,
+ ixgbe_mac_82599_vf,
+ ixgbe_mac_X540,
+ ixgbe_mac_X540_vf,
+ ixgbe_mac_X550,
+ ixgbe_mac_X550EM_x,
+ ixgbe_mac_X550EM_a,
+ ixgbe_mac_X550_vf,
+ ixgbe_mac_X550EM_x_vf,
+ ixgbe_mac_X550EM_a_vf,
+ ixgbe_num_macs
+};
+
+enum ixgbe_phy_type {
+ ixgbe_phy_unknown = 0,
+ ixgbe_phy_none,
+ ixgbe_phy_tn,
+ ixgbe_phy_aq,
+ ixgbe_phy_x550em_kr,
+ ixgbe_phy_x550em_kx4,
+ ixgbe_phy_x550em_xfi,
+ ixgbe_phy_x550em_ext_t,
+ ixgbe_phy_ext_1g_t,
+ ixgbe_phy_cu_unknown,
+ ixgbe_phy_qt,
+ ixgbe_phy_xaui,
+ ixgbe_phy_nl,
+ ixgbe_phy_sfp_passive_tyco,
+ ixgbe_phy_sfp_passive_unknown,
+ ixgbe_phy_sfp_active_unknown,
+ ixgbe_phy_sfp_avago,
+ ixgbe_phy_sfp_ftl,
+ ixgbe_phy_sfp_ftl_active,
+ ixgbe_phy_sfp_unknown,
+ ixgbe_phy_sfp_intel,
+ ixgbe_phy_qsfp_passive_unknown,
+ ixgbe_phy_qsfp_active_unknown,
+ ixgbe_phy_qsfp_intel,
+ ixgbe_phy_qsfp_unknown,
+ ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
+ ixgbe_phy_sgmii,
+ ixgbe_phy_fw,
+ ixgbe_phy_generic
+};
+
+/*
+ * SFP+ module type IDs:
+ *
+ * ID Module Type
+ * =============
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ * 3 SFP_DA_CU_CORE0 - 82599-specific
+ * 4 SFP_DA_CU_CORE1 - 82599-specific
+ * 5 SFP_SR/LR_CORE0 - 82599-specific
+ * 6 SFP_SR/LR_CORE1 - 82599-specific
+ */
+enum ixgbe_sfp_type {
+ ixgbe_sfp_type_da_cu = 0,
+ ixgbe_sfp_type_sr = 1,
+ ixgbe_sfp_type_lr = 2,
+ ixgbe_sfp_type_da_cu_core0 = 3,
+ ixgbe_sfp_type_da_cu_core1 = 4,
+ ixgbe_sfp_type_srlr_core0 = 5,
+ ixgbe_sfp_type_srlr_core1 = 6,
+ ixgbe_sfp_type_da_act_lmt_core0 = 7,
+ ixgbe_sfp_type_da_act_lmt_core1 = 8,
+ ixgbe_sfp_type_1g_cu_core0 = 9,
+ ixgbe_sfp_type_1g_cu_core1 = 10,
+ ixgbe_sfp_type_1g_sx_core0 = 11,
+ ixgbe_sfp_type_1g_sx_core1 = 12,
+ ixgbe_sfp_type_1g_lx_core0 = 13,
+ ixgbe_sfp_type_1g_lx_core1 = 14,
+ ixgbe_sfp_type_not_present = 0xFFFE,
+ ixgbe_sfp_type_unknown = 0xFFFF
+};
+
+enum ixgbe_media_type {
+ ixgbe_media_type_unknown = 0,
+ ixgbe_media_type_fiber,
+ ixgbe_media_type_fiber_fixed,
+ ixgbe_media_type_fiber_qsfp,
+ ixgbe_media_type_fiber_lco,
+ ixgbe_media_type_copper,
+ ixgbe_media_type_backplane,
+ ixgbe_media_type_cx4,
+ ixgbe_media_type_virtual
+};
+
+/* Flow Control Settings */
+enum ixgbe_fc_mode {
+ ixgbe_fc_none = 0,
+ ixgbe_fc_rx_pause,
+ ixgbe_fc_tx_pause,
+ ixgbe_fc_full,
+ ixgbe_fc_default
+};
+
+/* Smart Speed Settings */
+#define IXGBE_SMARTSPEED_MAX_RETRIES 3
+enum ixgbe_smart_speed {
+ ixgbe_smart_speed_auto = 0,
+ ixgbe_smart_speed_on,
+ ixgbe_smart_speed_off
+};
+
+/* PCI bus types */
+enum ixgbe_bus_type {
+ ixgbe_bus_type_unknown = 0,
+ ixgbe_bus_type_pci,
+ ixgbe_bus_type_pcix,
+ ixgbe_bus_type_pci_express,
+ ixgbe_bus_type_internal,
+ ixgbe_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum ixgbe_bus_speed {
+ ixgbe_bus_speed_unknown = 0,
+ ixgbe_bus_speed_33 = 33,
+ ixgbe_bus_speed_66 = 66,
+ ixgbe_bus_speed_100 = 100,
+ ixgbe_bus_speed_120 = 120,
+ ixgbe_bus_speed_133 = 133,
+ ixgbe_bus_speed_2500 = 2500,
+ ixgbe_bus_speed_5000 = 5000,
+ ixgbe_bus_speed_8000 = 8000,
+ ixgbe_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum ixgbe_bus_width {
+ ixgbe_bus_width_unknown = 0,
+ ixgbe_bus_width_pcie_x1 = 1,
+ ixgbe_bus_width_pcie_x2 = 2,
+ ixgbe_bus_width_pcie_x4 = 4,
+ ixgbe_bus_width_pcie_x8 = 8,
+ ixgbe_bus_width_32 = 32,
+ ixgbe_bus_width_64 = 64,
+ ixgbe_bus_width_reserved
+};
+
+struct ixgbe_addr_filter_info {
+ u32 num_mc_addrs;
+ u32 rar_used_count;
+ u32 mta_in_use;
+ u32 overflow_promisc;
+ bool user_set_promisc;
+};
+
+/* Bus parameters */
+struct ixgbe_bus_info {
+ enum ixgbe_bus_speed speed;
+ enum ixgbe_bus_width width;
+ enum ixgbe_bus_type type;
+
+ u16 func;
+ u8 lan_id;
+ u16 instance_id;
+};
+
+/* Flow control parameters */
+struct ixgbe_fc_info {
+ u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */
+ u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */
+ u16 pause_time; /* Flow Control Pause timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ bool disable_fc_autoneg; /* Do not autonegotiate FC */
+ bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+ enum ixgbe_fc_mode current_mode; /* FC mode in effect */
+ enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+/* Statistics counters collected by the MAC */
+struct ixgbe_hw_stats {
+ u64 crcerrs;
+ u64 illerrc;
+ u64 errbc;
+ u64 mspdc;
+ u64 mpctotal;
+ u64 mpc[8];
+ u64 mlfc;
+ u64 mrfc;
+ u64 rlec;
+ u64 lxontxc;
+ u64 lxonrxc;
+ u64 lxofftxc;
+ u64 lxoffrxc;
+ u64 pxontxc[8];
+ u64 pxonrxc[8];
+ u64 pxofftxc[8];
+ u64 pxoffrxc[8];
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 rnbc[8];
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mngprc;
+ u64 mngpdc;
+ u64 mngptc;
+ u64 tor;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 xec;
+ u64 qprc[16];
+ u64 qptc[16];
+ u64 qbrc[16];
+ u64 qbtc[16];
+ u64 qprdc[16];
+ u64 pxon2offc[8];
+ u64 fdirustat_add;
+ u64 fdirustat_remove;
+ u64 fdirfstat_fadd;
+ u64 fdirfstat_fremove;
+ u64 fdirmatch;
+ u64 fdirmiss;
+ u64 fccrc;
+ u64 fclast;
+ u64 fcoerpdc;
+ u64 fcoeprc;
+ u64 fcoeptc;
+ u64 fcoedwrc;
+ u64 fcoedwtc;
+ u64 fcoe_noddp;
+ u64 fcoe_noddp_ext_buff;
+ u64 ldpcec;
+ u64 pcrc8ec;
+ u64 b2ospc;
+ u64 b2ogprc;
+ u64 o2bgptc;
+ u64 o2bspc;
+};
+
+/* forward declaration */
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq);
+
+/* Function pointer table */
+struct ixgbe_eeprom_operations {
+ s32 (*init_params)(struct ixgbe_hw *);
+ s32 (*read)(struct ixgbe_hw *, u16, u16 *);
+ s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ s32 (*write)(struct ixgbe_hw *, u16, u16);
+ s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
+ s32 (*update_checksum)(struct ixgbe_hw *);
+ s32 (*calc_checksum)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mac_operations {
+ s32 (*init_hw)(struct ixgbe_hw *);
+ s32 (*reset_hw)(struct ixgbe_hw *);
+ s32 (*start_hw)(struct ixgbe_hw *);
+ s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ void (*enable_relaxed_ordering)(struct ixgbe_hw *);
+ enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ u64 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
+ s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
+ s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *);
+ s32 (*stop_adapter)(struct ixgbe_hw *);
+ s32 (*get_bus_info)(struct ixgbe_hw *);
+ void (*set_lan_id)(struct ixgbe_hw *);
+ s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
+ s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
+ s32 (*setup_sfp)(struct ixgbe_hw *);
+ s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
+ s32 (*disable_sec_rx_path)(struct ixgbe_hw *);
+ s32 (*enable_sec_rx_path)(struct ixgbe_hw *);
+ s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
+ void (*release_swfw_sync)(struct ixgbe_hw *, u32);
+ void (*init_swfw_sync)(struct ixgbe_hw *);
+ s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+ s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
+ s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
+
+ /* Link */
+ void (*disable_tx_laser)(struct ixgbe_hw *);
+ void (*enable_tx_laser)(struct ixgbe_hw *);
+ void (*flap_tx_laser)(struct ixgbe_hw *);
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ bool *);
+ void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed);
+
+ /* Packet Buffer manipulation */
+ void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int);
+
+ /* LED */
+ s32 (*led_on)(struct ixgbe_hw *, u32);
+ s32 (*led_off)(struct ixgbe_hw *, u32);
+ s32 (*blink_led_start)(struct ixgbe_hw *, u32);
+ s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
+ s32 (*init_led_link_act)(struct ixgbe_hw *);
+
+ /* RAR, Multicast, VLAN */
+ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+ s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
+ s32 (*clear_rar)(struct ixgbe_hw *, u32);
+ s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32);
+ s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
+ s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
+ s32 (*init_rx_addrs)(struct ixgbe_hw *);
+ s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+ ixgbe_mc_addr_itr);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+ ixgbe_mc_addr_itr, bool clear);
+ s32 (*enable_mc)(struct ixgbe_hw *);
+ s32 (*disable_mc)(struct ixgbe_hw *);
+ s32 (*clear_vfta)(struct ixgbe_hw *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
+ s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, u32 *, u32,
+ bool);
+ s32 (*init_uta_tables)(struct ixgbe_hw *);
+ void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
+ void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
+ s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
+ s32 (*set_rlpml)(struct ixgbe_hw *, u16);
+
+ /* Flow Control */
+ s32 (*fc_enable)(struct ixgbe_hw *);
+ s32 (*setup_fc)(struct ixgbe_hw *);
+ void (*fc_autoneg)(struct ixgbe_hw *);
+
+ /* Manageability interface */
+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
+ const char *);
+ s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
+ s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+ void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map);
+ void (*disable_rx)(struct ixgbe_hw *hw);
+ void (*enable_rx)(struct ixgbe_hw *hw);
+ void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
+ unsigned int);
+ void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
+ s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
+ s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
+ s32 (*dmac_config)(struct ixgbe_hw *hw);
+ s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee);
+ s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+ s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+ void (*disable_mdd)(struct ixgbe_hw *hw);
+ void (*enable_mdd)(struct ixgbe_hw *hw);
+ void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap);
+ void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
+};
+
+struct ixgbe_phy_operations {
+ s32 (*identify)(struct ixgbe_hw *);
+ s32 (*identify_sfp)(struct ixgbe_hw *);
+ s32 (*init)(struct ixgbe_hw *);
+ s32 (*reset)(struct ixgbe_hw *);
+ s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
+ s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+ s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
+ s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
+ s32 (*setup_link)(struct ixgbe_hw *);
+ s32 (*setup_internal_link)(struct ixgbe_hw *);
+ s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
+ s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
+ s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+ s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
+ s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
+ s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ void (*i2c_bus_clear)(struct ixgbe_hw *);
+ s32 (*check_overtemp)(struct ixgbe_hw *);
+ s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+ s32 (*enter_lplu)(struct ixgbe_hw *);
+ s32 (*handle_lasi)(struct ixgbe_hw *hw);
+ s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ u8 *value);
+ s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ u8 value);
+};
+
+struct ixgbe_link_operations {
+ s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
+ s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 *val);
+ s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
+ s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 val);
+};
+
+struct ixgbe_link_info {
+ struct ixgbe_link_operations ops;
+ u8 addr;
+};
+
+struct ixgbe_eeprom_info {
+ struct ixgbe_eeprom_operations ops;
+ enum ixgbe_eeprom_type type;
+ u32 semaphore_delay;
+ u16 word_size;
+ u16 address_bits;
+ u16 word_page_size;
+ u16 ctrl_word_3;
+};
+
+#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
+struct ixgbe_mac_info {
+ struct ixgbe_mac_operations ops;
+ enum ixgbe_mac_type type;
+ u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ /* prefix for World Wide Node Name (WWNN) */
+ u16 wwnn_prefix;
+ /* prefix for World Wide Port Name (WWPN) */
+ u16 wwpn_prefix;
+#define IXGBE_MAX_MTA 128
+ u32 mta_shadow[IXGBE_MAX_MTA];
+ s32 mc_filter_type;
+ u32 mcft_size;
+ u32 vft_size;
+ u32 num_rar_entries;
+ u32 rar_highwater;
+ u32 rx_pb_size;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+ u32 orig_autoc;
+ u8 san_mac_rar_index;
+ bool get_link_status;
+ u32 orig_autoc2;
+ u16 max_msix_vectors;
+ bool arc_subsystem_valid;
+ bool orig_link_settings_stored;
+ bool autotry_restart;
+ u8 flags;
+ struct ixgbe_thermal_sensor_data thermal_sensor_data;
+ bool thermal_sensor_enabled;
+ struct ixgbe_dmac_config dmac_config;
+ bool set_lben;
+ u32 max_link_up_time;
+ u8 led_link_act;
+};
+
+struct ixgbe_phy_info {
+ struct ixgbe_phy_operations ops;
+ enum ixgbe_phy_type type;
+ u32 addr;
+ u32 id;
+ enum ixgbe_sfp_type sfp_type;
+ bool sfp_setup_needed;
+ u32 revision;
+ enum ixgbe_media_type media_type;
+ u32 phy_semaphore_mask;
+ bool reset_disable;
+ ixgbe_autoneg_advertised autoneg_advertised;
+ ixgbe_link_speed speeds_supported;
+ ixgbe_link_speed eee_speeds_supported;
+ ixgbe_link_speed eee_speeds_advertised;
+ enum ixgbe_smart_speed smart_speed;
+ bool smart_speed_active;
+ bool multispeed_fiber;
+ bool reset_if_overtemp;
+ bool qsfp_shared_i2c_bus;
+ u32 nw_mng_if_sel;
+};
+
+#include "ixgbe_mbx.h"
+
+struct ixgbe_mbx_operations {
+ void (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *, u16);
+ s32 (*check_for_ack)(struct ixgbe_hw *, u16);
+ s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
+struct ixgbe_hw {
+ u8 IOMEM *hw_addr;
+ void *back;
+ struct ixgbe_mac_info mac;
+ struct ixgbe_addr_filter_info addr_ctrl;
+ struct ixgbe_fc_info fc;
+ struct ixgbe_phy_info phy;
+ struct ixgbe_link_info link;
+ struct ixgbe_eeprom_info eeprom;
+ struct ixgbe_bus_info bus;
+ struct ixgbe_mbx_info mbx;
+ const u32 *mvals;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ bool adapter_stopped;
+ int api_version;
+ bool force_full_reset;
+ bool allow_unsupported_sfp;
+ bool wol_enabled;
+ bool need_crosstalk_fix;
+};
+
+#define ixgbe_call_func(hw, func, params, error) \
+ (func != NULL) ? func params : error
+
+
+/* Error Codes */
+#define IXGBE_SUCCESS 0
+#define IXGBE_ERR_EEPROM -1
+#define IXGBE_ERR_EEPROM_CHECKSUM -2
+#define IXGBE_ERR_PHY -3
+#define IXGBE_ERR_CONFIG -4
+#define IXGBE_ERR_PARAM -5
+#define IXGBE_ERR_MAC_TYPE -6
+#define IXGBE_ERR_UNKNOWN_PHY -7
+#define IXGBE_ERR_LINK_SETUP -8
+#define IXGBE_ERR_ADAPTER_STOPPED -9
+#define IXGBE_ERR_INVALID_MAC_ADDR -10
+#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
+#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
+#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
+#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
+#define IXGBE_ERR_RESET_FAILED -15
+#define IXGBE_ERR_SWFW_SYNC -16
+#define IXGBE_ERR_PHY_ADDR_INVALID -17
+#define IXGBE_ERR_I2C -18
+#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
+#define IXGBE_ERR_SFP_NOT_PRESENT -20
+#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
+#define IXGBE_ERR_FDIR_REINIT_FAILED -23
+#define IXGBE_ERR_EEPROM_VERSION -24
+#define IXGBE_ERR_NO_SPACE -25
+#define IXGBE_ERR_OVERTEMP -26
+#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
+#define IXGBE_ERR_FC_NOT_SUPPORTED -28
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define IXGBE_ERR_PBA_SECTION -31
+#define IXGBE_ERR_INVALID_ARGUMENT -32
+#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
+#define IXGBE_ERR_OUT_OF_MEM -34
+#define IXGBE_ERR_FEATURE_NOT_SUPPORTED -36
+#define IXGBE_ERR_EEPROM_PROTECTED_REGION -37
+#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
+#define IXGBE_ERR_FW_RESP_INVALID -39
+#define IXGBE_ERR_TOKEN_RETRY -40
+
+#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+
+#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
+#define IXGBE_FUSES0_300MHZ (1 << 5)
+#define IXGBE_FUSES0_REV_MASK (3 << 6)
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
+#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238)
+#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918)
+#define IXGBE_KRM_PCS_KX_AN_LP(P) ((P) ? 0x991C : 0x591C)
+#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
+#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
+#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
+
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR (1u << 20)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN (1u << 25)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN (1u << 26)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN (1u << 27)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M (1u << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART (1u << 31)
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
+
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN (1 << 12)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN (1 << 13)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
+#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE (1 << 28)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
+
+#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28)
+#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29)
+#define IXGBE_KRM_PCS_KX_AN_SYM_PAUSE (1 << 1)
+#define IXGBE_KRM_PCS_KX_AN_ASM_PAUSE (1 << 2)
+#define IXGBE_KRM_PCS_KX_AN_LP_SYM_PAUSE (1 << 2)
+#define IXGBE_KRM_PCS_KX_AN_LP_ASM_PAUSE (1 << 3)
+#define IXGBE_KRM_AN_CNTL_4_ECSR_AN37_OVER_73 (1 << 29)
+#define IXGBE_KRM_AN_CNTL_8_LINEAR (1 << 0)
+#define IXGBE_KRM_AN_CNTL_8_LIMITING (1 << 1)
+
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE (1 << 10)
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE (1 << 11)
+
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D (1 << 12)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D (1 << 19)
+
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
+
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2)
+
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16)
+
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
+
+#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
+#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
+
+#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0
+#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \
+ (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \
+ (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7
+#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
+#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
+
+#define IXGBE_NW_MNG_IF_SEL 0x00011178
+#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT (1u << 1)
+#define IXGBE_NW_MNG_IF_SEL_MDIO_IF_MODE (1u << 2)
+#define IXGBE_NW_MNG_IF_SEL_EN_SHARED_MDIO (1u << 13)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M (1u << 17)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M (1u << 18)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G (1u << 19)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G (1u << 20)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G (1u << 21)
+#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE (1u << 25)
+#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24) /* X552 reg field only */
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
+ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
+
+#endif /* _IXGBE_TYPE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c
new file mode 100644
index 00000000..5b25a6b4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c
@@ -0,0 +1,784 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "ixgbe_api.h"
+#include "ixgbe_type.h"
+#include "ixgbe_vf.h"
+
+#ifndef IXGBE_VFWRITE_REG
+#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
+#endif
+#ifndef IXGBE_VFREAD_REG
+#define IXGBE_VFREAD_REG IXGBE_READ_REG
+#endif
+
+/**
+ * ixgbe_init_ops_vf - Initialize the pointers for vf
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers, adapter-specific functions can
+ * override the assignment of generic function pointers by assigning
+ * their own adapter-specific function pointers.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
+{
+ /* MAC */
+ hw->mac.ops.init_hw = ixgbe_init_hw_vf;
+ hw->mac.ops.reset_hw = ixgbe_reset_hw_vf;
+ hw->mac.ops.start_hw = ixgbe_start_hw_vf;
+ /* Cannot clear stats on VF */
+ hw->mac.ops.clear_hw_cntrs = NULL;
+ hw->mac.ops.get_media_type = NULL;
+ hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf;
+ hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf;
+ hw->mac.ops.get_bus_info = NULL;
+ hw->mac.ops.negotiate_api_version = ixgbevf_negotiate_api_version;
+
+ /* Link */
+ hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf;
+ hw->mac.ops.check_link = ixgbe_check_mac_link_vf;
+ hw->mac.ops.get_link_capabilities = NULL;
+
+ /* RAR, Multicast, VLAN */
+ hw->mac.ops.set_rar = ixgbe_set_rar_vf;
+ hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf;
+ hw->mac.ops.init_rx_addrs = NULL;
+ hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf;
+ hw->mac.ops.update_xcast_mode = ixgbevf_update_xcast_mode;
+ hw->mac.ops.enable_mc = NULL;
+ hw->mac.ops.disable_mc = NULL;
+ hw->mac.ops.clear_vfta = NULL;
+ hw->mac.ops.set_vfta = ixgbe_set_vfta_vf;
+ hw->mac.ops.set_rlpml = ixgbevf_rlpml_set_vf;
+
+ hw->mac.max_tx_queues = 1;
+ hw->mac.max_rx_queues = 1;
+
+ hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf;
+
+ return IXGBE_SUCCESS;
+}
+
+/* ixgbe_virt_clr_reg - Set register to default (power on) state.
+ * @hw: pointer to hardware structure
+ */
+static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw)
+{
+ int i;
+ u32 vfsrrctl;
+ u32 vfdca_rxctrl;
+ u32 vfdca_txctrl;
+
+ /* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
+ vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
+ /* DCA_RXCTRL default value */
+ vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
+
+ /* DCA_TXCTRL default value */
+ vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_TXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_TXCTRL_DATA_RRO_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+
+ for (i = 0; i < 7; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl);
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type, clears
+ * all on chip counters, initializes receive address registers, multicast
+ * table, VLAN filter table, calls routine to set up link and flow control
+ * settings, and leaves transmit and receive units disabled and uninitialized
+ **/
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw)
+{
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware and then starting
+ * the hardware
+ **/
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw)
+{
+ s32 status = hw->mac.ops.start_hw(hw);
+
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by reseting the transmit and receive units, masks and
+ * clears all interrupts.
+ **/
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+ s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+ u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ DEBUGFUNC("ixgbevf_reset_hw_vf");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ /* reset the api version */
+ hw->api_version = ixgbe_mbox_api_10;
+
+ DEBUGOUT("Issuing a function level reset to MAC\n");
+
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+ IXGBE_WRITE_FLUSH(hw);
+
+ msec_delay(50);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
+ timeout--;
+ usec_delay(5);
+ }
+
+ if (!timeout)
+ return IXGBE_ERR_RESET_FAILED;
+
+ /* Reset VF registers to initial values */
+ ixgbe_virt_clr_reg(hw);
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = IXGBE_VF_RESET;
+ mbx->ops.write_posted(hw, msgbuf, 1, 0);
+
+ msec_delay(10);
+
+ /*
+ * set our "perm_addr" based on info provided by PF
+ * also set up the mc_filter_type which is piggy backed
+ * on the mac address in word 3
+ */
+ ret_val = mbx->ops.read_posted(hw, msgbuf,
+ IXGBE_VF_PERMADDR_MSG_LEN, 0);
+ if (ret_val)
+ return ret_val;
+
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
+ msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
+ return IXGBE_ERR_INVALID_MAC_ADDR;
+
+ if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+ hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_stop_adapter_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw)
+{
+ u32 reg_val;
+ u16 i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+ /* Clear interrupt mask to stop from interrupts being generated */
+ IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts, flush previous writes */
+ IXGBE_VFREAD_REG(hw, IXGBE_VTEICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < hw->mac.max_tx_queues; i++)
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i));
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+ }
+ /* Clear packet split and pool config */
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+
+ /* flush all queues disables */
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(2);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts the 12 bits, from a multicast address, to determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits, from
+ * incoming rx multicast addresses, to determine the bit-vector to check in
+ * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
+ * by the MO field of the MCSTCTRL. The MO field is set during initialization
+ * to mc_filter_type.
+ **/
+STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ DEBUGOUT("MC filter type param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+STATIC s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
+ u32 *retmsg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
+
+ if (retval)
+ return retval;
+
+ return mbx->ops.read_posted(hw, retmsg, size, 0);
+}
+
+/**
+ * ixgbe_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ **/
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+ UNREFERENCED_3PARAMETER(vmdq, enable_addr, index);
+
+ memset(msgbuf, 0, 12);
+ msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+ memcpy(msg_addr, addr, 6);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
+ ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
+ return IXGBE_ERR_MBX;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ * @next: caller supplied function to return next address in list
+ * @clear: unused
+ *
+ * Updates the Multicast Table Array.
+ **/
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr next,
+ bool clear)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+ u16 *vector_list = (u16 *)&msgbuf[1];
+ u32 vector;
+ u32 cnt, i;
+ u32 vmdq;
+
+ UNREFERENCED_1PARAMETER(clear);
+
+ DEBUGFUNC("ixgbe_update_mc_addr_list_vf");
+
+ /* Each entry in the list uses 1 16 bit word. We have 30
+ * 16 bit words available in our HW msg buffer (minus 1 for the
+ * msg type). That's 30 hash values if we pack 'em right. If
+ * there are more than 30 MC addresses to add then punt the
+ * extras for now and then add code to handle more than 30 later.
+ * It would be unusual for a server to request that many multi-cast
+ * addresses except for in large enterprise network environments.
+ */
+
+ DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+
+ cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+ msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+ for (i = 0; i < cnt; i++) {
+ vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+ DEBUGOUT1("Hash value = 0x%03X\n", vector);
+ vector_list[i] = (u16)vector;
+ }
+
+ return mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0);
+}
+
+/**
+ * ixgbevf_update_xcast_mode - Update Multicast mode
+ * @hw: pointer to the HW structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the Multicast Mode of VF.
+ **/
+s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+ u32 msgbuf[2];
+ s32 err;
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_12:
+ /* New modes were introduced in 1.3 version */
+ if (xcast_mode > IXGBEVF_XCAST_MODE_ALLMULTI)
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ /* Fall through */
+ case ixgbe_mbox_api_13:
+ break;
+ default:
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ }
+
+ msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
+ msgbuf[1] = xcast_mode;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ if (err)
+ return err;
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+ if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass)
+{
+ u32 msgbuf[2];
+ s32 ret_val;
+ UNREFERENCED_2PARAMETER(vind, vlvf_bypass);
+
+ msgbuf[0] = IXGBE_VF_SET_VLAN;
+ msgbuf[1] = vlan;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_SUCCESS;
+
+ return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK);
+}
+
+/**
+ * ixgbe_get_num_of_tx_queues_vf - Get number of TX queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of transmit queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return IXGBE_VF_MAX_TX_QUEUES;
+}
+
+/**
+ * ixgbe_get_num_of_rx_queues_vf - Get number of RX queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of receive queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return IXGBE_VF_MAX_RX_QUEUES;
+}
+
+/**
+ * ixgbe_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: the MAC address
+ **/
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++)
+ mac_addr[i] = hw->mac.perm_addr[i];
+
+ return IXGBE_SUCCESS;
+}
+
+s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ u32 msgbuf[3], msgbuf_chk;
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ /*
+ * If index is one then this is the start of a new list and needs
+ * indication to the PF so it can do it's own list management.
+ * If it is zero then that tells the PF to just clear all of
+ * this VF's macvlans and there is no new list.
+ */
+ msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
+ msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+ msgbuf_chk = msgbuf[0];
+ if (addr)
+ memcpy(msg_addr, addr, 6);
+
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+ if (!ret_val) {
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
+ return IXGBE_ERR_OUT_OF_MEM;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ UNREFERENCED_3PARAMETER(hw, speed, autoneg_wait_to_complete);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true is link is up, false otherwise
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 links_reg;
+ u32 in_msg = 0;
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ usec_delay(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* Since Reserved in older MAC's */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ /* if the read failed it could just be a mailbox collision, best wait
+ * until we are called again and don't report an error
+ */
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ goto out;
+
+ if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+ /* msg is not CTS and is NACK we must have lost CTS status */
+ if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+ ret_val = -1;
+ goto out;
+ }
+
+ /* the pf is talking, if we timed out in the past we reinit */
+ if (!mbx->timeout) {
+ ret_val = -1;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return ret_val;
+}
+
+/**
+ * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ **/
+s32 ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+ u32 msgbuf[2];
+ s32 retval;
+
+ msgbuf[0] = IXGBE_VF_SET_LPE;
+ msgbuf[1] = max_size;
+
+ retval = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ if (retval)
+ return retval;
+ if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
+ (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
+ return IXGBE_ERR_MBX;
+
+ return 0;
+}
+
+/**
+ * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ **/
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+{
+ int err;
+ u32 msg[3];
+
+ /* Negotiate the mailbox API version */
+ msg[0] = IXGBE_VF_API_NEGOTIATE;
+ msg[1] = api;
+ msg[2] = 0;
+
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3);
+ if (!err) {
+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* Store value and return 0 on success */
+ if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+ hw->api_version = api;
+ return 0;
+ }
+
+ err = IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ return err;
+}
+
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc)
+{
+ int err;
+ u32 msg[5];
+
+ /* do nothing if API doesn't support ixgbevf_get_queues */
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_11:
+ case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
+ break;
+ default:
+ return 0;
+ }
+
+ /* Fetch queue configuration from the PF */
+ msg[0] = IXGBE_VF_GET_QUEUES;
+ msg[1] = msg[2] = msg[3] = msg[4] = 0;
+
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5);
+ if (!err) {
+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /*
+ * if we we didn't get an ACK there must have been
+ * some sort of mailbox error so we should treat it
+ * as such
+ */
+ if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_MBX;
+
+ /* record and validate values from message */
+ hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+ if (hw->mac.max_tx_queues == 0 ||
+ hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+ hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+ hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+ if (hw->mac.max_rx_queues == 0 ||
+ hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+ hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+ *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
+ /* in case of unknown state assume we cannot tag frames */
+ if (*num_tcs > hw->mac.max_rx_queues)
+ *num_tcs = 1;
+
+ *default_tc = msg[IXGBE_VF_DEF_QUEUE];
+ /* default to queue 0 on out-of-bounds queue number */
+ if (*default_tc >= hw->mac.max_tx_queues)
+ *default_tc = 0;
+ }
+
+ return err;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.h
new file mode 100644
index 00000000..3efffe82
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.h
@@ -0,0 +1,145 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_VF_H_
+#define _IXGBE_VF_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VF_IRQ_CLEAR_MASK 7
+#define IXGBE_VF_MAX_TX_QUEUES 8
+#define IXGBE_VF_MAX_RX_QUEUES 8
+
+/* DCB define */
+#define IXGBE_VF_MAX_TRAFFIC_CLASS 8
+
+#define IXGBE_VFCTRL 0x00000
+#define IXGBE_VFSTATUS 0x00008
+#define IXGBE_VFLINKS 0x00010
+#define IXGBE_VFFRTIMER 0x00048
+#define IXGBE_VFRXMEMWRAP 0x03190
+#define IXGBE_VTEICR 0x00100
+#define IXGBE_VTEICS 0x00104
+#define IXGBE_VTEIMS 0x00108
+#define IXGBE_VTEIMC 0x0010C
+#define IXGBE_VTEIAC 0x00110
+#define IXGBE_VTEIAM 0x00114
+#define IXGBE_VTEITR(x) (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x)))
+#define IXGBE_VTIVAR_MISC 0x00140
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x)))
+/* define IXGBE_VFPBACL still says TBD in EAS */
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x)))
+#define IXGBE_VFPSRTYPE 0x00300
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x)))
+#define IXGBE_VFGPRC 0x0101C
+#define IXGBE_VFGPTC 0x0201C
+#define IXGBE_VFGORC_LSB 0x01020
+#define IXGBE_VFGORC_MSB 0x01024
+#define IXGBE_VFGOTC_LSB 0x02020
+#define IXGBE_VFGOTC_MSB 0x02024
+#define IXGBE_VFMPRC 0x01034
+#define IXGBE_VFMRQC 0x3000
+#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4))
+#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4))
+
+
+struct ixgbevf_hw_stats {
+ u64 base_vfgprc;
+ u64 base_vfgptc;
+ u64 base_vfgorc;
+ u64 base_vfgotc;
+ u64 base_vfmprc;
+
+ u64 last_vfgprc;
+ u64 last_vfgptc;
+ u64 last_vfgorc;
+ u64 last_vfgotc;
+ u64 last_vfmprc;
+
+ u64 vfgprc;
+ u64 vfgptc;
+ u64 vfgorc;
+ u64 vfgotc;
+ u64 vfmprc;
+
+ u64 saved_reset_vfgprc;
+ u64 saved_reset_vfgptc;
+ u64 saved_reset_vfgorc;
+ u64 saved_reset_vfgotc;
+ u64 saved_reset_vfmprc;
+};
+
+s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool autoneg_wait_to_complete);
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr,
+ bool clear);
+s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode);
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass);
+s32 ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc);
+
+#endif /* __IXGBE_VF_H__ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.c
new file mode 100644
index 00000000..716664bb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.c
@@ -0,0 +1,1063 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+#define IXGBE_X540_MAX_TX_QUEUES 128
+#define IXGBE_X540_MAX_RX_QUEUES 128
+#define IXGBE_X540_RAR_ENTRIES 128
+#define IXGBE_X540_MC_TBL_SIZE 128
+#define IXGBE_X540_VFT_TBL_SIZE 128
+#define IXGBE_X540_RX_PB_SIZE 384
+
+STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
+
+/**
+ * ixgbe_init_ops_X540 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for X540.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X540");
+
+ ret_val = ixgbe_init_phy_ops_generic(hw);
+ ret_val = ixgbe_init_ops_generic(hw);
+
+
+ /* EEPROM */
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
+ eeprom->ops.read = ixgbe_read_eerd_X540;
+ eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540;
+ eeprom->ops.write = ixgbe_write_eewr_X540;
+ eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540;
+ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540;
+
+ /* PHY */
+ phy->ops.init = ixgbe_init_phy_ops_generic;
+ phy->ops.reset = NULL;
+ phy->ops.set_phy_power = ixgbe_set_copper_phy_power;
+
+ /* MAC */
+ mac->ops.reset_hw = ixgbe_reset_hw_X540;
+ mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
+ mac->ops.get_media_type = ixgbe_get_media_type_X540;
+ mac->ops.get_supported_physical_layer =
+ ixgbe_get_supported_physical_layer_X540;
+ mac->ops.read_analog_reg8 = NULL;
+ mac->ops.write_analog_reg8 = NULL;
+ mac->ops.start_hw = ixgbe_start_hw_X540;
+ mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
+ mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
+ mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
+ mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
+ mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
+ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540;
+ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540;
+ mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540;
+ mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
+ mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
+ mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
+ mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
+ mac->rar_highwater = 1;
+ mac->ops.set_vfta = ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
+ mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
+ mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
+ mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
+ mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
+
+ /* Link */
+ mac->ops.get_link_capabilities =
+ ixgbe_get_copper_link_capabilities_generic;
+ mac->ops.setup_link = ixgbe_setup_mac_link_X540;
+ mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
+ mac->ops.check_link = ixgbe_check_mac_link_generic;
+
+
+ mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE;
+ mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ /*
+ * FWSM register
+ * ARC supported; valid only if manageability features are
+ * enabled.
+ */
+ mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
+ & IXGBE_FWSM_MODE_MASK);
+
+ hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+
+ /* LEDs */
+ mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
+ mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
+
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
+
+ mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_link_capabilities_X540 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_media_type_X540 - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return ixgbe_media_type_copper;
+}
+
+/**
+ * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ **/
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ DEBUGFUNC("ixgbe_setup_mac_link_X540");
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
+}
+
+/**
+ * ixgbe_reset_hw_X540 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and perform a reset.
+ **/
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 ctrl, i;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+
+ DEBUGFUNC("ixgbe_reset_hw_X540");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+mac_reset_top:
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Reset polling failed to complete.\n");
+ }
+ msec_delay(100);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
+ hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
+ IXGBE_CLEAR_VMDQ_ALL);
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
+reset_hw_out:
+ return status;
+}
+
+/**
+ * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function
+ * and the generation start_hw function.
+ * Then performs revision-specific operations, if any.
+ **/
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_start_hw_X540");
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ ret_val = ixgbe_start_hw_gen2(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+{
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u16 ext_ability = 0;
+
+ DEBUGFUNC("ixgbe_get_supported_physical_layer_X540");
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ DEBUGFUNC("ixgbe_init_eeprom_params_X540");
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_eerd_X540- Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_read_eerd_X540");
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ status = ixgbe_read_eerd_generic(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_read_eerd_buffer_X540- Read EEPROM word(s) using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads a 16 bit word(s) from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_read_eerd_buffer_X540");
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ status = ixgbe_read_eerd_buffer_generic(hw, offset,
+ words, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_write_eewr_X540");
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ status = ixgbe_write_eewr_generic(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) write to the EEPROM
+ *
+ * Write a 16 bit word(s) to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_write_eewr_buffer_X540");
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ status = ixgbe_write_eewr_buffer_generic(hw, offset,
+ words, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
+ *
+ * This function does not use synchronization for EERD and EEWR. It can
+ * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540.
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ u16 i, j;
+ u16 checksum = 0;
+ u16 length = 0;
+ u16 pointer = 0;
+ u16 word = 0;
+ u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
+
+ /* Do not use hw->eeprom.ops.read because we do not want to take
+ * the synchronization semaphores here. Instead use
+ * ixgbe_read_eerd_generic
+ */
+
+ DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
+
+ /* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the
+ * checksum itself
+ */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+ if (ixgbe_read_eerd_generic(hw, i, &word)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+ checksum += word;
+ }
+
+ /* Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+
+ /* Skip pointer section if length is invalid. */
+ if (length == 0xFFFF || length == 0 ||
+ (pointer + length) >= hw->eeprom.word_size)
+ continue;
+
+ for (j = pointer + 1; j <= pointer + length; j++) {
+ if (ixgbe_read_eerd_generic(hw, j, &word)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+ checksum += word;
+ }
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return (s32)checksum;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+ u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ goto out;
+
+ checksum = (u16)(status & 0xffff);
+
+ /* Do not use hw->eeprom.ops.read because we do not want to take
+ * the synchronization semaphores twice here.
+ */
+ status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
+ &read_checksum);
+ if (status)
+ goto out;
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Invalid EEPROM checksum");
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+ }
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to shadow RAM using EEWR register, software calculates
+ * checksum and updates the EEPROM and instructs the hardware to update
+ * the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum;
+
+ DEBUGFUNC("ixgbe_update_eeprom_checksum_X540");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ goto out;
+
+ checksum = (u16)(status & 0xffff);
+
+ /* Do not use hw->eeprom.ops.write because we do not want to
+ * take the synchronization semaphores twice here.
+ */
+ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+ if (status)
+ goto out;
+
+ status = ixgbe_update_flash_X540(hw);
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ return status;
+}
+
+/**
+ * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
+ * EEPROM from shadow RAM to the flash device.
+ **/
+s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+{
+ u32 flup;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_update_flash_X540");
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_ERR_EEPROM) {
+ DEBUGOUT("Flash update time out\n");
+ goto out;
+ }
+
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)) | IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup);
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_SUCCESS)
+ DEBUGOUT("Flash update complete\n");
+ else
+ DEBUGOUT("Flash update time out\n");
+
+ if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) {
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
+
+ if (flup & IXGBE_EEC_SEC1VAL) {
+ flup |= IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup);
+ }
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_SUCCESS)
+ DEBUGOUT("Flash update complete\n");
+ else
+ DEBUGOUT("Flash update time out\n");
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_poll_flash_update_done_X540 - Poll flash update status
+ * @hw: pointer to hardware structure
+ *
+ * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
+ * flash update is done.
+ **/
+STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 reg;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ DEBUGFUNC("ixgbe_poll_flash_update_done_X540");
+
+ for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
+ if (reg & IXGBE_EEC_FLUDONE) {
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ msec_delay(5);
+ }
+
+ if (i == IXGBE_FLUDONE_ATTEMPTS)
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Flash update status polling timed out");
+
+ return status;
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore thought the SW_FW_SYNC register for
+ * the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
+ u32 fwmask = swmask << 5;
+ u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
+ u32 timeout = 200;
+ u32 hwmask = 0;
+ u32 swfw_sync;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_acquire_swfw_sync_X540");
+
+ if (swmask & IXGBE_GSSR_EEP_SM)
+ hwmask |= IXGBE_GSSR_FLASH_SM;
+
+ /* SW only mask doesn't have FW bit pair */
+ if (mask & IXGBE_GSSR_SW_MNG_SM)
+ swmask |= IXGBE_GSSR_SW_MNG_SM;
+
+ swmask |= swi2c_mask;
+ fwmask |= swi2c_mask << 2;
+ if (hw->mac.type >= ixgbe_mac_X550)
+ timeout = 1000;
+
+ for (i = 0; i < timeout; i++) {
+ /* SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ DEBUGOUT("Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n");
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
+ if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw),
+ swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ return IXGBE_SUCCESS;
+ }
+ /* Firmware currently using resource (fwmask), hardware
+ * currently using resource (hwmask), or other software
+ * thread currently using resource (swmask)
+ */
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(5);
+ }
+
+ /* If the resource is not released by the FW/HW the SW can assume that
+ * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
+ * of the requested resource(s) while ignoring the corresponding FW/HW
+ * bits in the SW_FW_SYNC register.
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ DEBUGOUT("Failed to get NVM sempahore and register semaphore while forcefully ignoring FW sempahore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n");
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
+ if (swfw_sync & (fwmask | hwmask)) {
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(5);
+ return IXGBE_SUCCESS;
+ }
+ /* If the resource is not released by other SW the SW can assume that
+ * the other SW malfunctions. In that case the SW should clear all SW
+ * flags that it does not own and then repeat the whole process once
+ * again.
+ */
+ if (swfw_sync & swmask) {
+ u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
+ IXGBE_GSSR_SW_MNG_SM;
+
+ if (swi2c_mask)
+ rmask |= IXGBE_GSSR_I2C_MASK;
+ ixgbe_release_swfw_sync_X540(hw, rmask);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ DEBUGOUT("Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n");
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+ ixgbe_release_swfw_sync_semaphore(hw);
+ DEBUGOUT("Returning error IXGBE_ERR_SWFW_SYNC\n");
+
+ return IXGBE_ERR_SWFW_SYNC;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the SW_FW_SYNC register
+ * for the specified function (CSR, PHY0, PHY1, EVM, Flash)
+ **/
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM);
+ u32 swfw_sync;
+
+ DEBUGFUNC("ixgbe_release_swfw_sync_X540");
+
+ if (mask & IXGBE_GSSR_I2C_MASK)
+ swmask |= mask & IXGBE_GSSR_I2C_MASK;
+ ixgbe_get_swfw_sync_semaphore(hw);
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
+ swfw_sync &= ~swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);
+
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(2);
+}
+
+/**
+ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so SW/FW can gain control of shared resources
+ **/
+STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM;
+ u32 timeout = 2000;
+ u32 i;
+ u32 swsm;
+
+ DEBUGFUNC("ixgbe_get_swfw_sync_semaphore");
+
+ /* Get SMBI software semaphore between device drivers first */
+ for (i = 0; i < timeout; i++) {
+ /*
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
+ if (!(swsm & IXGBE_SWSM_SMBI)) {
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ usec_delay(50);
+ }
+
+ /* Now get the semaphore between SW/FW through the REGSMP bit */
+ if (status == IXGBE_SUCCESS) {
+ for (i = 0; i < timeout; i++) {
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
+ if (!(swsm & IXGBE_SWFW_REGSMP))
+ break;
+
+ usec_delay(50);
+ }
+
+ /*
+ * Release semaphores and return error if SW NVM semaphore
+ * was not granted because we don't have access to the EEPROM
+ */
+ if (i >= timeout) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "REGSMP Software NVM semaphore not granted.\n");
+ ixgbe_release_swfw_sync_semaphore(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ } else {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Software semaphore SMBI between device drivers "
+ "not granted.\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("ixgbe_release_swfw_sync_semaphore");
+
+ /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
+ swsm &= ~IXGBE_SWFW_REGSMP;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swsm);
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
+ swsm &= ~IXGBE_SWSM_SMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_init_swfw_sync_X540 - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function reset hardware semaphore bits for a semaphore that may
+ * have be left locked due to a catastrophic failure.
+ **/
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
+{
+ u32 rmask;
+
+ /* First try to grab the semaphore but we don't need to bother
+ * looking to see whether we got the lock or not since we do
+ * the same thing regardless of whether we got the lock or not.
+ * We got the lock - we release it.
+ * We timeout trying to get the lock - we force its release.
+ */
+ ixgbe_get_swfw_sync_semaphore(hw);
+ ixgbe_release_swfw_sync_semaphore(hw);
+
+ /* Acquire and release all software resources. */
+ rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
+ IXGBE_GSSR_SW_MNG_SM;
+
+ rmask |= IXGBE_GSSR_I2C_MASK;
+ ixgbe_acquire_swfw_sync_X540(hw, rmask);
+ ixgbe_release_swfw_sync_X540(hw, rmask);
+}
+
+/**
+ * ixgbe_blink_led_start_X540 - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ *
+ * Devices that implement the version 2 interface:
+ * X540
+ **/
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+{
+ u32 macc_reg;
+ u32 ledctl_reg;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ DEBUGFUNC("ixgbe_blink_led_start_X540");
+
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
+ /*
+ * Link should be up in order for the blink bit in the LED control
+ * register to work. Force link and speed in the MAC if link is down.
+ * This will be reversed when we stop the blinking.
+ */
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (link_up == false) {
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ }
+ /* Set the LED to LINK_UP + BLINK. */
+ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+ ledctl_reg |= IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ *
+ * Devices that implement the version 2 interface:
+ * X540
+ **/
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+{
+ u32 macc_reg;
+ u32 ledctl_reg;
+
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
+ DEBUGFUNC("ixgbe_blink_led_stop_X540");
+
+ /* Restore the LED to its default value. */
+ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+ ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+ ledctl_reg &= ~IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+
+ /* Unforce link and speed in the MAC. */
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.h
new file mode 100644
index 00000000..8a19ae2e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.h
@@ -0,0 +1,67 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_X540_H_
+#define _IXGBE_X540_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool link_up_wait_to_complete);
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
+s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+#endif /* _IXGBE_X540_H_ */
+
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.c
new file mode 100644
index 00000000..f66f5407
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -0,0 +1,4663 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_x550.h"
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
+STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
+STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
+STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
+
+/**
+ * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for X550.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X550");
+
+ ret_val = ixgbe_init_ops_X540(hw);
+ mac->ops.dmac_config = ixgbe_dmac_config_X550;
+ mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
+ mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
+ mac->ops.setup_eee = NULL;
+ mac->ops.set_source_address_pruning =
+ ixgbe_set_source_address_pruning_X550;
+ mac->ops.set_ethertype_anti_spoofing =
+ ixgbe_set_ethertype_anti_spoofing_X550;
+
+ mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
+ eeprom->ops.read = ixgbe_read_ee_hostif_X550;
+ eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
+ eeprom->ops.write = ixgbe_write_ee_hostif_X550;
+ eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
+ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
+
+ mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
+ mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
+ mac->ops.mdd_event = ixgbe_mdd_event_X550;
+ mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
+ mac->ops.disable_rx = ixgbe_disable_rx_x550;
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ hw->mac.ops.led_on = NULL;
+ hw->mac.ops.led_off = NULL;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
+ hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
+ break;
+ default:
+ break;
+ }
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_cs4227 - Read CS4227 register
+ * @hw: pointer to hardware structure
+ * @reg: register number to write
+ * @value: pointer to receive value read
+ *
+ * Returns status code
+ **/
+STATIC s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
+{
+ return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
+}
+
+/**
+ * ixgbe_write_cs4227 - Write CS4227 register
+ * @hw: pointer to hardware structure
+ * @reg: register number to write
+ * @value: value to write to register
+ *
+ * Returns status code
+ **/
+STATIC s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
+{
+ return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
+}
+
+/**
+ * ixgbe_read_pe - Read register from port expander
+ * @hw: pointer to hardware structure
+ * @reg: register number to read
+ * @value: pointer to receive read value
+ *
+ * Returns status code
+ **/
+STATIC s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
+{
+ s32 status;
+
+ status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
+ if (status != IXGBE_SUCCESS)
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "port expander access failed with %d\n", status);
+ return status;
+}
+
+/**
+ * ixgbe_write_pe - Write register to port expander
+ * @hw: pointer to hardware structure
+ * @reg: register number to write
+ * @value: value to write
+ *
+ * Returns status code
+ **/
+STATIC s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
+{
+ s32 status;
+
+ status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
+ if (status != IXGBE_SUCCESS)
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "port expander access failed with %d\n", status);
+ return status;
+}
+
+/**
+ * ixgbe_reset_cs4227 - Reset CS4227 using port expander
+ * @hw: pointer to hardware structure
+ *
+ * This function assumes that the caller has acquired the proper semaphore.
+ * Returns error code
+ **/
+STATIC s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 retry;
+ u16 value;
+ u8 reg;
+
+ /* Trigger hard reset. */
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg |= IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg &= ~IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg &= ~IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ usec_delay(IXGBE_CS4227_RESET_HOLD);
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg |= IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Wait for the reset to complete. */
+ msec_delay(IXGBE_CS4227_RESET_DELAY);
+ for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
+ &value);
+ if (status == IXGBE_SUCCESS &&
+ value == IXGBE_CS4227_EEPROM_LOAD_OK)
+ break;
+ msec_delay(IXGBE_CS4227_CHECK_DELAY);
+ }
+ if (retry == IXGBE_CS4227_RETRIES) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "CS4227 reset did not complete.");
+ return IXGBE_ERR_PHY;
+ }
+
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
+ if (status != IXGBE_SUCCESS ||
+ !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "CS4227 EEPROM did not load successfully.");
+ return IXGBE_ERR_PHY;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_check_cs4227 - Check CS4227 and reset as needed
+ * @hw: pointer to hardware structure
+ **/
+STATIC void ixgbe_check_cs4227(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u16 value = 0;
+ u8 retry;
+
+ for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ msec_delay(IXGBE_CS4227_CHECK_DELAY);
+ continue;
+ }
+
+ /* Get status of reset flow. */
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
+
+ if (status == IXGBE_SUCCESS &&
+ value == IXGBE_CS4227_RESET_COMPLETE)
+ goto out;
+
+ if (status != IXGBE_SUCCESS ||
+ value != IXGBE_CS4227_RESET_PENDING)
+ break;
+
+ /* Reset is pending. Wait and check again. */
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(IXGBE_CS4227_CHECK_DELAY);
+ }
+
+ /* If still pending, assume other instance failed. */
+ if (retry == IXGBE_CS4227_RETRIES) {
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return;
+ }
+ }
+
+ /* Reset the CS4227. */
+ status = ixgbe_reset_cs4227(hw);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "CS4227 reset failed: %d", status);
+ goto out;
+ }
+
+ /* Reset takes so long, temporarily release semaphore in case the
+ * other driver instance is waiting for the reset indication.
+ */
+ ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
+ IXGBE_CS4227_RESET_PENDING);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(10);
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return;
+ }
+
+ /* Record completion for next time. */
+ status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
+ IXGBE_CS4227_RESET_COMPLETE);
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(hw->eeprom.semaphore_delay);
+}
+
+/**
+ * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
+ * @hw: pointer to hardware structure
+ **/
+STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
+{
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ if (hw->bus.lan_id) {
+ esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+ esdp |= IXGBE_ESDP_SDP1_DIR;
+ }
+ esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_identify_phy_x550em - Get PHY type based on device id
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+{
+ hw->mac.ops.set_lan_id(hw);
+
+ ixgbe_read_mng_if_sel_x550em(hw);
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ return ixgbe_identify_module_generic(hw);
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ /* set up for CS4227 usage */
+ ixgbe_setup_mux_ctl(hw);
+ ixgbe_check_cs4227(hw);
+ /* Fallthrough */
+
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ return ixgbe_identify_module_generic(hw);
+ break;
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ hw->phy.type = ixgbe_phy_x550em_kx4;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->phy.type = ixgbe_phy_x550em_xfi;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ hw->phy.type = ixgbe_phy_x550em_kr;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ return ixgbe_identify_phy_generic(hw);
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ hw->phy.type = ixgbe_phy_ext_1g_t;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ hw->phy.type = ixgbe_phy_fw;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+ break;
+ default:
+ break;
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_fw_phy_activity - Perform an activity on a PHY
+ * @hw: pointer to hardware structure
+ * @activity: activity to perform
+ * @data: Pointer to 4 32-bit words of data
+ */
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+ u32 (*data)[FW_PHY_ACT_DATA_COUNT])
+{
+ union {
+ struct ixgbe_hic_phy_activity_req cmd;
+ struct ixgbe_hic_phy_activity_resp rsp;
+ } hic;
+ u16 retries = FW_PHY_ACT_RETRIES;
+ s32 rc;
+ u16 i;
+
+ do {
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
+ hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
+
+ rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
+ sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (rc != IXGBE_SUCCESS)
+ return rc;
+ if (hic.rsp.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS) {
+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
+ (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
+ return IXGBE_SUCCESS;
+ }
+ usec_delay(20);
+ --retries;
+ } while (retries > 0);
+
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+}
+
+static const struct {
+ u16 fw_speed;
+ ixgbe_link_speed phy_speed;
+} ixgbe_fw_map[] = {
+ { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
+ { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
+ { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
+};
+
+/**
+ * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+{
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ u16 phy_speeds;
+ u16 phy_id_lo;
+ s32 rc;
+ u16 i;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
+ if (rc)
+ return rc;
+
+ hw->phy.speeds_supported = 0;
+ phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
+ for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
+ if (phy_speeds & ixgbe_fw_map[i].fw_speed)
+ hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
+ }
+ if (!hw->phy.autoneg_advertised)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
+ phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
+ hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
+ hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
+ if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_phy_fw - Get PHY type based on firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
+{
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+
+ hw->phy.type = ixgbe_phy_fw;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ return ixgbe_get_phy_id_fw(hw);
+}
+
+/**
+ * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
+{
+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+
+ setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
+ return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
+}
+
+STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+/**
+ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ **/
+STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
+}
+
+/**
+ * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ **/
+STATIC s32
+ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false);
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ **/
+STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ **/
+STATIC s32
+ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
+}
+
+/**
+* ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
+* @hw: pointer to hardware structure
+*
+* Initialize the function pointers and for MAC type X550EM.
+* Does not touch the hardware.
+**/
+s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X550EM");
+
+ /* Similar to X550 so start there. */
+ ret_val = ixgbe_init_ops_X550(hw);
+
+ /* Since this function eventually calls
+ * ixgbe_init_ops_540 by design, we are setting
+ * the pointers to NULL explicitly here to overwrite
+ * the values being set in the x540 function.
+ */
+ /* Thermal sensor not supported in x550EM */
+ mac->ops.get_thermal_sensor_data = NULL;
+ mac->ops.init_thermal_sensor_thresh = NULL;
+ mac->thermal_sensor_enabled = false;
+
+ /* FCOE not supported in x550EM */
+ mac->ops.get_san_mac_addr = NULL;
+ mac->ops.set_san_mac_addr = NULL;
+ mac->ops.get_wwn_prefix = NULL;
+ mac->ops.get_fcoe_boot_status = NULL;
+
+ /* IPsec not supported in x550EM */
+ mac->ops.disable_sec_rx_path = NULL;
+ mac->ops.enable_sec_rx_path = NULL;
+
+ /* AUTOC register is not present in x550EM. */
+ mac->ops.prot_autoc_read = NULL;
+ mac->ops.prot_autoc_write = NULL;
+
+ /* X550EM bus type is internal*/
+ hw->bus.type = ixgbe_bus_type_internal;
+ mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
+
+
+ mac->ops.get_media_type = ixgbe_get_media_type_X550em;
+ mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
+ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
+ mac->ops.reset_hw = ixgbe_reset_hw_X550em;
+ mac->ops.get_supported_physical_layer =
+ ixgbe_get_supported_physical_layer_X550em;
+
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+ mac->ops.setup_fc = ixgbe_setup_fc_generic;
+ else
+ mac->ops.setup_fc = ixgbe_setup_fc_X550em;
+
+ /* PHY */
+ phy->ops.init = ixgbe_init_phy_ops_X550em;
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ mac->ops.setup_fc = NULL;
+ phy->ops.identify = ixgbe_identify_phy_fw;
+ phy->ops.set_phy_power = NULL;
+ phy->ops.get_firmware_version = NULL;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ mac->ops.setup_fc = NULL;
+ phy->ops.identify = ixgbe_identify_phy_x550em;
+ phy->ops.set_phy_power = NULL;
+ break;
+ default:
+ phy->ops.identify = ixgbe_identify_phy_x550em;
+ }
+
+ if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
+ phy->ops.set_phy_power = NULL;
+
+
+ /* EEPROM */
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
+ eeprom->ops.read = ixgbe_read_ee_hostif_X550;
+ eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
+ eeprom->ops.write = ixgbe_write_ee_hostif_X550;
+ eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
+ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+{
+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ s32 rc;
+ u16 i;
+
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return 0;
+
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_rx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_tx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
+ if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
+ setup[0] |= ixgbe_fw_map[i].fw_speed;
+ }
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
+
+ if (hw->phy.eee_speeds_advertised)
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
+ if (rc)
+ return rc;
+ if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
+ return IXGBE_ERR_OVERTEMP;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_fc_autoneg_fw _ Set up flow control for FW-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ */
+static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+{
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ return ixgbe_setup_fw_link(hw);
+}
+
+/**
+ * ixgbe_setup_eee_fw - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enable/disable EEE based on enable_eee flag.
+ * This function controls EEE for firmware-based PHY implementations.
+ */
+static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
+{
+ if (!!hw->phy.eee_speeds_advertised == enable_eee)
+ return IXGBE_SUCCESS;
+ if (enable_eee)
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+ else
+ hw->phy.eee_speeds_advertised = 0;
+ return hw->phy.ops.setup_link(hw);
+}
+
+/**
+* ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
+* @hw: pointer to hardware structure
+*
+* Initialize the function pointers and for MAC type X550EM_a.
+* Does not touch the hardware.
+**/
+s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X550EM_a");
+
+ /* Start with generic X550EM init */
+ ret_val = ixgbe_init_ops_X550EM(hw);
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
+ } else {
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
+ }
+ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
+ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
+
+ switch (mac->ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ mac->ops.setup_fc = NULL;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
+ break;
+ case ixgbe_media_type_backplane:
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
+ mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
+ break;
+ default:
+ break;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
+ mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
+ mac->ops.setup_eee = ixgbe_setup_eee_fw;
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+ break;
+ default:
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+* ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
+* @hw: pointer to hardware structure
+*
+* Initialize the function pointers and for MAC type X550EM_x.
+* Does not touch the hardware.
+**/
+s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_link_info *link = &hw->link;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X550EM_x");
+
+ /* Start with generic X550EM init */
+ ret_val = ixgbe_init_ops_X550EM(hw);
+
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
+ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
+ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
+ link->ops.read_link = ixgbe_read_i2c_combined_generic;
+ link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
+ link->ops.write_link = ixgbe_write_i2c_combined_generic;
+ link->ops.write_link_unlocked =
+ ixgbe_write_i2c_combined_generic_unlocked;
+ link->addr = IXGBE_CS4227;
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
+ mac->ops.setup_fc = NULL;
+ mac->ops.setup_eee = NULL;
+ mac->ops.init_led_link_act = NULL;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_dmac_config_X550
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing. If enabling dmac, dmac is activated.
+ * When disabling dmac, dmac enable dmac bit is cleared.
+ **/
+s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
+{
+ u32 reg, high_pri_tc;
+
+ DEBUGFUNC("ixgbe_dmac_config_X550");
+
+ /* Disable DMA coalescing before configuring */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+ reg &= ~IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+ /* Disable DMA Coalescing if the watchdog timer is 0 */
+ if (!hw->mac.dmac_config.watchdog_timer)
+ goto out;
+
+ ixgbe_dmac_config_tcs_X550(hw);
+
+ /* Configure DMA Coalescing Control Register */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+
+ /* Set the watchdog timer in units of 40.96 usec */
+ reg &= ~IXGBE_DMACR_DMACWT_MASK;
+ reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
+
+ reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
+ /* If fcoe is enabled, set high priority traffic class */
+ if (hw->mac.dmac_config.fcoe_en) {
+ high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
+ reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
+ IXGBE_DMACR_HIGH_PRI_TC_MASK);
+ }
+ reg |= IXGBE_DMACR_EN_MNG_IND;
+
+ /* Enable DMA coalescing after configuration */
+ reg |= IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+out:
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dmac_config_tcs_X550
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing threshold per TC. The dmac enable bit must
+ * be cleared before configuring.
+ **/
+s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
+{
+ u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
+
+ DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
+
+ /* Configure DMA coalescing enabled */
+ switch (hw->mac.dmac_config.link_speed) {
+ case IXGBE_LINK_SPEED_10_FULL:
+ case IXGBE_LINK_SPEED_100_FULL:
+ pb_headroom = IXGBE_DMACRXT_100M;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ pb_headroom = IXGBE_DMACRXT_1G;
+ break;
+ default:
+ pb_headroom = IXGBE_DMACRXT_10G;
+ break;
+ }
+
+ maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
+ IXGBE_MHADD_MFS_SHIFT) / 1024);
+
+ /* Set the per Rx packet buffer receive threshold */
+ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
+ reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
+
+ if (tc < hw->mac.dmac_config.num_tcs) {
+ /* Get Rx PB size */
+ rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
+ rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
+ IXGBE_RXPBSIZE_SHIFT;
+
+ /* Calculate receive buffer threshold in kilobytes */
+ if (rx_pb_size > pb_headroom)
+ rx_pb_size = rx_pb_size - pb_headroom;
+ else
+ rx_pb_size = 0;
+
+ /* Minimum of MFS shall be set for DMCTH */
+ reg |= (rx_pb_size > maxframe_size_kb) ?
+ rx_pb_size : maxframe_size_kb;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dmac_update_tcs_X550
+ * @hw: pointer to hardware structure
+ *
+ * Disables dmac, updates per TC settings, and then enables dmac.
+ **/
+s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
+
+ /* Disable DMA coalescing before configuring */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+ reg &= ~IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+ ixgbe_dmac_config_tcs_X550(hw);
+
+ /* Enable DMA coalescing after configuration */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+ reg |= IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ DEBUGFUNC("ixgbe_init_eeprom_params_X550");
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_source_address_pruning_X550 - Enable/Disbale source address pruning
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable source address pruning
+ * @pool: Rx pool to set source address pruning for
+ **/
+void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
+ unsigned int pool)
+{
+ u64 pfflp;
+
+ /* max rx pool is 63 */
+ if (pool > 63)
+ return;
+
+ pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
+ pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
+
+ if (enable)
+ pfflp |= (1ULL << pool);
+ else
+ pfflp &= ~(1ULL << pool);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
+ IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
+}
+
+/**
+ * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for Ethertype anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
+ *
+ **/
+void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
+ bool enable, int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
+ u32 pfvfspoof;
+
+ DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
+
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
+/**
+ * ixgbe_iosf_wait - Wait for IOSF command completion
+ * @hw: pointer to hardware structure
+ * @ctrl: pointer to location to receive final IOSF control value
+ *
+ * Returns failing status on timeout
+ *
+ * Note: ctrl can be NULL if the IOSF control register value is not needed
+ **/
+STATIC s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+{
+ u32 i, command = 0;
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The SB IOSF BUSY bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+ if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
+ break;
+ usec_delay(10);
+ }
+ if (ctrl)
+ *ctrl = command;
+ if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
+ * of the IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
+ **/
+s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 data)
+{
+ u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+ u32 command, error;
+ s32 ret;
+
+ ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
+ if (ret != IXGBE_SUCCESS)
+ return ret;
+
+ ret = ixgbe_iosf_wait(hw, NULL);
+ if (ret != IXGBE_SUCCESS)
+ goto out;
+
+ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+ /* Write IOSF control register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+ /* Write IOSF data register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
+
+ ret = ixgbe_iosf_wait(hw, &command);
+
+ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Failed to write, error %x\n", error);
+ ret = IXGBE_ERR_PHY;
+ }
+
+out:
+ ixgbe_release_swfw_semaphore(hw, gssr);
+ return ret;
+}
+
+/**
+ * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Pointer to read data from the register
+ **/
+s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *data)
+{
+ u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+ u32 command, error;
+ s32 ret;
+
+ ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
+ if (ret != IXGBE_SUCCESS)
+ return ret;
+
+ ret = ixgbe_iosf_wait(hw, NULL);
+ if (ret != IXGBE_SUCCESS)
+ goto out;
+
+ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+ /* Write IOSF control register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+ ret = ixgbe_iosf_wait(hw, &command);
+
+ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Failed to read, error %x\n", error);
+ ret = IXGBE_ERR_PHY;
+ }
+
+ if (ret == IXGBE_SUCCESS)
+ *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
+
+out:
+ ixgbe_release_swfw_semaphore(hw, gssr);
+ return ret;
+}
+
+/**
+ * ixgbe_get_phy_token - Get the token for shared phy access
+ * @hw: Pointer to hardware structure
+ */
+
+s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+{
+ struct ixgbe_hic_phy_token_req token_cmd;
+ s32 status;
+
+ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+ token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ token_cmd.port_number = hw->bus.lan_id;
+ token_cmd.command_type = FW_PHY_TOKEN_REQ;
+ token_cmd.pad = 0;
+ status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
+ sizeof(token_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (status) {
+ DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
+ status);
+ return status;
+ }
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return IXGBE_SUCCESS;
+ if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
+ DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
+ token_cmd.hdr.cmd_or_resp.ret_status);
+ return IXGBE_ERR_FW_RESP_INVALID;
+ }
+
+ DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
+ return IXGBE_ERR_TOKEN_RETRY;
+}
+
+/**
+ * ixgbe_put_phy_token - Put the token for shared phy access
+ * @hw: Pointer to hardware structure
+ */
+
+s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+{
+ struct ixgbe_hic_phy_token_req token_cmd;
+ s32 status;
+
+ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+ token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ token_cmd.port_number = hw->bus.lan_id;
+ token_cmd.command_type = FW_PHY_TOKEN_REL;
+ token_cmd.pad = 0;
+ status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
+ sizeof(token_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (status)
+ return status;
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return IXGBE_SUCCESS;
+
+ DEBUGOUT("Put PHY Token host interface command failed");
+ return IXGBE_ERR_FW_RESP_INVALID;
+}
+
+/**
+ * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
+ * of the IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
+ **/
+s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 data)
+{
+ struct ixgbe_hic_internal_phy_req write_cmd;
+ s32 status;
+ UNREFERENCED_1PARAMETER(device_type);
+
+ memset(&write_cmd, 0, sizeof(write_cmd));
+ write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ write_cmd.port_number = hw->bus.lan_id;
+ write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
+ write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
+ write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
+ sizeof(write_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Pointer to read data from the register
+ **/
+s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *data)
+{
+ union {
+ struct ixgbe_hic_internal_phy_req cmd;
+ struct ixgbe_hic_internal_phy_resp rsp;
+ } hic;
+ s32 status;
+ UNREFERENCED_1PARAMETER(device_type);
+
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.command_type = FW_INT_PHY_REQ_READ;
+ hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
+ sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, true);
+
+ /* Extract the register value from the response. */
+ *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
+
+ return status;
+}
+
+/**
+ * ixgbe_disable_mdd_X550
+ * @hw: pointer to hardware structure
+ *
+ * Disable malicious driver detection
+ **/
+void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("ixgbe_disable_mdd_X550");
+
+ /* Disable MDD for TX DMA and interrupt */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+
+ /* Disable MDD for RX and interrupt */
+ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
+}
+
+/**
+ * ixgbe_enable_mdd_X550
+ * @hw: pointer to hardware structure
+ *
+ * Enable malicious driver detection
+ **/
+void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("ixgbe_enable_mdd_X550");
+
+ /* Enable MDD for TX DMA and interrupt */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+
+ /* Enable MDD for RX and interrupt */
+ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
+}
+
+/**
+ * ixgbe_restore_mdd_vf_X550
+ * @hw: pointer to hardware structure
+ * @vf: vf index
+ *
+ * Restore VF that was disabled during malicious driver detection event
+ **/
+void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
+{
+ u32 idx, reg, num_qs, start_q, bitmask;
+
+ DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
+
+ /* Map VF to queues */
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case IXGBE_MRQC_VMDQRT8TCEN:
+ num_qs = 8; /* 16 VFs / pools */
+ bitmask = 0x000000FF;
+ break;
+ case IXGBE_MRQC_VMDQRSS32EN:
+ case IXGBE_MRQC_VMDQRT4TCEN:
+ num_qs = 4; /* 32 VFs / pools */
+ bitmask = 0x0000000F;
+ break;
+ default: /* 64 VFs / pools */
+ num_qs = 2;
+ bitmask = 0x00000003;
+ break;
+ }
+ start_q = vf * num_qs;
+
+ /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
+ idx = start_q / 32;
+ reg = 0;
+ reg |= (bitmask << (start_q % 32));
+ IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
+ IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
+}
+
+/**
+ * ixgbe_mdd_event_X550
+ * @hw: pointer to hardware structure
+ * @vf_bitmap: vf bitmap of malicious vfs
+ *
+ * Handle malicious driver detection event.
+ **/
+void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
+{
+ u32 wqbr;
+ u32 i, j, reg, q, shift, vf, idx;
+
+ DEBUGFUNC("ixgbe_mdd_event_X550");
+
+ /* figure out pool size for mapping to vf's */
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case IXGBE_MRQC_VMDQRT8TCEN:
+ shift = 3; /* 16 VFs / pools */
+ break;
+ case IXGBE_MRQC_VMDQRSS32EN:
+ case IXGBE_MRQC_VMDQRT4TCEN:
+ shift = 2; /* 32 VFs / pools */
+ break;
+ default:
+ shift = 1; /* 64 VFs / pools */
+ break;
+ }
+
+ /* Read WQBR_TX and WQBR_RX and check for malicious queues */
+ for (i = 0; i < 4; i++) {
+ wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
+ wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
+
+ if (!wqbr)
+ continue;
+
+ /* Get malicious queue */
+ for (j = 0; j < 32 && wqbr; j++) {
+
+ if (!(wqbr & (1 << j)))
+ continue;
+
+ /* Get queue from bitmask */
+ q = j + (i * 32);
+
+ /* Map queue to vf */
+ vf = (q >> shift);
+
+ /* Set vf bit in vf_bitmap */
+ idx = vf / 32;
+ vf_bitmap[idx] |= (1 << (vf % 32));
+ wqbr &= ~(1 << j);
+ }
+ }
+}
+
+/**
+ * ixgbe_get_media_type_X550em - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ */
+enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ DEBUGFUNC("ixgbe_get_media_type_X550em");
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ media_type = ixgbe_media_type_backplane;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_X550EM_A_QSFP:
+ case IXGBE_DEV_ID_X550EM_A_QSFP_N:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ media_type = ixgbe_media_type_copper;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ media_type = ixgbe_media_type_backplane;
+ hw->phy.type = ixgbe_phy_sgmii;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ media_type = ixgbe_media_type_copper;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+ return media_type;
+}
+
+/**
+ * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
+ * @hw: pointer to hardware structure
+ * @linear: true if SFP module is linear
+ */
+STATIC s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+{
+ DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
+
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_not_present:
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+ case ixgbe_sfp_type_da_cu_core0:
+ case ixgbe_sfp_type_da_cu_core1:
+ *linear = true;
+ break;
+ case ixgbe_sfp_type_srlr_core0:
+ case ixgbe_sfp_type_srlr_core1:
+ case ixgbe_sfp_type_da_act_lmt_core0:
+ case ixgbe_sfp_type_da_act_lmt_core1:
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ case ixgbe_sfp_type_1g_lx_core0:
+ case ixgbe_sfp_type_1g_lx_core1:
+ *linear = false;
+ break;
+ case ixgbe_sfp_type_unknown:
+ case ixgbe_sfp_type_1g_cu_core0:
+ case ixgbe_sfp_type_1g_cu_core1:
+ default:
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the SFP module and assigns appropriate PHY type.
+ **/
+s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ bool linear;
+
+ DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
+
+ status = ixgbe_identify_module_generic(hw);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Check if SFP module is supported */
+ status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
+ * @hw: pointer to hardware structure
+ */
+s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ bool linear;
+
+ DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
+
+ /* Check if SFP module is supported */
+ status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_init_mac_link_ops_X550em(hw);
+ hw->phy.ops.reset = NULL;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+* ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
+* internal PHY
+* @hw: pointer to hardware structure
+**/
+STATIC s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 link_ctrl;
+
+ /* Restart auto-negotiation. */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
+
+ if (status) {
+ DEBUGOUT("Auto-negotiation did not complete\n");
+ return status;
+ }
+
+ link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
+
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ u32 flx_mask_st20;
+
+ /* Indicate to FW that AN restart has been asserted */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
+
+ if (status) {
+ DEBUGOUT("Auto-negotiation did not complete\n");
+ return status;
+ }
+
+ flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
+ }
+
+ return status;
+}
+
+#ifndef PREBOOT_SUPPORT
+/**
+ * ixgbe_setup_sgmii - Set up link for sgmii
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ */
+STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 lval, sval, flx_val;
+ s32 rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+ if (rc)
+ return rc;
+
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+ if (rc)
+ return rc;
+
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
+ if (rc)
+ return rc;
+
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_restart_an_internal_phy_x550em(hw);
+ if (rc)
+ return rc;
+
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+#endif /* PREBOOT_SUPPORT */
+/**
+ * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ */
+STATIC s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 lval, sval, flx_val;
+ s32 rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+ if (rc)
+ return rc;
+
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+ if (rc)
+ return rc;
+
+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
+ if (rc)
+ return rc;
+
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_restart_an_internal_phy_x550em(hw);
+
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
+
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ /* CS4227 does not support autoneg, so disable the laser control
+ * functions for SFP+ fiber
+ */
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
+ mac->ops.flap_tx_laser = NULL;
+ mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
+ mac->ops.set_rate_select_speed =
+ ixgbe_set_soft_rate_select_speed;
+
+ if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
+ (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550a;
+ else
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550em;
+ break;
+ case ixgbe_media_type_copper:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
+ break;
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
+ mac->ops.setup_link = ixgbe_setup_sgmii_fw;
+ mac->ops.check_link =
+ ixgbe_check_mac_link_generic;
+ } else {
+ mac->ops.setup_link =
+ ixgbe_setup_mac_link_t_X550em;
+ }
+ } else {
+ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+ mac->ops.check_link = ixgbe_check_link_t_X550em;
+ }
+ break;
+ case ixgbe_media_type_backplane:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
+#ifdef PREBOOT_SUPPORT
+ mac->ops.setup_link = ixgbe_setup_sgmii_fw;
+#else
+ mac->ops.setup_link = ixgbe_setup_sgmii;
+#endif /* PREBOOT_SUPPORT */
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ixgbe_get_link_capabilities_x550em - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ */
+s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
+
+
+ if (hw->phy.type == ixgbe_phy_fw) {
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+ return 0;
+ }
+
+ /* SFP */
+ if (hw->phy.media_type == ixgbe_media_type_fiber) {
+
+ /* CS4227 SFP must not enable auto-negotiation */
+ *autoneg = false;
+
+ /* Check if 1G SFP module. */
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
+ || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ return IXGBE_SUCCESS;
+ }
+
+ /* Link capabilities are based on SFP */
+ if (hw->phy.multispeed_fiber)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ } else {
+ switch (hw->phy.type) {
+ case ixgbe_phy_ext_1g_t:
+#ifdef PREBOOT_SUPPORT
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+#endif /* PREBOOT_SUPPORT */
+ case ixgbe_phy_sgmii:
+#ifdef PREBOOT_SUPPORT
+ *speed = IXGBE_LINK_SPEED_1GB_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_10_FULL;
+#else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+#endif /* PREBOOT_SUPPORT */
+ break;
+ case ixgbe_phy_x550em_kr:
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ /* check different backplane modes */
+ if (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ } else if (hw->device_id ==
+ IXGBE_DEV_ID_X550EM_A_KR_L) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ }
+ /* fall through */
+ default:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ *autoneg = true;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_lasi_ext_t_x550em - Determime external Base T PHY interrupt cause
+ * @hw: pointer to hardware structure
+ * @lsc: pointer to boolean flag which indicates whether external Base T
+ * PHY interrupt is lsc
+ *
+ * Determime if external Base T PHY interrupt cause is high temperature
+ * failure alarm or link status change.
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ */
+STATIC s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+{
+ u32 status;
+ u16 reg;
+
+ *lsc = false;
+
+ /* Vendor alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS ||
+ !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
+ return status;
+
+ /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS ||
+ !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+ IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
+ return status;
+
+ /* Global alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If high temperature failure, then return over temp error and exit */
+ if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
+ /* power down the PHY in case the PHY FW didn't already */
+ ixgbe_set_copper_phy_power(hw, false);
+ return IXGBE_ERR_OVERTEMP;
+ } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
+ /* device fault alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* if device fault was due to high temp alarm handle and exit */
+ if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
+ /* power down the PHY in case the PHY FW didn't */
+ ixgbe_set_copper_phy_power(hw, false);
+ return IXGBE_ERR_OVERTEMP;
+ }
+ }
+
+ /* Vendor alarm 2 triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status != IXGBE_SUCCESS ||
+ !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
+ return status;
+
+ /* link connect/disconnect event occurred */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Indicate LSC */
+ if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
+ *lsc = true;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
+ * @hw: pointer to hardware structure
+ *
+ * Enable link status change and temperature failure alarm for the external
+ * Base T PHY
+ *
+ * Returns PHY access status
+ */
+STATIC s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+ u32 status;
+ u16 reg;
+ bool lsc;
+
+ /* Clear interrupt flags */
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+
+ /* Enable link status change alarm */
+
+ /* Enable the LASI interrupts on X552 devices to receive notifications
+ * of the link configurations of the external PHY and correspondingly
+ * support the configuration of the internal iXFI link, since iXFI does
+ * not support auto-negotiation. This is not required for X553 devices
+ * having KR support, which performs auto-negotiations and which is used
+ * as the internal link to the external PHY. Hence adding a check here
+ * to avoid enabling LASI interrupts for X553 devices.
+ */
+ if (hw->mac.type != ixgbe_mac_X550EM_a) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
+
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+
+ /* Enable high temperature failure and global fault alarms */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
+ IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+ IXGBE_MDIO_GLOBAL_ALARM_1_INT);
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Enable chip-wide vendor alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
+ * @hw: pointer to hardware structure
+ * @speed: link speed
+ *
+ * Configures the integrated KR PHY.
+ **/
+STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u32 reg_val;
+
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+
+ /* Advertise 10G support. */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+
+ /* Advertise 1G support. */
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ /* Set lane mode to KR auto negotiation */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ }
+
+ return ixgbe_restart_an_internal_phy_x550em(hw);
+}
+
+/**
+ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
+{
+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ s32 rc;
+
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return IXGBE_SUCCESS;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
+ if (rc)
+ return rc;
+ memset(store, 0, sizeof(store));
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
+ if (rc)
+ return rc;
+
+ return ixgbe_setup_fw_link(hw);
+}
+
+/**
+ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
+{
+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ s32 rc;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
+ if (rc)
+ return rc;
+
+ if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
+ ixgbe_shutdown_fw_phy(hw);
+ return IXGBE_ERR_OVERTEMP;
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
+ * @hw: pointer to hardware structure
+ *
+ * Read NW_MNG_IF_SEL register and save field values, and check for valid field
+ * values.
+ **/
+STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
+{
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode.
+ */
+ hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
+ * PHY address. This register field was has only been used for X552.
+ */
+ if (hw->mac.type == ixgbe_mac_X550EM_a &&
+ hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
+ hw->phy.addr = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ */
+s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_phy_ops_X550em");
+
+ hw->mac.ops.set_lan_id(hw);
+ ixgbe_read_mng_if_sel_x550em(hw);
+
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+ phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ ixgbe_setup_mux_ctl(hw);
+ phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ phy->ops.read_reg_mdi = NULL;
+ phy->ops.write_reg_mdi = NULL;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+
+ break;
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
+ hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ /* set up for CS4227 usage */
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ phy->ops.read_reg_mdi = NULL;
+ phy->ops.write_reg_mdi = NULL;
+ default:
+ break;
+ }
+
+ /* Identify the PHY or SFP module */
+ ret_val = phy->ops.identify(hw);
+ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+ ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
+ return ret_val;
+
+ /* Setup function pointers based on detected hardware */
+ ixgbe_init_mac_link_ops_X550em(hw);
+ if (phy->sfp_type != ixgbe_sfp_type_unknown)
+ phy->ops.reset = NULL;
+
+ /* Set functions pointers based on phy type */
+ switch (hw->phy.type) {
+ case ixgbe_phy_x550em_kx4:
+ phy->ops.setup_link = NULL;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
+ case ixgbe_phy_x550em_kr:
+ phy->ops.setup_link = ixgbe_setup_kr_x550em;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
+ case ixgbe_phy_ext_1g_t:
+ /* link is managed by FW */
+ phy->ops.setup_link = NULL;
+ phy->ops.reset = NULL;
+ break;
+ case ixgbe_phy_x550em_xfi:
+ /* link is managed by HW */
+ phy->ops.setup_link = NULL;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
+ case ixgbe_phy_x550em_ext_t:
+ /* If internal link mode is XFI, then setup iXFI internal link,
+ * else setup KR now.
+ */
+ phy->ops.setup_internal_link =
+ ixgbe_setup_internal_phy_t_x550em;
+
+ /* setup SW LPLU only for first revision of X550EM_x */
+ if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
+ !(IXGBE_FUSES0_REV_MASK &
+ IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
+ phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
+
+ phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
+ phy->ops.reset = ixgbe_reset_phy_t_X550em;
+ break;
+ case ixgbe_phy_sgmii:
+ phy->ops.setup_link = NULL;
+ break;
+ case ixgbe_phy_fw:
+ phy->ops.setup_link = ixgbe_setup_fw_link;
+ phy->ops.reset = ixgbe_reset_phy_fw;
+ break;
+ default:
+ break;
+ }
+ return ret_val;
+}
+
+/**
+ * ixgbe_set_mdio_speed - Set MDIO clock speed
+ * @hw: pointer to hardware structure
+ */
+STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
+{
+ u32 hlreg0;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_QSFP:
+ /* Config MDIO clock speed before the first MDIO PHY access */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ /* Select fast MDIO clock speed for these devices */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 |= IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ixgbe_reset_hw_X550em - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
+ * reset.
+ */
+s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+{
+ ixgbe_link_speed link_speed;
+ s32 status;
+ u32 ctrl = 0;
+ u32 i;
+ bool link_up = false;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+
+ DEBUGFUNC("ixgbe_reset_hw_X550em");
+
+ /* Call adapter stop to disable Tx/Rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
+ return status;
+ }
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ ixgbe_set_mdio_speed(hw);
+
+ /* PHY ops must be identified and initialized prior to reset */
+ status = hw->phy.ops.init(hw);
+
+ if (status)
+ DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
+ status);
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+ status == IXGBE_ERR_PHY_ADDR_INVALID) {
+ DEBUGOUT("Returning from reset HW due to PHY init failure\n");
+ return status;
+ }
+
+ /* start the external PHY */
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+ status = ixgbe_init_ext_t_x550em(hw);
+ if (status) {
+ DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
+ status);
+ return status;
+ }
+ }
+
+ /* Setup SFP module if there is one present. */
+ if (hw->phy.sfp_setup_needed) {
+ status = hw->mac.ops.setup_sfp(hw);
+ hw->phy.sfp_setup_needed = false;
+ }
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ return status;
+
+ /* Reset PHY */
+ if (!hw->phy.reset_disable && hw->phy.ops.reset) {
+ if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
+ return IXGBE_ERR_OVERTEMP;
+ }
+
+mac_reset_top:
+ /* Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
+ */
+ ctrl = IXGBE_CTRL_LNK_RST;
+ if (!hw->force_full_reset) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up)
+ ctrl = IXGBE_CTRL_RST;
+ }
+
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ /* Poll for reset bit to self-clear meaning reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ DEBUGOUT("Reset polling failed to complete.\n");
+ }
+
+ msec_delay(50);
+
+ /* Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to
+ * allow time for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ ixgbe_set_mdio_speed(hw);
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+ ixgbe_setup_mux_ctl(hw);
+
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
+
+ return status;
+}
+
+/**
+ * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
+ * @hw: pointer to hardware structure
+ */
+s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+{
+ u32 status;
+ u16 reg;
+
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If PHY FW reset completed bit is set then this is the first
+ * SW instance after a power on so the PHY FW must be un-stalled.
+ */
+ if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg &= ~IXGBE_MDIO_POWER_UP_STALL;
+
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_kr_x550em - Configure the KR PHY.
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+{
+ /* leave link alone for 2.5G */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
+ return IXGBE_SUCCESS;
+
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+
+ return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
+}
+
+/**
+ * ixgbe_setup_mac_link_sfp_x550em - Setup internal/external the PHY for SFP
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: unused
+ *
+ * Configure the external PHY and the integrated KR PHY for SFP support.
+ **/
+s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 ret_val;
+ u16 reg_slice, reg_val;
+ bool setup_linear = false;
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+ /* Check if SFP module is supported and linear */
+ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module present, then return success. Return success since
+ * there is no reason to configure CS4227 and SFP not present error is
+ * not excepted in the setup MAC link flow.
+ */
+ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
+ return IXGBE_SUCCESS;
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
+
+ /* Configure CS4227 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
+ (hw->bus.lan_id << 12);
+ if (setup_linear)
+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
+ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
+ reg_val);
+ return ret_val;
+}
+
+/**
+ * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ *
+ * Configures the integrated PHY for native SFI mode. Used to connect the
+ * internal PHY directly to an SFP cage, without autonegotiation.
+ **/
+STATIC s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ s32 status;
+ u32 reg_val;
+
+ /* Disable all AN and force speed to 10G Serial. */
+ status = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+
+ /* Select forced link speed for internal PHY. */
+ switch (*speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
+ break;
+ default:
+ /* Other link speeds are not supported by internal PHY. */
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ status = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_restart_an_internal_phy_x550em(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: unused
+ *
+ * Configure the integrated PHY for SFP support.
+ **/
+s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 ret_val;
+ u16 reg_phy_ext;
+ bool setup_linear = false;
+ u32 reg_slice, reg_phy_int, slice_offset;
+
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+ /* Check if SFP module is supported and linear */
+ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module present, then return success. Return success since
+ * SFP not present error is not excepted in the setup MAC link flow.
+ */
+ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
+ return IXGBE_SUCCESS;
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
+ /* Configure internal PHY for native SFI based on module type */
+ ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
+ if (!setup_linear)
+ reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
+
+ ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* Setup SFI internal link. */
+ ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
+ } else {
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
+
+ if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
+ /* Find Address */
+ DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+ }
+
+ /* Get external PHY SKU id */
+ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* When configuring quad port CS4223, the MAC instance is part
+ * of the slice offset.
+ */
+ if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
+ slice_offset = (hw->bus.lan_id +
+ (hw->bus.instance_id << 1)) << 12;
+ else
+ slice_offset = hw->bus.lan_id << 12;
+
+ /* Configure CS4227/CS4223 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+
+ ret_val = hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
+ (IXGBE_CS4227_EDC_MODE_SR << 1));
+
+ if (setup_linear)
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ ret_val = hw->phy.ops.write_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
+
+ /* Flush previous write with a read */
+ ret_val = hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+ }
+ return ret_val;
+}
+
+/**
+ * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
+ * @hw: pointer to hardware structure
+ *
+ * iXfI configuration needed for ixgbe_mac_X550EM_x devices.
+ **/
+STATIC s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ s32 status;
+ u32 reg_val;
+
+ /* Disable training protocol FSM. */
+ status = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
+ status = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Disable Flex from training TXFFE. */
+ status = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ status = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Enable override for coefficients. */
+ status = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
+ status = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ return status;
+}
+
+/**
+ * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ *
+ * Configures the integrated KR PHY to use iXFI mode. Used to connect an
+ * internal and external PHY at a specific speed, without autonegotiation.
+ **/
+STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ s32 status;
+ u32 reg_val;
+
+ /* iXFI is only supported with X552 */
+ if (mac->type != ixgbe_mac_X550EM_x)
+ return IXGBE_ERR_LINK_SETUP;
+
+ /* Disable AN and force speed to 10G Serial. */
+ status = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+
+ /* Select forced link speed for internal PHY. */
+ switch (*speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ break;
+ default:
+ /* Other link speeds are not supported by internal KR PHY. */
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ status = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Additional configuration needed for x550em_x */
+ if (hw->mac.type == ixgbe_mac_X550EM_x) {
+ status = ixgbe_setup_ixfi_x550em_x(hw);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_restart_an_internal_phy_x550em(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
+ * @hw: address of hardware structure
+ * @link_up: address of boolean to indicate link status
+ *
+ * Returns error code if unable to get link status.
+ */
+STATIC s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
+{
+ u32 ret;
+ u16 autoneg_status;
+
+ *link_up = false;
+
+ /* read this twice back to back to indicate current status */
+ ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (ret != IXGBE_SUCCESS)
+ return ret;
+
+ ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (ret != IXGBE_SUCCESS)
+ return ret;
+
+ *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
+ * @hw: point to hardware structure
+ *
+ * Configures the link between the integrated KR PHY and the external X557 PHY
+ * The driver will call this function when it gets a link status change
+ * interrupt from the X557 PHY. This function configures the link speed
+ * between the PHYs to match the link speed of the BASE-T link.
+ *
+ * A return of a non-zero value indicates an error, and the base driver should
+ * not report link up.
+ */
+s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
+{
+ ixgbe_link_speed force_speed;
+ bool link_up;
+ u32 status;
+ u16 speed;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+ return IXGBE_ERR_CONFIG;
+
+ if (hw->mac.type == ixgbe_mac_X550EM_x &&
+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ /* If link is down, there is no setup necessary so return */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (!link_up)
+ return IXGBE_SUCCESS;
+
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &speed);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If link is still down - no setup is required so return */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (!link_up)
+ return IXGBE_SUCCESS;
+
+ /* clear everything but the speed and duplex bits */
+ speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
+
+ switch (speed) {
+ case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
+ force_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
+ force_speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ default:
+ /* Internal PHY does not support anything else */
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ return ixgbe_setup_ixfi_x550em(hw, &force_speed);
+ } else {
+ speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ return ixgbe_setup_kr_speed_x550em(hw, speed);
+ }
+}
+
+/**
+ * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KR PHY to use internal loopback mode.
+ **/
+s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 reg_val;
+
+ /* Disable AN and force speed to 10G Serial. */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Set near-end loopback clocks. */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
+ reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Set loopback enable. */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Training bypass. */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
+ * assuming that the semaphore is already obtained.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
+ struct ixgbe_hic_read_shadow_ram buffer;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_read_ee_hostif_X550");
+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
+ /* one word */
+ buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
+ buffer.pad2 = 0;
+ buffer.pad3 = 0;
+
+ status = hw->mac.ops.acquire_swfw_sync(hw, mask);
+ if (status)
+ return status;
+
+ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT);
+ if (!status) {
+ *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ FW_NVM_DATA_OFFSET);
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+ return status;
+}
+
+/**
+ * ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads a 16 bit word(s) from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
+ struct ixgbe_hic_read_shadow_ram buffer;
+ u32 current_word = 0;
+ u16 words_to_read;
+ s32 status;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
+
+ /* Take semaphore for the entire operation. */
+ status = hw->mac.ops.acquire_swfw_sync(hw, mask);
+ if (status) {
+ DEBUGOUT("EEPROM read buffer - semaphore failed\n");
+ return status;
+ }
+
+ while (words) {
+ if (words > FW_MAX_READ_BUFFER_SIZE / 2)
+ words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
+ else
+ words_to_read = words;
+
+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
+ buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
+ buffer.pad2 = 0;
+ buffer.pad3 = 0;
+
+ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT);
+
+ if (status) {
+ DEBUGOUT("Host interface command failed\n");
+ goto out;
+ }
+
+ for (i = 0; i < words_to_read; i++) {
+ u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
+ 2 * i;
+ u32 value = IXGBE_READ_REG(hw, reg);
+
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ i++;
+ if (i < words_to_read) {
+ value >>= 16;
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ }
+ }
+ words -= words_to_read;
+ }
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, mask);
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 data)
+{
+ s32 status;
+ struct ixgbe_hic_write_shadow_ram buffer;
+
+ DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
+
+ buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* one word */
+ buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
+ buffer.data = data;
+ buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_write_ee_hostif_X550");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ DEBUGOUT("write ee hostif failed to get semaphore");
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) write to the EEPROM
+ *
+ * Write a 16 bit word(s) to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 i = 0;
+
+ DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
+
+ /* Take semaphore for the entire operation. */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("EEPROM write buffer - semaphore failed\n");
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
+ data[i]);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Eeprom buffered write failed\n");
+ break;
+ }
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+out:
+
+ return status;
+}
+
+/**
+ * ixgbe_checksum_ptr_x550 - Checksum one pointer region
+ * @hw: pointer to hardware structure
+ * @ptr: pointer offset in eeprom
+ * @size: size of section pointed by ptr, if 0 first word will be used as size
+ * @csum: address of checksum to update
+ * @buffer: pointer to buffer containing calculated checksum
+ * @buffer_size: size of buffer
+ *
+ * Returns error status for any failure
+ */
+STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+ u16 size, u16 *csum, u16 *buffer,
+ u32 buffer_size)
+{
+ u16 buf[256];
+ s32 status;
+ u16 length, bufsz, i, start;
+ u16 *local_buffer;
+
+ bufsz = sizeof(buf) / sizeof(buf[0]);
+
+ /* Read a chunk at the pointer location */
+ if (!buffer) {
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
+ if (status) {
+ DEBUGOUT("Failed to read EEPROM image\n");
+ return status;
+ }
+ local_buffer = buf;
+ } else {
+ if (buffer_size < ptr)
+ return IXGBE_ERR_PARAM;
+ local_buffer = &buffer[ptr];
+ }
+
+ if (size) {
+ start = 0;
+ length = size;
+ } else {
+ start = 1;
+ length = local_buffer[0];
+
+ /* Skip pointer section if length is invalid. */
+ if (length == 0xFFFF || length == 0 ||
+ (ptr + length) >= hw->eeprom.word_size)
+ return IXGBE_SUCCESS;
+ }
+
+ if (buffer && ((u32)start + (u32)length > buffer_size))
+ return IXGBE_ERR_PARAM;
+
+ for (i = start; length; i++, length--) {
+ if (i == bufsz && !buffer) {
+ ptr += bufsz;
+ i = 0;
+ if (length < bufsz)
+ bufsz = length;
+
+ /* Read a chunk at the pointer location */
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
+ bufsz, buf);
+ if (status) {
+ DEBUGOUT("Failed to read EEPROM image\n");
+ return status;
+ }
+ }
+ *csum += local_buffer[i];
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @buffer: pointer to buffer containing calculated checksum
+ * @buffer_size: size of buffer
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
+{
+ u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
+ u16 *local_buffer;
+ s32 status;
+ u16 checksum = 0;
+ u16 pointer, i, size;
+
+ DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (!buffer) {
+ /* Read pointer area */
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
+ IXGBE_EEPROM_LAST_WORD + 1,
+ eeprom_ptrs);
+ if (status) {
+ DEBUGOUT("Failed to read EEPROM image\n");
+ return status;
+ }
+ local_buffer = eeprom_ptrs;
+ } else {
+ if (buffer_size < IXGBE_EEPROM_LAST_WORD)
+ return IXGBE_ERR_PARAM;
+ local_buffer = buffer;
+ }
+
+ /*
+ * For X550 hardware include 0x0-0x41 in the checksum, skip the
+ * checksum word itself
+ */
+ for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
+ if (i != IXGBE_EEPROM_CHECKSUM)
+ checksum += local_buffer[i];
+
+ /*
+ * Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ pointer = local_buffer[i];
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ switch (i) {
+ case IXGBE_PCIE_GENERAL_PTR:
+ size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
+ break;
+ case IXGBE_PCIE_CONFIG0_PTR:
+ case IXGBE_PCIE_CONFIG1_PTR:
+ size = IXGBE_PCIE_CONFIG_SIZE;
+ break;
+ default:
+ size = 0;
+ break;
+ }
+
+ status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
+ buffer, buffer_size);
+ if (status)
+ return status;
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return (s32)checksum;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+ return ixgbe_calc_checksum_X550(hw, NULL, 0);
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+ &read_checksum);
+ if (status)
+ return status;
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Invalid EEPROM checksum");
+ }
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
+ return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to shadow RAM using EEWR register, software calculates
+ * checksum and updates the EEPROM and instructs the hardware to update
+ * the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum = 0;
+
+ DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ status = ixgbe_calc_eeprom_checksum_X550(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+ checksum);
+ if (status)
+ return status;
+
+ status = ixgbe_update_flash_X550(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
+ **/
+s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ union ixgbe_hic_hdr2 buffer;
+
+ DEBUGFUNC("ixgbe_update_flash_X550");
+
+ buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
+ buffer.req.buf_lenh = 0;
+ buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
+ buffer.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
+{
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u16 ext_ability = 0;
+
+ DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
+
+ hw->phy.ops.identify(hw);
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_x550em_kr:
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ if (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
+ physical_layer =
+ IXGBE_PHYSICAL_LAYER_2500BASE_KX;
+ break;
+ } else if (hw->device_id ==
+ IXGBE_DEV_ID_X550EM_A_KR_L) {
+ physical_layer =
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ }
+ }
+ /* fall through */
+ case ixgbe_phy_x550em_xfi:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ case ixgbe_phy_x550em_kx4:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ case ixgbe_phy_x550em_ext_t:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &ext_ability);
+ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ break;
+ case ixgbe_phy_fw:
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
+ break;
+ case ixgbe_phy_sgmii:
+#ifdef PREBOOT_SUPPORT
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
+ IXGBE_PHYSICAL_LAYER_100BASE_TX |
+ IXGBE_PHYSICAL_LAYER_10BASE_T;
+#else
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+#endif /* PREBOOT_SUPPORT */
+ break;
+ case ixgbe_phy_ext_1g_t:
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ break;
+ default:
+ break;
+ }
+
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
+ physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_get_bus_info_x550em - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets bus link width and speed to unknown because X550em is
+ * not a PCI device.
+ **/
+s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
+{
+
+ DEBUGFUNC("ixgbe_get_bus_info_x550em");
+
+ hw->bus.width = ixgbe_bus_width_unknown;
+ hw->bus.speed = ixgbe_bus_speed_unknown;
+
+ hw->mac.ops.set_lan_id(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_disable_rx_x550 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Enables the Rx DMA unit for x550
+ **/
+void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
+{
+ u32 rxctrl, pfdtxgswc;
+ s32 status;
+ struct ixgbe_hic_disable_rxen fw_cmd;
+
+ DEBUGFUNC("ixgbe_enable_rx_dma_x550");
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
+ fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
+ fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ fw_cmd.port_number = (u8)hw->bus.lan_id;
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(struct ixgbe_hic_disable_rxen),
+ IXGBE_HI_COMMAND_TIMEOUT, true);
+
+ /* If we fail - disable RX using register write */
+ if (status) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+ }
+}
+
+/**
+ * ixgbe_enter_lplu_x550em - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ **/
+s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
+{
+ u16 an_10g_cntl_reg, autoneg_reg, speed;
+ s32 status;
+ ixgbe_link_speed lcd_speed;
+ u32 save_autoneg;
+ bool link_up;
+
+ /* SW LPLU not required on later HW revisions. */
+ if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
+ (IXGBE_FUSES0_REV_MASK &
+ IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
+ return IXGBE_SUCCESS;
+
+ /* If blocked by MNG FW, then don't restart AN */
+ if (ixgbe_check_reset_blocked(hw))
+ return IXGBE_SUCCESS;
+
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability
+ * disabled, then force link down by entering low power mode.
+ */
+ if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
+ !(hw->wol_enabled || ixgbe_mng_present(hw)))
+ return ixgbe_set_copper_phy_power(hw, FALSE);
+
+ /* Determine LCD */
+ status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If no valid LCD link speed, then force link down and exit. */
+ if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ return ixgbe_set_copper_phy_power(hw, FALSE);
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &speed);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If no link now, speed is invalid so take link down */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status != IXGBE_SUCCESS)
+ return ixgbe_set_copper_phy_power(hw, false);
+
+ /* clear everything but the speed bits */
+ speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
+
+ /* If current speed is already LCD, then exit. */
+ if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
+ (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
+ ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
+ (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
+ return status;
+
+ /* Clear AN completed indication */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &an_10g_cntl_reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ save_autoneg = hw->phy.autoneg_advertised;
+
+ /* Setup link at least common link speed */
+ status = hw->mac.ops.setup_link(hw, lcd_speed, false);
+
+ /* restore autoneg from before setting lplu speed */
+ hw->phy.autoneg_advertised = save_autoneg;
+
+ return status;
+}
+
+/**
+ * ixgbe_get_lcd_x550em - Determine lowest common denominator
+ * @hw: pointer to hardware structure
+ * @lcd_speed: pointer to lowest common link speed
+ *
+ * Determine lowest common link speed with link partner.
+ **/
+s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
+{
+ u16 an_lp_status;
+ s32 status;
+ u16 word = hw->eeprom.ctrl_word_3;
+
+ *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &an_lp_status);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If link partner advertised 1G, return 1G */
+ if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
+ *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
+ return status;
+ }
+
+ /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
+ if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
+ (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
+ return status;
+
+ /* Link partner not capable of lower speeds, return 10G */
+ *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ return status;
+}
+
+/**
+ * ixgbe_setup_fc_X550em - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 pause, asm_dir, reg_val;
+
+ DEBUGFUNC("ixgbe_setup_fc_X550em");
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /* Determine PAUSE and ASM_DIR bits. */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ pause = 0;
+ asm_dir = 0;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause = 0;
+ asm_dir = 1;
+ break;
+ case ixgbe_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ case ixgbe_fc_full:
+ pause = 1;
+ asm_dir = 1;
+ break;
+ default:
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+ "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+ reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ if (pause)
+ reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
+ if (asm_dir)
+ reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* This device does not fully support AN. */
+ hw->fc.disable_fc_autoneg = true;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->fc.disable_fc_autoneg = true;
+ break;
+ default:
+ break;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
+{
+ u32 link_s1, lp_an_page_low, an_cntl_1;
+ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ /* AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "Flow control autoneg is disabled");
+ goto out;
+ }
+
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up) {
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
+ goto out;
+ }
+
+ /* Check at auto-negotiation has completed */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_S1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
+
+ if (status != IXGBE_SUCCESS ||
+ (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ /* Read the 10g AN autoc and LP ability registers and resolve
+ * local flow control settings accordingly
+ */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ goto out;
+ }
+
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ goto out;
+ }
+
+ status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
+ IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
+
+out:
+ if (status == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
+{
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+}
+
+/**
+ * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ /* AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "Flow control autoneg is disabled");
+ goto out;
+ }
+
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up) {
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
+ goto out;
+ }
+
+ /* Check if auto-negotiation has completed */
+ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
+ if (status != IXGBE_SUCCESS ||
+ !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ /* Negotiate the flow control */
+ status = ixgbe_negotiate_fc(hw, info[0], info[0],
+ FW_PHY_ACT_GET_LINK_INFO_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_FC_TX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
+
+out:
+ if (status == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 an_cntl = 0;
+
+ DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /* Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do FC autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
+ */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ return status;
+ }
+
+ /* The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ break;
+ default:
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+ "Flow control param set incorrectly\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
+
+ /* Restart auto-negotiation. */
+ status = ixgbe_restart_an_internal_phy_x550em(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_set_mux - Set mux for port 1 access with CS4227
+ * @hw: pointer to hardware structure
+ * @state: set mux if 1, clear if 0
+ */
+STATIC void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
+{
+ u32 esdp;
+
+ if (!hw->bus.lan_id)
+ return;
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (state)
+ esdp |= IXGBE_ESDP_SDP1;
+ else
+ esdp &= ~IXGBE_ESDP_SDP1;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and sets the I2C MUX
+ **/
+s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
+
+ status = ixgbe_acquire_swfw_sync_X540(hw, mask);
+ if (status)
+ return status;
+
+ if (mask & IXGBE_GSSR_I2C_MASK)
+ ixgbe_set_mux(hw, 1);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and sets the I2C MUX
+ **/
+void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+{
+ DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
+
+ if (mask & IXGBE_GSSR_I2C_MASK)
+ ixgbe_set_mux(hw, 0);
+
+ ixgbe_release_swfw_sync_X540(hw, mask);
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and get the shared phy token as needed
+ */
+STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+ int retries = FW_PHY_TOKEN_RETRIES;
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
+
+ while (--retries) {
+ status = IXGBE_SUCCESS;
+ if (hmask)
+ status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
+ if (status) {
+ DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
+ status);
+ return status;
+ }
+ if (!(mask & IXGBE_GSSR_TOKEN_SM))
+ return IXGBE_SUCCESS;
+
+ status = ixgbe_get_phy_token(hw);
+ if (status == IXGBE_ERR_TOKEN_RETRY)
+ DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
+ status);
+
+ if (status == IXGBE_SUCCESS)
+ return IXGBE_SUCCESS;
+
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+
+ if (status != IXGBE_ERR_TOKEN_RETRY) {
+ DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
+ status);
+ return status;
+ }
+ }
+
+ DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
+ hw->phy.id);
+ return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and puts the shared phy token as needed
+ */
+STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+
+ DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
+
+ if (mask & IXGBE_GSSR_TOKEN_SM)
+ ixgbe_put_phy_token(hw);
+
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+}
+
+/**
+ * ixgbe_read_phy_reg_x550a - Reads specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register using the SWFW lock and PHY
+ * Token. The PHY Token is needed since the MDIO is shared between to MAC
+ * instances.
+ **/
+s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ s32 status;
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+
+ DEBUGFUNC("ixgbe_read_phy_reg_x550a");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_phy_reg_x550a - Writes specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register using the SWFW lock and PHY Token.
+ * The PHY Token is needed since the MDIO is shared between to MAC instances.
+ **/
+s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ s32 status;
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+
+ DEBUGFUNC("ixgbe_write_phy_reg_x550a");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
+ status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
+ hw->mac.ops.release_swfw_sync(hw, mask);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
+ * @hw: pointer to hardware structure
+ *
+ * Handle external Base T PHY interrupt. If high temperature
+ * failure alarm then return error, else if link status change
+ * then setup internal/external PHY link
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ */
+s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+ bool lsc;
+ u32 status;
+
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (lsc)
+ return ixgbe_setup_internal_phy(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Setup internal/external PHY link speed based on link speed, then set
+ * external PHY auto advertised link speed.
+ *
+ * Returns error status for any failure
+ **/
+s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 status;
+ ixgbe_link_speed force_speed;
+
+ DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
+
+ /* Setup internal/external PHY link speed to iXFI (10G), unless
+ * only 1G is auto advertised then setup KX link.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ force_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ force_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If X552 and internal link mode is XFI, then setup XFI internal link.
+ */
+ if (hw->mac.type == ixgbe_mac_X550EM_x &&
+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
+}
+
+/**
+ * ixgbe_check_link_t_X550em - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Check that both the MAC and X557 external PHY have link.
+ **/
+s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ u32 status;
+ u16 i, autoneg_status = 0;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+ return IXGBE_ERR_CONFIG;
+
+ status = ixgbe_check_mac_link_generic(hw, speed, link_up,
+ link_up_wait_to_complete);
+
+ /* If check link fails or MAC link is not up, then return */
+ if (status != IXGBE_SUCCESS || !(*link_up))
+ return status;
+
+ /* MAC link is up, so check external PHY link.
+ * X557 PHY. Link status is latching low, and can only be used to detect
+ * link drop, and not the current status of the link without performing
+ * back-to-back reads.
+ */
+ for (i = 0; i < 2; i++) {
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+
+ /* If external PHY link is not up, then indicate link not up */
+ if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
+ *link_up = false;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ status = ixgbe_reset_phy_generic(hw);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Configure Link Status Alarm and Temperature Threshold interrupts */
+ return ixgbe_enable_lasi_ext_t_x550em(hw);
+}
+
+/**
+ * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @led_idx: led number to turn on
+ **/
+s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
+{
+ u16 phy_data;
+
+ DEBUGFUNC("ixgbe_led_on_t_X550em");
+
+ if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
+ return IXGBE_ERR_PARAM;
+
+ /* To turn on the LED, set mode to ON. */
+ ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
+ phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
+ ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
+
+ /* Some designs have the LEDs wired to the MAC */
+ return ixgbe_led_on_generic(hw, led_idx);
+}
+
+/**
+ * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @led_idx: led number to turn off
+ **/
+s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
+{
+ u16 phy_data;
+
+ DEBUGFUNC("ixgbe_led_off_t_X550em");
+
+ if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
+ return IXGBE_ERR_PARAM;
+
+ /* To turn on the LED, set mode to ON. */
+ ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
+ phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
+ ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
+
+ /* Some designs have the LEDs wired to the MAC */
+ return ixgbe_led_off_generic(hw, led_idx);
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ *
+ * Sends driver version number to firmware through the manageability
+ * block. On success return IXGBE_SUCCESS
+ * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len, const char *driver_ver)
+{
+ struct ixgbe_hic_drv_info2 fw_cmd;
+ s32 ret_val = IXGBE_SUCCESS;
+ int i;
+
+ DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
+
+ if ((len == 0) || (driver_ver == NULL) ||
+ (len > sizeof(fw_cmd.driver_string)))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ memcpy(fw_cmd.driver_string, driver_ver, len);
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(fw_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (ret_val != IXGBE_SUCCESS)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = IXGBE_SUCCESS;
+ else
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ return ret_val;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.h
new file mode 100644
index 00000000..6d188741
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.h
@@ -0,0 +1,124 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_X550_H_
+#define _IXGBE_X550_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw);
+
+s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw);
+s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw);
+s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw);
+s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size);
+s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw);
+s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data);
+s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 data);
+s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data);
+s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
+u16 *data);
+s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 data);
+void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
+ unsigned int pool);
+void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
+ bool enable, int vf);
+s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 data);
+s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *data);
+s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 ver, u16 len, const char *str);
+s32 ixgbe_get_phy_token(struct ixgbe_hw *);
+s32 ixgbe_put_phy_token(struct ixgbe_hw *);
+s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 data);
+s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *data);
+void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw);
+void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw);
+void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap);
+void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf);
+enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
+void ixgbe_disable_rx_x550(struct ixgbe_hw *hw);
+s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed);
+s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask);
+s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data);
+s32 ixgbe_setup_fc_fiber_x550em_a(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw);
+s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx);
+s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx);
+#endif /* _IXGBE_X550_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/meson.build b/src/spdk/dpdk/drivers/net/ixgbe/base/meson.build
new file mode 100644
index 00000000..3147e110
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/base/meson.build
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = [
+ 'ixgbe_82598.c',
+ 'ixgbe_82599.c',
+ 'ixgbe_api.c',
+ 'ixgbe_common.c',
+ 'ixgbe_dcb_82598.c',
+ 'ixgbe_dcb_82599.c',
+ 'ixgbe_dcb.c',
+ 'ixgbe_hv_vf.c',
+ 'ixgbe_mbx.c',
+ 'ixgbe_phy.c',
+ 'ixgbe_vf.c',
+ 'ixgbe_x540.c',
+ 'ixgbe_x550.c'
+]
+
+error_cflags = ['-Wno-unused-value',
+ '-Wno-unused-but-set-variable']
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('ixgbe_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_82599_bypass.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_82599_bypass.c
new file mode 100644
index 00000000..b16ebd0a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_82599_bypass.c
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_82599.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_common.h"
+#include "base/ixgbe_phy.h"
+#include "ixgbe_bypass_defines.h"
+#include "ixgbe_bypass.h"
+
+/**
+ * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * We set the module speed differently for fixed fiber. For other
+ * multi-speed devices we don't have an error value so here if we
+ * detect an error we just log it and exit.
+ */
+static void
+ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
+{
+ s32 status;
+ u8 rs, eeprom_data;
+
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ /* one bit mask same as setting on */
+ rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid fixed module speed");
+ return;
+ }
+
+ /* Set RS0 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS0");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS0");
+ goto out;
+ }
+
+ /* Set RS1 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS1");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS1");
+ goto out;
+ }
+out:
+ return;
+}
+
+/**
+ * ixgbe_setup_mac_link_multispeed_fixed_fiber - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+static s32
+ixgbe_setup_mac_link_multispeed_fixed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = IXGBE_SUCCESS;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 speedcnt = 0;
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ u32 i = 0;
+ bool link_up = false;
+ bool negotiation;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Mask off requested but non-supported speeds */
+ status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ speed &= link_speed;
+
+ /*
+ * Try each speed one by one, highest priority first. We do this in
+ * software because 10gb fiber doesn't support speed autonegotiation.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ speedcnt++;
+ highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+ goto out;
+ /* Set the module link speed */
+ ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_10GB_FULL);
+
+ /* Set the module link speed */
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Allow module to change analog characteristics (1G->10G) */
+ msec_delay(40);
+
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Flap the tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted. 82599 uses the same timing for 10g SFI.
+ */
+ for (i = 0; i < 5; i++) {
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed,
+ &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ speedcnt++;
+ if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_1GB_FULL);
+
+ /* Allow module to change analog characteristics (10G->1G) */
+ msec_delay(40);
+
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Flap the tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+
+ /*
+ * We didn't get link. Configure back to the highest speed we tried,
+ * (if there was more than one). We call ourselves back with just the
+ * single highest speed that the user requested.
+ */
+ if (speedcnt > 1)
+ status = ixgbe_setup_mac_link_multispeed_fixed_fiber(hw,
+ highest_link_speed, autoneg_wait_to_complete);
+
+out:
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ return status;
+}
+
+static enum ixgbe_media_type
+ixgbe_bypass_get_media_type(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+ media_type = ixgbe_media_type_fiber;
+ } else {
+ media_type = ixgbe_get_media_type_82599(hw);
+ }
+ return media_type;
+}
+
+/*
+ * Wrapper around shared code (base driver) to support BYPASS nic.
+ */
+s32
+ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw)
+{
+ s32 ret_val;
+
+ if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+ hw->mac.type = ixgbe_mac_82599EB;
+ }
+
+ ret_val = ixgbe_init_shared_code(hw);
+ if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+ hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
+ ixgbe_init_mac_link_ops_82599(hw);
+ }
+
+ return ret_val;
+}
+
+s32
+ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
+{
+ int rc;
+
+ rc = ixgbe_init_hw(hw);
+ if (rc == 0 && hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+
+ hw->mac.ops.setup_link =
+ &ixgbe_setup_mac_link_multispeed_fixed_fiber;
+
+ hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
+
+ hw->mac.ops.disable_tx_laser = NULL;
+ hw->mac.ops.enable_tx_laser = NULL;
+ hw->mac.ops.flap_tx_laser = NULL;
+ }
+
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.c
new file mode 100644
index 00000000..ae38ce35
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.c
@@ -0,0 +1,386 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <time.h>
+#include <rte_atomic.h>
+#include <rte_ethdev_driver.h>
+#include "ixgbe_ethdev.h"
+#include "ixgbe_bypass_api.h"
+#include "rte_pmd_ixgbe.h"
+
+#define BYPASS_STATUS_OFF_MASK 3
+
+/* Macros to check for invlaid function pointers. */
+#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
+ if ((func) == NULL) { \
+ PMD_DRV_LOG(ERR, "%s:%d function not supported", \
+ __func__, __LINE__); \
+ return retval; \
+ } \
+} while (0)
+
+#define FUNC_PTR_OR_RET(func) do { \
+ if ((func) == NULL) { \
+ PMD_DRV_LOG(ERR, "%s:%d function not supported", \
+ __func__, __LINE__); \
+ return; \
+ } \
+} while (0)
+
+
+/**
+ * ixgbe_bypass_set_time - Set bypass FW time epoc.
+ *
+ * @hw: pointer to hardware structure
+ *
+ * This function with sync the FW date stamp with that of the
+ * system clock.
+ **/
+static void
+ixgbe_bypass_set_time(struct ixgbe_adapter *adapter)
+{
+ u32 mask, value;
+ u32 sec;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ sec = 0;
+
+ /*
+ * Send the FW our current time and turn on time_valid and
+ * timer_reset bits.
+ */
+ mask = BYPASS_CTL1_TIME_M |
+ BYPASS_CTL1_VALID_M |
+ BYPASS_CTL1_OFFTRST_M;
+ value = (sec & BYPASS_CTL1_TIME_M) |
+ BYPASS_CTL1_VALID |
+ BYPASS_CTL1_OFFTRST;
+
+ FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set);
+
+ /* Store FW reset time (in seconds from epoch). */
+ adapter->bps.reset_tm = time(NULL);
+
+ /* reset FW timer. */
+ adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL1, mask, value);
+}
+
+/**
+ * ixgbe_bypass_init - Make some environment changes for bypass
+ *
+ * @adapter: pointer to ixgbe_adapter structure for access to state bits
+ *
+ * This function collects all the modifications needed by the bypass
+ * driver.
+ **/
+void
+ixgbe_bypass_init(struct rte_eth_dev *dev)
+{
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_hw *hw;
+
+ adapter = IXGBE_DEV_TO_ADPATER(dev);
+ hw = &adapter->hw;
+
+ /* Only allow BYPASS ops on the first port */
+ if (hw->device_id != IXGBE_DEV_ID_82599_BYPASS ||
+ hw->bus.func != 0) {
+ PMD_DRV_LOG(ERR, "bypass function is not supported on that device");
+ return;
+ }
+
+ /* set bypass ops. */
+ adapter->bps.ops.bypass_rw = &ixgbe_bypass_rw_generic;
+ adapter->bps.ops.bypass_valid_rd = &ixgbe_bypass_valid_rd_generic;
+ adapter->bps.ops.bypass_set = &ixgbe_bypass_set_generic;
+ adapter->bps.ops.bypass_rd_eep = &ixgbe_bypass_rd_eep_generic;
+
+ /* set the time for logging. */
+ ixgbe_bypass_set_time(adapter);
+
+ /* Don't have the SDP to the laser */
+ hw->mac.ops.disable_tx_laser = NULL;
+ hw->mac.ops.enable_tx_laser = NULL;
+ hw->mac.ops.flap_tx_laser = NULL;
+}
+
+s32
+ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state)
+{
+ struct ixgbe_hw *hw;
+ s32 ret_val;
+ u32 cmd;
+ u32 by_ctl = 0;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+
+ cmd = BYPASS_PAGE_CTL0;
+ ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
+
+ /* Assume bypass_rw didn't error out, if it did state will
+ * be ignored anyway.
+ */
+ *state = (by_ctl >> BYPASS_STATUS_OFF_SHIFT) & BYPASS_STATUS_OFF_MASK;
+
+ return ret_val;
+}
+
+
+s32
+ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state)
+{
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+ struct ixgbe_hw *hw;
+ s32 ret_val;
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
+
+ /* Set the new state */
+ ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
+ BYPASS_MODE_OFF_M, *new_state);
+ if (ret_val)
+ goto exit;
+
+ /* Set AUTO back on so FW can receive events */
+ ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
+ BYPASS_MODE_OFF_M, BYPASS_AUTO);
+
+exit:
+ return ret_val;
+
+}
+
+s32
+ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event,
+ u32 *state)
+{
+ struct ixgbe_hw *hw;
+ s32 ret_val;
+ u32 shift;
+ u32 cmd;
+ u32 by_ctl = 0;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+
+ cmd = BYPASS_PAGE_CTL0;
+ ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
+
+ /* Assume bypass_rw didn't error out, if it did event will
+ * be ignored anyway.
+ */
+ switch (event) {
+ case BYPASS_EVENT_WDT_TO:
+ shift = BYPASS_WDTIMEOUT_SHIFT;
+ break;
+ case BYPASS_EVENT_MAIN_ON:
+ shift = BYPASS_MAIN_ON_SHIFT;
+ break;
+ case BYPASS_EVENT_MAIN_OFF:
+ shift = BYPASS_MAIN_OFF_SHIFT;
+ break;
+ case BYPASS_EVENT_AUX_ON:
+ shift = BYPASS_AUX_ON_SHIFT;
+ break;
+ case BYPASS_EVENT_AUX_OFF:
+ shift = BYPASS_AUX_OFF_SHIFT;
+ break;
+ default:
+ return EINVAL;
+ }
+
+ *state = (by_ctl >> shift) & 0x3;
+
+ return ret_val;
+}
+
+s32
+ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event,
+ u32 state)
+{
+ struct ixgbe_hw *hw;
+ u32 status;
+ u32 off;
+ s32 ret_val;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
+
+ switch (event) {
+ case BYPASS_EVENT_WDT_TO:
+ off = BYPASS_WDTIMEOUT_M;
+ status = state << BYPASS_WDTIMEOUT_SHIFT;
+ break;
+ case BYPASS_EVENT_MAIN_ON:
+ off = BYPASS_MAIN_ON_M;
+ status = state << BYPASS_MAIN_ON_SHIFT;
+ break;
+ case BYPASS_EVENT_MAIN_OFF:
+ off = BYPASS_MAIN_OFF_M;
+ status = state << BYPASS_MAIN_OFF_SHIFT;
+ break;
+ case BYPASS_EVENT_AUX_ON:
+ off = BYPASS_AUX_ON_M;
+ status = state << BYPASS_AUX_ON_SHIFT;
+ break;
+ case BYPASS_EVENT_AUX_OFF:
+ off = BYPASS_AUX_OFF_M;
+ status = state << BYPASS_AUX_OFF_SHIFT;
+ break;
+ default:
+ return EINVAL;
+ }
+
+ ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
+ off, status);
+
+ return ret_val;
+}
+
+s32
+ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout)
+{
+ struct ixgbe_hw *hw;
+ u32 status;
+ u32 mask;
+ s32 ret_val;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
+
+ /* disable the timer with timeout of zero */
+ if (timeout == RTE_PMD_IXGBE_BYPASS_TMT_OFF) {
+ status = 0x0; /* WDG enable off */
+ mask = BYPASS_WDT_ENABLE_M;
+ } else {
+ /* set time out value */
+ mask = BYPASS_WDT_VALUE_M;
+
+ /* enable the timer */
+ status = timeout << BYPASS_WDT_TIME_SHIFT;
+ status |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
+ mask |= BYPASS_WDT_ENABLE_M;
+ }
+
+ ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
+ mask, status);
+
+ return ret_val;
+}
+
+s32
+ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver)
+{
+ struct ixgbe_hw *hw;
+ u32 cmd;
+ u32 status;
+ s32 ret_val;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+
+ cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
+ cmd |= (BYPASS_EEPROM_VER_ADD << BYPASS_CTL2_OFFSET_SHIFT) &
+ BYPASS_CTL2_OFFSET_M;
+ ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
+ if (ret_val)
+ goto exit;
+
+ /* wait for the write to stick */
+ msleep(100);
+
+ /* Now read the results */
+ cmd &= ~BYPASS_WE;
+ ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
+ if (ret_val)
+ goto exit;
+
+ *ver = status & BYPASS_CTL2_DATA_M; /* only one byte of date */
+
+exit:
+ return ret_val;
+}
+
+s32
+ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout)
+{
+ struct ixgbe_hw *hw;
+ u32 by_ctl = 0;
+ u32 cmd;
+ u32 wdg;
+ s32 ret_val;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+
+ cmd = BYPASS_PAGE_CTL0;
+ ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
+
+ wdg = by_ctl & BYPASS_WDT_ENABLE_M;
+ if (!wdg)
+ *wd_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
+ else
+ *wd_timeout = (by_ctl >> BYPASS_WDT_TIME_SHIFT) &
+ BYPASS_WDT_MASK;
+
+ return ret_val;
+}
+
+s32
+ixgbe_bypass_wd_reset(struct rte_eth_dev *dev)
+{
+ u32 cmd;
+ u32 status;
+ u32 sec;
+ u32 count = 0;
+ s32 ret_val;
+ struct ixgbe_hw *hw;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_valid_rd, -ENOTSUP);
+
+ /* Use the lower level bit-bang functions since we don't need
+ * to read the register first to get it's current state as we
+ * are setting every thing in this write.
+ */
+ /* Set up WD pet */
+ cmd = BYPASS_PAGE_CTL1 | BYPASS_WE | BYPASS_CTL1_WDT_PET;
+
+ /* Resync the FW time while writing to CTL1 anyway */
+ adapter->bps.reset_tm = time(NULL);
+ sec = 0;
+
+ cmd |= (sec & BYPASS_CTL1_TIME_M) | BYPASS_CTL1_VALID;
+
+ /* reset FW timer offset since we are resetting the clock */
+ cmd |= BYPASS_CTL1_OFFTRST;
+
+ ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
+
+ /* Read until it matches what we wrote, or we time out */
+ do {
+ if (count++ > 10) {
+ ret_val = IXGBE_BYPASS_FW_WRITE_FAILURE;
+ break;
+ }
+
+ if (adapter->bps.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &status)) {
+ ret_val = IXGBE_ERR_INVALID_ARGUMENT;
+ break;
+ }
+ } while (!adapter->bps.ops.bypass_valid_rd(cmd, status));
+
+ return ret_val;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.h
new file mode 100644
index 00000000..92befad5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _IXGBE_BYPASS_H_
+#define _IXGBE_BYPASS_H_
+
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
+
+struct ixgbe_bypass_mac_ops {
+ s32 (*bypass_rw)(struct ixgbe_hw *hw, u32 cmd, u32 *status);
+ bool (*bypass_valid_rd)(u32 in_reg, u32 out_reg);
+ s32 (*bypass_set)(struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
+ s32 (*bypass_rd_eep)(struct ixgbe_hw *hw, u32 addr, u8 *value);
+};
+
+struct ixgbe_bypass_info {
+ uint64_t reset_tm;
+ struct ixgbe_bypass_mac_ops ops;
+};
+
+struct rte_eth_dev;
+
+void ixgbe_bypass_init(struct rte_eth_dev *dev);
+s32 ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state);
+s32 ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state);
+s32 ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event, u32 *state);
+s32 ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event, u32 state);
+s32 ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout);
+s32 ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver);
+s32 ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout);
+s32 ixgbe_bypass_wd_reset(struct rte_eth_dev *dev);
+
+s32 ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw);
+s32 ixgbe_bypass_init_hw(struct ixgbe_hw *hw);
+
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
+
+#endif /* _IXGBE_BYPASS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_api.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_api.h
new file mode 100644
index 00000000..8eb77339
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_api.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _IXGBE_BYPASS_API_H_
+#define _IXGBE_BYPASS_API_H_
+
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
+
+#include "ixgbe_bypass_defines.h"
+/**
+ * ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
+ *
+ * @hw: pointer to hardware structure
+ * @cmd: Command we send to the FW
+ * @status: The reply from the FW
+ *
+ * Bit-bangs the cmd to the by_pass FW status points to what is returned.
+ **/
+#define IXGBE_BYPASS_BB_WAIT 1
+static s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
+{
+ int i;
+ u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
+ u32 esdp;
+
+ if (!status)
+ return IXGBE_ERR_PARAM;
+
+ *status = 0;
+
+ /* SDP vary by MAC type */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ sck = IXGBE_ESDP_SDP7;
+ sdi = IXGBE_ESDP_SDP0;
+ sdo = IXGBE_ESDP_SDP6;
+ dir_sck = IXGBE_ESDP_SDP7_DIR;
+ dir_sdi = IXGBE_ESDP_SDP0_DIR;
+ dir_sdo = IXGBE_ESDP_SDP6_DIR;
+ break;
+ case ixgbe_mac_X540:
+ sck = IXGBE_ESDP_SDP2;
+ sdi = IXGBE_ESDP_SDP0;
+ sdo = IXGBE_ESDP_SDP1;
+ dir_sck = IXGBE_ESDP_SDP2_DIR;
+ dir_sdi = IXGBE_ESDP_SDP0_DIR;
+ dir_sdo = IXGBE_ESDP_SDP1_DIR;
+ break;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ sck = IXGBE_ESDP_SDP2;
+ sdi = IXGBE_ESDP_SDP0;
+ sdo = IXGBE_ESDP_SDP1;
+ dir_sck = IXGBE_ESDP_SDP2_DIR;
+ dir_sdi = IXGBE_ESDP_SDP0_DIR;
+ dir_sdo = IXGBE_ESDP_SDP1_DIR;
+ break;
+ default:
+ return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ /* Set SDP pins direction */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= dir_sck; /* SCK as output */
+ esdp |= dir_sdi; /* SDI as output */
+ esdp &= ~dir_sdo; /* SDO as input */
+ esdp |= sck;
+ esdp |= sdi;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ // TODO:
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ /* Generate start condition */
+ esdp &= ~sdi;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ esdp &= ~sck;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ /* Clock out the new control word and clock in the status */
+ for (i = 0; i < 32; i++) {
+ if ((cmd >> (31 - i)) & 0x01) {
+ esdp |= sdi;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ } else {
+ esdp &= ~sdi;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ }
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ esdp |= sck;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ esdp &= ~sck;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & sdo)
+ *status = (*status << 1) | 0x01;
+ else
+ *status = (*status << 1) | 0x00;
+ msleep(IXGBE_BYPASS_BB_WAIT);
+ }
+
+ /* stop condition */
+ esdp |= sck;
+ esdp &= ~sdi;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ esdp |= sdi;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* set the page bits to match the cmd that the status it belongs to */
+ *status = (*status & 0x3fffffff) | (cmd & 0xc0000000);
+
+ return 0;
+}
+
+/**
+ * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
+ *
+ * If we send a write we can't be sure it took until we can read back
+ * that same register. It can be a problem as some of the feilds may
+ * for valid reasons change between the time wrote the register and
+ * we read it again to verify. So this function check everything we
+ * can check and then assumes it worked.
+ *
+ * @u32 in_reg - The register cmd for the bit-bang read.
+ * @u32 out_reg - The register returned from a bit-bang read.
+ **/
+static bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
+{
+ u32 mask;
+
+ /* Page must match for all control pages */
+ if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
+ return false;
+
+ switch (in_reg & BYPASS_PAGE_M) {
+ case BYPASS_PAGE_CTL0:
+ /* All the following can't change since the last write
+ * - All the event actions
+ * - The timeout value
+ */
+ mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
+ BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
+ BYPASS_WDTIMEOUT_M |
+ BYPASS_WDT_VALUE_M;
+ if ((out_reg & mask) != (in_reg & mask))
+ return false;
+
+ /* 0x0 is never a valid value for bypass status */
+ if (!(out_reg & BYPASS_STATUS_OFF_M))
+ return false;
+ break;
+ case BYPASS_PAGE_CTL1:
+ /* All the following can't change since the last write
+ * - time valid bit
+ * - time we last sent
+ */
+ mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
+ if ((out_reg & mask) != (in_reg & mask))
+ return false;
+ break;
+ case BYPASS_PAGE_CTL2:
+ /* All we can check in this page is control number
+ * which is already done above.
+ */
+ break;
+ }
+
+ /* We are as sure as we can be return true */
+ return true;
+}
+
+/**
+ * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter.
+ *
+ * @hw: pointer to hardware structure
+ * @cmd: The control word we are setting.
+ * @event: The event we are setting in the FW. This also happens to
+ * be the mask for the event we are setting (handy)
+ * @action: The action we set the event to in the FW. This is in a
+ * bit field that happens to be what we want to put in
+ * the event spot (also handy)
+ **/
+static s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
+ u32 action)
+{
+ u32 by_ctl = 0;
+ u32 cmd, verify;
+ u32 count = 0;
+
+ /* Get current values */
+ cmd = ctrl; /* just reading only need control number */
+ if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ /* Set to new action */
+ cmd = (by_ctl & ~event) | BYPASS_WE | action;
+ if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ /* Page 0 force a FW eeprom write which is slow so verify */
+ if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
+ verify = BYPASS_PAGE_CTL0;
+ do {
+ if (count++ > 5)
+ return IXGBE_BYPASS_FW_WRITE_FAILURE;
+
+ if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ } while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
+ } else {
+ /* We have give the FW time for the write to stick */
+ msleep(100);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom address.
+ *
+ * @hw: pointer to hardware structure
+ * @addr: The bypass eeprom address to read.
+ * @value: The 8b of data at the address above.
+ **/
+static s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
+{
+ u32 cmd;
+ u32 status;
+
+
+ /* send the request */
+ cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
+ cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
+ if (ixgbe_bypass_rw_generic(hw, cmd, &status))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ /* We have give the FW time for the write to stick */
+ msleep(100);
+
+ /* now read the results */
+ cmd &= ~BYPASS_WE;
+ if (ixgbe_bypass_rw_generic(hw, cmd, &status))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ *value = status & BYPASS_CTL2_DATA_M;
+
+ return 0;
+}
+
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
+
+#endif /* _IXGBE_BYPASS_API_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_defines.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_defines.h
new file mode 100644
index 00000000..7740546b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_defines.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _IXGBE_BYPASS_DEFINES_H_
+#define _IXGBE_BYPASS_DEFINES_H_
+
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
+
+#define msleep(x) rte_delay_us(x*1000)
+#define usleep_range(min, max) rte_delay_us(min)
+
+#define BYPASS_PAGE_CTL0 0x00000000
+#define BYPASS_PAGE_CTL1 0x40000000
+#define BYPASS_PAGE_CTL2 0x80000000
+#define BYPASS_PAGE_M 0xc0000000
+#define BYPASS_WE 0x20000000
+
+#define BYPASS_AUTO 0x0
+#define BYPASS_NOP 0x0
+#define BYPASS_NORM 0x1
+#define BYPASS_BYPASS 0x2
+#define BYPASS_ISOLATE 0x3
+
+#define BYPASS_EVENT_MAIN_ON 0x1
+#define BYPASS_EVENT_AUX_ON 0x2
+#define BYPASS_EVENT_MAIN_OFF 0x3
+#define BYPASS_EVENT_AUX_OFF 0x4
+#define BYPASS_EVENT_WDT_TO 0x5
+#define BYPASS_EVENT_USR 0x6
+
+#define BYPASS_MODE_OFF_M 0x00000003
+#define BYPASS_STATUS_OFF_M 0x0000000c
+#define BYPASS_AUX_ON_M 0x00000030
+#define BYPASS_MAIN_ON_M 0x000000c0
+#define BYPASS_MAIN_OFF_M 0x00000300
+#define BYPASS_AUX_OFF_M 0x00000c00
+#define BYPASS_WDTIMEOUT_M 0x00003000
+#define BYPASS_WDT_ENABLE_M 0x00004000
+#define BYPASS_WDT_VALUE_M 0x00070000
+
+#define BYPASS_MODE_OFF_SHIFT 0
+#define BYPASS_STATUS_OFF_SHIFT 2
+#define BYPASS_AUX_ON_SHIFT 4
+#define BYPASS_MAIN_ON_SHIFT 6
+#define BYPASS_MAIN_OFF_SHIFT 8
+#define BYPASS_AUX_OFF_SHIFT 10
+#define BYPASS_WDTIMEOUT_SHIFT 12
+#define BYPASS_WDT_ENABLE_SHIFT 14
+#define BYPASS_WDT_TIME_SHIFT 16
+
+#define BYPASS_WDT_1 0x0
+#define BYPASS_WDT_1_5 0x1
+#define BYPASS_WDT_2 0x2
+#define BYPASS_WDT_3 0x3
+#define BYPASS_WDT_4 0x4
+#define BYPASS_WDT_8 0x5
+#define BYPASS_WDT_16 0x6
+#define BYPASS_WDT_32 0x7
+#define BYPASS_WDT_OFF 0xffff
+
+#define BYPASS_WDT_MASK 0x7
+
+#define BYPASS_CTL1_TIME_M 0x01ffffff
+#define BYPASS_CTL1_VALID_M 0x02000000
+#define BYPASS_CTL1_OFFTRST_M 0x04000000
+#define BYPASS_CTL1_WDT_PET_M 0x08000000
+
+#define BYPASS_CTL1_VALID 0x02000000
+#define BYPASS_CTL1_OFFTRST 0x04000000
+#define BYPASS_CTL1_WDT_PET 0x08000000
+
+#define BYPASS_CTL2_DATA_M 0x000000ff
+#define BYPASS_CTL2_OFFSET_M 0x0000ff00
+#define BYPASS_CTL2_RW_M 0x00010000
+#define BYPASS_CTL2_HEAD_M 0x0ff00000
+
+#define BYPASS_CTL2_OFFSET_SHIFT 8
+#define BYPASS_CTL2_HEAD_SHIFT 20
+
+#define BYPASS_CTL2_RW 0x00010000
+
+enum ixgbe_state_t {
+ __IXGBE_TESTING,
+ __IXGBE_RESETTING,
+ __IXGBE_DOWN,
+ __IXGBE_SERVICE_SCHED,
+ __IXGBE_IN_SFP_INIT,
+ __IXGBE_IN_BYPASS_LOW,
+ __IXGBE_IN_BYPASS_HIGH,
+ __IXGBE_IN_BYPASS_LOG,
+};
+
+#define BYPASS_MAX_LOGS 43
+#define BYPASS_LOG_SIZE 5
+#define BYPASS_LOG_LINE_SIZE 37
+
+#define BYPASS_EEPROM_VER_ADD 0x02
+
+#define BYPASS_LOG_TIME_M 0x01ffffff
+#define BYPASS_LOG_TIME_VALID_M 0x02000000
+#define BYPASS_LOG_HEAD_M 0x04000000
+#define BYPASS_LOG_CLEAR_M 0x08000000
+#define BYPASS_LOG_EVENT_M 0xf0000000
+#define BYPASS_LOG_ACTION_M 0x03
+
+#define BYPASS_LOG_EVENT_SHIFT 28
+#define BYPASS_LOG_CLEAR_SHIFT 24 /* bit offset */
+#define IXGBE_DEV_TO_ADPATER(dev) \
+ ((struct ixgbe_adapter *)(dev->data->dev_private))
+
+/* extractions from ixgbe_phy.h */
+#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
+
+#define IXGBE_SFF_SFF_8472_SWAP 0x5C
+#define IXGBE_SFF_SFF_8472_COMP 0x5E
+#define IXGBE_SFF_SFF_8472_OSCB 0x6E
+#define IXGBE_SFF_SFF_8472_ESCB 0x76
+
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
+
+/* extractions from ixgbe_type.h */
+#define IXGBE_DEV_ID_82599_BYPASS 0x155D
+
+#define IXGBE_BYPASS_FW_WRITE_FAILURE -35
+
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
+
+#endif /* _IXGBE_BYPASS_DEFINES_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c
new file mode 100644
index 00000000..26b19273
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -0,0 +1,8595 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+#include <rte_hash_crc.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_security_driver.h>
+#endif
+
+#include "ixgbe_logs.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_vf.h"
+#include "base/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_bypass.h"
+#include "ixgbe_rxtx.h"
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_phy.h"
+#include "ixgbe_regs.h"
+
+/*
+ * High threshold controlling when to start sending XOFF frames. Must be at
+ * least 8 bytes less than receive packet buffer size. This value is in units
+ * of 1024 bytes.
+ */
+#define IXGBE_FC_HI 0x80
+
+/*
+ * Low threshold controlling when to start sending XON frames. This value is
+ * in units of 1024 bytes.
+ */
+#define IXGBE_FC_LO 0x40
+
+/* Timer value included in XOFF frames. */
+#define IXGBE_FC_PAUSE 0x680
+
+/*Default value of Max Rx Queue*/
+#define IXGBE_MAX_RX_QUEUE_NUM 128
+
+#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
+#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
+#define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */
+
+#define IXGBE_MMW_SIZE_DEFAULT 0x4
+#define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14
+#define IXGBE_MAX_RING_DESC 4096 /* replicate define from rxtx */
+
+/*
+ * Default values for RX/TX configuration
+ */
+#define IXGBE_DEFAULT_RX_FREE_THRESH 32
+#define IXGBE_DEFAULT_RX_PTHRESH 8
+#define IXGBE_DEFAULT_RX_HTHRESH 8
+#define IXGBE_DEFAULT_RX_WTHRESH 0
+
+#define IXGBE_DEFAULT_TX_FREE_THRESH 32
+#define IXGBE_DEFAULT_TX_PTHRESH 32
+#define IXGBE_DEFAULT_TX_HTHRESH 0
+#define IXGBE_DEFAULT_TX_WTHRESH 0
+#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
+
+/* Bit shift and mask */
+#define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2)
+#define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
+#define IXGBE_8_BIT_WIDTH CHAR_BIT
+#define IXGBE_8_BIT_MASK UINT8_MAX
+
+#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
+
+#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
+
+/* Additional timesync values. */
+#define NSEC_PER_SEC 1000000000L
+#define IXGBE_INCVAL_10GB 0x66666666
+#define IXGBE_INCVAL_1GB 0x40000000
+#define IXGBE_INCVAL_100 0x50000000
+#define IXGBE_INCVAL_SHIFT_10GB 28
+#define IXGBE_INCVAL_SHIFT_1GB 24
+#define IXGBE_INCVAL_SHIFT_100 21
+#define IXGBE_INCVAL_SHIFT_82599 7
+#define IXGBE_INCPER_SHIFT_82599 24
+
+#define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+
+#define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000
+#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000
+#define IXGBE_ETAG_ETYPE 0x00005084
+#define IXGBE_ETAG_ETYPE_MASK 0x0000ffff
+#define IXGBE_ETAG_ETYPE_VALID 0x80000000
+#define IXGBE_RAH_ADTYPE 0x40000000
+#define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff
+#define IXGBE_VMVIR_TAGA_MASK 0x18000000
+#define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000
+#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_QDE_STRIP_TAG 0x00000004
+#define IXGBE_VTEICR_MASK 0x07
+
+#define IXGBE_EXVET_VET_EXT_SHIFT 16
+#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000
+
+static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
+static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_dev_configure(struct rte_eth_dev *dev);
+static int ixgbe_dev_start(struct rte_eth_dev *dev);
+static void ixgbe_dev_stop(struct rte_eth_dev *dev);
+static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
+static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
+static void ixgbe_dev_close(struct rte_eth_dev *dev);
+static int ixgbe_dev_reset(struct rte_eth_dev *dev);
+static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned n);
+static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned n);
+static int
+ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n);
+static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
+static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int size);
+static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned limit);
+static int ixgbe_dev_xstats_get_names_by_id(
+ struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit);
+static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx);
+static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+ size_t fw_size);
+static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
+static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid_id);
+static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
+ uint16_t queue, bool on);
+static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
+ int on);
+static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
+ int mask);
+static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
+static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
+static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
+static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
+static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
+
+static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
+static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
+static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_pfc_conf *pfc_conf);
+static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
+static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
+static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *handle);
+static void ixgbe_dev_interrupt_handler(void *param);
+static void ixgbe_dev_interrupt_delayed_handler(void *param);
+static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
+static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
+static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
+static bool is_device_supported(struct rte_eth_dev *dev,
+ struct rte_pci_driver *drv);
+
+/* For Virtual Function support */
+static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
+static int ixgbevf_dev_start(struct rte_eth_dev *dev);
+static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
+static void ixgbevf_dev_close(struct rte_eth_dev *dev);
+static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
+static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
+static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
+static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
+static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
+ uint16_t queue, int on);
+static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
+static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector);
+static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
+static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
+
+/* For Eth VMDQ APIs support */
+static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
+ ether_addr * mac_addr, uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
+static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on);
+static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
+ uint8_t rule_id);
+static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector);
+static void ixgbe_configure_msix(struct rte_eth_dev *dev);
+
+static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
+static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter);
+static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter);
+static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter);
+static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *filter);
+static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter);
+static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
+
+static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info);
+
+static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
+static int ixgbe_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs);
+static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
+static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
+static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
+
+static int ixgbe_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+
+static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
+static int ixgbevf_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs);
+
+static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
+static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
+static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
+ const struct timespec *timestamp);
+static void ixgbevf_dev_interrupt_handler(void *param);
+
+static int ixgbe_dev_l2_tunnel_eth_type_conf
+ (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
+static int ixgbe_dev_l2_tunnel_offload_set
+ (struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ uint32_t mask,
+ uint8_t en);
+static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+
+static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel);
+static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel);
+static int ixgbe_filter_restore(struct rte_eth_dev *dev);
+static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
+
+/*
+ * Define VF Stats MACRO for Non "cleared on read" register
+ */
+#define UPDATE_VF_STAT(reg, last, cur) \
+{ \
+ uint32_t latest = IXGBE_READ_REG(hw, reg); \
+ cur += (latest - last) & UINT_MAX; \
+ last = latest; \
+}
+
+#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \
+{ \
+ u64 new_lsb = IXGBE_READ_REG(hw, lsb); \
+ u64 new_msb = IXGBE_READ_REG(hw, msb); \
+ u64 latest = ((new_msb << 32) | new_lsb); \
+ cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
+ last = latest; \
+}
+
+#define IXGBE_SET_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+ (h)->bitmap[idx] |= 1 << bit;\
+ } while (0)
+
+#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+ (h)->bitmap[idx] &= ~(1 << bit);\
+ } while (0)
+
+#define IXGBE_GET_HWSTRIP(h, q, r) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+ (r) = (h)->bitmap[idx] >> bit & 1;\
+ } while (0)
+
+int ixgbe_logtype_init;
+int ixgbe_logtype_driver;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_ixgbe_map[] = {
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
+#endif
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+/*
+ * The set of PCI devices this driver supports (for 82599 VF)
+ */
+static const struct rte_pci_id pci_id_ixgbevf_map[] = {
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct rte_eth_desc_lim rx_desc_lim = {
+ .nb_max = IXGBE_MAX_RING_DESC,
+ .nb_min = IXGBE_MIN_RING_DESC,
+ .nb_align = IXGBE_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+ .nb_max = IXGBE_MAX_RING_DESC,
+ .nb_min = IXGBE_MIN_RING_DESC,
+ .nb_align = IXGBE_TXD_ALIGN,
+ .nb_seg_max = IXGBE_TX_MAX_SEG,
+ .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
+};
+
+static const struct eth_dev_ops ixgbe_eth_dev_ops = {
+ .dev_configure = ixgbe_dev_configure,
+ .dev_start = ixgbe_dev_start,
+ .dev_stop = ixgbe_dev_stop,
+ .dev_set_link_up = ixgbe_dev_set_link_up,
+ .dev_set_link_down = ixgbe_dev_set_link_down,
+ .dev_close = ixgbe_dev_close,
+ .dev_reset = ixgbe_dev_reset,
+ .promiscuous_enable = ixgbe_dev_promiscuous_enable,
+ .promiscuous_disable = ixgbe_dev_promiscuous_disable,
+ .allmulticast_enable = ixgbe_dev_allmulticast_enable,
+ .allmulticast_disable = ixgbe_dev_allmulticast_disable,
+ .link_update = ixgbe_dev_link_update,
+ .stats_get = ixgbe_dev_stats_get,
+ .xstats_get = ixgbe_dev_xstats_get,
+ .xstats_get_by_id = ixgbe_dev_xstats_get_by_id,
+ .stats_reset = ixgbe_dev_stats_reset,
+ .xstats_reset = ixgbe_dev_xstats_reset,
+ .xstats_get_names = ixgbe_dev_xstats_get_names,
+ .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
+ .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
+ .fw_version_get = ixgbe_fw_version_get,
+ .dev_infos_get = ixgbe_dev_info_get,
+ .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
+ .mtu_set = ixgbe_dev_mtu_set,
+ .vlan_filter_set = ixgbe_vlan_filter_set,
+ .vlan_tpid_set = ixgbe_vlan_tpid_set,
+ .vlan_offload_set = ixgbe_vlan_offload_set,
+ .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
+ .rx_queue_start = ixgbe_dev_rx_queue_start,
+ .rx_queue_stop = ixgbe_dev_rx_queue_stop,
+ .tx_queue_start = ixgbe_dev_tx_queue_start,
+ .tx_queue_stop = ixgbe_dev_tx_queue_stop,
+ .rx_queue_setup = ixgbe_dev_rx_queue_setup,
+ .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
+ .rx_queue_release = ixgbe_dev_rx_queue_release,
+ .rx_queue_count = ixgbe_dev_rx_queue_count,
+ .rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
+ .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
+ .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
+ .tx_queue_setup = ixgbe_dev_tx_queue_setup,
+ .tx_queue_release = ixgbe_dev_tx_queue_release,
+ .dev_led_on = ixgbe_dev_led_on,
+ .dev_led_off = ixgbe_dev_led_off,
+ .flow_ctrl_get = ixgbe_flow_ctrl_get,
+ .flow_ctrl_set = ixgbe_flow_ctrl_set,
+ .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
+ .mac_addr_add = ixgbe_add_rar,
+ .mac_addr_remove = ixgbe_remove_rar,
+ .mac_addr_set = ixgbe_set_default_mac_addr,
+ .uc_hash_table_set = ixgbe_uc_hash_table_set,
+ .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
+ .mirror_rule_set = ixgbe_mirror_rule_set,
+ .mirror_rule_reset = ixgbe_mirror_rule_reset,
+ .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
+ .reta_update = ixgbe_dev_rss_reta_update,
+ .reta_query = ixgbe_dev_rss_reta_query,
+ .rss_hash_update = ixgbe_dev_rss_hash_update,
+ .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
+ .filter_ctrl = ixgbe_dev_filter_ctrl,
+ .set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+ .rxq_info_get = ixgbe_rxq_info_get,
+ .txq_info_get = ixgbe_txq_info_get,
+ .timesync_enable = ixgbe_timesync_enable,
+ .timesync_disable = ixgbe_timesync_disable,
+ .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
+ .get_reg = ixgbe_get_regs,
+ .get_eeprom_length = ixgbe_get_eeprom_length,
+ .get_eeprom = ixgbe_get_eeprom,
+ .set_eeprom = ixgbe_set_eeprom,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
+ .get_dcb_info = ixgbe_dev_get_dcb_info,
+ .timesync_adjust_time = ixgbe_timesync_adjust_time,
+ .timesync_read_time = ixgbe_timesync_read_time,
+ .timesync_write_time = ixgbe_timesync_write_time,
+ .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
+ .l2_tunnel_offload_set = ixgbe_dev_l2_tunnel_offload_set,
+ .udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del,
+ .tm_ops_get = ixgbe_tm_ops_get,
+};
+
+/*
+ * dev_ops for virtual function, bare necessities for basic vf
+ * operation have been implemented
+ */
+static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
+ .dev_configure = ixgbevf_dev_configure,
+ .dev_start = ixgbevf_dev_start,
+ .dev_stop = ixgbevf_dev_stop,
+ .link_update = ixgbevf_dev_link_update,
+ .stats_get = ixgbevf_dev_stats_get,
+ .xstats_get = ixgbevf_dev_xstats_get,
+ .stats_reset = ixgbevf_dev_stats_reset,
+ .xstats_reset = ixgbevf_dev_stats_reset,
+ .xstats_get_names = ixgbevf_dev_xstats_get_names,
+ .dev_close = ixgbevf_dev_close,
+ .dev_reset = ixgbevf_dev_reset,
+ .allmulticast_enable = ixgbevf_dev_allmulticast_enable,
+ .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
+ .dev_infos_get = ixgbevf_dev_info_get,
+ .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
+ .mtu_set = ixgbevf_dev_set_mtu,
+ .vlan_filter_set = ixgbevf_vlan_filter_set,
+ .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
+ .vlan_offload_set = ixgbevf_vlan_offload_set,
+ .rx_queue_setup = ixgbe_dev_rx_queue_setup,
+ .rx_queue_release = ixgbe_dev_rx_queue_release,
+ .rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
+ .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
+ .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
+ .tx_queue_setup = ixgbe_dev_tx_queue_setup,
+ .tx_queue_release = ixgbe_dev_tx_queue_release,
+ .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
+ .mac_addr_add = ixgbevf_add_mac_addr,
+ .mac_addr_remove = ixgbevf_remove_mac_addr,
+ .set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+ .rxq_info_get = ixgbe_rxq_info_get,
+ .txq_info_get = ixgbe_txq_info_get,
+ .mac_addr_set = ixgbevf_set_default_mac_addr,
+ .get_reg = ixgbevf_get_regs,
+ .reta_update = ixgbe_dev_rss_reta_update,
+ .reta_query = ixgbe_dev_rss_reta_query,
+ .rss_hash_update = ixgbe_dev_rss_hash_update,
+ .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
+};
+
+/* store statistics names and its offset in stats structure */
+struct rte_ixgbe_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
+ {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
+ {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
+ {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
+ {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
+ {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
+ {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
+ {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
+ {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
+ {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
+ {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
+ {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
+ {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
+ {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
+ {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
+ {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
+ prc1023)},
+ {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
+ prc1522)},
+ {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
+ {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
+ {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
+ {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
+ {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
+ {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
+ {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
+ {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
+ {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
+ {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
+ {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
+ {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
+ {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
+ {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
+ {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
+ {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
+ {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
+ ptc1023)},
+ {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
+ ptc1522)},
+ {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
+ {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
+ {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
+ {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
+
+ {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
+ fdirustat_add)},
+ {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
+ fdirustat_remove)},
+ {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
+ fdirfstat_fadd)},
+ {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
+ fdirfstat_fremove)},
+ {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
+ fdirmatch)},
+ {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
+ fdirmiss)},
+
+ {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
+ {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
+ {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
+ fclast)},
+ {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
+ {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
+ {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
+ {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
+ {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
+ fcoe_noddp)},
+ {"rx_fcoe_no_direct_data_placement_ext_buff",
+ offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
+
+ {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
+ lxontxc)},
+ {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
+ lxonrxc)},
+ {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
+ lxofftxc)},
+ {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
+ lxoffrxc)},
+ {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
+};
+
+#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
+ sizeof(rte_ixgbe_stats_strings[0]))
+
+/* MACsec statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
+ {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+ out_pkts_untagged)},
+ {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
+ out_pkts_encrypted)},
+ {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
+ out_pkts_protected)},
+ {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
+ out_octets_encrypted)},
+ {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
+ out_octets_protected)},
+ {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_untagged)},
+ {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_badtag)},
+ {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_nosci)},
+ {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_unknownsci)},
+ {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
+ in_octets_decrypted)},
+ {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
+ in_octets_validated)},
+ {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_unchecked)},
+ {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_delayed)},
+ {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_late)},
+ {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_ok)},
+ {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_invalid)},
+ {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_notvalid)},
+ {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_unusedsa)},
+ {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_notusingsa)},
+};
+
+#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
+ sizeof(rte_ixgbe_macsec_strings[0]))
+
+/* Per-queue statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
+ {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
+ {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
+ {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
+ {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
+};
+
+#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
+ sizeof(rte_ixgbe_rxq_strings[0]))
+#define IXGBE_NB_RXQ_PRIO_VALUES 8
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
+ {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
+ {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
+ {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
+ pxon2offc)},
+};
+
+#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
+ sizeof(rte_ixgbe_txq_strings[0]))
+#define IXGBE_NB_TXQ_PRIO_VALUES 8
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
+ {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
+};
+
+#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
+ sizeof(rte_ixgbevf_stats_strings[0]))
+
+/*
+ * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
+ */
+static inline int
+ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+ switch (hw->phy.type) {
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static inline int32_t
+ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
+{
+ uint32_t ctrl_ext;
+ int32_t status;
+
+ status = ixgbe_reset_hw(hw);
+
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
+
+ if (status == IXGBE_ERR_SFP_NOT_PRESENT)
+ status = IXGBE_SUCCESS;
+ return status;
+}
+
+static inline void
+ixgbe_enable_intr(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
+ */
+static void
+ixgbe_disable_intr(struct ixgbe_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
+ }
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * This function resets queue statistics mapping registers.
+ * From Niantic datasheet, Initialization of Statistics section:
+ * "...if software requires the queue counters, the RQSMR and TQSM registers
+ * must be re-programmed following a device reset.
+ */
+static void
+ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
+{
+ uint32_t i;
+
+ for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
+ }
+}
+
+
+static int
+ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx)
+{
+#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
+#define NB_QMAP_FIELDS_PER_QSM_REG 4
+#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
+
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct ixgbe_stat_mapping_registers *stat_mappings =
+ IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
+ uint32_t qsmr_mask = 0;
+ uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
+ uint32_t q_map;
+ uint8_t n, offset;
+
+ if ((hw->mac.type != ixgbe_mac_82599EB) &&
+ (hw->mac.type != ixgbe_mac_X540) &&
+ (hw->mac.type != ixgbe_mac_X550) &&
+ (hw->mac.type != ixgbe_mac_X550EM_x) &&
+ (hw->mac.type != ixgbe_mac_X550EM_a))
+ return -ENOSYS;
+
+ PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
+ (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+ queue_id, stat_idx);
+
+ n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
+ if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
+ PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
+ return -EIO;
+ }
+ offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
+
+ /* Now clear any previous stat_idx set */
+ clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+ if (!is_rx)
+ stat_mappings->tqsm[n] &= ~clearing_mask;
+ else
+ stat_mappings->rqsmr[n] &= ~clearing_mask;
+
+ q_map = (uint32_t)stat_idx;
+ q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
+ qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+ if (!is_rx)
+ stat_mappings->tqsm[n] |= qsmr_mask;
+ else
+ stat_mappings->rqsmr[n] |= qsmr_mask;
+
+ PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
+ (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+ queue_id, stat_idx);
+ PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+ is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
+
+ /* Now write the mapping in the appropriate register */
+ if (is_rx) {
+ PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
+ stat_mappings->rqsmr[n], n);
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
+ } else {
+ PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
+ stat_mappings->tqsm[n], n);
+ IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
+ }
+ return 0;
+}
+
+static void
+ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
+{
+ struct ixgbe_stat_mapping_registers *stat_mappings =
+ IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+
+ /* write whatever was in stat mapping table to the NIC */
+ for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
+ /* rx */
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
+
+ /* tx */
+ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
+ }
+}
+
+static void
+ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
+{
+ uint8_t i;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+
+ dcb_config->num_tcs.pg_tcs = dcb_max_tc;
+ dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
+ for (i = 0; i < dcb_max_tc; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100/dcb_max_tc + (i & 1));
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100/dcb_max_tc + (i & 1));
+ tc->pfc = ixgbe_dcb_pfc_disabled;
+ }
+
+ /* Initialize default user to priority mapping, UPx->TC0 */
+ tc = &dcb_config->tc_config[0];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+ for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
+ dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
+ dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
+ }
+ dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
+ dcb_config->pfc_mode_enable = false;
+ dcb_config->vt_mode = true;
+ dcb_config->round_robin_enable = false;
+ /* support all DCB capabilities in 82599 */
+ dcb_config->support.capabilities = 0xFF;
+
+ /*we only support 4 Tcs for X540, X550 */
+ if (hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a) {
+ dcb_config->num_tcs.pg_tcs = 4;
+ dcb_config->num_tcs.pfc_tcs = 4;
+ }
+}
+
+/*
+ * Ensure that all locks are released before first NVM or PHY access
+ */
+static void
+ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
+{
+ uint16_t mask;
+
+ /*
+ * Phy lock should not fail in this early stage. If this is the case,
+ * it is due to an improper exit of the application.
+ * So force the release of the faulty lock. Release of common lock
+ * is done automatically by swfw_sync function.
+ */
+ mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
+ if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
+ PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
+ }
+ ixgbe_release_swfw_semaphore(hw, mask);
+
+ /*
+ * These ones are more tricky since they are common to all ports; but
+ * swfw_sync retries last long enough (1s) to be almost sure that if
+ * lock can not be taken it is due to an improper lock of the
+ * semaphore.
+ */
+ mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
+ if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
+ PMD_DRV_LOG(DEBUG, "SWFW common locks released");
+ }
+ ixgbe_release_swfw_semaphore(hw, mask);
+}
+
+/*
+ * This function is based on code in ixgbe_attach() in base/ixgbe.c.
+ * It returns 0 on success.
+ */
+static int
+eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct ixgbe_vfta *shadow_vfta =
+ IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+ struct ixgbe_hwstrip *hwstrip =
+ IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
+ struct ixgbe_dcb_config *dcb_config =
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct ixgbe_bw_conf *bw_conf =
+ IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
+ uint32_t ctrl_ext;
+ uint16_t csum;
+ int diag, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &ixgbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX and TX function.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ struct ixgbe_tx_queue *txq;
+ /* TX queue function in primary, set by last queue initialized
+ * Tx queue may not initialized by primary process
+ */
+ if (eth_dev->data->tx_queues) {
+ txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
+ ixgbe_set_tx_function(eth_dev, txq);
+ } else {
+ /* Use default TX function if we get here */
+ PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
+ "Using default TX function.");
+ }
+
+ ixgbe_set_rx_function(eth_dev);
+
+ return 0;
+ }
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ /* Vendor and Device ID need to be set before init of shared code */
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->allow_unsupported_sfp = 1;
+
+ /* Initialize the shared code (base driver) */
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
+ diag = ixgbe_bypass_init_shared_code(hw);
+#else
+ diag = ixgbe_init_shared_code(hw);
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
+
+ if (diag != IXGBE_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
+ return -EIO;
+ }
+
+ /* pick up the PCI bus settings for reporting later */
+ ixgbe_get_bus_info(hw);
+
+ /* Unlock any pending hardware semaphore */
+ ixgbe_swfw_lock_reset(hw);
+
+#ifdef RTE_LIBRTE_SECURITY
+ /* Initialize security_ctx only for primary process*/
+ if (ixgbe_ipsec_ctx_create(eth_dev))
+ return -ENOMEM;
+#endif
+
+ /* Initialize DCB configuration*/
+ memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
+ ixgbe_dcb_init(hw, dcb_config);
+ /* Get Hardware Flow Control setting */
+ hw->fc.requested_mode = ixgbe_fc_full;
+ hw->fc.current_mode = ixgbe_fc_full;
+ hw->fc.pause_time = IXGBE_FC_PAUSE;
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ hw->fc.low_water[i] = IXGBE_FC_LO;
+ hw->fc.high_water[i] = IXGBE_FC_HI;
+ }
+ hw->fc.send_xon = 1;
+
+ /* Make sure we have a good EEPROM before we read from it */
+ diag = ixgbe_validate_eeprom_checksum(hw, &csum);
+ if (diag != IXGBE_SUCCESS) {
+ PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
+ return -EIO;
+ }
+
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
+ diag = ixgbe_bypass_init_hw(hw);
+#else
+ diag = ixgbe_init_hw(hw);
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
+
+ /*
+ * Devices with copper phys will fail to initialise if ixgbe_init_hw()
+ * is called too soon after the kernel driver unbinding/binding occurs.
+ * The failure occurs in ixgbe_identify_phy_generic() for all devices,
+ * but for non-copper devies, ixgbe_identify_sfp_module_generic() is
+ * also called. See ixgbe_identify_phy_82599(). The reason for the
+ * failure is not known, and only occuts when virtualisation features
+ * are disabled in the bios. A delay of 100ms was found to be enough by
+ * trial-and-error, and is doubled to be safe.
+ */
+ if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
+ rte_delay_ms(200);
+ diag = ixgbe_init_hw(hw);
+ }
+
+ if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
+ diag = IXGBE_SUCCESS;
+
+ if (diag == IXGBE_ERR_EEPROM_VERSION) {
+ PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
+ "LOM. Please be aware there may be issues associated "
+ "with your hardware.");
+ PMD_INIT_LOG(ERR, "If you are experiencing problems "
+ "please contact your Intel or hardware representative "
+ "who provided you with this hardware.");
+ } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
+ if (diag) {
+ PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
+ return -EIO;
+ }
+
+ /* Reset the hw statistics */
+ ixgbe_dev_stats_reset(eth_dev);
+
+ /* disable interrupt */
+ ixgbe_disable_intr(hw);
+
+ /* reset mappings for queue statistics hw counters*/
+ ixgbe_reset_qstat_mappings(hw);
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+ hw->mac.num_rar_entries, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ return -ENOMEM;
+ }
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ /* Allocate memory for storing hash filter MAC addresses */
+ eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+ IXGBE_VMDQ_NUM_UC_MAC, 0);
+ if (eth_dev->data->hash_mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+ return -ENOMEM;
+ }
+
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ /* initialize the hw strip bitmap*/
+ memset(hwstrip, 0, sizeof(*hwstrip));
+
+ /* initialize PF if max_vfs not zero */
+ ixgbe_pf_host_init(eth_dev);
+
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ /* let hardware know driver is loaded */
+ ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
+
+ if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+ PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
+ (int) hw->mac.type, (int) hw->phy.type,
+ (int) hw->phy.sfp_type);
+ else
+ PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
+ (int) hw->mac.type, (int) hw->phy.type);
+
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ rte_intr_callback_register(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
+ /* enable support intr */
+ ixgbe_enable_intr(eth_dev);
+
+ /* initialize filter info */
+ memset(filter_info, 0,
+ sizeof(struct ixgbe_filter_info));
+
+ /* initialize 5tuple filter list */
+ TAILQ_INIT(&filter_info->fivetuple_list);
+
+ /* initialize flow director filter list & hash */
+ ixgbe_fdir_filter_init(eth_dev);
+
+ /* initialize l2 tunnel filter list & hash */
+ ixgbe_l2_tn_filter_init(eth_dev);
+
+ /* initialize flow filter lists */
+ ixgbe_filterlist_init();
+
+ /* initialize bandwidth configuration info */
+ memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
+
+ /* initialize Traffic Manager configuration */
+ ixgbe_tm_conf_init(eth_dev);
+
+ return 0;
+}
+
+static int
+eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ixgbe_hw *hw;
+ int retries = 0;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ if (hw->adapter_stopped == 0)
+ ixgbe_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ /* Unlock any pending hardware semaphore */
+ ixgbe_swfw_lock_reset(hw);
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+
+ do {
+ ret = rte_intr_callback_unregister(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
+ if (ret >= 0) {
+ break;
+ } else if (ret != -EAGAIN) {
+ PMD_INIT_LOG(ERR,
+ "intr callback unregister failed: %d",
+ ret);
+ return ret;
+ }
+ rte_delay_ms(100);
+ } while (retries++ < (10 + IXGBE_LINK_UP_TIME));
+
+ /* uninitialize PF if max_vfs not zero */
+ ixgbe_pf_host_uninit(eth_dev);
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ rte_free(eth_dev->data->hash_mac_addrs);
+ eth_dev->data->hash_mac_addrs = NULL;
+
+ /* remove all the fdir filters & hash */
+ ixgbe_fdir_filter_uninit(eth_dev);
+
+ /* remove all the L2 tunnel filters & hash */
+ ixgbe_l2_tn_filter_uninit(eth_dev);
+
+ /* Remove all ntuple filters of the device */
+ ixgbe_ntuple_filter_uninit(eth_dev);
+
+ /* clear all the filters list */
+ ixgbe_filterlist_flush();
+
+ /* Remove all Traffic Manager configuration */
+ ixgbe_tm_conf_uninit(eth_dev);
+
+#ifdef RTE_LIBRTE_SECURITY
+ rte_free(eth_dev->security_ctx);
+#endif
+
+ return 0;
+}
+
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct ixgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+ TAILQ_REMOVE(&filter_info->fivetuple_list,
+ p_5tuple,
+ entries);
+ rte_free(p_5tuple);
+ }
+ memset(filter_info->fivetuple_mask, 0,
+ sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+ return 0;
+}
+
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+ struct ixgbe_fdir_filter *fdir_filter;
+
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_handle)
+ rte_hash_free(fdir_info->hash_handle);
+
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ return 0;
+}
+
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+ if (l2_tn_info->hash_map)
+ rte_free(l2_tn_info->hash_map);
+ if (l2_tn_info->hash_handle)
+ rte_hash_free(l2_tn_info->hash_handle);
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
+ l2_tn_filter,
+ entries);
+ rte_free(l2_tn_filter);
+ }
+
+ return 0;
+}
+
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = IXGBE_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(union ixgbe_atr_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", eth_dev->device->name);
+ fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("ixgbe",
+ sizeof(struct ixgbe_fdir_filter *) *
+ IXGBE_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ return -ENOMEM;
+ }
+ fdir_info->mask_added = FALSE;
+
+ return 0;
+}
+
+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+ char l2_tn_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters l2_tn_hash_params = {
+ .name = l2_tn_hash_name,
+ .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
+ .key_len = sizeof(struct ixgbe_l2_tn_key),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&l2_tn_info->l2_tn_list);
+ snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
+ "l2_tn_%s", eth_dev->device->name);
+ l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
+ if (!l2_tn_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
+ return -EINVAL;
+ }
+ l2_tn_info->hash_map = rte_zmalloc("ixgbe",
+ sizeof(struct ixgbe_l2_tn_filter *) *
+ IXGBE_MAX_L2_TN_FILTER_NUM,
+ 0);
+ if (!l2_tn_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for L2 TN hash map!");
+ return -ENOMEM;
+ }
+ l2_tn_info->e_tag_en = FALSE;
+ l2_tn_info->e_tag_fwd_en = FALSE;
+ l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG;
+
+ return 0;
+}
+/*
+ * Negotiate mailbox API version with the PF.
+ * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
+ * Then we try to negotiate starting with the most recent one.
+ * If all negotiation attempts fail, then we will proceed with
+ * the default one (ixgbe_mbox_api_10).
+ */
+static void
+ixgbevf_negotiate_api(struct ixgbe_hw *hw)
+{
+ int32_t i;
+
+ /* start with highest supported, proceed down */
+ static const enum ixgbe_pfvf_api_rev sup_ver[] = {
+ ixgbe_mbox_api_12,
+ ixgbe_mbox_api_11,
+ ixgbe_mbox_api_10,
+ };
+
+ for (i = 0;
+ i != RTE_DIM(sup_ver) &&
+ ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
+ i++)
+ ;
+}
+
+static void
+generate_random_mac_addr(struct ether_addr *mac_addr)
+{
+ uint64_t random;
+
+ /* Set Organizationally Unique Identifier (OUI) prefix. */
+ mac_addr->addr_bytes[0] = 0x00;
+ mac_addr->addr_bytes[1] = 0x09;
+ mac_addr->addr_bytes[2] = 0xC0;
+ /* Force indication of locally assigned MAC address. */
+ mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
+ /* Generate the last 3 bytes of the MAC address with a random number. */
+ random = rte_rand();
+ memcpy(&mac_addr->addr_bytes[3], &random, 3);
+}
+
+/*
+ * Virtual Function device init
+ */
+static int
+eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ int diag;
+ uint32_t tc, tcs;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct ixgbe_vfta *shadow_vfta =
+ IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+ struct ixgbe_hwstrip *hwstrip =
+ IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
+ struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ struct ixgbe_tx_queue *txq;
+ /* TX queue function in primary, set by last queue initialized
+ * Tx queue may not initialized by primary process
+ */
+ if (eth_dev->data->tx_queues) {
+ txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
+ ixgbe_set_tx_function(eth_dev, txq);
+ } else {
+ /* Use default TX function if we get here */
+ PMD_INIT_LOG(NOTICE,
+ "No TX queues configured yet. Using default TX function.");
+ }
+
+ ixgbe_set_rx_function(eth_dev);
+
+ return 0;
+ }
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ /* initialize the hw strip bitmap*/
+ memset(hwstrip, 0, sizeof(*hwstrip));
+
+ /* Initialize the shared code (base driver) */
+ diag = ixgbe_init_shared_code(hw);
+ if (diag != IXGBE_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
+ return -EIO;
+ }
+
+ /* init_mailbox_params */
+ hw->mbx.ops.init_params(hw);
+
+ /* Reset the hw statistics */
+ ixgbevf_dev_stats_reset(eth_dev);
+
+ /* Disable the interrupts for VF */
+ ixgbevf_intr_disable(eth_dev);
+
+ hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
+ diag = hw->mac.ops.reset_hw(hw);
+
+ /*
+ * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
+ * the underlying PF driver has not assigned a MAC address to the VF.
+ * In this case, assign a random MAC address.
+ */
+ if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
+ PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+ return diag;
+ }
+
+ /* negotiate mailbox API version to use with the PF. */
+ ixgbevf_negotiate_api(hw);
+
+ /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
+ ixgbevf_get_queues(hw, &tcs, &tc);
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
+ hw->mac.num_rar_entries, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ return -ENOMEM;
+ }
+
+ /* Generate a random MAC address, if none was assigned by PF. */
+ if (is_zero_ether_addr(perm_addr)) {
+ generate_random_mac_addr(perm_addr);
+ diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
+ if (diag) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ return diag;
+ }
+ PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
+ PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ perm_addr->addr_bytes[0],
+ perm_addr->addr_bytes[1],
+ perm_addr->addr_bytes[2],
+ perm_addr->addr_bytes[3],
+ perm_addr->addr_bytes[4],
+ perm_addr->addr_bytes[5]);
+ }
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
+
+ /* reset the hardware with the new settings */
+ diag = hw->mac.ops.start_hw(hw);
+ switch (diag) {
+ case 0:
+ break;
+
+ default:
+ PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+ return -EIO;
+ }
+
+ rte_intr_callback_register(intr_handle,
+ ixgbevf_dev_interrupt_handler, eth_dev);
+ rte_intr_enable(intr_handle);
+ ixgbevf_intr_enable(eth_dev);
+
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id, "ixgbe_mac_82599_vf");
+
+ return 0;
+}
+
+/* Virtual Function device uninit */
+
+static int
+eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ixgbe_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ if (hw->adapter_stopped == 0)
+ ixgbevf_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ /* Disable the interrupts for VF */
+ ixgbevf_intr_disable(eth_dev);
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ ixgbevf_dev_interrupt_handler, eth_dev);
+
+ return 0;
+}
+
+static int
+eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *pf_ethdev;
+ struct rte_eth_devargs eth_da;
+ int i, retval;
+
+ if (pci_dev->device.devargs) {
+ retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+ &eth_da);
+ if (retval)
+ return retval;
+ } else
+ memset(&eth_da, 0, sizeof(eth_da));
+
+ retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+ sizeof(struct ixgbe_adapter),
+ eth_dev_pci_specific_init, pci_dev,
+ eth_ixgbe_dev_init, NULL);
+
+ if (retval || eth_da.nb_representor_ports < 1)
+ return retval;
+
+ pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (pf_ethdev == NULL)
+ return -ENODEV;
+
+ /* probe VF representor ports */
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ struct ixgbe_vf_info *vfinfo;
+ struct ixgbe_vf_representor representor;
+
+ vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
+ pf_ethdev->data->dev_private);
+ if (vfinfo == NULL) {
+ PMD_DRV_LOG(ERR,
+ "no virtual functions supported by PF");
+ break;
+ }
+
+ representor.vf_id = eth_da.representor_ports[i];
+ representor.switch_domain_id = vfinfo->switch_domain_id;
+ representor.pf_ethdev = pf_ethdev;
+
+ /* representor port net_bdf_port */
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ pci_dev->device.name,
+ eth_da.representor_ports[i]);
+
+ retval = rte_eth_dev_create(&pci_dev->device, name,
+ sizeof(struct ixgbe_vf_representor), NULL, NULL,
+ ixgbe_vf_representor_init, &representor);
+
+ if (retval)
+ PMD_DRV_LOG(ERR, "failed to create ixgbe vf "
+ "representor %s.", name);
+ }
+
+ return 0;
+}
+
+static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *ethdev;
+
+ ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!ethdev)
+ return -ENODEV;
+
+ if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit);
+ else
+ return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit);
+}
+
+static struct rte_pci_driver rte_ixgbe_pmd = {
+ .id_table = pci_id_ixgbe_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = eth_ixgbe_pci_probe,
+ .remove = eth_ixgbe_pci_remove,
+};
+
+static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
+}
+
+static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
+}
+
+/*
+ * virtual function driver struct
+ */
+static struct rte_pci_driver rte_ixgbevf_pmd = {
+ .id_table = pci_id_ixgbevf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = eth_ixgbevf_pci_probe,
+ .remove = eth_ixgbevf_pci_remove,
+};
+
+static int
+ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vfta *shadow_vfta =
+ IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vfta;
+ uint32_t vid_idx;
+ uint32_t vid_bit;
+
+ vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
+ vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
+ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
+ if (on)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
+
+ /* update local VFTA copy */
+ shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
+
+static void
+ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+ if (on)
+ ixgbe_vlan_hw_strip_enable(dev, queue);
+ else
+ ixgbe_vlan_hw_strip_disable(dev, queue);
+}
+
+static int
+ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = 0;
+ uint32_t reg;
+ uint32_t qinq;
+
+ qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ qinq &= IXGBE_DMATXCTL_GDV;
+
+ switch (vlan_type) {
+ case ETH_VLAN_TYPE_INNER:
+ if (qinq) {
+ reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+ | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+ } else {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Inner type is not supported"
+ " by single VLAN");
+ }
+ break;
+ case ETH_VLAN_TYPE_OUTER:
+ if (qinq) {
+ /* Only the high 16-bits is valid */
+ IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
+ IXGBE_EXVET_VET_EXT_SHIFT);
+ } else {
+ reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+ | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+ }
+
+ break;
+ default:
+ ret = -EINVAL;
+ PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
+ break;
+ }
+
+ return ret;
+}
+
+void
+ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t vlnctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Filter Table Disable */
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+}
+
+void
+ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vfta *shadow_vfta =
+ IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vlnctrl;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Filter Table Enable */
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
+ /* write whatever is in local vfta copy */
+ for (i = 0; i < IXGBE_VFTA_SIZE; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
+}
+
+static void
+ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
+{
+ struct ixgbe_hwstrip *hwstrip =
+ IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
+ struct ixgbe_rx_queue *rxq;
+
+ if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ return;
+
+ if (on)
+ IXGBE_SET_HWSTRIP(hwstrip, queue);
+ else
+ IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
+
+ if (queue >= dev->data->nb_rx_queues)
+ return;
+
+ rxq = dev->data->rx_queues[queue];
+
+ if (on) {
+ rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ } else {
+ rxq->vlan_flags = PKT_RX_VLAN;
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+}
+
+static void
+ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ /* No queue level support */
+ PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
+ return;
+ }
+
+ /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
+ /* record those setting for HW strip per queue */
+ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
+}
+
+static void
+ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ /* No queue level supported */
+ PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
+ return;
+ }
+
+ /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
+ /* record those setting for HW strip per queue */
+ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
+}
+
+static void
+ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* DMATXCTRL: Geric Double VLAN Disable */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ ctrl &= ~IXGBE_DMATXCTL_GDV;
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
+
+ /* CTRL_EXT: Global Double VLAN Disable */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl &= ~IXGBE_EXTENDED_VLAN;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
+
+}
+
+static void
+ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* DMATXCTRL: Geric Double VLAN Enable */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ ctrl |= IXGBE_DMATXCTL_GDV;
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
+
+ /* CTRL_EXT: Global Double VLAN Enable */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl |= IXGBE_EXTENDED_VLAN;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
+
+ /* Clear pooling mode of PFVTCTL. It's required by X550. */
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
+ }
+
+ /*
+ * VET EXT field in the EXVET register = 0x8100 by default
+ * So no need to change. Same to VT field of DMATXCTL register
+ */
+}
+
+void
+ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ uint32_t ctrl;
+ uint16_t i;
+ struct ixgbe_rx_queue *rxq;
+ bool on;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ ctrl |= IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+ } else {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ ctrl &= ~IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+ }
+ } else {
+ /*
+ * Other 10G NIC, the VLAN strip can be setup
+ * per queue in RXDCTL
+ */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ ctrl |= IXGBE_RXDCTL_VME;
+ on = TRUE;
+ } else {
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ on = FALSE;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
+
+ /* record those setting for HW strip per queue */
+ ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
+ }
+ }
+}
+
+static void
+ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
+{
+ uint16_t i;
+ struct rte_eth_rxmode *rxmode;
+ struct ixgbe_rx_queue *rxq;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ else
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ }
+}
+
+static int
+ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ ixgbe_vlan_hw_strip_config(dev);
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ ixgbe_vlan_hw_filter_enable(dev);
+ else
+ ixgbe_vlan_hw_filter_disable(dev);
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ ixgbe_vlan_hw_extend_enable(dev);
+ else
+ ixgbe_vlan_hw_extend_disable(dev);
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ ixgbe_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
+static void
+ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+ uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
+ vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+}
+
+static int
+ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ switch (nb_rx_q) {
+ case 1:
+ case 2:
+ RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+ break;
+ case 4:
+ RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+ IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+ pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ return 0;
+}
+
+static int
+ixgbe_check_mq_mode(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+
+ if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+ /* check multi-queue mode */
+ switch (dev_conf->rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+ break;
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
+ /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
+ PMD_INIT_LOG(ERR, "SRIOV active,"
+ " unsupported mq_mode rx %d.",
+ dev_conf->rxmode.mq_mode);
+ return -EINVAL;
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+ if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
+ if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " invalid queue number"
+ " for VMDQ RSS, allowed"
+ " value are 1, 2 or 4.");
+ return -EINVAL;
+ }
+ break;
+ case ETH_MQ_RX_VMDQ_ONLY:
+ case ETH_MQ_RX_NONE:
+ /* if nothing mq mode configure, use default scheme */
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ break;
+ default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+ /* SRIOV only works in VMDq enable mode */
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " wrong mq_mode rx %d.",
+ dev_conf->rxmode.mq_mode);
+ return -EINVAL;
+ }
+
+ switch (dev_conf->txmode.mq_mode) {
+ case ETH_MQ_TX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+ break;
+ default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+ break;
+ }
+
+ /* check valid queue number */
+ if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
+ (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " nb_rx_q=%d nb_tx_q=%d queue number"
+ " must be less than or equal to %d.",
+ nb_rx_q, nb_tx_q,
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+ return -EINVAL;
+ }
+ } else {
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
+ " not supported.");
+ return -EINVAL;
+ }
+ /* check configuration for vmdb+dcb mode */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_conf *conf;
+
+ if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
+ IXGBE_VMDQ_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
+ if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+ " nb_queue_pools must be %d or %d.",
+ ETH_16_POOLS, ETH_32_POOLS);
+ return -EINVAL;
+ }
+ }
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_tx_conf *conf;
+
+ if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
+ IXGBE_VMDQ_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+ if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+ " nb_queue_pools != %d and"
+ " nb_queue_pools != %d.",
+ ETH_16_POOLS, ETH_32_POOLS);
+ return -EINVAL;
+ }
+ }
+
+ /* For DCB mode check our configuration before we go further */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+ const struct rte_eth_dcb_rx_conf *conf;
+
+ conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
+ if (!(conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+ " and nb_tcs != %d.",
+ ETH_4_TCS, ETH_8_TCS);
+ return -EINVAL;
+ }
+ }
+
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ const struct rte_eth_dcb_tx_conf *conf;
+
+ conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
+ if (!(conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+ " and nb_tcs != %d.",
+ ETH_4_TCS, ETH_8_TCS);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * When DCB/VT is off, maximum number of queues changes,
+ * except for 82598EB, which remains constant.
+ */
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+ hw->mac.type != ixgbe_mac_82598EB) {
+ if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
+ PMD_INIT_LOG(ERR,
+ "Neither VT nor DCB are enabled, "
+ "nb_tx_q > %d.",
+ IXGBE_NONE_MODE_TX_NB_QUEUES);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
+static int
+ixgbe_dev_configure(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ /* multipe queue mode checking */
+ ret = ixgbe_check_mq_mode(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
+ ret);
+ return ret;
+ }
+
+ /* set flag to update link status after init */
+ intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+
+ /*
+ * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
+ * allocation or vector Rx preconditions we will reset it.
+ */
+ adapter->rx_bulk_alloc_allowed = true;
+ adapter->rx_vec_allowed = true;
+
+ return 0;
+}
+
+static void
+ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ uint32_t gpie;
+
+ /* only set up it on X550EM_X */
+ if (hw->mac.type == ixgbe_mac_X550EM_x) {
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t)
+ intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
+ }
+}
+
+int
+ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ struct rte_eth_link link;
+ uint8_t nb_q_per_pool;
+ uint32_t queue_stride;
+ uint32_t queue_idx, idx = 0, vf_idx;
+ uint32_t queue_end;
+ uint16_t total_rate = 0;
+ struct rte_pci_device *pci_dev;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ rte_eth_link_get_nowait(dev->data->port_id, &link);
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (tx_rate > link.link_speed)
+ return -EINVAL;
+
+ if (q_msk == 0)
+ return 0;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ queue_idx = vf * queue_stride;
+ queue_end = queue_idx + nb_q_per_pool - 1;
+ if (queue_end >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ if (vfinfo) {
+ for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
+ if (vf_idx == vf)
+ continue;
+ for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+ idx++)
+ total_rate += vfinfo[vf_idx].tx_rate[idx];
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ /* Store tx_rate for this vf. */
+ for (idx = 0; idx < nb_q_per_pool; idx++) {
+ if (((uint64_t)0x1 << idx) & q_msk) {
+ if (vfinfo[vf].tx_rate[idx] != tx_rate)
+ vfinfo[vf].tx_rate[idx] = tx_rate;
+ total_rate += tx_rate;
+ }
+ }
+
+ if (total_rate > dev->data->dev_link.link_speed) {
+ /* Reset stored TX rate of the VF if it causes exceed
+ * link speed.
+ */
+ memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+ return -EINVAL;
+ }
+
+ /* Set RTTBCNRC of each queue/pool for vf X */
+ for (; queue_idx <= queue_end; queue_idx++) {
+ if (0x1 & q_msk)
+ ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+ q_msk = q_msk >> 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int
+ixgbe_dev_start(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
+ int err, link_up = 0, negotiate = 0;
+ uint32_t speed = 0;
+ uint32_t allowed_speeds = 0;
+ int mask = 0;
+ int status;
+ uint16_t vf, idx;
+ uint32_t *link_speeds;
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* IXGBE devices don't support:
+ * - half duplex (checked afterwards for valid speeds)
+ * - fixed speed: TODO implement
+ */
+ if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+ PMD_INIT_LOG(ERR,
+ "Invalid link_speeds for port %u, fix speed not supported",
+ dev->data->port_id);
+ return -EINVAL;
+ }
+
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* stop adapter */
+ hw->adapter_stopped = 0;
+ ixgbe_stop_adapter(hw);
+
+ /* reinitialize adapter
+ * this calls reset and start
+ */
+ status = ixgbe_pf_reset_hw(hw);
+ if (status != 0)
+ return -1;
+ hw->mac.ops.start_hw(hw);
+ hw->mac.get_link_status = true;
+
+ /* configure PF module if SRIOV enabled */
+ ixgbe_pf_host_configure(dev);
+
+ ixgbe_dev_phy_intr_setup(dev);
+
+ /* check and configure queue intr-vector mapping */
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
+ PMD_INIT_LOG(ERR, "At most %d intr queues supported",
+ IXGBE_MAX_INTR_QUEUE_NUM);
+ return -ENOTSUP;
+ }
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ /* confiugre msix for sleep until rx interrupt */
+ ixgbe_configure_msix(dev);
+
+ /* initialize transmission unit */
+ ixgbe_dev_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ err = ixgbe_dev_rx_init(dev);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ goto error;
+ }
+
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ err = ixgbe_vlan_offload_config(dev, mask);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+ goto error;
+ }
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ /* Enable vlan filtering for VMDq */
+ ixgbe_vmdq_vlan_hw_filter_enable(dev);
+ }
+
+ /* Configure DCB hw */
+ ixgbe_configure_dcb(dev);
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ err = ixgbe_fdir_configure(dev);
+ if (err)
+ goto error;
+ }
+
+ /* Restore vf rate limit */
+ if (vfinfo != NULL) {
+ for (vf = 0; vf < pci_dev->max_vfs; vf++)
+ for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+ if (vfinfo[vf].tx_rate[idx] != 0)
+ ixgbe_set_vf_rate_limit(
+ dev, vf,
+ vfinfo[vf].tx_rate[idx],
+ 1 << idx);
+ }
+
+ ixgbe_restore_statistics_mapping(dev);
+
+ err = ixgbe_dev_rxtx_start(dev);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
+ goto error;
+ }
+
+ /* Skip link setup if loopback mode is enabled for 82599. */
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+ goto skip_link_setup;
+
+ if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
+ err = hw->mac.ops.setup_sfp(hw);
+ if (err)
+ goto error;
+ }
+
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ /* Turn on the copper */
+ ixgbe_set_phy_power(hw, true);
+ } else {
+ /* Turn on the laser */
+ ixgbe_enable_tx_laser(hw);
+ }
+
+ err = ixgbe_check_link(hw, &speed, &link_up, 0);
+ if (err)
+ goto error;
+ dev->data->dev_link.link_status = link_up;
+
+ err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
+ if (err)
+ goto error;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G |
+ ETH_LINK_SPEED_10G;
+ break;
+ default:
+ allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G;
+ }
+
+ link_speeds = &dev->data->dev_conf.link_speeds;
+ if (*link_speeds & ~allowed_speeds) {
+ PMD_INIT_LOG(ERR, "Invalid link setting");
+ goto error;
+ }
+
+ speed = 0x0;
+ if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ speed = IXGBE_LINK_SPEED_82598_AUTONEG;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ speed = IXGBE_LINK_SPEED_82599_AUTONEG;
+ break;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ speed = IXGBE_LINK_SPEED_X550_AUTONEG;
+ break;
+ default:
+ speed = IXGBE_LINK_SPEED_82599_AUTONEG;
+ }
+ } else {
+ if (*link_speeds & ETH_LINK_SPEED_10G)
+ speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_5G)
+ speed |= IXGBE_LINK_SPEED_5GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_2_5G)
+ speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_1G)
+ speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_100M)
+ speed |= IXGBE_LINK_SPEED_100_FULL;
+ }
+
+ err = ixgbe_setup_link(hw, speed, link_up);
+ if (err)
+ goto error;
+
+ ixgbe_dev_link_update(dev, 0);
+
+skip_link_setup:
+
+ if (rte_intr_allow_others(intr_handle)) {
+ /* check if lsc interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
+ else
+ ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
+ ixgbe_dev_macsec_interrupt_setup(dev);
+ } else {
+ rte_intr_callback_unregister(intr_handle,
+ ixgbe_dev_interrupt_handler, dev);
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ PMD_INIT_LOG(INFO, "lsc won't enable because of"
+ " no intr multiplex");
+ }
+
+ /* check if rxq interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.rxq != 0 &&
+ rte_intr_dp_is_en(intr_handle))
+ ixgbe_dev_rxq_interrupt_setup(dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
+ /* resume enabled intr since hw reset */
+ ixgbe_enable_intr(dev);
+ ixgbe_l2_tunnel_conf(dev);
+ ixgbe_filter_restore(dev);
+
+ if (tm_conf->root && !tm_conf->committed)
+ PMD_DRV_LOG(WARNING,
+ "please call hierarchy_commit() "
+ "before starting the port");
+
+ return 0;
+
+error:
+ PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
+ ixgbe_dev_clear_queues(dev);
+ return -EIO;
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void
+ixgbe_dev_stop(struct rte_eth_dev *dev)
+{
+ struct rte_eth_link link;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int vf;
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* disable interrupts */
+ ixgbe_disable_intr(hw);
+
+ /* reset the NIC */
+ ixgbe_pf_reset_hw(hw);
+ hw->adapter_stopped = 0;
+
+ /* stop adapter */
+ ixgbe_stop_adapter(hw);
+
+ for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
+ vfinfo[vf].clear_to_send = false;
+
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ /* Turn off the copper */
+ ixgbe_set_phy_power(hw, false);
+ } else {
+ /* Turn off the laser */
+ ixgbe_disable_tx_laser(hw);
+ }
+
+ ixgbe_dev_clear_queues(dev);
+
+ /* Clear stored conf */
+ dev->data->scattered_rx = 0;
+ dev->data->lro = 0;
+
+ /* Clear recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(dev, &link);
+
+ if (!rte_intr_allow_others(intr_handle))
+ /* resume to the default handler */
+ rte_intr_callback_register(intr_handle,
+ ixgbe_dev_interrupt_handler,
+ (void *)dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+
+ /* reset hierarchy commit */
+ tm_conf->committed = false;
+}
+
+/*
+ * Set device link up: enable tx.
+ */
+static int
+ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
+ if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+ /* Not suported in bypass mode */
+ PMD_INIT_LOG(ERR, "Set link up is not supported "
+ "by device id 0x%x", hw->device_id);
+ return -ENOTSUP;
+ }
+#endif
+ }
+
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ /* Turn on the copper */
+ ixgbe_set_phy_power(hw, true);
+ } else {
+ /* Turn on the laser */
+ ixgbe_enable_tx_laser(hw);
+ }
+
+ return 0;
+}
+
+/*
+ * Set device link down: disable tx.
+ */
+static int
+ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
+ if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+ /* Not suported in bypass mode */
+ PMD_INIT_LOG(ERR, "Set link down is not supported "
+ "by device id 0x%x", hw->device_id);
+ return -ENOTSUP;
+ }
+#endif
+ }
+
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ /* Turn off the copper */
+ ixgbe_set_phy_power(hw, false);
+ } else {
+ /* Turn off the laser */
+ ixgbe_disable_tx_laser(hw);
+ }
+
+ return 0;
+}
+
+/*
+ * Reset and stop device.
+ */
+static void
+ixgbe_dev_close(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ ixgbe_pf_reset_hw(hw);
+
+ ixgbe_dev_stop(dev);
+ hw->adapter_stopped = 1;
+
+ ixgbe_dev_free_queues(dev);
+
+ ixgbe_disable_pcie_master(hw);
+
+ /* reprogram the RAR[0] in case user changed it. */
+ ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+}
+
+/*
+ * Reset PF device.
+ */
+static int
+ixgbe_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ /* When a DPDK PMD PF begin to reset PF port, it should notify all
+ * its VF to make them align with it. The detailed notification
+ * mechanism is PMD specific. As to ixgbe PF, it is rather complex.
+ * To avoid unexpected behavior in VF, currently reset of PF with
+ * SR-IOV activation is not supported. It might be supported later.
+ */
+ if (dev->data->sriov.active)
+ return -ENOTSUP;
+
+ ret = eth_ixgbe_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_ixgbe_dev_init(dev, NULL);
+
+ return ret;
+}
+
+static void
+ixgbe_read_stats_registers(struct ixgbe_hw *hw,
+ struct ixgbe_hw_stats *hw_stats,
+ struct ixgbe_macsec_stats *macsec_stats,
+ uint64_t *total_missed_rx, uint64_t *total_qbrc,
+ uint64_t *total_qprc, uint64_t *total_qprdc)
+{
+ uint32_t bprc, lxon, lxoff, total;
+ uint32_t delta_gprc = 0;
+ unsigned i;
+ /* Workaround for RX byte count not including CRC bytes when CRC
+ * strip is enabled. CRC bytes are removed from counters when crc_strip
+ * is disabled.
+ */
+ int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
+ IXGBE_HLREG0_RXCRCSTRP);
+
+ hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+ hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+ hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
+ hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
+
+ for (i = 0; i < 8; i++) {
+ uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
+ /* global total per queue */
+ hw_stats->mpc[i] += mp;
+ /* Running comprehensive total for stats display */
+ *total_missed_rx += hw_stats->mpc[i];
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ hw_stats->rnbc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ hw_stats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ hw_stats->pxoffrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ } else {
+ hw_stats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ hw_stats->pxoffrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ hw_stats->pxon2offc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+ }
+ hw_stats->pxontxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+ hw_stats->pxofftxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+ }
+ for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
+ uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+
+ delta_gprc += delta_qprc;
+
+ hw_stats->qprc[i] += delta_qprc;
+ hw_stats->qptc[i] += delta_qptc;
+
+ hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+ hw_stats->qbrc[i] +=
+ ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
+ if (crc_strip == 0)
+ hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
+
+ hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+ hw_stats->qbtc[i] +=
+ ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
+
+ hw_stats->qprdc[i] += delta_qprdc;
+ *total_qprdc += hw_stats->qprdc[i];
+
+ *total_qprc += hw_stats->qprc[i];
+ *total_qbrc += hw_stats->qbrc[i];
+ }
+ hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
+ hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
+ hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+
+ /*
+ * An errata states that gprc actually counts good + missed packets:
+ * Workaround to set gprc to summated queue packet receives
+ */
+ hw_stats->gprc = *total_qprc;
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+ hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
+ hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+ hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
+ hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+ hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
+ hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+ hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ } else {
+ hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ /* 82598 only has a counter in the high register */
+ hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ }
+ uint64_t old_tpr = hw_stats->tpr;
+
+ hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+ hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
+
+ if (crc_strip == 0)
+ hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
+
+ uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
+ hw_stats->gptc += delta_gptc;
+ hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
+ hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
+
+ /*
+ * Workaround: mprc hardware is incorrectly counting
+ * broadcasts, so for now we subtract those.
+ */
+ bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
+ hw_stats->bprc += bprc;
+ hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ hw_stats->mprc -= bprc;
+
+ hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+ hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+ hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+ hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+ hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+ hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+
+ lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+ hw_stats->lxontxc += lxon;
+ lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ hw_stats->lxofftxc += lxoff;
+ total = lxon + lxoff;
+
+ hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+ hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+ hw_stats->gptc -= total;
+ hw_stats->mptc -= total;
+ hw_stats->ptc64 -= total;
+ hw_stats->gotc -= total * ETHER_MIN_LEN;
+
+ hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+ hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+ hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+ hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+ hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+ hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+ hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+ hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+ hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+ hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+ hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+ hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+ hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+ hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
+ hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+ hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
+ /* Only read FCOE on 82599 */
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+ hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+ hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+ hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+ hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+ }
+
+ /* Flow Director Stats registers */
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ hw_stats->fdirustat_add += IXGBE_READ_REG(hw,
+ IXGBE_FDIRUSTAT) & 0xFFFF;
+ hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw,
+ IXGBE_FDIRUSTAT) >> 16) & 0xFFFF;
+ hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw,
+ IXGBE_FDIRFSTAT) & 0xFFFF;
+ hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw,
+ IXGBE_FDIRFSTAT) >> 16) & 0xFFFF;
+ }
+ /* MACsec Stats registers */
+ macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
+ macsec_stats->out_pkts_encrypted +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
+ macsec_stats->out_pkts_protected +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
+ macsec_stats->out_octets_encrypted +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
+ macsec_stats->out_octets_protected +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
+ macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
+ macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
+ macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
+ macsec_stats->in_pkts_unknownsci +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
+ macsec_stats->in_octets_decrypted +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
+ macsec_stats->in_octets_validated +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
+ macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
+ macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
+ macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
+ for (i = 0; i < 2; i++) {
+ macsec_stats->in_pkts_ok +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
+ macsec_stats->in_pkts_invalid +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
+ macsec_stats->in_pkts_notvalid +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
+ }
+ macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
+ macsec_stats->in_pkts_notusingsa +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
+}
+
+/*
+ * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
+ */
+static int
+ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_stats *hw_stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
+ uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
+ unsigned i;
+
+ total_missed_rx = 0;
+ total_qbrc = 0;
+ total_qprc = 0;
+ total_qprdc = 0;
+
+ ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+ &total_qbrc, &total_qprc, &total_qprdc);
+
+ if (stats == NULL)
+ return -EINVAL;
+
+ /* Fill out the rte_eth_stats statistics structure */
+ stats->ipackets = total_qprc;
+ stats->ibytes = total_qbrc;
+ stats->opackets = hw_stats->gptc;
+ stats->obytes = hw_stats->gotc;
+
+ for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
+ stats->q_ipackets[i] = hw_stats->qprc[i];
+ stats->q_opackets[i] = hw_stats->qptc[i];
+ stats->q_ibytes[i] = hw_stats->qbrc[i];
+ stats->q_obytes[i] = hw_stats->qbtc[i];
+ stats->q_errors[i] = hw_stats->qprdc[i];
+ }
+
+ /* Rx Errors */
+ stats->imissed = total_missed_rx;
+ stats->ierrors = hw_stats->crcerrs +
+ hw_stats->mspdc +
+ hw_stats->rlec +
+ hw_stats->ruc +
+ hw_stats->roc +
+ hw_stats->illerrc +
+ hw_stats->errbc +
+ hw_stats->rfc +
+ hw_stats->fccrc +
+ hw_stats->fclast;
+
+ /* Tx Errors */
+ stats->oerrors = 0;
+ return 0;
+}
+
+static void
+ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw_stats *stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* HW registers are cleared on read */
+ ixgbe_dev_stats_get(dev, NULL);
+
+ /* Reset software totals */
+ memset(stats, 0, sizeof(*stats));
+}
+
+/* This function calculates the number of xstats based on the current config */
+static unsigned
+ixgbe_xstats_calc_num(void) {
+ return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
+ (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
+ (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
+}
+
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
+{
+ const unsigned cnt_stats = ixgbe_xstats_calc_num();
+ unsigned stat, i, count;
+
+ if (xstats_names != NULL) {
+ count = 0;
+
+ /* Note: limit >= cnt_stats checked upstream
+ * in rte_eth_xstats_names()
+ */
+
+ /* Extended stats from ixgbe_hw_stats */
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_stats_strings[i].name);
+ count++;
+ }
+
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_macsec_strings[i].name);
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_priority%u_%s", i,
+ rte_ixgbe_rxq_strings[stat].name);
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_priority%u_%s", i,
+ rte_ixgbe_txq_strings[stat].name);
+ count++;
+ }
+ }
+ }
+ return cnt_stats;
+}
+
+static int ixgbe_dev_xstats_get_names_by_id(
+ struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit)
+{
+ if (!ids) {
+ const unsigned int cnt_stats = ixgbe_xstats_calc_num();
+ unsigned int stat, i, count;
+
+ if (xstats_names != NULL) {
+ count = 0;
+
+ /* Note: limit >= cnt_stats checked upstream
+ * in rte_eth_xstats_names()
+ */
+
+ /* Extended stats from ixgbe_hw_stats */
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_stats_strings[i].name);
+ count++;
+ }
+
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_macsec_strings[i].name);
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_priority%u_%s", i,
+ rte_ixgbe_rxq_strings[stat].name);
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_priority%u_%s", i,
+ rte_ixgbe_txq_strings[stat].name);
+ count++;
+ }
+ }
+ }
+ return cnt_stats;
+ }
+
+ uint16_t i;
+ uint16_t size = ixgbe_xstats_calc_num();
+ struct rte_eth_xstat_name xstats_names_copy[size];
+
+ ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
+ size);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= size) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name,
+ xstats_names_copy[ids[i]].name);
+ }
+ return limit;
+}
+
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned limit)
+{
+ unsigned i;
+
+ if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
+ return -ENOMEM;
+
+ if (xstats_names != NULL)
+ for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s", rte_ixgbevf_stats_strings[i].name);
+ return IXGBEVF_NB_XSTATS;
+}
+
+static int
+ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned n)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_stats *hw_stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
+ uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
+ unsigned i, stat, count = 0;
+
+ count = ixgbe_xstats_calc_num();
+
+ if (n < count)
+ return count;
+
+ total_missed_rx = 0;
+ total_qbrc = 0;
+ total_qprc = 0;
+ total_qprdc = 0;
+
+ ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+ &total_qbrc, &total_qprc, &total_qprdc);
+
+ /* If this is a reset xstats is NULL, and we have cleared the
+ * registers by reading them.
+ */
+ if (!xstats)
+ return 0;
+
+ /* Extended stats from ixgbe_hw_stats */
+ count = 0;
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_stats_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
+ rte_ixgbe_macsec_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_rxq_strings[stat].offset +
+ (sizeof(uint64_t) * i));
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_txq_strings[stat].offset +
+ (sizeof(uint64_t) * i));
+ xstats[count].id = count;
+ count++;
+ }
+ }
+ return count;
+}
+
+static int
+ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ if (!ids) {
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_stats *hw_stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(
+ dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
+ uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
+ unsigned int i, stat, count = 0;
+
+ count = ixgbe_xstats_calc_num();
+
+ if (!ids && n < count)
+ return count;
+
+ total_missed_rx = 0;
+ total_qbrc = 0;
+ total_qprc = 0;
+ total_qprdc = 0;
+
+ ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
+ &total_missed_rx, &total_qbrc, &total_qprc,
+ &total_qprdc);
+
+ /* If this is a reset xstats is NULL, and we have cleared the
+ * registers by reading them.
+ */
+ if (!ids && !values)
+ return 0;
+
+ /* Extended stats from ixgbe_hw_stats */
+ count = 0;
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ values[count] = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_stats_strings[i].offset);
+ count++;
+ }
+
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ values[count] = *(uint64_t *)(((char *)macsec_stats) +
+ rte_ixgbe_macsec_strings[i].offset);
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+ values[count] =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_rxq_strings[stat].offset +
+ (sizeof(uint64_t) * i));
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+ values[count] =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_txq_strings[stat].offset +
+ (sizeof(uint64_t) * i));
+ count++;
+ }
+ }
+ return count;
+ }
+
+ uint16_t i;
+ uint16_t size = ixgbe_xstats_calc_num();
+ uint64_t values_copy[size];
+
+ ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= size) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return n;
+}
+
+static void
+ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw_stats *stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
+
+ unsigned count = ixgbe_xstats_calc_num();
+
+ /* HW registers are cleared on read */
+ ixgbe_dev_xstats_get(dev, NULL, count);
+
+ /* Reset software totals */
+ memset(stats, 0, sizeof(*stats));
+ memset(macsec_stats, 0, sizeof(*macsec_stats));
+}
+
+static void
+ixgbevf_update_stats(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* Good Rx packet, include VF loopback */
+ UPDATE_VF_STAT(IXGBE_VFGPRC,
+ hw_stats->last_vfgprc, hw_stats->vfgprc);
+
+ /* Good Rx octets, include VF loopback */
+ UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+ hw_stats->last_vfgorc, hw_stats->vfgorc);
+
+ /* Good Tx packet, include VF loopback */
+ UPDATE_VF_STAT(IXGBE_VFGPTC,
+ hw_stats->last_vfgptc, hw_stats->vfgptc);
+
+ /* Good Tx octets, include VF loopback */
+ UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+ hw_stats->last_vfgotc, hw_stats->vfgotc);
+
+ /* Rx Multicst Packet */
+ UPDATE_VF_STAT(IXGBE_VFMPRC,
+ hw_stats->last_vfmprc, hw_stats->vfmprc);
+}
+
+static int
+ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned n)
+{
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ unsigned i;
+
+ if (n < IXGBEVF_NB_XSTATS)
+ return IXGBEVF_NB_XSTATS;
+
+ ixgbevf_update_stats(dev);
+
+ if (!xstats)
+ return 0;
+
+ /* Extended stats */
+ for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
+ xstats[i].id = i;
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbevf_stats_strings[i].offset);
+ }
+
+ return IXGBEVF_NB_XSTATS;
+}
+
+static int
+ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ ixgbevf_update_stats(dev);
+
+ if (stats == NULL)
+ return -EINVAL;
+
+ stats->ipackets = hw_stats->vfgprc;
+ stats->ibytes = hw_stats->vfgorc;
+ stats->opackets = hw_stats->vfgptc;
+ stats->obytes = hw_stats->vfgotc;
+ return 0;
+}
+
+static void
+ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* Sync HW register to the last stats */
+ ixgbevf_dev_stats_get(dev, NULL);
+
+ /* reset HW current stats*/
+ hw_stats->vfgprc = 0;
+ hw_stats->vfgorc = 0;
+ hw_stats->vfgptc = 0;
+ hw_stats->vfgotc = 0;
+}
+
+static int
+ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u16 eeprom_verh, eeprom_verl;
+ u32 etrack_id;
+ int ret;
+
+ ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
+ ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
+
+ etrack_id = (eeprom_verh << 16) | eeprom_verl;
+ ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
+static void
+ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+ dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /*
+ * When DCB/VT is off, maximum number of queues changes,
+ * except for 82598EB, which remains constant.
+ */
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+ hw->mac.type != ixgbe_mac_82598EB)
+ dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
+ }
+ dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
+ dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
+ dev_info->max_vfs = pci_dev->max_vfs;
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ dev_info->max_vmdq_pools = ETH_16_POOLS;
+ else
+ dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
+ .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
+ .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
+ .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
+ .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
+ dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
+ dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
+ dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
+
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ if (hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X540_vf ||
+ hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550_vf) {
+ dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+ }
+ if (hw->mac.type == ixgbe_mac_X550) {
+ dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
+ dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+ }
+
+ /* Driver-preferred Rx/Tx parameters */
+ dev_info->default_rxportconf.burst_size = 32;
+ dev_info->default_txportconf.burst_size = 32;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
+}
+
+static const uint32_t *
+ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* For non-vec functions,
+ * refers to ixgbe_rxd_pkt_info_to_pkt_type();
+ * for vec functions,
+ * refers to _recv_raw_pkts_vec().
+ */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_IP,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
+ dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
+ dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
+ dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
+ return ptypes;
+
+#if defined(RTE_ARCH_X86)
+ if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
+ dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
+ return ptypes;
+#endif
+ return NULL;
+}
+
+static void
+ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+ dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
+ dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
+ dev_info->max_vfs = pci_dev->max_vfs;
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ dev_info->max_vmdq_pools = ETH_16_POOLS;
+ else
+ dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
+ .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
+ .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
+ .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
+ .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+}
+
+static int
+ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ int *link_up, int wait_to_complete)
+{
+ /**
+ * for a quick link status checking, wait_to_compelet == 0,
+ * skip PF link status checking
+ */
+ bool no_pflink_check = wait_to_complete == 0;
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ uint32_t links_reg, in_msg;
+ int ret_val = 0;
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ rte_delay_us(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* Since Reserved in older MAC's */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ if (no_pflink_check) {
+ if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
+ mac->get_link_status = true;
+ else
+ mac->get_link_status = false;
+
+ goto out;
+ }
+ /* if the read failed it could just be a mailbox collision, best wait
+ * until we are called again and don't report an error
+ */
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ goto out;
+
+ if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+ /* msg is not CTS and is NACK we must have lost CTS status */
+ if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+ ret_val = -1;
+ goto out;
+ }
+
+ /* the pf is talking, if we timed out in the past we reinit */
+ if (!mbx->timeout) {
+ ret_val = -1;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return ret_val;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+int
+ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ int wait_to_complete, int vf)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ int link_up;
+ int diag;
+ u32 speed = 0;
+ int wait = 1;
+ bool autoneg = false;
+
+ memset(&link, 0, sizeof(link));
+ link.link_status = ETH_LINK_DOWN;
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
+
+ hw->mac.get_link_status = true;
+
+ if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
+ ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
+ speed = hw->phy.autoneg_advertised;
+ if (!speed)
+ ixgbe_get_link_capabilities(hw, &speed, &autoneg);
+ ixgbe_setup_link(hw, speed, true);
+ }
+
+ /* check if it needs to wait to complete, if lsc interrupt is enabled */
+ if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
+ wait = 0;
+
+ if (vf)
+ diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
+ else
+ diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
+
+ if (diag != 0) {
+ link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ return rte_eth_linkstatus_set(dev, &link);
+ }
+
+ if (link_up == 0) {
+ intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+ return rte_eth_linkstatus_set(dev, &link);
+ }
+
+ intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+ link.link_status = ETH_LINK_UP;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ switch (link_speed) {
+ default:
+ case IXGBE_LINK_SPEED_UNKNOWN:
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+
+ case IXGBE_LINK_SPEED_100_FULL:
+ link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_2_5G;
+ break;
+
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_5G;
+ break;
+
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ }
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
+}
+
+static int
+ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
+}
+
+static void
+ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fctrl;
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+}
+
+static void
+ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fctrl;
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl &= (~IXGBE_FCTRL_UPE);
+ if (dev->data->all_multicast == 1)
+ fctrl |= IXGBE_FCTRL_MPE;
+ else
+ fctrl &= (~IXGBE_FCTRL_MPE);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+}
+
+static void
+ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fctrl;
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+}
+
+static void
+ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fctrl;
+
+ if (dev->data->promiscuous == 1)
+ return; /* must remain in all_multicast mode */
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl &= (~IXGBE_FCTRL_MPE);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called once only during nic initialized.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ * @param on
+ * Enable or Disable.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ ixgbe_dev_link_status_print(dev);
+ if (on)
+ intr->mask |= IXGBE_EICR_LSC;
+ else
+ intr->mask &= ~IXGBE_EICR_LSC;
+
+ return 0;
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called once only during nic initialized.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ intr->mask |= IXGBE_EICR_RTX_QUEUE;
+
+ return 0;
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called once only during nic initialized.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ intr->mask |= IXGBE_EICR_LINKSEC;
+
+ return 0;
+}
+
+/*
+ * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t eicr;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ /* clear all cause mask */
+ ixgbe_disable_intr(hw);
+
+ /* read-on-clear nic registers here */
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+ PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
+
+ intr->flags = 0;
+
+ /* set flag for async link update */
+ if (eicr & IXGBE_EICR_LSC)
+ intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+
+ if (eicr & IXGBE_EICR_MAILBOX)
+ intr->flags |= IXGBE_FLAG_MAILBOX;
+
+ if (eicr & IXGBE_EICR_LINKSEC)
+ intr->flags |= IXGBE_FLAG_MACSEC;
+
+ if (hw->mac.type == ixgbe_mac_X550EM_x &&
+ hw->phy.type == ixgbe_phy_x550em_ext_t &&
+ (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
+ intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
+
+ return 0;
+}
+
+/**
+ * It gets and then prints the link status.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static void
+ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_eth_link link;
+
+ rte_eth_linkstatus_get(dev, &link);
+
+ if (link.link_status) {
+ PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
+ (int)(dev->data->port_id),
+ (unsigned)link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ } else {
+ PMD_INIT_LOG(INFO, " Port %d: Link Down",
+ (int)(dev->data->port_id));
+ }
+ PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
+ pci_dev->addr.domain,
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+}
+
+/*
+ * It executes link_update after knowing an interrupt occurred.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ int64_t timeout;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
+
+ if (intr->flags & IXGBE_FLAG_MAILBOX) {
+ ixgbe_pf_mbx_process(dev);
+ intr->flags &= ~IXGBE_FLAG_MAILBOX;
+ }
+
+ if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
+ ixgbe_handle_lasi(hw);
+ intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
+ }
+
+ if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+ struct rte_eth_link link;
+
+ /* get the link status before link update, for predicting later */
+ rte_eth_linkstatus_get(dev, &link);
+
+ ixgbe_dev_link_update(dev, 0);
+
+ /* likely to up */
+ if (!link.link_status)
+ /* handle it 1 sec later, wait it being stable */
+ timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
+ /* likely to down */
+ else
+ /* handle it 4 sec later, wait it being stable */
+ timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
+
+ ixgbe_dev_link_status_print(dev);
+ if (rte_eal_alarm_set(timeout * 1000,
+ ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
+ PMD_DRV_LOG(ERR, "Error setting alarm");
+ else {
+ /* remember original mask */
+ intr->mask_original = intr->mask;
+ /* only disable lsc interrupt */
+ intr->mask &= ~IXGBE_EIMS_LSC;
+ }
+ }
+
+ PMD_DRV_LOG(DEBUG, "enable intr immediately");
+ ixgbe_enable_intr(dev);
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+/**
+ * Interrupt handler which shall be registered for alarm callback for delayed
+ * handling specific interrupt to wait for the stable nic state. As the
+ * NIC interrupt state is not stable for ixgbe after link is just down,
+ * it needs to wait 4 seconds to get the stable status.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) regsitered before.
+ *
+ * @return
+ * void
+ */
+static void
+ixgbe_dev_interrupt_delayed_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t eicr;
+
+ ixgbe_disable_intr(hw);
+
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+ if (eicr & IXGBE_EICR_MAILBOX)
+ ixgbe_pf_mbx_process(dev);
+
+ if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
+ ixgbe_handle_lasi(hw);
+ intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
+ }
+
+ if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+ ixgbe_dev_link_update(dev, 0);
+ intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+ ixgbe_dev_link_status_print(dev);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+
+ if (intr->flags & IXGBE_FLAG_MACSEC) {
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
+ NULL);
+ intr->flags &= ~IXGBE_FLAG_MACSEC;
+ }
+
+ /* restore original mask */
+ intr->mask = intr->mask_original;
+ intr->mask_original = 0;
+
+ PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
+ ixgbe_enable_intr(dev);
+ rte_intr_enable(intr_handle);
+}
+
+/**
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) regsitered before.
+ *
+ * @return
+ * void
+ */
+static void
+ixgbe_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ ixgbe_dev_interrupt_get_status(dev);
+ ixgbe_dev_interrupt_action(dev, dev->intr_handle);
+}
+
+static int
+ixgbe_dev_led_on(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
+}
+
+static int
+ixgbe_dev_led_off(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
+}
+
+static int
+ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct ixgbe_hw *hw;
+ uint32_t mflcn_reg;
+ uint32_t fccfg_reg;
+ int rx_pause;
+ int tx_pause;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ fc_conf->pause_time = hw->fc.pause_time;
+ fc_conf->high_water = hw->fc.high_water[0];
+ fc_conf->low_water = hw->fc.low_water[0];
+ fc_conf->send_xon = hw->fc.send_xon;
+ fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
+
+ /*
+ * Return rx_pause status according to actual setting of
+ * MFLCN register.
+ */
+ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
+ rx_pause = 1;
+ else
+ rx_pause = 0;
+
+ /*
+ * Return tx_pause status according to actual setting of
+ * FCCFG register.
+ */
+ fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
+ tx_pause = 1;
+ else
+ tx_pause = 0;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static int
+ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct ixgbe_hw *hw;
+ int err;
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ uint32_t mflcn;
+ enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
+ ixgbe_fc_none,
+ ixgbe_fc_rx_pause,
+ ixgbe_fc_tx_pause,
+ ixgbe_fc_full
+ };
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+ /*
+ * At least reserve one Ethernet frame for watermark
+ * high_water/low_water in kilo bytes for ixgbe
+ */
+ max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+ if ((fc_conf->high_water > max_high_water) ||
+ (fc_conf->high_water < fc_conf->low_water)) {
+ PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+ PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
+ return -EINVAL;
+ }
+
+ hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
+ hw->fc.pause_time = fc_conf->pause_time;
+ hw->fc.high_water[0] = fc_conf->high_water;
+ hw->fc.low_water[0] = fc_conf->low_water;
+ hw->fc.send_xon = fc_conf->send_xon;
+ hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
+
+ err = ixgbe_fc_enable(hw);
+
+ /* Not negotiated is not an error case */
+ if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
+
+ /* check if we want to forward MAC frames - driver doesn't have native
+ * capability to do that, so we'll write the registers ourselves */
+
+ mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+ /* set or clear MFLCN.PMCF bit depending on configuration */
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ mflcn |= IXGBE_MFLCN_PMCF;
+ else
+ mflcn &= ~IXGBE_MFLCN_PMCF;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+ }
+
+ PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
+ return -EIO;
+}
+
+/**
+ * ixgbe_pfc_enable_generic - Enable flow control
+ * @hw: pointer to hardware structure
+ * @tc_num: traffic class number
+ * Enable flow control according to the current settings.
+ */
+static int
+ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
+{
+ int ret_val = 0;
+ uint32_t mflcn_reg, fccfg_reg;
+ uint32_t reg;
+ uint32_t fcrtl, fcrth;
+ uint8_t i;
+ uint8_t nb_rx_en;
+
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* Low water mark of zero causes XOFF floods */
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+ /* High/Low water can not be 0 */
+ if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
+ PMD_INIT_LOG(ERR, "Invalid water mark configuration");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
+ PMD_INIT_LOG(ERR, "Invalid water mark configuration");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ /* Negotiate the fc mode to use */
+ ixgbe_fc_autoneg(hw);
+
+ /* Disable any previous flow control settings */
+ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
+
+ fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
+
+ switch (hw->fc.current_mode) {
+ case ixgbe_fc_none:
+ /*
+ * If the count of enabled RX Priority Flow control >1,
+ * and the TX pause can not be disabled
+ */
+ nb_rx_en = 0;
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+ if (reg & IXGBE_FCRTH_FCEN)
+ nb_rx_en++;
+ }
+ if (nb_rx_en > 1)
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ mflcn_reg |= IXGBE_MFLCN_RPFCE;
+ /*
+ * If the count of enabled RX Priority Flow control >1,
+ * and the TX pause can not be disabled
+ */
+ nb_rx_en = 0;
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+ if (reg & IXGBE_FCRTH_FCEN)
+ nb_rx_en++;
+ }
+ if (nb_rx_en > 1)
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ mflcn_reg |= IXGBE_MFLCN_RPFCE;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ /* Set 802.3x based flow control settings. */
+ mflcn_reg |= IXGBE_MFLCN_DPF;
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
+
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[tc_num]) {
+ fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
+ fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
+ /*
+ * In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the maximum FCRTH value. This allows the Tx
+ * switch to function even under heavy Rx workloads.
+ */
+ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
+
+ /* Configure pause time (2 TCs per register) */
+ reg = hw->fc.pause_time * 0x00010001;
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+ return ret_val;
+}
+
+static int
+ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
+ }
+ return ret_val;
+}
+
+static int
+ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
+{
+ int err;
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ uint8_t tc_num;
+ uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_dcb_config *dcb_config =
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+
+ enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
+ ixgbe_fc_none,
+ ixgbe_fc_rx_pause,
+ ixgbe_fc_tx_pause,
+ ixgbe_fc_full
+ };
+
+ PMD_INIT_FUNC_TRACE();
+
+ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
+ tc_num = map[pfc_conf->priority];
+ rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+ /*
+ * At least reserve one Ethernet frame for watermark
+ * high_water/low_water in kilo bytes for ixgbe
+ */
+ max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+ if ((pfc_conf->fc.high_water > max_high_water) ||
+ (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+ PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+ PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
+ return -EINVAL;
+ }
+
+ hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
+ hw->fc.pause_time = pfc_conf->fc.pause_time;
+ hw->fc.send_xon = pfc_conf->fc.send_xon;
+ hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
+ hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
+
+ err = ixgbe_dcb_pfc_enable(dev, tc_num);
+
+ /* Not negotiated is not an error case */
+ if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
+ return 0;
+
+ PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
+ return -EIO;
+}
+
+static int
+ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint16_t i, sp_reta_size;
+ uint8_t j, mask;
+ uint32_t reta, r;
+ uint16_t idx, shift;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reta_reg;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!ixgbe_rss_update_sp(hw->mac.type)) {
+ PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
+ "NIC.");
+ return -ENOTSUP;
+ }
+
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+ if (reta_size != sp_reta_size) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, sp_reta_size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IXGBE_4_BIT_MASK);
+ if (!mask)
+ continue;
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+ if (mask == IXGBE_4_BIT_MASK)
+ r = 0;
+ else
+ r = IXGBE_READ_REG(hw, reta_reg);
+ for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ reta |= reta_conf[idx].reta[shift + j] <<
+ (CHAR_BIT * j);
+ else
+ reta |= r & (IXGBE_8_BIT_MASK <<
+ (CHAR_BIT * j));
+ }
+ IXGBE_WRITE_REG(hw, reta_reg, reta);
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint16_t i, sp_reta_size;
+ uint8_t j, mask;
+ uint32_t reta;
+ uint16_t idx, shift;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reta_reg;
+
+ PMD_INIT_FUNC_TRACE();
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+ if (reta_size != sp_reta_size) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, sp_reta_size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IXGBE_4_BIT_MASK);
+ if (!mask)
+ continue;
+
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+ reta = IXGBE_READ_REG(hw, reta_reg);
+ for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ reta_conf[idx].reta[shift + j] =
+ ((reta >> (CHAR_BIT * j)) &
+ IXGBE_8_BIT_MASK);
+ }
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t enable_addr = 1;
+
+ return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
+ pool, enable_addr);
+}
+
+static void
+ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ixgbe_clear_rar(hw, index);
+}
+
+static int
+ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ ixgbe_remove_rar(dev, 0);
+ ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
+
+ return 0;
+}
+
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
+{
+ if (strcmp(dev->device->driver->name, drv->driver.name))
+ return false;
+
+ return true;
+}
+
+bool
+is_ixgbe_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_ixgbe_pmd);
+}
+
+static int
+ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ uint32_t hlreg0;
+ uint32_t maxfrs;
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev_info dev_info;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_dev_data *dev_data = dev->data;
+
+ ixgbe_dev_info_get(dev, &dev_info);
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* If device is started, refuse mtu that requires the support of
+ * scattered packets when this feature has not been enabled before.
+ */
+ if (dev_data->dev_started && !dev_data->scattered_rx &&
+ (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+ PMD_INIT_LOG(ERR, "Stop port first.");
+ return -EINVAL;
+ }
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+
+ /* switch to jumbo mode if needed */
+ if (frame_size > ETHER_MAX_LEN) {
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+ } else {
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+ maxfrs &= 0x0000FFFF;
+ maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+
+ return 0;
+}
+
+/*
+ * Virtual Function operations
+ */
+static void
+ixgbevf_intr_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Clear interrupt mask to stop from interrupts being generated */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Clear mask value. */
+ intr->mask = 0;
+}
+
+static void
+ixgbevf_intr_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* VF enable interrupt autoclean */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Save IXGBE_VTEIMS value to mask. */
+ intr->mask = IXGBE_VF_IRQ_ENABLE_MASK;
+}
+
+static int
+ixgbevf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+
+ PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
+ dev->data->port_id);
+
+ /*
+ * VF has no ability to enable/disable HW CRC
+ * Keep the persistent behavior the same as Host PF
+ */
+#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
+ if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
+ PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ }
+#else
+ if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
+ PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
+ }
+#endif
+
+ /*
+ * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
+ * allocation or vector Rx preconditions we will reset it.
+ */
+ adapter->rx_bulk_alloc_allowed = true;
+ adapter->rx_vec_allowed = true;
+
+ return 0;
+}
+
+static int
+ixgbevf_dev_start(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t intr_vector = 0;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ int err, mask = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
+ return err;
+ }
+ hw->mac.get_link_status = true;
+
+ /* negotiate mailbox API version to use with the PF. */
+ ixgbevf_negotiate_api(hw);
+
+ ixgbevf_dev_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ err = ixgbevf_dev_rx_init(dev);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
+ ixgbe_dev_clear_queues(dev);
+ return err;
+ }
+
+ /* Set vfta */
+ ixgbevf_set_vfta_all(dev, 1);
+
+ /* Set HW strip */
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ err = ixgbevf_vlan_offload_config(dev, mask);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
+ ixgbe_dev_clear_queues(dev);
+ return err;
+ }
+
+ ixgbevf_dev_rxtx_start(dev);
+
+ ixgbevf_dev_link_update(dev, 0);
+
+ /* check and configure queue intr-vector mapping */
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
+ /* According to datasheet, only vector 0/1/2 can be used,
+ * now only one vector is used for Rx queue
+ */
+ intr_vector = 1;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+ ixgbevf_configure_msix(dev);
+
+ /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
+ * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ).
+ * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( )
+ * is not cleared, it will fail when following rte_intr_enable( ) tries
+ * to map Rx queue interrupt to other VFIO vectors.
+ * So clear uio/vfio intr/evevnfd first to avoid failure.
+ */
+ rte_intr_disable(intr_handle);
+
+ rte_intr_enable(intr_handle);
+
+ /* Re-enable interrupt for VF */
+ ixgbevf_intr_enable(dev);
+
+ return 0;
+}
+
+static void
+ixgbevf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ixgbevf_intr_disable(dev);
+
+ hw->adapter_stopped = 1;
+ ixgbe_stop_adapter(hw);
+
+ /*
+ * Clear what we set, but we still keep shadow_vfta to
+ * restore after device starts
+ */
+ ixgbevf_set_vfta_all(dev, 0);
+
+ /* Clear stored conf */
+ dev->data->scattered_rx = 0;
+
+ ixgbe_dev_clear_queues(dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
+
+static void
+ixgbevf_dev_close(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ ixgbe_reset_hw(hw);
+
+ ixgbevf_dev_stop(dev);
+
+ ixgbe_dev_free_queues(dev);
+
+ /**
+ * Remove the VF MAC address ro ensure
+ * that the VF traffic goes to the PF
+ * after stop, close and detach of the VF
+ **/
+ ixgbevf_remove_mac_addr(dev, 0);
+}
+
+/*
+ * Reset VF device
+ */
+static int
+ixgbevf_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = eth_ixgbevf_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_ixgbevf_dev_init(dev);
+
+ return ret;
+}
+
+static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vfta *shadow_vfta =
+ IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ int i = 0, j = 0, vfta = 0, mask = 1;
+
+ for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
+ vfta = shadow_vfta->vfta[i];
+ if (vfta) {
+ mask = 1;
+ for (j = 0; j < 32; j++) {
+ if (vfta & mask)
+ ixgbe_set_vfta(hw, (i<<5)+j, 0,
+ on, false);
+ mask <<= 1;
+ }
+ }
+ }
+
+}
+
+static int
+ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vfta *shadow_vfta =
+ IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vid_idx = 0;
+ uint32_t vid_bit = 0;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
+ ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to set VF vlan");
+ return ret;
+ }
+ vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
+ vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
+
+ /* Save what we set and retore it after device reset */
+ if (on)
+ shadow_vfta->vfta[vid_idx] |= vid_bit;
+ else
+ shadow_vfta->vfta[vid_idx] &= ~vid_bit;
+
+ return 0;
+}
+
+static void
+ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (queue >= hw->mac.max_rx_queues)
+ return;
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ if (on)
+ ctrl |= IXGBE_RXDCTL_VME;
+ else
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
+ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
+}
+
+static int
+ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct ixgbe_rx_queue *rxq;
+ uint16_t i;
+ int on = 0;
+
+ /* VF function only support hw strip feature, others are not support */
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ ixgbevf_vlan_strip_queue_set(dev, i, on);
+ }
+ }
+
+ return 0;
+}
+
+static int
+ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ ixgbevf_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
+int
+ixgbe_vt_check(struct ixgbe_hw *hw)
+{
+ uint32_t reg_val;
+
+ /* if Virtualization Technology is enabled */
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
+ PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
+ return -1;
+ }
+
+ return 0;
+}
+
+static uint32_t
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
+{
+ uint32_t vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 4) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 3) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 2) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((uc_addr->addr_bytes[4]) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+static int
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint8_t on)
+{
+ uint32_t vector;
+ uint32_t uta_idx;
+ uint32_t reg_val;
+ uint32_t uta_shift;
+ uint32_t rc;
+ const uint32_t ixgbe_uta_idx_mask = 0x7F;
+ const uint32_t ixgbe_uta_bit_shift = 5;
+ const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
+ const uint32_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_uta_info *uta_info =
+ IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+
+ /* The UTA table only exists on 82599 hardware and newer */
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return -ENOTSUP;
+
+ vector = ixgbe_uta_vector(hw, mac_addr);
+ uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
+ uta_shift = vector & ixgbe_uta_bit_mask;
+
+ rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
+ if (rc == on)
+ return 0;
+
+ reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
+ if (on) {
+ uta_info->uta_in_use++;
+ reg_val |= (bit1 << uta_shift);
+ uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
+ } else {
+ uta_info->uta_in_use--;
+ reg_val &= ~(bit1 << uta_shift);
+ uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
+
+ if (uta_info->uta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+ return 0;
+}
+
+static int
+ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
+{
+ int i;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_uta_info *uta_info =
+ IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+
+ /* The UTA table only exists on 82599 hardware and newer */
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return -ENOTSUP;
+
+ if (on) {
+ for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ uta_info->uta_shadow[i] = ~0;
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
+ }
+ } else {
+ for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ uta_info->uta_shadow[i] = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+ }
+ }
+ return 0;
+
+}
+
+uint32_t
+ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+ uint32_t new_val = orig_val;
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+ new_val |= IXGBE_VMOLR_AUPE;
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+ new_val |= IXGBE_VMOLR_ROMPE;
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ new_val |= IXGBE_VMOLR_ROPE;
+ if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ new_val |= IXGBE_VMOLR_BAM;
+ if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ new_val |= IXGBE_VMOLR_MPE;
+
+ return new_val;
+}
+
+#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */
+#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */
+#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */
+#define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */
+#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
+ ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
+ ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
+
+static int
+ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on)
+{
+ uint32_t mr_ctl, vlvf;
+ uint32_t mp_lsb = 0;
+ uint32_t mv_msb = 0;
+ uint32_t mv_lsb = 0;
+ uint32_t mp_msb = 0;
+ uint8_t i = 0;
+ int reg_index = 0;
+ uint64_t vlan_mask = 0;
+
+ const uint8_t pool_mask_offset = 32;
+ const uint8_t vlan_mask_offset = 32;
+ const uint8_t dst_pool_offset = 8;
+ const uint8_t rule_mr_offset = 4;
+ const uint8_t mirror_rule_mask = 0x0F;
+
+ struct ixgbe_mirror_info *mr_info =
+ (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint8_t mirror_type = 0;
+
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ if (rule_id >= IXGBE_MAX_MIRROR_RULES)
+ return -EINVAL;
+
+ if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
+ PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
+ mirror_conf->rule_type);
+ return -EINVAL;
+ }
+
+ if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+ mirror_type |= IXGBE_MRCTL_VLME;
+ /* Check if vlan id is valid and find conresponding VLAN ID
+ * index in VLVF
+ */
+ for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
+ if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+ /* search vlan id related pool vlan filter
+ * index
+ */
+ reg_index = ixgbe_find_vlvf_slot(
+ hw,
+ mirror_conf->vlan.vlan_id[i],
+ false);
+ if (reg_index < 0)
+ return -EINVAL;
+ vlvf = IXGBE_READ_REG(hw,
+ IXGBE_VLVF(reg_index));
+ if ((vlvf & IXGBE_VLVF_VIEN) &&
+ ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
+ mirror_conf->vlan.vlan_id[i]))
+ vlan_mask |= (1ULL << reg_index);
+ else
+ return -EINVAL;
+ }
+ }
+
+ if (on) {
+ mv_lsb = vlan_mask & 0xFFFFFFFF;
+ mv_msb = vlan_mask >> vlan_mask_offset;
+
+ mr_info->mr_conf[rule_id].vlan.vlan_mask =
+ mirror_conf->vlan.vlan_mask;
+ for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+ if (mirror_conf->vlan.vlan_mask & (1ULL << i))
+ mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
+ mirror_conf->vlan.vlan_id[i];
+ }
+ } else {
+ mv_lsb = 0;
+ mv_msb = 0;
+ mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
+ for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+ mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
+ }
+ }
+
+ /**
+ * if enable pool mirror, write related pool mask register,if disable
+ * pool mirror, clear PFMRVM register
+ */
+ if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+ mirror_type |= IXGBE_MRCTL_VPME;
+ if (on) {
+ mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
+ mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
+ mr_info->mr_conf[rule_id].pool_mask =
+ mirror_conf->pool_mask;
+
+ } else {
+ mp_lsb = 0;
+ mp_msb = 0;
+ mr_info->mr_conf[rule_id].pool_mask = 0;
+ }
+ }
+ if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
+ mirror_type |= IXGBE_MRCTL_UPME;
+ if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
+ mirror_type |= IXGBE_MRCTL_DPME;
+
+ /* read mirror control register and recalculate it */
+ mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
+
+ if (on) {
+ mr_ctl |= mirror_type;
+ mr_ctl &= mirror_rule_mask;
+ mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
+ } else {
+ mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
+ }
+
+ mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
+ mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
+
+ /* write mirrror control register */
+ IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+ /* write pool mirrror control register */
+ if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
+ mp_msb);
+ }
+ /* write VLAN mirrror control register */
+ if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
+ mv_msb);
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
+{
+ int mr_ctl = 0;
+ uint32_t lsb_val = 0;
+ uint32_t msb_val = 0;
+ const uint8_t rule_mr_offset = 4;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_mirror_info *mr_info =
+ (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ if (rule_id >= IXGBE_MAX_MIRROR_RULES)
+ return -EINVAL;
+
+ memset(&mr_info->mr_conf[rule_id], 0,
+ sizeof(struct rte_eth_mirror_conf));
+
+ /* clear PFVMCTL register */
+ IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+ /* clear pool mask register */
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
+
+ /* clear vlan mask register */
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
+
+ return 0;
+}
+
+static int
+ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t vec = IXGBE_MISC_VEC_ID;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = IXGBE_RX_VEC_START;
+ intr->mask |= (1 << vec);
+ RTE_SET_USED(queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
+
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static int
+ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t vec = IXGBE_MISC_VEC_ID;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = IXGBE_RX_VEC_START;
+ intr->mask &= ~(1 << vec);
+ RTE_SET_USED(queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
+
+ return 0;
+}
+
+static int
+ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (queue_id < 16) {
+ ixgbe_disable_intr(hw);
+ intr->mask |= (1 << queue_id);
+ ixgbe_enable_intr(dev);
+ } else if (queue_id < 32) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+ mask &= (1 << queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ } else if (queue_id < 64) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+ mask &= (1 << (queue_id - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ }
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static int
+ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (queue_id < 16) {
+ ixgbe_disable_intr(hw);
+ intr->mask &= ~(1 << queue_id);
+ ixgbe_enable_intr(dev);
+ } else if (queue_id < 32) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+ mask &= ~(1 << queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ } else if (queue_id < 64) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+ mask &= ~(1 << (queue_id - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ }
+
+ return 0;
+}
+
+static void
+ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp, idx;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+ tmp &= ~0xFF;
+ tmp |= msix_vector;
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
+ } else {
+ /* rx or tx cause */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ idx = ((16 * (queue & 1)) + (8 * direction));
+ tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
+ }
+}
+
+/**
+ * set the IVAR registers, mapping interrupt causes to vectors
+ * @param hw
+ * pointer to ixgbe_hw struct
+ * @direction
+ * 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue
+ * queue to map the corresponding interrupt to
+ * @msix_vector
+ * the vector to map to the corresponding queue
+ */
+static void
+ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp, idx;
+
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (direction == -1)
+ direction = 0;
+ idx = (((direction * 64) + queue) >> 2) & 0x1F;
+ tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
+ tmp &= ~(0xFF << (8 * (queue & 0x3)));
+ tmp |= (msix_vector << (8 * (queue & 0x3)));
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
+ } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
+ (hw->mac.type == ixgbe_mac_X540) ||
+ (hw->mac.type == ixgbe_mac_X550)) {
+ if (direction == -1) {
+ /* other causes */
+ idx = ((queue & 1) * 8);
+ tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
+ } else {
+ /* rx or tx causes */
+ idx = ((16 * (queue & 1)) + (8 * direction));
+ tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
+ }
+ }
+}
+
+static void
+ixgbevf_configure_msix(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t q_idx;
+ uint32_t vector_idx = IXGBE_MISC_VEC_ID;
+ uint32_t base = IXGBE_MISC_VEC_ID;
+
+ /* Configure VF other cause ivar */
+ ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
+
+ /* won't configure msix register if no mapping is done
+ * between intr vector and event fd.
+ */
+ if (!rte_intr_dp_is_en(intr_handle))
+ return;
+
+ if (rte_intr_allow_others(intr_handle)) {
+ base = IXGBE_RX_VEC_START;
+ vector_idx = IXGBE_RX_VEC_START;
+ }
+
+ /* Configure all RX queues of VF */
+ for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
+ /* Force all queue use vector 0,
+ * as IXGBE_VF_MAXMSIVECOTR = 1
+ */
+ ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
+ intr_handle->intr_vec[q_idx] = vector_idx;
+ if (vector_idx < base + intr_handle->nb_efd - 1)
+ vector_idx++;
+ }
+
+ /* As RX queue setting above show, all queues use the vector 0.
+ * Set only the ITR value of IXGBE_MISC_VEC_ID.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID),
+ IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+ | IXGBE_EITR_CNT_WDIS);
+}
+
+/**
+ * Sets up the hardware to properly generate MSI-X interrupts
+ * @hw
+ * board private structure
+ */
+static void
+ixgbe_configure_msix(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
+ uint32_t vec = IXGBE_MISC_VEC_ID;
+ uint32_t mask;
+ uint32_t gpie;
+
+ /* won't configure msix register if no mapping is done
+ * between intr vector and event fd
+ * but if misx has been enabled already, need to configure
+ * auto clean, auto mask and throttling.
+ */
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ if (!rte_intr_dp_is_en(intr_handle) &&
+ !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
+ return;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = base = IXGBE_RX_VEC_START;
+
+ /* setup GPIE for MSI-x mode */
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+ IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
+ /* auto clearing and auto setting corresponding bits in EIMS
+ * when MSI-X interrupt is triggered
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+ queue_id++) {
+ /* by default, 1:1 mapping */
+ ixgbe_set_ivar_map(hw, 0, queue_id, vec);
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_set_ivar_map(hw, -1,
+ IXGBE_IVAR_OTHER_CAUSES_INDEX,
+ IXGBE_MISC_VEC_ID);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
+ break;
+ default:
+ break;
+ }
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
+ IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+ | IXGBE_EITR_CNT_WDIS);
+
+ /* set up to autoclear timer, and the vectors */
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ mask &= ~(IXGBE_EIMS_OTHER |
+ IXGBE_EIMS_MAILBOX |
+ IXGBE_EIMS_LSC);
+
+ IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
+}
+
+int
+ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t tx_rate)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_rxmode *rxmode;
+ uint32_t rf_dec, rf_int;
+ uint32_t bcnrc_val;
+ uint16_t link_speed = dev->data->dev_link.link_speed;
+
+ if (queue_idx >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ if (tx_rate != 0) {
+ /* Calculate the rate factor values to set */
+ rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
+ rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
+ rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+
+ bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+ bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+ IXGBE_RTTBCNRC_RF_INT_MASK_M);
+ bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+ } else {
+ bcnrc_val = 0;
+ }
+
+ rxmode = &dev->data->dev_conf.rxmode;
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
+ * set as 0x4.
+ */
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+ (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+ IXGBE_MMW_SIZE_JUMBO_FRAME);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+ IXGBE_MMW_SIZE_DEFAULT);
+
+ /* Set RTTBCNRC of queue X */
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ __attribute__((unused)) uint32_t index,
+ __attribute__((unused)) uint32_t pool)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int diag;
+
+ /*
+ * On a 82599 VF, adding again the same MAC addr is not an idempotent
+ * operation. Trap this case to avoid exhausting the [very limited]
+ * set of PF resources used to store VF MAC addresses.
+ */
+ if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+ return -1;
+ diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+ if (diag != 0)
+ PMD_DRV_LOG(ERR, "Unable to add MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5],
+ diag);
+ return diag;
+}
+
+static void
+ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
+ struct ether_addr *mac_addr;
+ uint32_t i;
+ int diag;
+
+ /*
+ * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
+ * not support the deletion of a given MAC address.
+ * Instead, it imposes to delete all MAC addresses, then to add again
+ * all MAC addresses with the exception of the one to be deleted.
+ */
+ (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
+
+ /*
+ * Add again all MAC addresses, with the exception of the deleted one
+ * and of the permanent MAC address.
+ */
+ for (i = 0, mac_addr = dev->data->mac_addrs;
+ i < hw->mac.num_rar_entries; i++, mac_addr++) {
+ /* Skip the deleted MAC address */
+ if (i == index)
+ continue;
+ /* Skip NULL MAC addresses */
+ if (is_zero_ether_addr(mac_addr))
+ continue;
+ /* Skip the permanent MAC address */
+ if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+ continue;
+ diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+ if (diag != 0)
+ PMD_DRV_LOG(ERR,
+ "Adding again MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x failed "
+ "diag=%d",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5],
+ diag);
+ }
+}
+
+static int
+ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
+
+ return 0;
+}
+
+int
+ixgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t syn_info;
+ uint32_t synqf;
+
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ syn_info = filter_info->syn_info;
+
+ if (add) {
+ if (syn_info & IXGBE_SYN_FILTER_ENABLE)
+ return -EINVAL;
+ synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
+ IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
+
+ if (filter->hig_pri)
+ synqf |= IXGBE_SYN_FILTER_SYNQFP;
+ else
+ synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
+ } else {
+ synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+ if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
+ return -ENOENT;
+ synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
+ }
+
+ filter_info->syn_info = synqf;
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+ IXGBE_WRITE_FLUSH(hw);
+ return 0;
+}
+
+static int
+ixgbe_syn_filter_get(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+ if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+ filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
+ filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static int
+ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = ixgbe_syn_filter_set(dev,
+ (struct rte_eth_syn_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = ixgbe_syn_filter_set(dev,
+ (struct rte_eth_syn_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = ixgbe_syn_filter_get(dev,
+ (struct rte_eth_syn_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+
+static inline enum ixgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+ if (protocol_value == IPPROTO_TCP)
+ return IXGBE_FILTER_PROTOCOL_TCP;
+ else if (protocol_value == IPPROTO_UDP)
+ return IXGBE_FILTER_PROTOCOL_UDP;
+ else if (protocol_value == IPPROTO_SCTP)
+ return IXGBE_FILTER_PROTOCOL_SCTP;
+ else
+ return IXGBE_FILTER_PROTOCOL_NONE;
+}
+
+/* inject a 5-tuple filter to HW */
+static inline void
+ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+ uint32_t ftqf, sdpqf;
+ uint32_t l34timir = 0;
+ uint8_t mask = 0xff;
+
+ i = filter->index;
+
+ sdpqf = (uint32_t)(filter->filter_info.dst_port <<
+ IXGBE_SDPQF_DSTPORT_SHIFT);
+ sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
+
+ ftqf = (uint32_t)(filter->filter_info.proto &
+ IXGBE_FTQF_PROTOCOL_MASK);
+ ftqf |= (uint32_t)((filter->filter_info.priority &
+ IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
+ if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+ mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+ if (filter->filter_info.dst_ip_mask == 0)
+ mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+ if (filter->filter_info.src_port_mask == 0)
+ mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+ if (filter->filter_info.dst_port_mask == 0)
+ mask &= IXGBE_FTQF_DEST_PORT_MASK;
+ if (filter->filter_info.proto_mask == 0)
+ mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+ ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+ ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+ ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+
+ l34timir |= IXGBE_L34T_IMIR_RESERVE;
+ l34timir |= (uint32_t)(filter->queue <<
+ IXGBE_L34T_IMIR_QUEUE_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ * filter: ponter to the filter that will be added.
+ * rx_queue: the queue id the filter assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i, idx, shift;
+
+ /*
+ * look for an unused 5tuple filter index,
+ * and insert the filter to list.
+ */
+ for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
+ idx = i / (sizeof(uint32_t) * NBBY);
+ shift = i % (sizeof(uint32_t) * NBBY);
+ if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
+ filter_info->fivetuple_mask[idx] |= 1 << shift;
+ filter->index = i;
+ TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+ filter,
+ entries);
+ break;
+ }
+ }
+ if (i >= IXGBE_MAX_FTQF_FILTERS) {
+ PMD_DRV_LOG(ERR, "5tuple filters are full.");
+ return -ENOSYS;
+ }
+
+ ixgbe_inject_5tuple_filter(dev, filter);
+
+ return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: the pointer of the filter will be removed.
+ */
+static void
+ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint16_t index = filter->index;
+
+ filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
+ ~(1 << (index % (sizeof(uint32_t) * NBBY)));
+ TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+ rte_free(filter);
+
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
+}
+
+static int
+ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct ixgbe_hw *hw;
+ uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ return -EINVAL;
+
+ /* refuse mtu that requires the support of scattered packets when this
+ * feature has not been enabled before.
+ */
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
+ (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+ return -EINVAL;
+
+ /*
+ * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
+ * request of the version 2.0 of the mailbox API.
+ * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
+ * of the mailbox API.
+ * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
+ * prior to 3.11.33 which contains the following change:
+ * "ixgbe: Enable jumbo frames support w/ SR-IOV"
+ */
+ ixgbevf_rlpml_set_vf(hw, max_frame);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+ return 0;
+}
+
+static inline struct ixgbe_5tuple_filter *
+ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
+ struct ixgbe_5tuple_filter_info *key)
+{
+ struct ixgbe_5tuple_filter *it;
+
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->filter_info,
+ sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
+ return it;
+ }
+ }
+ return NULL;
+}
+
+/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/
+static inline int
+ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+ struct ixgbe_5tuple_filter_info *filter_info)
+{
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+ filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+ filter->priority < IXGBE_5TUPLE_MIN_PRI)
+ return -EINVAL;
+
+ switch (filter->dst_ip_mask) {
+ case UINT32_MAX:
+ filter_info->dst_ip_mask = 0;
+ filter_info->dst_ip = filter->dst_ip;
+ break;
+ case 0:
+ filter_info->dst_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_ip_mask) {
+ case UINT32_MAX:
+ filter_info->src_ip_mask = 0;
+ filter_info->src_ip = filter->src_ip;
+ break;
+ case 0:
+ filter_info->src_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->dst_port_mask) {
+ case UINT16_MAX:
+ filter_info->dst_port_mask = 0;
+ filter_info->dst_port = filter->dst_port;
+ break;
+ case 0:
+ filter_info->dst_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_port_mask) {
+ case UINT16_MAX:
+ filter_info->src_port_mask = 0;
+ filter_info->src_port = filter->src_port;
+ break;
+ case 0:
+ filter_info->src_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->proto_mask) {
+ case UINT8_MAX:
+ filter_info->proto_mask = 0;
+ filter_info->proto =
+ convert_protocol_type(filter->proto);
+ break;
+ case 0:
+ filter_info->proto_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ filter_info->priority = (uint8_t)filter->priority;
+ return 0;
+}
+
+/*
+ * add or delete a ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add filter, if false, remove filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter,
+ bool add)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter_info filter_5tuple;
+ struct ixgbe_5tuple_filter *filter;
+ int ret;
+
+ if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+ return -EINVAL;
+ }
+
+ memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+ if (ret < 0)
+ return ret;
+
+ filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (filter != NULL && add) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ return -EEXIST;
+ }
+ if (filter == NULL && !add) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+
+ if (add) {
+ filter = rte_zmalloc("ixgbe_5tuple_filter",
+ sizeof(struct ixgbe_5tuple_filter), 0);
+ if (filter == NULL)
+ return -ENOMEM;
+ rte_memcpy(&filter->filter_info,
+ &filter_5tuple,
+ sizeof(struct ixgbe_5tuple_filter_info));
+ filter->queue = ntuple_filter->queue;
+ ret = ixgbe_add_5tuple_filter(dev, filter);
+ if (ret < 0) {
+ rte_free(filter);
+ return ret;
+ }
+ } else
+ ixgbe_remove_5tuple_filter(dev, filter);
+
+ return 0;
+}
+
+/*
+ * get a ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter_info filter_5tuple;
+ struct ixgbe_5tuple_filter *filter;
+ int ret;
+
+ if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+ return -EINVAL;
+ }
+
+ memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+ if (ret < 0)
+ return ret;
+
+ filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+ ntuple_filter->queue = filter->queue;
+ return 0;
+}
+
+/*
+ * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op:operation will be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = ixgbe_add_del_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = ixgbe_add_del_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = ixgbe_get_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+int
+ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t etqf = 0;
+ uint32_t etqs = 0;
+ int ret;
+ struct ixgbe_ethertype_filter ethertype_filter;
+
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+ " ethertype filter.", filter->ether_type);
+ return -EINVAL;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+ return -EINVAL;
+ }
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ PMD_DRV_LOG(ERR, "drop option is unsupported.");
+ return -EINVAL;
+ }
+
+ ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret >= 0 && add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+ filter->ether_type);
+ return -EEXIST;
+ }
+ if (ret < 0 && !add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ if (add) {
+ etqf = IXGBE_ETQF_FILTER_EN;
+ etqf |= (uint32_t)filter->ether_type;
+ etqs |= (uint32_t)((filter->queue <<
+ IXGBE_ETQS_RX_QUEUE_SHIFT) &
+ IXGBE_ETQS_RX_QUEUE);
+ etqs |= IXGBE_ETQS_QUEUE_EN;
+
+ ethertype_filter.ethertype = filter->ether_type;
+ ethertype_filter.etqf = etqf;
+ ethertype_filter.etqs = etqs;
+ ethertype_filter.conf = FALSE;
+ ret = ixgbe_ethertype_filter_insert(filter_info,
+ &ethertype_filter);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype filters are full.");
+ return -ENOSPC;
+ }
+ } else {
+ ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+ if (ret < 0)
+ return -ENOSYS;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t etqf, etqs;
+ int ret;
+
+ ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
+ if (etqf & IXGBE_ETQF_FILTER_EN) {
+ etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
+ filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
+ filter->flags = 0;
+ filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
+ IXGBE_ETQS_RX_QUEUE_SHIFT;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+/*
+ * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op:operation will be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = ixgbe_get_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_SYN:
+ ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &ixgbe_flow_ops;
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static u8 *
+ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
+ u8 **mc_addr_ptr, u32 *vmdq)
+{
+ u8 *mc_addr;
+
+ *vmdq = 0;
+ mc_addr = *mc_addr_ptr;
+ *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
+ return mc_addr;
+}
+
+static int
+ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct ixgbe_hw *hw;
+ u8 *mc_addr_list;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mc_addr_list = (u8 *)mc_addr_set;
+ return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
+ ixgbe_dev_addr_list_itr, TRUE);
+}
+
+static uint64_t
+ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t systime_cycles;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
+ systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
+ * NSEC_PER_SEC;
+ break;
+ default:
+ systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
+ << 32;
+ }
+
+ return systime_cycles;
+}
+
+static uint64_t
+ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rx_tstamp_cycles;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ /* RXSTMPL stores ns and RXSTMPH stores seconds. */
+ rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
+ rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
+ * NSEC_PER_SEC;
+ break;
+ default:
+ /* RXSTMPL stores ns and RXSTMPH stores seconds. */
+ rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
+ rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
+ << 32;
+ }
+
+ return rx_tstamp_cycles;
+}
+
+static uint64_t
+ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t tx_tstamp_cycles;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ /* TXSTMPL stores ns and TXSTMPH stores seconds. */
+ tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
+ tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
+ * NSEC_PER_SEC;
+ break;
+ default:
+ /* TXSTMPL stores ns and TXSTMPH stores seconds. */
+ tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
+ tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
+ << 32;
+ }
+
+ return tx_tstamp_cycles;
+}
+
+static void
+ixgbe_start_timecounters(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ struct rte_eth_link link;
+ uint32_t incval = 0;
+ uint32_t shift = 0;
+
+ /* Get current link speed. */
+ ixgbe_dev_link_update(dev, 1);
+ rte_eth_linkstatus_get(dev, &link);
+
+ switch (link.link_speed) {
+ case ETH_SPEED_NUM_100M:
+ incval = IXGBE_INCVAL_100;
+ shift = IXGBE_INCVAL_SHIFT_100;
+ break;
+ case ETH_SPEED_NUM_1G:
+ incval = IXGBE_INCVAL_1GB;
+ shift = IXGBE_INCVAL_SHIFT_1GB;
+ break;
+ case ETH_SPEED_NUM_10G:
+ default:
+ incval = IXGBE_INCVAL_10GB;
+ shift = IXGBE_INCVAL_SHIFT_10GB;
+ break;
+ }
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ /* Independent of link speed. */
+ incval = 1;
+ /* Cycles read will be interpreted as ns. */
+ shift = 0;
+ /* Fall-through */
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
+ break;
+ case ixgbe_mac_82599EB:
+ incval >>= IXGBE_INCVAL_SHIFT_82599;
+ shift -= IXGBE_INCVAL_SHIFT_82599;
+ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
+ (1 << IXGBE_INCPER_SHIFT_82599) | incval);
+ break;
+ default:
+ /* Not supported. */
+ return;
+ }
+
+ memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+ adapter->systime_tc.cc_shift = shift;
+ adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+ adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+ adapter->rx_tstamp_tc.cc_shift = shift;
+ adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+ adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+ adapter->tx_tstamp_tc.cc_shift = shift;
+ adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+}
+
+static int
+ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+
+ adapter->systime_tc.nsec += delta;
+ adapter->rx_tstamp_tc.nsec += delta;
+ adapter->tx_tstamp_tc.nsec += delta;
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ uint64_t ns;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+
+ ns = rte_timespec_to_ns(ts);
+ /* Set the timecounters to a new value. */
+ adapter->systime_tc.nsec = ns;
+ adapter->rx_tstamp_tc.nsec = ns;
+ adapter->tx_tstamp_tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ uint64_t ns, systime_cycles;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+
+ systime_cycles = ixgbe_read_systime_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl;
+ uint32_t tsauxc;
+
+ /* Stop the timesync system time. */
+ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
+ /* Reset the timesync system time value. */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
+
+ /* Enable system time for platforms where it isn't on by default. */
+ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+ tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
+
+ ixgbe_start_timecounters(dev);
+
+ /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
+ (ETHER_TYPE_1588 |
+ IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_1588));
+
+ /* Enable timestamping of received PTP packets. */
+ tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+ tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
+
+ /* Enable timestamping of transmitted PTP packets. */
+ tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+ tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl;
+
+ /* Disable timestamping of transmitted PTP packets. */
+ tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+ tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
+
+ /* Disable timestamping of received PTP packets. */
+ tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+ tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
+
+ /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
+
+ /* Stop incrementating the System Time registers. */
+ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ uint32_t tsync_rxctl;
+ uint64_t rx_tstamp_cycles;
+ uint64_t ns;
+
+ tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+ if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
+ return -EINVAL;
+
+ rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ uint32_t tsync_txctl;
+ uint64_t tx_tstamp_cycles;
+ uint64_t ns;
+
+ tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+ if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
+ return -EINVAL;
+
+ tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ixgbe_get_reg_length(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+ const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
+ ixgbe_regs_mac_82598EB : ixgbe_regs_others;
+
+ while ((reg_group = reg_set[g_ind++]))
+ count += ixgbe_regs_group_count(reg_group);
+
+ return count;
+}
+
+static int
+ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+
+ while ((reg_group = ixgbevf_regs[g_ind++]))
+ count += ixgbe_regs_group_count(reg_group);
+
+ return count;
+}
+
+static int
+ixgbe_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+ const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
+ ixgbe_regs_mac_82598EB : ixgbe_regs_others;
+
+ if (data == NULL) {
+ regs->length = ixgbe_get_reg_length(dev);
+ regs->width = sizeof(uint32_t);
+ return 0;
+ }
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = reg_set[g_ind++]))
+ count += ixgbe_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
+ixgbevf_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+
+ if (data == NULL) {
+ regs->length = ixgbevf_get_reg_length(dev);
+ regs->width = sizeof(uint32_t);
+ return 0;
+ }
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = ixgbevf_regs[g_ind++]))
+ count += ixgbe_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
+ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Return unit is byte count */
+ return hw->eeprom.word_size * 2;
+}
+
+static int
+ixgbe_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
+ if ((first > hw->eeprom.word_size) ||
+ ((first + length) > hw->eeprom.word_size))
+ return -EINVAL;
+
+ in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ return eeprom->ops.read_buffer(hw, first, length, data);
+}
+
+static int
+ixgbe_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
+ if ((first > hw->eeprom.word_size) ||
+ ((first + length) > hw->eeprom.word_size))
+ return -EINVAL;
+
+ in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ return eeprom->ops.write_buffer(hw, first, length, data);
+}
+
+static int
+ixgbe_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t status;
+ uint8_t sff8472_rev, addr_mode;
+ bool page_swap = false;
+
+ /* Check whether we support SFF-8472 or not */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_COMP,
+ &sff8472_rev);
+ if (status != 0)
+ return -EIO;
+
+ /* addressing mode is not supported */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_SWAP,
+ &addr_mode);
+ if (status != 0)
+ return -EIO;
+
+ if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
+ PMD_DRV_LOG(ERR,
+ "Address change required to access page 0xA2, "
+ "but not supported. Please report the module "
+ "type to the driver maintainers.");
+ page_swap = true;
+ }
+
+ if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+ /* We have a SFP, but it does not support SFF-8472 */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have a SFP which supports a revision of SFF-8472. */
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID;
+ uint8_t databyte = 0xFF;
+ uint8_t *data = info->data;
+ uint32_t i = 0;
+
+ if (info->length == 0)
+ return -EINVAL;
+
+ for (i = info->offset; i < info->offset + info->length; i++) {
+ if (i < RTE_ETH_MODULE_SFF_8079_LEN)
+ status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+ else
+ status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
+
+ if (status != 0)
+ return -EIO;
+
+ data[i - info->offset] = databyte;
+ }
+
+ return 0;
+}
+
+uint16_t
+ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
+ switch (mac_type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ return ETH_RSS_RETA_SIZE_512;
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return ETH_RSS_RETA_SIZE_64;
+ default:
+ return ETH_RSS_RETA_SIZE_128;
+ }
+}
+
+uint32_t
+ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
+ switch (mac_type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ if (reta_idx < ETH_RSS_RETA_SIZE_128)
+ return IXGBE_RETA(reta_idx >> 2);
+ else
+ return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return IXGBE_VFRETA(reta_idx >> 2);
+ default:
+ return IXGBE_RETA(reta_idx >> 2);
+ }
+}
+
+uint32_t
+ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
+ switch (mac_type) {
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return IXGBE_VFMRQC;
+ default:
+ return IXGBE_MRQC;
+ }
+}
+
+uint32_t
+ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
+ switch (mac_type) {
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return IXGBE_VFRSSRK(i);
+ default:
+ return IXGBE_RSSRK(i);
+ }
+}
+
+bool
+ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
+ switch (mac_type) {
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+static int
+ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info)
+{
+ struct ixgbe_dcb_config *dcb_config =
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ struct ixgbe_dcb_tc_config *tc;
+ struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+ uint8_t nb_tcs;
+ uint8_t i, j;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+ else
+ dcb_info->nb_tcs = 1;
+
+ tc_queue = &dcb_info->tc_queue;
+ nb_tcs = dcb_info->nb_tcs;
+
+ if (dcb_config->vt_mode) { /* vt is enabled*/
+ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+ if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+ for (j = 0; j < nb_tcs; j++) {
+ tc_queue->tc_rxq[0][j].base = j;
+ tc_queue->tc_rxq[0][j].nb_queue = 1;
+ tc_queue->tc_txq[0][j].base = j;
+ tc_queue->tc_txq[0][j].nb_queue = 1;
+ }
+ } else {
+ for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+ for (j = 0; j < nb_tcs; j++) {
+ tc_queue->tc_rxq[i][j].base =
+ i * nb_tcs + j;
+ tc_queue->tc_rxq[i][j].nb_queue = 1;
+ tc_queue->tc_txq[i][j].base =
+ i * nb_tcs + j;
+ tc_queue->tc_txq[i][j].nb_queue = 1;
+ }
+ }
+ }
+ } else { /* vt is disabled*/
+ struct rte_eth_dcb_rx_conf *rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+ if (dcb_info->nb_tcs == ETH_4_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 64;
+ dcb_info->tc_queue.tc_txq[0][2].base = 96;
+ dcb_info->tc_queue.tc_txq[0][3].base = 112;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 32;
+ dcb_info->tc_queue.tc_txq[0][2].base = 64;
+ dcb_info->tc_queue.tc_txq[0][3].base = 80;
+ dcb_info->tc_queue.tc_txq[0][4].base = 96;
+ dcb_info->tc_queue.tc_txq[0][5].base = 104;
+ dcb_info->tc_queue.tc_txq[0][6].base = 112;
+ dcb_info->tc_queue.tc_txq[0][7].base = 120;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+ }
+ }
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
+ }
+ return 0;
+}
+
+/* Update e-tag ether type */
+static int
+ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
+ uint16_t ether_type)
+{
+ uint32_t etag_etype;
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
+ etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
+ etag_etype |= ether_type;
+ IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/* Config l2 tunnel ether type */
+static int
+ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+
+ if (l2_tunnel == NULL)
+ return -EINVAL;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
+ ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Enable e-tag tunnel */
+static int
+ixgbe_e_tag_enable(struct ixgbe_hw *hw)
+{
+ uint32_t etag_etype;
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
+ etag_etype |= IXGBE_ETAG_ETYPE_VALID;
+ IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/* Enable l2 tunnel */
+static int
+ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = TRUE;
+ ret = ixgbe_e_tag_enable(hw);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Disable e-tag tunnel */
+static int
+ixgbe_e_tag_disable(struct ixgbe_hw *hw)
+{
+ uint32_t etag_etype;
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
+ etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
+ IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/* Disable l2 tunnel */
+static int
+ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = FALSE;
+ ret = ixgbe_e_tag_disable(hw);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t i, rar_entries;
+ uint32_t rar_low, rar_high;
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ rar_entries = ixgbe_get_num_rx_addrs(hw);
+
+ for (i = 1; i < rar_entries; i++) {
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+ rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i));
+ if ((rar_high & IXGBE_RAH_AV) &&
+ (rar_high & IXGBE_RAH_ADTYPE) &&
+ ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
+ l2_tunnel->tunnel_id)) {
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+
+ ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
+
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int
+ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t i, rar_entries;
+ uint32_t rar_low, rar_high;
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ /* One entry for one tunnel. Try to remove potential existing entry. */
+ ixgbe_e_tag_filter_del(dev, l2_tunnel);
+
+ rar_entries = ixgbe_get_num_rx_addrs(hw);
+
+ for (i = 1; i < rar_entries; i++) {
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+ if (rar_high & IXGBE_RAH_AV) {
+ continue;
+ } else {
+ ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
+ rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
+ rar_low = l2_tunnel->tunnel_id;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
+
+ return ret;
+ }
+ }
+
+ PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
+ " Please remove a rule before adding a new one.");
+ return -EINVAL;
+}
+
+static inline struct ixgbe_l2_tn_filter *
+ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_key *key)
+{
+ int ret;
+
+ ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+ if (ret < 0)
+ return NULL;
+
+ return l2_tn_info->hash_map[ret];
+}
+
+static inline int
+ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_filter *l2_tn_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(l2_tn_info->hash_handle,
+ &l2_tn_filter->key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert L2 tunnel filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+ TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+ return 0;
+}
+
+static inline int
+ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_key *key)
+{
+ int ret;
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+ ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "No such L2 tunnel filter to delete %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_filter = l2_tn_info->hash_map[ret];
+ l2_tn_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+ rte_free(l2_tn_filter);
+
+ return 0;
+}
+
+/* Add l2 tunnel filter */
+int
+ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool restore)
+{
+ int ret;
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_key key;
+ struct ixgbe_l2_tn_filter *node;
+
+ if (!restore) {
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+
+ node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+
+ if (node) {
+ PMD_DRV_LOG(ERR,
+ "The L2 tunnel filter already exists!");
+ return -EINVAL;
+ }
+
+ node = rte_zmalloc("ixgbe_l2_tn",
+ sizeof(struct ixgbe_l2_tn_filter),
+ 0);
+ if (!node)
+ return -ENOMEM;
+
+ rte_memcpy(&node->key,
+ &key,
+ sizeof(struct ixgbe_l2_tn_key));
+ node->pool = l2_tunnel->pool;
+ ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
+ if (ret < 0) {
+ rte_free(node);
+ return ret;
+ }
+ }
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ if ((!restore) && (ret < 0))
+ (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+
+ return ret;
+}
+
+/* Delete l2 tunnel filter */
+int
+ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret;
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_key key;
+
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+ ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+ if (ret < 0)
+ return ret;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op:operation will be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = ixgbe_dev_l2_tunnel_filter_add
+ (dev,
+ (struct rte_eth_l2_tunnel_conf *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = ixgbe_dev_l2_tunnel_filter_del
+ (dev,
+ (struct rte_eth_l2_tunnel_conf *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
+{
+ int ret = 0;
+ uint32_t ctrl;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
+ if (en)
+ ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
+
+ return ret;
+}
+
+/* Enable l2 tunnel forwarding */
+static int
+ixgbe_dev_l2_tunnel_forwarding_enable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = TRUE;
+ ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Disable l2 tunnel forwarding */
+static int
+ixgbe_dev_l2_tunnel_forwarding_disable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = FALSE;
+ ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool en)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ int ret = 0;
+ uint32_t vmtir, vmvir;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
+ PMD_DRV_LOG(ERR,
+ "VF id %u should be less than %u",
+ l2_tunnel->vf_id,
+ pci_dev->max_vfs);
+ return -EINVAL;
+ }
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ if (en)
+ vmtir = l2_tunnel->tunnel_id;
+ else
+ vmtir = 0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
+
+ vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
+ vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
+ if (en)
+ vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
+
+ return ret;
+}
+
+/* Enable l2 tunnel tag insertion */
+static int
+ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Disable l2 tunnel tag insertion */
+static int
+ixgbe_dev_l2_tunnel_insertion_disable
+ (struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
+ bool en)
+{
+ int ret = 0;
+ uint32_t qde;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ qde = IXGBE_READ_REG(hw, IXGBE_QDE);
+ if (en)
+ qde |= IXGBE_QDE_STRIP_TAG;
+ else
+ qde &= ~IXGBE_QDE_STRIP_TAG;
+ qde &= ~IXGBE_QDE_READ;
+ qde |= IXGBE_QDE_WRITE;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
+
+ return ret;
+}
+
+/* Enable l2 tunnel tag stripping */
+static int
+ixgbe_dev_l2_tunnel_stripping_enable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Disable l2 tunnel tag stripping */
+static int
+ixgbe_dev_l2_tunnel_stripping_disable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Enable/disable l2 tunnel offload functions */
+static int
+ixgbe_dev_l2_tunnel_offload_set
+ (struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ uint32_t mask,
+ uint8_t en)
+{
+ int ret = 0;
+
+ if (l2_tunnel == NULL)
+ return -EINVAL;
+
+ ret = -EINVAL;
+ if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
+ if (en)
+ ret = ixgbe_dev_l2_tunnel_enable(
+ dev,
+ l2_tunnel->l2_tunnel_type);
+ else
+ ret = ixgbe_dev_l2_tunnel_disable(
+ dev,
+ l2_tunnel->l2_tunnel_type);
+ }
+
+ if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
+ if (en)
+ ret = ixgbe_dev_l2_tunnel_insertion_enable(
+ dev,
+ l2_tunnel);
+ else
+ ret = ixgbe_dev_l2_tunnel_insertion_disable(
+ dev,
+ l2_tunnel);
+ }
+
+ if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
+ if (en)
+ ret = ixgbe_dev_l2_tunnel_stripping_enable(
+ dev,
+ l2_tunnel->l2_tunnel_type);
+ else
+ ret = ixgbe_dev_l2_tunnel_stripping_disable(
+ dev,
+ l2_tunnel->l2_tunnel_type);
+ }
+
+ if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
+ if (en)
+ ret = ixgbe_dev_l2_tunnel_forwarding_enable(
+ dev,
+ l2_tunnel->l2_tunnel_type);
+ else
+ ret = ixgbe_dev_l2_tunnel_forwarding_disable(
+ dev,
+ l2_tunnel->l2_tunnel_type);
+ }
+
+ return ret;
+}
+
+static int
+ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
+ uint16_t port)
+{
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/* There's only one register for VxLAN UDP port.
+ * So, we cannot add several ports. Will update it.
+ */
+static int
+ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
+ uint16_t port)
+{
+ if (port == 0) {
+ PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
+ return -EINVAL;
+ }
+
+ return ixgbe_update_vxlan_port(hw, port);
+}
+
+/* We cannot delete the VxLAN port. For there's a register for VxLAN
+ * UDP port, it must have a value.
+ * So, will reset it to the original value 0.
+ */
+static int
+ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
+ uint16_t port)
+{
+ uint16_t cur_port;
+
+ cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
+
+ if (cur_port != port) {
+ PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
+ return -EINVAL;
+ }
+
+ return ixgbe_update_vxlan_port(hw, 0);
+}
+
+/* Add UDP tunneling port */
+static int
+ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ if (udp_tunnel == NULL)
+ return -EINVAL;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
+ break;
+
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+ ret = -EINVAL;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Remove UDP tunneling port */
+static int
+ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ if (udp_tunnel == NULL)
+ return -EINVAL;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+ ret = -EINVAL;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static void
+ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
+}
+
+static void
+ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
+}
+
+static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 in_msg = 0;
+
+ /* peek the message first */
+ in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
+
+ /* PF reset VF event */
+ if (in_msg == IXGBE_PF_CONTROL_MSG) {
+ /* dummy mbx read to ack pf */
+ if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+ return;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ }
+}
+
+static int
+ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t eicr;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ ixgbevf_intr_disable(dev);
+
+ /* read-on-clear nic registers here */
+ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
+ intr->flags = 0;
+
+ /* only one misc vector supported - mailbox */
+ eicr &= IXGBE_VTEICR_MASK;
+ if (eicr == IXGBE_MISC_VEC_ID)
+ intr->flags |= IXGBE_FLAG_MAILBOX;
+
+ return 0;
+}
+
+static int
+ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (intr->flags & IXGBE_FLAG_MAILBOX) {
+ ixgbevf_mbx_process(dev);
+ intr->flags &= ~IXGBE_FLAG_MAILBOX;
+ }
+
+ ixgbevf_intr_enable(dev);
+
+ return 0;
+}
+
+static void
+ixgbevf_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ ixgbevf_dev_interrupt_get_status(dev);
+ ixgbevf_dev_interrupt_action(dev);
+}
+
+/**
+ * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the transmit data path and waits for the HW to internally empty
+ * the Tx security block
+ **/
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECTX_POLL 40
+
+ int i;
+ int sectxreg;
+
+ sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+ for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
+ sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+ if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
+ break;
+ /* Use interrupt-safe sleep just in case */
+ usec_delay(1000);
+ }
+
+ /* For informational purposes only */
+ if (i >= IXGBE_MAX_SECTX_POLL)
+ PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
+ "path fully disabled. Continuing with init.");
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the transmit data path.
+ **/
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+ uint32_t sectxreg;
+
+ sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/* restore n-tuple filter */
+static inline void
+ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter *node;
+
+ TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+ ixgbe_inject_5tuple_filter(dev, node);
+ }
+}
+
+/* restore ethernet type filter */
+static inline void
+ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
+ filter_info->ethertype_filters[i].etqf);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
+ filter_info->ethertype_filters[i].etqs);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* restore SYN filter */
+static inline void
+ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t synqf;
+
+ synqf = filter_info->syn_info;
+
+ if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
+
+/* restore L2 tunnel filter */
+static inline void
+ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *node;
+ struct rte_eth_l2_tunnel_conf l2_tn_conf;
+
+ TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+ l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = node->key.tn_id;
+ l2_tn_conf.pool = node->pool;
+ (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
+ }
+}
+
+/* restore rss filter */
+static inline void
+ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->rss_info.conf.queue_num)
+ ixgbe_config_rss_filter(dev,
+ &filter_info->rss_info, TRUE);
+}
+
+static int
+ixgbe_filter_restore(struct rte_eth_dev *dev)
+{
+ ixgbe_ntuple_filter_restore(dev);
+ ixgbe_ethertype_filter_restore(dev);
+ ixgbe_syn_filter_restore(dev);
+ ixgbe_fdir_filter_restore(dev);
+ ixgbe_l2_tn_filter_restore(dev);
+ ixgbe_rss_filter_restore(dev);
+
+ return 0;
+}
+
+static void
+ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (l2_tn_info->e_tag_en)
+ (void)ixgbe_e_tag_enable(hw);
+
+ if (l2_tn_info->e_tag_fwd_en)
+ (void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
+
+ (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
+/* remove all the n-tuple filters */
+void
+ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+ ixgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* remove all the ether type filters */
+void
+ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i) &&
+ !filter_info->ethertype_filters[i].conf) {
+ (void)ixgbe_ethertype_filter_remove(filter_info,
+ (uint8_t)i);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* remove the SYN filter */
+void
+ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
+ filter_info->syn_info = 0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
+
+/* remove all the L2 tunnel filters */
+int
+ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+ struct rte_eth_l2_tunnel_conf l2_tn_conf;
+ int ret = 0;
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
+ l2_tn_conf.pool = l2_tn_filter->pool;
+ ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
+
+RTE_INIT(ixgbe_init_log)
+{
+ ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
+ if (ixgbe_logtype_init >= 0)
+ rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
+ ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
+ if (ixgbe_logtype_driver >= 0)
+ rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h
new file mode 100644
index 00000000..d0b93968
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -0,0 +1,782 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#ifndef _IXGBE_ETHDEV_H_
+#define _IXGBE_ETHDEV_H_
+
+#include <stdint.h>
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_dcb.h"
+#include "base/ixgbe_dcb_82599.h"
+#include "base/ixgbe_dcb_82598.h"
+#include "ixgbe_bypass.h"
+#ifdef RTE_LIBRTE_SECURITY
+#include "ixgbe_ipsec.h"
+#endif
+#include <rte_flow.h>
+#include <rte_time.h>
+#include <rte_hash.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_tm_driver.h>
+
+/* need update link, bit flag */
+#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
+#define IXGBE_FLAG_MAILBOX (uint32_t)(1 << 1)
+#define IXGBE_FLAG_PHY_INTERRUPT (uint32_t)(1 << 2)
+#define IXGBE_FLAG_MACSEC (uint32_t)(1 << 3)
+#define IXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4)
+
+/*
+ * Defines that were not part of ixgbe_type.h as they are not used by the
+ * FreeBSD driver.
+ */
+#define IXGBE_ADVTXD_MAC_1588 0x00080000 /* IEEE1588 Timestamp packet */
+#define IXGBE_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */
+#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE, resvd */
+#define IXGBE_RXDADV_ERR_CKSUM_BIT 30
+#define IXGBE_RXDADV_ERR_CKSUM_MSK 3
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */
+#define IXGBE_NB_STAT_MAPPING_REGS 32
+#define IXGBE_EXTENDED_VLAN (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */
+#define IXGBE_VFTA_SIZE 128
+#define IXGBE_VLAN_TAG_SIZE 4
+#define IXGBE_HKEY_MAX_INDEX 10
+#define IXGBE_MAX_RX_QUEUE_NUM 128
+#define IXGBE_MAX_INTR_QUEUE_NUM 15
+#define IXGBE_VMDQ_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM
+#define IXGBE_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM
+#define IXGBE_NONE_MODE_TX_NB_QUEUES 64
+
+#ifndef NBBY
+#define NBBY 8 /* number of bits in a byte */
+#endif
+#define IXGBE_HWSTRIP_BITMAP_SIZE (IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
+
+/* EITR Interval is in 2048ns uinits for 1G and 10G link */
+#define IXGBE_EITR_INTERVAL_UNIT_NS 2048
+#define IXGBE_EITR_ITR_INT_SHIFT 3
+#define IXGBE_EITR_INTERVAL_US(us) \
+ (((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \
+ IXGBE_EITR_ITR_INT_MASK)
+
+#define IXGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */
+
+/* Loopback operation modes */
+/* 82599 specific loopback operation types */
+#define IXGBE_LPBK_82599_NONE 0x0 /* Default value. Loopback is disabled. */
+#define IXGBE_LPBK_82599_TX_RX 0x1 /* Tx->Rx loopback operation is enabled. */
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 0x2600 /* Maximum Jumbo frame size. */
+
+#define IXGBE_RTTBCNRC_RF_INT_MASK_BASE 0x000003FF
+#define IXGBE_RTTBCNRC_RF_INT_MASK_M \
+ (IXGBE_RTTBCNRC_RF_INT_MASK_BASE << IXGBE_RTTBCNRC_RF_INT_SHIFT)
+
+#define IXGBE_MAX_QUEUE_NUM_PER_VF 8
+
+#define IXGBE_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */
+#define IXGBE_SYN_FILTER_QUEUE 0x000000FE /* syn filter queue field */
+#define IXGBE_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field shift */
+#define IXGBE_SYN_FILTER_SYNQFP 0x80000000 /* syn filter SYNQFP */
+
+#define IXGBE_ETQF_UP 0x00070000 /* ethertype filter priority field */
+#define IXGBE_ETQF_SHIFT 16
+#define IXGBE_ETQF_UP_EN 0x00080000
+#define IXGBE_ETQF_ETHERTYPE 0x0000FFFF /* ethertype filter ethertype field */
+#define IXGBE_ETQF_MAX_PRI 7
+
+#define IXGBE_SDPQF_DSTPORT 0xFFFF0000 /* dst port field */
+#define IXGBE_SDPQF_DSTPORT_SHIFT 16 /* dst port field shift */
+#define IXGBE_SDPQF_SRCPORT 0x0000FFFF /* src port field */
+
+#define IXGBE_L34T_IMIR_SIZE_BP 0x00001000
+#define IXGBE_L34T_IMIR_RESERVE 0x00080000 /* bit 13 to 19 must be set to 1000000b. */
+#define IXGBE_L34T_IMIR_LLI 0x00100000
+#define IXGBE_L34T_IMIR_QUEUE 0x0FE00000
+#define IXGBE_L34T_IMIR_QUEUE_SHIFT 21
+#define IXGBE_5TUPLE_MAX_PRI 7
+#define IXGBE_5TUPLE_MIN_PRI 1
+
+/* bit of VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros*/
+#define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000
+/* bit of NVGRE tunnel type | 7 bits of zeros | 8 bits of zeros*/
+#define IXGBE_FDIR_NVGRE_TUNNEL_TYPE 0x0
+
+#define IXGBE_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_IPV6_EX | \
+ ETH_RSS_IPV6_TCP_EX | \
+ ETH_RSS_IPV6_UDP_EX)
+
+#define IXGBE_VF_IRQ_ENABLE_MASK 3 /* vf irq enable mask */
+#define IXGBE_VF_MAXMSIVECTOR 1
+
+#define IXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define IXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+#define IXGBE_SECTX_MINSECIFG_MASK 0x0000000F
+
+#define IXGBE_MACSEC_PNTHRSH 0xFFFFFE00
+
+#define IXGBE_MAX_FDIR_FILTER_NUM (1024 * 32)
+#define IXGBE_MAX_L2_TN_FILTER_NUM 128
+
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
+ return -ENOTSUP;\
+} while (0)
+
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
+ (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
+ (type) != ixgbe_mac_X550EM_a)\
+ return -ENOTSUP;\
+} while (0)
+
+/* Link speed for X550 auto negotiation */
+#define IXGBE_LINK_SPEED_X550_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
+ IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_2_5GB_FULL | \
+ IXGBE_LINK_SPEED_5GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
+
+/*
+ * Information about the fdir mode.
+ */
+struct ixgbe_hw_fdir_mask {
+ uint16_t vlan_tci_mask;
+ uint32_t src_ipv4_mask;
+ uint32_t dst_ipv4_mask;
+ uint16_t src_ipv6_mask;
+ uint16_t dst_ipv6_mask;
+ uint16_t src_port_mask;
+ uint16_t dst_port_mask;
+ uint16_t flex_bytes_mask;
+ uint8_t mac_addr_byte_mask;
+ uint32_t tunnel_id_mask;
+ uint8_t tunnel_type_mask;
+};
+
+struct ixgbe_fdir_filter {
+ TAILQ_ENTRY(ixgbe_fdir_filter) entries;
+ union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t fdirhash; /* hash value for fdir */
+ uint8_t queue; /* assigned rx queue */
+};
+
+/* list of fdir filters */
+TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter);
+
+struct ixgbe_fdir_rule {
+ struct ixgbe_hw_fdir_mask mask;
+ union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/
+ bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */
+ bool b_mask; /* If TRUE, mask has meaning. */
+ enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t soft_id; /* an unique value for this rule */
+ uint8_t queue; /* assigned rx queue */
+ uint8_t flex_bytes_offset;
+};
+
+struct ixgbe_hw_fdir_info {
+ struct ixgbe_hw_fdir_mask mask;
+ uint8_t flex_bytes_offset;
+ uint16_t collision;
+ uint16_t free;
+ uint16_t maxhash;
+ uint8_t maxlen;
+ uint64_t add;
+ uint64_t remove;
+ uint64_t f_add;
+ uint64_t f_remove;
+ struct ixgbe_fdir_filter_list fdir_list; /* filter list*/
+ /* store the pointers of the filters, index is the hash value. */
+ struct ixgbe_fdir_filter **hash_map;
+ struct rte_hash *hash_handle; /* cuckoo hash handler */
+ bool mask_added; /* If already got mask from consistent filter */
+};
+
+struct ixgbe_rte_flow_rss_conf {
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+ uint8_t key[IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
+ uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
+};
+
+/* structure for interrupt relative data */
+struct ixgbe_interrupt {
+ uint32_t flags;
+ uint32_t mask;
+ /*to save original mask during delayed handler */
+ uint32_t mask_original;
+};
+
+struct ixgbe_stat_mapping_registers {
+ uint32_t tqsm[IXGBE_NB_STAT_MAPPING_REGS];
+ uint32_t rqsmr[IXGBE_NB_STAT_MAPPING_REGS];
+};
+
+struct ixgbe_vfta {
+ uint32_t vfta[IXGBE_VFTA_SIZE];
+};
+
+struct ixgbe_hwstrip {
+ uint32_t bitmap[IXGBE_HWSTRIP_BITMAP_SIZE];
+};
+
+/*
+ * VF data which used by PF host only
+ */
+#define IXGBE_MAX_VF_MC_ENTRIES 30
+#define IXGBE_MAX_MR_RULE_ENTRIES 4 /* number of mirroring rules supported */
+#define IXGBE_MAX_UTA 128
+
+struct ixgbe_uta_info {
+ uint8_t uc_filter_type;
+ uint16_t uta_in_use;
+ uint32_t uta_shadow[IXGBE_MAX_UTA];
+};
+
+#define IXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules. */
+
+struct ixgbe_mirror_info {
+ struct rte_eth_mirror_conf mr_conf[IXGBE_MAX_MIRROR_RULES];
+ /**< store PF mirror rules configuration*/
+};
+
+struct ixgbe_vf_info {
+ uint8_t vf_mac_addresses[ETHER_ADDR_LEN];
+ uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
+ uint16_t num_vf_mc_hashes;
+ uint16_t default_vf_vlan_id;
+ uint16_t vlans_enabled;
+ bool clear_to_send;
+ uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF];
+ uint16_t vlan_count;
+ uint8_t spoofchk_enabled;
+ uint8_t api_version;
+ uint16_t switch_domain_id;
+};
+
+/*
+ * Possible l4type of 5tuple filters.
+ */
+enum ixgbe_5tuple_protocol {
+ IXGBE_FILTER_PROTOCOL_TCP = 0,
+ IXGBE_FILTER_PROTOCOL_UDP,
+ IXGBE_FILTER_PROTOCOL_SCTP,
+ IXGBE_FILTER_PROTOCOL_NONE,
+};
+
+TAILQ_HEAD(ixgbe_5tuple_filter_list, ixgbe_5tuple_filter);
+
+struct ixgbe_5tuple_filter_info {
+ uint32_t dst_ip;
+ uint32_t src_ip;
+ uint16_t dst_port;
+ uint16_t src_port;
+ enum ixgbe_5tuple_protocol proto; /* l4 protocol. */
+ uint8_t priority; /* seven levels (001b-111b), 111b is highest,
+ used when more than one filter matches. */
+ uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */
+ src_ip_mask:1, /* if mask is 1b, do not compare src ip. */
+ dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
+ src_port_mask:1, /* if mask is 1b, do not compare src port. */
+ proto_mask:1; /* if mask is 1b, do not compare protocol. */
+};
+
+/* 5tuple filter structure */
+struct ixgbe_5tuple_filter {
+ TAILQ_ENTRY(ixgbe_5tuple_filter) entries;
+ uint16_t index; /* the index of 5tuple filter */
+ struct ixgbe_5tuple_filter_info filter_info;
+ uint16_t queue; /* rx queue assigned to */
+};
+
+#define IXGBE_5TUPLE_ARRAY_SIZE \
+ (RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
+ (sizeof(uint32_t) * NBBY))
+
+struct ixgbe_ethertype_filter {
+ uint16_t ethertype;
+ uint32_t etqf;
+ uint32_t etqs;
+ /**
+ * If this filter is added by configuration,
+ * it should not be removed.
+ */
+ bool conf;
+};
+
+/*
+ * Structure to store filters' info.
+ */
+struct ixgbe_filter_info {
+ uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
+ /* store used ethertype filters*/
+ struct ixgbe_ethertype_filter ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
+ /* Bit mask for every used 5tuple filter */
+ uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
+ struct ixgbe_5tuple_filter_list fivetuple_list;
+ /* store the SYN filter info */
+ uint32_t syn_info;
+ /* store the rss filter info */
+ struct ixgbe_rte_flow_rss_conf rss_info;
+};
+
+struct ixgbe_l2_tn_key {
+ enum rte_eth_tunnel_type l2_tn_type;
+ uint32_t tn_id;
+};
+
+struct ixgbe_l2_tn_filter {
+ TAILQ_ENTRY(ixgbe_l2_tn_filter) entries;
+ struct ixgbe_l2_tn_key key;
+ uint32_t pool;
+};
+
+TAILQ_HEAD(ixgbe_l2_tn_filter_list, ixgbe_l2_tn_filter);
+
+struct ixgbe_l2_tn_info {
+ struct ixgbe_l2_tn_filter_list l2_tn_list;
+ struct ixgbe_l2_tn_filter **hash_map;
+ struct rte_hash *hash_handle;
+ bool e_tag_en; /* e-tag enabled */
+ bool e_tag_fwd_en; /* e-tag based forwarding enabled */
+ bool e_tag_ether_type; /* ether type for e-tag */
+};
+
+struct rte_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+/*
+ * Statistics counters collected by the MACsec
+ */
+struct ixgbe_macsec_stats {
+ /* TX port statistics */
+ uint64_t out_pkts_untagged;
+ uint64_t out_pkts_encrypted;
+ uint64_t out_pkts_protected;
+ uint64_t out_octets_encrypted;
+ uint64_t out_octets_protected;
+
+ /* RX port statistics */
+ uint64_t in_pkts_untagged;
+ uint64_t in_pkts_badtag;
+ uint64_t in_pkts_nosci;
+ uint64_t in_pkts_unknownsci;
+ uint64_t in_octets_decrypted;
+ uint64_t in_octets_validated;
+
+ /* RX SC statistics */
+ uint64_t in_pkts_unchecked;
+ uint64_t in_pkts_delayed;
+ uint64_t in_pkts_late;
+
+ /* RX SA statistics */
+ uint64_t in_pkts_ok;
+ uint64_t in_pkts_invalid;
+ uint64_t in_pkts_notvalid;
+ uint64_t in_pkts_unusedsa;
+ uint64_t in_pkts_notusingsa;
+};
+
+/* The configuration of bandwidth */
+struct ixgbe_bw_conf {
+ uint8_t tc_num; /* Number of TCs. */
+};
+
+/* Struct to store Traffic Manager shaper profile. */
+struct ixgbe_tm_shaper_profile {
+ TAILQ_ENTRY(ixgbe_tm_shaper_profile) node;
+ uint32_t shaper_profile_id;
+ uint32_t reference_count;
+ struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(ixgbe_shaper_profile_list, ixgbe_tm_shaper_profile);
+
+/* node type of Traffic Manager */
+enum ixgbe_tm_node_type {
+ IXGBE_TM_NODE_TYPE_PORT,
+ IXGBE_TM_NODE_TYPE_TC,
+ IXGBE_TM_NODE_TYPE_QUEUE,
+ IXGBE_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store Traffic Manager node configuration. */
+struct ixgbe_tm_node {
+ TAILQ_ENTRY(ixgbe_tm_node) node;
+ uint32_t id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t reference_count;
+ uint16_t no;
+ struct ixgbe_tm_node *parent;
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+ struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(ixgbe_tm_node_list, ixgbe_tm_node);
+
+/* The configuration of Traffic Manager */
+struct ixgbe_tm_conf {
+ struct ixgbe_shaper_profile_list shaper_profile_list;
+ struct ixgbe_tm_node *root; /* root node - port */
+ struct ixgbe_tm_node_list tc_list; /* node list for all the TCs */
+ struct ixgbe_tm_node_list queue_list; /* node list for all the queues */
+ /**
+ * The number of added TC nodes.
+ * It should be no more than the TC number of this port.
+ */
+ uint32_t nb_tc_node;
+ /**
+ * The number of added queue nodes.
+ * It should be no more than the queue number of this port.
+ */
+ uint32_t nb_queue_node;
+ /**
+ * This flag is used to check if APP can change the TM node
+ * configuration.
+ * When it's true, means the configuration is applied to HW,
+ * APP should not change the configuration.
+ * As we don't support on-the-fly configuration, when starting
+ * the port, APP should call the hierarchy_commit API to set this
+ * flag to true. When stopping the port, this flag should be set
+ * to false.
+ */
+ bool committed;
+};
+
+/*
+ * Structure to store private data for each driver instance (for each port).
+ */
+struct ixgbe_adapter {
+ struct ixgbe_hw hw;
+ struct ixgbe_hw_stats stats;
+ struct ixgbe_macsec_stats macsec_stats;
+ struct ixgbe_hw_fdir_info fdir;
+ struct ixgbe_interrupt intr;
+ struct ixgbe_stat_mapping_registers stat_mappings;
+ struct ixgbe_vfta shadow_vfta;
+ struct ixgbe_hwstrip hwstrip;
+ struct ixgbe_dcb_config dcb_config;
+ struct ixgbe_mirror_info mr_data;
+ struct ixgbe_vf_info *vfdata;
+ struct ixgbe_uta_info uta_info;
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
+ struct ixgbe_bypass_info bps;
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
+ struct ixgbe_filter_info filter;
+ struct ixgbe_l2_tn_info l2_tn;
+ struct ixgbe_bw_conf bw_conf;
+#ifdef RTE_LIBRTE_SECURITY
+ struct ixgbe_ipsec ipsec;
+#endif
+ bool rx_bulk_alloc_allowed;
+ bool rx_vec_allowed;
+ struct rte_timecounter systime_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct rte_timecounter tx_tstamp_tc;
+ struct ixgbe_tm_conf tm_conf;
+};
+
+struct ixgbe_vf_representor {
+ uint16_t vf_id;
+ uint16_t switch_domain_id;
+ struct rte_eth_dev *pf_ethdev;
+};
+
+int ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
+int ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);
+
+#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
+ (&((struct ixgbe_adapter *)adapter)->hw)
+
+#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->stats)
+
+#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->macsec_stats)
+
+#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->intr)
+
+#define IXGBE_DEV_PRIVATE_TO_FDIR_INFO(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->fdir)
+
+#define IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->stat_mappings)
+
+#define IXGBE_DEV_PRIVATE_TO_VFTA(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->shadow_vfta)
+
+#define IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->hwstrip)
+
+#define IXGBE_DEV_PRIVATE_TO_DCB_CFG(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->dcb_config)
+
+#define IXGBE_DEV_PRIVATE_TO_P_VFDATA(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->vfdata)
+
+#define IXGBE_DEV_PRIVATE_TO_PFDATA(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->mr_data)
+
+#define IXGBE_DEV_PRIVATE_TO_UTA(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->uta_info)
+
+#define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->filter)
+
+#define IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->l2_tn)
+
+#define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->bw_conf)
+
+#define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->tm_conf)
+
+#define IXGBE_DEV_PRIVATE_TO_IPSEC(adapter)\
+ (&((struct ixgbe_adapter *)adapter)->ipsec)
+
+/*
+ * RX/TX function prototypes
+ */
+void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);
+
+void ixgbe_dev_free_queues(struct rte_eth_dev *dev);
+
+void ixgbe_dev_rx_queue_release(void *rxq);
+
+void ixgbe_dev_tx_queue_release(void *txq);
+
+int ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+int ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+
+int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
+int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
+
+void ixgbe_dev_tx_init(struct rte_eth_dev *dev);
+
+int ixgbe_dev_rxtx_start(struct rte_eth_dev *dev);
+
+int ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+
+void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+
+int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
+
+void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
+
+void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
+
+uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+int ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+uint16_t ixgbe_reta_size_get(enum ixgbe_mac_type mac_type);
+
+uint32_t ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx);
+
+uint32_t ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type);
+
+uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i);
+
+bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);
+
+int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *filter,
+ bool add);
+int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add);
+int
+ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool restore);
+int
+ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel);
+void ixgbe_filterlist_init(void);
+void ixgbe_filterlist_flush(void);
+/*
+ * Flow director function prototypes
+ */
+int ixgbe_fdir_configure(struct rte_eth_dev *dev);
+int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+ uint16_t offset);
+int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct ixgbe_fdir_rule *rule,
+ bool del, bool update);
+
+void ixgbe_configure_dcb(struct rte_eth_dev *dev);
+
+int
+ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ int wait_to_complete, int vf);
+
+/*
+ * misc function prototypes
+ */
+void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
+
+void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
+
+void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
+
+void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
+
+void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);
+
+void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
+
+int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
+
+uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
+
+int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op, void *arg);
+void ixgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+int ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);
+
+extern const struct rte_flow_ops ixgbe_flow_ops;
+
+void ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
+void ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
+void ixgbe_clear_syn_filter(struct rte_eth_dev *dev);
+int ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
+
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_vt_check(struct ixgbe_hw *hw);
+int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
+bool is_ixgbe_supported(struct rte_eth_dev *dev);
+int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
+void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
+void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
+int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t tx_rate);
+int ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
+int ixgbe_config_rss_filter(struct rte_eth_dev *dev,
+ struct ixgbe_rte_flow_rss_conf *conf, bool add);
+
+static inline int
+ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
+ uint16_t ethertype)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_filters[i].ethertype == ethertype &&
+ (filter_info->ethertype_mask & (1 << i)))
+ return i;
+ }
+ return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
+ struct ixgbe_ethertype_filter *ethertype_filter)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (!(filter_info->ethertype_mask & (1 << i))) {
+ filter_info->ethertype_mask |= 1 << i;
+ filter_info->ethertype_filters[i].ethertype =
+ ethertype_filter->ethertype;
+ filter_info->ethertype_filters[i].etqf =
+ ethertype_filter->etqf;
+ filter_info->ethertype_filters[i].etqs =
+ ethertype_filter->etqs;
+ filter_info->ethertype_filters[i].conf =
+ ethertype_filter->conf;
+ return i;
+ }
+ }
+ return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
+ uint8_t idx)
+{
+ if (idx >= IXGBE_MAX_ETQF_FILTERS)
+ return -1;
+ filter_info->ethertype_mask &= ~(1 << idx);
+ filter_info->ethertype_filters[idx].ethertype = 0;
+ filter_info->ethertype_filters[idx].etqf = 0;
+ filter_info->ethertype_filters[idx].etqs = 0;
+ filter_info->ethertype_filters[idx].etqs = FALSE;
+ return idx;
+}
+
+#endif /* _IXGBE_ETHDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_fdir.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_fdir.c
new file mode 100644
index 00000000..e559f0fa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_fdir.c
@@ -0,0 +1,1649 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ixgbe_logs.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+
+/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
+#define FDIRCTRL_PBALLOC_MASK 0x03
+
+/* For calculating memory required for FDIR filters */
+#define PBALLOC_SIZE_SHIFT 15
+
+/* Number of bits used to mask bucket hash for different pballoc sizes */
+#define PERFECT_BUCKET_64KB_HASH_MASK 0x07FF /* 11 bits */
+#define PERFECT_BUCKET_128KB_HASH_MASK 0x0FFF /* 12 bits */
+#define PERFECT_BUCKET_256KB_HASH_MASK 0x1FFF /* 13 bits */
+#define SIG_BUCKET_64KB_HASH_MASK 0x1FFF /* 13 bits */
+#define SIG_BUCKET_128KB_HASH_MASK 0x3FFF /* 14 bits */
+#define SIG_BUCKET_256KB_HASH_MASK 0x7FFF /* 15 bits */
+#define IXGBE_DEFAULT_FLEXBYTES_OFFSET 12 /* default flexbytes offset in bytes */
+#define IXGBE_FDIR_MAX_FLEX_LEN 2 /* len in bytes of flexbytes */
+#define IXGBE_MAX_FLX_SOURCE_OFF 62
+#define IXGBE_FDIRCTRL_FLEX_MASK (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT)
+#define IXGBE_FDIRCMD_CMD_INTERVAL_US 10
+
+#define IXGBE_FDIR_FLOW_TYPES ( \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
+
+#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
+ uint8_t ipv6_addr[16]; \
+ uint8_t i; \
+ rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
+ (ipv6m) = 0; \
+ for (i = 0; i < sizeof(ipv6_addr); i++) { \
+ if (ipv6_addr[i] == UINT8_MAX) \
+ (ipv6m) |= 1 << i; \
+ else if (ipv6_addr[i] != 0) { \
+ PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
+ return -EINVAL; \
+ } \
+ } \
+} while (0)
+
+#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
+ uint8_t ipv6_addr[16]; \
+ uint8_t i; \
+ for (i = 0; i < sizeof(ipv6_addr); i++) { \
+ if ((ipv6m) & (1 << i)) \
+ ipv6_addr[i] = UINT8_MAX; \
+ else \
+ ipv6_addr[i] = 0; \
+ } \
+ rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
+} while (0)
+
+#define DEFAULT_VXLAN_PORT 4789
+#define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4
+
+static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
+static int fdir_set_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask);
+static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
+static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
+static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
+static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
+static int ixgbe_fdir_filter_to_atr_input(
+ const struct rte_eth_fdir_filter *fdir_filter,
+ union ixgbe_atr_input *input,
+ enum rte_fdir_mode mode);
+static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+ uint32_t key);
+static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc);
+static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc);
+static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input, uint8_t queue,
+ uint32_t fdircmd, uint32_t fdirhash,
+ enum rte_fdir_mode mode);
+static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
+ uint32_t fdirhash);
+static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ bool del,
+ bool update);
+static int ixgbe_fdir_flush(struct rte_eth_dev *dev);
+static void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_fdir_info *fdir_info);
+static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_fdir_stats *fdir_stats);
+
+/**
+ * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c.
+ * It adds extra configuration of fdirctrl that is common for all filter types.
+ *
+ * Initialize Flow Director control registers
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register
+ **/
+static int
+fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Prime the keys for hashing */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 filters are left
+ */
+ fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+ /*
+ * Poll init-done after we write the register. Estimated times:
+ * 10G: PBALLOC = 11b, timing is 60us
+ * 1G: PBALLOC = 11b, timing is 600us
+ * 100M: PBALLOC = 11b, timing is 6ms
+ *
+ * Multiple these timings by 4 if under full Rx load
+ *
+ * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+ * 1 msec per poll time. If we're at line rate and drop to 100M, then
+ * this might not finish in our poll time, but we can live with that
+ * for now.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ msec_delay(1);
+ }
+
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+ PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * Set appropriate bits in fdirctrl for: variable reporting levels, moving
+ * flexbytes matching field, and drop queue (only for perfect matching mode).
+ */
+static inline int
+configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+{
+ *fdirctrl = 0;
+
+ switch (conf->pballoc) {
+ case RTE_FDIR_PBALLOC_64K:
+ /* 8k - 1 signature filters */
+ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
+ break;
+ case RTE_FDIR_PBALLOC_128K:
+ /* 16k - 1 signature filters */
+ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
+ break;
+ case RTE_FDIR_PBALLOC_256K:
+ /* 32k - 1 signature filters */
+ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
+ break;
+ default:
+ /* bad value */
+ PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
+ return -EINVAL;
+ };
+
+ /* status flags: write hash & swindex in the rx descriptor */
+ switch (conf->status) {
+ case RTE_FDIR_NO_REPORT_STATUS:
+ /* do nothing, default mode */
+ break;
+ case RTE_FDIR_REPORT_STATUS:
+ /* report status when the packet matches a fdir rule */
+ *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
+ break;
+ case RTE_FDIR_REPORT_STATUS_ALWAYS:
+ /* always report status */
+ *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
+ break;
+ default:
+ /* bad value */
+ PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
+ return -EINVAL;
+ };
+
+ *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
+ IXGBE_FDIRCTRL_FLEX_SHIFT;
+
+ if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
+ conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
+ *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+ if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+ *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
+ << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
+ else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
+ << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
+ }
+
+ return 0;
+}
+
+/**
+ * Reverse the bits in FDIR registers that store 2 x 16 bit masks.
+ *
+ * @hi_dword: Bits 31:16 mask to be bit swapped.
+ * @lo_dword: Bits 15:0 mask to be bit swapped.
+ *
+ * Flow director uses several registers to store 2 x 16 bit masks with the
+ * bits reversed such as FDIRTCPM, FDIRUDPM. The LS bit of the
+ * mask affects the MS bit/byte of the target. This function reverses the
+ * bits in these masks.
+ * **/
+static inline uint32_t
+reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
+{
+ uint32_t mask = hi_dword << 16;
+
+ mask |= lo_dword;
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
+
+/*
+ * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
+ * but makes use of the rte_fdir_masks structure to see which bits to set.
+ */
+static int
+fdir_set_input_mask_82599(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ /*
+ * mask VM pool and DIPv6 since there are currently not supported
+ * mask FLEX byte, it will be set in flex_conf
+ */
+ uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
+ uint32_t fdirtcpm; /* TCP source and destination port masks. */
+ uint32_t fdiripv6m; /* IPv6 source and destination masks. */
+ volatile uint32_t *reg;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ */
+ if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
+ /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+ fdirm |= IXGBE_FDIRM_L4P;
+
+ if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+ /* mask VLAN Priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+ /* mask VLAN ID */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ else if (info->mask.vlan_tci_mask == 0)
+ /* mask VLAN ID and Priority */
+ fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
+ else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+ PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
+ return -EINVAL;
+ }
+
+ /* flex byte mask */
+ if (info->mask.flex_bytes_mask == 0)
+ fdirm |= IXGBE_FDIRM_FLEX;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = reverse_fdir_bitmasks(
+ rte_be_to_cpu_16(info->mask.dst_port_mask),
+ rte_be_to_cpu_16(info->mask.src_port_mask));
+
+ /* write all the same so that UDP, TCP and SCTP use the same mask
+ * (little-endian)
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
+
+ /* Store source and destination IPv4 masks (big-endian),
+ * can not use IXGBE_WRITE_REG.
+ */
+ reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
+ *reg = ~(info->mask.src_ipv4_mask);
+ reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
+ *reg = ~(info->mask.dst_ipv4_mask);
+
+ if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
+ /*
+ * Store source and destination IPv6 masks (bit reversed)
+ */
+ fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
+ info->mask.src_ipv6_mask;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/*
+ * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
+ * but makes use of the rte_fdir_masks structure to see which bits to set.
+ */
+static int
+fdir_set_input_mask_x550(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ /* mask VM pool and DIPv6 since there are currently not supported
+ * mask FLEX byte, it will be set in flex_conf
+ */
+ uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
+ IXGBE_FDIRM_FLEX;
+ uint32_t fdiripv6m;
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+ uint16_t mac_mask;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* set the default UDP port for VxLAN */
+ if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);
+
+ /* some bits must be set for mac vlan or tunnel mode */
+ fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
+
+ if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+ /* mask VLAN Priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+ /* mask VLAN ID */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ else if (info->mask.vlan_tci_mask == 0)
+ /* mask VLAN ID and Priority */
+ fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
+ else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+ PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
+ return -EINVAL;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+ fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
+ fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
+ if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+ fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
+ IXGBE_FDIRIP6M_TNI_VNI;
+
+ if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
+ mac_mask = info->mask.mac_addr_byte_mask &
+ (IXGBE_FDIRIP6M_INNER_MAC >>
+ IXGBE_FDIRIP6M_INNER_MAC_SHIFT);
+ fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) &
+ IXGBE_FDIRIP6M_INNER_MAC);
+
+ switch (info->mask.tunnel_type_mask) {
+ case 0:
+ /* Mask turnnel type */
+ fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
+ break;
+ case 1:
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
+ return -EINVAL;
+ }
+
+ switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
+ case 0x0:
+ /* Mask vxlan id */
+ fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
+ break;
+ case 0x00FFFFFF:
+ fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
+ break;
+ case 0xFFFFFFFF:
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
+ return -EINVAL;
+ }
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
+
+ return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ uint16_t dst_ipv6m = 0;
+ uint16_t src_ipv6m = 0;
+
+ memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+ info->mask.src_port_mask = input_mask->src_port_mask;
+ info->mask.dst_port_mask = input_mask->dst_port_mask;
+ info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
+ info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
+ info->mask.src_ipv6_mask = src_ipv6m;
+ info->mask.dst_ipv6_mask = dst_ipv6m;
+
+ return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+
+ memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+ info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
+ info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
+ info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;
+
+ return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (mode >= RTE_FDIR_MODE_SIGNATURE &&
+ mode <= RTE_FDIR_MODE_PERFECT)
+ return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
+ else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ return ixgbe_fdir_store_input_mask_x550(dev, input_mask);
+
+ PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+ return -ENOTSUP;
+}
+
+int
+ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
+{
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (mode >= RTE_FDIR_MODE_SIGNATURE &&
+ mode <= RTE_FDIR_MODE_PERFECT)
+ return fdir_set_input_mask_82599(dev);
+ else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ return fdir_set_input_mask_x550(dev);
+
+ PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+ return -ENOTSUP;
+}
+
+int
+ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+ uint16_t offset)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fdirctrl;
+ int i;
+
+ fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+
+ fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
+ fdirctrl |= ((offset >> 1) /* convert to word offset */
+ << IXGBE_FDIRCTRL_FLEX_SHIFT);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ msec_delay(1);
+ }
+ return 0;
+}
+
+static int
+fdir_set_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ int ret;
+
+ ret = ixgbe_fdir_store_input_mask(dev, input_mask);
+ if (ret)
+ return ret;
+
+ return ixgbe_fdir_set_input_mask(dev);
+}
+
+/*
+ * ixgbe_check_fdir_flex_conf -check if the flex payload and mask configuration
+ * arguments are valid
+ */
+static int
+ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ const struct rte_eth_flex_payload_cfg *flex_cfg;
+ const struct rte_eth_fdir_flex_mask *flex_mask;
+ uint32_t fdirm;
+ uint16_t flexbytes = 0;
+ uint16_t i;
+
+ fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);
+
+ if (conf == NULL) {
+ PMD_DRV_LOG(ERR, "NULL pointer.");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < conf->nb_payloads; i++) {
+ flex_cfg = &conf->flex_set[i];
+ if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
+ PMD_DRV_LOG(ERR, "unsupported payload type.");
+ return -EINVAL;
+ }
+ if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
+ (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
+ (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) {
+ *fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
+ *fdirctrl |=
+ (flex_cfg->src_offset[0] / sizeof(uint16_t)) <<
+ IXGBE_FDIRCTRL_FLEX_SHIFT;
+ } else {
+ PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < conf->nb_flexmasks; i++) {
+ flex_mask = &conf->flex_mask[i];
+ if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
+ PMD_DRV_LOG(ERR, "flexmask should be set globally.");
+ return -EINVAL;
+ }
+ flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) |
+ ((flex_mask->mask[1]) & 0xFF));
+ if (flexbytes == UINT16_MAX)
+ fdirm &= ~IXGBE_FDIRM_FLEX;
+ else if (flexbytes != 0) {
+ /* IXGBE_FDIRM_FLEX is set by default when set mask */
+ PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments.");
+ return -EINVAL;
+ }
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+ info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
+ info->flex_bytes_offset = (uint8_t)((*fdirctrl &
+ IXGBE_FDIRCTRL_FLEX_MASK) >>
+ IXGBE_FDIRCTRL_FLEX_SHIFT);
+ return 0;
+}
+
+int
+ixgbe_fdir_configure(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int err;
+ uint32_t fdirctrl, pbsize;
+ int i;
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a)
+ return -ENOSYS;
+
+ /* x550 supports mac-vlan and tunnel mode but other NICs not */
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a &&
+ mode != RTE_FDIR_MODE_SIGNATURE &&
+ mode != RTE_FDIR_MODE_PERFECT)
+ return -ENOSYS;
+
+ err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
+ if (err)
+ return err;
+
+ /*
+ * Before enabling Flow Director, the Rx Packet Buffer size
+ * must be reduced. The new value is the current size minus
+ * flow director memory usage size.
+ */
+ pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
+ (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
+
+ /*
+ * The defaults in the HW for RX PB 1-7 are not zero and so should be
+ * initialized to zero for non DCB mode otherwise actual total RX PB
+ * would be bigger than programmed and filter space would run into
+ * the PB 0 region.
+ */
+ for (i = 1; i < 8; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+
+ err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on setting FD mask");
+ return err;
+ }
+ err = ixgbe_set_fdir_flex_conf(dev,
+ &dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
+ return err;
+ }
+
+ err = fdir_enable_82599(hw, fdirctrl);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on enabling FD.");
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Convert DPDK rte_eth_fdir_filter struct to ixgbe_atr_input union that is used
+ * by the IXGBE driver code.
+ */
+static int
+ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
+ union ixgbe_atr_input *input, enum rte_fdir_mode mode)
+{
+ input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
+ input->formatted.flex_bytes = (uint16_t)(
+ (fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
+ (fdir_filter->input.flow_ext.flexbytes[0] & 0xFF));
+
+ switch (fdir_filter->input.flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
+ break;
+ default:
+ break;
+ }
+
+ switch (fdir_filter->input.flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ input->formatted.src_port =
+ fdir_filter->input.flow.udp4_flow.src_port;
+ input->formatted.dst_port =
+ fdir_filter->input.flow.udp4_flow.dst_port;
+ /* fall-through */
+ /*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ input->formatted.src_ip[0] =
+ fdir_filter->input.flow.ip4_flow.src_ip;
+ input->formatted.dst_ip[0] =
+ fdir_filter->input.flow.ip4_flow.dst_ip;
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ input->formatted.src_port =
+ fdir_filter->input.flow.udp6_flow.src_port;
+ input->formatted.dst_port =
+ fdir_filter->input.flow.udp6_flow.dst_port;
+ /* fall-through */
+ /*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ rte_memcpy(input->formatted.src_ip,
+ fdir_filter->input.flow.ipv6_flow.src_ip,
+ sizeof(input->formatted.src_ip));
+ rte_memcpy(input->formatted.dst_ip,
+ fdir_filter->input.flow.ipv6_flow.dst_ip,
+ sizeof(input->formatted.dst_ip));
+ break;
+ default:
+ break;
+ }
+
+ if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ rte_memcpy(
+ input->formatted.inner_mac,
+ fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
+ sizeof(input->formatted.inner_mac));
+ } else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ rte_memcpy(
+ input->formatted.inner_mac,
+ fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
+ sizeof(input->formatted.inner_mac));
+ if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+ RTE_FDIR_TUNNEL_TYPE_VXLAN)
+ input->formatted.tunnel_type =
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
+ else if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+ RTE_FDIR_TUNNEL_TYPE_NVGRE)
+ input->formatted.tunnel_type =
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
+ else
+ PMD_DRV_LOG(ERR, " invalid tunnel type arguments.");
+
+ input->formatted.tni_vni =
+ fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8;
+ }
+
+ return 0;
+}
+
+/*
+ * The below function is taken from the FreeBSD IXGBE drivers release
+ * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
+ * before returning, as the signature hash can use 16bits.
+ *
+ * The newer driver has optimised functions for calculating bucket and
+ * signature hashes. However they don't support IPv6 type packets for signature
+ * filters so are not used here.
+ *
+ * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
+ * set.
+ *
+ * Compute the hashes for SW ATR
+ * @stream: input bitstream to compute the hash on
+ * @key: 32-bit hash key
+ **/
+static uint32_t
+ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+ uint32_t key)
+{
+ /*
+ * The algorithm is as follows:
+ * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
+ * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
+ * and A[n] x B[n] is bitwise AND between same length strings
+ *
+ * K[n] is 16 bits, defined as:
+ * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
+ * for n modulo 32 < 15, K[n] =
+ * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
+ *
+ * S[n] is 16 bits, defined as:
+ * for n >= 15, S[n] = S[n:n - 15]
+ * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
+ *
+ * To simplify for programming, the algorithm is implemented
+ * in software this way:
+ *
+ * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+ *
+ * for (i = 0; i < 352; i+=32)
+ * hi_hash_dword[31:0] ^= Stream[(i+31):i];
+ *
+ * lo_hash_dword[15:0] ^= Stream[15:0];
+ * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
+ * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
+ *
+ * hi_hash_dword[31:0] ^= Stream[351:320];
+ *
+ * if (key[0])
+ * hash[15:0] ^= Stream[15:0];
+ *
+ * for (i = 0; i < 16; i++) {
+ * if (key[i])
+ * hash[15:0] ^= lo_hash_dword[(i+15):i];
+ * if (key[i + 16])
+ * hash[15:0] ^= hi_hash_dword[(i+15):i];
+ * }
+ *
+ */
+ __be32 common_hash_dword = 0;
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 hash_result = 0;
+ u8 i;
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);
+
+ /* generate common hash dword */
+ for (i = 1; i <= 13; i++)
+ common_hash_dword ^= atr_input->dword_stream[i];
+
+ hi_hash_dword = IXGBE_NTOHL(common_hash_dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ if (key & 0x0001)
+ hash_result ^= lo_hash_dword;
+ if (key & 0x00010000)
+ hash_result ^= hi_hash_dword;
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the vlan until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+
+ /* process the remaining 30 bits in the key 2 bits at a time */
+ for (i = 15; i; i--) {
+ if (key & (0x0001 << i))
+ hash_result ^= lo_hash_dword >> i;
+ if (key & (0x00010000 << i))
+ hash_result ^= hi_hash_dword >> i;
+ }
+
+ return hash_result;
+}
+
+static uint32_t
+atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc)
+{
+ if (pballoc == RTE_FDIR_PBALLOC_256K)
+ return ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ PERFECT_BUCKET_256KB_HASH_MASK;
+ else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ return ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ PERFECT_BUCKET_128KB_HASH_MASK;
+ else
+ return ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ PERFECT_BUCKET_64KB_HASH_MASK;
+}
+
+/**
+ * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
+ * @hw: pointer to hardware structure
+ */
+static inline int
+ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+ *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+ if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+ return 0;
+ rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Calculate the hash value needed for signature-match filters. In the FreeBSD
+ * driver, this is done by the optimised function
+ * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
+ * doesn't support calculating a hash for an IPv6 filter.
+ */
+static uint32_t
+atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc)
+{
+ uint32_t bucket_hash, sig_hash;
+
+ if (pballoc == RTE_FDIR_PBALLOC_256K)
+ bucket_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ SIG_BUCKET_256KB_HASH_MASK;
+ else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ bucket_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ SIG_BUCKET_128KB_HASH_MASK;
+ else
+ bucket_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ SIG_BUCKET_64KB_HASH_MASK;
+
+ sig_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
+}
+
+/*
+ * This is based on ixgbe_fdir_write_perfect_filter_82599() in
+ * base/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
+ * added, and IPv6 support also added. The hash value is also pre-calculated
+ * as the pballoc value is needed to do it.
+ */
+static int
+fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input, uint8_t queue,
+ uint32_t fdircmd, uint32_t fdirhash,
+ enum rte_fdir_mode mode)
+{
+ uint32_t fdirport, fdirvlan;
+ u32 addr_low, addr_high;
+ u32 tunnel_type = 0;
+ int err = 0;
+ volatile uint32_t *reg;
+
+ if (mode == RTE_FDIR_MODE_PERFECT) {
+ /* record the IPv4 address (big-endian)
+ * can not use IXGBE_WRITE_REG.
+ */
+ reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPSA);
+ *reg = input->formatted.src_ip[0];
+ reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPDA);
+ *reg = input->formatted.dst_ip[0];
+
+ /* record source and destination port (little-endian)*/
+ fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+ } else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ /* for mac vlan and tunnel modes */
+ addr_low = ((u32)input->formatted.inner_mac[0] |
+ ((u32)input->formatted.inner_mac[1] << 8) |
+ ((u32)input->formatted.inner_mac[2] << 16) |
+ ((u32)input->formatted.inner_mac[3] << 24));
+ addr_high = ((u32)input->formatted.inner_mac[4] |
+ ((u32)input->formatted.inner_mac[5] << 8));
+
+ if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
+ } else {
+ /* tunnel mode */
+ if (input->formatted.tunnel_type)
+ tunnel_type = 0x80000000;
+ tunnel_type |= addr_high;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
+ input->formatted.tni_vni);
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0);
+ }
+
+ /* record vlan (little-endian) and flex_bytes(big-endian) */
+ fdirvlan = input->formatted.flex_bytes;
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+ /* configure FDIRHASH register */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /*
+ * flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* configure FDIRCMD register */
+ fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
+
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0)
+ PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
+
+ return err;
+}
+
+/**
+ * This function is based on ixgbe_atr_add_signature_filter_82599() in
+ * base/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
+ * setting extra fields in the FDIRCMD register, and removes the code that was
+ * verifying the flow_type field. According to the documentation, a flow type of
+ * 00 (i.e. not TCP, UDP, or SCTP) is not supported, however it appears to
+ * work ok...
+ *
+ * Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @input: unique input dword
+ * @queue: queue index to direct traffic to
+ * @fdircmd: any extra flags to set in fdircmd register
+ * @fdirhash: pre-calculated hash value for the filter
+ **/
+static int
+fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
+ uint32_t fdirhash)
+{
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* configure FDIRCMD register */
+ fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
+
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0)
+ PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
+
+ return err;
+}
+
+/*
+ * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
+ * base/ixgbe_82599.c. It is modified to take in the hash as a parameter so
+ * that it can be used for removing signature and perfect filters.
+ */
+static int
+fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
+{
+ uint32_t fdircmd = 0;
+ int err = 0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /* flush hash to HW */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Query if filter is present */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
+ return err;
+ }
+
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+ }
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0)
+ PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
+ return err;
+
+}
+
+static inline struct ixgbe_fdir_filter *
+ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
+ union ixgbe_atr_input *key)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+static inline int
+ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
+ struct ixgbe_fdir_filter *fdir_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_handle,
+ &fdir_filter->ixgbe_fdir);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ fdir_info->hash_map[ret] = fdir_filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
+
+ return 0;
+}
+
+static inline int
+ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
+ union ixgbe_atr_input *key)
+{
+ int ret;
+ struct ixgbe_fdir_filter *fdir_filter;
+
+ ret = rte_hash_del_key(fdir_info->hash_handle, key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret);
+ return ret;
+ }
+
+ fdir_filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
+ rte_free(fdir_filter);
+
+ return 0;
+}
+
+static int
+ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ struct ixgbe_fdir_rule *rule)
+{
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ int err;
+
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+
+ err = ixgbe_fdir_filter_to_atr_input(fdir_filter,
+ &rule->ixgbe_fdir,
+ fdir_mode);
+ if (err)
+ return err;
+
+ rule->mode = fdir_mode;
+ if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
+ rule->fdirflags = IXGBE_FDIRCMD_DROP;
+ rule->queue = fdir_filter->action.rx_queue;
+ rule->soft_id = fdir_filter->soft_id;
+
+ return 0;
+}
+
+int
+ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct ixgbe_fdir_rule *rule,
+ bool del,
+ bool update)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fdircmd_flags;
+ uint32_t fdirhash;
+ uint8_t queue;
+ bool is_perfect = FALSE;
+ int err;
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ struct ixgbe_fdir_filter *node;
+ bool add_node = FALSE;
+
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
+ return -ENOTSUP;
+
+ /*
+ * Sanity check for x550.
+ * When adding a new filter with flow type set to IPv4,
+ * the flow director mask should be configed before,
+ * and the L4 protocol and ports are masked.
+ */
+ if ((!del) &&
+ (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a) &&
+ (rule->ixgbe_fdir.formatted.flow_type ==
+ IXGBE_ATR_FLOW_TYPE_IPV4 ||
+ rule->ixgbe_fdir.formatted.flow_type ==
+ IXGBE_ATR_FLOW_TYPE_IPV6) &&
+ (info->mask.src_port_mask != 0 ||
+ info->mask.dst_port_mask != 0) &&
+ (rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ rule->mode != RTE_FDIR_MODE_PERFECT_TUNNEL)) {
+ PMD_DRV_LOG(ERR, "By this device,"
+ " IPv4 is not supported without"
+ " L4 protocol and ports masked!");
+ return -ENOTSUP;
+ }
+
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ is_perfect = TRUE;
+
+ if (is_perfect) {
+ if (rule->ixgbe_fdir.formatted.flow_type &
+ IXGBE_ATR_L4TYPE_IPV6_MASK) {
+ PMD_DRV_LOG(ERR, "IPv6 is not supported in"
+ " perfect mode!");
+ return -ENOTSUP;
+ }
+ fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
+ dev->data->dev_conf.fdir_conf.pballoc);
+ fdirhash |= rule->soft_id <<
+ IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ } else
+ fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
+ dev->data->dev_conf.fdir_conf.pballoc);
+
+ if (del) {
+ err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
+ if (err < 0)
+ return err;
+
+ err = fdir_erase_filter_82599(hw, fdirhash);
+ if (err < 0)
+ PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
+ else
+ PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!");
+ return err;
+ }
+ /* add or update an fdir filter*/
+ fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
+ if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
+ if (is_perfect) {
+ queue = dev->data->dev_conf.fdir_conf.drop_queue;
+ fdircmd_flags |= IXGBE_FDIRCMD_DROP;
+ } else {
+ PMD_DRV_LOG(ERR, "Drop option is not supported in"
+ " signature mode.");
+ return -EINVAL;
+ }
+ } else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
+ queue = (uint8_t)rule->queue;
+ else
+ return -EINVAL;
+
+ node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
+ if (node) {
+ if (update) {
+ node->fdirflags = fdircmd_flags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+ } else {
+ PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
+ return -EINVAL;
+ }
+ } else {
+ add_node = TRUE;
+ node = rte_zmalloc("ixgbe_fdir",
+ sizeof(struct ixgbe_fdir_filter),
+ 0);
+ if (!node)
+ return -ENOMEM;
+ rte_memcpy(&node->ixgbe_fdir,
+ &rule->ixgbe_fdir,
+ sizeof(union ixgbe_atr_input));
+ node->fdirflags = fdircmd_flags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+
+ err = ixgbe_insert_fdir_filter(info, node);
+ if (err < 0) {
+ rte_free(node);
+ return err;
+ }
+ }
+
+ if (is_perfect) {
+ err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
+ queue, fdircmd_flags,
+ fdirhash, fdir_mode);
+ } else {
+ err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
+ queue, fdircmd_flags,
+ fdirhash);
+ }
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
+
+ if (add_node)
+ (void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
+ } else {
+ PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
+ }
+
+ return err;
+}
+
+/* ixgbe_add_del_fdir_filter - add or remove a flow diretor filter.
+ * @dev: pointer to the structure rte_eth_dev
+ * @fdir_filter: fdir filter entry
+ * @del: 1 - delete, 0 - add
+ * @update: 1 - update
+ */
+static int
+ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ bool del,
+ bool update)
+{
+ struct ixgbe_fdir_rule rule;
+ int err;
+
+ err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule);
+
+ if (err)
+ return err;
+
+ return ixgbe_fdir_filter_program(dev, &rule, del, update);
+}
+
+static int
+ixgbe_fdir_flush(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ int ret;
+
+ ret = ixgbe_reinit_fdir_tables_82599(hw);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
+ return ret;
+ }
+
+ info->f_add = 0;
+ info->f_remove = 0;
+ info->add = 0;
+ info->remove = 0;
+
+ return ret;
+}
+
+#define FDIRENTRIES_NUM_SHIFT 10
+static void
+ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ uint32_t fdirctrl, max_num, i;
+ uint8_t offset;
+
+ fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
+ IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);
+
+ fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
+ max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
+ (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
+ if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ fdir_info->guarant_spc = max_num;
+ else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
+ fdir_info->guarant_spc = max_num * 4;
+
+ fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
+ fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
+ fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
+ IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
+ fdir_info->mask.ipv6_mask.src_ip);
+ IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
+ fdir_info->mask.ipv6_mask.dst_ip);
+ fdir_info->mask.src_port_mask = info->mask.src_port_mask;
+ fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
+ fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
+ fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
+ fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
+ fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;
+
+ if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
+ fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ fdir_info->flow_types_mask[0] = 0ULL;
+ else
+ fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+ for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
+ fdir_info->flow_types_mask[i] = 0ULL;
+
+ fdir_info->flex_payload_unit = sizeof(uint16_t);
+ fdir_info->max_flex_payload_segment_num = 1;
+ fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
+ fdir_info->flex_conf.nb_payloads = 1;
+ fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
+ fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
+ fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
+ fdir_info->flex_conf.nb_flexmasks = 1;
+ fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
+ fdir_info->flex_conf.flex_mask[0].mask[0] =
+ (uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
+ fdir_info->flex_conf.flex_mask[0].mask[1] =
+ (uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
+}
+
+static void
+ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ uint32_t reg, max_num;
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ /* Get the information from registers */
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
+ info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
+ IXGBE_FDIRFREE_COLL_SHIFT);
+ info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
+ IXGBE_FDIRFREE_FREE_SHIFT);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+ info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
+ IXGBE_FDIRLEN_MAXHASH_SHIFT);
+ info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
+ IXGBE_FDIRLEN_MAXLEN_SHIFT);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+ info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
+ IXGBE_FDIRUSTAT_REMOVE_SHIFT;
+ info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
+ IXGBE_FDIRUSTAT_ADD_SHIFT;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
+ info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
+ IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
+ info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
+ IXGBE_FDIRFSTAT_FADD_SHIFT;
+
+ /* Copy the new information in the fdir parameter */
+ fdir_stats->collision = info->collision;
+ fdir_stats->free = info->free;
+ fdir_stats->maxhash = info->maxhash;
+ fdir_stats->maxlen = info->maxlen;
+ fdir_stats->remove = info->remove;
+ fdir_stats->add = info->add;
+ fdir_stats->f_remove = info->f_remove;
+ fdir_stats->f_add = info->f_add;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
+ (reg & FDIRCTRL_PBALLOC_MASK)));
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ fdir_stats->guarant_cnt = max_num - fdir_stats->free;
+ else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
+ fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
+
+}
+
+/*
+ * ixgbe_fdir_ctrl_func - deal with all operations on flow director.
+ * @dev: pointer to the structure rte_eth_dev
+ * @filter_op:operation will be taken
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+int
+ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op, void *arg)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = 0;
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a)
+ return -ENOTSUP;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
+ return -EINVAL;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = ixgbe_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg, FALSE, FALSE);
+ break;
+ case RTE_ETH_FILTER_UPDATE:
+ ret = ixgbe_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg, FALSE, TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = ixgbe_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg, TRUE, FALSE);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ ret = ixgbe_fdir_flush(dev);
+ break;
+ case RTE_ETH_FILTER_INFO:
+ ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
+ break;
+ case RTE_ETH_FILTER_STATS:
+ ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+/* restore flow director filter */
+void
+ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_fdir_filter *node;
+ bool is_perfect = FALSE;
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ is_perfect = TRUE;
+
+ if (is_perfect) {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_write_perfect_filter_82599(hw,
+ &node->ixgbe_fdir,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash,
+ fdir_mode);
+ }
+ } else {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_add_signature_filter_82599(hw,
+ &node->ixgbe_fdir,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash);
+ }
+ }
+}
+
+/* remove all the flow director filters */
+int
+ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_fdir_filter *fdir_filter;
+ struct ixgbe_fdir_filter *filter_flag;
+ int ret = 0;
+
+ /* flush flow director */
+ rte_hash_reset(fdir_info->hash_handle);
+ memset(fdir_info->hash_map, 0,
+ sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
+ filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ if (filter_flag != NULL)
+ ret = ixgbe_fdir_flush(dev);
+
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_flow.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_flow.c
new file mode 100644
index 00000000..1adf1b80
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_flow.c
@@ -0,0 +1,3463 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+#include <rte_hash_crc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "ixgbe_logs.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_vf.h"
+#include "base/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_bypass.h"
+#include "ixgbe_rxtx.h"
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_phy.h"
+#include "rte_pmd_ixgbe.h"
+
+
+#define IXGBE_MIN_N_TUPLE_PRIO 1
+#define IXGBE_MAX_N_TUPLE_PRIO 7
+#define IXGBE_MAX_FLX_SOURCE_OFF 62
+
+/* ntuple filter list structure */
+struct ixgbe_ntuple_filter_ele {
+ TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct ixgbe_ethertype_filter_ele {
+ TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct ixgbe_eth_syn_filter_ele {
+ TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
+ struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct ixgbe_fdir_rule_ele {
+ TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
+ struct ixgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct ixgbe_eth_l2_tunnel_conf_ele {
+ TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
+ struct rte_eth_l2_tunnel_conf filter_info;
+};
+/* rss filter list structure */
+struct ixgbe_rss_conf_ele {
+ TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
+ struct ixgbe_rte_flow_rss_conf filter_info;
+};
+/* ixgbe_flow memory list structure */
+struct ixgbe_flow_mem {
+ TAILQ_ENTRY(ixgbe_flow_mem) entries;
+ struct rte_flow *flow;
+};
+
+TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
+TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
+TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
+TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
+TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
+TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
+
+static struct ixgbe_ntuple_filter_list filter_ntuple_list;
+static struct ixgbe_ethertype_filter_list filter_ethertype_list;
+static struct ixgbe_syn_filter_list filter_syn_list;
+static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
+static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct ixgbe_rss_filter_list filter_rss_list;
+static struct ixgbe_flow_mem_list ixgbe_flow_list;
+
+/**
+ * Endless loop will never happen with below assumption
+ * 1. there is at least one no-void item(END)
+ * 2. cur is before END.
+ */
+static inline
+const struct rte_flow_item *next_no_void_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ cur ? cur + 1 : &pattern[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
+
+static inline
+const struct rte_flow_action *next_no_void_action(
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action *cur)
+{
+ const struct rte_flow_action *next =
+ cur ? cur + 1 : &actions[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
+
+/**
+ * Please aware there's an asumption for all the parsers.
+ * rte_flow_item is using big endian, rte_flow_attr and
+ * rte_flow_action are using CPU order.
+ * Because the pattern is used to describe the packets,
+ * normally the packets should use network order.
+ */
+
+/**
+ * Parse the rule to see if it is a n-tuple rule.
+ * And get the n-tuple filter info BTW.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * next_proto_id 17 0xFF
+ * UDP/TCP/ src_port 80 0xFFFF
+ * SCTP dst_port 80 0xFFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
+ */
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ struct rte_flow_item_eth eth_null;
+ struct rte_flow_item_vlan vlan_null;
+
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
+ memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+
+#ifdef RTE_LIBRTE_SECURITY
+ /**
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+ const void *conf = act->conf;
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* get the IP pattern*/
+ item = next_no_void_pattern(pattern, NULL);
+ while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ if (item->last ||
+ item->type == RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "IP pattern missing.");
+ return -rte_errno;
+ }
+ item = next_no_void_pattern(pattern, item);
+ }
+
+ filter->proto = IPPROTO_ESP;
+ return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+ }
+#endif
+
+ /* the first not void item can be MAC or IPv4 */
+ item = next_no_void_pattern(pattern, NULL);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+
+ }
+ /* if the first item is MAC, the content should be NULL */
+ if ((item->spec || item->mask) &&
+ (memcmp(eth_spec, &eth_null,
+ sizeof(struct rte_flow_item_eth)) ||
+ memcmp(eth_mask, &eth_null,
+ sizeof(struct rte_flow_item_eth)))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 or Vlan */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* the content should be NULL */
+ if ((item->spec || item->mask) &&
+ (memcmp(vlan_spec, &vlan_null,
+ sizeof(struct rte_flow_item_vlan)) ||
+ memcmp(vlan_mask, &vlan_null,
+ sizeof(struct rte_flow_item_vlan)))) {
+
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->mask) {
+ /* get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ ipv4_mask = item->mask;
+ /**
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+ }
+
+ /* check if the next not void item is TCP or UDP */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+ (!item->spec && !item->mask)) {
+ goto action;
+ }
+
+ /* get the TCP/UDP/SCTP info */
+ if (item->type != RTE_FLOW_ITEM_TYPE_END &&
+ (!item->spec || !item->mask)) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ tcp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ if (tcp_mask->hdr.tcp_flags == 0xFF) {
+ filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else if (!tcp_mask->hdr.tcp_flags) {
+ filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ tcp_spec = item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ udp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ filter->src_port_mask = udp_mask->hdr.src_port;
+
+ udp_spec = item->spec;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ filter->src_port = udp_spec->hdr.src_port;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+ sctp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
+ } else {
+ goto action;
+ }
+
+ /* check if the next not void item is END */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+action:
+
+ /**
+ * n-tuple only supports forwarding,
+ * check if the first not void action is QUEUE.
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ item, "Not supported action.");
+ return -rte_errno;
+ }
+ filter->queue =
+ ((const struct rte_flow_action_queue *)act->conf)->index;
+
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+ filter->priority = (uint16_t)attr->priority;
+ if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+ attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
+ filter->priority = 1;
+
+ return 0;
+}
+
+/* a specific function for ixgbe because the flags is specific */
+static int
+ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+
+ if (ret)
+ return ret;
+
+#ifdef RTE_LIBRTE_SECURITY
+ /* ESP flow not really a flow*/
+ if (filter->proto == IPPROTO_ESP)
+ return 0;
+#endif
+
+ /* Ixgbe doesn't support tcp flags. */
+ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* Ixgbe doesn't support many priorities. */
+ if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+ filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Priority not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= dev->data->nb_rx_queues)
+ return -rte_errno;
+
+ /* fixed value for ixgbe */
+ filter->flags = RTE_5TUPLE_FLAGS;
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a ethertype rule.
+ * And get the ethertype filter info BTW.
+ * pattern:
+ * The first not void item can be ETH.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH type 0x0807 0xFFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_action_queue *act_q;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ item = next_no_void_pattern(pattern, NULL);
+ /* The first non-void item should be MAC. */
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ether address mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ /* Check if the next non-void item is END. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter.");
+ return -rte_errno;
+ }
+
+ /* Parse action */
+
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* Parse attr */
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_ethertype_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ /* Ixgbe doesn't support MAC address. */
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= dev->data->nb_rx_queues) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue index much too big");
+ return -rte_errno;
+ }
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "IPv4/IPv6 not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "mac compare is unsupported");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "drop option is unsupported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a TCP SYN rule.
+ * And get the TCP SYN filter info BTW.
+ * pattern:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4 or IPV6.
+ * The third not void item must be TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * TCP tcp_flags 0x02 0xFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_action_queue *act_q;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+
+ /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+ item = next_no_void_pattern(pattern, NULL);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* if the item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN address mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is IPv4 or IPv6 */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* if the item is IP, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is TCP */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. Only support SYN. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+ if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+ tcp_mask->hdr.src_port ||
+ tcp_mask->hdr.dst_port ||
+ tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* check if the first not void action is QUEUE. */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Support 2 priorities, the lowest or highest. */
+ if (!attr->priority) {
+ filter->hig_pri = 0;
+ } else if (attr->priority == (uint32_t)~0U) {
+ filter->hig_pri = 1;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_syn_filter(attr, pattern,
+ actions, filter, error);
+
+ if (filter->queue >= dev->data->nb_rx_queues)
+ return -rte_errno;
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a L2 tunnel rule.
+ * And get the L2 tunnel filter info BTW.
+ * Only support E-tag now.
+ * pattern:
+ * The first not void item can be E_TAG.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be VF or PF.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * E_TAG grp 0x1 0x3
+ e_cid_base 0x309 0xFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_e_tag *e_tag_spec;
+ const struct rte_flow_item_e_tag *e_tag_mask;
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_vf *act_vf;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* The first not void item should be e-tag. */
+ item = next_no_void_pattern(pattern, NULL);
+ if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ if (!item->spec || !item->mask) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ e_tag_spec = item->spec;
+ e_tag_mask = item->mask;
+
+ /* Only care about GRP and E cid base. */
+ if (e_tag_mask->epcp_edei_in_ecid_b ||
+ e_tag_mask->in_ecid_e ||
+ e_tag_mask->ecid_e ||
+ e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+ /**
+ * grp and e_cid_base are bit fields and only use 14 bits.
+ * e-tag id is taken as little endian by HW.
+ */
+ filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
+
+ /* check if the next not void item is END */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->priority) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* check if the first not void action is VF or PF. */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
+ act->type != RTE_FLOW_ACTION_TYPE_PF) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
+ act_vf = (const struct rte_flow_action_vf *)act->conf;
+ filter->pool = act_vf->id;
+ } else {
+ filter->pool = pci_dev->max_vfs;
+ }
+
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *l2_tn_filter,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint16_t vf_num;
+
+ ret = cons_parse_l2_tn_filter(dev, attr, pattern,
+ actions, l2_tn_filter, error);
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ vf_num = pci_dev->max_vfs;
+
+ if (l2_tn_filter->pool > vf_num)
+ return -rte_errno;
+
+ return ret;
+}
+
+/* Parse to get the attr and action info of flow director rule. */
+static int
+ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark;
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->priority) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* check if the first not void action is QUEUE or DROP. */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ rule->queue = act_q->index;
+ } else { /* drop */
+ /* signature mode does not support drop action. */
+ if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+ rule->fdirflags = IXGBE_FDIRCMD_DROP;
+ }
+
+ /* check if the next not void item is MARK */
+ act = next_no_void_action(actions, act);
+ if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
+ (act->type != RTE_FLOW_ACTION_TYPE_END)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rule->soft_id = 0;
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark = (const struct rte_flow_action_mark *)act->conf;
+ rule->soft_id = mark->id;
+ act = next_no_void_action(actions, act);
+ }
+
+ /* check if the next not void item is END */
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* search next no void pattern and skip fuzzy */
+static inline
+const struct rte_flow_item *next_no_fuzzy_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ next_no_void_pattern(pattern, cur);
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
+ return next;
+ next = next_no_void_pattern(pattern, next);
+ }
+}
+
+static inline uint8_t signature_match(const struct rte_flow_item pattern[])
+{
+ const struct rte_flow_item_fuzzy *spec, *last, *mask;
+ const struct rte_flow_item *item;
+ uint32_t sh, lh, mh;
+ int i = 0;
+
+ while (1) {
+ item = pattern + i;
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ break;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
+ spec = item->spec;
+ last = item->last;
+ mask = item->mask;
+
+ if (!spec || !mask)
+ return 0;
+
+ sh = spec->thresh;
+
+ if (!last)
+ lh = sh;
+ else
+ lh = last->thresh;
+
+ mh = mask->thresh;
+ sh = sh & mh;
+ lh = lh & mh;
+
+ if (!sh || sh > lh)
+ return 0;
+
+ return 1;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
+ * And get the flow director filter info BTW.
+ * UDP/TCP/SCTP PATTERN:
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
+ * The next not void item could be UDP or TCP or SCTP (optional)
+ * The next not void item could be RAW (for flexbyte, optional)
+ * The next not void item must be END.
+ * A Fuzzy Match pattern can appear at any place before END.
+ * Fuzzy Match is optional for IPV4 but is required for IPV6
+ * MAC VLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be MAC VLAN.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * UDP/TCP/SCTP pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * UDP/TCP/SCTP src_port 80 0xFFFF
+ * dst_port 80 0xFFFF
+ * FLEX relative 0 0x1
+ * search 0 0x1
+ * reserved 0 0
+ * offset 12 0xFFFFFFFF
+ * limit 0 0xFFFF
+ * length 2 0xFFFF
+ * pattern[0] 0x86 0xFF
+ * pattern[1] 0xDD 0xFF
+ * END
+ * MAC VLAN pattern example:
+ * ITEM Spec Mask
+ * ETH dst_addr
+ {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
+ 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
+ * END
+ * Other members in mask and spec should set to 0x00.
+ * Item->last should be NULL.
+ */
+static int
+ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec;
+ const struct rte_flow_item_ipv6 *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ const struct rte_flow_item_raw *raw_mask;
+ const struct rte_flow_item_raw *raw_spec;
+ uint8_t j;
+
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /**
+ * Some fields may not be provided. Set spec to 0 and mask to default
+ * value. So, we need not do anything for the not provided fields later.
+ */
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
+ rule->mask.vlan_tci_mask = 0;
+ rule->mask.flex_bytes_mask = 0;
+
+ /**
+ * The first not void item should be
+ * MAC or IPv4 or TCP or UDP or SCTP.
+ */
+ item = next_no_fuzzy_pattern(pattern, NULL);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ if (signature_match(pattern))
+ rule->mode = RTE_FDIR_MODE_SIGNATURE;
+ else
+ rule->mode = RTE_FDIR_MODE_PERFECT;
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /**
+ * Only support vlan and dst MAC address,
+ * others should be masked.
+ */
+ if (item->spec && !item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ eth_spec = item->spec;
+
+ /* Get the dst MAC. */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ rule->ixgbe_fdir.formatted.inner_mac[j] =
+ eth_spec->dst.addr_bytes[j];
+ }
+ }
+
+
+ if (item->mask) {
+
+ rule->b_mask = TRUE;
+ eth_mask = item->mask;
+
+ /* Ether type should be masked. */
+ if (eth_mask->type ||
+ rule->mode == RTE_FDIR_MODE_SIGNATURE) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* If ethernet has meaning, it means MAC VLAN mode. */
+ rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
+ /**
+ * src MAC address must be masked,
+ * and don't support dst MAC address mask.
+ */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j] ||
+ eth_mask->dst.addr_bytes[j] != 0xFF) {
+ memset(rule, 0,
+ sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* When no VLAN, considered as full mask. */
+ rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+ }
+ /*** If both spec and mask are item,
+ * it means don't care about ETH.
+ * Do nothing.
+ */
+
+ /**
+ * Check if the next not void item is vlan or ipv4.
+ * IPv6 is not supported.
+ */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ } else {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ if (!(item->spec && item->mask)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+
+ rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+
+ rule->mask.vlan_tci_mask = vlan_mask->tci;
+ rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
+ /* More than one tags are not supported. */
+
+ /* Next not void item must be END */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the IPV4 info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_IPV4;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst addresses,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ ipv4_mask = item->mask;
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.next_proto_id ||
+ ipv4_mask->hdr.hdr_checksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
+ rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv4_spec = item->spec;
+ rule->ixgbe_fdir.formatted.dst_ip[0] =
+ ipv4_spec->hdr.dst_addr;
+ rule->ixgbe_fdir.formatted.src_ip[0] =
+ ipv4_spec->hdr.src_addr;
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or END.
+ */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the IPV6 info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_IPV6;
+
+ /**
+ * 1. must signature match
+ * 2. not support last
+ * 3. mask must not null
+ */
+ if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
+ item->last ||
+ !item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ rule->b_mask = TRUE;
+ ipv6_mask = item->mask;
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check src addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
+ rule->mask.src_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.src_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* check dst addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
+ rule->mask.dst_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv6_spec = item->spec;
+ rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or END.
+ */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_TCP;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ tcp_mask = item->mask;
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = tcp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ tcp_spec = item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ tcp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ }
+
+ /* Get the UDP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_UDP;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ udp_mask = item->mask;
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = udp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ udp_spec = item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ udp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ }
+
+ /* Get the SCTP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_SCTP;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* only x550 family only support sctp port */
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a) {
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ sctp_mask = item->mask;
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = sctp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ sctp_spec = item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ sctp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ sctp_spec->hdr.dst_port;
+ }
+ /* others even sctp port is not supported */
+ } else {
+ sctp_mask = item->mask;
+ if (sctp_mask &&
+ (sctp_mask->hdr.src_port ||
+ sctp_mask->hdr.dst_port ||
+ sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the flex byte info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+ /* Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* mask should not be null */
+ if (!item->mask || !item->spec) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ raw_mask = item->mask;
+
+ /* check mask */
+ if (raw_mask->relative != 0x1 ||
+ raw_mask->search != 0x1 ||
+ raw_mask->reserved != 0x0 ||
+ (uint32_t)raw_mask->offset != 0xffffffff ||
+ raw_mask->limit != 0xffff ||
+ raw_mask->length != 0xffff) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ raw_spec = item->spec;
+
+ /* check spec */
+ if (raw_spec->relative != 0 ||
+ raw_spec->search != 0 ||
+ raw_spec->reserved != 0 ||
+ raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
+ raw_spec->offset % 2 ||
+ raw_spec->limit != 0 ||
+ raw_spec->length != 2 ||
+ /* pattern can't be 0xffff */
+ (raw_spec->pattern[0] == 0xff &&
+ raw_spec->pattern[1] == 0xff)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check pattern mask */
+ if (raw_mask->pattern[0] != 0xff ||
+ raw_mask->pattern[1] != 0xff) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mask.flex_bytes_mask = 0xffff;
+ rule->ixgbe_fdir.formatted.flex_bytes =
+ (((uint16_t)raw_spec->pattern[1]) << 8) |
+ raw_spec->pattern[0];
+ rule->flex_bytes_offset = raw_spec->offset;
+ }
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ /* check if the next not void item is END */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
+
+#define NVGRE_PROTOCOL 0x6558
+
+/**
+ * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
+ * And get the flow director filter info BTW.
+ * VxLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/ IPV6.
+ * The third not void item must be NVGRE.
+ * The next not void item must be END.
+ * NVGRE PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/ IPV6.
+ * The third not void item must be NVGRE.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * VxLAN pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * UDP NULL NULL
+ * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
+ * END
+ * NEGRV pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * NVGRE protocol 0x6558 0xFFFF
+ * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ uint32_t j;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /**
+ * Some fields may not be provided. Set spec to 0 and mask to default
+ * value. So, we need not do anything for the not provided fields later.
+ */
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
+ rule->mask.vlan_tci_mask = 0;
+
+ /**
+ * The first not void item should be
+ * MAC or IPv4 or IPv6 or UDP or VxLAN.
+ */
+ item = next_no_void_pattern(pattern, NULL);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
+ item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
+
+ /* Skip MAC. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is IPv4 or IPv6. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is UDP or NVGRE. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip UDP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is VxLAN. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the VxLAN info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+ rule->ixgbe_fdir.formatted.tunnel_type =
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
+
+ /* Only care about VNI, others should be masked. */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+
+ /* Tunnel type is always meaningful. */
+ rule->mask.tunnel_type_mask = 1;
+
+ vxlan_mask = item->mask;
+ if (vxlan_mask->flags) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* VNI must be totally masked or not. */
+ if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
+ vxlan_mask->vni[2]) &&
+ ((vxlan_mask->vni[0] != 0xFF) ||
+ (vxlan_mask->vni[1] != 0xFF) ||
+ (vxlan_mask->vni[2] != 0xFF))) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
+ RTE_DIM(vxlan_mask->vni));
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ vxlan_spec = item->spec;
+ rte_memcpy(((uint8_t *)
+ &rule->ixgbe_fdir.formatted.tni_vni),
+ vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
+ }
+ }
+
+ /* Get the NVGRE info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
+ rule->ixgbe_fdir.formatted.tunnel_type =
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
+
+ /**
+ * Only care about flags0, flags1, protocol and TNI,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+
+ /* Tunnel type is always meaningful. */
+ rule->mask.tunnel_type_mask = 1;
+
+ nvgre_mask = item->mask;
+ if (nvgre_mask->flow_id) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ if (nvgre_mask->protocol &&
+ nvgre_mask->protocol != 0xFFFF) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ if (nvgre_mask->c_k_s_rsvd0_ver &&
+ nvgre_mask->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0xFFFF)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* TNI must be totally masked or not. */
+ if (nvgre_mask->tni[0] &&
+ ((nvgre_mask->tni[0] != 0xFF) ||
+ (nvgre_mask->tni[1] != 0xFF) ||
+ (nvgre_mask->tni[2] != 0xFF))) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* tni is a 24-bits bit field */
+ rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
+ RTE_DIM(nvgre_mask->tni));
+ rule->mask.tunnel_id_mask <<= 8;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ nvgre_spec = item->spec;
+ if (nvgre_spec->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0x2000) &&
+ nvgre_mask->c_k_s_rsvd0_ver) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ if (nvgre_mask->protocol &&
+ nvgre_spec->protocol !=
+ rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* tni is a 24-bits bit field */
+ rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
+ nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
+ }
+ }
+
+ /* check if the next not void item is MAC */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /**
+ * Only support vlan and dst MAC address,
+ * others should be masked.
+ */
+
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ eth_mask = item->mask;
+
+ /* Ether type should be masked. */
+ if (eth_mask->type) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* src MAC address should be masked. */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j]) {
+ memset(rule, 0,
+ sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+ rule->mask.mac_addr_byte_mask = 0;
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ /* It's a per byte mask. */
+ if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+ rule->mask.mac_addr_byte_mask |= 0x1 << j;
+ } else if (eth_mask->dst.addr_bytes[j]) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* When no vlan, considered as full mask. */
+ rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ eth_spec = item->spec;
+
+ /* Get the dst MAC. */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ rule->ixgbe_fdir.formatted.inner_mac[j] =
+ eth_spec->dst.addr_bytes[j];
+ }
+ }
+
+ /**
+ * Check if the next not void item is vlan or ipv4.
+ * IPv6 is not supported.
+ */
+ item = next_no_void_pattern(pattern, item);
+ if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
+ (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ if (!(item->spec && item->mask)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+
+ rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+
+ rule->mask.vlan_tci_mask = vlan_mask->tci;
+ rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
+ /* More than one tags are not supported. */
+
+ /* check if the next not void item is END */
+ item = next_no_void_pattern(pattern, item);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /**
+ * If the tags is 0, it means don't care about the VLAN.
+ * Do nothing.
+ */
+
+ return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
+
+static int
+ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a)
+ return -ENOTSUP;
+
+ ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
+ actions, rule, error);
+
+ if (!ret)
+ goto step_next;
+
+ ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
+ actions, rule, error);
+
+ if (ret)
+ return ret;
+
+step_next:
+
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ rule->fdirflags == IXGBE_FDIRCMD_DROP &&
+ (rule->ixgbe_fdir.formatted.src_port != 0 ||
+ rule->ixgbe_fdir.formatted.dst_port != 0))
+ return -ENOTSUP;
+
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
+ return -ENOTSUP;
+
+ if (rule->queue >= dev->data->nb_rx_queues)
+ return -ENOTSUP;
+
+ return ret;
+}
+
+static int
+ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct ixgbe_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ uint16_t n;
+
+ /**
+ * rss only supports forwarding,
+ * check if the first not void action is RSS.
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rss = (const struct rte_flow_action_rss *)act->conf;
+
+ if (!rss || !rss->queue_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->queue_num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key must be exactly 40 bytes");
+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (ixgbe_rss_conf_init(rss_conf, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
+
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rss_conf, 0, sizeof(struct rte_eth_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* remove the rss filter */
+static void
+ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->rss_info.conf.queue_num)
+ ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
+}
+
+void
+ixgbe_filterlist_init(void)
+{
+ TAILQ_INIT(&filter_ntuple_list);
+ TAILQ_INIT(&filter_ethertype_list);
+ TAILQ_INIT(&filter_syn_list);
+ TAILQ_INIT(&filter_fdir_list);
+ TAILQ_INIT(&filter_l2_tunnel_list);
+ TAILQ_INIT(&filter_rss_list);
+ TAILQ_INIT(&ixgbe_flow_list);
+}
+
+void
+ixgbe_filterlist_flush(void)
+{
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+ struct ixgbe_rss_conf_ele *rss_filter_ptr;
+
+ while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr,
+ entries);
+ rte_free(ntuple_filter_ptr);
+ }
+
+ while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr,
+ entries);
+ rte_free(ethertype_filter_ptr);
+ }
+
+ while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ rte_free(syn_filter_ptr);
+ }
+
+ while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr,
+ entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+
+ while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr,
+ entries);
+ rte_free(fdir_rule_ptr);
+ }
+
+ while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
+ TAILQ_REMOVE(&filter_rss_list,
+ rss_filter_ptr,
+ entries);
+ rte_free(rss_filter_ptr);
+ }
+
+ while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
+ TAILQ_REMOVE(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr,
+ entries);
+ rte_free(ixgbe_flow_mem_ptr->flow);
+ rte_free(ixgbe_flow_mem_ptr);
+ }
+}
+
+/**
+ * Create or destroy a flow rule.
+ * Theorically one rule can match more than one filters.
+ * We will let it use the filter which it hitt first.
+ * So, the sequence matters.
+ */
+static struct rte_flow *
+ixgbe_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_rte_flow_rss_conf rss_conf;
+ struct rte_flow *flow = NULL;
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_rss_conf_ele *rss_filter_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+ uint8_t first_mask = FALSE;
+
+ flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return (struct rte_flow *)flow;
+ }
+ ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
+ sizeof(struct ixgbe_flow_mem), 0);
+ if (!ixgbe_flow_mem_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ rte_free(flow);
+ return NULL;
+ }
+ ixgbe_flow_mem_ptr->flow = flow;
+ TAILQ_INSERT_TAIL(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr, entries);
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+
+#ifdef RTE_LIBRTE_SECURITY
+ /* ESP flow not really a flow*/
+ if (ntuple_filter.proto == IPPROTO_ESP)
+ return flow;
+#endif
+
+ if (!ret) {
+ ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+ if (!ret) {
+ ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
+ sizeof(struct ixgbe_ntuple_filter_ele), 0);
+ if (!ntuple_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
+ TAILQ_INSERT_TAIL(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ flow->rule = ntuple_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret) {
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ &ethertype_filter, TRUE);
+ if (!ret) {
+ ethertype_filter_ptr = rte_zmalloc(
+ "ixgbe_ethertype_filter",
+ sizeof(struct ixgbe_ethertype_filter_ele), 0);
+ if (!ethertype_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&ethertype_filter_ptr->filter_info,
+ &ethertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
+ TAILQ_INSERT_TAIL(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ flow->rule = ethertype_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret) {
+ ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
+ if (!ret) {
+ syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
+ sizeof(struct ixgbe_eth_syn_filter_ele), 0);
+ if (!syn_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&syn_filter_ptr->filter_info,
+ &syn_filter,
+ sizeof(struct rte_eth_syn_filter));
+ TAILQ_INSERT_TAIL(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ flow->rule = syn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_SYN;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+ ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+ if (!ret) {
+ /* A mask cannot be deleted. */
+ if (fdir_rule.b_mask) {
+ if (!fdir_info->mask_added) {
+ /* It's the first time the mask is set. */
+ rte_memcpy(&fdir_info->mask,
+ &fdir_rule.mask,
+ sizeof(struct ixgbe_hw_fdir_mask));
+ fdir_info->flex_bytes_offset =
+ fdir_rule.flex_bytes_offset;
+
+ if (fdir_rule.mask.flex_bytes_mask)
+ ixgbe_fdir_set_flexbytes_offset(dev,
+ fdir_rule.flex_bytes_offset);
+
+ ret = ixgbe_fdir_set_input_mask(dev);
+ if (ret)
+ goto out;
+
+ fdir_info->mask_added = TRUE;
+ first_mask = TRUE;
+ } else {
+ /**
+ * Only support one global mask,
+ * all the masks should be the same.
+ */
+ ret = memcmp(&fdir_info->mask,
+ &fdir_rule.mask,
+ sizeof(struct ixgbe_hw_fdir_mask));
+ if (ret)
+ goto out;
+
+ if (fdir_info->flex_bytes_offset !=
+ fdir_rule.flex_bytes_offset)
+ goto out;
+ }
+ }
+
+ if (fdir_rule.b_spec) {
+ ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
+ FALSE, FALSE);
+ if (!ret) {
+ fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
+ sizeof(struct ixgbe_fdir_rule_ele), 0);
+ if (!fdir_rule_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&fdir_rule_ptr->filter_info,
+ &fdir_rule,
+ sizeof(struct ixgbe_fdir_rule));
+ TAILQ_INSERT_TAIL(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ flow->rule = fdir_rule_ptr;
+ flow->filter_type = RTE_ETH_FILTER_FDIR;
+
+ return flow;
+ }
+
+ if (ret) {
+ /**
+ * clean the mask_added flag if fail to
+ * program
+ **/
+ if (first_mask)
+ fdir_info->mask_added = FALSE;
+ goto out;
+ }
+ }
+
+ goto out;
+ }
+
+ memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
+ actions, &l2_tn_filter, error);
+ if (!ret) {
+ ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
+ if (!ret) {
+ l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
+ sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
+ if (!l2_tn_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&l2_tn_filter_ptr->filter_info,
+ &l2_tn_filter,
+ sizeof(struct rte_eth_l2_tunnel_conf));
+ TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr, entries);
+ flow->rule = l2_tn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
+ return flow;
+ }
+ }
+
+ memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ ret = ixgbe_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
+ if (!ret) {
+ ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
+ if (!ret) {
+ rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
+ sizeof(struct ixgbe_rss_conf_ele), 0);
+ if (!rss_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
+ &rss_conf.conf);
+ TAILQ_INSERT_TAIL(&filter_rss_list,
+ rss_filter_ptr, entries);
+ flow->rule = rss_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_HASH;
+ return flow;
+ }
+ }
+
+out:
+ TAILQ_REMOVE(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr, entries);
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(ixgbe_flow_mem_ptr);
+ rte_free(flow);
+ return NULL;
+}
+
+/**
+ * Check if the flow rule is supported by ixgbe.
+ * It only checkes the format. Don't guarantee the rule can be programmed into
+ * the HW. Because there can be no enough room for the rule.
+ */
+static int
+ixgbe_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ struct ixgbe_rte_flow_rss_conf rss_conf;
+ int ret;
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+ ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+ if (!ret)
+ return 0;
+
+ memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
+ actions, &l2_tn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ ret = ixgbe_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
+
+ return ret;
+}
+
+/* Destroy a flow rule on ixgbe. */
+static int
+ixgbe_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *pmd_flow = flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_rss_conf_ele *rss_filter_ptr;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&ntuple_filter,
+ &ntuple_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&ethertype_filter,
+ &ethertype_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ &ethertype_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&syn_filter,
+ &syn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
+ rte_memcpy(&fdir_rule,
+ &fdir_rule_ptr->filter_info,
+ sizeof(struct ixgbe_fdir_rule));
+ ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ rte_free(fdir_rule_ptr);
+ if (TAILQ_EMPTY(&filter_fdir_list))
+ fdir_info->mask_added = false;
+ }
+ break;
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr, entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_HASH:
+ rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
+ pmd_flow->rule;
+ ret = ixgbe_config_rss_filter(dev,
+ &rss_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_rss_list,
+ rss_filter_ptr, entries);
+ rte_free(rss_filter_ptr);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to destroy flow");
+ return ret;
+ }
+
+ TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
+ if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
+ TAILQ_REMOVE(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr, entries);
+ rte_free(ixgbe_flow_mem_ptr);
+ }
+ }
+ rte_free(flow);
+
+ return ret;
+}
+
+/* Destroy all flow rules associated with a port on ixgbe. */
+static int
+ixgbe_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ ixgbe_clear_all_ntuple_filter(dev);
+ ixgbe_clear_all_ethertype_filter(dev);
+ ixgbe_clear_syn_filter(dev);
+
+ ret = ixgbe_clear_all_fdir_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ ret = ixgbe_clear_all_l2_tn_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ ixgbe_clear_rss_filter(dev);
+
+ ixgbe_filterlist_flush();
+
+ return 0;
+}
+
+const struct rte_flow_ops ixgbe_flow_ops = {
+ .validate = ixgbe_flow_validate,
+ .create = ixgbe_flow_create,
+ .destroy = ixgbe_flow_destroy,
+ .flush = ixgbe_flow_flush,
+};
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c
new file mode 100644
index 00000000..08405f1e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -0,0 +1,728 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_ip.h>
+#include <rte_jhash.h>
+#include <rte_security_driver.h>
+#include <rte_cryptodev.h>
+#include <rte_flow.h>
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_api.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_ipsec.h"
+
+#define RTE_IXGBE_REGISTER_POLL_WAIT_5_MS 5
+
+#define IXGBE_WAIT_RREAD \
+ IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
+ IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_RWRITE \
+ IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
+ IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_TREAD \
+ IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
+ IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_TWRITE \
+ IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
+ IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+
+#define CMP_IP(a, b) (\
+ (a).ipv6[0] == (b).ipv6[0] && \
+ (a).ipv6[1] == (b).ipv6[1] && \
+ (a).ipv6[2] == (b).ipv6[2] && \
+ (a).ipv6[3] == (b).ipv6[3])
+
+
+static void
+ixgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC(
+ dev->data->dev_private);
+ int i = 0;
+
+ /* clear Rx IP table*/
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ uint16_t index = i << 3;
+ uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP | index;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
+ IXGBE_WAIT_RWRITE;
+ }
+
+ /* clear Rx SPI and Rx/Tx SA tables*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ uint32_t index = i << 3;
+ uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | index;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
+ IXGBE_WAIT_RWRITE;
+ reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | index;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
+ IXGBE_WAIT_RWRITE;
+ reg_val = IPSRXIDX_WRITE | index;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
+ IXGBE_WAIT_TWRITE;
+ }
+
+ memset(priv->rx_ip_tbl, 0, sizeof(priv->rx_ip_tbl));
+ memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl));
+ memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl));
+}
+
+static int
+ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session)
+{
+ struct rte_eth_dev *dev = ic_session->dev;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC(
+ dev->data->dev_private);
+ uint32_t reg_val;
+ int sa_index = -1;
+
+ if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+ int i, ip_index = -1;
+
+ /* Find a match in the IP table*/
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (CMP_IP(priv->rx_ip_tbl[i].ip,
+ ic_session->dst_ip)) {
+ ip_index = i;
+ break;
+ }
+ }
+ /* If no match, find a free entry in the IP table*/
+ if (ip_index < 0) {
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (priv->rx_ip_tbl[i].ref_count == 0) {
+ ip_index = i;
+ break;
+ }
+ }
+ }
+
+ /* Fail if no match and no free entries*/
+ if (ip_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "No free entry left in the Rx IP table\n");
+ return -1;
+ }
+
+ /* Find a free entry in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->rx_sa_tbl[i].used == 0) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no free entries*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "No free entry left in the Rx SA table\n");
+ return -1;
+ }
+
+ priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
+ ic_session->dst_ip.ipv6[0];
+ priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
+ ic_session->dst_ip.ipv6[1];
+ priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
+ ic_session->dst_ip.ipv6[2];
+ priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
+ ic_session->dst_ip.ipv6[3];
+ priv->rx_ip_tbl[ip_index].ref_count++;
+
+ priv->rx_sa_tbl[sa_index].spi =
+ rte_cpu_to_be_32(ic_session->spi);
+ priv->rx_sa_tbl[sa_index].ip_index = ip_index;
+ priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
+ if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION)
+ priv->rx_sa_tbl[sa_index].mode |=
+ (IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
+ if (ic_session->dst_ip.type == IPv6)
+ priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
+ priv->rx_sa_tbl[sa_index].used = 1;
+
+ /* write IP table entry*/
+ reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+ IPSRXIDX_TABLE_IP | (ip_index << 3);
+ if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
+ priv->rx_ip_tbl[ip_index].ip.ipv4);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
+ }
+ IXGBE_WAIT_RWRITE;
+
+ /* write SPI table entry*/
+ reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+ IPSRXIDX_TABLE_SPI | (sa_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
+ priv->rx_sa_tbl[sa_index].spi);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX,
+ priv->rx_sa_tbl[sa_index].ip_index);
+ IXGBE_WAIT_RWRITE;
+
+ /* write Key table entry*/
+ reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+ IPSRXIDX_TABLE_KEY | (sa_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0),
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1),
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2),
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3),
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT,
+ rte_cpu_to_be_32(ic_session->salt));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD,
+ priv->rx_sa_tbl[sa_index].mode);
+ IXGBE_WAIT_RWRITE;
+
+ } else { /* sess->dir == RTE_CRYPTO_OUTBOUND */
+ int i;
+
+ /* Find a free entry in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->tx_sa_tbl[i].used == 0) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no free entries*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "No free entry left in the Tx SA table\n");
+ return -1;
+ }
+
+ priv->tx_sa_tbl[sa_index].spi =
+ rte_cpu_to_be_32(ic_session->spi);
+ priv->tx_sa_tbl[i].used = 1;
+ ic_session->sa_index = sa_index;
+
+ /* write Key table entry*/
+ reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | (sa_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0),
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1),
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2),
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3),
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT,
+ rte_cpu_to_be_32(ic_session->salt));
+ IXGBE_WAIT_TWRITE;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_crypto_remove_sa(struct rte_eth_dev *dev,
+ struct ixgbe_crypto_session *ic_session)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_ipsec *priv =
+ IXGBE_DEV_PRIVATE_TO_IPSEC(dev->data->dev_private);
+ uint32_t reg_val;
+ int sa_index = -1;
+
+ if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+ int i, ip_index = -1;
+
+ /* Find a match in the IP table*/
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
+ ip_index = i;
+ break;
+ }
+ }
+
+ /* Fail if no match*/
+ if (ip_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "Entry not found in the Rx IP table\n");
+ return -1;
+ }
+
+ /* Find a free entry in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->rx_sa_tbl[i].spi ==
+ rte_cpu_to_be_32(ic_session->spi)) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no match*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "Entry not found in the Rx SA table\n");
+ return -1;
+ }
+
+ /* Disable and clear Rx SPI and key table table entryes*/
+ reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
+ IXGBE_WAIT_RWRITE;
+ reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | (sa_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
+ IXGBE_WAIT_RWRITE;
+ priv->rx_sa_tbl[sa_index].used = 0;
+
+ /* If last used then clear the IP table entry*/
+ priv->rx_ip_tbl[ip_index].ref_count--;
+ if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
+ reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP |
+ (ip_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
+ }
+ } else { /* session->dir == RTE_CRYPTO_OUTBOUND */
+ int i;
+
+ /* Find a match in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->tx_sa_tbl[i].spi ==
+ rte_cpu_to_be_32(ic_session->spi)) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no match entries*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "Entry not found in the Tx SA table\n");
+ return -1;
+ }
+ reg_val = IPSRXIDX_WRITE | (sa_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
+ IXGBE_WAIT_TWRITE;
+
+ priv->tx_sa_tbl[sa_index].used = 0;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_crypto_create_session(void *device,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *session,
+ struct rte_mempool *mempool)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+ struct ixgbe_crypto_session *ic_session = NULL;
+ struct rte_crypto_aead_xform *aead_xform;
+ struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+
+ if (rte_mempool_get(mempool, (void **)&ic_session)) {
+ PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
+ return -ENOMEM;
+ }
+
+ if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
+ conf->crypto_xform->aead.algo !=
+ RTE_CRYPTO_AEAD_AES_GCM) {
+ PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
+ return -ENOTSUP;
+ }
+ aead_xform = &conf->crypto_xform->aead;
+
+ if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+ ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
+ } else {
+ PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
+ return -ENOTSUP;
+ }
+ } else {
+ if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+ ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
+ } else {
+ PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
+ return -ENOTSUP;
+ }
+ }
+
+ ic_session->key = aead_xform->key.data;
+ memcpy(&ic_session->salt,
+ &aead_xform->key.data[aead_xform->key.length], 4);
+ ic_session->spi = conf->ipsec.spi;
+ ic_session->dev = eth_dev;
+
+ set_sec_session_private_data(session, ic_session);
+
+ if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+ if (ixgbe_crypto_add_sa(ic_session)) {
+ PMD_DRV_LOG(ERR, "Failed to add SA\n");
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+static unsigned int
+ixgbe_crypto_session_get_size(__rte_unused void *device)
+{
+ return sizeof(struct ixgbe_crypto_session);
+}
+
+static int
+ixgbe_crypto_remove_session(void *device,
+ struct rte_security_session *session)
+{
+ struct rte_eth_dev *eth_dev = device;
+ struct ixgbe_crypto_session *ic_session =
+ (struct ixgbe_crypto_session *)
+ get_sec_session_private_data(session);
+ struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);
+
+ if (eth_dev != ic_session->dev) {
+ PMD_DRV_LOG(ERR, "Session not bound to this device\n");
+ return -ENODEV;
+ }
+
+ if (ixgbe_crypto_remove_sa(eth_dev, ic_session)) {
+ PMD_DRV_LOG(ERR, "Failed to remove session\n");
+ return -EFAULT;
+ }
+
+ rte_mempool_put(mempool, (void *)ic_session);
+
+ return 0;
+}
+
+static inline uint8_t
+ixgbe_crypto_compute_pad_len(struct rte_mbuf *m)
+{
+ if (m->nb_segs == 1) {
+ /* 16 bytes ICV + 2 bytes ESP trailer + payload padding size
+ * payload padding size is stored at <pkt_len - 18>
+ */
+ uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *,
+ rte_pktmbuf_pkt_len(m) -
+ (ESP_TRAILER_SIZE + ESP_ICV_SIZE));
+ return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE;
+ }
+ return 0;
+}
+
+static int
+ixgbe_crypto_update_mb(void *device __rte_unused,
+ struct rte_security_session *session,
+ struct rte_mbuf *m, void *params __rte_unused)
+{
+ struct ixgbe_crypto_session *ic_session =
+ get_sec_session_private_data(session);
+ if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+ union ixgbe_crypto_tx_desc_md *mdata =
+ (union ixgbe_crypto_tx_desc_md *)&m->udata64;
+ mdata->enc = 1;
+ mdata->sa_idx = ic_session->sa_index;
+ mdata->pad_len = ixgbe_crypto_compute_pad_len(m);
+ }
+ return 0;
+}
+
+
+static const struct rte_security_capability *
+ixgbe_crypto_capabilities_get(void *device __rte_unused)
+{
+ static const struct rte_cryptodev_capabilities
+ aes_gcm_gmac_crypto_capabilities[] = {
+ { /* AES GMAC (128-bit) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (128-bit) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ {
+ .op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
+ }, }
+ },
+ };
+
+ static const struct rte_security_capability
+ ixgbe_security_capabilities[] = {
+ { /* IPsec Inline Crypto ESP Transport Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+ { /* IPsec Inline Crypto ESP Transport Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = 0
+ },
+ { /* IPsec Inline Crypto ESP Tunnel Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+ { /* IPsec Inline Crypto ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = 0
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+ };
+
+ return ixgbe_security_capabilities;
+}
+
+
+int
+ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+ uint64_t rx_offloads;
+ uint64_t tx_offloads;
+
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+ /* sanity checks */
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
+ return -1;
+ }
+ if (rte_eth_dev_must_keep_crc(rx_offloads)) {
+ PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
+ return -1;
+ }
+
+
+ /* Set IXGBE_SECTXBUFFAF to 0x15 as required in the datasheet*/
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x15);
+
+ /* IFG needs to be set to 3 when we are using security. Otherwise a Tx
+ * hang will occur with heavy traffic.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg = (reg & 0xFFFFFFF0) | 0x3;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+ if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
+ reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ if (reg != 0) {
+ PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+ return -1;
+ }
+ }
+ if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
+ IXGBE_SECTXCTRL_STORE_FORWARD);
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ if (reg != IXGBE_SECTXCTRL_STORE_FORWARD) {
+ PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+ return -1;
+ }
+ }
+
+ ixgbe_crypto_clear_ipsec_tables(dev);
+
+ return 0;
+}
+
+int
+ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+ const void *ip_spec,
+ uint8_t is_ipv6)
+{
+ struct ixgbe_crypto_session *ic_session
+ = get_sec_session_private_data(sess);
+
+ if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+ if (is_ipv6) {
+ const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
+ ic_session->src_ip.type = IPv6;
+ ic_session->dst_ip.type = IPv6;
+ rte_memcpy(ic_session->src_ip.ipv6,
+ ipv6->hdr.src_addr, 16);
+ rte_memcpy(ic_session->dst_ip.ipv6,
+ ipv6->hdr.dst_addr, 16);
+ } else {
+ const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
+ ic_session->src_ip.type = IPv4;
+ ic_session->dst_ip.type = IPv4;
+ ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
+ ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
+ }
+ return ixgbe_crypto_add_sa(ic_session);
+ }
+
+ return 0;
+}
+
+static struct rte_security_ops ixgbe_security_ops = {
+ .session_create = ixgbe_crypto_create_session,
+ .session_update = NULL,
+ .session_get_size = ixgbe_crypto_session_get_size,
+ .session_stats_get = NULL,
+ .session_destroy = ixgbe_crypto_remove_session,
+ .set_pkt_metadata = ixgbe_crypto_update_mb,
+ .capabilities_get = ixgbe_crypto_capabilities_get
+};
+
+static int
+ixgbe_crypto_capable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg_i, reg, capable = 1;
+ /* test if rx crypto can be enabled and then write back initial value*/
+ reg_i = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
+ reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ if (reg != 0)
+ capable = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg_i);
+ return capable;
+}
+
+int
+ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
+{
+ struct rte_security_ctx *ctx = NULL;
+
+ if (ixgbe_crypto_capable(dev)) {
+ ctx = rte_malloc("rte_security_instances_ops",
+ sizeof(struct rte_security_ctx), 0);
+ if (ctx) {
+ ctx->device = (void *)dev;
+ ctx->ops = &ixgbe_security_ops;
+ ctx->sess_cnt = 0;
+ dev->security_ctx = ctx;
+ } else {
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.h
new file mode 100644
index 00000000..c73e1806
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#ifndef IXGBE_IPSEC_H_
+#define IXGBE_IPSEC_H_
+
+#include <rte_security.h>
+
+#define IPSRXIDX_RX_EN 0x00000001
+#define IPSRXIDX_TABLE_IP 0x00000002
+#define IPSRXIDX_TABLE_SPI 0x00000004
+#define IPSRXIDX_TABLE_KEY 0x00000006
+#define IPSRXIDX_WRITE 0x80000000
+#define IPSRXIDX_READ 0x40000000
+#define IPSRXMOD_VALID 0x00000001
+#define IPSRXMOD_PROTO 0x00000004
+#define IPSRXMOD_DECRYPT 0x00000008
+#define IPSRXMOD_IPV6 0x00000010
+#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400
+#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000
+#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
+
+#define IPSEC_MAX_RX_IP_COUNT 128
+#define IPSEC_MAX_SA_COUNT 1024
+
+#define ESP_ICV_SIZE 16
+#define ESP_TRAILER_SIZE 2
+
+enum ixgbe_operation {
+ IXGBE_OP_AUTHENTICATED_ENCRYPTION,
+ IXGBE_OP_AUTHENTICATED_DECRYPTION
+};
+
+enum ixgbe_gcm_key {
+ IXGBE_GCM_KEY_128,
+ IXGBE_GCM_KEY_256
+};
+
+/**
+ * Generic IP address structure
+ * TODO: Find better location for this rte_net.h possibly.
+ **/
+struct ipaddr {
+ enum ipaddr_type {
+ IPv4,
+ IPv6
+ } type;
+ /**< IP Address Type - IPv4/IPv6 */
+
+ union {
+ uint32_t ipv4;
+ uint32_t ipv6[4];
+ };
+};
+
+/** inline crypto crypto private session structure */
+struct ixgbe_crypto_session {
+ enum ixgbe_operation op;
+ uint8_t *key;
+ uint32_t salt;
+ uint32_t sa_index;
+ uint32_t spi;
+ struct ipaddr src_ip;
+ struct ipaddr dst_ip;
+ struct rte_eth_dev *dev;
+} __rte_cache_aligned;
+
+struct ixgbe_crypto_rx_ip_table {
+ struct ipaddr ip;
+ uint16_t ref_count;
+};
+struct ixgbe_crypto_rx_sa_table {
+ uint32_t spi;
+ uint32_t ip_index;
+ uint8_t mode;
+ uint8_t used;
+};
+
+struct ixgbe_crypto_tx_sa_table {
+ uint32_t spi;
+ uint8_t used;
+};
+
+union ixgbe_crypto_tx_desc_md {
+ uint64_t data;
+ struct {
+ /**< SA table index */
+ uint32_t sa_idx;
+ /**< ICV and ESP trailer length */
+ uint8_t pad_len;
+ /**< enable encryption */
+ uint8_t enc;
+ };
+};
+
+struct ixgbe_ipsec {
+ struct ixgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT];
+ struct ixgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT];
+ struct ixgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT];
+};
+
+
+int ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
+int ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
+int ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+ const void *ip_spec,
+ uint8_t is_ipv6);
+
+
+
+#endif /*IXGBE_IPSEC_H_*/
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_logs.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_logs.h
new file mode 100644
index 00000000..dc73e9bd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_logs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _IXGBE_LOGS_H_
+#define _IXGBE_LOGS_H_
+
+extern int ixgbe_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ixgbe_logtype_init, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+extern int ixgbe_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ixgbe_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _IXGBE_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_pf.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_pf.c
new file mode 100644
index 00000000..4b833ffa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_pf.c
@@ -0,0 +1,844 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_memcpy.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+
+#include "base/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+#include "rte_pmd_ixgbe.h"
+
+#define IXGBE_MAX_VFTA (128)
+#define IXGBE_VF_MSG_SIZE_DEFAULT 1
+#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5
+#define IXGBE_ETHERTYPE_FLOW_CTRL 0x8808
+
+static inline uint16_t
+dev_num_vf(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ return pci_dev->max_vfs;
+}
+
+static inline
+int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
+{
+ unsigned char vf_mac_addr[ETHER_ADDR_LEN];
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ uint16_t vfn;
+
+ for (vfn = 0; vfn < vf_num; vfn++) {
+ eth_random_addr(vf_mac_addr);
+ /* keep the random address as default */
+ memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
+ ETHER_ADDR_LEN);
+ }
+
+ return 0;
+}
+
+static inline int
+ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ intr->mask |= IXGBE_EICR_MAILBOX;
+
+ return 0;
+}
+
+void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_vf_info **vfinfo =
+ IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+ struct ixgbe_mirror_info *mirror_info =
+ IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
+ struct ixgbe_uta_info *uta_info =
+ IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ uint16_t vf_num;
+ uint8_t nb_queue;
+
+ PMD_INIT_FUNC_TRACE();
+
+ RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
+ return;
+
+ *vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
+ if (*vfinfo == NULL)
+ rte_panic("Cannot allocate memory for private VF data\n");
+
+ rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
+
+ memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
+ memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
+ hw->mac.mc_filter_type = 0;
+
+ if (vf_num >= ETH_32_POOLS) {
+ nb_queue = 2;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
+ } else if (vf_num >= ETH_16_POOLS) {
+ nb_queue = 4;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+ } else {
+ nb_queue = 8;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+ }
+
+ RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
+
+ ixgbe_vf_perm_addr_gen(eth_dev, vf_num);
+
+ /* init_mailbox_params */
+ hw->mbx.ops.init_params(hw);
+
+ /* set mb interrupt mask */
+ ixgbe_mb_intr_setup(eth_dev);
+}
+
+void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_vf_info **vfinfo;
+ uint16_t vf_num;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
+ RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;
+
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
+ return;
+
+ vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+ if (*vfinfo == NULL)
+ return;
+
+ ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
+ rte_free(*vfinfo);
+ *vfinfo = NULL;
+}
+
+static void
+ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ uint16_t vf_num;
+ int i;
+ struct ixgbe_ethertype_filter ethertype_filter;
+
+ if (!hw->mac.ops.set_ethertype_anti_spoofing) {
+ RTE_LOG(INFO, PMD, "ether type anti-spoofing is not"
+ " supported.\n");
+ return;
+ }
+
+ i = ixgbe_ethertype_filter_lookup(filter_info,
+ IXGBE_ETHERTYPE_FLOW_CTRL);
+ if (i >= 0) {
+ RTE_LOG(ERR, PMD, "A ether type filter"
+ " entity for flow control already exists!\n");
+ return;
+ }
+
+ ethertype_filter.ethertype = IXGBE_ETHERTYPE_FLOW_CTRL;
+ ethertype_filter.etqf = IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ IXGBE_ETHERTYPE_FLOW_CTRL;
+ ethertype_filter.etqs = 0;
+ ethertype_filter.conf = TRUE;
+ i = ixgbe_ethertype_filter_insert(filter_info,
+ &ethertype_filter);
+ if (i < 0) {
+ RTE_LOG(ERR, PMD, "Cannot find an unused ether type filter"
+ " entity for flow control.\n");
+ return;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ IXGBE_ETHERTYPE_FLOW_CTRL));
+
+ vf_num = dev_num_vf(eth_dev);
+ for (i = 0; i < vf_num; i++)
+ hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
+}
+
+int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
+{
+ uint32_t vtctl, fcrth;
+ uint32_t vfre_slot, vfre_offset;
+ uint16_t vf_num;
+ const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
+ const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ uint32_t gpie, gcr_ext;
+ uint32_t vlanctrl;
+ int i;
+
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
+ return -1;
+
+ /* enable VMDq and set the default pool for PF */
+ vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
+ vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
+ << IXGBE_VT_CTL_POOL_SHIFT;
+ vtctl |= IXGBE_VT_CTL_REPLEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
+
+ vfre_offset = vf_num & VFRE_MASK;
+ vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;
+
+ /* Enable pools reserved to PF only */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0U) << vfre_offset);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0U) << vfre_offset);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);
+
+ /* PFDMA Tx General Switch Control Enables VMDQ loopback */
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
+ /* clear VMDq map to perment rar 0 */
+ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+
+ /* clear VMDq map to scan rar 127 */
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);
+
+ /* set VMDq map to default PF pool */
+ hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
+
+ /*
+ * SW msut set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
+ */
+ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
+
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;
+
+ switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
+ case ETH_64_POOLS:
+ gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ break;
+ case ETH_32_POOLS:
+ gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
+ gpie |= IXGBE_GPIE_VTMODE_32;
+ break;
+ case ETH_16_POOLS:
+ gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
+ gpie |= IXGBE_GPIE_VTMODE_16;
+ break;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /*
+ * enable vlan filtering and allow all vlan tags through
+ */
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < IXGBE_MAX_VFTA; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+
+ /* Enable MAC Anti-Spoofing */
+ hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
+
+ /* set flow control threshold to max to avoid tx switch hang */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
+ }
+
+ ixgbe_add_tx_flow_control_drop_filter(eth_dev);
+
+ return 0;
+}
+
+static void
+set_rx_mode(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *dev_data = dev->data;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ uint16_t vfn = dev_num_vf(dev);
+
+ /* Check for Promiscuous and All Multicast modes */
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+
+ /* set all bits that we expect to always be set */
+ fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
+ fctrl |= IXGBE_FCTRL_BAM;
+
+ /* clear the bits we are changing the status of */
+ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+
+ if (dev_data->promiscuous) {
+ fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+ } else {
+ if (dev_data->all_multicast) {
+ fctrl |= IXGBE_FCTRL_MPE;
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else {
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ }
+ }
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
+ ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_ROPE);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+ ixgbe_vlan_hw_strip_config(dev);
+}
+
+static inline void
+ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+ vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+
+ /* reset multicast table array for vf */
+ vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* reset rx mode */
+ set_rx_mode(dev);
+
+ hw->mac.ops.clear_rar(hw, rar_entry);
+}
+
+static inline void
+ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+ uint32_t reg_offset, vf_shift;
+ const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
+ const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+ uint8_t nb_q_per_pool;
+ int i;
+
+ vf_shift = vf & VFRE_MASK;
+ reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
+
+ /* enable transmit for vf */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+ reg |= (reg | (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+
+ /* enable all queue drop for IOV */
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
+ IXGBE_WRITE_FLUSH(hw);
+ reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
+ reg |= i << IXGBE_QDE_IDX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+ }
+
+ /* enable receive for vf */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+ reg |= (reg | (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+
+ /* Enable counting of spoofed packets in the SSVPC register */
+ reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
+ reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
+
+ ixgbe_vf_reset_event(dev, vf);
+}
+
+static int
+ixgbe_enable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t vmolr;
+
+ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+ RTE_LOG(INFO, PMD, "VF %u: enabling multicast promiscuous\n", vf);
+
+ vmolr |= IXGBE_VMOLR_MPE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+ return 0;
+}
+
+static int
+ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t vmolr;
+
+ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+ RTE_LOG(INFO, PMD, "VF %u: disabling multicast promiscuous\n", vf);
+
+ vmolr &= ~IXGBE_VMOLR_MPE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+ return 0;
+}
+
+static int
+ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+
+ ixgbe_vf_reset_msg(dev, vf);
+
+ hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);
+
+ /* Disable multicast promiscuous at reset */
+ ixgbe_disable_vf_mc_promisc(dev, vf);
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+ rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return 0;
+}
+
+static int
+ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+
+ if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
+ rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
+ return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
+ }
+ return -1;
+}
+
+static int
+ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+ IXGBE_VT_MSGINFO_SHIFT;
+ uint16_t *hash_list = (uint16_t *)&msgbuf[1];
+ uint32_t mta_idx;
+ uint32_t mta_shift;
+ const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
+ const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
+ const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
+ uint32_t reg_val;
+ int i;
+
+ /* Disable multicast promiscuous first */
+ ixgbe_disable_vf_mc_promisc(dev, vf);
+
+ /* only so many hash values supported */
+ nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);
+
+ /* store the mc entries */
+ vfinfo->num_vf_mc_hashes = (uint16_t)nb_entries;
+ for (i = 0; i < nb_entries; i++) {
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+ }
+
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ mta_idx = (vfinfo->vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
+ & IXGBE_MTA_INDEX_MASK;
+ mta_shift = vfinfo->vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
+ reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
+ reg_val |= (1 << mta_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ int add, vid;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+
+ add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+
+ if (add)
+ vfinfo[vf].vlan_count++;
+ else if (vfinfo[vf].vlan_count)
+ vfinfo[vf].vlan_count--;
+ return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false);
+}
+
+static int
+ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t new_mtu = msgbuf[1];
+ uint32_t max_frs;
+ int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ /* X540 and X550 support jumbo frames in IOV mode */
+ if (hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a)
+ return -1;
+
+ if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ return -1;
+
+ max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
+ IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
+ if (max_frs < new_mtu) {
+ max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ uint32_t api_version = msgbuf[1];
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+
+ switch (api_version) {
+ case ixgbe_mbox_api_10:
+ case ixgbe_mbox_api_11:
+ case ixgbe_mbox_api_12:
+ vfinfo[vf].api_version = (uint8_t)api_version;
+ return 0;
+ default:
+ break;
+ }
+
+ RTE_LOG(ERR, PMD, "Negotiate invalid api version %u from VF %d\n",
+ api_version, vf);
+
+ return -1;
+}
+
+static int
+ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ struct rte_eth_conf *eth_conf;
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
+ u8 num_tcs;
+ struct ixgbe_hw *hw;
+ u32 vmvir;
+#define IXGBE_VMVIR_VLANA_MASK 0xC0000000
+#define IXGBE_VMVIR_VLAN_VID_MASK 0x00000FFF
+#define IXGBE_VMVIR_VLAN_UP_MASK 0x0000E000
+#define VLAN_PRIO_SHIFT 13
+ u32 vlana;
+ u32 vid;
+ u32 user_priority;
+
+ /* Verify if the PF supports the mbox APIs version or not */
+ switch (vfinfo[vf].api_version) {
+ case ixgbe_mbox_api_20:
+ case ixgbe_mbox_api_11:
+ case ixgbe_mbox_api_12:
+ break;
+ default:
+ return -1;
+ }
+
+ /* Notify VF of Rx and Tx queue number */
+ msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+
+ /* Notify VF of default queue */
+ msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;
+
+ /* Notify VF of number of DCB traffic classes */
+ eth_conf = &dev->data->dev_conf;
+ switch (eth_conf->txmode.mq_mode) {
+ case ETH_MQ_TX_NONE:
+ case ETH_MQ_TX_DCB:
+ RTE_LOG(ERR, PMD, "PF must work with virtualization for VF %u"
+ ", but its tx mode = %d\n", vf,
+ eth_conf->txmode.mq_mode);
+ return -1;
+
+ case ETH_MQ_TX_VMDQ_DCB:
+ vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+ switch (vmdq_dcb_tx_conf->nb_queue_pools) {
+ case ETH_16_POOLS:
+ num_tcs = ETH_8_TCS;
+ break;
+ case ETH_32_POOLS:
+ num_tcs = ETH_4_TCS;
+ break;
+ default:
+ return -1;
+ }
+ break;
+
+ /* ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
+ case ETH_MQ_TX_VMDQ_ONLY:
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
+ vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
+ vid = vmvir & IXGBE_VMVIR_VLAN_VID_MASK;
+ user_priority =
+ (vmvir & IXGBE_VMVIR_VLAN_UP_MASK) >> VLAN_PRIO_SHIFT;
+ if ((vlana == IXGBE_VMVIR_VLANA_DEFAULT) &&
+ ((vid != 0) || (user_priority != 0)))
+ num_tcs = 1;
+ else
+ num_tcs = 0;
+ break;
+
+ default:
+ RTE_LOG(ERR, PMD, "PF work with invalid mode = %d\n",
+ eth_conf->txmode.mq_mode);
+ return -1;
+ }
+ msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
+
+ return 0;
+}
+
+static int
+ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct ixgbe_vf_info *vfinfo =
+ *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ bool enable = !!msgbuf[1]; /* msgbuf contains the flag to enable */
+
+ switch (vfinfo[vf].api_version) {
+ case ixgbe_mbox_api_12:
+ break;
+ default:
+ return -1;
+ }
+
+ if (enable)
+ return ixgbe_enable_vf_mc_promisc(dev, vf);
+ else
+ return ixgbe_disable_vf_mc_promisc(dev, vf);
+}
+
+static int
+ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
+{
+ uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
+ uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
+ uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
+ int32_t retval;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ struct rte_pmd_ixgbe_mb_event_param ret_param;
+
+ retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+ if (retval) {
+ PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
+ return retval;
+ }
+
+ /* do nothing with the message already been processed */
+ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
+ return retval;
+
+ /* flush the ack before we write any messages back */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /**
+ * initialise structure to send to user application
+ * will return response from user in retval field
+ */
+ ret_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
+ ret_param.vfid = vf;
+ ret_param.msg_type = msgbuf[0] & 0xFFFF;
+ ret_param.msg = (void *)msgbuf;
+
+ /* perform VF reset */
+ if (msgbuf[0] == IXGBE_VF_RESET) {
+ int ret = ixgbe_vf_reset(dev, vf, msgbuf);
+
+ vfinfo[vf].clear_to_send = true;
+
+ /* notify application about VF reset */
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+ &ret_param);
+ return ret;
+ }
+
+ /**
+ * ask user application if we allowed to perform those functions
+ * if we get ret_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED
+ * then business as usual,
+ * if 0, do nothing and send ACK to VF
+ * if ret_param.retval > 1, do nothing and send NAK to VF
+ */
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+ &ret_param);
+
+ retval = ret_param.retval;
+
+ /* check & process VF to PF mailbox message */
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case IXGBE_VF_SET_MAC_ADDR:
+ if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
+ retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
+ break;
+ case IXGBE_VF_SET_MULTICAST:
+ if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
+ retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
+ break;
+ case IXGBE_VF_SET_LPE:
+ if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
+ retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
+ break;
+ case IXGBE_VF_SET_VLAN:
+ if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
+ retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
+ break;
+ case IXGBE_VF_API_NEGOTIATE:
+ retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
+ break;
+ case IXGBE_VF_GET_QUEUES:
+ retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
+ msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
+ break;
+ case IXGBE_VF_UPDATE_XCAST_MODE:
+ if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
+ retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf);
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
+ retval = IXGBE_ERR_MBX;
+ break;
+ }
+
+ /* response the VF according to the message process result */
+ if (retval)
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
+
+ ixgbe_write_mbx(hw, msgbuf, msg_size, vf);
+
+ return retval;
+}
+
+static inline void
+ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
+{
+ uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+
+ if (!vfinfo[vf].clear_to_send)
+ ixgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
+{
+ uint16_t vf;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
+ /* check & process vf function level reset */
+ if (!ixgbe_check_for_rst(hw, vf))
+ ixgbe_vf_reset_event(eth_dev, vf);
+
+ /* check & process vf mailbox messages */
+ if (!ixgbe_check_for_msg(hw, vf))
+ ixgbe_rcv_msg_from_vf(eth_dev, vf);
+
+ /* check & process acks from vf */
+ if (!ixgbe_check_for_ack(hw, vf))
+ ixgbe_rcv_ack_from_vf(eth_dev, vf);
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_regs.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_regs.h
new file mode 100644
index 00000000..9c953370
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_regs.h
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Intel Corporation
+ */
+#ifndef _IXGBE_REGS_H_
+#define _IXGBE_REGS_H_
+
+#include "ixgbe_ethdev.h"
+
+struct ixgbe_hw;
+struct reg_info {
+ uint32_t base_addr;
+ uint32_t count;
+ uint32_t stride;
+ const char *name;
+};
+
+static const struct reg_info ixgbe_regs_general[] = {
+ {IXGBE_CTRL, 1, 1, "IXGBE_CTRL"},
+ {IXGBE_STATUS, 1, 1, "IXGBE_STATUS"},
+ {IXGBE_CTRL_EXT, 1, 1, "IXGBE_CTRL_EXT"},
+ {IXGBE_ESDP, 1, 1, "IXGBE_ESDP"},
+ {IXGBE_EODSDP, 1, 1, "IXGBE_EODSDP"},
+ {IXGBE_LEDCTL, 1, 1, "IXGBE_LEDCTL"},
+ {IXGBE_FRTIMER, 1, 1, "IXGBE_FRTIMER"},
+ {IXGBE_TCPTIMER, 1, 1, "IXGBE_TCPTIMER"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbevf_regs_general[] = {
+ {IXGBE_VFCTRL, 1, 1, "IXGBE_VFCTRL"},
+ {IXGBE_VFSTATUS, 1, 1, "IXGBE_VFSTATUS"},
+ {IXGBE_VFLINKS, 1, 1, "IXGBE_VFLINKS"},
+ {IXGBE_VFFRTIMER, 1, 1, "IXGBE_VFFRTIMER"},
+ {IXGBE_VFMAILBOX, 1, 1, "IXGBE_VFMAILBOX"},
+ {IXGBE_VFMBMEM, 16, 4, "IXGBE_VFMBMEM"},
+ {IXGBE_VFRXMEMWRAP, 1, 1, "IXGBE_VFRXMEMWRAP"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_nvm[] = {
+ {IXGBE_EEC, 1, 1, "IXGBE_EEC"},
+ {IXGBE_EERD, 1, 1, "IXGBE_EERD"},
+ {IXGBE_FLA, 1, 1, "IXGBE_FLA"},
+ {IXGBE_EEMNGCTL, 1, 1, "IXGBE_EEMNGCTL"},
+ {IXGBE_EEMNGDATA, 1, 1, "IXGBE_EEMNGDATA"},
+ {IXGBE_FLMNGCTL, 1, 1, "IXGBE_FLMNGCTL"},
+ {IXGBE_FLMNGDATA, 1, 1, "IXGBE_FLMNGDATA"},
+ {IXGBE_FLMNGCNT, 1, 1, "IXGBE_FLMNGCNT"},
+ {IXGBE_FLOP, 1, 1, "IXGBE_FLOP"},
+ {IXGBE_GRC, 1, 1, "IXGBE_GRC"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_interrupt[] = {
+ {IXGBE_EICS, 1, 1, "IXGBE_EICS"},
+ {IXGBE_EIMS, 1, 1, "IXGBE_EIMS"},
+ {IXGBE_EIMC, 1, 1, "IXGBE_EIMC"},
+ {IXGBE_EIAC, 1, 1, "IXGBE_EIAC"},
+ {IXGBE_EIAM, 1, 1, "IXGBE_EIAM"},
+ {IXGBE_EITR(0), 24, 4, "IXGBE_EITR"},
+ {IXGBE_IVAR(0), 24, 4, "IXGBE_IVAR"},
+ {IXGBE_MSIXT, 1, 1, "IXGBE_MSIXT"},
+ {IXGBE_MSIXPBA, 1, 1, "IXGBE_MSIXPBA"},
+ {IXGBE_PBACL(0), 1, 4, "IXGBE_PBACL"},
+ {IXGBE_GPIE, 1, 1, ""},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbevf_regs_interrupt[] = {
+ {IXGBE_VTEICR, 1, 1, "IXGBE_VTEICR"},
+ {IXGBE_VTEICS, 1, 1, "IXGBE_VTEICS"},
+ {IXGBE_VTEIMS, 1, 1, "IXGBE_VTEIMS"},
+ {IXGBE_VTEIMC, 1, 1, "IXGBE_VTEIMC"},
+ {IXGBE_VTEIAM, 1, 1, "IXGBE_VTEIAM"},
+ {IXGBE_VTEITR(0), 2, 4, "IXGBE_VTEITR"},
+ {IXGBE_VTIVAR(0), 4, 4, "IXGBE_VTIVAR"},
+ {IXGBE_VTIVAR_MISC, 1, 1, "IXGBE_VTIVAR_MISC"},
+ {IXGBE_VTRSCINT(0), 2, 4, "IXGBE_VTRSCINT"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_fctl_mac_82598EB[] = {
+ {IXGBE_PFCTOP, 1, 1, ""},
+ {IXGBE_FCTTV(0), 4, 4, ""},
+ {IXGBE_FCRTV, 1, 1, ""},
+ {IXGBE_TFCS, 1, 1, ""},
+ {IXGBE_FCRTL(0), 8, 8, "IXGBE_FCRTL"},
+ {IXGBE_FCRTH(0), 8, 8, "IXGBE_FCRTH"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_fctl_others[] = {
+ {IXGBE_PFCTOP, 1, 1, ""},
+ {IXGBE_FCTTV(0), 4, 4, ""},
+ {IXGBE_FCRTV, 1, 1, ""},
+ {IXGBE_TFCS, 1, 1, ""},
+ {IXGBE_FCRTL_82599(0), 8, 4, "IXGBE_FCRTL"},
+ {IXGBE_FCRTH_82599(0), 8, 4, "IXGBE_FCRTH"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_rxdma[] = {
+ {IXGBE_RDBAL(0), 64, 0x40, "IXGBE_RDBAL"},
+ {IXGBE_RDBAH(0), 64, 0x40, "IXGBE_RDBAH"},
+ {IXGBE_RDLEN(0), 64, 0x40, "IXGBE_RDLEN"},
+ {IXGBE_RDH(0), 64, 0x40, "IXGBE_RDH"},
+ {IXGBE_RDT(0), 64, 0x40, "IXGBE_RDT"},
+ {IXGBE_RXDCTL(0), 64, 0x40, "IXGBE_RXDCTL"},
+ {IXGBE_SRRCTL(0), 16, 0x4, "IXGBE_SRRCTL"},
+ {IXGBE_DCA_RXCTRL(0), 16, 4, "IXGBE_DCA_RXCTRL"},
+ {IXGBE_RDRXCTL, 1, 1, "IXGBE_RDRXCTL"},
+ {IXGBE_RXPBSIZE(0), 8, 4, "IXGBE_RXPBSIZE"},
+ {IXGBE_RXCTRL, 1, 1, "IXGBE_RXCTRL"},
+ {IXGBE_DROPEN, 1, 1, "IXGBE_DROPEN"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbevf_regs_rxdma[] = {
+ {IXGBE_VFRDBAL(0), 8, 0x40, "IXGBE_VFRDBAL"},
+ {IXGBE_VFRDBAH(0), 8, 0x40, "IXGBE_VFRDBAH"},
+ {IXGBE_VFRDLEN(0), 8, 0x40, "IXGBE_VFRDLEN"},
+ {IXGBE_VFRDH(0), 8, 0x40, "IXGBE_VFRDH"},
+ {IXGBE_VFRDT(0), 8, 0x40, "IXGBE_VFRDT"},
+ {IXGBE_VFRXDCTL(0), 8, 0x40, "IXGBE_VFRXDCTL"},
+ {IXGBE_VFSRRCTL(0), 8, 0x40, "IXGBE_VFSRRCTL"},
+ {IXGBE_VFPSRTYPE, 1, 1, "IXGBE_VFPSRTYPE"},
+ {IXGBE_VFRSCCTL(0), 8, 0x40, "IXGBE_VFRSCCTL"},
+ {IXGBE_VFDCA_RXCTRL(0), 8, 0x40, "IXGBE_VFDCA_RXCTRL"},
+ {IXGBE_VFDCA_TXCTRL(0), 8, 0x40, "IXGBE_VFDCA_TXCTRL"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_rx[] = {
+ {IXGBE_RXCSUM, 1, 1, "IXGBE_RXCSUM"},
+ {IXGBE_RFCTL, 1, 1, "IXGBE_RFCTL"},
+ {IXGBE_RAL(0), 16, 8, "IXGBE_RAL"},
+ {IXGBE_RAH(0), 16, 8, "IXGBE_RAH"},
+ {IXGBE_PSRTYPE(0), 1, 4, "IXGBE_PSRTYPE"},
+ {IXGBE_FCTRL, 1, 1, "IXGBE_FCTRL"},
+ {IXGBE_VLNCTRL, 1, 1, "IXGBE_VLNCTRL"},
+ {IXGBE_MCSTCTRL, 1, 1, "IXGBE_MCSTCTRL"},
+ {IXGBE_MRQC, 1, 1, "IXGBE_MRQC"},
+ {IXGBE_VMD_CTL, 1, 1, "IXGBE_VMD_CTL"},
+ {IXGBE_IMIR(0), 8, 4, "IXGBE_IMIR"},
+ {IXGBE_IMIREXT(0), 8, 4, "IXGBE_IMIREXT"},
+ {IXGBE_IMIRVP, 1, 1, "IXGBE_IMIRVP"},
+ {0, 0, 0, ""}
+};
+
+static struct reg_info ixgbe_regs_tx[] = {
+ {IXGBE_TDBAL(0), 32, 0x40, "IXGBE_TDBAL"},
+ {IXGBE_TDBAH(0), 32, 0x40, "IXGBE_TDBAH"},
+ {IXGBE_TDLEN(0), 32, 0x40, "IXGBE_TDLEN"},
+ {IXGBE_TDH(0), 32, 0x40, "IXGBE_TDH"},
+ {IXGBE_TDT(0), 32, 0x40, "IXGBE_TDT"},
+ {IXGBE_TXDCTL(0), 32, 0x40, "IXGBE_TXDCTL"},
+ {IXGBE_TDWBAL(0), 32, 0x40, "IXGBE_TDWBAL"},
+ {IXGBE_TDWBAH(0), 32, 0x40, "IXGBE_TDWBAH"},
+ {IXGBE_DTXCTL, 1, 1, "IXGBE_DTXCTL"},
+ {IXGBE_DCA_TXCTRL(0), 16, 4, "IXGBE_DCA_TXCTRL"},
+ {IXGBE_TXPBSIZE(0), 8, 4, "IXGBE_TXPBSIZE"},
+ {IXGBE_MNGTXMAP, 1, 1, "IXGBE_MNGTXMAP"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbevf_regs_tx[] = {
+ {IXGBE_VFTDBAL(0), 4, 0x40, "IXGBE_VFTDBAL"},
+ {IXGBE_VFTDBAH(0), 4, 0x40, "IXGBE_VFTDBAH"},
+ {IXGBE_VFTDLEN(0), 4, 0x40, "IXGBE_VFTDLEN"},
+ {IXGBE_VFTDH(0), 4, 0x40, "IXGBE_VFTDH"},
+ {IXGBE_VFTDT(0), 4, 0x40, "IXGBE_VFTDT"},
+ {IXGBE_VFTXDCTL(0), 4, 0x40, "IXGBE_VFTXDCTL"},
+ {IXGBE_VFTDWBAL(0), 4, 0x40, "IXGBE_VFTDWBAL"},
+ {IXGBE_VFTDWBAH(0), 4, 0x40, "IXGBE_VFTDWBAH"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_wakeup[] = {
+ {IXGBE_WUC, 1, 1, "IXGBE_WUC"},
+ {IXGBE_WUFC, 1, 1, "IXGBE_WUFC"},
+ {IXGBE_WUS, 1, 1, "IXGBE_WUS"},
+ {IXGBE_IPAV, 1, 1, "IXGBE_IPAV"},
+ {IXGBE_IP4AT, 1, 1, "IXGBE_IP4AT"},
+ {IXGBE_IP6AT, 1, 1, "IXGBE_IP6AT"},
+ {IXGBE_WUPL, 1, 1, "IXGBE_WUPL"},
+ {IXGBE_WUPM, 1, 1, "IXGBE_WUPM"},
+ {IXGBE_FHFT(0), 1, 1, "IXGBE_FHFT"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_dcb[] = {
+ {IXGBE_RMCS, 1, 1, "IXGBE_RMCS"},
+ {IXGBE_DPMCS, 1, 1, "IXGBE_DPMCS"},
+ {IXGBE_PDPMCS, 1, 1, "IXGBE_PDPMCS"},
+ {IXGBE_RUPPBMR, 1, 1, "IXGBE_RUPPBMR"},
+ {IXGBE_RT2CR(0), 8, 4, "IXGBE_RT2CR"},
+ {IXGBE_RT2SR(0), 8, 4, "IXGBE_RT2SR"},
+ {IXGBE_TDTQ2TCCR(0), 8, 0x40, "IXGBE_TDTQ2TCCR"},
+ {IXGBE_TDTQ2TCSR(0), 8, 0x40, "IXGBE_TDTQ2TCSR"},
+ {IXGBE_TDPT2TCCR(0), 8, 4, "IXGBE_TDPT2TCCR"},
+ {IXGBE_TDPT2TCSR(0), 8, 4, "IXGBE_TDPT2TCSR"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_mac[] = {
+ {IXGBE_PCS1GCFIG, 1, 1, "IXGBE_PCS1GCFIG"},
+ {IXGBE_PCS1GLCTL, 1, 1, "IXGBE_PCS1GLCTL"},
+ {IXGBE_PCS1GLSTA, 1, 1, "IXGBE_PCS1GLSTA"},
+ {IXGBE_PCS1GDBG0, 1, 1, "IXGBE_PCS1GDBG0"},
+ {IXGBE_PCS1GDBG1, 1, 1, "IXGBE_PCS1GDBG1"},
+ {IXGBE_PCS1GANA, 1, 1, "IXGBE_PCS1GANA"},
+ {IXGBE_PCS1GANLP, 1, 1, "IXGBE_PCS1GANLP"},
+ {IXGBE_PCS1GANNP, 1, 1, "IXGBE_PCS1GANNP"},
+ {IXGBE_PCS1GANLPNP, 1, 1, "IXGBE_PCS1GANLPNP"},
+ {IXGBE_HLREG0, 1, 1, "IXGBE_HLREG0"},
+ {IXGBE_HLREG1, 1, 1, "IXGBE_HLREG1"},
+ {IXGBE_PAP, 1, 1, "IXGBE_PAP"},
+ {IXGBE_MACA, 1, 1, "IXGBE_MACA"},
+ {IXGBE_APAE, 1, 1, "IXGBE_APAE"},
+ {IXGBE_ARD, 1, 1, "IXGBE_ARD"},
+ {IXGBE_AIS, 1, 1, "IXGBE_AIS"},
+ {IXGBE_MSCA, 1, 1, "IXGBE_MSCA"},
+ {IXGBE_MSRWD, 1, 1, "IXGBE_MSRWD"},
+ {IXGBE_MLADD, 1, 1, "IXGBE_MLADD"},
+ {IXGBE_MHADD, 1, 1, "IXGBE_MHADD"},
+ {IXGBE_TREG, 1, 1, "IXGBE_TREG"},
+ {IXGBE_PCSS1, 1, 1, "IXGBE_PCSS1"},
+ {IXGBE_PCSS2, 1, 1, "IXGBE_PCSS2"},
+ {IXGBE_XPCSS, 1, 1, "IXGBE_XPCSS"},
+ {IXGBE_SERDESC, 1, 1, "IXGBE_SERDESC"},
+ {IXGBE_MACS, 1, 1, "IXGBE_MACS"},
+ {IXGBE_AUTOC, 1, 1, "IXGBE_AUTOC"},
+ {IXGBE_LINKS, 1, 1, "IXGBE_LINKS"},
+ {IXGBE_AUTOC2, 1, 1, "IXGBE_AUTOC2"},
+ {IXGBE_AUTOC3, 1, 1, "IXGBE_AUTOC3"},
+ {IXGBE_ANLP1, 1, 1, "IXGBE_ANLP1"},
+ {IXGBE_ANLP2, 1, 1, "IXGBE_ANLP2"},
+ {IXGBE_ATLASCTL, 1, 1, "IXGBE_ATLASCTL"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_diagnostic[] = {
+ {IXGBE_RDSTATCTL, 1, 1, "IXGBE_RDSTATCTL"},
+ {IXGBE_RDSTAT(0), 8, 4, "IXGBE_RDSTAT"},
+ {IXGBE_RDHMPN, 1, 1, "IXGBE_RDHMPN"},
+ {IXGBE_RIC_DW(0), 4, 4, "IXGBE_RIC_DW"},
+ {IXGBE_RDPROBE, 1, 1, "IXGBE_RDPROBE"},
+ {IXGBE_TDHMPN, 1, 1, "IXGBE_TDHMPN"},
+ {IXGBE_TIC_DW(0), 4, 4, "IXGBE_TIC_DW"},
+ {IXGBE_TDPROBE, 1, 1, "IXGBE_TDPROBE"},
+ {IXGBE_TXBUFCTRL, 1, 1, "IXGBE_TXBUFCTRL"},
+ {IXGBE_TXBUFDATA0, 1, 1, "IXGBE_TXBUFDATA0"},
+ {IXGBE_TXBUFDATA1, 1, 1, "IXGBE_TXBUFDATA1"},
+ {IXGBE_TXBUFDATA2, 1, 1, "IXGBE_TXBUFDATA2"},
+ {IXGBE_TXBUFDATA3, 1, 1, "IXGBE_TXBUFDATA3"},
+ {IXGBE_RXBUFCTRL, 1, 1, "IXGBE_RXBUFCTRL"},
+ {IXGBE_RXBUFDATA0, 1, 1, "IXGBE_RXBUFDATA0"},
+ {IXGBE_RXBUFDATA1, 1, 1, "IXGBE_RXBUFDATA1"},
+ {IXGBE_RXBUFDATA2, 1, 1, "IXGBE_RXBUFDATA2"},
+ {IXGBE_RXBUFDATA3, 1, 1, "IXGBE_RXBUFDATA3"},
+ {IXGBE_PCIE_DIAG(0), 8, 4, ""},
+ {IXGBE_RFVAL, 1, 1, "IXGBE_RFVAL"},
+ {IXGBE_MDFTC1, 1, 1, "IXGBE_MDFTC1"},
+ {IXGBE_MDFTC2, 1, 1, "IXGBE_MDFTC2"},
+ {IXGBE_MDFTFIFO1, 1, 1, "IXGBE_MDFTFIFO1"},
+ {IXGBE_MDFTFIFO2, 1, 1, "IXGBE_MDFTFIFO2"},
+ {IXGBE_MDFTS, 1, 1, "IXGBE_MDFTS"},
+ {IXGBE_PCIEECCCTL, 1, 1, "IXGBE_PCIEECCCTL"},
+ {IXGBE_PBTXECC, 1, 1, "IXGBE_PBTXECC"},
+ {IXGBE_PBRXECC, 1, 1, "IXGBE_PBRXECC"},
+ {IXGBE_MFLCN, 1, 1, "IXGBE_MFLCN"},
+ {0, 0, 0, ""},
+};
+
+/* PF registers */
+static const struct reg_info *ixgbe_regs_others[] = {
+ ixgbe_regs_general,
+ ixgbe_regs_nvm, ixgbe_regs_interrupt,
+ ixgbe_regs_fctl_others,
+ ixgbe_regs_rxdma,
+ ixgbe_regs_rx,
+ ixgbe_regs_tx,
+ ixgbe_regs_wakeup,
+ ixgbe_regs_dcb,
+ ixgbe_regs_mac,
+ ixgbe_regs_diagnostic,
+ NULL};
+
+static const struct reg_info *ixgbe_regs_mac_82598EB[] = {
+ ixgbe_regs_general,
+ ixgbe_regs_nvm,
+ ixgbe_regs_interrupt,
+ ixgbe_regs_fctl_mac_82598EB,
+ ixgbe_regs_rxdma,
+ ixgbe_regs_rx,
+ ixgbe_regs_tx,
+ ixgbe_regs_wakeup,
+ ixgbe_regs_dcb,
+ ixgbe_regs_mac,
+ ixgbe_regs_diagnostic,
+ NULL};
+
+/* VF registers */
+static const struct reg_info *ixgbevf_regs[] = {
+ ixgbevf_regs_general,
+ ixgbevf_regs_interrupt,
+ ixgbevf_regs_rxdma,
+ ixgbevf_regs_tx,
+ NULL};
+
+static inline int
+ixgbe_read_regs(struct ixgbe_hw *hw, const struct reg_info *reg,
+ uint32_t *reg_buf)
+{
+ unsigned int i;
+
+ for (i = 0; i < reg->count; i++)
+ reg_buf[i] = IXGBE_READ_REG(hw,
+ reg->base_addr + i * reg->stride);
+ return reg->count;
+};
+
+static inline int
+ixgbe_regs_group_count(const struct reg_info *regs)
+{
+ int count = 0;
+ int i = 0;
+
+ while (regs[i].count)
+ count += regs[i++].count;
+ return count;
+};
+
+static inline int
+ixgbe_read_regs_group(struct rte_eth_dev *dev, uint32_t *reg_buf,
+ const struct reg_info *regs)
+{
+ int count = 0;
+ int i = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ while (regs[i].count)
+ count += ixgbe_read_regs(hw, &regs[i++], &reg_buf[count]);
+ return count;
+};
+
+#endif /* _IXGBE_REGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c
new file mode 100644
index 00000000..f82b74a9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -0,0 +1,5746 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation.
+ * Copyright 2014 6WIND S.A.
+ */
+
+#include <sys/queue.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_prefetch.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_ip.h>
+#include <rte_net.h>
+
+#include "ixgbe_logs.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_vf.h"
+#include "ixgbe_ethdev.h"
+#include "base/ixgbe_dcb.h"
+#include "base/ixgbe_common.h"
+#include "ixgbe_rxtx.h"
+
+#ifdef RTE_LIBRTE_IEEE1588
+#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define IXGBE_TX_IEEE1588_TMST 0
+#endif
+/* Bit Mask to indicate what bits required for building TX context */
+#define IXGBE_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG | \
+ PKT_TX_MACSEC | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_SEC_OFFLOAD | \
+ IXGBE_TX_IEEE1588_TMST)
+
+#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
+
+#if 1
+#define RTE_PMD_USE_PREFETCH
+#endif
+
+#ifdef RTE_PMD_USE_PREFETCH
+/*
+ * Prefetch a cache line into all cache levels.
+ */
+#define rte_ixgbe_prefetch(p) rte_prefetch0(p)
+#else
+#define rte_ixgbe_prefetch(p) do {} while (0)
+#endif
+
+#ifdef RTE_IXGBE_INC_VECTOR
+uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+#endif
+
+/*********************************************************************
+ *
+ * TX functions
+ *
+ **********************************************************************/
+
+/*
+ * Check for descriptors with their DD bit set and free mbufs.
+ * Return the total number of buffers freed.
+ */
+static __rte_always_inline int
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
+{
+ struct ixgbe_tx_entry *txep;
+ uint32_t status;
+ int i, nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bit on threshold descriptor */
+ status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
+ return 0;
+
+ /*
+ * first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
+
+ for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+ /* free buffers one at a time */
+ m = rte_pktmbuf_prefree_seg(txep->mbuf);
+ txep->mbuf = NULL;
+
+ if (unlikely(m == NULL))
+ continue;
+
+ if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
+ (nb_free > 0 && m->pool != free[0]->pool)) {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void **)free, nb_free);
+ nb_free = 0;
+ }
+
+ free[nb_free++] = m;
+ }
+
+ if (nb_free > 0)
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+/* Populate 4 descriptors with data from 4 mbufs */
+static inline void
+tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t buf_dma_addr;
+ uint32_t pkt_len;
+ int i;
+
+ for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
+ pkt_len = (*pkts)->data_len;
+
+ /* write data to descriptor */
+ txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
+ txdp->read.cmd_type_len =
+ rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
+ txdp->read.olinfo_status =
+ rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ rte_prefetch0(&(*pkts)->pool);
+ }
+}
+
+/* Populate 1 descriptor with data from 1 mbuf */
+static inline void
+tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t buf_dma_addr;
+ uint32_t pkt_len;
+
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
+ pkt_len = (*pkts)->data_len;
+
+ /* write data to descriptor */
+ txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+ txdp->read.cmd_type_len =
+ rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ txdp->read.olinfo_status =
+ rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ rte_prefetch0(&(*pkts)->pool);
+}
+
+/*
+ * Fill H/W descriptor ring with mbuf data.
+ * Copy mbuf pointers to the S/W ring.
+ */
+static inline void
+ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
+ uint16_t nb_pkts)
+{
+ volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
+ struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ const int N_PER_LOOP = 4;
+ const int N_PER_LOOP_MASK = N_PER_LOOP-1;
+ int mainpart, leftover;
+ int i, j;
+
+ /*
+ * Process most of the packets in chunks of N pkts. Any
+ * leftover packets will get processed one at a time.
+ */
+ mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
+ leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
+ for (i = 0; i < mainpart; i += N_PER_LOOP) {
+ /* Copy N mbuf pointers to the S/W ring */
+ for (j = 0; j < N_PER_LOOP; ++j) {
+ (txep + i + j)->mbuf = *(pkts + i + j);
+ }
+ tx4(txdp + i, pkts + i);
+ }
+
+ if (unlikely(leftover > 0)) {
+ for (i = 0; i < leftover; ++i) {
+ (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
+ tx1(txdp + mainpart + i, pkts + mainpart + i);
+ }
+ }
+}
+
+static inline uint16_t
+tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+ volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
+ uint16_t n = 0;
+
+ /*
+ * Begin scanning the H/W ring for done descriptors when the
+ * number of available descriptors drops below tx_free_thresh. For
+ * each done descriptor, free the associated buffer.
+ */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ ixgbe_tx_free_bufs(txq);
+
+ /* Only use descriptors that are available */
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ /* Use exactly nb_pkts descriptors */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ /*
+ * At this point, we know there are enough descriptors in the
+ * ring to transmit all the packets. This assumes that each
+ * mbuf contains a single segment, and that no new offloads
+ * are expected, which would require a new context descriptor.
+ */
+
+ /*
+ * See if we're going to wrap-around. If so, handle the top
+ * of the descriptor ring first, then do the bottom. If not,
+ * the processing looks just like the "bottom" part anyway...
+ */
+ if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
+ n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
+ ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
+
+ /*
+ * We know that the last descriptor in the ring will need to
+ * have its RS bit set because tx_rs_thresh has to be
+ * a divisor of the ring size
+ */
+ tx_r[txq->tx_next_rs].read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ txq->tx_tail = 0;
+ }
+
+ /* Fill H/W descriptor ring with mbuf data */
+ ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+ txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
+
+ /*
+ * Determine if RS bit should be set
+ * This is what we actually want:
+ * if ((txq->tx_tail - 1) >= txq->tx_next_rs)
+ * but instead of subtracting 1 and doing >=, we can just do
+ * greater than without subtracting.
+ */
+ if (txq->tx_tail > txq->tx_next_rs) {
+ tx_r[txq->tx_next_rs].read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+ txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
+ txq->tx_rs_thresh);
+ if (txq->tx_next_rs >= txq->nb_tx_desc)
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+ }
+
+ /*
+ * Check for wrap-around. This would only happen if we used
+ * up to the last descriptor in the ring, no more, no less.
+ */
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+
+ /* update tail pointer */
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+uint16_t
+ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx;
+
+ /* Try to transmit at least chunks of TX_MAX_BURST pkts */
+ if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
+ return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
+
+ /* transmit more than the max burst, in chunks of TX_MAX_BURST */
+ nb_tx = 0;
+ while (nb_pkts) {
+ uint16_t ret, n;
+
+ n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
+ ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
+ nb_tx = (uint16_t)(nb_tx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
+ if (ret < n)
+ break;
+ }
+
+ return nb_tx;
+}
+
+#ifdef RTE_IXGBE_INC_VECTOR
+static uint16_t
+ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+#endif
+
+static inline void
+ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
+ volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
+ uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
+ __rte_unused uint64_t *mdata)
+{
+ uint32_t type_tucmd_mlhl;
+ uint32_t mss_l4len_idx = 0;
+ uint32_t ctx_idx;
+ uint32_t vlan_macip_lens;
+ union ixgbe_tx_offload tx_offload_mask;
+ uint32_t seqnum_seed = 0;
+
+ ctx_idx = txq->ctx_curr;
+ tx_offload_mask.data[0] = 0;
+ tx_offload_mask.data[1] = 0;
+ type_tucmd_mlhl = 0;
+
+ /* Specify which HW CTX to upload. */
+ mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
+
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ tx_offload_mask.vlan_tci |= ~0;
+ }
+
+ /* check if TCP segmentation required for this packet */
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* implies IP cksum in IPv4 */
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
+ IXGBE_ADVTXD_TUCMD_L4T_TCP |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+ else
+ type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
+ IXGBE_ADVTXD_TUCMD_L4T_TCP |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ tx_offload_mask.l4_len |= ~0;
+ tx_offload_mask.tso_segsz |= ~0;
+ mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ } else { /* no TSO, check if hardware checksum is needed */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ }
+
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ case PKT_TX_TCP_CKSUM:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ default:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+ break;
+ }
+ }
+
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ tx_offload_mask.outer_l2_len |= ~0;
+ tx_offload_mask.outer_l3_len |= ~0;
+ tx_offload_mask.l2_len |= ~0;
+ seqnum_seed |= tx_offload.outer_l3_len
+ << IXGBE_ADVTXD_OUTER_IPLEN;
+ seqnum_seed |= tx_offload.l2_len
+ << IXGBE_ADVTXD_TUNNEL_LEN;
+ }
+#ifdef RTE_LIBRTE_SECURITY
+ if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+ union ixgbe_crypto_tx_desc_md *md =
+ (union ixgbe_crypto_tx_desc_md *)mdata;
+ seqnum_seed |=
+ (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
+ type_tucmd_mlhl |= md->enc ?
+ (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+ IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
+ type_tucmd_mlhl |=
+ (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
+ tx_offload_mask.sa_idx |= ~0;
+ tx_offload_mask.sec_pad_len |= ~0;
+ }
+#endif
+
+ txq->ctx_cache[ctx_idx].flags = ol_flags;
+ txq->ctx_cache[ctx_idx].tx_offload.data[0] =
+ tx_offload_mask.data[0] & tx_offload.data[0];
+ txq->ctx_cache[ctx_idx].tx_offload.data[1] =
+ tx_offload_mask.data[1] & tx_offload.data[1];
+ txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
+
+ ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
+ vlan_macip_lens = tx_offload.l3_len;
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ vlan_macip_lens |= (tx_offload.outer_l2_len <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ else
+ vlan_macip_lens |= (tx_offload.l2_len <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
+ ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
+ ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
+ ctx_txd->seqnum_seed = seqnum_seed;
+}
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ */
+static inline uint32_t
+what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
+ union ixgbe_tx_offload tx_offload)
+{
+ /* If match with the current used context */
+ if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
+
+ /* What if match with the next context */
+ txq->ctx_curr ^= 1;
+ if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
+
+ /* Mismatch, use the previous context */
+ return IXGBE_CTX_NUM;
+}
+
+static inline uint32_t
+tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
+{
+ uint32_t tmp = 0;
+
+ if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
+ tmp |= IXGBE_ADVTXD_POPTS_TXSM;
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ tmp |= IXGBE_ADVTXD_POPTS_IXSM;
+ if (ol_flags & PKT_TX_TCP_SEG)
+ tmp |= IXGBE_ADVTXD_POPTS_TXSM;
+ return tmp;
+}
+
+static inline uint32_t
+tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
+{
+ uint32_t cmdtype = 0;
+
+ if (ol_flags & PKT_TX_VLAN_PKT)
+ cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
+ if (ol_flags & PKT_TX_TCP_SEG)
+ cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+ if (ol_flags & PKT_TX_MACSEC)
+ cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
+ return cmdtype;
+}
+
+/* Default RS bit threshold values */
+#ifndef DEFAULT_TX_RS_THRESH
+#define DEFAULT_TX_RS_THRESH 32
+#endif
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
+/* Reset transmit descriptors after they have been used */
+static inline int
+ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
+{
+ struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
+ volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
+ uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+ uint16_t nb_tx_desc = txq->nb_tx_desc;
+ uint16_t desc_to_clean_to;
+ uint16_t nb_tx_to_clean;
+ uint32_t status;
+
+ /* Determine the last descriptor needing to be cleaned */
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
+ if (desc_to_clean_to >= nb_tx_desc)
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+ /* Check to make sure the last descriptor to clean is done */
+ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+ status = txr[desc_to_clean_to].wb.status;
+ if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "TX descriptor %4u is not done"
+ "(port=%d queue=%d)",
+ desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+ /* Failed to clean any descriptors, better luck next time */
+ return -(1);
+ }
+
+ /* Figure out how many descriptors will be cleaned */
+ if (last_desc_cleaned > desc_to_clean_to)
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
+ else
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
+
+ PMD_TX_FREE_LOG(DEBUG,
+ "Cleaning %4u TX descriptors: %4u to %4u "
+ "(port=%d queue=%d)",
+ nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+
+ /*
+ * The last descriptor to clean is done, so that means all the
+ * descriptors from the last descriptor that was cleaned
+ * up to the last descriptor with the RS bit set
+ * are done. Only reset the threshold descriptor.
+ */
+ txr[desc_to_clean_to].wb.status = 0;
+
+ /* Update the txq to reflect the last descriptor that was cleaned */
+ txq->last_desc_cleaned = desc_to_clean_to;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
+
+ /* No Error */
+ return 0;
+}
+
+uint16_t
+ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_tx_entry *sw_ring;
+ struct ixgbe_tx_entry *txe, *txn;
+ volatile union ixgbe_adv_tx_desc *txr;
+ volatile union ixgbe_adv_tx_desc *txd, *txp;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint64_t buf_dma_addr;
+ uint32_t olinfo_status;
+ uint32_t cmd_type_len;
+ uint32_t pkt_len;
+ uint16_t slen;
+ uint64_t ol_flags;
+ uint16_t tx_id;
+ uint16_t tx_last;
+ uint16_t nb_tx;
+ uint16_t nb_used;
+ uint64_t tx_ol_req;
+ uint32_t ctx = 0;
+ uint32_t new_ctx;
+ union ixgbe_tx_offload tx_offload;
+#ifdef RTE_LIBRTE_SECURITY
+ uint8_t use_ipsec;
+#endif
+
+ tx_offload.data[0] = 0;
+ tx_offload.data[1] = 0;
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+ txp = NULL;
+
+ /* Determine if the descriptor ring needs to be cleaned. */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ ixgbe_xmit_cleanup(txq);
+
+ rte_prefetch0(&txe->mbuf->pool);
+
+ /* TX loop */
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ new_ctx = 0;
+ tx_pkt = *tx_pkts++;
+ pkt_len = tx_pkt->pkt_len;
+
+ /*
+ * Determine how many (if any) context descriptors
+ * are needed for offload functionality.
+ */
+ ol_flags = tx_pkt->ol_flags;
+#ifdef RTE_LIBRTE_SECURITY
+ use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
+#endif
+
+ /* If hardware offload required */
+ tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
+ if (tx_ol_req) {
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.vlan_tci = tx_pkt->vlan_tci;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+ tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+ tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+#ifdef RTE_LIBRTE_SECURITY
+ if (use_ipsec) {
+ union ixgbe_crypto_tx_desc_md *ipsec_mdata =
+ (union ixgbe_crypto_tx_desc_md *)
+ &tx_pkt->udata64;
+ tx_offload.sa_idx = ipsec_mdata->sa_idx;
+ tx_offload.sec_pad_len = ipsec_mdata->pad_len;
+ }
+#endif
+
+ /* If new context need be built or reuse the exist ctx. */
+ ctx = what_advctx_update(txq, tx_ol_req,
+ tx_offload);
+ /* Only allocate context descriptor if required*/
+ new_ctx = (ctx == IXGBE_CTX_NUM);
+ ctx = txq->ctx_curr;
+ }
+
+ /*
+ * Keep track of how many descriptors are used this loop
+ * This will always be the number of segments + the number of
+ * Context descriptors required to transmit the packet
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
+
+ if (txp != NULL &&
+ nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
+ /* set RS on the previous packet in the burst */
+ txp->read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
+ /*
+ * The number of descriptors that must be allocated for a
+ * packet is the number of segments of that packet, plus 1
+ * Context Descriptor for the hardware offload, if any.
+ * Determine the last TX descriptor to allocate in the TX ring
+ * for the packet, starting from the current position (tx_id)
+ * in the ring.
+ */
+ tx_last = (uint16_t) (tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+ " tx_first=%u tx_last=%u",
+ (unsigned) txq->port_id,
+ (unsigned) txq->queue_id,
+ (unsigned) pkt_len,
+ (unsigned) tx_id,
+ (unsigned) tx_last);
+
+ /*
+ * Make sure there are enough TX descriptors available to
+ * transmit the entire packet.
+ * nb_used better be less than or equal to txq->tx_rs_thresh
+ */
+ if (nb_used > txq->nb_tx_free) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "Not enough free TX descriptors "
+ "nb_used=%4u nb_free=%4u "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->port_id, txq->queue_id);
+
+ if (ixgbe_xmit_cleanup(txq) != 0) {
+ /* Could not clean any descriptors */
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+
+ /* nb_used better be <= txq->tx_rs_thresh */
+ if (unlikely(nb_used > txq->tx_rs_thresh)) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "The number of descriptors needed to "
+ "transmit the packet exceeds the "
+ "RS bit threshold. This will impact "
+ "performance."
+ "nb_used=%4u nb_free=%4u "
+ "tx_rs_thresh=%4u. "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->tx_rs_thresh,
+ txq->port_id, txq->queue_id);
+ /*
+ * Loop here until there are enough TX
+ * descriptors or until the ring cannot be
+ * cleaned.
+ */
+ while (nb_used > txq->nb_tx_free) {
+ if (ixgbe_xmit_cleanup(txq) != 0) {
+ /*
+ * Could not clean any
+ * descriptors
+ */
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ }
+ }
+ }
+
+ /*
+ * By now there are enough free TX descriptors to transmit
+ * the packet.
+ */
+
+ /*
+ * Set common flags of all TX Data Descriptors.
+ *
+ * The following bits must be set in all Data Descriptors:
+ * - IXGBE_ADVTXD_DTYP_DATA
+ * - IXGBE_ADVTXD_DCMD_DEXT
+ *
+ * The following bits must be set in the first Data Descriptor
+ * and are ignored in the other ones:
+ * - IXGBE_ADVTXD_DCMD_IFCS
+ * - IXGBE_ADVTXD_MAC_1588
+ * - IXGBE_ADVTXD_DCMD_VLE
+ *
+ * The following bits must only be set in the last Data
+ * Descriptor:
+ * - IXGBE_TXD_CMD_EOP
+ *
+ * The following bits can be set in any Data Descriptor, but
+ * are only set in the last Data Descriptor:
+ * - IXGBE_TXD_CMD_RS
+ */
+ cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+#ifdef RTE_LIBRTE_IEEE1588
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
+#endif
+
+ olinfo_status = 0;
+ if (tx_ol_req) {
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* when TSO is on, paylen in descriptor is the
+ * not the packet len but the tcp payload len */
+ pkt_len -= (tx_offload.l2_len +
+ tx_offload.l3_len + tx_offload.l4_len);
+ }
+
+ /*
+ * Setup the TX Advanced Context Descriptor if required
+ */
+ if (new_ctx) {
+ volatile struct ixgbe_adv_tx_context_desc *
+ ctx_txd;
+
+ ctx_txd = (volatile struct
+ ixgbe_adv_tx_context_desc *)
+ &txr[tx_id];
+
+ txn = &sw_ring[txe->next_id];
+ rte_prefetch0(&txn->mbuf->pool);
+
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+ tx_offload, &tx_pkt->udata64);
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ /*
+ * Setup the TX Advanced Data Descriptor,
+ * This path will go through
+ * whatever new/reuse the context descriptor
+ */
+ cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
+ olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
+ olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
+ }
+
+ olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+#ifdef RTE_LIBRTE_SECURITY
+ if (use_ipsec)
+ olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
+#endif
+
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+ rte_prefetch0(&txn->mbuf->pool);
+
+ if (txe->mbuf != NULL)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /*
+ * Set up Transmit Data Descriptor.
+ */
+ slen = m_seg->data_len;
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+ txd->read.buffer_addr =
+ rte_cpu_to_le_64(buf_dma_addr);
+ txd->read.cmd_type_len =
+ rte_cpu_to_le_32(cmd_type_len | slen);
+ txd->read.olinfo_status =
+ rte_cpu_to_le_32(olinfo_status);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg != NULL);
+
+ /*
+ * The last packet data descriptor needs End Of Packet (EOP)
+ */
+ cmd_type_len |= IXGBE_TXD_CMD_EOP;
+ txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
+
+ /* Set RS bit only on threshold packets' last descriptor */
+ if (txq->nb_tx_used >= txq->tx_rs_thresh) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "Setting RS bit on TXD id="
+ "%4u (port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
+
+ cmd_type_len |= IXGBE_TXD_CMD_RS;
+
+ /* Update txq RS bit counters */
+ txq->nb_tx_used = 0;
+ txp = NULL;
+ } else
+ txp = txd;
+
+ txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
+ }
+
+end_of_tx:
+ /* set RS on last packet in the burst */
+ if (txp != NULL)
+ txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
+ rte_wmb();
+
+ /*
+ * Set the Transmit Descriptor Tail (TDT)
+ */
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (unsigned) txq->port_id, (unsigned) txq->queue_id,
+ (unsigned) tx_id, (unsigned) nb_tx);
+ IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
+ txq->tx_tail = tx_id;
+
+ return nb_tx;
+}
+
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /**
+ * Check if packet meets requirements for number of segments
+ *
+ * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
+ * non-TSO
+ */
+
+ if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/*********************************************************************
+ *
+ * RX functions
+ *
+ **********************************************************************/
+
+#define IXGBE_PACKET_TYPE_ETHER 0X00
+#define IXGBE_PACKET_TYPE_IPV4 0X01
+#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
+#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
+#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
+#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
+#define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13
+#define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23
+#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
+#define IXGBE_PACKET_TYPE_IPV6 0X04
+#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
+#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
+#define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44
+#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C
+#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F
+
+#define IXGBE_PACKET_TYPE_NVGRE 0X00
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP 0X11
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT 0X0D
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D
+
+#define IXGBE_PACKET_TYPE_VXLAN 0X80
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4 0X81
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP 0x91
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT 0X8D
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
+
+/**
+ * Use 2 different table for normal packet and tunnel packet
+ * to save the space.
+ */
+const uint32_t
+ ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
+ [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
+ [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4,
+ [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
+ RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+};
+
+const uint32_t
+ ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
+ [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+
+ [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
+};
+
+/* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
+static inline uint32_t
+ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
+{
+
+ if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
+ return RTE_PTYPE_UNKNOWN;
+
+ pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask;
+
+ /* For tunnel packet */
+ if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) {
+ /* Remove the tunnel bit to save the space. */
+ pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
+ return ptype_table_tn[pkt_info];
+ }
+
+ /**
+ * For x550, if it's not tunnel,
+ * tunnel type bit should be set to 0.
+ * Reuse 82599's mask.
+ */
+ pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
+
+ return ptype_table[pkt_info];
+}
+
+static inline uint64_t
+ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
+{
+ static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
+ 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, 0, 0, 0,
+ 0, 0, 0, PKT_RX_FDIR,
+ };
+#ifdef RTE_LIBRTE_IEEE1588
+ static uint64_t ip_pkt_etqf_map[8] = {
+ 0, 0, 0, PKT_RX_IEEE1588_PTP,
+ 0, 0, 0, 0,
+ };
+
+ if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
+ return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
+ ip_rss_types_map[pkt_info & 0XF];
+ else
+ return ip_rss_types_map[pkt_info & 0XF];
+#else
+ return ip_rss_types_map[pkt_info & 0XF];
+#endif
+}
+
+static inline uint64_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
+{
+ uint64_t pkt_flags;
+
+ /*
+ * Check if VLAN present only.
+ * Do not check whether L3/L4 rx checksum done by NIC or not,
+ * That can be found from rte_eth_rxmode.offloads flag
+ */
+ pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
+
+#ifdef RTE_LIBRTE_IEEE1588
+ if (rx_status & IXGBE_RXD_STAT_TMST)
+ pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+#endif
+ return pkt_flags;
+}
+
+static inline uint64_t
+rx_desc_error_to_pkt_flags(uint32_t rx_status)
+{
+ uint64_t pkt_flags;
+
+ /*
+ * Bit 31: IPE, IPv4 checksum error
+ * Bit 30: L4I, L4I integrity error
+ */
+ static uint64_t error_to_pkt_flags_map[4] = {
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+ };
+ pkt_flags = error_to_pkt_flags_map[(rx_status >>
+ IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
+
+ if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
+ (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
+ pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
+ }
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (rx_status & IXGBE_RXD_STAT_SECP) {
+ pkt_flags |= PKT_RX_SEC_OFFLOAD;
+ if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
+ pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ }
+#endif
+
+ return pkt_flags;
+}
+
+/*
+ * LOOK_AHEAD defines how many desc statuses to check beyond the
+ * current descriptor.
+ * It must be a pound define for optimal performance.
+ * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
+ * function only works with LOOK_AHEAD=8.
+ */
+#define LOOK_AHEAD 8
+#if (LOOK_AHEAD != 8)
+#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
+#endif
+static inline int
+ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t pkt_len;
+ uint64_t pkt_flags;
+ int nb_dd;
+ uint32_t s[LOOK_AHEAD];
+ uint32_t pkt_info[LOOK_AHEAD];
+ int i, j, nb_rx = 0;
+ uint32_t status;
+ uint64_t vlan_flags = rxq->vlan_flags;
+
+ /* get references to current descriptor and S/W ring entry */
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxep = &rxq->sw_ring[rxq->rx_tail];
+
+ status = rxdp->wb.upper.status_error;
+ /* check to make sure there is at least 1 packet to receive */
+ if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ return 0;
+
+ /*
+ * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
+ * reference packets that are ready to be received.
+ */
+ for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
+ i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
+ /* Read desc statuses backwards to avoid race condition */
+ for (j = 0; j < LOOK_AHEAD; j++)
+ s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
+
+ rte_smp_rmb();
+
+ /* Compute how many status bits were set */
+ for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
+ (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
+ ;
+
+ for (j = 0; j < nb_dd; j++)
+ pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
+ lo_dword.data);
+
+ nb_rx += nb_dd;
+
+ /* Translate descriptor info to mbuf format */
+ for (j = 0; j < nb_dd; ++j) {
+ mb = rxep[j].mbuf;
+ pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
+ rxq->crc_len;
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
+
+ /* convert descriptor fields to rte mbuf flags */
+ pkt_flags = rx_desc_status_to_pkt_flags(s[j],
+ vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
+ ((uint16_t)pkt_info[j]);
+ mb->ol_flags = pkt_flags;
+ mb->packet_type =
+ ixgbe_rxd_pkt_info_to_pkt_type
+ (pkt_info[j], rxq->pkt_type_mask);
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ mb->hash.rss = rte_le_to_cpu_32(
+ rxdp[j].wb.lower.hi_dword.rss);
+ else if (pkt_flags & PKT_RX_FDIR) {
+ mb->hash.fdir.hash = rte_le_to_cpu_16(
+ rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
+ IXGBE_ATR_HASH_MASK;
+ mb->hash.fdir.id = rte_le_to_cpu_16(
+ rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
+ }
+ }
+
+ /* Move mbuf pointers from the S/W ring to the stage */
+ for (j = 0; j < LOOK_AHEAD; ++j) {
+ rxq->rx_stage[i + j] = rxep[j].mbuf;
+ }
+
+ /* stop if all requested packets could not be received */
+ if (nb_dd != LOOK_AHEAD)
+ break;
+ }
+
+ /* clear software ring entries so we can cleanup correctly */
+ for (i = 0; i < nb_rx; ++i) {
+ rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
+ }
+
+
+ return nb_rx;
+}
+
+static inline int
+ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t alloc_idx;
+ __le64 dma_addr;
+ int diag, i;
+
+ /* allocate buffers in bulk directly into the S/W ring */
+ alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
+ rxep = &rxq->sw_ring[alloc_idx];
+ diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
+ rxq->rx_free_thresh);
+ if (unlikely(diag != 0))
+ return -ENOMEM;
+
+ rxdp = &rxq->rx_ring[alloc_idx];
+ for (i = 0; i < rxq->rx_free_thresh; ++i) {
+ /* populate the static rte mbuf fields */
+ mb = rxep[i].mbuf;
+ if (reset_mbuf) {
+ mb->port = rxq->port_id;
+ }
+
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /* populate the descriptors */
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
+ rxdp[i].read.hdr_addr = 0;
+ rxdp[i].read.pkt_addr = dma_addr;
+ }
+
+ /* update state of internal queue structure */
+ rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
+ if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+ rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
+
+ /* no errors */
+ return 0;
+}
+
+static inline uint16_t
+ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+ int i;
+
+ /* how many packets are ready to return? */
+ nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+ /* copy mbuf pointers to the application's packet list */
+ for (i = 0; i < nb_pkts; ++i)
+ rx_pkts[i] = stage[i];
+
+ /* update internal queue state */
+ rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+ rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
+
+ return nb_pkts;
+}
+
+static inline uint16_t
+rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
+ uint16_t nb_rx = 0;
+
+ /* Any previously recv'd pkts will be returned from the Rx stage */
+ if (rxq->rx_nb_avail)
+ return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ /* Scan the H/W ring for packets to receive */
+ nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
+
+ /* update internal queue state */
+ rxq->rx_next_avail = 0;
+ rxq->rx_nb_avail = nb_rx;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+
+ /* if required, allocate new buffers to replenish descriptors */
+ if (rxq->rx_tail > rxq->rx_free_trigger) {
+ uint16_t cur_free_trigger = rxq->rx_free_trigger;
+
+ if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
+ int i, j;
+
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+
+ /*
+ * Need to rewind any previous receives if we cannot
+ * allocate new buffers to replenish the old ones.
+ */
+ rxq->rx_nb_avail = 0;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
+ for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
+ rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
+
+ return 0;
+ }
+
+ /* update tail pointer */
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+ cur_free_trigger);
+ }
+
+ if (rxq->rx_tail >= rxq->nb_rx_desc)
+ rxq->rx_tail = 0;
+
+ /* received any packets this loop? */
+ if (rxq->rx_nb_avail)
+ return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ return 0;
+}
+
+/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
+uint16_t
+ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_rx;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
+ return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+ /* request is relatively large, chunk it up */
+ nb_rx = 0;
+ while (nb_pkts) {
+ uint16_t ret, n;
+
+ n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
+ ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+ nb_rx = (uint16_t)(nb_rx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
+ if (ret < n)
+ break;
+ }
+
+ return nb_rx;
+}
+
+uint16_t
+ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ixgbe_rx_queue *rxq;
+ volatile union ixgbe_adv_rx_desc *rx_ring;
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_entry *sw_ring;
+ struct ixgbe_rx_entry *rxe;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ union ixgbe_adv_rx_desc rxd;
+ uint64_t dma_addr;
+ uint32_t staterr;
+ uint32_t pkt_info;
+ uint16_t pkt_len;
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint64_t pkt_flags;
+ uint64_t vlan_flags;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+ vlan_flags = rxq->vlan_flags;
+ while (nb_rx < nb_pkts) {
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rxdp->wb.upper.status_error;
+ if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * End of packet.
+ *
+ * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
+ * is likely to be invalid and to be dropped by the various
+ * validation checks performed by the network stack.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+ * This policy do not drop the packet received in the RX
+ * descriptor for which the allocation of a new mbuf failed.
+ * Thus, it allows that packet to be later retrieved if
+ * mbuf have been freed in the mean time.
+ * As a side effect, holding RX descriptors instead of
+ * systematically giving them back to the NIC may lead to
+ * RX ring exhaustion situations.
+ * However, the NIC can gracefully prevent such situations
+ * to happen by sending specific "back-pressure" flow control
+ * frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "ext_err_stat=0x%08x pkt_len=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) staterr,
+ (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_ixgbe_prefetch(&rx_ring[rx_id]);
+ rte_ixgbe_prefetch(&sw_ring[rx_id]);
+ }
+
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+
+ /*
+ * Initialize the returned mbuf.
+ * 1) setup generic mbuf fields:
+ * - number of segments,
+ * - next segment,
+ * - packet length,
+ * - RX port identifier.
+ * 2) integrate hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
+ rxq->crc_len);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->port = rxq->port_id;
+
+ pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+ /* Only valid if PKT_RX_VLAN set in pkt_flags */
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
+ pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags = pkt_flags |
+ ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
+ rxm->ol_flags = pkt_flags;
+ rxm->packet_type =
+ ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
+ rxq->pkt_type_mask);
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ rxm->hash.rss = rte_le_to_cpu_32(
+ rxd.wb.lower.hi_dword.rss);
+ else if (pkt_flags & PKT_RX_FDIR) {
+ rxm->hash.fdir.hash = rte_le_to_cpu_16(
+ rxd.wb.lower.hi_dword.csum_ip.csum) &
+ IXGBE_ATR_HASH_MASK;
+ rxm->hash.fdir.id = rte_le_to_cpu_16(
+ rxd.wb.lower.hi_dword.csum_ip.ip_id);
+ }
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situtation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return nb_rx;
+}
+
+/**
+ * Detect an RSC descriptor.
+ */
+static inline uint32_t
+ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
+{
+ return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
+}
+
+/**
+ * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
+ *
+ * Fill the following info in the HEAD buffer of the Rx cluster:
+ * - RX port identifier
+ * - hardware offload data, if any:
+ * - RSS flag & hash
+ * - IP checksum flag
+ * - VLAN TCI, if any
+ * - error flags
+ * @head HEAD of the packet cluster
+ * @desc HW descriptor to get data from
+ * @rxq Pointer to the Rx queue
+ */
+static inline void
+ixgbe_fill_cluster_head_buf(
+ struct rte_mbuf *head,
+ union ixgbe_adv_rx_desc *desc,
+ struct ixgbe_rx_queue *rxq,
+ uint32_t staterr)
+{
+ uint32_t pkt_info;
+ uint64_t pkt_flags;
+
+ head->port = rxq->port_id;
+
+ /* The vlan_tci field is only valid when PKT_RX_VLAN is
+ * set in the pkt_flags field.
+ */
+ head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
+ pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
+ head->ol_flags = pkt_flags;
+ head->packet_type =
+ ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
+ else if (pkt_flags & PKT_RX_FDIR) {
+ head->hash.fdir.hash =
+ rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
+ & IXGBE_ATR_HASH_MASK;
+ head->hash.fdir.id =
+ rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
+ }
+}
+
+/**
+ * ixgbe_recv_pkts_lro - receive handler for and LRO case.
+ *
+ * @rx_queue Rx queue handle
+ * @rx_pkts table of received packets
+ * @nb_pkts size of rx_pkts table
+ * @bulk_alloc if TRUE bulk allocation is used for a HW ring refilling
+ *
+ * Handles the Rx HW ring completions when RSC feature is configured. Uses an
+ * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
+ *
+ * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
+ * 1) When non-EOP RSC completion arrives:
+ * a) Update the HEAD of the current RSC aggregation cluster with the new
+ * segment's data length.
+ * b) Set the "next" pointer of the current segment to point to the segment
+ * at the NEXTP index.
+ * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
+ * in the sw_rsc_ring.
+ * 2) When EOP arrives we just update the cluster's total length and offload
+ * flags and deliver the cluster up to the upper layers. In our case - put it
+ * in the rx_pkts table.
+ *
+ * Returns the number of received packets/clusters (according to the "bulk
+ * receive" interface).
+ */
+static inline uint16_t
+ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+ bool bulk_alloc)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
+ struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
+ struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0;
+ uint16_t nb_hold = rxq->nb_rx_hold;
+ uint16_t prev_id = rxq->rx_tail;
+
+ while (nb_rx < nb_pkts) {
+ bool eop;
+ struct ixgbe_rx_entry *rxe;
+ struct ixgbe_scattered_rx_entry *sc_entry;
+ struct ixgbe_scattered_rx_entry *next_sc_entry;
+ struct ixgbe_rx_entry *next_rxe = NULL;
+ struct rte_mbuf *first_seg;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ union ixgbe_adv_rx_desc rxd;
+ uint16_t data_len;
+ uint16_t next_id;
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ uint32_t staterr;
+
+next_desc:
+ /*
+ * The code in this whole file uses the volatile pointer to
+ * ensure the read ordering of the status and the rest of the
+ * descriptor fields (on the compiler level only!!!). This is so
+ * UGLY - why not to just use the compiler barrier instead? DPDK
+ * even has the rte_compiler_barrier() for that.
+ *
+ * But most importantly this is just wrong because this doesn't
+ * ensure memory ordering in a general case at all. For
+ * instance, DPDK is supposed to work on Power CPUs where
+ * compiler barrier may just not be enough!
+ *
+ * I tried to write only this function properly to have a
+ * starting point (as a part of an LRO/RSC series) but the
+ * compiler cursed at me when I tried to cast away the
+ * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
+ * keeping it the way it is for now.
+ *
+ * The code in this file is broken in so many other places and
+ * will just not work on a big endian CPU anyway therefore the
+ * lines below will have to be revisited together with the rest
+ * of the ixgbe PMD.
+ *
+ * TODO:
+ * - Get rid of "volatile" crap and let the compiler do its
+ * job.
+ * - Use the proper memory barrier (rte_rmb()) to ensure the
+ * memory ordering below.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
+
+ if (!(staterr & IXGBE_RXDADV_STAT_DD))
+ break;
+
+ rxd = *rxdp;
+
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "staterr=0x%x data_len=%u",
+ rxq->port_id, rxq->queue_id, rx_id, staterr,
+ rte_le_to_cpu_16(rxd.wb.upper.length));
+
+ if (!bulk_alloc) {
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
+ "port_id=%u queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+
+ rte_eth_devices[rxq->port_id].data->
+ rx_mbuf_alloc_failed++;
+ break;
+ }
+ } else if (nb_hold > rxq->rx_free_thresh) {
+ uint16_t next_rdt = rxq->rx_free_trigger;
+
+ if (!ixgbe_rx_alloc_bufs(rxq, false)) {
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+ next_rdt);
+ nb_hold -= rxq->rx_free_thresh;
+ } else {
+ PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
+ "port_id=%u queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+
+ rte_eth_devices[rxq->port_id].data->
+ rx_mbuf_alloc_failed++;
+ break;
+ }
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ eop = staterr & IXGBE_RXDADV_STAT_EOP;
+
+ next_id = rx_id + 1;
+ if (next_id == rxq->nb_rx_desc)
+ next_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 4 pointers
+ * to mbufs.
+ */
+ if ((next_id & 0x3) == 0) {
+ rte_ixgbe_prefetch(&rx_ring[next_id]);
+ rte_ixgbe_prefetch(&sw_ring[next_id]);
+ }
+
+ rxm = rxe->mbuf;
+
+ if (!bulk_alloc) {
+ __le64 dma =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ /*
+ * Update RX descriptor with the physical address of the
+ * new data buffer of the new allocated mbuf.
+ */
+ rxe->mbuf = nmb;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma;
+ } else
+ rxe->mbuf = NULL;
+
+ /*
+ * Set data length & data buffer address of mbuf.
+ */
+ data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
+ rxm->data_len = data_len;
+
+ if (!eop) {
+ uint16_t nextp_id;
+ /*
+ * Get next descriptor index:
+ * - For RSC it's in the NEXTP field.
+ * - For a scattered packet - it's just a following
+ * descriptor.
+ */
+ if (ixgbe_rsc_count(&rxd))
+ nextp_id =
+ (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
+ IXGBE_RXDADV_NEXTP_SHIFT;
+ else
+ nextp_id = next_id;
+
+ next_sc_entry = &sw_sc_ring[nextp_id];
+ next_rxe = &sw_ring[nextp_id];
+ rte_ixgbe_prefetch(next_rxe);
+ }
+
+ sc_entry = &sw_sc_ring[rx_id];
+ first_seg = sc_entry->fbuf;
+ sc_entry->fbuf = NULL;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet and
+ * initialize its context.
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (first_seg == NULL) {
+ first_seg = rxm;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
+ } else {
+ first_seg->pkt_len += data_len;
+ first_seg->nb_segs++;
+ }
+
+ prev_id = rx_id;
+ rx_id = next_id;
+
+ /*
+ * If this is not the last buffer of the received packet, update
+ * the pointer to the first mbuf at the NEXTP entry in the
+ * sw_sc_ring and continue to parse the RX ring.
+ */
+ if (!eop && next_rxe) {
+ rxm->next = next_rxe->mbuf;
+ next_sc_entry->fbuf = first_seg;
+ goto next_desc;
+ }
+
+ /* Initialize the first mbuf of the returned packet */
+ ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
+
+ /*
+ * Deal with the case, when HW CRC srip is disabled.
+ * That can't happen when LRO is enabled, but still could
+ * happen for scattered RX mode.
+ */
+ first_seg->pkt_len -= rxq->crc_len;
+ if (unlikely(rxm->data_len <= rxq->crc_len)) {
+ struct rte_mbuf *lp;
+
+ for (lp = first_seg; lp->next != rxm; lp = lp->next)
+ ;
+
+ first_seg->nb_segs--;
+ lp->data_len -= rxq->crc_len - rxm->data_len;
+ lp->next = NULL;
+ rte_pktmbuf_free_seg(rxm);
+ } else
+ rxm->data_len -= rxq->crc_len;
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = first_seg;
+ }
+
+ /*
+ * Record index of the next RX descriptor to probe.
+ */
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situtation from the
+ * hardware point of view...
+ */
+ if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
+
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
+ nb_hold = 0;
+ }
+
+ rxq->nb_rx_hold = nb_hold;
+ return nb_rx;
+}
+
+uint16_t
+ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
+}
+
+uint16_t
+ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
+}
+
+/*********************************************************************
+ *
+ * Queue management functions
+ *
+ **********************************************************************/
+
+static void __attribute__((cold))
+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
+{
+ unsigned i;
+
+ if (txq->sw_ring != NULL) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+static void __attribute__((cold))
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+{
+ if (txq != NULL &&
+ txq->sw_ring != NULL)
+ rte_free(txq->sw_ring);
+}
+
+static void __attribute__((cold))
+ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
+{
+ if (txq != NULL && txq->ops != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->free_swring(txq);
+ rte_free(txq);
+ }
+}
+
+void __attribute__((cold))
+ixgbe_dev_tx_queue_release(void *txq)
+{
+ ixgbe_tx_queue_release(txq);
+}
+
+/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
+static void __attribute__((cold))
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+{
+ static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
+ struct ixgbe_tx_entry *txe = txq->sw_ring;
+ uint16_t prev, i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->tx_ring[i] = zeroed_desc;
+ }
+
+ /* Initialize SW ring entries */
+ prev = (uint16_t) (txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+
+ txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ txq->tx_tail = 0;
+ txq->nb_tx_used = 0;
+ /*
+ * Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->ctx_curr = 0;
+ memset((void *)&txq->ctx_cache, 0,
+ IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+}
+
+static const struct ixgbe_txq_ops def_txq_ops = {
+ .release_mbufs = ixgbe_tx_queue_release_mbufs,
+ .free_swring = ixgbe_tx_free_swring,
+ .reset = ixgbe_reset_tx_queue,
+};
+
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by primary process and then
+ * in dev_init by secondary process when attaching to an existing ethdev.
+ */
+void __attribute__((cold))
+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
+{
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if ((txq->offloads == 0) &&
+#ifdef RTE_LIBRTE_SECURITY
+ !(txq->using_ipsec) &&
+#endif
+ (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Using simple tx code path");
+ dev->tx_pkt_prepare = NULL;
+#ifdef RTE_IXGBE_INC_VECTOR
+ if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+ (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+ ixgbe_txq_vec_setup(txq) == 0)) {
+ PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
+ dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
+ } else
+#endif
+ dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
+ PMD_INIT_LOG(DEBUG,
+ " - offloads = 0x%" PRIx64,
+ txq->offloads);
+ PMD_INIT_LOG(DEBUG,
+ " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
+ (unsigned long)txq->tx_rs_thresh,
+ (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
+ dev->tx_pkt_burst = ixgbe_xmit_pkts;
+ dev->tx_pkt_prepare = ixgbe_prep_pkts;
+ }
+}
+
+uint64_t
+ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+uint64_t
+ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+ return tx_offload_capa;
+}
+
+int __attribute__((cold))
+ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const struct rte_memzone *tz;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_hw *hw;
+ uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ /*
+ * Validate number of transmit descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of IXGBE_ALIGN.
+ */
+ if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
+ (nb_desc > IXGBE_MAX_RING_DESC) ||
+ (nb_desc < IXGBE_MIN_RING_DESC)) {
+ return -EINVAL;
+ }
+
+ /*
+ * The following two parameters control the setting of the RS bit on
+ * transmit descriptors.
+ * TX descriptors will have their RS bit set after txq->tx_rs_thresh
+ * descriptors have been used.
+ * The TX descriptor ring will be cleaned after txq->tx_free_thresh
+ * descriptors are used or if the number of descriptors required
+ * to transmit a packet is greater than the number of free TX
+ * descriptors.
+ * The following constraints must be satisfied:
+ * tx_rs_thresh must be greater than 0.
+ * tx_rs_thresh must be less than the size of the ring minus 2.
+ * tx_rs_thresh must be less than or equal to tx_free_thresh.
+ * tx_rs_thresh must be a divisor of the ring size.
+ * tx_free_thresh must be greater than 0.
+ * tx_free_thresh must be less than the size of the ring minus 3.
+ * One descriptor in the TX ring is used as a sentinel to avoid a
+ * H/W race condition, hence the maximum threshold constraints.
+ * When set to zero use default values.
+ */
+ tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+ tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+ if (tx_rs_thresh >= (nb_desc - 2)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
+ "of TX descriptors minus 2. (tx_rs_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+ if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less or equal than %u. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+ "tx_free_thresh must be less than the number of "
+ "TX descriptors minus 3. (tx_free_thresh=%u "
+ "port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+ if (tx_rs_thresh > tx_free_thresh) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
+ "tx_free_thresh. (tx_free_thresh=%u "
+ "tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return -(EINVAL);
+ }
+ if ((nb_desc % tx_rs_thresh) != 0) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
+ "number of TX descriptors. (tx_rs_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ /*
+ * If rs_bit_thresh is greater than 1, then TX WTHRESH should be
+ * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
+ * by the NIC and all descriptors are written back after the NIC
+ * accumulates WTHRESH descriptors.
+ */
+ if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
+ PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+ "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->tx_queues[queue_idx] != NULL) {
+ ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ return -ENOMEM;
+
+ /*
+ * Allocate TX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
+ IXGBE_ALIGN, socket_id);
+ if (tz == NULL) {
+ ixgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->tx_rs_thresh = tx_rs_thresh;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->pthresh = tx_conf->tx_thresh.pthresh;
+ txq->hthresh = tx_conf->tx_thresh.hthresh;
+ txq->wthresh = tx_conf->tx_thresh.wthresh;
+ txq->queue_id = queue_idx;
+ txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+ txq->port_id = dev->data->port_id;
+ txq->offloads = offloads;
+ txq->ops = &def_txq_ops;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
+#ifdef RTE_LIBRTE_SECURITY
+ txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_SECURITY);
+#endif
+
+ /*
+ * Modification to set VFTDT for virtual function if vf is detected
+ */
+ if (hw->mac.type == ixgbe_mac_82599_vf ||
+ hw->mac.type == ixgbe_mac_X540_vf ||
+ hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_x_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_a_vf)
+ txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
+ else
+ txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
+
+ txq->tx_ring_phys_addr = tz->iova;
+ txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
+
+ /* Allocate software ring */
+ txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
+ sizeof(struct ixgbe_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL) {
+ ixgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+ /* set up vector or scalar TX function as appropriate */
+ ixgbe_set_tx_function(dev, txq);
+
+ txq->ops->reset(txq);
+
+ dev->data->tx_queues[queue_idx] = txq;
+
+
+ return 0;
+}
+
+/**
+ * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
+ *
+ * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
+ * in the sw_rsc_ring is not set to NULL but rather points to the next
+ * mbuf of this RSC aggregation (that has not been completed yet and still
+ * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we
+ * will just free first "nb_segs" segments of the cluster explicitly by calling
+ * an rte_pktmbuf_free_seg().
+ *
+ * @m scattered cluster head
+ */
+static void __attribute__((cold))
+ixgbe_free_sc_cluster(struct rte_mbuf *m)
+{
+ uint16_t i, nb_segs = m->nb_segs;
+ struct rte_mbuf *next_seg;
+
+ for (i = 0; i < nb_segs; i++) {
+ next_seg = m->next;
+ rte_pktmbuf_free_seg(m);
+ m = next_seg;
+ }
+}
+
+static void __attribute__((cold))
+ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
+{
+ unsigned i;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+ /* SSE Vector driver has a different way of releasing mbufs. */
+ if (rxq->rx_using_sse) {
+ ixgbe_rx_queue_release_mbufs_vec(rxq);
+ return;
+ }
+#endif
+
+ if (rxq->sw_ring != NULL) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ if (rxq->rx_nb_avail) {
+ for (i = 0; i < rxq->rx_nb_avail; ++i) {
+ struct rte_mbuf *mb;
+
+ mb = rxq->rx_stage[rxq->rx_next_avail + i];
+ rte_pktmbuf_free_seg(mb);
+ }
+ rxq->rx_nb_avail = 0;
+ }
+ }
+
+ if (rxq->sw_sc_ring)
+ for (i = 0; i < rxq->nb_rx_desc; i++)
+ if (rxq->sw_sc_ring[i].fbuf) {
+ ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
+ rxq->sw_sc_ring[i].fbuf = NULL;
+ }
+}
+
+static void __attribute__((cold))
+ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
+{
+ if (rxq != NULL) {
+ ixgbe_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq->sw_sc_ring);
+ rte_free(rxq);
+ }
+}
+
+void __attribute__((cold))
+ixgbe_dev_rx_queue_release(void *rxq)
+{
+ ixgbe_rx_queue_release(rxq);
+}
+
+/*
+ * Check if Rx Burst Bulk Alloc function can be used.
+ * Return
+ * 0: the preconditions are satisfied and the bulk allocation function
+ * can be used.
+ * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
+ * function must be used.
+ */
+static inline int __attribute__((cold))
+check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
+{
+ int ret = 0;
+
+ /*
+ * Make sure the following pre-conditions are satisfied:
+ * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
+ * rxq->rx_free_thresh < rxq->nb_rx_desc
+ * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
+ * Scattered packets are not supported. This should be checked
+ * outside of this function.
+ */
+ if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
+ rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
+ ret = -EINVAL;
+ } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "rxq->nb_rx_desc=%d",
+ rxq->rx_free_thresh, rxq->nb_rx_desc);
+ ret = -EINVAL;
+ } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "rxq->rx_free_thresh=%d",
+ rxq->nb_rx_desc, rxq->rx_free_thresh);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Reset dynamic ixgbe_rx_queue fields back to defaults */
+static void __attribute__((cold))
+ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
+{
+ static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
+ unsigned i;
+ uint16_t len = rxq->nb_rx_desc;
+
+ /*
+ * By default, the Rx queue setup function allocates enough memory for
+ * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
+ * extra memory at the end of the descriptor ring to be zero'd out.
+ */
+ if (adapter->rx_bulk_alloc_allowed)
+ /* zero out extra memory */
+ len += RTE_PMD_IXGBE_RX_MAX_BURST;
+
+ /*
+ * Zero out HW ring memory. Zero out extra memory at the end of
+ * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
+ * reads extra memory as zeros.
+ */
+ for (i = 0; i < len; i++) {
+ rxq->rx_ring[i] = zeroed_desc;
+ }
+
+ /*
+ * initialize extra software ring entries. Space for these extra
+ * entries is always allocated
+ */
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+ for (i = rxq->nb_rx_desc; i < len; ++i) {
+ rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
+ }
+
+ rxq->rx_nb_avail = 0;
+ rxq->rx_next_avail = 0;
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
+#endif
+}
+
+static int
+ixgbe_is_vf(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+uint64_t
+ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ return offloads;
+}
+
+uint64_t
+ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ if (ixgbe_is_vf(dev) == 0)
+ offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+ /*
+ * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
+ * mode.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540) &&
+ !RTE_ETH_DEV_SRIOV(dev).active)
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+ return offloads;
+}
+
+int __attribute__((cold))
+ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ const struct rte_memzone *rz;
+ struct ixgbe_rx_queue *rxq;
+ struct ixgbe_hw *hw;
+ uint16_t len;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+ /*
+ * Validate number of receive descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of IXGBE_ALIGN.
+ */
+ if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
+ (nb_desc > IXGBE_MAX_RING_DESC) ||
+ (nb_desc < IXGBE_MIN_RING_DESC)) {
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ return -ENOMEM;
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+ rxq->port_id = dev->data->port_id;
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+ rxq->drop_en = rx_conf->rx_drop_en;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->offloads = offloads;
+
+ /*
+ * The packet type in RX descriptor is different for different NICs.
+ * Some bits are used for x550 but reserved for other NICS.
+ * So set different masks for different NICs.
+ */
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a ||
+ hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_x_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_a_vf)
+ rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550;
+ else
+ rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
+
+ /*
+ * Allocate RX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ RX_RING_SZ, IXGBE_ALIGN, socket_id);
+ if (rz == NULL) {
+ ixgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ /*
+ * Zero init all the descriptors in the ring.
+ */
+ memset(rz->addr, 0, RX_RING_SZ);
+
+ /*
+ * Modified to setup VFRDT for Virtual Function
+ */
+ if (hw->mac.type == ixgbe_mac_82599_vf ||
+ hw->mac.type == ixgbe_mac_X540_vf ||
+ hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_x_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_a_vf) {
+ rxq->rdt_reg_addr =
+ IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
+ rxq->rdh_reg_addr =
+ IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
+ } else {
+ rxq->rdt_reg_addr =
+ IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
+ rxq->rdh_reg_addr =
+ IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
+ }
+
+ rxq->rx_ring_phys_addr = rz->iova;
+ rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
+
+ /*
+ * Certain constraints must be met in order to use the bulk buffer
+ * allocation Rx burst function. If any of Rx queues doesn't meet them
+ * the feature should be disabled for the whole port.
+ */
+ if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
+ PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
+ "preconditions - canceling the feature for "
+ "the whole port[%d]",
+ rxq->queue_id, rxq->port_id);
+ adapter->rx_bulk_alloc_allowed = false;
+ }
+
+ /*
+ * Allocate software ring. Allow for space at the end of the
+ * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
+ * function does not access an invalid memory region.
+ */
+ len = nb_desc;
+ if (adapter->rx_bulk_alloc_allowed)
+ len += RTE_PMD_IXGBE_RX_MAX_BURST;
+
+ rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
+ sizeof(struct ixgbe_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->sw_ring) {
+ ixgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ /*
+ * Always allocate even if it's not going to be needed in order to
+ * simplify the code.
+ *
+ * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
+ * be requested in ixgbe_dev_rx_init(), which is called later from
+ * dev_start() flow.
+ */
+ rxq->sw_sc_ring =
+ rte_zmalloc_socket("rxq->sw_sc_ring",
+ sizeof(struct ixgbe_scattered_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->sw_sc_ring) {
+ ixgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
+ "dma_addr=0x%"PRIx64,
+ rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
+ rxq->rx_ring_phys_addr);
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
+ "preconditions - canceling the feature for "
+ "the whole port[%d]",
+ rxq->queue_id, rxq->port_id);
+ adapter->rx_vec_allowed = false;
+ } else
+ ixgbe_rxq_vec_setup(rxq);
+
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ ixgbe_reset_rx_queue(adapter, rxq);
+
+ return 0;
+}
+
+uint32_t
+ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define IXGBE_RXQ_SCAN_INTERVAL 4
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_queue *rxq;
+ uint32_t desc = 0;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+
+ while ((desc < rxq->nb_rx_desc) &&
+ (rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
+ desc += IXGBE_RXQ_SCAN_INTERVAL;
+ rxdp += IXGBE_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
+
+int
+ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return 0;
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ rxdp = &rxq->rx_ring[desc];
+ return !!(rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
+}
+
+int
+ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ volatile uint32_t *status;
+ uint32_t nb_hold, desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+ if (rxq->rx_using_sse)
+ nb_hold = rxq->rxrearm_nb;
+ else
+#endif
+ nb_hold = rxq->nb_rx_hold;
+ if (offset >= rxq->nb_rx_desc - nb_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.upper.status_error;
+ if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct ixgbe_tx_queue *txq = tx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].wb.status;
+ if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+void __attribute__((cold))
+ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ unsigned i;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (txq != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->reset(txq);
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (rxq != NULL) {
+ ixgbe_rx_queue_release_mbufs(rxq);
+ ixgbe_reset_rx_queue(adapter, rxq);
+ }
+ }
+}
+
+void
+ixgbe_dev_free_queues(struct rte_eth_dev *dev)
+{
+ unsigned i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+/*********************************************************************
+ *
+ * Device RX/TX init functions
+ *
+ **********************************************************************/
+
+/**
+ * Receive Side Scaling (RSS)
+ * See section 7.1.2.8 in the following document:
+ * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
+ *
+ * Principles:
+ * The source and destination IP addresses of the IP header and the source
+ * and destination ports of TCP/UDP headers, if any, of received packets are
+ * hashed against a configurable random key to compute a 32-bit RSS hash result.
+ * The seven (7) LSBs of the 32-bit hash result are used as an index into a
+ * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
+ * RSS output index which is used as the RX queue index where to store the
+ * received packets.
+ * The following output is supplied in the RX write-back descriptor:
+ * - 32-bit result of the Microsoft RSS hash function,
+ * - 4-bit RSS type field.
+ */
+
+/*
+ * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
+ * Used as the default key.
+ */
+static uint8_t rss_intel_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+static void
+ixgbe_rss_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ uint32_t mrqc;
+ uint32_t mrqc_reg;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+ mrqc = IXGBE_READ_REG(hw, mrqc_reg);
+ mrqc &= ~IXGBE_MRQC_RSSEN;
+ IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
+}
+
+static void
+ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
+{
+ uint8_t *hash_key;
+ uint32_t mrqc;
+ uint32_t rss_key;
+ uint64_t rss_hf;
+ uint16_t i;
+ uint32_t mrqc_reg;
+ uint32_t rssrk_reg;
+
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+ rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
+
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL) {
+ /* Fill in RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = hash_key[(i * 4)];
+ rss_key |= hash_key[(i * 4) + 1] << 8;
+ rss_key |= hash_key[(i * 4) + 2] << 16;
+ rss_key |= hash_key[(i * 4) + 3] << 24;
+ IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
+ }
+ }
+
+ /* Set configured hashing protocols in MRQC register */
+ rss_hf = rss_conf->rss_hf;
+ mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
+ if (rss_hf & ETH_RSS_IPV4)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
+ if (rss_hf & ETH_RSS_IPV6)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
+ if (rss_hf & ETH_RSS_IPV6_EX)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+ if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
+ if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
+ IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
+}
+
+int
+ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct ixgbe_hw *hw;
+ uint32_t mrqc;
+ uint64_t rss_hf;
+ uint32_t mrqc_reg;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!ixgbe_rss_update_sp(hw->mac.type)) {
+ PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
+ "NIC.");
+ return -ENOTSUP;
+ }
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+
+ /*
+ * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
+ * "RSS enabling cannot be done dynamically while it must be
+ * preceded by a software reset"
+ * Before changing anything, first check that the update RSS operation
+ * does not attempt to disable RSS, if RSS was enabled at
+ * initialization time, or does not attempt to enable RSS, if RSS was
+ * disabled at initialization time.
+ */
+ rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
+ mrqc = IXGBE_READ_REG(hw, mrqc_reg);
+ if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
+ if (rss_hf != 0) /* Enable RSS */
+ return -(EINVAL);
+ return 0; /* Nothing to do */
+ }
+ /* RSS enabled */
+ if (rss_hf == 0) /* Disable RSS */
+ return -(EINVAL);
+ ixgbe_hw_rss_hash_set(hw, rss_conf);
+ return 0;
+}
+
+int
+ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct ixgbe_hw *hw;
+ uint8_t *hash_key;
+ uint32_t mrqc;
+ uint32_t rss_key;
+ uint64_t rss_hf;
+ uint16_t i;
+ uint32_t mrqc_reg;
+ uint32_t rssrk_reg;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+ rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL) {
+ /* Return RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
+ hash_key[(i * 4)] = rss_key & 0x000000FF;
+ hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
+ hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
+ hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
+ }
+ }
+
+ /* Get RSS functions configured in MRQC register */
+ mrqc = IXGBE_READ_REG(hw, mrqc_reg);
+ if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
+ rss_conf->rss_hf = 0;
+ return 0;
+ }
+ rss_hf = 0;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
+ rss_hf |= ETH_RSS_IPV4;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
+ rss_hf |= ETH_RSS_IPV6;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
+ rss_hf |= ETH_RSS_IPV6_EX;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
+ rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
+ rss_hf |= ETH_RSS_IPV6_UDP_EX;
+ rss_conf->rss_hf = rss_hf;
+ return 0;
+}
+
+static void
+ixgbe_rss_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rss_conf rss_conf;
+ struct ixgbe_hw *hw;
+ uint32_t reta;
+ uint16_t i;
+ uint16_t j;
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
+ /*
+ * Fill in redirection table
+ * The byte-swap is needed because NIC registers are in
+ * little-endian order.
+ */
+ reta = 0;
+ for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
+ if (j == dev->data->nb_rx_queues)
+ j = 0;
+ reta = (reta << 8) | j;
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(hw, reta_reg,
+ rte_bswap32(reta));
+ }
+
+ /*
+ * Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
+ ixgbe_rss_disable(dev);
+ return;
+ }
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ ixgbe_hw_rss_hash_set(hw, &rss_conf);
+}
+
+#define NUM_VFTA_REGISTERS 128
+#define NIC_RX_BUFFER_SIZE 0x200
+#define X550_RX_BUFFER_SIZE 0x180
+
+static void
+ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_vmdq_dcb_conf *cfg;
+ struct ixgbe_hw *hw;
+ enum rte_eth_nb_pools num_pools;
+ uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
+ uint16_t pbsize;
+ uint8_t nb_tcs; /* number of traffic classes */
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ num_pools = cfg->nb_queue_pools;
+ /* Check we have a valid number of pools */
+ if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+ ixgbe_rss_disable(dev);
+ return;
+ }
+ /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
+ nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+
+ /*
+ * RXPBSIZE
+ * split rx buffer up into sections, each for 1 traffic class
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
+ break;
+ default:
+ pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ break;
+ }
+ for (i = 0; i < nb_tcs; i++) {
+ uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+
+ rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
+ /* clear 10 bits. */
+ rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+ }
+ /* zero alloc all unused TCs */
+ for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+
+ rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
+ /* clear 10 bits. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+ }
+
+ /* MRQC: enable vmdq and dcb */
+ mrqc = (num_pools == ETH_16_POOLS) ?
+ IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+ /* PFVTCTL: turn on virtualisation and set the default pool */
+ vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
+ if (cfg->enable_default_pool) {
+ vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
+ } else {
+ vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
+
+ /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
+ queue_mapping = 0;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ /*
+ * mapping is done with 3 bits per priority,
+ * so shift by i*3 each time
+ */
+ queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
+
+ /* RTRPCS: DCB related */
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
+
+ /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+ }
+
+ /* VFRE: pool enabling for receive - 16 or 32 */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
+ num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+ /*
+ * MPSAR - allow pools to read specific mac addresses
+ * In this case, all pools should be able to read from mac addr 0
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
+
+ /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
+ for (i = 0; i < cfg->nb_pool_maps; i++) {
+ /* set vlan id in VF register and set the valid bit */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
+ (cfg->pool_map[i].vlan_id & 0xFFF)));
+ /*
+ * Put the allowed pools in VFB reg. As we only have 16 or 32
+ * pools, we only need to use the first half of the register
+ * i.e. bits 0-31
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
+ }
+}
+
+/**
+ * ixgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
+ * @dev: pointer to eth_dev structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ uint32_t reg;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ /* Disable the Tx desc arbiter so that MTQC can be changed */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ /* Enable DCB for Tx with 8 TCs */
+ if (dcb_config->num_tcs.pg_tcs == 8) {
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ } else {
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ }
+ if (dcb_config->vt_mode)
+ reg |= IXGBE_MTQC_VT_ENA;
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+ /* Enable the Tx desc arbiter */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ /* Enable Security TX Buffer IFG for DCB */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+ }
+}
+
+/**
+ * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ /*PF VF Transmit Enable*/
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
+ vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+ /*Configure general DCB TX parameters*/
+ ixgbe_dcb_tx_hw_config(dev, dcb_config);
+}
+
+static void
+ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
+ if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
+ dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ } else {
+ dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ }
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = vmdq_rx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
+
+static void
+ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
+ if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
+ dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ } else {
+ dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ }
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = vmdq_tx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
+
+static void
+ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_dcb_rx_conf *rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
+ dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = rx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
+
+static void
+ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_dcb_tx_conf *tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
+ dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = tx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
+
+/**
+ * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
+ * @dev: pointer to eth_dev structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ uint32_t reg;
+ uint32_t vlanctrl;
+ uint8_t i;
+ uint32_t q;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; WSP)
+ */
+ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ if (dcb_config->num_tcs.pg_tcs == 4) {
+ if (dcb_config->vt_mode)
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_VMDQRT4TCEN;
+ else {
+ /* no matter the mode is DCB or DCB_RSS, just
+ * set the MRQE to RSSXTCEN. RSS is controlled
+ * by RSS_FIELD
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RTRSS4TCEN;
+ }
+ }
+ if (dcb_config->num_tcs.pg_tcs == 8) {
+ if (dcb_config->vt_mode)
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_VMDQRT8TCEN;
+ else {
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RTRSS8TCEN;
+ }
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /* Disable drop for all queues in VMDQ mode*/
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT)));
+ } else {
+ /* Enable drop for all queues in SRIOV mode */
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT) |
+ IXGBE_QDE_ENABLE));
+ }
+ }
+
+ /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+ }
+
+ /*
+ * Configure Rx packet plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+}
+
+static void
+ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
+ uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+ tsa, map);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
+ uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
+ break;
+ default:
+ break;
+ }
+}
+
+#define DCB_RX_CONFIG 1
+#define DCB_TX_CONFIG 1
+#define DCB_TX_PB 1024
+/**
+ * ixgbe_dcb_hw_configure - Enable DCB and configure
+ * general DCB in VT mode and non-VT mode parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static int
+ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ int ret = 0;
+ uint8_t i, pfc_en, nb_tcs;
+ uint16_t pbsize, rx_buffer_size;
+ uint8_t config_dcb_rx = 0;
+ uint8_t config_dcb_tx = 0;
+ uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ struct ixgbe_dcb_tc_config *tc;
+ uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_bw_conf *bw_conf =
+ IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
+
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_DCB:
+ dcb_config->vt_mode = true;
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ config_dcb_rx = DCB_RX_CONFIG;
+ /*
+ *get dcb and VT rx configuration parameters
+ *from rte_eth_conf
+ */
+ ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
+ /*Configure general VMDQ and DCB RX parameters*/
+ ixgbe_vmdq_dcb_configure(dev);
+ }
+ break;
+ case ETH_MQ_RX_DCB:
+ case ETH_MQ_RX_DCB_RSS:
+ dcb_config->vt_mode = false;
+ config_dcb_rx = DCB_RX_CONFIG;
+ /* Get dcb TX configuration parameters from rte_eth_conf */
+ ixgbe_dcb_rx_config(dev, dcb_config);
+ /*Configure general DCB RX parameters*/
+ ixgbe_dcb_rx_hw_config(dev, dcb_config);
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
+ break;
+ }
+ switch (dev->data->dev_conf.txmode.mq_mode) {
+ case ETH_MQ_TX_VMDQ_DCB:
+ dcb_config->vt_mode = true;
+ config_dcb_tx = DCB_TX_CONFIG;
+ /* get DCB and VT TX configuration parameters
+ * from rte_eth_conf
+ */
+ ixgbe_dcb_vt_tx_config(dev, dcb_config);
+ /*Configure general VMDQ and DCB TX parameters*/
+ ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
+ break;
+
+ case ETH_MQ_TX_DCB:
+ dcb_config->vt_mode = false;
+ config_dcb_tx = DCB_TX_CONFIG;
+ /*get DCB TX configuration parameters from rte_eth_conf*/
+ ixgbe_dcb_tx_config(dev, dcb_config);
+ /*Configure general DCB TX parameters*/
+ ixgbe_dcb_tx_hw_config(dev, dcb_config);
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
+ break;
+ }
+
+ nb_tcs = dcb_config->num_tcs.pfc_tcs;
+ /* Unpack map */
+ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
+ if (nb_tcs == ETH_4_TCS) {
+ /* Avoid un-configured priority mapping to TC0 */
+ uint8_t j = 4;
+ uint8_t mask = 0xFF;
+
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+ mask = (uint8_t)(mask & (~(1 << map[i])));
+ for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
+ if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+ map[j++] = i;
+ mask >>= 1;
+ }
+ /* Re-configure 4 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
+ }
+ for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
+ }
+ } else {
+ /* Re-configure 8 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ }
+ }
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ rx_buffer_size = X550_RX_BUFFER_SIZE;
+ break;
+ default:
+ rx_buffer_size = NIC_RX_BUFFER_SIZE;
+ break;
+ }
+
+ if (config_dcb_rx) {
+ /* Set RX buffer size */
+ pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
+ uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
+
+ for (i = 0; i < nb_tcs; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+ }
+ /* zero alloc all unused TCs */
+ for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ }
+ }
+ if (config_dcb_tx) {
+ /* Only support an equally distributed
+ * Tx packet buffer strategy.
+ */
+ uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
+ uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
+
+ for (i = 0; i < nb_tcs; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+ }
+ /* Clear unused TCs, if any, to zero buffer size*/
+ for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+ }
+ }
+
+ /*Calculates traffic class credits*/
+ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
+ IXGBE_DCB_TX_CONFIG);
+ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
+ IXGBE_DCB_RX_CONFIG);
+
+ if (config_dcb_rx) {
+ /* Unpack CEE standard containers */
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
+ /* Configure PG(ETS) RX */
+ ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
+ }
+
+ if (config_dcb_tx) {
+ /* Unpack CEE standard containers */
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+ /* Configure PG(ETS) TX */
+ ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
+ }
+
+ /*Configure queue statistics registers*/
+ ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
+
+ /* Check if the PFC is supported */
+ if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
+ for (i = 0; i < nb_tcs; i++) {
+ /*
+ * If the TC count is 8,and the default high_water is 48,
+ * the low_water is 16 as default.
+ */
+ hw->fc.high_water[i] = (pbsize * 3) / 4;
+ hw->fc.low_water[i] = pbsize / 4;
+ /* Enable pfc for this TC */
+ tc = &dcb_config->tc_config[i];
+ tc->pfc = ixgbe_dcb_pfc_enabled;
+ }
+ ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+ if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+ pfc_en &= 0x0F;
+ ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
+ }
+
+ return ret;
+}
+
+/**
+ * ixgbe_configure_dcb - Configure DCB Hardware
+ * @dev: pointer to rte_eth_dev
+ */
+void ixgbe_configure_dcb(struct rte_eth_dev *dev)
+{
+ struct ixgbe_dcb_config *dcb_cfg =
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* check support mq_mode for DCB */
+ if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
+ (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
+ (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
+ return;
+
+ if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+ return;
+
+ /** Configure DCB hardware **/
+ ixgbe_dcb_hw_configure(dev, dcb_cfg);
+}
+
+/*
+ * VMDq only support for 10 GbE NIC.
+ */
+static void
+ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_vmdq_rx_conf *cfg;
+ struct ixgbe_hw *hw;
+ enum rte_eth_nb_pools num_pools;
+ uint32_t mrqc, vt_ctl, vlanctrl;
+ uint32_t vmolr = 0;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+ num_pools = cfg->nb_queue_pools;
+
+ ixgbe_rss_disable(dev);
+
+ /* MRQC: enable vmdq */
+ mrqc = IXGBE_MRQC_VMDQEN;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+ /* PFVTCTL: turn on virtualisation and set the default pool */
+ vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
+ if (cfg->enable_default_pool)
+ vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
+ else
+ vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
+
+ for (i = 0; i < (int)num_pools; i++) {
+ vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
+ }
+
+ /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
+
+ /* VFRE: pool enabling for receive - 64 */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
+ if (num_pools == ETH_64_POOLS)
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
+
+ /*
+ * MPSAR - allow pools to read specific mac addresses
+ * In this case, all pools should be able to read from mac addr 0
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
+
+ /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
+ for (i = 0; i < cfg->nb_pool_maps; i++) {
+ /* set vlan id in VF register and set the valid bit */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
+ (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
+ /*
+ * Put the allowed pools in VFB reg. As we only have 16 or 64
+ * pools, we only need to use the first half of the register
+ * i.e. bits 0-31
+ */
+ if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
+ (cfg->pool_map[i].pools & UINT32_MAX));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
+ ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
+
+ }
+
+ /* PFDMA Tx General Switch Control Enables VMDQ loopback */
+ if (cfg->enable_loop_back) {
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * ixgbe_dcb_config_tx_hw_config - Configure general VMDq TX parameters
+ * @hw: pointer to hardware structure
+ */
+static void
+ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
+{
+ uint32_t reg;
+ uint32_t q;
+
+ PMD_INIT_FUNC_TRACE();
+ /*PF VF Transmit Enable*/
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
+
+ /* Disable the Tx desc arbiter so that MTQC can be changed */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+ /* Disable drop for all queues */
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+
+ /* Enable the Tx desc arbiter */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+static int __attribute__((cold))
+ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
+{
+ struct ixgbe_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ unsigned int i;
+
+ /* Initialize software ring entries */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile union ixgbe_adv_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
+ (unsigned) rxq->queue_id);
+ return -ENOMEM;
+ }
+
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ rxd = &rxq->rx_ring[i];
+ rxd->read.hdr_addr = 0;
+ rxd->read.pkt_addr = dma_addr;
+ rxe[i].mbuf = mbuf;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_config_vf_rss(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ uint32_t mrqc;
+
+ ixgbe_rss_configure(dev);
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* MRQC: enable VF RSS */
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc &= ~IXGBE_MRQC_MRQE_MASK;
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ case ETH_64_POOLS:
+ mrqc |= IXGBE_MRQC_VMDQRSS64EN;
+ break;
+
+ case ETH_32_POOLS:
+ mrqc |= IXGBE_MRQC_VMDQRSS32EN;
+ break;
+
+ default:
+ PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
+ return -EINVAL;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+ return 0;
+}
+
+static int
+ixgbe_config_vf_default(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ case ETH_64_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+ IXGBE_MRQC_VMDQEN);
+ break;
+
+ case ETH_32_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+ IXGBE_MRQC_VMDQRT4TCEN);
+ break;
+
+ case ETH_16_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+ IXGBE_MRQC_VMDQRT8TCEN);
+ break;
+ default:
+ PMD_INIT_LOG(ERR,
+ "invalid pool number in IOV mode");
+ break;
+ }
+ return 0;
+}
+
+static int
+ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return 0;
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /*
+ * SRIOV inactive scheme
+ * any DCB/RSS w/o VMDq multi-queue setting
+ */
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_DCB_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ ixgbe_rss_configure(dev);
+ break;
+
+ case ETH_MQ_RX_VMDQ_DCB:
+ ixgbe_vmdq_dcb_configure(dev);
+ break;
+
+ case ETH_MQ_RX_VMDQ_ONLY:
+ ixgbe_vmdq_rx_hw_configure(dev);
+ break;
+
+ case ETH_MQ_RX_NONE:
+ default:
+ /* if mq_mode is none, disable rss mode.*/
+ ixgbe_rss_disable(dev);
+ break;
+ }
+ } else {
+ /* SRIOV active scheme
+ * Support RSS together with SRIOV.
+ */
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ ixgbe_config_vf_rss(dev);
+ break;
+ case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_DCB:
+ /* In SRIOV, the configuration is the same as VMDq case */
+ ixgbe_vmdq_dcb_configure(dev);
+ break;
+ /* DCB/RSS together with SRIOV is not supported */
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case ETH_MQ_RX_DCB_RSS:
+ PMD_INIT_LOG(ERR,
+ "Could not support DCB/RSS with VMDq & SRIOV");
+ return -1;
+ default:
+ ixgbe_config_vf_default(dev);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mtqc;
+ uint32_t rttdcs;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return 0;
+
+ /* disable arbiter before setting MTQC */
+ rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ rttdcs |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /*
+ * SRIOV inactive scheme
+ * any DCB w/o VMDq multi-queue setting
+ */
+ if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+ ixgbe_vmdq_tx_hw_configure(hw);
+ else {
+ mtqc = IXGBE_MTQC_64Q_1PB;
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
+ }
+ } else {
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+
+ /*
+ * SRIOV active scheme
+ * FIXME if support DCB together with VMDq & SRIOV
+ */
+ case ETH_64_POOLS:
+ mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
+ break;
+ case ETH_32_POOLS:
+ mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
+ break;
+ case ETH_16_POOLS:
+ mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
+ IXGBE_MTQC_8TC_8TQ;
+ break;
+ default:
+ mtqc = IXGBE_MTQC_64Q_1PB;
+ PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
+ }
+
+ /* re-enable arbiter */
+ rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
+ *
+ * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
+ * spec rev. 3.0 chapter 8.2.3.8.13.
+ *
+ * @pool Memory pool of the Rx queue
+ */
+static inline uint32_t
+ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
+{
+ struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
+
+ /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
+ uint16_t maxdesc =
+ IPV4_MAX_PKT_LEN /
+ (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
+
+ if (maxdesc >= 16)
+ return IXGBE_RSCCTL_MAXDESC_16;
+ else if (maxdesc >= 8)
+ return IXGBE_RSCCTL_MAXDESC_8;
+ else if (maxdesc >= 4)
+ return IXGBE_RSCCTL_MAXDESC_4;
+ else
+ return IXGBE_RSCCTL_MAXDESC_1;
+}
+
+/**
+ * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
+ * interrupt
+ *
+ * (Taken from FreeBSD tree)
+ * (yes this is all very magic and confusing :)
+ *
+ * @dev port handle
+ * @entry the register array entry
+ * @vector the MSIX vector for this queue
+ * @type RX/TX/MISC
+ */
+static void
+ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 ivar, index;
+
+ vector |= IXGBE_IVAR_ALLOC_VAL;
+
+ switch (hw->mac.type) {
+
+ case ixgbe_mac_82598EB:
+ if (type == -1)
+ entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
+ else
+ entry += (type * 64);
+ index = (entry >> 2) & 0x1F;
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
+ ivar &= ~(0xFF << (8 * (entry & 0x3)));
+ ivar |= (vector << (8 * (entry & 0x3)));
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
+ break;
+
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (type == -1) { /* MISC IVAR */
+ index = (entry & 1) * 8;
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+ ivar &= ~(0xFF << index);
+ ivar |= (vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
+ } else { /* RX/TX IVARS */
+ index = (16 * (entry & 1)) + (8 * type);
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
+ }
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+void __attribute__((cold))
+ixgbe_set_rx_function(struct rte_eth_dev *dev)
+{
+ uint16_t i, rx_using_sse;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+
+ /*
+ * In order to allow Vector Rx there are a few configuration
+ * conditions to be met and Rx Bulk Allocation should be allowed.
+ */
+ if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
+ !adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
+ "preconditions or RTE_IXGBE_INC_VECTOR is "
+ "not enabled",
+ dev->data->port_id);
+
+ adapter->rx_vec_allowed = false;
+ }
+
+ /*
+ * Initialize the appropriate LRO callback.
+ *
+ * If all queues satisfy the bulk allocation preconditions
+ * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
+ * Otherwise use a single allocation version.
+ */
+ if (dev->data->lro) {
+ if (adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
+ "allocation version");
+ dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
+ "allocation version");
+ dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+ }
+ } else if (dev->data->scattered_rx) {
+ /*
+ * Set the non-LRO scattered callback: there are Vector and
+ * single allocation versions.
+ */
+ if (adapter->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+ "callback (port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+ } else if (adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
+ "allocation callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
+ "single allocation) "
+ "Scattered Rx callback "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+ }
+ /*
+ * Below we set "simple" callbacks according to port/queues parameters.
+ * If parameters allow we are going to choose between the following
+ * callbacks:
+ * - Vector
+ * - Bulk Allocation
+ * - Single buffer allocation (the simplest one)
+ */
+ } else if (adapter->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+ "burst size no less than %d (port=%d).",
+ RTE_IXGBE_DESCS_PER_LOOP,
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+ } else if (adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_pkts;
+ }
+
+ /* Propagate information about RX function choice through all queues. */
+
+ rx_using_sse =
+ (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
+ rxq->rx_using_sse = rx_using_sse;
+#ifdef RTE_LIBRTE_SECURITY
+ rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SECURITY);
+#endif
+ }
+}
+
+/**
+ * ixgbe_set_rsc - configure RSC related port HW registers
+ *
+ * Configures the port's RSC related registers according to the 4.6.7.2 chapter
+ * of 82599 Spec (x540 configuration is virtually the same).
+ *
+ * @dev port handle
+ *
+ * Returns 0 in case of success or a non-zero error code
+ */
+static int
+ixgbe_set_rsc(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_dev_info dev_info = { 0 };
+ bool rsc_capable = false;
+ uint16_t i;
+ uint32_t rdrxctl;
+ uint32_t rfctl;
+
+ /* Sanity check */
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+ rsc_capable = true;
+
+ if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+ PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
+ "support it");
+ return -EINVAL;
+ }
+
+ /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
+
+ if (rte_eth_dev_must_keep_crc(rx_conf->offloads) &&
+ (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+ /*
+ * According to chapter of 4.6.7.2.1 of the Spec Rev.
+ * 3.0 RSC configuration requires HW CRC stripping being
+ * enabled. If user requested both HW CRC stripping off
+ * and RSC on - return an error.
+ */
+ PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
+ "is disabled");
+ return -EINVAL;
+ }
+
+ /* RFCTL configuration */
+ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+ if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+ /*
+ * Since NFS packets coalescing is not supported - clear
+ * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
+ * enabled.
+ */
+ rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
+ IXGBE_RFCTL_NFSR_DIS);
+ else
+ rfctl |= IXGBE_RFCTL_RSC_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
+
+ /* If LRO hasn't been requested - we are done here. */
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+ return 0;
+
+ /* Set RDRXCTL.RSCACKC bit */
+ rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+
+ /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+ uint32_t srrctl =
+ IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
+ uint32_t rscctl =
+ IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
+ uint32_t psrtype =
+ IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
+ uint32_t eitr =
+ IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
+
+ /*
+ * ixgbe PMD doesn't support header-split at the moment.
+ *
+ * Following the 4.6.7.2.1 chapter of the 82599/x540
+ * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
+ * should be configured even if header split is not
+ * enabled. We will configure it 128 bytes following the
+ * recommendation in the spec.
+ */
+ srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+ srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK;
+
+ /*
+ * TODO: Consider setting the Receive Descriptor Minimum
+ * Threshold Size for an RSC case. This is not an obviously
+ * beneficiary option but the one worth considering...
+ */
+
+ rscctl |= IXGBE_RSCCTL_RSCEN;
+ rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
+ psrtype |= IXGBE_PSRTYPE_TCPHDR;
+
+ /*
+ * RSC: Set ITR interval corresponding to 2K ints/s.
+ *
+ * Full-sized RSC aggregations for a 10Gb/s link will
+ * arrive at about 20K aggregation/s rate.
+ *
+ * 2K inst/s rate will make only 10% of the
+ * aggregations to be closed due to the interrupt timer
+ * expiration for a streaming at wire-speed case.
+ *
+ * For a sparse streaming case this setting will yield
+ * at most 500us latency for a single RSC aggregation.
+ */
+ eitr &= ~IXGBE_EITR_ITR_INT_MASK;
+ eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
+ eitr |= IXGBE_EITR_CNT_WDIS;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
+
+ /*
+ * RSC requires the mapping of the queue to the
+ * interrupt vector.
+ */
+ ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
+ }
+
+ dev->data->lro = 1;
+
+ PMD_INIT_LOG(DEBUG, "enabling LRO mode");
+
+ return 0;
+}
+
+/*
+ * Initializes Receive Unit.
+ */
+int __attribute__((cold))
+ixgbe_dev_rx_init(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_rx_queue *rxq;
+ uint64_t bus_addr;
+ uint32_t rxctrl;
+ uint32_t fctrl;
+ uint32_t hlreg0;
+ uint32_t maxfrs;
+ uint32_t srrctl;
+ uint32_t rdrxctl;
+ uint32_t rxcsum;
+ uint16_t buf_size;
+ uint16_t i;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Make sure receives are disabled while setting
+ * up the RX context (registers, descriptor rings, etc.).
+ */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+ /* Enable receipt of broadcasted frames */
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_BAM;
+ fctrl |= IXGBE_FCTRL_DPF;
+ fctrl |= IXGBE_FCTRL_PMCF;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+ /*
+ * Configure CRC stripping, if any.
+ */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
+ hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
+ else
+ hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
+
+ /*
+ * Configure jumbo frame support, if any.
+ */
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+ maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+ maxfrs &= 0x0000FFFF;
+ maxfrs |= (rx_conf->max_rx_pkt_len << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+ } else
+ hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+
+ /*
+ * If loopback mode is configured for 82599, set LPBK bit.
+ */
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+ hlreg0 |= IXGBE_HLREG0_LPBK;
+ else
+ hlreg0 &= ~IXGBE_HLREG0_LPBK;
+
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first .
+ */
+ rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ /* Setup RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ /*
+ * Reset crc_len in case it was changed after queue setup by a
+ * call to configure.
+ */
+ rxq->crc_len = rte_eth_dev_must_keep_crc(rx_conf->offloads) ?
+ ETHER_CRC_LEN : 0;
+
+ /* Setup the Base and Length of the Rx Descriptor Rings */
+ bus_addr = rxq->rx_ring_phys_addr;
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
+ (uint32_t)(bus_addr & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
+ (uint32_t)(bus_addr >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
+ rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
+
+ /* Configure the SRRCTL register */
+ srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
+
+ /*
+ * Configure the RX buffer size in the BSIZEPACKET field of
+ * the SRRCTL register of the queue.
+ * The value is in 1 KB resolution. Valid values can be from
+ * 1 KB to 16 KB.
+ */
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
+ srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
+ IXGBE_SRRCTL_BSIZEPKT_MASK);
+
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
+
+ buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT);
+
+ /* It adds dual VLAN length for supporting dual VLAN */
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
+ dev->data->scattered_rx = 1;
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+ dev->data->scattered_rx = 1;
+
+ /*
+ * Device configured with multiple RX queues.
+ */
+ ixgbe_dev_mq_rx_configure(dev);
+
+ /*
+ * Setup the Checksum Register.
+ * Disable Full-Packet Checksum which is mutually exclusive with RSS.
+ * Enable IP/L4 checkum computation by hardware if requested to do so.
+ */
+ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+ rxcsum |= IXGBE_RXCSUM_PCSD;
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ rxcsum |= IXGBE_RXCSUM_IPPCSE;
+ else
+ rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540) {
+ rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
+ rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
+ else
+ rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+ rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+ }
+
+ rc = ixgbe_set_rsc(dev);
+ if (rc)
+ return rc;
+
+ ixgbe_set_rx_function(dev);
+
+ return 0;
+}
+
+/*
+ * Initializes Transmit Unit.
+ */
+void __attribute__((cold))
+ixgbe_dev_tx_init(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_tx_queue *txq;
+ uint64_t bus_addr;
+ uint32_t hlreg0;
+ uint32_t txctrl;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Enable TX CRC (checksum offload requirement) and hw padding
+ * (TSO requirement)
+ */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+ /* Setup the Base and Length of the Tx Descriptor Rings */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+
+ bus_addr = txq->tx_ring_phys_addr;
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
+ (uint32_t)(bus_addr & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
+ (uint32_t)(bus_addr >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
+ txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
+ /* Setup the HW Tx Head and TX Tail descriptor pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+
+ /*
+ * Disable Tx Head Writeback RO bit, since this hoses
+ * bookkeeping if things aren't delivered in order.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ txctrl = IXGBE_READ_REG(hw,
+ IXGBE_DCA_TXCTRL(txq->reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
+ txctrl);
+ break;
+
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ default:
+ txctrl = IXGBE_READ_REG(hw,
+ IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
+ txctrl);
+ break;
+ }
+ }
+
+ /* Device configured with multiple TX queues. */
+ ixgbe_dev_mq_tx_configure(dev);
+}
+
+/*
+ * Set up link for 82599 loopback mode Tx->Rx.
+ */
+static inline void __attribute__((cold))
+ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
+ IXGBE_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Could not enable loopback mode");
+ /* ignore error */
+ return;
+ }
+ }
+
+ /* Restart link */
+ IXGBE_WRITE_REG(hw,
+ IXGBE_AUTOC,
+ IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
+ ixgbe_reset_pipeline_82599(hw);
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ msec_delay(50);
+}
+
+
+/*
+ * Start Transmit and Receive Units.
+ */
+int __attribute__((cold))
+ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_rx_queue *rxq;
+ uint32_t txdctl;
+ uint32_t dmatxctl;
+ uint32_t rxctrl;
+ uint16_t i;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Setup Transmit Threshold Registers */
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl |= txq->pthresh & 0x7F;
+ txdctl |= ((txq->hthresh & 0x7F) << 8);
+ txdctl |= ((txq->wthresh & 0x7F) << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+ }
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ dmatxctl |= IXGBE_DMATXCTL_TE;
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq->tx_deferred_start) {
+ ret = ixgbe_dev_tx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq->rx_deferred_start) {
+ ret = ixgbe_dev_rx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Enable Receive engine */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ rxctrl |= IXGBE_RXCTRL_DMBYPS;
+ rxctrl |= IXGBE_RXCTRL_RXEN;
+ hw->mac.ops.enable_rx_dma(hw, rxctrl);
+
+ /* If loopback mode is enabled for 82599, set up the link accordingly */
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+ ixgbe_setup_loopback_link_82599(hw);
+
+#ifdef RTE_LIBRTE_SECURITY
+ if ((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SECURITY) ||
+ (dev->data->dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_SECURITY)) {
+ ret = ixgbe_crypto_enable_ipsec(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "ixgbe_crypto_enable_ipsec fails with %d.",
+ ret);
+ return ret;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int __attribute__((cold))
+ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_rx_queue *rxq;
+ uint32_t rxdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ /* Allocate buffers for descriptor rings */
+ if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+ PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+ rx_queue_id);
+ return -1;
+ }
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+ /* Wait until RX Enable ready */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/*
+ * Stop Receive Units for specified queue.
+ */
+int __attribute__((cold))
+ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ struct ixgbe_rx_queue *rxq;
+ uint32_t rxdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+ /* Wait until RX Enable bit clear */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
+
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
+
+ ixgbe_rx_queue_release_mbufs(rxq);
+ ixgbe_reset_rx_queue(adapter, rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int __attribute__((cold))
+ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_tx_queue *txq;
+ uint32_t txdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ txq = dev->data->tx_queues[tx_queue_id];
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+ /* Wait until TX Enable ready */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw,
+ IXGBE_TXDCTL(txq->reg_idx));
+ } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
+ tx_queue_id);
+ }
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int __attribute__((cold))
+ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_tx_queue *txq;
+ uint32_t txdctl;
+ uint32_t txtdh, txtdt;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /* Wait until TX queue is empty */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
+ txtdh = IXGBE_READ_REG(hw,
+ IXGBE_TDH(txq->reg_idx));
+ txtdt = IXGBE_READ_REG(hw,
+ IXGBE_TDT(txq->reg_idx));
+ } while (--poll_ms && (txtdh != txtdt));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR,
+ "Tx Queue %d is not empty when stopping.",
+ tx_queue_id);
+ }
+
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+ /* Wait until TX Enable bit clear */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw,
+ IXGBE_TXDCTL(txq->reg_idx));
+ } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+ tx_queue_id);
+ }
+
+ if (txq->ops != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->reset(txq);
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+void
+ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct ixgbe_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
+}
+
+void
+ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct ixgbe_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.offloads = txq->offloads;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+/*
+ * [VF] Initializes Receive Unit.
+ */
+int __attribute__((cold))
+ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_rx_queue *rxq;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ uint64_t bus_addr;
+ uint32_t srrctl, psrtype = 0;
+ uint16_t buf_size;
+ uint16_t i;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
+ PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
+ "it should be power of 2");
+ return -1;
+ }
+
+ if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
+ PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
+ "it should be equal to or less than %d",
+ hw->mac.max_rx_queues);
+ return -1;
+ }
+
+ /*
+ * When the VF driver issues a IXGBE_VF_RESET request, the PF driver
+ * disables the VF receipt of packets if the PF MTU is > 1500.
+ * This is done to deal with 82599 limitations that imposes
+ * the PF and all VFs to share the same MTU.
+ * Then, the PF driver enables again the VF receipt of packet when
+ * the VF driver issues a IXGBE_VF_SET_LPE request.
+ * In the meantime, the VF device cannot be used, even if the VF driver
+ * and the Guest VM network stack are ready to accept packets with a
+ * size up to the PF MTU.
+ * As a work-around to this PF behaviour, force the call to
+ * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
+ * VF packets received can work in all cases.
+ */
+ ixgbevf_rlpml_set_vf(hw,
+ (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first .
+ */
+ rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ /* Setup RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ /* Allocate buffers for descriptor rings */
+ ret = ixgbe_alloc_rx_queue_mbufs(rxq);
+ if (ret)
+ return ret;
+
+ /* Setup the Base and Length of the Rx Descriptor Rings */
+ bus_addr = rxq->rx_ring_phys_addr;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
+ (uint32_t)(bus_addr & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
+ rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
+
+
+ /* Configure the SRRCTL register */
+ srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
+
+ /*
+ * Configure the RX buffer size in the BSIZEPACKET field of
+ * the SRRCTL register of the queue.
+ * The value is in 1 KB resolution. Valid values can be from
+ * 1 KB to 16 KB.
+ */
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
+ srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
+ IXGBE_SRRCTL_BSIZEPKT_MASK);
+
+ /*
+ * VF modification to write virtual function SRRCTL register
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
+
+ buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT);
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+ /* It adds dual VLAN length for supporting dual VLAN */
+ (rxmode->max_rx_pkt_len +
+ 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->data->scattered_rx = 1;
+ }
+
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+
+ /* Set RQPL for VF RSS according to max Rx queue */
+ psrtype |= (dev->data->nb_rx_queues >> 1) <<
+ IXGBE_PSRTYPE_RQPL_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+
+ ixgbe_set_rx_function(dev);
+
+ return 0;
+}
+
+/*
+ * [VF] Initializes Transmit Unit.
+ */
+void __attribute__((cold))
+ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_tx_queue *txq;
+ uint64_t bus_addr;
+ uint32_t txctrl;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Setup the Base and Length of the Tx Descriptor Rings */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ bus_addr = txq->tx_ring_phys_addr;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
+ (uint32_t)(bus_addr & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
+ txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
+ /* Setup the HW Tx Head and TX Tail descriptor pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
+
+ /*
+ * Disable Tx Head Writeback RO bit, since this hoses
+ * bookkeeping if things aren't delivered in order.
+ */
+ txctrl = IXGBE_READ_REG(hw,
+ IXGBE_VFDCA_TXCTRL(i));
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
+ txctrl);
+ }
+}
+
+/*
+ * [VF] Start Transmit and Receive Units.
+ */
+void __attribute__((cold))
+ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_rx_queue *rxq;
+ uint32_t txdctl;
+ uint32_t rxdctl;
+ uint16_t i;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Setup Transmit Threshold Registers */
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ txdctl |= txq->pthresh & 0x7F;
+ txdctl |= ((txq->hthresh & 0x7F) << 8);
+ txdctl |= ((txq->wthresh & 0x7F) << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+
+ poll_ms = 10;
+ /* Wait until TX Enable ready */
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
+ }
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+
+ rxq = dev->data->rx_queues[i];
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+
+ /* Wait until RX Enable ready */
+ poll_ms = 10;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
+
+ }
+}
+
+int
+ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
+int
+ixgbe_config_rss_filter(struct rte_eth_dev *dev,
+ struct ixgbe_rte_flow_rss_conf *conf, bool add)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reta;
+ uint16_t i;
+ uint16_t j;
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
+ if (!add) {
+ if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
+ ixgbe_rss_disable(dev);
+ memset(&filter_info->rss_info, 0,
+ sizeof(struct ixgbe_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (filter_info->rss_info.conf.queue_num)
+ return -EINVAL;
+ /* Fill in redirection table
+ * The byte-swap is needed because NIC registers are in
+ * little-endian order.
+ */
+ reta = 0;
+ for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
+ if (j == conf->conf.queue_num)
+ j = 0;
+ reta = (reta << 8) | conf->conf.queue[j];
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(hw, reta_reg,
+ rte_bswap32(reta));
+ }
+
+ /* Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
+ ixgbe_rss_disable(dev);
+ return -EINVAL;
+ }
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ ixgbe_hw_rss_hash_set(hw, &rss_conf);
+
+ if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+ return -1;
+}
+
+uint16_t __attribute__((weak))
+ixgbe_recv_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+ixgbe_recv_scattered_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
+{
+ return -1;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h
new file mode 100644
index 00000000..39378f75
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -0,0 +1,302 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _IXGBE_RXTX_H_
+#define _IXGBE_RXTX_H_
+
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
+ * also optimize cache line size effect. H/W supports up to cache line size 128.
+ */
+#define IXGBE_ALIGN 128
+
+#define IXGBE_RXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_rx_desc))
+#define IXGBE_TXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_tx_desc))
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
+ */
+#define IXGBE_MIN_RING_DESC 32
+#define IXGBE_MAX_RING_DESC 4096
+
+#define RTE_PMD_IXGBE_TX_MAX_BURST 32
+#define RTE_PMD_IXGBE_RX_MAX_BURST 32
+#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ 64
+
+#define RTE_IXGBE_DESCS_PER_LOOP 4
+
+#ifdef RTE_IXGBE_INC_VECTOR
+#define RTE_IXGBE_RXQ_REARM_THRESH 32
+#define RTE_IXGBE_MAX_RX_BURST RTE_IXGBE_RXQ_REARM_THRESH
+#endif
+
+#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_PMD_IXGBE_RX_MAX_BURST) * \
+ sizeof(union ixgbe_adv_rx_desc))
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while(0)
+#endif
+
+#define RTE_IXGBE_REGISTER_POLL_WAIT_10_MS 10
+#define RTE_IXGBE_WAIT_100_US 100
+#define RTE_IXGBE_VMTXSW_REGISTER_COUNT 2
+
+#define IXGBE_TX_MAX_SEG 40
+
+#define IXGBE_PACKET_TYPE_MASK_82599 0X7F
+#define IXGBE_PACKET_TYPE_MASK_X550 0X10FF
+#define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF
+#define IXGBE_PACKET_TYPE_TUNNEL_BIT 0X1000
+
+#define IXGBE_PACKET_TYPE_MAX 0X80
+#define IXGBE_PACKET_TYPE_TN_MAX 0X100
+#define IXGBE_PACKET_TYPE_SHIFT 0X04
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct ixgbe_rx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+struct ixgbe_scattered_rx_entry {
+ struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct ixgbe_tx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+ uint16_t next_id; /**< Index of next descriptor in ring. */
+ uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct ixgbe_tx_entry_v {
+ struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct ixgbe_rx_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+ volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
+ volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
+ struct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */
+ struct ixgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */
+ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+ uint64_t mbuf_initializer; /**< value to init mbufs */
+ uint16_t nb_rx_desc; /**< number of RX descriptors. */
+ uint16_t rx_tail; /**< current value of RDT register. */
+ uint16_t nb_rx_hold; /**< number of held free RX desc. */
+ uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
+ uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
+ uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
+ uint8_t rx_using_sse;
+ /**< indicates that vector RX is in use */
+#ifdef RTE_LIBRTE_SECURITY
+ uint8_t using_ipsec;
+ /**< indicates that IPsec RX feature is in use */
+#endif
+#ifdef RTE_IXGBE_INC_VECTOR
+ uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
+ uint16_t rxrearm_start; /**< the idx we start the re-arming from */
+#endif
+ uint16_t rx_free_thresh; /**< max free RX desc to hold. */
+ uint16_t queue_id; /**< RX queue index. */
+ uint16_t reg_idx; /**< RX queue register index. */
+ uint16_t pkt_type_mask; /**< Packet type mask for different NICs. */
+ uint16_t port_id; /**< Device port identifier. */
+ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
+ uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
+ uint8_t rx_deferred_start; /**< not in global dev start. */
+ /** flags to set in mbuf when a vlan is detected. */
+ uint64_t vlan_flags;
+ uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+ /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
+ struct rte_mbuf fake_mbuf;
+ /** hold packets to return to application */
+ struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2];
+};
+
+/**
+ * IXGBE CTX Constants
+ */
+enum ixgbe_advctx_num {
+ IXGBE_CTX_0 = 0, /**< CTX0 */
+ IXGBE_CTX_1 = 1, /**< CTX1 */
+ IXGBE_CTX_NUM = 2, /**< CTX NUMBER */
+};
+
+/** Offload features */
+union ixgbe_tx_offload {
+ uint64_t data[2];
+ struct {
+ uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size */
+ uint64_t vlan_tci:16;
+ /**< VLAN Tag Control Identifier (CPU order). */
+
+ /* fields for TX offloading of tunnels */
+ uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
+ uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
+#ifdef RTE_LIBRTE_SECURITY
+ /* inline ipsec related*/
+ uint64_t sa_idx:8; /**< TX SA database entry index */
+ uint64_t sec_pad_len:4; /**< padding length */
+#endif
+ };
+};
+
+/*
+ * Compare mask for vlan_macip_len.data,
+ * should be in sync with ixgbe_vlan_macip.f layout.
+ * */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
+/** MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
+/**
+ * Structure to check if new context need be built
+ */
+
+struct ixgbe_advctx_info {
+ uint64_t flags; /**< ol_flags for context build. */
+ /**< tx offload: vlan, tso, l2-l3-l4 lengths. */
+ union ixgbe_tx_offload tx_offload;
+ /** compare mask for tx offload. */
+ union ixgbe_tx_offload tx_offload_mask;
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct ixgbe_tx_queue {
+ /** TX ring virtual address. */
+ volatile union ixgbe_adv_tx_desc *tx_ring;
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
+ union {
+ struct ixgbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
+ struct ixgbe_tx_entry_v *sw_ring_v; /**< address of SW ring for vector PMD */
+ };
+ volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
+ uint16_t nb_tx_desc; /**< number of TX descriptors. */
+ uint16_t tx_tail; /**< current value of TDT reg. */
+ /**< Start freeing TX buffers if there are less free descriptors than
+ this value. */
+ uint16_t tx_free_thresh;
+ /** Number of TX descriptors to use before RS bit is set. */
+ uint16_t tx_rs_thresh;
+ /** Number of TX descriptors used since RS bit was set. */
+ uint16_t nb_tx_used;
+ /** Index to last TX descriptor to have been cleaned. */
+ uint16_t last_desc_cleaned;
+ /** Total number of TX descriptors ready to be allocated. */
+ uint16_t nb_tx_free;
+ uint16_t tx_next_dd; /**< next desc to scan for DD bit */
+ uint16_t tx_next_rs; /**< next desc to set RS bit */
+ uint16_t queue_id; /**< TX queue index. */
+ uint16_t reg_idx; /**< TX queue register index. */
+ uint16_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold reg. */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+ uint32_t ctx_curr; /**< Hardware context states. */
+ /** Hardware context0 history. */
+ struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
+ const struct ixgbe_txq_ops *ops; /**< txq ops */
+ uint8_t tx_deferred_start; /**< not in global dev start. */
+#ifdef RTE_LIBRTE_SECURITY
+ uint8_t using_ipsec;
+ /**< indicates that IPsec TX feature is in use */
+#endif
+};
+
+struct ixgbe_txq_ops {
+ void (*release_mbufs)(struct ixgbe_tx_queue *txq);
+ void (*free_swring)(struct ixgbe_tx_queue *txq);
+ void (*reset)(struct ixgbe_tx_queue *txq);
+};
+
+/*
+ * Populate descriptors with the following info:
+ * 1.) buffer_addr = phys_addr + headroom
+ * 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len
+ * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT
+ */
+
+/* Defines for Tx descriptor */
+#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\
+ IXGBE_ADVTXD_DCMD_IFCS |\
+ IXGBE_ADVTXD_DCMD_DEXT |\
+ IXGBE_ADVTXD_DCMD_EOP)
+
+
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by primary process and then
+ * in dev_init by secondary process when attaching to an existing ethdev.
+ */
+void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
+
+/**
+ * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance.
+ *
+ * Sets the callback based on the device parameters:
+ * - ixgbe_hw.rx_bulk_alloc_allowed
+ * - rte_eth_dev_data.scattered_rx
+ * - rte_eth_dev_data.lro
+ * - conditions checked in ixgbe_rx_vec_condition_check()
+ *
+ * This means that the parameters above have to be configured prior to calling
+ * to this function.
+ *
+ * @dev rte_eth_dev handle
+ */
+void ixgbe_set_rx_function(struct rte_eth_dev *dev);
+
+uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
+int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
+void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
+
+extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];
+extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
+
+#ifdef RTE_IXGBE_INC_VECTOR
+
+uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
+#endif /* RTE_IXGBE_INC_VECTOR */
+
+uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);
+
+#endif /* _IXGBE_RXTX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
new file mode 100644
index 00000000..a97c2718
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#ifndef _IXGBE_RXTX_VEC_COMMON_H_
+#define _IXGBE_RXTX_VEC_COMMON_H_
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+
+static inline uint16_t
+reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned int pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len)
+ end->data_len -= rxq->crc_len;
+ else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ start->nb_segs--;
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ }
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
+static __rte_always_inline int
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
+{
+ struct ixgbe_tx_entry_v *txep;
+ uint32_t status;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bit on threshold descriptor */
+ status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ if (!(status & IXGBE_ADVTXD_STAT_DD))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /*
+ * first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
+ m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool))
+ free[nb_free++] = m;
+ else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+static __rte_always_inline void
+tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+static inline void
+_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+{
+ unsigned int i;
+ struct ixgbe_tx_entry_v *txe;
+ const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
+
+ if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
+ return;
+
+ /* release the used mbufs in sw_ring */
+ for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
+ i != txq->tx_tail;
+ i = (i + 1) & max_desc) {
+ txe = &txq->sw_ring_v[i];
+ rte_pktmbuf_free_seg(txe->mbuf);
+ }
+ txq->nb_tx_free = max_desc;
+
+ /* reset tx_entry */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txe = &txq->sw_ring_v[i];
+ txe->mbuf = NULL;
+ }
+}
+
+static inline void
+_ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
+{
+ const unsigned int mask = rxq->nb_rx_desc - 1;
+ unsigned int i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ if (rxq->rxrearm_nb == 0) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ }
+ } else {
+ for (i = rxq->rx_tail;
+ i != rxq->rxrearm_start;
+ i = (i + 1) & mask) {
+ if (rxq->sw_ring[i].mbuf != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ }
+ }
+
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static inline void
+_ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq)
+{
+ if (txq == NULL)
+ return;
+
+ if (txq->sw_ring != NULL) {
+ rte_free(txq->sw_ring_v - 1);
+ txq->sw_ring_v = NULL;
+ }
+}
+
+static inline void
+_ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq)
+{
+ static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
+ struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
+ uint16_t i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_tx_desc; i++)
+ txq->tx_ring[i] = zeroed_desc;
+
+ /* Initialize SW ring entries */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+
+ txd->wb.status = IXGBE_TXD_STAT_DD;
+ txe[i].mbuf = NULL;
+ }
+
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ txq->tx_tail = 0;
+ txq->nb_tx_used = 0;
+ /*
+ * Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->ctx_curr = 0;
+ memset((void *)&txq->ctx_cache, 0,
+ IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+}
+
+static inline int
+ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
+
+static inline int
+ixgbe_txq_vec_setup_default(struct ixgbe_tx_queue *txq,
+ const struct ixgbe_txq_ops *txq_ops)
+{
+ if (txq->sw_ring_v == NULL)
+ return -1;
+
+ /* leave the first one for overflow */
+ txq->sw_ring_v = txq->sw_ring_v + 1;
+ txq->ops = txq_ops;
+
+ return 0;
+}
+
+static inline int
+ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
new file mode 100644
index 00000000..edb13835
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -0,0 +1,522 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+#include "ixgbe_rxtx_vec_common.h"
+
+#include <arm_neon.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ uint64x2_t dma_addr0, dma_addr1;
+ uint64x2_t zero = vdupq_n_u64(0);
+ uint64_t paddr;
+ uint8x8_t p;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (unlikely(rte_mempool_get_bulk(rxq->mb_pool,
+ (void *)rxep,
+ RTE_IXGBE_RXQ_REARM_THRESH) < 0)) {
+ if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ vst1q_u64((uint64_t *)&rxdp[i].read,
+ zero);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_IXGBE_RXQ_REARM_THRESH;
+ return;
+ }
+
+ p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /*
+ * Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ */
+ vst1_u8((uint8_t *)&mb0->rearm_data, p);
+ paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
+ dma_addr0 = vsetq_lane_u64(paddr, zero, 0);
+ /* flush desc with pa dma_addr */
+ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
+
+ vst1_u8((uint8_t *)&mb1->rearm_data, p);
+ paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
+ dma_addr1 = vsetq_lane_u64(paddr, zero, 0);
+ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+}
+
+#define VTAG_SHIFT (3)
+
+static inline void
+desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
+ uint8x16_t staterr, struct rte_mbuf **rx_pkts)
+{
+ uint8x16_t ptype;
+ uint8x16_t vtag;
+
+ union {
+ uint8_t e[4];
+ uint32_t word;
+ } vol;
+
+ const uint8x16_t pkttype_msk = {
+ PKT_RX_VLAN, PKT_RX_VLAN,
+ PKT_RX_VLAN, PKT_RX_VLAN,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00};
+
+ const uint8x16_t rsstype_msk = {
+ 0x0F, 0x0F, 0x0F, 0x0F,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00};
+
+ const uint8x16_t rss_flags = {
+ 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, 0, 0, 0,
+ 0, 0, 0, PKT_RX_FDIR};
+
+ ptype = vzipq_u8(sterr_tmp1.val[0], sterr_tmp2.val[0]).val[0];
+ ptype = vandq_u8(ptype, rsstype_msk);
+ ptype = vqtbl1q_u8(rss_flags, ptype);
+
+ vtag = vshrq_n_u8(staterr, VTAG_SHIFT);
+ vtag = vandq_u8(vtag, pkttype_msk);
+ vtag = vorrq_u8(ptype, vtag);
+
+ vol.word = vgetq_lane_u32(vreinterpretq_u32_u8(vtag), 0);
+
+ rx_pkts[0]->ol_flags = vol.e[0];
+ rx_pkts[1]->ol_flags = vol.e[1];
+ rx_pkts[2]->ol_flags = vol.e[2];
+ rx_pkts[3]->ol_flags = vol.e[3];
+}
+
+/*
+ * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
+ *
+ * Notice:
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
+ * numbers of DD bit
+ * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
+ * - don't support ol_flags for rss and csum err
+ */
+
+#define IXGBE_VPMD_DESC_DD_MASK 0x01010101
+#define IXGBE_VPMD_DESC_EOP_MASK 0x02020202
+
+static inline uint16_t
+_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint8x16_t shuf_msk = {
+ 0xFF, 0xFF,
+ 0xFF, 0xFF, /* skip 32 bits pkt_type */
+ 12, 13, /* octet 12~13, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 12, 13, /* octet 12~13, 16 bits data_len */
+ 14, 15, /* octet 14~15, low 16 bits vlan_macip */
+ 4, 5, 6, 7 /* octet 4~7, 32bits rss */
+ };
+ uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0,
+ rxq->crc_len, 0, 0, 0};
+
+ /* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch_non_temporal(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
+ ixgbe_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ return 0;
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_IXGBE_DESCS_PER_LOOP,
+ rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
+ uint64x2_t descs[RTE_IXGBE_DESCS_PER_LOOP];
+ uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ uint8x16x2_t sterr_tmp1, sterr_tmp2;
+ uint64x2_t mbp1, mbp2;
+ uint8x16_t staterr;
+ uint16x8_t tmp;
+ uint32_t var = 0;
+ uint32_t stat;
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
+
+ /* A. load 4 pkts descs */
+ descs[0] = vld1q_u64((uint64_t *)(rxdp));
+ descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
+ descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
+ descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
+ rte_smp_rmb();
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
+ pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
+ pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = vzipq_u8(vreinterpretq_u8_u64(descs[1]),
+ vreinterpretq_u8_u64(descs[3]));
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = vzipq_u8(vreinterpretq_u8_u64(descs[0]),
+ vreinterpretq_u8_u64(descs[2]));
+
+ /* C.2 get 4 pkts staterr value */
+ staterr = vzipq_u8(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0];
+ stat = vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0);
+
+ /* set ol_flags with vlan packet type */
+ desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr,
+ &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
+ pkt_mb4 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
+ pkt_mb3 = vreinterpretq_u8_u16(tmp);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+ pkt_mb4);
+ vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
+ pkt_mb2 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
+ pkt_mb1 = vreinterpretq_u8_u16(tmp);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ /* and with mask to extract bits, flipping 1-0 */
+ *(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK;
+
+ split_packet += RTE_IXGBE_DESCS_PER_LOOP;
+ }
+
+ rte_prefetch_non_temporal(rxdp + RTE_IXGBE_DESCS_PER_LOOP);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ vst1q_u8((uint8_t *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+ pkt_mb2);
+ vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+
+ stat &= IXGBE_VPMD_DESC_DD_MASK;
+
+ /* C.4 calc avaialbe number of desc */
+ if (likely(stat != IXGBE_VPMD_DESC_DD_MASK)) {
+ while (stat & 0x01) {
+ ++var;
+ stat = stat >> 8;
+ }
+ nb_pkts_recd += var;
+ break;
+ } else {
+ nb_pkts_recd += RTE_IXGBE_DESCS_PER_LOOP;
+ }
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+/*
+ * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
+ *
+ * Notice:
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
+ * numbers of DD bit
+ * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
+ * - don't support ol_flags for rss and csum err
+ */
+uint16_t
+ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+/*
+ * vPMD receive routine that reassembles scattered packets
+ *
+ * Notice:
+ * - don't support ol_flags for rss and csum err
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
+ * numbers of DD bit
+ * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
+ */
+uint16_t
+ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ unsigned int i = 0;
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile union ixgbe_adv_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64x2_t descriptor = {
+ pkt->buf_iova + pkt->data_off,
+ (uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len};
+
+ vst1q_u64((uint64_t *)&txdp->read, descriptor);
+}
+
+static inline void
+vtx(volatile union ixgbe_adv_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+ volatile union ixgbe_adv_tx_desc *txdp;
+ struct ixgbe_tx_entry_v *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = DCMD_DTYP_FLAGS;
+ uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS;
+ int i;
+
+ /* cross rx_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ ixgbe_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring_v[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reach the end of ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring_v[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+ txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
+ txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+static void __attribute__((cold))
+ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+{
+ _ixgbe_tx_queue_release_mbufs_vec(txq);
+}
+
+void __attribute__((cold))
+ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
+{
+ _ixgbe_rx_queue_release_mbufs_vec(rxq);
+}
+
+static void __attribute__((cold))
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+{
+ _ixgbe_tx_free_swring_vec(txq);
+}
+
+static void __attribute__((cold))
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+{
+ _ixgbe_reset_tx_queue_vec(txq);
+}
+
+static const struct ixgbe_txq_ops vec_txq_ops = {
+ .release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
+ .free_swring = ixgbe_tx_free_swring,
+ .reset = ixgbe_reset_tx_queue,
+};
+
+int __attribute__((cold))
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
+{
+ return ixgbe_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
+{
+ return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
+}
+
+int __attribute__((cold))
+ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+ /* no csum error report support */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ return -1;
+
+ return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
new file mode 100644
index 00000000..c9ba4824
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -0,0 +1,750 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+#include "ixgbe_rxtx_vec_common.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ __m128i dma_addr0, dma_addr1;
+
+ const __m128i hba_msk = _mm_set_epi64x(0, UINT64_MAX);
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mb_pool,
+ (void *)rxep,
+ RTE_IXGBE_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_IXGBE_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ __m128i vaddr0, vaddr1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
+ vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* set Header Buffer Address to zero */
+ dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
+ dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t) ((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+}
+
+#ifdef RTE_LIBRTE_SECURITY
+static inline void
+desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i sterr, rearm, tmp_e, tmp_p;
+ uint32_t *rearm0 = (uint32_t *)rx_pkts[0]->rearm_data + 2;
+ uint32_t *rearm1 = (uint32_t *)rx_pkts[1]->rearm_data + 2;
+ uint32_t *rearm2 = (uint32_t *)rx_pkts[2]->rearm_data + 2;
+ uint32_t *rearm3 = (uint32_t *)rx_pkts[3]->rearm_data + 2;
+ const __m128i ipsec_sterr_msk =
+ _mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP |
+ IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED);
+ const __m128i ipsec_proc_msk =
+ _mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP);
+ const __m128i ipsec_err_flag =
+ _mm_set1_epi32(PKT_RX_SEC_OFFLOAD_FAILED |
+ PKT_RX_SEC_OFFLOAD);
+ const __m128i ipsec_proc_flag = _mm_set1_epi32(PKT_RX_SEC_OFFLOAD);
+
+ rearm = _mm_set_epi32(*rearm3, *rearm2, *rearm1, *rearm0);
+ sterr = _mm_set_epi32(_mm_extract_epi32(descs[3], 2),
+ _mm_extract_epi32(descs[2], 2),
+ _mm_extract_epi32(descs[1], 2),
+ _mm_extract_epi32(descs[0], 2));
+ sterr = _mm_and_si128(sterr, ipsec_sterr_msk);
+ tmp_e = _mm_cmpeq_epi32(sterr, ipsec_sterr_msk);
+ tmp_p = _mm_cmpeq_epi32(sterr, ipsec_proc_msk);
+ sterr = _mm_or_si128(_mm_and_si128(tmp_e, ipsec_err_flag),
+ _mm_and_si128(tmp_p, ipsec_proc_flag));
+ rearm = _mm_or_si128(rearm, sterr);
+ *rearm0 = _mm_extract_epi32(rearm, 0);
+ *rearm1 = _mm_extract_epi32(rearm, 1);
+ *rearm2 = _mm_extract_epi32(rearm, 2);
+ *rearm3 = _mm_extract_epi32(rearm, 3);
+}
+#endif
+
+static inline void
+desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
+ struct rte_mbuf **rx_pkts)
+{
+ __m128i ptype0, ptype1, vtag0, vtag1, csum;
+ __m128i rearm0, rearm1, rearm2, rearm3;
+
+ /* mask everything except rss type */
+ const __m128i rsstype_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x000F, 0x000F, 0x000F, 0x000F);
+
+ /* mask the lower byte of ol_flags */
+ const __m128i ol_flags_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x00FF, 0x00FF, 0x00FF, 0x00FF);
+
+ /* map rss type to rss hash flag */
+ const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0,
+ 0, 0, 0, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
+
+ /* mask everything except vlan present and l4/ip csum error */
+ const __m128i vlan_csum_msk = _mm_set_epi16(
+ (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
+ (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
+ (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
+ (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
+ IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
+ IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP);
+ /* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */
+ const __m128i vlan_csum_map_lo = _mm_set_epi8(
+ 0, 0, 0, 0,
+ vlan_flags | PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ vlan_flags | PKT_RX_IP_CKSUM_BAD,
+ vlan_flags | PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ vlan_flags | PKT_RX_IP_CKSUM_GOOD,
+ 0, 0, 0, 0,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD);
+
+ const __m128i vlan_csum_map_hi = _mm_set_epi8(
+ 0, 0, 0, 0,
+ 0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
+ PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t),
+ 0, 0, 0, 0,
+ 0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
+ PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));
+
+ ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
+ ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
+ vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
+ vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
+
+ ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
+ ptype0 = _mm_and_si128(ptype0, rsstype_msk);
+ ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
+
+ vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
+ vtag1 = _mm_and_si128(vtag1, vlan_csum_msk);
+
+ /* csum bits are in the most significant, to use shuffle we need to
+ * shift them. Change mask to 0xc000 to 0x0003.
+ */
+ csum = _mm_srli_epi16(vtag1, 14);
+
+ /* now or the most significant 64 bits containing the checksum
+ * flags with the vlan present flags.
+ */
+ csum = _mm_srli_si128(csum, 8);
+ vtag1 = _mm_or_si128(csum, vtag1);
+
+ /* convert VP, IPE, L4E to ol_flags */
+ vtag0 = _mm_shuffle_epi8(vlan_csum_map_hi, vtag1);
+ vtag0 = _mm_slli_epi16(vtag0, sizeof(uint8_t));
+
+ vtag1 = _mm_shuffle_epi8(vlan_csum_map_lo, vtag1);
+ vtag1 = _mm_and_si128(vtag1, ol_flags_msk);
+ vtag1 = _mm_or_si128(vtag0, vtag1);
+
+ vtag1 = _mm_or_si128(ptype0, vtag1);
+
+ /*
+ * At this point, we have the 4 sets of flags in the low 64-bits
+ * of vtag1 (4x16).
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single 16-byte write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write.
+ */
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10);
+ rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);
+
+ /* write the rearm data and the olflags in one write */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+ _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+}
+
+static inline uint32_t get_packet_type(int index,
+ uint32_t pkt_info,
+ uint32_t etqf_check,
+ uint32_t tunnel_check)
+{
+ if (etqf_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP)))
+ return RTE_PTYPE_UNKNOWN;
+
+ if (tunnel_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP))) {
+ pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
+ return ptype_table_tn[pkt_info];
+ }
+
+ pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
+ return ptype_table[pkt_info];
+}
+
+static inline void
+desc_to_ptype_v(__m128i descs[4], uint16_t pkt_type_mask,
+ struct rte_mbuf **rx_pkts)
+{
+ __m128i etqf_mask = _mm_set_epi64x(0x800000008000LL, 0x800000008000LL);
+ __m128i ptype_mask = _mm_set_epi32(
+ pkt_type_mask, pkt_type_mask, pkt_type_mask, pkt_type_mask);
+ __m128i tunnel_mask =
+ _mm_set_epi64x(0x100000001000LL, 0x100000001000LL);
+
+ uint32_t etqf_check, tunnel_check, pkt_info;
+
+ __m128i ptype0 = _mm_unpacklo_epi32(descs[0], descs[2]);
+ __m128i ptype1 = _mm_unpacklo_epi32(descs[1], descs[3]);
+
+ /* interleave low 32 bits,
+ * now we have 4 ptypes in a XMM register
+ */
+ ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
+
+ /* create a etqf bitmask based on the etqf bit. */
+ etqf_check = _mm_movemask_epi8(_mm_and_si128(ptype0, etqf_mask));
+
+ /* shift left by IXGBE_PACKET_TYPE_SHIFT, and apply ptype mask */
+ ptype0 = _mm_and_si128(_mm_srli_epi32(ptype0, IXGBE_PACKET_TYPE_SHIFT),
+ ptype_mask);
+
+ /* create a tunnel bitmask based on the tunnel bit */
+ tunnel_check = _mm_movemask_epi8(
+ _mm_slli_epi32(_mm_and_si128(ptype0, tunnel_mask), 0x3));
+
+ pkt_info = _mm_extract_epi32(ptype0, 0);
+ rx_pkts[0]->packet_type =
+ get_packet_type(0, pkt_info, etqf_check, tunnel_check);
+ pkt_info = _mm_extract_epi32(ptype0, 1);
+ rx_pkts[1]->packet_type =
+ get_packet_type(1, pkt_info, etqf_check, tunnel_check);
+ pkt_info = _mm_extract_epi32(ptype0, 2);
+ rx_pkts[2]->packet_type =
+ get_packet_type(2, pkt_info, etqf_check, tunnel_check);
+ pkt_info = _mm_extract_epi32(ptype0, 3);
+ rx_pkts[3]->packet_type =
+ get_packet_type(3, pkt_info, etqf_check, tunnel_check);
+}
+
+/*
+ * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
+ *
+ * Notice:
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
+ * numbers of DD bit
+ * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+#ifdef RTE_LIBRTE_SECURITY
+ uint8_t use_ipsec = rxq->using_ipsec;
+#endif
+ int pos;
+ uint64_t var;
+ __m128i shuf_msk;
+ __m128i crc_adjust = _mm_set_epi16(
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+ /*
+ * compile-time check the above crc_adjust layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi16
+ * call above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ __m128i dd_check, eop_check;
+ __m128i mbuf_init;
+ uint8_t vlan_flags;
+
+ /* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch0(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
+ ixgbe_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 15, 14, /* octet 14~15, low 16 bits vlan_macip */
+ 13, 12, /* octet 12~13, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 13, 12, /* octet 12~13, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip 32 bit pkt_type */
+ 0xFF, 0xFF
+ );
+ /*
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+ mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* ensure these 2 flags are in the lower 8 bits */
+ RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
+ vlan_flags = rxq->vlan_flags & UINT8_MAX;
+
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_IXGBE_DESCS_PER_LOOP,
+ rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
+ __m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
+
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
+ mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+ rte_compiler_barrier();
+
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
+ mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
+
+ descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ rte_compiler_barrier();
+ /* B.1 load 2 mbuf point */
+ descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ rte_compiler_barrier();
+ descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+#if defined(RTE_ARCH_X86_64)
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+ /* set ol_flags with vlan packet type */
+ desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (unlikely(use_ipsec))
+ desc_to_olflags_v_ipsec(descs, &rx_pkts[pos]);
+#endif
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
+ pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+ /* C.2 get 4 pkts staterr value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+ pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+ /* the staterr values are not in order, as the count
+ * count of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += RTE_IXGBE_DESCS_PER_LOOP;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+
+ desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);
+
+ /* C.4 calc avaialbe number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+/*
+ * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
+ *
+ * Notice:
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
+ * numbers of DD bit
+ * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
+ */
+uint16_t
+ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+/*
+ * vPMD receive routine that reassembles scattered packets
+ *
+ * Notice:
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
+ * numbers of DD bit
+ * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
+ */
+uint16_t
+ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ unsigned i = 0;
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile union ixgbe_adv_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ __m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
+ flags | pkt->data_len,
+ pkt->buf_iova + pkt->data_off);
+ _mm_store_si128((__m128i *)&txdp->read, descriptor);
+}
+
+static inline void
+vtx(volatile union ixgbe_adv_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+ volatile union ixgbe_adv_tx_desc *txdp;
+ struct ixgbe_tx_entry_v *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = DCMD_DTYP_FLAGS;
+ uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;
+ int i;
+
+ /* cross rx_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ ixgbe_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring_v[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reach the end of ring */
+ txdp = &(txq->tx_ring[tx_id]);
+ txep = &txq->sw_ring_v[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+ txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
+ txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+static void __attribute__((cold))
+ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+{
+ _ixgbe_tx_queue_release_mbufs_vec(txq);
+}
+
+void __attribute__((cold))
+ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
+{
+ _ixgbe_rx_queue_release_mbufs_vec(rxq);
+}
+
+static void __attribute__((cold))
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+{
+ _ixgbe_tx_free_swring_vec(txq);
+}
+
+static void __attribute__((cold))
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+{
+ _ixgbe_reset_tx_queue_vec(txq);
+}
+
+static const struct ixgbe_txq_ops vec_txq_ops = {
+ .release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
+ .free_swring = ixgbe_tx_free_swring,
+ .reset = ixgbe_reset_tx_queue,
+};
+
+int __attribute__((cold))
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
+{
+ return ixgbe_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
+{
+ return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
+}
+
+int __attribute__((cold))
+ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+ return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_tm.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_tm.c
new file mode 100644
index 00000000..73845a73
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_tm.c
@@ -0,0 +1,1031 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#include <rte_malloc.h>
+
+#include "ixgbe_ethdev.h"
+
+static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error);
+static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
+static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error);
+static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error);
+static int ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error);
+static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
+static int ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
+
+const struct rte_tm_ops ixgbe_tm_ops = {
+ .capabilities_get = ixgbe_tm_capabilities_get,
+ .shaper_profile_add = ixgbe_shaper_profile_add,
+ .shaper_profile_delete = ixgbe_shaper_profile_del,
+ .node_add = ixgbe_node_add,
+ .node_delete = ixgbe_node_delete,
+ .node_type_get = ixgbe_node_type_get,
+ .level_capabilities_get = ixgbe_level_capabilities_get,
+ .node_capabilities_get = ixgbe_node_capabilities_get,
+ .hierarchy_commit = ixgbe_hierarchy_commit,
+};
+
+int
+ixgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+ void *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ *(const void **)arg = &ixgbe_tm_ops;
+
+ return 0;
+}
+
+void
+ixgbe_tm_conf_init(struct rte_eth_dev *dev)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+
+ /* initialize shaper profile list */
+ TAILQ_INIT(&tm_conf->shaper_profile_list);
+
+ /* initialize node configuration */
+ tm_conf->root = NULL;
+ TAILQ_INIT(&tm_conf->queue_list);
+ TAILQ_INIT(&tm_conf->tc_list);
+ tm_conf->nb_tc_node = 0;
+ tm_conf->nb_queue_node = 0;
+ tm_conf->committed = false;
+}
+
+void
+ixgbe_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+ struct ixgbe_tm_node *tm_node;
+
+ /* clear node configuration */
+ while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
+ TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ tm_conf->nb_queue_node = 0;
+ while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
+ TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ tm_conf->nb_tc_node = 0;
+ if (tm_conf->root) {
+ rte_free(tm_conf->root);
+ tm_conf->root = NULL;
+ }
+
+ /* Remove all shaper profiles */
+ while ((shaper_profile =
+ TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
+ TAILQ_REMOVE(&tm_conf->shaper_profile_list,
+ shaper_profile, node);
+ rte_free(shaper_profile);
+ }
+}
+
+static inline uint8_t
+ixgbe_tc_nb_get(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *eth_conf;
+ uint8_t nb_tcs = 0;
+
+ eth_conf = &dev->data->dev_conf;
+ if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+ } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+ ETH_32_POOLS)
+ nb_tcs = ETH_4_TCS;
+ else
+ nb_tcs = ETH_8_TCS;
+ } else {
+ nb_tcs = 1;
+ }
+
+ return nb_tcs;
+}
+
+static int
+ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint8_t tc_nb = ixgbe_tc_nb_get(dev);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (tc_nb > hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+ /**
+ * here is the max capability not the current configuration.
+ */
+ /* port + TCs + queues */
+ cap->n_nodes_max = 1 + IXGBE_DCB_MAX_TRAFFIC_CLASS +
+ hw->mac.max_tx_queues;
+ cap->n_levels_max = 3;
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+ cap->shaper_n_max = cap->n_nodes_max;
+ cap->shaper_private_n_max = cap->n_nodes_max;
+ cap->shaper_private_dual_rate_n_max = 0;
+ cap->shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->shaper_private_rate_max = 1250000000ull;
+ cap->shaper_shared_n_max = 0;
+ cap->shaper_shared_n_nodes_per_shaper_max = 0;
+ cap->shaper_shared_n_shapers_per_node_max = 0;
+ cap->shaper_shared_dual_rate_n_max = 0;
+ cap->shaper_shared_rate_min = 0;
+ cap->shaper_shared_rate_max = 0;
+ cap->sched_n_children_max = hw->mac.max_tx_queues;
+ /**
+ * HW supports SP. But no plan to support it now.
+ * So, all the nodes should have the same priority.
+ */
+ cap->sched_sp_n_priorities_max = 1;
+ cap->sched_wfq_n_children_per_group_max = 0;
+ cap->sched_wfq_n_groups_max = 0;
+ /**
+ * SW only supports fair round robin now.
+ * So, all the nodes should have the same weight.
+ */
+ cap->sched_wfq_weight_max = 1;
+ cap->cman_head_drop_supported = 0;
+ cap->dynamic_update_mask = 0;
+ cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
+ cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+ cap->cman_wred_context_n_max = 0;
+ cap->cman_wred_context_private_n_max = 0;
+ cap->cman_wred_context_shared_n_max = 0;
+ cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+ cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static inline struct ixgbe_tm_shaper_profile *
+ixgbe_shaper_profile_search(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_shaper_profile_list *shaper_profile_list =
+ &tm_conf->shaper_profile_list;
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+
+ TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+ if (shaper_profile_id == shaper_profile->shaper_profile_id)
+ return shaper_profile;
+ }
+
+ return NULL;
+}
+
+static int
+ixgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ /* min rate not supported */
+ if (profile->committed.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ error->message = "committed rate not supported";
+ return -EINVAL;
+ }
+ /* min bucket size not supported */
+ if (profile->committed.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ error->message = "committed bucket size not supported";
+ return -EINVAL;
+ }
+ /* max bucket size not supported */
+ if (profile->peak.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ error->message = "peak bucket size not supported";
+ return -EINVAL;
+ }
+ /* length adjustment not supported */
+ if (profile->pkt_length_adjust) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ error->message = "packet length adjustment not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+ int ret;
+
+ if (!profile || !error)
+ return -EINVAL;
+
+ ret = ixgbe_shaper_profile_param_check(profile, error);
+ if (ret)
+ return ret;
+
+ shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);
+
+ if (shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID exist";
+ return -EINVAL;
+ }
+
+ shaper_profile = rte_zmalloc("ixgbe_tm_shaper_profile",
+ sizeof(struct ixgbe_tm_shaper_profile),
+ 0);
+ if (!shaper_profile)
+ return -ENOMEM;
+ shaper_profile->shaper_profile_id = shaper_profile_id;
+ rte_memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
+ TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
+ shaper_profile, node);
+
+ return 0;
+}
+
+static int
+ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+
+ if (!error)
+ return -EINVAL;
+
+ shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);
+
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID not exist";
+ return -EINVAL;
+ }
+
+ /* don't delete a profile if it's used by one or several nodes */
+ if (shaper_profile->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "profile in use";
+ return -EINVAL;
+ }
+
+ TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
+ rte_free(shaper_profile);
+
+ return 0;
+}
+
+static inline struct ixgbe_tm_node *
+ixgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
+ enum ixgbe_tm_node_type *node_type)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_node *tm_node;
+
+ if (tm_conf->root && tm_conf->root->id == node_id) {
+ *node_type = IXGBE_TM_NODE_TYPE_PORT;
+ return tm_conf->root;
+ }
+
+ TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = IXGBE_TM_NODE_TYPE_TC;
+ return tm_node;
+ }
+ }
+
+ TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = IXGBE_TM_NODE_TYPE_QUEUE;
+ return tm_node;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
+ uint16_t *base, uint16_t *nb)
+{
+ uint8_t nb_tcs = ixgbe_tc_nb_get(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint16_t vf_num = pci_dev->max_vfs;
+
+ *base = 0;
+ *nb = 0;
+
+ /* VT on */
+ if (vf_num) {
+ /* no DCB */
+ if (nb_tcs == 1) {
+ if (vf_num >= ETH_32_POOLS) {
+ *nb = 2;
+ *base = vf_num * 2;
+ } else if (vf_num >= ETH_16_POOLS) {
+ *nb = 4;
+ *base = vf_num * 4;
+ } else {
+ *nb = 8;
+ *base = vf_num * 8;
+ }
+ } else {
+ /* DCB */
+ *nb = 1;
+ *base = vf_num * nb_tcs + tc_node_no;
+ }
+ } else {
+ /* VT off */
+ if (nb_tcs == ETH_8_TCS) {
+ switch (tc_node_no) {
+ case 0:
+ *base = 0;
+ *nb = 32;
+ break;
+ case 1:
+ *base = 32;
+ *nb = 32;
+ break;
+ case 2:
+ *base = 64;
+ *nb = 16;
+ break;
+ case 3:
+ *base = 80;
+ *nb = 16;
+ break;
+ case 4:
+ *base = 96;
+ *nb = 8;
+ break;
+ case 5:
+ *base = 104;
+ *nb = 8;
+ break;
+ case 6:
+ *base = 112;
+ *nb = 8;
+ break;
+ case 7:
+ *base = 120;
+ *nb = 8;
+ break;
+ default:
+ return;
+ }
+ } else {
+ switch (tc_node_no) {
+ /**
+ * If no VF and no DCB, only 64 queues can be used.
+ * This case also be covered by this "case 0".
+ */
+ case 0:
+ *base = 0;
+ *nb = 64;
+ break;
+ case 1:
+ *base = 64;
+ *nb = 32;
+ break;
+ case 2:
+ *base = 96;
+ *nb = 16;
+ break;
+ case 3:
+ *base = 112;
+ *nb = 16;
+ break;
+ default:
+ return;
+ }
+ }
+ }
+}
+
+static int
+ixgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t priority, uint32_t weight,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ if (priority) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "priority should be 0";
+ return -EINVAL;
+ }
+
+ if (weight != 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+ error->message = "weight must be 1";
+ return -EINVAL;
+ }
+
+ /* not support shared shaper */
+ if (params->shared_shaper_id) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+ if (params->n_shared_shapers) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+
+ /* for non-leaf node */
+ if (node_id >= dev->data->nb_tx_queues) {
+ /* check the unsupported parameters */
+ if (params->nonleaf.wfq_weight_mode) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFQ not supported";
+ return -EINVAL;
+ }
+ if (params->nonleaf.n_sp_priorities != 1) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+ error->message = "SP priority not supported";
+ return -EINVAL;
+ } else if (params->nonleaf.wfq_weight_mode &&
+ !(*params->nonleaf.wfq_weight_mode)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFP should be byte mode";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* for leaf node */
+ /* check the unsupported parameters */
+ if (params->leaf.cman) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+ error->message = "Congestion management not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.wred_profile_id !=
+ RTE_TM_WRED_PROFILE_ID_NONE) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.shared_wred_context_id) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.n_shared_wred_contexts) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Now the TC and queue configuration is controlled by DCB.
+ * We need check if the node configuration follows the DCB configuration.
+ * In the future, we may use TM to cover DCB.
+ */
+static int
+ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+ enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX;
+ struct ixgbe_tm_shaper_profile *shaper_profile = NULL;
+ struct ixgbe_tm_node *tm_node;
+ struct ixgbe_tm_node *parent_node;
+ uint8_t nb_tcs;
+ uint16_t q_base = 0;
+ uint16_t q_nb = 0;
+ int ret;
+
+ if (!params || !error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (tm_conf->committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ ret = ixgbe_node_param_check(dev, node_id, priority, weight,
+ params, error);
+ if (ret)
+ return ret;
+
+ /* check if the node ID is already used */
+ if (ixgbe_tm_node_search(dev, node_id, &node_type)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "node id already used";
+ return -EINVAL;
+ }
+
+ /* check the shaper profile id */
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+ shaper_profile = ixgbe_shaper_profile_search(
+ dev, params->shaper_profile_id);
+ if (!shaper_profile) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+ error->message = "shaper profile not exist";
+ return -EINVAL;
+ }
+ }
+
+ /* root node if not have a parent */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id > IXGBE_TM_NODE_TYPE_PORT) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* obviously no more than one root */
+ if (tm_conf->root) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "already have a root";
+ return -EINVAL;
+ }
+
+ /* add the root node */
+ tm_node = rte_zmalloc("ixgbe_tm_node",
+ sizeof(struct ixgbe_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->no = 0;
+ tm_node->parent = NULL;
+ tm_node->shaper_profile = shaper_profile;
+ rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ tm_conf->root = tm_node;
+
+ /* increase the reference counter of the shaper profile */
+ if (shaper_profile)
+ shaper_profile->reference_count++;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ /* check the parent node */
+ parent_node = ixgbe_tm_node_search(dev, parent_node_id,
+ &parent_node_type);
+ if (!parent_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent not exist";
+ return -EINVAL;
+ }
+ if (parent_node_type != IXGBE_TM_NODE_TYPE_PORT &&
+ parent_node_type != IXGBE_TM_NODE_TYPE_TC) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent is not port or TC";
+ return -EINVAL;
+ }
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id != parent_node_type + 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* check the node number */
+ if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
+ /* check TC number */
+ nb_tcs = ixgbe_tc_nb_get(dev);
+ if (tm_conf->nb_tc_node >= nb_tcs) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many TCs";
+ return -EINVAL;
+ }
+ } else {
+ /* check queue number */
+ if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues";
+ return -EINVAL;
+ }
+
+ ixgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
+ if (parent_node->reference_count >= q_nb) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues than TC supported";
+ return -EINVAL;
+ }
+
+ /**
+ * check the node id.
+ * For queue, the node id means queue id.
+ */
+ if (node_id >= dev->data->nb_tx_queues) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too large queue id";
+ return -EINVAL;
+ }
+ }
+
+ /* add the TC or queue node */
+ tm_node = rte_zmalloc("ixgbe_tm_node",
+ sizeof(struct ixgbe_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = parent_node;
+ tm_node->shaper_profile = shaper_profile;
+ rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
+ tm_node->no = parent_node->reference_count;
+ TAILQ_INSERT_TAIL(&tm_conf->tc_list,
+ tm_node, node);
+ tm_conf->nb_tc_node++;
+ } else {
+ tm_node->no = q_base + parent_node->reference_count;
+ TAILQ_INSERT_TAIL(&tm_conf->queue_list,
+ tm_node, node);
+ tm_conf->nb_queue_node++;
+ }
+ tm_node->parent->reference_count++;
+
+ /* increase the reference counter of the shaper profile */
+ if (shaper_profile)
+ shaper_profile->reference_count++;
+
+ return 0;
+}
+
+static int
+ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+ struct ixgbe_tm_node *tm_node;
+
+ if (!error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (tm_conf->committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check the if the node id exists */
+ tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* the node should have no child */
+ if (tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message =
+ "cannot delete a node which has children";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
+ if (tm_node->shaper_profile)
+ tm_node->shaper_profile->reference_count--;
+ rte_free(tm_node);
+ tm_conf->root = NULL;
+ return 0;
+ }
+
+ /* TC or queue node */
+ if (tm_node->shaper_profile)
+ tm_node->shaper_profile->reference_count--;
+ tm_node->parent->reference_count--;
+ if (node_type == IXGBE_TM_NODE_TYPE_TC) {
+ TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+ tm_conf->nb_tc_node--;
+ } else {
+ TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+ tm_conf->nb_queue_node--;
+ }
+ rte_free(tm_node);
+
+ return 0;
+}
+
+static int
+ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+ struct ixgbe_tm_node *tm_node;
+
+ if (!is_leaf || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (node_type == IXGBE_TM_NODE_TYPE_QUEUE)
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+
+ return 0;
+}
+
+static int
+ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (level_id >= IXGBE_TM_NODE_TYPE_MAX) {
+ error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+ error->message = "too deep level";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (level_id == IXGBE_TM_NODE_TYPE_PORT) {
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->n_nodes_leaf_max = 0;
+ } else if (level_id == IXGBE_TM_NODE_TYPE_TC) {
+ /* TC */
+ cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_leaf_max = 0;
+ } else {
+ /* queue */
+ cap->n_nodes_max = hw->mac.max_tx_queues;
+ cap->n_nodes_nonleaf_max = 0;
+ cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
+ }
+
+ cap->non_leaf_nodes_identical = true;
+ cap->leaf_nodes_identical = true;
+
+ if (level_id != IXGBE_TM_NODE_TYPE_QUEUE) {
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = false;
+ cap->nonleaf.shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->nonleaf.shaper_private_rate_max = 1250000000ull;
+ cap->nonleaf.shaper_shared_n_max = 0;
+ if (level_id == IXGBE_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->mac.max_tx_queues;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ cap->nonleaf.stats_mask = 0;
+
+ return 0;
+ }
+
+ /* queue node */
+ cap->leaf.shaper_private_supported = true;
+ cap->leaf.shaper_private_dual_rate_supported = false;
+ cap->leaf.shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->leaf.shaper_private_rate_max = 1250000000ull;
+ cap->leaf.shaper_shared_n_max = 0;
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ cap->leaf.stats_mask = 0;
+
+ return 0;
+}
+
+static int
+ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+ struct ixgbe_tm_node *tm_node;
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ cap->shaper_private_supported = true;
+ cap->shaper_private_dual_rate_supported = false;
+ cap->shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->shaper_private_rate_max = 1250000000ull;
+ cap->shaper_shared_n_max = 0;
+
+ if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) {
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ } else {
+ if (node_type == IXGBE_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->mac.max_tx_queues;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ }
+
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static int
+ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_node *tm_node;
+ uint64_t bw;
+ int ret;
+
+ if (!error)
+ return -EINVAL;
+
+ /* check the setting */
+ if (!tm_conf->root)
+ goto done;
+
+ /* not support port max bandwidth yet */
+ if (tm_conf->root->shaper_profile &&
+ tm_conf->root->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no port max bandwidth";
+ goto fail_clear;
+ }
+
+ /* HW not support TC max bandwidth */
+ TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+ if (tm_node->shaper_profile &&
+ tm_node->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no TC max bandwidth";
+ goto fail_clear;
+ }
+ }
+
+ /* queue max bandwidth */
+ TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+ if (tm_node->shaper_profile)
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ else
+ bw = 0;
+ if (bw) {
+ /* interpret Bps to Mbps */
+ bw = bw * 8 / 1000 / 1000;
+ ret = ixgbe_set_queue_rate_limit(dev, tm_node->no, bw);
+ if (ret) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message =
+ "failed to set queue max bandwidth";
+ goto fail_clear;
+ }
+ }
+ }
+
+done:
+ tm_conf->committed = true;
+ return 0;
+
+fail_clear:
+ /* clear all the traffic manager configuration */
+ if (clear_on_fail) {
+ ixgbe_tm_conf_uninit(dev);
+ ixgbe_tm_conf_init(dev);
+ }
+ return -EINVAL;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_vf_representor.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_vf_representor.c
new file mode 100644
index 00000000..db516d99
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_vf_representor.c
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_vf.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+#include "rte_pmd_ixgbe.h"
+
+
+static int
+ixgbe_vf_representor_link_update(struct rte_eth_dev *ethdev,
+ int wait_to_complete)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ return ixgbe_dev_link_update_share(representor->pf_ethdev,
+ wait_to_complete, 0);
+}
+
+static int
+ixgbe_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
+ struct ether_addr *mac_addr)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ return rte_pmd_ixgbe_set_vf_mac_addr(
+ representor->pf_ethdev->data->port_id,
+ representor->vf_id, mac_addr);
+}
+
+static void
+ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(
+ representor->pf_ethdev->data->dev_private);
+
+ dev_info->device = representor->pf_ethdev->device;
+
+ dev_info->min_rx_bufsize = 1024;
+ /**< Minimum size of RX buffer. */
+ dev_info->max_rx_pktlen = 9728;
+ /**< Maximum configurable length of RX pkt. */
+ dev_info->max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+ /**< Maximum number of RX queues. */
+ dev_info->max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+ /**< Maximum number of TX queues. */
+
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ /**< Maximum number of MAC addresses. */
+
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ /**< Device RX offload capabilities. */
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ /**< Device TX offload capabilities. */
+
+ dev_info->speed_capa =
+ representor->pf_ethdev->data->dev_link.link_speed;
+ /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+
+ dev_info->switch_info.name =
+ representor->pf_ethdev->device->name;
+ dev_info->switch_info.domain_id = representor->switch_domain_id;
+ dev_info->switch_info.port_id = representor->vf_id;
+}
+
+static int ixgbe_vf_representor_dev_configure(
+ __rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int ixgbe_vf_representor_rx_queue_setup(
+ __rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mb_pool)
+{
+ return 0;
+}
+
+static int ixgbe_vf_representor_tx_queue_setup(
+ __rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ return 0;
+}
+
+static int ixgbe_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static void ixgbe_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
+{
+}
+
+static int
+ixgbe_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
+ uint16_t vlan_id, int on)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+ uint64_t vf_mask = 1ULL << representor->vf_id;
+
+ return rte_pmd_ixgbe_set_vf_vlan_filter(
+ representor->pf_ethdev->data->port_id, vlan_id, vf_mask, on);
+}
+
+static void
+ixgbe_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
+ __rte_unused uint16_t rx_queue_id, int on)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_ixgbe_set_vf_vlan_stripq(representor->pf_ethdev->data->port_id,
+ representor->vf_id, on);
+}
+
+struct eth_dev_ops ixgbe_vf_representor_dev_ops = {
+ .dev_infos_get = ixgbe_vf_representor_dev_infos_get,
+
+ .dev_start = ixgbe_vf_representor_dev_start,
+ .dev_configure = ixgbe_vf_representor_dev_configure,
+ .dev_stop = ixgbe_vf_representor_dev_stop,
+
+ .rx_queue_setup = ixgbe_vf_representor_rx_queue_setup,
+ .tx_queue_setup = ixgbe_vf_representor_tx_queue_setup,
+
+ .link_update = ixgbe_vf_representor_link_update,
+
+ .vlan_filter_set = ixgbe_vf_representor_vlan_filter_set,
+ .vlan_strip_queue_set = ixgbe_vf_representor_vlan_strip_queue_set,
+
+ .mac_addr_set = ixgbe_vf_representor_mac_addr_set,
+};
+
+static uint16_t
+ixgbe_vf_representor_rx_burst(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+ixgbe_vf_representor_tx_burst(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int
+ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ struct ixgbe_vf_info *vf_data;
+ struct rte_pci_device *pci_dev;
+ struct rte_eth_link *link;
+
+ if (!representor)
+ return -ENOMEM;
+
+ representor->vf_id =
+ ((struct ixgbe_vf_representor *)init_params)->vf_id;
+ representor->switch_domain_id =
+ ((struct ixgbe_vf_representor *)init_params)->switch_domain_id;
+ representor->pf_ethdev =
+ ((struct ixgbe_vf_representor *)init_params)->pf_ethdev;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(representor->pf_ethdev);
+
+ if (representor->vf_id >= pci_dev->max_vfs)
+ return -ENODEV;
+
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+ /* Set representor device ops */
+ ethdev->dev_ops = &ixgbe_vf_representor_dev_ops;
+
+ /* No data-path, but need stub Rx/Tx functions to avoid crash
+ * when testing with the likes of testpmd.
+ */
+ ethdev->rx_pkt_burst = ixgbe_vf_representor_rx_burst;
+ ethdev->tx_pkt_burst = ixgbe_vf_representor_tx_burst;
+
+ /* Setting the number queues allocated to the VF */
+ ethdev->data->nb_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+ ethdev->data->nb_tx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+ /* Reference VF mac address from PF data structure */
+ vf_data = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
+ representor->pf_ethdev->data->dev_private);
+
+ ethdev->data->mac_addrs = (struct ether_addr *)
+ vf_data[representor->vf_id].vf_mac_addresses;
+
+ /* Link state. Inherited from PF */
+ link = &representor->pf_ethdev->data->dev_link;
+
+ ethdev->data->dev_link.link_speed = link->link_speed;
+ ethdev->data->dev_link.link_duplex = link->link_duplex;
+ ethdev->data->dev_link.link_status = link->link_status;
+ ethdev->data->dev_link.link_autoneg = link->link_autoneg;
+
+ return 0;
+}
+
+int
+ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused)
+{
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/meson.build b/src/spdk/dpdk/drivers/net/ixgbe/meson.build
new file mode 100644
index 00000000..02d5ef5e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/meson.build
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+version = 2
+
+cflags += ['-DRTE_LIBRTE_IXGBE_BYPASS']
+
+subdir('base')
+objs = [base_objs]
+
+allow_experimental_apis = true
+sources = files(
+ 'ixgbe_82599_bypass.c',
+ 'ixgbe_bypass.c',
+ 'ixgbe_ethdev.c',
+ 'ixgbe_fdir.c',
+ 'ixgbe_flow.c',
+ 'ixgbe_ipsec.c',
+ 'ixgbe_pf.c',
+ 'ixgbe_rxtx.c',
+ 'ixgbe_tm.c',
+ 'ixgbe_vf_representor.c',
+ 'rte_pmd_ixgbe.c'
+)
+
+deps += ['hash', 'security']
+
+if arch_subdir == 'x86'
+ dpdk_conf.set('RTE_IXGBE_INC_VECTOR', 1)
+ sources += files('ixgbe_rxtx_vec_sse.c')
+endif
+
+includes += include_directories('base')
+
+install_headers('rte_pmd_ixgbe.h')
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c b/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c
new file mode 100644
index 00000000..3a874f9a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -0,0 +1,1244 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#include <rte_ethdev_driver.h>
+
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_x550.h"
+#include "ixgbe_ethdev.h"
+#include "rte_pmd_ixgbe.h"
+
+int
+rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
+ struct ether_addr *mac_addr)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ int rar_entry;
+ uint8_t *new_mac = (uint8_t *)(mac_addr);
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+ if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
+ rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
+ ETHER_ADDR_LEN);
+ return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
+ IXGBE_RAH_AV);
+ }
+ return -EINVAL;
+}
+
+int
+rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+
+ ctrl = IXGBE_PF_CONTROL_MSG;
+ if (vfinfo[vf].clear_to_send)
+ ctrl |= IXGBE_VT_MSGTYPE_CTS;
+
+ ixgbe_write_mbx(hw, &ctrl, 1, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mac = &hw->mac;
+
+ mac->ops.set_vlan_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mac = &hw->mac;
+ mac->ops.set_mac_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id)
+{
+ struct ixgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (vlan_id > ETHER_MAX_VLAN_ID)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
+ if (vlan_id) {
+ ctrl = vlan_id;
+ ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
+ } else {
+ ctrl = 0;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ /* enable or disable VMDQ loopback */
+ if (on)
+ ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
+ else
+ ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reg_value;
+ int i;
+ int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ for (i = 0; i <= num_queues; i++) {
+ reg_value = IXGBE_QDE_WRITE |
+ (i << IXGBE_QDE_IDX_SHIFT) |
+ (on & IXGBE_QDE_ENABLE);
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reg_value;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ /* only support VF's 0 to 63 */
+ if ((vf >= pci_dev->max_vfs) || (vf > 63))
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
+ if (on)
+ reg_value |= IXGBE_SRRCTL_DROP_EN;
+ else
+ reg_value &= ~IXGBE_SRRCTL_DROP_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ struct ixgbe_hw *hw;
+ uint16_t queues_per_pool;
+ uint32_t q;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+
+ /* The PF has 128 queue pairs and in SRIOV configuration
+ * those queues will be assigned to VF's, so RXDCTL
+ * registers will be dealing with queues which will be
+ * assigned to VF's.
+ * Let's say we have SRIOV configured with 31 VF's then the
+ * first 124 queues 0-123 will be allocated to VF's and only
+ * the last 4 queues 123-127 will be assigned to the PF.
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
+ ETH_16_POOLS;
+ else
+ queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
+ ETH_64_POOLS;
+
+ for (q = 0; q < queues_per_pool; q++)
+ (*dev->dev_ops->vlan_strip_queue_set)(dev,
+ q + vf * queues_per_pool, on);
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf,
+ uint16_t rx_mask, uint8_t on)
+{
+ int val = 0;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ struct ixgbe_hw *hw;
+ uint32_t vmolr;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
+ " on 82599 hardware and newer");
+ return -ENOTSUP;
+ }
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
+
+ if (on)
+ vmolr |= val;
+ else
+ vmolr &= ~val;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
+ if (vf >= 32) {
+ addr = IXGBE_VFRE(1);
+ val = bit1 << (vf - 32);
+ } else {
+ addr = IXGBE_VFRE(0);
+ val = bit1 << vf;
+ }
+
+ reg = IXGBE_READ_REG(hw, addr);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
+ if (vf >= 32) {
+ addr = IXGBE_VFTE(1);
+ val = bit1 << (vf - 32);
+ } else {
+ addr = IXGBE_VFTE(0);
+ val = bit1 << vf;
+ }
+
+ reg = IXGBE_READ_REG(hw, addr);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on)
+{
+ struct rte_eth_dev *dev;
+ int ret = 0;
+ uint16_t vf_idx;
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ for (vf_idx = 0; vf_idx < 64; vf_idx++) {
+ if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+ ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
+ vlan_on, false);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
+}
+
+int
+rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Stop the data paths */
+ if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+ return -ENOTSUP;
+ /**
+ * Workaround:
+ * As no ixgbe_disable_sec_rx_path equivalent is
+ * implemented for tx in the base code, and we are
+ * not allowed to modify the base code in DPDK, so
+ * just call the hand-written one directly for now.
+ * The hardware support has been checked by
+ * ixgbe_disable_sec_rx_path().
+ */
+ ixgbe_disable_sec_tx_path_generic(hw);
+
+ /* Enable Ethernet CRC (required by MACsec offload) */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+ /* Enable the TX and RX crypto engines */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+ ctrl |= 0x3;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+ /* Enable SA lookup */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+ ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+ ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+ IXGBE_LSECTXCTRL_AUTH;
+ ctrl |= IXGBE_LSECTXCTRL_AISCI;
+ ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+ ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+ ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+ ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+ ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+ if (rp)
+ ctrl |= IXGBE_LSECRXCTRL_RP;
+ else
+ ctrl &= ~IXGBE_LSECRXCTRL_RP;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+ /* Start the data paths */
+ ixgbe_enable_sec_rx_path(hw);
+ /**
+ * Workaround:
+ * As no ixgbe_enable_sec_rx_path equivalent is
+ * implemented for tx in the base code, and we are
+ * not allowed to modify the base code in DPDK, so
+ * just call the hand-written one directly for now.
+ */
+ ixgbe_enable_sec_tx_path_generic(hw);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_disable(uint16_t port)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Stop the data paths */
+ if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+ return -ENOTSUP;
+ /**
+ * Workaround:
+ * As no ixgbe_disable_sec_rx_path equivalent is
+ * implemented for tx in the base code, and we are
+ * not allowed to modify the base code in DPDK, so
+ * just call the hand-written one directly for now.
+ * The hardware support has been checked by
+ * ixgbe_disable_sec_rx_path().
+ */
+ ixgbe_disable_sec_tx_path_generic(hw);
+
+ /* Disable the TX and RX crypto engines */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+ /* Disable SA lookup */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+ ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+ ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+ ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+ ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+ /* Start the data paths */
+ ixgbe_enable_sec_rx_path(hw);
+ /**
+ * Workaround:
+ * As no ixgbe_enable_sec_rx_path equivalent is
+ * implemented for tx in the base code, and we are
+ * not allowed to modify the base code in DPDK, so
+ * just call the hand-written one directly for now.
+ */
+ ixgbe_enable_sec_tx_path_generic(hw);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+ ctrl = mac[4] | (mac[5] << 8);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+ pi = rte_cpu_to_be_16(pi);
+ ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl, i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (idx != 0 && idx != 1)
+ return -EINVAL;
+
+ if (an >= 4)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Set the PN and key */
+ pn = rte_cpu_to_be_32(pn);
+ if (idx == 0) {
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+ }
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+ }
+ }
+
+ /* Set AN and select the SA */
+ ctrl = (an << idx * 2) | (idx << 4);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl, i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (idx != 0 && idx != 1)
+ return -EINVAL;
+
+ if (an >= 4)
+ return -EINVAL;
+
+ /* Set the PN */
+ pn = rte_cpu_to_be_32(pn);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+ /* Set the key */
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+ }
+
+ /* Set the AN and validate the SA */
+ ctrl = an | (1 << 2);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
+ uint8_t tc_num,
+ uint8_t *bw_weight)
+{
+ struct rte_eth_dev *dev;
+ struct ixgbe_dcb_config *dcb_config;
+ struct ixgbe_dcb_tc_config *tc;
+ struct rte_eth_conf *eth_conf;
+ struct ixgbe_bw_conf *bw_conf;
+ uint8_t i;
+ uint8_t nb_tcs;
+ uint16_t sum;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
+ IXGBE_DCB_MAX_TRAFFIC_CLASS);
+ return -EINVAL;
+ }
+
+ dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
+ eth_conf = &dev->data->dev_conf;
+
+ if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+ } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+ ETH_32_POOLS)
+ nb_tcs = ETH_4_TCS;
+ else
+ nb_tcs = ETH_8_TCS;
+ } else {
+ nb_tcs = 1;
+ }
+
+ if (nb_tcs != tc_num) {
+ PMD_DRV_LOG(ERR,
+ "Weight should be set for all %d enabled TCs.",
+ nb_tcs);
+ return -EINVAL;
+ }
+
+ sum = 0;
+ for (i = 0; i < nb_tcs; i++)
+ sum += bw_weight[i];
+ if (sum != 100) {
+ PMD_DRV_LOG(ERR,
+ "The summary of the TC weight should be 100.");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
+ }
+ for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+ }
+
+ bw_conf->tc_num = nb_tcs;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t fctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+
+ /* If 'enable' set the SBP bit else clear it */
+ if (enable)
+ fctrl |= IXGBE_FCTRL_SBP;
+ else
+ fctrl &= ~(IXGBE_FCTRL_SBP);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ return 0;
+}
+
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
+int
+rte_pmd_ixgbe_bypass_init(uint16_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ ixgbe_bypass_init(dev);
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_bypass_state_show(uint16_t port_id, uint32_t *state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_state_show(dev, state);
+}
+
+int
+rte_pmd_ixgbe_bypass_state_set(uint16_t port_id, uint32_t *new_state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_state_store(dev, new_state);
+}
+
+int
+rte_pmd_ixgbe_bypass_event_show(uint16_t port_id,
+ uint32_t event,
+ uint32_t *state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_event_show(dev, event, state);
+}
+
+int
+rte_pmd_ixgbe_bypass_event_store(uint16_t port_id,
+ uint32_t event,
+ uint32_t state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_event_store(dev, event, state);
+}
+
+int
+rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port_id, uint32_t timeout)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_wd_timeout_store(dev, timeout);
+}
+
+int
+rte_pmd_ixgbe_bypass_ver_show(uint16_t port_id, uint32_t *ver)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_ver_show(dev, ver);
+}
+
+int
+rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port_id, uint32_t *wd_timeout)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_wd_timeout_show(dev, wd_timeout);
+}
+
+int
+rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_wd_reset(dev);
+}
+#endif
+
+/**
+ * rte_pmd_ixgbe_acquire_swfw - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and get the shared phy token as needed
+ */
+STATIC s32 rte_pmd_ixgbe_acquire_swfw(struct ixgbe_hw *hw, u32 mask)
+{
+ int retries = FW_PHY_TOKEN_RETRIES;
+ s32 status = IXGBE_SUCCESS;
+
+ while (--retries) {
+ status = ixgbe_acquire_swfw_semaphore(hw, mask);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Get SWFW sem failed, Status = %d\n",
+ status);
+ return status;
+ }
+ status = ixgbe_get_phy_token(hw);
+ if (status == IXGBE_SUCCESS)
+ return IXGBE_SUCCESS;
+
+ if (status == IXGBE_ERR_TOKEN_RETRY)
+ PMD_DRV_LOG(ERR, "Get PHY token failed, Status = %d\n",
+ status);
+
+ ixgbe_release_swfw_semaphore(hw, mask);
+ if (status != IXGBE_ERR_TOKEN_RETRY) {
+ PMD_DRV_LOG(ERR,
+ "Retry get PHY token failed, Status=%d\n",
+ status);
+ return status;
+ }
+ }
+ PMD_DRV_LOG(ERR, "swfw acquisition retries failed!: PHY ID = 0x%08X\n",
+ hw->phy.id);
+ return status;
+}
+
+/**
+ * rte_pmd_ixgbe_release_swfw_sync - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and puts the shared phy token as needed
+ */
+STATIC void rte_pmd_ixgbe_release_swfw(struct ixgbe_hw *hw, u32 mask)
+{
+ ixgbe_put_phy_token(hw);
+ ixgbe_release_swfw_semaphore(hw, mask);
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_lock(uint16_t port)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ u32 swfw_mask;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ if (hw->bus.lan_id)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+ if (rte_pmd_ixgbe_acquire_swfw(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ return IXGBE_SUCCESS;
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlock(uint16_t port)
+{
+ struct rte_eth_dev *dev;
+ struct ixgbe_hw *hw;
+ u32 swfw_mask;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ if (hw->bus.lan_id)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+ rte_pmd_ixgbe_release_swfw(hw, swfw_mask);
+
+ return IXGBE_SUCCESS;
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t *phy_data)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ u32 i, data, command;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ /* Setup and write the read command */
+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+ if (command & IXGBE_MSCA_MDI_COMMAND)
+ return IXGBE_ERR_PHY;
+
+ /* Read operation is complete. Get the data from MSRWD */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)data;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t phy_data)
+{
+ struct ixgbe_hw *hw;
+ u32 i, command;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ /* Put the data in the MDI single read and write data register*/
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+ /* Setup and write the write command */
+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+ if (command & IXGBE_MSCA_MDI_COMMAND) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "PHY write cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.h b/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.h
new file mode 100644
index 00000000..72a941f9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -0,0 +1,724 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+/**
+ * @file rte_pmd_ixgbe.h
+ * ixgbe PMD specific functions.
+ *
+ **/
+
+#ifndef _PMD_IXGBE_H_
+#define _PMD_IXGBE_H_
+
+#include <rte_ethdev_driver.h>
+
+/**
+ * Notify VF when PF link status changes.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* invalid.
+ */
+int rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf);
+
+/**
+ * Set the VF MAC address.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
+ struct ether_addr *mac_addr);
+
+/**
+ * Enable/Disable VF VLAN anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF on which to set VLAN anti spoofing.
+ * @param on
+ * 1 - Enable VFs VLAN anti spoofing.
+ * 0 - Disable VFs VLAN anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF MAC anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF on which to set MAC anti spoofing.
+ * @param on
+ * 1 - Enable VFs MAC anti spoofing.
+ * 0 - Disable VFs MAC anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable vf vlan insert
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param vlan_id
+ * 0 - Disable VF's vlan insert.
+ * n - Enable; n is inserted as the vlan id.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf,
+ uint16_t vlan_id);
+
+/**
+ * Enable/Disable tx loopback
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Enable tx loopback.
+ * 0 - Disable tx loopback.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on);
+
+/**
+ * set all queues drop enable bit
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - set the queue drop enable bit for all pools.
+ * 0 - reset the queue drop enable bit for all pools.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on);
+
+/**
+ * set drop enable bit in the VF split rx control register
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - set the drop enable bit in the split rx control register.
+ * 0 - reset the drop enable bit in the split rx control register.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+
+int rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable vf vlan strip for all queues in a pool
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - Enable VF's vlan strip on RX queues.
+ * 0 - Disable VF's vlan strip on RX queues.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable MACsec offload.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param en
+ * 1 - Enable encryption (encrypt and add integrity signature).
+ * 0 - Disable encryption (only add integrity signature).
+ * @param rp
+ * 1 - Enable replay protection.
+ * 0 - Disable replay protection.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp);
+
+/**
+ * Disable MACsec offload.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_disable(uint16_t port);
+
+/**
+ * Configure Tx SC (Secure Connection).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac
+ * The MAC address on the local side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac);
+
+/**
+ * Configure Rx SC (Secure Connection).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac
+ * The MAC address on the remote side.
+ * @param pi
+ * The PI (port identifier) on the remote side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi);
+
+/**
+ * Enable Tx SA (Secure Association).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param idx
+ * The SA to be enabled (0 or 1).
+ * @param an
+ * The association number on the local side.
+ * @param pn
+ * The packet number on the local side.
+ * @param key
+ * The key on the local side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key);
+
+/**
+ * Enable Rx SA (Secure Association).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param idx
+ * The SA to be enabled (0 or 1)
+ * @param an
+ * The association number on the remote side.
+ * @param pn
+ * The packet number on the remote side.
+ * @param key
+ * The key on the remote side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key);
+
+/**
+* Set RX L2 Filtering mode of a VF of an Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param rx_mask
+* The RX mode mask, which is one or more of accepting Untagged Packets,
+* packets that match the PFUTA table, Broadcast and Multicast Promiscuous.
+* ETH_VMDQ_ACCEPT_UNTAG,ETH_VMDQ_ACCEPT_HASH_UC,
+* ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST will be used
+* in rx_mode.
+* @param on
+* 1 - Enable a VF RX mode.
+* 0 - Disable a VF RX mode.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf, uint16_t rx_mask,
+ uint8_t on);
+
+/**
+* Enable or disable a VF traffic receive of an Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param on
+* 1 - Enable a VF traffic receive.
+* 0 - Disable a VF traffic receive.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+* Enable or disable a VF traffic transmit of the Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param on
+* 1 - Enable a VF traffic transmit.
+* 0 - Disable a VF traffic transmit.
+* @return
+* - (0) if successful.
+* - (-ENODEV) if *port_id* invalid.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+* Enable/Disable hardware VF VLAN filtering by an Ethernet device of
+* received VLAN packets tagged with a given VLAN Tag Identifier.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vlan
+* The VLAN Tag Identifier whose filtering must be enabled or disabled.
+* @param vf_mask
+* Bitmap listing which VFs participate in the VLAN filtering.
+* @param vlan_on
+* 1 - Enable VFs VLAN filtering.
+* 0 - Disable VFs VLAN filtering.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on);
+
+/**
+ * Set the rate limitation for a vf on an Ethernet device.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param tx_rate
+ * The tx rate allocated from the total link speed for this VF id.
+ * @param q_msk
+ * The queue mask which need to set the rate.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
+
+/**
+ * Set all the TCs' bandwidth weight.
+ *
+ * The bw_weight means the percentage occupied by the TC.
+ * It can be taken as the relative min bandwidth setting.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param tc_num
+ * Number of TCs.
+ * @param bw_weight
+ * An array of relative bandwidth weight for all the TCs.
+ * The summary of the bw_weight should be 100.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
+ uint8_t tc_num,
+ uint8_t *bw_weight);
+
+
+/**
+ * Initialize bypass logic. This function needs to be called before
+ * executing any other bypass API.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_init(uint16_t port);
+
+/**
+ * Return bypass state.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param state
+ * The return bypass state.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_state_show(uint16_t port, uint32_t *state);
+
+/**
+ * Set bypass state
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param new_state
+ * The current bypass state.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_state_set(uint16_t port, uint32_t *new_state);
+
+/**
+ * Return bypass state when given event occurs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param event
+ * The bypass event
+ * - (1) Main power on (power button is pushed)
+ * - (2) Auxiliary power on (power supply is being plugged)
+ * - (3) Main power off (system shutdown and power supply is left plugged in)
+ * - (4) Auxiliary power off (power supply is being unplugged)
+ * - (5) Display or set the watchdog timer
+ * @param state
+ * The bypass state when given event occurred.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_event_show(uint16_t port,
+ uint32_t event,
+ uint32_t *state);
+
+/**
+ * Set bypass state when given event occurs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param event
+ * The bypass event
+ * - (1) Main power on (power button is pushed)
+ * - (2) Auxiliary power on (power supply is being plugged)
+ * - (3) Main power off (system shutdown and power supply is left plugged in)
+ * - (4) Auxiliary power off (power supply is being unplugged)
+ * - (5) Display or set the watchdog timer
+ * @param state
+ * The assigned state when given event occurs.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_event_store(uint16_t port,
+ uint32_t event,
+ uint32_t state);
+
+/**
+ * Set bypass watchdog timeout count.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param timeout
+ * The timeout to be set.
+ * - (0) 0 seconds (timer is off)
+ * - (1) 1.5 seconds
+ * - (2) 2 seconds
+ * - (3) 3 seconds
+ * - (4) 4 seconds
+ * - (5) 8 seconds
+ * - (6) 16 seconds
+ * - (7) 32 seconds
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port, uint32_t timeout);
+
+/**
+ * Get bypass firmware version.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param ver
+ * The firmware version
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_ver_show(uint16_t port, uint32_t *ver);
+
+/**
+ * Return bypass watchdog timeout in seconds
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param wd_timeout
+ * The return watchdog timeout. "0" represents timer expired
+ * - (0) 0 seconds (timer is off)
+ * - (1) 1.5 seconds
+ * - (2) 2 seconds
+ * - (3) 3 seconds
+ * - (4) 4 seconds
+ * - (5) 8 seconds
+ * - (6) 16 seconds
+ * - (7) 32 seconds
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port, uint32_t *wd_timeout);
+
+/**
+ * Reset bypass watchdog timer
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_wd_reset(uint16_t port);
+
+/**
+ * Acquire swfw semaphore lock for MDIO access
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (IXGBE_ERR_SWFW_SYNC) If sw/fw semaphore acquisition failed
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_lock(uint16_t port);
+
+/**
+ * Release swfw semaphore lock used for MDIO access
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlock(uint16_t port);
+
+/**
+ * Read PHY register using MDIO without MDIO lock
+ * The lock must be taken separately before calling this
+ * API
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param reg_addr
+ * 32 bit PHY Register
+ * @param dev_type
+ * Used to define device base address
+ * @param phy_data
+ * Pointer for reading PHY register data
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (IXGBE_ERR_PHY) If PHY read command failed
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t *phy_data);
+
+/**
+ * Write data to PHY register using without MDIO lock
+ * The lock must be taken separately before calling this
+ * API
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param reg_addr
+ * 32 bit PHY Register
+ * @param dev_type
+ * Used to define device base address
+ * @param phy_data
+ * Data to write to PHY register
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (IXGBE_ERR_PHY) If PHY read command failed
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t phy_data);
+
+/**
+ * Response sent back to ixgbe driver from user app after callback
+ */
+enum rte_pmd_ixgbe_mb_event_rsp {
+ RTE_PMD_IXGBE_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */
+ RTE_PMD_IXGBE_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */
+ RTE_PMD_IXGBE_MB_EVENT_PROCEED, /**< proceed with mbox request */
+ RTE_PMD_IXGBE_MB_EVENT_MAX /**< max value of this enum */
+};
+
+/**
+ * Data sent to the user application when the callback is executed.
+ */
+struct rte_pmd_ixgbe_mb_event_param {
+ uint16_t vfid; /**< Virtual Function number */
+ uint16_t msg_type; /**< VF to PF message type, defined in ixgbe_mbx.h */
+ uint16_t retval; /**< return value */
+ void *msg; /**< pointer to message */
+};
+enum {
+ RTE_PMD_IXGBE_BYPASS_MODE_NONE,
+ RTE_PMD_IXGBE_BYPASS_MODE_NORMAL,
+ RTE_PMD_IXGBE_BYPASS_MODE_BYPASS,
+ RTE_PMD_IXGBE_BYPASS_MODE_ISOLATE,
+ RTE_PMD_IXGBE_BYPASS_MODE_NUM,
+};
+
+#define RTE_PMD_IXGBE_BYPASS_MODE_VALID(x) \
+ ((x) > RTE_PMD_IXGBE_BYPASS_MODE_NONE && \
+ (x) < RTE_PMD_IXGBE_BYPASS_MODE_NUM)
+
+enum {
+ RTE_PMD_IXGBE_BYPASS_EVENT_NONE,
+ RTE_PMD_IXGBE_BYPASS_EVENT_START,
+ RTE_PMD_IXGBE_BYPASS_EVENT_OS_ON = RTE_PMD_IXGBE_BYPASS_EVENT_START,
+ RTE_PMD_IXGBE_BYPASS_EVENT_POWER_ON,
+ RTE_PMD_IXGBE_BYPASS_EVENT_OS_OFF,
+ RTE_PMD_IXGBE_BYPASS_EVENT_POWER_OFF,
+ RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT,
+ RTE_PMD_IXGBE_BYPASS_EVENT_NUM
+};
+
+#define RTE_PMD_IXGBE_BYPASS_EVENT_VALID(x) \
+ ((x) > RTE_PMD_IXGBE_BYPASS_EVENT_NONE && \
+ (x) < RTE_PMD_IXGBE_BYPASS_MODE_NUM)
+
+enum {
+ RTE_PMD_IXGBE_BYPASS_TMT_OFF, /* timeout disabled. */
+ RTE_PMD_IXGBE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_2_SEC, /* timeout for 2 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_3_SEC, /* timeout for 3 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_4_SEC, /* timeout for 4 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_8_SEC, /* timeout for 8 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_16_SEC, /* timeout for 16 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_32_SEC, /* timeout for 32 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_NUM
+};
+
+#define RTE_PMD_IXGBE_BYPASS_TMT_VALID(x) \
+ ((x) == RTE_PMD_IXGBE_BYPASS_TMT_OFF || \
+ ((x) > RTE_PMD_IXGBE_BYPASS_TMT_OFF && \
+ (x) < RTE_PMD_IXGBE_BYPASS_TMT_NUM))
+
+/**
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param enable
+ * 0 to disable and nonzero to enable 'SBP' bit in FCTRL register
+ * to receive all packets
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int __rte_experimental
+rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable);
+#endif /* _PMD_IXGBE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
new file mode 100644
index 00000000..c814f96d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -0,0 +1,64 @@
+DPDK_2.0 {
+
+ local: *;
+};
+
+DPDK_16.11 {
+ global:
+
+ rte_pmd_ixgbe_set_all_queues_drop_en;
+ rte_pmd_ixgbe_set_tx_loopback;
+ rte_pmd_ixgbe_set_vf_mac_addr;
+ rte_pmd_ixgbe_set_vf_mac_anti_spoof;
+ rte_pmd_ixgbe_set_vf_split_drop_en;
+ rte_pmd_ixgbe_set_vf_vlan_anti_spoof;
+ rte_pmd_ixgbe_set_vf_vlan_insert;
+ rte_pmd_ixgbe_set_vf_vlan_stripq;
+} DPDK_2.0;
+
+DPDK_17.02 {
+ global:
+
+ rte_pmd_ixgbe_macsec_config_rxsc;
+ rte_pmd_ixgbe_macsec_config_txsc;
+ rte_pmd_ixgbe_macsec_disable;
+ rte_pmd_ixgbe_macsec_enable;
+ rte_pmd_ixgbe_macsec_select_rxsa;
+ rte_pmd_ixgbe_macsec_select_txsa;
+ rte_pmd_ixgbe_set_vf_rate_limit;
+ rte_pmd_ixgbe_set_vf_rx;
+ rte_pmd_ixgbe_set_vf_rxmode;
+ rte_pmd_ixgbe_set_vf_tx;
+ rte_pmd_ixgbe_set_vf_vlan_filter;
+} DPDK_16.11;
+
+DPDK_17.05 {
+ global:
+
+ rte_pmd_ixgbe_ping_vf;
+ rte_pmd_ixgbe_set_tc_bw_alloc;
+} DPDK_17.02;
+
+DPDK_17.08 {
+ global:
+
+ rte_pmd_ixgbe_bypass_event_show;
+ rte_pmd_ixgbe_bypass_event_store;
+ rte_pmd_ixgbe_bypass_init;
+ rte_pmd_ixgbe_bypass_state_set;
+ rte_pmd_ixgbe_bypass_state_show;
+ rte_pmd_ixgbe_bypass_ver_show;
+ rte_pmd_ixgbe_bypass_wd_reset;
+ rte_pmd_ixgbe_bypass_wd_timeout_show;
+ rte_pmd_ixgbe_bypass_wd_timeout_store;
+} DPDK_17.05;
+
+EXPERIMENTAL {
+ global:
+
+ rte_pmd_ixgbe_mdio_lock;
+ rte_pmd_ixgbe_mdio_unlock;
+ rte_pmd_ixgbe_mdio_unlocked_read;
+ rte_pmd_ixgbe_mdio_unlocked_write;
+ rte_pmd_ixgbe_upd_fctrl_sbp;
+};
diff --git a/src/spdk/dpdk/drivers/net/kni/Makefile b/src/spdk/dpdk/drivers/net/kni/Makefile
new file mode 100644
index 00000000..562e8d2d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/kni/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_kni.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+LDLIBS += -lpthread
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_kni
+LDLIBS += -lrte_bus_vdev
+
+EXPORT_MAP := rte_pmd_kni_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KNI) += rte_eth_kni.c
+
+#
+# Export include files
+#
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/kni/meson.build b/src/spdk/dpdk/drivers/net/kni/meson.build
new file mode 100644
index 00000000..0f784c6d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/kni/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+# this driver can be built if-and-only-if KNI library is buildable
+build = dpdk_conf.has('RTE_LIBRTE_KNI')
+allow_experimental_apis = true
+sources = files('rte_eth_kni.c')
+deps += 'kni'
diff --git a/src/spdk/dpdk/drivers/net/kni/rte_eth_kni.c b/src/spdk/dpdk/drivers/net/kni/rte_eth_kni.c
new file mode 100644
index 00000000..085bb845
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/kni/rte_eth_kni.c
@@ -0,0 +1,497 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <fcntl.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_kni.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_bus_vdev.h>
+
+/* Only single queue supported */
+#define KNI_MAX_QUEUE_PER_PORT 1
+
+#define MAX_PACKET_SZ 2048
+#define MAX_KNI_PORTS 8
+
+#define ETH_KNI_NO_REQUEST_THREAD_ARG "no_request_thread"
+static const char * const valid_arguments[] = {
+ ETH_KNI_NO_REQUEST_THREAD_ARG,
+ NULL
+};
+
+struct eth_kni_args {
+ int no_request_thread;
+};
+
+struct pmd_queue_stats {
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t err_pkts;
+};
+
+struct pmd_queue {
+ struct pmd_internals *internals;
+ struct rte_mempool *mb_pool;
+
+ struct pmd_queue_stats rx;
+ struct pmd_queue_stats tx;
+};
+
+struct pmd_internals {
+ struct rte_kni *kni;
+ int is_kni_started;
+
+ pthread_t thread;
+ int stop_thread;
+ int no_request_thread;
+
+ struct ether_addr eth_addr;
+
+ struct pmd_queue rx_queues[KNI_MAX_QUEUE_PER_PORT];
+ struct pmd_queue tx_queues[KNI_MAX_QUEUE_PER_PORT];
+};
+
+static const struct rte_eth_link pmd_link = {
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_FIXED,
+};
+static int is_kni_initialized;
+
+static int eth_kni_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, eth_kni_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+static uint16_t
+eth_kni_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ struct pmd_queue *kni_q = q;
+ struct rte_kni *kni = kni_q->internals->kni;
+ uint16_t nb_pkts;
+
+ nb_pkts = rte_kni_rx_burst(kni, bufs, nb_bufs);
+
+ kni_q->rx.pkts += nb_pkts;
+ kni_q->rx.err_pkts += nb_bufs - nb_pkts;
+
+ return nb_pkts;
+}
+
+static uint16_t
+eth_kni_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ struct pmd_queue *kni_q = q;
+ struct rte_kni *kni = kni_q->internals->kni;
+ uint16_t nb_pkts;
+
+ nb_pkts = rte_kni_tx_burst(kni, bufs, nb_bufs);
+
+ kni_q->tx.pkts += nb_pkts;
+ kni_q->tx.err_pkts += nb_bufs - nb_pkts;
+
+ return nb_pkts;
+}
+
+static void *
+kni_handle_request(void *param)
+{
+ struct pmd_internals *internals = param;
+#define MS 1000
+
+ while (!internals->stop_thread) {
+ rte_kni_handle_request(internals->kni);
+ usleep(500 * MS);
+ }
+
+ return param;
+}
+
+static int
+eth_kni_start(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ uint16_t port_id = dev->data->port_id;
+ struct rte_mempool *mb_pool;
+ struct rte_kni_conf conf;
+ const char *name = dev->device->name + 4; /* remove net_ */
+
+ snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", name);
+ conf.force_bind = 0;
+ conf.group_id = port_id;
+ conf.mbuf_size = MAX_PACKET_SZ;
+ mb_pool = internals->rx_queues[0].mb_pool;
+
+ internals->kni = rte_kni_alloc(mb_pool, &conf, NULL);
+ if (internals->kni == NULL) {
+ PMD_LOG(ERR,
+ "Fail to create kni interface for port: %d",
+ port_id);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+eth_kni_dev_start(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ int ret;
+
+ if (internals->is_kni_started == 0) {
+ ret = eth_kni_start(dev);
+ if (ret)
+ return -1;
+ internals->is_kni_started = 1;
+ }
+
+ if (internals->no_request_thread == 0) {
+ ret = rte_ctrl_thread_create(&internals->thread,
+ "kni_handle_req", NULL,
+ kni_handle_request, internals);
+ if (ret) {
+ PMD_LOG(ERR,
+ "Fail to create kni request thread");
+ return -1;
+ }
+ }
+
+ dev->data->dev_link.link_status = 1;
+
+ return 0;
+}
+
+static void
+eth_kni_dev_stop(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ int ret;
+
+ if (internals->no_request_thread == 0) {
+ internals->stop_thread = 1;
+
+ ret = pthread_cancel(internals->thread);
+ if (ret)
+ PMD_LOG(ERR, "Can't cancel the thread");
+
+ ret = pthread_join(internals->thread, NULL);
+ if (ret)
+ PMD_LOG(ERR, "Can't join the thread");
+
+ internals->stop_thread = 0;
+ }
+
+ dev->data->dev_link.link_status = 0;
+}
+
+static int
+eth_kni_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_dev_info *dev_info)
+{
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = UINT32_MAX;
+ dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT;
+ dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
+}
+
+static int
+eth_kni_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_queue *q;
+
+ q = &internals->rx_queues[rx_queue_id];
+ q->internals = internals;
+ q->mb_pool = mb_pool;
+
+ dev->data->rx_queues[rx_queue_id] = q;
+
+ return 0;
+}
+
+static int
+eth_kni_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_queue *q;
+
+ q = &internals->tx_queues[tx_queue_id];
+ q->internals = internals;
+
+ dev->data->tx_queues[tx_queue_id] = q;
+
+ return 0;
+}
+
+static void
+eth_kni_queue_release(void *q __rte_unused)
+{
+}
+
+static int
+eth_kni_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
+{
+ return 0;
+}
+
+static int
+eth_kni_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ unsigned long rx_packets_total = 0, rx_bytes_total = 0;
+ unsigned long tx_packets_total = 0, tx_bytes_total = 0;
+ struct rte_eth_dev_data *data = dev->data;
+ unsigned long tx_packets_err_total = 0;
+ unsigned int i, num_stats;
+ struct pmd_queue *q;
+
+ num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+ data->nb_rx_queues);
+ for (i = 0; i < num_stats; i++) {
+ q = data->rx_queues[i];
+ stats->q_ipackets[i] = q->rx.pkts;
+ stats->q_ibytes[i] = q->rx.bytes;
+ rx_packets_total += stats->q_ipackets[i];
+ rx_bytes_total += stats->q_ibytes[i];
+ }
+
+ num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+ data->nb_tx_queues);
+ for (i = 0; i < num_stats; i++) {
+ q = data->tx_queues[i];
+ stats->q_opackets[i] = q->tx.pkts;
+ stats->q_obytes[i] = q->tx.bytes;
+ stats->q_errors[i] = q->tx.err_pkts;
+ tx_packets_total += stats->q_opackets[i];
+ tx_bytes_total += stats->q_obytes[i];
+ tx_packets_err_total += stats->q_errors[i];
+ }
+
+ stats->ipackets = rx_packets_total;
+ stats->ibytes = rx_bytes_total;
+ stats->opackets = tx_packets_total;
+ stats->obytes = tx_bytes_total;
+ stats->oerrors = tx_packets_err_total;
+
+ return 0;
+}
+
+static void
+eth_kni_stats_reset(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct pmd_queue *q;
+ unsigned int i;
+
+ for (i = 0; i < data->nb_rx_queues; i++) {
+ q = data->rx_queues[i];
+ q->rx.pkts = 0;
+ q->rx.bytes = 0;
+ }
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ q = data->tx_queues[i];
+ q->tx.pkts = 0;
+ q->tx.bytes = 0;
+ q->tx.err_pkts = 0;
+ }
+}
+
+static const struct eth_dev_ops eth_kni_ops = {
+ .dev_start = eth_kni_dev_start,
+ .dev_stop = eth_kni_dev_stop,
+ .dev_configure = eth_kni_dev_configure,
+ .dev_infos_get = eth_kni_dev_info,
+ .rx_queue_setup = eth_kni_rx_queue_setup,
+ .tx_queue_setup = eth_kni_tx_queue_setup,
+ .rx_queue_release = eth_kni_queue_release,
+ .tx_queue_release = eth_kni_queue_release,
+ .link_update = eth_kni_link_update,
+ .stats_get = eth_kni_stats_get,
+ .stats_reset = eth_kni_stats_reset,
+};
+
+static struct rte_eth_dev *
+eth_kni_create(struct rte_vdev_device *vdev,
+ struct eth_kni_args *args,
+ unsigned int numa_node)
+{
+ struct pmd_internals *internals;
+ struct rte_eth_dev_data *data;
+ struct rte_eth_dev *eth_dev;
+
+ PMD_LOG(INFO, "Creating kni ethdev on numa socket %u",
+ numa_node);
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*internals));
+ if (!eth_dev)
+ return NULL;
+
+ internals = eth_dev->data->dev_private;
+ data = eth_dev->data;
+ data->nb_rx_queues = 1;
+ data->nb_tx_queues = 1;
+ data->dev_link = pmd_link;
+ data->mac_addrs = &internals->eth_addr;
+
+ eth_random_addr(internals->eth_addr.addr_bytes);
+
+ eth_dev->dev_ops = &eth_kni_ops;
+
+ internals->no_request_thread = args->no_request_thread;
+
+ return eth_dev;
+}
+
+static int
+kni_init(void)
+{
+ if (is_kni_initialized == 0)
+ rte_kni_init(MAX_KNI_PORTS);
+
+ is_kni_initialized++;
+
+ return 0;
+}
+
+static int
+eth_kni_kvargs_process(struct eth_kni_args *args, const char *params)
+{
+ struct rte_kvargs *kvlist;
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist == NULL)
+ return -1;
+
+ memset(args, 0, sizeof(struct eth_kni_args));
+
+ if (rte_kvargs_count(kvlist, ETH_KNI_NO_REQUEST_THREAD_ARG) == 1)
+ args->no_request_thread = 1;
+
+ rte_kvargs_free(kvlist);
+
+ return 0;
+}
+
+static int
+eth_kni_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_eth_dev *eth_dev;
+ struct eth_kni_args args;
+ const char *name;
+ const char *params;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+ params = rte_vdev_device_args(vdev);
+ PMD_LOG(INFO, "Initializing eth_kni for %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(params) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &eth_kni_ops;
+ eth_dev->device = &vdev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ ret = eth_kni_kvargs_process(&args, params);
+ if (ret < 0)
+ return ret;
+
+ ret = kni_init();
+ if (ret < 0)
+ return ret;
+
+ eth_dev = eth_kni_create(vdev, &args, rte_socket_id());
+ if (eth_dev == NULL)
+ goto kni_uninit;
+
+ eth_dev->rx_pkt_burst = eth_kni_rx;
+ eth_dev->tx_pkt_burst = eth_kni_tx;
+
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+
+kni_uninit:
+ is_kni_initialized--;
+ if (is_kni_initialized == 0)
+ rte_kni_close();
+ return -1;
+}
+
+static int
+eth_kni_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_eth_dev *eth_dev;
+ struct pmd_internals *internals;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ PMD_LOG(INFO, "Un-Initializing eth_kni for %s", name);
+
+ /* find the ethdev entry */
+ eth_dev = rte_eth_dev_allocated(name);
+ if (eth_dev == NULL)
+ return -1;
+
+ eth_kni_dev_stop(eth_dev);
+
+ internals = eth_dev->data->dev_private;
+ rte_kni_release(internals->kni);
+
+ rte_free(internals);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ is_kni_initialized--;
+ if (is_kni_initialized == 0)
+ rte_kni_close();
+
+ return 0;
+}
+
+static struct rte_vdev_driver eth_kni_drv = {
+ .probe = eth_kni_probe,
+ .remove = eth_kni_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_kni, eth_kni_drv);
+RTE_PMD_REGISTER_PARAM_STRING(net_kni, ETH_KNI_NO_REQUEST_THREAD_ARG "=<int>");
+
+RTE_INIT(eth_kni_init_log)
+{
+ eth_kni_logtype = rte_log_register("pmd.net.kni");
+ if (eth_kni_logtype >= 0)
+ rte_log_set_level(eth_kni_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/kni/rte_pmd_kni_version.map b/src/spdk/dpdk/drivers/net/kni/rte_pmd_kni_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/kni/rte_pmd_kni_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/liquidio/Makefile b/src/spdk/dpdk/drivers/net/liquidio/Makefile
new file mode 100644
index 00000000..f1092851
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_lio.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)/base -I$(SRCDIR)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+EXPORT_MAP := rte_pmd_liquidio_version.map
+
+LIBABIVER := 1
+
+VPATH += $(RTE_SDK)/drivers/net/liquidio/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_23xx_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_mbox.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_reg.h b/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_reg.h
new file mode 100644
index 00000000..9f28504b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_reg.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _LIO_23XX_REG_H_
+#define _LIO_23XX_REG_H_
+
+/* ###################### REQUEST QUEUE ######################### */
+
+/* 64 registers for Input Queues Start Addr - SLI_PKT(0..63)_INSTR_BADDR */
+#define CN23XX_SLI_PKT_INSTR_BADDR_START64 0x10010
+
+/* 64 registers for Input Doorbell - SLI_PKT(0..63)_INSTR_BAOFF_DBELL */
+#define CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START 0x10020
+
+/* 64 registers for Input Queue size - SLI_PKT(0..63)_INSTR_FIFO_RSIZE */
+#define CN23XX_SLI_PKT_INSTR_FIFO_RSIZE_START 0x10030
+
+/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE(0..63)_CNTS */
+#define CN23XX_SLI_PKT_IN_DONE_CNTS_START64 0x10040
+
+/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL.
+ */
+#define CN23XX_SLI_PKT_INPUT_CONTROL_START64 0x10000
+
+/* ------- Request Queue Macros --------- */
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_IQ_OFFSET 0x20000
+
+#define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \
+ (CN23XX_SLI_PKT_INPUT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_BASE_ADDR64(iq) \
+ (CN23XX_SLI_PKT_INSTR_BADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_SIZE(iq) \
+ (CN23XX_SLI_PKT_INSTR_FIFO_RSIZE_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_DOORBELL(iq) \
+ (CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \
+ (CN23XX_SLI_PKT_IN_DONE_CNTS_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+/* Number of instructions to be read in one MAC read request.
+ * setting to Max value(4)
+ */
+#define CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25)
+#define CN23XX_PKT_INPUT_CTL_IS_64B (1 << 24)
+#define CN23XX_PKT_INPUT_CTL_RST (1 << 23)
+#define CN23XX_PKT_INPUT_CTL_QUIET (1 << 28)
+#define CN23XX_PKT_INPUT_CTL_RING_ENB (1 << 22)
+#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP (1 << 6)
+#define CN23XX_PKT_INPUT_CTL_USE_CSR (1 << 4)
+#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2)
+
+/* These bits[47:44] select the Physical function number within the MAC */
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS 45
+/* These bits[43:32] select the function number within the PF */
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS 32
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR)
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR | \
+ CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP)
+#endif
+
+/* ############################ OUTPUT QUEUE ######################### */
+
+/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */
+#define CN23XX_SLI_PKT_OUTPUT_CONTROL_START 0x10050
+
+/* 64 registers for Output queue buffer and info size
+ * SLI_PKT(0..63)_OUT_SIZE
+ */
+#define CN23XX_SLI_PKT_OUT_SIZE 0x10060
+
+/* 64 registers for Output Queue Start Addr - SLI_PKT(0..63)_SLIST_BADDR */
+#define CN23XX_SLI_SLIST_BADDR_START64 0x10070
+
+/* 64 registers for Output Queue Packet Credits
+ * SLI_PKT(0..63)_SLIST_BAOFF_DBELL
+ */
+#define CN23XX_SLI_PKT_SLIST_BAOFF_DBELL_START 0x10080
+
+/* 64 registers for Output Queue size - SLI_PKT(0..63)_SLIST_FIFO_RSIZE */
+#define CN23XX_SLI_PKT_SLIST_FIFO_RSIZE_START 0x10090
+
+/* 64 registers for Output Queue Packet Count - SLI_PKT(0..63)_CNTS */
+#define CN23XX_SLI_PKT_CNTS_START 0x100B0
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_OQ_OFFSET 0x20000
+
+/* ------- Output Queue Macros --------- */
+
+#define CN23XX_SLI_OQ_PKT_CONTROL(oq) \
+ (CN23XX_SLI_PKT_OUTPUT_CONTROL_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BASE_ADDR64(oq) \
+ (CN23XX_SLI_SLIST_BADDR_START64 + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_SIZE(oq) \
+ (CN23XX_SLI_PKT_SLIST_FIFO_RSIZE_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq) \
+ (CN23XX_SLI_PKT_OUT_SIZE + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_SENT(oq) \
+ (CN23XX_SLI_PKT_CNTS_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_CREDIT(oq) \
+ (CN23XX_SLI_PKT_SLIST_BAOFF_DBELL_START + ((oq) * CN23XX_OQ_OFFSET))
+
+/* ------------------ Masks ---------------- */
+#define CN23XX_PKT_OUTPUT_CTL_IPTR (1 << 11)
+#define CN23XX_PKT_OUTPUT_CTL_ES (1 << 9)
+#define CN23XX_PKT_OUTPUT_CTL_NSR (1 << 8)
+#define CN23XX_PKT_OUTPUT_CTL_ROR (1 << 7)
+#define CN23XX_PKT_OUTPUT_CTL_DPTR (1 << 6)
+#define CN23XX_PKT_OUTPUT_CTL_BMODE (1 << 5)
+#define CN23XX_PKT_OUTPUT_CTL_ES_P (1 << 3)
+#define CN23XX_PKT_OUTPUT_CTL_NSR_P (1 << 2)
+#define CN23XX_PKT_OUTPUT_CTL_ROR_P (1 << 1)
+#define CN23XX_PKT_OUTPUT_CTL_RING_ENB (1 << 0)
+
+/* Rings per Virtual Function [RO] */
+#define CN23XX_PKT_INPUT_CTL_RPVF_MASK 0x3F
+#define CN23XX_PKT_INPUT_CTL_RPVF_POS 48
+
+/* These bits[47:44][RO] give the Physical function
+ * number info within the MAC
+ */
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK 0x7
+
+/* These bits[43:32][RO] give the virtual function
+ * number info within the PF
+ */
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK 0x1FFF
+
+/* ######################### Mailbox Reg Macros ######################## */
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START 0x10200
+#define CN23XX_VF_SLI_PKT_MBOX_INT_START 0x10210
+
+#define CN23XX_SLI_MBOX_OFFSET 0x20000
+#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET 0x8
+
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \
+ (CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START + \
+ ((q) * CN23XX_SLI_MBOX_OFFSET + \
+ (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET))
+
+#define CN23XX_VF_SLI_PKT_MBOX_INT(q) \
+ (CN23XX_VF_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
+
+#endif /* _LIO_23XX_REG_H_ */
diff --git a/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.c b/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.c
new file mode 100644
index 00000000..ddbc8c0e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.c
@@ -0,0 +1,513 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+
+#include "lio_logs.h"
+#include "lio_23xx_vf.h"
+#include "lio_23xx_reg.h"
+#include "lio_mbox.h"
+
+static int
+cn23xx_vf_reset_io_queues(struct lio_device *lio_dev, uint32_t num_queues)
+{
+ uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
+ uint64_t d64, q_no;
+ int ret_val = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (q_no = 0; q_no < num_queues; q_no++) {
+ /* set RST bit to 1. This bit applies to both IQ and OQ */
+ d64 = lio_read_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ d64);
+ }
+
+ /* wait until the RST bit is clear or the RST and QUIET bits are set */
+ for (q_no = 0; q_no < num_queues; q_no++) {
+ volatile uint64_t reg_val;
+
+ reg_val = lio_read_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop) {
+ reg_val = lio_read_csr64(
+ lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ loop = loop - 1;
+ }
+
+ if (loop == 0) {
+ lio_dev_err(lio_dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %lu\n",
+ (unsigned long)q_no);
+ return -1;
+ }
+
+ reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ reg_val = lio_read_csr64(
+ lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ lio_dev_err(lio_dev,
+ "clearing the reset failed for qno: %lu\n",
+ (unsigned long)q_no);
+ ret_val = -1;
+ }
+ }
+
+ return ret_val;
+}
+
+static int
+cn23xx_vf_setup_global_input_regs(struct lio_device *lio_dev)
+{
+ uint64_t q_no;
+ uint64_t d64;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (cn23xx_vf_reset_io_queues(lio_dev,
+ lio_dev->sriov_info.rings_per_vf))
+ return -1;
+
+ for (q_no = 0; q_no < (lio_dev->sriov_info.rings_per_vf); q_no++) {
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_DOORBELL(q_no),
+ 0xFFFFFFFF);
+
+ d64 = lio_read_csr64(lio_dev,
+ CN23XX_SLI_IQ_INSTR_COUNT64(q_no));
+
+ d64 &= 0xEFFFFFFFFFFFFFFFL;
+
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_INSTR_COUNT64(q_no),
+ d64);
+
+ /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for
+ * the Input Queues
+ */
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ CN23XX_PKT_INPUT_CTL_MASK);
+ }
+
+ return 0;
+}
+
+static void
+cn23xx_vf_setup_global_output_regs(struct lio_device *lio_dev)
+{
+ uint32_t reg_val;
+ uint32_t q_no;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
+ 0xFFFFFFFF);
+
+ reg_val =
+ lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no));
+
+ reg_val &= 0xEFFFFFFFFFFFFFFFL;
+
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no), reg_val);
+
+ reg_val =
+ lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+
+ /* set IPTR & DPTR */
+ reg_val |=
+ (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+
+ /* reset BMODE */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
+
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue Scatter List
+ * reset ROR_P, NSR_P
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
+#endif
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue Data
+ * reset ROR, NSR
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
+ /* set the ES bit */
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
+
+ /* write all the selected settings */
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
+ reg_val);
+ }
+}
+
+static int
+cn23xx_vf_setup_device_regs(struct lio_device *lio_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (cn23xx_vf_setup_global_input_regs(lio_dev))
+ return -1;
+
+ cn23xx_vf_setup_global_output_regs(lio_dev);
+
+ return 0;
+}
+
+static void
+cn23xx_vf_setup_iq_regs(struct lio_device *lio_dev, uint32_t iq_no)
+{
+ struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
+ uint64_t pkt_in_done = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Write the start of the input queue's ring and its size */
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
+ iq->base_addr_dma);
+ lio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->nb_desc);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_IQ_DOORBELL(iq_no);
+ iq->inst_cnt_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
+ lio_dev_dbg(lio_dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter (used in flush_iq
+ * calculation)
+ */
+ pkt_in_done = rte_read64(iq->inst_cnt_reg);
+
+ /* Clear the count by writing back what we read, but don't
+ * enable data traffic here
+ */
+ rte_write64(pkt_in_done, iq->inst_cnt_reg);
+}
+
+static void
+cn23xx_vf_setup_oq_regs(struct lio_device *lio_dev, uint32_t oq_no)
+{
+ struct lio_droq *droq = lio_dev->droq[oq_no];
+
+ PMD_INIT_FUNC_TRACE();
+
+ lio_write_csr64(lio_dev, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_SIZE(oq_no), droq->nb_desc);
+
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ (droq->buffer_size | (OCTEON_RH_SIZE << 16)));
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);
+}
+
+static void
+cn23xx_vf_free_mbox(struct lio_device *lio_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_free(lio_dev->mbox[0]);
+ lio_dev->mbox[0] = NULL;
+
+ rte_free(lio_dev->mbox);
+ lio_dev->mbox = NULL;
+}
+
+static int
+cn23xx_vf_setup_mbox(struct lio_device *lio_dev)
+{
+ struct lio_mbox *mbox;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (lio_dev->mbox == NULL) {
+ lio_dev->mbox = rte_zmalloc(NULL, sizeof(void *), 0);
+ if (lio_dev->mbox == NULL)
+ return -ENOMEM;
+ }
+
+ mbox = rte_zmalloc(NULL, sizeof(struct lio_mbox), 0);
+ if (mbox == NULL) {
+ rte_free(lio_dev->mbox);
+ lio_dev->mbox = NULL;
+ return -ENOMEM;
+ }
+
+ rte_spinlock_init(&mbox->lock);
+
+ mbox->lio_dev = lio_dev;
+
+ mbox->q_no = 0;
+
+ mbox->state = LIO_MBOX_STATE_IDLE;
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_VF_SLI_PKT_MBOX_INT(0);
+ /* VF reads from SIG0 reg */
+ mbox->mbox_read_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
+ /* VF writes into SIG1 reg */
+ mbox->mbox_write_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);
+
+ lio_dev->mbox[0] = mbox;
+
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+
+ return 0;
+}
+
+static int
+cn23xx_vf_enable_io_queues(struct lio_device *lio_dev)
+{
+ uint32_t q_no;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (q_no = 0; q_no < lio_dev->num_iqs; q_no++) {
+ uint64_t reg_val;
+
+ /* set the corresponding IQ IS_64B bit */
+ if (lio_dev->io_qmask.iq64B & (1ULL << q_no)) {
+ reg_val = lio_read_csr64(
+ lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
+ lio_write_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+ }
+
+ /* set the corresponding IQ ENB bit */
+ if (lio_dev->io_qmask.iq & (1ULL << q_no)) {
+ reg_val = lio_read_csr64(
+ lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
+ lio_write_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+ }
+ }
+ for (q_no = 0; q_no < lio_dev->num_oqs; q_no++) {
+ uint32_t reg_val;
+
+ /* set the corresponding OQ ENB bit */
+ if (lio_dev->io_qmask.oq & (1ULL << q_no)) {
+ reg_val = lio_read_csr(
+ lio_dev,
+ CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+ reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
+ lio_write_csr(lio_dev,
+ CN23XX_SLI_OQ_PKT_CONTROL(q_no),
+ reg_val);
+ }
+ }
+
+ return 0;
+}
+
+static void
+cn23xx_vf_disable_io_queues(struct lio_device *lio_dev)
+{
+ uint32_t num_queues;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* per HRM, rings can only be disabled via reset operation,
+ * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB]
+ */
+ num_queues = lio_dev->num_iqs;
+ if (num_queues < lio_dev->num_oqs)
+ num_queues = lio_dev->num_oqs;
+
+ cn23xx_vf_reset_io_queues(lio_dev, num_queues);
+}
+
+void
+cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev)
+{
+ struct lio_mbox_cmd mbox_cmd;
+
+ memset(&mbox_cmd, 0, sizeof(struct lio_mbox_cmd));
+ mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 0;
+ mbox_cmd.msg.s.cmd = LIO_VF_FLR_REQUEST;
+ mbox_cmd.msg.s.len = 1;
+ mbox_cmd.q_no = 0;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = NULL;
+ mbox_cmd.fn_arg = 0;
+
+ lio_mbox_write(lio_dev, &mbox_cmd);
+}
+
+static void
+cn23xx_pfvf_hs_callback(struct lio_device *lio_dev,
+ struct lio_mbox_cmd *cmd, void *arg)
+{
+ uint32_t major = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_memcpy((uint8_t *)&lio_dev->pfvf_hsword, cmd->msg.s.params, 6);
+ if (cmd->recv_len > 1) {
+ struct lio_version *lio_ver = (struct lio_version *)cmd->data;
+
+ major = lio_ver->major;
+ major = major << 16;
+ }
+
+ rte_atomic64_set((rte_atomic64_t *)arg, major | 1);
+}
+
+int
+cn23xx_pfvf_handshake(struct lio_device *lio_dev)
+{
+ struct lio_mbox_cmd mbox_cmd;
+ struct lio_version *lio_ver = (struct lio_version *)&mbox_cmd.data[0];
+ uint32_t q_no, count = 0;
+ rte_atomic64_t status;
+ uint32_t pfmajor;
+ uint32_t vfmajor;
+ uint32_t ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Sending VF_ACTIVE indication to the PF driver */
+ lio_dev_dbg(lio_dev, "requesting info from PF\n");
+
+ mbox_cmd.msg.mbox_msg64 = 0;
+ mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 1;
+ mbox_cmd.msg.s.cmd = LIO_VF_ACTIVE;
+ mbox_cmd.msg.s.len = 2;
+ mbox_cmd.data[0] = 0;
+ lio_ver->major = LIO_BASE_MAJOR_VERSION;
+ lio_ver->minor = LIO_BASE_MINOR_VERSION;
+ lio_ver->micro = LIO_BASE_MICRO_VERSION;
+ mbox_cmd.q_no = 0;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = (lio_mbox_callback)cn23xx_pfvf_hs_callback;
+ mbox_cmd.fn_arg = (void *)&status;
+
+ if (lio_mbox_write(lio_dev, &mbox_cmd)) {
+ lio_dev_err(lio_dev, "Write to mailbox failed\n");
+ return -1;
+ }
+
+ rte_atomic64_set(&status, 0);
+
+ do {
+ rte_delay_ms(1);
+ } while ((rte_atomic64_read(&status) == 0) && (count++ < 10000));
+
+ ret = rte_atomic64_read(&status);
+ if (ret == 0) {
+ lio_dev_err(lio_dev, "cn23xx_pfvf_handshake timeout\n");
+ return -1;
+ }
+
+ for (q_no = 0; q_no < lio_dev->num_iqs; q_no++)
+ lio_dev->instr_queue[q_no]->txpciq.s.pkind =
+ lio_dev->pfvf_hsword.pkind;
+
+ vfmajor = LIO_BASE_MAJOR_VERSION;
+ pfmajor = ret >> 16;
+ if (pfmajor != vfmajor) {
+ lio_dev_err(lio_dev,
+ "VF LiquidIO driver (major version %d) is not compatible with LiquidIO PF driver (major version %d)\n",
+ vfmajor, pfmajor);
+ ret = -EPERM;
+ } else {
+ lio_dev_dbg(lio_dev,
+ "VF LiquidIO driver (major version %d), LiquidIO PF driver (major version %d)\n",
+ vfmajor, pfmajor);
+ ret = 0;
+ }
+
+ lio_dev_dbg(lio_dev, "got data from PF pkind is %d\n",
+ lio_dev->pfvf_hsword.pkind);
+
+ return ret;
+}
+
+void
+cn23xx_vf_handle_mbox(struct lio_device *lio_dev)
+{
+ uint64_t mbox_int_val;
+
+ /* read and clear by writing 1 */
+ mbox_int_val = rte_read64(lio_dev->mbox[0]->mbox_int_reg);
+ rte_write64(mbox_int_val, lio_dev->mbox[0]->mbox_int_reg);
+ if (lio_mbox_read(lio_dev->mbox[0]))
+ lio_mbox_process_message(lio_dev->mbox[0]);
+}
+
+int
+cn23xx_vf_setup_device(struct lio_device *lio_dev)
+{
+ uint64_t reg_val;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* INPUT_CONTROL[RPVF] gives the VF IOq count */
+ reg_val = lio_read_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(0));
+
+ lio_dev->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
+ CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
+ lio_dev->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
+ CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;
+
+ reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
+
+ lio_dev->sriov_info.rings_per_vf =
+ reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
+
+ lio_dev->default_config = lio_get_conf(lio_dev);
+ if (lio_dev->default_config == NULL)
+ return -1;
+
+ lio_dev->fn_list.setup_iq_regs = cn23xx_vf_setup_iq_regs;
+ lio_dev->fn_list.setup_oq_regs = cn23xx_vf_setup_oq_regs;
+ lio_dev->fn_list.setup_mbox = cn23xx_vf_setup_mbox;
+ lio_dev->fn_list.free_mbox = cn23xx_vf_free_mbox;
+
+ lio_dev->fn_list.setup_device_regs = cn23xx_vf_setup_device_regs;
+
+ lio_dev->fn_list.enable_io_queues = cn23xx_vf_enable_io_queues;
+ lio_dev->fn_list.disable_io_queues = cn23xx_vf_disable_io_queues;
+
+ return 0;
+}
+
diff --git a/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.h b/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.h
new file mode 100644
index 00000000..8e5362db
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _LIO_23XX_VF_H_
+#define _LIO_23XX_VF_H_
+
+#include <stdio.h>
+
+#include "lio_struct.h"
+
+static const struct lio_config default_cn23xx_conf = {
+ .card_type = LIO_23XX,
+ .card_name = LIO_23XX_NAME,
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN23XX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN23XX_MAX_IQ_DESCRIPTORS * CN23XX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ },
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN23XX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .refill_threshold = CN23XX_OQ_REFIL_THRESHOLD,
+ },
+
+ .num_nic_ports = CN23XX_DEFAULT_NUM_PORTS,
+ .num_def_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN23XX_OQ_BUF_SIZE,
+};
+
+static inline const struct lio_config *
+lio_get_conf(struct lio_device *lio_dev)
+{
+ const struct lio_config *default_lio_conf = NULL;
+
+ /* check the LIO Device model & return the corresponding lio
+ * configuration
+ */
+ default_lio_conf = &default_cn23xx_conf;
+
+ if (default_lio_conf == NULL) {
+ lio_dev_err(lio_dev, "Configuration verification failed\n");
+ return NULL;
+ }
+
+ return default_lio_conf;
+}
+
+#define CN23XX_VF_BUSY_READING_REG_LOOP_COUNT 100000
+
+void cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev);
+
+int cn23xx_pfvf_handshake(struct lio_device *lio_dev);
+
+int cn23xx_vf_setup_device(struct lio_device *lio_dev);
+
+void cn23xx_vf_handle_mbox(struct lio_device *lio_dev);
+#endif /* _LIO_23XX_VF_H_ */
diff --git a/src/spdk/dpdk/drivers/net/liquidio/base/lio_hw_defs.h b/src/spdk/dpdk/drivers/net/liquidio/base/lio_hw_defs.h
new file mode 100644
index 00000000..5e119c12
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/base/lio_hw_defs.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _LIO_HW_DEFS_H_
+#define _LIO_HW_DEFS_H_
+
+#include <rte_io.h>
+
+#ifndef PCI_VENDOR_ID_CAVIUM
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#endif
+
+#define LIO_CN23XX_VF_VID 0x9712
+
+/* CN23xx subsystem device ids */
+#define PCI_SUBSYS_DEV_ID_CN2350_210 0x0004
+#define PCI_SUBSYS_DEV_ID_CN2360_210 0x0005
+#define PCI_SUBSYS_DEV_ID_CN2360_225 0x0006
+#define PCI_SUBSYS_DEV_ID_CN2350_225 0x0007
+#define PCI_SUBSYS_DEV_ID_CN2350_210SVPN3 0x0008
+#define PCI_SUBSYS_DEV_ID_CN2360_210SVPN3 0x0009
+#define PCI_SUBSYS_DEV_ID_CN2350_210SVPT 0x000a
+#define PCI_SUBSYS_DEV_ID_CN2360_210SVPT 0x000b
+
+/* --------------------------CONFIG VALUES------------------------ */
+
+/* CN23xx IQ configuration macros */
+#define CN23XX_MAX_RINGS_PER_PF 64
+#define CN23XX_MAX_RINGS_PER_VF 8
+
+#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_IQ_DESCRIPTORS 512
+#define CN23XX_MIN_IQ_DESCRIPTORS 128
+
+#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_OQ_DESCRIPTORS 512
+#define CN23XX_MIN_OQ_DESCRIPTORS 128
+#define CN23XX_OQ_BUF_SIZE 1536
+
+#define CN23XX_OQ_REFIL_THRESHOLD 16
+
+#define CN23XX_DEFAULT_NUM_PORTS 1
+
+#define CN23XX_CFG_IO_QUEUES CN23XX_MAX_RINGS_PER_PF
+
+/* common OCTEON configuration macros */
+#define OCTEON_64BYTE_INSTR 64
+#define OCTEON_OQ_INFOPTR_MODE 1
+
+/* Max IOQs per LIO Link */
+#define LIO_MAX_IOQS_PER_IF 64
+
+/* Wait time in milliseconds for FLR */
+#define LIO_PCI_FLR_WAIT 100
+
+enum lio_card_type {
+ LIO_23XX /* 23xx */
+};
+
+#define LIO_23XX_NAME "23xx"
+
+#define LIO_DEV_RUNNING 0xc
+
+#define LIO_OQ_REFILL_THRESHOLD_CFG(cfg) \
+ ((cfg)->default_config->oq.refill_threshold)
+#define LIO_NUM_DEF_TX_DESCS_CFG(cfg) \
+ ((cfg)->default_config->num_def_tx_descs)
+
+#define LIO_IQ_INSTR_TYPE(cfg) ((cfg)->default_config->iq.instr_type)
+
+/* The following config values are fixed and should not be modified. */
+
+/* Maximum number of Instruction queues */
+#define LIO_MAX_INSTR_QUEUES(lio_dev) CN23XX_MAX_RINGS_PER_VF
+
+#define LIO_MAX_POSSIBLE_INSTR_QUEUES CN23XX_MAX_INPUT_QUEUES
+#define LIO_MAX_POSSIBLE_OUTPUT_QUEUES CN23XX_MAX_OUTPUT_QUEUES
+
+#define LIO_DEVICE_NAME_LEN 32
+#define LIO_BASE_MAJOR_VERSION 1
+#define LIO_BASE_MINOR_VERSION 5
+#define LIO_BASE_MICRO_VERSION 1
+
+#define LIO_FW_VERSION_LENGTH 32
+
+#define LIO_Q_RECONF_MIN_VERSION "1.7.0"
+#define LIO_VF_TRUST_MIN_VERSION "1.7.1"
+
+/** Tag types used by Octeon cores in its work. */
+enum octeon_tag_type {
+ OCTEON_ORDERED_TAG = 0,
+ OCTEON_ATOMIC_TAG = 1,
+};
+
+/* pre-defined host->NIC tag values */
+#define LIO_CONTROL (0x11111110)
+#define LIO_DATA(i) (0x11111111 + (i))
+
+/* used for NIC operations */
+#define LIO_OPCODE 1
+
+/* Subcodes are used by host driver/apps to identify the sub-operation
+ * for the core. They only need to by unique for a given subsystem.
+ */
+#define LIO_OPCODE_SUBCODE(op, sub) \
+ ((((op) & 0x0f) << 8) | ((sub) & 0x7f))
+
+/** LIO_OPCODE subcodes */
+/* This subcode is sent by core PCI driver to indicate cores are ready. */
+#define LIO_OPCODE_NW_DATA 0x02 /* network packet data */
+#define LIO_OPCODE_CMD 0x03
+#define LIO_OPCODE_INFO 0x04
+#define LIO_OPCODE_PORT_STATS 0x05
+#define LIO_OPCODE_IF_CFG 0x09
+
+#define LIO_MIN_RX_BUF_SIZE 64
+#define LIO_MAX_RX_PKTLEN (64 * 1024)
+
+/* NIC Command types */
+#define LIO_CMD_CHANGE_MTU 0x1
+#define LIO_CMD_CHANGE_DEVFLAGS 0x3
+#define LIO_CMD_RX_CTL 0x4
+#define LIO_CMD_CLEAR_STATS 0x6
+#define LIO_CMD_SET_RSS 0xD
+#define LIO_CMD_TNL_RX_CSUM_CTL 0x10
+#define LIO_CMD_TNL_TX_CSUM_CTL 0x11
+#define LIO_CMD_ADD_VLAN_FILTER 0x17
+#define LIO_CMD_DEL_VLAN_FILTER 0x18
+#define LIO_CMD_VXLAN_PORT_CONFIG 0x19
+#define LIO_CMD_QUEUE_COUNT_CTL 0x1f
+
+#define LIO_CMD_VXLAN_PORT_ADD 0x0
+#define LIO_CMD_VXLAN_PORT_DEL 0x1
+#define LIO_CMD_RXCSUM_ENABLE 0x0
+#define LIO_CMD_TXCSUM_ENABLE 0x0
+
+/* RX(packets coming from wire) Checksum verification flags */
+/* TCP/UDP csum */
+#define LIO_L4_CSUM_VERIFIED 0x1
+#define LIO_IP_CSUM_VERIFIED 0x2
+
+/* RSS */
+#define LIO_RSS_PARAM_DISABLE_RSS 0x10
+#define LIO_RSS_PARAM_HASH_KEY_UNCHANGED 0x08
+#define LIO_RSS_PARAM_ITABLE_UNCHANGED 0x04
+#define LIO_RSS_PARAM_HASH_INFO_UNCHANGED 0x02
+
+#define LIO_RSS_HASH_IPV4 0x100
+#define LIO_RSS_HASH_TCP_IPV4 0x200
+#define LIO_RSS_HASH_IPV6 0x400
+#define LIO_RSS_HASH_TCP_IPV6 0x1000
+#define LIO_RSS_HASH_IPV6_EX 0x800
+#define LIO_RSS_HASH_TCP_IPV6_EX 0x2000
+
+#define LIO_RSS_OFFLOAD_ALL ( \
+ LIO_RSS_HASH_IPV4 | \
+ LIO_RSS_HASH_TCP_IPV4 | \
+ LIO_RSS_HASH_IPV6 | \
+ LIO_RSS_HASH_TCP_IPV6 | \
+ LIO_RSS_HASH_IPV6_EX | \
+ LIO_RSS_HASH_TCP_IPV6_EX)
+
+#define LIO_RSS_MAX_TABLE_SZ 128
+#define LIO_RSS_MAX_KEY_SZ 40
+#define LIO_RSS_PARAM_SIZE 16
+
+/* Interface flags communicated between host driver and core app. */
+enum lio_ifflags {
+ LIO_IFFLAG_PROMISC = 0x01,
+ LIO_IFFLAG_ALLMULTI = 0x02,
+ LIO_IFFLAG_UNICAST = 0x10
+};
+
+/* Routines for reading and writing CSRs */
+#ifdef RTE_LIBRTE_LIO_DEBUG_REGS
+#define lio_write_csr(lio_dev, reg_off, value) \
+ do { \
+ typeof(lio_dev) _dev = lio_dev; \
+ typeof(reg_off) _reg_off = reg_off; \
+ typeof(value) _value = value; \
+ PMD_REGS_LOG(_dev, \
+ "Write32: Reg: 0x%08lx Val: 0x%08lx\n", \
+ (unsigned long)_reg_off, \
+ (unsigned long)_value); \
+ rte_write32(_value, _dev->hw_addr + _reg_off); \
+ } while (0)
+
+#define lio_write_csr64(lio_dev, reg_off, val64) \
+ do { \
+ typeof(lio_dev) _dev = lio_dev; \
+ typeof(reg_off) _reg_off = reg_off; \
+ typeof(val64) _val64 = val64; \
+ PMD_REGS_LOG( \
+ _dev, \
+ "Write64: Reg: 0x%08lx Val: 0x%016llx\n", \
+ (unsigned long)_reg_off, \
+ (unsigned long long)_val64); \
+ rte_write64(_val64, _dev->hw_addr + _reg_off); \
+ } while (0)
+
+#define lio_read_csr(lio_dev, reg_off) \
+ ({ \
+ typeof(lio_dev) _dev = lio_dev; \
+ typeof(reg_off) _reg_off = reg_off; \
+ uint32_t val = rte_read32(_dev->hw_addr + _reg_off); \
+ PMD_REGS_LOG(_dev, \
+ "Read32: Reg: 0x%08lx Val: 0x%08lx\n", \
+ (unsigned long)_reg_off, \
+ (unsigned long)val); \
+ val; \
+ })
+
+#define lio_read_csr64(lio_dev, reg_off) \
+ ({ \
+ typeof(lio_dev) _dev = lio_dev; \
+ typeof(reg_off) _reg_off = reg_off; \
+ uint64_t val64 = rte_read64(_dev->hw_addr + _reg_off); \
+ PMD_REGS_LOG( \
+ _dev, \
+ "Read64: Reg: 0x%08lx Val: 0x%016llx\n", \
+ (unsigned long)_reg_off, \
+ (unsigned long long)val64); \
+ val64; \
+ })
+#else
+#define lio_write_csr(lio_dev, reg_off, value) \
+ rte_write32(value, (lio_dev)->hw_addr + (reg_off))
+
+#define lio_write_csr64(lio_dev, reg_off, val64) \
+ rte_write64(val64, (lio_dev)->hw_addr + (reg_off))
+
+#define lio_read_csr(lio_dev, reg_off) \
+ rte_read32((lio_dev)->hw_addr + (reg_off))
+
+#define lio_read_csr64(lio_dev, reg_off) \
+ rte_read64((lio_dev)->hw_addr + (reg_off))
+#endif
+#endif /* _LIO_HW_DEFS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.c b/src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.c
new file mode 100644
index 00000000..11290015
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.c
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_cycles.h>
+
+#include "lio_logs.h"
+#include "lio_struct.h"
+#include "lio_mbox.h"
+
+/**
+ * lio_mbox_read:
+ * @mbox: Pointer mailbox
+ *
+ * Reads the 8-bytes of data from the mbox register
+ * Writes back the acknowledgment indicating completion of read
+ */
+int
+lio_mbox_read(struct lio_mbox *mbox)
+{
+ union lio_mbox_message msg;
+ int ret = 0;
+
+ msg.mbox_msg64 = rte_read64(mbox->mbox_read_reg);
+
+ if ((msg.mbox_msg64 == LIO_PFVFACK) || (msg.mbox_msg64 == LIO_PFVFSIG))
+ return 0;
+
+ if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVING) {
+ mbox->mbox_req.data[mbox->mbox_req.recv_len - 1] =
+ msg.mbox_msg64;
+ mbox->mbox_req.recv_len++;
+ } else {
+ if (mbox->state & LIO_MBOX_STATE_RES_RECEIVING) {
+ mbox->mbox_resp.data[mbox->mbox_resp.recv_len - 1] =
+ msg.mbox_msg64;
+ mbox->mbox_resp.recv_len++;
+ } else {
+ if ((mbox->state & LIO_MBOX_STATE_IDLE) &&
+ (msg.s.type == LIO_MBOX_REQUEST)) {
+ mbox->state &= ~LIO_MBOX_STATE_IDLE;
+ mbox->state |= LIO_MBOX_STATE_REQ_RECEIVING;
+ mbox->mbox_req.msg.mbox_msg64 = msg.mbox_msg64;
+ mbox->mbox_req.q_no = mbox->q_no;
+ mbox->mbox_req.recv_len = 1;
+ } else {
+ if ((mbox->state &
+ LIO_MBOX_STATE_RES_PENDING) &&
+ (msg.s.type == LIO_MBOX_RESPONSE)) {
+ mbox->state &=
+ ~LIO_MBOX_STATE_RES_PENDING;
+ mbox->state |=
+ LIO_MBOX_STATE_RES_RECEIVING;
+ mbox->mbox_resp.msg.mbox_msg64 =
+ msg.mbox_msg64;
+ mbox->mbox_resp.q_no = mbox->q_no;
+ mbox->mbox_resp.recv_len = 1;
+ } else {
+ rte_write64(LIO_PFVFERR,
+ mbox->mbox_read_reg);
+ mbox->state |= LIO_MBOX_STATE_ERROR;
+ return -1;
+ }
+ }
+ }
+ }
+
+ if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVING) {
+ if (mbox->mbox_req.recv_len < msg.s.len) {
+ ret = 0;
+ } else {
+ mbox->state &= ~LIO_MBOX_STATE_REQ_RECEIVING;
+ mbox->state |= LIO_MBOX_STATE_REQ_RECEIVED;
+ ret = 1;
+ }
+ } else {
+ if (mbox->state & LIO_MBOX_STATE_RES_RECEIVING) {
+ if (mbox->mbox_resp.recv_len < msg.s.len) {
+ ret = 0;
+ } else {
+ mbox->state &= ~LIO_MBOX_STATE_RES_RECEIVING;
+ mbox->state |= LIO_MBOX_STATE_RES_RECEIVED;
+ ret = 1;
+ }
+ } else {
+ RTE_ASSERT(0);
+ }
+ }
+
+ rte_write64(LIO_PFVFACK, mbox->mbox_read_reg);
+
+ return ret;
+}
+
+/**
+ * lio_mbox_write:
+ * @lio_dev: Pointer lio device
+ * @mbox_cmd: Cmd to send to mailbox.
+ *
+ * Populates the queue specific mbox structure
+ * with cmd information.
+ * Write the cmd to mbox register
+ */
+int
+lio_mbox_write(struct lio_device *lio_dev,
+ struct lio_mbox_cmd *mbox_cmd)
+{
+ struct lio_mbox *mbox = lio_dev->mbox[mbox_cmd->q_no];
+ uint32_t count, i, ret = LIO_MBOX_STATUS_SUCCESS;
+
+ if ((mbox_cmd->msg.s.type == LIO_MBOX_RESPONSE) &&
+ !(mbox->state & LIO_MBOX_STATE_REQ_RECEIVED))
+ return LIO_MBOX_STATUS_FAILED;
+
+ if ((mbox_cmd->msg.s.type == LIO_MBOX_REQUEST) &&
+ !(mbox->state & LIO_MBOX_STATE_IDLE))
+ return LIO_MBOX_STATUS_BUSY;
+
+ if (mbox_cmd->msg.s.type == LIO_MBOX_REQUEST) {
+ rte_memcpy(&mbox->mbox_resp, mbox_cmd,
+ sizeof(struct lio_mbox_cmd));
+ mbox->state = LIO_MBOX_STATE_RES_PENDING;
+ }
+
+ count = 0;
+
+ while (rte_read64(mbox->mbox_write_reg) != LIO_PFVFSIG) {
+ rte_delay_ms(1);
+ if (count++ == 1000) {
+ ret = LIO_MBOX_STATUS_FAILED;
+ break;
+ }
+ }
+
+ if (ret == LIO_MBOX_STATUS_SUCCESS) {
+ rte_write64(mbox_cmd->msg.mbox_msg64, mbox->mbox_write_reg);
+ for (i = 0; i < (uint32_t)(mbox_cmd->msg.s.len - 1); i++) {
+ count = 0;
+ while (rte_read64(mbox->mbox_write_reg) !=
+ LIO_PFVFACK) {
+ rte_delay_ms(1);
+ if (count++ == 1000) {
+ ret = LIO_MBOX_STATUS_FAILED;
+ break;
+ }
+ }
+ rte_write64(mbox_cmd->data[i], mbox->mbox_write_reg);
+ }
+ }
+
+ if (mbox_cmd->msg.s.type == LIO_MBOX_RESPONSE) {
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+ } else {
+ if ((!mbox_cmd->msg.s.resp_needed) ||
+ (ret == LIO_MBOX_STATUS_FAILED)) {
+ mbox->state &= ~LIO_MBOX_STATE_RES_PENDING;
+ if (!(mbox->state & (LIO_MBOX_STATE_REQ_RECEIVING |
+ LIO_MBOX_STATE_REQ_RECEIVED)))
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * lio_mbox_process_cmd:
+ * @mbox: Pointer mailbox
+ * @mbox_cmd: Pointer to command received
+ *
+ * Process the cmd received in mbox
+ */
+static int
+lio_mbox_process_cmd(struct lio_mbox *mbox,
+ struct lio_mbox_cmd *mbox_cmd)
+{
+ struct lio_device *lio_dev = mbox->lio_dev;
+
+ if (mbox_cmd->msg.s.cmd == LIO_CORES_CRASHED)
+ lio_dev_err(lio_dev, "Octeon core(s) crashed or got stuck!\n");
+
+ return 0;
+}
+
+/**
+ * Process the received mbox message.
+ */
+int
+lio_mbox_process_message(struct lio_mbox *mbox)
+{
+ struct lio_mbox_cmd mbox_cmd;
+
+ if (mbox->state & LIO_MBOX_STATE_ERROR) {
+ if (mbox->state & (LIO_MBOX_STATE_RES_PENDING |
+ LIO_MBOX_STATE_RES_RECEIVING)) {
+ rte_memcpy(&mbox_cmd, &mbox->mbox_resp,
+ sizeof(struct lio_mbox_cmd));
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+ mbox_cmd.recv_status = 1;
+ if (mbox_cmd.fn)
+ mbox_cmd.fn(mbox->lio_dev, &mbox_cmd,
+ mbox_cmd.fn_arg);
+
+ return 0;
+ }
+
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+
+ return 0;
+ }
+
+ if (mbox->state & LIO_MBOX_STATE_RES_RECEIVED) {
+ rte_memcpy(&mbox_cmd, &mbox->mbox_resp,
+ sizeof(struct lio_mbox_cmd));
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+ mbox_cmd.recv_status = 0;
+ if (mbox_cmd.fn)
+ mbox_cmd.fn(mbox->lio_dev, &mbox_cmd, mbox_cmd.fn_arg);
+
+ return 0;
+ }
+
+ if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVED) {
+ rte_memcpy(&mbox_cmd, &mbox->mbox_req,
+ sizeof(struct lio_mbox_cmd));
+ if (!mbox_cmd.msg.s.resp_needed) {
+ mbox->state &= ~LIO_MBOX_STATE_REQ_RECEIVED;
+ if (!(mbox->state & LIO_MBOX_STATE_RES_PENDING))
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+ }
+
+ lio_mbox_process_cmd(mbox, &mbox_cmd);
+
+ return 0;
+ }
+
+ RTE_ASSERT(0);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.h b/src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.h
new file mode 100644
index 00000000..457917e9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _LIO_MBOX_H_
+#define _LIO_MBOX_H_
+
+#include <stdint.h>
+
+#include <rte_spinlock.h>
+
+/* Macros for Mail Box Communication */
+
+#define LIO_MBOX_DATA_MAX 32
+
+#define LIO_VF_ACTIVE 0x1
+#define LIO_VF_FLR_REQUEST 0x2
+#define LIO_CORES_CRASHED 0x3
+
+/* Macro for Read acknowledgment */
+#define LIO_PFVFACK 0xffffffffffffffff
+#define LIO_PFVFSIG 0x1122334455667788
+#define LIO_PFVFERR 0xDEADDEADDEADDEAD
+
+enum lio_mbox_cmd_status {
+ LIO_MBOX_STATUS_SUCCESS = 0,
+ LIO_MBOX_STATUS_FAILED = 1,
+ LIO_MBOX_STATUS_BUSY = 2
+};
+
+enum lio_mbox_message_type {
+ LIO_MBOX_REQUEST = 0,
+ LIO_MBOX_RESPONSE = 1
+};
+
+union lio_mbox_message {
+ uint64_t mbox_msg64;
+ struct {
+ uint16_t type : 1;
+ uint16_t resp_needed : 1;
+ uint16_t cmd : 6;
+ uint16_t len : 8;
+ uint8_t params[6];
+ } s;
+};
+
+typedef void (*lio_mbox_callback)(void *, void *, void *);
+
+struct lio_mbox_cmd {
+ union lio_mbox_message msg;
+ uint64_t data[LIO_MBOX_DATA_MAX];
+ uint32_t q_no;
+ uint32_t recv_len;
+ uint32_t recv_status;
+ lio_mbox_callback fn;
+ void *fn_arg;
+};
+
+enum lio_mbox_state {
+ LIO_MBOX_STATE_IDLE = 1,
+ LIO_MBOX_STATE_REQ_RECEIVING = 2,
+ LIO_MBOX_STATE_REQ_RECEIVED = 4,
+ LIO_MBOX_STATE_RES_PENDING = 8,
+ LIO_MBOX_STATE_RES_RECEIVING = 16,
+ LIO_MBOX_STATE_RES_RECEIVED = 16,
+ LIO_MBOX_STATE_ERROR = 32
+};
+
+struct lio_mbox {
+ /* A spinlock to protect access to this q_mbox. */
+ rte_spinlock_t lock;
+
+ struct lio_device *lio_dev;
+
+ uint32_t q_no;
+
+ enum lio_mbox_state state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ void *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ void *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ void *mbox_read_reg;
+
+ struct lio_mbox_cmd mbox_req;
+
+ struct lio_mbox_cmd mbox_resp;
+
+};
+
+int lio_mbox_read(struct lio_mbox *mbox);
+int lio_mbox_write(struct lio_device *lio_dev,
+ struct lio_mbox_cmd *mbox_cmd);
+int lio_mbox_process_message(struct lio_mbox *mbox);
+#endif /* _LIO_MBOX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.c b/src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.c
new file mode 100644
index 00000000..93e89007
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.c
@@ -0,0 +1,2154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+
+#include "lio_logs.h"
+#include "lio_23xx_vf.h"
+#include "lio_ethdev.h"
+#include "lio_rxtx.h"
+
+int lio_logtype_init;
+int lio_logtype_driver;
+
+/* Default RSS key in use */
+static uint8_t lio_rss_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+static const struct rte_eth_desc_lim lio_rx_desc_lim = {
+ .nb_max = CN23XX_MAX_OQ_DESCRIPTORS,
+ .nb_min = CN23XX_MIN_OQ_DESCRIPTORS,
+ .nb_align = 1,
+};
+
+static const struct rte_eth_desc_lim lio_tx_desc_lim = {
+ .nb_max = CN23XX_MAX_IQ_DESCRIPTORS,
+ .nb_min = CN23XX_MIN_IQ_DESCRIPTORS,
+ .nb_align = 1,
+};
+
+/* Wait for control command to reach nic. */
+static uint16_t
+lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
+ struct lio_dev_ctrl_cmd *ctrl_cmd)
+{
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+
+ while ((ctrl_cmd->cond == 0) && --timeout) {
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+ rte_delay_ms(1);
+ }
+
+ return !timeout;
+}
+
+/**
+ * \brief Send Rx control command
+ * @param eth_dev Pointer to the structure rte_eth_dev
+ * @param start_stop whether to start or stop
+ */
+static int
+lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
+ ctrl_pkt.ncmd.s.param1 = start_stop;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send RX Control message\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "RX Control command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* store statistics names and its offset in stats structure */
+struct rte_lio_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
+static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
+ {"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
+ {"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
+ {"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
+ {"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
+ {"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
+ {"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
+ {"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
+ {"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
+ {"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
+ {"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
+ {"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
+ {"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
+ {"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
+ {"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_broadcast_pkts",
+ (offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_multicast_pkts",
+ (offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_total_collisions", (offsetof(struct octeon_tx_stats,
+ total_collisions)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
+ sizeof(struct octeon_rx_stats)},
+};
+
+#define LIO_NB_XSTATS RTE_DIM(rte_lio_stats_strings)
+
+/* Get hw stats of the port */
+static int
+lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+ struct octeon_link_stats *hw_stats;
+ struct lio_link_stats_resp *resp;
+ struct lio_soft_command *sc;
+ uint32_t resp_size;
+ unsigned int i;
+ int retval;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ if (n < LIO_NB_XSTATS)
+ return LIO_NB_XSTATS;
+
+ resp_size = sizeof(struct lio_link_stats_resp);
+ sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
+ if (sc == NULL)
+ return -ENOMEM;
+
+ resp = (struct lio_link_stats_resp *)sc->virtrptr;
+ lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
+ LIO_OPCODE_PORT_STATS, 0, 0, 0);
+
+ /* Setting wait time in seconds */
+ sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
+
+ retval = lio_send_soft_command(lio_dev, sc);
+ if (retval == LIO_IQ_SEND_FAILED) {
+ lio_dev_err(lio_dev, "failed to get port stats from firmware. status: %x\n",
+ retval);
+ goto get_stats_fail;
+ }
+
+ while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
+ lio_process_ordered_list(lio_dev);
+ rte_delay_ms(1);
+ }
+
+ retval = resp->status;
+ if (retval) {
+ lio_dev_err(lio_dev, "failed to get port stats from firmware\n");
+ goto get_stats_fail;
+ }
+
+ lio_swap_8B_data((uint64_t *)(&resp->link_stats),
+ sizeof(struct octeon_link_stats) >> 3);
+
+ hw_stats = &resp->link_stats;
+
+ for (i = 0; i < LIO_NB_XSTATS; i++) {
+ xstats[i].id = i;
+ xstats[i].value =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_lio_stats_strings[i].offset);
+ }
+
+ lio_free_soft_command(sc);
+
+ return LIO_NB_XSTATS;
+
+get_stats_fail:
+ lio_free_soft_command(sc);
+
+ return -1;
+}
+
+static int
+lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit __rte_unused)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ unsigned int i;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ if (xstats_names == NULL)
+ return LIO_NB_XSTATS;
+
+ /* Note: limit checked in rte_eth_xstats_names() */
+
+ for (i = 0; i < LIO_NB_XSTATS; i++) {
+ snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
+ "%s", rte_lio_stats_strings[i].name);
+ }
+
+ return LIO_NB_XSTATS;
+}
+
+/* Reset hw stats for the port */
+static void
+lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send clear stats command\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Clear stats command timed out\n");
+ return;
+ }
+
+ /* clear stored per queue stats */
+ RTE_FUNC_PTR_OR_RET(*eth_dev->dev_ops->stats_reset);
+ (*eth_dev->dev_ops->stats_reset)(eth_dev);
+}
+
+/* Retrieve the device statistics (# packets in/out, # bytes in/out, etc */
+static int
+lio_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *stats)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_droq_stats *oq_stats;
+ struct lio_iq_stats *iq_stats;
+ struct lio_instr_queue *txq;
+ struct lio_droq *droq;
+ int i, iq_no, oq_no;
+ uint64_t bytes = 0;
+ uint64_t pkts = 0;
+ uint64_t drop = 0;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ iq_no = lio_dev->linfo.txpciq[i].s.q_no;
+ txq = lio_dev->instr_queue[iq_no];
+ if (txq != NULL) {
+ iq_stats = &txq->stats;
+ pkts += iq_stats->tx_done;
+ drop += iq_stats->tx_dropped;
+ bytes += iq_stats->tx_tot_bytes;
+ }
+ }
+
+ stats->opackets = pkts;
+ stats->obytes = bytes;
+ stats->oerrors = drop;
+
+ pkts = 0;
+ drop = 0;
+ bytes = 0;
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
+ droq = lio_dev->droq[oq_no];
+ if (droq != NULL) {
+ oq_stats = &droq->stats;
+ pkts += oq_stats->rx_pkts_received;
+ drop += (oq_stats->rx_dropped +
+ oq_stats->dropped_toomany +
+ oq_stats->dropped_nomem);
+ bytes += oq_stats->rx_bytes_received;
+ }
+ }
+ stats->ibytes = bytes;
+ stats->ipackets = pkts;
+ stats->ierrors = drop;
+
+ return 0;
+}
+
+static void
+lio_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_droq_stats *oq_stats;
+ struct lio_iq_stats *iq_stats;
+ struct lio_instr_queue *txq;
+ struct lio_droq *droq;
+ int i, iq_no, oq_no;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ iq_no = lio_dev->linfo.txpciq[i].s.q_no;
+ txq = lio_dev->instr_queue[iq_no];
+ if (txq != NULL) {
+ iq_stats = &txq->stats;
+ memset(iq_stats, 0, sizeof(struct lio_iq_stats));
+ }
+ }
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
+ droq = lio_dev->droq[oq_no];
+ if (droq != NULL) {
+ oq_stats = &droq->stats;
+ memset(oq_stats, 0, sizeof(struct lio_droq_stats));
+ }
+ }
+}
+
+static void
+lio_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *devinfo)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ switch (pci_dev->id.subsystem_device_id) {
+ /* CN23xx 10G cards */
+ case PCI_SUBSYS_DEV_ID_CN2350_210:
+ case PCI_SUBSYS_DEV_ID_CN2360_210:
+ case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3:
+ case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
+ case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
+ case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
+ devinfo->speed_capa = ETH_LINK_SPEED_10G;
+ break;
+ /* CN23xx 25G cards */
+ case PCI_SUBSYS_DEV_ID_CN2350_225:
+ case PCI_SUBSYS_DEV_ID_CN2360_225:
+ devinfo->speed_capa = ETH_LINK_SPEED_25G;
+ break;
+ default:
+ devinfo->speed_capa = ETH_LINK_SPEED_10G;
+ lio_dev_err(lio_dev,
+ "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
+ }
+
+ devinfo->max_rx_queues = lio_dev->max_rx_queues;
+ devinfo->max_tx_queues = lio_dev->max_tx_queues;
+
+ devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;
+ devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;
+
+ devinfo->max_mac_addrs = 1;
+
+ devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_VLAN_STRIP);
+ devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
+
+ devinfo->rx_desc_lim = lio_rx_desc_lim;
+ devinfo->tx_desc_lim = lio_tx_desc_lim;
+
+ devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
+ devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
+ devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_IPV6_EX |
+ ETH_RSS_IPV6_TCP_EX);
+}
+
+static int
+lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
+ uint32_t frame_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't set MTU\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ /* check if VF MTU is within allowed range.
+ * New value should not exceed PF MTU.
+ */
+ if ((mtu < ETHER_MIN_MTU) || (mtu > pf_mtu)) {
+ lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
+ ETHER_MIN_MTU, pf_mtu);
+ return -EINVAL;
+ }
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU;
+ ctrl_pkt.ncmd.s.param1 = mtu;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send command to change MTU\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Command to change MTU timed out\n");
+ return -1;
+ }
+
+ if (frame_len > ETHER_MAX_LEN)
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
+ eth_dev->data->mtu = mtu;
+
+ return 0;
+}
+
+static int
+lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct lio_rss_set *rss_param;
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+ int i, j, index;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't update reta\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
+ lio_dev_err(lio_dev,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
+ reta_size, LIO_RSS_MAX_TABLE_SZ);
+ return -EINVAL;
+ }
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
+ ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ rss_param->param.flags = 0xF;
+ rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
+ rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
+
+ for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
+ index = (i * RTE_RETA_GROUP_SIZE) + j;
+ rss_state->itable[index] = reta_conf[i].reta[j];
+ }
+ }
+ }
+
+ rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
+ memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);
+
+ lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to set rss hash\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Set rss hash timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ int i, num;
+
+ if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
+ lio_dev_err(lio_dev,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
+ reta_size, LIO_RSS_MAX_TABLE_SZ);
+ return -EINVAL;
+ }
+
+ num = reta_size / RTE_RETA_GROUP_SIZE;
+
+ for (i = 0; i < num; i++) {
+ memcpy(reta_conf->reta,
+ &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
+ RTE_RETA_GROUP_SIZE);
+ reta_conf++;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ uint8_t *hash_key = NULL;
+ uint64_t rss_hf = 0;
+
+ if (rss_state->hash_disable) {
+ lio_dev_info(lio_dev, "RSS disabled in nic\n");
+ rss_conf->rss_hf = 0;
+ return 0;
+ }
+
+ /* Get key value */
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL)
+ memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
+
+ if (rss_state->ip)
+ rss_hf |= ETH_RSS_IPV4;
+ if (rss_state->tcp_hash)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (rss_state->ipv6)
+ rss_hf |= ETH_RSS_IPV6;
+ if (rss_state->ipv6_tcp_hash)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ if (rss_state->ipv6_ex)
+ rss_hf |= ETH_RSS_IPV6_EX;
+ if (rss_state->ipv6_tcp_ex_hash)
+ rss_hf |= ETH_RSS_IPV6_TCP_EX;
+
+ rss_conf->rss_hf = rss_hf;
+
+ return 0;
+}
+
+static int
+lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct lio_rss_set *rss_param;
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't update hash\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
+ ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ rss_param->param.flags = 0xF;
+
+ if (rss_conf->rss_key) {
+ rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
+ rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
+ rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
+ memcpy(rss_state->hash_key, rss_conf->rss_key,
+ rss_state->hash_key_size);
+ memcpy(rss_param->key, rss_state->hash_key,
+ rss_state->hash_key_size);
+ }
+
+ if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
+ /* Can't disable rss through hash flags,
+ * if it is enabled by default during init
+ */
+ if (!rss_state->hash_disable)
+ return -EINVAL;
+
+ /* This is for --disable-rss during testpmd launch */
+ rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
+ } else {
+ uint32_t hashinfo = 0;
+
+ /* Can't enable rss if disabled by default during init */
+ if (rss_state->hash_disable)
+ return -EINVAL;
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+ hashinfo |= LIO_RSS_HASH_IPV4;
+ rss_state->ip = 1;
+ } else {
+ rss_state->ip = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ hashinfo |= LIO_RSS_HASH_TCP_IPV4;
+ rss_state->tcp_hash = 1;
+ } else {
+ rss_state->tcp_hash = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV6) {
+ hashinfo |= LIO_RSS_HASH_IPV6;
+ rss_state->ipv6 = 1;
+ } else {
+ rss_state->ipv6 = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ hashinfo |= LIO_RSS_HASH_TCP_IPV6;
+ rss_state->ipv6_tcp_hash = 1;
+ } else {
+ rss_state->ipv6_tcp_hash = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
+ hashinfo |= LIO_RSS_HASH_IPV6_EX;
+ rss_state->ipv6_ex = 1;
+ } else {
+ rss_state->ipv6_ex = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
+ hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
+ rss_state->ipv6_tcp_ex_hash = 1;
+ } else {
+ rss_state->ipv6_tcp_ex_hash = 0;
+ }
+
+ rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
+ rss_param->param.hashinfo = hashinfo;
+ }
+
+ lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to set rss hash\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Set rss hash timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Add vxlan dest udp port for an interface.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ * @param udp_tnl
+ * udp tunnel conf
+ *
+ * @return
+ * On success return 0
+ * On failure return -1
+ */
+static int
+lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tnl)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (udp_tnl == NULL)
+ return -EINVAL;
+
+ if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+ lio_dev_err(lio_dev, "Unsupported tunnel type\n");
+ return -1;
+ }
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
+ ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
+ ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Remove vxlan dest udp port for an interface.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ * @param udp_tnl
+ * udp tunnel conf
+ *
+ * @return
+ * On success return 0
+ * On failure return -1
+ */
+static int
+lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tnl)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (udp_tnl == NULL)
+ return -EINVAL;
+
+ if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+ lio_dev_err(lio_dev, "Unsupported tunnel type\n");
+ return -1;
+ }
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
+ ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
+ ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (lio_dev->linfo.vlan_is_admin_assigned)
+ return -EPERM;
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = on ?
+ LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
+ ctrl_pkt.ncmd.s.param1 = vlan_id;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
+ on ? "add" : "remove");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
+ on ? "add" : "remove");
+ return -1;
+ }
+
+ return 0;
+}
+
+static uint64_t
+lio_hweight64(uint64_t w)
+{
+ uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);
+
+ res =
+ (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+ res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+ res = res + (res >> 8);
+ res = res + (res >> 16);
+
+ return (res + (res >> 32)) & 0x00000000000000FFul;
+}
+
+static int
+lio_dev_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete __rte_unused)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct rte_eth_link link;
+
+ /* Initialize */
+ memset(&link, 0, sizeof(link));
+ link.link_status = ETH_LINK_DOWN;
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
+
+ /* Return what we found */
+ if (lio_dev->linfo.link.s.link_up == 0) {
+ /* Interface is down */
+ return rte_eth_linkstatus_set(eth_dev, &link);
+ }
+
+ link.link_status = ETH_LINK_UP; /* Interface is up */
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ switch (lio_dev->linfo.link.s.speed) {
+ case LIO_LINK_SPEED_10000:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case LIO_LINK_SPEED_25000:
+ link.link_speed = ETH_SPEED_NUM_25G;
+ break;
+ default:
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ }
+
+ return rte_eth_linkstatus_set(eth_dev, &link);
+}
+
+/**
+ * \brief Net device enable, disable allmulticast
+ * @param eth_dev Pointer to the structure rte_eth_dev
+ */
+static void
+lio_change_dev_flag(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ /* Create a ctrl pkt command to be sent to core app. */
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
+ ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send change flag message\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ lio_dev_err(lio_dev, "Change dev flag command timed out\n");
+}
+
+static void
+lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
+ lio_dev_err(lio_dev, "Require firmware version >= %s\n",
+ LIO_VF_TRUST_MIN_VERSION);
+ return;
+ }
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags |= LIO_IFFLAG_PROMISC;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
+ lio_dev_err(lio_dev, "Require firmware version >= %s\n",
+ LIO_VF_TRUST_MIN_VERSION);
+ return;
+ }
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct rte_eth_rss_reta_entry64 reta_conf[8];
+ struct rte_eth_rss_conf rss_conf;
+ uint16_t i;
+
+ /* Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
+ rss_state->hash_disable = 1;
+ lio_dev_rss_hash_update(eth_dev, &rss_conf);
+ return;
+ }
+
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = lio_rss_key; /* Default hash key */
+
+ lio_dev_rss_hash_update(eth_dev, &rss_conf);
+
+ memset(reta_conf, 0, sizeof(reta_conf));
+ for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
+ uint8_t q_idx, conf_idx, reta_idx;
+
+ q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
+ i % eth_dev->data->nb_rx_queues : 0);
+ conf_idx = i / RTE_RETA_GROUP_SIZE;
+ reta_idx = i % RTE_RETA_GROUP_SIZE;
+ reta_conf[conf_idx].reta[reta_idx] = q_idx;
+ reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
+ }
+
+ lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);
+}
+
+static void
+lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct rte_eth_rss_conf rss_conf;
+
+ switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ lio_dev_rss_configure(eth_dev);
+ break;
+ case ETH_MQ_RX_NONE:
+ /* if mq_mode is none, disable rss mode. */
+ default:
+ memset(&rss_conf, 0, sizeof(rss_conf));
+ rss_state->hash_disable = 1;
+ lio_dev_rss_hash_update(eth_dev, &rss_conf);
+ }
+}
+
+/**
+ * Setup our receive queue/ringbuffer. This is the
+ * queue the Octeon uses to send us packets and
+ * responses. We are given a memory pool for our
+ * packet buffers that are used to populate the receive
+ * queue.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ * @param q_no
+ * Queue number
+ * @param num_rx_descs
+ * Number of entries in the queue
+ * @param socket_id
+ * Where to allocate memory
+ * @param rx_conf
+ * Pointer to the struction rte_eth_rxconf
+ * @param mp
+ * Pointer to the packet pool
+ *
+ * @return
+ * - On success, return 0
+ * - On failure, return -1
+ */
+static int
+lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
+ uint16_t num_rx_descs, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint32_t fw_mapped_oq;
+ uint16_t buf_size;
+
+ if (q_no >= lio_dev->nb_rx_queues) {
+ lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
+ return -EINVAL;
+ }
+
+ lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);
+
+ fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;
+
+ /* Free previous allocation if any */
+ if (eth_dev->data->rx_queues[q_no] != NULL) {
+ lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);
+ eth_dev->data->rx_queues[q_no] = NULL;
+ }
+
+ mbp_priv = rte_mempool_get_priv(mp);
+ buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+ if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
+ socket_id)) {
+ lio_dev_err(lio_dev, "droq allocation failed\n");
+ return -1;
+ }
+
+ eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];
+
+ return 0;
+}
+
+/**
+ * Release the receive queue/ringbuffer. Called by
+ * the upper layers.
+ *
+ * @param rxq
+ * Opaque pointer to the receive queue to release
+ *
+ * @return
+ * - nothing
+ */
+void
+lio_dev_rx_queue_release(void *rxq)
+{
+ struct lio_droq *droq = rxq;
+ int oq_no;
+
+ if (droq) {
+ oq_no = droq->q_no;
+ lio_delete_droq_queue(droq->lio_dev, oq_no);
+ }
+}
+
+/**
+ * Allocate and initialize SW ring. Initialize associated HW registers.
+ *
+ * @param eth_dev
+ * Pointer to structure rte_eth_dev
+ *
+ * @param q_no
+ * Queue number
+ *
+ * @param num_tx_descs
+ * Number of ringbuffer descriptors
+ *
+ * @param socket_id
+ * NUMA socket id, used for memory allocations
+ *
+ * @param tx_conf
+ * Pointer to the structure rte_eth_txconf
+ *
+ * @return
+ * - On success, return 0
+ * - On failure, return -errno value
+ */
+static int
+lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
+ uint16_t num_tx_descs, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
+ int retval;
+
+ if (q_no >= lio_dev->nb_tx_queues) {
+ lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
+ return -EINVAL;
+ }
+
+ lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);
+
+ /* Free previous allocation if any */
+ if (eth_dev->data->tx_queues[q_no] != NULL) {
+ lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);
+ eth_dev->data->tx_queues[q_no] = NULL;
+ }
+
+ retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
+ num_tx_descs, lio_dev, socket_id);
+
+ if (retval) {
+ lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
+ return retval;
+ }
+
+ retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
+ lio_dev->instr_queue[fw_mapped_iq]->nb_desc,
+ socket_id);
+
+ if (retval) {
+ lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
+ return retval;
+ }
+
+ eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];
+
+ return 0;
+}
+
+/**
+ * Release the transmit queue/ringbuffer. Called by
+ * the upper layers.
+ *
+ * @param txq
+ * Opaque pointer to the transmit queue to release
+ *
+ * @return
+ * - nothing
+ */
+void
+lio_dev_tx_queue_release(void *txq)
+{
+ struct lio_instr_queue *tq = txq;
+ uint32_t fw_mapped_iq_no;
+
+
+ if (tq) {
+ /* Free sg_list */
+ lio_delete_sglist(tq);
+
+ fw_mapped_iq_no = tq->txpciq.s.q_no;
+ lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
+ }
+}
+
+/**
+ * Api to check link state.
+ */
+static void
+lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+ struct lio_link_status_resp *resp;
+ union octeon_link_status *ls;
+ struct lio_soft_command *sc;
+ uint32_t resp_size;
+
+ if (!lio_dev->intf_open)
+ return;
+
+ resp_size = sizeof(struct lio_link_status_resp);
+ sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
+ if (sc == NULL)
+ return;
+
+ resp = (struct lio_link_status_resp *)sc->virtrptr;
+ lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
+ LIO_OPCODE_INFO, 0, 0, 0);
+
+ /* Setting wait time in seconds */
+ sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
+
+ if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
+ goto get_status_fail;
+
+ while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
+ rte_delay_ms(1);
+ }
+
+ if (resp->status)
+ goto get_status_fail;
+
+ ls = &resp->link_info.link;
+
+ lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);
+
+ if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
+ if (ls->s.mtu < eth_dev->data->mtu) {
+ lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n",
+ ls->s.mtu);
+ eth_dev->data->mtu = ls->s.mtu;
+ }
+ lio_dev->linfo.link.link_status64 = ls->link_status64;
+ lio_dev_link_update(eth_dev, 0);
+ }
+
+ lio_free_soft_command(sc);
+
+ return;
+
+get_status_fail:
+ lio_free_soft_command(sc);
+}
+
+/* This function will be invoked every LSC_TIMEOUT ns (100ms)
+ * and will update link state if it changes.
+ */
+static void
+lio_sync_link_state_check(void *eth_dev)
+{
+ struct lio_device *lio_dev =
+ (((struct rte_eth_dev *)eth_dev)->data->dev_private);
+
+ if (lio_dev->port_configured)
+ lio_dev_get_link_status(eth_dev);
+
+ /* Schedule periodic link status check.
+ * Stop check if interface is close and start again while opening.
+ */
+ if (lio_dev->intf_open)
+ rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
+ eth_dev);
+}
+
+static int
+lio_dev_start(struct rte_eth_dev *eth_dev)
+{
+ uint16_t mtu;
+ uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+ int ret = 0;
+
+ lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);
+
+ if (lio_dev->fn_list.enable_io_queues(lio_dev))
+ return -1;
+
+ if (lio_send_rx_ctrl_cmd(eth_dev, 1))
+ return -1;
+
+ /* Ready for link status updates */
+ lio_dev->intf_open = 1;
+ rte_mb();
+
+ /* Configure RSS if device configured with multiple RX queues. */
+ lio_dev_mq_rx_configure(eth_dev);
+
+ /* Before update the link info,
+ * must set linfo.link.link_status64 to 0.
+ */
+ lio_dev->linfo.link.link_status64 = 0;
+
+ /* start polling for lsc */
+ ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
+ lio_sync_link_state_check,
+ eth_dev);
+ if (ret) {
+ lio_dev_err(lio_dev,
+ "link state check handler creation failed\n");
+ goto dev_lsc_handle_error;
+ }
+
+ while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))
+ rte_delay_ms(1);
+
+ if (lio_dev->linfo.link.link_status64 == 0) {
+ ret = -1;
+ goto dev_mtu_set_error;
+ }
+
+ mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);
+ if (mtu < ETHER_MIN_MTU)
+ mtu = ETHER_MIN_MTU;
+
+ if (eth_dev->data->mtu != mtu) {
+ ret = lio_dev_mtu_set(eth_dev, mtu);
+ if (ret)
+ goto dev_mtu_set_error;
+ }
+
+ return 0;
+
+dev_mtu_set_error:
+ rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
+
+dev_lsc_handle_error:
+ lio_dev->intf_open = 0;
+ lio_send_rx_ctrl_cmd(eth_dev, 0);
+
+ return ret;
+}
+
+/* Stop device and disable input/output functions */
+static void
+lio_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id);
+ lio_dev->intf_open = 0;
+ rte_mb();
+
+ /* Cancel callback if still running. */
+ rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
+
+ lio_send_rx_ctrl_cmd(eth_dev, 0);
+
+ lio_wait_for_instr_fetch(lio_dev);
+
+ /* Clear recorded link status */
+ lio_dev->linfo.link.link_status64 = 0;
+}
+
+static int
+lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
+ return 0;
+ }
+
+ if (lio_dev->linfo.link.s.link_up) {
+ lio_dev_info(lio_dev, "Link is already UP\n");
+ return 0;
+ }
+
+ if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
+ lio_dev_err(lio_dev, "Unable to set Link UP\n");
+ return -1;
+ }
+
+ lio_dev->linfo.link.s.link_up = 1;
+ eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+
+ return 0;
+}
+
+static int
+lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
+ return 0;
+ }
+
+ if (!lio_dev->linfo.link.s.link_up) {
+ lio_dev_info(lio_dev, "Link is already DOWN\n");
+ return 0;
+ }
+
+ lio_dev->linfo.link.s.link_up = 0;
+ eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
+ if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
+ lio_dev->linfo.link.s.link_up = 1;
+ eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ lio_dev_err(lio_dev, "Unable to set Link Down\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Reset and stop the device. This occurs on the first
+ * call to this routine. Subsequent calls will simply
+ * return. NB: This will require the NIC to be rebooted.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ *
+ * @return
+ * - nothing
+ */
+static void
+lio_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);
+
+ if (lio_dev->intf_open)
+ lio_dev_stop(eth_dev);
+
+ /* Reset ioq regs */
+ lio_dev->fn_list.setup_device_regs(lio_dev);
+
+ if (lio_dev->pci_dev->kdrv == RTE_KDRV_IGB_UIO) {
+ cn23xx_vf_ask_pf_to_do_flr(lio_dev);
+ rte_delay_ms(LIO_PCI_FLR_WAIT);
+ }
+
+ /* lio_free_mbox */
+ lio_dev->fn_list.free_mbox(lio_dev);
+
+ /* Free glist resources */
+ rte_free(lio_dev->glist_head);
+ rte_free(lio_dev->glist_lock);
+ lio_dev->glist_head = NULL;
+ lio_dev->glist_lock = NULL;
+
+ lio_dev->port_configured = 0;
+
+ /* Delete all queues */
+ lio_dev_clear_queues(eth_dev);
+}
+
+/**
+ * Enable tunnel rx checksum verification from firmware.
+ */
+static void
+lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;
+ ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n");
+}
+
+/**
+ * Enable checksum calculation for inner packet in a tunnel.
+ */
+static void
+lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;
+ ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
+}
+
+static int
+lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq,
+ int num_rxq)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) {
+ lio_dev_err(lio_dev, "Require firmware version >= %s\n",
+ LIO_Q_RECONF_MIN_VERSION);
+ return -ENOTSUP;
+ }
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL;
+ ctrl_pkt.ncmd.s.param1 = num_txq;
+ ctrl_pkt.ncmd.s.param2 = num_rxq;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send queue count control command\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Queue count control command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (lio_dev->nb_rx_queues != num_rxq ||
+ lio_dev->nb_tx_queues != num_txq) {
+ if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq))
+ return -1;
+ lio_dev->nb_rx_queues = num_rxq;
+ lio_dev->nb_tx_queues = num_txq;
+ }
+
+ if (lio_dev->intf_open)
+ lio_dev_stop(eth_dev);
+
+ /* Reset ioq registers */
+ if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
+ lio_dev_err(lio_dev, "Failed to configure device registers\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+ int retval, num_iqueues, num_oqueues;
+ uint8_t mac[ETHER_ADDR_LEN], i;
+ struct lio_if_cfg_resp *resp;
+ struct lio_soft_command *sc;
+ union lio_if_cfg if_cfg;
+ uint32_t resp_size;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Inform firmware about change in number of queues to use.
+ * Disable IO queues and reset registers for re-configuration.
+ */
+ if (lio_dev->port_configured)
+ return lio_reconf_queues(eth_dev,
+ eth_dev->data->nb_tx_queues,
+ eth_dev->data->nb_rx_queues);
+
+ lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
+ lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;
+
+ /* Set max number of queues which can be re-configured. */
+ lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues;
+ lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues;
+
+ resp_size = sizeof(struct lio_if_cfg_resp);
+ sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
+ if (sc == NULL)
+ return -ENOMEM;
+
+ resp = (struct lio_if_cfg_resp *)sc->virtrptr;
+
+ /* Firmware doesn't have capability to reconfigure the queues,
+ * Claim all queues, and use as many required
+ */
+ if_cfg.if_cfg64 = 0;
+ if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
+ if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
+ if_cfg.s.base_queue = 0;
+
+ if_cfg.s.gmx_port_id = lio_dev->pf_num;
+
+ lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
+ LIO_OPCODE_IF_CFG, 0,
+ if_cfg.if_cfg64, 0);
+
+ /* Setting wait time in seconds */
+ sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
+
+ retval = lio_send_soft_command(lio_dev, sc);
+ if (retval == LIO_IQ_SEND_FAILED) {
+ lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
+ retval);
+ /* Soft instr is freed by driver in case of failure. */
+ goto nic_config_fail;
+ }
+
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived or timed-out.
+ */
+ while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
+ lio_process_ordered_list(lio_dev);
+ rte_delay_ms(1);
+ }
+
+ retval = resp->status;
+ if (retval) {
+ lio_dev_err(lio_dev, "iq/oq config failed\n");
+ goto nic_config_fail;
+ }
+
+ snprintf(lio_dev->firmware_version, LIO_FW_VERSION_LENGTH, "%s",
+ resp->cfg_info.lio_firmware_version);
+
+ lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
+ sizeof(struct octeon_if_cfg_info) >> 3);
+
+ num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
+ num_oqueues = lio_hweight64(resp->cfg_info.oqmask);
+
+ if (!(num_iqueues) || !(num_oqueues)) {
+ lio_dev_err(lio_dev,
+ "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
+ (unsigned long)resp->cfg_info.iqmask,
+ (unsigned long)resp->cfg_info.oqmask);
+ goto nic_config_fail;
+ }
+
+ lio_dev_dbg(lio_dev,
+ "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
+ eth_dev->data->port_id,
+ (unsigned long)resp->cfg_info.iqmask,
+ (unsigned long)resp->cfg_info.oqmask,
+ num_iqueues, num_oqueues);
+
+ lio_dev->linfo.num_rxpciq = num_oqueues;
+ lio_dev->linfo.num_txpciq = num_iqueues;
+
+ for (i = 0; i < num_oqueues; i++) {
+ lio_dev->linfo.rxpciq[i].rxpciq64 =
+ resp->cfg_info.linfo.rxpciq[i].rxpciq64;
+ lio_dev_dbg(lio_dev, "index %d OQ %d\n",
+ i, lio_dev->linfo.rxpciq[i].s.q_no);
+ }
+
+ for (i = 0; i < num_iqueues; i++) {
+ lio_dev->linfo.txpciq[i].txpciq64 =
+ resp->cfg_info.linfo.txpciq[i].txpciq64;
+ lio_dev_dbg(lio_dev, "index %d IQ %d\n",
+ i, lio_dev->linfo.txpciq[i].s.q_no);
+ }
+
+ lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+ lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+ lio_dev->linfo.link.link_status64 =
+ resp->cfg_info.linfo.link.link_status64;
+
+ /* 64-bit swap required on LE machines */
+ lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
+ 2 + i));
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *)mac, &eth_dev->data->mac_addrs[0]);
+
+ /* enable firmware checksum support for tunnel packets */
+ lio_enable_hw_tunnel_rx_checksum(eth_dev);
+ lio_enable_hw_tunnel_tx_checksum(eth_dev);
+
+ lio_dev->glist_lock =
+ rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
+ if (lio_dev->glist_lock == NULL)
+ return -ENOMEM;
+
+ lio_dev->glist_head =
+ rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
+ 0);
+ if (lio_dev->glist_head == NULL) {
+ rte_free(lio_dev->glist_lock);
+ lio_dev->glist_lock = NULL;
+ return -ENOMEM;
+ }
+
+ lio_dev_link_update(eth_dev, 0);
+
+ lio_dev->port_configured = 1;
+
+ lio_free_soft_command(sc);
+
+ /* Reset ioq regs */
+ lio_dev->fn_list.setup_device_regs(lio_dev);
+
+ /* Free iq_0 used during init */
+ lio_free_instr_queue0(lio_dev);
+
+ return 0;
+
+nic_config_fail:
+ lio_dev_err(lio_dev, "Failed retval %d\n", retval);
+ lio_free_soft_command(sc);
+ lio_free_instr_queue0(lio_dev);
+
+ return -ENODEV;
+}
+
+/* Define our ethernet definitions */
+static const struct eth_dev_ops liovf_eth_dev_ops = {
+ .dev_configure = lio_dev_configure,
+ .dev_start = lio_dev_start,
+ .dev_stop = lio_dev_stop,
+ .dev_set_link_up = lio_dev_set_link_up,
+ .dev_set_link_down = lio_dev_set_link_down,
+ .dev_close = lio_dev_close,
+ .promiscuous_enable = lio_dev_promiscuous_enable,
+ .promiscuous_disable = lio_dev_promiscuous_disable,
+ .allmulticast_enable = lio_dev_allmulticast_enable,
+ .allmulticast_disable = lio_dev_allmulticast_disable,
+ .link_update = lio_dev_link_update,
+ .stats_get = lio_dev_stats_get,
+ .xstats_get = lio_dev_xstats_get,
+ .xstats_get_names = lio_dev_xstats_get_names,
+ .stats_reset = lio_dev_stats_reset,
+ .xstats_reset = lio_dev_xstats_reset,
+ .dev_infos_get = lio_dev_info_get,
+ .vlan_filter_set = lio_dev_vlan_filter_set,
+ .rx_queue_setup = lio_dev_rx_queue_setup,
+ .rx_queue_release = lio_dev_rx_queue_release,
+ .tx_queue_setup = lio_dev_tx_queue_setup,
+ .tx_queue_release = lio_dev_tx_queue_release,
+ .reta_update = lio_dev_rss_reta_update,
+ .reta_query = lio_dev_rss_reta_query,
+ .rss_hash_conf_get = lio_dev_rss_hash_conf_get,
+ .rss_hash_update = lio_dev_rss_hash_update,
+ .udp_tunnel_port_add = lio_dev_udp_tunnel_add,
+ .udp_tunnel_port_del = lio_dev_udp_tunnel_del,
+ .mtu_set = lio_dev_mtu_set,
+};
+
+static void
+lio_check_pf_hs_response(void *lio_dev)
+{
+ struct lio_device *dev = lio_dev;
+
+ /* check till response arrives */
+ if (dev->pfvf_hsword.coproc_tics_per_us)
+ return;
+
+ cn23xx_vf_handle_mbox(dev);
+
+ rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
+}
+
+/**
+ * \brief Identify the LIO device and to map the BAR address space
+ * @param lio_dev lio device
+ */
+static int
+lio_chip_specific_setup(struct lio_device *lio_dev)
+{
+ struct rte_pci_device *pdev = lio_dev->pci_dev;
+ uint32_t dev_id = pdev->id.device_id;
+ const char *s;
+ int ret = 1;
+
+ switch (dev_id) {
+ case LIO_CN23XX_VF_VID:
+ lio_dev->chip_id = LIO_CN23XX_VF_VID;
+ ret = cn23xx_vf_setup_device(lio_dev);
+ s = "CN23XX VF";
+ break;
+ default:
+ s = "?";
+ lio_dev_err(lio_dev, "Unsupported Chip\n");
+ }
+
+ if (!ret)
+ lio_dev_info(lio_dev, "DEVICE : %s\n", s);
+
+ return ret;
+}
+
+static int
+lio_first_time_init(struct lio_device *lio_dev,
+ struct rte_pci_device *pdev)
+{
+ int dpdk_queues;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* set dpdk specific pci device pointer */
+ lio_dev->pci_dev = pdev;
+
+ /* Identify the LIO type and set device ops */
+ if (lio_chip_specific_setup(lio_dev)) {
+ lio_dev_err(lio_dev, "Chip specific setup failed\n");
+ return -1;
+ }
+
+ /* Initialize soft command buffer pool */
+ if (lio_setup_sc_buffer_pool(lio_dev)) {
+ lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");
+ return -1;
+ }
+
+ /* Initialize lists to manage the requests of different types that
+ * arrive from applications for this lio device.
+ */
+ lio_setup_response_list(lio_dev);
+
+ if (lio_dev->fn_list.setup_mbox(lio_dev)) {
+ lio_dev_err(lio_dev, "Mailbox setup failed\n");
+ goto error;
+ }
+
+ /* Check PF response */
+ lio_check_pf_hs_response((void *)lio_dev);
+
+ /* Do handshake and exit if incompatible PF driver */
+ if (cn23xx_pfvf_handshake(lio_dev))
+ goto error;
+
+ /* Request and wait for device reset. */
+ if (pdev->kdrv == RTE_KDRV_IGB_UIO) {
+ cn23xx_vf_ask_pf_to_do_flr(lio_dev);
+ /* FLR wait time doubled as a precaution. */
+ rte_delay_ms(LIO_PCI_FLR_WAIT * 2);
+ }
+
+ if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
+ lio_dev_err(lio_dev, "Failed to configure device registers\n");
+ goto error;
+ }
+
+ if (lio_setup_instr_queue0(lio_dev)) {
+ lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
+ goto error;
+ }
+
+ dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;
+
+ lio_dev->max_tx_queues = dpdk_queues;
+ lio_dev->max_rx_queues = dpdk_queues;
+
+ /* Enable input and output queues for this device */
+ if (lio_dev->fn_list.enable_io_queues(lio_dev))
+ goto error;
+
+ return 0;
+
+error:
+ lio_free_sc_buffer_pool(lio_dev);
+ if (lio_dev->mbox[0])
+ lio_dev->fn_list.free_mbox(lio_dev);
+ if (lio_dev->instr_queue[0])
+ lio_free_instr_queue0(lio_dev);
+
+ return -1;
+}
+
+static int
+lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ /* lio_free_sc_buffer_pool */
+ lio_free_sc_buffer_pool(lio_dev);
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ return 0;
+}
+
+static int
+lio_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
+ eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;
+
+ /* Primary does the initialization. */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ rte_eth_copy_pci_info(eth_dev, pdev);
+
+ if (pdev->mem_resource[0].addr) {
+ lio_dev->hw_addr = pdev->mem_resource[0].addr;
+ } else {
+ PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
+ return -ENODEV;
+ }
+
+ lio_dev->eth_dev = eth_dev;
+ /* set lio device print string */
+ snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
+ "%s[%02x:%02x.%x]", pdev->driver->driver.name,
+ pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+
+ lio_dev->port_id = eth_dev->data->port_id;
+
+ if (lio_first_time_init(lio_dev, pdev)) {
+ lio_dev_err(lio_dev, "Device init failed\n");
+ return -EINVAL;
+ }
+
+ eth_dev->dev_ops = &liovf_eth_dev_ops;
+ eth_dev->data->mac_addrs = rte_zmalloc("lio", ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ lio_dev_err(lio_dev,
+ "MAC addresses memory allocation failed\n");
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ return -ENOMEM;
+ }
+
+ rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);
+ rte_wmb();
+
+ lio_dev->port_configured = 0;
+ /* Always allow unicast packets */
+ lio_dev->ifflags |= LIO_IFFLAG_UNICAST;
+
+ return 0;
+}
+
+static int
+lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device),
+ lio_eth_dev_init);
+}
+
+static int
+lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ lio_eth_dev_uninit);
+}
+
+/* Set of PCI devices this driver supports */
+static const struct rte_pci_id pci_id_liovf_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
+ { .vendor_id = 0, /* sentinel */ }
+};
+
+static struct rte_pci_driver rte_liovf_pmd = {
+ .id_table = pci_id_liovf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = lio_eth_dev_pci_probe,
+ .remove = lio_eth_dev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");
+
+RTE_INIT(lio_init_log)
+{
+ lio_logtype_init = rte_log_register("pmd.net.liquidio.init");
+ if (lio_logtype_init >= 0)
+ rte_log_set_level(lio_logtype_init, RTE_LOG_NOTICE);
+ lio_logtype_driver = rte_log_register("pmd.net.liquidio.driver");
+ if (lio_logtype_driver >= 0)
+ rte_log_set_level(lio_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.h b/src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.h
new file mode 100644
index 00000000..74cd2fb6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _LIO_ETHDEV_H_
+#define _LIO_ETHDEV_H_
+
+#include <stdint.h>
+
+#include "lio_struct.h"
+
+/* timeout to check link state updates from firmware in us */
+#define LIO_LSC_TIMEOUT 100000 /* 100000us (100ms) */
+#define LIO_MAX_CMD_TIMEOUT 10000 /* 10000ms (10s) */
+
+#define LIO_DEV(_eth_dev) ((_eth_dev)->data->dev_private)
+
+/* LIO Response condition variable */
+struct lio_dev_ctrl_cmd {
+ struct rte_eth_dev *eth_dev;
+ uint64_t cond;
+};
+
+enum lio_bus_speed {
+ LIO_LINK_SPEED_UNKNOWN = 0,
+ LIO_LINK_SPEED_10000 = 10000,
+ LIO_LINK_SPEED_25000 = 25000
+};
+
+struct octeon_if_cfg_info {
+ uint64_t iqmask; /** mask for IQs enabled for the port */
+ uint64_t oqmask; /** mask for OQs enabled for the port */
+ struct octeon_link_info linfo; /** initial link information */
+ char lio_firmware_version[LIO_FW_VERSION_LENGTH];
+};
+
+/** Stats for each NIC port in RX direction. */
+struct octeon_rx_stats {
+ /* link-level stats */
+ uint64_t total_rcvd;
+ uint64_t bytes_rcvd;
+ uint64_t total_bcst;
+ uint64_t total_mcst;
+ uint64_t runts;
+ uint64_t ctl_rcvd;
+ uint64_t fifo_err; /* Accounts for over/under-run of buffers */
+ uint64_t dmac_drop;
+ uint64_t fcs_err;
+ uint64_t jabber_err;
+ uint64_t l2_err;
+ uint64_t frame_err;
+
+ /* firmware stats */
+ uint64_t fw_total_rcvd;
+ uint64_t fw_total_fwd;
+ uint64_t fw_total_fwd_bytes;
+ uint64_t fw_err_pko;
+ uint64_t fw_err_link;
+ uint64_t fw_err_drop;
+ uint64_t fw_rx_vxlan;
+ uint64_t fw_rx_vxlan_err;
+
+ /* LRO */
+ uint64_t fw_lro_pkts; /* Number of packets that are LROed */
+ uint64_t fw_lro_octs; /* Number of octets that are LROed */
+ uint64_t fw_total_lro; /* Number of LRO packets formed */
+ uint64_t fw_lro_aborts; /* Number of times lRO of packet aborted */
+ uint64_t fw_lro_aborts_port;
+ uint64_t fw_lro_aborts_seq;
+ uint64_t fw_lro_aborts_tsval;
+ uint64_t fw_lro_aborts_timer;
+ /* intrmod: packet forward rate */
+ uint64_t fwd_rate;
+};
+
+/** Stats for each NIC port in RX direction. */
+struct octeon_tx_stats {
+ /* link-level stats */
+ uint64_t total_pkts_sent;
+ uint64_t total_bytes_sent;
+ uint64_t mcast_pkts_sent;
+ uint64_t bcast_pkts_sent;
+ uint64_t ctl_sent;
+ uint64_t one_collision_sent; /* Packets sent after one collision */
+ /* Packets sent after multiple collision */
+ uint64_t multi_collision_sent;
+ /* Packets not sent due to max collisions */
+ uint64_t max_collision_fail;
+ /* Packets not sent due to max deferrals */
+ uint64_t max_deferral_fail;
+ /* Accounts for over/under-run of buffers */
+ uint64_t fifo_err;
+ uint64_t runts;
+ uint64_t total_collisions; /* Total number of collisions detected */
+
+ /* firmware stats */
+ uint64_t fw_total_sent;
+ uint64_t fw_total_fwd;
+ uint64_t fw_total_fwd_bytes;
+ uint64_t fw_err_pko;
+ uint64_t fw_err_link;
+ uint64_t fw_err_drop;
+ uint64_t fw_err_tso;
+ uint64_t fw_tso; /* number of tso requests */
+ uint64_t fw_tso_fwd; /* number of packets segmented in tso */
+ uint64_t fw_tx_vxlan;
+};
+
+struct octeon_link_stats {
+ struct octeon_rx_stats fromwire;
+ struct octeon_tx_stats fromhost;
+};
+
+union lio_if_cfg {
+ uint64_t if_cfg64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t base_queue : 16;
+ uint64_t num_iqueues : 16;
+ uint64_t num_oqueues : 16;
+ uint64_t gmx_port_id : 8;
+ uint64_t vf_id : 8;
+#else
+ uint64_t vf_id : 8;
+ uint64_t gmx_port_id : 8;
+ uint64_t num_oqueues : 16;
+ uint64_t num_iqueues : 16;
+ uint64_t base_queue : 16;
+#endif
+ } s;
+};
+
+struct lio_if_cfg_resp {
+ uint64_t rh;
+ struct octeon_if_cfg_info cfg_info;
+ uint64_t status;
+};
+
+struct lio_link_stats_resp {
+ uint64_t rh;
+ struct octeon_link_stats link_stats;
+ uint64_t status;
+};
+
+struct lio_link_status_resp {
+ uint64_t rh;
+ struct octeon_link_info link_info;
+ uint64_t status;
+};
+
+struct lio_rss_set {
+ struct param {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ uint64_t flags : 16;
+ uint64_t hashinfo : 32;
+ uint64_t itablesize : 16;
+ uint64_t hashkeysize : 16;
+ uint64_t reserved : 48;
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t itablesize : 16;
+ uint64_t hashinfo : 32;
+ uint64_t flags : 16;
+ uint64_t reserved : 48;
+ uint64_t hashkeysize : 16;
+#endif
+ } param;
+
+ uint8_t itable[LIO_RSS_MAX_TABLE_SZ];
+ uint8_t key[LIO_RSS_MAX_KEY_SZ];
+};
+
+void lio_dev_rx_queue_release(void *rxq);
+
+void lio_dev_tx_queue_release(void *txq);
+
+#endif /* _LIO_ETHDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/liquidio/lio_logs.h b/src/spdk/dpdk/drivers/net/liquidio/lio_logs.h
new file mode 100644
index 00000000..f2278270
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/lio_logs.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _LIO_LOGS_H_
+#define _LIO_LOGS_H_
+
+extern int lio_logtype_driver;
+#define lio_dev_printf(lio_dev, level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, lio_logtype_driver, \
+ "%s" fmt, (lio_dev)->dev_string, ##args)
+
+#define lio_dev_info(lio_dev, fmt, args...) \
+ lio_dev_printf(lio_dev, INFO, "INFO: " fmt, ##args)
+
+#define lio_dev_err(lio_dev, fmt, args...) \
+ lio_dev_printf(lio_dev, ERR, "ERROR: %s() " fmt, __func__, ##args)
+
+extern int lio_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, lio_logtype_init, \
+ fmt, ## args)
+
+/* Enable these through config options */
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, "%s() >>\n", __func__)
+
+#define lio_dev_dbg(lio_dev, fmt, args...) \
+ lio_dev_printf(lio_dev, DEBUG, "DEBUG: %s() " fmt, __func__, ##args)
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_RX
+#define PMD_RX_LOG(lio_dev, level, fmt, args...) \
+ lio_dev_printf(lio_dev, level, "RX: %s() " fmt, __func__, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_RX */
+#define PMD_RX_LOG(lio_dev, level, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_RX */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_TX
+#define PMD_TX_LOG(lio_dev, level, fmt, args...) \
+ lio_dev_printf(lio_dev, level, "TX: %s() " fmt, __func__, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_TX */
+#define PMD_TX_LOG(lio_dev, level, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_TX */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_MBOX
+#define PMD_MBOX_LOG(lio_dev, level, fmt, args...) \
+ lio_dev_printf(lio_dev, level, "MBOX: %s() " fmt, __func__, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_MBOX */
+#define PMD_MBOX_LOG(level, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_MBOX */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_REGS
+#define PMD_REGS_LOG(lio_dev, fmt, args...) \
+ lio_dev_printf(lio_dev, DEBUG, "REGS: " fmt, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_REGS */
+#define PMD_REGS_LOG(level, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_REGS */
+
+#endif /* _LIO_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.c b/src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.c
new file mode 100644
index 00000000..8d705bfe
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.c
@@ -0,0 +1,1806 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+
+#include "lio_logs.h"
+#include "lio_struct.h"
+#include "lio_ethdev.h"
+#include "lio_rxtx.h"
+
+#define LIO_MAX_SG 12
+/* Flush iq if available tx_desc fall below LIO_FLUSH_WM */
+#define LIO_FLUSH_WM(_iq) ((_iq)->nb_desc / 2)
+#define LIO_PKT_IN_DONE_CNT_MASK 0x00000000FFFFFFFFULL
+
+static void
+lio_droq_compute_max_packet_bufs(struct lio_droq *droq)
+{
+ uint32_t count = 0;
+
+ do {
+ count += droq->buffer_size;
+ } while (count < LIO_MAX_RX_PKTLEN);
+}
+
+static void
+lio_droq_reset_indices(struct lio_droq *droq)
+{
+ droq->read_idx = 0;
+ droq->write_idx = 0;
+ droq->refill_idx = 0;
+ droq->refill_count = 0;
+ rte_atomic64_set(&droq->pkts_pending, 0);
+}
+
+static void
+lio_droq_destroy_ring_buffers(struct lio_droq *droq)
+{
+ uint32_t i;
+
+ for (i = 0; i < droq->nb_desc; i++) {
+ if (droq->recv_buf_list[i].buffer) {
+ rte_pktmbuf_free((struct rte_mbuf *)
+ droq->recv_buf_list[i].buffer);
+ droq->recv_buf_list[i].buffer = NULL;
+ }
+ }
+
+ lio_droq_reset_indices(droq);
+}
+
+static int
+lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
+ struct lio_droq *droq)
+{
+ struct lio_droq_desc *desc_ring = droq->desc_ring;
+ uint32_t i;
+ void *buf;
+
+ for (i = 0; i < droq->nb_desc; i++) {
+ buf = rte_pktmbuf_alloc(droq->mpool);
+ if (buf == NULL) {
+ lio_dev_err(lio_dev, "buffer alloc failed\n");
+ droq->stats.rx_alloc_failure++;
+ lio_droq_destroy_ring_buffers(droq);
+ return -ENOMEM;
+ }
+
+ droq->recv_buf_list[i].buffer = buf;
+ droq->info_list[i].length = 0;
+
+ /* map ring buffers into memory */
+ desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
+ desc_ring[i].buffer_ptr =
+ lio_map_ring(droq->recv_buf_list[i].buffer);
+ }
+
+ lio_droq_reset_indices(droq);
+
+ lio_droq_compute_max_packet_bufs(droq);
+
+ return 0;
+}
+
+static void
+lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz)
+{
+ const struct rte_memzone *mz_tmp;
+ int ret = 0;
+
+ if (mz == NULL) {
+ lio_dev_err(lio_dev, "Memzone NULL\n");
+ return;
+ }
+
+ mz_tmp = rte_memzone_lookup(mz->name);
+ if (mz_tmp == NULL) {
+ lio_dev_err(lio_dev, "Memzone %s Not Found\n", mz->name);
+ return;
+ }
+
+ ret = rte_memzone_free(mz);
+ if (ret)
+ lio_dev_err(lio_dev, "Memzone free Failed ret %d\n", ret);
+}
+
+/**
+ * Frees the space for descriptor ring for the droq.
+ *
+ * @param lio_dev - pointer to the lio device structure
+ * @param q_no - droq no.
+ */
+static void
+lio_delete_droq(struct lio_device *lio_dev, uint32_t q_no)
+{
+ struct lio_droq *droq = lio_dev->droq[q_no];
+
+ lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);
+
+ lio_droq_destroy_ring_buffers(droq);
+ rte_free(droq->recv_buf_list);
+ droq->recv_buf_list = NULL;
+ lio_dma_zone_free(lio_dev, droq->info_mz);
+ lio_dma_zone_free(lio_dev, droq->desc_ring_mz);
+
+ memset(droq, 0, LIO_DROQ_SIZE);
+}
+
+static void *
+lio_alloc_info_buffer(struct lio_device *lio_dev,
+ struct lio_droq *droq, unsigned int socket_id)
+{
+ droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
+ "info_list", droq->q_no,
+ (droq->nb_desc *
+ LIO_DROQ_INFO_SIZE),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ if (droq->info_mz == NULL)
+ return NULL;
+
+ droq->info_list_dma = droq->info_mz->iova;
+ droq->info_alloc_size = droq->info_mz->len;
+ droq->info_base_addr = (size_t)droq->info_mz->addr;
+
+ return droq->info_mz->addr;
+}
+
+/**
+ * Allocates space for the descriptor ring for the droq and
+ * sets the base addr, num desc etc in Octeon registers.
+ *
+ * @param lio_dev - pointer to the lio device structure
+ * @param q_no - droq no.
+ * @param app_ctx - pointer to application context
+ * @return Success: 0 Failure: -1
+ */
+static int
+lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
+ uint32_t num_descs, uint32_t desc_size,
+ struct rte_mempool *mpool, unsigned int socket_id)
+{
+ uint32_t c_refill_threshold;
+ uint32_t desc_ring_size;
+ struct lio_droq *droq;
+
+ lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);
+
+ droq = lio_dev->droq[q_no];
+ droq->lio_dev = lio_dev;
+ droq->q_no = q_no;
+ droq->mpool = mpool;
+
+ c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);
+
+ droq->nb_desc = num_descs;
+ droq->buffer_size = desc_size;
+
+ desc_ring_size = droq->nb_desc * LIO_DROQ_DESC_SIZE;
+ droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
+ "droq", q_no,
+ desc_ring_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ if (droq->desc_ring_mz == NULL) {
+ lio_dev_err(lio_dev,
+ "Output queue %d ring alloc failed\n", q_no);
+ return -1;
+ }
+
+ droq->desc_ring_dma = droq->desc_ring_mz->iova;
+ droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;
+
+ lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
+ q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
+ lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no,
+ droq->nb_desc);
+
+ droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);
+ if (droq->info_list == NULL) {
+ lio_dev_err(lio_dev, "Cannot allocate memory for info list.\n");
+ goto init_droq_fail;
+ }
+
+ droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
+ (droq->nb_desc *
+ LIO_DROQ_RECVBUF_SIZE),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (droq->recv_buf_list == NULL) {
+ lio_dev_err(lio_dev,
+ "Output queue recv buf list alloc failed\n");
+ goto init_droq_fail;
+ }
+
+ if (lio_droq_setup_ring_buffers(lio_dev, droq))
+ goto init_droq_fail;
+
+ droq->refill_threshold = c_refill_threshold;
+
+ rte_spinlock_init(&droq->lock);
+
+ lio_dev->fn_list.setup_oq_regs(lio_dev, q_no);
+
+ lio_dev->io_qmask.oq |= (1ULL << q_no);
+
+ return 0;
+
+init_droq_fail:
+ lio_delete_droq(lio_dev, q_no);
+
+ return -1;
+}
+
+int
+lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
+ int desc_size, struct rte_mempool *mpool, unsigned int socket_id)
+{
+ struct lio_droq *droq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Allocate the DS for the new droq. */
+ droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (droq == NULL)
+ return -ENOMEM;
+
+ lio_dev->droq[oq_no] = droq;
+
+ /* Initialize the Droq */
+ if (lio_init_droq(lio_dev, oq_no, num_descs, desc_size, mpool,
+ socket_id)) {
+ lio_dev_err(lio_dev, "Droq[%u] Initialization Failed\n", oq_no);
+ rte_free(lio_dev->droq[oq_no]);
+ lio_dev->droq[oq_no] = NULL;
+ return -ENOMEM;
+ }
+
+ lio_dev->num_oqs++;
+
+ lio_dev_dbg(lio_dev, "Total number of OQ: %d\n", lio_dev->num_oqs);
+
+ /* Send credit for octeon output queues. credits are always
+ * sent after the output queue is enabled.
+ */
+ rte_write32(lio_dev->droq[oq_no]->nb_desc,
+ lio_dev->droq[oq_no]->pkts_credit_reg);
+ rte_wmb();
+
+ return 0;
+}
+
+static inline uint32_t
+lio_droq_get_bufcount(uint32_t buf_size, uint32_t total_len)
+{
+ uint32_t buf_cnt = 0;
+
+ while (total_len > (buf_size * buf_cnt))
+ buf_cnt++;
+
+ return buf_cnt;
+}
+
+/* If we were not able to refill all buffers, try to move around
+ * the buffers that were not dispatched.
+ */
+static inline uint32_t
+lio_droq_refill_pullup_descs(struct lio_droq *droq,
+ struct lio_droq_desc *desc_ring)
+{
+ uint32_t refill_index = droq->refill_idx;
+ uint32_t desc_refilled = 0;
+
+ while (refill_index != droq->read_idx) {
+ if (droq->recv_buf_list[refill_index].buffer) {
+ droq->recv_buf_list[droq->refill_idx].buffer =
+ droq->recv_buf_list[refill_index].buffer;
+ desc_ring[droq->refill_idx].buffer_ptr =
+ desc_ring[refill_index].buffer_ptr;
+ droq->recv_buf_list[refill_index].buffer = NULL;
+ desc_ring[refill_index].buffer_ptr = 0;
+ do {
+ droq->refill_idx = lio_incr_index(
+ droq->refill_idx, 1,
+ droq->nb_desc);
+ desc_refilled++;
+ droq->refill_count--;
+ } while (droq->recv_buf_list[droq->refill_idx].buffer);
+ }
+ refill_index = lio_incr_index(refill_index, 1,
+ droq->nb_desc);
+ } /* while */
+
+ return desc_refilled;
+}
+
+/* lio_droq_refill
+ *
+ * @param droq - droq in which descriptors require new buffers.
+ *
+ * Description:
+ * Called during normal DROQ processing in interrupt mode or by the poll
+ * thread to refill the descriptors from which buffers were dispatched
+ * to upper layers. Attempts to allocate new buffers. If that fails, moves
+ * up buffers (that were not dispatched) to form a contiguous ring.
+ *
+ * Returns:
+ * No of descriptors refilled.
+ *
+ * Locks:
+ * This routine is called with droq->lock held.
+ */
+static uint32_t
+lio_droq_refill(struct lio_droq *droq)
+{
+ struct lio_droq_desc *desc_ring;
+ uint32_t desc_refilled = 0;
+ void *buf = NULL;
+
+ desc_ring = droq->desc_ring;
+
+ while (droq->refill_count && (desc_refilled < droq->nb_desc)) {
+ /* If a valid buffer exists (happens if there is no dispatch),
+ * reuse the buffer, else allocate.
+ */
+ if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
+ buf = rte_pktmbuf_alloc(droq->mpool);
+ /* If a buffer could not be allocated, no point in
+ * continuing
+ */
+ if (buf == NULL) {
+ droq->stats.rx_alloc_failure++;
+ break;
+ }
+
+ droq->recv_buf_list[droq->refill_idx].buffer = buf;
+ }
+
+ desc_ring[droq->refill_idx].buffer_ptr =
+ lio_map_ring(droq->recv_buf_list[droq->refill_idx].buffer);
+ /* Reset any previous values in the length field. */
+ droq->info_list[droq->refill_idx].length = 0;
+
+ droq->refill_idx = lio_incr_index(droq->refill_idx, 1,
+ droq->nb_desc);
+ desc_refilled++;
+ droq->refill_count--;
+ }
+
+ if (droq->refill_count)
+ desc_refilled += lio_droq_refill_pullup_descs(droq, desc_ring);
+
+ /* if droq->refill_count
+ * The refill count would not change in pass two. We only moved buffers
+ * to close the gap in the ring, but we would still have the same no. of
+ * buffers to refill.
+ */
+ return desc_refilled;
+}
+
+static int
+lio_droq_fast_process_packet(struct lio_device *lio_dev,
+ struct lio_droq *droq,
+ struct rte_mbuf **rx_pkts)
+{
+ struct rte_mbuf *nicbuf = NULL;
+ struct lio_droq_info *info;
+ uint32_t total_len = 0;
+ int data_total_len = 0;
+ uint32_t pkt_len = 0;
+ union octeon_rh *rh;
+ int data_pkts = 0;
+
+ info = &droq->info_list[droq->read_idx];
+ lio_swap_8B_data((uint64_t *)info, 2);
+
+ if (!info->length)
+ return -1;
+
+ /* Len of resp hdr in included in the received data len. */
+ info->length -= OCTEON_RH_SIZE;
+ rh = &info->rh;
+
+ total_len += (uint32_t)info->length;
+
+ if (lio_opcode_slow_path(rh)) {
+ uint32_t buf_cnt;
+
+ buf_cnt = lio_droq_get_bufcount(droq->buffer_size,
+ (uint32_t)info->length);
+ droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt,
+ droq->nb_desc);
+ droq->refill_count += buf_cnt;
+ } else {
+ if (info->length <= droq->buffer_size) {
+ if (rh->r_dh.has_hash)
+ pkt_len = (uint32_t)(info->length - 8);
+ else
+ pkt_len = (uint32_t)info->length;
+
+ nicbuf = droq->recv_buf_list[droq->read_idx].buffer;
+ droq->recv_buf_list[droq->read_idx].buffer = NULL;
+ droq->read_idx = lio_incr_index(
+ droq->read_idx, 1,
+ droq->nb_desc);
+ droq->refill_count++;
+
+ if (likely(nicbuf != NULL)) {
+ /* We don't have a way to pass flags yet */
+ nicbuf->ol_flags = 0;
+ if (rh->r_dh.has_hash) {
+ uint64_t *hash_ptr;
+
+ nicbuf->ol_flags |= PKT_RX_RSS_HASH;
+ hash_ptr = rte_pktmbuf_mtod(nicbuf,
+ uint64_t *);
+ lio_swap_8B_data(hash_ptr, 1);
+ nicbuf->hash.rss = (uint32_t)*hash_ptr;
+ nicbuf->data_off += 8;
+ }
+
+ nicbuf->pkt_len = pkt_len;
+ nicbuf->data_len = pkt_len;
+ nicbuf->port = lio_dev->port_id;
+ /* Store the mbuf */
+ rx_pkts[data_pkts++] = nicbuf;
+ data_total_len += pkt_len;
+ }
+
+ /* Prefetch buffer pointers when on a cache line
+ * boundary
+ */
+ if ((droq->read_idx & 3) == 0) {
+ rte_prefetch0(
+ &droq->recv_buf_list[droq->read_idx]);
+ rte_prefetch0(
+ &droq->info_list[droq->read_idx]);
+ }
+ } else {
+ struct rte_mbuf *first_buf = NULL;
+ struct rte_mbuf *last_buf = NULL;
+
+ while (pkt_len < info->length) {
+ int cpy_len = 0;
+
+ cpy_len = ((pkt_len + droq->buffer_size) >
+ info->length)
+ ? ((uint32_t)info->length -
+ pkt_len)
+ : droq->buffer_size;
+
+ nicbuf =
+ droq->recv_buf_list[droq->read_idx].buffer;
+ droq->recv_buf_list[droq->read_idx].buffer =
+ NULL;
+
+ if (likely(nicbuf != NULL)) {
+ /* Note the first seg */
+ if (!pkt_len)
+ first_buf = nicbuf;
+
+ nicbuf->port = lio_dev->port_id;
+ /* We don't have a way to pass
+ * flags yet
+ */
+ nicbuf->ol_flags = 0;
+ if ((!pkt_len) && (rh->r_dh.has_hash)) {
+ uint64_t *hash_ptr;
+
+ nicbuf->ol_flags |=
+ PKT_RX_RSS_HASH;
+ hash_ptr = rte_pktmbuf_mtod(
+ nicbuf, uint64_t *);
+ lio_swap_8B_data(hash_ptr, 1);
+ nicbuf->hash.rss =
+ (uint32_t)*hash_ptr;
+ nicbuf->data_off += 8;
+ nicbuf->pkt_len = cpy_len - 8;
+ nicbuf->data_len = cpy_len - 8;
+ } else {
+ nicbuf->pkt_len = cpy_len;
+ nicbuf->data_len = cpy_len;
+ }
+
+ if (pkt_len)
+ first_buf->nb_segs++;
+
+ if (last_buf)
+ last_buf->next = nicbuf;
+
+ last_buf = nicbuf;
+ } else {
+ PMD_RX_LOG(lio_dev, ERR, "no buf\n");
+ }
+
+ pkt_len += cpy_len;
+ droq->read_idx = lio_incr_index(
+ droq->read_idx,
+ 1, droq->nb_desc);
+ droq->refill_count++;
+
+ /* Prefetch buffer pointers when on a
+ * cache line boundary
+ */
+ if ((droq->read_idx & 3) == 0) {
+ rte_prefetch0(&droq->recv_buf_list
+ [droq->read_idx]);
+
+ rte_prefetch0(
+ &droq->info_list[droq->read_idx]);
+ }
+ }
+ rx_pkts[data_pkts++] = first_buf;
+ if (rh->r_dh.has_hash)
+ data_total_len += (pkt_len - 8);
+ else
+ data_total_len += pkt_len;
+ }
+
+ /* Inform upper layer about packet checksum verification */
+ struct rte_mbuf *m = rx_pkts[data_pkts - 1];
+
+ if (rh->r_dh.csum_verified & LIO_IP_CSUM_VERIFIED)
+ m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (rh->r_dh.csum_verified & LIO_L4_CSUM_VERIFIED)
+ m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+
+ if (droq->refill_count >= droq->refill_threshold) {
+ int desc_refilled = lio_droq_refill(droq);
+
+ /* Flush the droq descriptor data to memory to be sure
+ * that when we update the credits the data in memory is
+ * accurate.
+ */
+ rte_wmb();
+ rte_write32(desc_refilled, droq->pkts_credit_reg);
+ /* make sure mmio write completes */
+ rte_wmb();
+ }
+
+ info->length = 0;
+ info->rh.rh64 = 0;
+
+ droq->stats.pkts_received++;
+ droq->stats.rx_pkts_received += data_pkts;
+ droq->stats.rx_bytes_received += data_total_len;
+ droq->stats.bytes_received += total_len;
+
+ return data_pkts;
+}
+
+static uint32_t
+lio_droq_fast_process_packets(struct lio_device *lio_dev,
+ struct lio_droq *droq,
+ struct rte_mbuf **rx_pkts,
+ uint32_t pkts_to_process)
+{
+ int ret, data_pkts = 0;
+ uint32_t pkt;
+
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ ret = lio_droq_fast_process_packet(lio_dev, droq,
+ &rx_pkts[data_pkts]);
+ if (ret < 0) {
+ lio_dev_err(lio_dev, "Port[%d] DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
+ lio_dev->port_id, droq->q_no,
+ droq->read_idx, pkts_to_process);
+ break;
+ }
+ data_pkts += ret;
+ }
+
+ rte_atomic64_sub(&droq->pkts_pending, pkt);
+
+ return data_pkts;
+}
+
+static inline uint32_t
+lio_droq_check_hw_for_pkts(struct lio_droq *droq)
+{
+ uint32_t last_count;
+ uint32_t pkt_count;
+
+ pkt_count = rte_read32(droq->pkts_sent_reg);
+
+ last_count = pkt_count - droq->pkt_count;
+ droq->pkt_count = pkt_count;
+
+ if (last_count)
+ rte_atomic64_add(&droq->pkts_pending, last_count);
+
+ return last_count;
+}
+
+uint16_t
+lio_dev_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t budget)
+{
+ struct lio_droq *droq = rx_queue;
+ struct lio_device *lio_dev = droq->lio_dev;
+ uint32_t pkts_processed = 0;
+ uint32_t pkt_count = 0;
+
+ lio_droq_check_hw_for_pkts(droq);
+
+ pkt_count = rte_atomic64_read(&droq->pkts_pending);
+ if (!pkt_count)
+ return 0;
+
+ if (pkt_count > budget)
+ pkt_count = budget;
+
+ /* Grab the lock */
+ rte_spinlock_lock(&droq->lock);
+ pkts_processed = lio_droq_fast_process_packets(lio_dev,
+ droq, rx_pkts,
+ pkt_count);
+
+ if (droq->pkt_count) {
+ rte_write32(droq->pkt_count, droq->pkts_sent_reg);
+ droq->pkt_count = 0;
+ }
+
+ /* Release the spin lock */
+ rte_spinlock_unlock(&droq->lock);
+
+ return pkts_processed;
+}
+
+void
+lio_delete_droq_queue(struct lio_device *lio_dev,
+ int oq_no)
+{
+ lio_delete_droq(lio_dev, oq_no);
+ lio_dev->num_oqs--;
+ rte_free(lio_dev->droq[oq_no]);
+ lio_dev->droq[oq_no] = NULL;
+}
+
+/**
+ * lio_init_instr_queue()
+ * @param lio_dev - pointer to the lio device structure.
+ * @param txpciq - queue to be initialized.
+ *
+ * Called at driver init time for each input queue. iq_conf has the
+ * configuration parameters for the queue.
+ *
+ * @return Success: 0 Failure: -1
+ */
+static int
+lio_init_instr_queue(struct lio_device *lio_dev,
+ union octeon_txpciq txpciq,
+ uint32_t num_descs, unsigned int socket_id)
+{
+ uint32_t iq_no = (uint32_t)txpciq.s.q_no;
+ struct lio_instr_queue *iq;
+ uint32_t instr_type;
+ uint32_t q_size;
+
+ instr_type = LIO_IQ_INSTR_TYPE(lio_dev);
+
+ q_size = instr_type * num_descs;
+ iq = lio_dev->instr_queue[iq_no];
+ iq->iq_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
+ "instr_queue", iq_no, q_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (iq->iq_mz == NULL) {
+ lio_dev_err(lio_dev, "Cannot allocate memory for instr queue %d\n",
+ iq_no);
+ return -1;
+ }
+
+ iq->base_addr_dma = iq->iq_mz->iova;
+ iq->base_addr = (uint8_t *)iq->iq_mz->addr;
+
+ iq->nb_desc = num_descs;
+
+ /* Initialize a list to holds requests that have been posted to Octeon
+ * but has yet to be fetched by octeon
+ */
+ iq->request_list = rte_zmalloc_socket("request_list",
+ sizeof(*iq->request_list) *
+ num_descs,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (iq->request_list == NULL) {
+ lio_dev_err(lio_dev, "Alloc failed for IQ[%d] nr free list\n",
+ iq_no);
+ lio_dma_zone_free(lio_dev, iq->iq_mz);
+ return -1;
+ }
+
+ lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
+ iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
+ iq->nb_desc);
+
+ iq->lio_dev = lio_dev;
+ iq->txpciq.txpciq64 = txpciq.txpciq64;
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->lio_read_index = 0;
+ iq->flush_index = 0;
+
+ rte_atomic64_set(&iq->instr_pending, 0);
+
+ /* Initialize the spinlock for this instruction queue */
+ rte_spinlock_init(&iq->lock);
+ rte_spinlock_init(&iq->post_lock);
+
+ rte_atomic64_clear(&iq->iq_flush_running);
+
+ lio_dev->io_qmask.iq |= (1ULL << iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+ lio_dev->io_qmask.iq64B |= ((instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (instr_type == 64);
+
+ lio_dev->fn_list.setup_iq_regs(lio_dev, iq_no);
+
+ return 0;
+}
+
+int
+lio_setup_instr_queue0(struct lio_device *lio_dev)
+{
+ union octeon_txpciq txpciq;
+ uint32_t num_descs = 0;
+ uint32_t iq_no = 0;
+
+ num_descs = LIO_NUM_DEF_TX_DESCS_CFG(lio_dev);
+
+ lio_dev->num_iqs = 0;
+
+ lio_dev->instr_queue[0] = rte_zmalloc(NULL,
+ sizeof(struct lio_instr_queue), 0);
+ if (lio_dev->instr_queue[0] == NULL)
+ return -ENOMEM;
+
+ lio_dev->instr_queue[0]->q_index = 0;
+ lio_dev->instr_queue[0]->app_ctx = (void *)(size_t)0;
+ txpciq.txpciq64 = 0;
+ txpciq.s.q_no = iq_no;
+ txpciq.s.pkind = lio_dev->pfvf_hsword.pkind;
+ txpciq.s.use_qpg = 0;
+ txpciq.s.qpg = 0;
+ if (lio_init_instr_queue(lio_dev, txpciq, num_descs, SOCKET_ID_ANY)) {
+ rte_free(lio_dev->instr_queue[0]);
+ lio_dev->instr_queue[0] = NULL;
+ return -1;
+ }
+
+ lio_dev->num_iqs++;
+
+ return 0;
+}
+
+/**
+ * lio_delete_instr_queue()
+ * @param lio_dev - pointer to the lio device structure.
+ * @param iq_no - queue to be deleted.
+ *
+ * Called at driver unload time for each input queue. Deletes all
+ * allocated resources for the input queue.
+ */
+static void
+lio_delete_instr_queue(struct lio_device *lio_dev, uint32_t iq_no)
+{
+ struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
+
+ rte_free(iq->request_list);
+ iq->request_list = NULL;
+ lio_dma_zone_free(lio_dev, iq->iq_mz);
+}
+
+void
+lio_free_instr_queue0(struct lio_device *lio_dev)
+{
+ lio_delete_instr_queue(lio_dev, 0);
+ rte_free(lio_dev->instr_queue[0]);
+ lio_dev->instr_queue[0] = NULL;
+ lio_dev->num_iqs--;
+}
+
+/* Return 0 on success, -1 on failure */
+int
+lio_setup_iq(struct lio_device *lio_dev, int q_index,
+ union octeon_txpciq txpciq, uint32_t num_descs, void *app_ctx,
+ unsigned int socket_id)
+{
+ uint32_t iq_no = (uint32_t)txpciq.s.q_no;
+
+ lio_dev->instr_queue[iq_no] = rte_zmalloc_socket("ethdev TX queue",
+ sizeof(struct lio_instr_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (lio_dev->instr_queue[iq_no] == NULL)
+ return -1;
+
+ lio_dev->instr_queue[iq_no]->q_index = q_index;
+ lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
+
+ if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id)) {
+ rte_free(lio_dev->instr_queue[iq_no]);
+ lio_dev->instr_queue[iq_no] = NULL;
+ return -1;
+ }
+
+ lio_dev->num_iqs++;
+
+ return 0;
+}
+
+int
+lio_wait_for_instr_fetch(struct lio_device *lio_dev)
+{
+ int pending, instr_cnt;
+ int i, retry = 1000;
+
+ do {
+ instr_cnt = 0;
+
+ for (i = 0; i < LIO_MAX_INSTR_QUEUES(lio_dev); i++) {
+ if (!(lio_dev->io_qmask.iq & (1ULL << i)))
+ continue;
+
+ if (lio_dev->instr_queue[i] == NULL)
+ break;
+
+ pending = rte_atomic64_read(
+ &lio_dev->instr_queue[i]->instr_pending);
+ if (pending)
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[i]);
+
+ instr_cnt += pending;
+ }
+
+ if (instr_cnt == 0)
+ break;
+
+ rte_delay_ms(1);
+
+ } while (retry-- && instr_cnt);
+
+ return instr_cnt;
+}
+
+static inline void
+lio_ring_doorbell(struct lio_device *lio_dev,
+ struct lio_instr_queue *iq)
+{
+ if (rte_atomic64_read(&lio_dev->status) == LIO_DEV_RUNNING) {
+ rte_write32(iq->fill_cnt, iq->doorbell_reg);
+ /* make sure doorbell write goes through */
+ rte_wmb();
+ iq->fill_cnt = 0;
+ }
+}
+
+static inline void
+copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
+{
+ uint8_t *iqptr, cmdsize;
+
+ cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
+ iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
+
+ rte_memcpy(iqptr, cmd, cmdsize);
+}
+
+static inline struct lio_iq_post_status
+post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
+{
+ struct lio_iq_post_status st;
+
+ st.status = LIO_IQ_SEND_OK;
+
+ /* This ensures that the read index does not wrap around to the same
+ * position if queue gets full before Octeon could fetch any instr.
+ */
+ if (rte_atomic64_read(&iq->instr_pending) >=
+ (int32_t)(iq->nb_desc - 1)) {
+ st.status = LIO_IQ_SEND_FAILED;
+ st.index = -1;
+ return st;
+ }
+
+ if (rte_atomic64_read(&iq->instr_pending) >=
+ (int32_t)(iq->nb_desc - 2))
+ st.status = LIO_IQ_SEND_STOP;
+
+ copy_cmd_into_iq(iq, cmd);
+
+ /* "index" is returned, host_write_index is modified. */
+ st.index = iq->host_write_index;
+ iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
+ iq->nb_desc);
+ iq->fill_cnt++;
+
+ /* Flush the command into memory. We need to be sure the data is in
+ * memory before indicating that the instruction is pending.
+ */
+ rte_wmb();
+
+ rte_atomic64_inc(&iq->instr_pending);
+
+ return st;
+}
+
+static inline void
+lio_add_to_request_list(struct lio_instr_queue *iq,
+ int idx, void *buf, int reqtype)
+{
+ iq->request_list[idx].buf = buf;
+ iq->request_list[idx].reqtype = reqtype;
+}
+
+static inline void
+lio_free_netsgbuf(void *buf)
+{
+ struct lio_buf_free_info *finfo = buf;
+ struct lio_device *lio_dev = finfo->lio_dev;
+ struct rte_mbuf *m = finfo->mbuf;
+ struct lio_gather *g = finfo->g;
+ uint8_t iq = finfo->iq_no;
+
+ /* This will take care of multiple segments also */
+ rte_pktmbuf_free(m);
+
+ rte_spinlock_lock(&lio_dev->glist_lock[iq]);
+ STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq], &g->list, entries);
+ rte_spinlock_unlock(&lio_dev->glist_lock[iq]);
+ rte_free(finfo);
+}
+
+/* Can only run in process context */
+static int
+lio_process_iq_request_list(struct lio_device *lio_dev,
+ struct lio_instr_queue *iq)
+{
+ struct octeon_instr_irh *irh = NULL;
+ uint32_t old = iq->flush_index;
+ struct lio_soft_command *sc;
+ uint32_t inst_count = 0;
+ int reqtype;
+ void *buf;
+
+ while (old != iq->lio_read_index) {
+ reqtype = iq->request_list[old].reqtype;
+ buf = iq->request_list[old].buf;
+
+ if (reqtype == LIO_REQTYPE_NONE)
+ goto skip_this;
+
+ switch (reqtype) {
+ case LIO_REQTYPE_NORESP_NET:
+ rte_pktmbuf_free((struct rte_mbuf *)buf);
+ break;
+ case LIO_REQTYPE_NORESP_NET_SG:
+ lio_free_netsgbuf(buf);
+ break;
+ case LIO_REQTYPE_SOFT_COMMAND:
+ sc = buf;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ if (irh->rflag) {
+ /* We're expecting a response from Octeon.
+ * It's up to lio_process_ordered_list() to
+ * process sc. Add sc to the ordered soft
+ * command response list because we expect
+ * a response from Octeon.
+ */
+ rte_spinlock_lock(&lio_dev->response_list.lock);
+ rte_atomic64_inc(
+ &lio_dev->response_list.pending_req_count);
+ STAILQ_INSERT_TAIL(
+ &lio_dev->response_list.head,
+ &sc->node, entries);
+ rte_spinlock_unlock(
+ &lio_dev->response_list.lock);
+ } else {
+ if (sc->callback) {
+ /* This callback must not sleep */
+ sc->callback(LIO_REQUEST_DONE,
+ sc->callback_arg);
+ }
+ }
+ break;
+ default:
+ lio_dev_err(lio_dev,
+ "Unknown reqtype: %d buf: %p at idx %d\n",
+ reqtype, buf, old);
+ }
+
+ iq->request_list[old].buf = NULL;
+ iq->request_list[old].reqtype = 0;
+
+skip_this:
+ inst_count++;
+ old = lio_incr_index(old, 1, iq->nb_desc);
+ }
+
+ iq->flush_index = old;
+
+ return inst_count;
+}
+
+static void
+lio_update_read_index(struct lio_instr_queue *iq)
+{
+ uint32_t pkt_in_done = rte_read32(iq->inst_cnt_reg);
+ uint32_t last_done;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ /* Add last_done and modulo with the IQ size to get new index */
+ iq->lio_read_index = (iq->lio_read_index +
+ (uint32_t)(last_done & LIO_PKT_IN_DONE_CNT_MASK)) %
+ iq->nb_desc;
+}
+
+int
+lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq)
+{
+ uint32_t tot_inst_processed = 0;
+ uint32_t inst_processed = 0;
+ int tx_done = 1;
+
+ if (rte_atomic64_test_and_set(&iq->iq_flush_running) == 0)
+ return tx_done;
+
+ rte_spinlock_lock(&iq->lock);
+
+ lio_update_read_index(iq);
+
+ do {
+ /* Process any outstanding IQ packets. */
+ if (iq->flush_index == iq->lio_read_index)
+ break;
+
+ inst_processed = lio_process_iq_request_list(lio_dev, iq);
+
+ if (inst_processed) {
+ rte_atomic64_sub(&iq->instr_pending, inst_processed);
+ iq->stats.instr_processed += inst_processed;
+ }
+
+ tot_inst_processed += inst_processed;
+ inst_processed = 0;
+
+ } while (1);
+
+ rte_spinlock_unlock(&iq->lock);
+
+ rte_atomic64_clear(&iq->iq_flush_running);
+
+ return tx_done;
+}
+
+static int
+lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
+ void *buf, uint32_t datasize, uint32_t reqtype)
+{
+ struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
+ struct lio_iq_post_status st;
+
+ rte_spinlock_lock(&iq->post_lock);
+
+ st = post_command2(iq, cmd);
+
+ if (st.status != LIO_IQ_SEND_FAILED) {
+ lio_add_to_request_list(iq, st.index, buf, reqtype);
+ LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, bytes_sent,
+ datasize);
+ LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_posted, 1);
+
+ lio_ring_doorbell(lio_dev, iq);
+ } else {
+ LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_dropped, 1);
+ }
+
+ rte_spinlock_unlock(&iq->post_lock);
+
+ return st.status;
+}
+
+void
+lio_prepare_soft_command(struct lio_device *lio_dev,
+ struct lio_soft_command *sc, uint8_t opcode,
+ uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0,
+ uint64_t ossp1)
+{
+ struct octeon_instr_pki_ih3 *pki_ih3;
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ RTE_ASSERT(opcode <= 15);
+ RTE_ASSERT(subcode <= 127);
+
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+
+ ih3->pkind = lio_dev->instr_queue[sc->iq_no]->txpciq.s.pkind;
+
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
+
+ pki_ih3->w = 1;
+ pki_ih3->raw = 1;
+ pki_ih3->utag = 1;
+ pki_ih3->uqpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
+ pki_ih3->utt = 1;
+
+ pki_ih3->tag = LIO_CONTROL;
+ pki_ih3->tagtype = OCTEON_ATOMIC_TAG;
+ pki_ih3->qpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.qpg;
+ pki_ih3->pm = 0x7;
+ pki_ih3->sl = 8;
+
+ if (sc->datasize)
+ ih3->dlengsz = sc->datasize;
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.cmd3.ossp[0] = ossp0;
+ sc->cmd.cmd3.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ rdp->pcie_port = lio_dev->pcie_port;
+ rdp->rlen = sc->rdatasize;
+ irh->rflag = 1;
+ /* PKI IH3 */
+ ih3->fsz = OCTEON_SOFT_CMD_RESP_IH3;
+ } else {
+ irh->rflag = 0;
+ /* PKI IH3 */
+ ih3->fsz = OCTEON_PCI_CMD_O3;
+ }
+}
+
+int
+lio_send_soft_command(struct lio_device *lio_dev,
+ struct lio_soft_command *sc)
+{
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_irh *irh;
+ uint32_t len = 0;
+
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+ if (ih3->dlengsz) {
+ RTE_ASSERT(sc->dmadptr);
+ sc->cmd.cmd3.dptr = sc->dmadptr;
+ }
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ if (irh->rflag) {
+ RTE_ASSERT(sc->dmarptr);
+ RTE_ASSERT(sc->status_word != NULL);
+ *sc->status_word = LIO_COMPLETION_WORD_INIT;
+ sc->cmd.cmd3.rptr = sc->dmarptr;
+ }
+
+ len = (uint32_t)ih3->dlengsz;
+
+ if (sc->wait_time)
+ sc->timeout = lio_uptime + sc->wait_time;
+
+ return lio_send_command(lio_dev, sc->iq_no, &sc->cmd, sc, len,
+ LIO_REQTYPE_SOFT_COMMAND);
+}
+
+int
+lio_setup_sc_buffer_pool(struct lio_device *lio_dev)
+{
+ char sc_pool_name[RTE_MEMPOOL_NAMESIZE];
+ uint16_t buf_size;
+
+ buf_size = LIO_SOFT_COMMAND_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM;
+ snprintf(sc_pool_name, sizeof(sc_pool_name),
+ "lio_sc_pool_%u", lio_dev->port_id);
+ lio_dev->sc_buf_pool = rte_pktmbuf_pool_create(sc_pool_name,
+ LIO_MAX_SOFT_COMMAND_BUFFERS,
+ 0, 0, buf_size, SOCKET_ID_ANY);
+ return 0;
+}
+
+void
+lio_free_sc_buffer_pool(struct lio_device *lio_dev)
+{
+ rte_mempool_free(lio_dev->sc_buf_pool);
+}
+
+struct lio_soft_command *
+lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
+ uint32_t rdatasize, uint32_t ctxsize)
+{
+ uint32_t offset = sizeof(struct lio_soft_command);
+ struct lio_soft_command *sc;
+ struct rte_mbuf *m;
+ uint64_t dma_addr;
+
+ RTE_ASSERT((offset + datasize + rdatasize + ctxsize) <=
+ LIO_SOFT_COMMAND_BUFFER_SIZE);
+
+ m = rte_pktmbuf_alloc(lio_dev->sc_buf_pool);
+ if (m == NULL) {
+ lio_dev_err(lio_dev, "Cannot allocate mbuf for sc\n");
+ return NULL;
+ }
+
+ /* set rte_mbuf data size and there is only 1 segment */
+ m->pkt_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
+ m->data_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
+
+ /* use rte_mbuf buffer for soft command */
+ sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
+ memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
+ sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
+ sc->dma_addr = rte_mbuf_data_iova(m);
+ sc->mbuf = m;
+
+ dma_addr = sc->dma_addr;
+
+ if (ctxsize) {
+ sc->ctxptr = (uint8_t *)sc + offset;
+ sc->ctxsize = ctxsize;
+ }
+
+ /* Start data at 128 byte boundary */
+ offset = (offset + ctxsize + 127) & 0xffffff80;
+
+ if (datasize) {
+ sc->virtdptr = (uint8_t *)sc + offset;
+ sc->dmadptr = dma_addr + offset;
+ sc->datasize = datasize;
+ }
+
+ /* Start rdata at 128 byte boundary */
+ offset = (offset + datasize + 127) & 0xffffff80;
+
+ if (rdatasize) {
+ RTE_ASSERT(rdatasize >= 16);
+ sc->virtrptr = (uint8_t *)sc + offset;
+ sc->dmarptr = dma_addr + offset;
+ sc->rdatasize = rdatasize;
+ sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
+ rdatasize - 8);
+ }
+
+ return sc;
+}
+
+void
+lio_free_soft_command(struct lio_soft_command *sc)
+{
+ rte_pktmbuf_free(sc->mbuf);
+}
+
+void
+lio_setup_response_list(struct lio_device *lio_dev)
+{
+ STAILQ_INIT(&lio_dev->response_list.head);
+ rte_spinlock_init(&lio_dev->response_list.lock);
+ rte_atomic64_set(&lio_dev->response_list.pending_req_count, 0);
+}
+
+int
+lio_process_ordered_list(struct lio_device *lio_dev)
+{
+ int resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;
+ struct lio_response_list *ordered_sc_list;
+ struct lio_soft_command *sc;
+ int request_complete = 0;
+ uint64_t status64;
+ uint32_t status;
+
+ ordered_sc_list = &lio_dev->response_list;
+
+ do {
+ rte_spinlock_lock(&ordered_sc_list->lock);
+
+ if (STAILQ_EMPTY(&ordered_sc_list->head)) {
+ /* ordered_sc_list is empty; there is
+ * nothing to process
+ */
+ rte_spinlock_unlock(&ordered_sc_list->lock);
+ return -1;
+ }
+
+ sc = LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head,
+ struct lio_soft_command, node);
+
+ status = LIO_REQUEST_PENDING;
+
+ /* check if octeon has finished DMA'ing a response
+ * to where rptr is pointing to
+ */
+ status64 = *sc->status_word;
+
+ if (status64 != LIO_COMPLETION_WORD_INIT) {
+ /* This logic ensures that all 64b have been written.
+ * 1. check byte 0 for non-FF
+ * 2. if non-FF, then swap result from BE to host order
+ * 3. check byte 7 (swapped to 0) for non-FF
+ * 4. if non-FF, use the low 32-bit status code
+ * 5. if either byte 0 or byte 7 is FF, don't use status
+ */
+ if ((status64 & 0xff) != 0xff) {
+ lio_swap_8B_data(&status64, 1);
+ if (((status64 & 0xff) != 0xff)) {
+ /* retrieve 16-bit firmware status */
+ status = (uint32_t)(status64 &
+ 0xffffULL);
+ if (status) {
+ status =
+ LIO_FIRMWARE_STATUS_CODE(
+ status);
+ } else {
+ /* i.e. no error */
+ status = LIO_REQUEST_DONE;
+ }
+ }
+ }
+ } else if ((sc->timeout && lio_check_timeout(lio_uptime,
+ sc->timeout))) {
+ lio_dev_err(lio_dev,
+ "cmd failed, timeout (%ld, %ld)\n",
+ (long)lio_uptime, (long)sc->timeout);
+ status = LIO_REQUEST_TIMEOUT;
+ }
+
+ if (status != LIO_REQUEST_PENDING) {
+ /* we have received a response or we have timed out.
+ * remove node from linked list
+ */
+ STAILQ_REMOVE(&ordered_sc_list->head,
+ &sc->node, lio_stailq_node, entries);
+ rte_atomic64_dec(
+ &lio_dev->response_list.pending_req_count);
+ rte_spinlock_unlock(&ordered_sc_list->lock);
+
+ if (sc->callback)
+ sc->callback(status, sc->callback_arg);
+
+ request_complete++;
+ } else {
+ /* no response yet */
+ request_complete = 0;
+ rte_spinlock_unlock(&ordered_sc_list->lock);
+ }
+
+ /* If we hit the Max Ordered requests to process every loop,
+ * we quit and let this function be invoked the next time
+ * the poll thread runs to process the remaining requests.
+ * This function can take up the entire CPU if there is
+ * no upper limit to the requests processed.
+ */
+ if (request_complete >= resp_to_process)
+ break;
+ } while (request_complete);
+
+ return 0;
+}
+
+static inline struct lio_stailq_node *
+list_delete_first_node(struct lio_stailq_head *head)
+{
+ struct lio_stailq_node *node;
+
+ if (STAILQ_EMPTY(head))
+ node = NULL;
+ else
+ node = STAILQ_FIRST(head);
+
+ if (node)
+ STAILQ_REMOVE(head, node, lio_stailq_node, entries);
+
+ return node;
+}
+
+void
+lio_delete_sglist(struct lio_instr_queue *txq)
+{
+ struct lio_device *lio_dev = txq->lio_dev;
+ int iq_no = txq->q_index;
+ struct lio_gather *g;
+
+ if (lio_dev->glist_head == NULL)
+ return;
+
+ do {
+ g = (struct lio_gather *)list_delete_first_node(
+ &lio_dev->glist_head[iq_no]);
+ if (g) {
+ if (g->sg)
+ rte_free(
+ (void *)((unsigned long)g->sg - g->adjust));
+ rte_free(g);
+ }
+ } while (g);
+}
+
+/**
+ * \brief Setup gather lists
+ * @param lio per-network private data
+ */
+int
+lio_setup_sglists(struct lio_device *lio_dev, int iq_no,
+ int fw_mapped_iq, int num_descs, unsigned int socket_id)
+{
+ struct lio_gather *g;
+ int i;
+
+ rte_spinlock_init(&lio_dev->glist_lock[iq_no]);
+
+ STAILQ_INIT(&lio_dev->glist_head[iq_no]);
+
+ for (i = 0; i < num_descs; i++) {
+ g = rte_zmalloc_socket(NULL, sizeof(*g), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (g == NULL) {
+ lio_dev_err(lio_dev,
+ "lio_gather memory allocation failed for qno %d\n",
+ iq_no);
+ break;
+ }
+
+ g->sg_size =
+ ((ROUNDUP4(LIO_MAX_SG) >> 2) * LIO_SG_ENTRY_SIZE);
+
+ g->sg = rte_zmalloc_socket(NULL, g->sg_size + 8,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (g->sg == NULL) {
+ lio_dev_err(lio_dev,
+ "sg list memory allocation failed for qno %d\n",
+ iq_no);
+ rte_free(g);
+ break;
+ }
+
+ /* The gather component should be aligned on 64-bit boundary */
+ if (((unsigned long)g->sg) & 7) {
+ g->adjust = 8 - (((unsigned long)g->sg) & 7);
+ g->sg =
+ (struct lio_sg_entry *)((unsigned long)g->sg +
+ g->adjust);
+ }
+
+ STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq_no], &g->list,
+ entries);
+ }
+
+ if (i != num_descs) {
+ lio_delete_sglist(lio_dev->instr_queue[fw_mapped_iq]);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no)
+{
+ lio_delete_instr_queue(lio_dev, iq_no);
+ rte_free(lio_dev->instr_queue[iq_no]);
+ lio_dev->instr_queue[iq_no] = NULL;
+ lio_dev->num_iqs--;
+}
+
+static inline uint32_t
+lio_iq_get_available(struct lio_device *lio_dev, uint32_t q_no)
+{
+ return ((lio_dev->instr_queue[q_no]->nb_desc - 1) -
+ (uint32_t)rte_atomic64_read(
+ &lio_dev->instr_queue[q_no]->instr_pending));
+}
+
+static inline int
+lio_iq_is_full(struct lio_device *lio_dev, uint32_t q_no)
+{
+ return ((uint32_t)rte_atomic64_read(
+ &lio_dev->instr_queue[q_no]->instr_pending) >=
+ (lio_dev->instr_queue[q_no]->nb_desc - 2));
+}
+
+static int
+lio_dev_cleanup_iq(struct lio_device *lio_dev, int iq_no)
+{
+ struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
+ uint32_t count = 10000;
+
+ while ((lio_iq_get_available(lio_dev, iq_no) < LIO_FLUSH_WM(iq)) &&
+ --count)
+ lio_flush_iq(lio_dev, iq);
+
+ return count ? 0 : 1;
+}
+
+static void
+lio_ctrl_cmd_callback(uint32_t status __rte_unused, void *sc_ptr)
+{
+ struct lio_soft_command *sc = sc_ptr;
+ struct lio_dev_ctrl_cmd *ctrl_cmd;
+ struct lio_ctrl_pkt *ctrl_pkt;
+
+ ctrl_pkt = (struct lio_ctrl_pkt *)sc->ctxptr;
+ ctrl_cmd = ctrl_pkt->ctrl_cmd;
+ ctrl_cmd->cond = 1;
+
+ lio_free_soft_command(sc);
+}
+
+static inline struct lio_soft_command *
+lio_alloc_ctrl_pkt_sc(struct lio_device *lio_dev,
+ struct lio_ctrl_pkt *ctrl_pkt)
+{
+ struct lio_soft_command *sc = NULL;
+ uint32_t uddsize, datasize;
+ uint32_t rdatasize;
+ uint8_t *data;
+
+ uddsize = (uint32_t)(ctrl_pkt->ncmd.s.more * 8);
+
+ datasize = OCTEON_CMD_SIZE + uddsize;
+ rdatasize = (ctrl_pkt->wait_time) ? 16 : 0;
+
+ sc = lio_alloc_soft_command(lio_dev, datasize,
+ rdatasize, sizeof(struct lio_ctrl_pkt));
+ if (sc == NULL)
+ return NULL;
+
+ rte_memcpy(sc->ctxptr, ctrl_pkt, sizeof(struct lio_ctrl_pkt));
+
+ data = (uint8_t *)sc->virtdptr;
+
+ rte_memcpy(data, &ctrl_pkt->ncmd, OCTEON_CMD_SIZE);
+
+ lio_swap_8B_data((uint64_t *)data, OCTEON_CMD_SIZE >> 3);
+
+ if (uddsize) {
+ /* Endian-Swap for UDD should have been done by caller. */
+ rte_memcpy(data + OCTEON_CMD_SIZE, ctrl_pkt->udd, uddsize);
+ }
+
+ sc->iq_no = (uint32_t)ctrl_pkt->iq_no;
+
+ lio_prepare_soft_command(lio_dev, sc,
+ LIO_OPCODE, LIO_OPCODE_CMD,
+ 0, 0, 0);
+
+ sc->callback = lio_ctrl_cmd_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = ctrl_pkt->wait_time;
+
+ return sc;
+}
+
+int
+lio_send_ctrl_pkt(struct lio_device *lio_dev, struct lio_ctrl_pkt *ctrl_pkt)
+{
+ struct lio_soft_command *sc = NULL;
+ int retval;
+
+ sc = lio_alloc_ctrl_pkt_sc(lio_dev, ctrl_pkt);
+ if (sc == NULL) {
+ lio_dev_err(lio_dev, "soft command allocation failed\n");
+ return -1;
+ }
+
+ retval = lio_send_soft_command(lio_dev, sc);
+ if (retval == LIO_IQ_SEND_FAILED) {
+ lio_free_soft_command(sc);
+ lio_dev_err(lio_dev, "Port: %d soft command: %d send failed status: %x\n",
+ lio_dev->port_id, ctrl_pkt->ncmd.s.cmd, retval);
+ return -1;
+ }
+
+ return retval;
+}
+
+/** Send data packet to the device
+ * @param lio_dev - lio device pointer
+ * @param ndata - control structure with queueing, and buffer information
+ *
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * queue should be stopped, and LIO_IQ_SEND_OK if it sent okay.
+ */
+static inline int
+lio_send_data_pkt(struct lio_device *lio_dev, struct lio_data_pkt *ndata)
+{
+ return lio_send_command(lio_dev, ndata->q_no, &ndata->cmd,
+ ndata->buf, ndata->datasize, ndata->reqtype);
+}
+
+uint16_t
+lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
+{
+ struct lio_instr_queue *txq = tx_queue;
+ union lio_cmd_setup cmdsetup;
+ struct lio_device *lio_dev;
+ struct lio_iq_stats *stats;
+ struct lio_data_pkt ndata;
+ int i, processed = 0;
+ struct rte_mbuf *m;
+ uint32_t tag = 0;
+ int status = 0;
+ int iq_no;
+
+ lio_dev = txq->lio_dev;
+ iq_no = txq->txpciq.s.q_no;
+ stats = &lio_dev->instr_queue[iq_no]->stats;
+
+ if (!lio_dev->intf_open || !lio_dev->linfo.link.s.link_up) {
+ PMD_TX_LOG(lio_dev, ERR, "Transmit failed link_status : %d\n",
+ lio_dev->linfo.link.s.link_up);
+ goto xmit_failed;
+ }
+
+ lio_dev_cleanup_iq(lio_dev, iq_no);
+
+ for (i = 0; i < nb_pkts; i++) {
+ uint32_t pkt_len = 0;
+
+ m = pkts[i];
+
+ /* Prepare the attributes for the data to be passed to BASE. */
+ memset(&ndata, 0, sizeof(struct lio_data_pkt));
+
+ ndata.buf = m;
+
+ ndata.q_no = iq_no;
+ if (lio_iq_is_full(lio_dev, ndata.q_no)) {
+ stats->tx_iq_busy++;
+ if (lio_dev_cleanup_iq(lio_dev, iq_no)) {
+ PMD_TX_LOG(lio_dev, ERR,
+ "Transmit failed iq:%d full\n",
+ ndata.q_no);
+ break;
+ }
+ }
+
+ cmdsetup.cmd_setup64 = 0;
+ cmdsetup.s.iq_no = iq_no;
+
+ /* check checksum offload flags to form cmd */
+ if (m->ol_flags & PKT_TX_IP_CKSUM)
+ cmdsetup.s.ip_csum = 1;
+
+ if (m->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ cmdsetup.s.tnl_csum = 1;
+ else if ((m->ol_flags & PKT_TX_TCP_CKSUM) ||
+ (m->ol_flags & PKT_TX_UDP_CKSUM))
+ cmdsetup.s.transport_csum = 1;
+
+ if (m->nb_segs == 1) {
+ pkt_len = rte_pktmbuf_data_len(m);
+ cmdsetup.s.u.datasize = pkt_len;
+ lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
+ &cmdsetup, tag);
+ ndata.cmd.cmd3.dptr = rte_mbuf_data_iova(m);
+ ndata.reqtype = LIO_REQTYPE_NORESP_NET;
+ } else {
+ struct lio_buf_free_info *finfo;
+ struct lio_gather *g;
+ rte_iova_t phyaddr;
+ int i, frags;
+
+ finfo = (struct lio_buf_free_info *)rte_malloc(NULL,
+ sizeof(*finfo), 0);
+ if (finfo == NULL) {
+ PMD_TX_LOG(lio_dev, ERR,
+ "free buffer alloc failed\n");
+ goto xmit_failed;
+ }
+
+ rte_spinlock_lock(&lio_dev->glist_lock[iq_no]);
+ g = (struct lio_gather *)list_delete_first_node(
+ &lio_dev->glist_head[iq_no]);
+ rte_spinlock_unlock(&lio_dev->glist_lock[iq_no]);
+ if (g == NULL) {
+ PMD_TX_LOG(lio_dev, ERR,
+ "Transmit scatter gather: glist null!\n");
+ goto xmit_failed;
+ }
+
+ cmdsetup.s.gather = 1;
+ cmdsetup.s.u.gatherptrs = m->nb_segs;
+ lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
+ &cmdsetup, tag);
+
+ memset(g->sg, 0, g->sg_size);
+ g->sg[0].ptr[0] = rte_mbuf_data_iova(m);
+ lio_add_sg_size(&g->sg[0], m->data_len, 0);
+ pkt_len = m->data_len;
+ finfo->mbuf = m;
+
+ /* First seg taken care above */
+ frags = m->nb_segs - 1;
+ i = 1;
+ m = m->next;
+ while (frags--) {
+ g->sg[(i >> 2)].ptr[(i & 3)] =
+ rte_mbuf_data_iova(m);
+ lio_add_sg_size(&g->sg[(i >> 2)],
+ m->data_len, (i & 3));
+ pkt_len += m->data_len;
+ i++;
+ m = m->next;
+ }
+
+ phyaddr = rte_mem_virt2iova(g->sg);
+ if (phyaddr == RTE_BAD_IOVA) {
+ PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
+ goto xmit_failed;
+ }
+
+ ndata.cmd.cmd3.dptr = phyaddr;
+ ndata.reqtype = LIO_REQTYPE_NORESP_NET_SG;
+
+ finfo->g = g;
+ finfo->lio_dev = lio_dev;
+ finfo->iq_no = (uint64_t)iq_no;
+ ndata.buf = finfo;
+ }
+
+ ndata.datasize = pkt_len;
+
+ status = lio_send_data_pkt(lio_dev, &ndata);
+
+ if (unlikely(status == LIO_IQ_SEND_FAILED)) {
+ PMD_TX_LOG(lio_dev, ERR, "send failed\n");
+ break;
+ }
+
+ if (unlikely(status == LIO_IQ_SEND_STOP)) {
+ PMD_TX_LOG(lio_dev, DEBUG, "iq full\n");
+ /* create space as iq is full */
+ lio_dev_cleanup_iq(lio_dev, iq_no);
+ }
+
+ stats->tx_done++;
+ stats->tx_tot_bytes += pkt_len;
+ processed++;
+ }
+
+xmit_failed:
+ stats->tx_dropped += (nb_pkts - processed);
+
+ return processed;
+}
+
+void
+lio_dev_clear_queues(struct rte_eth_dev *eth_dev)
+{
+ struct lio_instr_queue *txq;
+ struct lio_droq *rxq;
+ uint16_t i;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ txq = eth_dev->data->tx_queues[i];
+ if (txq != NULL) {
+ lio_dev_tx_queue_release(txq);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rxq = eth_dev->data->rx_queues[i];
+ if (rxq != NULL) {
+ lio_dev_rx_queue_release(rxq);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.h b/src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.h
new file mode 100644
index 00000000..d2a45104
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.h
@@ -0,0 +1,740 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _LIO_RXTX_H_
+#define _LIO_RXTX_H_
+
+#include <stdio.h>
+#include <stdint.h>
+
+#include <rte_spinlock.h>
+#include <rte_memory.h>
+
+#include "lio_struct.h"
+
+#ifndef ROUNDUP4
+#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
+#endif
+
+#define LIO_STQUEUE_FIRST_ENTRY(ptr, type, elem) \
+ (type *)((char *)((ptr)->stqh_first) - offsetof(type, elem))
+
+#define lio_check_timeout(cur_time, chk_time) ((cur_time) > (chk_time))
+
+#define lio_uptime \
+ (size_t)(rte_get_timer_cycles() / rte_get_timer_hz())
+
+/** Descriptor format.
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ * -# Physical (bus) address of the data buffer.
+ * -# Physical (bus) address of a lio_droq_info structure.
+ * The device DMA's incoming packets and its information at the address
+ * given by these descriptor fields.
+ */
+struct lio_droq_desc {
+ /** The buffer pointer */
+ uint64_t buffer_ptr;
+
+ /** The Info pointer */
+ uint64_t info_ptr;
+};
+
+#define LIO_DROQ_DESC_SIZE (sizeof(struct lio_droq_desc))
+
+/** Information about packet DMA'ed by Octeon.
+ * The format of the information available at Info Pointer after Octeon
+ * has posted a packet. Not all descriptors have valid information. Only
+ * the Info field of the first descriptor for a packet has information
+ * about the packet.
+ */
+struct lio_droq_info {
+ /** The Output Receive Header. */
+ union octeon_rh rh;
+
+ /** The Length of the packet. */
+ uint64_t length;
+};
+
+#define LIO_DROQ_INFO_SIZE (sizeof(struct lio_droq_info))
+
+/** Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers.
+ */
+struct lio_recv_buffer {
+ /** Packet buffer, including meta data. */
+ void *buffer;
+
+ /** Data in the packet buffer. */
+ uint8_t *data;
+
+};
+
+#define LIO_DROQ_RECVBUF_SIZE (sizeof(struct lio_recv_buffer))
+
+#define LIO_DROQ_SIZE (sizeof(struct lio_droq))
+
+#define LIO_IQ_SEND_OK 0
+#define LIO_IQ_SEND_STOP 1
+#define LIO_IQ_SEND_FAILED -1
+
+/* conditions */
+#define LIO_REQTYPE_NONE 0
+#define LIO_REQTYPE_NORESP_NET 1
+#define LIO_REQTYPE_NORESP_NET_SG 2
+#define LIO_REQTYPE_SOFT_COMMAND 3
+
+struct lio_request_list {
+ uint32_t reqtype;
+ void *buf;
+};
+
+/*---------------------- INSTRUCTION FORMAT ----------------------------*/
+
+struct lio_instr3_64B {
+ /** Pointer where the input data is available. */
+ uint64_t dptr;
+
+ /** Instruction Header. */
+ uint64_t ih3;
+
+ /** Instruction Header. */
+ uint64_t pki_ih3;
+
+ /** Input Request Header. */
+ uint64_t irh;
+
+ /** opcode/subcode specific parameters */
+ uint64_t ossp[2];
+
+ /** Return Data Parameters */
+ uint64_t rdp;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ uint64_t rptr;
+
+};
+
+union lio_instr_64B {
+ struct lio_instr3_64B cmd3;
+};
+
+/** The size of each buffer in soft command buffer pool */
+#define LIO_SOFT_COMMAND_BUFFER_SIZE 1536
+
+/** Maximum number of buffers to allocate into soft command buffer pool */
+#define LIO_MAX_SOFT_COMMAND_BUFFERS 255
+
+struct lio_soft_command {
+ /** Soft command buffer info. */
+ struct lio_stailq_node node;
+ uint64_t dma_addr;
+ uint32_t size;
+
+ /** Command and return status */
+ union lio_instr_64B cmd;
+
+#define LIO_COMPLETION_WORD_INIT 0xffffffffffffffffULL
+ uint64_t *status_word;
+
+ /** Data buffer info */
+ void *virtdptr;
+ uint64_t dmadptr;
+ uint32_t datasize;
+
+ /** Return buffer info */
+ void *virtrptr;
+ uint64_t dmarptr;
+ uint32_t rdatasize;
+
+ /** Context buffer info */
+ void *ctxptr;
+ uint32_t ctxsize;
+
+ /** Time out and callback */
+ size_t wait_time;
+ size_t timeout;
+ uint32_t iq_no;
+ void (*callback)(uint32_t, void *);
+ void *callback_arg;
+ struct rte_mbuf *mbuf;
+};
+
+struct lio_iq_post_status {
+ int status;
+ int index;
+};
+
+/* wqe
+ * --------------- 0
+ * | wqe word0-3 |
+ * --------------- 32
+ * | PCI IH |
+ * --------------- 40
+ * | RPTR |
+ * --------------- 48
+ * | PCI IRH |
+ * --------------- 56
+ * | OCTEON_CMD |
+ * --------------- 64
+ * | Addtl 8-BData |
+ * | |
+ * ---------------
+ */
+
+union octeon_cmd {
+ uint64_t cmd64;
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t cmd : 5;
+
+ uint64_t more : 6; /* How many udd words follow the command */
+
+ uint64_t reserved : 29;
+
+ uint64_t param1 : 16;
+
+ uint64_t param2 : 8;
+
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+ uint64_t param2 : 8;
+
+ uint64_t param1 : 16;
+
+ uint64_t reserved : 29;
+
+ uint64_t more : 6;
+
+ uint64_t cmd : 5;
+
+#endif
+ } s;
+};
+
+#define OCTEON_CMD_SIZE (sizeof(union octeon_cmd))
+
+/* Maximum number of 8-byte words can be
+ * sent in a NIC control message.
+ */
+#define LIO_MAX_NCTRL_UDD 32
+
+/* Structure of control information passed by driver to the BASE
+ * layer when sending control commands to Octeon device software.
+ */
+struct lio_ctrl_pkt {
+ /** Command to be passed to the Octeon device software. */
+ union octeon_cmd ncmd;
+
+ /** Send buffer */
+ void *data;
+ uint64_t dmadata;
+
+ /** Response buffer */
+ void *rdata;
+ uint64_t dmardata;
+
+ /** Additional data that may be needed by some commands. */
+ uint64_t udd[LIO_MAX_NCTRL_UDD];
+
+ /** Input queue to use to send this command. */
+ uint64_t iq_no;
+
+ /** Time to wait for Octeon software to respond to this control command.
+ * If wait_time is 0, BASE assumes no response is expected.
+ */
+ size_t wait_time;
+
+ struct lio_dev_ctrl_cmd *ctrl_cmd;
+};
+
+/** Structure of data information passed by driver to the BASE
+ * layer when forwarding data to Octeon device software.
+ */
+struct lio_data_pkt {
+ /** Pointer to information maintained by NIC module for this packet. The
+ * BASE layer passes this as-is to the driver.
+ */
+ void *buf;
+
+ /** Type of buffer passed in "buf" above. */
+ uint32_t reqtype;
+
+ /** Total data bytes to be transferred in this command. */
+ uint32_t datasize;
+
+ /** Command to be passed to the Octeon device software. */
+ union lio_instr_64B cmd;
+
+ /** Input queue to use to send this command. */
+ uint32_t q_no;
+};
+
+/** Structure passed by driver to BASE layer to prepare a command to send
+ * network data to Octeon.
+ */
+union lio_cmd_setup {
+ struct {
+ uint32_t iq_no : 8;
+ uint32_t gather : 1;
+ uint32_t timestamp : 1;
+ uint32_t ip_csum : 1;
+ uint32_t transport_csum : 1;
+ uint32_t tnl_csum : 1;
+ uint32_t rsvd : 19;
+
+ union {
+ uint32_t datasize;
+ uint32_t gatherptrs;
+ } u;
+ } s;
+
+ uint64_t cmd_setup64;
+};
+
+/* Instruction Header */
+struct octeon_instr_ih3 {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+
+ /** Reserved3 */
+ uint64_t reserved3 : 1;
+
+ /** Gather indicator 1=gather*/
+ uint64_t gather : 1;
+
+ /** Data length OR no. of entries in gather list */
+ uint64_t dlengsz : 14;
+
+ /** Front Data size */
+ uint64_t fsz : 6;
+
+ /** Reserved2 */
+ uint64_t reserved2 : 4;
+
+ /** PKI port kind - PKIND */
+ uint64_t pkind : 6;
+
+ /** Reserved1 */
+ uint64_t reserved1 : 32;
+
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ /** Reserved1 */
+ uint64_t reserved1 : 32;
+
+ /** PKI port kind - PKIND */
+ uint64_t pkind : 6;
+
+ /** Reserved2 */
+ uint64_t reserved2 : 4;
+
+ /** Front Data size */
+ uint64_t fsz : 6;
+
+ /** Data length OR no. of entries in gather list */
+ uint64_t dlengsz : 14;
+
+ /** Gather indicator 1=gather*/
+ uint64_t gather : 1;
+
+ /** Reserved3 */
+ uint64_t reserved3 : 1;
+
+#endif
+};
+
+/* PKI Instruction Header(PKI IH) */
+struct octeon_instr_pki_ih3 {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+
+ /** Wider bit */
+ uint64_t w : 1;
+
+ /** Raw mode indicator 1 = RAW */
+ uint64_t raw : 1;
+
+ /** Use Tag */
+ uint64_t utag : 1;
+
+ /** Use QPG */
+ uint64_t uqpg : 1;
+
+ /** Reserved2 */
+ uint64_t reserved2 : 1;
+
+ /** Parse Mode */
+ uint64_t pm : 3;
+
+ /** Skip Length */
+ uint64_t sl : 8;
+
+ /** Use Tag Type */
+ uint64_t utt : 1;
+
+ /** Tag type */
+ uint64_t tagtype : 2;
+
+ /** Reserved1 */
+ uint64_t reserved1 : 2;
+
+ /** QPG Value */
+ uint64_t qpg : 11;
+
+ /** Tag Value */
+ uint64_t tag : 32;
+
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+ /** Tag Value */
+ uint64_t tag : 32;
+
+ /** QPG Value */
+ uint64_t qpg : 11;
+
+ /** Reserved1 */
+ uint64_t reserved1 : 2;
+
+ /** Tag type */
+ uint64_t tagtype : 2;
+
+ /** Use Tag Type */
+ uint64_t utt : 1;
+
+ /** Skip Length */
+ uint64_t sl : 8;
+
+ /** Parse Mode */
+ uint64_t pm : 3;
+
+ /** Reserved2 */
+ uint64_t reserved2 : 1;
+
+ /** Use QPG */
+ uint64_t uqpg : 1;
+
+ /** Use Tag */
+ uint64_t utag : 1;
+
+ /** Raw mode indicator 1 = RAW */
+ uint64_t raw : 1;
+
+ /** Wider bit */
+ uint64_t w : 1;
+#endif
+};
+
+/** Input Request Header */
+struct octeon_instr_irh {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t opcode : 4;
+ uint64_t rflag : 1;
+ uint64_t subcode : 7;
+ uint64_t vlan : 12;
+ uint64_t priority : 3;
+ uint64_t reserved : 5;
+ uint64_t ossp : 32; /* opcode/subcode specific parameters */
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ uint64_t ossp : 32; /* opcode/subcode specific parameters */
+ uint64_t reserved : 5;
+ uint64_t priority : 3;
+ uint64_t vlan : 12;
+ uint64_t subcode : 7;
+ uint64_t rflag : 1;
+ uint64_t opcode : 4;
+#endif
+};
+
+/* pkiih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+#define OCTEON_SOFT_CMD_RESP_IH3 (40 + 8)
+/* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
+#define OCTEON_PCI_CMD_O3 (24 + 8)
+
+/** Return Data Parameters */
+struct octeon_instr_rdp {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t reserved : 49;
+ uint64_t pcie_port : 3;
+ uint64_t rlen : 12;
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ uint64_t rlen : 12;
+ uint64_t pcie_port : 3;
+ uint64_t reserved : 49;
+#endif
+};
+
+union octeon_packet_params {
+ uint32_t pkt_params32;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint32_t reserved : 24;
+ uint32_t ip_csum : 1; /* Perform IP header checksum(s) */
+ /* Perform Outer transport header checksum */
+ uint32_t transport_csum : 1;
+ /* Find tunnel, and perform transport csum. */
+ uint32_t tnl_csum : 1;
+ uint32_t tsflag : 1; /* Timestamp this packet */
+ uint32_t ipsec_ops : 4; /* IPsec operation */
+#else
+ uint32_t ipsec_ops : 4;
+ uint32_t tsflag : 1;
+ uint32_t tnl_csum : 1;
+ uint32_t transport_csum : 1;
+ uint32_t ip_csum : 1;
+ uint32_t reserved : 7;
+#endif
+ } s;
+};
+
+/** Utility function to prepare a 64B NIC instruction based on a setup command
+ * @param cmd - pointer to instruction to be filled in.
+ * @param setup - pointer to the setup structure
+ * @param q_no - which queue for back pressure
+ *
+ * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
+ */
+static inline void
+lio_prepare_pci_cmd(struct lio_device *lio_dev,
+ union lio_instr_64B *cmd,
+ union lio_cmd_setup *setup,
+ uint32_t tag)
+{
+ union octeon_packet_params packet_params;
+ struct octeon_instr_pki_ih3 *pki_ih3;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_ih3 *ih3;
+ int port;
+
+ memset(cmd, 0, sizeof(union lio_instr_64B));
+
+ ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;
+
+ /* assume that rflag is cleared so therefore front data will only have
+ * irh and ossp[1] and ossp[2] for a total of 24 bytes
+ */
+ ih3->pkind = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
+ /* PKI IH */
+ ih3->fsz = OCTEON_PCI_CMD_O3;
+
+ if (!setup->s.gather) {
+ ih3->dlengsz = setup->s.u.datasize;
+ } else {
+ ih3->gather = 1;
+ ih3->dlengsz = setup->s.u.gatherptrs;
+ }
+
+ pki_ih3->w = 1;
+ pki_ih3->raw = 0;
+ pki_ih3->utag = 0;
+ pki_ih3->utt = 1;
+ pki_ih3->uqpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;
+
+ port = (int)lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.port;
+
+ if (tag)
+ pki_ih3->tag = tag;
+ else
+ pki_ih3->tag = LIO_DATA(port);
+
+ pki_ih3->tagtype = OCTEON_ORDERED_TAG;
+ pki_ih3->qpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.qpg;
+ pki_ih3->pm = 0x0; /* parse from L2 */
+ pki_ih3->sl = 32; /* sl will be sizeof(pki_ih3) + irh + ossp0 + ossp1*/
+
+ irh = (struct octeon_instr_irh *)&cmd->cmd3.irh;
+
+ irh->opcode = LIO_OPCODE;
+ irh->subcode = LIO_OPCODE_NW_DATA;
+
+ packet_params.pkt_params32 = 0;
+ packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.transport_csum = setup->s.transport_csum;
+ packet_params.s.tnl_csum = setup->s.tnl_csum;
+ packet_params.s.tsflag = setup->s.timestamp;
+
+ irh->ossp = packet_params.pkt_params32;
+}
+
+int lio_setup_sc_buffer_pool(struct lio_device *lio_dev);
+void lio_free_sc_buffer_pool(struct lio_device *lio_dev);
+
+struct lio_soft_command *
+lio_alloc_soft_command(struct lio_device *lio_dev,
+ uint32_t datasize, uint32_t rdatasize,
+ uint32_t ctxsize);
+void lio_prepare_soft_command(struct lio_device *lio_dev,
+ struct lio_soft_command *sc,
+ uint8_t opcode, uint8_t subcode,
+ uint32_t irh_ossp, uint64_t ossp0,
+ uint64_t ossp1);
+int lio_send_soft_command(struct lio_device *lio_dev,
+ struct lio_soft_command *sc);
+void lio_free_soft_command(struct lio_soft_command *sc);
+
+/** Send control packet to the device
+ * @param lio_dev - lio device pointer
+ * @param nctrl - control structure with command, timeout, and callback info
+ *
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * queue should be stopped, and LIO_IQ_SEND_OK if it sent okay.
+ */
+int lio_send_ctrl_pkt(struct lio_device *lio_dev,
+ struct lio_ctrl_pkt *ctrl_pkt);
+
+/** Maximum ordered requests to process in every invocation of
+ * lio_process_ordered_list(). The function will continue to process requests
+ * as long as it can find one that has finished processing. If it keeps
+ * finding requests that have completed, the function can run for ever. The
+ * value defined here sets an upper limit on the number of requests it can
+ * process before it returns control to the poll thread.
+ */
+#define LIO_MAX_ORD_REQS_TO_PROCESS 4096
+
+/** Error codes used in Octeon Host-Core communication.
+ *
+ * 31 16 15 0
+ * ----------------------------
+ * | | |
+ * ----------------------------
+ * Error codes are 32-bit wide. The upper 16-bits, called Major Error Number,
+ * are reserved to identify the group to which the error code belongs. The
+ * lower 16-bits, called Minor Error Number, carry the actual code.
+ *
+ * So error codes are (MAJOR NUMBER << 16)| MINOR_NUMBER.
+ */
+/** Status for a request.
+ * If the request is successfully queued, the driver will return
+ * a LIO_REQUEST_PENDING status. LIO_REQUEST_TIMEOUT is only returned by
+ * the driver if the response for request failed to arrive before a
+ * time-out period or if the request processing * got interrupted due to
+ * a signal respectively.
+ */
+enum {
+ /** A value of 0x00000000 indicates no error i.e. success */
+ LIO_REQUEST_DONE = 0x00000000,
+ /** (Major number: 0x0000; Minor Number: 0x0001) */
+ LIO_REQUEST_PENDING = 0x00000001,
+ LIO_REQUEST_TIMEOUT = 0x00000003,
+
+};
+
+/*------ Error codes used by firmware (bits 15..0 set by firmware */
+#define LIO_FIRMWARE_MAJOR_ERROR_CODE 0x0001
+#define LIO_FIRMWARE_STATUS_CODE(status) \
+ ((LIO_FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))
+
+/** Initialize the response lists. The number of response lists to create is
+ * given by count.
+ * @param lio_dev - the lio device structure.
+ */
+void lio_setup_response_list(struct lio_device *lio_dev);
+
+/** Check the status of first entry in the ordered list. If the instruction at
+ * that entry finished processing or has timed-out, the entry is cleaned.
+ * @param lio_dev - the lio device structure.
+ * @return 1 if the ordered list is empty, 0 otherwise.
+ */
+int lio_process_ordered_list(struct lio_device *lio_dev);
+
+#define LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, field, count) \
+ (((lio_dev)->instr_queue[iq_no]->stats.field) += count)
+
+static inline void
+lio_swap_8B_data(uint64_t *data, uint32_t blocks)
+{
+ while (blocks) {
+ *data = rte_cpu_to_be_64(*data);
+ blocks--;
+ data++;
+ }
+}
+
+static inline uint64_t
+lio_map_ring(void *buf)
+{
+ rte_iova_t dma_addr;
+
+ dma_addr = rte_mbuf_data_iova_default(((struct rte_mbuf *)buf));
+
+ return (uint64_t)dma_addr;
+}
+
+static inline uint64_t
+lio_map_ring_info(struct lio_droq *droq, uint32_t i)
+{
+ rte_iova_t dma_addr;
+
+ dma_addr = droq->info_list_dma + (i * LIO_DROQ_INFO_SIZE);
+
+ return (uint64_t)dma_addr;
+}
+
+static inline int
+lio_opcode_slow_path(union octeon_rh *rh)
+{
+ uint16_t subcode1, subcode2;
+
+ subcode1 = LIO_OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode);
+ subcode2 = LIO_OPCODE_SUBCODE(LIO_OPCODE, LIO_OPCODE_NW_DATA);
+
+ return subcode2 != subcode1;
+}
+
+static inline void
+lio_add_sg_size(struct lio_sg_entry *sg_entry,
+ uint16_t size, uint32_t pos)
+{
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ sg_entry->u.size[pos] = size;
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ sg_entry->u.size[3 - pos] = size;
+#endif
+}
+
+/* Macro to increment index.
+ * Index is incremented by count; if the sum exceeds
+ * max, index is wrapped-around to the start.
+ */
+static inline uint32_t
+lio_incr_index(uint32_t index, uint32_t count, uint32_t max)
+{
+ if ((index + count) >= max)
+ index = index + count - max;
+ else
+ index += count;
+
+ return index;
+}
+
+int lio_setup_droq(struct lio_device *lio_dev, int q_no, int num_descs,
+ int desc_size, struct rte_mempool *mpool,
+ unsigned int socket_id);
+uint16_t lio_dev_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t budget);
+void lio_delete_droq_queue(struct lio_device *lio_dev, int oq_no);
+
+void lio_delete_sglist(struct lio_instr_queue *txq);
+int lio_setup_sglists(struct lio_device *lio_dev, int iq_no,
+ int fw_mapped_iq, int num_descs, unsigned int socket_id);
+uint16_t lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts,
+ uint16_t nb_pkts);
+int lio_wait_for_instr_fetch(struct lio_device *lio_dev);
+int lio_setup_iq(struct lio_device *lio_dev, int q_index,
+ union octeon_txpciq iq_no, uint32_t num_descs, void *app_ctx,
+ unsigned int socket_id);
+int lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq);
+void lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no);
+/** Setup instruction queue zero for the device
+ * @param lio_dev which lio device to setup
+ *
+ * @return 0 if success. -1 if fails
+ */
+int lio_setup_instr_queue0(struct lio_device *lio_dev);
+void lio_free_instr_queue0(struct lio_device *lio_dev);
+void lio_dev_clear_queues(struct rte_eth_dev *eth_dev);
+#endif /* _LIO_RXTX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/liquidio/lio_struct.h b/src/spdk/dpdk/drivers/net/liquidio/lio_struct.h
new file mode 100644
index 00000000..10270c56
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/lio_struct.h
@@ -0,0 +1,661 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _LIO_STRUCT_H_
+#define _LIO_STRUCT_H_
+
+#include <stdio.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_spinlock.h>
+#include <rte_atomic.h>
+
+#include "lio_hw_defs.h"
+
+struct lio_stailq_node {
+ STAILQ_ENTRY(lio_stailq_node) entries;
+};
+
+STAILQ_HEAD(lio_stailq_head, lio_stailq_node);
+
+struct lio_version {
+ uint16_t major;
+ uint16_t minor;
+ uint16_t micro;
+ uint16_t reserved;
+};
+
+/** Input Queue statistics. Each input queue has four stats fields. */
+struct lio_iq_stats {
+ uint64_t instr_posted; /**< Instructions posted to this queue. */
+ uint64_t instr_processed; /**< Instructions processed in this queue. */
+ uint64_t instr_dropped; /**< Instructions that could not be processed */
+ uint64_t bytes_sent; /**< Bytes sent through this queue. */
+ uint64_t tx_done; /**< Num of packets sent to network. */
+ uint64_t tx_iq_busy; /**< Num of times this iq was found to be full. */
+ uint64_t tx_dropped; /**< Num of pkts dropped due to xmitpath errors. */
+ uint64_t tx_tot_bytes; /**< Total count of bytes sent to network. */
+};
+
+/** Output Queue statistics. Each output queue has four stats fields. */
+struct lio_droq_stats {
+ /** Number of packets received in this queue. */
+ uint64_t pkts_received;
+
+ /** Bytes received by this queue. */
+ uint64_t bytes_received;
+
+ /** Packets dropped due to no memory available. */
+ uint64_t dropped_nomem;
+
+ /** Packets dropped due to large number of pkts to process. */
+ uint64_t dropped_toomany;
+
+ /** Number of packets sent to stack from this queue. */
+ uint64_t rx_pkts_received;
+
+ /** Number of Bytes sent to stack from this queue. */
+ uint64_t rx_bytes_received;
+
+ /** Num of Packets dropped due to receive path failures. */
+ uint64_t rx_dropped;
+
+ /** Num of vxlan packets received; */
+ uint64_t rx_vxlan;
+
+ /** Num of failures of rte_pktmbuf_alloc() */
+ uint64_t rx_alloc_failure;
+
+};
+
+/** The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement a
+ * DROQ.
+ */
+struct lio_droq {
+ /** A spinlock to protect access to this ring. */
+ rte_spinlock_t lock;
+
+ uint32_t q_no;
+
+ uint32_t pkt_count;
+
+ struct lio_device *lio_dev;
+
+ /** The 8B aligned descriptor ring starts at this address. */
+ struct lio_droq_desc *desc_ring;
+
+ /** Index in the ring where the driver should read the next packet */
+ uint32_t read_idx;
+
+ /** Index in the ring where Octeon will write the next packet */
+ uint32_t write_idx;
+
+ /** Index in the ring where the driver will refill the descriptor's
+ * buffer
+ */
+ uint32_t refill_idx;
+
+ /** Packets pending to be processed */
+ rte_atomic64_t pkts_pending;
+
+ /** Number of descriptors in this ring. */
+ uint32_t nb_desc;
+
+ /** The number of descriptors pending refill. */
+ uint32_t refill_count;
+
+ uint32_t refill_threshold;
+
+ /** The 8B aligned info ptrs begin from this address. */
+ struct lio_droq_info *info_list;
+
+ /** The receive buffer list. This list has the virtual addresses of the
+ * buffers.
+ */
+ struct lio_recv_buffer *recv_buf_list;
+
+ /** The size of each buffer pointed by the buffer pointer. */
+ uint32_t buffer_size;
+
+ /** Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ void *pkts_credit_reg;
+
+ /** Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ void *pkts_sent_reg;
+
+ /** Statistics for this DROQ. */
+ struct lio_droq_stats stats;
+
+ /** DMA mapped address of the DROQ descriptor ring. */
+ size_t desc_ring_dma;
+
+ /** Info ptr list are allocated at this virtual address. */
+ size_t info_base_addr;
+
+ /** DMA mapped address of the info list */
+ size_t info_list_dma;
+
+ /** Allocated size of info list. */
+ uint32_t info_alloc_size;
+
+ /** Memory zone **/
+ const struct rte_memzone *desc_ring_mz;
+ const struct rte_memzone *info_mz;
+ struct rte_mempool *mpool;
+};
+
+/** Receive Header */
+union octeon_rh {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t rh64;
+ struct {
+ uint64_t opcode : 4;
+ uint64_t subcode : 8;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t reserved : 17;
+ uint64_t ossp : 32; /** opcode/subcode specific parameters */
+ } r;
+ struct {
+ uint64_t opcode : 4;
+ uint64_t subcode : 8;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t extra : 28;
+ uint64_t vlan : 12;
+ uint64_t priority : 3;
+ uint64_t csum_verified : 3; /** checksum verified. */
+ uint64_t has_hwtstamp : 1; /** Has hardware timestamp.1 = yes.*/
+ uint64_t encap_on : 1;
+ uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */
+ } r_dh;
+ struct {
+ uint64_t opcode : 4;
+ uint64_t subcode : 8;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t reserved : 8;
+ uint64_t extra : 25;
+ uint64_t gmxport : 16;
+ } r_nic_info;
+#else
+ uint64_t rh64;
+ struct {
+ uint64_t ossp : 32; /** opcode/subcode specific parameters */
+ uint64_t reserved : 17;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t subcode : 8;
+ uint64_t opcode : 4;
+ } r;
+ struct {
+ uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */
+ uint64_t encap_on : 1;
+ uint64_t has_hwtstamp : 1; /** 1 = has hwtstamp */
+ uint64_t csum_verified : 3; /** checksum verified. */
+ uint64_t priority : 3;
+ uint64_t vlan : 12;
+ uint64_t extra : 28;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t subcode : 8;
+ uint64_t opcode : 4;
+ } r_dh;
+ struct {
+ uint64_t gmxport : 16;
+ uint64_t extra : 25;
+ uint64_t reserved : 8;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t subcode : 8;
+ uint64_t opcode : 4;
+ } r_nic_info;
+#endif
+};
+
+#define OCTEON_RH_SIZE (sizeof(union octeon_rh))
+
+/** The txpciq info passed to host from the firmware */
+union octeon_txpciq {
+ uint64_t txpciq64;
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t q_no : 8;
+ uint64_t port : 8;
+ uint64_t pkind : 6;
+ uint64_t use_qpg : 1;
+ uint64_t qpg : 11;
+ uint64_t aura_num : 10;
+ uint64_t reserved : 20;
+#else
+ uint64_t reserved : 20;
+ uint64_t aura_num : 10;
+ uint64_t qpg : 11;
+ uint64_t use_qpg : 1;
+ uint64_t pkind : 6;
+ uint64_t port : 8;
+ uint64_t q_no : 8;
+#endif
+ } s;
+};
+
+/** The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to Octeon device from the host. Each input queue for
+ * a LIO device has one such structure to represent it.
+ */
+struct lio_instr_queue {
+ /** A spinlock to protect access to the input ring. */
+ rte_spinlock_t lock;
+
+ rte_spinlock_t post_lock;
+
+ struct lio_device *lio_dev;
+
+ uint32_t pkt_in_done;
+
+ rte_atomic64_t iq_flush_running;
+
+ /** Flag that indicates if the queue uses 64 byte commands. */
+ uint32_t iqcmd_64B:1;
+
+ /** Queue info. */
+ union octeon_txpciq txpciq;
+
+ uint32_t rsvd:17;
+
+ uint32_t status:8;
+
+ /** Number of descriptors in this ring. */
+ uint32_t nb_desc;
+
+ /** Index in input ring where the driver should write the next packet */
+ uint32_t host_write_index;
+
+ /** Index in input ring where Octeon is expected to read the next
+ * packet.
+ */
+ uint32_t lio_read_index;
+
+ /** This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ uint32_t flush_index;
+
+ /** This field keeps track of the instructions pending in this queue. */
+ rte_atomic64_t instr_pending;
+
+ /** Pointer to the Virtual Base addr of the input ring. */
+ uint8_t *base_addr;
+
+ struct lio_request_list *request_list;
+
+ /** Octeon doorbell register for the ring. */
+ void *doorbell_reg;
+
+ /** Octeon instruction count register for this ring. */
+ void *inst_cnt_reg;
+
+ /** Number of instructions pending to be posted to Octeon. */
+ uint32_t fill_cnt;
+
+ /** Statistics for this input queue. */
+ struct lio_iq_stats stats;
+
+ /** DMA mapped base address of the input descriptor ring. */
+ uint64_t base_addr_dma;
+
+ /** Application context */
+ void *app_ctx;
+
+ /* network stack queue index */
+ int q_index;
+
+ /* Memory zone */
+ const struct rte_memzone *iq_mz;
+};
+
+/** This structure is used by driver to store information required
+ * to free the mbuff when the packet has been fetched by Octeon.
+ * Bytes offset below assume worst-case of a 64-bit system.
+ */
+struct lio_buf_free_info {
+ /** Bytes 1-8. Pointer to network device private structure. */
+ struct lio_device *lio_dev;
+
+ /** Bytes 9-16. Pointer to mbuff. */
+ struct rte_mbuf *mbuf;
+
+ /** Bytes 17-24. Pointer to gather list. */
+ struct lio_gather *g;
+
+ /** Bytes 25-32. Physical address of mbuf->data or gather list. */
+ uint64_t dptr;
+
+ /** Bytes 33-47. Piggybacked soft command, if any */
+ struct lio_soft_command *sc;
+
+ /** Bytes 48-63. iq no */
+ uint64_t iq_no;
+};
+
+/* The Scatter-Gather List Entry. The scatter or gather component used with
+ * input instruction has this format.
+ */
+struct lio_sg_entry {
+ /** The first 64 bit gives the size of data in each dptr. */
+ union {
+ uint16_t size[4];
+ uint64_t size64;
+ } u;
+
+ /** The 4 dptr pointers for this entry. */
+ uint64_t ptr[4];
+};
+
+#define LIO_SG_ENTRY_SIZE (sizeof(struct lio_sg_entry))
+
+/** Structure of a node in list of gather components maintained by
+ * driver for each network device.
+ */
+struct lio_gather {
+ /** List manipulation. Next and prev pointers. */
+ struct lio_stailq_node list;
+
+ /** Size of the gather component at sg in bytes. */
+ int sg_size;
+
+ /** Number of bytes that sg was adjusted to make it 8B-aligned. */
+ int adjust;
+
+ /** Gather component that can accommodate max sized fragment list
+ * received from the IP layer.
+ */
+ struct lio_sg_entry *sg;
+};
+
+struct lio_rss_ctx {
+ uint16_t hash_key_size;
+ uint8_t hash_key[LIO_RSS_MAX_KEY_SZ];
+ /* Ideally a factor of number of queues */
+ uint8_t itable[LIO_RSS_MAX_TABLE_SZ];
+ uint8_t itable_size;
+ uint8_t ip;
+ uint8_t tcp_hash;
+ uint8_t ipv6;
+ uint8_t ipv6_tcp_hash;
+ uint8_t ipv6_ex;
+ uint8_t ipv6_tcp_ex_hash;
+ uint8_t hash_disable;
+};
+
+struct lio_io_enable {
+ uint64_t iq;
+ uint64_t oq;
+ uint64_t iq64B;
+};
+
+struct lio_fn_list {
+ void (*setup_iq_regs)(struct lio_device *, uint32_t);
+ void (*setup_oq_regs)(struct lio_device *, uint32_t);
+
+ int (*setup_mbox)(struct lio_device *);
+ void (*free_mbox)(struct lio_device *);
+
+ int (*setup_device_regs)(struct lio_device *);
+ int (*enable_io_queues)(struct lio_device *);
+ void (*disable_io_queues)(struct lio_device *);
+};
+
+struct lio_pf_vf_hs_word {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ /** PKIND value assigned for the DPI interface */
+ uint64_t pkind : 8;
+
+ /** OCTEON core clock multiplier */
+ uint64_t core_tics_per_us : 16;
+
+ /** OCTEON coprocessor clock multiplier */
+ uint64_t coproc_tics_per_us : 16;
+
+ /** app that currently running on OCTEON */
+ uint64_t app_mode : 8;
+
+ /** RESERVED */
+ uint64_t reserved : 16;
+
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+
+ /** RESERVED */
+ uint64_t reserved : 16;
+
+ /** app that currently running on OCTEON */
+ uint64_t app_mode : 8;
+
+ /** OCTEON coprocessor clock multiplier */
+ uint64_t coproc_tics_per_us : 16;
+
+ /** OCTEON core clock multiplier */
+ uint64_t core_tics_per_us : 16;
+
+ /** PKIND value assigned for the DPI interface */
+ uint64_t pkind : 8;
+#endif
+};
+
+struct lio_sriov_info {
+ /** Number of rings assigned to VF */
+ uint32_t rings_per_vf;
+
+ /** Number of VF devices enabled */
+ uint32_t num_vfs;
+};
+
+/* Head of a response list */
+struct lio_response_list {
+ /** List structure to add delete pending entries to */
+ struct lio_stailq_head head;
+
+ /** A lock for this response list */
+ rte_spinlock_t lock;
+
+ rte_atomic64_t pending_req_count;
+};
+
+/* Structure to define the configuration attributes for each Input queue. */
+struct lio_iq_config {
+ /* Max number of IQs available */
+ uint8_t max_iqs;
+
+ /** Pending list size (usually set to the sum of the size of all Input
+ * queues)
+ */
+ uint32_t pending_list_size;
+
+ /** Command size - 32 or 64 bytes */
+ uint32_t instr_type;
+};
+
+/* Structure to define the configuration attributes for each Output queue. */
+struct lio_oq_config {
+ /* Max number of OQs available */
+ uint8_t max_oqs;
+
+ /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
+ uint32_t info_ptr;
+
+ /** The number of buffers that were consumed during packet processing by
+ * the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ uint32_t refill_threshold;
+};
+
+/* Structure to define the configuration. */
+struct lio_config {
+ uint16_t card_type;
+ const char *card_name;
+
+ /** Input Queue attributes. */
+ struct lio_iq_config iq;
+
+ /** Output Queue attributes. */
+ struct lio_oq_config oq;
+
+ int num_nic_ports;
+
+ int num_def_tx_descs;
+
+ /* Num of desc for rx rings */
+ int num_def_rx_descs;
+
+ int def_rx_buf_size;
+};
+
+/** Status of a RGMII Link on Octeon as seen by core driver. */
+union octeon_link_status {
+ uint64_t link_status64;
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t duplex : 8;
+ uint64_t mtu : 16;
+ uint64_t speed : 16;
+ uint64_t link_up : 1;
+ uint64_t autoneg : 1;
+ uint64_t if_mode : 5;
+ uint64_t pause : 1;
+ uint64_t flashing : 1;
+ uint64_t reserved : 15;
+#else
+ uint64_t reserved : 15;
+ uint64_t flashing : 1;
+ uint64_t pause : 1;
+ uint64_t if_mode : 5;
+ uint64_t autoneg : 1;
+ uint64_t link_up : 1;
+ uint64_t speed : 16;
+ uint64_t mtu : 16;
+ uint64_t duplex : 8;
+#endif
+ } s;
+};
+
+/** The rxpciq info passed to host from the firmware */
+union octeon_rxpciq {
+ uint64_t rxpciq64;
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t q_no : 8;
+ uint64_t reserved : 56;
+#else
+ uint64_t reserved : 56;
+ uint64_t q_no : 8;
+#endif
+ } s;
+};
+
+/** Information for a OCTEON ethernet interface shared between core & host. */
+struct octeon_link_info {
+ union octeon_link_status link;
+ uint64_t hw_addr;
+
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t gmxport : 16;
+ uint64_t macaddr_is_admin_assigned : 1;
+ uint64_t vlan_is_admin_assigned : 1;
+ uint64_t rsvd : 30;
+ uint64_t num_txpciq : 8;
+ uint64_t num_rxpciq : 8;
+#else
+ uint64_t num_rxpciq : 8;
+ uint64_t num_txpciq : 8;
+ uint64_t rsvd : 30;
+ uint64_t vlan_is_admin_assigned : 1;
+ uint64_t macaddr_is_admin_assigned : 1;
+ uint64_t gmxport : 16;
+#endif
+
+ union octeon_txpciq txpciq[LIO_MAX_IOQS_PER_IF];
+ union octeon_rxpciq rxpciq[LIO_MAX_IOQS_PER_IF];
+};
+
+/* ----------------------- THE LIO DEVICE --------------------------- */
+/** The lio device.
+ * Each lio device has this structure to represent all its
+ * components.
+ */
+struct lio_device {
+ /** PCI device pointer */
+ struct rte_pci_device *pci_dev;
+
+ /** Octeon Chip type */
+ uint16_t chip_id;
+ uint16_t pf_num;
+ uint16_t vf_num;
+
+ /** This device's PCIe port used for traffic. */
+ uint16_t pcie_port;
+
+ /** The state of this device */
+ rte_atomic64_t status;
+
+ uint8_t intf_open;
+
+ struct octeon_link_info linfo;
+
+ uint8_t *hw_addr;
+
+ struct lio_fn_list fn_list;
+
+ uint32_t num_iqs;
+
+ /** Guards each glist */
+ rte_spinlock_t *glist_lock;
+ /** Array of gather component linked lists */
+ struct lio_stailq_head *glist_head;
+
+ /* The pool containing pre allocated buffers used for soft commands */
+ struct rte_mempool *sc_buf_pool;
+
+ /** The input instruction queues */
+ struct lio_instr_queue *instr_queue[LIO_MAX_POSSIBLE_INSTR_QUEUES];
+
+ /** The singly-linked tail queues of instruction response */
+ struct lio_response_list response_list;
+
+ uint32_t num_oqs;
+
+ /** The DROQ output queues */
+ struct lio_droq *droq[LIO_MAX_POSSIBLE_OUTPUT_QUEUES];
+
+ struct lio_io_enable io_qmask;
+
+ struct lio_sriov_info sriov_info;
+
+ struct lio_pf_vf_hs_word pfvf_hsword;
+
+ /** Mail Box details of each lio queue. */
+ struct lio_mbox **mbox;
+
+ char dev_string[LIO_DEVICE_NAME_LEN]; /* Device print string */
+
+ const struct lio_config *default_config;
+
+ struct rte_eth_dev *eth_dev;
+
+ uint64_t ifflags;
+ uint8_t max_rx_queues;
+ uint8_t max_tx_queues;
+ uint8_t nb_rx_queues;
+ uint8_t nb_tx_queues;
+ uint8_t port_configured;
+ struct lio_rss_ctx rss_state;
+ uint16_t port_id;
+ char firmware_version[LIO_FW_VERSION_LENGTH];
+};
+#endif /* _LIO_STRUCT_H_ */
diff --git a/src/spdk/dpdk/drivers/net/liquidio/meson.build b/src/spdk/dpdk/drivers/net/liquidio/meson.build
new file mode 100644
index 00000000..9ae48e21
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('base/lio_23xx_vf.c',
+ 'base/lio_mbox.c',
+ 'lio_ethdev.c',
+ 'lio_rxtx.c')
+includes += include_directories('base')
diff --git a/src/spdk/dpdk/drivers/net/liquidio/rte_pmd_liquidio_version.map b/src/spdk/dpdk/drivers/net/liquidio/rte_pmd_liquidio_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/liquidio/rte_pmd_liquidio_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/meson.build b/src/spdk/dpdk/drivers/net/meson.build
new file mode 100644
index 00000000..9c28ed4d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/meson.build
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['af_packet',
+ 'ark',
+ 'avp',
+ 'axgbe', 'bonding',
+ 'bnx2x',
+ 'bnxt',
+ 'cxgbe',
+ 'dpaa', 'dpaa2',
+ 'e1000',
+ 'ena',
+ 'enic',
+ 'failsafe',
+ 'fm10k', 'i40e',
+ 'ifc',
+ 'ixgbe',
+ 'kni',
+ 'liquidio',
+ 'mvpp2',
+ 'netvsc',
+ 'nfp',
+ 'null', 'octeontx', 'pcap', 'ring',
+ 'sfc',
+ 'softnic',
+ 'szedata2',
+ 'thunderx',
+ 'vhost',
+ 'virtio']
+std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
+std_deps += ['bus_pci'] # very many PMDs depend on PCI, so make std
+std_deps += ['bus_vdev'] # same with vdev bus
+config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/src/spdk/dpdk/drivers/net/mlx4/Makefile b/src/spdk/dpdk/drivers/net/mlx4/Makefile
new file mode 100644
index 00000000..92e93225
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/Makefile
@@ -0,0 +1,128 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2012 6WIND S.A.
+# Copyright 2012 Mellanox Technologies, Ltd
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# Library name.
+LIB = librte_pmd_mlx4.a
+LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION)
+LIB_GLUE_BASE = librte_pmd_mlx4_glue.so
+LIB_GLUE_VERSION = 18.02.0
+
+# Sources.
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_flow.c
+ifneq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y)
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_glue.c
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_mr.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxq.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_txq.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_utils.c
+
+ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y)
+INSTALL-$(CONFIG_RTE_LIBRTE_MLX4_PMD)-lib += $(LIB_GLUE)
+endif
+
+# Basic CFLAGS.
+CFLAGS += -O3
+CFLAGS += -std=c11 -Wall -Wextra
+CFLAGS += -g
+CFLAGS += -I.
+CFLAGS += -D_BSD_SOURCE
+CFLAGS += -D_DEFAULT_SOURCE
+CFLAGS += -D_XOPEN_SOURCE=600
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y)
+CFLAGS += -DMLX4_GLUE='"$(LIB_GLUE)"'
+CFLAGS += -DMLX4_GLUE_VERSION='"$(LIB_GLUE_VERSION)"'
+CFLAGS_mlx4_glue.o += -fPIC
+LDLIBS += -ldl
+else
+LDLIBS += -libverbs -lmlx4
+endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+# A few warnings cannot be avoided in external headers.
+CFLAGS += -Wno-error=cast-qual
+
+EXPORT_MAP := rte_pmd_mlx4_version.map
+LIBABIVER := 1
+
+# DEBUG which is usually provided on the command-line may enable
+# CONFIG_RTE_LIBRTE_MLX4_DEBUG.
+ifeq ($(DEBUG),1)
+CONFIG_RTE_LIBRTE_MLX4_DEBUG := y
+endif
+
+# User-defined CFLAGS.
+ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DEBUG),y)
+CFLAGS += -pedantic -UNDEBUG -DPEDANTIC
+else
+CFLAGS += -DNDEBUG -UPEDANTIC
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
+# Generate and clean-up mlx4_autoconf.h.
+
+export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS
+export AUTO_CONFIG_CFLAGS = -Wno-error
+
+ifndef V
+AUTOCONF_OUTPUT := >/dev/null
+endif
+
+mlx4_autoconf.h.new: FORCE
+
+mlx4_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
+ $Q $(RM) -f -- '$@'
+ $Q : > '$@'
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX4_WQE_LSO_SEG \
+ infiniband/mlx4dv.h \
+ type 'struct mlx4_wqe_lso_seg' \
+ $(AUTOCONF_OUTPUT)
+
+# Create mlx4_autoconf.h or update it in case it differs from the new one.
+
+mlx4_autoconf.h: mlx4_autoconf.h.new
+ $Q [ -f '$@' ] && \
+ cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \
+ mv '$<' '$@'
+
+$(SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD):.c=.o): mlx4_autoconf.h
+
+# Generate dependency plug-in for rdma-core when the PMD must not be linked
+# directly, so that applications do not inherit this dependency.
+
+ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y)
+
+$(LIB): $(LIB_GLUE)
+
+ifeq ($(LINK_USING_CC),1)
+GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS))
+else
+GLUE_LDFLAGS := $(LDFLAGS)
+endif
+$(LIB_GLUE): mlx4_glue.o
+ $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \
+ -Wl,-h,$(LIB_GLUE) \
+ -shared -o $@ $< -libverbs -lmlx4
+
+mlx4_glue.o: mlx4_autoconf.h
+
+endif
+
+clean_mlx4: FORCE
+ $Q rm -f -- mlx4_autoconf.h mlx4_autoconf.h.new
+ $Q rm -f -- mlx4_glue.o $(LIB_GLUE_BASE)*
+
+clean: clean_mlx4
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4.c
new file mode 100644
index 00000000..defc0d4b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4.c
@@ -0,0 +1,1013 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2012 6WIND S.A.
+ * Copyright 2012 Mellanox Technologies, Ltd
+ */
+
+/**
+ * @file
+ * mlx4 driver initialization.
+ */
+
+#include <assert.h>
+#include <dlfcn.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_interrupts.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+
+#include "mlx4.h"
+#include "mlx4_glue.h"
+#include "mlx4_flow.h"
+#include "mlx4_mr.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+struct mlx4_dev_list mlx4_mem_event_cb_list =
+ LIST_HEAD_INITIALIZER(mlx4_mem_event_cb_list);
+
+rte_rwlock_t mlx4_mem_event_rwlock = RTE_RWLOCK_INITIALIZER;
+
+/** Configuration structure for device arguments. */
+struct mlx4_conf {
+ struct {
+ uint32_t present; /**< Bit-field for existing ports. */
+ uint32_t enabled; /**< Bit-field for user-enabled ports. */
+ } ports;
+};
+
+/* Available parameters list. */
+const char *pmd_mlx4_init_params[] = {
+ MLX4_PMD_PORT_KVARG,
+ NULL,
+};
+
+static void mlx4_dev_stop(struct rte_eth_dev *dev);
+
+/**
+ * DPDK callback for Ethernet device configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_dev_configure(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
+ int ret;
+
+ /* Prepare internal flow rules. */
+ ret = mlx4_flow_sync(priv, &error);
+ if (ret) {
+ ERROR("cannot set up internal flow rules (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ -ret, strerror(-ret), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
+ goto exit;
+ }
+ ret = mlx4_intr_install(priv);
+ if (ret)
+ ERROR("%p: interrupt handler installation failed",
+ (void *)dev);
+exit:
+ return ret;
+}
+
+/**
+ * DPDK callback to start the device.
+ *
+ * Simulate device start by initializing common RSS resources and attaching
+ * all configured flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_dev_start(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
+ int ret;
+
+ if (priv->started)
+ return 0;
+ DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
+ priv->started = 1;
+ ret = mlx4_rss_init(priv);
+ if (ret) {
+ ERROR("%p: cannot initialize RSS resources: %s",
+ (void *)dev, strerror(-ret));
+ goto err;
+ }
+#ifndef NDEBUG
+ mlx4_mr_dump_dev(dev);
+#endif
+ ret = mlx4_rxq_intr_enable(priv);
+ if (ret) {
+ ERROR("%p: interrupt handler installation failed",
+ (void *)dev);
+ goto err;
+ }
+ ret = mlx4_flow_sync(priv, &error);
+ if (ret) {
+ ERROR("%p: cannot attach flow rules (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ (void *)dev,
+ -ret, strerror(-ret), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
+ goto err;
+ }
+ rte_wmb();
+ dev->tx_pkt_burst = mlx4_tx_burst;
+ dev->rx_pkt_burst = mlx4_rx_burst;
+ return 0;
+err:
+ mlx4_dev_stop(dev);
+ return ret;
+}
+
+/**
+ * DPDK callback to stop the device.
+ *
+ * Simulate device stop by detaching all configured flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mlx4_dev_stop(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ if (!priv->started)
+ return;
+ DEBUG("%p: detaching flows from all RX queues", (void *)dev);
+ priv->started = 0;
+ dev->tx_pkt_burst = mlx4_tx_burst_removed;
+ dev->rx_pkt_burst = mlx4_rx_burst_removed;
+ rte_wmb();
+ mlx4_flow_sync(priv, NULL);
+ mlx4_rxq_intr_disable(priv);
+ mlx4_rss_deinit(priv);
+}
+
+/**
+ * DPDK callback to close the device.
+ *
+ * Destroy all queues and objects, free memory.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mlx4_dev_close(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ DEBUG("%p: closing device \"%s\"",
+ (void *)dev,
+ ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
+ dev->rx_pkt_burst = mlx4_rx_burst_removed;
+ dev->tx_pkt_burst = mlx4_tx_burst_removed;
+ rte_wmb();
+ mlx4_flow_clean(priv);
+ mlx4_rss_deinit(priv);
+ for (i = 0; i != dev->data->nb_rx_queues; ++i)
+ mlx4_rx_queue_release(dev->data->rx_queues[i]);
+ for (i = 0; i != dev->data->nb_tx_queues; ++i)
+ mlx4_tx_queue_release(dev->data->tx_queues[i]);
+ mlx4_mr_release(dev);
+ if (priv->pd != NULL) {
+ assert(priv->ctx != NULL);
+ claim_zero(mlx4_glue->dealloc_pd(priv->pd));
+ claim_zero(mlx4_glue->close_device(priv->ctx));
+ } else
+ assert(priv->ctx == NULL);
+ mlx4_intr_uninstall(priv);
+ memset(priv, 0, sizeof(*priv));
+}
+
+static const struct eth_dev_ops mlx4_dev_ops = {
+ .dev_configure = mlx4_dev_configure,
+ .dev_start = mlx4_dev_start,
+ .dev_stop = mlx4_dev_stop,
+ .dev_set_link_down = mlx4_dev_set_link_down,
+ .dev_set_link_up = mlx4_dev_set_link_up,
+ .dev_close = mlx4_dev_close,
+ .link_update = mlx4_link_update,
+ .promiscuous_enable = mlx4_promiscuous_enable,
+ .promiscuous_disable = mlx4_promiscuous_disable,
+ .allmulticast_enable = mlx4_allmulticast_enable,
+ .allmulticast_disable = mlx4_allmulticast_disable,
+ .mac_addr_remove = mlx4_mac_addr_remove,
+ .mac_addr_add = mlx4_mac_addr_add,
+ .mac_addr_set = mlx4_mac_addr_set,
+ .stats_get = mlx4_stats_get,
+ .stats_reset = mlx4_stats_reset,
+ .dev_infos_get = mlx4_dev_infos_get,
+ .dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
+ .vlan_filter_set = mlx4_vlan_filter_set,
+ .rx_queue_setup = mlx4_rx_queue_setup,
+ .tx_queue_setup = mlx4_tx_queue_setup,
+ .rx_queue_release = mlx4_rx_queue_release,
+ .tx_queue_release = mlx4_tx_queue_release,
+ .flow_ctrl_get = mlx4_flow_ctrl_get,
+ .flow_ctrl_set = mlx4_flow_ctrl_set,
+ .mtu_set = mlx4_mtu_set,
+ .filter_ctrl = mlx4_filter_ctrl,
+ .rx_queue_intr_enable = mlx4_rx_intr_enable,
+ .rx_queue_intr_disable = mlx4_rx_intr_disable,
+ .is_removed = mlx4_is_removed,
+};
+
+/**
+ * Get PCI information from struct ibv_device.
+ *
+ * @param device
+ * Pointer to Ethernet device structure.
+ * @param[out] pci_addr
+ * PCI bus address output buffer.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
+ struct rte_pci_addr *pci_addr)
+{
+ FILE *file;
+ char line[32];
+ MKSTR(path, "%s/device/uevent", device->ibdev_path);
+
+ file = fopen(path, "rb");
+ if (file == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ while (fgets(line, sizeof(line), file) == line) {
+ size_t len = strlen(line);
+ int ret;
+
+ /* Truncate long lines. */
+ if (len == (sizeof(line) - 1))
+ while (line[(len - 1)] != '\n') {
+ ret = fgetc(file);
+ if (ret == EOF)
+ break;
+ line[(len - 1)] = ret;
+ }
+ /* Extract information. */
+ if (sscanf(line,
+ "PCI_SLOT_NAME="
+ "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
+ &pci_addr->domain,
+ &pci_addr->bus,
+ &pci_addr->devid,
+ &pci_addr->function) == 4) {
+ ret = 0;
+ break;
+ }
+ }
+ fclose(file);
+ return 0;
+}
+
+/**
+ * Verify and store value for device argument.
+ *
+ * @param[in] key
+ * Key argument to verify.
+ * @param[in] val
+ * Value associated with key.
+ * @param[in, out] conf
+ * Shared configuration data.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
+{
+ unsigned long tmp;
+
+ errno = 0;
+ tmp = strtoul(val, NULL, 0);
+ if (errno) {
+ rte_errno = errno;
+ WARN("%s: \"%s\" is not a valid integer", key, val);
+ return -rte_errno;
+ }
+ if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
+ uint32_t ports = rte_log2_u32(conf->ports.present + 1);
+
+ if (tmp >= ports) {
+ ERROR("port index %lu outside range [0,%" PRIu32 ")",
+ tmp, ports);
+ return -EINVAL;
+ }
+ if (!(conf->ports.present & (1 << tmp))) {
+ rte_errno = EINVAL;
+ ERROR("invalid port index %lu", tmp);
+ return -rte_errno;
+ }
+ conf->ports.enabled |= 1 << tmp;
+ } else {
+ rte_errno = EINVAL;
+ WARN("%s: unknown parameter", key);
+ return -rte_errno;
+ }
+ return 0;
+}
+
+/**
+ * Parse device parameters.
+ *
+ * @param devargs
+ * Device arguments structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf)
+{
+ struct rte_kvargs *kvlist;
+ unsigned int arg_count;
+ int ret = 0;
+ int i;
+
+ if (devargs == NULL)
+ return 0;
+ kvlist = rte_kvargs_parse(devargs->args, pmd_mlx4_init_params);
+ if (kvlist == NULL) {
+ rte_errno = EINVAL;
+ ERROR("failed to parse kvargs");
+ return -rte_errno;
+ }
+ /* Process parameters. */
+ for (i = 0; pmd_mlx4_init_params[i]; ++i) {
+ arg_count = rte_kvargs_count(kvlist, MLX4_PMD_PORT_KVARG);
+ while (arg_count-- > 0) {
+ ret = rte_kvargs_process(kvlist,
+ MLX4_PMD_PORT_KVARG,
+ (int (*)(const char *,
+ const char *,
+ void *))
+ mlx4_arg_parse,
+ conf);
+ if (ret != 0)
+ goto free_kvlist;
+ }
+ }
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+/**
+ * Interpret RSS capabilities reported by device.
+ *
+ * This function returns the set of usable Verbs RSS hash fields, kernel
+ * quirks taken into account.
+ *
+ * @param ctx
+ * Verbs context.
+ * @param pd
+ * Verbs protection domain.
+ * @param device_attr_ex
+ * Extended device attributes to interpret.
+ *
+ * @return
+ * Usable RSS hash fields mask in Verbs format.
+ */
+static uint64_t
+mlx4_hw_rss_sup(struct ibv_context *ctx, struct ibv_pd *pd,
+ struct ibv_device_attr_ex *device_attr_ex)
+{
+ uint64_t hw_rss_sup = device_attr_ex->rss_caps.rx_hash_fields_mask;
+ struct ibv_cq *cq = NULL;
+ struct ibv_wq *wq = NULL;
+ struct ibv_rwq_ind_table *ind = NULL;
+ struct ibv_qp *qp = NULL;
+
+ if (!hw_rss_sup) {
+ WARN("no RSS capabilities reported; disabling support for UDP"
+ " RSS and inner VXLAN RSS");
+ return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
+ IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
+ IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
+ }
+ if (!(hw_rss_sup & IBV_RX_HASH_INNER))
+ return hw_rss_sup;
+ /*
+ * Although reported as supported, missing code in some Linux
+ * versions (v4.15, v4.16) prevents the creation of hash QPs with
+ * inner capability.
+ *
+ * There is no choice but to attempt to instantiate a temporary RSS
+ * context in order to confirm its support.
+ */
+ cq = mlx4_glue->create_cq(ctx, 1, NULL, NULL, 0);
+ wq = cq ? mlx4_glue->create_wq
+ (ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = 1,
+ .max_sge = 1,
+ .pd = pd,
+ .cq = cq,
+ }) : NULL;
+ ind = wq ? mlx4_glue->create_rwq_ind_table
+ (ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = 0,
+ .ind_tbl = &wq,
+ .comp_mask = 0,
+ }) : NULL;
+ qp = ind ? mlx4_glue->create_qp_ex
+ (ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .comp_mask =
+ (IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_RX_HASH |
+ IBV_QP_INIT_ATTR_IND_TABLE),
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .pd = pd,
+ .rwq_ind_tbl = ind,
+ .rx_hash_conf = {
+ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
+ .rx_hash_key = mlx4_rss_hash_key_default,
+ .rx_hash_fields_mask = hw_rss_sup,
+ },
+ }) : NULL;
+ if (!qp) {
+ WARN("disabling unusable inner RSS capability due to kernel"
+ " quirk");
+ hw_rss_sup &= ~IBV_RX_HASH_INNER;
+ } else {
+ claim_zero(mlx4_glue->destroy_qp(qp));
+ }
+ if (ind)
+ claim_zero(mlx4_glue->destroy_rwq_ind_table(ind));
+ if (wq)
+ claim_zero(mlx4_glue->destroy_wq(wq));
+ if (cq)
+ claim_zero(mlx4_glue->destroy_cq(cq));
+ return hw_rss_sup;
+}
+
+static struct rte_pci_driver mlx4_driver;
+
+/**
+ * DPDK callback to register a PCI device.
+ *
+ * This function creates an Ethernet device for each port of a given
+ * PCI device.
+ *
+ * @param[in] pci_drv
+ * PCI driver structure (mlx4_driver).
+ * @param[in] pci_dev
+ * PCI device information.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ struct ibv_device **list;
+ struct ibv_device *ibv_dev;
+ int err = 0;
+ struct ibv_context *attr_ctx = NULL;
+ struct ibv_device_attr device_attr;
+ struct ibv_device_attr_ex device_attr_ex;
+ struct mlx4_conf conf = {
+ .ports.present = 0,
+ };
+ unsigned int vf;
+ int i;
+
+ (void)pci_drv;
+ assert(pci_drv == &mlx4_driver);
+ list = mlx4_glue->get_device_list(&i);
+ if (list == NULL) {
+ rte_errno = errno;
+ assert(rte_errno);
+ if (rte_errno == ENOSYS)
+ ERROR("cannot list devices, is ib_uverbs loaded?");
+ return -rte_errno;
+ }
+ assert(i >= 0);
+ /*
+ * For each listed device, check related sysfs entry against
+ * the provided PCI ID.
+ */
+ while (i != 0) {
+ struct rte_pci_addr pci_addr;
+
+ --i;
+ DEBUG("checking device \"%s\"", list[i]->name);
+ if (mlx4_ibv_device_to_pci_addr(list[i], &pci_addr))
+ continue;
+ if ((pci_dev->addr.domain != pci_addr.domain) ||
+ (pci_dev->addr.bus != pci_addr.bus) ||
+ (pci_dev->addr.devid != pci_addr.devid) ||
+ (pci_dev->addr.function != pci_addr.function))
+ continue;
+ vf = (pci_dev->id.device_id ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3VF);
+ INFO("PCI information matches, using device \"%s\" (VF: %s)",
+ list[i]->name, (vf ? "true" : "false"));
+ attr_ctx = mlx4_glue->open_device(list[i]);
+ err = errno;
+ break;
+ }
+ if (attr_ctx == NULL) {
+ mlx4_glue->free_device_list(list);
+ switch (err) {
+ case 0:
+ rte_errno = ENODEV;
+ ERROR("cannot access device, is mlx4_ib loaded?");
+ return -rte_errno;
+ case EINVAL:
+ rte_errno = EINVAL;
+ ERROR("cannot use device, are drivers up to date?");
+ return -rte_errno;
+ }
+ assert(err > 0);
+ rte_errno = err;
+ return -rte_errno;
+ }
+ ibv_dev = list[i];
+ DEBUG("device opened");
+ if (mlx4_glue->query_device(attr_ctx, &device_attr)) {
+ err = ENODEV;
+ goto error;
+ }
+ INFO("%u port(s) detected", device_attr.phys_port_cnt);
+ conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
+ if (mlx4_args(pci_dev->device.devargs, &conf)) {
+ ERROR("failed to process device arguments");
+ err = EINVAL;
+ goto error;
+ }
+ /* Use all ports when none are defined */
+ if (!conf.ports.enabled)
+ conf.ports.enabled = conf.ports.present;
+ /* Retrieve extended device attributes. */
+ if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
+ err = ENODEV;
+ goto error;
+ }
+ assert(device_attr.max_sge >= MLX4_MAX_SGE);
+ for (i = 0; i < device_attr.phys_port_cnt; i++) {
+ uint32_t port = i + 1; /* ports are indexed from one */
+ struct ibv_context *ctx = NULL;
+ struct ibv_port_attr port_attr;
+ struct ibv_pd *pd = NULL;
+ struct priv *priv = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ struct ether_addr mac;
+
+ /* If port is not enabled, skip. */
+ if (!(conf.ports.enabled & (1 << i)))
+ continue;
+ DEBUG("using port %u", port);
+ ctx = mlx4_glue->open_device(ibv_dev);
+ if (ctx == NULL) {
+ err = ENODEV;
+ goto port_error;
+ }
+ /* Check port status. */
+ err = mlx4_glue->query_port(ctx, port, &port_attr);
+ if (err) {
+ err = ENODEV;
+ ERROR("port query failed: %s", strerror(err));
+ goto port_error;
+ }
+ if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
+ err = ENOTSUP;
+ ERROR("port %d is not configured in Ethernet mode",
+ port);
+ goto port_error;
+ }
+ if (port_attr.state != IBV_PORT_ACTIVE)
+ DEBUG("port %d is not active: \"%s\" (%d)",
+ port, mlx4_glue->port_state_str(port_attr.state),
+ port_attr.state);
+ /* Make asynchronous FD non-blocking to handle interrupts. */
+ err = mlx4_fd_set_non_blocking(ctx->async_fd);
+ if (err) {
+ ERROR("cannot make asynchronous FD non-blocking: %s",
+ strerror(err));
+ goto port_error;
+ }
+ /* Allocate protection domain. */
+ pd = mlx4_glue->alloc_pd(ctx);
+ if (pd == NULL) {
+ err = ENOMEM;
+ ERROR("PD allocation failure");
+ goto port_error;
+ }
+ /* from rte_ethdev.c */
+ priv = rte_zmalloc("ethdev private structure",
+ sizeof(*priv),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ err = ENOMEM;
+ ERROR("priv allocation failure");
+ goto port_error;
+ }
+ priv->ctx = ctx;
+ priv->device_attr = device_attr;
+ priv->port = port;
+ priv->pd = pd;
+ priv->mtu = ETHER_MTU;
+ priv->vf = vf;
+ priv->hw_csum = !!(device_attr.device_cap_flags &
+ IBV_DEVICE_RAW_IP_CSUM);
+ DEBUG("checksum offloading is %ssupported",
+ (priv->hw_csum ? "" : "not "));
+ /* Only ConnectX-3 Pro supports tunneling. */
+ priv->hw_csum_l2tun =
+ priv->hw_csum &&
+ (device_attr.vendor_part_id ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
+ DEBUG("L2 tunnel checksum offloads are %ssupported",
+ priv->hw_csum_l2tun ? "" : "not ");
+ priv->hw_rss_sup = mlx4_hw_rss_sup(priv->ctx, priv->pd,
+ &device_attr_ex);
+ DEBUG("supported RSS hash fields mask: %016" PRIx64,
+ priv->hw_rss_sup);
+ priv->hw_rss_max_qps =
+ device_attr_ex.rss_caps.max_rwq_indirection_table_size;
+ DEBUG("MAX RSS queues %d", priv->hw_rss_max_qps);
+ priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
+ IBV_RAW_PACKET_CAP_SCATTER_FCS);
+ DEBUG("FCS stripping toggling is %ssupported",
+ priv->hw_fcs_strip ? "" : "not ");
+ priv->tso =
+ ((device_attr_ex.tso_caps.max_tso > 0) &&
+ (device_attr_ex.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
+ if (priv->tso)
+ priv->tso_max_payload_sz =
+ device_attr_ex.tso_caps.max_tso;
+ DEBUG("TSO is %ssupported",
+ priv->tso ? "" : "not ");
+ /* Configure the first MAC address by default. */
+ err = mlx4_get_mac(priv, &mac.addr_bytes);
+ if (err) {
+ ERROR("cannot get MAC address, is mlx4_en loaded?"
+ " (error: %s)", strerror(err));
+ goto port_error;
+ }
+ INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
+ priv->port,
+ mac.addr_bytes[0], mac.addr_bytes[1],
+ mac.addr_bytes[2], mac.addr_bytes[3],
+ mac.addr_bytes[4], mac.addr_bytes[5]);
+ /* Register MAC address. */
+ priv->mac[0] = mac;
+#ifndef NDEBUG
+ {
+ char ifname[IF_NAMESIZE];
+
+ if (mlx4_get_ifname(priv, &ifname) == 0)
+ DEBUG("port %u ifname is \"%s\"",
+ priv->port, ifname);
+ else
+ DEBUG("port %u ifname is unknown", priv->port);
+ }
+#endif
+ /* Get actual MTU if possible. */
+ mlx4_mtu_get(priv, &priv->mtu);
+ DEBUG("port %u MTU is %u", priv->port, priv->mtu);
+ /* from rte_ethdev.c */
+ {
+ char name[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(name, sizeof(name), "%s port %u",
+ mlx4_glue->get_device_name(ibv_dev), port);
+ eth_dev = rte_eth_dev_allocate(name);
+ }
+ if (eth_dev == NULL) {
+ err = ENOMEM;
+ ERROR("can not allocate rte ethdev");
+ goto port_error;
+ }
+ eth_dev->data->dev_private = priv;
+ eth_dev->data->mac_addrs = priv->mac;
+ eth_dev->device = &pci_dev->device;
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->device->driver = &mlx4_driver.driver;
+ /* Initialize local interrupt handle for current port. */
+ priv->intr_handle = (struct rte_intr_handle){
+ .fd = -1,
+ .type = RTE_INTR_HANDLE_EXT,
+ };
+ /*
+ * Override ethdev interrupt handle pointer with private
+ * handle instead of that of the parent PCI device used by
+ * default. This prevents it from being shared between all
+ * ports of the same PCI device since each of them is
+ * associated its own Verbs context.
+ *
+ * Rx interrupts in particular require this as the PMD has
+ * no control over the registration of queue interrupts
+ * besides setting up eth_dev->intr_handle, the rest is
+ * handled by rte_intr_rx_ctl().
+ */
+ eth_dev->intr_handle = &priv->intr_handle;
+ priv->dev = eth_dev;
+ eth_dev->dev_ops = &mlx4_dev_ops;
+ /* Bring Ethernet device up. */
+ DEBUG("forcing Ethernet interface up");
+ mlx4_dev_set_link_up(priv->dev);
+ /* Update link status once if waiting for LSC. */
+ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ mlx4_link_update(eth_dev, 0);
+ /*
+ * Once the device is added to the list of memory event
+ * callback, its global MR cache table cannot be expanded
+ * on the fly because of deadlock. If it overflows, lookup
+ * should be done by searching MR list linearly, which is slow.
+ */
+ err = mlx4_mr_btree_init(&priv->mr.cache,
+ MLX4_MR_BTREE_CACHE_N * 2,
+ eth_dev->device->numa_node);
+ if (err) {
+ /* rte_errno is already set. */
+ goto port_error;
+ }
+ /* Add device to memory callback list. */
+ rte_rwlock_write_lock(&mlx4_mem_event_rwlock);
+ LIST_INSERT_HEAD(&mlx4_mem_event_cb_list, priv, mem_event_cb);
+ rte_rwlock_write_unlock(&mlx4_mem_event_rwlock);
+ rte_eth_dev_probing_finish(eth_dev);
+ continue;
+port_error:
+ rte_free(priv);
+ if (pd)
+ claim_zero(mlx4_glue->dealloc_pd(pd));
+ if (ctx)
+ claim_zero(mlx4_glue->close_device(ctx));
+ if (eth_dev)
+ rte_eth_dev_release_port(eth_dev);
+ break;
+ }
+ /*
+ * XXX if something went wrong in the loop above, there is a resource
+ * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
+ * long as the dpdk does not provide a way to deallocate a ethdev and a
+ * way to enumerate the registered ethdevs to free the previous ones.
+ */
+error:
+ if (attr_ctx)
+ claim_zero(mlx4_glue->close_device(attr_ctx));
+ if (list)
+ mlx4_glue->free_device_list(list);
+ if (err)
+ rte_errno = err;
+ return -err;
+}
+
+static const struct rte_pci_id mlx4_pci_id_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3VF)
+ },
+ {
+ .vendor_id = 0
+ }
+};
+
+static struct rte_pci_driver mlx4_driver = {
+ .driver = {
+ .name = MLX4_DRIVER_NAME
+ },
+ .id_table = mlx4_pci_id_map,
+ .probe = mlx4_pci_probe,
+ .drv_flags = RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_INTR_RMV,
+};
+
+#ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS
+
+/**
+ * Suffix RTE_EAL_PMD_PATH with "-glue".
+ *
+ * This function performs a sanity check on RTE_EAL_PMD_PATH before
+ * suffixing its last component.
+ *
+ * @param buf[out]
+ * Output buffer, should be large enough otherwise NULL is returned.
+ * @param size
+ * Size of @p out.
+ *
+ * @return
+ * Pointer to @p buf or @p NULL in case suffix cannot be appended.
+ */
+static char *
+mlx4_glue_path(char *buf, size_t size)
+{
+ static const char *const bad[] = { "/", ".", "..", NULL };
+ const char *path = RTE_EAL_PMD_PATH;
+ size_t len = strlen(path);
+ size_t off;
+ int i;
+
+ while (len && path[len - 1] == '/')
+ --len;
+ for (off = len; off && path[off - 1] != '/'; --off)
+ ;
+ for (i = 0; bad[i]; ++i)
+ if (!strncmp(path + off, bad[i], (int)(len - off)))
+ goto error;
+ i = snprintf(buf, size, "%.*s-glue", (int)len, path);
+ if (i == -1 || (size_t)i >= size)
+ goto error;
+ return buf;
+error:
+ ERROR("unable to append \"-glue\" to last component of"
+ " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
+ " please re-configure DPDK");
+ return NULL;
+}
+
+/**
+ * Initialization routine for run-time dependency on rdma-core.
+ */
+static int
+mlx4_glue_init(void)
+{
+ char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
+ const char *path[] = {
+ /*
+ * A basic security check is necessary before trusting
+ * MLX4_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
+ */
+ (geteuid() == getuid() && getegid() == getgid() ?
+ getenv("MLX4_GLUE_PATH") : NULL),
+ /*
+ * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
+ * variant, otherwise let dlopen() look up libraries on its
+ * own.
+ */
+ (*RTE_EAL_PMD_PATH ?
+ mlx4_glue_path(glue_path, sizeof(glue_path)) : ""),
+ };
+ unsigned int i = 0;
+ void *handle = NULL;
+ void **sym;
+ const char *dlmsg;
+
+ while (!handle && i != RTE_DIM(path)) {
+ const char *end;
+ size_t len;
+ int ret;
+
+ if (!path[i]) {
+ ++i;
+ continue;
+ }
+ end = strpbrk(path[i], ":;");
+ if (!end)
+ end = path[i] + strlen(path[i]);
+ len = end - path[i];
+ ret = 0;
+ do {
+ char name[ret + 1];
+
+ ret = snprintf(name, sizeof(name), "%.*s%s" MLX4_GLUE,
+ (int)len, path[i],
+ (!len || *(end - 1) == '/') ? "" : "/");
+ if (ret == -1)
+ break;
+ if (sizeof(name) != (size_t)ret + 1)
+ continue;
+ DEBUG("looking for rdma-core glue as \"%s\"", name);
+ handle = dlopen(name, RTLD_LAZY);
+ break;
+ } while (1);
+ path[i] = end + 1;
+ if (!*end)
+ ++i;
+ }
+ if (!handle) {
+ rte_errno = EINVAL;
+ dlmsg = dlerror();
+ if (dlmsg)
+ WARN("cannot load glue library: %s", dlmsg);
+ goto glue_error;
+ }
+ sym = dlsym(handle, "mlx4_glue");
+ if (!sym || !*sym) {
+ rte_errno = EINVAL;
+ dlmsg = dlerror();
+ if (dlmsg)
+ ERROR("cannot resolve glue symbol: %s", dlmsg);
+ goto glue_error;
+ }
+ mlx4_glue = *sym;
+ return 0;
+glue_error:
+ if (handle)
+ dlclose(handle);
+ WARN("cannot initialize PMD due to missing run-time"
+ " dependency on rdma-core libraries (libibverbs,"
+ " libmlx4)");
+ return -rte_errno;
+}
+
+#endif
+
+/**
+ * Driver initialization routine.
+ */
+RTE_INIT(rte_mlx4_pmd_init)
+{
+ /*
+ * MLX4_DEVICE_FATAL_CLEANUP tells ibv_destroy functions we
+ * want to get success errno value in case of calling them
+ * when the device was removed.
+ */
+ setenv("MLX4_DEVICE_FATAL_CLEANUP", "1", 1);
+ /*
+ * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
+ * huge pages. Calling ibv_fork_init() during init allows
+ * applications to use fork() safely for purposes other than
+ * using this PMD, which is not supported in forked processes.
+ */
+ setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
+#ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS
+ if (mlx4_glue_init())
+ return;
+ assert(mlx4_glue);
+#endif
+#ifndef NDEBUG
+ /* Glue structure must not contain any NULL pointers. */
+ {
+ unsigned int i;
+
+ for (i = 0; i != sizeof(*mlx4_glue) / sizeof(void *); ++i)
+ assert(((const void *const *)mlx4_glue)[i]);
+ }
+#endif
+ if (strcmp(mlx4_glue->version, MLX4_GLUE_VERSION)) {
+ ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required",
+ mlx4_glue->version, MLX4_GLUE_VERSION);
+ return;
+ }
+ mlx4_glue->fork_init();
+ rte_pci_register(&mlx4_driver);
+ rte_mem_event_callback_register("MLX4_MEM_EVENT_CB",
+ mlx4_mr_mem_event_cb, NULL);
+}
+
+RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);
+RTE_PMD_REGISTER_PCI_TABLE(net_mlx4, mlx4_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_mlx4,
+ "* ib_uverbs & mlx4_en & mlx4_core & mlx4_ib");
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4.h
new file mode 100644
index 00000000..e6fb934f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2012 6WIND S.A.
+ * Copyright 2012 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX4_H_
+#define RTE_PMD_MLX4_H_
+
+#include <net/if.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+#include <rte_interrupts.h>
+#include <rte_mempool.h>
+#include <rte_rwlock.h>
+
+#include "mlx4_mr.h"
+
+#ifndef IBV_RX_HASH_INNER
+/** This is not necessarily defined by supported RDMA core versions. */
+#define IBV_RX_HASH_INNER (1ull << 31)
+#endif /* IBV_RX_HASH_INNER */
+
+/** Maximum number of simultaneous MAC addresses. This value is arbitrary. */
+#define MLX4_MAX_MAC_ADDRESSES 128
+
+/** Request send completion once in every 64 sends, might be less. */
+#define MLX4_PMD_TX_PER_COMP_REQ 64
+
+/** Maximum size for inline data. */
+#define MLX4_PMD_MAX_INLINE 0
+
+/** Fixed RSS hash key size in bytes. Cannot be modified. */
+#define MLX4_RSS_HASH_KEY_SIZE 40
+
+/** Interrupt alarm timeout value in microseconds. */
+#define MLX4_INTR_ALARM_TIMEOUT 100000
+
+/* Maximum packet headers size (L2+L3+L4) for TSO. */
+#define MLX4_MAX_TSO_HEADER 192
+
+/** Port parameter. */
+#define MLX4_PMD_PORT_KVARG "port"
+
+enum {
+ PCI_VENDOR_ID_MELLANOX = 0x15b3,
+};
+
+enum {
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3 = 0x1003,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3VF = 0x1004,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO = 0x1007,
+};
+
+/** Driver name reported to lower layers and used in log output. */
+#define MLX4_DRIVER_NAME "net_mlx4"
+
+struct mlx4_drop;
+struct mlx4_rss;
+struct rxq;
+struct txq;
+struct rte_flow;
+
+LIST_HEAD(mlx4_dev_list, priv);
+LIST_HEAD(mlx4_mr_list, mlx4_mr);
+
+/** Private data structure. */
+struct priv {
+ LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
+ struct rte_eth_dev *dev; /**< Ethernet device. */
+ struct ibv_context *ctx; /**< Verbs context. */
+ struct ibv_device_attr device_attr; /**< Device properties. */
+ struct ibv_pd *pd; /**< Protection Domain. */
+ /* Device properties. */
+ uint16_t mtu; /**< Configured MTU. */
+ uint8_t port; /**< Physical port number. */
+ uint32_t started:1; /**< Device started, flows enabled. */
+ uint32_t vf:1; /**< This is a VF device. */
+ uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */
+ uint32_t isolated:1; /**< Toggle isolated mode. */
+ uint32_t rss_init:1; /**< Common RSS context is initialized. */
+ uint32_t hw_csum:1; /**< Checksum offload is supported. */
+ uint32_t hw_csum_l2tun:1; /**< Checksum support for L2 tunnels. */
+ uint32_t hw_fcs_strip:1; /**< FCS stripping toggling is supported. */
+ uint32_t tso:1; /**< Transmit segmentation offload is supported. */
+ uint32_t tso_max_payload_sz; /**< Max supported TSO payload size. */
+ uint32_t hw_rss_max_qps; /**< Max Rx Queues supported by RSS. */
+ uint64_t hw_rss_sup; /**< Supported RSS hash fields (Verbs format). */
+ struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
+ struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
+ struct {
+ uint32_t dev_gen; /* Generation number to flush local caches. */
+ rte_rwlock_t rwlock; /* MR Lock. */
+ struct mlx4_mr_btree cache; /* Global MR cache table. */
+ struct mlx4_mr_list mr_list; /* Registered MR list. */
+ struct mlx4_mr_list mr_free_list; /* Freed MR list. */
+ } mr;
+ LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */
+ LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
+ struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
+ /**< Configured MAC addresses. Unused entries are zeroed. */
+};
+
+/* mlx4_ethdev.c */
+
+int mlx4_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]);
+int mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]);
+int mlx4_mtu_get(struct priv *priv, uint16_t *mtu);
+int mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+int mlx4_dev_set_link_down(struct rte_eth_dev *dev);
+int mlx4_dev_set_link_up(struct rte_eth_dev *dev);
+void mlx4_promiscuous_enable(struct rte_eth_dev *dev);
+void mlx4_promiscuous_disable(struct rte_eth_dev *dev);
+void mlx4_allmulticast_enable(struct rte_eth_dev *dev);
+void mlx4_allmulticast_disable(struct rte_eth_dev *dev);
+void mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
+int mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq);
+int mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr);
+int mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
+int mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+void mlx4_stats_reset(struct rte_eth_dev *dev);
+void mlx4_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *info);
+int mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+int mlx4_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+int mlx4_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+const uint32_t *mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int mlx4_is_removed(struct rte_eth_dev *dev);
+
+/* mlx4_intr.c */
+
+int mlx4_intr_uninstall(struct priv *priv);
+int mlx4_intr_install(struct priv *priv);
+int mlx4_rxq_intr_enable(struct priv *priv);
+void mlx4_rxq_intr_disable(struct priv *priv);
+int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
+int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
+
+#endif /* RTE_PMD_MLX4_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_ethdev.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_ethdev.c
new file mode 100644
index 00000000..30deb3ef
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_ethdev.c
@@ -0,0 +1,883 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+/**
+ * @file
+ * Miscellaneous control operations for mlx4 driver.
+ */
+
+#include <assert.h>
+#include <dirent.h>
+#include <errno.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <netinet/ip.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_bus_pci.h>
+#include <rte_errno.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+
+#include "mlx4.h"
+#include "mlx4_flow.h"
+#include "mlx4_glue.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+/**
+ * Get interface name from private structure.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ * @param[out] ifname
+ * Interface name output buffer.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
+{
+ DIR *dir;
+ struct dirent *dent;
+ unsigned int dev_type = 0;
+ unsigned int dev_port_prev = ~0u;
+ char match[IF_NAMESIZE] = "";
+
+ {
+ MKSTR(path, "%s/device/net", priv->ctx->device->ibdev_path);
+
+ dir = opendir(path);
+ if (dir == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ }
+ while ((dent = readdir(dir)) != NULL) {
+ char *name = dent->d_name;
+ FILE *file;
+ unsigned int dev_port;
+ int r;
+
+ if ((name[0] == '.') &&
+ ((name[1] == '\0') ||
+ ((name[1] == '.') && (name[2] == '\0'))))
+ continue;
+
+ MKSTR(path, "%s/device/net/%s/%s",
+ priv->ctx->device->ibdev_path, name,
+ (dev_type ? "dev_id" : "dev_port"));
+
+ file = fopen(path, "rb");
+ if (file == NULL) {
+ if (errno != ENOENT)
+ continue;
+ /*
+ * Switch to dev_id when dev_port does not exist as
+ * is the case with Linux kernel versions < 3.15.
+ */
+try_dev_id:
+ match[0] = '\0';
+ if (dev_type)
+ break;
+ dev_type = 1;
+ dev_port_prev = ~0u;
+ rewinddir(dir);
+ continue;
+ }
+ r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
+ fclose(file);
+ if (r != 1)
+ continue;
+ /*
+ * Switch to dev_id when dev_port returns the same value for
+ * all ports. May happen when using a MOFED release older than
+ * 3.0 with a Linux kernel >= 3.15.
+ */
+ if (dev_port == dev_port_prev)
+ goto try_dev_id;
+ dev_port_prev = dev_port;
+ if (dev_port == (priv->port - 1u))
+ strlcpy(match, name, sizeof(match));
+ }
+ closedir(dir);
+ if (match[0] == '\0') {
+ rte_errno = ENODEV;
+ return -rte_errno;
+ }
+ strncpy(*ifname, match, sizeof(*ifname));
+ return 0;
+}
+
+/**
+ * Perform ifreq ioctl() on associated Ethernet device.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ * @param req
+ * Request number to pass to ioctl().
+ * @param[out] ifr
+ * Interface request structure output buffer.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
+{
+ int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
+ int ret;
+
+ if (sock == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ ret = mlx4_get_ifname(priv, &ifr->ifr_name);
+ if (!ret && ioctl(sock, req, ifr) == -1) {
+ rte_errno = errno;
+ ret = -rte_errno;
+ }
+ close(sock);
+ return ret;
+}
+
+/**
+ * Get MAC address by querying netdevice.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ * @param[out] mac
+ * MAC address output buffer.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
+{
+ struct ifreq request;
+ int ret = mlx4_ifreq(priv, SIOCGIFHWADDR, &request);
+
+ if (ret)
+ return ret;
+ memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+ return 0;
+}
+
+/**
+ * Get device MTU.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[out] mtu
+ * MTU value output buffer.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_mtu_get(struct priv *priv, uint16_t *mtu)
+{
+ struct ifreq request;
+ int ret = mlx4_ifreq(priv, SIOCGIFMTU, &request);
+
+ if (ret)
+ return ret;
+ *mtu = request.ifr_mtu;
+ return 0;
+}
+
+/**
+ * DPDK callback to change the MTU.
+ *
+ * @param priv
+ * Pointer to Ethernet device structure.
+ * @param mtu
+ * MTU value to set.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ifreq request = { .ifr_mtu = mtu, };
+ int ret = mlx4_ifreq(priv, SIOCSIFMTU, &request);
+
+ if (ret)
+ return ret;
+ priv->mtu = mtu;
+ return 0;
+}
+
+/**
+ * Set device flags.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param keep
+ * Bitmask for flags that must remain untouched.
+ * @param flags
+ * Bitmask for flags to modify.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
+{
+ struct ifreq request;
+ int ret = mlx4_ifreq(priv, SIOCGIFFLAGS, &request);
+
+ if (ret)
+ return ret;
+ request.ifr_flags &= keep;
+ request.ifr_flags |= flags & ~keep;
+ return mlx4_ifreq(priv, SIOCSIFFLAGS, &request);
+}
+
+/**
+ * Change the link state (UP / DOWN).
+ *
+ * @param priv
+ * Pointer to Ethernet device private data.
+ * @param up
+ * Nonzero for link up, otherwise link down.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_dev_set_link(struct priv *priv, int up)
+{
+ int err;
+
+ if (up) {
+ err = mlx4_set_flags(priv, ~IFF_UP, IFF_UP);
+ if (err)
+ return err;
+ } else {
+ err = mlx4_set_flags(priv, ~IFF_UP, ~IFF_UP);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/**
+ * DPDK callback to bring the link DOWN.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ return mlx4_dev_set_link(priv, 0);
+}
+
+/**
+ * DPDK callback to bring the link UP.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ return mlx4_dev_set_link(priv, 1);
+}
+
+/**
+ * Supported Rx mode toggles.
+ *
+ * Even and odd values respectively stand for off and on.
+ */
+enum rxmode_toggle {
+ RXMODE_TOGGLE_PROMISC_OFF,
+ RXMODE_TOGGLE_PROMISC_ON,
+ RXMODE_TOGGLE_ALLMULTI_OFF,
+ RXMODE_TOGGLE_ALLMULTI_ON,
+};
+
+/**
+ * Helper function to toggle promiscuous and all multicast modes.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param toggle
+ * Toggle to set.
+ */
+static void
+mlx4_rxmode_toggle(struct rte_eth_dev *dev, enum rxmode_toggle toggle)
+{
+ struct priv *priv = dev->data->dev_private;
+ const char *mode;
+ struct rte_flow_error error;
+
+ switch (toggle) {
+ case RXMODE_TOGGLE_PROMISC_OFF:
+ case RXMODE_TOGGLE_PROMISC_ON:
+ mode = "promiscuous";
+ dev->data->promiscuous = toggle & 1;
+ break;
+ case RXMODE_TOGGLE_ALLMULTI_OFF:
+ case RXMODE_TOGGLE_ALLMULTI_ON:
+ mode = "all multicast";
+ dev->data->all_multicast = toggle & 1;
+ break;
+ }
+ if (!mlx4_flow_sync(priv, &error))
+ return;
+ ERROR("cannot toggle %s mode (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ mode, rte_errno, strerror(rte_errno), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
+}
+
+/**
+ * DPDK callback to enable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx4_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ mlx4_rxmode_toggle(dev, RXMODE_TOGGLE_PROMISC_ON);
+}
+
+/**
+ * DPDK callback to disable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx4_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ mlx4_rxmode_toggle(dev, RXMODE_TOGGLE_PROMISC_OFF);
+}
+
+/**
+ * DPDK callback to enable all multicast mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx4_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ mlx4_rxmode_toggle(dev, RXMODE_TOGGLE_ALLMULTI_ON);
+}
+
+/**
+ * DPDK callback to disable all multicast mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx4_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ mlx4_rxmode_toggle(dev, RXMODE_TOGGLE_ALLMULTI_OFF);
+}
+
+/**
+ * DPDK callback to remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param index
+ * MAC address index.
+ */
+void
+mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
+
+ if (index >= RTE_DIM(priv->mac)) {
+ rte_errno = EINVAL;
+ return;
+ }
+ memset(&priv->mac[index], 0, sizeof(priv->mac[index]));
+ if (!mlx4_flow_sync(priv, &error))
+ return;
+ ERROR("failed to synchronize flow rules after removing MAC address"
+ " at index %d (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ index, rte_errno, strerror(rte_errno), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
+}
+
+/**
+ * DPDK callback to add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ * @param vmdq
+ * VMDq pool index to associate address with (ignored).
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
+ int ret;
+
+ (void)vmdq;
+ if (index >= RTE_DIM(priv->mac)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ memcpy(&priv->mac[index], mac_addr, sizeof(priv->mac[index]));
+ ret = mlx4_flow_sync(priv, &error);
+ if (!ret)
+ return 0;
+ ERROR("failed to synchronize flow rules after adding MAC address"
+ " at index %d (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ index, rte_errno, strerror(rte_errno), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
+ return ret;
+}
+
+/**
+ * DPDK callback to configure a VLAN filter.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param vlan_id
+ * VLAN ID to filter.
+ * @param on
+ * Toggle filter.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
+ unsigned int vidx = vlan_id / 64;
+ unsigned int vbit = vlan_id % 64;
+ uint64_t *v;
+ int ret;
+
+ if (vidx >= RTE_DIM(dev->data->vlan_filter_conf.ids)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ v = &dev->data->vlan_filter_conf.ids[vidx];
+ *v &= ~(UINT64_C(1) << vbit);
+ *v |= (uint64_t)!!on << vbit;
+ ret = mlx4_flow_sync(priv, &error);
+ if (!ret)
+ return 0;
+ ERROR("failed to synchronize flow rules after %s VLAN filter on ID %u"
+ " (code %d, \"%s\"), "
+ " flow error type %d, cause %p, message: %s",
+ on ? "enabling" : "disabling", vlan_id,
+ rte_errno, strerror(rte_errno), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
+ return ret;
+}
+
+/**
+ * DPDK callback to set the primary MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ return mlx4_mac_addr_add(dev, mac_addr, 0, 0);
+}
+
+/**
+ * DPDK callback to get information about the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] info
+ * Info structure output buffer.
+ */
+void
+mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int max;
+ char ifname[IF_NAMESIZE];
+
+ /* FIXME: we should ask the device for these values. */
+ info->min_rx_bufsize = 32;
+ info->max_rx_pktlen = 65536;
+ /*
+ * Since we need one CQ per QP, the limit is the minimum number
+ * between the two values.
+ */
+ max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?
+ priv->device_attr.max_qp : priv->device_attr.max_cq);
+ /* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */
+ if (max >= 65535)
+ max = 65535;
+ info->max_rx_queues = max;
+ info->max_tx_queues = max;
+ info->max_mac_addrs = RTE_DIM(priv->mac);
+ info->tx_offload_capa = mlx4_get_tx_port_offloads(priv);
+ info->rx_queue_offload_capa = mlx4_get_rx_queue_offloads(priv);
+ info->rx_offload_capa = (mlx4_get_rx_port_offloads(priv) |
+ info->rx_queue_offload_capa);
+ if (mlx4_get_ifname(priv, &ifname) == 0)
+ info->if_index = if_nametoindex(ifname);
+ info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
+ info->speed_capa =
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_20G |
+ ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_56G;
+ info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1);
+}
+
+/**
+ * DPDK callback to get device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] stats
+ * Stats structure output buffer.
+ */
+int
+mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct rte_eth_stats tmp;
+ unsigned int i;
+ unsigned int idx;
+
+ memset(&tmp, 0, sizeof(tmp));
+ /* Add software counters. */
+ for (i = 0; i != dev->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = dev->data->rx_queues[i];
+
+ if (rxq == NULL)
+ continue;
+ idx = rxq->stats.idx;
+ if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ tmp.q_ipackets[idx] += rxq->stats.ipackets;
+ tmp.q_ibytes[idx] += rxq->stats.ibytes;
+ tmp.q_errors[idx] += (rxq->stats.idropped +
+ rxq->stats.rx_nombuf);
+ }
+ tmp.ipackets += rxq->stats.ipackets;
+ tmp.ibytes += rxq->stats.ibytes;
+ tmp.ierrors += rxq->stats.idropped;
+ tmp.rx_nombuf += rxq->stats.rx_nombuf;
+ }
+ for (i = 0; i != dev->data->nb_tx_queues; ++i) {
+ struct txq *txq = dev->data->tx_queues[i];
+
+ if (txq == NULL)
+ continue;
+ idx = txq->stats.idx;
+ if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ tmp.q_opackets[idx] += txq->stats.opackets;
+ tmp.q_obytes[idx] += txq->stats.obytes;
+ tmp.q_errors[idx] += txq->stats.odropped;
+ }
+ tmp.opackets += txq->stats.opackets;
+ tmp.obytes += txq->stats.obytes;
+ tmp.oerrors += txq->stats.odropped;
+ }
+ *stats = tmp;
+ return 0;
+}
+
+/**
+ * DPDK callback to clear device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx4_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i != dev->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = dev->data->rx_queues[i];
+
+ if (rxq)
+ rxq->stats = (struct mlx4_rxq_stats){
+ .idx = rxq->stats.idx,
+ };
+ }
+ for (i = 0; i != dev->data->nb_tx_queues; ++i) {
+ struct txq *txq = dev->data->tx_queues[i];
+
+ if (txq)
+ txq->stats = (struct mlx4_txq_stats){
+ .idx = txq->stats.idx,
+ };
+ }
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ * Wait for request completion (ignored).
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ const struct priv *priv = dev->data->dev_private;
+ struct ethtool_cmd edata = {
+ .cmd = ETHTOOL_GSET,
+ };
+ struct ifreq ifr;
+ struct rte_eth_link dev_link;
+ int link_speed = 0;
+
+ if (priv == NULL) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ (void)wait_to_complete;
+ if (mlx4_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
+ WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(rte_errno));
+ return -rte_errno;
+ }
+ memset(&dev_link, 0, sizeof(dev_link));
+ dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
+ (ifr.ifr_flags & IFF_RUNNING));
+ ifr.ifr_data = (void *)&edata;
+ if (mlx4_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
+ strerror(rte_errno));
+ return -rte_errno;
+ }
+ link_speed = ethtool_cmd_speed(&edata);
+ if (link_speed == -1)
+ dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ else
+ dev_link.link_speed = link_speed;
+ dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
+ ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+ dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ dev->data->dev_link = dev_link;
+ return 0;
+}
+
+/**
+ * DPDK callback to get flow control status.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] fc_conf
+ * Flow control output buffer.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ifreq ifr;
+ struct ethtool_pauseparam ethpause = {
+ .cmd = ETHTOOL_GPAUSEPARAM,
+ };
+ int ret;
+
+ ifr.ifr_data = (void *)&ethpause;
+ if (mlx4_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ ret = rte_errno;
+ WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
+ " failed: %s",
+ strerror(rte_errno));
+ goto out;
+ }
+ fc_conf->autoneg = ethpause.autoneg;
+ if (ethpause.rx_pause && ethpause.tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (ethpause.rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (ethpause.tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+ ret = 0;
+out:
+ assert(ret >= 0);
+ return -ret;
+}
+
+/**
+ * DPDK callback to modify flow control parameters.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] fc_conf
+ * Flow control parameters.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ifreq ifr;
+ struct ethtool_pauseparam ethpause = {
+ .cmd = ETHTOOL_SPAUSEPARAM,
+ };
+ int ret;
+
+ ifr.ifr_data = (void *)&ethpause;
+ ethpause.autoneg = fc_conf->autoneg;
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_RX_PAUSE))
+ ethpause.rx_pause = 1;
+ else
+ ethpause.rx_pause = 0;
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_TX_PAUSE))
+ ethpause.tx_pause = 1;
+ else
+ ethpause.tx_pause = 0;
+ if (mlx4_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ ret = rte_errno;
+ WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
+ " failed: %s",
+ strerror(rte_errno));
+ goto out;
+ }
+ ret = 0;
+out:
+ assert(ret >= 0);
+ return -ret;
+}
+
+/**
+ * DPDK callback to retrieve the received packet types that are recognized
+ * by the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * Pointer to an array of recognized packet types if in Rx burst mode,
+ * NULL otherwise.
+ */
+const uint32_t *
+mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to rxq_cq_to_pkt_type() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+ static const uint32_t ptypes_l2tun[] = {
+ /* refers to rxq_cq_to_pkt_type() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_UNKNOWN
+ };
+ struct priv *priv = dev->data->dev_private;
+
+ if (dev->rx_pkt_burst == mlx4_rx_burst) {
+ if (priv->hw_csum_l2tun)
+ return ptypes_l2tun;
+ else
+ return ptypes;
+ }
+ return NULL;
+}
+
+/**
+ * Check if mlx4 device was removed.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 1 when device is removed, otherwise 0.
+ */
+int
+mlx4_is_removed(struct rte_eth_dev *dev)
+{
+ struct ibv_device_attr device_attr;
+ struct priv *priv = dev->data->dev_private;
+
+ if (mlx4_glue->query_device(priv->ctx, &device_attr) == EIO)
+ return 1;
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.c
new file mode 100644
index 00000000..b40e7e5c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.c
@@ -0,0 +1,1617 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+/**
+ * @file
+ * Flow API operations for mlx4 driver.
+ */
+
+#include <arpa/inet.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdalign.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/queue.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+#include <rte_eth_ctrl.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+
+/* PMD headers. */
+#include "mlx4.h"
+#include "mlx4_glue.h"
+#include "mlx4_flow.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+/** Static initializer for a list of subsequent item types. */
+#define NEXT_ITEM(...) \
+ (const enum rte_flow_item_type []){ \
+ __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
+ }
+
+/** Processor structure associated with a flow item. */
+struct mlx4_flow_proc_item {
+ /** Bit-mask for fields supported by this PMD. */
+ const void *mask_support;
+ /** Bit-mask to use when @p item->mask is not provided. */
+ const void *mask_default;
+ /** Size in bytes for @p mask_support and @p mask_default. */
+ const unsigned int mask_sz;
+ /** Merge a pattern item into a flow rule handle. */
+ int (*merge)(struct rte_flow *flow,
+ const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error);
+ /** Size in bytes of the destination structure. */
+ const unsigned int dst_sz;
+ /** List of possible subsequent items. */
+ const enum rte_flow_item_type *const next_item;
+};
+
+/** Shared resources for drop flow rules. */
+struct mlx4_drop {
+ struct ibv_qp *qp; /**< QP target. */
+ struct ibv_cq *cq; /**< CQ associated with above QP. */
+ struct priv *priv; /**< Back pointer to private data. */
+ uint32_t refcnt; /**< Reference count. */
+};
+
+/**
+ * Convert supported RSS hash field types between DPDK and Verbs formats.
+ *
+ * This function returns the supported (default) set when @p types has
+ * special value 0.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param types
+ * Depending on @p verbs_to_dpdk, hash types in either DPDK (see struct
+ * rte_eth_rss_conf) or Verbs format.
+ * @param verbs_to_dpdk
+ * A zero value converts @p types from DPDK to Verbs, a nonzero value
+ * performs the reverse operation.
+ *
+ * @return
+ * Converted RSS hash fields on success, (uint64_t)-1 otherwise and
+ * rte_errno is set.
+ */
+uint64_t
+mlx4_conv_rss_types(struct priv *priv, uint64_t types, int verbs_to_dpdk)
+{
+ enum {
+ INNER,
+ IPV4, IPV4_1, IPV4_2, IPV6, IPV6_1, IPV6_2, IPV6_3,
+ TCP, UDP,
+ IPV4_TCP, IPV4_UDP, IPV6_TCP, IPV6_TCP_1, IPV6_UDP, IPV6_UDP_1,
+ };
+ enum {
+ VERBS_IPV4 = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
+ VERBS_IPV6 = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
+ VERBS_TCP = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
+ VERBS_UDP = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
+ };
+ static const uint64_t dpdk[] = {
+ [INNER] = 0,
+ [IPV4] = ETH_RSS_IPV4,
+ [IPV4_1] = ETH_RSS_FRAG_IPV4,
+ [IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER,
+ [IPV6] = ETH_RSS_IPV6,
+ [IPV6_1] = ETH_RSS_FRAG_IPV6,
+ [IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER,
+ [IPV6_3] = ETH_RSS_IPV6_EX,
+ [TCP] = 0,
+ [UDP] = 0,
+ [IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP,
+ [IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP,
+ [IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP,
+ [IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX,
+ [IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP,
+ [IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX,
+ };
+ static const uint64_t verbs[RTE_DIM(dpdk)] = {
+ [INNER] = IBV_RX_HASH_INNER,
+ [IPV4] = VERBS_IPV4,
+ [IPV4_1] = VERBS_IPV4,
+ [IPV4_2] = VERBS_IPV4,
+ [IPV6] = VERBS_IPV6,
+ [IPV6_1] = VERBS_IPV6,
+ [IPV6_2] = VERBS_IPV6,
+ [IPV6_3] = VERBS_IPV6,
+ [TCP] = VERBS_TCP,
+ [UDP] = VERBS_UDP,
+ [IPV4_TCP] = VERBS_IPV4 | VERBS_TCP,
+ [IPV4_UDP] = VERBS_IPV4 | VERBS_UDP,
+ [IPV6_TCP] = VERBS_IPV6 | VERBS_TCP,
+ [IPV6_TCP_1] = VERBS_IPV6 | VERBS_TCP,
+ [IPV6_UDP] = VERBS_IPV6 | VERBS_UDP,
+ [IPV6_UDP_1] = VERBS_IPV6 | VERBS_UDP,
+ };
+ const uint64_t *in = verbs_to_dpdk ? verbs : dpdk;
+ const uint64_t *out = verbs_to_dpdk ? dpdk : verbs;
+ uint64_t seen = 0;
+ uint64_t conv = 0;
+ unsigned int i;
+
+ if (!types) {
+ if (!verbs_to_dpdk)
+ return priv->hw_rss_sup;
+ types = priv->hw_rss_sup;
+ }
+ for (i = 0; i != RTE_DIM(dpdk); ++i)
+ if (in[i] && (types & in[i]) == in[i]) {
+ seen |= types & in[i];
+ conv |= out[i];
+ }
+ if ((verbs_to_dpdk || (conv & priv->hw_rss_sup) == conv) &&
+ !(types & ~seen))
+ return conv;
+ rte_errno = ENOTSUP;
+ return (uint64_t)-1;
+}
+
+/**
+ * Merge Ethernet pattern item into flow rule handle.
+ *
+ * Additional mlx4-specific constraints on supported fields:
+ *
+ * - No support for partial masks, except in the specific case of matching
+ * all multicast traffic (@p spec->dst and @p mask->dst equal to
+ * 01:00:00:00:00:00).
+ * - Not providing @p item->spec or providing an empty @p mask->dst is
+ * *only* supported if the rule doesn't specify additional matching
+ * criteria (i.e. rule is promiscuous-like).
+ *
+ * @param[in, out] flow
+ * Flow rule handle to update.
+ * @param[in] item
+ * Pattern item to merge.
+ * @param[in] proc
+ * Associated item-processing object.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_flow_merge_eth(struct rte_flow *flow,
+ const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask =
+ spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
+ struct ibv_flow_spec_eth *eth;
+ const char *msg;
+ unsigned int i;
+
+ if (!mask) {
+ flow->promisc = 1;
+ } else {
+ uint32_t sum_dst = 0;
+ uint32_t sum_src = 0;
+
+ for (i = 0; i != sizeof(mask->dst.addr_bytes); ++i) {
+ sum_dst += mask->dst.addr_bytes[i];
+ sum_src += mask->src.addr_bytes[i];
+ }
+ if (sum_src) {
+ msg = "mlx4 does not support source MAC matching";
+ goto error;
+ } else if (!sum_dst) {
+ flow->promisc = 1;
+ } else if (sum_dst == 1 && mask->dst.addr_bytes[0] == 1) {
+ if (!(spec->dst.addr_bytes[0] & 1)) {
+ msg = "mlx4 does not support the explicit"
+ " exclusion of all multicast traffic";
+ goto error;
+ }
+ flow->allmulti = 1;
+ } else if (sum_dst != (UINT8_C(0xff) * ETHER_ADDR_LEN)) {
+ msg = "mlx4 does not support matching partial"
+ " Ethernet fields";
+ goto error;
+ }
+ }
+ if (!flow->ibv_attr)
+ return 0;
+ if (flow->promisc) {
+ flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
+ return 0;
+ }
+ if (flow->allmulti) {
+ flow->ibv_attr->type = IBV_FLOW_ATTR_MC_DEFAULT;
+ return 0;
+ }
+ ++flow->ibv_attr->num_of_specs;
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
+ *eth = (struct ibv_flow_spec_eth) {
+ .type = IBV_FLOW_SPEC_ETH,
+ .size = sizeof(*eth),
+ };
+ memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
+ }
+ return 0;
+error:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, msg);
+}
+
+/**
+ * Merge VLAN pattern item into flow rule handle.
+ *
+ * Additional mlx4-specific constraints on supported fields:
+ *
+ * - Matching *all* VLAN traffic by omitting @p item->spec or providing an
+ * empty @p item->mask would also include non-VLAN traffic. Doing so is
+ * therefore unsupported.
+ * - No support for partial masks.
+ *
+ * @param[in, out] flow
+ * Flow rule handle to update.
+ * @param[in] item
+ * Pattern item to merge.
+ * @param[in] proc
+ * Associated item-processing object.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_flow_merge_vlan(struct rte_flow *flow,
+ const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask =
+ spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
+ struct ibv_flow_spec_eth *eth;
+ const char *msg;
+
+ if (!mask || !mask->tci) {
+ msg = "mlx4 cannot match all VLAN traffic while excluding"
+ " non-VLAN traffic, TCI VID must be specified";
+ goto error;
+ }
+ if (mask->tci != RTE_BE16(0x0fff)) {
+ msg = "mlx4 does not support partial TCI VID matching";
+ goto error;
+ }
+ if (!flow->ibv_attr)
+ return 0;
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size -
+ sizeof(*eth));
+ eth->val.vlan_tag = spec->tci;
+ eth->mask.vlan_tag = mask->tci;
+ eth->val.vlan_tag &= eth->mask.vlan_tag;
+ return 0;
+error:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, msg);
+}
+
+/**
+ * Merge IPv4 pattern item into flow rule handle.
+ *
+ * Additional mlx4-specific constraints on supported fields:
+ *
+ * - No support for partial masks.
+ *
+ * @param[in, out] flow
+ * Flow rule handle to update.
+ * @param[in] item
+ * Pattern item to merge.
+ * @param[in] proc
+ * Associated item-processing object.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_flow_merge_ipv4(struct rte_flow *flow,
+ const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask =
+ spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
+ struct ibv_flow_spec_ipv4 *ipv4;
+ const char *msg;
+
+ if (mask &&
+ ((uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) ||
+ (uint32_t)(mask->hdr.dst_addr + 1) > UINT32_C(1))) {
+ msg = "mlx4 does not support matching partial IPv4 fields";
+ goto error;
+ }
+ if (!flow->ibv_attr)
+ return 0;
+ ++flow->ibv_attr->num_of_specs;
+ ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
+ *ipv4 = (struct ibv_flow_spec_ipv4) {
+ .type = IBV_FLOW_SPEC_IPV4,
+ .size = sizeof(*ipv4),
+ };
+ if (!spec)
+ return 0;
+ ipv4->val = (struct ibv_flow_ipv4_filter) {
+ .src_ip = spec->hdr.src_addr,
+ .dst_ip = spec->hdr.dst_addr,
+ };
+ ipv4->mask = (struct ibv_flow_ipv4_filter) {
+ .src_ip = mask->hdr.src_addr,
+ .dst_ip = mask->hdr.dst_addr,
+ };
+ /* Remove unwanted bits from values. */
+ ipv4->val.src_ip &= ipv4->mask.src_ip;
+ ipv4->val.dst_ip &= ipv4->mask.dst_ip;
+ return 0;
+error:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, msg);
+}
+
+/**
+ * Merge UDP pattern item into flow rule handle.
+ *
+ * Additional mlx4-specific constraints on supported fields:
+ *
+ * - No support for partial masks.
+ * - Due to HW/FW limitation, flow rule priority is not taken into account
+ * when matching UDP destination ports, doing is therefore only supported
+ * at the highest priority level (0).
+ *
+ * @param[in, out] flow
+ * Flow rule handle to update.
+ * @param[in] item
+ * Pattern item to merge.
+ * @param[in] proc
+ * Associated item-processing object.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_flow_merge_udp(struct rte_flow *flow,
+ const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask =
+ spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
+ struct ibv_flow_spec_tcp_udp *udp;
+ const char *msg;
+
+ if (mask &&
+ ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
+ (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
+ msg = "mlx4 does not support matching partial UDP fields";
+ goto error;
+ }
+ if (mask && mask->hdr.dst_port && flow->priority) {
+ msg = "combining UDP destination port matching with a nonzero"
+ " priority level is not supported";
+ goto error;
+ }
+ if (!flow->ibv_attr)
+ return 0;
+ ++flow->ibv_attr->num_of_specs;
+ udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
+ *udp = (struct ibv_flow_spec_tcp_udp) {
+ .type = IBV_FLOW_SPEC_UDP,
+ .size = sizeof(*udp),
+ };
+ if (!spec)
+ return 0;
+ udp->val.dst_port = spec->hdr.dst_port;
+ udp->val.src_port = spec->hdr.src_port;
+ udp->mask.dst_port = mask->hdr.dst_port;
+ udp->mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ udp->val.src_port &= udp->mask.src_port;
+ udp->val.dst_port &= udp->mask.dst_port;
+ return 0;
+error:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, msg);
+}
+
+/**
+ * Merge TCP pattern item into flow rule handle.
+ *
+ * Additional mlx4-specific constraints on supported fields:
+ *
+ * - No support for partial masks.
+ *
+ * @param[in, out] flow
+ * Flow rule handle to update.
+ * @param[in] item
+ * Pattern item to merge.
+ * @param[in] proc
+ * Associated item-processing object.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_flow_merge_tcp(struct rte_flow *flow,
+ const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask =
+ spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
+ struct ibv_flow_spec_tcp_udp *tcp;
+ const char *msg;
+
+ if (mask &&
+ ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
+ (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
+ msg = "mlx4 does not support matching partial TCP fields";
+ goto error;
+ }
+ if (!flow->ibv_attr)
+ return 0;
+ ++flow->ibv_attr->num_of_specs;
+ tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
+ *tcp = (struct ibv_flow_spec_tcp_udp) {
+ .type = IBV_FLOW_SPEC_TCP,
+ .size = sizeof(*tcp),
+ };
+ if (!spec)
+ return 0;
+ tcp->val.dst_port = spec->hdr.dst_port;
+ tcp->val.src_port = spec->hdr.src_port;
+ tcp->mask.dst_port = mask->hdr.dst_port;
+ tcp->mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ tcp->val.src_port &= tcp->mask.src_port;
+ tcp->val.dst_port &= tcp->mask.dst_port;
+ return 0;
+error:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, msg);
+}
+
+/**
+ * Perform basic sanity checks on a pattern item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] proc
+ * Associated item-processing object.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_flow_item_check(const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error)
+{
+ const uint8_t *mask;
+ unsigned int i;
+
+ /* item->last and item->mask cannot exist without item->spec. */
+ if (!item->spec && (item->mask || item->last))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "\"mask\" or \"last\" field provided without a"
+ " corresponding \"spec\"");
+ /* No spec, no mask, no problem. */
+ if (!item->spec)
+ return 0;
+ mask = item->mask ?
+ (const uint8_t *)item->mask :
+ (const uint8_t *)proc->mask_default;
+ assert(mask);
+ /*
+ * Single-pass check to make sure that:
+ * - Mask is supported, no bits are set outside proc->mask_support.
+ * - Both item->spec and item->last are included in mask.
+ */
+ for (i = 0; i != proc->mask_sz; ++i) {
+ if (!mask[i])
+ continue;
+ if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
+ ((const uint8_t *)proc->mask_support)[i])
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "unsupported field found in \"mask\"");
+ if (item->last &&
+ (((const uint8_t *)item->spec)[i] & mask[i]) !=
+ (((const uint8_t *)item->last)[i] & mask[i]))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "range between \"spec\" and \"last\""
+ " is larger than \"mask\"");
+ }
+ return 0;
+}
+
+/** Graph of supported items and associated actions. */
+static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
+ [RTE_FLOW_ITEM_TYPE_END] = {
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
+ },
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4),
+ .mask_support = &(const struct rte_flow_item_eth){
+ /* Only destination MAC can be matched. */
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ .mask_default = &rte_flow_item_eth_mask,
+ .mask_sz = sizeof(struct rte_flow_item_eth),
+ .merge = mlx4_flow_merge_eth,
+ .dst_sz = sizeof(struct ibv_flow_spec_eth),
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
+ .mask_support = &(const struct rte_flow_item_vlan){
+ /* Only TCI VID matching is supported. */
+ .tci = RTE_BE16(0x0fff),
+ },
+ .mask_default = &rte_flow_item_vlan_mask,
+ .mask_sz = sizeof(struct rte_flow_item_vlan),
+ .merge = mlx4_flow_merge_vlan,
+ .dst_sz = 0,
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .mask_support = &(const struct rte_flow_item_ipv4){
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ },
+ },
+ .mask_default = &rte_flow_item_ipv4_mask,
+ .mask_sz = sizeof(struct rte_flow_item_ipv4),
+ .merge = mlx4_flow_merge_ipv4,
+ .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .mask_support = &(const struct rte_flow_item_udp){
+ .hdr = {
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
+ },
+ },
+ .mask_default = &rte_flow_item_udp_mask,
+ .mask_sz = sizeof(struct rte_flow_item_udp),
+ .merge = mlx4_flow_merge_udp,
+ .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .mask_support = &(const struct rte_flow_item_tcp){
+ .hdr = {
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
+ },
+ },
+ .mask_default = &rte_flow_item_tcp_mask,
+ .mask_sz = sizeof(struct rte_flow_item_tcp),
+ .merge = mlx4_flow_merge_tcp,
+ .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
+ },
+};
+
+/**
+ * Make sure a flow rule is supported and initialize associated structure.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] addr
+ * Buffer where the resulting flow rule handle pointer must be stored.
+ * If NULL, stop processing after validation stage.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_flow_prepare(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow **addr)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *action;
+ const struct mlx4_flow_proc_item *proc;
+ struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
+ struct rte_flow *flow = &temp;
+ const char *msg = NULL;
+ int overlap;
+
+ if (attr->group)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL, "groups are not supported");
+ if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL, "maximum priority level is "
+ MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
+ if (attr->egress)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL, "egress is not supported");
+ if (attr->transfer)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL, "transfer is not supported");
+ if (!attr->ingress)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL, "only ingress is supported");
+fill:
+ overlap = 0;
+ proc = mlx4_flow_proc_item_list;
+ flow->priority = attr->priority;
+ /* Go over pattern. */
+ for (item = pattern; item->type; ++item) {
+ const struct mlx4_flow_proc_item *next = NULL;
+ unsigned int i;
+ int err;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ if (item->type == MLX4_FLOW_ITEM_TYPE_INTERNAL) {
+ flow->internal = 1;
+ continue;
+ }
+ if (flow->promisc || flow->allmulti) {
+ msg = "mlx4 does not support additional matching"
+ " criteria combined with indiscriminate"
+ " matching on Ethernet headers";
+ goto exit_item_not_supported;
+ }
+ for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
+ if (proc->next_item[i] == item->type) {
+ next = &mlx4_flow_proc_item_list[item->type];
+ break;
+ }
+ }
+ if (!next)
+ goto exit_item_not_supported;
+ proc = next;
+ /*
+ * Perform basic sanity checks only once, while handle is
+ * not allocated.
+ */
+ if (flow == &temp) {
+ err = mlx4_flow_item_check(item, proc, error);
+ if (err)
+ return err;
+ }
+ if (proc->merge) {
+ err = proc->merge(flow, item, proc, error);
+ if (err)
+ return err;
+ }
+ flow->ibv_attr_size += proc->dst_sz;
+ }
+ /* Go over actions list. */
+ for (action = actions; action->type; ++action) {
+ /* This one may appear anywhere multiple times. */
+ if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+ /* Fate-deciding actions may appear exactly once. */
+ if (overlap) {
+ msg = "cannot combine several fate-deciding actions,"
+ " choose between DROP, QUEUE or RSS";
+ goto exit_action_not_supported;
+ }
+ overlap = 1;
+ switch (action->type) {
+ const struct rte_flow_action_queue *queue;
+ const struct rte_flow_action_rss *rss;
+ const uint8_t *rss_key;
+ uint32_t rss_key_len;
+ uint64_t fields;
+ unsigned int i;
+
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ flow->drop = 1;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ if (flow->rss)
+ break;
+ queue = action->conf;
+ if (queue->index >= priv->dev->data->nb_rx_queues) {
+ msg = "queue target index beyond number of"
+ " configured Rx queues";
+ goto exit_action_not_supported;
+ }
+ flow->rss = mlx4_rss_get
+ (priv, 0, mlx4_rss_hash_key_default, 1,
+ &queue->index);
+ if (!flow->rss) {
+ msg = "not enough resources for additional"
+ " single-queue RSS context";
+ goto exit_action_not_supported;
+ }
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ if (flow->rss)
+ break;
+ rss = action->conf;
+ /* Default RSS configuration if none is provided. */
+ if (rss->key_len) {
+ rss_key = rss->key;
+ rss_key_len = rss->key_len;
+ } else {
+ rss_key = mlx4_rss_hash_key_default;
+ rss_key_len = MLX4_RSS_HASH_KEY_SIZE;
+ }
+ /* Sanity checks. */
+ for (i = 0; i < rss->queue_num; ++i)
+ if (rss->queue[i] >=
+ priv->dev->data->nb_rx_queues)
+ break;
+ if (i != rss->queue_num) {
+ msg = "queue index target beyond number of"
+ " configured Rx queues";
+ goto exit_action_not_supported;
+ }
+ if (!rte_is_power_of_2(rss->queue_num)) {
+ msg = "for RSS, mlx4 requires the number of"
+ " queues to be a power of two";
+ goto exit_action_not_supported;
+ }
+ if (rss_key_len != sizeof(flow->rss->key)) {
+ msg = "mlx4 supports exactly one RSS hash key"
+ " length: "
+ MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
+ goto exit_action_not_supported;
+ }
+ for (i = 1; i < rss->queue_num; ++i)
+ if (rss->queue[i] - rss->queue[i - 1] != 1)
+ break;
+ if (i != rss->queue_num) {
+ msg = "mlx4 requires RSS contexts to use"
+ " consecutive queue indices only";
+ goto exit_action_not_supported;
+ }
+ if (rss->queue[0] % rss->queue_num) {
+ msg = "mlx4 requires the first queue of a RSS"
+ " context to be aligned on a multiple"
+ " of the context size";
+ goto exit_action_not_supported;
+ }
+ if (rss->func &&
+ rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
+ msg = "the only supported RSS hash function"
+ " is Toeplitz";
+ goto exit_action_not_supported;
+ }
+ if (rss->level) {
+ msg = "a nonzero RSS encapsulation level is"
+ " not supported";
+ goto exit_action_not_supported;
+ }
+ rte_errno = 0;
+ fields = mlx4_conv_rss_types(priv, rss->types, 0);
+ if (fields == (uint64_t)-1 && rte_errno) {
+ msg = "unsupported RSS hash type requested";
+ goto exit_action_not_supported;
+ }
+ flow->rss = mlx4_rss_get
+ (priv, fields, rss_key, rss->queue_num,
+ rss->queue);
+ if (!flow->rss) {
+ msg = "either invalid parameters or not enough"
+ " resources for additional multi-queue"
+ " RSS context";
+ goto exit_action_not_supported;
+ }
+ break;
+ default:
+ goto exit_action_not_supported;
+ }
+ }
+ /* When fate is unknown, drop traffic. */
+ if (!overlap)
+ flow->drop = 1;
+ /* Validation ends here. */
+ if (!addr) {
+ if (flow->rss)
+ mlx4_rss_put(flow->rss);
+ return 0;
+ }
+ if (flow == &temp) {
+ /* Allocate proper handle based on collected data. */
+ const struct mlx4_malloc_vec vec[] = {
+ {
+ .align = alignof(struct rte_flow),
+ .size = sizeof(*flow),
+ .addr = (void **)&flow,
+ },
+ {
+ .align = alignof(struct ibv_flow_attr),
+ .size = temp.ibv_attr_size,
+ .addr = (void **)&temp.ibv_attr,
+ },
+ };
+
+ if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) {
+ if (temp.rss)
+ mlx4_rss_put(temp.rss);
+ return rte_flow_error_set
+ (error, -rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "flow rule handle allocation failure");
+ }
+ /* Most fields will be updated by second pass. */
+ *flow = (struct rte_flow){
+ .ibv_attr = temp.ibv_attr,
+ .ibv_attr_size = sizeof(*flow->ibv_attr),
+ .rss = temp.rss,
+ };
+ *flow->ibv_attr = (struct ibv_flow_attr){
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .size = sizeof(*flow->ibv_attr),
+ .priority = attr->priority,
+ .port = priv->port,
+ };
+ goto fill;
+ }
+ *addr = flow;
+ return 0;
+exit_item_not_supported:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, msg ? msg : "item not supported");
+exit_action_not_supported:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ action, msg ? msg : "action not supported");
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+static int
+mlx4_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);
+}
+
+/**
+ * Get a drop flow rule resources instance.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Pointer to drop flow resources on success, NULL otherwise and rte_errno
+ * is set.
+ */
+static struct mlx4_drop *
+mlx4_drop_get(struct priv *priv)
+{
+ struct mlx4_drop *drop = priv->drop;
+
+ if (drop) {
+ assert(drop->refcnt);
+ assert(drop->priv == priv);
+ ++drop->refcnt;
+ return drop;
+ }
+ drop = rte_malloc(__func__, sizeof(*drop), 0);
+ if (!drop)
+ goto error;
+ *drop = (struct mlx4_drop){
+ .priv = priv,
+ .refcnt = 1,
+ };
+ drop->cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
+ if (!drop->cq)
+ goto error;
+ drop->qp = mlx4_glue->create_qp
+ (priv->pd,
+ &(struct ibv_qp_init_attr){
+ .send_cq = drop->cq,
+ .recv_cq = drop->cq,
+ .qp_type = IBV_QPT_RAW_PACKET,
+ });
+ if (!drop->qp)
+ goto error;
+ priv->drop = drop;
+ return drop;
+error:
+ if (drop->qp)
+ claim_zero(mlx4_glue->destroy_qp(drop->qp));
+ if (drop->cq)
+ claim_zero(mlx4_glue->destroy_cq(drop->cq));
+ if (drop)
+ rte_free(drop);
+ rte_errno = ENOMEM;
+ return NULL;
+}
+
+/**
+ * Give back a drop flow rule resources instance.
+ *
+ * @param drop
+ * Pointer to drop flow rule resources.
+ */
+static void
+mlx4_drop_put(struct mlx4_drop *drop)
+{
+ assert(drop->refcnt);
+ if (--drop->refcnt)
+ return;
+ drop->priv->drop = NULL;
+ claim_zero(mlx4_glue->destroy_qp(drop->qp));
+ claim_zero(mlx4_glue->destroy_cq(drop->cq));
+ rte_free(drop);
+}
+
+/**
+ * Toggle a configured flow rule.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param flow
+ * Flow rule handle to toggle.
+ * @param enable
+ * Whether associated Verbs flow must be created or removed.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_flow_toggle(struct priv *priv,
+ struct rte_flow *flow,
+ int enable,
+ struct rte_flow_error *error)
+{
+ struct ibv_qp *qp = NULL;
+ const char *msg;
+ int err;
+
+ if (!enable) {
+ if (!flow->ibv_flow)
+ return 0;
+ claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
+ flow->ibv_flow = NULL;
+ if (flow->drop)
+ mlx4_drop_put(priv->drop);
+ else if (flow->rss)
+ mlx4_rss_detach(flow->rss);
+ return 0;
+ }
+ assert(flow->ibv_attr);
+ if (!flow->internal &&
+ !priv->isolated &&
+ flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
+ if (flow->ibv_flow) {
+ claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
+ flow->ibv_flow = NULL;
+ if (flow->drop)
+ mlx4_drop_put(priv->drop);
+ else if (flow->rss)
+ mlx4_rss_detach(flow->rss);
+ }
+ err = EACCES;
+ msg = ("priority level "
+ MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)
+ " is reserved when not in isolated mode");
+ goto error;
+ }
+ if (flow->rss) {
+ struct mlx4_rss *rss = flow->rss;
+ int missing = 0;
+ unsigned int i;
+
+ /* Stop at the first nonexistent target queue. */
+ for (i = 0; i != rss->queues; ++i)
+ if (rss->queue_id[i] >=
+ priv->dev->data->nb_rx_queues ||
+ !priv->dev->data->rx_queues[rss->queue_id[i]]) {
+ missing = 1;
+ break;
+ }
+ if (flow->ibv_flow) {
+ if (missing ^ !flow->drop)
+ return 0;
+ /* Verbs flow needs updating. */
+ claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
+ flow->ibv_flow = NULL;
+ if (flow->drop)
+ mlx4_drop_put(priv->drop);
+ else
+ mlx4_rss_detach(rss);
+ }
+ if (!missing) {
+ err = mlx4_rss_attach(rss);
+ if (err) {
+ err = -err;
+ msg = "cannot create indirection table or hash"
+ " QP to associate flow rule with";
+ goto error;
+ }
+ qp = rss->qp;
+ }
+ /* A missing target queue drops traffic implicitly. */
+ flow->drop = missing;
+ }
+ if (flow->drop) {
+ if (flow->ibv_flow)
+ return 0;
+ mlx4_drop_get(priv);
+ if (!priv->drop) {
+ err = rte_errno;
+ msg = "resources for drop flow rule cannot be created";
+ goto error;
+ }
+ qp = priv->drop->qp;
+ }
+ assert(qp);
+ if (flow->ibv_flow)
+ return 0;
+ flow->ibv_flow = mlx4_glue->create_flow(qp, flow->ibv_attr);
+ if (flow->ibv_flow)
+ return 0;
+ if (flow->drop)
+ mlx4_drop_put(priv->drop);
+ else if (flow->rss)
+ mlx4_rss_detach(flow->rss);
+ err = errno;
+ msg = "flow rule rejected by device";
+error:
+ return rte_flow_error_set
+ (error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg);
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+static struct rte_flow *
+mlx4_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow;
+ int err;
+
+ err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
+ if (err)
+ return NULL;
+ err = mlx4_flow_toggle(priv, flow, priv->started, error);
+ if (!err) {
+ struct rte_flow *curr = LIST_FIRST(&priv->flows);
+
+ /* New rules are inserted after internal ones. */
+ if (!curr || !curr->internal) {
+ LIST_INSERT_HEAD(&priv->flows, flow, next);
+ } else {
+ while (LIST_NEXT(curr, next) &&
+ LIST_NEXT(curr, next)->internal)
+ curr = LIST_NEXT(curr, next);
+ LIST_INSERT_AFTER(curr, flow, next);
+ }
+ return flow;
+ }
+ if (flow->rss)
+ mlx4_rss_put(flow->rss);
+ rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ error->message);
+ rte_free(flow);
+ return NULL;
+}
+
+/**
+ * Configure isolated mode.
+ *
+ * @see rte_flow_isolate()
+ * @see rte_flow_ops
+ */
+static int
+mlx4_flow_isolate(struct rte_eth_dev *dev,
+ int enable,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ if (!!enable == !!priv->isolated)
+ return 0;
+ priv->isolated = !!enable;
+ if (mlx4_flow_sync(priv, error)) {
+ priv->isolated = !enable;
+ return -rte_errno;
+ }
+ return 0;
+}
+
+/**
+ * Destroy a flow rule.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+static int
+mlx4_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ int err = mlx4_flow_toggle(priv, flow, 0, error);
+
+ if (err)
+ return err;
+ LIST_REMOVE(flow, next);
+ if (flow->rss)
+ mlx4_rss_put(flow->rss);
+ rte_free(flow);
+ return 0;
+}
+
+/**
+ * Destroy user-configured flow rules.
+ *
+ * This function skips internal flows rules.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+static int
+mlx4_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = LIST_FIRST(&priv->flows);
+
+ while (flow) {
+ struct rte_flow *next = LIST_NEXT(flow, next);
+
+ if (!flow->internal)
+ mlx4_flow_destroy(dev, flow, error);
+ flow = next;
+ }
+ return 0;
+}
+
+/**
+ * Helper function to determine the next configured VLAN filter.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param vlan
+ * VLAN ID to use as a starting point.
+ *
+ * @return
+ * Next configured VLAN ID or a high value (>= 4096) if there is none.
+ */
+static uint16_t
+mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
+{
+ while (vlan < 4096) {
+ if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
+ (UINT64_C(1) << (vlan % 64)))
+ return vlan;
+ ++vlan;
+ }
+ return vlan;
+}
+
+/**
+ * Generate internal flow rules.
+ *
+ * Various flow rules are created depending on the mode the device is in:
+ *
+ * 1. Promiscuous:
+ * port MAC + broadcast + catch-all (VLAN filtering is ignored).
+ * 2. All multicast:
+ * port MAC/VLAN + broadcast + catch-all multicast.
+ * 3. Otherwise:
+ * port MAC/VLAN + broadcast MAC/VLAN.
+ *
+ * About MAC flow rules:
+ *
+ * - MAC flow rules are generated from @p dev->data->mac_addrs
+ * (@p priv->mac array).
+ * - An additional flow rule for Ethernet broadcasts is also generated.
+ * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
+ * is enabled and VLAN filters are configured.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
+{
+ struct rte_flow_attr attr = {
+ .priority = MLX4_FLOW_PRIORITY_LAST,
+ .ingress = 1,
+ };
+ struct rte_flow_item_eth eth_spec;
+ const struct rte_flow_item_eth eth_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ };
+ const struct rte_flow_item_eth eth_allmulti = {
+ .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ };
+ struct rte_flow_item_vlan vlan_spec;
+ const struct rte_flow_item_vlan vlan_mask = {
+ .tci = RTE_BE16(0x0fff),
+ };
+ struct rte_flow_item pattern[] = {
+ {
+ .type = MLX4_FLOW_ITEM_TYPE_INTERNAL,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &eth_spec,
+ .mask = &eth_mask,
+ },
+ {
+ /* Replaced with VLAN if filtering is enabled. */
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ /*
+ * Round number of queues down to their previous power of 2 to
+ * comply with RSS context limitations. Extra queues silently do not
+ * get RSS by default.
+ */
+ uint32_t queues =
+ rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
+ uint16_t queue[queues];
+ struct rte_flow_action_rss action_rss = {
+ .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ .level = 0,
+ .types = 0,
+ .key_len = MLX4_RSS_HASH_KEY_SIZE,
+ .queue_num = queues,
+ .key = mlx4_rss_hash_key_default,
+ .queue = queue,
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_RSS,
+ .conf = &action_rss,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct ether_addr *rule_mac = &eth_spec.dst;
+ rte_be16_t *rule_vlan =
+ (priv->dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER) &&
+ !priv->dev->data->promiscuous ?
+ &vlan_spec.tci :
+ NULL;
+ uint16_t vlan = 0;
+ struct rte_flow *flow;
+ unsigned int i;
+ int err = 0;
+
+ /* Nothing to be done if there are no Rx queues. */
+ if (!queues)
+ goto error;
+ /* Prepare default RSS configuration. */
+ for (i = 0; i != queues; ++i)
+ queue[i] = i;
+ /*
+ * Set up VLAN item if filtering is enabled and at least one VLAN
+ * filter is configured.
+ */
+ if (rule_vlan) {
+ vlan = mlx4_flow_internal_next_vlan(priv, 0);
+ if (vlan < 4096) {
+ pattern[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .spec = &vlan_spec,
+ .mask = &vlan_mask,
+ };
+next_vlan:
+ *rule_vlan = rte_cpu_to_be_16(vlan);
+ } else {
+ rule_vlan = NULL;
+ }
+ }
+ for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) {
+ const struct ether_addr *mac;
+
+ /* Broadcasts are handled by an extra iteration. */
+ if (i < RTE_DIM(priv->mac))
+ mac = &priv->mac[i];
+ else
+ mac = &eth_mask.dst;
+ if (is_zero_ether_addr(mac))
+ continue;
+ /* Check if MAC flow rule is already present. */
+ for (flow = LIST_FIRST(&priv->flows);
+ flow && flow->internal;
+ flow = LIST_NEXT(flow, next)) {
+ const struct ibv_flow_spec_eth *eth =
+ (const void *)((uintptr_t)flow->ibv_attr +
+ sizeof(*flow->ibv_attr));
+ unsigned int j;
+
+ if (!flow->mac)
+ continue;
+ assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
+ assert(flow->ibv_attr->num_of_specs == 1);
+ assert(eth->type == IBV_FLOW_SPEC_ETH);
+ assert(flow->rss);
+ if (rule_vlan &&
+ (eth->val.vlan_tag != *rule_vlan ||
+ eth->mask.vlan_tag != RTE_BE16(0x0fff)))
+ continue;
+ if (!rule_vlan && eth->mask.vlan_tag)
+ continue;
+ for (j = 0; j != sizeof(mac->addr_bytes); ++j)
+ if (eth->val.dst_mac[j] != mac->addr_bytes[j] ||
+ eth->mask.dst_mac[j] != UINT8_C(0xff) ||
+ eth->val.src_mac[j] != UINT8_C(0x00) ||
+ eth->mask.src_mac[j] != UINT8_C(0x00))
+ break;
+ if (j != sizeof(mac->addr_bytes))
+ continue;
+ if (flow->rss->queues != queues ||
+ memcmp(flow->rss->queue_id, action_rss.queue,
+ queues * sizeof(flow->rss->queue_id[0])))
+ continue;
+ break;
+ }
+ if (!flow || !flow->internal) {
+ /* Not found, create a new flow rule. */
+ memcpy(rule_mac, mac, sizeof(*mac));
+ flow = mlx4_flow_create(priv->dev, &attr, pattern,
+ actions, error);
+ if (!flow) {
+ err = -rte_errno;
+ goto error;
+ }
+ }
+ flow->select = 1;
+ flow->mac = 1;
+ }
+ if (rule_vlan) {
+ vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1);
+ if (vlan < 4096)
+ goto next_vlan;
+ }
+ /* Take care of promiscuous and all multicast flow rules. */
+ if (priv->dev->data->promiscuous || priv->dev->data->all_multicast) {
+ for (flow = LIST_FIRST(&priv->flows);
+ flow && flow->internal;
+ flow = LIST_NEXT(flow, next)) {
+ if (priv->dev->data->promiscuous) {
+ if (flow->promisc)
+ break;
+ } else {
+ assert(priv->dev->data->all_multicast);
+ if (flow->allmulti)
+ break;
+ }
+ }
+ if (flow && flow->internal) {
+ assert(flow->rss);
+ if (flow->rss->queues != queues ||
+ memcmp(flow->rss->queue_id, action_rss.queue,
+ queues * sizeof(flow->rss->queue_id[0])))
+ flow = NULL;
+ }
+ if (!flow || !flow->internal) {
+ /* Not found, create a new flow rule. */
+ if (priv->dev->data->promiscuous) {
+ pattern[1].spec = NULL;
+ pattern[1].mask = NULL;
+ } else {
+ assert(priv->dev->data->all_multicast);
+ pattern[1].spec = &eth_allmulti;
+ pattern[1].mask = &eth_allmulti;
+ }
+ pattern[2] = pattern[3];
+ flow = mlx4_flow_create(priv->dev, &attr, pattern,
+ actions, error);
+ if (!flow) {
+ err = -rte_errno;
+ goto error;
+ }
+ }
+ assert(flow->promisc || flow->allmulti);
+ flow->select = 1;
+ }
+error:
+ /* Clear selection and clean up stale internal flow rules. */
+ flow = LIST_FIRST(&priv->flows);
+ while (flow && flow->internal) {
+ struct rte_flow *next = LIST_NEXT(flow, next);
+
+ if (!flow->select)
+ claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
+ else
+ flow->select = 0;
+ flow = next;
+ }
+ return err;
+}
+
+/**
+ * Synchronize flow rules.
+ *
+ * This function synchronizes flow rules with the state of the device by
+ * taking into account isolated mode and whether target queues are
+ * configured.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
+{
+ struct rte_flow *flow;
+ int ret;
+
+ /* Internal flow rules are guaranteed to come first in the list. */
+ if (priv->isolated) {
+ /*
+ * Get rid of them in isolated mode, stop at the first
+ * non-internal rule found.
+ */
+ for (flow = LIST_FIRST(&priv->flows);
+ flow && flow->internal;
+ flow = LIST_FIRST(&priv->flows))
+ claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
+ } else {
+ /* Refresh internal rules. */
+ ret = mlx4_flow_internal(priv, error);
+ if (ret)
+ return ret;
+ }
+ /* Toggle the remaining flow rules . */
+ LIST_FOREACH(flow, &priv->flows, next) {
+ ret = mlx4_flow_toggle(priv, flow, priv->started, error);
+ if (ret)
+ return ret;
+ }
+ if (!priv->started)
+ assert(!priv->drop);
+ return 0;
+}
+
+/**
+ * Clean up all flow rules.
+ *
+ * Unlike mlx4_flow_flush(), this function takes care of all remaining flow
+ * rules regardless of whether they are internal or user-configured.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+mlx4_flow_clean(struct priv *priv)
+{
+ struct rte_flow *flow;
+
+ while ((flow = LIST_FIRST(&priv->flows)))
+ mlx4_flow_destroy(priv->dev, flow, NULL);
+ assert(LIST_EMPTY(&priv->rss));
+}
+
+static const struct rte_flow_ops mlx4_flow_ops = {
+ .validate = mlx4_flow_validate,
+ .create = mlx4_flow_create,
+ .destroy = mlx4_flow_destroy,
+ .flush = mlx4_flow_flush,
+ .isolate = mlx4_flow_isolate,
+};
+
+/**
+ * Manage filter operations.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ break;
+ *(const void **)arg = &mlx4_flow_ops;
+ return 0;
+ default:
+ ERROR("%p: filter type (%d) not supported",
+ (void *)dev, filter_type);
+ break;
+ }
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.h
new file mode 100644
index 00000000..2917ebe9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX4_FLOW_H_
+#define RTE_PMD_MLX4_FLOW_H_
+
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_eth_ctrl.h>
+#include <rte_ethdev_driver.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_byteorder.h>
+
+/** Last and lowest priority level for a flow rule. */
+#define MLX4_FLOW_PRIORITY_LAST UINT32_C(0xfff)
+
+/** Meta pattern item used to distinguish internal rules. */
+#define MLX4_FLOW_ITEM_TYPE_INTERNAL ((enum rte_flow_item_type)-1)
+
+/** PMD-specific (mlx4) definition of a flow rule handle. */
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+ struct ibv_flow *ibv_flow; /**< Verbs flow. */
+ struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+ uint32_t ibv_attr_size; /**< Size of Verbs attributes. */
+ uint32_t select:1; /**< Used by operations on the linked list. */
+ uint32_t internal:1; /**< Internal flow rule outside isolated mode. */
+ uint32_t mac:1; /**< Rule associated with a configured MAC address. */
+ uint32_t promisc:1; /**< This rule matches everything. */
+ uint32_t allmulti:1; /**< This rule matches all multicast traffic. */
+ uint32_t drop:1; /**< This rule drops packets. */
+ uint32_t priority; /**< Flow rule priority. */
+ struct mlx4_rss *rss; /**< Rx target. */
+};
+
+/* mlx4_flow.c */
+
+uint64_t mlx4_conv_rss_types(struct priv *priv, uint64_t types,
+ int verbs_to_dpdk);
+int mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error);
+void mlx4_flow_clean(struct priv *priv);
+int mlx4_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+
+#endif /* RTE_PMD_MLX4_FLOW_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.c
new file mode 100644
index 00000000..67b3bfac
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.c
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx4dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include "mlx4_glue.h"
+
+static int
+mlx4_glue_fork_init(void)
+{
+ return ibv_fork_init();
+}
+
+static int
+mlx4_glue_get_async_event(struct ibv_context *context,
+ struct ibv_async_event *event)
+{
+ return ibv_get_async_event(context, event);
+}
+
+static void
+mlx4_glue_ack_async_event(struct ibv_async_event *event)
+{
+ ibv_ack_async_event(event);
+}
+
+static struct ibv_pd *
+mlx4_glue_alloc_pd(struct ibv_context *context)
+{
+ return ibv_alloc_pd(context);
+}
+
+static int
+mlx4_glue_dealloc_pd(struct ibv_pd *pd)
+{
+ return ibv_dealloc_pd(pd);
+}
+
+static struct ibv_device **
+mlx4_glue_get_device_list(int *num_devices)
+{
+ return ibv_get_device_list(num_devices);
+}
+
+static void
+mlx4_glue_free_device_list(struct ibv_device **list)
+{
+ ibv_free_device_list(list);
+}
+
+static struct ibv_context *
+mlx4_glue_open_device(struct ibv_device *device)
+{
+ return ibv_open_device(device);
+}
+
+static int
+mlx4_glue_close_device(struct ibv_context *context)
+{
+ return ibv_close_device(context);
+}
+
+static const char *
+mlx4_glue_get_device_name(struct ibv_device *device)
+{
+ return ibv_get_device_name(device);
+}
+
+static int
+mlx4_glue_query_device(struct ibv_context *context,
+ struct ibv_device_attr *device_attr)
+{
+ return ibv_query_device(context, device_attr);
+}
+
+static int
+mlx4_glue_query_device_ex(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr)
+{
+ return ibv_query_device_ex(context, input, attr);
+}
+
+static int
+mlx4_glue_query_port(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr)
+{
+ return ibv_query_port(context, port_num, port_attr);
+}
+
+static const char *
+mlx4_glue_port_state_str(enum ibv_port_state port_state)
+{
+ return ibv_port_state_str(port_state);
+}
+
+static struct ibv_comp_channel *
+mlx4_glue_create_comp_channel(struct ibv_context *context)
+{
+ return ibv_create_comp_channel(context);
+}
+
+static int
+mlx4_glue_destroy_comp_channel(struct ibv_comp_channel *channel)
+{
+ return ibv_destroy_comp_channel(channel);
+}
+
+static struct ibv_cq *
+mlx4_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context,
+ struct ibv_comp_channel *channel, int comp_vector)
+{
+ return ibv_create_cq(context, cqe, cq_context, channel, comp_vector);
+}
+
+static int
+mlx4_glue_destroy_cq(struct ibv_cq *cq)
+{
+ return ibv_destroy_cq(cq);
+}
+
+static int
+mlx4_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,
+ void **cq_context)
+{
+ return ibv_get_cq_event(channel, cq, cq_context);
+}
+
+static void
+mlx4_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)
+{
+ ibv_ack_cq_events(cq, nevents);
+}
+
+static struct ibv_flow *
+mlx4_glue_create_flow(struct ibv_qp *qp, struct ibv_flow_attr *flow)
+{
+ return ibv_create_flow(qp, flow);
+}
+
+static int
+mlx4_glue_destroy_flow(struct ibv_flow *flow_id)
+{
+ return ibv_destroy_flow(flow_id);
+}
+
+static struct ibv_qp *
+mlx4_glue_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr)
+{
+ return ibv_create_qp(pd, qp_init_attr);
+}
+
+static struct ibv_qp *
+mlx4_glue_create_qp_ex(struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex)
+{
+ return ibv_create_qp_ex(context, qp_init_attr_ex);
+}
+
+static int
+mlx4_glue_destroy_qp(struct ibv_qp *qp)
+{
+ return ibv_destroy_qp(qp);
+}
+
+static int
+mlx4_glue_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
+{
+ return ibv_modify_qp(qp, attr, attr_mask);
+}
+
+static struct ibv_mr *
+mlx4_glue_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
+{
+ return ibv_reg_mr(pd, addr, length, access);
+}
+
+static int
+mlx4_glue_dereg_mr(struct ibv_mr *mr)
+{
+ return ibv_dereg_mr(mr);
+}
+
+static struct ibv_rwq_ind_table *
+mlx4_glue_create_rwq_ind_table(struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr)
+{
+ return ibv_create_rwq_ind_table(context, init_attr);
+}
+
+static int
+mlx4_glue_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)
+{
+ return ibv_destroy_rwq_ind_table(rwq_ind_table);
+}
+
+static struct ibv_wq *
+mlx4_glue_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr)
+{
+ return ibv_create_wq(context, wq_init_attr);
+}
+
+static int
+mlx4_glue_destroy_wq(struct ibv_wq *wq)
+{
+ return ibv_destroy_wq(wq);
+}
+static int
+mlx4_glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr)
+{
+ return ibv_modify_wq(wq, wq_attr);
+}
+
+static int
+mlx4_glue_dv_init_obj(struct mlx4dv_obj *obj, uint64_t obj_type)
+{
+ return mlx4dv_init_obj(obj, obj_type);
+}
+
+static int
+mlx4_glue_dv_set_context_attr(struct ibv_context *context,
+ enum mlx4dv_set_ctx_attr_type attr_type,
+ void *attr)
+{
+ return mlx4dv_set_context_attr(context, attr_type, attr);
+}
+
+const struct mlx4_glue *mlx4_glue = &(const struct mlx4_glue){
+ .version = MLX4_GLUE_VERSION,
+ .fork_init = mlx4_glue_fork_init,
+ .get_async_event = mlx4_glue_get_async_event,
+ .ack_async_event = mlx4_glue_ack_async_event,
+ .alloc_pd = mlx4_glue_alloc_pd,
+ .dealloc_pd = mlx4_glue_dealloc_pd,
+ .get_device_list = mlx4_glue_get_device_list,
+ .free_device_list = mlx4_glue_free_device_list,
+ .open_device = mlx4_glue_open_device,
+ .close_device = mlx4_glue_close_device,
+ .get_device_name = mlx4_glue_get_device_name,
+ .query_device = mlx4_glue_query_device,
+ .query_device_ex = mlx4_glue_query_device_ex,
+ .query_port = mlx4_glue_query_port,
+ .port_state_str = mlx4_glue_port_state_str,
+ .create_comp_channel = mlx4_glue_create_comp_channel,
+ .destroy_comp_channel = mlx4_glue_destroy_comp_channel,
+ .create_cq = mlx4_glue_create_cq,
+ .destroy_cq = mlx4_glue_destroy_cq,
+ .get_cq_event = mlx4_glue_get_cq_event,
+ .ack_cq_events = mlx4_glue_ack_cq_events,
+ .create_flow = mlx4_glue_create_flow,
+ .destroy_flow = mlx4_glue_destroy_flow,
+ .create_qp = mlx4_glue_create_qp,
+ .create_qp_ex = mlx4_glue_create_qp_ex,
+ .destroy_qp = mlx4_glue_destroy_qp,
+ .modify_qp = mlx4_glue_modify_qp,
+ .reg_mr = mlx4_glue_reg_mr,
+ .dereg_mr = mlx4_glue_dereg_mr,
+ .create_rwq_ind_table = mlx4_glue_create_rwq_ind_table,
+ .destroy_rwq_ind_table = mlx4_glue_destroy_rwq_ind_table,
+ .create_wq = mlx4_glue_create_wq,
+ .destroy_wq = mlx4_glue_destroy_wq,
+ .modify_wq = mlx4_glue_modify_wq,
+ .dv_init_obj = mlx4_glue_dv_init_obj,
+ .dv_set_context_attr = mlx4_glue_dv_set_context_attr,
+};
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.h
new file mode 100644
index 00000000..668ca867
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef MLX4_GLUE_H_
+#define MLX4_GLUE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx4dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#ifndef MLX4_GLUE_VERSION
+#define MLX4_GLUE_VERSION ""
+#endif
+
+/* LIB_GLUE_VERSION must be updated every time this structure is modified. */
+struct mlx4_glue {
+ const char *version;
+ int (*fork_init)(void);
+ int (*get_async_event)(struct ibv_context *context,
+ struct ibv_async_event *event);
+ void (*ack_async_event)(struct ibv_async_event *event);
+ struct ibv_pd *(*alloc_pd)(struct ibv_context *context);
+ int (*dealloc_pd)(struct ibv_pd *pd);
+ struct ibv_device **(*get_device_list)(int *num_devices);
+ void (*free_device_list)(struct ibv_device **list);
+ struct ibv_context *(*open_device)(struct ibv_device *device);
+ int (*close_device)(struct ibv_context *context);
+ const char *(*get_device_name)(struct ibv_device *device);
+ int (*query_device)(struct ibv_context *context,
+ struct ibv_device_attr *device_attr);
+ int (*query_device_ex)(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr);
+ int (*query_port)(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr);
+ const char *(*port_state_str)(enum ibv_port_state port_state);
+ struct ibv_comp_channel *(*create_comp_channel)
+ (struct ibv_context *context);
+ int (*destroy_comp_channel)(struct ibv_comp_channel *channel);
+ struct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,
+ void *cq_context,
+ struct ibv_comp_channel *channel,
+ int comp_vector);
+ int (*destroy_cq)(struct ibv_cq *cq);
+ int (*get_cq_event)(struct ibv_comp_channel *channel,
+ struct ibv_cq **cq, void **cq_context);
+ void (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents);
+ struct ibv_flow *(*create_flow)(struct ibv_qp *qp,
+ struct ibv_flow_attr *flow);
+ int (*destroy_flow)(struct ibv_flow *flow_id);
+ struct ibv_qp *(*create_qp)(struct ibv_pd *pd,
+ struct ibv_qp_init_attr *qp_init_attr);
+ struct ibv_qp *(*create_qp_ex)
+ (struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex);
+ int (*destroy_qp)(struct ibv_qp *qp);
+ int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask);
+ struct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr,
+ size_t length, int access);
+ int (*dereg_mr)(struct ibv_mr *mr);
+ struct ibv_rwq_ind_table *(*create_rwq_ind_table)
+ (struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr);
+ int (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);
+ struct ibv_wq *(*create_wq)(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr);
+ int (*destroy_wq)(struct ibv_wq *wq);
+ int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);
+ int (*dv_init_obj)(struct mlx4dv_obj *obj, uint64_t obj_type);
+ int (*dv_set_context_attr)(struct ibv_context *context,
+ enum mlx4dv_set_ctx_attr_type attr_type,
+ void *attr);
+};
+
+const struct mlx4_glue *mlx4_glue;
+
+#endif /* MLX4_GLUE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_intr.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_intr.c
new file mode 100644
index 00000000..eeb982a0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_intr.c
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+/**
+ * @file
+ * Interrupts handling for mlx4 driver.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_alarm.h>
+#include <rte_errno.h>
+#include <rte_ethdev_driver.h>
+#include <rte_io.h>
+#include <rte_interrupts.h>
+
+#include "mlx4.h"
+#include "mlx4_glue.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+static int mlx4_link_status_check(struct priv *priv);
+
+/**
+ * Clean up Rx interrupts handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+mlx4_rx_intr_vec_disable(struct priv *priv)
+{
+ struct rte_intr_handle *intr_handle = &priv->intr_handle;
+
+ rte_intr_free_epoll_fd(intr_handle);
+ free(intr_handle->intr_vec);
+ intr_handle->nb_efd = 0;
+ intr_handle->intr_vec = NULL;
+}
+
+/**
+ * Allocate queue vector and fill epoll fd list for Rx interrupts.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_rx_intr_vec_enable(struct priv *priv)
+{
+ unsigned int i;
+ unsigned int rxqs_n = priv->dev->data->nb_rx_queues;
+ unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ unsigned int count = 0;
+ struct rte_intr_handle *intr_handle = &priv->intr_handle;
+
+ mlx4_rx_intr_vec_disable(priv);
+ intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
+ if (intr_handle->intr_vec == NULL) {
+ rte_errno = ENOMEM;
+ ERROR("failed to allocate memory for interrupt vector,"
+ " Rx interrupts will not be supported");
+ return -rte_errno;
+ }
+ for (i = 0; i != n; ++i) {
+ struct rxq *rxq = priv->dev->data->rx_queues[i];
+
+ /* Skip queues that cannot request interrupts. */
+ if (!rxq || !rxq->channel) {
+ /* Use invalid intr_vec[] index to disable entry. */
+ intr_handle->intr_vec[i] =
+ RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID;
+ continue;
+ }
+ if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
+ rte_errno = E2BIG;
+ ERROR("too many Rx queues for interrupt vector size"
+ " (%d), Rx interrupts cannot be enabled",
+ RTE_MAX_RXTX_INTR_VEC_ID);
+ mlx4_rx_intr_vec_disable(priv);
+ return -rte_errno;
+ }
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+ intr_handle->efds[count] = rxq->channel->fd;
+ count++;
+ }
+ if (!count)
+ mlx4_rx_intr_vec_disable(priv);
+ else
+ intr_handle->nb_efd = count;
+ return 0;
+}
+
+/**
+ * Process scheduled link status check.
+ *
+ * If LSC interrupts are requested, process related callback.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+mlx4_link_status_alarm(struct priv *priv)
+{
+ const struct rte_intr_conf *const intr_conf =
+ &priv->dev->data->dev_conf.intr_conf;
+
+ assert(priv->intr_alarm == 1);
+ priv->intr_alarm = 0;
+ if (intr_conf->lsc && !mlx4_link_status_check(priv))
+ _rte_eth_dev_callback_process(priv->dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+}
+
+/**
+ * Check link status.
+ *
+ * In case of inconsistency, another check is scheduled.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success (link status is consistent), negative errno value
+ * otherwise and rte_errno is set.
+ */
+static int
+mlx4_link_status_check(struct priv *priv)
+{
+ struct rte_eth_link *link = &priv->dev->data->dev_link;
+ int ret = mlx4_link_update(priv->dev, 0);
+
+ if (ret)
+ return ret;
+ if ((!link->link_speed && link->link_status) ||
+ (link->link_speed && !link->link_status)) {
+ if (!priv->intr_alarm) {
+ /* Inconsistent status, check again later. */
+ ret = rte_eal_alarm_set(MLX4_INTR_ALARM_TIMEOUT,
+ (void (*)(void *))
+ mlx4_link_status_alarm,
+ priv);
+ if (ret)
+ return ret;
+ priv->intr_alarm = 1;
+ }
+ rte_errno = EINPROGRESS;
+ return -rte_errno;
+ }
+ return 0;
+}
+
+/**
+ * Handle interrupts from the NIC.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+mlx4_interrupt_handler(struct priv *priv)
+{
+ enum { LSC, RMV, };
+ static const enum rte_eth_event_type type[] = {
+ [LSC] = RTE_ETH_EVENT_INTR_LSC,
+ [RMV] = RTE_ETH_EVENT_INTR_RMV,
+ };
+ uint32_t caught[RTE_DIM(type)] = { 0 };
+ struct ibv_async_event event;
+ const struct rte_intr_conf *const intr_conf =
+ &priv->dev->data->dev_conf.intr_conf;
+ unsigned int i;
+
+ /* Read all message and acknowledge them. */
+ while (!mlx4_glue->get_async_event(priv->ctx, &event)) {
+ switch (event.event_type) {
+ case IBV_EVENT_PORT_ACTIVE:
+ case IBV_EVENT_PORT_ERR:
+ if (intr_conf->lsc && !mlx4_link_status_check(priv))
+ ++caught[LSC];
+ break;
+ case IBV_EVENT_DEVICE_FATAL:
+ if (intr_conf->rmv)
+ ++caught[RMV];
+ break;
+ default:
+ DEBUG("event type %d on physical port %d not handled",
+ event.event_type, event.element.port_num);
+ }
+ mlx4_glue->ack_async_event(&event);
+ }
+ for (i = 0; i != RTE_DIM(caught); ++i)
+ if (caught[i])
+ _rte_eth_dev_callback_process(priv->dev, type[i],
+ NULL);
+}
+
+/**
+ * MLX4 CQ notification .
+ *
+ * @param rxq
+ * Pointer to receive queue structure.
+ * @param solicited
+ * Is request solicited or not.
+ */
+static void
+mlx4_arm_cq(struct rxq *rxq, int solicited)
+{
+ struct mlx4_cq *cq = &rxq->mcq;
+ uint64_t doorbell;
+ uint32_t sn = cq->arm_sn & MLX4_CQ_DB_GEQ_N_MASK;
+ uint32_t ci = cq->cons_index & MLX4_CQ_DB_CI_MASK;
+ uint32_t cmd = solicited ? MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT;
+
+ *cq->arm_db = rte_cpu_to_be_32(sn << 28 | cmd | ci);
+ /*
+ * Make sure that the doorbell record in host memory is
+ * written before ringing the doorbell via PCI MMIO.
+ */
+ rte_wmb();
+ doorbell = sn << 28 | cmd | cq->cqn;
+ doorbell <<= 32;
+ doorbell |= ci;
+ rte_write64(rte_cpu_to_be_64(doorbell), cq->cq_db_reg);
+}
+
+/**
+ * Uninstall interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_intr_uninstall(struct priv *priv)
+{
+ int err = rte_errno; /* Make sure rte_errno remains unchanged. */
+
+ if (priv->intr_handle.fd != -1) {
+ rte_intr_callback_unregister(&priv->intr_handle,
+ (void (*)(void *))
+ mlx4_interrupt_handler,
+ priv);
+ priv->intr_handle.fd = -1;
+ }
+ rte_eal_alarm_cancel((void (*)(void *))mlx4_link_status_alarm, priv);
+ priv->intr_alarm = 0;
+ mlx4_rxq_intr_disable(priv);
+ rte_errno = err;
+ return 0;
+}
+
+/**
+ * Install interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_intr_install(struct priv *priv)
+{
+ const struct rte_intr_conf *const intr_conf =
+ &priv->dev->data->dev_conf.intr_conf;
+ int rc;
+
+ mlx4_intr_uninstall(priv);
+ if (intr_conf->lsc | intr_conf->rmv) {
+ priv->intr_handle.fd = priv->ctx->async_fd;
+ rc = rte_intr_callback_register(&priv->intr_handle,
+ (void (*)(void *))
+ mlx4_interrupt_handler,
+ priv);
+ if (rc < 0) {
+ rte_errno = -rc;
+ goto error;
+ }
+ }
+ return 0;
+error:
+ mlx4_intr_uninstall(priv);
+ return -rte_errno;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt disable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Rx queue index.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct rxq *rxq = dev->data->rx_queues[idx];
+ struct ibv_cq *ev_cq;
+ void *ev_ctx;
+ int ret;
+
+ if (!rxq || !rxq->channel) {
+ ret = EINVAL;
+ } else {
+ ret = mlx4_glue->get_cq_event(rxq->cq->channel, &ev_cq,
+ &ev_ctx);
+ if (ret || ev_cq != rxq->cq)
+ ret = EINVAL;
+ }
+ if (ret) {
+ rte_errno = ret;
+ WARN("unable to disable interrupt on rx queue %d",
+ idx);
+ } else {
+ rxq->mcq.arm_sn++;
+ mlx4_glue->ack_cq_events(rxq->cq, 1);
+ }
+ return -ret;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt enable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Rx queue index.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct rxq *rxq = dev->data->rx_queues[idx];
+ int ret = 0;
+
+ if (!rxq || !rxq->channel) {
+ ret = EINVAL;
+ rte_errno = ret;
+ WARN("unable to arm interrupt on rx queue %d", idx);
+ } else {
+ mlx4_arm_cq(rxq, 0);
+ }
+ return -ret;
+}
+
+/**
+ * Enable datapath interrupts.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rxq_intr_enable(struct priv *priv)
+{
+ const struct rte_intr_conf *const intr_conf =
+ &priv->dev->data->dev_conf.intr_conf;
+
+ if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
+ goto error;
+ return 0;
+error:
+ return -rte_errno;
+}
+
+/**
+ * Disable datapath interrupts, keeping other interrupts intact.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+mlx4_rxq_intr_disable(struct priv *priv)
+{
+ int err = rte_errno; /* Make sure rte_errno remains unchanged. */
+
+ mlx4_rx_intr_vec_disable(priv);
+ rte_errno = err;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.c
new file mode 100644
index 00000000..d23d3c61
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.c
@@ -0,0 +1,1181 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+/**
+ * @file
+ * Memory management functions for mlx4 driver.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_rwlock.h>
+
+#include "mlx4_glue.h"
+#include "mlx4_mr.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+struct mr_find_contig_memsegs_data {
+ uintptr_t addr;
+ uintptr_t start;
+ uintptr_t end;
+ const struct rte_memseg_list *msl;
+};
+
+struct mr_update_mp_data {
+ struct rte_eth_dev *dev;
+ struct mlx4_mr_ctrl *mr_ctrl;
+ int ret;
+};
+
+/**
+ * Expand B-tree table to a given size. Can't be called with holding
+ * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc().
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries for expansion.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_btree_expand(struct mlx4_mr_btree *bt, int n)
+{
+ void *mem;
+ int ret = 0;
+
+ if (n <= bt->size)
+ return ret;
+ /*
+ * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is
+ * used inside if there's no room to expand. Because this is a quite
+ * rare case and a part of very slow path, it is very acceptable.
+ * Initially cache_bh[] will be given practically enough space and once
+ * it is expanded, expansion wouldn't be needed again ever.
+ */
+ mem = rte_realloc(bt->table, n * sizeof(struct mlx4_mr_cache), 0);
+ if (mem == NULL) {
+ /* Not an error, B-tree search will be skipped. */
+ WARN("failed to expand MR B-tree (%p) table", (void *)bt);
+ ret = -1;
+ } else {
+ DEBUG("expanded MR B-tree table (size=%u)", n);
+ bt->table = mem;
+ bt->size = n;
+ }
+ return ret;
+}
+
+/**
+ * Look up LKey from given B-tree lookup table, store the last index and return
+ * searched LKey.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param[out] idx
+ * Pointer to index. Even on search failure, returns index where it stops
+ * searching so that index can be used when inserting a new entry.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mr_btree_lookup(struct mlx4_mr_btree *bt, uint16_t *idx, uintptr_t addr)
+{
+ struct mlx4_mr_cache *lkp_tbl;
+ uint16_t n;
+ uint16_t base = 0;
+
+ assert(bt != NULL);
+ lkp_tbl = *bt->table;
+ n = bt->len;
+ /* First entry must be NULL for comparison. */
+ assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
+ lkp_tbl[0].lkey == UINT32_MAX));
+ /* Binary search. */
+ do {
+ register uint16_t delta = n >> 1;
+
+ if (addr < lkp_tbl[base + delta].start) {
+ n = delta;
+ } else {
+ base += delta;
+ n -= delta;
+ }
+ } while (n > 1);
+ assert(addr >= lkp_tbl[base].start);
+ *idx = base;
+ if (addr < lkp_tbl[base].end)
+ return lkp_tbl[base].lkey;
+ /* Not found. */
+ return UINT32_MAX;
+}
+
+/**
+ * Insert an entry to B-tree lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param entry
+ * Pointer to new entry to insert.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_btree_insert(struct mlx4_mr_btree *bt, struct mlx4_mr_cache *entry)
+{
+ struct mlx4_mr_cache *lkp_tbl;
+ uint16_t idx = 0;
+ size_t shift;
+
+ assert(bt != NULL);
+ assert(bt->len <= bt->size);
+ assert(bt->len > 0);
+ lkp_tbl = *bt->table;
+ /* Find out the slot for insertion. */
+ if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
+ DEBUG("abort insertion to B-tree(%p): already exist at"
+ " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ /* Already exist, return. */
+ return 0;
+ }
+ /* If table is full, return error. */
+ if (unlikely(bt->len == bt->size)) {
+ bt->overflow = 1;
+ return -1;
+ }
+ /* Insert entry. */
+ ++idx;
+ shift = (bt->len - idx) * sizeof(struct mlx4_mr_cache);
+ if (shift)
+ memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
+ lkp_tbl[idx] = *entry;
+ bt->len++;
+ DEBUG("inserted B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ return 0;
+}
+
+/**
+ * Initialize B-tree and allocate memory for lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries to allocate.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket)
+{
+ if (bt == NULL) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ memset(bt, 0, sizeof(*bt));
+ bt->table = rte_calloc_socket("B-tree table",
+ n, sizeof(struct mlx4_mr_cache),
+ 0, socket);
+ if (bt->table == NULL) {
+ rte_errno = ENOMEM;
+ ERROR("failed to allocate memory for btree cache on socket %d",
+ socket);
+ return -rte_errno;
+ }
+ bt->size = n;
+ /* First entry must be NULL for binary search. */
+ (*bt->table)[bt->len++] = (struct mlx4_mr_cache) {
+ .lkey = UINT32_MAX,
+ };
+ DEBUG("initialized B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
+ return 0;
+}
+
+/**
+ * Free B-tree resources.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+void
+mlx4_mr_btree_free(struct mlx4_mr_btree *bt)
+{
+ if (bt == NULL)
+ return;
+ DEBUG("freeing B-tree %p with table %p", (void *)bt, (void *)bt->table);
+ rte_free(bt->table);
+ memset(bt, 0, sizeof(*bt));
+}
+
+#ifndef NDEBUG
+/**
+ * Dump all the entries in a B-tree
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+void
+mlx4_mr_btree_dump(struct mlx4_mr_btree *bt)
+{
+ int idx;
+ struct mlx4_mr_cache *lkp_tbl;
+
+ if (bt == NULL)
+ return;
+ lkp_tbl = *bt->table;
+ for (idx = 0; idx < bt->len; ++idx) {
+ struct mlx4_mr_cache *entry = &lkp_tbl[idx];
+
+ DEBUG("B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ }
+}
+#endif
+
+/**
+ * Find virtually contiguous memory chunk in a given MR.
+ *
+ * @param dev
+ * Pointer to MR structure.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If not found, this will not be
+ * updated.
+ * @param start_idx
+ * Start index of the memseg bitmap.
+ *
+ * @return
+ * Next index to go on lookup.
+ */
+static int
+mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry,
+ int base_idx)
+{
+ uintptr_t start = 0;
+ uintptr_t end = 0;
+ uint32_t idx = 0;
+
+ for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
+ if (rte_bitmap_get(mr->ms_bmp, idx)) {
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
+
+ msl = mr->msl;
+ ms = rte_fbarray_get(&msl->memseg_arr,
+ mr->ms_base_idx + idx);
+ assert(msl->page_sz == ms->hugepage_sz);
+ if (!start)
+ start = ms->addr_64;
+ end = ms->addr_64 + ms->hugepage_sz;
+ } else if (start) {
+ /* Passed the end of a fragment. */
+ break;
+ }
+ }
+ if (start) {
+ /* Found one chunk. */
+ entry->start = start;
+ entry->end = end;
+ entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
+ }
+ return idx;
+}
+
+/**
+ * Insert a MR to the global B-tree cache. It may fail due to low-on-memory.
+ * Then, this entry will have to be searched by mr_lookup_dev_list() in
+ * mlx4_mr_create() on miss.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr
+ * Pointer to MR to insert.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int n;
+
+ DEBUG("port %u inserting MR(%p) to global cache",
+ dev->data->port_id, (void *)mr);
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx4_mr_cache entry = { 0, };
+
+ /* Find a contiguous chunk and advance the index. */
+ n = mr_find_next_chunk(mr, &entry, n);
+ if (!entry.end)
+ break;
+ if (mr_btree_insert(&priv->mr.cache, &entry) < 0) {
+ /*
+ * Overflowed, but the global table cannot be expanded
+ * because of deadlock.
+ */
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Look up address in the original global MR list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Found MR on match, NULL otherwise.
+ */
+static struct mlx4_mr *
+mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
+ uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr;
+
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
+ unsigned int n;
+
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx4_mr_cache ret = { 0, };
+
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (addr >= ret.start && addr < ret.end) {
+ /* Found. */
+ *entry = ret;
+ return mr;
+ }
+ }
+ }
+ return NULL;
+}
+
+/**
+ * Look up address on device.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
+ */
+static uint32_t
+mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
+ uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint16_t idx;
+ uint32_t lkey = UINT32_MAX;
+ struct mlx4_mr *mr;
+
+ /*
+ * If the global cache has overflowed since it failed to expand the
+ * B-tree table, it can't have all the existing MRs. Then, the address
+ * has to be searched by traversing the original MR list instead, which
+ * is very slow path. Otherwise, the global cache is all inclusive.
+ */
+ if (!unlikely(priv->mr.cache.overflow)) {
+ lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (lkey != UINT32_MAX)
+ *entry = (*priv->mr.cache.table)[idx];
+ } else {
+ /* Falling back to the slowest path. */
+ mr = mr_lookup_dev_list(dev, entry, addr);
+ if (mr != NULL)
+ lkey = entry->lkey;
+ }
+ assert(lkey == UINT32_MAX || (addr >= entry->start &&
+ addr < entry->end));
+ return lkey;
+}
+
+/**
+ * Free MR resources. MR lock must not be held to avoid a deadlock. rte_free()
+ * can raise memory free event and the callback function will spin on the lock.
+ *
+ * @param mr
+ * Pointer to MR to free.
+ */
+static void
+mr_free(struct mlx4_mr *mr)
+{
+ if (mr == NULL)
+ return;
+ DEBUG("freeing MR(%p):", (void *)mr);
+ if (mr->ibv_mr != NULL)
+ claim_zero(mlx4_glue->dereg_mr(mr->ibv_mr));
+ if (mr->ms_bmp != NULL)
+ rte_bitmap_free(mr->ms_bmp);
+ rte_free(mr);
+}
+
+/**
+ * Releass resources of detached MR having no online entry.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mlx4_mr_garbage_collect(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr_next;
+ struct mlx4_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
+
+ /*
+ * MR can't be freed with holding the lock because rte_free() could call
+ * memory free callback function. This will be a deadlock situation.
+ */
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Detach the whole free list and release it after unlocking. */
+ free_list = priv->mr.mr_free_list;
+ LIST_INIT(&priv->mr.mr_free_list);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Release resources. */
+ mr_next = LIST_FIRST(&free_list);
+ while (mr_next != NULL) {
+ struct mlx4_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ mr_free(mr);
+ }
+}
+
+/* Called during rte_memseg_contig_walk() by mlx4_mr_create(). */
+static int
+mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
+ const struct rte_memseg *ms, size_t len, void *arg)
+{
+ struct mr_find_contig_memsegs_data *data = arg;
+
+ if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
+ return 0;
+ /* Found, save it and stop walking. */
+ data->start = ms->addr_64;
+ data->end = ms->addr_64 + len;
+ data->msl = msl;
+ return 1;
+}
+
+/**
+ * Create a new global Memroy Region (MR) for a missing virtual address.
+ * Register entire virtually contiguous memory chunk around the address.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ * created. If failed to create one, this will not be updated.
+ * @param addr
+ * Target virtual address to register.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
+ */
+static uint32_t
+mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
+ uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
+ struct mlx4_mr *mr = NULL;
+ size_t len;
+ uint32_t ms_n;
+ uint32_t bmp_size;
+ void *bmp_mem;
+ int ms_idx_shift = -1;
+ unsigned int n;
+ struct mr_find_contig_memsegs_data data = {
+ .addr = addr,
+ };
+ struct mr_find_contig_memsegs_data data_re;
+
+ DEBUG("port %u creating a MR using address (%p)",
+ dev->data->port_id, (void *)addr);
+ /*
+ * Release detached MRs if any. This can't be called with holding either
+ * memory_hotplug_lock or priv->mr.rwlock. MRs on the free list have
+ * been detached by the memory free event but it couldn't be released
+ * inside the callback due to deadlock. As a result, releasing resources
+ * is quite opportunistic.
+ */
+ mlx4_mr_garbage_collect(dev);
+ /*
+ * Find out a contiguous virtual address chunk in use, to which the
+ * given address belongs, in order to register maximum range. In the
+ * best case where mempools are not dynamically recreated and
+ * '--socket-mem' is speicified as an EAL option, it is very likely to
+ * have only one MR(LKey) per a socket and per a hugepage-size even
+ * though the system memory is highly fragmented.
+ */
+ if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
+ WARN("port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENXIO;
+ goto err_nolock;
+ }
+alloc_resources:
+ /* Addresses must be page-aligned. */
+ assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
+ assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
+ msl = data.msl;
+ ms = rte_mem_virt2memseg((void *)data.start, msl);
+ len = data.end - data.start;
+ assert(msl->page_sz == ms->hugepage_sz);
+ /* Number of memsegs in the range. */
+ ms_n = len / msl->page_sz;
+ DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " page_sz=0x%" PRIx64 ", ms_n=%u",
+ dev->data->port_id, (void *)addr,
+ data.start, data.end, msl->page_sz, ms_n);
+ /* Size of memory for bitmap. */
+ bmp_size = rte_bitmap_get_memory_footprint(ms_n);
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE) +
+ bmp_size,
+ RTE_CACHE_LINE_SIZE, msl->socket_id);
+ if (mr == NULL) {
+ WARN("port %u unable to allocate memory for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENOMEM;
+ goto err_nolock;
+ }
+ mr->msl = msl;
+ /*
+ * Save the index of the first memseg and initialize memseg bitmap. To
+ * see if a memseg of ms_idx in the memseg-list is still valid, check:
+ * rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
+ */
+ mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
+ mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
+ if (mr->ms_bmp == NULL) {
+ WARN("port %u unable to initialize bitamp for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EINVAL;
+ goto err_nolock;
+ }
+ /*
+ * Should recheck whether the extended contiguous chunk is still valid.
+ * Because memory_hotplug_lock can't be held if there's any memory
+ * related calls in a critical path, resource allocation above can't be
+ * locked. If the memory has been changed at this point, try again with
+ * just single page. If not, go on with the big chunk atomically from
+ * here.
+ */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ data_re = data;
+ if (len > msl->page_sz &&
+ !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
+ WARN("port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENXIO;
+ goto err_memlock;
+ }
+ if (data.start != data_re.start || data.end != data_re.end) {
+ /*
+ * The extended contiguous chunk has been changed. Try again
+ * with single memseg instead.
+ */
+ data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
+ data.end = data.start + msl->page_sz;
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ mr_free(mr);
+ goto alloc_resources;
+ }
+ assert(data.msl == data_re.msl);
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /*
+ * Check the address is really missing. If other thread already created
+ * one or it is not found due to overflow, abort and return.
+ */
+ if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) {
+ /*
+ * Insert to the global cache table. It may fail due to
+ * low-on-memory. Then, this entry will have to be searched
+ * here again.
+ */
+ mr_btree_insert(&priv->mr.cache, entry);
+ DEBUG("port %u found MR for %p on final lookup, abort",
+ dev->data->port_id, (void *)addr);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ /*
+ * Must be unlocked before calling rte_free() because
+ * mlx4_mr_mem_event_free_cb() can be called inside.
+ */
+ mr_free(mr);
+ return entry->lkey;
+ }
+ /*
+ * Trim start and end addresses for verbs MR. Set bits for registering
+ * memsegs but exclude already registered ones. Bitmap can be
+ * fragmented.
+ */
+ for (n = 0; n < ms_n; ++n) {
+ uintptr_t start;
+ struct mlx4_mr_cache ret = { 0, };
+
+ start = data_re.start + n * msl->page_sz;
+ /* Exclude memsegs already registered by other MRs. */
+ if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
+ /*
+ * Start from the first unregistered memseg in the
+ * extended range.
+ */
+ if (ms_idx_shift == -1) {
+ mr->ms_base_idx += n;
+ data.start = start;
+ ms_idx_shift = n;
+ }
+ data.end = start + msl->page_sz;
+ rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
+ ++mr->ms_n;
+ }
+ }
+ len = data.end - data.start;
+ mr->ms_bmp_n = len / msl->page_sz;
+ assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
+ /*
+ * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
+ * called with holding the memory lock because it doesn't use
+ * mlx4_alloc_buf_extern() which eventually calls rte_malloc_socket()
+ * through mlx4_alloc_verbs_buf().
+ */
+ mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)data.start, len,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (mr->ibv_mr == NULL) {
+ WARN("port %u fail to create a verbs MR for address (%p)",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EINVAL;
+ goto err_mrlock;
+ }
+ assert((uintptr_t)mr->ibv_mr->addr == data.start);
+ assert(mr->ibv_mr->length == len);
+ LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
+ DEBUG("port %u MR CREATED (%p) for %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ dev->data->port_id, (void *)mr, (void *)addr,
+ data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ /* Insert to the global cache table. */
+ mr_insert_dev_cache(dev, mr);
+ /* Fill in output data. */
+ mr_lookup_dev(dev, entry, addr);
+ /* Lookup can't fail. */
+ assert(entry->lkey != UINT32_MAX);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ return entry->lkey;
+err_mrlock:
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+err_memlock:
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+err_nolock:
+ /*
+ * In case of error, as this can be called in a datapath, a warning
+ * message per an error is preferable instead. Must be unlocked before
+ * calling rte_free() because mlx4_mr_mem_event_free_cb() can be called
+ * inside.
+ */
+ mr_free(mr);
+ return UINT32_MAX;
+}
+
+/**
+ * Rebuild the global B-tree cache of device from the original MR list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mr_rebuild_dev_cache(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr;
+
+ DEBUG("port %u rebuild dev cache[]", dev->data->port_id);
+ /* Flush cache to rebuild. */
+ priv->mr.cache.len = 1;
+ priv->mr.cache.overflow = 0;
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr)
+ if (mr_insert_dev_cache(dev, mr) < 0)
+ return;
+}
+
+/**
+ * Callback for memory free event. Iterate freed memsegs and check whether it
+ * belongs to an existing MR. If found, clear the bit from bitmap of MR. As a
+ * result, the MR would be fragmented. If it becomes empty, the MR will be freed
+ * later by mlx4_mr_garbage_collect().
+ *
+ * The global cache must be rebuilt if there's any change and this event has to
+ * be propagated to dataplane threads to flush the local caches.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param addr
+ * Address of freed memory.
+ * @param len
+ * Size of freed memory.
+ */
+static void
+mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_memseg_list *msl;
+ struct mlx4_mr *mr;
+ int ms_n;
+ int i;
+ int rebuild = 0;
+
+ DEBUG("port %u free callback: addr=%p, len=%zu",
+ dev->data->port_id, addr, len);
+ msl = rte_mem_virt2memseg_list(addr);
+ /* addr and len must be page-aligned. */
+ assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+ assert(len == RTE_ALIGN(len, msl->page_sz));
+ ms_n = len / msl->page_sz;
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Clear bits of freed memsegs from MR. */
+ for (i = 0; i < ms_n; ++i) {
+ const struct rte_memseg *ms;
+ struct mlx4_mr_cache entry;
+ uintptr_t start;
+ int ms_idx;
+ uint32_t pos;
+
+ /* Find MR having this memseg. */
+ start = (uintptr_t)addr + i * msl->page_sz;
+ mr = mr_lookup_dev_list(dev, &entry, start);
+ if (mr == NULL)
+ continue;
+ ms = rte_mem_virt2memseg((void *)start, msl);
+ assert(ms != NULL);
+ assert(msl->page_sz == ms->hugepage_sz);
+ ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ pos = ms_idx - mr->ms_base_idx;
+ assert(rte_bitmap_get(mr->ms_bmp, pos));
+ assert(pos < mr->ms_bmp_n);
+ DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
+ dev->data->port_id, (void *)mr, pos, (void *)start);
+ rte_bitmap_clear(mr->ms_bmp, pos);
+ if (--mr->ms_n == 0) {
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
+ DEBUG("port %u remove MR(%p) from list",
+ dev->data->port_id, (void *)mr);
+ }
+ /*
+ * MR is fragmented or will be freed. the global cache must be
+ * rebuilt.
+ */
+ rebuild = 1;
+ }
+ if (rebuild) {
+ mr_rebuild_dev_cache(dev);
+ /*
+ * Flush local caches by propagating invalidation across cores.
+ * rte_smp_wmb() is enough to synchronize this event. If one of
+ * freed memsegs is seen by other core, that means the memseg
+ * has been allocated by allocator, which will come after this
+ * free call. Therefore, this store instruction (incrementing
+ * generation below) will be guaranteed to be seen by other core
+ * before the core sees the newly allocated memory.
+ */
+ ++priv->mr.dev_gen;
+ DEBUG("broadcasting local cache flush, gen=%d",
+ priv->mr.dev_gen);
+ rte_smp_wmb();
+ }
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+#ifndef NDEBUG
+ if (rebuild)
+ mlx4_mr_dump_dev(dev);
+#endif
+}
+
+/**
+ * Callback for memory event.
+ *
+ * @param event_type
+ * Memory event type.
+ * @param addr
+ * Address of memory.
+ * @param len
+ * Size of memory.
+ */
+void
+mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg __rte_unused)
+{
+ struct priv *priv;
+
+ switch (event_type) {
+ case RTE_MEM_EVENT_FREE:
+ rte_rwlock_read_lock(&mlx4_mem_event_rwlock);
+ /* Iterate all the existing mlx4 devices. */
+ LIST_FOREACH(priv, &mlx4_mem_event_cb_list, mem_event_cb)
+ mlx4_mr_mem_event_free_cb(priv->dev, addr, len);
+ rte_rwlock_read_unlock(&mlx4_mem_event_rwlock);
+ break;
+ case RTE_MEM_EVENT_ALLOC:
+ default:
+ break;
+ }
+}
+
+/**
+ * Look up address in the global MR cache table. If not found, create a new MR.
+ * Insert the found/created entry to local bottom-half cache table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ * created. If failed to create one, this is not written.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx4_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ struct mlx4_mr_cache *entry, uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr_btree *bt = &mr_ctrl->cache_bh;
+ uint16_t idx;
+ uint32_t lkey;
+
+ /* If local cache table is full, try to double it. */
+ if (unlikely(bt->len == bt->size))
+ mr_btree_expand(bt, bt->size << 1);
+ /* Look up in the global cache. */
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (lkey != UINT32_MAX) {
+ /* Found. */
+ *entry = (*priv->mr.cache.table)[idx];
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /*
+ * Update local cache. Even if it fails, return the found entry
+ * to update top-half cache. Next time, this entry will be found
+ * in the global cache.
+ */
+ mr_btree_insert(bt, entry);
+ return lkey;
+ }
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /* First time to see the address? Create a new MR. */
+ lkey = mlx4_mr_create(dev, entry, addr);
+ /*
+ * Update the local cache if successfully created a new global MR. Even
+ * if failed to create one, there's no action to take in this datapath
+ * code. As returning LKey is invalid, this will eventually make HW
+ * fail.
+ */
+ if (lkey != UINT32_MAX)
+ mr_btree_insert(bt, entry);
+ return lkey;
+}
+
+/**
+ * Bottom-half of LKey search on datapath. Firstly search in cache_bh[] and if
+ * misses, search in the global MR cache table and update the new entry to
+ * per-queue local caches.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx4_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ uintptr_t addr)
+{
+ uint32_t lkey;
+ uint16_t bh_idx = 0;
+ /* Victim in top-half cache to replace with new entry. */
+ struct mlx4_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];
+
+ /* Binary-search MR translation table. */
+ lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
+ /* Update top-half cache. */
+ if (likely(lkey != UINT32_MAX)) {
+ *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
+ } else {
+ /*
+ * If missed in local lookup table, search in the global cache
+ * and local cache_bh[] will be updated inside if possible.
+ * Top-half cache entry will also be updated.
+ */
+ lkey = mlx4_mr_lookup_dev(dev, mr_ctrl, repl, addr);
+ if (unlikely(lkey == UINT32_MAX))
+ return UINT32_MAX;
+ }
+ /* Update the most recently used entry. */
+ mr_ctrl->mru = mr_ctrl->head;
+ /* Point to the next victim, the oldest. */
+ mr_ctrl->head = (mr_ctrl->head + 1) % MLX4_MR_CACHE_N;
+ return lkey;
+}
+
+/**
+ * Bottom-half of LKey search on Rx.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr)
+{
+ struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ struct priv *priv = rxq->priv;
+
+ DEBUG("Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ rxq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+}
+
+/**
+ * Bottom-half of LKey search on Tx.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
+{
+ struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct priv *priv = txq->priv;
+
+ DEBUG("Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ txq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+}
+
+/**
+ * Flush all of the local cache entries.
+ *
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ */
+void
+mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl)
+{
+ /* Reset the most-recently-used index. */
+ mr_ctrl->mru = 0;
+ /* Reset the linear search array. */
+ mr_ctrl->head = 0;
+ memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
+ /* Reset the B-tree table. */
+ mr_ctrl->cache_bh.len = 1;
+ mr_ctrl->cache_bh.overflow = 0;
+ /* Update the generation number. */
+ mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
+ DEBUG("mr_ctrl(%p): flushed, cur_gen=%d",
+ (void *)mr_ctrl, mr_ctrl->cur_gen);
+}
+
+/* Called during rte_mempool_mem_iter() by mlx4_mr_update_mp(). */
+static void
+mlx4_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx __rte_unused)
+{
+ struct mr_update_mp_data *data = opaque;
+ uint32_t lkey;
+
+ /* Stop iteration if failed in the previous walk. */
+ if (data->ret < 0)
+ return;
+ /* Register address of the chunk and update local caches. */
+ lkey = mlx4_mr_addr2mr_bh(data->dev, data->mr_ctrl,
+ (uintptr_t)memhdr->addr);
+ if (lkey == UINT32_MAX)
+ data->ret = -1;
+}
+
+/**
+ * Register entire memory chunks in a Mempool.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param mp
+ * Pointer to registering Mempool.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+int
+mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
+{
+ struct mr_update_mp_data data = {
+ .dev = dev,
+ .mr_ctrl = mr_ctrl,
+ .ret = 0,
+ };
+
+ rte_mempool_mem_iter(mp, mlx4_mr_update_mp_cb, &data);
+ return data.ret;
+}
+
+#ifndef NDEBUG
+/**
+ * Dump all the created MRs and the global cache entries.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx4_mr_dump_dev(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr;
+ int mr_n = 0;
+ int chunk_n = 0;
+
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
+ unsigned int n;
+
+ DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
+ dev->data->port_id, mr_n++,
+ rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_n, mr->ms_bmp_n);
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx4_mr_cache ret = { 0, };
+
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (!ret.end)
+ break;
+ DEBUG(" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
+ chunk_n++, ret.start, ret.end);
+ }
+ }
+ DEBUG("port %u dumping global cache", dev->data->port_id);
+ mlx4_mr_btree_dump(&priv->mr.cache);
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+}
+#endif
+
+/**
+ * Release all the created MRs and resources. Remove device from memory callback
+ * list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx4_mr_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);
+
+ /* Remove from memory callback device list. */
+ rte_rwlock_write_lock(&mlx4_mem_event_rwlock);
+ LIST_REMOVE(priv, mem_event_cb);
+ rte_rwlock_write_unlock(&mlx4_mem_event_rwlock);
+#ifndef NDEBUG
+ mlx4_mr_dump_dev(dev);
+#endif
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Detach from MR list and move to free list. */
+ while (mr_next != NULL) {
+ struct mlx4_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
+ }
+ LIST_INIT(&priv->mr.mr_list);
+ /* Free global cache. */
+ mlx4_mr_btree_free(&priv->mr.cache);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Free all remaining MRs. */
+ mlx4_mr_garbage_collect(dev);
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.h
new file mode 100644
index 00000000..37a365a8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX4_MR_H_
+#define RTE_PMD_MLX4_MR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_eal_memconfig.h>
+#include <rte_ethdev.h>
+#include <rte_rwlock.h>
+#include <rte_bitmap.h>
+
+/* Size of per-queue MR cache array for linear search. */
+#define MLX4_MR_CACHE_N 8
+
+/* Size of MR cache table for binary search. */
+#define MLX4_MR_BTREE_CACHE_N 256
+
+/* Memory Region object. */
+struct mlx4_mr {
+ LIST_ENTRY(mlx4_mr) mr; /**< Pointer to the prev/next entry. */
+ struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
+ const struct rte_memseg_list *msl;
+ int ms_base_idx; /* Start index of msl->memseg_arr[]. */
+ int ms_n; /* Number of memsegs in use. */
+ uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
+ struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonged to MR. */
+};
+
+/* Cache entry for Memory Region. */
+struct mlx4_mr_cache {
+ uintptr_t start; /* Start address of MR. */
+ uintptr_t end; /* End address of MR. */
+ uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
+} __rte_packed;
+
+/* MR Cache table for Binary search. */
+struct mlx4_mr_btree {
+ uint16_t len; /* Number of entries. */
+ uint16_t size; /* Total number of entries. */
+ int overflow; /* Mark failure of table expansion. */
+ struct mlx4_mr_cache (*table)[];
+} __rte_packed;
+
+/* Per-queue MR control descriptor. */
+struct mlx4_mr_ctrl {
+ uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
+ uint32_t cur_gen; /* Generation number saved to flush caches. */
+ uint16_t mru; /* Index of last hit entry in top-half cache. */
+ uint16_t head; /* Index of the oldest entry in top-half cache. */
+ struct mlx4_mr_cache cache[MLX4_MR_CACHE_N]; /* Cache for top-half. */
+ struct mlx4_mr_btree cache_bh; /* Cache for bottom-half. */
+} __rte_packed;
+
+extern struct mlx4_dev_list mlx4_mem_event_cb_list;
+extern rte_rwlock_t mlx4_mem_event_rwlock;
+
+/* First entry must be NULL for comparison. */
+#define mlx4_mr_btree_len(bt) ((bt)->len - 1)
+
+int mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket);
+void mlx4_mr_btree_free(struct mlx4_mr_btree *bt);
+void mlx4_mr_btree_dump(struct mlx4_mr_btree *bt);
+void mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg);
+int mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp);
+void mlx4_mr_dump_dev(struct rte_eth_dev *dev);
+void mlx4_mr_release(struct rte_eth_dev *dev);
+
+/**
+ * Look up LKey from given lookup table by linear search. Firstly look up the
+ * last-hit entry. If miss, the entire array is searched. If found, update the
+ * last-hit index and return LKey.
+ *
+ * @param lkp_tbl
+ * Pointer to lookup table.
+ * @param[in,out] cached_idx
+ * Pointer to last-hit index.
+ * @param n
+ * Size of lookup table.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx4_mr_lookup_cache(struct mlx4_mr_cache *lkp_tbl, uint16_t *cached_idx,
+ uint16_t n, uintptr_t addr)
+{
+ uint16_t idx;
+
+ if (likely(addr >= lkp_tbl[*cached_idx].start &&
+ addr < lkp_tbl[*cached_idx].end))
+ return lkp_tbl[*cached_idx].lkey;
+ for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
+ if (addr >= lkp_tbl[idx].start &&
+ addr < lkp_tbl[idx].end) {
+ /* Found. */
+ *cached_idx = idx;
+ return lkp_tbl[idx].lkey;
+ }
+ }
+ return UINT32_MAX;
+}
+
+#endif /* RTE_PMD_MLX4_MR_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_prm.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4_prm.h
new file mode 100644
index 00000000..aef77ba0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_prm.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef MLX4_PRM_H_
+#define MLX4_PRM_H_
+
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx4dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+#include "mlx4_autoconf.h"
+
+/* ConnectX-3 Tx queue basic block. */
+#define MLX4_TXBB_SHIFT 6
+#define MLX4_TXBB_SIZE (1 << MLX4_TXBB_SHIFT)
+
+/* Typical TSO descriptor with 16 gather entries is 352 bytes. */
+#define MLX4_MAX_SGE 32
+#define MLX4_MAX_WQE_SIZE \
+ (MLX4_MAX_SGE * sizeof(struct mlx4_wqe_data_seg) + \
+ sizeof(struct mlx4_wqe_ctrl_seg))
+#define MLX4_SEG_SHIFT 4
+
+/* Send queue stamping/invalidating information. */
+#define MLX4_SQ_STAMP_STRIDE 64
+#define MLX4_SQ_STAMP_DWORDS (MLX4_SQ_STAMP_STRIDE / 4)
+#define MLX4_SQ_OWNER_BIT 31
+#define MLX4_SQ_STAMP_VAL 0x7fffffff
+
+/* Work queue element (WQE) flags. */
+#define MLX4_WQE_CTRL_IIP_HDR_CSUM (1 << 28)
+#define MLX4_WQE_CTRL_IL4_HDR_CSUM (1 << 27)
+#define MLX4_WQE_CTRL_RR (1 << 6)
+
+/* CQE checksum flags. */
+enum {
+ MLX4_CQE_L2_TUNNEL_IPV4 = (int)(1u << 25),
+ MLX4_CQE_L2_TUNNEL_L4_CSUM = (int)(1u << 26),
+ MLX4_CQE_L2_TUNNEL = (int)(1u << 27),
+ MLX4_CQE_L2_VLAN_MASK = (int)(3u << 29),
+ MLX4_CQE_L2_TUNNEL_IPOK = (int)(1u << 31),
+};
+
+/* CQE status flags. */
+#define MLX4_CQE_STATUS_IPV6F (1 << 12)
+#define MLX4_CQE_STATUS_IPV4 (1 << 22)
+#define MLX4_CQE_STATUS_IPV4F (1 << 23)
+#define MLX4_CQE_STATUS_IPV6 (1 << 24)
+#define MLX4_CQE_STATUS_IPV4OPT (1 << 25)
+#define MLX4_CQE_STATUS_TCP (1 << 26)
+#define MLX4_CQE_STATUS_UDP (1 << 27)
+#define MLX4_CQE_STATUS_PTYPE_MASK \
+ (MLX4_CQE_STATUS_IPV4 | \
+ MLX4_CQE_STATUS_IPV4F | \
+ MLX4_CQE_STATUS_IPV6 | \
+ MLX4_CQE_STATUS_IPV4OPT | \
+ MLX4_CQE_STATUS_TCP | \
+ MLX4_CQE_STATUS_UDP)
+
+/* Send queue information. */
+struct mlx4_sq {
+ volatile uint8_t *buf; /**< SQ buffer. */
+ volatile uint8_t *eob; /**< End of SQ buffer */
+ uint32_t size; /**< SQ size includes headroom. */
+ uint32_t remain_size; /**< Remaining WQE room in SQ (bytes). */
+ uint32_t owner_opcode;
+ /**< Default owner opcode with HW valid owner bit. */
+ uint32_t stamp; /**< Stamp value with an invalid HW owner bit. */
+ volatile uint32_t *db; /**< Pointer to the doorbell. */
+ uint32_t doorbell_qpn; /**< qp number to write to the doorbell. */
+};
+
+/* Completion queue events, numbers and masks. */
+#define MLX4_CQ_DB_GEQ_N_MASK 0x3
+#define MLX4_CQ_DOORBELL 0x20
+#define MLX4_CQ_DB_CI_MASK 0xffffff
+
+/* Completion queue information. */
+struct mlx4_cq {
+ volatile void *cq_uar; /**< CQ user access region. */
+ volatile void *cq_db_reg; /**< CQ doorbell register. */
+ volatile uint32_t *set_ci_db; /**< Pointer to the CQ doorbell. */
+ volatile uint32_t *arm_db; /**< Arming Rx events doorbell. */
+ volatile uint8_t *buf; /**< Pointer to the completion queue buffer. */
+ uint32_t cqe_cnt; /**< Number of entries in the queue. */
+ uint32_t cqe_64:1; /**< CQ entry size is 64 bytes. */
+ uint32_t cons_index; /**< Last queue entry that was handled. */
+ uint32_t cqn; /**< CQ number. */
+ int arm_sn; /**< Rx event counter. */
+};
+
+#ifndef HAVE_IBV_MLX4_WQE_LSO_SEG
+/*
+ * WQE LSO segment structure.
+ * Defined here as backward compatibility for rdma-core v17 and below.
+ * Similar definition is found in infiniband/mlx4dv.h in rdma-core v18
+ * and above.
+ */
+struct mlx4_wqe_lso_seg {
+ rte_be32_t mss_hdr_size;
+ rte_be32_t header[];
+};
+#endif
+
+/**
+ * Retrieve a CQE entry from a CQ.
+ *
+ * cqe = cq->buf + cons_index * cqe_size + cqe_offset
+ *
+ * Where cqe_size is 32 or 64 bytes and cqe_offset is 0 or 32 (depending on
+ * cqe_size).
+ *
+ * @param cq
+ * CQ to retrieve entry from.
+ * @param index
+ * Entry index.
+ *
+ * @return
+ * Pointer to CQE entry.
+ */
+static inline volatile struct mlx4_cqe *
+mlx4_get_cqe(struct mlx4_cq *cq, uint32_t index)
+{
+ return (volatile struct mlx4_cqe *)(cq->buf +
+ ((index & (cq->cqe_cnt - 1)) <<
+ (5 + cq->cqe_64)) +
+ (cq->cqe_64 << 5));
+}
+
+/**
+ * Transpose a flag in a value.
+ *
+ * @param val
+ * Input value.
+ * @param from
+ * Flag to retrieve from input value.
+ * @param to
+ * Flag to set in output value.
+ *
+ * @return
+ * Output value with transposed flag enabled if present on input.
+ */
+static inline uint64_t
+mlx4_transpose(uint64_t val, uint64_t from, uint64_t to)
+{
+ return (from >= to ?
+ (val & from) / (from / to) :
+ (val & from) * (to / from));
+}
+
+#endif /* MLX4_PRM_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxq.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxq.c
new file mode 100644
index 00000000..9737da2e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxq.c
@@ -0,0 +1,936 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+/**
+ * @file
+ * Rx queues configuration for mlx4 driver.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx4dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_ethdev_driver.h>
+#include <rte_flow.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+
+#include "mlx4.h"
+#include "mlx4_glue.h"
+#include "mlx4_flow.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+/**
+ * Historical RSS hash key.
+ *
+ * This used to be the default for mlx4 in Linux before v3.19 switched to
+ * generating random hash keys through netdev_rss_key_fill().
+ *
+ * It is used in this PMD for consistency with past DPDK releases but can
+ * now be overridden through user configuration.
+ *
+ * Note: this is not const to work around API quirks.
+ */
+uint8_t
+mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = {
+ 0x2c, 0xc6, 0x81, 0xd1,
+ 0x5b, 0xdb, 0xf4, 0xf7,
+ 0xfc, 0xa2, 0x83, 0x19,
+ 0xdb, 0x1a, 0x3e, 0x94,
+ 0x6b, 0x9e, 0x38, 0xd9,
+ 0x2c, 0x9c, 0x03, 0xd1,
+ 0xad, 0x99, 0x44, 0xa7,
+ 0xd9, 0x56, 0x3d, 0x59,
+ 0x06, 0x3c, 0x25, 0xf3,
+ 0xfc, 0x1f, 0xdc, 0x2a,
+};
+
+/**
+ * Obtain a RSS context with specified properties.
+ *
+ * Used when creating a flow rule targeting one or several Rx queues.
+ *
+ * If a matching RSS context already exists, it is returned with its
+ * reference count incremented.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param fields
+ * Fields for RSS processing (Verbs format).
+ * @param[in] key
+ * Hash key to use (whose size is exactly MLX4_RSS_HASH_KEY_SIZE).
+ * @param queues
+ * Number of target queues.
+ * @param[in] queue_id
+ * Target queues.
+ *
+ * @return
+ * Pointer to RSS context on success, NULL otherwise and rte_errno is set.
+ */
+struct mlx4_rss *
+mlx4_rss_get(struct priv *priv, uint64_t fields,
+ const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
+ uint16_t queues, const uint16_t queue_id[])
+{
+ struct mlx4_rss *rss;
+ size_t queue_id_size = sizeof(queue_id[0]) * queues;
+
+ LIST_FOREACH(rss, &priv->rss, next)
+ if (fields == rss->fields &&
+ queues == rss->queues &&
+ !memcmp(key, rss->key, MLX4_RSS_HASH_KEY_SIZE) &&
+ !memcmp(queue_id, rss->queue_id, queue_id_size)) {
+ ++rss->refcnt;
+ return rss;
+ }
+ rss = rte_malloc(__func__, offsetof(struct mlx4_rss, queue_id) +
+ queue_id_size, 0);
+ if (!rss)
+ goto error;
+ *rss = (struct mlx4_rss){
+ .priv = priv,
+ .refcnt = 1,
+ .usecnt = 0,
+ .qp = NULL,
+ .ind = NULL,
+ .fields = fields,
+ .queues = queues,
+ };
+ memcpy(rss->key, key, MLX4_RSS_HASH_KEY_SIZE);
+ memcpy(rss->queue_id, queue_id, queue_id_size);
+ LIST_INSERT_HEAD(&priv->rss, rss, next);
+ return rss;
+error:
+ rte_errno = ENOMEM;
+ return NULL;
+}
+
+/**
+ * Release a RSS context instance.
+ *
+ * Used when destroying a flow rule targeting one or several Rx queues.
+ *
+ * This function decrements the reference count of the context and destroys
+ * it after reaching 0. The context must have no users at this point; all
+ * prior calls to mlx4_rss_attach() must have been followed by matching
+ * calls to mlx4_rss_detach().
+ *
+ * @param rss
+ * RSS context to release.
+ */
+void
+mlx4_rss_put(struct mlx4_rss *rss)
+{
+ assert(rss->refcnt);
+ if (--rss->refcnt)
+ return;
+ assert(!rss->usecnt);
+ assert(!rss->qp);
+ assert(!rss->ind);
+ LIST_REMOVE(rss, next);
+ rte_free(rss);
+}
+
+/**
+ * Attach a user to a RSS context instance.
+ *
+ * Used when the RSS QP and indirection table objects must be instantiated,
+ * that is, when a flow rule must be enabled.
+ *
+ * This function increments the usage count of the context.
+ *
+ * @param rss
+ * RSS context to attach to.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rss_attach(struct mlx4_rss *rss)
+{
+ assert(rss->refcnt);
+ if (rss->usecnt++) {
+ assert(rss->qp);
+ assert(rss->ind);
+ return 0;
+ }
+
+ struct ibv_wq *ind_tbl[rss->queues];
+ struct priv *priv = rss->priv;
+ const char *msg;
+ unsigned int i = 0;
+ int ret;
+
+ if (!rte_is_power_of_2(RTE_DIM(ind_tbl))) {
+ ret = EINVAL;
+ msg = "number of RSS queues must be a power of two";
+ goto error;
+ }
+ for (i = 0; i != RTE_DIM(ind_tbl); ++i) {
+ uint16_t id = rss->queue_id[i];
+ struct rxq *rxq = NULL;
+
+ if (id < priv->dev->data->nb_rx_queues)
+ rxq = priv->dev->data->rx_queues[id];
+ if (!rxq) {
+ ret = EINVAL;
+ msg = "RSS target queue is not configured";
+ goto error;
+ }
+ ret = mlx4_rxq_attach(rxq);
+ if (ret) {
+ ret = -ret;
+ msg = "unable to attach RSS target queue";
+ goto error;
+ }
+ ind_tbl[i] = rxq->wq;
+ }
+ rss->ind = mlx4_glue->create_rwq_ind_table
+ (priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)),
+ .ind_tbl = ind_tbl,
+ .comp_mask = 0,
+ });
+ if (!rss->ind) {
+ ret = errno ? errno : EINVAL;
+ msg = "RSS indirection table creation failure";
+ goto error;
+ }
+ rss->qp = mlx4_glue->create_qp_ex
+ (priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .comp_mask = (IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_RX_HASH |
+ IBV_QP_INIT_ATTR_IND_TABLE),
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .pd = priv->pd,
+ .rwq_ind_tbl = rss->ind,
+ .rx_hash_conf = {
+ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
+ .rx_hash_key = rss->key,
+ .rx_hash_fields_mask = rss->fields,
+ },
+ });
+ if (!rss->qp) {
+ ret = errno ? errno : EINVAL;
+ msg = "RSS hash QP creation failure";
+ goto error;
+ }
+ ret = mlx4_glue->modify_qp
+ (rss->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_INIT,
+ .port_num = priv->port,
+ },
+ IBV_QP_STATE | IBV_QP_PORT);
+ if (ret) {
+ msg = "failed to switch RSS hash QP to INIT state";
+ goto error;
+ }
+ ret = mlx4_glue->modify_qp
+ (rss->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_RTR,
+ },
+ IBV_QP_STATE);
+ if (ret) {
+ msg = "failed to switch RSS hash QP to RTR state";
+ goto error;
+ }
+ return 0;
+error:
+ if (rss->qp) {
+ claim_zero(mlx4_glue->destroy_qp(rss->qp));
+ rss->qp = NULL;
+ }
+ if (rss->ind) {
+ claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
+ rss->ind = NULL;
+ }
+ while (i--)
+ mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
+ ERROR("mlx4: %s", msg);
+ --rss->usecnt;
+ rte_errno = ret;
+ return -ret;
+}
+
+/**
+ * Detach a user from a RSS context instance.
+ *
+ * Used when disabling (not destroying) a flow rule.
+ *
+ * This function decrements the usage count of the context and destroys
+ * usage resources after reaching 0.
+ *
+ * @param rss
+ * RSS context to detach from.
+ */
+void
+mlx4_rss_detach(struct mlx4_rss *rss)
+{
+ struct priv *priv = rss->priv;
+ unsigned int i;
+
+ assert(rss->refcnt);
+ assert(rss->qp);
+ assert(rss->ind);
+ if (--rss->usecnt)
+ return;
+ claim_zero(mlx4_glue->destroy_qp(rss->qp));
+ rss->qp = NULL;
+ claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
+ rss->ind = NULL;
+ for (i = 0; i != rss->queues; ++i)
+ mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
+}
+
+/**
+ * Initialize common RSS context resources.
+ *
+ * Because ConnectX-3 hardware limitations require a fixed order in the
+ * indirection table, WQs must be allocated sequentially to be part of a
+ * common RSS context.
+ *
+ * Since a newly created WQ cannot be moved to a different context, this
+ * function allocates them all at once, one for each configured Rx queue,
+ * as well as all related resources (CQs and mbufs).
+ *
+ * This must therefore be done before creating any Rx flow rules relying on
+ * indirection tables.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rss_init(struct priv *priv)
+{
+ struct rte_eth_dev *dev = priv->dev;
+ uint8_t log2_range = rte_log2_u32(dev->data->nb_rx_queues);
+ uint32_t wq_num_prev = 0;
+ const char *msg;
+ unsigned int i;
+ int ret;
+
+ if (priv->rss_init)
+ return 0;
+ if (priv->dev->data->nb_rx_queues > priv->hw_rss_max_qps) {
+ ERROR("RSS does not support more than %d queues",
+ priv->hw_rss_max_qps);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ /* Prepare range for RSS contexts before creating the first WQ. */
+ ret = mlx4_glue->dv_set_context_attr
+ (priv->ctx,
+ MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ,
+ &log2_range);
+ if (ret) {
+ ERROR("cannot set up range size for RSS context to %u"
+ " (for %u Rx queues), error: %s",
+ 1 << log2_range, dev->data->nb_rx_queues, strerror(ret));
+ rte_errno = ret;
+ return -ret;
+ }
+ for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = priv->dev->data->rx_queues[i];
+ struct ibv_cq *cq;
+ struct ibv_wq *wq;
+ uint32_t wq_num;
+
+ /* Attach the configured Rx queues. */
+ if (rxq) {
+ assert(!rxq->usecnt);
+ ret = mlx4_rxq_attach(rxq);
+ if (!ret) {
+ wq_num = rxq->wq->wq_num;
+ goto wq_num_check;
+ }
+ ret = -ret;
+ msg = "unable to create Rx queue resources";
+ goto error;
+ }
+ /*
+ * WQs are temporarily allocated for unconfigured Rx queues
+ * to maintain proper index alignment in indirection table
+ * by skipping unused WQ numbers.
+ *
+ * The reason this works at all even though these WQs are
+ * immediately destroyed is that WQNs are allocated
+ * sequentially and are guaranteed to never be reused in the
+ * same context by the underlying implementation.
+ */
+ cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
+ if (!cq) {
+ ret = ENOMEM;
+ msg = "placeholder CQ creation failure";
+ goto error;
+ }
+ wq = mlx4_glue->create_wq
+ (priv->ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = 1,
+ .max_sge = 1,
+ .pd = priv->pd,
+ .cq = cq,
+ });
+ if (wq) {
+ wq_num = wq->wq_num;
+ claim_zero(mlx4_glue->destroy_wq(wq));
+ } else {
+ wq_num = 0; /* Shut up GCC 4.8 warnings. */
+ }
+ claim_zero(mlx4_glue->destroy_cq(cq));
+ if (!wq) {
+ ret = ENOMEM;
+ msg = "placeholder WQ creation failure";
+ goto error;
+ }
+wq_num_check:
+ /*
+ * While guaranteed by the implementation, make sure WQ
+ * numbers are really sequential (as the saying goes,
+ * trust, but verify).
+ */
+ if (i && wq_num - wq_num_prev != 1) {
+ if (rxq)
+ mlx4_rxq_detach(rxq);
+ ret = ERANGE;
+ msg = "WQ numbers are not sequential";
+ goto error;
+ }
+ wq_num_prev = wq_num;
+ }
+ priv->rss_init = 1;
+ return 0;
+error:
+ ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
+ i, msg, strerror(ret));
+ while (i--) {
+ struct rxq *rxq = priv->dev->data->rx_queues[i];
+
+ if (rxq)
+ mlx4_rxq_detach(rxq);
+ }
+ rte_errno = ret;
+ return -ret;
+}
+
+/**
+ * Release common RSS context resources.
+ *
+ * As the reverse of mlx4_rss_init(), this must be done after removing all
+ * flow rules relying on indirection tables.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+mlx4_rss_deinit(struct priv *priv)
+{
+ unsigned int i;
+
+ if (!priv->rss_init)
+ return;
+ for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = priv->dev->data->rx_queues[i];
+
+ if (rxq) {
+ assert(rxq->usecnt == 1);
+ mlx4_rxq_detach(rxq);
+ }
+ }
+ priv->rss_init = 0;
+}
+
+/**
+ * Attach a user to a Rx queue.
+ *
+ * Used when the resources of an Rx queue must be instantiated for it to
+ * become in a usable state.
+ *
+ * This function increments the usage count of the Rx queue.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rxq_attach(struct rxq *rxq)
+{
+ if (rxq->usecnt++) {
+ assert(rxq->cq);
+ assert(rxq->wq);
+ assert(rxq->wqes);
+ assert(rxq->rq_db);
+ return 0;
+ }
+
+ struct priv *priv = rxq->priv;
+ struct rte_eth_dev *dev = priv->dev;
+ const uint32_t elts_n = 1 << rxq->elts_n;
+ const uint32_t sges_n = 1 << rxq->sges_n;
+ struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
+ struct mlx4dv_obj mlxdv;
+ struct mlx4dv_rwq dv_rwq;
+ struct mlx4dv_cq dv_cq = { .comp_mask = MLX4DV_CQ_MASK_UAR, };
+ const char *msg;
+ struct ibv_cq *cq = NULL;
+ struct ibv_wq *wq = NULL;
+ uint32_t create_flags = 0;
+ uint32_t comp_mask = 0;
+ volatile struct mlx4_wqe_data_seg (*wqes)[];
+ unsigned int i;
+ int ret;
+
+ assert(rte_is_power_of_2(elts_n));
+ cq = mlx4_glue->create_cq(priv->ctx, elts_n / sges_n, NULL,
+ rxq->channel, 0);
+ if (!cq) {
+ ret = ENOMEM;
+ msg = "CQ creation failure";
+ goto error;
+ }
+ /* By default, FCS (CRC) is stripped by hardware. */
+ if (rxq->crc_present) {
+ create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
+ comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ }
+ wq = mlx4_glue->create_wq
+ (priv->ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = elts_n / sges_n,
+ .max_sge = sges_n,
+ .pd = priv->pd,
+ .cq = cq,
+ .comp_mask = comp_mask,
+ .create_flags = create_flags,
+ });
+ if (!wq) {
+ ret = errno ? errno : EINVAL;
+ msg = "WQ creation failure";
+ goto error;
+ }
+ ret = mlx4_glue->modify_wq
+ (wq,
+ &(struct ibv_wq_attr){
+ .attr_mask = IBV_WQ_ATTR_STATE,
+ .wq_state = IBV_WQS_RDY,
+ });
+ if (ret) {
+ msg = "WQ state change to IBV_WQS_RDY failed";
+ goto error;
+ }
+ /* Retrieve device queue information. */
+ mlxdv.cq.in = cq;
+ mlxdv.cq.out = &dv_cq;
+ mlxdv.rwq.in = wq;
+ mlxdv.rwq.out = &dv_rwq;
+ ret = mlx4_glue->dv_init_obj(&mlxdv, MLX4DV_OBJ_RWQ | MLX4DV_OBJ_CQ);
+ if (ret) {
+ msg = "failed to obtain device information from WQ/CQ objects";
+ goto error;
+ }
+ /* Pre-register Rx mempool. */
+ DEBUG("port %u Rx queue %u registering mp %s having %u chunks",
+ priv->dev->data->port_id, rxq->stats.idx,
+ rxq->mp->name, rxq->mp->nb_mem_chunks);
+ mlx4_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp);
+ wqes = (volatile struct mlx4_wqe_data_seg (*)[])
+ ((uintptr_t)dv_rwq.buf.buf + dv_rwq.rq.offset);
+ for (i = 0; i != RTE_DIM(*elts); ++i) {
+ volatile struct mlx4_wqe_data_seg *scat = &(*wqes)[i];
+ struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);
+
+ if (buf == NULL) {
+ while (i--) {
+ rte_pktmbuf_free_seg((*elts)[i]);
+ (*elts)[i] = NULL;
+ }
+ ret = ENOMEM;
+ msg = "cannot allocate mbuf";
+ goto error;
+ }
+ /* Headroom is reserved by rte_pktmbuf_alloc(). */
+ assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
+ /* Buffer is supposed to be empty. */
+ assert(rte_pktmbuf_data_len(buf) == 0);
+ assert(rte_pktmbuf_pkt_len(buf) == 0);
+ /* Only the first segment keeps headroom. */
+ if (i % sges_n)
+ buf->data_off = 0;
+ buf->port = rxq->port_id;
+ buf->data_len = rte_pktmbuf_tailroom(buf);
+ buf->pkt_len = rte_pktmbuf_tailroom(buf);
+ buf->nb_segs = 1;
+ *scat = (struct mlx4_wqe_data_seg){
+ .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
+ uintptr_t)),
+ .byte_count = rte_cpu_to_be_32(buf->data_len),
+ .lkey = mlx4_rx_mb2mr(rxq, buf),
+ };
+ (*elts)[i] = buf;
+ }
+ DEBUG("%p: allocated and configured %u segments (max %u packets)",
+ (void *)rxq, elts_n, elts_n / sges_n);
+ rxq->cq = cq;
+ rxq->wq = wq;
+ rxq->wqes = wqes;
+ rxq->rq_db = dv_rwq.rdb;
+ rxq->mcq.buf = dv_cq.buf.buf;
+ rxq->mcq.cqe_cnt = dv_cq.cqe_cnt;
+ rxq->mcq.set_ci_db = dv_cq.set_ci_db;
+ rxq->mcq.cqe_64 = (dv_cq.cqe_size & 64) ? 1 : 0;
+ rxq->mcq.arm_db = dv_cq.arm_db;
+ rxq->mcq.arm_sn = dv_cq.arm_sn;
+ rxq->mcq.cqn = dv_cq.cqn;
+ rxq->mcq.cq_uar = dv_cq.cq_uar;
+ rxq->mcq.cq_db_reg = (uint8_t *)dv_cq.cq_uar + MLX4_CQ_DOORBELL;
+ /* Update doorbell counter. */
+ rxq->rq_ci = elts_n / sges_n;
+ rte_wmb();
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+ return 0;
+error:
+ if (wq)
+ claim_zero(mlx4_glue->destroy_wq(wq));
+ if (cq)
+ claim_zero(mlx4_glue->destroy_cq(cq));
+ --rxq->usecnt;
+ rte_errno = ret;
+ ERROR("error while attaching Rx queue %p: %s: %s",
+ (void *)rxq, msg, strerror(ret));
+ return -ret;
+}
+
+/**
+ * Detach a user from a Rx queue.
+ *
+ * This function decrements the usage count of the Rx queue and destroys
+ * usage resources after reaching 0.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ */
+void
+mlx4_rxq_detach(struct rxq *rxq)
+{
+ unsigned int i;
+ struct rte_mbuf *(*elts)[1 << rxq->elts_n] = rxq->elts;
+
+ if (--rxq->usecnt)
+ return;
+ rxq->rq_ci = 0;
+ memset(&rxq->mcq, 0, sizeof(rxq->mcq));
+ rxq->rq_db = NULL;
+ rxq->wqes = NULL;
+ claim_zero(mlx4_glue->destroy_wq(rxq->wq));
+ rxq->wq = NULL;
+ claim_zero(mlx4_glue->destroy_cq(rxq->cq));
+ rxq->cq = NULL;
+ DEBUG("%p: freeing Rx queue elements", (void *)rxq);
+ for (i = 0; (i != RTE_DIM(*elts)); ++i) {
+ if (!(*elts)[i])
+ continue;
+ rte_pktmbuf_free_seg((*elts)[i]);
+ (*elts)[i] = NULL;
+ }
+}
+
+/**
+ * Returns the per-queue supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Tx offloads.
+ */
+uint64_t
+mlx4_get_rx_queue_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ if (priv->hw_csum)
+ offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ return offloads;
+}
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx4_get_rx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ (void)priv;
+ return offloads;
+}
+
+/**
+ * DPDK callback to configure a Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Rx queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint32_t mb_len = rte_pktmbuf_data_room_size(mp);
+ struct rte_mbuf *(*elts)[rte_align32pow2(desc)];
+ struct rxq *rxq;
+ struct mlx4_malloc_vec vec[] = {
+ {
+ .align = RTE_CACHE_LINE_SIZE,
+ .size = sizeof(*rxq),
+ .addr = (void **)&rxq,
+ },
+ {
+ .align = RTE_CACHE_LINE_SIZE,
+ .size = sizeof(*elts),
+ .addr = (void **)&elts,
+ },
+ };
+ int ret;
+ uint32_t crc_present;
+ uint64_t offloads;
+
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+ DEBUG("%p: configuring queue %u for %u descriptors",
+ (void *)dev, idx, desc);
+
+ if (idx >= dev->data->nb_rx_queues) {
+ rte_errno = EOVERFLOW;
+ ERROR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, idx, dev->data->nb_rx_queues);
+ return -rte_errno;
+ }
+ rxq = dev->data->rx_queues[idx];
+ if (rxq) {
+ rte_errno = EEXIST;
+ ERROR("%p: Rx queue %u already configured, release it first",
+ (void *)dev, idx);
+ return -rte_errno;
+ }
+ if (!desc) {
+ rte_errno = EINVAL;
+ ERROR("%p: invalid number of Rx descriptors", (void *)dev);
+ return -rte_errno;
+ }
+ if (desc != RTE_DIM(*elts)) {
+ desc = RTE_DIM(*elts);
+ WARN("%p: increased number of descriptors in Rx queue %u"
+ " to the next power of two (%u)",
+ (void *)dev, idx, desc);
+ }
+ /* By default, FCS (CRC) is stripped by hardware. */
+ crc_present = 0;
+ if (rte_eth_dev_must_keep_crc(offloads)) {
+ if (priv->hw_fcs_strip) {
+ crc_present = 1;
+ } else {
+ WARN("%p: CRC stripping has been disabled but will still"
+ " be performed by hardware, make sure MLNX_OFED and"
+ " firmware are up to date",
+ (void *)dev);
+ }
+ }
+ DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
+ " incoming frames to hide it",
+ (void *)dev,
+ crc_present ? "disabled" : "enabled",
+ crc_present << 2);
+ /* Allocate and initialize Rx queue. */
+ mlx4_zmallocv_socket("RXQ", vec, RTE_DIM(vec), socket);
+ if (!rxq) {
+ ERROR("%p: unable to allocate queue index %u",
+ (void *)dev, idx);
+ return -rte_errno;
+ }
+ *rxq = (struct rxq){
+ .priv = priv,
+ .mp = mp,
+ .port_id = dev->data->port_id,
+ .sges_n = 0,
+ .elts_n = rte_log2_u32(desc),
+ .elts = elts,
+ /* Toggle Rx checksum offload if hardware supports it. */
+ .csum = priv->hw_csum &&
+ (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ .csum_l2tun = priv->hw_csum_l2tun &&
+ (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ .crc_present = crc_present,
+ .l2tun_offload = priv->hw_csum_l2tun,
+ .stats = {
+ .idx = idx,
+ },
+ .socket = socket,
+ };
+ /* Enable scattered packets support for this queue if necessary. */
+ assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+ (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ ;
+ } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+ uint32_t size =
+ RTE_PKTMBUF_HEADROOM +
+ dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint32_t sges_n;
+
+ /*
+ * Determine the number of SGEs needed for a full packet
+ * and round it to the next power of two.
+ */
+ sges_n = rte_log2_u32((size / mb_len) + !!(size % mb_len));
+ rxq->sges_n = sges_n;
+ /* Make sure sges_n did not overflow. */
+ size = mb_len * (1 << rxq->sges_n);
+ size -= RTE_PKTMBUF_HEADROOM;
+ if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+ rte_errno = EOVERFLOW;
+ ERROR("%p: too many SGEs (%u) needed to handle"
+ " requested maximum packet size %u",
+ (void *)dev,
+ 1 << sges_n,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ goto error;
+ }
+ } else {
+ WARN("%p: the requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered"
+ " mode has not been requested",
+ (void *)dev,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ mb_len - RTE_PKTMBUF_HEADROOM);
+ }
+ DEBUG("%p: maximum number of segments per packet: %u",
+ (void *)dev, 1 << rxq->sges_n);
+ if (desc % (1 << rxq->sges_n)) {
+ rte_errno = EINVAL;
+ ERROR("%p: number of Rx queue descriptors (%u) is not a"
+ " multiple of maximum segments per packet (%u)",
+ (void *)dev,
+ desc,
+ 1 << rxq->sges_n);
+ goto error;
+ }
+ if (mlx4_mr_btree_init(&rxq->mr_ctrl.cache_bh,
+ MLX4_MR_BTREE_CACHE_N, socket)) {
+ /* rte_errno is already set. */
+ goto error;
+ }
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ rxq->channel = mlx4_glue->create_comp_channel(priv->ctx);
+ if (rxq->channel == NULL) {
+ rte_errno = ENOMEM;
+ ERROR("%p: Rx interrupt completion channel creation"
+ " failure: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ if (mlx4_fd_set_non_blocking(rxq->channel->fd) < 0) {
+ ERROR("%p: unable to make Rx interrupt completion"
+ " channel non-blocking: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ }
+ DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
+ dev->data->rx_queues[idx] = rxq;
+ return 0;
+error:
+ dev->data->rx_queues[idx] = NULL;
+ ret = rte_errno;
+ mlx4_rx_queue_release(rxq);
+ rte_errno = ret;
+ assert(rte_errno > 0);
+ return -rte_errno;
+}
+
+/**
+ * DPDK callback to release a Rx queue.
+ *
+ * @param dpdk_rxq
+ * Generic Rx queue pointer.
+ */
+void
+mlx4_rx_queue_release(void *dpdk_rxq)
+{
+ struct rxq *rxq = (struct rxq *)dpdk_rxq;
+ struct priv *priv;
+ unsigned int i;
+
+ if (rxq == NULL)
+ return;
+ priv = rxq->priv;
+ for (i = 0; i != priv->dev->data->nb_rx_queues; ++i)
+ if (priv->dev->data->rx_queues[i] == rxq) {
+ DEBUG("%p: removing Rx queue %p from list",
+ (void *)priv->dev, (void *)rxq);
+ priv->dev->data->rx_queues[i] = NULL;
+ break;
+ }
+ assert(!rxq->cq);
+ assert(!rxq->wq);
+ assert(!rxq->wqes);
+ assert(!rxq->rq_db);
+ if (rxq->channel)
+ claim_zero(mlx4_glue->destroy_comp_channel(rxq->channel));
+ mlx4_mr_btree_free(&rxq->mr_ctrl.cache_bh);
+ rte_free(rxq);
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.c
new file mode 100644
index 00000000..8c88effc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.c
@@ -0,0 +1,1394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+/**
+ * @file
+ * Data plane functions for mlx4 driver.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_io.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+
+#include "mlx4.h"
+#include "mlx4_prm.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+/**
+ * Pointer-value pair structure used in tx_post_send for saving the first
+ * DWORD (32 byte) of a TXBB.
+ */
+struct pv {
+ union {
+ volatile struct mlx4_wqe_data_seg *dseg;
+ volatile uint32_t *dst;
+ };
+ uint32_t val;
+};
+
+/** A helper structure for TSO packet handling. */
+struct tso_info {
+ /** Pointer to the array of saved first DWORD (32 byte) of a TXBB. */
+ struct pv *pv;
+ /** Current entry in the pv array. */
+ int pv_counter;
+ /** Total size of the WQE including padding. */
+ uint32_t wqe_size;
+ /** Size of TSO header to prepend to each packet to send. */
+ uint16_t tso_header_size;
+ /** Total size of the TSO segment in the WQE. */
+ uint16_t wqe_tso_seg_size;
+ /** Raw WQE size in units of 16 Bytes and without padding. */
+ uint8_t fence_size;
+};
+
+/** A table to translate Rx completion flags to packet type. */
+uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
+ /*
+ * The index to the array should have:
+ * bit[7] - MLX4_CQE_L2_TUNNEL
+ * bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
+ * bit[5] - MLX4_CQE_STATUS_UDP
+ * bit[4] - MLX4_CQE_STATUS_TCP
+ * bit[3] - MLX4_CQE_STATUS_IPV4OPT
+ * bit[2] - MLX4_CQE_STATUS_IPV6
+ * bit[1] - MLX4_CQE_STATUS_IPF
+ * bit[0] - MLX4_CQE_STATUS_IPV4
+ * giving a total of up to 256 entries.
+ */
+ /* L2 */
+ [0x00] = RTE_PTYPE_L2_ETHER,
+ /* L3 */
+ [0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [0x03] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [0x08] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_NONFRAG,
+ [0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_NONFRAG,
+ [0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_FRAG,
+ [0x0b] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_FRAG,
+ /* TCP */
+ [0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [0x16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [0x18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_TCP,
+ [0x19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_TCP,
+ /* UDP */
+ [0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [0x26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [0x28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_UDP,
+ [0x29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_UDP,
+ /* Tunneled - L3 IPV6 */
+ [0x80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ [0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x84] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x8b] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_FRAG,
+ /* Tunneled - L3 IPV6, TCP */
+ [0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0x94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0x96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x98] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [0x99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
+ /* Tunneled - L3 IPV6, UDP */
+ [0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xa6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* Tunneled - L3 IPV4 */
+ [0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ [0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0xc3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0xc4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0xc8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0xcb] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_FRAG,
+ /* Tunneled - L3 IPV4, TCP */
+ [0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0xd4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0xd6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0xd8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0xd9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ /* Tunneled - L3 IPV4, UDP */
+ [0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xe4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xe6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0xe8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xe9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+};
+
+/**
+ * Stamp TXBB burst so it won't be reused by the HW.
+ *
+ * Routine is used when freeing WQE used by the chip or when failing
+ * building an WQ entry has failed leaving partial information on the queue.
+ *
+ * @param sq
+ * Pointer to the SQ structure.
+ * @param start
+ * Pointer to the first TXBB to stamp.
+ * @param end
+ * Pointer to the followed end TXBB to stamp.
+ *
+ * @return
+ * Stamping burst size in byte units.
+ */
+static uint32_t
+mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, volatile uint32_t *start,
+ volatile uint32_t *end)
+{
+ uint32_t stamp = sq->stamp;
+ int32_t size = (intptr_t)end - (intptr_t)start;
+
+ assert(start != end);
+ /* Hold SQ ring wrap around. */
+ if (size < 0) {
+ size = (int32_t)sq->size + size;
+ do {
+ *start = stamp;
+ start += MLX4_SQ_STAMP_DWORDS;
+ } while (start != (volatile uint32_t *)sq->eob);
+ start = (volatile uint32_t *)sq->buf;
+ /* Flip invalid stamping ownership. */
+ stamp ^= RTE_BE32(1u << MLX4_SQ_OWNER_BIT);
+ sq->stamp = stamp;
+ if (start == end)
+ return size;
+ }
+ do {
+ *start = stamp;
+ start += MLX4_SQ_STAMP_DWORDS;
+ } while (start != end);
+ return (uint32_t)size;
+}
+
+/**
+ * Manage Tx completions.
+ *
+ * When sending a burst, mlx4_tx_burst() posts several WRs.
+ * To improve performance, a completion event is only required once every
+ * MLX4_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
+ * for other WRs, but this information would not be used anyway.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param elts_m
+ * Tx elements number mask.
+ * @param sq
+ * Pointer to the SQ structure.
+ */
+static void
+mlx4_txq_complete(struct txq *txq, const unsigned int elts_m,
+ struct mlx4_sq *sq)
+{
+ unsigned int elts_tail = txq->elts_tail;
+ struct mlx4_cq *cq = &txq->mcq;
+ volatile struct mlx4_cqe *cqe;
+ uint32_t completed;
+ uint32_t cons_index = cq->cons_index;
+ volatile uint32_t *first_txbb;
+
+ /*
+ * Traverse over all CQ entries reported and handle each WQ entry
+ * reported by them.
+ */
+ do {
+ cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cons_index);
+ if (unlikely(!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
+ !!(cons_index & cq->cqe_cnt)))
+ break;
+#ifndef NDEBUG
+ /*
+ * Make sure we read the CQE after we read the ownership bit.
+ */
+ rte_io_rmb();
+ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+ MLX4_CQE_OPCODE_ERROR)) {
+ volatile struct mlx4_err_cqe *cqe_err =
+ (volatile struct mlx4_err_cqe *)cqe;
+ ERROR("%p CQE error - vendor syndrome: 0x%x"
+ " syndrome: 0x%x\n",
+ (void *)txq, cqe_err->vendor_err,
+ cqe_err->syndrome);
+ break;
+ }
+#endif /* NDEBUG */
+ cons_index++;
+ } while (1);
+ completed = (cons_index - cq->cons_index) * txq->elts_comp_cd_init;
+ if (unlikely(!completed))
+ return;
+ /* First stamping address is the end of the last one. */
+ first_txbb = (&(*txq->elts)[elts_tail & elts_m])->eocb;
+ elts_tail += completed;
+ /* The new tail element holds the end address. */
+ sq->remain_size += mlx4_txq_stamp_freed_wqe(sq, first_txbb,
+ (&(*txq->elts)[elts_tail & elts_m])->eocb);
+ /* Update CQ consumer index. */
+ cq->cons_index = cons_index;
+ *cq->set_ci_db = rte_cpu_to_be_32(cons_index & MLX4_CQ_DB_CI_MASK);
+ txq->elts_tail = elts_tail;
+}
+
+/**
+ * Write Tx data segment to the SQ.
+ *
+ * @param dseg
+ * Pointer to data segment in SQ.
+ * @param lkey
+ * Memory region lkey.
+ * @param addr
+ * Data address.
+ * @param byte_count
+ * Big endian bytes count of the data to send.
+ */
+static inline void
+mlx4_fill_tx_data_seg(volatile struct mlx4_wqe_data_seg *dseg,
+ uint32_t lkey, uintptr_t addr, rte_be32_t byte_count)
+{
+ dseg->addr = rte_cpu_to_be_64(addr);
+ dseg->lkey = lkey;
+#if RTE_CACHE_LINE_SIZE < 64
+ /*
+ * Need a barrier here before writing the byte_count
+ * fields to make sure that all the data is visible
+ * before the byte_count field is set.
+ * Otherwise, if the segment begins a new cacheline,
+ * the HCA prefetcher could grab the 64-byte chunk and
+ * get a valid (!= 0xffffffff) byte count but stale
+ * data, and end up sending the wrong data.
+ */
+ rte_io_wmb();
+#endif /* RTE_CACHE_LINE_SIZE */
+ dseg->byte_count = byte_count;
+}
+
+/**
+ * Obtain and calculate TSO information needed for assembling a TSO WQE.
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param tinfo
+ * Pointer to a structure to fill the info with.
+ *
+ * @return
+ * 0 on success, negative value upon error.
+ */
+static inline int
+mlx4_tx_burst_tso_get_params(struct rte_mbuf *buf,
+ struct txq *txq,
+ struct tso_info *tinfo)
+{
+ struct mlx4_sq *sq = &txq->msq;
+ const uint8_t tunneled = txq->priv->hw_csum_l2tun &&
+ (buf->ol_flags & PKT_TX_TUNNEL_MASK);
+
+ tinfo->tso_header_size = buf->l2_len + buf->l3_len + buf->l4_len;
+ if (tunneled)
+ tinfo->tso_header_size +=
+ buf->outer_l2_len + buf->outer_l3_len;
+ if (unlikely(buf->tso_segsz == 0 ||
+ tinfo->tso_header_size == 0 ||
+ tinfo->tso_header_size > MLX4_MAX_TSO_HEADER ||
+ tinfo->tso_header_size > buf->data_len))
+ return -EINVAL;
+ /*
+ * Calculate the WQE TSO segment size
+ * Note:
+ * 1. An LSO segment must be padded such that the subsequent data
+ * segment is 16-byte aligned.
+ * 2. The start address of the TSO segment is always 16 Bytes aligned.
+ */
+ tinfo->wqe_tso_seg_size = RTE_ALIGN(sizeof(struct mlx4_wqe_lso_seg) +
+ tinfo->tso_header_size,
+ sizeof(struct mlx4_wqe_data_seg));
+ tinfo->fence_size = ((sizeof(struct mlx4_wqe_ctrl_seg) +
+ tinfo->wqe_tso_seg_size) >> MLX4_SEG_SHIFT) +
+ buf->nb_segs;
+ tinfo->wqe_size =
+ RTE_ALIGN((uint32_t)(tinfo->fence_size << MLX4_SEG_SHIFT),
+ MLX4_TXBB_SIZE);
+ /* Validate WQE size and WQE space in the send queue. */
+ if (sq->remain_size < tinfo->wqe_size ||
+ tinfo->wqe_size > MLX4_MAX_WQE_SIZE)
+ return -ENOMEM;
+ /* Init pv. */
+ tinfo->pv = (struct pv *)txq->bounce_buf;
+ tinfo->pv_counter = 0;
+ return 0;
+}
+
+/**
+ * Fill the TSO WQE data segments with info on buffers to transmit .
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param tinfo
+ * Pointer to TSO info to use.
+ * @param dseg
+ * Pointer to the first data segment in the TSO WQE.
+ * @param ctrl
+ * Pointer to the control segment in the TSO WQE.
+ *
+ * @return
+ * 0 on success, negative value upon error.
+ */
+static inline volatile struct mlx4_wqe_ctrl_seg *
+mlx4_tx_burst_fill_tso_dsegs(struct rte_mbuf *buf,
+ struct txq *txq,
+ struct tso_info *tinfo,
+ volatile struct mlx4_wqe_data_seg *dseg,
+ volatile struct mlx4_wqe_ctrl_seg *ctrl)
+{
+ uint32_t lkey;
+ int nb_segs = buf->nb_segs;
+ int nb_segs_txbb;
+ struct mlx4_sq *sq = &txq->msq;
+ struct rte_mbuf *sbuf = buf;
+ struct pv *pv = tinfo->pv;
+ int *pv_counter = &tinfo->pv_counter;
+ volatile struct mlx4_wqe_ctrl_seg *ctrl_next =
+ (volatile struct mlx4_wqe_ctrl_seg *)
+ ((volatile uint8_t *)ctrl + tinfo->wqe_size);
+ uint16_t data_len = sbuf->data_len - tinfo->tso_header_size;
+ uintptr_t data_addr = rte_pktmbuf_mtod_offset(sbuf, uintptr_t,
+ tinfo->tso_header_size);
+
+ do {
+ /* how many dseg entries do we have in the current TXBB ? */
+ nb_segs_txbb = (MLX4_TXBB_SIZE -
+ ((uintptr_t)dseg & (MLX4_TXBB_SIZE - 1))) >>
+ MLX4_SEG_SHIFT;
+ switch (nb_segs_txbb) {
+#ifndef NDEBUG
+ default:
+ /* Should never happen. */
+ rte_panic("%p: Invalid number of SGEs(%d) for a TXBB",
+ (void *)txq, nb_segs_txbb);
+ /* rte_panic never returns. */
+ break;
+#endif /* NDEBUG */
+ case 4:
+ /* Memory region key for this memory pool. */
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1))
+ goto err;
+ dseg->addr = rte_cpu_to_be_64(data_addr);
+ dseg->lkey = lkey;
+ /*
+ * This data segment starts at the beginning of a new
+ * TXBB, so we need to postpone its byte_count writing
+ * for later.
+ */
+ pv[*pv_counter].dseg = dseg;
+ /*
+ * Zero length segment is treated as inline segment
+ * with zero data.
+ */
+ pv[(*pv_counter)++].val =
+ rte_cpu_to_be_32(data_len ?
+ data_len :
+ 0x80000000);
+ if (--nb_segs == 0)
+ return ctrl_next;
+ /* Prepare next buf info */
+ sbuf = sbuf->next;
+ dseg++;
+ data_len = sbuf->data_len;
+ data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+ /* fallthrough */
+ case 3:
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1))
+ goto err;
+ mlx4_fill_tx_data_seg(dseg, lkey, data_addr,
+ rte_cpu_to_be_32(data_len ?
+ data_len :
+ 0x80000000));
+ if (--nb_segs == 0)
+ return ctrl_next;
+ /* Prepare next buf info */
+ sbuf = sbuf->next;
+ dseg++;
+ data_len = sbuf->data_len;
+ data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+ /* fallthrough */
+ case 2:
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1))
+ goto err;
+ mlx4_fill_tx_data_seg(dseg, lkey, data_addr,
+ rte_cpu_to_be_32(data_len ?
+ data_len :
+ 0x80000000));
+ if (--nb_segs == 0)
+ return ctrl_next;
+ /* Prepare next buf info */
+ sbuf = sbuf->next;
+ dseg++;
+ data_len = sbuf->data_len;
+ data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+ /* fallthrough */
+ case 1:
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1))
+ goto err;
+ mlx4_fill_tx_data_seg(dseg, lkey, data_addr,
+ rte_cpu_to_be_32(data_len ?
+ data_len :
+ 0x80000000));
+ if (--nb_segs == 0)
+ return ctrl_next;
+ /* Prepare next buf info */
+ sbuf = sbuf->next;
+ dseg++;
+ data_len = sbuf->data_len;
+ data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+ /* fallthrough */
+ }
+ /* Wrap dseg if it points at the end of the queue. */
+ if ((volatile uint8_t *)dseg >= sq->eob)
+ dseg = (volatile struct mlx4_wqe_data_seg *)
+ ((volatile uint8_t *)dseg - sq->size);
+ } while (true);
+err:
+ return NULL;
+}
+
+/**
+ * Fill the packet's l2, l3 and l4 headers to the WQE.
+ *
+ * This will be used as the header for each TSO segment that is transmitted.
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param tinfo
+ * Pointer to TSO info to use.
+ * @param ctrl
+ * Pointer to the control segment in the TSO WQE.
+ *
+ * @return
+ * 0 on success, negative value upon error.
+ */
+static inline volatile struct mlx4_wqe_data_seg *
+mlx4_tx_burst_fill_tso_hdr(struct rte_mbuf *buf,
+ struct txq *txq,
+ struct tso_info *tinfo,
+ volatile struct mlx4_wqe_ctrl_seg *ctrl)
+{
+ volatile struct mlx4_wqe_lso_seg *tseg =
+ (volatile struct mlx4_wqe_lso_seg *)(ctrl + 1);
+ struct mlx4_sq *sq = &txq->msq;
+ struct pv *pv = tinfo->pv;
+ int *pv_counter = &tinfo->pv_counter;
+ int remain_size = tinfo->tso_header_size;
+ char *from = rte_pktmbuf_mtod(buf, char *);
+ uint16_t txbb_avail_space;
+ /* Union to overcome volatile constraints when copying TSO header. */
+ union {
+ volatile uint8_t *vto;
+ uint8_t *to;
+ } thdr = { .vto = (volatile uint8_t *)tseg->header, };
+
+ /*
+ * TSO data always starts at offset 20 from the beginning of the TXBB
+ * (16 byte ctrl + 4byte TSO desc). Since each TXBB is 64Byte aligned
+ * we can write the first 44 TSO header bytes without worry for TxQ
+ * wrapping or overwriting the first TXBB 32bit word.
+ */
+ txbb_avail_space = MLX4_TXBB_SIZE -
+ (sizeof(struct mlx4_wqe_ctrl_seg) +
+ sizeof(struct mlx4_wqe_lso_seg));
+ while (remain_size >= (int)(txbb_avail_space + sizeof(uint32_t))) {
+ /* Copy to end of txbb. */
+ rte_memcpy(thdr.to, from, txbb_avail_space);
+ from += txbb_avail_space;
+ thdr.to += txbb_avail_space;
+ /* New TXBB, Check for TxQ wrap. */
+ if (thdr.to >= sq->eob)
+ thdr.vto = sq->buf;
+ /* New TXBB, stash the first 32bits for later use. */
+ pv[*pv_counter].dst = (volatile uint32_t *)thdr.to;
+ pv[(*pv_counter)++].val = *(uint32_t *)from,
+ from += sizeof(uint32_t);
+ thdr.to += sizeof(uint32_t);
+ remain_size -= txbb_avail_space + sizeof(uint32_t);
+ /* Avail space in new TXBB is TXBB size - 4 */
+ txbb_avail_space = MLX4_TXBB_SIZE - sizeof(uint32_t);
+ }
+ if (remain_size > txbb_avail_space) {
+ rte_memcpy(thdr.to, from, txbb_avail_space);
+ from += txbb_avail_space;
+ thdr.to += txbb_avail_space;
+ remain_size -= txbb_avail_space;
+ /* New TXBB, Check for TxQ wrap. */
+ if (thdr.to >= sq->eob)
+ thdr.vto = sq->buf;
+ pv[*pv_counter].dst = (volatile uint32_t *)thdr.to;
+ rte_memcpy(&pv[*pv_counter].val, from, remain_size);
+ (*pv_counter)++;
+ } else if (remain_size) {
+ rte_memcpy(thdr.to, from, remain_size);
+ }
+ tseg->mss_hdr_size = rte_cpu_to_be_32((buf->tso_segsz << 16) |
+ tinfo->tso_header_size);
+ /* Calculate data segment location */
+ return (volatile struct mlx4_wqe_data_seg *)
+ ((uintptr_t)tseg + tinfo->wqe_tso_seg_size);
+}
+
+/**
+ * Write data segments and header for TSO uni/multi segment packet.
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param ctrl
+ * Pointer to the WQE control segment.
+ *
+ * @return
+ * Pointer to the next WQE control segment on success, NULL otherwise.
+ */
+static volatile struct mlx4_wqe_ctrl_seg *
+mlx4_tx_burst_tso(struct rte_mbuf *buf, struct txq *txq,
+ volatile struct mlx4_wqe_ctrl_seg *ctrl)
+{
+ volatile struct mlx4_wqe_data_seg *dseg;
+ volatile struct mlx4_wqe_ctrl_seg *ctrl_next;
+ struct mlx4_sq *sq = &txq->msq;
+ struct tso_info tinfo;
+ struct pv *pv;
+ int pv_counter;
+ int ret;
+
+ ret = mlx4_tx_burst_tso_get_params(buf, txq, &tinfo);
+ if (unlikely(ret))
+ goto error;
+ dseg = mlx4_tx_burst_fill_tso_hdr(buf, txq, &tinfo, ctrl);
+ if (unlikely(dseg == NULL))
+ goto error;
+ if ((uintptr_t)dseg >= (uintptr_t)sq->eob)
+ dseg = (volatile struct mlx4_wqe_data_seg *)
+ ((uintptr_t)dseg - sq->size);
+ ctrl_next = mlx4_tx_burst_fill_tso_dsegs(buf, txq, &tinfo, dseg, ctrl);
+ if (unlikely(ctrl_next == NULL))
+ goto error;
+ /* Write the first DWORD of each TXBB save earlier. */
+ if (likely(tinfo.pv_counter)) {
+ pv = tinfo.pv;
+ pv_counter = tinfo.pv_counter;
+ /* Need a barrier here before writing the first TXBB word. */
+ rte_io_wmb();
+ do {
+ --pv_counter;
+ *pv[pv_counter].dst = pv[pv_counter].val;
+ } while (pv_counter > 0);
+ }
+ ctrl->fence_size = tinfo.fence_size;
+ sq->remain_size -= tinfo.wqe_size;
+ return ctrl_next;
+error:
+ txq->stats.odropped++;
+ return NULL;
+}
+
+/**
+ * Write data segments of multi-segment packet.
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param ctrl
+ * Pointer to the WQE control segment.
+ *
+ * @return
+ * Pointer to the next WQE control segment on success, NULL otherwise.
+ */
+static volatile struct mlx4_wqe_ctrl_seg *
+mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
+ volatile struct mlx4_wqe_ctrl_seg *ctrl)
+{
+ struct pv *pv = (struct pv *)txq->bounce_buf;
+ struct mlx4_sq *sq = &txq->msq;
+ struct rte_mbuf *sbuf = buf;
+ uint32_t lkey;
+ int pv_counter = 0;
+ int nb_segs = buf->nb_segs;
+ uint32_t wqe_size;
+ volatile struct mlx4_wqe_data_seg *dseg =
+ (volatile struct mlx4_wqe_data_seg *)(ctrl + 1);
+
+ ctrl->fence_size = 1 + nb_segs;
+ wqe_size = RTE_ALIGN((uint32_t)(ctrl->fence_size << MLX4_SEG_SHIFT),
+ MLX4_TXBB_SIZE);
+ /* Validate WQE size and WQE space in the send queue. */
+ if (sq->remain_size < wqe_size ||
+ wqe_size > MLX4_MAX_WQE_SIZE)
+ return NULL;
+ /*
+ * Fill the data segments with buffer information.
+ * First WQE TXBB head segment is always control segment,
+ * so jump to tail TXBB data segments code for the first
+ * WQE data segments filling.
+ */
+ goto txbb_tail_segs;
+txbb_head_seg:
+ /* Memory region key (big endian) for this memory pool. */
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1)) {
+ DEBUG("%p: unable to get MP <-> MR association",
+ (void *)txq);
+ return NULL;
+ }
+ /* Handle WQE wraparound. */
+ if (dseg >=
+ (volatile struct mlx4_wqe_data_seg *)sq->eob)
+ dseg = (volatile struct mlx4_wqe_data_seg *)
+ sq->buf;
+ dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(sbuf, uintptr_t));
+ dseg->lkey = lkey;
+ /*
+ * This data segment starts at the beginning of a new
+ * TXBB, so we need to postpone its byte_count writing
+ * for later.
+ */
+ pv[pv_counter].dseg = dseg;
+ /*
+ * Zero length segment is treated as inline segment
+ * with zero data.
+ */
+ pv[pv_counter++].val = rte_cpu_to_be_32(sbuf->data_len ?
+ sbuf->data_len : 0x80000000);
+ sbuf = sbuf->next;
+ dseg++;
+ nb_segs--;
+txbb_tail_segs:
+ /* Jump to default if there are more than two segments remaining. */
+ switch (nb_segs) {
+ default:
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1)) {
+ DEBUG("%p: unable to get MP <-> MR association",
+ (void *)txq);
+ return NULL;
+ }
+ mlx4_fill_tx_data_seg(dseg, lkey,
+ rte_pktmbuf_mtod(sbuf, uintptr_t),
+ rte_cpu_to_be_32(sbuf->data_len ?
+ sbuf->data_len :
+ 0x80000000));
+ sbuf = sbuf->next;
+ dseg++;
+ nb_segs--;
+ /* fallthrough */
+ case 2:
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1)) {
+ DEBUG("%p: unable to get MP <-> MR association",
+ (void *)txq);
+ return NULL;
+ }
+ mlx4_fill_tx_data_seg(dseg, lkey,
+ rte_pktmbuf_mtod(sbuf, uintptr_t),
+ rte_cpu_to_be_32(sbuf->data_len ?
+ sbuf->data_len :
+ 0x80000000));
+ sbuf = sbuf->next;
+ dseg++;
+ nb_segs--;
+ /* fallthrough */
+ case 1:
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1)) {
+ DEBUG("%p: unable to get MP <-> MR association",
+ (void *)txq);
+ return NULL;
+ }
+ mlx4_fill_tx_data_seg(dseg, lkey,
+ rte_pktmbuf_mtod(sbuf, uintptr_t),
+ rte_cpu_to_be_32(sbuf->data_len ?
+ sbuf->data_len :
+ 0x80000000));
+ nb_segs--;
+ if (nb_segs) {
+ sbuf = sbuf->next;
+ dseg++;
+ goto txbb_head_seg;
+ }
+ /* fallthrough */
+ case 0:
+ break;
+ }
+ /* Write the first DWORD of each TXBB save earlier. */
+ if (pv_counter) {
+ /* Need a barrier here before writing the byte_count. */
+ rte_io_wmb();
+ for (--pv_counter; pv_counter >= 0; pv_counter--)
+ pv[pv_counter].dseg->byte_count = pv[pv_counter].val;
+ }
+ sq->remain_size -= wqe_size;
+ /* Align next WQE address to the next TXBB. */
+ return (volatile struct mlx4_wqe_ctrl_seg *)
+ ((volatile uint8_t *)ctrl + wqe_size);
+}
+
+/**
+ * DPDK callback for Tx.
+ *
+ * @param dpdk_txq
+ * Generic pointer to Tx queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct txq *txq = (struct txq *)dpdk_txq;
+ unsigned int elts_head = txq->elts_head;
+ const unsigned int elts_n = txq->elts_n;
+ const unsigned int elts_m = elts_n - 1;
+ unsigned int bytes_sent = 0;
+ unsigned int i;
+ unsigned int max = elts_head - txq->elts_tail;
+ struct mlx4_sq *sq = &txq->msq;
+ volatile struct mlx4_wqe_ctrl_seg *ctrl;
+ struct txq_elt *elt;
+
+ assert(txq->elts_comp_cd != 0);
+ if (likely(max >= txq->elts_comp_cd_init))
+ mlx4_txq_complete(txq, elts_m, sq);
+ max = elts_n - max;
+ assert(max >= 1);
+ assert(max <= elts_n);
+ /* Always leave one free entry in the ring. */
+ --max;
+ if (max > pkts_n)
+ max = pkts_n;
+ elt = &(*txq->elts)[elts_head & elts_m];
+ /* First Tx burst element saves the next WQE control segment. */
+ ctrl = elt->wqe;
+ for (i = 0; (i != max); ++i) {
+ struct rte_mbuf *buf = pkts[i];
+ struct txq_elt *elt_next = &(*txq->elts)[++elts_head & elts_m];
+ uint32_t owner_opcode = sq->owner_opcode;
+ volatile struct mlx4_wqe_data_seg *dseg =
+ (volatile struct mlx4_wqe_data_seg *)(ctrl + 1);
+ volatile struct mlx4_wqe_ctrl_seg *ctrl_next;
+ union {
+ uint32_t flags;
+ uint16_t flags16[2];
+ } srcrb;
+ uint32_t lkey;
+ bool tso = txq->priv->tso && (buf->ol_flags & PKT_TX_TCP_SEG);
+
+ /* Clean up old buffer. */
+ if (likely(elt->buf != NULL)) {
+ struct rte_mbuf *tmp = elt->buf;
+
+#ifndef NDEBUG
+ /* Poisoning. */
+ memset(&elt->buf, 0x66, sizeof(struct rte_mbuf *));
+#endif
+ /* Faster than rte_pktmbuf_free(). */
+ do {
+ struct rte_mbuf *next = tmp->next;
+
+ rte_pktmbuf_free_seg(tmp);
+ tmp = next;
+ } while (tmp != NULL);
+ }
+ RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
+ if (tso) {
+ /* Change opcode to TSO */
+ owner_opcode &= ~MLX4_OPCODE_CONFIG_CMD;
+ owner_opcode |= MLX4_OPCODE_LSO | MLX4_WQE_CTRL_RR;
+ ctrl_next = mlx4_tx_burst_tso(buf, txq, ctrl);
+ if (!ctrl_next) {
+ elt->buf = NULL;
+ break;
+ }
+ } else if (buf->nb_segs == 1) {
+ /* Validate WQE space in the send queue. */
+ if (sq->remain_size < MLX4_TXBB_SIZE) {
+ elt->buf = NULL;
+ break;
+ }
+ lkey = mlx4_tx_mb2mr(txq, buf);
+ if (unlikely(lkey == (uint32_t)-1)) {
+ /* MR does not exist. */
+ DEBUG("%p: unable to get MP <-> MR association",
+ (void *)txq);
+ elt->buf = NULL;
+ break;
+ }
+ mlx4_fill_tx_data_seg(dseg++, lkey,
+ rte_pktmbuf_mtod(buf, uintptr_t),
+ rte_cpu_to_be_32(buf->data_len));
+ /* Set WQE size in 16-byte units. */
+ ctrl->fence_size = 0x2;
+ sq->remain_size -= MLX4_TXBB_SIZE;
+ /* Align next WQE address to the next TXBB. */
+ ctrl_next = ctrl + 0x4;
+ } else {
+ ctrl_next = mlx4_tx_burst_segs(buf, txq, ctrl);
+ if (!ctrl_next) {
+ elt->buf = NULL;
+ break;
+ }
+ }
+ /* Hold SQ ring wrap around. */
+ if ((volatile uint8_t *)ctrl_next >= sq->eob) {
+ ctrl_next = (volatile struct mlx4_wqe_ctrl_seg *)
+ ((volatile uint8_t *)ctrl_next - sq->size);
+ /* Flip HW valid ownership. */
+ sq->owner_opcode ^= 1u << MLX4_SQ_OWNER_BIT;
+ }
+ /*
+ * For raw Ethernet, the SOLICIT flag is used to indicate
+ * that no ICRC should be calculated.
+ */
+ if (--txq->elts_comp_cd == 0) {
+ /* Save the completion burst end address. */
+ elt_next->eocb = (volatile uint32_t *)ctrl_next;
+ txq->elts_comp_cd = txq->elts_comp_cd_init;
+ srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |
+ MLX4_WQE_CTRL_CQ_UPDATE);
+ } else {
+ srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);
+ }
+ /* Enable HW checksum offload if requested */
+ if (txq->csum &&
+ (buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) {
+ const uint64_t is_tunneled = (buf->ol_flags &
+ (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN));
+
+ if (is_tunneled && txq->csum_l2tun) {
+ owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM |
+ MLX4_WQE_CTRL_IL4_HDR_CSUM;
+ if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ srcrb.flags |=
+ RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM);
+ } else {
+ srcrb.flags |=
+ RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM |
+ MLX4_WQE_CTRL_TCP_UDP_CSUM);
+ }
+ }
+ if (txq->lb) {
+ /*
+ * Copy destination MAC address to the WQE, this allows
+ * loopback in eSwitch, so that VFs and PF can
+ * communicate with each other.
+ */
+ srcrb.flags16[0] = *(rte_pktmbuf_mtod(buf, uint16_t *));
+ ctrl->imm = *(rte_pktmbuf_mtod_offset(buf, uint32_t *,
+ sizeof(uint16_t)));
+ } else {
+ ctrl->imm = 0;
+ }
+ ctrl->srcrb_flags = srcrb.flags;
+ /*
+ * Make sure descriptor is fully written before
+ * setting ownership bit (because HW can start
+ * executing as soon as we do).
+ */
+ rte_io_wmb();
+ ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode);
+ elt->buf = buf;
+ bytes_sent += buf->pkt_len;
+ ctrl = ctrl_next;
+ elt = elt_next;
+ }
+ /* Take a shortcut if nothing must be sent. */
+ if (unlikely(i == 0))
+ return 0;
+ /* Save WQE address of the next Tx burst element. */
+ elt->wqe = ctrl;
+ /* Increment send statistics counters. */
+ txq->stats.opackets += i;
+ txq->stats.obytes += bytes_sent;
+ /* Make sure that descriptors are written before doorbell record. */
+ rte_wmb();
+ /* Ring QP doorbell. */
+ rte_write32(txq->msq.doorbell_qpn, txq->msq.db);
+ txq->elts_head += i;
+ return i;
+}
+
+/**
+ * Translate Rx completion flags to packet type.
+ *
+ * @param[in] cqe
+ * Pointer to CQE.
+ *
+ * @return
+ * Packet type for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe,
+ uint32_t l2tun_offload)
+{
+ uint8_t idx = 0;
+ uint32_t pinfo = rte_be_to_cpu_32(cqe->vlan_my_qpn);
+ uint32_t status = rte_be_to_cpu_32(cqe->status);
+
+ /*
+ * The index to the array should have:
+ * bit[7] - MLX4_CQE_L2_TUNNEL
+ * bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
+ */
+ if (l2tun_offload && (pinfo & MLX4_CQE_L2_TUNNEL))
+ idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) |
+ ((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19);
+ /*
+ * The index to the array should have:
+ * bit[5] - MLX4_CQE_STATUS_UDP
+ * bit[4] - MLX4_CQE_STATUS_TCP
+ * bit[3] - MLX4_CQE_STATUS_IPV4OPT
+ * bit[2] - MLX4_CQE_STATUS_IPV6
+ * bit[1] - MLX4_CQE_STATUS_IPF
+ * bit[0] - MLX4_CQE_STATUS_IPV4
+ * giving a total of up to 256 entries.
+ */
+ idx |= ((status & MLX4_CQE_STATUS_PTYPE_MASK) >> 22);
+ if (status & MLX4_CQE_STATUS_IPV6)
+ idx |= ((status & MLX4_CQE_STATUS_IPV6F) >> 11);
+ return mlx4_ptype_table[idx];
+}
+
+/**
+ * Translate Rx completion flags to offload flags.
+ *
+ * @param flags
+ * Rx completion flags returned by mlx4_cqe_flags().
+ * @param csum
+ * Whether Rx checksums are enabled.
+ * @param csum_l2tun
+ * Whether Rx L2 tunnel checksums are enabled.
+ *
+ * @return
+ * Offload flags (ol_flags) in mbuf format.
+ */
+static inline uint32_t
+rxq_cq_to_ol_flags(uint32_t flags, int csum, int csum_l2tun)
+{
+ uint32_t ol_flags = 0;
+
+ if (csum)
+ ol_flags |=
+ mlx4_transpose(flags,
+ MLX4_CQE_STATUS_IP_HDR_CSUM_OK,
+ PKT_RX_IP_CKSUM_GOOD) |
+ mlx4_transpose(flags,
+ MLX4_CQE_STATUS_TCP_UDP_CSUM_OK,
+ PKT_RX_L4_CKSUM_GOOD);
+ if ((flags & MLX4_CQE_L2_TUNNEL) && csum_l2tun)
+ ol_flags |=
+ mlx4_transpose(flags,
+ MLX4_CQE_L2_TUNNEL_IPOK,
+ PKT_RX_IP_CKSUM_GOOD) |
+ mlx4_transpose(flags,
+ MLX4_CQE_L2_TUNNEL_L4_CSUM,
+ PKT_RX_L4_CKSUM_GOOD);
+ return ol_flags;
+}
+
+/**
+ * Extract checksum information from CQE flags.
+ *
+ * @param cqe
+ * Pointer to CQE structure.
+ * @param csum
+ * Whether Rx checksums are enabled.
+ * @param csum_l2tun
+ * Whether Rx L2 tunnel checksums are enabled.
+ *
+ * @return
+ * CQE checksum information.
+ */
+static inline uint32_t
+mlx4_cqe_flags(volatile struct mlx4_cqe *cqe, int csum, int csum_l2tun)
+{
+ uint32_t flags = 0;
+
+ /*
+ * The relevant bits are in different locations on their
+ * CQE fields therefore we can join them in one 32bit
+ * variable.
+ */
+ if (csum)
+ flags = (rte_be_to_cpu_32(cqe->status) &
+ MLX4_CQE_STATUS_IPV4_CSUM_OK);
+ if (csum_l2tun)
+ flags |= (rte_be_to_cpu_32(cqe->vlan_my_qpn) &
+ (MLX4_CQE_L2_TUNNEL |
+ MLX4_CQE_L2_TUNNEL_IPOK |
+ MLX4_CQE_L2_TUNNEL_L4_CSUM |
+ MLX4_CQE_L2_TUNNEL_IPV4));
+ return flags;
+}
+
+/**
+ * Poll one CQE from CQ.
+ *
+ * @param rxq
+ * Pointer to the receive queue structure.
+ * @param[out] out
+ * Just polled CQE.
+ *
+ * @return
+ * Number of bytes of the CQE, 0 in case there is no completion.
+ */
+static unsigned int
+mlx4_cq_poll_one(struct rxq *rxq, volatile struct mlx4_cqe **out)
+{
+ int ret = 0;
+ volatile struct mlx4_cqe *cqe = NULL;
+ struct mlx4_cq *cq = &rxq->mcq;
+
+ cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cq->cons_index);
+ if (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
+ !!(cq->cons_index & cq->cqe_cnt))
+ goto out;
+ /*
+ * Make sure we read CQ entry contents after we've checked the
+ * ownership bit.
+ */
+ rte_rmb();
+ assert(!(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK));
+ assert((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) !=
+ MLX4_CQE_OPCODE_ERROR);
+ ret = rte_be_to_cpu_32(cqe->byte_cnt);
+ ++cq->cons_index;
+out:
+ *out = cqe;
+ return ret;
+}
+
+/**
+ * DPDK callback for Rx with scattered packets support.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to Rx queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct rxq *rxq = dpdk_rxq;
+ const uint32_t wr_cnt = (1 << rxq->elts_n) - 1;
+ const uint16_t sges_n = rxq->sges_n;
+ struct rte_mbuf *pkt = NULL;
+ struct rte_mbuf *seg = NULL;
+ unsigned int i = 0;
+ uint32_t rq_ci = rxq->rq_ci << sges_n;
+ int len = 0;
+
+ while (pkts_n) {
+ volatile struct mlx4_cqe *cqe;
+ uint32_t idx = rq_ci & wr_cnt;
+ struct rte_mbuf *rep = (*rxq->elts)[idx];
+ volatile struct mlx4_wqe_data_seg *scat = &(*rxq->wqes)[idx];
+
+ /* Update the 'next' pointer of the previous segment. */
+ if (pkt)
+ seg->next = rep;
+ seg = rep;
+ rte_prefetch0(seg);
+ rte_prefetch0(scat);
+ rep = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(rep == NULL)) {
+ ++rxq->stats.rx_nombuf;
+ if (!pkt) {
+ /*
+ * No buffers before we even started,
+ * bail out silently.
+ */
+ break;
+ }
+ while (pkt != seg) {
+ assert(pkt != (*rxq->elts)[idx]);
+ rep = pkt->next;
+ pkt->next = NULL;
+ pkt->nb_segs = 1;
+ rte_mbuf_raw_free(pkt);
+ pkt = rep;
+ }
+ break;
+ }
+ if (!pkt) {
+ /* Looking for the new packet. */
+ len = mlx4_cq_poll_one(rxq, &cqe);
+ if (!len) {
+ rte_mbuf_raw_free(rep);
+ break;
+ }
+ if (unlikely(len < 0)) {
+ /* Rx error, packet is likely too large. */
+ rte_mbuf_raw_free(rep);
+ ++rxq->stats.idropped;
+ goto skip;
+ }
+ pkt = seg;
+ assert(len >= (rxq->crc_present << 2));
+ /* Update packet information. */
+ pkt->packet_type =
+ rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
+ pkt->ol_flags = PKT_RX_RSS_HASH;
+ pkt->hash.rss = cqe->immed_rss_invalid;
+ if (rxq->crc_present)
+ len -= ETHER_CRC_LEN;
+ pkt->pkt_len = len;
+ if (rxq->csum | rxq->csum_l2tun) {
+ uint32_t flags =
+ mlx4_cqe_flags(cqe,
+ rxq->csum,
+ rxq->csum_l2tun);
+
+ pkt->ol_flags =
+ rxq_cq_to_ol_flags(flags,
+ rxq->csum,
+ rxq->csum_l2tun);
+ }
+ }
+ rep->nb_segs = 1;
+ rep->port = rxq->port_id;
+ rep->data_len = seg->data_len;
+ rep->data_off = seg->data_off;
+ (*rxq->elts)[idx] = rep;
+ /*
+ * Fill NIC descriptor with the new buffer. The lkey and size
+ * of the buffers are already known, only the buffer address
+ * changes.
+ */
+ scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
+ /* If there's only one MR, no need to replace LKey in WQE. */
+ if (unlikely(mlx4_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+ scat->lkey = mlx4_rx_mb2mr(rxq, rep);
+ if (len > seg->data_len) {
+ len -= seg->data_len;
+ ++pkt->nb_segs;
+ ++rq_ci;
+ continue;
+ }
+ /* The last segment. */
+ seg->data_len = len;
+ /* Increment bytes counter. */
+ rxq->stats.ibytes += pkt->pkt_len;
+ /* Return packet. */
+ *(pkts++) = pkt;
+ pkt = NULL;
+ --pkts_n;
+ ++i;
+skip:
+ /* Align consumer index to the next stride. */
+ rq_ci >>= sges_n;
+ ++rq_ci;
+ rq_ci <<= sges_n;
+ }
+ if (unlikely(i == 0 && (rq_ci >> sges_n) == rxq->rq_ci))
+ return 0;
+ /* Update the consumer index. */
+ rxq->rq_ci = rq_ci >> sges_n;
+ rte_wmb();
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+ *rxq->mcq.set_ci_db =
+ rte_cpu_to_be_32(rxq->mcq.cons_index & MLX4_CQ_DB_CI_MASK);
+ /* Increment packets counter. */
+ rxq->stats.ipackets += i;
+ return i;
+}
+
+/**
+ * Dummy DPDK callback for Tx.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ *
+ * @param dpdk_txq
+ * Generic pointer to Tx queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ (void)dpdk_txq;
+ (void)pkts;
+ (void)pkts_n;
+ return 0;
+}
+
+/**
+ * Dummy DPDK callback for Rx.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to Rx queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ (void)dpdk_rxq;
+ (void)pkts;
+ (void)pkts_n;
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.h
new file mode 100644
index 00000000..ffa8abfc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef MLX4_RXTX_H_
+#define MLX4_RXTX_H_
+
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx4dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_ethdev_driver.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+
+#include "mlx4.h"
+#include "mlx4_prm.h"
+#include "mlx4_mr.h"
+
+/** Rx queue counters. */
+struct mlx4_rxq_stats {
+ unsigned int idx; /**< Mapping index. */
+ uint64_t ipackets; /**< Total of successfully received packets. */
+ uint64_t ibytes; /**< Total of successfully received bytes. */
+ uint64_t idropped; /**< Total of packets dropped when Rx ring full. */
+ uint64_t rx_nombuf; /**< Total of Rx mbuf allocation failures. */
+};
+
+/** Rx queue descriptor. */
+struct rxq {
+ struct priv *priv; /**< Back pointer to private data. */
+ struct rte_mempool *mp; /**< Memory pool for allocations. */
+ struct ibv_cq *cq; /**< Completion queue. */
+ struct ibv_wq *wq; /**< Work queue. */
+ struct ibv_comp_channel *channel; /**< Rx completion channel. */
+ uint16_t rq_ci; /**< Saved RQ consumer index. */
+ uint16_t port_id; /**< Port ID for incoming packets. */
+ uint16_t sges_n; /**< Number of segments per packet (log2 value). */
+ uint16_t elts_n; /**< Mbuf queue size (log2 value). */
+ struct mlx4_mr_ctrl mr_ctrl; /* MR control descriptor. */
+ struct rte_mbuf *(*elts)[]; /**< Rx elements. */
+ volatile struct mlx4_wqe_data_seg (*wqes)[]; /**< HW queue entries. */
+ volatile uint32_t *rq_db; /**< RQ doorbell record. */
+ uint32_t csum:1; /**< Enable checksum offloading. */
+ uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
+ uint32_t crc_present:1; /**< CRC must be subtracted. */
+ uint32_t l2tun_offload:1; /**< L2 tunnel offload is enabled. */
+ struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
+ struct mlx4_rxq_stats stats; /**< Rx queue counters. */
+ unsigned int socket; /**< CPU socket ID for allocations. */
+ uint32_t usecnt; /**< Number of users relying on queue resources. */
+ uint8_t data[]; /**< Remaining queue resources. */
+};
+
+/** Shared flow target for Rx queues. */
+struct mlx4_rss {
+ LIST_ENTRY(mlx4_rss) next; /**< Next entry in list. */
+ struct priv *priv; /**< Back pointer to private data. */
+ uint32_t refcnt; /**< Reference count for this object. */
+ uint32_t usecnt; /**< Number of users relying on @p qp and @p ind. */
+ struct ibv_qp *qp; /**< Queue pair. */
+ struct ibv_rwq_ind_table *ind; /**< Indirection table. */
+ uint64_t fields; /**< Fields for RSS processing (Verbs format). */
+ uint8_t key[MLX4_RSS_HASH_KEY_SIZE]; /**< Hash key to use. */
+ uint16_t queues; /**< Number of target queues. */
+ uint16_t queue_id[]; /**< Target queues. */
+};
+
+/** Tx element. */
+struct txq_elt {
+ struct rte_mbuf *buf; /**< Buffer. */
+ union {
+ volatile struct mlx4_wqe_ctrl_seg *wqe; /**< SQ WQE. */
+ volatile uint32_t *eocb; /**< End of completion burst. */
+ };
+};
+
+/** Tx queue counters. */
+struct mlx4_txq_stats {
+ unsigned int idx; /**< Mapping index. */
+ uint64_t opackets; /**< Total of successfully sent packets. */
+ uint64_t obytes; /**< Total of successfully sent bytes. */
+ uint64_t odropped; /**< Total number of packets failed to transmit. */
+};
+
+/** Tx queue descriptor. */
+struct txq {
+ struct mlx4_sq msq; /**< Info for directly manipulating the SQ. */
+ struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
+ unsigned int elts_head; /**< Current index in (*elts)[]. */
+ unsigned int elts_tail; /**< First element awaiting completion. */
+ int elts_comp_cd; /**< Countdown for next completion. */
+ unsigned int elts_comp_cd_init; /**< Initial value for countdown. */
+ unsigned int elts_n; /**< (*elts)[] length. */
+ struct mlx4_mr_ctrl mr_ctrl; /* MR control descriptor. */
+ struct txq_elt (*elts)[]; /**< Tx elements. */
+ struct mlx4_txq_stats stats; /**< Tx queue counters. */
+ uint32_t max_inline; /**< Max inline send size. */
+ uint32_t csum:1; /**< Enable checksum offloading. */
+ uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
+ uint32_t lb:1; /**< Whether packets should be looped back by eSwitch. */
+ uint8_t *bounce_buf;
+ /**< Memory used for storing the first DWORD of data TXBBs. */
+ struct priv *priv; /**< Back pointer to private data. */
+ unsigned int socket; /**< CPU socket ID for allocations. */
+ struct ibv_cq *cq; /**< Completion queue. */
+ struct ibv_qp *qp; /**< Queue pair. */
+ uint8_t data[]; /**< Remaining queue resources. */
+};
+
+/* mlx4_rxq.c */
+
+uint8_t mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE];
+int mlx4_rss_init(struct priv *priv);
+void mlx4_rss_deinit(struct priv *priv);
+struct mlx4_rss *mlx4_rss_get(struct priv *priv, uint64_t fields,
+ const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
+ uint16_t queues, const uint16_t queue_id[]);
+void mlx4_rss_put(struct mlx4_rss *rss);
+int mlx4_rss_attach(struct mlx4_rss *rss);
+void mlx4_rss_detach(struct mlx4_rss *rss);
+int mlx4_rxq_attach(struct rxq *rxq);
+void mlx4_rxq_detach(struct rxq *rxq);
+uint64_t mlx4_get_rx_port_offloads(struct priv *priv);
+uint64_t mlx4_get_rx_queue_offloads(struct priv *priv);
+int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp);
+void mlx4_rx_queue_release(void *dpdk_rxq);
+
+/* mlx4_rxtx.c */
+
+uint16_t mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+
+/* mlx4_txq.c */
+
+uint64_t mlx4_get_tx_port_offloads(struct priv *priv);
+int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_txconf *conf);
+void mlx4_tx_queue_release(void *dpdk_txq);
+
+/* mlx4_mr.c */
+
+void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl);
+uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr);
+uint32_t mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr);
+
+/**
+ * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
+ * as mempool is pre-configured and static.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Address to search.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx4_rx_addr2mr(struct rxq *rxq, uintptr_t addr)
+{
+ struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ uint32_t lkey;
+
+ /* Linear search on MR cache array. */
+ lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX4_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /* Take slower bottom-half (Binary Search) on miss. */
+ return mlx4_rx_addr2mr_bh(rxq, addr);
+}
+
+#define mlx4_rx_mb2mr(rxq, mb) mlx4_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+
+/**
+ * Query LKey from a packet buffer for Tx. If not found, add the mempool.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Address to search.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx4_tx_addr2mr(struct txq *txq, uintptr_t addr)
+{
+ struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ uint32_t lkey;
+
+ /* Check generation bit to see if there's any change on existing MRs. */
+ if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
+ mlx4_mr_flush_local_cache(mr_ctrl);
+ /* Linear search on MR cache array. */
+ lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX4_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /* Take slower bottom-half (binary search) on miss. */
+ return mlx4_tx_addr2mr_bh(txq, addr);
+}
+
+#define mlx4_tx_mb2mr(rxq, mb) mlx4_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+
+#endif /* MLX4_RXTX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_txq.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_txq.c
new file mode 100644
index 00000000..9aa7440d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_txq.c
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+/**
+ * @file
+ * Tx queues configuration for mlx4 driver.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <inttypes.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+
+#include "mlx4.h"
+#include "mlx4_glue.h"
+#include "mlx4_prm.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+/**
+ * Free Tx queue elements.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ */
+static void
+mlx4_txq_free_elts(struct txq *txq)
+{
+ unsigned int elts_head = txq->elts_head;
+ unsigned int elts_tail = txq->elts_tail;
+ struct txq_elt (*elts)[txq->elts_n] = txq->elts;
+ unsigned int elts_m = txq->elts_n - 1;
+
+ DEBUG("%p: freeing WRs", (void *)txq);
+ while (elts_tail != elts_head) {
+ struct txq_elt *elt = &(*elts)[elts_tail++ & elts_m];
+
+ assert(elt->buf != NULL);
+ rte_pktmbuf_free(elt->buf);
+ elt->buf = NULL;
+ elt->wqe = NULL;
+ }
+ txq->elts_tail = txq->elts_head;
+}
+
+/**
+ * Retrieves information needed in order to directly access the Tx queue.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param mlxdv
+ * Pointer to device information for this Tx queue.
+ */
+static void
+mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
+{
+ struct mlx4_sq *sq = &txq->msq;
+ struct mlx4_cq *cq = &txq->mcq;
+ struct mlx4dv_qp *dqp = mlxdv->qp.out;
+ struct mlx4dv_cq *dcq = mlxdv->cq.out;
+
+ /* Total length, including headroom and spare WQEs. */
+ sq->size = (uint32_t)dqp->rq.offset - (uint32_t)dqp->sq.offset;
+ sq->buf = (uint8_t *)dqp->buf.buf + dqp->sq.offset;
+ sq->eob = sq->buf + sq->size;
+ uint32_t headroom_size = 2048 + (1 << dqp->sq.wqe_shift);
+ /* Continuous headroom size bytes must always stay freed. */
+ sq->remain_size = sq->size - headroom_size;
+ sq->owner_opcode = MLX4_OPCODE_SEND | (0u << MLX4_SQ_OWNER_BIT);
+ sq->stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |
+ (0u << MLX4_SQ_OWNER_BIT));
+ sq->db = dqp->sdb;
+ sq->doorbell_qpn = dqp->doorbell_qpn;
+ cq->buf = dcq->buf.buf;
+ cq->cqe_cnt = dcq->cqe_cnt;
+ cq->set_ci_db = dcq->set_ci_db;
+ cq->cqe_64 = (dcq->cqe_size & 64) ? 1 : 0;
+}
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Tx offloads.
+ */
+uint64_t
+mlx4_get_tx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (priv->hw_csum) {
+ offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ }
+ if (priv->tso)
+ offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (priv->hw_csum_l2tun) {
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ if (priv->tso)
+ offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ }
+ return offloads;
+}
+
+/**
+ * DPDK callback to configure a Tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Tx queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4dv_obj mlxdv;
+ struct mlx4dv_qp dv_qp;
+ struct mlx4dv_cq dv_cq;
+ struct txq_elt (*elts)[rte_align32pow2(desc)];
+ struct ibv_qp_init_attr qp_init_attr;
+ struct txq *txq;
+ uint8_t *bounce_buf;
+ struct mlx4_malloc_vec vec[] = {
+ {
+ .align = RTE_CACHE_LINE_SIZE,
+ .size = sizeof(*txq),
+ .addr = (void **)&txq,
+ },
+ {
+ .align = RTE_CACHE_LINE_SIZE,
+ .size = sizeof(*elts),
+ .addr = (void **)&elts,
+ },
+ {
+ .align = RTE_CACHE_LINE_SIZE,
+ .size = MLX4_MAX_WQE_SIZE,
+ .addr = (void **)&bounce_buf,
+ },
+ };
+ int ret;
+ uint64_t offloads;
+
+ offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ DEBUG("%p: configuring queue %u for %u descriptors",
+ (void *)dev, idx, desc);
+
+ if (idx >= dev->data->nb_tx_queues) {
+ rte_errno = EOVERFLOW;
+ ERROR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, idx, dev->data->nb_tx_queues);
+ return -rte_errno;
+ }
+ txq = dev->data->tx_queues[idx];
+ if (txq) {
+ rte_errno = EEXIST;
+ DEBUG("%p: Tx queue %u already configured, release it first",
+ (void *)dev, idx);
+ return -rte_errno;
+ }
+ if (!desc) {
+ rte_errno = EINVAL;
+ ERROR("%p: invalid number of Tx descriptors", (void *)dev);
+ return -rte_errno;
+ }
+ if (desc != RTE_DIM(*elts)) {
+ desc = RTE_DIM(*elts);
+ WARN("%p: increased number of descriptors in Tx queue %u"
+ " to the next power of two (%u)",
+ (void *)dev, idx, desc);
+ }
+ /* Allocate and initialize Tx queue. */
+ mlx4_zmallocv_socket("TXQ", vec, RTE_DIM(vec), socket);
+ if (!txq) {
+ ERROR("%p: unable to allocate queue index %u",
+ (void *)dev, idx);
+ return -rte_errno;
+ }
+ *txq = (struct txq){
+ .priv = priv,
+ .stats = {
+ .idx = idx,
+ },
+ .socket = socket,
+ .elts_n = desc,
+ .elts = elts,
+ .elts_head = 0,
+ .elts_tail = 0,
+ /*
+ * Request send completion every MLX4_PMD_TX_PER_COMP_REQ
+ * packets or at least 4 times per ring.
+ */
+ .elts_comp_cd =
+ RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
+ .elts_comp_cd_init =
+ RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
+ .csum = priv->hw_csum &&
+ (offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM)),
+ .csum_l2tun = priv->hw_csum_l2tun &&
+ (offloads &
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
+ /* Enable Tx loopback for VF devices. */
+ .lb = !!priv->vf,
+ .bounce_buf = bounce_buf,
+ };
+ txq->cq = mlx4_glue->create_cq(priv->ctx, desc, NULL, NULL, 0);
+ if (!txq->cq) {
+ rte_errno = ENOMEM;
+ ERROR("%p: CQ creation failure: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ qp_init_attr = (struct ibv_qp_init_attr){
+ .send_cq = txq->cq,
+ .recv_cq = txq->cq,
+ .cap = {
+ .max_send_wr =
+ RTE_MIN(priv->device_attr.max_qp_wr, desc),
+ .max_send_sge = 1,
+ .max_inline_data = MLX4_PMD_MAX_INLINE,
+ },
+ .qp_type = IBV_QPT_RAW_PACKET,
+ /* No completion events must occur by default. */
+ .sq_sig_all = 0,
+ };
+ txq->qp = mlx4_glue->create_qp(priv->pd, &qp_init_attr);
+ if (!txq->qp) {
+ rte_errno = errno ? errno : EINVAL;
+ ERROR("%p: QP creation failure: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ txq->max_inline = qp_init_attr.cap.max_inline_data;
+ ret = mlx4_glue->modify_qp
+ (txq->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_INIT,
+ .port_num = priv->port,
+ },
+ IBV_QP_STATE | IBV_QP_PORT);
+ if (ret) {
+ rte_errno = ret;
+ ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ ret = mlx4_glue->modify_qp
+ (txq->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_RTR,
+ },
+ IBV_QP_STATE);
+ if (ret) {
+ rte_errno = ret;
+ ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ ret = mlx4_glue->modify_qp
+ (txq->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_RTS,
+ },
+ IBV_QP_STATE);
+ if (ret) {
+ rte_errno = ret;
+ ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ /* Retrieve device queue information. */
+ mlxdv.cq.in = txq->cq;
+ mlxdv.cq.out = &dv_cq;
+ mlxdv.qp.in = txq->qp;
+ mlxdv.qp.out = &dv_qp;
+ ret = mlx4_glue->dv_init_obj(&mlxdv, MLX4DV_OBJ_QP | MLX4DV_OBJ_CQ);
+ if (ret) {
+ rte_errno = EINVAL;
+ ERROR("%p: failed to obtain information needed for"
+ " accessing the device queues", (void *)dev);
+ goto error;
+ }
+ mlx4_txq_fill_dv_obj_info(txq, &mlxdv);
+ /* Save first wqe pointer in the first element. */
+ (&(*txq->elts)[0])->wqe =
+ (volatile struct mlx4_wqe_ctrl_seg *)txq->msq.buf;
+ if (mlx4_mr_btree_init(&txq->mr_ctrl.cache_bh,
+ MLX4_MR_BTREE_CACHE_N, socket)) {
+ /* rte_errno is already set. */
+ goto error;
+ }
+ /* Save pointer of global generation number to check memory event. */
+ txq->mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;
+ DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq);
+ dev->data->tx_queues[idx] = txq;
+ return 0;
+error:
+ dev->data->tx_queues[idx] = NULL;
+ ret = rte_errno;
+ mlx4_tx_queue_release(txq);
+ rte_errno = ret;
+ assert(rte_errno > 0);
+ return -rte_errno;
+}
+
+/**
+ * DPDK callback to release a Tx queue.
+ *
+ * @param dpdk_txq
+ * Generic Tx queue pointer.
+ */
+void
+mlx4_tx_queue_release(void *dpdk_txq)
+{
+ struct txq *txq = (struct txq *)dpdk_txq;
+ struct priv *priv;
+ unsigned int i;
+
+ if (txq == NULL)
+ return;
+ priv = txq->priv;
+ for (i = 0; i != priv->dev->data->nb_tx_queues; ++i)
+ if (priv->dev->data->tx_queues[i] == txq) {
+ DEBUG("%p: removing Tx queue %p from list",
+ (void *)priv->dev, (void *)txq);
+ priv->dev->data->tx_queues[i] = NULL;
+ break;
+ }
+ mlx4_txq_free_elts(txq);
+ if (txq->qp)
+ claim_zero(mlx4_glue->destroy_qp(txq->qp));
+ if (txq->cq)
+ claim_zero(mlx4_glue->destroy_cq(txq->cq));
+ mlx4_mr_btree_free(&txq->mr_ctrl.cache_bh);
+ rte_free(txq);
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.c
new file mode 100644
index 00000000..a727d703
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.c
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+/**
+ * @file
+ * Utility functions used by the mlx4 driver.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+
+#include "mlx4_utils.h"
+
+/**
+ * Make a file descriptor non-blocking.
+ *
+ * @param fd
+ * File descriptor to alter.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_fd_set_non_blocking(int fd)
+{
+ int ret = fcntl(fd, F_GETFL);
+
+ if (ret != -1 && !fcntl(fd, F_SETFL, ret | O_NONBLOCK))
+ return 0;
+ assert(errno);
+ rte_errno = errno;
+ return -rte_errno;
+}
+
+/**
+ * Internal helper to allocate memory once for several disparate objects.
+ *
+ * The most restrictive alignment constraint for standard objects is assumed
+ * to be sizeof(double) and is used as a default value.
+ *
+ * C11 code would include stdalign.h and use alignof(max_align_t) however
+ * we'll stick with C99 for the time being.
+ */
+static inline size_t
+mlx4_mallocv_inline(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt, int zero, int socket)
+{
+ unsigned int i;
+ size_t size;
+ size_t least;
+ uint8_t *data = NULL;
+ int fill = !vec[0].addr;
+
+fill:
+ size = 0;
+ least = 0;
+ for (i = 0; i < cnt; ++i) {
+ size_t align = (uintptr_t)vec[i].align;
+
+ if (!align) {
+ align = sizeof(double);
+ } else if (!rte_is_power_of_2(align)) {
+ rte_errno = EINVAL;
+ goto error;
+ }
+ if (least < align)
+ least = align;
+ align = RTE_ALIGN_CEIL(size, align);
+ size = align + vec[i].size;
+ if (fill && vec[i].addr)
+ *vec[i].addr = data + align;
+ }
+ if (fill)
+ return size;
+ if (!zero)
+ data = rte_malloc_socket(type, size, least, socket);
+ else
+ data = rte_zmalloc_socket(type, size, least, socket);
+ if (data) {
+ fill = 1;
+ goto fill;
+ }
+ rte_errno = ENOMEM;
+error:
+ for (i = 0; i != cnt; ++i)
+ if (vec[i].addr)
+ *vec[i].addr = NULL;
+ return 0;
+}
+
+/**
+ * Allocate memory once for several disparate objects.
+ *
+ * This function adds iovec-like semantics (e.g. readv()) to rte_malloc().
+ * Memory is allocated once for several contiguous objects of nonuniform
+ * sizes and alignment constraints.
+ *
+ * Each entry of @p vec describes the size, alignment constraint and
+ * provides a buffer address where the resulting object pointer must be
+ * stored.
+ *
+ * The buffer of the first entry is guaranteed to point to the beginning of
+ * the allocated region and is safe to use with rte_free().
+ *
+ * NULL buffers are silently ignored.
+ *
+ * Providing a NULL buffer in the first entry prevents this function from
+ * allocating any memory but has otherwise no effect on its behavior. In
+ * this case, the contents of remaining non-NULL buffers are updated with
+ * addresses relative to zero (i.e. offsets that would have been used during
+ * the allocation).
+ *
+ * @param[in] type
+ * A string identifying the type of allocated objects (useful for debug
+ * purposes, such as identifying the cause of a memory leak). Can be NULL.
+ * @param[in, out] vec
+ * Description of objects to allocate memory for.
+ * @param cnt
+ * Number of entries in @p vec.
+ *
+ * @return
+ * Size in bytes of the allocated region including any padding. In case of
+ * error, rte_errno is set, 0 is returned and NULL is stored in the
+ * non-NULL buffers pointed by @p vec.
+ *
+ * @see struct mlx4_malloc_vec
+ * @see rte_malloc()
+ */
+size_t
+mlx4_mallocv(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt)
+{
+ return mlx4_mallocv_inline(type, vec, cnt, 0, SOCKET_ID_ANY);
+}
+
+/**
+ * Combines the semantics of mlx4_mallocv() with those of rte_zmalloc().
+ *
+ * @see mlx4_mallocv()
+ * @see rte_zmalloc()
+ */
+size_t
+mlx4_zmallocv(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt)
+{
+ return mlx4_mallocv_inline(type, vec, cnt, 1, SOCKET_ID_ANY);
+}
+
+/**
+ * Socket-aware version of mlx4_mallocv().
+ *
+ * This function takes one additional parameter.
+ *
+ * @param socket
+ * NUMA socket to allocate memory on. If SOCKET_ID_ANY is used, this
+ * function will behave the same as mlx4_mallocv().
+ *
+ * @see mlx4_mallocv()
+ * @see rte_malloc_socket()
+ */
+size_t
+mlx4_mallocv_socket(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt, int socket)
+{
+ return mlx4_mallocv_inline(type, vec, cnt, 0, socket);
+}
+
+/**
+ * Combines the semantics of mlx4_mallocv_socket() with those of
+ * mlx4_zmalloc_socket().
+ *
+ * @see mlx4_mallocv_socket()
+ * @see rte_zmalloc_socket()
+ */
+size_t
+mlx4_zmallocv_socket(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt, int socket)
+{
+ return mlx4_mallocv_inline(type, vec, cnt, 1, socket);
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.h
new file mode 100644
index 00000000..86abb3b7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef MLX4_UTILS_H_
+#define MLX4_UTILS_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+
+#include "mlx4.h"
+
+#ifndef NDEBUG
+
+/*
+ * When debugging is enabled (NDEBUG not defined), file, line and function
+ * information replace the driver name (MLX4_DRIVER_NAME) in log messages.
+ */
+
+/** Return the file name part of a path. */
+static inline const char *
+pmd_drv_log_basename(const char *s)
+{
+ const char *n = s;
+
+ while (*n)
+ if (*(n++) == '/')
+ s = n;
+ return s;
+}
+
+#define PMD_DRV_LOG(level, ...) \
+ RTE_LOG(level, PMD, \
+ RTE_FMT("%s:%u: %s(): " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ pmd_drv_log_basename(__FILE__), \
+ __LINE__, \
+ __func__, \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__)
+#define claim_zero(...) assert((__VA_ARGS__) == 0)
+
+#else /* NDEBUG */
+
+/*
+ * Like assert(), DEBUG() becomes a no-op and claim_zero() does not perform
+ * any check when debugging is disabled.
+ */
+
+#define PMD_DRV_LOG(level, ...) \
+ RTE_LOG(level, PMD, \
+ RTE_FMT(MLX4_DRIVER_NAME ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+#define DEBUG(...) (void)0
+#define claim_zero(...) (__VA_ARGS__)
+
+#endif /* NDEBUG */
+
+#define INFO(...) PMD_DRV_LOG(INFO, __VA_ARGS__)
+#define WARN(...) PMD_DRV_LOG(WARNING, __VA_ARGS__)
+#define ERROR(...) PMD_DRV_LOG(ERR, __VA_ARGS__)
+
+/** Allocate a buffer on the stack and fill it with a printf format string. */
+#define MKSTR(name, ...) \
+ char name[snprintf(NULL, 0, __VA_ARGS__) + 1]; \
+ \
+ snprintf(name, sizeof(name), __VA_ARGS__)
+
+/** Generate a string out of the provided arguments. */
+#define MLX4_STR(...) # __VA_ARGS__
+
+/** Similar to MLX4_STR() with enclosed macros expanded first. */
+#define MLX4_STR_EXPAND(...) MLX4_STR(__VA_ARGS__)
+
+/** Object description used with mlx4_mallocv() and similar functions. */
+struct mlx4_malloc_vec {
+ size_t align; /**< Alignment constraint (power of 2), 0 if unknown. */
+ size_t size; /**< Object size. */
+ void **addr; /**< Storage for allocation address. */
+};
+
+/* mlx4_utils.c */
+
+int mlx4_fd_set_non_blocking(int fd);
+size_t mlx4_mallocv(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt);
+size_t mlx4_zmallocv(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt);
+size_t mlx4_mallocv_socket(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt, int socket);
+size_t mlx4_zmallocv_socket(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt, int socket);
+
+#endif /* MLX4_UTILS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx4/rte_pmd_mlx4_version.map b/src/spdk/dpdk/drivers/net/mlx4/rte_pmd_mlx4_version.map
new file mode 100644
index 00000000..ef353984
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/rte_pmd_mlx4_version.map
@@ -0,0 +1,4 @@
+DPDK_2.0 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/mlx5/Makefile b/src/spdk/dpdk/drivers/net/mlx5/Makefile
new file mode 100644
index 00000000..2e70dec5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/Makefile
@@ -0,0 +1,422 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2015 6WIND S.A.
+# Copyright 2015 Mellanox Technologies, Ltd
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# Library name.
+LIB = librte_pmd_mlx5.a
+LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION)
+LIB_GLUE_BASE = librte_pmd_mlx5_glue.so
+LIB_GLUE_VERSION = 18.05.0
+
+# Sources.
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c
+ifneq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_glue.c
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxq.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_txq.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx.c
+ifneq ($(filter y,$(CONFIG_RTE_ARCH_X86_64) \
+ $(CONFIG_RTE_ARCH_ARM64)),)
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx_vec.c
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_trigger.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxmode.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_vlan.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl_flow.c
+
+ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
+INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)
+endif
+
+# Basic CFLAGS.
+CFLAGS += -O3
+CFLAGS += -std=c11 -Wall -Wextra
+CFLAGS += -g
+CFLAGS += -I.
+CFLAGS += -D_BSD_SOURCE
+CFLAGS += -D_DEFAULT_SOURCE
+CFLAGS += -D_XOPEN_SOURCE=600
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -Wno-strict-prototypes
+ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
+CFLAGS += -DMLX5_GLUE='"$(LIB_GLUE)"'
+CFLAGS += -DMLX5_GLUE_VERSION='"$(LIB_GLUE_VERSION)"'
+CFLAGS_mlx5_glue.o += -fPIC
+LDLIBS += -ldl
+else
+LDLIBS += -libverbs -lmlx5
+endif
+LDLIBS += -lmnl
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+# A few warnings cannot be avoided in external headers.
+CFLAGS += -Wno-error=cast-qual
+
+EXPORT_MAP := rte_pmd_mlx5_version.map
+LIBABIVER := 1
+
+# memseg walk is not part of stable API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# DEBUG which is usually provided on the command-line may enable
+# CONFIG_RTE_LIBRTE_MLX5_DEBUG.
+ifeq ($(DEBUG),1)
+CONFIG_RTE_LIBRTE_MLX5_DEBUG := y
+endif
+
+# User-defined CFLAGS.
+ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DEBUG),y)
+CFLAGS += -pedantic -UNDEBUG -DPEDANTIC
+else
+CFLAGS += -DNDEBUG -UPEDANTIC
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
+# Generate and clean-up mlx5_autoconf.h.
+
+export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS
+export AUTO_CONFIG_CFLAGS = -Wno-error
+
+ifndef V
+AUTOCONF_OUTPUT := >/dev/null
+endif
+
+mlx5_autoconf.h.new: FORCE
+
+mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
+ $Q $(RM) -f -- '$@'
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_TUNNEL_SUPPORT \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_MPLS_SUPPORT \
+ infiniband/verbs.h \
+ enum IBV_FLOW_SPEC_MPLS \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_WQ_FLAG_RX_END_PADDING \
+ infiniband/verbs.h \
+ enum IBV_WQ_FLAG_RX_END_PADDING \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_SWP \
+ infiniband/mlx5dv.h \
+ type 'struct mlx5dv_sw_parsing_caps' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_MPW \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_CQE_128B_COMP \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_ETHTOOL_LINK_MODE_25G \
+ /usr/include/linux/ethtool.h \
+ enum ETHTOOL_LINK_MODE_25000baseCR_Full_BIT \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_ETHTOOL_LINK_MODE_50G \
+ /usr/include/linux/ethtool.h \
+ enum ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_ETHTOOL_LINK_MODE_100G \
+ /usr/include/linux/ethtool.h \
+ enum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \
+ infiniband/verbs.h \
+ type 'struct ibv_counter_set_init_attr' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NL_NLDEV \
+ rdma/rdma_netlink.h \
+ enum RDMA_NL_NLDEV \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_CMD_GET \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_CMD_GET \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_CMD_PORT_GET \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_CMD_PORT_GET \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_DEV_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_DEV_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_DEV_NAME \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_DEV_NAME \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_PORT_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_PORT_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_NDEV_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_PHYS_SWITCH_ID \
+ linux/if_link.h \
+ enum IFLA_PHYS_SWITCH_ID \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_PHYS_PORT_NAME \
+ linux/if_link.h \
+ enum IFLA_PHYS_PORT_NAME \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_ACT \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_ACT \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_FLAGS \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_FLAGS \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_TYPE \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_TYPE \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IP_PROTO \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IP_PROTO \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV4_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV4_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV4_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV4_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV4_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV4_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV6_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV6_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV6_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV6_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV6_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV6_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_UDP_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_UDP_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_UDP_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_UDP_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_UDP_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_UDP_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_UDP_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_VLAN_ID \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_VLAN_ID \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_VLAN_PRIO \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_VLAN_PRIO \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_VLAN_ETH_TYPE \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_ACT_VLAN \
+ linux/tc_act/tc_vlan.h \
+ enum TCA_VLAN_PUSH_VLAN_PRIORITY \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_STATIC_ASSERT \
+ /usr/include/assert.h \
+ define static_assert \
+ $(AUTOCONF_OUTPUT)
+
+# Create mlx5_autoconf.h or update it in case it differs from the new one.
+
+mlx5_autoconf.h: mlx5_autoconf.h.new
+ $Q [ -f '$@' ] && \
+ cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \
+ mv '$<' '$@'
+
+$(SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD):.c=.o): mlx5_autoconf.h
+
+# Generate dependency plug-in for rdma-core when the PMD must not be linked
+# directly, so that applications do not inherit this dependency.
+
+ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
+
+$(LIB): $(LIB_GLUE)
+
+ifeq ($(LINK_USING_CC),1)
+GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS))
+else
+GLUE_LDFLAGS := $(LDFLAGS)
+endif
+$(LIB_GLUE): mlx5_glue.o
+ $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \
+ -Wl,-h,$(LIB_GLUE) \
+ -shared -o $@ $< -libverbs -lmlx5
+
+mlx5_glue.o: mlx5_autoconf.h
+
+endif
+
+clean_mlx5: FORCE
+ $Q rm -f -- mlx5_autoconf.h mlx5_autoconf.h.new
+ $Q rm -f -- mlx5_glue.o $(LIB_GLUE_BASE)*
+
+clean: clean_mlx5
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5.c
new file mode 100644
index 00000000..ec63bc6e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5.c
@@ -0,0 +1,1690 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#include <stddef.h>
+#include <unistd.h>
+#include <string.h>
+#include <assert.h>
+#include <dlfcn.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <net/if.h>
+#include <sys/mman.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_malloc.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_eal_memconfig.h>
+#include <rte_kvargs.h>
+#include <rte_rwlock.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+#include "mlx5_glue.h"
+#include "mlx5_mr.h"
+
+/* Device parameter to enable RX completion queue compression. */
+#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
+
+/* Device parameter to enable Multi-Packet Rx queue. */
+#define MLX5_RX_MPRQ_EN "mprq_en"
+
+/* Device parameter to configure log 2 of the number of strides for MPRQ. */
+#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"
+
+/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
+#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"
+
+/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
+#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"
+
+/* Device parameter to configure inline send. */
+#define MLX5_TXQ_INLINE "txq_inline"
+
+/*
+ * Device parameter to configure the number of TX queues threshold for
+ * enabling inline send.
+ */
+#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"
+
+/* Device parameter to enable multi-packet send WQEs. */
+#define MLX5_TXQ_MPW_EN "txq_mpw_en"
+
+/* Device parameter to include 2 dsegs in the title WQEBB. */
+#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"
+
+/* Device parameter to limit the size of inlining packet. */
+#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
+
+/* Device parameter to enable hardware Tx vector. */
+#define MLX5_TX_VEC_EN "tx_vec_en"
+
+/* Device parameter to enable hardware Rx vector. */
+#define MLX5_RX_VEC_EN "rx_vec_en"
+
+/* Allow L3 VXLAN flow creation. */
+#define MLX5_L3_VXLAN_EN "l3_vxlan_en"
+
+/* Activate Netlink support in VF mode. */
+#define MLX5_VF_NL_EN "vf_nl_en"
+
+/* Select port representors to instantiate. */
+#define MLX5_REPRESENTOR "representor"
+
+#ifndef HAVE_IBV_MLX5_MOD_MPW
+#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
+#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
+#endif
+
+#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
+#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
+#endif
+
+static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
+
+/* Shared memory between primary and secondary processes. */
+struct mlx5_shared_data *mlx5_shared_data;
+
+/* Spinlock for mlx5_shared_data allocation. */
+static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
+
+/** Driver-specific log messages type. */
+int mlx5_logtype;
+
+/**
+ * Prepare shared data between primary and secondary process.
+ */
+static void
+mlx5_prepare_shared_data(void)
+{
+ const struct rte_memzone *mz;
+
+ rte_spinlock_lock(&mlx5_shared_data_lock);
+ if (mlx5_shared_data == NULL) {
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* Allocate shared memory. */
+ mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
+ sizeof(*mlx5_shared_data),
+ SOCKET_ID_ANY, 0);
+ } else {
+ /* Lookup allocated shared memory. */
+ mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
+ }
+ if (mz == NULL)
+ rte_panic("Cannot allocate mlx5 shared data\n");
+ mlx5_shared_data = mz->addr;
+ /* Initialize shared data. */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ LIST_INIT(&mlx5_shared_data->mem_event_cb_list);
+ rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock);
+ }
+ rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+ mlx5_mr_mem_event_cb, NULL);
+ }
+ rte_spinlock_unlock(&mlx5_shared_data_lock);
+}
+
+/**
+ * Retrieve integer value from environment variable.
+ *
+ * @param[in] name
+ * Environment variable name.
+ *
+ * @return
+ * Integer value, 0 if the variable is not set.
+ */
+int
+mlx5_getenv_int(const char *name)
+{
+ const char *val = getenv(name);
+
+ if (val == NULL)
+ return 0;
+ return atoi(val);
+}
+
+/**
+ * Verbs callback to allocate a memory. This function should allocate the space
+ * according to the size provided residing inside a huge page.
+ * Please note that all allocation must respect the alignment from libmlx5
+ * (i.e. currently sysconf(_SC_PAGESIZE)).
+ *
+ * @param[in] size
+ * The size in bytes of the memory to allocate.
+ * @param[in] data
+ * A pointer to the callback data.
+ *
+ * @return
+ * Allocated buffer, NULL otherwise and rte_errno is set.
+ */
+static void *
+mlx5_alloc_verbs_buf(size_t size, void *data)
+{
+ struct priv *priv = data;
+ void *ret;
+ size_t alignment = sysconf(_SC_PAGESIZE);
+ unsigned int socket = SOCKET_ID_ANY;
+
+ if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
+ const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
+
+ socket = ctrl->socket;
+ } else if (priv->verbs_alloc_ctx.type ==
+ MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
+ const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
+
+ socket = ctrl->socket;
+ }
+ assert(data != NULL);
+ ret = rte_malloc_socket(__func__, size, alignment, socket);
+ if (!ret && size)
+ rte_errno = ENOMEM;
+ return ret;
+}
+
+/**
+ * Verbs callback to free a memory.
+ *
+ * @param[in] ptr
+ * A pointer to the memory to free.
+ * @param[in] data
+ * A pointer to the callback data.
+ */
+static void
+mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
+{
+ assert(data != NULL);
+ rte_free(ptr);
+}
+
+/**
+ * DPDK callback to close the device.
+ *
+ * Destroy all queues and objects, free memory.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mlx5_dev_close(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+ int ret;
+
+ DRV_LOG(DEBUG, "port %u closing device \"%s\"",
+ dev->data->port_id,
+ ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
+ /* In case mlx5_dev_stop() has not been called. */
+ mlx5_dev_interrupt_handler_uninstall(dev);
+ mlx5_traffic_disable(dev);
+ mlx5_flow_flush(dev, NULL);
+ /* Prevent crashes when queues are still in use. */
+ dev->rx_pkt_burst = removed_rx_burst;
+ dev->tx_pkt_burst = removed_tx_burst;
+ if (priv->rxqs != NULL) {
+ /* XXX race condition if mlx5_rx_burst() is still running. */
+ usleep(1000);
+ for (i = 0; (i != priv->rxqs_n); ++i)
+ mlx5_rxq_release(dev, i);
+ priv->rxqs_n = 0;
+ priv->rxqs = NULL;
+ }
+ if (priv->txqs != NULL) {
+ /* XXX race condition if mlx5_tx_burst() is still running. */
+ usleep(1000);
+ for (i = 0; (i != priv->txqs_n); ++i)
+ mlx5_txq_release(dev, i);
+ priv->txqs_n = 0;
+ priv->txqs = NULL;
+ }
+ mlx5_mprq_free_mp(dev);
+ mlx5_mr_release(dev);
+ if (priv->pd != NULL) {
+ assert(priv->ctx != NULL);
+ claim_zero(mlx5_glue->dealloc_pd(priv->pd));
+ claim_zero(mlx5_glue->close_device(priv->ctx));
+ } else
+ assert(priv->ctx == NULL);
+ if (priv->rss_conf.rss_key != NULL)
+ rte_free(priv->rss_conf.rss_key);
+ if (priv->reta_idx != NULL)
+ rte_free(priv->reta_idx);
+ if (priv->primary_socket)
+ mlx5_socket_uninit(dev);
+ if (priv->config.vf)
+ mlx5_nl_mac_addr_flush(dev);
+ if (priv->nl_socket_route >= 0)
+ close(priv->nl_socket_route);
+ if (priv->nl_socket_rdma >= 0)
+ close(priv->nl_socket_rdma);
+ if (priv->mnl_socket)
+ mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+ ret = mlx5_hrxq_ibv_verify(dev);
+ if (ret)
+ DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
+ dev->data->port_id);
+ ret = mlx5_ind_table_ibv_verify(dev);
+ if (ret)
+ DRV_LOG(WARNING, "port %u some indirection table still remain",
+ dev->data->port_id);
+ ret = mlx5_rxq_ibv_verify(dev);
+ if (ret)
+ DRV_LOG(WARNING, "port %u some Verbs Rx queue still remain",
+ dev->data->port_id);
+ ret = mlx5_rxq_verify(dev);
+ if (ret)
+ DRV_LOG(WARNING, "port %u some Rx queues still remain",
+ dev->data->port_id);
+ ret = mlx5_txq_ibv_verify(dev);
+ if (ret)
+ DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
+ dev->data->port_id);
+ ret = mlx5_txq_verify(dev);
+ if (ret)
+ DRV_LOG(WARNING, "port %u some Tx queues still remain",
+ dev->data->port_id);
+ ret = mlx5_flow_verify(dev);
+ if (ret)
+ DRV_LOG(WARNING, "port %u some flows still remain",
+ dev->data->port_id);
+ if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
+ unsigned int c = 0;
+ unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
+ uint16_t port_id[i];
+
+ i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
+ while (i--) {
+ struct priv *opriv =
+ rte_eth_devices[port_id[i]].data->dev_private;
+
+ if (!opriv ||
+ opriv->domain_id != priv->domain_id ||
+ &rte_eth_devices[port_id[i]] == dev)
+ continue;
+ ++c;
+ }
+ if (!c)
+ claim_zero(rte_eth_switch_domain_free(priv->domain_id));
+ }
+ memset(priv, 0, sizeof(*priv));
+ priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+}
+
+const struct eth_dev_ops mlx5_dev_ops = {
+ .dev_configure = mlx5_dev_configure,
+ .dev_start = mlx5_dev_start,
+ .dev_stop = mlx5_dev_stop,
+ .dev_set_link_down = mlx5_set_link_down,
+ .dev_set_link_up = mlx5_set_link_up,
+ .dev_close = mlx5_dev_close,
+ .promiscuous_enable = mlx5_promiscuous_enable,
+ .promiscuous_disable = mlx5_promiscuous_disable,
+ .allmulticast_enable = mlx5_allmulticast_enable,
+ .allmulticast_disable = mlx5_allmulticast_disable,
+ .link_update = mlx5_link_update,
+ .stats_get = mlx5_stats_get,
+ .stats_reset = mlx5_stats_reset,
+ .xstats_get = mlx5_xstats_get,
+ .xstats_reset = mlx5_xstats_reset,
+ .xstats_get_names = mlx5_xstats_get_names,
+ .dev_infos_get = mlx5_dev_infos_get,
+ .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
+ .vlan_filter_set = mlx5_vlan_filter_set,
+ .rx_queue_setup = mlx5_rx_queue_setup,
+ .tx_queue_setup = mlx5_tx_queue_setup,
+ .rx_queue_release = mlx5_rx_queue_release,
+ .tx_queue_release = mlx5_tx_queue_release,
+ .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
+ .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
+ .mac_addr_remove = mlx5_mac_addr_remove,
+ .mac_addr_add = mlx5_mac_addr_add,
+ .mac_addr_set = mlx5_mac_addr_set,
+ .set_mc_addr_list = mlx5_set_mc_addr_list,
+ .mtu_set = mlx5_dev_set_mtu,
+ .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
+ .vlan_offload_set = mlx5_vlan_offload_set,
+ .reta_update = mlx5_dev_rss_reta_update,
+ .reta_query = mlx5_dev_rss_reta_query,
+ .rss_hash_update = mlx5_rss_hash_update,
+ .rss_hash_conf_get = mlx5_rss_hash_conf_get,
+ .filter_ctrl = mlx5_dev_filter_ctrl,
+ .rx_descriptor_status = mlx5_rx_descriptor_status,
+ .tx_descriptor_status = mlx5_tx_descriptor_status,
+ .rx_queue_intr_enable = mlx5_rx_intr_enable,
+ .rx_queue_intr_disable = mlx5_rx_intr_disable,
+ .is_removed = mlx5_is_removed,
+};
+
+static const struct eth_dev_ops mlx5_dev_sec_ops = {
+ .stats_get = mlx5_stats_get,
+ .stats_reset = mlx5_stats_reset,
+ .xstats_get = mlx5_xstats_get,
+ .xstats_reset = mlx5_xstats_reset,
+ .xstats_get_names = mlx5_xstats_get_names,
+ .dev_infos_get = mlx5_dev_infos_get,
+ .rx_descriptor_status = mlx5_rx_descriptor_status,
+ .tx_descriptor_status = mlx5_tx_descriptor_status,
+};
+
+/* Available operators in flow isolated mode. */
+const struct eth_dev_ops mlx5_dev_ops_isolate = {
+ .dev_configure = mlx5_dev_configure,
+ .dev_start = mlx5_dev_start,
+ .dev_stop = mlx5_dev_stop,
+ .dev_set_link_down = mlx5_set_link_down,
+ .dev_set_link_up = mlx5_set_link_up,
+ .dev_close = mlx5_dev_close,
+ .promiscuous_enable = mlx5_promiscuous_enable,
+ .promiscuous_disable = mlx5_promiscuous_disable,
+ .allmulticast_enable = mlx5_allmulticast_enable,
+ .allmulticast_disable = mlx5_allmulticast_disable,
+ .link_update = mlx5_link_update,
+ .stats_get = mlx5_stats_get,
+ .stats_reset = mlx5_stats_reset,
+ .xstats_get = mlx5_xstats_get,
+ .xstats_reset = mlx5_xstats_reset,
+ .xstats_get_names = mlx5_xstats_get_names,
+ .dev_infos_get = mlx5_dev_infos_get,
+ .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
+ .vlan_filter_set = mlx5_vlan_filter_set,
+ .rx_queue_setup = mlx5_rx_queue_setup,
+ .tx_queue_setup = mlx5_tx_queue_setup,
+ .rx_queue_release = mlx5_rx_queue_release,
+ .tx_queue_release = mlx5_tx_queue_release,
+ .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
+ .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
+ .mac_addr_remove = mlx5_mac_addr_remove,
+ .mac_addr_add = mlx5_mac_addr_add,
+ .mac_addr_set = mlx5_mac_addr_set,
+ .set_mc_addr_list = mlx5_set_mc_addr_list,
+ .mtu_set = mlx5_dev_set_mtu,
+ .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
+ .vlan_offload_set = mlx5_vlan_offload_set,
+ .filter_ctrl = mlx5_dev_filter_ctrl,
+ .rx_descriptor_status = mlx5_rx_descriptor_status,
+ .tx_descriptor_status = mlx5_tx_descriptor_status,
+ .rx_queue_intr_enable = mlx5_rx_intr_enable,
+ .rx_queue_intr_disable = mlx5_rx_intr_disable,
+ .is_removed = mlx5_is_removed,
+};
+
+/**
+ * Verify and store value for device argument.
+ *
+ * @param[in] key
+ * Key argument to verify.
+ * @param[in] val
+ * Value associated with key.
+ * @param opaque
+ * User data.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_args_check(const char *key, const char *val, void *opaque)
+{
+ struct mlx5_dev_config *config = opaque;
+ unsigned long tmp;
+
+ /* No-op, port representors are processed in mlx5_dev_spawn(). */
+ if (!strcmp(MLX5_REPRESENTOR, key))
+ return 0;
+ errno = 0;
+ tmp = strtoul(val, NULL, 0);
+ if (errno) {
+ rte_errno = errno;
+ DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
+ return -rte_errno;
+ }
+ if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
+ config->cqe_comp = !!tmp;
+ } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
+ config->mprq.enabled = !!tmp;
+ } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
+ config->mprq.stride_num_n = tmp;
+ } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
+ config->mprq.max_memcpy_len = tmp;
+ } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
+ config->mprq.min_rxqs_num = tmp;
+ } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
+ config->txq_inline = tmp;
+ } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
+ config->txqs_inline = tmp;
+ } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
+ config->mps = !!tmp ? config->mps : 0;
+ } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
+ config->mpw_hdr_dseg = !!tmp;
+ } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
+ config->inline_max_packet_sz = tmp;
+ } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
+ config->tx_vec_en = !!tmp;
+ } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
+ config->rx_vec_en = !!tmp;
+ } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
+ config->l3_vxlan_en = !!tmp;
+ } else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
+ config->vf_nl_en = !!tmp;
+ } else {
+ DRV_LOG(WARNING, "%s: unknown parameter", key);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ return 0;
+}
+
+/**
+ * Parse device parameters.
+ *
+ * @param config
+ * Pointer to device configuration structure.
+ * @param devargs
+ * Device arguments structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
+{
+ const char **params = (const char *[]){
+ MLX5_RXQ_CQE_COMP_EN,
+ MLX5_RX_MPRQ_EN,
+ MLX5_RX_MPRQ_LOG_STRIDE_NUM,
+ MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
+ MLX5_RXQS_MIN_MPRQ,
+ MLX5_TXQ_INLINE,
+ MLX5_TXQS_MIN_INLINE,
+ MLX5_TXQ_MPW_EN,
+ MLX5_TXQ_MPW_HDR_DSEG_EN,
+ MLX5_TXQ_MAX_INLINE_LEN,
+ MLX5_TX_VEC_EN,
+ MLX5_RX_VEC_EN,
+ MLX5_L3_VXLAN_EN,
+ MLX5_VF_NL_EN,
+ MLX5_REPRESENTOR,
+ NULL,
+ };
+ struct rte_kvargs *kvlist;
+ int ret = 0;
+ int i;
+
+ if (devargs == NULL)
+ return 0;
+ /* Following UGLY cast is done to pass checkpatch. */
+ kvlist = rte_kvargs_parse(devargs->args, params);
+ if (kvlist == NULL)
+ return 0;
+ /* Process parameters. */
+ for (i = 0; (params[i] != NULL); ++i) {
+ if (rte_kvargs_count(kvlist, params[i])) {
+ ret = rte_kvargs_process(kvlist, params[i],
+ mlx5_args_check, config);
+ if (ret) {
+ rte_errno = EINVAL;
+ rte_kvargs_free(kvlist);
+ return -rte_errno;
+ }
+ }
+ }
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
+static struct rte_pci_driver mlx5_driver;
+
+/*
+ * Reserved UAR address space for TXQ UAR(hw doorbell) mapping, process
+ * local resource used by both primary and secondary to avoid duplicate
+ * reservation.
+ * The space has to be available on both primary and secondary process,
+ * TXQ UAR maps to this area using fixed mmap w/o double check.
+ */
+static void *uar_base;
+
+static int
+find_lower_va_bound(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg)
+{
+ void **addr = arg;
+
+ if (*addr == NULL)
+ *addr = ms->addr;
+ else
+ *addr = RTE_MIN(*addr, ms->addr);
+
+ return 0;
+}
+
+/**
+ * Reserve UAR address space for primary process.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_uar_init_primary(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ void *addr = (void *)0;
+
+ if (uar_base) { /* UAR address space mapped. */
+ priv->uar_base = uar_base;
+ return 0;
+ }
+ /* find out lower bound of hugepage segments */
+ rte_memseg_walk(find_lower_va_bound, &addr);
+
+ /* keep distance to hugepages to minimize potential conflicts. */
+ addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX5_UAR_OFFSET + MLX5_UAR_SIZE));
+ /* anonymous mmap, no real memory consumption. */
+ addr = mmap(addr, MLX5_UAR_SIZE,
+ PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+ DRV_LOG(ERR,
+ "port %u failed to reserve UAR address space, please"
+ " adjust MLX5_UAR_SIZE or try --base-virtaddr",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ /* Accept either same addr or a new addr returned from mmap if target
+ * range occupied.
+ */
+ DRV_LOG(INFO, "port %u reserved UAR address space: %p",
+ dev->data->port_id, addr);
+ priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
+ uar_base = addr; /* process local, don't reserve again. */
+ return 0;
+}
+
+/**
+ * Reserve UAR address space for secondary process, align with
+ * primary process.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_uar_init_secondary(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ void *addr;
+
+ assert(priv->uar_base);
+ if (uar_base) { /* already reserved. */
+ assert(uar_base == priv->uar_base);
+ return 0;
+ }
+ /* anonymous mmap, no real memory consumption. */
+ addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
+ PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+ DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu",
+ dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+ rte_errno = ENXIO;
+ return -rte_errno;
+ }
+ if (priv->uar_base != addr) {
+ DRV_LOG(ERR,
+ "port %u UAR address %p size %llu occupied, please"
+ " adjust MLX5_UAR_OFFSET or try EAL parameter"
+ " --base-virtaddr",
+ dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+ rte_errno = ENXIO;
+ return -rte_errno;
+ }
+ uar_base = addr; /* process local, don't reserve again */
+ DRV_LOG(INFO, "port %u reserved UAR address space: %p",
+ dev->data->port_id, addr);
+ return 0;
+}
+
+/**
+ * Spawn an Ethernet device from Verbs information.
+ *
+ * @param dpdk_dev
+ * Backing DPDK device.
+ * @param ibv_dev
+ * Verbs device.
+ * @param vf
+ * If nonzero, enable VF-specific features.
+ * @param[in] switch_info
+ * Switch properties of Ethernet device.
+ *
+ * @return
+ * A valid Ethernet device object on success, NULL otherwise and rte_errno
+ * is set. The following error is defined:
+ *
+ * EBUSY: device is not supposed to be spawned.
+ */
+static struct rte_eth_dev *
+mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ struct ibv_device *ibv_dev,
+ int vf,
+ const struct mlx5_switch_info *switch_info)
+{
+ struct ibv_context *ctx;
+ struct ibv_device_attr_ex attr;
+ struct ibv_port_attr port_attr;
+ struct ibv_pd *pd = NULL;
+ struct mlx5dv_context dv_attr = { .comp_mask = 0 };
+ struct mlx5_dev_config config = {
+ .vf = !!vf,
+ .tx_vec_en = 1,
+ .rx_vec_en = 1,
+ .mpw_hdr_dseg = 0,
+ .txq_inline = MLX5_ARG_UNSET,
+ .txqs_inline = MLX5_ARG_UNSET,
+ .inline_max_packet_sz = MLX5_ARG_UNSET,
+ .vf_nl_en = 1,
+ .mprq = {
+ .enabled = 0,
+ .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
+ .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
+ .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
+ },
+ };
+ struct rte_eth_dev *eth_dev = NULL;
+ struct priv *priv = NULL;
+ int err = 0;
+ unsigned int mps;
+ unsigned int cqe_comp;
+ unsigned int tunnel_en = 0;
+ unsigned int mpls_en = 0;
+ unsigned int swp = 0;
+ unsigned int mprq = 0;
+ unsigned int mprq_min_stride_size_n = 0;
+ unsigned int mprq_max_stride_size_n = 0;
+ unsigned int mprq_min_stride_num_n = 0;
+ unsigned int mprq_max_stride_num_n = 0;
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ struct ibv_counter_set_description cs_desc = { .counter_type = 0 };
+#endif
+ struct ether_addr mac;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ int own_domain_id = 0;
+ unsigned int i;
+
+ /* Determine if this port representor is supposed to be spawned. */
+ if (switch_info->representor && dpdk_dev->devargs) {
+ struct rte_eth_devargs eth_da;
+
+ err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da);
+ if (err) {
+ rte_errno = -err;
+ DRV_LOG(ERR, "failed to process device arguments: %s",
+ strerror(rte_errno));
+ return NULL;
+ }
+ for (i = 0; i < eth_da.nb_representor_ports; ++i)
+ if (eth_da.representor_ports[i] ==
+ (uint16_t)switch_info->port_name)
+ break;
+ if (i == eth_da.nb_representor_ports) {
+ rte_errno = EBUSY;
+ return NULL;
+ }
+ }
+ /* Prepare shared data between primary and secondary process. */
+ mlx5_prepare_shared_data();
+ errno = 0;
+ ctx = mlx5_glue->open_device(ibv_dev);
+ if (!ctx) {
+ rte_errno = errno ? errno : ENODEV;
+ return NULL;
+ }
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+ dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
+#endif
+ /*
+ * Multi-packet send is supported by ConnectX-4 Lx PF as well
+ * as all ConnectX-5 devices.
+ */
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
+#endif
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
+#endif
+ mlx5_glue->dv_query_device(ctx, &dv_attr);
+ if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
+ if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
+ DRV_LOG(DEBUG, "enhanced MPW is supported");
+ mps = MLX5_MPW_ENHANCED;
+ } else {
+ DRV_LOG(DEBUG, "MPW is supported");
+ mps = MLX5_MPW;
+ }
+ } else {
+ DRV_LOG(DEBUG, "MPW isn't supported");
+ mps = MLX5_MPW_DISABLED;
+ }
+ config.mps = mps;
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+ if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+ swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
+ DRV_LOG(DEBUG, "SWP support: %u", swp);
+#endif
+ config.swp = !!swp;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
+ struct mlx5dv_striding_rq_caps mprq_caps =
+ dv_attr.striding_rq_caps;
+
+ DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
+ mprq_caps.min_single_stride_log_num_of_bytes);
+ DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
+ mprq_caps.max_single_stride_log_num_of_bytes);
+ DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
+ mprq_caps.min_single_wqe_log_num_of_strides);
+ DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
+ mprq_caps.max_single_wqe_log_num_of_strides);
+ DRV_LOG(DEBUG, "\tsupported_qpts: %d",
+ mprq_caps.supported_qpts);
+ DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
+ mprq = 1;
+ mprq_min_stride_size_n =
+ mprq_caps.min_single_stride_log_num_of_bytes;
+ mprq_max_stride_size_n =
+ mprq_caps.max_single_stride_log_num_of_bytes;
+ mprq_min_stride_num_n =
+ mprq_caps.min_single_wqe_log_num_of_strides;
+ mprq_max_stride_num_n =
+ mprq_caps.max_single_wqe_log_num_of_strides;
+ config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
+ mprq_min_stride_num_n);
+ }
+#endif
+ if (RTE_CACHE_LINE_SIZE == 128 &&
+ !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
+ cqe_comp = 0;
+ else
+ cqe_comp = 1;
+ config.cqe_comp = cqe_comp;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+ tunnel_en = ((dv_attr.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
+ (dv_attr.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
+ }
+ DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
+ tunnel_en ? "" : "not ");
+#else
+ DRV_LOG(WARNING,
+ "tunnel offloading disabled due to old OFED/rdma-core version");
+#endif
+ config.tunnel_en = tunnel_en;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ mpls_en = ((dv_attr.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
+ (dv_attr.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
+ DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
+ mpls_en ? "" : "not ");
+#else
+ DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
+ " old OFED/rdma-core version or firmware configuration");
+#endif
+ config.mpls_en = mpls_en;
+ err = mlx5_glue->query_device_ex(ctx, NULL, &attr);
+ if (err) {
+ DEBUG("ibv_query_device_ex() failed");
+ goto error;
+ }
+ if (!switch_info->representor)
+ rte_strlcpy(name, dpdk_dev->name, sizeof(name));
+ else
+ snprintf(name, sizeof(name), "%s_representor_%u",
+ dpdk_dev->name, switch_info->port_name);
+ DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (eth_dev == NULL) {
+ DRV_LOG(ERR, "can not attach rte ethdev");
+ rte_errno = ENOMEM;
+ err = rte_errno;
+ goto error;
+ }
+ eth_dev->device = dpdk_dev;
+ eth_dev->dev_ops = &mlx5_dev_sec_ops;
+ err = mlx5_uar_init_secondary(eth_dev);
+ if (err) {
+ err = rte_errno;
+ goto error;
+ }
+ /* Receive command fd from primary process */
+ err = mlx5_socket_connect(eth_dev);
+ if (err < 0) {
+ err = rte_errno;
+ goto error;
+ }
+ /* Remap UAR for Tx queues. */
+ err = mlx5_tx_uar_remap(eth_dev, err);
+ if (err) {
+ err = rte_errno;
+ goto error;
+ }
+ /*
+ * Ethdev pointer is still required as input since
+ * the primary device is not accessible from the
+ * secondary process.
+ */
+ eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
+ eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
+ claim_zero(mlx5_glue->close_device(ctx));
+ return eth_dev;
+ }
+ /* Check port status. */
+ err = mlx5_glue->query_port(ctx, 1, &port_attr);
+ if (err) {
+ DRV_LOG(ERR, "port query failed: %s", strerror(err));
+ goto error;
+ }
+ if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
+ DRV_LOG(ERR, "port is not configured in Ethernet mode");
+ err = EINVAL;
+ goto error;
+ }
+ if (port_attr.state != IBV_PORT_ACTIVE)
+ DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
+ mlx5_glue->port_state_str(port_attr.state),
+ port_attr.state);
+ /* Allocate protection domain. */
+ pd = mlx5_glue->alloc_pd(ctx);
+ if (pd == NULL) {
+ DRV_LOG(ERR, "PD allocation failure");
+ err = ENOMEM;
+ goto error;
+ }
+ priv = rte_zmalloc("ethdev private structure",
+ sizeof(*priv),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ DRV_LOG(ERR, "priv allocation failure");
+ err = ENOMEM;
+ goto error;
+ }
+ priv->ctx = ctx;
+ strncpy(priv->ibdev_name, priv->ctx->device->name,
+ sizeof(priv->ibdev_name));
+ strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
+ sizeof(priv->ibdev_path));
+ priv->device_attr = attr;
+ priv->pd = pd;
+ priv->mtu = ETHER_MTU;
+#ifndef RTE_ARCH_64
+ /* Initialize UAR access locks for 32bit implementations. */
+ rte_spinlock_init(&priv->uar_lock_cq);
+ for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+ rte_spinlock_init(&priv->uar_lock[i]);
+#endif
+ /* Some internal functions rely on Netlink sockets, open them now. */
+ priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
+ priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
+ priv->nl_sn = 0;
+ priv->representor = !!switch_info->representor;
+ priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+ priv->representor_id =
+ switch_info->representor ? switch_info->port_name : -1;
+ /*
+ * Look for sibling devices in order to reuse their switch domain
+ * if any, otherwise allocate one.
+ */
+ i = mlx5_dev_to_port_id(dpdk_dev, NULL, 0);
+ if (i > 0) {
+ uint16_t port_id[i];
+
+ i = RTE_MIN(mlx5_dev_to_port_id(dpdk_dev, port_id, i), i);
+ while (i--) {
+ const struct priv *opriv =
+ rte_eth_devices[port_id[i]].data->dev_private;
+
+ if (!opriv ||
+ opriv->domain_id ==
+ RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
+ continue;
+ priv->domain_id = opriv->domain_id;
+ break;
+ }
+ }
+ if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
+ err = rte_eth_switch_domain_alloc(&priv->domain_id);
+ if (err) {
+ err = rte_errno;
+ DRV_LOG(ERR, "unable to allocate switch domain: %s",
+ strerror(rte_errno));
+ goto error;
+ }
+ own_domain_id = 1;
+ }
+ err = mlx5_args(&config, dpdk_dev->devargs);
+ if (err) {
+ err = rte_errno;
+ DRV_LOG(ERR, "failed to process device arguments: %s",
+ strerror(rte_errno));
+ goto error;
+ }
+ config.hw_csum = !!(attr.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM);
+ DRV_LOG(DEBUG, "checksum offloading is %ssupported",
+ (config.hw_csum ? "" : "not "));
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ config.flow_counter_en = !!attr.max_counter_sets;
+ mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
+ DRV_LOG(DEBUG, "counter type = %d, num of cs = %ld, attributes = %d",
+ cs_desc.counter_type, cs_desc.num_of_cs,
+ cs_desc.attributes);
+#endif
+ config.ind_table_max_size =
+ attr.rss_caps.max_rwq_indirection_table_size;
+ /*
+ * Remove this check once DPDK supports larger/variable
+ * indirection tables.
+ */
+ if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
+ config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+ DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
+ config.ind_table_max_size);
+ config.hw_vlan_strip = !!(attr.raw_packet_caps &
+ IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
+ DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
+ (config.hw_vlan_strip ? "" : "not "));
+ config.hw_fcs_strip = !!(attr.raw_packet_caps &
+ IBV_RAW_PACKET_CAP_SCATTER_FCS);
+ DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
+ (config.hw_fcs_strip ? "" : "not "));
+#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
+ config.hw_padding = !!attr.rx_pad_end_addr_align;
+#endif
+ DRV_LOG(DEBUG, "hardware Rx end alignment padding is %ssupported",
+ (config.hw_padding ? "" : "not "));
+ config.tso = (attr.tso_caps.max_tso > 0 &&
+ (attr.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
+ if (config.tso)
+ config.tso_max_payload_sz = attr.tso_caps.max_tso;
+ if (config.mps && !mps) {
+ DRV_LOG(ERR,
+ "multi-packet send not supported on this device"
+ " (" MLX5_TXQ_MPW_EN ")");
+ err = ENOTSUP;
+ goto error;
+ }
+ DRV_LOG(INFO, "%sMPS is %s",
+ config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
+ config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ if (config.cqe_comp && !cqe_comp) {
+ DRV_LOG(WARNING, "Rx CQE compression isn't supported");
+ config.cqe_comp = 0;
+ }
+ if (config.mprq.enabled && mprq) {
+ if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
+ config.mprq.stride_num_n < mprq_min_stride_num_n) {
+ config.mprq.stride_num_n =
+ RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
+ mprq_min_stride_num_n);
+ DRV_LOG(WARNING,
+ "the number of strides"
+ " for Multi-Packet RQ is out of range,"
+ " setting default value (%u)",
+ 1 << config.mprq.stride_num_n);
+ }
+ config.mprq.min_stride_size_n = mprq_min_stride_size_n;
+ config.mprq.max_stride_size_n = mprq_max_stride_size_n;
+ } else if (config.mprq.enabled && !mprq) {
+ DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
+ config.mprq.enabled = 0;
+ }
+ eth_dev = rte_eth_dev_allocate(name);
+ if (eth_dev == NULL) {
+ DRV_LOG(ERR, "can not allocate rte ethdev");
+ err = ENOMEM;
+ goto error;
+ }
+ if (priv->representor)
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ eth_dev->data->dev_private = priv;
+ priv->dev_data = eth_dev->data;
+ eth_dev->data->mac_addrs = priv->mac;
+ eth_dev->device = dpdk_dev;
+ eth_dev->device->driver = &mlx5_driver.driver;
+ err = mlx5_uar_init_primary(eth_dev);
+ if (err) {
+ err = rte_errno;
+ goto error;
+ }
+ /* Configure the first MAC address by default. */
+ if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
+ DRV_LOG(ERR,
+ "port %u cannot get MAC address, is mlx5_en"
+ " loaded? (errno: %s)",
+ eth_dev->data->port_id, strerror(rte_errno));
+ err = ENODEV;
+ goto error;
+ }
+ DRV_LOG(INFO,
+ "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
+ eth_dev->data->port_id,
+ mac.addr_bytes[0], mac.addr_bytes[1],
+ mac.addr_bytes[2], mac.addr_bytes[3],
+ mac.addr_bytes[4], mac.addr_bytes[5]);
+#ifndef NDEBUG
+ {
+ char ifname[IF_NAMESIZE];
+
+ if (mlx5_get_ifname(eth_dev, &ifname) == 0)
+ DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
+ eth_dev->data->port_id, ifname);
+ else
+ DRV_LOG(DEBUG, "port %u ifname is unknown",
+ eth_dev->data->port_id);
+ }
+#endif
+ /* Get actual MTU if possible. */
+ err = mlx5_get_mtu(eth_dev, &priv->mtu);
+ if (err) {
+ err = rte_errno;
+ goto error;
+ }
+ DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
+ priv->mtu);
+ /* Initialize burst functions to prevent crashes before link-up. */
+ eth_dev->rx_pkt_burst = removed_rx_burst;
+ eth_dev->tx_pkt_burst = removed_tx_burst;
+ eth_dev->dev_ops = &mlx5_dev_ops;
+ /* Register MAC address. */
+ claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
+ if (vf && config.vf_nl_en)
+ mlx5_nl_mac_addr_sync(eth_dev);
+ priv->mnl_socket = mlx5_nl_flow_socket_create();
+ if (!priv->mnl_socket) {
+ err = -rte_errno;
+ DRV_LOG(WARNING,
+ "flow rules relying on switch offloads will not be"
+ " supported: cannot open libmnl socket: %s",
+ strerror(rte_errno));
+ } else {
+ struct rte_flow_error error;
+ unsigned int ifindex = mlx5_ifindex(eth_dev);
+
+ if (!ifindex) {
+ err = -rte_errno;
+ error.message =
+ "cannot retrieve network interface index";
+ } else {
+ err = mlx5_nl_flow_init(priv->mnl_socket, ifindex,
+ &error);
+ }
+ if (err) {
+ DRV_LOG(WARNING,
+ "flow rules relying on switch offloads will"
+ " not be supported: %s: %s",
+ error.message, strerror(rte_errno));
+ mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+ priv->mnl_socket = NULL;
+ }
+ }
+ TAILQ_INIT(&priv->flows);
+ TAILQ_INIT(&priv->ctrl_flows);
+ /* Hint libmlx5 to use PMD allocator for data plane resources */
+ struct mlx5dv_ctx_allocators alctr = {
+ .alloc = &mlx5_alloc_verbs_buf,
+ .free = &mlx5_free_verbs_buf,
+ .data = priv,
+ };
+ mlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
+ (void *)((uintptr_t)&alctr));
+ /* Bring Ethernet device up. */
+ DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
+ eth_dev->data->port_id);
+ mlx5_set_link_up(eth_dev);
+ /*
+ * Even though the interrupt handler is not installed yet,
+ * interrupts will still trigger on the asyn_fd from
+ * Verbs context returned by ibv_open_device().
+ */
+ mlx5_link_update(eth_dev, 0);
+ /* Store device configuration on private structure. */
+ priv->config = config;
+ /* Supported Verbs flow priority number detection. */
+ err = mlx5_flow_discover_priorities(eth_dev);
+ if (err < 0)
+ goto error;
+ priv->config.flow_prio = err;
+ /*
+ * Once the device is added to the list of memory event
+ * callback, its global MR cache table cannot be expanded
+ * on the fly because of deadlock. If it overflows, lookup
+ * should be done by searching MR list linearly, which is slow.
+ */
+ err = mlx5_mr_btree_init(&priv->mr.cache,
+ MLX5_MR_BTREE_CACHE_N * 2,
+ eth_dev->device->numa_node);
+ if (err) {
+ err = rte_errno;
+ goto error;
+ }
+ /* Add device to memory callback list. */
+ rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+ LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
+ priv, mem_event_cb);
+ rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+ return eth_dev;
+error:
+ if (priv) {
+ if (priv->nl_socket_route >= 0)
+ close(priv->nl_socket_route);
+ if (priv->nl_socket_rdma >= 0)
+ close(priv->nl_socket_rdma);
+ if (priv->mnl_socket)
+ mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+ if (own_domain_id)
+ claim_zero(rte_eth_switch_domain_free(priv->domain_id));
+ rte_free(priv);
+ }
+ if (pd)
+ claim_zero(mlx5_glue->dealloc_pd(pd));
+ if (eth_dev)
+ rte_eth_dev_release_port(eth_dev);
+ if (ctx)
+ claim_zero(mlx5_glue->close_device(ctx));
+ assert(err > 0);
+ rte_errno = err;
+ return NULL;
+}
+
+/** Data associated with devices to spawn. */
+struct mlx5_dev_spawn_data {
+ unsigned int ifindex; /**< Network interface index. */
+ struct mlx5_switch_info info; /**< Switch information. */
+ struct ibv_device *ibv_dev; /**< Associated IB device. */
+ struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
+};
+
+/**
+ * Comparison callback to sort device data.
+ *
+ * This is meant to be used with qsort().
+ *
+ * @param a[in]
+ * Pointer to pointer to first data object.
+ * @param b[in]
+ * Pointer to pointer to second data object.
+ *
+ * @return
+ * 0 if both objects are equal, less than 0 if the first argument is less
+ * than the second, greater than 0 otherwise.
+ */
+static int
+mlx5_dev_spawn_data_cmp(const void *a, const void *b)
+{
+ const struct mlx5_switch_info *si_a =
+ &((const struct mlx5_dev_spawn_data *)a)->info;
+ const struct mlx5_switch_info *si_b =
+ &((const struct mlx5_dev_spawn_data *)b)->info;
+ int ret;
+
+ /* Master device first. */
+ ret = si_b->master - si_a->master;
+ if (ret)
+ return ret;
+ /* Then representor devices. */
+ ret = si_b->representor - si_a->representor;
+ if (ret)
+ return ret;
+ /* Unidentified devices come last in no specific order. */
+ if (!si_a->representor)
+ return 0;
+ /* Order representors by name. */
+ return si_a->port_name - si_b->port_name;
+}
+
+/**
+ * DPDK callback to register a PCI device.
+ *
+ * This function spawns Ethernet devices out of a given PCI device.
+ *
+ * @param[in] pci_drv
+ * PCI driver structure (mlx5_driver).
+ * @param[in] pci_dev
+ * PCI device information.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct ibv_device **ibv_list;
+ unsigned int n = 0;
+ int vf;
+ int ret;
+
+ assert(pci_drv == &mlx5_driver);
+ errno = 0;
+ ibv_list = mlx5_glue->get_device_list(&ret);
+ if (!ibv_list) {
+ rte_errno = errno ? errno : ENOSYS;
+ DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
+ return -rte_errno;
+ }
+
+ struct ibv_device *ibv_match[ret + 1];
+
+ while (ret-- > 0) {
+ struct rte_pci_addr pci_addr;
+
+ DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
+ if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr))
+ continue;
+ if (pci_dev->addr.domain != pci_addr.domain ||
+ pci_dev->addr.bus != pci_addr.bus ||
+ pci_dev->addr.devid != pci_addr.devid ||
+ pci_dev->addr.function != pci_addr.function)
+ continue;
+ DRV_LOG(INFO, "PCI information matches for device \"%s\"",
+ ibv_list[ret]->name);
+ ibv_match[n++] = ibv_list[ret];
+ }
+ ibv_match[n] = NULL;
+
+ struct mlx5_dev_spawn_data list[n];
+ int nl_route = n ? mlx5_nl_init(NETLINK_ROUTE) : -1;
+ int nl_rdma = n ? mlx5_nl_init(NETLINK_RDMA) : -1;
+ unsigned int i;
+ unsigned int u;
+
+ /*
+ * The existence of several matching entries (n > 1) means port
+ * representors have been instantiated. No existing Verbs call nor
+ * /sys entries can tell them apart, this can only be done through
+ * Netlink calls assuming kernel drivers are recent enough to
+ * support them.
+ *
+ * In the event of identification failure through Netlink, try again
+ * through sysfs, then either:
+ *
+ * 1. No device matches (n == 0), complain and bail out.
+ * 2. A single IB device matches (n == 1) and is not a representor,
+ * assume no switch support.
+ * 3. Otherwise no safe assumptions can be made; complain louder and
+ * bail out.
+ */
+ for (i = 0; i != n; ++i) {
+ list[i].ibv_dev = ibv_match[i];
+ list[i].eth_dev = NULL;
+ if (nl_rdma < 0)
+ list[i].ifindex = 0;
+ else
+ list[i].ifindex = mlx5_nl_ifindex
+ (nl_rdma, list[i].ibv_dev->name);
+ if (nl_route < 0 ||
+ !list[i].ifindex ||
+ mlx5_nl_switch_info(nl_route, list[i].ifindex,
+ &list[i].info) ||
+ ((!list[i].info.representor && !list[i].info.master) &&
+ mlx5_sysfs_switch_info(list[i].ifindex, &list[i].info))) {
+ list[i].ifindex = 0;
+ memset(&list[i].info, 0, sizeof(list[i].info));
+ continue;
+ }
+ }
+ if (nl_rdma >= 0)
+ close(nl_rdma);
+ if (nl_route >= 0)
+ close(nl_route);
+ /* Count unidentified devices. */
+ for (u = 0, i = 0; i != n; ++i)
+ if (!list[i].info.master && !list[i].info.representor)
+ ++u;
+ if (u) {
+ if (n == 1 && u == 1) {
+ /* Case #2. */
+ DRV_LOG(INFO, "no switch support detected");
+ } else {
+ /* Case #3. */
+ DRV_LOG(ERR,
+ "unable to tell which of the matching devices"
+ " is the master (lack of kernel support?)");
+ n = 0;
+ }
+ }
+ /*
+ * Sort list to probe devices in natural order for users convenience
+ * (i.e. master first, then representors from lowest to highest ID).
+ */
+ if (n)
+ qsort(list, n, sizeof(*list), mlx5_dev_spawn_data_cmp);
+ switch (pci_dev->id.device_id) {
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
+ vf = 1;
+ break;
+ default:
+ vf = 0;
+ }
+ for (i = 0; i != n; ++i) {
+ uint32_t restore;
+
+ list[i].eth_dev = mlx5_dev_spawn
+ (&pci_dev->device, list[i].ibv_dev, vf, &list[i].info);
+ if (!list[i].eth_dev) {
+ if (rte_errno != EBUSY)
+ break;
+ /* Device is disabled, ignore it. */
+ continue;
+ }
+ restore = list[i].eth_dev->data->dev_flags;
+ rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
+ /* Restore non-PCI flags cleared by the above call. */
+ list[i].eth_dev->data->dev_flags |= restore;
+ rte_eth_dev_probing_finish(list[i].eth_dev);
+ }
+ mlx5_glue->free_device_list(ibv_list);
+ if (!n) {
+ DRV_LOG(WARNING,
+ "no Verbs device matches PCI device " PCI_PRI_FMT ","
+ " are kernel drivers loaded?",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+ rte_errno = ENOENT;
+ ret = -rte_errno;
+ } else if (i != n) {
+ DRV_LOG(ERR,
+ "probe of PCI device " PCI_PRI_FMT " aborted after"
+ " encountering an error: %s",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function,
+ strerror(rte_errno));
+ ret = -rte_errno;
+ /* Roll back. */
+ while (i--) {
+ if (!list[i].eth_dev)
+ continue;
+ mlx5_dev_close(list[i].eth_dev);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(list[i].eth_dev->data->dev_private);
+ claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
+ }
+ /* Restore original error. */
+ rte_errno = -ret;
+ } else {
+ ret = 0;
+ }
+ return ret;
+}
+
+static const struct rte_pci_id mlx5_pci_id_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
+ },
+ {
+ .vendor_id = 0
+ }
+};
+
+static struct rte_pci_driver mlx5_driver = {
+ .driver = {
+ .name = MLX5_DRIVER_NAME
+ },
+ .id_table = mlx5_pci_id_map,
+ .probe = mlx5_pci_probe,
+ .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
+};
+
+#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
+
+/**
+ * Suffix RTE_EAL_PMD_PATH with "-glue".
+ *
+ * This function performs a sanity check on RTE_EAL_PMD_PATH before
+ * suffixing its last component.
+ *
+ * @param buf[out]
+ * Output buffer, should be large enough otherwise NULL is returned.
+ * @param size
+ * Size of @p out.
+ *
+ * @return
+ * Pointer to @p buf or @p NULL in case suffix cannot be appended.
+ */
+static char *
+mlx5_glue_path(char *buf, size_t size)
+{
+ static const char *const bad[] = { "/", ".", "..", NULL };
+ const char *path = RTE_EAL_PMD_PATH;
+ size_t len = strlen(path);
+ size_t off;
+ int i;
+
+ while (len && path[len - 1] == '/')
+ --len;
+ for (off = len; off && path[off - 1] != '/'; --off)
+ ;
+ for (i = 0; bad[i]; ++i)
+ if (!strncmp(path + off, bad[i], (int)(len - off)))
+ goto error;
+ i = snprintf(buf, size, "%.*s-glue", (int)len, path);
+ if (i == -1 || (size_t)i >= size)
+ goto error;
+ return buf;
+error:
+ DRV_LOG(ERR,
+ "unable to append \"-glue\" to last component of"
+ " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
+ " please re-configure DPDK");
+ return NULL;
+}
+
+/**
+ * Initialization routine for run-time dependency on rdma-core.
+ */
+static int
+mlx5_glue_init(void)
+{
+ char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
+ const char *path[] = {
+ /*
+ * A basic security check is necessary before trusting
+ * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
+ */
+ (geteuid() == getuid() && getegid() == getgid() ?
+ getenv("MLX5_GLUE_PATH") : NULL),
+ /*
+ * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
+ * variant, otherwise let dlopen() look up libraries on its
+ * own.
+ */
+ (*RTE_EAL_PMD_PATH ?
+ mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
+ };
+ unsigned int i = 0;
+ void *handle = NULL;
+ void **sym;
+ const char *dlmsg;
+
+ while (!handle && i != RTE_DIM(path)) {
+ const char *end;
+ size_t len;
+ int ret;
+
+ if (!path[i]) {
+ ++i;
+ continue;
+ }
+ end = strpbrk(path[i], ":;");
+ if (!end)
+ end = path[i] + strlen(path[i]);
+ len = end - path[i];
+ ret = 0;
+ do {
+ char name[ret + 1];
+
+ ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
+ (int)len, path[i],
+ (!len || *(end - 1) == '/') ? "" : "/");
+ if (ret == -1)
+ break;
+ if (sizeof(name) != (size_t)ret + 1)
+ continue;
+ DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
+ name);
+ handle = dlopen(name, RTLD_LAZY);
+ break;
+ } while (1);
+ path[i] = end + 1;
+ if (!*end)
+ ++i;
+ }
+ if (!handle) {
+ rte_errno = EINVAL;
+ dlmsg = dlerror();
+ if (dlmsg)
+ DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
+ goto glue_error;
+ }
+ sym = dlsym(handle, "mlx5_glue");
+ if (!sym || !*sym) {
+ rte_errno = EINVAL;
+ dlmsg = dlerror();
+ if (dlmsg)
+ DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
+ goto glue_error;
+ }
+ mlx5_glue = *sym;
+ return 0;
+glue_error:
+ if (handle)
+ dlclose(handle);
+ DRV_LOG(WARNING,
+ "cannot initialize PMD due to missing run-time dependency on"
+ " rdma-core libraries (libibverbs, libmlx5)");
+ return -rte_errno;
+}
+
+#endif
+
+/**
+ * Driver initialization routine.
+ */
+RTE_INIT(rte_mlx5_pmd_init)
+{
+ /* Initialize driver log type. */
+ mlx5_logtype = rte_log_register("pmd.net.mlx5");
+ if (mlx5_logtype >= 0)
+ rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
+
+ /* Build the static tables for Verbs conversion. */
+ mlx5_set_ptype_table();
+ mlx5_set_cksum_table();
+ mlx5_set_swp_types_table();
+ /*
+ * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
+ * huge pages. Calling ibv_fork_init() during init allows
+ * applications to use fork() safely for purposes other than
+ * using this PMD, which is not supported in forked processes.
+ */
+ setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
+ /* Match the size of Rx completion entry to the size of a cacheline. */
+ if (RTE_CACHE_LINE_SIZE == 128)
+ setenv("MLX5_CQE_SIZE", "128", 0);
+ /*
+ * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
+ * cleanup all the Verbs resources even when the device was removed.
+ */
+ setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
+#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
+ if (mlx5_glue_init())
+ return;
+ assert(mlx5_glue);
+#endif
+#ifndef NDEBUG
+ /* Glue structure must not contain any NULL pointers. */
+ {
+ unsigned int i;
+
+ for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
+ assert(((const void *const *)mlx5_glue)[i]);
+ }
+#endif
+ if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
+ DRV_LOG(ERR,
+ "rdma-core glue \"%s\" mismatch: \"%s\" is required",
+ mlx5_glue->version, MLX5_GLUE_VERSION);
+ return;
+ }
+ mlx5_glue->fork_init();
+ rte_pci_register(&mlx5_driver);
+}
+
+RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
+RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5.h
new file mode 100644
index 00000000..a3a34cff
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5.h
@@ -0,0 +1,418 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_H_
+#define RTE_PMD_MLX5_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <limits.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <sys/queue.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_rwlock.h>
+#include <rte_interrupts.h>
+#include <rte_errno.h>
+#include <rte_flow.h>
+
+#include "mlx5_utils.h"
+#include "mlx5_mr.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+
+enum {
+ PCI_VENDOR_ID_MELLANOX = 0x15b3,
+};
+
+enum {
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4 = 0x1013,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4VF = 0x1014,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LX = 0x1015,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5 = 0x1017,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2,
+};
+
+/** Switch information returned by mlx5_nl_switch_info(). */
+struct mlx5_switch_info {
+ uint32_t master:1; /**< Master device. */
+ uint32_t representor:1; /**< Representor device. */
+ int32_t port_name; /**< Representor port name. */
+ uint64_t switch_id; /**< Switch identifier. */
+};
+
+LIST_HEAD(mlx5_dev_list, priv);
+
+/* Shared memory between primary and secondary processes. */
+struct mlx5_shared_data {
+ struct mlx5_dev_list mem_event_cb_list;
+ rte_rwlock_t mem_event_rwlock;
+};
+
+extern struct mlx5_shared_data *mlx5_shared_data;
+
+struct mlx5_xstats_ctrl {
+ /* Number of device stats. */
+ uint16_t stats_n;
+ /* Index in the device counters table. */
+ uint16_t dev_table_idx[MLX5_MAX_XSTATS];
+ uint64_t base[MLX5_MAX_XSTATS];
+};
+
+/* Flow list . */
+TAILQ_HEAD(mlx5_flows, rte_flow);
+
+/* Default PMD specific parameter value. */
+#define MLX5_ARG_UNSET (-1)
+
+/*
+ * Device configuration structure.
+ *
+ * Merged configuration from:
+ *
+ * - Device capabilities,
+ * - User device parameters disabled features.
+ */
+struct mlx5_dev_config {
+ unsigned int hw_csum:1; /* Checksum offload is supported. */
+ unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
+ unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
+ unsigned int hw_padding:1; /* End alignment padding is supported. */
+ unsigned int vf:1; /* This is a VF. */
+ unsigned int mps:2; /* Multi-packet send supported mode. */
+ unsigned int tunnel_en:1;
+ /* Whether tunnel stateless offloads are supported. */
+ unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
+ unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
+ unsigned int cqe_comp:1; /* CQE compression is enabled. */
+ unsigned int tso:1; /* Whether TSO is supported. */
+ unsigned int tx_vec_en:1; /* Tx vector is enabled. */
+ unsigned int rx_vec_en:1; /* Rx vector is enabled. */
+ unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
+ unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
+ unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */
+ unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
+ struct {
+ unsigned int enabled:1; /* Whether MPRQ is enabled. */
+ unsigned int stride_num_n; /* Number of strides. */
+ unsigned int min_stride_size_n; /* Min size of a stride. */
+ unsigned int max_stride_size_n; /* Max size of a stride. */
+ unsigned int max_memcpy_len;
+ /* Maximum packet size to memcpy Rx packets. */
+ unsigned int min_rxqs_num;
+ /* Rx queue count threshold to enable MPRQ. */
+ } mprq; /* Configurations for Multi-Packet RQ. */
+ unsigned int flow_prio; /* Number of flow priorities. */
+ unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
+ unsigned int ind_table_max_size; /* Maximum indirection table size. */
+ int txq_inline; /* Maximum packet size for inlining. */
+ int txqs_inline; /* Queue number threshold for inlining. */
+ int inline_max_packet_sz; /* Max packet size for inlining. */
+};
+
+/**
+ * Type of objet being allocated.
+ */
+enum mlx5_verbs_alloc_type {
+ MLX5_VERBS_ALLOC_TYPE_NONE,
+ MLX5_VERBS_ALLOC_TYPE_TX_QUEUE,
+ MLX5_VERBS_ALLOC_TYPE_RX_QUEUE,
+};
+
+/**
+ * Verbs allocator needs a context to know in the callback which kind of
+ * resources it is allocating.
+ */
+struct mlx5_verbs_alloc_ctx {
+ enum mlx5_verbs_alloc_type type; /* Kind of object being allocated. */
+ const void *obj; /* Pointer to the DPDK object. */
+};
+
+LIST_HEAD(mlx5_mr_list, mlx5_mr);
+
+/* Flow drop context necessary due to Verbs API. */
+struct mlx5_drop {
+ struct mlx5_hrxq *hrxq; /* Hash Rx queue queue. */
+ struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */
+};
+
+/** DPDK port to network interface index (ifindex) conversion. */
+struct mlx5_nl_flow_ptoi {
+ uint16_t port_id; /**< DPDK port ID. */
+ unsigned int ifindex; /**< Network interface index. */
+};
+
+struct mnl_socket;
+
+struct priv {
+ LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
+ struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
+ struct ibv_context *ctx; /* Verbs context. */
+ struct ibv_device_attr_ex device_attr; /* Device properties. */
+ struct ibv_pd *pd; /* Protection Domain. */
+ char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */
+ char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
+ struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
+ BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
+ /* Bit-field of MAC addresses owned by the PMD. */
+ uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */
+ unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
+ /* Device properties. */
+ uint16_t mtu; /* Configured MTU. */
+ unsigned int isolated:1; /* Whether isolated mode is enabled. */
+ unsigned int representor:1; /* Device is a port representor. */
+ uint16_t domain_id; /* Switch domain identifier. */
+ int32_t representor_id; /* Port representor identifier. */
+ /* RX/TX queues. */
+ unsigned int rxqs_n; /* RX queues array size. */
+ unsigned int txqs_n; /* TX queues array size. */
+ struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
+ struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
+ struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
+ struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
+ struct rte_intr_handle intr_handle; /* Interrupt handler. */
+ unsigned int (*reta_idx)[]; /* RETA index table. */
+ unsigned int reta_idx_n; /* RETA index size. */
+ struct mlx5_drop drop_queue; /* Flow drop queues. */
+ struct mlx5_flows flows; /* RTE Flow rules. */
+ struct mlx5_flows ctrl_flows; /* Control flow rules. */
+ LIST_HEAD(counters, mlx5_flow_counter) flow_counters;
+ /* Flow counters. */
+ struct {
+ uint32_t dev_gen; /* Generation number to flush local caches. */
+ rte_rwlock_t rwlock; /* MR Lock. */
+ struct mlx5_mr_btree cache; /* Global MR cache table. */
+ struct mlx5_mr_list mr_list; /* Registered MR list. */
+ struct mlx5_mr_list mr_free_list; /* Freed MR list. */
+ } mr;
+ LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
+ LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
+ LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
+ LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
+ LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
+ /* Verbs Indirection tables. */
+ LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
+ uint32_t link_speed_capa; /* Link speed capabilities. */
+ struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
+ int primary_socket; /* Unix socket for primary process. */
+ void *uar_base; /* Reserved address space for UAR mapping */
+ struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
+ struct mlx5_dev_config config; /* Device configuration. */
+ struct mlx5_verbs_alloc_ctx verbs_alloc_ctx;
+ /* Context for Verbs allocator. */
+ int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
+ int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
+ uint32_t nl_sn; /* Netlink message sequence number. */
+#ifndef RTE_ARCH_64
+ rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */
+ rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
+ /* UAR same-page access control required in 32bit implementations. */
+#endif
+ struct mnl_socket *mnl_socket; /* Libmnl socket. */
+};
+
+#define PORT_ID(priv) ((priv)->dev_data->port_id)
+#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
+
+/* mlx5.c */
+
+int mlx5_getenv_int(const char *);
+
+/* mlx5_ethdev.c */
+
+int mlx5_get_master_ifname(const struct rte_eth_dev *dev,
+ char (*ifname)[IF_NAMESIZE]);
+int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]);
+unsigned int mlx5_ifindex(const struct rte_eth_dev *dev);
+int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr,
+ int master);
+int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
+int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep,
+ unsigned int flags);
+int mlx5_dev_configure(struct rte_eth_dev *dev);
+void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
+const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status);
+int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
+int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
+ struct rte_pci_addr *pci_addr);
+void mlx5_dev_link_status_handler(void *arg);
+void mlx5_dev_interrupt_handler(void *arg);
+void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev);
+void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev);
+int mlx5_set_link_down(struct rte_eth_dev *dev);
+int mlx5_set_link_up(struct rte_eth_dev *dev);
+int mlx5_is_removed(struct rte_eth_dev *dev);
+eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
+eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev);
+unsigned int mlx5_dev_to_port_id(const struct rte_device *dev,
+ uint16_t *port_list,
+ unsigned int port_list_n);
+int mlx5_sysfs_switch_info(unsigned int ifindex,
+ struct mlx5_switch_info *info);
+
+/* mlx5_mac.c */
+
+int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]);
+void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
+int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index, uint32_t vmdq);
+int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr);
+int mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set, uint32_t nb_mc_addr);
+
+/* mlx5_rss.c */
+
+int mlx5_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size);
+int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+
+/* mlx5_rxmode.c */
+
+void mlx5_promiscuous_enable(struct rte_eth_dev *dev);
+void mlx5_promiscuous_disable(struct rte_eth_dev *dev);
+void mlx5_allmulticast_enable(struct rte_eth_dev *dev);
+void mlx5_allmulticast_disable(struct rte_eth_dev *dev);
+
+/* mlx5_stats.c */
+
+void mlx5_xstats_init(struct rte_eth_dev *dev);
+int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+void mlx5_stats_reset(struct rte_eth_dev *dev);
+int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
+ unsigned int n);
+void mlx5_xstats_reset(struct rte_eth_dev *dev);
+int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int n);
+
+/* mlx5_vlan.c */
+
+int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
+void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on);
+int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+
+/* mlx5_trigger.c */
+
+int mlx5_dev_start(struct rte_eth_dev *dev);
+void mlx5_dev_stop(struct rte_eth_dev *dev);
+int mlx5_traffic_enable(struct rte_eth_dev *dev);
+void mlx5_traffic_disable(struct rte_eth_dev *dev);
+int mlx5_traffic_restart(struct rte_eth_dev *dev);
+
+/* mlx5_flow.c */
+
+int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
+void mlx5_flow_print(struct rte_flow *flow);
+int mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error);
+void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list);
+int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action, void *data,
+ struct rte_flow_error *error);
+int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable,
+ struct rte_flow_error *error);
+int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list);
+void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
+int mlx5_flow_verify(struct rte_eth_dev *dev);
+int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask,
+ struct rte_flow_item_vlan *vlan_spec,
+ struct rte_flow_item_vlan *vlan_mask);
+int mlx5_ctrl_flow(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask);
+int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
+void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
+
+/* mlx5_socket.c */
+
+int mlx5_socket_init(struct rte_eth_dev *priv);
+void mlx5_socket_uninit(struct rte_eth_dev *priv);
+void mlx5_socket_handle(struct rte_eth_dev *priv);
+int mlx5_socket_connect(struct rte_eth_dev *priv);
+
+/* mlx5_nl.c */
+
+int mlx5_nl_init(int protocol);
+int mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index);
+int mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index);
+void mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev);
+void mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev);
+int mlx5_nl_promisc(struct rte_eth_dev *dev, int enable);
+int mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable);
+unsigned int mlx5_nl_ifindex(int nl, const char *name);
+int mlx5_nl_switch_info(int nl, unsigned int ifindex,
+ struct mlx5_switch_info *info);
+
+/* mlx5_nl_flow.c */
+
+int mlx5_nl_flow_transpose(void *buf,
+ size_t size,
+ const struct mlx5_nl_flow_ptoi *ptoi,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+void mlx5_nl_flow_brand(void *buf, uint32_t handle);
+int mlx5_nl_flow_create(struct mnl_socket *nl, void *buf,
+ struct rte_flow_error *error);
+int mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf,
+ struct rte_flow_error *error);
+int mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex,
+ struct rte_flow_error *error);
+struct mnl_socket *mlx5_nl_flow_socket_create(void);
+void mlx5_nl_flow_socket_destroy(struct mnl_socket *nl);
+
+#endif /* RTE_PMD_MLX5_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_defs.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_defs.h
new file mode 100644
index 00000000..f2a16795
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_defs.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_DEFS_H_
+#define RTE_PMD_MLX5_DEFS_H_
+
+#include <rte_ethdev_driver.h>
+
+#include "mlx5_autoconf.h"
+
+/* Reported driver name. */
+#define MLX5_DRIVER_NAME "net_mlx5"
+
+/* Maximum number of simultaneous unicast MAC addresses. */
+#define MLX5_MAX_UC_MAC_ADDRESSES 128
+/* Maximum number of simultaneous Multicast MAC addresses. */
+#define MLX5_MAX_MC_MAC_ADDRESSES 128
+/* Maximum number of simultaneous MAC addresses. */
+#define MLX5_MAX_MAC_ADDRESSES \
+ (MLX5_MAX_UC_MAC_ADDRESSES + MLX5_MAX_MC_MAC_ADDRESSES)
+
+/* Maximum number of simultaneous VLAN filters. */
+#define MLX5_MAX_VLAN_IDS 128
+
+/*
+ * Request TX completion every time descriptors reach this threshold since
+ * the previous request. Must be a power of two for performance reasons.
+ */
+#define MLX5_TX_COMP_THRESH 32
+
+/*
+ * Request TX completion every time the total number of WQEBBs used for inlining
+ * packets exceeds the size of WQ divided by this divisor. Better to be power of
+ * two for performance.
+ */
+#define MLX5_TX_COMP_THRESH_INLINE_DIV (1 << 3)
+
+/* Size of per-queue MR cache array for linear search. */
+#define MLX5_MR_CACHE_N 8
+
+/* Size of MR cache table for binary search. */
+#define MLX5_MR_BTREE_CACHE_N 256
+
+/*
+ * If defined, only use software counters. The PMD will never ask the hardware
+ * for these, and many of them won't be available.
+ */
+#ifndef MLX5_PMD_SOFT_COUNTERS
+#define MLX5_PMD_SOFT_COUNTERS 1
+#endif
+
+/* Alarm timeout. */
+#define MLX5_ALARM_TIMEOUT_US 100000
+
+/* Maximum number of extended statistics counters. */
+#define MLX5_MAX_XSTATS 32
+
+/* Maximum Packet headers size (L2+L3+L4) for TSO. */
+#define MLX5_MAX_TSO_HEADER 192
+
+/* Default minimum number of Tx queues for vectorized Tx. */
+#define MLX5_VPMD_MIN_TXQS 4
+
+/* Threshold of buffer replenishment for vectorized Rx. */
+#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
+ (RTE_MIN(MLX5_VPMD_RX_MAX_BURST, (unsigned int)(n) >> 2))
+
+/* Maximum size of burst for vectorized Rx. */
+#define MLX5_VPMD_RX_MAX_BURST 64U
+
+/*
+ * Maximum size of burst for vectorized Tx. This is related to the maximum size
+ * of Enhanced MPW (eMPW) WQE as vectorized Tx is supported with eMPW.
+ * Careful when changing, large value can cause WQE DS to overlap.
+ */
+#define MLX5_VPMD_TX_MAX_BURST 32U
+
+/* Number of packets vectorized Rx can simultaneously process in a loop. */
+#define MLX5_VPMD_DESCS_PER_LOOP 4
+
+/* Supported RSS */
+#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+
+/* Timeout in seconds to get a valid link status. */
+#define MLX5_LINK_STATUS_TIMEOUT 10
+
+/* Reserved address space for UAR mapping. */
+#define MLX5_UAR_SIZE (1ULL << (sizeof(uintptr_t) * 4))
+
+/* Offset of reserved UAR address space to hugepage memory. Offset is used here
+ * to minimize possibility of address next to hugepage being used by other code
+ * in either primary or secondary process, failing to map TX UAR would make TX
+ * packets invisible to HW.
+ */
+#define MLX5_UAR_OFFSET (1ULL << (sizeof(uintptr_t) * 4))
+
+/* Maximum number of UAR pages used by a port,
+ * These are the size and mask for an array of mutexes used to synchronize
+ * the access to port's UARs on platforms that do not support 64 bit writes.
+ * In such systems it is possible to issue the 64 bits DoorBells through two
+ * consecutive writes, each write 32 bits. The access to a UAR page (which can
+ * be accessible by all threads in the process) must be synchronized
+ * (for example, using a semaphore). Such a synchronization is not required
+ * when ringing DoorBells on different UAR pages.
+ * A port with 512 Tx queues uses 8, 4kBytes, UAR pages which are shared
+ * among the ports.
+ */
+#define MLX5_UAR_PAGE_NUM_MAX 64
+#define MLX5_UAR_PAGE_NUM_MASK ((MLX5_UAR_PAGE_NUM_MAX) - 1)
+
+/* Log 2 of the default number of strides per WQE for Multi-Packet RQ. */
+#define MLX5_MPRQ_STRIDE_NUM_N 6U
+
+/* Two-byte shift is disabled for Multi-Packet RQ. */
+#define MLX5_MPRQ_TWO_BYTE_SHIFT 0
+
+/*
+ * Minimum size of packet to be memcpy'd instead of being attached as an
+ * external buffer.
+ */
+#define MLX5_MPRQ_MEMCPY_DEFAULT_LEN 128
+
+/* Minimum number Rx queues to enable Multi-Packet RQ. */
+#define MLX5_MPRQ_MIN_RXQS 12
+
+/* Cache size of mempool for Multi-Packet RQ. */
+#define MLX5_MPRQ_MP_CACHE_SZ 32U
+
+/* Definition of static_assert found in /usr/include/assert.h */
+#ifndef HAVE_STATIC_ASSERT
+#define static_assert _Static_assert
+#endif
+
+#endif /* RTE_PMD_MLX5_DEFS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_ethdev.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_ethdev.c
new file mode 100644
index 00000000..34c5b95e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_ethdev.c
@@ -0,0 +1,1372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#define _GNU_SOURCE
+
+#include <stddef.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <dirent.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+#include <fcntl.h>
+#include <stdalign.h>
+#include <sys/un.h>
+#include <time.h>
+
+#include <rte_atomic.h>
+#include <rte_ethdev_driver.h>
+#include <rte_bus_pci.h>
+#include <rte_mbuf.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+#include <rte_rwlock.h>
+
+#include "mlx5.h"
+#include "mlx5_glue.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_utils.h"
+
+/* Supported speed values found in /usr/include/linux/ethtool.h */
+#ifndef HAVE_SUPPORTED_40000baseKR4_Full
+#define SUPPORTED_40000baseKR4_Full (1 << 23)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseCR4_Full
+#define SUPPORTED_40000baseCR4_Full (1 << 24)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseSR4_Full
+#define SUPPORTED_40000baseSR4_Full (1 << 25)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseLR4_Full
+#define SUPPORTED_40000baseLR4_Full (1 << 26)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseKR4_Full
+#define SUPPORTED_56000baseKR4_Full (1 << 27)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseCR4_Full
+#define SUPPORTED_56000baseCR4_Full (1 << 28)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseSR4_Full
+#define SUPPORTED_56000baseSR4_Full (1 << 29)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseLR4_Full
+#define SUPPORTED_56000baseLR4_Full (1 << 30)
+#endif
+
+/* Add defines in case the running kernel is not the same as user headers. */
+#ifndef ETHTOOL_GLINKSETTINGS
+struct ethtool_link_settings {
+ uint32_t cmd;
+ uint32_t speed;
+ uint8_t duplex;
+ uint8_t port;
+ uint8_t phy_address;
+ uint8_t autoneg;
+ uint8_t mdio_support;
+ uint8_t eth_to_mdix;
+ uint8_t eth_tp_mdix_ctrl;
+ int8_t link_mode_masks_nwords;
+ uint32_t reserved[8];
+ uint32_t link_mode_masks[];
+};
+
+#define ETHTOOL_GLINKSETTINGS 0x0000004c
+#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
+#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
+#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
+#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
+#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
+#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
+#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
+#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
+#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
+#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
+#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
+#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
+#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
+#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
+#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
+#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_25G
+#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
+#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
+#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_50G
+#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
+#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_100G
+#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
+#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
+#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
+#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
+#endif
+
+/**
+ * Get master interface name from private structure.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[out] ifname
+ * Interface name output buffer.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_get_master_ifname(const struct rte_eth_dev *dev,
+ char (*ifname)[IF_NAMESIZE])
+{
+ struct priv *priv = dev->data->dev_private;
+ DIR *dir;
+ struct dirent *dent;
+ unsigned int dev_type = 0;
+ unsigned int dev_port_prev = ~0u;
+ char match[IF_NAMESIZE] = "";
+
+ {
+ MKSTR(path, "%s/device/net", priv->ibdev_path);
+
+ dir = opendir(path);
+ if (dir == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ }
+ while ((dent = readdir(dir)) != NULL) {
+ char *name = dent->d_name;
+ FILE *file;
+ unsigned int dev_port;
+ int r;
+
+ if ((name[0] == '.') &&
+ ((name[1] == '\0') ||
+ ((name[1] == '.') && (name[2] == '\0'))))
+ continue;
+
+ MKSTR(path, "%s/device/net/%s/%s",
+ priv->ibdev_path, name,
+ (dev_type ? "dev_id" : "dev_port"));
+
+ file = fopen(path, "rb");
+ if (file == NULL) {
+ if (errno != ENOENT)
+ continue;
+ /*
+ * Switch to dev_id when dev_port does not exist as
+ * is the case with Linux kernel versions < 3.15.
+ */
+try_dev_id:
+ match[0] = '\0';
+ if (dev_type)
+ break;
+ dev_type = 1;
+ dev_port_prev = ~0u;
+ rewinddir(dir);
+ continue;
+ }
+ r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
+ fclose(file);
+ if (r != 1)
+ continue;
+ /*
+ * Switch to dev_id when dev_port returns the same value for
+ * all ports. May happen when using a MOFED release older than
+ * 3.0 with a Linux kernel >= 3.15.
+ */
+ if (dev_port == dev_port_prev)
+ goto try_dev_id;
+ dev_port_prev = dev_port;
+ if (dev_port == 0)
+ strlcpy(match, name, sizeof(match));
+ }
+ closedir(dir);
+ if (match[0] == '\0') {
+ rte_errno = ENOENT;
+ return -rte_errno;
+ }
+ strncpy(*ifname, match, sizeof(*ifname));
+ return 0;
+}
+
+/**
+ * Get interface name from private structure.
+ *
+ * This is a port representor-aware version of mlx5_get_master_ifname().
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[out] ifname
+ * Interface name output buffer.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int ifindex =
+ priv->nl_socket_rdma >= 0 ?
+ mlx5_nl_ifindex(priv->nl_socket_rdma, priv->ibdev_name) : 0;
+
+ if (!ifindex) {
+ if (!priv->representor)
+ return mlx5_get_master_ifname(dev, ifname);
+ rte_errno = ENXIO;
+ return -rte_errno;
+ }
+ if (if_indextoname(ifindex, &(*ifname)[0]))
+ return 0;
+ rte_errno = errno;
+ return -rte_errno;
+}
+
+/**
+ * Get the interface index from device name.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Nonzero interface index on success, zero otherwise and rte_errno is set.
+ */
+unsigned int
+mlx5_ifindex(const struct rte_eth_dev *dev)
+{
+ char ifname[IF_NAMESIZE];
+ unsigned int ifindex;
+
+ if (mlx5_get_ifname(dev, &ifname))
+ return 0;
+ ifindex = if_nametoindex(ifname);
+ if (!ifindex)
+ rte_errno = errno;
+ return ifindex;
+}
+
+/**
+ * Perform ifreq ioctl() on associated Ethernet device.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param req
+ * Request number to pass to ioctl().
+ * @param[out] ifr
+ * Interface request structure output buffer.
+ * @param master
+ * When device is a port representor, perform request on master device
+ * instead.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr,
+ int master)
+{
+ int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
+ int ret = 0;
+
+ if (sock == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ if (master)
+ ret = mlx5_get_master_ifname(dev, &ifr->ifr_name);
+ else
+ ret = mlx5_get_ifname(dev, &ifr->ifr_name);
+ if (ret)
+ goto error;
+ ret = ioctl(sock, req, ifr);
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ close(sock);
+ return 0;
+error:
+ close(sock);
+ return -rte_errno;
+}
+
+/**
+ * Get device MTU.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] mtu
+ * MTU value output buffer.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
+{
+ struct ifreq request;
+ int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request, 0);
+
+ if (ret)
+ return ret;
+ *mtu = request.ifr_mtu;
+ return 0;
+}
+
+/**
+ * Set device MTU.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mtu
+ * MTU value to set.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct ifreq request = { .ifr_mtu = mtu, };
+
+ return mlx5_ifreq(dev, SIOCSIFMTU, &request, 0);
+}
+
+/**
+ * Set device flags.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param keep
+ * Bitmask for flags that must remain untouched.
+ * @param flags
+ * Bitmask for flags to modify.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
+{
+ struct ifreq request;
+ int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request, 0);
+
+ if (ret)
+ return ret;
+ request.ifr_flags &= keep;
+ request.ifr_flags |= flags & ~keep;
+ return mlx5_ifreq(dev, SIOCSIFFLAGS, &request, 0);
+}
+
+/**
+ * DPDK callback for Ethernet device configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_configure(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int rxqs_n = dev->data->nb_rx_queues;
+ unsigned int txqs_n = dev->data->nb_tx_queues;
+ unsigned int i;
+ unsigned int j;
+ unsigned int reta_idx_n;
+ const uint8_t use_app_rss_key =
+ !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+ int ret = 0;
+
+ if (use_app_rss_key &&
+ (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
+ MLX5_RSS_HASH_KEY_LEN)) {
+ DRV_LOG(ERR, "port %u RSS key len must be %s Bytes long",
+ dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN));
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ priv->rss_conf.rss_key =
+ rte_realloc(priv->rss_conf.rss_key,
+ MLX5_RSS_HASH_KEY_LEN, 0);
+ if (!priv->rss_conf.rss_key) {
+ DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
+ dev->data->port_id, rxqs_n);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ memcpy(priv->rss_conf.rss_key,
+ use_app_rss_key ?
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
+ rss_hash_default_key,
+ MLX5_RSS_HASH_KEY_LEN);
+ priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN;
+ priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+ priv->rxqs = (void *)dev->data->rx_queues;
+ priv->txqs = (void *)dev->data->tx_queues;
+ if (txqs_n != priv->txqs_n) {
+ DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
+ dev->data->port_id, priv->txqs_n, txqs_n);
+ priv->txqs_n = txqs_n;
+ }
+ if (rxqs_n > priv->config.ind_table_max_size) {
+ DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
+ dev->data->port_id, rxqs_n);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (rxqs_n == priv->rxqs_n)
+ return 0;
+ DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
+ dev->data->port_id, priv->rxqs_n, rxqs_n);
+ priv->rxqs_n = rxqs_n;
+ /* If the requested number of RX queues is not a power of two, use the
+ * maximum indirection table size for better balancing.
+ * The result is always rounded to the next power of two. */
+ reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
+ priv->config.ind_table_max_size :
+ rxqs_n));
+ ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
+ if (ret)
+ return ret;
+ /* When the number of RX queues is not a power of two, the remaining
+ * table entries are padded with reused WQs and hashes are not spread
+ * uniformly. */
+ for (i = 0, j = 0; (i != reta_idx_n); ++i) {
+ (*priv->reta_idx)[i] = j;
+ if (++j == rxqs_n)
+ j = 0;
+ }
+ return 0;
+}
+
+/**
+ * Sets default tuning parameters.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] info
+ * Info structure output buffer.
+ */
+static void
+mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ /* Minimum CPU utilization. */
+ info->default_rxportconf.ring_size = 256;
+ info->default_txportconf.ring_size = 256;
+ info->default_rxportconf.burst_size = 64;
+ info->default_txportconf.burst_size = 64;
+ if (priv->link_speed_capa & ETH_LINK_SPEED_100G) {
+ info->default_rxportconf.nb_queues = 16;
+ info->default_txportconf.nb_queues = 16;
+ if (dev->data->nb_rx_queues > 2 ||
+ dev->data->nb_tx_queues > 2) {
+ /* Max Throughput. */
+ info->default_rxportconf.ring_size = 2048;
+ info->default_txportconf.ring_size = 2048;
+ }
+ } else {
+ info->default_rxportconf.nb_queues = 8;
+ info->default_txportconf.nb_queues = 8;
+ if (dev->data->nb_rx_queues > 2 ||
+ dev->data->nb_tx_queues > 2) {
+ /* Max Throughput. */
+ info->default_rxportconf.ring_size = 4096;
+ info->default_txportconf.ring_size = 4096;
+ }
+ }
+}
+
+/**
+ * DPDK callback to get information about the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] info
+ * Info structure output buffer.
+ */
+void
+mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ unsigned int max;
+ char ifname[IF_NAMESIZE];
+
+ /* FIXME: we should ask the device for these values. */
+ info->min_rx_bufsize = 32;
+ info->max_rx_pktlen = 65536;
+ /*
+ * Since we need one CQ per QP, the limit is the minimum number
+ * between the two values.
+ */
+ max = RTE_MIN(priv->device_attr.orig_attr.max_cq,
+ priv->device_attr.orig_attr.max_qp);
+ /* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */
+ if (max >= 65535)
+ max = 65535;
+ info->max_rx_queues = max;
+ info->max_tx_queues = max;
+ info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
+ info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
+ info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
+ info->rx_queue_offload_capa);
+ info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
+ if (mlx5_get_ifname(dev, &ifname) == 0)
+ info->if_index = if_nametoindex(ifname);
+ info->reta_size = priv->reta_idx_n ?
+ priv->reta_idx_n : config->ind_table_max_size;
+ info->hash_key_size = MLX5_RSS_HASH_KEY_LEN;
+ info->speed_capa = priv->link_speed_capa;
+ info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
+ mlx5_set_default_params(dev, info);
+ info->switch_info.name = dev->data->name;
+ info->switch_info.domain_id = priv->domain_id;
+ info->switch_info.port_id = priv->representor_id;
+ if (priv->representor) {
+ unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
+ uint16_t port_id[i];
+
+ i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
+ while (i--) {
+ struct priv *opriv =
+ rte_eth_devices[port_id[i]].data->dev_private;
+
+ if (!opriv ||
+ opriv->representor ||
+ opriv->domain_id != priv->domain_id)
+ continue;
+ /*
+ * Override switch name with that of the master
+ * device.
+ */
+ info->switch_info.name = opriv->dev_data->name;
+ break;
+ }
+ }
+}
+
+/**
+ * Get supported packet types.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * A pointer to the supported Packet types array.
+ */
+const uint32_t *
+mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to rxq_cq_to_pkt_type() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == mlx5_rx_burst ||
+ dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
+ dev->rx_pkt_burst == mlx5_rx_burst_vec)
+ return ptypes;
+ return NULL;
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] link
+ * Storage for current link status.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ethtool_cmd edata = {
+ .cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
+ };
+ struct ifreq ifr;
+ struct rte_eth_link dev_link;
+ int link_speed = 0;
+ int ret;
+
+ ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
+ }
+ memset(&dev_link, 0, sizeof(dev_link));
+ dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
+ (ifr.ifr_flags & IFF_RUNNING));
+ ifr.ifr_data = (void *)&edata;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
+ }
+ link_speed = ethtool_cmd_speed(&edata);
+ if (link_speed == -1)
+ dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ else
+ dev_link.link_speed = link_speed;
+ priv->link_speed_capa = 0;
+ if (edata.supported & SUPPORTED_Autoneg)
+ priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
+ if (edata.supported & (SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseKX_Full))
+ priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+ if (edata.supported & SUPPORTED_10000baseKR_Full)
+ priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+ if (edata.supported & (SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_40000baseCR4_Full |
+ SUPPORTED_40000baseSR4_Full |
+ SUPPORTED_40000baseLR4_Full))
+ priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+ dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
+ ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+ dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ if ((dev_link.link_speed && !dev_link.link_status) ||
+ (!dev_link.link_speed && dev_link.link_status)) {
+ rte_errno = EAGAIN;
+ return -rte_errno;
+ }
+ *link = dev_link;
+ return 0;
+}
+
+/**
+ * Retrieve physical link information (unlocked version using new ioctl).
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] link
+ * Storage for current link status.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
+ struct ifreq ifr;
+ struct rte_eth_link dev_link;
+ uint64_t sc;
+ int ret;
+
+ ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
+ }
+ memset(&dev_link, 0, sizeof(dev_link));
+ dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
+ (ifr.ifr_flags & IFF_RUNNING));
+ ifr.ifr_data = (void *)&gcmd;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(DEBUG,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
+ " failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
+ }
+ gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
+
+ alignas(struct ethtool_link_settings)
+ uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
+ sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
+ struct ethtool_link_settings *ecmd = (void *)data;
+
+ *ecmd = gcmd;
+ ifr.ifr_data = (void *)ecmd;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(DEBUG,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
+ " failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
+ }
+ dev_link.link_speed = ecmd->speed;
+ sc = ecmd->link_mode_masks[0] |
+ ((uint64_t)ecmd->link_mode_masks[1] << 32);
+ priv->link_speed_capa = 0;
+ if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
+ priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_20G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_56G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_25G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_50G;
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
+ priv->link_speed_capa |= ETH_LINK_SPEED_100G;
+ dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
+ ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+ dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ if ((dev_link.link_speed && !dev_link.link_status) ||
+ (!dev_link.link_speed && dev_link.link_status)) {
+ rte_errno = EAGAIN;
+ return -rte_errno;
+ }
+ *link = dev_link;
+ return 0;
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ * Wait for request completion.
+ *
+ * @return
+ * 0 if link status was not updated, positive if it was, a negative errno
+ * value otherwise and rte_errno is set.
+ */
+int
+mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ int ret;
+ struct rte_eth_link dev_link;
+ time_t start_time = time(NULL);
+
+ do {
+ ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
+ if (ret)
+ ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
+ if (ret == 0)
+ break;
+ /* Handle wait to complete situation. */
+ if (wait_to_complete && ret == -EAGAIN) {
+ if (abs((int)difftime(time(NULL), start_time)) <
+ MLX5_LINK_STATUS_TIMEOUT) {
+ usleep(0);
+ continue;
+ } else {
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+ } else if (ret < 0) {
+ return ret;
+ }
+ } while (wait_to_complete);
+ ret = !!memcmp(&dev->data->dev_link, &dev_link,
+ sizeof(struct rte_eth_link));
+ dev->data->dev_link = dev_link;
+ return ret;
+}
+
+/**
+ * DPDK callback to change the MTU.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param in_mtu
+ * New MTU.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint16_t kern_mtu = 0;
+ int ret;
+
+ ret = mlx5_get_mtu(dev, &kern_mtu);
+ if (ret)
+ return ret;
+ /* Set kernel interface MTU first. */
+ ret = mlx5_set_mtu(dev, mtu);
+ if (ret)
+ return ret;
+ ret = mlx5_get_mtu(dev, &kern_mtu);
+ if (ret)
+ return ret;
+ if (kern_mtu == mtu) {
+ priv->mtu = mtu;
+ DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
+ dev->data->port_id, mtu);
+ return 0;
+ }
+ rte_errno = EAGAIN;
+ return -rte_errno;
+}
+
+/**
+ * DPDK callback to get flow control status.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] fc_conf
+ * Flow control output buffer.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct ifreq ifr;
+ struct ethtool_pauseparam ethpause = {
+ .cmd = ETHTOOL_GPAUSEPARAM
+ };
+ int ret;
+
+ ifr.ifr_data = (void *)&ethpause;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
+ " %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
+ }
+ fc_conf->autoneg = ethpause.autoneg;
+ if (ethpause.rx_pause && ethpause.tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (ethpause.rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (ethpause.tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+ return 0;
+}
+
+/**
+ * DPDK callback to modify flow control parameters.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] fc_conf
+ * Flow control parameters.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct ifreq ifr;
+ struct ethtool_pauseparam ethpause = {
+ .cmd = ETHTOOL_SPAUSEPARAM
+ };
+ int ret;
+
+ ifr.ifr_data = (void *)&ethpause;
+ ethpause.autoneg = fc_conf->autoneg;
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_RX_PAUSE))
+ ethpause.rx_pause = 1;
+ else
+ ethpause.rx_pause = 0;
+
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_TX_PAUSE))
+ ethpause.tx_pause = 1;
+ else
+ ethpause.tx_pause = 0;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 0);
+ if (ret) {
+ DRV_LOG(WARNING,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
+ " failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * Get PCI information from struct ibv_device.
+ *
+ * @param device
+ * Pointer to Ethernet device structure.
+ * @param[out] pci_addr
+ * PCI bus address output buffer.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
+ struct rte_pci_addr *pci_addr)
+{
+ FILE *file;
+ char line[32];
+ MKSTR(path, "%s/device/uevent", device->ibdev_path);
+
+ file = fopen(path, "rb");
+ if (file == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ while (fgets(line, sizeof(line), file) == line) {
+ size_t len = strlen(line);
+ int ret;
+
+ /* Truncate long lines. */
+ if (len == (sizeof(line) - 1))
+ while (line[(len - 1)] != '\n') {
+ ret = fgetc(file);
+ if (ret == EOF)
+ break;
+ line[(len - 1)] = ret;
+ }
+ /* Extract information. */
+ if (sscanf(line,
+ "PCI_SLOT_NAME="
+ "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
+ &pci_addr->domain,
+ &pci_addr->bus,
+ &pci_addr->devid,
+ &pci_addr->function) == 4) {
+ ret = 0;
+ break;
+ }
+ }
+ fclose(file);
+ return 0;
+}
+
+/**
+ * Device status handler.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param events
+ * Pointer to event flags holder.
+ *
+ * @return
+ * Events bitmap of callback process which can be called immediately.
+ */
+static uint32_t
+mlx5_dev_status_handler(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ibv_async_event event;
+ uint32_t ret = 0;
+
+ if (mlx5_link_update(dev, 0) == -EAGAIN) {
+ usleep(0);
+ return 0;
+ }
+ /* Read all message and acknowledge them. */
+ for (;;) {
+ if (mlx5_glue->get_async_event(priv->ctx, &event))
+ break;
+ if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
+ event.event_type == IBV_EVENT_PORT_ERR) &&
+ (dev->data->dev_conf.intr_conf.lsc == 1))
+ ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
+ else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
+ dev->data->dev_conf.intr_conf.rmv == 1)
+ ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
+ else
+ DRV_LOG(DEBUG,
+ "port %u event type %d on not handled",
+ dev->data->port_id, event.event_type);
+ mlx5_glue->ack_async_event(&event);
+ }
+ return ret;
+}
+
+/**
+ * Handle interrupts from the NIC.
+ *
+ * @param[in] intr_handle
+ * Interrupt handler.
+ * @param cb_arg
+ * Callback argument.
+ */
+void
+mlx5_dev_interrupt_handler(void *cb_arg)
+{
+ struct rte_eth_dev *dev = cb_arg;
+ uint32_t events;
+
+ events = mlx5_dev_status_handler(dev);
+ if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
+}
+
+/**
+ * Handle interrupts from the socket.
+ *
+ * @param cb_arg
+ * Callback argument.
+ */
+static void
+mlx5_dev_handler_socket(void *cb_arg)
+{
+ struct rte_eth_dev *dev = cb_arg;
+
+ mlx5_socket_handle(dev);
+}
+
+/**
+ * Uninstall interrupt handler.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ if (dev->data->dev_conf.intr_conf.lsc ||
+ dev->data->dev_conf.intr_conf.rmv)
+ rte_intr_callback_unregister(&priv->intr_handle,
+ mlx5_dev_interrupt_handler, dev);
+ if (priv->primary_socket)
+ rte_intr_callback_unregister(&priv->intr_handle_socket,
+ mlx5_dev_handler_socket, dev);
+ priv->intr_handle.fd = 0;
+ priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ priv->intr_handle_socket.fd = 0;
+ priv->intr_handle_socket.type = RTE_INTR_HANDLE_UNKNOWN;
+}
+
+/**
+ * Install interrupt handler.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+ int flags;
+
+ assert(priv->ctx->async_fd > 0);
+ flags = fcntl(priv->ctx->async_fd, F_GETFL);
+ ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
+ if (ret) {
+ DRV_LOG(INFO,
+ "port %u failed to change file descriptor async event"
+ " queue",
+ dev->data->port_id);
+ dev->data->dev_conf.intr_conf.lsc = 0;
+ dev->data->dev_conf.intr_conf.rmv = 0;
+ }
+ if (dev->data->dev_conf.intr_conf.lsc ||
+ dev->data->dev_conf.intr_conf.rmv) {
+ priv->intr_handle.fd = priv->ctx->async_fd;
+ priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
+ rte_intr_callback_register(&priv->intr_handle,
+ mlx5_dev_interrupt_handler, dev);
+ }
+ ret = mlx5_socket_init(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot initialise socket: %s",
+ dev->data->port_id, strerror(rte_errno));
+ else if (priv->primary_socket) {
+ priv->intr_handle_socket.fd = priv->primary_socket;
+ priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
+ rte_intr_callback_register(&priv->intr_handle_socket,
+ mlx5_dev_handler_socket, dev);
+ }
+}
+
+/**
+ * DPDK callback to bring the link DOWN.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_set_link_down(struct rte_eth_dev *dev)
+{
+ return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
+}
+
+/**
+ * DPDK callback to bring the link UP.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_set_link_up(struct rte_eth_dev *dev)
+{
+ return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
+}
+
+/**
+ * Configure the TX function to use.
+ *
+ * @param dev
+ * Pointer to private data structure.
+ *
+ * @return
+ * Pointer to selected Tx burst function.
+ */
+eth_tx_burst_t
+mlx5_select_tx_function(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
+ struct mlx5_dev_config *config = &priv->config;
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO));
+ int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
+ int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
+
+ assert(priv != NULL);
+ /* Select appropriate TX function. */
+ if (vlan_insert || tso || swp)
+ return tx_pkt_burst;
+ if (config->mps == MLX5_MPW_ENHANCED) {
+ if (mlx5_check_vec_tx_support(dev) > 0) {
+ if (mlx5_check_raw_vec_tx_support(dev) > 0)
+ tx_pkt_burst = mlx5_tx_burst_raw_vec;
+ else
+ tx_pkt_burst = mlx5_tx_burst_vec;
+ DRV_LOG(DEBUG,
+ "port %u selected enhanced MPW Tx vectorized"
+ " function",
+ dev->data->port_id);
+ } else {
+ tx_pkt_burst = mlx5_tx_burst_empw;
+ DRV_LOG(DEBUG,
+ "port %u selected enhanced MPW Tx function",
+ dev->data->port_id);
+ }
+ } else if (config->mps && (config->txq_inline > 0)) {
+ tx_pkt_burst = mlx5_tx_burst_mpw_inline;
+ DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
+ dev->data->port_id);
+ } else if (config->mps) {
+ tx_pkt_burst = mlx5_tx_burst_mpw;
+ DRV_LOG(DEBUG, "port %u selected MPW Tx function",
+ dev->data->port_id);
+ }
+ return tx_pkt_burst;
+}
+
+/**
+ * Configure the RX function to use.
+ *
+ * @param dev
+ * Pointer to private data structure.
+ *
+ * @return
+ * Pointer to selected Rx burst function.
+ */
+eth_rx_burst_t
+mlx5_select_rx_function(struct rte_eth_dev *dev)
+{
+ eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
+
+ assert(dev != NULL);
+ if (mlx5_check_vec_rx_support(dev) > 0) {
+ rx_pkt_burst = mlx5_rx_burst_vec;
+ DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
+ dev->data->port_id);
+ } else if (mlx5_mprq_enabled(dev)) {
+ rx_pkt_burst = mlx5_rx_burst_mprq;
+ }
+ return rx_pkt_burst;
+}
+
+/**
+ * Check if mlx5 device was removed.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 1 when device is removed, otherwise 0.
+ */
+int
+mlx5_is_removed(struct rte_eth_dev *dev)
+{
+ struct ibv_device_attr device_attr;
+ struct priv *priv = dev->data->dev_private;
+
+ if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO)
+ return 1;
+ return 0;
+}
+
+/**
+ * Get port ID list of mlx5 instances sharing a common device.
+ *
+ * @param[in] dev
+ * Device to look for.
+ * @param[out] port_list
+ * Result buffer for collected port IDs.
+ * @param port_list_n
+ * Maximum number of entries in result buffer. If 0, @p port_list can be
+ * NULL.
+ *
+ * @return
+ * Number of matching instances regardless of the @p port_list_n
+ * parameter, 0 if none were found.
+ */
+unsigned int
+mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list,
+ unsigned int port_list_n)
+{
+ uint16_t id;
+ unsigned int n = 0;
+
+ RTE_ETH_FOREACH_DEV(id) {
+ struct rte_eth_dev *ldev = &rte_eth_devices[id];
+
+ if (!ldev->device ||
+ !ldev->device->driver ||
+ strcmp(ldev->device->driver->name, MLX5_DRIVER_NAME) ||
+ ldev->device != dev)
+ continue;
+ if (n < port_list_n)
+ port_list[n] = id;
+ n++;
+ }
+ return n;
+}
+
+/**
+ * Get switch information associated with network interface.
+ *
+ * @param ifindex
+ * Network interface index.
+ * @param[out] info
+ * Switch information object, populated in case of success.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
+{
+ char ifname[IF_NAMESIZE];
+ FILE *file;
+ struct mlx5_switch_info data = { .master = 0, };
+ bool port_name_set = false;
+ bool port_switch_id_set = false;
+ char c;
+
+ if (!if_indextoname(ifindex, ifname)) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+
+ MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name",
+ ifname);
+ MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
+ ifname);
+
+ file = fopen(phys_port_name, "rb");
+ if (file != NULL) {
+ port_name_set =
+ fscanf(file, "%d%c", &data.port_name, &c) == 2 &&
+ c == '\n';
+ fclose(file);
+ }
+ file = fopen(phys_switch_id, "rb");
+ if (file == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ port_switch_id_set =
+ fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
+ c == '\n';
+ fclose(file);
+ data.master = port_switch_id_set && !port_name_set;
+ data.representor = port_switch_id_set && port_name_set;
+ *info = data;
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.c
new file mode 100644
index 00000000..ca4625b6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.c
@@ -0,0 +1,3848 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox Technologies, Ltd
+ */
+
+#include <sys/queue.h>
+#include <stdalign.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_common.h>
+#include <rte_ether.h>
+#include <rte_eth_ctrl.h>
+#include <rte_ethdev_driver.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+
+#include "mlx5.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+#include "mlx5_glue.h"
+
+/* Dev ops structure defined in mlx5.c */
+extern const struct eth_dev_ops mlx5_dev_ops;
+extern const struct eth_dev_ops mlx5_dev_ops_isolate;
+
+/* Pattern outer Layer bits. */
+#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
+#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
+#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
+#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
+#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
+#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
+
+/* Pattern inner Layer bits. */
+#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
+#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
+#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
+#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
+
+/* Pattern tunnel Layer bits. */
+#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
+#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
+#define MLX5_FLOW_LAYER_GRE (1u << 14)
+#define MLX5_FLOW_LAYER_MPLS (1u << 15)
+
+/* Outer Masks. */
+#define MLX5_FLOW_LAYER_OUTER_L3 \
+ (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
+#define MLX5_FLOW_LAYER_OUTER_L4 \
+ (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
+#define MLX5_FLOW_LAYER_OUTER \
+ (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
+ MLX5_FLOW_LAYER_OUTER_L4)
+
+/* Tunnel Masks. */
+#define MLX5_FLOW_LAYER_TUNNEL \
+ (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+ MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
+
+/* Inner Masks. */
+#define MLX5_FLOW_LAYER_INNER_L3 \
+ (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
+#define MLX5_FLOW_LAYER_INNER_L4 \
+ (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
+#define MLX5_FLOW_LAYER_INNER \
+ (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
+ MLX5_FLOW_LAYER_INNER_L4)
+
+/* Actions that modify the fate of matching traffic. */
+#define MLX5_FLOW_FATE_DROP (1u << 0)
+#define MLX5_FLOW_FATE_QUEUE (1u << 1)
+#define MLX5_FLOW_FATE_RSS (1u << 2)
+
+/* Modify a packet. */
+#define MLX5_FLOW_MOD_FLAG (1u << 0)
+#define MLX5_FLOW_MOD_MARK (1u << 1)
+#define MLX5_FLOW_MOD_COUNT (1u << 2)
+
+/* possible L3 layers protocols filtering. */
+#define MLX5_IP_PROTOCOL_TCP 6
+#define MLX5_IP_PROTOCOL_UDP 17
+#define MLX5_IP_PROTOCOL_GRE 47
+#define MLX5_IP_PROTOCOL_MPLS 147
+
+/* Priority reserved for default flows. */
+#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
+
+enum mlx5_expansion {
+ MLX5_EXPANSION_ROOT,
+ MLX5_EXPANSION_ROOT_OUTER,
+ MLX5_EXPANSION_ROOT_ETH_VLAN,
+ MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
+ MLX5_EXPANSION_OUTER_ETH,
+ MLX5_EXPANSION_OUTER_ETH_VLAN,
+ MLX5_EXPANSION_OUTER_VLAN,
+ MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV4_UDP,
+ MLX5_EXPANSION_OUTER_IPV4_TCP,
+ MLX5_EXPANSION_OUTER_IPV6,
+ MLX5_EXPANSION_OUTER_IPV6_UDP,
+ MLX5_EXPANSION_OUTER_IPV6_TCP,
+ MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_VXLAN_GPE,
+ MLX5_EXPANSION_GRE,
+ MLX5_EXPANSION_MPLS,
+ MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_ETH_VLAN,
+ MLX5_EXPANSION_VLAN,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV4_UDP,
+ MLX5_EXPANSION_IPV4_TCP,
+ MLX5_EXPANSION_IPV6,
+ MLX5_EXPANSION_IPV6_UDP,
+ MLX5_EXPANSION_IPV6_TCP,
+};
+
+/** Supported expansion of items. */
+static const struct rte_flow_expand_node mlx5_support_expansion[] = {
+ [MLX5_EXPANSION_ROOT] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ [MLX5_EXPANSION_ROOT_OUTER] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
+ MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ [MLX5_EXPANSION_OUTER_ETH] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV6,
+ MLX5_EXPANSION_MPLS),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .rss_types = 0,
+ },
+ [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .rss_types = 0,
+ },
+ [MLX5_EXPANSION_OUTER_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ },
+ [MLX5_EXPANSION_OUTER_IPV4] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT
+ (MLX5_EXPANSION_OUTER_IPV4_UDP,
+ MLX5_EXPANSION_OUTER_IPV4_TCP,
+ MLX5_EXPANSION_GRE),
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER,
+ },
+ [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_VXLAN_GPE),
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+ },
+ [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+ },
+ [MLX5_EXPANSION_OUTER_IPV6] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT
+ (MLX5_EXPANSION_OUTER_IPV6_UDP,
+ MLX5_EXPANSION_OUTER_IPV6_TCP),
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER,
+ },
+ [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_VXLAN_GPE),
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+ },
+ [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+ },
+ [MLX5_EXPANSION_VXLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+ },
+ [MLX5_EXPANSION_VXLAN_GPE] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+ },
+ [MLX5_EXPANSION_GRE] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
+ .type = RTE_FLOW_ITEM_TYPE_GRE,
+ },
+ [MLX5_EXPANSION_MPLS] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_MPLS,
+ },
+ [MLX5_EXPANSION_ETH] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ },
+ [MLX5_EXPANSION_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ },
+ [MLX5_EXPANSION_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ },
+ [MLX5_EXPANSION_IPV4] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
+ MLX5_EXPANSION_IPV4_TCP),
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER,
+ },
+ [MLX5_EXPANSION_IPV4_UDP] = {
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+ },
+ [MLX5_EXPANSION_IPV4_TCP] = {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+ },
+ [MLX5_EXPANSION_IPV6] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
+ MLX5_EXPANSION_IPV6_TCP),
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER,
+ },
+ [MLX5_EXPANSION_IPV6_UDP] = {
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+ },
+ [MLX5_EXPANSION_IPV6_TCP] = {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+ },
+};
+
+/** Handles information leading to a drop fate. */
+struct mlx5_flow_verbs {
+ LIST_ENTRY(mlx5_flow_verbs) next;
+ unsigned int size; /**< Size of the attribute. */
+ struct {
+ struct ibv_flow_attr *attr;
+ /**< Pointer to the Specification buffer. */
+ uint8_t *specs; /**< Pointer to the specifications. */
+ };
+ struct ibv_flow *flow; /**< Verbs flow pointer. */
+ struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+ uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+};
+
+/* Counters information. */
+struct mlx5_flow_counter {
+ LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */
+ uint32_t shared:1; /**< Share counter ID with other flow rules. */
+ uint32_t ref_cnt:31; /**< Reference counter. */
+ uint32_t id; /**< Counter ID. */
+ struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
+ uint64_t hits; /**< Number of packets matched by the rule. */
+ uint64_t bytes; /**< Number of bytes matched by the rule. */
+};
+
+/* Flow structure. */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+ struct rte_flow_attr attributes; /**< User flow attribute. */
+ uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */
+ uint32_t layers;
+ /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */
+ uint32_t modifier;
+ /**< Bit-fields of present modifier see MLX5_FLOW_MOD_*. */
+ uint32_t fate;
+ /**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */
+ uint8_t l3_protocol; /**< valid when l3_protocol_en is set. */
+ LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */
+ struct mlx5_flow_verbs *cur_verbs;
+ /**< Current Verbs flow structure being filled. */
+ struct mlx5_flow_counter *counter; /**< Holds Verbs flow counter. */
+ struct rte_flow_action_rss rss;/**< RSS context. */
+ uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+ uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
+ void *nl_flow; /**< Netlink flow buffer if relevant. */
+};
+
+static const struct rte_flow_ops mlx5_flow_ops = {
+ .validate = mlx5_flow_validate,
+ .create = mlx5_flow_create,
+ .destroy = mlx5_flow_destroy,
+ .flush = mlx5_flow_flush,
+ .isolate = mlx5_flow_isolate,
+ .query = mlx5_flow_query,
+};
+
+/* Convert FDIR request to Generic flow. */
+struct mlx5_fdir {
+ struct rte_flow_attr attr;
+ struct rte_flow_action actions[2];
+ struct rte_flow_item items[4];
+ struct rte_flow_item_eth l2;
+ struct rte_flow_item_eth l2_mask;
+ union {
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ } l3;
+ union {
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ } l3_mask;
+ union {
+ struct rte_flow_item_udp udp;
+ struct rte_flow_item_tcp tcp;
+ } l4;
+ union {
+ struct rte_flow_item_udp udp;
+ struct rte_flow_item_tcp tcp;
+ } l4_mask;
+ struct rte_flow_action_queue queue;
+};
+
+/* Verbs specification header. */
+struct ibv_spec_header {
+ enum ibv_flow_spec_type type;
+ uint16_t size;
+};
+
+/*
+ * Number of sub priorities.
+ * For each kind of pattern matching i.e. L2, L3, L4 to have a correct
+ * matching on the NIC (firmware dependent) L4 most have the higher priority
+ * followed by L3 and ending with L2.
+ */
+#define MLX5_PRIORITY_MAP_L2 2
+#define MLX5_PRIORITY_MAP_L3 1
+#define MLX5_PRIORITY_MAP_L4 0
+#define MLX5_PRIORITY_MAP_MAX 3
+
+/* Map of Verbs to Flow priority with 8 Verbs priorities. */
+static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
+ { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
+};
+
+/* Map of Verbs to Flow priority with 16 Verbs priorities. */
+static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
+ { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
+ { 9, 10, 11 }, { 12, 13, 14 },
+};
+
+/* Tunnel information. */
+struct mlx5_flow_tunnel_info {
+ uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
+ uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
+};
+
+static struct mlx5_flow_tunnel_info tunnels_info[] = {
+ {
+ .tunnel = MLX5_FLOW_LAYER_VXLAN,
+ .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
+ .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_GRE,
+ .ptype = RTE_PTYPE_TUNNEL_GRE,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
+ .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_MPLS,
+ .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
+ },
+};
+
+/**
+ * Discover the maximum number of priority available.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * number of supported flow priority on success, a negative errno
+ * value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
+{
+ struct {
+ struct ibv_flow_attr attr;
+ struct ibv_flow_spec_eth eth;
+ struct ibv_flow_spec_action_drop drop;
+ } flow_attr = {
+ .attr = {
+ .num_of_specs = 2,
+ },
+ .eth = {
+ .type = IBV_FLOW_SPEC_ETH,
+ .size = sizeof(struct ibv_flow_spec_eth),
+ },
+ .drop = {
+ .size = sizeof(struct ibv_flow_spec_action_drop),
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ },
+ };
+ struct ibv_flow *flow;
+ struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
+ uint16_t vprio[] = { 8, 16 };
+ int i;
+ int priority = 0;
+
+ if (!drop) {
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ for (i = 0; i != RTE_DIM(vprio); i++) {
+ flow_attr.attr.priority = vprio[i] - 1;
+ flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
+ if (!flow)
+ break;
+ claim_zero(mlx5_glue->destroy_flow(flow));
+ priority = vprio[i];
+ }
+ switch (priority) {
+ case 8:
+ priority = RTE_DIM(priority_map_3);
+ break;
+ case 16:
+ priority = RTE_DIM(priority_map_5);
+ break;
+ default:
+ rte_errno = ENOTSUP;
+ DRV_LOG(ERR,
+ "port %u verbs maximum priority: %d expected 8/16",
+ dev->data->port_id, vprio[i]);
+ return -rte_errno;
+ }
+ mlx5_hrxq_drop_release(dev);
+ DRV_LOG(INFO, "port %u flow maximum priority: %d",
+ dev->data->port_id, priority);
+ return priority;
+}
+
+/**
+ * Adjust flow priority.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Pointer to an rte flow.
+ */
+static void
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint32_t priority = flow->attributes.priority;
+ uint32_t subpriority = flow->cur_verbs->attr->priority;
+
+ switch (priv->config.flow_prio) {
+ case RTE_DIM(priority_map_3):
+ priority = priority_map_3[priority][subpriority];
+ break;
+ case RTE_DIM(priority_map_5):
+ priority = priority_map_5[priority][subpriority];
+ break;
+ }
+ flow->cur_verbs->attr->priority = priority;
+}
+
+/**
+ * Get a flow counter.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] shared
+ * Indicate if this counter is shared with other flows.
+ * @param[in] id
+ * Counter identifier.
+ *
+ * @return
+ * A pointer to the counter, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_counter *
+mlx5_flow_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_counter *cnt;
+
+ LIST_FOREACH(cnt, &priv->flow_counters, next) {
+ if (!cnt->shared || cnt->shared != shared)
+ continue;
+ if (cnt->id != id)
+ continue;
+ cnt->ref_cnt++;
+ return cnt;
+ }
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+
+ struct mlx5_flow_counter tmpl = {
+ .shared = shared,
+ .id = id,
+ .cs = mlx5_glue->create_counter_set
+ (priv->ctx,
+ &(struct ibv_counter_set_init_attr){
+ .counter_set_id = id,
+ }),
+ .hits = 0,
+ .bytes = 0,
+ };
+
+ if (!tmpl.cs) {
+ rte_errno = errno;
+ return NULL;
+ }
+ cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
+ if (!cnt) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ *cnt = tmpl;
+ LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
+ return cnt;
+#endif
+ rte_errno = ENOTSUP;
+ return NULL;
+}
+
+/**
+ * Release a flow counter.
+ *
+ * @param[in] counter
+ * Pointer to the counter handler.
+ */
+static void
+mlx5_flow_counter_release(struct mlx5_flow_counter *counter)
+{
+ if (--counter->ref_cnt == 0) {
+ claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
+ LIST_REMOVE(counter, next);
+ rte_free(counter);
+ }
+}
+
+/**
+ * Verify the @p attributes will be correctly understood by the NIC and store
+ * them in the @p flow if everything is correct.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] attributes
+ * Pointer to flow attributes
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_attributes(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attributes,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ uint32_t priority_max =
+ ((struct priv *)dev->data->dev_private)->config.flow_prio - 1;
+
+ if (attributes->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "groups is not supported");
+ if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+ attributes->priority >= priority_max)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priority out of range");
+ if (attributes->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "egress is not supported");
+ if (attributes->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "transfer is not supported");
+ if (!attributes->ingress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "ingress attribute is mandatory");
+ flow->attributes = *attributes;
+ if (attributes->priority == MLX5_FLOW_PRIO_RSVD)
+ flow->attributes.priority = priority_max;
+ return 0;
+}
+
+/**
+ * Verify the @p item specifications (spec, last, mask) are compatible with the
+ * NIC capabilities.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] mask
+ * @p item->mask or flow default bit-masks.
+ * @param[in] nic_mask
+ * Bit-masks covering supported fields by the NIC to compare with user mask.
+ * @param[in] size
+ * Bit-masks size in bytes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_item_acceptable(const struct rte_flow_item *item,
+ const uint8_t *mask,
+ const uint8_t *nic_mask,
+ unsigned int size,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+
+ assert(nic_mask);
+ for (i = 0; i < size; ++i)
+ if ((nic_mask[i] | mask[i]) != nic_mask[i])
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "mask enables non supported"
+ " bits");
+ if (!item->spec && (item->mask || item->last))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "mask/last without a spec is not"
+ " supported");
+ if (item->spec && item->last) {
+ uint8_t spec[size];
+ uint8_t last[size];
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < size; ++i) {
+ spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
+ last[i] = ((const uint8_t *)item->last)[i] & mask[i];
+ }
+ ret = memcmp(spec, last, size);
+ if (ret != 0)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "range is not supported");
+ }
+ return 0;
+}
+
+/**
+ * Add a verbs item specification into @p flow.
+ *
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] src
+ * Create specification.
+ * @param[in] size
+ * Size in bytes of the specification to copy.
+ */
+static void
+mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
+{
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+
+ if (verbs->specs) {
+ void *dst;
+
+ dst = (void *)(verbs->specs + verbs->size);
+ memcpy(dst, src, size);
+ ++verbs->attr->num_of_specs;
+ }
+ verbs->size += size;
+}
+
+/**
+ * Adjust verbs hash fields according to the @p flow information.
+ *
+ * @param[in, out] flow.
+ * Pointer to flow structure.
+ * @param[in] tunnel
+ * 1 when the hash field is for a tunnel item.
+ * @param[in] layer_types
+ * ETH_RSS_* types.
+ * @param[in] hash_fields
+ * Item hash fields.
+ */
+static void
+mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow,
+ int tunnel __rte_unused,
+ uint32_t layer_types, uint64_t hash_fields)
+{
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
+ if (flow->rss.level == 2 && !tunnel)
+ hash_fields = 0;
+ else if (flow->rss.level < 2 && tunnel)
+ hash_fields = 0;
+#endif
+ if (!(flow->rss.types & layer_types))
+ hash_fields = 0;
+ flow->cur_verbs->hash_fields |= hash_fields;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ const struct rte_flow_item_eth nic_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = RTE_BE16(0xffff),
+ };
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const unsigned int size = sizeof(struct ibv_flow_spec_eth);
+ struct ibv_flow_spec_eth eth = {
+ .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L2 layers already configured");
+ if (!mask)
+ mask = &rte_flow_item_eth_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_eth),
+ error);
+ if (ret)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ if (size > flow_size)
+ return size;
+ if (spec) {
+ unsigned int i;
+
+ memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ eth.val.ether_type = spec->type;
+ memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ eth.mask.ether_type = mask->type;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
+ eth.val.src_mac[i] &= eth.mask.src_mac[i];
+ }
+ eth.val.ether_type &= eth.mask.ether_type;
+ }
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ mlx5_flow_spec_verbs_add(flow, &eth, size);
+ return size;
+}
+
+/**
+ * Update the VLAN tag in the Verbs Ethernet specification.
+ *
+ * @param[in, out] attr
+ * Pointer to Verbs attributes structure.
+ * @param[in] eth
+ * Verbs structure containing the VLAN information to copy.
+ */
+static void
+mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
+ struct ibv_flow_spec_eth *eth)
+{
+ unsigned int i;
+ const enum ibv_flow_spec_type search = eth->type;
+ struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+ ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+ for (i = 0; i != attr->num_of_specs; ++i) {
+ if (hdr->type == search) {
+ struct ibv_flow_spec_eth *e =
+ (struct ibv_flow_spec_eth *)hdr;
+
+ e->val.vlan_tag = eth->val.vlan_tag;
+ e->mask.vlan_tag = eth->mask.vlan_tag;
+ e->val.ether_type = eth->val.ether_type;
+ e->mask.ether_type = eth->mask.ether_type;
+ break;
+ }
+ hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+ }
+}
+
+/**
+ * Convert the @p item into @p flow (or by updating the already present
+ * Ethernet Verbs) specification after ensuring the NIC will understand and
+ * process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ const struct rte_flow_item_vlan nic_mask = {
+ .tci = RTE_BE16(0x0fff),
+ .inner_type = RTE_BE16(0xffff),
+ };
+ unsigned int size = sizeof(struct ibv_flow_spec_eth);
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ struct ibv_flow_spec_eth eth = {
+ .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+ const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+ MLX5_FLOW_LAYER_INNER_L4) :
+ (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);
+ const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+ const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+
+ if (flow->layers & vlanm)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN layer already configured");
+ else if ((flow->layers & l34m) != 0)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L2 layer cannot follow L3/L4 layer");
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_vlan), error);
+ if (ret)
+ return ret;
+ if (spec) {
+ eth.val.vlan_tag = spec->tci;
+ eth.mask.vlan_tag = mask->tci;
+ eth.val.vlan_tag &= eth.mask.vlan_tag;
+ eth.val.ether_type = spec->inner_type;
+ eth.mask.ether_type = mask->inner_type;
+ eth.val.ether_type &= eth.mask.ether_type;
+ }
+ /*
+ * From verbs perspective an empty VLAN is equivalent
+ * to a packet without VLAN layer.
+ */
+ if (!eth.mask.vlan_tag)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ item->spec,
+ "VLAN cannot be empty");
+ if (!(flow->layers & l2m)) {
+ if (size <= flow_size) {
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ mlx5_flow_spec_verbs_add(flow, &eth, size);
+ }
+ } else {
+ if (flow->cur_verbs)
+ mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
+ &eth);
+ size = 0; /* Only an update is done in eth specification. */
+ }
+ flow->layers |= tunnel ?
+ (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
+ (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ const struct rte_flow_item_ipv4 nic_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .next_proto_id = 0xff,
+ },
+ };
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
+ struct ibv_flow_spec_ipv4_ext ipv4 = {
+ .type = IBV_FLOW_SPEC_IPV4_EXT |
+ (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "multiple L3 layers not supported");
+ else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 cannot follow an L4 layer.");
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv4), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ if (spec) {
+ ipv4.val = (struct ibv_flow_ipv4_ext_filter){
+ .src_ip = spec->hdr.src_addr,
+ .dst_ip = spec->hdr.dst_addr,
+ .proto = spec->hdr.next_proto_id,
+ .tos = spec->hdr.type_of_service,
+ };
+ ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
+ .src_ip = mask->hdr.src_addr,
+ .dst_ip = mask->hdr.dst_addr,
+ .proto = mask->hdr.next_proto_id,
+ .tos = mask->hdr.type_of_service,
+ };
+ /* Remove unwanted bits from values. */
+ ipv4.val.src_ip &= ipv4.mask.src_ip;
+ ipv4.val.dst_ip &= ipv4.mask.dst_ip;
+ ipv4.val.proto &= ipv4.mask.proto;
+ ipv4.val.tos &= ipv4.mask.tos;
+ }
+ flow->l3_protocol_en = !!ipv4.mask.proto;
+ flow->l3_protocol = ipv4.val.proto;
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust
+ (flow, tunnel,
+ (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER),
+ (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
+ mlx5_flow_spec_verbs_add(flow, &ipv4, size);
+ }
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ const struct rte_flow_item_ipv6 nic_mask = {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xffffffff),
+ .proto = 0xff,
+ .hop_limits = 0xff,
+ },
+ };
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
+ struct ibv_flow_spec_ipv6 ipv6 = {
+ .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "multiple L3 layers not supported");
+ else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 cannot follow an L4 layer.");
+ /*
+ * IPv6 is not recognised by the NIC inside a GRE tunnel.
+ * Such support has to be disabled as the rule will be
+ * accepted. Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and
+ * Mellanox OFED 4.4-1.0.0.0.
+ */
+ if (tunnel && flow->layers & MLX5_FLOW_LAYER_GRE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "IPv6 inside a GRE tunnel is"
+ " not recognised.");
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv6), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (spec) {
+ unsigned int i;
+ uint32_t vtc_flow_val;
+ uint32_t vtc_flow_mask;
+
+ memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
+ RTE_DIM(ipv6.val.src_ip));
+ memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
+ RTE_DIM(ipv6.val.dst_ip));
+ memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
+ RTE_DIM(ipv6.mask.src_ip));
+ memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
+ RTE_DIM(ipv6.mask.dst_ip));
+ vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
+ vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
+ ipv6.val.flow_label =
+ rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
+ IPV6_HDR_FL_SHIFT);
+ ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
+ IPV6_HDR_TC_SHIFT;
+ ipv6.val.next_hdr = spec->hdr.proto;
+ ipv6.val.hop_limit = spec->hdr.hop_limits;
+ ipv6.mask.flow_label =
+ rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
+ IPV6_HDR_FL_SHIFT);
+ ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
+ IPV6_HDR_TC_SHIFT;
+ ipv6.mask.next_hdr = mask->hdr.proto;
+ ipv6.mask.hop_limit = mask->hdr.hop_limits;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
+ ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
+ ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
+ }
+ ipv6.val.flow_label &= ipv6.mask.flow_label;
+ ipv6.val.traffic_class &= ipv6.mask.traffic_class;
+ ipv6.val.next_hdr &= ipv6.mask.next_hdr;
+ ipv6.val.hop_limit &= ipv6.mask.hop_limit;
+ }
+ flow->l3_protocol_en = !!ipv6.mask.next_hdr;
+ flow->l3_protocol = ipv6.val.next_hdr;
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust
+ (flow, tunnel,
+ (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER),
+ (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
+ mlx5_flow_spec_verbs_add(flow, &ipv6, size);
+ }
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
+ struct ibv_flow_spec_tcp_udp udp = {
+ .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with UDP layer");
+ if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 is mandatory to filter"
+ " on L4");
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L4 layer is already"
+ " present");
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_udp_mask,
+ sizeof(struct rte_flow_item_udp), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ if (spec) {
+ udp.val.dst_port = spec->hdr.dst_port;
+ udp.val.src_port = spec->hdr.src_port;
+ udp.mask.dst_port = mask->hdr.dst_port;
+ udp.mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ udp.val.src_port &= udp.mask.src_port;
+ udp.val.dst_port &= udp.mask.dst_port;
+ }
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
+ (IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
+ mlx5_flow_spec_verbs_add(flow, &udp, size);
+ }
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
+ struct ibv_flow_spec_tcp_udp tcp = {
+ .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_TCP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with TCP layer");
+ if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 is mandatory to filter on L4");
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L4 layer is already present");
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_tcp_mask,
+ sizeof(struct rte_flow_item_tcp), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ if (spec) {
+ tcp.val.dst_port = spec->hdr.dst_port;
+ tcp.val.src_port = spec->hdr.src_port;
+ tcp.mask.dst_port = mask->hdr.dst_port;
+ tcp.mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ tcp.val.src_port &= tcp.mask.src_port;
+ tcp.val.dst_port &= tcp.mask.dst_port;
+ }
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
+ (IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
+ mlx5_flow_spec_verbs_add(flow, &tcp, size);
+ }
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vxlan *spec = item->spec;
+ const struct rte_flow_item_vxlan *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel vxlan = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ int ret;
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ /*
+ * Verify only UDPv4 is present as defined in
+ * https://tools.ietf.org/html/rfc7348
+ */
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_vxlan_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_vxlan_mask,
+ sizeof(struct rte_flow_item_vxlan), error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan.mask.tunnel_id = id.vlan_id;
+ /* Remove unwanted bits from values. */
+ vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+ }
+ /*
+ * Tunnel id 0 is equivalent as not adding a VXLAN layer, if
+ * only this layer is defined in the Verbs specification it is
+ * interpreted as wildcard and all packets will match this
+ * rule, if it follows a full stack layer (ex: eth / ipv4 /
+ * udp), all packets matching the layers before will also
+ * match this rule. To avoid such situation, VNI 0 is
+ * currently refused.
+ */
+ if (!vxlan.val.tunnel_id)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN vni cannot be 0");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN tunnel must be fully defined");
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &vxlan, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_VXLAN;
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vxlan_gpe *spec = item->spec;
+ const struct rte_flow_item_vxlan_gpe *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel vxlan_gpe = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ int ret;
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+
+ if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 VXLAN is not enabled by device"
+ " parameter and/or not configured in"
+ " firmware");
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ /*
+ * Verify only UDPv4 is present as defined in
+ * https://tools.ietf.org/html/rfc7348
+ */
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_vxlan_gpe_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
+ sizeof(struct rte_flow_item_vxlan_gpe), error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan_gpe.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan_gpe.mask.tunnel_id = id.vlan_id;
+ if (spec->protocol)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VxLAN-GPE protocol not supported");
+ /* Remove unwanted bits from values. */
+ vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
+ }
+ /*
+ * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this
+ * layer is defined in the Verbs specification it is interpreted as
+ * wildcard and all packets will match this rule, if it follows a full
+ * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
+ * before will also match this rule. To avoid such situation, VNI 0
+ * is currently refused.
+ */
+ if (!vxlan_gpe.val.tunnel_id)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN-GPE vni cannot be 0");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN-GPE tunnel must be fully"
+ " defined");
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ return size;
+}
+
+/**
+ * Update the protocol in Verbs IPv4/IPv6 spec.
+ *
+ * @param[in, out] attr
+ * Pointer to Verbs attributes structure.
+ * @param[in] search
+ * Specification type to search in order to update the IP protocol.
+ * @param[in] protocol
+ * Protocol value to set if none is present in the specification.
+ */
+static void
+mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
+ enum ibv_flow_spec_type search,
+ uint8_t protocol)
+{
+ unsigned int i;
+ struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+ ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+ if (!attr)
+ return;
+ for (i = 0; i != attr->num_of_specs; ++i) {
+ if (hdr->type == search) {
+ union {
+ struct ibv_flow_spec_ipv4_ext *ipv4;
+ struct ibv_flow_spec_ipv6 *ipv6;
+ } ip;
+
+ switch (search) {
+ case IBV_FLOW_SPEC_IPV4_EXT:
+ ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
+ if (!ip.ipv4->val.proto) {
+ ip.ipv4->val.proto = protocol;
+ ip.ipv4->mask.proto = 0xff;
+ }
+ break;
+ case IBV_FLOW_SPEC_IPV6:
+ ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
+ if (!ip.ipv6->val.next_hdr) {
+ ip.ipv6->val.next_hdr = protocol;
+ ip.ipv6->mask.next_hdr = 0xff;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+ }
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * It will also update the previous L3 layer with the protocol value matching
+ * the GRE.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_gre(const struct rte_flow_item *item,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+ const struct rte_flow_item_gre *spec = item->spec;
+ const struct rte_flow_item_gre *mask = item->mask;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ unsigned int size = sizeof(struct ibv_flow_spec_gre);
+ struct ibv_flow_spec_gre tunnel = {
+ .type = IBV_FLOW_SPEC_GRE,
+ .size = size,
+ };
+#else
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel tunnel = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+#endif
+ int ret;
+
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_GRE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with this GRE layer");
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 Layer is missing");
+ if (!mask)
+ mask = &rte_flow_item_gre_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_gre_mask,
+ sizeof(struct rte_flow_item_gre), error);
+ if (ret < 0)
+ return ret;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ if (spec) {
+ tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+ tunnel.val.protocol = spec->protocol;
+ tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+ tunnel.mask.protocol = mask->protocol;
+ /* Remove unwanted bits from values. */
+ tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+ tunnel.val.protocol &= tunnel.mask.protocol;
+ tunnel.val.key &= tunnel.mask.key;
+ }
+#else
+ if (spec && (spec->protocol & mask->protocol))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "without MPLS support the"
+ " specification cannot be used for"
+ " filtering");
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+ if (size <= flow_size) {
+ if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+ mlx5_flow_item_gre_ip_protocol_update
+ (verbs->attr, IBV_FLOW_SPEC_IPV4_EXT,
+ MLX5_IP_PROTOCOL_GRE);
+ else
+ mlx5_flow_item_gre_ip_protocol_update
+ (verbs->attr, IBV_FLOW_SPEC_IPV6,
+ MLX5_IP_PROTOCOL_GRE);
+ mlx5_flow_spec_verbs_add(flow, &tunnel, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_GRE;
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ const size_t flow_size __rte_unused,
+ struct rte_flow_error *error)
+{
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ const struct rte_flow_item_mpls *spec = item->spec;
+ const struct rte_flow_item_mpls *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_mpls);
+ struct ibv_flow_spec_mpls mpls = {
+ .type = IBV_FLOW_SPEC_MPLS,
+ .size = size,
+ };
+ int ret;
+
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_MPLS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with MPLS layer");
+ /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL &&
+ (flow->layers & MLX5_FLOW_LAYER_GRE) != MLX5_FLOW_LAYER_GRE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already"
+ " present");
+ if (!mask)
+ mask = &rte_flow_item_mpls_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_mpls_mask,
+ sizeof(struct rte_flow_item_mpls), error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
+ memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
+ memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
+ /* Remove unwanted bits from values. */
+ mpls.val.label &= mpls.mask.label;
+ }
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &mpls, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_MPLS;
+ return size;
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "MPLS is not supported by Verbs, please"
+ " update.");
+}
+
+/**
+ * Convert the @p pattern into a Verbs specifications after ensuring the NIC
+ * will understand and process it correctly.
+ * The conversion is performed item per item, each of them is written into
+ * the @p flow if its size is lesser or equal to @p flow_size.
+ * Validation and memory consumption computation are still performed until the
+ * end of @p pattern, unless an error is encountered.
+ *
+ * @param[in] pattern
+ * Flow pattern.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small some
+ * garbage may be present.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @pattern has fully been
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_items(struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ int remain = flow_size;
+ size_t size = 0;
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ int ret = 0;
+
+ switch (pattern->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ ret = mlx5_flow_item_eth(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_item_udp(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ ret = mlx5_flow_item_tcp(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ ret = mlx5_flow_item_vxlan(pattern, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,
+ remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ ret = mlx5_flow_item_gre(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5_flow_item_mpls(pattern, flow, remain, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "item not supported");
+ }
+ if (ret < 0)
+ return ret;
+ if (remain > ret)
+ remain -= ret;
+ else
+ remain = 0;
+ size += ret;
+ }
+ if (!flow->layers) {
+ const struct rte_flow_item item = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ };
+
+ return mlx5_flow_item_eth(&item, flow, flow_size, error);
+ }
+ return size;
+}
+
+/**
+ * Convert the @p action into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p action has fully been
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_drop(const struct rte_flow_action *action,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
+ struct ibv_flow_spec_action_drop drop = {
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ .size = size,
+ };
+
+ if (flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "multiple fate actions are not"
+ " supported");
+ if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "drop is not compatible with"
+ " flag/mark action");
+ if (size < flow_size)
+ mlx5_flow_spec_verbs_add(flow, &drop, size);
+ flow->fate |= MLX5_FLOW_FATE_DROP;
+ return size;
+}
+
+/**
+ * Convert the @p action into @p flow after ensuring the NIC will understand
+ * and process it correctly.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_action_queue(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_queue *queue = action->conf;
+
+ if (flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "multiple fate actions are not"
+ " supported");
+ if (queue->index >= priv->rxqs_n)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue index out of range");
+ if (!(*priv->rxqs)[queue->index])
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue is not configured");
+ if (flow->queue)
+ (*flow->queue)[0] = queue->index;
+ flow->rss.queue_num = 1;
+ flow->fate |= MLX5_FLOW_FATE_QUEUE;
+ return 0;
+}
+
+/**
+ * Ensure the @p action will be understood and used correctly by the NIC.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param action[in]
+ * Pointer to flow actions array.
+ * @param flow[in, out]
+ * Pointer to the rte_flow structure.
+ * @param error[in, out]
+ * Pointer to error structure.
+ *
+ * @return
+ * On success @p flow->queue array and @p flow->rss are filled and valid.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_rss(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_rss *rss = action->conf;
+ unsigned int i;
+
+ if (flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "multiple fate actions are not"
+ " supported");
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+ rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->func,
+ "RSS hash function not supported");
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (rss->level > 2)
+#else
+ if (rss->level > 1)
+#endif
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->level,
+ "tunnel RSS is not supported");
+ if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too small");
+ if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too large");
+ if (!rss->queue_num)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ rss,
+ "no queues were provided for RSS");
+ if (rss->queue_num > priv->config.ind_table_max_size)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue_num,
+ "number of queues too large");
+ if (rss->types & MLX5_RSS_HF_MASK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->types,
+ "some RSS protocols are not"
+ " supported");
+ for (i = 0; i != rss->queue_num; ++i) {
+ if (rss->queue[i] >= priv->rxqs_n)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ rss,
+ "queue index out of range");
+ if (!(*priv->rxqs)[rss->queue[i]])
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[i],
+ "queue is not configured");
+ }
+ if (flow->queue)
+ memcpy((*flow->queue), rss->queue,
+ rss->queue_num * sizeof(uint16_t));
+ flow->rss.queue_num = rss->queue_num;
+ memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
+ flow->rss.types = rss->types;
+ flow->rss.level = rss->level;
+ flow->fate |= MLX5_FLOW_FATE_RSS;
+ return 0;
+}
+
+/**
+ * Convert the @p action into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p action has fully been
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_flag(const struct rte_flow_action *action,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
+ struct ibv_flow_spec_action_tag tag = {
+ .type = IBV_FLOW_SPEC_ACTION_TAG,
+ .size = size,
+ .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
+ };
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+
+ if (flow->modifier & MLX5_FLOW_MOD_FLAG)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "flag action already present");
+ if (flow->fate & MLX5_FLOW_FATE_DROP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "flag is not compatible with drop"
+ " action");
+ if (flow->modifier & MLX5_FLOW_MOD_MARK)
+ size = 0;
+ else if (size <= flow_size && verbs)
+ mlx5_flow_spec_verbs_add(flow, &tag, size);
+ flow->modifier |= MLX5_FLOW_MOD_FLAG;
+ return size;
+}
+
+/**
+ * Update verbs specification to modify the flag to mark.
+ *
+ * @param[in, out] verbs
+ * Pointer to the mlx5_flow_verbs structure.
+ * @param[in] mark_id
+ * Mark identifier to replace the flag.
+ */
+static void
+mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
+{
+ struct ibv_spec_header *hdr;
+ int i;
+
+ if (!verbs)
+ return;
+ /* Update Verbs specification. */
+ hdr = (struct ibv_spec_header *)verbs->specs;
+ if (!hdr)
+ return;
+ for (i = 0; i != verbs->attr->num_of_specs; ++i) {
+ if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
+ struct ibv_flow_spec_action_tag *t =
+ (struct ibv_flow_spec_action_tag *)hdr;
+
+ t->tag_id = mlx5_flow_mark_set(mark_id);
+ }
+ hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
+ }
+}
+
+/**
+ * Convert the @p action into @p flow (or by updating the already present
+ * Flag Verbs specification) after ensuring the NIC will understand and
+ * process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p action has fully been
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_mark(const struct rte_flow_action *action,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_mark *mark = action->conf;
+ unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
+ struct ibv_flow_spec_action_tag tag = {
+ .type = IBV_FLOW_SPEC_ACTION_TAG,
+ .size = size,
+ };
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+
+ if (!mark)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "configuration cannot be null");
+ if (mark->id >= MLX5_FLOW_MARK_MAX)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &mark->id,
+ "mark id must in 0 <= id < "
+ RTE_STR(MLX5_FLOW_MARK_MAX));
+ if (flow->modifier & MLX5_FLOW_MOD_MARK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "mark action already present");
+ if (flow->fate & MLX5_FLOW_FATE_DROP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "mark is not compatible with drop"
+ " action");
+ if (flow->modifier & MLX5_FLOW_MOD_FLAG) {
+ mlx5_flow_verbs_mark_update(verbs, mark->id);
+ size = 0;
+ } else if (size <= flow_size) {
+ tag.tag_id = mlx5_flow_mark_set(mark->id);
+ mlx5_flow_spec_verbs_add(flow, &tag, size);
+ }
+ flow->modifier |= MLX5_FLOW_MOD_MARK;
+ return size;
+}
+
+/**
+ * Convert the @p action into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param action[in]
+ * Action configuration.
+ * @param flow[in, out]
+ * Pointer to flow structure.
+ * @param flow_size[in]
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param error[int, out]
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p action has fully been
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_count(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow *flow,
+ const size_t flow_size __rte_unused,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_count *count = action->conf;
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
+ struct ibv_flow_spec_counter_action counter = {
+ .type = IBV_FLOW_SPEC_ACTION_COUNT,
+ .size = size,
+ };
+#endif
+
+ if (!flow->counter) {
+ flow->counter = mlx5_flow_counter_new(dev, count->shared,
+ count->id);
+ if (!flow->counter)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "cannot get counter"
+ " context.");
+ }
+ if (!((struct priv *)dev->data->dev_private)->config.flow_counter_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "flow counters are not supported.");
+ flow->modifier |= MLX5_FLOW_MOD_COUNT;
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ counter.counter_set_handle = flow->counter->cs->handle;
+ if (size <= flow_size)
+ mlx5_flow_spec_verbs_add(flow, &counter, size);
+ return size;
+#endif
+ return 0;
+}
+
+/**
+ * Convert the @p action into @p flow after ensuring the NIC will understand
+ * and process it correctly.
+ * The conversion is performed action per action, each of them is written into
+ * the @p flow if its size is lesser or equal to @p flow_size.
+ * Validation and memory consumption computation are still performed until the
+ * end of @p action, unless an error is encountered.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in] actions
+ * Pointer to flow actions array.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small some
+ * garbage may be present.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p actions has fully been
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_actions(struct rte_eth_dev *dev,
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ size_t size = 0;
+ int remain = flow_size;
+ int ret = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ ret = mlx5_flow_action_flag(actions, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ ret = mlx5_flow_action_mark(actions, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ ret = mlx5_flow_action_drop(actions, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ ret = mlx5_flow_action_queue(dev, actions, flow, error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = mlx5_flow_action_rss(dev, actions, flow, error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = mlx5_flow_action_count(dev, actions, flow, remain,
+ error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ if (ret < 0)
+ return ret;
+ if (remain > ret)
+ remain -= ret;
+ else
+ remain = 0;
+ size += ret;
+ }
+ if (!flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "no fate action found");
+ return size;
+}
+
+/**
+ * Validate flow rule and fill flow structure accordingly.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] flow
+ * Pointer to flow structure.
+ * @param flow_size
+ * Size of allocated space for @p flow.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A positive value representing the size of the flow object in bytes
+ * regardless of @p flow_size on success, a negative errno value otherwise
+ * and rte_errno is set.
+ */
+static int
+mlx5_flow_merge_switch(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ size_t flow_size,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);
+ uint16_t port_id[!n + n];
+ struct mlx5_nl_flow_ptoi ptoi[!n + n + 1];
+ size_t off = RTE_ALIGN_CEIL(sizeof(*flow), alignof(max_align_t));
+ unsigned int i;
+ unsigned int own = 0;
+ int ret;
+
+ /* At least one port is needed when no switch domain is present. */
+ if (!n) {
+ n = 1;
+ port_id[0] = dev->data->port_id;
+ } else {
+ n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
+ }
+ for (i = 0; i != n; ++i) {
+ struct rte_eth_dev_info dev_info;
+
+ rte_eth_dev_info_get(port_id[i], &dev_info);
+ if (port_id[i] == dev->data->port_id)
+ own = i;
+ ptoi[i].port_id = port_id[i];
+ ptoi[i].ifindex = dev_info.if_index;
+ }
+ /* Ensure first entry of ptoi[] is the current device. */
+ if (own) {
+ ptoi[n] = ptoi[0];
+ ptoi[0] = ptoi[own];
+ ptoi[own] = ptoi[n];
+ }
+ /* An entry with zero ifindex terminates ptoi[]. */
+ ptoi[n].port_id = 0;
+ ptoi[n].ifindex = 0;
+ if (flow_size < off)
+ flow_size = 0;
+ ret = mlx5_nl_flow_transpose((uint8_t *)flow + off,
+ flow_size ? flow_size - off : 0,
+ ptoi, attr, pattern, actions, error);
+ if (ret < 0)
+ return ret;
+ if (flow_size) {
+ *flow = (struct rte_flow){
+ .attributes = *attr,
+ .nl_flow = (uint8_t *)flow + off,
+ };
+ /*
+ * Generate a reasonably unique handle based on the address
+ * of the target buffer.
+ *
+ * This is straightforward on 32-bit systems where the flow
+ * pointer can be used directly. Otherwise, its least
+ * significant part is taken after shifting it by the
+ * previous power of two of the pointed buffer size.
+ */
+ if (sizeof(flow) <= 4)
+ mlx5_nl_flow_brand(flow->nl_flow, (uintptr_t)flow);
+ else
+ mlx5_nl_flow_brand
+ (flow->nl_flow,
+ (uintptr_t)flow >>
+ rte_log2_u32(rte_align32prevpow2(flow_size)));
+ }
+ return off + ret;
+}
+
+static unsigned int
+mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
+{
+ const struct rte_flow_item *item;
+ unsigned int has_vlan = 0;
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ has_vlan = 1;
+ break;
+ }
+ }
+ if (has_vlan)
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
+ MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT :
+ MLX5_EXPANSION_ROOT_OUTER;
+}
+
+/**
+ * Convert the @p attributes, @p pattern, @p action, into an flow for the NIC
+ * after ensuring the NIC will understand and process it correctly.
+ * The conversion is only performed item/action per item/action, each of
+ * them is written into the @p flow if its size is lesser or equal to @p
+ * flow_size.
+ * Validation and memory consumption computation are still performed until the
+ * end, unless an error is encountered.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small some
+ * garbage may be present.
+ * @param[in] attributes
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the flow has fully been converted and
+ * can be applied, otherwise another call with this returned memory size
+ * should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const size_t flow_size,
+ const struct rte_flow_attr *attributes,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow local_flow = { .layers = 0, };
+ size_t size = sizeof(*flow);
+ union {
+ struct rte_flow_expand_rss buf;
+ uint8_t buffer[2048];
+ } expand_buffer;
+ struct rte_flow_expand_rss *buf = &expand_buffer.buf;
+ struct mlx5_flow_verbs *original_verbs = NULL;
+ size_t original_verbs_size = 0;
+ uint32_t original_layers = 0;
+ int expanded_pattern_idx = 0;
+ int ret;
+ uint32_t i;
+
+ if (attributes->transfer)
+ return mlx5_flow_merge_switch(dev, flow, flow_size,
+ attributes, pattern,
+ actions, error);
+ if (size > flow_size)
+ flow = &local_flow;
+ ret = mlx5_flow_attributes(dev, attributes, flow, error);
+ if (ret < 0)
+ return ret;
+ ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error);
+ if (ret < 0)
+ return ret;
+ if (local_flow.rss.types) {
+ unsigned int graph_root;
+
+ graph_root = mlx5_find_graph_root(pattern,
+ local_flow.rss.level);
+ ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
+ pattern, local_flow.rss.types,
+ mlx5_support_expansion,
+ graph_root);
+ assert(ret > 0 &&
+ (unsigned int)ret < sizeof(expand_buffer.buffer));
+ } else {
+ buf->entries = 1;
+ buf->entry[0].pattern = (void *)(uintptr_t)pattern;
+ }
+ size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t),
+ sizeof(void *));
+ if (size <= flow_size)
+ flow->queue = (void *)(flow + 1);
+ LIST_INIT(&flow->verbs);
+ flow->layers = 0;
+ flow->modifier = 0;
+ flow->fate = 0;
+ for (i = 0; i != buf->entries; ++i) {
+ size_t off = size;
+ size_t off2;
+
+ flow->layers = original_layers;
+ size += sizeof(struct ibv_flow_attr) +
+ sizeof(struct mlx5_flow_verbs);
+ off2 = size;
+ if (size < flow_size) {
+ flow->cur_verbs = (void *)((uintptr_t)flow + off);
+ flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1);
+ flow->cur_verbs->specs =
+ (void *)(flow->cur_verbs->attr + 1);
+ }
+ /* First iteration convert the pattern into Verbs. */
+ if (i == 0) {
+ /* Actions don't need to be converted several time. */
+ ret = mlx5_flow_actions(dev, actions, flow,
+ (size < flow_size) ?
+ flow_size - size : 0,
+ error);
+ if (ret < 0)
+ return ret;
+ size += ret;
+ } else {
+ /*
+ * Next iteration means the pattern has already been
+ * converted and an expansion is necessary to match
+ * the user RSS request. For that only the expanded
+ * items will be converted, the common part with the
+ * user pattern are just copied into the next buffer
+ * zone.
+ */
+ size += original_verbs_size;
+ if (size < flow_size) {
+ rte_memcpy(flow->cur_verbs->attr,
+ original_verbs->attr,
+ original_verbs_size +
+ sizeof(struct ibv_flow_attr));
+ flow->cur_verbs->size = original_verbs_size;
+ }
+ }
+ ret = mlx5_flow_items
+ (dev,
+ (const struct rte_flow_item *)
+ &buf->entry[i].pattern[expanded_pattern_idx],
+ flow,
+ (size < flow_size) ? flow_size - size : 0, error);
+ if (ret < 0)
+ return ret;
+ size += ret;
+ if (size <= flow_size) {
+ mlx5_flow_adjust_priority(dev, flow);
+ LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);
+ }
+ /*
+ * Keep a pointer of the first verbs conversion and the layers
+ * it has encountered.
+ */
+ if (i == 0) {
+ original_verbs = flow->cur_verbs;
+ original_verbs_size = size - off2;
+ original_layers = flow->layers;
+ /*
+ * move the index of the expanded pattern to the
+ * first item not addressed yet.
+ */
+ if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
+ expanded_pattern_idx++;
+ } else {
+ const struct rte_flow_item *item = pattern;
+
+ for (item = pattern;
+ item->type != RTE_FLOW_ITEM_TYPE_END;
+ ++item)
+ expanded_pattern_idx++;
+ }
+ }
+ }
+ /* Restore the origin layers in the flow. */
+ flow->layers = original_layers;
+ return size;
+}
+
+/**
+ * Lookup and set the ptype in the data Rx part. A single Ptype can be used,
+ * if several tunnel rules are used on this queue, the tunnel ptype will be
+ * cleared.
+ *
+ * @param rxq_ctrl
+ * Rx queue to update.
+ */
+static void
+mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ unsigned int i;
+ uint32_t tunnel_ptype = 0;
+
+ /* Look up for the ptype to use. */
+ for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
+ if (!rxq_ctrl->flow_tunnels_n[i])
+ continue;
+ if (!tunnel_ptype) {
+ tunnel_ptype = tunnels_info[i].ptype;
+ } else {
+ tunnel_ptype = 0;
+ break;
+ }
+ }
+ rxq_ctrl->rxq.tunnel = tunnel_ptype;
+}
+
+/**
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to flow structure.
+ */
+static void
+mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ const int mark = !!(flow->modifier &
+ (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int i;
+
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+
+ if (mark) {
+ rxq_ctrl->rxq.mark = 1;
+ rxq_ctrl->flow_mark_n++;
+ }
+ if (tunnel) {
+ unsigned int j;
+
+ /* Increase the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel & flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]++;
+ break;
+ }
+ }
+ mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
+ }
+ }
+}
+
+/**
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
+ * @p flow if no other flow uses it with the same kind of request.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to the flow.
+ */
+static void
+mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ const int mark = !!(flow->modifier &
+ (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int i;
+
+ assert(dev->data->dev_started);
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+
+ if (mark) {
+ rxq_ctrl->flow_mark_n--;
+ rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
+ }
+ if (tunnel) {
+ unsigned int j;
+
+ /* Decrease the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel & flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]--;
+ break;
+ }
+ }
+ mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
+ }
+ }
+}
+
+/**
+ * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ unsigned int j;
+
+ if (!(*priv->rxqs)[i])
+ continue;
+ rxq_ctrl = container_of((*priv->rxqs)[i],
+ struct mlx5_rxq_ctrl, rxq);
+ rxq_ctrl->flow_mark_n = 0;
+ rxq_ctrl->rxq.mark = 0;
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
+ rxq_ctrl->flow_tunnels_n[j] = 0;
+ rxq_ctrl->rxq.tunnel = 0;
+ }
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
+
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Remove the flow.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ */
+static void
+mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_verbs *verbs;
+
+ if (flow->nl_flow && priv->mnl_socket)
+ mlx5_nl_flow_destroy(priv->mnl_socket, flow->nl_flow, NULL);
+ LIST_FOREACH(verbs, &flow->verbs, next) {
+ if (verbs->flow) {
+ claim_zero(mlx5_glue->destroy_flow(verbs->flow));
+ verbs->flow = NULL;
+ }
+ if (verbs->hrxq) {
+ if (flow->fate & MLX5_FLOW_FATE_DROP)
+ mlx5_hrxq_drop_release(dev);
+ else
+ mlx5_hrxq_release(dev, verbs->hrxq);
+ verbs->hrxq = NULL;
+ }
+ }
+ if (flow->counter) {
+ mlx5_flow_counter_release(flow->counter);
+ flow->counter = NULL;
+ }
+}
+
+/**
+ * Apply the flow.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_verbs *verbs;
+ int err;
+
+ LIST_FOREACH(verbs, &flow->verbs, next) {
+ if (flow->fate & MLX5_FLOW_FATE_DROP) {
+ verbs->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!verbs->hrxq) {
+ rte_flow_error_set
+ (error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot get drop hash queue");
+ goto error;
+ }
+ } else {
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_hrxq_get(dev, flow->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ verbs->hash_fields,
+ (*flow->queue),
+ flow->rss.queue_num);
+ if (!hrxq)
+ hrxq = mlx5_hrxq_new(dev, flow->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ verbs->hash_fields,
+ (*flow->queue),
+ flow->rss.queue_num,
+ !!(flow->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
+ if (!hrxq) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot get hash queue");
+ goto error;
+ }
+ verbs->hrxq = hrxq;
+ }
+ verbs->flow =
+ mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr);
+ if (!verbs->flow) {
+ rte_flow_error_set(error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "hardware refuses to create flow");
+ goto error;
+ }
+ }
+ if (flow->nl_flow &&
+ priv->mnl_socket &&
+ mlx5_nl_flow_create(priv->mnl_socket, flow->nl_flow, error))
+ goto error;
+ return 0;
+error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ LIST_FOREACH(verbs, &flow->verbs, next) {
+ if (verbs->hrxq) {
+ if (flow->fate & MLX5_FLOW_FATE_DROP)
+ mlx5_hrxq_drop_release(dev);
+ else
+ mlx5_hrxq_release(dev, verbs->hrxq);
+ verbs->hrxq = NULL;
+ }
+ }
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+/**
+ * Create a flow and add it to @p list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param list
+ * Pointer to a TAILQ flow list.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow *
+mlx5_flow_list_create(struct rte_eth_dev *dev,
+ struct mlx5_flows *list,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow = NULL;
+ size_t size = 0;
+ int ret;
+
+ ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
+ if (ret < 0)
+ return NULL;
+ size = ret;
+ flow = rte_calloc(__func__, 1, size, 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "not enough memory to create flow");
+ return NULL;
+ }
+ ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
+ if (ret < 0) {
+ rte_free(flow);
+ return NULL;
+ }
+ assert((size_t)ret == size);
+ if (dev->data->dev_started) {
+ ret = mlx5_flow_apply(dev, flow, error);
+ if (ret < 0) {
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (flow) {
+ mlx5_flow_remove(dev, flow);
+ rte_free(flow);
+ }
+ rte_errno = ret; /* Restore rte_errno. */
+ return NULL;
+ }
+ }
+ TAILQ_INSERT_TAIL(list, flow, next);
+ mlx5_flow_rxq_flags_set(dev, flow);
+ return flow;
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+struct rte_flow *
+mlx5_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ return mlx5_flow_list_create
+ (dev, &((struct priv *)dev->data->dev_private)->flows,
+ attr, items, actions, error);
+}
+
+/**
+ * Destroy a flow in a list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param list
+ * Pointer to a TAILQ flow list.
+ * @param[in] flow
+ * Flow to destroy.
+ */
+static void
+mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ struct rte_flow *flow)
+{
+ mlx5_flow_remove(dev, flow);
+ TAILQ_REMOVE(list, flow, next);
+ /*
+ * Update RX queue flags only if port is started, otherwise it is
+ * already clean.
+ */
+ if (dev->data->dev_started)
+ mlx5_flow_rxq_flags_trim(dev, flow);
+ rte_free(flow);
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param list
+ * Pointer to a TAILQ flow list.
+ */
+void
+mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
+{
+ while (!TAILQ_EMPTY(list)) {
+ struct rte_flow *flow;
+
+ flow = TAILQ_FIRST(list);
+ mlx5_flow_list_destroy(dev, list, flow);
+ }
+}
+
+/**
+ * Remove all flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param list
+ * Pointer to a TAILQ flow list.
+ */
+void
+mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
+{
+ struct rte_flow *flow;
+
+ TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
+ mlx5_flow_remove(dev, flow);
+ mlx5_flow_rxq_flags_clear(dev);
+}
+
+/**
+ * Add all flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param list
+ * Pointer to a TAILQ flow list.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
+{
+ struct rte_flow *flow;
+ struct rte_flow_error error;
+ int ret = 0;
+
+ TAILQ_FOREACH(flow, list, next) {
+ ret = mlx5_flow_apply(dev, flow, &error);
+ if (ret < 0)
+ goto error;
+ mlx5_flow_rxq_flags_set(dev, flow);
+ }
+ return 0;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_flow_stop(dev, list);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+/**
+ * Verify the flow list is empty
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return the number of flows not released.
+ */
+int
+mlx5_flow_verify(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow;
+ int ret = 0;
+
+ TAILQ_FOREACH(flow, &priv->flows, next) {
+ DRV_LOG(DEBUG, "port %u flow %p still referenced",
+ dev->data->port_id, (void *)flow);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Enable a control flow configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param eth_spec
+ * An Ethernet flow spec to apply.
+ * @param eth_mask
+ * An Ethernet flow mask to apply.
+ * @param vlan_spec
+ * A VLAN flow spec to apply.
+ * @param vlan_mask
+ * A VLAN flow mask to apply.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask,
+ struct rte_flow_item_vlan *vlan_spec,
+ struct rte_flow_item_vlan *vlan_mask)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr attr = {
+ .ingress = 1,
+ .priority = MLX5_FLOW_PRIO_RSVD,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = eth_spec,
+ .last = NULL,
+ .mask = eth_mask,
+ },
+ {
+ .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
+ RTE_FLOW_ITEM_TYPE_END,
+ .spec = vlan_spec,
+ .last = NULL,
+ .mask = vlan_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ uint16_t queue[priv->reta_idx_n];
+ struct rte_flow_action_rss action_rss = {
+ .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ .level = 0,
+ .types = priv->rss_conf.rss_hf,
+ .key_len = priv->rss_conf.rss_key_len,
+ .queue_num = priv->reta_idx_n,
+ .key = priv->rss_conf.rss_key,
+ .queue = queue,
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_RSS,
+ .conf = &action_rss,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_flow *flow;
+ struct rte_flow_error error;
+ unsigned int i;
+
+ if (!priv->reta_idx_n) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ for (i = 0; i != priv->reta_idx_n; ++i)
+ queue[i] = (*priv->reta_idx)[i];
+ flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
+ actions, &error);
+ if (!flow)
+ return -rte_errno;
+ return 0;
+}
+
+/**
+ * Enable a flow control configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param eth_spec
+ * An Ethernet flow spec to apply.
+ * @param eth_mask
+ * An Ethernet flow mask to apply.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ctrl_flow(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask)
+{
+ return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ mlx5_flow_list_destroy(dev, &priv->flows, flow);
+ return 0;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ mlx5_flow_list_flush(dev, &priv->flows);
+ return 0;
+}
+
+/**
+ * Isolated mode.
+ *
+ * @see rte_flow_isolate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_isolate(struct rte_eth_dev *dev,
+ int enable,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ if (dev->data->dev_started) {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "port must be stopped first");
+ return -rte_errno;
+ }
+ priv->isolated = !!enable;
+ if (enable)
+ dev->dev_ops = &mlx5_dev_ops_isolate;
+ else
+ dev->dev_ops = &mlx5_dev_ops;
+ return 0;
+}
+
+/**
+ * Query flow counter.
+ *
+ * @param flow
+ * Pointer to the flow.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_query_count(struct rte_flow *flow __rte_unused,
+ void *data __rte_unused,
+ struct rte_flow_error *error)
+{
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ if (flow->modifier & MLX5_FLOW_MOD_COUNT) {
+ struct rte_flow_query_count *qc = data;
+ uint64_t counters[2] = {0, 0};
+ struct ibv_query_counter_set_attr query_cs_attr = {
+ .cs = flow->counter->cs,
+ .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
+ };
+ struct ibv_counter_set_data query_out = {
+ .out = counters,
+ .outlen = 2 * sizeof(uint64_t),
+ };
+ int err = mlx5_glue->query_counter_set(&query_cs_attr,
+ &query_out);
+
+ if (err)
+ return rte_flow_error_set
+ (error, err,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot read counter");
+ qc->hits_set = 1;
+ qc->bytes_set = 1;
+ qc->hits = counters[0] - flow->counter->hits;
+ qc->bytes = counters[1] - flow->counter->bytes;
+ if (qc->reset) {
+ flow->counter->hits = counters[0];
+ flow->counter->bytes = counters[1];
+ }
+ return 0;
+ }
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "flow does not have counter");
+#endif
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "counters are not available");
+}
+
+/**
+ * Query a flows.
+ *
+ * @see rte_flow_query()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = mlx5_flow_query_count(flow, data, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * Convert a flow director filter to a generic flow.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param fdir_filter
+ * Flow director filter to add.
+ * @param attributes
+ * Generic flow parameters structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ struct mlx5_fdir *attributes)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_eth_fdir_input *input = &fdir_filter->input;
+ const struct rte_eth_fdir_masks *mask =
+ &dev->data->dev_conf.fdir_conf.mask;
+
+ /* Validate queue number. */
+ if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
+ DRV_LOG(ERR, "port %u invalid queue number %d",
+ dev->data->port_id, fdir_filter->action.rx_queue);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ attributes->attr.ingress = 1;
+ attributes->items[0] = (struct rte_flow_item) {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &attributes->l2,
+ .mask = &attributes->l2_mask,
+ };
+ switch (fdir_filter->action.behavior) {
+ case RTE_ETH_FDIR_ACCEPT:
+ attributes->actions[0] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+ .conf = &attributes->queue,
+ };
+ break;
+ case RTE_ETH_FDIR_REJECT:
+ attributes->actions[0] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_DROP,
+ };
+ break;
+ default:
+ DRV_LOG(ERR, "port %u invalid behavior %d",
+ dev->data->port_id,
+ fdir_filter->action.behavior);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ attributes->queue.index = fdir_filter->action.rx_queue;
+ /* Handle L3. */
+ switch (fdir_filter->input.flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+ .src_addr = input->flow.ip4_flow.src_ip,
+ .dst_addr = input->flow.ip4_flow.dst_ip,
+ .time_to_live = input->flow.ip4_flow.ttl,
+ .type_of_service = input->flow.ip4_flow.tos,
+ .next_proto_id = input->flow.ip4_flow.proto,
+ };
+ attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
+ .src_addr = mask->ipv4_mask.src_ip,
+ .dst_addr = mask->ipv4_mask.dst_ip,
+ .time_to_live = mask->ipv4_mask.ttl,
+ .type_of_service = mask->ipv4_mask.tos,
+ .next_proto_id = mask->ipv4_mask.proto,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &attributes->l3,
+ .mask = &attributes->l3_mask,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ .hop_limits = input->flow.ipv6_flow.hop_limits,
+ .proto = input->flow.ipv6_flow.proto,
+ };
+
+ memcpy(attributes->l3.ipv6.hdr.src_addr,
+ input->flow.ipv6_flow.src_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3.ipv6.hdr.dst_addr,
+ input->flow.ipv6_flow.dst_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
+ mask->ipv6_mask.src_ip,
+ RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
+ memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
+ mask->ipv6_mask.dst_ip,
+ RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &attributes->l3,
+ .mask = &attributes->l3_mask,
+ };
+ break;
+ default:
+ DRV_LOG(ERR, "port %u invalid flow type%d",
+ dev->data->port_id, fdir_filter->input.flow_type);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ /* Handle L4. */
+ switch (fdir_filter->input.flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ attributes->l4.udp.hdr = (struct udp_hdr){
+ .src_port = input->flow.udp4_flow.src_port,
+ .dst_port = input->flow.udp4_flow.dst_port,
+ };
+ attributes->l4_mask.udp.hdr = (struct udp_hdr){
+ .src_port = mask->src_port_mask,
+ .dst_port = mask->dst_port_mask,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .spec = &attributes->l4,
+ .mask = &attributes->l4_mask,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ attributes->l4.tcp.hdr = (struct tcp_hdr){
+ .src_port = input->flow.tcp4_flow.src_port,
+ .dst_port = input->flow.tcp4_flow.dst_port,
+ };
+ attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+ .src_port = mask->src_port_mask,
+ .dst_port = mask->dst_port_mask,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .spec = &attributes->l4,
+ .mask = &attributes->l4_mask,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ attributes->l4.udp.hdr = (struct udp_hdr){
+ .src_port = input->flow.udp6_flow.src_port,
+ .dst_port = input->flow.udp6_flow.dst_port,
+ };
+ attributes->l4_mask.udp.hdr = (struct udp_hdr){
+ .src_port = mask->src_port_mask,
+ .dst_port = mask->dst_port_mask,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .spec = &attributes->l4,
+ .mask = &attributes->l4_mask,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ attributes->l4.tcp.hdr = (struct tcp_hdr){
+ .src_port = input->flow.tcp6_flow.src_port,
+ .dst_port = input->flow.tcp6_flow.dst_port,
+ };
+ attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+ .src_port = mask->src_port_mask,
+ .dst_port = mask->dst_port_mask,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .spec = &attributes->l4,
+ .mask = &attributes->l4_mask,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ break;
+ default:
+ DRV_LOG(ERR, "port %u invalid flow type%d",
+ dev->data->port_id, fdir_filter->input.flow_type);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ return 0;
+}
+
+/**
+ * Add new flow director filter and store it in list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param fdir_filter
+ * Flow director filter to add.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_fdir_filter_add(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_fdir attributes = {
+ .attr.group = 0,
+ .l2_mask = {
+ .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .type = 0,
+ },
+ };
+ struct rte_flow_error error;
+ struct rte_flow *flow;
+ int ret;
+
+ ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
+ if (ret)
+ return ret;
+ flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
+ attributes.items, attributes.actions,
+ &error);
+ if (flow) {
+ DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
+ (void *)flow);
+ return 0;
+ }
+ return -rte_errno;
+}
+
+/**
+ * Delete specific filter.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param fdir_filter
+ * Filter to be deleted.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_eth_fdir_filter *fdir_filter
+ __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
+
+/**
+ * Update queue for specific filter.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param fdir_filter
+ * Filter to be updated.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_fdir_filter_update(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter)
+{
+ int ret;
+
+ ret = mlx5_fdir_filter_delete(dev, fdir_filter);
+ if (ret)
+ return ret;
+ return mlx5_fdir_filter_add(dev, fdir_filter);
+}
+
+/**
+ * Flush all filters.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ mlx5_flow_list_flush(dev, &priv->flows);
+}
+
+/**
+ * Get flow director information.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] fdir_info
+ * Resulting flow director information.
+ */
+static void
+mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
+{
+ struct rte_eth_fdir_masks *mask =
+ &dev->data->dev_conf.fdir_conf.mask;
+
+ fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
+ fdir_info->guarant_spc = 0;
+ rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
+ fdir_info->max_flexpayload = 0;
+ fdir_info->flow_types_mask[0] = 0;
+ fdir_info->flex_payload_unit = 0;
+ fdir_info->max_flex_payload_segment_num = 0;
+ fdir_info->flex_payload_limit = 0;
+ memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
+}
+
+/**
+ * Deal with flow director operations.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
+ void *arg)
+{
+ enum rte_fdir_mode fdir_mode =
+ dev->data->dev_conf.fdir_conf.mode;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+ if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
+ fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ DRV_LOG(ERR, "port %u flow director mode %d not supported",
+ dev->data->port_id, fdir_mode);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ return mlx5_fdir_filter_add(dev, arg);
+ case RTE_ETH_FILTER_UPDATE:
+ return mlx5_fdir_filter_update(dev, arg);
+ case RTE_ETH_FILTER_DELETE:
+ return mlx5_fdir_filter_delete(dev, arg);
+ case RTE_ETH_FILTER_FLUSH:
+ mlx5_fdir_filter_flush(dev);
+ break;
+ case RTE_ETH_FILTER_INFO:
+ mlx5_fdir_info_get(dev, arg);
+ break;
+ default:
+ DRV_LOG(DEBUG, "port %u unknown operation %u",
+ dev->data->port_id, filter_op);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ return 0;
+}
+
+/**
+ * Manage filter operations.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ *(const void **)arg = &mlx5_flow_ops;
+ return 0;
+ case RTE_ETH_FILTER_FDIR:
+ return mlx5_fdir_ctrl_func(dev, filter_op, arg);
+ default:
+ DRV_LOG(ERR, "port %u filter type (%d) not supported",
+ dev->data->port_id, filter_type);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_glue.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_glue.c
new file mode 100644
index 00000000..84f9492a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_glue.c
@@ -0,0 +1,395 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <errno.h>
+#include <stdalign.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/*
+ * Not needed by this file; included to work around the lack of off_t
+ * definition for mlx5dv.h with unpatched rdma-core versions.
+ */
+#include <sys/types.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_config.h>
+
+#include "mlx5_autoconf.h"
+#include "mlx5_glue.h"
+
+static int
+mlx5_glue_fork_init(void)
+{
+ return ibv_fork_init();
+}
+
+static struct ibv_pd *
+mlx5_glue_alloc_pd(struct ibv_context *context)
+{
+ return ibv_alloc_pd(context);
+}
+
+static int
+mlx5_glue_dealloc_pd(struct ibv_pd *pd)
+{
+ return ibv_dealloc_pd(pd);
+}
+
+static struct ibv_device **
+mlx5_glue_get_device_list(int *num_devices)
+{
+ return ibv_get_device_list(num_devices);
+}
+
+static void
+mlx5_glue_free_device_list(struct ibv_device **list)
+{
+ ibv_free_device_list(list);
+}
+
+static struct ibv_context *
+mlx5_glue_open_device(struct ibv_device *device)
+{
+ return ibv_open_device(device);
+}
+
+static int
+mlx5_glue_close_device(struct ibv_context *context)
+{
+ return ibv_close_device(context);
+}
+
+static int
+mlx5_glue_query_device(struct ibv_context *context,
+ struct ibv_device_attr *device_attr)
+{
+ return ibv_query_device(context, device_attr);
+}
+
+static int
+mlx5_glue_query_device_ex(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr)
+{
+ return ibv_query_device_ex(context, input, attr);
+}
+
+static int
+mlx5_glue_query_port(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr)
+{
+ return ibv_query_port(context, port_num, port_attr);
+}
+
+static struct ibv_comp_channel *
+mlx5_glue_create_comp_channel(struct ibv_context *context)
+{
+ return ibv_create_comp_channel(context);
+}
+
+static int
+mlx5_glue_destroy_comp_channel(struct ibv_comp_channel *channel)
+{
+ return ibv_destroy_comp_channel(channel);
+}
+
+static struct ibv_cq *
+mlx5_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context,
+ struct ibv_comp_channel *channel, int comp_vector)
+{
+ return ibv_create_cq(context, cqe, cq_context, channel, comp_vector);
+}
+
+static int
+mlx5_glue_destroy_cq(struct ibv_cq *cq)
+{
+ return ibv_destroy_cq(cq);
+}
+
+static int
+mlx5_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,
+ void **cq_context)
+{
+ return ibv_get_cq_event(channel, cq, cq_context);
+}
+
+static void
+mlx5_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)
+{
+ ibv_ack_cq_events(cq, nevents);
+}
+
+static struct ibv_rwq_ind_table *
+mlx5_glue_create_rwq_ind_table(struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr)
+{
+ return ibv_create_rwq_ind_table(context, init_attr);
+}
+
+static int
+mlx5_glue_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)
+{
+ return ibv_destroy_rwq_ind_table(rwq_ind_table);
+}
+
+static struct ibv_wq *
+mlx5_glue_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr)
+{
+ return ibv_create_wq(context, wq_init_attr);
+}
+
+static int
+mlx5_glue_destroy_wq(struct ibv_wq *wq)
+{
+ return ibv_destroy_wq(wq);
+}
+static int
+mlx5_glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr)
+{
+ return ibv_modify_wq(wq, wq_attr);
+}
+
+static struct ibv_flow *
+mlx5_glue_create_flow(struct ibv_qp *qp, struct ibv_flow_attr *flow)
+{
+ return ibv_create_flow(qp, flow);
+}
+
+static int
+mlx5_glue_destroy_flow(struct ibv_flow *flow_id)
+{
+ return ibv_destroy_flow(flow_id);
+}
+
+static struct ibv_qp *
+mlx5_glue_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr)
+{
+ return ibv_create_qp(pd, qp_init_attr);
+}
+
+static struct ibv_qp *
+mlx5_glue_create_qp_ex(struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex)
+{
+ return ibv_create_qp_ex(context, qp_init_attr_ex);
+}
+
+static int
+mlx5_glue_destroy_qp(struct ibv_qp *qp)
+{
+ return ibv_destroy_qp(qp);
+}
+
+static int
+mlx5_glue_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
+{
+ return ibv_modify_qp(qp, attr, attr_mask);
+}
+
+static struct ibv_mr *
+mlx5_glue_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
+{
+ return ibv_reg_mr(pd, addr, length, access);
+}
+
+static int
+mlx5_glue_dereg_mr(struct ibv_mr *mr)
+{
+ return ibv_dereg_mr(mr);
+}
+
+static struct ibv_counter_set *
+mlx5_glue_create_counter_set(struct ibv_context *context,
+ struct ibv_counter_set_init_attr *init_attr)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ (void)context;
+ (void)init_attr;
+ return NULL;
+#else
+ return ibv_create_counter_set(context, init_attr);
+#endif
+}
+
+static int
+mlx5_glue_destroy_counter_set(struct ibv_counter_set *cs)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ (void)cs;
+ return ENOTSUP;
+#else
+ return ibv_destroy_counter_set(cs);
+#endif
+}
+
+static int
+mlx5_glue_describe_counter_set(struct ibv_context *context,
+ uint16_t counter_set_id,
+ struct ibv_counter_set_description *cs_desc)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ (void)context;
+ (void)counter_set_id;
+ (void)cs_desc;
+ return ENOTSUP;
+#else
+ return ibv_describe_counter_set(context, counter_set_id, cs_desc);
+#endif
+}
+
+static int
+mlx5_glue_query_counter_set(struct ibv_query_counter_set_attr *query_attr,
+ struct ibv_counter_set_data *cs_data)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ (void)query_attr;
+ (void)cs_data;
+ return ENOTSUP;
+#else
+ return ibv_query_counter_set(query_attr, cs_data);
+#endif
+}
+
+static void
+mlx5_glue_ack_async_event(struct ibv_async_event *event)
+{
+ ibv_ack_async_event(event);
+}
+
+static int
+mlx5_glue_get_async_event(struct ibv_context *context,
+ struct ibv_async_event *event)
+{
+ return ibv_get_async_event(context, event);
+}
+
+static const char *
+mlx5_glue_port_state_str(enum ibv_port_state port_state)
+{
+ return ibv_port_state_str(port_state);
+}
+
+static struct ibv_cq *
+mlx5_glue_cq_ex_to_cq(struct ibv_cq_ex *cq)
+{
+ return ibv_cq_ex_to_cq(cq);
+}
+
+static struct ibv_cq_ex *
+mlx5_glue_dv_create_cq(struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *cq_attr,
+ struct mlx5dv_cq_init_attr *mlx5_cq_attr)
+{
+ return mlx5dv_create_cq(context, cq_attr, mlx5_cq_attr);
+}
+
+static struct ibv_wq *
+mlx5_glue_dv_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_attr,
+ struct mlx5dv_wq_init_attr *mlx5_wq_attr)
+{
+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ (void)context;
+ (void)wq_attr;
+ (void)mlx5_wq_attr;
+ return NULL;
+#else
+ return mlx5dv_create_wq(context, wq_attr, mlx5_wq_attr);
+#endif
+}
+
+static int
+mlx5_glue_dv_query_device(struct ibv_context *ctx,
+ struct mlx5dv_context *attrs_out)
+{
+ return mlx5dv_query_device(ctx, attrs_out);
+}
+
+static int
+mlx5_glue_dv_set_context_attr(struct ibv_context *ibv_ctx,
+ enum mlx5dv_set_ctx_attr_type type, void *attr)
+{
+ return mlx5dv_set_context_attr(ibv_ctx, type, attr);
+}
+
+static int
+mlx5_glue_dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type)
+{
+ return mlx5dv_init_obj(obj, obj_type);
+}
+
+static struct ibv_qp *
+mlx5_glue_dv_create_qp(struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex,
+ struct mlx5dv_qp_init_attr *dv_qp_init_attr)
+{
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ return mlx5dv_create_qp(context, qp_init_attr_ex, dv_qp_init_attr);
+#else
+ (void)context;
+ (void)qp_init_attr_ex;
+ (void)dv_qp_init_attr;
+ return NULL;
+#endif
+}
+
+alignas(RTE_CACHE_LINE_SIZE)
+const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){
+ .version = MLX5_GLUE_VERSION,
+ .fork_init = mlx5_glue_fork_init,
+ .alloc_pd = mlx5_glue_alloc_pd,
+ .dealloc_pd = mlx5_glue_dealloc_pd,
+ .get_device_list = mlx5_glue_get_device_list,
+ .free_device_list = mlx5_glue_free_device_list,
+ .open_device = mlx5_glue_open_device,
+ .close_device = mlx5_glue_close_device,
+ .query_device = mlx5_glue_query_device,
+ .query_device_ex = mlx5_glue_query_device_ex,
+ .query_port = mlx5_glue_query_port,
+ .create_comp_channel = mlx5_glue_create_comp_channel,
+ .destroy_comp_channel = mlx5_glue_destroy_comp_channel,
+ .create_cq = mlx5_glue_create_cq,
+ .destroy_cq = mlx5_glue_destroy_cq,
+ .get_cq_event = mlx5_glue_get_cq_event,
+ .ack_cq_events = mlx5_glue_ack_cq_events,
+ .create_rwq_ind_table = mlx5_glue_create_rwq_ind_table,
+ .destroy_rwq_ind_table = mlx5_glue_destroy_rwq_ind_table,
+ .create_wq = mlx5_glue_create_wq,
+ .destroy_wq = mlx5_glue_destroy_wq,
+ .modify_wq = mlx5_glue_modify_wq,
+ .create_flow = mlx5_glue_create_flow,
+ .destroy_flow = mlx5_glue_destroy_flow,
+ .create_qp = mlx5_glue_create_qp,
+ .create_qp_ex = mlx5_glue_create_qp_ex,
+ .destroy_qp = mlx5_glue_destroy_qp,
+ .modify_qp = mlx5_glue_modify_qp,
+ .reg_mr = mlx5_glue_reg_mr,
+ .dereg_mr = mlx5_glue_dereg_mr,
+ .create_counter_set = mlx5_glue_create_counter_set,
+ .destroy_counter_set = mlx5_glue_destroy_counter_set,
+ .describe_counter_set = mlx5_glue_describe_counter_set,
+ .query_counter_set = mlx5_glue_query_counter_set,
+ .ack_async_event = mlx5_glue_ack_async_event,
+ .get_async_event = mlx5_glue_get_async_event,
+ .port_state_str = mlx5_glue_port_state_str,
+ .cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,
+ .dv_create_cq = mlx5_glue_dv_create_cq,
+ .dv_create_wq = mlx5_glue_dv_create_wq,
+ .dv_query_device = mlx5_glue_dv_query_device,
+ .dv_set_context_attr = mlx5_glue_dv_set_context_attr,
+ .dv_init_obj = mlx5_glue_dv_init_obj,
+ .dv_create_qp = mlx5_glue_dv_create_qp,
+};
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_glue.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_glue.h
new file mode 100644
index 00000000..e584d367
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_glue.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef MLX5_GLUE_H_
+#define MLX5_GLUE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#ifndef MLX5_GLUE_VERSION
+#define MLX5_GLUE_VERSION ""
+#endif
+
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+struct ibv_counter_set;
+struct ibv_counter_set_data;
+struct ibv_counter_set_description;
+struct ibv_counter_set_init_attr;
+struct ibv_query_counter_set_attr;
+#endif
+
+#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+struct mlx5dv_qp_init_attr;
+#endif
+
+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+struct mlx5dv_wq_init_attr;
+#endif
+
+/* LIB_GLUE_VERSION must be updated every time this structure is modified. */
+struct mlx5_glue {
+ const char *version;
+ int (*fork_init)(void);
+ struct ibv_pd *(*alloc_pd)(struct ibv_context *context);
+ int (*dealloc_pd)(struct ibv_pd *pd);
+ struct ibv_device **(*get_device_list)(int *num_devices);
+ void (*free_device_list)(struct ibv_device **list);
+ struct ibv_context *(*open_device)(struct ibv_device *device);
+ int (*close_device)(struct ibv_context *context);
+ int (*query_device)(struct ibv_context *context,
+ struct ibv_device_attr *device_attr);
+ int (*query_device_ex)(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr);
+ int (*query_port)(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr);
+ struct ibv_comp_channel *(*create_comp_channel)
+ (struct ibv_context *context);
+ int (*destroy_comp_channel)(struct ibv_comp_channel *channel);
+ struct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,
+ void *cq_context,
+ struct ibv_comp_channel *channel,
+ int comp_vector);
+ int (*destroy_cq)(struct ibv_cq *cq);
+ int (*get_cq_event)(struct ibv_comp_channel *channel,
+ struct ibv_cq **cq, void **cq_context);
+ void (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents);
+ struct ibv_rwq_ind_table *(*create_rwq_ind_table)
+ (struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr);
+ int (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);
+ struct ibv_wq *(*create_wq)(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr);
+ int (*destroy_wq)(struct ibv_wq *wq);
+ int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);
+ struct ibv_flow *(*create_flow)(struct ibv_qp *qp,
+ struct ibv_flow_attr *flow);
+ int (*destroy_flow)(struct ibv_flow *flow_id);
+ struct ibv_qp *(*create_qp)(struct ibv_pd *pd,
+ struct ibv_qp_init_attr *qp_init_attr);
+ struct ibv_qp *(*create_qp_ex)
+ (struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex);
+ int (*destroy_qp)(struct ibv_qp *qp);
+ int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask);
+ struct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr,
+ size_t length, int access);
+ int (*dereg_mr)(struct ibv_mr *mr);
+ struct ibv_counter_set *(*create_counter_set)
+ (struct ibv_context *context,
+ struct ibv_counter_set_init_attr *init_attr);
+ int (*destroy_counter_set)(struct ibv_counter_set *cs);
+ int (*describe_counter_set)
+ (struct ibv_context *context,
+ uint16_t counter_set_id,
+ struct ibv_counter_set_description *cs_desc);
+ int (*query_counter_set)(struct ibv_query_counter_set_attr *query_attr,
+ struct ibv_counter_set_data *cs_data);
+ void (*ack_async_event)(struct ibv_async_event *event);
+ int (*get_async_event)(struct ibv_context *context,
+ struct ibv_async_event *event);
+ const char *(*port_state_str)(enum ibv_port_state port_state);
+ struct ibv_cq *(*cq_ex_to_cq)(struct ibv_cq_ex *cq);
+ struct ibv_cq_ex *(*dv_create_cq)
+ (struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *cq_attr,
+ struct mlx5dv_cq_init_attr *mlx5_cq_attr);
+ struct ibv_wq *(*dv_create_wq)
+ (struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_attr,
+ struct mlx5dv_wq_init_attr *mlx5_wq_attr);
+ int (*dv_query_device)(struct ibv_context *ctx_in,
+ struct mlx5dv_context *attrs_out);
+ int (*dv_set_context_attr)(struct ibv_context *ibv_ctx,
+ enum mlx5dv_set_ctx_attr_type type,
+ void *attr);
+ int (*dv_init_obj)(struct mlx5dv_obj *obj, uint64_t obj_type);
+ struct ibv_qp *(*dv_create_qp)
+ (struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex,
+ struct mlx5dv_qp_init_attr *dv_qp_init_attr);
+};
+
+const struct mlx5_glue *mlx5_glue;
+
+#endif /* MLX5_GLUE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_mac.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mac.c
new file mode 100644
index 00000000..12ee37f5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mac.c
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#include <stddef.h>
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <netinet/in.h>
+#include <sys/ioctl.h>
+#include <arpa/inet.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_common.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_defs.h"
+
+/**
+ * Get MAC address by querying netdevice.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[out] mac
+ * MAC address output buffer.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN])
+{
+ struct ifreq request;
+ int ret;
+
+ ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request, 0);
+ if (ret)
+ return ret;
+ memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+ return 0;
+}
+
+/**
+ * Remove a MAC address from the internal array.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param index
+ * MAC address index.
+ */
+static void
+mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct priv *priv = dev->data->dev_private;
+ const int vf = priv->config.vf;
+
+ assert(index < MLX5_MAX_MAC_ADDRESSES);
+ if (is_zero_ether_addr(&dev->data->mac_addrs[index]))
+ return;
+ if (vf)
+ mlx5_nl_mac_addr_remove(dev, &dev->data->mac_addrs[index],
+ index);
+ memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr));
+}
+
+/**
+ * Adds a MAC address to the internal array.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_internal_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index)
+{
+ struct priv *priv = dev->data->dev_private;
+ const int vf = priv->config.vf;
+ unsigned int i;
+
+ assert(index < MLX5_MAX_MAC_ADDRESSES);
+ if (is_zero_ether_addr(mac)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ /* First, make sure this address isn't already configured. */
+ for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) {
+ /* Skip this index, it's going to be reconfigured. */
+ if (i == index)
+ continue;
+ if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac)))
+ continue;
+ /* Address already configured elsewhere, return with error. */
+ rte_errno = EADDRINUSE;
+ return -rte_errno;
+ }
+ if (vf) {
+ int ret = mlx5_nl_mac_addr_add(dev, mac, index);
+
+ if (ret)
+ return ret;
+ }
+ dev->data->mac_addrs[index] = *mac;
+ return 0;
+}
+
+/**
+ * DPDK callback to remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param index
+ * MAC address index.
+ */
+void
+mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ int ret;
+
+ if (index >= MLX5_MAX_UC_MAC_ADDRESSES)
+ return;
+ mlx5_internal_mac_addr_remove(dev, index);
+ if (!dev->data->promiscuous) {
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot restart traffic: %s",
+ dev->data->port_id, strerror(rte_errno));
+ }
+}
+
+/**
+ * DPDK callback to add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ * @param vmdq
+ * VMDq pool index to associate address with (ignored).
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index, uint32_t vmdq __rte_unused)
+{
+ int ret;
+
+ if (index >= MLX5_MAX_UC_MAC_ADDRESSES) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ ret = mlx5_internal_mac_addr_add(dev, mac, index);
+ if (ret < 0)
+ return ret;
+ if (!dev->data->promiscuous)
+ return mlx5_traffic_restart(dev);
+ return 0;
+}
+
+/**
+ * DPDK callback to set primary MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ DRV_LOG(DEBUG, "port %u setting primary MAC address",
+ dev->data->port_id);
+ return mlx5_mac_addr_add(dev, mac_addr, 0, 0);
+}
+
+/**
+ * DPDK callback to set multicast addresses list.
+ *
+ * @see rte_eth_dev_set_mc_addr_list()
+ */
+int
+mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set, uint32_t nb_mc_addr)
+{
+ uint32_t i;
+ int ret;
+
+ if (nb_mc_addr >= MLX5_MAX_MC_MAC_ADDRESSES) {
+ rte_errno = ENOSPC;
+ return -rte_errno;
+ }
+ for (i = MLX5_MAX_UC_MAC_ADDRESSES; i != MLX5_MAX_MAC_ADDRESSES; ++i)
+ mlx5_internal_mac_addr_remove(dev, i);
+ i = MLX5_MAX_UC_MAC_ADDRESSES;
+ while (nb_mc_addr--) {
+ ret = mlx5_internal_mac_addr_add(dev, mc_addr_set++, i++);
+ if (ret)
+ return ret;
+ }
+ if (!dev->data->promiscuous)
+ return mlx5_traffic_restart(dev);
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.c
new file mode 100644
index 00000000..1d1bcb5f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.c
@@ -0,0 +1,1186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox Technologies, Ltd
+ */
+
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_rwlock.h>
+
+#include "mlx5.h"
+#include "mlx5_mr.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_glue.h"
+
+struct mr_find_contig_memsegs_data {
+ uintptr_t addr;
+ uintptr_t start;
+ uintptr_t end;
+ const struct rte_memseg_list *msl;
+};
+
+struct mr_update_mp_data {
+ struct rte_eth_dev *dev;
+ struct mlx5_mr_ctrl *mr_ctrl;
+ int ret;
+};
+
+/**
+ * Expand B-tree table to a given size. Can't be called with holding
+ * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc().
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries for expansion.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_btree_expand(struct mlx5_mr_btree *bt, int n)
+{
+ void *mem;
+ int ret = 0;
+
+ if (n <= bt->size)
+ return ret;
+ /*
+ * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is
+ * used inside if there's no room to expand. Because this is a quite
+ * rare case and a part of very slow path, it is very acceptable.
+ * Initially cache_bh[] will be given practically enough space and once
+ * it is expanded, expansion wouldn't be needed again ever.
+ */
+ mem = rte_realloc(bt->table, n * sizeof(struct mlx5_mr_cache), 0);
+ if (mem == NULL) {
+ /* Not an error, B-tree search will be skipped. */
+ DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
+ (void *)bt);
+ ret = -1;
+ } else {
+ DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
+ bt->table = mem;
+ bt->size = n;
+ }
+ return ret;
+}
+
+/**
+ * Look up LKey from given B-tree lookup table, store the last index and return
+ * searched LKey.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param[out] idx
+ * Pointer to index. Even on search failure, returns index where it stops
+ * searching so that index can be used when inserting a new entry.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
+{
+ struct mlx5_mr_cache *lkp_tbl;
+ uint16_t n;
+ uint16_t base = 0;
+
+ assert(bt != NULL);
+ lkp_tbl = *bt->table;
+ n = bt->len;
+ /* First entry must be NULL for comparison. */
+ assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
+ lkp_tbl[0].lkey == UINT32_MAX));
+ /* Binary search. */
+ do {
+ register uint16_t delta = n >> 1;
+
+ if (addr < lkp_tbl[base + delta].start) {
+ n = delta;
+ } else {
+ base += delta;
+ n -= delta;
+ }
+ } while (n > 1);
+ assert(addr >= lkp_tbl[base].start);
+ *idx = base;
+ if (addr < lkp_tbl[base].end)
+ return lkp_tbl[base].lkey;
+ /* Not found. */
+ return UINT32_MAX;
+}
+
+/**
+ * Insert an entry to B-tree lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param entry
+ * Pointer to new entry to insert.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_btree_insert(struct mlx5_mr_btree *bt, struct mlx5_mr_cache *entry)
+{
+ struct mlx5_mr_cache *lkp_tbl;
+ uint16_t idx = 0;
+ size_t shift;
+
+ assert(bt != NULL);
+ assert(bt->len <= bt->size);
+ assert(bt->len > 0);
+ lkp_tbl = *bt->table;
+ /* Find out the slot for insertion. */
+ if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
+ DRV_LOG(DEBUG,
+ "abort insertion to B-tree(%p): already exist at"
+ " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ /* Already exist, return. */
+ return 0;
+ }
+ /* If table is full, return error. */
+ if (unlikely(bt->len == bt->size)) {
+ bt->overflow = 1;
+ return -1;
+ }
+ /* Insert entry. */
+ ++idx;
+ shift = (bt->len - idx) * sizeof(struct mlx5_mr_cache);
+ if (shift)
+ memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
+ lkp_tbl[idx] = *entry;
+ bt->len++;
+ DRV_LOG(DEBUG,
+ "inserted B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ return 0;
+}
+
+/**
+ * Initialize B-tree and allocate memory for lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries to allocate.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
+{
+ if (bt == NULL) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ assert(!bt->table && !bt->size);
+ memset(bt, 0, sizeof(*bt));
+ bt->table = rte_calloc_socket("B-tree table",
+ n, sizeof(struct mlx5_mr_cache),
+ 0, socket);
+ if (bt->table == NULL) {
+ rte_errno = ENOMEM;
+ DEBUG("failed to allocate memory for btree cache on socket %d",
+ socket);
+ return -rte_errno;
+ }
+ bt->size = n;
+ /* First entry must be NULL for binary search. */
+ (*bt->table)[bt->len++] = (struct mlx5_mr_cache) {
+ .lkey = UINT32_MAX,
+ };
+ DEBUG("initialized B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
+ return 0;
+}
+
+/**
+ * Free B-tree resources.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+void
+mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
+{
+ if (bt == NULL)
+ return;
+ DEBUG("freeing B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
+ rte_free(bt->table);
+ memset(bt, 0, sizeof(*bt));
+}
+
+/**
+ * Dump all the entries in a B-tree
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+void
+mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
+{
+#ifndef NDEBUG
+ int idx;
+ struct mlx5_mr_cache *lkp_tbl;
+
+ if (bt == NULL)
+ return;
+ lkp_tbl = *bt->table;
+ for (idx = 0; idx < bt->len; ++idx) {
+ struct mlx5_mr_cache *entry = &lkp_tbl[idx];
+
+ DEBUG("B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ }
+#endif
+}
+
+/**
+ * Find virtually contiguous memory chunk in a given MR.
+ *
+ * @param dev
+ * Pointer to MR structure.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If not found, this will not be
+ * updated.
+ * @param start_idx
+ * Start index of the memseg bitmap.
+ *
+ * @return
+ * Next index to go on lookup.
+ */
+static int
+mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
+ int base_idx)
+{
+ uintptr_t start = 0;
+ uintptr_t end = 0;
+ uint32_t idx = 0;
+
+ for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
+ if (rte_bitmap_get(mr->ms_bmp, idx)) {
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
+
+ msl = mr->msl;
+ ms = rte_fbarray_get(&msl->memseg_arr,
+ mr->ms_base_idx + idx);
+ assert(msl->page_sz == ms->hugepage_sz);
+ if (!start)
+ start = ms->addr_64;
+ end = ms->addr_64 + ms->hugepage_sz;
+ } else if (start) {
+ /* Passed the end of a fragment. */
+ break;
+ }
+ }
+ if (start) {
+ /* Found one chunk. */
+ entry->start = start;
+ entry->end = end;
+ entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
+ }
+ return idx;
+}
+
+/**
+ * Insert a MR to the global B-tree cache. It may fail due to low-on-memory.
+ * Then, this entry will have to be searched by mr_lookup_dev_list() in
+ * mlx5_mr_create() on miss.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr
+ * Pointer to MR to insert.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx5_mr *mr)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int n;
+
+ DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache",
+ dev->data->port_id, (void *)mr);
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx5_mr_cache entry = { 0, };
+
+ /* Find a contiguous chunk and advance the index. */
+ n = mr_find_next_chunk(mr, &entry, n);
+ if (!entry.end)
+ break;
+ if (mr_btree_insert(&priv->mr.cache, &entry) < 0) {
+ /*
+ * Overflowed, but the global table cannot be expanded
+ * because of deadlock.
+ */
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Look up address in the original global MR list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Found MR on match, NULL otherwise.
+ */
+static struct mlx5_mr *
+mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
+ uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr;
+
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
+ unsigned int n;
+
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx5_mr_cache ret = { 0, };
+
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (addr >= ret.start && addr < ret.end) {
+ /* Found. */
+ *entry = ret;
+ return mr;
+ }
+ }
+ }
+ return NULL;
+}
+
+/**
+ * Look up address on device.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
+ */
+static uint32_t
+mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
+ uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint16_t idx;
+ uint32_t lkey = UINT32_MAX;
+ struct mlx5_mr *mr;
+
+ /*
+ * If the global cache has overflowed since it failed to expand the
+ * B-tree table, it can't have all the existing MRs. Then, the address
+ * has to be searched by traversing the original MR list instead, which
+ * is very slow path. Otherwise, the global cache is all inclusive.
+ */
+ if (!unlikely(priv->mr.cache.overflow)) {
+ lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (lkey != UINT32_MAX)
+ *entry = (*priv->mr.cache.table)[idx];
+ } else {
+ /* Falling back to the slowest path. */
+ mr = mr_lookup_dev_list(dev, entry, addr);
+ if (mr != NULL)
+ lkey = entry->lkey;
+ }
+ assert(lkey == UINT32_MAX || (addr >= entry->start &&
+ addr < entry->end));
+ return lkey;
+}
+
+/**
+ * Free MR resources. MR lock must not be held to avoid a deadlock. rte_free()
+ * can raise memory free event and the callback function will spin on the lock.
+ *
+ * @param mr
+ * Pointer to MR to free.
+ */
+static void
+mr_free(struct mlx5_mr *mr)
+{
+ if (mr == NULL)
+ return;
+ DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
+ if (mr->ibv_mr != NULL)
+ claim_zero(mlx5_glue->dereg_mr(mr->ibv_mr));
+ if (mr->ms_bmp != NULL)
+ rte_bitmap_free(mr->ms_bmp);
+ rte_free(mr);
+}
+
+/**
+ * Releass resources of detached MR having no online entry.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mlx5_mr_garbage_collect(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr_next;
+ struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
+
+ /* Must be called from the primary process. */
+ assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ /*
+ * MR can't be freed with holding the lock because rte_free() could call
+ * memory free callback function. This will be a deadlock situation.
+ */
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Detach the whole free list and release it after unlocking. */
+ free_list = priv->mr.mr_free_list;
+ LIST_INIT(&priv->mr.mr_free_list);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Release resources. */
+ mr_next = LIST_FIRST(&free_list);
+ while (mr_next != NULL) {
+ struct mlx5_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ mr_free(mr);
+ }
+}
+
+/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
+static int
+mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
+ const struct rte_memseg *ms, size_t len, void *arg)
+{
+ struct mr_find_contig_memsegs_data *data = arg;
+
+ if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
+ return 0;
+ /* Found, save it and stop walking. */
+ data->start = ms->addr_64;
+ data->end = ms->addr_64 + len;
+ data->msl = msl;
+ return 1;
+}
+
+/**
+ * Create a new global Memroy Region (MR) for a missing virtual address.
+ * Register entire virtually contiguous memory chunk around the address.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ * created. If failed to create one, this will not be updated.
+ * @param addr
+ * Target virtual address to register.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
+ */
+static uint32_t
+mlx5_mr_create(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
+ uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
+ struct mlx5_mr *mr = NULL;
+ size_t len;
+ uint32_t ms_n;
+ uint32_t bmp_size;
+ void *bmp_mem;
+ int ms_idx_shift = -1;
+ unsigned int n;
+ struct mr_find_contig_memsegs_data data = {
+ .addr = addr,
+ };
+ struct mr_find_contig_memsegs_data data_re;
+
+ DRV_LOG(DEBUG, "port %u creating a MR using address (%p)",
+ dev->data->port_id, (void *)addr);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DRV_LOG(WARNING,
+ "port %u using address (%p) of unregistered mempool"
+ " in secondary process, please create mempool"
+ " before rte_eth_dev_start()",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EPERM;
+ goto err_nolock;
+ }
+ /*
+ * Release detached MRs if any. This can't be called with holding either
+ * memory_hotplug_lock or priv->mr.rwlock. MRs on the free list have
+ * been detached by the memory free event but it couldn't be released
+ * inside the callback due to deadlock. As a result, releasing resources
+ * is quite opportunistic.
+ */
+ mlx5_mr_garbage_collect(dev);
+ /*
+ * Find out a contiguous virtual address chunk in use, to which the
+ * given address belongs, in order to register maximum range. In the
+ * best case where mempools are not dynamically recreated and
+ * '--socket-mem' is speicified as an EAL option, it is very likely to
+ * have only one MR(LKey) per a socket and per a hugepage-size even
+ * though the system memory is highly fragmented.
+ */
+ if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
+ DRV_LOG(WARNING,
+ "port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENXIO;
+ goto err_nolock;
+ }
+alloc_resources:
+ /* Addresses must be page-aligned. */
+ assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
+ assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
+ msl = data.msl;
+ ms = rte_mem_virt2memseg((void *)data.start, msl);
+ len = data.end - data.start;
+ assert(msl->page_sz == ms->hugepage_sz);
+ /* Number of memsegs in the range. */
+ ms_n = len / msl->page_sz;
+ DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " page_sz=0x%" PRIx64 ", ms_n=%u",
+ dev->data->port_id, (void *)addr,
+ data.start, data.end, msl->page_sz, ms_n);
+ /* Size of memory for bitmap. */
+ bmp_size = rte_bitmap_get_memory_footprint(ms_n);
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE) +
+ bmp_size,
+ RTE_CACHE_LINE_SIZE, msl->socket_id);
+ if (mr == NULL) {
+ DEBUG("port %u unable to allocate memory for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENOMEM;
+ goto err_nolock;
+ }
+ mr->msl = msl;
+ /*
+ * Save the index of the first memseg and initialize memseg bitmap. To
+ * see if a memseg of ms_idx in the memseg-list is still valid, check:
+ * rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
+ */
+ mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
+ mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
+ if (mr->ms_bmp == NULL) {
+ DEBUG("port %u unable to initialize bitamp for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EINVAL;
+ goto err_nolock;
+ }
+ /*
+ * Should recheck whether the extended contiguous chunk is still valid.
+ * Because memory_hotplug_lock can't be held if there's any memory
+ * related calls in a critical path, resource allocation above can't be
+ * locked. If the memory has been changed at this point, try again with
+ * just single page. If not, go on with the big chunk atomically from
+ * here.
+ */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ data_re = data;
+ if (len > msl->page_sz &&
+ !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
+ DEBUG("port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENXIO;
+ goto err_memlock;
+ }
+ if (data.start != data_re.start || data.end != data_re.end) {
+ /*
+ * The extended contiguous chunk has been changed. Try again
+ * with single memseg instead.
+ */
+ data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
+ data.end = data.start + msl->page_sz;
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ mr_free(mr);
+ goto alloc_resources;
+ }
+ assert(data.msl == data_re.msl);
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /*
+ * Check the address is really missing. If other thread already created
+ * one or it is not found due to overflow, abort and return.
+ */
+ if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) {
+ /*
+ * Insert to the global cache table. It may fail due to
+ * low-on-memory. Then, this entry will have to be searched
+ * here again.
+ */
+ mr_btree_insert(&priv->mr.cache, entry);
+ DEBUG("port %u found MR for %p on final lookup, abort",
+ dev->data->port_id, (void *)addr);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ /*
+ * Must be unlocked before calling rte_free() because
+ * mlx5_mr_mem_event_free_cb() can be called inside.
+ */
+ mr_free(mr);
+ return entry->lkey;
+ }
+ /*
+ * Trim start and end addresses for verbs MR. Set bits for registering
+ * memsegs but exclude already registered ones. Bitmap can be
+ * fragmented.
+ */
+ for (n = 0; n < ms_n; ++n) {
+ uintptr_t start;
+ struct mlx5_mr_cache ret = { 0, };
+
+ start = data_re.start + n * msl->page_sz;
+ /* Exclude memsegs already registered by other MRs. */
+ if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
+ /*
+ * Start from the first unregistered memseg in the
+ * extended range.
+ */
+ if (ms_idx_shift == -1) {
+ mr->ms_base_idx += n;
+ data.start = start;
+ ms_idx_shift = n;
+ }
+ data.end = start + msl->page_sz;
+ rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
+ ++mr->ms_n;
+ }
+ }
+ len = data.end - data.start;
+ mr->ms_bmp_n = len / msl->page_sz;
+ assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
+ /*
+ * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
+ * called with holding the memory lock because it doesn't use
+ * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
+ * through mlx5_alloc_verbs_buf().
+ */
+ mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (mr->ibv_mr == NULL) {
+ DEBUG("port %u fail to create a verbs MR for address (%p)",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EINVAL;
+ goto err_mrlock;
+ }
+ assert((uintptr_t)mr->ibv_mr->addr == data.start);
+ assert(mr->ibv_mr->length == len);
+ LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
+ DEBUG("port %u MR CREATED (%p) for %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ dev->data->port_id, (void *)mr, (void *)addr,
+ data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ /* Insert to the global cache table. */
+ mr_insert_dev_cache(dev, mr);
+ /* Fill in output data. */
+ mr_lookup_dev(dev, entry, addr);
+ /* Lookup can't fail. */
+ assert(entry->lkey != UINT32_MAX);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ return entry->lkey;
+err_mrlock:
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+err_memlock:
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+err_nolock:
+ /*
+ * In case of error, as this can be called in a datapath, a warning
+ * message per an error is preferable instead. Must be unlocked before
+ * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
+ * inside.
+ */
+ mr_free(mr);
+ return UINT32_MAX;
+}
+
+/**
+ * Rebuild the global B-tree cache of device from the original MR list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mr_rebuild_dev_cache(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr;
+
+ DRV_LOG(DEBUG, "port %u rebuild dev cache[]", dev->data->port_id);
+ /* Flush cache to rebuild. */
+ priv->mr.cache.len = 1;
+ priv->mr.cache.overflow = 0;
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr)
+ if (mr_insert_dev_cache(dev, mr) < 0)
+ return;
+}
+
+/**
+ * Callback for memory free event. Iterate freed memsegs and check whether it
+ * belongs to an existing MR. If found, clear the bit from bitmap of MR. As a
+ * result, the MR would be fragmented. If it becomes empty, the MR will be freed
+ * later by mlx5_mr_garbage_collect(). Even if this callback is called from a
+ * secondary process, the garbage collector will be called in primary process
+ * as the secondary process can't call mlx5_mr_create().
+ *
+ * The global cache must be rebuilt if there's any change and this event has to
+ * be propagated to dataplane threads to flush the local caches.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param addr
+ * Address of freed memory.
+ * @param len
+ * Size of freed memory.
+ */
+static void
+mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_memseg_list *msl;
+ struct mlx5_mr *mr;
+ int ms_n;
+ int i;
+ int rebuild = 0;
+
+ DEBUG("port %u free callback: addr=%p, len=%zu",
+ dev->data->port_id, addr, len);
+ msl = rte_mem_virt2memseg_list(addr);
+ /* addr and len must be page-aligned. */
+ assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+ assert(len == RTE_ALIGN(len, msl->page_sz));
+ ms_n = len / msl->page_sz;
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Clear bits of freed memsegs from MR. */
+ for (i = 0; i < ms_n; ++i) {
+ const struct rte_memseg *ms;
+ struct mlx5_mr_cache entry;
+ uintptr_t start;
+ int ms_idx;
+ uint32_t pos;
+
+ /* Find MR having this memseg. */
+ start = (uintptr_t)addr + i * msl->page_sz;
+ mr = mr_lookup_dev_list(dev, &entry, start);
+ if (mr == NULL)
+ continue;
+ ms = rte_mem_virt2memseg((void *)start, msl);
+ assert(ms != NULL);
+ assert(msl->page_sz == ms->hugepage_sz);
+ ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ pos = ms_idx - mr->ms_base_idx;
+ assert(rte_bitmap_get(mr->ms_bmp, pos));
+ assert(pos < mr->ms_bmp_n);
+ DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
+ dev->data->port_id, (void *)mr, pos, (void *)start);
+ rte_bitmap_clear(mr->ms_bmp, pos);
+ if (--mr->ms_n == 0) {
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
+ DEBUG("port %u remove MR(%p) from list",
+ dev->data->port_id, (void *)mr);
+ }
+ /*
+ * MR is fragmented or will be freed. the global cache must be
+ * rebuilt.
+ */
+ rebuild = 1;
+ }
+ if (rebuild) {
+ mr_rebuild_dev_cache(dev);
+ /*
+ * Flush local caches by propagating invalidation across cores.
+ * rte_smp_wmb() is enough to synchronize this event. If one of
+ * freed memsegs is seen by other core, that means the memseg
+ * has been allocated by allocator, which will come after this
+ * free call. Therefore, this store instruction (incrementing
+ * generation below) will be guaranteed to be seen by other core
+ * before the core sees the newly allocated memory.
+ */
+ ++priv->mr.dev_gen;
+ DEBUG("broadcasting local cache flush, gen=%d",
+ priv->mr.dev_gen);
+ rte_smp_wmb();
+ }
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+}
+
+/**
+ * Callback for memory event. This can be called from both primary and secondary
+ * process.
+ *
+ * @param event_type
+ * Memory event type.
+ * @param addr
+ * Address of memory.
+ * @param len
+ * Size of memory.
+ */
+void
+mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg __rte_unused)
+{
+ struct priv *priv;
+ struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
+
+ switch (event_type) {
+ case RTE_MEM_EVENT_FREE:
+ rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+ /* Iterate all the existing mlx5 devices. */
+ LIST_FOREACH(priv, dev_list, mem_event_cb)
+ mlx5_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
+ rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+ break;
+ case RTE_MEM_EVENT_ALLOC:
+ default:
+ break;
+ }
+}
+
+/**
+ * Look up address in the global MR cache table. If not found, create a new MR.
+ * Insert the found/created entry to local bottom-half cache table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ * created. If failed to create one, this is not written.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx5_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct mlx5_mr_cache *entry, uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
+ uint16_t idx;
+ uint32_t lkey;
+
+ /* If local cache table is full, try to double it. */
+ if (unlikely(bt->len == bt->size))
+ mr_btree_expand(bt, bt->size << 1);
+ /* Look up in the global cache. */
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (lkey != UINT32_MAX) {
+ /* Found. */
+ *entry = (*priv->mr.cache.table)[idx];
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /*
+ * Update local cache. Even if it fails, return the found entry
+ * to update top-half cache. Next time, this entry will be found
+ * in the global cache.
+ */
+ mr_btree_insert(bt, entry);
+ return lkey;
+ }
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /* First time to see the address? Create a new MR. */
+ lkey = mlx5_mr_create(dev, entry, addr);
+ /*
+ * Update the local cache if successfully created a new global MR. Even
+ * if failed to create one, there's no action to take in this datapath
+ * code. As returning LKey is invalid, this will eventually make HW
+ * fail.
+ */
+ if (lkey != UINT32_MAX)
+ mr_btree_insert(bt, entry);
+ return lkey;
+}
+
+/**
+ * Bottom-half of LKey search on datapath. Firstly search in cache_bh[] and if
+ * misses, search in the global MR cache table and update the new entry to
+ * per-queue local caches.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx5_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ uintptr_t addr)
+{
+ uint32_t lkey;
+ uint16_t bh_idx = 0;
+ /* Victim in top-half cache to replace with new entry. */
+ struct mlx5_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];
+
+ /* Binary-search MR translation table. */
+ lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
+ /* Update top-half cache. */
+ if (likely(lkey != UINT32_MAX)) {
+ *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
+ } else {
+ /*
+ * If missed in local lookup table, search in the global cache
+ * and local cache_bh[] will be updated inside if possible.
+ * Top-half cache entry will also be updated.
+ */
+ lkey = mlx5_mr_lookup_dev(dev, mr_ctrl, repl, addr);
+ if (unlikely(lkey == UINT32_MAX))
+ return UINT32_MAX;
+ }
+ /* Update the most recently used entry. */
+ mr_ctrl->mru = mr_ctrl->head;
+ /* Point to the next victim, the oldest. */
+ mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
+ return lkey;
+}
+
+/**
+ * Bottom-half of LKey search on Rx.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
+{
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ struct priv *priv = rxq_ctrl->priv;
+
+ DRV_LOG(DEBUG,
+ "Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ rxq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
+}
+
+/**
+ * Bottom-half of LKey search on Tx.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
+{
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct priv *priv = txq_ctrl->priv;
+
+ DRV_LOG(DEBUG,
+ "Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ txq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
+}
+
+/**
+ * Flush all of the local cache entries.
+ *
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ */
+void
+mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
+{
+ /* Reset the most-recently-used index. */
+ mr_ctrl->mru = 0;
+ /* Reset the linear search array. */
+ mr_ctrl->head = 0;
+ memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
+ /* Reset the B-tree table. */
+ mr_ctrl->cache_bh.len = 1;
+ mr_ctrl->cache_bh.overflow = 0;
+ /* Update the generation number. */
+ mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
+ DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
+ (void *)mr_ctrl, mr_ctrl->cur_gen);
+}
+
+/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
+static void
+mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx __rte_unused)
+{
+ struct mr_update_mp_data *data = opaque;
+ uint32_t lkey;
+
+ /* Stop iteration if failed in the previous walk. */
+ if (data->ret < 0)
+ return;
+ /* Register address of the chunk and update local caches. */
+ lkey = mlx5_mr_addr2mr_bh(data->dev, data->mr_ctrl,
+ (uintptr_t)memhdr->addr);
+ if (lkey == UINT32_MAX)
+ data->ret = -1;
+}
+
+/**
+ * Register entire memory chunks in a Mempool.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param mp
+ * Pointer to registering Mempool.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+int
+mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
+{
+ struct mr_update_mp_data data = {
+ .dev = dev,
+ .mr_ctrl = mr_ctrl,
+ .ret = 0,
+ };
+
+ rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
+ return data.ret;
+}
+
+/**
+ * Dump all the created MRs and the global cache entries.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused)
+{
+#ifndef NDEBUG
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr;
+ int mr_n = 0;
+ int chunk_n = 0;
+
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
+ unsigned int n;
+
+ DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
+ dev->data->port_id, mr_n++,
+ rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_n, mr->ms_bmp_n);
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx5_mr_cache ret = { 0, };
+
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (!ret.end)
+ break;
+ DEBUG(" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
+ chunk_n++, ret.start, ret.end);
+ }
+ }
+ DEBUG("port %u dumping global cache", dev->data->port_id);
+ mlx5_mr_btree_dump(&priv->mr.cache);
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+#endif
+}
+
+/**
+ * Release all the created MRs and resources. Remove device from memory callback
+ * list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_mr_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);
+
+ /* Remove from memory callback device list. */
+ rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+ LIST_REMOVE(priv, mem_event_cb);
+ rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+ if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
+ mlx5_mr_dump_dev(dev);
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Detach from MR list and move to free list. */
+ while (mr_next != NULL) {
+ struct mlx5_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
+ }
+ LIST_INIT(&priv->mr.mr_list);
+ /* Free global cache. */
+ mlx5_mr_btree_free(&priv->mr.cache);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Free all remaining MRs. */
+ mlx5_mr_garbage_collect(dev);
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.h
new file mode 100644
index 00000000..a57003fe
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_MR_H_
+#define RTE_PMD_MLX5_MR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#include <infiniband/mlx5dv.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_eal_memconfig.h>
+#include <rte_ethdev.h>
+#include <rte_rwlock.h>
+#include <rte_bitmap.h>
+
+/* Memory Region object. */
+struct mlx5_mr {
+ LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
+ struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
+ const struct rte_memseg_list *msl;
+ int ms_base_idx; /* Start index of msl->memseg_arr[]. */
+ int ms_n; /* Number of memsegs in use. */
+ uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
+ struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonged to MR. */
+};
+
+/* Cache entry for Memory Region. */
+struct mlx5_mr_cache {
+ uintptr_t start; /* Start address of MR. */
+ uintptr_t end; /* End address of MR. */
+ uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
+} __rte_packed;
+
+/* MR Cache table for Binary search. */
+struct mlx5_mr_btree {
+ uint16_t len; /* Number of entries. */
+ uint16_t size; /* Total number of entries. */
+ int overflow; /* Mark failure of table expansion. */
+ struct mlx5_mr_cache (*table)[];
+} __rte_packed;
+
+/* Per-queue MR control descriptor. */
+struct mlx5_mr_ctrl {
+ uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
+ uint32_t cur_gen; /* Generation number saved to flush caches. */
+ uint16_t mru; /* Index of last hit entry in top-half cache. */
+ uint16_t head; /* Index of the oldest entry in top-half cache. */
+ struct mlx5_mr_cache cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
+ struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
+} __rte_packed;
+
+extern struct mlx5_dev_list mlx5_mem_event_cb_list;
+extern rte_rwlock_t mlx5_mem_event_rwlock;
+
+/* First entry must be NULL for comparison. */
+#define mlx5_mr_btree_len(bt) ((bt)->len - 1)
+
+int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
+void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
+void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg);
+int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp);
+void mlx5_mr_release(struct rte_eth_dev *dev);
+
+/* Debug purpose functions. */
+void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt);
+void mlx5_mr_dump_dev(struct rte_eth_dev *dev);
+
+/**
+ * Look up LKey from given lookup table by linear search. Firstly look up the
+ * last-hit entry. If miss, the entire array is searched. If found, update the
+ * last-hit index and return LKey.
+ *
+ * @param lkp_tbl
+ * Pointer to lookup table.
+ * @param[in,out] cached_idx
+ * Pointer to last-hit index.
+ * @param n
+ * Size of lookup table.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx5_mr_lookup_cache(struct mlx5_mr_cache *lkp_tbl, uint16_t *cached_idx,
+ uint16_t n, uintptr_t addr)
+{
+ uint16_t idx;
+
+ if (likely(addr >= lkp_tbl[*cached_idx].start &&
+ addr < lkp_tbl[*cached_idx].end))
+ return lkp_tbl[*cached_idx].lkey;
+ for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
+ if (addr >= lkp_tbl[idx].start &&
+ addr < lkp_tbl[idx].end) {
+ /* Found. */
+ *cached_idx = idx;
+ return lkp_tbl[idx].lkey;
+ }
+ }
+ return UINT32_MAX;
+}
+
+#endif /* RTE_PMD_MLX5_MR_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_nl.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_nl.c
new file mode 100644
index 00000000..d61826ae
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_nl.c
@@ -0,0 +1,916 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <errno.h>
+#include <linux/if_link.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <net/if.h>
+#include <rdma/rdma_netlink.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <rte_errno.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+
+/* Size of the buffer to receive kernel messages */
+#define MLX5_NL_BUF_SIZE (32 * 1024)
+/* Send buffer size for the Netlink socket */
+#define MLX5_SEND_BUF_SIZE 32768
+/* Receive buffer size for the Netlink socket */
+#define MLX5_RECV_BUF_SIZE 32768
+
+/*
+ * Define NDA_RTA as defined in iproute2 sources.
+ *
+ * see in iproute2 sources file include/libnetlink.h
+ */
+#ifndef MLX5_NDA_RTA
+#define MLX5_NDA_RTA(r) \
+ ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg))))
+#endif
+
+/*
+ * The following definitions are normally found in rdma/rdma_netlink.h,
+ * however they are so recent that most systems do not expose them yet.
+ */
+#ifndef HAVE_RDMA_NL_NLDEV
+#define RDMA_NL_NLDEV 5
+#endif
+#ifndef HAVE_RDMA_NLDEV_CMD_GET
+#define RDMA_NLDEV_CMD_GET 1
+#endif
+#ifndef HAVE_RDMA_NLDEV_CMD_PORT_GET
+#define RDMA_NLDEV_CMD_PORT_GET 5
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_INDEX
+#define RDMA_NLDEV_ATTR_DEV_INDEX 1
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_NAME
+#define RDMA_NLDEV_ATTR_DEV_NAME 2
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_PORT_INDEX
+#define RDMA_NLDEV_ATTR_PORT_INDEX 3
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX
+#define RDMA_NLDEV_ATTR_NDEV_INDEX 50
+#endif
+
+/* These are normally found in linux/if_link.h. */
+#ifndef HAVE_IFLA_PHYS_SWITCH_ID
+#define IFLA_PHYS_SWITCH_ID 36
+#endif
+#ifndef HAVE_IFLA_PHYS_PORT_NAME
+#define IFLA_PHYS_PORT_NAME 38
+#endif
+
+/* Add/remove MAC address through Netlink */
+struct mlx5_nl_mac_addr {
+ struct ether_addr (*mac)[];
+ /**< MAC address handled by the device. */
+ int mac_n; /**< Number of addresses in the array. */
+};
+
+/** Data structure used by mlx5_nl_ifindex_cb(). */
+struct mlx5_nl_ifindex_data {
+ const char *name; /**< IB device name (in). */
+ uint32_t ibindex; /**< IB device index (out). */
+ uint32_t ifindex; /**< Network interface index (out). */
+};
+
+/**
+ * Opens a Netlink socket.
+ *
+ * @param protocol
+ * Netlink protocol (e.g. NETLINK_ROUTE, NETLINK_RDMA).
+ *
+ * @return
+ * A file descriptor on success, a negative errno value otherwise and
+ * rte_errno is set.
+ */
+int
+mlx5_nl_init(int protocol)
+{
+ int fd;
+ int sndbuf_size = MLX5_SEND_BUF_SIZE;
+ int rcvbuf_size = MLX5_RECV_BUF_SIZE;
+ struct sockaddr_nl local = {
+ .nl_family = AF_NETLINK,
+ };
+ int ret;
+
+ fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, protocol);
+ if (fd == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int));
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int));
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ ret = bind(fd, (struct sockaddr *)&local, sizeof(local));
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ return fd;
+error:
+ close(fd);
+ return -rte_errno;
+}
+
+/**
+ * Send a request message to the kernel on the Netlink socket.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] nh
+ * The Netlink message send to the kernel.
+ * @param[in] ssn
+ * Sequence number.
+ * @param[in] req
+ * Pointer to the request structure.
+ * @param[in] len
+ * Length of the request in bytes.
+ *
+ * @return
+ * The number of sent bytes on success, a negative errno value otherwise and
+ * rte_errno is set.
+ */
+static int
+mlx5_nl_request(int nlsk_fd, struct nlmsghdr *nh, uint32_t sn, void *req,
+ int len)
+{
+ struct sockaddr_nl sa = {
+ .nl_family = AF_NETLINK,
+ };
+ struct iovec iov[2] = {
+ { .iov_base = nh, .iov_len = sizeof(*nh), },
+ { .iov_base = req, .iov_len = len, },
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = iov,
+ .msg_iovlen = 2,
+ };
+ int send_bytes;
+
+ nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */
+ nh->nlmsg_seq = sn;
+ send_bytes = sendmsg(nlsk_fd, &msg, 0);
+ if (send_bytes < 0) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ return send_bytes;
+}
+
+/**
+ * Send a message to the kernel on the Netlink socket.
+ *
+ * @param[in] nlsk_fd
+ * The Netlink socket file descriptor used for communication.
+ * @param[in] nh
+ * The Netlink message send to the kernel.
+ * @param[in] sn
+ * Sequence number.
+ *
+ * @return
+ * The number of sent bytes on success, a negative errno value otherwise and
+ * rte_errno is set.
+ */
+static int
+mlx5_nl_send(int nlsk_fd, struct nlmsghdr *nh, uint32_t sn)
+{
+ struct sockaddr_nl sa = {
+ .nl_family = AF_NETLINK,
+ };
+ struct iovec iov = {
+ .iov_base = nh,
+ .iov_len = nh->nlmsg_len,
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ };
+ int send_bytes;
+
+ nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */
+ nh->nlmsg_seq = sn;
+ send_bytes = sendmsg(nlsk_fd, &msg, 0);
+ if (send_bytes < 0) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ return send_bytes;
+}
+
+/**
+ * Receive a message from the kernel on the Netlink socket, following
+ * mlx5_nl_send().
+ *
+ * @param[in] nlsk_fd
+ * The Netlink socket file descriptor used for communication.
+ * @param[in] sn
+ * Sequence number.
+ * @param[in] cb
+ * The callback function to call for each Netlink message received.
+ * @param[in, out] arg
+ * Custom arguments for the callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_recv(int nlsk_fd, uint32_t sn, int (*cb)(struct nlmsghdr *, void *arg),
+ void *arg)
+{
+ struct sockaddr_nl sa;
+ char buf[MLX5_RECV_BUF_SIZE];
+ struct iovec iov = {
+ .iov_base = buf,
+ .iov_len = sizeof(buf),
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ /* One message at a time */
+ .msg_iovlen = 1,
+ };
+ int multipart = 0;
+ int ret = 0;
+
+ do {
+ struct nlmsghdr *nh;
+ int recv_bytes = 0;
+
+ do {
+ recv_bytes = recvmsg(nlsk_fd, &msg, 0);
+ if (recv_bytes == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ nh = (struct nlmsghdr *)buf;
+ } while (nh->nlmsg_seq != sn);
+ for (;
+ NLMSG_OK(nh, (unsigned int)recv_bytes);
+ nh = NLMSG_NEXT(nh, recv_bytes)) {
+ if (nh->nlmsg_type == NLMSG_ERROR) {
+ struct nlmsgerr *err_data = NLMSG_DATA(nh);
+
+ if (err_data->error < 0) {
+ rte_errno = -err_data->error;
+ return -rte_errno;
+ }
+ /* Ack message. */
+ return 0;
+ }
+ /* Multi-part msgs and their trailing DONE message. */
+ if (nh->nlmsg_flags & NLM_F_MULTI) {
+ if (nh->nlmsg_type == NLMSG_DONE)
+ return 0;
+ multipart = 1;
+ }
+ if (cb) {
+ ret = cb(nh, arg);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ } while (multipart);
+ return ret;
+}
+
+/**
+ * Parse Netlink message to retrieve the bridge MAC address.
+ *
+ * @param nh
+ * Pointer to Netlink Message Header.
+ * @param arg
+ * PMD data register with this callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_mac_addr_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct mlx5_nl_mac_addr *data = arg;
+ struct ndmsg *r = NLMSG_DATA(nh);
+ struct rtattr *attribute;
+ int len;
+
+ len = nh->nlmsg_len - NLMSG_LENGTH(sizeof(*r));
+ for (attribute = MLX5_NDA_RTA(r);
+ RTA_OK(attribute, len);
+ attribute = RTA_NEXT(attribute, len)) {
+ if (attribute->rta_type == NDA_LLADDR) {
+ if (data->mac_n == MLX5_MAX_MAC_ADDRESSES) {
+ DRV_LOG(WARNING,
+ "not enough room to finalize the"
+ " request");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+#ifndef NDEBUG
+ char m[18];
+
+ ether_format_addr(m, 18, RTA_DATA(attribute));
+ DRV_LOG(DEBUG, "bridge MAC address %s", m);
+#endif
+ memcpy(&(*data->mac)[data->mac_n++],
+ RTA_DATA(attribute), ETHER_ADDR_LEN);
+ }
+ }
+ return 0;
+}
+
+/**
+ * Get bridge MAC addresses.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mac[out]
+ * Pointer to the array table of MAC addresses to fill.
+ * Its size should be of MLX5_MAX_MAC_ADDRESSES.
+ * @param mac_n[out]
+ * Number of entries filled in MAC array.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_mac_addr_list(struct rte_eth_dev *dev, struct ether_addr (*mac)[],
+ int *mac_n)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int iface_idx = mlx5_ifindex(dev);
+ struct {
+ struct nlmsghdr hdr;
+ struct ifinfomsg ifm;
+ } req = {
+ .hdr = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlmsg_type = RTM_GETNEIGH,
+ .nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ },
+ .ifm = {
+ .ifi_family = PF_BRIDGE,
+ .ifi_index = iface_idx,
+ },
+ };
+ struct mlx5_nl_mac_addr data = {
+ .mac = mac,
+ .mac_n = 0,
+ };
+ int fd;
+ int ret;
+ uint32_t sn = priv->nl_sn++;
+
+ if (priv->nl_socket_route == -1)
+ return 0;
+ fd = priv->nl_socket_route;
+ ret = mlx5_nl_request(fd, &req.hdr, sn, &req.ifm,
+ sizeof(struct ifinfomsg));
+ if (ret < 0)
+ goto error;
+ ret = mlx5_nl_recv(fd, sn, mlx5_nl_mac_addr_cb, &data);
+ if (ret < 0)
+ goto error;
+ *mac_n = data.mac_n;
+ return 0;
+error:
+ DRV_LOG(DEBUG, "port %u cannot retrieve MAC address list %s",
+ dev->data->port_id, strerror(rte_errno));
+ return -rte_errno;
+}
+
+/**
+ * Modify the MAC address neighbour table with Netlink.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mac
+ * MAC address to consider.
+ * @param add
+ * 1 to add the MAC address, 0 to remove the MAC address.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct ether_addr *mac,
+ int add)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int iface_idx = mlx5_ifindex(dev);
+ struct {
+ struct nlmsghdr hdr;
+ struct ndmsg ndm;
+ struct rtattr rta;
+ uint8_t buffer[ETHER_ADDR_LEN];
+ } req = {
+ .hdr = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)),
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE |
+ NLM_F_EXCL | NLM_F_ACK,
+ .nlmsg_type = add ? RTM_NEWNEIGH : RTM_DELNEIGH,
+ },
+ .ndm = {
+ .ndm_family = PF_BRIDGE,
+ .ndm_state = NUD_NOARP | NUD_PERMANENT,
+ .ndm_ifindex = iface_idx,
+ .ndm_flags = NTF_SELF,
+ },
+ .rta = {
+ .rta_type = NDA_LLADDR,
+ .rta_len = RTA_LENGTH(ETHER_ADDR_LEN),
+ },
+ };
+ int fd;
+ int ret;
+ uint32_t sn = priv->nl_sn++;
+
+ if (priv->nl_socket_route == -1)
+ return 0;
+ fd = priv->nl_socket_route;
+ memcpy(RTA_DATA(&req.rta), mac, ETHER_ADDR_LEN);
+ req.hdr.nlmsg_len = NLMSG_ALIGN(req.hdr.nlmsg_len) +
+ RTA_ALIGN(req.rta.rta_len);
+ ret = mlx5_nl_send(fd, &req.hdr, sn);
+ if (ret < 0)
+ goto error;
+ ret = mlx5_nl_recv(fd, sn, NULL, NULL);
+ if (ret < 0)
+ goto error;
+ return 0;
+error:
+ DRV_LOG(DEBUG,
+ "port %u cannot %s MAC address %02X:%02X:%02X:%02X:%02X:%02X"
+ " %s",
+ dev->data->port_id,
+ add ? "add" : "remove",
+ mac->addr_bytes[0], mac->addr_bytes[1],
+ mac->addr_bytes[2], mac->addr_bytes[3],
+ mac->addr_bytes[4], mac->addr_bytes[5],
+ strerror(rte_errno));
+ return -rte_errno;
+}
+
+/**
+ * Add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mac
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
+ ret = mlx5_nl_mac_addr_modify(dev, mac, 1);
+ if (!ret)
+ BITFIELD_SET(priv->mac_own, index);
+ if (ret == -EEXIST)
+ return 0;
+ return ret;
+}
+
+/**
+ * Remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mac
+ * MAC address to remove.
+ * @param index
+ * MAC address index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ BITFIELD_RESET(priv->mac_own, index);
+ return mlx5_nl_mac_addr_modify(dev, mac, 0);
+}
+
+/**
+ * Synchronize Netlink bridge table to the internal table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev)
+{
+ struct ether_addr macs[MLX5_MAX_MAC_ADDRESSES];
+ int macs_n = 0;
+ int i;
+ int ret;
+
+ ret = mlx5_nl_mac_addr_list(dev, &macs, &macs_n);
+ if (ret)
+ return;
+ for (i = 0; i != macs_n; ++i) {
+ int j;
+
+ /* Verify the address is not in the array yet. */
+ for (j = 0; j != MLX5_MAX_MAC_ADDRESSES; ++j)
+ if (is_same_ether_addr(&macs[i],
+ &dev->data->mac_addrs[j]))
+ break;
+ if (j != MLX5_MAX_MAC_ADDRESSES)
+ continue;
+ /* Find the first entry available. */
+ for (j = 0; j != MLX5_MAX_MAC_ADDRESSES; ++j) {
+ if (is_zero_ether_addr(&dev->data->mac_addrs[j])) {
+ dev->data->mac_addrs[j] = macs[i];
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * Flush all added MAC addresses.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int i;
+
+ for (i = MLX5_MAX_MAC_ADDRESSES - 1; i >= 0; --i) {
+ struct ether_addr *m = &dev->data->mac_addrs[i];
+
+ if (BITFIELD_ISSET(priv->mac_own, i))
+ mlx5_nl_mac_addr_remove(dev, m, i);
+ }
+}
+
+/**
+ * Enable promiscuous / all multicast mode through Netlink.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param flags
+ * IFF_PROMISC for promiscuous, IFF_ALLMULTI for allmulti.
+ * @param enable
+ * Nonzero to enable, disable otherwise.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_device_flags(struct rte_eth_dev *dev, uint32_t flags, int enable)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int iface_idx = mlx5_ifindex(dev);
+ struct {
+ struct nlmsghdr hdr;
+ struct ifinfomsg ifi;
+ } req = {
+ .hdr = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlmsg_type = RTM_NEWLINK,
+ .nlmsg_flags = NLM_F_REQUEST,
+ },
+ .ifi = {
+ .ifi_flags = enable ? flags : 0,
+ .ifi_change = flags,
+ .ifi_index = iface_idx,
+ },
+ };
+ int fd;
+ int ret;
+
+ assert(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI)));
+ if (priv->nl_socket_route < 0)
+ return 0;
+ fd = priv->nl_socket_route;
+ ret = mlx5_nl_send(fd, &req.hdr, priv->nl_sn++);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Enable promiscuous mode through Netlink.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param enable
+ * Nonzero to enable, disable otherwise.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_promisc(struct rte_eth_dev *dev, int enable)
+{
+ int ret = mlx5_nl_device_flags(dev, IFF_PROMISC, enable);
+
+ if (ret)
+ DRV_LOG(DEBUG,
+ "port %u cannot %s promisc mode: Netlink error %s",
+ dev->data->port_id, enable ? "enable" : "disable",
+ strerror(rte_errno));
+ return ret;
+}
+
+/**
+ * Enable all multicast mode through Netlink.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param enable
+ * Nonzero to enable, disable otherwise.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable)
+{
+ int ret = mlx5_nl_device_flags(dev, IFF_ALLMULTI, enable);
+
+ if (ret)
+ DRV_LOG(DEBUG,
+ "port %u cannot %s allmulti mode: Netlink error %s",
+ dev->data->port_id, enable ? "enable" : "disable",
+ strerror(rte_errno));
+ return ret;
+}
+
+/**
+ * Process network interface information from Netlink message.
+ *
+ * @param nh
+ * Pointer to Netlink message header.
+ * @param arg
+ * Opaque data pointer for this callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_ifindex_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct mlx5_nl_ifindex_data *data = arg;
+ size_t off = NLMSG_HDRLEN;
+ uint32_t ibindex = 0;
+ uint32_t ifindex = 0;
+ int found = 0;
+
+ if (nh->nlmsg_type !=
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET) &&
+ nh->nlmsg_type !=
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_PORT_GET))
+ goto error;
+ while (off < nh->nlmsg_len) {
+ struct nlattr *na = (void *)((uintptr_t)nh + off);
+ void *payload = (void *)((uintptr_t)na + NLA_HDRLEN);
+
+ if (na->nla_len > nh->nlmsg_len - off)
+ goto error;
+ switch (na->nla_type) {
+ case RDMA_NLDEV_ATTR_DEV_INDEX:
+ ibindex = *(uint32_t *)payload;
+ break;
+ case RDMA_NLDEV_ATTR_DEV_NAME:
+ if (!strcmp(payload, data->name))
+ found = 1;
+ break;
+ case RDMA_NLDEV_ATTR_NDEV_INDEX:
+ ifindex = *(uint32_t *)payload;
+ break;
+ default:
+ break;
+ }
+ off += NLA_ALIGN(na->nla_len);
+ }
+ if (found) {
+ data->ibindex = ibindex;
+ data->ifindex = ifindex;
+ }
+ return 0;
+error:
+ rte_errno = EINVAL;
+ return -rte_errno;
+}
+
+/**
+ * Get index of network interface associated with some IB device.
+ *
+ * This is the only somewhat safe method to avoid resorting to heuristics
+ * when faced with port representors. Unfortunately it requires at least
+ * Linux 4.17.
+ *
+ * @param nl
+ * Netlink socket of the RDMA kind (NETLINK_RDMA).
+ * @param[in] name
+ * IB device name.
+ *
+ * @return
+ * A valid (nonzero) interface index on success, 0 otherwise and rte_errno
+ * is set.
+ */
+unsigned int
+mlx5_nl_ifindex(int nl, const char *name)
+{
+ static const uint32_t pindex = 1;
+ uint32_t seq = random();
+ struct mlx5_nl_ifindex_data data = {
+ .name = name,
+ .ibindex = 0, /* Determined during first pass. */
+ .ifindex = 0, /* Determined during second pass. */
+ };
+ union {
+ struct nlmsghdr nh;
+ uint8_t buf[NLMSG_HDRLEN +
+ NLA_HDRLEN + NLA_ALIGN(sizeof(data.ibindex)) +
+ NLA_HDRLEN + NLA_ALIGN(sizeof(pindex))];
+ } req = {
+ .nh = {
+ .nlmsg_len = NLMSG_LENGTH(0),
+ .nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_GET),
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP,
+ },
+ };
+ struct nlattr *na;
+ int ret;
+
+ ret = mlx5_nl_send(nl, &req.nh, seq);
+ if (ret < 0)
+ return 0;
+ ret = mlx5_nl_recv(nl, seq, mlx5_nl_ifindex_cb, &data);
+ if (ret < 0)
+ return 0;
+ if (!data.ibindex)
+ goto error;
+ ++seq;
+ req.nh.nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_PORT_GET);
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.buf) - NLMSG_HDRLEN);
+ na = (void *)((uintptr_t)req.buf + NLMSG_HDRLEN);
+ na->nla_len = NLA_HDRLEN + sizeof(data.ibindex);
+ na->nla_type = RDMA_NLDEV_ATTR_DEV_INDEX;
+ memcpy((void *)((uintptr_t)na + NLA_HDRLEN),
+ &data.ibindex, sizeof(data.ibindex));
+ na = (void *)((uintptr_t)na + NLA_ALIGN(na->nla_len));
+ na->nla_len = NLA_HDRLEN + sizeof(pindex);
+ na->nla_type = RDMA_NLDEV_ATTR_PORT_INDEX;
+ memcpy((void *)((uintptr_t)na + NLA_HDRLEN),
+ &pindex, sizeof(pindex));
+ ret = mlx5_nl_send(nl, &req.nh, seq);
+ if (ret < 0)
+ return 0;
+ ret = mlx5_nl_recv(nl, seq, mlx5_nl_ifindex_cb, &data);
+ if (ret < 0)
+ return 0;
+ if (!data.ifindex)
+ goto error;
+ return data.ifindex;
+error:
+ rte_errno = ENODEV;
+ return 0;
+}
+
+/**
+ * Process switch information from Netlink message.
+ *
+ * @param nh
+ * Pointer to Netlink message header.
+ * @param arg
+ * Opaque data pointer for this callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_switch_info_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct mlx5_switch_info info = {
+ .master = 0,
+ .representor = 0,
+ .port_name = 0,
+ .switch_id = 0,
+ };
+ size_t off = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ bool port_name_set = false;
+ bool switch_id_set = false;
+
+ if (nh->nlmsg_type != RTM_NEWLINK)
+ goto error;
+ while (off < nh->nlmsg_len) {
+ struct rtattr *ra = (void *)((uintptr_t)nh + off);
+ void *payload = RTA_DATA(ra);
+ char *end;
+ unsigned int i;
+
+ if (ra->rta_len > nh->nlmsg_len - off)
+ goto error;
+ switch (ra->rta_type) {
+ case IFLA_PHYS_PORT_NAME:
+ errno = 0;
+ info.port_name = strtol(payload, &end, 0);
+ if (errno ||
+ (size_t)(end - (char *)payload) != strlen(payload))
+ goto error;
+ port_name_set = true;
+ break;
+ case IFLA_PHYS_SWITCH_ID:
+ info.switch_id = 0;
+ for (i = 0; i < RTA_PAYLOAD(ra); ++i) {
+ info.switch_id <<= 8;
+ info.switch_id |= ((uint8_t *)payload)[i];
+ }
+ switch_id_set = true;
+ break;
+ }
+ off += RTA_ALIGN(ra->rta_len);
+ }
+ info.master = switch_id_set && !port_name_set;
+ info.representor = switch_id_set && port_name_set;
+ memcpy(arg, &info, sizeof(info));
+ return 0;
+error:
+ rte_errno = EINVAL;
+ return -rte_errno;
+}
+
+/**
+ * Get switch information associated with network interface.
+ *
+ * @param nl
+ * Netlink socket of the ROUTE kind (NETLINK_ROUTE).
+ * @param ifindex
+ * Network interface index.
+ * @param[out] info
+ * Switch information object, populated in case of success.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_switch_info(int nl, unsigned int ifindex, struct mlx5_switch_info *info)
+{
+ uint32_t seq = random();
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg info;
+ } req = {
+ .nh = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(req.info)),
+ .nlmsg_type = RTM_GETLINK,
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
+ },
+ .info = {
+ .ifi_family = AF_UNSPEC,
+ .ifi_index = ifindex,
+ },
+ };
+ int ret;
+
+ ret = mlx5_nl_send(nl, &req.nh, seq);
+ if (ret >= 0)
+ ret = mlx5_nl_recv(nl, seq, mlx5_nl_switch_info_cb, info);
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_nl_flow.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_nl_flow.c
new file mode 100644
index 00000000..a1c8c340
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_nl_flow.c
@@ -0,0 +1,1248 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <libmnl/libmnl.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
+#include <linux/pkt_cls.h>
+#include <linux/pkt_sched.h>
+#include <linux/rtnetlink.h>
+#include <linux/tc_act/tc_gact.h>
+#include <linux/tc_act/tc_mirred.h>
+#include <netinet/in.h>
+#include <stdalign.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/socket.h>
+
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+
+#include "mlx5.h"
+#include "mlx5_autoconf.h"
+
+#ifdef HAVE_TC_ACT_VLAN
+
+#include <linux/tc_act/tc_vlan.h>
+
+#else /* HAVE_TC_ACT_VLAN */
+
+#define TCA_VLAN_ACT_POP 1
+#define TCA_VLAN_ACT_PUSH 2
+#define TCA_VLAN_ACT_MODIFY 3
+#define TCA_VLAN_PARMS 2
+#define TCA_VLAN_PUSH_VLAN_ID 3
+#define TCA_VLAN_PUSH_VLAN_PROTOCOL 4
+#define TCA_VLAN_PAD 5
+#define TCA_VLAN_PUSH_VLAN_PRIORITY 6
+
+struct tc_vlan {
+ tc_gen;
+ int v_action;
+};
+
+#endif /* HAVE_TC_ACT_VLAN */
+
+/* Normally found in linux/netlink.h. */
+#ifndef NETLINK_CAP_ACK
+#define NETLINK_CAP_ACK 10
+#endif
+
+/* Normally found in linux/pkt_sched.h. */
+#ifndef TC_H_MIN_INGRESS
+#define TC_H_MIN_INGRESS 0xfff2u
+#endif
+
+/* Normally found in linux/pkt_cls.h. */
+#ifndef TCA_CLS_FLAGS_SKIP_SW
+#define TCA_CLS_FLAGS_SKIP_SW (1 << 1)
+#endif
+#ifndef HAVE_TCA_FLOWER_ACT
+#define TCA_FLOWER_ACT 3
+#endif
+#ifndef HAVE_TCA_FLOWER_FLAGS
+#define TCA_FLOWER_FLAGS 22
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_TYPE
+#define TCA_FLOWER_KEY_ETH_TYPE 8
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST
+#define TCA_FLOWER_KEY_ETH_DST 4
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST_MASK
+#define TCA_FLOWER_KEY_ETH_DST_MASK 5
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC
+#define TCA_FLOWER_KEY_ETH_SRC 6
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK
+#define TCA_FLOWER_KEY_ETH_SRC_MASK 7
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IP_PROTO
+#define TCA_FLOWER_KEY_IP_PROTO 9
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC
+#define TCA_FLOWER_KEY_IPV4_SRC 10
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK
+#define TCA_FLOWER_KEY_IPV4_SRC_MASK 11
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST
+#define TCA_FLOWER_KEY_IPV4_DST 12
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK
+#define TCA_FLOWER_KEY_IPV4_DST_MASK 13
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC
+#define TCA_FLOWER_KEY_IPV6_SRC 14
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK
+#define TCA_FLOWER_KEY_IPV6_SRC_MASK 15
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST
+#define TCA_FLOWER_KEY_IPV6_DST 16
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK
+#define TCA_FLOWER_KEY_IPV6_DST_MASK 17
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC
+#define TCA_FLOWER_KEY_TCP_SRC 18
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK
+#define TCA_FLOWER_KEY_TCP_SRC_MASK 35
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST
+#define TCA_FLOWER_KEY_TCP_DST 19
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST_MASK
+#define TCA_FLOWER_KEY_TCP_DST_MASK 36
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC
+#define TCA_FLOWER_KEY_UDP_SRC 20
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK
+#define TCA_FLOWER_KEY_UDP_SRC_MASK 37
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST
+#define TCA_FLOWER_KEY_UDP_DST 21
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST_MASK
+#define TCA_FLOWER_KEY_UDP_DST_MASK 38
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ID
+#define TCA_FLOWER_KEY_VLAN_ID 23
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_VLAN_PRIO
+#define TCA_FLOWER_KEY_VLAN_PRIO 24
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE
+#define TCA_FLOWER_KEY_VLAN_ETH_TYPE 25
+#endif
+
+/** Parser state definitions for mlx5_nl_flow_trans[]. */
+enum mlx5_nl_flow_trans {
+ INVALID,
+ BACK,
+ ATTR,
+ PATTERN,
+ ITEM_VOID,
+ ITEM_PORT_ID,
+ ITEM_ETH,
+ ITEM_VLAN,
+ ITEM_IPV4,
+ ITEM_IPV6,
+ ITEM_TCP,
+ ITEM_UDP,
+ ACTIONS,
+ ACTION_VOID,
+ ACTION_PORT_ID,
+ ACTION_DROP,
+ ACTION_OF_POP_VLAN,
+ ACTION_OF_PUSH_VLAN,
+ ACTION_OF_SET_VLAN_VID,
+ ACTION_OF_SET_VLAN_PCP,
+ END,
+};
+
+#define TRANS(...) (const enum mlx5_nl_flow_trans []){ __VA_ARGS__, INVALID, }
+
+#define PATTERN_COMMON \
+ ITEM_VOID, ITEM_PORT_ID, ACTIONS
+#define ACTIONS_COMMON \
+ ACTION_VOID, ACTION_OF_POP_VLAN, ACTION_OF_PUSH_VLAN, \
+ ACTION_OF_SET_VLAN_VID, ACTION_OF_SET_VLAN_PCP
+#define ACTIONS_FATE \
+ ACTION_PORT_ID, ACTION_DROP
+
+/** Parser state transitions used by mlx5_nl_flow_transpose(). */
+static const enum mlx5_nl_flow_trans *const mlx5_nl_flow_trans[] = {
+ [INVALID] = NULL,
+ [BACK] = NULL,
+ [ATTR] = TRANS(PATTERN),
+ [PATTERN] = TRANS(ITEM_ETH, PATTERN_COMMON),
+ [ITEM_VOID] = TRANS(BACK),
+ [ITEM_PORT_ID] = TRANS(BACK),
+ [ITEM_ETH] = TRANS(ITEM_IPV4, ITEM_IPV6, ITEM_VLAN, PATTERN_COMMON),
+ [ITEM_VLAN] = TRANS(ITEM_IPV4, ITEM_IPV6, PATTERN_COMMON),
+ [ITEM_IPV4] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON),
+ [ITEM_IPV6] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON),
+ [ITEM_TCP] = TRANS(PATTERN_COMMON),
+ [ITEM_UDP] = TRANS(PATTERN_COMMON),
+ [ACTIONS] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [ACTION_VOID] = TRANS(BACK),
+ [ACTION_PORT_ID] = TRANS(ACTION_VOID, END),
+ [ACTION_DROP] = TRANS(ACTION_VOID, END),
+ [ACTION_OF_POP_VLAN] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [ACTION_OF_PUSH_VLAN] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [ACTION_OF_SET_VLAN_VID] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [ACTION_OF_SET_VLAN_PCP] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [END] = NULL,
+};
+
+/** Empty masks for known item types. */
+static const union {
+ struct rte_flow_item_port_id port_id;
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_vlan vlan;
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ struct rte_flow_item_tcp tcp;
+ struct rte_flow_item_udp udp;
+} mlx5_nl_flow_mask_empty;
+
+/** Supported masks for known item types. */
+static const struct {
+ struct rte_flow_item_port_id port_id;
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_vlan vlan;
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ struct rte_flow_item_tcp tcp;
+ struct rte_flow_item_udp udp;
+} mlx5_nl_flow_mask_supported = {
+ .port_id = {
+ .id = 0xffffffff,
+ },
+ .eth = {
+ .type = RTE_BE16(0xffff),
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ .vlan = {
+ /* PCP and VID only, no DEI. */
+ .tci = RTE_BE16(0xefff),
+ .inner_type = RTE_BE16(0xffff),
+ },
+ .ipv4.hdr = {
+ .next_proto_id = 0xff,
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ },
+ .ipv6.hdr = {
+ .proto = 0xff,
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
+ .tcp.hdr = {
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
+ },
+ .udp.hdr = {
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
+ },
+};
+
+/**
+ * Retrieve mask for pattern item.
+ *
+ * This function does basic sanity checks on a pattern item in order to
+ * return the most appropriate mask for it.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] mask_default
+ * Default mask for pattern item as specified by the flow API.
+ * @param[in] mask_supported
+ * Mask fields supported by the implementation.
+ * @param[in] mask_empty
+ * Empty mask to return when there is no specification.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * Either @p item->mask or one of the mask parameters on success, NULL
+ * otherwise and rte_errno is set.
+ */
+static const void *
+mlx5_nl_flow_item_mask(const struct rte_flow_item *item,
+ const void *mask_default,
+ const void *mask_supported,
+ const void *mask_empty,
+ size_t mask_size,
+ struct rte_flow_error *error)
+{
+ const uint8_t *mask;
+ size_t i;
+
+ /* item->last and item->mask cannot exist without item->spec. */
+ if (!item->spec && (item->mask || item->last)) {
+ rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "\"mask\" or \"last\" field provided without a"
+ " corresponding \"spec\"");
+ return NULL;
+ }
+ /* No spec, no mask, no problem. */
+ if (!item->spec)
+ return mask_empty;
+ mask = item->mask ? item->mask : mask_default;
+ assert(mask);
+ /*
+ * Single-pass check to make sure that:
+ * - Mask is supported, no bits are set outside mask_supported.
+ * - Both item->spec and item->last are included in mask.
+ */
+ for (i = 0; i != mask_size; ++i) {
+ if (!mask[i])
+ continue;
+ if ((mask[i] | ((const uint8_t *)mask_supported)[i]) !=
+ ((const uint8_t *)mask_supported)[i]) {
+ rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask, "unsupported field found in \"mask\"");
+ return NULL;
+ }
+ if (item->last &&
+ (((const uint8_t *)item->spec)[i] & mask[i]) !=
+ (((const uint8_t *)item->last)[i] & mask[i])) {
+ rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+ item->last,
+ "range between \"spec\" and \"last\" not"
+ " comprised in \"mask\"");
+ return NULL;
+ }
+ }
+ return mask;
+}
+
+/**
+ * Transpose flow rule description to rtnetlink message.
+ *
+ * This function transposes a flow rule description to a traffic control
+ * (TC) filter creation message ready to be sent over Netlink.
+ *
+ * Target interface is specified as the first entry of the @p ptoi table.
+ * Subsequent entries enable this function to resolve other DPDK port IDs
+ * found in the flow rule.
+ *
+ * @param[out] buf
+ * Output message buffer. May be NULL when @p size is 0.
+ * @param size
+ * Size of @p buf. Message may be truncated if not large enough.
+ * @param[in] ptoi
+ * DPDK port ID to network interface index translation table. This table
+ * is terminated by an entry with a zero ifindex value.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification.
+ * @param[in] actions
+ * Associated actions.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A positive value representing the exact size of the message in bytes
+ * regardless of the @p size parameter on success, a negative errno value
+ * otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_flow_transpose(void *buf,
+ size_t size,
+ const struct mlx5_nl_flow_ptoi *ptoi,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ alignas(struct nlmsghdr)
+ uint8_t buf_tmp[mnl_nlmsg_size(sizeof(struct tcmsg) + 1024)];
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *action;
+ unsigned int n;
+ uint32_t act_index_cur;
+ bool in_port_id_set;
+ bool eth_type_set;
+ bool vlan_present;
+ bool vlan_eth_type_set;
+ bool ip_proto_set;
+ struct nlattr *na_flower;
+ struct nlattr *na_flower_act;
+ struct nlattr *na_vlan_id;
+ struct nlattr *na_vlan_priority;
+ const enum mlx5_nl_flow_trans *trans;
+ const enum mlx5_nl_flow_trans *back;
+
+ if (!size)
+ goto error_nobufs;
+init:
+ item = pattern;
+ action = actions;
+ n = 0;
+ act_index_cur = 0;
+ in_port_id_set = false;
+ eth_type_set = false;
+ vlan_present = false;
+ vlan_eth_type_set = false;
+ ip_proto_set = false;
+ na_flower = NULL;
+ na_flower_act = NULL;
+ na_vlan_id = NULL;
+ na_vlan_priority = NULL;
+ trans = TRANS(ATTR);
+ back = trans;
+trans:
+ switch (trans[n++]) {
+ union {
+ const struct rte_flow_item_port_id *port_id;
+ const struct rte_flow_item_eth *eth;
+ const struct rte_flow_item_vlan *vlan;
+ const struct rte_flow_item_ipv4 *ipv4;
+ const struct rte_flow_item_ipv6 *ipv6;
+ const struct rte_flow_item_tcp *tcp;
+ const struct rte_flow_item_udp *udp;
+ } spec, mask;
+ union {
+ const struct rte_flow_action_port_id *port_id;
+ const struct rte_flow_action_of_push_vlan *of_push_vlan;
+ const struct rte_flow_action_of_set_vlan_vid *
+ of_set_vlan_vid;
+ const struct rte_flow_action_of_set_vlan_pcp *
+ of_set_vlan_pcp;
+ } conf;
+ struct nlmsghdr *nlh;
+ struct tcmsg *tcm;
+ struct nlattr *act_index;
+ struct nlattr *act;
+ unsigned int i;
+
+ case INVALID:
+ if (item->type)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "unsupported pattern item combination");
+ else if (action->type)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "unsupported action combination");
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "flow rule lacks some kind of fate action");
+ case BACK:
+ trans = back;
+ n = 0;
+ goto trans;
+ case ATTR:
+ /*
+ * Supported attributes: no groups, some priorities and
+ * ingress only. Don't care about transfer as it is the
+ * caller's problem.
+ */
+ if (attr->group)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "groups are not supported");
+ if (attr->priority > 0xfffe)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "lowest priority level is 0xfffe");
+ if (!attr->ingress)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "only ingress is supported");
+ if (attr->egress)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "egress is not supported");
+ if (size < mnl_nlmsg_size(sizeof(*tcm)))
+ goto error_nobufs;
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = 0;
+ nlh->nlmsg_flags = 0;
+ nlh->nlmsg_seq = 0;
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm_ifindex = ptoi[0].ifindex;
+ /*
+ * Let kernel pick a handle by default. A predictable handle
+ * can be set by the caller on the resulting buffer through
+ * mlx5_nl_flow_brand().
+ */
+ tcm->tcm_handle = 0;
+ tcm->tcm_parent = TC_H_MAKE(TC_H_INGRESS, TC_H_MIN_INGRESS);
+ /*
+ * Priority cannot be zero to prevent the kernel from
+ * picking one automatically.
+ */
+ tcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16,
+ RTE_BE16(ETH_P_ALL));
+ break;
+ case PATTERN:
+ if (!mnl_attr_put_strz_check(buf, size, TCA_KIND, "flower"))
+ goto error_nobufs;
+ na_flower = mnl_attr_nest_start_check(buf, size, TCA_OPTIONS);
+ if (!na_flower)
+ goto error_nobufs;
+ if (!mnl_attr_put_u32_check(buf, size, TCA_FLOWER_FLAGS,
+ TCA_CLS_FLAGS_SKIP_SW))
+ goto error_nobufs;
+ break;
+ case ITEM_VOID:
+ if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
+ goto trans;
+ ++item;
+ break;
+ case ITEM_PORT_ID:
+ if (item->type != RTE_FLOW_ITEM_TYPE_PORT_ID)
+ goto trans;
+ mask.port_id = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_port_id_mask,
+ &mlx5_nl_flow_mask_supported.port_id,
+ &mlx5_nl_flow_mask_empty.port_id,
+ sizeof(mlx5_nl_flow_mask_supported.port_id), error);
+ if (!mask.port_id)
+ return -rte_errno;
+ if (mask.port_id == &mlx5_nl_flow_mask_empty.port_id) {
+ in_port_id_set = 1;
+ ++item;
+ break;
+ }
+ spec.port_id = item->spec;
+ if (mask.port_id->id && mask.port_id->id != 0xffffffff)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.port_id,
+ "no support for partial mask on"
+ " \"id\" field");
+ if (!mask.port_id->id)
+ i = 0;
+ else
+ for (i = 0; ptoi[i].ifindex; ++i)
+ if (ptoi[i].port_id == spec.port_id->id)
+ break;
+ if (!ptoi[i].ifindex)
+ return rte_flow_error_set
+ (error, ENODEV, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ spec.port_id,
+ "missing data to convert port ID to ifindex");
+ tcm = mnl_nlmsg_get_payload(buf);
+ if (in_port_id_set &&
+ ptoi[i].ifindex != (unsigned int)tcm->tcm_ifindex)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ spec.port_id,
+ "cannot match traffic for several port IDs"
+ " through a single flow rule");
+ tcm->tcm_ifindex = ptoi[i].ifindex;
+ in_port_id_set = 1;
+ ++item;
+ break;
+ case ITEM_ETH:
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH)
+ goto trans;
+ mask.eth = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_eth_mask,
+ &mlx5_nl_flow_mask_supported.eth,
+ &mlx5_nl_flow_mask_empty.eth,
+ sizeof(mlx5_nl_flow_mask_supported.eth), error);
+ if (!mask.eth)
+ return -rte_errno;
+ if (mask.eth == &mlx5_nl_flow_mask_empty.eth) {
+ ++item;
+ break;
+ }
+ spec.eth = item->spec;
+ if (mask.eth->type && mask.eth->type != RTE_BE16(0xffff))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.eth,
+ "no support for partial mask on"
+ " \"type\" field");
+ if (mask.eth->type) {
+ if (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_ETH_TYPE,
+ spec.eth->type))
+ goto error_nobufs;
+ eth_type_set = 1;
+ }
+ if ((!is_zero_ether_addr(&mask.eth->dst) &&
+ (!mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_ETH_DST,
+ ETHER_ADDR_LEN,
+ spec.eth->dst.addr_bytes) ||
+ !mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_ETH_DST_MASK,
+ ETHER_ADDR_LEN,
+ mask.eth->dst.addr_bytes))) ||
+ (!is_zero_ether_addr(&mask.eth->src) &&
+ (!mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_ETH_SRC,
+ ETHER_ADDR_LEN,
+ spec.eth->src.addr_bytes) ||
+ !mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_ETH_SRC_MASK,
+ ETHER_ADDR_LEN,
+ mask.eth->src.addr_bytes))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_VLAN:
+ if (item->type != RTE_FLOW_ITEM_TYPE_VLAN)
+ goto trans;
+ mask.vlan = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_vlan_mask,
+ &mlx5_nl_flow_mask_supported.vlan,
+ &mlx5_nl_flow_mask_empty.vlan,
+ sizeof(mlx5_nl_flow_mask_supported.vlan), error);
+ if (!mask.vlan)
+ return -rte_errno;
+ if (!eth_type_set &&
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_ETH_TYPE,
+ RTE_BE16(ETH_P_8021Q)))
+ goto error_nobufs;
+ eth_type_set = 1;
+ vlan_present = 1;
+ if (mask.vlan == &mlx5_nl_flow_mask_empty.vlan) {
+ ++item;
+ break;
+ }
+ spec.vlan = item->spec;
+ if ((mask.vlan->tci & RTE_BE16(0xe000) &&
+ (mask.vlan->tci & RTE_BE16(0xe000)) != RTE_BE16(0xe000)) ||
+ (mask.vlan->tci & RTE_BE16(0x0fff) &&
+ (mask.vlan->tci & RTE_BE16(0x0fff)) != RTE_BE16(0x0fff)) ||
+ (mask.vlan->inner_type &&
+ mask.vlan->inner_type != RTE_BE16(0xffff)))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.vlan,
+ "no support for partial masks on"
+ " \"tci\" (PCP and VID parts) and"
+ " \"inner_type\" fields");
+ if (mask.vlan->inner_type) {
+ if (!mnl_attr_put_u16_check
+ (buf, size, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+ spec.vlan->inner_type))
+ goto error_nobufs;
+ vlan_eth_type_set = 1;
+ }
+ if ((mask.vlan->tci & RTE_BE16(0xe000) &&
+ !mnl_attr_put_u8_check
+ (buf, size, TCA_FLOWER_KEY_VLAN_PRIO,
+ (rte_be_to_cpu_16(spec.vlan->tci) >> 13) & 0x7)) ||
+ (mask.vlan->tci & RTE_BE16(0x0fff) &&
+ !mnl_attr_put_u16_check
+ (buf, size, TCA_FLOWER_KEY_VLAN_ID,
+ rte_be_to_cpu_16(spec.vlan->tci & RTE_BE16(0x0fff)))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_IPV4:
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4)
+ goto trans;
+ mask.ipv4 = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_ipv4_mask,
+ &mlx5_nl_flow_mask_supported.ipv4,
+ &mlx5_nl_flow_mask_empty.ipv4,
+ sizeof(mlx5_nl_flow_mask_supported.ipv4), error);
+ if (!mask.ipv4)
+ return -rte_errno;
+ if ((!eth_type_set || !vlan_eth_type_set) &&
+ !mnl_attr_put_u16_check(buf, size,
+ vlan_present ?
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE :
+ TCA_FLOWER_KEY_ETH_TYPE,
+ RTE_BE16(ETH_P_IP)))
+ goto error_nobufs;
+ eth_type_set = 1;
+ vlan_eth_type_set = 1;
+ if (mask.ipv4 == &mlx5_nl_flow_mask_empty.ipv4) {
+ ++item;
+ break;
+ }
+ spec.ipv4 = item->spec;
+ if (mask.ipv4->hdr.next_proto_id &&
+ mask.ipv4->hdr.next_proto_id != 0xff)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.ipv4,
+ "no support for partial mask on"
+ " \"hdr.next_proto_id\" field");
+ if (mask.ipv4->hdr.next_proto_id) {
+ if (!mnl_attr_put_u8_check
+ (buf, size, TCA_FLOWER_KEY_IP_PROTO,
+ spec.ipv4->hdr.next_proto_id))
+ goto error_nobufs;
+ ip_proto_set = 1;
+ }
+ if ((mask.ipv4->hdr.src_addr &&
+ (!mnl_attr_put_u32_check(buf, size,
+ TCA_FLOWER_KEY_IPV4_SRC,
+ spec.ipv4->hdr.src_addr) ||
+ !mnl_attr_put_u32_check(buf, size,
+ TCA_FLOWER_KEY_IPV4_SRC_MASK,
+ mask.ipv4->hdr.src_addr))) ||
+ (mask.ipv4->hdr.dst_addr &&
+ (!mnl_attr_put_u32_check(buf, size,
+ TCA_FLOWER_KEY_IPV4_DST,
+ spec.ipv4->hdr.dst_addr) ||
+ !mnl_attr_put_u32_check(buf, size,
+ TCA_FLOWER_KEY_IPV4_DST_MASK,
+ mask.ipv4->hdr.dst_addr))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_IPV6:
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV6)
+ goto trans;
+ mask.ipv6 = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_ipv6_mask,
+ &mlx5_nl_flow_mask_supported.ipv6,
+ &mlx5_nl_flow_mask_empty.ipv6,
+ sizeof(mlx5_nl_flow_mask_supported.ipv6), error);
+ if (!mask.ipv6)
+ return -rte_errno;
+ if ((!eth_type_set || !vlan_eth_type_set) &&
+ !mnl_attr_put_u16_check(buf, size,
+ vlan_present ?
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE :
+ TCA_FLOWER_KEY_ETH_TYPE,
+ RTE_BE16(ETH_P_IPV6)))
+ goto error_nobufs;
+ eth_type_set = 1;
+ vlan_eth_type_set = 1;
+ if (mask.ipv6 == &mlx5_nl_flow_mask_empty.ipv6) {
+ ++item;
+ break;
+ }
+ spec.ipv6 = item->spec;
+ if (mask.ipv6->hdr.proto && mask.ipv6->hdr.proto != 0xff)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.ipv6,
+ "no support for partial mask on"
+ " \"hdr.proto\" field");
+ if (mask.ipv6->hdr.proto) {
+ if (!mnl_attr_put_u8_check
+ (buf, size, TCA_FLOWER_KEY_IP_PROTO,
+ spec.ipv6->hdr.proto))
+ goto error_nobufs;
+ ip_proto_set = 1;
+ }
+ if ((!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.src_addr) &&
+ (!mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_IPV6_SRC,
+ sizeof(spec.ipv6->hdr.src_addr),
+ spec.ipv6->hdr.src_addr) ||
+ !mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_IPV6_SRC_MASK,
+ sizeof(mask.ipv6->hdr.src_addr),
+ mask.ipv6->hdr.src_addr))) ||
+ (!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.dst_addr) &&
+ (!mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_IPV6_DST,
+ sizeof(spec.ipv6->hdr.dst_addr),
+ spec.ipv6->hdr.dst_addr) ||
+ !mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_IPV6_DST_MASK,
+ sizeof(mask.ipv6->hdr.dst_addr),
+ mask.ipv6->hdr.dst_addr))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_TCP:
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP)
+ goto trans;
+ mask.tcp = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_tcp_mask,
+ &mlx5_nl_flow_mask_supported.tcp,
+ &mlx5_nl_flow_mask_empty.tcp,
+ sizeof(mlx5_nl_flow_mask_supported.tcp), error);
+ if (!mask.tcp)
+ return -rte_errno;
+ if (!ip_proto_set &&
+ !mnl_attr_put_u8_check(buf, size,
+ TCA_FLOWER_KEY_IP_PROTO,
+ IPPROTO_TCP))
+ goto error_nobufs;
+ if (mask.tcp == &mlx5_nl_flow_mask_empty.tcp) {
+ ++item;
+ break;
+ }
+ spec.tcp = item->spec;
+ if ((mask.tcp->hdr.src_port &&
+ mask.tcp->hdr.src_port != RTE_BE16(0xffff)) ||
+ (mask.tcp->hdr.dst_port &&
+ mask.tcp->hdr.dst_port != RTE_BE16(0xffff)))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.tcp,
+ "no support for partial masks on"
+ " \"hdr.src_port\" and \"hdr.dst_port\""
+ " fields");
+ if ((mask.tcp->hdr.src_port &&
+ (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_TCP_SRC,
+ spec.tcp->hdr.src_port) ||
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_TCP_SRC_MASK,
+ mask.tcp->hdr.src_port))) ||
+ (mask.tcp->hdr.dst_port &&
+ (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_TCP_DST,
+ spec.tcp->hdr.dst_port) ||
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_TCP_DST_MASK,
+ mask.tcp->hdr.dst_port))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_UDP:
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP)
+ goto trans;
+ mask.udp = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_udp_mask,
+ &mlx5_nl_flow_mask_supported.udp,
+ &mlx5_nl_flow_mask_empty.udp,
+ sizeof(mlx5_nl_flow_mask_supported.udp), error);
+ if (!mask.udp)
+ return -rte_errno;
+ if (!ip_proto_set &&
+ !mnl_attr_put_u8_check(buf, size,
+ TCA_FLOWER_KEY_IP_PROTO,
+ IPPROTO_UDP))
+ goto error_nobufs;
+ if (mask.udp == &mlx5_nl_flow_mask_empty.udp) {
+ ++item;
+ break;
+ }
+ spec.udp = item->spec;
+ if ((mask.udp->hdr.src_port &&
+ mask.udp->hdr.src_port != RTE_BE16(0xffff)) ||
+ (mask.udp->hdr.dst_port &&
+ mask.udp->hdr.dst_port != RTE_BE16(0xffff)))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.udp,
+ "no support for partial masks on"
+ " \"hdr.src_port\" and \"hdr.dst_port\""
+ " fields");
+ if ((mask.udp->hdr.src_port &&
+ (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_UDP_SRC,
+ spec.udp->hdr.src_port) ||
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_UDP_SRC_MASK,
+ mask.udp->hdr.src_port))) ||
+ (mask.udp->hdr.dst_port &&
+ (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_UDP_DST,
+ spec.udp->hdr.dst_port) ||
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_UDP_DST_MASK,
+ mask.udp->hdr.dst_port))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ACTIONS:
+ if (item->type != RTE_FLOW_ITEM_TYPE_END)
+ goto trans;
+ assert(na_flower);
+ assert(!na_flower_act);
+ na_flower_act =
+ mnl_attr_nest_start_check(buf, size, TCA_FLOWER_ACT);
+ if (!na_flower_act)
+ goto error_nobufs;
+ act_index_cur = 1;
+ break;
+ case ACTION_VOID:
+ if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
+ goto trans;
+ ++action;
+ break;
+ case ACTION_PORT_ID:
+ if (action->type != RTE_FLOW_ACTION_TYPE_PORT_ID)
+ goto trans;
+ conf.port_id = action->conf;
+ if (conf.port_id->original)
+ i = 0;
+ else
+ for (i = 0; ptoi[i].ifindex; ++i)
+ if (ptoi[i].port_id == conf.port_id->id)
+ break;
+ if (!ptoi[i].ifindex)
+ return rte_flow_error_set
+ (error, ENODEV, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ conf.port_id,
+ "missing data to convert port ID to ifindex");
+ act_index =
+ mnl_attr_nest_start_check(buf, size, act_index_cur++);
+ if (!act_index ||
+ !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "mirred"))
+ goto error_nobufs;
+ act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
+ if (!act)
+ goto error_nobufs;
+ if (!mnl_attr_put_check(buf, size, TCA_MIRRED_PARMS,
+ sizeof(struct tc_mirred),
+ &(struct tc_mirred){
+ .action = TC_ACT_STOLEN,
+ .eaction = TCA_EGRESS_REDIR,
+ .ifindex = ptoi[i].ifindex,
+ }))
+ goto error_nobufs;
+ mnl_attr_nest_end(buf, act);
+ mnl_attr_nest_end(buf, act_index);
+ ++action;
+ break;
+ case ACTION_DROP:
+ if (action->type != RTE_FLOW_ACTION_TYPE_DROP)
+ goto trans;
+ act_index =
+ mnl_attr_nest_start_check(buf, size, act_index_cur++);
+ if (!act_index ||
+ !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "gact"))
+ goto error_nobufs;
+ act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
+ if (!act)
+ goto error_nobufs;
+ if (!mnl_attr_put_check(buf, size, TCA_GACT_PARMS,
+ sizeof(struct tc_gact),
+ &(struct tc_gact){
+ .action = TC_ACT_SHOT,
+ }))
+ goto error_nobufs;
+ mnl_attr_nest_end(buf, act);
+ mnl_attr_nest_end(buf, act_index);
+ ++action;
+ break;
+ case ACTION_OF_POP_VLAN:
+ if (action->type != RTE_FLOW_ACTION_TYPE_OF_POP_VLAN)
+ goto trans;
+ conf.of_push_vlan = NULL;
+ i = TCA_VLAN_ACT_POP;
+ goto action_of_vlan;
+ case ACTION_OF_PUSH_VLAN:
+ if (action->type != RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
+ goto trans;
+ conf.of_push_vlan = action->conf;
+ i = TCA_VLAN_ACT_PUSH;
+ goto action_of_vlan;
+ case ACTION_OF_SET_VLAN_VID:
+ if (action->type != RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
+ goto trans;
+ conf.of_set_vlan_vid = action->conf;
+ if (na_vlan_id)
+ goto override_na_vlan_id;
+ i = TCA_VLAN_ACT_MODIFY;
+ goto action_of_vlan;
+ case ACTION_OF_SET_VLAN_PCP:
+ if (action->type != RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)
+ goto trans;
+ conf.of_set_vlan_pcp = action->conf;
+ if (na_vlan_priority)
+ goto override_na_vlan_priority;
+ i = TCA_VLAN_ACT_MODIFY;
+ goto action_of_vlan;
+action_of_vlan:
+ act_index =
+ mnl_attr_nest_start_check(buf, size, act_index_cur++);
+ if (!act_index ||
+ !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "vlan"))
+ goto error_nobufs;
+ act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
+ if (!act)
+ goto error_nobufs;
+ if (!mnl_attr_put_check(buf, size, TCA_VLAN_PARMS,
+ sizeof(struct tc_vlan),
+ &(struct tc_vlan){
+ .action = TC_ACT_PIPE,
+ .v_action = i,
+ }))
+ goto error_nobufs;
+ if (i == TCA_VLAN_ACT_POP) {
+ mnl_attr_nest_end(buf, act);
+ mnl_attr_nest_end(buf, act_index);
+ ++action;
+ break;
+ }
+ if (i == TCA_VLAN_ACT_PUSH &&
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_VLAN_PUSH_VLAN_PROTOCOL,
+ conf.of_push_vlan->ethertype))
+ goto error_nobufs;
+ na_vlan_id = mnl_nlmsg_get_payload_tail(buf);
+ if (!mnl_attr_put_u16_check(buf, size, TCA_VLAN_PAD, 0))
+ goto error_nobufs;
+ na_vlan_priority = mnl_nlmsg_get_payload_tail(buf);
+ if (!mnl_attr_put_u8_check(buf, size, TCA_VLAN_PAD, 0))
+ goto error_nobufs;
+ mnl_attr_nest_end(buf, act);
+ mnl_attr_nest_end(buf, act_index);
+ if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
+override_na_vlan_id:
+ na_vlan_id->nla_type = TCA_VLAN_PUSH_VLAN_ID;
+ *(uint16_t *)mnl_attr_get_payload(na_vlan_id) =
+ rte_be_to_cpu_16
+ (conf.of_set_vlan_vid->vlan_vid);
+ } else if (action->type ==
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
+override_na_vlan_priority:
+ na_vlan_priority->nla_type =
+ TCA_VLAN_PUSH_VLAN_PRIORITY;
+ *(uint8_t *)mnl_attr_get_payload(na_vlan_priority) =
+ conf.of_set_vlan_pcp->vlan_pcp;
+ }
+ ++action;
+ break;
+ case END:
+ if (item->type != RTE_FLOW_ITEM_TYPE_END ||
+ action->type != RTE_FLOW_ACTION_TYPE_END)
+ goto trans;
+ if (na_flower_act)
+ mnl_attr_nest_end(buf, na_flower_act);
+ if (na_flower)
+ mnl_attr_nest_end(buf, na_flower);
+ nlh = buf;
+ return nlh->nlmsg_len;
+ }
+ back = trans;
+ trans = mlx5_nl_flow_trans[trans[n - 1]];
+ n = 0;
+ goto trans;
+error_nobufs:
+ if (buf != buf_tmp) {
+ buf = buf_tmp;
+ size = sizeof(buf_tmp);
+ goto init;
+ }
+ return rte_flow_error_set
+ (error, ENOBUFS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "generated TC message is too large");
+}
+
+/**
+ * Brand rtnetlink buffer with unique handle.
+ *
+ * This handle should be unique for a given network interface to avoid
+ * collisions.
+ *
+ * @param buf
+ * Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
+ * @param handle
+ * Unique 32-bit handle to use.
+ */
+void
+mlx5_nl_flow_brand(void *buf, uint32_t handle)
+{
+ struct tcmsg *tcm = mnl_nlmsg_get_payload(buf);
+
+ tcm->tcm_handle = handle;
+}
+
+/**
+ * Send Netlink message with acknowledgment.
+ *
+ * @param nl
+ * Libmnl socket to use.
+ * @param nlh
+ * Message to send. This function always raises the NLM_F_ACK flag before
+ * sending.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_flow_nl_ack(struct mnl_socket *nl, struct nlmsghdr *nlh)
+{
+ alignas(struct nlmsghdr)
+ uint8_t ans[mnl_nlmsg_size(sizeof(struct nlmsgerr)) +
+ nlh->nlmsg_len - sizeof(*nlh)];
+ uint32_t seq = random();
+ int ret;
+
+ nlh->nlmsg_flags |= NLM_F_ACK;
+ nlh->nlmsg_seq = seq;
+ ret = mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
+ if (ret != -1)
+ ret = mnl_socket_recvfrom(nl, ans, sizeof(ans));
+ if (ret != -1)
+ ret = mnl_cb_run
+ (ans, ret, seq, mnl_socket_get_portid(nl), NULL, NULL);
+ if (!ret)
+ return 0;
+ rte_errno = errno;
+ return -rte_errno;
+}
+
+/**
+ * Create a Netlink flow rule.
+ *
+ * @param nl
+ * Libmnl socket to use.
+ * @param buf
+ * Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_flow_create(struct mnl_socket *nl, void *buf,
+ struct rte_flow_error *error)
+{
+ struct nlmsghdr *nlh = buf;
+
+ nlh->nlmsg_type = RTM_NEWTFILTER;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
+ if (!mlx5_nl_flow_nl_ack(nl, nlh))
+ return 0;
+ return rte_flow_error_set
+ (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "netlink: failed to create TC flow rule");
+}
+
+/**
+ * Destroy a Netlink flow rule.
+ *
+ * @param nl
+ * Libmnl socket to use.
+ * @param buf
+ * Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf,
+ struct rte_flow_error *error)
+{
+ struct nlmsghdr *nlh = buf;
+
+ nlh->nlmsg_type = RTM_DELTFILTER;
+ nlh->nlmsg_flags = NLM_F_REQUEST;
+ if (!mlx5_nl_flow_nl_ack(nl, nlh))
+ return 0;
+ return rte_flow_error_set
+ (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "netlink: failed to destroy TC flow rule");
+}
+
+/**
+ * Initialize ingress qdisc of a given network interface.
+ *
+ * @param nl
+ * Libmnl socket of the @p NETLINK_ROUTE kind.
+ * @param ifindex
+ * Index of network interface to initialize.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex,
+ struct rte_flow_error *error)
+{
+ struct nlmsghdr *nlh;
+ struct tcmsg *tcm;
+ alignas(struct nlmsghdr)
+ uint8_t buf[mnl_nlmsg_size(sizeof(*tcm) + 128)];
+
+ /* Destroy existing ingress qdisc and everything attached to it. */
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = RTM_DELQDISC;
+ nlh->nlmsg_flags = NLM_F_REQUEST;
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm_ifindex = ifindex;
+ tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
+ tcm->tcm_parent = TC_H_INGRESS;
+ /* Ignore errors when qdisc is already absent. */
+ if (mlx5_nl_flow_nl_ack(nl, nlh) &&
+ rte_errno != EINVAL && rte_errno != ENOENT)
+ return rte_flow_error_set
+ (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "netlink: failed to remove ingress qdisc");
+ /* Create fresh ingress qdisc. */
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = RTM_NEWQDISC;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm_ifindex = ifindex;
+ tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
+ tcm->tcm_parent = TC_H_INGRESS;
+ mnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, "ingress");
+ if (mlx5_nl_flow_nl_ack(nl, nlh))
+ return rte_flow_error_set
+ (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "netlink: failed to create ingress qdisc");
+ return 0;
+}
+
+/**
+ * Create and configure a libmnl socket for Netlink flow rules.
+ *
+ * @return
+ * A valid libmnl socket object pointer on success, NULL otherwise and
+ * rte_errno is set.
+ */
+struct mnl_socket *
+mlx5_nl_flow_socket_create(void)
+{
+ struct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE);
+
+ if (nl) {
+ mnl_socket_setsockopt(nl, NETLINK_CAP_ACK, &(int){ 1 },
+ sizeof(int));
+ if (!mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID))
+ return nl;
+ }
+ rte_errno = errno;
+ if (nl)
+ mnl_socket_close(nl);
+ return NULL;
+}
+
+/**
+ * Destroy a libmnl socket.
+ */
+void
+mlx5_nl_flow_socket_destroy(struct mnl_socket *nl)
+{
+ mnl_socket_close(nl);
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_prm.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_prm.h
new file mode 100644
index 00000000..0870d32f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_prm.h
@@ -0,0 +1,364 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_PRM_H_
+#define RTE_PMD_MLX5_PRM_H_
+
+#include <assert.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_vect.h>
+#include "mlx5_autoconf.h"
+
+/* RSS hash key size. */
+#define MLX5_RSS_HASH_KEY_LEN 40
+
+/* Get CQE owner bit. */
+#define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK)
+
+/* Get CQE format. */
+#define MLX5_CQE_FORMAT(op_own) (((op_own) & MLX5E_CQE_FORMAT_MASK) >> 2)
+
+/* Get CQE opcode. */
+#define MLX5_CQE_OPCODE(op_own) (((op_own) & 0xf0) >> 4)
+
+/* Get CQE solicited event. */
+#define MLX5_CQE_SE(op_own) (((op_own) >> 1) & 1)
+
+/* Invalidate a CQE. */
+#define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)
+
+/* Maximum number of packets a multi-packet WQE can handle. */
+#define MLX5_MPW_DSEG_MAX 5
+
+/* WQE DWORD size */
+#define MLX5_WQE_DWORD_SIZE 16
+
+/* WQE size */
+#define MLX5_WQE_SIZE (4 * MLX5_WQE_DWORD_SIZE)
+
+/* Max size of a WQE session. */
+#define MLX5_WQE_SIZE_MAX 960U
+
+/* Compute the number of DS. */
+#define MLX5_WQE_DS(n) \
+ (((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE)
+
+/* Room for inline data in multi-packet WQE. */
+#define MLX5_MWQE64_INL_DATA 28
+
+/* Default minimum number of Tx queues for inlining packets. */
+#define MLX5_EMPW_MIN_TXQS 8
+
+/* Default max packet length to be inlined. */
+#define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE)
+
+
+#define MLX5_OPC_MOD_ENHANCED_MPSW 0
+#define MLX5_OPCODE_ENHANCED_MPSW 0x29
+
+/* CQE value to inform that VLAN is stripped. */
+#define MLX5_CQE_VLAN_STRIPPED (1u << 0)
+
+/* IPv4 options. */
+#define MLX5_CQE_RX_IP_EXT_OPTS_PACKET (1u << 1)
+
+/* IPv6 packet. */
+#define MLX5_CQE_RX_IPV6_PACKET (1u << 2)
+
+/* IPv4 packet. */
+#define MLX5_CQE_RX_IPV4_PACKET (1u << 3)
+
+/* TCP packet. */
+#define MLX5_CQE_RX_TCP_PACKET (1u << 4)
+
+/* UDP packet. */
+#define MLX5_CQE_RX_UDP_PACKET (1u << 5)
+
+/* IP is fragmented. */
+#define MLX5_CQE_RX_IP_FRAG_PACKET (1u << 7)
+
+/* L2 header is valid. */
+#define MLX5_CQE_RX_L2_HDR_VALID (1u << 8)
+
+/* L3 header is valid. */
+#define MLX5_CQE_RX_L3_HDR_VALID (1u << 9)
+
+/* L4 header is valid. */
+#define MLX5_CQE_RX_L4_HDR_VALID (1u << 10)
+
+/* Outer packet, 0 IPv4, 1 IPv6. */
+#define MLX5_CQE_RX_OUTER_PACKET (1u << 1)
+
+/* Tunnel packet bit in the CQE. */
+#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)
+
+/* Inner L3 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)
+
+/* Inner L4 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
+
+/* Outer L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_OUTER_TCP (0u << 5)
+
+/* Outer L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_OUTER_UDP (1u << 5)
+
+/* Outer L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4)
+
+/* Outer L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4)
+
+/* Inner L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1)
+
+/* Inner L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1)
+
+/* Inner L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0)
+
+/* Inner L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)
+
+/* Is flow mark valid. */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
+#else
+#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff)
+#endif
+
+/* INVALID is used by packets matching no flow rules. */
+#define MLX5_FLOW_MARK_INVALID 0
+
+/* Maximum allowed value to mark a packet. */
+#define MLX5_FLOW_MARK_MAX 0xfffff0
+
+/* Default mark value used when none is provided. */
+#define MLX5_FLOW_MARK_DEFAULT 0xffffff
+
+/* Maximum number of DS in WQE. */
+#define MLX5_DSEG_MAX 63
+
+/* Subset of struct mlx5_wqe_eth_seg. */
+struct mlx5_wqe_eth_seg_small {
+ uint32_t rsvd0;
+ uint8_t cs_flags;
+ uint8_t rsvd1;
+ uint16_t mss;
+ uint32_t rsvd2;
+ uint16_t inline_hdr_sz;
+ uint8_t inline_hdr[2];
+} __rte_aligned(MLX5_WQE_DWORD_SIZE);
+
+struct mlx5_wqe_inl_small {
+ uint32_t byte_cnt;
+ uint8_t raw;
+} __rte_aligned(MLX5_WQE_DWORD_SIZE);
+
+struct mlx5_wqe_ctrl {
+ uint32_t ctrl0;
+ uint32_t ctrl1;
+ uint32_t ctrl2;
+ uint32_t ctrl3;
+} __rte_aligned(MLX5_WQE_DWORD_SIZE);
+
+/* Small common part of the WQE. */
+struct mlx5_wqe {
+ uint32_t ctrl[4];
+ struct mlx5_wqe_eth_seg_small eseg;
+};
+
+/* Vectorize WQE header. */
+struct mlx5_wqe_v {
+ rte_v128u32_t ctrl;
+ rte_v128u32_t eseg;
+};
+
+/* WQE. */
+struct mlx5_wqe64 {
+ struct mlx5_wqe hdr;
+ uint8_t raw[32];
+} __rte_aligned(MLX5_WQE_SIZE);
+
+/* MPW mode. */
+enum mlx5_mpw_mode {
+ MLX5_MPW_DISABLED,
+ MLX5_MPW,
+ MLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */
+};
+
+/* MPW session status. */
+enum mlx5_mpw_state {
+ MLX5_MPW_STATE_OPENED,
+ MLX5_MPW_INL_STATE_OPENED,
+ MLX5_MPW_ENHANCED_STATE_OPENED,
+ MLX5_MPW_STATE_CLOSED,
+};
+
+/* MPW session descriptor. */
+struct mlx5_mpw {
+ enum mlx5_mpw_state state;
+ unsigned int pkts_n;
+ unsigned int len;
+ unsigned int total_len;
+ volatile struct mlx5_wqe *wqe;
+ union {
+ volatile struct mlx5_wqe_data_seg *dseg[MLX5_MPW_DSEG_MAX];
+ volatile uint8_t *raw;
+ } data;
+};
+
+/* WQE for Multi-Packet RQ. */
+struct mlx5_wqe_mprq {
+ struct mlx5_wqe_srq_next_seg next_seg;
+ struct mlx5_wqe_data_seg dseg;
+};
+
+#define MLX5_MPRQ_LEN_MASK 0x000ffff
+#define MLX5_MPRQ_LEN_SHIFT 0
+#define MLX5_MPRQ_STRIDE_NUM_MASK 0x3fff0000
+#define MLX5_MPRQ_STRIDE_NUM_SHIFT 16
+#define MLX5_MPRQ_FILLER_MASK 0x80000000
+#define MLX5_MPRQ_FILLER_SHIFT 31
+
+#define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2
+
+/* CQ element structure - should be equal to the cache line size */
+struct mlx5_cqe {
+#if (RTE_CACHE_LINE_SIZE == 128)
+ uint8_t padding[64];
+#endif
+ uint8_t pkt_info;
+ uint8_t rsvd0;
+ uint16_t wqe_id;
+ uint8_t rsvd3[8];
+ uint32_t rx_hash_res;
+ uint8_t rx_hash_type;
+ uint8_t rsvd1[11];
+ uint16_t hdr_type_etc;
+ uint16_t vlan_info;
+ uint8_t rsvd2[12];
+ uint32_t byte_cnt;
+ uint64_t timestamp;
+ uint32_t sop_drop_qpn;
+ uint16_t wqe_counter;
+ uint8_t rsvd4;
+ uint8_t op_own;
+};
+
+/* Adding direct verbs to data-path. */
+
+/* CQ sequence number mask. */
+#define MLX5_CQ_SQN_MASK 0x3
+
+/* CQ sequence number index. */
+#define MLX5_CQ_SQN_OFFSET 28
+
+/* CQ doorbell index mask. */
+#define MLX5_CI_MASK 0xffffff
+
+/* CQ doorbell offset. */
+#define MLX5_CQ_ARM_DB 1
+
+/* CQ doorbell offset*/
+#define MLX5_CQ_DOORBELL 0x20
+
+/* CQE format value. */
+#define MLX5_COMPRESSED 0x3
+
+/* CQE format mask. */
+#define MLX5E_CQE_FORMAT_MASK 0xc
+
+/* MPW opcode. */
+#define MLX5_OPC_MOD_MPW 0x01
+
+/* Compressed Rx CQE structure. */
+struct mlx5_mini_cqe8 {
+ union {
+ uint32_t rx_hash_result;
+ struct {
+ uint16_t checksum;
+ uint16_t stride_idx;
+ };
+ struct {
+ uint16_t wqe_counter;
+ uint8_t s_wqe_opcode;
+ uint8_t reserved;
+ } s_wqe_info;
+ };
+ uint32_t byte_cnt;
+};
+
+/**
+ * Convert a user mark to flow mark.
+ *
+ * @param val
+ * Mark value to convert.
+ *
+ * @return
+ * Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_set(uint32_t val)
+{
+ uint32_t ret;
+
+ /*
+ * Add one to the user value to differentiate un-marked flows from
+ * marked flows, if the ID is equal to MLX5_FLOW_MARK_DEFAULT it
+ * remains untouched.
+ */
+ if (val != MLX5_FLOW_MARK_DEFAULT)
+ ++val;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ /*
+ * Mark is 24 bits (minus reserved values) but is stored on a 32 bit
+ * word, byte-swapped by the kernel on little-endian systems. In this
+ * case, left-shifting the resulting big-endian value ensures the
+ * least significant 24 bits are retained when converting it back.
+ */
+ ret = rte_cpu_to_be_32(val) >> 8;
+#else
+ ret = val;
+#endif
+ return ret;
+}
+
+/**
+ * Convert a mark to user mark.
+ *
+ * @param val
+ * Mark value to convert.
+ *
+ * @return
+ * Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_get(uint32_t val)
+{
+ /*
+ * Subtract one from the retrieved value. It was added by
+ * mlx5_flow_mark_set() to distinguish unmarked flows.
+ */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ return (val >> 8) - 1;
+#else
+ return val - 1;
+#endif
+}
+
+#endif /* RTE_PMD_MLX5_PRM_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rss.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rss.c
new file mode 100644
index 00000000..b95778a8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rss.c
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_malloc.h>
+#include <rte_ethdev_driver.h>
+
+#include "mlx5.h"
+#include "mlx5_defs.h"
+#include "mlx5_rxtx.h"
+
+/**
+ * DPDK callback to update the RSS hash configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] rss_conf
+ * RSS configuration data.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+ unsigned int idx;
+
+ if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (rss_conf->rss_key && rss_conf->rss_key_len) {
+ if (rss_conf->rss_key_len != MLX5_RSS_HASH_KEY_LEN) {
+ DRV_LOG(ERR,
+ "port %u RSS key len must be %s Bytes long",
+ dev->data->port_id,
+ RTE_STR(MLX5_RSS_HASH_KEY_LEN));
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key,
+ rss_conf->rss_key_len, 0);
+ if (!priv->rss_conf.rss_key) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ memcpy(priv->rss_conf.rss_key, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+ priv->rss_conf.rss_key_len = rss_conf->rss_key_len;
+ }
+ priv->rss_conf.rss_hf = rss_conf->rss_hf;
+ /* Enable the RSS hash in all Rx queues. */
+ for (i = 0, idx = 0; idx != priv->rxqs_n; ++i) {
+ if (!(*priv->rxqs)[i])
+ continue;
+ (*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf &&
+ !!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS);
+ ++idx;
+ }
+ return 0;
+}
+
+/**
+ * DPDK callback to get the RSS hash configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in, out] rss_conf
+ * RSS configuration data.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ if (!rss_conf) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (rss_conf->rss_key &&
+ (rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) {
+ memcpy(rss_conf->rss_key, priv->rss_conf.rss_key,
+ priv->rss_conf.rss_key_len);
+ }
+ rss_conf->rss_key_len = priv->rss_conf.rss_key_len;
+ rss_conf->rss_hf = priv->rss_conf.rss_hf;
+ return 0;
+}
+
+/**
+ * Allocate/reallocate RETA index table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @praram reta_size
+ * The size of the array to allocate.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size)
+{
+ struct priv *priv = dev->data->dev_private;
+ void *mem;
+ unsigned int old_size = priv->reta_idx_n;
+
+ if (priv->reta_idx_n == reta_size)
+ return 0;
+
+ mem = rte_realloc(priv->reta_idx,
+ reta_size * sizeof((*priv->reta_idx)[0]), 0);
+ if (!mem) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ priv->reta_idx = mem;
+ priv->reta_idx_n = reta_size;
+ if (old_size < reta_size)
+ memset(&(*priv->reta_idx)[old_size], 0,
+ (reta_size - old_size) *
+ sizeof((*priv->reta_idx)[0]));
+ return 0;
+}
+
+/**
+ * DPDK callback to get the RETA indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param reta_conf
+ * Pointer to RETA configuration structure array.
+ * @param reta_size
+ * Size of the RETA table.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int idx;
+ unsigned int i;
+
+ if (!reta_size || reta_size > priv->reta_idx_n) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ /* Fill each entry of the table even if its bit is not set. */
+ for (idx = 0, i = 0; (i != reta_size); ++i) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ reta_conf[idx].reta[i % RTE_RETA_GROUP_SIZE] =
+ (*priv->reta_idx)[i];
+ }
+ return 0;
+}
+
+/**
+ * DPDK callback to update the RETA indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param reta_conf
+ * Pointer to RETA configuration structure array.
+ * @param reta_size
+ * Size of the RETA table.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ int ret;
+ struct priv *priv = dev->data->dev_private;
+ unsigned int idx;
+ unsigned int i;
+ unsigned int pos;
+
+ if (!reta_size) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ ret = mlx5_rss_reta_index_resize(dev, reta_size);
+ if (ret)
+ return ret;
+ for (idx = 0, i = 0; (i != reta_size); ++i) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ pos = i % RTE_RETA_GROUP_SIZE;
+ if (((reta_conf[idx].mask >> i) & 0x1) == 0)
+ continue;
+ assert(reta_conf[idx].reta[pos] < priv->rxqs_n);
+ (*priv->reta_idx)[i] = reta_conf[idx].reta[pos];
+ }
+ if (dev->data->dev_started) {
+ mlx5_dev_stop(dev);
+ return mlx5_dev_start(dev);
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxmode.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxmode.c
new file mode 100644
index 00000000..e74fdef8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxmode.c
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#include <stddef.h>
+#include <errno.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_ethdev_driver.h>
+
+#include "mlx5.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_utils.h"
+
+/**
+ * DPDK callback to enable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx5_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
+ dev->data->promiscuous = 1;
+ if (priv->isolated) {
+ DRV_LOG(WARNING,
+ "port %u cannot enable promiscuous mode"
+ " in flow isolation mode",
+ dev->data->port_id);
+ return;
+ }
+ if (priv->config.vf)
+ mlx5_nl_promisc(dev, 1);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot enable promiscuous mode: %s",
+ dev->data->port_id, strerror(rte_errno));
+}
+
+/**
+ * DPDK callback to disable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx5_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
+ dev->data->promiscuous = 0;
+ if (priv->config.vf)
+ mlx5_nl_promisc(dev, 0);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot disable promiscuous mode: %s",
+ dev->data->port_id, strerror(rte_errno));
+}
+
+/**
+ * DPDK callback to enable allmulti mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx5_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
+ dev->data->all_multicast = 1;
+ if (priv->isolated) {
+ DRV_LOG(WARNING,
+ "port %u cannot enable allmulticast mode"
+ " in flow isolation mode",
+ dev->data->port_id);
+ return;
+ }
+ if (priv->config.vf)
+ mlx5_nl_allmulti(dev, 1);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot enable allmulicast mode: %s",
+ dev->data->port_id, strerror(rte_errno));
+}
+
+/**
+ * DPDK callback to disable allmulti mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx5_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
+ dev->data->all_multicast = 0;
+ if (priv->config.vf)
+ mlx5_nl_allmulti(dev, 0);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot disable allmulicast mode: %s",
+ dev->data->port_id, strerror(rte_errno));
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxq.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxq.c
new file mode 100644
index 00000000..1f7bfd44
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxq.c
@@ -0,0 +1,2191 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#include <stddef.h>
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+#include <stdint.h>
+#include <fcntl.h>
+#include <sys/queue.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#include <infiniband/mlx5dv.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_ethdev_driver.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_debug.h>
+#include <rte_io.h>
+
+#include "mlx5.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_utils.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+#include "mlx5_glue.h"
+
+/* Default RSS hash key also used for ConnectX-3. */
+uint8_t rss_hash_default_key[] = {
+ 0x2c, 0xc6, 0x81, 0xd1,
+ 0x5b, 0xdb, 0xf4, 0xf7,
+ 0xfc, 0xa2, 0x83, 0x19,
+ 0xdb, 0x1a, 0x3e, 0x94,
+ 0x6b, 0x9e, 0x38, 0xd9,
+ 0x2c, 0x9c, 0x03, 0xd1,
+ 0xad, 0x99, 0x44, 0xa7,
+ 0xd9, 0x56, 0x3d, 0x59,
+ 0x06, 0x3c, 0x25, 0xf3,
+ 0xfc, 0x1f, 0xdc, 0x2a,
+};
+
+/* Length of the default RSS hash key. */
+static_assert(MLX5_RSS_HASH_KEY_LEN ==
+ (unsigned int)sizeof(rss_hash_default_key),
+ "wrong RSS default key size.");
+
+/**
+ * Check whether Multi-Packet RQ can be enabled for the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+inline int
+mlx5_check_mprq_support(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ if (priv->config.mprq.enabled &&
+ priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
+ return 1;
+ return -ENOTSUP;
+}
+
+/**
+ * Check whether Multi-Packet RQ is enabled for the Rx queue.
+ *
+ * @param rxq
+ * Pointer to receive queue structure.
+ *
+ * @return
+ * 0 if disabled, otherwise enabled.
+ */
+inline int
+mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
+{
+ return rxq->strd_num_n > 0;
+}
+
+/**
+ * Check whether Multi-Packet RQ is enabled for the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 if disabled, otherwise enabled.
+ */
+inline int
+mlx5_mprq_enabled(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint16_t i;
+ uint16_t n = 0;
+
+ if (mlx5_check_mprq_support(dev) < 0)
+ return 0;
+ /* All the configured queues should be enabled. */
+ for (i = 0; i < priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (!rxq)
+ continue;
+ if (mlx5_rxq_mprq_enabled(rxq))
+ ++n;
+ }
+ /* Multi-Packet RQ can't be partially configured. */
+ assert(n == 0 || n == priv->rxqs_n);
+ return n == priv->rxqs_n;
+}
+
+/**
+ * Allocate RX queue elements for Multi-Packet RQ.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ unsigned int wqe_n = 1 << rxq->elts_n;
+ unsigned int i;
+ int err;
+
+ /* Iterate on segments. */
+ for (i = 0; i <= wqe_n; ++i) {
+ struct mlx5_mprq_buf *buf;
+
+ if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
+ DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ if (i < wqe_n)
+ (*rxq->mprq_bufs)[i] = buf;
+ else
+ rxq->mprq_repl = buf;
+ }
+ DRV_LOG(DEBUG,
+ "port %u Rx queue %u allocated and configured %u segments",
+ rxq->port_id, rxq_ctrl->idx, wqe_n);
+ return 0;
+error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ wqe_n = i;
+ for (i = 0; (i != wqe_n); ++i) {
+ if ((*rxq->mprq_bufs)[i] != NULL)
+ rte_mempool_put(rxq->mprq_mp,
+ (*rxq->mprq_bufs)[i]);
+ (*rxq->mprq_bufs)[i] = NULL;
+ }
+ DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
+ rxq->port_id, rxq_ctrl->idx);
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+/**
+ * Allocate RX queue elements for Single-Packet RQ.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
+ unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
+ unsigned int i;
+ int err;
+
+ /* Iterate on segments. */
+ for (i = 0; (i != elts_n); ++i) {
+ struct rte_mbuf *buf;
+
+ buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
+ if (buf == NULL) {
+ DRV_LOG(ERR, "port %u empty mbuf pool",
+ PORT_ID(rxq_ctrl->priv));
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ /* Headroom is reserved by rte_pktmbuf_alloc(). */
+ assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
+ /* Buffer is supposed to be empty. */
+ assert(rte_pktmbuf_data_len(buf) == 0);
+ assert(rte_pktmbuf_pkt_len(buf) == 0);
+ assert(!buf->next);
+ /* Only the first segment keeps headroom. */
+ if (i % sges_n)
+ SET_DATA_OFF(buf, 0);
+ PORT(buf) = rxq_ctrl->rxq.port_id;
+ DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
+ PKT_LEN(buf) = DATA_LEN(buf);
+ NB_SEGS(buf) = 1;
+ (*rxq_ctrl->rxq.elts)[i] = buf;
+ }
+ /* If Rx vector is activated. */
+ if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
+ int j;
+
+ /* Initialize default rearm_data for vPMD. */
+ mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_mbuf_refcnt_set(mbuf_init, 1);
+ mbuf_init->nb_segs = 1;
+ mbuf_init->port = rxq->port_id;
+ /*
+ * prevent compiler reordering:
+ * rearm_data covers previous fields.
+ */
+ rte_compiler_barrier();
+ rxq->mbuf_initializer =
+ *(uint64_t *)&mbuf_init->rearm_data;
+ /* Padding with a fake mbuf for vectorized Rx. */
+ for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
+ (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
+ }
+ DRV_LOG(DEBUG,
+ "port %u Rx queue %u allocated and configured %u segments"
+ " (max %u packets)",
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
+ elts_n / (1 << rxq_ctrl->rxq.sges_n));
+ return 0;
+error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ elts_n = i;
+ for (i = 0; (i != elts_n); ++i) {
+ if ((*rxq_ctrl->rxq.elts)[i] != NULL)
+ rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
+ (*rxq_ctrl->rxq.elts)[i] = NULL;
+ }
+ DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+/**
+ * Allocate RX queue elements.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
+}
+
+/**
+ * Free RX queue elements for Multi-Packet RQ.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ */
+static void
+rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ uint16_t i;
+
+ DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
+ rxq->port_id, rxq_ctrl->idx);
+ if (rxq->mprq_bufs == NULL)
+ return;
+ assert(mlx5_rxq_check_vec_support(rxq) < 0);
+ for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
+ if ((*rxq->mprq_bufs)[i] != NULL)
+ mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
+ (*rxq->mprq_bufs)[i] = NULL;
+ }
+ if (rxq->mprq_repl != NULL) {
+ mlx5_mprq_buf_free(rxq->mprq_repl);
+ rxq->mprq_repl = NULL;
+ }
+}
+
+/**
+ * Free RX queue elements for Single-Packet RQ.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ */
+static void
+rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ const uint16_t q_n = (1 << rxq->elts_n);
+ const uint16_t q_mask = q_n - 1;
+ uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
+ uint16_t i;
+
+ DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+ if (rxq->elts == NULL)
+ return;
+ /**
+ * Some mbuf in the Ring belongs to the application. They cannot be
+ * freed.
+ */
+ if (mlx5_rxq_check_vec_support(rxq) > 0) {
+ for (i = 0; i < used; ++i)
+ (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
+ rxq->rq_pi = rxq->rq_ci;
+ }
+ for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
+ if ((*rxq->elts)[i] != NULL)
+ rte_pktmbuf_free_seg((*rxq->elts)[i]);
+ (*rxq->elts)[i] = NULL;
+ }
+}
+
+/**
+ * Free RX queue elements.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ */
+static void
+rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
+ rxq_free_elts_mprq(rxq_ctrl);
+ else
+ rxq_free_elts_sprq(rxq_ctrl);
+}
+
+/**
+ * Clean up a RX queue.
+ *
+ * Destroy objects, free allocated memory and reset the structure for reuse.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ */
+void
+mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+ if (rxq_ctrl->ibv)
+ mlx5_rxq_ibv_release(rxq_ctrl->ibv);
+ memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
+}
+
+/**
+ * Returns the per-queue supported offloads.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_TIMESTAMP |
+ DEV_RX_OFFLOAD_JUMBO_FRAME);
+
+ offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ if (config->hw_fcs_strip)
+ offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+
+ if (config->hw_csum)
+ offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM);
+ if (config->hw_vlan_strip)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ return offloads;
+}
+
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx5_get_rx_port_offloads(void)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ return offloads;
+}
+
+/**
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+
+ if (!rte_is_power_of_2(desc)) {
+ desc = 1 << log2above(desc);
+ DRV_LOG(WARNING,
+ "port %u increased number of descriptors in Rx queue %u"
+ " to the next power of two (%d)",
+ dev->data->port_id, idx, desc);
+ }
+ DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
+ dev->data->port_id, idx, desc);
+ if (idx >= priv->rxqs_n) {
+ DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
+ dev->data->port_id, idx, priv->rxqs_n);
+ rte_errno = EOVERFLOW;
+ return -rte_errno;
+ }
+ if (!mlx5_rxq_releasable(dev, idx)) {
+ DRV_LOG(ERR, "port %u unable to release queue index %u",
+ dev->data->port_id, idx);
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+ mlx5_rxq_release(dev, idx);
+ rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
+ if (!rxq_ctrl) {
+ DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
+ dev->data->port_id, idx);
+ (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
+ return 0;
+}
+
+/**
+ * DPDK callback to release a RX queue.
+ *
+ * @param dpdk_rxq
+ * Generic RX queue pointer.
+ */
+void
+mlx5_rx_queue_release(void *dpdk_rxq)
+{
+ struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct priv *priv;
+
+ if (rxq == NULL)
+ return;
+ rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ priv = rxq_ctrl->priv;
+ if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
+ rte_panic("port %u Rx queue %u is still used by a flow and"
+ " cannot be removed\n",
+ PORT_ID(priv), rxq_ctrl->idx);
+ mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
+}
+
+/**
+ * Allocate queue vector and fill epoll fd list for Rx interrupts.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+ unsigned int rxqs_n = priv->rxqs_n;
+ unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ unsigned int count = 0;
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+
+ if (!dev->data->dev_conf.intr_conf.rxq)
+ return 0;
+ mlx5_rx_intr_vec_disable(dev);
+ intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
+ if (intr_handle->intr_vec == NULL) {
+ DRV_LOG(ERR,
+ "port %u failed to allocate memory for interrupt"
+ " vector, Rx interrupts will not be supported",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ intr_handle->type = RTE_INTR_HANDLE_EXT;
+ for (i = 0; i != n; ++i) {
+ /* This rxq ibv must not be released in this function. */
+ struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
+ int fd;
+ int flags;
+ int rc;
+
+ /* Skip queues that cannot request interrupts. */
+ if (!rxq_ibv || !rxq_ibv->channel) {
+ /* Use invalid intr_vec[] index to disable entry. */
+ intr_handle->intr_vec[i] =
+ RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID;
+ continue;
+ }
+ if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
+ DRV_LOG(ERR,
+ "port %u too many Rx queues for interrupt"
+ " vector size (%d), Rx interrupts cannot be"
+ " enabled",
+ dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
+ mlx5_rx_intr_vec_disable(dev);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ fd = rxq_ibv->channel->fd;
+ flags = fcntl(fd, F_GETFL);
+ rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+ if (rc < 0) {
+ rte_errno = errno;
+ DRV_LOG(ERR,
+ "port %u failed to make Rx interrupt file"
+ " descriptor %d non-blocking for queue index"
+ " %d",
+ dev->data->port_id, fd, i);
+ mlx5_rx_intr_vec_disable(dev);
+ return -rte_errno;
+ }
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+ intr_handle->efds[count] = fd;
+ count++;
+ }
+ if (!count)
+ mlx5_rx_intr_vec_disable(dev);
+ else
+ intr_handle->nb_efd = count;
+ return 0;
+}
+
+/**
+ * Clean up Rx interrupts handler.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+ unsigned int i;
+ unsigned int rxqs_n = priv->rxqs_n;
+ unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+
+ if (!dev->data->dev_conf.intr_conf.rxq)
+ return;
+ if (!intr_handle->intr_vec)
+ goto free;
+ for (i = 0; i != n; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_data *rxq_data;
+
+ if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID)
+ continue;
+ /**
+ * Need to access directly the queue to release the reference
+ * kept in priv_rx_intr_vec_enable().
+ */
+ rxq_data = (*priv->rxqs)[i];
+ rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ mlx5_rxq_ibv_release(rxq_ctrl->ibv);
+ }
+free:
+ rte_intr_free_epoll_fd(intr_handle);
+ if (intr_handle->intr_vec)
+ free(intr_handle->intr_vec);
+ intr_handle->nb_efd = 0;
+ intr_handle->intr_vec = NULL;
+}
+
+/**
+ * MLX5 CQ notification .
+ *
+ * @param rxq
+ * Pointer to receive queue structure.
+ * @param sq_n_rxq
+ * Sequence number per receive queue .
+ */
+static inline void
+mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
+{
+ int sq_n = 0;
+ uint32_t doorbell_hi;
+ uint64_t doorbell;
+ void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
+
+ sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
+ doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
+ doorbell = (uint64_t)doorbell_hi << 32;
+ doorbell |= rxq->cqn;
+ rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
+ mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
+ cq_db_reg, rxq->uar_lock_cq);
+}
+
+/**
+ * DPDK callback for Rx queue interrupt enable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * Rx queue number.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ rxq_data = (*priv->rxqs)[rx_queue_id];
+ if (!rxq_data) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ if (rxq_ctrl->irq) {
+ struct mlx5_rxq_ibv *rxq_ibv;
+
+ rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
+ if (!rxq_ibv) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
+ mlx5_rxq_ibv_release(rxq_ibv);
+ }
+ return 0;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt disable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * Rx queue number.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_ibv *rxq_ibv = NULL;
+ struct ibv_cq *ev_cq;
+ void *ev_ctx;
+ int ret;
+
+ rxq_data = (*priv->rxqs)[rx_queue_id];
+ if (!rxq_data) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ if (!rxq_ctrl->irq)
+ return 0;
+ rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
+ if (!rxq_ibv) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
+ if (ret || ev_cq != rxq_ibv->cq) {
+ rte_errno = EINVAL;
+ goto exit;
+ }
+ rxq_data->cq_arm_sn++;
+ mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
+ return 0;
+exit:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (rxq_ibv)
+ mlx5_rxq_ibv_release(rxq_ibv);
+ DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
+ dev->data->port_id, rx_queue_id);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+/**
+ * Create the Rx queue Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Rx queue array
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_rxq_ibv *
+mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ struct ibv_wq_attr mod;
+ union {
+ struct {
+ struct ibv_cq_init_attr_ex ibv;
+ struct mlx5dv_cq_init_attr mlx5;
+ } cq;
+ struct {
+ struct ibv_wq_init_attr ibv;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ struct mlx5dv_wq_init_attr mlx5;
+#endif
+ } wq;
+ struct ibv_cq_ex cq_attr;
+ } attr;
+ unsigned int cqe_n;
+ unsigned int wqe_n = 1 << rxq_data->elts_n;
+ struct mlx5_rxq_ibv *tmpl;
+ struct mlx5dv_cq cq_info;
+ struct mlx5dv_rwq rwq;
+ unsigned int i;
+ int ret = 0;
+ struct mlx5dv_obj obj;
+ struct mlx5_dev_config *config = &priv->config;
+ const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data);
+
+ assert(rxq_data);
+ assert(!rxq_ctrl->ibv);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
+ priv->verbs_alloc_ctx.obj = rxq_ctrl;
+ tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
+ rxq_ctrl->socket);
+ if (!tmpl) {
+ DRV_LOG(ERR,
+ "port %u Rx queue %u cannot allocate verbs resources",
+ dev->data->port_id, rxq_ctrl->idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ tmpl->rxq_ctrl = rxq_ctrl;
+ if (rxq_ctrl->irq) {
+ tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
+ if (!tmpl->channel) {
+ DRV_LOG(ERR, "port %u: comp channel creation failure",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ }
+ if (mprq_en)
+ cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
+ else
+ cqe_n = wqe_n - 1;
+ attr.cq.ibv = (struct ibv_cq_init_attr_ex){
+ .cqe = cqe_n,
+ .channel = tmpl->channel,
+ .comp_mask = 0,
+ };
+ attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
+ .comp_mask = 0,
+ };
+ if (config->cqe_comp && !rxq_data->hw_timestamp) {
+ attr.cq.mlx5.comp_mask |=
+ MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ attr.cq.mlx5.cqe_comp_res_format =
+ mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
+ MLX5DV_CQE_RES_FORMAT_HASH;
+#else
+ attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+#endif
+ /*
+ * For vectorized Rx, it must not be doubled in order to
+ * make cq_ci and rq_ci aligned.
+ */
+ if (mlx5_rxq_check_vec_support(rxq_data) < 0)
+ attr.cq.ibv.cqe *= 2;
+ } else if (config->cqe_comp && rxq_data->hw_timestamp) {
+ DRV_LOG(DEBUG,
+ "port %u Rx CQE compression is disabled for HW"
+ " timestamp",
+ dev->data->port_id);
+ }
+ tmpl->cq = mlx5_glue->cq_ex_to_cq
+ (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
+ &attr.cq.mlx5));
+ if (tmpl->cq == NULL) {
+ DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_sge);
+ attr.wq.ibv = (struct ibv_wq_init_attr){
+ .wq_context = NULL, /* Could be useful in the future. */
+ .wq_type = IBV_WQT_RQ,
+ /* Max number of outstanding WRs. */
+ .max_wr = wqe_n >> rxq_data->sges_n,
+ /* Max number of scatter/gather elements in a WR. */
+ .max_sge = 1 << rxq_data->sges_n,
+ .pd = priv->pd,
+ .cq = tmpl->cq,
+ .comp_mask =
+ IBV_WQ_FLAGS_CVLAN_STRIPPING |
+ 0,
+ .create_flags = (rxq_data->vlan_strip ?
+ IBV_WQ_FLAGS_CVLAN_STRIPPING :
+ 0),
+ };
+ /* By default, FCS (CRC) is stripped by hardware. */
+ if (rxq_data->crc_present) {
+ attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
+ attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ }
+#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
+ if (config->hw_padding) {
+ attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
+ attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ }
+#endif
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){
+ .comp_mask = 0,
+ };
+ if (mprq_en) {
+ struct mlx5dv_striding_rq_init_attr *mprq_attr =
+ &attr.wq.mlx5.striding_rq_attrs;
+
+ attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
+ *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
+ .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
+ .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
+ .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
+ };
+ }
+ tmpl->wq = mlx5_glue->dv_create_wq(priv->ctx, &attr.wq.ibv,
+ &attr.wq.mlx5);
+#else
+ tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq.ibv);
+#endif
+ if (tmpl->wq == NULL) {
+ DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ /*
+ * Make sure number of WRs*SGEs match expectations since a queue
+ * cannot allocate more than "desc" buffers.
+ */
+ if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
+ attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) {
+ DRV_LOG(ERR,
+ "port %u Rx queue %u requested %u*%u but got %u*%u"
+ " WRs*SGEs",
+ dev->data->port_id, idx,
+ wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n),
+ attr.wq.ibv.max_wr, attr.wq.ibv.max_sge);
+ rte_errno = EINVAL;
+ goto error;
+ }
+ /* Change queue state to ready. */
+ mod = (struct ibv_wq_attr){
+ .attr_mask = IBV_WQ_ATTR_STATE,
+ .wq_state = IBV_WQS_RDY,
+ };
+ ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
+ if (ret) {
+ DRV_LOG(ERR,
+ "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
+ dev->data->port_id, idx);
+ rte_errno = ret;
+ goto error;
+ }
+ obj.cq.in = tmpl->cq;
+ obj.cq.out = &cq_info;
+ obj.rwq.in = tmpl->wq;
+ obj.rwq.out = &rwq;
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
+ if (ret) {
+ rte_errno = ret;
+ goto error;
+ }
+ if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
+ DRV_LOG(ERR,
+ "port %u wrong MLX5_CQE_SIZE environment variable"
+ " value: it should be set to %u",
+ dev->data->port_id, RTE_CACHE_LINE_SIZE);
+ rte_errno = EINVAL;
+ goto error;
+ }
+ /* Fill the rings. */
+ rxq_data->wqes = rwq.buf;
+ for (i = 0; (i != wqe_n); ++i) {
+ volatile struct mlx5_wqe_data_seg *scat;
+ uintptr_t addr;
+ uint32_t byte_count;
+
+ if (mprq_en) {
+ struct mlx5_mprq_buf *buf = (*rxq_data->mprq_bufs)[i];
+
+ scat = &((volatile struct mlx5_wqe_mprq *)
+ rxq_data->wqes)[i].dseg;
+ addr = (uintptr_t)mlx5_mprq_buf_addr(buf);
+ byte_count = (1 << rxq_data->strd_sz_n) *
+ (1 << rxq_data->strd_num_n);
+ } else {
+ struct rte_mbuf *buf = (*rxq_data->elts)[i];
+
+ scat = &((volatile struct mlx5_wqe_data_seg *)
+ rxq_data->wqes)[i];
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ byte_count = DATA_LEN(buf);
+ }
+ /* scat->addr must be able to store a pointer. */
+ assert(sizeof(scat->addr) >= sizeof(uintptr_t));
+ *scat = (struct mlx5_wqe_data_seg){
+ .addr = rte_cpu_to_be_64(addr),
+ .byte_count = rte_cpu_to_be_32(byte_count),
+ .lkey = mlx5_rx_addr2mr(rxq_data, addr),
+ };
+ }
+ rxq_data->rq_db = rwq.dbrec;
+ rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
+ rxq_data->cq_ci = 0;
+ rxq_data->consumed_strd = 0;
+ rxq_data->rq_pi = 0;
+ rxq_data->zip = (struct rxq_zip){
+ .ai = 0,
+ };
+ rxq_data->cq_db = cq_info.dbrec;
+ rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
+ rxq_data->cq_uar = cq_info.cq_uar;
+ rxq_data->cqn = cq_info.cqn;
+ rxq_data->cq_arm_sn = 0;
+ /* Update doorbell counter. */
+ rxq_data->rq_ci = wqe_n >> rxq_data->sges_n;
+ rte_wmb();
+ *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
+ DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
+ idx, (void *)&tmpl);
+ rte_atomic32_inc(&tmpl->refcnt);
+ LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ return tmpl;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (tmpl->wq)
+ claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
+ if (tmpl->cq)
+ claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
+ if (tmpl->channel)
+ claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ rte_errno = ret; /* Restore rte_errno. */
+ return NULL;
+}
+
+/**
+ * Get an Rx queue Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Rx queue array
+ *
+ * @return
+ * The Verbs object if it exists.
+ */
+struct mlx5_rxq_ibv *
+mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (idx >= priv->rxqs_n)
+ return NULL;
+ if (!rxq_data)
+ return NULL;
+ rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ if (rxq_ctrl->ibv) {
+ rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
+ }
+ return rxq_ctrl->ibv;
+}
+
+/**
+ * Release an Rx verbs queue object.
+ *
+ * @param rxq_ibv
+ * Verbs Rx queue object.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int
+mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
+{
+ assert(rxq_ibv);
+ assert(rxq_ibv->wq);
+ assert(rxq_ibv->cq);
+ if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
+ rxq_free_elts(rxq_ibv->rxq_ctrl);
+ claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
+ claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
+ if (rxq_ibv->channel)
+ claim_zero(mlx5_glue->destroy_comp_channel
+ (rxq_ibv->channel));
+ LIST_REMOVE(rxq_ibv, next);
+ rte_free(rxq_ibv);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Verify the Verbs Rx queue list is empty
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of object not released.
+ */
+int
+mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret = 0;
+ struct mlx5_rxq_ibv *rxq_ibv;
+
+ LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
+ DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
+ dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Return true if a single reference exists on the object.
+ *
+ * @param rxq_ibv
+ * Verbs Rx queue object.
+ */
+int
+mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
+{
+ assert(rxq_ibv);
+ return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
+}
+
+/**
+ * Callback function to initialize mbufs for Multi-Packet RQ.
+ */
+static inline void
+mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
+ void *_m, unsigned int i __rte_unused)
+{
+ struct mlx5_mprq_buf *buf = _m;
+
+ memset(_m, 0, sizeof(*buf));
+ buf->mp = mp;
+ rte_atomic16_set(&buf->refcnt, 1);
+}
+
+/**
+ * Free mempool of Multi-Packet RQ.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+mlx5_mprq_free_mp(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_mempool *mp = priv->mprq_mp;
+ unsigned int i;
+
+ if (mp == NULL)
+ return 0;
+ DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
+ dev->data->port_id, mp->name);
+ /*
+ * If a buffer in the pool has been externally attached to a mbuf and it
+ * is still in use by application, destroying the Rx qeueue can spoil
+ * the packet. It is unlikely to happen but if application dynamically
+ * creates and destroys with holding Rx packets, this can happen.
+ *
+ * TODO: It is unavoidable for now because the mempool for Multi-Packet
+ * RQ isn't provided by application but managed by PMD.
+ */
+ if (!rte_mempool_full(mp)) {
+ DRV_LOG(ERR,
+ "port %u mempool for Multi-Packet RQ is still in use",
+ dev->data->port_id);
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+ rte_mempool_free(mp);
+ /* Unset mempool for each Rx queue. */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (rxq == NULL)
+ continue;
+ rxq->mprq_mp = NULL;
+ }
+ return 0;
+}
+
+/**
+ * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
+ * mempool. If already allocated, reuse it if there're enough elements.
+ * Otherwise, resize it.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_mempool *mp = priv->mprq_mp;
+ char name[RTE_MEMPOOL_NAMESIZE];
+ unsigned int desc = 0;
+ unsigned int buf_len;
+ unsigned int obj_num;
+ unsigned int obj_size;
+ unsigned int strd_num_n = 0;
+ unsigned int strd_sz_n = 0;
+ unsigned int i;
+
+ if (!mlx5_mprq_enabled(dev))
+ return 0;
+ /* Count the total number of descriptors configured. */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (rxq == NULL)
+ continue;
+ desc += 1 << rxq->elts_n;
+ /* Get the max number of strides. */
+ if (strd_num_n < rxq->strd_num_n)
+ strd_num_n = rxq->strd_num_n;
+ /* Get the max size of a stride. */
+ if (strd_sz_n < rxq->strd_sz_n)
+ strd_sz_n = rxq->strd_sz_n;
+ }
+ assert(strd_num_n && strd_sz_n);
+ buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
+ obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
+ /*
+ * Received packets can be either memcpy'd or externally referenced. In
+ * case that the packet is attached to an mbuf as an external buffer, as
+ * it isn't possible to predict how the buffers will be queued by
+ * application, there's no option to exactly pre-allocate needed buffers
+ * in advance but to speculatively prepares enough buffers.
+ *
+ * In the data path, if this Mempool is depleted, PMD will try to memcpy
+ * received packets to buffers provided by application (rxq->mp) until
+ * this Mempool gets available again.
+ */
+ desc *= 4;
+ obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
+ /*
+ * rte_mempool_create_empty() has sanity check to refuse large cache
+ * size compared to the number of elements.
+ * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
+ * constant number 2 instead.
+ */
+ obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
+ /* Check a mempool is already allocated and if it can be resued. */
+ if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
+ DRV_LOG(DEBUG, "port %u mempool %s is being reused",
+ dev->data->port_id, mp->name);
+ /* Reuse. */
+ goto exit;
+ } else if (mp != NULL) {
+ DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
+ dev->data->port_id, mp->name);
+ /*
+ * If failed to free, which means it may be still in use, no way
+ * but to keep using the existing one. On buffer underrun,
+ * packets will be memcpy'd instead of external buffer
+ * attachment.
+ */
+ if (mlx5_mprq_free_mp(dev)) {
+ if (mp->elt_size >= obj_size)
+ goto exit;
+ else
+ return -rte_errno;
+ }
+ }
+ snprintf(name, sizeof(name), "%s-mprq", dev->device->name);
+ mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
+ 0, NULL, NULL, mlx5_mprq_buf_init, NULL,
+ dev->device->numa_node, 0);
+ if (mp == NULL) {
+ DRV_LOG(ERR,
+ "port %u failed to allocate a mempool for"
+ " Multi-Packet RQ, count=%u, size=%u",
+ dev->data->port_id, obj_num, obj_size);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ priv->mprq_mp = mp;
+exit:
+ /* Set mempool for each Rx queue. */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (rxq == NULL)
+ continue;
+ rxq->mprq_mp = mp;
+ }
+ DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
+ dev->data->port_id);
+ return 0;
+}
+
+/**
+ * Create a DPDK Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ *
+ * @return
+ * A DPDK queue object on success, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_rxq_ctrl *
+mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *tmpl;
+ unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+ unsigned int mprq_stride_size;
+ struct mlx5_dev_config *config = &priv->config;
+ /*
+ * Always allocate extra slots, even if eventually
+ * the vector Rx will not be used.
+ */
+ uint16_t desc_n =
+ desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+ uint64_t offloads = conf->offloads |
+ dev->data->dev_conf.rxmode.offloads;
+ const int mprq_en = mlx5_check_mprq_support(dev) > 0;
+
+ tmpl = rte_calloc_socket("RXQ", 1,
+ sizeof(*tmpl) +
+ desc_n * sizeof(struct rte_mbuf *),
+ 0, socket);
+ if (!tmpl) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
+ MLX5_MR_BTREE_CACHE_N, socket)) {
+ /* rte_errno is already set. */
+ goto error;
+ }
+ tmpl->socket = socket;
+ if (dev->data->dev_conf.intr_conf.rxq)
+ tmpl->irq = 1;
+ /*
+ * This Rx queue can be configured as a Multi-Packet RQ if all of the
+ * following conditions are met:
+ * - MPRQ is enabled.
+ * - The number of descs is more than the number of strides.
+ * - max_rx_pkt_len plus overhead is less than the max size of a
+ * stride.
+ * Otherwise, enable Rx scatter if necessary.
+ */
+ assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+ mprq_stride_size =
+ dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ sizeof(struct rte_mbuf_ext_shared_info) +
+ RTE_PKTMBUF_HEADROOM;
+ if (mprq_en &&
+ desc > (1U << config->mprq.stride_num_n) &&
+ mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
+ /* TODO: Rx scatter isn't supported yet. */
+ tmpl->rxq.sges_n = 0;
+ /* Trim the number of descs needed. */
+ desc >>= config->mprq.stride_num_n;
+ tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
+ tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
+ config->mprq.min_stride_size_n);
+ tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
+ tmpl->rxq.mprq_max_memcpy_len =
+ RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
+ config->mprq.max_memcpy_len);
+ DRV_LOG(DEBUG,
+ "port %u Rx queue %u: Multi-Packet RQ is enabled"
+ " strd_num_n = %u, strd_sz_n = %u",
+ dev->data->port_id, idx,
+ tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
+ } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+ (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ tmpl->rxq.sges_n = 0;
+ } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+ unsigned int size =
+ RTE_PKTMBUF_HEADROOM +
+ dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ unsigned int sges_n;
+
+ /*
+ * Determine the number of SGEs needed for a full packet
+ * and round it to the next power of two.
+ */
+ sges_n = log2above((size / mb_len) + !!(size % mb_len));
+ tmpl->rxq.sges_n = sges_n;
+ /* Make sure rxq.sges_n did not overflow. */
+ size = mb_len * (1 << tmpl->rxq.sges_n);
+ size -= RTE_PKTMBUF_HEADROOM;
+ if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+ DRV_LOG(ERR,
+ "port %u too many SGEs (%u) needed to handle"
+ " requested maximum packet size %u",
+ dev->data->port_id,
+ 1 << sges_n,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ rte_errno = EOVERFLOW;
+ goto error;
+ }
+ } else {
+ DRV_LOG(WARNING,
+ "port %u the requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered mode has"
+ " not been requested",
+ dev->data->port_id,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ mb_len - RTE_PKTMBUF_HEADROOM);
+ }
+ if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
+ DRV_LOG(WARNING,
+ "port %u MPRQ is requested but cannot be enabled"
+ " (requested: desc = %u, stride_sz = %u,"
+ " supported: min_stride_num = %u, max_stride_sz = %u).",
+ dev->data->port_id, desc, mprq_stride_size,
+ (1 << config->mprq.stride_num_n),
+ (1 << config->mprq.max_stride_size_n));
+ DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
+ dev->data->port_id, 1 << tmpl->rxq.sges_n);
+ if (desc % (1 << tmpl->rxq.sges_n)) {
+ DRV_LOG(ERR,
+ "port %u number of Rx queue descriptors (%u) is not a"
+ " multiple of SGEs per packet (%u)",
+ dev->data->port_id,
+ desc,
+ 1 << tmpl->rxq.sges_n);
+ rte_errno = EINVAL;
+ goto error;
+ }
+ /* Toggle RX checksum offload if hardware supports it. */
+ tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+ tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+ /* Configure VLAN stripping. */
+ tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ /* By default, FCS (CRC) is stripped by hardware. */
+ tmpl->rxq.crc_present = 0;
+ if (rte_eth_dev_must_keep_crc(offloads)) {
+ if (config->hw_fcs_strip) {
+ tmpl->rxq.crc_present = 1;
+ } else {
+ DRV_LOG(WARNING,
+ "port %u CRC stripping has been disabled but will"
+ " still be performed by hardware, make sure MLNX_OFED"
+ " and firmware are up to date",
+ dev->data->port_id);
+ }
+ }
+ DRV_LOG(DEBUG,
+ "port %u CRC stripping is %s, %u bytes will be subtracted from"
+ " incoming frames to hide it",
+ dev->data->port_id,
+ tmpl->rxq.crc_present ? "disabled" : "enabled",
+ tmpl->rxq.crc_present << 2);
+ /* Save port ID. */
+ tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
+ (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
+ tmpl->rxq.port_id = dev->data->port_id;
+ tmpl->priv = priv;
+ tmpl->rxq.mp = mp;
+ tmpl->rxq.stats.idx = idx;
+ tmpl->rxq.elts_n = log2above(desc);
+ tmpl->rxq.elts =
+ (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
+#ifndef RTE_ARCH_64
+ tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
+#endif
+ tmpl->idx = idx;
+ rte_atomic32_inc(&tmpl->refcnt);
+ LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
+ return tmpl;
+error:
+ rte_free(tmpl);
+ return NULL;
+}
+
+/**
+ * Get a Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_ctrl *
+mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+
+ if ((*priv->rxqs)[idx]) {
+ rxq_ctrl = container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl,
+ rxq);
+ mlx5_rxq_ibv_get(dev, idx);
+ rte_atomic32_inc(&rxq_ctrl->refcnt);
+ }
+ return rxq_ctrl;
+}
+
+/**
+ * Release a Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int
+mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (!(*priv->rxqs)[idx])
+ return 0;
+ rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
+ assert(rxq_ctrl->priv);
+ if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
+ rxq_ctrl->ibv = NULL;
+ if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
+ mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+ LIST_REMOVE(rxq_ctrl, next);
+ rte_free(rxq_ctrl);
+ (*priv->rxqs)[idx] = NULL;
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Verify if the queue can be released.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * 1 if the queue can be released, negative errno otherwise and rte_errno is
+ * set.
+ */
+int
+mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (!(*priv->rxqs)[idx]) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
+ return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
+}
+
+/**
+ * Verify the Rx Queue list is empty
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of object not released.
+ */
+int
+mlx5_rxq_verify(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ int ret = 0;
+
+ LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+ DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
+ dev->data->port_id, rxq_ctrl->idx);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Create an indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param queues
+ * Queues entering in the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
+ uint32_t queues_n)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl;
+ const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size);
+ struct ibv_wq *wq[1 << wq_n];
+ unsigned int i;
+ unsigned int j;
+
+ ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
+ queues_n * sizeof(uint16_t), 0);
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ for (i = 0; i != queues_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
+
+ if (!rxq)
+ goto error;
+ wq[i] = rxq->ibv->wq;
+ ind_tbl->queues[i] = queues[i];
+ }
+ ind_tbl->queues_n = queues_n;
+ /* Finalise indirection table. */
+ for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
+ wq[i] = wq[j];
+ ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
+ (priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = wq_n,
+ .ind_tbl = wq,
+ .comp_mask = 0,
+ });
+ if (!ind_tbl->ind_table) {
+ rte_errno = errno;
+ goto error;
+ }
+ rte_atomic32_inc(&ind_tbl->refcnt);
+ LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+ return ind_tbl;
+error:
+ rte_free(ind_tbl);
+ DEBUG("port %u cannot create indirection table", dev->data->port_id);
+ return NULL;
+}
+
+/**
+ * Get an indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param queues
+ * Queues entering in the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ *
+ * @return
+ * An indirection table if found.
+ */
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
+ uint32_t queues_n)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl;
+
+ LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
+ if ((ind_tbl->queues_n == queues_n) &&
+ (memcmp(ind_tbl->queues, queues,
+ ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
+ == 0))
+ break;
+ }
+ if (ind_tbl) {
+ unsigned int i;
+
+ rte_atomic32_inc(&ind_tbl->refcnt);
+ for (i = 0; i != ind_tbl->queues_n; ++i)
+ mlx5_rxq_get(dev, ind_tbl->queues[i]);
+ }
+ return ind_tbl;
+}
+
+/**
+ * Release an indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param ind_table
+ * Indirection table to release.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int
+mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_ibv *ind_tbl)
+{
+ unsigned int i;
+
+ if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
+ claim_zero(mlx5_glue->destroy_rwq_ind_table
+ (ind_tbl->ind_table));
+ for (i = 0; i != ind_tbl->queues_n; ++i)
+ claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
+ if (!rte_atomic32_read(&ind_tbl->refcnt)) {
+ LIST_REMOVE(ind_tbl, next);
+ rte_free(ind_tbl);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Verify the Rx Queue list is empty
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of object not released.
+ */
+int
+mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl;
+ int ret = 0;
+
+ LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
+ DRV_LOG(DEBUG,
+ "port %u Verbs indirection table %p still referenced",
+ dev->data->port_id, (void *)ind_tbl);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Create an Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param rss_key
+ * RSS key for the Rx hash queue.
+ * @param rss_key_len
+ * RSS key length.
+ * @param hash_fields
+ * Verbs protocol hash field to make the RSS on.
+ * @param queues
+ * Queues entering in hash queue. In case of empty hash_fields only the
+ * first queue index will be taken for the indirection table.
+ * @param queues_n
+ * Number of queues.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_hrxq *
+mlx5_hrxq_new(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n,
+ int tunnel __rte_unused)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+ struct mlx5_ind_table_ibv *ind_tbl;
+ struct ibv_qp *qp;
+ int err;
+
+ queues_n = hash_fields ? queues_n : 1;
+ ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+ if (!ind_tbl)
+ ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ if (!rss_key_len) {
+ rss_key_len = MLX5_RSS_HASH_KEY_LEN;
+ rss_key = rss_hash_default_key;
+ }
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ qp = mlx5_glue->dv_create_qp
+ (priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_key_len ? rss_key_len :
+ MLX5_RSS_HASH_KEY_LEN,
+ .rx_hash_key = rss_key ?
+ (void *)(uintptr_t)rss_key :
+ rss_hash_default_key,
+ .rx_hash_fields_mask = hash_fields,
+ },
+ .rwq_ind_tbl = ind_tbl->ind_table,
+ .pd = priv->pd,
+ },
+ &(struct mlx5dv_qp_init_attr){
+ .comp_mask = tunnel ?
+ MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS : 0,
+ .create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS,
+ });
+#else
+ qp = mlx5_glue->create_qp_ex
+ (priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_key_len ? rss_key_len :
+ MLX5_RSS_HASH_KEY_LEN,
+ .rx_hash_key = rss_key ?
+ (void *)(uintptr_t)rss_key :
+ rss_hash_default_key,
+ .rx_hash_fields_mask = hash_fields,
+ },
+ .rwq_ind_tbl = ind_tbl->ind_table,
+ .pd = priv->pd,
+ });
+#endif
+ if (!qp) {
+ rte_errno = errno;
+ goto error;
+ }
+ hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
+ if (!hrxq)
+ goto error;
+ hrxq->ind_table = ind_tbl;
+ hrxq->qp = qp;
+ hrxq->rss_key_len = rss_key_len;
+ hrxq->hash_fields = hash_fields;
+ memcpy(hrxq->rss_key, rss_key, rss_key_len);
+ rte_atomic32_inc(&hrxq->refcnt);
+ LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
+ return hrxq;
+error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_ind_table_ibv_release(dev, ind_tbl);
+ if (qp)
+ claim_zero(mlx5_glue->destroy_qp(qp));
+ rte_errno = err; /* Restore rte_errno. */
+ return NULL;
+}
+
+/**
+ * Get an Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param rss_conf
+ * RSS configuration for the Rx hash queue.
+ * @param queues
+ * Queues entering in hash queue. In case of empty hash_fields only the
+ * first queue index will be taken for the indirection table.
+ * @param queues_n
+ * Number of queues.
+ *
+ * @return
+ * An hash Rx queue on success.
+ */
+struct mlx5_hrxq *
+mlx5_hrxq_get(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+
+ queues_n = hash_fields ? queues_n : 1;
+ LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+ struct mlx5_ind_table_ibv *ind_tbl;
+
+ if (hrxq->rss_key_len != rss_key_len)
+ continue;
+ if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
+ continue;
+ if (hrxq->hash_fields != hash_fields)
+ continue;
+ ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+ if (!ind_tbl)
+ continue;
+ if (ind_tbl != hrxq->ind_table) {
+ mlx5_ind_table_ibv_release(dev, ind_tbl);
+ continue;
+ }
+ rte_atomic32_inc(&hrxq->refcnt);
+ return hrxq;
+ }
+ return NULL;
+}
+
+/**
+ * Release the hash Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param hrxq
+ * Pointer to Hash Rx queue to release.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int
+mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
+{
+ if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+ claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+ mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
+ LIST_REMOVE(hrxq, next);
+ rte_free(hrxq);
+ return 0;
+ }
+ claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
+ return 1;
+}
+
+/**
+ * Verify the Rx Queue list is empty
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of object not released.
+ */
+int
+mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+ int ret = 0;
+
+ LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+ DRV_LOG(DEBUG,
+ "port %u Verbs hash Rx queue %p still referenced",
+ dev->data->port_id, (void *)hrxq);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Create a drop Rx queue Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_rxq_ibv *
+mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ibv_cq *cq;
+ struct ibv_wq *wq = NULL;
+ struct mlx5_rxq_ibv *rxq;
+
+ if (priv->drop_queue.rxq)
+ return priv->drop_queue.rxq;
+ cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
+ if (!cq) {
+ DEBUG("port %u cannot allocate CQ for drop queue",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ wq = mlx5_glue->create_wq(priv->ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = 1,
+ .max_sge = 1,
+ .pd = priv->pd,
+ .cq = cq,
+ });
+ if (!wq) {
+ DEBUG("port %u cannot allocate WQ for drop queue",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
+ if (!rxq) {
+ DEBUG("port %u cannot allocate drop Rx queue memory",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ rxq->cq = cq;
+ rxq->wq = wq;
+ priv->drop_queue.rxq = rxq;
+ return rxq;
+error:
+ if (wq)
+ claim_zero(mlx5_glue->destroy_wq(wq));
+ if (cq)
+ claim_zero(mlx5_glue->destroy_cq(cq));
+ return NULL;
+}
+
+/**
+ * Release a drop Rx queue Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+void
+mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;
+
+ if (rxq->wq)
+ claim_zero(mlx5_glue->destroy_wq(rxq->wq));
+ if (rxq->cq)
+ claim_zero(mlx5_glue->destroy_cq(rxq->cq));
+ rte_free(rxq);
+ priv->drop_queue.rxq = NULL;
+}
+
+/**
+ * Create a drop indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_rxq_ibv *rxq;
+ struct mlx5_ind_table_ibv tmpl;
+
+ rxq = mlx5_rxq_ibv_drop_new(dev);
+ if (!rxq)
+ return NULL;
+ tmpl.ind_table = mlx5_glue->create_rwq_ind_table
+ (priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = 0,
+ .ind_tbl = &rxq->wq,
+ .comp_mask = 0,
+ });
+ if (!tmpl.ind_table) {
+ DEBUG("port %u cannot allocate indirection table for drop"
+ " queue",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ ind_tbl->ind_table = tmpl.ind_table;
+ return ind_tbl;
+error:
+ mlx5_rxq_ibv_drop_release(dev);
+ return NULL;
+}
+
+/**
+ * Release a drop indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
+
+ claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
+ mlx5_rxq_ibv_drop_release(dev);
+ rte_free(ind_tbl);
+ priv->drop_queue.hrxq->ind_table = NULL;
+}
+
+/**
+ * Create a drop Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_hrxq *
+mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl;
+ struct ibv_qp *qp;
+ struct mlx5_hrxq *hrxq;
+
+ if (priv->drop_queue.hrxq) {
+ rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
+ return priv->drop_queue.hrxq;
+ }
+ ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
+ if (!ind_tbl)
+ return NULL;
+ qp = mlx5_glue->create_qp_ex(priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function =
+ IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
+ .rx_hash_key = rss_hash_default_key,
+ .rx_hash_fields_mask = 0,
+ },
+ .rwq_ind_tbl = ind_tbl->ind_table,
+ .pd = priv->pd
+ });
+ if (!qp) {
+ DEBUG("port %u cannot allocate QP for drop queue",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+ if (!hrxq) {
+ DRV_LOG(WARNING,
+ "port %u cannot allocate memory for drop queue",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ hrxq->ind_table = ind_tbl;
+ hrxq->qp = qp;
+ priv->drop_queue.hrxq = hrxq;
+ rte_atomic32_set(&hrxq->refcnt, 1);
+ return hrxq;
+error:
+ if (ind_tbl)
+ mlx5_ind_table_ibv_drop_release(dev);
+ return NULL;
+}
+
+/**
+ * Release a drop hash Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
+
+ if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+ claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+ mlx5_ind_table_ibv_drop_release(dev);
+ rte_free(hrxq);
+ priv->drop_queue.hrxq = NULL;
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.c
new file mode 100644
index 00000000..2d14f8a6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.c
@@ -0,0 +1,2373 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#include <infiniband/mlx5dv.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+#include <rte_common.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+
+static __rte_always_inline uint32_t
+rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
+
+static __rte_always_inline int
+mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
+ uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
+
+static __rte_always_inline uint32_t
+rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
+
+static __rte_always_inline void
+rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
+ volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
+
+static __rte_always_inline void
+mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx);
+
+uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
+ [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
+};
+
+uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
+uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
+
+/**
+ * Build a table to translate Rx completion flags to packet type.
+ *
+ * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
+ */
+void
+mlx5_set_ptype_table(void)
+{
+ unsigned int i;
+ uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
+
+ /* Last entry must not be overwritten, reserved for errored packet. */
+ for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
+ (*p)[i] = RTE_PTYPE_UNKNOWN;
+ /*
+ * The index to the array should have:
+ * bit[1:0] = l3_hdr_type
+ * bit[4:2] = l4_hdr_type
+ * bit[5] = ip_frag
+ * bit[6] = tunneled
+ * bit[7] = outer_l3_type
+ */
+ /* L2 */
+ (*p)[0x00] = RTE_PTYPE_L2_ETHER;
+ /* L3 */
+ (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ /* Fragmented */
+ (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ /* TCP */
+ (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ /* UDP */
+ (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ /* Repeat with outer_l3_type being set. Just in case. */
+ (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ /* Tunneled - L3 */
+ (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ /* Tunneled - Fragmented */
+ (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ /* Tunneled - TCP */
+ (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ /* Tunneled - UDP */
+ (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+}
+
+/**
+ * Build a table to translate packet to checksum type of Verbs.
+ */
+void
+mlx5_set_cksum_table(void)
+{
+ unsigned int i;
+ uint8_t v;
+
+ /*
+ * The index should have:
+ * bit[0] = PKT_TX_TCP_SEG
+ * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+ * bit[4] = PKT_TX_IP_CKSUM
+ * bit[8] = PKT_TX_OUTER_IP_CKSUM
+ * bit[9] = tunnel
+ */
+ for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
+ v = 0;
+ if (i & (1 << 9)) {
+ /* Tunneled packet. */
+ if (i & (1 << 8)) /* Outer IP. */
+ v |= MLX5_ETH_WQE_L3_CSUM;
+ if (i & (1 << 4)) /* Inner IP. */
+ v |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
+ v |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ } else {
+ /* No tunnel. */
+ if (i & (1 << 4)) /* IP. */
+ v |= MLX5_ETH_WQE_L3_CSUM;
+ if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
+ v |= MLX5_ETH_WQE_L4_CSUM;
+ }
+ mlx5_cksum_table[i] = v;
+ }
+}
+
+/**
+ * Build a table to translate packet type of mbuf to SWP type of Verbs.
+ */
+void
+mlx5_set_swp_types_table(void)
+{
+ unsigned int i;
+ uint8_t v;
+
+ /*
+ * The index should have:
+ * bit[0:1] = PKT_TX_L4_MASK
+ * bit[4] = PKT_TX_IPV6
+ * bit[8] = PKT_TX_OUTER_IPV6
+ * bit[9] = PKT_TX_OUTER_UDP
+ */
+ for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
+ v = 0;
+ if (i & (1 << 8))
+ v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
+ if (i & (1 << 9))
+ v |= MLX5_ETH_WQE_L4_OUTER_UDP;
+ if (i & (1 << 4))
+ v |= MLX5_ETH_WQE_L3_INNER_IPV6;
+ if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
+ v |= MLX5_ETH_WQE_L4_INNER_UDP;
+ mlx5_swp_types_table[i] = v;
+ }
+}
+
+/**
+ * Return the size of tailroom of WQ.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param addr
+ * Pointer to tail of WQ.
+ *
+ * @return
+ * Size of tailroom.
+ */
+static inline size_t
+tx_mlx5_wq_tailroom(struct mlx5_txq_data *txq, void *addr)
+{
+ size_t tailroom;
+ tailroom = (uintptr_t)(txq->wqes) +
+ (1 << txq->wqe_n) * MLX5_WQE_SIZE -
+ (uintptr_t)addr;
+ return tailroom;
+}
+
+/**
+ * Copy data to tailroom of circular queue.
+ *
+ * @param dst
+ * Pointer to destination.
+ * @param src
+ * Pointer to source.
+ * @param n
+ * Number of bytes to copy.
+ * @param base
+ * Pointer to head of queue.
+ * @param tailroom
+ * Size of tailroom from dst.
+ *
+ * @return
+ * Pointer after copied data.
+ */
+static inline void *
+mlx5_copy_to_wq(void *dst, const void *src, size_t n,
+ void *base, size_t tailroom)
+{
+ void *ret;
+
+ if (n > tailroom) {
+ rte_memcpy(dst, src, tailroom);
+ rte_memcpy(base, (void *)((uintptr_t)src + tailroom),
+ n - tailroom);
+ ret = (uint8_t *)base + n - tailroom;
+ } else {
+ rte_memcpy(dst, src, n);
+ ret = (n == tailroom) ? base : (uint8_t *)dst + n;
+ }
+ return ret;
+}
+
+/**
+ * Inline TSO headers into WQE.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+ uint32_t *length,
+ uintptr_t *addr,
+ uint16_t *pkt_inline_sz,
+ uint8_t **raw,
+ uint16_t *max_wqe,
+ uint16_t *tso_segsz,
+ uint16_t *tso_header_sz)
+{
+ uintptr_t end = (uintptr_t)(((uintptr_t)txq->wqes) +
+ (1 << txq->wqe_n) * MLX5_WQE_SIZE);
+ unsigned int copy_b;
+ uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
+ const uint8_t tunneled = txq->tunnel_en && (buf->ol_flags &
+ PKT_TX_TUNNEL_MASK);
+ uint16_t n_wqe;
+
+ *tso_segsz = buf->tso_segsz;
+ *tso_header_sz = buf->l2_len + vlan_sz + buf->l3_len + buf->l4_len;
+ if (unlikely(*tso_segsz == 0 || *tso_header_sz == 0)) {
+ txq->stats.oerrors++;
+ return -EINVAL;
+ }
+ if (tunneled)
+ *tso_header_sz += buf->outer_l2_len + buf->outer_l3_len;
+ /* First seg must contain all TSO headers. */
+ if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER) ||
+ *tso_header_sz > DATA_LEN(buf)) {
+ txq->stats.oerrors++;
+ return -EINVAL;
+ }
+ copy_b = *tso_header_sz - *pkt_inline_sz;
+ if (!copy_b || ((end - (uintptr_t)*raw) < copy_b))
+ return -EAGAIN;
+ n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
+ if (unlikely(*max_wqe < n_wqe))
+ return -EINVAL;
+ *max_wqe -= n_wqe;
+ rte_memcpy((void *)*raw, (void *)*addr, copy_b);
+ *length -= copy_b;
+ *addr += copy_b;
+ copy_b = MLX5_WQE_DS(copy_b) * MLX5_WQE_DWORD_SIZE;
+ *pkt_inline_sz += copy_b;
+ *raw += copy_b;
+ return 0;
+}
+
+/**
+ * DPDK callback to check the status of a tx descriptor.
+ *
+ * @param tx_queue
+ * The tx queue.
+ * @param[in] offset
+ * The index of the descriptor in the ring.
+ *
+ * @return
+ * The status of the tx descriptor.
+ */
+int
+mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct mlx5_txq_data *txq = tx_queue;
+ uint16_t used;
+
+ mlx5_tx_complete(txq);
+ used = txq->elts_head - txq->elts_tail;
+ if (offset < used)
+ return RTE_ETH_TX_DESC_FULL;
+ return RTE_ETH_TX_DESC_DONE;
+}
+
+/**
+ * DPDK callback to check the status of a rx descriptor.
+ *
+ * @param rx_queue
+ * The rx queue.
+ * @param[in] offset
+ * The index of the descriptor in the ring.
+ *
+ * @return
+ * The status of the tx descriptor.
+ */
+int
+mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct mlx5_rxq_data *rxq = rx_queue;
+ struct rxq_zip *zip = &rxq->zip;
+ volatile struct mlx5_cqe *cqe;
+ const unsigned int cqe_n = (1 << rxq->cqe_n);
+ const unsigned int cqe_cnt = cqe_n - 1;
+ unsigned int cq_ci;
+ unsigned int used;
+
+ /* if we are processing a compressed cqe */
+ if (zip->ai) {
+ used = zip->cqe_cnt - zip->ca;
+ cq_ci = zip->cq_ci;
+ } else {
+ used = 0;
+ cq_ci = rxq->cq_ci;
+ }
+ cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
+ while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
+ int8_t op_own;
+ unsigned int n;
+
+ op_own = cqe->op_own;
+ if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
+ n = rte_be_to_cpu_32(cqe->byte_cnt);
+ else
+ n = 1;
+ cq_ci += n;
+ used += n;
+ cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
+ }
+ used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
+ if (offset < used)
+ return RTE_ETH_RX_DESC_DONE;
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+/**
+ * DPDK callback for TX.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ unsigned int i = 0;
+ unsigned int j = 0;
+ unsigned int k = 0;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ unsigned int comp;
+ volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
+ unsigned int segs_n = 0;
+ const unsigned int max_inline = txq->max_inline;
+ uint64_t addr_64;
+
+ if (unlikely(!pkts_n))
+ return 0;
+ /* Prefetch first packet cacheline. */
+ rte_prefetch0(*pkts);
+ /* Start processing. */
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ if (unlikely(!max_wqe))
+ return 0;
+ do {
+ struct rte_mbuf *buf = *pkts; /* First_seg. */
+ uint8_t *raw;
+ volatile struct mlx5_wqe_v *wqe = NULL;
+ volatile rte_v128u32_t *dseg = NULL;
+ uint32_t length;
+ unsigned int ds = 0;
+ unsigned int sg = 0; /* counter of additional segs attached. */
+ uintptr_t addr;
+ uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
+ uint16_t tso_header_sz = 0;
+ uint16_t ehdr;
+ uint8_t cs_flags;
+ uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
+ uint32_t swp_offsets = 0;
+ uint8_t swp_types = 0;
+ uint16_t tso_segsz = 0;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t total_length = 0;
+#endif
+ int ret;
+
+ segs_n = buf->nb_segs;
+ /*
+ * Make sure there is enough room to store this packet and
+ * that one ring entry remains unused.
+ */
+ assert(segs_n);
+ if (max_elts < segs_n)
+ break;
+ max_elts -= segs_n;
+ sg = --segs_n;
+ if (unlikely(--max_wqe == 0))
+ break;
+ wqe = (volatile struct mlx5_wqe_v *)
+ tx_mlx5_wqe(txq, txq->wqe_ci);
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
+ if (pkts_n - i > 1)
+ rte_prefetch0(*(pkts + 1));
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ length = DATA_LEN(buf);
+ ehdr = (((uint8_t *)addr)[1] << 8) |
+ ((uint8_t *)addr)[0];
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ total_length = length;
+#endif
+ if (length < (MLX5_WQE_DWORD_SIZE + 2)) {
+ txq->stats.oerrors++;
+ break;
+ }
+ /* Update element. */
+ (*txq->elts)[elts_head & elts_m] = buf;
+ /* Prefetch next buffer data. */
+ if (pkts_n - i > 1)
+ rte_prefetch0(
+ rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
+ cs_flags = txq_ol_cksum_to_cs(buf);
+ txq_mbuf_to_swp(txq, buf, (uint8_t *)&swp_offsets, &swp_types);
+ raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
+ /* Replace the Ethernet type by the VLAN if necessary. */
+ if (buf->ol_flags & PKT_TX_VLAN_PKT) {
+ uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
+ buf->vlan_tci);
+ unsigned int len = 2 * ETHER_ADDR_LEN - 2;
+
+ addr += 2;
+ length -= 2;
+ /* Copy Destination and source mac address. */
+ memcpy((uint8_t *)raw, ((uint8_t *)addr), len);
+ /* Copy VLAN. */
+ memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan));
+ /* Copy missing two bytes to end the DSeg. */
+ memcpy((uint8_t *)raw + len + sizeof(vlan),
+ ((uint8_t *)addr) + len, 2);
+ addr += len + 2;
+ length -= (len + 2);
+ } else {
+ memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2,
+ MLX5_WQE_DWORD_SIZE);
+ length -= pkt_inline_sz;
+ addr += pkt_inline_sz;
+ }
+ raw += MLX5_WQE_DWORD_SIZE;
+ if (tso) {
+ ret = inline_tso(txq, buf, &length,
+ &addr, &pkt_inline_sz,
+ &raw, &max_wqe,
+ &tso_segsz, &tso_header_sz);
+ if (ret == -EINVAL) {
+ break;
+ } else if (ret == -EAGAIN) {
+ /* NOP WQE. */
+ wqe->ctrl = (rte_v128u32_t){
+ rte_cpu_to_be_32(txq->wqe_ci << 8),
+ rte_cpu_to_be_32(txq->qp_num_8s | 1),
+ 0,
+ 0,
+ };
+ ds = 1;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ total_length = 0;
+#endif
+ k++;
+ goto next_wqe;
+ }
+ }
+ /* Inline if enough room. */
+ if (max_inline || tso) {
+ uint32_t inl = 0;
+ uintptr_t end = (uintptr_t)
+ (((uintptr_t)txq->wqes) +
+ (1 << txq->wqe_n) * MLX5_WQE_SIZE);
+ unsigned int inline_room = max_inline *
+ RTE_CACHE_LINE_SIZE -
+ (pkt_inline_sz - 2) -
+ !!tso * sizeof(inl);
+ uintptr_t addr_end;
+ unsigned int copy_b;
+
+pkt_inline:
+ addr_end = RTE_ALIGN_FLOOR(addr + inline_room,
+ RTE_CACHE_LINE_SIZE);
+ copy_b = (addr_end > addr) ?
+ RTE_MIN((addr_end - addr), length) : 0;
+ if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
+ /*
+ * One Dseg remains in the current WQE. To
+ * keep the computation positive, it is
+ * removed after the bytes to Dseg conversion.
+ */
+ uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
+
+ if (unlikely(max_wqe < n))
+ break;
+ max_wqe -= n;
+ if (tso) {
+ assert(inl == 0);
+ inl = rte_cpu_to_be_32(copy_b |
+ MLX5_INLINE_SEG);
+ rte_memcpy((void *)raw,
+ (void *)&inl, sizeof(inl));
+ raw += sizeof(inl);
+ pkt_inline_sz += sizeof(inl);
+ }
+ rte_memcpy((void *)raw, (void *)addr, copy_b);
+ addr += copy_b;
+ length -= copy_b;
+ pkt_inline_sz += copy_b;
+ }
+ /*
+ * 2 DWORDs consumed by the WQE header + ETH segment +
+ * the size of the inline part of the packet.
+ */
+ ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
+ if (length > 0) {
+ if (ds % (MLX5_WQE_SIZE /
+ MLX5_WQE_DWORD_SIZE) == 0) {
+ if (unlikely(--max_wqe == 0))
+ break;
+ dseg = (volatile rte_v128u32_t *)
+ tx_mlx5_wqe(txq, txq->wqe_ci +
+ ds / 4);
+ } else {
+ dseg = (volatile rte_v128u32_t *)
+ ((uintptr_t)wqe +
+ (ds * MLX5_WQE_DWORD_SIZE));
+ }
+ goto use_dseg;
+ } else if (!segs_n) {
+ goto next_pkt;
+ } else {
+ /*
+ * Further inline the next segment only for
+ * non-TSO packets.
+ */
+ if (!tso) {
+ raw += copy_b;
+ inline_room -= copy_b;
+ } else {
+ inline_room = 0;
+ }
+ /* Move to the next segment. */
+ --segs_n;
+ buf = buf->next;
+ assert(buf);
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ length = DATA_LEN(buf);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ total_length += length;
+#endif
+ (*txq->elts)[++elts_head & elts_m] = buf;
+ goto pkt_inline;
+ }
+ } else {
+ /*
+ * No inline has been done in the packet, only the
+ * Ethernet Header as been stored.
+ */
+ dseg = (volatile rte_v128u32_t *)
+ ((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
+ ds = 3;
+use_dseg:
+ /* Add the remaining packet as a simple ds. */
+ addr_64 = rte_cpu_to_be_64(addr);
+ *dseg = (rte_v128u32_t){
+ rte_cpu_to_be_32(length),
+ mlx5_tx_mb2mr(txq, buf),
+ addr_64,
+ addr_64 >> 32,
+ };
+ ++ds;
+ if (!segs_n)
+ goto next_pkt;
+ }
+next_seg:
+ assert(buf);
+ assert(ds);
+ assert(wqe);
+ /*
+ * Spill on next WQE when the current one does not have
+ * enough room left. Size of WQE must a be a multiple
+ * of data segment size.
+ */
+ assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE));
+ if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) {
+ if (unlikely(--max_wqe == 0))
+ break;
+ dseg = (volatile rte_v128u32_t *)
+ tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4);
+ rte_prefetch0(tx_mlx5_wqe(txq,
+ txq->wqe_ci + ds / 4 + 1));
+ } else {
+ ++dseg;
+ }
+ ++ds;
+ buf = buf->next;
+ assert(buf);
+ length = DATA_LEN(buf);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ total_length += length;
+#endif
+ /* Store segment information. */
+ addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
+ *dseg = (rte_v128u32_t){
+ rte_cpu_to_be_32(length),
+ mlx5_tx_mb2mr(txq, buf),
+ addr_64,
+ addr_64 >> 32,
+ };
+ (*txq->elts)[++elts_head & elts_m] = buf;
+ if (--segs_n)
+ goto next_seg;
+next_pkt:
+ if (ds > MLX5_DSEG_MAX) {
+ txq->stats.oerrors++;
+ break;
+ }
+ ++elts_head;
+ ++pkts;
+ ++i;
+ j += sg;
+ /* Initialize known and common part of the WQE structure. */
+ if (tso) {
+ wqe->ctrl = (rte_v128u32_t){
+ rte_cpu_to_be_32((txq->wqe_ci << 8) |
+ MLX5_OPCODE_TSO),
+ rte_cpu_to_be_32(txq->qp_num_8s | ds),
+ 0,
+ 0,
+ };
+ wqe->eseg = (rte_v128u32_t){
+ swp_offsets,
+ cs_flags | (swp_types << 8) |
+ (rte_cpu_to_be_16(tso_segsz) << 16),
+ 0,
+ (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
+ };
+ } else {
+ wqe->ctrl = (rte_v128u32_t){
+ rte_cpu_to_be_32((txq->wqe_ci << 8) |
+ MLX5_OPCODE_SEND),
+ rte_cpu_to_be_32(txq->qp_num_8s | ds),
+ 0,
+ 0,
+ };
+ wqe->eseg = (rte_v128u32_t){
+ swp_offsets,
+ cs_flags | (swp_types << 8),
+ 0,
+ (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
+ };
+ }
+next_wqe:
+ txq->wqe_ci += (ds + 3) / 4;
+ /* Save the last successful WQE for completion request */
+ last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent bytes counter. */
+ txq->stats.obytes += total_length;
+#endif
+ } while (i < pkts_n);
+ /* Take a shortcut if nothing must be sent. */
+ if (unlikely((i + k) == 0))
+ return 0;
+ txq->elts_head += (i + j);
+ /* Check whether completion threshold has been reached. */
+ comp = txq->elts_comp + i + j + k;
+ if (comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
+ /* Request completion on last WQE. */
+ last_wqe->ctrl2 = rte_cpu_to_be_32(8);
+ /* Save elts_head in unused "immediate" field of WQE. */
+ last_wqe->ctrl3 = txq->elts_head;
+ txq->elts_comp = 0;
+ } else {
+ txq->elts_comp = comp;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent packets counter. */
+ txq->stats.opackets += i;
+#endif
+ /* Ring QP doorbell. */
+ mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
+ return i;
+}
+
+/**
+ * Open a MPW session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ * @param length
+ * Packet length.
+ */
+static inline void
+mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
+{
+ uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
+ volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
+ (volatile struct mlx5_wqe_data_seg (*)[])
+ tx_mlx5_wqe(txq, idx + 1);
+
+ mpw->state = MLX5_MPW_STATE_OPENED;
+ mpw->pkts_n = 0;
+ mpw->len = length;
+ mpw->total_len = 0;
+ mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
+ mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
+ mpw->wqe->eseg.inline_hdr_sz = 0;
+ mpw->wqe->eseg.rsvd0 = 0;
+ mpw->wqe->eseg.rsvd1 = 0;
+ mpw->wqe->eseg.rsvd2 = 0;
+ mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_TSO);
+ mpw->wqe->ctrl[2] = 0;
+ mpw->wqe->ctrl[3] = 0;
+ mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
+ (((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
+ mpw->data.dseg[1] = (volatile struct mlx5_wqe_data_seg *)
+ (((uintptr_t)mpw->wqe) + (3 * MLX5_WQE_DWORD_SIZE));
+ mpw->data.dseg[2] = &(*dseg)[0];
+ mpw->data.dseg[3] = &(*dseg)[1];
+ mpw->data.dseg[4] = &(*dseg)[2];
+}
+
+/**
+ * Close a MPW session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ */
+static inline void
+mlx5_mpw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
+{
+ unsigned int num = mpw->pkts_n;
+
+ /*
+ * Store size in multiple of 16 bytes. Control and Ethernet segments
+ * count as 2.
+ */
+ mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
+ mpw->state = MLX5_MPW_STATE_CLOSED;
+ if (num < 3)
+ ++txq->wqe_ci;
+ else
+ txq->wqe_ci += 2;
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
+}
+
+/**
+ * DPDK callback for TX with MPW support.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ unsigned int i = 0;
+ unsigned int j = 0;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ unsigned int comp;
+ struct mlx5_mpw mpw = {
+ .state = MLX5_MPW_STATE_CLOSED,
+ };
+
+ if (unlikely(!pkts_n))
+ return 0;
+ /* Prefetch first packet cacheline. */
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
+ /* Start processing. */
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ if (unlikely(!max_wqe))
+ return 0;
+ do {
+ struct rte_mbuf *buf = *(pkts++);
+ uint32_t length;
+ unsigned int segs_n = buf->nb_segs;
+ uint32_t cs_flags;
+
+ /*
+ * Make sure there is enough room to store this packet and
+ * that one ring entry remains unused.
+ */
+ assert(segs_n);
+ if (max_elts < segs_n)
+ break;
+ /* Do not bother with large packets MPW cannot handle. */
+ if (segs_n > MLX5_MPW_DSEG_MAX) {
+ txq->stats.oerrors++;
+ break;
+ }
+ max_elts -= segs_n;
+ --pkts_n;
+ cs_flags = txq_ol_cksum_to_cs(buf);
+ /* Retrieve packet information. */
+ length = PKT_LEN(buf);
+ assert(length);
+ /* Start new session if packet differs. */
+ if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
+ ((mpw.len != length) ||
+ (segs_n != 1) ||
+ (mpw.wqe->eseg.cs_flags != cs_flags)))
+ mlx5_mpw_close(txq, &mpw);
+ if (mpw.state == MLX5_MPW_STATE_CLOSED) {
+ /*
+ * Multi-Packet WQE consumes at most two WQE.
+ * mlx5_mpw_new() expects to be able to use such
+ * resources.
+ */
+ if (unlikely(max_wqe < 2))
+ break;
+ max_wqe -= 2;
+ mlx5_mpw_new(txq, &mpw, length);
+ mpw.wqe->eseg.cs_flags = cs_flags;
+ }
+ /* Multi-segment packets must be alone in their MPW. */
+ assert((segs_n == 1) || (mpw.pkts_n == 0));
+#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
+ length = 0;
+#endif
+ do {
+ volatile struct mlx5_wqe_data_seg *dseg;
+ uintptr_t addr;
+
+ assert(buf);
+ (*txq->elts)[elts_head++ & elts_m] = buf;
+ dseg = mpw.data.dseg[mpw.pkts_n];
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ *dseg = (struct mlx5_wqe_data_seg){
+ .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
+ .lkey = mlx5_tx_mb2mr(txq, buf),
+ .addr = rte_cpu_to_be_64(addr),
+ };
+#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
+ length += DATA_LEN(buf);
+#endif
+ buf = buf->next;
+ ++mpw.pkts_n;
+ ++j;
+ } while (--segs_n);
+ assert(length == mpw.len);
+ if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
+ mlx5_mpw_close(txq, &mpw);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent bytes counter. */
+ txq->stats.obytes += length;
+#endif
+ ++i;
+ } while (pkts_n);
+ /* Take a shortcut if nothing must be sent. */
+ if (unlikely(i == 0))
+ return 0;
+ /* Check whether completion threshold has been reached. */
+ /* "j" includes both packets and segments. */
+ comp = txq->elts_comp + j;
+ if (comp >= MLX5_TX_COMP_THRESH) {
+ volatile struct mlx5_wqe *wqe = mpw.wqe;
+
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
+ /* Request completion on last WQE. */
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
+ /* Save elts_head in unused "immediate" field of WQE. */
+ wqe->ctrl[3] = elts_head;
+ txq->elts_comp = 0;
+ } else {
+ txq->elts_comp = comp;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent packets counter. */
+ txq->stats.opackets += i;
+#endif
+ /* Ring QP doorbell. */
+ if (mpw.state == MLX5_MPW_STATE_OPENED)
+ mlx5_mpw_close(txq, &mpw);
+ mlx5_tx_dbrec(txq, mpw.wqe);
+ txq->elts_head = elts_head;
+ return i;
+}
+
+/**
+ * Open a MPW inline session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ * @param length
+ * Packet length.
+ */
+static inline void
+mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw,
+ uint32_t length)
+{
+ uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
+ struct mlx5_wqe_inl_small *inl;
+
+ mpw->state = MLX5_MPW_INL_STATE_OPENED;
+ mpw->pkts_n = 0;
+ mpw->len = length;
+ mpw->total_len = 0;
+ mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
+ mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_TSO);
+ mpw->wqe->ctrl[2] = 0;
+ mpw->wqe->ctrl[3] = 0;
+ mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
+ mpw->wqe->eseg.inline_hdr_sz = 0;
+ mpw->wqe->eseg.cs_flags = 0;
+ mpw->wqe->eseg.rsvd0 = 0;
+ mpw->wqe->eseg.rsvd1 = 0;
+ mpw->wqe->eseg.rsvd2 = 0;
+ inl = (struct mlx5_wqe_inl_small *)
+ (((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE);
+ mpw->data.raw = (uint8_t *)&inl->raw;
+}
+
+/**
+ * Close a MPW inline session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ */
+static inline void
+mlx5_mpw_inline_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
+{
+ unsigned int size;
+ struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
+ (((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
+
+ size = MLX5_WQE_SIZE - MLX5_MWQE64_INL_DATA + mpw->total_len;
+ /*
+ * Store size in multiple of 16 bytes. Control and Ethernet segments
+ * count as 2.
+ */
+ mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+ MLX5_WQE_DS(size));
+ mpw->state = MLX5_MPW_STATE_CLOSED;
+ inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
+ txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
+}
+
+/**
+ * DPDK callback for TX with MPW inline support.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ unsigned int i = 0;
+ unsigned int j = 0;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ unsigned int comp;
+ unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
+ struct mlx5_mpw mpw = {
+ .state = MLX5_MPW_STATE_CLOSED,
+ };
+ /*
+ * Compute the maximum number of WQE which can be consumed by inline
+ * code.
+ * - 2 DSEG for:
+ * - 1 control segment,
+ * - 1 Ethernet segment,
+ * - N Dseg from the inline request.
+ */
+ const unsigned int wqe_inl_n =
+ ((2 * MLX5_WQE_DWORD_SIZE +
+ txq->max_inline * RTE_CACHE_LINE_SIZE) +
+ RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
+
+ if (unlikely(!pkts_n))
+ return 0;
+ /* Prefetch first packet cacheline. */
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
+ /* Start processing. */
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
+ do {
+ struct rte_mbuf *buf = *(pkts++);
+ uintptr_t addr;
+ uint32_t length;
+ unsigned int segs_n = buf->nb_segs;
+ uint8_t cs_flags;
+
+ /*
+ * Make sure there is enough room to store this packet and
+ * that one ring entry remains unused.
+ */
+ assert(segs_n);
+ if (max_elts < segs_n)
+ break;
+ /* Do not bother with large packets MPW cannot handle. */
+ if (segs_n > MLX5_MPW_DSEG_MAX) {
+ txq->stats.oerrors++;
+ break;
+ }
+ max_elts -= segs_n;
+ --pkts_n;
+ /*
+ * Compute max_wqe in case less WQE were consumed in previous
+ * iteration.
+ */
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ cs_flags = txq_ol_cksum_to_cs(buf);
+ /* Retrieve packet information. */
+ length = PKT_LEN(buf);
+ /* Start new session if packet differs. */
+ if (mpw.state == MLX5_MPW_STATE_OPENED) {
+ if ((mpw.len != length) ||
+ (segs_n != 1) ||
+ (mpw.wqe->eseg.cs_flags != cs_flags))
+ mlx5_mpw_close(txq, &mpw);
+ } else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
+ if ((mpw.len != length) ||
+ (segs_n != 1) ||
+ (length > inline_room) ||
+ (mpw.wqe->eseg.cs_flags != cs_flags)) {
+ mlx5_mpw_inline_close(txq, &mpw);
+ inline_room =
+ txq->max_inline * RTE_CACHE_LINE_SIZE;
+ }
+ }
+ if (mpw.state == MLX5_MPW_STATE_CLOSED) {
+ if ((segs_n != 1) ||
+ (length > inline_room)) {
+ /*
+ * Multi-Packet WQE consumes at most two WQE.
+ * mlx5_mpw_new() expects to be able to use
+ * such resources.
+ */
+ if (unlikely(max_wqe < 2))
+ break;
+ max_wqe -= 2;
+ mlx5_mpw_new(txq, &mpw, length);
+ mpw.wqe->eseg.cs_flags = cs_flags;
+ } else {
+ if (unlikely(max_wqe < wqe_inl_n))
+ break;
+ max_wqe -= wqe_inl_n;
+ mlx5_mpw_inline_new(txq, &mpw, length);
+ mpw.wqe->eseg.cs_flags = cs_flags;
+ }
+ }
+ /* Multi-segment packets must be alone in their MPW. */
+ assert((segs_n == 1) || (mpw.pkts_n == 0));
+ if (mpw.state == MLX5_MPW_STATE_OPENED) {
+ assert(inline_room ==
+ txq->max_inline * RTE_CACHE_LINE_SIZE);
+#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
+ length = 0;
+#endif
+ do {
+ volatile struct mlx5_wqe_data_seg *dseg;
+
+ assert(buf);
+ (*txq->elts)[elts_head++ & elts_m] = buf;
+ dseg = mpw.data.dseg[mpw.pkts_n];
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ *dseg = (struct mlx5_wqe_data_seg){
+ .byte_count =
+ rte_cpu_to_be_32(DATA_LEN(buf)),
+ .lkey = mlx5_tx_mb2mr(txq, buf),
+ .addr = rte_cpu_to_be_64(addr),
+ };
+#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
+ length += DATA_LEN(buf);
+#endif
+ buf = buf->next;
+ ++mpw.pkts_n;
+ ++j;
+ } while (--segs_n);
+ assert(length == mpw.len);
+ if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
+ mlx5_mpw_close(txq, &mpw);
+ } else {
+ unsigned int max;
+
+ assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
+ assert(length <= inline_room);
+ assert(length == DATA_LEN(buf));
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ (*txq->elts)[elts_head++ & elts_m] = buf;
+ /* Maximum number of bytes before wrapping. */
+ max = ((((uintptr_t)(txq->wqes)) +
+ (1 << txq->wqe_n) *
+ MLX5_WQE_SIZE) -
+ (uintptr_t)mpw.data.raw);
+ if (length > max) {
+ rte_memcpy((void *)(uintptr_t)mpw.data.raw,
+ (void *)addr,
+ max);
+ mpw.data.raw = (volatile void *)txq->wqes;
+ rte_memcpy((void *)(uintptr_t)mpw.data.raw,
+ (void *)(addr + max),
+ length - max);
+ mpw.data.raw += length - max;
+ } else {
+ rte_memcpy((void *)(uintptr_t)mpw.data.raw,
+ (void *)addr,
+ length);
+
+ if (length == max)
+ mpw.data.raw =
+ (volatile void *)txq->wqes;
+ else
+ mpw.data.raw += length;
+ }
+ ++mpw.pkts_n;
+ mpw.total_len += length;
+ ++j;
+ if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
+ mlx5_mpw_inline_close(txq, &mpw);
+ inline_room =
+ txq->max_inline * RTE_CACHE_LINE_SIZE;
+ } else {
+ inline_room -= length;
+ }
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent bytes counter. */
+ txq->stats.obytes += length;
+#endif
+ ++i;
+ } while (pkts_n);
+ /* Take a shortcut if nothing must be sent. */
+ if (unlikely(i == 0))
+ return 0;
+ /* Check whether completion threshold has been reached. */
+ /* "j" includes both packets and segments. */
+ comp = txq->elts_comp + j;
+ if (comp >= MLX5_TX_COMP_THRESH) {
+ volatile struct mlx5_wqe *wqe = mpw.wqe;
+
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
+ /* Request completion on last WQE. */
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
+ /* Save elts_head in unused "immediate" field of WQE. */
+ wqe->ctrl[3] = elts_head;
+ txq->elts_comp = 0;
+ } else {
+ txq->elts_comp = comp;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent packets counter. */
+ txq->stats.opackets += i;
+#endif
+ /* Ring QP doorbell. */
+ if (mpw.state == MLX5_MPW_INL_STATE_OPENED)
+ mlx5_mpw_inline_close(txq, &mpw);
+ else if (mpw.state == MLX5_MPW_STATE_OPENED)
+ mlx5_mpw_close(txq, &mpw);
+ mlx5_tx_dbrec(txq, mpw.wqe);
+ txq->elts_head = elts_head;
+ return i;
+}
+
+/**
+ * Open an Enhanced MPW session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ * @param length
+ * Packet length.
+ */
+static inline void
+mlx5_empw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, int padding)
+{
+ uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
+
+ mpw->state = MLX5_MPW_ENHANCED_STATE_OPENED;
+ mpw->pkts_n = 0;
+ mpw->total_len = sizeof(struct mlx5_wqe);
+ mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
+ mpw->wqe->ctrl[0] =
+ rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_ENHANCED_MPSW);
+ mpw->wqe->ctrl[2] = 0;
+ mpw->wqe->ctrl[3] = 0;
+ memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
+ if (unlikely(padding)) {
+ uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
+
+ /* Pad the first 2 DWORDs with zero-length inline header. */
+ *(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
+ *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
+ rte_cpu_to_be_32(MLX5_INLINE_SEG);
+ mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
+ /* Start from the next WQEBB. */
+ mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
+ } else {
+ mpw->data.raw = (volatile void *)(mpw->wqe + 1);
+ }
+}
+
+/**
+ * Close an Enhanced MPW session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ *
+ * @return
+ * Number of consumed WQEs.
+ */
+static inline uint16_t
+mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
+{
+ uint16_t ret;
+
+ /* Store size in multiple of 16 bytes. Control and Ethernet segments
+ * count as 2.
+ */
+ mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+ MLX5_WQE_DS(mpw->total_len));
+ mpw->state = MLX5_MPW_STATE_CLOSED;
+ ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
+ txq->wqe_ci += ret;
+ return ret;
+}
+
+/**
+ * TX with Enhanced MPW support.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+static inline uint16_t
+txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ unsigned int i = 0;
+ unsigned int j = 0;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
+ unsigned int mpw_room = 0;
+ unsigned int inl_pad = 0;
+ uint32_t inl_hdr;
+ uint64_t addr_64;
+ struct mlx5_mpw mpw = {
+ .state = MLX5_MPW_STATE_CLOSED,
+ };
+
+ if (unlikely(!pkts_n))
+ return 0;
+ /* Start processing. */
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ if (unlikely(!max_wqe))
+ return 0;
+ do {
+ struct rte_mbuf *buf = *(pkts++);
+ uintptr_t addr;
+ unsigned int do_inline = 0; /* Whether inline is possible. */
+ uint32_t length;
+ uint8_t cs_flags;
+
+ /* Multi-segmented packet is handled in slow-path outside. */
+ assert(NB_SEGS(buf) == 1);
+ /* Make sure there is enough room to store this packet. */
+ if (max_elts - j == 0)
+ break;
+ cs_flags = txq_ol_cksum_to_cs(buf);
+ /* Retrieve packet information. */
+ length = PKT_LEN(buf);
+ /* Start new session if:
+ * - multi-segment packet
+ * - no space left even for a dseg
+ * - next packet can be inlined with a new WQE
+ * - cs_flag differs
+ */
+ if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
+ if ((inl_pad + sizeof(struct mlx5_wqe_data_seg) >
+ mpw_room) ||
+ (length <= txq->inline_max_packet_sz &&
+ inl_pad + sizeof(inl_hdr) + length >
+ mpw_room) ||
+ (mpw.wqe->eseg.cs_flags != cs_flags))
+ max_wqe -= mlx5_empw_close(txq, &mpw);
+ }
+ if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
+ /* In Enhanced MPW, inline as much as the budget is
+ * allowed. The remaining space is to be filled with
+ * dsegs. If the title WQEBB isn't padded, it will have
+ * 2 dsegs there.
+ */
+ mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
+ (max_inline ? max_inline :
+ pkts_n * MLX5_WQE_DWORD_SIZE) +
+ MLX5_WQE_SIZE);
+ if (unlikely(max_wqe * MLX5_WQE_SIZE < mpw_room))
+ break;
+ /* Don't pad the title WQEBB to not waste WQ. */
+ mlx5_empw_new(txq, &mpw, 0);
+ mpw_room -= mpw.total_len;
+ inl_pad = 0;
+ do_inline = length <= txq->inline_max_packet_sz &&
+ sizeof(inl_hdr) + length <= mpw_room &&
+ !txq->mpw_hdr_dseg;
+ mpw.wqe->eseg.cs_flags = cs_flags;
+ } else {
+ /* Evaluate whether the next packet can be inlined.
+ * Inlininig is possible when:
+ * - length is less than configured value
+ * - length fits for remaining space
+ * - not required to fill the title WQEBB with dsegs
+ */
+ do_inline =
+ length <= txq->inline_max_packet_sz &&
+ inl_pad + sizeof(inl_hdr) + length <=
+ mpw_room &&
+ (!txq->mpw_hdr_dseg ||
+ mpw.total_len >= MLX5_WQE_SIZE);
+ }
+ if (max_inline && do_inline) {
+ /* Inline packet into WQE. */
+ unsigned int max;
+
+ assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
+ assert(length == DATA_LEN(buf));
+ inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ mpw.data.raw = (volatile void *)
+ ((uintptr_t)mpw.data.raw + inl_pad);
+ max = tx_mlx5_wq_tailroom(txq,
+ (void *)(uintptr_t)mpw.data.raw);
+ /* Copy inline header. */
+ mpw.data.raw = (volatile void *)
+ mlx5_copy_to_wq(
+ (void *)(uintptr_t)mpw.data.raw,
+ &inl_hdr,
+ sizeof(inl_hdr),
+ (void *)(uintptr_t)txq->wqes,
+ max);
+ max = tx_mlx5_wq_tailroom(txq,
+ (void *)(uintptr_t)mpw.data.raw);
+ /* Copy packet data. */
+ mpw.data.raw = (volatile void *)
+ mlx5_copy_to_wq(
+ (void *)(uintptr_t)mpw.data.raw,
+ (void *)addr,
+ length,
+ (void *)(uintptr_t)txq->wqes,
+ max);
+ ++mpw.pkts_n;
+ mpw.total_len += (inl_pad + sizeof(inl_hdr) + length);
+ /* No need to get completion as the entire packet is
+ * copied to WQ. Free the buf right away.
+ */
+ rte_pktmbuf_free_seg(buf);
+ mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
+ /* Add pad in the next packet if any. */
+ inl_pad = (((uintptr_t)mpw.data.raw +
+ (MLX5_WQE_DWORD_SIZE - 1)) &
+ ~(MLX5_WQE_DWORD_SIZE - 1)) -
+ (uintptr_t)mpw.data.raw;
+ } else {
+ /* No inline. Load a dseg of packet pointer. */
+ volatile rte_v128u32_t *dseg;
+
+ assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
+ assert((inl_pad + sizeof(*dseg)) <= mpw_room);
+ assert(length == DATA_LEN(buf));
+ if (!tx_mlx5_wq_tailroom(txq,
+ (void *)((uintptr_t)mpw.data.raw
+ + inl_pad)))
+ dseg = (volatile void *)txq->wqes;
+ else
+ dseg = (volatile void *)
+ ((uintptr_t)mpw.data.raw +
+ inl_pad);
+ (*txq->elts)[elts_head++ & elts_m] = buf;
+ addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
+ uintptr_t));
+ *dseg = (rte_v128u32_t) {
+ rte_cpu_to_be_32(length),
+ mlx5_tx_mb2mr(txq, buf),
+ addr_64,
+ addr_64 >> 32,
+ };
+ mpw.data.raw = (volatile void *)(dseg + 1);
+ mpw.total_len += (inl_pad + sizeof(*dseg));
+ ++j;
+ ++mpw.pkts_n;
+ mpw_room -= (inl_pad + sizeof(*dseg));
+ inl_pad = 0;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent bytes counter. */
+ txq->stats.obytes += length;
+#endif
+ ++i;
+ } while (i < pkts_n);
+ /* Take a shortcut if nothing must be sent. */
+ if (unlikely(i == 0))
+ return 0;
+ /* Check whether completion threshold has been reached. */
+ if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH ||
+ (uint16_t)(txq->wqe_ci - txq->mpw_comp) >=
+ (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
+ volatile struct mlx5_wqe *wqe = mpw.wqe;
+
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
+ /* Request completion on last WQE. */
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
+ /* Save elts_head in unused "immediate" field of WQE. */
+ wqe->ctrl[3] = elts_head;
+ txq->elts_comp = 0;
+ txq->mpw_comp = txq->wqe_ci;
+ } else {
+ txq->elts_comp += j;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent packets counter. */
+ txq->stats.opackets += i;
+#endif
+ if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
+ mlx5_empw_close(txq, &mpw);
+ /* Ring QP doorbell. */
+ mlx5_tx_dbrec(txq, mpw.wqe);
+ txq->elts_head = elts_head;
+ return i;
+}
+
+/**
+ * DPDK callback for TX with Enhanced MPW support.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ uint16_t nb_tx = 0;
+
+ while (pkts_n > nb_tx) {
+ uint16_t n;
+ uint16_t ret;
+
+ n = txq_count_contig_multi_seg(&pkts[nb_tx], pkts_n - nb_tx);
+ if (n) {
+ ret = mlx5_tx_burst(dpdk_txq, &pkts[nb_tx], n);
+ if (!ret)
+ break;
+ nb_tx += ret;
+ }
+ n = txq_count_contig_single_seg(&pkts[nb_tx], pkts_n - nb_tx);
+ if (n) {
+ ret = txq_burst_empw(txq, &pkts[nb_tx], n);
+ if (!ret)
+ break;
+ nb_tx += ret;
+ }
+ }
+ return nb_tx;
+}
+
+/**
+ * Translate RX completion flags to packet type.
+ *
+ * @param[in] rxq
+ * Pointer to RX queue structure.
+ * @param[in] cqe
+ * Pointer to CQE.
+ *
+ * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
+ *
+ * @return
+ * Packet type for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
+{
+ uint8_t idx;
+ uint8_t pinfo = cqe->pkt_info;
+ uint16_t ptype = cqe->hdr_type_etc;
+
+ /*
+ * The index to the array should have:
+ * bit[1:0] = l3_hdr_type
+ * bit[4:2] = l4_hdr_type
+ * bit[5] = ip_frag
+ * bit[6] = tunneled
+ * bit[7] = outer_l3_type
+ */
+ idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
+ return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
+}
+
+/**
+ * Get size of the next packet for a given CQE. For compressed CQEs, the
+ * consumer index is updated only once all packets of the current one have
+ * been processed.
+ *
+ * @param rxq
+ * Pointer to RX queue.
+ * @param cqe
+ * CQE to process.
+ * @param[out] mcqe
+ * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
+ * written.
+ *
+ * @return
+ * Packet size in bytes (0 if there is none), -1 in case of completion
+ * with error.
+ */
+static inline int
+mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
+ uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
+{
+ struct rxq_zip *zip = &rxq->zip;
+ uint16_t cqe_n = cqe_cnt + 1;
+ int len = 0;
+ uint16_t idx, end;
+
+ /* Process compressed data in the CQE and mini arrays. */
+ if (zip->ai) {
+ volatile struct mlx5_mini_cqe8 (*mc)[8] =
+ (volatile struct mlx5_mini_cqe8 (*)[8])
+ (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
+
+ len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
+ *mcqe = &(*mc)[zip->ai & 7];
+ if ((++zip->ai & 7) == 0) {
+ /* Invalidate consumed CQEs */
+ idx = zip->ca;
+ end = zip->na;
+ while (idx != end) {
+ (*rxq->cqes)[idx & cqe_cnt].op_own =
+ MLX5_CQE_INVALIDATE;
+ ++idx;
+ }
+ /*
+ * Increment consumer index to skip the number of
+ * CQEs consumed. Hardware leaves holes in the CQ
+ * ring for software use.
+ */
+ zip->ca = zip->na;
+ zip->na += 8;
+ }
+ if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
+ /* Invalidate the rest */
+ idx = zip->ca;
+ end = zip->cq_ci;
+
+ while (idx != end) {
+ (*rxq->cqes)[idx & cqe_cnt].op_own =
+ MLX5_CQE_INVALIDATE;
+ ++idx;
+ }
+ rxq->cq_ci = zip->cq_ci;
+ zip->ai = 0;
+ }
+ /* No compressed data, get next CQE and verify if it is compressed. */
+ } else {
+ int ret;
+ int8_t op_own;
+
+ ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
+ if (unlikely(ret == 1))
+ return 0;
+ ++rxq->cq_ci;
+ op_own = cqe->op_own;
+ rte_cio_rmb();
+ if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
+ volatile struct mlx5_mini_cqe8 (*mc)[8] =
+ (volatile struct mlx5_mini_cqe8 (*)[8])
+ (uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
+ cqe_cnt].pkt_info);
+
+ /* Fix endianness. */
+ zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
+ /*
+ * Current mini array position is the one returned by
+ * check_cqe64().
+ *
+ * If completion comprises several mini arrays, as a
+ * special case the second one is located 7 CQEs after
+ * the initial CQE instead of 8 for subsequent ones.
+ */
+ zip->ca = rxq->cq_ci;
+ zip->na = zip->ca + 7;
+ /* Compute the next non compressed CQE. */
+ --rxq->cq_ci;
+ zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
+ /* Get packet size to return. */
+ len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
+ *mcqe = &(*mc)[0];
+ zip->ai = 1;
+ /* Prefetch all the entries to be invalidated */
+ idx = zip->ca;
+ end = zip->cq_ci;
+ while (idx != end) {
+ rte_prefetch0(&(*rxq->cqes)[(idx) & cqe_cnt]);
+ ++idx;
+ }
+ } else {
+ len = rte_be_to_cpu_32(cqe->byte_cnt);
+ }
+ /* Error while receiving packet. */
+ if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
+ return -1;
+ }
+ return len;
+}
+
+/**
+ * Translate RX completion flags to offload flags.
+ *
+ * @param[in] cqe
+ * Pointer to CQE.
+ *
+ * @return
+ * Offload flags (ol_flags) for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
+{
+ uint32_t ol_flags = 0;
+ uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
+
+ ol_flags =
+ TRANSPOSE(flags,
+ MLX5_CQE_RX_L3_HDR_VALID,
+ PKT_RX_IP_CKSUM_GOOD) |
+ TRANSPOSE(flags,
+ MLX5_CQE_RX_L4_HDR_VALID,
+ PKT_RX_L4_CKSUM_GOOD);
+ return ol_flags;
+}
+
+/**
+ * Fill in mbuf fields from RX completion flags.
+ * Note that pkt->ol_flags should be initialized outside of this function.
+ *
+ * @param rxq
+ * Pointer to RX queue.
+ * @param pkt
+ * mbuf to fill.
+ * @param cqe
+ * CQE to process.
+ * @param rss_hash_res
+ * Packet RSS Hash result.
+ */
+static inline void
+rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
+ volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
+{
+ /* Update packet information. */
+ pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
+ if (rss_hash_res && rxq->rss_hash) {
+ pkt->hash.rss = rss_hash_res;
+ pkt->ol_flags |= PKT_RX_RSS_HASH;
+ }
+ if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
+ pkt->ol_flags |= PKT_RX_FDIR;
+ if (cqe->sop_drop_qpn !=
+ rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
+ uint32_t mark = cqe->sop_drop_qpn;
+
+ pkt->ol_flags |= PKT_RX_FDIR_ID;
+ pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
+ }
+ }
+ if (rxq->csum)
+ pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
+ if (rxq->vlan_strip &&
+ (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
+ pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
+ }
+ if (rxq->hw_timestamp) {
+ pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
+ pkt->ol_flags |= PKT_RX_TIMESTAMP;
+ }
+}
+
+/**
+ * DPDK callback for RX.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct mlx5_rxq_data *rxq = dpdk_rxq;
+ const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
+ const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
+ const unsigned int sges_n = rxq->sges_n;
+ struct rte_mbuf *pkt = NULL;
+ struct rte_mbuf *seg = NULL;
+ volatile struct mlx5_cqe *cqe =
+ &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
+ unsigned int i = 0;
+ unsigned int rq_ci = rxq->rq_ci << sges_n;
+ int len = 0; /* keep its value across iterations. */
+
+ while (pkts_n) {
+ unsigned int idx = rq_ci & wqe_cnt;
+ volatile struct mlx5_wqe_data_seg *wqe =
+ &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
+ struct rte_mbuf *rep = (*rxq->elts)[idx];
+ volatile struct mlx5_mini_cqe8 *mcqe = NULL;
+ uint32_t rss_hash_res;
+
+ if (pkt)
+ NEXT(seg) = rep;
+ seg = rep;
+ rte_prefetch0(seg);
+ rte_prefetch0(cqe);
+ rte_prefetch0(wqe);
+ rep = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(rep == NULL)) {
+ ++rxq->stats.rx_nombuf;
+ if (!pkt) {
+ /*
+ * no buffers before we even started,
+ * bail out silently.
+ */
+ break;
+ }
+ while (pkt != seg) {
+ assert(pkt != (*rxq->elts)[idx]);
+ rep = NEXT(pkt);
+ NEXT(pkt) = NULL;
+ NB_SEGS(pkt) = 1;
+ rte_mbuf_raw_free(pkt);
+ pkt = rep;
+ }
+ break;
+ }
+ if (!pkt) {
+ cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
+ len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
+ if (!len) {
+ rte_mbuf_raw_free(rep);
+ break;
+ }
+ if (unlikely(len == -1)) {
+ /* RX error, packet is likely too large. */
+ rte_mbuf_raw_free(rep);
+ ++rxq->stats.idropped;
+ goto skip;
+ }
+ pkt = seg;
+ assert(len >= (rxq->crc_present << 2));
+ pkt->ol_flags = 0;
+ /* If compressed, take hash result from mini-CQE. */
+ rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
+ cqe->rx_hash_res :
+ mcqe->rx_hash_result);
+ rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
+ if (rxq->crc_present)
+ len -= ETHER_CRC_LEN;
+ PKT_LEN(pkt) = len;
+ }
+ DATA_LEN(rep) = DATA_LEN(seg);
+ PKT_LEN(rep) = PKT_LEN(seg);
+ SET_DATA_OFF(rep, DATA_OFF(seg));
+ PORT(rep) = PORT(seg);
+ (*rxq->elts)[idx] = rep;
+ /*
+ * Fill NIC descriptor with the new buffer. The lkey and size
+ * of the buffers are already known, only the buffer address
+ * changes.
+ */
+ wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
+ /* If there's only one MR, no need to replace LKey in WQE. */
+ if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+ wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
+ if (len > DATA_LEN(seg)) {
+ len -= DATA_LEN(seg);
+ ++NB_SEGS(pkt);
+ ++rq_ci;
+ continue;
+ }
+ DATA_LEN(seg) = len;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment bytes counter. */
+ rxq->stats.ibytes += PKT_LEN(pkt);
+#endif
+ /* Return packet. */
+ *(pkts++) = pkt;
+ pkt = NULL;
+ --pkts_n;
+ ++i;
+skip:
+ /* Align consumer index to the next stride. */
+ rq_ci >>= sges_n;
+ ++rq_ci;
+ rq_ci <<= sges_n;
+ }
+ if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
+ return 0;
+ /* Update the consumer index. */
+ rxq->rq_ci = rq_ci >> sges_n;
+ rte_cio_wmb();
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+ rte_cio_wmb();
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment packets counter. */
+ rxq->stats.ipackets += i;
+#endif
+ return i;
+}
+
+void
+mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
+{
+ struct mlx5_mprq_buf *buf = opaque;
+
+ if (rte_atomic16_read(&buf->refcnt) == 1) {
+ rte_mempool_put(buf->mp, buf);
+ } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
+ rte_atomic16_set(&buf->refcnt, 1);
+ rte_mempool_put(buf->mp, buf);
+ }
+}
+
+void
+mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
+{
+ mlx5_mprq_buf_free_cb(NULL, buf);
+}
+
+static inline void
+mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
+{
+ struct mlx5_mprq_buf *rep = rxq->mprq_repl;
+ volatile struct mlx5_wqe_data_seg *wqe =
+ &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
+ void *addr;
+
+ assert(rep != NULL);
+ /* Replace MPRQ buf. */
+ (*rxq->mprq_bufs)[rq_idx] = rep;
+ /* Replace WQE. */
+ addr = mlx5_mprq_buf_addr(rep);
+ wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
+ /* If there's only one MR, no need to replace LKey in WQE. */
+ if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+ wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
+ /* Stash a mbuf for next replacement. */
+ if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
+ rxq->mprq_repl = rep;
+ else
+ rxq->mprq_repl = NULL;
+}
+
+/**
+ * DPDK callback for RX with Multi-Packet RQ support.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct mlx5_rxq_data *rxq = dpdk_rxq;
+ const unsigned int strd_n = 1 << rxq->strd_num_n;
+ const unsigned int strd_sz = 1 << rxq->strd_sz_n;
+ const unsigned int strd_shift =
+ MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
+ const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
+ const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
+ volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
+ unsigned int i = 0;
+ uint16_t rq_ci = rxq->rq_ci;
+ uint16_t consumed_strd = rxq->consumed_strd;
+ struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
+
+ while (i < pkts_n) {
+ struct rte_mbuf *pkt;
+ void *addr;
+ int ret;
+ unsigned int len;
+ uint16_t strd_cnt;
+ uint16_t strd_idx;
+ uint32_t offset;
+ uint32_t byte_cnt;
+ volatile struct mlx5_mini_cqe8 *mcqe = NULL;
+ uint32_t rss_hash_res = 0;
+
+ if (consumed_strd == strd_n) {
+ /* Replace WQE only if the buffer is still in use. */
+ if (rte_atomic16_read(&buf->refcnt) > 1) {
+ mprq_buf_replace(rxq, rq_ci & wq_mask);
+ /* Release the old buffer. */
+ mlx5_mprq_buf_free(buf);
+ } else if (unlikely(rxq->mprq_repl == NULL)) {
+ struct mlx5_mprq_buf *rep;
+
+ /*
+ * Currently, the MPRQ mempool is out of buffer
+ * and doing memcpy regardless of the size of Rx
+ * packet. Retry allocation to get back to
+ * normal.
+ */
+ if (!rte_mempool_get(rxq->mprq_mp,
+ (void **)&rep))
+ rxq->mprq_repl = rep;
+ }
+ /* Advance to the next WQE. */
+ consumed_strd = 0;
+ ++rq_ci;
+ buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
+ }
+ cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
+ ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
+ if (!ret)
+ break;
+ if (unlikely(ret == -1)) {
+ /* RX error, packet is likely too large. */
+ ++rxq->stats.idropped;
+ continue;
+ }
+ byte_cnt = ret;
+ strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
+ MLX5_MPRQ_STRIDE_NUM_SHIFT;
+ assert(strd_cnt);
+ consumed_strd += strd_cnt;
+ if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
+ continue;
+ if (mcqe == NULL) {
+ rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
+ strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
+ } else {
+ /* mini-CQE for MPRQ doesn't have hash result. */
+ strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
+ }
+ assert(strd_idx < strd_n);
+ assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
+ /*
+ * Currently configured to receive a packet per a stride. But if
+ * MTU is adjusted through kernel interface, device could
+ * consume multiple strides without raising an error. In this
+ * case, the packet should be dropped because it is bigger than
+ * the max_rx_pkt_len.
+ */
+ if (unlikely(strd_cnt > 1)) {
+ ++rxq->stats.idropped;
+ continue;
+ }
+ pkt = rte_pktmbuf_alloc(rxq->mp);
+ if (unlikely(pkt == NULL)) {
+ ++rxq->stats.rx_nombuf;
+ break;
+ }
+ len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
+ assert((int)len >= (rxq->crc_present << 2));
+ if (rxq->crc_present)
+ len -= ETHER_CRC_LEN;
+ offset = strd_idx * strd_sz + strd_shift;
+ addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset);
+ /* Initialize the offload flag. */
+ pkt->ol_flags = 0;
+ /*
+ * Memcpy packets to the target mbuf if:
+ * - The size of packet is smaller than mprq_max_memcpy_len.
+ * - Out of buffer in the Mempool for Multi-Packet RQ.
+ */
+ if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
+ /*
+ * When memcpy'ing packet due to out-of-buffer, the
+ * packet must be smaller than the target mbuf.
+ */
+ if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
+ rte_pktmbuf_free_seg(pkt);
+ ++rxq->stats.idropped;
+ continue;
+ }
+ rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
+ } else {
+ rte_iova_t buf_iova;
+ struct rte_mbuf_ext_shared_info *shinfo;
+ uint16_t buf_len = strd_cnt * strd_sz;
+
+ /* Increment the refcnt of the whole chunk. */
+ rte_atomic16_add_return(&buf->refcnt, 1);
+ assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
+ strd_n + 1);
+ addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
+ /*
+ * MLX5 device doesn't use iova but it is necessary in a
+ * case where the Rx packet is transmitted via a
+ * different PMD.
+ */
+ buf_iova = rte_mempool_virt2iova(buf) +
+ RTE_PTR_DIFF(addr, buf);
+ shinfo = rte_pktmbuf_ext_shinfo_init_helper(addr,
+ &buf_len, mlx5_mprq_buf_free_cb, buf);
+ /*
+ * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
+ * attaching the stride to mbuf and more offload flags
+ * will be added below by calling rxq_cq_to_mbuf().
+ * Other fields will be overwritten.
+ */
+ rte_pktmbuf_attach_extbuf(pkt, addr, buf_iova, buf_len,
+ shinfo);
+ rte_pktmbuf_reset_headroom(pkt);
+ assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
+ /*
+ * Prevent potential overflow due to MTU change through
+ * kernel interface.
+ */
+ if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
+ rte_pktmbuf_free_seg(pkt);
+ ++rxq->stats.idropped;
+ continue;
+ }
+ }
+ rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
+ PKT_LEN(pkt) = len;
+ DATA_LEN(pkt) = len;
+ PORT(pkt) = rxq->port_id;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment bytes counter. */
+ rxq->stats.ibytes += PKT_LEN(pkt);
+#endif
+ /* Return packet. */
+ *(pkts++) = pkt;
+ ++i;
+ }
+ /* Update the consumer indexes. */
+ rxq->consumed_strd = consumed_strd;
+ rte_cio_wmb();
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+ if (rq_ci != rxq->rq_ci) {
+ rxq->rq_ci = rq_ci;
+ rte_cio_wmb();
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment packets counter. */
+ rxq->stats.ipackets += i;
+#endif
+ return i;
+}
+
+/**
+ * Dummy DPDK callback for TX.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+removed_tx_burst(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
+{
+ return 0;
+}
+
+/**
+ * Dummy DPDK callback for RX.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+removed_rx_burst(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
+{
+ return 0;
+}
+
+/*
+ * Vectorized Rx/Tx routines are not compiled in when required vector
+ * instructions are not supported on a target architecture. The following null
+ * stubs are needed for linkage when those are not included outside of this file
+ * (e.g. mlx5_rxtx_vec_sse.c for x86).
+ */
+
+uint16_t __attribute__((weak))
+mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+int __attribute__((weak))
+mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+int __attribute__((weak))
+mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+int __attribute__((weak))
+mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
+{
+ return -ENOTSUP;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.h
new file mode 100644
index 00000000..48ed2b20
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.h
@@ -0,0 +1,856 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_RXTX_H_
+#define RTE_PMD_MLX5_RXTX_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#include <infiniband/mlx5dv.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_atomic.h>
+#include <rte_spinlock.h>
+#include <rte_io.h>
+
+#include "mlx5_utils.h"
+#include "mlx5.h"
+#include "mlx5_mr.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+
+/* Support tunnel matching. */
+#define MLX5_FLOW_TUNNEL 5
+
+struct mlx5_rxq_stats {
+ unsigned int idx; /**< Mapping index. */
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint64_t ipackets; /**< Total of successfully received packets. */
+ uint64_t ibytes; /**< Total of successfully received bytes. */
+#endif
+ uint64_t idropped; /**< Total of packets dropped when RX ring full. */
+ uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
+};
+
+struct mlx5_txq_stats {
+ unsigned int idx; /**< Mapping index. */
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint64_t opackets; /**< Total of successfully sent packets. */
+ uint64_t obytes; /**< Total of successfully sent bytes. */
+#endif
+ uint64_t oerrors; /**< Total number of failed transmitted packets. */
+};
+
+struct priv;
+
+/* Compressed CQE context. */
+struct rxq_zip {
+ uint16_t ai; /* Array index. */
+ uint16_t ca; /* Current array index. */
+ uint16_t na; /* Next array index. */
+ uint16_t cq_ci; /* The next CQE. */
+ uint32_t cqe_cnt; /* Number of CQEs. */
+};
+
+/* Multi-Packet RQ buffer header. */
+struct mlx5_mprq_buf {
+ struct rte_mempool *mp;
+ rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
+ uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
+} __rte_cache_aligned;
+
+/* Get pointer to the first stride. */
+#define mlx5_mprq_buf_addr(ptr) ((ptr) + 1)
+
+/* RX queue descriptor. */
+struct mlx5_rxq_data {
+ unsigned int csum:1; /* Enable checksum offloading. */
+ unsigned int hw_timestamp:1; /* Enable HW timestamp. */
+ unsigned int vlan_strip:1; /* Enable VLAN stripping. */
+ unsigned int crc_present:1; /* CRC must be subtracted. */
+ unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
+ unsigned int cqe_n:4; /* Log 2 of CQ elements. */
+ unsigned int elts_n:4; /* Log 2 of Mbufs. */
+ unsigned int rss_hash:1; /* RSS hash result is enabled. */
+ unsigned int mark:1; /* Marked flow available on the queue. */
+ unsigned int strd_num_n:5; /* Log 2 of the number of stride. */
+ unsigned int strd_sz_n:4; /* Log 2 of stride size. */
+ unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
+ unsigned int :6; /* Remaining bits. */
+ volatile uint32_t *rq_db;
+ volatile uint32_t *cq_db;
+ uint16_t port_id;
+ uint16_t rq_ci;
+ uint16_t consumed_strd; /* Number of consumed strides in WQE. */
+ uint16_t rq_pi;
+ uint16_t cq_ci;
+ struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
+ uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
+ volatile void *wqes;
+ volatile struct mlx5_cqe(*cqes)[];
+ struct rxq_zip zip; /* Compressed context. */
+ RTE_STD_C11
+ union {
+ struct rte_mbuf *(*elts)[];
+ struct mlx5_mprq_buf *(*mprq_bufs)[];
+ };
+ struct rte_mempool *mp;
+ struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
+ struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
+ struct mlx5_rxq_stats stats;
+ uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
+ struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
+ void *cq_uar; /* CQ user access region. */
+ uint32_t cqn; /* CQ number. */
+ uint8_t cq_arm_sn; /* CQ arm seq number. */
+#ifndef RTE_ARCH_64
+ rte_spinlock_t *uar_lock_cq;
+ /* CQ (UAR) access lock required for 32bit implementations */
+#endif
+ uint32_t tunnel; /* Tunnel information. */
+} __rte_cache_aligned;
+
+/* Verbs Rx queue elements. */
+struct mlx5_rxq_ibv {
+ LIST_ENTRY(mlx5_rxq_ibv) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
+ struct ibv_cq *cq; /* Completion Queue. */
+ struct ibv_wq *wq; /* Work Queue. */
+ struct ibv_comp_channel *channel;
+};
+
+/* RX queue control descriptor. */
+struct mlx5_rxq_ctrl {
+ LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
+ struct priv *priv; /* Back pointer to private data. */
+ struct mlx5_rxq_data rxq; /* Data path structure. */
+ unsigned int socket; /* CPU socket ID for allocations. */
+ unsigned int irq:1; /* Whether IRQ is enabled. */
+ uint16_t idx; /* Queue index. */
+ uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
+ uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
+};
+
+/* Indirection table. */
+struct mlx5_ind_table_ibv {
+ LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
+ uint32_t queues_n; /**< Number of queues in the list. */
+ uint16_t queues[]; /**< Queue list. */
+};
+
+/* Hash Rx queue. */
+struct mlx5_hrxq {
+ LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
+ struct ibv_qp *qp; /* Verbs queue pair. */
+ uint64_t hash_fields; /* Verbs Hash fields. */
+ uint32_t rss_key_len; /* Hash key length in bytes. */
+ uint8_t rss_key[]; /* Hash key. */
+};
+
+/* TX queue descriptor. */
+__extension__
+struct mlx5_txq_data {
+ uint16_t elts_head; /* Current counter in (*elts)[]. */
+ uint16_t elts_tail; /* Counter of first element awaiting completion. */
+ uint16_t elts_comp; /* Counter since last completion request. */
+ uint16_t mpw_comp; /* WQ index since last completion request. */
+ uint16_t cq_ci; /* Consumer index for completion queue. */
+#ifndef NDEBUG
+ uint16_t cq_pi; /* Producer index for completion queue. */
+#endif
+ uint16_t wqe_ci; /* Consumer index for work queue. */
+ uint16_t wqe_pi; /* Producer index for work queue. */
+ uint16_t elts_n:4; /* (*elts)[] length (in log2). */
+ uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
+ uint16_t wqe_n:4; /* Number of of WQ elements (in log2). */
+ uint16_t tso_en:1; /* When set hardware TSO is enabled. */
+ uint16_t tunnel_en:1;
+ /* When set TX offload for tunneled packets are supported. */
+ uint16_t swp_en:1; /* Whether SW parser is enabled. */
+ uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
+ uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
+ uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
+ uint32_t qp_num_8s; /* QP number shifted by 8. */
+ uint64_t offloads; /* Offloads for Tx Queue. */
+ struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
+ volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
+ volatile void *wqes; /* Work queue (use volatile to write into). */
+ volatile uint32_t *qp_db; /* Work queue doorbell. */
+ volatile uint32_t *cq_db; /* Completion queue doorbell. */
+ volatile void *bf_reg; /* Blueflame register remapped. */
+ struct rte_mbuf *(*elts)[]; /* TX elements. */
+ struct mlx5_txq_stats stats; /* TX queue counters. */
+#ifndef RTE_ARCH_64
+ rte_spinlock_t *uar_lock;
+ /* UAR access lock required for 32bit implementations */
+#endif
+} __rte_cache_aligned;
+
+/* Verbs Rx queue elements. */
+struct mlx5_txq_ibv {
+ LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
+ struct ibv_cq *cq; /* Completion Queue. */
+ struct ibv_qp *qp; /* Queue Pair. */
+};
+
+/* TX queue control descriptor. */
+struct mlx5_txq_ctrl {
+ LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ unsigned int socket; /* CPU socket ID for allocations. */
+ unsigned int max_inline_data; /* Max inline data. */
+ unsigned int max_tso_header; /* Max TSO header size. */
+ struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
+ struct priv *priv; /* Back pointer to private data. */
+ struct mlx5_txq_data txq; /* Data path structure. */
+ off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
+ volatile void *bf_reg_orig; /* Blueflame register from verbs. */
+ uint16_t idx; /* Queue index. */
+};
+
+/* mlx5_rxq.c */
+
+extern uint8_t rss_hash_default_key[];
+
+int mlx5_check_mprq_support(struct rte_eth_dev *dev);
+int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
+int mlx5_mprq_enabled(struct rte_eth_dev *dev);
+int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
+int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
+void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl);
+int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp);
+void mlx5_rx_queue_release(void *dpdk_rxq);
+int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
+void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
+int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
+int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev);
+int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp);
+struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_verify(struct rte_eth_dev *dev);
+int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
+int rxq_alloc_mprq_buf(struct mlx5_rxq_ctrl *rxq_ctrl);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev,
+ const uint16_t *queues,
+ uint32_t queues_n);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
+ const uint16_t *queues,
+ uint32_t queues_n);
+int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_ibv *ind_tbl);
+int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev);
+struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n,
+ int tunnel __rte_unused);
+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n);
+int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
+int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
+void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
+uint64_t mlx5_get_rx_port_offloads(void);
+uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
+
+/* mlx5_txq.c */
+
+int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf);
+void mlx5_tx_queue_release(void *dpdk_txq);
+int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd);
+struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
+int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv);
+int mlx5_txq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_txconf *conf);
+struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_verify(struct rte_eth_dev *dev);
+void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
+uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
+
+/* mlx5_rxtx.c */
+
+extern uint32_t mlx5_ptype_table[];
+extern uint8_t mlx5_cksum_table[];
+extern uint8_t mlx5_swp_types_table[];
+
+void mlx5_set_ptype_table(void);
+void mlx5_set_cksum_table(void);
+void mlx5_set_swp_types_table(void);
+uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
+void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
+void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
+uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
+/* Vectorized version of mlx5_rxtx.c */
+int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
+int mlx5_check_vec_tx_support(struct rte_eth_dev *dev);
+int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
+int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
+uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+
+/* mlx5_mr.c */
+
+void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
+uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
+uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
+
+/**
+ * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
+ * 64bit architectures.
+ *
+ * @param val
+ * value to write in CPU endian format.
+ * @param addr
+ * Address to write to.
+ * @param lock
+ * Address of the lock to use for that UAR access.
+ */
+static __rte_always_inline void
+__mlx5_uar_write64_relaxed(uint64_t val, volatile void *addr,
+ rte_spinlock_t *lock __rte_unused)
+{
+#ifdef RTE_ARCH_64
+ rte_write64_relaxed(val, addr);
+#else /* !RTE_ARCH_64 */
+ rte_spinlock_lock(lock);
+ rte_write32_relaxed(val, addr);
+ rte_io_wmb();
+ rte_write32_relaxed(val >> 32,
+ (volatile void *)((volatile char *)addr + 4));
+ rte_spinlock_unlock(lock);
+#endif
+}
+
+/**
+ * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
+ * 64bit architectures while guaranteeing the order of execution with the
+ * code being executed.
+ *
+ * @param val
+ * value to write in CPU endian format.
+ * @param addr
+ * Address to write to.
+ * @param lock
+ * Address of the lock to use for that UAR access.
+ */
+static __rte_always_inline void
+__mlx5_uar_write64(uint64_t val, volatile void *addr, rte_spinlock_t *lock)
+{
+ rte_io_wmb();
+ __mlx5_uar_write64_relaxed(val, addr, lock);
+}
+
+/* Assist macros, used instead of directly calling the functions they wrap. */
+#ifdef RTE_ARCH_64
+#define mlx5_uar_write64_relaxed(val, dst, lock) \
+ __mlx5_uar_write64_relaxed(val, dst, NULL)
+#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
+#else
+#define mlx5_uar_write64_relaxed(val, dst, lock) \
+ __mlx5_uar_write64_relaxed(val, dst, lock)
+#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
+#endif
+
+#ifndef NDEBUG
+/**
+ * Verify or set magic value in CQE.
+ *
+ * @param cqe
+ * Pointer to CQE.
+ *
+ * @return
+ * 0 the first time.
+ */
+static inline int
+check_cqe_seen(volatile struct mlx5_cqe *cqe)
+{
+ static const uint8_t magic[] = "seen";
+ volatile uint8_t (*buf)[sizeof(cqe->rsvd1)] = &cqe->rsvd1;
+ int ret = 1;
+ unsigned int i;
+
+ for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
+ if (!ret || (*buf)[i] != magic[i]) {
+ ret = 0;
+ (*buf)[i] = magic[i];
+ }
+ return ret;
+}
+#endif /* NDEBUG */
+
+/**
+ * Check whether CQE is valid.
+ *
+ * @param cqe
+ * Pointer to CQE.
+ * @param cqes_n
+ * Size of completion queue.
+ * @param ci
+ * Consumer index.
+ *
+ * @return
+ * 0 on success, 1 on failure.
+ */
+static __rte_always_inline int
+check_cqe(volatile struct mlx5_cqe *cqe,
+ unsigned int cqes_n, const uint16_t ci)
+{
+ uint16_t idx = ci & cqes_n;
+ uint8_t op_own = cqe->op_own;
+ uint8_t op_owner = MLX5_CQE_OWNER(op_own);
+ uint8_t op_code = MLX5_CQE_OPCODE(op_own);
+
+ if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
+ return 1; /* No CQE. */
+#ifndef NDEBUG
+ if ((op_code == MLX5_CQE_RESP_ERR) ||
+ (op_code == MLX5_CQE_REQ_ERR)) {
+ volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
+ uint8_t syndrome = err_cqe->syndrome;
+
+ if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
+ (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
+ return 0;
+ if (!check_cqe_seen(cqe)) {
+ DRV_LOG(ERR,
+ "unexpected CQE error %u (0x%02x) syndrome"
+ " 0x%02x",
+ op_code, op_code, syndrome);
+ rte_hexdump(stderr, "MLX5 Error CQE:",
+ (const void *)((uintptr_t)err_cqe),
+ sizeof(*err_cqe));
+ }
+ return 1;
+ } else if ((op_code != MLX5_CQE_RESP_SEND) &&
+ (op_code != MLX5_CQE_REQ)) {
+ if (!check_cqe_seen(cqe)) {
+ DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)",
+ op_code, op_code);
+ rte_hexdump(stderr, "MLX5 CQE:",
+ (const void *)((uintptr_t)cqe),
+ sizeof(*cqe));
+ }
+ return 1;
+ }
+#endif /* NDEBUG */
+ return 0;
+}
+
+/**
+ * Return the address of the WQE.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param wqe_ci
+ * WQE consumer index.
+ *
+ * @return
+ * WQE address.
+ */
+static inline uintptr_t *
+tx_mlx5_wqe(struct mlx5_txq_data *txq, uint16_t ci)
+{
+ ci &= ((1 << txq->wqe_n) - 1);
+ return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
+}
+
+/**
+ * Manage TX completions.
+ *
+ * When sending a burst, mlx5_tx_burst() posts several WRs.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ */
+static __rte_always_inline void
+mlx5_tx_complete(struct mlx5_txq_data *txq)
+{
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ const unsigned int cqe_n = 1 << txq->cqe_n;
+ const unsigned int cqe_cnt = cqe_n - 1;
+ uint16_t elts_free = txq->elts_tail;
+ uint16_t elts_tail;
+ uint16_t cq_ci = txq->cq_ci;
+ volatile struct mlx5_cqe *cqe = NULL;
+ volatile struct mlx5_wqe_ctrl *ctrl;
+ struct rte_mbuf *m, *free[elts_n];
+ struct rte_mempool *pool = NULL;
+ unsigned int blk_n = 0;
+
+ cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
+ if (unlikely(check_cqe(cqe, cqe_n, cq_ci)))
+ return;
+#ifndef NDEBUG
+ if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
+ (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
+ if (!check_cqe_seen(cqe)) {
+ DRV_LOG(ERR, "unexpected error CQE, Tx stopped");
+ rte_hexdump(stderr, "MLX5 TXQ:",
+ (const void *)((uintptr_t)txq->wqes),
+ ((1 << txq->wqe_n) *
+ MLX5_WQE_SIZE));
+ }
+ return;
+ }
+#endif /* NDEBUG */
+ ++cq_ci;
+ txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
+ ctrl = (volatile struct mlx5_wqe_ctrl *)
+ tx_mlx5_wqe(txq, txq->wqe_pi);
+ elts_tail = ctrl->ctrl3;
+ assert((elts_tail & elts_m) < (1 << txq->wqe_n));
+ /* Free buffers. */
+ while (elts_free != elts_tail) {
+ m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == pool)) {
+ free[blk_n++] = m;
+ } else {
+ if (likely(pool != NULL))
+ rte_mempool_put_bulk(pool,
+ (void *)free,
+ blk_n);
+ free[0] = m;
+ pool = m->pool;
+ blk_n = 1;
+ }
+ }
+ }
+ if (blk_n)
+ rte_mempool_put_bulk(pool, (void *)free, blk_n);
+#ifndef NDEBUG
+ elts_free = txq->elts_tail;
+ /* Poisoning. */
+ while (elts_free != elts_tail) {
+ memset(&(*txq->elts)[elts_free & elts_m],
+ 0x66,
+ sizeof((*txq->elts)[elts_free & elts_m]));
+ ++elts_free;
+ }
+#endif
+ txq->cq_ci = cq_ci;
+ txq->elts_tail = elts_tail;
+ /* Update the consumer index. */
+ rte_compiler_barrier();
+ *txq->cq_db = rte_cpu_to_be_32(cq_ci);
+}
+
+/**
+ * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
+ * as mempool is pre-configured and static.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Address to search.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
+{
+ struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ uint32_t lkey;
+
+ /* Linear search on MR cache array. */
+ lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX5_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /* Take slower bottom-half (Binary Search) on miss. */
+ return mlx5_rx_addr2mr_bh(rxq, addr);
+}
+
+#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+
+/**
+ * Query LKey from a packet buffer for Tx. If not found, add the mempool.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Address to search.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
+{
+ struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ uint32_t lkey;
+
+ /* Check generation bit to see if there's any change on existing MRs. */
+ if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
+ mlx5_mr_flush_local_cache(mr_ctrl);
+ /* Linear search on MR cache array. */
+ lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX5_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /* Take slower bottom-half (binary search) on miss. */
+ return mlx5_tx_addr2mr_bh(txq, addr);
+}
+
+#define mlx5_tx_mb2mr(rxq, mb) mlx5_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+
+/**
+ * Ring TX queue doorbell and flush the update if requested.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param wqe
+ * Pointer to the last WQE posted in the NIC.
+ * @param cond
+ * Request for write memory barrier after BlueFlame update.
+ */
+static __rte_always_inline void
+mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
+ int cond)
+{
+ uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
+ volatile uint64_t *src = ((volatile uint64_t *)wqe);
+
+ rte_cio_wmb();
+ *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
+ /* Ensure ordering between DB record and BF copy. */
+ rte_wmb();
+ mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
+ if (cond)
+ rte_wmb();
+}
+
+/**
+ * Ring TX queue doorbell and flush the update by write memory barrier.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param wqe
+ * Pointer to the last WQE posted in the NIC.
+ */
+static __rte_always_inline void
+mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
+{
+ mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
+}
+
+/**
+ * Convert mbuf to Verb SWP.
+ *
+ * @param txq_data
+ * Pointer to the Tx queue.
+ * @param buf
+ * Pointer to the mbuf.
+ * @param tso
+ * TSO offloads enabled.
+ * @param vlan
+ * VLAN offloads enabled
+ * @param offsets
+ * Pointer to the SWP header offsets.
+ * @param swp_types
+ * Pointer to the SWP header types.
+ */
+static __rte_always_inline void
+txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+ uint8_t *offsets, uint8_t *swp_types)
+{
+ const uint64_t vlan = buf->ol_flags & PKT_TX_VLAN_PKT;
+ const uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
+ const uint64_t tso = buf->ol_flags & PKT_TX_TCP_SEG;
+ const uint64_t csum_flags = buf->ol_flags & PKT_TX_L4_MASK;
+ const uint64_t inner_ip =
+ buf->ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6);
+ const uint64_t ol_flags_mask = PKT_TX_L4_MASK | PKT_TX_IPV6 |
+ PKT_TX_OUTER_IPV6;
+ uint16_t idx;
+ uint16_t off;
+
+ if (likely(!txq->swp_en || (tunnel != PKT_TX_TUNNEL_UDP &&
+ tunnel != PKT_TX_TUNNEL_IP)))
+ return;
+ /*
+ * The index should have:
+ * bit[0:1] = PKT_TX_L4_MASK
+ * bit[4] = PKT_TX_IPV6
+ * bit[8] = PKT_TX_OUTER_IPV6
+ * bit[9] = PKT_TX_OUTER_UDP
+ */
+ idx = (buf->ol_flags & ol_flags_mask) >> 52;
+ if (tunnel == PKT_TX_TUNNEL_UDP)
+ idx |= 1 << 9;
+ *swp_types = mlx5_swp_types_table[idx];
+ /*
+ * Set offsets for SW parser. Since ConnectX-5, SW parser just
+ * complements HW parser. SW parser starts to engage only if HW parser
+ * can't reach a header. For the older devices, HW parser will not kick
+ * in if any of SWP offsets is set. Therefore, all of the L3 offsets
+ * should be set regardless of HW offload.
+ */
+ off = buf->outer_l2_len + (vlan ? sizeof(struct vlan_hdr) : 0);
+ offsets[1] = off >> 1; /* Outer L3 offset. */
+ off += buf->outer_l3_len;
+ if (tunnel == PKT_TX_TUNNEL_UDP)
+ offsets[0] = off >> 1; /* Outer L4 offset. */
+ if (inner_ip) {
+ off += buf->l2_len;
+ offsets[3] = off >> 1; /* Inner L3 offset. */
+ if (csum_flags == PKT_TX_TCP_CKSUM || tso ||
+ csum_flags == PKT_TX_UDP_CKSUM) {
+ off += buf->l3_len;
+ offsets[2] = off >> 1; /* Inner L4 offset. */
+ }
+ }
+}
+
+/**
+ * Convert the Checksum offloads to Verbs.
+ *
+ * @param buf
+ * Pointer to the mbuf.
+ *
+ * @return
+ * Converted checksum flags.
+ */
+static __rte_always_inline uint8_t
+txq_ol_cksum_to_cs(struct rte_mbuf *buf)
+{
+ uint32_t idx;
+ uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
+ const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
+
+ /*
+ * The index should have:
+ * bit[0] = PKT_TX_TCP_SEG
+ * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+ * bit[4] = PKT_TX_IP_CKSUM
+ * bit[8] = PKT_TX_OUTER_IP_CKSUM
+ * bit[9] = tunnel
+ */
+ idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
+ return mlx5_cksum_table[idx];
+}
+
+/**
+ * Count the number of contiguous single segment packets.
+ *
+ * @param pkts
+ * Pointer to array of packets.
+ * @param pkts_n
+ * Number of packets.
+ *
+ * @return
+ * Number of contiguous single segment packets.
+ */
+static __rte_always_inline unsigned int
+txq_count_contig_single_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ unsigned int pos;
+
+ if (!pkts_n)
+ return 0;
+ /* Count the number of contiguous single segment packets. */
+ for (pos = 0; pos < pkts_n; ++pos)
+ if (NB_SEGS(pkts[pos]) > 1)
+ break;
+ return pos;
+}
+
+/**
+ * Count the number of contiguous multi-segment packets.
+ *
+ * @param pkts
+ * Pointer to array of packets.
+ * @param pkts_n
+ * Number of packets.
+ *
+ * @return
+ * Number of contiguous multi-segment packets.
+ */
+static __rte_always_inline unsigned int
+txq_count_contig_multi_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ unsigned int pos;
+
+ if (!pkts_n)
+ return 0;
+ /* Count the number of contiguous multi-segment packets. */
+ for (pos = 0; pos < pkts_n; ++pos)
+ if (NB_SEGS(pkts[pos]) == 1)
+ break;
+ return pos;
+}
+
+#endif /* RTE_PMD_MLX5_RXTX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c
new file mode 100644
index 00000000..0a4aed8f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#include <infiniband/mlx5dv.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_rxtx_vec.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+
+#if defined RTE_ARCH_X86_64
+#include "mlx5_rxtx_vec_sse.h"
+#elif defined RTE_ARCH_ARM64
+#include "mlx5_rxtx_vec_neon.h"
+#else
+#error "This should not be compiled if SIMD instructions are not supported."
+#endif
+
+/**
+ * Count the number of packets having same ol_flags and calculate cs_flags.
+ *
+ * @param pkts
+ * Pointer to array of packets.
+ * @param pkts_n
+ * Number of packets.
+ * @param cs_flags
+ * Pointer of flags to be returned.
+ *
+ * @return
+ * Number of packets having same ol_flags.
+ */
+static inline unsigned int
+txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags)
+{
+ unsigned int pos;
+ const uint64_t ol_mask =
+ PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
+ PKT_TX_UDP_CKSUM | PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM;
+
+ if (!pkts_n)
+ return 0;
+ /* Count the number of packets having same ol_flags. */
+ for (pos = 1; pos < pkts_n; ++pos)
+ if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
+ break;
+ *cs_flags = txq_ol_cksum_to_cs(pkts[0]);
+ return pos;
+}
+
+/**
+ * DPDK callback for vectorized TX.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ uint16_t nb_tx = 0;
+
+ while (pkts_n > nb_tx) {
+ uint16_t n;
+ uint16_t ret;
+
+ n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
+ ret = txq_burst_v(txq, &pkts[nb_tx], n, 0);
+ nb_tx += ret;
+ if (!ret)
+ break;
+ }
+ return nb_tx;
+}
+
+/**
+ * DPDK callback for vectorized TX with multi-seg packets and offload.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ uint16_t nb_tx = 0;
+
+ while (pkts_n > nb_tx) {
+ uint8_t cs_flags = 0;
+ uint16_t n;
+ uint16_t ret;
+
+ /* Transmit multi-seg packets in the head of pkts list. */
+ if ((txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+ NB_SEGS(pkts[nb_tx]) > 1)
+ nb_tx += txq_scatter_v(txq,
+ &pkts[nb_tx],
+ pkts_n - nb_tx);
+ n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
+ if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ n = txq_count_contig_single_seg(&pkts[nb_tx], n);
+ if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
+ n = txq_calc_offload(&pkts[nb_tx], n, &cs_flags);
+ ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
+ nb_tx += ret;
+ if (!ret)
+ break;
+ }
+ return nb_tx;
+}
+
+/**
+ * Skip error packets.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+static uint16_t
+rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ uint16_t n = 0;
+ unsigned int i;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t err_bytes = 0;
+#endif
+
+ for (i = 0; i < pkts_n; ++i) {
+ struct rte_mbuf *pkt = pkts[i];
+
+ if (pkt->packet_type == RTE_PTYPE_ALL_MASK) {
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ err_bytes += PKT_LEN(pkt);
+#endif
+ rte_pktmbuf_free_seg(pkt);
+ } else {
+ pkts[n++] = pkt;
+ }
+ }
+ rxq->stats.idropped += (pkts_n - n);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Correct counters of errored completions. */
+ rxq->stats.ipackets -= (pkts_n - n);
+ rxq->stats.ibytes -= err_bytes;
+#endif
+ return n;
+}
+
+/**
+ * DPDK callback for vectorized RX.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct mlx5_rxq_data *rxq = dpdk_rxq;
+ uint16_t nb_rx;
+ uint64_t err = 0;
+
+ nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
+ if (unlikely(err))
+ nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
+ return nb_rx;
+}
+
+/**
+ * Check Tx queue flags are set for raw vectorized Tx.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev)
+{
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
+ /* Doesn't support any offload. */
+ if (offloads)
+ return -ENOTSUP;
+ return 1;
+}
+
+/**
+ * Check a device can support vectorized TX.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+mlx5_check_vec_tx_support(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
+ if (!priv->config.tx_vec_en ||
+ priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
+ priv->config.mps != MLX5_MPW_ENHANCED ||
+ offloads & ~MLX5_VEC_TX_OFFLOAD_CAP)
+ return -ENOTSUP;
+ return 1;
+}
+
+/**
+ * Check a RX queue can support vectorized RX.
+ *
+ * @param rxq
+ * Pointer to RX queue.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
+{
+ struct mlx5_rxq_ctrl *ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+
+ if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv)))
+ return -ENOTSUP;
+ if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
+ return -ENOTSUP;
+ return 1;
+}
+
+/**
+ * Check a device can support vectorized RX.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint16_t i;
+
+ if (!priv->config.rx_vec_en)
+ return -ENOTSUP;
+ if (mlx5_mprq_enabled(dev))
+ return -ENOTSUP;
+ /* All the configured queues should support. */
+ for (i = 0; i < priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (!rxq)
+ continue;
+ if (mlx5_rxq_check_vec_support(rxq) < 0)
+ break;
+ }
+ if (i != priv->rxqs_n)
+ return -ENOTSUP;
+ return 1;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.h
new file mode 100644
index 00000000..fb884f92
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
+#define RTE_PMD_MLX5_RXTX_VEC_H_
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+
+#include "mlx5_autoconf.h"
+#include "mlx5_prm.h"
+
+/* HW checksum offload capabilities of vectorized Tx. */
+#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
+ (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+
+/* HW offload capabilities of vectorized Tx. */
+#define MLX5_VEC_TX_OFFLOAD_CAP \
+ (MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
+/*
+ * Compile time sanity check for vectorized functions.
+ */
+
+#define S_ASSERT_RTE_MBUF(s) \
+ static_assert(s, "A field of struct rte_mbuf is changed")
+#define S_ASSERT_MLX5_CQE(s) \
+ static_assert(s, "A field of struct mlx5_cqe is changed")
+
+/* rxq_cq_decompress_v() */
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+/* rxq_cq_to_ptype_oflags_v() */
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+
+/* rxq_burst_v() */
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+#if (RTE_CACHE_LINE_SIZE == 128)
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
+#else
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
+#endif
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
+ offsetof(struct mlx5_cqe, pkt_info) + 12);
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) +
+ sizeof(((struct mlx5_cqe *)0)->rsvd1) ==
+ offsetof(struct mlx5_cqe, hdr_type_etc));
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
+ offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd2) +
+ sizeof(((struct mlx5_cqe *)0)->rsvd2) ==
+ offsetof(struct mlx5_cqe, byte_cnt));
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
+ RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
+ offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
+
+/**
+ * Replenish buffers for RX in bulk.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param n
+ * Number of buffers to be replenished.
+ */
+static inline void
+mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
+{
+ const uint16_t q_n = 1 << rxq->elts_n;
+ const uint16_t q_mask = q_n - 1;
+ uint16_t elts_idx = rxq->rq_ci & q_mask;
+ struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
+ volatile struct mlx5_wqe_data_seg *wq =
+ &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
+ unsigned int i;
+
+ assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
+ assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
+ assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
+ /* Not to cross queue end. */
+ n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
+ if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
+ rxq->stats.rx_nombuf += n;
+ return;
+ }
+ for (i = 0; i < n; ++i) {
+ wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
+ RTE_PKTMBUF_HEADROOM);
+ /* If there's only one MR, no need to replace LKey in WQE. */
+ if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+ wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
+ }
+ rxq->rq_ci += n;
+ /* Prevent overflowing into consumed mbufs. */
+ elts_idx = rxq->rq_ci & q_mask;
+ for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+ (*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
+ rte_cio_wmb();
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+}
+
+#endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
new file mode 100644
index 00000000..b37b7381
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -0,0 +1,1017 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_
+#define RTE_PMD_MLX5_RXTX_VEC_NEON_H_
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <arm_neon.h>
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_rxtx_vec.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+/**
+ * Fill in buffer descriptors in a multi-packet send descriptor.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param dseg
+ * Pointer to buffer descriptor to be written.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param n
+ * Number of packets to be filled.
+ */
+static inline void
+txq_wr_dseg_v(struct mlx5_txq_data *txq, uint8_t *dseg,
+ struct rte_mbuf **pkts, unsigned int n)
+{
+ unsigned int pos;
+ uintptr_t addr;
+ const uint8x16_t dseg_shuf_m = {
+ 3, 2, 1, 0, /* length, bswap32 */
+ 4, 5, 6, 7, /* lkey */
+ 15, 14, 13, 12, /* addr, bswap64 */
+ 11, 10, 9, 8
+ };
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t tx_byte = 0;
+#endif
+
+ for (pos = 0; pos < n; ++pos, dseg += MLX5_WQE_DWORD_SIZE) {
+ uint8x16_t desc;
+ struct rte_mbuf *pkt = pkts[pos];
+
+ addr = rte_pktmbuf_mtod(pkt, uintptr_t);
+ desc = vreinterpretq_u8_u32((uint32x4_t) {
+ DATA_LEN(pkt),
+ mlx5_tx_mb2mr(txq, pkt),
+ addr,
+ addr >> 32 });
+ desc = vqtbl1q_u8(desc, dseg_shuf_m);
+ vst1q_u8(dseg, desc);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ tx_byte += DATA_LEN(pkt);
+#endif
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.obytes += tx_byte;
+#endif
+}
+
+/**
+ * Send multi-segmented packets until it encounters a single segment packet in
+ * the pkts list.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param pkts_n
+ * Number of packets to be sent.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+static uint16_t
+txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ const uint16_t wq_n = 1 << txq->wqe_n;
+ const uint16_t wq_mask = wq_n - 1;
+ const unsigned int nb_dword_per_wqebb =
+ MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
+ const unsigned int nb_dword_in_hdr =
+ sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
+ unsigned int n;
+ volatile struct mlx5_wqe *wqe = NULL;
+
+ assert(elts_n > pkts_n);
+ mlx5_tx_complete(txq);
+ if (unlikely(!pkts_n))
+ return 0;
+ for (n = 0; n < pkts_n; ++n) {
+ struct rte_mbuf *buf = pkts[n];
+ unsigned int segs_n = buf->nb_segs;
+ unsigned int ds = nb_dword_in_hdr;
+ unsigned int len = PKT_LEN(buf);
+ uint16_t wqe_ci = txq->wqe_ci;
+ const uint8x16_t ctrl_shuf_m = {
+ 3, 2, 1, 0, /* bswap32 */
+ 7, 6, 5, 4, /* bswap32 */
+ 11, 10, 9, 8, /* bswap32 */
+ 12, 13, 14, 15
+ };
+ uint8_t cs_flags;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ uint8x16_t *t_wqe;
+ uint8_t *dseg;
+ uint8x16_t ctrl;
+
+ assert(segs_n);
+ max_elts = elts_n - (elts_head - txq->elts_tail);
+ max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
+ /*
+ * A MPW session consumes 2 WQEs at most to
+ * include MLX5_MPW_DSEG_MAX pointers.
+ */
+ if (segs_n == 1 ||
+ max_elts < segs_n || max_wqe < 2)
+ break;
+ wqe = &((volatile struct mlx5_wqe64 *)
+ txq->wqes)[wqe_ci & wq_mask].hdr;
+ cs_flags = txq_ol_cksum_to_cs(buf);
+ /* Title WQEBB pointer. */
+ t_wqe = (uint8x16_t *)wqe;
+ dseg = (uint8_t *)(wqe + 1);
+ do {
+ if (!(ds++ % nb_dword_per_wqebb)) {
+ dseg = (uint8_t *)
+ &((volatile struct mlx5_wqe64 *)
+ txq->wqes)[++wqe_ci & wq_mask];
+ }
+ txq_wr_dseg_v(txq, dseg, &buf, 1);
+ dseg += MLX5_WQE_DWORD_SIZE;
+ (*txq->elts)[elts_head++ & elts_m] = buf;
+ buf = buf->next;
+ } while (--segs_n);
+ ++wqe_ci;
+ /* Fill CTRL in the header. */
+ ctrl = vreinterpretq_u8_u32((uint32x4_t) {
+ MLX5_OPC_MOD_MPW << 24 |
+ txq->wqe_ci << 8 | MLX5_OPCODE_TSO,
+ txq->qp_num_8s | ds, 0, 0});
+ ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
+ vst1q_u8((void *)t_wqe, ctrl);
+ /* Fill ESEG in the header. */
+ vst1q_u16((void *)(t_wqe + 1),
+ ((uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len),
+ 0, 0, 0, 0 }));
+ txq->wqe_ci = wqe_ci;
+ }
+ if (!n)
+ return 0;
+ txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
+ txq->elts_head = elts_head;
+ if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
+ wqe->ctrl[3] = txq->elts_head;
+ txq->elts_comp = 0;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.opackets += n;
+#endif
+ mlx5_tx_dbrec(txq, wqe);
+ return n;
+}
+
+/**
+ * Send burst of packets with Enhanced MPW. If it encounters a multi-seg packet,
+ * it returns to make it processed by txq_scatter_v(). All the packets in
+ * the pkts list should be single segment packets having same offload flags.
+ * This must be checked by txq_count_contig_single_seg() and txq_calc_offload().
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param pkts_n
+ * Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
+ * @param cs_flags
+ * Checksum offload flags to be written in the descriptor.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+static inline uint16_t
+txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
+ uint8_t cs_flags)
+{
+ struct rte_mbuf **elts;
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ const unsigned int nb_dword_per_wqebb =
+ MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
+ const unsigned int nb_dword_in_hdr =
+ sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
+ unsigned int n = 0;
+ unsigned int pos;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ uint32_t comp_req = 0;
+ const uint16_t wq_n = 1 << txq->wqe_n;
+ const uint16_t wq_mask = wq_n - 1;
+ uint16_t wq_idx = txq->wqe_ci & wq_mask;
+ volatile struct mlx5_wqe64 *wq =
+ &((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
+ volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
+ const uint8x16_t ctrl_shuf_m = {
+ 3, 2, 1, 0, /* bswap32 */
+ 7, 6, 5, 4, /* bswap32 */
+ 11, 10, 9, 8, /* bswap32 */
+ 12, 13, 14, 15
+ };
+ uint8x16_t *t_wqe;
+ uint8_t *dseg;
+ uint8x16_t ctrl;
+
+ /* Make sure all packets can fit into a single WQE. */
+ assert(elts_n > pkts_n);
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
+ if (unlikely(!pkts_n))
+ return 0;
+ elts = &(*txq->elts)[elts_head & elts_m];
+ /* Loop for available tailroom first. */
+ n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
+ for (pos = 0; pos < (n & -2); pos += 2)
+ vst1q_u64((void *)&elts[pos], vld1q_u64((void *)&pkts[pos]));
+ if (n & 1)
+ elts[pos] = pkts[pos];
+ /* Check if it crosses the end of the queue. */
+ if (unlikely(n < pkts_n)) {
+ elts = &(*txq->elts)[0];
+ for (pos = 0; pos < pkts_n - n; ++pos)
+ elts[pos] = pkts[n + pos];
+ }
+ txq->elts_head += pkts_n;
+ /* Save title WQEBB pointer. */
+ t_wqe = (uint8x16_t *)wqe;
+ dseg = (uint8_t *)(wqe + 1);
+ /* Calculate the number of entries to the end. */
+ n = RTE_MIN(
+ (wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
+ pkts_n);
+ /* Fill DSEGs. */
+ txq_wr_dseg_v(txq, dseg, pkts, n);
+ /* Check if it crosses the end of the queue. */
+ if (n < pkts_n) {
+ dseg = (uint8_t *)txq->wqes;
+ txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
+ }
+ if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
+ txq->elts_comp += pkts_n;
+ } else {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
+ /* Request a completion. */
+ txq->elts_comp = 0;
+ comp_req = 8;
+ }
+ /* Fill CTRL in the header. */
+ ctrl = vreinterpretq_u8_u32((uint32x4_t) {
+ MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
+ txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW,
+ txq->qp_num_8s | (pkts_n + 2),
+ comp_req,
+ txq->elts_head });
+ ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
+ vst1q_u8((void *)t_wqe, ctrl);
+ /* Fill ESEG in the header. */
+ vst1q_u8((void *)(t_wqe + 1),
+ ((uint8x16_t) { 0, 0, 0, 0,
+ cs_flags, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }));
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.opackets += pkts_n;
+#endif
+ txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
+ nb_dword_per_wqebb;
+ /* Ring QP doorbell. */
+ mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST);
+ return pkts_n;
+}
+
+/**
+ * Store free buffers to RX SW ring.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param pkts
+ * Pointer to array of packets to be stored.
+ * @param pkts_n
+ * Number of packets to be stored.
+ */
+static inline void
+rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
+{
+ const uint16_t q_mask = (1 << rxq->elts_n) - 1;
+ struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
+ unsigned int pos;
+ uint16_t p = n & -2;
+
+ for (pos = 0; pos < p; pos += 2) {
+ uint64x2_t mbp;
+
+ mbp = vld1q_u64((void *)&elts[pos]);
+ vst1q_u64((void *)&pkts[pos], mbp);
+ }
+ if (n & 1)
+ pkts[pos] = elts[pos];
+}
+
+/**
+ * Decompress a compressed completion and fill in mbufs in RX SW ring with data
+ * extracted from the title completion descriptor.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param cq
+ * Pointer to completion array having a compressed completion at first.
+ * @param elts
+ * Pointer to SW ring to be filled. The first mbuf has to be pre-built from
+ * the title completion descriptor to be copied to the rest of mbufs.
+ */
+static inline void
+rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ struct rte_mbuf **elts)
+{
+ volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info;
+ struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
+ unsigned int pos;
+ unsigned int i;
+ unsigned int inv = 0;
+ /* Mask to shuffle from extracted mini CQE to mbuf. */
+ const uint8x16_t mcqe_shuf_m1 = {
+ -1, -1, -1, -1, /* skip packet_type */
+ 7, 6, -1, -1, /* pkt_len, bswap16 */
+ 7, 6, /* data_len, bswap16 */
+ -1, -1, /* skip vlan_tci */
+ 3, 2, 1, 0 /* hash.rss, bswap32 */
+ };
+ const uint8x16_t mcqe_shuf_m2 = {
+ -1, -1, -1, -1, /* skip packet_type */
+ 15, 14, -1, -1, /* pkt_len, bswap16 */
+ 15, 14, /* data_len, bswap16 */
+ -1, -1, /* skip vlan_tci */
+ 11, 10, 9, 8 /* hash.rss, bswap32 */
+ };
+ /* Restore the compressed count. Must be 16 bits. */
+ const uint16_t mcqe_n = t_pkt->data_len +
+ (rxq->crc_present * ETHER_CRC_LEN);
+ const uint64x2_t rearm =
+ vld1q_u64((void *)&t_pkt->rearm_data);
+ const uint32x4_t rxdf_mask = {
+ 0xffffffff, /* packet_type */
+ 0, /* skip pkt_len */
+ 0xffff0000, /* vlan_tci, skip data_len */
+ 0, /* skip hash.rss */
+ };
+ const uint8x16_t rxdf =
+ vandq_u8(vld1q_u8((void *)&t_pkt->rx_descriptor_fields1),
+ vreinterpretq_u8_u32(rxdf_mask));
+ const uint16x8_t crc_adj = {
+ 0, 0,
+ rxq->crc_present * ETHER_CRC_LEN, 0,
+ rxq->crc_present * ETHER_CRC_LEN, 0,
+ 0, 0
+ };
+ const uint32_t flow_tag = t_pkt->hash.fdir.hi;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t rcvd_byte = 0;
+#endif
+ /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
+ const uint8x8_t len_shuf_m = {
+ 7, 6, /* 1st mCQE */
+ 15, 14, /* 2nd mCQE */
+ 23, 22, /* 3rd mCQE */
+ 31, 30 /* 4th mCQE */
+ };
+
+ /*
+ * A. load mCQEs into a 128bit register.
+ * B. store rearm data to mbuf.
+ * C. combine data from mCQEs with rx_descriptor_fields1.
+ * D. store rx_descriptor_fields1.
+ * E. store flow tag (rte_flow mark).
+ */
+ for (pos = 0; pos < mcqe_n; ) {
+ uint8_t *p = (void *)&mcq[pos % 8];
+ uint8_t *e0 = (void *)&elts[pos]->rearm_data;
+ uint8_t *e1 = (void *)&elts[pos + 1]->rearm_data;
+ uint8_t *e2 = (void *)&elts[pos + 2]->rearm_data;
+ uint8_t *e3 = (void *)&elts[pos + 3]->rearm_data;
+ uint16x4_t byte_cnt;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint16x4_t invalid_mask =
+ vcreate_u16(mcqe_n - pos < MLX5_VPMD_DESCS_PER_LOOP ?
+ -1UL << ((mcqe_n - pos) *
+ sizeof(uint16_t) * 8) : 0);
+#endif
+
+ if (!(pos & 0x7) && pos + 8 < mcqe_n)
+ rte_prefetch0((void *)(cq + pos + 8));
+ __asm__ volatile (
+ /* A.1 load mCQEs into a 128bit register. */
+ "ld1 {v16.16b - v17.16b}, [%[mcq]] \n\t"
+ /* B.1 store rearm data to mbuf. */
+ "st1 {%[rearm].2d}, [%[e0]] \n\t"
+ "add %[e0], %[e0], #16 \n\t"
+ "st1 {%[rearm].2d}, [%[e1]] \n\t"
+ "add %[e1], %[e1], #16 \n\t"
+ /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ "tbl v18.16b, {v16.16b}, %[mcqe_shuf_m1].16b \n\t"
+ "tbl v19.16b, {v16.16b}, %[mcqe_shuf_m2].16b \n\t"
+ "sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
+ "sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
+ "orr v18.16b, v18.16b, %[rxdf].16b \n\t"
+ "orr v19.16b, v19.16b, %[rxdf].16b \n\t"
+ /* D.1 store rx_descriptor_fields1. */
+ "st1 {v18.2d}, [%[e0]] \n\t"
+ "st1 {v19.2d}, [%[e1]] \n\t"
+ /* B.1 store rearm data to mbuf. */
+ "st1 {%[rearm].2d}, [%[e2]] \n\t"
+ "add %[e2], %[e2], #16 \n\t"
+ "st1 {%[rearm].2d}, [%[e3]] \n\t"
+ "add %[e3], %[e3], #16 \n\t"
+ /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ "tbl v18.16b, {v17.16b}, %[mcqe_shuf_m1].16b \n\t"
+ "tbl v19.16b, {v17.16b}, %[mcqe_shuf_m2].16b \n\t"
+ "sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
+ "sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
+ "orr v18.16b, v18.16b, %[rxdf].16b \n\t"
+ "orr v19.16b, v19.16b, %[rxdf].16b \n\t"
+ /* D.1 store rx_descriptor_fields1. */
+ "st1 {v18.2d}, [%[e2]] \n\t"
+ "st1 {v19.2d}, [%[e3]] \n\t"
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ "tbl %[byte_cnt].8b, {v16.16b - v17.16b}, %[len_shuf_m].8b \n\t"
+#endif
+ :[byte_cnt]"=&w"(byte_cnt)
+ :[mcq]"r"(p),
+ [rxdf]"w"(rxdf),
+ [rearm]"w"(rearm),
+ [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
+ [mcqe_shuf_m1]"w"(mcqe_shuf_m1),
+ [mcqe_shuf_m2]"w"(mcqe_shuf_m2),
+ [crc_adj]"w"(crc_adj),
+ [len_shuf_m]"w"(len_shuf_m)
+ :"memory", "v16", "v17", "v18", "v19");
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ byte_cnt = vbic_u16(byte_cnt, invalid_mask);
+ rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0);
+#endif
+ if (rxq->mark) {
+ /* E.1 store flow tag (rte_flow mark). */
+ elts[pos]->hash.fdir.hi = flow_tag;
+ elts[pos + 1]->hash.fdir.hi = flow_tag;
+ elts[pos + 2]->hash.fdir.hi = flow_tag;
+ elts[pos + 3]->hash.fdir.hi = flow_tag;
+ }
+ pos += MLX5_VPMD_DESCS_PER_LOOP;
+ /* Move to next CQE and invalidate consumed CQEs. */
+ if (!(pos & 0x7) && pos < mcqe_n) {
+ mcq = (void *)&(cq + pos)->pkt_info;
+ for (i = 0; i < 8; ++i)
+ cq[inv++].op_own = MLX5_CQE_INVALIDATE;
+ }
+ }
+ /* Invalidate the rest of CQEs. */
+ for (; inv < mcqe_n; ++inv)
+ cq[inv].op_own = MLX5_CQE_INVALIDATE;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ rxq->stats.ipackets += mcqe_n;
+ rxq->stats.ibytes += rcvd_byte;
+#endif
+ rxq->cq_ci += mcqe_n;
+}
+
+/**
+ * Calculate packet type and offload flag for mbuf and store it.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param ptype_info
+ * Array of four 4bytes packet type info extracted from the original
+ * completion descriptor.
+ * @param flow_tag
+ * Array of four 4bytes flow ID extracted from the original completion
+ * descriptor.
+ * @param op_err
+ * Opcode vector having responder error status. Each field is 4B.
+ * @param pkts
+ * Pointer to array of packets to be filled.
+ */
+static inline void
+rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
+ uint32x4_t ptype_info, uint32x4_t flow_tag,
+ uint16x4_t op_err, struct rte_mbuf **pkts)
+{
+ uint16x4_t ptype;
+ uint32x4_t pinfo, cv_flags;
+ uint32x4_t ol_flags =
+ vdupq_n_u32(rxq->rss_hash * PKT_RX_RSS_HASH |
+ rxq->hw_timestamp * PKT_RX_TIMESTAMP);
+ const uint32x4_t ptype_ol_mask = { 0x106, 0x106, 0x106, 0x106 };
+ const uint8x16_t cv_flag_sel = {
+ 0,
+ (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
+ (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
+ 0,
+ (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
+ 0,
+ (uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
+ 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ const uint32x4_t cv_mask =
+ vdupq_n_u32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer);
+ const uint64x1_t r32_mask = vcreate_u64(0xffffffff);
+ uint64x2_t rearm0, rearm1, rearm2, rearm3;
+ uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
+
+ if (rxq->mark) {
+ const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
+ const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR);
+ uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
+ uint32x4_t invalid_mask;
+
+ /* Check if flow tag is non-zero then set PKT_RX_FDIR. */
+ invalid_mask = vceqzq_u32(flow_tag);
+ ol_flags = vorrq_u32(ol_flags,
+ vbicq_u32(fdir_flags, invalid_mask));
+ /* Mask out invalid entries. */
+ fdir_id_flags = vbicq_u32(fdir_id_flags, invalid_mask);
+ /* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
+ ol_flags = vorrq_u32(ol_flags,
+ vbicq_u32(fdir_id_flags,
+ vceqq_u32(flow_tag, ft_def)));
+ }
+ /*
+ * ptype_info has the following:
+ * bit[1] = l3_ok
+ * bit[2] = l4_ok
+ * bit[8] = cv
+ * bit[11:10] = l3_hdr_type
+ * bit[14:12] = l4_hdr_type
+ * bit[15] = ip_frag
+ * bit[16] = tunneled
+ * bit[17] = outer_l3_type
+ */
+ ptype = vshrn_n_u32(ptype_info, 10);
+ /* Errored packets will have RTE_PTYPE_ALL_MASK. */
+ ptype = vorr_u16(ptype, op_err);
+ pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6);
+ pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4);
+ pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2);
+ pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0);
+ pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
+ !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
+ pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
+ !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
+ pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
+ !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
+ pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
+ !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
+ /* Fill flags for checksum and VLAN. */
+ pinfo = vandq_u32(ptype_info, ptype_ol_mask);
+ pinfo = vreinterpretq_u32_u8(
+ vqtbl1q_u8(cv_flag_sel, vreinterpretq_u8_u32(pinfo)));
+ /* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
+ cv_flags = vshlq_n_u32(pinfo, 9);
+ cv_flags = vorrq_u32(pinfo, cv_flags);
+ /* Move back flags to start from byte[0]. */
+ cv_flags = vshrq_n_u32(cv_flags, 8);
+ /* Mask out garbage bits. */
+ cv_flags = vandq_u32(cv_flags, cv_mask);
+ /* Merge to ol_flags. */
+ ol_flags = vorrq_u32(ol_flags, cv_flags);
+ /* Merge mbuf_init and ol_flags, and store. */
+ rearm0 = vcombine_u64(mbuf_init,
+ vshr_n_u64(vget_high_u64(vreinterpretq_u64_u32(
+ ol_flags)), 32));
+ rearm1 = vcombine_u64(mbuf_init,
+ vand_u64(vget_high_u64(vreinterpretq_u64_u32(
+ ol_flags)), r32_mask));
+ rearm2 = vcombine_u64(mbuf_init,
+ vshr_n_u64(vget_low_u64(vreinterpretq_u64_u32(
+ ol_flags)), 32));
+ rearm3 = vcombine_u64(mbuf_init,
+ vand_u64(vget_low_u64(vreinterpretq_u64_u32(
+ ol_flags)), r32_mask));
+ vst1q_u64((void *)&pkts[0]->rearm_data, rearm0);
+ vst1q_u64((void *)&pkts[1]->rearm_data, rearm1);
+ vst1q_u64((void *)&pkts[2]->rearm_data, rearm2);
+ vst1q_u64((void *)&pkts[3]->rearm_data, rearm3);
+}
+
+/**
+ * Receive burst of packets. An errored completion also consumes a mbuf, but the
+ * packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
+ * before returning to application.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ * @param[out] err
+ * Pointer to a flag. Set non-zero value if pkts array has at least one error
+ * packet to handle.
+ *
+ * @return
+ * Number of packets received including errors (<= pkts_n).
+ */
+static inline uint16_t
+rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
+ uint64_t *err)
+{
+ const uint16_t q_n = 1 << rxq->cqe_n;
+ const uint16_t q_mask = q_n - 1;
+ volatile struct mlx5_cqe *cq;
+ struct rte_mbuf **elts;
+ unsigned int pos;
+ uint64_t n;
+ uint16_t repl_n;
+ uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
+ uint16_t nocmp_n = 0;
+ uint16_t rcvd_pkt = 0;
+ unsigned int cq_idx = rxq->cq_ci & q_mask;
+ unsigned int elts_idx;
+ const uint16x4_t ownership = vdup_n_u16(!(rxq->cq_ci & (q_mask + 1)));
+ const uint16x4_t owner_check = vcreate_u16(0x0001000100010001);
+ const uint16x4_t opcode_check = vcreate_u16(0x00f000f000f000f0);
+ const uint16x4_t format_check = vcreate_u16(0x000c000c000c000c);
+ const uint16x4_t resp_err_check = vcreate_u16(0x00e000e000e000e0);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t rcvd_byte = 0;
+#endif
+ /* Mask to generate 16B length vector. */
+ const uint8x8_t len_shuf_m = {
+ 52, 53, /* 4th CQE */
+ 36, 37, /* 3rd CQE */
+ 20, 21, /* 2nd CQE */
+ 4, 5 /* 1st CQE */
+ };
+ /* Mask to extract 16B data from a 64B CQE. */
+ const uint8x16_t cqe_shuf_m = {
+ 28, 29, /* hdr_type_etc */
+ 0, /* pkt_info */
+ -1, /* null */
+ 47, 46, /* byte_cnt, bswap16 */
+ 31, 30, /* vlan_info, bswap16 */
+ 15, 14, 13, 12, /* rx_hash_res, bswap32 */
+ 57, 58, 59, /* flow_tag */
+ 63 /* op_own */
+ };
+ /* Mask to generate 16B data for mbuf. */
+ const uint8x16_t mb_shuf_m = {
+ 4, 5, -1, -1, /* pkt_len */
+ 4, 5, /* data_len */
+ 6, 7, /* vlan_tci */
+ 8, 9, 10, 11, /* hash.rss */
+ 12, 13, 14, -1 /* hash.fdir.hi */
+ };
+ /* Mask to generate 16B owner vector. */
+ const uint8x8_t owner_shuf_m = {
+ 63, -1, /* 4th CQE */
+ 47, -1, /* 3rd CQE */
+ 31, -1, /* 2nd CQE */
+ 15, -1 /* 1st CQE */
+ };
+ /* Mask to generate a vector having packet_type/ol_flags. */
+ const uint8x16_t ptype_shuf_m = {
+ 48, 49, 50, -1, /* 4th CQE */
+ 32, 33, 34, -1, /* 3rd CQE */
+ 16, 17, 18, -1, /* 2nd CQE */
+ 0, 1, 2, -1 /* 1st CQE */
+ };
+ /* Mask to generate a vector having flow tags. */
+ const uint8x16_t ftag_shuf_m = {
+ 60, 61, 62, -1, /* 4th CQE */
+ 44, 45, 46, -1, /* 3rd CQE */
+ 28, 29, 30, -1, /* 2nd CQE */
+ 12, 13, 14, -1 /* 1st CQE */
+ };
+ const uint16x8_t crc_adj = {
+ 0, 0, rxq->crc_present * ETHER_CRC_LEN, 0, 0, 0, 0, 0
+ };
+ const uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) };
+
+ assert(rxq->sges_n == 0);
+ assert(rxq->cqe_n == rxq->elts_n);
+ cq = &(*rxq->cqes)[cq_idx];
+ rte_prefetch_non_temporal(cq);
+ rte_prefetch_non_temporal(cq + 1);
+ rte_prefetch_non_temporal(cq + 2);
+ rte_prefetch_non_temporal(cq + 3);
+ pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
+ /*
+ * Order of indexes:
+ * rq_ci >= cq_ci >= rq_pi
+ * Definition of indexes:
+ * rq_ci - cq_ci := # of buffers owned by HW (posted).
+ * cq_ci - rq_pi := # of buffers not returned to app (decompressed).
+ * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
+ */
+ repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
+ if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
+ mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
+ /* See if there're unreturned mbufs from compressed CQE. */
+ rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
+ if (rcvd_pkt > 0) {
+ rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
+ rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
+ rxq->rq_pi += rcvd_pkt;
+ pkts += rcvd_pkt;
+ }
+ elts_idx = rxq->rq_pi & q_mask;
+ elts = &(*rxq->elts)[elts_idx];
+ /* Not to overflow pkts array. */
+ pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
+ /* Not to cross queue end. */
+ pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
+ if (!pkts_n)
+ return rcvd_pkt;
+ /* At this point, there shouldn't be any remained packets. */
+ assert(rxq->rq_pi == rxq->cq_ci);
+ /*
+ * Note that vectors have reverse order - {v3, v2, v1, v0}, because
+ * there's no instruction to count trailing zeros. __builtin_clzl() is
+ * used instead.
+ *
+ * A. copy 4 mbuf pointers from elts ring to returing pkts.
+ * B. load 64B CQE and extract necessary fields
+ * Final 16bytes cqes[] extracted from original 64bytes CQE has the
+ * following structure:
+ * struct {
+ * uint16_t hdr_type_etc;
+ * uint8_t pkt_info;
+ * uint8_t rsvd;
+ * uint16_t byte_cnt;
+ * uint16_t vlan_info;
+ * uint32_t rx_has_res;
+ * uint8_t flow_tag[3];
+ * uint8_t op_own;
+ * } c;
+ * C. fill in mbuf.
+ * D. get valid CQEs.
+ * E. find compressed CQE.
+ */
+ for (pos = 0;
+ pos < pkts_n;
+ pos += MLX5_VPMD_DESCS_PER_LOOP) {
+ uint16x4_t op_own;
+ uint16x4_t opcode, owner_mask, invalid_mask;
+ uint16x4_t comp_mask;
+ uint16x4_t mask;
+ uint16x4_t byte_cnt;
+ uint32x4_t ptype_info, flow_tag;
+ register uint64x2_t c0, c1, c2, c3;
+ uint8_t *p0, *p1, *p2, *p3;
+ uint8_t *e0 = (void *)&elts[pos]->pkt_len;
+ uint8_t *e1 = (void *)&elts[pos + 1]->pkt_len;
+ uint8_t *e2 = (void *)&elts[pos + 2]->pkt_len;
+ uint8_t *e3 = (void *)&elts[pos + 3]->pkt_len;
+ void *elts_p = (void *)&elts[pos];
+ void *pkts_p = (void *)&pkts[pos];
+
+ /* A.0 do not cross the end of CQ. */
+ mask = vcreate_u16(pkts_n - pos < MLX5_VPMD_DESCS_PER_LOOP ?
+ -1UL >> ((pkts_n - pos) *
+ sizeof(uint16_t) * 8) : 0);
+ p0 = (void *)&cq[pos].pkt_info;
+ p1 = p0 + (pkts_n - pos > 1) * sizeof(struct mlx5_cqe);
+ p2 = p1 + (pkts_n - pos > 2) * sizeof(struct mlx5_cqe);
+ p3 = p2 + (pkts_n - pos > 3) * sizeof(struct mlx5_cqe);
+ /* B.0 (CQE 3) load a block having op_own. */
+ c3 = vld1q_u64((uint64_t *)(p3 + 48));
+ /* B.0 (CQE 2) load a block having op_own. */
+ c2 = vld1q_u64((uint64_t *)(p2 + 48));
+ /* B.0 (CQE 1) load a block having op_own. */
+ c1 = vld1q_u64((uint64_t *)(p1 + 48));
+ /* B.0 (CQE 0) load a block having op_own. */
+ c0 = vld1q_u64((uint64_t *)(p0 + 48));
+ /* Synchronize for loading the rest of blocks. */
+ rte_cio_rmb();
+ /* Prefetch next 4 CQEs. */
+ if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
+ unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP;
+ rte_prefetch_non_temporal(&cq[next]);
+ rte_prefetch_non_temporal(&cq[next + 1]);
+ rte_prefetch_non_temporal(&cq[next + 2]);
+ rte_prefetch_non_temporal(&cq[next + 3]);
+ }
+ __asm__ volatile (
+ /* B.1 (CQE 3) load the rest of blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p3]] \n\t"
+ /* B.2 (CQE 3) move the block having op_own. */
+ "mov v19.16b, %[c3].16b \n\t"
+ /* B.3 (CQE 3) extract 16B fields. */
+ "tbl v23.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+ /* B.1 (CQE 2) load the rest of blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p2]] \n\t"
+ /* B.4 (CQE 3) adjust CRC length. */
+ "sub v23.8h, v23.8h, %[crc_adj].8h \n\t"
+ /* C.1 (CQE 3) generate final structure for mbuf. */
+ "tbl v15.16b, {v23.16b}, %[mb_shuf_m].16b \n\t"
+ /* B.2 (CQE 2) move the block having op_own. */
+ "mov v19.16b, %[c2].16b \n\t"
+ /* B.3 (CQE 2) extract 16B fields. */
+ "tbl v22.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+ /* B.1 (CQE 1) load the rest of blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p1]] \n\t"
+ /* B.4 (CQE 2) adjust CRC length. */
+ "sub v22.8h, v22.8h, %[crc_adj].8h \n\t"
+ /* C.1 (CQE 2) generate final structure for mbuf. */
+ "tbl v14.16b, {v22.16b}, %[mb_shuf_m].16b \n\t"
+ /* B.2 (CQE 1) move the block having op_own. */
+ "mov v19.16b, %[c1].16b \n\t"
+ /* B.3 (CQE 1) extract 16B fields. */
+ "tbl v21.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+ /* B.1 (CQE 0) load the rest of blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p0]] \n\t"
+ /* B.4 (CQE 1) adjust CRC length. */
+ "sub v21.8h, v21.8h, %[crc_adj].8h \n\t"
+ /* C.1 (CQE 1) generate final structure for mbuf. */
+ "tbl v13.16b, {v21.16b}, %[mb_shuf_m].16b \n\t"
+ /* B.2 (CQE 0) move the block having op_own. */
+ "mov v19.16b, %[c0].16b \n\t"
+ /* A.1 load mbuf pointers. */
+ "ld1 {v24.2d - v25.2d}, [%[elts_p]] \n\t"
+ /* B.3 (CQE 0) extract 16B fields. */
+ "tbl v20.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+ /* B.4 (CQE 0) adjust CRC length. */
+ "sub v20.8h, v20.8h, %[crc_adj].8h \n\t"
+ /* D.1 extract op_own byte. */
+ "tbl %[op_own].8b, {v20.16b - v23.16b}, %[owner_shuf_m].8b \n\t"
+ /* C.2 (CQE 3) adjust flow mark. */
+ "add v15.4s, v15.4s, %[flow_mark_adj].4s \n\t"
+ /* C.3 (CQE 3) fill in mbuf - rx_descriptor_fields1. */
+ "st1 {v15.2d}, [%[e3]] \n\t"
+ /* C.2 (CQE 2) adjust flow mark. */
+ "add v14.4s, v14.4s, %[flow_mark_adj].4s \n\t"
+ /* C.3 (CQE 2) fill in mbuf - rx_descriptor_fields1. */
+ "st1 {v14.2d}, [%[e2]] \n\t"
+ /* C.1 (CQE 0) generate final structure for mbuf. */
+ "tbl v12.16b, {v20.16b}, %[mb_shuf_m].16b \n\t"
+ /* C.2 (CQE 1) adjust flow mark. */
+ "add v13.4s, v13.4s, %[flow_mark_adj].4s \n\t"
+ /* C.3 (CQE 1) fill in mbuf - rx_descriptor_fields1. */
+ "st1 {v13.2d}, [%[e1]] \n\t"
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Extract byte_cnt. */
+ "tbl %[byte_cnt].8b, {v20.16b - v23.16b}, %[len_shuf_m].8b \n\t"
+#endif
+ /* Extract ptype_info. */
+ "tbl %[ptype_info].16b, {v20.16b - v23.16b}, %[ptype_shuf_m].16b \n\t"
+ /* Extract flow_tag. */
+ "tbl %[flow_tag].16b, {v20.16b - v23.16b}, %[ftag_shuf_m].16b \n\t"
+ /* A.2 copy mbuf pointers. */
+ "st1 {v24.2d - v25.2d}, [%[pkts_p]] \n\t"
+ /* C.2 (CQE 0) adjust flow mark. */
+ "add v12.4s, v12.4s, %[flow_mark_adj].4s \n\t"
+ /* C.3 (CQE 1) fill in mbuf - rx_descriptor_fields1. */
+ "st1 {v12.2d}, [%[e0]] \n\t"
+ :[op_own]"=&w"(op_own),
+ [byte_cnt]"=&w"(byte_cnt),
+ [ptype_info]"=&w"(ptype_info),
+ [flow_tag]"=&w"(flow_tag)
+ :[p3]"r"(p3), [p2]"r"(p2), [p1]"r"(p1), [p0]"r"(p0),
+ [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
+ [c3]"w"(c3), [c2]"w"(c2), [c1]"w"(c1), [c0]"w"(c0),
+ [elts_p]"r"(elts_p),
+ [pkts_p]"r"(pkts_p),
+ [cqe_shuf_m]"w"(cqe_shuf_m),
+ [mb_shuf_m]"w"(mb_shuf_m),
+ [owner_shuf_m]"w"(owner_shuf_m),
+ [len_shuf_m]"w"(len_shuf_m),
+ [ptype_shuf_m]"w"(ptype_shuf_m),
+ [ftag_shuf_m]"w"(ftag_shuf_m),
+ [crc_adj]"w"(crc_adj),
+ [flow_mark_adj]"w"(flow_mark_adj)
+ :"memory",
+ "v12", "v13", "v14", "v15",
+ "v16", "v17", "v18", "v19",
+ "v20", "v21", "v22", "v23",
+ "v24", "v25");
+ /* D.2 flip owner bit to mark CQEs from last round. */
+ owner_mask = vand_u16(op_own, owner_check);
+ owner_mask = vceq_u16(owner_mask, ownership);
+ /* D.3 get mask for invalidated CQEs. */
+ opcode = vand_u16(op_own, opcode_check);
+ invalid_mask = vceq_u16(opcode_check, opcode);
+ /* E.1 find compressed CQE format. */
+ comp_mask = vand_u16(op_own, format_check);
+ comp_mask = vceq_u16(comp_mask, format_check);
+ /* D.4 mask out beyond boundary. */
+ invalid_mask = vorr_u16(invalid_mask, mask);
+ /* D.5 merge invalid_mask with invalid owner. */
+ invalid_mask = vorr_u16(invalid_mask, owner_mask);
+ /* E.2 mask out invalid entries. */
+ comp_mask = vbic_u16(comp_mask, invalid_mask);
+ /* E.3 get the first compressed CQE. */
+ comp_idx = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
+ comp_mask), 0)) /
+ (sizeof(uint16_t) * 8);
+ /* D.6 mask out entries after the compressed CQE. */
+ mask = vcreate_u16(comp_idx < MLX5_VPMD_DESCS_PER_LOOP ?
+ -1UL >> (comp_idx * sizeof(uint16_t) * 8) :
+ 0);
+ invalid_mask = vorr_u16(invalid_mask, mask);
+ /* D.7 count non-compressed valid CQEs. */
+ n = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
+ invalid_mask), 0)) / (sizeof(uint16_t) * 8);
+ nocmp_n += n;
+ /* D.2 get the final invalid mask. */
+ mask = vcreate_u16(n < MLX5_VPMD_DESCS_PER_LOOP ?
+ -1UL >> (n * sizeof(uint16_t) * 8) : 0);
+ invalid_mask = vorr_u16(invalid_mask, mask);
+ /* D.3 check error in opcode. */
+ opcode = vceq_u16(resp_err_check, opcode);
+ opcode = vbic_u16(opcode, invalid_mask);
+ /* D.4 mark if any error is set */
+ *err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
+ /* C.4 fill in mbuf - rearm_data and packet_type. */
+ rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
+ opcode, &elts[pos]);
+ if (rxq->hw_timestamp) {
+ elts[pos]->timestamp =
+ rte_be_to_cpu_64(
+ container_of(p0, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 1]->timestamp =
+ rte_be_to_cpu_64(
+ container_of(p1, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 2]->timestamp =
+ rte_be_to_cpu_64(
+ container_of(p2, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 3]->timestamp =
+ rte_be_to_cpu_64(
+ container_of(p3, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Add up received bytes count. */
+ byte_cnt = vbic_u16(byte_cnt, invalid_mask);
+ rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0);
+#endif
+ /*
+ * Break the loop unless more valid CQE is expected, or if
+ * there's a compressed CQE.
+ */
+ if (n != MLX5_VPMD_DESCS_PER_LOOP)
+ break;
+ }
+ /* If no new CQE seen, return without updating cq_db. */
+ if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
+ return rcvd_pkt;
+ /* Update the consumer indexes for non-compressed CQEs. */
+ assert(nocmp_n <= pkts_n);
+ rxq->cq_ci += nocmp_n;
+ rxq->rq_pi += nocmp_n;
+ rcvd_pkt += nocmp_n;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ rxq->stats.ipackets += nocmp_n;
+ rxq->stats.ibytes += rcvd_byte;
+#endif
+ /* Decompress the last CQE if compressed. */
+ if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
+ assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+ rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
+ /* Return more packets if needed. */
+ if (nocmp_n < pkts_n) {
+ uint16_t n = rxq->cq_ci - rxq->rq_pi;
+
+ n = RTE_MIN(n, pkts_n - nocmp_n);
+ rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
+ rxq->rq_pi += n;
+ rcvd_pkt += n;
+ }
+ }
+ rte_compiler_barrier();
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+ return rcvd_pkt;
+}
+
+#endif /* RTE_PMD_MLX5_RXTX_VEC_NEON_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
new file mode 100644
index 00000000..54b3783c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -0,0 +1,969 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
+#define RTE_PMD_MLX5_RXTX_VEC_SSE_H_
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <smmintrin.h>
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_rxtx_vec.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+/**
+ * Fill in buffer descriptors in a multi-packet send descriptor.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param dseg
+ * Pointer to buffer descriptor to be written.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param n
+ * Number of packets to be filled.
+ */
+static inline void
+txq_wr_dseg_v(struct mlx5_txq_data *txq, __m128i *dseg,
+ struct rte_mbuf **pkts, unsigned int n)
+{
+ unsigned int pos;
+ uintptr_t addr;
+ const __m128i shuf_mask_dseg =
+ _mm_set_epi8(8, 9, 10, 11, /* addr, bswap64 */
+ 12, 13, 14, 15,
+ 7, 6, 5, 4, /* lkey */
+ 0, 1, 2, 3 /* length, bswap32 */);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t tx_byte = 0;
+#endif
+
+ for (pos = 0; pos < n; ++pos, ++dseg) {
+ __m128i desc;
+ struct rte_mbuf *pkt = pkts[pos];
+
+ addr = rte_pktmbuf_mtod(pkt, uintptr_t);
+ desc = _mm_set_epi32(addr >> 32,
+ addr,
+ mlx5_tx_mb2mr(txq, pkt),
+ DATA_LEN(pkt));
+ desc = _mm_shuffle_epi8(desc, shuf_mask_dseg);
+ _mm_store_si128(dseg, desc);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ tx_byte += DATA_LEN(pkt);
+#endif
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.obytes += tx_byte;
+#endif
+}
+
+/**
+ * Send multi-segmented packets until it encounters a single segment packet in
+ * the pkts list.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param pkts_n
+ * Number of packets to be sent.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+static uint16_t
+txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ const uint16_t wq_n = 1 << txq->wqe_n;
+ const uint16_t wq_mask = wq_n - 1;
+ const unsigned int nb_dword_per_wqebb =
+ MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
+ const unsigned int nb_dword_in_hdr =
+ sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
+ unsigned int n;
+ volatile struct mlx5_wqe *wqe = NULL;
+
+ assert(elts_n > pkts_n);
+ mlx5_tx_complete(txq);
+ if (unlikely(!pkts_n))
+ return 0;
+ for (n = 0; n < pkts_n; ++n) {
+ struct rte_mbuf *buf = pkts[n];
+ unsigned int segs_n = buf->nb_segs;
+ unsigned int ds = nb_dword_in_hdr;
+ unsigned int len = PKT_LEN(buf);
+ uint16_t wqe_ci = txq->wqe_ci;
+ const __m128i shuf_mask_ctrl =
+ _mm_set_epi8(15, 14, 13, 12,
+ 8, 9, 10, 11, /* bswap32 */
+ 4, 5, 6, 7, /* bswap32 */
+ 0, 1, 2, 3 /* bswap32 */);
+ uint8_t cs_flags;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ __m128i *t_wqe, *dseg;
+ __m128i ctrl;
+
+ assert(segs_n);
+ max_elts = elts_n - (elts_head - txq->elts_tail);
+ max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
+ /*
+ * A MPW session consumes 2 WQEs at most to
+ * include MLX5_MPW_DSEG_MAX pointers.
+ */
+ if (segs_n == 1 ||
+ max_elts < segs_n || max_wqe < 2)
+ break;
+ if (segs_n > MLX5_MPW_DSEG_MAX) {
+ txq->stats.oerrors++;
+ break;
+ }
+ wqe = &((volatile struct mlx5_wqe64 *)
+ txq->wqes)[wqe_ci & wq_mask].hdr;
+ cs_flags = txq_ol_cksum_to_cs(buf);
+ /* Title WQEBB pointer. */
+ t_wqe = (__m128i *)wqe;
+ dseg = (__m128i *)(wqe + 1);
+ do {
+ if (!(ds++ % nb_dword_per_wqebb)) {
+ dseg = (__m128i *)
+ &((volatile struct mlx5_wqe64 *)
+ txq->wqes)[++wqe_ci & wq_mask];
+ }
+ txq_wr_dseg_v(txq, dseg++, &buf, 1);
+ (*txq->elts)[elts_head++ & elts_m] = buf;
+ buf = buf->next;
+ } while (--segs_n);
+ ++wqe_ci;
+ /* Fill CTRL in the header. */
+ ctrl = _mm_set_epi32(0, 0, txq->qp_num_8s | ds,
+ MLX5_OPC_MOD_MPW << 24 |
+ txq->wqe_ci << 8 | MLX5_OPCODE_TSO);
+ ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
+ _mm_store_si128(t_wqe, ctrl);
+ /* Fill ESEG in the header. */
+ _mm_store_si128(t_wqe + 1,
+ _mm_set_epi16(0, 0, 0, 0,
+ rte_cpu_to_be_16(len), cs_flags,
+ 0, 0));
+ txq->wqe_ci = wqe_ci;
+ }
+ if (!n)
+ return 0;
+ txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
+ txq->elts_head = elts_head;
+ if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
+ wqe->ctrl[3] = txq->elts_head;
+ txq->elts_comp = 0;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.opackets += n;
+#endif
+ mlx5_tx_dbrec(txq, wqe);
+ return n;
+}
+
+/**
+ * Send burst of packets with Enhanced MPW. If it encounters a multi-seg packet,
+ * it returns to make it processed by txq_scatter_v(). All the packets in
+ * the pkts list should be single segment packets having same offload flags.
+ * This must be checked by txq_count_contig_single_seg() and txq_calc_offload().
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param pkts_n
+ * Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
+ * @param cs_flags
+ * Checksum offload flags to be written in the descriptor.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+static inline uint16_t
+txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
+ uint8_t cs_flags)
+{
+ struct rte_mbuf **elts;
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ const unsigned int nb_dword_per_wqebb =
+ MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
+ const unsigned int nb_dword_in_hdr =
+ sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
+ unsigned int n = 0;
+ unsigned int pos;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ uint32_t comp_req = 0;
+ const uint16_t wq_n = 1 << txq->wqe_n;
+ const uint16_t wq_mask = wq_n - 1;
+ uint16_t wq_idx = txq->wqe_ci & wq_mask;
+ volatile struct mlx5_wqe64 *wq =
+ &((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
+ volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
+ const __m128i shuf_mask_ctrl =
+ _mm_set_epi8(15, 14, 13, 12,
+ 8, 9, 10, 11, /* bswap32 */
+ 4, 5, 6, 7, /* bswap32 */
+ 0, 1, 2, 3 /* bswap32 */);
+ __m128i *t_wqe, *dseg;
+ __m128i ctrl;
+
+ /* Make sure all packets can fit into a single WQE. */
+ assert(elts_n > pkts_n);
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
+ assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
+ if (unlikely(!pkts_n))
+ return 0;
+ elts = &(*txq->elts)[elts_head & elts_m];
+ /* Loop for available tailroom first. */
+ n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
+ for (pos = 0; pos < (n & -2); pos += 2)
+ _mm_storeu_si128((__m128i *)&elts[pos],
+ _mm_loadu_si128((__m128i *)&pkts[pos]));
+ if (n & 1)
+ elts[pos] = pkts[pos];
+ /* Check if it crosses the end of the queue. */
+ if (unlikely(n < pkts_n)) {
+ elts = &(*txq->elts)[0];
+ for (pos = 0; pos < pkts_n - n; ++pos)
+ elts[pos] = pkts[n + pos];
+ }
+ txq->elts_head += pkts_n;
+ /* Save title WQEBB pointer. */
+ t_wqe = (__m128i *)wqe;
+ dseg = (__m128i *)(wqe + 1);
+ /* Calculate the number of entries to the end. */
+ n = RTE_MIN(
+ (wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
+ pkts_n);
+ /* Fill DSEGs. */
+ txq_wr_dseg_v(txq, dseg, pkts, n);
+ /* Check if it crosses the end of the queue. */
+ if (n < pkts_n) {
+ dseg = (__m128i *)txq->wqes;
+ txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
+ }
+ if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
+ txq->elts_comp += pkts_n;
+ } else {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
+ /* Request a completion. */
+ txq->elts_comp = 0;
+ comp_req = 8;
+ }
+ /* Fill CTRL in the header. */
+ ctrl = _mm_set_epi32(txq->elts_head, comp_req,
+ txq->qp_num_8s | (pkts_n + 2),
+ MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
+ txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW);
+ ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
+ _mm_store_si128(t_wqe, ctrl);
+ /* Fill ESEG in the header. */
+ _mm_store_si128(t_wqe + 1,
+ _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, cs_flags,
+ 0, 0, 0, 0));
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.opackets += pkts_n;
+#endif
+ txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
+ nb_dword_per_wqebb;
+ /* Ring QP doorbell. */
+ mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST);
+ return pkts_n;
+}
+
+/**
+ * Store free buffers to RX SW ring.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param pkts
+ * Pointer to array of packets to be stored.
+ * @param pkts_n
+ * Number of packets to be stored.
+ */
+static inline void
+rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
+{
+ const uint16_t q_mask = (1 << rxq->elts_n) - 1;
+ struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
+ unsigned int pos;
+ uint16_t p = n & -2;
+
+ for (pos = 0; pos < p; pos += 2) {
+ __m128i mbp;
+
+ mbp = _mm_loadu_si128((__m128i *)&elts[pos]);
+ _mm_storeu_si128((__m128i *)&pkts[pos], mbp);
+ }
+ if (n & 1)
+ pkts[pos] = elts[pos];
+}
+
+/**
+ * Decompress a compressed completion and fill in mbufs in RX SW ring with data
+ * extracted from the title completion descriptor.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param cq
+ * Pointer to completion array having a compressed completion at first.
+ * @param elts
+ * Pointer to SW ring to be filled. The first mbuf has to be pre-built from
+ * the title completion descriptor to be copied to the rest of mbufs.
+ */
+static inline void
+rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ struct rte_mbuf **elts)
+{
+ volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + 1);
+ struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
+ unsigned int pos;
+ unsigned int i;
+ unsigned int inv = 0;
+ /* Mask to shuffle from extracted mini CQE to mbuf. */
+ const __m128i shuf_mask1 =
+ _mm_set_epi8(0, 1, 2, 3, /* rss, bswap32 */
+ -1, -1, /* skip vlan_tci */
+ 6, 7, /* data_len, bswap16 */
+ -1, -1, 6, 7, /* pkt_len, bswap16 */
+ -1, -1, -1, -1 /* skip packet_type */);
+ const __m128i shuf_mask2 =
+ _mm_set_epi8(8, 9, 10, 11, /* rss, bswap32 */
+ -1, -1, /* skip vlan_tci */
+ 14, 15, /* data_len, bswap16 */
+ -1, -1, 14, 15, /* pkt_len, bswap16 */
+ -1, -1, -1, -1 /* skip packet_type */);
+ /* Restore the compressed count. Must be 16 bits. */
+ const uint16_t mcqe_n = t_pkt->data_len +
+ (rxq->crc_present * ETHER_CRC_LEN);
+ const __m128i rearm =
+ _mm_loadu_si128((__m128i *)&t_pkt->rearm_data);
+ const __m128i rxdf =
+ _mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);
+ const __m128i crc_adj =
+ _mm_set_epi16(0, 0, 0,
+ rxq->crc_present * ETHER_CRC_LEN,
+ 0,
+ rxq->crc_present * ETHER_CRC_LEN,
+ 0, 0);
+ const uint32_t flow_tag = t_pkt->hash.fdir.hi;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i ones = _mm_cmpeq_epi32(zero, zero);
+ uint32_t rcvd_byte = 0;
+ /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
+ const __m128i len_shuf_mask =
+ _mm_set_epi8(-1, -1, -1, -1,
+ -1, -1, -1, -1,
+ 14, 15, 6, 7,
+ 10, 11, 2, 3);
+#endif
+
+ /*
+ * A. load mCQEs into a 128bit register.
+ * B. store rearm data to mbuf.
+ * C. combine data from mCQEs with rx_descriptor_fields1.
+ * D. store rx_descriptor_fields1.
+ * E. store flow tag (rte_flow mark).
+ */
+ for (pos = 0; pos < mcqe_n; ) {
+ __m128i mcqe1, mcqe2;
+ __m128i rxdf1, rxdf2;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ __m128i byte_cnt, invalid_mask;
+#endif
+
+ if (!(pos & 0x7) && pos + 8 < mcqe_n)
+ rte_prefetch0((void *)(cq + pos + 8));
+ /* A.1 load mCQEs into a 128bit register. */
+ mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
+ mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
+ /* B.1 store rearm data to mbuf. */
+ _mm_storeu_si128((__m128i *)&elts[pos]->rearm_data, rearm);
+ _mm_storeu_si128((__m128i *)&elts[pos + 1]->rearm_data, rearm);
+ /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ rxdf1 = _mm_shuffle_epi8(mcqe1, shuf_mask1);
+ rxdf2 = _mm_shuffle_epi8(mcqe1, shuf_mask2);
+ rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
+ rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
+ rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
+ rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
+ /* D.1 store rx_descriptor_fields1. */
+ _mm_storeu_si128((__m128i *)
+ &elts[pos]->rx_descriptor_fields1,
+ rxdf1);
+ _mm_storeu_si128((__m128i *)
+ &elts[pos + 1]->rx_descriptor_fields1,
+ rxdf2);
+ /* B.1 store rearm data to mbuf. */
+ _mm_storeu_si128((__m128i *)&elts[pos + 2]->rearm_data, rearm);
+ _mm_storeu_si128((__m128i *)&elts[pos + 3]->rearm_data, rearm);
+ /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ rxdf1 = _mm_shuffle_epi8(mcqe2, shuf_mask1);
+ rxdf2 = _mm_shuffle_epi8(mcqe2, shuf_mask2);
+ rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
+ rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
+ rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
+ rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
+ /* D.1 store rx_descriptor_fields1. */
+ _mm_storeu_si128((__m128i *)
+ &elts[pos + 2]->rx_descriptor_fields1,
+ rxdf1);
+ _mm_storeu_si128((__m128i *)
+ &elts[pos + 3]->rx_descriptor_fields1,
+ rxdf2);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ invalid_mask = _mm_set_epi64x(0,
+ (mcqe_n - pos) *
+ sizeof(uint16_t) * 8);
+ invalid_mask = _mm_sll_epi64(ones, invalid_mask);
+ mcqe1 = _mm_srli_si128(mcqe1, 4);
+ byte_cnt = _mm_blend_epi16(mcqe1, mcqe2, 0xcc);
+ byte_cnt = _mm_shuffle_epi8(byte_cnt, len_shuf_mask);
+ byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
+ byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
+ rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
+#endif
+ if (rxq->mark) {
+ /* E.1 store flow tag (rte_flow mark). */
+ elts[pos]->hash.fdir.hi = flow_tag;
+ elts[pos + 1]->hash.fdir.hi = flow_tag;
+ elts[pos + 2]->hash.fdir.hi = flow_tag;
+ elts[pos + 3]->hash.fdir.hi = flow_tag;
+ }
+ pos += MLX5_VPMD_DESCS_PER_LOOP;
+ /* Move to next CQE and invalidate consumed CQEs. */
+ if (!(pos & 0x7) && pos < mcqe_n) {
+ mcq = (void *)(cq + pos);
+ for (i = 0; i < 8; ++i)
+ cq[inv++].op_own = MLX5_CQE_INVALIDATE;
+ }
+ }
+ /* Invalidate the rest of CQEs. */
+ for (; inv < mcqe_n; ++inv)
+ cq[inv].op_own = MLX5_CQE_INVALIDATE;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ rxq->stats.ipackets += mcqe_n;
+ rxq->stats.ibytes += rcvd_byte;
+#endif
+ rxq->cq_ci += mcqe_n;
+}
+
+/**
+ * Calculate packet type and offload flag for mbuf and store it.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param cqes[4]
+ * Array of four 16bytes completions extracted from the original completion
+ * descriptor.
+ * @param op_err
+ * Opcode vector having responder error status. Each field is 4B.
+ * @param pkts
+ * Pointer to array of packets to be filled.
+ */
+static inline void
+rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
+ __m128i op_err, struct rte_mbuf **pkts)
+{
+ __m128i pinfo0, pinfo1;
+ __m128i pinfo, ptype;
+ __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH |
+ rxq->hw_timestamp * PKT_RX_TIMESTAMP);
+ __m128i cv_flags;
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i ptype_mask =
+ _mm_set_epi32(0xfd06, 0xfd06, 0xfd06, 0xfd06);
+ const __m128i ptype_ol_mask =
+ _mm_set_epi32(0x106, 0x106, 0x106, 0x106);
+ const __m128i pinfo_mask =
+ _mm_set_epi32(0x3, 0x3, 0x3, 0x3);
+ const __m128i cv_flag_sel =
+ _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
+ (uint8_t)((PKT_RX_IP_CKSUM_GOOD |
+ PKT_RX_L4_CKSUM_GOOD) >> 1),
+ 0,
+ (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
+ 0,
+ (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
+ (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
+ 0);
+ const __m128i cv_mask =
+ _mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ const __m128i mbuf_init =
+ _mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
+ __m128i rearm0, rearm1, rearm2, rearm3;
+ uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
+
+ /* Extract pkt_info field. */
+ pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
+ pinfo1 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
+ pinfo = _mm_unpacklo_epi64(pinfo0, pinfo1);
+ /* Extract hdr_type_etc field. */
+ pinfo0 = _mm_unpackhi_epi32(cqes[0], cqes[1]);
+ pinfo1 = _mm_unpackhi_epi32(cqes[2], cqes[3]);
+ ptype = _mm_unpacklo_epi64(pinfo0, pinfo1);
+ if (rxq->mark) {
+ const __m128i pinfo_ft_mask =
+ _mm_set_epi32(0xffffff00, 0xffffff00,
+ 0xffffff00, 0xffffff00);
+ const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
+ __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
+ __m128i flow_tag, invalid_mask;
+
+ flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
+ /* Check if flow tag is non-zero then set PKT_RX_FDIR. */
+ invalid_mask = _mm_cmpeq_epi32(flow_tag, zero);
+ ol_flags = _mm_or_si128(ol_flags,
+ _mm_andnot_si128(invalid_mask,
+ fdir_flags));
+ /* Mask out invalid entries. */
+ fdir_id_flags = _mm_andnot_si128(invalid_mask, fdir_id_flags);
+ /* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
+ ol_flags = _mm_or_si128(ol_flags,
+ _mm_andnot_si128(
+ _mm_cmpeq_epi32(flow_tag,
+ pinfo_ft_mask),
+ fdir_id_flags));
+ }
+ /*
+ * Merge the two fields to generate the following:
+ * bit[1] = l3_ok
+ * bit[2] = l4_ok
+ * bit[8] = cv
+ * bit[11:10] = l3_hdr_type
+ * bit[14:12] = l4_hdr_type
+ * bit[15] = ip_frag
+ * bit[16] = tunneled
+ * bit[17] = outer_l3_type
+ */
+ ptype = _mm_and_si128(ptype, ptype_mask);
+ pinfo = _mm_and_si128(pinfo, pinfo_mask);
+ pinfo = _mm_slli_epi32(pinfo, 16);
+ /* Make pinfo has merged fields for ol_flags calculation. */
+ pinfo = _mm_or_si128(ptype, pinfo);
+ ptype = _mm_srli_epi32(pinfo, 10);
+ ptype = _mm_packs_epi32(ptype, zero);
+ /* Errored packets will have RTE_PTYPE_ALL_MASK. */
+ op_err = _mm_srli_epi16(op_err, 8);
+ ptype = _mm_or_si128(ptype, op_err);
+ pt_idx0 = _mm_extract_epi8(ptype, 0);
+ pt_idx1 = _mm_extract_epi8(ptype, 2);
+ pt_idx2 = _mm_extract_epi8(ptype, 4);
+ pt_idx3 = _mm_extract_epi8(ptype, 6);
+ pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
+ !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
+ pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
+ !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
+ pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
+ !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
+ pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
+ !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
+ /* Fill flags for checksum and VLAN. */
+ pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
+ pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
+ /* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
+ cv_flags = _mm_slli_epi32(pinfo, 9);
+ cv_flags = _mm_or_si128(pinfo, cv_flags);
+ /* Move back flags to start from byte[0]. */
+ cv_flags = _mm_srli_epi32(cv_flags, 8);
+ /* Mask out garbage bits. */
+ cv_flags = _mm_and_si128(cv_flags, cv_mask);
+ /* Merge to ol_flags. */
+ ol_flags = _mm_or_si128(ol_flags, cv_flags);
+ /* Merge mbuf_init and ol_flags. */
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 8), 0x30);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30);
+ rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30);
+ /* Write 8B rearm_data and 8B ol_flags. */
+ _mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&pkts[3]->rearm_data, rearm3);
+}
+
+/**
+ * Receive burst of packets. An errored completion also consumes a mbuf, but the
+ * packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
+ * before returning to application.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ * @param[out] err
+ * Pointer to a flag. Set non-zero value if pkts array has at least one error
+ * packet to handle.
+ *
+ * @return
+ * Number of packets received including errors (<= pkts_n).
+ */
+static inline uint16_t
+rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
+ uint64_t *err)
+{
+ const uint16_t q_n = 1 << rxq->cqe_n;
+ const uint16_t q_mask = q_n - 1;
+ volatile struct mlx5_cqe *cq;
+ struct rte_mbuf **elts;
+ unsigned int pos;
+ uint64_t n;
+ uint16_t repl_n;
+ uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
+ uint16_t nocmp_n = 0;
+ uint16_t rcvd_pkt = 0;
+ unsigned int cq_idx = rxq->cq_ci & q_mask;
+ unsigned int elts_idx;
+ unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
+ const __m128i owner_check =
+ _mm_set_epi64x(0x0100000001000000LL, 0x0100000001000000LL);
+ const __m128i opcode_check =
+ _mm_set_epi64x(0xf0000000f0000000LL, 0xf0000000f0000000LL);
+ const __m128i format_check =
+ _mm_set_epi64x(0x0c0000000c000000LL, 0x0c0000000c000000LL);
+ const __m128i resp_err_check =
+ _mm_set_epi64x(0xe0000000e0000000LL, 0xe0000000e0000000LL);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t rcvd_byte = 0;
+ /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
+ const __m128i len_shuf_mask =
+ _mm_set_epi8(-1, -1, -1, -1,
+ -1, -1, -1, -1,
+ 12, 13, 8, 9,
+ 4, 5, 0, 1);
+#endif
+ /* Mask to shuffle from extracted CQE to mbuf. */
+ const __m128i shuf_mask =
+ _mm_set_epi8(-1, 3, 2, 1, /* fdir.hi */
+ 12, 13, 14, 15, /* rss, bswap32 */
+ 10, 11, /* vlan_tci, bswap16 */
+ 4, 5, /* data_len, bswap16 */
+ -1, -1, /* zero out 2nd half of pkt_len */
+ 4, 5 /* pkt_len, bswap16 */);
+ /* Mask to blend from the last Qword to the first DQword. */
+ const __m128i blend_mask =
+ _mm_set_epi8(-1, -1, -1, -1,
+ -1, -1, -1, -1,
+ 0, 0, 0, 0,
+ 0, 0, 0, -1);
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i ones = _mm_cmpeq_epi32(zero, zero);
+ const __m128i crc_adj =
+ _mm_set_epi16(0, 0, 0, 0, 0,
+ rxq->crc_present * ETHER_CRC_LEN,
+ 0,
+ rxq->crc_present * ETHER_CRC_LEN);
+ const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);
+
+ assert(rxq->sges_n == 0);
+ assert(rxq->cqe_n == rxq->elts_n);
+ cq = &(*rxq->cqes)[cq_idx];
+ rte_prefetch0(cq);
+ rte_prefetch0(cq + 1);
+ rte_prefetch0(cq + 2);
+ rte_prefetch0(cq + 3);
+ pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
+ /*
+ * Order of indexes:
+ * rq_ci >= cq_ci >= rq_pi
+ * Definition of indexes:
+ * rq_ci - cq_ci := # of buffers owned by HW (posted).
+ * cq_ci - rq_pi := # of buffers not returned to app (decompressed).
+ * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
+ */
+ repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
+ if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
+ mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
+ /* See if there're unreturned mbufs from compressed CQE. */
+ rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
+ if (rcvd_pkt > 0) {
+ rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
+ rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
+ rxq->rq_pi += rcvd_pkt;
+ pkts += rcvd_pkt;
+ }
+ elts_idx = rxq->rq_pi & q_mask;
+ elts = &(*rxq->elts)[elts_idx];
+ /* Not to overflow pkts array. */
+ pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
+ /* Not to cross queue end. */
+ pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
+ if (!pkts_n)
+ return rcvd_pkt;
+ /* At this point, there shouldn't be any remained packets. */
+ assert(rxq->rq_pi == rxq->cq_ci);
+ /*
+ * A. load first Qword (8bytes) in one loop.
+ * B. copy 4 mbuf pointers from elts ring to returing pkts.
+ * C. load remained CQE data and extract necessary fields.
+ * Final 16bytes cqes[] extracted from original 64bytes CQE has the
+ * following structure:
+ * struct {
+ * uint8_t pkt_info;
+ * uint8_t flow_tag[3];
+ * uint16_t byte_cnt;
+ * uint8_t rsvd4;
+ * uint8_t op_own;
+ * uint16_t hdr_type_etc;
+ * uint16_t vlan_info;
+ * uint32_t rx_has_res;
+ * } c;
+ * D. fill in mbuf.
+ * E. get valid CQEs.
+ * F. find compressed CQE.
+ */
+ for (pos = 0;
+ pos < pkts_n;
+ pos += MLX5_VPMD_DESCS_PER_LOOP) {
+ __m128i cqes[MLX5_VPMD_DESCS_PER_LOOP];
+ __m128i cqe_tmp1, cqe_tmp2;
+ __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
+ __m128i op_own, op_own_tmp1, op_own_tmp2;
+ __m128i opcode, owner_mask, invalid_mask;
+ __m128i comp_mask;
+ __m128i mask;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ __m128i byte_cnt;
+#endif
+ __m128i mbp1, mbp2;
+ __m128i p = _mm_set_epi16(0, 0, 0, 0, 3, 2, 1, 0);
+ unsigned int p1, p2, p3;
+
+ /* Prefetch next 4 CQEs. */
+ if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
+ rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
+ rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
+ rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
+ rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
+ }
+ /* A.0 do not cross the end of CQ. */
+ mask = _mm_set_epi64x(0, (pkts_n - pos) * sizeof(uint16_t) * 8);
+ mask = _mm_sll_epi64(ones, mask);
+ p = _mm_andnot_si128(mask, p);
+ /* A.1 load cqes. */
+ p3 = _mm_extract_epi16(p, 3);
+ cqes[3] = _mm_loadl_epi64((__m128i *)
+ &cq[pos + p3].sop_drop_qpn);
+ rte_compiler_barrier();
+ p2 = _mm_extract_epi16(p, 2);
+ cqes[2] = _mm_loadl_epi64((__m128i *)
+ &cq[pos + p2].sop_drop_qpn);
+ rte_compiler_barrier();
+ /* B.1 load mbuf pointers. */
+ mbp1 = _mm_loadu_si128((__m128i *)&elts[pos]);
+ mbp2 = _mm_loadu_si128((__m128i *)&elts[pos + 2]);
+ /* A.1 load a block having op_own. */
+ p1 = _mm_extract_epi16(p, 1);
+ cqes[1] = _mm_loadl_epi64((__m128i *)
+ &cq[pos + p1].sop_drop_qpn);
+ rte_compiler_barrier();
+ cqes[0] = _mm_loadl_epi64((__m128i *)
+ &cq[pos].sop_drop_qpn);
+ /* B.2 copy mbuf pointers. */
+ _mm_storeu_si128((__m128i *)&pkts[pos], mbp1);
+ _mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);
+ rte_cio_rmb();
+ /* C.1 load remained CQE data and extract necessary fields. */
+ cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);
+ cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
+ cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask);
+ cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask);
+ cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].rsvd1[3]);
+ cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].rsvd1[3]);
+ cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
+ cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
+ cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd2[10]);
+ cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd2[10]);
+ cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
+ cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
+ /* C.2 generate final structure for mbuf with swapping bytes. */
+ pkt_mb3 = _mm_shuffle_epi8(cqes[3], shuf_mask);
+ pkt_mb2 = _mm_shuffle_epi8(cqes[2], shuf_mask);
+ /* C.3 adjust CRC length. */
+ pkt_mb3 = _mm_sub_epi16(pkt_mb3, crc_adj);
+ pkt_mb2 = _mm_sub_epi16(pkt_mb2, crc_adj);
+ /* C.4 adjust flow mark. */
+ pkt_mb3 = _mm_add_epi32(pkt_mb3, flow_mark_adj);
+ pkt_mb2 = _mm_add_epi32(pkt_mb2, flow_mark_adj);
+ /* D.1 fill in mbuf - rx_descriptor_fields1. */
+ _mm_storeu_si128((void *)&pkts[pos + 3]->pkt_len, pkt_mb3);
+ _mm_storeu_si128((void *)&pkts[pos + 2]->pkt_len, pkt_mb2);
+ /* E.1 extract op_own field. */
+ op_own_tmp2 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
+ /* C.1 load remained CQE data and extract necessary fields. */
+ cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p1]);
+ cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]);
+ cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask);
+ cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask);
+ cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].rsvd1[3]);
+ cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].rsvd1[3]);
+ cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
+ cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
+ cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd2[10]);
+ cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd2[10]);
+ cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
+ cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
+ /* C.2 generate final structure for mbuf with swapping bytes. */
+ pkt_mb1 = _mm_shuffle_epi8(cqes[1], shuf_mask);
+ pkt_mb0 = _mm_shuffle_epi8(cqes[0], shuf_mask);
+ /* C.3 adjust CRC length. */
+ pkt_mb1 = _mm_sub_epi16(pkt_mb1, crc_adj);
+ pkt_mb0 = _mm_sub_epi16(pkt_mb0, crc_adj);
+ /* C.4 adjust flow mark. */
+ pkt_mb1 = _mm_add_epi32(pkt_mb1, flow_mark_adj);
+ pkt_mb0 = _mm_add_epi32(pkt_mb0, flow_mark_adj);
+ /* E.1 extract op_own byte. */
+ op_own_tmp1 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
+ op_own = _mm_unpackhi_epi64(op_own_tmp1, op_own_tmp2);
+ /* D.1 fill in mbuf - rx_descriptor_fields1. */
+ _mm_storeu_si128((void *)&pkts[pos + 1]->pkt_len, pkt_mb1);
+ _mm_storeu_si128((void *)&pkts[pos]->pkt_len, pkt_mb0);
+ /* E.2 flip owner bit to mark CQEs from last round. */
+ owner_mask = _mm_and_si128(op_own, owner_check);
+ if (ownership)
+ owner_mask = _mm_xor_si128(owner_mask, owner_check);
+ owner_mask = _mm_cmpeq_epi32(owner_mask, owner_check);
+ owner_mask = _mm_packs_epi32(owner_mask, zero);
+ /* E.3 get mask for invalidated CQEs. */
+ opcode = _mm_and_si128(op_own, opcode_check);
+ invalid_mask = _mm_cmpeq_epi32(opcode_check, opcode);
+ invalid_mask = _mm_packs_epi32(invalid_mask, zero);
+ /* E.4 mask out beyond boundary. */
+ invalid_mask = _mm_or_si128(invalid_mask, mask);
+ /* E.5 merge invalid_mask with invalid owner. */
+ invalid_mask = _mm_or_si128(invalid_mask, owner_mask);
+ /* F.1 find compressed CQE format. */
+ comp_mask = _mm_and_si128(op_own, format_check);
+ comp_mask = _mm_cmpeq_epi32(comp_mask, format_check);
+ comp_mask = _mm_packs_epi32(comp_mask, zero);
+ /* F.2 mask out invalid entries. */
+ comp_mask = _mm_andnot_si128(invalid_mask, comp_mask);
+ comp_idx = _mm_cvtsi128_si64(comp_mask);
+ /* F.3 get the first compressed CQE. */
+ comp_idx = comp_idx ?
+ __builtin_ctzll(comp_idx) /
+ (sizeof(uint16_t) * 8) :
+ MLX5_VPMD_DESCS_PER_LOOP;
+ /* E.6 mask out entries after the compressed CQE. */
+ mask = _mm_set_epi64x(0, comp_idx * sizeof(uint16_t) * 8);
+ mask = _mm_sll_epi64(ones, mask);
+ invalid_mask = _mm_or_si128(invalid_mask, mask);
+ /* E.7 count non-compressed valid CQEs. */
+ n = _mm_cvtsi128_si64(invalid_mask);
+ n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
+ MLX5_VPMD_DESCS_PER_LOOP;
+ nocmp_n += n;
+ /* D.2 get the final invalid mask. */
+ mask = _mm_set_epi64x(0, n * sizeof(uint16_t) * 8);
+ mask = _mm_sll_epi64(ones, mask);
+ invalid_mask = _mm_or_si128(invalid_mask, mask);
+ /* D.3 check error in opcode. */
+ opcode = _mm_cmpeq_epi32(resp_err_check, opcode);
+ opcode = _mm_packs_epi32(opcode, zero);
+ opcode = _mm_andnot_si128(invalid_mask, opcode);
+ /* D.4 mark if any error is set */
+ *err |= _mm_cvtsi128_si64(opcode);
+ /* D.5 fill in mbuf - rearm_data and packet_type. */
+ rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
+ if (rxq->hw_timestamp) {
+ pkts[pos]->timestamp =
+ rte_be_to_cpu_64(cq[pos].timestamp);
+ pkts[pos + 1]->timestamp =
+ rte_be_to_cpu_64(cq[pos + p1].timestamp);
+ pkts[pos + 2]->timestamp =
+ rte_be_to_cpu_64(cq[pos + p2].timestamp);
+ pkts[pos + 3]->timestamp =
+ rte_be_to_cpu_64(cq[pos + p3].timestamp);
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Add up received bytes count. */
+ byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);
+ byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
+ byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
+ rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
+#endif
+ /*
+ * Break the loop unless more valid CQE is expected, or if
+ * there's a compressed CQE.
+ */
+ if (n != MLX5_VPMD_DESCS_PER_LOOP)
+ break;
+ }
+ /* If no new CQE seen, return without updating cq_db. */
+ if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
+ return rcvd_pkt;
+ /* Update the consumer indexes for non-compressed CQEs. */
+ assert(nocmp_n <= pkts_n);
+ rxq->cq_ci += nocmp_n;
+ rxq->rq_pi += nocmp_n;
+ rcvd_pkt += nocmp_n;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ rxq->stats.ipackets += nocmp_n;
+ rxq->stats.ibytes += rcvd_byte;
+#endif
+ /* Decompress the last CQE if compressed. */
+ if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
+ assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+ rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
+ /* Return more packets if needed. */
+ if (nocmp_n < pkts_n) {
+ uint16_t n = rxq->cq_ci - rxq->rq_pi;
+
+ n = RTE_MIN(n, pkts_n - nocmp_n);
+ rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
+ rxq->rq_pi += n;
+ rcvd_pkt += n;
+ }
+ }
+ rte_compiler_barrier();
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+ return rcvd_pkt;
+}
+
+#endif /* RTE_PMD_MLX5_RXTX_VEC_SSE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_socket.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_socket.c
new file mode 100644
index 00000000..a3a52291
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_socket.c
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox Technologies, Ltd
+ */
+
+#define _GNU_SOURCE
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/stat.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+
+/**
+ * Initialise the socket to communicate with the secondary process
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_socket_init(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct sockaddr_un sun = {
+ .sun_family = AF_UNIX,
+ };
+ int ret;
+ int flags;
+
+ /*
+ * Close the last socket that was used to communicate
+ * with the secondary process
+ */
+ if (priv->primary_socket)
+ mlx5_socket_uninit(dev);
+ /*
+ * Initialise the socket to communicate with the secondary
+ * process.
+ */
+ ret = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (ret < 0) {
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u secondary process not supported: %s",
+ dev->data->port_id, strerror(errno));
+ goto error;
+ }
+ priv->primary_socket = ret;
+ flags = fcntl(priv->primary_socket, F_GETFL, 0);
+ if (flags == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ ret = fcntl(priv->primary_socket, F_SETFL, flags | O_NONBLOCK);
+ if (ret < 0) {
+ rte_errno = errno;
+ goto error;
+ }
+ snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d",
+ MLX5_DRIVER_NAME, priv->primary_socket);
+ remove(sun.sun_path);
+ ret = bind(priv->primary_socket, (const struct sockaddr *)&sun,
+ sizeof(sun));
+ if (ret < 0) {
+ rte_errno = errno;
+ DRV_LOG(WARNING,
+ "port %u cannot bind socket, secondary process not"
+ " supported: %s",
+ dev->data->port_id, strerror(errno));
+ goto close;
+ }
+ ret = listen(priv->primary_socket, 0);
+ if (ret < 0) {
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u secondary process not supported: %s",
+ dev->data->port_id, strerror(errno));
+ goto close;
+ }
+ return 0;
+close:
+ remove(sun.sun_path);
+error:
+ claim_zero(close(priv->primary_socket));
+ priv->primary_socket = 0;
+ return -rte_errno;
+}
+
+/**
+ * Un-Initialise the socket to communicate with the secondary process
+ *
+ * @param[in] dev
+ */
+void
+mlx5_socket_uninit(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ MKSTR(path, "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket);
+ claim_zero(close(priv->primary_socket));
+ priv->primary_socket = 0;
+ claim_zero(remove(path));
+}
+
+/**
+ * Handle socket interrupts.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_socket_handle(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int conn_sock;
+ int ret = 0;
+ struct cmsghdr *cmsg = NULL;
+ struct ucred *cred = NULL;
+ char buf[CMSG_SPACE(sizeof(struct ucred))] = { 0 };
+ char vbuf[1024] = { 0 };
+ struct iovec io = {
+ .iov_base = vbuf,
+ .iov_len = sizeof(*vbuf),
+ };
+ struct msghdr msg = {
+ .msg_iov = &io,
+ .msg_iovlen = 1,
+ .msg_control = buf,
+ .msg_controllen = sizeof(buf),
+ };
+ int *fd;
+
+ /* Accept the connection from the client. */
+ conn_sock = accept(priv->primary_socket, NULL, NULL);
+ if (conn_sock < 0) {
+ DRV_LOG(WARNING, "port %u connection failed: %s",
+ dev->data->port_id, strerror(errno));
+ return;
+ }
+ ret = setsockopt(conn_sock, SOL_SOCKET, SO_PASSCRED, &(int){1},
+ sizeof(int));
+ if (ret < 0) {
+ ret = errno;
+ DRV_LOG(WARNING, "port %u cannot change socket options: %s",
+ dev->data->port_id, strerror(rte_errno));
+ goto error;
+ }
+ ret = recvmsg(conn_sock, &msg, MSG_WAITALL);
+ if (ret < 0) {
+ ret = errno;
+ DRV_LOG(WARNING, "port %u received an empty message: %s",
+ dev->data->port_id, strerror(rte_errno));
+ goto error;
+ }
+ /* Expect to receive credentials only. */
+ cmsg = CMSG_FIRSTHDR(&msg);
+ if (cmsg == NULL) {
+ DRV_LOG(WARNING, "port %u no message", dev->data->port_id);
+ goto error;
+ }
+ if ((cmsg->cmsg_type == SCM_CREDENTIALS) &&
+ (cmsg->cmsg_len >= sizeof(*cred))) {
+ cred = (struct ucred *)CMSG_DATA(cmsg);
+ assert(cred != NULL);
+ }
+ cmsg = CMSG_NXTHDR(&msg, cmsg);
+ if (cmsg != NULL) {
+ DRV_LOG(WARNING, "port %u message wrongly formatted",
+ dev->data->port_id);
+ goto error;
+ }
+ /* Make sure all the ancillary data was received and valid. */
+ if ((cred == NULL) || (cred->uid != getuid()) ||
+ (cred->gid != getgid())) {
+ DRV_LOG(WARNING, "port %u wrong credentials",
+ dev->data->port_id);
+ goto error;
+ }
+ /* Set-up the ancillary data. */
+ cmsg = CMSG_FIRSTHDR(&msg);
+ assert(cmsg != NULL);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(priv->ctx->cmd_fd));
+ fd = (int *)CMSG_DATA(cmsg);
+ *fd = priv->ctx->cmd_fd;
+ ret = sendmsg(conn_sock, &msg, 0);
+ if (ret < 0)
+ DRV_LOG(WARNING, "port %u cannot send response",
+ dev->data->port_id);
+error:
+ close(conn_sock);
+}
+
+/**
+ * Connect to the primary process.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet structure.
+ *
+ * @return
+ * fd on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_socket_connect(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct sockaddr_un sun = {
+ .sun_family = AF_UNIX,
+ };
+ int socket_fd = -1;
+ int *fd = NULL;
+ int ret;
+ struct ucred *cred;
+ char buf[CMSG_SPACE(sizeof(*cred))] = { 0 };
+ char vbuf[1024] = { 0 };
+ struct iovec io = {
+ .iov_base = vbuf,
+ .iov_len = sizeof(*vbuf),
+ };
+ struct msghdr msg = {
+ .msg_control = buf,
+ .msg_controllen = sizeof(buf),
+ .msg_iov = &io,
+ .msg_iovlen = 1,
+ };
+ struct cmsghdr *cmsg;
+
+ ret = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (ret < 0) {
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u cannot connect to primary",
+ dev->data->port_id);
+ goto error;
+ }
+ socket_fd = ret;
+ snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d",
+ MLX5_DRIVER_NAME, priv->primary_socket);
+ ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun));
+ if (ret < 0) {
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u cannot connect to primary",
+ dev->data->port_id);
+ goto error;
+ }
+ cmsg = CMSG_FIRSTHDR(&msg);
+ if (cmsg == NULL) {
+ rte_errno = EINVAL;
+ DRV_LOG(DEBUG, "port %u cannot get first message",
+ dev->data->port_id);
+ goto error;
+ }
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_CREDENTIALS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(*cred));
+ cred = (struct ucred *)CMSG_DATA(cmsg);
+ if (cred == NULL) {
+ rte_errno = EINVAL;
+ DRV_LOG(DEBUG, "port %u no credentials received",
+ dev->data->port_id);
+ goto error;
+ }
+ cred->pid = getpid();
+ cred->uid = getuid();
+ cred->gid = getgid();
+ ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT);
+ if (ret < 0) {
+ rte_errno = errno;
+ DRV_LOG(WARNING,
+ "port %u cannot send credentials to primary: %s",
+ dev->data->port_id, strerror(errno));
+ goto error;
+ }
+ ret = recvmsg(socket_fd, &msg, MSG_WAITALL);
+ if (ret <= 0) {
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u no message from primary: %s",
+ dev->data->port_id, strerror(errno));
+ goto error;
+ }
+ cmsg = CMSG_FIRSTHDR(&msg);
+ if (cmsg == NULL) {
+ rte_errno = EINVAL;
+ DRV_LOG(WARNING, "port %u no file descriptor received",
+ dev->data->port_id);
+ goto error;
+ }
+ fd = (int *)CMSG_DATA(cmsg);
+ if (*fd < 0) {
+ DRV_LOG(WARNING, "port %u no file descriptor received: %s",
+ dev->data->port_id, strerror(errno));
+ rte_errno = *fd;
+ goto error;
+ }
+ ret = *fd;
+ close(socket_fd);
+ return ret;
+error:
+ if (socket_fd != -1)
+ close(socket_fd);
+ return -rte_errno;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_stats.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_stats.c
new file mode 100644
index 00000000..91f3d474
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_stats.c
@@ -0,0 +1,494 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#include <inttypes.h>
+#include <linux/sockios.h>
+#include <linux/ethtool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include "mlx5.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_defs.h"
+
+struct mlx5_counter_ctrl {
+ /* Name of the counter. */
+ char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
+ /* Name of the counter on the device table. */
+ char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint32_t ib:1; /**< Nonzero for IB counters. */
+};
+
+static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
+ {
+ .dpdk_name = "rx_port_unicast_bytes",
+ .ctr_name = "rx_vport_unicast_bytes",
+ },
+ {
+ .dpdk_name = "rx_port_multicast_bytes",
+ .ctr_name = "rx_vport_multicast_bytes",
+ },
+ {
+ .dpdk_name = "rx_port_broadcast_bytes",
+ .ctr_name = "rx_vport_broadcast_bytes",
+ },
+ {
+ .dpdk_name = "rx_port_unicast_packets",
+ .ctr_name = "rx_vport_unicast_packets",
+ },
+ {
+ .dpdk_name = "rx_port_multicast_packets",
+ .ctr_name = "rx_vport_multicast_packets",
+ },
+ {
+ .dpdk_name = "rx_port_broadcast_packets",
+ .ctr_name = "rx_vport_broadcast_packets",
+ },
+ {
+ .dpdk_name = "tx_port_unicast_bytes",
+ .ctr_name = "tx_vport_unicast_bytes",
+ },
+ {
+ .dpdk_name = "tx_port_multicast_bytes",
+ .ctr_name = "tx_vport_multicast_bytes",
+ },
+ {
+ .dpdk_name = "tx_port_broadcast_bytes",
+ .ctr_name = "tx_vport_broadcast_bytes",
+ },
+ {
+ .dpdk_name = "tx_port_unicast_packets",
+ .ctr_name = "tx_vport_unicast_packets",
+ },
+ {
+ .dpdk_name = "tx_port_multicast_packets",
+ .ctr_name = "tx_vport_multicast_packets",
+ },
+ {
+ .dpdk_name = "tx_port_broadcast_packets",
+ .ctr_name = "tx_vport_broadcast_packets",
+ },
+ {
+ .dpdk_name = "rx_wqe_err",
+ .ctr_name = "rx_wqe_err",
+ },
+ {
+ .dpdk_name = "rx_crc_errors_phy",
+ .ctr_name = "rx_crc_errors_phy",
+ },
+ {
+ .dpdk_name = "rx_in_range_len_errors_phy",
+ .ctr_name = "rx_in_range_len_errors_phy",
+ },
+ {
+ .dpdk_name = "rx_symbol_err_phy",
+ .ctr_name = "rx_symbol_err_phy",
+ },
+ {
+ .dpdk_name = "tx_errors_phy",
+ .ctr_name = "tx_errors_phy",
+ },
+ {
+ .dpdk_name = "rx_out_of_buffer",
+ .ctr_name = "out_of_buffer",
+ .ib = 1,
+ },
+ {
+ .dpdk_name = "tx_packets_phy",
+ .ctr_name = "tx_packets_phy",
+ },
+ {
+ .dpdk_name = "rx_packets_phy",
+ .ctr_name = "rx_packets_phy",
+ },
+ {
+ .dpdk_name = "tx_bytes_phy",
+ .ctr_name = "tx_bytes_phy",
+ },
+ {
+ .dpdk_name = "rx_bytes_phy",
+ .ctr_name = "rx_bytes_phy",
+ },
+};
+
+static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
+
+/**
+ * Read device counters table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] stats
+ * Counters table output buffer.
+ *
+ * @return
+ * 0 on success and stats is filled, negative errno value otherwise and
+ * rte_errno is set.
+ */
+static int
+mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ unsigned int i;
+ struct ifreq ifr;
+ unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t);
+ unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz];
+ struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf;
+ int ret;
+
+ et_stats->cmd = ETHTOOL_GSTATS;
+ et_stats->n_stats = xstats_ctrl->stats_n;
+ ifr.ifr_data = (caddr_t)et_stats;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING,
+ "port %u unable to read statistic values from device",
+ dev->data->port_id);
+ return ret;
+ }
+ for (i = 0; i != xstats_n; ++i) {
+ if (mlx5_counters_init[i].ib) {
+ FILE *file;
+ MKSTR(path, "%s/ports/1/hw_counters/%s",
+ priv->ibdev_path,
+ mlx5_counters_init[i].ctr_name);
+
+ file = fopen(path, "rb");
+ if (file) {
+ int n = fscanf(file, "%" SCNu64, &stats[i]);
+
+ fclose(file);
+ if (n != 1)
+ stats[i] = 0;
+ }
+ } else {
+ stats[i] = (uint64_t)
+ et_stats->data[xstats_ctrl->dev_table_idx[i]];
+ }
+ }
+ return 0;
+}
+
+/**
+ * Query the number of statistics provided by ETHTOOL.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Number of statistics on success, negative errno value otherwise and
+ * rte_errno is set.
+ */
+static int
+mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) {
+ struct ethtool_drvinfo drvinfo;
+ struct ifreq ifr;
+ int ret;
+
+ drvinfo.cmd = ETHTOOL_GDRVINFO;
+ ifr.ifr_data = (caddr_t)&drvinfo;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING, "port %u unable to query number of statistics",
+ dev->data->port_id);
+ return ret;
+ }
+ return drvinfo.n_stats;
+}
+
+/**
+ * Init the structures to read device counters.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_xstats_init(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ unsigned int i;
+ unsigned int j;
+ struct ifreq ifr;
+ struct ethtool_gstrings *strings = NULL;
+ unsigned int dev_stats_n;
+ unsigned int str_sz;
+ int ret;
+
+ ret = mlx5_ethtool_get_stats_n(dev);
+ if (ret < 0) {
+ DRV_LOG(WARNING, "port %u no extended statistics available",
+ dev->data->port_id);
+ return;
+ }
+ dev_stats_n = ret;
+ xstats_ctrl->stats_n = dev_stats_n;
+ /* Allocate memory to grab stat names and values. */
+ str_sz = dev_stats_n * ETH_GSTRING_LEN;
+ strings = (struct ethtool_gstrings *)
+ rte_malloc("xstats_strings",
+ str_sz + sizeof(struct ethtool_gstrings), 0);
+ if (!strings) {
+ DRV_LOG(WARNING, "port %u unable to allocate memory for xstats",
+ dev->data->port_id);
+ return;
+ }
+ strings->cmd = ETHTOOL_GSTRINGS;
+ strings->string_set = ETH_SS_STATS;
+ strings->len = dev_stats_n;
+ ifr.ifr_data = (caddr_t)strings;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING, "port %u unable to get statistic names",
+ dev->data->port_id);
+ goto free;
+ }
+ for (j = 0; j != xstats_n; ++j)
+ xstats_ctrl->dev_table_idx[j] = dev_stats_n;
+ for (i = 0; i != dev_stats_n; ++i) {
+ const char *curr_string = (const char *)
+ &strings->data[i * ETH_GSTRING_LEN];
+
+ for (j = 0; j != xstats_n; ++j) {
+ if (!strcmp(mlx5_counters_init[j].ctr_name,
+ curr_string)) {
+ xstats_ctrl->dev_table_idx[j] = i;
+ break;
+ }
+ }
+ }
+ for (j = 0; j != xstats_n; ++j) {
+ if (mlx5_counters_init[j].ib)
+ continue;
+ if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) {
+ DRV_LOG(WARNING,
+ "port %u counter \"%s\" is not recognized",
+ dev->data->port_id,
+ mlx5_counters_init[j].dpdk_name);
+ goto free;
+ }
+ }
+ /* Copy to base at first time. */
+ assert(xstats_n <= MLX5_MAX_XSTATS);
+ ret = mlx5_read_dev_counters(dev, xstats_ctrl->base);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot read device counters: %s",
+ dev->data->port_id, strerror(rte_errno));
+free:
+ rte_free(strings);
+}
+
+/**
+ * DPDK callback to get extended device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] stats
+ * Pointer to rte extended stats table.
+ * @param n
+ * The size of the stats table.
+ *
+ * @return
+ * Number of extended stats on success and stats is filled,
+ * negative on error and rte_errno is set.
+ */
+int
+mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
+ unsigned int n)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+ uint64_t counters[n];
+
+ if (n >= xstats_n && stats) {
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ int stats_n;
+ int ret;
+
+ stats_n = mlx5_ethtool_get_stats_n(dev);
+ if (stats_n < 0)
+ return stats_n;
+ if (xstats_ctrl->stats_n != stats_n)
+ mlx5_xstats_init(dev);
+ ret = mlx5_read_dev_counters(dev, counters);
+ if (ret)
+ return ret;
+ for (i = 0; i != xstats_n; ++i) {
+ stats[i].id = i;
+ stats[i].value = (counters[i] - xstats_ctrl->base[i]);
+ }
+ }
+ return xstats_n;
+}
+
+/**
+ * DPDK callback to get device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] stats
+ * Stats structure output buffer.
+ *
+ * @return
+ * 0 on success and stats is filled, negative errno value otherwise and
+ * rte_errno is set.
+ */
+int
+mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_eth_stats tmp = {0};
+ unsigned int i;
+ unsigned int idx;
+
+ /* Add software counters. */
+ for (i = 0; (i != priv->rxqs_n); ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (rxq == NULL)
+ continue;
+ idx = rxq->stats.idx;
+ if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ tmp.q_ipackets[idx] += rxq->stats.ipackets;
+ tmp.q_ibytes[idx] += rxq->stats.ibytes;
+#endif
+ tmp.q_errors[idx] += (rxq->stats.idropped +
+ rxq->stats.rx_nombuf);
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ tmp.ipackets += rxq->stats.ipackets;
+ tmp.ibytes += rxq->stats.ibytes;
+#endif
+ tmp.ierrors += rxq->stats.idropped;
+ tmp.rx_nombuf += rxq->stats.rx_nombuf;
+ }
+ for (i = 0; (i != priv->txqs_n); ++i) {
+ struct mlx5_txq_data *txq = (*priv->txqs)[i];
+
+ if (txq == NULL)
+ continue;
+ idx = txq->stats.idx;
+ if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ tmp.q_opackets[idx] += txq->stats.opackets;
+ tmp.q_obytes[idx] += txq->stats.obytes;
+#endif
+ tmp.q_errors[idx] += txq->stats.oerrors;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ tmp.opackets += txq->stats.opackets;
+ tmp.obytes += txq->stats.obytes;
+#endif
+ tmp.oerrors += txq->stats.oerrors;
+ }
+#ifndef MLX5_PMD_SOFT_COUNTERS
+ /* FIXME: retrieve and add hardware counters. */
+#endif
+ *stats = tmp;
+ return 0;
+}
+
+/**
+ * DPDK callback to clear device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx5_stats_reset(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+ unsigned int idx;
+
+ for (i = 0; (i != priv->rxqs_n); ++i) {
+ if ((*priv->rxqs)[i] == NULL)
+ continue;
+ idx = (*priv->rxqs)[i]->stats.idx;
+ (*priv->rxqs)[i]->stats =
+ (struct mlx5_rxq_stats){ .idx = idx };
+ }
+ for (i = 0; (i != priv->txqs_n); ++i) {
+ if ((*priv->txqs)[i] == NULL)
+ continue;
+ idx = (*priv->txqs)[i]->stats.idx;
+ (*priv->txqs)[i]->stats =
+ (struct mlx5_txq_stats){ .idx = idx };
+ }
+#ifndef MLX5_PMD_SOFT_COUNTERS
+ /* FIXME: reset hardware counters. */
+#endif
+}
+
+/**
+ * DPDK callback to clear device extended statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx5_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ int stats_n;
+ unsigned int i;
+ unsigned int n = xstats_n;
+ uint64_t counters[n];
+ int ret;
+
+ stats_n = mlx5_ethtool_get_stats_n(dev);
+ if (stats_n < 0) {
+ DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id,
+ strerror(-stats_n));
+ return;
+ }
+ if (xstats_ctrl->stats_n != stats_n)
+ mlx5_xstats_init(dev);
+ ret = mlx5_read_dev_counters(dev, counters);
+ if (ret) {
+ DRV_LOG(ERR, "port %u cannot read device counters: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return;
+ }
+ for (i = 0; i != n; ++i)
+ xstats_ctrl->base[i] = counters[i];
+}
+
+/**
+ * DPDK callback to retrieve names of extended device statistics
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] xstats_names
+ * Buffer to insert names into.
+ * @param n
+ * Number of names.
+ *
+ * @return
+ * Number of xstats names.
+ */
+int
+mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names, unsigned int n)
+{
+ unsigned int i;
+
+ if (n >= xstats_n && xstats_names) {
+ for (i = 0; i != xstats_n; ++i) {
+ strncpy(xstats_names[i].name,
+ mlx5_counters_init[i].dpdk_name,
+ RTE_ETH_XSTATS_NAME_SIZE);
+ xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
+ }
+ }
+ return xstats_n;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_trigger.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_trigger.c
new file mode 100644
index 00000000..e2a9bb70
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_trigger.c
@@ -0,0 +1,404 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#include <unistd.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_interrupts.h>
+#include <rte_alarm.h>
+
+#include "mlx5.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_utils.h"
+
+/**
+ * Stop traffic on Tx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mlx5_txq_stop(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i != priv->txqs_n; ++i)
+ mlx5_txq_release(dev, i);
+}
+
+/**
+ * Start traffic on Tx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_txq_start(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i != priv->txqs_n; ++i) {
+ struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
+
+ if (!txq_ctrl)
+ continue;
+ txq_alloc_elts(txq_ctrl);
+ txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i);
+ if (!txq_ctrl->ibv) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ }
+ ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
+ if (ret) {
+ /* Adjust index for rollback. */
+ i = priv->txqs_n - 1;
+ goto error;
+ }
+ return 0;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ do {
+ mlx5_txq_release(dev, i);
+ } while (i-- != 0);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+/**
+ * Stop traffic on Rx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mlx5_rxq_stop(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i != priv->rxqs_n; ++i)
+ mlx5_rxq_release(dev, i);
+}
+
+/**
+ * Start traffic on Rx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_rxq_start(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+ int ret = 0;
+
+ /* Allocate/reuse/resize mempool for Multi-Packet RQ. */
+ if (mlx5_mprq_alloc_mp(dev)) {
+ /* Should not release Rx queues but return immediately. */
+ return -rte_errno;
+ }
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
+ struct rte_mempool *mp;
+
+ if (!rxq_ctrl)
+ continue;
+ /* Pre-register Rx mempool. */
+ mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
+ DRV_LOG(DEBUG,
+ "port %u Rx queue %u registering"
+ " mp %s having %u chunks",
+ dev->data->port_id, rxq_ctrl->idx,
+ mp->name, mp->nb_mem_chunks);
+ mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
+ ret = rxq_alloc_elts(rxq_ctrl);
+ if (ret)
+ goto error;
+ rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i);
+ if (!rxq_ctrl->ibv)
+ goto error;
+ }
+ return 0;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ do {
+ mlx5_rxq_release(dev, i);
+ } while (i-- != 0);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+/**
+ * DPDK callback to start the device.
+ *
+ * Simulate device start by attaching all configured flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_start(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
+ DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
+ ret = mlx5_txq_start(dev);
+ if (ret) {
+ DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return -rte_errno;
+ }
+ ret = mlx5_rxq_start(dev);
+ if (ret) {
+ DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ mlx5_txq_stop(dev);
+ return -rte_errno;
+ }
+ dev->data->dev_started = 1;
+ ret = mlx5_rx_intr_vec_enable(dev);
+ if (ret) {
+ DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
+ dev->data->port_id);
+ goto error;
+ }
+ mlx5_xstats_init(dev);
+ ret = mlx5_traffic_enable(dev);
+ if (ret) {
+ DRV_LOG(DEBUG, "port %u failed to set defaults flows",
+ dev->data->port_id);
+ goto error;
+ }
+ ret = mlx5_flow_start(dev, &priv->flows);
+ if (ret) {
+ DRV_LOG(DEBUG, "port %u failed to set flows",
+ dev->data->port_id);
+ goto error;
+ }
+ dev->tx_pkt_burst = mlx5_select_tx_function(dev);
+ dev->rx_pkt_burst = mlx5_select_rx_function(dev);
+ mlx5_dev_interrupt_handler_install(dev);
+ return 0;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ /* Rollback. */
+ dev->data->dev_started = 0;
+ mlx5_flow_stop(dev, &priv->flows);
+ mlx5_traffic_disable(dev);
+ mlx5_txq_stop(dev);
+ mlx5_rxq_stop(dev);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+/**
+ * DPDK callback to stop the device.
+ *
+ * Simulate device stop by detaching all configured flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx5_dev_stop(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ dev->data->dev_started = 0;
+ /* Prevent crashes when queues are still in use. */
+ dev->rx_pkt_burst = removed_rx_burst;
+ dev->tx_pkt_burst = removed_tx_burst;
+ rte_wmb();
+ usleep(1000 * priv->rxqs_n);
+ DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
+ mlx5_flow_stop(dev, &priv->flows);
+ mlx5_traffic_disable(dev);
+ mlx5_rx_intr_vec_disable(dev);
+ mlx5_dev_interrupt_handler_uninstall(dev);
+ mlx5_txq_stop(dev);
+ mlx5_rxq_stop(dev);
+}
+
+/**
+ * Enable traffic flows configured by control plane
+ *
+ * @param dev
+ * Pointer to Ethernet device private data.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_traffic_enable(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow_item_eth bcast = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ };
+ struct rte_flow_item_eth ipv6_multi_spec = {
+ .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
+ };
+ struct rte_flow_item_eth ipv6_multi_mask = {
+ .dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
+ };
+ struct rte_flow_item_eth unicast = {
+ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ };
+ struct rte_flow_item_eth unicast_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ };
+ const unsigned int vlan_filter_n = priv->vlan_filter_n;
+ const struct ether_addr cmp = {
+ .addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ };
+ unsigned int i;
+ unsigned int j;
+ int ret;
+
+ if (priv->isolated)
+ return 0;
+ if (dev->data->promiscuous) {
+ struct rte_flow_item_eth promisc = {
+ .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .type = 0,
+ };
+
+ ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
+ if (ret)
+ goto error;
+ }
+ if (dev->data->all_multicast) {
+ struct rte_flow_item_eth multicast = {
+ .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .type = 0,
+ };
+
+ ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
+ if (ret)
+ goto error;
+ } else {
+ /* Add broadcast/multicast flows. */
+ for (i = 0; i != vlan_filter_n; ++i) {
+ uint16_t vlan = priv->vlan_filter[i];
+
+ struct rte_flow_item_vlan vlan_spec = {
+ .tci = rte_cpu_to_be_16(vlan),
+ };
+ struct rte_flow_item_vlan vlan_mask =
+ rte_flow_item_vlan_mask;
+
+ ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
+ &vlan_spec, &vlan_mask);
+ if (ret)
+ goto error;
+ ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
+ &ipv6_multi_mask,
+ &vlan_spec, &vlan_mask);
+ if (ret)
+ goto error;
+ }
+ if (!vlan_filter_n) {
+ ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
+ if (ret)
+ goto error;
+ ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
+ &ipv6_multi_mask);
+ if (ret)
+ goto error;
+ }
+ }
+ /* Add MAC address flows. */
+ for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
+ struct ether_addr *mac = &dev->data->mac_addrs[i];
+
+ if (!memcmp(mac, &cmp, sizeof(*mac)))
+ continue;
+ memcpy(&unicast.dst.addr_bytes,
+ mac->addr_bytes,
+ ETHER_ADDR_LEN);
+ for (j = 0; j != vlan_filter_n; ++j) {
+ uint16_t vlan = priv->vlan_filter[j];
+
+ struct rte_flow_item_vlan vlan_spec = {
+ .tci = rte_cpu_to_be_16(vlan),
+ };
+ struct rte_flow_item_vlan vlan_mask =
+ rte_flow_item_vlan_mask;
+
+ ret = mlx5_ctrl_flow_vlan(dev, &unicast,
+ &unicast_mask,
+ &vlan_spec,
+ &vlan_mask);
+ if (ret)
+ goto error;
+ }
+ if (!vlan_filter_n) {
+ ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
+ if (ret)
+ goto error;
+ }
+ }
+ return 0;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_flow_list_flush(dev, &priv->ctrl_flows);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+
+/**
+ * Disable traffic flows configured by control plane
+ *
+ * @param dev
+ * Pointer to Ethernet device private data.
+ */
+void
+mlx5_traffic_disable(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ mlx5_flow_list_flush(dev, &priv->ctrl_flows);
+}
+
+/**
+ * Restart traffic flows configured by control plane
+ *
+ * @param dev
+ * Pointer to Ethernet device private data.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_traffic_restart(struct rte_eth_dev *dev)
+{
+ if (dev->data->dev_started) {
+ mlx5_traffic_disable(dev);
+ return mlx5_traffic_enable(dev);
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_txq.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_txq.c
new file mode 100644
index 00000000..f9bc4739
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_txq.c
@@ -0,0 +1,903 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#include <stddef.h>
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_ethdev_driver.h>
+#include <rte_common.h>
+
+#include "mlx5_utils.h"
+#include "mlx5_defs.h"
+#include "mlx5.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_glue.h"
+
+/**
+ * Allocate TX queue elements.
+ *
+ * @param txq_ctrl
+ * Pointer to TX queue structure.
+ */
+void
+txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
+{
+ const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
+ unsigned int i;
+
+ for (i = 0; (i != elts_n); ++i)
+ (*txq_ctrl->txq.elts)[i] = NULL;
+ DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
+ PORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n);
+ txq_ctrl->txq.elts_head = 0;
+ txq_ctrl->txq.elts_tail = 0;
+ txq_ctrl->txq.elts_comp = 0;
+}
+
+/**
+ * Free TX queue elements.
+ *
+ * @param txq_ctrl
+ * Pointer to TX queue structure.
+ */
+static void
+txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
+{
+ const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ uint16_t elts_head = txq_ctrl->txq.elts_head;
+ uint16_t elts_tail = txq_ctrl->txq.elts_tail;
+ struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
+
+ DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
+ PORT_ID(txq_ctrl->priv), txq_ctrl->idx);
+ txq_ctrl->txq.elts_head = 0;
+ txq_ctrl->txq.elts_tail = 0;
+ txq_ctrl->txq.elts_comp = 0;
+
+ while (elts_tail != elts_head) {
+ struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
+
+ assert(elt != NULL);
+ rte_pktmbuf_free_seg(elt);
+#ifndef NDEBUG
+ /* Poisoning. */
+ memset(&(*elts)[elts_tail & elts_m],
+ 0x77,
+ sizeof((*elts)[elts_tail & elts_m]));
+#endif
+ ++elts_tail;
+ }
+}
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Supported Tx offloads.
+ */
+uint64_t
+mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT);
+ struct mlx5_dev_config *config = &priv->config;
+
+ if (config->hw_csum)
+ offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ if (config->tso)
+ offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (config->swp) {
+ if (config->hw_csum)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ if (config->tso)
+ offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO);
+ }
+
+ if (config->tunnel_en) {
+ if (config->hw_csum)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ if (config->tso)
+ offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ }
+ return offloads;
+}
+
+/**
+ * DPDK callback to configure a TX queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * TX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+
+ if (desc <= MLX5_TX_COMP_THRESH) {
+ DRV_LOG(WARNING,
+ "port %u number of descriptors requested for Tx queue"
+ " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
+ " instead of %u",
+ dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
+ desc = MLX5_TX_COMP_THRESH + 1;
+ }
+ if (!rte_is_power_of_2(desc)) {
+ desc = 1 << log2above(desc);
+ DRV_LOG(WARNING,
+ "port %u increased number of descriptors in Tx queue"
+ " %u to the next power of two (%d)",
+ dev->data->port_id, idx, desc);
+ }
+ DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
+ dev->data->port_id, idx, desc);
+ if (idx >= priv->txqs_n) {
+ DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
+ dev->data->port_id, idx, priv->txqs_n);
+ rte_errno = EOVERFLOW;
+ return -rte_errno;
+ }
+ if (!mlx5_txq_releasable(dev, idx)) {
+ rte_errno = EBUSY;
+ DRV_LOG(ERR, "port %u unable to release queue index %u",
+ dev->data->port_id, idx);
+ return -rte_errno;
+ }
+ mlx5_txq_release(dev, idx);
+ txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
+ if (!txq_ctrl) {
+ DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ dev->data->port_id, idx);
+ return -rte_errno;
+ }
+ DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
+ dev->data->port_id, idx);
+ (*priv->txqs)[idx] = &txq_ctrl->txq;
+ return 0;
+}
+
+/**
+ * DPDK callback to release a TX queue.
+ *
+ * @param dpdk_txq
+ * Generic TX queue pointer.
+ */
+void
+mlx5_tx_queue_release(void *dpdk_txq)
+{
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ struct mlx5_txq_ctrl *txq_ctrl;
+ struct priv *priv;
+ unsigned int i;
+
+ if (txq == NULL)
+ return;
+ txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+ priv = txq_ctrl->priv;
+ for (i = 0; (i != priv->txqs_n); ++i)
+ if ((*priv->txqs)[i] == txq) {
+ mlx5_txq_release(ETH_DEV(priv), i);
+ DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
+ PORT_ID(priv), txq_ctrl->idx);
+ break;
+ }
+}
+
+
+/**
+ * Mmap TX UAR(HW doorbell) pages into reserved UAR address space.
+ * Both primary and secondary process do mmap to make UAR address
+ * aligned.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param fd
+ * Verbs file descriptor to map UAR pages.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i, j;
+ uintptr_t pages[priv->txqs_n];
+ unsigned int pages_n = 0;
+ uintptr_t uar_va;
+ uintptr_t off;
+ void *addr;
+ void *ret;
+ struct mlx5_txq_data *txq;
+ struct mlx5_txq_ctrl *txq_ctrl;
+ int already_mapped;
+ size_t page_size = sysconf(_SC_PAGESIZE);
+#ifndef RTE_ARCH_64
+ unsigned int lock_idx;
+#endif
+
+ memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
+ /*
+ * As rdma-core, UARs are mapped in size of OS page size.
+ * Use aligned address to avoid duplicate mmap.
+ * Ref to libmlx5 function: mlx5_init_context()
+ */
+ for (i = 0; i != priv->txqs_n; ++i) {
+ if (!(*priv->txqs)[i])
+ continue;
+ txq = (*priv->txqs)[i];
+ txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+ assert(txq_ctrl->idx == (uint16_t)i);
+ /* UAR addr form verbs used to find dup and offset in page. */
+ uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
+ off = uar_va & (page_size - 1); /* offset in page. */
+ uar_va = RTE_ALIGN_FLOOR(uar_va, page_size); /* page addr. */
+ already_mapped = 0;
+ for (j = 0; j != pages_n; ++j) {
+ if (pages[j] == uar_va) {
+ already_mapped = 1;
+ break;
+ }
+ }
+ /* new address in reserved UAR address space. */
+ addr = RTE_PTR_ADD(priv->uar_base,
+ uar_va & (uintptr_t)(MLX5_UAR_SIZE - 1));
+ if (!already_mapped) {
+ pages[pages_n++] = uar_va;
+ /* fixed mmap to specified address in reserved
+ * address space.
+ */
+ ret = mmap(addr, page_size,
+ PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
+ txq_ctrl->uar_mmap_offset);
+ if (ret != addr) {
+ /* fixed mmap have to return same address */
+ DRV_LOG(ERR,
+ "port %u call to mmap failed on UAR"
+ " for txq %u",
+ dev->data->port_id, txq_ctrl->idx);
+ rte_errno = ENXIO;
+ return -rte_errno;
+ }
+ }
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */
+ txq_ctrl->txq.bf_reg = RTE_PTR_ADD((void *)addr, off);
+ else
+ assert(txq_ctrl->txq.bf_reg ==
+ RTE_PTR_ADD((void *)addr, off));
+#ifndef RTE_ARCH_64
+ /* Assign a UAR lock according to UAR page number */
+ lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
+ MLX5_UAR_PAGE_NUM_MASK;
+ txq->uar_lock = &priv->uar_lock[lock_idx];
+#endif
+ }
+ return 0;
+}
+
+/**
+ * Check if the burst function is using eMPW.
+ *
+ * @param tx_pkt_burst
+ * Tx burst function pointer.
+ *
+ * @return
+ * 1 if the burst function is using eMPW, 0 otherwise.
+ */
+static int
+is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
+{
+ if (tx_pkt_burst == mlx5_tx_burst_raw_vec ||
+ tx_pkt_burst == mlx5_tx_burst_vec ||
+ tx_pkt_burst == mlx5_tx_burst_empw)
+ return 1;
+ return 0;
+}
+
+/**
+ * Create the Tx queue Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Rx queue array
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_txq_ibv *
+mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq_data, struct mlx5_txq_ctrl, txq);
+ struct mlx5_txq_ibv tmpl;
+ struct mlx5_txq_ibv *txq_ibv;
+ union {
+ struct ibv_qp_init_attr_ex init;
+ struct ibv_cq_init_attr_ex cq;
+ struct ibv_qp_attr mod;
+ struct ibv_cq_ex cq_attr;
+ } attr;
+ unsigned int cqe_n;
+ struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
+ struct mlx5dv_cq cq_info;
+ struct mlx5dv_obj obj;
+ const int desc = 1 << txq_data->elts_n;
+ eth_tx_burst_t tx_pkt_burst = mlx5_select_tx_function(dev);
+ int ret = 0;
+
+ assert(txq_data);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
+ priv->verbs_alloc_ctx.obj = txq_ctrl;
+ if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
+ DRV_LOG(ERR,
+ "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
+ attr.cq = (struct ibv_cq_init_attr_ex){
+ .comp_mask = 0,
+ };
+ cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
+ ((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
+ if (is_empw_burst_func(tx_pkt_burst))
+ cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
+ tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
+ if (tmpl.cq == NULL) {
+ DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
+ dev->data->port_id, idx);
+ rte_errno = errno;
+ goto error;
+ }
+ attr.init = (struct ibv_qp_init_attr_ex){
+ /* CQ to be associated with the send queue. */
+ .send_cq = tmpl.cq,
+ /* CQ to be associated with the receive queue. */
+ .recv_cq = tmpl.cq,
+ .cap = {
+ /* Max number of outstanding WRs. */
+ .max_send_wr =
+ ((priv->device_attr.orig_attr.max_qp_wr <
+ desc) ?
+ priv->device_attr.orig_attr.max_qp_wr :
+ desc),
+ /*
+ * Max number of scatter/gather elements in a WR,
+ * must be 1 to prevent libmlx5 from trying to affect
+ * too much memory. TX gather is not impacted by the
+ * priv->device_attr.max_sge limit and will still work
+ * properly.
+ */
+ .max_send_sge = 1,
+ },
+ .qp_type = IBV_QPT_RAW_PACKET,
+ /*
+ * Do *NOT* enable this, completions events are managed per
+ * Tx burst.
+ */
+ .sq_sig_all = 0,
+ .pd = priv->pd,
+ .comp_mask = IBV_QP_INIT_ATTR_PD,
+ };
+ if (txq_data->max_inline)
+ attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
+ if (txq_data->tso_en) {
+ attr.init.max_tso_header = txq_ctrl->max_tso_header;
+ attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
+ }
+ tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
+ if (tmpl.qp == NULL) {
+ DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
+ dev->data->port_id, idx);
+ rte_errno = errno;
+ goto error;
+ }
+ attr.mod = (struct ibv_qp_attr){
+ /* Move the QP to this state. */
+ .qp_state = IBV_QPS_INIT,
+ /* Primary port number. */
+ .port_num = 1,
+ };
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
+ (IBV_QP_STATE | IBV_QP_PORT));
+ if (ret) {
+ DRV_LOG(ERR,
+ "port %u Tx queue %u QP state to IBV_QPS_INIT failed",
+ dev->data->port_id, idx);
+ rte_errno = errno;
+ goto error;
+ }
+ attr.mod = (struct ibv_qp_attr){
+ .qp_state = IBV_QPS_RTR
+ };
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
+ if (ret) {
+ DRV_LOG(ERR,
+ "port %u Tx queue %u QP state to IBV_QPS_RTR failed",
+ dev->data->port_id, idx);
+ rte_errno = errno;
+ goto error;
+ }
+ attr.mod.qp_state = IBV_QPS_RTS;
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
+ if (ret) {
+ DRV_LOG(ERR,
+ "port %u Tx queue %u QP state to IBV_QPS_RTS failed",
+ dev->data->port_id, idx);
+ rte_errno = errno;
+ goto error;
+ }
+ txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
+ txq_ctrl->socket);
+ if (!txq_ibv) {
+ DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ obj.cq.in = tmpl.cq;
+ obj.cq.out = &cq_info;
+ obj.qp.in = tmpl.qp;
+ obj.qp.out = &qp;
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
+ if (ret != 0) {
+ rte_errno = errno;
+ goto error;
+ }
+ if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
+ DRV_LOG(ERR,
+ "port %u wrong MLX5_CQE_SIZE environment variable"
+ " value: it should be set to %u",
+ dev->data->port_id, RTE_CACHE_LINE_SIZE);
+ rte_errno = EINVAL;
+ goto error;
+ }
+ txq_data->cqe_n = log2above(cq_info.cqe_cnt);
+ txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
+ txq_data->wqes = qp.sq.buf;
+ txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
+ txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
+ txq_ctrl->bf_reg_orig = qp.bf.reg;
+ txq_data->cq_db = cq_info.dbrec;
+ txq_data->cqes =
+ (volatile struct mlx5_cqe (*)[])
+ (uintptr_t)cq_info.buf;
+ txq_data->cq_ci = 0;
+#ifndef NDEBUG
+ txq_data->cq_pi = 0;
+#endif
+ txq_data->wqe_ci = 0;
+ txq_data->wqe_pi = 0;
+ txq_ibv->qp = tmpl.qp;
+ txq_ibv->cq = tmpl.cq;
+ rte_atomic32_inc(&txq_ibv->refcnt);
+ if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
+ txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
+ DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%lx",
+ dev->data->port_id, txq_ctrl->uar_mmap_offset);
+ } else {
+ DRV_LOG(ERR,
+ "port %u failed to retrieve UAR info, invalid"
+ " libmlx5.so",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ goto error;
+ }
+ LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
+ txq_ibv->txq_ctrl = txq_ctrl;
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ return txq_ibv;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (tmpl.cq)
+ claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
+ if (tmpl.qp)
+ claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ rte_errno = ret; /* Restore rte_errno. */
+ return NULL;
+}
+
+/**
+ * Get an Tx queue Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Rx queue array
+ *
+ * @return
+ * The Verbs object if it exists.
+ */
+struct mlx5_txq_ibv *
+mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_ctrl *txq_ctrl;
+
+ if (idx >= priv->txqs_n)
+ return NULL;
+ if (!(*priv->txqs)[idx])
+ return NULL;
+ txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+ if (txq_ctrl->ibv)
+ rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
+ return txq_ctrl->ibv;
+}
+
+/**
+ * Release an Tx verbs queue object.
+ *
+ * @param txq_ibv
+ * Verbs Tx queue object.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int
+mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
+{
+ assert(txq_ibv);
+ if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
+ claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
+ claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
+ LIST_REMOVE(txq_ibv, next);
+ rte_free(txq_ibv);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Return true if a single reference exists on the object.
+ *
+ * @param txq_ibv
+ * Verbs Tx queue object.
+ */
+int
+mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv)
+{
+ assert(txq_ibv);
+ return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
+}
+
+/**
+ * Verify the Verbs Tx queue list is empty
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of object not released.
+ */
+int
+mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret = 0;
+ struct mlx5_txq_ibv *txq_ibv;
+
+ LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
+ DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
+ dev->data->port_id, txq_ibv->txq_ctrl->idx);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Set Tx queue parameters from device configuration.
+ *
+ * @param txq_ctrl
+ * Pointer to Tx queue control structure.
+ */
+static void
+txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
+{
+ struct priv *priv = txq_ctrl->priv;
+ struct mlx5_dev_config *config = &priv->config;
+ const unsigned int max_tso_inline =
+ ((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE);
+ unsigned int txq_inline;
+ unsigned int txqs_inline;
+ unsigned int inline_max_packet_sz;
+ eth_tx_burst_t tx_pkt_burst =
+ mlx5_select_tx_function(ETH_DEV(priv));
+ int is_empw_func = is_empw_burst_func(tx_pkt_burst);
+ int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO));
+
+ txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
+ 0 : config->txq_inline;
+ txqs_inline = (config->txqs_inline == MLX5_ARG_UNSET) ?
+ 0 : config->txqs_inline;
+ inline_max_packet_sz =
+ (config->inline_max_packet_sz == MLX5_ARG_UNSET) ?
+ 0 : config->inline_max_packet_sz;
+ if (is_empw_func) {
+ if (config->txq_inline == MLX5_ARG_UNSET)
+ txq_inline = MLX5_WQE_SIZE_MAX - MLX5_WQE_SIZE;
+ if (config->txqs_inline == MLX5_ARG_UNSET)
+ txqs_inline = MLX5_EMPW_MIN_TXQS;
+ if (config->inline_max_packet_sz == MLX5_ARG_UNSET)
+ inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
+ txq_ctrl->txq.mpw_hdr_dseg = config->mpw_hdr_dseg;
+ txq_ctrl->txq.inline_max_packet_sz = inline_max_packet_sz;
+ }
+ if (txq_inline && priv->txqs_n >= txqs_inline) {
+ unsigned int ds_cnt;
+
+ txq_ctrl->txq.max_inline =
+ ((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE);
+ if (is_empw_func) {
+ /* To minimize the size of data set, avoid requesting
+ * too large WQ.
+ */
+ txq_ctrl->max_inline_data =
+ ((RTE_MIN(txq_inline,
+ inline_max_packet_sz) +
+ (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
+ } else {
+ txq_ctrl->max_inline_data =
+ txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
+ }
+ /*
+ * Check if the inline size is too large in a way which
+ * can make the WQE DS to overflow.
+ * Considering in calculation:
+ * WQE CTRL (1 DS)
+ * WQE ETH (1 DS)
+ * Inline part (N DS)
+ */
+ ds_cnt = 2 + (txq_ctrl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
+ if (ds_cnt > MLX5_DSEG_MAX) {
+ unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
+ MLX5_WQE_DWORD_SIZE;
+
+ max_inline = max_inline - (max_inline %
+ RTE_CACHE_LINE_SIZE);
+ DRV_LOG(WARNING,
+ "port %u txq inline is too large (%d) setting"
+ " it to the maximum possible: %d\n",
+ PORT_ID(priv), txq_inline, max_inline);
+ txq_ctrl->txq.max_inline = max_inline /
+ RTE_CACHE_LINE_SIZE;
+ }
+ }
+ if (tso) {
+ txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
+ txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
+ max_tso_inline);
+ txq_ctrl->txq.tso_en = 1;
+ }
+ txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
+ txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
+ txq_ctrl->txq.offloads) && config->swp;
+}
+
+/**
+ * Create a DPDK Tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * TX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ *
+ * @return
+ * A DPDK queue object on success, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_txq_ctrl *
+mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_ctrl *tmpl;
+
+ tmpl = rte_calloc_socket("TXQ", 1,
+ sizeof(*tmpl) +
+ desc * sizeof(struct rte_mbuf *),
+ 0, socket);
+ if (!tmpl) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
+ MLX5_MR_BTREE_CACHE_N, socket)) {
+ /* rte_errno is already set. */
+ goto error;
+ }
+ /* Save pointer of global generation number to check memory event. */
+ tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;
+ assert(desc > MLX5_TX_COMP_THRESH);
+ tmpl->txq.offloads = conf->offloads |
+ dev->data->dev_conf.txmode.offloads;
+ tmpl->priv = priv;
+ tmpl->socket = socket;
+ tmpl->txq.elts_n = log2above(desc);
+ tmpl->idx = idx;
+ txq_set_params(tmpl);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_sge);
+ tmpl->txq.elts =
+ (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
+ tmpl->txq.stats.idx = idx;
+ rte_atomic32_inc(&tmpl->refcnt);
+ LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
+ return tmpl;
+error:
+ rte_free(tmpl);
+ return NULL;
+}
+
+/**
+ * Get a Tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists.
+ */
+struct mlx5_txq_ctrl *
+mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_ctrl *ctrl = NULL;
+
+ if ((*priv->txqs)[idx]) {
+ ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
+ txq);
+ mlx5_txq_ibv_get(dev, idx);
+ rte_atomic32_inc(&ctrl->refcnt);
+ }
+ return ctrl;
+}
+
+/**
+ * Release a Tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int
+mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_ctrl *txq;
+ size_t page_size = sysconf(_SC_PAGESIZE);
+
+ if (!(*priv->txqs)[idx])
+ return 0;
+ txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+ if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
+ txq->ibv = NULL;
+ if (priv->uar_base)
+ munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
+ page_size), page_size);
+ if (rte_atomic32_dec_and_test(&txq->refcnt)) {
+ txq_free_elts(txq);
+ mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
+ LIST_REMOVE(txq, next);
+ rte_free(txq);
+ (*priv->txqs)[idx] = NULL;
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Verify if the queue can be released.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * 1 if the queue can be released.
+ */
+int
+mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_ctrl *txq;
+
+ if (!(*priv->txqs)[idx])
+ return -1;
+ txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+ return (rte_atomic32_read(&txq->refcnt) == 1);
+}
+
+/**
+ * Verify the Tx Queue list is empty
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of object not released.
+ */
+int
+mlx5_txq_verify(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_ctrl *txq;
+ int ret = 0;
+
+ LIST_FOREACH(txq, &priv->txqsctrl, next) {
+ DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
+ dev->data->port_id, txq->idx);
+ ++ret;
+ }
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.h
new file mode 100644
index 00000000..886f60e6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_UTILS_H_
+#define RTE_PMD_MLX5_UTILS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <limits.h>
+#include <assert.h>
+#include <errno.h>
+
+#include "mlx5_defs.h"
+
+/* Bit-field manipulation. */
+#define BITFIELD_DECLARE(bf, type, size) \
+ type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
+ !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
+#define BITFIELD_DEFINE(bf, type, size) \
+ BITFIELD_DECLARE((bf), type, (size)) = { 0 }
+#define BITFIELD_SET(bf, b) \
+ (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
+ (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
+ ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
+#define BITFIELD_RESET(bf, b) \
+ (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
+ (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
+ ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
+#define BITFIELD_ISSET(bf, b) \
+ (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
+ !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
+ ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
+
+/* Convert a bit number to the corresponding 64-bit mask */
+#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))
+
+/* Save and restore errno around argument evaluation. */
+#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))
+
+/*
+ * Helper macros to work around __VA_ARGS__ limitations in a C99 compliant
+ * manner.
+ */
+#define PMD_DRV_LOG_STRIP(a, b) a
+#define PMD_DRV_LOG_OPAREN (
+#define PMD_DRV_LOG_CPAREN )
+#define PMD_DRV_LOG_COMMA ,
+
+/* Return the file name part of a path. */
+static inline const char *
+pmd_drv_log_basename(const char *s)
+{
+ const char *n = s;
+
+ while (*n)
+ if (*(n++) == '/')
+ s = n;
+ return s;
+}
+
+extern int mlx5_logtype;
+
+#define PMD_DRV_LOG___(level, ...) \
+ rte_log(RTE_LOG_ ## level, \
+ mlx5_logtype, \
+ RTE_FMT(MLX5_DRIVER_NAME ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,), \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+
+/*
+ * When debugging is enabled (NDEBUG not defined), file, line and function
+ * information replace the driver name (MLX5_DRIVER_NAME) in log messages.
+ */
+#ifndef NDEBUG
+
+#define PMD_DRV_LOG__(level, ...) \
+ PMD_DRV_LOG___(level, "%s:%u: %s(): " __VA_ARGS__)
+#define PMD_DRV_LOG_(level, s, ...) \
+ PMD_DRV_LOG__(level, \
+ s "\n" PMD_DRV_LOG_COMMA \
+ pmd_drv_log_basename(__FILE__) PMD_DRV_LOG_COMMA \
+ __LINE__ PMD_DRV_LOG_COMMA \
+ __func__, \
+ __VA_ARGS__)
+
+#else /* NDEBUG */
+#define PMD_DRV_LOG__(level, ...) \
+ PMD_DRV_LOG___(level, __VA_ARGS__)
+#define PMD_DRV_LOG_(level, s, ...) \
+ PMD_DRV_LOG__(level, s "\n", __VA_ARGS__)
+
+#endif /* NDEBUG */
+
+/* Generic printf()-like logging macro with automatic line feed. */
+#define DRV_LOG(level, ...) \
+ PMD_DRV_LOG_(level, \
+ __VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
+ PMD_DRV_LOG_CPAREN)
+
+/* claim_zero() does not perform any check when debugging is disabled. */
+#ifndef NDEBUG
+
+#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
+#define claim_zero(...) assert((__VA_ARGS__) == 0)
+#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
+
+#else /* NDEBUG */
+
+#define DEBUG(...) (void)0
+#define claim_zero(...) (__VA_ARGS__)
+#define claim_nonzero(...) (__VA_ARGS__)
+
+#endif /* NDEBUG */
+
+#define INFO(...) DRV_LOG(INFO, __VA_ARGS__)
+#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)
+#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)
+
+/* Convenience macros for accessing mbuf fields. */
+#define NEXT(m) ((m)->next)
+#define DATA_LEN(m) ((m)->data_len)
+#define PKT_LEN(m) ((m)->pkt_len)
+#define DATA_OFF(m) ((m)->data_off)
+#define SET_DATA_OFF(m, o) ((m)->data_off = (o))
+#define NB_SEGS(m) ((m)->nb_segs)
+#define PORT(m) ((m)->port)
+
+/* Transpose flags. Useful to convert IBV to DPDK flags. */
+#define TRANSPOSE(val, from, to) \
+ (((from) >= (to)) ? \
+ (((val) & (from)) / ((from) / (to))) : \
+ (((val) & (from)) * ((to) / (from))))
+
+/* Allocate a buffer on the stack and fill it with a printf format string. */
+#define MKSTR(name, ...) \
+ char name[snprintf(NULL, 0, __VA_ARGS__) + 1]; \
+ \
+ snprintf(name, sizeof(name), __VA_ARGS__)
+
+/**
+ * Return nearest power of two above input value.
+ *
+ * @param v
+ * Input value.
+ *
+ * @return
+ * Nearest power of two above input value.
+ */
+static inline unsigned int
+log2above(unsigned int v)
+{
+ unsigned int l;
+ unsigned int r;
+
+ for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
+ r |= (v & 1);
+ return l + r;
+}
+
+#endif /* RTE_PMD_MLX5_UTILS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_vlan.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_vlan.c
new file mode 100644
index 00000000..c91d08be
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_vlan.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox Technologies, Ltd
+ */
+
+#include <stddef.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdint.h>
+
+/*
+ * Not needed by this file; included to work around the lack of off_t
+ * definition for mlx5dv.h with unpatched rdma-core versions.
+ */
+#include <sys/types.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_ethdev_driver.h>
+#include <rte_common.h>
+
+#include "mlx5_utils.h"
+#include "mlx5.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_glue.h"
+
+/**
+ * DPDK callback to configure a VLAN filter.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param vlan_id
+ * VLAN ID to filter.
+ * @param on
+ * Toggle filter.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
+ dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
+ assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
+ for (i = 0; (i != priv->vlan_filter_n); ++i)
+ if (priv->vlan_filter[i] == vlan_id)
+ break;
+ /* Check if there's room for another VLAN filter. */
+ if (i == RTE_DIM(priv->vlan_filter)) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ if (i < priv->vlan_filter_n) {
+ assert(priv->vlan_filter_n != 0);
+ /* Enabling an existing VLAN filter has no effect. */
+ if (on)
+ goto out;
+ /* Remove VLAN filter from list. */
+ --priv->vlan_filter_n;
+ memmove(&priv->vlan_filter[i],
+ &priv->vlan_filter[i + 1],
+ sizeof(priv->vlan_filter[i]) *
+ (priv->vlan_filter_n - i));
+ priv->vlan_filter[priv->vlan_filter_n] = 0;
+ } else {
+ assert(i == priv->vlan_filter_n);
+ /* Disabling an unknown VLAN filter has no effect. */
+ if (!on)
+ goto out;
+ /* Add new VLAN filter. */
+ priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
+ ++priv->vlan_filter_n;
+ }
+out:
+ if (dev->data->dev_started)
+ return mlx5_traffic_restart(dev);
+ return 0;
+}
+
+/**
+ * Callback to set/reset VLAN stripping for a specific queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param queue
+ * RX queue index.
+ * @param on
+ * Enable/disable VLAN stripping.
+ */
+void
+mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct ibv_wq_attr mod;
+ uint16_t vlan_offloads =
+ (on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
+ 0;
+ int ret;
+
+ /* Validate hw support */
+ if (!priv->config.hw_vlan_strip) {
+ DRV_LOG(ERR, "port %u VLAN stripping is not supported",
+ dev->data->port_id);
+ return;
+ }
+ /* Validate queue number */
+ if (queue >= priv->rxqs_n) {
+ DRV_LOG(ERR, "port %u VLAN stripping, invalid queue number %d",
+ dev->data->port_id, queue);
+ return;
+ }
+ DRV_LOG(DEBUG, "port %u set VLAN offloads 0x%x for port %uqueue %d",
+ dev->data->port_id, vlan_offloads, rxq->port_id, queue);
+ if (!rxq_ctrl->ibv) {
+ /* Update related bits in RX queue. */
+ rxq->vlan_strip = !!on;
+ return;
+ }
+ mod = (struct ibv_wq_attr){
+ .attr_mask = IBV_WQ_ATTR_FLAGS,
+ .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
+ .flags = vlan_offloads,
+ };
+ ret = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
+ if (ret) {
+ DRV_LOG(ERR, "port %u failed to modified stripping mode: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return;
+ }
+ /* Update related bits in RX queue. */
+ rxq->vlan_strip = !!on;
+}
+
+/**
+ * Callback to set/reset VLAN offloads for a port.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mask
+ * VLAN offload bit mask.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP);
+
+ if (!priv->config.hw_vlan_strip) {
+ DRV_LOG(ERR, "port %u VLAN stripping is not supported",
+ dev->data->port_id);
+ return 0;
+ }
+ /* Run on every RX queue and set/reset VLAN stripping. */
+ for (i = 0; (i != priv->rxqs_n); i++)
+ mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip);
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx5/rte_pmd_mlx5_version.map b/src/spdk/dpdk/drivers/net/mlx5/rte_pmd_mlx5_version.map
new file mode 100644
index 00000000..ad607bbe
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx5/rte_pmd_mlx5_version.map
@@ -0,0 +1,3 @@
+DPDK_2.2 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/mvpp2/Makefile b/src/spdk/dpdk/drivers/net/mvpp2/Makefile
new file mode 100644
index 00000000..492aef97
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mvpp2/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Marvell International Ltd.
+# Copyright(c) 2017 Semihalf.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_mvpp2.a
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_mvpp2_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+LDLIBS += -L$(LIBMUSDK_PATH)/lib
+LDLIBS += -lmusdk
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_qos.c
+SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_flow.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/mvpp2/meson.build b/src/spdk/dpdk/drivers/net/mvpp2/meson.build
new file mode 100644
index 00000000..e1398895
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mvpp2/meson.build
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+
+path = get_option('lib_musdk_dir')
+lib_dir = path + '/lib'
+inc_dir = path + '/include'
+
+lib = cc.find_library('libmusdk', dirs : [lib_dir], required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+ includes += include_directories(inc_dir)
+ cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC']
+endif
+
+sources = files(
+ 'mrvl_ethdev.c',
+ 'mrvl_flow.c',
+ 'mrvl_qos.c'
+)
+
+deps += ['cfgfile']
diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.c b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.c
new file mode 100644
index 00000000..a2d0576e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.c
@@ -0,0 +1,2761 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_kvargs.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus_vdev.h>
+
+/* Unluckily, container_of is defined by both DPDK and MUSDK,
+ * we'll declare only one version.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+
+#include <fcntl.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "mrvl_ethdev.h"
+#include "mrvl_qos.h"
+
+/* bitmask with reserved hifs */
+#define MRVL_MUSDK_HIFS_RESERVED 0x0F
+/* bitmask with reserved bpools */
+#define MRVL_MUSDK_BPOOLS_RESERVED 0x07
+/* bitmask with reserved kernel RSS tables */
+#define MRVL_MUSDK_RSS_RESERVED 0x01
+/* maximum number of available hifs */
+#define MRVL_MUSDK_HIFS_MAX 9
+
+/* prefetch shift */
+#define MRVL_MUSDK_PREFETCH_SHIFT 2
+
+/* TCAM has 25 entries reserved for uc/mc filter entries */
+#define MRVL_MAC_ADDRS_MAX 25
+#define MRVL_MATCH_LEN 16
+#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
+/* Maximum allowable packet size */
+#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)
+
+#define MRVL_IFACE_NAME_ARG "iface"
+#define MRVL_CFG_ARG "cfg"
+
+#define MRVL_BURST_SIZE 64
+
+#define MRVL_ARP_LENGTH 28
+
+#define MRVL_COOKIE_ADDR_INVALID ~0ULL
+
+#define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8)
+#define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
+
+/* Memory size (in bytes) for MUSDK dma buffers */
+#define MRVL_MUSDK_DMA_MEMSIZE 41943040
+
+/** Port Rx offload capabilities */
+#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_CRC_STRIP | \
+ DEV_RX_OFFLOAD_CHECKSUM)
+
+/** Port Tx offloads capabilities */
+#define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM)
+
+static const char * const valid_args[] = {
+ MRVL_IFACE_NAME_ARG,
+ MRVL_CFG_ARG,
+ NULL
+};
+
+static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
+static struct pp2_hif *hifs[RTE_MAX_LCORE];
+static int used_bpools[PP2_NUM_PKT_PROC] = {
+ MRVL_MUSDK_BPOOLS_RESERVED,
+ MRVL_MUSDK_BPOOLS_RESERVED
+};
+
+struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
+int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
+uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
+
+int mrvl_logtype;
+
+struct mrvl_ifnames {
+ const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
+ int idx;
+};
+
+/*
+ * To use buffer harvesting based on loopback port shadow queue structure
+ * was introduced for buffers information bookkeeping.
+ *
+ * Before sending the packet, related buffer information (pp2_buff_inf) is
+ * stored in shadow queue. After packet is transmitted no longer used
+ * packet buffer is released back to it's original hardware pool,
+ * on condition it originated from interface.
+ * In case it was generated by application itself i.e: mbuf->port field is
+ * 0xff then its released to software mempool.
+ */
+struct mrvl_shadow_txq {
+ int head; /* write index - used when sending buffers */
+ int tail; /* read index - used when releasing buffers */
+ u16 size; /* queue occupied size */
+ u16 num_to_release; /* number of buffers sent, that can be released */
+ struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
+};
+
+struct mrvl_rxq {
+ struct mrvl_priv *priv;
+ struct rte_mempool *mp;
+ int queue_id;
+ int port_id;
+ int cksum_enabled;
+ uint64_t bytes_recv;
+ uint64_t drop_mac;
+};
+
+struct mrvl_txq {
+ struct mrvl_priv *priv;
+ int queue_id;
+ int port_id;
+ uint64_t bytes_sent;
+ struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
+ int tx_deferred_start;
+};
+
+static int mrvl_lcore_first;
+static int mrvl_lcore_last;
+static int mrvl_dev_num;
+
+static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
+static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
+ struct pp2_hif *hif, unsigned int core_id,
+ struct mrvl_shadow_txq *sq, int qid, int force);
+
+#define MRVL_XSTATS_TBL_ENTRY(name) { \
+ #name, offsetof(struct pp2_ppio_statistics, name), \
+ sizeof(((struct pp2_ppio_statistics *)0)->name) \
+}
+
+/* Table with xstats data */
+static struct {
+ const char *name;
+ unsigned int offset;
+ unsigned int size;
+} mrvl_xstats_tbl[] = {
+ MRVL_XSTATS_TBL_ENTRY(rx_bytes),
+ MRVL_XSTATS_TBL_ENTRY(rx_packets),
+ MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
+ MRVL_XSTATS_TBL_ENTRY(rx_errors),
+ MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
+ MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
+ MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
+ MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
+ MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
+ MRVL_XSTATS_TBL_ENTRY(tx_bytes),
+ MRVL_XSTATS_TBL_ENTRY(tx_packets),
+ MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
+ MRVL_XSTATS_TBL_ENTRY(tx_errors)
+};
+
+static inline int
+mrvl_get_bpool_size(int pp2_id, int pool_id)
+{
+ int i;
+ int size = 0;
+
+ for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
+ size += mrvl_port_bpool_size[pp2_id][pool_id][i];
+
+ return size;
+}
+
+static inline int
+mrvl_reserve_bit(int *bitmap, int max)
+{
+ int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);
+
+ if (n >= max)
+ return -1;
+
+ *bitmap |= 1 << n;
+
+ return n;
+}
+
+static int
+mrvl_init_hif(int core_id)
+{
+ struct pp2_hif_params params;
+ char match[MRVL_MATCH_LEN];
+ int ret;
+
+ ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
+ if (ret < 0) {
+ MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
+ return ret;
+ }
+
+ snprintf(match, sizeof(match), "hif-%d", ret);
+ memset(&params, 0, sizeof(params));
+ params.match = match;
+ params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
+ ret = pp2_hif_init(&params, &hifs[core_id]);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline struct pp2_hif*
+mrvl_get_hif(struct mrvl_priv *priv, int core_id)
+{
+ int ret;
+
+ if (likely(hifs[core_id] != NULL))
+ return hifs[core_id];
+
+ rte_spinlock_lock(&priv->lock);
+
+ ret = mrvl_init_hif(core_id);
+ if (ret < 0) {
+ MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
+ goto out;
+ }
+
+ if (core_id < mrvl_lcore_first)
+ mrvl_lcore_first = core_id;
+
+ if (core_id > mrvl_lcore_last)
+ mrvl_lcore_last = core_id;
+out:
+ rte_spinlock_unlock(&priv->lock);
+
+ return hifs[core_id];
+}
+
+/**
+ * Configure rss based on dpdk rss configuration.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param rss_conf
+ * Pointer to RSS configuration.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
+{
+ if (rss_conf->rss_key)
+ MRVL_LOG(WARNING, "Changing hash key is not supported");
+
+ if (rss_conf->rss_hf == 0) {
+ priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
+ } else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+ priv->ppio_params.inqs_params.hash_type =
+ PP2_PPIO_HASH_T_2_TUPLE;
+ } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ priv->ppio_params.inqs_params.hash_type =
+ PP2_PPIO_HASH_T_5_TUPLE;
+ priv->rss_hf_tcp = 1;
+ } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ priv->ppio_params.inqs_params.hash_type =
+ PP2_PPIO_HASH_T_5_TUPLE;
+ priv->rss_hf_tcp = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Ethernet device configuration.
+ *
+ * Prepare the driver for a given number of TX and RX queues and
+ * configure RSS.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_dev_configure(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
+ dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
+ dev->data->dev_conf.rxmode.mq_mode);
+ return -EINVAL;
+ }
+
+ /* KEEP_CRC offload flag is not supported by PMD
+ * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
+ */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) {
+ MRVL_LOG(INFO, "L2 CRC stripping is always enabled in hw");
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ }
+
+ if (dev->data->dev_conf.rxmode.split_hdr_size) {
+ MRVL_LOG(INFO, "Split headers not supported");
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ ETHER_HDR_LEN - ETHER_CRC_LEN;
+
+ ret = mrvl_configure_rxqs(priv, dev->data->port_id,
+ dev->data->nb_rx_queues);
+ if (ret < 0)
+ return ret;
+
+ ret = mrvl_configure_txqs(priv, dev->data->port_id,
+ dev->data->nb_tx_queues);
+ if (ret < 0)
+ return ret;
+
+ priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
+ priv->ppio_params.maintain_stats = 1;
+ priv->nb_rx_queues = dev->data->nb_rx_queues;
+
+ if (dev->data->nb_rx_queues == 1 &&
+ dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
+ priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
+
+ return 0;
+ }
+
+ return mrvl_configure_rss(priv,
+ &dev->data->dev_conf.rx_adv_conf.rss_conf);
+}
+
+/**
+ * DPDK callback to change the MTU.
+ *
+ * Setting the MTU affects hardware MRU (packets larger than the MRU
+ * will be dropped).
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mtu
+ * New MTU.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ /* extra MV_MH_SIZE bytes are required for Marvell tag */
+ uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ int ret;
+
+ if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
+ return -EINVAL;
+
+ if (!priv->ppio)
+ return 0;
+
+ ret = pp2_ppio_set_mru(priv->ppio, mru);
+ if (ret)
+ return ret;
+
+ return pp2_ppio_set_mtu(priv->ppio, mtu);
+}
+
+/**
+ * DPDK callback to bring the link up.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv->ppio)
+ return -EPERM;
+
+ ret = pp2_ppio_enable(priv->ppio);
+ if (ret)
+ return ret;
+
+ /*
+ * mtu/mru can be updated if pp2_ppio_enable() was called at least once
+ * as pp2_ppio_enable() changes port->t_mode from default 0 to
+ * PP2_TRAFFIC_INGRESS_EGRESS.
+ *
+ * Set mtu to default DPDK value here.
+ */
+ ret = mrvl_mtu_set(dev, dev->data->mtu);
+ if (ret)
+ pp2_ppio_disable(priv->ppio);
+
+ return ret;
+}
+
+/**
+ * DPDK callback to bring the link down.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ if (!priv->ppio)
+ return -EPERM;
+
+ return pp2_ppio_disable(priv->ppio);
+}
+
+/**
+ * DPDK callback to start tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param queue_id
+ * Transmit queue index.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv)
+ return -EPERM;
+
+ /* passing 1 enables given tx queue */
+ ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
+ return ret;
+ }
+
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to stop tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param queue_id
+ * Transmit queue index.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv->ppio)
+ return -EPERM;
+
+ /* passing 0 disables given tx queue */
+ ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
+ return ret;
+ }
+
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to start the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+mrvl_dev_start(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ char match[MRVL_MATCH_LEN];
+ int ret = 0, i, def_init_size;
+
+ snprintf(match, sizeof(match), "ppio-%d:%d",
+ priv->pp_id, priv->ppio_id);
+ priv->ppio_params.match = match;
+
+ /*
+ * Calculate the minimum bpool size for refill feature as follows:
+ * 2 default burst sizes multiply by number of rx queues.
+ * If the bpool size will be below this value, new buffers will
+ * be added to the pool.
+ */
+ priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
+
+ /* In case initial bpool size configured in queues setup is
+ * smaller than minimum size add more buffers
+ */
+ def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
+ if (priv->bpool_init_size < def_init_size) {
+ int buffs_to_add = def_init_size - priv->bpool_init_size;
+
+ priv->bpool_init_size += buffs_to_add;
+ ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
+ if (ret)
+ MRVL_LOG(ERR, "Failed to add buffers to bpool");
+ }
+
+ /*
+ * Calculate the maximum bpool size for refill feature as follows:
+ * maximum number of descriptors in rx queue multiply by number
+ * of rx queues plus minimum bpool size.
+ * In case the bpool size will exceed this value, superfluous buffers
+ * will be removed
+ */
+ priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
+ priv->bpool_min_size;
+
+ ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to init ppio");
+ return ret;
+ }
+
+ /*
+ * In case there are some some stale uc/mc mac addresses flush them
+ * here. It cannot be done during mrvl_dev_close() as port information
+ * is already gone at that point (due to pp2_ppio_deinit() in
+ * mrvl_dev_stop()).
+ */
+ if (!priv->uc_mc_flushed) {
+ ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
+ if (ret) {
+ MRVL_LOG(ERR,
+ "Failed to flush uc/mc filter list");
+ goto out;
+ }
+ priv->uc_mc_flushed = 1;
+ }
+
+ if (!priv->vlan_flushed) {
+ ret = pp2_ppio_flush_vlan(priv->ppio);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to flush vlan list");
+ /*
+ * TODO
+ * once pp2_ppio_flush_vlan() is supported jump to out
+ * goto out;
+ */
+ }
+ priv->vlan_flushed = 1;
+ }
+
+ /* For default QoS config, don't start classifier. */
+ if (mrvl_qos_cfg) {
+ ret = mrvl_start_qos_mapping(priv);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to setup QoS mapping");
+ goto out;
+ }
+ }
+
+ ret = mrvl_dev_set_link_up(dev);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to set link up");
+ goto out;
+ }
+
+ /* start tx queues */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct mrvl_txq *txq = dev->data->tx_queues[i];
+
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ if (!txq->tx_deferred_start)
+ continue;
+
+ /*
+ * All txqs are started by default. Stop them
+ * so that tx_deferred_start works as expected.
+ */
+ ret = mrvl_tx_queue_stop(dev, i);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ MRVL_LOG(ERR, "Failed to start device");
+ pp2_ppio_deinit(priv->ppio);
+ return ret;
+}
+
+/**
+ * Flush receive queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_flush_rx_queues(struct rte_eth_dev *dev)
+{
+ int i;
+
+ MRVL_LOG(INFO, "Flushing rx queues");
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ int ret, num;
+
+ do {
+ struct mrvl_rxq *q = dev->data->rx_queues[i];
+ struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];
+
+ num = MRVL_PP2_RXD_MAX;
+ ret = pp2_ppio_recv(q->priv->ppio,
+ q->priv->rxq_map[q->queue_id].tc,
+ q->priv->rxq_map[q->queue_id].inq,
+ descs, (uint16_t *)&num);
+ } while (ret == 0 && num);
+ }
+}
+
+/**
+ * Flush transmit shadow queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
+{
+ int i, j;
+ struct mrvl_txq *txq;
+
+ MRVL_LOG(INFO, "Flushing tx shadow queues");
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = (struct mrvl_txq *)dev->data->tx_queues[i];
+
+ for (j = 0; j < RTE_MAX_LCORE; j++) {
+ struct mrvl_shadow_txq *sq;
+
+ if (!hifs[j])
+ continue;
+
+ sq = &txq->shadow_txqs[j];
+ mrvl_free_sent_buffers(txq->priv->ppio,
+ hifs[j], j, sq, txq->queue_id, 1);
+ while (sq->tail != sq->head) {
+ uint64_t addr = cookie_addr_high |
+ sq->ent[sq->tail].buff.cookie;
+ rte_pktmbuf_free(
+ (struct rte_mbuf *)addr);
+ sq->tail = (sq->tail + 1) &
+ MRVL_PP2_TX_SHADOWQ_MASK;
+ }
+ memset(sq, 0, sizeof(*sq));
+ }
+ }
+}
+
+/**
+ * Flush hardware bpool (buffer-pool).
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_flush_bpool(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct pp2_hif *hif;
+ uint32_t num;
+ int ret;
+ unsigned int core_id = rte_lcore_id();
+
+ if (core_id == LCORE_ID_ANY)
+ core_id = 0;
+
+ hif = mrvl_get_hif(priv, core_id);
+
+ ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to get bpool buffers number");
+ return;
+ }
+
+ while (num--) {
+ struct pp2_buff_inf inf;
+ uint64_t addr;
+
+ ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
+ if (ret)
+ break;
+
+ addr = cookie_addr_high | inf.cookie;
+ rte_pktmbuf_free((struct rte_mbuf *)addr);
+ }
+}
+
+/**
+ * DPDK callback to stop the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_dev_stop(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ mrvl_dev_set_link_down(dev);
+ mrvl_flush_rx_queues(dev);
+ mrvl_flush_tx_shadow_queues(dev);
+ if (priv->cls_tbl) {
+ pp2_cls_tbl_deinit(priv->cls_tbl);
+ priv->cls_tbl = NULL;
+ }
+ if (priv->qos_tbl) {
+ pp2_cls_qos_tbl_deinit(priv->qos_tbl);
+ priv->qos_tbl = NULL;
+ }
+ if (priv->ppio)
+ pp2_ppio_deinit(priv->ppio);
+ priv->ppio = NULL;
+
+ /* policer must be released after ppio deinitialization */
+ if (priv->policer) {
+ pp2_cls_plcr_deinit(priv->policer);
+ priv->policer = NULL;
+ }
+}
+
+/**
+ * DPDK callback to close the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_dev_close(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ size_t i;
+
+ for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
+ struct pp2_ppio_tc_params *tc_params =
+ &priv->ppio_params.inqs_params.tcs_params[i];
+
+ if (tc_params->inqs_params) {
+ rte_free(tc_params->inqs_params);
+ tc_params->inqs_params = NULL;
+ }
+ }
+
+ mrvl_flush_bpool(dev);
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ * Wait for request completion (ignored).
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
+{
+ /*
+ * TODO
+ * once MUSDK provides necessary API use it here
+ */
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct ethtool_cmd edata;
+ struct ifreq req;
+ int ret, fd, link_up;
+
+ if (!priv->ppio)
+ return -EPERM;
+
+ edata.cmd = ETHTOOL_GSET;
+
+ strcpy(req.ifr_name, dev->data->name);
+ req.ifr_data = (void *)&edata;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd == -1)
+ return -EFAULT;
+
+ ret = ioctl(fd, SIOCETHTOOL, &req);
+ if (ret == -1) {
+ close(fd);
+ return -EFAULT;
+ }
+
+ close(fd);
+
+ switch (ethtool_cmd_speed(&edata)) {
+ case SPEED_10:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+ break;
+ case SPEED_100:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case SPEED_1000:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case SPEED_10000:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ default:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ }
+
+ dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
+ ETH_LINK_HALF_DUPLEX;
+ dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
+ ETH_LINK_FIXED;
+ pp2_ppio_get_link_state(priv->ppio, &link_up);
+ dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to enable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv->ppio)
+ return;
+
+ if (priv->isolated)
+ return;
+
+ ret = pp2_ppio_set_promisc(priv->ppio, 1);
+ if (ret)
+ MRVL_LOG(ERR, "Failed to enable promiscuous mode");
+}
+
+/**
+ * DPDK callback to enable allmulti mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv->ppio)
+ return;
+
+ if (priv->isolated)
+ return;
+
+ ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
+ if (ret)
+ MRVL_LOG(ERR, "Failed enable all-multicast mode");
+}
+
+/**
+ * DPDK callback to disable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv->ppio)
+ return;
+
+ ret = pp2_ppio_set_promisc(priv->ppio, 0);
+ if (ret)
+ MRVL_LOG(ERR, "Failed to disable promiscuous mode");
+}
+
+/**
+ * DPDK callback to disable allmulticast mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv->ppio)
+ return;
+
+ ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
+ if (ret)
+ MRVL_LOG(ERR, "Failed to disable all-multicast mode");
+}
+
+/**
+ * DPDK callback to remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param index
+ * MAC address index.
+ */
+static void
+mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ char buf[ETHER_ADDR_FMT_SIZE];
+ int ret;
+
+ if (!priv->ppio)
+ return;
+
+ if (priv->isolated)
+ return;
+
+ ret = pp2_ppio_remove_mac_addr(priv->ppio,
+ dev->data->mac_addrs[index].addr_bytes);
+ if (ret) {
+ ether_format_addr(buf, sizeof(buf),
+ &dev->data->mac_addrs[index]);
+ MRVL_LOG(ERR, "Failed to remove mac %s", buf);
+ }
+}
+
+/**
+ * DPDK callback to add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ * @param vmdq
+ * VMDq pool index to associate address with (unused).
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq __rte_unused)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ char buf[ETHER_ADDR_FMT_SIZE];
+ int ret;
+
+ if (priv->isolated)
+ return -ENOTSUP;
+
+ if (index == 0)
+ /* For setting index 0, mrvl_mac_addr_set() should be used.*/
+ return -1;
+
+ if (!priv->ppio)
+ return 0;
+
+ /*
+ * Maximum number of uc addresses can be tuned via kernel module mvpp2x
+ * parameter uc_filter_max. Maximum number of mc addresses is then
+ * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and
+ * 21 respectively.
+ *
+ * If more than uc_filter_max uc addresses were added to filter list
+ * then NIC will switch to promiscuous mode automatically.
+ *
+ * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max number mc addresses
+ * were added to filter list then NIC will switch to all-multicast mode
+ * automatically.
+ */
+ ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
+ if (ret) {
+ ether_format_addr(buf, sizeof(buf), mac_addr);
+ MRVL_LOG(ERR, "Failed to add mac %s", buf);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * DPDK callback to set the primary MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv->ppio)
+ return 0;
+
+ if (priv->isolated)
+ return -ENOTSUP;
+
+ ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
+ if (ret) {
+ char buf[ETHER_ADDR_FMT_SIZE];
+ ether_format_addr(buf, sizeof(buf), mac_addr);
+ MRVL_LOG(ERR, "Failed to set mac to %s", buf);
+ }
+
+ return ret;
+}
+
+/**
+ * DPDK callback to get device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param stats
+ * Stats structure output buffer.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct pp2_ppio_statistics ppio_stats;
+ uint64_t drop_mac = 0;
+ unsigned int i, idx, ret;
+
+ if (!priv->ppio)
+ return -EPERM;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct mrvl_rxq *rxq = dev->data->rx_queues[i];
+ struct pp2_ppio_inq_statistics rx_stats;
+
+ if (!rxq)
+ continue;
+
+ idx = rxq->queue_id;
+ if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
+ MRVL_LOG(ERR,
+ "rx queue %d stats out of range (0 - %d)",
+ idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+ continue;
+ }
+
+ ret = pp2_ppio_inq_get_statistics(priv->ppio,
+ priv->rxq_map[idx].tc,
+ priv->rxq_map[idx].inq,
+ &rx_stats, 0);
+ if (unlikely(ret)) {
+ MRVL_LOG(ERR,
+ "Failed to update rx queue %d stats", idx);
+ break;
+ }
+
+ stats->q_ibytes[idx] = rxq->bytes_recv;
+ stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
+ stats->q_errors[idx] = rx_stats.drop_early +
+ rx_stats.drop_fullq +
+ rx_stats.drop_bm +
+ rxq->drop_mac;
+ stats->ibytes += rxq->bytes_recv;
+ drop_mac += rxq->drop_mac;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct mrvl_txq *txq = dev->data->tx_queues[i];
+ struct pp2_ppio_outq_statistics tx_stats;
+
+ if (!txq)
+ continue;
+
+ idx = txq->queue_id;
+ if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
+ MRVL_LOG(ERR,
+ "tx queue %d stats out of range (0 - %d)",
+ idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+ }
+
+ ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
+ &tx_stats, 0);
+ if (unlikely(ret)) {
+ MRVL_LOG(ERR,
+ "Failed to update tx queue %d stats", idx);
+ break;
+ }
+
+ stats->q_opackets[idx] = tx_stats.deq_desc;
+ stats->q_obytes[idx] = txq->bytes_sent;
+ stats->obytes += txq->bytes_sent;
+ }
+
+ ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
+ if (unlikely(ret)) {
+ MRVL_LOG(ERR, "Failed to update port statistics");
+ return ret;
+ }
+
+ stats->ipackets += ppio_stats.rx_packets - drop_mac;
+ stats->opackets += ppio_stats.tx_packets;
+ stats->imissed += ppio_stats.rx_fullq_dropped +
+ ppio_stats.rx_bm_dropped +
+ ppio_stats.rx_early_dropped +
+ ppio_stats.rx_fifo_dropped +
+ ppio_stats.rx_cls_dropped;
+ stats->ierrors = drop_mac;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to clear device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_stats_reset(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int i;
+
+ if (!priv->ppio)
+ return;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct mrvl_rxq *rxq = dev->data->rx_queues[i];
+
+ pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
+ priv->rxq_map[i].inq, NULL, 1);
+ rxq->bytes_recv = 0;
+ rxq->drop_mac = 0;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct mrvl_txq *txq = dev->data->tx_queues[i];
+
+ pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
+ txq->bytes_sent = 0;
+ }
+
+ pp2_ppio_get_statistics(priv->ppio, NULL, 1);
+}
+
+/**
+ * DPDK callback to get extended statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param stats
+ * Pointer to xstats table.
+ * @param n
+ * Number of entries in xstats table.
+ * @return
+ * Negative value on error, number of read xstats otherwise.
+ */
+static int
+mrvl_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *stats, unsigned int n)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct pp2_ppio_statistics ppio_stats;
+ unsigned int i;
+
+ if (!stats)
+ return 0;
+
+ pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
+ for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) {
+ uint64_t val;
+
+ if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
+ val = *(uint32_t *)((uint8_t *)&ppio_stats +
+ mrvl_xstats_tbl[i].offset);
+ else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t))
+ val = *(uint64_t *)((uint8_t *)&ppio_stats +
+ mrvl_xstats_tbl[i].offset);
+ else
+ return -EINVAL;
+
+ stats[i].id = i;
+ stats[i].value = val;
+ }
+
+ return n;
+}
+
+/**
+ * DPDK callback to reset extended statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_xstats_reset(struct rte_eth_dev *dev)
+{
+ mrvl_stats_reset(dev);
+}
+
+/**
+ * DPDK callback to get extended statistics names.
+ *
+ * @param dev (unused)
+ * Pointer to Ethernet device structure.
+ * @param xstats_names
+ * Pointer to xstats names table.
+ * @param size
+ * Size of the xstats names table.
+ * @return
+ * Number of read names.
+ */
+static int
+mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int size)
+{
+ unsigned int i;
+
+ if (!xstats_names)
+ return RTE_DIM(mrvl_xstats_tbl);
+
+ for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++)
+ snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
+ mrvl_xstats_tbl[i].name);
+
+ return size;
+}
+
+/**
+ * DPDK callback to get information about the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure (unused).
+ * @param info
+ * Info structure output buffer.
+ */
+static void
+mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_dev_info *info)
+{
+ info->speed_capa = ETH_LINK_SPEED_10M |
+ ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G;
+
+ info->max_rx_queues = MRVL_PP2_RXQ_MAX;
+ info->max_tx_queues = MRVL_PP2_TXQ_MAX;
+ info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;
+
+ info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
+ info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
+ info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;
+
+ info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
+ info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
+ info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
+
+ info->rx_offload_capa = MRVL_RX_OFFLOADS;
+ info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;
+
+ info->tx_offload_capa = MRVL_TX_OFFLOADS;
+ info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
+
+ info->flow_type_rss_offloads = ETH_RSS_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_NONFRAG_IPV4_UDP;
+
+ /* By default packets are dropped if no descriptors are available */
+ info->default_rxconf.rx_drop_en = 1;
+ info->default_rxconf.offloads = DEV_RX_OFFLOAD_CRC_STRIP;
+
+ info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
+}
+
+/**
+ * Return supported packet types.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure (unused).
+ *
+ * @return
+ * Const pointer to the table with supported packet types.
+ */
+static const uint32_t *
+mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP
+ };
+
+ return ptypes;
+}
+
+/**
+ * DPDK callback to get information about specific receive queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * Receive queue index.
+ * @param qinfo
+ * Receive queue information structure.
+ */
+static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int inq = priv->rxq_map[rx_queue_id].inq;
+ int tc = priv->rxq_map[rx_queue_id].tc;
+ struct pp2_ppio_tc_params *tc_params =
+ &priv->ppio_params.inqs_params.tcs_params[tc];
+
+ qinfo->mp = q->mp;
+ qinfo->nb_desc = tc_params->inqs_params[inq].size;
+}
+
+/**
+ * DPDK callback to get information about specific transmit queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param tx_queue_id
+ * Transmit queue index.
+ * @param qinfo
+ * Transmit queue information structure.
+ */
+static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id];
+
+ qinfo->nb_desc =
+ priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+/**
+ * DPDK callback to Configure a VLAN filter.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param vlan_id
+ * VLAN ID to filter.
+ * @param on
+ * Toggle filter.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ if (!priv->ppio)
+ return -EPERM;
+
+ if (priv->isolated)
+ return -ENOTSUP;
+
+ return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
+ pp2_ppio_remove_vlan(priv->ppio, vlan_id);
+}
+
+/**
+ * Release buffers to hardware bpool (buffer-pool)
+ *
+ * @param rxq
+ * Receive queue pointer.
+ * @param num
+ * Number of buffers to release to bpool.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
+{
+ struct buff_release_entry entries[MRVL_PP2_RXD_MAX];
+ struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX];
+ int i, ret;
+ unsigned int core_id;
+ struct pp2_hif *hif;
+ struct pp2_bpool *bpool;
+
+ core_id = rte_lcore_id();
+ if (core_id == LCORE_ID_ANY)
+ core_id = 0;
+
+ hif = mrvl_get_hif(rxq->priv, core_id);
+ if (!hif)
+ return -1;
+
+ bpool = rxq->priv->bpool;
+
+ ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
+ if (ret)
+ return ret;
+
+ if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
+ cookie_addr_high =
+ (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
+
+ for (i = 0; i < num; i++) {
+ if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
+ != cookie_addr_high) {
+ MRVL_LOG(ERR,
+ "mbuf virtual addr high 0x%lx out of range",
+ (uint64_t)mbufs[i] >> 32);
+ goto out;
+ }
+
+ entries[i].buff.addr =
+ rte_mbuf_data_iova_default(mbufs[i]);
+ entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
+ entries[i].bpool = bpool;
+ }
+
+ pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
+ mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
+
+ if (i != num)
+ goto out;
+
+ return 0;
+out:
+ for (; i < num; i++)
+ rte_pktmbuf_free(mbufs[i]);
+
+ return -1;
+}
+
+/**
+ * DPDK callback to configure the receive queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param conf
+ * Thresholds parameters.
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_rxq *rxq;
+ uint32_t min_size,
+ max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ int ret, tc, inq;
+ uint64_t offloads;
+
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+ if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
+ /*
+ * Unknown TC mapping, mapping will not have a correct queue.
+ */
+ MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
+ idx, priv->ppio_id);
+ return -EFAULT;
+ }
+
+ min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
+ MRVL_PKT_EFFEC_OFFS;
+ if (min_size < max_rx_pkt_len) {
+ MRVL_LOG(ERR,
+ "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.",
+ max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
+ MRVL_PKT_EFFEC_OFFS,
+ max_rx_pkt_len);
+ return -EINVAL;
+ }
+
+ if (dev->data->rx_queues[idx]) {
+ rte_free(dev->data->rx_queues[idx]);
+ dev->data->rx_queues[idx] = NULL;
+ }
+
+ rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
+ if (!rxq)
+ return -ENOMEM;
+
+ rxq->priv = priv;
+ rxq->mp = mp;
+ rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+ rxq->queue_id = idx;
+ rxq->port_id = dev->data->port_id;
+ mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
+
+ tc = priv->rxq_map[rxq->queue_id].tc,
+ inq = priv->rxq_map[rxq->queue_id].inq;
+ priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
+ desc;
+
+ ret = mrvl_fill_bpool(rxq, desc);
+ if (ret) {
+ rte_free(rxq);
+ return ret;
+ }
+
+ priv->bpool_init_size += desc;
+
+ dev->data->rx_queues[idx] = rxq;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to release the receive queue.
+ *
+ * @param rxq
+ * Generic receive queue pointer.
+ */
+static void
+mrvl_rx_queue_release(void *rxq)
+{
+ struct mrvl_rxq *q = rxq;
+ struct pp2_ppio_tc_params *tc_params;
+ int i, num, tc, inq;
+ struct pp2_hif *hif;
+ unsigned int core_id = rte_lcore_id();
+
+ if (core_id == LCORE_ID_ANY)
+ core_id = 0;
+
+ if (!q)
+ return;
+
+ hif = mrvl_get_hif(q->priv, core_id);
+
+ if (!hif)
+ return;
+
+ tc = q->priv->rxq_map[q->queue_id].tc;
+ inq = q->priv->rxq_map[q->queue_id].inq;
+ tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
+ num = tc_params->inqs_params[inq].size;
+ for (i = 0; i < num; i++) {
+ struct pp2_buff_inf inf;
+ uint64_t addr;
+
+ pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
+ addr = cookie_addr_high | inf.cookie;
+ rte_pktmbuf_free((struct rte_mbuf *)addr);
+ }
+
+ rte_free(q);
+}
+
+/**
+ * DPDK callback to configure the transmit queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Transmit queue index.
+ * @param desc
+ * Number of descriptors to configure in the queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param conf
+ * Tx queue configuration parameters.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket,
+ const struct rte_eth_txconf *conf)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_txq *txq;
+
+ if (dev->data->tx_queues[idx]) {
+ rte_free(dev->data->tx_queues[idx]);
+ dev->data->tx_queues[idx] = NULL;
+ }
+
+ txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
+ if (!txq)
+ return -ENOMEM;
+
+ txq->priv = priv;
+ txq->queue_id = idx;
+ txq->port_id = dev->data->port_id;
+ txq->tx_deferred_start = conf->tx_deferred_start;
+ dev->data->tx_queues[idx] = txq;
+
+ priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to release the transmit queue.
+ *
+ * @param txq
+ * Generic transmit queue pointer.
+ */
+static void
+mrvl_tx_queue_release(void *txq)
+{
+ struct mrvl_txq *q = txq;
+
+ if (!q)
+ return;
+
+ rte_free(q);
+}
+
+/**
+ * DPDK callback to get flow control configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param fc_conf
+ * Pointer to the flow control configuration.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret, en;
+
+ if (!priv)
+ return -EPERM;
+
+ ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to read rx pause state");
+ return ret;
+ }
+
+ fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to set flow control configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param fc_conf
+ * Pointer to the flow control configuration.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ if (!priv)
+ return -EPERM;
+
+ if (fc_conf->high_water ||
+ fc_conf->low_water ||
+ fc_conf->pause_time ||
+ fc_conf->mac_ctrl_frame_fwd ||
+ fc_conf->autoneg) {
+ MRVL_LOG(ERR, "Flowctrl parameter is not supported");
+
+ return -EINVAL;
+ }
+
+ if (fc_conf->mode == RTE_FC_NONE ||
+ fc_conf->mode == RTE_FC_RX_PAUSE) {
+ int ret, en;
+
+ en = fc_conf->mode == RTE_FC_NONE ? 0 : 1;
+ ret = pp2_ppio_set_rx_pause(priv->ppio, en);
+ if (ret)
+ MRVL_LOG(ERR,
+ "Failed to change flowctrl on RX side");
+
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * Update RSS hash configuration
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rss_conf
+ * Pointer to RSS configuration.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ if (priv->isolated)
+ return -ENOTSUP;
+
+ return mrvl_configure_rss(priv, rss_conf);
+}
+
+/**
+ * DPDK callback to get RSS hash configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @rss_conf
+ * Pointer to RSS configuration.
+ *
+ * @return
+ * Always 0.
+ */
+static int
+mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ enum pp2_ppio_hash_type hash_type =
+ priv->ppio_params.inqs_params.hash_type;
+
+ rss_conf->rss_key = NULL;
+
+ if (hash_type == PP2_PPIO_HASH_T_NONE)
+ rss_conf->rss_hf = 0;
+ else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
+ rss_conf->rss_hf = ETH_RSS_IPV4;
+ else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
+ rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
+ else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
+ rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to get rte_flow callbacks.
+ *
+ * @param dev
+ * Pointer to the device structure.
+ * @param filer_type
+ * Flow filter type.
+ * @param filter_op
+ * Flow filter operation.
+ * @param arg
+ * Pointer to pass the flow ops.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg)
+{
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &mrvl_flow_ops;
+ return 0;
+ default:
+ MRVL_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ return -EINVAL;
+ }
+}
+
+static const struct eth_dev_ops mrvl_ops = {
+ .dev_configure = mrvl_dev_configure,
+ .dev_start = mrvl_dev_start,
+ .dev_stop = mrvl_dev_stop,
+ .dev_set_link_up = mrvl_dev_set_link_up,
+ .dev_set_link_down = mrvl_dev_set_link_down,
+ .dev_close = mrvl_dev_close,
+ .link_update = mrvl_link_update,
+ .promiscuous_enable = mrvl_promiscuous_enable,
+ .allmulticast_enable = mrvl_allmulticast_enable,
+ .promiscuous_disable = mrvl_promiscuous_disable,
+ .allmulticast_disable = mrvl_allmulticast_disable,
+ .mac_addr_remove = mrvl_mac_addr_remove,
+ .mac_addr_add = mrvl_mac_addr_add,
+ .mac_addr_set = mrvl_mac_addr_set,
+ .mtu_set = mrvl_mtu_set,
+ .stats_get = mrvl_stats_get,
+ .stats_reset = mrvl_stats_reset,
+ .xstats_get = mrvl_xstats_get,
+ .xstats_reset = mrvl_xstats_reset,
+ .xstats_get_names = mrvl_xstats_get_names,
+ .dev_infos_get = mrvl_dev_infos_get,
+ .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
+ .rxq_info_get = mrvl_rxq_info_get,
+ .txq_info_get = mrvl_txq_info_get,
+ .vlan_filter_set = mrvl_vlan_filter_set,
+ .tx_queue_start = mrvl_tx_queue_start,
+ .tx_queue_stop = mrvl_tx_queue_stop,
+ .rx_queue_setup = mrvl_rx_queue_setup,
+ .rx_queue_release = mrvl_rx_queue_release,
+ .tx_queue_setup = mrvl_tx_queue_setup,
+ .tx_queue_release = mrvl_tx_queue_release,
+ .flow_ctrl_get = mrvl_flow_ctrl_get,
+ .flow_ctrl_set = mrvl_flow_ctrl_set,
+ .rss_hash_update = mrvl_rss_hash_update,
+ .rss_hash_conf_get = mrvl_rss_hash_conf_get,
+ .filter_ctrl = mrvl_eth_filter_ctrl,
+};
+
+/**
+ * Return packet type information and l3/l4 offsets.
+ *
+ * @param desc
+ * Pointer to the received packet descriptor.
+ * @param l3_offset
+ * l3 packet offset.
+ * @param l4_offset
+ * l4 packet offset.
+ *
+ * @return
+ * Packet type information.
+ */
+static inline uint64_t
+mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
+ uint8_t *l3_offset, uint8_t *l4_offset)
+{
+ enum pp2_inq_l3_type l3_type;
+ enum pp2_inq_l4_type l4_type;
+ uint64_t packet_type;
+
+ pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
+ pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
+
+ packet_type = RTE_PTYPE_L2_ETHER;
+
+ switch (l3_type) {
+ case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
+ packet_type |= RTE_PTYPE_L3_IPV4;
+ break;
+ case PP2_INQ_L3_TYPE_IPV4_OK:
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT;
+ break;
+ case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ break;
+ case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
+ packet_type |= RTE_PTYPE_L3_IPV6;
+ break;
+ case PP2_INQ_L3_TYPE_IPV6_EXT:
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT;
+ break;
+ case PP2_INQ_L3_TYPE_ARP:
+ packet_type |= RTE_PTYPE_L2_ETHER_ARP;
+ /*
+ * In case of ARP l4_offset is set to wrong value.
+ * Set it to proper one so that later on mbuf->l3_len can be
+ * calculated subtracting l4_offset and l3_offset.
+ */
+ *l4_offset = *l3_offset + MRVL_ARP_LENGTH;
+ break;
+ default:
+ MRVL_LOG(DEBUG, "Failed to recognise l3 packet type");
+ break;
+ }
+
+ switch (l4_type) {
+ case PP2_INQ_L4_TYPE_TCP:
+ packet_type |= RTE_PTYPE_L4_TCP;
+ break;
+ case PP2_INQ_L4_TYPE_UDP:
+ packet_type |= RTE_PTYPE_L4_UDP;
+ break;
+ default:
+ MRVL_LOG(DEBUG, "Failed to recognise l4 packet type");
+ break;
+ }
+
+ return packet_type;
+}
+
+/**
+ * Get offload information from the received packet descriptor.
+ *
+ * @param desc
+ * Pointer to the received packet descriptor.
+ *
+ * @return
+ * Mbuf offload flags.
+ */
+static inline uint64_t
+mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc)
+{
+ uint64_t flags;
+ enum pp2_inq_desc_status status;
+
+ status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
+ if (unlikely(status != PP2_DESC_ERR_OK))
+ flags = PKT_RX_IP_CKSUM_BAD;
+ else
+ flags = PKT_RX_IP_CKSUM_GOOD;
+
+ status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
+ if (unlikely(status != PP2_DESC_ERR_OK))
+ flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ return flags;
+}
+
+/**
+ * DPDK callback for receive.
+ *
+ * @param rxq
+ * Generic pointer to the receive queue.
+ * @param rx_pkts
+ * Array to store received packets.
+ * @param nb_pkts
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received.
+ */
+static uint16_t
+mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct mrvl_rxq *q = rxq;
+ struct pp2_ppio_desc descs[nb_pkts];
+ struct pp2_bpool *bpool;
+ int i, ret, rx_done = 0;
+ int num;
+ struct pp2_hif *hif;
+ unsigned int core_id = rte_lcore_id();
+
+ hif = mrvl_get_hif(q->priv, core_id);
+
+ if (unlikely(!q->priv->ppio || !hif))
+ return 0;
+
+ bpool = q->priv->bpool;
+
+ ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
+ q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
+ if (unlikely(ret < 0)) {
+ MRVL_LOG(ERR, "Failed to receive packets");
+ return 0;
+ }
+ mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
+
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf;
+ uint8_t l3_offset, l4_offset;
+ enum pp2_inq_desc_status status;
+ uint64_t addr;
+
+ if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
+ struct pp2_ppio_desc *pref_desc;
+ u64 pref_addr;
+
+ pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
+ pref_addr = cookie_addr_high |
+ pp2_ppio_inq_desc_get_cookie(pref_desc);
+ rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
+ rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
+ }
+
+ addr = cookie_addr_high |
+ pp2_ppio_inq_desc_get_cookie(&descs[i]);
+ mbuf = (struct rte_mbuf *)addr;
+ rte_pktmbuf_reset(mbuf);
+
+ /* drop packet in case of mac, overrun or resource error */
+ status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
+ if (unlikely(status != PP2_DESC_ERR_OK)) {
+ struct pp2_buff_inf binf = {
+ .addr = rte_mbuf_data_iova_default(mbuf),
+ .cookie = (pp2_cookie_t)(uint64_t)mbuf,
+ };
+
+ pp2_bpool_put_buff(hif, bpool, &binf);
+ mrvl_port_bpool_size
+ [bpool->pp2_id][bpool->id][core_id]++;
+ q->drop_mac++;
+ continue;
+ }
+
+ mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
+ mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = q->port_id;
+ mbuf->packet_type =
+ mrvl_desc_to_packet_type_and_offset(&descs[i],
+ &l3_offset,
+ &l4_offset);
+ mbuf->l2_len = l3_offset;
+ mbuf->l3_len = l4_offset - l3_offset;
+
+ if (likely(q->cksum_enabled))
+ mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);
+
+ rx_pkts[rx_done++] = mbuf;
+ q->bytes_recv += mbuf->pkt_len;
+ }
+
+ if (rte_spinlock_trylock(&q->priv->lock) == 1) {
+ num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);
+
+ if (unlikely(num <= q->priv->bpool_min_size ||
+ (!rx_done && num < q->priv->bpool_init_size))) {
+ ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
+ if (ret)
+ MRVL_LOG(ERR, "Failed to fill bpool");
+ } else if (unlikely(num > q->priv->bpool_max_size)) {
+ int i;
+ int pkt_to_remove = num - q->priv->bpool_init_size;
+ struct rte_mbuf *mbuf;
+ struct pp2_buff_inf buff;
+
+ MRVL_LOG(DEBUG,
+ "port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)",
+ bpool->pp2_id, q->priv->ppio->port_id,
+ bpool->id, pkt_to_remove, num,
+ q->priv->bpool_init_size);
+
+ for (i = 0; i < pkt_to_remove; i++) {
+ ret = pp2_bpool_get_buff(hif, bpool, &buff);
+ if (ret)
+ break;
+ mbuf = (struct rte_mbuf *)
+ (cookie_addr_high | buff.cookie);
+ rte_pktmbuf_free(mbuf);
+ }
+ mrvl_port_bpool_size
+ [bpool->pp2_id][bpool->id][core_id] -= i;
+ }
+ rte_spinlock_unlock(&q->priv->lock);
+ }
+
+ return rx_done;
+}
+
+/**
+ * Prepare offload information.
+ *
+ * @param ol_flags
+ * Offload flags.
+ * @param packet_type
+ * Packet type bitfield.
+ * @param l3_type
+ * Pointer to the pp2_ouq_l3_type structure.
+ * @param l4_type
+ * Pointer to the pp2_outq_l4_type structure.
+ * @param gen_l3_cksum
+ * Will be set to 1 in case l3 checksum is computed.
+ * @param l4_cksum
+ * Will be set to 1 in case l4 checksum is computed.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static inline int
+mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
+ enum pp2_outq_l3_type *l3_type,
+ enum pp2_outq_l4_type *l4_type,
+ int *gen_l3_cksum,
+ int *gen_l4_cksum)
+{
+ /*
+ * Based on ol_flags prepare information
+ * for pp2_ppio_outq_desc_set_proto_info() which setups descriptor
+ * for offloading.
+ */
+ if (ol_flags & PKT_TX_IPV4) {
+ *l3_type = PP2_OUTQ_L3_TYPE_IPV4;
+ *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
+ } else if (ol_flags & PKT_TX_IPV6) {
+ *l3_type = PP2_OUTQ_L3_TYPE_IPV6;
+ /* no checksum for ipv6 header */
+ *gen_l3_cksum = 0;
+ } else {
+ /* if something different then stop processing */
+ return -1;
+ }
+
+ ol_flags &= PKT_TX_L4_MASK;
+ if ((packet_type & RTE_PTYPE_L4_TCP) &&
+ ol_flags == PKT_TX_TCP_CKSUM) {
+ *l4_type = PP2_OUTQ_L4_TYPE_TCP;
+ *gen_l4_cksum = 1;
+ } else if ((packet_type & RTE_PTYPE_L4_UDP) &&
+ ol_flags == PKT_TX_UDP_CKSUM) {
+ *l4_type = PP2_OUTQ_L4_TYPE_UDP;
+ *gen_l4_cksum = 1;
+ } else {
+ *l4_type = PP2_OUTQ_L4_TYPE_OTHER;
+ /* no checksum for other type */
+ *gen_l4_cksum = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * Release already sent buffers to bpool (buffer-pool).
+ *
+ * @param ppio
+ * Pointer to the port structure.
+ * @param hif
+ * Pointer to the MUSDK hardware interface.
+ * @param sq
+ * Pointer to the shadow queue.
+ * @param qid
+ * Queue id number.
+ * @param force
+ * Force releasing packets.
+ */
+static inline void
+mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
+ unsigned int core_id, struct mrvl_shadow_txq *sq,
+ int qid, int force)
+{
+ struct buff_release_entry *entry;
+ uint16_t nb_done = 0, num = 0, skip_bufs = 0;
+ int i;
+
+ pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
+
+ sq->num_to_release += nb_done;
+
+ if (likely(!force &&
+ sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
+ return;
+
+ nb_done = sq->num_to_release;
+ sq->num_to_release = 0;
+
+ for (i = 0; i < nb_done; i++) {
+ entry = &sq->ent[sq->tail + num];
+ if (unlikely(!entry->buff.addr)) {
+ MRVL_LOG(ERR,
+ "Shadow memory @%d: cookie(%lx), pa(%lx)!",
+ sq->tail, (u64)entry->buff.cookie,
+ (u64)entry->buff.addr);
+ skip_bufs = 1;
+ goto skip;
+ }
+
+ if (unlikely(!entry->bpool)) {
+ struct rte_mbuf *mbuf;
+
+ mbuf = (struct rte_mbuf *)
+ (cookie_addr_high | entry->buff.cookie);
+ rte_pktmbuf_free(mbuf);
+ skip_bufs = 1;
+ goto skip;
+ }
+
+ mrvl_port_bpool_size
+ [entry->bpool->pp2_id][entry->bpool->id][core_id]++;
+ num++;
+ if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
+ goto skip;
+ continue;
+skip:
+ if (likely(num))
+ pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
+ num += skip_bufs;
+ sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
+ sq->size -= num;
+ num = 0;
+ skip_bufs = 0;
+ }
+
+ if (likely(num)) {
+ pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
+ sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
+ sq->size -= num;
+ }
+}
+
+/**
+ * DPDK callback for transmit.
+ *
+ * @param txq
+ * Generic pointer transmit queue.
+ * @param tx_pkts
+ * Packets to transmit.
+ * @param nb_pkts
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted.
+ */
+static uint16_t
+mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct mrvl_txq *q = txq;
+ struct mrvl_shadow_txq *sq;
+ struct pp2_hif *hif;
+ struct pp2_ppio_desc descs[nb_pkts];
+ unsigned int core_id = rte_lcore_id();
+ int i, ret, bytes_sent = 0;
+ uint16_t num, sq_free_size;
+ uint64_t addr;
+
+ hif = mrvl_get_hif(q->priv, core_id);
+ sq = &q->shadow_txqs[core_id];
+
+ if (unlikely(!q->priv->ppio || !hif))
+ return 0;
+
+ if (sq->size)
+ mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
+ sq, q->queue_id, 0);
+
+ sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
+ if (unlikely(nb_pkts > sq_free_size)) {
+ MRVL_LOG(DEBUG,
+ "No room in shadow queue for %d packets! %d packets will be sent.",
+ nb_pkts, sq_free_size);
+ nb_pkts = sq_free_size;
+ }
+
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf = tx_pkts[i];
+ int gen_l3_cksum, gen_l4_cksum;
+ enum pp2_outq_l3_type l3_type;
+ enum pp2_outq_l4_type l4_type;
+
+ if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
+ struct rte_mbuf *pref_pkt_hdr;
+
+ pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
+ rte_mbuf_prefetch_part1(pref_pkt_hdr);
+ rte_mbuf_prefetch_part2(pref_pkt_hdr);
+ }
+
+ sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
+ sq->ent[sq->head].buff.addr =
+ rte_mbuf_data_iova_default(mbuf);
+ sq->ent[sq->head].bpool =
+ (unlikely(mbuf->port >= RTE_MAX_ETHPORTS ||
+ mbuf->refcnt > 1)) ? NULL :
+ mrvl_port_to_bpool_lookup[mbuf->port];
+ sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
+ sq->size++;
+
+ pp2_ppio_outq_desc_reset(&descs[i]);
+ pp2_ppio_outq_desc_set_phys_addr(&descs[i],
+ rte_pktmbuf_iova(mbuf));
+ pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
+ pp2_ppio_outq_desc_set_pkt_len(&descs[i],
+ rte_pktmbuf_pkt_len(mbuf));
+
+ bytes_sent += rte_pktmbuf_pkt_len(mbuf);
+ /*
+ * in case unsupported ol_flags were passed
+ * do not update descriptor offload information
+ */
+ ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
+ &l3_type, &l4_type, &gen_l3_cksum,
+ &gen_l4_cksum);
+ if (unlikely(ret))
+ continue;
+
+ pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
+ mbuf->l2_len,
+ mbuf->l2_len + mbuf->l3_len,
+ gen_l3_cksum, gen_l4_cksum);
+ }
+
+ num = nb_pkts;
+ pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
+ /* number of packets that were not sent */
+ if (unlikely(num > nb_pkts)) {
+ for (i = nb_pkts; i < num; i++) {
+ sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
+ MRVL_PP2_TX_SHADOWQ_MASK;
+ addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
+ bytes_sent -=
+ rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
+ }
+ sq->size -= num - nb_pkts;
+ }
+
+ q->bytes_sent += bytes_sent;
+
+ return nb_pkts;
+}
+
+/**
+ * Initialize packet processor.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_init_pp2(void)
+{
+ struct pp2_init_params init_params;
+
+ memset(&init_params, 0, sizeof(init_params));
+ init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
+ init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
+ init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
+
+ return pp2_init(&init_params);
+}
+
+/**
+ * Deinitialize packet processor.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static void
+mrvl_deinit_pp2(void)
+{
+ pp2_deinit();
+}
+
+/**
+ * Create private device structure.
+ *
+ * @param dev_name
+ * Pointer to the port name passed in the initialization parameters.
+ *
+ * @return
+ * Pointer to the newly allocated private device structure.
+ */
+static struct mrvl_priv *
+mrvl_priv_create(const char *dev_name)
+{
+ struct pp2_bpool_params bpool_params;
+ char match[MRVL_MATCH_LEN];
+ struct mrvl_priv *priv;
+ int ret, bpool_bit;
+
+ priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
+ if (!priv)
+ return NULL;
+
+ ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
+ &priv->pp_id, &priv->ppio_id);
+ if (ret)
+ goto out_free_priv;
+
+ bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
+ PP2_BPOOL_NUM_POOLS);
+ if (bpool_bit < 0)
+ goto out_free_priv;
+ priv->bpool_bit = bpool_bit;
+
+ snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
+ priv->bpool_bit);
+ memset(&bpool_params, 0, sizeof(bpool_params));
+ bpool_params.match = match;
+ bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
+ ret = pp2_bpool_init(&bpool_params, &priv->bpool);
+ if (ret)
+ goto out_clear_bpool_bit;
+
+ priv->ppio_params.type = PP2_PPIO_T_NIC;
+ rte_spinlock_init(&priv->lock);
+
+ return priv;
+out_clear_bpool_bit:
+ used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
+out_free_priv:
+ rte_free(priv);
+ return NULL;
+}
+
+/**
+ * Create device representing Ethernet port.
+ *
+ * @param name
+ * Pointer to the port's name.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
+{
+ int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
+ struct rte_eth_dev *eth_dev;
+ struct mrvl_priv *priv;
+ struct ifreq req;
+
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
+ return -ENOMEM;
+
+ priv = mrvl_priv_create(name);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free_dev;
+ }
+
+ eth_dev->data->mac_addrs =
+ rte_zmalloc("mac_addrs",
+ ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
+ if (!eth_dev->data->mac_addrs) {
+ MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
+ ret = -ENOMEM;
+ goto out_free_priv;
+ }
+
+ memset(&req, 0, sizeof(req));
+ strcpy(req.ifr_name, name);
+ ret = ioctl(fd, SIOCGIFHWADDR, &req);
+ if (ret)
+ goto out_free_mac;
+
+ memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
+ req.ifr_addr.sa_data, ETHER_ADDR_LEN);
+
+ eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
+ eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
+ eth_dev->data->kdrv = RTE_KDRV_NONE;
+ eth_dev->data->dev_private = priv;
+ eth_dev->device = &vdev->device;
+ eth_dev->dev_ops = &mrvl_ops;
+
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+out_free_mac:
+ rte_free(eth_dev->data->mac_addrs);
+out_free_dev:
+ rte_eth_dev_release_port(eth_dev);
+out_free_priv:
+ rte_free(priv);
+
+ return ret;
+}
+
+/**
+ * Cleanup previously created device representing Ethernet port.
+ *
+ * @param name
+ * Pointer to the port name.
+ */
+static void
+mrvl_eth_dev_destroy(const char *name)
+{
+ struct rte_eth_dev *eth_dev;
+ struct mrvl_priv *priv;
+
+ eth_dev = rte_eth_dev_allocated(name);
+ if (!eth_dev)
+ return;
+
+ priv = eth_dev->data->dev_private;
+ pp2_bpool_deinit(priv->bpool);
+ used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
+ rte_free(priv);
+ rte_free(eth_dev->data->mac_addrs);
+ rte_eth_dev_release_port(eth_dev);
+}
+
+/**
+ * Callback used by rte_kvargs_process() during argument parsing.
+ *
+ * @param key
+ * Pointer to the parsed key (unused).
+ * @param value
+ * Pointer to the parsed value.
+ * @param extra_args
+ * Pointer to the extra arguments which contains address of the
+ * table of pointers to parsed interface names.
+ *
+ * @return
+ * Always 0.
+ */
+static int
+mrvl_get_ifnames(const char *key __rte_unused, const char *value,
+ void *extra_args)
+{
+ struct mrvl_ifnames *ifnames = extra_args;
+
+ ifnames->names[ifnames->idx++] = value;
+
+ return 0;
+}
+
+/**
+ * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
+ */
+static void
+mrvl_deinit_hifs(void)
+{
+ int i;
+
+ for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
+ if (hifs[i])
+ pp2_hif_deinit(hifs[i]);
+ }
+ used_hifs = MRVL_MUSDK_HIFS_RESERVED;
+ memset(hifs, 0, sizeof(hifs));
+}
+
+/**
+ * DPDK callback to register the virtual device.
+ *
+ * @param vdev
+ * Pointer to the virtual device.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_kvargs *kvlist;
+ struct mrvl_ifnames ifnames;
+ int ret = -EINVAL;
+ uint32_t i, ifnum, cfgnum;
+ const char *params;
+
+ params = rte_vdev_device_args(vdev);
+ if (!params)
+ return -EINVAL;
+
+ kvlist = rte_kvargs_parse(params, valid_args);
+ if (!kvlist)
+ return -EINVAL;
+
+ ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
+ if (ifnum > RTE_DIM(ifnames.names))
+ goto out_free_kvlist;
+
+ ifnames.idx = 0;
+ rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
+ mrvl_get_ifnames, &ifnames);
+
+
+ /*
+ * The below system initialization should be done only once,
+ * on the first provided configuration file
+ */
+ if (!mrvl_qos_cfg) {
+ cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
+ MRVL_LOG(INFO, "Parsing config file!");
+ if (cfgnum > 1) {
+ MRVL_LOG(ERR, "Cannot handle more than one config file!");
+ goto out_free_kvlist;
+ } else if (cfgnum == 1) {
+ rte_kvargs_process(kvlist, MRVL_CFG_ARG,
+ mrvl_get_qoscfg, &mrvl_qos_cfg);
+ }
+ }
+
+ if (mrvl_dev_num)
+ goto init_devices;
+
+ MRVL_LOG(INFO, "Perform MUSDK initializations");
+ /*
+ * ret == -EEXIST is correct, it means DMA
+ * has been already initialized (by another PMD).
+ */
+ ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
+ if (ret < 0) {
+ if (ret != -EEXIST)
+ goto out_free_kvlist;
+ else
+ MRVL_LOG(INFO,
+ "DMA memory has been already initialized by a different driver.");
+ }
+
+ ret = mrvl_init_pp2();
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to init PP!");
+ goto out_deinit_dma;
+ }
+
+ memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
+ memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));
+
+ mrvl_lcore_first = RTE_MAX_LCORE;
+ mrvl_lcore_last = 0;
+
+init_devices:
+ for (i = 0; i < ifnum; i++) {
+ MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
+ ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
+ if (ret)
+ goto out_cleanup;
+ }
+ mrvl_dev_num += ifnum;
+
+ rte_kvargs_free(kvlist);
+
+ return 0;
+out_cleanup:
+ for (; i > 0; i--)
+ mrvl_eth_dev_destroy(ifnames.names[i]);
+
+ if (mrvl_dev_num == 0)
+ mrvl_deinit_pp2();
+out_deinit_dma:
+ if (mrvl_dev_num == 0)
+ mv_sys_dma_mem_destroy();
+out_free_kvlist:
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+/**
+ * DPDK callback to remove virtual device.
+ *
+ * @param vdev
+ * Pointer to the removed virtual device.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
+{
+ int i;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (!name)
+ return -EINVAL;
+
+ MRVL_LOG(INFO, "Removing %s", name);
+
+ RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices! */
+ char ifname[RTE_ETH_NAME_MAX_LEN];
+
+ rte_eth_dev_get_name_by_port(i, ifname);
+ mrvl_eth_dev_destroy(ifname);
+ mrvl_dev_num--;
+ }
+
+ if (mrvl_dev_num == 0) {
+ MRVL_LOG(INFO, "Perform MUSDK deinit");
+ mrvl_deinit_hifs();
+ mrvl_deinit_pp2();
+ mv_sys_dma_mem_destroy();
+ }
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_mrvl_drv = {
+ .probe = rte_pmd_mrvl_probe,
+ .remove = rte_pmd_mrvl_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
+RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
+
+RTE_INIT(mrvl_init_log)
+{
+ mrvl_logtype = rte_log_register("pmd.net.mvpp2");
+ if (mrvl_logtype >= 0)
+ rte_log_set_level(mrvl_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.h b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.h
new file mode 100644
index 00000000..3726f788
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _MRVL_ETHDEV_H_
+#define _MRVL_ETHDEV_H_
+
+#include <rte_spinlock.h>
+#include <rte_flow_driver.h>
+
+#include <env/mv_autogen_comp_flags.h>
+#include <drivers/mv_pp2.h>
+#include <drivers/mv_pp2_bpool.h>
+#include <drivers/mv_pp2_cls.h>
+#include <drivers/mv_pp2_hif.h>
+#include <drivers/mv_pp2_ppio.h>
+
+/** Maximum number of rx queues per port */
+#define MRVL_PP2_RXQ_MAX 32
+
+/** Maximum number of tx queues per port */
+#define MRVL_PP2_TXQ_MAX 8
+
+/** Minimum number of descriptors in tx queue */
+#define MRVL_PP2_TXD_MIN 16
+
+/** Maximum number of descriptors in tx queue */
+#define MRVL_PP2_TXD_MAX 2048
+
+/** Tx queue descriptors alignment */
+#define MRVL_PP2_TXD_ALIGN 16
+
+/** Minimum number of descriptors in rx queue */
+#define MRVL_PP2_RXD_MIN 16
+
+/** Maximum number of descriptors in rx queue */
+#define MRVL_PP2_RXD_MAX 2048
+
+/** Rx queue descriptors alignment */
+#define MRVL_PP2_RXD_ALIGN 16
+
+/** Maximum number of descriptors in tx aggregated queue */
+#define MRVL_PP2_AGGR_TXQD_MAX 2048
+
+/** Maximum number of Traffic Classes. */
+#define MRVL_PP2_TC_MAX 8
+
+/** Packet offset inside RX buffer. */
+#define MRVL_PKT_OFFS 64
+
+/** Maximum number of descriptors in shadow queue. Must be power of 2 */
+#define MRVL_PP2_TX_SHADOWQ_SIZE MRVL_PP2_TXD_MAX
+
+/** Shadow queue size mask (since shadow queue size is power of 2) */
+#define MRVL_PP2_TX_SHADOWQ_MASK (MRVL_PP2_TX_SHADOWQ_SIZE - 1)
+
+/** Minimum number of sent buffers to release from shadow queue to BM */
+#define MRVL_PP2_BUF_RELEASE_BURST_SIZE 64
+
+struct mrvl_priv {
+ /* Hot fields, used in fast path. */
+ struct pp2_bpool *bpool; /**< BPool pointer */
+ struct pp2_ppio *ppio; /**< Port handler pointer */
+ rte_spinlock_t lock; /**< Spinlock for checking bpool status */
+ uint16_t bpool_max_size; /**< BPool maximum size */
+ uint16_t bpool_min_size; /**< BPool minimum size */
+ uint16_t bpool_init_size; /**< Configured BPool size */
+
+ /** Mapping for DPDK rx queue->(TC, MRVL relative inq) */
+ struct {
+ uint8_t tc; /**< Traffic Class */
+ uint8_t inq; /**< Relative in-queue number */
+ } rxq_map[MRVL_PP2_RXQ_MAX] __rte_cache_aligned;
+
+ /* Configuration data, used sporadically. */
+ uint8_t pp_id;
+ uint8_t ppio_id;
+ uint8_t bpool_bit;
+ uint8_t rss_hf_tcp;
+ uint8_t uc_mc_flushed;
+ uint8_t vlan_flushed;
+ uint8_t isolated;
+
+ struct pp2_ppio_params ppio_params;
+ struct pp2_cls_qos_tbl_params qos_tbl_params;
+ struct pp2_cls_tbl *qos_tbl;
+ uint16_t nb_rx_queues;
+
+ struct pp2_cls_tbl_params cls_tbl_params;
+ struct pp2_cls_tbl *cls_tbl;
+ uint32_t cls_tbl_pattern;
+ LIST_HEAD(mrvl_flows, rte_flow) flows;
+
+ struct pp2_cls_plcr *policer;
+};
+
+/** Flow operations forward declaration. */
+extern const struct rte_flow_ops mrvl_flow_ops;
+
+/** Current log type. */
+extern int mrvl_logtype;
+
+#define MRVL_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, mrvl_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#endif /* _MRVL_ETHDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.c b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.c
new file mode 100644
index 00000000..ecc34192
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.c
@@ -0,0 +1,2779 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+#include <arpa/inet.h>
+
+#ifdef container_of
+#undef container_of
+#endif
+
+#include "mrvl_ethdev.h"
+#include "mrvl_qos.h"
+#include "env/mv_common.h" /* for BIT() */
+
+/** Number of rules in the classifier table. */
+#define MRVL_CLS_MAX_NUM_RULES 20
+
+/** Size of the classifier key and mask strings. */
+#define MRVL_CLS_STR_SIZE_MAX 40
+
+/** Parsed fields in processed rte_flow_item. */
+enum mrvl_parsed_fields {
+ /* eth flags */
+ F_DMAC = BIT(0),
+ F_SMAC = BIT(1),
+ F_TYPE = BIT(2),
+ /* vlan flags */
+ F_VLAN_ID = BIT(3),
+ F_VLAN_PRI = BIT(4),
+ F_VLAN_TCI = BIT(5), /* not supported by MUSDK yet */
+ /* ip4 flags */
+ F_IP4_TOS = BIT(6),
+ F_IP4_SIP = BIT(7),
+ F_IP4_DIP = BIT(8),
+ F_IP4_PROTO = BIT(9),
+ /* ip6 flags */
+ F_IP6_TC = BIT(10), /* not supported by MUSDK yet */
+ F_IP6_SIP = BIT(11),
+ F_IP6_DIP = BIT(12),
+ F_IP6_FLOW = BIT(13),
+ F_IP6_NEXT_HDR = BIT(14),
+ /* tcp flags */
+ F_TCP_SPORT = BIT(15),
+ F_TCP_DPORT = BIT(16),
+ /* udp flags */
+ F_UDP_SPORT = BIT(17),
+ F_UDP_DPORT = BIT(18),
+};
+
+/** PMD-specific definition of a flow rule handle. */
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next;
+
+ enum mrvl_parsed_fields pattern;
+
+ struct pp2_cls_tbl_rule rule;
+ struct pp2_cls_cos_desc cos;
+ struct pp2_cls_tbl_action action;
+};
+
+static const enum rte_flow_item_type pattern_eth[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_vlan[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_vlan_ip[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_vlan_ip6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip_udp[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip6[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip6[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_udp[] = {
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+#define MRVL_VLAN_ID_MASK 0x0fff
+#define MRVL_VLAN_PRI_MASK 0x7000
+#define MRVL_IPV4_DSCP_MASK 0xfc
+#define MRVL_IPV4_ADDR_MASK 0xffffffff
+#define MRVL_IPV6_FLOW_MASK 0x0fffff
+
+/**
+ * Given a flow item, return the next non-void one.
+ *
+ * @param items Pointer to the item in the table.
+ * @returns Next not-void item, NULL otherwise.
+ */
+static const struct rte_flow_item *
+mrvl_next_item(const struct rte_flow_item *items)
+{
+ const struct rte_flow_item *item = items;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return item;
+ }
+
+ return NULL;
+}
+
+/**
+ * Allocate memory for classifier rule key and mask fields.
+ *
+ * @param field Pointer to the classifier rule.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
+{
+ unsigned int id = rte_socket_id();
+
+ field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
+ if (!field->key)
+ goto out;
+
+ field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
+ if (!field->mask)
+ goto out_mask;
+
+ return 0;
+out_mask:
+ rte_free(field->key);
+out:
+ field->key = NULL;
+ field->mask = NULL;
+ return -1;
+}
+
+/**
+ * Free memory allocated for classifier rule key and mask fields.
+ *
+ * @param field Pointer to the classifier rule.
+ */
+static void
+mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
+{
+ rte_free(field->key);
+ rte_free(field->mask);
+ field->key = NULL;
+ field->mask = NULL;
+}
+
+/**
+ * Free memory allocated for all classifier rule key and mask fields.
+ *
+ * @param rule Pointer to the classifier table rule.
+ */
+static void
+mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
+{
+ int i;
+
+ for (i = 0; i < rule->num_fields; i++)
+ mrvl_free_key_mask(&rule->fields[i]);
+ rule->num_fields = 0;
+}
+
+/*
+ * Initialize rte flow item parsing.
+ *
+ * @param item Pointer to the flow item.
+ * @param spec_ptr Pointer to the specific item pointer.
+ * @param mask_ptr Pointer to the specific item's mask pointer.
+ * @def_mask Pointer to the default mask.
+ * @size Size of the flow item.
+ * @error Pointer to the rte flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_init(const struct rte_flow_item *item,
+ const void **spec_ptr,
+ const void **mask_ptr,
+ const void *def_mask,
+ unsigned int size,
+ struct rte_flow_error *error)
+{
+ const uint8_t *spec;
+ const uint8_t *mask;
+ const uint8_t *last;
+ uint8_t zeros[size];
+
+ memset(zeros, 0, size);
+
+ if (item == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "NULL item\n");
+ return -rte_errno;
+ }
+
+ if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Mask or last is set without spec\n");
+ return -rte_errno;
+ }
+
+ /*
+ * If "mask" is not set, default mask is used,
+ * but if default mask is NULL, "mask" should be set.
+ */
+ if (item->mask == NULL) {
+ if (def_mask == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Mask should be specified\n");
+ return -rte_errno;
+ }
+
+ mask = (const uint8_t *)def_mask;
+ } else {
+ mask = (const uint8_t *)item->mask;
+ }
+
+ spec = (const uint8_t *)item->spec;
+ last = (const uint8_t *)item->last;
+
+ if (spec == NULL) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Spec should be specified\n");
+ return -rte_errno;
+ }
+
+ /*
+ * If field values in "last" are either 0 or equal to the corresponding
+ * values in "spec" then they are ignored.
+ */
+ if (last != NULL &&
+ !memcmp(last, zeros, size) &&
+ memcmp(last, spec, size) != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Ranging is not supported\n");
+ return -rte_errno;
+ }
+
+ *spec_ptr = spec;
+ *mask_ptr = mask;
+
+ return 0;
+}
+
+/**
+ * Parse the eth flow item.
+ *
+ * This will create classifier rule that matches either destination or source
+ * mac.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param mask Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_mac(const struct rte_flow_item_eth *spec,
+ const struct rte_flow_item_eth *mask,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ const uint8_t *k, *m;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ if (parse_dst) {
+ k = spec->dst.addr_bytes;
+ m = mask->dst.addr_bytes;
+
+ flow->pattern |= F_DMAC;
+ } else {
+ k = spec->src.addr_bytes;
+ m = mask->src.addr_bytes;
+
+ flow->pattern |= F_SMAC;
+ }
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 6;
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ k[0], k[1], k[2], k[3], k[4], k[5]);
+
+ snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ m[0], m[1], m[2], m[3], m[4], m[5]);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Helper for parsing the eth flow item destination mac address.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
+ const struct rte_flow_item_eth *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_mac(spec, mask, 1, flow);
+}
+
+/**
+ * Helper for parsing the eth flow item source mac address.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_smac(const struct rte_flow_item_eth *spec,
+ const struct rte_flow_item_eth *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_mac(spec, mask, 0, flow);
+}
+
+/**
+ * Parse the ether type field of the eth flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_type(const struct rte_flow_item_eth *spec,
+ const struct rte_flow_item_eth *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 2;
+
+ k = rte_be_to_cpu_16(spec->type);
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_TYPE;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse the vid field of the vlan rte flow item.
+ *
+ * This will create classifier rule that matches vid.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
+ const struct rte_flow_item_vlan *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 2;
+
+ k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_VLAN_ID;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse the pri field of the vlan rte flow item.
+ *
+ * This will create classifier rule that matches pri.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
+ const struct rte_flow_item_vlan *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 1;
+
+ k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_VLAN_PRI;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse the dscp field of the ipv4 rte flow item.
+ *
+ * This will create classifier rule that matches dscp field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint8_t k, m;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 1;
+
+ k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
+ m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+ snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
+
+ flow->pattern |= F_IP4_TOS;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse either source or destination ip addresses of the ipv4 flow item.
+ *
+ * This will create classifier rule that matches either destination
+ * or source ip field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ struct in_addr k;
+ uint32_t m;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ memset(&k, 0, sizeof(k));
+ if (parse_dst) {
+ k.s_addr = spec->hdr.dst_addr;
+ m = rte_be_to_cpu_32(mask->hdr.dst_addr);
+
+ flow->pattern |= F_IP4_DIP;
+ } else {
+ k.s_addr = spec->hdr.src_addr;
+ m = rte_be_to_cpu_32(mask->hdr.src_addr);
+
+ flow->pattern |= F_IP4_SIP;
+ }
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 4;
+
+ inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
+ snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Helper for parsing destination ip of the ipv4 flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_ip4_addr(spec, mask, 1, flow);
+}
+
+/**
+ * Helper for parsing source ip of the ipv4 flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_ip4_addr(spec, mask, 0, flow);
+}
+
+/**
+ * Parse the proto field of the ipv4 rte flow item.
+ *
+ * This will create classifier rule that matches proto field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint8_t k = spec->hdr.next_proto_id;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 1;
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_IP4_PROTO;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse either source or destination ip addresses of the ipv6 rte flow item.
+ *
+ * This will create classifier rule that matches either destination
+ * or source ip field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ int size = sizeof(spec->hdr.dst_addr);
+ struct in6_addr k, m;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ memset(&k, 0, sizeof(k));
+ if (parse_dst) {
+ memcpy(k.s6_addr, spec->hdr.dst_addr, size);
+ memcpy(m.s6_addr, mask->hdr.dst_addr, size);
+
+ flow->pattern |= F_IP6_DIP;
+ } else {
+ memcpy(k.s6_addr, spec->hdr.src_addr, size);
+ memcpy(m.s6_addr, mask->hdr.src_addr, size);
+
+ flow->pattern |= F_IP6_SIP;
+ }
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 16;
+
+ inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
+ inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Helper for parsing destination ip of the ipv6 flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_ip6_addr(spec, mask, 1, flow);
+}
+
+/**
+ * Helper for parsing source ip of the ipv6 flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_ip6_addr(spec, mask, 0, flow);
+}
+
+/**
+ * Parse the flow label of the ipv6 flow item.
+ *
+ * This will create classifier rule that matches flow field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
+ m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 3;
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+ snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
+
+ flow->pattern |= F_IP6_FLOW;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse the next header of the ipv6 flow item.
+ *
+ * This will create classifier rule that matches next header field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint8_t k = spec->hdr.proto;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 1;
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_IP6_NEXT_HDR;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse destination or source port of the tcp flow item.
+ *
+ * This will create classifier rule that matches either destination or
+ * source tcp port.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
+ const struct rte_flow_item_tcp *mask __rte_unused,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 2;
+
+ if (parse_dst) {
+ k = rte_be_to_cpu_16(spec->hdr.dst_port);
+
+ flow->pattern |= F_TCP_DPORT;
+ } else {
+ k = rte_be_to_cpu_16(spec->hdr.src_port);
+
+ flow->pattern |= F_TCP_SPORT;
+ }
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Helper for parsing the tcp source port of the tcp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
+ const struct rte_flow_item_tcp *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_tcp_port(spec, mask, 0, flow);
+}
+
+/**
+ * Helper for parsing the tcp destination port of the tcp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
+ const struct rte_flow_item_tcp *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_tcp_port(spec, mask, 1, flow);
+}
+
+/**
+ * Parse destination or source port of the udp flow item.
+ *
+ * This will create classifier rule that matches either destination or
+ * source udp port.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
+ const struct rte_flow_item_udp *mask __rte_unused,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 2;
+
+ if (parse_dst) {
+ k = rte_be_to_cpu_16(spec->hdr.dst_port);
+
+ flow->pattern |= F_UDP_DPORT;
+ } else {
+ k = rte_be_to_cpu_16(spec->hdr.src_port);
+
+ flow->pattern |= F_UDP_SPORT;
+ }
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Helper for parsing the udp source port of the udp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
+ const struct rte_flow_item_udp *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_udp_port(spec, mask, 0, flow);
+}
+
+/**
+ * Helper for parsing the udp destination port of the udp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
+ const struct rte_flow_item_udp *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_udp_port(spec, mask, 1, flow);
+}
+
+/**
+ * Parse eth flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param fields Pointer to the parsed parsed fields enum.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
+ struct ether_addr zero;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_eth_mask,
+ sizeof(struct rte_flow_item_eth), error);
+ if (ret)
+ return ret;
+
+ memset(&zero, 0, sizeof(zero));
+
+ if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
+ ret = mrvl_parse_dmac(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
+ ret = mrvl_parse_smac(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->type) {
+ MRVL_LOG(WARNING, "eth type mask is ignored");
+ ret = mrvl_parse_type(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse vlan flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param fields Pointer to the parsed parsed fields enum.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_vlan(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
+ uint16_t m;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_vlan_mask,
+ sizeof(struct rte_flow_item_vlan), error);
+ if (ret)
+ return ret;
+
+ m = rte_be_to_cpu_16(mask->tci);
+ if (m & MRVL_VLAN_ID_MASK) {
+ MRVL_LOG(WARNING, "vlan id mask is ignored");
+ ret = mrvl_parse_vlan_id(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (m & MRVL_VLAN_PRI_MASK) {
+ MRVL_LOG(WARNING, "vlan pri mask is ignored");
+ ret = mrvl_parse_vlan_pri(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (flow->pattern & F_TYPE) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN TPID matching is not supported");
+ return -rte_errno;
+ }
+ if (mask->inner_type) {
+ struct rte_flow_item_eth spec_eth = {
+ .type = spec->inner_type,
+ };
+ struct rte_flow_item_eth mask_eth = {
+ .type = mask->inner_type,
+ };
+
+ MRVL_LOG(WARNING, "inner eth type mask is ignored");
+ ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse ipv4 flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param fields Pointer to the parsed parsed fields enum.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_ip4(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_ipv4_mask,
+ sizeof(struct rte_flow_item_ipv4), error);
+ if (ret)
+ return ret;
+
+ if (mask->hdr.version_ihl ||
+ mask->hdr.total_length ||
+ mask->hdr.packet_id ||
+ mask->hdr.fragment_offset ||
+ mask->hdr.time_to_live ||
+ mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by classifier\n");
+ return -rte_errno;
+ }
+
+ if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
+ ret = mrvl_parse_ip4_dscp(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.src_addr) {
+ ret = mrvl_parse_ip4_sip(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.dst_addr) {
+ ret = mrvl_parse_ip4_dip(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.next_proto_id) {
+ MRVL_LOG(WARNING, "next proto id mask is ignored");
+ ret = mrvl_parse_ip4_proto(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse ipv6 flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param fields Pointer to the parsed parsed fields enum.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_ip6(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
+ struct ipv6_hdr zero;
+ uint32_t flow_mask;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec,
+ (const void **)&mask,
+ &rte_flow_item_ipv6_mask,
+ sizeof(struct rte_flow_item_ipv6),
+ error);
+ if (ret)
+ return ret;
+
+ memset(&zero, 0, sizeof(zero));
+
+ if (mask->hdr.payload_len ||
+ mask->hdr.hop_limits) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by classifier\n");
+ return -rte_errno;
+ }
+
+ if (memcmp(mask->hdr.src_addr,
+ zero.src_addr, sizeof(mask->hdr.src_addr))) {
+ ret = mrvl_parse_ip6_sip(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (memcmp(mask->hdr.dst_addr,
+ zero.dst_addr, sizeof(mask->hdr.dst_addr))) {
+ ret = mrvl_parse_ip6_dip(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
+ if (flow_mask) {
+ ret = mrvl_parse_ip6_flow(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.proto) {
+ MRVL_LOG(WARNING, "next header mask is ignored");
+ ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse tcp flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param fields Pointer to the parsed parsed fields enum.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_tcp(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_ipv4_mask,
+ sizeof(struct rte_flow_item_ipv4), error);
+ if (ret)
+ return ret;
+
+ if (mask->hdr.sent_seq ||
+ mask->hdr.recv_ack ||
+ mask->hdr.data_off ||
+ mask->hdr.tcp_flags ||
+ mask->hdr.rx_win ||
+ mask->hdr.cksum ||
+ mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by classifier\n");
+ return -rte_errno;
+ }
+
+ if (mask->hdr.src_port) {
+ MRVL_LOG(WARNING, "tcp sport mask is ignored");
+ ret = mrvl_parse_tcp_sport(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.dst_port) {
+ MRVL_LOG(WARNING, "tcp dport mask is ignored");
+ ret = mrvl_parse_tcp_dport(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse udp flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param fields Pointer to the parsed parsed fields enum.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_udp(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_ipv4_mask,
+ sizeof(struct rte_flow_item_ipv4), error);
+ if (ret)
+ return ret;
+
+ if (mask->hdr.dgram_len ||
+ mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by classifier\n");
+ return -rte_errno;
+ }
+
+ if (mask->hdr.src_port) {
+ MRVL_LOG(WARNING, "udp sport mask is ignored");
+ ret = mrvl_parse_udp_sport(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.dst_port) {
+ MRVL_LOG(WARNING, "udp dport mask is ignored");
+ ret = mrvl_parse_udp_dport(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse flow pattern composed of the the eth item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_eth(pattern, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth and vlan items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_vlan(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_eth(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return mrvl_parse_vlan(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth, vlan and ip4/ip6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_vlan_ip4_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_eth(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ ret = mrvl_parse_vlan(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth, vlan and ipv4 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_vlan_ip4(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the eth, vlan and ipv6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_vlan_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the eth and ip4/ip6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_ip4_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_eth(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth and ipv4 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip4(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the eth and ipv6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ip4 and tcp/udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param tcp 1 to parse tcp item, 0 to parse udp item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_ip4_tcp_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int tcp)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ item = mrvl_next_item(item + 1);
+
+ if (tcp)
+ return mrvl_parse_tcp(item, flow, error);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv4 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip4_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv4 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip4_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv6 and tcp/udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param tcp 1 to parse tcp item, 0 to parse udp item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_ip6_tcp_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int tcp)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ item = mrvl_next_item(item + 1);
+
+ if (tcp)
+ return mrvl_parse_tcp(item, flow, error);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv6 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip6_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv6 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip6_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the vlan item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_vlan(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+
+ return mrvl_parse_vlan(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the vlan and ip4/ip6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_vlan_ip4_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_vlan(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the vlan and ipv4 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip4(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv4 and tcp/udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_vlan_ip_tcp_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int tcp)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ item = mrvl_next_item(item + 1);
+
+ if (tcp)
+ return mrvl_parse_tcp(item, flow, error);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv4 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv4 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the vlan and ipv6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv6 and tcp/udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_vlan_ip6_tcp_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int tcp)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ item = mrvl_next_item(item + 1);
+
+ if (tcp)
+ return mrvl_parse_tcp(item, flow, error);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv6 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip6_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv6 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip6_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the ip4/ip6 item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_ip4_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+
+ return ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the ipv4 item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip4(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the ipv6 item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the ip4/ip6 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_ip4_ip6_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return mrvl_parse_tcp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the ipv4 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip4_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the ipv6 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip6_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the ipv4/ipv6 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_ip4_ip6_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the ipv4 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip4_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the ipv6 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip6_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the tcp item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+
+ return mrvl_parse_tcp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the udp item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Structure used to map specific flow pattern to the pattern parse callback
+ * which will iterate over each pattern item and extract relevant data.
+ */
+static const struct {
+ const enum rte_flow_item_type *pattern;
+ int (*parse)(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+} mrvl_patterns[] = {
+ { pattern_eth, mrvl_parse_pattern_eth },
+ { pattern_eth_vlan, mrvl_parse_pattern_eth_vlan },
+ { pattern_eth_vlan_ip, mrvl_parse_pattern_eth_vlan_ip4 },
+ { pattern_eth_vlan_ip6, mrvl_parse_pattern_eth_vlan_ip6 },
+ { pattern_eth_ip4, mrvl_parse_pattern_eth_ip4 },
+ { pattern_eth_ip4_tcp, mrvl_parse_pattern_eth_ip4_tcp },
+ { pattern_eth_ip4_udp, mrvl_parse_pattern_eth_ip4_udp },
+ { pattern_eth_ip6, mrvl_parse_pattern_eth_ip6 },
+ { pattern_eth_ip6_tcp, mrvl_parse_pattern_eth_ip6_tcp },
+ { pattern_eth_ip6_udp, mrvl_parse_pattern_eth_ip6_udp },
+ { pattern_vlan, mrvl_parse_pattern_vlan },
+ { pattern_vlan_ip, mrvl_parse_pattern_vlan_ip4 },
+ { pattern_vlan_ip_tcp, mrvl_parse_pattern_vlan_ip_tcp },
+ { pattern_vlan_ip_udp, mrvl_parse_pattern_vlan_ip_udp },
+ { pattern_vlan_ip6, mrvl_parse_pattern_vlan_ip6 },
+ { pattern_vlan_ip6_tcp, mrvl_parse_pattern_vlan_ip6_tcp },
+ { pattern_vlan_ip6_udp, mrvl_parse_pattern_vlan_ip6_udp },
+ { pattern_ip, mrvl_parse_pattern_ip4 },
+ { pattern_ip_tcp, mrvl_parse_pattern_ip4_tcp },
+ { pattern_ip_udp, mrvl_parse_pattern_ip4_udp },
+ { pattern_ip6, mrvl_parse_pattern_ip6 },
+ { pattern_ip6_tcp, mrvl_parse_pattern_ip6_tcp },
+ { pattern_ip6_udp, mrvl_parse_pattern_ip6_udp },
+ { pattern_tcp, mrvl_parse_pattern_tcp },
+ { pattern_udp, mrvl_parse_pattern_udp }
+};
+
+/**
+ * Check whether provided pattern matches any of the supported ones.
+ *
+ * @param type_pattern Pointer to the pattern type.
+ * @param item_pattern Pointer to the flow pattern.
+ * @returns 1 in case of success, 0 value otherwise.
+ */
+static int
+mrvl_patterns_match(const enum rte_flow_item_type *type_pattern,
+ const struct rte_flow_item *item_pattern)
+{
+ const enum rte_flow_item_type *type = type_pattern;
+ const struct rte_flow_item *item = item_pattern;
+
+ for (;;) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
+ item++;
+ continue;
+ }
+
+ if (*type == RTE_FLOW_ITEM_TYPE_END ||
+ item->type == RTE_FLOW_ITEM_TYPE_END)
+ break;
+
+ if (*type != item->type)
+ break;
+
+ item++;
+ type++;
+ }
+
+ return *type == item->type;
+}
+
+/**
+ * Parse flow attribute.
+ *
+ * This will check whether the provided attribute's flags are supported.
+ *
+ * @param priv Unused
+ * @param attr Pointer to the flow attribute.
+ * @param flow Unused
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
+ const struct rte_flow_attr *attr,
+ struct rte_flow *flow __rte_unused,
+ struct rte_flow_error *error)
+{
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute");
+ return -rte_errno;
+ }
+
+ if (attr->group) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
+ "Groups are not supported");
+ return -rte_errno;
+ }
+ if (attr->priority) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
+ "Priorities are not supported");
+ return -rte_errno;
+ }
+ if (!attr->ingress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
+ "Only ingress is supported");
+ return -rte_errno;
+ }
+ if (attr->egress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "Egress is not supported");
+ return -rte_errno;
+ }
+ if (attr->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
+ "Transfer is not supported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse flow pattern.
+ *
+ * Specific classifier rule will be created as well.
+ *
+ * @param priv Unused
+ * @param pattern Pointer to the flow pattern.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
+ const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < RTE_DIM(mrvl_patterns); i++) {
+ if (!mrvl_patterns_match(mrvl_patterns[i].pattern, pattern))
+ continue;
+
+ ret = mrvl_patterns[i].parse(pattern, flow, error);
+ if (ret)
+ mrvl_free_all_key_mask(&flow->rule);
+
+ return ret;
+ }
+
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Unsupported pattern");
+
+ return -rte_errno;
+}
+
+/**
+ * Parse flow actions.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param actions Pointer the action table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse_actions(struct mrvl_priv *priv,
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *action = actions;
+ int specified = 0;
+
+ for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+ if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+
+ if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ flow->cos.ppio = priv->ppio;
+ flow->cos.tc = 0;
+ flow->action.type = PP2_CLS_TBL_ACT_DROP;
+ flow->action.cos = &flow->cos;
+ specified++;
+ } else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *q =
+ (const struct rte_flow_action_queue *)
+ action->conf;
+
+ if (q->index > priv->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Queue index out of range");
+ return -rte_errno;
+ }
+
+ if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
+ /*
+ * Unknown TC mapping, mapping will not have
+ * a correct queue.
+ */
+ MRVL_LOG(ERR,
+ "Unknown TC mapping for queue %hu eth%hhu",
+ q->index, priv->ppio_id);
+
+ rte_flow_error_set(error, EFAULT,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+ return -rte_errno;
+ }
+
+ MRVL_LOG(DEBUG,
+ "Action: Assign packets to queue %d, tc:%d, q:%d",
+ q->index, priv->rxq_map[q->index].tc,
+ priv->rxq_map[q->index].inq);
+
+ flow->cos.ppio = priv->ppio;
+ flow->cos.tc = priv->rxq_map[q->index].tc;
+ flow->action.type = PP2_CLS_TBL_ACT_DONE;
+ flow->action.cos = &flow->cos;
+ specified++;
+ } else {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Action not supported");
+ return -rte_errno;
+ }
+
+ }
+
+ if (!specified) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Action not specified");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse flow attribute, pattern and actions.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param attr Pointer to the flow attribute.
+ * @param pattern Pointer to the flow pattern.
+ * @param actions Pointer to the flow actions.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = mrvl_flow_parse_attr(priv, attr, flow, error);
+ if (ret)
+ return ret;
+
+ ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);
+ if (ret)
+ return ret;
+
+ return mrvl_flow_parse_actions(priv, actions, flow, error);
+}
+
+static inline enum pp2_cls_tbl_type
+mrvl_engine_type(const struct rte_flow *flow)
+{
+ int i, size = 0;
+
+ for (i = 0; i < flow->rule.num_fields; i++)
+ size += flow->rule.fields[i].size;
+
+ /*
+ * For maskable engine type the key size must be up to 8 bytes.
+ * For keys with size bigger than 8 bytes, engine type must
+ * be set to exact match.
+ */
+ if (size > 8)
+ return PP2_CLS_TBL_EXACT_MATCH;
+
+ return PP2_CLS_TBL_MASKABLE;
+}
+
+static int
+mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
+ int ret;
+
+ if (priv->cls_tbl) {
+ pp2_cls_tbl_deinit(priv->cls_tbl);
+ priv->cls_tbl = NULL;
+ }
+
+ memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
+
+ priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
+ MRVL_LOG(INFO, "Setting cls search engine type to %s",
+ priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
+ "exact" : "maskable");
+ priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
+ priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
+ priv->cls_tbl_params.default_act.cos = &first_flow->cos;
+
+ if (first_flow->pattern & F_DMAC) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
+ key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA;
+ key->key_size += 6;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_SMAC) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
+ key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA;
+ key->key_size += 6;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_TYPE) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
+ key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_VLAN_ID) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
+ key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_VLAN_PRI) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
+ key->proto_field[key->num_fields].field.vlan =
+ MV_NET_VLAN_F_PRI;
+ key->key_size += 1;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP4_TOS) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
+ key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_TOS;
+ key->key_size += 1;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP4_SIP) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
+ key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA;
+ key->key_size += 4;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP4_DIP) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
+ key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA;
+ key->key_size += 4;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP4_PROTO) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
+ key->proto_field[key->num_fields].field.ipv4 =
+ MV_NET_IP4_F_PROTO;
+ key->key_size += 1;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP6_SIP) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
+ key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA;
+ key->key_size += 16;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP6_DIP) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
+ key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA;
+ key->key_size += 16;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP6_FLOW) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
+ key->proto_field[key->num_fields].field.ipv6 =
+ MV_NET_IP6_F_FLOW;
+ key->key_size += 3;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP6_NEXT_HDR) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
+ key->proto_field[key->num_fields].field.ipv6 =
+ MV_NET_IP6_F_NEXT_HDR;
+ key->key_size += 1;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_TCP_SPORT) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
+ key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_TCP_DPORT) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
+ key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_UDP_SPORT) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
+ key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_UDP_DPORT) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
+ key->proto_field[key->num_fields].field.udp = MV_NET_TCP_F_DP;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
+ if (!ret)
+ priv->cls_tbl_pattern = first_flow->pattern;
+
+ return ret;
+}
+
+/**
+ * Check whether new flow can be added to the table
+ *
+ * @param priv Pointer to the port's private data.
+ * @param flow Pointer to the new flow.
+ * @return 1 in case flow can be added, 0 otherwise.
+ */
+static inline int
+mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
+{
+ return flow->pattern == priv->cls_tbl_pattern &&
+ mrvl_engine_type(flow) == priv->cls_tbl_params.type;
+}
+
+/**
+ * DPDK flow create callback called when flow is to be created.
+ *
+ * @param dev Pointer to the device.
+ * @param attr Pointer to the flow attribute.
+ * @param pattern Pointer to the flow pattern.
+ * @param actions Pointer to the flow actions.
+ * @param error Pointer to the flow error.
+ * @returns Pointer to the created flow in case of success, NULL otherwise.
+ */
+static struct rte_flow *
+mrvl_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow, *first;
+ int ret;
+
+ if (!dev->data->dev_started) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Port must be started first\n");
+ return NULL;
+ }
+
+ flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
+ if (!flow)
+ return NULL;
+
+ ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
+ if (ret)
+ goto out;
+
+ /*
+ * Four cases here:
+ *
+ * 1. In case table does not exist - create one.
+ * 2. In case table exists, is empty and new flow cannot be added
+ * recreate table.
+ * 3. In case table is not empty and new flow matches table format
+ * add it.
+ * 4. Otherwise flow cannot be added.
+ */
+ first = LIST_FIRST(&priv->flows);
+ if (!priv->cls_tbl) {
+ ret = mrvl_create_cls_table(dev, flow);
+ } else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
+ ret = mrvl_create_cls_table(dev, flow);
+ } else if (mrvl_flow_can_be_added(priv, flow)) {
+ ret = 0;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Pattern does not match cls table format\n");
+ goto out;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to create cls table\n");
+ goto out;
+ }
+
+ ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to add rule\n");
+ goto out;
+ }
+
+ LIST_INSERT_HEAD(&priv->flows, flow, next);
+
+ return flow;
+out:
+ rte_free(flow);
+ return NULL;
+}
+
+/**
+ * Remove classifier rule associated with given flow.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ if (!priv->cls_tbl) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Classifier table not initialized");
+ return -rte_errno;
+ }
+
+ ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to remove rule");
+ return -rte_errno;
+ }
+
+ mrvl_free_all_key_mask(&flow->rule);
+
+ return 0;
+}
+
+/**
+ * DPDK flow destroy callback called when flow is to be removed.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct rte_flow *f;
+ int ret;
+
+ LIST_FOREACH(f, &priv->flows, next) {
+ if (f == flow)
+ break;
+ }
+
+ if (!flow) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Rule was not found");
+ return -rte_errno;
+ }
+
+ LIST_REMOVE(f, next);
+
+ ret = mrvl_flow_remove(priv, flow, error);
+ if (ret)
+ return ret;
+
+ rte_free(flow);
+
+ return 0;
+}
+
+/**
+ * DPDK flow callback called to verify given attribute, pattern and actions.
+ *
+ * @param dev Pointer to the device.
+ * @param attr Pointer to the flow attribute.
+ * @param pattern Pointer to the flow pattern.
+ * @param actions Pointer to the flow actions.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ static struct rte_flow *flow;
+
+ flow = mrvl_flow_create(dev, attr, pattern, actions, error);
+ if (!flow)
+ return -rte_errno;
+
+ mrvl_flow_destroy(dev, flow, error);
+
+ return 0;
+}
+
+/**
+ * DPDK flow flush callback called when flows are to be flushed.
+ *
+ * @param dev Pointer to the device.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ while (!LIST_EMPTY(&priv->flows)) {
+ struct rte_flow *flow = LIST_FIRST(&priv->flows);
+ int ret = mrvl_flow_remove(priv, flow, error);
+ if (ret)
+ return ret;
+
+ LIST_REMOVE(flow, next);
+ rte_free(flow);
+ }
+
+ return 0;
+}
+
+/**
+ * DPDK flow isolate callback called to isolate port.
+ *
+ * @param dev Pointer to the device.
+ * @param enable Pass 0/1 to disable/enable port isolation.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
+ struct rte_flow_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ if (dev->data->dev_started) {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Port must be stopped first\n");
+ return -rte_errno;
+ }
+
+ priv->isolated = enable;
+
+ return 0;
+}
+
+const struct rte_flow_ops mrvl_flow_ops = {
+ .validate = mrvl_flow_validate,
+ .create = mrvl_flow_create,
+ .destroy = mrvl_flow_destroy,
+ .flush = mrvl_flow_flush,
+ .isolate = mrvl_flow_isolate
+};
diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.c b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.c
new file mode 100644
index 00000000..71856c1a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.c
@@ -0,0 +1,894 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cfgfile.h>
+#include <rte_log.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+/* Unluckily, container_of is defined by both DPDK and MUSDK,
+ * we'll declare only one version.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+
+#include "mrvl_qos.h"
+
+/* Parsing tokens. Defined conveniently, so that any correction is easy. */
+#define MRVL_TOK_DEFAULT "default"
+#define MRVL_TOK_DEFAULT_TC "default_tc"
+#define MRVL_TOK_DSCP "dscp"
+#define MRVL_TOK_MAPPING_PRIORITY "mapping_priority"
+#define MRVL_TOK_IP "ip"
+#define MRVL_TOK_IP_VLAN "ip/vlan"
+#define MRVL_TOK_PCP "pcp"
+#define MRVL_TOK_PORT "port"
+#define MRVL_TOK_RXQ "rxq"
+#define MRVL_TOK_TC "tc"
+#define MRVL_TOK_TXQ "txq"
+#define MRVL_TOK_VLAN "vlan"
+#define MRVL_TOK_VLAN_IP "vlan/ip"
+
+/* egress specific configuration tokens */
+#define MRVL_TOK_BURST_SIZE "burst_size"
+#define MRVL_TOK_RATE_LIMIT "rate_limit"
+#define MRVL_TOK_RATE_LIMIT_ENABLE "rate_limit_enable"
+#define MRVL_TOK_SCHED_MODE "sched_mode"
+#define MRVL_TOK_SCHED_MODE_SP "sp"
+#define MRVL_TOK_SCHED_MODE_WRR "wrr"
+#define MRVL_TOK_WRR_WEIGHT "wrr_weight"
+
+/* policer specific configuration tokens */
+#define MRVL_TOK_PLCR_ENABLE "policer_enable"
+#define MRVL_TOK_PLCR_UNIT "token_unit"
+#define MRVL_TOK_PLCR_UNIT_BYTES "bytes"
+#define MRVL_TOK_PLCR_UNIT_PACKETS "packets"
+#define MRVL_TOK_PLCR_COLOR "color_mode"
+#define MRVL_TOK_PLCR_COLOR_BLIND "blind"
+#define MRVL_TOK_PLCR_COLOR_AWARE "aware"
+#define MRVL_TOK_PLCR_CIR "cir"
+#define MRVL_TOK_PLCR_CBS "cbs"
+#define MRVL_TOK_PLCR_EBS "ebs"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR "default_color"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN "green"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW "yellow"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_RED "red"
+
+/** Number of tokens in range a-b = 2. */
+#define MAX_RNG_TOKENS 2
+
+/** Maximum possible value of PCP. */
+#define MAX_PCP 7
+
+/** Maximum possible value of DSCP. */
+#define MAX_DSCP 63
+
+/** Global QoS configuration. */
+struct mrvl_qos_cfg *mrvl_qos_cfg;
+
+/**
+ * Convert string to uint32_t with extra checks for result correctness.
+ *
+ * @param string String to convert.
+ * @param val Conversion result.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+get_val_securely(const char *string, uint32_t *val)
+{
+ char *endptr;
+ size_t len = strlen(string);
+
+ if (len == 0)
+ return -1;
+
+ errno = 0;
+ *val = strtoul(string, &endptr, 0);
+ if (errno != 0 || RTE_PTR_DIFF(endptr, string) != len)
+ return -2;
+
+ return 0;
+}
+
+/**
+ * Read out-queue configuration from file.
+ *
+ * @param file Path to the configuration file.
+ * @param port Port number.
+ * @param outq Out queue number.
+ * @param cfg Pointer to the Marvell QoS configuration structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+get_outq_cfg(struct rte_cfgfile *file, int port, int outq,
+ struct mrvl_qos_cfg *cfg)
+{
+ char sec_name[32];
+ const char *entry;
+ uint32_t val;
+
+ snprintf(sec_name, sizeof(sec_name), "%s %d %s %d",
+ MRVL_TOK_PORT, port, MRVL_TOK_TXQ, outq);
+
+ /* Skip non-existing */
+ if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0)
+ return 0;
+
+ /* Read scheduling mode */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_SCHED_MODE);
+ if (entry) {
+ if (!strncmp(entry, MRVL_TOK_SCHED_MODE_SP,
+ strlen(MRVL_TOK_SCHED_MODE_SP))) {
+ cfg->port[port].outq[outq].sched_mode =
+ PP2_PPIO_SCHED_M_SP;
+ } else if (!strncmp(entry, MRVL_TOK_SCHED_MODE_WRR,
+ strlen(MRVL_TOK_SCHED_MODE_WRR))) {
+ cfg->port[port].outq[outq].sched_mode =
+ PP2_PPIO_SCHED_M_WRR;
+ } else {
+ MRVL_LOG(ERR, "Unknown token: %s", entry);
+ return -1;
+ }
+ }
+
+ /* Read wrr weight */
+ if (cfg->port[port].outq[outq].sched_mode == PP2_PPIO_SCHED_M_WRR) {
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_WRR_WEIGHT);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].outq[outq].weight = val;
+ }
+ }
+
+ /*
+ * There's no point in setting rate limiting for specific outq as
+ * global port rate limiting has priority.
+ */
+ if (cfg->port[port].rate_limit_enable) {
+ MRVL_LOG(WARNING, "Port %d rate limiting already enabled",
+ port);
+ return 0;
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_RATE_LIMIT_ENABLE);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].outq[outq].rate_limit_enable = val;
+ }
+
+ if (!cfg->port[port].outq[outq].rate_limit_enable)
+ return 0;
+
+ /* Read CBS (in kB) */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_BURST_SIZE);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].outq[outq].rate_limit_params.cbs = val;
+ }
+
+ /* Read CIR (in kbps) */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RATE_LIMIT);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].outq[outq].rate_limit_params.cir = val;
+ }
+
+ return 0;
+}
+
+/**
+ * Gets multiple-entry values and places them in table.
+ *
+ * Entry can be anything, e.g. "1 2-3 5 6 7-9". This needs to be converted to
+ * table entries, respectively: {1, 2, 3, 5, 6, 7, 8, 9}.
+ * As all result table's elements are always 1-byte long, we
+ * won't overcomplicate the function, but we'll keep API generic,
+ * check if someone hasn't changed element size and make it simple
+ * to extend to other sizes.
+ *
+ * This function is purely utilitary, it does not print any error, only returns
+ * different error numbers.
+ *
+ * @param entry[in] Values string to parse.
+ * @param tab[out] Results table.
+ * @param elem_sz[in] Element size (in bytes).
+ * @param max_elems[in] Number of results table elements available.
+ * @param max val[in] Maximum value allowed.
+ * @returns Number of correctly parsed elements in case of success.
+ * @retval -1 Wrong element size.
+ * @retval -2 More tokens than result table allows.
+ * @retval -3 Wrong range syntax.
+ * @retval -4 Wrong range values.
+ * @retval -5 Maximum value exceeded.
+ */
+static int
+get_entry_values(const char *entry, uint8_t *tab,
+ size_t elem_sz, uint8_t max_elems, uint8_t max_val)
+{
+ /* There should not be more tokens than max elements.
+ * Add 1 for error trap.
+ */
+ char *tokens[max_elems + 1];
+
+ /* Begin, End + error trap = 3. */
+ char *rng_tokens[MAX_RNG_TOKENS + 1];
+ long beg, end;
+ uint32_t token_val;
+ int nb_tokens, nb_rng_tokens;
+ int i;
+ int values = 0;
+ char val;
+ char entry_cpy[CFG_VALUE_LEN];
+
+ if (elem_sz != 1)
+ return -1;
+
+ /* Copy the entry to safely use rte_strsplit(). */
+ strlcpy(entry_cpy, entry, RTE_DIM(entry_cpy));
+
+ /*
+ * If there are more tokens than array size, rte_strsplit will
+ * not return error, just array size.
+ */
+ nb_tokens = rte_strsplit(entry_cpy, strlen(entry_cpy),
+ tokens, max_elems + 1, ' ');
+
+ /* Quick check, will be refined later. */
+ if (nb_tokens > max_elems)
+ return -2;
+
+ for (i = 0; i < nb_tokens; ++i) {
+ if (strchr(tokens[i], '-') != NULL) {
+ /*
+ * Split to begin and end tokens.
+ * We want to catch error cases too, thus we leave
+ * option for number of tokens to be more than 2.
+ */
+ nb_rng_tokens = rte_strsplit(tokens[i],
+ strlen(tokens[i]), rng_tokens,
+ RTE_DIM(rng_tokens), '-');
+ if (nb_rng_tokens != 2)
+ return -3;
+
+ /* Range and sanity checks. */
+ if (get_val_securely(rng_tokens[0], &token_val) < 0)
+ return -4;
+ beg = (char)token_val;
+ if (get_val_securely(rng_tokens[1], &token_val) < 0)
+ return -4;
+ end = (char)token_val;
+ if (beg < 0 || beg > UCHAR_MAX ||
+ end < 0 || end > UCHAR_MAX || end < beg)
+ return -4;
+
+ for (val = beg; val <= end; ++val) {
+ if (val > max_val)
+ return -5;
+
+ *tab = val;
+ tab = RTE_PTR_ADD(tab, elem_sz);
+ ++values;
+ if (values >= max_elems)
+ return -2;
+ }
+ } else {
+ /* Single values. */
+ if (get_val_securely(tokens[i], &token_val) < 0)
+ return -5;
+ val = (char)token_val;
+ if (val > max_val)
+ return -5;
+
+ *tab = val;
+ tab = RTE_PTR_ADD(tab, elem_sz);
+ ++values;
+ if (values >= max_elems)
+ return -2;
+ }
+ }
+
+ return values;
+}
+
+/**
+ * Parse Traffic Class'es mapping configuration.
+ *
+ * @param file Config file handle.
+ * @param port Which port to look for.
+ * @param tc Which Traffic Class to look for.
+ * @param cfg[out] Parsing results.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+parse_tc_cfg(struct rte_cfgfile *file, int port, int tc,
+ struct mrvl_qos_cfg *cfg)
+{
+ char sec_name[32];
+ const char *entry;
+ int n;
+
+ snprintf(sec_name, sizeof(sec_name), "%s %d %s %d",
+ MRVL_TOK_PORT, port, MRVL_TOK_TC, tc);
+
+ /* Skip non-existing */
+ if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0)
+ return 0;
+
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RXQ);
+ if (entry) {
+ n = get_entry_values(entry,
+ cfg->port[port].tc[tc].inq,
+ sizeof(cfg->port[port].tc[tc].inq[0]),
+ RTE_DIM(cfg->port[port].tc[tc].inq),
+ MRVL_PP2_RXQ_MAX);
+ if (n < 0) {
+ MRVL_LOG(ERR, "Error %d while parsing: %s",
+ n, entry);
+ return n;
+ }
+ cfg->port[port].tc[tc].inqs = n;
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PCP);
+ if (entry) {
+ n = get_entry_values(entry,
+ cfg->port[port].tc[tc].pcp,
+ sizeof(cfg->port[port].tc[tc].pcp[0]),
+ RTE_DIM(cfg->port[port].tc[tc].pcp),
+ MAX_PCP);
+ if (n < 0) {
+ MRVL_LOG(ERR, "Error %d while parsing: %s",
+ n, entry);
+ return n;
+ }
+ cfg->port[port].tc[tc].pcps = n;
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_DSCP);
+ if (entry) {
+ n = get_entry_values(entry,
+ cfg->port[port].tc[tc].dscp,
+ sizeof(cfg->port[port].tc[tc].dscp[0]),
+ RTE_DIM(cfg->port[port].tc[tc].dscp),
+ MAX_DSCP);
+ if (n < 0) {
+ MRVL_LOG(ERR, "Error %d while parsing: %s",
+ n, entry);
+ return n;
+ }
+ cfg->port[port].tc[tc].dscps = n;
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_DEFAULT_COLOR);
+ if (entry) {
+ if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN,
+ sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN))) {
+ cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_GREEN;
+ } else if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW,
+ sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW))) {
+ cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_YELLOW;
+ } else if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_RED,
+ sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_RED))) {
+ cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_RED;
+ } else {
+ MRVL_LOG(ERR, "Error while parsing: %s", entry);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Parse QoS configuration - rte_kvargs_process handler.
+ *
+ * Opens configuration file and parses its content.
+ *
+ * @param key Unused.
+ * @param path Path to config file.
+ * @param extra_args Pointer to configuration structure.
+ * @returns 0 in case of success, exits otherwise.
+ */
+int
+mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
+ void *extra_args)
+{
+ struct mrvl_qos_cfg **cfg = extra_args;
+ struct rte_cfgfile *file = rte_cfgfile_load(path, 0);
+ uint32_t val;
+ int n, i, ret;
+ const char *entry;
+ char sec_name[32];
+
+ if (file == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot load configuration %s\n", path);
+
+ /* Create configuration. This is never accessed on the fast path,
+ * so we can ignore socket.
+ */
+ *cfg = rte_zmalloc("mrvl_qos_cfg", sizeof(struct mrvl_qos_cfg), 0);
+ if (*cfg == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot allocate configuration %s\n",
+ path);
+
+ n = rte_cfgfile_num_sections(file, MRVL_TOK_PORT,
+ sizeof(MRVL_TOK_PORT) - 1);
+
+ if (n == 0) {
+ /* This is weird, but not bad. */
+ MRVL_LOG(WARNING, "Empty configuration file?");
+ return 0;
+ }
+
+ /* Use the number of ports given as vdev parameters. */
+ for (n = 0; n < (PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC); ++n) {
+ snprintf(sec_name, sizeof(sec_name), "%s %d %s",
+ MRVL_TOK_PORT, n, MRVL_TOK_DEFAULT);
+
+ /* Skip ports non-existing in configuration. */
+ if (rte_cfgfile_num_sections(file, sec_name,
+ strlen(sec_name)) <= 0) {
+ (*cfg)->port[n].use_global_defaults = 1;
+ (*cfg)->port[n].mapping_priority =
+ PP2_CLS_QOS_TBL_VLAN_IP_PRI;
+ continue;
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_DEFAULT_TC);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0 ||
+ val > USHRT_MAX)
+ return -1;
+ (*cfg)->port[n].default_tc = (uint8_t)val;
+ } else {
+ MRVL_LOG(ERR,
+ "Default Traffic Class required in custom configuration!");
+ return -1;
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_ENABLE);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].policer_enable = val;
+ }
+
+ if ((*cfg)->port[n].policer_enable) {
+ enum pp2_cls_plcr_token_unit unit;
+
+ /* Read policer token unit */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_UNIT);
+ if (entry) {
+ if (!strncmp(entry, MRVL_TOK_PLCR_UNIT_BYTES,
+ sizeof(MRVL_TOK_PLCR_UNIT_BYTES))) {
+ unit = PP2_CLS_PLCR_BYTES_TOKEN_UNIT;
+ } else if (!strncmp(entry,
+ MRVL_TOK_PLCR_UNIT_PACKETS,
+ sizeof(MRVL_TOK_PLCR_UNIT_PACKETS))) {
+ unit = PP2_CLS_PLCR_PACKETS_TOKEN_UNIT;
+ } else {
+ MRVL_LOG(ERR, "Unknown token: %s",
+ entry);
+ return -1;
+ }
+ (*cfg)->port[n].policer_params.token_unit =
+ unit;
+ }
+
+ /* Read policer color mode */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_COLOR);
+ if (entry) {
+ enum pp2_cls_plcr_color_mode mode;
+
+ if (!strncmp(entry, MRVL_TOK_PLCR_COLOR_BLIND,
+ sizeof(MRVL_TOK_PLCR_COLOR_BLIND))) {
+ mode = PP2_CLS_PLCR_COLOR_BLIND_MODE;
+ } else if (!strncmp(entry,
+ MRVL_TOK_PLCR_COLOR_AWARE,
+ sizeof(MRVL_TOK_PLCR_COLOR_AWARE))) {
+ mode = PP2_CLS_PLCR_COLOR_AWARE_MODE;
+ } else {
+ MRVL_LOG(ERR,
+ "Error in parsing: %s",
+ entry);
+ return -1;
+ }
+ (*cfg)->port[n].policer_params.color_mode =
+ mode;
+ }
+
+ /* Read policer cir */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_CIR);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].policer_params.cir = val;
+ }
+
+ /* Read policer cbs */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_CBS);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].policer_params.cbs = val;
+ }
+
+ /* Read policer ebs */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_EBS);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].policer_params.ebs = val;
+ }
+ }
+
+ /*
+ * Read per-port rate limiting. Setting that will
+ * disable per-queue rate limiting.
+ */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_RATE_LIMIT_ENABLE);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].rate_limit_enable = val;
+ }
+
+ if ((*cfg)->port[n].rate_limit_enable) {
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_BURST_SIZE);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].rate_limit_params.cbs = val;
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_RATE_LIMIT);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].rate_limit_params.cir = val;
+ }
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_MAPPING_PRIORITY);
+ if (entry) {
+ if (!strncmp(entry, MRVL_TOK_VLAN_IP,
+ sizeof(MRVL_TOK_VLAN_IP)))
+ (*cfg)->port[n].mapping_priority =
+ PP2_CLS_QOS_TBL_VLAN_IP_PRI;
+ else if (!strncmp(entry, MRVL_TOK_IP_VLAN,
+ sizeof(MRVL_TOK_IP_VLAN)))
+ (*cfg)->port[n].mapping_priority =
+ PP2_CLS_QOS_TBL_IP_VLAN_PRI;
+ else if (!strncmp(entry, MRVL_TOK_IP,
+ sizeof(MRVL_TOK_IP)))
+ (*cfg)->port[n].mapping_priority =
+ PP2_CLS_QOS_TBL_IP_PRI;
+ else if (!strncmp(entry, MRVL_TOK_VLAN,
+ sizeof(MRVL_TOK_VLAN)))
+ (*cfg)->port[n].mapping_priority =
+ PP2_CLS_QOS_TBL_VLAN_PRI;
+ else
+ rte_exit(EXIT_FAILURE,
+ "Error in parsing %s value (%s)!\n",
+ MRVL_TOK_MAPPING_PRIORITY, entry);
+ } else {
+ (*cfg)->port[n].mapping_priority =
+ PP2_CLS_QOS_TBL_VLAN_IP_PRI;
+ }
+
+ for (i = 0; i < MRVL_PP2_RXQ_MAX; ++i) {
+ ret = get_outq_cfg(file, n, i, *cfg);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Error %d parsing port %d outq %d!\n",
+ ret, n, i);
+ }
+
+ for (i = 0; i < MRVL_PP2_TC_MAX; ++i) {
+ ret = parse_tc_cfg(file, n, i, *cfg);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Error %d parsing port %d tc %d!\n",
+ ret, n, i);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Setup Traffic Class.
+ *
+ * Fill in TC parameters in single MUSDK TC config entry.
+ * @param param TC parameters entry.
+ * @param inqs Number of MUSDK in-queues in this TC.
+ * @param bpool Bpool for this TC.
+ * @param color Default color for this TC.
+ * @returns 0 in case of success, exits otherwise.
+ */
+static int
+setup_tc(struct pp2_ppio_tc_params *param, uint8_t inqs,
+ struct pp2_bpool *bpool, enum pp2_ppio_color color)
+{
+ struct pp2_ppio_inq_params *inq_params;
+
+ param->pkt_offset = MRVL_PKT_OFFS;
+ param->pools[0] = bpool;
+ param->default_color = color;
+
+ inq_params = rte_zmalloc_socket("inq_params",
+ inqs * sizeof(*inq_params),
+ 0, rte_socket_id());
+ if (!inq_params)
+ return -ENOMEM;
+
+ param->num_in_qs = inqs;
+
+ /* Release old config if necessary. */
+ if (param->inqs_params)
+ rte_free(param->inqs_params);
+
+ param->inqs_params = inq_params;
+
+ return 0;
+}
+
+/**
+ * Setup ingress policer.
+ *
+ * @param priv Port's private data.
+ * @param params Pointer to the policer's configuration.
+ * @returns 0 in case of success, negative values otherwise.
+ */
+static int
+setup_policer(struct mrvl_priv *priv, struct pp2_cls_plcr_params *params)
+{
+ char match[16];
+ int ret;
+
+ snprintf(match, sizeof(match), "policer-%d:%d\n",
+ priv->pp_id, priv->ppio_id);
+ params->match = match;
+
+ ret = pp2_cls_plcr_init(params, &priv->policer);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to setup %s", match);
+ return -1;
+ }
+
+ priv->ppio_params.inqs_params.plcr = priv->policer;
+
+ return 0;
+}
+
+/**
+ * Configure RX Queues in a given port.
+ *
+ * Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
+ uint16_t max_queues)
+{
+ size_t i, tc;
+
+ if (mrvl_qos_cfg == NULL ||
+ mrvl_qos_cfg->port[portid].use_global_defaults) {
+ /*
+ * No port configuration, use default: 1 TC, no QoS,
+ * TC color set to green.
+ */
+ priv->ppio_params.inqs_params.num_tcs = 1;
+ setup_tc(&priv->ppio_params.inqs_params.tcs_params[0],
+ max_queues, priv->bpool, PP2_PPIO_COLOR_GREEN);
+
+ /* Direct mapping of queues i.e. 0->0, 1->1 etc. */
+ for (i = 0; i < max_queues; ++i) {
+ priv->rxq_map[i].tc = 0;
+ priv->rxq_map[i].inq = i;
+ }
+ return 0;
+ }
+
+ /* We need only a subset of configuration. */
+ struct port_cfg *port_cfg = &mrvl_qos_cfg->port[portid];
+
+ priv->qos_tbl_params.type = port_cfg->mapping_priority;
+
+ /*
+ * We need to reverse mapping, from tc->pcp (better from usability
+ * point of view) to pcp->tc (configurable in MUSDK).
+ * First, set all map elements to "default".
+ */
+ for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i)
+ priv->qos_tbl_params.pcp_cos_map[i].tc = port_cfg->default_tc;
+
+ /* Then, fill in all known values. */
+ for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
+ if (port_cfg->tc[tc].pcps > RTE_DIM(port_cfg->tc[0].pcp)) {
+ /* Better safe than sorry. */
+ MRVL_LOG(ERR,
+ "Too many PCPs configured in TC %zu!", tc);
+ return -1;
+ }
+ for (i = 0; i < port_cfg->tc[tc].pcps; ++i) {
+ priv->qos_tbl_params.pcp_cos_map[
+ port_cfg->tc[tc].pcp[i]].tc = tc;
+ }
+ }
+
+ /*
+ * The same logic goes with DSCP.
+ * First, set all map elements to "default".
+ */
+ for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i)
+ priv->qos_tbl_params.dscp_cos_map[i].tc =
+ port_cfg->default_tc;
+
+ /* Fill in all known values. */
+ for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
+ if (port_cfg->tc[tc].dscps > RTE_DIM(port_cfg->tc[0].dscp)) {
+ /* Better safe than sorry. */
+ MRVL_LOG(ERR,
+ "Too many DSCPs configured in TC %zu!", tc);
+ return -1;
+ }
+ for (i = 0; i < port_cfg->tc[tc].dscps; ++i) {
+ priv->qos_tbl_params.dscp_cos_map[
+ port_cfg->tc[tc].dscp[i]].tc = tc;
+ }
+ }
+
+ /*
+ * Surprisingly, similar logic goes with queue mapping.
+ * We need only to store qid->tc mapping,
+ * to know TC when queue is read.
+ */
+ for (i = 0; i < RTE_DIM(priv->rxq_map); ++i)
+ priv->rxq_map[i].tc = MRVL_UNKNOWN_TC;
+
+ /* Set up DPDKq->(TC,inq) mapping. */
+ for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
+ if (port_cfg->tc[tc].inqs > RTE_DIM(port_cfg->tc[0].inq)) {
+ /* Overflow. */
+ MRVL_LOG(ERR,
+ "Too many RX queues configured per TC %zu!",
+ tc);
+ return -1;
+ }
+ for (i = 0; i < port_cfg->tc[tc].inqs; ++i) {
+ uint8_t idx = port_cfg->tc[tc].inq[i];
+
+ if (idx > RTE_DIM(priv->rxq_map)) {
+ MRVL_LOG(ERR, "Bad queue index %d!", idx);
+ return -1;
+ }
+
+ priv->rxq_map[idx].tc = tc;
+ priv->rxq_map[idx].inq = i;
+ }
+ }
+
+ /*
+ * Set up TC configuration. TCs need to be sequenced: 0, 1, 2
+ * with no gaps. Empty TC means end of processing.
+ */
+ for (i = 0; i < MRVL_PP2_TC_MAX; ++i) {
+ if (port_cfg->tc[i].inqs == 0)
+ break;
+ setup_tc(&priv->ppio_params.inqs_params.tcs_params[i],
+ port_cfg->tc[i].inqs,
+ priv->bpool, port_cfg->tc[i].color);
+ }
+
+ priv->ppio_params.inqs_params.num_tcs = i;
+
+ if (port_cfg->policer_enable)
+ return setup_policer(priv, &port_cfg->policer_params);
+
+ return 0;
+}
+
+/**
+ * Configure TX Queues in a given port.
+ *
+ * Sets up TX queues egress scheduler and limiter.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid,
+ uint16_t max_queues)
+{
+ /* We need only a subset of configuration. */
+ struct port_cfg *port_cfg = &mrvl_qos_cfg->port[portid];
+ int i;
+
+ if (mrvl_qos_cfg == NULL)
+ return 0;
+
+ priv->ppio_params.rate_limit_enable = port_cfg->rate_limit_enable;
+ if (port_cfg->rate_limit_enable)
+ priv->ppio_params.rate_limit_params =
+ port_cfg->rate_limit_params;
+
+ for (i = 0; i < max_queues; i++) {
+ struct pp2_ppio_outq_params *params =
+ &priv->ppio_params.outqs_params.outqs_params[i];
+
+ params->sched_mode = port_cfg->outq[i].sched_mode;
+ params->weight = port_cfg->outq[i].weight;
+ params->rate_limit_enable = port_cfg->outq[i].rate_limit_enable;
+ params->rate_limit_params = port_cfg->outq[i].rate_limit_params;
+ }
+
+ return 0;
+}
+
+/**
+ * Start QoS mapping.
+ *
+ * Finalize QoS table configuration and initialize it in SDK. It can be done
+ * only after port is started, so we have a valid ppio reference.
+ *
+ * @param priv Port's private (configuration) data.
+ * @returns 0 in case of success, exits otherwise.
+ */
+int
+mrvl_start_qos_mapping(struct mrvl_priv *priv)
+{
+ size_t i;
+
+ if (priv->ppio == NULL) {
+ MRVL_LOG(ERR, "ppio must not be NULL here!");
+ return -1;
+ }
+
+ for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i)
+ priv->qos_tbl_params.pcp_cos_map[i].ppio = priv->ppio;
+
+ for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i)
+ priv->qos_tbl_params.dscp_cos_map[i].ppio = priv->ppio;
+
+ /* Initialize Classifier QoS table. */
+
+ return pp2_cls_qos_tbl_init(&priv->qos_tbl_params, &priv->qos_tbl);
+}
diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.h b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.h
new file mode 100644
index 00000000..fa9ddecb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _MRVL_QOS_H_
+#define _MRVL_QOS_H_
+
+#include <rte_common.h>
+
+#include "mrvl_ethdev.h"
+
+/** Code Points per Traffic Class. Equals max(DSCP, PCP). */
+#define MRVL_CP_PER_TC (64)
+
+/** Value used as "unknown". */
+#define MRVL_UNKNOWN_TC (0xFF)
+
+/* QoS config. */
+struct mrvl_qos_cfg {
+ struct port_cfg {
+ int rate_limit_enable;
+ struct pp2_ppio_rate_limit_params rate_limit_params;
+ struct {
+ uint8_t inq[MRVL_PP2_RXQ_MAX];
+ uint8_t dscp[MRVL_CP_PER_TC];
+ uint8_t pcp[MRVL_CP_PER_TC];
+ uint8_t inqs;
+ uint8_t dscps;
+ uint8_t pcps;
+ enum pp2_ppio_color color;
+ } tc[MRVL_PP2_TC_MAX];
+ struct {
+ enum pp2_ppio_outq_sched_mode sched_mode;
+ uint8_t weight;
+ int rate_limit_enable;
+ struct pp2_ppio_rate_limit_params rate_limit_params;
+ } outq[MRVL_PP2_RXQ_MAX];
+ enum pp2_cls_qos_tbl_type mapping_priority;
+ uint16_t inqs;
+ uint16_t outqs;
+ uint8_t default_tc;
+ uint8_t use_global_defaults;
+ struct pp2_cls_plcr_params policer_params;
+ uint8_t policer_enable;
+ } port[RTE_MAX_ETHPORTS];
+};
+
+/** Global QoS configuration. */
+extern struct mrvl_qos_cfg *mrvl_qos_cfg;
+
+/**
+ * Parse QoS configuration - rte_kvargs_process handler.
+ *
+ * Opens configuration file and parses its content.
+ *
+ * @param key Unused.
+ * @param path Path to config file.
+ * @param extra_args Pointer to configuration structure.
+ * @returns 0 in case of success, exits otherwise.
+ */
+int
+mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
+ void *extra_args);
+
+/**
+ * Configure RX Queues in a given port.
+ *
+ * Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
+ uint16_t max_queues);
+
+/**
+ * Configure TX Queues in a given port.
+ *
+ * Sets up TX queues egress scheduler and limiter.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid,
+ uint16_t max_queues);
+
+/**
+ * Start QoS mapping.
+ *
+ * Finalize QoS table configuration and initialize it in SDK. It can be done
+ * only after port is started, so we have a valid ppio reference.
+ *
+ * @param priv Port's private (configuration) data.
+ * @returns 0 in case of success, exits otherwise.
+ */
+int
+mrvl_start_qos_mapping(struct mrvl_priv *priv);
+
+#endif /* _MRVL_QOS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/mvpp2/rte_pmd_mvpp2_version.map b/src/spdk/dpdk/drivers/net/mvpp2/rte_pmd_mvpp2_version.map
new file mode 100644
index 00000000..a7530317
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mvpp2/rte_pmd_mvpp2_version.map
@@ -0,0 +1,3 @@
+DPDK_17.11 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/netvsc/Makefile b/src/spdk/dpdk/drivers/net/netvsc/Makefile
new file mode 100644
index 00000000..3c713af3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/netvsc/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_pmd_netvsc.a
+
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+EXPORT_MAP := rte_pmd_netvsc_version.map
+
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_rndis.c
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_nvs.c
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vmbus
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_ethdev.c b/src/spdk/dpdk/drivers/net/netvsc/hn_ethdev.c
new file mode 100644
index 00000000..78b842ba
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/netvsc/hn_ethdev.c
@@ -0,0 +1,761 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Microsoft Corporation
+ * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_cycles.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_bus_vmbus.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_rndis.h"
+#include "hn_nvs.h"
+#include "ndis.h"
+
+#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_MULTI_SEGS | \
+ DEV_TX_OFFLOAD_VLAN_INSERT)
+
+#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
+ DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_CRC_STRIP)
+
+int hn_logtype_init;
+int hn_logtype_driver;
+
+struct hn_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
+static const struct hn_xstats_name_off hn_stat_strings[] = {
+ { "good_packets", offsetof(struct hn_stats, packets) },
+ { "good_bytes", offsetof(struct hn_stats, bytes) },
+ { "errors", offsetof(struct hn_stats, errors) },
+ { "allocation_failed", offsetof(struct hn_stats, nomemory) },
+ { "multicast_packets", offsetof(struct hn_stats, multicast) },
+ { "broadcast_packets", offsetof(struct hn_stats, broadcast) },
+ { "undersize_packets", offsetof(struct hn_stats, size_bins[0]) },
+ { "size_64_packets", offsetof(struct hn_stats, size_bins[1]) },
+ { "size_65_127_packets", offsetof(struct hn_stats, size_bins[2]) },
+ { "size_128_255_packets", offsetof(struct hn_stats, size_bins[3]) },
+ { "size_256_511_packets", offsetof(struct hn_stats, size_bins[4]) },
+ { "size_512_1023_packets", offsetof(struct hn_stats, size_bins[5]) },
+ { "size_1024_1518_packets", offsetof(struct hn_stats, size_bins[6]) },
+ { "size_1519_max_packets", offsetof(struct hn_stats, size_bins[7]) },
+};
+
+static struct rte_eth_dev *
+eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
+{
+ struct rte_eth_dev *eth_dev;
+ const char *name;
+
+ if (!dev)
+ return NULL;
+
+ name = dev->device.name;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(NOTICE, "can not allocate rte ethdev");
+ return NULL;
+ }
+
+ if (private_data_size) {
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(name, private_data_size,
+ RTE_CACHE_LINE_SIZE, dev->device.numa_node);
+ if (!eth_dev->data->dev_private) {
+ PMD_DRV_LOG(NOTICE, "can not allocate driver data");
+ rte_eth_dev_release_port(eth_dev);
+ return NULL;
+ }
+ }
+ } else {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(NOTICE, "can not attach secondary");
+ return NULL;
+ }
+ }
+
+ eth_dev->device = &dev->device;
+ eth_dev->intr_handle = &dev->intr_handle;
+
+ return eth_dev;
+}
+
+static void
+eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
+{
+ /* free ether device */
+ rte_eth_dev_release_port(eth_dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+
+ eth_dev->data->dev_private = NULL;
+
+ /*
+ * Secondary process will check the name to attach.
+ * Clear this field to avoid attaching a released ports.
+ */
+ eth_dev->data->name[0] = '\0';
+
+ eth_dev->device = NULL;
+ eth_dev->intr_handle = NULL;
+}
+
+/* Update link status.
+ * Note: the DPDK definition of "wait_to_complete"
+ * means block this call until link is up.
+ * which is not worth supporting.
+ */
+static int
+hn_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_link link, old;
+ int error;
+
+ old = dev->data->dev_link;
+
+ error = hn_rndis_get_linkstatus(hv);
+ if (error)
+ return error;
+
+ hn_rndis_get_linkspeed(hv);
+
+ link = (struct rte_eth_link) {
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_autoneg = ETH_LINK_SPEED_FIXED,
+ .link_speed = hv->link_speed / 10000,
+ };
+
+ if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
+ link.link_status = ETH_LINK_UP;
+ else
+ link.link_status = ETH_LINK_DOWN;
+
+ if (old.link_status == link.link_status)
+ return 0;
+
+ PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
+ (link.link_status == ETH_LINK_UP) ? "up" : "down");
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static void hn_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ dev_info->speed_capa = ETH_LINK_SPEED_10G;
+ dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
+ dev_info->max_rx_pktlen = HN_MAX_XFER_LEN;
+ dev_info->max_mac_addrs = 1;
+
+ dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
+ dev_info->flow_type_rss_offloads =
+ ETH_RSS_IPV4 | ETH_RSS_IPV6 | ETH_RSS_TCP | ETH_RSS_UDP;
+
+ dev_info->max_rx_queues = hv->max_queues;
+ dev_info->max_tx_queues = hv->max_queues;
+
+ hn_rndis_get_offload(hv, dev_info);
+}
+
+static void
+hn_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS);
+}
+
+static void
+hn_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ uint32_t filter;
+
+ filter = NDIS_PACKET_TYPE_DIRECTED | NDIS_PACKET_TYPE_BROADCAST;
+ if (dev->data->all_multicast)
+ filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
+ hn_rndis_set_rxfilter(hv, filter);
+}
+
+static void
+hn_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_BROADCAST);
+}
+
+static void
+hn_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
+ NDIS_PACKET_TYPE_BROADCAST);
+}
+
+/* Setup shared rx/tx queue data */
+static int hn_subchan_configure(struct hn_data *hv,
+ uint32_t subchan)
+{
+ struct vmbus_channel *primary = hn_primary_chan(hv);
+ int err;
+ unsigned int retry = 0;
+
+ PMD_DRV_LOG(DEBUG,
+ "open %u subchannels", subchan);
+
+ /* Send create sub channels command */
+ err = hn_nvs_alloc_subchans(hv, &subchan);
+ if (err)
+ return err;
+
+ while (subchan > 0) {
+ struct vmbus_channel *new_sc;
+ uint16_t chn_index;
+
+ err = rte_vmbus_subchan_open(primary, &new_sc);
+ if (err == -ENOENT && ++retry < 1000) {
+ /* This can happen if not ready yet */
+ rte_delay_ms(10);
+ continue;
+ }
+
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "open subchannel failed: %d", err);
+ return err;
+ }
+
+ retry = 0;
+ chn_index = rte_vmbus_sub_channel_index(new_sc);
+ if (chn_index == 0 || chn_index > hv->max_queues) {
+ PMD_DRV_LOG(ERR,
+ "Invalid subchannel offermsg channel %u",
+ chn_index);
+ return -EIO;
+ }
+
+ PMD_DRV_LOG(DEBUG, "new sub channel %u", chn_index);
+ hv->channels[chn_index] = new_sc;
+ --subchan;
+ }
+
+ return err;
+}
+
+static int hn_dev_configure(struct rte_eth_dev *dev)
+{
+ const struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
+ const struct rte_eth_txmode *txmode = &dev_conf->txmode;
+
+ const struct rte_eth_rss_conf *rss_conf =
+ &dev_conf->rx_adv_conf.rss_conf;
+ struct hn_data *hv = dev->data->dev_private;
+ uint64_t unsupported;
+ int err, subchan;
+
+ PMD_INIT_FUNC_TRACE();
+
+ unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
+ if (unsupported) {
+ PMD_DRV_LOG(NOTICE,
+ "unsupported TX offload: %#" PRIx64,
+ unsupported);
+ return -EINVAL;
+ }
+
+ unsupported = rxmode->offloads & ~HN_RX_OFFLOAD_CAPS;
+ if (unsupported) {
+ PMD_DRV_LOG(NOTICE,
+ "unsupported RX offload: %#" PRIx64,
+ rxmode->offloads);
+ return -EINVAL;
+ }
+
+ err = hn_rndis_conf_offload(hv, txmode->offloads,
+ rxmode->offloads);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "offload configure failed");
+ return err;
+ }
+
+ hv->num_queues = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
+ subchan = hv->num_queues - 1;
+ if (subchan > 0) {
+ err = hn_subchan_configure(hv, subchan);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "subchannel configuration failed");
+ return err;
+ }
+
+ err = hn_rndis_conf_rss(hv, rss_conf);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "rss configuration failed");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int hn_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ const struct hn_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (!txq)
+ continue;
+
+ stats->opackets += txq->stats.packets;
+ stats->obytes += txq->stats.bytes;
+ stats->oerrors += txq->stats.errors + txq->stats.nomemory;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] = txq->stats.packets;
+ stats->q_obytes[i] = txq->stats.bytes;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ const struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ stats->ipackets += rxq->stats.packets;
+ stats->ibytes += rxq->stats.bytes;
+ stats->ierrors += rxq->stats.errors;
+ stats->imissed += rxq->ring_full;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] = rxq->stats.packets;
+ stats->q_ibytes[i] = rxq->stats.bytes;
+ }
+ }
+
+ stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+ return 0;
+}
+
+static void
+hn_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct hn_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (!txq)
+ continue;
+ memset(&txq->stats, 0, sizeof(struct hn_stats));
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ memset(&rxq->stats, 0, sizeof(struct hn_stats));
+ rxq->ring_full = 0;
+ }
+}
+
+static int
+hn_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ unsigned int i, t, count = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!xstats_names)
+ return dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
+ + dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
+
+ /* Note: limit checked in rte_eth_xstats_names() */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ const struct hn_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (!txq)
+ continue;
+
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
+ snprintf(xstats_names[count++].name,
+ RTE_ETH_XSTATS_NAME_SIZE,
+ "tx_q%u_%s", i, hn_stat_strings[t].name);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ const struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
+ snprintf(xstats_names[count++].name,
+ RTE_ETH_XSTATS_NAME_SIZE,
+ "rx_q%u_%s", i,
+ hn_stat_strings[t].name);
+ }
+
+ return count;
+}
+
+static int
+hn_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i, t, count = 0;
+
+ const unsigned int nstats =
+ dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
+ + dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
+ const char *stats;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (n < nstats)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ const struct hn_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (!txq)
+ continue;
+
+ stats = (const char *)&txq->stats;
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
+ xstats[count++].value = *(const uint64_t *)
+ (stats + hn_stat_strings[t].offset);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ const struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ stats = (const char *)&rxq->stats;
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
+ xstats[count++].value = *(const uint64_t *)
+ (stats + hn_stat_strings[t].offset);
+ }
+
+ return count;
+}
+
+static int
+hn_dev_start(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* check if lsc interrupt feature is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ PMD_DRV_LOG(ERR, "link status not supported yet");
+ return -ENOTSUP;
+ }
+
+ return hn_rndis_set_rxfilter(hv,
+ NDIS_PACKET_TYPE_BROADCAST |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_DIRECTED);
+}
+
+static void
+hn_dev_stop(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hn_rndis_set_rxfilter(hv, 0);
+}
+
+static void
+hn_dev_close(struct rte_eth_dev *dev __rte_unused)
+{
+ PMD_INIT_LOG(DEBUG, "close");
+}
+
+static const struct eth_dev_ops hn_eth_dev_ops = {
+ .dev_configure = hn_dev_configure,
+ .dev_start = hn_dev_start,
+ .dev_stop = hn_dev_stop,
+ .dev_close = hn_dev_close,
+ .dev_infos_get = hn_dev_info_get,
+ .txq_info_get = hn_dev_tx_queue_info,
+ .rxq_info_get = hn_dev_rx_queue_info,
+ .promiscuous_enable = hn_dev_promiscuous_enable,
+ .promiscuous_disable = hn_dev_promiscuous_disable,
+ .allmulticast_enable = hn_dev_allmulticast_enable,
+ .allmulticast_disable = hn_dev_allmulticast_disable,
+ .tx_queue_setup = hn_dev_tx_queue_setup,
+ .tx_queue_release = hn_dev_tx_queue_release,
+ .rx_queue_setup = hn_dev_rx_queue_setup,
+ .rx_queue_release = hn_dev_rx_queue_release,
+ .link_update = hn_dev_link_update,
+ .stats_get = hn_dev_stats_get,
+ .xstats_get = hn_dev_xstats_get,
+ .xstats_get_names = hn_dev_xstats_get_names,
+ .stats_reset = hn_dev_stats_reset,
+ .xstats_reset = hn_dev_stats_reset,
+};
+
+/*
+ * Setup connection between PMD and kernel.
+ */
+static int
+hn_attach(struct hn_data *hv, unsigned int mtu)
+{
+ int error;
+
+ /* Attach NVS */
+ error = hn_nvs_attach(hv, mtu);
+ if (error)
+ goto failed_nvs;
+
+ /* Attach RNDIS */
+ error = hn_rndis_attach(hv);
+ if (error)
+ goto failed_rndis;
+
+ /*
+ * NOTE:
+ * Under certain conditions on certain versions of Hyper-V,
+ * the RNDIS rxfilter is _not_ zero on the hypervisor side
+ * after the successful RNDIS initialization.
+ */
+ hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_NONE);
+ return 0;
+failed_rndis:
+ hn_nvs_detach(hv);
+failed_nvs:
+ return error;
+}
+
+static void
+hn_detach(struct hn_data *hv)
+{
+ hn_nvs_detach(hv);
+ hn_rndis_detach(hv);
+}
+
+static int
+eth_hn_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct hn_data *hv = eth_dev->data->dev_private;
+ struct rte_device *device = eth_dev->device;
+ struct rte_vmbus_device *vmbus;
+ unsigned int rxr_cnt;
+ int err, max_chan;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vmbus = container_of(device, struct rte_vmbus_device, device);
+ eth_dev->dev_ops = &hn_eth_dev_ops;
+ eth_dev->tx_pkt_burst = &hn_xmit_pkts;
+ eth_dev->rx_pkt_burst = &hn_recv_pkts;
+
+ /*
+ * for secondary processes, we don't initialize any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Since Hyper-V only supports one MAC address, just use local data */
+ eth_dev->data->mac_addrs = &hv->mac_addr;
+
+ hv->vmbus = vmbus;
+ hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP];
+ hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP];
+ hv->port_id = eth_dev->data->port_id;
+
+ /* Initialize primary channel input for control operations */
+ err = rte_vmbus_chan_open(vmbus, &hv->channels[0]);
+ if (err)
+ return err;
+
+ hv->primary = hn_rx_queue_alloc(hv, 0,
+ eth_dev->device->numa_node);
+
+ if (!hv->primary)
+ return -ENOMEM;
+
+ err = hn_attach(hv, ETHER_MTU);
+ if (err)
+ goto failed;
+
+ err = hn_tx_pool_init(eth_dev);
+ if (err)
+ goto failed;
+
+ err = hn_rndis_get_eaddr(hv, hv->mac_addr.addr_bytes);
+ if (err)
+ goto failed;
+
+ max_chan = rte_vmbus_max_channels(vmbus);
+ PMD_INIT_LOG(DEBUG, "VMBus max channels %d", max_chan);
+ if (max_chan <= 0)
+ goto failed;
+
+ if (hn_rndis_query_rsscaps(hv, &rxr_cnt) != 0)
+ rxr_cnt = 1;
+
+ hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);
+
+ return 0;
+
+failed:
+ PMD_INIT_LOG(NOTICE, "device init failed");
+
+ hn_detach(hv);
+ return err;
+}
+
+static int
+eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct hn_data *hv = eth_dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ hn_dev_stop(eth_dev);
+ hn_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+
+ hn_detach(hv);
+ rte_vmbus_chan_close(hv->primary->chan);
+ rte_free(hv->primary);
+
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
+static int eth_hn_probe(struct rte_vmbus_driver *drv __rte_unused,
+ struct rte_vmbus_device *dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev = eth_dev_vmbus_allocate(dev, sizeof(struct hn_data));
+ if (!eth_dev)
+ return -ENOMEM;
+
+ ret = eth_hn_dev_init(eth_dev);
+ if (ret)
+ eth_dev_vmbus_release(eth_dev);
+ else
+ rte_eth_dev_probing_finish(eth_dev);
+
+ return ret;
+}
+
+static int eth_hn_remove(struct rte_vmbus_device *dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev = rte_eth_dev_allocated(dev->device.name);
+ if (!eth_dev)
+ return -ENODEV;
+
+ ret = eth_hn_dev_uninit(eth_dev);
+ if (ret)
+ return ret;
+
+ eth_dev_vmbus_release(eth_dev);
+ return 0;
+}
+
+/* Network device GUID */
+static const rte_uuid_t hn_net_ids[] = {
+ /* f8615163-df3e-46c5-913f-f2d2f965ed0e */
+ RTE_UUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x913f, 0xf2d2f965ed0eULL),
+ { 0 }
+};
+
+static struct rte_vmbus_driver rte_netvsc_pmd = {
+ .id_table = hn_net_ids,
+ .probe = eth_hn_probe,
+ .remove = eth_hn_remove,
+};
+
+RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd);
+RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");
+
+RTE_INIT(hn_init_log);
+static void
+hn_init_log(void)
+{
+ hn_logtype_init = rte_log_register("pmd.net.netvsc.init");
+ if (hn_logtype_init >= 0)
+ rte_log_set_level(hn_logtype_init, RTE_LOG_NOTICE);
+ hn_logtype_driver = rte_log_register("pmd.net.netvsc.driver");
+ if (hn_logtype_driver >= 0)
+ rte_log_set_level(hn_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_logs.h b/src/spdk/dpdk/drivers/net/netvsc/hn_logs.h
new file mode 100644
index 00000000..cddadef0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/netvsc/hn_logs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#ifndef _HN_LOGS_H_
+#define _HN_LOGS_H_
+
+#include <rte_log.h>
+
+extern int hn_logtype_init;
+extern int hn_logtype_driver;
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, hn_logtype_init, "%s(): " fmt "\n",\
+ __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, hn_logtype_driver, \
+ "%s() rx: " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, hn_logtype_driver, \
+ "%s() tx: " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, hn_logtype_driver, "%s(): " fmt "\n", \
+ __func__, ## args)
+
+#endif /* _HN_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_nvs.c b/src/spdk/dpdk/drivers/net/netvsc/hn_nvs.c
new file mode 100644
index 00000000..77d3b839
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/netvsc/hn_nvs.c
@@ -0,0 +1,546 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * Copyright (c) 2010-2012 Citrix Inc.
+ * Copyright (c) 2012 NetApp Inc.
+ * All rights reserved.
+ */
+
+/*
+ * Network Virtualization Service.
+ */
+
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_bus_vmbus.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_nvs.h"
+
+static const uint32_t hn_nvs_version[] = {
+ NVS_VERSION_61,
+ NVS_VERSION_6,
+ NVS_VERSION_5,
+ NVS_VERSION_4,
+ NVS_VERSION_2,
+ NVS_VERSION_1
+};
+
+static int hn_nvs_req_send(struct hn_data *hv,
+ void *req, uint32_t reqlen)
+{
+ return rte_vmbus_chan_send(hn_primary_chan(hv),
+ VMBUS_CHANPKT_TYPE_INBAND,
+ req, reqlen, 0,
+ VMBUS_CHANPKT_FLAG_NONE, NULL);
+}
+
+static int
+hn_nvs_execute(struct hn_data *hv,
+ void *req, uint32_t reqlen,
+ void *resp, uint32_t resplen,
+ uint32_t type)
+{
+ struct vmbus_channel *chan = hn_primary_chan(hv);
+ char buffer[NVS_RESPSIZE_MAX];
+ const struct hn_nvs_hdr *hdr;
+ uint32_t len;
+ int ret;
+
+ /* Send request to ring buffer */
+ ret = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
+ req, reqlen, 0,
+ VMBUS_CHANPKT_FLAG_RC, NULL);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "send request failed: %d", ret);
+ return ret;
+ }
+
+ retry:
+ len = sizeof(buffer);
+ ret = rte_vmbus_chan_recv(chan, buffer, &len, NULL);
+ if (ret == -EAGAIN) {
+ rte_delay_us(HN_CHAN_INTERVAL_US);
+ goto retry;
+ }
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "recv response failed: %d", ret);
+ return ret;
+ }
+
+ hdr = (struct hn_nvs_hdr *)buffer;
+ if (hdr->type != type) {
+ PMD_DRV_LOG(ERR, "unexpected NVS resp %#x, expect %#x",
+ hdr->type, type);
+ return -EINVAL;
+ }
+
+ if (len < resplen) {
+ PMD_DRV_LOG(ERR,
+ "invalid NVS resp len %u (expect %u)",
+ len, resplen);
+ return -EINVAL;
+ }
+
+ memcpy(resp, buffer, resplen);
+
+ /* All pass! */
+ return 0;
+}
+
+static int
+hn_nvs_doinit(struct hn_data *hv, uint32_t nvs_ver)
+{
+ struct hn_nvs_init init;
+ struct hn_nvs_init_resp resp;
+ uint32_t status;
+ int error;
+
+ memset(&init, 0, sizeof(init));
+ init.type = NVS_TYPE_INIT;
+ init.ver_min = nvs_ver;
+ init.ver_max = nvs_ver;
+
+ error = hn_nvs_execute(hv, &init, sizeof(init),
+ &resp, sizeof(resp),
+ NVS_TYPE_INIT_RESP);
+ if (error)
+ return error;
+
+ status = resp.status;
+ if (status != NVS_STATUS_OK) {
+ /* Not fatal, try other versions */
+ PMD_INIT_LOG(DEBUG, "nvs init failed for ver 0x%x",
+ nvs_ver);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+hn_nvs_conn_rxbuf(struct hn_data *hv)
+{
+ struct hn_nvs_rxbuf_conn conn;
+ struct hn_nvs_rxbuf_connresp resp;
+ uint32_t status;
+ int error;
+
+ /* Kernel has already setup RXBUF on primary channel. */
+
+ /*
+ * Connect RXBUF to NVS.
+ */
+ conn.type = NVS_TYPE_RXBUF_CONN;
+ conn.gpadl = hv->rxbuf_res->phys_addr;
+ conn.sig = NVS_RXBUF_SIG;
+ PMD_DRV_LOG(DEBUG, "connect rxbuff va=%p gpad=%#" PRIx64,
+ hv->rxbuf_res->addr,
+ hv->rxbuf_res->phys_addr);
+
+ error = hn_nvs_execute(hv, &conn, sizeof(conn),
+ &resp, sizeof(resp),
+ NVS_TYPE_RXBUF_CONNRESP);
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "exec nvs rxbuf conn failed: %d",
+ error);
+ return error;
+ }
+
+ status = resp.status;
+ if (status != NVS_STATUS_OK) {
+ PMD_DRV_LOG(ERR,
+ "nvs rxbuf conn failed: %x", status);
+ return -EIO;
+ }
+ if (resp.nsect != 1) {
+ PMD_DRV_LOG(ERR,
+ "nvs rxbuf response num sections %u != 1",
+ resp.nsect);
+ return -EIO;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "receive buffer size %u count %u",
+ resp.nvs_sect[0].slotsz,
+ resp.nvs_sect[0].slotcnt);
+ hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;
+
+ hv->rxbuf_info = rte_calloc("HN_RXBUF_INFO", hv->rxbuf_section_cnt,
+ sizeof(*hv->rxbuf_info), RTE_CACHE_LINE_SIZE);
+ if (!hv->rxbuf_info) {
+ PMD_DRV_LOG(ERR,
+ "could not allocate rxbuf info");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+hn_nvs_disconn_rxbuf(struct hn_data *hv)
+{
+ struct hn_nvs_rxbuf_disconn disconn;
+ int error;
+
+ /*
+ * Disconnect RXBUF from NVS.
+ */
+ memset(&disconn, 0, sizeof(disconn));
+ disconn.type = NVS_TYPE_RXBUF_DISCONN;
+ disconn.sig = NVS_RXBUF_SIG;
+
+ /* NOTE: No response. */
+ error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "send nvs rxbuf disconn failed: %d",
+ error);
+ }
+
+ rte_free(hv->rxbuf_info);
+ /*
+ * Linger long enough for NVS to disconnect RXBUF.
+ */
+ rte_delay_ms(200);
+}
+
+static void
+hn_nvs_disconn_chim(struct hn_data *hv)
+{
+ int error;
+
+ if (hv->chim_cnt != 0) {
+ struct hn_nvs_chim_disconn disconn;
+
+ /* Disconnect chimney sending buffer from NVS. */
+ memset(&disconn, 0, sizeof(disconn));
+ disconn.type = NVS_TYPE_CHIM_DISCONN;
+ disconn.sig = NVS_CHIM_SIG;
+
+ /* NOTE: No response. */
+ error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
+
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "send nvs chim disconn failed: %d", error);
+ }
+
+ hv->chim_cnt = 0;
+ /*
+ * Linger long enough for NVS to disconnect chimney
+ * sending buffer.
+ */
+ rte_delay_ms(200);
+ }
+}
+
+static int
+hn_nvs_conn_chim(struct hn_data *hv)
+{
+ struct hn_nvs_chim_conn chim;
+ struct hn_nvs_chim_connresp resp;
+ uint32_t sectsz;
+ unsigned long len = hv->chim_res->len;
+ int error;
+
+ /* Connect chimney sending buffer to NVS */
+ memset(&chim, 0, sizeof(chim));
+ chim.type = NVS_TYPE_CHIM_CONN;
+ chim.gpadl = hv->chim_res->phys_addr;
+ chim.sig = NVS_CHIM_SIG;
+ PMD_DRV_LOG(DEBUG, "connect send buf va=%p gpad=%#" PRIx64,
+ hv->chim_res->addr,
+ hv->chim_res->phys_addr);
+
+ error = hn_nvs_execute(hv, &chim, sizeof(chim),
+ &resp, sizeof(resp),
+ NVS_TYPE_CHIM_CONNRESP);
+ if (error) {
+ PMD_DRV_LOG(ERR, "exec nvs chim conn failed");
+ goto cleanup;
+ }
+
+ if (resp.status != NVS_STATUS_OK) {
+ PMD_DRV_LOG(ERR, "nvs chim conn failed: %x",
+ resp.status);
+ error = -EIO;
+ goto cleanup;
+ }
+
+ sectsz = resp.sectsz;
+ if (sectsz == 0 || sectsz & (sizeof(uint32_t) - 1)) {
+ /* Can't use chimney sending buffer; done! */
+ PMD_DRV_LOG(NOTICE,
+ "invalid chimney sending buffer section size: %u",
+ sectsz);
+ return 0;
+ }
+
+ hv->chim_szmax = sectsz;
+ hv->chim_cnt = len / sectsz;
+
+ PMD_DRV_LOG(INFO, "send buffer %lu section size:%u, count:%u",
+ len, hv->chim_szmax, hv->chim_cnt);
+
+ if (len % hv->chim_szmax != 0) {
+ PMD_DRV_LOG(NOTICE,
+ "chimney sending sections are not properly aligned");
+ }
+
+ /* Done! */
+ return 0;
+
+cleanup:
+ hn_nvs_disconn_chim(hv);
+ return error;
+}
+
+/*
+ * Configure MTU and enable VLAN.
+ */
+static int
+hn_nvs_conf_ndis(struct hn_data *hv, unsigned int mtu)
+{
+ struct hn_nvs_ndis_conf conf;
+ int error;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.type = NVS_TYPE_NDIS_CONF;
+ conf.mtu = mtu + ETHER_HDR_LEN;
+ conf.caps = NVS_NDIS_CONF_VLAN;
+
+ /* TODO enable SRIOV */
+ //if (hv->nvs_ver >= NVS_VERSION_5)
+ // conf.caps |= NVS_NDIS_CONF_SRIOV;
+
+ /* NOTE: No response. */
+ error = hn_nvs_req_send(hv, &conf, sizeof(conf));
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "send nvs ndis conf failed: %d", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int
+hn_nvs_init_ndis(struct hn_data *hv)
+{
+ struct hn_nvs_ndis_init ndis;
+ int error;
+
+ memset(&ndis, 0, sizeof(ndis));
+ ndis.type = NVS_TYPE_NDIS_INIT;
+ ndis.ndis_major = NDIS_VERSION_MAJOR(hv->ndis_ver);
+ ndis.ndis_minor = NDIS_VERSION_MINOR(hv->ndis_ver);
+
+ /* NOTE: No response. */
+ error = hn_nvs_req_send(hv, &ndis, sizeof(ndis));
+ if (error)
+ PMD_DRV_LOG(ERR,
+ "send nvs ndis init failed: %d", error);
+
+ return error;
+}
+
+static int
+hn_nvs_init(struct hn_data *hv)
+{
+ unsigned int i;
+ int error;
+
+ /*
+ * Find the supported NVS version and set NDIS version accordingly.
+ */
+ for (i = 0; i < RTE_DIM(hn_nvs_version); ++i) {
+ error = hn_nvs_doinit(hv, hn_nvs_version[i]);
+ if (error) {
+ PMD_INIT_LOG(DEBUG, "version %#x error %d",
+ hn_nvs_version[i], error);
+ continue;
+ }
+
+ hv->nvs_ver = hn_nvs_version[i];
+
+ /* Set NDIS version according to NVS version. */
+ hv->ndis_ver = NDIS_VERSION_6_30;
+ if (hv->nvs_ver <= NVS_VERSION_4)
+ hv->ndis_ver = NDIS_VERSION_6_1;
+
+ PMD_INIT_LOG(DEBUG,
+ "NVS version %#x, NDIS version %u.%u",
+ hv->nvs_ver, NDIS_VERSION_MAJOR(hv->ndis_ver),
+ NDIS_VERSION_MINOR(hv->ndis_ver));
+ return 0;
+ }
+
+ PMD_DRV_LOG(ERR,
+ "no NVS compatible version available");
+ return -ENXIO;
+}
+
+int
+hn_nvs_attach(struct hn_data *hv, unsigned int mtu)
+{
+ int error;
+
+ /*
+ * Initialize NVS.
+ */
+ error = hn_nvs_init(hv);
+ if (error)
+ return error;
+
+ /** Configure NDIS before initializing it. */
+ if (hv->nvs_ver >= NVS_VERSION_2) {
+ error = hn_nvs_conf_ndis(hv, mtu);
+ if (error)
+ return error;
+ }
+
+ /*
+ * Initialize NDIS.
+ */
+ error = hn_nvs_init_ndis(hv);
+ if (error)
+ return error;
+
+ /*
+ * Connect RXBUF.
+ */
+ error = hn_nvs_conn_rxbuf(hv);
+ if (error)
+ return error;
+
+ /*
+ * Connect chimney sending buffer.
+ */
+ error = hn_nvs_conn_chim(hv);
+ if (error) {
+ hn_nvs_disconn_rxbuf(hv);
+ return error;
+ }
+
+ return 0;
+}
+
+void
+hn_nvs_detach(struct hn_data *hv __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ /* NOTE: there are no requests to stop the NVS. */
+ hn_nvs_disconn_rxbuf(hv);
+ hn_nvs_disconn_chim(hv);
+}
+
+/*
+ * Ack the consumed RXBUF associated w/ this channel packet,
+ * so that this RXBUF can be recycled by the hypervisor.
+ */
+void
+hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid)
+{
+ unsigned int retries = 0;
+ struct hn_nvs_rndis_ack ack = {
+ .type = NVS_TYPE_RNDIS_ACK,
+ .status = NVS_STATUS_OK,
+ };
+ int error;
+
+ PMD_RX_LOG(DEBUG, "ack RX id %" PRIu64, tid);
+
+ again:
+ error = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_COMP,
+ &ack, sizeof(ack), tid,
+ VMBUS_CHANPKT_FLAG_NONE, NULL);
+
+ if (error == 0)
+ return;
+
+ if (error == -EAGAIN) {
+ /*
+ * NOTE:
+ * This should _not_ happen in real world, since the
+ * consumption of the TX bufring from the TX path is
+ * controlled.
+ */
+ PMD_RX_LOG(NOTICE, "RXBUF ack retry");
+ if (++retries < 10) {
+ rte_delay_ms(1);
+ goto again;
+ }
+ }
+ /* RXBUF leaks! */
+ PMD_DRV_LOG(ERR, "RXBUF ack failed");
+}
+
+int
+hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch)
+{
+ struct hn_nvs_subch_req req;
+ struct hn_nvs_subch_resp resp;
+ int error;
+
+ memset(&req, 0, sizeof(req));
+ req.type = NVS_TYPE_SUBCH_REQ;
+ req.op = NVS_SUBCH_OP_ALLOC;
+ req.nsubch = *nsubch;
+
+ error = hn_nvs_execute(hv, &req, sizeof(req),
+ &resp, sizeof(resp),
+ NVS_TYPE_SUBCH_RESP);
+ if (error)
+ return error;
+
+ if (resp.status != NVS_STATUS_OK) {
+ PMD_INIT_LOG(ERR,
+ "nvs subch alloc failed: %#x",
+ resp.status);
+ return -EIO;
+ }
+
+ if (resp.nsubch > *nsubch) {
+ PMD_INIT_LOG(NOTICE,
+ "%u subchans are allocated, requested %u",
+ resp.nsubch, *nsubch);
+ }
+ *nsubch = resp.nsubch;
+
+ return 0;
+}
+
+void
+hn_nvs_set_datapath(struct hn_data *hv, uint32_t path)
+{
+ struct hn_nvs_datapath dp;
+
+ memset(&dp, 0, sizeof(dp));
+ dp.type = NVS_TYPE_SET_DATAPATH;
+ dp.active_path = path;
+
+ hn_nvs_req_send(hv, &dp, sizeof(dp));
+}
diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_nvs.h b/src/spdk/dpdk/drivers/net/netvsc/hn_nvs.h
new file mode 100644
index 00000000..984a9c11
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/netvsc/hn_nvs.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * All rights reserved.
+ */
+
+/*
+ * The indirection table message is the largest message
+ * received from host, and that is 112 bytes.
+ */
+#define NVS_RESPSIZE_MAX 256
+
+/*
+ * NDIS protocol version numbers
+ */
+#define NDIS_VERSION_6_1 0x00060001
+#define NDIS_VERSION_6_20 0x00060014
+#define NDIS_VERSION_6_30 0x0006001e
+#define NDIS_VERSION_MAJOR(ver) (((ver) & 0xffff0000) >> 16)
+#define NDIS_VERSION_MINOR(ver) ((ver) & 0xffff)
+
+/*
+ * NVS versions.
+ */
+#define NVS_VERSION_1 0x00002
+#define NVS_VERSION_2 0x30002
+#define NVS_VERSION_4 0x40000
+#define NVS_VERSION_5 0x50000
+#define NVS_VERSION_6 0x60000
+#define NVS_VERSION_61 0x60001
+
+#define NVS_RXBUF_SIG 0xcafe
+#define NVS_CHIM_SIG 0xface
+
+#define NVS_CHIM_IDX_INVALID 0xffffffff
+
+#define NVS_RNDIS_MTYPE_DATA 0
+#define NVS_RNDIS_MTYPE_CTRL 1
+
+/*
+ * NVS message transacion status codes.
+ */
+#define NVS_STATUS_OK 1
+#define NVS_STATUS_FAILED 2
+
+/*
+ * NVS request/response message types.
+ */
+#define NVS_TYPE_INIT 1
+#define NVS_TYPE_INIT_RESP 2
+
+#define NVS_TYPE_NDIS_INIT 100
+#define NVS_TYPE_RXBUF_CONN 101
+#define NVS_TYPE_RXBUF_CONNRESP 102
+#define NVS_TYPE_RXBUF_DISCONN 103
+#define NVS_TYPE_CHIM_CONN 104
+#define NVS_TYPE_CHIM_CONNRESP 105
+#define NVS_TYPE_CHIM_DISCONN 106
+#define NVS_TYPE_RNDIS 107
+#define NVS_TYPE_RNDIS_ACK 108
+
+#define NVS_TYPE_NDIS_CONF 125
+#define NVS_TYPE_VFASSOC_NOTE 128 /* notification */
+#define NVS_TYPE_SET_DATAPATH 129
+#define NVS_TYPE_SUBCH_REQ 133
+#define NVS_TYPE_SUBCH_RESP 133 /* same as SUBCH_REQ */
+#define NVS_TYPE_TXTBL_NOTE 134 /* notification */
+
+
+/* NVS message common header */
+struct hn_nvs_hdr {
+ uint32_t type;
+} __rte_packed;
+
+struct hn_nvs_init {
+ uint32_t type; /* NVS_TYPE_INIT */
+ uint32_t ver_min;
+ uint32_t ver_max;
+ uint8_t rsvd[28];
+} __rte_packed;
+
+struct hn_nvs_init_resp {
+ uint32_t type; /* NVS_TYPE_INIT_RESP */
+ uint32_t ver; /* deprecated */
+ uint32_t rsvd;
+ uint32_t status; /* NVS_STATUS_ */
+} __rte_packed;
+
+/* No response */
+struct hn_nvs_ndis_conf {
+ uint32_t type; /* NVS_TYPE_NDIS_CONF */
+ uint32_t mtu;
+ uint32_t rsvd;
+ uint64_t caps; /* NVS_NDIS_CONF_ */
+ uint8_t rsvd1[20];
+} __rte_packed;
+
+#define NVS_NDIS_CONF_SRIOV 0x0004
+#define NVS_NDIS_CONF_VLAN 0x0008
+
+/* No response */
+struct hn_nvs_ndis_init {
+ uint32_t type; /* NVS_TYPE_NDIS_INIT */
+ uint32_t ndis_major; /* NDIS_VERSION_MAJOR_ */
+ uint32_t ndis_minor; /* NDIS_VERSION_MINOR_ */
+ uint8_t rsvd[28];
+} __rte_packed;
+
+#define NVS_DATAPATH_SYNTHETIC 0
+#define NVS_DATAPATH_VF 1
+
+/* No response */
+struct hn_nvs_datapath {
+ uint32_t type; /* NVS_TYPE_SET_DATAPATH */
+ uint32_t active_path;/* NVS_DATAPATH_* */
+ uint8_t rsvd[32];
+} __rte_packed;
+
+struct hn_nvs_rxbuf_conn {
+ uint32_t type; /* NVS_TYPE_RXBUF_CONN */
+ uint32_t gpadl; /* RXBUF vmbus GPADL */
+ uint16_t sig; /* NVS_RXBUF_SIG */
+ uint8_t rsvd[30];
+} __rte_packed;
+
+struct hn_nvs_rxbuf_sect {
+ uint32_t start;
+ uint32_t slotsz;
+ uint32_t slotcnt;
+ uint32_t end;
+} __rte_packed;
+
+struct hn_nvs_rxbuf_connresp {
+ uint32_t type; /* NVS_TYPE_RXBUF_CONNRESP */
+ uint32_t status; /* NVS_STATUS_ */
+ uint32_t nsect; /* # of elem in nvs_sect */
+ struct hn_nvs_rxbuf_sect nvs_sect[1];
+} __rte_packed;
+
+/* No response */
+struct hn_nvs_rxbuf_disconn {
+ uint32_t type; /* NVS_TYPE_RXBUF_DISCONN */
+ uint16_t sig; /* NVS_RXBUF_SIG */
+ uint8_t rsvd[34];
+} __rte_packed;
+
+struct hn_nvs_chim_conn {
+ uint32_t type; /* NVS_TYPE_CHIM_CONN */
+ uint32_t gpadl; /* chimney buf vmbus GPADL */
+ uint16_t sig; /* NDIS_NVS_CHIM_SIG */
+ uint8_t rsvd[30];
+} __rte_packed;
+
+struct hn_nvs_chim_connresp {
+ uint32_t type; /* NVS_TYPE_CHIM_CONNRESP */
+ uint32_t status; /* NVS_STATUS_ */
+ uint32_t sectsz; /* section size */
+} __rte_packed;
+
+/* No response */
+struct hn_nvs_chim_disconn {
+ uint32_t type; /* NVS_TYPE_CHIM_DISCONN */
+ uint16_t sig; /* NVS_CHIM_SIG */
+ uint8_t rsvd[34];
+} __rte_packed;
+
+#define NVS_SUBCH_OP_ALLOC 1
+
+struct hn_nvs_subch_req {
+ uint32_t type; /* NVS_TYPE_SUBCH_REQ */
+ uint32_t op; /* NVS_SUBCH_OP_ */
+ uint32_t nsubch;
+ uint8_t rsvd[28];
+} __rte_packed;
+
+struct hn_nvs_subch_resp {
+ uint32_t type; /* NVS_TYPE_SUBCH_RESP */
+ uint32_t status; /* NVS_STATUS_ */
+ uint32_t nsubch;
+ uint8_t rsvd[28];
+} __rte_packed;
+
+struct hn_nvs_rndis {
+ uint32_t type; /* NVS_TYPE_RNDIS */
+ uint32_t rndis_mtype;/* NVS_RNDIS_MTYPE_ */
+ /*
+ * Chimney sending buffer index and size.
+ *
+ * NOTE:
+ * If nvs_chim_idx is set to NVS_CHIM_IDX_INVALID
+ * and nvs_chim_sz is set to 0, then chimney sending
+ * buffer is _not_ used by this RNDIS message.
+ */
+ uint32_t chim_idx;
+ uint32_t chim_sz;
+ uint8_t rsvd[24];
+} __rte_packed;
+
+struct hn_nvs_rndis_ack {
+ uint32_t type; /* NVS_TYPE_RNDIS_ACK */
+ uint32_t status; /* NVS_STATUS_ */
+ uint8_t rsvd[32];
+} __rte_packed;
+
+
+int hn_nvs_attach(struct hn_data *hv, unsigned int mtu);
+void hn_nvs_detach(struct hn_data *hv);
+void hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid);
+int hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch);
+void hn_nvs_set_datapath(struct hn_data *hv, uint32_t path);
+
+static inline int
+hn_nvs_send(struct vmbus_channel *chan, uint16_t flags,
+ void *nvs_msg, int nvs_msglen, uintptr_t sndc,
+ bool *need_sig)
+{
+ return rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
+ nvs_msg, nvs_msglen, (uint64_t)sndc,
+ flags, need_sig);
+}
+
+static inline int
+hn_nvs_send_sglist(struct vmbus_channel *chan,
+ struct vmbus_gpa sg[], unsigned int sglen,
+ void *nvs_msg, int nvs_msglen,
+ uintptr_t sndc, bool *need_sig)
+{
+ return rte_vmbus_chan_send_sglist(chan, sg, sglen, nvs_msg, nvs_msglen,
+ (uint64_t)sndc, need_sig);
+}
diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_rndis.c b/src/spdk/dpdk/drivers/net/netvsc/hn_rndis.c
new file mode 100644
index 00000000..bde33969
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/netvsc/hn_rndis.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2009-2018 Microsoft Corp.
+ * Copyright (c) 2010-2012 Citrix Inc.
+ * Copyright (c) 2012 NetApp Inc.
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_bus_vmbus.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_nvs.h"
+#include "hn_rndis.h"
+#include "ndis.h"
+
+#define HN_RNDIS_XFER_SIZE 0x4000
+
+#define HN_NDIS_TXCSUM_CAP_IP4 \
+ (NDIS_TXCSUM_CAP_IP4 | NDIS_TXCSUM_CAP_IP4OPT)
+#define HN_NDIS_TXCSUM_CAP_TCP4 \
+ (NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT)
+#define HN_NDIS_TXCSUM_CAP_TCP6 \
+ (NDIS_TXCSUM_CAP_TCP6 | NDIS_TXCSUM_CAP_TCP6OPT | \
+ NDIS_TXCSUM_CAP_IP6EXT)
+#define HN_NDIS_TXCSUM_CAP_UDP6 \
+ (NDIS_TXCSUM_CAP_UDP6 | NDIS_TXCSUM_CAP_IP6EXT)
+#define HN_NDIS_LSOV2_CAP_IP6 \
+ (NDIS_LSOV2_CAP_IP6EXT | NDIS_LSOV2_CAP_TCP6OPT)
+
+/* Get unique request id */
+static inline uint32_t
+hn_rndis_rid(struct hn_data *hv)
+{
+ uint32_t rid;
+
+ do {
+ rid = rte_atomic32_add_return(&hv->rndis_req_id, 1);
+ } while (rid == 0);
+
+ return rid;
+}
+
+static void *hn_rndis_alloc(struct hn_data *hv, size_t size)
+{
+ return rte_zmalloc_socket("RNDIS", size, PAGE_SIZE,
+ hv->vmbus->device.numa_node);
+}
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_DUMP
+void hn_rndis_dump(const void *buf)
+{
+ const union {
+ struct rndis_msghdr hdr;
+ struct rndis_packet_msg pkt;
+ struct rndis_init_req init_request;
+ struct rndis_init_comp init_complete;
+ struct rndis_halt_req halt;
+ struct rndis_query_req query_request;
+ struct rndis_query_comp query_complete;
+ struct rndis_set_req set_request;
+ struct rndis_set_comp set_complete;
+ struct rndis_reset_req reset_request;
+ struct rndis_reset_comp reset_complete;
+ struct rndis_keepalive_req keepalive_request;
+ struct rndis_keepalive_comp keepalive_complete;
+ struct rndis_status_msg indicate_status;
+ } *rndis_msg = buf;
+
+ switch (rndis_msg->hdr.type) {
+ case RNDIS_PACKET_MSG: {
+ const struct rndis_pktinfo *ppi;
+ unsigned int ppi_len;
+
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_PACKET (len %u, data %u:%u, # oob %u %u:%u, pkt %u:%u)\n",
+ rndis_msg->pkt.len,
+ rndis_msg->pkt.dataoffset,
+ rndis_msg->pkt.datalen,
+ rndis_msg->pkt.oobdataelements,
+ rndis_msg->pkt.oobdataoffset,
+ rndis_msg->pkt.oobdatalen,
+ rndis_msg->pkt.pktinfooffset,
+ rndis_msg->pkt.pktinfolen);
+
+ ppi = (const struct rndis_pktinfo *)
+ ((const char *)buf
+ + RNDIS_PACKET_MSG_OFFSET_ABS(rndis_msg->pkt.pktinfooffset));
+
+ ppi_len = rndis_msg->pkt.pktinfolen;
+ while (ppi_len > 0) {
+ const void *ppi_data;
+
+ ppi_data = ppi->data;
+
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ " PPI (size %u, type %u, offs %u data %#x)\n",
+ ppi->size, ppi->type, ppi->offset,
+ *(const uint32_t *)ppi_data);
+ if (ppi->size == 0)
+ break;
+ ppi_len -= ppi->size;
+ ppi = (const struct rndis_pktinfo *)
+ ((const char *)ppi + ppi->size);
+ }
+ break;
+ }
+ case RNDIS_INITIALIZE_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_INIT (len %u id %#x, ver %u.%u max xfer %u)\n",
+ rndis_msg->init_request.len,
+ rndis_msg->init_request.rid,
+ rndis_msg->init_request.ver_major,
+ rndis_msg->init_request.ver_minor,
+ rndis_msg->init_request.max_xfersz);
+ break;
+
+ case RNDIS_INITIALIZE_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_INIT_C (len %u, id %#x, status 0x%x, vers %u.%u, "
+ "flags %d, max xfer %u, max pkts %u, aligned %u)\n",
+ rndis_msg->init_complete.len,
+ rndis_msg->init_complete.rid,
+ rndis_msg->init_complete.status,
+ rndis_msg->init_complete.ver_major,
+ rndis_msg->init_complete.ver_minor,
+ rndis_msg->init_complete.devflags,
+ rndis_msg->init_complete.pktmaxsz,
+ rndis_msg->init_complete.pktmaxcnt,
+ rndis_msg->init_complete.align);
+ break;
+
+ case RNDIS_HALT_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_HALT (len %u id %#x)\n",
+ rndis_msg->halt.len, rndis_msg->halt.rid);
+ break;
+
+ case RNDIS_QUERY_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_QUERY (len %u, id %#x, oid %#x, info %u:%u)\n",
+ rndis_msg->query_request.len,
+ rndis_msg->query_request.rid,
+ rndis_msg->query_request.oid,
+ rndis_msg->query_request.infobuflen,
+ rndis_msg->query_request.infobufoffset);
+ break;
+
+ case RNDIS_QUERY_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_QUERY_C (len %u, id %#x, status 0x%x, buf %u:%u)\n",
+ rndis_msg->query_complete.len,
+ rndis_msg->query_complete.rid,
+ rndis_msg->query_complete.status,
+ rndis_msg->query_complete.infobuflen,
+ rndis_msg->query_complete.infobufoffset);
+ break;
+
+ case RNDIS_SET_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_SET (len %u, id %#x, oid %#x, info %u:%u)\n",
+ rndis_msg->set_request.len,
+ rndis_msg->set_request.rid,
+ rndis_msg->set_request.oid,
+ rndis_msg->set_request.infobuflen,
+ rndis_msg->set_request.infobufoffset);
+ break;
+
+ case RNDIS_SET_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
+ rndis_msg->set_complete.len,
+ rndis_msg->set_complete.rid,
+ rndis_msg->set_complete.status);
+ break;
+
+ case RNDIS_INDICATE_STATUS_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_INDICATE (len %u, status %#x, buf len %u, buf offset %u)\n",
+ rndis_msg->indicate_status.len,
+ rndis_msg->indicate_status.status,
+ rndis_msg->indicate_status.stbuflen,
+ rndis_msg->indicate_status.stbufoffset);
+ break;
+
+ case RNDIS_RESET_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_RESET (len %u, id %#x)\n",
+ rndis_msg->reset_request.len,
+ rndis_msg->reset_request.rid);
+ break;
+
+ case RNDIS_RESET_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_RESET_C (len %u, status %#x address %#x)\n",
+ rndis_msg->reset_complete.len,
+ rndis_msg->reset_complete.status,
+ rndis_msg->reset_complete.adrreset);
+ break;
+
+ case RNDIS_KEEPALIVE_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_KEEPALIVE (len %u, id %#x)\n",
+ rndis_msg->keepalive_request.len,
+ rndis_msg->keepalive_request.rid);
+ break;
+
+ case RNDIS_KEEPALIVE_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_KEEPALIVE_C (len %u, id %#x address %#x)\n",
+ rndis_msg->keepalive_complete.len,
+ rndis_msg->keepalive_complete.rid,
+ rndis_msg->keepalive_complete.status);
+ break;
+
+ default:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS type %#x len %u\n",
+ rndis_msg->hdr.type,
+ rndis_msg->hdr.len);
+ break;
+ }
+}
+#endif
+
+static int hn_nvs_send_rndis_ctrl(struct vmbus_channel *chan,
+ const void *req, uint32_t reqlen)
+
+{
+ struct hn_nvs_rndis nvs_rndis = {
+ .type = NVS_TYPE_RNDIS,
+ .rndis_mtype = NVS_RNDIS_MTYPE_CTRL,
+ .chim_idx = NVS_CHIM_IDX_INVALID,
+ .chim_sz = 0
+ };
+ struct vmbus_gpa sg;
+ rte_iova_t addr;
+
+ addr = rte_malloc_virt2iova(req);
+ if (unlikely(addr == RTE_BAD_IOVA)) {
+ PMD_DRV_LOG(ERR, "RNDIS send request can not get iova");
+ return -EINVAL;
+ }
+
+ if (unlikely(reqlen > PAGE_SIZE)) {
+ PMD_DRV_LOG(ERR, "RNDIS request %u greater than page size",
+ reqlen);
+ return -EINVAL;
+ }
+
+ sg.page = addr / PAGE_SIZE;
+ sg.ofs = addr & PAGE_MASK;
+ sg.len = reqlen;
+
+ if (sg.ofs + reqlen > PAGE_SIZE) {
+ PMD_DRV_LOG(ERR, "RNDIS request crosses page bounary");
+ return -EINVAL;
+ }
+
+ hn_rndis_dump(req);
+
+ return hn_nvs_send_sglist(chan, &sg, 1,
+ &nvs_rndis, sizeof(nvs_rndis), 0U, NULL);
+}
+
+void hn_rndis_link_status(struct hn_data *hv __rte_unused, const void *msg)
+{
+ const struct rndis_status_msg *indicate = msg;
+
+ hn_rndis_dump(msg);
+
+ PMD_DRV_LOG(DEBUG, "link status %#x", indicate->status);
+
+ switch (indicate->status) {
+ case RNDIS_STATUS_LINK_SPEED_CHANGE:
+ case RNDIS_STATUS_NETWORK_CHANGE:
+ case RNDIS_STATUS_TASK_OFFLOAD_CURRENT_CONFIG:
+ /* ignore not in DPDK API */
+ break;
+
+ case RNDIS_STATUS_MEDIA_CONNECT:
+ case RNDIS_STATUS_MEDIA_DISCONNECT:
+ /* TODO handle as LSC interrupt */
+ break;
+ default:
+ PMD_DRV_LOG(NOTICE, "unknown RNDIS indication: %#x",
+ indicate->status);
+ }
+}
+
+/* Callback from hn_process_events when response is visible */
+void hn_rndis_receive_response(struct hn_data *hv,
+ const void *data, uint32_t len)
+{
+ const struct rndis_init_comp *hdr = data;
+
+ hn_rndis_dump(data);
+
+ if (len < sizeof(3 * sizeof(uint32_t))) {
+ PMD_DRV_LOG(ERR,
+ "missing RNDIS header %u", len);
+ return;
+ }
+
+ if (len < hdr->len) {
+ PMD_DRV_LOG(ERR,
+ "truncated RNDIS response %u", len);
+ return;
+ }
+
+ if (len > sizeof(hv->rndis_resp)) {
+ PMD_DRV_LOG(NOTICE,
+ "RNDIS response exceeds buffer");
+ len = sizeof(hv->rndis_resp);
+ }
+
+ if (hdr->rid == 0) {
+ PMD_DRV_LOG(NOTICE,
+ "RNDIS response id zero!");
+ }
+
+ memcpy(hv->rndis_resp, data, len);
+
+ /* make sure response copied before update */
+ rte_smp_wmb();
+
+ if (rte_atomic32_cmpset(&hv->rndis_pending, hdr->rid, 0) == 0) {
+ PMD_DRV_LOG(ERR,
+ "received id %#x pending id %#x",
+ hdr->rid, (uint32_t)hv->rndis_pending);
+ }
+}
+
+/* Do request/response transaction */
+static int hn_rndis_exec1(struct hn_data *hv,
+ const void *req, uint32_t reqlen,
+ void *comp, uint32_t comp_len)
+{
+ const struct rndis_halt_req *hdr = req;
+ uint32_t rid = hdr->rid;
+ struct vmbus_channel *chan = hn_primary_chan(hv);
+ int error;
+
+ if (comp_len > sizeof(hv->rndis_resp)) {
+ PMD_DRV_LOG(ERR,
+ "Expected completion size %u exceeds buffer %zu",
+ comp_len, sizeof(hv->rndis_resp));
+ return -EIO;
+ }
+
+ if (comp != NULL &&
+ rte_atomic32_cmpset(&hv->rndis_pending, 0, rid) == 0) {
+ PMD_DRV_LOG(ERR,
+ "Request already pending");
+ return -EBUSY;
+ }
+
+ error = hn_nvs_send_rndis_ctrl(chan, req, reqlen);
+ if (error) {
+ PMD_DRV_LOG(ERR, "RNDIS ctrl send failed: %d", error);
+ return error;
+ }
+
+ if (comp) {
+ /* Poll primary channel until response received */
+ while (hv->rndis_pending == rid)
+ hn_process_events(hv, 0);
+
+ memcpy(comp, hv->rndis_resp, comp_len);
+ }
+
+ return 0;
+}
+
+/* Do transaction and validate response */
+static int hn_rndis_execute(struct hn_data *hv, uint32_t rid,
+ const void *req, uint32_t reqlen,
+ void *comp, uint32_t comp_len, uint32_t comp_type)
+{
+ const struct rndis_comp_hdr *hdr = comp;
+ int ret;
+
+ memset(comp, 0, comp_len);
+
+ ret = hn_rndis_exec1(hv, req, reqlen, comp, comp_len);
+ if (ret < 0)
+ return ret;
+ /*
+ * Check this RNDIS complete message.
+ */
+ if (unlikely(hdr->type != comp_type)) {
+ PMD_DRV_LOG(ERR,
+ "unexpected RNDIS response complete %#x expect %#x",
+ hdr->type, comp_type);
+
+ return -ENXIO;
+ }
+ if (unlikely(hdr->rid != rid)) {
+ PMD_DRV_LOG(ERR,
+ "RNDIS comp rid mismatch %#x, expect %#x",
+ hdr->rid, rid);
+ return -EINVAL;
+ }
+
+ /* All pass! */
+ return 0;
+}
+
+static int
+hn_rndis_query(struct hn_data *hv, uint32_t oid,
+ const void *idata, uint32_t idlen,
+ void *odata, uint32_t odlen)
+{
+ struct rndis_query_req *req;
+ struct rndis_query_comp *comp;
+ uint32_t reqlen, comp_len;
+ int error = -EIO;
+ unsigned int ofs;
+ uint32_t rid;
+
+ reqlen = sizeof(*req) + idlen;
+ req = hn_rndis_alloc(hv, reqlen);
+ if (req == NULL)
+ return -ENOMEM;
+
+ comp_len = sizeof(*comp) + odlen;
+ comp = rte_zmalloc("QUERY", comp_len, PAGE_SIZE);
+ if (!comp) {
+ error = -ENOMEM;
+ goto done;
+ }
+ comp->status = RNDIS_STATUS_PENDING;
+
+ rid = hn_rndis_rid(hv);
+
+ req->type = RNDIS_QUERY_MSG;
+ req->len = reqlen;
+ req->rid = rid;
+ req->oid = oid;
+ req->infobufoffset = RNDIS_QUERY_REQ_INFOBUFOFFSET;
+ req->infobuflen = idlen;
+
+ /* Input data immediately follows RNDIS query. */
+ memcpy(req + 1, idata, idlen);
+
+ error = hn_rndis_execute(hv, rid, req, reqlen,
+ comp, comp_len, RNDIS_QUERY_CMPLT);
+
+ if (error)
+ goto done;
+
+ if (comp->status != RNDIS_STATUS_SUCCESS) {
+ PMD_DRV_LOG(ERR, "RNDIS query 0x%08x failed: status 0x%08x",
+ oid, comp->status);
+ error = -EINVAL;
+ goto done;
+ }
+
+ if (comp->infobuflen == 0 || comp->infobufoffset == 0) {
+ /* No output data! */
+ PMD_DRV_LOG(ERR, "RNDIS query 0x%08x, no data", oid);
+ error = 0;
+ goto done;
+ }
+
+ /*
+ * Check output data length and offset.
+ */
+ /* ofs is the offset from the beginning of comp. */
+ ofs = RNDIS_QUERY_COMP_INFOBUFOFFSET_ABS(comp->infobufoffset);
+ if (ofs < sizeof(*comp) || ofs + comp->infobuflen > comp_len) {
+ PMD_DRV_LOG(ERR, "RNDIS query invalid comp ib off/len, %u/%u",
+ comp->infobufoffset, comp->infobuflen);
+ error = -EINVAL;
+ goto done;
+ }
+
+ /* Save output data. */
+ if (comp->infobuflen < odlen)
+ odlen = comp->infobuflen;
+
+ /* ofs is the offset from the beginning of comp. */
+ memcpy(odata, (const char *)comp + ofs, odlen);
+
+ error = 0;
+done:
+ rte_free(comp);
+ rte_free(req);
+ return error;
+}
+
+static int
+hn_rndis_halt(struct hn_data *hv)
+{
+ struct rndis_halt_req *halt;
+
+ halt = hn_rndis_alloc(hv, sizeof(*halt));
+ if (halt == NULL)
+ return -ENOMEM;
+
+ halt->type = RNDIS_HALT_MSG;
+ halt->len = sizeof(*halt);
+ halt->rid = hn_rndis_rid(hv);
+
+ /* No RNDIS completion; rely on NVS message send completion */
+ hn_rndis_exec1(hv, halt, sizeof(*halt), NULL, 0);
+
+ rte_free(halt);
+
+ PMD_INIT_LOG(DEBUG, "RNDIS halt done");
+ return 0;
+}
+
+static int
+hn_rndis_query_hwcaps(struct hn_data *hv, struct ndis_offload *caps)
+{
+ struct ndis_offload in;
+ uint32_t caps_len, size;
+ int error;
+
+ memset(caps, 0, sizeof(*caps));
+ memset(&in, 0, sizeof(in));
+ in.ndis_hdr.ndis_type = NDIS_OBJTYPE_OFFLOAD;
+
+ if (hv->ndis_ver >= NDIS_VERSION_6_30) {
+ in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_3;
+ size = NDIS_OFFLOAD_SIZE;
+ } else if (hv->ndis_ver >= NDIS_VERSION_6_1) {
+ in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_2;
+ size = NDIS_OFFLOAD_SIZE_6_1;
+ } else {
+ in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_1;
+ size = NDIS_OFFLOAD_SIZE_6_0;
+ }
+ in.ndis_hdr.ndis_size = size;
+
+ caps_len = NDIS_OFFLOAD_SIZE;
+ error = hn_rndis_query(hv, OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
+ &in, size, caps, caps_len);
+ if (error)
+ return error;
+
+ /* Preliminary verification. */
+ if (caps->ndis_hdr.ndis_type != NDIS_OBJTYPE_OFFLOAD) {
+ PMD_DRV_LOG(NOTICE, "invalid NDIS objtype 0x%02x",
+ caps->ndis_hdr.ndis_type);
+ return -EINVAL;
+ }
+ if (caps->ndis_hdr.ndis_rev < NDIS_OFFLOAD_REV_1) {
+ PMD_DRV_LOG(NOTICE, "invalid NDIS objrev 0x%02x",
+ caps->ndis_hdr.ndis_rev);
+ return -EINVAL;
+ }
+ if (caps->ndis_hdr.ndis_size > caps_len) {
+ PMD_DRV_LOG(NOTICE, "invalid NDIS objsize %u, data size %u",
+ caps->ndis_hdr.ndis_size, caps_len);
+ return -EINVAL;
+ } else if (caps->ndis_hdr.ndis_size < NDIS_OFFLOAD_SIZE_6_0) {
+ PMD_DRV_LOG(NOTICE, "invalid NDIS objsize %u",
+ caps->ndis_hdr.ndis_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+hn_rndis_query_rsscaps(struct hn_data *hv,
+ unsigned int *rxr_cnt0)
+{
+ struct ndis_rss_caps in, caps;
+ unsigned int indsz, rxr_cnt;
+ uint32_t caps_len;
+ int error;
+
+ *rxr_cnt0 = 0;
+
+ if (hv->ndis_ver < NDIS_VERSION_6_20) {
+ PMD_DRV_LOG(DEBUG, "RSS not supported on this host");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&in, 0, sizeof(in));
+ in.ndis_hdr.ndis_type = NDIS_OBJTYPE_RSS_CAPS;
+ in.ndis_hdr.ndis_rev = NDIS_RSS_CAPS_REV_2;
+ in.ndis_hdr.ndis_size = NDIS_RSS_CAPS_SIZE;
+
+ caps_len = NDIS_RSS_CAPS_SIZE;
+ error = hn_rndis_query(hv, OID_GEN_RECEIVE_SCALE_CAPABILITIES,
+ &in, NDIS_RSS_CAPS_SIZE,
+ &caps, caps_len);
+ if (error)
+ return error;
+
+ PMD_INIT_LOG(DEBUG, "RX rings %u indirect %u caps %#x",
+ caps.ndis_nrxr, caps.ndis_nind, caps.ndis_caps);
+ /*
+ * Preliminary verification.
+ */
+ if (caps.ndis_hdr.ndis_type != NDIS_OBJTYPE_RSS_CAPS) {
+ PMD_DRV_LOG(ERR, "invalid NDIS objtype 0x%02x",
+ caps.ndis_hdr.ndis_type);
+ return -EINVAL;
+ }
+ if (caps.ndis_hdr.ndis_rev < NDIS_RSS_CAPS_REV_1) {
+ PMD_DRV_LOG(ERR, "invalid NDIS objrev 0x%02x",
+ caps.ndis_hdr.ndis_rev);
+ return -EINVAL;
+ }
+ if (caps.ndis_hdr.ndis_size > caps_len) {
+ PMD_DRV_LOG(ERR,
+ "invalid NDIS objsize %u, data size %u",
+ caps.ndis_hdr.ndis_size, caps_len);
+ return -EINVAL;
+ } else if (caps.ndis_hdr.ndis_size < NDIS_RSS_CAPS_SIZE_6_0) {
+ PMD_DRV_LOG(ERR, "invalid NDIS objsize %u",
+ caps.ndis_hdr.ndis_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Save information for later RSS configuration.
+ */
+ if (caps.ndis_nrxr == 0) {
+ PMD_DRV_LOG(ERR, "0 RX rings!?");
+ return -EINVAL;
+ }
+ rxr_cnt = caps.ndis_nrxr;
+
+ if (caps.ndis_hdr.ndis_size == NDIS_RSS_CAPS_SIZE &&
+ caps.ndis_hdr.ndis_rev >= NDIS_RSS_CAPS_REV_2) {
+ if (caps.ndis_nind > NDIS_HASH_INDCNT) {
+ PMD_DRV_LOG(ERR,
+ "too many RSS indirect table entries %u",
+ caps.ndis_nind);
+ return -EOPNOTSUPP;
+ }
+ if (!rte_is_power_of_2(caps.ndis_nind)) {
+ PMD_DRV_LOG(ERR,
+ "RSS indirect table size is not power-of-2 %u",
+ caps.ndis_nind);
+ }
+
+ indsz = caps.ndis_nind;
+ } else {
+ indsz = NDIS_HASH_INDCNT;
+ }
+
+ if (indsz < rxr_cnt) {
+ PMD_DRV_LOG(NOTICE,
+ "# of RX rings (%d) > RSS indirect table size %d",
+ rxr_cnt, indsz);
+ rxr_cnt = indsz;
+ }
+
+ hv->rss_offloads = 0;
+ if (caps.ndis_caps & NDIS_RSS_CAP_IPV4)
+ hv->rss_offloads |= ETH_RSS_IPV4
+ | ETH_RSS_NONFRAG_IPV4_TCP
+ | ETH_RSS_NONFRAG_IPV4_UDP;
+ if (caps.ndis_caps & NDIS_RSS_CAP_IPV6)
+ hv->rss_offloads |= ETH_RSS_IPV6
+ | ETH_RSS_NONFRAG_IPV6_TCP;
+ if (caps.ndis_caps & NDIS_RSS_CAP_IPV6_EX)
+ hv->rss_offloads |= ETH_RSS_IPV6_EX
+ | ETH_RSS_IPV6_TCP_EX;
+
+ /* Commit! */
+ *rxr_cnt0 = rxr_cnt;
+
+ return 0;
+}
+
+static int
+hn_rndis_set(struct hn_data *hv, uint32_t oid, const void *data, uint32_t dlen)
+{
+ struct rndis_set_req *req;
+ struct rndis_set_comp comp;
+ uint32_t reqlen, comp_len;
+ uint32_t rid;
+ int error;
+
+ reqlen = sizeof(*req) + dlen;
+ req = rte_zmalloc("RNDIS_SET", reqlen, PAGE_SIZE);
+ if (!req)
+ return -ENOMEM;
+
+ rid = hn_rndis_rid(hv);
+ req->type = RNDIS_SET_MSG;
+ req->len = reqlen;
+ req->rid = rid;
+ req->oid = oid;
+ req->infobuflen = dlen;
+ req->infobufoffset = RNDIS_SET_REQ_INFOBUFOFFSET;
+
+ /* Data immediately follows RNDIS set. */
+ memcpy(req + 1, data, dlen);
+
+ comp_len = sizeof(comp);
+ error = hn_rndis_execute(hv, rid, req, reqlen,
+ &comp, comp_len,
+ RNDIS_SET_CMPLT);
+ if (error) {
+ PMD_DRV_LOG(ERR, "exec RNDIS set %#" PRIx32 " failed",
+ oid);
+ error = EIO;
+ goto done;
+ }
+
+ if (comp.status != RNDIS_STATUS_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "RNDIS set %#" PRIx32 " failed: status %#" PRIx32,
+ oid, comp.status);
+ error = EIO;
+ goto done;
+ }
+
+done:
+ rte_free(req);
+ return error;
+}
+
+int hn_rndis_conf_offload(struct hn_data *hv,
+ uint64_t tx_offloads, uint64_t rx_offloads)
+{
+ struct ndis_offload_params params;
+ struct ndis_offload hwcaps;
+ int error;
+
+ error = hn_rndis_query_hwcaps(hv, &hwcaps);
+ if (error) {
+ PMD_DRV_LOG(ERR, "hwcaps query failed: %d", error);
+ return error;
+ }
+
+ /* NOTE: 0 means "no change" */
+ memset(&params, 0, sizeof(params));
+
+ params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
+ if (hv->ndis_ver < NDIS_VERSION_6_30) {
+ params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
+ params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
+ } else {
+ params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
+ params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
+ }
+
+ if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+ if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_TCP4)
+ params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+
+ if (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_TCP6)
+ params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) {
+ if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4)
+ == NDIS_RXCSUM_CAP_TCP4)
+ params.ndis_tcp4csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+
+ if ((hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6)
+ == NDIS_RXCSUM_CAP_TCP6)
+ params.ndis_tcp6csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+ }
+
+ if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+ if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4)
+ params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+
+ if ((hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6)
+ == NDIS_TXCSUM_CAP_UDP6)
+ params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+ }
+
+ if (rx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+ if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4)
+ params.ndis_udp4csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+
+ if (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6)
+ params.ndis_udp6csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+ }
+
+ if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+ if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_IP4)
+ == NDIS_TXCSUM_CAP_IP4)
+ params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+ }
+ if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+ if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
+ params.ndis_ip4csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+ }
+
+ if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+ if (hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023)
+ params.ndis_lsov2_ip4 = NDIS_OFFLOAD_LSOV2_ON;
+ else
+ goto unsupported;
+
+ if ((hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
+ == HN_NDIS_LSOV2_CAP_IP6)
+ params.ndis_lsov2_ip6 = NDIS_OFFLOAD_LSOV2_ON;
+ else
+ goto unsupported;
+ }
+
+ error = hn_rndis_set(hv, OID_TCP_OFFLOAD_PARAMETERS, &params,
+ params.ndis_hdr.ndis_size);
+ if (error) {
+ PMD_DRV_LOG(ERR, "offload config failed");
+ return error;
+ }
+
+ return 0;
+ unsupported:
+ PMD_DRV_LOG(NOTICE,
+ "offload tx:%" PRIx64 " rx:%" PRIx64 " not supported by this version",
+ tx_offloads, rx_offloads);
+ return -EINVAL;
+}
+
+int hn_rndis_get_offload(struct hn_data *hv,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ndis_offload hwcaps;
+ int error;
+
+ memset(&hwcaps, 0, sizeof(hwcaps));
+
+ error = hn_rndis_query_hwcaps(hv, &hwcaps);
+ if (error) {
+ PMD_DRV_LOG(ERR, "hwcaps query failed: %d", error);
+ return error;
+ }
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_IP4)
+ == HN_NDIS_TXCSUM_CAP_IP4)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_TCP4)
+ == HN_NDIS_TXCSUM_CAP_TCP4 &&
+ (hwcaps.ndis_csum.ndis_ip6_txcsum & HN_NDIS_TXCSUM_CAP_TCP6)
+ == HN_NDIS_TXCSUM_CAP_TCP6)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) &&
+ (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6))
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_UDP_CKSUM;
+
+ if ((hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023) &&
+ (hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
+ == HN_NDIS_LSOV2_CAP_IP6)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_CRC_STRIP;
+
+ if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) &&
+ (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6))
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) &&
+ (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6))
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_UDP_CKSUM;
+
+ return 0;
+}
+
+int
+hn_rndis_set_rxfilter(struct hn_data *hv, uint32_t filter)
+{
+ int error;
+
+ error = hn_rndis_set(hv, OID_GEN_CURRENT_PACKET_FILTER,
+ &filter, sizeof(filter));
+ if (error) {
+ PMD_DRV_LOG(ERR, "set RX filter %#" PRIx32 " failed: %d",
+ filter, error);
+ } else {
+ PMD_DRV_LOG(DEBUG, "set RX filter %#" PRIx32 " done", filter);
+ }
+
+ return error;
+}
+
+/* The default RSS key.
+ * This value is the same as MLX5 so that flows will be
+ * received on same path for both VF ans synthetic NIC.
+ */
+static const uint8_t rss_default_key[NDIS_HASH_KEYSIZE_TOEPLITZ] = {
+ 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
+ 0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
+ 0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
+ 0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
+ 0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a,
+};
+
+int hn_rndis_conf_rss(struct hn_data *hv,
+ const struct rte_eth_rss_conf *rss_conf)
+{
+ struct ndis_rssprm_toeplitz rssp;
+ struct ndis_rss_params *prm = &rssp.rss_params;
+ const uint8_t *rss_key = rss_conf->rss_key ? : rss_default_key;
+ uint32_t rss_hash;
+ unsigned int i;
+ int error;
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(&rssp, 0, sizeof(rssp));
+
+ prm->ndis_hdr.ndis_type = NDIS_OBJTYPE_RSS_PARAMS;
+ prm->ndis_hdr.ndis_rev = NDIS_RSS_PARAMS_REV_2;
+ prm->ndis_hdr.ndis_size = sizeof(*prm);
+ prm->ndis_flags = 0;
+
+ rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;
+ if (rss_conf->rss_hf & ETH_RSS_IPV4)
+ rss_hash |= NDIS_HASH_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ rss_hash |= NDIS_HASH_TCP_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_IPV6)
+ rss_hash |= NDIS_HASH_IPV6;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ rss_hash |= NDIS_HASH_TCP_IPV6;
+
+ prm->ndis_hash = rss_hash;
+ prm->ndis_indsize = sizeof(rssp.rss_ind[0]) * NDIS_HASH_INDCNT;
+ prm->ndis_indoffset = offsetof(struct ndis_rssprm_toeplitz, rss_ind[0]);
+ prm->ndis_keysize = NDIS_HASH_KEYSIZE_TOEPLITZ;
+ prm->ndis_keyoffset = offsetof(struct ndis_rssprm_toeplitz, rss_key[0]);
+
+ for (i = 0; i < NDIS_HASH_INDCNT; i++)
+ rssp.rss_ind[i] = i % hv->num_queues;
+
+ /* Set hask key values */
+ memcpy(&rssp.rss_key, rss_key, NDIS_HASH_KEYSIZE_TOEPLITZ);
+
+ error = hn_rndis_set(hv, OID_GEN_RECEIVE_SCALE_PARAMETERS,
+ &rssp, sizeof(rssp));
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "RSS config num queues=%u failed: %d",
+ hv->num_queues, error);
+ }
+ return error;
+}
+
+static int hn_rndis_init(struct hn_data *hv)
+{
+ struct rndis_init_req *req;
+ struct rndis_init_comp comp;
+ uint32_t comp_len, rid;
+ int error;
+
+ req = hn_rndis_alloc(hv, sizeof(*req));
+ if (!req) {
+ PMD_DRV_LOG(ERR, "no memory for RNDIS init");
+ return -ENXIO;
+ }
+
+ rid = hn_rndis_rid(hv);
+ req->type = RNDIS_INITIALIZE_MSG;
+ req->len = sizeof(*req);
+ req->rid = rid;
+ req->ver_major = RNDIS_VERSION_MAJOR;
+ req->ver_minor = RNDIS_VERSION_MINOR;
+ req->max_xfersz = HN_RNDIS_XFER_SIZE;
+
+ comp_len = RNDIS_INIT_COMP_SIZE_MIN;
+ error = hn_rndis_execute(hv, rid, req, sizeof(*req),
+ &comp, comp_len,
+ RNDIS_INITIALIZE_CMPLT);
+ if (error)
+ goto done;
+
+ if (comp.status != RNDIS_STATUS_SUCCESS) {
+ PMD_DRV_LOG(ERR, "RNDIS init failed: status 0x%08x",
+ comp.status);
+ error = -EIO;
+ goto done;
+ }
+
+ hv->rndis_agg_size = comp.pktmaxsz;
+ hv->rndis_agg_pkts = comp.pktmaxcnt;
+ hv->rndis_agg_align = 1U << comp.align;
+
+ if (hv->rndis_agg_align < sizeof(uint32_t)) {
+ /*
+ * The RNDIS packet message encap assumes that the RNDIS
+ * packet message is at least 4 bytes aligned. Fix up the
+ * alignment here, if the remote side sets the alignment
+ * too low.
+ */
+ PMD_DRV_LOG(NOTICE,
+ "fixup RNDIS aggpkt align: %u -> %zu",
+ hv->rndis_agg_align, sizeof(uint32_t));
+ hv->rndis_agg_align = sizeof(uint32_t);
+ }
+
+ PMD_INIT_LOG(INFO,
+ "RNDIS ver %u.%u, aggpkt size %u, aggpkt cnt %u, aggpkt align %u",
+ comp.ver_major, comp.ver_minor,
+ hv->rndis_agg_size, hv->rndis_agg_pkts,
+ hv->rndis_agg_align);
+ error = 0;
+done:
+ rte_free(req);
+ return error;
+}
+
+int
+hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr)
+{
+ uint32_t eaddr_len;
+ int error;
+
+ eaddr_len = ETHER_ADDR_LEN;
+ error = hn_rndis_query(hv, OID_802_3_PERMANENT_ADDRESS, NULL, 0,
+ eaddr, eaddr_len);
+ if (error)
+ return error;
+
+ PMD_DRV_LOG(INFO, "MAC address %02x:%02x:%02x:%02x:%02x:%02x",
+ eaddr[0], eaddr[1], eaddr[2],
+ eaddr[3], eaddr[4], eaddr[5]);
+ return 0;
+}
+
+int
+hn_rndis_get_linkstatus(struct hn_data *hv)
+{
+ return hn_rndis_query(hv, OID_GEN_MEDIA_CONNECT_STATUS, NULL, 0,
+ &hv->link_status, sizeof(uint32_t));
+}
+
+int
+hn_rndis_get_linkspeed(struct hn_data *hv)
+{
+ return hn_rndis_query(hv, OID_GEN_LINK_SPEED, NULL, 0,
+ &hv->link_speed, sizeof(uint32_t));
+}
+
+int
+hn_rndis_attach(struct hn_data *hv)
+{
+ /* Initialize RNDIS. */
+ return hn_rndis_init(hv);
+}
+
+void
+hn_rndis_detach(struct hn_data *hv)
+{
+ /* Halt the RNDIS. */
+ hn_rndis_halt(hv);
+}
diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_rndis.h b/src/spdk/dpdk/drivers/net/netvsc/hn_rndis.h
new file mode 100644
index 00000000..89e2e6ba
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/netvsc/hn_rndis.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#include "rndis.h"
+
+struct hn_data;
+
+void hn_rndis_receive_response(struct hn_data *hv,
+ const void *data, uint32_t len);
+void hn_rndis_link_status(struct hn_data *hv, const void *data);
+int hn_rndis_attach(struct hn_data *hv);
+void hn_rndis_detach(struct hn_data *hv);
+int hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr);
+int hn_rndis_get_linkstatus(struct hn_data *hv);
+int hn_rndis_get_linkspeed(struct hn_data *hv);
+int hn_rndis_set_rxfilter(struct hn_data *hv, uint32_t filter);
+void hn_rndis_rx_ctrl(struct hn_data *hv, const void *data,
+ int dlen);
+int hn_rndis_get_offload(struct hn_data *hv,
+ struct rte_eth_dev_info *dev_info);
+int hn_rndis_conf_offload(struct hn_data *hv,
+ uint64_t tx_offloads,
+ uint64_t rx_offloads);
+int hn_rndis_query_rsscaps(struct hn_data *hv,
+ unsigned int *rxr_cnt0);
+int hn_rndis_conf_rss(struct hn_data *hv,
+ const struct rte_eth_rss_conf *rss_conf);
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_DUMP
+void hn_rndis_dump(const void *buf);
+#else
+#define hn_rndis_dump(buf)
+#endif
diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_rxtx.c b/src/spdk/dpdk/drivers/net/netvsc/hn_rxtx.c
new file mode 100644
index 00000000..02ef27e3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/netvsc/hn_rxtx.c
@@ -0,0 +1,1334 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Microsoft Corporation
+ * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <strings.h>
+
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_net.h>
+#include <rte_bus_vmbus.h>
+#include <rte_spinlock.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_rndis.h"
+#include "hn_nvs.h"
+#include "ndis.h"
+
+#define HN_NVS_SEND_MSG_SIZE \
+ (sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis))
+
+#define HN_TXD_CACHE_SIZE 32 /* per cpu tx_descriptor pool cache */
+#define HN_TXCOPY_THRESHOLD 512
+
+#define HN_RXCOPY_THRESHOLD 256
+#define HN_RXQ_EVENT_DEFAULT 2048
+
+struct hn_rxinfo {
+ uint32_t vlan_info;
+ uint32_t csum_info;
+ uint32_t hash_info;
+ uint32_t hash_value;
+};
+
+#define HN_RXINFO_VLAN 0x0001
+#define HN_RXINFO_CSUM 0x0002
+#define HN_RXINFO_HASHINF 0x0004
+#define HN_RXINFO_HASHVAL 0x0008
+#define HN_RXINFO_ALL \
+ (HN_RXINFO_VLAN | \
+ HN_RXINFO_CSUM | \
+ HN_RXINFO_HASHINF | \
+ HN_RXINFO_HASHVAL)
+
+#define HN_NDIS_VLAN_INFO_INVALID 0xffffffff
+#define HN_NDIS_RXCSUM_INFO_INVALID 0
+#define HN_NDIS_HASH_INFO_INVALID 0
+
+/*
+ * Per-transmit book keeping.
+ * A slot in transmit ring (chim_index) is reserved for each transmit.
+ *
+ * There are two types of transmit:
+ * - buffered transmit where chimney buffer is used and RNDIS header
+ * is in the buffer. mbuf == NULL for this case.
+ *
+ * - direct transmit where RNDIS header is in the in rndis_pkt
+ * mbuf is freed after transmit.
+ *
+ * Descriptors come from per-port pool which is used
+ * to limit number of outstanding requests per device.
+ */
+struct hn_txdesc {
+ struct rte_mbuf *m;
+
+ uint16_t queue_id;
+ uint16_t chim_index;
+ uint32_t chim_size;
+ uint32_t data_size;
+ uint32_t packets;
+
+ struct rndis_packet_msg *rndis_pkt;
+};
+
+#define HN_RNDIS_PKT_LEN \
+ (sizeof(struct rndis_packet_msg) + \
+ RNDIS_PKTINFO_SIZE(NDIS_HASH_VALUE_SIZE) + \
+ RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE) + \
+ RNDIS_PKTINFO_SIZE(NDIS_LSO2_INFO_SIZE) + \
+ RNDIS_PKTINFO_SIZE(NDIS_TXCSUM_INFO_SIZE))
+
+/* Minimum space required for a packet */
+#define HN_PKTSIZE_MIN(align) \
+ RTE_ALIGN(ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)
+
+#define DEFAULT_TX_FREE_THRESH 32U
+
+static void
+hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m)
+{
+ uint32_t s = m->pkt_len;
+ const struct ether_addr *ea;
+
+ if (s == 64) {
+ stats->size_bins[1]++;
+ } else if (s > 64 && s < 1024) {
+ uint32_t bin;
+
+ /* count zeros, and offset into correct bin */
+ bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
+ stats->size_bins[bin]++;
+ } else {
+ if (s < 64)
+ stats->size_bins[0]++;
+ else if (s < 1519)
+ stats->size_bins[6]++;
+ else if (s >= 1519)
+ stats->size_bins[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(m, const struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ stats->broadcast++;
+ else
+ stats->multicast++;
+ }
+}
+
+static inline unsigned int hn_rndis_pktlen(const struct rndis_packet_msg *pkt)
+{
+ return pkt->pktinfooffset + pkt->pktinfolen;
+}
+
+static inline uint32_t
+hn_rndis_pktmsg_offset(uint32_t ofs)
+{
+ return ofs - offsetof(struct rndis_packet_msg, dataoffset);
+}
+
+static void hn_txd_init(struct rte_mempool *mp __rte_unused,
+ void *opaque, void *obj, unsigned int idx)
+{
+ struct hn_txdesc *txd = obj;
+ struct rte_eth_dev *dev = opaque;
+ struct rndis_packet_msg *pkt;
+
+ memset(txd, 0, sizeof(*txd));
+ txd->chim_index = idx;
+
+ pkt = rte_malloc_socket("RNDIS_TX", HN_RNDIS_PKT_LEN,
+ rte_align32pow2(HN_RNDIS_PKT_LEN),
+ dev->device->numa_node);
+ if (!pkt)
+ rte_exit(EXIT_FAILURE, "can not allocate RNDIS header");
+
+ txd->rndis_pkt = pkt;
+}
+
+/*
+ * Unlike Linux and FreeBSD, this driver uses a mempool
+ * to limit outstanding transmits and reserve buffers
+ */
+int
+hn_tx_pool_init(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ char name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *mp;
+
+ snprintf(name, sizeof(name),
+ "hn_txd_%u", dev->data->port_id);
+
+ PMD_INIT_LOG(DEBUG, "create a TX send pool %s n=%u size=%zu socket=%d",
+ name, hv->chim_cnt, sizeof(struct hn_txdesc),
+ dev->device->numa_node);
+
+ mp = rte_mempool_create(name, hv->chim_cnt, sizeof(struct hn_txdesc),
+ HN_TXD_CACHE_SIZE, 0,
+ NULL, NULL,
+ hn_txd_init, dev,
+ dev->device->numa_node, 0);
+ if (!mp) {
+ PMD_DRV_LOG(ERR,
+ "mempool %s create failed: %d", name, rte_errno);
+ return -rte_errno;
+ }
+
+ hv->tx_pool = mp;
+ return 0;
+}
+
+static void hn_reset_txagg(struct hn_tx_queue *txq)
+{
+ txq->agg_szleft = txq->agg_szmax;
+ txq->agg_pktleft = txq->agg_pktmax;
+ txq->agg_txd = NULL;
+ txq->agg_prevpkt = NULL;
+}
+
+int
+hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc __rte_unused,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct hn_tx_queue *txq;
+ uint32_t tx_free_thresh;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq)
+ return -ENOMEM;
+
+ txq->hv = hv;
+ txq->chan = hv->channels[queue_idx];
+ txq->port_id = dev->data->port_id;
+ txq->queue_id = queue_idx;
+
+ tx_free_thresh = tx_conf->tx_free_thresh;
+ if (tx_free_thresh == 0)
+ tx_free_thresh = RTE_MIN(hv->chim_cnt / 4,
+ DEFAULT_TX_FREE_THRESH);
+
+ if (tx_free_thresh >= hv->chim_cnt - 3)
+ tx_free_thresh = hv->chim_cnt - 3;
+
+ txq->free_thresh = tx_free_thresh;
+
+ txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size);
+ txq->agg_pktmax = hv->rndis_agg_pkts;
+ txq->agg_align = hv->rndis_agg_align;
+
+ hn_reset_txagg(txq);
+
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return 0;
+}
+
+void
+hn_dev_tx_queue_release(void *arg)
+{
+ struct hn_tx_queue *txq = arg;
+ struct hn_txdesc *txd;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!txq)
+ return;
+
+ /* If any pending data is still present just drop it */
+ txd = txq->agg_txd;
+ if (txd)
+ rte_mempool_put(txq->hv->tx_pool, txd);
+
+ rte_free(txq);
+}
+
+void
+hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct hn_tx_queue *txq = dev->data->rx_queues[queue_idx];
+
+ qinfo->conf.tx_free_thresh = txq->free_thresh;
+ qinfo->nb_desc = hv->tx_pool->size;
+}
+
+static void
+hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
+ unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
+{
+ struct hn_txdesc *txd = (struct hn_txdesc *)xactid;
+ struct hn_tx_queue *txq;
+
+ /* Control packets are sent with xacid == 0 */
+ if (!txd)
+ return;
+
+ txq = dev->data->tx_queues[queue_id];
+ if (likely(ack->status == NVS_STATUS_OK)) {
+ PMD_TX_LOG(DEBUG, "port %u:%u complete tx %u packets %u bytes %u",
+ txq->port_id, txq->queue_id, txd->chim_index,
+ txd->packets, txd->data_size);
+ txq->stats.bytes += txd->data_size;
+ txq->stats.packets += txd->packets;
+ } else {
+ PMD_TX_LOG(NOTICE, "port %u:%u complete tx %u failed status %u",
+ txq->port_id, txq->queue_id, txd->chim_index, ack->status);
+ ++txq->stats.errors;
+ }
+
+ rte_pktmbuf_free(txd->m);
+
+ rte_mempool_put(txq->hv->tx_pool, txd);
+}
+
+/* Handle transmit completion events */
+static void
+hn_nvs_handle_comp(struct rte_eth_dev *dev, uint16_t queue_id,
+ const struct vmbus_chanpkt_hdr *pkt,
+ const void *data)
+{
+ const struct hn_nvs_hdr *hdr = data;
+
+ switch (hdr->type) {
+ case NVS_TYPE_RNDIS_ACK:
+ hn_nvs_send_completed(dev, queue_id, pkt->xactid, data);
+ break;
+
+ default:
+ PMD_TX_LOG(NOTICE,
+ "unexpected send completion type %u",
+ hdr->type);
+ }
+}
+
+/* Parse per-packet info (meta data) */
+static int
+hn_rndis_rxinfo(const void *info_data, unsigned int info_dlen,
+ struct hn_rxinfo *info)
+{
+ const struct rndis_pktinfo *pi = info_data;
+ uint32_t mask = 0;
+
+ while (info_dlen != 0) {
+ const void *data;
+ uint32_t dlen;
+
+ if (unlikely(info_dlen < sizeof(*pi)))
+ return -EINVAL;
+
+ if (unlikely(info_dlen < pi->size))
+ return -EINVAL;
+ info_dlen -= pi->size;
+
+ if (unlikely(pi->size & RNDIS_PKTINFO_SIZE_ALIGNMASK))
+ return -EINVAL;
+ if (unlikely(pi->size < pi->offset))
+ return -EINVAL;
+
+ dlen = pi->size - pi->offset;
+ data = pi->data;
+
+ switch (pi->type) {
+ case NDIS_PKTINFO_TYPE_VLAN:
+ if (unlikely(dlen < NDIS_VLAN_INFO_SIZE))
+ return -EINVAL;
+ info->vlan_info = *((const uint32_t *)data);
+ mask |= HN_RXINFO_VLAN;
+ break;
+
+ case NDIS_PKTINFO_TYPE_CSUM:
+ if (unlikely(dlen < NDIS_RXCSUM_INFO_SIZE))
+ return -EINVAL;
+ info->csum_info = *((const uint32_t *)data);
+ mask |= HN_RXINFO_CSUM;
+ break;
+
+ case NDIS_PKTINFO_TYPE_HASHVAL:
+ if (unlikely(dlen < NDIS_HASH_VALUE_SIZE))
+ return -EINVAL;
+ info->hash_value = *((const uint32_t *)data);
+ mask |= HN_RXINFO_HASHVAL;
+ break;
+
+ case NDIS_PKTINFO_TYPE_HASHINF:
+ if (unlikely(dlen < NDIS_HASH_INFO_SIZE))
+ return -EINVAL;
+ info->hash_info = *((const uint32_t *)data);
+ mask |= HN_RXINFO_HASHINF;
+ break;
+
+ default:
+ goto next;
+ }
+
+ if (mask == HN_RXINFO_ALL)
+ break; /* All found; done */
+next:
+ pi = (const struct rndis_pktinfo *)
+ ((const uint8_t *)pi + pi->size);
+ }
+
+ /*
+ * Final fixup.
+ * - If there is no hash value, invalidate the hash info.
+ */
+ if (!(mask & HN_RXINFO_HASHVAL))
+ info->hash_info = HN_NDIS_HASH_INFO_INVALID;
+ return 0;
+}
+
+/*
+ * Ack the consumed RXBUF associated w/ this channel packet,
+ * so that this RXBUF can be recycled by the hypervisor.
+ */
+static void hn_rx_buf_release(struct hn_rx_bufinfo *rxb)
+{
+ struct rte_mbuf_ext_shared_info *shinfo = &rxb->shinfo;
+ struct hn_data *hv = rxb->hv;
+
+ if (rte_mbuf_ext_refcnt_update(shinfo, -1) == 0) {
+ hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
+ --hv->rxbuf_outstanding;
+ }
+}
+
+static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
+{
+ hn_rx_buf_release(opaque);
+}
+
+static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq,
+ const struct vmbus_chanpkt_rxbuf *pkt)
+{
+ struct hn_rx_bufinfo *rxb;
+
+ rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid;
+ rxb->chan = rxq->chan;
+ rxb->xactid = pkt->hdr.xactid;
+ rxb->hv = rxq->hv;
+
+ rxb->shinfo.free_cb = hn_rx_buf_free_cb;
+ rxb->shinfo.fcb_opaque = rxb;
+ rte_mbuf_ext_refcnt_set(&rxb->shinfo, 1);
+ return rxb;
+}
+
+static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
+ uint8_t *data, unsigned int headroom, unsigned int dlen,
+ const struct hn_rxinfo *info)
+{
+ struct hn_data *hv = rxq->hv;
+ struct rte_mbuf *m;
+
+ m = rte_pktmbuf_alloc(rxq->mb_pool);
+ if (unlikely(!m)) {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[rxq->port_id];
+
+ dev->data->rx_mbuf_alloc_failed++;
+ return;
+ }
+
+ /*
+ * For large packets, avoid copy if possible but need to keep
+ * some space available in receive area for later packets.
+ */
+ if (dlen >= HN_RXCOPY_THRESHOLD &&
+ hv->rxbuf_outstanding < hv->rxbuf_section_cnt / 2) {
+ struct rte_mbuf_ext_shared_info *shinfo;
+ const void *rxbuf;
+ rte_iova_t iova;
+
+ /*
+ * Build an external mbuf that points to recveive area.
+ * Use refcount to handle multiple packets in same
+ * receive buffer section.
+ */
+ rxbuf = hv->rxbuf_res->addr;
+ iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf);
+ shinfo = &rxb->shinfo;
+
+ if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 1)
+ ++hv->rxbuf_outstanding;
+
+ rte_pktmbuf_attach_extbuf(m, data, iova,
+ dlen + headroom, shinfo);
+ m->data_off = headroom;
+ } else {
+ /* Mbuf's in pool must be large enough to hold small packets */
+ if (unlikely(rte_pktmbuf_tailroom(m) < dlen)) {
+ rte_pktmbuf_free_seg(m);
+ ++rxq->stats.errors;
+ return;
+ }
+ rte_memcpy(rte_pktmbuf_mtod(m, void *),
+ data + headroom, dlen);
+ }
+
+ m->port = rxq->port_id;
+ m->pkt_len = dlen;
+ m->data_len = dlen;
+ m->packet_type = rte_net_get_ptype(m, NULL,
+ RTE_PTYPE_L2_MASK |
+ RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
+
+ if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
+ m->vlan_tci = info->vlan_info;
+ m->ol_flags |= PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+ }
+
+ if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) {
+ if (info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK)
+ m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK
+ | NDIS_RXCSUM_INFO_TCPCS_OK))
+ m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else if (info->csum_info & (NDIS_RXCSUM_INFO_TCPCS_FAILED
+ | NDIS_RXCSUM_INFO_UDPCS_FAILED))
+ m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+
+ if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) {
+ m->ol_flags |= PKT_RX_RSS_HASH;
+ m->hash.rss = info->hash_value;
+ }
+
+ PMD_RX_LOG(DEBUG,
+ "port %u:%u RX id %"PRIu64" size %u type %#x ol_flags %#"PRIx64,
+ rxq->port_id, rxq->queue_id, rxb->xactid,
+ m->pkt_len, m->packet_type, m->ol_flags);
+
+ ++rxq->stats.packets;
+ rxq->stats.bytes += m->pkt_len;
+ hn_update_packet_stats(&rxq->stats, m);
+
+ if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
+ ++rxq->ring_full;
+ rte_pktmbuf_free(m);
+ }
+}
+
+static void hn_rndis_rx_data(struct hn_rx_queue *rxq,
+ struct hn_rx_bufinfo *rxb,
+ void *data, uint32_t dlen)
+{
+ unsigned int data_off, data_len, pktinfo_off, pktinfo_len;
+ const struct rndis_packet_msg *pkt = data;
+ struct hn_rxinfo info = {
+ .vlan_info = HN_NDIS_VLAN_INFO_INVALID,
+ .csum_info = HN_NDIS_RXCSUM_INFO_INVALID,
+ .hash_info = HN_NDIS_HASH_INFO_INVALID,
+ };
+ int err;
+
+ hn_rndis_dump(pkt);
+
+ if (unlikely(dlen < sizeof(*pkt)))
+ goto error;
+
+ if (unlikely(dlen < pkt->len))
+ goto error; /* truncated RNDIS from host */
+
+ if (unlikely(pkt->len < pkt->datalen
+ + pkt->oobdatalen + pkt->pktinfolen))
+ goto error;
+
+ if (unlikely(pkt->datalen == 0))
+ goto error;
+
+ /* Check offsets. */
+ if (unlikely(pkt->dataoffset < RNDIS_PACKET_MSG_OFFSET_MIN))
+ goto error;
+
+ if (likely(pkt->pktinfooffset > 0) &&
+ unlikely(pkt->pktinfooffset < RNDIS_PACKET_MSG_OFFSET_MIN ||
+ (pkt->pktinfooffset & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK)))
+ goto error;
+
+ data_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
+ data_len = pkt->datalen;
+ pktinfo_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->pktinfooffset);
+ pktinfo_len = pkt->pktinfolen;
+
+ if (likely(pktinfo_len > 0)) {
+ err = hn_rndis_rxinfo((const uint8_t *)pkt + pktinfo_off,
+ pktinfo_len, &info);
+ if (err)
+ goto error;
+ }
+
+ if (unlikely(data_off + data_len > pkt->len))
+ goto error;
+
+ if (unlikely(data_len < ETHER_HDR_LEN))
+ goto error;
+
+ hn_rxpkt(rxq, rxb, data, data_off, data_len, &info);
+ return;
+error:
+ ++rxq->stats.errors;
+}
+
+static void
+hn_rndis_receive(const struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
+ struct hn_rx_bufinfo *rxb, void *buf, uint32_t len)
+{
+ const struct rndis_msghdr *hdr = buf;
+
+ switch (hdr->type) {
+ case RNDIS_PACKET_MSG:
+ if (dev->data->dev_started)
+ hn_rndis_rx_data(rxq, rxb, buf, len);
+ break;
+
+ case RNDIS_INDICATE_STATUS_MSG:
+ hn_rndis_link_status(rxq->hv, buf);
+ break;
+
+ case RNDIS_INITIALIZE_CMPLT:
+ case RNDIS_QUERY_CMPLT:
+ case RNDIS_SET_CMPLT:
+ hn_rndis_receive_response(rxq->hv, buf, len);
+ break;
+
+ default:
+ PMD_DRV_LOG(NOTICE,
+ "unexpected RNDIS message (type %#x len %u)",
+ hdr->type, len);
+ break;
+ }
+}
+
+static void
+hn_nvs_handle_rxbuf(struct rte_eth_dev *dev,
+ struct hn_data *hv,
+ struct hn_rx_queue *rxq,
+ const struct vmbus_chanpkt_hdr *hdr,
+ const void *buf)
+{
+ const struct vmbus_chanpkt_rxbuf *pkt;
+ const struct hn_nvs_hdr *nvs_hdr = buf;
+ uint32_t rxbuf_sz = hv->rxbuf_res->len;
+ char *rxbuf = hv->rxbuf_res->addr;
+ unsigned int i, hlen, count;
+ struct hn_rx_bufinfo *rxb;
+
+ /* At minimum we need type header */
+ if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*nvs_hdr))) {
+ PMD_RX_LOG(ERR, "invalid receive nvs RNDIS");
+ return;
+ }
+
+ /* Make sure that this is a RNDIS message. */
+ if (unlikely(nvs_hdr->type != NVS_TYPE_RNDIS)) {
+ PMD_RX_LOG(ERR, "nvs type %u, not RNDIS",
+ nvs_hdr->type);
+ return;
+ }
+
+ hlen = vmbus_chanpkt_getlen(hdr->hlen);
+ if (unlikely(hlen < sizeof(*pkt))) {
+ PMD_RX_LOG(ERR, "invalid rxbuf chanpkt");
+ return;
+ }
+
+ pkt = container_of(hdr, const struct vmbus_chanpkt_rxbuf, hdr);
+ if (unlikely(pkt->rxbuf_id != NVS_RXBUF_SIG)) {
+ PMD_RX_LOG(ERR, "invalid rxbuf_id 0x%08x",
+ pkt->rxbuf_id);
+ return;
+ }
+
+ count = pkt->rxbuf_cnt;
+ if (unlikely(hlen < offsetof(struct vmbus_chanpkt_rxbuf,
+ rxbuf[count]))) {
+ PMD_RX_LOG(ERR, "invalid rxbuf_cnt %u", count);
+ return;
+ }
+
+ if (pkt->hdr.xactid > hv->rxbuf_section_cnt) {
+ PMD_RX_LOG(ERR, "invalid rxbuf section id %" PRIx64,
+ pkt->hdr.xactid);
+ return;
+ }
+
+ /* Setup receive buffer info to allow for callback */
+ rxb = hn_rx_buf_init(rxq, pkt);
+
+ /* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
+ for (i = 0; i < count; ++i) {
+ unsigned int ofs, len;
+
+ ofs = pkt->rxbuf[i].ofs;
+ len = pkt->rxbuf[i].len;
+
+ if (unlikely(ofs + len > rxbuf_sz)) {
+ PMD_RX_LOG(ERR,
+ "%uth RNDIS msg overflow ofs %u, len %u",
+ i, ofs, len);
+ continue;
+ }
+
+ if (unlikely(len == 0)) {
+ PMD_RX_LOG(ERR, "%uth RNDIS msg len %u", i, len);
+ continue;
+ }
+
+ hn_rndis_receive(dev, rxq, rxb,
+ rxbuf + ofs, len);
+ }
+
+ /* Send ACK now if external mbuf not used */
+ hn_rx_buf_release(rxb);
+}
+
+struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
+ uint16_t queue_id,
+ unsigned int socket_id)
+{
+ struct hn_rx_queue *rxq;
+
+ rxq = rte_zmalloc_socket("HN_RXQ",
+ sizeof(*rxq) + HN_RXQ_EVENT_DEFAULT,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq) {
+ rxq->hv = hv;
+ rxq->chan = hv->channels[queue_id];
+ rte_spinlock_init(&rxq->ring_lock);
+ rxq->port_id = hv->port_id;
+ rxq->queue_id = queue_id;
+ }
+ return rxq;
+}
+
+int
+hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ char ring_name[RTE_RING_NAMESIZE];
+ struct hn_rx_queue *rxq;
+ unsigned int count;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (queue_idx == 0) {
+ rxq = hv->primary;
+ } else {
+ rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id);
+ if (!rxq)
+ return -ENOMEM;
+ }
+
+ rxq->mb_pool = mp;
+ count = rte_mempool_avail_count(mp) / dev->data->nb_rx_queues;
+ if (nb_desc == 0 || nb_desc > count)
+ nb_desc = count;
+
+ /*
+ * Staging ring from receive event logic to rx_pkts.
+ * rx_pkts assumes caller is handling multi-thread issue.
+ * event logic has locking.
+ */
+ snprintf(ring_name, sizeof(ring_name),
+ "hn_rx_%u_%u", dev->data->port_id, queue_idx);
+ rxq->rx_ring = rte_ring_create(ring_name,
+ rte_align32pow2(nb_desc),
+ socket_id, 0);
+ if (!rxq->rx_ring)
+ goto fail;
+
+ dev->data->rx_queues[queue_idx] = rxq;
+ return 0;
+
+fail:
+ rte_ring_free(rxq->rx_ring);
+ rte_free(rxq->event_buf);
+ rte_free(rxq);
+ return -ENOMEM;
+}
+
+void
+hn_dev_rx_queue_release(void *arg)
+{
+ struct hn_rx_queue *rxq = arg;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!rxq)
+ return;
+
+ rte_ring_free(rxq->rx_ring);
+ rxq->rx_ring = NULL;
+ rxq->mb_pool = NULL;
+
+ if (rxq != rxq->hv->primary) {
+ rte_free(rxq->event_buf);
+ rte_free(rxq);
+ }
+}
+
+void
+hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct hn_rx_queue *rxq = dev->data->rx_queues[queue_idx];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = 1;
+ qinfo->nb_desc = rte_ring_get_capacity(rxq->rx_ring);
+}
+
+static void
+hn_nvs_handle_notify(const struct vmbus_chanpkt_hdr *pkthdr,
+ const void *data)
+{
+ const struct hn_nvs_hdr *hdr = data;
+
+ if (unlikely(vmbus_chanpkt_datalen(pkthdr) < sizeof(*hdr))) {
+ PMD_DRV_LOG(ERR, "invalid nvs notify");
+ return;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "got notify, nvs type %u", hdr->type);
+}
+
+/*
+ * Process pending events on the channel.
+ * Called from both Rx queue poll and Tx cleanup
+ */
+void hn_process_events(struct hn_data *hv, uint16_t queue_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id];
+ struct hn_rx_queue *rxq;
+ uint32_t bytes_read = 0;
+ int ret = 0;
+
+ rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id];
+
+ /* If no pending data then nothing to do */
+ if (rte_vmbus_chan_rx_empty(rxq->chan))
+ return;
+
+ /*
+ * Since channel is shared between Rx and TX queue need to have a lock
+ * since DPDK does not force same CPU to be used for Rx/Tx.
+ */
+ if (unlikely(!rte_spinlock_trylock(&rxq->ring_lock)))
+ return;
+
+ for (;;) {
+ const struct vmbus_chanpkt_hdr *pkt;
+ uint32_t len = HN_RXQ_EVENT_DEFAULT;
+ const void *data;
+
+ ret = rte_vmbus_chan_recv_raw(rxq->chan, rxq->event_buf, &len);
+ if (ret == -EAGAIN)
+ break; /* ring is empty */
+
+ else if (ret == -ENOBUFS)
+ rte_exit(EXIT_FAILURE, "event buffer not big enough (%u < %u)",
+ HN_RXQ_EVENT_DEFAULT, len);
+ else if (ret <= 0)
+ rte_exit(EXIT_FAILURE,
+ "vmbus ring buffer error: %d", ret);
+
+ bytes_read += ret;
+ pkt = (const struct vmbus_chanpkt_hdr *)rxq->event_buf;
+ data = (char *)rxq->event_buf + vmbus_chanpkt_getlen(pkt->hlen);
+
+ switch (pkt->type) {
+ case VMBUS_CHANPKT_TYPE_COMP:
+ hn_nvs_handle_comp(dev, queue_id, pkt, data);
+ break;
+
+ case VMBUS_CHANPKT_TYPE_RXBUF:
+ hn_nvs_handle_rxbuf(dev, hv, rxq, pkt, data);
+ break;
+
+ case VMBUS_CHANPKT_TYPE_INBAND:
+ hn_nvs_handle_notify(pkt, data);
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "unknown chan pkt %u", pkt->type);
+ break;
+ }
+
+ if (rxq->rx_ring && rte_ring_full(rxq->rx_ring))
+ break;
+ }
+
+ if (bytes_read > 0)
+ rte_vmbus_chan_signal_read(rxq->chan, bytes_read);
+
+ rte_spinlock_unlock(&rxq->ring_lock);
+}
+
+static void hn_append_to_chim(struct hn_tx_queue *txq,
+ struct rndis_packet_msg *pkt,
+ const struct rte_mbuf *m)
+{
+ struct hn_txdesc *txd = txq->agg_txd;
+ uint8_t *buf = (uint8_t *)pkt;
+ unsigned int data_offs;
+
+ hn_rndis_dump(pkt);
+
+ data_offs = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
+ txd->chim_size += pkt->len;
+ txd->data_size += m->pkt_len;
+ ++txd->packets;
+ hn_update_packet_stats(&txq->stats, m);
+
+ for (; m; m = m->next) {
+ uint16_t len = rte_pktmbuf_data_len(m);
+
+ rte_memcpy(buf + data_offs,
+ rte_pktmbuf_mtod(m, const char *), len);
+ data_offs += len;
+ }
+}
+
+/*
+ * Send pending aggregated data in chimney buffer (if any).
+ * Returns error if send was unsuccessful because channel ring buffer
+ * was full.
+ */
+static int hn_flush_txagg(struct hn_tx_queue *txq, bool *need_sig)
+
+{
+ struct hn_txdesc *txd = txq->agg_txd;
+ struct hn_nvs_rndis rndis;
+ int ret;
+
+ if (!txd)
+ return 0;
+
+ rndis = (struct hn_nvs_rndis) {
+ .type = NVS_TYPE_RNDIS,
+ .rndis_mtype = NVS_RNDIS_MTYPE_DATA,
+ .chim_idx = txd->chim_index,
+ .chim_sz = txd->chim_size,
+ };
+
+ PMD_TX_LOG(DEBUG, "port %u:%u tx %u size %u",
+ txq->port_id, txq->queue_id, txd->chim_index, txd->chim_size);
+
+ ret = hn_nvs_send(txq->chan, VMBUS_CHANPKT_FLAG_RC,
+ &rndis, sizeof(rndis), (uintptr_t)txd, need_sig);
+
+ if (likely(ret == 0))
+ hn_reset_txagg(txq);
+ else
+ PMD_TX_LOG(NOTICE, "port %u:%u send failed: %d",
+ txq->port_id, txq->queue_id, ret);
+
+ return ret;
+}
+
+static struct hn_txdesc *hn_new_txd(struct hn_data *hv,
+ struct hn_tx_queue *txq)
+{
+ struct hn_txdesc *txd;
+
+ if (rte_mempool_get(hv->tx_pool, (void **)&txd)) {
+ ++txq->stats.nomemory;
+ PMD_TX_LOG(DEBUG, "tx pool exhausted!");
+ return NULL;
+ }
+
+ txd->m = NULL;
+ txd->queue_id = txq->queue_id;
+ txd->packets = 0;
+ txd->data_size = 0;
+ txd->chim_size = 0;
+
+ return txd;
+}
+
+static void *
+hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq, uint32_t pktsize)
+{
+ struct hn_txdesc *agg_txd = txq->agg_txd;
+ struct rndis_packet_msg *pkt;
+ void *chim;
+
+ if (agg_txd) {
+ unsigned int padding, olen;
+
+ /*
+ * Update the previous RNDIS packet's total length,
+ * it can be increased due to the mandatory alignment
+ * padding for this RNDIS packet. And update the
+ * aggregating txdesc's chimney sending buffer size
+ * accordingly.
+ *
+ * Zero-out the padding, as required by the RNDIS spec.
+ */
+ pkt = txq->agg_prevpkt;
+ olen = pkt->len;
+ padding = RTE_ALIGN(olen, txq->agg_align) - olen;
+ if (padding > 0) {
+ agg_txd->chim_size += padding;
+ pkt->len += padding;
+ memset((uint8_t *)pkt + olen, 0, padding);
+ }
+
+ chim = (uint8_t *)pkt + pkt->len;
+
+ txq->agg_pktleft--;
+ txq->agg_szleft -= pktsize;
+ if (txq->agg_szleft < HN_PKTSIZE_MIN(txq->agg_align)) {
+ /*
+ * Probably can't aggregate more packets,
+ * flush this aggregating txdesc proactively.
+ */
+ txq->agg_pktleft = 0;
+ }
+ } else {
+ agg_txd = hn_new_txd(hv, txq);
+ if (!agg_txd)
+ return NULL;
+
+ chim = (uint8_t *)hv->chim_res->addr
+ + agg_txd->chim_index * hv->chim_szmax;
+
+ txq->agg_txd = agg_txd;
+ txq->agg_pktleft = txq->agg_pktmax - 1;
+ txq->agg_szleft = txq->agg_szmax - pktsize;
+ }
+ txq->agg_prevpkt = chim;
+
+ return chim;
+}
+
+static inline void *
+hn_rndis_pktinfo_append(struct rndis_packet_msg *pkt,
+ uint32_t pi_dlen, uint32_t pi_type)
+{
+ const uint32_t pi_size = RNDIS_PKTINFO_SIZE(pi_dlen);
+ struct rndis_pktinfo *pi;
+
+ /*
+ * Per-packet-info does not move; it only grows.
+ *
+ * NOTE:
+ * pktinfooffset in this phase counts from the beginning
+ * of rndis_packet_msg.
+ */
+ pi = (struct rndis_pktinfo *)((uint8_t *)pkt + hn_rndis_pktlen(pkt));
+
+ pkt->pktinfolen += pi_size;
+
+ pi->size = pi_size;
+ pi->type = pi_type;
+ pi->offset = RNDIS_PKTINFO_OFFSET;
+
+ return pi->data;
+}
+
+/* Put RNDIS header and packet info on packet */
+static void hn_encap(struct rndis_packet_msg *pkt,
+ uint16_t queue_id,
+ const struct rte_mbuf *m)
+{
+ unsigned int hlen = m->l2_len + m->l3_len;
+ uint32_t *pi_data;
+ uint32_t pkt_hlen;
+
+ pkt->type = RNDIS_PACKET_MSG;
+ pkt->len = m->pkt_len;
+ pkt->dataoffset = 0;
+ pkt->datalen = m->pkt_len;
+ pkt->oobdataoffset = 0;
+ pkt->oobdatalen = 0;
+ pkt->oobdataelements = 0;
+ pkt->pktinfooffset = sizeof(*pkt);
+ pkt->pktinfolen = 0;
+ pkt->vchandle = 0;
+ pkt->reserved = 0;
+
+ /*
+ * Set the hash value for this packet, to the queue_id to cause
+ * TX done event for this packet on the right channel.
+ */
+ pi_data = hn_rndis_pktinfo_append(pkt, NDIS_HASH_VALUE_SIZE,
+ NDIS_PKTINFO_TYPE_HASHVAL);
+ *pi_data = queue_id;
+
+ if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
+ NDIS_PKTINFO_TYPE_VLAN);
+ *pi_data = m->vlan_tci;
+ }
+
+ if (m->ol_flags & PKT_TX_TCP_SEG) {
+ pi_data = hn_rndis_pktinfo_append(pkt, NDIS_LSO2_INFO_SIZE,
+ NDIS_PKTINFO_TYPE_LSO);
+
+ if (m->ol_flags & PKT_TX_IPV6) {
+ *pi_data = NDIS_LSO2_INFO_MAKEIPV6(hlen,
+ m->tso_segsz);
+ } else {
+ *pi_data = NDIS_LSO2_INFO_MAKEIPV4(hlen,
+ m->tso_segsz);
+ }
+ } else if (m->ol_flags &
+ (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)) {
+ pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE,
+ NDIS_PKTINFO_TYPE_CSUM);
+ *pi_data = 0;
+
+ if (m->ol_flags & PKT_TX_IPV6)
+ *pi_data |= NDIS_TXCSUM_INFO_IPV6;
+ if (m->ol_flags & PKT_TX_IPV4) {
+ *pi_data |= NDIS_TXCSUM_INFO_IPV4;
+
+ if (m->ol_flags & PKT_TX_IP_CKSUM)
+ *pi_data |= NDIS_TXCSUM_INFO_IPCS;
+ }
+
+ if (m->ol_flags & PKT_TX_TCP_CKSUM)
+ *pi_data |= NDIS_TXCSUM_INFO_MKTCPCS(hlen);
+ else if (m->ol_flags & PKT_TX_UDP_CKSUM)
+ *pi_data |= NDIS_TXCSUM_INFO_MKUDPCS(hlen);
+ }
+
+ pkt_hlen = pkt->pktinfooffset + pkt->pktinfolen;
+ /* Fixup RNDIS packet message total length */
+ pkt->len += pkt_hlen;
+
+ /* Convert RNDIS packet message offsets */
+ pkt->dataoffset = hn_rndis_pktmsg_offset(pkt_hlen);
+ pkt->pktinfooffset = hn_rndis_pktmsg_offset(pkt->pktinfooffset);
+}
+
+/* How many scatter gather list elements ar needed */
+static unsigned int hn_get_slots(const struct rte_mbuf *m)
+{
+ unsigned int slots = 1; /* for RNDIS header */
+
+ while (m) {
+ unsigned int size = rte_pktmbuf_data_len(m);
+ unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;
+
+ slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;
+ m = m->next;
+ }
+
+ return slots;
+}
+
+/* Build scatter gather list from chained mbuf */
+static unsigned int hn_fill_sg(struct vmbus_gpa *sg,
+ const struct rte_mbuf *m)
+{
+ unsigned int segs = 0;
+
+ while (m) {
+ rte_iova_t addr = rte_mbuf_data_iova(m);
+ unsigned int page = addr / PAGE_SIZE;
+ unsigned int offset = addr & PAGE_MASK;
+ unsigned int len = rte_pktmbuf_data_len(m);
+
+ while (len > 0) {
+ unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset);
+
+ sg[segs].page = page;
+ sg[segs].ofs = offset;
+ sg[segs].len = bytes;
+ segs++;
+
+ ++page;
+ offset = 0;
+ len -= bytes;
+ }
+ m = m->next;
+ }
+
+ return segs;
+}
+
+/* Transmit directly from mbuf */
+static int hn_xmit_sg(struct hn_tx_queue *txq,
+ const struct hn_txdesc *txd, const struct rte_mbuf *m,
+ bool *need_sig)
+{
+ struct vmbus_gpa sg[hn_get_slots(m)];
+ struct hn_nvs_rndis nvs_rndis = {
+ .type = NVS_TYPE_RNDIS,
+ .rndis_mtype = NVS_RNDIS_MTYPE_DATA,
+ .chim_sz = txd->chim_size,
+ };
+ rte_iova_t addr;
+ unsigned int segs;
+
+ /* attach aggregation data if present */
+ if (txd->chim_size > 0)
+ nvs_rndis.chim_idx = txd->chim_index;
+ else
+ nvs_rndis.chim_idx = NVS_CHIM_IDX_INVALID;
+
+ hn_rndis_dump(txd->rndis_pkt);
+
+ /* pass IOVA of rndis header in first segment */
+ addr = rte_malloc_virt2iova(txd->rndis_pkt);
+ if (unlikely(addr == RTE_BAD_IOVA)) {
+ PMD_DRV_LOG(ERR, "RNDIS transmit can not get iova");
+ return -EINVAL;
+ }
+
+ sg[0].page = addr / PAGE_SIZE;
+ sg[0].ofs = addr & PAGE_MASK;
+ sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
+ segs = 1;
+
+ hn_update_packet_stats(&txq->stats, m);
+
+ segs += hn_fill_sg(sg + 1, m);
+
+ PMD_TX_LOG(DEBUG, "port %u:%u tx %u segs %u size %u",
+ txq->port_id, txq->queue_id, txd->chim_index,
+ segs, nvs_rndis.chim_sz);
+
+ return hn_nvs_send_sglist(txq->chan, sg, segs,
+ &nvs_rndis, sizeof(nvs_rndis),
+ (uintptr_t)txd, need_sig);
+}
+
+uint16_t
+hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct hn_tx_queue *txq = ptxq;
+ struct hn_data *hv = txq->hv;
+ bool need_sig = false;
+ uint16_t nb_tx;
+ int ret;
+
+ if (unlikely(hv->closed))
+ return 0;
+
+ if (rte_mempool_avail_count(hv->tx_pool) <= txq->free_thresh)
+ hn_process_events(hv, txq->queue_id);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *m = tx_pkts[nb_tx];
+ uint32_t pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN;
+ struct rndis_packet_msg *pkt;
+
+ /* For small packets aggregate them in chimney buffer */
+ if (m->pkt_len < HN_TXCOPY_THRESHOLD && pkt_size <= txq->agg_szmax) {
+ /* If this packet will not fit, then flush */
+ if (txq->agg_pktleft == 0 ||
+ RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) {
+ if (hn_flush_txagg(txq, &need_sig))
+ goto fail;
+ }
+
+ pkt = hn_try_txagg(hv, txq, pkt_size);
+ if (unlikely(!pkt))
+ break;
+
+ hn_encap(pkt, txq->queue_id, m);
+ hn_append_to_chim(txq, pkt, m);
+
+ rte_pktmbuf_free(m);
+
+ /* if buffer is full, flush */
+ if (txq->agg_pktleft == 0 &&
+ hn_flush_txagg(txq, &need_sig))
+ goto fail;
+ } else {
+ struct hn_txdesc *txd;
+
+ /* can send chimney data and large packet at once */
+ txd = txq->agg_txd;
+ if (txd) {
+ hn_reset_txagg(txq);
+ } else {
+ txd = hn_new_txd(hv, txq);
+ if (unlikely(!txd))
+ break;
+ }
+
+ pkt = txd->rndis_pkt;
+ txd->m = m;
+ txd->data_size += m->pkt_len;
+ ++txd->packets;
+
+ hn_encap(pkt, txq->queue_id, m);
+
+ ret = hn_xmit_sg(txq, txd, m, &need_sig);
+ if (unlikely(ret != 0)) {
+ PMD_TX_LOG(NOTICE, "sg send failed: %d", ret);
+ ++txq->stats.errors;
+ rte_mempool_put(hv->tx_pool, txd);
+ goto fail;
+ }
+ }
+ }
+
+ /* If partial buffer left, then try and send it.
+ * if that fails, then reuse it on next send.
+ */
+ hn_flush_txagg(txq, &need_sig);
+
+fail:
+ if (need_sig)
+ rte_vmbus_chan_signal_tx(txq->chan);
+
+ return nb_tx;
+}
+
+uint16_t
+hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct hn_rx_queue *rxq = prxq;
+ struct hn_data *hv = rxq->hv;
+
+ if (unlikely(hv->closed))
+ return 0;
+
+ /* If ring is empty then process more */
+ if (rte_ring_count(rxq->rx_ring) < nb_pkts)
+ hn_process_events(hv, rxq->queue_id);
+
+ /* Get mbufs off staging ring */
+ return rte_ring_sc_dequeue_burst(rxq->rx_ring, (void **)rx_pkts,
+ nb_pkts, NULL);
+}
diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_var.h b/src/spdk/dpdk/drivers/net/netvsc/hn_var.h
new file mode 100644
index 00000000..f7ff8585
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/netvsc/hn_var.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2009-2018 Microsoft Corp.
+ * Copyright (c) 2016 Brocade Communications Systems, Inc.
+ * Copyright (c) 2012 NetApp Inc.
+ * Copyright (c) 2012 Citrix Inc.
+ * All rights reserved.
+ */
+
+/*
+ * Tunable ethdev params
+ */
+#define HN_MIN_RX_BUF_SIZE 1024
+#define HN_MAX_XFER_LEN 2048
+#define HN_MAX_MAC_ADDRS 1
+#define HN_MAX_CHANNELS 64
+
+/* Claimed to be 12232B */
+#define HN_MTU_MAX (9 * 1024)
+
+/* Retry interval */
+#define HN_CHAN_INTERVAL_US 100
+
+/* Buffers need to be aligned */
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#ifndef PAGE_MASK
+#define PAGE_MASK (PAGE_SIZE - 1)
+#endif
+
+struct hn_data;
+struct hn_txdesc;
+
+struct hn_stats {
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+ uint64_t nomemory;
+ uint64_t multicast;
+ uint64_t broadcast;
+ /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
+ uint64_t size_bins[8];
+};
+
+struct hn_tx_queue {
+ struct hn_data *hv;
+ struct vmbus_channel *chan;
+ uint16_t port_id;
+ uint16_t queue_id;
+ uint32_t free_thresh;
+
+ /* Applied packet transmission aggregation limits. */
+ uint32_t agg_szmax;
+ uint32_t agg_pktmax;
+ uint32_t agg_align;
+
+ /* Packet transmission aggregation states */
+ struct hn_txdesc *agg_txd;
+ uint32_t agg_pktleft;
+ uint32_t agg_szleft;
+ struct rndis_packet_msg *agg_prevpkt;
+
+ struct hn_stats stats;
+};
+
+struct hn_rx_queue {
+ struct hn_data *hv;
+ struct vmbus_channel *chan;
+ struct rte_mempool *mb_pool;
+ struct rte_ring *rx_ring;
+
+ rte_spinlock_t ring_lock;
+ uint32_t event_sz;
+ uint16_t port_id;
+ uint16_t queue_id;
+ struct hn_stats stats;
+ uint64_t ring_full;
+
+ uint8_t event_buf[];
+};
+
+
+/* multi-packet data from host */
+struct hn_rx_bufinfo {
+ struct vmbus_channel *chan;
+ struct hn_data *hv;
+ uint64_t xactid;
+ struct rte_mbuf_ext_shared_info shinfo;
+} __rte_cache_aligned;
+
+struct hn_data {
+ struct rte_vmbus_device *vmbus;
+ struct hn_rx_queue *primary;
+ uint16_t port_id;
+ bool closed;
+ uint32_t link_status;
+ uint32_t link_speed;
+
+ struct rte_mem_resource *rxbuf_res; /* UIO resource for Rx */
+ struct hn_rx_bufinfo *rxbuf_info;
+ uint32_t rxbuf_section_cnt; /* # of Rx sections */
+ volatile uint32_t rxbuf_outstanding;
+ uint16_t max_queues; /* Max available queues */
+ uint16_t num_queues;
+ uint64_t rss_offloads;
+
+ struct rte_mem_resource *chim_res; /* UIO resource for Tx */
+ struct rte_mempool *tx_pool; /* Tx descriptors */
+ uint32_t chim_szmax; /* Max size per buffer */
+ uint32_t chim_cnt; /* Max packets per buffer */
+
+ uint32_t nvs_ver;
+ uint32_t ndis_ver;
+ uint32_t rndis_agg_size;
+ uint32_t rndis_agg_pkts;
+ uint32_t rndis_agg_align;
+
+ volatile uint32_t rndis_pending;
+ rte_atomic32_t rndis_req_id;
+ uint8_t rndis_resp[256];
+
+ struct ether_addr mac_addr;
+ struct vmbus_channel *channels[HN_MAX_CHANNELS];
+};
+
+static inline struct vmbus_channel *
+hn_primary_chan(const struct hn_data *hv)
+{
+ return hv->channels[0];
+}
+
+void hn_process_events(struct hn_data *hv, uint16_t queue_id);
+
+uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+int hn_tx_pool_init(struct rte_eth_dev *dev);
+int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void hn_dev_tx_queue_release(void *arg);
+void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+ struct rte_eth_txq_info *qinfo);
+
+struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
+ uint16_t queue_id,
+ unsigned int socket_id);
+int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+void hn_dev_rx_queue_release(void *arg);
+void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+ struct rte_eth_rxq_info *qinfo);
diff --git a/src/spdk/dpdk/drivers/net/netvsc/meson.build b/src/spdk/dpdk/drivers/net/netvsc/meson.build
new file mode 100644
index 00000000..a717cdd4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/netvsc/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Microsoft Corporation
+
+build = dpdk_conf.has('RTE_LIBRTE_VMBUS_BUS')
+version = 2
+sources = files('hn_ethdev.c', 'hn_rxtx.c', 'hn_rndis.c', 'hn_nvs.c')
+
+deps += ['bus_vmbus' ]
+
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/net/netvsc/ndis.h b/src/spdk/dpdk/drivers/net/netvsc/ndis.h
new file mode 100644
index 00000000..2e7ca99b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/netvsc/ndis.h
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * All rights reserved.
+ */
+
+#ifndef _NET_NDIS_H_
+#define _NET_NDIS_H_
+
+#define NDIS_MEDIA_STATE_CONNECTED 0
+#define NDIS_MEDIA_STATE_DISCONNECTED 1
+
+#define NDIS_NETCHANGE_TYPE_POSSIBLE 1
+#define NDIS_NETCHANGE_TYPE_DEFINITE 2
+#define NDIS_NETCHANGE_TYPE_FROMMEDIA 3
+
+#define NDIS_OFFLOAD_SET_NOCHG 0
+#define NDIS_OFFLOAD_SET_ON 1
+#define NDIS_OFFLOAD_SET_OFF 2
+
+/* a.k.a GRE MAC */
+#define NDIS_ENCAP_TYPE_NVGRE 0x00000001
+
+#define NDIS_HASH_FUNCTION_MASK 0x000000FF /* see hash function */
+#define NDIS_HASH_TYPE_MASK 0x00FFFF00 /* see hash type */
+
+/* hash function */
+#define NDIS_HASH_FUNCTION_TOEPLITZ 0x00000001
+
+/* hash type */
+#define NDIS_HASH_IPV4 0x00000100
+#define NDIS_HASH_TCP_IPV4 0x00000200
+#define NDIS_HASH_IPV6 0x00000400
+#define NDIS_HASH_IPV6_EX 0x00000800
+#define NDIS_HASH_TCP_IPV6 0x00001000
+#define NDIS_HASH_TCP_IPV6_EX 0x00002000
+
+#define NDIS_HASH_KEYSIZE_TOEPLITZ 40
+#define NDIS_HASH_INDCNT 128
+
+#define NDIS_OBJTYPE_DEFAULT 0x80
+#define NDIS_OBJTYPE_RSS_CAPS 0x88
+#define NDIS_OBJTYPE_RSS_PARAMS 0x89
+#define NDIS_OBJTYPE_OFFLOAD 0xa7
+
+struct ndis_object_hdr {
+ uint8_t ndis_type; /* NDIS_OBJTYPE_ */
+ uint8_t ndis_rev; /* type specific */
+ uint16_t ndis_size; /* incl. this hdr */
+} __rte_packed;
+
+/*
+ * OID_TCP_OFFLOAD_PARAMETERS
+ * ndis_type: NDIS_OBJTYPE_DEFAULT
+ */
+struct ndis_offload_params {
+ struct ndis_object_hdr ndis_hdr;
+ uint8_t ndis_ip4csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_tcp4csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_udp4csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_tcp6csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_udp6csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_lsov1; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_ipsecv1; /* NDIS_OFFLOAD_IPSECV1_ */
+ uint8_t ndis_lsov2_ip4; /* NDIS_OFFLOAD_LSOV2_ */
+ uint8_t ndis_lsov2_ip6; /* NDIS_OFFLOAD_LSOV2_ */
+ uint8_t ndis_tcp4conn; /* 0 */
+ uint8_t ndis_tcp6conn; /* 0 */
+ uint32_t ndis_flags; /* 0 */
+ /* NDIS >= 6.1 */
+ uint8_t ndis_ipsecv2; /* NDIS_OFFLOAD_IPSECV2_ */
+ uint8_t ndis_ipsecv2_ip4;/* NDIS_OFFLOAD_IPSECV2_ */
+ /* NDIS >= 6.30 */
+ uint8_t ndis_rsc_ip4; /* NDIS_OFFLOAD_RSC_ */
+ uint8_t ndis_rsc_ip6; /* NDIS_OFFLOAD_RSC_ */
+ uint8_t ndis_encap; /* NDIS_OFFLOAD_SET_ */
+ uint8_t ndis_encap_types;/* NDIS_ENCAP_TYPE_ */
+};
+
+#define NDIS_OFFLOAD_PARAMS_SIZE sizeof(struct ndis_offload_params)
+#define NDIS_OFFLOAD_PARAMS_SIZE_6_1 \
+ offsetof(struct ndis_offload_params, ndis_rsc_ip4)
+
+#define NDIS_OFFLOAD_PARAMS_REV_2 2 /* NDIS 6.1 */
+#define NDIS_OFFLOAD_PARAMS_REV_3 3 /* NDIS 6.30 */
+
+#define NDIS_OFFLOAD_PARAM_NOCHG 0 /* common */
+#define NDIS_OFFLOAD_PARAM_OFF 1
+#define NDIS_OFFLOAD_PARAM_TX 2
+#define NDIS_OFFLOAD_PARAM_RX 3
+#define NDIS_OFFLOAD_PARAM_TXRX 4
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_LSOV1_OFF 1
+#define NDIS_OFFLOAD_LSOV1_ON 2
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_IPSECV1_OFF 1
+#define NDIS_OFFLOAD_IPSECV1_AH 2
+#define NDIS_OFFLOAD_IPSECV1_ESP 3
+#define NDIS_OFFLOAD_IPSECV1_AH_ESP 4
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_LSOV2_OFF 1
+#define NDIS_OFFLOAD_LSOV2_ON 2
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_IPSECV2_OFF 1
+#define NDIS_OFFLOAD_IPSECV2_AH 2
+#define NDIS_OFFLOAD_IPSECV2_ESP 3
+#define NDIS_OFFLOAD_IPSECV2_AH_ESP 4
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_RSC_OFF 1
+#define NDIS_OFFLOAD_RSC_ON 2
+
+/*
+ * OID_GEN_RECEIVE_SCALE_CAPABILITIES
+ * ndis_type: NDIS_OBJTYPE_RSS_CAPS
+ */
+struct ndis_rss_caps {
+ struct ndis_object_hdr ndis_hdr;
+ uint32_t ndis_caps; /* NDIS_RSS_CAP_ */
+ uint32_t ndis_nmsi; /* # of MSIs */
+ uint32_t ndis_nrxr; /* # of RX rings */
+ /* NDIS >= 6.30 */
+ uint16_t ndis_nind; /* # of indtbl ent. */
+ uint16_t ndis_pad;
+} __rte_packed;
+
+#define NDIS_RSS_CAPS_SIZE \
+ offsetof(struct ndis_rss_caps, ndis_pad)
+#define NDIS_RSS_CAPS_SIZE_6_0 \
+ offsetof(struct ndis_rss_caps, ndis_nind)
+
+#define NDIS_RSS_CAPS_REV_1 1 /* NDIS 6.{0,1,20} */
+#define NDIS_RSS_CAPS_REV_2 2 /* NDIS 6.30 */
+
+#define NDIS_RSS_CAP_MSI 0x01000000
+#define NDIS_RSS_CAP_CLASSIFY_ISR 0x02000000
+#define NDIS_RSS_CAP_CLASSIFY_DPC 0x04000000
+#define NDIS_RSS_CAP_MSIX 0x08000000
+#define NDIS_RSS_CAP_IPV4 0x00000100
+#define NDIS_RSS_CAP_IPV6 0x00000200
+#define NDIS_RSS_CAP_IPV6_EX 0x00000400
+#define NDIS_RSS_CAP_HASH_TOEPLITZ NDIS_HASH_FUNCTION_TOEPLITZ
+#define NDIS_RSS_CAP_HASHFUNC_MASK NDIS_HASH_FUNCTION_MASK
+
+/*
+ * OID_GEN_RECEIVE_SCALE_PARAMETERS
+ * ndis_type: NDIS_OBJTYPE_RSS_PARAMS
+ */
+struct ndis_rss_params {
+ struct ndis_object_hdr ndis_hdr;
+ uint16_t ndis_flags; /* NDIS_RSS_FLAG_ */
+ uint16_t ndis_bcpu; /* base cpu 0 */
+ uint32_t ndis_hash; /* NDIS_HASH_ */
+ uint16_t ndis_indsize; /* indirect table */
+ uint32_t ndis_indoffset;
+ uint16_t ndis_keysize; /* hash key */
+ uint32_t ndis_keyoffset;
+ /* NDIS >= 6.20 */
+ uint32_t ndis_cpumaskoffset;
+ uint32_t ndis_cpumaskcnt;
+ uint32_t ndis_cpumaskentsz;
+};
+
+#define NDIS_RSS_PARAMS_SIZE sizeof(struct ndis_rss_params)
+#define NDIS_RSS_PARAMS_SIZE_6_0 \
+ offsetof(struct ndis_rss_params, ndis_cpumaskoffset)
+
+#define NDIS_RSS_PARAMS_REV_1 1 /* NDIS 6.0 */
+#define NDIS_RSS_PARAMS_REV_2 2 /* NDIS 6.20 */
+
+#define NDIS_RSS_FLAG_NONE 0x0000
+#define NDIS_RSS_FLAG_BCPU_UNCHG 0x0001
+#define NDIS_RSS_FLAG_HASH_UNCHG 0x0002
+#define NDIS_RSS_FLAG_IND_UNCHG 0x0004
+#define NDIS_RSS_FLAG_KEY_UNCHG 0x0008
+#define NDIS_RSS_FLAG_DISABLE 0x0010
+
+/* non-standard convenient struct */
+struct ndis_rssprm_toeplitz {
+ struct ndis_rss_params rss_params;
+ /* Indirect table */
+ uint32_t rss_ind[NDIS_HASH_INDCNT];
+ /* Toeplitz hash key */
+ uint8_t rss_key[NDIS_HASH_KEYSIZE_TOEPLITZ];
+};
+
+#define NDIS_RSSPRM_TOEPLITZ_SIZE(nind) \
+ offsetof(struct ndis_rssprm_toeplitz, rss_ind[nind])
+
+/*
+ * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES
+ * ndis_type: NDIS_OBJTYPE_OFFLOAD
+ */
+
+#define NDIS_OFFLOAD_ENCAP_NONE 0x0000
+#define NDIS_OFFLOAD_ENCAP_NULL 0x0001
+#define NDIS_OFFLOAD_ENCAP_8023 0x0002
+#define NDIS_OFFLOAD_ENCAP_8023PQ 0x0004
+#define NDIS_OFFLOAD_ENCAP_8023PQ_OOB 0x0008
+#define NDIS_OFFLOAD_ENCAP_RFC1483 0x0010
+
+struct ndis_csum_offload {
+ uint32_t ndis_ip4_txenc; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip4_txcsum;
+#define NDIS_TXCSUM_CAP_IP4OPT 0x001
+#define NDIS_TXCSUM_CAP_TCP4OPT 0x004
+#define NDIS_TXCSUM_CAP_TCP4 0x010
+#define NDIS_TXCSUM_CAP_UDP4 0x040
+#define NDIS_TXCSUM_CAP_IP4 0x100
+ uint32_t ndis_ip4_rxenc; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip4_rxcsum;
+#define NDIS_RXCSUM_CAP_IP4OPT 0x001
+#define NDIS_RXCSUM_CAP_TCP4OPT 0x004
+#define NDIS_RXCSUM_CAP_TCP4 0x010
+#define NDIS_RXCSUM_CAP_UDP4 0x040
+#define NDIS_RXCSUM_CAP_IP4 0x100
+ uint32_t ndis_ip6_txenc; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip6_txcsum;
+#define NDIS_TXCSUM_CAP_IP6EXT 0x001
+#define NDIS_TXCSUM_CAP_TCP6OPT 0x004
+#define NDIS_TXCSUM_CAP_TCP6 0x010
+#define NDIS_TXCSUM_CAP_UDP6 0x040
+ uint32_t ndis_ip6_rxenc; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip6_rxcsum;
+#define NDIS_RXCSUM_CAP_IP6EXT 0x001
+#define NDIS_RXCSUM_CAP_TCP6OPT 0x004
+#define NDIS_RXCSUM_CAP_TCP6 0x010
+#define NDIS_RXCSUM_CAP_UDP6 0x040
+};
+
+struct ndis_lsov1_offload {
+ uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_maxsize;
+ uint32_t ndis_minsegs;
+ uint32_t ndis_opts;
+};
+
+struct ndis_ipsecv1_offload {
+ uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ah_esp;
+ uint32_t ndis_xport_tun;
+ uint32_t ndis_ip4_opts;
+ uint32_t ndis_flags;
+ uint32_t ndis_ip4_ah;
+ uint32_t ndis_ip4_esp;
+};
+
+struct ndis_lsov2_offload {
+ uint32_t ndis_ip4_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip4_maxsz;
+ uint32_t ndis_ip4_minsg;
+ uint32_t ndis_ip6_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip6_maxsz;
+ uint32_t ndis_ip6_minsg;
+ uint32_t ndis_ip6_opts;
+#define NDIS_LSOV2_CAP_IP6EXT 0x001
+#define NDIS_LSOV2_CAP_TCP6OPT 0x004
+};
+
+struct ndis_ipsecv2_offload {
+ uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint16_t ndis_ip6;
+ uint16_t ndis_ip4opt;
+ uint16_t ndis_ip6ext;
+ uint16_t ndis_ah;
+ uint16_t ndis_esp;
+ uint16_t ndis_ah_esp;
+ uint16_t ndis_xport;
+ uint16_t ndis_tun;
+ uint16_t ndis_xport_tun;
+ uint16_t ndis_lso;
+ uint16_t ndis_extseq;
+ uint32_t ndis_udp_esp;
+ uint32_t ndis_auth;
+ uint32_t ndis_crypto;
+ uint32_t ndis_sa_caps;
+};
+
+struct ndis_rsc_offload {
+ uint16_t ndis_ip4;
+ uint16_t ndis_ip6;
+};
+
+struct ndis_encap_offload {
+ uint32_t ndis_flags;
+ uint32_t ndis_maxhdr;
+};
+
+struct ndis_offload {
+ struct ndis_object_hdr ndis_hdr;
+ struct ndis_csum_offload ndis_csum;
+ struct ndis_lsov1_offload ndis_lsov1;
+ struct ndis_ipsecv1_offload ndis_ipsecv1;
+ struct ndis_lsov2_offload ndis_lsov2;
+ uint32_t ndis_flags;
+ /* NDIS >= 6.1 */
+ struct ndis_ipsecv2_offload ndis_ipsecv2;
+ /* NDIS >= 6.30 */
+ struct ndis_rsc_offload ndis_rsc;
+ struct ndis_encap_offload ndis_encap_gre;
+};
+
+#define NDIS_OFFLOAD_SIZE sizeof(struct ndis_offload)
+#define NDIS_OFFLOAD_SIZE_6_0 offsetof(struct ndis_offload, ndis_ipsecv2)
+#define NDIS_OFFLOAD_SIZE_6_1 offsetof(struct ndis_offload, ndis_rsc)
+
+#define NDIS_OFFLOAD_REV_1 1 /* NDIS 6.0 */
+#define NDIS_OFFLOAD_REV_2 2 /* NDIS 6.1 */
+#define NDIS_OFFLOAD_REV_3 3 /* NDIS 6.30 */
+
+/*
+ * Per-packet-info
+ */
+
+/* VLAN */
+#define NDIS_VLAN_INFO_SIZE sizeof(uint32_t)
+#define NDIS_VLAN_INFO_PRI_MASK 0x0007
+#define NDIS_VLAN_INFO_CFI_MASK 0x0008
+#define NDIS_VLAN_INFO_ID_MASK 0xfff0
+#define NDIS_VLAN_INFO_MAKE(id, pri, cfi) \
+ (((pri) & NDIS_VLAN_INFO_PRI_MASK) | \
+ (((cfi) & 0x1) << 3) | (((id) & 0xfff) << 4))
+#define NDIS_VLAN_INFO_ID(inf) (((inf) & NDIS_VLAN_INFO_ID_MASK) >> 4)
+#define NDIS_VLAN_INFO_CFI(inf) (((inf) & NDIS_VLAN_INFO_CFI_MASK) >> 3)
+#define NDIS_VLAN_INFO_PRI(inf) ((inf) & NDIS_VLAN_INFO_PRI_MASK)
+
+/* Reception checksum */
+#define NDIS_RXCSUM_INFO_SIZE sizeof(uint32_t)
+#define NDIS_RXCSUM_INFO_TCPCS_FAILED 0x0001
+#define NDIS_RXCSUM_INFO_UDPCS_FAILED 0x0002
+#define NDIS_RXCSUM_INFO_IPCS_FAILED 0x0004
+#define NDIS_RXCSUM_INFO_TCPCS_OK 0x0008
+#define NDIS_RXCSUM_INFO_UDPCS_OK 0x0010
+#define NDIS_RXCSUM_INFO_IPCS_OK 0x0020
+#define NDIS_RXCSUM_INFO_LOOPBACK 0x0040
+#define NDIS_RXCSUM_INFO_TCPCS_INVAL 0x0080
+#define NDIS_RXCSUM_INFO_IPCS_INVAL 0x0100
+
+/* LSOv2 */
+#define NDIS_LSO2_INFO_SIZE sizeof(uint32_t)
+#define NDIS_LSO2_INFO_MSS_MASK 0x000fffff
+#define NDIS_LSO2_INFO_THOFF_MASK 0x3ff00000
+#define NDIS_LSO2_INFO_ISLSO2 0x40000000
+#define NDIS_LSO2_INFO_ISIPV6 0x80000000
+
+#define NDIS_LSO2_INFO_MAKE(thoff, mss) \
+ ((((uint32_t)(mss)) & NDIS_LSO2_INFO_MSS_MASK) | \
+ ((((uint32_t)(thoff)) & 0x3ff) << 20) | \
+ NDIS_LSO2_INFO_ISLSO2)
+
+#define NDIS_LSO2_INFO_MAKEIPV4(thoff, mss) \
+ NDIS_LSO2_INFO_MAKE((thoff), (mss))
+
+#define NDIS_LSO2_INFO_MAKEIPV6(thoff, mss) \
+ (NDIS_LSO2_INFO_MAKE((thoff), (mss)) | NDIS_LSO2_INFO_ISIPV6)
+
+/* Transmission checksum */
+#define NDIS_TXCSUM_INFO_SIZE sizeof(uint32_t)
+#define NDIS_TXCSUM_INFO_IPV4 0x00000001
+#define NDIS_TXCSUM_INFO_IPV6 0x00000002
+#define NDIS_TXCSUM_INFO_TCPCS 0x00000004
+#define NDIS_TXCSUM_INFO_UDPCS 0x00000008
+#define NDIS_TXCSUM_INFO_IPCS 0x00000010
+#define NDIS_TXCSUM_INFO_THOFF 0x03ff0000
+
+#define NDIS_TXCSUM_INFO_MKL4CS(thoff, flag) \
+ ((((uint32_t)(thoff)) << 16) | (flag))
+
+#define NDIS_TXCSUM_INFO_MKTCPCS(thoff) \
+ NDIS_TXCSUM_INFO_MKL4CS((thoff), NDIS_TXCSUM_INFO_TCPCS)
+
+#define NDIS_TXCSUM_INFO_MKUDPCS(thoff) \
+ NDIS_TXCSUM_INFO_MKL4CS((thoff), NDIS_TXCSUM_INFO_UDPCS)
+
+#endif /* !_NET_NDIS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/netvsc/rndis.h b/src/spdk/dpdk/drivers/net/netvsc/rndis.h
new file mode 100644
index 00000000..eac9a99f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/netvsc/rndis.h
@@ -0,0 +1,414 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * Copyright (c) 2010 Jonathan Armani <armani@openbsd.org>
+ * Copyright (c) 2010 Fabien Romano <fabien@openbsd.org>
+ * Copyright (c) 2010 Michael Knudsen <mk@openbsd.org>
+ * All rights reserved.
+ */
+
+#ifndef _NET_RNDIS_H_
+#define _NET_RNDIS_H_
+
+/* Canonical major/minor version as of 22th Aug. 2016. */
+#define RNDIS_VERSION_MAJOR 0x00000001
+#define RNDIS_VERSION_MINOR 0x00000000
+
+#define RNDIS_STATUS_SUCCESS 0x00000000
+#define RNDIS_STATUS_PENDING 0x00000103
+
+#define RNDIS_STATUS_ONLINE 0x40010003
+#define RNDIS_STATUS_RESET_START 0x40010004
+#define RNDIS_STATUS_RESET_END 0x40010005
+#define RNDIS_STATUS_RING_STATUS 0x40010006
+#define RNDIS_STATUS_CLOSED 0x40010007
+#define RNDIS_STATUS_WAN_LINE_UP 0x40010008
+#define RNDIS_STATUS_WAN_LINE_DOWN 0x40010009
+#define RNDIS_STATUS_WAN_FRAGMENT 0x4001000A
+#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000B
+#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000C
+#define RNDIS_STATUS_HARDWARE_LINE_UP 0x4001000D
+#define RNDIS_STATUS_HARDWARE_LINE_DOWN 0x4001000E
+#define RNDIS_STATUS_INTERFACE_UP 0x4001000F
+#define RNDIS_STATUS_INTERFACE_DOWN 0x40010010
+#define RNDIS_STATUS_MEDIA_BUSY 0x40010011
+#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION 0x40010012
+#define RNDIS_STATUS_WW_INDICATION RDIA_SPECIFIC_INDICATION
+#define RNDIS_STATUS_LINK_SPEED_CHANGE 0x40010013
+#define RNDIS_STATUS_NETWORK_CHANGE 0x40010018
+#define RNDIS_STATUS_TASK_OFFLOAD_CURRENT_CONFIG 0x40020006
+
+#define RNDIS_STATUS_FAILURE 0xC0000001
+#define RNDIS_STATUS_RESOURCES 0xC000009A
+#define RNDIS_STATUS_NOT_SUPPORTED 0xC00000BB
+#define RNDIS_STATUS_CLOSING 0xC0010002
+#define RNDIS_STATUS_BAD_VERSION 0xC0010004
+#define RNDIS_STATUS_BAD_CHARACTERISTICS 0xC0010005
+#define RNDIS_STATUS_ADAPTER_NOT_FOUND 0xC0010006
+#define RNDIS_STATUS_OPEN_FAILED 0xC0010007
+#define RNDIS_STATUS_DEVICE_FAILED 0xC0010008
+#define RNDIS_STATUS_MULTICAST_FULL 0xC0010009
+#define RNDIS_STATUS_MULTICAST_EXISTS 0xC001000A
+#define RNDIS_STATUS_MULTICAST_NOT_FOUND 0xC001000B
+#define RNDIS_STATUS_REQUEST_ABORTED 0xC001000C
+#define RNDIS_STATUS_RESET_IN_PROGRESS 0xC001000D
+#define RNDIS_STATUS_CLOSING_INDICATING 0xC001000E
+#define RNDIS_STATUS_INVALID_PACKET 0xC001000F
+#define RNDIS_STATUS_OPEN_LIST_FULL 0xC0010010
+#define RNDIS_STATUS_ADAPTER_NOT_READY 0xC0010011
+#define RNDIS_STATUS_ADAPTER_NOT_OPEN 0xC0010012
+#define RNDIS_STATUS_NOT_INDICATING 0xC0010013
+#define RNDIS_STATUS_INVALID_LENGTH 0xC0010014
+#define RNDIS_STATUS_INVALID_DATA 0xC0010015
+#define RNDIS_STATUS_BUFFER_TOO_SHORT 0xC0010016
+#define RNDIS_STATUS_INVALID_OID 0xC0010017
+#define RNDIS_STATUS_ADAPTER_REMOVED 0xC0010018
+#define RNDIS_STATUS_UNSUPPORTED_MEDIA 0xC0010019
+#define RNDIS_STATUS_GROUP_ADDRESS_IN_US 0xC001001A
+#define RNDIS_STATUS_FILE_NOT_FOUND 0xC001001B
+#define RNDIS_STATUS_ERROR_READING_FILE 0xC001001C
+#define RNDIS_STATUS_ALREADY_MAPPED 0xC001001D
+#define RNDIS_STATUS_RESOURCE_CONFLICT 0xC001001E
+#define RNDIS_STATUS_NO_CABLE 0xC001001F
+
+#define OID_GEN_SUPPORTED_LIST 0x00010101
+#define OID_GEN_HARDWARE_STATUS 0x00010102
+#define OID_GEN_MEDIA_SUPPORTED 0x00010103
+#define OID_GEN_MEDIA_IN_USE 0x00010104
+#define OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105
+#define OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106
+#define OID_GEN_LINK_SPEED 0x00010107
+#define OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108
+#define OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109
+#define OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A
+#define OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B
+#define OID_GEN_VENDOR_ID 0x0001010C
+#define OID_GEN_VENDOR_DESCRIPTION 0x0001010D
+#define OID_GEN_CURRENT_PACKET_FILTER 0x0001010E
+#define OID_GEN_CURRENT_LOOKAHEAD 0x0001010F
+#define OID_GEN_DRIVER_VERSION 0x00010110
+#define OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111
+#define OID_GEN_PROTOCOL_OPTIONS 0x00010112
+#define OID_GEN_MAC_OPTIONS 0x00010113
+#define OID_GEN_MEDIA_CONNECT_STATUS 0x00010114
+#define OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115
+#define OID_GEN_VENDOR_DRIVER_VERSION 0x00010116
+#define OID_GEN_SUPPORTED_GUIDS 0x00010117
+#define OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118
+#define OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119
+#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203
+#define OID_GEN_RECEIVE_SCALE_PARAMETERS 0x00010204
+#define OID_GEN_MACHINE_NAME 0x0001021A
+#define OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B
+#define OID_GEN_VLAN_ID 0x0001021C
+
+#define OID_802_3_PERMANENT_ADDRESS 0x01010101
+#define OID_802_3_CURRENT_ADDRESS 0x01010102
+#define OID_802_3_MULTICAST_LIST 0x01010103
+#define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104
+#define OID_802_3_MAC_OPTIONS 0x01010105
+#define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101
+#define OID_802_3_XMIT_ONE_COLLISION 0x01020102
+#define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103
+#define OID_802_3_XMIT_DEFERRED 0x01020201
+#define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202
+#define OID_802_3_RCV_OVERRUN 0x01020203
+#define OID_802_3_XMIT_UNDERRUN 0x01020204
+#define OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205
+#define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206
+#define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207
+
+#define OID_TCP_OFFLOAD_PARAMETERS 0xFC01020C
+#define OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020D
+
+#define RNDIS_MEDIUM_802_3 0x00000000
+
+/* Device flags */
+#define RNDIS_DF_CONNECTIONLESS 0x00000001
+#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002
+
+/*
+ * Common RNDIS message header.
+ */
+struct rndis_msghdr {
+ uint32_t type;
+ uint32_t len;
+};
+
+/*
+ * RNDIS data message
+ */
+#define RNDIS_PACKET_MSG 0x00000001
+
+struct rndis_packet_msg {
+ uint32_t type;
+ uint32_t len;
+ uint32_t dataoffset;
+ uint32_t datalen;
+ uint32_t oobdataoffset;
+ uint32_t oobdatalen;
+ uint32_t oobdataelements;
+ uint32_t pktinfooffset;
+ uint32_t pktinfolen;
+ uint32_t vchandle;
+ uint32_t reserved;
+};
+
+/*
+ * Minimum value for dataoffset, oobdataoffset, and
+ * pktinfooffset.
+ */
+#define RNDIS_PACKET_MSG_OFFSET_MIN \
+ (sizeof(struct rndis_packet_msg) - \
+ offsetof(struct rndis_packet_msg, dataoffset))
+
+/* Offset from the beginning of rndis_packet_msg. */
+#define RNDIS_PACKET_MSG_OFFSET_ABS(ofs) \
+ ((ofs) + offsetof(struct rndis_packet_msg, dataoffset))
+
+#define RNDIS_PACKET_MSG_OFFSET_ALIGN 4
+#define RNDIS_PACKET_MSG_OFFSET_ALIGNMASK \
+ (RNDIS_PACKET_MSG_OFFSET_ALIGN - 1)
+
+/* Per-packet-info for RNDIS data message */
+struct rndis_pktinfo {
+ uint32_t size;
+ uint32_t type; /* NDIS_PKTINFO_TYPE_ */
+ uint32_t offset;
+ uint8_t data[];
+};
+
+#define RNDIS_PKTINFO_OFFSET \
+ offsetof(struct rndis_pktinfo, data[0])
+#define RNDIS_PKTINFO_SIZE_ALIGN 4
+#define RNDIS_PKTINFO_SIZE_ALIGNMASK (RNDIS_PKTINFO_SIZE_ALIGN - 1)
+
+#define NDIS_PKTINFO_TYPE_CSUM 0
+#define NDIS_PKTINFO_TYPE_IPSEC 1
+#define NDIS_PKTINFO_TYPE_LSO 2
+#define NDIS_PKTINFO_TYPE_CLASSIFY 3
+/* reserved 4 */
+#define NDIS_PKTINFO_TYPE_SGLIST 5
+#define NDIS_PKTINFO_TYPE_VLAN 6
+#define NDIS_PKTINFO_TYPE_ORIG 7
+#define NDIS_PKTINFO_TYPE_PKT_CANCELID 8
+#define NDIS_PKTINFO_TYPE_ORIG_NBLIST 9
+#define NDIS_PKTINFO_TYPE_CACHE_NBLIST 10
+#define NDIS_PKTINFO_TYPE_PKT_PAD 11
+
+/* RNDIS extension */
+
+/* Per-packet hash info */
+#define NDIS_HASH_INFO_SIZE sizeof(uint32_t)
+#define NDIS_PKTINFO_TYPE_HASHINF NDIS_PKTINFO_TYPE_ORIG_NBLIST
+/* NDIS_HASH_ */
+
+/* Per-packet hash value */
+#define NDIS_HASH_VALUE_SIZE sizeof(uint32_t)
+#define NDIS_PKTINFO_TYPE_HASHVAL NDIS_PKTINFO_TYPE_PKT_CANCELID
+
+/* Per-packet-info size */
+#define RNDIS_PKTINFO_SIZE(dlen) offsetof(struct rndis_pktinfo, data[dlen])
+
+/*
+ * RNDIS control messages
+ */
+
+/*
+ * Common header for RNDIS completion messages.
+ *
+ * NOTE: It does not apply to RNDIS_RESET_CMPLT.
+ */
+struct rndis_comp_hdr {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+};
+
+/* Initialize the device. */
+#define RNDIS_INITIALIZE_MSG 0x00000002
+#define RNDIS_INITIALIZE_CMPLT 0x80000002
+
+struct rndis_init_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t ver_major;
+ uint32_t ver_minor;
+ uint32_t max_xfersz;
+};
+
+struct rndis_init_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+ uint32_t ver_major;
+ uint32_t ver_minor;
+ uint32_t devflags;
+ uint32_t medium;
+ uint32_t pktmaxcnt;
+ uint32_t pktmaxsz;
+ uint32_t align;
+ uint32_t aflistoffset;
+ uint32_t aflistsz;
+};
+
+#define RNDIS_INIT_COMP_SIZE_MIN \
+ offsetof(struct rndis_init_comp, aflistsz)
+
+/* Halt the device. No response sent. */
+#define RNDIS_HALT_MSG 0x00000003
+
+struct rndis_halt_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+};
+
+/* Send a query object. */
+#define RNDIS_QUERY_MSG 0x00000004
+#define RNDIS_QUERY_CMPLT 0x80000004
+
+struct rndis_query_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t oid;
+ uint32_t infobuflen;
+ uint32_t infobufoffset;
+ uint32_t devicevchdl;
+};
+
+#define RNDIS_QUERY_REQ_INFOBUFOFFSET \
+ (sizeof(struct rndis_query_req) - \
+ offsetof(struct rndis_query_req, rid))
+
+struct rndis_query_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+ uint32_t infobuflen;
+ uint32_t infobufoffset;
+};
+
+/* infobuf offset from the beginning of rndis_query_comp. */
+#define RNDIS_QUERY_COMP_INFOBUFOFFSET_ABS(ofs) \
+ ((ofs) + offsetof(struct rndis_query_comp, rid))
+
+/* Send a set object request. */
+#define RNDIS_SET_MSG 0x00000005
+#define RNDIS_SET_CMPLT 0x80000005
+
+struct rndis_set_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t oid;
+ uint32_t infobuflen;
+ uint32_t infobufoffset;
+ uint32_t devicevchdl;
+};
+
+#define RNDIS_SET_REQ_INFOBUFOFFSET \
+ (sizeof(struct rndis_set_req) - \
+ offsetof(struct rndis_set_req, rid))
+
+struct rndis_set_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+};
+
+/*
+ * Parameter used by OID_GEN_RNDIS_CONFIG_PARAMETER.
+ */
+#define RNDIS_SET_PARAM_NUMERIC 0x00000000
+#define RNDIS_SET_PARAM_STRING 0x00000002
+
+struct rndis_set_parameter {
+ uint32_t nameoffset;
+ uint32_t namelen;
+ uint32_t type;
+ uint32_t valueoffset;
+ uint32_t valuelen;
+};
+
+/* Perform a soft reset on the device. */
+#define RNDIS_RESET_MSG 0x00000006
+#define RNDIS_RESET_CMPLT 0x80000006
+
+struct rndis_reset_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+};
+
+struct rndis_reset_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t status;
+ uint32_t adrreset;
+};
+
+/* 802.3 link-state or undefined message error. Sent by device. */
+#define RNDIS_INDICATE_STATUS_MSG 0x00000007
+
+struct rndis_status_msg {
+ uint32_t type;
+ uint32_t len;
+ uint32_t status;
+ uint32_t stbuflen;
+ uint32_t stbufoffset;
+ /* rndis_diag_info */
+};
+
+/* stbuf offset from the beginning of rndis_status_msg. */
+#define RNDIS_STBUFOFFSET_ABS(ofs) \
+ ((ofs) + offsetof(struct rndis_status_msg, status))
+
+/*
+ * Immediately after rndis_status_msg.stbufoffset, if a control
+ * message is malformatted, or a packet message contains inappropriate
+ * content.
+ */
+struct rndis_diag_info {
+ uint32_t diagstatus;
+ uint32_t erroffset;
+};
+
+/* Keepalive message. May be sent by device. */
+#define RNDIS_KEEPALIVE_MSG 0x00000008
+#define RNDIS_KEEPALIVE_CMPLT 0x80000008
+
+struct rndis_keepalive_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+};
+
+struct rndis_keepalive_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+};
+
+/* Packet filter bits used by OID_GEN_CURRENT_PACKET_FILTER */
+#define NDIS_PACKET_TYPE_NONE 0x00000000
+#define NDIS_PACKET_TYPE_DIRECTED 0x00000001
+#define NDIS_PACKET_TYPE_MULTICAST 0x00000002
+#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
+#define NDIS_PACKET_TYPE_BROADCAST 0x00000008
+#define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010
+#define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020
+#define NDIS_PACKET_TYPE_SMT 0x00000040
+#define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080
+#define NDIS_PACKET_TYPE_GROUP 0x00001000
+#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00002000
+#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00004000
+#define NDIS_PACKET_TYPE_MAC_FRAME 0x00008000
+
+#endif /* !_NET_RNDIS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/netvsc/rte_pmd_netvsc_version.map b/src/spdk/dpdk/drivers/net/netvsc/rte_pmd_netvsc_version.map
new file mode 100644
index 00000000..d534019a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/netvsc/rte_pmd_netvsc_version.map
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+DPDK_18.08 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/nfp/Makefile b/src/spdk/dpdk/drivers/net/nfp/Makefile
new file mode 100644
index 00000000..ab4e0a7d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/Makefile
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_nfp.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lm
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+EXPORT_MAP := rte_pmd_nfp_version.map
+
+LIBABIVER := 1
+
+VPATH += $(SRCDIR)/nfpcore
+
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_cppcore.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_cpp_pcie_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_mutex.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_resource.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_crc.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_mip.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nffw.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_hwinfo.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_rtsym.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp_cmds.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp_eth.c
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_net.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/nfp/meson.build b/src/spdk/dpdk/drivers/net/nfp/meson.build
new file mode 100644
index 00000000..3ba37e27
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/meson.build
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('nfpcore/nfp_cpp_pcie_ops.c',
+ 'nfpcore/nfp_nsp.c',
+ 'nfpcore/nfp_cppcore.c',
+ 'nfpcore/nfp_resource.c',
+ 'nfpcore/nfp_mip.c',
+ 'nfpcore/nfp_nffw.c',
+ 'nfpcore/nfp_rtsym.c',
+ 'nfpcore/nfp_nsp_cmds.c',
+ 'nfpcore/nfp_crc.c',
+ 'nfpcore/nfp_mutex.c',
+ 'nfpcore/nfp_nsp_eth.c',
+ 'nfpcore/nfp_hwinfo.c',
+ 'nfp_net.c')
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfp_net.c b/src/spdk/dpdk/drivers/net/nfp/nfp_net.c
new file mode 100644
index 00000000..6e5e305f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfp_net.c
@@ -0,0 +1,3301 @@
+/*
+ * Copyright (c) 2014-2018 Netronome Systems, Inc.
+ * All rights reserved.
+ *
+ * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * vim:shiftwidth=8:noexpandtab
+ *
+ * @file dpdk/pmd/nfp_net.c
+ *
+ * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
+ */
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_dev.h>
+#include <rte_ether.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_mempool.h>
+#include <rte_version.h>
+#include <rte_string_fns.h>
+#include <rte_alarm.h>
+#include <rte_spinlock.h>
+
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfpcore/nfp_hwinfo.h"
+#include "nfpcore/nfp_mip.h"
+#include "nfpcore/nfp_rtsym.h"
+#include "nfpcore/nfp_nsp.h"
+
+#include "nfp_net_pmd.h"
+#include "nfp_net_logs.h"
+#include "nfp_net_ctrl.h"
+
+/* Prototypes */
+static void nfp_net_close(struct rte_eth_dev *dev);
+static int nfp_net_configure(struct rte_eth_dev *dev);
+static void nfp_net_dev_interrupt_handler(void *param);
+static void nfp_net_dev_interrupt_delayed_handler(void *param);
+static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void nfp_net_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int nfp_net_init(struct rte_eth_dev *eth_dev);
+static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
+static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
+static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
+static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t queue_idx);
+static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+static void nfp_net_rx_queue_release(void *rxq);
+static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
+static void nfp_net_tx_queue_release(void *txq);
+static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+static int nfp_net_start(struct rte_eth_dev *dev);
+static int nfp_net_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void nfp_net_stats_reset(struct rte_eth_dev *dev);
+static void nfp_net_stop(struct rte_eth_dev *dev);
+static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+static int nfp_net_rss_config_default(struct rte_eth_dev *dev);
+static int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int nfp_set_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+
+/* The offset of the queue controller queues in the PCIe Target */
+#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
+
+/* Maximum value which can be added to a queue with one transaction */
+#define NFP_QCP_MAX_ADD 0x7f
+
+#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
+ (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
+
+/* nfp_qcp_ptr - Read or Write Pointer of a queue */
+enum nfp_qcp_ptr {
+ NFP_QCP_READ_PTR = 0,
+ NFP_QCP_WRITE_PTR
+};
+
+/*
+ * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
+ * @q: Base address for queue structure
+ * @ptr: Add to the Read or Write pointer
+ * @val: Value to add to the queue pointer
+ *
+ * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
+ */
+static inline void
+nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
+{
+ uint32_t off;
+
+ if (ptr == NFP_QCP_READ_PTR)
+ off = NFP_QCP_QUEUE_ADD_RPTR;
+ else
+ off = NFP_QCP_QUEUE_ADD_WPTR;
+
+ while (val > NFP_QCP_MAX_ADD) {
+ nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
+ val -= NFP_QCP_MAX_ADD;
+ }
+
+ nn_writel(rte_cpu_to_le_32(val), q + off);
+}
+
+/*
+ * nfp_qcp_read - Read the current Read/Write pointer value for a queue
+ * @q: Base address for queue structure
+ * @ptr: Read or Write pointer
+ */
+static inline uint32_t
+nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
+{
+ uint32_t off;
+ uint32_t val;
+
+ if (ptr == NFP_QCP_READ_PTR)
+ off = NFP_QCP_QUEUE_STS_LO;
+ else
+ off = NFP_QCP_QUEUE_STS_HI;
+
+ val = rte_cpu_to_le_32(nn_readl(q + off));
+
+ if (ptr == NFP_QCP_READ_PTR)
+ return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
+ else
+ return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
+}
+
+/*
+ * Functions to read/write from/to Config BAR
+ * Performs any endian conversion necessary.
+ */
+static inline uint8_t
+nn_cfg_readb(struct nfp_net_hw *hw, int off)
+{
+ return nn_readb(hw->ctrl_bar + off);
+}
+
+static inline void
+nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
+{
+ nn_writeb(val, hw->ctrl_bar + off);
+}
+
+static inline uint32_t
+nn_cfg_readl(struct nfp_net_hw *hw, int off)
+{
+ return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
+}
+
+static inline void
+nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
+{
+ nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
+}
+
+static inline uint64_t
+nn_cfg_readq(struct nfp_net_hw *hw, int off)
+{
+ return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
+}
+
+static inline void
+nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
+{
+ nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
+}
+
+static void
+nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
+{
+ unsigned i;
+
+ if (rxq->rxbufs == NULL)
+ return;
+
+ for (i = 0; i < rxq->rx_count; i++) {
+ if (rxq->rxbufs[i].mbuf) {
+ rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
+ rxq->rxbufs[i].mbuf = NULL;
+ }
+ }
+}
+
+static void
+nfp_net_rx_queue_release(void *rx_queue)
+{
+ struct nfp_net_rxq *rxq = rx_queue;
+
+ if (rxq) {
+ nfp_net_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->rxbufs);
+ rte_free(rxq);
+ }
+}
+
+static void
+nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
+{
+ nfp_net_rx_queue_release_mbufs(rxq);
+ rxq->rd_p = 0;
+ rxq->nb_rx_hold = 0;
+}
+
+static void
+nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
+{
+ unsigned i;
+
+ if (txq->txbufs == NULL)
+ return;
+
+ for (i = 0; i < txq->tx_count; i++) {
+ if (txq->txbufs[i].mbuf) {
+ rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
+ txq->txbufs[i].mbuf = NULL;
+ }
+ }
+}
+
+static void
+nfp_net_tx_queue_release(void *tx_queue)
+{
+ struct nfp_net_txq *txq = tx_queue;
+
+ if (txq) {
+ nfp_net_tx_queue_release_mbufs(txq);
+ rte_free(txq->txbufs);
+ rte_free(txq);
+ }
+}
+
+static void
+nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
+{
+ nfp_net_tx_queue_release_mbufs(txq);
+ txq->wr_p = 0;
+ txq->rd_p = 0;
+}
+
+static int
+__nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
+{
+ int cnt;
+ uint32_t new;
+ struct timespec wait;
+
+ PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
+ hw->qcp_cfg);
+
+ if (hw->qcp_cfg == NULL)
+ rte_panic("Bad configuration queue pointer\n");
+
+ nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
+
+ wait.tv_sec = 0;
+ wait.tv_nsec = 1000000;
+
+ PMD_DRV_LOG(DEBUG, "Polling for update ack...");
+
+ /* Poll update field, waiting for NFP to ack the config */
+ for (cnt = 0; ; cnt++) {
+ new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
+ if (new == 0)
+ break;
+ if (new & NFP_NET_CFG_UPDATE_ERR) {
+ PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
+ return -1;
+ }
+ if (cnt >= NFP_NET_POLL_TIMEOUT) {
+ PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
+ " %dms", update, cnt);
+ rte_panic("Exiting\n");
+ }
+ nanosleep(&wait, 0); /* waiting for a 1ms */
+ }
+ PMD_DRV_LOG(DEBUG, "Ack DONE");
+ return 0;
+}
+
+/*
+ * Reconfigure the NIC
+ * @nn: device to reconfigure
+ * @ctrl: The value for the ctrl field in the BAR config
+ * @update: The value for the update field in the BAR config
+ *
+ * Write the update word to the BAR and ping the reconfig queue. Then poll
+ * until the firmware has acknowledged the update by zeroing the update word.
+ */
+static int
+nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
+{
+ uint32_t err;
+
+ PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x",
+ ctrl, update);
+
+ rte_spinlock_lock(&hw->reconfig_lock);
+
+ nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
+ nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
+
+ rte_wmb();
+
+ err = __nfp_net_reconfig(hw, update);
+
+ rte_spinlock_unlock(&hw->reconfig_lock);
+
+ if (!err)
+ return 0;
+
+ /*
+ * Reconfig errors imply situations where they can be handled.
+ * Otherwise, rte_panic is called inside __nfp_net_reconfig
+ */
+ PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
+ ctrl, update);
+ return -EIO;
+}
+
+/*
+ * Configure an Ethernet device. This function must be invoked first
+ * before any other function in the Ethernet API. This function can
+ * also be re-invoked when a device is in the stopped state.
+ */
+static int
+nfp_net_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rxmode *rxmode;
+ struct rte_eth_txmode *txmode;
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * A DPDK app sends info about how many queues to use and how
+ * those queues need to be configured. This is used by the
+ * DPDK core and it makes sure no more queues than those
+ * advertised by the driver are requested. This function is
+ * called after that internal process
+ */
+
+ PMD_INIT_LOG(DEBUG, "Configure");
+
+ dev_conf = &dev->data->dev_conf;
+ rxmode = &dev_conf->rxmode;
+ txmode = &dev_conf->txmode;
+
+ /* Checking TX mode */
+ if (txmode->mq_mode) {
+ PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
+ return -EINVAL;
+ }
+
+ /* Checking RX mode */
+ if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
+ !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
+ PMD_INIT_LOG(INFO, "RSS not supported");
+ return -EINVAL;
+ }
+
+ /* KEEP_CRC offload flag is not supported by PMD
+ * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
+ */
+ if (rte_eth_dev_must_keep_crc(rxmode->offloads))
+ PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
+
+ return 0;
+}
+
+static void
+nfp_net_enable_queues(struct rte_eth_dev *dev)
+{
+ struct nfp_net_hw *hw;
+ uint64_t enabled_queues = 0;
+ int i;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Enabling the required TX queues in the device */
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ enabled_queues |= (1 << i);
+
+ nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
+
+ enabled_queues = 0;
+
+ /* Enabling the required RX queues in the device */
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ enabled_queues |= (1 << i);
+
+ nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
+}
+
+static void
+nfp_net_disable_queues(struct rte_eth_dev *dev)
+{
+ struct nfp_net_hw *hw;
+ uint32_t new_ctrl, update = 0;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
+ nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
+
+ new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
+ update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
+ NFP_NET_CFG_UPDATE_MSIX;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
+ new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
+
+ /* If an error when reconfig we avoid to change hw state */
+ if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
+ return;
+
+ hw->ctrl = new_ctrl;
+}
+
+static int
+nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
+ return -1;
+ }
+ return 0;
+}
+
+static void
+nfp_net_params_setup(struct nfp_net_hw *hw)
+{
+ nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
+ nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
+}
+
+static void
+nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
+{
+ hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
+}
+
+#define ETH_ADDR_LEN 6
+
+static void
+nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
+{
+ int i;
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ dst[i] = src[i];
+}
+
+static int
+nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
+{
+ struct nfp_eth_table *nfp_eth_table;
+
+ nfp_eth_table = nfp_eth_read_ports(hw->cpp);
+ /*
+ * hw points to port0 private data. We need hw now pointing to
+ * right port.
+ */
+ hw += port;
+ nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
+ (uint8_t *)&nfp_eth_table->ports[port].mac_addr);
+
+ free(nfp_eth_table);
+ return 0;
+}
+
+static void
+nfp_net_vf_read_mac(struct nfp_net_hw *hw)
+{
+ uint32_t tmp;
+
+ tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
+ memcpy(&hw->mac_addr[0], &tmp, 4);
+
+ tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
+ memcpy(&hw->mac_addr[4], &tmp, 2);
+}
+
+static void
+nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
+{
+ uint32_t mac0 = *(uint32_t *)mac;
+ uint16_t mac1;
+
+ nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
+
+ mac += 4;
+ mac1 = *(uint16_t *)mac;
+ nn_writew(rte_cpu_to_be_16(mac1),
+ hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
+}
+
+int
+nfp_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct nfp_net_hw *hw;
+ uint32_t update, ctrl;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
+ !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
+ PMD_INIT_LOG(INFO, "MAC address unable to change when"
+ " port enabled");
+ return -EBUSY;
+ }
+
+ if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
+ !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
+ return -EBUSY;
+
+ /* Writing new MAC to the specific port BAR address */
+ nfp_net_write_mac(hw, (uint8_t *)mac_addr);
+
+ /* Signal the NIC about the change */
+ update = NFP_NET_CFG_UPDATE_MACADDR;
+ ctrl = hw->ctrl | NFP_NET_CFG_CTRL_LIVE_ADDR;
+ if (nfp_net_reconfig(hw, ctrl, update) < 0) {
+ PMD_INIT_LOG(INFO, "MAC address update failed");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int
+nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
+{
+ struct nfp_net_hw *hw;
+ int i;
+
+ if (!intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (!intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
+ PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
+ /* UIO just supports one queue and no LSC*/
+ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
+ intr_handle->intr_vec[0] = 0;
+ } else {
+ PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ /*
+ * The first msix vector is reserved for non
+ * efd interrupts
+ */
+ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
+ intr_handle->intr_vec[i] = i + 1;
+ PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
+ intr_handle->intr_vec[i]);
+ }
+ }
+
+ /* Avoiding TX interrupts */
+ hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
+ return 0;
+}
+
+static uint32_t
+nfp_check_offloads(struct rte_eth_dev *dev)
+{
+ struct nfp_net_hw *hw;
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rxmode *rxmode;
+ struct rte_eth_txmode *txmode;
+ uint32_t ctrl = 0;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_conf = &dev->data->dev_conf;
+ rxmode = &dev_conf->rxmode;
+ txmode = &dev_conf->txmode;
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+ if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
+ ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+ }
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
+ ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ }
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ hw->mtu = rxmode->max_rx_pkt_len;
+
+ if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+
+ /* L2 broadcast */
+ if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
+ ctrl |= NFP_NET_CFG_CTRL_L2BC;
+
+ /* L2 multicast */
+ if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
+ ctrl |= NFP_NET_CFG_CTRL_L2MC;
+
+ /* TX checksum offload */
+ if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
+ txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
+ txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+ ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+
+ /* LSO offload */
+ if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+ ctrl |= NFP_NET_CFG_CTRL_LSO;
+ else
+ ctrl |= NFP_NET_CFG_CTRL_LSO2;
+ }
+
+ /* RX gather */
+ if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ ctrl |= NFP_NET_CFG_CTRL_GATHER;
+
+ return ctrl;
+}
+
+static int
+nfp_net_start(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t new_ctrl, update = 0;
+ struct nfp_net_hw *hw;
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rxmode *rxmode;
+ uint32_t intr_vector;
+ int ret;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_LOG(DEBUG, "Start");
+
+ /* Disabling queues just in case... */
+ nfp_net_disable_queues(dev);
+
+ /* Enabling the required queues in the device */
+ nfp_net_enable_queues(dev);
+
+ /* check and configure queue intr-vector mapping */
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ if (hw->pf_multiport_enabled) {
+ PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
+ "with NFP multiport PF");
+ return -EINVAL;
+ }
+ if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
+ /*
+ * Better not to share LSC with RX interrupts.
+ * Unregistering LSC interrupt handler
+ */
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ nfp_net_dev_interrupt_handler, (void *)dev);
+
+ if (dev->data->nb_rx_queues > 1) {
+ PMD_INIT_LOG(ERR, "PMD rx interrupt only "
+ "supports 1 queue with UIO");
+ return -EIO;
+ }
+ }
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+
+ nfp_configure_rx_interrupt(dev, intr_handle);
+ update = NFP_NET_CFG_UPDATE_MSIX;
+ }
+
+ rte_intr_enable(intr_handle);
+
+ new_ctrl = nfp_check_offloads(dev);
+
+ /* Writing configuration parameters in the device */
+ nfp_net_params_setup(hw);
+
+ dev_conf = &dev->data->dev_conf;
+ rxmode = &dev_conf->rxmode;
+
+ if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+ nfp_net_rss_config_default(dev);
+ update |= NFP_NET_CFG_UPDATE_RSS;
+ new_ctrl |= NFP_NET_CFG_CTRL_RSS;
+ }
+
+ /* Enable device */
+ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
+
+ update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
+ new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
+
+ nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
+ if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
+ return -EIO;
+
+ /*
+ * Allocating rte mbuffs for configured rx queues.
+ * This requires queues being enabled before
+ */
+ if (nfp_net_rx_freelist_setup(dev) < 0) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (hw->is_pf)
+ /* Configure the physical port up */
+ nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);
+
+ hw->ctrl = new_ctrl;
+
+ return 0;
+
+error:
+ /*
+ * An error returned by this function should mean the app
+ * exiting and then the system releasing all the memory
+ * allocated even memory coming from hugepages.
+ *
+ * The device could be enabled at this point with some queues
+ * ready for getting packets. This is true if the call to
+ * nfp_net_rx_freelist_setup() succeeds for some queues but
+ * fails for subsequent queues.
+ *
+ * This should make the app exiting but better if we tell the
+ * device first.
+ */
+ nfp_net_disable_queues(dev);
+
+ return ret;
+}
+
+/* Stop device: disable rx and tx functions to allow for reconfiguring. */
+static void
+nfp_net_stop(struct rte_eth_dev *dev)
+{
+ int i;
+ struct nfp_net_hw *hw;
+
+ PMD_INIT_LOG(DEBUG, "Stop");
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ nfp_net_disable_queues(dev);
+
+ /* Clear queues */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ nfp_net_reset_tx_queue(
+ (struct nfp_net_txq *)dev->data->tx_queues[i]);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ nfp_net_reset_rx_queue(
+ (struct nfp_net_rxq *)dev->data->rx_queues[i]);
+ }
+
+ if (hw->is_pf)
+ /* Configure the physical port down */
+ nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
+}
+
+/* Reset and stop device. The device can not be restarted. */
+static void
+nfp_net_close(struct rte_eth_dev *dev)
+{
+ struct nfp_net_hw *hw;
+ struct rte_pci_device *pci_dev;
+ int i;
+
+ PMD_INIT_LOG(DEBUG, "Close");
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ /*
+ * We assume that the DPDK application is stopping all the
+ * threads/queues before calling the device close function.
+ */
+
+ nfp_net_disable_queues(dev);
+
+ /* Clear queues */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ nfp_net_reset_tx_queue(
+ (struct nfp_net_txq *)dev->data->tx_queues[i]);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ nfp_net_reset_rx_queue(
+ (struct nfp_net_rxq *)dev->data->rx_queues[i]);
+ }
+
+ rte_intr_disable(&pci_dev->intr_handle);
+ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
+
+ /* unregister callback func from eal lib */
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ nfp_net_dev_interrupt_handler,
+ (void *)dev);
+
+ /*
+ * The ixgbe PMD driver disables the pcie master on the
+ * device. The i40e does not...
+ */
+}
+
+static void
+nfp_net_promisc_enable(struct rte_eth_dev *dev)
+{
+ uint32_t new_ctrl, update = 0;
+ struct nfp_net_hw *hw;
+
+ PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
+ PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
+ return;
+ }
+
+ if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
+ PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
+ return;
+ }
+
+ new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
+ update = NFP_NET_CFG_UPDATE_GEN;
+
+ /*
+ * DPDK sets promiscuous mode on just after this call assuming
+ * it can not fail ...
+ */
+ if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
+ return;
+
+ hw->ctrl = new_ctrl;
+}
+
+static void
+nfp_net_promisc_disable(struct rte_eth_dev *dev)
+{
+ uint32_t new_ctrl, update = 0;
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
+ PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
+ return;
+ }
+
+ new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
+ update = NFP_NET_CFG_UPDATE_GEN;
+
+ /*
+ * DPDK sets promiscuous mode off just before this call
+ * assuming it can not fail ...
+ */
+ if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
+ return;
+
+ hw->ctrl = new_ctrl;
+}
+
+/*
+ * return 0 means link status changed, -1 means not changed
+ *
+ * Wait to complete is needed as it can take up to 9 seconds to get the Link
+ * status.
+ */
+static int
+nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
+{
+ struct nfp_net_hw *hw;
+ struct rte_eth_link link;
+ uint32_t nn_link_status;
+ int ret;
+
+ static const uint32_t ls_to_ethtool[] = {
+ [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
+ [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = ETH_SPEED_NUM_NONE,
+ [NFP_NET_CFG_STS_LINK_RATE_1G] = ETH_SPEED_NUM_1G,
+ [NFP_NET_CFG_STS_LINK_RATE_10G] = ETH_SPEED_NUM_10G,
+ [NFP_NET_CFG_STS_LINK_RATE_25G] = ETH_SPEED_NUM_25G,
+ [NFP_NET_CFG_STS_LINK_RATE_40G] = ETH_SPEED_NUM_40G,
+ [NFP_NET_CFG_STS_LINK_RATE_50G] = ETH_SPEED_NUM_50G,
+ [NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G,
+ };
+
+ PMD_DRV_LOG(DEBUG, "Link update");
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
+
+ memset(&link, 0, sizeof(struct rte_eth_link));
+
+ if (nn_link_status & NFP_NET_CFG_STS_LINK)
+ link.link_status = ETH_LINK_UP;
+
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
+ NFP_NET_CFG_STS_LINK_RATE_MASK;
+
+ if (nn_link_status >= RTE_DIM(ls_to_ethtool))
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ else
+ link.link_speed = ls_to_ethtool[nn_link_status];
+
+ ret = rte_eth_linkstatus_set(dev, &link);
+ if (ret == 0) {
+ if (link.link_status)
+ PMD_DRV_LOG(INFO, "NIC Link is Up");
+ else
+ PMD_DRV_LOG(INFO, "NIC Link is Down");
+ }
+ return ret;
+}
+
+static int
+nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ int i;
+ struct nfp_net_hw *hw;
+ struct rte_eth_stats nfp_dev_stats;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
+
+ memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
+
+ /* reading per RX ring stats */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ nfp_dev_stats.q_ipackets[i] =
+ nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
+
+ nfp_dev_stats.q_ipackets[i] -=
+ hw->eth_stats_base.q_ipackets[i];
+
+ nfp_dev_stats.q_ibytes[i] =
+ nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
+
+ nfp_dev_stats.q_ibytes[i] -=
+ hw->eth_stats_base.q_ibytes[i];
+ }
+
+ /* reading per TX ring stats */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ nfp_dev_stats.q_opackets[i] =
+ nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
+
+ nfp_dev_stats.q_opackets[i] -=
+ hw->eth_stats_base.q_opackets[i];
+
+ nfp_dev_stats.q_obytes[i] =
+ nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
+
+ nfp_dev_stats.q_obytes[i] -=
+ hw->eth_stats_base.q_obytes[i];
+ }
+
+ nfp_dev_stats.ipackets =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
+
+ nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
+
+ nfp_dev_stats.ibytes =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
+
+ nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
+
+ nfp_dev_stats.opackets =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
+
+ nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
+
+ nfp_dev_stats.obytes =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
+
+ nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
+
+ /* reading general device stats */
+ nfp_dev_stats.ierrors =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
+
+ nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
+
+ nfp_dev_stats.oerrors =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
+
+ nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
+
+ /* RX ring mbuf allocation failures */
+ nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+
+ nfp_dev_stats.imissed =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+
+ nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
+
+ if (stats) {
+ memcpy(stats, &nfp_dev_stats, sizeof(*stats));
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void
+nfp_net_stats_reset(struct rte_eth_dev *dev)
+{
+ int i;
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * hw->eth_stats_base records the per counter starting point.
+ * Lets update it now
+ */
+
+ /* reading per RX ring stats */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ hw->eth_stats_base.q_ipackets[i] =
+ nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
+
+ hw->eth_stats_base.q_ibytes[i] =
+ nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
+ }
+
+ /* reading per TX ring stats */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ hw->eth_stats_base.q_opackets[i] =
+ nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
+
+ hw->eth_stats_base.q_obytes[i] =
+ nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
+ }
+
+ hw->eth_stats_base.ipackets =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
+
+ hw->eth_stats_base.ibytes =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
+
+ hw->eth_stats_base.opackets =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
+
+ hw->eth_stats_base.obytes =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
+
+ /* reading general device stats */
+ hw->eth_stats_base.ierrors =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
+
+ hw->eth_stats_base.oerrors =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
+
+ /* RX ring mbuf allocation failures */
+ dev->data->rx_mbuf_alloc_failed = 0;
+
+ hw->eth_stats_base.imissed =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+}
+
+static void
+nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
+ dev_info->min_rx_bufsize = ETHER_MIN_MTU;
+ dev_info->max_rx_pktlen = hw->max_mtu;
+ /* Next should change when PF support is implemented */
+ dev_info->max_mac_addrs = 1;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_KEEP_CRC;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = DEFAULT_RX_PTHRESH,
+ .hthresh = DEFAULT_RX_HTHRESH,
+ .wthresh = DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = DEFAULT_TX_PTHRESH,
+ .hthresh = DEFAULT_TX_HTHRESH,
+ .wthresh = DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
+ };
+
+ dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_NONFRAG_IPV6_UDP;
+
+ dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
+ dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
+
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+}
+
+static const uint32_t *
+nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to nfp_net_set_hash() */
+ RTE_PTYPE_INNER_L3_IPV4,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ RTE_PTYPE_INNER_L4_MASK,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == nfp_net_recv_pkts)
+ return ptypes;
+ return NULL;
+}
+
+static uint32_t
+nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ struct nfp_net_rxq *rxq;
+ struct nfp_net_rx_desc *rxds;
+ uint32_t idx;
+ uint32_t count;
+
+ rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
+
+ idx = rxq->rd_p;
+
+ count = 0;
+
+ /*
+ * Other PMDs are just checking the DD bit in intervals of 4
+ * descriptors and counting all four if the first has the DD
+ * bit on. Of course, this is not accurate but can be good for
+ * performance. But ideally that should be done in descriptors
+ * chunks belonging to the same cache line
+ */
+
+ while (count < rxq->rx_count) {
+ rxds = &rxq->rxds[idx];
+ if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+ break;
+
+ count++;
+ idx++;
+
+ /* Wrapping? */
+ if ((idx) == rxq->rx_count)
+ idx = 0;
+ }
+
+ return count;
+}
+
+static int
+nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev;
+ struct nfp_net_hw *hw;
+ int base = 0;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
+ base = 1;
+
+ /* Make sure all updates are written before un-masking */
+ rte_wmb();
+ nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
+ NFP_NET_CFG_ICR_UNMASKED);
+ return 0;
+}
+
+static int
+nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev;
+ struct nfp_net_hw *hw;
+ int base = 0;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
+ base = 1;
+
+ /* Make sure all updates are written before un-masking */
+ rte_wmb();
+ nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
+ return 0;
+}
+
+static void
+nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_eth_link link;
+
+ rte_eth_linkstatus_get(dev, &link);
+ if (link.link_status)
+ PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
+ dev->data->port_id, link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX
+ ? "full-duplex" : "half-duplex");
+ else
+ PMD_DRV_LOG(INFO, " Port %d: Link Down",
+ dev->data->port_id);
+
+ PMD_DRV_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+}
+
+/* Interrupt configuration and handling */
+
+/*
+ * nfp_net_irq_unmask - Unmask an interrupt
+ *
+ * If MSI-X auto-masking is enabled clear the mask bit, otherwise
+ * clear the ICR for the entry.
+ */
+static void
+nfp_net_irq_unmask(struct rte_eth_dev *dev)
+{
+ struct nfp_net_hw *hw;
+ struct rte_pci_device *pci_dev;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
+ /* If MSI-X auto-masking is used, clear the entry */
+ rte_wmb();
+ rte_intr_enable(&pci_dev->intr_handle);
+ } else {
+ /* Make sure all updates are written before un-masking */
+ rte_wmb();
+ nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
+ NFP_NET_CFG_ICR_UNMASKED);
+ }
+}
+
+static void
+nfp_net_dev_interrupt_handler(void *param)
+{
+ int64_t timeout;
+ struct rte_eth_link link;
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!");
+
+ rte_eth_linkstatus_get(dev, &link);
+
+ nfp_net_link_update(dev, 0);
+
+ /* likely to up */
+ if (!link.link_status) {
+ /* handle it 1 sec later, wait it being stable */
+ timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
+ /* likely to down */
+ } else {
+ /* handle it 4 sec later, wait it being stable */
+ timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
+ }
+
+ if (rte_eal_alarm_set(timeout * 1000,
+ nfp_net_dev_interrupt_delayed_handler,
+ (void *)dev) < 0) {
+ PMD_INIT_LOG(ERR, "Error setting alarm");
+ /* Unmasking */
+ nfp_net_irq_unmask(dev);
+ }
+}
+
+/*
+ * Interrupt handler which shall be registered for alarm callback for delayed
+ * handling specific interrupt to wait for the stable nic state. As the NIC
+ * interrupt state is not stable for nfp after link is just down, it needs
+ * to wait 4 seconds to get the stable status.
+ *
+ * @param handle Pointer to interrupt handle.
+ * @param param The address of parameter (struct rte_eth_dev *)
+ *
+ * @return void
+ */
+static void
+nfp_net_dev_interrupt_delayed_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ nfp_net_link_update(dev, 0);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+
+ nfp_net_dev_link_status_print(dev);
+
+ /* Unmasking */
+ nfp_net_irq_unmask(dev);
+}
+
+static int
+nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
+ return -EINVAL;
+
+ /* mtu setting is forbidden if port is started */
+ if (dev->data->dev_started) {
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+ dev->data->port_id);
+ return -EBUSY;
+ }
+
+ /* switch to jumbo mode if needed */
+ if ((uint32_t)mtu > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
+
+ /* writing to configuration space */
+ nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
+
+ hw->mtu = mtu;
+
+ return 0;
+}
+
+static int
+nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ const struct rte_memzone *tz;
+ struct nfp_net_rxq *rxq;
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Validating number of descriptors */
+ if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
+ (nb_desc > NFP_NET_MAX_RX_DESC) ||
+ (nb_desc < NFP_NET_MIN_RX_DESC)) {
+ PMD_DRV_LOG(ERR, "Wrong nb_desc value");
+ return -EINVAL;
+ }
+
+ /*
+ * Free memory prior to re-allocation if needed. This is the case after
+ * calling nfp_net_stop
+ */
+ if (dev->data->rx_queues[queue_idx]) {
+ nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocating rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ return -ENOMEM;
+
+ /* Hw queues mapping based on firmware confifguration */
+ rxq->qidx = queue_idx;
+ rxq->fl_qcidx = queue_idx * hw->stride_rx;
+ rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
+ rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
+ rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
+
+ /*
+ * Tracking mbuf size for detecting a potential mbuf overflow due to
+ * RX offset
+ */
+ rxq->mem_pool = mp;
+ rxq->mbuf_size = rxq->mem_pool->elt_size;
+ rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
+ hw->flbufsz = rxq->mbuf_size;
+
+ rxq->rx_count = nb_desc;
+ rxq->port_id = dev->data->port_id;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->drop_en = rx_conf->rx_drop_en;
+
+ /*
+ * Allocate RX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ sizeof(struct nfp_net_rx_desc) *
+ NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
+ socket_id);
+
+ if (tz == NULL) {
+ PMD_DRV_LOG(ERR, "Error allocatig rx dma");
+ nfp_net_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ /* Saving physical and virtual addresses for the RX ring */
+ rxq->dma = (uint64_t)tz->iova;
+ rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
+
+ /* mbuf pointers array for referencing mbufs linked to RX descriptors */
+ rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
+ sizeof(*rxq->rxbufs) * nb_desc,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->rxbufs == NULL) {
+ nfp_net_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
+ rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);
+
+ nfp_net_reset_rx_queue(rxq);
+
+ dev->data->rx_queues[queue_idx] = rxq;
+ rxq->hw = hw;
+
+ /*
+ * Telling the HW about the physical address of the RX ring and number
+ * of descriptors in log2 format
+ */
+ nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
+ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
+
+ return 0;
+}
+
+static int
+nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
+{
+ struct nfp_net_rx_buff *rxe = rxq->rxbufs;
+ uint64_t dma_addr;
+ unsigned i;
+
+ PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors",
+ rxq->rx_count);
+
+ for (i = 0; i < rxq->rx_count; i++) {
+ struct nfp_net_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
+
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
+ (unsigned)rxq->qidx);
+ return -ENOMEM;
+ }
+
+ dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
+
+ rxd = &rxq->rxds[i];
+ rxd->fld.dd = 0;
+ rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
+ rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
+ rxe[i].mbuf = mbuf;
+ PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr);
+ }
+
+ /* Make sure all writes are flushed before telling the hardware */
+ rte_wmb();
+
+ /* Not advertising the whole ring as the firmware gets confused if so */
+ PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u",
+ rxq->rx_count - 1);
+
+ nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
+
+ return 0;
+}
+
+static int
+nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const struct rte_memzone *tz;
+ struct nfp_net_txq *txq;
+ uint16_t tx_free_thresh;
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Validating number of descriptors */
+ if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
+ (nb_desc > NFP_NET_MAX_TX_DESC) ||
+ (nb_desc < NFP_NET_MIN_TX_DESC)) {
+ PMD_DRV_LOG(ERR, "Wrong nb_desc value");
+ return -EINVAL;
+ }
+
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh :
+ DEFAULT_TX_FREE_THRESH);
+
+ if (tx_free_thresh > (nb_desc)) {
+ PMD_DRV_LOG(ERR,
+ "tx_free_thresh must be less than the number of TX "
+ "descriptors. (tx_free_thresh=%u port=%d "
+ "queue=%d)", (unsigned int)tx_free_thresh,
+ dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ /*
+ * Free memory prior to re-allocation if needed. This is the case after
+ * calling nfp_net_stop
+ */
+ if (dev->data->tx_queues[queue_idx]) {
+ PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+ queue_idx);
+ nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocating tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL) {
+ PMD_DRV_LOG(ERR, "Error allocating tx dma");
+ return -ENOMEM;
+ }
+
+ /*
+ * Allocate TX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ sizeof(struct nfp_net_tx_desc) *
+ NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
+ socket_id);
+ if (tz == NULL) {
+ PMD_DRV_LOG(ERR, "Error allocating tx dma");
+ nfp_net_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ txq->tx_count = nb_desc;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
+ txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
+ txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
+
+ /* queue mapping based on firmware configuration */
+ txq->qidx = queue_idx;
+ txq->tx_qcidx = queue_idx * hw->stride_tx;
+ txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
+
+ txq->port_id = dev->data->port_id;
+
+ /* Saving physical and virtual addresses for the TX ring */
+ txq->dma = (uint64_t)tz->iova;
+ txq->txds = (struct nfp_net_tx_desc *)tz->addr;
+
+ /* mbuf pointers array for referencing mbufs linked to TX descriptors */
+ txq->txbufs = rte_zmalloc_socket("txq->txbufs",
+ sizeof(*txq->txbufs) * nb_desc,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->txbufs == NULL) {
+ nfp_net_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
+ txq->txbufs, txq->txds, (unsigned long int)txq->dma);
+
+ nfp_net_reset_tx_queue(txq);
+
+ dev->data->tx_queues[queue_idx] = txq;
+ txq->hw = hw;
+
+ /*
+ * Telling the HW about the physical address of the TX ring and number
+ * of descriptors in log2 format
+ */
+ nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
+ nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
+
+ return 0;
+}
+
+/* nfp_net_tx_tso - Set TX descriptor for TSO */
+static inline void
+nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
+ struct rte_mbuf *mb)
+{
+ uint64_t ol_flags;
+ struct nfp_net_hw *hw = txq->hw;
+
+ if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
+ goto clean_txd;
+
+ ol_flags = mb->ol_flags;
+
+ if (!(ol_flags & PKT_TX_TCP_SEG))
+ goto clean_txd;
+
+ txd->l3_offset = mb->l2_len;
+ txd->l4_offset = mb->l2_len + mb->l3_len;
+ txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
+ txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
+ txd->flags = PCIE_DESC_TX_LSO;
+ return;
+
+clean_txd:
+ txd->flags = 0;
+ txd->l3_offset = 0;
+ txd->l4_offset = 0;
+ txd->lso_hdrlen = 0;
+ txd->mss = 0;
+}
+
+/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
+static inline void
+nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
+ struct rte_mbuf *mb)
+{
+ uint64_t ol_flags;
+ struct nfp_net_hw *hw = txq->hw;
+
+ if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
+ return;
+
+ ol_flags = mb->ol_flags;
+
+ /* IPv6 does not need checksum */
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ txd->flags |= PCIE_DESC_TX_IP4_CSUM;
+
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ txd->flags |= PCIE_DESC_TX_UDP_CSUM;
+ break;
+ case PKT_TX_TCP_CKSUM:
+ txd->flags |= PCIE_DESC_TX_TCP_CSUM;
+ break;
+ }
+
+ if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
+ txd->flags |= PCIE_DESC_TX_CSUM;
+}
+
+/* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
+static inline void
+nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
+ struct rte_mbuf *mb)
+{
+ struct nfp_net_hw *hw = rxq->hw;
+
+ if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
+ return;
+
+ /* If IPv4 and IP checksum error, fail */
+ if ((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
+ !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK))
+ mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ /* If neither UDP nor TCP return */
+ if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
+ !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
+ return;
+
+ if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
+ !(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK))
+ mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+
+ if ((rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) &&
+ !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK))
+ mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+}
+
+#define NFP_HASH_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
+#define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)
+
+#define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
+
+/*
+ * nfp_net_set_hash - Set mbuf hash data
+ *
+ * The RSS hash and hash-type are pre-pended to the packet data.
+ * Extract and decode it and set the mbuf fields.
+ */
+static inline void
+nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
+ struct rte_mbuf *mbuf)
+{
+ struct nfp_net_hw *hw = rxq->hw;
+ uint8_t *meta_offset;
+ uint32_t meta_info;
+ uint32_t hash = 0;
+ uint32_t hash_type = 0;
+
+ if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
+ return;
+
+ /* this is true for new firmwares */
+ if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) ||
+ (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) &&
+ NFP_DESC_META_LEN(rxd))) {
+ /*
+ * new metadata api:
+ * <---- 32 bit ----->
+ * m field type word
+ * e data field #2
+ * t data field #1
+ * a data field #0
+ * ====================
+ * packet data
+ *
+ * Field type word contains up to 8 4bit field types
+ * A 4bit field type refers to a data field word
+ * A data field word can have several 4bit field types
+ */
+ meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
+ meta_offset -= NFP_DESC_META_LEN(rxd);
+ meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
+ meta_offset += 4;
+ /* NFP PMD just supports metadata for hashing */
+ switch (meta_info & NFP_NET_META_FIELD_MASK) {
+ case NFP_NET_META_HASH:
+ /* next field type is about the hash type */
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ /* hash value is in the data field */
+ hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
+ hash_type = meta_info & NFP_NET_META_FIELD_MASK;
+ break;
+ default:
+ /* Unsupported metadata can be a performance issue */
+ return;
+ }
+ } else {
+ if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+ return;
+
+ hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
+ hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
+ }
+
+ mbuf->hash.rss = hash;
+ mbuf->ol_flags |= PKT_RX_RSS_HASH;
+
+ switch (hash_type) {
+ case NFP_NET_RSS_IPV4:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
+ break;
+ case NFP_NET_RSS_IPV6:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
+ break;
+ case NFP_NET_RSS_IPV6_EX:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+ break;
+ default:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
+ }
+}
+
+static inline void
+nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
+{
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+}
+
+#define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
+
+/*
+ * RX path design:
+ *
+ * There are some decissions to take:
+ * 1) How to check DD RX descriptors bit
+ * 2) How and when to allocate new mbufs
+ *
+ * Current implementation checks just one single DD bit each loop. As each
+ * descriptor is 8 bytes, it is likely a good idea to check descriptors in
+ * a single cache line instead. Tests with this change have not shown any
+ * performance improvement but it requires further investigation. For example,
+ * depending on which descriptor is next, the number of descriptors could be
+ * less than 8 for just checking those in the same cache line. This implies
+ * extra work which could be counterproductive by itself. Indeed, last firmware
+ * changes are just doing this: writing several descriptors with the DD bit
+ * for saving PCIe bandwidth and DMA operations from the NFP.
+ *
+ * Mbuf allocation is done when a new packet is received. Then the descriptor
+ * is automatically linked with the new mbuf and the old one is given to the
+ * user. The main drawback with this design is mbuf allocation is heavier than
+ * using bulk allocations allowed by DPDK with rte_mempool_get_bulk. From the
+ * cache point of view it does not seem allocating the mbuf early on as we are
+ * doing now have any benefit at all. Again, tests with this change have not
+ * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing
+ * so looking at the implications of this type of allocation should be studied
+ * deeply
+ */
+
+static uint16_t
+nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct nfp_net_rxq *rxq;
+ struct nfp_net_rx_desc *rxds;
+ struct nfp_net_rx_buff *rxb;
+ struct nfp_net_hw *hw;
+ struct rte_mbuf *mb;
+ struct rte_mbuf *new_mb;
+ uint16_t nb_hold;
+ uint64_t dma_addr;
+ int avail;
+
+ rxq = rx_queue;
+ if (unlikely(rxq == NULL)) {
+ /*
+ * DPDK just checks the queue is lower than max queues
+ * enabled. But the queue needs to be configured
+ */
+ RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
+ return -EINVAL;
+ }
+
+ hw = rxq->hw;
+ avail = 0;
+ nb_hold = 0;
+
+ while (avail < nb_pkts) {
+ rxb = &rxq->rxbufs[rxq->rd_p];
+ if (unlikely(rxb == NULL)) {
+ RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
+ break;
+ }
+
+ rxds = &rxq->rxds[rxq->rd_p];
+ if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+ break;
+
+ /*
+ * Memory barrier to ensure that we won't do other
+ * reads before the DD bit.
+ */
+ rte_rmb();
+
+ /*
+ * We got a packet. Let's alloc a new mbuff for refilling the
+ * free descriptor ring as soon as possible
+ */
+ new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
+ if (unlikely(new_mb == NULL)) {
+ RTE_LOG_DP(DEBUG, PMD,
+ "RX mbuf alloc failed port_id=%u queue_id=%u\n",
+ rxq->port_id, (unsigned int)rxq->qidx);
+ nfp_net_mbuf_alloc_failed(rxq);
+ break;
+ }
+
+ nb_hold++;
+
+ /*
+ * Grab the mbuff and refill the descriptor with the
+ * previously allocated mbuff
+ */
+ mb = rxb->mbuf;
+ rxb->mbuf = new_mb;
+
+ PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
+ rxds->rxd.data_len, rxq->mbuf_size);
+
+ /* Size of this segment */
+ mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
+ /* Size of the whole packet. We just support 1 segment */
+ mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
+
+ if (unlikely((mb->data_len + hw->rx_offset) >
+ rxq->mbuf_size)) {
+ /*
+ * This should not happen and the user has the
+ * responsibility of avoiding it. But we have
+ * to give some info about the error
+ */
+ RTE_LOG_DP(ERR, PMD,
+ "mbuf overflow likely due to the RX offset.\n"
+ "\t\tYour mbuf size should have extra space for"
+ " RX offset=%u bytes.\n"
+ "\t\tCurrently you just have %u bytes available"
+ " but the received packet is %u bytes long",
+ hw->rx_offset,
+ rxq->mbuf_size - hw->rx_offset,
+ mb->data_len);
+ return -EINVAL;
+ }
+
+ /* Filling the received mbuff with packet info */
+ if (hw->rx_offset)
+ mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
+ else
+ mb->data_off = RTE_PKTMBUF_HEADROOM +
+ NFP_DESC_META_LEN(rxds);
+
+ /* No scatter mode supported */
+ mb->nb_segs = 1;
+ mb->next = NULL;
+
+ mb->port = rxq->port_id;
+
+ /* Checking the RSS flag */
+ nfp_net_set_hash(rxq, rxds, mb);
+
+ /* Checking the checksum flag */
+ nfp_net_rx_cksum(rxq, rxds, mb);
+
+ if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
+ (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
+ mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
+ mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ }
+
+ /* Adding the mbuff to the mbuff array passed by the app */
+ rx_pkts[avail++] = mb;
+
+ /* Now resetting and updating the descriptor */
+ rxds->vals[0] = 0;
+ rxds->vals[1] = 0;
+ dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
+ rxds->fld.dd = 0;
+ rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
+ rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
+
+ rxq->rd_p++;
+ if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
+ rxq->rd_p = 0;
+ }
+
+ if (nb_hold == 0)
+ return nb_hold;
+
+ PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received",
+ rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
+
+ nb_hold += rxq->nb_rx_hold;
+
+ /*
+ * FL descriptors needs to be written before incrementing the
+ * FL queue WR pointer
+ */
+ rte_wmb();
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u",
+ rxq->port_id, (unsigned int)rxq->qidx,
+ (unsigned)nb_hold, (unsigned)avail);
+ nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+
+ return avail;
+}
+
+/*
+ * nfp_net_tx_free_bufs - Check for descriptors with a complete
+ * status
+ * @txq: TX queue to work with
+ * Returns number of descriptors freed
+ */
+int
+nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
+{
+ uint32_t qcp_rd_p;
+ int todo;
+
+ PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
+ " status", txq->qidx);
+
+ /* Work out how many packets have been sent */
+ qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
+
+ if (qcp_rd_p == txq->rd_p) {
+ PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
+ "packets (%u, %u)", txq->qidx,
+ qcp_rd_p, txq->rd_p);
+ return 0;
+ }
+
+ if (qcp_rd_p > txq->rd_p)
+ todo = qcp_rd_p - txq->rd_p;
+ else
+ todo = qcp_rd_p + txq->tx_count - txq->rd_p;
+
+ PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u",
+ qcp_rd_p, txq->rd_p, txq->rd_p);
+
+ if (todo == 0)
+ return todo;
+
+ txq->rd_p += todo;
+ if (unlikely(txq->rd_p >= txq->tx_count))
+ txq->rd_p -= txq->tx_count;
+
+ return todo;
+}
+
+/* Leaving always free descriptors for avoiding wrapping confusion */
+static inline
+uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
+{
+ if (txq->wr_p >= txq->rd_p)
+ return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
+ else
+ return txq->rd_p - txq->wr_p - 8;
+}
+
+/*
+ * nfp_net_txq_full - Check if the TX queue free descriptors
+ * is below tx_free_threshold
+ *
+ * @txq: TX queue to check
+ *
+ * This function uses the host copy* of read/write pointers
+ */
+static inline
+uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
+{
+ return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
+}
+
+static uint16_t
+nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct nfp_net_txq *txq;
+ struct nfp_net_hw *hw;
+ struct nfp_net_tx_desc *txds, txd;
+ struct rte_mbuf *pkt;
+ uint64_t dma_addr;
+ int pkt_size, dma_size;
+ uint16_t free_descs, issued_descs;
+ struct rte_mbuf **lmbuf;
+ int i;
+
+ txq = tx_queue;
+ hw = txq->hw;
+ txds = &txq->txds[txq->wr_p];
+
+ PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
+ txq->qidx, txq->wr_p, nb_pkts);
+
+ if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
+ nfp_net_tx_free_bufs(txq);
+
+ free_descs = (uint16_t)nfp_free_tx_desc(txq);
+ if (unlikely(free_descs == 0))
+ return 0;
+
+ pkt = *tx_pkts;
+
+ i = 0;
+ issued_descs = 0;
+ PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
+ txq->qidx, nb_pkts);
+ /* Sending packets */
+ while ((i < nb_pkts) && free_descs) {
+ /* Grabbing the mbuf linked to the current descriptor */
+ lmbuf = &txq->txbufs[txq->wr_p].mbuf;
+ /* Warming the cache for releasing the mbuf later on */
+ RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
+
+ pkt = *(tx_pkts + i);
+
+ if (unlikely((pkt->nb_segs > 1) &&
+ !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
+ PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
+ rte_panic("Multisegment packet unsupported\n");
+ }
+
+ /* Checking if we have enough descriptors */
+ if (unlikely(pkt->nb_segs > free_descs))
+ goto xmit_end;
+
+ /*
+ * Checksum and VLAN flags just in the first descriptor for a
+ * multisegment packet, but TSO info needs to be in all of them.
+ */
+ txd.data_len = pkt->pkt_len;
+ nfp_net_tx_tso(txq, &txd, pkt);
+ nfp_net_tx_cksum(txq, &txd, pkt);
+
+ if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
+ (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
+ txd.flags |= PCIE_DESC_TX_VLAN;
+ txd.vlan = pkt->vlan_tci;
+ }
+
+ /*
+ * mbuf data_len is the data in one segment and pkt_len data
+ * in the whole packet. When the packet is just one segment,
+ * then data_len = pkt_len
+ */
+ pkt_size = pkt->pkt_len;
+
+ while (pkt) {
+ /* Copying TSO, VLAN and cksum info */
+ *txds = txd;
+
+ /* Releasing mbuf used by this descriptor previously*/
+ if (*lmbuf)
+ rte_pktmbuf_free_seg(*lmbuf);
+
+ /*
+ * Linking mbuf with descriptor for being released
+ * next time descriptor is used
+ */
+ *lmbuf = pkt;
+
+ dma_size = pkt->data_len;
+ dma_addr = rte_mbuf_data_iova(pkt);
+ PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
+ "%" PRIx64 "", dma_addr);
+
+ /* Filling descriptors fields */
+ txds->dma_len = dma_size;
+ txds->data_len = txd.data_len;
+ txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
+ txds->dma_addr_lo = (dma_addr & 0xffffffff);
+ ASSERT(free_descs > 0);
+ free_descs--;
+
+ txq->wr_p++;
+ if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
+ txq->wr_p = 0;
+
+ pkt_size -= dma_size;
+
+ /*
+ * Making the EOP, packets with just one segment
+ * the priority
+ */
+ if (likely(!pkt_size))
+ txds->offset_eop = PCIE_DESC_TX_EOP;
+ else
+ txds->offset_eop = 0;
+
+ pkt = pkt->next;
+ /* Referencing next free TX descriptor */
+ txds = &txq->txds[txq->wr_p];
+ lmbuf = &txq->txbufs[txq->wr_p].mbuf;
+ issued_descs++;
+ }
+ i++;
+ }
+
+xmit_end:
+ /* Increment write pointers. Force memory write before we let HW know */
+ rte_wmb();
+ nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
+
+ return i;
+}
+
+static int
+nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ uint32_t new_ctrl, update;
+ struct nfp_net_hw *hw;
+ int ret;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ new_ctrl = 0;
+
+ if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
+ (mask & ETH_VLAN_EXTEND_OFFLOAD))
+ PMD_DRV_LOG(INFO, "No support for ETH_VLAN_FILTER_OFFLOAD or"
+ " ETH_VLAN_EXTEND_OFFLOAD");
+
+ /* Enable vlan strip if it is not configured yet */
+ if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
+ !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
+ new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
+
+ /* Disable vlan strip just if it is configured */
+ if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
+ (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
+ new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
+
+ if (new_ctrl == 0)
+ return 0;
+
+ update = NFP_NET_CFG_UPDATE_GEN;
+
+ ret = nfp_net_reconfig(hw, new_ctrl, update);
+ if (!ret)
+ hw->ctrl = new_ctrl;
+
+ return ret;
+}
+
+static int
+nfp_net_rss_reta_write(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint32_t reta, mask;
+ int i, j;
+ int idx, shift;
+ struct nfp_net_hw *hw =
+ NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
+ return -EINVAL;
+ }
+
+ /*
+ * Update Redirection Table. There are 128 8bit-entries which can be
+ * manage as 32 32bit-entries
+ */
+ for (i = 0; i < reta_size; i += 4) {
+ /* Handling 4 RSS entries per loop */
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
+
+ if (!mask)
+ continue;
+
+ reta = 0;
+ /* If all 4 entries were set, don't need read RETA register */
+ if (mask != 0xF)
+ reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
+
+ for (j = 0; j < 4; j++) {
+ if (!(mask & (0x1 << j)))
+ continue;
+ if (mask != 0xF)
+ /* Clearing the entry bits */
+ reta &= ~(0xFF << (8 * j));
+ reta |= reta_conf[idx].reta[shift + j] << (8 * j);
+ }
+ nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
+ reta);
+ }
+ return 0;
+}
+
+/* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
+static int
+nfp_net_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct nfp_net_hw *hw =
+ NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t update;
+ int ret;
+
+ if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
+ return -EINVAL;
+
+ ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
+ if (ret != 0)
+ return ret;
+
+ update = NFP_NET_CFG_UPDATE_RSS;
+
+ if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
+ return -EIO;
+
+ return 0;
+}
+
+ /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
+static int
+nfp_net_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint8_t i, j, mask;
+ int idx, shift;
+ uint32_t reta;
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
+ return -EINVAL;
+
+ if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
+ return -EINVAL;
+ }
+
+ /*
+ * Reading Redirection Table. There are 128 8bit-entries which can be
+ * manage as 32 32bit-entries
+ */
+ for (i = 0; i < reta_size; i += 4) {
+ /* Handling 4 RSS entries per loop */
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
+
+ if (!mask)
+ continue;
+
+ reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
+ shift);
+ for (j = 0; j < 4; j++) {
+ if (!(mask & (0x1 << j)))
+ continue;
+ reta_conf->reta[shift + j] =
+ (uint8_t)((reta >> (8 * j)) & 0xF);
+ }
+ }
+ return 0;
+}
+
+static int
+nfp_net_rss_hash_write(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct nfp_net_hw *hw;
+ uint64_t rss_hf;
+ uint32_t cfg_rss_ctrl = 0;
+ uint8_t key;
+ int i;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Writing the key byte a byte */
+ for (i = 0; i < rss_conf->rss_key_len; i++) {
+ memcpy(&key, &rss_conf->rss_key[i], 1);
+ nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
+ }
+
+ rss_hf = rss_conf->rss_hf;
+
+ if (rss_hf & ETH_RSS_IPV4)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
+ NFP_NET_CFG_RSS_IPV4_TCP |
+ NFP_NET_CFG_RSS_IPV4_UDP;
+
+ if (rss_hf & ETH_RSS_IPV6)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 |
+ NFP_NET_CFG_RSS_IPV6_TCP |
+ NFP_NET_CFG_RSS_IPV6_UDP;
+
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
+
+ /* configuring where to apply the RSS hash */
+ nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
+
+ /* Writing the key size */
+ nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
+
+ return 0;
+}
+
+static int
+nfp_net_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ uint32_t update;
+ uint64_t rss_hf;
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ rss_hf = rss_conf->rss_hf;
+
+ /* Checking if RSS is enabled */
+ if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
+ if (rss_hf != 0) { /* Enable RSS? */
+ PMD_DRV_LOG(ERR, "RSS unsupported");
+ return -EINVAL;
+ }
+ return 0; /* Nothing to do */
+ }
+
+ if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
+ PMD_DRV_LOG(ERR, "hash key too long");
+ return -EINVAL;
+ }
+
+ nfp_net_rss_hash_write(dev, rss_conf);
+
+ update = NFP_NET_CFG_UPDATE_RSS;
+
+ if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int
+nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ uint64_t rss_hf;
+ uint32_t cfg_rss_ctrl;
+ uint8_t key;
+ int i;
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
+ return -EINVAL;
+
+ rss_hf = rss_conf->rss_hf;
+ cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
+
+ if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
+
+ if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+
+ if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+
+ if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+
+ if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+
+ if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
+
+ /* Reading the key size */
+ rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
+
+ /* Reading the key byte a byte */
+ for (i = 0; i < rss_conf->rss_key_len; i++) {
+ key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
+ memcpy(&rss_conf->rss_key[i], &key, 1);
+ }
+
+ return 0;
+}
+
+static int
+nfp_net_rss_config_default(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rss_conf rss_conf;
+ struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
+ uint16_t rx_queues = dev->data->nb_rx_queues;
+ uint16_t queue;
+ int i, j, ret;
+
+ PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
+ rx_queues);
+
+ nfp_reta_conf[0].mask = ~0x0;
+ nfp_reta_conf[1].mask = ~0x0;
+
+ queue = 0;
+ for (i = 0; i < 0x40; i += 8) {
+ for (j = i; j < (i + 8); j++) {
+ nfp_reta_conf[0].reta[j] = queue;
+ nfp_reta_conf[1].reta[j] = queue++;
+ queue %= rx_queues;
+ }
+ }
+ ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
+ if (ret != 0)
+ return ret;
+
+ dev_conf = &dev->data->dev_conf;
+ if (!dev_conf) {
+ PMD_DRV_LOG(INFO, "wrong rss conf");
+ return -EINVAL;
+ }
+ rss_conf = dev_conf->rx_adv_conf.rss_conf;
+
+ ret = nfp_net_rss_hash_write(dev, &rss_conf);
+
+ return ret;
+}
+
+
+/* Initialise and register driver with DPDK Application */
+static const struct eth_dev_ops nfp_net_eth_dev_ops = {
+ .dev_configure = nfp_net_configure,
+ .dev_start = nfp_net_start,
+ .dev_stop = nfp_net_stop,
+ .dev_close = nfp_net_close,
+ .promiscuous_enable = nfp_net_promisc_enable,
+ .promiscuous_disable = nfp_net_promisc_disable,
+ .link_update = nfp_net_link_update,
+ .stats_get = nfp_net_stats_get,
+ .stats_reset = nfp_net_stats_reset,
+ .dev_infos_get = nfp_net_infos_get,
+ .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
+ .mtu_set = nfp_net_dev_mtu_set,
+ .mac_addr_set = nfp_set_mac_addr,
+ .vlan_offload_set = nfp_net_vlan_offload_set,
+ .reta_update = nfp_net_reta_update,
+ .reta_query = nfp_net_reta_query,
+ .rss_hash_update = nfp_net_rss_hash_update,
+ .rss_hash_conf_get = nfp_net_rss_hash_conf_get,
+ .rx_queue_setup = nfp_net_rx_queue_setup,
+ .rx_queue_release = nfp_net_rx_queue_release,
+ .rx_queue_count = nfp_net_rx_queue_count,
+ .tx_queue_setup = nfp_net_tx_queue_setup,
+ .tx_queue_release = nfp_net_tx_queue_release,
+ .rx_queue_intr_enable = nfp_rx_queue_intr_enable,
+ .rx_queue_intr_disable = nfp_rx_queue_intr_disable,
+};
+
+/*
+ * All eth_dev created got its private data, but before nfp_net_init, that
+ * private data is referencing private data for all the PF ports. This is due
+ * to how the vNIC bars are mapped based on first port, so all ports need info
+ * about port 0 private data. Inside nfp_net_init the private data pointer is
+ * changed to the right address for each port once the bars have been mapped.
+ *
+ * This functions helps to find out which port and therefore which offset
+ * inside the private data array to use.
+ */
+static int
+get_pf_port_number(char *name)
+{
+ char *pf_str = name;
+ int size = 0;
+
+ while ((*pf_str != '_') && (*pf_str != '\0') && (size++ < 30))
+ pf_str++;
+
+ if (size == 30)
+ /*
+ * This should not happen at all and it would mean major
+ * implementation fault.
+ */
+ rte_panic("nfp_net: problem with pf device name\n");
+
+ /* Expecting _portX with X within [0,7] */
+ pf_str += 5;
+
+ return (int)strtol(pf_str, NULL, 10);
+}
+
+static int
+nfp_net_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct nfp_net_hw *hw, *hwport0;
+
+ uint64_t tx_bar_off = 0, rx_bar_off = 0;
+ uint32_t start_q;
+ int stride = 4;
+ int port = 0;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
+ (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
+ port = get_pf_port_number(eth_dev->data->name);
+ if (port < 0 || port > 7) {
+ PMD_DRV_LOG(ERR, "Port value is wrong");
+ return -ENODEV;
+ }
+
+ PMD_INIT_LOG(DEBUG, "Working with PF port value %d", port);
+
+ /* This points to port 0 private data */
+ hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ /* This points to the specific port private data */
+ hw = &hwport0[port];
+ } else {
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ hwport0 = 0;
+ }
+
+ eth_dev->dev_ops = &nfp_net_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
+ eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+
+ PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
+ pci_dev->id.vendor_id, pci_dev->id.device_id,
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+
+ hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
+ if (hw->ctrl_bar == NULL) {
+ PMD_DRV_LOG(ERR,
+ "hw->ctrl_bar is NULL. BAR0 not configured");
+ return -ENODEV;
+ }
+
+ if (hw->is_pf && port == 0) {
+ hw->ctrl_bar = nfp_rtsym_map(hw->sym_tbl, "_pf0_net_bar0",
+ hw->total_ports * 32768,
+ &hw->ctrl_area);
+ if (!hw->ctrl_bar) {
+ printf("nfp_rtsym_map fails for _pf0_net_ctrl_bar");
+ return -EIO;
+ }
+
+ PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
+ }
+
+ if (port > 0) {
+ if (!hwport0->ctrl_bar)
+ return -ENODEV;
+
+ /* address based on port0 offset */
+ hw->ctrl_bar = hwport0->ctrl_bar +
+ (port * NFP_PF_CSR_SLICE_SIZE);
+ }
+
+ PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
+
+ hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
+ hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
+
+ /* Work out where in the BAR the queues start. */
+ switch (pci_dev->id.device_id) {
+ case PCI_DEVICE_ID_NFP4000_PF_NIC:
+ case PCI_DEVICE_ID_NFP6000_PF_NIC:
+ case PCI_DEVICE_ID_NFP6000_VF_NIC:
+ start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
+ tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
+ start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
+ rx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
+ err = -ENODEV;
+ goto dev_err_ctrl_map;
+ }
+
+ PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
+ PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
+
+ if (hw->is_pf && port == 0) {
+ /* configure access to tx/rx vNIC BARs */
+ hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, 0, 0,
+ NFP_PCIE_QUEUE(0),
+ NFP_QCP_QUEUE_AREA_SZ,
+ &hw->hwqueues_area);
+
+ if (!hwport0->hw_queues) {
+ printf("nfp_rtsym_map fails for net.qc");
+ err = -EIO;
+ goto dev_err_ctrl_map;
+ }
+
+ PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p",
+ hwport0->hw_queues);
+ }
+
+ if (hw->is_pf) {
+ hw->tx_bar = hwport0->hw_queues + tx_bar_off;
+ hw->rx_bar = hwport0->hw_queues + rx_bar_off;
+ eth_dev->data->dev_private = hw;
+ } else {
+ hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
+ tx_bar_off;
+ hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
+ rx_bar_off;
+ }
+
+ PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
+ hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
+
+ nfp_net_cfg_queue_setup(hw);
+
+ /* Get some of the read-only fields from the config BAR */
+ hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
+ hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
+ hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
+ hw->mtu = ETHER_MTU;
+
+ /* VLAN insertion is incompatible with LSOv2 */
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
+ hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
+
+ if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
+ hw->rx_offset = NFP_NET_RX_OFFSET;
+ else
+ hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
+
+ PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
+ NFD_CFG_MAJOR_VERSION_of(hw->ver),
+ NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);
+
+ PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
+ hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
+ hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
+ hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
+ hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
+ hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
+ hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
+ hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
+ hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
+ hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
+ hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
+ hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
+ hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
+ hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
+ hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");
+
+ hw->ctrl = 0;
+
+ hw->stride_rx = stride;
+ hw->stride_tx = stride;
+
+ PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
+ hw->max_rx_queues, hw->max_tx_queues);
+
+ /* Initializing spinlock for reconfigs */
+ rte_spinlock_init(&hw->reconfig_lock);
+
+ /* Allocating memory for mac addr */
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to space for MAC address");
+ err = -ENOMEM;
+ goto dev_err_queues_map;
+ }
+
+ if (hw->is_pf) {
+ nfp_net_pf_read_mac(hwport0, port);
+ nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
+ } else {
+ nfp_net_vf_read_mac(hw);
+ }
+
+ if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
+ PMD_INIT_LOG(INFO, "Using random mac address for port %d",
+ port);
+ /* Using random mac addresses for VFs */
+ eth_random_addr(&hw->mac_addr[0]);
+ nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
+ }
+
+ /* Copying mac address to DPDK eth_dev struct */
+ ether_addr_copy((struct ether_addr *)hw->mac_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
+ "mac=%02x:%02x:%02x:%02x:%02x:%02x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id,
+ hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
+ hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
+
+ /* Registering LSC interrupt handler */
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ nfp_net_dev_interrupt_handler,
+ (void *)eth_dev);
+
+ /* Telling the firmware about the LSC interrupt entry */
+ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+
+ /* Recording current stats counters values */
+ nfp_net_stats_reset(eth_dev);
+
+ return 0;
+
+dev_err_queues_map:
+ nfp_cpp_area_free(hw->hwqueues_area);
+dev_err_ctrl_map:
+ nfp_cpp_area_free(hw->ctrl_area);
+
+ return err;
+}
+
+static int
+nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
+ struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo,
+ int phys_port, struct nfp_rtsym_table *sym_tbl, void **priv)
+{
+ struct rte_eth_dev *eth_dev;
+ struct nfp_net_hw *hw;
+ char *port_name;
+ int ret;
+
+ port_name = rte_zmalloc("nfp_pf_port_name", 100, 0);
+ if (!port_name)
+ return -ENOMEM;
+
+ if (ports > 1)
+ sprintf(port_name, "%s_port%d", dev->device.name, port);
+ else
+ sprintf(port_name, "%s", dev->device.name);
+
+ eth_dev = rte_eth_dev_allocate(port_name);
+ if (!eth_dev)
+ return -ENOMEM;
+
+ if (port == 0) {
+ *priv = rte_zmalloc(port_name,
+ sizeof(struct nfp_net_adapter) * ports,
+ RTE_CACHE_LINE_SIZE);
+ if (!*priv) {
+ rte_eth_dev_release_port(eth_dev);
+ return -ENOMEM;
+ }
+ }
+
+ eth_dev->data->dev_private = *priv;
+
+ /*
+ * dev_private pointing to port0 dev_private because we need
+ * to configure vNIC bars based on port0 at nfp_net_init.
+ * Then dev_private is adjusted per port.
+ */
+ hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port;
+ hw->cpp = cpp;
+ hw->hwinfo = hwinfo;
+ hw->sym_tbl = sym_tbl;
+ hw->pf_port_idx = phys_port;
+ hw->is_pf = 1;
+ if (ports > 1)
+ hw->pf_multiport_enabled = 1;
+
+ hw->total_ports = ports;
+
+ eth_dev->device = &dev->device;
+ rte_eth_copy_pci_info(eth_dev, dev);
+
+ ret = nfp_net_init(eth_dev);
+
+ if (ret)
+ rte_eth_dev_release_port(eth_dev);
+ else
+ rte_eth_dev_probing_finish(eth_dev);
+
+ rte_free(port_name);
+
+ return ret;
+}
+
+#define DEFAULT_FW_PATH "/lib/firmware/netronome"
+
+static int
+nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
+{
+ struct nfp_cpp *cpp = nsp->cpp;
+ int fw_f;
+ char *fw_buf;
+ char fw_name[125];
+ char serial[40];
+ struct stat file_stat;
+ off_t fsize, bytes;
+
+ /* Looking for firmware file in order of priority */
+
+ /* First try to find a firmware image specific for this device */
+ sprintf(serial, "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
+ cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
+ cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
+ cpp->interface & 0xff);
+
+ sprintf(fw_name, "%s/%s.nffw", DEFAULT_FW_PATH, serial);
+
+ PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
+ fw_f = open(fw_name, O_RDONLY);
+ if (fw_f > 0)
+ goto read_fw;
+
+ /* Then try the PCI name */
+ sprintf(fw_name, "%s/pci-%s.nffw", DEFAULT_FW_PATH, dev->device.name);
+
+ PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
+ fw_f = open(fw_name, O_RDONLY);
+ if (fw_f > 0)
+ goto read_fw;
+
+ /* Finally try the card type and media */
+ sprintf(fw_name, "%s/%s", DEFAULT_FW_PATH, card);
+ PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
+ fw_f = open(fw_name, O_RDONLY);
+ if (fw_f < 0) {
+ PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
+ return -ENOENT;
+ }
+
+read_fw:
+ if (fstat(fw_f, &file_stat) < 0) {
+ PMD_DRV_LOG(INFO, "Firmware file %s size is unknown", fw_name);
+ close(fw_f);
+ return -ENOENT;
+ }
+
+ fsize = file_stat.st_size;
+ PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %" PRIu64 "",
+ fw_name, (uint64_t)fsize);
+
+ fw_buf = malloc((size_t)fsize);
+ if (!fw_buf) {
+ PMD_DRV_LOG(INFO, "malloc failed for fw buffer");
+ close(fw_f);
+ return -ENOMEM;
+ }
+ memset(fw_buf, 0, fsize);
+
+ bytes = read(fw_f, fw_buf, fsize);
+ if (bytes != fsize) {
+ PMD_DRV_LOG(INFO, "Reading fw to buffer failed."
+ "Just %" PRIu64 " of %" PRIu64 " bytes read",
+ (uint64_t)bytes, (uint64_t)fsize);
+ free(fw_buf);
+ close(fw_f);
+ return -EIO;
+ }
+
+ PMD_DRV_LOG(INFO, "Uploading the firmware ...");
+ nfp_nsp_load_fw(nsp, fw_buf, bytes);
+ PMD_DRV_LOG(INFO, "Done");
+
+ free(fw_buf);
+ close(fw_f);
+
+ return 0;
+}
+
+static int
+nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
+ struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
+{
+ struct nfp_nsp *nsp;
+ const char *nfp_fw_model;
+ char card_desc[100];
+ int err = 0;
+
+ nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
+
+ if (nfp_fw_model) {
+ PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
+ } else {
+ PMD_DRV_LOG(ERR, "firmware model NOT found");
+ return -EIO;
+ }
+
+ if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
+ PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
+ nfp_eth_table->count);
+ return -EIO;
+ }
+
+ PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
+ nfp_eth_table->count);
+
+ PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
+
+ sprintf(card_desc, "nic_%s_%dx%d.nffw", nfp_fw_model,
+ nfp_eth_table->count, nfp_eth_table->ports[0].speed / 1000);
+
+ nsp = nfp_nsp_open(cpp);
+ if (!nsp) {
+ PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
+ return -EIO;
+ }
+
+ nfp_nsp_device_soft_reset(nsp);
+ err = nfp_fw_upload(dev, nsp, card_desc);
+
+ nfp_nsp_close(nsp);
+ return err;
+}
+
+static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *dev)
+{
+ struct nfp_cpp *cpp;
+ struct nfp_hwinfo *hwinfo;
+ struct nfp_rtsym_table *sym_tbl;
+ struct nfp_eth_table *nfp_eth_table = NULL;
+ int total_ports;
+ void *priv = 0;
+ int ret = -ENODEV;
+ int err;
+ int i;
+
+ if (!dev)
+ return ret;
+
+ /*
+ * When device bound to UIO, the device could be used, by mistake,
+ * by two DPDK apps, and the UIO driver does not avoid it. This
+ * could lead to a serious problem when configuring the NFP CPP
+ * interface. Here we avoid this telling to the CPP init code to
+ * use a lock file if UIO is being used.
+ */
+ if (dev->kdrv == RTE_KDRV_VFIO)
+ cpp = nfp_cpp_from_device_name(dev, 0);
+ else
+ cpp = nfp_cpp_from_device_name(dev, 1);
+
+ if (!cpp) {
+ PMD_DRV_LOG(ERR, "A CPP handle can not be obtained");
+ ret = -EIO;
+ goto error;
+ }
+
+ hwinfo = nfp_hwinfo_read(cpp);
+ if (!hwinfo) {
+ PMD_DRV_LOG(ERR, "Error reading hwinfo table");
+ return -EIO;
+ }
+
+ nfp_eth_table = nfp_eth_read_ports(cpp);
+ if (!nfp_eth_table) {
+ PMD_DRV_LOG(ERR, "Error reading NFP ethernet table");
+ return -EIO;
+ }
+
+ if (nfp_fw_setup(dev, cpp, nfp_eth_table, hwinfo)) {
+ PMD_DRV_LOG(INFO, "Error when uploading firmware");
+ ret = -EIO;
+ goto error;
+ }
+
+ /* Now the symbol table should be there */
+ sym_tbl = nfp_rtsym_table_read(cpp);
+ if (!sym_tbl) {
+ PMD_DRV_LOG(ERR, "Something is wrong with the firmware"
+ " symbol table");
+ ret = -EIO;
+ goto error;
+ }
+
+ total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
+ if (total_ports != (int)nfp_eth_table->count) {
+ PMD_DRV_LOG(ERR, "Inconsistent number of ports");
+ ret = -EIO;
+ goto error;
+ }
+ PMD_INIT_LOG(INFO, "Total pf ports: %d", total_ports);
+
+ if (total_ports <= 0 || total_ports > 8) {
+ PMD_DRV_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ for (i = 0; i < total_ports; i++) {
+ ret = nfp_pf_create_dev(dev, i, total_ports, cpp, hwinfo,
+ nfp_eth_table->ports[i].index,
+ sym_tbl, &priv);
+ if (ret)
+ break;
+ }
+
+error:
+ free(nfp_eth_table);
+ return ret;
+}
+
+int nfp_logtype_init;
+int nfp_logtype_driver;
+
+static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
+ PCI_DEVICE_ID_NFP4000_PF_NIC)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
+ PCI_DEVICE_ID_NFP6000_PF_NIC)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
+ PCI_DEVICE_ID_NFP6000_VF_NIC)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct nfp_net_adapter), nfp_net_init);
+}
+
+static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ struct nfp_net_hw *hw, *hwport0;
+ int port = 0;
+
+ eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+ if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
+ (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
+ port = get_pf_port_number(eth_dev->data->name);
+ /*
+ * hotplug is not possible with multiport PF although freeing
+ * data structures can be done for first port.
+ */
+ if (port != 0)
+ return -ENOTSUP;
+ hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ hw = &hwport0[port];
+ nfp_cpp_area_free(hw->ctrl_area);
+ nfp_cpp_area_free(hw->hwqueues_area);
+ free(hw->hwinfo);
+ free(hw->sym_tbl);
+ nfp_cpp_free(hw->cpp);
+ } else {
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ }
+ /* hotplug is not possible with multiport PF */
+ if (hw->pf_multiport_enabled)
+ return -ENOTSUP;
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_nfp_net_pf_pmd = {
+ .id_table = pci_id_nfp_pf_net_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = nfp_pf_pci_probe,
+ .remove = eth_nfp_pci_remove,
+};
+
+static struct rte_pci_driver rte_nfp_net_vf_pmd = {
+ .id_table = pci_id_nfp_vf_net_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_nfp_pci_probe,
+ .remove = eth_nfp_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
+RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
+RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
+
+RTE_INIT(nfp_init_log)
+{
+ nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
+ if (nfp_logtype_init >= 0)
+ rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE);
+ nfp_logtype_driver = rte_log_register("pmd.net.nfp.driver");
+ if (nfp_logtype_driver >= 0)
+ rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE);
+}
+/*
+ * Local variables:
+ * c-file-style: "Linux"
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfp_net_ctrl.h b/src/spdk/dpdk/drivers/net/nfp/nfp_net_ctrl.h
new file mode 100644
index 00000000..21e17da1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfp_net_ctrl.h
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2014, 2015 Netronome Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * vim:shiftwidth=8:noexpandtab
+ *
+ * Netronome network device driver: Control BAR layout
+ */
+#ifndef _NFP_NET_CTRL_H_
+#define _NFP_NET_CTRL_H_
+
+/*
+ * Configuration BAR size.
+ *
+ * The configuration BAR is 8K in size, but on the NFP6000, due to
+ * THB-350, 32k needs to be reserved.
+ */
+#ifdef __NFP_IS_6000
+#define NFP_NET_CFG_BAR_SZ (32 * 1024)
+#else
+#define NFP_NET_CFG_BAR_SZ (8 * 1024)
+#endif
+
+/* Offset in Freelist buffer where packet starts on RX */
+#define NFP_NET_RX_OFFSET 32
+
+/* working with metadata api (NFD version > 3.0) */
+#define NFP_NET_META_FIELD_SIZE 4
+#define NFP_NET_META_FIELD_MASK ((1 << NFP_NET_META_FIELD_SIZE) - 1)
+
+/* Prepend field types */
+#define NFP_NET_META_HASH 1 /* next field carries hash type */
+
+/* Hash type pre-pended when a RSS hash was computed */
+#define NFP_NET_RSS_NONE 0
+#define NFP_NET_RSS_IPV4 1
+#define NFP_NET_RSS_IPV6 2
+#define NFP_NET_RSS_IPV6_EX 3
+#define NFP_NET_RSS_IPV4_TCP 4
+#define NFP_NET_RSS_IPV6_TCP 5
+#define NFP_NET_RSS_IPV6_EX_TCP 6
+#define NFP_NET_RSS_IPV4_UDP 7
+#define NFP_NET_RSS_IPV6_UDP 8
+#define NFP_NET_RSS_IPV6_EX_UDP 9
+
+/*
+ * @NFP_NET_TXR_MAX: Maximum number of TX rings
+ * @NFP_NET_TXR_MASK: Mask for TX rings
+ * @NFP_NET_RXR_MAX: Maximum number of RX rings
+ * @NFP_NET_RXR_MASK: Mask for RX rings
+ */
+#define NFP_NET_TXR_MAX 64
+#define NFP_NET_TXR_MASK (NFP_NET_TXR_MAX - 1)
+#define NFP_NET_RXR_MAX 64
+#define NFP_NET_RXR_MASK (NFP_NET_RXR_MAX - 1)
+
+/*
+ * Read/Write config words (0x0000 - 0x002c)
+ * @NFP_NET_CFG_CTRL: Global control
+ * @NFP_NET_CFG_UPDATE: Indicate which fields are updated
+ * @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
+ * @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
+ * @NFP_NET_CFG_MTU: Set MTU size
+ * @NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU)
+ * @NFP_NET_CFG_EXN: MSI-X table entry for exceptions
+ * @NFP_NET_CFG_LSC: MSI-X table entry for link state changes
+ * @NFP_NET_CFG_MACADDR: MAC address
+ *
+ * TODO:
+ * - define Error details in UPDATE
+ */
+#define NFP_NET_CFG_CTRL 0x0000
+#define NFP_NET_CFG_CTRL_ENABLE (0x1 << 0) /* Global enable */
+#define NFP_NET_CFG_CTRL_PROMISC (0x1 << 1) /* Enable Promisc mode */
+#define NFP_NET_CFG_CTRL_L2BC (0x1 << 2) /* Allow L2 Broadcast */
+#define NFP_NET_CFG_CTRL_L2MC (0x1 << 3) /* Allow L2 Multicast */
+#define NFP_NET_CFG_CTRL_RXCSUM (0x1 << 4) /* Enable RX Checksum */
+#define NFP_NET_CFG_CTRL_TXCSUM (0x1 << 5) /* Enable TX Checksum */
+#define NFP_NET_CFG_CTRL_RXVLAN (0x1 << 6) /* Enable VLAN strip */
+#define NFP_NET_CFG_CTRL_TXVLAN (0x1 << 7) /* Enable VLAN insert */
+#define NFP_NET_CFG_CTRL_SCATTER (0x1 << 8) /* Scatter DMA */
+#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */
+#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO */
+#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
+#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS */
+#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
+#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
+#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
+#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
+#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */
+#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */
+#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* Enable VXLAN */
+#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* Enable NVGRE */
+#define NFP_NET_CFG_CTRL_MSIX_TX_OFF (0x1 << 26) /* Disable MSIX for TX */
+#define NFP_NET_CFG_CTRL_LSO2 (0x1 << 28) /* LSO/TSO (version 2) */
+#define NFP_NET_CFG_CTRL_RSS2 (0x1 << 29) /* RSS (version 2) */
+#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1 << 31) /* live MAC addr change */
+#define NFP_NET_CFG_UPDATE 0x0004
+#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
+#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
+#define NFP_NET_CFG_UPDATE_RSS (0x1 << 2) /* RSS config change */
+#define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */
+#define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */
+#define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */
+#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */
+#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */
+#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
+#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
+#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
+#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */
+#define NFP_NET_CFG_TXRS_ENABLE 0x0008
+#define NFP_NET_CFG_RXRS_ENABLE 0x0010
+#define NFP_NET_CFG_MTU 0x0018
+#define NFP_NET_CFG_FLBUFSZ 0x001c
+#define NFP_NET_CFG_EXN 0x001f
+#define NFP_NET_CFG_LSC 0x0020
+#define NFP_NET_CFG_MACADDR 0x0024
+
+#define NFP_NET_CFG_CTRL_LSO_ANY (NFP_NET_CFG_CTRL_LSO | NFP_NET_CFG_CTRL_LSO2)
+
+/*
+ * Read-only words (0x0030 - 0x0050):
+ * @NFP_NET_CFG_VERSION: Firmware version number
+ * @NFP_NET_CFG_STS: Status
+ * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL)
+ * @NFP_NET_MAX_TXRINGS: Maximum number of TX rings
+ * @NFP_NET_MAX_RXRINGS: Maximum number of RX rings
+ * @NFP_NET_MAX_MTU: Maximum support MTU
+ * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
+ * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
+ *
+ * TODO:
+ * - define more STS bits
+ */
+#define NFP_NET_CFG_VERSION 0x0030
+#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xff << 24)
+#define NFP_NET_CFG_VERSION_CLASS_MASK (0xff << 16)
+#define NFP_NET_CFG_VERSION_CLASS(x) (((x) & 0xff) << 16)
+#define NFP_NET_CFG_VERSION_CLASS_GENERIC 0
+#define NFP_NET_CFG_VERSION_MAJOR_MASK (0xff << 8)
+#define NFP_NET_CFG_VERSION_MAJOR(x) (((x) & 0xff) << 8)
+#define NFP_NET_CFG_VERSION_MINOR_MASK (0xff << 0)
+#define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0)
+#define NFP_NET_CFG_STS 0x0034
+#define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */
+/* Link rate */
+#define NFP_NET_CFG_STS_LINK_RATE_SHIFT 1
+#define NFP_NET_CFG_STS_LINK_RATE_MASK 0xF
+#define NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED 0
+#define NFP_NET_CFG_STS_LINK_RATE_UNKNOWN 1
+#define NFP_NET_CFG_STS_LINK_RATE_1G 2
+#define NFP_NET_CFG_STS_LINK_RATE_10G 3
+#define NFP_NET_CFG_STS_LINK_RATE_25G 4
+#define NFP_NET_CFG_STS_LINK_RATE_40G 5
+#define NFP_NET_CFG_STS_LINK_RATE_50G 6
+#define NFP_NET_CFG_STS_LINK_RATE_100G 7
+#define NFP_NET_CFG_CAP 0x0038
+#define NFP_NET_CFG_MAX_TXRINGS 0x003c
+#define NFP_NET_CFG_MAX_RXRINGS 0x0040
+#define NFP_NET_CFG_MAX_MTU 0x0044
+/* Next two words are being used by VFs for solving THB350 issue */
+#define NFP_NET_CFG_START_TXQ 0x0048
+#define NFP_NET_CFG_START_RXQ 0x004c
+
+/*
+ * NFP-3200 workaround (0x0050 - 0x0058)
+ * @NFP_NET_CFG_SPARE_ADDR: DMA address for ME code to use (e.g. YDS-155 fix)
+ */
+#define NFP_NET_CFG_SPARE_ADDR 0x0050
+/**
+ * NFP6000/NFP4000 - Prepend configuration
+ */
+#define NFP_NET_CFG_RX_OFFSET 0x0050
+#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */
+
+/**
+ * Reuse spare address to contain the offset from the start of
+ * the host buffer where the first byte of the received frame
+ * will land. Any metadata will come prior to that offset. If the
+ * value in this field is 0, it means that that the metadata will
+ * always land starting at the first byte of the host buffer and
+ * packet data will immediately follow the metadata. As always,
+ * the RX descriptor indicates the presence or absence of metadata
+ * along with the length thereof.
+ */
+#define NFP_NET_CFG_RX_OFFSET_ADDR 0x0050
+
+#define NFP_NET_CFG_VXLAN_PORT 0x0060
+#define NFP_NET_CFG_VXLAN_SZ 0x0008
+
+/* Offload definitions */
+#define NFP_NET_N_VXLAN_PORTS (NFP_NET_CFG_VXLAN_SZ / sizeof(uint16_t))
+
+/**
+ * 64B reserved for future use (0x0080 - 0x00c0)
+ */
+#define NFP_NET_CFG_RESERVED 0x0080
+#define NFP_NET_CFG_RESERVED_SZ 0x0040
+
+/*
+ * RSS configuration (0x0100 - 0x01ac):
+ * Used only when NFP_NET_CFG_CTRL_RSS is enabled
+ * @NFP_NET_CFG_RSS_CFG: RSS configuration word
+ * @NFP_NET_CFG_RSS_KEY: RSS "secret" key
+ * @NFP_NET_CFG_RSS_ITBL: RSS indirection table
+ */
+#define NFP_NET_CFG_RSS_BASE 0x0100
+#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE
+#define NFP_NET_CFG_RSS_MASK (0x7f)
+#define NFP_NET_CFG_RSS_MASK_of(_x) ((_x) & 0x7f)
+#define NFP_NET_CFG_RSS_IPV4 (1 << 8) /* RSS for IPv4 */
+#define NFP_NET_CFG_RSS_IPV6 (1 << 9) /* RSS for IPv6 */
+#define NFP_NET_CFG_RSS_IPV4_TCP (1 << 10) /* RSS for IPv4/TCP */
+#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */
+#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */
+#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */
+#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */
+#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4)
+#define NFP_NET_CFG_RSS_KEY_SZ 0x28
+#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \
+ NFP_NET_CFG_RSS_KEY_SZ)
+#define NFP_NET_CFG_RSS_ITBL_SZ 0x80
+
+/*
+ * TX ring configuration (0x200 - 0x800)
+ * @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
+ * @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
+ * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
+ * @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries)
+ * @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries)
+ * @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries)
+ * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation (4B entries)
+ */
+#define NFP_NET_CFG_TXR_BASE 0x0200
+#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8))
+#define NFP_NET_CFG_TXR_WB_ADDR(_x) (NFP_NET_CFG_TXR_BASE + 0x200 + \
+ ((_x) * 0x8))
+#define NFP_NET_CFG_TXR_SZ(_x) (NFP_NET_CFG_TXR_BASE + 0x400 + (_x))
+#define NFP_NET_CFG_TXR_VEC(_x) (NFP_NET_CFG_TXR_BASE + 0x440 + (_x))
+#define NFP_NET_CFG_TXR_PRIO(_x) (NFP_NET_CFG_TXR_BASE + 0x480 + (_x))
+#define NFP_NET_CFG_TXR_IRQ_MOD(_x) (NFP_NET_CFG_TXR_BASE + 0x500 + \
+ ((_x) * 0x4))
+
+/*
+ * RX ring configuration (0x0800 - 0x0c00)
+ * @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
+ * @NFP_NET_CFG_RXR_ADDR: Per TX ring DMA address (8B entries)
+ * @NFP_NET_CFG_RXR_SZ: Per TX ring ring size (1B entries)
+ * @NFP_NET_CFG_RXR_VEC: Per TX ring MSI-X table entry (1B entries)
+ * @NFP_NET_CFG_RXR_PRIO: Per TX ring priority (1B entries)
+ * @NFP_NET_CFG_RXR_IRQ_MOD: Per TX ring interrupt moderation (4B entries)
+ */
+#define NFP_NET_CFG_RXR_BASE 0x0800
+#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8))
+#define NFP_NET_CFG_RXR_SZ(_x) (NFP_NET_CFG_RXR_BASE + 0x200 + (_x))
+#define NFP_NET_CFG_RXR_VEC(_x) (NFP_NET_CFG_RXR_BASE + 0x240 + (_x))
+#define NFP_NET_CFG_RXR_PRIO(_x) (NFP_NET_CFG_RXR_BASE + 0x280 + (_x))
+#define NFP_NET_CFG_RXR_IRQ_MOD(_x) (NFP_NET_CFG_RXR_BASE + 0x300 + \
+ ((_x) * 0x4))
+
+/*
+ * Interrupt Control/Cause registers (0x0c00 - 0x0d00)
+ * These registers are only used when MSI-X auto-masking is not
+ * enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index
+ * by MSI-X entry and are 1B in size. If an entry is zero, the
+ * corresponding entry is enabled. If the FW generates an interrupt,
+ * it writes a cause into the corresponding field. This also masks
+ * the MSI-X entry and the host driver must clear the register to
+ * re-enable the interrupt.
+ */
+#define NFP_NET_CFG_ICR_BASE 0x0c00
+#define NFP_NET_CFG_ICR(_x) (NFP_NET_CFG_ICR_BASE + (_x))
+#define NFP_NET_CFG_ICR_UNMASKED 0x0
+#define NFP_NET_CFG_ICR_RXTX 0x1
+#define NFP_NET_CFG_ICR_LSC 0x2
+
+/*
+ * General device stats (0x0d00 - 0x0d90)
+ * all counters are 64bit.
+ */
+#define NFP_NET_CFG_STATS_BASE 0x0d00
+#define NFP_NET_CFG_STATS_RX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x00)
+#define NFP_NET_CFG_STATS_RX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x08)
+#define NFP_NET_CFG_STATS_RX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x10)
+#define NFP_NET_CFG_STATS_RX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x18)
+#define NFP_NET_CFG_STATS_RX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x20)
+#define NFP_NET_CFG_STATS_RX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x28)
+#define NFP_NET_CFG_STATS_RX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x30)
+#define NFP_NET_CFG_STATS_RX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x38)
+#define NFP_NET_CFG_STATS_RX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x40)
+
+#define NFP_NET_CFG_STATS_TX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x48)
+#define NFP_NET_CFG_STATS_TX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x50)
+#define NFP_NET_CFG_STATS_TX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x58)
+#define NFP_NET_CFG_STATS_TX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x60)
+#define NFP_NET_CFG_STATS_TX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x68)
+#define NFP_NET_CFG_STATS_TX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x70)
+#define NFP_NET_CFG_STATS_TX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x78)
+#define NFP_NET_CFG_STATS_TX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x80)
+#define NFP_NET_CFG_STATS_TX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x88)
+
+/*
+ * Per ring stats (0x1000 - 0x1800)
+ * options, 64bit per entry
+ * @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
+ * @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
+ */
+#define NFP_NET_CFG_TXR_STATS_BASE 0x1000
+#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \
+ ((_x) * 0x10))
+#define NFP_NET_CFG_RXR_STATS_BASE 0x1400
+#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \
+ ((_x) * 0x10))
+
+/* PF multiport offset */
+#define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
+
+#endif /* _NFP_NET_CTRL_H_ */
+/*
+ * Local variables:
+ * c-file-style: "Linux"
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfp_net_logs.h b/src/spdk/dpdk/drivers/net/nfp/nfp_net_logs.h
new file mode 100644
index 00000000..9952881c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfp_net_logs.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014, 2015 Netronome Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NFP_NET_LOGS_H_
+#define _NFP_NET_LOGS_H_
+
+#include <rte_log.h>
+
+extern int nfp_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, nfp_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_NFP_NET_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() rx: " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_NFP_NET_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() tx: " fmt "\n", __func__, ## args)
+#define ASSERT(x) if (!(x)) rte_panic("NFP_NET: x")
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#define ASSERT(x) do { } while (0)
+#endif
+
+extern int nfp_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, nfp_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#endif /* _NFP_NET_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfp_net_pmd.h b/src/spdk/dpdk/drivers/net/nfp/nfp_net_pmd.h
new file mode 100644
index 00000000..c1b044ee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfp_net_pmd.h
@@ -0,0 +1,472 @@
+/*
+ * Copyright (c) 2014-2018 Netronome Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * vim:shiftwidth=8:noexpandtab
+ *
+ * @file dpdk/pmd/nfp_net_pmd.h
+ *
+ * Netronome NFP_NET PDM driver
+ */
+
+#ifndef _NFP_NET_PMD_H_
+#define _NFP_NET_PMD_H_
+
+#define NFP_NET_PMD_VERSION "0.1"
+#define PCI_VENDOR_ID_NETRONOME 0x19ee
+#define PCI_DEVICE_ID_NFP4000_PF_NIC 0x4000
+#define PCI_DEVICE_ID_NFP6000_PF_NIC 0x6000
+#define PCI_DEVICE_ID_NFP6000_VF_NIC 0x6003
+
+/* Forward declaration */
+struct nfp_net_adapter;
+
+/*
+ * The maximum number of descriptors is limited by design as
+ * DPDK uses uint16_t variables for these values
+ */
+#define NFP_NET_MAX_TX_DESC (32 * 1024)
+#define NFP_NET_MIN_TX_DESC 64
+
+#define NFP_NET_MAX_RX_DESC (32 * 1024)
+#define NFP_NET_MIN_RX_DESC 64
+
+/* Bar allocation */
+#define NFP_NET_CRTL_BAR 0
+#define NFP_NET_TX_BAR 2
+#define NFP_NET_RX_BAR 2
+#define NFP_QCP_QUEUE_AREA_SZ 0x80000
+
+/* Macros for accessing the Queue Controller Peripheral 'CSRs' */
+#define NFP_QCP_QUEUE_OFF(_x) ((_x) * 0x800)
+#define NFP_QCP_QUEUE_ADD_RPTR 0x0000
+#define NFP_QCP_QUEUE_ADD_WPTR 0x0004
+#define NFP_QCP_QUEUE_STS_LO 0x0008
+#define NFP_QCP_QUEUE_STS_LO_READPTR_mask (0x3ffff)
+#define NFP_QCP_QUEUE_STS_HI 0x000c
+#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask (0x3ffff)
+
+/* Interrupt definitions */
+#define NFP_NET_IRQ_LSC_IDX 0
+
+/* Default values for RX/TX configuration */
+#define DEFAULT_RX_FREE_THRESH 32
+#define DEFAULT_RX_PTHRESH 8
+#define DEFAULT_RX_HTHRESH 8
+#define DEFAULT_RX_WTHRESH 0
+
+#define DEFAULT_TX_RS_THRESH 32
+#define DEFAULT_TX_FREE_THRESH 32
+#define DEFAULT_TX_PTHRESH 32
+#define DEFAULT_TX_HTHRESH 0
+#define DEFAULT_TX_WTHRESH 0
+#define DEFAULT_TX_RSBIT_THRESH 32
+
+/* Alignment for dma zones */
+#define NFP_MEMZONE_ALIGN 128
+
+/*
+ * This is used by the reconfig protocol. It sets the maximum time waiting in
+ * milliseconds before a reconfig timeout happens.
+ */
+#define NFP_NET_POLL_TIMEOUT 5000
+
+#define NFP_QCP_QUEUE_ADDR_SZ (0x800)
+
+#define NFP_NET_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
+#define NFP_NET_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
+
+/* Version number helper defines */
+#define NFD_CFG_CLASS_VER_msk 0xff
+#define NFD_CFG_CLASS_VER_shf 24
+#define NFD_CFG_CLASS_VER(x) (((x) & 0xff) << 24)
+#define NFD_CFG_CLASS_VER_of(x) (((x) >> 24) & 0xff)
+#define NFD_CFG_CLASS_TYPE_msk 0xff
+#define NFD_CFG_CLASS_TYPE_shf 16
+#define NFD_CFG_CLASS_TYPE(x) (((x) & 0xff) << 16)
+#define NFD_CFG_CLASS_TYPE_of(x) (((x) >> 16) & 0xff)
+#define NFD_CFG_MAJOR_VERSION_msk 0xff
+#define NFD_CFG_MAJOR_VERSION_shf 8
+#define NFD_CFG_MAJOR_VERSION(x) (((x) & 0xff) << 8)
+#define NFD_CFG_MAJOR_VERSION_of(x) (((x) >> 8) & 0xff)
+#define NFD_CFG_MINOR_VERSION_msk 0xff
+#define NFD_CFG_MINOR_VERSION_shf 0
+#define NFD_CFG_MINOR_VERSION(x) (((x) & 0xff) << 0)
+#define NFD_CFG_MINOR_VERSION_of(x) (((x) >> 0) & 0xff)
+
+#include <linux/types.h>
+#include <rte_io.h>
+
+static inline uint8_t nn_readb(volatile const void *addr)
+{
+ return rte_read8(addr);
+}
+
+static inline void nn_writeb(uint8_t val, volatile void *addr)
+{
+ rte_write8(val, addr);
+}
+
+static inline uint32_t nn_readl(volatile const void *addr)
+{
+ return rte_read32(addr);
+}
+
+static inline void nn_writel(uint32_t val, volatile void *addr)
+{
+ rte_write32(val, addr);
+}
+
+static inline void nn_writew(uint16_t val, volatile void *addr)
+{
+ rte_write16(val, addr);
+}
+
+static inline uint64_t nn_readq(volatile void *addr)
+{
+ const volatile uint32_t *p = addr;
+ uint32_t low, high;
+
+ high = nn_readl((volatile const void *)(p + 1));
+ low = nn_readl((volatile const void *)p);
+
+ return low + ((uint64_t)high << 32);
+}
+
+static inline void nn_writeq(uint64_t val, volatile void *addr)
+{
+ nn_writel(val >> 32, (volatile char *)addr + 4);
+ nn_writel(val, addr);
+}
+
+/* TX descriptor format */
+#define PCIE_DESC_TX_EOP (1 << 7)
+#define PCIE_DESC_TX_OFFSET_MASK (0x7f)
+
+/* Flags in the host TX descriptor */
+#define PCIE_DESC_TX_CSUM (1 << 7)
+#define PCIE_DESC_TX_IP4_CSUM (1 << 6)
+#define PCIE_DESC_TX_TCP_CSUM (1 << 5)
+#define PCIE_DESC_TX_UDP_CSUM (1 << 4)
+#define PCIE_DESC_TX_VLAN (1 << 3)
+#define PCIE_DESC_TX_LSO (1 << 2)
+#define PCIE_DESC_TX_ENCAP_NONE (0)
+#define PCIE_DESC_TX_ENCAP_VXLAN (1 << 1)
+#define PCIE_DESC_TX_ENCAP_GRE (1 << 0)
+
+struct nfp_net_tx_desc {
+ union {
+ struct {
+ uint8_t dma_addr_hi; /* High bits of host buf address */
+ __le16 dma_len; /* Length to DMA for this desc */
+ uint8_t offset_eop; /* Offset in buf where pkt starts +
+ * highest bit is eop flag.
+ */
+ __le32 dma_addr_lo; /* Low 32bit of host buf addr */
+
+ __le16 mss; /* MSS to be used for LSO */
+ uint8_t lso_hdrlen; /* LSO, where the data starts */
+ uint8_t flags; /* TX Flags, see @PCIE_DESC_TX_* */
+
+ union {
+ struct {
+ /*
+ * L3 and L4 header offsets required
+ * for TSOv2
+ */
+ uint8_t l3_offset;
+ uint8_t l4_offset;
+ };
+ __le16 vlan; /* VLAN tag to add if indicated */
+ };
+ __le16 data_len; /* Length of frame + meta data */
+ } __attribute__((__packed__));
+ __le32 vals[4];
+ };
+};
+
+struct nfp_net_txq {
+ struct nfp_net_hw *hw; /* Backpointer to nfp_net structure */
+
+ /*
+ * Queue information: @qidx is the queue index from Linux's
+ * perspective. @tx_qcidx is the index of the Queue
+ * Controller Peripheral queue relative to the TX queue BAR.
+ * @cnt is the size of the queue in number of
+ * descriptors. @qcp_q is a pointer to the base of the queue
+ * structure on the NFP
+ */
+ uint8_t *qcp_q;
+
+ /*
+ * Read and Write pointers. @wr_p and @rd_p are host side pointer,
+ * they are free running and have little relation to the QCP pointers *
+ * @qcp_rd_p is a local copy queue controller peripheral read pointer
+ */
+
+ uint32_t wr_p;
+ uint32_t rd_p;
+
+ uint32_t tx_count;
+
+ uint32_t tx_free_thresh;
+
+ /*
+ * For each descriptor keep a reference to the mbuff and
+ * DMA address used until completion is signalled.
+ */
+ struct {
+ struct rte_mbuf *mbuf;
+ } *txbufs;
+
+ /*
+ * Information about the host side queue location. @txds is
+ * the virtual address for the queue, @dma is the DMA address
+ * of the queue and @size is the size in bytes for the queue
+ * (needed for free)
+ */
+ struct nfp_net_tx_desc *txds;
+
+ /*
+ * At this point 48 bytes have been used for all the fields in the
+ * TX critical path. We have room for 8 bytes and still all placed
+ * in a cache line. We are not using the threshold values below but
+ * if we need to, we can add the most used in the remaining bytes.
+ */
+ uint32_t tx_rs_thresh; /* not used by now. Future? */
+ uint32_t tx_pthresh; /* not used by now. Future? */
+ uint32_t tx_hthresh; /* not used by now. Future? */
+ uint32_t tx_wthresh; /* not used by now. Future? */
+ uint16_t port_id;
+ int qidx;
+ int tx_qcidx;
+ __le64 dma;
+} __attribute__ ((__aligned__(64)));
+
+/* RX and freelist descriptor format */
+#define PCIE_DESC_RX_DD (1 << 7)
+#define PCIE_DESC_RX_META_LEN_MASK (0x7f)
+
+/* Flags in the RX descriptor */
+#define PCIE_DESC_RX_RSS (1 << 15)
+#define PCIE_DESC_RX_I_IP4_CSUM (1 << 14)
+#define PCIE_DESC_RX_I_IP4_CSUM_OK (1 << 13)
+#define PCIE_DESC_RX_I_TCP_CSUM (1 << 12)
+#define PCIE_DESC_RX_I_TCP_CSUM_OK (1 << 11)
+#define PCIE_DESC_RX_I_UDP_CSUM (1 << 10)
+#define PCIE_DESC_RX_I_UDP_CSUM_OK (1 << 9)
+#define PCIE_DESC_RX_SPARE (1 << 8)
+#define PCIE_DESC_RX_EOP (1 << 7)
+#define PCIE_DESC_RX_IP4_CSUM (1 << 6)
+#define PCIE_DESC_RX_IP4_CSUM_OK (1 << 5)
+#define PCIE_DESC_RX_TCP_CSUM (1 << 4)
+#define PCIE_DESC_RX_TCP_CSUM_OK (1 << 3)
+#define PCIE_DESC_RX_UDP_CSUM (1 << 2)
+#define PCIE_DESC_RX_UDP_CSUM_OK (1 << 1)
+#define PCIE_DESC_RX_VLAN (1 << 0)
+
+struct nfp_net_rx_desc {
+ union {
+ /* Freelist descriptor */
+ struct {
+ uint8_t dma_addr_hi;
+ __le16 spare;
+ uint8_t dd;
+
+ __le32 dma_addr_lo;
+ } __attribute__((__packed__)) fld;
+
+ /* RX descriptor */
+ struct {
+ __le16 data_len;
+ uint8_t reserved;
+ uint8_t meta_len_dd;
+
+ __le16 flags;
+ __le16 vlan;
+ } __attribute__((__packed__)) rxd;
+
+ __le32 vals[2];
+ };
+};
+
+struct nfp_net_rx_buff {
+ struct rte_mbuf *mbuf;
+};
+
+struct nfp_net_rxq {
+ struct nfp_net_hw *hw; /* Backpointer to nfp_net structure */
+
+ /*
+ * @qcp_fl and @qcp_rx are pointers to the base addresses of the
+ * freelist and RX queue controller peripheral queue structures on the
+ * NFP
+ */
+ uint8_t *qcp_fl;
+ uint8_t *qcp_rx;
+
+ /*
+ * Read and Write pointers. @wr_p and @rd_p are host side
+ * pointer, they are free running and have little relation to
+ * the QCP pointers. @wr_p is where the driver adds new
+ * freelist descriptors and @rd_p is where the driver start
+ * reading descriptors for newly arrive packets from.
+ */
+ uint32_t rd_p;
+
+ /*
+ * For each buffer placed on the freelist, record the
+ * associated SKB
+ */
+ struct nfp_net_rx_buff *rxbufs;
+
+ /*
+ * Information about the host side queue location. @rxds is
+ * the virtual address for the queue
+ */
+ struct nfp_net_rx_desc *rxds;
+
+ /*
+ * The mempool is created by the user specifying a mbuf size.
+ * We save here the reference of the mempool needed in the RX
+ * path and the mbuf size for checking received packets can be
+ * safely copied to the mbuf using the NFP_NET_RX_OFFSET
+ */
+ struct rte_mempool *mem_pool;
+ uint16_t mbuf_size;
+
+ /*
+ * Next two fields are used for giving more free descriptors
+ * to the NFP
+ */
+ uint16_t rx_free_thresh;
+ uint16_t nb_rx_hold;
+
+ /* the size of the queue in number of descriptors */
+ uint16_t rx_count;
+
+ /*
+ * Fields above this point fit in a single cache line and are all used
+ * in the RX critical path. Fields below this point are just used
+ * during queue configuration or not used at all (yet)
+ */
+
+ /* referencing dev->data->port_id */
+ uint16_t port_id;
+
+ uint8_t crc_len; /* Not used by now */
+ uint8_t drop_en; /* Not used by now */
+
+ /* DMA address of the queue */
+ __le64 dma;
+
+ /*
+ * Queue information: @qidx is the queue index from Linux's
+ * perspective. @fl_qcidx is the index of the Queue
+ * Controller peripheral queue relative to the RX queue BAR
+ * used for the freelist and @rx_qcidx is the Queue Controller
+ * Peripheral index for the RX queue.
+ */
+ int qidx;
+ int fl_qcidx;
+ int rx_qcidx;
+} __attribute__ ((__aligned__(64)));
+
+struct nfp_net_hw {
+ /* Info from the firmware */
+ uint32_t ver;
+ uint32_t cap;
+ uint32_t max_mtu;
+ uint32_t mtu;
+ uint32_t rx_offset;
+
+ /* Current values for control */
+ uint32_t ctrl;
+
+ uint8_t *ctrl_bar;
+ uint8_t *tx_bar;
+ uint8_t *rx_bar;
+
+ int stride_rx;
+ int stride_tx;
+
+ uint8_t *qcp_cfg;
+ rte_spinlock_t reconfig_lock;
+
+ uint32_t max_tx_queues;
+ uint32_t max_rx_queues;
+ uint16_t flbufsz;
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t subsystem_device_id;
+ uint16_t subsystem_vendor_id;
+#if defined(DSTQ_SELECTION)
+#if DSTQ_SELECTION
+ uint16_t device_function;
+#endif
+#endif
+
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+
+ /* Records starting point for counters */
+ struct rte_eth_stats eth_stats_base;
+
+ struct nfp_cpp *cpp;
+ struct nfp_cpp_area *ctrl_area;
+ struct nfp_cpp_area *hwqueues_area;
+ struct nfp_cpp_area *msix_area;
+
+ uint8_t *hw_queues;
+ uint8_t is_pf;
+ uint8_t pf_port_idx;
+ uint8_t pf_multiport_enabled;
+ uint8_t total_ports;
+
+ union eth_table_entry *eth_table;
+
+ struct nfp_hwinfo *hwinfo;
+ struct nfp_rtsym_table *sym_tbl;
+};
+
+struct nfp_net_adapter {
+ struct nfp_net_hw hw;
+};
+
+#define NFP_NET_DEV_PRIVATE_TO_HW(adapter)\
+ (&((struct nfp_net_adapter *)adapter)->hw)
+
+#endif /* _NFP_NET_PMD_H_ */
+/*
+ * Local variables:
+ * c-file-style: "Linux"
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
new file mode 100644
index 00000000..6e380cca
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
@@ -0,0 +1,722 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_CPPAT_H__
+#define __NFP_CPPAT_H__
+
+#include "nfp_platform.h"
+#include "nfp_resid.h"
+
+/* This file contains helpers for creating CPP commands
+ *
+ * All magic NFP-6xxx IMB 'mode' numbers here are from:
+ * Databook (1 August 2013)
+ * - System Overview and Connectivity
+ * -- Internal Connectivity
+ * --- Distributed Switch Fabric - Command Push/Pull (DSF-CPP) Bus
+ * ---- CPP addressing
+ * ----- Table 3.6. CPP Address Translation Mode Commands
+ */
+
+#define _NIC_NFP6000_MU_LOCALITY_DIRECT 2
+
+static inline int
+_nfp6000_decode_basic(uint64_t addr, int *dest_island, int cpp_tgt, int mode,
+ int addr40, int isld1, int isld0);
+
+static uint64_t
+_nic_mask64(int msb, int lsb, int at0)
+{
+ uint64_t v;
+ int w = msb - lsb + 1;
+
+ if (w == 64)
+ return ~(uint64_t)0;
+
+ if ((lsb + w) > 64)
+ return 0;
+
+ v = (UINT64_C(1) << w) - 1;
+
+ if (at0)
+ return v;
+
+ return v << lsb;
+}
+
+/* For VQDR, we may not modify the Channel bits, which might overlap
+ * with the Index bit. When it does, we need to ensure that isld0 == isld1.
+ */
+static inline int
+_nfp6000_encode_basic(uint64_t *addr, int dest_island, int cpp_tgt, int mode,
+ int addr40, int isld1, int isld0)
+{
+ uint64_t _u64;
+ int iid_lsb, idx_lsb;
+ int i, v = 0;
+ int isld[2];
+
+ isld[0] = isld0;
+ isld[1] = isld1;
+
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_MU:
+ /* This function doesn't handle MU */
+ return NFP_ERRNO(EINVAL);
+ case NFP6000_CPPTGT_CTXPB:
+ /* This function doesn't handle CTXPB */
+ return NFP_ERRNO(EINVAL);
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case 0:
+ if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) {
+ /*
+ * In this specific mode we'd rather not modify the
+ * address but we can verify if the existing contents
+ * will point to a valid island.
+ */
+ i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode,
+ addr40, isld1,
+ isld0);
+ if (i != 0)
+ /* Full Island ID and channel bits overlap */
+ return i;
+
+ /*
+ * If dest_island is invalid, the current address won't
+ * go where expected.
+ */
+ if (dest_island != -1 && dest_island != v)
+ return NFP_ERRNO(EINVAL);
+
+ /* If dest_island was -1, we don't care */
+ return 0;
+ }
+
+ iid_lsb = (addr40) ? 34 : 26;
+
+ /* <39:34> or <31:26> */
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ case 1:
+ if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) {
+ i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode,
+ addr40, isld1, isld0);
+ if (i != 0)
+ /* Full Island ID and channel bits overlap */
+ return i;
+
+ /*
+ * If dest_island is invalid, the current address won't
+ * go where expected.
+ */
+ if (dest_island != -1 && dest_island != v)
+ return NFP_ERRNO(EINVAL);
+
+ /* If dest_island was -1, we don't care */
+ return 0;
+ }
+
+ idx_lsb = (addr40) ? 39 : 31;
+ if (dest_island == isld0) {
+ /* Only need to clear the Index bit */
+ *addr &= ~_nic_mask64(idx_lsb, idx_lsb, 0);
+ return 0;
+ }
+
+ if (dest_island == isld1) {
+ /* Only need to set the Index bit */
+ *addr |= (UINT64_C(1) << idx_lsb);
+ return 0;
+ }
+
+ return NFP_ERRNO(ENODEV);
+ case 2:
+ if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) {
+ /* iid<0> = addr<30> = channel<0> */
+ /* channel<1> = addr<31> = Index */
+
+ /*
+ * Special case where we allow channel bits to be set
+ * before hand and with them select an island.
+ * So we need to confirm that it's at least plausible.
+ */
+ i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode,
+ addr40, isld1, isld0);
+ if (i != 0)
+ /* Full Island ID and channel bits overlap */
+ return i;
+
+ /*
+ * If dest_island is invalid, the current address won't
+ * go where expected.
+ */
+ if (dest_island != -1 && dest_island != v)
+ return NFP_ERRNO(EINVAL);
+
+ /* If dest_island was -1, we don't care */
+ return 0;
+ }
+
+ /*
+ * Make sure we compare against isldN values by clearing the
+ * LSB. This is what the silicon does.
+ **/
+ isld[0] &= ~1;
+ isld[1] &= ~1;
+
+ idx_lsb = (addr40) ? 39 : 31;
+ iid_lsb = idx_lsb - 1;
+
+ /*
+ * Try each option, take first one that fits. Not sure if we
+ * would want to do some smarter searching and prefer 0 or non-0
+ * island IDs.
+ */
+
+ for (i = 0; i < 2; i++) {
+ for (v = 0; v < 2; v++) {
+ if (dest_island != (isld[i] | v))
+ continue;
+ *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0);
+ *addr |= (((uint64_t)i) << idx_lsb);
+ *addr |= (((uint64_t)v) << iid_lsb);
+ return 0;
+ }
+ }
+
+ return NFP_ERRNO(ENODEV);
+ case 3:
+ if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) {
+ /*
+ * iid<0> = addr<29> = data
+ * iid<1> = addr<30> = channel<0>
+ * channel<1> = addr<31> = Index
+ */
+ i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode,
+ addr40, isld1, isld0);
+ if (i != 0)
+ /* Full Island ID and channel bits overlap */
+ return i;
+
+ if (dest_island != -1 && dest_island != v)
+ return NFP_ERRNO(EINVAL);
+
+ /* If dest_island was -1, we don't care */
+ return 0;
+ }
+
+ isld[0] &= ~3;
+ isld[1] &= ~3;
+
+ idx_lsb = (addr40) ? 39 : 31;
+ iid_lsb = idx_lsb - 2;
+
+ for (i = 0; i < 2; i++) {
+ for (v = 0; v < 4; v++) {
+ if (dest_island != (isld[i] | v))
+ continue;
+ *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0);
+ *addr |= (((uint64_t)i) << idx_lsb);
+ *addr |= (((uint64_t)v) << iid_lsb);
+ return 0;
+ }
+ }
+ return NFP_ERRNO(ENODEV);
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_decode_basic(uint64_t addr, int *dest_island, int cpp_tgt, int mode,
+ int addr40, int isld1, int isld0)
+{
+ int iid_lsb, idx_lsb;
+
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_MU:
+ /* This function doesn't handle MU */
+ return NFP_ERRNO(EINVAL);
+ case NFP6000_CPPTGT_CTXPB:
+ /* This function doesn't handle CTXPB */
+ return NFP_ERRNO(EINVAL);
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case 0:
+ /*
+ * For VQDR, in this mode for 32-bit addressing it would be
+ * islands 0, 16, 32 and 48 depending on channel and upper
+ * address bits. Since those are not all valid islands, most
+ * decode cases would result in bad island IDs, but we do them
+ * anyway since this is decoding an address that is already
+ * assumed to be used as-is to get to sram.
+ */
+ iid_lsb = (addr40) ? 34 : 26;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ case 1:
+ /*
+ * For VQDR 32-bit, this would decode as:
+ * Channel 0: island#0
+ * Channel 1: island#0
+ * Channel 2: island#1
+ * Channel 3: island#1
+ *
+ * That would be valid as long as both islands have VQDR.
+ * Let's allow this.
+ */
+
+ idx_lsb = (addr40) ? 39 : 31;
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1;
+ else
+ *dest_island = isld0;
+
+ return 0;
+ case 2:
+ /*
+ * For VQDR 32-bit:
+ * Channel 0: (island#0 | 0)
+ * Channel 1: (island#0 | 1)
+ * Channel 2: (island#1 | 0)
+ * Channel 3: (island#1 | 1)
+ *
+ * Make sure we compare against isldN values by clearing the
+ * LSB. This is what the silicon does.
+ */
+ isld0 &= ~1;
+ isld1 &= ~1;
+
+ idx_lsb = (addr40) ? 39 : 31;
+ iid_lsb = idx_lsb - 1;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 1);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 1);
+
+ return 0;
+ case 3:
+ /*
+ * In this mode the data address starts to affect the island ID
+ * so rather not allow it. In some really specific case one
+ * could use this to send the upper half of the VQDR channel to
+ * another MU, but this is getting very specific. However, as
+ * above for mode 0, this is the decoder and the caller should
+ * validate the resulting IID. This blindly does what the
+ * silicon would do.
+ */
+
+ isld0 &= ~3;
+ isld1 &= ~3;
+
+ idx_lsb = (addr40) ? 39 : 31;
+ iid_lsb = idx_lsb - 2;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 3);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 3);
+
+ return 0;
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_cppat_mu_locality_lsb(int mode, int addr40)
+{
+ switch (mode) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ return (addr40) ? 38 : 30;
+ default:
+ break;
+ }
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_encode_mu(uint64_t *addr, int dest_island, int mode, int addr40,
+ int isld1, int isld0)
+{
+ uint64_t _u64;
+ int iid_lsb, idx_lsb, locality_lsb;
+ int i, v;
+ int isld[2];
+ int da;
+
+ isld[0] = isld0;
+ isld[1] = isld1;
+ locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40);
+
+ if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT)
+ da = 1;
+ else
+ da = 0;
+
+ switch (mode) {
+ case 0:
+ iid_lsb = (addr40) ? 32 : 24;
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ case 1:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ }
+
+ idx_lsb = (addr40) ? 37 : 29;
+ if (dest_island == isld0) {
+ *addr &= ~_nic_mask64(idx_lsb, idx_lsb, 0);
+ return 0;
+ }
+
+ if (dest_island == isld1) {
+ *addr |= (UINT64_C(1) << idx_lsb);
+ return 0;
+ }
+
+ return NFP_ERRNO(ENODEV);
+ case 2:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ }
+
+ /*
+ * Make sure we compare against isldN values by clearing the
+ * LSB. This is what the silicon does.
+ */
+ isld[0] &= ~1;
+ isld[1] &= ~1;
+
+ idx_lsb = (addr40) ? 37 : 29;
+ iid_lsb = idx_lsb - 1;
+
+ /*
+ * Try each option, take first one that fits. Not sure if we
+ * would want to do some smarter searching and prefer 0 or
+ * non-0 island IDs.
+ */
+
+ for (i = 0; i < 2; i++) {
+ for (v = 0; v < 2; v++) {
+ if (dest_island != (isld[i] | v))
+ continue;
+ *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0);
+ *addr |= (((uint64_t)i) << idx_lsb);
+ *addr |= (((uint64_t)v) << iid_lsb);
+ return 0;
+ }
+ }
+ return NFP_ERRNO(ENODEV);
+ case 3:
+ /*
+ * Only the EMU will use 40 bit addressing. Silently set the
+ * direct locality bit for everyone else. The SDK toolchain
+ * uses dest_island <= 0 to test for atypical address encodings
+ * to support access to local-island CTM with a 32-but address
+ * (high-locality is effectively ignored and just used for
+ * routing to island #0).
+ */
+ if (dest_island > 0 &&
+ (dest_island < 24 || dest_island > 26)) {
+ *addr |= ((uint64_t)_NIC_NFP6000_MU_LOCALITY_DIRECT)
+ << locality_lsb;
+ da = 1;
+ }
+
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ }
+
+ isld[0] &= ~3;
+ isld[1] &= ~3;
+
+ idx_lsb = (addr40) ? 37 : 29;
+ iid_lsb = idx_lsb - 2;
+
+ for (i = 0; i < 2; i++) {
+ for (v = 0; v < 4; v++) {
+ if (dest_island != (isld[i] | v))
+ continue;
+ *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0);
+ *addr |= (((uint64_t)i) << idx_lsb);
+ *addr |= (((uint64_t)v) << iid_lsb);
+ return 0;
+ }
+ }
+
+ return NFP_ERRNO(ENODEV);
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_decode_mu(uint64_t addr, int *dest_island, int mode, int addr40,
+ int isld1, int isld0)
+{
+ int iid_lsb, idx_lsb, locality_lsb;
+ int da;
+
+ locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40);
+
+ if (((addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT)
+ da = 1;
+ else
+ da = 0;
+
+ switch (mode) {
+ case 0:
+ iid_lsb = (addr40) ? 32 : 24;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ case 1:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ }
+
+ idx_lsb = (addr40) ? 37 : 29;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1;
+ else
+ *dest_island = isld0;
+
+ return 0;
+ case 2:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ }
+ /*
+ * Make sure we compare against isldN values by clearing the
+ * LSB. This is what the silicon does.
+ */
+ isld0 &= ~1;
+ isld1 &= ~1;
+
+ idx_lsb = (addr40) ? 37 : 29;
+ iid_lsb = idx_lsb - 1;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 1);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 1);
+
+ return 0;
+ case 3:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ }
+
+ isld0 &= ~3;
+ isld1 &= ~3;
+
+ idx_lsb = (addr40) ? 37 : 29;
+ iid_lsb = idx_lsb - 2;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 3);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 3);
+
+ return 0;
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_cppat_addr_encode(uint64_t *addr, int dest_island, int cpp_tgt,
+ int mode, int addr40, int isld1, int isld0)
+{
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_NBI:
+ case NFP6000_CPPTGT_VQDR:
+ case NFP6000_CPPTGT_ILA:
+ case NFP6000_CPPTGT_PCIE:
+ case NFP6000_CPPTGT_ARM:
+ case NFP6000_CPPTGT_CRYPTO:
+ case NFP6000_CPPTGT_CLS:
+ return _nfp6000_encode_basic(addr, dest_island, cpp_tgt, mode,
+ addr40, isld1, isld0);
+
+ case NFP6000_CPPTGT_MU:
+ return _nfp6000_encode_mu(addr, dest_island, mode, addr40,
+ isld1, isld0);
+
+ case NFP6000_CPPTGT_CTXPB:
+ if (mode != 1 || addr40 != 0)
+ return NFP_ERRNO(EINVAL);
+
+ *addr &= ~_nic_mask64(29, 24, 0);
+ *addr |= (((uint64_t)dest_island) << 24) &
+ _nic_mask64(29, 24, 0);
+ return 0;
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_cppat_addr_decode(uint64_t addr, int *dest_island, int cpp_tgt,
+ int mode, int addr40, int isld1, int isld0)
+{
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_NBI:
+ case NFP6000_CPPTGT_VQDR:
+ case NFP6000_CPPTGT_ILA:
+ case NFP6000_CPPTGT_PCIE:
+ case NFP6000_CPPTGT_ARM:
+ case NFP6000_CPPTGT_CRYPTO:
+ case NFP6000_CPPTGT_CLS:
+ return _nfp6000_decode_basic(addr, dest_island, cpp_tgt, mode,
+ addr40, isld1, isld0);
+
+ case NFP6000_CPPTGT_MU:
+ return _nfp6000_decode_mu(addr, dest_island, mode, addr40,
+ isld1, isld0);
+
+ case NFP6000_CPPTGT_CTXPB:
+ if (mode != 1 || addr40 != 0)
+ return -EINVAL;
+ *dest_island = (int)(addr >> 24) & 0x3F;
+ return 0;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+_nfp6000_cppat_addr_iid_clear(uint64_t *addr, int cpp_tgt, int mode, int addr40)
+{
+ int iid_lsb, locality_lsb, da;
+
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_NBI:
+ case NFP6000_CPPTGT_VQDR:
+ case NFP6000_CPPTGT_ILA:
+ case NFP6000_CPPTGT_PCIE:
+ case NFP6000_CPPTGT_ARM:
+ case NFP6000_CPPTGT_CRYPTO:
+ case NFP6000_CPPTGT_CLS:
+ switch (mode) {
+ case 0:
+ iid_lsb = (addr40) ? 34 : 26;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ case 1:
+ iid_lsb = (addr40) ? 39 : 31;
+ *addr &= ~_nic_mask64(iid_lsb, iid_lsb, 0);
+ return 0;
+ case 2:
+ iid_lsb = (addr40) ? 38 : 30;
+ *addr &= ~_nic_mask64(iid_lsb + 1, iid_lsb, 0);
+ return 0;
+ case 3:
+ iid_lsb = (addr40) ? 37 : 29;
+ *addr &= ~_nic_mask64(iid_lsb + 2, iid_lsb, 0);
+ return 0;
+ default:
+ break;
+ }
+ case NFP6000_CPPTGT_MU:
+ locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40);
+ da = (((*addr >> locality_lsb) & 3) ==
+ _NIC_NFP6000_MU_LOCALITY_DIRECT);
+ switch (mode) {
+ case 0:
+ iid_lsb = (addr40) ? 32 : 24;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ case 1:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ }
+ iid_lsb = (addr40) ? 37 : 29;
+ *addr &= ~_nic_mask64(iid_lsb, iid_lsb, 0);
+ return 0;
+ case 2:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ }
+
+ iid_lsb = (addr40) ? 36 : 28;
+ *addr &= ~_nic_mask64(iid_lsb + 1, iid_lsb, 0);
+ return 0;
+ case 3:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ }
+
+ iid_lsb = (addr40) ? 35 : 27;
+ *addr &= ~_nic_mask64(iid_lsb + 2, iid_lsb, 0);
+ return 0;
+ default:
+ break;
+ }
+ case NFP6000_CPPTGT_CTXPB:
+ if (mode != 1 || addr40 != 0)
+ return 0;
+ *addr &= ~(UINT64_C(0x3F) << 24);
+ return 0;
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+#endif /* __NFP_CPPAT_H__ */
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
new file mode 100644
index 00000000..d46574b1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_PLATFORM_H__
+#define __NFP_PLATFORM_H__
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <limits.h>
+#include <errno.h>
+
+#ifndef BIT_ULL
+#define BIT(x) (1 << (x))
+#define BIT_ULL(x) (1ULL << (x))
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#define NFP_ERRNO(err) (errno = (err), -1)
+#define NFP_ERRNO_RET(err, ret) (errno = (err), (ret))
+#define NFP_NOERR(errv) (errno)
+#define NFP_ERRPTR(err) (errno = (err), NULL)
+#define NFP_PTRERR(errv) (errno)
+
+#endif /* __NFP_PLATFORM_H__ */
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h
new file mode 100644
index 00000000..0e03948e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h
@@ -0,0 +1,592 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_RESID_H__
+#define __NFP_RESID_H__
+
+#if (!defined(_NFP_RESID_NO_C_FUNC) && \
+ (defined(__NFP_TOOL_NFCC) || defined(__NFP_TOOL_NFAS)))
+#define _NFP_RESID_NO_C_FUNC
+#endif
+
+#ifndef _NFP_RESID_NO_C_FUNC
+#include "nfp_platform.h"
+#endif
+
+/*
+ * NFP Chip Architectures
+ *
+ * These are semi-arbitrary values to indicate an NFP architecture.
+ * They serve as a software view of a group of chip families, not necessarily a
+ * direct mapping to actual hardware design.
+ */
+#define NFP_CHIP_ARCH_YD 1
+#define NFP_CHIP_ARCH_TH 2
+
+/*
+ * NFP Chip Families.
+ *
+ * These are not enums, because they need to be microcode compatible.
+ * They are also not maskable.
+ *
+ * Note: The NFP-4xxx family is handled as NFP-6xxx in most software
+ * components.
+ *
+ */
+#define NFP_CHIP_FAMILY_NFP6000 0x6000 /* ARCH_TH */
+
+/* NFP Microengine/Flow Processing Core Versions */
+#define NFP_CHIP_ME_VERSION_2_7 0x0207
+#define NFP_CHIP_ME_VERSION_2_8 0x0208
+#define NFP_CHIP_ME_VERSION_2_9 0x0209
+
+/* NFP Chip Base Revisions. Minor stepping can just be added to these */
+#define NFP_CHIP_REVISION_A0 0x00
+#define NFP_CHIP_REVISION_B0 0x10
+#define NFP_CHIP_REVISION_C0 0x20
+#define NFP_CHIP_REVISION_PF 0xff /* Maximum possible revision */
+
+/* CPP Targets for each chip architecture */
+#define NFP6000_CPPTGT_NBI 1
+#define NFP6000_CPPTGT_VQDR 2
+#define NFP6000_CPPTGT_ILA 6
+#define NFP6000_CPPTGT_MU 7
+#define NFP6000_CPPTGT_PCIE 9
+#define NFP6000_CPPTGT_ARM 10
+#define NFP6000_CPPTGT_CRYPTO 12
+#define NFP6000_CPPTGT_CTXPB 14
+#define NFP6000_CPPTGT_CLS 15
+
+/*
+ * Wildcard indicating a CPP read or write action
+ *
+ * The action used will be either read or write depending on whether a read or
+ * write instruction/call is performed on the NFP_CPP_ID. It is recomended that
+ * the RW action is used even if all actions to be performed on a NFP_CPP_ID are
+ * known to be only reads or writes. Doing so will in many cases save NFP CPP
+ * internal software resources.
+ */
+#define NFP_CPP_ACTION_RW 32
+
+#define NFP_CPP_TARGET_ID_MASK 0x1f
+
+/*
+ * NFP_CPP_ID - pack target, token, and action into a CPP ID.
+ *
+ * Create a 32-bit CPP identifier representing the access to be made.
+ * These identifiers are used as parameters to other NFP CPP functions. Some
+ * CPP devices may allow wildcard identifiers to be specified.
+ *
+ * @param[in] target NFP CPP target id
+ * @param[in] action NFP CPP action id
+ * @param[in] token NFP CPP token id
+ * @return NFP CPP ID
+ */
+#define NFP_CPP_ID(target, action, token) \
+ ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \
+ (((action) & 0xff) << 8))
+
+#define NFP_CPP_ISLAND_ID(target, action, token, island) \
+ ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \
+ (((action) & 0xff) << 8) | (((island) & 0xff) << 0))
+
+#ifndef _NFP_RESID_NO_C_FUNC
+
+/**
+ * Return the NFP CPP target of a NFP CPP ID
+ * @param[in] id NFP CPP ID
+ * @return NFP CPP target
+ */
+static inline uint8_t
+NFP_CPP_ID_TARGET_of(uint32_t id)
+{
+ return (id >> 24) & NFP_CPP_TARGET_ID_MASK;
+}
+
+/*
+ * Return the NFP CPP token of a NFP CPP ID
+ * @param[in] id NFP CPP ID
+ * @return NFP CPP token
+ */
+static inline uint8_t
+NFP_CPP_ID_TOKEN_of(uint32_t id)
+{
+ return (id >> 16) & 0xff;
+}
+
+/*
+ * Return the NFP CPP action of a NFP CPP ID
+ * @param[in] id NFP CPP ID
+ * @return NFP CPP action
+ */
+static inline uint8_t
+NFP_CPP_ID_ACTION_of(uint32_t id)
+{
+ return (id >> 8) & 0xff;
+}
+
+/*
+ * Return the NFP CPP action of a NFP CPP ID
+ * @param[in] id NFP CPP ID
+ * @return NFP CPP action
+ */
+static inline uint8_t
+NFP_CPP_ID_ISLAND_of(uint32_t id)
+{
+ return (id) & 0xff;
+}
+
+#endif /* _NFP_RESID_NO_C_FUNC */
+
+/*
+ * Check if @p chip_family is an ARCH_TH chip.
+ * @param chip_family One of NFP_CHIP_FAMILY_*
+ */
+#define NFP_FAMILY_IS_ARCH_TH(chip_family) \
+ ((int)(chip_family) == (int)NFP_CHIP_FAMILY_NFP6000)
+
+/*
+ * Get the NFP_CHIP_ARCH_* of @p chip_family.
+ * @param chip_family One of NFP_CHIP_FAMILY_*
+ */
+#define NFP_FAMILY_ARCH(x) \
+ (__extension__ ({ \
+ typeof(x) _x = (x); \
+ (NFP_FAMILY_IS_ARCH_TH(_x) ? NFP_CHIP_ARCH_TH : \
+ NFP_FAMILY_IS_ARCH_YD(_x) ? NFP_CHIP_ARCH_YD : -1) \
+ }))
+
+/*
+ * Check if @p chip_family is an NFP-6xxx chip.
+ * @param chip_family One of NFP_CHIP_FAMILY_*
+ */
+#define NFP_FAMILY_IS_NFP6000(chip_family) \
+ ((int)(chip_family) == (int)NFP_CHIP_FAMILY_NFP6000)
+
+/*
+ * Make microengine ID for NFP-6xxx.
+ * @param island_id Island ID.
+ * @param menum ME number, 0 based, within island.
+ *
+ * NOTE: menum should really be unsigned - MSC compiler throws error (not
+ * warning) if a clause is always true i.e. menum >= 0 if cluster_num is type
+ * unsigned int hence the cast of the menum to an int in that particular clause
+ */
+#define NFP6000_MEID(a, b) \
+ (__extension__ ({ \
+ typeof(a) _a = (a); \
+ typeof(b) _b = (b); \
+ (((((int)(_a) & 0x3F) == (int)(_a)) && \
+ (((int)(_b) >= 0) && ((int)(_b) < 12))) ? \
+ (int)(((_a) << 4) | ((_b) + 4)) : -1) \
+ }))
+
+/*
+ * Do a general sanity check on the ME ID.
+ * The check is on the highest possible island ID for the chip family and the
+ * microengine number must be a master ID.
+ * @param meid ME ID as created by NFP6000_MEID
+ */
+#define NFP6000_MEID_IS_VALID(meid) \
+ (__extension__ ({ \
+ typeof(meid) _a = (meid); \
+ ((((_a) >> 4) < 64) && (((_a) >> 4) >= 0) && \
+ (((_a) & 0xF) >= 4)) \
+ }))
+
+/*
+ * Extract island ID from ME ID.
+ * @param meid ME ID as created by NFP6000_MEID
+ */
+#define NFP6000_MEID_ISLAND_of(meid) (((meid) >> 4) & 0x3F)
+
+/*
+ * Extract microengine number (0 based) from ME ID.
+ * @param meid ME ID as created by NFP6000_MEID
+ */
+#define NFP6000_MEID_MENUM_of(meid) (((meid) & 0xF) - 4)
+
+/*
+ * Extract microengine group number (0 based) from ME ID.
+ * The group is two code-sharing microengines, so group 0 refers to MEs 0,1,
+ * group 1 refers to MEs 2,3 etc.
+ * @param meid ME ID as created by NFP6000_MEID
+ */
+#define NFP6000_MEID_MEGRP_of(meid) (NFP6000_MEID_MENUM_of(meid) >> 1)
+
+#ifndef _NFP_RESID_NO_C_FUNC
+
+/*
+ * Convert a string to an ME ID.
+ *
+ * @param s A string of format iX.meY
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the ME ID part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return ME ID on success, -1 on error.
+ */
+int nfp6000_idstr2meid(const char *s, const char **endptr);
+
+/*
+ * Extract island ID from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp6000_idstr2island("i32.me5", &c);
+ * // val == 32, c == "me5"
+ * val = nfp6000_idstr2island("i32", &c);
+ * // val == 32, c == ""
+ *
+ * @param s A string of format "iX.anything" or "iX"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the island part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the island ID, -1 on error.
+ */
+int nfp6000_idstr2island(const char *s, const char **endptr);
+
+/*
+ * Extract microengine number from string.
+ *
+ * Example:
+ * char *c;
+ * int menum = nfp6000_idstr2menum("me5.anything", &c);
+ * // menum == 5, c == "anything"
+ * menum = nfp6000_idstr2menum("me5", &c);
+ * // menum == 5, c == ""
+ *
+ * @param s A string of format "meX.anything" or "meX"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the ME number part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the ME number, -1 on error.
+ */
+int nfp6000_idstr2menum(const char *s, const char **endptr);
+
+/*
+ * Extract context number from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp6000_idstr2ctxnum("ctx5.anything", &c);
+ * // val == 5, c == "anything"
+ * val = nfp6000_idstr2ctxnum("ctx5", &c);
+ * // val == 5, c == ""
+ *
+ * @param s A string of format "ctxN.anything" or "ctxN"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the context number part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the context number, -1 on error.
+ */
+int nfp6000_idstr2ctxnum(const char *s, const char **endptr);
+
+/*
+ * Extract microengine group number from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp6000_idstr2megrp("tg2.anything", &c);
+ * // val == 2, c == "anything"
+ * val = nfp6000_idstr2megrp("tg5", &c);
+ * // val == 2, c == ""
+ *
+ * @param s A string of format "tgX.anything" or "tgX"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the ME group part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the ME group number, -1 on error.
+ */
+int nfp6000_idstr2megrp(const char *s, const char **endptr);
+
+/*
+ * Create ME ID string of format "iX[.meY]".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param meid Microengine ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_meid2str(char *s, int meid);
+
+/*
+ * Create ME ID string of format "name[.meY]" or "iX[.meY]".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param meid Microengine ID.
+ * @return Pointer to "s" on success, NULL on error.
+ *
+ * Similar to nfp6000_meid2str() except use an alias instead of "iX"
+ * if one exists for the island.
+ */
+const char *nfp6000_meid2altstr(char *s, int meid);
+
+/*
+ * Create string of format "iX".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param island_id Island ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_island2str(char *s, int island_id);
+
+/*
+ * Create string of format "name", an island alias.
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param island_id Island ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_island2altstr(char *s, int island_id);
+
+/*
+ * Create string of format "meY".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param menum Microengine number within island.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_menum2str(char *s, int menum);
+
+/*
+ * Create string of format "ctxY".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param ctxnum Context number within microengine.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_ctxnum2str(char *s, int ctxnum);
+
+/*
+ * Create string of format "tgY".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param megrp Microengine group number within cluster.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_megrp2str(char *s, int megrp);
+
+/*
+ * Convert a string to an ME ID.
+ *
+ * @param chip_family Chip family ID
+ * @param s A string of format iX.meY (or clX.meY)
+ * @param endptr If non-NULL, *endptr will point to the trailing
+ * string after the ME ID part of the string, which
+ * is either an empty string or the first character
+ * after the separating period.
+ * @return ME ID on success, -1 on error.
+ */
+int nfp_idstr2meid(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Extract island ID from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp_idstr2island(chip, "i32.me5", &c);
+ * // val == 32, c == "me5"
+ * val = nfp_idstr2island(chip, "i32", &c);
+ * // val == 32, c == ""
+ *
+ * @param chip_family Chip family ID
+ * @param s A string of format "iX.anything" or "iX"
+ * @param endptr If non-NULL, *endptr will point to the trailing
+ * striong after the ME ID part of the string, which
+ * is either an empty string or the first character
+ * after the separating period.
+ * @return The island ID on succes, -1 on error.
+ */
+int nfp_idstr2island(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Extract microengine number from string.
+ *
+ * Example:
+ * char *c;
+ * int menum = nfp_idstr2menum("me5.anything", &c);
+ * // menum == 5, c == "anything"
+ * menum = nfp_idstr2menum("me5", &c);
+ * // menum == 5, c == ""
+ *
+ * @param chip_family Chip family ID
+ * @param s A string of format "meX.anything" or "meX"
+ * @param endptr If non-NULL, *endptr will point to the trailing
+ * striong after the ME ID part of the string, which
+ * is either an empty string or the first character
+ * after the separating period.
+ * @return The ME number on succes, -1 on error.
+ */
+int nfp_idstr2menum(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Extract context number from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp_idstr2ctxnum("ctx5.anything", &c);
+ * // val == 5, c == "anything"
+ * val = nfp_idstr2ctxnum("ctx5", &c);
+ * // val == 5, c == ""
+ *
+ * @param s A string of format "ctxN.anything" or "ctxN"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the context number part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the context number, -1 on error.
+ */
+int nfp_idstr2ctxnum(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Extract microengine group number from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp_idstr2megrp("tg2.anything", &c);
+ * // val == 2, c == "anything"
+ * val = nfp_idstr2megrp("tg5", &c);
+ * // val == 5, c == ""
+ *
+ * @param s A string of format "tgX.anything" or "tgX"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the ME group part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the ME group number, -1 on error.
+ */
+int nfp_idstr2megrp(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Create ME ID string of format "iX[.meY]".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param meid Microengine ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_meid2str(int chip_family, char *s, int meid);
+
+/*
+ * Create ME ID string of format "name[.meY]" or "iX[.meY]".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param meid Microengine ID.
+ * @return Pointer to "s" on success, NULL on error.
+ *
+ * Similar to nfp_meid2str() except use an alias instead of "iX"
+ * if one exists for the island.
+ */
+const char *nfp_meid2altstr(int chip_family, char *s, int meid);
+
+/*
+ * Create string of format "iX".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param island_id Island ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_island2str(int chip_family, char *s, int island_id);
+
+/*
+ * Create string of format "name", an island alias.
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param island_id Island ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_island2altstr(int chip_family, char *s, int island_id);
+
+/*
+ * Create string of format "meY".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param menum Microengine number within island.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_menum2str(int chip_family, char *s, int menum);
+
+/*
+ * Create string of format "ctxY".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param ctxnum Context number within microengine.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_ctxnum2str(int chip_family, char *s, int ctxnum);
+
+/*
+ * Create string of format "tgY".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param megrp Microengine group number within cluster.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_megrp2str(int chip_family, char *s, int megrp);
+
+/*
+ * Convert a two character string to revision number.
+ *
+ * Revision integer is 0x00 for A0, 0x11 for B1 etc.
+ *
+ * @param s Two character string.
+ * @return Revision number, -1 on error
+ */
+int nfp_idstr2rev(const char *s);
+
+/*
+ * Create string from revision number.
+ *
+ * String will be upper case.
+ *
+ * @param s Pointer to char buffer with size of at least 3
+ * for 2 characters and string terminator.
+ * @param rev Revision number.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_rev2str(char *s, int rev);
+
+/*
+ * Get the NFP CPP address from a string
+ *
+ * String is in the format [island@]target[:[action:[token:]]address]
+ *
+ * @param chip_family Chip family ID
+ * @param tid Pointer to string to parse
+ * @param cpp_idp Pointer to CPP ID
+ * @param cpp_addrp Pointer to CPP address
+ * @return 0 on success, or -1 and errno
+ */
+int nfp_str2cpp(int chip_family,
+ const char *tid,
+ uint32_t *cpp_idp,
+ uint64_t *cpp_addrp);
+
+
+#endif /* _NFP_RESID_NO_C_FUNC */
+
+#endif /* __NFP_RESID_H__ */
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h
new file mode 100644
index 00000000..47e1ddae
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_NFP6000_H__
+#define __NFP_NFP6000_H__
+
+/* CPP Target IDs */
+#define NFP_CPP_TARGET_INVALID 0
+#define NFP_CPP_TARGET_NBI 1
+#define NFP_CPP_TARGET_QDR 2
+#define NFP_CPP_TARGET_ILA 6
+#define NFP_CPP_TARGET_MU 7
+#define NFP_CPP_TARGET_PCIE 9
+#define NFP_CPP_TARGET_ARM 10
+#define NFP_CPP_TARGET_CRYPTO 12
+#define NFP_CPP_TARGET_ISLAND_XPB 14 /* Shared with CAP */
+#define NFP_CPP_TARGET_ISLAND_CAP 14 /* Shared with XPB */
+#define NFP_CPP_TARGET_CT_XPB 14
+#define NFP_CPP_TARGET_LOCAL_SCRATCH 15
+#define NFP_CPP_TARGET_CLS NFP_CPP_TARGET_LOCAL_SCRATCH
+
+#define NFP_ISL_EMEM0 24
+
+#define NFP_MU_ADDR_ACCESS_TYPE_MASK 3ULL
+#define NFP_MU_ADDR_ACCESS_TYPE_DIRECT 2ULL
+
+static inline int
+nfp_cppat_mu_locality_lsb(int mode, int addr40)
+{
+ switch (mode) {
+ case 0 ... 3:
+ return addr40 ? 38 : 30;
+ default:
+ return -EINVAL;
+ }
+}
+
+#endif /* NFP_NFP6000_H */
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h
new file mode 100644
index 00000000..7ada1bb2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_XPB_H__
+#define __NFP_XPB_H__
+
+/*
+ * For use with NFP6000 Databook "XPB Addressing" section
+ */
+#define NFP_XPB_OVERLAY(island) (((island) & 0x3f) << 24)
+
+#define NFP_XPB_ISLAND(island) (NFP_XPB_OVERLAY(island) + 0x60000)
+
+#define NFP_XPB_ISLAND_of(offset) (((offset) >> 24) & 0x3F)
+
+/*
+ * For use with NFP6000 Databook "XPB Island and Device IDs" chapter
+ */
+#define NFP_XPB_DEVICE(island, slave, device) \
+ (NFP_XPB_OVERLAY(island) | \
+ (((slave) & 3) << 22) | \
+ (((device) & 0x3f) << 16))
+
+#endif /* NFP_XPB_H */
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h
new file mode 100644
index 00000000..1427954c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h
@@ -0,0 +1,781 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_CPP_H__
+#define __NFP_CPP_H__
+
+#include <rte_ethdev_pci.h>
+
+#include "nfp-common/nfp_platform.h"
+#include "nfp-common/nfp_resid.h"
+
+struct nfp_cpp_mutex;
+
+/*
+ * NFP CPP handle
+ */
+struct nfp_cpp {
+ uint32_t model;
+ uint32_t interface;
+ uint8_t *serial;
+ int serial_len;
+ void *priv;
+
+ /* Mutex cache */
+ struct nfp_cpp_mutex *mutex_cache;
+ const struct nfp_cpp_operations *op;
+
+ /*
+ * NFP-6xxx originating island IMB CPP Address Translation. CPP Target
+ * ID is index into array. Values are obtained at runtime from local
+ * island XPB CSRs.
+ */
+ uint32_t imb_cat_table[16];
+
+ int driver_lock_needed;
+};
+
+/*
+ * NFP CPP device area handle
+ */
+struct nfp_cpp_area {
+ struct nfp_cpp *cpp;
+ char *name;
+ unsigned long long offset;
+ unsigned long size;
+ /* Here follows the 'priv' part of nfp_cpp_area. */
+};
+
+/*
+ * NFP CPP operations structure
+ */
+struct nfp_cpp_operations {
+ /* Size of priv area in struct nfp_cpp_area */
+ size_t area_priv_size;
+
+ /* Instance an NFP CPP */
+ int (*init)(struct nfp_cpp *cpp, struct rte_pci_device *dev);
+
+ /*
+ * Free the bus.
+ * Called only once, during nfp_cpp_unregister()
+ */
+ void (*free)(struct nfp_cpp *cpp);
+
+ /*
+ * Initialize a new NFP CPP area
+ * NOTE: This is _not_ serialized
+ */
+ int (*area_init)(struct nfp_cpp_area *area,
+ uint32_t dest,
+ unsigned long long address,
+ unsigned long size);
+ /*
+ * Clean up a NFP CPP area before it is freed
+ * NOTE: This is _not_ serialized
+ */
+ void (*area_cleanup)(struct nfp_cpp_area *area);
+
+ /*
+ * Acquire resources for a NFP CPP area
+ * Serialized
+ */
+ int (*area_acquire)(struct nfp_cpp_area *area);
+ /*
+ * Release resources for a NFP CPP area
+ * Serialized
+ */
+ void (*area_release)(struct nfp_cpp_area *area);
+ /*
+ * Return a void IO pointer to a NFP CPP area
+ * NOTE: This is _not_ serialized
+ */
+
+ void *(*area_iomem)(struct nfp_cpp_area *area);
+
+ void *(*area_mapped)(struct nfp_cpp_area *area);
+ /*
+ * Perform a read from a NFP CPP area
+ * Serialized
+ */
+ int (*area_read)(struct nfp_cpp_area *area,
+ void *kernel_vaddr,
+ unsigned long offset,
+ unsigned int length);
+ /*
+ * Perform a write to a NFP CPP area
+ * Serialized
+ */
+ int (*area_write)(struct nfp_cpp_area *area,
+ const void *kernel_vaddr,
+ unsigned long offset,
+ unsigned int length);
+};
+
+/*
+ * This should be the only external function the transport
+ * module supplies
+ */
+const struct nfp_cpp_operations *nfp_cpp_transport_operations(void);
+
+/*
+ * Set the model id
+ *
+ * @param cpp NFP CPP operations structure
+ * @param model Model ID
+ */
+void nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model);
+
+/*
+ * Set the private instance owned data of a nfp_cpp struct
+ *
+ * @param cpp NFP CPP operations structure
+ * @param interface Interface ID
+ */
+void nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface);
+
+/*
+ * Set the private instance owned data of a nfp_cpp struct
+ *
+ * @param cpp NFP CPP operations structure
+ * @param serial NFP serial byte array
+ * @param len Length of the serial byte array
+ */
+int nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial,
+ size_t serial_len);
+
+/*
+ * Set the private data of the nfp_cpp instance
+ *
+ * @param cpp NFP CPP operations structure
+ * @return Opaque device pointer
+ */
+void nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv);
+
+/*
+ * Return the private data of the nfp_cpp instance
+ *
+ * @param cpp NFP CPP operations structure
+ * @return Opaque device pointer
+ */
+void *nfp_cpp_priv(struct nfp_cpp *cpp);
+
+/*
+ * Get the privately allocated portion of a NFP CPP area handle
+ *
+ * @param cpp_area NFP CPP area handle
+ * @return Pointer to the private area, or NULL on failure
+ */
+void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
+
+uint32_t __nfp_cpp_model_autodetect(struct nfp_cpp *cpp);
+
+/*
+ * NFP CPP core interface for CPP clients.
+ */
+
+/*
+ * Open a NFP CPP handle to a CPP device
+ *
+ * @param[in] id 0-based ID for the CPP interface to use
+ *
+ * @return NFP CPP handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp *nfp_cpp_from_device_name(struct rte_pci_device *dev,
+ int driver_lock_needed);
+
+/*
+ * Free a NFP CPP handle
+ *
+ * @param[in] cpp NFP CPP handle
+ */
+void nfp_cpp_free(struct nfp_cpp *cpp);
+
+#define NFP_CPP_MODEL_INVALID 0xffffffff
+
+/*
+ * NFP_CPP_MODEL_CHIP_of - retrieve the chip ID from the model ID
+ *
+ * The chip ID is a 16-bit BCD+A-F encoding for the chip type.
+ *
+ * @param[in] model NFP CPP model id
+ * @return NFP CPP chip id
+ */
+#define NFP_CPP_MODEL_CHIP_of(model) (((model) >> 16) & 0xffff)
+
+/*
+ * NFP_CPP_MODEL_IS_6000 - Check for the NFP6000 family of devices
+ *
+ * NOTE: The NFP4000 series is considered as a NFP6000 series variant.
+ *
+ * @param[in] model NFP CPP model id
+ * @return true if model is in the NFP6000 family, false otherwise.
+ */
+#define NFP_CPP_MODEL_IS_6000(model) \
+ ((NFP_CPP_MODEL_CHIP_of(model) >= 0x4000) && \
+ (NFP_CPP_MODEL_CHIP_of(model) < 0x7000))
+
+/*
+ * nfp_cpp_model - Retrieve the Model ID of the NFP
+ *
+ * @param[in] cpp NFP CPP handle
+ * @return NFP CPP Model ID
+ */
+uint32_t nfp_cpp_model(struct nfp_cpp *cpp);
+
+/*
+ * NFP Interface types - logical interface for this CPP connection 4 bits are
+ * reserved for interface type.
+ */
+#define NFP_CPP_INTERFACE_TYPE_INVALID 0x0
+#define NFP_CPP_INTERFACE_TYPE_PCI 0x1
+#define NFP_CPP_INTERFACE_TYPE_ARM 0x2
+#define NFP_CPP_INTERFACE_TYPE_RPC 0x3
+#define NFP_CPP_INTERFACE_TYPE_ILA 0x4
+
+/*
+ * Construct a 16-bit NFP Interface ID
+ *
+ * Interface IDs consists of 4 bits of interface type, 4 bits of unit
+ * identifier, and 8 bits of channel identifier.
+ *
+ * The NFP Interface ID is used in the implementation of NFP CPP API mutexes,
+ * which use the MU Atomic CompareAndWrite operation - hence the limit to 16
+ * bits to be able to use the NFP Interface ID as a lock owner.
+ *
+ * @param[in] type NFP Interface Type
+ * @param[in] unit Unit identifier for the interface type
+ * @param[in] channel Channel identifier for the interface unit
+ * @return Interface ID
+ */
+#define NFP_CPP_INTERFACE(type, unit, channel) \
+ ((((type) & 0xf) << 12) | \
+ (((unit) & 0xf) << 8) | \
+ (((channel) & 0xff) << 0))
+
+/*
+ * Get the interface type of a NFP Interface ID
+ * @param[in] interface NFP Interface ID
+ * @return NFP Interface ID's type
+ */
+#define NFP_CPP_INTERFACE_TYPE_of(interface) (((interface) >> 12) & 0xf)
+
+/*
+ * Get the interface unit of a NFP Interface ID
+ * @param[in] interface NFP Interface ID
+ * @return NFP Interface ID's unit
+ */
+#define NFP_CPP_INTERFACE_UNIT_of(interface) (((interface) >> 8) & 0xf)
+
+/*
+ * Get the interface channel of a NFP Interface ID
+ * @param[in] interface NFP Interface ID
+ * @return NFP Interface ID's channel
+ */
+#define NFP_CPP_INTERFACE_CHANNEL_of(interface) (((interface) >> 0) & 0xff)
+
+/*
+ * Retrieve the Interface ID of the NFP
+ * @param[in] cpp NFP CPP handle
+ * @return NFP CPP Interface ID
+ */
+uint16_t nfp_cpp_interface(struct nfp_cpp *cpp);
+
+/*
+ * Retrieve the NFP Serial Number (unique per NFP)
+ * @param[in] cpp NFP CPP handle
+ * @param[out] serial Pointer to reference the serial number array
+ *
+ * @return size of the NFP6000 serial number, in bytes
+ */
+int nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial);
+
+/*
+ * Allocate a NFP CPP area handle, as an offset into a CPP ID
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] size Size of the area to reserve
+ *
+ * @return NFP CPP handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address,
+ unsigned long size);
+
+/*
+ * Allocate a NFP CPP area handle, as an offset into a CPP ID, by a named owner
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] name Name of owner of the area
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] size Size of the area to reserve
+ *
+ * @return NFP CPP handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
+ uint32_t cpp_id,
+ const char *name,
+ unsigned long long address,
+ unsigned long size);
+
+/*
+ * Free an allocated NFP CPP area handle
+ * @param[in] area NFP CPP area handle
+ */
+void nfp_cpp_area_free(struct nfp_cpp_area *area);
+
+/*
+ * Acquire the resources needed to access the NFP CPP area handle
+ *
+ * @param[in] area NFP CPP area handle
+ *
+ * @return 0 on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_acquire(struct nfp_cpp_area *area);
+
+/*
+ * Release the resources needed to access the NFP CPP area handle
+ *
+ * @param[in] area NFP CPP area handle
+ */
+void nfp_cpp_area_release(struct nfp_cpp_area *area);
+
+/*
+ * Allocate, then acquire the resources needed to access the NFP CPP area handle
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] size Size of the area to reserve
+ *
+ * @return NFP CPP handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp_area *nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp,
+ uint32_t cpp_id,
+ unsigned long long address,
+ unsigned long size);
+
+/*
+ * Release the resources, then free the NFP CPP area handle
+ * @param[in] area NFP CPP area handle
+ */
+void nfp_cpp_area_release_free(struct nfp_cpp_area *area);
+
+uint8_t *nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target,
+ uint64_t addr, unsigned long size,
+ struct nfp_cpp_area **area);
+/*
+ * Return an IO pointer to the beginning of the NFP CPP area handle. The area
+ * must be acquired with 'nfp_cpp_area_acquire()' before calling this operation.
+ *
+ * @param[in] area NFP CPP area handle
+ *
+ * @return Pointer to IO memory, or NULL on failure (and set errno accordingly).
+ */
+void *nfp_cpp_area_mapped(struct nfp_cpp_area *area);
+
+/*
+ * Read from a NFP CPP area handle into a buffer. The area must be acquired with
+ * 'nfp_cpp_area_acquire()' before calling this operation.
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the area
+ * @param[in] buffer Location of buffer to receive the data
+ * @param[in] length Length of the data to read
+ *
+ * @return bytes read on success, -1 on failure (and set errno accordingly).
+ *
+ */
+int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
+ void *buffer, size_t length);
+
+/*
+ * Write to a NFP CPP area handle from a buffer. The area must be acquired with
+ * 'nfp_cpp_area_acquire()' before calling this operation.
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the area
+ * @param[in] buffer Location of buffer that holds the data
+ * @param[in] length Length of the data to read
+ *
+ * @return bytes written on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
+ const void *buffer, size_t length);
+
+/*
+ * nfp_cpp_area_iomem() - get IOMEM region for CPP area
+ * @area: CPP area handle
+ *
+ * Returns an iomem pointer for use with readl()/writel() style operations.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: pointer to the area, or NULL
+ */
+void *nfp_cpp_area_iomem(struct nfp_cpp_area *area);
+
+/*
+ * Verify that IO can be performed on an offset in an area
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the area
+ * @param[in] size Size of region to validate
+ *
+ * @return 0 on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
+ unsigned long long offset, unsigned long size);
+
+/*
+ * Get the NFP CPP handle that is the parent of a NFP CPP area handle
+ *
+ * @param cpp_area NFP CPP area handle
+ * @return NFP CPP handle
+ */
+struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
+
+/*
+ * Get the name passed during allocation of the NFP CPP area handle
+ *
+ * @param cpp_area NFP CPP area handle
+ * @return Pointer to the area's name
+ */
+const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
+
+/*
+ * Read a block of data from a NFP CPP ID
+ *
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] kernel_vaddr Buffer to copy read data to
+ * @param[in] length Size of the area to reserve
+ *
+ * @return bytes read on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_read(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, void *kernel_vaddr, size_t length);
+
+/*
+ * Write a block of data to a NFP CPP ID
+ *
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] kernel_vaddr Buffer to copy write data from
+ * @param[in] length Size of the area to reserve
+ *
+ * @return bytes written on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_write(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, const void *kernel_vaddr,
+ size_t length);
+
+
+
+/*
+ * Fill a NFP CPP area handle and offset with a value
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the NFP CPP ID address space
+ * @param[in] value 32-bit value to fill area with
+ * @param[in] length Size of the area to reserve
+ *
+ * @return bytes written on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t value, size_t length);
+
+/*
+ * Read a single 32-bit value from a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value output value
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * NOTE: offset must be 32-bit aligned.
+ *
+ * @return 0 on success, or -1 on error (and set errno accordingly).
+ */
+int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t *value);
+
+/*
+ * Write a single 32-bit value to a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value value to write
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * NOTE: offset must be 32-bit aligned.
+ *
+ * @return 0 on success, or -1 on error (and set errno accordingly).
+ */
+int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t value);
+
+/*
+ * Read a single 64-bit value from a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value output value
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * NOTE: offset must be 64-bit aligned.
+ *
+ * @return 0 on success, or -1 on error (and set errno accordingly).
+ */
+int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset,
+ uint64_t *value);
+
+/*
+ * Write a single 64-bit value to a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value value to write
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * NOTE: offset must be 64-bit aligned.
+ *
+ * @return 0 on success, or -1 on error (and set errno accordingly).
+ */
+int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset,
+ uint64_t value);
+
+/*
+ * Write a single 32-bit value on the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param value value to write
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t value);
+
+/*
+ * Read a single 32-bit value from the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param value output value
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t *value);
+
+/*
+ * Modify bits of a 32-bit value from the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param mask mask of bits to alter
+ * @param value value to modify
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask,
+ uint32_t value);
+
+/*
+ * Modify bits of a 32-bit value from the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param mask mask of bits to alter
+ * @param value value to monitor for
+ * @param timeout_us maximum number of us to wait (-1 for forever)
+ *
+ * @return >= 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask,
+ uint32_t value, int timeout_us);
+
+/*
+ * Read a 32-bit word from a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value output value
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, uint32_t *value);
+
+/*
+ * Write a 32-bit value to a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value value to write
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ *
+ */
+int nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, uint32_t value);
+
+/*
+ * Read a 64-bit work from a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value output value
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, uint64_t *value);
+
+/*
+ * Write a 64-bit value to a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value value to write
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, uint64_t value);
+
+/*
+ * Initialize a mutex location
+
+ * The CPP target:address must point to a 64-bit aligned location, and will
+ * initialize 64 bits of data at the location.
+ *
+ * This creates the initial mutex state, as locked by this nfp_cpp_interface().
+ *
+ * This function should only be called when setting up the initial lock state
+ * upon boot-up of the system.
+ *
+ * @param cpp NFP CPP handle
+ * @param target NFP CPP target ID
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key_id Unique 32-bit value for this mutex
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target,
+ unsigned long long address, uint32_t key_id);
+
+/*
+ * Create a mutex handle from an address controlled by a MU Atomic engine
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and reserve
+ * 64 bits of data at the location for use by the handle.
+ *
+ * Only target/address pairs that point to entities that support the MU Atomic
+ * Engine's CmpAndSwap32 command are supported.
+ *
+ * @param cpp NFP CPP handle
+ * @param target NFP CPP target ID
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key_id 32-bit unique key (must match the key at this location)
+ *
+ * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on
+ * failure.
+ */
+struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+ unsigned long long address,
+ uint32_t key_id);
+
+/*
+ * Get the NFP CPP handle the mutex was created with
+ *
+ * @param mutex NFP mutex handle
+ * @return NFP CPP handle
+ */
+struct nfp_cpp *nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex key
+ *
+ * @param mutex NFP mutex handle
+ * @return Mutex key
+ */
+uint32_t nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex owner
+ *
+ * @param mutex NFP mutex handle
+ * @return Interface ID of the mutex owner
+ *
+ * NOTE: This is for debug purposes ONLY - the owner may change at any time,
+ * unless it has been locked by this NFP CPP handle.
+ */
+uint16_t nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex target
+ *
+ * @param mutex NFP mutex handle
+ * @return Mutex CPP target (ie NFP_CPP_TARGET_MU)
+ */
+int nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex address
+ *
+ * @param mutex NFP mutex handle
+ * @return Mutex CPP address
+ */
+uint64_t nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Free a mutex handle - does not alter the lock state
+ *
+ * @param mutex NFP CPP Mutex handle
+ */
+void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Lock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Unlock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Attempt to lock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ * @return 0 if the lock succeeded, -1 on failure (and errno set
+ * appropriately).
+ */
+int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
+
+#endif /* !__NFP_CPP_H__ */
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
new file mode 100644
index 00000000..c68d9400
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
@@ -0,0 +1,845 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+/*
+ * nfp_cpp_pcie_ops.c
+ * Authors: Vinayak Tammineedi <vinayak.tammineedi@netronome.com>
+ *
+ * Multiplexes the NFP BARs between NFP internal resources and
+ * implements the PCIe specific interface for generic CPP bus access.
+ *
+ * The BARs are managed and allocated if they are available.
+ * The generic CPP bus abstraction builds upon this BAR interface.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <execinfo.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <dirent.h>
+#include <libgen.h>
+
+#include <sys/mman.h>
+#include <sys/file.h>
+#include <sys/stat.h>
+
+#include <rte_ethdev_pci.h>
+#include <rte_string_fns.h>
+
+#include "nfp_cpp.h"
+#include "nfp_target.h"
+#include "nfp6000/nfp6000.h"
+
+#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0)
+
+#define NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(_x) (((_x) & 0x1f) << 16)
+#define NFP_PCIE_BAR_PCIE2CPP_BASEADDRESS(_x) (((_x) & 0xffff) << 0)
+#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT(_x) (((_x) & 0x3) << 27)
+#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT 0
+#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT 1
+#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE 3
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE(_x) (((_x) & 0x7) << 29)
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(_x) (((_x) >> 29) & 0x7)
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED 0
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK 1
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_TARGET 2
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL 3
+#define NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(_x) (((_x) & 0xf) << 23)
+#define NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(_x) (((_x) & 0x3) << 21)
+
+/*
+ * Minimal size of the PCIe cfg memory we depend on being mapped,
+ * queue controller and DMA controller don't have to be covered.
+ */
+#define NFP_PCI_MIN_MAP_SIZE 0x080000
+
+#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize)
+#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize)
+#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
+#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
+#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4))
+
+#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar, slot) \
+ (NFP_PCIE_BAR(0) + ((bar) * 8 + (slot)) * 4)
+
+#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPBAR(bar, slot) \
+ (((bar) * 8 + (slot)) * 4)
+
+/*
+ * Define to enable a bit more verbose debug output.
+ * Set to 1 to enable a bit more verbose debug output.
+ */
+struct nfp_pcie_user;
+struct nfp6000_area_priv;
+
+/*
+ * struct nfp_bar - describes BAR configuration and usage
+ * @nfp: backlink to owner
+ * @barcfg: cached contents of BAR config CSR
+ * @base: the BAR's base CPP offset
+ * @mask: mask for the BAR aperture (read only)
+ * @bitsize: bitsize of BAR aperture (read only)
+ * @index: index of the BAR
+ * @lock: lock to specify if bar is in use
+ * @refcnt: number of current users
+ * @iomem: mapped IO memory
+ */
+#define NFP_BAR_MAX 7
+struct nfp_bar {
+ struct nfp_pcie_user *nfp;
+ uint32_t barcfg;
+ uint64_t base; /* CPP address base */
+ uint64_t mask; /* Bit mask of the bar */
+ uint32_t bitsize; /* Bit size of the bar */
+ int index;
+ int lock;
+
+ char *csr;
+ char *iomem;
+};
+
+#define BUSDEV_SZ 13
+struct nfp_pcie_user {
+ struct nfp_bar bar[NFP_BAR_MAX];
+
+ int device;
+ int lock;
+ char busdev[BUSDEV_SZ];
+ int barsz;
+ char *cfg;
+};
+
+static uint32_t
+nfp_bar_maptype(struct nfp_bar *bar)
+{
+ return NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(bar->barcfg);
+}
+
+#define TARGET_WIDTH_32 4
+#define TARGET_WIDTH_64 8
+
+static int
+nfp_compute_bar(const struct nfp_bar *bar, uint32_t *bar_config,
+ uint64_t *bar_base, int tgt, int act, int tok,
+ uint64_t offset, size_t size, int width)
+{
+ uint32_t bitsize;
+ uint32_t newcfg;
+ uint64_t mask;
+
+ if (tgt >= 16)
+ return -EINVAL;
+
+ switch (width) {
+ case 8:
+ newcfg =
+ NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT
+ (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT);
+ break;
+ case 4:
+ newcfg =
+ NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT
+ (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT);
+ break;
+ case 0:
+ newcfg =
+ NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT
+ (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (act != NFP_CPP_ACTION_RW && act != 0) {
+ /* Fixed CPP mapping with specific action */
+ mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1);
+
+ newcfg |=
+ NFP_PCIE_BAR_PCIE2CPP_MAPTYPE
+ (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(act);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok);
+
+ if ((offset & mask) != ((offset + size - 1) & mask)) {
+ printf("BAR%d: Won't use for Fixed mapping\n",
+ bar->index);
+ printf("\t<%#llx,%#llx>, action=%d\n",
+ (unsigned long long)offset,
+ (unsigned long long)(offset + size), act);
+ printf("\tBAR too small (0x%llx).\n",
+ (unsigned long long)mask);
+ return -EINVAL;
+ }
+ offset &= mask;
+
+#ifdef DEBUG
+ printf("BAR%d: Created Fixed mapping\n", bar->index);
+ printf("\t%d:%d:%d:0x%#llx-0x%#llx>\n", tgt, act, tok,
+ (unsigned long long)offset,
+ (unsigned long long)(offset + mask));
+#endif
+
+ bitsize = 40 - 16;
+ } else {
+ mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1);
+
+ /* Bulk mapping */
+ newcfg |=
+ NFP_PCIE_BAR_PCIE2CPP_MAPTYPE
+ (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK);
+
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok);
+
+ if ((offset & mask) != ((offset + size - 1) & mask)) {
+ printf("BAR%d: Won't use for bulk mapping\n",
+ bar->index);
+ printf("\t<%#llx,%#llx>\n", (unsigned long long)offset,
+ (unsigned long long)(offset + size));
+ printf("\ttarget=%d, token=%d\n", tgt, tok);
+ printf("\tBAR too small (%#llx) - (%#llx != %#llx).\n",
+ (unsigned long long)mask,
+ (unsigned long long)(offset & mask),
+ (unsigned long long)(offset + size - 1) & mask);
+
+ return -EINVAL;
+ }
+
+ offset &= mask;
+
+#ifdef DEBUG
+ printf("BAR%d: Created bulk mapping %d:x:%d:%#llx-%#llx\n",
+ bar->index, tgt, tok, (unsigned long long)offset,
+ (unsigned long long)(offset + ~mask));
+#endif
+
+ bitsize = 40 - 21;
+ }
+
+ if (bar->bitsize < bitsize) {
+ printf("BAR%d: Too small for %d:%d:%d\n", bar->index, tgt, tok,
+ act);
+ return -EINVAL;
+ }
+
+ newcfg |= offset >> bitsize;
+
+ if (bar_base)
+ *bar_base = offset;
+
+ if (bar_config)
+ *bar_config = newcfg;
+
+ return 0;
+}
+
+static int
+nfp_bar_write(struct nfp_pcie_user *nfp, struct nfp_bar *bar,
+ uint32_t newcfg)
+{
+ int base, slot;
+
+ base = bar->index >> 3;
+ slot = bar->index & 7;
+
+ if (!nfp->cfg)
+ return (-ENOMEM);
+
+ bar->csr = nfp->cfg +
+ NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(base, slot);
+
+ *(uint32_t *)(bar->csr) = newcfg;
+
+ bar->barcfg = newcfg;
+#ifdef DEBUG
+ printf("BAR%d: updated to 0x%08x\n", bar->index, newcfg);
+#endif
+
+ return 0;
+}
+
+static int
+nfp_reconfigure_bar(struct nfp_pcie_user *nfp, struct nfp_bar *bar, int tgt,
+ int act, int tok, uint64_t offset, size_t size, int width)
+{
+ uint64_t newbase;
+ uint32_t newcfg;
+ int err;
+
+ err = nfp_compute_bar(bar, &newcfg, &newbase, tgt, act, tok, offset,
+ size, width);
+ if (err)
+ return err;
+
+ bar->base = newbase;
+
+ return nfp_bar_write(nfp, bar, newcfg);
+}
+
+/*
+ * Map all PCI bars. We assume that the BAR with the PCIe config block is
+ * already mapped.
+ *
+ * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM)
+ */
+static int
+nfp_enable_bars(struct nfp_pcie_user *nfp)
+{
+ struct nfp_bar *bar;
+ int x;
+
+ for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) {
+ bar = &nfp->bar[x - 1];
+ bar->barcfg = 0;
+ bar->nfp = nfp;
+ bar->index = x;
+ bar->mask = (1 << (nfp->barsz - 3)) - 1;
+ bar->bitsize = nfp->barsz - 3;
+ bar->base = 0;
+ bar->iomem = NULL;
+ bar->lock = 0;
+ bar->csr = nfp->cfg +
+ NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar->index >> 3,
+ bar->index & 7);
+
+ bar->iomem = nfp->cfg + (bar->index << bar->bitsize);
+ }
+ return 0;
+}
+
+static struct nfp_bar *
+nfp_alloc_bar(struct nfp_pcie_user *nfp)
+{
+ struct nfp_bar *bar;
+ int x;
+
+ for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) {
+ bar = &nfp->bar[x - 1];
+ if (!bar->lock) {
+ bar->lock = 1;
+ return bar;
+ }
+ }
+ return NULL;
+}
+
+static void
+nfp_disable_bars(struct nfp_pcie_user *nfp)
+{
+ struct nfp_bar *bar;
+ int x;
+
+ for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) {
+ bar = &nfp->bar[x - 1];
+ if (bar->iomem) {
+ bar->iomem = NULL;
+ bar->lock = 0;
+ }
+ }
+}
+
+/*
+ * Generic CPP bus access interface.
+ */
+
+struct nfp6000_area_priv {
+ struct nfp_bar *bar;
+ uint32_t bar_offset;
+
+ uint32_t target;
+ uint32_t action;
+ uint32_t token;
+ uint64_t offset;
+ struct {
+ int read;
+ int write;
+ int bar;
+ } width;
+ size_t size;
+ char *iomem;
+};
+
+static int
+nfp6000_area_init(struct nfp_cpp_area *area, uint32_t dest,
+ unsigned long long address, unsigned long size)
+{
+ struct nfp_pcie_user *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+ uint32_t target = NFP_CPP_ID_TARGET_of(dest);
+ uint32_t action = NFP_CPP_ID_ACTION_of(dest);
+ uint32_t token = NFP_CPP_ID_TOKEN_of(dest);
+ int pp, ret = 0;
+
+ pp = nfp6000_target_pushpull(NFP_CPP_ID(target, action, token),
+ address);
+ if (pp < 0)
+ return pp;
+
+ priv->width.read = PUSH_WIDTH(pp);
+ priv->width.write = PULL_WIDTH(pp);
+
+ if (priv->width.read > 0 &&
+ priv->width.write > 0 && priv->width.read != priv->width.write)
+ return -EINVAL;
+
+ if (priv->width.read > 0)
+ priv->width.bar = priv->width.read;
+ else
+ priv->width.bar = priv->width.write;
+
+ priv->bar = nfp_alloc_bar(nfp);
+ if (priv->bar == NULL)
+ return -ENOMEM;
+
+ priv->target = target;
+ priv->action = action;
+ priv->token = token;
+ priv->offset = address;
+ priv->size = size;
+
+ ret = nfp_reconfigure_bar(nfp, priv->bar, priv->target, priv->action,
+ priv->token, priv->offset, priv->size,
+ priv->width.bar);
+
+ return ret;
+}
+
+static int
+nfp6000_area_acquire(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+ /* Calculate offset into BAR. */
+ if (nfp_bar_maptype(priv->bar) ==
+ NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL) {
+ priv->bar_offset = priv->offset &
+ (NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1);
+ priv->bar_offset +=
+ NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(priv->bar,
+ priv->target);
+ priv->bar_offset +=
+ NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(priv->bar, priv->token);
+ } else {
+ priv->bar_offset = priv->offset & priv->bar->mask;
+ }
+
+ /* Must have been too big. Sub-allocate. */
+ if (!priv->bar->iomem)
+ return (-ENOMEM);
+
+ priv->iomem = priv->bar->iomem + priv->bar_offset;
+
+ return 0;
+}
+
+static void *
+nfp6000_area_mapped(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *area_priv = nfp_cpp_area_priv(area);
+
+ if (!area_priv->iomem)
+ return NULL;
+
+ return area_priv->iomem;
+}
+
+static void
+nfp6000_area_release(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+ priv->bar->lock = 0;
+ priv->bar = NULL;
+ priv->iomem = NULL;
+}
+
+static void *
+nfp6000_area_iomem(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+ return priv->iomem;
+}
+
+static int
+nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
+ unsigned long offset, unsigned int length)
+{
+ uint64_t *wrptr64 = kernel_vaddr;
+ const volatile uint64_t *rdptr64;
+ struct nfp6000_area_priv *priv;
+ uint32_t *wrptr32 = kernel_vaddr;
+ const volatile uint32_t *rdptr32;
+ int width;
+ unsigned int n;
+ bool is_64;
+
+ priv = nfp_cpp_area_priv(area);
+ rdptr64 = (uint64_t *)(priv->iomem + offset);
+ rdptr32 = (uint32_t *)(priv->iomem + offset);
+
+ if (offset + length > priv->size)
+ return -EFAULT;
+
+ width = priv->width.read;
+
+ if (width <= 0)
+ return -EINVAL;
+
+ /* Unaligned? Translate to an explicit access */
+ if ((priv->offset + offset) & (width - 1)) {
+ printf("aread_read unaligned!!!\n");
+ return -EINVAL;
+ }
+
+ is_64 = width == TARGET_WIDTH_64;
+
+ /* MU reads via a PCIe2CPP BAR supports 32bit (and other) lengths */
+ if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+ priv->action == NFP_CPP_ACTION_RW) {
+ is_64 = false;
+ }
+
+ if (is_64) {
+ if (offset % sizeof(uint64_t) != 0 ||
+ length % sizeof(uint64_t) != 0)
+ return -EINVAL;
+ } else {
+ if (offset % sizeof(uint32_t) != 0 ||
+ length % sizeof(uint32_t) != 0)
+ return -EINVAL;
+ }
+
+ if (!priv->bar)
+ return -EFAULT;
+
+ if (is_64)
+ for (n = 0; n < length; n += sizeof(uint64_t)) {
+ *wrptr64 = *rdptr64;
+ wrptr64++;
+ rdptr64++;
+ }
+ else
+ for (n = 0; n < length; n += sizeof(uint32_t)) {
+ *wrptr32 = *rdptr32;
+ wrptr32++;
+ rdptr32++;
+ }
+
+ return n;
+}
+
+static int
+nfp6000_area_write(struct nfp_cpp_area *area, const void *kernel_vaddr,
+ unsigned long offset, unsigned int length)
+{
+ const uint64_t *rdptr64 = kernel_vaddr;
+ uint64_t *wrptr64;
+ const uint32_t *rdptr32 = kernel_vaddr;
+ struct nfp6000_area_priv *priv;
+ uint32_t *wrptr32;
+ int width;
+ unsigned int n;
+ bool is_64;
+
+ priv = nfp_cpp_area_priv(area);
+ wrptr64 = (uint64_t *)(priv->iomem + offset);
+ wrptr32 = (uint32_t *)(priv->iomem + offset);
+
+ if (offset + length > priv->size)
+ return -EFAULT;
+
+ width = priv->width.write;
+
+ if (width <= 0)
+ return -EINVAL;
+
+ /* Unaligned? Translate to an explicit access */
+ if ((priv->offset + offset) & (width - 1))
+ return -EINVAL;
+
+ is_64 = width == TARGET_WIDTH_64;
+
+ /* MU writes via a PCIe2CPP BAR supports 32bit (and other) lengths */
+ if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+ priv->action == NFP_CPP_ACTION_RW)
+ is_64 = false;
+
+ if (is_64) {
+ if (offset % sizeof(uint64_t) != 0 ||
+ length % sizeof(uint64_t) != 0)
+ return -EINVAL;
+ } else {
+ if (offset % sizeof(uint32_t) != 0 ||
+ length % sizeof(uint32_t) != 0)
+ return -EINVAL;
+ }
+
+ if (!priv->bar)
+ return -EFAULT;
+
+ if (is_64)
+ for (n = 0; n < length; n += sizeof(uint64_t)) {
+ *wrptr64 = *rdptr64;
+ wrptr64++;
+ rdptr64++;
+ }
+ else
+ for (n = 0; n < length; n += sizeof(uint32_t)) {
+ *wrptr32 = *rdptr32;
+ wrptr32++;
+ rdptr32++;
+ }
+
+ return n;
+}
+
+#define PCI_DEVICES "/sys/bus/pci/devices"
+
+static int
+nfp_acquire_process_lock(struct nfp_pcie_user *desc)
+{
+ int rc;
+ struct flock lock;
+ char lockname[30];
+
+ memset(&lock, 0, sizeof(lock));
+
+ snprintf(lockname, sizeof(lockname), "/var/lock/nfp_%s", desc->busdev);
+ desc->lock = open(lockname, O_RDWR | O_CREAT, 0666);
+ if (desc->lock < 0)
+ return desc->lock;
+
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ rc = -1;
+ while (rc != 0) {
+ rc = fcntl(desc->lock, F_SETLKW, &lock);
+ if (rc < 0) {
+ if (errno != EAGAIN && errno != EACCES) {
+ close(desc->lock);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+nfp6000_set_model(struct rte_pci_device *dev, struct nfp_cpp *cpp)
+{
+ uint32_t model;
+
+ if (rte_pci_read_config(dev, &model, 4, 0x2e) < 0) {
+ printf("nfp set model failed\n");
+ return -1;
+ }
+
+ model = model << 16;
+ nfp_cpp_model_set(cpp, model);
+
+ return 0;
+}
+
+static int
+nfp6000_set_interface(struct rte_pci_device *dev, struct nfp_cpp *cpp)
+{
+ uint16_t interface;
+
+ if (rte_pci_read_config(dev, &interface, 2, 0x154) < 0) {
+ printf("nfp set interface failed\n");
+ return -1;
+ }
+
+ nfp_cpp_interface_set(cpp, interface);
+
+ return 0;
+}
+
+#define PCI_CFG_SPACE_SIZE 256
+#define PCI_CFG_SPACE_EXP_SIZE 4096
+#define PCI_EXT_CAP_ID(header) (int)(header & 0x0000ffff)
+#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc)
+#define PCI_EXT_CAP_ID_DSN 0x03
+static int
+nfp_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap)
+{
+ uint32_t header;
+ int ttl;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ /* minimum 8 bytes per capability */
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+ if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
+ printf("nfp error reading extended capabilities\n");
+ return -1;
+ }
+
+ /*
+ * If we have no capabilities, this is indicated by cap ID,
+ * cap version and next pointer all being 0.
+ */
+ if (header == 0)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == cap)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < PCI_CFG_SPACE_SIZE)
+ break;
+
+ if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
+ printf("nfp error reading extended capabilities\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nfp6000_set_serial(struct rte_pci_device *dev, struct nfp_cpp *cpp)
+{
+ uint16_t tmp;
+ uint8_t serial[6];
+ int serial_len = 6;
+ int pos;
+
+ pos = nfp_pci_find_next_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
+ if (pos <= 0) {
+ printf("PCI_EXT_CAP_ID_DSN not found. nfp set serial failed\n");
+ return -1;
+ } else {
+ pos += 6;
+ }
+
+ if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) {
+ printf("nfp set serial failed\n");
+ return -1;
+ }
+
+ serial[4] = (uint8_t)((tmp >> 8) & 0xff);
+ serial[5] = (uint8_t)(tmp & 0xff);
+
+ pos += 2;
+ if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) {
+ printf("nfp set serial failed\n");
+ return -1;
+ }
+
+ serial[2] = (uint8_t)((tmp >> 8) & 0xff);
+ serial[3] = (uint8_t)(tmp & 0xff);
+
+ pos += 2;
+ if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) {
+ printf("nfp set serial failed\n");
+ return -1;
+ }
+
+ serial[0] = (uint8_t)((tmp >> 8) & 0xff);
+ serial[1] = (uint8_t)(tmp & 0xff);
+
+ nfp_cpp_serial_set(cpp, serial, serial_len);
+
+ return 0;
+}
+
+static int
+nfp6000_set_barsz(struct rte_pci_device *dev, struct nfp_pcie_user *desc)
+{
+ unsigned long tmp;
+ int i = 0;
+
+ tmp = dev->mem_resource[0].len;
+
+ while (tmp >>= 1)
+ i++;
+
+ desc->barsz = i;
+ return 0;
+}
+
+static int
+nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev)
+{
+ int ret = 0;
+ uint32_t model;
+ struct nfp_pcie_user *desc;
+
+ desc = malloc(sizeof(*desc));
+ if (!desc)
+ return -1;
+
+
+ memset(desc->busdev, 0, BUSDEV_SZ);
+ strlcpy(desc->busdev, dev->device.name, sizeof(desc->busdev));
+
+ if (cpp->driver_lock_needed) {
+ ret = nfp_acquire_process_lock(desc);
+ if (ret)
+ return -1;
+ }
+
+ if (nfp6000_set_model(dev, cpp) < 0)
+ return -1;
+ if (nfp6000_set_interface(dev, cpp) < 0)
+ return -1;
+ if (nfp6000_set_serial(dev, cpp) < 0)
+ return -1;
+ if (nfp6000_set_barsz(dev, desc) < 0)
+ return -1;
+
+ desc->cfg = (char *)dev->mem_resource[0].addr;
+
+ nfp_enable_bars(desc);
+
+ nfp_cpp_priv_set(cpp, desc);
+
+ model = __nfp_cpp_model_autodetect(cpp);
+ nfp_cpp_model_set(cpp, model);
+
+ return ret;
+}
+
+static void
+nfp6000_free(struct nfp_cpp *cpp)
+{
+ struct nfp_pcie_user *desc = nfp_cpp_priv(cpp);
+
+ nfp_disable_bars(desc);
+ if (cpp->driver_lock_needed)
+ close(desc->lock);
+ close(desc->device);
+ free(desc);
+}
+
+static const struct nfp_cpp_operations nfp6000_pcie_ops = {
+ .init = nfp6000_init,
+ .free = nfp6000_free,
+
+ .area_priv_size = sizeof(struct nfp6000_area_priv),
+ .area_init = nfp6000_area_init,
+ .area_acquire = nfp6000_area_acquire,
+ .area_release = nfp6000_area_release,
+ .area_mapped = nfp6000_area_mapped,
+ .area_read = nfp6000_area_read,
+ .area_write = nfp6000_area_write,
+ .area_iomem = nfp6000_area_iomem,
+};
+
+const struct
+nfp_cpp_operations *nfp_cpp_transport_operations(void)
+{
+ return &nfp6000_pcie_ops;
+}
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c
new file mode 100644
index 00000000..75d3c974
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+
+#include <rte_byteorder.h>
+#include <rte_ethdev_pci.h>
+
+#include "nfp_cpp.h"
+#include "nfp_target.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp6000/nfp_xpb.h"
+#include "nfp_nffw.h"
+
+#define NFP_PL_DEVICE_ID 0x00000004
+#define NFP_PL_DEVICE_ID_MASK 0xff
+
+#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144
+
+void
+nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv)
+{
+ cpp->priv = priv;
+}
+
+void *
+nfp_cpp_priv(struct nfp_cpp *cpp)
+{
+ return cpp->priv;
+}
+
+void
+nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model)
+{
+ cpp->model = model;
+}
+
+uint32_t
+nfp_cpp_model(struct nfp_cpp *cpp)
+{
+ if (!cpp)
+ return NFP_CPP_MODEL_INVALID;
+
+ if (cpp->model == 0)
+ cpp->model = __nfp_cpp_model_autodetect(cpp);
+
+ return cpp->model;
+}
+
+void
+nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface)
+{
+ cpp->interface = interface;
+}
+
+int
+nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial)
+{
+ *serial = cpp->serial;
+ return cpp->serial_len;
+}
+
+int
+nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial,
+ size_t serial_len)
+{
+ if (cpp->serial_len)
+ free(cpp->serial);
+
+ cpp->serial = malloc(serial_len);
+ if (!cpp->serial)
+ return -1;
+
+ memcpy(cpp->serial, serial, serial_len);
+ cpp->serial_len = serial_len;
+
+ return 0;
+}
+
+uint16_t
+nfp_cpp_interface(struct nfp_cpp *cpp)
+{
+ if (!cpp)
+ return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_INVALID, 0, 0);
+
+ return cpp->interface;
+}
+
+void *
+nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area)
+{
+ return &cpp_area[1];
+}
+
+struct nfp_cpp *
+nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area)
+{
+ return cpp_area->cpp;
+}
+
+const char *
+nfp_cpp_area_name(struct nfp_cpp_area *cpp_area)
+{
+ return cpp_area->name;
+}
+
+/*
+ * nfp_cpp_area_alloc - allocate a new CPP area
+ * @cpp: CPP handle
+ * @dest: CPP id
+ * @address: start address on CPP target
+ * @size: size of area in bytes
+ *
+ * Allocate and initialize a CPP area structure. The area must later
+ * be locked down with an 'acquire' before it can be safely accessed.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, uint32_t dest,
+ const char *name, unsigned long long address,
+ unsigned long size)
+{
+ struct nfp_cpp_area *area;
+ uint64_t tmp64 = (uint64_t)address;
+ int tmp, err;
+
+ if (!cpp)
+ return NULL;
+
+ /* CPP bus uses only a 40-bit address */
+ if ((address + size) > (1ULL << 40))
+ return NFP_ERRPTR(EFAULT);
+
+ /* Remap from cpp_island to cpp_target */
+ err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
+ if (err < 0)
+ return NULL;
+
+ address = (unsigned long long)tmp64;
+
+ if (!name)
+ name = "";
+
+ area = calloc(1, sizeof(*area) + cpp->op->area_priv_size +
+ strlen(name) + 1);
+ if (!area)
+ return NULL;
+
+ area->cpp = cpp;
+ area->name = ((char *)area) + sizeof(*area) + cpp->op->area_priv_size;
+ memcpy(area->name, name, strlen(name) + 1);
+
+ /*
+ * Preserve errno around the call to area_init, since most
+ * implementations will blindly call nfp_target_action_width()for both
+ * read or write modes, and that will set errno to EINVAL.
+ */
+ tmp = errno;
+
+ err = cpp->op->area_init(area, dest, address, size);
+ if (err < 0) {
+ free(area);
+ return NULL;
+ }
+
+ /* Restore errno */
+ errno = tmp;
+
+ area->offset = address;
+ area->size = size;
+
+ return area;
+}
+
+struct nfp_cpp_area *
+nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t dest,
+ unsigned long long address, unsigned long size)
+{
+ return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
+}
+
+/*
+ * nfp_cpp_area_alloc_acquire - allocate a new CPP area and lock it down
+ *
+ * @cpp: CPP handle
+ * @dest: CPP id
+ * @address: start address on CPP target
+ * @size: size of area
+ *
+ * Allocate and initilizae a CPP area structure, and lock it down so
+ * that it can be accessed directly.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ *
+ * NOTE: The area must also be 'released' when the structure is freed.
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, uint32_t destination,
+ unsigned long long address, unsigned long size)
+{
+ struct nfp_cpp_area *area;
+
+ area = nfp_cpp_area_alloc(cpp, destination, address, size);
+ if (!area)
+ return NULL;
+
+ if (nfp_cpp_area_acquire(area)) {
+ nfp_cpp_area_free(area);
+ return NULL;
+ }
+
+ return area;
+}
+
+/*
+ * nfp_cpp_area_free - free up the CPP area
+ * area: CPP area handle
+ *
+ * Frees up memory resources held by the CPP area.
+ */
+void
+nfp_cpp_area_free(struct nfp_cpp_area *area)
+{
+ if (area->cpp->op->area_cleanup)
+ area->cpp->op->area_cleanup(area);
+ free(area);
+}
+
+/*
+ * nfp_cpp_area_release_free - release CPP area and free it
+ * area: CPP area handle
+ *
+ * Releases CPP area and frees up memory resources held by the it.
+ */
+void
+nfp_cpp_area_release_free(struct nfp_cpp_area *area)
+{
+ nfp_cpp_area_release(area);
+ nfp_cpp_area_free(area);
+}
+
+/*
+ * nfp_cpp_area_acquire - lock down a CPP area for access
+ * @area: CPP area handle
+ *
+ * Locks down the CPP area for a potential long term activity. Area
+ * must always be locked down before being accessed.
+ */
+int
+nfp_cpp_area_acquire(struct nfp_cpp_area *area)
+{
+ if (area->cpp->op->area_acquire) {
+ int err = area->cpp->op->area_acquire(area);
+
+ if (err < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * nfp_cpp_area_release - release a locked down CPP area
+ * @area: CPP area handle
+ *
+ * Releases a previously locked down CPP area.
+ */
+void
+nfp_cpp_area_release(struct nfp_cpp_area *area)
+{
+ if (area->cpp->op->area_release)
+ area->cpp->op->area_release(area);
+}
+
+/*
+ * nfp_cpp_area_iomem() - get IOMEM region for CPP area
+ *
+ * @area: CPP area handle
+ *
+ * Returns an iomem pointer for use with readl()/writel() style operations.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: pointer to the area, or NULL
+ */
+void *
+nfp_cpp_area_iomem(struct nfp_cpp_area *area)
+{
+ void *iomem = NULL;
+
+ if (area->cpp->op->area_iomem)
+ iomem = area->cpp->op->area_iomem(area);
+
+ return iomem;
+}
+
+/*
+ * nfp_cpp_area_read - read data from CPP area
+ *
+ * @area: CPP area handle
+ * @offset: offset into CPP area
+ * @kernel_vaddr: kernel address to put data into
+ * @length: number of bytes to read
+ *
+ * Read data from indicated CPP region.
+ *
+ * NOTE: @offset and @length must be 32-bit aligned values.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ */
+int
+nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
+ void *kernel_vaddr, size_t length)
+{
+ if ((offset + length) > area->size)
+ return NFP_ERRNO(EFAULT);
+
+ return area->cpp->op->area_read(area, kernel_vaddr, offset, length);
+}
+
+/*
+ * nfp_cpp_area_write - write data to CPP area
+ *
+ * @area: CPP area handle
+ * @offset: offset into CPP area
+ * @kernel_vaddr: kernel address to read data from
+ * @length: number of bytes to write
+ *
+ * Write data to indicated CPP region.
+ *
+ * NOTE: @offset and @length must be 32-bit aligned values.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ */
+int
+nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
+ const void *kernel_vaddr, size_t length)
+{
+ if ((offset + length) > area->size)
+ return NFP_ERRNO(EFAULT);
+
+ return area->cpp->op->area_write(area, kernel_vaddr, offset, length);
+}
+
+void *
+nfp_cpp_area_mapped(struct nfp_cpp_area *area)
+{
+ if (area->cpp->op->area_mapped)
+ return area->cpp->op->area_mapped(area);
+ return NULL;
+}
+
+/*
+ * nfp_cpp_area_check_range - check if address range fits in CPP area
+ *
+ * @area: CPP area handle
+ * @offset: offset into CPP area
+ * @length: size of address range in bytes
+ *
+ * Check if address range fits within CPP area. Return 0 if area fits
+ * or -1 on error.
+ */
+int
+nfp_cpp_area_check_range(struct nfp_cpp_area *area, unsigned long long offset,
+ unsigned long length)
+{
+ if (((offset + length) > area->size))
+ return NFP_ERRNO(EFAULT);
+
+ return 0;
+}
+
+/*
+ * Return the correct CPP address, and fixup xpb_addr as needed,
+ * based upon NFP model.
+ */
+static uint32_t
+nfp_xpb_to_cpp(struct nfp_cpp *cpp, uint32_t *xpb_addr)
+{
+ uint32_t xpb;
+ int island;
+
+ if (!NFP_CPP_MODEL_IS_6000(cpp->model))
+ return 0;
+
+ xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0);
+
+ /*
+ * Ensure that non-local XPB accesses go out through the
+ * global XPBM bus.
+ */
+ island = ((*xpb_addr) >> 24) & 0x3f;
+
+ if (!island)
+ return xpb;
+
+ if (island == 1) {
+ /*
+ * Accesses to the ARM Island overlay uses Island 0
+ * Global Bit
+ */
+ (*xpb_addr) &= ~0x7f000000;
+ if (*xpb_addr < 0x60000)
+ *xpb_addr |= (1 << 30);
+ else
+ /* And only non-ARM interfaces use island id = 1 */
+ if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp)) !=
+ NFP_CPP_INTERFACE_TYPE_ARM)
+ *xpb_addr |= (1 << 24);
+ } else {
+ (*xpb_addr) |= (1 << 30);
+ }
+
+ return xpb;
+}
+
+int
+nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t *value)
+{
+ int sz;
+ uint32_t tmp = 0;
+
+ sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ *value = rte_le_to_cpu_32(tmp);
+
+ return (sz == sizeof(*value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t value)
+{
+ int sz;
+
+ value = rte_cpu_to_le_32(value);
+ sz = nfp_cpp_area_write(area, offset, &value, sizeof(value));
+ return (sz == sizeof(value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset,
+ uint64_t *value)
+{
+ int sz;
+ uint64_t tmp = 0;
+
+ sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ *value = rte_le_to_cpu_64(tmp);
+
+ return (sz == sizeof(*value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset,
+ uint64_t value)
+{
+ int sz;
+
+ value = rte_cpu_to_le_64(value);
+ sz = nfp_cpp_area_write(area, offset, &value, sizeof(value));
+
+ return (sz == sizeof(value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address,
+ uint32_t *value)
+{
+ int sz;
+ uint32_t tmp;
+
+ sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp));
+ *value = rte_le_to_cpu_32(tmp);
+
+ return (sz == sizeof(*value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address,
+ uint32_t value)
+{
+ int sz;
+
+ value = rte_cpu_to_le_32(value);
+ sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value));
+
+ return (sz == sizeof(value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address,
+ uint64_t *value)
+{
+ int sz;
+ uint64_t tmp;
+
+ sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp));
+ *value = rte_le_to_cpu_64(tmp);
+
+ return (sz == sizeof(*value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address,
+ uint64_t value)
+{
+ int sz;
+
+ value = rte_cpu_to_le_64(value);
+ sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value));
+
+ return (sz == sizeof(value)) ? 0 : -1;
+}
+
+int
+nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t value)
+{
+ uint32_t cpp_dest;
+
+ cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
+
+ return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value);
+}
+
+int
+nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t *value)
+{
+ uint32_t cpp_dest;
+
+ cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
+
+ return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value);
+}
+
+static struct nfp_cpp *
+nfp_cpp_alloc(struct rte_pci_device *dev, int driver_lock_needed)
+{
+ const struct nfp_cpp_operations *ops;
+ struct nfp_cpp *cpp;
+ int err;
+
+ ops = nfp_cpp_transport_operations();
+
+ if (!ops || !ops->init)
+ return NFP_ERRPTR(EINVAL);
+
+ cpp = calloc(1, sizeof(*cpp));
+ if (!cpp)
+ return NULL;
+
+ cpp->op = ops;
+ cpp->driver_lock_needed = driver_lock_needed;
+
+ if (cpp->op->init) {
+ err = cpp->op->init(cpp, dev);
+ if (err < 0) {
+ free(cpp);
+ return NULL;
+ }
+ }
+
+ if (NFP_CPP_MODEL_IS_6000(nfp_cpp_model(cpp))) {
+ uint32_t xpbaddr;
+ size_t tgt;
+
+ for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
+ /* Hardcoded XPB IMB Base, island 0 */
+ xpbaddr = 0x000a0000 + (tgt * 4);
+ err = nfp_xpb_readl(cpp, xpbaddr,
+ (uint32_t *)&cpp->imb_cat_table[tgt]);
+ if (err < 0) {
+ free(cpp);
+ return NULL;
+ }
+ }
+ }
+
+ return cpp;
+}
+
+/*
+ * nfp_cpp_free - free the CPP handle
+ * @cpp: CPP handle
+ */
+void
+nfp_cpp_free(struct nfp_cpp *cpp)
+{
+ if (cpp->op && cpp->op->free)
+ cpp->op->free(cpp);
+
+ if (cpp->serial_len)
+ free(cpp->serial);
+
+ free(cpp);
+}
+
+struct nfp_cpp *
+nfp_cpp_from_device_name(struct rte_pci_device *dev, int driver_lock_needed)
+{
+ return nfp_cpp_alloc(dev, driver_lock_needed);
+}
+
+/*
+ * Modify bits of a 32-bit value from the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param mask mask of bits to alter
+ * @param value value to modify
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask,
+ uint32_t value)
+{
+ int err;
+ uint32_t tmp;
+
+ err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
+ if (err < 0)
+ return err;
+
+ tmp &= ~mask;
+ tmp |= (mask & value);
+ return nfp_xpb_writel(cpp, xpb_tgt, tmp);
+}
+
+/*
+ * Modify bits of a 32-bit value from the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param mask mask of bits to alter
+ * @param value value to monitor for
+ * @param timeout_us maximum number of us to wait (-1 for forever)
+ *
+ * @return >= 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask,
+ uint32_t value, int timeout_us)
+{
+ uint32_t tmp;
+ int err;
+
+ do {
+ err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
+ if (err < 0)
+ goto exit;
+
+ if ((tmp & mask) == (value & mask)) {
+ if (timeout_us < 0)
+ timeout_us = 0;
+ break;
+ }
+
+ if (timeout_us < 0)
+ continue;
+
+ timeout_us -= 100;
+ usleep(100);
+ } while (timeout_us >= 0);
+
+ if (timeout_us < 0)
+ err = NFP_ERRNO(ETIMEDOUT);
+ else
+ err = timeout_us;
+
+exit:
+ return err;
+}
+
+/*
+ * nfp_cpp_read - read from CPP target
+ * @cpp: CPP handle
+ * @destination: CPP id
+ * @address: offset into CPP target
+ * @kernel_vaddr: kernel buffer for result
+ * @length: number of bytes to read
+ */
+int
+nfp_cpp_read(struct nfp_cpp *cpp, uint32_t destination,
+ unsigned long long address, void *kernel_vaddr, size_t length)
+{
+ struct nfp_cpp_area *area;
+ int err;
+
+ area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length);
+ if (!area) {
+ printf("Area allocation/acquire failed\n");
+ return -1;
+ }
+
+ err = nfp_cpp_area_read(area, 0, kernel_vaddr, length);
+
+ nfp_cpp_area_release_free(area);
+ return err;
+}
+
+/*
+ * nfp_cpp_write - write to CPP target
+ * @cpp: CPP handle
+ * @destination: CPP id
+ * @address: offset into CPP target
+ * @kernel_vaddr: kernel buffer to read from
+ * @length: number of bytes to write
+ */
+int
+nfp_cpp_write(struct nfp_cpp *cpp, uint32_t destination,
+ unsigned long long address, const void *kernel_vaddr,
+ size_t length)
+{
+ struct nfp_cpp_area *area;
+ int err;
+
+ area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length);
+ if (!area)
+ return -1;
+
+ err = nfp_cpp_area_write(area, 0, kernel_vaddr, length);
+
+ nfp_cpp_area_release_free(area);
+ return err;
+}
+
+/*
+ * nfp_cpp_area_fill - fill a CPP area with a value
+ * @area: CPP area
+ * @offset: offset into CPP area
+ * @value: value to fill with
+ * @length: length of area to fill
+ */
+int
+nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t value, size_t length)
+{
+ int err;
+ size_t i;
+ uint64_t value64;
+
+ value = rte_cpu_to_le_32(value);
+ value64 = ((uint64_t)value << 32) | value;
+
+ if ((offset + length) > area->size)
+ return NFP_ERRNO(EINVAL);
+
+ if ((area->offset + offset) & 3)
+ return NFP_ERRNO(EINVAL);
+
+ if (((area->offset + offset) & 7) == 4 && length >= 4) {
+ err = nfp_cpp_area_write(area, offset, &value, sizeof(value));
+ if (err < 0)
+ return err;
+ if (err != sizeof(value))
+ return NFP_ERRNO(ENOSPC);
+ offset += sizeof(value);
+ length -= sizeof(value);
+ }
+
+ for (i = 0; (i + sizeof(value)) < length; i += sizeof(value64)) {
+ err =
+ nfp_cpp_area_write(area, offset + i, &value64,
+ sizeof(value64));
+ if (err < 0)
+ return err;
+ if (err != sizeof(value64))
+ return NFP_ERRNO(ENOSPC);
+ }
+
+ if ((i + sizeof(value)) <= length) {
+ err =
+ nfp_cpp_area_write(area, offset + i, &value, sizeof(value));
+ if (err < 0)
+ return err;
+ if (err != sizeof(value))
+ return NFP_ERRNO(ENOSPC);
+ i += sizeof(value);
+ }
+
+ return (int)i;
+}
+
+/*
+ * NOTE: This code should not use nfp_xpb_* functions,
+ * as those are model-specific
+ */
+uint32_t
+__nfp_cpp_model_autodetect(struct nfp_cpp *cpp)
+{
+ uint32_t arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0);
+ uint32_t model = 0;
+
+ nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, &model);
+
+ if (NFP_CPP_MODEL_IS_6000(model)) {
+ uint32_t tmp;
+
+ nfp_cpp_model_set(cpp, model);
+
+ /* The PL's PluDeviceID revision code is authoratative */
+ model &= ~0xff;
+ nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) +
+ NFP_PL_DEVICE_ID, &tmp);
+ model |= (NFP_PL_DEVICE_ID_MASK & tmp) - 0x10;
+ }
+
+ return model;
+}
+
+/*
+ * nfp_cpp_map_area() - Helper function to map an area
+ * @cpp: NFP CPP handler
+ * @domain: CPP domain
+ * @target: CPP target
+ * @addr: CPP address
+ * @size: Size of the area
+ * @area: Area handle (output)
+ *
+ * Map an area of IOMEM access. To undo the effect of this function call
+ * @nfp_cpp_area_release_free(*area).
+ *
+ * Return: Pointer to memory mapped area or ERR_PTR
+ */
+uint8_t *
+nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target, uint64_t addr,
+ unsigned long size, struct nfp_cpp_area **area)
+{
+ uint8_t *res;
+ uint32_t dest;
+
+ dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain);
+
+ *area = nfp_cpp_area_alloc_acquire(cpp, dest, addr, size);
+ if (!*area)
+ goto err_eio;
+
+ res = nfp_cpp_area_iomem(*area);
+ if (!res)
+ goto err_release_free;
+
+ return res;
+
+err_release_free:
+ nfp_cpp_area_release_free(*area);
+err_eio:
+ return NULL;
+}
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.c
new file mode 100644
index 00000000..20431bf8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+
+#include "nfp_crc.h"
+
+static inline uint32_t
+nfp_crc32_be_generic(uint32_t crc, unsigned char const *p, size_t len,
+ uint32_t polynomial)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++ << 24;
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x80000000) ? polynomial :
+ 0);
+ }
+ return crc;
+}
+
+static inline uint32_t
+nfp_crc32_be(uint32_t crc, unsigned char const *p, size_t len)
+{
+ return nfp_crc32_be_generic(crc, p, len, CRCPOLY_BE);
+}
+
+static uint32_t
+nfp_crc32_posix_end(uint32_t crc, size_t total_len)
+{
+ /* Extend with the length of the string. */
+ while (total_len != 0) {
+ uint8_t c = total_len & 0xff;
+
+ crc = nfp_crc32_be(crc, &c, 1);
+ total_len >>= 8;
+ }
+
+ return ~crc;
+}
+
+uint32_t
+nfp_crc32_posix(const void *buff, size_t len)
+{
+ return nfp_crc32_posix_end(nfp_crc32_be(0, buff, len), len);
+}
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.h
new file mode 100644
index 00000000..f99c89fc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_CRC_H__
+#define __NFP_CRC_H__
+
+/*
+ * There are multiple 16-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRCPOLY_LE 0xedb88320
+#define CRCPOLY_BE 0x04c11db7
+
+uint32_t nfp_crc32_posix(const void *buff, size_t len);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.c
new file mode 100644
index 00000000..c0516bf8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.c
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM
+ * after chip reset.
+ *
+ * Examples of the fields:
+ * me.count = 40
+ * me.mask = 0x7f_ffff_ffff
+ *
+ * me.count is the total number of MEs on the system.
+ * me.mask is the bitmask of MEs that are available for application usage.
+ *
+ * (ie, in this example, ME 39 has been reserved by boardconfig.)
+ */
+
+#include <stdio.h>
+#include <time.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp_resource.h"
+#include "nfp_hwinfo.h"
+#include "nfp_crc.h"
+
+static int
+nfp_hwinfo_is_updating(struct nfp_hwinfo *hwinfo)
+{
+ return hwinfo->version & NFP_HWINFO_VERSION_UPDATING;
+}
+
+static int
+nfp_hwinfo_db_walk(struct nfp_hwinfo *hwinfo, uint32_t size)
+{
+ const char *key, *val, *end = hwinfo->data + size;
+
+ for (key = hwinfo->data; *key && key < end;
+ key = val + strlen(val) + 1) {
+ val = key + strlen(key) + 1;
+ if (val >= end) {
+ printf("Bad HWINFO - overflowing key\n");
+ return -EINVAL;
+ }
+
+ if (val + strlen(val) + 1 > end) {
+ printf("Bad HWINFO - overflowing value\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int
+nfp_hwinfo_db_validate(struct nfp_hwinfo *db, uint32_t len)
+{
+ uint32_t size, new_crc, *crc;
+
+ size = db->size;
+ if (size > len) {
+ printf("Unsupported hwinfo size %u > %u\n", size, len);
+ return -EINVAL;
+ }
+
+ size -= sizeof(uint32_t);
+ new_crc = nfp_crc32_posix((char *)db, size);
+ crc = (uint32_t *)(db->start + size);
+ if (new_crc != *crc) {
+ printf("Corrupt hwinfo table (CRC mismatch)\n");
+ printf("\tcalculated 0x%x, expected 0x%x\n", new_crc, *crc);
+ return -EINVAL;
+ }
+
+ return nfp_hwinfo_db_walk(db, size);
+}
+
+static struct nfp_hwinfo *
+nfp_hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
+{
+ struct nfp_hwinfo *header;
+ void *res;
+ uint64_t cpp_addr;
+ uint32_t cpp_id;
+ int err;
+ uint8_t *db;
+
+ res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO);
+ if (res) {
+ cpp_id = nfp_resource_cpp_id(res);
+ cpp_addr = nfp_resource_address(res);
+ *cpp_size = nfp_resource_size(res);
+
+ nfp_resource_release(res);
+
+ if (*cpp_size < HWINFO_SIZE_MIN)
+ return NULL;
+ } else {
+ return NULL;
+ }
+
+ db = malloc(*cpp_size + 1);
+ if (!db)
+ return NULL;
+
+ err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size);
+ if (err != (int)*cpp_size)
+ goto exit_free;
+
+ header = (void *)db;
+ printf("NFP HWINFO header: %08x\n", *(uint32_t *)header);
+ if (nfp_hwinfo_is_updating(header))
+ goto exit_free;
+
+ if (header->version != NFP_HWINFO_VERSION_2) {
+ printf("Unknown HWInfo version: 0x%08x\n",
+ header->version);
+ goto exit_free;
+ }
+
+ /* NULL-terminate for safety */
+ db[*cpp_size] = '\0';
+
+ return (void *)db;
+exit_free:
+ free(db);
+ return NULL;
+}
+
+static struct nfp_hwinfo *
+nfp_hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size)
+{
+ struct timespec wait;
+ struct nfp_hwinfo *db;
+ int count;
+
+ wait.tv_sec = 0;
+ wait.tv_nsec = 10000000;
+ count = 0;
+
+ for (;;) {
+ db = nfp_hwinfo_try_fetch(cpp, hwdb_size);
+ if (db)
+ return db;
+
+ nanosleep(&wait, NULL);
+ if (count++ > 200) {
+ printf("NFP access error\n");
+ return NULL;
+ }
+ }
+}
+
+struct nfp_hwinfo *
+nfp_hwinfo_read(struct nfp_cpp *cpp)
+{
+ struct nfp_hwinfo *db;
+ size_t hwdb_size = 0;
+ int err;
+
+ db = nfp_hwinfo_fetch(cpp, &hwdb_size);
+ if (!db)
+ return NULL;
+
+ err = nfp_hwinfo_db_validate(db, hwdb_size);
+ if (err) {
+ free(db);
+ return NULL;
+ }
+ return db;
+}
+
+/*
+ * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name
+ * @hwinfo: NFP HWinfo table
+ * @lookup: HWInfo name to search for
+ *
+ * Return: Value of the HWInfo name, or NULL
+ */
+const char *
+nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup)
+{
+ const char *key, *val, *end;
+
+ if (!hwinfo || !lookup)
+ return NULL;
+
+ end = hwinfo->data + hwinfo->size - sizeof(uint32_t);
+
+ for (key = hwinfo->data; *key && key < end;
+ key = val + strlen(val) + 1) {
+ val = key + strlen(key) + 1;
+
+ if (strcmp(key, lookup) == 0)
+ return val;
+ }
+
+ return NULL;
+}
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.h
new file mode 100644
index 00000000..ccc61632
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_HWINFO_H__
+#define __NFP_HWINFO_H__
+
+#include <inttypes.h>
+
+#define HWINFO_SIZE_MIN 0x100
+
+/*
+ * The Hardware Info Table defines the properties of the system.
+ *
+ * HWInfo v1 Table (fixed size)
+ *
+ * 0x0000: uint32_t version Hardware Info Table version (1.0)
+ * 0x0004: uint32_t size Total size of the table, including the
+ * CRC32 (IEEE 802.3)
+ * 0x0008: uint32_t jumptab Offset of key/value table
+ * 0x000c: uint32_t keys Total number of keys in the key/value
+ * table
+ * NNNNNN: Key/value jump table and string data
+ * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc)
+ * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE
+ *
+ * HWInfo v2 Table (variable size)
+ *
+ * 0x0000: uint32_t version Hardware Info Table version (2.0)
+ * 0x0004: uint32_t size Current size of the data area, excluding
+ * CRC32
+ * 0x0008: uint32_t limit Maximum size of the table
+ * 0x000c: uint32_t reserved Unused, set to zero
+ * NNNNNN: Key/value data
+ * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc)
+ * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE
+ *
+ * If the HWInfo table is in the process of being updated, the low bit of
+ * version will be set.
+ *
+ * HWInfo v1 Key/Value Table
+ * -------------------------
+ *
+ * The key/value table is a set of offsets to ASCIIZ strings which have
+ * been strcmp(3) sorted (yes, please use bsearch(3) on the table).
+ *
+ * All keys are guaranteed to be unique.
+ *
+ * N+0: uint32_t key_1 Offset to the first key
+ * N+4: uint32_t val_1 Offset to the first value
+ * N+8: uint32_t key_2 Offset to the second key
+ * N+c: uint32_t val_2 Offset to the second value
+ * ...
+ *
+ * HWInfo v2 Key/Value Table
+ * -------------------------
+ *
+ * Packed UTF8Z strings, ie 'key1\000value1\000key2\000value2\000'
+ *
+ * Unsorted.
+ */
+
+#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0)
+#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0)
+#define NFP_HWINFO_VERSION_UPDATING BIT(0)
+
+struct nfp_hwinfo {
+ uint8_t start[0];
+
+ uint32_t version;
+ uint32_t size;
+
+ /* v2 specific fields */
+ uint32_t limit;
+ uint32_t resv;
+
+ char data[];
+};
+
+struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp);
+
+const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.c
new file mode 100644
index 00000000..c86966df
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.c
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <rte_byteorder.h>
+
+#include "nfp_cpp.h"
+#include "nfp_mip.h"
+#include "nfp_nffw.h"
+
+#define NFP_MIP_SIGNATURE rte_cpu_to_le_32(0x0050494d) /* "MIP\0" */
+#define NFP_MIP_VERSION rte_cpu_to_le_32(1)
+#define NFP_MIP_MAX_OFFSET (256 * 1024)
+
+struct nfp_mip {
+ uint32_t signature;
+ uint32_t mip_version;
+ uint32_t mip_size;
+ uint32_t first_entry;
+
+ uint32_t version;
+ uint32_t buildnum;
+ uint32_t buildtime;
+ uint32_t loadtime;
+
+ uint32_t symtab_addr;
+ uint32_t symtab_size;
+ uint32_t strtab_addr;
+ uint32_t strtab_size;
+
+ char name[16];
+ char toolchain[32];
+};
+
+/* Read memory and check if it could be a valid MIP */
+static int
+nfp_mip_try_read(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr,
+ struct nfp_mip *mip)
+{
+ int ret;
+
+ ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip));
+ if (ret != sizeof(*mip)) {
+ printf("Failed to read MIP data (%d, %zu)\n",
+ ret, sizeof(*mip));
+ return -EIO;
+ }
+ if (mip->signature != NFP_MIP_SIGNATURE) {
+ printf("Incorrect MIP signature (0x%08x)\n",
+ rte_le_to_cpu_32(mip->signature));
+ return -EINVAL;
+ }
+ if (mip->mip_version != NFP_MIP_VERSION) {
+ printf("Unsupported MIP version (%d)\n",
+ rte_le_to_cpu_32(mip->mip_version));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Try to locate MIP using the resource table */
+static int
+nfp_mip_read_resource(struct nfp_cpp *cpp, struct nfp_mip *mip)
+{
+ struct nfp_nffw_info *nffw_info;
+ uint32_t cpp_id;
+ uint64_t addr;
+ int err;
+
+ nffw_info = nfp_nffw_info_open(cpp);
+ if (!nffw_info)
+ return -ENODEV;
+
+ err = nfp_nffw_info_mip_first(nffw_info, &cpp_id, &addr);
+ if (err)
+ goto exit_close_nffw;
+
+ err = nfp_mip_try_read(cpp, cpp_id, addr, mip);
+exit_close_nffw:
+ nfp_nffw_info_close(nffw_info);
+ return err;
+}
+
+/*
+ * nfp_mip_open() - Get device MIP structure
+ * @cpp: NFP CPP Handle
+ *
+ * Copy MIP structure from NFP device and return it. The returned
+ * structure is handled internally by the library and should be
+ * freed by calling nfp_mip_close().
+ *
+ * Return: pointer to mip, NULL on failure.
+ */
+struct nfp_mip *
+nfp_mip_open(struct nfp_cpp *cpp)
+{
+ struct nfp_mip *mip;
+ int err;
+
+ mip = malloc(sizeof(*mip));
+ if (!mip)
+ return NULL;
+
+ err = nfp_mip_read_resource(cpp, mip);
+ if (err) {
+ free(mip);
+ return NULL;
+ }
+
+ mip->name[sizeof(mip->name) - 1] = 0;
+
+ return mip;
+}
+
+void
+nfp_mip_close(struct nfp_mip *mip)
+{
+ free(mip);
+}
+
+const char *
+nfp_mip_name(const struct nfp_mip *mip)
+{
+ return mip->name;
+}
+
+/*
+ * nfp_mip_symtab() - Get the address and size of the MIP symbol table
+ * @mip: MIP handle
+ * @addr: Location for NFP DDR address of MIP symbol table
+ * @size: Location for size of MIP symbol table
+ */
+void
+nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size)
+{
+ *addr = rte_le_to_cpu_32(mip->symtab_addr);
+ *size = rte_le_to_cpu_32(mip->symtab_size);
+}
+
+/*
+ * nfp_mip_strtab() - Get the address and size of the MIP symbol name table
+ * @mip: MIP handle
+ * @addr: Location for NFP DDR address of MIP symbol name table
+ * @size: Location for size of MIP symbol name table
+ */
+void
+nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size)
+{
+ *addr = rte_le_to_cpu_32(mip->strtab_addr);
+ *size = rte_le_to_cpu_32(mip->strtab_size);
+}
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.h
new file mode 100644
index 00000000..d0919b58
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_MIP_H__
+#define __NFP_MIP_H__
+
+#include "nfp_nffw.h"
+
+struct nfp_mip;
+
+struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp);
+void nfp_mip_close(struct nfp_mip *mip);
+
+const char *nfp_mip_name(const struct nfp_mip *mip);
+void nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size);
+void nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size);
+int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id,
+ uint64_t *off);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c
new file mode 100644
index 00000000..318c5800
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c
@@ -0,0 +1,424 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <errno.h>
+
+#include <malloc.h>
+#include <time.h>
+#include <sched.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+
+#define MUTEX_LOCKED(interface) ((((uint32_t)(interface)) << 16) | 0x000f)
+#define MUTEX_UNLOCK(interface) (0 | 0x0000)
+
+#define MUTEX_IS_LOCKED(value) (((value) & 0xffff) == 0x000f)
+#define MUTEX_IS_UNLOCKED(value) (((value) & 0xffff) == 0x0000)
+#define MUTEX_INTERFACE(value) (((value) >> 16) & 0xffff)
+
+/*
+ * If you need more than 65536 recursive locks, please
+ * rethink your code.
+ */
+#define MUTEX_DEPTH_MAX 0xffff
+
+struct nfp_cpp_mutex {
+ struct nfp_cpp *cpp;
+ uint8_t target;
+ uint16_t depth;
+ unsigned long long address;
+ uint32_t key;
+ unsigned int usage;
+ struct nfp_cpp_mutex *prev, *next;
+};
+
+static int
+_nfp_cpp_mutex_validate(uint32_t model, int *target, unsigned long long address)
+{
+ /* Address must be 64-bit aligned */
+ if (address & 7)
+ return NFP_ERRNO(EINVAL);
+
+ if (NFP_CPP_MODEL_IS_6000(model)) {
+ if (*target != NFP_CPP_TARGET_MU)
+ return NFP_ERRNO(EINVAL);
+ } else {
+ return NFP_ERRNO(EINVAL);
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize a mutex location
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * will initialize 64 bits of data at the location.
+ *
+ * This creates the initial mutex state, as locked by this
+ * nfp_cpp_interface().
+ *
+ * This function should only be called when setting up
+ * the initial lock state upon boot-up of the system.
+ *
+ * @param mutex NFP CPP Mutex handle
+ * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or
+ * NFP_CPP_TARGET_MU)
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key Unique 32-bit value for this mutex
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, unsigned long long address,
+ uint32_t key)
+{
+ uint32_t model = nfp_cpp_model(cpp);
+ uint32_t muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
+ int err;
+
+ err = _nfp_cpp_mutex_validate(model, &target, address);
+ if (err < 0)
+ return err;
+
+ err = nfp_cpp_writel(cpp, muw, address + 4, key);
+ if (err < 0)
+ return err;
+
+ err =
+ nfp_cpp_writel(cpp, muw, address + 0,
+ MUTEX_LOCKED(nfp_cpp_interface(cpp)));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Create a mutex handle from an address controlled by a MU Atomic engine
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * reserve 64 bits of data at the location for use by the handle.
+ *
+ * Only target/address pairs that point to entities that support the
+ * MU Atomic Engine are supported.
+ *
+ * @param cpp NFP CPP handle
+ * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or
+ * NFP_CPP_TARGET_MU)
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key 32-bit unique key (must match the key at this location)
+ *
+ * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
+ */
+struct nfp_cpp_mutex *
+nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+ unsigned long long address, uint32_t key)
+{
+ uint32_t model = nfp_cpp_model(cpp);
+ struct nfp_cpp_mutex *mutex;
+ uint32_t mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
+ int err;
+ uint32_t tmp;
+
+ /* Look for cached mutex */
+ for (mutex = cpp->mutex_cache; mutex; mutex = mutex->next) {
+ if (mutex->target == target && mutex->address == address)
+ break;
+ }
+
+ if (mutex) {
+ if (mutex->key == key) {
+ mutex->usage++;
+ return mutex;
+ }
+
+ /* If the key doesn't match... */
+ return NFP_ERRPTR(EEXIST);
+ }
+
+ err = _nfp_cpp_mutex_validate(model, &target, address);
+ if (err < 0)
+ return NULL;
+
+ err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
+ if (err < 0)
+ return NULL;
+
+ if (tmp != key)
+ return NFP_ERRPTR(EEXIST);
+
+ mutex = calloc(sizeof(*mutex), 1);
+ if (!mutex)
+ return NFP_ERRPTR(ENOMEM);
+
+ mutex->cpp = cpp;
+ mutex->target = target;
+ mutex->address = address;
+ mutex->key = key;
+ mutex->depth = 0;
+ mutex->usage = 1;
+
+ /* Add mutex to the cache */
+ if (cpp->mutex_cache) {
+ cpp->mutex_cache->prev = mutex;
+ mutex->next = cpp->mutex_cache;
+ cpp->mutex_cache = mutex;
+ } else {
+ cpp->mutex_cache = mutex;
+ }
+
+ return mutex;
+}
+
+struct nfp_cpp *
+nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex)
+{
+ return mutex->cpp;
+}
+
+uint32_t
+nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex)
+{
+ return mutex->key;
+}
+
+uint16_t
+nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex)
+{
+ uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ uint32_t value, key;
+ int err;
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
+ if (err < 0)
+ return err;
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ return err;
+
+ if (key != mutex->key)
+ return NFP_ERRNO(EPERM);
+
+ if (!MUTEX_IS_LOCKED(value))
+ return 0;
+
+ return MUTEX_INTERFACE(value);
+}
+
+int
+nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex)
+{
+ return mutex->target;
+}
+
+uint64_t
+nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex)
+{
+ return mutex->address;
+}
+
+/*
+ * Free a mutex handle - does not alter the lock state
+ *
+ * @param mutex NFP CPP Mutex handle
+ */
+void
+nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
+{
+ mutex->usage--;
+ if (mutex->usage > 0)
+ return;
+
+ /* Remove mutex from the cache */
+ if (mutex->next)
+ mutex->next->prev = mutex->prev;
+ if (mutex->prev)
+ mutex->prev->next = mutex->next;
+
+ /* If mutex->cpp == NULL, something broke */
+ if (mutex->cpp && mutex == mutex->cpp->mutex_cache)
+ mutex->cpp->mutex_cache = mutex->next;
+
+ free(mutex);
+}
+
+/*
+ * Lock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
+{
+ int err;
+ time_t warn_at = time(NULL) + 15;
+
+ while ((err = nfp_cpp_mutex_trylock(mutex)) != 0) {
+ /* If errno != EBUSY, then the lock was damaged */
+ if (err < 0 && errno != EBUSY)
+ return err;
+ if (time(NULL) >= warn_at) {
+ printf("Warning: waiting for NFP mutex\n");
+ printf("\tusage:%u\n", mutex->usage);
+ printf("\tdepth:%hd]\n", mutex->depth);
+ printf("\ttarget:%d\n", mutex->target);
+ printf("\taddr:%llx\n", mutex->address);
+ printf("\tkey:%08x]\n", mutex->key);
+ warn_at = time(NULL) + 60;
+ }
+ sched_yield();
+ }
+ return 0;
+}
+
+/*
+ * Unlock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
+{
+ uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
+ uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ struct nfp_cpp *cpp = mutex->cpp;
+ uint32_t key, value;
+ uint16_t interface = nfp_cpp_interface(cpp);
+ int err;
+
+ if (mutex->depth > 1) {
+ mutex->depth--;
+ return 0;
+ }
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
+ if (err < 0)
+ goto exit;
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ goto exit;
+
+ if (key != mutex->key) {
+ err = NFP_ERRNO(EPERM);
+ goto exit;
+ }
+
+ if (value != MUTEX_LOCKED(interface)) {
+ err = NFP_ERRNO(EACCES);
+ goto exit;
+ }
+
+ err = nfp_cpp_writel(cpp, muw, mutex->address, MUTEX_UNLOCK(interface));
+ if (err < 0)
+ goto exit;
+
+ mutex->depth = 0;
+
+exit:
+ return err;
+}
+
+/*
+ * Attempt to lock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * Valid lock states:
+ *
+ * 0x....0000 - Unlocked
+ * 0x....000f - Locked
+ *
+ * @param mutex NFP CPP Mutex handle
+ * @return 0 if the lock succeeded, -1 on failure (and errno set
+ * appropriately).
+ */
+int
+nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
+{
+ uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
+ uint32_t mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */
+ uint32_t key, value, tmp;
+ struct nfp_cpp *cpp = mutex->cpp;
+ int err;
+
+ if (mutex->depth > 0) {
+ if (mutex->depth == MUTEX_DEPTH_MAX)
+ return NFP_ERRNO(E2BIG);
+
+ mutex->depth++;
+ return 0;
+ }
+
+ /* Verify that the lock marker is not damaged */
+ err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ goto exit;
+
+ if (key != mutex->key) {
+ err = NFP_ERRNO(EPERM);
+ goto exit;
+ }
+
+ /*
+ * Compare against the unlocked state, and if true,
+ * write the interface id into the top 16 bits, and
+ * mark as locked.
+ */
+ value = MUTEX_LOCKED(nfp_cpp_interface(cpp));
+
+ /*
+ * We use test_set_imm here, as it implies a read
+ * of the current state, and sets the bits in the
+ * bytemask of the command to 1s. Since the mutex
+ * is guaranteed to be 64-bit aligned, the bytemask
+ * of this 32-bit command is ensured to be 8'b00001111,
+ * which implies that the lower 4 bits will be set to
+ * ones regardless of the initial state.
+ *
+ * Since this is a 'Readback' operation, with no Pull
+ * data, we can treat this as a normal Push (read)
+ * atomic, which returns the original value.
+ */
+ err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
+ if (err < 0)
+ goto exit;
+
+ /* Was it unlocked? */
+ if (MUTEX_IS_UNLOCKED(tmp)) {
+ /*
+ * The read value can only be 0x....0000 in the unlocked state.
+ * If there was another contending for this lock, then
+ * the lock state would be 0x....000f
+ *
+ * Write our owner ID into the lock
+ * While not strictly necessary, this helps with
+ * debug and bookkeeping.
+ */
+ err = nfp_cpp_writel(cpp, muw, mutex->address, value);
+ if (err < 0)
+ goto exit;
+
+ mutex->depth = 1;
+ goto exit;
+ }
+
+ /* Already locked by us? Success! */
+ if (tmp == value) {
+ mutex->depth = 1;
+ goto exit;
+ }
+
+ err = NFP_ERRNO(MUTEX_IS_LOCKED(tmp) ? EBUSY : EINVAL);
+
+exit:
+ return err;
+}
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.c
new file mode 100644
index 00000000..8bec0e3c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.c
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include "nfp_cpp.h"
+#include "nfp_nffw.h"
+#include "nfp_mip.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp_resource.h"
+
+/*
+ * flg_info_version = flags[0]<27:16>
+ * This is a small version counter intended only to detect if the current
+ * implementation can read the current struct. Struct changes should be very
+ * rare and as such a 12-bit counter should cover large spans of time. By the
+ * time it wraps around, we don't expect to have 4096 versions of this struct
+ * to be in use at the same time.
+ */
+static uint32_t
+nffw_res_info_version_get(const struct nfp_nffw_info_data *res)
+{
+ return (res->flags[0] >> 16) & 0xfff;
+}
+
+/* flg_init = flags[0]<0> */
+static uint32_t
+nffw_res_flg_init_get(const struct nfp_nffw_info_data *res)
+{
+ return (res->flags[0] >> 0) & 1;
+}
+
+/* loaded = loaded__mu_da__mip_off_hi<31:31> */
+static uint32_t
+nffw_fwinfo_loaded_get(const struct nffw_fwinfo *fi)
+{
+ return (fi->loaded__mu_da__mip_off_hi >> 31) & 1;
+}
+
+/* mip_cppid = mip_cppid */
+static uint32_t
+nffw_fwinfo_mip_cppid_get(const struct nffw_fwinfo *fi)
+{
+ return fi->mip_cppid;
+}
+
+/* loaded = loaded__mu_da__mip_off_hi<8:8> */
+static uint32_t
+nffw_fwinfo_mip_mu_da_get(const struct nffw_fwinfo *fi)
+{
+ return (fi->loaded__mu_da__mip_off_hi >> 8) & 1;
+}
+
+/* mip_offset = (loaded__mu_da__mip_off_hi<7:0> << 8) | mip_offset_lo */
+static uint64_t
+nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi)
+{
+ uint64_t mip_off_hi = fi->loaded__mu_da__mip_off_hi;
+
+ return (mip_off_hi & 0xFF) << 32 | fi->mip_offset_lo;
+}
+
+#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12)
+
+static int
+nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp)
+{
+ unsigned int mode, addr40;
+ uint32_t xpbaddr, imbcppat;
+ int err;
+
+ /* Hardcoded XPB IMB Base, island 0 */
+ xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4;
+ err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat);
+ if (err < 0)
+ return err;
+
+ mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
+ addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE);
+
+ return nfp_cppat_mu_locality_lsb(mode, addr40);
+}
+
+static unsigned int
+nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr)
+{
+ /*
+ * For the this code, version 0 is most likely to be version 1 in this
+ * case. Since the kernel driver does not take responsibility for
+ * initialising the nfp.nffw resource, any previous code (CA firmware or
+ * userspace) that left the version 0 and did set the init flag is going
+ * to be version 1.
+ */
+ switch (nffw_res_info_version_get(fwinf)) {
+ case 0:
+ case 1:
+ *arr = &fwinf->info.v1.fwinfo[0];
+ return NFFW_FWINFO_CNT_V1;
+ case 2:
+ *arr = &fwinf->info.v2.fwinfo[0];
+ return NFFW_FWINFO_CNT_V2;
+ default:
+ *arr = NULL;
+ return 0;
+ }
+}
+
+/*
+ * nfp_nffw_info_open() - Acquire the lock on the NFFW table
+ * @cpp: NFP CPP handle
+ *
+ * Return: 0, or -ERRNO
+ */
+struct nfp_nffw_info *
+nfp_nffw_info_open(struct nfp_cpp *cpp)
+{
+ struct nfp_nffw_info_data *fwinf;
+ struct nfp_nffw_info *state;
+ uint32_t info_ver;
+ int err;
+
+ state = malloc(sizeof(*state));
+ if (!state)
+ return NULL;
+
+ memset(state, 0, sizeof(*state));
+
+ state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW);
+ if (!state->res)
+ goto err_free;
+
+ fwinf = &state->fwinf;
+
+ if (sizeof(*fwinf) > nfp_resource_size(state->res))
+ goto err_release;
+
+ err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
+ nfp_resource_address(state->res),
+ fwinf, sizeof(*fwinf));
+ if (err < (int)sizeof(*fwinf))
+ goto err_release;
+
+ if (!nffw_res_flg_init_get(fwinf))
+ goto err_release;
+
+ info_ver = nffw_res_info_version_get(fwinf);
+ if (info_ver > NFFW_INFO_VERSION_CURRENT)
+ goto err_release;
+
+ state->cpp = cpp;
+ return state;
+
+err_release:
+ nfp_resource_release(state->res);
+err_free:
+ free(state);
+ return NULL;
+}
+
+/*
+ * nfp_nffw_info_release() - Release the lock on the NFFW table
+ * @state: NFP FW info state
+ *
+ * Return: 0, or -ERRNO
+ */
+void
+nfp_nffw_info_close(struct nfp_nffw_info *state)
+{
+ nfp_resource_release(state->res);
+ free(state);
+}
+
+/*
+ * nfp_nffw_info_fwid_first() - Return the first firmware ID in the NFFW
+ * @state: NFP FW info state
+ *
+ * Return: First NFFW firmware info, NULL on failure
+ */
+static struct nffw_fwinfo *
+nfp_nffw_info_fwid_first(struct nfp_nffw_info *state)
+{
+ struct nffw_fwinfo *fwinfo;
+ unsigned int cnt, i;
+
+ cnt = nffw_res_fwinfos(&state->fwinf, &fwinfo);
+ if (!cnt)
+ return NULL;
+
+ for (i = 0; i < cnt; i++)
+ if (nffw_fwinfo_loaded_get(&fwinfo[i]))
+ return &fwinfo[i];
+
+ return NULL;
+}
+
+/*
+ * nfp_nffw_info_mip_first() - Retrieve the location of the first FW's MIP
+ * @state: NFP FW info state
+ * @cpp_id: Pointer to the CPP ID of the MIP
+ * @off: Pointer to the CPP Address of the MIP
+ *
+ * Return: 0, or -ERRNO
+ */
+int
+nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id,
+ uint64_t *off)
+{
+ struct nffw_fwinfo *fwinfo;
+
+ fwinfo = nfp_nffw_info_fwid_first(state);
+ if (!fwinfo)
+ return -EINVAL;
+
+ *cpp_id = nffw_fwinfo_mip_cppid_get(fwinfo);
+ *off = nffw_fwinfo_mip_offset_get(fwinfo);
+
+ if (nffw_fwinfo_mip_mu_da_get(fwinfo)) {
+ int locality_off;
+
+ if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU)
+ return 0;
+
+ locality_off = nfp_mip_mu_locality_lsb(state->cpp);
+ if (locality_off < 0)
+ return locality_off;
+
+ *off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off);
+ *off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off;
+ }
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.h
new file mode 100644
index 00000000..3bbdf1c1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_NFFW_H__
+#define __NFP_NFFW_H__
+
+#include "nfp-common/nfp_platform.h"
+#include "nfp_cpp.h"
+
+/*
+ * Init-CSR owner IDs for firmware map to firmware IDs which start at 4.
+ * Lower IDs are reserved for target and loader IDs.
+ */
+#define NFFW_FWID_EXT 3 /* For active MEs that we didn't load. */
+#define NFFW_FWID_BASE 4
+
+#define NFFW_FWID_ALL 255
+
+/* Init-CSR owner IDs for firmware map to firmware IDs which start at 4.
+ * Lower IDs are reserved for target and loader IDs.
+ */
+#define NFFW_FWID_EXT 3 /* For active MEs that we didn't load. */
+#define NFFW_FWID_BASE 4
+
+#define NFFW_FWID_ALL 255
+
+/**
+ * NFFW_INFO_VERSION history:
+ * 0: This was never actually used (before versioning), but it refers to
+ * the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later
+ * changed to 200.
+ * 1: First versioned struct, with
+ * FWINFO_CNT = 120
+ * MEINFO_CNT = 120
+ * 2: FWINFO_CNT = 200
+ * MEINFO_CNT = 200
+ */
+#define NFFW_INFO_VERSION_CURRENT 2
+
+/* Enough for all current chip families */
+#define NFFW_MEINFO_CNT_V1 120
+#define NFFW_FWINFO_CNT_V1 120
+#define NFFW_MEINFO_CNT_V2 200
+#define NFFW_FWINFO_CNT_V2 200
+
+struct nffw_meinfo {
+ uint32_t ctxmask__fwid__meid;
+};
+
+struct nffw_fwinfo {
+ uint32_t loaded__mu_da__mip_off_hi;
+ uint32_t mip_cppid; /* 0 means no MIP */
+ uint32_t mip_offset_lo;
+};
+
+struct nfp_nffw_info_v1 {
+ struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1];
+ struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1];
+};
+
+struct nfp_nffw_info_v2 {
+ struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2];
+ struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2];
+};
+
+struct nfp_nffw_info_data {
+ uint32_t flags[2];
+ union {
+ struct nfp_nffw_info_v1 v1;
+ struct nfp_nffw_info_v2 v2;
+ } info;
+};
+
+struct nfp_nffw_info {
+ struct nfp_cpp *cpp;
+ struct nfp_resource *res;
+
+ struct nfp_nffw_info_data fwinf;
+};
+
+struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp);
+void nfp_nffw_info_close(struct nfp_nffw_info *state);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.c
new file mode 100644
index 00000000..876a4017
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.c
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#define NFP_SUBSYS "nfp_nsp"
+
+#include <stdio.h>
+#include <time.h>
+
+#include <rte_common.h>
+
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+#include "nfp_resource.h"
+
+int
+nfp_nsp_config_modified(struct nfp_nsp *state)
+{
+ return state->modified;
+}
+
+void
+nfp_nsp_config_set_modified(struct nfp_nsp *state, int modified)
+{
+ state->modified = modified;
+}
+
+void *
+nfp_nsp_config_entries(struct nfp_nsp *state)
+{
+ return state->entries;
+}
+
+unsigned int
+nfp_nsp_config_idx(struct nfp_nsp *state)
+{
+ return state->idx;
+}
+
+void
+nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx)
+{
+ state->entries = entries;
+ state->idx = idx;
+}
+
+void
+nfp_nsp_config_clear_state(struct nfp_nsp *state)
+{
+ state->entries = NULL;
+ state->idx = 0;
+}
+
+static void
+nfp_nsp_print_extended_error(uint32_t ret_val)
+{
+ int i;
+
+ if (!ret_val)
+ return;
+
+ for (i = 0; i < (int)ARRAY_SIZE(nsp_errors); i++)
+ if (ret_val == (uint32_t)nsp_errors[i].code)
+ printf("err msg: %s\n", nsp_errors[i].msg);
+}
+
+static int
+nfp_nsp_check(struct nfp_nsp *state)
+{
+ struct nfp_cpp *cpp = state->cpp;
+ uint64_t nsp_status, reg;
+ uint32_t nsp_cpp;
+ int err;
+
+ nsp_cpp = nfp_resource_cpp_id(state->res);
+ nsp_status = nfp_resource_address(state->res) + NSP_STATUS;
+
+ err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, &reg);
+ if (err < 0)
+ return err;
+
+ if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) {
+ printf("Cannot detect NFP Service Processor\n");
+ return -ENODEV;
+ }
+
+ state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg);
+ state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg);
+
+ if (state->ver.major != NSP_MAJOR || state->ver.minor < NSP_MINOR) {
+ printf("Unsupported ABI %hu.%hu\n", state->ver.major,
+ state->ver.minor);
+ return -EINVAL;
+ }
+
+ if (reg & NSP_STATUS_BUSY) {
+ printf("Service processor busy!\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/*
+ * nfp_nsp_open() - Prepare for communication and lock the NSP resource.
+ * @cpp: NFP CPP Handle
+ */
+struct nfp_nsp *
+nfp_nsp_open(struct nfp_cpp *cpp)
+{
+ struct nfp_resource *res;
+ struct nfp_nsp *state;
+ int err;
+
+ res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP);
+ if (!res)
+ return NULL;
+
+ state = malloc(sizeof(*state));
+ if (!state) {
+ nfp_resource_release(res);
+ return NULL;
+ }
+ memset(state, 0, sizeof(*state));
+ state->cpp = cpp;
+ state->res = res;
+
+ err = nfp_nsp_check(state);
+ if (err) {
+ nfp_nsp_close(state);
+ return NULL;
+ }
+
+ return state;
+}
+
+/*
+ * nfp_nsp_close() - Clean up and unlock the NSP resource.
+ * @state: NFP SP state
+ */
+void
+nfp_nsp_close(struct nfp_nsp *state)
+{
+ nfp_resource_release(state->res);
+ free(state);
+}
+
+uint16_t
+nfp_nsp_get_abi_ver_major(struct nfp_nsp *state)
+{
+ return state->ver.major;
+}
+
+uint16_t
+nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state)
+{
+ return state->ver.minor;
+}
+
+static int
+nfp_nsp_wait_reg(struct nfp_cpp *cpp, uint64_t *reg, uint32_t nsp_cpp,
+ uint64_t addr, uint64_t mask, uint64_t val)
+{
+ struct timespec wait;
+ int count;
+ int err;
+
+ wait.tv_sec = 0;
+ wait.tv_nsec = 25000000;
+ count = 0;
+
+ for (;;) {
+ err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg);
+ if (err < 0)
+ return err;
+
+ if ((*reg & mask) == val)
+ return 0;
+
+ nanosleep(&wait, 0);
+ if (count++ > 1000)
+ return -ETIMEDOUT;
+ }
+}
+
+/*
+ * nfp_nsp_command() - Execute a command on the NFP Service Processor
+ * @state: NFP SP state
+ * @code: NFP SP Command Code
+ * @option: NFP SP Command Argument
+ * @buff_cpp: NFP SP Buffer CPP Address info
+ * @buff_addr: NFP SP Buffer Host address
+ *
+ * Return: 0 for success with no result
+ *
+ * positive value for NSP completion with a result code
+ *
+ * -EAGAIN if the NSP is not yet present
+ * -ENODEV if the NSP is not a supported model
+ * -EBUSY if the NSP is stuck
+ * -EINTR if interrupted while waiting for completion
+ * -ETIMEDOUT if the NSP took longer than 30 seconds to complete
+ */
+static int
+nfp_nsp_command(struct nfp_nsp *state, uint16_t code, uint32_t option,
+ uint32_t buff_cpp, uint64_t buff_addr)
+{
+ uint64_t reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
+ struct nfp_cpp *cpp = state->cpp;
+ uint32_t nsp_cpp;
+ int err;
+
+ nsp_cpp = nfp_resource_cpp_id(state->res);
+ nsp_base = nfp_resource_address(state->res);
+ nsp_status = nsp_base + NSP_STATUS;
+ nsp_command = nsp_base + NSP_COMMAND;
+ nsp_buffer = nsp_base + NSP_BUFFER;
+
+ err = nfp_nsp_check(state);
+ if (err)
+ return err;
+
+ if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) ||
+ !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) {
+ printf("Host buffer out of reach %08x %" PRIx64 "\n",
+ buff_cpp, buff_addr);
+ return -EINVAL;
+ }
+
+ err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer,
+ FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) |
+ FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr));
+ if (err < 0)
+ return err;
+
+ err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command,
+ FIELD_PREP(NSP_COMMAND_OPTION, option) |
+ FIELD_PREP(NSP_COMMAND_CODE, code) |
+ FIELD_PREP(NSP_COMMAND_START, 1));
+ if (err < 0)
+ return err;
+
+ /* Wait for NSP_COMMAND_START to go to 0 */
+ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_command,
+ NSP_COMMAND_START, 0);
+ if (err) {
+ printf("Error %d waiting for code 0x%04x to start\n",
+ err, code);
+ return err;
+ }
+
+ /* Wait for NSP_STATUS_BUSY to go to 0 */
+ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY,
+ 0);
+ if (err) {
+ printf("Error %d waiting for code 0x%04x to complete\n",
+ err, code);
+ return err;
+ }
+
+ err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &ret_val);
+ if (err < 0)
+ return err;
+ ret_val = FIELD_GET(NSP_COMMAND_OPTION, ret_val);
+
+ err = FIELD_GET(NSP_STATUS_RESULT, reg);
+ if (err) {
+ printf("Result (error) code set: %d (%d) command: %d\n",
+ -err, (int)ret_val, code);
+ nfp_nsp_print_extended_error(ret_val);
+ return -err;
+ }
+
+ return ret_val;
+}
+
+#define SZ_1M 0x00100000
+
+static int
+nfp_nsp_command_buf(struct nfp_nsp *nsp, uint16_t code, uint32_t option,
+ const void *in_buf, unsigned int in_size, void *out_buf,
+ unsigned int out_size)
+{
+ struct nfp_cpp *cpp = nsp->cpp;
+ unsigned int max_size;
+ uint64_t reg, cpp_buf;
+ int ret, err;
+ uint32_t cpp_id;
+
+ if (nsp->ver.minor < 13) {
+ printf("NSP: Code 0x%04x with buffer not supported\n", code);
+ printf("\t(ABI %hu.%hu)\n", nsp->ver.major, nsp->ver.minor);
+ return -EOPNOTSUPP;
+ }
+
+ err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+ nfp_resource_address(nsp->res) +
+ NSP_DFLT_BUFFER_CONFIG,
+ &reg);
+ if (err < 0)
+ return err;
+
+ max_size = RTE_MAX(in_size, out_size);
+ if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) {
+ printf("NSP: default buffer too small for command 0x%04x\n",
+ code);
+ printf("\t(%llu < %u)\n",
+ FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M,
+ max_size);
+ return -EINVAL;
+ }
+
+ err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+ nfp_resource_address(nsp->res) +
+ NSP_DFLT_BUFFER,
+ &reg);
+ if (err < 0)
+ return err;
+
+ cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8;
+ cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg);
+
+ if (in_buf && in_size) {
+ err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size);
+ if (err < 0)
+ return err;
+ }
+ /* Zero out remaining part of the buffer */
+ if (out_buf && out_size && out_size > in_size) {
+ memset(out_buf, 0, out_size - in_size);
+ err = nfp_cpp_write(cpp, cpp_id, cpp_buf + in_size, out_buf,
+ out_size - in_size);
+ if (err < 0)
+ return err;
+ }
+
+ ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
+ if (ret < 0)
+ return ret;
+
+ if (out_buf && out_size) {
+ err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size);
+ if (err < 0)
+ return err;
+ }
+
+ return ret;
+}
+
+int
+nfp_nsp_wait(struct nfp_nsp *state)
+{
+ struct timespec wait;
+ int count;
+ int err;
+
+ wait.tv_sec = 0;
+ wait.tv_nsec = 25000000;
+ count = 0;
+
+ for (;;) {
+ err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0);
+ if (err != -EAGAIN)
+ break;
+
+ nanosleep(&wait, 0);
+
+ if (count++ > 1000) {
+ err = -ETIMEDOUT;
+ break;
+ }
+ }
+ if (err)
+ printf("NSP failed to respond %d\n", err);
+
+ return err;
+}
+
+int
+nfp_nsp_device_soft_reset(struct nfp_nsp *state)
+{
+ return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
+}
+
+int
+nfp_nsp_mac_reinit(struct nfp_nsp *state)
+{
+ return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0);
+}
+
+int
+nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, size, buf, size,
+ NULL, 0);
+}
+
+int
+nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0,
+ buf, size);
+}
+
+int
+nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf,
+ unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size,
+ NULL, 0);
+}
+
+int
+nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0,
+ buf, size);
+}
+
+int
+nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, void *buf,
+ unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_NSP_SENSORS, sensor_mask, NULL,
+ 0, buf, size);
+}
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h
new file mode 100644
index 00000000..c9c7b0d0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef NSP_NSP_H
+#define NSP_NSP_H 1
+
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (64 - 1 - (h))))
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define FIELD_GET(_mask, _reg) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ (typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \
+ }))
+
+#define FIELD_FIT(_mask, _val) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ !((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \
+ }))
+
+#define FIELD_PREP(_mask, _val) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ ((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \
+ }))
+
+/* Offsets relative to the CSR base */
+#define NSP_STATUS 0x00
+#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48)
+#define NSP_STATUS_MAJOR GENMASK_ULL(47, 44)
+#define NSP_STATUS_MINOR GENMASK_ULL(43, 32)
+#define NSP_STATUS_CODE GENMASK_ULL(31, 16)
+#define NSP_STATUS_RESULT GENMASK_ULL(15, 8)
+#define NSP_STATUS_BUSY BIT_ULL(0)
+
+#define NSP_COMMAND 0x08
+#define NSP_COMMAND_OPTION GENMASK_ULL(63, 32)
+#define NSP_COMMAND_CODE GENMASK_ULL(31, 16)
+#define NSP_COMMAND_START BIT_ULL(0)
+
+/* CPP address to retrieve the data from */
+#define NSP_BUFFER 0x10
+#define NSP_BUFFER_CPP GENMASK_ULL(63, 40)
+#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38)
+#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0)
+
+#define NSP_DFLT_BUFFER 0x18
+
+#define NSP_DFLT_BUFFER_CONFIG 0x20
+#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0)
+
+#define NSP_MAGIC 0xab10
+#define NSP_MAJOR 0
+#define NSP_MINOR 8
+
+#define NSP_CODE_MAJOR GENMASK(15, 12)
+#define NSP_CODE_MINOR GENMASK(11, 0)
+
+enum nfp_nsp_cmd {
+ SPCODE_NOOP = 0, /* No operation */
+ SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */
+ SPCODE_FW_DEFAULT = 2, /* Load default (UNDI) FW */
+ SPCODE_PHY_INIT = 3, /* Initialize the PHY */
+ SPCODE_MAC_INIT = 4, /* Initialize the MAC */
+ SPCODE_PHY_RXADAPT = 5, /* Re-run PHY RX Adaptation */
+ SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */
+ SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */
+ SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */
+ SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */
+ SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
+};
+
+static const struct {
+ int code;
+ const char *msg;
+} nsp_errors[] = {
+ { 6010, "could not map to phy for port" },
+ { 6011, "not an allowed rate/lanes for port" },
+ { 6012, "not an allowed rate/lanes for port" },
+ { 6013, "high/low error, change other port first" },
+ { 6014, "config not found in flash" },
+};
+
+struct nfp_nsp {
+ struct nfp_cpp *cpp;
+ struct nfp_resource *res;
+ struct {
+ uint16_t major;
+ uint16_t minor;
+ } ver;
+
+ /* Eth table config state */
+ int modified;
+ unsigned int idx;
+ void *entries;
+};
+
+struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
+void nfp_nsp_close(struct nfp_nsp *state);
+uint16_t nfp_nsp_get_abi_ver_major(struct nfp_nsp *state);
+uint16_t nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
+int nfp_nsp_wait(struct nfp_nsp *state);
+int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
+int nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_mac_reinit(struct nfp_nsp *state);
+int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
+ void *buf, unsigned int size);
+
+static inline int nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 20;
+}
+
+enum nfp_eth_interface {
+ NFP_INTERFACE_NONE = 0,
+ NFP_INTERFACE_SFP = 1,
+ NFP_INTERFACE_SFPP = 10,
+ NFP_INTERFACE_SFP28 = 28,
+ NFP_INTERFACE_QSFP = 40,
+ NFP_INTERFACE_CXP = 100,
+ NFP_INTERFACE_QSFP28 = 112,
+};
+
+enum nfp_eth_media {
+ NFP_MEDIA_DAC_PASSIVE = 0,
+ NFP_MEDIA_DAC_ACTIVE,
+ NFP_MEDIA_FIBRE,
+};
+
+enum nfp_eth_aneg {
+ NFP_ANEG_AUTO = 0,
+ NFP_ANEG_SEARCH,
+ NFP_ANEG_25G_CONSORTIUM,
+ NFP_ANEG_25G_IEEE,
+ NFP_ANEG_DISABLED,
+};
+
+enum nfp_eth_fec {
+ NFP_FEC_AUTO_BIT = 0,
+ NFP_FEC_BASER_BIT,
+ NFP_FEC_REED_SOLOMON_BIT,
+ NFP_FEC_DISABLED_BIT,
+};
+
+#define NFP_FEC_AUTO BIT(NFP_FEC_AUTO_BIT)
+#define NFP_FEC_BASER BIT(NFP_FEC_BASER_BIT)
+#define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT)
+#define NFP_FEC_DISABLED BIT(NFP_FEC_DISABLED_BIT)
+
+#define ETH_ALEN 6
+
+/**
+ * struct nfp_eth_table - ETH table information
+ * @count: number of table entries
+ * @max_index: max of @index fields of all @ports
+ * @ports: table of ports
+ *
+ * @eth_index: port index according to legacy ethX numbering
+ * @index: chip-wide first channel index
+ * @nbi: NBI index
+ * @base: first channel index (within NBI)
+ * @lanes: number of channels
+ * @speed: interface speed (in Mbps)
+ * @interface: interface (module) plugged in
+ * @media: media type of the @interface
+ * @fec: forward error correction mode
+ * @aneg: auto negotiation mode
+ * @mac_addr: interface MAC address
+ * @label_port: port id
+ * @label_subport: id of interface within port (for split ports)
+ * @enabled: is enabled?
+ * @tx_enabled: is TX enabled?
+ * @rx_enabled: is RX enabled?
+ * @override_changed: is media reconfig pending?
+ *
+ * @port_type: one of %PORT_* defines for ethtool
+ * @port_lanes: total number of lanes on the port (sum of lanes of all subports)
+ * @is_split: is interface part of a split port
+ * @fec_modes_supported: bitmap of FEC modes supported
+ */
+struct nfp_eth_table {
+ unsigned int count;
+ unsigned int max_index;
+ struct nfp_eth_table_port {
+ unsigned int eth_index;
+ unsigned int index;
+ unsigned int nbi;
+ unsigned int base;
+ unsigned int lanes;
+ unsigned int speed;
+
+ unsigned int interface;
+ enum nfp_eth_media media;
+
+ enum nfp_eth_fec fec;
+ enum nfp_eth_aneg aneg;
+
+ uint8_t mac_addr[ETH_ALEN];
+
+ uint8_t label_port;
+ uint8_t label_subport;
+
+ int enabled;
+ int tx_enabled;
+ int rx_enabled;
+
+ int override_changed;
+
+ /* Computed fields */
+ uint8_t port_type;
+
+ unsigned int port_lanes;
+
+ int is_split;
+
+ unsigned int fec_modes_supported;
+ } ports[0];
+};
+
+struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
+
+int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable);
+int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx,
+ int configed);
+int
+nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode);
+
+int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf,
+ unsigned int size);
+void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries,
+ unsigned int idx);
+void nfp_nsp_config_clear_state(struct nfp_nsp *state);
+void nfp_nsp_config_set_modified(struct nfp_nsp *state, int modified);
+void *nfp_nsp_config_entries(struct nfp_nsp *state);
+int nfp_nsp_config_modified(struct nfp_nsp *state);
+unsigned int nfp_nsp_config_idx(struct nfp_nsp *state);
+
+static inline int nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port)
+{
+ return !!eth_port->fec_modes_supported;
+}
+
+static inline unsigned int
+nfp_eth_supported_fec_modes(struct nfp_eth_table_port *eth_port)
+{
+ return eth_port->fec_modes_supported;
+}
+
+struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx);
+int nfp_eth_config_commit_end(struct nfp_nsp *nsp);
+void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp);
+
+int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode);
+int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed);
+int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes);
+
+/**
+ * struct nfp_nsp_identify - NSP static information
+ * @version: opaque version string
+ * @flags: version flags
+ * @br_primary: branch id of primary bootloader
+ * @br_secondary: branch id of secondary bootloader
+ * @br_nsp: branch id of NSP
+ * @primary: version of primarary bootloader
+ * @secondary: version id of secondary bootloader
+ * @nsp: version id of NSP
+ * @sensor_mask: mask of present sensors available on NIC
+ */
+struct nfp_nsp_identify {
+ char version[40];
+ uint8_t flags;
+ uint8_t br_primary;
+ uint8_t br_secondary;
+ uint8_t br_nsp;
+ uint16_t primary;
+ uint16_t secondary;
+ uint16_t nsp;
+ uint64_t sensor_mask;
+};
+
+struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp);
+
+enum nfp_nsp_sensor_id {
+ NFP_SENSOR_CHIP_TEMPERATURE,
+ NFP_SENSOR_ASSEMBLY_POWER,
+ NFP_SENSOR_ASSEMBLY_12V_POWER,
+ NFP_SENSOR_ASSEMBLY_3V3_POWER,
+};
+
+int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
+ long *val);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c
new file mode 100644
index 00000000..bfd1eddb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <rte_byteorder.h>
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+#include "nfp_nffw.h"
+
+struct nsp_identify {
+ uint8_t version[40];
+ uint8_t flags;
+ uint8_t br_primary;
+ uint8_t br_secondary;
+ uint8_t br_nsp;
+ uint16_t primary;
+ uint16_t secondary;
+ uint16_t nsp;
+ uint8_t reserved[6];
+ uint64_t sensor_mask;
+};
+
+struct nfp_nsp_identify *
+__nfp_nsp_identify(struct nfp_nsp *nsp)
+{
+ struct nfp_nsp_identify *nspi = NULL;
+ struct nsp_identify *ni;
+ int ret;
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 15)
+ return NULL;
+
+ ni = malloc(sizeof(*ni));
+ if (!ni)
+ return NULL;
+
+ memset(ni, 0, sizeof(*ni));
+ ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni));
+ if (ret < 0) {
+ printf("reading bsp version failed %d\n",
+ ret);
+ goto exit_free;
+ }
+
+ nspi = malloc(sizeof(*nspi));
+ if (!nspi)
+ goto exit_free;
+
+ memset(nspi, 0, sizeof(*nspi));
+ memcpy(nspi->version, ni->version, sizeof(nspi->version));
+ nspi->version[sizeof(nspi->version) - 1] = '\0';
+ nspi->flags = ni->flags;
+ nspi->br_primary = ni->br_primary;
+ nspi->br_secondary = ni->br_secondary;
+ nspi->br_nsp = ni->br_nsp;
+ nspi->primary = rte_le_to_cpu_16(ni->primary);
+ nspi->secondary = rte_le_to_cpu_16(ni->secondary);
+ nspi->nsp = rte_le_to_cpu_16(ni->nsp);
+ nspi->sensor_mask = rte_le_to_cpu_64(ni->sensor_mask);
+
+exit_free:
+ free(ni);
+ return nspi;
+}
+
+struct nfp_sensors {
+ uint32_t chip_temp;
+ uint32_t assembly_power;
+ uint32_t assembly_12v_power;
+ uint32_t assembly_3v3_power;
+};
+
+int
+nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id, long *val)
+{
+ struct nfp_sensors s;
+ struct nfp_nsp *nsp;
+ int ret;
+
+ nsp = nfp_nsp_open(cpp);
+ if (!nsp)
+ return -EIO;
+
+ ret = nfp_nsp_read_sensors(nsp, BIT(id), &s, sizeof(s));
+ nfp_nsp_close(nsp);
+
+ if (ret < 0)
+ return ret;
+
+ switch (id) {
+ case NFP_SENSOR_CHIP_TEMPERATURE:
+ *val = rte_le_to_cpu_32(s.chip_temp);
+ break;
+ case NFP_SENSOR_ASSEMBLY_POWER:
+ *val = rte_le_to_cpu_32(s.assembly_power);
+ break;
+ case NFP_SENSOR_ASSEMBLY_12V_POWER:
+ *val = rte_le_to_cpu_32(s.assembly_12v_power);
+ break;
+ case NFP_SENSOR_ASSEMBLY_3V3_POWER:
+ *val = rte_le_to_cpu_32(s.assembly_3v3_power);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_eth.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
new file mode 100644
index 00000000..67946891
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
@@ -0,0 +1,665 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+#include "nfp6000/nfp6000.h"
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (64 - 1 - (h))))
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define FIELD_GET(_mask, _reg) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ (typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \
+ }))
+
+#define FIELD_FIT(_mask, _val) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ !((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \
+ }))
+
+#define FIELD_PREP(_mask, _val) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ ((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \
+ }))
+
+#define NSP_ETH_NBI_PORT_COUNT 24
+#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT)
+#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \
+ sizeof(union eth_table_entry))
+
+#define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0)
+#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8)
+#define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48)
+#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54)
+#define NSP_ETH_PORT_FEC_SUPP_BASER BIT_ULL(60)
+#define NSP_ETH_PORT_FEC_SUPP_RS BIT_ULL(61)
+
+#define NSP_ETH_PORT_LANES_MASK rte_cpu_to_le_64(NSP_ETH_PORT_LANES)
+
+#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0)
+#define NSP_ETH_STATE_ENABLED BIT_ULL(1)
+#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2)
+#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3)
+#define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8)
+#define NSP_ETH_STATE_INTERFACE GENMASK_ULL(19, 12)
+#define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20)
+#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22)
+#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23)
+#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26)
+
+#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
+#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
+#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2)
+#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3)
+#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4)
+#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
+#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
+#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7)
+
+/* Which connector port. */
+#define PORT_TP 0x00
+#define PORT_AUI 0x01
+#define PORT_MII 0x02
+#define PORT_FIBRE 0x03
+#define PORT_BNC 0x04
+#define PORT_DA 0x05
+#define PORT_NONE 0xef
+#define PORT_OTHER 0xff
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define SPEED_5000 5000
+#define SPEED_10000 10000
+#define SPEED_14000 14000
+#define SPEED_20000 20000
+#define SPEED_25000 25000
+#define SPEED_40000 40000
+#define SPEED_50000 50000
+#define SPEED_56000 56000
+#define SPEED_100000 100000
+
+enum nfp_eth_raw {
+ NSP_ETH_RAW_PORT = 0,
+ NSP_ETH_RAW_STATE,
+ NSP_ETH_RAW_MAC,
+ NSP_ETH_RAW_CONTROL,
+
+ NSP_ETH_NUM_RAW
+};
+
+enum nfp_eth_rate {
+ RATE_INVALID = 0,
+ RATE_10M,
+ RATE_100M,
+ RATE_1G,
+ RATE_10G,
+ RATE_25G,
+};
+
+union eth_table_entry {
+ struct {
+ uint64_t port;
+ uint64_t state;
+ uint8_t mac_addr[6];
+ uint8_t resv[2];
+ uint64_t control;
+ };
+ uint64_t raw[NSP_ETH_NUM_RAW];
+};
+
+static const struct {
+ enum nfp_eth_rate rate;
+ unsigned int speed;
+} nsp_eth_rate_tbl[] = {
+ { RATE_INVALID, 0, },
+ { RATE_10M, SPEED_10, },
+ { RATE_100M, SPEED_100, },
+ { RATE_1G, SPEED_1000, },
+ { RATE_10G, SPEED_10000, },
+ { RATE_25G, SPEED_25000, },
+};
+
+static unsigned int
+nfp_eth_rate2speed(enum nfp_eth_rate rate)
+{
+ int i;
+
+ for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+ if (nsp_eth_rate_tbl[i].rate == rate)
+ return nsp_eth_rate_tbl[i].speed;
+
+ return 0;
+}
+
+static unsigned int
+nfp_eth_speed2rate(unsigned int speed)
+{
+ int i;
+
+ for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+ if (nsp_eth_rate_tbl[i].speed == speed)
+ return nsp_eth_rate_tbl[i].rate;
+
+ return RATE_INVALID;
+}
+
+static void
+nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src)
+{
+ int i;
+
+ for (i = 0; i < (int)ETH_ALEN; i++)
+ dst[ETH_ALEN - i - 1] = src[i];
+}
+
+static void
+nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
+ unsigned int index, struct nfp_eth_table_port *dst)
+{
+ unsigned int rate;
+ unsigned int fec;
+ uint64_t port, state;
+
+ port = rte_le_to_cpu_64(src->port);
+ state = rte_le_to_cpu_64(src->state);
+
+ dst->eth_index = FIELD_GET(NSP_ETH_PORT_INDEX, port);
+ dst->index = index;
+ dst->nbi = index / NSP_ETH_NBI_PORT_COUNT;
+ dst->base = index % NSP_ETH_NBI_PORT_COUNT;
+ dst->lanes = FIELD_GET(NSP_ETH_PORT_LANES, port);
+
+ dst->enabled = FIELD_GET(NSP_ETH_STATE_ENABLED, state);
+ dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state);
+ dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state);
+
+ rate = nfp_eth_rate2speed(FIELD_GET(NSP_ETH_STATE_RATE, state));
+ dst->speed = dst->lanes * rate;
+
+ dst->interface = FIELD_GET(NSP_ETH_STATE_INTERFACE, state);
+ dst->media = FIELD_GET(NSP_ETH_STATE_MEDIA, state);
+
+ nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr);
+
+ dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port);
+ dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 17)
+ return;
+
+ dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state);
+ dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 22)
+ return;
+
+ fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_BASER, port);
+ dst->fec_modes_supported |= fec << NFP_FEC_BASER_BIT;
+ fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_RS, port);
+ dst->fec_modes_supported |= fec << NFP_FEC_REED_SOLOMON_BIT;
+ if (dst->fec_modes_supported)
+ dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED;
+
+ dst->fec = 1 << FIELD_GET(NSP_ETH_STATE_FEC, state);
+}
+
+static void
+nfp_eth_calc_port_geometry(struct nfp_eth_table *table)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < table->count; i++) {
+ table->max_index = RTE_MAX(table->max_index,
+ table->ports[i].index);
+
+ for (j = 0; j < table->count; j++) {
+ if (table->ports[i].label_port !=
+ table->ports[j].label_port)
+ continue;
+ table->ports[i].port_lanes += table->ports[j].lanes;
+
+ if (i == j)
+ continue;
+ if (table->ports[i].label_subport ==
+ table->ports[j].label_subport)
+ printf("Port %d subport %d is a duplicate\n",
+ table->ports[i].label_port,
+ table->ports[i].label_subport);
+
+ table->ports[i].is_split = 1;
+ }
+ }
+}
+
+static void
+nfp_eth_calc_port_type(struct nfp_eth_table_port *entry)
+{
+ if (entry->interface == NFP_INTERFACE_NONE) {
+ entry->port_type = PORT_NONE;
+ return;
+ }
+
+ if (entry->media == NFP_MEDIA_FIBRE)
+ entry->port_type = PORT_FIBRE;
+ else
+ entry->port_type = PORT_DA;
+}
+
+static struct nfp_eth_table *
+__nfp_eth_read_ports(struct nfp_nsp *nsp)
+{
+ union eth_table_entry *entries;
+ struct nfp_eth_table *table;
+ uint32_t table_sz;
+ int i, j, ret, cnt = 0;
+
+ entries = malloc(NSP_ETH_TABLE_SIZE);
+ if (!entries)
+ return NULL;
+
+ memset(entries, 0, NSP_ETH_TABLE_SIZE);
+ ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+ if (ret < 0) {
+ printf("reading port table failed %d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
+ if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+ cnt++;
+
+ /* Some versions of flash will give us 0 instead of port count. For
+ * those that give a port count, verify it against the value calculated
+ * above.
+ */
+ if (ret && ret != cnt) {
+ printf("table entry count (%d) unmatch entries present (%d)\n",
+ ret, cnt);
+ goto err;
+ }
+
+ table_sz = sizeof(*table) + sizeof(struct nfp_eth_table_port) * cnt;
+ table = malloc(table_sz);
+ if (!table)
+ goto err;
+
+ memset(table, 0, table_sz);
+ table->count = cnt;
+ for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++)
+ if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+ nfp_eth_port_translate(nsp, &entries[i], i,
+ &table->ports[j++]);
+
+ nfp_eth_calc_port_geometry(table);
+ for (i = 0; i < (int)table->count; i++)
+ nfp_eth_calc_port_type(&table->ports[i]);
+
+ free(entries);
+
+ return table;
+
+err:
+ free(entries);
+ return NULL;
+}
+
+/*
+ * nfp_eth_read_ports() - retrieve port information
+ * @cpp: NFP CPP handle
+ *
+ * Read the port information from the device. Returned structure should
+ * be freed with kfree() once no longer needed.
+ *
+ * Return: populated ETH table or NULL on error.
+ */
+struct nfp_eth_table *
+nfp_eth_read_ports(struct nfp_cpp *cpp)
+{
+ struct nfp_eth_table *ret;
+ struct nfp_nsp *nsp;
+
+ nsp = nfp_nsp_open(cpp);
+ if (!nsp)
+ return NULL;
+
+ ret = __nfp_eth_read_ports(nsp);
+ nfp_nsp_close(nsp);
+
+ return ret;
+}
+
+struct nfp_nsp *
+nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ int ret;
+
+ entries = malloc(NSP_ETH_TABLE_SIZE);
+ if (!entries)
+ return NULL;
+
+ memset(entries, 0, NSP_ETH_TABLE_SIZE);
+ nsp = nfp_nsp_open(cpp);
+ if (!nsp) {
+ free(entries);
+ return nsp;
+ }
+
+ ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+ if (ret < 0) {
+ printf("reading port table failed %d\n", ret);
+ goto err;
+ }
+
+ if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) {
+ printf("trying to set port state on disabled port %d\n", idx);
+ goto err;
+ }
+
+ nfp_nsp_config_set_state(nsp, entries, idx);
+ return nsp;
+
+err:
+ nfp_nsp_close(nsp);
+ free(entries);
+ return NULL;
+}
+
+void
+nfp_eth_config_cleanup_end(struct nfp_nsp *nsp)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+
+ nfp_nsp_config_set_modified(nsp, 0);
+ nfp_nsp_config_clear_state(nsp);
+ nfp_nsp_close(nsp);
+ free(entries);
+}
+
+/*
+ * nfp_eth_config_commit_end() - perform recorded configuration changes
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ *
+ * Perform the configuration which was requested with __nfp_eth_set_*()
+ * helpers and recorded in @nsp state. If device was already configured
+ * as requested or no __nfp_eth_set_*() operations were made no NSP command
+ * will be performed.
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_config_commit_end(struct nfp_nsp *nsp)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+ int ret = 1;
+
+ if (nfp_nsp_config_modified(nsp)) {
+ ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+ ret = ret < 0 ? ret : 0;
+ }
+
+ nfp_eth_config_cleanup_end(nsp);
+
+ return ret;
+}
+
+/*
+ * nfp_eth_set_mod_enable() - set PHY module enable control bit
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @enable: Desired state
+ *
+ * Enable or disable PHY module (this usually means setting the TX lanes
+ * disable bits).
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ uint64_t reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (!nsp)
+ return -1;
+
+ entries = nfp_nsp_config_entries(nsp);
+
+ /* Check if we are already in requested state */
+ reg = rte_le_to_cpu_64(entries[idx].state);
+ if (enable != (int)FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) {
+ reg = rte_le_to_cpu_64(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_ENABLED;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable);
+ entries[idx].control = rte_cpu_to_le_64(reg);
+
+ nfp_nsp_config_set_modified(nsp, 1);
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
+/*
+ * nfp_eth_set_configured() - set PHY module configured control bit
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @configed: Desired state
+ *
+ * Set the ifup/ifdown state on the PHY.
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, int configed)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ uint64_t reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (!nsp)
+ return -EIO;
+
+ /*
+ * Older ABI versions did support this feature, however this has only
+ * been reliable since ABI 20.
+ */
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 20) {
+ nfp_eth_config_cleanup_end(nsp);
+ return -EOPNOTSUPP;
+ }
+
+ entries = nfp_nsp_config_entries(nsp);
+
+ /* Check if we are already in requested state */
+ reg = rte_le_to_cpu_64(entries[idx].state);
+ if (configed != (int)FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) {
+ reg = rte_le_to_cpu_64(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_CONFIGURED;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configed);
+ entries[idx].control = rte_cpu_to_le_64(reg);
+
+ nfp_nsp_config_set_modified(nsp, 1);
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
+static int
+nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
+ const uint64_t mask, const unsigned int shift,
+ unsigned int val, const uint64_t ctrl_bit)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+ unsigned int idx = nfp_nsp_config_idx(nsp);
+ uint64_t reg;
+
+ /*
+ * Note: set features were added in ABI 0.14 but the error
+ * codes were initially not populated correctly.
+ */
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 17) {
+ printf("set operations not supported, please update flash\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if we are already in requested state */
+ reg = rte_le_to_cpu_64(entries[idx].raw[raw_idx]);
+ if (val == (reg & mask) >> shift)
+ return 0;
+
+ reg &= ~mask;
+ reg |= (val << shift) & mask;
+ entries[idx].raw[raw_idx] = rte_cpu_to_le_64(reg);
+
+ entries[idx].control |= rte_cpu_to_le_64(ctrl_bit);
+
+ nfp_nsp_config_set_modified(nsp, 1);
+
+ return 0;
+}
+
+#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \
+ (__extension__ ({ \
+ typeof(mask) _x = (mask); \
+ nfp_eth_set_bit_config(nsp, raw_idx, _x, __bf_shf(_x), \
+ val, ctrl_bit); \
+ }))
+
+/*
+ * __nfp_eth_set_aneg() - set PHY autonegotiation control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @mode: Desired autonegotiation mode
+ *
+ * Allow/disallow PHY module to advertise/perform autonegotiation.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int
+__nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_ANEG, mode,
+ NSP_ETH_CTRL_SET_ANEG);
+}
+
+/*
+ * __nfp_eth_set_fec() - set PHY forward error correction control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @mode: Desired fec mode
+ *
+ * Set the PHY module forward error correction mode.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+static int
+__nfp_eth_set_fec(struct nfp_nsp *nsp, enum nfp_eth_fec mode)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_FEC, mode,
+ NSP_ETH_CTRL_SET_FEC);
+}
+
+/*
+ * nfp_eth_set_fec() - set PHY forward error correction control mode
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @mode: Desired fec mode
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode)
+{
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (!nsp)
+ return -EIO;
+
+ err = __nfp_eth_set_fec(nsp, mode);
+ if (err) {
+ nfp_eth_config_cleanup_end(nsp);
+ return err;
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
+/*
+ * __nfp_eth_set_speed() - set interface speed/rate
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @speed: Desired speed (per lane)
+ *
+ * Set lane speed. Provided @speed value should be subport speed divided
+ * by number of lanes this subport is spanning (i.e. 10000 for 40G, 25000 for
+ * 50G, etc.)
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int
+__nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed)
+{
+ enum nfp_eth_rate rate;
+
+ rate = nfp_eth_speed2rate(speed);
+ if (rate == RATE_INVALID) {
+ printf("could not find matching lane rate for speed %u\n",
+ speed);
+ return -EINVAL;
+ }
+
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_RATE, rate,
+ NSP_ETH_CTRL_SET_RATE);
+}
+
+/*
+ * __nfp_eth_set_split() - set interface lane split
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @lanes: Desired lanes per port
+ *
+ * Set number of lanes in the port.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int
+__nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
+ lanes, NSP_ETH_CTRL_SET_LANES);
+}
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.c
new file mode 100644
index 00000000..dd41fa4d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <time.h>
+#include <endian.h>
+
+#include <rte_string_fns.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp_resource.h"
+#include "nfp_crc.h"
+
+#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU
+#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL
+
+/* NFP Resource Table self-identifier */
+#define NFP_RESOURCE_TBL_NAME "nfp.res"
+#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */
+
+#define NFP_RESOURCE_ENTRY_NAME_SZ 8
+
+/*
+ * struct nfp_resource_entry - Resource table entry
+ * @owner: NFP CPP Lock, interface owner
+ * @key: NFP CPP Lock, posix_crc32(name, 8)
+ * @region: Memory region descriptor
+ * @name: ASCII, zero padded name
+ * @reserved
+ * @cpp_action: CPP Action
+ * @cpp_token: CPP Token
+ * @cpp_target: CPP Target ID
+ * @page_offset: 256-byte page offset into target's CPP address
+ * @page_size: size, in 256-byte pages
+ */
+struct nfp_resource_entry {
+ struct nfp_resource_entry_mutex {
+ uint32_t owner;
+ uint32_t key;
+ } mutex;
+ struct nfp_resource_entry_region {
+ uint8_t name[NFP_RESOURCE_ENTRY_NAME_SZ];
+ uint8_t reserved[5];
+ uint8_t cpp_action;
+ uint8_t cpp_token;
+ uint8_t cpp_target;
+ uint32_t page_offset;
+ uint32_t page_size;
+ } region;
+};
+
+#define NFP_RESOURCE_TBL_SIZE 4096
+#define NFP_RESOURCE_TBL_ENTRIES (int)(NFP_RESOURCE_TBL_SIZE / \
+ sizeof(struct nfp_resource_entry))
+
+struct nfp_resource {
+ char name[NFP_RESOURCE_ENTRY_NAME_SZ + 1];
+ uint32_t cpp_id;
+ uint64_t addr;
+ uint64_t size;
+ struct nfp_cpp_mutex *mutex;
+};
+
+static int
+nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
+{
+ char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ + 2];
+ struct nfp_resource_entry entry;
+ uint32_t cpp_id, key;
+ int ret, i;
+
+ cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */
+
+ memset(name_pad, 0, sizeof(name_pad));
+ strlcpy(name_pad, res->name, sizeof(name_pad));
+
+ /* Search for a matching entry */
+ if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) {
+ printf("Grabbing device lock not supported\n");
+ return -EOPNOTSUPP;
+ }
+ key = nfp_crc32_posix(name_pad, NFP_RESOURCE_ENTRY_NAME_SZ);
+
+ for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
+ uint64_t addr = NFP_RESOURCE_TBL_BASE +
+ sizeof(struct nfp_resource_entry) * i;
+
+ ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry));
+ if (ret != sizeof(entry))
+ return -EIO;
+
+ if (entry.mutex.key != key)
+ continue;
+
+ /* Found key! */
+ res->mutex =
+ nfp_cpp_mutex_alloc(cpp,
+ NFP_RESOURCE_TBL_TARGET, addr, key);
+ res->cpp_id = NFP_CPP_ID(entry.region.cpp_target,
+ entry.region.cpp_action,
+ entry.region.cpp_token);
+ res->addr = ((uint64_t)entry.region.page_offset) << 8;
+ res->size = (uint64_t)entry.region.page_size << 8;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int
+nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res,
+ struct nfp_cpp_mutex *dev_mutex)
+{
+ int err;
+
+ if (nfp_cpp_mutex_lock(dev_mutex))
+ return -EINVAL;
+
+ err = nfp_cpp_resource_find(cpp, res);
+ if (err)
+ goto err_unlock_dev;
+
+ err = nfp_cpp_mutex_trylock(res->mutex);
+ if (err)
+ goto err_res_mutex_free;
+
+ nfp_cpp_mutex_unlock(dev_mutex);
+
+ return 0;
+
+err_res_mutex_free:
+ nfp_cpp_mutex_free(res->mutex);
+err_unlock_dev:
+ nfp_cpp_mutex_unlock(dev_mutex);
+
+ return err;
+}
+
+/*
+ * nfp_resource_acquire() - Acquire a resource handle
+ * @cpp: NFP CPP handle
+ * @name: Name of the resource
+ *
+ * NOTE: This function locks the acquired resource
+ *
+ * Return: NFP Resource handle, or ERR_PTR()
+ */
+struct nfp_resource *
+nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
+{
+ struct nfp_cpp_mutex *dev_mutex;
+ struct nfp_resource *res;
+ int err;
+ struct timespec wait;
+ int count;
+
+ res = malloc(sizeof(*res));
+ if (!res)
+ return NULL;
+
+ memset(res, 0, sizeof(*res));
+
+ strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ);
+
+ dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
+ NFP_RESOURCE_TBL_BASE,
+ NFP_RESOURCE_TBL_KEY);
+ if (!dev_mutex) {
+ free(res);
+ return NULL;
+ }
+
+ wait.tv_sec = 0;
+ wait.tv_nsec = 1000000;
+ count = 0;
+
+ for (;;) {
+ err = nfp_resource_try_acquire(cpp, res, dev_mutex);
+ if (!err)
+ break;
+ if (err != -EBUSY)
+ goto err_free;
+
+ if (count++ > 1000) {
+ printf("Error: resource %s timed out\n", name);
+ err = -EBUSY;
+ goto err_free;
+ }
+
+ nanosleep(&wait, NULL);
+ }
+
+ nfp_cpp_mutex_free(dev_mutex);
+
+ return res;
+
+err_free:
+ nfp_cpp_mutex_free(dev_mutex);
+ free(res);
+ return NULL;
+}
+
+/*
+ * nfp_resource_release() - Release a NFP Resource handle
+ * @res: NFP Resource handle
+ *
+ * NOTE: This function implictly unlocks the resource handle
+ */
+void
+nfp_resource_release(struct nfp_resource *res)
+{
+ nfp_cpp_mutex_unlock(res->mutex);
+ nfp_cpp_mutex_free(res->mutex);
+ free(res);
+}
+
+/*
+ * nfp_resource_cpp_id() - Return the cpp_id of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: NFP CPP ID
+ */
+uint32_t
+nfp_resource_cpp_id(const struct nfp_resource *res)
+{
+ return res->cpp_id;
+}
+
+/*
+ * nfp_resource_name() - Return the name of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: const char pointer to the name of the resource
+ */
+const char
+*nfp_resource_name(const struct nfp_resource *res)
+{
+ return res->name;
+}
+
+/*
+ * nfp_resource_address() - Return the address of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: Address of the resource
+ */
+uint64_t
+nfp_resource_address(const struct nfp_resource *res)
+{
+ return res->addr;
+}
+
+/*
+ * nfp_resource_size() - Return the size in bytes of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: Size of the resource in bytes
+ */
+uint64_t
+nfp_resource_size(const struct nfp_resource *res)
+{
+ return res->size;
+}
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.h
new file mode 100644
index 00000000..06cc6f74
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef NFP_RESOURCE_H
+#define NFP_RESOURCE_H
+
+#include "nfp_cpp.h"
+
+#define NFP_RESOURCE_NFP_NFFW "nfp.nffw"
+#define NFP_RESOURCE_NFP_HWINFO "nfp.info"
+#define NFP_RESOURCE_NSP "nfp.sp"
+
+/**
+ * Opaque handle to a NFP Resource
+ */
+struct nfp_resource;
+
+struct nfp_resource *nfp_resource_acquire(struct nfp_cpp *cpp,
+ const char *name);
+
+/**
+ * Release a NFP Resource, and free the handle
+ * @param[in] res NFP Resource handle
+ */
+void nfp_resource_release(struct nfp_resource *res);
+
+/**
+ * Return the CPP ID of a NFP Resource
+ * @param[in] res NFP Resource handle
+ * @return CPP ID of the NFP Resource
+ */
+uint32_t nfp_resource_cpp_id(const struct nfp_resource *res);
+
+/**
+ * Return the name of a NFP Resource
+ * @param[in] res NFP Resource handle
+ * @return Name of the NFP Resource
+ */
+const char *nfp_resource_name(const struct nfp_resource *res);
+
+/**
+ * Return the target address of a NFP Resource
+ * @param[in] res NFP Resource handle
+ * @return Address of the NFP Resource
+ */
+uint64_t nfp_resource_address(const struct nfp_resource *res);
+
+uint64_t nfp_resource_size(const struct nfp_resource *res);
+
+#endif /* NFP_RESOURCE_H */
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c
new file mode 100644
index 00000000..cb7d83db
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+/*
+ * nfp_rtsym.c
+ * Interface for accessing run-time symbol table
+ */
+
+#include <stdio.h>
+#include <rte_byteorder.h>
+#include "nfp_cpp.h"
+#include "nfp_mip.h"
+#include "nfp_rtsym.h"
+#include "nfp6000/nfp6000.h"
+
+/* These need to match the linker */
+#define SYM_TGT_LMEM 0
+#define SYM_TGT_EMU_CACHE 0x17
+
+struct nfp_rtsym_entry {
+ uint8_t type;
+ uint8_t target;
+ uint8_t island;
+ uint8_t addr_hi;
+ uint32_t addr_lo;
+ uint16_t name;
+ uint8_t menum;
+ uint8_t size_hi;
+ uint32_t size_lo;
+};
+
+struct nfp_rtsym_table {
+ struct nfp_cpp *cpp;
+ int num;
+ char *strtab;
+ struct nfp_rtsym symtab[];
+};
+
+static int
+nfp_meid(uint8_t island_id, uint8_t menum)
+{
+ return (island_id & 0x3F) == island_id && menum < 12 ?
+ (island_id << 4) | (menum + 4) : -1;
+}
+
+static void
+nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, uint32_t strtab_size,
+ struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw)
+{
+ sw->type = fw->type;
+ sw->name = cache->strtab + rte_le_to_cpu_16(fw->name) % strtab_size;
+ sw->addr = ((uint64_t)fw->addr_hi << 32) |
+ rte_le_to_cpu_32(fw->addr_lo);
+ sw->size = ((uint64_t)fw->size_hi << 32) |
+ rte_le_to_cpu_32(fw->size_lo);
+
+#ifdef DEBUG
+ printf("rtsym_entry_init\n");
+ printf("\tname=%s, addr=%" PRIx64 ", size=%" PRIu64 ",target=%d\n",
+ sw->name, sw->addr, sw->size, sw->target);
+#endif
+ switch (fw->target) {
+ case SYM_TGT_LMEM:
+ sw->target = NFP_RTSYM_TARGET_LMEM;
+ break;
+ case SYM_TGT_EMU_CACHE:
+ sw->target = NFP_RTSYM_TARGET_EMU_CACHE;
+ break;
+ default:
+ sw->target = fw->target;
+ break;
+ }
+
+ if (fw->menum != 0xff)
+ sw->domain = nfp_meid(fw->island, fw->menum);
+ else if (fw->island != 0xff)
+ sw->domain = fw->island;
+ else
+ sw->domain = -1;
+}
+
+struct nfp_rtsym_table *
+nfp_rtsym_table_read(struct nfp_cpp *cpp)
+{
+ struct nfp_rtsym_table *rtbl;
+ struct nfp_mip *mip;
+
+ mip = nfp_mip_open(cpp);
+ rtbl = __nfp_rtsym_table_read(cpp, mip);
+ nfp_mip_close(mip);
+
+ return rtbl;
+}
+
+/*
+ * This looks more complex than it should be. But we need to get the type for
+ * the ~ right in round_down (it needs to be as wide as the result!), and we
+ * want to evaluate the macro arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
+
+#define round_up(x, y) \
+ (__extension__ ({ \
+ typeof(x) _x = (x); \
+ ((((_x) - 1) | __round_mask(_x, y)) + 1); \
+ }))
+
+#define round_down(x, y) \
+ (__extension__ ({ \
+ typeof(x) _x = (x); \
+ ((_x) & ~__round_mask(_x, y)); \
+ }))
+
+struct nfp_rtsym_table *
+__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip)
+{
+ uint32_t strtab_addr, symtab_addr, strtab_size, symtab_size;
+ struct nfp_rtsym_entry *rtsymtab;
+ struct nfp_rtsym_table *cache;
+ const uint32_t dram =
+ NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) |
+ NFP_ISL_EMEM0;
+ int err, n, size;
+
+ if (!mip)
+ return NULL;
+
+ nfp_mip_strtab(mip, &strtab_addr, &strtab_size);
+ nfp_mip_symtab(mip, &symtab_addr, &symtab_size);
+
+ if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab))
+ return NULL;
+
+ /* Align to 64 bits */
+ symtab_size = round_up(symtab_size, 8);
+ strtab_size = round_up(strtab_size, 8);
+
+ rtsymtab = malloc(symtab_size);
+ if (!rtsymtab)
+ return NULL;
+
+ size = sizeof(*cache);
+ size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym);
+ size += strtab_size + 1;
+ cache = malloc(size);
+ if (!cache)
+ goto exit_free_rtsym_raw;
+
+ cache->cpp = cpp;
+ cache->num = symtab_size / sizeof(*rtsymtab);
+ cache->strtab = (void *)&cache->symtab[cache->num];
+
+ err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size);
+ if (err != (int)symtab_size)
+ goto exit_free_cache;
+
+ err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size);
+ if (err != (int)strtab_size)
+ goto exit_free_cache;
+ cache->strtab[strtab_size] = '\0';
+
+ for (n = 0; n < cache->num; n++)
+ nfp_rtsym_sw_entry_init(cache, strtab_size,
+ &cache->symtab[n], &rtsymtab[n]);
+
+ free(rtsymtab);
+
+ return cache;
+
+exit_free_cache:
+ free(cache);
+exit_free_rtsym_raw:
+ free(rtsymtab);
+ return NULL;
+}
+
+/*
+ * nfp_rtsym_count() - Get the number of RTSYM descriptors
+ * @rtbl: NFP RTsym table
+ *
+ * Return: Number of RTSYM descriptors
+ */
+int
+nfp_rtsym_count(struct nfp_rtsym_table *rtbl)
+{
+ if (!rtbl)
+ return -EINVAL;
+
+ return rtbl->num;
+}
+
+/*
+ * nfp_rtsym_get() - Get the Nth RTSYM descriptor
+ * @rtbl: NFP RTsym table
+ * @idx: Index (0-based) of the RTSYM descriptor
+ *
+ * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
+ */
+const struct nfp_rtsym *
+nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx)
+{
+ if (!rtbl)
+ return NULL;
+
+ if (idx >= rtbl->num)
+ return NULL;
+
+ return &rtbl->symtab[idx];
+}
+
+/*
+ * nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name
+ * @rtbl: NFP RTsym table
+ * @name: Symbol name
+ *
+ * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
+ */
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name)
+{
+ int n;
+
+ if (!rtbl)
+ return NULL;
+
+ for (n = 0; n < rtbl->num; n++)
+ if (strcmp(name, rtbl->symtab[n].name) == 0)
+ return &rtbl->symtab[n];
+
+ return NULL;
+}
+
+/*
+ * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
+ * @rtbl: NFP RTsym table
+ * @name: Symbol name
+ * @error: Poniter to error code (optional)
+ *
+ * Lookup a symbol, map, read it and return it's value. Value of the symbol
+ * will be interpreted as a simple little-endian unsigned value. Symbol can
+ * be 4 or 8 bytes in size.
+ *
+ * Return: value read, on error sets the error and returns ~0ULL.
+ */
+uint64_t
+nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error)
+{
+ const struct nfp_rtsym *sym;
+ uint32_t val32, id;
+ uint64_t val;
+ int err;
+
+ sym = nfp_rtsym_lookup(rtbl, name);
+ if (!sym) {
+ err = -ENOENT;
+ goto exit;
+ }
+
+ id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
+
+#ifdef DEBUG
+ printf("Reading symbol %s with size %" PRIu64 " at %" PRIx64 "\n",
+ name, sym->size, sym->addr);
+#endif
+ switch (sym->size) {
+ case 4:
+ err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32);
+ val = val32;
+ break;
+ case 8:
+ err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val);
+ break;
+ default:
+ printf("rtsym '%s' unsupported size: %" PRId64 "\n",
+ name, sym->size);
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ err = -EIO;
+exit:
+ if (error)
+ *error = err;
+
+ if (err)
+ return ~0ULL;
+
+ return val;
+}
+
+uint8_t *
+nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name,
+ unsigned int min_size, struct nfp_cpp_area **area)
+{
+ const struct nfp_rtsym *sym;
+ uint8_t *mem;
+
+#ifdef DEBUG
+ printf("mapping symbol %s\n", name);
+#endif
+ sym = nfp_rtsym_lookup(rtbl, name);
+ if (!sym) {
+ printf("symbol lookup fails for %s\n", name);
+ return NULL;
+ }
+
+ if (sym->size < min_size) {
+ printf("Symbol %s too small (%" PRIu64 " < %u)\n", name,
+ sym->size, min_size);
+ return NULL;
+ }
+
+ mem = nfp_cpp_map_area(rtbl->cpp, sym->domain, sym->target, sym->addr,
+ sym->size, area);
+ if (!mem) {
+ printf("Failed to map symbol %s\n", name);
+ return NULL;
+ }
+#ifdef DEBUG
+ printf("symbol %s with address %p\n", name, mem);
+#endif
+
+ return mem;
+}
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.h
new file mode 100644
index 00000000..8b494211
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_RTSYM_H__
+#define __NFP_RTSYM_H__
+
+#define NFP_RTSYM_TYPE_NONE 0
+#define NFP_RTSYM_TYPE_OBJECT 1
+#define NFP_RTSYM_TYPE_FUNCTION 2
+#define NFP_RTSYM_TYPE_ABS 3
+
+#define NFP_RTSYM_TARGET_NONE 0
+#define NFP_RTSYM_TARGET_LMEM -1
+#define NFP_RTSYM_TARGET_EMU_CACHE -7
+
+/*
+ * Structure describing a run-time NFP symbol.
+ *
+ * The memory target of the symbol is generally the CPP target number and can be
+ * used directly by the nfp_cpp API calls. However, in some cases (i.e., for
+ * local memory or control store) the target is encoded using a negative number.
+ *
+ * When the target type can not be used to fully describe the location of a
+ * symbol the domain field is used to further specify the location (i.e., the
+ * specific ME or island number).
+ *
+ * For ME target resources, 'domain' is an MEID.
+ * For Island target resources, 'domain' is an island ID, with the one exception
+ * of "sram" symbols for backward compatibility, which are viewed as global.
+ */
+struct nfp_rtsym {
+ const char *name;
+ uint64_t addr;
+ uint64_t size;
+ int type;
+ int target;
+ int domain;
+};
+
+struct nfp_rtsym_table;
+
+struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp);
+
+struct nfp_rtsym_table *
+__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip);
+
+int nfp_rtsym_count(struct nfp_rtsym_table *rtbl);
+
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx);
+
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name);
+
+uint64_t nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
+ int *error);
+uint8_t *
+nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name,
+ unsigned int min_size, struct nfp_cpp_area **area);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_target.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_target.h
new file mode 100644
index 00000000..2884a003
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_target.h
@@ -0,0 +1,579 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef NFP_TARGET_H
+#define NFP_TARGET_H
+
+#include "nfp-common/nfp_resid.h"
+#include "nfp-common/nfp_cppat.h"
+#include "nfp-common/nfp_platform.h"
+#include "nfp_cpp.h"
+
+#define P32 1
+#define P64 2
+
+#define PUSHPULL(_pull, _push) (((_pull) << 4) | ((_push) << 0))
+
+#ifndef NFP_ERRNO
+#include <errno.h>
+#define NFP_ERRNO(x) (errno = (x), -1)
+#endif
+
+static inline int
+pushpull_width(int pp)
+{
+ pp &= 0xf;
+
+ if (pp == 0)
+ return NFP_ERRNO(EINVAL);
+ return (2 << pp);
+}
+
+#define PUSH_WIDTH(_pushpull) pushpull_width((_pushpull) >> 0)
+#define PULL_WIDTH(_pushpull) pushpull_width((_pushpull) >> 4)
+
+static inline int
+target_rw(uint32_t cpp_id, int pp, int start, int len)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < start || island > (start + len)))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0):
+ return PUSHPULL(0, pp);
+ case NFP_CPP_ID(0, 1, 0):
+ return PUSHPULL(pp, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(pp, pp);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi_dma(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0): /* ReadNbiDma */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* WriteNbiDma */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(P64, P64);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi_stats(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0): /* ReadNbiStats */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* WriteNbiStats */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(P64, P64);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi_tm(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0): /* ReadNbiTM */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* WriteNbiTM */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(P64, P64);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi_ppc(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0): /* ReadNbiPreclassifier */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* WriteNbiPreclassifier */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(P64, P64);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi(uint32_t cpp_id, uint64_t address)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+ uint64_t rel_addr = address & 0x3fFFFF;
+
+ if (island && (island < 8 || island > 9))
+ return NFP_ERRNO(EINVAL);
+
+ if (rel_addr < (1 << 20))
+ return nfp6000_nbi_dma(cpp_id);
+ if (rel_addr < (2 << 20))
+ return nfp6000_nbi_stats(cpp_id);
+ if (rel_addr < (3 << 20))
+ return nfp6000_nbi_tm(cpp_id);
+ return nfp6000_nbi_ppc(cpp_id);
+}
+
+/*
+ * This structure ONLY includes items that can be done with a read or write of
+ * 32-bit or 64-bit words. All others are not listed.
+ */
+static inline int
+nfp6000_mu_common(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): /* read_be/write_be */
+ return PUSHPULL(P64, P64);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1): /* read_le/write_le */
+ return PUSHPULL(P64, P64);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 2): /* {read/write}_swap_be */
+ return PUSHPULL(P64, P64);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 3): /* {read/write}_swap_le */
+ return PUSHPULL(P64, P64);
+ case NFP_CPP_ID(0, 0, 0): /* read_be */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 0, 1): /* read_le */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 0, 2): /* read_swap_be */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 0, 3): /* read_swap_le */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* write_be */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, 1, 1): /* write_le */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, 1, 2): /* write_swap_be */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, 1, 3): /* write_swap_le */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, 3, 0): /* atomic_read */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 3, 2): /* mask_compare_write */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 4, 0): /* atomic_write */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 4, 2): /* atomic_write_imm */
+ return PUSHPULL(0, 0);
+ case NFP_CPP_ID(0, 4, 3): /* swap_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 5, 0): /* set */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 5, 3): /* test_set_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 6, 0): /* clr */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 6, 3): /* test_clr_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 7, 0): /* add */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 7, 3): /* test_add_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 8, 0): /* addsat */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 8, 3): /* test_subsat_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 9, 0): /* sub */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 9, 3): /* test_sub_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 10, 0): /* subsat */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 10, 3): /* test_subsat_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 13, 0): /* microq128_get */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 13, 1): /* microq128_pop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 13, 2): /* microq128_put */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 15, 0): /* xor */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 15, 3): /* test_xor_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 28, 0): /* read32_be */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 28, 1): /* read32_le */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 28, 2): /* read32_swap_be */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 28, 3): /* read32_swap_le */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 31, 0): /* write32_be */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 31, 1): /* write32_le */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 31, 2): /* write32_swap_be */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 31, 3): /* write32_swap_le */
+ return PUSHPULL(P32, 0);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_mu_ctm(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 16, 1): /* packet_read_packet_status */
+ return PUSHPULL(0, P32);
+ default:
+ return nfp6000_mu_common(cpp_id);
+ }
+}
+
+static inline int
+nfp6000_mu_emu(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 18, 0): /* read_queue */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 18, 1): /* read_queue_ring */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 18, 2): /* write_queue */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 18, 3): /* write_queue_ring */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 20, 2): /* journal */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 21, 0): /* get */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 21, 1): /* get_eop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 21, 2): /* get_freely */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 22, 0): /* pop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 22, 1): /* pop_eop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 22, 2): /* pop_freely */
+ return PUSHPULL(0, P32);
+ default:
+ return nfp6000_mu_common(cpp_id);
+ }
+}
+
+static inline int
+nfp6000_mu_imu(uint32_t cpp_id)
+{
+ return nfp6000_mu_common(cpp_id);
+}
+
+static inline int
+nfp6000_mu(uint32_t cpp_id, uint64_t address)
+{
+ int pp;
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island == 0) {
+ if (address < 0x2000000000ULL)
+ pp = nfp6000_mu_ctm(cpp_id);
+ else if (address < 0x8000000000ULL)
+ pp = nfp6000_mu_emu(cpp_id);
+ else if (address < 0x9800000000ULL)
+ pp = nfp6000_mu_ctm(cpp_id);
+ else if (address < 0x9C00000000ULL)
+ pp = nfp6000_mu_emu(cpp_id);
+ else if (address < 0xA000000000ULL)
+ pp = nfp6000_mu_imu(cpp_id);
+ else
+ pp = nfp6000_mu_ctm(cpp_id);
+ } else if (island >= 24 && island <= 27) {
+ pp = nfp6000_mu_emu(cpp_id);
+ } else if (island >= 28 && island <= 31) {
+ pp = nfp6000_mu_imu(cpp_id);
+ } else if (island == 1 ||
+ (island >= 4 && island <= 7) ||
+ (island >= 12 && island <= 13) ||
+ (island >= 32 && island <= 47) ||
+ (island >= 48 && island <= 51)) {
+ pp = nfp6000_mu_ctm(cpp_id);
+ } else {
+ pp = NFP_ERRNO(EINVAL);
+ }
+
+ return pp;
+}
+
+static inline int
+nfp6000_ila(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 48 || island > 51))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 1): /* read_check_error */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 2, 0): /* read_int */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 3, 0): /* write_int */
+ return PUSHPULL(P32, 0);
+ default:
+ return target_rw(cpp_id, P32, 48, 4);
+ }
+}
+
+static inline int
+nfp6000_pci(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 4 || island > 7))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 2, 0):
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 3, 0):
+ return PUSHPULL(P32, 0);
+ default:
+ return target_rw(cpp_id, P32, 4, 4);
+ }
+}
+
+static inline int
+nfp6000_crypto(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 12 || island > 15))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 2, 0):
+ return PUSHPULL(P64, 0);
+ default:
+ return target_rw(cpp_id, P64, 12, 4);
+ }
+}
+
+static inline int
+nfp6000_cap_xpb(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 1 || island > 63))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 1): /* RingGet */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 0, 2): /* Interthread Signal */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 1, 1): /* RingPut */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 1, 2): /* CTNNWr */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 2, 0): /* ReflectRd, signal none */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 2, 1): /* ReflectRd, signal self */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 2, 2): /* ReflectRd, signal remote */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 2, 3): /* ReflectRd, signal both */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 3, 0): /* ReflectWr, signal none */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 3, 1): /* ReflectWr, signal self */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 3, 2): /* ReflectWr, signal remote */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 3, 3): /* ReflectWr, signal both */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1):
+ return PUSHPULL(P32, P32);
+ default:
+ return target_rw(cpp_id, P32, 1, 63);
+ }
+}
+
+static inline int
+nfp6000_cls(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 1 || island > 63))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 3): /* xor */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 2, 0): /* set */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 2, 1): /* clr */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 4, 0): /* add */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 4, 1): /* add64 */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 6, 0): /* sub */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 6, 1): /* sub64 */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 6, 2): /* subsat */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 8, 2): /* hash_mask */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 8, 3): /* hash_clear */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 9, 0): /* ring_get */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 9, 1): /* ring_pop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 9, 2): /* ring_get_freely */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 9, 3): /* ring_pop_freely */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 10, 0): /* ring_put */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 10, 2): /* ring_journal */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 14, 0): /* reflect_write_sig_local */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 15, 1): /* reflect_read_sig_local */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 17, 2): /* statistic */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 24, 0): /* ring_read */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 24, 1): /* ring_write */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 25, 0): /* ring_workq_add_thread */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 25, 1): /* ring_workq_add_work */
+ return PUSHPULL(P32, 0);
+ default:
+ return target_rw(cpp_id, P32, 0, 64);
+ }
+}
+
+static inline int
+nfp6000_target_pushpull(uint32_t cpp_id, uint64_t address)
+{
+ switch (NFP_CPP_ID_TARGET_of(cpp_id)) {
+ case NFP6000_CPPTGT_NBI:
+ return nfp6000_nbi(cpp_id, address);
+ case NFP6000_CPPTGT_VQDR:
+ return target_rw(cpp_id, P32, 24, 4);
+ case NFP6000_CPPTGT_ILA:
+ return nfp6000_ila(cpp_id);
+ case NFP6000_CPPTGT_MU:
+ return nfp6000_mu(cpp_id, address);
+ case NFP6000_CPPTGT_PCIE:
+ return nfp6000_pci(cpp_id);
+ case NFP6000_CPPTGT_ARM:
+ if (address < 0x10000)
+ return target_rw(cpp_id, P64, 1, 1);
+ else
+ return target_rw(cpp_id, P32, 1, 1);
+ case NFP6000_CPPTGT_CRYPTO:
+ return nfp6000_crypto(cpp_id);
+ case NFP6000_CPPTGT_CTXPB:
+ return nfp6000_cap_xpb(cpp_id);
+ case NFP6000_CPPTGT_CLS:
+ return nfp6000_cls(cpp_id);
+ case 0:
+ return target_rw(cpp_id, P32, 4, 4);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp_target_pushpull_width(int pp, int write_not_read)
+{
+ if (pp < 0)
+ return pp;
+
+ if (write_not_read)
+ return PULL_WIDTH(pp);
+ else
+ return PUSH_WIDTH(pp);
+}
+
+static inline int
+nfp6000_target_action_width(uint32_t cpp_id, uint64_t address,
+ int write_not_read)
+{
+ int pp;
+
+ pp = nfp6000_target_pushpull(cpp_id, address);
+
+ return nfp_target_pushpull_width(pp, write_not_read);
+}
+
+static inline int
+nfp_target_action_width(uint32_t model, uint32_t cpp_id, uint64_t address,
+ int write_not_read)
+{
+ if (NFP_CPP_MODEL_IS_6000(model)) {
+ return nfp6000_target_action_width(cpp_id, address,
+ write_not_read);
+ } else {
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp_target_cpp(uint32_t cpp_island_id, uint64_t cpp_island_address,
+ uint32_t *cpp_target_id, uint64_t *cpp_target_address,
+ const uint32_t *imb_table)
+{
+ int err;
+ int island = NFP_CPP_ID_ISLAND_of(cpp_island_id);
+ int target = NFP_CPP_ID_TARGET_of(cpp_island_id);
+ uint32_t imb;
+
+ if (target < 0 || target >= 16)
+ return NFP_ERRNO(EINVAL);
+
+ if (island == 0) {
+ /* Already translated */
+ *cpp_target_id = cpp_island_id;
+ *cpp_target_address = cpp_island_address;
+ return 0;
+ }
+
+ if (!imb_table) {
+ /* CPP + Island only allowed on systems with IMB tables */
+ return NFP_ERRNO(EINVAL);
+ }
+
+ imb = imb_table[target];
+
+ *cpp_target_address = cpp_island_address;
+ err = _nfp6000_cppat_addr_encode(cpp_target_address, island, target,
+ ((imb >> 13) & 7),
+ ((imb >> 12) & 1),
+ ((imb >> 6) & 0x3f),
+ ((imb >> 0) & 0x3f));
+ if (err == 0) {
+ *cpp_target_id =
+ NFP_CPP_ID(target, NFP_CPP_ID_ACTION_of(cpp_island_id),
+ NFP_CPP_ID_TOKEN_of(cpp_island_id));
+ }
+
+ return err;
+}
+
+#endif /* NFP_TARGET_H */
diff --git a/src/spdk/dpdk/drivers/net/nfp/rte_pmd_nfp_version.map b/src/spdk/dpdk/drivers/net/nfp/rte_pmd_nfp_version.map
new file mode 100644
index 00000000..ad607bbe
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/nfp/rte_pmd_nfp_version.map
@@ -0,0 +1,3 @@
+DPDK_2.2 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/null/Makefile b/src/spdk/dpdk/drivers/net/null/Makefile
new file mode 100644
index 00000000..9331ccac
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/null/Makefile
@@ -0,0 +1,54 @@
+# BSD LICENSE
+#
+# Copyright (C) IGEL Co.,Ltd.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of IGEL Co.,Ltd. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_null.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vdev
+
+EXPORT_MAP := rte_pmd_null_version.map
+
+LIBABIVER := 2
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += rte_eth_null.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/null/meson.build b/src/spdk/dpdk/drivers/net/null/meson.build
new file mode 100644
index 00000000..60e2ce6c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/null/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+version = 2
+sources = files('rte_eth_null.c')
diff --git a/src/spdk/dpdk/drivers/net/null/rte_eth_null.c b/src/spdk/dpdk/drivers/net/null/rte_eth_null.c
new file mode 100644
index 00000000..244f8654
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/null/rte_eth_null.c
@@ -0,0 +1,707 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (C) IGEL Co.,Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of IGEL Co.,Ltd. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_bus_vdev.h>
+#include <rte_kvargs.h>
+#include <rte_spinlock.h>
+
+#define ETH_NULL_PACKET_SIZE_ARG "size"
+#define ETH_NULL_PACKET_COPY_ARG "copy"
+
+static unsigned default_packet_size = 64;
+static unsigned default_packet_copy;
+
+static const char *valid_arguments[] = {
+ ETH_NULL_PACKET_SIZE_ARG,
+ ETH_NULL_PACKET_COPY_ARG,
+ NULL
+};
+
+struct pmd_internals;
+
+struct null_queue {
+ struct pmd_internals *internals;
+
+ struct rte_mempool *mb_pool;
+ struct rte_mbuf *dummy_packet;
+
+ rte_atomic64_t rx_pkts;
+ rte_atomic64_t tx_pkts;
+ rte_atomic64_t err_pkts;
+};
+
+struct pmd_internals {
+ unsigned packet_size;
+ unsigned packet_copy;
+ uint16_t port_id;
+
+ struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
+ struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];
+
+ struct ether_addr eth_addr;
+ /** Bit mask of RSS offloads, the bit offset also means flow type */
+ uint64_t flow_type_rss_offloads;
+
+ rte_spinlock_t rss_lock;
+
+ uint16_t reta_size;
+ struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
+ RTE_RETA_GROUP_SIZE];
+
+ uint8_t rss_key[40]; /**< 40-byte hash key. */
+};
+static struct rte_eth_link pmd_link = {
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_FIXED,
+};
+
+static int eth_null_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, eth_null_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+static uint16_t
+eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ int i;
+ struct null_queue *h = q;
+ unsigned packet_size;
+
+ if ((q == NULL) || (bufs == NULL))
+ return 0;
+
+ packet_size = h->internals->packet_size;
+ if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
+ return 0;
+
+ for (i = 0; i < nb_bufs; i++) {
+ bufs[i]->data_len = (uint16_t)packet_size;
+ bufs[i]->pkt_len = packet_size;
+ bufs[i]->port = h->internals->port_id;
+ }
+
+ rte_atomic64_add(&(h->rx_pkts), i);
+
+ return i;
+}
+
+static uint16_t
+eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ int i;
+ struct null_queue *h = q;
+ unsigned packet_size;
+
+ if ((q == NULL) || (bufs == NULL))
+ return 0;
+
+ packet_size = h->internals->packet_size;
+ if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
+ return 0;
+
+ for (i = 0; i < nb_bufs; i++) {
+ rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
+ packet_size);
+ bufs[i]->data_len = (uint16_t)packet_size;
+ bufs[i]->pkt_len = packet_size;
+ bufs[i]->port = h->internals->port_id;
+ }
+
+ rte_atomic64_add(&(h->rx_pkts), i);
+
+ return i;
+}
+
+static uint16_t
+eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ int i;
+ struct null_queue *h = q;
+
+ if ((q == NULL) || (bufs == NULL))
+ return 0;
+
+ for (i = 0; i < nb_bufs; i++)
+ rte_pktmbuf_free(bufs[i]);
+
+ rte_atomic64_add(&(h->tx_pkts), i);
+
+ return i;
+}
+
+static uint16_t
+eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ int i;
+ struct null_queue *h = q;
+ unsigned packet_size;
+
+ if ((q == NULL) || (bufs == NULL))
+ return 0;
+
+ packet_size = h->internals->packet_size;
+ for (i = 0; i < nb_bufs; i++) {
+ rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
+ packet_size);
+ rte_pktmbuf_free(bufs[i]);
+ }
+
+ rte_atomic64_add(&(h->tx_pkts), i);
+
+ return i;
+}
+
+static int
+eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static int
+eth_dev_start(struct rte_eth_dev *dev)
+{
+ if (dev == NULL)
+ return -EINVAL;
+
+ dev->data->dev_link.link_status = ETH_LINK_UP;
+ return 0;
+}
+
+static void
+eth_dev_stop(struct rte_eth_dev *dev)
+{
+ if (dev == NULL)
+ return;
+
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+}
+
+static int
+eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct rte_mbuf *dummy_packet;
+ struct pmd_internals *internals;
+ unsigned packet_size;
+
+ if ((dev == NULL) || (mb_pool == NULL))
+ return -EINVAL;
+
+ internals = dev->data->dev_private;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues)
+ return -ENODEV;
+
+ packet_size = internals->packet_size;
+
+ internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
+ dev->data->rx_queues[rx_queue_id] =
+ &internals->rx_null_queues[rx_queue_id];
+ dummy_packet = rte_zmalloc_socket(NULL,
+ packet_size, 0, dev->data->numa_node);
+ if (dummy_packet == NULL)
+ return -ENOMEM;
+
+ internals->rx_null_queues[rx_queue_id].internals = internals;
+ internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;
+
+ return 0;
+}
+
+static int
+eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct rte_mbuf *dummy_packet;
+ struct pmd_internals *internals;
+ unsigned packet_size;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ internals = dev->data->dev_private;
+
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -ENODEV;
+
+ packet_size = internals->packet_size;
+
+ dev->data->tx_queues[tx_queue_id] =
+ &internals->tx_null_queues[tx_queue_id];
+ dummy_packet = rte_zmalloc_socket(NULL,
+ packet_size, 0, dev->data->numa_node);
+ if (dummy_packet == NULL)
+ return -ENOMEM;
+
+ internals->tx_null_queues[tx_queue_id].internals = internals;
+ internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;
+
+ return 0;
+}
+
+static int
+eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
+{
+ return 0;
+}
+
+static void
+eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internals *internals;
+
+ if ((dev == NULL) || (dev_info == NULL))
+ return;
+
+ internals = dev->data->dev_private;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t)-1;
+ dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
+ dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
+ dev_info->min_rx_bufsize = 0;
+ dev_info->reta_size = internals->reta_size;
+ dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
+}
+
+static int
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
+{
+ unsigned i, num_stats;
+ unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
+ const struct pmd_internals *internal;
+
+ if ((dev == NULL) || (igb_stats == NULL))
+ return -EINVAL;
+
+ internal = dev->data->dev_private;
+ num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+ RTE_MIN(dev->data->nb_rx_queues,
+ RTE_DIM(internal->rx_null_queues)));
+ for (i = 0; i < num_stats; i++) {
+ igb_stats->q_ipackets[i] =
+ internal->rx_null_queues[i].rx_pkts.cnt;
+ rx_total += igb_stats->q_ipackets[i];
+ }
+
+ num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+ RTE_MIN(dev->data->nb_tx_queues,
+ RTE_DIM(internal->tx_null_queues)));
+ for (i = 0; i < num_stats; i++) {
+ igb_stats->q_opackets[i] =
+ internal->tx_null_queues[i].tx_pkts.cnt;
+ igb_stats->q_errors[i] =
+ internal->tx_null_queues[i].err_pkts.cnt;
+ tx_total += igb_stats->q_opackets[i];
+ tx_err_total += igb_stats->q_errors[i];
+ }
+
+ igb_stats->ipackets = rx_total;
+ igb_stats->opackets = tx_total;
+ igb_stats->oerrors = tx_err_total;
+
+ return 0;
+}
+
+static void
+eth_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned i;
+ struct pmd_internals *internal;
+
+ if (dev == NULL)
+ return;
+
+ internal = dev->data->dev_private;
+ for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
+ internal->rx_null_queues[i].rx_pkts.cnt = 0;
+ for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
+ internal->tx_null_queues[i].tx_pkts.cnt = 0;
+ internal->tx_null_queues[i].err_pkts.cnt = 0;
+ }
+}
+
+static void
+eth_queue_release(void *q)
+{
+ struct null_queue *nq;
+
+ if (q == NULL)
+ return;
+
+ nq = q;
+ rte_free(nq->dummy_packet);
+}
+
+static int
+eth_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused) { return 0; }
+
+static int
+eth_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
+{
+ int i, j;
+ struct pmd_internals *internal = dev->data->dev_private;
+
+ if (reta_size != internal->reta_size)
+ return -EINVAL;
+
+ rte_spinlock_lock(&internal->rss_lock);
+
+ /* Copy RETA table */
+ for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
+ internal->reta_conf[i].mask = reta_conf[i].mask;
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ if ((reta_conf[i].mask >> j) & 0x01)
+ internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
+ }
+
+ rte_spinlock_unlock(&internal->rss_lock);
+
+ return 0;
+}
+
+static int
+eth_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
+{
+ int i, j;
+ struct pmd_internals *internal = dev->data->dev_private;
+
+ if (reta_size != internal->reta_size)
+ return -EINVAL;
+
+ rte_spinlock_lock(&internal->rss_lock);
+
+ /* Copy RETA table */
+ for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ if ((reta_conf[i].mask >> j) & 0x01)
+ reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
+ }
+
+ rte_spinlock_unlock(&internal->rss_lock);
+
+ return 0;
+}
+
+static int
+eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
+{
+ struct pmd_internals *internal = dev->data->dev_private;
+
+ rte_spinlock_lock(&internal->rss_lock);
+
+ if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
+ rss_conf->rss_hf & internal->flow_type_rss_offloads;
+
+ if (rss_conf->rss_key)
+ rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);
+
+ rte_spinlock_unlock(&internal->rss_lock);
+
+ return 0;
+}
+
+static int
+eth_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct pmd_internals *internal = dev->data->dev_private;
+
+ rte_spinlock_lock(&internal->rss_lock);
+
+ rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+ if (rss_conf->rss_key)
+ rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);
+
+ rte_spinlock_unlock(&internal->rss_lock);
+
+ return 0;
+}
+
+static int
+eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct ether_addr *addr)
+{
+ return 0;
+}
+
+static const struct eth_dev_ops ops = {
+ .dev_start = eth_dev_start,
+ .dev_stop = eth_dev_stop,
+ .dev_configure = eth_dev_configure,
+ .dev_infos_get = eth_dev_info,
+ .rx_queue_setup = eth_rx_queue_setup,
+ .tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_release = eth_queue_release,
+ .tx_queue_release = eth_queue_release,
+ .mtu_set = eth_mtu_set,
+ .link_update = eth_link_update,
+ .mac_addr_set = eth_mac_address_set,
+ .stats_get = eth_stats_get,
+ .stats_reset = eth_stats_reset,
+ .reta_update = eth_rss_reta_update,
+ .reta_query = eth_rss_reta_query,
+ .rss_hash_update = eth_rss_hash_update,
+ .rss_hash_conf_get = eth_rss_hash_conf_get
+};
+
+static struct rte_vdev_driver pmd_null_drv;
+
+static int
+eth_dev_null_create(struct rte_vdev_device *dev,
+ unsigned packet_size,
+ unsigned packet_copy)
+{
+ const unsigned nb_rx_queues = 1;
+ const unsigned nb_tx_queues = 1;
+ struct rte_eth_dev_data *data;
+ struct pmd_internals *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+
+ static const uint8_t default_rss_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
+ 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
+ 0xBE, 0xAC, 0x01, 0xFA
+ };
+
+ if (dev->device.numa_node == SOCKET_ID_ANY)
+ dev->device.numa_node = rte_socket_id();
+
+ PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
+ dev->device.numa_node);
+
+ eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
+ if (!eth_dev)
+ return -ENOMEM;
+
+ /* now put it all together
+ * - store queue data in internals,
+ * - store numa_node info in ethdev data
+ * - point eth_dev_data to internals
+ * - and point eth_dev structure to new eth_dev_data structure
+ */
+ /* NOTE: we'll replace the data element, of originally allocated eth_dev
+ * so the nulls are local per-process */
+
+ internals = eth_dev->data->dev_private;
+ internals->packet_size = packet_size;
+ internals->packet_copy = packet_copy;
+ internals->port_id = eth_dev->data->port_id;
+ eth_random_addr(internals->eth_addr.addr_bytes);
+
+ internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+ internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
+
+ rte_memcpy(internals->rss_key, default_rss_key, 40);
+
+ data = eth_dev->data;
+ data->nb_rx_queues = (uint16_t)nb_rx_queues;
+ data->nb_tx_queues = (uint16_t)nb_tx_queues;
+ data->dev_link = pmd_link;
+ data->mac_addrs = &internals->eth_addr;
+
+ eth_dev->dev_ops = &ops;
+
+ /* finally assign rx and tx ops */
+ if (packet_copy) {
+ eth_dev->rx_pkt_burst = eth_null_copy_rx;
+ eth_dev->tx_pkt_burst = eth_null_copy_tx;
+ } else {
+ eth_dev->rx_pkt_burst = eth_null_rx;
+ eth_dev->tx_pkt_burst = eth_null_tx;
+ }
+
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+}
+
+static inline int
+get_packet_size_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ const char *a = value;
+ unsigned *packet_size = extra_args;
+
+ if ((value == NULL) || (extra_args == NULL))
+ return -EINVAL;
+
+ *packet_size = (unsigned)strtoul(a, NULL, 0);
+ if (*packet_size == UINT_MAX)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+get_packet_copy_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ const char *a = value;
+ unsigned *packet_copy = extra_args;
+
+ if ((value == NULL) || (extra_args == NULL))
+ return -EINVAL;
+
+ *packet_copy = (unsigned)strtoul(a, NULL, 0);
+ if (*packet_copy == UINT_MAX)
+ return -1;
+
+ return 0;
+}
+
+static int
+rte_pmd_null_probe(struct rte_vdev_device *dev)
+{
+ const char *name, *params;
+ unsigned packet_size = default_packet_size;
+ unsigned packet_copy = default_packet_copy;
+ struct rte_kvargs *kvlist = NULL;
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(dev);
+ params = rte_vdev_device_args(dev);
+ PMD_LOG(INFO, "Initializing pmd_null for %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(params) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ if (params != NULL) {
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist == NULL)
+ return -1;
+
+ if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
+
+ ret = rte_kvargs_process(kvlist,
+ ETH_NULL_PACKET_SIZE_ARG,
+ &get_packet_size_arg, &packet_size);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
+
+ ret = rte_kvargs_process(kvlist,
+ ETH_NULL_PACKET_COPY_ARG,
+ &get_packet_copy_arg, &packet_copy);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+ }
+
+ PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
+ "packet copy is %s", packet_size,
+ packet_copy ? "enabled" : "disabled");
+
+ ret = eth_dev_null_create(dev, packet_size, packet_copy);
+
+free_kvlist:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+rte_pmd_null_remove(struct rte_vdev_device *dev)
+{
+ struct rte_eth_dev *eth_dev = NULL;
+
+ if (!dev)
+ return -EINVAL;
+
+ PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
+ rte_socket_id());
+
+ /* find the ethdev entry */
+ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
+ if (eth_dev == NULL)
+ return -1;
+
+ rte_free(eth_dev->data->dev_private);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_null_drv = {
+ .probe = rte_pmd_null_probe,
+ .remove = rte_pmd_null_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
+RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
+RTE_PMD_REGISTER_PARAM_STRING(net_null,
+ "size=<int> "
+ "copy=<int>");
+
+RTE_INIT(eth_null_init_log)
+{
+ eth_null_logtype = rte_log_register("pmd.net.null");
+ if (eth_null_logtype >= 0)
+ rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/null/rte_pmd_null_version.map b/src/spdk/dpdk/drivers/net/null/rte_pmd_null_version.map
new file mode 100644
index 00000000..ef353984
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/null/rte_pmd_null_version.map
@@ -0,0 +1,4 @@
+DPDK_2.0 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/octeontx/Makefile b/src/spdk/dpdk/drivers/net/octeontx/Makefile
new file mode 100644
index 00000000..885f1768
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/Makefile
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_octeontx.a
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
+
+EXPORT_MAP := rte_pmd_octeontx_version.map
+
+LIBABIVER := 1
+
+OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_pkovf.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_pkivf.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_bgx.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_ethdev.c
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_octeontx_rxtx.o += -fno-prefetch-loop-arrays
+
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_octeontx_rxtx.o += -O3 -Ofast
+else
+CFLAGS_octeontx_rxtx.o += -O3 -ffast-math
+endif
+
+else
+CFLAGS_octeontx_rxtx.o += -O3 -Ofast
+endif
+
+CFLAGS_octeontx_ethdev.o += -DALLOW_EXPERIMENTAL_API
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_common_octeontx
+LDLIBS += -lrte_mempool_octeontx
+LDLIBS += -lrte_eventdev
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/meson.build b/src/spdk/dpdk/drivers/net/octeontx/base/meson.build
new file mode 100644
index 00000000..09f657ab
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+sources = [
+ 'octeontx_pkovf.c',
+ 'octeontx_pkivf.c',
+ 'octeontx_bgx.c'
+]
+
+depends = ['ethdev', 'mempool_octeontx']
+static_objs = []
+foreach d: depends
+ static_objs += [get_variable('static_rte_' + d)]
+endforeach
+
+base_lib = static_library('octeontx_base', sources,
+ c_args: cflags,
+ dependencies: static_objs,
+)
+
+base_objs = base_lib.extract_all_objects()
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.c b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.c
new file mode 100644
index 00000000..0e238826
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.c
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include "octeontx_bgx.h"
+
+int
+octeontx_bgx_port_open(int port, octeontx_mbox_bgx_port_conf_t *conf)
+{
+ struct octeontx_mbox_hdr hdr;
+ octeontx_mbox_bgx_port_conf_t bgx_conf;
+ int len = sizeof(octeontx_mbox_bgx_port_conf_t);
+ int res;
+
+ memset(&bgx_conf, 0, sizeof(octeontx_mbox_bgx_port_conf_t));
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_OPEN;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
+ if (res < 0)
+ return -EACCES;
+
+ conf->enable = bgx_conf.enable;
+ conf->promisc = bgx_conf.promisc;
+ conf->bpen = bgx_conf.bpen;
+ conf->node = bgx_conf.node;
+ conf->base_chan = bgx_conf.base_chan;
+ conf->num_chans = bgx_conf.num_chans;
+ conf->mtu = bgx_conf.mtu;
+ conf->bgx = bgx_conf.bgx;
+ conf->lmac = bgx_conf.lmac;
+ conf->mode = bgx_conf.mode;
+ conf->pkind = bgx_conf.pkind;
+ memcpy(conf->macaddr, bgx_conf.macaddr, 6);
+
+ return res;
+}
+
+int
+octeontx_bgx_port_close(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_CLOSE;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_start(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_START;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_stop(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_STOP;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_get_config(int port, octeontx_mbox_bgx_port_conf_t *conf)
+{
+ struct octeontx_mbox_hdr hdr;
+ octeontx_mbox_bgx_port_conf_t bgx_conf;
+ int len = sizeof(octeontx_mbox_bgx_port_conf_t);
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_CONFIG;
+ hdr.vfid = port;
+
+ memset(&bgx_conf, 0, sizeof(octeontx_mbox_bgx_port_conf_t));
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
+ if (res < 0)
+ return -EACCES;
+
+ conf->enable = bgx_conf.enable;
+ conf->promisc = bgx_conf.promisc;
+ conf->bpen = bgx_conf.bpen;
+ conf->node = bgx_conf.node;
+ conf->base_chan = bgx_conf.base_chan;
+ conf->num_chans = bgx_conf.num_chans;
+ conf->mtu = bgx_conf.mtu;
+ conf->bgx = bgx_conf.bgx;
+ conf->lmac = bgx_conf.lmac;
+ conf->mode = bgx_conf.mode;
+ conf->pkind = bgx_conf.pkind;
+ memcpy(conf->macaddr, bgx_conf.macaddr, 6);
+
+ return res;
+}
+
+int
+octeontx_bgx_port_status(int port, octeontx_mbox_bgx_port_status_t *stat)
+{
+ struct octeontx_mbox_hdr hdr;
+ octeontx_mbox_bgx_port_status_t bgx_stat;
+ int len = sizeof(octeontx_mbox_bgx_port_status_t);
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_STATUS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stat, len);
+ if (res < 0)
+ return -EACCES;
+
+ stat->link_up = bgx_stat.link_up;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_stats(int port, octeontx_mbox_bgx_port_stats_t *stats)
+{
+ struct octeontx_mbox_hdr hdr;
+ octeontx_mbox_bgx_port_stats_t bgx_stats;
+ int len = sizeof(octeontx_mbox_bgx_port_stats_t);
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_STATS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stats, len);
+ if (res < 0)
+ return -EACCES;
+
+ stats->rx_packets = bgx_stats.rx_packets;
+ stats->rx_bytes = bgx_stats.rx_bytes;
+ stats->rx_dropped = bgx_stats.rx_dropped;
+ stats->rx_errors = bgx_stats.rx_errors;
+ stats->tx_packets = bgx_stats.tx_packets;
+ stats->tx_bytes = bgx_stats.tx_bytes;
+ stats->tx_dropped = bgx_stats.tx_dropped;
+ stats->tx_errors = bgx_stats.tx_errors;
+ return res;
+}
+
+int
+octeontx_bgx_port_stats_clr(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_CLR_STATS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_link_status(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ uint8_t link;
+ int len = sizeof(uint8_t);
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_LINK_STATUS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, &link, len);
+ if (res < 0)
+ return -EACCES;
+
+ return link;
+}
+
+int
+octeontx_bgx_port_promisc_set(int port, int en)
+{
+ struct octeontx_mbox_hdr hdr;
+ uint8_t prom;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_SET_PROMISC;
+ hdr.vfid = port;
+ prom = en ? 1 : 0;
+
+ res = octeontx_mbox_send(&hdr, &prom, sizeof(prom), NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_mac_set(int port, uint8_t *mac_addr)
+{
+ struct octeontx_mbox_hdr hdr;
+ int len = 6;
+ int res = 0;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_SET_MACADDR;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, mac_addr, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.h
new file mode 100644
index 00000000..ff265149
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_BGX_H__
+#define __OCTEONTX_BGX_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <octeontx_mbox.h>
+
+#define OCTEONTX_BGX_COPROC 6
+
+/* BGX messages */
+#define MBOX_BGX_PORT_OPEN 0
+#define MBOX_BGX_PORT_CLOSE 1
+#define MBOX_BGX_PORT_START 2
+#define MBOX_BGX_PORT_STOP 3
+#define MBOX_BGX_PORT_GET_CONFIG 4
+#define MBOX_BGX_PORT_GET_STATUS 5
+#define MBOX_BGX_PORT_GET_STATS 6
+#define MBOX_BGX_PORT_CLR_STATS 7
+#define MBOX_BGX_PORT_GET_LINK_STATUS 8
+#define MBOX_BGX_PORT_SET_PROMISC 9
+#define MBOX_BGX_PORT_SET_MACADDR 10
+#define MBOX_BGX_PORT_SET_BP 11
+#define MBOX_BGX_PORT_SET_BCAST 12
+#define MBOX_BGX_PORT_SET_MCAST 13
+
+/* BGX port configuration parameters: */
+typedef struct octeontx_mbox_bgx_port_conf {
+ uint8_t enable;
+ uint8_t promisc;
+ uint8_t bpen;
+ uint8_t macaddr[6]; /* MAC address.*/
+ uint8_t fcs_strip;
+ uint8_t bcast_mode;
+ uint8_t mcast_mode;
+ uint8_t node; /* CPU node */
+ uint16_t base_chan;
+ uint16_t num_chans;
+ uint16_t mtu;
+ uint8_t bgx;
+ uint8_t lmac;
+ uint8_t mode;
+ uint8_t pkind;
+} octeontx_mbox_bgx_port_conf_t;
+
+/* BGX port status: */
+typedef struct octeontx_mbox_bgx_port_status {
+ uint8_t link_up;
+ uint8_t bp;
+} octeontx_mbox_bgx_port_status_t;
+
+/* BGX port statistics: */
+typedef struct octeontx_mbox_bgx_port_stats {
+ uint64_t rx_packets;
+ uint64_t tx_packets;
+ uint64_t rx_bytes;
+ uint64_t tx_bytes;
+ uint64_t rx_errors;
+ uint64_t tx_errors;
+ uint64_t rx_dropped;
+ uint64_t tx_dropped;
+ uint64_t multicast;
+ uint64_t collisions;
+
+ uint64_t rx_length_errors;
+ uint64_t rx_over_errors;
+ uint64_t rx_crc_errors;
+ uint64_t rx_frame_errors;
+ uint64_t rx_fifo_errors;
+ uint64_t rx_missed_errors;
+
+ /* Detailed transmit errors. */
+ uint64_t tx_aborted_errors;
+ uint64_t tx_carrier_errors;
+ uint64_t tx_fifo_errors;
+ uint64_t tx_heartbeat_errors;
+ uint64_t tx_window_errors;
+
+ /* Extended statistics based on RFC2819. */
+ uint64_t rx_1_to_64_packets;
+ uint64_t rx_65_to_127_packets;
+ uint64_t rx_128_to_255_packets;
+ uint64_t rx_256_to_511_packets;
+ uint64_t rx_512_to_1023_packets;
+ uint64_t rx_1024_to_1522_packets;
+ uint64_t rx_1523_to_max_packets;
+
+ uint64_t tx_1_to_64_packets;
+ uint64_t tx_65_to_127_packets;
+ uint64_t tx_128_to_255_packets;
+ uint64_t tx_256_to_511_packets;
+ uint64_t tx_512_to_1023_packets;
+ uint64_t tx_1024_to_1522_packets;
+ uint64_t tx_1523_to_max_packets;
+
+ uint64_t tx_multicast_packets;
+ uint64_t rx_broadcast_packets;
+ uint64_t tx_broadcast_packets;
+ uint64_t rx_undersized_errors;
+ uint64_t rx_oversize_errors;
+ uint64_t rx_fragmented_errors;
+ uint64_t rx_jabber_errors;
+} octeontx_mbox_bgx_port_stats_t;
+
+int octeontx_bgx_port_open(int port, octeontx_mbox_bgx_port_conf_t *conf);
+int octeontx_bgx_port_close(int port);
+int octeontx_bgx_port_start(int port);
+int octeontx_bgx_port_stop(int port);
+int octeontx_bgx_port_get_config(int port, octeontx_mbox_bgx_port_conf_t *conf);
+int octeontx_bgx_port_status(int port, octeontx_mbox_bgx_port_status_t *stat);
+int octeontx_bgx_port_stats(int port, octeontx_mbox_bgx_port_stats_t *stats);
+int octeontx_bgx_port_stats_clr(int port);
+int octeontx_bgx_port_link_status(int port);
+int octeontx_bgx_port_promisc_set(int port, int en);
+int octeontx_bgx_port_mac_set(int port, uint8_t *mac_addr);
+
+#endif /* __OCTEONTX_BGX_H__ */
+
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_io.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_io.h
new file mode 100644
index 00000000..d51ded23
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_io.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_IO_H__
+#define __OCTEONTX_IO_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <rte_io.h>
+
+/* In Cavium OcteonTX SoC, all accesses to the device registers are
+ * implicitly strongly ordered. So, The relaxed version of IO operation is
+ * safe to use with out any IO memory barriers.
+ */
+#define octeontx_read64 rte_read64_relaxed
+#define octeontx_write64 rte_write64_relaxed
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define octeontx_prefetch_store_keep(_ptr) ({\
+ asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); })
+
+#define octeontx_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define octeontx_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1]]" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+#else /* Un optimized functions for building on non arm64 arch */
+
+#define octeontx_prefetch_store_keep(_ptr) do {} while (0)
+
+#define octeontx_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64(addr); \
+ val1 = rte_read64(((uint8_t *)addr) + 8); \
+} while (0)
+
+#define octeontx_store_pair(val0, val1, addr) \
+do { \
+ rte_write64(val0, addr); \
+ rte_write64(val1, (((uint8_t *)addr) + 8)); \
+} while (0)
+#endif
+
+#if defined(RTE_ARCH_ARM64)
+/**
+ * Perform an atomic fetch-and-add operation.
+ */
+static inline uint64_t
+octeontx_reg_ldadd_u64(void *addr, int64_t off)
+{
+ uint64_t old_val;
+
+ __asm__ volatile(
+ " .cpu generic+lse\n"
+ " ldadd %1, %0, [%2]\n"
+ : "=r" (old_val) : "r" (off), "r" (addr) : "memory");
+
+ return old_val;
+}
+
+/**
+ * Perform a LMTST operation - an atomic write of up to 128 byte to
+ * an I/O block that supports this operation type.
+ *
+ * @param lmtline_va is the address where LMTLINE is mapped
+ * @param ioreg_va is the virtual address of the device register
+ * @param cmdbuf is the array of peripheral commands to execute
+ * @param cmdsize is the number of 64-bit words in 'cmdbuf'
+ *
+ * @return N/A
+ */
+static inline void
+octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[],
+ uint64_t cmdsize)
+{
+ uint64_t result;
+ uint64_t word_count;
+ uint64_t *lmtline = lmtline_va;
+
+ word_count = cmdsize;
+
+ do {
+ /* Copy commands to LMTLINE */
+ for (result = 0; result < word_count; result += 2) {
+ lmtline[result + 0] = cmdbuf[result + 0];
+ lmtline[result + 1] = cmdbuf[result + 1];
+ }
+
+ /* LDEOR initiates atomic transfer to I/O device */
+ __asm__ volatile(
+ " .cpu generic+lse\n"
+ " ldeor xzr, %0, [%1]\n"
+ : "=r" (result) : "r" (ioreg_va) : "memory");
+ } while (!result);
+}
+
+#else
+
+static inline uint64_t
+octeontx_reg_ldadd_u64(void *addr, int64_t off)
+{
+ RTE_SET_USED(addr);
+ RTE_SET_USED(off);
+ return 0;
+}
+
+static inline void
+octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[],
+ uint64_t cmdsize)
+{
+ RTE_SET_USED(lmtline_va);
+ RTE_SET_USED(ioreg_va);
+ RTE_SET_USED(cmdbuf);
+ RTE_SET_USED(cmdsize);
+}
+
+#endif
+#endif /* __OCTEONTX_IO_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pki_var.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pki_var.h
new file mode 100644
index 00000000..c793b655
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pki_var.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_PKI_VAR_H__
+#define __OCTEONTX_PKI_VAR_H__
+
+#include <rte_byteorder.h>
+
+#define OCTTX_PACKET_WQE_SKIP 128
+#define OCTTX_PACKET_FIRST_SKIP 240
+#define OCTTX_PACKET_LATER_SKIP 128
+
+/* WQE descriptor */
+typedef union octtx_wqe_s {
+ uint64_t w[6];
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ struct {
+ uint64_t pknd : 6;
+ uint64_t rsvd0 : 10;
+ uint64_t style : 8;
+ uint64_t bufs : 8;
+ uint64_t chan : 12;
+ uint64_t apad : 3;
+ uint64_t rsvd1 : 1;
+ uint64_t aura : 12;
+ uint64_t rsvd2 : 4;
+ } w0;
+
+ struct {
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t grp : 10;
+ uint64_t rsvd0 : 2;
+ uint64_t rsvd1 : 2;
+ uint64_t len : 16;
+ } w1;
+
+ struct {
+ uint64_t op_code : 8;
+ uint64_t err_lev : 3;
+ uint64_t raw : 1;
+ uint64_t l2m : 1;
+ uint64_t l2b : 1;
+ uint64_t l3m : 1;
+ uint64_t l3b : 1;
+ uint64_t l3fr : 1;
+ uint64_t pf1 : 1;
+ uint64_t pf2 : 1;
+ uint64_t pf3 : 1;
+ uint64_t pf4 : 1;
+ uint64_t sh : 1;
+ uint64_t vs : 1;
+ uint64_t vv : 1;
+ uint64_t rsvd0 : 8;
+ uint64_t lae : 1;
+ uint64_t lbty : 5;
+ uint64_t lcty : 5;
+ uint64_t ldty : 5;
+ uint64_t lety : 5;
+ uint64_t lfty : 5;
+ uint64_t lgty : 5;
+ uint64_t sw : 1;
+ } w2;
+
+ struct {
+ uint64_t addr; /* Byte addr of start-of-pkt */
+ } w3;
+
+ struct {
+ uint64_t laptr : 8;
+ uint64_t lbptr : 8;
+ uint64_t lcptr : 8;
+ uint64_t ldprt : 8;
+ uint64_t leptr : 8;
+ uint64_t lfptr : 8;
+ uint64_t lgptr : 8;
+ uint64_t vlptr : 8;
+ } w4;
+
+ struct {
+ uint64_t rsvd0 : 47;
+ uint64_t dwd : 1;
+ uint64_t size : 16;
+ } w5;
+#else
+ struct {
+ uint64_t rsvd2 : 4;
+ uint64_t aura : 12;
+ uint64_t rsvd1 : 1;
+ uint64_t apad : 3;
+ uint64_t chan : 12;
+ uint64_t bufs : 8;
+ uint64_t style : 8;
+ uint64_t rsvd0 : 10;
+ uint64_t pknd : 6;
+ } w0;
+
+ struct {
+ uint64_t len : 16;
+ uint64_t rsvd1 : 2;
+ uint64_t rsvd0 : 2;
+ uint64_t grp : 10;
+ uint64_t tt : 2;
+ uint64_t tag : 32;
+ } w1;
+
+ struct {
+ uint64_t sw : 1;
+ uint64_t lgty : 5;
+ uint64_t lfty : 5;
+ uint64_t lety : 5;
+ uint64_t ldty : 5;
+ uint64_t lcty : 5;
+ uint64_t lbty : 5;
+ uint64_t lae : 1;
+ uint64_t rsvd0 : 8;
+ uint64_t vv : 1;
+ uint64_t vs : 1;
+ uint64_t sh : 1;
+ uint64_t pf4 : 1;
+ uint64_t pf3 : 1;
+ uint64_t pf2 : 1;
+ uint64_t pf1 : 1;
+ uint64_t l3fr : 1;
+ uint64_t l3b : 1;
+ uint64_t l3m : 1;
+ uint64_t l2b : 1;
+ uint64_t l2m : 1;
+ uint64_t raw : 1;
+ uint64_t err_lev : 3;
+ uint64_t op_code : 8;
+ } w2;
+
+ struct {
+ uint64_t addr; /* Byte addr of start-of-pkt */
+ } w3;
+
+ struct {
+ uint64_t vlptr : 8;
+ uint64_t lgptr : 8;
+ uint64_t lfptr : 8;
+ uint64_t leptr : 8;
+ uint64_t ldprt : 8;
+ uint64_t lcptr : 8;
+ uint64_t lbptr : 8;
+ uint64_t laptr : 8;
+ } w4;
+#endif
+ } s;
+
+} __rte_packed octtx_wqe_t;
+
+enum occtx_pki_ltype_e {
+ OCCTX_PKI_LTYPE_NONE = 0,
+ OCCTX_PKI_LTYPE_ENET = 1,
+ OCCTX_PKI_LTYPE_VLAN = 2,
+ OCCTX_PKI_LTYPE_SNAP_PAYLD = 5,
+ OCCTX_PKI_LTYPE_ARP = 6,
+ OCCTX_PKI_LTYPE_RARP = 7,
+ OCCTX_PKI_LTYPE_IP4 = 8,
+ OCCTX_PKI_LTYPE_IP4_OPT = 9,
+ OCCTX_PKI_LTYPE_IP6 = 0xa,
+ OCCTX_PKI_LTYPE_IP6_OPT = 0xb,
+ OCCTX_PKI_LTYPE_IPSEC_ESP = 0xc,
+ OCCTX_PKI_LTYPE_IPFRAG = 0xd,
+ OCCTX_PKI_LTYPE_IPCOMP = 0xe,
+ OCCTX_PKI_LTYPE_TCP = 0x10,
+ OCCTX_PKI_LTYPE_UDP = 0x11,
+ OCCTX_PKI_LTYPE_SCTP = 0x12,
+ OCCTX_PKI_LTYPE_UDP_VXLAN = 0x13,
+ OCCTX_PKI_LTYPE_GRE = 0x14,
+ OCCTX_PKI_LTYPE_NVGRE = 0x15,
+ OCCTX_PKI_LTYPE_GTP = 0x16,
+ OCCTX_PKI_LTYPE_UDP_GENEVE = 0x17,
+ OCCTX_PKI_LTYPE_SW28 = 0x1c,
+ OCCTX_PKI_LTYPE_SW29 = 0x1d,
+ OCCTX_PKI_LTYPE_SW30 = 0x1e,
+ OCCTX_PKI_LTYPE_SW31 = 0x1f,
+ OCCTX_PKI_LTYPE_LAST
+};
+
+enum lc_type_e {
+ LC_NONE = OCCTX_PKI_LTYPE_NONE,
+ LC_IPV4 = OCCTX_PKI_LTYPE_IP4,
+ LC_IPV4_OPT = OCCTX_PKI_LTYPE_IP4_OPT,
+ LC_IPV6 = OCCTX_PKI_LTYPE_IP6,
+ LC_IPV6_OPT = OCCTX_PKI_LTYPE_IP6_OPT,
+};
+
+enum le_type_e {
+ LE_NONE = OCCTX_PKI_LTYPE_NONE,
+};
+
+enum lf_type_e {
+ LF_NONE = OCCTX_PKI_LTYPE_NONE,
+ LF_IPSEC_ESP = OCCTX_PKI_LTYPE_IPSEC_ESP,
+ LF_IPFRAG = OCCTX_PKI_LTYPE_IPFRAG,
+ LF_IPCOMP = OCCTX_PKI_LTYPE_IPCOMP,
+ LF_TCP = OCCTX_PKI_LTYPE_TCP,
+ LF_UDP = OCCTX_PKI_LTYPE_UDP,
+ LF_GRE = OCCTX_PKI_LTYPE_GRE,
+ LF_UDP_GENEVE = OCCTX_PKI_LTYPE_UDP_GENEVE,
+ LF_UDP_VXLAN = OCCTX_PKI_LTYPE_UDP_VXLAN,
+ LF_NVGRE = OCCTX_PKI_LTYPE_NVGRE,
+};
+#endif /* __OCTEONTX_PKI_VAR_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.c b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.c
new file mode 100644
index 00000000..1babea0e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.c
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include <rte_eal.h>
+#include <rte_bus_pci.h>
+
+#include "octeontx_pkivf.h"
+
+int
+octeontx_pki_port_open(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_OPEN;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+ return res;
+}
+
+int
+octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_hash_cfg_t h_cfg = *(mbox_pki_hash_cfg_t *)hash_cfg;
+ int len = sizeof(mbox_pki_hash_cfg_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_HASH_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &h_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_pktbuf_cfg_t b_cfg = *(mbox_pki_pktbuf_cfg_t *)buf_cfg;
+ int len = sizeof(mbox_pki_pktbuf_cfg_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_PKTBUF_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &b_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+ return res;
+}
+
+int
+octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_qos_cfg_t q_cfg = *(mbox_pki_qos_cfg_t *)qos_cfg;
+ int len = sizeof(mbox_pki_qos_cfg_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_CREATE_QOS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+
+int
+octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_errcheck_cfg_t e_cfg;
+ e_cfg = *((mbox_pki_errcheck_cfg_t *)(cfg));
+ int len = sizeof(mbox_pki_errcheck_cfg_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_ERRCHK_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &e_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_PKI_VF 0xA0DD
+
+/* PKIVF pcie device */
+static int
+pkivf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ RTE_SET_USED(pci_drv);
+ RTE_SET_USED(pci_dev);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ return 0;
+}
+
+static const struct rte_pci_id pci_pkivf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_PKI_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_pkivf = {
+ .id_table = pci_pkivf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = pkivf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_pkivf, pci_pkivf);
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.h
new file mode 100644
index 00000000..764aff53
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -0,0 +1,525 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_PKI_H__
+#define __OCTEONTX_PKI_H__
+
+#include <stdint.h>
+
+#include <octeontx_mbox.h>
+
+#define OCTEONTX_PKI_COPROC 5
+
+/* PKI messages */
+
+#define MBOX_PKI_PORT_OPEN 1
+#define MBOX_PKI_PORT_START 2
+#define MBOX_PKI_PORT_STOP 3
+#define MBOX_PKI_PORT_CLOSE 4
+#define MBOX_PKI_PORT_CONFIG 5
+#define MBOX_PKI_PORT_OPT_PARSER_CONFIG 6
+#define MBOX_PKI_PORT_CUSTOM_PARSER_CONFIG 7
+#define MBOX_PKI_PORT_PKTBUF_CONFIG 8
+#define MBOX_PKI_PORT_HASH_CONFIG 9
+#define MBOX_PKI_PORT_ERRCHK_CONFIG 10
+#define MBOX_PKI_PORT_CREATE_QOS 11
+#define MBOX_PKI_PORT_MODIFY_QOS 12
+#define MBOX_PKI_PORT_DELETE_QOS 13
+#define MBOX_PKI_PORT_PKTDROP_CONFIG 14
+#define MBOX_PKI_PORT_WQE_GEN_CONFIG 15
+#define MBOX_PKI_BACKPRESSURE_CONFIG 16
+#define MBOX_PKI_PORT_GET_STATS 17
+#define MBOX_PKI_PORT_RESET_STATS 18
+#define MBOX_PKI_GET_PORT_CONFIG 19
+#define MBOX_PKI_GET_PORT_QOS_CONFIG 20
+
+#define MBOX_PKI_MAX_QOS_ENTRY 64
+
+/* pki pkind parse mode */
+enum {
+ MBOX_PKI_PARSE_LA_TO_LG = 0,
+ MBOX_PKI_PARSE_LB_TO_LG = 1,
+ MBOX_PKI_PARSE_LC_TO_LG = 3,
+ MBOX_PKI_PARSE_LG = 0x3f,
+ MBOX_PKI_PARSE_NOTHING = 0x7f
+};
+
+/* Interface types: */
+enum {
+ OCTTX_PORT_TYPE_NET, /* Network interface ports */
+ OCTTX_PORT_TYPE_INT, /* CPU internal interface ports */
+ OCTTX_PORT_TYPE_PCI, /* DPI/PCIe interface ports */
+ OCTTX_PORT_TYPE_MAX
+};
+
+/* pki port config */
+typedef struct mbox_pki_port_type {
+ uint8_t port_type;
+} mbox_pki_port_t;
+
+/* pki port config */
+typedef struct mbox_pki_port_cfg {
+ uint8_t port_type;
+ struct {
+ uint8_t fcs_pres:1;
+ uint8_t fcs_skip:1;
+ uint8_t parse_mode:1;
+ uint8_t mpls_parse:1;
+ uint8_t inst_hdr_parse:1;
+ uint8_t fulc_parse:1;
+ uint8_t dsa_parse:1;
+ uint8_t hg2_parse:1;
+ uint8_t hg_parse:1;
+ } mmask;
+ uint8_t fcs_pres;
+ uint8_t fcs_skip;
+ uint8_t parse_mode;
+ uint8_t mpls_parse;
+ uint8_t inst_hdr_parse;
+ uint8_t fulc_parse;
+ uint8_t dsa_parse;
+ uint8_t hg2_parse;
+ uint8_t hg_parse;
+} mbox_pki_prt_cfg_t;
+
+/* pki Flow/style packet buffer config */
+typedef struct mbox_pki_port_pktbuf_cfg {
+ uint8_t port_type;
+ struct {
+ uint16_t f_mbuff_size:1;
+ uint16_t f_wqe_skip:1;
+ uint16_t f_first_skip:1;
+ uint16_t f_later_skip:1;
+ uint16_t f_pkt_outside_wqe:1;
+ uint16_t f_wqe_endian:1;
+ uint16_t f_cache_mode:1;
+ } mmask;
+ uint16_t mbuff_size;
+ uint16_t wqe_skip;
+ uint16_t first_skip;
+ uint16_t later_skip;
+ uint8_t pkt_outside_wqe;
+ uint8_t wqe_endian;
+ uint8_t cache_mode;
+} mbox_pki_pktbuf_cfg_t;
+
+/* pki flow/style tag config */
+typedef struct mbox_pki_port_hash_cfg {
+ uint8_t port_type;
+ uint32_t tag_slf:1;
+ uint32_t tag_sle:1;
+ uint32_t tag_sld:1;
+ uint32_t tag_slc:1;
+ uint32_t tag_dlf:1;
+ uint32_t tag_dle:1;
+ uint32_t tag_dld:1;
+ uint32_t tag_dlc:1;
+ uint32_t tag_prt:1;
+ uint32_t tag_vlan0:1;
+ uint32_t tag_vlan1:1;
+ uint32_t tag_ip_pctl:1;
+ uint32_t tag_sync:1;
+ uint32_t tag_spi:1;
+ uint32_t tag_gtp:1;
+ uint32_t tag_vni:1;
+} mbox_pki_hash_cfg_t;
+
+/* pki flow/style errcheck config */
+typedef struct mbox_pki_port_errcheck_cfg {
+ uint8_t port_type;
+ struct {
+ uint32_t f_ip6_udp_opt:1;
+ uint32_t f_lenerr_en:1;
+ uint32_t f_maxerr_en:1;
+ uint32_t f_minerr_en:1;
+ uint32_t f_fcs_chk:1;
+ uint32_t f_fcs_strip:1;
+ uint32_t f_len_lf:1;
+ uint32_t f_len_le:1;
+ uint32_t f_len_ld:1;
+ uint32_t f_len_lc:1;
+ uint32_t f_csum_lf:1;
+ uint32_t f_csum_le:1;
+ uint32_t f_csum_ld:1;
+ uint32_t f_csum_lc:1;
+ uint32_t f_min_frame_len;
+ uint32_t f_max_frame_len;
+ } mmask;
+ uint64_t ip6_udp_opt:1;
+ uint64_t lenerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t minerr_en:1;
+ uint64_t fcs_chk:1;
+ uint64_t fcs_strip:1;
+ uint64_t len_lf:1;
+ uint64_t len_le:1;
+ uint64_t len_ld:1;
+ uint64_t len_lc:1;
+ uint64_t csum_lf:1;
+ uint64_t csum_le:1;
+ uint64_t csum_ld:1;
+ uint64_t csum_lc:1;
+ uint64_t min_frame_len;
+ uint64_t max_frame_len;
+} mbox_pki_errcheck_cfg_t;
+
+/* CACHE MODE*/
+enum {
+ MBOX_PKI_OPC_MODE_STT = 0LL,
+ MBOX_PKI_OPC_MODE_STF = 1LL,
+ MBOX_PKI_OPC_MODE_STF1_STT = 2LL,
+ MBOX_PKI_OPC_MODE_STF2_STT = 3LL
+};
+
+/* PKI QPG QOS*/
+enum {
+ MBOX_PKI_QPG_QOS_NONE = 0,
+ MBOX_PKI_QPG_QOS_VLAN,
+ MBOX_PKI_QPG_QOS_MPLS,
+ MBOX_PKI_QPG_QOS_DSA_SRC,
+ MBOX_PKI_QPG_QOS_DIFFSERV,
+ MBOX_PKI_QPG_QOS_HIGIG,
+};
+
+struct mbox_pki_qos_entry {
+ uint16_t port_add;
+ uint16_t ggrp_ok;
+ uint16_t ggrp_bad;
+ uint16_t gaura;
+ uint8_t grptag_ok;
+ uint8_t grptag_bad;
+};
+
+/* pki flow/style enable qos */
+typedef struct mbox_pki_port_create_qos {
+ uint8_t port_type;
+ uint8_t qpg_qos;
+ uint8_t num_entry;
+ uint8_t tag_type;
+ uint8_t drop_policy;
+ struct mbox_pki_qos_entry qos_entry[MBOX_PKI_MAX_QOS_ENTRY];
+} mbox_pki_qos_cfg_t;
+
+/* pki flow/style enable qos */
+typedef struct mbox_pki_port_modify_qos_entry {
+ uint8_t port_type;
+ uint16_t index;
+ struct {
+ uint8_t f_port_add:1;
+ uint8_t f_grp_ok:1;
+ uint8_t f_grp_bad:1;
+ uint8_t f_gaura:1;
+ uint8_t f_grptag_ok:1;
+ uint8_t f_grptag_bad:1;
+ uint8_t f_tag_type:1;
+ } mmask;
+ uint8_t tag_type;
+ struct mbox_pki_qos_entry qos_entry;
+} mbox_pki_mod_qos_t;
+
+/* pki flow/style enable qos */
+typedef struct mbox_pki_port_delete_qos_entry {
+ uint8_t port_type;
+ uint16_t index;
+} mbox_pki_del_qos_t;
+
+/* PKI maximum constants */
+#define PKI_VF_MAX (1)
+#define PKI_MAX_PKTLEN (32768)
+
+/* pki pkind parse mode */
+enum {
+ PKI_PARSE_LA_TO_LG = 0,
+ PKI_PARSE_LB_TO_LG = 1,
+ PKI_PARSE_LC_TO_LG = 3,
+ PKI_PARSE_LG = 0x3f,
+ PKI_PARSE_NOTHING = 0x7f
+};
+
+/* pki port config */
+typedef struct pki_port_cfg {
+ uint8_t port_type;
+ struct {
+ uint8_t fcs_pres:1;
+ uint8_t fcs_skip:1;
+ uint8_t parse_mode:1;
+ uint8_t mpls_parse:1;
+ uint8_t inst_hdr_parse:1;
+ uint8_t fulc_parse:1;
+ uint8_t dsa_parse:1;
+ uint8_t hg2_parse:1;
+ uint8_t hg_parse:1;
+ } mmask;
+ uint8_t fcs_pres;
+ uint8_t fcs_skip;
+ uint8_t parse_mode;
+ uint8_t mpls_parse;
+ uint8_t inst_hdr_parse;
+ uint8_t fulc_parse;
+ uint8_t dsa_parse;
+ uint8_t hg2_parse;
+ uint8_t hg_parse;
+} pki_prt_cfg_t;
+
+
+/* pki Flow/style packet buffer config */
+typedef struct pki_port_pktbuf_cfg {
+ uint8_t port_type;
+ struct {
+ uint16_t f_mbuff_size:1;
+ uint16_t f_wqe_skip:1;
+ uint16_t f_first_skip:1;
+ uint16_t f_later_skip:1;
+ uint16_t f_pkt_outside_wqe:1;
+ uint16_t f_wqe_endian:1;
+ uint16_t f_cache_mode:1;
+ } mmask;
+ uint16_t mbuff_size;
+ uint16_t wqe_skip;
+ uint16_t first_skip;
+ uint16_t later_skip;
+ uint8_t pkt_outside_wqe;
+ uint8_t wqe_endian;
+ uint8_t cache_mode;
+} pki_pktbuf_cfg_t;
+
+/* pki flow/style tag config */
+typedef struct pki_port_hash_cfg {
+ uint8_t port_type;
+ uint32_t tag_slf:1;
+ uint32_t tag_sle:1;
+ uint32_t tag_sld:1;
+ uint32_t tag_slc:1;
+ uint32_t tag_dlf:1;
+ uint32_t tag_dle:1;
+ uint32_t tag_dld:1;
+ uint32_t tag_dlc:1;
+ uint32_t tag_prt:1;
+ uint32_t tag_vlan0:1;
+ uint32_t tag_vlan1:1;
+ uint32_t tag_ip_pctl:1;
+ uint32_t tag_sync:1;
+ uint32_t tag_spi:1;
+ uint32_t tag_gtp:1;
+ uint32_t tag_vni:1;
+} pki_hash_cfg_t;
+
+/* pki flow/style errcheck config */
+typedef struct pki_port_errcheck_cfg {
+ uint8_t port_type;
+ struct {
+ uint32_t f_ip6_udp_opt:1;
+ uint32_t f_lenerr_en:1;
+ uint32_t f_maxerr_en:1;
+ uint32_t f_minerr_en:1;
+ uint32_t f_fcs_chk:1;
+ uint32_t f_fcs_strip:1;
+ uint32_t f_len_lf:1;
+ uint32_t f_len_le:1;
+ uint32_t f_len_ld:1;
+ uint32_t f_len_lc:1;
+ uint32_t f_csum_lf:1;
+ uint32_t f_csum_le:1;
+ uint32_t f_csum_ld:1;
+ uint32_t f_csum_lc:1;
+ uint32_t f_min_frame_len;
+ uint32_t f_max_frame_len;
+ } mmask;
+ uint64_t ip6_udp_opt:1;
+ uint64_t lenerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t minerr_en:1;
+ uint64_t fcs_chk:1;
+ uint64_t fcs_strip:1;
+ uint64_t len_lf:1;
+ uint64_t len_le:1;
+ uint64_t len_ld:1;
+ uint64_t len_lc:1;
+ uint64_t csum_lf:1;
+ uint64_t csum_le:1;
+ uint64_t csum_ld:1;
+ uint64_t csum_lc:1;
+ uint64_t min_frame_len;
+ uint64_t max_frame_len;
+} pki_errchk_cfg_t;
+
+
+/* CACHE MODE*/
+enum {
+ PKI_OPC_MODE_STT = 0LL,
+ PKI_OPC_MODE_STF = 1LL,
+ PKI_OPC_MODE_STF1_STT = 2LL,
+ PKI_OPC_MODE_STF2_STT = 3LL
+};
+
+/* PKI QPG QOS*/
+enum {
+ PKI_QPG_QOS_NONE = 0,
+ PKI_QPG_QOS_VLAN,
+ PKI_QPG_QOS_MPLS,
+ PKI_QPG_QOS_DSA_SRC,
+ PKI_QPG_QOS_DIFFSERV,
+ PKI_QPG_QOS_HIGIG,
+};
+
+struct pki_qos_entry {
+ uint16_t port_add;
+ uint16_t ggrp_ok;
+ uint16_t ggrp_bad;
+ uint16_t gaura;
+ uint8_t grptag_ok;
+ uint8_t grptag_bad;
+ uint8_t ena_red;
+ uint8_t ena_drop;
+};
+
+#define PKO_MAX_QOS_ENTRY 64
+
+/* pki flow/style enable qos */
+typedef struct pki_port_create_qos {
+ uint8_t port_type;
+ uint8_t qpg_qos;
+ uint8_t num_entry;
+ uint8_t tag_type;
+ uint8_t drop_policy;
+ struct pki_qos_entry qos_entry[PKO_MAX_QOS_ENTRY];
+} pki_qos_cfg_t;
+
+/* pki flow/style enable qos */
+typedef struct pki_port_delete_qos_entry {
+ uint8_t port_type;
+ uint16_t index;
+} pki_del_qos_t;
+
+/* pki flow/style enable qos */
+typedef struct pki_port_modify_qos_entry {
+ uint8_t port_type;
+ uint16_t index;
+ struct {
+ uint8_t f_port_add:1;
+ uint8_t f_grp_ok:1;
+ uint8_t f_grp_bad:1;
+ uint8_t f_gaura:1;
+ uint8_t f_grptag_ok:1;
+ uint8_t f_grptag_bad:1;
+ uint8_t f_tag_type:1;
+ } mmask;
+ uint8_t tag_type;
+ struct pki_qos_entry qos_entry;
+} pki_mod_qos_t;
+
+static inline int
+octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_mod_qos_t q_cfg = *(mbox_pki_mod_qos_t *)qos_cfg;
+ int len = sizeof(mbox_pki_mod_qos_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_MODIFY_QOS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+static inline int
+octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_del_qos_t q_cfg = *(mbox_pki_del_qos_t *)qos_cfg;
+ int len = sizeof(mbox_pki_del_qos_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_DELETE_QOS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+static inline int
+octeontx_pki_port_close(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_port_t ptype;
+ int len = sizeof(mbox_pki_port_t);
+ memset(&ptype, 0, len);
+ ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_CLOSE;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+static inline int
+octeontx_pki_port_start(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_port_t ptype;
+ int len = sizeof(mbox_pki_port_t);
+ memset(&ptype, 0, len);
+ ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_START;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+static inline int
+octeontx_pki_port_stop(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_port_t ptype;
+ int len = sizeof(mbox_pki_port_t);
+ memset(&ptype, 0, len);
+ ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_STOP;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int octeontx_pki_port_open(int port);
+int octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg);
+int octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg);
+int octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg);
+int octeontx_pki_port_close(int port);
+int octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg);
+
+#endif /* __OCTEONTX_PKI_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c
new file mode 100644
index 00000000..0a6d64b8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c
@@ -0,0 +1,590 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdbool.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_eal.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include "../octeontx_logs.h"
+#include "octeontx_io.h"
+#include "octeontx_pkovf.h"
+
+struct octeontx_pko_iomem {
+ uint8_t *va;
+ rte_iova_t iova;
+ size_t size;
+};
+
+#define PKO_IOMEM_NULL (struct octeontx_pko_iomem){0, 0, 0}
+
+struct octeontx_pko_fc_ctl_s {
+ int64_t buf_cnt;
+ int64_t padding[(PKO_DQ_FC_STRIDE / 8) - 1];
+};
+
+struct octeontx_pkovf {
+ uint8_t *bar0;
+ uint8_t *bar2;
+ uint16_t domain;
+ uint16_t vfid;
+};
+
+struct octeontx_pko_vf_ctl_s {
+ rte_spinlock_t lock;
+
+ struct octeontx_pko_iomem fc_iomem;
+ struct octeontx_pko_fc_ctl_s *fc_ctl;
+ struct octeontx_pkovf pko[PKO_VF_MAX];
+ struct {
+ uint64_t chanid;
+ } dq_map[PKO_VF_MAX * PKO_VF_NUM_DQ];
+};
+
+static struct octeontx_pko_vf_ctl_s pko_vf_ctl;
+
+static void *
+octeontx_pko_dq_vf_bar0(uint16_t txq)
+{
+ int vf_ix;
+
+ vf_ix = txq / PKO_VF_NUM_DQ;
+ return pko_vf_ctl.pko[vf_ix].bar0;
+}
+
+static int
+octeontx_pko_dq_gdq(uint16_t txq)
+{
+ return txq % PKO_VF_NUM_DQ;
+}
+
+/**
+ * Open a PKO DQ.
+ */
+static inline
+int octeontx_pko_dq_open(uint16_t txq)
+{
+ unsigned int reg_off;
+ uint8_t *vf_bar0;
+ uint64_t rtn;
+ int gdq;
+
+ vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
+ gdq = octeontx_pko_dq_gdq(txq);
+
+ if (unlikely(gdq < 0 || vf_bar0 == NULL))
+ return -EINVAL;
+ *(volatile int64_t*)(pko_vf_ctl.fc_ctl + txq) =
+ PKO_DQ_FC_DEPTH_PAGES - PKO_DQ_FC_SKID;
+
+ rte_wmb();
+
+ octeontx_write64(PKO_DQ_FC_DEPTH_PAGES,
+ vf_bar0 + PKO_VF_DQ_FC_STATUS(gdq));
+
+ /* Set the register to return descriptor (packet) count as DEPTH */
+ /* KIND=1, NCB_QUERY_RSP=0 */
+ octeontx_write64(1ull << PKO_DQ_KIND_BIT,
+ vf_bar0 + PKO_VF_DQ_WM_CTL(gdq));
+ reg_off = PKO_VF_DQ_OP_OPEN(gdq);
+
+ rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);
+
+ /* PKO_DQOP_E::OPEN */
+ if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x1)
+ return -EIO;
+
+ switch (rtn >> PKO_DQ_STATUS_BIT) {
+ case 0xC: /* DQALREADYCREATED */
+ case 0x0: /* PASS */
+ break;
+ default:
+ return -EIO;
+ }
+
+ /* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
+ octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
+
+ return rtn & ((1ull << PKO_DQ_OP_BIT) - 1);
+}
+
+/**
+ * Close a PKO DQ
+ * Flush all packets pending.
+ */
+static inline
+int octeontx_pko_dq_close(uint16_t txq)
+{
+ unsigned int reg_off;
+ uint8_t *vf_bar0;
+ uint64_t rtn;
+ int res;
+
+ vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
+ res = octeontx_pko_dq_gdq(txq);
+
+ if (unlikely(res < 0 || vf_bar0 == NULL))
+ return -EINVAL;
+
+ reg_off = PKO_VF_DQ_OP_CLOSE(res);
+
+ rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);
+
+ /* PKO_DQOP_E::CLOSE */
+ if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x2)
+ return -EIO;
+
+ switch (rtn >> PKO_DQ_STATUS_BIT) {
+ case 0xD: /* DQNOTCREATED */
+ case 0x0: /* PASS */
+ break;
+ default:
+ return -EIO;
+ }
+
+ res = rtn & ((1ull << PKO_DQ_OP_BIT) - 1); /* DEPTH */
+ return res;
+}
+
+/* Flush all packets pending on a DQ */
+static inline
+int octeontx_pko_dq_drain(uint16_t txq)
+{
+ unsigned int gdq;
+ uint8_t *vf_bar0;
+ uint64_t reg;
+ int res, timo = PKO_DQ_DRAIN_TO;
+
+ vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
+ res = octeontx_pko_dq_gdq(txq);
+ gdq = res;
+
+ /* DRAIN=1, DRAIN_NULL_LINK=0, SW_XOFF=1 */
+ octeontx_write64(0x3, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
+ /* Wait until buffers leave DQs */
+ reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
+ while (reg && timo > 0) {
+ rte_delay_us(100);
+ timo--;
+ reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
+ }
+ /* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
+ octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
+
+ return reg;
+}
+
+static inline int
+octeontx_pko_dq_range_lookup(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
+ unsigned int dq_num, unsigned int dq_from)
+{
+ unsigned int dq, dq_cnt;
+ unsigned int dq_base;
+
+ dq_cnt = 0;
+ dq = dq_from;
+ while (dq < RTE_DIM(ctl->dq_map)) {
+ dq_base = dq;
+ dq_cnt = 0;
+ while (ctl->dq_map[dq].chanid == ~chanid &&
+ dq < RTE_DIM(ctl->dq_map)) {
+ dq_cnt++;
+ if (dq_cnt == dq_num)
+ return dq_base;
+ dq++;
+ }
+ dq++;
+ }
+ return -1;
+}
+
+static inline void
+octeontx_pko_dq_range_assign(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
+ unsigned int dq_base, unsigned int dq_num)
+{
+ unsigned int dq, dq_cnt;
+
+ dq_cnt = 0;
+ while (dq_cnt < dq_num) {
+ dq = dq_base + dq_cnt;
+
+ octeontx_log_dbg("DQ# %u assigned to CHAN# %" PRIx64 "", dq,
+ chanid);
+
+ ctl->dq_map[dq].chanid = ~chanid;
+ dq_cnt++;
+ }
+}
+
+static inline int
+octeontx_pko_dq_claim(struct octeontx_pko_vf_ctl_s *ctl, unsigned int dq_base,
+ unsigned int dq_num, uint64_t chanid)
+{
+ const uint64_t null_chanid = ~0ull;
+ int dq;
+
+ rte_spinlock_lock(&ctl->lock);
+
+ dq = octeontx_pko_dq_range_lookup(ctl, null_chanid, dq_num, dq_base);
+ if (dq < 0 || (unsigned int)dq != dq_base) {
+ rte_spinlock_unlock(&ctl->lock);
+ return -1;
+ }
+ octeontx_pko_dq_range_assign(ctl, chanid, dq_base, dq_num);
+
+ rte_spinlock_unlock(&ctl->lock);
+
+ return 0;
+}
+
+static inline int
+octeontx_pko_dq_free(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
+{
+ const uint64_t null_chanid = ~0ull;
+ unsigned int dq = 0, dq_cnt = 0;
+
+ rte_spinlock_lock(&ctl->lock);
+ while (dq < RTE_DIM(ctl->dq_map)) {
+ if (ctl->dq_map[dq].chanid == ~chanid) {
+ ctl->dq_map[dq].chanid = ~null_chanid;
+ dq_cnt++;
+ }
+ dq++;
+ }
+ rte_spinlock_unlock(&ctl->lock);
+
+ return dq_cnt > 0 ? 0 : -EINVAL;
+}
+
+int
+octeontx_pko_channel_open(int dq_base, int dq_num, int chanid)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+ int res;
+
+ res = octeontx_pko_dq_claim(ctl, dq_base, dq_num, chanid);
+ if (res < 0)
+ return -1;
+
+ return 0;
+}
+
+int
+octeontx_pko_channel_close(int chanid)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+ int res;
+
+ res = octeontx_pko_dq_free(ctl, chanid);
+ if (res < 0)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+octeontx_pko_chan_start(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
+{
+ unsigned int dq_vf;
+ unsigned int dq, dq_cnt;
+
+ dq_cnt = 0;
+ dq = 0;
+ while (dq < RTE_DIM(ctl->dq_map)) {
+ dq_vf = dq / PKO_VF_NUM_DQ;
+
+ if (!ctl->pko[dq_vf].bar0) {
+ dq += PKO_VF_NUM_DQ;
+ continue;
+ }
+
+ if (ctl->dq_map[dq].chanid != ~chanid) {
+ dq++;
+ continue;
+ }
+
+ if (octeontx_pko_dq_open(dq) < 0)
+ break;
+
+ dq_cnt++;
+ dq++;
+ }
+
+ return dq_cnt;
+}
+
+int
+octeontx_pko_channel_start(int chanid)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+ int dq_cnt;
+
+ dq_cnt = octeontx_pko_chan_start(ctl, chanid);
+ if (dq_cnt < 0)
+ return -1;
+
+ return dq_cnt;
+}
+
+static inline int
+octeontx_pko_chan_stop(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
+{
+ unsigned int dq, dq_cnt, dq_vf;
+ int res;
+
+ dq_cnt = 0;
+ dq = 0;
+ while (dq < RTE_DIM(ctl->dq_map)) {
+ dq_vf = dq / PKO_VF_NUM_DQ;
+
+ if (!ctl->pko[dq_vf].bar0) {
+ dq += PKO_VF_NUM_DQ;
+ continue;
+ }
+
+ if (ctl->dq_map[dq].chanid != ~chanid) {
+ dq++;
+ continue;
+ }
+
+ res = octeontx_pko_dq_drain(dq);
+ if (res > 0)
+ octeontx_log_err("draining DQ%d, buffers left: %x",
+ dq, res);
+
+ res = octeontx_pko_dq_close(dq);
+ if (res < 0)
+ octeontx_log_err("closing DQ%d failed\n", dq);
+
+ dq_cnt++;
+ dq++;
+ }
+ return dq_cnt;
+}
+
+int
+octeontx_pko_channel_stop(int chanid)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+
+ octeontx_pko_chan_stop(ctl, chanid);
+ return 0;
+}
+
+static inline int
+octeontx_pko_channel_query(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
+ void *out, size_t out_elem_size,
+ size_t dq_num, octeontx_pko_dq_getter_t getter)
+{
+ octeontx_dq_t curr;
+ unsigned int dq_vf;
+ unsigned int dq;
+
+ RTE_SET_USED(out_elem_size);
+ memset(&curr, 0, sizeof(octeontx_dq_t));
+
+ dq_vf = dq_num / PKO_VF_NUM_DQ;
+ dq = dq_num % PKO_VF_NUM_DQ;
+
+ if (!ctl->pko[dq_vf].bar0)
+ return -EINVAL;
+
+ if (ctl->dq_map[dq_num].chanid != ~chanid)
+ return -EINVAL;
+
+ uint8_t *iter = (uint8_t *)out;
+ curr.lmtline_va = ctl->pko[dq_vf].bar2;
+ curr.ioreg_va = (void *)((uintptr_t)ctl->pko[dq_vf].bar0
+ + PKO_VF_DQ_OP_SEND((dq), 0));
+ curr.fc_status_va = ctl->fc_ctl + dq;
+
+ octeontx_log_dbg("lmtline=%p ioreg_va=%p fc_status_va=%p",
+ curr.lmtline_va, curr.ioreg_va,
+ curr.fc_status_va);
+
+ getter(&curr, (void *)iter);
+ return 0;
+}
+
+int
+octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
+ size_t dq_num, octeontx_pko_dq_getter_t getter)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+ int dq_cnt;
+
+ dq_cnt = octeontx_pko_channel_query(ctl, chanid, out, out_elem_size,
+ dq_num, getter);
+ if (dq_cnt < 0)
+ return -1;
+
+ return dq_cnt;
+}
+
+int
+octeontx_pko_vf_count(void)
+{
+ int vf_cnt;
+
+ vf_cnt = 0;
+ while (pko_vf_ctl.pko[vf_cnt].bar0)
+ vf_cnt++;
+
+ return vf_cnt;
+}
+
+int
+octeontx_pko_init_fc(const size_t pko_vf_count)
+{
+ int dq_ix;
+ uint64_t reg;
+ uint8_t *vf_bar0;
+ size_t vf_idx;
+ size_t fc_mem_size;
+
+ fc_mem_size = sizeof(struct octeontx_pko_fc_ctl_s) *
+ pko_vf_count * PKO_VF_NUM_DQ;
+
+ pko_vf_ctl.fc_iomem.va = rte_malloc(NULL, fc_mem_size, 128);
+ if (unlikely(!pko_vf_ctl.fc_iomem.va)) {
+ octeontx_log_err("fc_iomem: not enough memory");
+ return -ENOMEM;
+ }
+
+ pko_vf_ctl.fc_iomem.iova = rte_malloc_virt2iova((void *)
+ pko_vf_ctl.fc_iomem.va);
+ pko_vf_ctl.fc_iomem.size = fc_mem_size;
+
+ pko_vf_ctl.fc_ctl =
+ (struct octeontx_pko_fc_ctl_s *)pko_vf_ctl.fc_iomem.va;
+
+ /* Configure Flow-Control feature for all DQs of open VFs */
+ for (vf_idx = 0; vf_idx < pko_vf_count; vf_idx++) {
+ dq_ix = vf_idx * PKO_VF_NUM_DQ;
+
+ vf_bar0 = pko_vf_ctl.pko[vf_idx].bar0;
+
+ reg = (pko_vf_ctl.fc_iomem.iova +
+ (sizeof(struct octeontx_pko_fc_ctl_s) * dq_ix)) & ~0x7F;
+ reg |= /* BASE */
+ (0x2 << 3) | /* HYST_BITS */
+ (((PKO_DQ_FC_STRIDE == PKO_DQ_FC_STRIDE_16) ? 1 : 0) << 2) |
+ (0x1 << 0); /* ENABLE */
+
+ octeontx_write64(reg, vf_bar0 + PKO_VF_DQ_FC_CONFIG);
+
+ octeontx_log_dbg("PKO: bar0 %p VF_idx %d DQ_FC_CFG=%" PRIx64 "",
+ vf_bar0, (int)vf_idx, reg);
+ }
+ return 0;
+}
+
+void
+octeontx_pko_fc_free(void)
+{
+ rte_free(pko_vf_ctl.fc_iomem.va);
+}
+
+static void
+octeontx_pkovf_setup(void)
+{
+ static bool init_once;
+
+ if (!init_once) {
+ unsigned int i;
+
+ rte_spinlock_init(&pko_vf_ctl.lock);
+
+ pko_vf_ctl.fc_iomem = PKO_IOMEM_NULL;
+ pko_vf_ctl.fc_ctl = NULL;
+
+ for (i = 0; i < PKO_VF_MAX; i++) {
+ pko_vf_ctl.pko[i].bar0 = NULL;
+ pko_vf_ctl.pko[i].bar2 = NULL;
+ pko_vf_ctl.pko[i].domain = ~(uint16_t)0;
+ pko_vf_ctl.pko[i].vfid = ~(uint16_t)0;
+ }
+
+ for (i = 0; i < (PKO_VF_MAX * PKO_VF_NUM_DQ); i++)
+ pko_vf_ctl.dq_map[i].chanid = 0;
+
+ init_once = true;
+ }
+}
+
+/* PKOVF pcie device*/
+static int
+pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint64_t val;
+ uint16_t vfid;
+ uint16_t domain;
+ uint8_t *bar0;
+ uint8_t *bar2;
+ struct octeontx_pkovf *res;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL) {
+ octeontx_log_err("Empty bars %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr);
+ return -ENODEV;
+ }
+ bar0 = pci_dev->mem_resource[0].addr;
+ bar2 = pci_dev->mem_resource[2].addr;
+
+ octeontx_pkovf_setup();
+
+ /* get vfid and domain */
+ val = octeontx_read64(bar0 + PKO_VF_DQ_FC_CONFIG);
+ domain = (val >> 7) & 0xffff;
+ vfid = (val >> 23) & 0xffff;
+
+ if (unlikely(vfid >= PKO_VF_MAX)) {
+ octeontx_log_err("pko: Invalid vfid %d", vfid);
+ return -EINVAL;
+ }
+
+ res = &pko_vf_ctl.pko[vfid];
+ res->vfid = vfid;
+ res->domain = domain;
+ res->bar0 = bar0;
+ res->bar2 = bar2;
+
+ octeontx_log_dbg("Domain=%d group=%d", res->domain, res->vfid);
+ return 0;
+}
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_PKO_VF 0xA049
+
+static const struct rte_pci_id pci_pkovf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_PKO_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_pkovf = {
+ .id_table = pci_pkovf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = pkovf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_pkovf, pci_pkovf);
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.h
new file mode 100644
index 00000000..cbd28249
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_PKO_H__
+#define __OCTEONTX_PKO_H__
+
+/* PKO maximum constants */
+#define PKO_VF_MAX (32)
+#define PKO_VF_NUM_DQ (8)
+#define PKO_MAX_NUM_DQ (8)
+#define PKO_DQ_DRAIN_TO (1000)
+
+#define PKO_DQ_FC_SKID (4)
+#define PKO_DQ_FC_DEPTH_PAGES (2048)
+#define PKO_DQ_FC_STRIDE_16 (16)
+#define PKO_DQ_FC_STRIDE_128 (128)
+#define PKO_DQ_FC_STRIDE PKO_DQ_FC_STRIDE_16
+
+#define PKO_DQ_KIND_BIT 49
+#define PKO_DQ_STATUS_BIT 60
+#define PKO_DQ_OP_BIT 48
+
+/* PKO VF register offsets from VF_BAR0 */
+#define PKO_VF_DQ_SW_XOFF(gdq) (0x000100 | (gdq) << 17)
+#define PKO_VF_DQ_WM_CTL(gdq) (0x000130 | (gdq) << 17)
+#define PKO_VF_DQ_WM_CNT(gdq) (0x000150 | (gdq) << 17)
+#define PKO_VF_DQ_FC_CONFIG (0x000160)
+#define PKO_VF_DQ_FC_STATUS(gdq) (0x000168 | (gdq) << 17)
+#define PKO_VF_DQ_OP_SEND(gdq, op) (0x001000 | (gdq) << 17 | (op) << 3)
+#define PKO_VF_DQ_OP_OPEN(gdq) (0x001100 | (gdq) << 17)
+#define PKO_VF_DQ_OP_CLOSE(gdq) (0x001200 | (gdq) << 17)
+#define PKO_VF_DQ_OP_QUERY(gdq) (0x001300 | (gdq) << 17)
+
+/* pko_send_hdr_s + pko_send_link */
+#define PKO_CMD_SZ (2 << 1)
+#define PKO_SEND_GATHER_SUBDC (0x0ull << 60)
+#define PKO_SEND_GATHER_LDTYPE(x) ((x) << 58)
+#define PKO_SEND_GATHER_GAUAR(x) ((x) << 24)
+
+typedef struct octeontx_dq_s {
+ void *lmtline_va;
+ void *ioreg_va;
+ void *fc_status_va;
+} octeontx_dq_t;
+
+/**
+ * Function for extracting information out of a given DQ.
+ *
+ * It is intended to be used in slow path (configuration) in
+ * octeontx_pko_channel_query().
+ *
+ * @param dq The DQ to extract information from.
+ * @param out Pointer to the user's structure he wants to fill.
+ */
+typedef void (*octeontx_pko_dq_getter_t)(octeontx_dq_t *dq, void *out);
+
+int
+octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
+ size_t dq_num, octeontx_pko_dq_getter_t getter);
+int octeontx_pko_channel_open(int dq_base, int dq_num, int chanid);
+int octeontx_pko_channel_close(int chanid);
+int octeontx_pko_channel_start(int chanid);
+int octeontx_pko_channel_stop(int chanid);
+int octeontx_pko_vf_count(void);
+int octeontx_pko_init_fc(const size_t pko_vf_count);
+void octeontx_pko_fc_free(void);
+
+#endif /* __OCTEONTX_PKO_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx/meson.build b/src/spdk/dpdk/drivers/net/octeontx/meson.build
new file mode 100644
index 00000000..0e249eb9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+subdir('base')
+objs = [base_objs]
+
+sources = files('octeontx_rxtx.c',
+ 'octeontx_ethdev.c'
+ )
+
+allow_experimental_apis = true
+
+deps += ['mempool_octeontx', 'eventdev']
+
+includes += include_directories('base')
diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.c b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.c
new file mode 100644
index 00000000..0f3d5d67
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.c
@@ -0,0 +1,1321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_alarm.h>
+#include <rte_branch_prediction.h>
+#include <rte_debug.h>
+#include <rte_devargs.h>
+#include <rte_dev.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_prefetch.h>
+#include <rte_bus_vdev.h>
+
+#include "octeontx_ethdev.h"
+#include "octeontx_rxtx.h"
+#include "octeontx_logs.h"
+
+struct octeontx_vdev_init_params {
+ uint8_t nr_port;
+};
+
+uint16_t
+rte_octeontx_pchan_map[OCTEONTX_MAX_BGX_PORTS][OCTEONTX_MAX_LMAC_PER_BGX];
+
+enum octeontx_link_speed {
+ OCTEONTX_LINK_SPEED_SGMII,
+ OCTEONTX_LINK_SPEED_XAUI,
+ OCTEONTX_LINK_SPEED_RXAUI,
+ OCTEONTX_LINK_SPEED_10G_R,
+ OCTEONTX_LINK_SPEED_40G_R,
+ OCTEONTX_LINK_SPEED_RESERVE1,
+ OCTEONTX_LINK_SPEED_QSGMII,
+ OCTEONTX_LINK_SPEED_RESERVE2
+};
+
+int otx_net_logtype_mbox;
+int otx_net_logtype_init;
+int otx_net_logtype_driver;
+
+RTE_INIT(otx_net_init_log)
+{
+ otx_net_logtype_mbox = rte_log_register("pmd.net.octeontx.mbox");
+ if (otx_net_logtype_mbox >= 0)
+ rte_log_set_level(otx_net_logtype_mbox, RTE_LOG_NOTICE);
+
+ otx_net_logtype_init = rte_log_register("pmd.net.octeontx.init");
+ if (otx_net_logtype_init >= 0)
+ rte_log_set_level(otx_net_logtype_init, RTE_LOG_NOTICE);
+
+ otx_net_logtype_driver = rte_log_register("pmd.net.octeontx.driver");
+ if (otx_net_logtype_driver >= 0)
+ rte_log_set_level(otx_net_logtype_driver, RTE_LOG_NOTICE);
+}
+
+/* Parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *)extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+ octeontx_log_err("argument has to be positive.");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+octeontx_parse_vdev_init_params(struct octeontx_vdev_init_params *params,
+ struct rte_vdev_device *dev)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ static const char * const octeontx_vdev_valid_params[] = {
+ OCTEONTX_VDEV_NR_PORT_ARG,
+ NULL
+ };
+
+ const char *input_args = rte_vdev_device_args(dev);
+ if (params == NULL)
+ return -EINVAL;
+
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ octeontx_vdev_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ OCTEONTX_VDEV_NR_PORT_ARG,
+ &parse_integer_arg,
+ &params->nr_port);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+octeontx_port_open(struct octeontx_nic *nic)
+{
+ octeontx_mbox_bgx_port_conf_t bgx_port_conf;
+ int res;
+
+ res = 0;
+ memset(&bgx_port_conf, 0x0, sizeof(bgx_port_conf));
+ PMD_INIT_FUNC_TRACE();
+
+ res = octeontx_bgx_port_open(nic->port_id, &bgx_port_conf);
+ if (res < 0) {
+ octeontx_log_err("failed to open port %d", res);
+ return res;
+ }
+
+ nic->node = bgx_port_conf.node;
+ nic->port_ena = bgx_port_conf.enable;
+ nic->base_ichan = bgx_port_conf.base_chan;
+ nic->base_ochan = bgx_port_conf.base_chan;
+ nic->num_ichans = bgx_port_conf.num_chans;
+ nic->num_ochans = bgx_port_conf.num_chans;
+ nic->mtu = bgx_port_conf.mtu;
+ nic->bpen = bgx_port_conf.bpen;
+ nic->fcs_strip = bgx_port_conf.fcs_strip;
+ nic->bcast_mode = bgx_port_conf.bcast_mode;
+ nic->mcast_mode = bgx_port_conf.mcast_mode;
+ nic->speed = bgx_port_conf.mode;
+
+ memcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0], ETHER_ADDR_LEN);
+
+ octeontx_log_dbg("port opened %d", nic->port_id);
+ return res;
+}
+
+static void
+octeontx_port_close(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ octeontx_bgx_port_close(nic->port_id);
+ octeontx_log_dbg("port closed %d", nic->port_id);
+}
+
+static int
+octeontx_port_start(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return octeontx_bgx_port_start(nic->port_id);
+}
+
+static int
+octeontx_port_stop(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return octeontx_bgx_port_stop(nic->port_id);
+}
+
+static void
+octeontx_port_promisc_set(struct octeontx_nic *nic, int en)
+{
+ struct rte_eth_dev *dev;
+ int res;
+
+ res = 0;
+ PMD_INIT_FUNC_TRACE();
+ dev = nic->dev;
+
+ res = octeontx_bgx_port_promisc_set(nic->port_id, en);
+ if (res < 0)
+ octeontx_log_err("failed to set promiscuous mode %d",
+ nic->port_id);
+
+ /* Set proper flag for the mode */
+ dev->data->promiscuous = (en != 0) ? 1 : 0;
+
+ octeontx_log_dbg("port %d : promiscuous mode %s",
+ nic->port_id, en ? "set" : "unset");
+}
+
+static int
+octeontx_port_stats(struct octeontx_nic *nic, struct rte_eth_stats *stats)
+{
+ octeontx_mbox_bgx_port_stats_t bgx_stats;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ res = octeontx_bgx_port_stats(nic->port_id, &bgx_stats);
+ if (res < 0) {
+ octeontx_log_err("failed to get port stats %d", nic->port_id);
+ return res;
+ }
+
+ stats->ipackets = bgx_stats.rx_packets;
+ stats->ibytes = bgx_stats.rx_bytes;
+ stats->imissed = bgx_stats.rx_dropped;
+ stats->ierrors = bgx_stats.rx_errors;
+ stats->opackets = bgx_stats.tx_packets;
+ stats->obytes = bgx_stats.tx_bytes;
+ stats->oerrors = bgx_stats.tx_errors;
+
+ octeontx_log_dbg("port%d stats inpkts=%" PRIx64 " outpkts=%" PRIx64 "",
+ nic->port_id, stats->ipackets, stats->opackets);
+
+ return 0;
+}
+
+static void
+octeontx_port_stats_clr(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ octeontx_bgx_port_stats_clr(nic->port_id);
+}
+
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+ dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+
+ dev_conf->nb_event_ports = info->max_event_ports;
+ dev_conf->nb_event_queues = info->max_event_queues;
+
+ dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+ dev_conf->nb_event_port_dequeue_depth =
+ info->max_event_port_dequeue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_events_limit =
+ info->max_num_events;
+}
+
+static int
+octeontx_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *conf = &data->dev_conf;
+ struct rte_eth_rxmode *rxmode = &conf->rxmode;
+ struct rte_eth_txmode *txmode = &conf->txmode;
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ RTE_SET_USED(conf);
+
+ if (!rte_eal_has_hugepages()) {
+ octeontx_log_err("huge page is not configured");
+ return -EINVAL;
+ }
+
+ if (txmode->mq_mode) {
+ octeontx_log_err("tx mq_mode DCB or VMDq not supported");
+ return -EINVAL;
+ }
+
+ if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != ETH_MQ_RX_RSS) {
+ octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
+ return -EINVAL;
+ }
+
+ /* KEEP_CRC offload flag is not supported by PMD
+ * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
+ */
+ if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
+ PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
+ rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ }
+
+ if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+ PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
+ txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
+ }
+
+ if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+ octeontx_log_err("setting link speed/duplex not supported");
+ return -EINVAL;
+ }
+
+ if (conf->dcb_capability_en) {
+ octeontx_log_err("DCB enable not supported");
+ return -EINVAL;
+ }
+
+ if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ octeontx_log_err("flow director not supported");
+ return -EINVAL;
+ }
+
+ nic->num_tx_queues = dev->data->nb_tx_queues;
+
+ ret = octeontx_pko_channel_open(nic->port_id * PKO_VF_NUM_DQ,
+ nic->num_tx_queues,
+ nic->base_ochan);
+ if (ret) {
+ octeontx_log_err("failed to open channel %d no-of-txq %d",
+ nic->base_ochan, nic->num_tx_queues);
+ return -EFAULT;
+ }
+
+ nic->pki.classifier_enable = false;
+ nic->pki.hash_enable = true;
+ nic->pki.initialized = false;
+
+ return 0;
+}
+
+static void
+octeontx_dev_close(struct rte_eth_dev *dev)
+{
+ struct octeontx_txq *txq = NULL;
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ unsigned int i;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_event_dev_close(nic->evdev);
+
+ ret = octeontx_pko_channel_close(nic->base_ochan);
+ if (ret < 0) {
+ octeontx_log_err("failed to close channel %d VF%d %d %d",
+ nic->base_ochan, nic->port_id, nic->num_tx_queues,
+ ret);
+ }
+ /* Free txq resources for this port */
+ for (i = 0; i < nic->num_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+
+ rte_free(txq);
+ }
+
+ dev->tx_pkt_burst = NULL;
+ dev->rx_pkt_burst = NULL;
+}
+
+static int
+octeontx_dev_start(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ /*
+ * Tx start
+ */
+ dev->tx_pkt_burst = octeontx_xmit_pkts;
+ ret = octeontx_pko_channel_start(nic->base_ochan);
+ if (ret < 0) {
+ octeontx_log_err("fail to conf VF%d no. txq %d chan %d ret %d",
+ nic->port_id, nic->num_tx_queues, nic->base_ochan,
+ ret);
+ goto error;
+ }
+
+ /*
+ * Rx start
+ */
+ dev->rx_pkt_burst = octeontx_recv_pkts;
+ ret = octeontx_pki_port_start(nic->port_id);
+ if (ret < 0) {
+ octeontx_log_err("fail to start Rx on port %d", nic->port_id);
+ goto channel_stop_error;
+ }
+
+ /*
+ * Start port
+ */
+ ret = octeontx_port_start(nic);
+ if (ret < 0) {
+ octeontx_log_err("failed start port %d", ret);
+ goto pki_port_stop_error;
+ }
+
+ PMD_TX_LOG(DEBUG, "pko: start channel %d no.of txq %d port %d",
+ nic->base_ochan, nic->num_tx_queues, nic->port_id);
+
+ ret = rte_event_dev_start(nic->evdev);
+ if (ret < 0) {
+ octeontx_log_err("failed to start evdev: ret (%d)", ret);
+ goto pki_port_stop_error;
+ }
+
+ /* Success */
+ return ret;
+
+pki_port_stop_error:
+ octeontx_pki_port_stop(nic->port_id);
+channel_stop_error:
+ octeontx_pko_channel_stop(nic->base_ochan);
+error:
+ return ret;
+}
+
+static void
+octeontx_dev_stop(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_event_dev_stop(nic->evdev);
+
+ ret = octeontx_port_stop(nic);
+ if (ret < 0) {
+ octeontx_log_err("failed to req stop port %d res=%d",
+ nic->port_id, ret);
+ return;
+ }
+
+ ret = octeontx_pki_port_stop(nic->port_id);
+ if (ret < 0) {
+ octeontx_log_err("failed to stop pki port %d res=%d",
+ nic->port_id, ret);
+ return;
+ }
+
+ ret = octeontx_pko_channel_stop(nic->base_ochan);
+ if (ret < 0) {
+ octeontx_log_err("failed to stop channel %d VF%d %d %d",
+ nic->base_ochan, nic->port_id, nic->num_tx_queues,
+ ret);
+ return;
+ }
+}
+
+static void
+octeontx_dev_promisc_enable(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ octeontx_port_promisc_set(nic, 1);
+}
+
+static void
+octeontx_dev_promisc_disable(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ octeontx_port_promisc_set(nic, 0);
+}
+
+static int
+octeontx_port_link_status(struct octeontx_nic *nic)
+{
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+ res = octeontx_bgx_port_link_status(nic->port_id);
+ if (res < 0) {
+ octeontx_log_err("failed to get port %d link status",
+ nic->port_id);
+ return res;
+ }
+
+ nic->link_up = (uint8_t)res;
+ octeontx_log_dbg("port %d link status %d", nic->port_id, nic->link_up);
+
+ return res;
+}
+
+/*
+ * Return 0 means link status changed, -1 means not changed
+ */
+static int
+octeontx_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct rte_eth_link link;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ res = octeontx_port_link_status(nic);
+ if (res < 0) {
+ octeontx_log_err("failed to request link status %d", res);
+ return res;
+ }
+
+ link.link_status = nic->link_up;
+
+ switch (nic->speed) {
+ case OCTEONTX_LINK_SPEED_SGMII:
+ link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+
+ case OCTEONTX_LINK_SPEED_XAUI:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+
+ case OCTEONTX_LINK_SPEED_RXAUI:
+ case OCTEONTX_LINK_SPEED_10G_R:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case OCTEONTX_LINK_SPEED_QSGMII:
+ link.link_speed = ETH_SPEED_NUM_5G;
+ break;
+ case OCTEONTX_LINK_SPEED_40G_R:
+ link.link_speed = ETH_SPEED_NUM_40G;
+ break;
+
+ case OCTEONTX_LINK_SPEED_RESERVE1:
+ case OCTEONTX_LINK_SPEED_RESERVE2:
+ default:
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ octeontx_log_err("incorrect link speed %d", nic->speed);
+ break;
+ }
+
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+octeontx_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ return octeontx_port_stats(nic, stats);
+}
+
+static void
+octeontx_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ octeontx_port_stats_clr(nic);
+}
+
+static int
+octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ ret = octeontx_bgx_port_mac_set(nic->port_id, addr->addr_bytes);
+ if (ret != 0)
+ octeontx_log_err("failed to set MAC address on port %d",
+ nic->port_id);
+
+ return ret;
+}
+
+static void
+octeontx_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ RTE_SET_USED(dev);
+
+ /* Autonegotiation may be disabled */
+ dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+ dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_40G;
+
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = PKI_MAX_PKTLEN;
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = PKO_MAX_NUM_DQ;
+ dev_info->min_rx_bufsize = 0;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = 0,
+ .rx_drop_en = 0,
+ .offloads = OCTEONTX_RX_OFFLOADS,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = 0,
+ .offloads = OCTEONTX_TX_OFFLOADS,
+ };
+
+ dev_info->rx_offload_capa = OCTEONTX_RX_OFFLOADS;
+ dev_info->tx_offload_capa = OCTEONTX_TX_OFFLOADS;
+}
+
+static void
+octeontx_dq_info_getter(octeontx_dq_t *dq, void *out)
+{
+ ((octeontx_dq_t *)out)->lmtline_va = dq->lmtline_va;
+ ((octeontx_dq_t *)out)->ioreg_va = dq->ioreg_va;
+ ((octeontx_dq_t *)out)->fc_status_va = dq->fc_status_va;
+}
+
+static int
+octeontx_vf_start_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
+ uint16_t qidx)
+{
+ struct octeontx_txq *txq;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ txq = dev->data->tx_queues[qidx];
+
+ res = octeontx_pko_channel_query_dqs(nic->base_ochan,
+ &txq->dq,
+ sizeof(octeontx_dq_t),
+ txq->queue_id,
+ octeontx_dq_info_getter);
+ if (res < 0) {
+ res = -EFAULT;
+ goto close_port;
+ }
+
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ return res;
+
+close_port:
+ (void)octeontx_port_stop(nic);
+ octeontx_pko_channel_stop(nic->base_ochan);
+ octeontx_pko_channel_close(nic->base_ochan);
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return res;
+}
+
+static int
+octeontx_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ qidx = qidx % PKO_VF_NUM_DQ;
+ return octeontx_vf_start_tx_queue(dev, nic, qidx);
+}
+
+static inline int
+octeontx_vf_stop_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
+ uint16_t qidx)
+{
+ int ret = 0;
+
+ RTE_SET_USED(nic);
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return ret;
+}
+
+static int
+octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ qidx = qidx % PKO_VF_NUM_DQ;
+
+ return octeontx_vf_stop_tx_queue(dev, nic, qidx);
+}
+
+static void
+octeontx_dev_tx_queue_release(void *tx_queue)
+{
+ struct octeontx_txq *txq = tx_queue;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (txq) {
+ res = octeontx_dev_tx_queue_stop(txq->eth_dev, txq->queue_id);
+ if (res < 0)
+ octeontx_log_err("failed stop tx_queue(%d)\n",
+ txq->queue_id);
+
+ rte_free(txq);
+ }
+}
+
+static int
+octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct octeontx_txq *txq = NULL;
+ uint16_t dq_num;
+ int res = 0;
+
+ RTE_SET_USED(nb_desc);
+ RTE_SET_USED(socket_id);
+
+ dq_num = (nic->port_id * PKO_VF_NUM_DQ) + qidx;
+
+ /* Socket id check */
+ if (socket_id != (unsigned int)SOCKET_ID_ANY &&
+ socket_id != (unsigned int)nic->node)
+ PMD_TX_LOG(INFO, "socket_id expected %d, configured %d",
+ socket_id, nic->node);
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->tx_queues[qidx] != NULL) {
+ PMD_TX_LOG(DEBUG, "freeing memory prior to re-allocation %d",
+ qidx);
+ octeontx_dev_tx_queue_release(dev->data->tx_queues[qidx]);
+ dev->data->tx_queues[qidx] = NULL;
+ }
+
+ /* Allocating tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
+ RTE_CACHE_LINE_SIZE, nic->node);
+ if (txq == NULL) {
+ octeontx_log_err("failed to allocate txq=%d", qidx);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ txq->eth_dev = dev;
+ txq->queue_id = dq_num;
+ dev->data->tx_queues[qidx] = txq;
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ res = octeontx_pko_channel_query_dqs(nic->base_ochan,
+ &txq->dq,
+ sizeof(octeontx_dq_t),
+ txq->queue_id,
+ octeontx_dq_info_getter);
+ if (res < 0) {
+ res = -EFAULT;
+ goto err;
+ }
+
+ PMD_TX_LOG(DEBUG, "[%d]:[%d] txq=%p nb_desc=%d lmtline=%p ioreg_va=%p fc_status_va=%p",
+ qidx, txq->queue_id, txq, nb_desc, txq->dq.lmtline_va,
+ txq->dq.ioreg_va,
+ txq->dq.fc_status_va);
+
+ return res;
+
+err:
+ if (txq)
+ rte_free(txq);
+
+ return res;
+}
+
+static int
+octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct rte_mempool_ops *mp_ops = NULL;
+ struct octeontx_rxq *rxq = NULL;
+ pki_pktbuf_cfg_t pktbuf_conf;
+ pki_hash_cfg_t pki_hash;
+ pki_qos_cfg_t pki_qos;
+ uintptr_t pool;
+ int ret, port;
+ uint16_t gaura;
+ unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
+ unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
+
+ RTE_SET_USED(nb_desc);
+
+ memset(&pktbuf_conf, 0, sizeof(pktbuf_conf));
+ memset(&pki_hash, 0, sizeof(pki_hash));
+ memset(&pki_qos, 0, sizeof(pki_qos));
+
+ mp_ops = rte_mempool_get_ops(mb_pool->ops_index);
+ if (strcmp(mp_ops->name, "octeontx_fpavf")) {
+ octeontx_log_err("failed to find octeontx_fpavf mempool");
+ return -ENOTSUP;
+ }
+
+ /* Handle forbidden configurations */
+ if (nic->pki.classifier_enable) {
+ octeontx_log_err("cannot setup queue %d. "
+ "Classifier option unsupported", qidx);
+ return -EINVAL;
+ }
+
+ port = nic->port_id;
+
+ /* Rx deferred start is not supported */
+ if (rx_conf->rx_deferred_start) {
+ octeontx_log_err("rx deferred start not supported");
+ return -EINVAL;
+ }
+
+ /* Verify queue index */
+ if (qidx >= dev->data->nb_rx_queues) {
+ octeontx_log_err("QID %d not supporteded (0 - %d available)\n",
+ qidx, (dev->data->nb_rx_queues - 1));
+ return -ENOTSUP;
+ }
+
+ /* Socket id check */
+ if (socket_id != (unsigned int)SOCKET_ID_ANY &&
+ socket_id != (unsigned int)nic->node)
+ PMD_RX_LOG(INFO, "socket_id expected %d, configured %d",
+ socket_id, nic->node);
+
+ /* Allocating rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct octeontx_rxq),
+ RTE_CACHE_LINE_SIZE, nic->node);
+ if (rxq == NULL) {
+ octeontx_log_err("failed to allocate rxq=%d", qidx);
+ return -ENOMEM;
+ }
+
+ if (!nic->pki.initialized) {
+ pktbuf_conf.port_type = 0;
+ pki_hash.port_type = 0;
+ pki_qos.port_type = 0;
+
+ pktbuf_conf.mmask.f_wqe_skip = 1;
+ pktbuf_conf.mmask.f_first_skip = 1;
+ pktbuf_conf.mmask.f_later_skip = 1;
+ pktbuf_conf.mmask.f_mbuff_size = 1;
+ pktbuf_conf.mmask.f_cache_mode = 1;
+
+ pktbuf_conf.wqe_skip = OCTTX_PACKET_WQE_SKIP;
+ pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP;
+ pktbuf_conf.later_skip = OCTTX_PACKET_LATER_SKIP;
+ pktbuf_conf.mbuff_size = (mb_pool->elt_size -
+ RTE_PKTMBUF_HEADROOM -
+ sizeof(struct rte_mbuf));
+
+ pktbuf_conf.cache_mode = PKI_OPC_MODE_STF2_STT;
+
+ ret = octeontx_pki_port_pktbuf_config(port, &pktbuf_conf);
+ if (ret != 0) {
+ octeontx_log_err("fail to configure pktbuf for port %d",
+ port);
+ rte_free(rxq);
+ return ret;
+ }
+ PMD_RX_LOG(DEBUG, "Port %d Rx pktbuf configured:\n"
+ "\tmbuf_size:\t0x%0x\n"
+ "\twqe_skip:\t0x%0x\n"
+ "\tfirst_skip:\t0x%0x\n"
+ "\tlater_skip:\t0x%0x\n"
+ "\tcache_mode:\t%s\n",
+ port,
+ pktbuf_conf.mbuff_size,
+ pktbuf_conf.wqe_skip,
+ pktbuf_conf.first_skip,
+ pktbuf_conf.later_skip,
+ (pktbuf_conf.cache_mode ==
+ PKI_OPC_MODE_STT) ?
+ "STT" :
+ (pktbuf_conf.cache_mode ==
+ PKI_OPC_MODE_STF) ?
+ "STF" :
+ (pktbuf_conf.cache_mode ==
+ PKI_OPC_MODE_STF1_STT) ?
+ "STF1_STT" : "STF2_STT");
+
+ if (nic->pki.hash_enable) {
+ pki_hash.tag_dlc = 1;
+ pki_hash.tag_slc = 1;
+ pki_hash.tag_dlf = 1;
+ pki_hash.tag_slf = 1;
+ pki_hash.tag_prt = 1;
+ octeontx_pki_port_hash_config(port, &pki_hash);
+ }
+
+ pool = (uintptr_t)mb_pool->pool_id;
+
+ /* Get the gaura Id */
+ gaura = octeontx_fpa_bufpool_gaura(pool);
+
+ pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
+ pki_qos.num_entry = 1;
+ pki_qos.drop_policy = 0;
+ pki_qos.tag_type = 0L;
+ pki_qos.qos_entry[0].port_add = 0;
+ pki_qos.qos_entry[0].gaura = gaura;
+ pki_qos.qos_entry[0].ggrp_ok = ev_queues;
+ pki_qos.qos_entry[0].ggrp_bad = ev_queues;
+ pki_qos.qos_entry[0].grptag_bad = 0;
+ pki_qos.qos_entry[0].grptag_ok = 0;
+
+ ret = octeontx_pki_port_create_qos(port, &pki_qos);
+ if (ret < 0) {
+ octeontx_log_err("failed to create QOS port=%d, q=%d",
+ port, qidx);
+ rte_free(rxq);
+ return ret;
+ }
+ nic->pki.initialized = true;
+ }
+
+ rxq->port_id = nic->port_id;
+ rxq->eth_dev = dev;
+ rxq->queue_id = qidx;
+ rxq->evdev = nic->evdev;
+ rxq->ev_queues = ev_queues;
+ rxq->ev_ports = ev_ports;
+
+ dev->data->rx_queues[qidx] = rxq;
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+static void
+octeontx_dev_rx_queue_release(void *rxq)
+{
+ rte_free(rxq);
+}
+
+static const uint32_t *
+octeontx_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == octeontx_recv_pkts)
+ return ptypes;
+
+ return NULL;
+}
+
+static int
+octeontx_pool_ops(struct rte_eth_dev *dev, const char *pool)
+{
+ RTE_SET_USED(dev);
+
+ if (!strcmp(pool, "octeontx_fpavf"))
+ return 0;
+
+ return -ENOTSUP;
+}
+
+/* Initialize and register driver with DPDK Application */
+static const struct eth_dev_ops octeontx_dev_ops = {
+ .dev_configure = octeontx_dev_configure,
+ .dev_infos_get = octeontx_dev_info,
+ .dev_close = octeontx_dev_close,
+ .dev_start = octeontx_dev_start,
+ .dev_stop = octeontx_dev_stop,
+ .promiscuous_enable = octeontx_dev_promisc_enable,
+ .promiscuous_disable = octeontx_dev_promisc_disable,
+ .link_update = octeontx_dev_link_update,
+ .stats_get = octeontx_dev_stats_get,
+ .stats_reset = octeontx_dev_stats_reset,
+ .mac_addr_set = octeontx_dev_default_mac_addr_set,
+ .tx_queue_start = octeontx_dev_tx_queue_start,
+ .tx_queue_stop = octeontx_dev_tx_queue_stop,
+ .tx_queue_setup = octeontx_dev_tx_queue_setup,
+ .tx_queue_release = octeontx_dev_tx_queue_release,
+ .rx_queue_setup = octeontx_dev_rx_queue_setup,
+ .rx_queue_release = octeontx_dev_rx_queue_release,
+ .dev_supported_ptypes_get = octeontx_dev_supported_ptypes_get,
+ .pool_ops_supported = octeontx_pool_ops,
+};
+
+/* Create Ethdev interface per BGX LMAC ports */
+static int
+octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
+ int socket_id)
+{
+ int res;
+ char octtx_name[OCTEONTX_MAX_NAME_LEN];
+ struct octeontx_nic *nic = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ struct rte_eth_dev_data *data;
+ const char *name = rte_vdev_device_name(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ sprintf(octtx_name, "%s_%d", name, port);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_attach_secondary(octtx_name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ eth_dev->dev_ops = &octeontx_dev_ops;
+ eth_dev->device = &dev->device;
+ eth_dev->tx_pkt_burst = octeontx_xmit_pkts;
+ eth_dev->rx_pkt_burst = octeontx_recv_pkts;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ nic = rte_zmalloc_socket(octtx_name, sizeof(*nic), 0, socket_id);
+ if (nic == NULL) {
+ octeontx_log_err("failed to allocate nic structure");
+ res = -ENOMEM;
+ goto err;
+ }
+
+ nic->port_id = port;
+ nic->evdev = evdev;
+
+ res = octeontx_port_open(nic);
+ if (res < 0)
+ goto err;
+
+ /* Rx side port configuration */
+ res = octeontx_pki_port_open(port);
+ if (res != 0) {
+ octeontx_log_err("failed to open PKI port %d", port);
+ res = -ENODEV;
+ goto err;
+ }
+
+ /* Reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(octtx_name);
+ if (eth_dev == NULL) {
+ octeontx_log_err("failed to allocate rte_eth_dev");
+ res = -ENOMEM;
+ goto err;
+ }
+
+ eth_dev->device = &dev->device;
+ eth_dev->intr_handle = NULL;
+ eth_dev->data->kdrv = RTE_KDRV_NONE;
+ eth_dev->data->numa_node = dev->device.numa_node;
+
+ data = eth_dev->data;
+ data->dev_private = nic;
+ data->port_id = eth_dev->data->port_id;
+
+ nic->ev_queues = 1;
+ nic->ev_ports = 1;
+
+ data->dev_link.link_status = ETH_LINK_DOWN;
+ data->dev_started = 0;
+ data->promiscuous = 0;
+ data->all_multicast = 0;
+ data->scattered_rx = 0;
+
+ data->mac_addrs = rte_zmalloc_socket(octtx_name, ETHER_ADDR_LEN, 0,
+ socket_id);
+ if (data->mac_addrs == NULL) {
+ octeontx_log_err("failed to allocate memory for mac_addrs");
+ res = -ENOMEM;
+ goto err;
+ }
+
+ eth_dev->dev_ops = &octeontx_dev_ops;
+
+ /* Finally save ethdev pointer to the NIC structure */
+ nic->dev = eth_dev;
+
+ if (nic->port_id != data->port_id) {
+ octeontx_log_err("eth_dev->port_id (%d) is diff to orig (%d)",
+ data->port_id, nic->port_id);
+ res = -EINVAL;
+ goto err;
+ }
+
+ /* Update port_id mac to eth_dev */
+ memcpy(data->mac_addrs, nic->mac_addr, ETHER_ADDR_LEN);
+
+ PMD_INIT_LOG(DEBUG, "ethdev info: ");
+ PMD_INIT_LOG(DEBUG, "port %d, port_ena %d ochan %d num_ochan %d tx_q %d",
+ nic->port_id, nic->port_ena,
+ nic->base_ochan, nic->num_ochans,
+ nic->num_tx_queues);
+ PMD_INIT_LOG(DEBUG, "speed %d mtu %d", nic->speed, nic->mtu);
+
+ rte_octeontx_pchan_map[(nic->base_ochan >> 8) & 0x7]
+ [(nic->base_ochan >> 4) & 0xF] = data->port_id;
+
+ rte_eth_dev_probing_finish(eth_dev);
+ return data->port_id;
+
+err:
+ if (nic)
+ octeontx_port_close(nic);
+
+ if (eth_dev != NULL) {
+ rte_free(eth_dev->data->mac_addrs);
+ rte_free(data);
+ rte_free(nic);
+ rte_eth_dev_release_port(eth_dev);
+ }
+
+ return res;
+}
+
+/* Un initialize octeontx device */
+static int
+octeontx_remove(struct rte_vdev_device *dev)
+{
+ char octtx_name[OCTEONTX_MAX_NAME_LEN];
+ struct rte_eth_dev *eth_dev = NULL;
+ struct octeontx_nic *nic = NULL;
+ int i;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT; i++) {
+ sprintf(octtx_name, "eth_octeontx_%d", i);
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocated(octtx_name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ nic = octeontx_pmd_priv(eth_dev);
+ rte_event_dev_stop(nic->evdev);
+ PMD_INIT_LOG(INFO, "Closing octeontx device %s", octtx_name);
+
+ rte_free(eth_dev->data->mac_addrs);
+ rte_free(eth_dev->data->dev_private);
+ rte_eth_dev_release_port(eth_dev);
+ rte_event_dev_close(nic->evdev);
+ }
+
+ /* Free FC resource */
+ octeontx_pko_fc_free();
+
+ return 0;
+}
+
+/* Initialize octeontx device */
+static int
+octeontx_probe(struct rte_vdev_device *dev)
+{
+ const char *dev_name;
+ static int probe_once;
+ uint8_t socket_id, qlist;
+ int tx_vfcnt, port_id, evdev, qnum, pnum, res, i;
+ struct rte_event_dev_config dev_conf;
+ const char *eventdev_name = "event_octeontx";
+ struct rte_event_dev_info info;
+ struct rte_eth_dev *eth_dev;
+
+ struct octeontx_vdev_init_params init_params = {
+ OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT
+ };
+
+ dev_name = rte_vdev_device_name(dev);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(dev_name);
+ if (!eth_dev) {
+ RTE_LOG(ERR, PMD, "Failed to probe %s\n", dev_name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &octeontx_dev_ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ res = octeontx_parse_vdev_init_params(&init_params, dev);
+ if (res < 0)
+ return -EINVAL;
+
+ if (init_params.nr_port > OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT) {
+ octeontx_log_err("nr_port (%d) > max (%d)", init_params.nr_port,
+ OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT);
+ return -ENOTSUP;
+ }
+
+ PMD_INIT_LOG(DEBUG, "initializing %s pmd", dev_name);
+
+ socket_id = rte_socket_id();
+
+ tx_vfcnt = octeontx_pko_vf_count();
+
+ if (tx_vfcnt < init_params.nr_port) {
+ octeontx_log_err("not enough PKO (%d) for port number (%d)",
+ tx_vfcnt, init_params.nr_port);
+ return -EINVAL;
+ }
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ octeontx_log_err("eventdev %s not found", eventdev_name);
+ return -ENODEV;
+ }
+
+ res = rte_event_dev_info_get(evdev, &info);
+ if (res < 0) {
+ octeontx_log_err("failed to eventdev info %d", res);
+ return -EINVAL;
+ }
+
+ PMD_INIT_LOG(DEBUG, "max_queue %d max_port %d",
+ info.max_event_queues, info.max_event_ports);
+
+ if (octeontx_pko_init_fc(tx_vfcnt))
+ return -ENOMEM;
+
+ devconf_set_default_sane_values(&dev_conf, &info);
+ res = rte_event_dev_configure(evdev, &dev_conf);
+ if (res < 0)
+ goto parse_error;
+
+ rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ (uint32_t *)&pnum);
+ rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ (uint32_t *)&qnum);
+ if (pnum < qnum) {
+ octeontx_log_err("too few event ports (%d) for event_q(%d)",
+ pnum, qnum);
+ res = -EINVAL;
+ goto parse_error;
+ }
+ if (pnum > qnum) {
+ /*
+ * We don't poll on event ports
+ * that do not have any queues assigned.
+ */
+ pnum = qnum;
+ PMD_INIT_LOG(INFO,
+ "reducing number of active event ports to %d", pnum);
+ }
+ for (i = 0; i < qnum; i++) {
+ res = rte_event_queue_setup(evdev, i, NULL);
+ if (res < 0) {
+ octeontx_log_err("failed to setup event_q(%d): res %d",
+ i, res);
+ goto parse_error;
+ }
+ }
+
+ for (i = 0; i < pnum; i++) {
+ res = rte_event_port_setup(evdev, i, NULL);
+ if (res < 0) {
+ res = -ENODEV;
+ octeontx_log_err("failed to setup ev port(%d) res=%d",
+ i, res);
+ goto parse_error;
+ }
+ /* Link one queue to one event port */
+ qlist = i;
+ res = rte_event_port_link(evdev, i, &qlist, NULL, 1);
+ if (res < 0) {
+ res = -ENODEV;
+ octeontx_log_err("failed to link port (%d): res=%d",
+ i, res);
+ goto parse_error;
+ }
+ }
+
+ /* Create ethdev interface */
+ for (i = 0; i < init_params.nr_port; i++) {
+ port_id = octeontx_create(dev, i, evdev, socket_id);
+ if (port_id < 0) {
+ octeontx_log_err("failed to create device %s",
+ dev_name);
+ res = -ENODEV;
+ goto parse_error;
+ }
+
+ PMD_INIT_LOG(INFO, "created ethdev %s for port %d", dev_name,
+ port_id);
+ }
+
+ if (probe_once) {
+ octeontx_log_err("interface %s not supported", dev_name);
+ octeontx_remove(dev);
+ res = -ENOTSUP;
+ goto parse_error;
+ }
+ rte_mbuf_set_platform_mempool_ops("octeontx_fpavf");
+ probe_once = 1;
+
+ return 0;
+
+parse_error:
+ octeontx_pko_fc_free();
+ return res;
+}
+
+static struct rte_vdev_driver octeontx_pmd_drv = {
+ .probe = octeontx_probe,
+ .remove = octeontx_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(OCTEONTX_PMD, octeontx_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(OCTEONTX_PMD, eth_octeontx);
+RTE_PMD_REGISTER_PARAM_STRING(OCTEONTX_PMD, "nr_port=<int> ");
diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.h b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.h
new file mode 100644
index 00000000..14f16969
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_ETHDEV_H__
+#define __OCTEONTX_ETHDEV_H__
+
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_ethdev_driver.h>
+#include <rte_eventdev.h>
+#include <rte_mempool.h>
+#include <rte_memory.h>
+
+#include <octeontx_fpavf.h>
+
+#include "base/octeontx_bgx.h"
+#include "base/octeontx_pki_var.h"
+#include "base/octeontx_pkivf.h"
+#include "base/octeontx_pkovf.h"
+#include "base/octeontx_io.h"
+
+#define OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT 12
+#define OCTEONTX_VDEV_NR_PORT_ARG ("nr_port")
+#define OCTEONTX_MAX_NAME_LEN 32
+
+#define OCTEONTX_MAX_BGX_PORTS 4
+#define OCTEONTX_MAX_LMAC_PER_BGX 4
+
+#define OCTEONTX_RX_OFFLOADS (DEV_RX_OFFLOAD_CRC_STRIP \
+ | DEV_RX_OFFLOAD_CHECKSUM)
+#define OCTEONTX_TX_OFFLOADS DEV_TX_OFFLOAD_MT_LOCKFREE
+
+static inline struct octeontx_nic *
+octeontx_pmd_priv(struct rte_eth_dev *dev)
+{
+ return dev->data->dev_private;
+}
+
+extern uint16_t
+rte_octeontx_pchan_map[OCTEONTX_MAX_BGX_PORTS][OCTEONTX_MAX_LMAC_PER_BGX];
+
+/* Octeontx ethdev nic */
+struct octeontx_nic {
+ struct rte_eth_dev *dev;
+ int node;
+ int port_id;
+ int port_ena;
+ int base_ichan;
+ int num_ichans;
+ int base_ochan;
+ int num_ochans;
+ uint8_t evdev;
+ uint8_t bpen;
+ uint8_t fcs_strip;
+ uint8_t bcast_mode;
+ uint8_t mcast_mode;
+ uint16_t num_tx_queues;
+ uint64_t hwcap;
+ uint8_t link_up;
+ uint8_t duplex;
+ uint8_t speed;
+ uint16_t mtu;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ /* Rx port parameters */
+ struct {
+ bool classifier_enable;
+ bool hash_enable;
+ bool initialized;
+ } pki;
+
+ uint16_t ev_queues;
+ uint16_t ev_ports;
+} __rte_cache_aligned;
+
+struct octeontx_txq {
+ uint16_t queue_id;
+ octeontx_dq_t dq;
+ struct rte_eth_dev *eth_dev;
+} __rte_cache_aligned;
+
+struct octeontx_rxq {
+ uint16_t queue_id;
+ uint16_t port_id;
+ uint8_t evdev;
+ struct rte_eth_dev *eth_dev;
+ uint16_t ev_queues;
+ uint16_t ev_ports;
+} __rte_cache_aligned;
+
+#endif /* __OCTEONTX_ETHDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_logs.h b/src/spdk/dpdk/drivers/net/octeontx/octeontx_logs.h
new file mode 100644
index 00000000..ccb8a1b0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_logs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_LOGS_H__
+#define __OCTEONTX_LOGS_H__
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, otx_net_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, ">>")
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, otx_net_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define PMD_MBOX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, otx_net_logtype_mbox, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define octeontx_log_err(s, ...) PMD_INIT_LOG(ERR, s, ##__VA_ARGS__)
+#define octeontx_log_dbg(s, ...) PMD_DRV_LOG(DEBUG, s, ##__VA_ARGS__)
+#define octeontx_mbox_log(s, ...) PMD_MBOX_LOG(DEBUG, s, ##__VA_ARGS__)
+
+#define PMD_RX_LOG PMD_DRV_LOG
+#define PMD_TX_LOG PMD_DRV_LOG
+
+extern int otx_net_logtype_init;
+extern int otx_net_logtype_driver;
+extern int otx_net_logtype_mbox;
+
+#endif /* __OCTEONTX_LOGS_H__*/
diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.c b/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.c
new file mode 100644
index 00000000..a9149b4e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_mbuf.h>
+#include <rte_prefetch.h>
+
+#include "octeontx_ethdev.h"
+#include "octeontx_rxtx.h"
+#include "octeontx_logs.h"
+
+
+static __rte_always_inline uint16_t __hot
+__octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
+ struct rte_mbuf *tx_pkt)
+{
+ uint64_t cmd_buf[4];
+ uint16_t gaura_id;
+
+ if (unlikely(*((volatile int64_t *)fc_status_va) < 0))
+ return -ENOSPC;
+
+ /* Get the gaura Id */
+ gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)tx_pkt->pool->pool_id);
+
+ /* Setup PKO_SEND_HDR_S */
+ cmd_buf[0] = tx_pkt->data_len & 0xffff;
+ cmd_buf[1] = 0x0;
+
+ /* Set don't free bit if reference count > 1 */
+ if (rte_mbuf_refcnt_read(tx_pkt) > 1)
+ cmd_buf[0] |= (1ULL << 58); /* SET DF */
+
+ /* Setup PKO_SEND_GATHER_S */
+ cmd_buf[(1 << 1) | 1] = rte_mbuf_data_iova(tx_pkt);
+ cmd_buf[(1 << 1) | 0] = PKO_SEND_GATHER_SUBDC |
+ PKO_SEND_GATHER_LDTYPE(0x1ull) |
+ PKO_SEND_GATHER_GAUAR((long)gaura_id) |
+ tx_pkt->data_len;
+
+ octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd_buf, PKO_CMD_SZ);
+
+ return 0;
+}
+
+uint16_t __hot
+octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int count;
+ struct octeontx_txq *txq = tx_queue;
+ octeontx_dq_t *dq = &txq->dq;
+ int res;
+
+ count = 0;
+
+ while (count < nb_pkts) {
+ res = __octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va,
+ dq->fc_status_va,
+ tx_pkts[count]);
+ if (res < 0)
+ break;
+
+ count++;
+ }
+
+ return count; /* return number of pkts transmitted */
+}
+
+uint16_t __hot
+octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct octeontx_rxq *rxq;
+ struct rte_event ev;
+ size_t count;
+ uint16_t valid_event;
+
+ rxq = rx_queue;
+ count = 0;
+ while (count < nb_pkts) {
+ valid_event = rte_event_dequeue_burst(rxq->evdev,
+ rxq->ev_ports, &ev,
+ 1, 0);
+ if (!valid_event)
+ break;
+ rx_pkts[count++] = ev.mbuf;
+ }
+
+ return count; /* return number of pkts received */
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.h b/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.h
new file mode 100644
index 00000000..fe3e5ccd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_RXTX_H__
+#define __OCTEONTX_RXTX_H__
+
+#include <rte_ethdev_driver.h>
+
+#ifndef __hot
+#define __hot __attribute__((hot))
+#endif
+
+/* Packet type table */
+#define PTYPE_SIZE OCCTX_PKI_LTYPE_LAST
+
+static const uint32_t __rte_cache_aligned
+ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
+ [LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
+ [LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+ [LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
+ [LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
+ [LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
+ [LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
+ [LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+
+ [LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+ [LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
+ [LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
+ [LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+ [LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
+ [LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_IPV4][LE_NONE][LF_NVGRE] =
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
+
+ [LC_IPV4_OPT][LE_NONE][LF_NONE] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+ [LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
+ [LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
+ [LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+ [LC_IPV4_OPT][LE_NONE][LF_TCP] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+ [LC_IPV4_OPT][LE_NONE][LF_UDP] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [LC_IPV4_OPT][LE_NONE][LF_GRE] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
+ [LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+ [LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+ [LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
+ [LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
+ [LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+ [LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
+ [LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_IPV6][LE_NONE][LF_NVGRE] =
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
+ [LC_IPV6_OPT][LE_NONE][LF_NONE] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+ [LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
+ [LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
+ [LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+ [LC_IPV6_OPT][LE_NONE][LF_TCP] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [LC_IPV6_OPT][LE_NONE][LF_UDP] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [LC_IPV6_OPT][LE_NONE][LF_GRE] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
+ [LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+};
+
+uint16_t
+octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
+uint16_t
+octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+#endif /* __OCTEONTX_RXTX_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx/rte_pmd_octeontx_version.map b/src/spdk/dpdk/drivers/net/octeontx/rte_pmd_octeontx_version.map
new file mode 100644
index 00000000..a3161b14
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/rte_pmd_octeontx_version.map
@@ -0,0 +1,11 @@
+DPDK_17.11 {
+
+ local: *;
+};
+
+DPDK_18.02 {
+ global:
+
+ rte_octeontx_pchan_map;
+
+} DPDK_17.11;
diff --git a/src/spdk/dpdk/drivers/net/pcap/Makefile b/src/spdk/dpdk/drivers/net/pcap/Makefile
new file mode 100644
index 00000000..ef332162
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pcap/Makefile
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation.
+# Copyright(c) 2014 6WIND S.A.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_pcap.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lpcap
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vdev
+
+EXPORT_MAP := rte_pmd_pcap_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += rte_eth_pcap.c
+
+#
+# Export include files
+#
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/pcap/meson.build b/src/spdk/dpdk/drivers/net/pcap/meson.build
new file mode 100644
index 00000000..0c4e0201
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pcap/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+pcap_dep = cc.find_library('pcap', required: false)
+if pcap_dep.found() and cc.has_header('pcap.h', dependencies: pcap_dep)
+ build = true
+else
+ build = false
+endif
+sources = files('rte_eth_pcap.c')
+ext_deps += pcap_dep
+pkgconfig_extra_libs += '-lpcap'
diff --git a/src/spdk/dpdk/drivers/net/pcap/rte_eth_pcap.c b/src/spdk/dpdk/drivers/net/pcap/rte_eth_pcap.c
new file mode 100644
index 00000000..e8810a17
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pcap/rte_eth_pcap.c
@@ -0,0 +1,1138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation.
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
+ */
+
+#include <time.h>
+
+#include <net/if.h>
+
+#include <pcap.h>
+
+#include <rte_cycles.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_bus_vdev.h>
+
+#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
+#define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN
+#define RTE_ETH_PCAP_PROMISC 1
+#define RTE_ETH_PCAP_TIMEOUT -1
+
+#define ETH_PCAP_RX_PCAP_ARG "rx_pcap"
+#define ETH_PCAP_TX_PCAP_ARG "tx_pcap"
+#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
+#define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
+#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
+#define ETH_PCAP_IFACE_ARG "iface"
+
+#define ETH_PCAP_ARG_MAXLEN 64
+
+#define RTE_PMD_PCAP_MAX_QUEUES 16
+
+static char errbuf[PCAP_ERRBUF_SIZE];
+static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
+static struct timeval start_time;
+static uint64_t start_cycles;
+static uint64_t hz;
+
+struct queue_stat {
+ volatile unsigned long pkts;
+ volatile unsigned long bytes;
+ volatile unsigned long err_pkts;
+};
+
+struct pcap_rx_queue {
+ pcap_t *pcap;
+ uint16_t in_port;
+ struct rte_mempool *mb_pool;
+ struct queue_stat rx_stat;
+ char name[PATH_MAX];
+ char type[ETH_PCAP_ARG_MAXLEN];
+};
+
+struct pcap_tx_queue {
+ pcap_dumper_t *dumper;
+ pcap_t *pcap;
+ struct queue_stat tx_stat;
+ char name[PATH_MAX];
+ char type[ETH_PCAP_ARG_MAXLEN];
+};
+
+struct pmd_internals {
+ struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
+ struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
+ int if_index;
+ int single_iface;
+};
+
+struct pmd_devargs {
+ unsigned int num_of_queue;
+ struct devargs_queue {
+ pcap_dumper_t *dumper;
+ pcap_t *pcap;
+ const char *name;
+ const char *type;
+ } queue[RTE_PMD_PCAP_MAX_QUEUES];
+};
+
+static const char *valid_arguments[] = {
+ ETH_PCAP_RX_PCAP_ARG,
+ ETH_PCAP_TX_PCAP_ARG,
+ ETH_PCAP_RX_IFACE_ARG,
+ ETH_PCAP_RX_IFACE_IN_ARG,
+ ETH_PCAP_TX_IFACE_ARG,
+ ETH_PCAP_IFACE_ARG,
+ NULL
+};
+
+static struct ether_addr eth_addr = {
+ .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 }
+};
+
+static struct rte_eth_link pmd_link = {
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_FIXED,
+};
+
+static int eth_pcap_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+static int
+eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
+ const u_char *data, uint16_t data_len)
+{
+ /* Copy the first segment. */
+ uint16_t len = rte_pktmbuf_tailroom(mbuf);
+ struct rte_mbuf *m = mbuf;
+
+ rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
+ data_len -= len;
+ data += len;
+
+ while (data_len > 0) {
+ /* Allocate next mbuf and point to that. */
+ m->next = rte_pktmbuf_alloc(mb_pool);
+
+ if (unlikely(!m->next))
+ return -1;
+
+ m = m->next;
+
+ /* Headroom is not needed in chained mbufs. */
+ rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+ m->pkt_len = 0;
+ m->data_len = 0;
+
+ /* Copy next segment. */
+ len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len);
+ rte_memcpy(rte_pktmbuf_append(m, len), data, len);
+
+ mbuf->nb_segs++;
+ data_len -= len;
+ data += len;
+ }
+
+ return mbuf->nb_segs;
+}
+
+/* Copy data from mbuf chain to a buffer suitable for writing to a PCAP file. */
+static void
+eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
+{
+ uint16_t data_len = 0;
+
+ while (mbuf) {
+ rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),
+ mbuf->data_len);
+
+ data_len += mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+}
+
+static uint16_t
+eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ unsigned int i;
+ struct pcap_pkthdr header;
+ const u_char *packet;
+ struct rte_mbuf *mbuf;
+ struct pcap_rx_queue *pcap_q = queue;
+ uint16_t num_rx = 0;
+ uint16_t buf_size;
+ uint32_t rx_bytes = 0;
+
+ if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0))
+ return 0;
+
+ /* Reads the given number of packets from the pcap file one by one
+ * and copies the packet data into a newly allocated mbuf to return.
+ */
+ for (i = 0; i < nb_pkts; i++) {
+ /* Get the next PCAP packet */
+ packet = pcap_next(pcap_q->pcap, &header);
+ if (unlikely(packet == NULL))
+ break;
+
+ mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
+ if (unlikely(mbuf == NULL))
+ break;
+
+ /* Now get the space available for data in the mbuf */
+ buf_size = rte_pktmbuf_data_room_size(pcap_q->mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+
+ if (header.caplen <= buf_size) {
+ /* pcap packet will fit in the mbuf, can copy it */
+ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
+ header.caplen);
+ mbuf->data_len = (uint16_t)header.caplen;
+ } else {
+ /* Try read jumbo frame into multi mbufs. */
+ if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
+ mbuf,
+ packet,
+ header.caplen) == -1)) {
+ rte_pktmbuf_free(mbuf);
+ break;
+ }
+ }
+
+ mbuf->pkt_len = (uint16_t)header.caplen;
+ mbuf->port = pcap_q->in_port;
+ bufs[num_rx] = mbuf;
+ num_rx++;
+ rx_bytes += header.caplen;
+ }
+ pcap_q->rx_stat.pkts += num_rx;
+ pcap_q->rx_stat.bytes += rx_bytes;
+
+ return num_rx;
+}
+
+static inline void
+calculate_timestamp(struct timeval *ts) {
+ uint64_t cycles;
+ struct timeval cur_time;
+
+ cycles = rte_get_timer_cycles() - start_cycles;
+ cur_time.tv_sec = cycles / hz;
+ cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
+ timeradd(&start_time, &cur_time, ts);
+}
+
+/*
+ * Callback to handle writing packets to a pcap file.
+ */
+static uint16_t
+eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ unsigned int i;
+ struct rte_mbuf *mbuf;
+ struct pcap_tx_queue *dumper_q = queue;
+ uint16_t num_tx = 0;
+ uint32_t tx_bytes = 0;
+ struct pcap_pkthdr header;
+
+ if (dumper_q->dumper == NULL || nb_pkts == 0)
+ return 0;
+
+ /* writes the nb_pkts packets to the previously opened pcap file
+ * dumper */
+ for (i = 0; i < nb_pkts; i++) {
+ mbuf = bufs[i];
+ calculate_timestamp(&header.ts);
+ header.len = mbuf->pkt_len;
+ header.caplen = header.len;
+
+ if (likely(mbuf->nb_segs == 1)) {
+ pcap_dump((u_char *)dumper_q->dumper, &header,
+ rte_pktmbuf_mtod(mbuf, void*));
+ } else {
+ if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
+ eth_pcap_gather_data(tx_pcap_data, mbuf);
+ pcap_dump((u_char *)dumper_q->dumper, &header,
+ tx_pcap_data);
+ } else {
+ PMD_LOG(ERR,
+ "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
+ mbuf->pkt_len,
+ ETHER_MAX_JUMBO_FRAME_LEN);
+
+ rte_pktmbuf_free(mbuf);
+ break;
+ }
+ }
+
+ num_tx++;
+ tx_bytes += mbuf->pkt_len;
+ rte_pktmbuf_free(mbuf);
+ }
+
+ /*
+ * Since there's no place to hook a callback when the forwarding
+ * process stops and to make sure the pcap file is actually written,
+ * we flush the pcap dumper within each burst.
+ */
+ pcap_dump_flush(dumper_q->dumper);
+ dumper_q->tx_stat.pkts += num_tx;
+ dumper_q->tx_stat.bytes += tx_bytes;
+ dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;
+
+ return num_tx;
+}
+
+/*
+ * Callback to handle sending packets through a real NIC.
+ */
+static uint16_t
+eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ unsigned int i;
+ int ret;
+ struct rte_mbuf *mbuf;
+ struct pcap_tx_queue *tx_queue = queue;
+ uint16_t num_tx = 0;
+ uint32_t tx_bytes = 0;
+
+ if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL))
+ return 0;
+
+ for (i = 0; i < nb_pkts; i++) {
+ mbuf = bufs[i];
+
+ if (likely(mbuf->nb_segs == 1)) {
+ ret = pcap_sendpacket(tx_queue->pcap,
+ rte_pktmbuf_mtod(mbuf, u_char *),
+ mbuf->pkt_len);
+ } else {
+ if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
+ eth_pcap_gather_data(tx_pcap_data, mbuf);
+ ret = pcap_sendpacket(tx_queue->pcap,
+ tx_pcap_data, mbuf->pkt_len);
+ } else {
+ PMD_LOG(ERR,
+ "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
+ mbuf->pkt_len,
+ ETHER_MAX_JUMBO_FRAME_LEN);
+
+ rte_pktmbuf_free(mbuf);
+ break;
+ }
+ }
+
+ if (unlikely(ret != 0))
+ break;
+ num_tx++;
+ tx_bytes += mbuf->pkt_len;
+ rte_pktmbuf_free(mbuf);
+ }
+
+ tx_queue->tx_stat.pkts += num_tx;
+ tx_queue->tx_stat.bytes += tx_bytes;
+ tx_queue->tx_stat.err_pkts += nb_pkts - num_tx;
+
+ return num_tx;
+}
+
+/*
+ * pcap_open_live wrapper function
+ */
+static inline int
+open_iface_live(const char *iface, pcap_t **pcap) {
+ *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
+ RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
+
+ if (*pcap == NULL) {
+ PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+open_single_iface(const char *iface, pcap_t **pcap)
+{
+ if (open_iface_live(iface, pcap) < 0) {
+ PMD_LOG(ERR, "Couldn't open interface %s", iface);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
+{
+ pcap_t *tx_pcap;
+
+ /*
+ * We need to create a dummy empty pcap_t to use it
+ * with pcap_dump_open(). We create big enough an Ethernet
+ * pcap holder.
+ */
+ tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN);
+ if (tx_pcap == NULL) {
+ PMD_LOG(ERR, "Couldn't create dead pcap");
+ return -1;
+ }
+
+ /* The dumper is created using the previous pcap_t reference */
+ *dumper = pcap_dump_open(tx_pcap, pcap_filename);
+ if (*dumper == NULL) {
+ pcap_close(tx_pcap);
+ PMD_LOG(ERR, "Couldn't open %s for writing.",
+ pcap_filename);
+ return -1;
+ }
+
+ pcap_close(tx_pcap);
+ return 0;
+}
+
+static int
+open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
+{
+ *pcap = pcap_open_offline(pcap_filename, errbuf);
+ if (*pcap == NULL) {
+ PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename,
+ errbuf);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+eth_dev_start(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pcap_tx_queue *tx;
+ struct pcap_rx_queue *rx;
+
+ /* Special iface case. Single pcap is open and shared between tx/rx. */
+ if (internals->single_iface) {
+ tx = &internals->tx_queue[0];
+ rx = &internals->rx_queue[0];
+
+ if (!tx->pcap && strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
+ if (open_single_iface(tx->name, &tx->pcap) < 0)
+ return -1;
+ rx->pcap = tx->pcap;
+ }
+
+ goto status_up;
+ }
+
+ /* If not open already, open tx pcaps/dumpers */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ tx = &internals->tx_queue[i];
+
+ if (!tx->dumper &&
+ strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
+ if (open_single_tx_pcap(tx->name, &tx->dumper) < 0)
+ return -1;
+ } else if (!tx->pcap &&
+ strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
+ if (open_single_iface(tx->name, &tx->pcap) < 0)
+ return -1;
+ }
+ }
+
+ /* If not open already, open rx pcaps */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rx = &internals->rx_queue[i];
+
+ if (rx->pcap != NULL)
+ continue;
+
+ if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
+ if (open_single_rx_pcap(rx->name, &rx->pcap) < 0)
+ return -1;
+ } else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
+ if (open_single_iface(rx->name, &rx->pcap) < 0)
+ return -1;
+ }
+ }
+
+status_up:
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ dev->data->dev_link.link_status = ETH_LINK_UP;
+
+ return 0;
+}
+
+/*
+ * This function gets called when the current port gets stopped.
+ * Is the only place for us to close all the tx streams dumpers.
+ * If not called the dumpers will be flushed within each tx burst.
+ */
+static void
+eth_dev_stop(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pcap_tx_queue *tx;
+ struct pcap_rx_queue *rx;
+
+ /* Special iface case. Single pcap is open and shared between tx/rx. */
+ if (internals->single_iface) {
+ tx = &internals->tx_queue[0];
+ rx = &internals->rx_queue[0];
+ pcap_close(tx->pcap);
+ tx->pcap = NULL;
+ rx->pcap = NULL;
+ goto status_down;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ tx = &internals->tx_queue[i];
+
+ if (tx->dumper != NULL) {
+ pcap_dump_close(tx->dumper);
+ tx->dumper = NULL;
+ }
+
+ if (tx->pcap != NULL) {
+ pcap_close(tx->pcap);
+ tx->pcap = NULL;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rx = &internals->rx_queue[i];
+
+ if (rx->pcap != NULL) {
+ pcap_close(rx->pcap);
+ rx->pcap = NULL;
+ }
+ }
+
+status_down:
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+}
+
+static int
+eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ dev_info->if_index = internals->if_index;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t) -1;
+ dev_info->max_rx_queues = dev->data->nb_rx_queues;
+ dev_info->max_tx_queues = dev->data->nb_tx_queues;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
+}
+
+static int
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ unsigned int i;
+ unsigned long rx_packets_total = 0, rx_bytes_total = 0;
+ unsigned long tx_packets_total = 0, tx_bytes_total = 0;
+ unsigned long tx_packets_err_total = 0;
+ const struct pmd_internals *internal = dev->data->dev_private;
+
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+ i < dev->data->nb_rx_queues; i++) {
+ stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
+ stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
+ rx_packets_total += stats->q_ipackets[i];
+ rx_bytes_total += stats->q_ibytes[i];
+ }
+
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+ i < dev->data->nb_tx_queues; i++) {
+ stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
+ stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
+ stats->q_errors[i] = internal->tx_queue[i].tx_stat.err_pkts;
+ tx_packets_total += stats->q_opackets[i];
+ tx_bytes_total += stats->q_obytes[i];
+ tx_packets_err_total += stats->q_errors[i];
+ }
+
+ stats->ipackets = rx_packets_total;
+ stats->ibytes = rx_bytes_total;
+ stats->opackets = tx_packets_total;
+ stats->obytes = tx_bytes_total;
+ stats->oerrors = tx_packets_err_total;
+
+ return 0;
+}
+
+static void
+eth_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+ struct pmd_internals *internal = dev->data->dev_private;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ internal->rx_queue[i].rx_stat.pkts = 0;
+ internal->rx_queue[i].rx_stat.bytes = 0;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ internal->tx_queue[i].tx_stat.pkts = 0;
+ internal->tx_queue[i].tx_stat.bytes = 0;
+ internal->tx_queue[i].tx_stat.err_pkts = 0;
+ }
+}
+
+static void
+eth_dev_close(struct rte_eth_dev *dev __rte_unused)
+{
+}
+
+static void
+eth_queue_release(void *q __rte_unused)
+{
+}
+
+static int
+eth_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
+{
+ return 0;
+}
+
+static int
+eth_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
+
+ pcap_q->mb_pool = mb_pool;
+ dev->data->rx_queues[rx_queue_id] = pcap_q;
+ pcap_q->in_port = dev->data->port_id;
+
+ return 0;
+}
+
+static int
+eth_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
+
+ return 0;
+}
+
+static int
+eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static int
+eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static const struct eth_dev_ops ops = {
+ .dev_start = eth_dev_start,
+ .dev_stop = eth_dev_stop,
+ .dev_close = eth_dev_close,
+ .dev_configure = eth_dev_configure,
+ .dev_infos_get = eth_dev_info,
+ .rx_queue_setup = eth_rx_queue_setup,
+ .tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_start = eth_rx_queue_start,
+ .tx_queue_start = eth_tx_queue_start,
+ .rx_queue_stop = eth_rx_queue_stop,
+ .tx_queue_stop = eth_tx_queue_stop,
+ .rx_queue_release = eth_queue_release,
+ .tx_queue_release = eth_queue_release,
+ .link_update = eth_link_update,
+ .stats_get = eth_stats_get,
+ .stats_reset = eth_stats_reset,
+};
+
+static int
+add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
+ pcap_t *pcap, pcap_dumper_t *dumper)
+{
+ if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
+ return -1;
+ if (pcap)
+ pmd->queue[pmd->num_of_queue].pcap = pcap;
+ if (dumper)
+ pmd->queue[pmd->num_of_queue].dumper = dumper;
+ pmd->queue[pmd->num_of_queue].name = name;
+ pmd->queue[pmd->num_of_queue].type = type;
+ pmd->num_of_queue++;
+ return 0;
+}
+
+/*
+ * Function handler that opens the pcap file for reading a stores a
+ * reference of it for use it later on.
+ */
+static int
+open_rx_pcap(const char *key, const char *value, void *extra_args)
+{
+ const char *pcap_filename = value;
+ struct pmd_devargs *rx = extra_args;
+ pcap_t *pcap = NULL;
+
+ if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
+ return -1;
+
+ if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) {
+ pcap_close(pcap);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Opens a pcap file for writing and stores a reference to it
+ * for use it later on.
+ */
+static int
+open_tx_pcap(const char *key, const char *value, void *extra_args)
+{
+ const char *pcap_filename = value;
+ struct pmd_devargs *dumpers = extra_args;
+ pcap_dumper_t *dumper;
+
+ if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
+ return -1;
+
+ if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) {
+ pcap_dump_close(dumper);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Opens an interface for reading and writing
+ */
+static inline int
+open_rx_tx_iface(const char *key, const char *value, void *extra_args)
+{
+ const char *iface = value;
+ struct pmd_devargs *tx = extra_args;
+ pcap_t *pcap = NULL;
+
+ if (open_single_iface(iface, &pcap) < 0)
+ return -1;
+
+ tx->queue[0].pcap = pcap;
+ tx->queue[0].name = iface;
+ tx->queue[0].type = key;
+
+ return 0;
+}
+
+static inline int
+set_iface_direction(const char *iface, pcap_t *pcap,
+ pcap_direction_t direction)
+{
+ const char *direction_str = (direction == PCAP_D_IN) ? "IN" : "OUT";
+ if (pcap_setdirection(pcap, direction) < 0) {
+ PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s\n",
+ iface, direction_str, pcap_geterr(pcap));
+ return -1;
+ }
+ PMD_LOG(INFO, "Setting %s pcap direction %s\n",
+ iface, direction_str);
+ return 0;
+}
+
+static inline int
+open_iface(const char *key, const char *value, void *extra_args)
+{
+ const char *iface = value;
+ struct pmd_devargs *pmd = extra_args;
+ pcap_t *pcap = NULL;
+
+ if (open_single_iface(iface, &pcap) < 0)
+ return -1;
+ if (add_queue(pmd, iface, key, pcap, NULL) < 0) {
+ pcap_close(pcap);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Opens a NIC for reading packets from it
+ */
+static inline int
+open_rx_iface(const char *key, const char *value, void *extra_args)
+{
+ int ret = open_iface(key, value, extra_args);
+ if (ret < 0)
+ return ret;
+ if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) {
+ struct pmd_devargs *pmd = extra_args;
+ unsigned int qid = pmd->num_of_queue - 1;
+
+ set_iface_direction(pmd->queue[qid].name,
+ pmd->queue[qid].pcap,
+ PCAP_D_IN);
+ }
+
+ return 0;
+}
+
+static inline int
+rx_iface_args_process(const char *key, const char *value, void *extra_args)
+{
+ if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 ||
+ strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0)
+ return open_rx_iface(key, value, extra_args);
+
+ return 0;
+}
+
+/*
+ * Opens a NIC for writing packets to it
+ */
+static int
+open_tx_iface(const char *key, const char *value, void *extra_args)
+{
+ return open_iface(key, value, extra_args);
+}
+
+static struct rte_vdev_driver pmd_pcap_drv;
+
+static int
+pmd_init_internals(struct rte_vdev_device *vdev,
+ const unsigned int nb_rx_queues,
+ const unsigned int nb_tx_queues,
+ struct pmd_internals **internals,
+ struct rte_eth_dev **eth_dev)
+{
+ struct rte_eth_dev_data *data;
+ unsigned int numa_node = vdev->device.numa_node;
+
+ PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
+ numa_node);
+
+ /* reserve an ethdev entry */
+ *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
+ if (!(*eth_dev))
+ return -1;
+
+ /* now put it all together
+ * - store queue data in internals,
+ * - store numa_node info in eth_dev
+ * - point eth_dev_data to internals
+ * - and point eth_dev structure to new eth_dev_data structure
+ */
+ *internals = (*eth_dev)->data->dev_private;
+ data = (*eth_dev)->data;
+ data->nb_rx_queues = (uint16_t)nb_rx_queues;
+ data->nb_tx_queues = (uint16_t)nb_tx_queues;
+ data->dev_link = pmd_link;
+ data->mac_addrs = &eth_addr;
+
+ /*
+ * NOTE: we'll replace the data element, of originally allocated
+ * eth_dev so the rings are local per-process
+ */
+ (*eth_dev)->dev_ops = &ops;
+
+ return 0;
+}
+
+static int
+eth_from_pcaps_common(struct rte_vdev_device *vdev,
+ struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
+ struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
+ struct rte_kvargs *kvlist, struct pmd_internals **internals,
+ struct rte_eth_dev **eth_dev)
+{
+ struct rte_kvargs_pair *pair = NULL;
+ unsigned int k_idx;
+ unsigned int i;
+
+ /* do some parameter checking */
+ if (rx_queues == NULL && nb_rx_queues > 0)
+ return -1;
+ if (tx_queues == NULL && nb_tx_queues > 0)
+ return -1;
+
+ if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals,
+ eth_dev) < 0)
+ return -1;
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
+ struct devargs_queue *queue = &rx_queues->queue[i];
+
+ rx->pcap = queue->pcap;
+ snprintf(rx->name, sizeof(rx->name), "%s", queue->name);
+ snprintf(rx->type, sizeof(rx->type), "%s", queue->type);
+ }
+
+ for (i = 0; i < nb_tx_queues; i++) {
+ struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
+ struct devargs_queue *queue = &tx_queues->queue[i];
+
+ tx->dumper = queue->dumper;
+ tx->pcap = queue->pcap;
+ snprintf(tx->name, sizeof(tx->name), "%s", queue->name);
+ snprintf(tx->type, sizeof(tx->type), "%s", queue->type);
+ }
+
+ for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
+ pair = &kvlist->pairs[k_idx];
+ if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
+ break;
+ }
+
+ if (pair == NULL)
+ (*internals)->if_index = 0;
+ else
+ (*internals)->if_index = if_nametoindex(pair->value);
+
+ return 0;
+}
+
+static int
+eth_from_pcaps(struct rte_vdev_device *vdev,
+ struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
+ struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
+ struct rte_kvargs *kvlist, int single_iface,
+ unsigned int using_dumpers)
+{
+ struct pmd_internals *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ int ret;
+
+ ret = eth_from_pcaps_common(vdev, rx_queues, nb_rx_queues,
+ tx_queues, nb_tx_queues, kvlist, &internals, &eth_dev);
+
+ if (ret < 0)
+ return ret;
+
+ /* store weather we are using a single interface for rx/tx or not */
+ internals->single_iface = single_iface;
+
+ eth_dev->rx_pkt_burst = eth_pcap_rx;
+
+ if (using_dumpers)
+ eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
+ else
+ eth_dev->tx_pkt_burst = eth_pcap_tx;
+
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+}
+
+static int
+pmd_pcap_probe(struct rte_vdev_device *dev)
+{
+ const char *name;
+ unsigned int is_rx_pcap = 0, is_tx_pcap = 0;
+ struct rte_kvargs *kvlist;
+ struct pmd_devargs pcaps = {0};
+ struct pmd_devargs dumpers = {0};
+ struct rte_eth_dev *eth_dev;
+ int single_iface = 0;
+ int ret;
+
+ name = rte_vdev_device_name(dev);
+ PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);
+
+ gettimeofday(&start_time, NULL);
+ start_cycles = rte_get_timer_cycles();
+ hz = rte_get_timer_hz();
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
+ if (kvlist == NULL)
+ return -1;
+
+ /*
+ * If iface argument is passed we open the NICs and use them for
+ * reading / writing
+ */
+ if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
+
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
+ &open_rx_tx_iface, &pcaps);
+
+ if (ret < 0)
+ goto free_kvlist;
+
+ dumpers.queue[0] = pcaps.queue[0];
+
+ single_iface = 1;
+ pcaps.num_of_queue = 1;
+ dumpers.num_of_queue = 1;
+
+ goto create_eth;
+ }
+
+ /*
+ * We check whether we want to open a RX stream from a real NIC or a
+ * pcap file
+ */
+ is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
+ pcaps.num_of_queue = 0;
+
+ if (is_rx_pcap) {
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
+ &open_rx_pcap, &pcaps);
+ } else {
+ ret = rte_kvargs_process(kvlist, NULL,
+ &rx_iface_args_process, &pcaps);
+ }
+
+ if (ret < 0)
+ goto free_kvlist;
+
+ /*
+ * We check whether we want to open a TX stream to a real NIC or a
+ * pcap file
+ */
+ is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
+ dumpers.num_of_queue = 0;
+
+ if (is_tx_pcap)
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
+ &open_tx_pcap, &dumpers);
+ else
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
+ &open_tx_iface, &dumpers);
+
+ if (ret < 0)
+ goto free_kvlist;
+
+create_eth:
+ ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers,
+ dumpers.num_of_queue, kvlist, single_iface, is_tx_pcap);
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+static int
+pmd_pcap_remove(struct rte_vdev_device *dev)
+{
+ struct rte_eth_dev *eth_dev = NULL;
+
+ PMD_LOG(INFO, "Closing pcap ethdev on numa socket %d",
+ rte_socket_id());
+
+ if (!dev)
+ return -1;
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
+ if (eth_dev == NULL)
+ return -1;
+
+ rte_free(eth_dev->data->dev_private);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_pcap_drv = {
+ .probe = pmd_pcap_probe,
+ .remove = pmd_pcap_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
+RTE_PMD_REGISTER_ALIAS(net_pcap, eth_pcap);
+RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
+ ETH_PCAP_RX_PCAP_ARG "=<string> "
+ ETH_PCAP_TX_PCAP_ARG "=<string> "
+ ETH_PCAP_RX_IFACE_ARG "=<ifc> "
+ ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
+ ETH_PCAP_TX_IFACE_ARG "=<ifc> "
+ ETH_PCAP_IFACE_ARG "=<ifc>");
+
+RTE_INIT(eth_pcap_init_log)
+{
+ eth_pcap_logtype = rte_log_register("pmd.net.pcap");
+ if (eth_pcap_logtype >= 0)
+ rte_log_set_level(eth_pcap_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/pcap/rte_pmd_pcap_version.map b/src/spdk/dpdk/drivers/net/pcap/rte_pmd_pcap_version.map
new file mode 100644
index 00000000..ef353984
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pcap/rte_pmd_pcap_version.map
@@ -0,0 +1,4 @@
+DPDK_2.0 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/qede/Makefile b/src/spdk/dpdk/drivers/net/qede/Makefile
new file mode 100644
index 00000000..488ca1d9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/Makefile
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2016 - 2018 Cavium Inc.
+# All rights reserved.
+# www.cavium.com
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_qede.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+EXPORT_MAP := rte_pmd_qede_version.map
+
+LIBABIVER := 1
+
+#
+# OS
+#
+OS_TYPE := $(shell uname -s)
+
+#
+# CFLAGS
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter
+CFLAGS_BASE_DRIVER += -Wno-sign-compare
+CFLAGS_BASE_DRIVER += -Wno-missing-prototypes
+CFLAGS_BASE_DRIVER += -Wno-cast-qual
+CFLAGS_BASE_DRIVER += -Wno-unused-function
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing
+CFLAGS_BASE_DRIVER += -Wno-missing-prototypes
+
+ifneq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS_BASE_DRIVER += -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-format-nonliteral
+ifeq ($(OS_TYPE),Linux)
+ifeq ($(shell clang -Wno-shift-negative-value -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
+CFLAGS_BASE_DRIVER += -Wno-shift-negative-value
+endif
+endif
+endif
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+endif
+CFLAGS_BASE_DRIVER += -Wno-missing-declarations
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-maybe-uninitialized
+endif
+CFLAGS_BASE_DRIVER += -Wno-strict-prototypes
+ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-shift-negative-value
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
+endif
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+CFLAGS_BASE_DRIVER += -Wno-format-extra-args
+CFLAGS_BASE_DRIVER += -Wno-visibility
+CFLAGS_BASE_DRIVER += -Wno-empty-body
+CFLAGS_BASE_DRIVER += -Wno-invalid-source-encoding
+CFLAGS_BASE_DRIVER += -Wno-sometimes-uninitialized
+ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
+CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion
+endif
+else #ICC
+CFLAGS_qede_ethdev.o += -diag-disable 279 #279: controlling expression is constant
+endif
+
+#
+# Add extra flags for base ecore driver files
+# to disable warnings in them
+#
+#
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_cxt.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_l2.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sp_commands.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_init_fw_funcs.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_spq.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_init_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_mcp.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_int.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_dcbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += bcm_osal.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sriov.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_fdir.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/qede/base/bcm_osal.c b/src/spdk/dpdk/drivers/net/qede/base/bcm_osal.c
new file mode 100644
index 00000000..d5d6f8e2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/bcm_osal.c
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include <rte_memzone.h>
+#include <rte_errno.h>
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_iov_api.h"
+#include "ecore_mcp_api.h"
+#include "ecore_l2_api.h"
+
+/* Array of memzone pointers */
+static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
+/* Counter to track current memzone allocated */
+uint16_t ecore_mz_count;
+
+unsigned long qede_log2_align(unsigned long n)
+{
+ unsigned long ret = n ? 1 : 0;
+ unsigned long _n = n >> 1;
+
+ while (_n) {
+ _n >>= 1;
+ ret <<= 1;
+ }
+
+ if (ret < n)
+ ret <<= 1;
+
+ return ret;
+}
+
+u32 qede_osal_log2(u32 val)
+{
+ u32 log = 0;
+
+ while (val >>= 1)
+ log++;
+
+ return log;
+}
+
+inline void qede_set_bit(u32 nr, unsigned long *addr)
+{
+ __sync_fetch_and_or(addr, (1UL << nr));
+}
+
+inline void qede_clr_bit(u32 nr, unsigned long *addr)
+{
+ __sync_fetch_and_and(addr, ~(1UL << nr));
+}
+
+inline bool qede_test_bit(u32 nr, unsigned long *addr)
+{
+ bool res;
+
+ rte_mb();
+ res = ((*addr) & (1UL << nr)) != 0;
+ rte_mb();
+ return res;
+}
+
+static inline u32 qede_ffb(unsigned long word)
+{
+ unsigned long first_bit;
+
+ first_bit = __builtin_ffsl(word);
+ return first_bit ? (first_bit - 1) : OSAL_BITS_PER_UL;
+}
+
+inline u32 qede_find_first_bit(unsigned long *addr, u32 limit)
+{
+ u32 i;
+ u32 nwords = 0;
+ OSAL_BUILD_BUG_ON(!limit);
+ nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
+ for (i = 0; i < nwords; i++)
+ if (addr[i] != 0)
+ break;
+
+ return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffb(addr[i]);
+}
+
+static inline u32 qede_ffz(unsigned long word)
+{
+ unsigned long first_zero;
+
+ first_zero = __builtin_ffsl(~word);
+ return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;
+}
+
+inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)
+{
+ u32 i;
+ u32 nwords = 0;
+ OSAL_BUILD_BUG_ON(!limit);
+ nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
+ for (i = 0; i < nwords && ~(addr[i]) == 0; i++);
+ return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
+}
+
+void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,
+ __rte_unused struct vf_pf_resc_request *resc_req,
+ struct ecore_vf_acquire_sw_info *vf_sw_info)
+{
+ vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;
+ vf_sw_info->override_fw_version = 1;
+}
+
+void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
+ dma_addr_t *phys, size_t size)
+{
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ uint32_t core_id = rte_lcore_id();
+ unsigned int socket_id;
+
+ if (ecore_mz_count >= RTE_MAX_MEMZONE) {
+ DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
+ RTE_MAX_MEMZONE);
+ *phys = 0;
+ return OSAL_NULL;
+ }
+
+ OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
+ snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ if (core_id == (unsigned int)LCORE_ID_ANY)
+ core_id = rte_get_master_lcore();
+ socket_id = rte_lcore_to_socket_id(core_id);
+ mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
+ if (!mz) {
+ DP_ERR(p_dev, "Unable to allocate DMA memory "
+ "of size %zu bytes - %s\n",
+ size, rte_strerror(rte_errno));
+ *phys = 0;
+ return OSAL_NULL;
+ }
+ *phys = mz->iova;
+ ecore_mz_mapping[ecore_mz_count++] = mz;
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Allocated dma memory size=%zu phys=0x%lx"
+ " virt=%p core=%d\n",
+ mz->len, (unsigned long)mz->iova, mz->addr, core_id);
+ return mz->addr;
+}
+
+void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
+ dma_addr_t *phys, size_t size, int align)
+{
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ uint32_t core_id = rte_lcore_id();
+ unsigned int socket_id;
+
+ if (ecore_mz_count >= RTE_MAX_MEMZONE) {
+ DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
+ RTE_MAX_MEMZONE);
+ *phys = 0;
+ return OSAL_NULL;
+ }
+
+ OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
+ snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ if (core_id == (unsigned int)LCORE_ID_ANY)
+ core_id = rte_get_master_lcore();
+ socket_id = rte_lcore_to_socket_id(core_id);
+ mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, align);
+ if (!mz) {
+ DP_ERR(p_dev, "Unable to allocate DMA memory "
+ "of size %zu bytes - %s\n",
+ size, rte_strerror(rte_errno));
+ *phys = 0;
+ return OSAL_NULL;
+ }
+ *phys = mz->iova;
+ ecore_mz_mapping[ecore_mz_count++] = mz;
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Allocated aligned dma memory size=%zu phys=0x%lx"
+ " virt=%p core=%d\n",
+ mz->len, (unsigned long)mz->iova, mz->addr, core_id);
+ return mz->addr;
+}
+
+void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
+{
+ uint16_t j;
+
+ for (j = 0 ; j < ecore_mz_count; j++) {
+ if (phys == ecore_mz_mapping[j]->iova) {
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Free memzone %s\n", ecore_mz_mapping[j]->name);
+ rte_memzone_free(ecore_mz_mapping[j]);
+ while (j < ecore_mz_count - 1) {
+ ecore_mz_mapping[j] = ecore_mz_mapping[j + 1];
+ j++;
+ }
+ ecore_mz_count--;
+ return;
+ }
+ }
+
+ DP_ERR(p_dev, "Unexpected memory free request\n");
+}
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
+ u8 *input_buf, u32 max_size, u8 *unzip_buf)
+{
+ int rc;
+
+ p_hwfn->stream->next_in = input_buf;
+ p_hwfn->stream->avail_in = input_len;
+ p_hwfn->stream->next_out = unzip_buf;
+ p_hwfn->stream->avail_out = max_size;
+
+ rc = inflateInit2(p_hwfn->stream, MAX_WBITS);
+
+ if (rc != Z_OK) {
+ DP_ERR(p_hwfn,
+ "zlib init failed, rc = %d\n", rc);
+ return 0;
+ }
+
+ rc = inflate(p_hwfn->stream, Z_FINISH);
+ inflateEnd(p_hwfn->stream);
+
+ if (rc != Z_OK && rc != Z_STREAM_END) {
+ DP_ERR(p_hwfn,
+ "FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg,
+ rc);
+ return 0;
+ }
+
+ return p_hwfn->stream->total_out / 4;
+}
+#endif
+
+void
+qede_get_mcp_proto_stats(struct ecore_dev *edev,
+ enum ecore_mcp_protocol_type type,
+ union ecore_mcp_protocol_stats *stats)
+{
+ struct ecore_eth_stats lan_stats;
+
+ if (type == ECORE_MCP_LAN_STATS) {
+ ecore_get_vport_stats(edev, &lan_stats);
+
+ /* @DPDK */
+ stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts;
+
+ stats->lan_stats.fcs_err = -1;
+ } else {
+ DP_INFO(edev, "Statistics request type %d not supported\n",
+ type);
+ }
+}
+
+void
+qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
+{
+ char err_str[64];
+
+ switch (err_type) {
+ case ECORE_HW_ERR_FAN_FAIL:
+ strcpy(err_str, "Fan Failure");
+ break;
+ case ECORE_HW_ERR_MFW_RESP_FAIL:
+ strcpy(err_str, "MFW Response Failure");
+ break;
+ case ECORE_HW_ERR_HW_ATTN:
+ strcpy(err_str, "HW Attention");
+ break;
+ case ECORE_HW_ERR_DMAE_FAIL:
+ strcpy(err_str, "DMAE Failure");
+ break;
+ case ECORE_HW_ERR_RAMROD_FAIL:
+ strcpy(err_str, "Ramrod Failure");
+ break;
+ case ECORE_HW_ERR_FW_ASSERT:
+ strcpy(err_str, "FW Assertion");
+ break;
+ default:
+ strcpy(err_str, "Unknown");
+ }
+
+ DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);
+ ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
+}
+
+u32 qede_crc32(u32 crc, u8 *ptr, u32 length)
+{
+ int i;
+
+ while (length--) {
+ crc ^= *ptr++;
+ for (i = 0; i < 8; i++)
+ crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
+ }
+ return crc;
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/base/bcm_osal.h b/src/spdk/dpdk/drivers/net/qede/base/bcm_osal.h
new file mode 100644
index 00000000..630867fa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/bcm_osal.h
@@ -0,0 +1,456 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __BCM_OSAL_H
+#define __BCM_OSAL_H
+
+#include <rte_byteorder.h>
+#include <rte_spinlock.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_memcpy.h>
+#include <rte_log.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_io.h>
+
+/* Forward declaration */
+struct ecore_dev;
+struct ecore_hwfn;
+struct ecore_ptt;
+struct ecore_vf_acquire_sw_info;
+struct vf_pf_resc_request;
+enum ecore_mcp_protocol_type;
+union ecore_mcp_protocol_stats;
+enum ecore_hw_err_type;
+
+void qed_link_update(struct ecore_hwfn *hwfn);
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#undef __BIG_ENDIAN
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN
+#endif
+#else
+#undef __LITTLE_ENDIAN
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN
+#endif
+#endif
+
+#define OSAL_WARN(arg1, arg2, arg3, ...) (0)
+
+#define UNUSED(x) (void)(x)
+
+/* Memory Types */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef int16_t s16;
+typedef int32_t s32;
+
+typedef u16 __le16;
+typedef u32 __le32;
+typedef u32 OSAL_BE32;
+
+#define osal_uintptr_t uintptr_t
+
+typedef rte_iova_t dma_addr_t;
+
+typedef rte_spinlock_t osal_spinlock_t;
+
+typedef void *osal_dpc_t;
+
+typedef size_t osal_size_t;
+
+typedef intptr_t osal_int_ptr_t;
+
+typedef int bool;
+#define true 1
+#define false 0
+
+#define nothing do {} while (0)
+
+/* Delays */
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define msec_delay(x) DELAY(1000 * (x))
+#define OSAL_UDELAY(time) usec_delay(time)
+#define OSAL_MSLEEP(time) msec_delay(time)
+
+/* Memory allocations and deallocations */
+
+#define OSAL_NULL ((void *)0)
+#define OSAL_ALLOC(dev, GFP, size) rte_malloc("qede", size, 0)
+#define OSAL_ZALLOC(dev, GFP, size) rte_zmalloc("qede", size, 0)
+#define OSAL_CALLOC(dev, GFP, num, size) rte_calloc("qede", num, size, 0)
+#define OSAL_VZALLOC(dev, size) rte_zmalloc("qede", size, 0)
+#define OSAL_FREE(dev, memory) \
+ do { \
+ rte_free((void *)memory); \
+ memory = OSAL_NULL; \
+ } while (0)
+#define OSAL_VFREE(dev, memory) OSAL_FREE(dev, memory)
+#define OSAL_MEM_ZERO(mem, size) bzero(mem, size)
+#define OSAL_MEMCPY(dst, src, size) rte_memcpy(dst, src, size)
+#define OSAL_MEMCMP(s1, s2, size) memcmp(s1, s2, size)
+#define OSAL_MEMSET(dst, val, length) \
+ memset(dst, val, length)
+
+void *osal_dma_alloc_coherent(struct ecore_dev *, dma_addr_t *, size_t);
+
+void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
+ size_t, int);
+
+void osal_dma_free_mem(struct ecore_dev *edev, dma_addr_t phys);
+
+#define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \
+ osal_dma_alloc_coherent(dev, phys, size)
+
+#define OSAL_DMA_ALLOC_COHERENT_ALIGNED(dev, phys, size, align) \
+ osal_dma_alloc_coherent_aligned(dev, phys, size, align)
+
+#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) \
+ osal_dma_free_mem(dev, phys)
+
+/* HW reads/writes */
+
+#define DIRECT_REG_RD(_dev, _reg_addr) rte_read32(_reg_addr)
+
+#define REG_RD(_p_hwfn, _reg_offset) \
+ DIRECT_REG_RD(_p_hwfn, \
+ ((u8 *)(uintptr_t)(_p_hwfn->regview) + (_reg_offset)))
+
+#define DIRECT_REG_WR16(_reg_addr, _val) rte_write16((_val), (_reg_addr))
+
+#define DIRECT_REG_WR(_dev, _reg_addr, _val) rte_write32((_val), (_reg_addr))
+
+#define DIRECT_REG_WR_RELAXED(_dev, _reg_addr, _val) \
+ rte_write32_relaxed((_val), (_reg_addr))
+
+#define REG_WR(_p_hwfn, _reg_offset, _val) \
+ DIRECT_REG_WR(NULL, \
+ ((u8 *)((uintptr_t)(_p_hwfn->regview)) + (_reg_offset)), (u32)_val)
+
+#define REG_WR16(_p_hwfn, _reg_offset, _val) \
+ DIRECT_REG_WR16(((u8 *)(uintptr_t)(_p_hwfn->regview) + \
+ (_reg_offset)), (u16)_val)
+
+#define DOORBELL(_p_hwfn, _db_addr, _val) \
+ DIRECT_REG_WR_RELAXED((_p_hwfn), \
+ ((u8 *)(uintptr_t)(_p_hwfn->doorbells) + \
+ (_db_addr)), (u32)_val)
+
+#define DIRECT_REG_WR64(hwfn, addr, value) nothing
+#define DIRECT_REG_RD64(hwfn, addr) 0
+
+/* Mutexes */
+
+typedef pthread_mutex_t osal_mutex_t;
+#define OSAL_MUTEX_RELEASE(lock) pthread_mutex_unlock(lock)
+#define OSAL_MUTEX_INIT(lock) pthread_mutex_init(lock, NULL)
+#define OSAL_MUTEX_ACQUIRE(lock) pthread_mutex_lock(lock)
+#define OSAL_MUTEX_ALLOC(hwfn, lock) nothing
+#define OSAL_MUTEX_DEALLOC(lock) nothing
+
+/* Spinlocks */
+
+#define OSAL_SPIN_LOCK_INIT(lock) rte_spinlock_init(lock)
+#define OSAL_SPIN_LOCK(lock) rte_spinlock_lock(lock)
+#define OSAL_SPIN_UNLOCK(lock) rte_spinlock_unlock(lock)
+#define OSAL_SPIN_LOCK_IRQSAVE(lock, flags) \
+ do { \
+ UNUSED(lock); \
+ flags = 0; \
+ UNUSED(flags); \
+ } while (0)
+#define OSAL_SPIN_UNLOCK_IRQSAVE(lock, flags) nothing
+#define OSAL_SPIN_LOCK_ALLOC(hwfn, lock) nothing
+#define OSAL_SPIN_LOCK_DEALLOC(lock) nothing
+
+/* DPC */
+
+#define OSAL_DPC_ALLOC(hwfn) OSAL_ALLOC(hwfn, GFP, sizeof(osal_dpc_t))
+#define OSAL_DPC_INIT(dpc, hwfn) nothing
+#define OSAL_POLL_MODE_DPC(hwfn) nothing
+#define OSAL_DPC_SYNC(hwfn) nothing
+
+/* Lists */
+
+#define OSAL_LIST_SPLICE_INIT(new_list, list) nothing
+#define OSAL_LIST_SPLICE_TAIL_INIT(new_list, list) nothing
+
+typedef struct _osal_list_entry_t {
+ struct _osal_list_entry_t *next, *prev;
+} osal_list_entry_t;
+
+typedef struct osal_list_t {
+ osal_list_entry_t *head, *tail;
+ unsigned long cnt;
+} osal_list_t;
+
+#define OSAL_LIST_INIT(list) \
+ do { \
+ (list)->head = NULL; \
+ (list)->tail = NULL; \
+ (list)->cnt = 0; \
+ } while (0)
+
+#define OSAL_LIST_PUSH_HEAD(entry, list) \
+ do { \
+ (entry)->prev = (osal_list_entry_t *)0; \
+ (entry)->next = (list)->head; \
+ if ((list)->tail == (osal_list_entry_t *)0) { \
+ (list)->tail = (entry); \
+ } else { \
+ (list)->head->prev = (entry); \
+ } \
+ (list)->head = (entry); \
+ (list)->cnt++; \
+ } while (0)
+
+#define OSAL_LIST_PUSH_TAIL(entry, list) \
+ do { \
+ (entry)->next = (osal_list_entry_t *)0; \
+ (entry)->prev = (list)->tail; \
+ if ((list)->tail) { \
+ (list)->tail->next = (entry); \
+ } else { \
+ (list)->head = (entry); \
+ } \
+ (list)->tail = (entry); \
+ (list)->cnt++; \
+ } while (0)
+
+#define OSAL_LIST_FIRST_ENTRY(list, type, field) \
+ (type *)((list)->head)
+
+#define OSAL_LIST_REMOVE_ENTRY(entry, list) \
+ do { \
+ if ((list)->head == (entry)) { \
+ if ((list)->head) { \
+ (list)->head = (list)->head->next; \
+ if ((list)->head) { \
+ (list)->head->prev = (osal_list_entry_t *)0;\
+ } else { \
+ (list)->tail = (osal_list_entry_t *)0; \
+ } \
+ (list)->cnt--; \
+ } \
+ } else if ((list)->tail == (entry)) { \
+ if ((list)->tail) { \
+ (list)->tail = (list)->tail->prev; \
+ if ((list)->tail) { \
+ (list)->tail->next = (osal_list_entry_t *)0;\
+ } else { \
+ (list)->head = (osal_list_entry_t *)0; \
+ } \
+ (list)->cnt--; \
+ } \
+ } else { \
+ (entry)->prev->next = (entry)->next; \
+ (entry)->next->prev = (entry)->prev; \
+ (list)->cnt--; \
+ } \
+ } while (0)
+
+#define OSAL_LIST_IS_EMPTY(list) \
+ ((list)->cnt == 0)
+
+#define OSAL_LIST_NEXT(entry, field, type) \
+ (type *)((&((entry)->field))->next)
+
+/* TODO: Check field, type order */
+
+#define OSAL_LIST_FOR_EACH_ENTRY(entry, list, field, type) \
+ for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field); \
+ entry; \
+ entry = OSAL_LIST_NEXT(entry, field, type))
+
+#define OSAL_LIST_FOR_EACH_ENTRY_SAFE(entry, tmp_entry, list, field, type) \
+ for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field), \
+ tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL; \
+ entry != NULL; \
+ entry = (type *)tmp_entry, \
+ tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL)
+
+/* TODO: OSAL_LIST_INSERT_ENTRY_AFTER */
+#define OSAL_LIST_INSERT_ENTRY_AFTER(new_entry, entry, list) \
+ OSAL_LIST_PUSH_HEAD(new_entry, list)
+
+/* PCI config space */
+
+#define OSAL_PCI_READ_CONFIG_BYTE(dev, address, dst) nothing
+#define OSAL_PCI_READ_CONFIG_WORD(dev, address, dst) nothing
+#define OSAL_PCI_READ_CONFIG_DWORD(dev, address, dst) nothing
+#define OSAL_PCI_FIND_EXT_CAPABILITY(dev, pcie_id) 0
+#define OSAL_PCI_FIND_CAPABILITY(dev, pcie_id) 0
+#define OSAL_PCI_WRITE_CONFIG_WORD(dev, address, val) nothing
+#define OSAL_BAR_SIZE(dev, bar_id) 0
+
+/* Barriers */
+
+#define OSAL_MMIOWB(dev) rte_wmb()
+#define OSAL_BARRIER(dev) rte_compiler_barrier()
+#define OSAL_SMP_RMB(dev) rte_rmb()
+#define OSAL_SMP_WMB(dev) rte_wmb()
+#define OSAL_RMB(dev) rte_rmb()
+#define OSAL_WMB(dev) rte_wmb()
+#define OSAL_DMA_SYNC(dev, addr, length, is_post) nothing
+
+#define OSAL_BIT(nr) (1UL << (nr))
+#define OSAL_BITS_PER_BYTE (8)
+#define OSAL_BITS_PER_UL (sizeof(unsigned long) * OSAL_BITS_PER_BYTE)
+#define OSAL_BITS_PER_UL_MASK (OSAL_BITS_PER_UL - 1)
+
+/* Bitops */
+void qede_set_bit(u32, unsigned long *);
+#define OSAL_SET_BIT(bit, bitmap) \
+ qede_set_bit(bit, bitmap)
+
+void qede_clr_bit(u32, unsigned long *);
+#define OSAL_CLEAR_BIT(bit, bitmap) \
+ qede_clr_bit(bit, bitmap)
+
+bool qede_test_bit(u32, unsigned long *);
+#define OSAL_TEST_BIT(bit, bitmap) \
+ qede_test_bit(bit, bitmap)
+
+u32 qede_find_first_bit(unsigned long *, u32);
+#define OSAL_FIND_FIRST_BIT(bitmap, length) \
+ qede_find_first_bit(bitmap, length)
+
+u32 qede_find_first_zero_bit(unsigned long *, u32);
+#define OSAL_FIND_FIRST_ZERO_BIT(bitmap, length) \
+ qede_find_first_zero_bit(bitmap, length)
+
+#define OSAL_BUILD_BUG_ON(cond) nothing
+#define ETH_ALEN ETHER_ADDR_LEN
+
+#define OSAL_BITMAP_WEIGHT(bitmap, count) 0
+
+#define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn)
+#define OSAL_TRANSCEIVER_UPDATE(hwfn) nothing
+#define OSAL_DCBX_AEN(hwfn, mib_type) nothing
+
+/* SR-IOV channel */
+
+#define OSAL_VF_FLR_UPDATE(hwfn) nothing
+#define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0
+#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol) (0)
+#define OSAL_PF_VF_MSG(hwfn, vfid) 0
+#define OSAL_PF_VF_MALICIOUS(hwfn, vfid) nothing
+#define OSAL_IOV_CHK_UCAST(hwfn, vfid, params) 0
+#define OSAL_IOV_POST_START_VPORT(hwfn, vf, vport_id, opaque_fid) nothing
+#define OSAL_IOV_VF_ACQUIRE(hwfn, vfid) 0
+#define OSAL_IOV_VF_CLEANUP(hwfn, vfid) nothing
+#define OSAL_IOV_VF_VPORT_UPDATE(hwfn, vfid, p_params, p_mask) 0
+#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(_dev_p, _resc_resp) 0
+#define OSAL_IOV_GET_OS_TYPE() 0
+#define OSAL_IOV_VF_MSG_TYPE(hwfn, vfid, vf_msg_type) nothing
+#define OSAL_IOV_PF_RESP_TYPE(hwfn, vfid, pf_resp_type) nothing
+#define OSAL_IOV_VF_VPORT_STOP(hwfn, vf) nothing
+
+u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
+ u8 *input_buf, u32 max_size, u8 *unzip_buf);
+void qede_vf_fill_driver_data(struct ecore_hwfn *, struct vf_pf_resc_request *,
+ struct ecore_vf_acquire_sw_info *);
+void qede_hw_err_notify(struct ecore_hwfn *p_hwfn,
+ enum ecore_hw_err_type err_type);
+#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(_dev_p, _resc_req, _os_info) \
+ qede_vf_fill_driver_data(_dev_p, _resc_req, _os_info)
+
+#define OSAL_UNZIP_DATA(p_hwfn, input_len, buf, max_size, unzip_buf) \
+ qede_unzip_data(p_hwfn, input_len, buf, max_size, unzip_buf)
+
+/* TODO: */
+#define OSAL_SCHEDULE_RECOVERY_HANDLER(hwfn) nothing
+#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) \
+ qede_hw_err_notify(hwfn, err_type)
+
+#define OSAL_NVM_IS_ACCESS_ENABLED(hwfn) (1)
+#define OSAL_NUM_CPUS() 0
+
+/* Utility functions */
+
+#define RTE_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP(size, to_what) RTE_DIV_ROUND_UP(size, to_what)
+#define RTE_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define ROUNDUP(value, to_what) RTE_ROUNDUP((value), (to_what))
+
+unsigned long qede_log2_align(unsigned long n);
+#define OSAL_ROUNDUP_POW_OF_TWO(val) \
+ qede_log2_align(val)
+
+u32 qede_osal_log2(u32);
+#define OSAL_LOG2(val) \
+ qede_osal_log2(val)
+
+#define PRINT(format, ...) printf
+#define PRINT_ERR(format, ...) PRINT
+
+#define OFFSETOF(str, field) __builtin_offsetof(str, field)
+#define OSAL_ASSERT(is_assert) assert(is_assert)
+#define OSAL_BEFORE_PF_START(file, engine) nothing
+#define OSAL_AFTER_PF_STOP(file, engine) nothing
+
+/* Endian macros */
+#define OSAL_CPU_TO_BE32(val) rte_cpu_to_be_32(val)
+#define OSAL_BE32_TO_CPU(val) rte_be_to_cpu_32(val)
+#define OSAL_CPU_TO_LE32(val) rte_cpu_to_le_32(val)
+#define OSAL_CPU_TO_LE16(val) rte_cpu_to_le_16(val)
+#define OSAL_LE32_TO_CPU(val) rte_le_to_cpu_32(val)
+#define OSAL_LE16_TO_CPU(val) rte_le_to_cpu_16(val)
+#define OSAL_CPU_TO_BE64(val) rte_cpu_to_be_64(val)
+
+#define OSAL_ARRAY_SIZE(arr) RTE_DIM(arr)
+#define OSAL_SPRINTF(name, pattern, ...) \
+ sprintf(name, pattern, ##__VA_ARGS__)
+#define OSAL_SNPRINTF(buf, size, format, ...) \
+ snprintf(buf, size, format, ##__VA_ARGS__)
+#define OSAL_STRLEN(string) strlen(string)
+#define OSAL_STRCPY(dst, string) strcpy(dst, string)
+#define OSAL_STRNCPY(dst, string, len) strncpy(dst, string, len)
+#define OSAL_STRCMP(str1, str2) strcmp(str1, str2)
+#define OSAL_STRTOUL(str, base, res) 0
+
+#define OSAL_INLINE inline
+#define OSAL_REG_ADDR(_p_hwfn, _offset) \
+ (void *)((u8 *)(uintptr_t)(_p_hwfn->regview) + (_offset))
+#define OSAL_PAGE_SIZE 4096
+#define OSAL_CACHE_LINE_SIZE RTE_CACHE_LINE_SIZE
+#define OSAL_IOMEM volatile
+#define OSAL_UNUSED __attribute__((unused))
+#define OSAL_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#define OSAL_MIN_T(type, __min1, __min2) \
+ ((type)(__min1) < (type)(__min2) ? (type)(__min1) : (type)(__min2))
+#define OSAL_MAX_T(type, __max1, __max2) \
+ ((type)(__max1) > (type)(__max2) ? (type)(__max1) : (type)(__max2))
+
+void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
+ union ecore_mcp_protocol_stats *);
+#define OSAL_GET_PROTOCOL_STATS(dev, type, stats) \
+ qede_get_mcp_proto_stats(dev, type, stats)
+
+#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
+
+u32 qede_crc32(u32 crc, u8 *ptr, u32 length);
+#define OSAL_CRC32(crc, buf, length) qede_crc32(crc, buf, length)
+#define OSAL_CRC8_POPULATE(table, polynomial) nothing
+#define OSAL_CRC8(table, pdata, nbytes, crc) 0
+#define OSAL_MFW_TLV_REQ(p_hwfn) nothing
+#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
+#define OSAL_MFW_CMD_PREEMPT(p_hwfn) nothing
+#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
+
+#define OSAL_DIV_S64(a, b) ((a) / (b))
+#define OSAL_LLDP_RX_TLVS(p_hwfn, tlv_buf, tlv_size) nothing
+
+#endif /* __BCM_OSAL_H */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/common_hsi.h b/src/spdk/dpdk/drivers/net/qede/base/common_hsi.h
new file mode 100644
index 00000000..ca8e59db
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/common_hsi.h
@@ -0,0 +1,1642 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __COMMON_HSI__
+#define __COMMON_HSI__
+/********************************/
+/* PROTOCOL COMMON FW CONSTANTS */
+/********************************/
+
+/* Temporarily here should be added to HSI automatically by resource allocation
+ * tool.
+ */
+#define T_TEST_AGG_INT_TEMP 6
+#define M_TEST_AGG_INT_TEMP 8
+#define U_TEST_AGG_INT_TEMP 6
+#define X_TEST_AGG_INT_TEMP 14
+#define Y_TEST_AGG_INT_TEMP 4
+#define P_TEST_AGG_INT_TEMP 4
+
+#define X_FINAL_CLEANUP_AGG_INT 1
+
+#define EVENT_RING_PAGE_SIZE_BYTES 4096
+
+#define NUM_OF_GLOBAL_QUEUES 128
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
+
+#define ISCSI_CDU_TASK_SEG_TYPE 0
+#define FCOE_CDU_TASK_SEG_TYPE 0
+#define RDMA_CDU_TASK_SEG_TYPE 1
+
+#define FW_ASSERT_GENERAL_ATTN_IDX 32
+
+#define MAX_PINNED_CCFC 32
+
+#define EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE 3
+
+/* Queue Zone sizes in bytes */
+#define TSTORM_QZONE_SIZE 8 /*tstorm_scsi_queue_zone*/
+#define MSTORM_QZONE_SIZE 16 /*mstorm_eth_queue_zone. Used only for RX
+ *producer of VFs in backward compatibility
+ *mode.
+ */
+#define USTORM_QZONE_SIZE 8 /*ustorm_eth_queue_zone*/
+#define XSTORM_QZONE_SIZE 8 /*xstorm_eth_queue_zone*/
+#define YSTORM_QZONE_SIZE 0
+#define PSTORM_QZONE_SIZE 0
+
+/*Log of mstorm default VF zone size.*/
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
+/*Maximum number of RX queues that can be allocated to VF by default*/
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
+/*Maximum number of RX queues that can be allocated to VF with doubled VF zone
+ * size. Up to 96 VF supported in this mode
+ */
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
+/*Maximum number of RX queues that can be allocated to VF with 4 VF zone size.
+ * Up to 48 VF supported in this mode
+ */
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
+
+
+/********************************/
+/* CORE (LIGHT L2) FW CONSTANTS */
+/********************************/
+
+#define CORE_LL2_MAX_RAMROD_PER_CON 8
+#define CORE_LL2_TX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_CQE_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_NUM_NEXT_PAGE_BDS 1
+
+#define CORE_LL2_TX_MAX_BDS_PER_PACKET 12
+
+#define CORE_SPQE_PAGE_SIZE_BYTES 4096
+
+/*
+ * Usually LL2 queues are opened in pairs TX-RX.
+ * There is a hard restriction on number of RX queues (limited by Tstorm RAM)
+ * and TX counters (Pstorm RAM).
+ * Number of TX queues is almost unlimited.
+ * The constants are different so as to allow asymmetric LL2 connections
+ */
+
+#define MAX_NUM_LL2_RX_QUEUES 48
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
+
+
+/****************************************************************************/
+/* Include firmware version number only- do not add constants here to avoid */
+/* redundunt compilations */
+/****************************************************************************/
+
+
+#define FW_MAJOR_VERSION 8
+#define FW_MINOR_VERSION 33
+#define FW_REVISION_VERSION 12
+#define FW_ENGINEERING_VERSION 0
+
+/***********************/
+/* COMMON HW CONSTANTS */
+/***********************/
+
+/* PCI functions */
+#define MAX_NUM_PORTS_BB (2)
+#define MAX_NUM_PORTS_K2 (4)
+#define MAX_NUM_PORTS_E5 (4)
+#define MAX_NUM_PORTS (MAX_NUM_PORTS_E5)
+
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_E5 (16)
+#define MAX_NUM_PFS (MAX_NUM_PFS_E5)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+
+#define MAX_NUM_VFS_BB (120)
+#define MAX_NUM_VFS_K2 (192)
+#define MAX_NUM_VFS_E4 (MAX_NUM_VFS_K2)
+#define MAX_NUM_VFS_E5 (240)
+#define COMMON_MAX_NUM_VFS (MAX_NUM_VFS_E5)
+
+#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
+#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS_E4)
+
+/* in both BB and K2, the VF number starts from 16. so for arrays containing all
+ * possible PFs and VFs - we need a constant for this size
+ */
+#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
+#define MAX_FUNCTION_NUMBER_E4 (MAX_NUM_PFS + MAX_NUM_VFS_E4)
+#define MAX_FUNCTION_NUMBER_E5 (MAX_NUM_PFS + MAX_NUM_VFS_E5)
+#define COMMON_MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS_E5)
+
+#define MAX_NUM_VPORTS_K2 (208)
+#define MAX_NUM_VPORTS_BB (160)
+#define MAX_NUM_VPORTS_E4 (MAX_NUM_VPORTS_K2)
+#define MAX_NUM_VPORTS_E5 (256)
+#define COMMON_MAX_NUM_VPORTS (MAX_NUM_VPORTS_E5)
+
+#define MAX_NUM_L2_QUEUES_BB (256)
+#define MAX_NUM_L2_QUEUES_K2 (320)
+#define MAX_NUM_L2_QUEUES_E5 (320) /* TODO_E5_VITALY - fix to 512 */
+#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_E5)
+
+/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
+#define NUM_PHYS_TCS_4PORT_K2 4
+#define NUM_PHYS_TCS_4PORT_TX_E5 6
+#define NUM_PHYS_TCS_4PORT_RX_E5 4
+#define NUM_OF_PHYS_TCS 8
+#define PURE_LB_TC NUM_OF_PHYS_TCS
+#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_TCS_4PORT_TX_E5 (NUM_PHYS_TCS_4PORT_TX_E5 + 1)
+#define NUM_TCS_4PORT_RX_E5 (NUM_PHYS_TCS_4PORT_RX_E5 + 1)
+#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
+
+/* CIDs */
+#define NUM_OF_CONNECTION_TYPES_E4 (8)
+#define NUM_OF_CONNECTION_TYPES_E5 (16)
+#define NUM_OF_TASK_TYPES (8)
+#define NUM_OF_LCIDS (320)
+#define NUM_OF_LTIDS (320)
+
+/* Global PXP windows (GTT) */
+#define NUM_OF_GTT 19
+#define GTT_DWORD_SIZE_BITS 10
+#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
+#define GTT_DWORD_SIZE (1 << GTT_DWORD_SIZE_BITS)
+
+/* Tools Version */
+#define TOOLS_VERSION 10
+/*****************/
+/* CDU CONSTANTS */
+/*****************/
+
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
+
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
+
+#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0)
+#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
+
+
+/*****************/
+/* DQ CONSTANTS */
+/*****************/
+
+/* DEMS */
+#define DQ_DEMS_LEGACY 0
+#define DQ_DEMS_TOE_MORE_TO_SEND 3
+#define DQ_DEMS_TOE_LOCAL_ADV_WND 4
+#define DQ_DEMS_ROCE_CQ_CONS 7
+
+/* XCM agg val selection (HW) */
+#define DQ_XCM_AGG_VAL_SEL_WORD2 0
+#define DQ_XCM_AGG_VAL_SEL_WORD3 1
+#define DQ_XCM_AGG_VAL_SEL_WORD4 2
+#define DQ_XCM_AGG_VAL_SEL_WORD5 3
+#define DQ_XCM_AGG_VAL_SEL_REG3 4
+#define DQ_XCM_AGG_VAL_SEL_REG4 5
+#define DQ_XCM_AGG_VAL_SEL_REG5 6
+#define DQ_XCM_AGG_VAL_SEL_REG6 7
+
+/* XCM agg val selection (FW) */
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_CONS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_TX_BD_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
+#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
+
+/* UCM agg val selection (HW) */
+#define DQ_UCM_AGG_VAL_SEL_WORD0 0
+#define DQ_UCM_AGG_VAL_SEL_WORD1 1
+#define DQ_UCM_AGG_VAL_SEL_WORD2 2
+#define DQ_UCM_AGG_VAL_SEL_WORD3 3
+#define DQ_UCM_AGG_VAL_SEL_REG0 4
+#define DQ_UCM_AGG_VAL_SEL_REG1 5
+#define DQ_UCM_AGG_VAL_SEL_REG2 6
+#define DQ_UCM_AGG_VAL_SEL_REG3 7
+
+/* UCM agg val selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2
+#define DQ_UCM_ETH_PMD_RX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD3
+#define DQ_UCM_ROCE_CQ_CONS_CMD DQ_UCM_AGG_VAL_SEL_REG0
+#define DQ_UCM_ROCE_CQ_PROD_CMD DQ_UCM_AGG_VAL_SEL_REG2
+
+/* TCM agg val selection (HW) */
+#define DQ_TCM_AGG_VAL_SEL_WORD0 0
+#define DQ_TCM_AGG_VAL_SEL_WORD1 1
+#define DQ_TCM_AGG_VAL_SEL_WORD2 2
+#define DQ_TCM_AGG_VAL_SEL_WORD3 3
+#define DQ_TCM_AGG_VAL_SEL_REG1 4
+#define DQ_TCM_AGG_VAL_SEL_REG2 5
+#define DQ_TCM_AGG_VAL_SEL_REG6 6
+#define DQ_TCM_AGG_VAL_SEL_REG9 7
+
+/* TCM agg val selection (FW) */
+#define DQ_TCM_L2B_BD_PROD_CMD DQ_TCM_AGG_VAL_SEL_WORD1
+#define DQ_TCM_ROCE_RQ_PROD_CMD DQ_TCM_AGG_VAL_SEL_WORD0
+
+/* XCM agg counter flag selection (HW) */
+#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
+#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
+#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
+#define DQ_XCM_AGG_FLG_SHIFT_CF13 3
+#define DQ_XCM_AGG_FLG_SHIFT_CF18 4
+#define DQ_XCM_AGG_FLG_SHIFT_CF19 5
+#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
+#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
+
+/* XCM agg counter flag selection (FW) */
+#define DQ_XCM_ETH_DQ_CF_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_DQ_CF_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_TERMINATE_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_FCOE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_DQ_FLUSH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ISCSI_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+
+/* UCM agg counter flag selection (HW) */
+#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
+#define DQ_UCM_AGG_FLG_SHIFT_CF1 1
+#define DQ_UCM_AGG_FLG_SHIFT_CF3 2
+#define DQ_UCM_AGG_FLG_SHIFT_CF4 3
+#define DQ_UCM_AGG_FLG_SHIFT_CF5 4
+#define DQ_UCM_AGG_FLG_SHIFT_CF6 5
+#define DQ_UCM_AGG_FLG_SHIFT_RULE0EN 6
+#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7
+
+/* UCM agg counter flag selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ROCE_CQ_ARM_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF3)
+#define DQ_UCM_TOE_SLOW_PATH_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_TOE_DQ_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+
+/* TCM agg counter flag selection (HW) */
+#define DQ_TCM_AGG_FLG_SHIFT_CF0 0
+#define DQ_TCM_AGG_FLG_SHIFT_CF1 1
+#define DQ_TCM_AGG_FLG_SHIFT_CF2 2
+#define DQ_TCM_AGG_FLG_SHIFT_CF3 3
+#define DQ_TCM_AGG_FLG_SHIFT_CF4 4
+#define DQ_TCM_AGG_FLG_SHIFT_CF5 5
+#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
+#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
+
+/* TCM agg counter flag selection (FW) */
+#define DQ_TCM_FCOE_FLUSH_Q0_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_FCOE_DUMMY_TIMER_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF2)
+#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_ISCSI_FLUSH_Q0_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_TOE_FLUSH_Q0_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_IWARP_POST_RQ_CF_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1)
+
+/* PWM address mapping */
+#define DQ_PWM_OFFSET_DPM_BASE 0x0
+#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_XCM16_BASE 0x40
+#define DQ_PWM_OFFSET_XCM32_BASE 0x44
+#define DQ_PWM_OFFSET_UCM16_BASE 0x48
+#define DQ_PWM_OFFSET_UCM32_BASE 0x4C
+#define DQ_PWM_OFFSET_UCM16_4 0x50
+#define DQ_PWM_OFFSET_TCM16_BASE 0x58
+#define DQ_PWM_OFFSET_TCM32_BASE 0x5C
+#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
+#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
+#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
+
+#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT (DQ_PWM_OFFSET_UCM16_4)
+#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT (DQ_PWM_OFFSET_UCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
+#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
+#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
+
+#define DQ_REGION_SHIFT (12)
+
+/* DPM */
+#define DQ_DPM_WQE_BUFF_SIZE (320)
+
+/* Conn type ranges */
+#define DQ_CONN_TYPE_RANGE_SHIFT (4)
+
+/*****************/
+/* QM CONSTANTS */
+/*****************/
+
+/* number of TX queues in the QM */
+#define MAX_QM_TX_QUEUES_K2 512
+#define MAX_QM_TX_QUEUES_BB 448
+#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
+
+/* number of Other queues in the QM */
+#define MAX_QM_OTHER_QUEUES_BB 64
+#define MAX_QM_OTHER_QUEUES_K2 128
+#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
+
+/* number of queues in a PF queue group */
+#define QM_PF_QUEUE_GROUP_SIZE 8
+
+/* the size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE 4
+
+/* base number of Tx PQs in the CM PQ representation.
+ * should be used when storing PQ IDs in CM PQ registers and context
+ */
+#define CM_TX_PQ_BASE 0x200
+
+/* number of global Vport/QCN rate limiters */
+#define MAX_QM_GLOBAL_RLS 256
+
+/* QM registers data */
+#define QM_LINE_CRD_REG_WIDTH 16
+#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_WIDTH 24
+#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_WIDTH 32
+#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_WIDTH 32
+#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1))
+
+/*****************/
+/* CAU CONSTANTS */
+/*****************/
+
+#define CAU_FSM_ETH_RX 0
+#define CAU_FSM_ETH_TX 1
+
+/* Number of Protocol Indices per Status Block */
+#define PIS_PER_SB_E4 12
+#define PIS_PER_SB_E5 8
+#define MAX_PIS_PER_SB_E4 OSAL_MAX_T(PIS_PER_SB_E4, PIS_PER_SB_E5)
+
+/* fsm is stopped or not valid for this sb */
+#define CAU_HC_STOPPED_STATE 3
+/* fsm is working without interrupt coalescing for this sb*/
+#define CAU_HC_DISABLE_STATE 4
+/* fsm is working with interrupt coalescing for this sb*/
+#define CAU_HC_ENABLE_STATE 0
+
+
+/*****************/
+/* IGU CONSTANTS */
+/*****************/
+
+#define MAX_SB_PER_PATH_K2 (368)
+#define MAX_SB_PER_PATH_BB (288)
+#define MAX_SB_PER_PATH_E5 (512)
+#define MAX_TOT_SB_PER_PATH MAX_SB_PER_PATH_E5
+
+#define MAX_SB_PER_PF_MIMD 129
+#define MAX_SB_PER_PF_SIMD 64
+#define MAX_SB_PER_VF 64
+
+/* Memory addresses on the BAR for the IGU Sub Block */
+#define IGU_MEM_BASE 0x0000
+
+#define IGU_MEM_MSIX_BASE 0x0000
+#define IGU_MEM_MSIX_UPPER 0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE 0x0200
+#define IGU_MEM_PBA_MSIX_UPPER 0x0202
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
+
+#define IGU_CMD_INT_ACK_BASE 0x0400
+#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
+ MAX_TOT_SB_PER_PATH - \
+ 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
+
+#define IGU_CMD_PROD_UPD_BASE 0x0600
+#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE + \
+ MAX_TOT_SB_PER_PATH - \
+ 1)
+#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
+
+/*****************/
+/* PXP CONSTANTS */
+/*****************/
+
+/* Bars for Blocks */
+#define PXP_BAR_GRC 0
+#define PXP_BAR_TSDM 0
+#define PXP_BAR_USDM 0
+#define PXP_BAR_XSDM 0
+#define PXP_BAR_MSDM 0
+#define PXP_BAR_YSDM 0
+#define PXP_BAR_PSDM 0
+#define PXP_BAR_IGU 0
+#define PXP_BAR_DQ 1
+
+/* PTT and GTT */
+#define PXP_PER_PF_ENTRY_SIZE 8
+#define PXP_NUM_GLOBAL_WINDOWS 243
+#define PXP_GLOBAL_ENTRY_SIZE 4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4
+#define PXP_PF_WINDOW_ADMIN_START 0
+#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000
+#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \
+ PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0
+#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \
+ PXP_PER_PF_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
+ PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \
+ PXP_GLOBAL_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
+ (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
+ PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
+#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0
+#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4
+#define PXP_PF_ME_OPAQUE_ADDR 0x1f8
+#define PXP_PF_ME_CONCRETE_ADDR 0x1fc
+
+#define PXP_NUM_PF_WINDOWS 12
+
+#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS
+#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
+ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_START + \
+ PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
+
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
+ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
+ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
+ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
+ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+
+/* PF BAR */
+#define PXP_BAR0_START_GRC 0x0000
+#define PXP_BAR0_GRC_LENGTH 0x1C00000
+#define PXP_BAR0_END_GRC \
+ (PXP_BAR0_START_GRC + PXP_BAR0_GRC_LENGTH - 1)
+
+#define PXP_BAR0_START_IGU 0x1C00000
+#define PXP_BAR0_IGU_LENGTH 0x10000
+#define PXP_BAR0_END_IGU \
+ (PXP_BAR0_START_IGU + PXP_BAR0_IGU_LENGTH - 1)
+
+#define PXP_BAR0_START_TSDM 0x1C80000
+#define PXP_BAR0_SDM_LENGTH 0x40000
+#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000
+#define PXP_BAR0_END_TSDM \
+ (PXP_BAR0_START_TSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_MSDM 0x1D00000
+#define PXP_BAR0_END_MSDM \
+ (PXP_BAR0_START_MSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_USDM 0x1D80000
+#define PXP_BAR0_END_USDM \
+ (PXP_BAR0_START_USDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_XSDM 0x1E00000
+#define PXP_BAR0_END_XSDM \
+ (PXP_BAR0_START_XSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_YSDM 0x1E80000
+#define PXP_BAR0_END_YSDM \
+ (PXP_BAR0_START_YSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_PSDM 0x1F00000
+#define PXP_BAR0_END_PSDM \
+ (PXP_BAR0_START_PSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_FIRST_INVALID_ADDRESS \
+ (PXP_BAR0_END_PSDM + 1)
+
+/* VF BAR */
+#define PXP_VF_BAR0 0
+
+#define PXP_VF_BAR0_START_IGU 0
+#define PXP_VF_BAR0_IGU_LENGTH 0x3000
+#define PXP_VF_BAR0_END_IGU \
+ (PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ 0x3000
+#define PXP_VF_BAR0_DQ_LENGTH 0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+ (PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS \
+ (PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
+#define PXP_VF_BAR0_END_DQ \
+ (PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B \
+ (PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B \
+ (PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B \
+ (PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B \
+ (PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B \
+ (PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B \
+ (PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_GRC 0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH 0x200
+#define PXP_VF_BAR0_END_GRC \
+ (PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
+
+#define PXP_VF_BAR0_START_IGU2 0x10000
+#define PXP_VF_BAR0_IGU2_LENGTH 0xD000
+#define PXP_VF_BAR0_END_IGU2 \
+ (PXP_VF_BAR0_START_IGU2 + PXP_VF_BAR0_IGU2_LENGTH - 1)
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+
+// ILT Records
+#define PXP_NUM_ILT_RECORDS_BB 7600
+#define PXP_NUM_ILT_RECORDS_K2 11000
+#define MAX_NUM_ILT_RECORDS \
+ OSAL_MAX_T(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+
+#define PXP_NUM_ILT_RECORDS_E5 13664
+
+
+// Host Interface
+#define PXP_QUEUES_ZONE_MAX_NUM_E4 320
+#define PXP_QUEUES_ZONE_MAX_NUM_E5 512
+
+
+/*****************/
+/* PRM CONSTANTS */
+/*****************/
+#define PRM_DMA_PAD_BYTES_NUM 2
+/*****************/
+/* SDMs CONSTANTS */
+/*****************/
+
+
+#define SDM_OP_GEN_TRIG_NONE 0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
+#define SDM_OP_GEN_TRIG_AGG_INT 2
+#define SDM_OP_GEN_TRIG_LOADER 4
+#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
+#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9
+
+/***********************************************************/
+/* Completion types */
+/***********************************************************/
+
+#define SDM_COMP_TYPE_NONE 0
+#define SDM_COMP_TYPE_WAKE_THREAD 1
+#define SDM_COMP_TYPE_AGG_INT 2
+/* Send direct message to local CM and/or remote CMs. Destinations are defined
+ * by vector in CompParams.
+ */
+#define SDM_COMP_TYPE_CM 3
+#define SDM_COMP_TYPE_LOADER 4
+/* Send direct message to PXP (like "internal write" command) to write to remote
+ * Storm RAM via remote SDM
+ */
+#define SDM_COMP_TYPE_PXP 5
+/* Indicate error per thread */
+#define SDM_COMP_TYPE_INDICATE_ERROR 6
+#define SDM_COMP_TYPE_RELEASE_THREAD 7
+/* Write to local RAM as a completion */
+#define SDM_COMP_TYPE_RAM 8
+#define SDM_COMP_TYPE_INC_ORDER_CNT 9 /* Applicable only for E4 */
+
+
+/******************/
+/* PBF CONSTANTS */
+/******************/
+
+/* Number of PBF command queue lines. Each line is 32B. */
+#define PBF_MAX_CMD_LINES_E4 3328
+#define PBF_MAX_CMD_LINES_E5 5280
+
+/* Number of BTB blocks. Each block is 256B. */
+#define BTB_MAX_BLOCKS 1440
+
+/*****************/
+/* PRS CONSTANTS */
+/*****************/
+
+#define PRS_GFT_CAM_LINES_NO_MATCH 31
+
+/*
+ * Interrupt coalescing TimeSet
+ */
+struct coalescing_timeset {
+ u8 value;
+/* Interrupt coalescing TimeSet (timeout_ticks = TimeSet shl (TimerRes+1)) */
+#define COALESCING_TIMESET_TIMESET_MASK 0x7F
+#define COALESCING_TIMESET_TIMESET_SHIFT 0
+/* Only if this flag is set, timeset will take effect */
+#define COALESCING_TIMESET_VALID_MASK 0x1
+#define COALESCING_TIMESET_VALID_SHIFT 7
+};
+
+struct common_queue_zone {
+ __le16 ring_drv_data_consumer;
+ __le16 reserved;
+};
+
+/*
+ * ETH Rx producers data
+ */
+struct eth_rx_prod_data {
+ __le16 bd_prod /* BD producer. */;
+ __le16 cqe_prod /* CQE producer. */;
+};
+
+
+struct tcp_ulp_connect_done_params {
+ __le16 mss;
+ u8 snd_wnd_scale;
+ u8 flags;
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1
+};
+
+struct iscsi_connect_done_results {
+ __le16 icid /* Context ID of the connection */;
+ __le16 conn_id /* Driver connection ID */;
+/* decided tcp params after connect done */
+ struct tcp_ulp_connect_done_params params;
+};
+
+
+struct iscsi_eqe_data {
+ __le16 icid /* Context ID of the connection */;
+ __le16 conn_id /* Driver connection ID */;
+ __le16 reserved;
+/* error code - relevant only if the opcode indicates its an error */
+ u8 error_code;
+ u8 error_pdu_opcode_reserved;
+/* The processed PDUs opcode on which happened the error - updated for specific
+ * error codes, by default=0xFF
+ */
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
+/* Indication for driver is the error_pdu_opcode field has valid value */
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
+#define ISCSI_EQE_DATA_RESERVED0_MASK 0x1
+#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
+};
+
+
+/*
+ * Multi function mode
+ */
+enum mf_mode {
+ ERROR_MODE /* Unsupported mode */,
+ MF_OVLAN /* Multi function based on outer VLAN */,
+ MF_NPAR /* Multi function based on MAC address (NIC partitioning) */,
+ MAX_MF_MODE
+};
+
+/* Per-protocol connection types */
+enum protocol_type {
+ PROTOCOLID_ISCSI /* iSCSI */,
+ PROTOCOLID_FCOE /* FCoE */,
+ PROTOCOLID_ROCE /* RoCE */,
+ PROTOCOLID_CORE /* Core (light L2, slow path core) */,
+ PROTOCOLID_ETH /* Ethernet */,
+ PROTOCOLID_IWARP /* iWARP */,
+ PROTOCOLID_TOE /* TOE */,
+ PROTOCOLID_PREROCE /* Pre (tapeout) RoCE */,
+ PROTOCOLID_COMMON /* ProtocolCommon */,
+ PROTOCOLID_TCP /* TCP */,
+ MAX_PROTOCOL_TYPE
+};
+
+
+struct regpair {
+ __le32 lo /* low word for reg-pair */;
+ __le32 hi /* high word for reg-pair */;
+};
+
+
+
+/*
+ * Ustorm Queue Zone
+ */
+struct ustorm_eth_queue_zone {
+/* Rx interrupt coalescing TimeSet */
+ struct coalescing_timeset int_coalescing_timeset;
+ u8 reserved[3];
+};
+
+
+struct ustorm_queue_zone {
+ struct ustorm_eth_queue_zone eth;
+ struct common_queue_zone common;
+};
+
+/* status block structure */
+struct cau_pi_entry {
+ __le32 prod;
+/* A per protocol indexPROD value. */
+#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
+/* This value determines the TimeSet that the PI is associated with */
+#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+/* Select the FSM within the SB */
+#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
+/* Select the FSM within the SB */
+#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT 24
+};
+
+/* status block structure */
+struct cau_sb_entry {
+ __le32 data;
+/* The SB PROD index which is sent to the IGU. */
+#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
+#define CAU_SB_ENTRY_STATE0_MASK 0xF /* RX state */
+#define CAU_SB_ENTRY_STATE0_SHIFT 24
+#define CAU_SB_ENTRY_STATE1_MASK 0xF /* TX state */
+#define CAU_SB_ENTRY_STATE1_SHIFT 28
+ __le32 params;
+/* Indicates the RX TimeSet that this SB is associated with. */
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+/* Indicates the TX TimeSet that this SB is associated with. */
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+/* This value will determine the RX FSM timer resolution in ticks */
+#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
+/* This value will determine the TX FSM timer resolution in ticks */
+#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
+#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
+/* If set then indicates that the TPH STAG is equal to the SB number. Otherwise
+ * the STAG will be equal to all ones.
+ */
+#define CAU_SB_ENTRY_TPH_MASK 0x1
+#define CAU_SB_ENTRY_TPH_SHIFT 31
+};
+
+
+/*
+ * Igu cleanup bit values to distinguish between clean or producer consumer
+ * update.
+ */
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+
+/* core doorbell data */
+struct core_db_data {
+ u8 params;
+/* destination of doorbell (use enum db_dest) */
+#define CORE_DB_DATA_DEST_MASK 0x3
+#define CORE_DB_DATA_DEST_SHIFT 0
+/* aggregative command to CM (use enum db_agg_cmd_sel) */
+#define CORE_DB_DATA_AGG_CMD_MASK 0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT 2
+#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
+#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
+#define CORE_DB_DATA_RESERVED_MASK 0x1
+#define CORE_DB_DATA_RESERVED_SHIFT 5
+/* aggregative value selection */
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+/* bit for every DQ counter flags in CM context that DQ can increment */
+ u8 agg_flags;
+ __le16 spq_prod;
+};
+
+/* Enum of doorbell aggregative command selection */
+enum db_agg_cmd_sel {
+ DB_AGG_CMD_NOP /* No operation */,
+ DB_AGG_CMD_SET /* Set the value */,
+ DB_AGG_CMD_ADD /* Add the value */,
+ DB_AGG_CMD_MAX /* Set max of current and new value */,
+ MAX_DB_AGG_CMD_SEL
+};
+
+/* Enum of doorbell destination */
+enum db_dest {
+ DB_DEST_XCM /* TX doorbell to XCM */,
+ DB_DEST_UCM /* RX doorbell to UCM */,
+ DB_DEST_TCM /* RX doorbell to TCM */,
+ DB_NUM_DESTINATIONS,
+ MAX_DB_DEST
+};
+
+
+/*
+ * Enum of doorbell DPM types
+ */
+enum db_dpm_type {
+ DPM_LEGACY /* Legacy DPM- to Xstorm RAM */,
+ DPM_RDMA /* RDMA DPM (only RoCE in E4) - to NIG */,
+/* L2 DPM inline- to PBF, with packet data on doorbell */
+ DPM_L2_INLINE,
+ DPM_L2_BD /* L2 DPM with BD- to PBF, with TX BD data on doorbell */,
+ MAX_DB_DPM_TYPE
+};
+
+/*
+ * Structure for doorbell data, in L2 DPM mode, for the first doorbell in a DPM
+ * burst
+ */
+struct db_l2_dpm_data {
+ __le16 icid /* internal CID */;
+ __le16 bd_prod /* bd producer value to update */;
+ __le32 params;
+/* Size in QWORD-s of the DPM burst */
+#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
+#define DB_L2_DPM_DATA_SIZE_SHIFT 0
+/* Type of DPM transaction (DPM_L2_INLINE or DPM_L2_BD) (use enum db_dpm_type)
+ */
+#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
+#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
+#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF /* number of BD-s */
+#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
+/* size of the packet to be transmitted in bytes */
+#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
+#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
+#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
+/* In DPM_L2_BD mode: the number of SGE-s */
+#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
+#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
+/* Flag indicating whether to enable GFS search */
+#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
+};
+
+/*
+ * Structure for SGE in a DPM doorbell of type DPM_L2_BD
+ */
+struct db_l2_dpm_sge {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ __le16 bitfields;
+/* The TPH STAG index value */
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
+#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
+#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
+/* Indicate if ST hint is requested or not */
+#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
+#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
+#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
+#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
+ __le32 reserved2;
+};
+
+/* Structure for doorbell address, in legacy mode */
+struct db_legacy_addr {
+ __le32 addr;
+#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+/* doorbell extraction mode specifier- 0 if not used */
+#define DB_LEGACY_ADDR_DEMS_MASK 0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT 2
+#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF /* internal CID */
+#define DB_LEGACY_ADDR_ICID_SHIFT 5
+};
+
+/*
+ * Structure for doorbell address, in PWM mode
+ */
+struct db_pwm_addr {
+ __le32 addr;
+#define DB_PWM_ADDR_RESERVED0_MASK 0x7
+#define DB_PWM_ADDR_RESERVED0_SHIFT 0
+/* Offset in PWM address space */
+#define DB_PWM_ADDR_OFFSET_MASK 0x7F
+#define DB_PWM_ADDR_OFFSET_SHIFT 3
+#define DB_PWM_ADDR_WID_MASK 0x3 /* Window ID */
+#define DB_PWM_ADDR_WID_SHIFT 10
+#define DB_PWM_ADDR_DPI_MASK 0xFFFF /* Doorbell page ID */
+#define DB_PWM_ADDR_DPI_SHIFT 12
+#define DB_PWM_ADDR_RESERVED1_MASK 0xF
+#define DB_PWM_ADDR_RESERVED1_SHIFT 28
+};
+
+/*
+ * Parameters to RDMA firmware, passed in EDPM doorbell
+ */
+struct db_rdma_dpm_params {
+ __le32 params;
+/* Size in QWORD-s of the DPM burst */
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
+/* Type of DPM transacation (DPM_RDMA) (use enum db_dpm_type) */
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
+/* opcode for RDMA operation */
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
+/* the size of the WQE payload in bytes */
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
+/* RoCE completion flag */
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+/* Connection type is iWARP */
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
+};
+
+/*
+ * Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
+ * DPM burst
+ */
+struct db_rdma_dpm_data {
+ __le16 icid /* internal CID */;
+ __le16 prod_val /* aggregated value to update */;
+/* parameters passed to RDMA firmware */
+ struct db_rdma_dpm_params params;
+};
+
+/* Igu interrupt command */
+enum igu_int_cmd {
+ IGU_INT_ENABLE = 0,
+ IGU_INT_DISABLE = 1,
+ IGU_INT_NOP = 2,
+ IGU_INT_NOP2 = 3,
+ MAX_IGU_INT_CMD
+};
+
+/* IGU producer or consumer update command */
+struct igu_prod_cons_update {
+ __le32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
+/* interrupt enable/disable/nop (use enum igu_int_cmd) */
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
+/* (use enum igu_seg_access) */
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
+/* must always be set cleared (use enum command_type_bit) */
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
+};
+
+/* Igu segments access for default status block only */
+enum igu_seg_access {
+ IGU_SEG_ACCESS_REG = 0,
+ IGU_SEG_ACCESS_ATTN = 1,
+ MAX_IGU_SEG_ACCESS
+};
+
+
+/*
+ * Enumeration for L3 type field of parsing_and_err_flags_union. L3Type:
+ * 0 - unknown (not ip) ,1 - Ipv4, 2 - Ipv6 (this field can be filled according
+ * to the last-ethertype)
+ */
+enum l3_type {
+ e_l3_type_unknown,
+ e_l3_type_ipv4,
+ e_l3_type_ipv6,
+ MAX_L3_TYPE
+};
+
+
+/*
+ * Enumeration for l4Protocol field of parsing_and_err_flags_union. L4-protocol
+ * 0 - none, 1 - TCP, 2- UDP. if the packet is IPv4 fragment, and its not the
+ * first fragment, the protocol-type should be set to none.
+ */
+enum l4_protocol {
+ e_l4_protocol_none,
+ e_l4_protocol_tcp,
+ e_l4_protocol_udp,
+ MAX_L4_PROTOCOL
+};
+
+
+/*
+ * Parsing and error flags field.
+ */
+struct parsing_and_err_flags {
+ __le16 flags;
+/* L3Type: 0 - unknown (not ip) ,1 - Ipv4, 2 - Ipv6 (this field can be filled
+ * according to the last-ethertype) (use enum l3_type)
+ */
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
+/* L4-protocol 0 - none, 1 - TCP, 2- UDP. if the packet is IPv4 fragment, and
+ * its not the first fragment, the protocol-type should be set to none.
+ * (use enum l4_protocol)
+ */
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
+/* Set if the packet is IPv4 fragment. */
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
+/* Set if VLAN tag exists. Invalid if tunnel type are IP GRE or IP GENEVE. */
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
+/* Set if L4 checksum was calculated. */
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
+/* Set for PTP packet. */
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
+/* Set if PTP timestamp recorded. */
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
+/* Set if either version-mismatch or hdr-len-error or ipv4-cksm is set or ipv6
+ * ver mismatch
+ */
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
+/* Set if L4 checksum validation failed. Valid only if L4 checksum was
+ * calculated.
+ */
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
+/* Set if GRE/VXLAN/GENEVE tunnel detected. */
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
+/* Set if VLAN tag exists in tunnel header. */
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
+/* Set if either tunnel-ipv4-version-mismatch or tunnel-ipv4-hdr-len-error or
+ * tunnel-ipv4-cksm is set or tunneling ipv6 ver mismatch
+ */
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
+/* Set if GRE or VXLAN/GENEVE UDP checksum was calculated. */
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+/* Set if tunnel L4 checksum validation failed. Valid only if tunnel L4 checksum
+ * was calculated.
+ */
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
+};
+
+
+/*
+ * Parsing error flags bitmap.
+ */
+struct parsing_err_flags {
+ __le16 flags;
+/* MAC error indication */
+#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0
+/* truncation error indication */
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1
+/* packet too small indication */
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2
+/* Header Missing Tag */
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5
+/* set this error if: 1. total-len is smaller than hdr-len 2. total-ip-len
+ * indicates number that is bigger than real packet length 3. tunneling:
+ * total-ip-length of the outer header points to offset that is smaller than
+ * the one pointed to by the total-ip-len of the inner hdr.
+ */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7
+/* from frame cracker output. for either TCP or UDP */
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9
+/* cksm calculated and value isn't 0xffff or L4-cksm-wasnt-calculated for any
+ * reason, like: udp/ipv4 checksum is 0 etc.
+ */
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12
+/* set if geneve option size was over 32 byte */
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
+};
+
+
+/*
+ * Pb context
+ */
+struct pb_context {
+ __le32 crc[4];
+};
+
+/* Concrete Function ID. */
+struct pxp_concrete_fid {
+ __le16 fid;
+#define PXP_CONCRETE_FID_PFID_MASK 0xF /* Parent PFID */
+#define PXP_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_CONCRETE_FID_PORT_MASK 0x3 /* port number */
+#define PXP_CONCRETE_FID_PORT_SHIFT 4
+#define PXP_CONCRETE_FID_PATH_MASK 0x1 /* path number */
+#define PXP_CONCRETE_FID_PATH_SHIFT 6
+#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT 8
+};
+
+struct pxp_pretend_concrete_fid {
+ __le16 fid;
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
+};
+
+union pxp_pretend_fid {
+ struct pxp_pretend_concrete_fid concrete_fid;
+ __le16 opaque_fid;
+};
+
+/* Pxp Pretend Command Register. */
+struct pxp_pretend_cmd {
+ union pxp_pretend_fid fid;
+ __le16 control;
+#define PXP_PRETEND_CMD_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT 0
+#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
+#define PXP_PRETEND_CMD_PORT_MASK 0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT 2
+#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
+#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
+};
+
+/* PTT Record in PXP Admin Window. */
+struct pxp_ptt_entry {
+ __le32 offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
+#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
+ struct pxp_pretend_cmd pretend;
+};
+
+
+/*
+ * VF Zone A Permission Register.
+ */
+struct pxp_vf_zone_a_permission {
+ __le32 control;
+#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
+#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
+#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
+#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
+};
+
+
+/*
+ * Rdif context
+ */
+struct rdif_task_context {
+ __le32 initial_ref_tag;
+ __le16 app_tag_value;
+ __le16 app_tag_mask;
+ u8 flags0;
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
+/* 0 = IP checksum, 1 = CRC */
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
+/* 1/2/3 - Protection Type */
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
+/* 0=0x0000, 1=0xffff */
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+/* Keep reference tag constant */
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7
+ u8 partial_dif_data[7];
+ __le16 partial_crc_value;
+ __le16 partial_checksum_value;
+ __le32 offset_in_io;
+ __le16 flags1;
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
+/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
+/* 0=None, 1=DIF, 2=DIX */
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
+/* DIF tag right at the beginning of DIF interval */
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
+/* 0=None, 1=DIF */
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
+/* Forward application tag with mask */
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
+/* Forward reference tag with mask */
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
+ __le16 state;
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9
+/* mask for refernce tag handling */
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
+ __le32 reserved2;
+};
+
+/*
+ * RSS hash type
+ */
+enum rss_hash_type {
+ RSS_HASH_TYPE_DEFAULT = 0,
+ RSS_HASH_TYPE_IPV4 = 1,
+ RSS_HASH_TYPE_TCP_IPV4 = 2,
+ RSS_HASH_TYPE_IPV6 = 3,
+ RSS_HASH_TYPE_TCP_IPV6 = 4,
+ RSS_HASH_TYPE_UDP_IPV4 = 5,
+ RSS_HASH_TYPE_UDP_IPV6 = 6,
+ MAX_RSS_HASH_TYPE
+};
+
+/*
+ * status block structure
+ */
+struct status_block_e4 {
+ __le16 pi_array[PIS_PER_SB_E4];
+ __le32 sb_num;
+#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
+ __le32 prod_index;
+#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
+};
+
+
+/*
+ * status block structure
+ */
+struct status_block_e5 {
+ __le16 pi_array[PIS_PER_SB_E5];
+ __le32 sb_num;
+#define STATUS_BLOCK_E5_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_E5_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_E5_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_E5_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_E5_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_E5_ZERO_PAD2_SHIFT 16
+ __le32 prod_index;
+#define STATUS_BLOCK_E5_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_E5_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_E5_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_E5_ZERO_PAD3_SHIFT 24
+};
+
+
+/*
+ * Tdif context
+ */
+struct tdif_task_context {
+ __le32 initial_ref_tag;
+ __le16 app_tag_value;
+ __le16 app_tag_mask;
+ __le16 partial_crc_value_b;
+ __le16 partial_checksum_value_b;
+ __le16 stateB;
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
+ u8 reserved1;
+ u8 flags0;
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
+/* 0 = IP checksum, 1 = CRC */
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
+/* 1/2/3 - Protection Type */
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
+/* 0=0x0000, 1=0xffff */
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
+ __le32 flags1;
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
+/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
+/* 0=None, 1=DIF, 2=DIX */
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
+/* DIF tag right at the beginning of DIF interval */
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 /* reserved */
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
+/* 0=None, 1=DIF */
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23
+/* mask for refernce tag handling */
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24
+/* Forward application tag with mask */
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28
+/* Forward reference tag with mask */
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29
+/* Keep reference tag constant */
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
+ __le32 offset_in_io_b;
+ __le16 partial_crc_value_a;
+ __le16 partial_checksum_value_a;
+ __le32 offset_in_io_a;
+ u8 partial_dif_data_a[8];
+ u8 partial_dif_data_b[8];
+};
+
+
+/*
+ * Timers context
+ */
+struct timers_context {
+ __le32 logical_client_0;
+/* Expiration time of logical client 0 */
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED0_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED0_SHIFT 27
+/* Valid bit of logical client 0 */
+#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28
+/* Active bit of logical client 0 */
+#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
+ __le32 logical_client_1;
+/* Expiration time of logical client 1 */
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED2_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED2_SHIFT 27
+/* Valid bit of logical client 1 */
+#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28
+/* Active bit of logical client 1 */
+#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED3_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED3_SHIFT 30
+ __le32 logical_client_2;
+/* Expiration time of logical client 2 */
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED4_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED4_SHIFT 27
+/* Valid bit of logical client 2 */
+#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28
+/* Active bit of logical client 2 */
+#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED5_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED5_SHIFT 30
+ __le32 host_expiration_fields;
+/* Expiration time on host (closest one) */
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED6_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED6_SHIFT 27
+/* Valid bit of host expiration */
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
+#define TIMERS_CONTEXT_RESERVED7_MASK 0x7
+#define TIMERS_CONTEXT_RESERVED7_SHIFT 29
+};
+
+
+/*
+ * Enum for next_protocol field of tunnel_parsing_flags
+ */
+enum tunnel_next_protocol {
+ e_unknown = 0,
+ e_l2 = 1,
+ e_ipv4 = 2,
+ e_ipv6 = 3,
+ MAX_TUNNEL_NEXT_PROTOCOL
+};
+
+#endif /* __COMMON_HSI__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore.h b/src/spdk/dpdk/drivers/net/qede/base/ecore.h
new file mode 100644
index 00000000..5d79fdf0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore.h
@@ -0,0 +1,970 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_H
+#define __ECORE_H
+
+/* @DPDK */
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#define CONFIG_ECORE_BINARY_FW
+#undef CONFIG_ECORE_ZIPPED_FW
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+#include <zlib.h>
+#endif
+
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_debug_tools.h"
+#include "ecore_hsi_init_func.h"
+#include "ecore_hsi_init_tool.h"
+#include "ecore_proto_if.h"
+#include "mcp_public.h"
+
+#define ECORE_MAJOR_VERSION 8
+#define ECORE_MINOR_VERSION 30
+#define ECORE_REVISION_VERSION 8
+#define ECORE_ENGINEERING_VERSION 0
+
+#define ECORE_VERSION \
+ ((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \
+ (ECORE_REVISION_VERSION << 8) | ECORE_ENGINEERING_VERSION)
+
+#define STORM_FW_VERSION \
+ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
+
+#define IS_ECORE_PACING(p_hwfn) \
+ (!!(p_hwfn->b_en_pacing))
+
+#define MAX_HWFNS_PER_DEVICE 2
+#define NAME_SIZE 128 /* @DPDK */
+#define ECORE_WFQ_UNIT 100
+#include "../qede_logs.h" /* @DPDK */
+
+#define ISCSI_BDQ_ID(_port_id) (_port_id)
+#define FCOE_BDQ_ID(_port_id) (_port_id + 2)
+/* Constants */
+#define ECORE_WID_SIZE (1024)
+#define ECORE_MIN_WIDS (4)
+
+/* Configurable */
+#define ECORE_PF_DEMS_SIZE (4)
+
+/* cau states */
+enum ecore_coalescing_mode {
+ ECORE_COAL_MODE_DISABLE,
+ ECORE_COAL_MODE_ENABLE
+};
+
+enum ecore_nvm_cmd {
+ ECORE_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
+ ECORE_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
+ ECORE_NVM_READ_NVRAM = DRV_MSG_CODE_NVM_READ_NVRAM,
+ ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
+ ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE,
+ ECORE_EXT_PHY_FW_UPGRADE = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE,
+ ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE,
+ ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ,
+ ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE,
+ ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ,
+ ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE,
+ ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00
+};
+
+#ifndef LINUX_REMOVE
+#if !defined(CONFIG_ECORE_L2)
+#define CONFIG_ECORE_L2
+#define CONFIG_ECORE_SRIOV
+#endif
+#endif
+
+/* helpers */
+#ifndef __EXTRACT__LINUX__
+#define MASK_FIELD(_name, _value) \
+ ((_value) &= (_name##_MASK))
+
+#define FIELD_VALUE(_name, _value) \
+ ((_value & _name##_MASK) << _name##_SHIFT)
+
+#define SET_FIELD(value, name, flag) \
+do { \
+ (value) &= ~(name##_MASK << name##_SHIFT); \
+ (value) |= ((((u64)flag) & (u64)name##_MASK) << (name##_SHIFT));\
+} while (0)
+
+#define GET_FIELD(value, name) \
+ (((value) >> (name##_SHIFT)) & name##_MASK)
+
+#define GET_MFW_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _OFFSET))
+
+#define SET_MFW_FIELD(name, field, value) \
+do { \
+ (name) &= ~((field ## _MASK)); \
+ (name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK)); \
+} while (0)
+#endif
+
+static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ (cid * ECORE_PF_DEMS_SIZE);
+
+ return db_addr;
+}
+
+static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
+
+ return db_addr;
+}
+
+#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
+ ((sizeof(type_name) + (u32)(1 << (p_hwfn->p_dev->cache_shift)) - 1) & \
+ ~((1 << (p_hwfn->p_dev->cache_shift)) - 1))
+
+#ifndef LINUX_REMOVE
+#ifndef U64_HI
+#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
+#endif
+
+#ifndef U64_LO
+#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
+#endif
+#endif
+
+#ifndef __EXTRACT__LINUX__
+enum DP_LEVEL {
+ ECORE_LEVEL_VERBOSE = 0x0,
+ ECORE_LEVEL_INFO = 0x1,
+ ECORE_LEVEL_NOTICE = 0x2,
+ ECORE_LEVEL_ERR = 0x3,
+};
+
+#define ECORE_LOG_LEVEL_SHIFT (30)
+#define ECORE_LOG_VERBOSE_MASK (0x3fffffff)
+#define ECORE_LOG_INFO_MASK (0x40000000)
+#define ECORE_LOG_NOTICE_MASK (0x80000000)
+
+enum DP_MODULE {
+#ifndef LINUX_REMOVE
+ ECORE_MSG_DRV = 0x0001,
+ ECORE_MSG_PROBE = 0x0002,
+ ECORE_MSG_LINK = 0x0004,
+ ECORE_MSG_TIMER = 0x0008,
+ ECORE_MSG_IFDOWN = 0x0010,
+ ECORE_MSG_IFUP = 0x0020,
+ ECORE_MSG_RX_ERR = 0x0040,
+ ECORE_MSG_TX_ERR = 0x0080,
+ ECORE_MSG_TX_QUEUED = 0x0100,
+ ECORE_MSG_INTR = 0x0200,
+ ECORE_MSG_TX_DONE = 0x0400,
+ ECORE_MSG_RX_STATUS = 0x0800,
+ ECORE_MSG_PKTDATA = 0x1000,
+ ECORE_MSG_HW = 0x2000,
+ ECORE_MSG_WOL = 0x4000,
+#endif
+ ECORE_MSG_SPQ = 0x10000,
+ ECORE_MSG_STATS = 0x20000,
+ ECORE_MSG_DCB = 0x40000,
+ ECORE_MSG_IOV = 0x80000,
+ ECORE_MSG_SP = 0x100000,
+ ECORE_MSG_STORAGE = 0x200000,
+ ECORE_MSG_OOO = 0x200000,
+ ECORE_MSG_CXT = 0x800000,
+ ECORE_MSG_LL2 = 0x1000000,
+ ECORE_MSG_ILT = 0x2000000,
+ ECORE_MSG_RDMA = 0x4000000,
+ ECORE_MSG_DEBUG = 0x8000000,
+ /* to be added...up to 0x8000000 */
+};
+#endif
+
+#define for_each_hwfn(p_dev, i) for (i = 0; i < p_dev->num_hwfns; i++)
+
+#define D_TRINE(val, cond1, cond2, true1, true2, def) \
+ (val == (cond1) ? true1 : \
+ (val == (cond2) ? true2 : def))
+
+/* forward */
+struct ecore_ptt_pool;
+struct ecore_spq;
+struct ecore_sb_info;
+struct ecore_sb_attn_info;
+struct ecore_cxt_mngr;
+struct ecore_dma_mem;
+struct ecore_sb_sp_info;
+struct ecore_ll2_info;
+struct ecore_l2_info;
+struct ecore_igu_info;
+struct ecore_mcp_info;
+struct ecore_dcbx_info;
+
+struct ecore_rt_data {
+ u32 *init_val;
+ bool *b_valid;
+};
+
+enum ecore_tunn_mode {
+ ECORE_MODE_L2GENEVE_TUNN,
+ ECORE_MODE_IPGENEVE_TUNN,
+ ECORE_MODE_L2GRE_TUNN,
+ ECORE_MODE_IPGRE_TUNN,
+ ECORE_MODE_VXLAN_TUNN,
+};
+
+enum ecore_tunn_clss {
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ ECORE_TUNN_CLSS_INNER_MAC_VNI,
+ ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
+ MAX_ECORE_TUNN_CLSS,
+};
+
+struct ecore_tunn_update_type {
+ bool b_update_mode;
+ bool b_mode_enabled;
+ enum ecore_tunn_clss tun_cls;
+};
+
+struct ecore_tunn_update_udp_port {
+ bool b_update_port;
+ u16 port;
+};
+
+struct ecore_tunnel_info {
+ struct ecore_tunn_update_type vxlan;
+ struct ecore_tunn_update_type l2_geneve;
+ struct ecore_tunn_update_type ip_geneve;
+ struct ecore_tunn_update_type l2_gre;
+ struct ecore_tunn_update_type ip_gre;
+
+ struct ecore_tunn_update_udp_port vxlan_port;
+ struct ecore_tunn_update_udp_port geneve_port;
+
+ bool b_update_rx_cls;
+ bool b_update_tx_cls;
+};
+
+/* The PCI personality is not quite synonymous to protocol ID:
+ * 1. All personalities need CORE connections
+ * 2. The Ethernet personality may support also the RoCE/iWARP protocol
+ */
+enum ecore_pci_personality {
+ ECORE_PCI_ETH,
+ ECORE_PCI_FCOE,
+ ECORE_PCI_ISCSI,
+ ECORE_PCI_ETH_ROCE,
+ ECORE_PCI_ETH_IWARP,
+ ECORE_PCI_ETH_RDMA,
+ ECORE_PCI_DEFAULT /* default in shmem */
+};
+
+/* All VFs are symmetric, all counters are PF + all VFs */
+struct ecore_qm_iids {
+ u32 cids;
+ u32 vf_cids;
+ u32 tids;
+};
+
+#define MAX_PF_PER_PORT 8
+
+/* HW / FW resources, output of features supported below, most information
+ * is received from MFW.
+ */
+enum ecore_resources {
+ ECORE_L2_QUEUE,
+ ECORE_VPORT,
+ ECORE_RSS_ENG,
+ ECORE_PQ,
+ ECORE_RL,
+ ECORE_MAC,
+ ECORE_VLAN,
+ ECORE_RDMA_CNQ_RAM,
+ ECORE_ILT,
+ ECORE_LL2_QUEUE,
+ ECORE_CMDQS_CQS,
+ ECORE_RDMA_STATS_QUEUE,
+ ECORE_BDQ,
+
+ /* This is needed only internally for matching against the IGU.
+ * In case of legacy MFW, would be set to `0'.
+ */
+ ECORE_SB,
+
+ ECORE_MAX_RESC,
+};
+
+/* Features that require resources, given as input to the resource management
+ * algorithm, the output are the resources above
+ */
+enum ecore_feature {
+ ECORE_PF_L2_QUE,
+ ECORE_PF_TC,
+ ECORE_VF,
+ ECORE_EXTRA_VF_QUE,
+ ECORE_VMQ,
+ ECORE_RDMA_CNQ,
+ ECORE_ISCSI_CQ,
+ ECORE_FCOE_CQ,
+ ECORE_VF_L2_QUE,
+ ECORE_MAX_FEATURES,
+};
+
+enum ecore_port_mode {
+ ECORE_PORT_MODE_DE_2X40G,
+ ECORE_PORT_MODE_DE_2X50G,
+ ECORE_PORT_MODE_DE_1X100G,
+ ECORE_PORT_MODE_DE_4X10G_F,
+ ECORE_PORT_MODE_DE_4X10G_E,
+ ECORE_PORT_MODE_DE_4X20G,
+ ECORE_PORT_MODE_DE_1X40G,
+ ECORE_PORT_MODE_DE_2X25G,
+ ECORE_PORT_MODE_DE_1X25G,
+ ECORE_PORT_MODE_DE_4X25G,
+ ECORE_PORT_MODE_DE_2X10G,
+};
+
+enum ecore_dev_cap {
+ ECORE_DEV_CAP_ETH,
+ ECORE_DEV_CAP_FCOE,
+ ECORE_DEV_CAP_ISCSI,
+ ECORE_DEV_CAP_ROCE,
+ ECORE_DEV_CAP_IWARP
+};
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_hw_err_type {
+ ECORE_HW_ERR_FAN_FAIL,
+ ECORE_HW_ERR_MFW_RESP_FAIL,
+ ECORE_HW_ERR_HW_ATTN,
+ ECORE_HW_ERR_DMAE_FAIL,
+ ECORE_HW_ERR_RAMROD_FAIL,
+ ECORE_HW_ERR_FW_ASSERT,
+};
+#endif
+
+enum ecore_db_rec_exec {
+ DB_REC_DRY_RUN,
+ DB_REC_REAL_DEAL,
+ DB_REC_ONCE,
+};
+
+struct ecore_hw_info {
+ /* PCI personality */
+ enum ecore_pci_personality personality;
+#define ECORE_IS_RDMA_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_ROCE_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_IWARP_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_L2_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH || \
+ ECORE_IS_RDMA_PERSONALITY(dev))
+#define ECORE_IS_FCOE_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_FCOE)
+#define ECORE_IS_ISCSI_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ISCSI)
+
+ /* Resource Allocation scheme results */
+ u32 resc_start[ECORE_MAX_RESC];
+ u32 resc_num[ECORE_MAX_RESC];
+ u32 feat_num[ECORE_MAX_FEATURES];
+
+ #define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+ #define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+ #define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
+ RESC_NUM(_p_hwfn, resc))
+ #define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+
+ /* Amount of traffic classes HW supports */
+ u8 num_hw_tc;
+
+/* Amount of TCs which should be active according to DCBx or upper layer driver
+ * configuration
+ */
+
+ u8 num_active_tc;
+
+ /* The traffic class used by PF for it's offloaded protocol */
+ u8 offload_tc;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 ovlan;
+ u32 part_num[4];
+
+ unsigned char hw_mac_addr[ETH_ALEN];
+ u64 node_wwn; /* For FCoE only */
+ u64 port_wwn; /* For FCoE only */
+
+ u16 num_iscsi_conns;
+ u16 num_fcoe_conns;
+
+ struct ecore_igu_info *p_igu_info;
+ /* Sriov */
+ u8 max_chains_per_vf;
+
+ u32 port_mode;
+ u32 hw_mode;
+ unsigned long device_capabilities;
+
+ /* Default DCBX mode */
+ u8 dcbx_mode;
+
+ u16 mtu;
+};
+
+/* maximun size of read/write commands (HW limit) */
+#define DMAE_MAX_RW_SIZE 0x2000
+
+struct ecore_dmae_info {
+ /* Spinlock for synchronizing access to functions */
+ osal_spinlock_t lock;
+
+ bool b_mem_ready;
+
+ u8 channel;
+
+ dma_addr_t completion_word_phys_addr;
+
+ /* The memory location where the DMAE writes the completion
+ * value when an operation is finished on this context.
+ */
+ u32 *p_completion_word;
+
+ dma_addr_t intermediate_buffer_phys_addr;
+
+ /* An intermediate buffer for DMAE operations that use virtual
+ * addresses - data is DMA'd to/from this buffer and then
+ * memcpy'd to/from the virtual address
+ */
+ u32 *p_intermediate_buffer;
+
+ dma_addr_t dmae_cmd_phys_addr;
+ struct dmae_cmd *p_dmae_cmd;
+};
+
+struct ecore_wfq_data {
+ u32 default_min_speed; /* When wfq feature is not configured */
+ u32 min_speed; /* when feature is configured for any 1 vport */
+ bool configured;
+};
+
+struct ecore_qm_info {
+ struct init_qm_pq_params *qm_pq_params;
+ struct init_qm_vport_params *qm_vport_params;
+ struct init_qm_port_params *qm_port_params;
+ u16 start_pq;
+ u8 start_vport;
+ u16 pure_lb_pq;
+ u16 offload_pq;
+ u16 pure_ack_pq;
+ u16 ooo_pq;
+ u16 first_vf_pq;
+ u16 first_mcos_pq;
+ u16 first_rl_pq;
+ u16 num_pqs;
+ u16 num_vf_pqs;
+ u8 num_vports;
+ u8 max_phys_tcs_per_port;
+ u8 ooo_tc;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct ecore_wfq_data *wfq_data;
+ u8 num_pf_rls;
+};
+
+struct ecore_db_recovery_info {
+ osal_list_t list;
+ osal_spinlock_t lock;
+ u32 db_recovery_counter;
+};
+
+struct storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct ecore_fw_data {
+#ifdef CONFIG_ECORE_BINARY_FW
+ struct fw_ver_info *fw_ver_info;
+#endif
+ const u8 *modes_tree_buf;
+ union init_op *init_ops;
+ const u32 *arr_data;
+ u32 init_ops_size;
+};
+
+enum ecore_mf_mode_bit {
+ /* Supports PF-classification based on tag */
+ ECORE_MF_OVLAN_CLSS,
+
+ /* Supports PF-classification based on MAC */
+ ECORE_MF_LLH_MAC_CLSS,
+
+ /* Supports PF-classification based on protocol type */
+ ECORE_MF_LLH_PROTO_CLSS,
+
+ /* Requires a default PF to be set */
+ ECORE_MF_NEED_DEF_PF,
+
+ /* Allow LL2 to multicast/broadcast */
+ ECORE_MF_LL2_NON_UNICAST,
+
+ /* Allow Cross-PF [& child VFs] Tx-switching */
+ ECORE_MF_INTER_PF_SWITCH,
+
+ /* TODO - if we ever re-utilize any of this logic, we can rename */
+ ECORE_MF_UFP_SPECIFIC,
+
+ ECORE_MF_DISABLE_ARFS,
+
+ /* Use vlan for steering */
+ ECORE_MF_8021Q_TAGGING,
+
+ /* Use stag for steering */
+ ECORE_MF_8021AD_TAGGING,
+};
+
+enum ecore_ufp_mode {
+ ECORE_UFP_MODE_ETS,
+ ECORE_UFP_MODE_VNIC_BW,
+};
+
+enum ecore_ufp_pri_type {
+ ECORE_UFP_PRI_OS,
+ ECORE_UFP_PRI_VNIC
+};
+
+struct ecore_ufp_info {
+ enum ecore_ufp_pri_type pri_type;
+ enum ecore_ufp_mode mode;
+ u8 tc;
+};
+
+enum BAR_ID {
+ BAR_ID_0, /* used for GRC */
+ BAR_ID_1 /* Used for doorbells */
+};
+
+struct ecore_hwfn {
+ struct ecore_dev *p_dev;
+ u8 my_id; /* ID inside the PF */
+#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
+ u8 rel_pf_id; /* Relative to engine*/
+ u8 abs_pf_id;
+#define ECORE_PATH_ID(_p_hwfn) \
+ (ECORE_IS_BB((_p_hwfn)->p_dev) ? ((_p_hwfn)->abs_pf_id & 1) : 0)
+ u8 port_id;
+ bool b_active;
+
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+ void *dp_ctx;
+
+ bool first_on_engine;
+ bool hw_init_done;
+
+ u8 num_funcs_on_engine;
+ u8 enabled_func_idx;
+
+ /* BAR access */
+ void OSAL_IOMEM *regview;
+ void OSAL_IOMEM *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PTT pool */
+ struct ecore_ptt_pool *p_ptt_pool;
+
+ /* HW info */
+ struct ecore_hw_info hw_info;
+
+ /* rt_array (for init-tool) */
+ struct ecore_rt_data rt_data;
+
+ /* SPQ */
+ struct ecore_spq *p_spq;
+
+ /* EQ */
+ struct ecore_eq *p_eq;
+
+ /* Consolidate Q*/
+ struct ecore_consq *p_consq;
+
+ /* Slow-Path definitions */
+ osal_dpc_t sp_dpc;
+ bool b_sp_dpc_enabled;
+
+ struct ecore_ptt *p_main_ptt;
+ struct ecore_ptt *p_dpc_ptt;
+
+ struct ecore_sb_sp_info *p_sp_sb;
+ struct ecore_sb_attn_info *p_sb_attn;
+
+ /* Protocol related */
+ bool using_ll2;
+ struct ecore_ll2_info *p_ll2_info;
+ struct ecore_ooo_info *p_ooo_info;
+ struct ecore_iscsi_info *p_iscsi_info;
+ struct ecore_fcoe_info *p_fcoe_info;
+ struct ecore_rdma_info *p_rdma_info;
+ struct ecore_pf_params pf_params;
+
+ bool b_rdma_enabled_in_prs;
+ u32 rdma_prs_search_reg;
+
+ struct ecore_cxt_mngr *p_cxt_mngr;
+
+ /* Flag indicating whether interrupts are enabled or not*/
+ bool b_int_enabled;
+ bool b_int_requested;
+
+ /* True if the driver requests for the link */
+ bool b_drv_link_init;
+
+ struct ecore_vf_iov *vf_iov_info;
+ struct ecore_pf_iov *pf_iov_info;
+ struct ecore_mcp_info *mcp_info;
+ struct ecore_dcbx_info *p_dcbx_info;
+ struct ecore_ufp_info ufp_info;
+
+ struct ecore_dmae_info dmae_info;
+
+ /* QM init */
+ struct ecore_qm_info qm_info;
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ /* Buffer for unzipping firmware data */
+ void *unzip_buf;
+#endif
+
+ struct dbg_tools_data dbg_info;
+
+ struct z_stream_s *stream;
+
+ /* PWM region specific data */
+ u32 dpi_size;
+ u32 dpi_count;
+ u32 dpi_start_offset; /* this is used to
+ * calculate th
+ * doorbell address
+ */
+
+ /* If one of the following is set then EDPM shouldn't be used */
+ u8 dcbx_no_edpm;
+ u8 db_bar_no_edpm;
+
+ /* L2-related */
+ struct ecore_l2_info *p_l2_info;
+
+ /* Mechanism for recovering from doorbell drop */
+ struct ecore_db_recovery_info db_recovery_info;
+
+ /* Enable/disable pacing, if request to enable then
+ * IOV and mcos configuration will be skipped.
+ * this actually reflects the value requested in
+ * struct ecore_hw_prepare_params by ecore client.
+ */
+ bool b_en_pacing;
+
+ /* @DPDK */
+ struct ecore_ptt *p_arfs_ptt;
+};
+
+enum ecore_mf_mode {
+ ECORE_MF_DEFAULT,
+ ECORE_MF_OVLAN,
+ ECORE_MF_NPAR,
+ ECORE_MF_UFP,
+};
+
+/* @DPDK */
+struct ecore_dbg_feature {
+ u8 *dump_buf;
+ u32 buf_size;
+ u32 dumped_dwords;
+};
+
+enum qed_dbg_features {
+ DBG_FEATURE_BUS,
+ DBG_FEATURE_GRC,
+ DBG_FEATURE_IDLE_CHK,
+ DBG_FEATURE_MCP_TRACE,
+ DBG_FEATURE_REG_FIFO,
+ DBG_FEATURE_PROTECTION_OVERRIDE,
+ DBG_FEATURE_NUM
+};
+
+enum ecore_dev_type {
+ ECORE_DEV_TYPE_BB,
+ ECORE_DEV_TYPE_AH,
+};
+
+struct ecore_dev {
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+ void *dp_ctx;
+
+ enum ecore_dev_type type;
+/* Translate type/revision combo into the proper conditions */
+#define ECORE_IS_BB(dev) ((dev)->type == ECORE_DEV_TYPE_BB)
+#define ECORE_IS_BB_A0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_A0(dev))
+#ifndef ASIC_ONLY
+#define ECORE_IS_BB_B0(dev) ((ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev)) || \
+ (CHIP_REV_IS_TEDIBEAR(dev)))
+#else
+#define ECORE_IS_BB_B0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev))
+#endif
+#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
+#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
+
+ u16 vendor_id;
+ u16 device_id;
+#define ECORE_DEV_ID_MASK 0xff00
+#define ECORE_DEV_ID_MASK_BB 0x1600
+#define ECORE_DEV_ID_MASK_AH 0x8000
+
+ u16 chip_num;
+#define CHIP_NUM_MASK 0xffff
+#define CHIP_NUM_SHIFT 0
+
+ u8 chip_rev;
+#define CHIP_REV_MASK 0xf
+#define CHIP_REV_SHIFT 0
+#ifndef ASIC_ONLY
+#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
+#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
+#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
+#define CHIP_REV_IS_EMUL(_p_dev) \
+ (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_EMUL_B0(_p_dev))
+#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
+#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
+#define CHIP_REV_IS_FPGA(_p_dev) \
+ (CHIP_REV_IS_FPGA_A0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev))
+#define CHIP_REV_IS_SLOW(_p_dev) \
+ (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
+#define CHIP_REV_IS_A0(_p_dev) \
+ (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_FPGA_A0(_p_dev) || \
+ (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal))
+#define CHIP_REV_IS_B0(_p_dev) \
+ (CHIP_REV_IS_EMUL_B0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev) || \
+ ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal))
+#define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev)
+#else
+#define CHIP_REV_IS_A0(_p_dev) \
+ (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal)
+#define CHIP_REV_IS_B0(_p_dev) \
+ ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal)
+#endif
+
+ u8 chip_metal;
+#define CHIP_METAL_MASK 0xff
+#define CHIP_METAL_SHIFT 0
+
+ u8 chip_bond_id;
+#define CHIP_BOND_ID_MASK 0xff
+#define CHIP_BOND_ID_SHIFT 0
+
+ u8 num_engines;
+ u8 num_ports;
+ u8 num_ports_in_engine;
+ u8 num_funcs_in_port;
+
+ u8 path_id;
+
+ unsigned long mf_bits;
+ enum ecore_mf_mode mf_mode;
+#define IS_MF_DEFAULT(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
+#define IS_MF_SD(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
+
+ int pcie_width;
+ int pcie_speed;
+
+ /* Add MF related configuration */
+ u8 mcp_rev;
+ u8 boot_mode;
+
+ u8 wol;
+
+ u32 int_mode;
+ enum ecore_coalescing_mode int_coalescing_mode;
+ u16 rx_coalesce_usecs;
+ u16 tx_coalesce_usecs;
+
+ /* Start Bar offset of first hwfn */
+ void OSAL_IOMEM *regview;
+ void OSAL_IOMEM *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PCI */
+ u8 cache_shift;
+
+ /* Init */
+ const struct iro *iro_arr;
+ #define IRO (p_hwfn->p_dev->iro_arr)
+
+ /* HW functions */
+ u8 num_hwfns;
+ struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+#define ECORE_IS_CMT(dev) ((dev)->num_hwfns > 1)
+
+ /* SRIOV */
+ struct ecore_hw_sriov_info *p_iov_info;
+#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info)
+ struct ecore_tunnel_info tunnel;
+ bool b_is_vf;
+ bool b_dont_override_vf_msix;
+
+ u32 drv_type;
+
+ u32 rdma_max_sge;
+ u32 rdma_max_inline;
+ u32 rdma_max_srq_sge;
+
+ struct ecore_eth_stats *reset_stats;
+ struct ecore_fw_data *fw_data;
+
+ u32 mcp_nvm_resp;
+
+ /* Recovery */
+ bool recov_in_prog;
+
+/* Indicates whether should prevent attentions from being reasserted */
+
+ bool attn_clr_en;
+
+ /* Indicates whether allowing the MFW to collect a crash dump */
+ bool allow_mdump;
+
+ /* Indicates if the reg_fifo is checked after any register access */
+ bool chk_reg_fifo;
+
+#ifndef ASIC_ONLY
+ bool b_is_emul_full;
+#endif
+
+#ifdef CONFIG_ECORE_BINARY_FW /* @DPDK */
+ void *firmware;
+ u64 fw_len;
+#endif
+
+ /* @DPDK */
+ struct ecore_dbg_feature dbg_features[DBG_FEATURE_NUM];
+ u8 engine_for_debug;
+};
+
+#define NUM_OF_VFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \
+ : MAX_NUM_VFS_K2)
+#define NUM_OF_L2_QUEUES(dev) (ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
+ : MAX_NUM_L2_QUEUES_K2)
+#define NUM_OF_PORTS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \
+ : MAX_NUM_PORTS_K2)
+#define NUM_OF_SBS(dev) (ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
+ : MAX_SB_PER_PATH_K2)
+#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
+ : MAX_NUM_PFS_K2)
+
+#define CRC8_TABLE_SIZE 256
+
+/**
+ * @brief ecore_concrete_to_sw_fid - get the sw function id from
+ * the concrete value.
+ *
+ * @param concrete_fid
+ *
+ * @return OSAL_INLINE u8
+ */
+static OSAL_INLINE u8 ecore_concrete_to_sw_fid(u32 concrete_fid)
+{
+ u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
+ u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+ u8 vf_valid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID);
+ u8 sw_fid;
+
+ if (vf_valid)
+ sw_fid = vfid + MAX_NUM_PFS;
+ else
+ sw_fid = pfid;
+
+ return sw_fid;
+}
+
+#define PKT_LB_TC 9
+#define MAX_NUM_VOQS_E4 20
+
+int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
+void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+ struct ecore_ptt *p_ptt,
+ u32 min_pf_rate);
+
+int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw);
+int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw);
+void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+int ecore_device_num_engines(struct ecore_dev *p_dev);
+int ecore_device_num_ports(struct ecore_dev *p_dev);
+void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
+ u8 *mac);
+
+/* Flags for indication of required queues */
+#define PQ_FLAGS_RLS (1 << 0)
+#define PQ_FLAGS_MCOS (1 << 1)
+#define PQ_FLAGS_LB (1 << 2)
+#define PQ_FLAGS_OOO (1 << 3)
+#define PQ_FLAGS_ACK (1 << 4)
+#define PQ_FLAGS_OFLD (1 << 5)
+#define PQ_FLAGS_VFS (1 << 6)
+#define PQ_FLAGS_LLT (1 << 7)
+
+/* physical queue index for cm context intialization */
+u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags);
+u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
+u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl);
+
+/* qm vport for rate limit configuration */
+u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl);
+
+const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
+
+/* doorbell recovery mechanism */
+void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn);
+void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
+ enum ecore_db_rec_exec);
+
+/* amount of resources used in qm init */
+u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
+
+#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
+
+#endif /* __ECORE_H */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_attn_values.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_attn_values.h
new file mode 100644
index 00000000..ec773fbd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_attn_values.h
@@ -0,0 +1,13285 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ATTN_VALUES_H__
+#define __ATTN_VALUES_H__
+
+#ifndef __PREVENT_INT_ATTN__
+
+/* HW Attention register */
+struct attn_hw_reg {
+ u16 reg_idx; /* Index of this register in its block */
+ u16 num_of_bits; /* number of valid attention bits */
+ const u16 *bit_attn_idx; /* attention index per valid bit */
+ u32 sts_addr; /* Address of the STS register */
+ u32 sts_clr_addr; /* Address of the STS_CLR register */
+ u32 sts_wr_addr; /* Address of the STS_WR register */
+ u32 mask_addr; /* Address of the MASK register */
+};
+
+/* HW block attention registers */
+struct attn_hw_regs {
+ u16 num_of_int_regs; /* Number of interrupt regs */
+ u16 num_of_prty_regs; /* Number of parity regs */
+ struct attn_hw_reg **int_regs; /* interrupt regs */
+ struct attn_hw_reg **prty_regs; /* parity regs */
+};
+
+/* HW block attention registers */
+struct attn_hw_block {
+ const char *name; /* Block name */
+ const char **int_desc; /* Array of interrupt attention descriptions */
+ const char **prty_desc; /* Array of parity attention descriptions */
+ struct attn_hw_regs chip_regs[3]; /* attention regs per chip.*/
+};
+
+#ifdef ATTN_DESC
+static const char *grc_int_attn_desc[5] = {
+ "grc_address_error",
+ "grc_timeout_event",
+ "grc_global_reserved_address",
+ "grc_path_isolation_error",
+ "grc_trace_fifo_valid_data",
+};
+#else
+#define grc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 grc_int0_bb_a0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg grc_int0_bb_a0 = {
+ 0, 4, grc_int0_bb_a0_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184
+};
+
+static struct attn_hw_reg *grc_int_bb_a0_regs[1] = {
+ &grc_int0_bb_a0,
+};
+
+static const u16 grc_int0_bb_b0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg grc_int0_bb_b0 = {
+ 0, 4, grc_int0_bb_b0_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184
+};
+
+static struct attn_hw_reg *grc_int_bb_b0_regs[1] = {
+ &grc_int0_bb_b0,
+};
+
+static const u16 grc_int0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg grc_int0_k2 = {
+ 0, 5, grc_int0_k2_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184
+};
+
+static struct attn_hw_reg *grc_int_k2_regs[1] = {
+ &grc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *grc_prty_attn_desc[3] = {
+ "grc_mem003_i_mem_prty",
+ "grc_mem002_i_mem_prty",
+ "grc_mem001_i_mem_prty",
+};
+#else
+#define grc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 grc_prty1_bb_a0_attn_idx[2] = {
+ 1, 2,
+};
+
+static struct attn_hw_reg grc_prty1_bb_a0 = {
+ 0, 2, grc_prty1_bb_a0_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204
+};
+
+static struct attn_hw_reg *grc_prty_bb_a0_regs[1] = {
+ &grc_prty1_bb_a0,
+};
+
+static const u16 grc_prty1_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg grc_prty1_bb_b0 = {
+ 0, 2, grc_prty1_bb_b0_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204
+};
+
+static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = {
+ &grc_prty1_bb_b0,
+};
+
+static const u16 grc_prty1_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg grc_prty1_k2 = {
+ 0, 2, grc_prty1_k2_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204
+};
+
+static struct attn_hw_reg *grc_prty_k2_regs[1] = {
+ &grc_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *miscs_int_attn_desc[14] = {
+ "miscs_address_error",
+ "miscs_generic_sw",
+ "miscs_cnig_interrupt",
+ "miscs_opte_dorq_fifo_err_eng1",
+ "miscs_opte_dorq_fifo_err_eng0",
+ "miscs_opte_dbg_fifo_err_eng1",
+ "miscs_opte_dbg_fifo_err_eng0",
+ "miscs_opte_btb_if1_fifo_err_eng1",
+ "miscs_opte_btb_if1_fifo_err_eng0",
+ "miscs_opte_btb_if0_fifo_err_eng1",
+ "miscs_opte_btb_if0_fifo_err_eng0",
+ "miscs_opte_btb_sop_fifo_err_eng1",
+ "miscs_opte_btb_sop_fifo_err_eng0",
+ "miscs_opte_storm_fifo_err_eng0",
+};
+#else
+#define miscs_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 miscs_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg miscs_int0_bb_a0 = {
+ 0, 2, miscs_int0_bb_a0_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184
+};
+
+static const u16 miscs_int1_bb_a0_attn_idx[11] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg miscs_int1_bb_a0 = {
+ 1, 11, miscs_int1_bb_a0_attn_idx, 0x9190, 0x919c, 0x9198, 0x9194
+};
+
+static struct attn_hw_reg *miscs_int_bb_a0_regs[2] = {
+ &miscs_int0_bb_a0, &miscs_int1_bb_a0,
+};
+
+static const u16 miscs_int0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg miscs_int0_bb_b0 = {
+ 0, 3, miscs_int0_bb_b0_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184
+};
+
+static const u16 miscs_int1_bb_b0_attn_idx[11] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg miscs_int1_bb_b0 = {
+ 1, 11, miscs_int1_bb_b0_attn_idx, 0x9190, 0x919c, 0x9198, 0x9194
+};
+
+static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = {
+ &miscs_int0_bb_b0, &miscs_int1_bb_b0,
+};
+
+static const u16 miscs_int0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg miscs_int0_k2 = {
+ 0, 3, miscs_int0_k2_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184
+};
+
+static struct attn_hw_reg *miscs_int_k2_regs[1] = {
+ &miscs_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *miscs_prty_attn_desc[1] = {
+ "miscs_cnig_parity",
+};
+#else
+#define miscs_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 miscs_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg miscs_prty0_bb_b0 = {
+ 0, 1, miscs_prty0_bb_b0_attn_idx, 0x91a0, 0x91ac, 0x91a8, 0x91a4
+};
+
+static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = {
+ &miscs_prty0_bb_b0,
+};
+
+static const u16 miscs_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg miscs_prty0_k2 = {
+ 0, 1, miscs_prty0_k2_attn_idx, 0x91a0, 0x91ac, 0x91a8, 0x91a4
+};
+
+static struct attn_hw_reg *miscs_prty_k2_regs[1] = {
+ &miscs_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *misc_int_attn_desc[1] = {
+ "misc_address_error",
+};
+#else
+#define misc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 misc_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg misc_int0_bb_a0 = {
+ 0, 1, misc_int0_bb_a0_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184
+};
+
+static struct attn_hw_reg *misc_int_bb_a0_regs[1] = {
+ &misc_int0_bb_a0,
+};
+
+static const u16 misc_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg misc_int0_bb_b0 = {
+ 0, 1, misc_int0_bb_b0_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184
+};
+
+static struct attn_hw_reg *misc_int_bb_b0_regs[1] = {
+ &misc_int0_bb_b0,
+};
+
+static const u16 misc_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg misc_int0_k2 = {
+ 0, 1, misc_int0_k2_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184
+};
+
+static struct attn_hw_reg *misc_int_k2_regs[1] = {
+ &misc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pglue_b_int_attn_desc[24] = {
+ "pglue_b_address_error",
+ "pglue_b_incorrect_rcv_behavior",
+ "pglue_b_was_error_attn",
+ "pglue_b_vf_length_violation_attn",
+ "pglue_b_vf_grc_space_violation_attn",
+ "pglue_b_tcpl_error_attn",
+ "pglue_b_tcpl_in_two_rcbs_attn",
+ "pglue_b_cssnoop_fifo_overflow",
+ "pglue_b_tcpl_translation_size_different",
+ "pglue_b_pcie_rx_l0s_timeout",
+ "pglue_b_master_zlr_attn",
+ "pglue_b_admin_window_violation_attn",
+ "pglue_b_out_of_range_function_in_pretend",
+ "pglue_b_illegal_address",
+ "pglue_b_pgl_cpl_err",
+ "pglue_b_pgl_txw_of",
+ "pglue_b_pgl_cpl_aft",
+ "pglue_b_pgl_cpl_of",
+ "pglue_b_pgl_cpl_ecrc",
+ "pglue_b_pgl_pcie_attn",
+ "pglue_b_pgl_read_blocked",
+ "pglue_b_pgl_write_blocked",
+ "pglue_b_vf_ilt_err",
+ "pglue_b_rxobffexception_attn",
+};
+#else
+#define pglue_b_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pglue_b_int0_bb_a0_attn_idx[23] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22,
+};
+
+static struct attn_hw_reg pglue_b_int0_bb_a0 = {
+ 0, 23, pglue_b_int0_bb_a0_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188,
+ 0x2a8184
+};
+
+static struct attn_hw_reg *pglue_b_int_bb_a0_regs[1] = {
+ &pglue_b_int0_bb_a0,
+};
+
+static const u16 pglue_b_int0_bb_b0_attn_idx[23] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22,
+};
+
+static struct attn_hw_reg pglue_b_int0_bb_b0 = {
+ 0, 23, pglue_b_int0_bb_b0_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188,
+ 0x2a8184
+};
+
+static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = {
+ &pglue_b_int0_bb_b0,
+};
+
+static const u16 pglue_b_int0_k2_attn_idx[24] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23,
+};
+
+static struct attn_hw_reg pglue_b_int0_k2 = {
+ 0, 24, pglue_b_int0_k2_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184
+};
+
+static struct attn_hw_reg *pglue_b_int_k2_regs[1] = {
+ &pglue_b_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pglue_b_prty_attn_desc[35] = {
+ "pglue_b_datapath_registers",
+ "pglue_b_mem027_i_mem_prty",
+ "pglue_b_mem007_i_mem_prty",
+ "pglue_b_mem009_i_mem_prty",
+ "pglue_b_mem010_i_mem_prty",
+ "pglue_b_mem008_i_mem_prty",
+ "pglue_b_mem022_i_mem_prty",
+ "pglue_b_mem023_i_mem_prty",
+ "pglue_b_mem024_i_mem_prty",
+ "pglue_b_mem025_i_mem_prty",
+ "pglue_b_mem004_i_mem_prty",
+ "pglue_b_mem005_i_mem_prty",
+ "pglue_b_mem011_i_mem_prty",
+ "pglue_b_mem016_i_mem_prty",
+ "pglue_b_mem017_i_mem_prty",
+ "pglue_b_mem012_i_mem_prty",
+ "pglue_b_mem013_i_mem_prty",
+ "pglue_b_mem014_i_mem_prty",
+ "pglue_b_mem015_i_mem_prty",
+ "pglue_b_mem018_i_mem_prty",
+ "pglue_b_mem020_i_mem_prty",
+ "pglue_b_mem021_i_mem_prty",
+ "pglue_b_mem019_i_mem_prty",
+ "pglue_b_mem026_i_mem_prty",
+ "pglue_b_mem006_i_mem_prty",
+ "pglue_b_mem003_i_mem_prty",
+ "pglue_b_mem002_i_mem_prty_0",
+ "pglue_b_mem002_i_mem_prty_1",
+ "pglue_b_mem002_i_mem_prty_2",
+ "pglue_b_mem002_i_mem_prty_3",
+ "pglue_b_mem002_i_mem_prty_4",
+ "pglue_b_mem002_i_mem_prty_5",
+ "pglue_b_mem002_i_mem_prty_6",
+ "pglue_b_mem002_i_mem_prty_7",
+ "pglue_b_mem001_i_mem_prty",
+};
+#else
+#define pglue_b_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pglue_b_prty1_bb_a0_attn_idx[22] = {
+ 2, 3, 4, 5, 10, 11, 12, 15, 16, 17, 18, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pglue_b_prty1_bb_a0 = {
+ 0, 22, pglue_b_prty1_bb_a0_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208,
+ 0x2a8204
+};
+
+static struct attn_hw_reg *pglue_b_prty_bb_a0_regs[1] = {
+ &pglue_b_prty1_bb_a0,
+};
+
+static const u16 pglue_b_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pglue_b_prty0_bb_b0 = {
+ 0, 1, pglue_b_prty0_bb_b0_attn_idx, 0x2a8190, 0x2a819c, 0x2a8198,
+ 0x2a8194
+};
+
+static const u16 pglue_b_prty1_bb_b0_attn_idx[22] = {
+ 2, 3, 4, 5, 10, 11, 12, 15, 16, 17, 18, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pglue_b_prty1_bb_b0 = {
+ 1, 22, pglue_b_prty1_bb_b0_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208,
+ 0x2a8204
+};
+
+static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = {
+ &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0,
+};
+
+static const u16 pglue_b_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pglue_b_prty0_k2 = {
+ 0, 1, pglue_b_prty0_k2_attn_idx, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194
+};
+
+static const u16 pglue_b_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pglue_b_prty1_k2 = {
+ 1, 31, pglue_b_prty1_k2_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208,
+ 0x2a8204
+};
+
+static const u16 pglue_b_prty2_k2_attn_idx[3] = {
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pglue_b_prty2_k2 = {
+ 2, 3, pglue_b_prty2_k2_attn_idx, 0x2a8210, 0x2a821c, 0x2a8218, 0x2a8214
+};
+
+static struct attn_hw_reg *pglue_b_prty_k2_regs[3] = {
+ &pglue_b_prty0_k2, &pglue_b_prty1_k2, &pglue_b_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cnig_int_attn_desc[10] = {
+ "cnig_address_error",
+ "cnig_tx_illegal_sop_port0",
+ "cnig_tx_illegal_sop_port1",
+ "cnig_tx_illegal_sop_port2",
+ "cnig_tx_illegal_sop_port3",
+ "cnig_tdm_lane_0_bandwidth_exceed",
+ "cnig_tdm_lane_1_bandwidth_exceed",
+ "cnig_pmeg_intr",
+ "cnig_pmfc_intr",
+ "cnig_fifo_error",
+};
+#else
+#define cnig_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cnig_int0_bb_a0_attn_idx[4] = {
+ 0, 7, 8, 9,
+};
+
+static struct attn_hw_reg cnig_int0_bb_a0 = {
+ 0, 4, cnig_int0_bb_a0_attn_idx, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec
+};
+
+static struct attn_hw_reg *cnig_int_bb_a0_regs[1] = {
+ &cnig_int0_bb_a0,
+};
+
+static const u16 cnig_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 3, 7, 8, 9,
+};
+
+static struct attn_hw_reg cnig_int0_bb_b0 = {
+ 0, 6, cnig_int0_bb_b0_attn_idx, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec
+};
+
+static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = {
+ &cnig_int0_bb_b0,
+};
+
+static const u16 cnig_int0_k2_attn_idx[7] = {
+ 0, 1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg cnig_int0_k2 = {
+ 0, 7, cnig_int0_k2_attn_idx, 0x218218, 0x218224, 0x218220, 0x21821c
+};
+
+static struct attn_hw_reg *cnig_int_k2_regs[1] = {
+ &cnig_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cnig_prty_attn_desc[3] = {
+ "cnig_unused_0",
+ "cnig_datapath_tx",
+ "cnig_datapath_rx",
+};
+#else
+#define cnig_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 cnig_prty0_bb_b0_attn_idx[2] = {
+ 1, 2,
+};
+
+static struct attn_hw_reg cnig_prty0_bb_b0 = {
+ 0, 2, cnig_prty0_bb_b0_attn_idx, 0x218348, 0x218354, 0x218350, 0x21834c
+};
+
+static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = {
+ &cnig_prty0_bb_b0,
+};
+
+static const u16 cnig_prty0_k2_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg cnig_prty0_k2 = {
+ 0, 1, cnig_prty0_k2_attn_idx, 0x21822c, 0x218238, 0x218234, 0x218230
+};
+
+static struct attn_hw_reg *cnig_prty_k2_regs[1] = {
+ &cnig_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cpmu_int_attn_desc[1] = {
+ "cpmu_address_error",
+};
+#else
+#define cpmu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cpmu_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg cpmu_int0_bb_a0 = {
+ 0, 1, cpmu_int0_bb_a0_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4
+};
+
+static struct attn_hw_reg *cpmu_int_bb_a0_regs[1] = {
+ &cpmu_int0_bb_a0,
+};
+
+static const u16 cpmu_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg cpmu_int0_bb_b0 = {
+ 0, 1, cpmu_int0_bb_b0_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4
+};
+
+static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = {
+ &cpmu_int0_bb_b0,
+};
+
+static const u16 cpmu_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg cpmu_int0_k2 = {
+ 0, 1, cpmu_int0_k2_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4
+};
+
+static struct attn_hw_reg *cpmu_int_k2_regs[1] = {
+ &cpmu_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ncsi_int_attn_desc[1] = {
+ "ncsi_address_error",
+};
+#else
+#define ncsi_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ncsi_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_int0_bb_a0 = {
+ 0, 1, ncsi_int0_bb_a0_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0
+};
+
+static struct attn_hw_reg *ncsi_int_bb_a0_regs[1] = {
+ &ncsi_int0_bb_a0,
+};
+
+static const u16 ncsi_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_int0_bb_b0 = {
+ 0, 1, ncsi_int0_bb_b0_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0
+};
+
+static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = {
+ &ncsi_int0_bb_b0,
+};
+
+static const u16 ncsi_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_int0_k2 = {
+ 0, 1, ncsi_int0_k2_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0
+};
+
+static struct attn_hw_reg *ncsi_int_k2_regs[1] = {
+ &ncsi_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ncsi_prty_attn_desc[1] = {
+ "ncsi_mem002_i_mem_prty",
+};
+#else
+#define ncsi_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ncsi_prty1_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_prty1_bb_a0 = {
+ 0, 1, ncsi_prty1_bb_a0_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004
+};
+
+static struct attn_hw_reg *ncsi_prty_bb_a0_regs[1] = {
+ &ncsi_prty1_bb_a0,
+};
+
+static const u16 ncsi_prty1_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_prty1_bb_b0 = {
+ 0, 1, ncsi_prty1_bb_b0_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004
+};
+
+static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = {
+ &ncsi_prty1_bb_b0,
+};
+
+static const u16 ncsi_prty1_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_prty1_k2 = {
+ 0, 1, ncsi_prty1_k2_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004
+};
+
+static struct attn_hw_reg *ncsi_prty_k2_regs[1] = {
+ &ncsi_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *opte_prty_attn_desc[12] = {
+ "opte_mem009_i_mem_prty",
+ "opte_mem010_i_mem_prty",
+ "opte_mem005_i_mem_prty",
+ "opte_mem006_i_mem_prty",
+ "opte_mem007_i_mem_prty",
+ "opte_mem008_i_mem_prty",
+ "opte_mem001_i_mem_prty",
+ "opte_mem002_i_mem_prty",
+ "opte_mem003_i_mem_prty",
+ "opte_mem004_i_mem_prty",
+ "opte_mem011_i_mem_prty",
+ "opte_datapath_parity_error",
+};
+#else
+#define opte_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 opte_prty1_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg opte_prty1_bb_a0 = {
+ 0, 11, opte_prty1_bb_a0_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004
+};
+
+static struct attn_hw_reg *opte_prty_bb_a0_regs[1] = {
+ &opte_prty1_bb_a0,
+};
+
+static const u16 opte_prty1_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg opte_prty1_bb_b0 = {
+ 0, 11, opte_prty1_bb_b0_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004
+};
+
+static const u16 opte_prty0_bb_b0_attn_idx[1] = {
+ 11,
+};
+
+static struct attn_hw_reg opte_prty0_bb_b0 = {
+ 1, 1, opte_prty0_bb_b0_attn_idx, 0x53208, 0x53214, 0x53210, 0x5320c
+};
+
+static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = {
+ &opte_prty1_bb_b0, &opte_prty0_bb_b0,
+};
+
+static const u16 opte_prty1_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg opte_prty1_k2 = {
+ 0, 11, opte_prty1_k2_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004
+};
+
+static const u16 opte_prty0_k2_attn_idx[1] = {
+ 11,
+};
+
+static struct attn_hw_reg opte_prty0_k2 = {
+ 1, 1, opte_prty0_k2_attn_idx, 0x53208, 0x53214, 0x53210, 0x5320c
+};
+
+static struct attn_hw_reg *opte_prty_k2_regs[2] = {
+ &opte_prty1_k2, &opte_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *bmb_int_attn_desc[297] = {
+ "bmb_address_error",
+ "bmb_rc_pkt0_rls_error",
+ "bmb_unused_0",
+ "bmb_rc_pkt0_protocol_error",
+ "bmb_rc_pkt1_rls_error",
+ "bmb_unused_1",
+ "bmb_rc_pkt1_protocol_error",
+ "bmb_rc_pkt2_rls_error",
+ "bmb_unused_2",
+ "bmb_rc_pkt2_protocol_error",
+ "bmb_rc_pkt3_rls_error",
+ "bmb_unused_3",
+ "bmb_rc_pkt3_protocol_error",
+ "bmb_rc_sop_req_tc_port_error",
+ "bmb_unused_4",
+ "bmb_wc0_protocol_error",
+ "bmb_wc1_protocol_error",
+ "bmb_wc2_protocol_error",
+ "bmb_wc3_protocol_error",
+ "bmb_unused_5",
+ "bmb_ll_blk_error",
+ "bmb_unused_6",
+ "bmb_mac0_fc_cnt_error",
+ "bmb_ll_arb_calc_error",
+ "bmb_wc0_inp_fifo_error",
+ "bmb_wc0_sop_fifo_error",
+ "bmb_wc0_len_fifo_error",
+ "bmb_wc0_queue_fifo_error",
+ "bmb_wc0_free_point_fifo_error",
+ "bmb_wc0_next_point_fifo_error",
+ "bmb_wc0_strt_fifo_error",
+ "bmb_wc0_second_dscr_fifo_error",
+ "bmb_wc0_pkt_avail_fifo_error",
+ "bmb_wc0_cos_cnt_fifo_error",
+ "bmb_wc0_notify_fifo_error",
+ "bmb_wc0_ll_req_fifo_error",
+ "bmb_wc0_ll_pa_cnt_error",
+ "bmb_wc0_bb_pa_cnt_error",
+ "bmb_wc1_inp_fifo_error",
+ "bmb_wc1_sop_fifo_error",
+ "bmb_wc1_queue_fifo_error",
+ "bmb_wc1_free_point_fifo_error",
+ "bmb_wc1_next_point_fifo_error",
+ "bmb_wc1_strt_fifo_error",
+ "bmb_wc1_second_dscr_fifo_error",
+ "bmb_wc1_pkt_avail_fifo_error",
+ "bmb_wc1_cos_cnt_fifo_error",
+ "bmb_wc1_notify_fifo_error",
+ "bmb_wc1_ll_req_fifo_error",
+ "bmb_wc1_ll_pa_cnt_error",
+ "bmb_wc1_bb_pa_cnt_error",
+ "bmb_wc2_inp_fifo_error",
+ "bmb_wc2_sop_fifo_error",
+ "bmb_wc2_queue_fifo_error",
+ "bmb_wc2_free_point_fifo_error",
+ "bmb_wc2_next_point_fifo_error",
+ "bmb_wc2_strt_fifo_error",
+ "bmb_wc2_second_dscr_fifo_error",
+ "bmb_wc2_pkt_avail_fifo_error",
+ "bmb_wc2_cos_cnt_fifo_error",
+ "bmb_wc2_notify_fifo_error",
+ "bmb_wc2_ll_req_fifo_error",
+ "bmb_wc2_ll_pa_cnt_error",
+ "bmb_wc2_bb_pa_cnt_error",
+ "bmb_wc3_inp_fifo_error",
+ "bmb_wc3_sop_fifo_error",
+ "bmb_wc3_queue_fifo_error",
+ "bmb_wc3_free_point_fifo_error",
+ "bmb_wc3_next_point_fifo_error",
+ "bmb_wc3_strt_fifo_error",
+ "bmb_wc3_second_dscr_fifo_error",
+ "bmb_wc3_pkt_avail_fifo_error",
+ "bmb_wc3_cos_cnt_fifo_error",
+ "bmb_wc3_notify_fifo_error",
+ "bmb_wc3_ll_req_fifo_error",
+ "bmb_wc3_ll_pa_cnt_error",
+ "bmb_wc3_bb_pa_cnt_error",
+ "bmb_rc_pkt0_side_fifo_error",
+ "bmb_rc_pkt0_req_fifo_error",
+ "bmb_rc_pkt0_blk_fifo_error",
+ "bmb_rc_pkt0_rls_left_fifo_error",
+ "bmb_rc_pkt0_strt_ptr_fifo_error",
+ "bmb_rc_pkt0_second_ptr_fifo_error",
+ "bmb_rc_pkt0_rsp_fifo_error",
+ "bmb_rc_pkt0_dscr_fifo_error",
+ "bmb_rc_pkt1_side_fifo_error",
+ "bmb_rc_pkt1_req_fifo_error",
+ "bmb_rc_pkt1_blk_fifo_error",
+ "bmb_rc_pkt1_rls_left_fifo_error",
+ "bmb_rc_pkt1_strt_ptr_fifo_error",
+ "bmb_rc_pkt1_second_ptr_fifo_error",
+ "bmb_rc_pkt1_rsp_fifo_error",
+ "bmb_rc_pkt1_dscr_fifo_error",
+ "bmb_rc_pkt2_side_fifo_error",
+ "bmb_rc_pkt2_req_fifo_error",
+ "bmb_rc_pkt2_blk_fifo_error",
+ "bmb_rc_pkt2_rls_left_fifo_error",
+ "bmb_rc_pkt2_strt_ptr_fifo_error",
+ "bmb_rc_pkt2_second_ptr_fifo_error",
+ "bmb_rc_pkt2_rsp_fifo_error",
+ "bmb_rc_pkt2_dscr_fifo_error",
+ "bmb_rc_pkt3_side_fifo_error",
+ "bmb_rc_pkt3_req_fifo_error",
+ "bmb_rc_pkt3_blk_fifo_error",
+ "bmb_rc_pkt3_rls_left_fifo_error",
+ "bmb_rc_pkt3_strt_ptr_fifo_error",
+ "bmb_rc_pkt3_second_ptr_fifo_error",
+ "bmb_rc_pkt3_rsp_fifo_error",
+ "bmb_rc_pkt3_dscr_fifo_error",
+ "bmb_rc_sop_strt_fifo_error",
+ "bmb_rc_sop_req_fifo_error",
+ "bmb_rc_sop_dscr_fifo_error",
+ "bmb_rc_sop_queue_fifo_error",
+ "bmb_ll_arb_rls_fifo_error",
+ "bmb_ll_arb_prefetch_fifo_error",
+ "bmb_rc_pkt0_rls_fifo_error",
+ "bmb_rc_pkt1_rls_fifo_error",
+ "bmb_rc_pkt2_rls_fifo_error",
+ "bmb_rc_pkt3_rls_fifo_error",
+ "bmb_rc_pkt4_rls_fifo_error",
+ "bmb_rc_pkt5_rls_fifo_error",
+ "bmb_rc_pkt6_rls_fifo_error",
+ "bmb_rc_pkt7_rls_fifo_error",
+ "bmb_rc_pkt8_rls_fifo_error",
+ "bmb_rc_pkt9_rls_fifo_error",
+ "bmb_rc_pkt4_rls_error",
+ "bmb_rc_pkt4_protocol_error",
+ "bmb_rc_pkt4_side_fifo_error",
+ "bmb_rc_pkt4_req_fifo_error",
+ "bmb_rc_pkt4_blk_fifo_error",
+ "bmb_rc_pkt4_rls_left_fifo_error",
+ "bmb_rc_pkt4_strt_ptr_fifo_error",
+ "bmb_rc_pkt4_second_ptr_fifo_error",
+ "bmb_rc_pkt4_rsp_fifo_error",
+ "bmb_rc_pkt4_dscr_fifo_error",
+ "bmb_rc_pkt5_rls_error",
+ "bmb_rc_pkt5_protocol_error",
+ "bmb_rc_pkt5_side_fifo_error",
+ "bmb_rc_pkt5_req_fifo_error",
+ "bmb_rc_pkt5_blk_fifo_error",
+ "bmb_rc_pkt5_rls_left_fifo_error",
+ "bmb_rc_pkt5_strt_ptr_fifo_error",
+ "bmb_rc_pkt5_second_ptr_fifo_error",
+ "bmb_rc_pkt5_rsp_fifo_error",
+ "bmb_rc_pkt5_dscr_fifo_error",
+ "bmb_rc_pkt6_rls_error",
+ "bmb_rc_pkt6_protocol_error",
+ "bmb_rc_pkt6_side_fifo_error",
+ "bmb_rc_pkt6_req_fifo_error",
+ "bmb_rc_pkt6_blk_fifo_error",
+ "bmb_rc_pkt6_rls_left_fifo_error",
+ "bmb_rc_pkt6_strt_ptr_fifo_error",
+ "bmb_rc_pkt6_second_ptr_fifo_error",
+ "bmb_rc_pkt6_rsp_fifo_error",
+ "bmb_rc_pkt6_dscr_fifo_error",
+ "bmb_rc_pkt7_rls_error",
+ "bmb_rc_pkt7_protocol_error",
+ "bmb_rc_pkt7_side_fifo_error",
+ "bmb_rc_pkt7_req_fifo_error",
+ "bmb_rc_pkt7_blk_fifo_error",
+ "bmb_rc_pkt7_rls_left_fifo_error",
+ "bmb_rc_pkt7_strt_ptr_fifo_error",
+ "bmb_rc_pkt7_second_ptr_fifo_error",
+ "bmb_rc_pkt7_rsp_fifo_error",
+ "bmb_packet_available_sync_fifo_push_error",
+ "bmb_rc_pkt8_rls_error",
+ "bmb_rc_pkt8_protocol_error",
+ "bmb_rc_pkt8_side_fifo_error",
+ "bmb_rc_pkt8_req_fifo_error",
+ "bmb_rc_pkt8_blk_fifo_error",
+ "bmb_rc_pkt8_rls_left_fifo_error",
+ "bmb_rc_pkt8_strt_ptr_fifo_error",
+ "bmb_rc_pkt8_second_ptr_fifo_error",
+ "bmb_rc_pkt8_rsp_fifo_error",
+ "bmb_rc_pkt8_dscr_fifo_error",
+ "bmb_rc_pkt9_rls_error",
+ "bmb_rc_pkt9_protocol_error",
+ "bmb_rc_pkt9_side_fifo_error",
+ "bmb_rc_pkt9_req_fifo_error",
+ "bmb_rc_pkt9_blk_fifo_error",
+ "bmb_rc_pkt9_rls_left_fifo_error",
+ "bmb_rc_pkt9_strt_ptr_fifo_error",
+ "bmb_rc_pkt9_second_ptr_fifo_error",
+ "bmb_rc_pkt9_rsp_fifo_error",
+ "bmb_rc_pkt9_dscr_fifo_error",
+ "bmb_wc4_protocol_error",
+ "bmb_wc5_protocol_error",
+ "bmb_wc6_protocol_error",
+ "bmb_wc7_protocol_error",
+ "bmb_wc8_protocol_error",
+ "bmb_wc9_protocol_error",
+ "bmb_wc4_inp_fifo_error",
+ "bmb_wc4_sop_fifo_error",
+ "bmb_wc4_queue_fifo_error",
+ "bmb_wc4_free_point_fifo_error",
+ "bmb_wc4_next_point_fifo_error",
+ "bmb_wc4_strt_fifo_error",
+ "bmb_wc4_second_dscr_fifo_error",
+ "bmb_wc4_pkt_avail_fifo_error",
+ "bmb_wc4_cos_cnt_fifo_error",
+ "bmb_wc4_notify_fifo_error",
+ "bmb_wc4_ll_req_fifo_error",
+ "bmb_wc4_ll_pa_cnt_error",
+ "bmb_wc4_bb_pa_cnt_error",
+ "bmb_wc5_inp_fifo_error",
+ "bmb_wc5_sop_fifo_error",
+ "bmb_wc5_queue_fifo_error",
+ "bmb_wc5_free_point_fifo_error",
+ "bmb_wc5_next_point_fifo_error",
+ "bmb_wc5_strt_fifo_error",
+ "bmb_wc5_second_dscr_fifo_error",
+ "bmb_wc5_pkt_avail_fifo_error",
+ "bmb_wc5_cos_cnt_fifo_error",
+ "bmb_wc5_notify_fifo_error",
+ "bmb_wc5_ll_req_fifo_error",
+ "bmb_wc5_ll_pa_cnt_error",
+ "bmb_wc5_bb_pa_cnt_error",
+ "bmb_wc6_inp_fifo_error",
+ "bmb_wc6_sop_fifo_error",
+ "bmb_wc6_queue_fifo_error",
+ "bmb_wc6_free_point_fifo_error",
+ "bmb_wc6_next_point_fifo_error",
+ "bmb_wc6_strt_fifo_error",
+ "bmb_wc6_second_dscr_fifo_error",
+ "bmb_wc6_pkt_avail_fifo_error",
+ "bmb_wc6_cos_cnt_fifo_error",
+ "bmb_wc6_notify_fifo_error",
+ "bmb_wc6_ll_req_fifo_error",
+ "bmb_wc6_ll_pa_cnt_error",
+ "bmb_wc6_bb_pa_cnt_error",
+ "bmb_wc7_inp_fifo_error",
+ "bmb_wc7_sop_fifo_error",
+ "bmb_wc7_queue_fifo_error",
+ "bmb_wc7_free_point_fifo_error",
+ "bmb_wc7_next_point_fifo_error",
+ "bmb_wc7_strt_fifo_error",
+ "bmb_wc7_second_dscr_fifo_error",
+ "bmb_wc7_pkt_avail_fifo_error",
+ "bmb_wc7_cos_cnt_fifo_error",
+ "bmb_wc7_notify_fifo_error",
+ "bmb_wc7_ll_req_fifo_error",
+ "bmb_wc7_ll_pa_cnt_error",
+ "bmb_wc7_bb_pa_cnt_error",
+ "bmb_wc8_inp_fifo_error",
+ "bmb_wc8_sop_fifo_error",
+ "bmb_wc8_queue_fifo_error",
+ "bmb_wc8_free_point_fifo_error",
+ "bmb_wc8_next_point_fifo_error",
+ "bmb_wc8_strt_fifo_error",
+ "bmb_wc8_second_dscr_fifo_error",
+ "bmb_wc8_pkt_avail_fifo_error",
+ "bmb_wc8_cos_cnt_fifo_error",
+ "bmb_wc8_notify_fifo_error",
+ "bmb_wc8_ll_req_fifo_error",
+ "bmb_wc8_ll_pa_cnt_error",
+ "bmb_wc8_bb_pa_cnt_error",
+ "bmb_wc9_inp_fifo_error",
+ "bmb_wc9_sop_fifo_error",
+ "bmb_wc9_queue_fifo_error",
+ "bmb_wc9_free_point_fifo_error",
+ "bmb_wc9_next_point_fifo_error",
+ "bmb_wc9_strt_fifo_error",
+ "bmb_wc9_second_dscr_fifo_error",
+ "bmb_wc9_pkt_avail_fifo_error",
+ "bmb_wc9_cos_cnt_fifo_error",
+ "bmb_wc9_notify_fifo_error",
+ "bmb_wc9_ll_req_fifo_error",
+ "bmb_wc9_ll_pa_cnt_error",
+ "bmb_wc9_bb_pa_cnt_error",
+ "bmb_rc9_sop_rc_out_sync_fifo_error",
+ "bmb_rc9_sop_out_sync_fifo_push_error",
+ "bmb_rc0_sop_pend_fifo_error",
+ "bmb_rc1_sop_pend_fifo_error",
+ "bmb_rc2_sop_pend_fifo_error",
+ "bmb_rc3_sop_pend_fifo_error",
+ "bmb_rc4_sop_pend_fifo_error",
+ "bmb_rc5_sop_pend_fifo_error",
+ "bmb_rc6_sop_pend_fifo_error",
+ "bmb_rc7_sop_pend_fifo_error",
+ "bmb_rc0_dscr_pend_fifo_error",
+ "bmb_rc1_dscr_pend_fifo_error",
+ "bmb_rc2_dscr_pend_fifo_error",
+ "bmb_rc3_dscr_pend_fifo_error",
+ "bmb_rc4_dscr_pend_fifo_error",
+ "bmb_rc5_dscr_pend_fifo_error",
+ "bmb_rc6_dscr_pend_fifo_error",
+ "bmb_rc7_dscr_pend_fifo_error",
+ "bmb_rc8_sop_inp_sync_fifo_push_error",
+ "bmb_rc9_sop_inp_sync_fifo_push_error",
+ "bmb_rc8_sop_out_sync_fifo_push_error",
+ "bmb_rc_gnt_pend_fifo_error",
+ "bmb_rc8_out_sync_fifo_push_error",
+ "bmb_rc9_out_sync_fifo_push_error",
+ "bmb_wc8_sync_fifo_push_error",
+ "bmb_wc9_sync_fifo_push_error",
+ "bmb_rc8_sop_rc_out_sync_fifo_error",
+ "bmb_rc_pkt7_dscr_fifo_error",
+};
+#else
+#define bmb_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 bmb_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22,
+};
+
+static struct attn_hw_reg bmb_int0_bb_a0 = {
+ 0, 16, bmb_int0_bb_a0_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4
+};
+
+static const u16 bmb_int1_bb_a0_attn_idx[28] = {
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_int1_bb_a0 = {
+ 1, 28, bmb_int1_bb_a0_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc
+};
+
+static const u16 bmb_int2_bb_a0_attn_idx[26] = {
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+};
+
+static struct attn_hw_reg bmb_int2_bb_a0 = {
+ 2, 26, bmb_int2_bb_a0_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4
+};
+
+static const u16 bmb_int3_bb_a0_attn_idx[31] = {
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+};
+
+static struct attn_hw_reg bmb_int3_bb_a0 = {
+ 3, 31, bmb_int3_bb_a0_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c
+};
+
+static const u16 bmb_int4_bb_a0_attn_idx[27] = {
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122,
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+};
+
+static struct attn_hw_reg bmb_int4_bb_a0 = {
+ 4, 27, bmb_int4_bb_a0_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124
+};
+
+static const u16 bmb_int5_bb_a0_attn_idx[29] = {
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+};
+
+static struct attn_hw_reg bmb_int5_bb_a0 = {
+ 5, 29, bmb_int5_bb_a0_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c
+};
+
+static const u16 bmb_int6_bb_a0_attn_idx[30] = {
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193,
+};
+
+static struct attn_hw_reg bmb_int6_bb_a0 = {
+ 6, 30, bmb_int6_bb_a0_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154
+};
+
+static const u16 bmb_int7_bb_a0_attn_idx[32] = {
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224,
+ 225,
+};
+
+static struct attn_hw_reg bmb_int7_bb_a0 = {
+ 7, 32, bmb_int7_bb_a0_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c
+};
+
+static const u16 bmb_int8_bb_a0_attn_idx[32] = {
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256,
+ 257,
+};
+
+static struct attn_hw_reg bmb_int8_bb_a0 = {
+ 8, 32, bmb_int8_bb_a0_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188
+};
+
+static const u16 bmb_int9_bb_a0_attn_idx[32] = {
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288,
+ 289,
+};
+
+static struct attn_hw_reg bmb_int9_bb_a0 = {
+ 9, 32, bmb_int9_bb_a0_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0
+};
+
+static const u16 bmb_int10_bb_a0_attn_idx[3] = {
+ 290, 291, 292,
+};
+
+static struct attn_hw_reg bmb_int10_bb_a0 = {
+ 10, 3, bmb_int10_bb_a0_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8
+};
+
+static const u16 bmb_int11_bb_a0_attn_idx[4] = {
+ 293, 294, 295, 296,
+};
+
+static struct attn_hw_reg bmb_int11_bb_a0 = {
+ 11, 4, bmb_int11_bb_a0_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0
+};
+
+static struct attn_hw_reg *bmb_int_bb_a0_regs[12] = {
+ &bmb_int0_bb_a0, &bmb_int1_bb_a0, &bmb_int2_bb_a0, &bmb_int3_bb_a0,
+ &bmb_int4_bb_a0, &bmb_int5_bb_a0, &bmb_int6_bb_a0, &bmb_int7_bb_a0,
+ &bmb_int8_bb_a0, &bmb_int9_bb_a0,
+ &bmb_int10_bb_a0, &bmb_int11_bb_a0,
+};
+
+static const u16 bmb_int0_bb_b0_attn_idx[16] = {
+ 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22,
+};
+
+static struct attn_hw_reg bmb_int0_bb_b0 = {
+ 0, 16, bmb_int0_bb_b0_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4
+};
+
+static const u16 bmb_int1_bb_b0_attn_idx[28] = {
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_int1_bb_b0 = {
+ 1, 28, bmb_int1_bb_b0_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc
+};
+
+static const u16 bmb_int2_bb_b0_attn_idx[26] = {
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+};
+
+static struct attn_hw_reg bmb_int2_bb_b0 = {
+ 2, 26, bmb_int2_bb_b0_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4
+};
+
+static const u16 bmb_int3_bb_b0_attn_idx[31] = {
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+};
+
+static struct attn_hw_reg bmb_int3_bb_b0 = {
+ 3, 31, bmb_int3_bb_b0_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c
+};
+
+static const u16 bmb_int4_bb_b0_attn_idx[27] = {
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122,
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+};
+
+static struct attn_hw_reg bmb_int4_bb_b0 = {
+ 4, 27, bmb_int4_bb_b0_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124
+};
+
+static const u16 bmb_int5_bb_b0_attn_idx[29] = {
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+};
+
+static struct attn_hw_reg bmb_int5_bb_b0 = {
+ 5, 29, bmb_int5_bb_b0_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c
+};
+
+static const u16 bmb_int6_bb_b0_attn_idx[30] = {
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193,
+};
+
+static struct attn_hw_reg bmb_int6_bb_b0 = {
+ 6, 30, bmb_int6_bb_b0_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154
+};
+
+static const u16 bmb_int7_bb_b0_attn_idx[32] = {
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224,
+ 225,
+};
+
+static struct attn_hw_reg bmb_int7_bb_b0 = {
+ 7, 32, bmb_int7_bb_b0_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c
+};
+
+static const u16 bmb_int8_bb_b0_attn_idx[32] = {
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256,
+ 257,
+};
+
+static struct attn_hw_reg bmb_int8_bb_b0 = {
+ 8, 32, bmb_int8_bb_b0_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188
+};
+
+static const u16 bmb_int9_bb_b0_attn_idx[32] = {
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288,
+ 289,
+};
+
+static struct attn_hw_reg bmb_int9_bb_b0 = {
+ 9, 32, bmb_int9_bb_b0_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0
+};
+
+static const u16 bmb_int10_bb_b0_attn_idx[3] = {
+ 290, 291, 292,
+};
+
+static struct attn_hw_reg bmb_int10_bb_b0 = {
+ 10, 3, bmb_int10_bb_b0_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8
+};
+
+static const u16 bmb_int11_bb_b0_attn_idx[4] = {
+ 293, 294, 295, 296,
+};
+
+static struct attn_hw_reg bmb_int11_bb_b0 = {
+ 11, 4, bmb_int11_bb_b0_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0
+};
+
+static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = {
+ &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0,
+ &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0,
+ &bmb_int8_bb_b0, &bmb_int9_bb_b0,
+ &bmb_int10_bb_b0, &bmb_int11_bb_b0,
+};
+
+static const u16 bmb_int0_k2_attn_idx[16] = {
+ 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22,
+};
+
+static struct attn_hw_reg bmb_int0_k2 = {
+ 0, 16, bmb_int0_k2_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4
+};
+
+static const u16 bmb_int1_k2_attn_idx[28] = {
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_int1_k2 = {
+ 1, 28, bmb_int1_k2_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc
+};
+
+static const u16 bmb_int2_k2_attn_idx[26] = {
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+};
+
+static struct attn_hw_reg bmb_int2_k2 = {
+ 2, 26, bmb_int2_k2_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4
+};
+
+static const u16 bmb_int3_k2_attn_idx[31] = {
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+};
+
+static struct attn_hw_reg bmb_int3_k2 = {
+ 3, 31, bmb_int3_k2_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c
+};
+
+static const u16 bmb_int4_k2_attn_idx[27] = {
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122,
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+};
+
+static struct attn_hw_reg bmb_int4_k2 = {
+ 4, 27, bmb_int4_k2_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124
+};
+
+static const u16 bmb_int5_k2_attn_idx[29] = {
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+};
+
+static struct attn_hw_reg bmb_int5_k2 = {
+ 5, 29, bmb_int5_k2_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c
+};
+
+static const u16 bmb_int6_k2_attn_idx[30] = {
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193,
+};
+
+static struct attn_hw_reg bmb_int6_k2 = {
+ 6, 30, bmb_int6_k2_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154
+};
+
+static const u16 bmb_int7_k2_attn_idx[32] = {
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224,
+ 225,
+};
+
+static struct attn_hw_reg bmb_int7_k2 = {
+ 7, 32, bmb_int7_k2_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c
+};
+
+static const u16 bmb_int8_k2_attn_idx[32] = {
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256,
+ 257,
+};
+
+static struct attn_hw_reg bmb_int8_k2 = {
+ 8, 32, bmb_int8_k2_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188
+};
+
+static const u16 bmb_int9_k2_attn_idx[32] = {
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288,
+ 289,
+};
+
+static struct attn_hw_reg bmb_int9_k2 = {
+ 9, 32, bmb_int9_k2_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0
+};
+
+static const u16 bmb_int10_k2_attn_idx[3] = {
+ 290, 291, 292,
+};
+
+static struct attn_hw_reg bmb_int10_k2 = {
+ 10, 3, bmb_int10_k2_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8
+};
+
+static const u16 bmb_int11_k2_attn_idx[4] = {
+ 293, 294, 295, 296,
+};
+
+static struct attn_hw_reg bmb_int11_k2 = {
+ 11, 4, bmb_int11_k2_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0
+};
+
+static struct attn_hw_reg *bmb_int_k2_regs[12] = {
+ &bmb_int0_k2, &bmb_int1_k2, &bmb_int2_k2, &bmb_int3_k2, &bmb_int4_k2,
+ &bmb_int5_k2, &bmb_int6_k2, &bmb_int7_k2, &bmb_int8_k2, &bmb_int9_k2,
+ &bmb_int10_k2, &bmb_int11_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *bmb_prty_attn_desc[61] = {
+ "bmb_ll_bank0_mem_prty",
+ "bmb_ll_bank1_mem_prty",
+ "bmb_ll_bank2_mem_prty",
+ "bmb_ll_bank3_mem_prty",
+ "bmb_datapath_registers",
+ "bmb_mem001_i_ecc_rf_int",
+ "bmb_mem008_i_ecc_rf_int",
+ "bmb_mem009_i_ecc_rf_int",
+ "bmb_mem010_i_ecc_rf_int",
+ "bmb_mem011_i_ecc_rf_int",
+ "bmb_mem012_i_ecc_rf_int",
+ "bmb_mem013_i_ecc_rf_int",
+ "bmb_mem014_i_ecc_rf_int",
+ "bmb_mem015_i_ecc_rf_int",
+ "bmb_mem016_i_ecc_rf_int",
+ "bmb_mem002_i_ecc_rf_int",
+ "bmb_mem003_i_ecc_rf_int",
+ "bmb_mem004_i_ecc_rf_int",
+ "bmb_mem005_i_ecc_rf_int",
+ "bmb_mem006_i_ecc_rf_int",
+ "bmb_mem007_i_ecc_rf_int",
+ "bmb_mem059_i_mem_prty",
+ "bmb_mem060_i_mem_prty",
+ "bmb_mem037_i_mem_prty",
+ "bmb_mem038_i_mem_prty",
+ "bmb_mem039_i_mem_prty",
+ "bmb_mem040_i_mem_prty",
+ "bmb_mem041_i_mem_prty",
+ "bmb_mem042_i_mem_prty",
+ "bmb_mem043_i_mem_prty",
+ "bmb_mem044_i_mem_prty",
+ "bmb_mem045_i_mem_prty",
+ "bmb_mem046_i_mem_prty",
+ "bmb_mem047_i_mem_prty",
+ "bmb_mem048_i_mem_prty",
+ "bmb_mem049_i_mem_prty",
+ "bmb_mem050_i_mem_prty",
+ "bmb_mem051_i_mem_prty",
+ "bmb_mem052_i_mem_prty",
+ "bmb_mem053_i_mem_prty",
+ "bmb_mem054_i_mem_prty",
+ "bmb_mem055_i_mem_prty",
+ "bmb_mem056_i_mem_prty",
+ "bmb_mem057_i_mem_prty",
+ "bmb_mem058_i_mem_prty",
+ "bmb_mem033_i_mem_prty",
+ "bmb_mem034_i_mem_prty",
+ "bmb_mem035_i_mem_prty",
+ "bmb_mem036_i_mem_prty",
+ "bmb_mem021_i_mem_prty",
+ "bmb_mem022_i_mem_prty",
+ "bmb_mem023_i_mem_prty",
+ "bmb_mem024_i_mem_prty",
+ "bmb_mem025_i_mem_prty",
+ "bmb_mem026_i_mem_prty",
+ "bmb_mem027_i_mem_prty",
+ "bmb_mem028_i_mem_prty",
+ "bmb_mem029_i_mem_prty",
+ "bmb_mem030_i_mem_prty",
+ "bmb_mem031_i_mem_prty",
+ "bmb_mem032_i_mem_prty",
+};
+#else
+#define bmb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 bmb_prty1_bb_a0_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg bmb_prty1_bb_a0 = {
+ 0, 31, bmb_prty1_bb_a0_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404
+};
+
+static const u16 bmb_prty2_bb_a0_attn_idx[25] = {
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60,
+};
+
+static struct attn_hw_reg bmb_prty2_bb_a0 = {
+ 1, 25, bmb_prty2_bb_a0_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414
+};
+
+static struct attn_hw_reg *bmb_prty_bb_a0_regs[2] = {
+ &bmb_prty1_bb_a0, &bmb_prty2_bb_a0,
+};
+
+static const u16 bmb_prty0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg bmb_prty0_bb_b0 = {
+ 0, 5, bmb_prty0_bb_b0_attn_idx, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0
+};
+
+static const u16 bmb_prty1_bb_b0_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg bmb_prty1_bb_b0 = {
+ 1, 31, bmb_prty1_bb_b0_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404
+};
+
+static const u16 bmb_prty2_bb_b0_attn_idx[15] = {
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_prty2_bb_b0 = {
+ 2, 15, bmb_prty2_bb_b0_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414
+};
+
+static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = {
+ &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0,
+};
+
+static const u16 bmb_prty0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg bmb_prty0_k2 = {
+ 0, 5, bmb_prty0_k2_attn_idx, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0
+};
+
+static const u16 bmb_prty1_k2_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg bmb_prty1_k2 = {
+ 1, 31, bmb_prty1_k2_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404
+};
+
+static const u16 bmb_prty2_k2_attn_idx[15] = {
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_prty2_k2 = {
+ 2, 15, bmb_prty2_k2_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414
+};
+
+static struct attn_hw_reg *bmb_prty_k2_regs[3] = {
+ &bmb_prty0_k2, &bmb_prty1_k2, &bmb_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pcie_int_attn_desc[17] = {
+ "pcie_address_error",
+ "pcie_link_down_detect",
+ "pcie_link_up_detect",
+ "pcie_cfg_link_eq_req_int",
+ "pcie_pcie_bandwidth_change_detect",
+ "pcie_early_hot_reset_detect",
+ "pcie_hot_reset_detect",
+ "pcie_l1_entry_detect",
+ "pcie_l1_exit_detect",
+ "pcie_ltssm_state_match_detect",
+ "pcie_fc_timeout_detect",
+ "pcie_pme_turnoff_message_detect",
+ "pcie_cfg_send_cor_err",
+ "pcie_cfg_send_nf_err",
+ "pcie_cfg_send_f_err",
+ "pcie_qoverflow_detect",
+ "pcie_vdm_detect",
+};
+#else
+#define pcie_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcie_int0_k2_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg pcie_int0_k2 = {
+ 0, 17, pcie_int0_k2_attn_idx, 0x547a0, 0x547ac, 0x547a8, 0x547a4
+};
+
+static struct attn_hw_reg *pcie_int_k2_regs[1] = {
+ &pcie_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pcie_prty_attn_desc[24] = {
+ "pcie_mem003_i_ecc_rf_int",
+ "pcie_mem004_i_ecc_rf_int",
+ "pcie_mem008_i_mem_prty",
+ "pcie_mem007_i_mem_prty",
+ "pcie_mem005_i_mem_prty",
+ "pcie_mem006_i_mem_prty",
+ "pcie_mem001_i_mem_prty",
+ "pcie_mem002_i_mem_prty",
+ "pcie_mem001_i_ecc_rf_int",
+ "pcie_mem005_i_ecc_rf_int",
+ "pcie_mem010_i_ecc_rf_int",
+ "pcie_mem009_i_ecc_rf_int",
+ "pcie_mem007_i_ecc_rf_int",
+ "pcie_mem004_i_mem_prty_0",
+ "pcie_mem004_i_mem_prty_1",
+ "pcie_mem004_i_mem_prty_2",
+ "pcie_mem004_i_mem_prty_3",
+ "pcie_mem011_i_mem_prty_1",
+ "pcie_mem011_i_mem_prty_2",
+ "pcie_mem012_i_mem_prty_1",
+ "pcie_mem012_i_mem_prty_2",
+ "pcie_app_parity_errs_0",
+ "pcie_app_parity_errs_1",
+ "pcie_app_parity_errs_2",
+};
+#else
+#define pcie_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcie_prty1_bb_a0_attn_idx[17] = {
+ 0, 2, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+};
+
+static struct attn_hw_reg pcie_prty1_bb_a0 = {
+ 0, 17, pcie_prty1_bb_a0_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004
+};
+
+static struct attn_hw_reg *pcie_prty_bb_a0_regs[1] = {
+ &pcie_prty1_bb_a0,
+};
+
+static const u16 pcie_prty1_bb_b0_attn_idx[17] = {
+ 0, 2, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+};
+
+static struct attn_hw_reg pcie_prty1_bb_b0 = {
+ 0, 17, pcie_prty1_bb_b0_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004
+};
+
+static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = {
+ &pcie_prty1_bb_b0,
+};
+
+static const u16 pcie_prty1_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg pcie_prty1_k2 = {
+ 0, 8, pcie_prty1_k2_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004
+};
+
+static const u16 pcie_prty0_k2_attn_idx[3] = {
+ 21, 22, 23,
+};
+
+static struct attn_hw_reg pcie_prty0_k2 = {
+ 1, 3, pcie_prty0_k2_attn_idx, 0x547b0, 0x547bc, 0x547b8, 0x547b4
+};
+
+static struct attn_hw_reg *pcie_prty_k2_regs[2] = {
+ &pcie_prty1_k2, &pcie_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *mcp2_prty_attn_desc[13] = {
+ "mcp2_rom_parity",
+ "mcp2_mem001_i_ecc_rf_int",
+ "mcp2_mem006_i_ecc_0_rf_int",
+ "mcp2_mem006_i_ecc_1_rf_int",
+ "mcp2_mem006_i_ecc_2_rf_int",
+ "mcp2_mem006_i_ecc_3_rf_int",
+ "mcp2_mem007_i_ecc_rf_int",
+ "mcp2_mem004_i_mem_prty",
+ "mcp2_mem003_i_mem_prty",
+ "mcp2_mem002_i_mem_prty",
+ "mcp2_mem009_i_mem_prty",
+ "mcp2_mem008_i_mem_prty",
+ "mcp2_mem005_i_mem_prty",
+};
+#else
+#define mcp2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 mcp2_prty0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg mcp2_prty0_bb_a0 = {
+ 0, 1, mcp2_prty0_bb_a0_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044
+};
+
+static const u16 mcp2_prty1_bb_a0_attn_idx[12] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg mcp2_prty1_bb_a0 = {
+ 1, 12, mcp2_prty1_bb_a0_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208
+};
+
+static struct attn_hw_reg *mcp2_prty_bb_a0_regs[2] = {
+ &mcp2_prty0_bb_a0, &mcp2_prty1_bb_a0,
+};
+
+static const u16 mcp2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg mcp2_prty0_bb_b0 = {
+ 0, 1, mcp2_prty0_bb_b0_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044
+};
+
+static const u16 mcp2_prty1_bb_b0_attn_idx[12] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg mcp2_prty1_bb_b0 = {
+ 1, 12, mcp2_prty1_bb_b0_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208
+};
+
+static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = {
+ &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0,
+};
+
+static const u16 mcp2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg mcp2_prty0_k2 = {
+ 0, 1, mcp2_prty0_k2_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044
+};
+
+static const u16 mcp2_prty1_k2_attn_idx[12] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg mcp2_prty1_k2 = {
+ 1, 12, mcp2_prty1_k2_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208
+};
+
+static struct attn_hw_reg *mcp2_prty_k2_regs[2] = {
+ &mcp2_prty0_k2, &mcp2_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswhst_int_attn_desc[18] = {
+ "pswhst_address_error",
+ "pswhst_hst_src_fifo1_err",
+ "pswhst_hst_src_fifo2_err",
+ "pswhst_hst_src_fifo3_err",
+ "pswhst_hst_src_fifo4_err",
+ "pswhst_hst_src_fifo5_err",
+ "pswhst_hst_hdr_sync_fifo_err",
+ "pswhst_hst_data_sync_fifo_err",
+ "pswhst_hst_cpl_sync_fifo_err",
+ "pswhst_hst_vf_disabled_access",
+ "pswhst_hst_permission_violation",
+ "pswhst_hst_incorrect_access",
+ "pswhst_hst_src_fifo6_err",
+ "pswhst_hst_src_fifo7_err",
+ "pswhst_hst_src_fifo8_err",
+ "pswhst_hst_src_fifo9_err",
+ "pswhst_hst_source_credit_violation",
+ "pswhst_hst_timeout",
+};
+#else
+#define pswhst_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst_int0_bb_a0_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_int0_bb_a0 = {
+ 0, 18, pswhst_int0_bb_a0_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188,
+ 0x2a0184
+};
+
+static struct attn_hw_reg *pswhst_int_bb_a0_regs[1] = {
+ &pswhst_int0_bb_a0,
+};
+
+static const u16 pswhst_int0_bb_b0_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_int0_bb_b0 = {
+ 0, 18, pswhst_int0_bb_b0_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188,
+ 0x2a0184
+};
+
+static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = {
+ &pswhst_int0_bb_b0,
+};
+
+static const u16 pswhst_int0_k2_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_int0_k2 = {
+ 0, 18, pswhst_int0_k2_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184
+};
+
+static struct attn_hw_reg *pswhst_int_k2_regs[1] = {
+ &pswhst_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswhst_prty_attn_desc[18] = {
+ "pswhst_datapath_registers",
+ "pswhst_mem006_i_mem_prty",
+ "pswhst_mem007_i_mem_prty",
+ "pswhst_mem005_i_mem_prty",
+ "pswhst_mem002_i_mem_prty",
+ "pswhst_mem003_i_mem_prty",
+ "pswhst_mem001_i_mem_prty",
+ "pswhst_mem008_i_mem_prty",
+ "pswhst_mem004_i_mem_prty",
+ "pswhst_mem009_i_mem_prty",
+ "pswhst_mem010_i_mem_prty",
+ "pswhst_mem016_i_mem_prty",
+ "pswhst_mem012_i_mem_prty",
+ "pswhst_mem013_i_mem_prty",
+ "pswhst_mem014_i_mem_prty",
+ "pswhst_mem015_i_mem_prty",
+ "pswhst_mem011_i_mem_prty",
+ "pswhst_mem017_i_mem_prty",
+};
+#else
+#define pswhst_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst_prty1_bb_a0_attn_idx[17] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_prty1_bb_a0 = {
+ 0, 17, pswhst_prty1_bb_a0_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208,
+ 0x2a0204
+};
+
+static struct attn_hw_reg *pswhst_prty_bb_a0_regs[1] = {
+ &pswhst_prty1_bb_a0,
+};
+
+static const u16 pswhst_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswhst_prty0_bb_b0 = {
+ 0, 1, pswhst_prty0_bb_b0_attn_idx, 0x2a0190, 0x2a019c, 0x2a0198,
+ 0x2a0194
+};
+
+static const u16 pswhst_prty1_bb_b0_attn_idx[17] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_prty1_bb_b0 = {
+ 1, 17, pswhst_prty1_bb_b0_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208,
+ 0x2a0204
+};
+
+static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = {
+ &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0,
+};
+
+static const u16 pswhst_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswhst_prty0_k2 = {
+ 0, 1, pswhst_prty0_k2_attn_idx, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194
+};
+
+static const u16 pswhst_prty1_k2_attn_idx[17] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_prty1_k2 = {
+ 1, 17, pswhst_prty1_k2_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204
+};
+
+static struct attn_hw_reg *pswhst_prty_k2_regs[2] = {
+ &pswhst_prty0_k2, &pswhst_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswhst2_int_attn_desc[5] = {
+ "pswhst2_address_error",
+ "pswhst2_hst_header_fifo_err",
+ "pswhst2_hst_data_fifo_err",
+ "pswhst2_hst_cpl_fifo_err",
+ "pswhst2_hst_ireq_fifo_err",
+};
+#else
+#define pswhst2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst2_int0_bb_a0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswhst2_int0_bb_a0 = {
+ 0, 5, pswhst2_int0_bb_a0_attn_idx, 0x29e180, 0x29e18c, 0x29e188,
+ 0x29e184
+};
+
+static struct attn_hw_reg *pswhst2_int_bb_a0_regs[1] = {
+ &pswhst2_int0_bb_a0,
+};
+
+static const u16 pswhst2_int0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswhst2_int0_bb_b0 = {
+ 0, 5, pswhst2_int0_bb_b0_attn_idx, 0x29e180, 0x29e18c, 0x29e188,
+ 0x29e184
+};
+
+static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = {
+ &pswhst2_int0_bb_b0,
+};
+
+static const u16 pswhst2_int0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswhst2_int0_k2 = {
+ 0, 5, pswhst2_int0_k2_attn_idx, 0x29e180, 0x29e18c, 0x29e188, 0x29e184
+};
+
+static struct attn_hw_reg *pswhst2_int_k2_regs[1] = {
+ &pswhst2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswhst2_prty_attn_desc[1] = {
+ "pswhst2_datapath_registers",
+};
+#else
+#define pswhst2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswhst2_prty0_bb_b0 = {
+ 0, 1, pswhst2_prty0_bb_b0_attn_idx, 0x29e190, 0x29e19c, 0x29e198,
+ 0x29e194
+};
+
+static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = {
+ &pswhst2_prty0_bb_b0,
+};
+
+static const u16 pswhst2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswhst2_prty0_k2 = {
+ 0, 1, pswhst2_prty0_k2_attn_idx, 0x29e190, 0x29e19c, 0x29e198, 0x29e194
+};
+
+static struct attn_hw_reg *pswhst2_prty_k2_regs[1] = {
+ &pswhst2_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrd_int_attn_desc[3] = {
+ "pswrd_address_error",
+ "pswrd_pop_error",
+ "pswrd_pop_pbf_error",
+};
+#else
+#define pswrd_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd_int0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg pswrd_int0_bb_a0 = {
+ 0, 3, pswrd_int0_bb_a0_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184
+};
+
+static struct attn_hw_reg *pswrd_int_bb_a0_regs[1] = {
+ &pswrd_int0_bb_a0,
+};
+
+static const u16 pswrd_int0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg pswrd_int0_bb_b0 = {
+ 0, 3, pswrd_int0_bb_b0_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184
+};
+
+static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = {
+ &pswrd_int0_bb_b0,
+};
+
+static const u16 pswrd_int0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg pswrd_int0_k2 = {
+ 0, 3, pswrd_int0_k2_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184
+};
+
+static struct attn_hw_reg *pswrd_int_k2_regs[1] = {
+ &pswrd_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrd_prty_attn_desc[1] = {
+ "pswrd_datapath_registers",
+};
+#else
+#define pswrd_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrd_prty0_bb_b0 = {
+ 0, 1, pswrd_prty0_bb_b0_attn_idx, 0x29c190, 0x29c19c, 0x29c198,
+ 0x29c194
+};
+
+static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = {
+ &pswrd_prty0_bb_b0,
+};
+
+static const u16 pswrd_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrd_prty0_k2 = {
+ 0, 1, pswrd_prty0_k2_attn_idx, 0x29c190, 0x29c19c, 0x29c198, 0x29c194
+};
+
+static struct attn_hw_reg *pswrd_prty_k2_regs[1] = {
+ &pswrd_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrd2_int_attn_desc[5] = {
+ "pswrd2_address_error",
+ "pswrd2_sr_fifo_error",
+ "pswrd2_blk_fifo_error",
+ "pswrd2_push_error",
+ "pswrd2_push_pbf_error",
+};
+#else
+#define pswrd2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd2_int0_bb_a0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswrd2_int0_bb_a0 = {
+ 0, 5, pswrd2_int0_bb_a0_attn_idx, 0x29d180, 0x29d18c, 0x29d188,
+ 0x29d184
+};
+
+static struct attn_hw_reg *pswrd2_int_bb_a0_regs[1] = {
+ &pswrd2_int0_bb_a0,
+};
+
+static const u16 pswrd2_int0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswrd2_int0_bb_b0 = {
+ 0, 5, pswrd2_int0_bb_b0_attn_idx, 0x29d180, 0x29d18c, 0x29d188,
+ 0x29d184
+};
+
+static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = {
+ &pswrd2_int0_bb_b0,
+};
+
+static const u16 pswrd2_int0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswrd2_int0_k2 = {
+ 0, 5, pswrd2_int0_k2_attn_idx, 0x29d180, 0x29d18c, 0x29d188, 0x29d184
+};
+
+static struct attn_hw_reg *pswrd2_int_k2_regs[1] = {
+ &pswrd2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrd2_prty_attn_desc[36] = {
+ "pswrd2_datapath_registers",
+ "pswrd2_mem017_i_ecc_rf_int",
+ "pswrd2_mem018_i_ecc_rf_int",
+ "pswrd2_mem019_i_ecc_rf_int",
+ "pswrd2_mem020_i_ecc_rf_int",
+ "pswrd2_mem021_i_ecc_rf_int",
+ "pswrd2_mem022_i_ecc_rf_int",
+ "pswrd2_mem023_i_ecc_rf_int",
+ "pswrd2_mem024_i_ecc_rf_int",
+ "pswrd2_mem025_i_ecc_rf_int",
+ "pswrd2_mem015_i_ecc_rf_int",
+ "pswrd2_mem034_i_mem_prty",
+ "pswrd2_mem032_i_mem_prty",
+ "pswrd2_mem028_i_mem_prty",
+ "pswrd2_mem033_i_mem_prty",
+ "pswrd2_mem030_i_mem_prty",
+ "pswrd2_mem029_i_mem_prty",
+ "pswrd2_mem031_i_mem_prty",
+ "pswrd2_mem027_i_mem_prty",
+ "pswrd2_mem026_i_mem_prty",
+ "pswrd2_mem001_i_mem_prty",
+ "pswrd2_mem007_i_mem_prty",
+ "pswrd2_mem008_i_mem_prty",
+ "pswrd2_mem009_i_mem_prty",
+ "pswrd2_mem010_i_mem_prty",
+ "pswrd2_mem011_i_mem_prty",
+ "pswrd2_mem012_i_mem_prty",
+ "pswrd2_mem013_i_mem_prty",
+ "pswrd2_mem014_i_mem_prty",
+ "pswrd2_mem002_i_mem_prty",
+ "pswrd2_mem003_i_mem_prty",
+ "pswrd2_mem004_i_mem_prty",
+ "pswrd2_mem005_i_mem_prty",
+ "pswrd2_mem006_i_mem_prty",
+ "pswrd2_mem016_i_mem_prty",
+ "pswrd2_mem015_i_mem_prty",
+};
+#else
+#define pswrd2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd2_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+};
+
+static struct attn_hw_reg pswrd2_prty1_bb_a0 = {
+ 0, 31, pswrd2_prty1_bb_a0_attn_idx, 0x29d200, 0x29d20c, 0x29d208,
+ 0x29d204
+};
+
+static const u16 pswrd2_prty2_bb_a0_attn_idx[3] = {
+ 33, 34, 35,
+};
+
+static struct attn_hw_reg pswrd2_prty2_bb_a0 = {
+ 1, 3, pswrd2_prty2_bb_a0_attn_idx, 0x29d210, 0x29d21c, 0x29d218,
+ 0x29d214
+};
+
+static struct attn_hw_reg *pswrd2_prty_bb_a0_regs[2] = {
+ &pswrd2_prty1_bb_a0, &pswrd2_prty2_bb_a0,
+};
+
+static const u16 pswrd2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrd2_prty0_bb_b0 = {
+ 0, 1, pswrd2_prty0_bb_b0_attn_idx, 0x29d190, 0x29d19c, 0x29d198,
+ 0x29d194
+};
+
+static const u16 pswrd2_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswrd2_prty1_bb_b0 = {
+ 1, 31, pswrd2_prty1_bb_b0_attn_idx, 0x29d200, 0x29d20c, 0x29d208,
+ 0x29d204
+};
+
+static const u16 pswrd2_prty2_bb_b0_attn_idx[3] = {
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pswrd2_prty2_bb_b0 = {
+ 2, 3, pswrd2_prty2_bb_b0_attn_idx, 0x29d210, 0x29d21c, 0x29d218,
+ 0x29d214
+};
+
+static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = {
+ &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0,
+};
+
+static const u16 pswrd2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrd2_prty0_k2 = {
+ 0, 1, pswrd2_prty0_k2_attn_idx, 0x29d190, 0x29d19c, 0x29d198, 0x29d194
+};
+
+static const u16 pswrd2_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswrd2_prty1_k2 = {
+ 1, 31, pswrd2_prty1_k2_attn_idx, 0x29d200, 0x29d20c, 0x29d208, 0x29d204
+};
+
+static const u16 pswrd2_prty2_k2_attn_idx[3] = {
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pswrd2_prty2_k2 = {
+ 2, 3, pswrd2_prty2_k2_attn_idx, 0x29d210, 0x29d21c, 0x29d218, 0x29d214
+};
+
+static struct attn_hw_reg *pswrd2_prty_k2_regs[3] = {
+ &pswrd2_prty0_k2, &pswrd2_prty1_k2, &pswrd2_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswwr_int_attn_desc[16] = {
+ "pswwr_address_error",
+ "pswwr_src_fifo_overflow",
+ "pswwr_qm_fifo_overflow",
+ "pswwr_tm_fifo_overflow",
+ "pswwr_usdm_fifo_overflow",
+ "pswwr_usdmdp_fifo_overflow",
+ "pswwr_xsdm_fifo_overflow",
+ "pswwr_tsdm_fifo_overflow",
+ "pswwr_cduwr_fifo_overflow",
+ "pswwr_dbg_fifo_overflow",
+ "pswwr_dmae_fifo_overflow",
+ "pswwr_hc_fifo_overflow",
+ "pswwr_msdm_fifo_overflow",
+ "pswwr_ysdm_fifo_overflow",
+ "pswwr_psdm_fifo_overflow",
+ "pswwr_m2p_fifo_overflow",
+};
+#else
+#define pswwr_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pswwr_int0_bb_a0 = {
+ 0, 16, pswwr_int0_bb_a0_attn_idx, 0x29a180, 0x29a18c, 0x29a188,
+ 0x29a184
+};
+
+static struct attn_hw_reg *pswwr_int_bb_a0_regs[1] = {
+ &pswwr_int0_bb_a0,
+};
+
+static const u16 pswwr_int0_bb_b0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pswwr_int0_bb_b0 = {
+ 0, 16, pswwr_int0_bb_b0_attn_idx, 0x29a180, 0x29a18c, 0x29a188,
+ 0x29a184
+};
+
+static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = {
+ &pswwr_int0_bb_b0,
+};
+
+static const u16 pswwr_int0_k2_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pswwr_int0_k2 = {
+ 0, 16, pswwr_int0_k2_attn_idx, 0x29a180, 0x29a18c, 0x29a188, 0x29a184
+};
+
+static struct attn_hw_reg *pswwr_int_k2_regs[1] = {
+ &pswwr_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswwr_prty_attn_desc[1] = {
+ "pswwr_datapath_registers",
+};
+#else
+#define pswwr_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswwr_prty0_bb_b0 = {
+ 0, 1, pswwr_prty0_bb_b0_attn_idx, 0x29a190, 0x29a19c, 0x29a198,
+ 0x29a194
+};
+
+static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = {
+ &pswwr_prty0_bb_b0,
+};
+
+static const u16 pswwr_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswwr_prty0_k2 = {
+ 0, 1, pswwr_prty0_k2_attn_idx, 0x29a190, 0x29a19c, 0x29a198, 0x29a194
+};
+
+static struct attn_hw_reg *pswwr_prty_k2_regs[1] = {
+ &pswwr_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswwr2_int_attn_desc[19] = {
+ "pswwr2_address_error",
+ "pswwr2_pglue_eop_error",
+ "pswwr2_pglue_lsr_error",
+ "pswwr2_tm_underflow",
+ "pswwr2_qm_underflow",
+ "pswwr2_src_underflow",
+ "pswwr2_usdm_underflow",
+ "pswwr2_tsdm_underflow",
+ "pswwr2_xsdm_underflow",
+ "pswwr2_usdmdp_underflow",
+ "pswwr2_cdu_underflow",
+ "pswwr2_dbg_underflow",
+ "pswwr2_dmae_underflow",
+ "pswwr2_hc_underflow",
+ "pswwr2_msdm_underflow",
+ "pswwr2_ysdm_underflow",
+ "pswwr2_psdm_underflow",
+ "pswwr2_m2p_underflow",
+ "pswwr2_pglue_eop_error_in_line",
+};
+#else
+#define pswwr2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr2_int0_bb_a0_attn_idx[19] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pswwr2_int0_bb_a0 = {
+ 0, 19, pswwr2_int0_bb_a0_attn_idx, 0x29b180, 0x29b18c, 0x29b188,
+ 0x29b184
+};
+
+static struct attn_hw_reg *pswwr2_int_bb_a0_regs[1] = {
+ &pswwr2_int0_bb_a0,
+};
+
+static const u16 pswwr2_int0_bb_b0_attn_idx[19] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pswwr2_int0_bb_b0 = {
+ 0, 19, pswwr2_int0_bb_b0_attn_idx, 0x29b180, 0x29b18c, 0x29b188,
+ 0x29b184
+};
+
+static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = {
+ &pswwr2_int0_bb_b0,
+};
+
+static const u16 pswwr2_int0_k2_attn_idx[19] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pswwr2_int0_k2 = {
+ 0, 19, pswwr2_int0_k2_attn_idx, 0x29b180, 0x29b18c, 0x29b188, 0x29b184
+};
+
+static struct attn_hw_reg *pswwr2_int_k2_regs[1] = {
+ &pswwr2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswwr2_prty_attn_desc[114] = {
+ "pswwr2_datapath_registers",
+ "pswwr2_mem008_i_ecc_rf_int",
+ "pswwr2_mem001_i_mem_prty",
+ "pswwr2_mem014_i_mem_prty_0",
+ "pswwr2_mem014_i_mem_prty_1",
+ "pswwr2_mem014_i_mem_prty_2",
+ "pswwr2_mem014_i_mem_prty_3",
+ "pswwr2_mem014_i_mem_prty_4",
+ "pswwr2_mem014_i_mem_prty_5",
+ "pswwr2_mem014_i_mem_prty_6",
+ "pswwr2_mem014_i_mem_prty_7",
+ "pswwr2_mem014_i_mem_prty_8",
+ "pswwr2_mem016_i_mem_prty_0",
+ "pswwr2_mem016_i_mem_prty_1",
+ "pswwr2_mem016_i_mem_prty_2",
+ "pswwr2_mem016_i_mem_prty_3",
+ "pswwr2_mem016_i_mem_prty_4",
+ "pswwr2_mem016_i_mem_prty_5",
+ "pswwr2_mem016_i_mem_prty_6",
+ "pswwr2_mem016_i_mem_prty_7",
+ "pswwr2_mem016_i_mem_prty_8",
+ "pswwr2_mem007_i_mem_prty_0",
+ "pswwr2_mem007_i_mem_prty_1",
+ "pswwr2_mem007_i_mem_prty_2",
+ "pswwr2_mem007_i_mem_prty_3",
+ "pswwr2_mem007_i_mem_prty_4",
+ "pswwr2_mem007_i_mem_prty_5",
+ "pswwr2_mem007_i_mem_prty_6",
+ "pswwr2_mem007_i_mem_prty_7",
+ "pswwr2_mem007_i_mem_prty_8",
+ "pswwr2_mem017_i_mem_prty_0",
+ "pswwr2_mem017_i_mem_prty_1",
+ "pswwr2_mem017_i_mem_prty_2",
+ "pswwr2_mem017_i_mem_prty_3",
+ "pswwr2_mem017_i_mem_prty_4",
+ "pswwr2_mem017_i_mem_prty_5",
+ "pswwr2_mem017_i_mem_prty_6",
+ "pswwr2_mem017_i_mem_prty_7",
+ "pswwr2_mem017_i_mem_prty_8",
+ "pswwr2_mem009_i_mem_prty_0",
+ "pswwr2_mem009_i_mem_prty_1",
+ "pswwr2_mem009_i_mem_prty_2",
+ "pswwr2_mem009_i_mem_prty_3",
+ "pswwr2_mem009_i_mem_prty_4",
+ "pswwr2_mem009_i_mem_prty_5",
+ "pswwr2_mem009_i_mem_prty_6",
+ "pswwr2_mem009_i_mem_prty_7",
+ "pswwr2_mem009_i_mem_prty_8",
+ "pswwr2_mem013_i_mem_prty_0",
+ "pswwr2_mem013_i_mem_prty_1",
+ "pswwr2_mem013_i_mem_prty_2",
+ "pswwr2_mem013_i_mem_prty_3",
+ "pswwr2_mem013_i_mem_prty_4",
+ "pswwr2_mem013_i_mem_prty_5",
+ "pswwr2_mem013_i_mem_prty_6",
+ "pswwr2_mem013_i_mem_prty_7",
+ "pswwr2_mem013_i_mem_prty_8",
+ "pswwr2_mem006_i_mem_prty_0",
+ "pswwr2_mem006_i_mem_prty_1",
+ "pswwr2_mem006_i_mem_prty_2",
+ "pswwr2_mem006_i_mem_prty_3",
+ "pswwr2_mem006_i_mem_prty_4",
+ "pswwr2_mem006_i_mem_prty_5",
+ "pswwr2_mem006_i_mem_prty_6",
+ "pswwr2_mem006_i_mem_prty_7",
+ "pswwr2_mem006_i_mem_prty_8",
+ "pswwr2_mem010_i_mem_prty_0",
+ "pswwr2_mem010_i_mem_prty_1",
+ "pswwr2_mem010_i_mem_prty_2",
+ "pswwr2_mem010_i_mem_prty_3",
+ "pswwr2_mem010_i_mem_prty_4",
+ "pswwr2_mem010_i_mem_prty_5",
+ "pswwr2_mem010_i_mem_prty_6",
+ "pswwr2_mem010_i_mem_prty_7",
+ "pswwr2_mem010_i_mem_prty_8",
+ "pswwr2_mem012_i_mem_prty",
+ "pswwr2_mem011_i_mem_prty_0",
+ "pswwr2_mem011_i_mem_prty_1",
+ "pswwr2_mem011_i_mem_prty_2",
+ "pswwr2_mem011_i_mem_prty_3",
+ "pswwr2_mem011_i_mem_prty_4",
+ "pswwr2_mem011_i_mem_prty_5",
+ "pswwr2_mem011_i_mem_prty_6",
+ "pswwr2_mem011_i_mem_prty_7",
+ "pswwr2_mem011_i_mem_prty_8",
+ "pswwr2_mem004_i_mem_prty_0",
+ "pswwr2_mem004_i_mem_prty_1",
+ "pswwr2_mem004_i_mem_prty_2",
+ "pswwr2_mem004_i_mem_prty_3",
+ "pswwr2_mem004_i_mem_prty_4",
+ "pswwr2_mem004_i_mem_prty_5",
+ "pswwr2_mem004_i_mem_prty_6",
+ "pswwr2_mem004_i_mem_prty_7",
+ "pswwr2_mem004_i_mem_prty_8",
+ "pswwr2_mem015_i_mem_prty_0",
+ "pswwr2_mem015_i_mem_prty_1",
+ "pswwr2_mem015_i_mem_prty_2",
+ "pswwr2_mem005_i_mem_prty_0",
+ "pswwr2_mem005_i_mem_prty_1",
+ "pswwr2_mem005_i_mem_prty_2",
+ "pswwr2_mem005_i_mem_prty_3",
+ "pswwr2_mem005_i_mem_prty_4",
+ "pswwr2_mem005_i_mem_prty_5",
+ "pswwr2_mem005_i_mem_prty_6",
+ "pswwr2_mem005_i_mem_prty_7",
+ "pswwr2_mem005_i_mem_prty_8",
+ "pswwr2_mem002_i_mem_prty_0",
+ "pswwr2_mem002_i_mem_prty_1",
+ "pswwr2_mem002_i_mem_prty_2",
+ "pswwr2_mem002_i_mem_prty_3",
+ "pswwr2_mem002_i_mem_prty_4",
+ "pswwr2_mem003_i_mem_prty_0",
+ "pswwr2_mem003_i_mem_prty_1",
+ "pswwr2_mem003_i_mem_prty_2",
+};
+#else
+#define pswwr2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr2_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswwr2_prty1_bb_a0 = {
+ 0, 31, pswwr2_prty1_bb_a0_attn_idx, 0x29b200, 0x29b20c, 0x29b208,
+ 0x29b204
+};
+
+static const u16 pswwr2_prty2_bb_a0_attn_idx[31] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+};
+
+static struct attn_hw_reg pswwr2_prty2_bb_a0 = {
+ 1, 31, pswwr2_prty2_bb_a0_attn_idx, 0x29b210, 0x29b21c, 0x29b218,
+ 0x29b214
+};
+
+static const u16 pswwr2_prty3_bb_a0_attn_idx[31] = {
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+};
+
+static struct attn_hw_reg pswwr2_prty3_bb_a0 = {
+ 2, 31, pswwr2_prty3_bb_a0_attn_idx, 0x29b220, 0x29b22c, 0x29b228,
+ 0x29b224
+};
+
+static const u16 pswwr2_prty4_bb_a0_attn_idx[20] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109,
+ 110, 111, 112, 113,
+};
+
+static struct attn_hw_reg pswwr2_prty4_bb_a0 = {
+ 3, 20, pswwr2_prty4_bb_a0_attn_idx, 0x29b230, 0x29b23c, 0x29b238,
+ 0x29b234
+};
+
+static struct attn_hw_reg *pswwr2_prty_bb_a0_regs[4] = {
+ &pswwr2_prty1_bb_a0, &pswwr2_prty2_bb_a0, &pswwr2_prty3_bb_a0,
+ &pswwr2_prty4_bb_a0,
+};
+
+static const u16 pswwr2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswwr2_prty0_bb_b0 = {
+ 0, 1, pswwr2_prty0_bb_b0_attn_idx, 0x29b190, 0x29b19c, 0x29b198,
+ 0x29b194
+};
+
+static const u16 pswwr2_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswwr2_prty1_bb_b0 = {
+ 1, 31, pswwr2_prty1_bb_b0_attn_idx, 0x29b200, 0x29b20c, 0x29b208,
+ 0x29b204
+};
+
+static const u16 pswwr2_prty2_bb_b0_attn_idx[31] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+};
+
+static struct attn_hw_reg pswwr2_prty2_bb_b0 = {
+ 2, 31, pswwr2_prty2_bb_b0_attn_idx, 0x29b210, 0x29b21c, 0x29b218,
+ 0x29b214
+};
+
+static const u16 pswwr2_prty3_bb_b0_attn_idx[31] = {
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+};
+
+static struct attn_hw_reg pswwr2_prty3_bb_b0 = {
+ 3, 31, pswwr2_prty3_bb_b0_attn_idx, 0x29b220, 0x29b22c, 0x29b228,
+ 0x29b224
+};
+
+static const u16 pswwr2_prty4_bb_b0_attn_idx[20] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109,
+ 110, 111, 112, 113,
+};
+
+static struct attn_hw_reg pswwr2_prty4_bb_b0 = {
+ 4, 20, pswwr2_prty4_bb_b0_attn_idx, 0x29b230, 0x29b23c, 0x29b238,
+ 0x29b234
+};
+
+static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = {
+ &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0,
+ &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0,
+};
+
+static const u16 pswwr2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswwr2_prty0_k2 = {
+ 0, 1, pswwr2_prty0_k2_attn_idx, 0x29b190, 0x29b19c, 0x29b198, 0x29b194
+};
+
+static const u16 pswwr2_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswwr2_prty1_k2 = {
+ 1, 31, pswwr2_prty1_k2_attn_idx, 0x29b200, 0x29b20c, 0x29b208, 0x29b204
+};
+
+static const u16 pswwr2_prty2_k2_attn_idx[31] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+};
+
+static struct attn_hw_reg pswwr2_prty2_k2 = {
+ 2, 31, pswwr2_prty2_k2_attn_idx, 0x29b210, 0x29b21c, 0x29b218, 0x29b214
+};
+
+static const u16 pswwr2_prty3_k2_attn_idx[31] = {
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+};
+
+static struct attn_hw_reg pswwr2_prty3_k2 = {
+ 3, 31, pswwr2_prty3_k2_attn_idx, 0x29b220, 0x29b22c, 0x29b228, 0x29b224
+};
+
+static const u16 pswwr2_prty4_k2_attn_idx[20] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109,
+ 110, 111, 112, 113,
+};
+
+static struct attn_hw_reg pswwr2_prty4_k2 = {
+ 4, 20, pswwr2_prty4_k2_attn_idx, 0x29b230, 0x29b23c, 0x29b238, 0x29b234
+};
+
+static struct attn_hw_reg *pswwr2_prty_k2_regs[5] = {
+ &pswwr2_prty0_k2, &pswwr2_prty1_k2, &pswwr2_prty2_k2, &pswwr2_prty3_k2,
+ &pswwr2_prty4_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq_int_attn_desc[21] = {
+ "pswrq_address_error",
+ "pswrq_pbf_fifo_overflow",
+ "pswrq_src_fifo_overflow",
+ "pswrq_qm_fifo_overflow",
+ "pswrq_tm_fifo_overflow",
+ "pswrq_usdm_fifo_overflow",
+ "pswrq_m2p_fifo_overflow",
+ "pswrq_xsdm_fifo_overflow",
+ "pswrq_tsdm_fifo_overflow",
+ "pswrq_ptu_fifo_overflow",
+ "pswrq_cduwr_fifo_overflow",
+ "pswrq_cdurd_fifo_overflow",
+ "pswrq_dmae_fifo_overflow",
+ "pswrq_hc_fifo_overflow",
+ "pswrq_dbg_fifo_overflow",
+ "pswrq_msdm_fifo_overflow",
+ "pswrq_ysdm_fifo_overflow",
+ "pswrq_psdm_fifo_overflow",
+ "pswrq_prm_fifo_overflow",
+ "pswrq_muld_fifo_overflow",
+ "pswrq_xyld_fifo_overflow",
+};
+#else
+#define pswrq_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq_int0_bb_a0_attn_idx[21] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+};
+
+static struct attn_hw_reg pswrq_int0_bb_a0 = {
+ 0, 21, pswrq_int0_bb_a0_attn_idx, 0x280180, 0x28018c, 0x280188,
+ 0x280184
+};
+
+static struct attn_hw_reg *pswrq_int_bb_a0_regs[1] = {
+ &pswrq_int0_bb_a0,
+};
+
+static const u16 pswrq_int0_bb_b0_attn_idx[21] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+};
+
+static struct attn_hw_reg pswrq_int0_bb_b0 = {
+ 0, 21, pswrq_int0_bb_b0_attn_idx, 0x280180, 0x28018c, 0x280188,
+ 0x280184
+};
+
+static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = {
+ &pswrq_int0_bb_b0,
+};
+
+static const u16 pswrq_int0_k2_attn_idx[21] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+};
+
+static struct attn_hw_reg pswrq_int0_k2 = {
+ 0, 21, pswrq_int0_k2_attn_idx, 0x280180, 0x28018c, 0x280188, 0x280184
+};
+
+static struct attn_hw_reg *pswrq_int_k2_regs[1] = {
+ &pswrq_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq_prty_attn_desc[1] = {
+ "pswrq_pxp_busip_parity",
+};
+#else
+#define pswrq_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrq_prty0_bb_b0 = {
+ 0, 1, pswrq_prty0_bb_b0_attn_idx, 0x280190, 0x28019c, 0x280198,
+ 0x280194
+};
+
+static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = {
+ &pswrq_prty0_bb_b0,
+};
+
+static const u16 pswrq_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrq_prty0_k2 = {
+ 0, 1, pswrq_prty0_k2_attn_idx, 0x280190, 0x28019c, 0x280198, 0x280194
+};
+
+static struct attn_hw_reg *pswrq_prty_k2_regs[1] = {
+ &pswrq_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq2_int_attn_desc[15] = {
+ "pswrq2_address_error",
+ "pswrq2_l2p_fifo_overflow",
+ "pswrq2_wdfifo_overflow",
+ "pswrq2_phyaddr_fifo_of",
+ "pswrq2_l2p_violation_1",
+ "pswrq2_l2p_violation_2",
+ "pswrq2_free_list_empty",
+ "pswrq2_elt_addr",
+ "pswrq2_l2p_vf_err",
+ "pswrq2_core_wdone_overflow",
+ "pswrq2_treq_fifo_underflow",
+ "pswrq2_treq_fifo_overflow",
+ "pswrq2_icpl_fifo_underflow",
+ "pswrq2_icpl_fifo_overflow",
+ "pswrq2_back2back_atc_response",
+};
+#else
+#define pswrq2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq2_int0_bb_a0_attn_idx[15] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg pswrq2_int0_bb_a0 = {
+ 0, 15, pswrq2_int0_bb_a0_attn_idx, 0x240180, 0x24018c, 0x240188,
+ 0x240184
+};
+
+static struct attn_hw_reg *pswrq2_int_bb_a0_regs[1] = {
+ &pswrq2_int0_bb_a0,
+};
+
+static const u16 pswrq2_int0_bb_b0_attn_idx[15] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg pswrq2_int0_bb_b0 = {
+ 0, 15, pswrq2_int0_bb_b0_attn_idx, 0x240180, 0x24018c, 0x240188,
+ 0x240184
+};
+
+static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = {
+ &pswrq2_int0_bb_b0,
+};
+
+static const u16 pswrq2_int0_k2_attn_idx[15] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg pswrq2_int0_k2 = {
+ 0, 15, pswrq2_int0_k2_attn_idx, 0x240180, 0x24018c, 0x240188, 0x240184
+};
+
+static struct attn_hw_reg *pswrq2_int_k2_regs[1] = {
+ &pswrq2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq2_prty_attn_desc[11] = {
+ "pswrq2_mem004_i_ecc_rf_int",
+ "pswrq2_mem005_i_ecc_rf_int",
+ "pswrq2_mem001_i_ecc_rf_int",
+ "pswrq2_mem006_i_mem_prty",
+ "pswrq2_mem008_i_mem_prty",
+ "pswrq2_mem009_i_mem_prty",
+ "pswrq2_mem003_i_mem_prty",
+ "pswrq2_mem002_i_mem_prty",
+ "pswrq2_mem010_i_mem_prty",
+ "pswrq2_mem007_i_mem_prty",
+ "pswrq2_mem005_i_mem_prty",
+};
+#else
+#define pswrq2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq2_prty1_bb_a0_attn_idx[9] = {
+ 0, 2, 3, 4, 5, 6, 7, 9, 10,
+};
+
+static struct attn_hw_reg pswrq2_prty1_bb_a0 = {
+ 0, 9, pswrq2_prty1_bb_a0_attn_idx, 0x240200, 0x24020c, 0x240208,
+ 0x240204
+};
+
+static struct attn_hw_reg *pswrq2_prty_bb_a0_regs[1] = {
+ &pswrq2_prty1_bb_a0,
+};
+
+static const u16 pswrq2_prty1_bb_b0_attn_idx[9] = {
+ 0, 2, 3, 4, 5, 6, 7, 9, 10,
+};
+
+static struct attn_hw_reg pswrq2_prty1_bb_b0 = {
+ 0, 9, pswrq2_prty1_bb_b0_attn_idx, 0x240200, 0x24020c, 0x240208,
+ 0x240204
+};
+
+static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = {
+ &pswrq2_prty1_bb_b0,
+};
+
+static const u16 pswrq2_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg pswrq2_prty1_k2 = {
+ 0, 10, pswrq2_prty1_k2_attn_idx, 0x240200, 0x24020c, 0x240208, 0x240204
+};
+
+static struct attn_hw_reg *pswrq2_prty_k2_regs[1] = {
+ &pswrq2_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pglcs_int_attn_desc[2] = {
+ "pglcs_address_error",
+ "pglcs_rasdp_error",
+};
+#else
+#define pglcs_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pglcs_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pglcs_int0_bb_a0 = {
+ 0, 1, pglcs_int0_bb_a0_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04
+};
+
+static struct attn_hw_reg *pglcs_int_bb_a0_regs[1] = {
+ &pglcs_int0_bb_a0,
+};
+
+static const u16 pglcs_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pglcs_int0_bb_b0 = {
+ 0, 1, pglcs_int0_bb_b0_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04
+};
+
+static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = {
+ &pglcs_int0_bb_b0,
+};
+
+static const u16 pglcs_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg pglcs_int0_k2 = {
+ 0, 2, pglcs_int0_k2_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04
+};
+
+static struct attn_hw_reg *pglcs_int_k2_regs[1] = {
+ &pglcs_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dmae_int_attn_desc[2] = {
+ "dmae_address_error",
+ "dmae_pci_rd_buf_err",
+};
+#else
+#define dmae_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 dmae_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg dmae_int0_bb_a0 = {
+ 0, 2, dmae_int0_bb_a0_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184
+};
+
+static struct attn_hw_reg *dmae_int_bb_a0_regs[1] = {
+ &dmae_int0_bb_a0,
+};
+
+static const u16 dmae_int0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg dmae_int0_bb_b0 = {
+ 0, 2, dmae_int0_bb_b0_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184
+};
+
+static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = {
+ &dmae_int0_bb_b0,
+};
+
+static const u16 dmae_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg dmae_int0_k2 = {
+ 0, 2, dmae_int0_k2_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184
+};
+
+static struct attn_hw_reg *dmae_int_k2_regs[1] = {
+ &dmae_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dmae_prty_attn_desc[3] = {
+ "dmae_mem002_i_mem_prty",
+ "dmae_mem001_i_mem_prty",
+ "dmae_mem003_i_mem_prty",
+};
+#else
+#define dmae_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 dmae_prty1_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg dmae_prty1_bb_a0 = {
+ 0, 3, dmae_prty1_bb_a0_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204
+};
+
+static struct attn_hw_reg *dmae_prty_bb_a0_regs[1] = {
+ &dmae_prty1_bb_a0,
+};
+
+static const u16 dmae_prty1_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg dmae_prty1_bb_b0 = {
+ 0, 3, dmae_prty1_bb_b0_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204
+};
+
+static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = {
+ &dmae_prty1_bb_b0,
+};
+
+static const u16 dmae_prty1_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg dmae_prty1_k2 = {
+ 0, 3, dmae_prty1_k2_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204
+};
+
+static struct attn_hw_reg *dmae_prty_k2_regs[1] = {
+ &dmae_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ptu_int_attn_desc[8] = {
+ "ptu_address_error",
+ "ptu_atc_tcpl_to_not_pend",
+ "ptu_atc_gpa_multiple_hits",
+ "ptu_atc_rcpl_to_empty_cnt",
+ "ptu_atc_tcpl_error",
+ "ptu_atc_inv_halt",
+ "ptu_atc_reuse_transpend",
+ "ptu_atc_ireq_less_than_stu",
+};
+#else
+#define ptu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ptu_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg ptu_int0_bb_a0 = {
+ 0, 8, ptu_int0_bb_a0_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184
+};
+
+static struct attn_hw_reg *ptu_int_bb_a0_regs[1] = {
+ &ptu_int0_bb_a0,
+};
+
+static const u16 ptu_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg ptu_int0_bb_b0 = {
+ 0, 8, ptu_int0_bb_b0_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184
+};
+
+static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = {
+ &ptu_int0_bb_b0,
+};
+
+static const u16 ptu_int0_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg ptu_int0_k2 = {
+ 0, 8, ptu_int0_k2_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184
+};
+
+static struct attn_hw_reg *ptu_int_k2_regs[1] = {
+ &ptu_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ptu_prty_attn_desc[18] = {
+ "ptu_mem017_i_ecc_rf_int",
+ "ptu_mem018_i_mem_prty",
+ "ptu_mem006_i_mem_prty",
+ "ptu_mem001_i_mem_prty",
+ "ptu_mem002_i_mem_prty",
+ "ptu_mem003_i_mem_prty",
+ "ptu_mem004_i_mem_prty",
+ "ptu_mem005_i_mem_prty",
+ "ptu_mem009_i_mem_prty",
+ "ptu_mem010_i_mem_prty",
+ "ptu_mem016_i_mem_prty",
+ "ptu_mem007_i_mem_prty",
+ "ptu_mem015_i_mem_prty",
+ "ptu_mem013_i_mem_prty",
+ "ptu_mem012_i_mem_prty",
+ "ptu_mem014_i_mem_prty",
+ "ptu_mem011_i_mem_prty",
+ "ptu_mem008_i_mem_prty",
+};
+#else
+#define ptu_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ptu_prty1_bb_a0_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg ptu_prty1_bb_a0 = {
+ 0, 18, ptu_prty1_bb_a0_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204
+};
+
+static struct attn_hw_reg *ptu_prty_bb_a0_regs[1] = {
+ &ptu_prty1_bb_a0,
+};
+
+static const u16 ptu_prty1_bb_b0_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg ptu_prty1_bb_b0 = {
+ 0, 18, ptu_prty1_bb_b0_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204
+};
+
+static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = {
+ &ptu_prty1_bb_b0,
+};
+
+static const u16 ptu_prty1_k2_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg ptu_prty1_k2 = {
+ 0, 18, ptu_prty1_k2_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204
+};
+
+static struct attn_hw_reg *ptu_prty_k2_regs[1] = {
+ &ptu_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tcm_int_attn_desc[41] = {
+ "tcm_address_error",
+ "tcm_is_storm_ovfl_err",
+ "tcm_is_storm_under_err",
+ "tcm_is_tsdm_ovfl_err",
+ "tcm_is_tsdm_under_err",
+ "tcm_is_msem_ovfl_err",
+ "tcm_is_msem_under_err",
+ "tcm_is_ysem_ovfl_err",
+ "tcm_is_ysem_under_err",
+ "tcm_is_dorq_ovfl_err",
+ "tcm_is_dorq_under_err",
+ "tcm_is_pbf_ovfl_err",
+ "tcm_is_pbf_under_err",
+ "tcm_is_prs_ovfl_err",
+ "tcm_is_prs_under_err",
+ "tcm_is_tm_ovfl_err",
+ "tcm_is_tm_under_err",
+ "tcm_is_qm_p_ovfl_err",
+ "tcm_is_qm_p_under_err",
+ "tcm_is_qm_s_ovfl_err",
+ "tcm_is_qm_s_under_err",
+ "tcm_is_grc_ovfl_err0",
+ "tcm_is_grc_under_err0",
+ "tcm_is_grc_ovfl_err1",
+ "tcm_is_grc_under_err1",
+ "tcm_is_grc_ovfl_err2",
+ "tcm_is_grc_under_err2",
+ "tcm_is_grc_ovfl_err3",
+ "tcm_is_grc_under_err3",
+ "tcm_in_prcs_tbl_ovfl",
+ "tcm_agg_con_data_buf_ovfl",
+ "tcm_agg_con_cmd_buf_ovfl",
+ "tcm_sm_con_data_buf_ovfl",
+ "tcm_sm_con_cmd_buf_ovfl",
+ "tcm_agg_task_data_buf_ovfl",
+ "tcm_agg_task_cmd_buf_ovfl",
+ "tcm_sm_task_data_buf_ovfl",
+ "tcm_sm_task_cmd_buf_ovfl",
+ "tcm_fi_desc_input_violate",
+ "tcm_se_desc_input_violate",
+ "tcm_qmreg_more4",
+};
+#else
+#define tcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcm_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tcm_int0_bb_a0 = {
+ 0, 8, tcm_int0_bb_a0_attn_idx, 0x1180180, 0x118018c, 0x1180188,
+ 0x1180184
+};
+
+static const u16 tcm_int1_bb_a0_attn_idx[32] = {
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_int1_bb_a0 = {
+ 1, 32, tcm_int1_bb_a0_attn_idx, 0x1180190, 0x118019c, 0x1180198,
+ 0x1180194
+};
+
+static const u16 tcm_int2_bb_a0_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg tcm_int2_bb_a0 = {
+ 2, 1, tcm_int2_bb_a0_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8,
+ 0x11801a4
+};
+
+static struct attn_hw_reg *tcm_int_bb_a0_regs[3] = {
+ &tcm_int0_bb_a0, &tcm_int1_bb_a0, &tcm_int2_bb_a0,
+};
+
+static const u16 tcm_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tcm_int0_bb_b0 = {
+ 0, 8, tcm_int0_bb_b0_attn_idx, 0x1180180, 0x118018c, 0x1180188,
+ 0x1180184
+};
+
+static const u16 tcm_int1_bb_b0_attn_idx[32] = {
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_int1_bb_b0 = {
+ 1, 32, tcm_int1_bb_b0_attn_idx, 0x1180190, 0x118019c, 0x1180198,
+ 0x1180194
+};
+
+static const u16 tcm_int2_bb_b0_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg tcm_int2_bb_b0 = {
+ 2, 1, tcm_int2_bb_b0_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8,
+ 0x11801a4
+};
+
+static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = {
+ &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0,
+};
+
+static const u16 tcm_int0_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tcm_int0_k2 = {
+ 0, 8, tcm_int0_k2_attn_idx, 0x1180180, 0x118018c, 0x1180188, 0x1180184
+};
+
+static const u16 tcm_int1_k2_attn_idx[32] = {
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_int1_k2 = {
+ 1, 32, tcm_int1_k2_attn_idx, 0x1180190, 0x118019c, 0x1180198, 0x1180194
+};
+
+static const u16 tcm_int2_k2_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg tcm_int2_k2 = {
+ 2, 1, tcm_int2_k2_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4
+};
+
+static struct attn_hw_reg *tcm_int_k2_regs[3] = {
+ &tcm_int0_k2, &tcm_int1_k2, &tcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tcm_prty_attn_desc[51] = {
+ "tcm_mem026_i_ecc_rf_int",
+ "tcm_mem003_i_ecc_0_rf_int",
+ "tcm_mem003_i_ecc_1_rf_int",
+ "tcm_mem022_i_ecc_0_rf_int",
+ "tcm_mem022_i_ecc_1_rf_int",
+ "tcm_mem005_i_ecc_0_rf_int",
+ "tcm_mem005_i_ecc_1_rf_int",
+ "tcm_mem024_i_ecc_0_rf_int",
+ "tcm_mem024_i_ecc_1_rf_int",
+ "tcm_mem018_i_mem_prty",
+ "tcm_mem019_i_mem_prty",
+ "tcm_mem015_i_mem_prty",
+ "tcm_mem016_i_mem_prty",
+ "tcm_mem017_i_mem_prty",
+ "tcm_mem010_i_mem_prty",
+ "tcm_mem020_i_mem_prty",
+ "tcm_mem011_i_mem_prty",
+ "tcm_mem012_i_mem_prty",
+ "tcm_mem013_i_mem_prty",
+ "tcm_mem014_i_mem_prty",
+ "tcm_mem029_i_mem_prty",
+ "tcm_mem028_i_mem_prty",
+ "tcm_mem027_i_mem_prty",
+ "tcm_mem004_i_mem_prty",
+ "tcm_mem023_i_mem_prty",
+ "tcm_mem006_i_mem_prty",
+ "tcm_mem025_i_mem_prty",
+ "tcm_mem021_i_mem_prty",
+ "tcm_mem007_i_mem_prty_0",
+ "tcm_mem007_i_mem_prty_1",
+ "tcm_mem008_i_mem_prty",
+ "tcm_mem025_i_ecc_rf_int",
+ "tcm_mem021_i_ecc_0_rf_int",
+ "tcm_mem021_i_ecc_1_rf_int",
+ "tcm_mem023_i_ecc_0_rf_int",
+ "tcm_mem023_i_ecc_1_rf_int",
+ "tcm_mem026_i_mem_prty",
+ "tcm_mem022_i_mem_prty",
+ "tcm_mem024_i_mem_prty",
+ "tcm_mem009_i_mem_prty",
+ "tcm_mem024_i_ecc_rf_int",
+ "tcm_mem001_i_ecc_0_rf_int",
+ "tcm_mem001_i_ecc_1_rf_int",
+ "tcm_mem019_i_ecc_0_rf_int",
+ "tcm_mem019_i_ecc_1_rf_int",
+ "tcm_mem022_i_ecc_rf_int",
+ "tcm_mem002_i_mem_prty",
+ "tcm_mem005_i_mem_prty_0",
+ "tcm_mem005_i_mem_prty_1",
+ "tcm_mem001_i_mem_prty",
+ "tcm_mem007_i_mem_prty",
+};
+#else
+#define tcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcm_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 22, 23, 24, 25, 26, 30, 32,
+ 33, 36, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg tcm_prty1_bb_a0 = {
+ 0, 31, tcm_prty1_bb_a0_attn_idx, 0x1180200, 0x118020c, 0x1180208,
+ 0x1180204
+};
+
+static const u16 tcm_prty2_bb_a0_attn_idx[3] = {
+ 50, 21, 20,
+};
+
+static struct attn_hw_reg tcm_prty2_bb_a0 = {
+ 1, 3, tcm_prty2_bb_a0_attn_idx, 0x1180210, 0x118021c, 0x1180218,
+ 0x1180214
+};
+
+static struct attn_hw_reg *tcm_prty_bb_a0_regs[2] = {
+ &tcm_prty1_bb_a0, &tcm_prty2_bb_a0,
+};
+
+static const u16 tcm_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25,
+ 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_prty1_bb_b0 = {
+ 0, 31, tcm_prty1_bb_b0_attn_idx, 0x1180200, 0x118020c, 0x1180208,
+ 0x1180204
+};
+
+static const u16 tcm_prty2_bb_b0_attn_idx[2] = {
+ 49, 46,
+};
+
+static struct attn_hw_reg tcm_prty2_bb_b0 = {
+ 1, 2, tcm_prty2_bb_b0_attn_idx, 0x1180210, 0x118021c, 0x1180218,
+ 0x1180214
+};
+
+static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = {
+ &tcm_prty1_bb_b0, &tcm_prty2_bb_b0,
+};
+
+static const u16 tcm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg tcm_prty1_k2 = {
+ 0, 31, tcm_prty1_k2_attn_idx, 0x1180200, 0x118020c, 0x1180208,
+ 0x1180204
+};
+
+static const u16 tcm_prty2_k2_attn_idx[3] = {
+ 39, 49, 46,
+};
+
+static struct attn_hw_reg tcm_prty2_k2 = {
+ 1, 3, tcm_prty2_k2_attn_idx, 0x1180210, 0x118021c, 0x1180218, 0x1180214
+};
+
+static struct attn_hw_reg *tcm_prty_k2_regs[2] = {
+ &tcm_prty1_k2, &tcm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *mcm_int_attn_desc[41] = {
+ "mcm_address_error",
+ "mcm_is_storm_ovfl_err",
+ "mcm_is_storm_under_err",
+ "mcm_is_msdm_ovfl_err",
+ "mcm_is_msdm_under_err",
+ "mcm_is_ysdm_ovfl_err",
+ "mcm_is_ysdm_under_err",
+ "mcm_is_usdm_ovfl_err",
+ "mcm_is_usdm_under_err",
+ "mcm_is_tmld_ovfl_err",
+ "mcm_is_tmld_under_err",
+ "mcm_is_usem_ovfl_err",
+ "mcm_is_usem_under_err",
+ "mcm_is_ysem_ovfl_err",
+ "mcm_is_ysem_under_err",
+ "mcm_is_pbf_ovfl_err",
+ "mcm_is_pbf_under_err",
+ "mcm_is_qm_p_ovfl_err",
+ "mcm_is_qm_p_under_err",
+ "mcm_is_qm_s_ovfl_err",
+ "mcm_is_qm_s_under_err",
+ "mcm_is_grc_ovfl_err0",
+ "mcm_is_grc_under_err0",
+ "mcm_is_grc_ovfl_err1",
+ "mcm_is_grc_under_err1",
+ "mcm_is_grc_ovfl_err2",
+ "mcm_is_grc_under_err2",
+ "mcm_is_grc_ovfl_err3",
+ "mcm_is_grc_under_err3",
+ "mcm_in_prcs_tbl_ovfl",
+ "mcm_agg_con_data_buf_ovfl",
+ "mcm_agg_con_cmd_buf_ovfl",
+ "mcm_sm_con_data_buf_ovfl",
+ "mcm_sm_con_cmd_buf_ovfl",
+ "mcm_agg_task_data_buf_ovfl",
+ "mcm_agg_task_cmd_buf_ovfl",
+ "mcm_sm_task_data_buf_ovfl",
+ "mcm_sm_task_cmd_buf_ovfl",
+ "mcm_fi_desc_input_violate",
+ "mcm_se_desc_input_violate",
+ "mcm_qmreg_more4",
+};
+#else
+#define mcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 mcm_int0_bb_a0_attn_idx[14] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg mcm_int0_bb_a0 = {
+ 0, 14, mcm_int0_bb_a0_attn_idx, 0x1200180, 0x120018c, 0x1200188,
+ 0x1200184
+};
+
+static const u16 mcm_int1_bb_a0_attn_idx[26] = {
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg mcm_int1_bb_a0 = {
+ 1, 26, mcm_int1_bb_a0_attn_idx, 0x1200190, 0x120019c, 0x1200198,
+ 0x1200194
+};
+
+static const u16 mcm_int2_bb_a0_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg mcm_int2_bb_a0 = {
+ 2, 1, mcm_int2_bb_a0_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8,
+ 0x12001a4
+};
+
+static struct attn_hw_reg *mcm_int_bb_a0_regs[3] = {
+ &mcm_int0_bb_a0, &mcm_int1_bb_a0, &mcm_int2_bb_a0,
+};
+
+static const u16 mcm_int0_bb_b0_attn_idx[14] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg mcm_int0_bb_b0 = {
+ 0, 14, mcm_int0_bb_b0_attn_idx, 0x1200180, 0x120018c, 0x1200188,
+ 0x1200184
+};
+
+static const u16 mcm_int1_bb_b0_attn_idx[26] = {
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg mcm_int1_bb_b0 = {
+ 1, 26, mcm_int1_bb_b0_attn_idx, 0x1200190, 0x120019c, 0x1200198,
+ 0x1200194
+};
+
+static const u16 mcm_int2_bb_b0_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg mcm_int2_bb_b0 = {
+ 2, 1, mcm_int2_bb_b0_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8,
+ 0x12001a4
+};
+
+static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = {
+ &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0,
+};
+
+static const u16 mcm_int0_k2_attn_idx[14] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg mcm_int0_k2 = {
+ 0, 14, mcm_int0_k2_attn_idx, 0x1200180, 0x120018c, 0x1200188, 0x1200184
+};
+
+static const u16 mcm_int1_k2_attn_idx[26] = {
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg mcm_int1_k2 = {
+ 1, 26, mcm_int1_k2_attn_idx, 0x1200190, 0x120019c, 0x1200198, 0x1200194
+};
+
+static const u16 mcm_int2_k2_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg mcm_int2_k2 = {
+ 2, 1, mcm_int2_k2_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4
+};
+
+static struct attn_hw_reg *mcm_int_k2_regs[3] = {
+ &mcm_int0_k2, &mcm_int1_k2, &mcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *mcm_prty_attn_desc[46] = {
+ "mcm_mem028_i_ecc_rf_int",
+ "mcm_mem003_i_ecc_rf_int",
+ "mcm_mem023_i_ecc_0_rf_int",
+ "mcm_mem023_i_ecc_1_rf_int",
+ "mcm_mem005_i_ecc_0_rf_int",
+ "mcm_mem005_i_ecc_1_rf_int",
+ "mcm_mem025_i_ecc_0_rf_int",
+ "mcm_mem025_i_ecc_1_rf_int",
+ "mcm_mem026_i_ecc_rf_int",
+ "mcm_mem017_i_mem_prty",
+ "mcm_mem019_i_mem_prty",
+ "mcm_mem016_i_mem_prty",
+ "mcm_mem015_i_mem_prty",
+ "mcm_mem020_i_mem_prty",
+ "mcm_mem021_i_mem_prty",
+ "mcm_mem018_i_mem_prty",
+ "mcm_mem011_i_mem_prty",
+ "mcm_mem012_i_mem_prty",
+ "mcm_mem013_i_mem_prty",
+ "mcm_mem014_i_mem_prty",
+ "mcm_mem031_i_mem_prty",
+ "mcm_mem030_i_mem_prty",
+ "mcm_mem029_i_mem_prty",
+ "mcm_mem004_i_mem_prty",
+ "mcm_mem024_i_mem_prty",
+ "mcm_mem006_i_mem_prty",
+ "mcm_mem027_i_mem_prty",
+ "mcm_mem022_i_mem_prty",
+ "mcm_mem007_i_mem_prty_0",
+ "mcm_mem007_i_mem_prty_1",
+ "mcm_mem008_i_mem_prty",
+ "mcm_mem001_i_ecc_rf_int",
+ "mcm_mem021_i_ecc_0_rf_int",
+ "mcm_mem021_i_ecc_1_rf_int",
+ "mcm_mem003_i_ecc_0_rf_int",
+ "mcm_mem003_i_ecc_1_rf_int",
+ "mcm_mem024_i_ecc_rf_int",
+ "mcm_mem009_i_mem_prty",
+ "mcm_mem010_i_mem_prty",
+ "mcm_mem028_i_mem_prty",
+ "mcm_mem002_i_mem_prty",
+ "mcm_mem025_i_mem_prty",
+ "mcm_mem005_i_mem_prty_0",
+ "mcm_mem005_i_mem_prty_1",
+ "mcm_mem001_i_mem_prty",
+ "mcm_mem007_i_mem_prty",
+};
+#else
+#define mcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 mcm_prty1_bb_a0_attn_idx[31] = {
+ 2, 3, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 22, 23, 25, 26, 27, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg mcm_prty1_bb_a0 = {
+ 0, 31, mcm_prty1_bb_a0_attn_idx, 0x1200200, 0x120020c, 0x1200208,
+ 0x1200204
+};
+
+static const u16 mcm_prty2_bb_a0_attn_idx[4] = {
+ 45, 30, 21, 20,
+};
+
+static struct attn_hw_reg mcm_prty2_bb_a0 = {
+ 1, 4, mcm_prty2_bb_a0_attn_idx, 0x1200210, 0x120021c, 0x1200218,
+ 0x1200214
+};
+
+static struct attn_hw_reg *mcm_prty_bb_a0_regs[2] = {
+ &mcm_prty1_bb_a0, &mcm_prty2_bb_a0,
+};
+
+static const u16 mcm_prty1_bb_b0_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg mcm_prty1_bb_b0 = {
+ 0, 31, mcm_prty1_bb_b0_attn_idx, 0x1200200, 0x120020c, 0x1200208,
+ 0x1200204
+};
+
+static const u16 mcm_prty2_bb_b0_attn_idx[4] = {
+ 37, 38, 44, 40,
+};
+
+static struct attn_hw_reg mcm_prty2_bb_b0 = {
+ 1, 4, mcm_prty2_bb_b0_attn_idx, 0x1200210, 0x120021c, 0x1200218,
+ 0x1200214
+};
+
+static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = {
+ &mcm_prty1_bb_b0, &mcm_prty2_bb_b0,
+};
+
+static const u16 mcm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg mcm_prty1_k2 = {
+ 0, 31, mcm_prty1_k2_attn_idx, 0x1200200, 0x120020c, 0x1200208,
+ 0x1200204
+};
+
+static const u16 mcm_prty2_k2_attn_idx[4] = {
+ 37, 38, 44, 40,
+};
+
+static struct attn_hw_reg mcm_prty2_k2 = {
+ 1, 4, mcm_prty2_k2_attn_idx, 0x1200210, 0x120021c, 0x1200218, 0x1200214
+};
+
+static struct attn_hw_reg *mcm_prty_k2_regs[2] = {
+ &mcm_prty1_k2, &mcm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ucm_int_attn_desc[47] = {
+ "ucm_address_error",
+ "ucm_is_storm_ovfl_err",
+ "ucm_is_storm_under_err",
+ "ucm_is_xsdm_ovfl_err",
+ "ucm_is_xsdm_under_err",
+ "ucm_is_ysdm_ovfl_err",
+ "ucm_is_ysdm_under_err",
+ "ucm_is_usdm_ovfl_err",
+ "ucm_is_usdm_under_err",
+ "ucm_is_rdif_ovfl_err",
+ "ucm_is_rdif_under_err",
+ "ucm_is_tdif_ovfl_err",
+ "ucm_is_tdif_under_err",
+ "ucm_is_muld_ovfl_err",
+ "ucm_is_muld_under_err",
+ "ucm_is_yuld_ovfl_err",
+ "ucm_is_yuld_under_err",
+ "ucm_is_dorq_ovfl_err",
+ "ucm_is_dorq_under_err",
+ "ucm_is_pbf_ovfl_err",
+ "ucm_is_pbf_under_err",
+ "ucm_is_tm_ovfl_err",
+ "ucm_is_tm_under_err",
+ "ucm_is_qm_p_ovfl_err",
+ "ucm_is_qm_p_under_err",
+ "ucm_is_qm_s_ovfl_err",
+ "ucm_is_qm_s_under_err",
+ "ucm_is_grc_ovfl_err0",
+ "ucm_is_grc_under_err0",
+ "ucm_is_grc_ovfl_err1",
+ "ucm_is_grc_under_err1",
+ "ucm_is_grc_ovfl_err2",
+ "ucm_is_grc_under_err2",
+ "ucm_is_grc_ovfl_err3",
+ "ucm_is_grc_under_err3",
+ "ucm_in_prcs_tbl_ovfl",
+ "ucm_agg_con_data_buf_ovfl",
+ "ucm_agg_con_cmd_buf_ovfl",
+ "ucm_sm_con_data_buf_ovfl",
+ "ucm_sm_con_cmd_buf_ovfl",
+ "ucm_agg_task_data_buf_ovfl",
+ "ucm_agg_task_cmd_buf_ovfl",
+ "ucm_sm_task_data_buf_ovfl",
+ "ucm_sm_task_cmd_buf_ovfl",
+ "ucm_fi_desc_input_violate",
+ "ucm_se_desc_input_violate",
+ "ucm_qmreg_more4",
+};
+#else
+#define ucm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ucm_int0_bb_a0_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ucm_int0_bb_a0 = {
+ 0, 17, ucm_int0_bb_a0_attn_idx, 0x1280180, 0x128018c, 0x1280188,
+ 0x1280184
+};
+
+static const u16 ucm_int1_bb_a0_attn_idx[29] = {
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg ucm_int1_bb_a0 = {
+ 1, 29, ucm_int1_bb_a0_attn_idx, 0x1280190, 0x128019c, 0x1280198,
+ 0x1280194
+};
+
+static const u16 ucm_int2_bb_a0_attn_idx[1] = {
+ 46,
+};
+
+static struct attn_hw_reg ucm_int2_bb_a0 = {
+ 2, 1, ucm_int2_bb_a0_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8,
+ 0x12801a4
+};
+
+static struct attn_hw_reg *ucm_int_bb_a0_regs[3] = {
+ &ucm_int0_bb_a0, &ucm_int1_bb_a0, &ucm_int2_bb_a0,
+};
+
+static const u16 ucm_int0_bb_b0_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ucm_int0_bb_b0 = {
+ 0, 17, ucm_int0_bb_b0_attn_idx, 0x1280180, 0x128018c, 0x1280188,
+ 0x1280184
+};
+
+static const u16 ucm_int1_bb_b0_attn_idx[29] = {
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg ucm_int1_bb_b0 = {
+ 1, 29, ucm_int1_bb_b0_attn_idx, 0x1280190, 0x128019c, 0x1280198,
+ 0x1280194
+};
+
+static const u16 ucm_int2_bb_b0_attn_idx[1] = {
+ 46,
+};
+
+static struct attn_hw_reg ucm_int2_bb_b0 = {
+ 2, 1, ucm_int2_bb_b0_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8,
+ 0x12801a4
+};
+
+static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = {
+ &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0,
+};
+
+static const u16 ucm_int0_k2_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ucm_int0_k2 = {
+ 0, 17, ucm_int0_k2_attn_idx, 0x1280180, 0x128018c, 0x1280188, 0x1280184
+};
+
+static const u16 ucm_int1_k2_attn_idx[29] = {
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg ucm_int1_k2 = {
+ 1, 29, ucm_int1_k2_attn_idx, 0x1280190, 0x128019c, 0x1280198, 0x1280194
+};
+
+static const u16 ucm_int2_k2_attn_idx[1] = {
+ 46,
+};
+
+static struct attn_hw_reg ucm_int2_k2 = {
+ 2, 1, ucm_int2_k2_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4
+};
+
+static struct attn_hw_reg *ucm_int_k2_regs[3] = {
+ &ucm_int0_k2, &ucm_int1_k2, &ucm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ucm_prty_attn_desc[54] = {
+ "ucm_mem030_i_ecc_rf_int",
+ "ucm_mem005_i_ecc_0_rf_int",
+ "ucm_mem005_i_ecc_1_rf_int",
+ "ucm_mem024_i_ecc_0_rf_int",
+ "ucm_mem024_i_ecc_1_rf_int",
+ "ucm_mem025_i_ecc_rf_int",
+ "ucm_mem007_i_ecc_0_rf_int",
+ "ucm_mem007_i_ecc_1_rf_int",
+ "ucm_mem008_i_ecc_rf_int",
+ "ucm_mem027_i_ecc_0_rf_int",
+ "ucm_mem027_i_ecc_1_rf_int",
+ "ucm_mem028_i_ecc_rf_int",
+ "ucm_mem020_i_mem_prty",
+ "ucm_mem021_i_mem_prty",
+ "ucm_mem019_i_mem_prty",
+ "ucm_mem013_i_mem_prty",
+ "ucm_mem018_i_mem_prty",
+ "ucm_mem022_i_mem_prty",
+ "ucm_mem014_i_mem_prty",
+ "ucm_mem015_i_mem_prty",
+ "ucm_mem016_i_mem_prty",
+ "ucm_mem017_i_mem_prty",
+ "ucm_mem033_i_mem_prty",
+ "ucm_mem032_i_mem_prty",
+ "ucm_mem031_i_mem_prty",
+ "ucm_mem006_i_mem_prty",
+ "ucm_mem026_i_mem_prty",
+ "ucm_mem009_i_mem_prty",
+ "ucm_mem029_i_mem_prty",
+ "ucm_mem023_i_mem_prty",
+ "ucm_mem010_i_mem_prty_0",
+ "ucm_mem003_i_ecc_0_rf_int",
+ "ucm_mem003_i_ecc_1_rf_int",
+ "ucm_mem022_i_ecc_0_rf_int",
+ "ucm_mem022_i_ecc_1_rf_int",
+ "ucm_mem023_i_ecc_rf_int",
+ "ucm_mem006_i_ecc_rf_int",
+ "ucm_mem025_i_ecc_0_rf_int",
+ "ucm_mem025_i_ecc_1_rf_int",
+ "ucm_mem026_i_ecc_rf_int",
+ "ucm_mem011_i_mem_prty",
+ "ucm_mem012_i_mem_prty",
+ "ucm_mem030_i_mem_prty",
+ "ucm_mem004_i_mem_prty",
+ "ucm_mem024_i_mem_prty",
+ "ucm_mem007_i_mem_prty",
+ "ucm_mem027_i_mem_prty",
+ "ucm_mem008_i_mem_prty_0",
+ "ucm_mem010_i_mem_prty_1",
+ "ucm_mem003_i_mem_prty",
+ "ucm_mem001_i_mem_prty",
+ "ucm_mem002_i_mem_prty",
+ "ucm_mem008_i_mem_prty_1",
+ "ucm_mem010_i_mem_prty",
+};
+#else
+#define ucm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ucm_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 24, 28, 31, 32, 33, 34,
+ 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+};
+
+static struct attn_hw_reg ucm_prty1_bb_a0 = {
+ 0, 31, ucm_prty1_bb_a0_attn_idx, 0x1280200, 0x128020c, 0x1280208,
+ 0x1280204
+};
+
+static const u16 ucm_prty2_bb_a0_attn_idx[7] = {
+ 50, 51, 52, 27, 53, 23, 22,
+};
+
+static struct attn_hw_reg ucm_prty2_bb_a0 = {
+ 1, 7, ucm_prty2_bb_a0_attn_idx, 0x1280210, 0x128021c, 0x1280218,
+ 0x1280214
+};
+
+static struct attn_hw_reg *ucm_prty_bb_a0_regs[2] = {
+ &ucm_prty1_bb_a0, &ucm_prty2_bb_a0,
+};
+
+static const u16 ucm_prty1_bb_b0_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg ucm_prty1_bb_b0 = {
+ 0, 31, ucm_prty1_bb_b0_attn_idx, 0x1280200, 0x128020c, 0x1280208,
+ 0x1280204
+};
+
+static const u16 ucm_prty2_bb_b0_attn_idx[7] = {
+ 48, 40, 41, 49, 43, 50, 51,
+};
+
+static struct attn_hw_reg ucm_prty2_bb_b0 = {
+ 1, 7, ucm_prty2_bb_b0_attn_idx, 0x1280210, 0x128021c, 0x1280218,
+ 0x1280214
+};
+
+static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = {
+ &ucm_prty1_bb_b0, &ucm_prty2_bb_b0,
+};
+
+static const u16 ucm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg ucm_prty1_k2 = {
+ 0, 31, ucm_prty1_k2_attn_idx, 0x1280200, 0x128020c, 0x1280208,
+ 0x1280204
+};
+
+static const u16 ucm_prty2_k2_attn_idx[7] = {
+ 48, 40, 41, 49, 43, 50, 51,
+};
+
+static struct attn_hw_reg ucm_prty2_k2 = {
+ 1, 7, ucm_prty2_k2_attn_idx, 0x1280210, 0x128021c, 0x1280218, 0x1280214
+};
+
+static struct attn_hw_reg *ucm_prty_k2_regs[2] = {
+ &ucm_prty1_k2, &ucm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xcm_int_attn_desc[49] = {
+ "xcm_address_error",
+ "xcm_is_storm_ovfl_err",
+ "xcm_is_storm_under_err",
+ "xcm_is_msdm_ovfl_err",
+ "xcm_is_msdm_under_err",
+ "xcm_is_xsdm_ovfl_err",
+ "xcm_is_xsdm_under_err",
+ "xcm_is_ysdm_ovfl_err",
+ "xcm_is_ysdm_under_err",
+ "xcm_is_usdm_ovfl_err",
+ "xcm_is_usdm_under_err",
+ "xcm_is_msem_ovfl_err",
+ "xcm_is_msem_under_err",
+ "xcm_is_usem_ovfl_err",
+ "xcm_is_usem_under_err",
+ "xcm_is_ysem_ovfl_err",
+ "xcm_is_ysem_under_err",
+ "xcm_is_dorq_ovfl_err",
+ "xcm_is_dorq_under_err",
+ "xcm_is_pbf_ovfl_err",
+ "xcm_is_pbf_under_err",
+ "xcm_is_tm_ovfl_err",
+ "xcm_is_tm_under_err",
+ "xcm_is_qm_p_ovfl_err",
+ "xcm_is_qm_p_under_err",
+ "xcm_is_qm_s_ovfl_err",
+ "xcm_is_qm_s_under_err",
+ "xcm_is_grc_ovfl_err0",
+ "xcm_is_grc_under_err0",
+ "xcm_is_grc_ovfl_err1",
+ "xcm_is_grc_under_err1",
+ "xcm_is_grc_ovfl_err2",
+ "xcm_is_grc_under_err2",
+ "xcm_is_grc_ovfl_err3",
+ "xcm_is_grc_under_err3",
+ "xcm_in_prcs_tbl_ovfl",
+ "xcm_agg_con_data_buf_ovfl",
+ "xcm_agg_con_cmd_buf_ovfl",
+ "xcm_sm_con_data_buf_ovfl",
+ "xcm_sm_con_cmd_buf_ovfl",
+ "xcm_fi_desc_input_violate",
+ "xcm_qm_act_st_cnt_msg_prcs_under",
+ "xcm_qm_act_st_cnt_msg_prcs_ovfl",
+ "xcm_qm_act_st_cnt_ext_ld_under",
+ "xcm_qm_act_st_cnt_ext_ld_ovfl",
+ "xcm_qm_act_st_cnt_rbc_under",
+ "xcm_qm_act_st_cnt_rbc_ovfl",
+ "xcm_qm_act_st_cnt_drop_under",
+ "xcm_qm_act_st_cnt_illeg_pqnum",
+};
+#else
+#define xcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xcm_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg xcm_int0_bb_a0 = {
+ 0, 16, xcm_int0_bb_a0_attn_idx, 0x1000180, 0x100018c, 0x1000188,
+ 0x1000184
+};
+
+static const u16 xcm_int1_bb_a0_attn_idx[25] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg xcm_int1_bb_a0 = {
+ 1, 25, xcm_int1_bb_a0_attn_idx, 0x1000190, 0x100019c, 0x1000198,
+ 0x1000194
+};
+
+static const u16 xcm_int2_bb_a0_attn_idx[8] = {
+ 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_int2_bb_a0 = {
+ 2, 8, xcm_int2_bb_a0_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8,
+ 0x10001a4
+};
+
+static struct attn_hw_reg *xcm_int_bb_a0_regs[3] = {
+ &xcm_int0_bb_a0, &xcm_int1_bb_a0, &xcm_int2_bb_a0,
+};
+
+static const u16 xcm_int0_bb_b0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg xcm_int0_bb_b0 = {
+ 0, 16, xcm_int0_bb_b0_attn_idx, 0x1000180, 0x100018c, 0x1000188,
+ 0x1000184
+};
+
+static const u16 xcm_int1_bb_b0_attn_idx[25] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg xcm_int1_bb_b0 = {
+ 1, 25, xcm_int1_bb_b0_attn_idx, 0x1000190, 0x100019c, 0x1000198,
+ 0x1000194
+};
+
+static const u16 xcm_int2_bb_b0_attn_idx[8] = {
+ 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_int2_bb_b0 = {
+ 2, 8, xcm_int2_bb_b0_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8,
+ 0x10001a4
+};
+
+static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = {
+ &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0,
+};
+
+static const u16 xcm_int0_k2_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg xcm_int0_k2 = {
+ 0, 16, xcm_int0_k2_attn_idx, 0x1000180, 0x100018c, 0x1000188, 0x1000184
+};
+
+static const u16 xcm_int1_k2_attn_idx[25] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg xcm_int1_k2 = {
+ 1, 25, xcm_int1_k2_attn_idx, 0x1000190, 0x100019c, 0x1000198, 0x1000194
+};
+
+static const u16 xcm_int2_k2_attn_idx[8] = {
+ 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_int2_k2 = {
+ 2, 8, xcm_int2_k2_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4
+};
+
+static struct attn_hw_reg *xcm_int_k2_regs[3] = {
+ &xcm_int0_k2, &xcm_int1_k2, &xcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xcm_prty_attn_desc[59] = {
+ "xcm_mem036_i_ecc_rf_int",
+ "xcm_mem003_i_ecc_0_rf_int",
+ "xcm_mem003_i_ecc_1_rf_int",
+ "xcm_mem003_i_ecc_2_rf_int",
+ "xcm_mem003_i_ecc_3_rf_int",
+ "xcm_mem004_i_ecc_rf_int",
+ "xcm_mem033_i_ecc_0_rf_int",
+ "xcm_mem033_i_ecc_1_rf_int",
+ "xcm_mem034_i_ecc_rf_int",
+ "xcm_mem026_i_mem_prty",
+ "xcm_mem025_i_mem_prty",
+ "xcm_mem022_i_mem_prty",
+ "xcm_mem029_i_mem_prty",
+ "xcm_mem023_i_mem_prty",
+ "xcm_mem028_i_mem_prty",
+ "xcm_mem030_i_mem_prty",
+ "xcm_mem017_i_mem_prty",
+ "xcm_mem024_i_mem_prty",
+ "xcm_mem027_i_mem_prty",
+ "xcm_mem018_i_mem_prty",
+ "xcm_mem019_i_mem_prty",
+ "xcm_mem020_i_mem_prty",
+ "xcm_mem021_i_mem_prty",
+ "xcm_mem039_i_mem_prty",
+ "xcm_mem038_i_mem_prty",
+ "xcm_mem037_i_mem_prty",
+ "xcm_mem005_i_mem_prty",
+ "xcm_mem035_i_mem_prty",
+ "xcm_mem031_i_mem_prty",
+ "xcm_mem006_i_mem_prty",
+ "xcm_mem015_i_mem_prty",
+ "xcm_mem035_i_ecc_rf_int",
+ "xcm_mem032_i_ecc_0_rf_int",
+ "xcm_mem032_i_ecc_1_rf_int",
+ "xcm_mem033_i_ecc_rf_int",
+ "xcm_mem036_i_mem_prty",
+ "xcm_mem034_i_mem_prty",
+ "xcm_mem016_i_mem_prty",
+ "xcm_mem002_i_ecc_0_rf_int",
+ "xcm_mem002_i_ecc_1_rf_int",
+ "xcm_mem002_i_ecc_2_rf_int",
+ "xcm_mem002_i_ecc_3_rf_int",
+ "xcm_mem003_i_ecc_rf_int",
+ "xcm_mem031_i_ecc_0_rf_int",
+ "xcm_mem031_i_ecc_1_rf_int",
+ "xcm_mem032_i_ecc_rf_int",
+ "xcm_mem004_i_mem_prty",
+ "xcm_mem033_i_mem_prty",
+ "xcm_mem014_i_mem_prty",
+ "xcm_mem032_i_mem_prty",
+ "xcm_mem007_i_mem_prty",
+ "xcm_mem008_i_mem_prty",
+ "xcm_mem009_i_mem_prty",
+ "xcm_mem010_i_mem_prty",
+ "xcm_mem011_i_mem_prty",
+ "xcm_mem012_i_mem_prty",
+ "xcm_mem013_i_mem_prty",
+ "xcm_mem001_i_mem_prty",
+ "xcm_mem002_i_mem_prty",
+};
+#else
+#define xcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xcm_prty1_bb_a0_attn_idx[31] = {
+ 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 25, 26, 27, 30,
+ 35,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_prty1_bb_a0 = {
+ 0, 31, xcm_prty1_bb_a0_attn_idx, 0x1000200, 0x100020c, 0x1000208,
+ 0x1000204
+};
+
+static const u16 xcm_prty2_bb_a0_attn_idx[11] = {
+ 50, 51, 52, 53, 54, 55, 56, 57, 15, 29, 24,
+};
+
+static struct attn_hw_reg xcm_prty2_bb_a0 = {
+ 1, 11, xcm_prty2_bb_a0_attn_idx, 0x1000210, 0x100021c, 0x1000218,
+ 0x1000214
+};
+
+static struct attn_hw_reg *xcm_prty_bb_a0_regs[2] = {
+ &xcm_prty1_bb_a0, &xcm_prty2_bb_a0,
+};
+
+static const u16 xcm_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 24,
+ 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+};
+
+static struct attn_hw_reg xcm_prty1_bb_b0 = {
+ 0, 31, xcm_prty1_bb_b0_attn_idx, 0x1000200, 0x100020c, 0x1000208,
+ 0x1000204
+};
+
+static const u16 xcm_prty2_bb_b0_attn_idx[11] = {
+ 50, 51, 52, 53, 54, 55, 56, 48, 57, 58, 28,
+};
+
+static struct attn_hw_reg xcm_prty2_bb_b0 = {
+ 1, 11, xcm_prty2_bb_b0_attn_idx, 0x1000210, 0x100021c, 0x1000218,
+ 0x1000214
+};
+
+static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = {
+ &xcm_prty1_bb_b0, &xcm_prty2_bb_b0,
+};
+
+static const u16 xcm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg xcm_prty1_k2 = {
+ 0, 31, xcm_prty1_k2_attn_idx, 0x1000200, 0x100020c, 0x1000208,
+ 0x1000204
+};
+
+static const u16 xcm_prty2_k2_attn_idx[12] = {
+ 37, 49, 50, 51, 52, 53, 54, 55, 56, 48, 57, 58,
+};
+
+static struct attn_hw_reg xcm_prty2_k2 = {
+ 1, 12, xcm_prty2_k2_attn_idx, 0x1000210, 0x100021c, 0x1000218,
+ 0x1000214
+};
+
+static struct attn_hw_reg *xcm_prty_k2_regs[2] = {
+ &xcm_prty1_k2, &xcm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ycm_int_attn_desc[37] = {
+ "ycm_address_error",
+ "ycm_is_storm_ovfl_err",
+ "ycm_is_storm_under_err",
+ "ycm_is_msdm_ovfl_err",
+ "ycm_is_msdm_under_err",
+ "ycm_is_ysdm_ovfl_err",
+ "ycm_is_ysdm_under_err",
+ "ycm_is_xyld_ovfl_err",
+ "ycm_is_xyld_under_err",
+ "ycm_is_msem_ovfl_err",
+ "ycm_is_msem_under_err",
+ "ycm_is_usem_ovfl_err",
+ "ycm_is_usem_under_err",
+ "ycm_is_pbf_ovfl_err",
+ "ycm_is_pbf_under_err",
+ "ycm_is_qm_p_ovfl_err",
+ "ycm_is_qm_p_under_err",
+ "ycm_is_qm_s_ovfl_err",
+ "ycm_is_qm_s_under_err",
+ "ycm_is_grc_ovfl_err0",
+ "ycm_is_grc_under_err0",
+ "ycm_is_grc_ovfl_err1",
+ "ycm_is_grc_under_err1",
+ "ycm_is_grc_ovfl_err2",
+ "ycm_is_grc_under_err2",
+ "ycm_is_grc_ovfl_err3",
+ "ycm_is_grc_under_err3",
+ "ycm_in_prcs_tbl_ovfl",
+ "ycm_sm_con_data_buf_ovfl",
+ "ycm_sm_con_cmd_buf_ovfl",
+ "ycm_agg_task_data_buf_ovfl",
+ "ycm_agg_task_cmd_buf_ovfl",
+ "ycm_sm_task_data_buf_ovfl",
+ "ycm_sm_task_cmd_buf_ovfl",
+ "ycm_fi_desc_input_violate",
+ "ycm_se_desc_input_violate",
+ "ycm_qmreg_more4",
+};
+#else
+#define ycm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ycm_int0_bb_a0_attn_idx[13] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg ycm_int0_bb_a0 = {
+ 0, 13, ycm_int0_bb_a0_attn_idx, 0x1080180, 0x108018c, 0x1080188,
+ 0x1080184
+};
+
+static const u16 ycm_int1_bb_a0_attn_idx[23] = {
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg ycm_int1_bb_a0 = {
+ 1, 23, ycm_int1_bb_a0_attn_idx, 0x1080190, 0x108019c, 0x1080198,
+ 0x1080194
+};
+
+static const u16 ycm_int2_bb_a0_attn_idx[1] = {
+ 36,
+};
+
+static struct attn_hw_reg ycm_int2_bb_a0 = {
+ 2, 1, ycm_int2_bb_a0_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8,
+ 0x10801a4
+};
+
+static struct attn_hw_reg *ycm_int_bb_a0_regs[3] = {
+ &ycm_int0_bb_a0, &ycm_int1_bb_a0, &ycm_int2_bb_a0,
+};
+
+static const u16 ycm_int0_bb_b0_attn_idx[13] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg ycm_int0_bb_b0 = {
+ 0, 13, ycm_int0_bb_b0_attn_idx, 0x1080180, 0x108018c, 0x1080188,
+ 0x1080184
+};
+
+static const u16 ycm_int1_bb_b0_attn_idx[23] = {
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg ycm_int1_bb_b0 = {
+ 1, 23, ycm_int1_bb_b0_attn_idx, 0x1080190, 0x108019c, 0x1080198,
+ 0x1080194
+};
+
+static const u16 ycm_int2_bb_b0_attn_idx[1] = {
+ 36,
+};
+
+static struct attn_hw_reg ycm_int2_bb_b0 = {
+ 2, 1, ycm_int2_bb_b0_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8,
+ 0x10801a4
+};
+
+static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = {
+ &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0,
+};
+
+static const u16 ycm_int0_k2_attn_idx[13] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg ycm_int0_k2 = {
+ 0, 13, ycm_int0_k2_attn_idx, 0x1080180, 0x108018c, 0x1080188, 0x1080184
+};
+
+static const u16 ycm_int1_k2_attn_idx[23] = {
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg ycm_int1_k2 = {
+ 1, 23, ycm_int1_k2_attn_idx, 0x1080190, 0x108019c, 0x1080198, 0x1080194
+};
+
+static const u16 ycm_int2_k2_attn_idx[1] = {
+ 36,
+};
+
+static struct attn_hw_reg ycm_int2_k2 = {
+ 2, 1, ycm_int2_k2_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4
+};
+
+static struct attn_hw_reg *ycm_int_k2_regs[3] = {
+ &ycm_int0_k2, &ycm_int1_k2, &ycm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ycm_prty_attn_desc[44] = {
+ "ycm_mem027_i_ecc_rf_int",
+ "ycm_mem003_i_ecc_0_rf_int",
+ "ycm_mem003_i_ecc_1_rf_int",
+ "ycm_mem022_i_ecc_0_rf_int",
+ "ycm_mem022_i_ecc_1_rf_int",
+ "ycm_mem023_i_ecc_rf_int",
+ "ycm_mem005_i_ecc_0_rf_int",
+ "ycm_mem005_i_ecc_1_rf_int",
+ "ycm_mem025_i_ecc_0_rf_int",
+ "ycm_mem025_i_ecc_1_rf_int",
+ "ycm_mem018_i_mem_prty",
+ "ycm_mem020_i_mem_prty",
+ "ycm_mem017_i_mem_prty",
+ "ycm_mem016_i_mem_prty",
+ "ycm_mem019_i_mem_prty",
+ "ycm_mem015_i_mem_prty",
+ "ycm_mem011_i_mem_prty",
+ "ycm_mem012_i_mem_prty",
+ "ycm_mem013_i_mem_prty",
+ "ycm_mem014_i_mem_prty",
+ "ycm_mem030_i_mem_prty",
+ "ycm_mem029_i_mem_prty",
+ "ycm_mem028_i_mem_prty",
+ "ycm_mem004_i_mem_prty",
+ "ycm_mem024_i_mem_prty",
+ "ycm_mem006_i_mem_prty",
+ "ycm_mem026_i_mem_prty",
+ "ycm_mem021_i_mem_prty",
+ "ycm_mem007_i_mem_prty_0",
+ "ycm_mem007_i_mem_prty_1",
+ "ycm_mem008_i_mem_prty",
+ "ycm_mem026_i_ecc_rf_int",
+ "ycm_mem021_i_ecc_0_rf_int",
+ "ycm_mem021_i_ecc_1_rf_int",
+ "ycm_mem022_i_ecc_rf_int",
+ "ycm_mem024_i_ecc_0_rf_int",
+ "ycm_mem024_i_ecc_1_rf_int",
+ "ycm_mem027_i_mem_prty",
+ "ycm_mem023_i_mem_prty",
+ "ycm_mem025_i_mem_prty",
+ "ycm_mem009_i_mem_prty",
+ "ycm_mem010_i_mem_prty",
+ "ycm_mem001_i_mem_prty",
+ "ycm_mem002_i_mem_prty",
+};
+#else
+#define ycm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ycm_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg ycm_prty1_bb_a0 = {
+ 0, 31, ycm_prty1_bb_a0_attn_idx, 0x1080200, 0x108020c, 0x1080208,
+ 0x1080204
+};
+
+static const u16 ycm_prty2_bb_a0_attn_idx[3] = {
+ 41, 42, 43,
+};
+
+static struct attn_hw_reg ycm_prty2_bb_a0 = {
+ 1, 3, ycm_prty2_bb_a0_attn_idx, 0x1080210, 0x108021c, 0x1080218,
+ 0x1080214
+};
+
+static struct attn_hw_reg *ycm_prty_bb_a0_regs[2] = {
+ &ycm_prty1_bb_a0, &ycm_prty2_bb_a0,
+};
+
+static const u16 ycm_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg ycm_prty1_bb_b0 = {
+ 0, 31, ycm_prty1_bb_b0_attn_idx, 0x1080200, 0x108020c, 0x1080208,
+ 0x1080204
+};
+
+static const u16 ycm_prty2_bb_b0_attn_idx[3] = {
+ 41, 42, 43,
+};
+
+static struct attn_hw_reg ycm_prty2_bb_b0 = {
+ 1, 3, ycm_prty2_bb_b0_attn_idx, 0x1080210, 0x108021c, 0x1080218,
+ 0x1080214
+};
+
+static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = {
+ &ycm_prty1_bb_b0, &ycm_prty2_bb_b0,
+};
+
+static const u16 ycm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg ycm_prty1_k2 = {
+ 0, 31, ycm_prty1_k2_attn_idx, 0x1080200, 0x108020c, 0x1080208,
+ 0x1080204
+};
+
+static const u16 ycm_prty2_k2_attn_idx[4] = {
+ 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg ycm_prty2_k2 = {
+ 1, 4, ycm_prty2_k2_attn_idx, 0x1080210, 0x108021c, 0x1080218, 0x1080214
+};
+
+static struct attn_hw_reg *ycm_prty_k2_regs[2] = {
+ &ycm_prty1_k2, &ycm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pcm_int_attn_desc[20] = {
+ "pcm_address_error",
+ "pcm_is_storm_ovfl_err",
+ "pcm_is_storm_under_err",
+ "pcm_is_psdm_ovfl_err",
+ "pcm_is_psdm_under_err",
+ "pcm_is_pbf_ovfl_err",
+ "pcm_is_pbf_under_err",
+ "pcm_is_grc_ovfl_err0",
+ "pcm_is_grc_under_err0",
+ "pcm_is_grc_ovfl_err1",
+ "pcm_is_grc_under_err1",
+ "pcm_is_grc_ovfl_err2",
+ "pcm_is_grc_under_err2",
+ "pcm_is_grc_ovfl_err3",
+ "pcm_is_grc_under_err3",
+ "pcm_in_prcs_tbl_ovfl",
+ "pcm_sm_con_data_buf_ovfl",
+ "pcm_sm_con_cmd_buf_ovfl",
+ "pcm_fi_desc_input_violate",
+ "pcm_qmreg_more4",
+};
+#else
+#define pcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcm_int0_bb_a0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pcm_int0_bb_a0 = {
+ 0, 5, pcm_int0_bb_a0_attn_idx, 0x1100180, 0x110018c, 0x1100188,
+ 0x1100184
+};
+
+static const u16 pcm_int1_bb_a0_attn_idx[14] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pcm_int1_bb_a0 = {
+ 1, 14, pcm_int1_bb_a0_attn_idx, 0x1100190, 0x110019c, 0x1100198,
+ 0x1100194
+};
+
+static const u16 pcm_int2_bb_a0_attn_idx[1] = {
+ 19,
+};
+
+static struct attn_hw_reg pcm_int2_bb_a0 = {
+ 2, 1, pcm_int2_bb_a0_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8,
+ 0x11001a4
+};
+
+static struct attn_hw_reg *pcm_int_bb_a0_regs[3] = {
+ &pcm_int0_bb_a0, &pcm_int1_bb_a0, &pcm_int2_bb_a0,
+};
+
+static const u16 pcm_int0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pcm_int0_bb_b0 = {
+ 0, 5, pcm_int0_bb_b0_attn_idx, 0x1100180, 0x110018c, 0x1100188,
+ 0x1100184
+};
+
+static const u16 pcm_int1_bb_b0_attn_idx[14] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pcm_int1_bb_b0 = {
+ 1, 14, pcm_int1_bb_b0_attn_idx, 0x1100190, 0x110019c, 0x1100198,
+ 0x1100194
+};
+
+static const u16 pcm_int2_bb_b0_attn_idx[1] = {
+ 19,
+};
+
+static struct attn_hw_reg pcm_int2_bb_b0 = {
+ 2, 1, pcm_int2_bb_b0_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8,
+ 0x11001a4
+};
+
+static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = {
+ &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0,
+};
+
+static const u16 pcm_int0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pcm_int0_k2 = {
+ 0, 5, pcm_int0_k2_attn_idx, 0x1100180, 0x110018c, 0x1100188, 0x1100184
+};
+
+static const u16 pcm_int1_k2_attn_idx[14] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pcm_int1_k2 = {
+ 1, 14, pcm_int1_k2_attn_idx, 0x1100190, 0x110019c, 0x1100198, 0x1100194
+};
+
+static const u16 pcm_int2_k2_attn_idx[1] = {
+ 19,
+};
+
+static struct attn_hw_reg pcm_int2_k2 = {
+ 2, 1, pcm_int2_k2_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4
+};
+
+static struct attn_hw_reg *pcm_int_k2_regs[3] = {
+ &pcm_int0_k2, &pcm_int1_k2, &pcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pcm_prty_attn_desc[18] = {
+ "pcm_mem012_i_ecc_rf_int",
+ "pcm_mem010_i_ecc_0_rf_int",
+ "pcm_mem010_i_ecc_1_rf_int",
+ "pcm_mem008_i_mem_prty",
+ "pcm_mem007_i_mem_prty",
+ "pcm_mem006_i_mem_prty",
+ "pcm_mem002_i_mem_prty",
+ "pcm_mem003_i_mem_prty",
+ "pcm_mem004_i_mem_prty",
+ "pcm_mem005_i_mem_prty",
+ "pcm_mem011_i_mem_prty",
+ "pcm_mem001_i_mem_prty",
+ "pcm_mem011_i_ecc_rf_int",
+ "pcm_mem009_i_ecc_0_rf_int",
+ "pcm_mem009_i_ecc_1_rf_int",
+ "pcm_mem010_i_mem_prty",
+ "pcm_mem013_i_mem_prty",
+ "pcm_mem012_i_mem_prty",
+};
+#else
+#define pcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcm_prty1_bb_a0_attn_idx[14] = {
+ 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pcm_prty1_bb_a0 = {
+ 0, 14, pcm_prty1_bb_a0_attn_idx, 0x1100200, 0x110020c, 0x1100208,
+ 0x1100204
+};
+
+static struct attn_hw_reg *pcm_prty_bb_a0_regs[1] = {
+ &pcm_prty1_bb_a0,
+};
+
+static const u16 pcm_prty1_bb_b0_attn_idx[11] = {
+ 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pcm_prty1_bb_b0 = {
+ 0, 11, pcm_prty1_bb_b0_attn_idx, 0x1100200, 0x110020c, 0x1100208,
+ 0x1100204
+};
+
+static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = {
+ &pcm_prty1_bb_b0,
+};
+
+static const u16 pcm_prty1_k2_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg pcm_prty1_k2 = {
+ 0, 12, pcm_prty1_k2_attn_idx, 0x1100200, 0x110020c, 0x1100208,
+ 0x1100204
+};
+
+static struct attn_hw_reg *pcm_prty_k2_regs[1] = {
+ &pcm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *qm_int_attn_desc[22] = {
+ "qm_address_error",
+ "qm_ovf_err_tx",
+ "qm_ovf_err_other",
+ "qm_pf_usg_cnt_err",
+ "qm_vf_usg_cnt_err",
+ "qm_voq_crd_inc_err",
+ "qm_voq_crd_dec_err",
+ "qm_byte_crd_inc_err",
+ "qm_byte_crd_dec_err",
+ "qm_err_incdec_rlglblcrd",
+ "qm_err_incdec_rlpfcrd",
+ "qm_err_incdec_wfqpfcrd",
+ "qm_err_incdec_wfqvpcrd",
+ "qm_err_incdec_voqlinecrd",
+ "qm_err_incdec_voqbytecrd",
+ "qm_fifos_error",
+ "qm_qm_rl_dc_exp_pf_controller_pop_error",
+ "qm_qm_rl_dc_exp_pf_controller_push_error",
+ "qm_qm_rl_dc_rf_req_controller_pop_error",
+ "qm_qm_rl_dc_rf_req_controller_push_error",
+ "qm_qm_rl_dc_rf_res_controller_pop_error",
+ "qm_qm_rl_dc_rf_res_controller_push_error",
+};
+#else
+#define qm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 qm_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg qm_int0_bb_a0 = {
+ 0, 16, qm_int0_bb_a0_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184
+};
+
+static struct attn_hw_reg *qm_int_bb_a0_regs[1] = {
+ &qm_int0_bb_a0,
+};
+
+static const u16 qm_int0_bb_b0_attn_idx[22] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21,
+};
+
+static struct attn_hw_reg qm_int0_bb_b0 = {
+ 0, 22, qm_int0_bb_b0_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184
+};
+
+static struct attn_hw_reg *qm_int_bb_b0_regs[1] = {
+ &qm_int0_bb_b0,
+};
+
+static const u16 qm_int0_k2_attn_idx[22] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21,
+};
+
+static struct attn_hw_reg qm_int0_k2 = {
+ 0, 22, qm_int0_k2_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184
+};
+
+static struct attn_hw_reg *qm_int_k2_regs[1] = {
+ &qm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *qm_prty_attn_desc[109] = {
+ "qm_xcm_wrc_fifo",
+ "qm_ucm_wrc_fifo",
+ "qm_tcm_wrc_fifo",
+ "qm_ccm_wrc_fifo",
+ "qm_bigramhigh",
+ "qm_bigramlow",
+ "qm_base_address",
+ "qm_wrbuff",
+ "qm_bigramhigh_ext_a",
+ "qm_bigramlow_ext_a",
+ "qm_base_address_ext_a",
+ "qm_mem006_i_ecc_0_rf_int",
+ "qm_mem006_i_ecc_1_rf_int",
+ "qm_mem005_i_ecc_0_rf_int",
+ "qm_mem005_i_ecc_1_rf_int",
+ "qm_mem012_i_ecc_rf_int",
+ "qm_mem037_i_mem_prty",
+ "qm_mem036_i_mem_prty",
+ "qm_mem039_i_mem_prty",
+ "qm_mem038_i_mem_prty",
+ "qm_mem040_i_mem_prty",
+ "qm_mem042_i_mem_prty",
+ "qm_mem041_i_mem_prty",
+ "qm_mem056_i_mem_prty",
+ "qm_mem055_i_mem_prty",
+ "qm_mem053_i_mem_prty",
+ "qm_mem054_i_mem_prty",
+ "qm_mem057_i_mem_prty",
+ "qm_mem058_i_mem_prty",
+ "qm_mem062_i_mem_prty",
+ "qm_mem061_i_mem_prty",
+ "qm_mem059_i_mem_prty",
+ "qm_mem060_i_mem_prty",
+ "qm_mem063_i_mem_prty",
+ "qm_mem064_i_mem_prty",
+ "qm_mem033_i_mem_prty",
+ "qm_mem032_i_mem_prty",
+ "qm_mem030_i_mem_prty",
+ "qm_mem031_i_mem_prty",
+ "qm_mem034_i_mem_prty",
+ "qm_mem035_i_mem_prty",
+ "qm_mem051_i_mem_prty",
+ "qm_mem042_i_ecc_0_rf_int",
+ "qm_mem042_i_ecc_1_rf_int",
+ "qm_mem041_i_ecc_0_rf_int",
+ "qm_mem041_i_ecc_1_rf_int",
+ "qm_mem048_i_ecc_rf_int",
+ "qm_mem009_i_mem_prty",
+ "qm_mem008_i_mem_prty",
+ "qm_mem011_i_mem_prty",
+ "qm_mem010_i_mem_prty",
+ "qm_mem012_i_mem_prty",
+ "qm_mem014_i_mem_prty",
+ "qm_mem013_i_mem_prty",
+ "qm_mem028_i_mem_prty",
+ "qm_mem027_i_mem_prty",
+ "qm_mem025_i_mem_prty",
+ "qm_mem026_i_mem_prty",
+ "qm_mem029_i_mem_prty",
+ "qm_mem005_i_mem_prty",
+ "qm_mem004_i_mem_prty",
+ "qm_mem002_i_mem_prty",
+ "qm_mem003_i_mem_prty",
+ "qm_mem006_i_mem_prty",
+ "qm_mem007_i_mem_prty",
+ "qm_mem023_i_mem_prty",
+ "qm_mem047_i_mem_prty",
+ "qm_mem049_i_mem_prty",
+ "qm_mem048_i_mem_prty",
+ "qm_mem052_i_mem_prty",
+ "qm_mem050_i_mem_prty",
+ "qm_mem045_i_mem_prty",
+ "qm_mem046_i_mem_prty",
+ "qm_mem043_i_mem_prty",
+ "qm_mem044_i_mem_prty",
+ "qm_mem017_i_mem_prty",
+ "qm_mem016_i_mem_prty",
+ "qm_mem021_i_mem_prty",
+ "qm_mem024_i_mem_prty",
+ "qm_mem019_i_mem_prty",
+ "qm_mem018_i_mem_prty",
+ "qm_mem015_i_mem_prty",
+ "qm_mem022_i_mem_prty",
+ "qm_mem020_i_mem_prty",
+ "qm_mem007_i_mem_prty_0",
+ "qm_mem007_i_mem_prty_1",
+ "qm_mem007_i_mem_prty_2",
+ "qm_mem001_i_mem_prty",
+ "qm_mem043_i_mem_prty_0",
+ "qm_mem043_i_mem_prty_1",
+ "qm_mem043_i_mem_prty_2",
+ "qm_mem007_i_mem_prty_3",
+ "qm_mem007_i_mem_prty_4",
+ "qm_mem007_i_mem_prty_5",
+ "qm_mem007_i_mem_prty_6",
+ "qm_mem007_i_mem_prty_7",
+ "qm_mem007_i_mem_prty_8",
+ "qm_mem007_i_mem_prty_9",
+ "qm_mem007_i_mem_prty_10",
+ "qm_mem007_i_mem_prty_11",
+ "qm_mem007_i_mem_prty_12",
+ "qm_mem007_i_mem_prty_13",
+ "qm_mem007_i_mem_prty_14",
+ "qm_mem007_i_mem_prty_15",
+ "qm_mem043_i_mem_prty_3",
+ "qm_mem043_i_mem_prty_4",
+ "qm_mem043_i_mem_prty_5",
+ "qm_mem043_i_mem_prty_6",
+ "qm_mem043_i_mem_prty_7",
+};
+#else
+#define qm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 qm_prty0_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg qm_prty0_bb_a0 = {
+ 0, 11, qm_prty0_bb_a0_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194
+};
+
+static const u16 qm_prty1_bb_a0_attn_idx[31] = {
+ 17, 35, 36, 37, 38, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+};
+
+static struct attn_hw_reg qm_prty1_bb_a0 = {
+ 1, 31, qm_prty1_bb_a0_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204
+};
+
+static const u16 qm_prty2_bb_a0_attn_idx[31] = {
+ 66, 67, 69, 70, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 87, 20, 18, 25,
+ 27, 32, 24, 26, 41, 31, 29, 28, 30, 23, 88, 89, 90,
+};
+
+static struct attn_hw_reg qm_prty2_bb_a0 = {
+ 2, 31, qm_prty2_bb_a0_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214
+};
+
+static const u16 qm_prty3_bb_a0_attn_idx[11] = {
+ 104, 105, 106, 107, 108, 33, 16, 34, 19, 72, 71,
+};
+
+static struct attn_hw_reg qm_prty3_bb_a0 = {
+ 3, 11, qm_prty3_bb_a0_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224
+};
+
+static struct attn_hw_reg *qm_prty_bb_a0_regs[4] = {
+ &qm_prty0_bb_a0, &qm_prty1_bb_a0, &qm_prty2_bb_a0, &qm_prty3_bb_a0,
+};
+
+static const u16 qm_prty0_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg qm_prty0_bb_b0 = {
+ 0, 11, qm_prty0_bb_b0_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194
+};
+
+static const u16 qm_prty1_bb_b0_attn_idx[31] = {
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg qm_prty1_bb_b0 = {
+ 1, 31, qm_prty1_bb_b0_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204
+};
+
+static const u16 qm_prty2_bb_b0_attn_idx[31] = {
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 58, 60, 62, 49, 75, 76, 53, 77, 78,
+ 79, 80, 81, 52, 65, 57, 82, 56, 83, 48, 84, 85, 86,
+};
+
+static struct attn_hw_reg qm_prty2_bb_b0 = {
+ 2, 31, qm_prty2_bb_b0_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214
+};
+
+static const u16 qm_prty3_bb_b0_attn_idx[11] = {
+ 91, 92, 93, 94, 95, 55, 87, 54, 61, 50, 47,
+};
+
+static struct attn_hw_reg qm_prty3_bb_b0 = {
+ 3, 11, qm_prty3_bb_b0_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224
+};
+
+static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = {
+ &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0,
+};
+
+static const u16 qm_prty0_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg qm_prty0_k2 = {
+ 0, 11, qm_prty0_k2_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194
+};
+
+static const u16 qm_prty1_k2_attn_idx[31] = {
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg qm_prty1_k2 = {
+ 1, 31, qm_prty1_k2_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204
+};
+
+static const u16 qm_prty2_k2_attn_idx[31] = {
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 58, 60, 62, 49, 75, 76, 53, 77, 78,
+ 79, 80, 81, 52, 65, 57, 82, 56, 83, 48, 84, 85, 86,
+};
+
+static struct attn_hw_reg qm_prty2_k2 = {
+ 2, 31, qm_prty2_k2_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214
+};
+
+static const u16 qm_prty3_k2_attn_idx[19] = {
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 55, 87, 54, 61,
+ 50, 47,
+};
+
+static struct attn_hw_reg qm_prty3_k2 = {
+ 3, 19, qm_prty3_k2_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224
+};
+
+static struct attn_hw_reg *qm_prty_k2_regs[4] = {
+ &qm_prty0_k2, &qm_prty1_k2, &qm_prty2_k2, &qm_prty3_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tm_int_attn_desc[43] = {
+ "tm_address_error",
+ "tm_pxp_read_data_fifo_ov",
+ "tm_pxp_read_data_fifo_un",
+ "tm_pxp_read_ctrl_fifo_ov",
+ "tm_pxp_read_ctrl_fifo_un",
+ "tm_cfc_load_command_fifo_ov",
+ "tm_cfc_load_command_fifo_un",
+ "tm_cfc_load_echo_fifo_ov",
+ "tm_cfc_load_echo_fifo_un",
+ "tm_client_out_fifo_ov",
+ "tm_client_out_fifo_un",
+ "tm_ac_command_fifo_ov",
+ "tm_ac_command_fifo_un",
+ "tm_client_in_pbf_fifo_ov",
+ "tm_client_in_pbf_fifo_un",
+ "tm_client_in_ucm_fifo_ov",
+ "tm_client_in_ucm_fifo_un",
+ "tm_client_in_tcm_fifo_ov",
+ "tm_client_in_tcm_fifo_un",
+ "tm_client_in_xcm_fifo_ov",
+ "tm_client_in_xcm_fifo_un",
+ "tm_expiration_cmd_fifo_ov",
+ "tm_expiration_cmd_fifo_un",
+ "tm_stop_all_lc_invalid",
+ "tm_command_lc_invalid_0",
+ "tm_command_lc_invalid_1",
+ "tm_init_command_lc_valid",
+ "tm_stop_all_exp_lc_valid",
+ "tm_command_cid_invalid_0",
+ "tm_reserved_command",
+ "tm_command_cid_invalid_1",
+ "tm_cload_res_loaderr_conn",
+ "tm_cload_res_loadcancel_conn",
+ "tm_cload_res_validerr_conn",
+ "tm_context_rd_last",
+ "tm_context_wr_last",
+ "tm_pxp_rd_data_eop_bvalid",
+ "tm_pend_conn_scan",
+ "tm_pend_task_scan",
+ "tm_pxp_rd_data_eop_error",
+ "tm_cload_res_loaderr_task",
+ "tm_cload_res_loadcancel_task",
+ "tm_cload_res_validerr_task",
+};
+#else
+#define tm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tm_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tm_int0_bb_a0 = {
+ 0, 32, tm_int0_bb_a0_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184
+};
+
+static const u16 tm_int1_bb_a0_attn_idx[11] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static struct attn_hw_reg tm_int1_bb_a0 = {
+ 1, 11, tm_int1_bb_a0_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194
+};
+
+static struct attn_hw_reg *tm_int_bb_a0_regs[2] = {
+ &tm_int0_bb_a0, &tm_int1_bb_a0,
+};
+
+static const u16 tm_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tm_int0_bb_b0 = {
+ 0, 32, tm_int0_bb_b0_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184
+};
+
+static const u16 tm_int1_bb_b0_attn_idx[11] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static struct attn_hw_reg tm_int1_bb_b0 = {
+ 1, 11, tm_int1_bb_b0_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194
+};
+
+static struct attn_hw_reg *tm_int_bb_b0_regs[2] = {
+ &tm_int0_bb_b0, &tm_int1_bb_b0,
+};
+
+static const u16 tm_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tm_int0_k2 = {
+ 0, 32, tm_int0_k2_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184
+};
+
+static const u16 tm_int1_k2_attn_idx[11] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static struct attn_hw_reg tm_int1_k2 = {
+ 1, 11, tm_int1_k2_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194
+};
+
+static struct attn_hw_reg *tm_int_k2_regs[2] = {
+ &tm_int0_k2, &tm_int1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tm_prty_attn_desc[17] = {
+ "tm_mem012_i_ecc_0_rf_int",
+ "tm_mem012_i_ecc_1_rf_int",
+ "tm_mem003_i_ecc_rf_int",
+ "tm_mem016_i_mem_prty",
+ "tm_mem007_i_mem_prty",
+ "tm_mem010_i_mem_prty",
+ "tm_mem008_i_mem_prty",
+ "tm_mem009_i_mem_prty",
+ "tm_mem013_i_mem_prty",
+ "tm_mem015_i_mem_prty",
+ "tm_mem014_i_mem_prty",
+ "tm_mem004_i_mem_prty",
+ "tm_mem005_i_mem_prty",
+ "tm_mem006_i_mem_prty",
+ "tm_mem011_i_mem_prty",
+ "tm_mem001_i_mem_prty",
+ "tm_mem002_i_mem_prty",
+};
+#else
+#define tm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tm_prty1_bb_a0_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg tm_prty1_bb_a0 = {
+ 0, 17, tm_prty1_bb_a0_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204
+};
+
+static struct attn_hw_reg *tm_prty_bb_a0_regs[1] = {
+ &tm_prty1_bb_a0,
+};
+
+static const u16 tm_prty1_bb_b0_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg tm_prty1_bb_b0 = {
+ 0, 17, tm_prty1_bb_b0_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204
+};
+
+static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = {
+ &tm_prty1_bb_b0,
+};
+
+static const u16 tm_prty1_k2_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg tm_prty1_k2 = {
+ 0, 17, tm_prty1_k2_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204
+};
+
+static struct attn_hw_reg *tm_prty_k2_regs[1] = {
+ &tm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dorq_int_attn_desc[9] = {
+ "dorq_address_error",
+ "dorq_db_drop",
+ "dorq_dorq_fifo_ovfl_err",
+ "dorq_dorq_fifo_afull",
+ "dorq_cfc_byp_validation_err",
+ "dorq_cfc_ld_resp_err",
+ "dorq_xcm_done_cnt_err",
+ "dorq_cfc_ld_req_fifo_ovfl_err",
+ "dorq_cfc_ld_req_fifo_under_err",
+};
+#else
+#define dorq_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 dorq_int0_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg dorq_int0_bb_a0 = {
+ 0, 9, dorq_int0_bb_a0_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184
+};
+
+static struct attn_hw_reg *dorq_int_bb_a0_regs[1] = {
+ &dorq_int0_bb_a0,
+};
+
+static const u16 dorq_int0_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg dorq_int0_bb_b0 = {
+ 0, 9, dorq_int0_bb_b0_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184
+};
+
+static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = {
+ &dorq_int0_bb_b0,
+};
+
+static const u16 dorq_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg dorq_int0_k2 = {
+ 0, 9, dorq_int0_k2_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184
+};
+
+static struct attn_hw_reg *dorq_int_k2_regs[1] = {
+ &dorq_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dorq_prty_attn_desc[7] = {
+ "dorq_datapath_registers",
+ "dorq_mem002_i_ecc_rf_int",
+ "dorq_mem001_i_mem_prty",
+ "dorq_mem003_i_mem_prty",
+ "dorq_mem004_i_mem_prty",
+ "dorq_mem005_i_mem_prty",
+ "dorq_mem006_i_mem_prty",
+};
+#else
+#define dorq_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 dorq_prty1_bb_a0_attn_idx[6] = {
+ 1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg dorq_prty1_bb_a0 = {
+ 0, 6, dorq_prty1_bb_a0_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204
+};
+
+static struct attn_hw_reg *dorq_prty_bb_a0_regs[1] = {
+ &dorq_prty1_bb_a0,
+};
+
+static const u16 dorq_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dorq_prty0_bb_b0 = {
+ 0, 1, dorq_prty0_bb_b0_attn_idx, 0x100190, 0x10019c, 0x100198, 0x100194
+};
+
+static const u16 dorq_prty1_bb_b0_attn_idx[6] = {
+ 1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg dorq_prty1_bb_b0 = {
+ 1, 6, dorq_prty1_bb_b0_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204
+};
+
+static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = {
+ &dorq_prty0_bb_b0, &dorq_prty1_bb_b0,
+};
+
+static const u16 dorq_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dorq_prty0_k2 = {
+ 0, 1, dorq_prty0_k2_attn_idx, 0x100190, 0x10019c, 0x100198, 0x100194
+};
+
+static const u16 dorq_prty1_k2_attn_idx[6] = {
+ 1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg dorq_prty1_k2 = {
+ 1, 6, dorq_prty1_k2_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204
+};
+
+static struct attn_hw_reg *dorq_prty_k2_regs[2] = {
+ &dorq_prty0_k2, &dorq_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *brb_int_attn_desc[237] = {
+ "brb_address_error",
+ "brb_rc_pkt0_rls_error",
+ "brb_rc_pkt0_1st_error",
+ "brb_rc_pkt0_len_error",
+ "brb_rc_pkt0_middle_error",
+ "brb_rc_pkt0_protocol_error",
+ "brb_rc_pkt1_rls_error",
+ "brb_rc_pkt1_1st_error",
+ "brb_rc_pkt1_len_error",
+ "brb_rc_pkt1_middle_error",
+ "brb_rc_pkt1_protocol_error",
+ "brb_rc_pkt2_rls_error",
+ "brb_rc_pkt2_1st_error",
+ "brb_rc_pkt2_len_error",
+ "brb_rc_pkt2_middle_error",
+ "brb_rc_pkt2_protocol_error",
+ "brb_rc_pkt3_rls_error",
+ "brb_rc_pkt3_1st_error",
+ "brb_rc_pkt3_len_error",
+ "brb_rc_pkt3_middle_error",
+ "brb_rc_pkt3_protocol_error",
+ "brb_rc_sop_req_tc_port_error",
+ "brb_uncomplient_lossless_error",
+ "brb_wc0_protocol_error",
+ "brb_wc1_protocol_error",
+ "brb_wc2_protocol_error",
+ "brb_wc3_protocol_error",
+ "brb_ll_arb_prefetch_sop_error",
+ "brb_ll_blk_error",
+ "brb_packet_counter_error",
+ "brb_byte_counter_error",
+ "brb_mac0_fc_cnt_error",
+ "brb_mac1_fc_cnt_error",
+ "brb_ll_arb_calc_error",
+ "brb_unused_0",
+ "brb_wc0_inp_fifo_error",
+ "brb_wc0_sop_fifo_error",
+ "brb_unused_1",
+ "brb_wc0_eop_fifo_error",
+ "brb_wc0_queue_fifo_error",
+ "brb_wc0_free_point_fifo_error",
+ "brb_wc0_next_point_fifo_error",
+ "brb_wc0_strt_fifo_error",
+ "brb_wc0_second_dscr_fifo_error",
+ "brb_wc0_pkt_avail_fifo_error",
+ "brb_wc0_cos_cnt_fifo_error",
+ "brb_wc0_notify_fifo_error",
+ "brb_wc0_ll_req_fifo_error",
+ "brb_wc0_ll_pa_cnt_error",
+ "brb_wc0_bb_pa_cnt_error",
+ "brb_wc1_inp_fifo_error",
+ "brb_wc1_sop_fifo_error",
+ "brb_wc1_eop_fifo_error",
+ "brb_wc1_queue_fifo_error",
+ "brb_wc1_free_point_fifo_error",
+ "brb_wc1_next_point_fifo_error",
+ "brb_wc1_strt_fifo_error",
+ "brb_wc1_second_dscr_fifo_error",
+ "brb_wc1_pkt_avail_fifo_error",
+ "brb_wc1_cos_cnt_fifo_error",
+ "brb_wc1_notify_fifo_error",
+ "brb_wc1_ll_req_fifo_error",
+ "brb_wc1_ll_pa_cnt_error",
+ "brb_wc1_bb_pa_cnt_error",
+ "brb_wc2_inp_fifo_error",
+ "brb_wc2_sop_fifo_error",
+ "brb_wc2_eop_fifo_error",
+ "brb_wc2_queue_fifo_error",
+ "brb_wc2_free_point_fifo_error",
+ "brb_wc2_next_point_fifo_error",
+ "brb_wc2_strt_fifo_error",
+ "brb_wc2_second_dscr_fifo_error",
+ "brb_wc2_pkt_avail_fifo_error",
+ "brb_wc2_cos_cnt_fifo_error",
+ "brb_wc2_notify_fifo_error",
+ "brb_wc2_ll_req_fifo_error",
+ "brb_wc2_ll_pa_cnt_error",
+ "brb_wc2_bb_pa_cnt_error",
+ "brb_wc3_inp_fifo_error",
+ "brb_wc3_sop_fifo_error",
+ "brb_wc3_eop_fifo_error",
+ "brb_wc3_queue_fifo_error",
+ "brb_wc3_free_point_fifo_error",
+ "brb_wc3_next_point_fifo_error",
+ "brb_wc3_strt_fifo_error",
+ "brb_wc3_second_dscr_fifo_error",
+ "brb_wc3_pkt_avail_fifo_error",
+ "brb_wc3_cos_cnt_fifo_error",
+ "brb_wc3_notify_fifo_error",
+ "brb_wc3_ll_req_fifo_error",
+ "brb_wc3_ll_pa_cnt_error",
+ "brb_wc3_bb_pa_cnt_error",
+ "brb_rc_pkt0_side_fifo_error",
+ "brb_rc_pkt0_req_fifo_error",
+ "brb_rc_pkt0_blk_fifo_error",
+ "brb_rc_pkt0_rls_left_fifo_error",
+ "brb_rc_pkt0_strt_ptr_fifo_error",
+ "brb_rc_pkt0_second_ptr_fifo_error",
+ "brb_rc_pkt0_rsp_fifo_error",
+ "brb_rc_pkt0_dscr_fifo_error",
+ "brb_rc_pkt1_side_fifo_error",
+ "brb_rc_pkt1_req_fifo_error",
+ "brb_rc_pkt1_blk_fifo_error",
+ "brb_rc_pkt1_rls_left_fifo_error",
+ "brb_rc_pkt1_strt_ptr_fifo_error",
+ "brb_rc_pkt1_second_ptr_fifo_error",
+ "brb_rc_pkt1_rsp_fifo_error",
+ "brb_rc_pkt1_dscr_fifo_error",
+ "brb_rc_pkt2_side_fifo_error",
+ "brb_rc_pkt2_req_fifo_error",
+ "brb_rc_pkt2_blk_fifo_error",
+ "brb_rc_pkt2_rls_left_fifo_error",
+ "brb_rc_pkt2_strt_ptr_fifo_error",
+ "brb_rc_pkt2_second_ptr_fifo_error",
+ "brb_rc_pkt2_rsp_fifo_error",
+ "brb_rc_pkt2_dscr_fifo_error",
+ "brb_rc_pkt3_side_fifo_error",
+ "brb_rc_pkt3_req_fifo_error",
+ "brb_rc_pkt3_blk_fifo_error",
+ "brb_rc_pkt3_rls_left_fifo_error",
+ "brb_rc_pkt3_strt_ptr_fifo_error",
+ "brb_rc_pkt3_second_ptr_fifo_error",
+ "brb_rc_pkt3_rsp_fifo_error",
+ "brb_rc_pkt3_dscr_fifo_error",
+ "brb_rc_sop_strt_fifo_error",
+ "brb_rc_sop_req_fifo_error",
+ "brb_rc_sop_dscr_fifo_error",
+ "brb_rc_sop_queue_fifo_error",
+ "brb_rc0_eop_error",
+ "brb_rc1_eop_error",
+ "brb_ll_arb_rls_fifo_error",
+ "brb_ll_arb_prefetch_fifo_error",
+ "brb_rc_pkt0_rls_fifo_error",
+ "brb_rc_pkt1_rls_fifo_error",
+ "brb_rc_pkt2_rls_fifo_error",
+ "brb_rc_pkt3_rls_fifo_error",
+ "brb_rc_pkt4_rls_fifo_error",
+ "brb_rc_pkt4_rls_error",
+ "brb_rc_pkt4_1st_error",
+ "brb_rc_pkt4_len_error",
+ "brb_rc_pkt4_middle_error",
+ "brb_rc_pkt4_protocol_error",
+ "brb_rc_pkt4_side_fifo_error",
+ "brb_rc_pkt4_req_fifo_error",
+ "brb_rc_pkt4_blk_fifo_error",
+ "brb_rc_pkt4_rls_left_fifo_error",
+ "brb_rc_pkt4_strt_ptr_fifo_error",
+ "brb_rc_pkt4_second_ptr_fifo_error",
+ "brb_rc_pkt4_rsp_fifo_error",
+ "brb_rc_pkt4_dscr_fifo_error",
+ "brb_rc_pkt5_rls_error",
+ "brb_packet_available_sync_fifo_push_error",
+ "brb_wc4_protocol_error",
+ "brb_wc5_protocol_error",
+ "brb_wc6_protocol_error",
+ "brb_wc7_protocol_error",
+ "brb_wc4_inp_fifo_error",
+ "brb_wc4_sop_fifo_error",
+ "brb_wc4_queue_fifo_error",
+ "brb_wc4_free_point_fifo_error",
+ "brb_wc4_next_point_fifo_error",
+ "brb_wc4_strt_fifo_error",
+ "brb_wc4_second_dscr_fifo_error",
+ "brb_wc4_pkt_avail_fifo_error",
+ "brb_wc4_cos_cnt_fifo_error",
+ "brb_wc4_notify_fifo_error",
+ "brb_wc4_ll_req_fifo_error",
+ "brb_wc4_ll_pa_cnt_error",
+ "brb_wc4_bb_pa_cnt_error",
+ "brb_wc5_inp_fifo_error",
+ "brb_wc5_sop_fifo_error",
+ "brb_wc5_queue_fifo_error",
+ "brb_wc5_free_point_fifo_error",
+ "brb_wc5_next_point_fifo_error",
+ "brb_wc5_strt_fifo_error",
+ "brb_wc5_second_dscr_fifo_error",
+ "brb_wc5_pkt_avail_fifo_error",
+ "brb_wc5_cos_cnt_fifo_error",
+ "brb_wc5_notify_fifo_error",
+ "brb_wc5_ll_req_fifo_error",
+ "brb_wc5_ll_pa_cnt_error",
+ "brb_wc5_bb_pa_cnt_error",
+ "brb_wc6_inp_fifo_error",
+ "brb_wc6_sop_fifo_error",
+ "brb_wc6_queue_fifo_error",
+ "brb_wc6_free_point_fifo_error",
+ "brb_wc6_next_point_fifo_error",
+ "brb_wc6_strt_fifo_error",
+ "brb_wc6_second_dscr_fifo_error",
+ "brb_wc6_pkt_avail_fifo_error",
+ "brb_wc6_cos_cnt_fifo_error",
+ "brb_wc6_notify_fifo_error",
+ "brb_wc6_ll_req_fifo_error",
+ "brb_wc6_ll_pa_cnt_error",
+ "brb_wc6_bb_pa_cnt_error",
+ "brb_wc7_inp_fifo_error",
+ "brb_wc7_sop_fifo_error",
+ "brb_wc7_queue_fifo_error",
+ "brb_wc7_free_point_fifo_error",
+ "brb_wc7_next_point_fifo_error",
+ "brb_wc7_strt_fifo_error",
+ "brb_wc7_second_dscr_fifo_error",
+ "brb_wc7_pkt_avail_fifo_error",
+ "brb_wc7_cos_cnt_fifo_error",
+ "brb_wc7_notify_fifo_error",
+ "brb_wc7_ll_req_fifo_error",
+ "brb_wc7_ll_pa_cnt_error",
+ "brb_wc7_bb_pa_cnt_error",
+ "brb_wc9_queue_fifo_error",
+ "brb_rc_sop_inp_sync_fifo_push_error",
+ "brb_rc0_inp_sync_fifo_push_error",
+ "brb_rc1_inp_sync_fifo_push_error",
+ "brb_rc2_inp_sync_fifo_push_error",
+ "brb_rc3_inp_sync_fifo_push_error",
+ "brb_rc0_out_sync_fifo_push_error",
+ "brb_rc1_out_sync_fifo_push_error",
+ "brb_rc2_out_sync_fifo_push_error",
+ "brb_rc3_out_sync_fifo_push_error",
+ "brb_rc4_out_sync_fifo_push_error",
+ "brb_unused_2",
+ "brb_rc0_eop_inp_sync_fifo_push_error",
+ "brb_rc1_eop_inp_sync_fifo_push_error",
+ "brb_rc2_eop_inp_sync_fifo_push_error",
+ "brb_rc3_eop_inp_sync_fifo_push_error",
+ "brb_rc0_eop_out_sync_fifo_push_error",
+ "brb_rc1_eop_out_sync_fifo_push_error",
+ "brb_rc2_eop_out_sync_fifo_push_error",
+ "brb_rc3_eop_out_sync_fifo_push_error",
+ "brb_unused_3",
+ "brb_rc2_eop_error",
+ "brb_rc3_eop_error",
+ "brb_mac2_fc_cnt_error",
+ "brb_mac3_fc_cnt_error",
+ "brb_wc4_eop_fifo_error",
+ "brb_wc5_eop_fifo_error",
+ "brb_wc6_eop_fifo_error",
+ "brb_wc7_eop_fifo_error",
+};
+#else
+#define brb_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 brb_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg brb_int0_bb_a0 = {
+ 0, 32, brb_int0_bb_a0_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4
+};
+
+static const u16 brb_int1_bb_a0_attn_idx[30] = {
+ 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+};
+
+static struct attn_hw_reg brb_int1_bb_a0 = {
+ 1, 30, brb_int1_bb_a0_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc
+};
+
+static const u16 brb_int2_bb_a0_attn_idx[28] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+};
+
+static struct attn_hw_reg brb_int2_bb_a0 = {
+ 2, 28, brb_int2_bb_a0_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4
+};
+
+static const u16 brb_int3_bb_a0_attn_idx[31] = {
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122,
+};
+
+static struct attn_hw_reg brb_int3_bb_a0 = {
+ 3, 31, brb_int3_bb_a0_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c
+};
+
+static const u16 brb_int4_bb_a0_attn_idx[27] = {
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+};
+
+static struct attn_hw_reg brb_int4_bb_a0 = {
+ 4, 27, brb_int4_bb_a0_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124
+};
+
+static const u16 brb_int5_bb_a0_attn_idx[1] = {
+ 150,
+};
+
+static struct attn_hw_reg brb_int5_bb_a0 = {
+ 5, 1, brb_int5_bb_a0_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c
+};
+
+static const u16 brb_int6_bb_a0_attn_idx[8] = {
+ 151, 152, 153, 154, 155, 156, 157, 158,
+};
+
+static struct attn_hw_reg brb_int6_bb_a0 = {
+ 6, 8, brb_int6_bb_a0_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154
+};
+
+static const u16 brb_int7_bb_a0_attn_idx[32] = {
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173,
+ 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189,
+ 190,
+};
+
+static struct attn_hw_reg brb_int7_bb_a0 = {
+ 7, 32, brb_int7_bb_a0_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c
+};
+
+static const u16 brb_int8_bb_a0_attn_idx[17] = {
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205,
+ 206, 207,
+};
+
+static struct attn_hw_reg brb_int8_bb_a0 = {
+ 8, 17, brb_int8_bb_a0_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188
+};
+
+static const u16 brb_int9_bb_a0_attn_idx[1] = {
+ 208,
+};
+
+static struct attn_hw_reg brb_int9_bb_a0 = {
+ 9, 1, brb_int9_bb_a0_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0
+};
+
+static const u16 brb_int10_bb_a0_attn_idx[14] = {
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 224, 225,
+};
+
+static struct attn_hw_reg brb_int10_bb_a0 = {
+ 10, 14, brb_int10_bb_a0_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc,
+ 0x3401b8
+};
+
+static const u16 brb_int11_bb_a0_attn_idx[8] = {
+ 229, 230, 231, 232, 233, 234, 235, 236,
+};
+
+static struct attn_hw_reg brb_int11_bb_a0 = {
+ 11, 8, brb_int11_bb_a0_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0
+};
+
+static struct attn_hw_reg *brb_int_bb_a0_regs[12] = {
+ &brb_int0_bb_a0, &brb_int1_bb_a0, &brb_int2_bb_a0, &brb_int3_bb_a0,
+ &brb_int4_bb_a0, &brb_int5_bb_a0, &brb_int6_bb_a0, &brb_int7_bb_a0,
+ &brb_int8_bb_a0, &brb_int9_bb_a0,
+ &brb_int10_bb_a0, &brb_int11_bb_a0,
+};
+
+static const u16 brb_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg brb_int0_bb_b0 = {
+ 0, 32, brb_int0_bb_b0_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4
+};
+
+static const u16 brb_int1_bb_b0_attn_idx[30] = {
+ 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+};
+
+static struct attn_hw_reg brb_int1_bb_b0 = {
+ 1, 30, brb_int1_bb_b0_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc
+};
+
+static const u16 brb_int2_bb_b0_attn_idx[28] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+};
+
+static struct attn_hw_reg brb_int2_bb_b0 = {
+ 2, 28, brb_int2_bb_b0_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4
+};
+
+static const u16 brb_int3_bb_b0_attn_idx[31] = {
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122,
+};
+
+static struct attn_hw_reg brb_int3_bb_b0 = {
+ 3, 31, brb_int3_bb_b0_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c
+};
+
+static const u16 brb_int4_bb_b0_attn_idx[27] = {
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+};
+
+static struct attn_hw_reg brb_int4_bb_b0 = {
+ 4, 27, brb_int4_bb_b0_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124
+};
+
+static const u16 brb_int5_bb_b0_attn_idx[1] = {
+ 150,
+};
+
+static struct attn_hw_reg brb_int5_bb_b0 = {
+ 5, 1, brb_int5_bb_b0_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c
+};
+
+static const u16 brb_int6_bb_b0_attn_idx[8] = {
+ 151, 152, 153, 154, 155, 156, 157, 158,
+};
+
+static struct attn_hw_reg brb_int6_bb_b0 = {
+ 6, 8, brb_int6_bb_b0_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154
+};
+
+static const u16 brb_int7_bb_b0_attn_idx[32] = {
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173,
+ 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189,
+ 190,
+};
+
+static struct attn_hw_reg brb_int7_bb_b0 = {
+ 7, 32, brb_int7_bb_b0_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c
+};
+
+static const u16 brb_int8_bb_b0_attn_idx[17] = {
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205,
+ 206, 207,
+};
+
+static struct attn_hw_reg brb_int8_bb_b0 = {
+ 8, 17, brb_int8_bb_b0_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188
+};
+
+static const u16 brb_int9_bb_b0_attn_idx[1] = {
+ 208,
+};
+
+static struct attn_hw_reg brb_int9_bb_b0 = {
+ 9, 1, brb_int9_bb_b0_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0
+};
+
+static const u16 brb_int10_bb_b0_attn_idx[14] = {
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 224, 225,
+};
+
+static struct attn_hw_reg brb_int10_bb_b0 = {
+ 10, 14, brb_int10_bb_b0_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc,
+ 0x3401b8
+};
+
+static const u16 brb_int11_bb_b0_attn_idx[8] = {
+ 229, 230, 231, 232, 233, 234, 235, 236,
+};
+
+static struct attn_hw_reg brb_int11_bb_b0 = {
+ 11, 8, brb_int11_bb_b0_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0
+};
+
+static struct attn_hw_reg *brb_int_bb_b0_regs[12] = {
+ &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0,
+ &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0,
+ &brb_int8_bb_b0, &brb_int9_bb_b0,
+ &brb_int10_bb_b0, &brb_int11_bb_b0,
+};
+
+static const u16 brb_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg brb_int0_k2 = {
+ 0, 32, brb_int0_k2_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4
+};
+
+static const u16 brb_int1_k2_attn_idx[30] = {
+ 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+};
+
+static struct attn_hw_reg brb_int1_k2 = {
+ 1, 30, brb_int1_k2_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc
+};
+
+static const u16 brb_int2_k2_attn_idx[28] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+};
+
+static struct attn_hw_reg brb_int2_k2 = {
+ 2, 28, brb_int2_k2_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4
+};
+
+static const u16 brb_int3_k2_attn_idx[31] = {
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122,
+};
+
+static struct attn_hw_reg brb_int3_k2 = {
+ 3, 31, brb_int3_k2_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c
+};
+
+static const u16 brb_int4_k2_attn_idx[27] = {
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+};
+
+static struct attn_hw_reg brb_int4_k2 = {
+ 4, 27, brb_int4_k2_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124
+};
+
+static const u16 brb_int5_k2_attn_idx[1] = {
+ 150,
+};
+
+static struct attn_hw_reg brb_int5_k2 = {
+ 5, 1, brb_int5_k2_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c
+};
+
+static const u16 brb_int6_k2_attn_idx[8] = {
+ 151, 152, 153, 154, 155, 156, 157, 158,
+};
+
+static struct attn_hw_reg brb_int6_k2 = {
+ 6, 8, brb_int6_k2_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154
+};
+
+static const u16 brb_int7_k2_attn_idx[32] = {
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173,
+ 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189,
+ 190,
+};
+
+static struct attn_hw_reg brb_int7_k2 = {
+ 7, 32, brb_int7_k2_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c
+};
+
+static const u16 brb_int8_k2_attn_idx[17] = {
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205,
+ 206, 207,
+};
+
+static struct attn_hw_reg brb_int8_k2 = {
+ 8, 17, brb_int8_k2_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188
+};
+
+static const u16 brb_int9_k2_attn_idx[1] = {
+ 208,
+};
+
+static struct attn_hw_reg brb_int9_k2 = {
+ 9, 1, brb_int9_k2_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0
+};
+
+static const u16 brb_int10_k2_attn_idx[18] = {
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 222, 223,
+ 224,
+ 225, 226, 227,
+};
+
+static struct attn_hw_reg brb_int10_k2 = {
+ 10, 18, brb_int10_k2_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8
+};
+
+static const u16 brb_int11_k2_attn_idx[8] = {
+ 229, 230, 231, 232, 233, 234, 235, 236,
+};
+
+static struct attn_hw_reg brb_int11_k2 = {
+ 11, 8, brb_int11_k2_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0
+};
+
+static struct attn_hw_reg *brb_int_k2_regs[12] = {
+ &brb_int0_k2, &brb_int1_k2, &brb_int2_k2, &brb_int3_k2, &brb_int4_k2,
+ &brb_int5_k2, &brb_int6_k2, &brb_int7_k2, &brb_int8_k2, &brb_int9_k2,
+ &brb_int10_k2, &brb_int11_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *brb_prty_attn_desc[75] = {
+ "brb_ll_bank0_mem_prty",
+ "brb_ll_bank1_mem_prty",
+ "brb_ll_bank2_mem_prty",
+ "brb_ll_bank3_mem_prty",
+ "brb_datapath_registers",
+ "brb_mem001_i_ecc_rf_int",
+ "brb_mem008_i_ecc_rf_int",
+ "brb_mem009_i_ecc_rf_int",
+ "brb_mem010_i_ecc_rf_int",
+ "brb_mem011_i_ecc_rf_int",
+ "brb_mem012_i_ecc_rf_int",
+ "brb_mem013_i_ecc_rf_int",
+ "brb_mem014_i_ecc_rf_int",
+ "brb_mem015_i_ecc_rf_int",
+ "brb_mem016_i_ecc_rf_int",
+ "brb_mem002_i_ecc_rf_int",
+ "brb_mem003_i_ecc_rf_int",
+ "brb_mem004_i_ecc_rf_int",
+ "brb_mem005_i_ecc_rf_int",
+ "brb_mem006_i_ecc_rf_int",
+ "brb_mem007_i_ecc_rf_int",
+ "brb_mem070_i_mem_prty",
+ "brb_mem069_i_mem_prty",
+ "brb_mem053_i_mem_prty",
+ "brb_mem054_i_mem_prty",
+ "brb_mem055_i_mem_prty",
+ "brb_mem056_i_mem_prty",
+ "brb_mem057_i_mem_prty",
+ "brb_mem058_i_mem_prty",
+ "brb_mem059_i_mem_prty",
+ "brb_mem060_i_mem_prty",
+ "brb_mem061_i_mem_prty",
+ "brb_mem062_i_mem_prty",
+ "brb_mem063_i_mem_prty",
+ "brb_mem064_i_mem_prty",
+ "brb_mem065_i_mem_prty",
+ "brb_mem045_i_mem_prty",
+ "brb_mem046_i_mem_prty",
+ "brb_mem047_i_mem_prty",
+ "brb_mem048_i_mem_prty",
+ "brb_mem049_i_mem_prty",
+ "brb_mem050_i_mem_prty",
+ "brb_mem051_i_mem_prty",
+ "brb_mem052_i_mem_prty",
+ "brb_mem041_i_mem_prty",
+ "brb_mem042_i_mem_prty",
+ "brb_mem043_i_mem_prty",
+ "brb_mem044_i_mem_prty",
+ "brb_mem040_i_mem_prty",
+ "brb_mem035_i_mem_prty",
+ "brb_mem066_i_mem_prty",
+ "brb_mem067_i_mem_prty",
+ "brb_mem068_i_mem_prty",
+ "brb_mem030_i_mem_prty",
+ "brb_mem031_i_mem_prty",
+ "brb_mem032_i_mem_prty",
+ "brb_mem033_i_mem_prty",
+ "brb_mem037_i_mem_prty",
+ "brb_mem038_i_mem_prty",
+ "brb_mem034_i_mem_prty",
+ "brb_mem036_i_mem_prty",
+ "brb_mem017_i_mem_prty",
+ "brb_mem018_i_mem_prty",
+ "brb_mem019_i_mem_prty",
+ "brb_mem020_i_mem_prty",
+ "brb_mem021_i_mem_prty",
+ "brb_mem022_i_mem_prty",
+ "brb_mem023_i_mem_prty",
+ "brb_mem024_i_mem_prty",
+ "brb_mem029_i_mem_prty",
+ "brb_mem026_i_mem_prty",
+ "brb_mem027_i_mem_prty",
+ "brb_mem028_i_mem_prty",
+ "brb_mem025_i_mem_prty",
+ "brb_mem039_i_mem_prty",
+};
+#else
+#define brb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 brb_prty1_bb_a0_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 23, 24, 36,
+ 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 49,
+};
+
+static struct attn_hw_reg brb_prty1_bb_a0 = {
+ 0, 31, brb_prty1_bb_a0_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404
+};
+
+static const u16 brb_prty2_bb_a0_attn_idx[19] = {
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 69, 70, 71, 72, 73, 74,
+ 48,
+};
+
+static struct attn_hw_reg brb_prty2_bb_a0 = {
+ 1, 19, brb_prty2_bb_a0_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414
+};
+
+static struct attn_hw_reg *brb_prty_bb_a0_regs[2] = {
+ &brb_prty1_bb_a0, &brb_prty2_bb_a0,
+};
+
+static const u16 brb_prty0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg brb_prty0_bb_b0 = {
+ 0, 5, brb_prty0_bb_b0_attn_idx, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0
+};
+
+static const u16 brb_prty1_bb_b0_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 23, 24, 36,
+ 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg brb_prty1_bb_b0 = {
+ 1, 31, brb_prty1_bb_b0_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404
+};
+
+static const u16 brb_prty2_bb_b0_attn_idx[14] = {
+ 53, 54, 55, 56, 59, 61, 62, 63, 64, 69, 70, 71, 72, 73,
+};
+
+static struct attn_hw_reg brb_prty2_bb_b0 = {
+ 2, 14, brb_prty2_bb_b0_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414
+};
+
+static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = {
+ &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0,
+};
+
+static const u16 brb_prty0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg brb_prty0_k2 = {
+ 0, 5, brb_prty0_k2_attn_idx, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0
+};
+
+static const u16 brb_prty1_k2_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg brb_prty1_k2 = {
+ 1, 31, brb_prty1_k2_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404
+};
+
+static const u16 brb_prty2_k2_attn_idx[30] = {
+ 50, 51, 52, 36, 37, 38, 39, 40, 41, 42, 43, 47, 53, 54, 55, 56, 57, 58,
+ 59, 49, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+};
+
+static struct attn_hw_reg brb_prty2_k2 = {
+ 2, 30, brb_prty2_k2_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414
+};
+
+static struct attn_hw_reg *brb_prty_k2_regs[3] = {
+ &brb_prty0_k2, &brb_prty1_k2, &brb_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *src_int_attn_desc[1] = {
+ "src_address_error",
+};
+#else
+#define src_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 src_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg src_int0_bb_a0 = {
+ 0, 1, src_int0_bb_a0_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4
+};
+
+static struct attn_hw_reg *src_int_bb_a0_regs[1] = {
+ &src_int0_bb_a0,
+};
+
+static const u16 src_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg src_int0_bb_b0 = {
+ 0, 1, src_int0_bb_b0_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4
+};
+
+static struct attn_hw_reg *src_int_bb_b0_regs[1] = {
+ &src_int0_bb_b0,
+};
+
+static const u16 src_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg src_int0_k2 = {
+ 0, 1, src_int0_k2_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4
+};
+
+static struct attn_hw_reg *src_int_k2_regs[1] = {
+ &src_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *prs_int_attn_desc[2] = {
+ "prs_address_error",
+ "prs_lcid_validation_err",
+};
+#else
+#define prs_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 prs_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_int0_bb_a0 = {
+ 0, 2, prs_int0_bb_a0_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044
+};
+
+static struct attn_hw_reg *prs_int_bb_a0_regs[1] = {
+ &prs_int0_bb_a0,
+};
+
+static const u16 prs_int0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_int0_bb_b0 = {
+ 0, 2, prs_int0_bb_b0_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044
+};
+
+static struct attn_hw_reg *prs_int_bb_b0_regs[1] = {
+ &prs_int0_bb_b0,
+};
+
+static const u16 prs_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_int0_k2 = {
+ 0, 2, prs_int0_k2_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044
+};
+
+static struct attn_hw_reg *prs_int_k2_regs[1] = {
+ &prs_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *prs_prty_attn_desc[75] = {
+ "prs_cam_parity",
+ "prs_gft_cam_parity",
+ "prs_mem011_i_ecc_rf_int",
+ "prs_mem012_i_ecc_rf_int",
+ "prs_mem016_i_ecc_rf_int",
+ "prs_mem017_i_ecc_rf_int",
+ "prs_mem021_i_ecc_rf_int",
+ "prs_mem022_i_ecc_rf_int",
+ "prs_mem026_i_ecc_rf_int",
+ "prs_mem027_i_ecc_rf_int",
+ "prs_mem064_i_mem_prty",
+ "prs_mem044_i_mem_prty",
+ "prs_mem043_i_mem_prty",
+ "prs_mem037_i_mem_prty",
+ "prs_mem033_i_mem_prty",
+ "prs_mem034_i_mem_prty",
+ "prs_mem035_i_mem_prty",
+ "prs_mem036_i_mem_prty",
+ "prs_mem029_i_mem_prty",
+ "prs_mem030_i_mem_prty",
+ "prs_mem031_i_mem_prty",
+ "prs_mem032_i_mem_prty",
+ "prs_mem007_i_mem_prty",
+ "prs_mem028_i_mem_prty",
+ "prs_mem039_i_mem_prty",
+ "prs_mem040_i_mem_prty",
+ "prs_mem058_i_mem_prty",
+ "prs_mem059_i_mem_prty",
+ "prs_mem041_i_mem_prty",
+ "prs_mem042_i_mem_prty",
+ "prs_mem060_i_mem_prty",
+ "prs_mem061_i_mem_prty",
+ "prs_mem009_i_mem_prty",
+ "prs_mem009_i_ecc_rf_int",
+ "prs_mem010_i_ecc_rf_int",
+ "prs_mem014_i_ecc_rf_int",
+ "prs_mem015_i_ecc_rf_int",
+ "prs_mem026_i_mem_prty",
+ "prs_mem025_i_mem_prty",
+ "prs_mem021_i_mem_prty",
+ "prs_mem019_i_mem_prty",
+ "prs_mem020_i_mem_prty",
+ "prs_mem017_i_mem_prty",
+ "prs_mem018_i_mem_prty",
+ "prs_mem005_i_mem_prty",
+ "prs_mem016_i_mem_prty",
+ "prs_mem023_i_mem_prty",
+ "prs_mem024_i_mem_prty",
+ "prs_mem008_i_mem_prty",
+ "prs_mem012_i_mem_prty",
+ "prs_mem013_i_mem_prty",
+ "prs_mem006_i_mem_prty",
+ "prs_mem011_i_mem_prty",
+ "prs_mem003_i_mem_prty",
+ "prs_mem004_i_mem_prty",
+ "prs_mem027_i_mem_prty",
+ "prs_mem010_i_mem_prty",
+ "prs_mem014_i_mem_prty",
+ "prs_mem015_i_mem_prty",
+ "prs_mem054_i_mem_prty",
+ "prs_mem055_i_mem_prty",
+ "prs_mem056_i_mem_prty",
+ "prs_mem057_i_mem_prty",
+ "prs_mem046_i_mem_prty",
+ "prs_mem047_i_mem_prty",
+ "prs_mem048_i_mem_prty",
+ "prs_mem049_i_mem_prty",
+ "prs_mem050_i_mem_prty",
+ "prs_mem051_i_mem_prty",
+ "prs_mem052_i_mem_prty",
+ "prs_mem053_i_mem_prty",
+ "prs_mem062_i_mem_prty",
+ "prs_mem045_i_mem_prty",
+ "prs_mem002_i_mem_prty",
+ "prs_mem001_i_mem_prty",
+};
+#else
+#define prs_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 prs_prty0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg prs_prty0_bb_a0 = {
+ 0, 1, prs_prty0_bb_a0_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054
+};
+
+static const u16 prs_prty1_bb_a0_attn_idx[31] = {
+ 13, 14, 15, 16, 18, 21, 22, 23, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+};
+
+static struct attn_hw_reg prs_prty1_bb_a0 = {
+ 1, 31, prs_prty1_bb_a0_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208
+};
+
+static const u16 prs_prty2_bb_a0_attn_idx[5] = {
+ 73, 74, 20, 17, 19,
+};
+
+static struct attn_hw_reg prs_prty2_bb_a0 = {
+ 2, 5, prs_prty2_bb_a0_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218
+};
+
+static struct attn_hw_reg *prs_prty_bb_a0_regs[3] = {
+ &prs_prty0_bb_a0, &prs_prty1_bb_a0, &prs_prty2_bb_a0,
+};
+
+static const u16 prs_prty0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_prty0_bb_b0 = {
+ 0, 2, prs_prty0_bb_b0_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054
+};
+
+static const u16 prs_prty1_bb_b0_attn_idx[31] = {
+ 13, 14, 15, 16, 18, 19, 21, 22, 23, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+};
+
+static struct attn_hw_reg prs_prty1_bb_b0 = {
+ 1, 31, prs_prty1_bb_b0_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208
+};
+
+static const u16 prs_prty2_bb_b0_attn_idx[5] = {
+ 73, 74, 20, 17, 55,
+};
+
+static struct attn_hw_reg prs_prty2_bb_b0 = {
+ 2, 5, prs_prty2_bb_b0_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218
+};
+
+static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = {
+ &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0,
+};
+
+static const u16 prs_prty0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_prty0_k2 = {
+ 0, 2, prs_prty0_k2_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054
+};
+
+static const u16 prs_prty1_k2_attn_idx[31] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+};
+
+static struct attn_hw_reg prs_prty1_k2 = {
+ 1, 31, prs_prty1_k2_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208
+};
+
+static const u16 prs_prty2_k2_attn_idx[31] = {
+ 56, 57, 58, 40, 41, 47, 38, 48, 50, 43, 46, 59, 60, 61, 62, 53, 54, 44,
+ 51, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+};
+
+static struct attn_hw_reg prs_prty2_k2 = {
+ 2, 31, prs_prty2_k2_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218
+};
+
+static struct attn_hw_reg *prs_prty_k2_regs[3] = {
+ &prs_prty0_k2, &prs_prty1_k2, &prs_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tsdm_int_attn_desc[28] = {
+ "tsdm_address_error",
+ "tsdm_inp_queue_error",
+ "tsdm_delay_fifo_error",
+ "tsdm_async_host_error",
+ "tsdm_prm_fifo_error",
+ "tsdm_ccfc_load_pend_error",
+ "tsdm_tcfc_load_pend_error",
+ "tsdm_dst_int_ram_wait_error",
+ "tsdm_dst_pas_buf_wait_error",
+ "tsdm_dst_pxp_immed_error",
+ "tsdm_dst_pxp_dst_pend_error",
+ "tsdm_dst_brb_src_pend_error",
+ "tsdm_dst_brb_src_addr_error",
+ "tsdm_rsp_brb_pend_error",
+ "tsdm_rsp_int_ram_pend_error",
+ "tsdm_rsp_brb_rd_data_error",
+ "tsdm_rsp_int_ram_rd_data_error",
+ "tsdm_rsp_pxp_rd_data_error",
+ "tsdm_cm_delay_error",
+ "tsdm_sh_delay_error",
+ "tsdm_cmpl_pend_error",
+ "tsdm_cprm_pend_error",
+ "tsdm_timer_addr_error",
+ "tsdm_timer_pend_error",
+ "tsdm_dorq_dpm_error",
+ "tsdm_dst_pxp_done_error",
+ "tsdm_xcm_rmt_buffer_error",
+ "tsdm_ycm_rmt_buffer_error",
+};
+#else
+#define tsdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tsdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg tsdm_int0_bb_a0 = {
+ 0, 26, tsdm_int0_bb_a0_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044
+};
+
+static struct attn_hw_reg *tsdm_int_bb_a0_regs[1] = {
+ &tsdm_int0_bb_a0,
+};
+
+static const u16 tsdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg tsdm_int0_bb_b0 = {
+ 0, 26, tsdm_int0_bb_b0_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044
+};
+
+static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = {
+ &tsdm_int0_bb_b0,
+};
+
+static const u16 tsdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg tsdm_int0_k2 = {
+ 0, 28, tsdm_int0_k2_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044
+};
+
+static struct attn_hw_reg *tsdm_int_k2_regs[1] = {
+ &tsdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tsdm_prty_attn_desc[10] = {
+ "tsdm_mem009_i_mem_prty",
+ "tsdm_mem008_i_mem_prty",
+ "tsdm_mem007_i_mem_prty",
+ "tsdm_mem006_i_mem_prty",
+ "tsdm_mem005_i_mem_prty",
+ "tsdm_mem002_i_mem_prty",
+ "tsdm_mem010_i_mem_prty",
+ "tsdm_mem001_i_mem_prty",
+ "tsdm_mem003_i_mem_prty",
+ "tsdm_mem004_i_mem_prty",
+};
+#else
+#define tsdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tsdm_prty1_bb_a0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tsdm_prty1_bb_a0 = {
+ 0, 10, tsdm_prty1_bb_a0_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208,
+ 0xfb0204
+};
+
+static struct attn_hw_reg *tsdm_prty_bb_a0_regs[1] = {
+ &tsdm_prty1_bb_a0,
+};
+
+static const u16 tsdm_prty1_bb_b0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tsdm_prty1_bb_b0 = {
+ 0, 10, tsdm_prty1_bb_b0_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208,
+ 0xfb0204
+};
+
+static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = {
+ &tsdm_prty1_bb_b0,
+};
+
+static const u16 tsdm_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tsdm_prty1_k2 = {
+ 0, 10, tsdm_prty1_k2_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204
+};
+
+static struct attn_hw_reg *tsdm_prty_k2_regs[1] = {
+ &tsdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *msdm_int_attn_desc[28] = {
+ "msdm_address_error",
+ "msdm_inp_queue_error",
+ "msdm_delay_fifo_error",
+ "msdm_async_host_error",
+ "msdm_prm_fifo_error",
+ "msdm_ccfc_load_pend_error",
+ "msdm_tcfc_load_pend_error",
+ "msdm_dst_int_ram_wait_error",
+ "msdm_dst_pas_buf_wait_error",
+ "msdm_dst_pxp_immed_error",
+ "msdm_dst_pxp_dst_pend_error",
+ "msdm_dst_brb_src_pend_error",
+ "msdm_dst_brb_src_addr_error",
+ "msdm_rsp_brb_pend_error",
+ "msdm_rsp_int_ram_pend_error",
+ "msdm_rsp_brb_rd_data_error",
+ "msdm_rsp_int_ram_rd_data_error",
+ "msdm_rsp_pxp_rd_data_error",
+ "msdm_cm_delay_error",
+ "msdm_sh_delay_error",
+ "msdm_cmpl_pend_error",
+ "msdm_cprm_pend_error",
+ "msdm_timer_addr_error",
+ "msdm_timer_pend_error",
+ "msdm_dorq_dpm_error",
+ "msdm_dst_pxp_done_error",
+ "msdm_xcm_rmt_buffer_error",
+ "msdm_ycm_rmt_buffer_error",
+};
+#else
+#define msdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 msdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg msdm_int0_bb_a0 = {
+ 0, 26, msdm_int0_bb_a0_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044
+};
+
+static struct attn_hw_reg *msdm_int_bb_a0_regs[1] = {
+ &msdm_int0_bb_a0,
+};
+
+static const u16 msdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg msdm_int0_bb_b0 = {
+ 0, 26, msdm_int0_bb_b0_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044
+};
+
+static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = {
+ &msdm_int0_bb_b0,
+};
+
+static const u16 msdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg msdm_int0_k2 = {
+ 0, 28, msdm_int0_k2_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044
+};
+
+static struct attn_hw_reg *msdm_int_k2_regs[1] = {
+ &msdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *msdm_prty_attn_desc[11] = {
+ "msdm_mem009_i_mem_prty",
+ "msdm_mem008_i_mem_prty",
+ "msdm_mem007_i_mem_prty",
+ "msdm_mem006_i_mem_prty",
+ "msdm_mem005_i_mem_prty",
+ "msdm_mem002_i_mem_prty",
+ "msdm_mem011_i_mem_prty",
+ "msdm_mem001_i_mem_prty",
+ "msdm_mem003_i_mem_prty",
+ "msdm_mem004_i_mem_prty",
+ "msdm_mem010_i_mem_prty",
+};
+#else
+#define msdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 msdm_prty1_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg msdm_prty1_bb_a0 = {
+ 0, 11, msdm_prty1_bb_a0_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208,
+ 0xfc0204
+};
+
+static struct attn_hw_reg *msdm_prty_bb_a0_regs[1] = {
+ &msdm_prty1_bb_a0,
+};
+
+static const u16 msdm_prty1_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg msdm_prty1_bb_b0 = {
+ 0, 11, msdm_prty1_bb_b0_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208,
+ 0xfc0204
+};
+
+static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = {
+ &msdm_prty1_bb_b0,
+};
+
+static const u16 msdm_prty1_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg msdm_prty1_k2 = {
+ 0, 11, msdm_prty1_k2_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204
+};
+
+static struct attn_hw_reg *msdm_prty_k2_regs[1] = {
+ &msdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *usdm_int_attn_desc[28] = {
+ "usdm_address_error",
+ "usdm_inp_queue_error",
+ "usdm_delay_fifo_error",
+ "usdm_async_host_error",
+ "usdm_prm_fifo_error",
+ "usdm_ccfc_load_pend_error",
+ "usdm_tcfc_load_pend_error",
+ "usdm_dst_int_ram_wait_error",
+ "usdm_dst_pas_buf_wait_error",
+ "usdm_dst_pxp_immed_error",
+ "usdm_dst_pxp_dst_pend_error",
+ "usdm_dst_brb_src_pend_error",
+ "usdm_dst_brb_src_addr_error",
+ "usdm_rsp_brb_pend_error",
+ "usdm_rsp_int_ram_pend_error",
+ "usdm_rsp_brb_rd_data_error",
+ "usdm_rsp_int_ram_rd_data_error",
+ "usdm_rsp_pxp_rd_data_error",
+ "usdm_cm_delay_error",
+ "usdm_sh_delay_error",
+ "usdm_cmpl_pend_error",
+ "usdm_cprm_pend_error",
+ "usdm_timer_addr_error",
+ "usdm_timer_pend_error",
+ "usdm_dorq_dpm_error",
+ "usdm_dst_pxp_done_error",
+ "usdm_xcm_rmt_buffer_error",
+ "usdm_ycm_rmt_buffer_error",
+};
+#else
+#define usdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 usdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg usdm_int0_bb_a0 = {
+ 0, 26, usdm_int0_bb_a0_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044
+};
+
+static struct attn_hw_reg *usdm_int_bb_a0_regs[1] = {
+ &usdm_int0_bb_a0,
+};
+
+static const u16 usdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg usdm_int0_bb_b0 = {
+ 0, 26, usdm_int0_bb_b0_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044
+};
+
+static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = {
+ &usdm_int0_bb_b0,
+};
+
+static const u16 usdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg usdm_int0_k2 = {
+ 0, 28, usdm_int0_k2_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044
+};
+
+static struct attn_hw_reg *usdm_int_k2_regs[1] = {
+ &usdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *usdm_prty_attn_desc[10] = {
+ "usdm_mem008_i_mem_prty",
+ "usdm_mem007_i_mem_prty",
+ "usdm_mem006_i_mem_prty",
+ "usdm_mem005_i_mem_prty",
+ "usdm_mem002_i_mem_prty",
+ "usdm_mem010_i_mem_prty",
+ "usdm_mem001_i_mem_prty",
+ "usdm_mem003_i_mem_prty",
+ "usdm_mem004_i_mem_prty",
+ "usdm_mem009_i_mem_prty",
+};
+#else
+#define usdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 usdm_prty1_bb_a0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg usdm_prty1_bb_a0 = {
+ 0, 10, usdm_prty1_bb_a0_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208,
+ 0xfd0204
+};
+
+static struct attn_hw_reg *usdm_prty_bb_a0_regs[1] = {
+ &usdm_prty1_bb_a0,
+};
+
+static const u16 usdm_prty1_bb_b0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg usdm_prty1_bb_b0 = {
+ 0, 10, usdm_prty1_bb_b0_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208,
+ 0xfd0204
+};
+
+static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = {
+ &usdm_prty1_bb_b0,
+};
+
+static const u16 usdm_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg usdm_prty1_k2 = {
+ 0, 10, usdm_prty1_k2_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204
+};
+
+static struct attn_hw_reg *usdm_prty_k2_regs[1] = {
+ &usdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xsdm_int_attn_desc[28] = {
+ "xsdm_address_error",
+ "xsdm_inp_queue_error",
+ "xsdm_delay_fifo_error",
+ "xsdm_async_host_error",
+ "xsdm_prm_fifo_error",
+ "xsdm_ccfc_load_pend_error",
+ "xsdm_tcfc_load_pend_error",
+ "xsdm_dst_int_ram_wait_error",
+ "xsdm_dst_pas_buf_wait_error",
+ "xsdm_dst_pxp_immed_error",
+ "xsdm_dst_pxp_dst_pend_error",
+ "xsdm_dst_brb_src_pend_error",
+ "xsdm_dst_brb_src_addr_error",
+ "xsdm_rsp_brb_pend_error",
+ "xsdm_rsp_int_ram_pend_error",
+ "xsdm_rsp_brb_rd_data_error",
+ "xsdm_rsp_int_ram_rd_data_error",
+ "xsdm_rsp_pxp_rd_data_error",
+ "xsdm_cm_delay_error",
+ "xsdm_sh_delay_error",
+ "xsdm_cmpl_pend_error",
+ "xsdm_cprm_pend_error",
+ "xsdm_timer_addr_error",
+ "xsdm_timer_pend_error",
+ "xsdm_dorq_dpm_error",
+ "xsdm_dst_pxp_done_error",
+ "xsdm_xcm_rmt_buffer_error",
+ "xsdm_ycm_rmt_buffer_error",
+};
+#else
+#define xsdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg xsdm_int0_bb_a0 = {
+ 0, 26, xsdm_int0_bb_a0_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044
+};
+
+static struct attn_hw_reg *xsdm_int_bb_a0_regs[1] = {
+ &xsdm_int0_bb_a0,
+};
+
+static const u16 xsdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg xsdm_int0_bb_b0 = {
+ 0, 26, xsdm_int0_bb_b0_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044
+};
+
+static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = {
+ &xsdm_int0_bb_b0,
+};
+
+static const u16 xsdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg xsdm_int0_k2 = {
+ 0, 28, xsdm_int0_k2_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044
+};
+
+static struct attn_hw_reg *xsdm_int_k2_regs[1] = {
+ &xsdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xsdm_prty_attn_desc[10] = {
+ "xsdm_mem009_i_mem_prty",
+ "xsdm_mem008_i_mem_prty",
+ "xsdm_mem007_i_mem_prty",
+ "xsdm_mem006_i_mem_prty",
+ "xsdm_mem003_i_mem_prty",
+ "xsdm_mem010_i_mem_prty",
+ "xsdm_mem002_i_mem_prty",
+ "xsdm_mem004_i_mem_prty",
+ "xsdm_mem005_i_mem_prty",
+ "xsdm_mem001_i_mem_prty",
+};
+#else
+#define xsdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsdm_prty1_bb_a0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsdm_prty1_bb_a0 = {
+ 0, 10, xsdm_prty1_bb_a0_attn_idx, 0xf80200, 0xf8020c, 0xf80208,
+ 0xf80204
+};
+
+static struct attn_hw_reg *xsdm_prty_bb_a0_regs[1] = {
+ &xsdm_prty1_bb_a0,
+};
+
+static const u16 xsdm_prty1_bb_b0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsdm_prty1_bb_b0 = {
+ 0, 10, xsdm_prty1_bb_b0_attn_idx, 0xf80200, 0xf8020c, 0xf80208,
+ 0xf80204
+};
+
+static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = {
+ &xsdm_prty1_bb_b0,
+};
+
+static const u16 xsdm_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsdm_prty1_k2 = {
+ 0, 10, xsdm_prty1_k2_attn_idx, 0xf80200, 0xf8020c, 0xf80208, 0xf80204
+};
+
+static struct attn_hw_reg *xsdm_prty_k2_regs[1] = {
+ &xsdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ysdm_int_attn_desc[28] = {
+ "ysdm_address_error",
+ "ysdm_inp_queue_error",
+ "ysdm_delay_fifo_error",
+ "ysdm_async_host_error",
+ "ysdm_prm_fifo_error",
+ "ysdm_ccfc_load_pend_error",
+ "ysdm_tcfc_load_pend_error",
+ "ysdm_dst_int_ram_wait_error",
+ "ysdm_dst_pas_buf_wait_error",
+ "ysdm_dst_pxp_immed_error",
+ "ysdm_dst_pxp_dst_pend_error",
+ "ysdm_dst_brb_src_pend_error",
+ "ysdm_dst_brb_src_addr_error",
+ "ysdm_rsp_brb_pend_error",
+ "ysdm_rsp_int_ram_pend_error",
+ "ysdm_rsp_brb_rd_data_error",
+ "ysdm_rsp_int_ram_rd_data_error",
+ "ysdm_rsp_pxp_rd_data_error",
+ "ysdm_cm_delay_error",
+ "ysdm_sh_delay_error",
+ "ysdm_cmpl_pend_error",
+ "ysdm_cprm_pend_error",
+ "ysdm_timer_addr_error",
+ "ysdm_timer_pend_error",
+ "ysdm_dorq_dpm_error",
+ "ysdm_dst_pxp_done_error",
+ "ysdm_xcm_rmt_buffer_error",
+ "ysdm_ycm_rmt_buffer_error",
+};
+#else
+#define ysdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg ysdm_int0_bb_a0 = {
+ 0, 26, ysdm_int0_bb_a0_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044
+};
+
+static struct attn_hw_reg *ysdm_int_bb_a0_regs[1] = {
+ &ysdm_int0_bb_a0,
+};
+
+static const u16 ysdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg ysdm_int0_bb_b0 = {
+ 0, 26, ysdm_int0_bb_b0_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044
+};
+
+static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = {
+ &ysdm_int0_bb_b0,
+};
+
+static const u16 ysdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg ysdm_int0_k2 = {
+ 0, 28, ysdm_int0_k2_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044
+};
+
+static struct attn_hw_reg *ysdm_int_k2_regs[1] = {
+ &ysdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ysdm_prty_attn_desc[9] = {
+ "ysdm_mem008_i_mem_prty",
+ "ysdm_mem007_i_mem_prty",
+ "ysdm_mem006_i_mem_prty",
+ "ysdm_mem005_i_mem_prty",
+ "ysdm_mem002_i_mem_prty",
+ "ysdm_mem009_i_mem_prty",
+ "ysdm_mem001_i_mem_prty",
+ "ysdm_mem003_i_mem_prty",
+ "ysdm_mem004_i_mem_prty",
+};
+#else
+#define ysdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysdm_prty1_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg ysdm_prty1_bb_a0 = {
+ 0, 9, ysdm_prty1_bb_a0_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204
+};
+
+static struct attn_hw_reg *ysdm_prty_bb_a0_regs[1] = {
+ &ysdm_prty1_bb_a0,
+};
+
+static const u16 ysdm_prty1_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg ysdm_prty1_bb_b0 = {
+ 0, 9, ysdm_prty1_bb_b0_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204
+};
+
+static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = {
+ &ysdm_prty1_bb_b0,
+};
+
+static const u16 ysdm_prty1_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg ysdm_prty1_k2 = {
+ 0, 9, ysdm_prty1_k2_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204
+};
+
+static struct attn_hw_reg *ysdm_prty_k2_regs[1] = {
+ &ysdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *psdm_int_attn_desc[28] = {
+ "psdm_address_error",
+ "psdm_inp_queue_error",
+ "psdm_delay_fifo_error",
+ "psdm_async_host_error",
+ "psdm_prm_fifo_error",
+ "psdm_ccfc_load_pend_error",
+ "psdm_tcfc_load_pend_error",
+ "psdm_dst_int_ram_wait_error",
+ "psdm_dst_pas_buf_wait_error",
+ "psdm_dst_pxp_immed_error",
+ "psdm_dst_pxp_dst_pend_error",
+ "psdm_dst_brb_src_pend_error",
+ "psdm_dst_brb_src_addr_error",
+ "psdm_rsp_brb_pend_error",
+ "psdm_rsp_int_ram_pend_error",
+ "psdm_rsp_brb_rd_data_error",
+ "psdm_rsp_int_ram_rd_data_error",
+ "psdm_rsp_pxp_rd_data_error",
+ "psdm_cm_delay_error",
+ "psdm_sh_delay_error",
+ "psdm_cmpl_pend_error",
+ "psdm_cprm_pend_error",
+ "psdm_timer_addr_error",
+ "psdm_timer_pend_error",
+ "psdm_dorq_dpm_error",
+ "psdm_dst_pxp_done_error",
+ "psdm_xcm_rmt_buffer_error",
+ "psdm_ycm_rmt_buffer_error",
+};
+#else
+#define psdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 psdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg psdm_int0_bb_a0 = {
+ 0, 26, psdm_int0_bb_a0_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044
+};
+
+static struct attn_hw_reg *psdm_int_bb_a0_regs[1] = {
+ &psdm_int0_bb_a0,
+};
+
+static const u16 psdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg psdm_int0_bb_b0 = {
+ 0, 26, psdm_int0_bb_b0_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044
+};
+
+static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = {
+ &psdm_int0_bb_b0,
+};
+
+static const u16 psdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg psdm_int0_k2 = {
+ 0, 28, psdm_int0_k2_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044
+};
+
+static struct attn_hw_reg *psdm_int_k2_regs[1] = {
+ &psdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *psdm_prty_attn_desc[9] = {
+ "psdm_mem008_i_mem_prty",
+ "psdm_mem007_i_mem_prty",
+ "psdm_mem006_i_mem_prty",
+ "psdm_mem005_i_mem_prty",
+ "psdm_mem002_i_mem_prty",
+ "psdm_mem009_i_mem_prty",
+ "psdm_mem001_i_mem_prty",
+ "psdm_mem003_i_mem_prty",
+ "psdm_mem004_i_mem_prty",
+};
+#else
+#define psdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 psdm_prty1_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psdm_prty1_bb_a0 = {
+ 0, 9, psdm_prty1_bb_a0_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204
+};
+
+static struct attn_hw_reg *psdm_prty_bb_a0_regs[1] = {
+ &psdm_prty1_bb_a0,
+};
+
+static const u16 psdm_prty1_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psdm_prty1_bb_b0 = {
+ 0, 9, psdm_prty1_bb_b0_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204
+};
+
+static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = {
+ &psdm_prty1_bb_b0,
+};
+
+static const u16 psdm_prty1_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psdm_prty1_k2 = {
+ 0, 9, psdm_prty1_k2_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204
+};
+
+static struct attn_hw_reg *psdm_prty_k2_regs[1] = {
+ &psdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tsem_int_attn_desc[46] = {
+ "tsem_address_error",
+ "tsem_fic_last_error",
+ "tsem_fic_length_error",
+ "tsem_fic_fifo_error",
+ "tsem_pas_buf_fifo_error",
+ "tsem_sync_fin_pop_error",
+ "tsem_sync_dra_wr_push_error",
+ "tsem_sync_dra_wr_pop_error",
+ "tsem_sync_dra_rd_push_error",
+ "tsem_sync_dra_rd_pop_error",
+ "tsem_sync_fin_push_error",
+ "tsem_sem_fast_address_error",
+ "tsem_cam_lsb_inp_fifo",
+ "tsem_cam_msb_inp_fifo",
+ "tsem_cam_out_fifo",
+ "tsem_fin_fifo",
+ "tsem_thread_fifo_error",
+ "tsem_thread_overrun",
+ "tsem_sync_ext_store_push_error",
+ "tsem_sync_ext_store_pop_error",
+ "tsem_sync_ext_load_push_error",
+ "tsem_sync_ext_load_pop_error",
+ "tsem_sync_ram_rd_push_error",
+ "tsem_sync_ram_rd_pop_error",
+ "tsem_sync_ram_wr_pop_error",
+ "tsem_sync_ram_wr_push_error",
+ "tsem_sync_dbg_push_error",
+ "tsem_sync_dbg_pop_error",
+ "tsem_dbg_fifo_error",
+ "tsem_cam_msb2_inp_fifo",
+ "tsem_vfc_interrupt",
+ "tsem_vfc_out_fifo_error",
+ "tsem_storm_stack_uf_attn",
+ "tsem_storm_stack_of_attn",
+ "tsem_storm_runtime_error",
+ "tsem_ext_load_pend_wr_error",
+ "tsem_thread_rls_orun_error",
+ "tsem_thread_rls_aloc_error",
+ "tsem_thread_rls_vld_error",
+ "tsem_ext_thread_oor_error",
+ "tsem_ord_id_fifo_error",
+ "tsem_invld_foc_error",
+ "tsem_ext_ld_len_error",
+ "tsem_thrd_ord_fifo_error",
+ "tsem_invld_thrd_ord_error",
+ "tsem_fast_memory_address_error",
+};
+#else
+#define tsem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tsem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tsem_int0_bb_a0 = {
+ 0, 32, tsem_int0_bb_a0_attn_idx, 0x1700040, 0x170004c, 0x1700048,
+ 0x1700044
+};
+
+static const u16 tsem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg tsem_int1_bb_a0 = {
+ 1, 13, tsem_int1_bb_a0_attn_idx, 0x1700050, 0x170005c, 0x1700058,
+ 0x1700054
+};
+
+static const u16 tsem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg tsem_fast_memory_int0_bb_a0 = {
+ 2, 1, tsem_fast_memory_int0_bb_a0_attn_idx, 0x1740040, 0x174004c,
+ 0x1740048, 0x1740044
+};
+
+static struct attn_hw_reg *tsem_int_bb_a0_regs[3] = {
+ &tsem_int0_bb_a0, &tsem_int1_bb_a0, &tsem_fast_memory_int0_bb_a0,
+};
+
+static const u16 tsem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tsem_int0_bb_b0 = {
+ 0, 32, tsem_int0_bb_b0_attn_idx, 0x1700040, 0x170004c, 0x1700048,
+ 0x1700044
+};
+
+static const u16 tsem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg tsem_int1_bb_b0 = {
+ 1, 13, tsem_int1_bb_b0_attn_idx, 0x1700050, 0x170005c, 0x1700058,
+ 0x1700054
+};
+
+static const u16 tsem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = {
+ 2, 1, tsem_fast_memory_int0_bb_b0_attn_idx, 0x1740040, 0x174004c,
+ 0x1740048, 0x1740044
+};
+
+static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = {
+ &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0,
+};
+
+static const u16 tsem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tsem_int0_k2 = {
+ 0, 32, tsem_int0_k2_attn_idx, 0x1700040, 0x170004c, 0x1700048,
+ 0x1700044
+};
+
+static const u16 tsem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg tsem_int1_k2 = {
+ 1, 13, tsem_int1_k2_attn_idx, 0x1700050, 0x170005c, 0x1700058,
+ 0x1700054
+};
+
+static const u16 tsem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg tsem_fast_memory_int0_k2 = {
+ 2, 1, tsem_fast_memory_int0_k2_attn_idx, 0x1740040, 0x174004c,
+ 0x1740048,
+ 0x1740044
+};
+
+static struct attn_hw_reg *tsem_int_k2_regs[3] = {
+ &tsem_int0_k2, &tsem_int1_k2, &tsem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tsem_prty_attn_desc[23] = {
+ "tsem_vfc_rbc_parity_error",
+ "tsem_storm_rf_parity_error",
+ "tsem_reg_gen_parity_error",
+ "tsem_mem005_i_ecc_0_rf_int",
+ "tsem_mem005_i_ecc_1_rf_int",
+ "tsem_mem004_i_mem_prty",
+ "tsem_mem002_i_mem_prty",
+ "tsem_mem003_i_mem_prty",
+ "tsem_mem001_i_mem_prty",
+ "tsem_fast_memory_mem024_i_mem_prty",
+ "tsem_fast_memory_mem023_i_mem_prty",
+ "tsem_fast_memory_mem022_i_mem_prty",
+ "tsem_fast_memory_mem021_i_mem_prty",
+ "tsem_fast_memory_mem020_i_mem_prty",
+ "tsem_fast_memory_mem019_i_mem_prty",
+ "tsem_fast_memory_mem018_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "tsem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "tsem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define tsem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tsem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg tsem_prty0_bb_a0 = {
+ 0, 3, tsem_prty0_bb_a0_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0,
+ 0x17000cc
+};
+
+static const u16 tsem_prty1_bb_a0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tsem_prty1_bb_a0 = {
+ 1, 6, tsem_prty1_bb_a0_attn_idx, 0x1700200, 0x170020c, 0x1700208,
+ 0x1700204
+};
+
+static const u16 tsem_fast_memory_vfc_config_prty1_bb_a0_attn_idx[6] = {
+ 16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_a0 = {
+ 2, 6, tsem_fast_memory_vfc_config_prty1_bb_a0_attn_idx, 0x174a200,
+ 0x174a20c, 0x174a208, 0x174a204
+};
+
+static struct attn_hw_reg *tsem_prty_bb_a0_regs[3] = {
+ &tsem_prty0_bb_a0, &tsem_prty1_bb_a0,
+ &tsem_fast_memory_vfc_config_prty1_bb_a0,
+};
+
+static const u16 tsem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg tsem_prty0_bb_b0 = {
+ 0, 3, tsem_prty0_bb_b0_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0,
+ 0x17000cc
+};
+
+static const u16 tsem_prty1_bb_b0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tsem_prty1_bb_b0 = {
+ 1, 6, tsem_prty1_bb_b0_attn_idx, 0x1700200, 0x170020c, 0x1700208,
+ 0x1700204
+};
+
+static const u16 tsem_fast_memory_vfc_config_prty1_bb_b0_attn_idx[6] = {
+ 16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = {
+ 2, 6, tsem_fast_memory_vfc_config_prty1_bb_b0_attn_idx, 0x174a200,
+ 0x174a20c, 0x174a208, 0x174a204
+};
+
+static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = {
+ &tsem_prty0_bb_b0, &tsem_prty1_bb_b0,
+ &tsem_fast_memory_vfc_config_prty1_bb_b0,
+};
+
+static const u16 tsem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg tsem_prty0_k2 = {
+ 0, 3, tsem_prty0_k2_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0,
+ 0x17000cc
+};
+
+static const u16 tsem_prty1_k2_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tsem_prty1_k2 = {
+ 1, 6, tsem_prty1_k2_attn_idx, 0x1700200, 0x170020c, 0x1700208,
+ 0x1700204
+};
+
+static const u16 tsem_fast_memory_prty1_k2_attn_idx[7] = {
+ 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg tsem_fast_memory_prty1_k2 = {
+ 2, 7, tsem_fast_memory_prty1_k2_attn_idx, 0x1740200, 0x174020c,
+ 0x1740208,
+ 0x1740204
+};
+
+static const u16 tsem_fast_memory_vfc_config_prty1_k2_attn_idx[6] = {
+ 16, 17, 18, 19, 20, 21,
+};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_k2 = {
+ 3, 6, tsem_fast_memory_vfc_config_prty1_k2_attn_idx, 0x174a200,
+ 0x174a20c,
+ 0x174a208, 0x174a204
+};
+
+static struct attn_hw_reg *tsem_prty_k2_regs[4] = {
+ &tsem_prty0_k2, &tsem_prty1_k2, &tsem_fast_memory_prty1_k2,
+ &tsem_fast_memory_vfc_config_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *msem_int_attn_desc[46] = {
+ "msem_address_error",
+ "msem_fic_last_error",
+ "msem_fic_length_error",
+ "msem_fic_fifo_error",
+ "msem_pas_buf_fifo_error",
+ "msem_sync_fin_pop_error",
+ "msem_sync_dra_wr_push_error",
+ "msem_sync_dra_wr_pop_error",
+ "msem_sync_dra_rd_push_error",
+ "msem_sync_dra_rd_pop_error",
+ "msem_sync_fin_push_error",
+ "msem_sem_fast_address_error",
+ "msem_cam_lsb_inp_fifo",
+ "msem_cam_msb_inp_fifo",
+ "msem_cam_out_fifo",
+ "msem_fin_fifo",
+ "msem_thread_fifo_error",
+ "msem_thread_overrun",
+ "msem_sync_ext_store_push_error",
+ "msem_sync_ext_store_pop_error",
+ "msem_sync_ext_load_push_error",
+ "msem_sync_ext_load_pop_error",
+ "msem_sync_ram_rd_push_error",
+ "msem_sync_ram_rd_pop_error",
+ "msem_sync_ram_wr_pop_error",
+ "msem_sync_ram_wr_push_error",
+ "msem_sync_dbg_push_error",
+ "msem_sync_dbg_pop_error",
+ "msem_dbg_fifo_error",
+ "msem_cam_msb2_inp_fifo",
+ "msem_vfc_interrupt",
+ "msem_vfc_out_fifo_error",
+ "msem_storm_stack_uf_attn",
+ "msem_storm_stack_of_attn",
+ "msem_storm_runtime_error",
+ "msem_ext_load_pend_wr_error",
+ "msem_thread_rls_orun_error",
+ "msem_thread_rls_aloc_error",
+ "msem_thread_rls_vld_error",
+ "msem_ext_thread_oor_error",
+ "msem_ord_id_fifo_error",
+ "msem_invld_foc_error",
+ "msem_ext_ld_len_error",
+ "msem_thrd_ord_fifo_error",
+ "msem_invld_thrd_ord_error",
+ "msem_fast_memory_address_error",
+};
+#else
+#define msem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 msem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg msem_int0_bb_a0 = {
+ 0, 32, msem_int0_bb_a0_attn_idx, 0x1800040, 0x180004c, 0x1800048,
+ 0x1800044
+};
+
+static const u16 msem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg msem_int1_bb_a0 = {
+ 1, 13, msem_int1_bb_a0_attn_idx, 0x1800050, 0x180005c, 0x1800058,
+ 0x1800054
+};
+
+static const u16 msem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg msem_fast_memory_int0_bb_a0 = {
+ 2, 1, msem_fast_memory_int0_bb_a0_attn_idx, 0x1840040, 0x184004c,
+ 0x1840048, 0x1840044
+};
+
+static struct attn_hw_reg *msem_int_bb_a0_regs[3] = {
+ &msem_int0_bb_a0, &msem_int1_bb_a0, &msem_fast_memory_int0_bb_a0,
+};
+
+static const u16 msem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg msem_int0_bb_b0 = {
+ 0, 32, msem_int0_bb_b0_attn_idx, 0x1800040, 0x180004c, 0x1800048,
+ 0x1800044
+};
+
+static const u16 msem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg msem_int1_bb_b0 = {
+ 1, 13, msem_int1_bb_b0_attn_idx, 0x1800050, 0x180005c, 0x1800058,
+ 0x1800054
+};
+
+static const u16 msem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = {
+ 2, 1, msem_fast_memory_int0_bb_b0_attn_idx, 0x1840040, 0x184004c,
+ 0x1840048, 0x1840044
+};
+
+static struct attn_hw_reg *msem_int_bb_b0_regs[3] = {
+ &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0,
+};
+
+static const u16 msem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg msem_int0_k2 = {
+ 0, 32, msem_int0_k2_attn_idx, 0x1800040, 0x180004c, 0x1800048,
+ 0x1800044
+};
+
+static const u16 msem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg msem_int1_k2 = {
+ 1, 13, msem_int1_k2_attn_idx, 0x1800050, 0x180005c, 0x1800058,
+ 0x1800054
+};
+
+static const u16 msem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg msem_fast_memory_int0_k2 = {
+ 2, 1, msem_fast_memory_int0_k2_attn_idx, 0x1840040, 0x184004c,
+ 0x1840048,
+ 0x1840044
+};
+
+static struct attn_hw_reg *msem_int_k2_regs[3] = {
+ &msem_int0_k2, &msem_int1_k2, &msem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *msem_prty_attn_desc[23] = {
+ "msem_vfc_rbc_parity_error",
+ "msem_storm_rf_parity_error",
+ "msem_reg_gen_parity_error",
+ "msem_mem005_i_ecc_0_rf_int",
+ "msem_mem005_i_ecc_1_rf_int",
+ "msem_mem004_i_mem_prty",
+ "msem_mem002_i_mem_prty",
+ "msem_mem003_i_mem_prty",
+ "msem_mem001_i_mem_prty",
+ "msem_fast_memory_mem024_i_mem_prty",
+ "msem_fast_memory_mem023_i_mem_prty",
+ "msem_fast_memory_mem022_i_mem_prty",
+ "msem_fast_memory_mem021_i_mem_prty",
+ "msem_fast_memory_mem020_i_mem_prty",
+ "msem_fast_memory_mem019_i_mem_prty",
+ "msem_fast_memory_mem018_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "msem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "msem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define msem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 msem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg msem_prty0_bb_a0 = {
+ 0, 3, msem_prty0_bb_a0_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0,
+ 0x18000cc
+};
+
+static const u16 msem_prty1_bb_a0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg msem_prty1_bb_a0 = {
+ 1, 6, msem_prty1_bb_a0_attn_idx, 0x1800200, 0x180020c, 0x1800208,
+ 0x1800204
+};
+
+static struct attn_hw_reg *msem_prty_bb_a0_regs[2] = {
+ &msem_prty0_bb_a0, &msem_prty1_bb_a0,
+};
+
+static const u16 msem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg msem_prty0_bb_b0 = {
+ 0, 3, msem_prty0_bb_b0_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0,
+ 0x18000cc
+};
+
+static const u16 msem_prty1_bb_b0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg msem_prty1_bb_b0 = {
+ 1, 6, msem_prty1_bb_b0_attn_idx, 0x1800200, 0x180020c, 0x1800208,
+ 0x1800204
+};
+
+static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = {
+ &msem_prty0_bb_b0, &msem_prty1_bb_b0,
+};
+
+static const u16 msem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg msem_prty0_k2 = {
+ 0, 3, msem_prty0_k2_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0,
+ 0x18000cc
+};
+
+static const u16 msem_prty1_k2_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg msem_prty1_k2 = {
+ 1, 6, msem_prty1_k2_attn_idx, 0x1800200, 0x180020c, 0x1800208,
+ 0x1800204
+};
+
+static const u16 msem_fast_memory_prty1_k2_attn_idx[7] = {
+ 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg msem_fast_memory_prty1_k2 = {
+ 2, 7, msem_fast_memory_prty1_k2_attn_idx, 0x1840200, 0x184020c,
+ 0x1840208,
+ 0x1840204
+};
+
+static struct attn_hw_reg *msem_prty_k2_regs[3] = {
+ &msem_prty0_k2, &msem_prty1_k2, &msem_fast_memory_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *usem_int_attn_desc[46] = {
+ "usem_address_error",
+ "usem_fic_last_error",
+ "usem_fic_length_error",
+ "usem_fic_fifo_error",
+ "usem_pas_buf_fifo_error",
+ "usem_sync_fin_pop_error",
+ "usem_sync_dra_wr_push_error",
+ "usem_sync_dra_wr_pop_error",
+ "usem_sync_dra_rd_push_error",
+ "usem_sync_dra_rd_pop_error",
+ "usem_sync_fin_push_error",
+ "usem_sem_fast_address_error",
+ "usem_cam_lsb_inp_fifo",
+ "usem_cam_msb_inp_fifo",
+ "usem_cam_out_fifo",
+ "usem_fin_fifo",
+ "usem_thread_fifo_error",
+ "usem_thread_overrun",
+ "usem_sync_ext_store_push_error",
+ "usem_sync_ext_store_pop_error",
+ "usem_sync_ext_load_push_error",
+ "usem_sync_ext_load_pop_error",
+ "usem_sync_ram_rd_push_error",
+ "usem_sync_ram_rd_pop_error",
+ "usem_sync_ram_wr_pop_error",
+ "usem_sync_ram_wr_push_error",
+ "usem_sync_dbg_push_error",
+ "usem_sync_dbg_pop_error",
+ "usem_dbg_fifo_error",
+ "usem_cam_msb2_inp_fifo",
+ "usem_vfc_interrupt",
+ "usem_vfc_out_fifo_error",
+ "usem_storm_stack_uf_attn",
+ "usem_storm_stack_of_attn",
+ "usem_storm_runtime_error",
+ "usem_ext_load_pend_wr_error",
+ "usem_thread_rls_orun_error",
+ "usem_thread_rls_aloc_error",
+ "usem_thread_rls_vld_error",
+ "usem_ext_thread_oor_error",
+ "usem_ord_id_fifo_error",
+ "usem_invld_foc_error",
+ "usem_ext_ld_len_error",
+ "usem_thrd_ord_fifo_error",
+ "usem_invld_thrd_ord_error",
+ "usem_fast_memory_address_error",
+};
+#else
+#define usem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 usem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg usem_int0_bb_a0 = {
+ 0, 32, usem_int0_bb_a0_attn_idx, 0x1900040, 0x190004c, 0x1900048,
+ 0x1900044
+};
+
+static const u16 usem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg usem_int1_bb_a0 = {
+ 1, 13, usem_int1_bb_a0_attn_idx, 0x1900050, 0x190005c, 0x1900058,
+ 0x1900054
+};
+
+static const u16 usem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg usem_fast_memory_int0_bb_a0 = {
+ 2, 1, usem_fast_memory_int0_bb_a0_attn_idx, 0x1940040, 0x194004c,
+ 0x1940048, 0x1940044
+};
+
+static struct attn_hw_reg *usem_int_bb_a0_regs[3] = {
+ &usem_int0_bb_a0, &usem_int1_bb_a0, &usem_fast_memory_int0_bb_a0,
+};
+
+static const u16 usem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg usem_int0_bb_b0 = {
+ 0, 32, usem_int0_bb_b0_attn_idx, 0x1900040, 0x190004c, 0x1900048,
+ 0x1900044
+};
+
+static const u16 usem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg usem_int1_bb_b0 = {
+ 1, 13, usem_int1_bb_b0_attn_idx, 0x1900050, 0x190005c, 0x1900058,
+ 0x1900054
+};
+
+static const u16 usem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = {
+ 2, 1, usem_fast_memory_int0_bb_b0_attn_idx, 0x1940040, 0x194004c,
+ 0x1940048, 0x1940044
+};
+
+static struct attn_hw_reg *usem_int_bb_b0_regs[3] = {
+ &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0,
+};
+
+static const u16 usem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg usem_int0_k2 = {
+ 0, 32, usem_int0_k2_attn_idx, 0x1900040, 0x190004c, 0x1900048,
+ 0x1900044
+};
+
+static const u16 usem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg usem_int1_k2 = {
+ 1, 13, usem_int1_k2_attn_idx, 0x1900050, 0x190005c, 0x1900058,
+ 0x1900054
+};
+
+static const u16 usem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg usem_fast_memory_int0_k2 = {
+ 2, 1, usem_fast_memory_int0_k2_attn_idx, 0x1940040, 0x194004c,
+ 0x1940048,
+ 0x1940044
+};
+
+static struct attn_hw_reg *usem_int_k2_regs[3] = {
+ &usem_int0_k2, &usem_int1_k2, &usem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *usem_prty_attn_desc[23] = {
+ "usem_vfc_rbc_parity_error",
+ "usem_storm_rf_parity_error",
+ "usem_reg_gen_parity_error",
+ "usem_mem005_i_ecc_0_rf_int",
+ "usem_mem005_i_ecc_1_rf_int",
+ "usem_mem004_i_mem_prty",
+ "usem_mem002_i_mem_prty",
+ "usem_mem003_i_mem_prty",
+ "usem_mem001_i_mem_prty",
+ "usem_fast_memory_mem024_i_mem_prty",
+ "usem_fast_memory_mem023_i_mem_prty",
+ "usem_fast_memory_mem022_i_mem_prty",
+ "usem_fast_memory_mem021_i_mem_prty",
+ "usem_fast_memory_mem020_i_mem_prty",
+ "usem_fast_memory_mem019_i_mem_prty",
+ "usem_fast_memory_mem018_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "usem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "usem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define usem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 usem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg usem_prty0_bb_a0 = {
+ 0, 3, usem_prty0_bb_a0_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0,
+ 0x19000cc
+};
+
+static const u16 usem_prty1_bb_a0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg usem_prty1_bb_a0 = {
+ 1, 6, usem_prty1_bb_a0_attn_idx, 0x1900200, 0x190020c, 0x1900208,
+ 0x1900204
+};
+
+static struct attn_hw_reg *usem_prty_bb_a0_regs[2] = {
+ &usem_prty0_bb_a0, &usem_prty1_bb_a0,
+};
+
+static const u16 usem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg usem_prty0_bb_b0 = {
+ 0, 3, usem_prty0_bb_b0_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0,
+ 0x19000cc
+};
+
+static const u16 usem_prty1_bb_b0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg usem_prty1_bb_b0 = {
+ 1, 6, usem_prty1_bb_b0_attn_idx, 0x1900200, 0x190020c, 0x1900208,
+ 0x1900204
+};
+
+static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = {
+ &usem_prty0_bb_b0, &usem_prty1_bb_b0,
+};
+
+static const u16 usem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg usem_prty0_k2 = {
+ 0, 3, usem_prty0_k2_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0,
+ 0x19000cc
+};
+
+static const u16 usem_prty1_k2_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg usem_prty1_k2 = {
+ 1, 6, usem_prty1_k2_attn_idx, 0x1900200, 0x190020c, 0x1900208,
+ 0x1900204
+};
+
+static const u16 usem_fast_memory_prty1_k2_attn_idx[7] = {
+ 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg usem_fast_memory_prty1_k2 = {
+ 2, 7, usem_fast_memory_prty1_k2_attn_idx, 0x1940200, 0x194020c,
+ 0x1940208,
+ 0x1940204
+};
+
+static struct attn_hw_reg *usem_prty_k2_regs[3] = {
+ &usem_prty0_k2, &usem_prty1_k2, &usem_fast_memory_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xsem_int_attn_desc[46] = {
+ "xsem_address_error",
+ "xsem_fic_last_error",
+ "xsem_fic_length_error",
+ "xsem_fic_fifo_error",
+ "xsem_pas_buf_fifo_error",
+ "xsem_sync_fin_pop_error",
+ "xsem_sync_dra_wr_push_error",
+ "xsem_sync_dra_wr_pop_error",
+ "xsem_sync_dra_rd_push_error",
+ "xsem_sync_dra_rd_pop_error",
+ "xsem_sync_fin_push_error",
+ "xsem_sem_fast_address_error",
+ "xsem_cam_lsb_inp_fifo",
+ "xsem_cam_msb_inp_fifo",
+ "xsem_cam_out_fifo",
+ "xsem_fin_fifo",
+ "xsem_thread_fifo_error",
+ "xsem_thread_overrun",
+ "xsem_sync_ext_store_push_error",
+ "xsem_sync_ext_store_pop_error",
+ "xsem_sync_ext_load_push_error",
+ "xsem_sync_ext_load_pop_error",
+ "xsem_sync_ram_rd_push_error",
+ "xsem_sync_ram_rd_pop_error",
+ "xsem_sync_ram_wr_pop_error",
+ "xsem_sync_ram_wr_push_error",
+ "xsem_sync_dbg_push_error",
+ "xsem_sync_dbg_pop_error",
+ "xsem_dbg_fifo_error",
+ "xsem_cam_msb2_inp_fifo",
+ "xsem_vfc_interrupt",
+ "xsem_vfc_out_fifo_error",
+ "xsem_storm_stack_uf_attn",
+ "xsem_storm_stack_of_attn",
+ "xsem_storm_runtime_error",
+ "xsem_ext_load_pend_wr_error",
+ "xsem_thread_rls_orun_error",
+ "xsem_thread_rls_aloc_error",
+ "xsem_thread_rls_vld_error",
+ "xsem_ext_thread_oor_error",
+ "xsem_ord_id_fifo_error",
+ "xsem_invld_foc_error",
+ "xsem_ext_ld_len_error",
+ "xsem_thrd_ord_fifo_error",
+ "xsem_invld_thrd_ord_error",
+ "xsem_fast_memory_address_error",
+};
+#else
+#define xsem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg xsem_int0_bb_a0 = {
+ 0, 32, xsem_int0_bb_a0_attn_idx, 0x1400040, 0x140004c, 0x1400048,
+ 0x1400044
+};
+
+static const u16 xsem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg xsem_int1_bb_a0 = {
+ 1, 13, xsem_int1_bb_a0_attn_idx, 0x1400050, 0x140005c, 0x1400058,
+ 0x1400054
+};
+
+static const u16 xsem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg xsem_fast_memory_int0_bb_a0 = {
+ 2, 1, xsem_fast_memory_int0_bb_a0_attn_idx, 0x1440040, 0x144004c,
+ 0x1440048, 0x1440044
+};
+
+static struct attn_hw_reg *xsem_int_bb_a0_regs[3] = {
+ &xsem_int0_bb_a0, &xsem_int1_bb_a0, &xsem_fast_memory_int0_bb_a0,
+};
+
+static const u16 xsem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg xsem_int0_bb_b0 = {
+ 0, 32, xsem_int0_bb_b0_attn_idx, 0x1400040, 0x140004c, 0x1400048,
+ 0x1400044
+};
+
+static const u16 xsem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg xsem_int1_bb_b0 = {
+ 1, 13, xsem_int1_bb_b0_attn_idx, 0x1400050, 0x140005c, 0x1400058,
+ 0x1400054
+};
+
+static const u16 xsem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = {
+ 2, 1, xsem_fast_memory_int0_bb_b0_attn_idx, 0x1440040, 0x144004c,
+ 0x1440048, 0x1440044
+};
+
+static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = {
+ &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0,
+};
+
+static const u16 xsem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg xsem_int0_k2 = {
+ 0, 32, xsem_int0_k2_attn_idx, 0x1400040, 0x140004c, 0x1400048,
+ 0x1400044
+};
+
+static const u16 xsem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg xsem_int1_k2 = {
+ 1, 13, xsem_int1_k2_attn_idx, 0x1400050, 0x140005c, 0x1400058,
+ 0x1400054
+};
+
+static const u16 xsem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg xsem_fast_memory_int0_k2 = {
+ 2, 1, xsem_fast_memory_int0_k2_attn_idx, 0x1440040, 0x144004c,
+ 0x1440048,
+ 0x1440044
+};
+
+static struct attn_hw_reg *xsem_int_k2_regs[3] = {
+ &xsem_int0_k2, &xsem_int1_k2, &xsem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xsem_prty_attn_desc[24] = {
+ "xsem_vfc_rbc_parity_error",
+ "xsem_storm_rf_parity_error",
+ "xsem_reg_gen_parity_error",
+ "xsem_mem006_i_ecc_0_rf_int",
+ "xsem_mem006_i_ecc_1_rf_int",
+ "xsem_mem005_i_mem_prty",
+ "xsem_mem002_i_mem_prty",
+ "xsem_mem004_i_mem_prty",
+ "xsem_mem003_i_mem_prty",
+ "xsem_mem001_i_mem_prty",
+ "xsem_fast_memory_mem024_i_mem_prty",
+ "xsem_fast_memory_mem023_i_mem_prty",
+ "xsem_fast_memory_mem022_i_mem_prty",
+ "xsem_fast_memory_mem021_i_mem_prty",
+ "xsem_fast_memory_mem020_i_mem_prty",
+ "xsem_fast_memory_mem019_i_mem_prty",
+ "xsem_fast_memory_mem018_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "xsem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "xsem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define xsem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg xsem_prty0_bb_a0 = {
+ 0, 3, xsem_prty0_bb_a0_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0,
+ 0x14000cc
+};
+
+static const u16 xsem_prty1_bb_a0_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsem_prty1_bb_a0 = {
+ 1, 7, xsem_prty1_bb_a0_attn_idx, 0x1400200, 0x140020c, 0x1400208,
+ 0x1400204
+};
+
+static struct attn_hw_reg *xsem_prty_bb_a0_regs[2] = {
+ &xsem_prty0_bb_a0, &xsem_prty1_bb_a0,
+};
+
+static const u16 xsem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg xsem_prty0_bb_b0 = {
+ 0, 3, xsem_prty0_bb_b0_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0,
+ 0x14000cc
+};
+
+static const u16 xsem_prty1_bb_b0_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsem_prty1_bb_b0 = {
+ 1, 7, xsem_prty1_bb_b0_attn_idx, 0x1400200, 0x140020c, 0x1400208,
+ 0x1400204
+};
+
+static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = {
+ &xsem_prty0_bb_b0, &xsem_prty1_bb_b0,
+};
+
+static const u16 xsem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg xsem_prty0_k2 = {
+ 0, 3, xsem_prty0_k2_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0,
+ 0x14000cc
+};
+
+static const u16 xsem_prty1_k2_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsem_prty1_k2 = {
+ 1, 7, xsem_prty1_k2_attn_idx, 0x1400200, 0x140020c, 0x1400208,
+ 0x1400204
+};
+
+static const u16 xsem_fast_memory_prty1_k2_attn_idx[7] = {
+ 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg xsem_fast_memory_prty1_k2 = {
+ 2, 7, xsem_fast_memory_prty1_k2_attn_idx, 0x1440200, 0x144020c,
+ 0x1440208,
+ 0x1440204
+};
+
+static struct attn_hw_reg *xsem_prty_k2_regs[3] = {
+ &xsem_prty0_k2, &xsem_prty1_k2, &xsem_fast_memory_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ysem_int_attn_desc[46] = {
+ "ysem_address_error",
+ "ysem_fic_last_error",
+ "ysem_fic_length_error",
+ "ysem_fic_fifo_error",
+ "ysem_pas_buf_fifo_error",
+ "ysem_sync_fin_pop_error",
+ "ysem_sync_dra_wr_push_error",
+ "ysem_sync_dra_wr_pop_error",
+ "ysem_sync_dra_rd_push_error",
+ "ysem_sync_dra_rd_pop_error",
+ "ysem_sync_fin_push_error",
+ "ysem_sem_fast_address_error",
+ "ysem_cam_lsb_inp_fifo",
+ "ysem_cam_msb_inp_fifo",
+ "ysem_cam_out_fifo",
+ "ysem_fin_fifo",
+ "ysem_thread_fifo_error",
+ "ysem_thread_overrun",
+ "ysem_sync_ext_store_push_error",
+ "ysem_sync_ext_store_pop_error",
+ "ysem_sync_ext_load_push_error",
+ "ysem_sync_ext_load_pop_error",
+ "ysem_sync_ram_rd_push_error",
+ "ysem_sync_ram_rd_pop_error",
+ "ysem_sync_ram_wr_pop_error",
+ "ysem_sync_ram_wr_push_error",
+ "ysem_sync_dbg_push_error",
+ "ysem_sync_dbg_pop_error",
+ "ysem_dbg_fifo_error",
+ "ysem_cam_msb2_inp_fifo",
+ "ysem_vfc_interrupt",
+ "ysem_vfc_out_fifo_error",
+ "ysem_storm_stack_uf_attn",
+ "ysem_storm_stack_of_attn",
+ "ysem_storm_runtime_error",
+ "ysem_ext_load_pend_wr_error",
+ "ysem_thread_rls_orun_error",
+ "ysem_thread_rls_aloc_error",
+ "ysem_thread_rls_vld_error",
+ "ysem_ext_thread_oor_error",
+ "ysem_ord_id_fifo_error",
+ "ysem_invld_foc_error",
+ "ysem_ext_ld_len_error",
+ "ysem_thrd_ord_fifo_error",
+ "ysem_invld_thrd_ord_error",
+ "ysem_fast_memory_address_error",
+};
+#else
+#define ysem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg ysem_int0_bb_a0 = {
+ 0, 32, ysem_int0_bb_a0_attn_idx, 0x1500040, 0x150004c, 0x1500048,
+ 0x1500044
+};
+
+static const u16 ysem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg ysem_int1_bb_a0 = {
+ 1, 13, ysem_int1_bb_a0_attn_idx, 0x1500050, 0x150005c, 0x1500058,
+ 0x1500054
+};
+
+static const u16 ysem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg ysem_fast_memory_int0_bb_a0 = {
+ 2, 1, ysem_fast_memory_int0_bb_a0_attn_idx, 0x1540040, 0x154004c,
+ 0x1540048, 0x1540044
+};
+
+static struct attn_hw_reg *ysem_int_bb_a0_regs[3] = {
+ &ysem_int0_bb_a0, &ysem_int1_bb_a0, &ysem_fast_memory_int0_bb_a0,
+};
+
+static const u16 ysem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg ysem_int0_bb_b0 = {
+ 0, 32, ysem_int0_bb_b0_attn_idx, 0x1500040, 0x150004c, 0x1500048,
+ 0x1500044
+};
+
+static const u16 ysem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg ysem_int1_bb_b0 = {
+ 1, 13, ysem_int1_bb_b0_attn_idx, 0x1500050, 0x150005c, 0x1500058,
+ 0x1500054
+};
+
+static const u16 ysem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = {
+ 2, 1, ysem_fast_memory_int0_bb_b0_attn_idx, 0x1540040, 0x154004c,
+ 0x1540048, 0x1540044
+};
+
+static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = {
+ &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0,
+};
+
+static const u16 ysem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg ysem_int0_k2 = {
+ 0, 32, ysem_int0_k2_attn_idx, 0x1500040, 0x150004c, 0x1500048,
+ 0x1500044
+};
+
+static const u16 ysem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg ysem_int1_k2 = {
+ 1, 13, ysem_int1_k2_attn_idx, 0x1500050, 0x150005c, 0x1500058,
+ 0x1500054
+};
+
+static const u16 ysem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg ysem_fast_memory_int0_k2 = {
+ 2, 1, ysem_fast_memory_int0_k2_attn_idx, 0x1540040, 0x154004c,
+ 0x1540048,
+ 0x1540044
+};
+
+static struct attn_hw_reg *ysem_int_k2_regs[3] = {
+ &ysem_int0_k2, &ysem_int1_k2, &ysem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ysem_prty_attn_desc[24] = {
+ "ysem_vfc_rbc_parity_error",
+ "ysem_storm_rf_parity_error",
+ "ysem_reg_gen_parity_error",
+ "ysem_mem006_i_ecc_0_rf_int",
+ "ysem_mem006_i_ecc_1_rf_int",
+ "ysem_mem005_i_mem_prty",
+ "ysem_mem002_i_mem_prty",
+ "ysem_mem004_i_mem_prty",
+ "ysem_mem003_i_mem_prty",
+ "ysem_mem001_i_mem_prty",
+ "ysem_fast_memory_mem024_i_mem_prty",
+ "ysem_fast_memory_mem023_i_mem_prty",
+ "ysem_fast_memory_mem022_i_mem_prty",
+ "ysem_fast_memory_mem021_i_mem_prty",
+ "ysem_fast_memory_mem020_i_mem_prty",
+ "ysem_fast_memory_mem019_i_mem_prty",
+ "ysem_fast_memory_mem018_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "ysem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "ysem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define ysem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg ysem_prty0_bb_a0 = {
+ 0, 3, ysem_prty0_bb_a0_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0,
+ 0x15000cc
+};
+
+static const u16 ysem_prty1_bb_a0_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ysem_prty1_bb_a0 = {
+ 1, 7, ysem_prty1_bb_a0_attn_idx, 0x1500200, 0x150020c, 0x1500208,
+ 0x1500204
+};
+
+static struct attn_hw_reg *ysem_prty_bb_a0_regs[2] = {
+ &ysem_prty0_bb_a0, &ysem_prty1_bb_a0,
+};
+
+static const u16 ysem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg ysem_prty0_bb_b0 = {
+ 0, 3, ysem_prty0_bb_b0_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0,
+ 0x15000cc
+};
+
+static const u16 ysem_prty1_bb_b0_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ysem_prty1_bb_b0 = {
+ 1, 7, ysem_prty1_bb_b0_attn_idx, 0x1500200, 0x150020c, 0x1500208,
+ 0x1500204
+};
+
+static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = {
+ &ysem_prty0_bb_b0, &ysem_prty1_bb_b0,
+};
+
+static const u16 ysem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg ysem_prty0_k2 = {
+ 0, 3, ysem_prty0_k2_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0,
+ 0x15000cc
+};
+
+static const u16 ysem_prty1_k2_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ysem_prty1_k2 = {
+ 1, 7, ysem_prty1_k2_attn_idx, 0x1500200, 0x150020c, 0x1500208,
+ 0x1500204
+};
+
+static const u16 ysem_fast_memory_prty1_k2_attn_idx[7] = {
+ 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ysem_fast_memory_prty1_k2 = {
+ 2, 7, ysem_fast_memory_prty1_k2_attn_idx, 0x1540200, 0x154020c,
+ 0x1540208,
+ 0x1540204
+};
+
+static struct attn_hw_reg *ysem_prty_k2_regs[3] = {
+ &ysem_prty0_k2, &ysem_prty1_k2, &ysem_fast_memory_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *psem_int_attn_desc[46] = {
+ "psem_address_error",
+ "psem_fic_last_error",
+ "psem_fic_length_error",
+ "psem_fic_fifo_error",
+ "psem_pas_buf_fifo_error",
+ "psem_sync_fin_pop_error",
+ "psem_sync_dra_wr_push_error",
+ "psem_sync_dra_wr_pop_error",
+ "psem_sync_dra_rd_push_error",
+ "psem_sync_dra_rd_pop_error",
+ "psem_sync_fin_push_error",
+ "psem_sem_fast_address_error",
+ "psem_cam_lsb_inp_fifo",
+ "psem_cam_msb_inp_fifo",
+ "psem_cam_out_fifo",
+ "psem_fin_fifo",
+ "psem_thread_fifo_error",
+ "psem_thread_overrun",
+ "psem_sync_ext_store_push_error",
+ "psem_sync_ext_store_pop_error",
+ "psem_sync_ext_load_push_error",
+ "psem_sync_ext_load_pop_error",
+ "psem_sync_ram_rd_push_error",
+ "psem_sync_ram_rd_pop_error",
+ "psem_sync_ram_wr_pop_error",
+ "psem_sync_ram_wr_push_error",
+ "psem_sync_dbg_push_error",
+ "psem_sync_dbg_pop_error",
+ "psem_dbg_fifo_error",
+ "psem_cam_msb2_inp_fifo",
+ "psem_vfc_interrupt",
+ "psem_vfc_out_fifo_error",
+ "psem_storm_stack_uf_attn",
+ "psem_storm_stack_of_attn",
+ "psem_storm_runtime_error",
+ "psem_ext_load_pend_wr_error",
+ "psem_thread_rls_orun_error",
+ "psem_thread_rls_aloc_error",
+ "psem_thread_rls_vld_error",
+ "psem_ext_thread_oor_error",
+ "psem_ord_id_fifo_error",
+ "psem_invld_foc_error",
+ "psem_ext_ld_len_error",
+ "psem_thrd_ord_fifo_error",
+ "psem_invld_thrd_ord_error",
+ "psem_fast_memory_address_error",
+};
+#else
+#define psem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 psem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg psem_int0_bb_a0 = {
+ 0, 32, psem_int0_bb_a0_attn_idx, 0x1600040, 0x160004c, 0x1600048,
+ 0x1600044
+};
+
+static const u16 psem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg psem_int1_bb_a0 = {
+ 1, 13, psem_int1_bb_a0_attn_idx, 0x1600050, 0x160005c, 0x1600058,
+ 0x1600054
+};
+
+static const u16 psem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg psem_fast_memory_int0_bb_a0 = {
+ 2, 1, psem_fast_memory_int0_bb_a0_attn_idx, 0x1640040, 0x164004c,
+ 0x1640048, 0x1640044
+};
+
+static struct attn_hw_reg *psem_int_bb_a0_regs[3] = {
+ &psem_int0_bb_a0, &psem_int1_bb_a0, &psem_fast_memory_int0_bb_a0,
+};
+
+static const u16 psem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg psem_int0_bb_b0 = {
+ 0, 32, psem_int0_bb_b0_attn_idx, 0x1600040, 0x160004c, 0x1600048,
+ 0x1600044
+};
+
+static const u16 psem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg psem_int1_bb_b0 = {
+ 1, 13, psem_int1_bb_b0_attn_idx, 0x1600050, 0x160005c, 0x1600058,
+ 0x1600054
+};
+
+static const u16 psem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = {
+ 2, 1, psem_fast_memory_int0_bb_b0_attn_idx, 0x1640040, 0x164004c,
+ 0x1640048, 0x1640044
+};
+
+static struct attn_hw_reg *psem_int_bb_b0_regs[3] = {
+ &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0,
+};
+
+static const u16 psem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg psem_int0_k2 = {
+ 0, 32, psem_int0_k2_attn_idx, 0x1600040, 0x160004c, 0x1600048,
+ 0x1600044
+};
+
+static const u16 psem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg psem_int1_k2 = {
+ 1, 13, psem_int1_k2_attn_idx, 0x1600050, 0x160005c, 0x1600058,
+ 0x1600054
+};
+
+static const u16 psem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg psem_fast_memory_int0_k2 = {
+ 2, 1, psem_fast_memory_int0_k2_attn_idx, 0x1640040, 0x164004c,
+ 0x1640048,
+ 0x1640044
+};
+
+static struct attn_hw_reg *psem_int_k2_regs[3] = {
+ &psem_int0_k2, &psem_int1_k2, &psem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *psem_prty_attn_desc[23] = {
+ "psem_vfc_rbc_parity_error",
+ "psem_storm_rf_parity_error",
+ "psem_reg_gen_parity_error",
+ "psem_mem005_i_ecc_0_rf_int",
+ "psem_mem005_i_ecc_1_rf_int",
+ "psem_mem004_i_mem_prty",
+ "psem_mem002_i_mem_prty",
+ "psem_mem003_i_mem_prty",
+ "psem_mem001_i_mem_prty",
+ "psem_fast_memory_mem024_i_mem_prty",
+ "psem_fast_memory_mem023_i_mem_prty",
+ "psem_fast_memory_mem022_i_mem_prty",
+ "psem_fast_memory_mem021_i_mem_prty",
+ "psem_fast_memory_mem020_i_mem_prty",
+ "psem_fast_memory_mem019_i_mem_prty",
+ "psem_fast_memory_mem018_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "psem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "psem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define psem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 psem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg psem_prty0_bb_a0 = {
+ 0, 3, psem_prty0_bb_a0_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0,
+ 0x16000cc
+};
+
+static const u16 psem_prty1_bb_a0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psem_prty1_bb_a0 = {
+ 1, 6, psem_prty1_bb_a0_attn_idx, 0x1600200, 0x160020c, 0x1600208,
+ 0x1600204
+};
+
+static const u16 psem_fast_memory_vfc_config_prty1_bb_a0_attn_idx[6] = {
+ 16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_a0 = {
+ 2, 6, psem_fast_memory_vfc_config_prty1_bb_a0_attn_idx, 0x164a200,
+ 0x164a20c, 0x164a208, 0x164a204
+};
+
+static struct attn_hw_reg *psem_prty_bb_a0_regs[3] = {
+ &psem_prty0_bb_a0, &psem_prty1_bb_a0,
+ &psem_fast_memory_vfc_config_prty1_bb_a0,
+};
+
+static const u16 psem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg psem_prty0_bb_b0 = {
+ 0, 3, psem_prty0_bb_b0_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0,
+ 0x16000cc
+};
+
+static const u16 psem_prty1_bb_b0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psem_prty1_bb_b0 = {
+ 1, 6, psem_prty1_bb_b0_attn_idx, 0x1600200, 0x160020c, 0x1600208,
+ 0x1600204
+};
+
+static const u16 psem_fast_memory_vfc_config_prty1_bb_b0_attn_idx[6] = {
+ 16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = {
+ 2, 6, psem_fast_memory_vfc_config_prty1_bb_b0_attn_idx, 0x164a200,
+ 0x164a20c, 0x164a208, 0x164a204
+};
+
+static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = {
+ &psem_prty0_bb_b0, &psem_prty1_bb_b0,
+ &psem_fast_memory_vfc_config_prty1_bb_b0,
+};
+
+static const u16 psem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg psem_prty0_k2 = {
+ 0, 3, psem_prty0_k2_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0,
+ 0x16000cc
+};
+
+static const u16 psem_prty1_k2_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psem_prty1_k2 = {
+ 1, 6, psem_prty1_k2_attn_idx, 0x1600200, 0x160020c, 0x1600208,
+ 0x1600204
+};
+
+static const u16 psem_fast_memory_prty1_k2_attn_idx[7] = {
+ 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg psem_fast_memory_prty1_k2 = {
+ 2, 7, psem_fast_memory_prty1_k2_attn_idx, 0x1640200, 0x164020c,
+ 0x1640208,
+ 0x1640204
+};
+
+static const u16 psem_fast_memory_vfc_config_prty1_k2_attn_idx[6] = {
+ 16, 17, 18, 19, 20, 21,
+};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_k2 = {
+ 3, 6, psem_fast_memory_vfc_config_prty1_k2_attn_idx, 0x164a200,
+ 0x164a20c,
+ 0x164a208, 0x164a204
+};
+
+static struct attn_hw_reg *psem_prty_k2_regs[4] = {
+ &psem_prty0_k2, &psem_prty1_k2, &psem_fast_memory_prty1_k2,
+ &psem_fast_memory_vfc_config_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rss_int_attn_desc[12] = {
+ "rss_address_error",
+ "rss_msg_inp_cnt_error",
+ "rss_msg_out_cnt_error",
+ "rss_inp_state_error",
+ "rss_out_state_error",
+ "rss_main_state_error",
+ "rss_calc_state_error",
+ "rss_inp_fifo_error",
+ "rss_cmd_fifo_error",
+ "rss_msg_fifo_error",
+ "rss_rsp_fifo_error",
+ "rss_hdr_fifo_error",
+};
+#else
+#define rss_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 rss_int0_bb_a0_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg rss_int0_bb_a0 = {
+ 0, 12, rss_int0_bb_a0_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984
+};
+
+static struct attn_hw_reg *rss_int_bb_a0_regs[1] = {
+ &rss_int0_bb_a0,
+};
+
+static const u16 rss_int0_bb_b0_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg rss_int0_bb_b0 = {
+ 0, 12, rss_int0_bb_b0_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984
+};
+
+static struct attn_hw_reg *rss_int_bb_b0_regs[1] = {
+ &rss_int0_bb_b0,
+};
+
+static const u16 rss_int0_k2_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg rss_int0_k2 = {
+ 0, 12, rss_int0_k2_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984
+};
+
+static struct attn_hw_reg *rss_int_k2_regs[1] = {
+ &rss_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rss_prty_attn_desc[4] = {
+ "rss_mem002_i_ecc_rf_int",
+ "rss_mem001_i_ecc_rf_int",
+ "rss_mem003_i_mem_prty",
+ "rss_mem004_i_mem_prty",
+};
+#else
+#define rss_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 rss_prty1_bb_a0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg rss_prty1_bb_a0 = {
+ 0, 4, rss_prty1_bb_a0_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04
+};
+
+static struct attn_hw_reg *rss_prty_bb_a0_regs[1] = {
+ &rss_prty1_bb_a0,
+};
+
+static const u16 rss_prty1_bb_b0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg rss_prty1_bb_b0 = {
+ 0, 4, rss_prty1_bb_b0_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04
+};
+
+static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = {
+ &rss_prty1_bb_b0,
+};
+
+static const u16 rss_prty1_k2_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg rss_prty1_k2 = {
+ 0, 4, rss_prty1_k2_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04
+};
+
+static struct attn_hw_reg *rss_prty_k2_regs[1] = {
+ &rss_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tmld_int_attn_desc[6] = {
+ "tmld_address_error",
+ "tmld_ld_hdr_err",
+ "tmld_ld_seg_msg_err",
+ "tmld_ld_tid_mini_cache_err",
+ "tmld_ld_cid_mini_cache_err",
+ "tmld_ld_long_message",
+};
+#else
+#define tmld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tmld_int0_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg tmld_int0_bb_a0 = {
+ 0, 6, tmld_int0_bb_a0_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184
+};
+
+static struct attn_hw_reg *tmld_int_bb_a0_regs[1] = {
+ &tmld_int0_bb_a0,
+};
+
+static const u16 tmld_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg tmld_int0_bb_b0 = {
+ 0, 6, tmld_int0_bb_b0_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184
+};
+
+static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = {
+ &tmld_int0_bb_b0,
+};
+
+static const u16 tmld_int0_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg tmld_int0_k2 = {
+ 0, 6, tmld_int0_k2_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184
+};
+
+static struct attn_hw_reg *tmld_int_k2_regs[1] = {
+ &tmld_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tmld_prty_attn_desc[8] = {
+ "tmld_mem006_i_ecc_rf_int",
+ "tmld_mem002_i_ecc_rf_int",
+ "tmld_mem003_i_mem_prty",
+ "tmld_mem004_i_mem_prty",
+ "tmld_mem007_i_mem_prty",
+ "tmld_mem008_i_mem_prty",
+ "tmld_mem005_i_mem_prty",
+ "tmld_mem001_i_mem_prty",
+};
+#else
+#define tmld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tmld_prty1_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tmld_prty1_bb_a0 = {
+ 0, 8, tmld_prty1_bb_a0_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204
+};
+
+static struct attn_hw_reg *tmld_prty_bb_a0_regs[1] = {
+ &tmld_prty1_bb_a0,
+};
+
+static const u16 tmld_prty1_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tmld_prty1_bb_b0 = {
+ 0, 8, tmld_prty1_bb_b0_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204
+};
+
+static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = {
+ &tmld_prty1_bb_b0,
+};
+
+static const u16 tmld_prty1_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tmld_prty1_k2 = {
+ 0, 8, tmld_prty1_k2_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204
+};
+
+static struct attn_hw_reg *tmld_prty_k2_regs[1] = {
+ &tmld_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *muld_int_attn_desc[6] = {
+ "muld_address_error",
+ "muld_ld_hdr_err",
+ "muld_ld_seg_msg_err",
+ "muld_ld_tid_mini_cache_err",
+ "muld_ld_cid_mini_cache_err",
+ "muld_ld_long_message",
+};
+#else
+#define muld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 muld_int0_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg muld_int0_bb_a0 = {
+ 0, 6, muld_int0_bb_a0_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184
+};
+
+static struct attn_hw_reg *muld_int_bb_a0_regs[1] = {
+ &muld_int0_bb_a0,
+};
+
+static const u16 muld_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg muld_int0_bb_b0 = {
+ 0, 6, muld_int0_bb_b0_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184
+};
+
+static struct attn_hw_reg *muld_int_bb_b0_regs[1] = {
+ &muld_int0_bb_b0,
+};
+
+static const u16 muld_int0_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg muld_int0_k2 = {
+ 0, 6, muld_int0_k2_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184
+};
+
+static struct attn_hw_reg *muld_int_k2_regs[1] = {
+ &muld_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *muld_prty_attn_desc[10] = {
+ "muld_mem005_i_ecc_rf_int",
+ "muld_mem001_i_ecc_rf_int",
+ "muld_mem008_i_ecc_rf_int",
+ "muld_mem007_i_ecc_rf_int",
+ "muld_mem002_i_mem_prty",
+ "muld_mem003_i_mem_prty",
+ "muld_mem009_i_mem_prty",
+ "muld_mem010_i_mem_prty",
+ "muld_mem004_i_mem_prty",
+ "muld_mem006_i_mem_prty",
+};
+#else
+#define muld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 muld_prty1_bb_a0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg muld_prty1_bb_a0 = {
+ 0, 10, muld_prty1_bb_a0_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208,
+ 0x4e0204
+};
+
+static struct attn_hw_reg *muld_prty_bb_a0_regs[1] = {
+ &muld_prty1_bb_a0,
+};
+
+static const u16 muld_prty1_bb_b0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg muld_prty1_bb_b0 = {
+ 0, 10, muld_prty1_bb_b0_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208,
+ 0x4e0204
+};
+
+static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = {
+ &muld_prty1_bb_b0,
+};
+
+static const u16 muld_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg muld_prty1_k2 = {
+ 0, 10, muld_prty1_k2_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204
+};
+
+static struct attn_hw_reg *muld_prty_k2_regs[1] = {
+ &muld_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *yuld_int_attn_desc[6] = {
+ "yuld_address_error",
+ "yuld_ld_hdr_err",
+ "yuld_ld_seg_msg_err",
+ "yuld_ld_tid_mini_cache_err",
+ "yuld_ld_cid_mini_cache_err",
+ "yuld_ld_long_message",
+};
+#else
+#define yuld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 yuld_int0_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_int0_bb_a0 = {
+ 0, 6, yuld_int0_bb_a0_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184
+};
+
+static struct attn_hw_reg *yuld_int_bb_a0_regs[1] = {
+ &yuld_int0_bb_a0,
+};
+
+static const u16 yuld_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_int0_bb_b0 = {
+ 0, 6, yuld_int0_bb_b0_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184
+};
+
+static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = {
+ &yuld_int0_bb_b0,
+};
+
+static const u16 yuld_int0_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_int0_k2 = {
+ 0, 6, yuld_int0_k2_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184
+};
+
+static struct attn_hw_reg *yuld_int_k2_regs[1] = {
+ &yuld_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *yuld_prty_attn_desc[6] = {
+ "yuld_mem001_i_mem_prty",
+ "yuld_mem002_i_mem_prty",
+ "yuld_mem005_i_mem_prty",
+ "yuld_mem006_i_mem_prty",
+ "yuld_mem004_i_mem_prty",
+ "yuld_mem003_i_mem_prty",
+};
+#else
+#define yuld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 yuld_prty1_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_prty1_bb_a0 = {
+ 0, 6, yuld_prty1_bb_a0_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204
+};
+
+static struct attn_hw_reg *yuld_prty_bb_a0_regs[1] = {
+ &yuld_prty1_bb_a0,
+};
+
+static const u16 yuld_prty1_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_prty1_bb_b0 = {
+ 0, 6, yuld_prty1_bb_b0_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204
+};
+
+static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = {
+ &yuld_prty1_bb_b0,
+};
+
+static const u16 yuld_prty1_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_prty1_k2 = {
+ 0, 6, yuld_prty1_k2_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204
+};
+
+static struct attn_hw_reg *yuld_prty_k2_regs[1] = {
+ &yuld_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xyld_int_attn_desc[6] = {
+ "xyld_address_error",
+ "xyld_ld_hdr_err",
+ "xyld_ld_seg_msg_err",
+ "xyld_ld_tid_mini_cache_err",
+ "xyld_ld_cid_mini_cache_err",
+ "xyld_ld_long_message",
+};
+#else
+#define xyld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xyld_int0_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg xyld_int0_bb_a0 = {
+ 0, 6, xyld_int0_bb_a0_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184
+};
+
+static struct attn_hw_reg *xyld_int_bb_a0_regs[1] = {
+ &xyld_int0_bb_a0,
+};
+
+static const u16 xyld_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg xyld_int0_bb_b0 = {
+ 0, 6, xyld_int0_bb_b0_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184
+};
+
+static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = {
+ &xyld_int0_bb_b0,
+};
+
+static const u16 xyld_int0_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg xyld_int0_k2 = {
+ 0, 6, xyld_int0_k2_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184
+};
+
+static struct attn_hw_reg *xyld_int_k2_regs[1] = {
+ &xyld_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xyld_prty_attn_desc[9] = {
+ "xyld_mem004_i_ecc_rf_int",
+ "xyld_mem006_i_ecc_rf_int",
+ "xyld_mem001_i_mem_prty",
+ "xyld_mem002_i_mem_prty",
+ "xyld_mem008_i_mem_prty",
+ "xyld_mem009_i_mem_prty",
+ "xyld_mem003_i_mem_prty",
+ "xyld_mem005_i_mem_prty",
+ "xyld_mem007_i_mem_prty",
+};
+#else
+#define xyld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xyld_prty1_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg xyld_prty1_bb_a0 = {
+ 0, 9, xyld_prty1_bb_a0_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204
+};
+
+static struct attn_hw_reg *xyld_prty_bb_a0_regs[1] = {
+ &xyld_prty1_bb_a0,
+};
+
+static const u16 xyld_prty1_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg xyld_prty1_bb_b0 = {
+ 0, 9, xyld_prty1_bb_b0_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204
+};
+
+static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = {
+ &xyld_prty1_bb_b0,
+};
+
+static const u16 xyld_prty1_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg xyld_prty1_k2 = {
+ 0, 9, xyld_prty1_k2_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204
+};
+
+static struct attn_hw_reg *xyld_prty_k2_regs[1] = {
+ &xyld_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *prm_int_attn_desc[11] = {
+ "prm_address_error",
+ "prm_ififo_error",
+ "prm_immed_fifo_error",
+ "prm_ofst_pend_error",
+ "prm_pad_pend_error",
+ "prm_pbinp_pend_error",
+ "prm_tag_pend_error",
+ "prm_mstorm_eop_err",
+ "prm_ustorm_eop_err",
+ "prm_mstorm_que_err",
+ "prm_ustorm_que_err",
+};
+#else
+#define prm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 prm_int0_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg prm_int0_bb_a0 = {
+ 0, 11, prm_int0_bb_a0_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044
+};
+
+static struct attn_hw_reg *prm_int_bb_a0_regs[1] = {
+ &prm_int0_bb_a0,
+};
+
+static const u16 prm_int0_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg prm_int0_bb_b0 = {
+ 0, 11, prm_int0_bb_b0_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044
+};
+
+static struct attn_hw_reg *prm_int_bb_b0_regs[1] = {
+ &prm_int0_bb_b0,
+};
+
+static const u16 prm_int0_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg prm_int0_k2 = {
+ 0, 11, prm_int0_k2_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044
+};
+
+static struct attn_hw_reg *prm_int_k2_regs[1] = {
+ &prm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *prm_prty_attn_desc[30] = {
+ "prm_datapath_registers",
+ "prm_mem012_i_ecc_rf_int",
+ "prm_mem013_i_ecc_rf_int",
+ "prm_mem014_i_ecc_rf_int",
+ "prm_mem020_i_ecc_rf_int",
+ "prm_mem004_i_mem_prty",
+ "prm_mem024_i_mem_prty",
+ "prm_mem016_i_mem_prty",
+ "prm_mem017_i_mem_prty",
+ "prm_mem008_i_mem_prty",
+ "prm_mem009_i_mem_prty",
+ "prm_mem010_i_mem_prty",
+ "prm_mem015_i_mem_prty",
+ "prm_mem011_i_mem_prty",
+ "prm_mem003_i_mem_prty",
+ "prm_mem002_i_mem_prty",
+ "prm_mem005_i_mem_prty",
+ "prm_mem023_i_mem_prty",
+ "prm_mem006_i_mem_prty",
+ "prm_mem007_i_mem_prty",
+ "prm_mem001_i_mem_prty",
+ "prm_mem022_i_mem_prty",
+ "prm_mem021_i_mem_prty",
+ "prm_mem019_i_mem_prty",
+ "prm_mem015_i_ecc_rf_int",
+ "prm_mem021_i_ecc_rf_int",
+ "prm_mem025_i_mem_prty",
+ "prm_mem018_i_mem_prty",
+ "prm_mem012_i_mem_prty",
+ "prm_mem020_i_mem_prty",
+};
+#else
+#define prm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 prm_prty1_bb_a0_attn_idx[25] = {
+ 2, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24,
+ 25, 26, 27, 28, 29,
+};
+
+static struct attn_hw_reg prm_prty1_bb_a0 = {
+ 0, 25, prm_prty1_bb_a0_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204
+};
+
+static struct attn_hw_reg *prm_prty_bb_a0_regs[1] = {
+ &prm_prty1_bb_a0,
+};
+
+static const u16 prm_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg prm_prty0_bb_b0 = {
+ 0, 1, prm_prty0_bb_b0_attn_idx, 0x230050, 0x23005c, 0x230058, 0x230054
+};
+
+static const u16 prm_prty1_bb_b0_attn_idx[24] = {
+ 2, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 24, 25,
+ 26, 27, 28, 29,
+};
+
+static struct attn_hw_reg prm_prty1_bb_b0 = {
+ 1, 24, prm_prty1_bb_b0_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204
+};
+
+static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = {
+ &prm_prty0_bb_b0, &prm_prty1_bb_b0,
+};
+
+static const u16 prm_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg prm_prty0_k2 = {
+ 0, 1, prm_prty0_k2_attn_idx, 0x230050, 0x23005c, 0x230058, 0x230054
+};
+
+static const u16 prm_prty1_k2_attn_idx[23] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23,
+};
+
+static struct attn_hw_reg prm_prty1_k2 = {
+ 1, 23, prm_prty1_k2_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204
+};
+
+static struct attn_hw_reg *prm_prty_k2_regs[2] = {
+ &prm_prty0_k2, &prm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_pb1_int_attn_desc[9] = {
+ "pbf_pb1_address_error",
+ "pbf_pb1_eop_error",
+ "pbf_pb1_ififo_error",
+ "pbf_pb1_pfifo_error",
+ "pbf_pb1_db_buf_error",
+ "pbf_pb1_th_exec_error",
+ "pbf_pb1_tq_error_wr",
+ "pbf_pb1_tq_error_rd_th",
+ "pbf_pb1_tq_error_rd_ih",
+};
+#else
+#define pbf_pb1_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb1_int0_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb1_int0_bb_a0 = {
+ 0, 9, pbf_pb1_int0_bb_a0_attn_idx, 0xda0040, 0xda004c, 0xda0048,
+ 0xda0044
+};
+
+static struct attn_hw_reg *pbf_pb1_int_bb_a0_regs[1] = {
+ &pbf_pb1_int0_bb_a0,
+};
+
+static const u16 pbf_pb1_int0_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb1_int0_bb_b0 = {
+ 0, 9, pbf_pb1_int0_bb_b0_attn_idx, 0xda0040, 0xda004c, 0xda0048,
+ 0xda0044
+};
+
+static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = {
+ &pbf_pb1_int0_bb_b0,
+};
+
+static const u16 pbf_pb1_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb1_int0_k2 = {
+ 0, 9, pbf_pb1_int0_k2_attn_idx, 0xda0040, 0xda004c, 0xda0048, 0xda0044
+};
+
+static struct attn_hw_reg *pbf_pb1_int_k2_regs[1] = {
+ &pbf_pb1_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_pb1_prty_attn_desc[1] = {
+ "pbf_pb1_datapath_registers",
+};
+#else
+#define pbf_pb1_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb1_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = {
+ 0, 1, pbf_pb1_prty0_bb_b0_attn_idx, 0xda0050, 0xda005c, 0xda0058,
+ 0xda0054
+};
+
+static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = {
+ &pbf_pb1_prty0_bb_b0,
+};
+
+static const u16 pbf_pb1_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_pb1_prty0_k2 = {
+ 0, 1, pbf_pb1_prty0_k2_attn_idx, 0xda0050, 0xda005c, 0xda0058, 0xda0054
+};
+
+static struct attn_hw_reg *pbf_pb1_prty_k2_regs[1] = {
+ &pbf_pb1_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_pb2_int_attn_desc[9] = {
+ "pbf_pb2_address_error",
+ "pbf_pb2_eop_error",
+ "pbf_pb2_ififo_error",
+ "pbf_pb2_pfifo_error",
+ "pbf_pb2_db_buf_error",
+ "pbf_pb2_th_exec_error",
+ "pbf_pb2_tq_error_wr",
+ "pbf_pb2_tq_error_rd_th",
+ "pbf_pb2_tq_error_rd_ih",
+};
+#else
+#define pbf_pb2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb2_int0_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb2_int0_bb_a0 = {
+ 0, 9, pbf_pb2_int0_bb_a0_attn_idx, 0xda4040, 0xda404c, 0xda4048,
+ 0xda4044
+};
+
+static struct attn_hw_reg *pbf_pb2_int_bb_a0_regs[1] = {
+ &pbf_pb2_int0_bb_a0,
+};
+
+static const u16 pbf_pb2_int0_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb2_int0_bb_b0 = {
+ 0, 9, pbf_pb2_int0_bb_b0_attn_idx, 0xda4040, 0xda404c, 0xda4048,
+ 0xda4044
+};
+
+static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = {
+ &pbf_pb2_int0_bb_b0,
+};
+
+static const u16 pbf_pb2_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb2_int0_k2 = {
+ 0, 9, pbf_pb2_int0_k2_attn_idx, 0xda4040, 0xda404c, 0xda4048, 0xda4044
+};
+
+static struct attn_hw_reg *pbf_pb2_int_k2_regs[1] = {
+ &pbf_pb2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_pb2_prty_attn_desc[1] = {
+ "pbf_pb2_datapath_registers",
+};
+#else
+#define pbf_pb2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = {
+ 0, 1, pbf_pb2_prty0_bb_b0_attn_idx, 0xda4050, 0xda405c, 0xda4058,
+ 0xda4054
+};
+
+static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = {
+ &pbf_pb2_prty0_bb_b0,
+};
+
+static const u16 pbf_pb2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_pb2_prty0_k2 = {
+ 0, 1, pbf_pb2_prty0_k2_attn_idx, 0xda4050, 0xda405c, 0xda4058, 0xda4054
+};
+
+static struct attn_hw_reg *pbf_pb2_prty_k2_regs[1] = {
+ &pbf_pb2_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rpb_int_attn_desc[9] = {
+ "rpb_address_error",
+ "rpb_eop_error",
+ "rpb_ififo_error",
+ "rpb_pfifo_error",
+ "rpb_db_buf_error",
+ "rpb_th_exec_error",
+ "rpb_tq_error_wr",
+ "rpb_tq_error_rd_th",
+ "rpb_tq_error_rd_ih",
+};
+#else
+#define rpb_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 rpb_int0_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rpb_int0_bb_a0 = {
+ 0, 9, rpb_int0_bb_a0_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044
+};
+
+static struct attn_hw_reg *rpb_int_bb_a0_regs[1] = {
+ &rpb_int0_bb_a0,
+};
+
+static const u16 rpb_int0_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rpb_int0_bb_b0 = {
+ 0, 9, rpb_int0_bb_b0_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044
+};
+
+static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = {
+ &rpb_int0_bb_b0,
+};
+
+static const u16 rpb_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rpb_int0_k2 = {
+ 0, 9, rpb_int0_k2_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044
+};
+
+static struct attn_hw_reg *rpb_int_k2_regs[1] = {
+ &rpb_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rpb_prty_attn_desc[1] = {
+ "rpb_datapath_registers",
+};
+#else
+#define rpb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 rpb_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg rpb_prty0_bb_b0 = {
+ 0, 1, rpb_prty0_bb_b0_attn_idx, 0x23c050, 0x23c05c, 0x23c058, 0x23c054
+};
+
+static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = {
+ &rpb_prty0_bb_b0,
+};
+
+static const u16 rpb_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg rpb_prty0_k2 = {
+ 0, 1, rpb_prty0_k2_attn_idx, 0x23c050, 0x23c05c, 0x23c058, 0x23c054
+};
+
+static struct attn_hw_reg *rpb_prty_k2_regs[1] = {
+ &rpb_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *btb_int_attn_desc[139] = {
+ "btb_address_error",
+ "btb_rc_pkt0_rls_error",
+ "btb_unused_0",
+ "btb_rc_pkt0_len_error",
+ "btb_unused_1",
+ "btb_rc_pkt0_protocol_error",
+ "btb_rc_pkt1_rls_error",
+ "btb_unused_2",
+ "btb_rc_pkt1_len_error",
+ "btb_unused_3",
+ "btb_rc_pkt1_protocol_error",
+ "btb_rc_pkt2_rls_error",
+ "btb_unused_4",
+ "btb_rc_pkt2_len_error",
+ "btb_unused_5",
+ "btb_rc_pkt2_protocol_error",
+ "btb_rc_pkt3_rls_error",
+ "btb_unused_6",
+ "btb_rc_pkt3_len_error",
+ "btb_unused_7",
+ "btb_rc_pkt3_protocol_error",
+ "btb_rc_sop_req_tc_port_error",
+ "btb_unused_8",
+ "btb_wc0_protocol_error",
+ "btb_unused_9",
+ "btb_ll_blk_error",
+ "btb_ll_arb_calc_error",
+ "btb_fc_alm_calc_error",
+ "btb_wc0_inp_fifo_error",
+ "btb_wc0_sop_fifo_error",
+ "btb_wc0_len_fifo_error",
+ "btb_wc0_eop_fifo_error",
+ "btb_wc0_queue_fifo_error",
+ "btb_wc0_free_point_fifo_error",
+ "btb_wc0_next_point_fifo_error",
+ "btb_wc0_strt_fifo_error",
+ "btb_wc0_second_dscr_fifo_error",
+ "btb_wc0_pkt_avail_fifo_error",
+ "btb_wc0_notify_fifo_error",
+ "btb_wc0_ll_req_fifo_error",
+ "btb_wc0_ll_pa_cnt_error",
+ "btb_wc0_bb_pa_cnt_error",
+ "btb_wc_dup_upd_data_fifo_error",
+ "btb_wc_dup_rsp_dscr_fifo_error",
+ "btb_wc_dup_upd_point_fifo_error",
+ "btb_wc_dup_pkt_avail_fifo_error",
+ "btb_wc_dup_pkt_avail_cnt_error",
+ "btb_rc_pkt0_side_fifo_error",
+ "btb_rc_pkt0_req_fifo_error",
+ "btb_rc_pkt0_blk_fifo_error",
+ "btb_rc_pkt0_rls_left_fifo_error",
+ "btb_rc_pkt0_strt_ptr_fifo_error",
+ "btb_rc_pkt0_second_ptr_fifo_error",
+ "btb_rc_pkt0_rsp_fifo_error",
+ "btb_rc_pkt0_dscr_fifo_error",
+ "btb_rc_pkt1_side_fifo_error",
+ "btb_rc_pkt1_req_fifo_error",
+ "btb_rc_pkt1_blk_fifo_error",
+ "btb_rc_pkt1_rls_left_fifo_error",
+ "btb_rc_pkt1_strt_ptr_fifo_error",
+ "btb_rc_pkt1_second_ptr_fifo_error",
+ "btb_rc_pkt1_rsp_fifo_error",
+ "btb_rc_pkt1_dscr_fifo_error",
+ "btb_rc_pkt2_side_fifo_error",
+ "btb_rc_pkt2_req_fifo_error",
+ "btb_rc_pkt2_blk_fifo_error",
+ "btb_rc_pkt2_rls_left_fifo_error",
+ "btb_rc_pkt2_strt_ptr_fifo_error",
+ "btb_rc_pkt2_second_ptr_fifo_error",
+ "btb_rc_pkt2_rsp_fifo_error",
+ "btb_rc_pkt2_dscr_fifo_error",
+ "btb_rc_pkt3_side_fifo_error",
+ "btb_rc_pkt3_req_fifo_error",
+ "btb_rc_pkt3_blk_fifo_error",
+ "btb_rc_pkt3_rls_left_fifo_error",
+ "btb_rc_pkt3_strt_ptr_fifo_error",
+ "btb_rc_pkt3_second_ptr_fifo_error",
+ "btb_rc_pkt3_rsp_fifo_error",
+ "btb_rc_pkt3_dscr_fifo_error",
+ "btb_rc_sop_queue_fifo_error",
+ "btb_ll_arb_rls_fifo_error",
+ "btb_ll_arb_prefetch_fifo_error",
+ "btb_rc_pkt0_rls_fifo_error",
+ "btb_rc_pkt1_rls_fifo_error",
+ "btb_rc_pkt2_rls_fifo_error",
+ "btb_rc_pkt3_rls_fifo_error",
+ "btb_rc_pkt4_rls_fifo_error",
+ "btb_rc_pkt5_rls_fifo_error",
+ "btb_rc_pkt6_rls_fifo_error",
+ "btb_rc_pkt7_rls_fifo_error",
+ "btb_rc_pkt4_rls_error",
+ "btb_rc_pkt4_len_error",
+ "btb_rc_pkt4_protocol_error",
+ "btb_rc_pkt4_side_fifo_error",
+ "btb_rc_pkt4_req_fifo_error",
+ "btb_rc_pkt4_blk_fifo_error",
+ "btb_rc_pkt4_rls_left_fifo_error",
+ "btb_rc_pkt4_strt_ptr_fifo_error",
+ "btb_rc_pkt4_second_ptr_fifo_error",
+ "btb_rc_pkt4_rsp_fifo_error",
+ "btb_rc_pkt4_dscr_fifo_error",
+ "btb_rc_pkt5_rls_error",
+ "btb_rc_pkt5_len_error",
+ "btb_rc_pkt5_protocol_error",
+ "btb_rc_pkt5_side_fifo_error",
+ "btb_rc_pkt5_req_fifo_error",
+ "btb_rc_pkt5_blk_fifo_error",
+ "btb_rc_pkt5_rls_left_fifo_error",
+ "btb_rc_pkt5_strt_ptr_fifo_error",
+ "btb_rc_pkt5_second_ptr_fifo_error",
+ "btb_rc_pkt5_rsp_fifo_error",
+ "btb_rc_pkt5_dscr_fifo_error",
+ "btb_rc_pkt6_rls_error",
+ "btb_rc_pkt6_len_error",
+ "btb_rc_pkt6_protocol_error",
+ "btb_rc_pkt6_side_fifo_error",
+ "btb_rc_pkt6_req_fifo_error",
+ "btb_rc_pkt6_blk_fifo_error",
+ "btb_rc_pkt6_rls_left_fifo_error",
+ "btb_rc_pkt6_strt_ptr_fifo_error",
+ "btb_rc_pkt6_second_ptr_fifo_error",
+ "btb_rc_pkt6_rsp_fifo_error",
+ "btb_rc_pkt6_dscr_fifo_error",
+ "btb_rc_pkt7_rls_error",
+ "btb_rc_pkt7_len_error",
+ "btb_rc_pkt7_protocol_error",
+ "btb_rc_pkt7_side_fifo_error",
+ "btb_rc_pkt7_req_fifo_error",
+ "btb_rc_pkt7_blk_fifo_error",
+ "btb_rc_pkt7_rls_left_fifo_error",
+ "btb_rc_pkt7_strt_ptr_fifo_error",
+ "btb_rc_pkt7_second_ptr_fifo_error",
+ "btb_rc_pkt7_rsp_fifo_error",
+ "btb_packet_available_sync_fifo_push_error",
+ "btb_wc6_notify_fifo_error",
+ "btb_wc9_queue_fifo_error",
+ "btb_wc0_sync_fifo_push_error",
+ "btb_rls_sync_fifo_push_error",
+ "btb_rc_pkt7_dscr_fifo_error",
+};
+#else
+#define btb_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 btb_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25,
+};
+
+static struct attn_hw_reg btb_int0_bb_a0 = {
+ 0, 16, btb_int0_bb_a0_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4
+};
+
+static const u16 btb_int1_bb_a0_attn_idx[16] = {
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg btb_int1_bb_a0 = {
+ 1, 16, btb_int1_bb_a0_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc
+};
+
+static const u16 btb_int2_bb_a0_attn_idx[4] = {
+ 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg btb_int2_bb_a0 = {
+ 2, 4, btb_int2_bb_a0_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4
+};
+
+static const u16 btb_int3_bb_a0_attn_idx[32] = {
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+};
+
+static struct attn_hw_reg btb_int3_bb_a0 = {
+ 3, 32, btb_int3_bb_a0_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c
+};
+
+static const u16 btb_int4_bb_a0_attn_idx[23] = {
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100,
+};
+
+static struct attn_hw_reg btb_int4_bb_a0 = {
+ 4, 23, btb_int4_bb_a0_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124
+};
+
+static const u16 btb_int5_bb_a0_attn_idx[32] = {
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131,
+ 132,
+};
+
+static struct attn_hw_reg btb_int5_bb_a0 = {
+ 5, 32, btb_int5_bb_a0_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c
+};
+
+static const u16 btb_int6_bb_a0_attn_idx[1] = {
+ 133,
+};
+
+static struct attn_hw_reg btb_int6_bb_a0 = {
+ 6, 1, btb_int6_bb_a0_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154
+};
+
+static const u16 btb_int8_bb_a0_attn_idx[1] = {
+ 134,
+};
+
+static struct attn_hw_reg btb_int8_bb_a0 = {
+ 7, 1, btb_int8_bb_a0_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188
+};
+
+static const u16 btb_int9_bb_a0_attn_idx[1] = {
+ 135,
+};
+
+static struct attn_hw_reg btb_int9_bb_a0 = {
+ 8, 1, btb_int9_bb_a0_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0
+};
+
+static const u16 btb_int10_bb_a0_attn_idx[1] = {
+ 136,
+};
+
+static struct attn_hw_reg btb_int10_bb_a0 = {
+ 9, 1, btb_int10_bb_a0_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8
+};
+
+static const u16 btb_int11_bb_a0_attn_idx[2] = {
+ 137, 138,
+};
+
+static struct attn_hw_reg btb_int11_bb_a0 = {
+ 10, 2, btb_int11_bb_a0_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0
+};
+
+static struct attn_hw_reg *btb_int_bb_a0_regs[11] = {
+ &btb_int0_bb_a0, &btb_int1_bb_a0, &btb_int2_bb_a0, &btb_int3_bb_a0,
+ &btb_int4_bb_a0, &btb_int5_bb_a0, &btb_int6_bb_a0, &btb_int8_bb_a0,
+ &btb_int9_bb_a0, &btb_int10_bb_a0,
+ &btb_int11_bb_a0,
+};
+
+static const u16 btb_int0_bb_b0_attn_idx[16] = {
+ 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25,
+};
+
+static struct attn_hw_reg btb_int0_bb_b0 = {
+ 0, 16, btb_int0_bb_b0_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4
+};
+
+static const u16 btb_int1_bb_b0_attn_idx[16] = {
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg btb_int1_bb_b0 = {
+ 1, 16, btb_int1_bb_b0_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc
+};
+
+static const u16 btb_int2_bb_b0_attn_idx[4] = {
+ 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg btb_int2_bb_b0 = {
+ 2, 4, btb_int2_bb_b0_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4
+};
+
+static const u16 btb_int3_bb_b0_attn_idx[32] = {
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+};
+
+static struct attn_hw_reg btb_int3_bb_b0 = {
+ 3, 32, btb_int3_bb_b0_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c
+};
+
+static const u16 btb_int4_bb_b0_attn_idx[23] = {
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100,
+};
+
+static struct attn_hw_reg btb_int4_bb_b0 = {
+ 4, 23, btb_int4_bb_b0_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124
+};
+
+static const u16 btb_int5_bb_b0_attn_idx[32] = {
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131,
+ 132,
+};
+
+static struct attn_hw_reg btb_int5_bb_b0 = {
+ 5, 32, btb_int5_bb_b0_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c
+};
+
+static const u16 btb_int6_bb_b0_attn_idx[1] = {
+ 133,
+};
+
+static struct attn_hw_reg btb_int6_bb_b0 = {
+ 6, 1, btb_int6_bb_b0_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154
+};
+
+static const u16 btb_int8_bb_b0_attn_idx[1] = {
+ 134,
+};
+
+static struct attn_hw_reg btb_int8_bb_b0 = {
+ 7, 1, btb_int8_bb_b0_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188
+};
+
+static const u16 btb_int9_bb_b0_attn_idx[1] = {
+ 135,
+};
+
+static struct attn_hw_reg btb_int9_bb_b0 = {
+ 8, 1, btb_int9_bb_b0_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0
+};
+
+static const u16 btb_int10_bb_b0_attn_idx[1] = {
+ 136,
+};
+
+static struct attn_hw_reg btb_int10_bb_b0 = {
+ 9, 1, btb_int10_bb_b0_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8
+};
+
+static const u16 btb_int11_bb_b0_attn_idx[2] = {
+ 137, 138,
+};
+
+static struct attn_hw_reg btb_int11_bb_b0 = {
+ 10, 2, btb_int11_bb_b0_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0
+};
+
+static struct attn_hw_reg *btb_int_bb_b0_regs[11] = {
+ &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0,
+ &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0,
+ &btb_int9_bb_b0, &btb_int10_bb_b0,
+ &btb_int11_bb_b0,
+};
+
+static const u16 btb_int0_k2_attn_idx[16] = {
+ 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25,
+};
+
+static struct attn_hw_reg btb_int0_k2 = {
+ 0, 16, btb_int0_k2_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4
+};
+
+static const u16 btb_int1_k2_attn_idx[16] = {
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg btb_int1_k2 = {
+ 1, 16, btb_int1_k2_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc
+};
+
+static const u16 btb_int2_k2_attn_idx[4] = {
+ 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg btb_int2_k2 = {
+ 2, 4, btb_int2_k2_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4
+};
+
+static const u16 btb_int3_k2_attn_idx[32] = {
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+};
+
+static struct attn_hw_reg btb_int3_k2 = {
+ 3, 32, btb_int3_k2_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c
+};
+
+static const u16 btb_int4_k2_attn_idx[23] = {
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100,
+};
+
+static struct attn_hw_reg btb_int4_k2 = {
+ 4, 23, btb_int4_k2_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124
+};
+
+static const u16 btb_int5_k2_attn_idx[32] = {
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131,
+ 132,
+};
+
+static struct attn_hw_reg btb_int5_k2 = {
+ 5, 32, btb_int5_k2_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c
+};
+
+static const u16 btb_int6_k2_attn_idx[1] = {
+ 133,
+};
+
+static struct attn_hw_reg btb_int6_k2 = {
+ 6, 1, btb_int6_k2_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154
+};
+
+static const u16 btb_int8_k2_attn_idx[1] = {
+ 134,
+};
+
+static struct attn_hw_reg btb_int8_k2 = {
+ 7, 1, btb_int8_k2_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188
+};
+
+static const u16 btb_int9_k2_attn_idx[1] = {
+ 135,
+};
+
+static struct attn_hw_reg btb_int9_k2 = {
+ 8, 1, btb_int9_k2_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0
+};
+
+static const u16 btb_int10_k2_attn_idx[1] = {
+ 136,
+};
+
+static struct attn_hw_reg btb_int10_k2 = {
+ 9, 1, btb_int10_k2_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8
+};
+
+static const u16 btb_int11_k2_attn_idx[2] = {
+ 137, 138,
+};
+
+static struct attn_hw_reg btb_int11_k2 = {
+ 10, 2, btb_int11_k2_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0
+};
+
+static struct attn_hw_reg *btb_int_k2_regs[11] = {
+ &btb_int0_k2, &btb_int1_k2, &btb_int2_k2, &btb_int3_k2, &btb_int4_k2,
+ &btb_int5_k2, &btb_int6_k2, &btb_int8_k2, &btb_int9_k2, &btb_int10_k2,
+ &btb_int11_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *btb_prty_attn_desc[36] = {
+ "btb_ll_bank0_mem_prty",
+ "btb_ll_bank1_mem_prty",
+ "btb_ll_bank2_mem_prty",
+ "btb_ll_bank3_mem_prty",
+ "btb_datapath_registers",
+ "btb_mem001_i_ecc_rf_int",
+ "btb_mem008_i_ecc_rf_int",
+ "btb_mem009_i_ecc_rf_int",
+ "btb_mem010_i_ecc_rf_int",
+ "btb_mem011_i_ecc_rf_int",
+ "btb_mem012_i_ecc_rf_int",
+ "btb_mem013_i_ecc_rf_int",
+ "btb_mem014_i_ecc_rf_int",
+ "btb_mem015_i_ecc_rf_int",
+ "btb_mem016_i_ecc_rf_int",
+ "btb_mem002_i_ecc_rf_int",
+ "btb_mem003_i_ecc_rf_int",
+ "btb_mem004_i_ecc_rf_int",
+ "btb_mem005_i_ecc_rf_int",
+ "btb_mem006_i_ecc_rf_int",
+ "btb_mem007_i_ecc_rf_int",
+ "btb_mem033_i_mem_prty",
+ "btb_mem035_i_mem_prty",
+ "btb_mem034_i_mem_prty",
+ "btb_mem032_i_mem_prty",
+ "btb_mem031_i_mem_prty",
+ "btb_mem021_i_mem_prty",
+ "btb_mem022_i_mem_prty",
+ "btb_mem023_i_mem_prty",
+ "btb_mem024_i_mem_prty",
+ "btb_mem025_i_mem_prty",
+ "btb_mem026_i_mem_prty",
+ "btb_mem027_i_mem_prty",
+ "btb_mem028_i_mem_prty",
+ "btb_mem030_i_mem_prty",
+ "btb_mem029_i_mem_prty",
+};
+#else
+#define btb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 btb_prty1_bb_a0_attn_idx[27] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 26, 27,
+ 28,
+ 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg btb_prty1_bb_a0 = {
+ 0, 27, btb_prty1_bb_a0_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404
+};
+
+static struct attn_hw_reg *btb_prty_bb_a0_regs[1] = {
+ &btb_prty1_bb_a0,
+};
+
+static const u16 btb_prty0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg btb_prty0_bb_b0 = {
+ 0, 5, btb_prty0_bb_b0_attn_idx, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0
+};
+
+static const u16 btb_prty1_bb_b0_attn_idx[23] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 30, 31,
+ 32,
+ 33, 34, 35,
+};
+
+static struct attn_hw_reg btb_prty1_bb_b0 = {
+ 1, 23, btb_prty1_bb_b0_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404
+};
+
+static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = {
+ &btb_prty0_bb_b0, &btb_prty1_bb_b0,
+};
+
+static const u16 btb_prty0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg btb_prty0_k2 = {
+ 0, 5, btb_prty0_k2_attn_idx, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0
+};
+
+static const u16 btb_prty1_k2_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg btb_prty1_k2 = {
+ 1, 31, btb_prty1_k2_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404
+};
+
+static struct attn_hw_reg *btb_prty_k2_regs[2] = {
+ &btb_prty0_k2, &btb_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_int_attn_desc[1] = {
+ "pbf_address_error",
+};
+#else
+#define pbf_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_int0_bb_a0 = {
+ 0, 1, pbf_int0_bb_a0_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184
+};
+
+static struct attn_hw_reg *pbf_int_bb_a0_regs[1] = {
+ &pbf_int0_bb_a0,
+};
+
+static const u16 pbf_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_int0_bb_b0 = {
+ 0, 1, pbf_int0_bb_b0_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184
+};
+
+static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = {
+ &pbf_int0_bb_b0,
+};
+
+static const u16 pbf_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_int0_k2 = {
+ 0, 1, pbf_int0_k2_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184
+};
+
+static struct attn_hw_reg *pbf_int_k2_regs[1] = {
+ &pbf_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_prty_attn_desc[59] = {
+ "pbf_datapath_registers",
+ "pbf_mem041_i_ecc_rf_int",
+ "pbf_mem042_i_ecc_rf_int",
+ "pbf_mem033_i_ecc_rf_int",
+ "pbf_mem003_i_ecc_rf_int",
+ "pbf_mem018_i_ecc_rf_int",
+ "pbf_mem009_i_ecc_0_rf_int",
+ "pbf_mem009_i_ecc_1_rf_int",
+ "pbf_mem012_i_ecc_0_rf_int",
+ "pbf_mem012_i_ecc_1_rf_int",
+ "pbf_mem012_i_ecc_2_rf_int",
+ "pbf_mem012_i_ecc_3_rf_int",
+ "pbf_mem012_i_ecc_4_rf_int",
+ "pbf_mem012_i_ecc_5_rf_int",
+ "pbf_mem012_i_ecc_6_rf_int",
+ "pbf_mem012_i_ecc_7_rf_int",
+ "pbf_mem012_i_ecc_8_rf_int",
+ "pbf_mem012_i_ecc_9_rf_int",
+ "pbf_mem012_i_ecc_10_rf_int",
+ "pbf_mem012_i_ecc_11_rf_int",
+ "pbf_mem012_i_ecc_12_rf_int",
+ "pbf_mem012_i_ecc_13_rf_int",
+ "pbf_mem012_i_ecc_14_rf_int",
+ "pbf_mem012_i_ecc_15_rf_int",
+ "pbf_mem040_i_mem_prty",
+ "pbf_mem039_i_mem_prty",
+ "pbf_mem038_i_mem_prty",
+ "pbf_mem034_i_mem_prty",
+ "pbf_mem032_i_mem_prty",
+ "pbf_mem031_i_mem_prty",
+ "pbf_mem030_i_mem_prty",
+ "pbf_mem029_i_mem_prty",
+ "pbf_mem022_i_mem_prty",
+ "pbf_mem023_i_mem_prty",
+ "pbf_mem021_i_mem_prty",
+ "pbf_mem020_i_mem_prty",
+ "pbf_mem001_i_mem_prty",
+ "pbf_mem002_i_mem_prty",
+ "pbf_mem006_i_mem_prty",
+ "pbf_mem007_i_mem_prty",
+ "pbf_mem005_i_mem_prty",
+ "pbf_mem004_i_mem_prty",
+ "pbf_mem028_i_mem_prty",
+ "pbf_mem026_i_mem_prty",
+ "pbf_mem027_i_mem_prty",
+ "pbf_mem019_i_mem_prty",
+ "pbf_mem016_i_mem_prty",
+ "pbf_mem017_i_mem_prty",
+ "pbf_mem008_i_mem_prty",
+ "pbf_mem011_i_mem_prty",
+ "pbf_mem010_i_mem_prty",
+ "pbf_mem024_i_mem_prty",
+ "pbf_mem025_i_mem_prty",
+ "pbf_mem037_i_mem_prty",
+ "pbf_mem036_i_mem_prty",
+ "pbf_mem035_i_mem_prty",
+ "pbf_mem014_i_mem_prty",
+ "pbf_mem015_i_mem_prty",
+ "pbf_mem013_i_mem_prty",
+};
+#else
+#define pbf_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pbf_prty1_bb_a0 = {
+ 0, 31, pbf_prty1_bb_a0_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204
+};
+
+static const u16 pbf_prty2_bb_a0_attn_idx[27] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58,
+};
+
+static struct attn_hw_reg pbf_prty2_bb_a0 = {
+ 1, 27, pbf_prty2_bb_a0_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214
+};
+
+static struct attn_hw_reg *pbf_prty_bb_a0_regs[2] = {
+ &pbf_prty1_bb_a0, &pbf_prty2_bb_a0,
+};
+
+static const u16 pbf_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_prty0_bb_b0 = {
+ 0, 1, pbf_prty0_bb_b0_attn_idx, 0xd80190, 0xd8019c, 0xd80198, 0xd80194
+};
+
+static const u16 pbf_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pbf_prty1_bb_b0 = {
+ 1, 31, pbf_prty1_bb_b0_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204
+};
+
+static const u16 pbf_prty2_bb_b0_attn_idx[27] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58,
+};
+
+static struct attn_hw_reg pbf_prty2_bb_b0 = {
+ 2, 27, pbf_prty2_bb_b0_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214
+};
+
+static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = {
+ &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0,
+};
+
+static const u16 pbf_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_prty0_k2 = {
+ 0, 1, pbf_prty0_k2_attn_idx, 0xd80190, 0xd8019c, 0xd80198, 0xd80194
+};
+
+static const u16 pbf_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pbf_prty1_k2 = {
+ 1, 31, pbf_prty1_k2_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204
+};
+
+static const u16 pbf_prty2_k2_attn_idx[27] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58,
+};
+
+static struct attn_hw_reg pbf_prty2_k2 = {
+ 2, 27, pbf_prty2_k2_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214
+};
+
+static struct attn_hw_reg *pbf_prty_k2_regs[3] = {
+ &pbf_prty0_k2, &pbf_prty1_k2, &pbf_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rdif_int_attn_desc[9] = {
+ "rdif_address_error",
+ "rdif_fatal_dix_err",
+ "rdif_fatal_config_err",
+ "rdif_cmd_fifo_err",
+ "rdif_order_fifo_err",
+ "rdif_rdata_fifo_err",
+ "rdif_dif_stop_err",
+ "rdif_partial_dif_w_eob",
+ "rdif_l1_dirty_bit",
+};
+#else
+#define rdif_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 rdif_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg rdif_int0_bb_a0 = {
+ 0, 8, rdif_int0_bb_a0_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184
+};
+
+static struct attn_hw_reg *rdif_int_bb_a0_regs[1] = {
+ &rdif_int0_bb_a0,
+};
+
+static const u16 rdif_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg rdif_int0_bb_b0 = {
+ 0, 8, rdif_int0_bb_b0_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184
+};
+
+static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = {
+ &rdif_int0_bb_b0,
+};
+
+static const u16 rdif_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rdif_int0_k2 = {
+ 0, 9, rdif_int0_k2_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184
+};
+
+static struct attn_hw_reg *rdif_int_k2_regs[1] = {
+ &rdif_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rdif_prty_attn_desc[2] = {
+ "rdif_unused_0",
+ "rdif_datapath_registers",
+};
+#else
+#define rdif_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 rdif_prty0_bb_b0_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg rdif_prty0_bb_b0 = {
+ 0, 1, rdif_prty0_bb_b0_attn_idx, 0x300190, 0x30019c, 0x300198, 0x300194
+};
+
+static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = {
+ &rdif_prty0_bb_b0,
+};
+
+static const u16 rdif_prty0_k2_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg rdif_prty0_k2 = {
+ 0, 1, rdif_prty0_k2_attn_idx, 0x300190, 0x30019c, 0x300198, 0x300194
+};
+
+static struct attn_hw_reg *rdif_prty_k2_regs[1] = {
+ &rdif_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tdif_int_attn_desc[9] = {
+ "tdif_address_error",
+ "tdif_fatal_dix_err",
+ "tdif_fatal_config_err",
+ "tdif_cmd_fifo_err",
+ "tdif_order_fifo_err",
+ "tdif_rdata_fifo_err",
+ "tdif_dif_stop_err",
+ "tdif_partial_dif_w_eob",
+ "tdif_l1_dirty_bit",
+};
+#else
+#define tdif_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tdif_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tdif_int0_bb_a0 = {
+ 0, 8, tdif_int0_bb_a0_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184
+};
+
+static struct attn_hw_reg *tdif_int_bb_a0_regs[1] = {
+ &tdif_int0_bb_a0,
+};
+
+static const u16 tdif_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tdif_int0_bb_b0 = {
+ 0, 8, tdif_int0_bb_b0_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184
+};
+
+static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = {
+ &tdif_int0_bb_b0,
+};
+
+static const u16 tdif_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tdif_int0_k2 = {
+ 0, 9, tdif_int0_k2_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184
+};
+
+static struct attn_hw_reg *tdif_int_k2_regs[1] = {
+ &tdif_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tdif_prty_attn_desc[13] = {
+ "tdif_unused_0",
+ "tdif_datapath_registers",
+ "tdif_mem005_i_ecc_rf_int",
+ "tdif_mem009_i_ecc_rf_int",
+ "tdif_mem010_i_ecc_rf_int",
+ "tdif_mem011_i_ecc_rf_int",
+ "tdif_mem001_i_mem_prty",
+ "tdif_mem003_i_mem_prty",
+ "tdif_mem002_i_mem_prty",
+ "tdif_mem006_i_mem_prty",
+ "tdif_mem007_i_mem_prty",
+ "tdif_mem008_i_mem_prty",
+ "tdif_mem004_i_mem_prty",
+};
+#else
+#define tdif_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tdif_prty1_bb_a0_attn_idx[11] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg tdif_prty1_bb_a0 = {
+ 0, 11, tdif_prty1_bb_a0_attn_idx, 0x310200, 0x31020c, 0x310208,
+ 0x310204
+};
+
+static struct attn_hw_reg *tdif_prty_bb_a0_regs[1] = {
+ &tdif_prty1_bb_a0,
+};
+
+static const u16 tdif_prty0_bb_b0_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg tdif_prty0_bb_b0 = {
+ 0, 1, tdif_prty0_bb_b0_attn_idx, 0x310190, 0x31019c, 0x310198, 0x310194
+};
+
+static const u16 tdif_prty1_bb_b0_attn_idx[11] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg tdif_prty1_bb_b0 = {
+ 1, 11, tdif_prty1_bb_b0_attn_idx, 0x310200, 0x31020c, 0x310208,
+ 0x310204
+};
+
+static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = {
+ &tdif_prty0_bb_b0, &tdif_prty1_bb_b0,
+};
+
+static const u16 tdif_prty0_k2_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg tdif_prty0_k2 = {
+ 0, 1, tdif_prty0_k2_attn_idx, 0x310190, 0x31019c, 0x310198, 0x310194
+};
+
+static const u16 tdif_prty1_k2_attn_idx[11] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg tdif_prty1_k2 = {
+ 1, 11, tdif_prty1_k2_attn_idx, 0x310200, 0x31020c, 0x310208, 0x310204
+};
+
+static struct attn_hw_reg *tdif_prty_k2_regs[2] = {
+ &tdif_prty0_k2, &tdif_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cdu_int_attn_desc[8] = {
+ "cdu_address_error",
+ "cdu_ccfc_ld_l1_num_error",
+ "cdu_tcfc_ld_l1_num_error",
+ "cdu_ccfc_wb_l1_num_error",
+ "cdu_tcfc_wb_l1_num_error",
+ "cdu_ccfc_cvld_error",
+ "cdu_tcfc_cvld_error",
+ "cdu_bvalid_error",
+};
+#else
+#define cdu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cdu_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg cdu_int0_bb_a0 = {
+ 0, 8, cdu_int0_bb_a0_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc
+};
+
+static struct attn_hw_reg *cdu_int_bb_a0_regs[1] = {
+ &cdu_int0_bb_a0,
+};
+
+static const u16 cdu_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg cdu_int0_bb_b0 = {
+ 0, 8, cdu_int0_bb_b0_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc
+};
+
+static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = {
+ &cdu_int0_bb_b0,
+};
+
+static const u16 cdu_int0_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg cdu_int0_k2 = {
+ 0, 8, cdu_int0_k2_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc
+};
+
+static struct attn_hw_reg *cdu_int_k2_regs[1] = {
+ &cdu_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cdu_prty_attn_desc[5] = {
+ "cdu_mem001_i_mem_prty",
+ "cdu_mem004_i_mem_prty",
+ "cdu_mem002_i_mem_prty",
+ "cdu_mem005_i_mem_prty",
+ "cdu_mem003_i_mem_prty",
+};
+#else
+#define cdu_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 cdu_prty1_bb_a0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg cdu_prty1_bb_a0 = {
+ 0, 5, cdu_prty1_bb_a0_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204
+};
+
+static struct attn_hw_reg *cdu_prty_bb_a0_regs[1] = {
+ &cdu_prty1_bb_a0,
+};
+
+static const u16 cdu_prty1_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg cdu_prty1_bb_b0 = {
+ 0, 5, cdu_prty1_bb_b0_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204
+};
+
+static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = {
+ &cdu_prty1_bb_b0,
+};
+
+static const u16 cdu_prty1_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg cdu_prty1_k2 = {
+ 0, 5, cdu_prty1_k2_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204
+};
+
+static struct attn_hw_reg *cdu_prty_k2_regs[1] = {
+ &cdu_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ccfc_int_attn_desc[2] = {
+ "ccfc_address_error",
+ "ccfc_exe_error",
+};
+#else
+#define ccfc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ccfc_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_int0_bb_a0 = {
+ 0, 2, ccfc_int0_bb_a0_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184
+};
+
+static struct attn_hw_reg *ccfc_int_bb_a0_regs[1] = {
+ &ccfc_int0_bb_a0,
+};
+
+static const u16 ccfc_int0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_int0_bb_b0 = {
+ 0, 2, ccfc_int0_bb_b0_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184
+};
+
+static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = {
+ &ccfc_int0_bb_b0,
+};
+
+static const u16 ccfc_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_int0_k2 = {
+ 0, 2, ccfc_int0_k2_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184
+};
+
+static struct attn_hw_reg *ccfc_int_k2_regs[1] = {
+ &ccfc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ccfc_prty_attn_desc[10] = {
+ "ccfc_mem001_i_ecc_rf_int",
+ "ccfc_mem003_i_mem_prty",
+ "ccfc_mem007_i_mem_prty",
+ "ccfc_mem006_i_mem_prty",
+ "ccfc_ccam_par_err",
+ "ccfc_scam_par_err",
+ "ccfc_lc_que_ram_porta_lsb_par_err",
+ "ccfc_lc_que_ram_porta_msb_par_err",
+ "ccfc_lc_que_ram_portb_lsb_par_err",
+ "ccfc_lc_que_ram_portb_msb_par_err",
+};
+#else
+#define ccfc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ccfc_prty1_bb_a0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg ccfc_prty1_bb_a0 = {
+ 0, 4, ccfc_prty1_bb_a0_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204
+};
+
+static const u16 ccfc_prty0_bb_a0_attn_idx[2] = {
+ 4, 5,
+};
+
+static struct attn_hw_reg ccfc_prty0_bb_a0 = {
+ 1, 2, ccfc_prty0_bb_a0_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8
+};
+
+static struct attn_hw_reg *ccfc_prty_bb_a0_regs[2] = {
+ &ccfc_prty1_bb_a0, &ccfc_prty0_bb_a0,
+};
+
+static const u16 ccfc_prty1_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_prty1_bb_b0 = {
+ 0, 2, ccfc_prty1_bb_b0_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204
+};
+
+static const u16 ccfc_prty0_bb_b0_attn_idx[6] = {
+ 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ccfc_prty0_bb_b0 = {
+ 1, 6, ccfc_prty0_bb_b0_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8
+};
+
+static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = {
+ &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0,
+};
+
+static const u16 ccfc_prty1_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_prty1_k2 = {
+ 0, 2, ccfc_prty1_k2_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204
+};
+
+static const u16 ccfc_prty0_k2_attn_idx[6] = {
+ 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ccfc_prty0_k2 = {
+ 1, 6, ccfc_prty0_k2_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8
+};
+
+static struct attn_hw_reg *ccfc_prty_k2_regs[2] = {
+ &ccfc_prty1_k2, &ccfc_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tcfc_int_attn_desc[2] = {
+ "tcfc_address_error",
+ "tcfc_exe_error",
+};
+#else
+#define tcfc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcfc_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_int0_bb_a0 = {
+ 0, 2, tcfc_int0_bb_a0_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184
+};
+
+static struct attn_hw_reg *tcfc_int_bb_a0_regs[1] = {
+ &tcfc_int0_bb_a0,
+};
+
+static const u16 tcfc_int0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_int0_bb_b0 = {
+ 0, 2, tcfc_int0_bb_b0_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184
+};
+
+static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = {
+ &tcfc_int0_bb_b0,
+};
+
+static const u16 tcfc_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_int0_k2 = {
+ 0, 2, tcfc_int0_k2_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184
+};
+
+static struct attn_hw_reg *tcfc_int_k2_regs[1] = {
+ &tcfc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tcfc_prty_attn_desc[10] = {
+ "tcfc_mem002_i_mem_prty",
+ "tcfc_mem001_i_mem_prty",
+ "tcfc_mem006_i_mem_prty",
+ "tcfc_mem005_i_mem_prty",
+ "tcfc_ccam_par_err",
+ "tcfc_scam_par_err",
+ "tcfc_lc_que_ram_porta_lsb_par_err",
+ "tcfc_lc_que_ram_porta_msb_par_err",
+ "tcfc_lc_que_ram_portb_lsb_par_err",
+ "tcfc_lc_que_ram_portb_msb_par_err",
+};
+#else
+#define tcfc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcfc_prty1_bb_a0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg tcfc_prty1_bb_a0 = {
+ 0, 4, tcfc_prty1_bb_a0_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204
+};
+
+static const u16 tcfc_prty0_bb_a0_attn_idx[2] = {
+ 4, 5,
+};
+
+static struct attn_hw_reg tcfc_prty0_bb_a0 = {
+ 1, 2, tcfc_prty0_bb_a0_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8
+};
+
+static struct attn_hw_reg *tcfc_prty_bb_a0_regs[2] = {
+ &tcfc_prty1_bb_a0, &tcfc_prty0_bb_a0,
+};
+
+static const u16 tcfc_prty1_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_prty1_bb_b0 = {
+ 0, 2, tcfc_prty1_bb_b0_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204
+};
+
+static const u16 tcfc_prty0_bb_b0_attn_idx[6] = {
+ 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tcfc_prty0_bb_b0 = {
+ 1, 6, tcfc_prty0_bb_b0_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8
+};
+
+static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = {
+ &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0,
+};
+
+static const u16 tcfc_prty1_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_prty1_k2 = {
+ 0, 2, tcfc_prty1_k2_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204
+};
+
+static const u16 tcfc_prty0_k2_attn_idx[6] = {
+ 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tcfc_prty0_k2 = {
+ 1, 6, tcfc_prty0_k2_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8
+};
+
+static struct attn_hw_reg *tcfc_prty_k2_regs[2] = {
+ &tcfc_prty1_k2, &tcfc_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *igu_int_attn_desc[11] = {
+ "igu_address_error",
+ "igu_ctrl_fifo_error_err",
+ "igu_pxp_req_length_too_big",
+ "igu_host_tries2access_prod_upd",
+ "igu_vf_tries2acc_attn_cmd",
+ "igu_mme_bigger_then_5",
+ "igu_sb_index_is_not_valid",
+ "igu_durin_int_read_with_simd_dis",
+ "igu_cmd_fid_not_match",
+ "igu_segment_access_invalid",
+ "igu_attn_prod_acc",
+};
+#else
+#define igu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 igu_int0_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg igu_int0_bb_a0 = {
+ 0, 11, igu_int0_bb_a0_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184
+};
+
+static struct attn_hw_reg *igu_int_bb_a0_regs[1] = {
+ &igu_int0_bb_a0,
+};
+
+static const u16 igu_int0_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg igu_int0_bb_b0 = {
+ 0, 11, igu_int0_bb_b0_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184
+};
+
+static struct attn_hw_reg *igu_int_bb_b0_regs[1] = {
+ &igu_int0_bb_b0,
+};
+
+static const u16 igu_int0_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg igu_int0_k2 = {
+ 0, 11, igu_int0_k2_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184
+};
+
+static struct attn_hw_reg *igu_int_k2_regs[1] = {
+ &igu_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *igu_prty_attn_desc[42] = {
+ "igu_cam_parity",
+ "igu_mem009_i_ecc_rf_int",
+ "igu_mem015_i_mem_prty",
+ "igu_mem016_i_mem_prty",
+ "igu_mem017_i_mem_prty",
+ "igu_mem018_i_mem_prty",
+ "igu_mem019_i_mem_prty",
+ "igu_mem001_i_mem_prty",
+ "igu_mem002_i_mem_prty_0",
+ "igu_mem002_i_mem_prty_1",
+ "igu_mem004_i_mem_prty_0",
+ "igu_mem004_i_mem_prty_1",
+ "igu_mem004_i_mem_prty_2",
+ "igu_mem003_i_mem_prty",
+ "igu_mem005_i_mem_prty",
+ "igu_mem006_i_mem_prty_0",
+ "igu_mem006_i_mem_prty_1",
+ "igu_mem008_i_mem_prty_0",
+ "igu_mem008_i_mem_prty_1",
+ "igu_mem008_i_mem_prty_2",
+ "igu_mem007_i_mem_prty",
+ "igu_mem010_i_mem_prty_0",
+ "igu_mem010_i_mem_prty_1",
+ "igu_mem012_i_mem_prty_0",
+ "igu_mem012_i_mem_prty_1",
+ "igu_mem012_i_mem_prty_2",
+ "igu_mem011_i_mem_prty",
+ "igu_mem013_i_mem_prty",
+ "igu_mem014_i_mem_prty",
+ "igu_mem020_i_mem_prty",
+ "igu_mem003_i_mem_prty_0",
+ "igu_mem003_i_mem_prty_1",
+ "igu_mem003_i_mem_prty_2",
+ "igu_mem002_i_mem_prty",
+ "igu_mem007_i_mem_prty_0",
+ "igu_mem007_i_mem_prty_1",
+ "igu_mem007_i_mem_prty_2",
+ "igu_mem006_i_mem_prty",
+ "igu_mem010_i_mem_prty_2",
+ "igu_mem010_i_mem_prty_3",
+ "igu_mem013_i_mem_prty_0",
+ "igu_mem013_i_mem_prty_1",
+};
+#else
+#define igu_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 igu_prty0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg igu_prty0_bb_a0 = {
+ 0, 1, igu_prty0_bb_a0_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194
+};
+
+static const u16 igu_prty1_bb_a0_attn_idx[31] = {
+ 1, 3, 4, 5, 6, 7, 10, 11, 14, 17, 18, 21, 22, 23, 24, 25, 26, 28, 29,
+ 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg igu_prty1_bb_a0 = {
+ 1, 31, igu_prty1_bb_a0_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204
+};
+
+static const u16 igu_prty2_bb_a0_attn_idx[1] = {
+ 2,
+};
+
+static struct attn_hw_reg igu_prty2_bb_a0 = {
+ 2, 1, igu_prty2_bb_a0_attn_idx, 0x180210, 0x18021c, 0x180218, 0x180214
+};
+
+static struct attn_hw_reg *igu_prty_bb_a0_regs[3] = {
+ &igu_prty0_bb_a0, &igu_prty1_bb_a0, &igu_prty2_bb_a0,
+};
+
+static const u16 igu_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg igu_prty0_bb_b0 = {
+ 0, 1, igu_prty0_bb_b0_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194
+};
+
+static const u16 igu_prty1_bb_b0_attn_idx[31] = {
+ 1, 3, 4, 5, 6, 7, 10, 11, 14, 17, 18, 21, 22, 23, 24, 25, 26, 28, 29,
+ 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg igu_prty1_bb_b0 = {
+ 1, 31, igu_prty1_bb_b0_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204
+};
+
+static const u16 igu_prty2_bb_b0_attn_idx[1] = {
+ 2,
+};
+
+static struct attn_hw_reg igu_prty2_bb_b0 = {
+ 2, 1, igu_prty2_bb_b0_attn_idx, 0x180210, 0x18021c, 0x180218, 0x180214
+};
+
+static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = {
+ &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0,
+};
+
+static const u16 igu_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg igu_prty0_k2 = {
+ 0, 1, igu_prty0_k2_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194
+};
+
+static const u16 igu_prty1_k2_attn_idx[28] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28,
+};
+
+static struct attn_hw_reg igu_prty1_k2 = {
+ 1, 28, igu_prty1_k2_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204
+};
+
+static struct attn_hw_reg *igu_prty_k2_regs[2] = {
+ &igu_prty0_k2, &igu_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cau_int_attn_desc[11] = {
+ "cau_address_error",
+ "cau_unauthorized_pxp_rd_cmd",
+ "cau_unauthorized_pxp_length_cmd",
+ "cau_pxp_sb_address_error",
+ "cau_pxp_pi_number_error",
+ "cau_cleanup_reg_sb_idx_error",
+ "cau_fsm_invalid_line",
+ "cau_cqe_fifo_err",
+ "cau_igu_wdata_fifo_err",
+ "cau_igu_req_fifo_err",
+ "cau_igu_cmd_fifo_err",
+};
+#else
+#define cau_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cau_int0_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg cau_int0_bb_a0 = {
+ 0, 11, cau_int0_bb_a0_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0
+};
+
+static struct attn_hw_reg *cau_int_bb_a0_regs[1] = {
+ &cau_int0_bb_a0,
+};
+
+static const u16 cau_int0_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg cau_int0_bb_b0 = {
+ 0, 11, cau_int0_bb_b0_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0
+};
+
+static struct attn_hw_reg *cau_int_bb_b0_regs[1] = {
+ &cau_int0_bb_b0,
+};
+
+static const u16 cau_int0_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg cau_int0_k2 = {
+ 0, 11, cau_int0_k2_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0
+};
+
+static struct attn_hw_reg *cau_int_k2_regs[1] = {
+ &cau_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cau_prty_attn_desc[15] = {
+ "cau_mem006_i_ecc_rf_int",
+ "cau_mem001_i_ecc_0_rf_int",
+ "cau_mem001_i_ecc_1_rf_int",
+ "cau_mem002_i_ecc_rf_int",
+ "cau_mem004_i_ecc_rf_int",
+ "cau_mem005_i_mem_prty",
+ "cau_mem007_i_mem_prty",
+ "cau_mem008_i_mem_prty",
+ "cau_mem009_i_mem_prty",
+ "cau_mem010_i_mem_prty",
+ "cau_mem011_i_mem_prty",
+ "cau_mem003_i_mem_prty_0",
+ "cau_mem003_i_mem_prty_1",
+ "cau_mem002_i_mem_prty",
+ "cau_mem004_i_mem_prty",
+};
+#else
+#define cau_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 cau_prty1_bb_a0_attn_idx[13] = {
+ 0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg cau_prty1_bb_a0 = {
+ 0, 13, cau_prty1_bb_a0_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204
+};
+
+static struct attn_hw_reg *cau_prty_bb_a0_regs[1] = {
+ &cau_prty1_bb_a0,
+};
+
+static const u16 cau_prty1_bb_b0_attn_idx[13] = {
+ 0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg cau_prty1_bb_b0 = {
+ 0, 13, cau_prty1_bb_b0_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204
+};
+
+static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = {
+ &cau_prty1_bb_b0,
+};
+
+static const u16 cau_prty1_k2_attn_idx[13] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg cau_prty1_k2 = {
+ 0, 13, cau_prty1_k2_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204
+};
+
+static struct attn_hw_reg *cau_prty_k2_regs[1] = {
+ &cau_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *umac_int_attn_desc[2] = {
+ "umac_address_error",
+ "umac_tx_overflow",
+};
+#else
+#define umac_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 umac_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg umac_int0_k2 = {
+ 0, 2, umac_int0_k2_attn_idx, 0x51180, 0x5118c, 0x51188, 0x51184
+};
+
+static struct attn_hw_reg *umac_int_k2_regs[1] = {
+ &umac_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dbg_int_attn_desc[1] = {
+ "dbg_address_error",
+};
+#else
+#define dbg_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 dbg_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_int0_bb_a0 = {
+ 0, 1, dbg_int0_bb_a0_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184
+};
+
+static struct attn_hw_reg *dbg_int_bb_a0_regs[1] = {
+ &dbg_int0_bb_a0,
+};
+
+static const u16 dbg_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_int0_bb_b0 = {
+ 0, 1, dbg_int0_bb_b0_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184
+};
+
+static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = {
+ &dbg_int0_bb_b0,
+};
+
+static const u16 dbg_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_int0_k2 = {
+ 0, 1, dbg_int0_k2_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184
+};
+
+static struct attn_hw_reg *dbg_int_k2_regs[1] = {
+ &dbg_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dbg_prty_attn_desc[1] = {
+ "dbg_mem001_i_mem_prty",
+};
+#else
+#define dbg_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 dbg_prty1_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_prty1_bb_a0 = {
+ 0, 1, dbg_prty1_bb_a0_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204
+};
+
+static struct attn_hw_reg *dbg_prty_bb_a0_regs[1] = {
+ &dbg_prty1_bb_a0,
+};
+
+static const u16 dbg_prty1_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_prty1_bb_b0 = {
+ 0, 1, dbg_prty1_bb_b0_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204
+};
+
+static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = {
+ &dbg_prty1_bb_b0,
+};
+
+static const u16 dbg_prty1_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_prty1_k2 = {
+ 0, 1, dbg_prty1_k2_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204
+};
+
+static struct attn_hw_reg *dbg_prty_k2_regs[1] = {
+ &dbg_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nig_int_attn_desc[196] = {
+ "nig_address_error",
+ "nig_debug_fifo_error",
+ "nig_dorq_fifo_error",
+ "nig_dbg_syncfifo_error_wr",
+ "nig_dorq_syncfifo_error_wr",
+ "nig_storm_syncfifo_error_wr",
+ "nig_dbgmux_syncfifo_error_wr",
+ "nig_msdm_syncfifo_error_wr",
+ "nig_tsdm_syncfifo_error_wr",
+ "nig_usdm_syncfifo_error_wr",
+ "nig_xsdm_syncfifo_error_wr",
+ "nig_ysdm_syncfifo_error_wr",
+ "nig_tx_sopq0_error",
+ "nig_tx_sopq1_error",
+ "nig_tx_sopq2_error",
+ "nig_tx_sopq3_error",
+ "nig_tx_sopq4_error",
+ "nig_tx_sopq5_error",
+ "nig_tx_sopq6_error",
+ "nig_tx_sopq7_error",
+ "nig_tx_sopq8_error",
+ "nig_tx_sopq9_error",
+ "nig_tx_sopq10_error",
+ "nig_tx_sopq11_error",
+ "nig_tx_sopq12_error",
+ "nig_tx_sopq13_error",
+ "nig_tx_sopq14_error",
+ "nig_tx_sopq15_error",
+ "nig_lb_sopq0_error",
+ "nig_lb_sopq1_error",
+ "nig_lb_sopq2_error",
+ "nig_lb_sopq3_error",
+ "nig_lb_sopq4_error",
+ "nig_lb_sopq5_error",
+ "nig_lb_sopq6_error",
+ "nig_lb_sopq7_error",
+ "nig_lb_sopq8_error",
+ "nig_lb_sopq9_error",
+ "nig_lb_sopq10_error",
+ "nig_lb_sopq11_error",
+ "nig_lb_sopq12_error",
+ "nig_lb_sopq13_error",
+ "nig_lb_sopq14_error",
+ "nig_lb_sopq15_error",
+ "nig_p0_purelb_sopq_error",
+ "nig_p0_rx_macfifo_error",
+ "nig_p0_tx_macfifo_error",
+ "nig_p0_tx_bmb_fifo_error",
+ "nig_p0_lb_bmb_fifo_error",
+ "nig_p0_tx_btb_fifo_error",
+ "nig_p0_lb_btb_fifo_error",
+ "nig_p0_rx_llh_dfifo_error",
+ "nig_p0_tx_llh_dfifo_error",
+ "nig_p0_lb_llh_dfifo_error",
+ "nig_p0_rx_llh_hfifo_error",
+ "nig_p0_tx_llh_hfifo_error",
+ "nig_p0_lb_llh_hfifo_error",
+ "nig_p0_rx_llh_rfifo_error",
+ "nig_p0_tx_llh_rfifo_error",
+ "nig_p0_lb_llh_rfifo_error",
+ "nig_p0_storm_fifo_error",
+ "nig_p0_storm_dscr_fifo_error",
+ "nig_p0_tx_gnt_fifo_error",
+ "nig_p0_lb_gnt_fifo_error",
+ "nig_p0_tx_pause_too_long_int",
+ "nig_p0_tc0_pause_too_long_int",
+ "nig_p0_tc1_pause_too_long_int",
+ "nig_p0_tc2_pause_too_long_int",
+ "nig_p0_tc3_pause_too_long_int",
+ "nig_p0_tc4_pause_too_long_int",
+ "nig_p0_tc5_pause_too_long_int",
+ "nig_p0_tc6_pause_too_long_int",
+ "nig_p0_tc7_pause_too_long_int",
+ "nig_p0_lb_tc0_pause_too_long_int",
+ "nig_p0_lb_tc1_pause_too_long_int",
+ "nig_p0_lb_tc2_pause_too_long_int",
+ "nig_p0_lb_tc3_pause_too_long_int",
+ "nig_p0_lb_tc4_pause_too_long_int",
+ "nig_p0_lb_tc5_pause_too_long_int",
+ "nig_p0_lb_tc6_pause_too_long_int",
+ "nig_p0_lb_tc7_pause_too_long_int",
+ "nig_p0_lb_tc8_pause_too_long_int",
+ "nig_p1_purelb_sopq_error",
+ "nig_p1_rx_macfifo_error",
+ "nig_p1_tx_macfifo_error",
+ "nig_p1_tx_bmb_fifo_error",
+ "nig_p1_lb_bmb_fifo_error",
+ "nig_p1_tx_btb_fifo_error",
+ "nig_p1_lb_btb_fifo_error",
+ "nig_p1_rx_llh_dfifo_error",
+ "nig_p1_tx_llh_dfifo_error",
+ "nig_p1_lb_llh_dfifo_error",
+ "nig_p1_rx_llh_hfifo_error",
+ "nig_p1_tx_llh_hfifo_error",
+ "nig_p1_lb_llh_hfifo_error",
+ "nig_p1_rx_llh_rfifo_error",
+ "nig_p1_tx_llh_rfifo_error",
+ "nig_p1_lb_llh_rfifo_error",
+ "nig_p1_storm_fifo_error",
+ "nig_p1_storm_dscr_fifo_error",
+ "nig_p1_tx_gnt_fifo_error",
+ "nig_p1_lb_gnt_fifo_error",
+ "nig_p1_tx_pause_too_long_int",
+ "nig_p1_tc0_pause_too_long_int",
+ "nig_p1_tc1_pause_too_long_int",
+ "nig_p1_tc2_pause_too_long_int",
+ "nig_p1_tc3_pause_too_long_int",
+ "nig_p1_tc4_pause_too_long_int",
+ "nig_p1_tc5_pause_too_long_int",
+ "nig_p1_tc6_pause_too_long_int",
+ "nig_p1_tc7_pause_too_long_int",
+ "nig_p1_lb_tc0_pause_too_long_int",
+ "nig_p1_lb_tc1_pause_too_long_int",
+ "nig_p1_lb_tc2_pause_too_long_int",
+ "nig_p1_lb_tc3_pause_too_long_int",
+ "nig_p1_lb_tc4_pause_too_long_int",
+ "nig_p1_lb_tc5_pause_too_long_int",
+ "nig_p1_lb_tc6_pause_too_long_int",
+ "nig_p1_lb_tc7_pause_too_long_int",
+ "nig_p1_lb_tc8_pause_too_long_int",
+ "nig_p2_purelb_sopq_error",
+ "nig_p2_rx_macfifo_error",
+ "nig_p2_tx_macfifo_error",
+ "nig_p2_tx_bmb_fifo_error",
+ "nig_p2_lb_bmb_fifo_error",
+ "nig_p2_tx_btb_fifo_error",
+ "nig_p2_lb_btb_fifo_error",
+ "nig_p2_rx_llh_dfifo_error",
+ "nig_p2_tx_llh_dfifo_error",
+ "nig_p2_lb_llh_dfifo_error",
+ "nig_p2_rx_llh_hfifo_error",
+ "nig_p2_tx_llh_hfifo_error",
+ "nig_p2_lb_llh_hfifo_error",
+ "nig_p2_rx_llh_rfifo_error",
+ "nig_p2_tx_llh_rfifo_error",
+ "nig_p2_lb_llh_rfifo_error",
+ "nig_p2_storm_fifo_error",
+ "nig_p2_storm_dscr_fifo_error",
+ "nig_p2_tx_gnt_fifo_error",
+ "nig_p2_lb_gnt_fifo_error",
+ "nig_p2_tx_pause_too_long_int",
+ "nig_p2_tc0_pause_too_long_int",
+ "nig_p2_tc1_pause_too_long_int",
+ "nig_p2_tc2_pause_too_long_int",
+ "nig_p2_tc3_pause_too_long_int",
+ "nig_p2_tc4_pause_too_long_int",
+ "nig_p2_tc5_pause_too_long_int",
+ "nig_p2_tc6_pause_too_long_int",
+ "nig_p2_tc7_pause_too_long_int",
+ "nig_p2_lb_tc0_pause_too_long_int",
+ "nig_p2_lb_tc1_pause_too_long_int",
+ "nig_p2_lb_tc2_pause_too_long_int",
+ "nig_p2_lb_tc3_pause_too_long_int",
+ "nig_p2_lb_tc4_pause_too_long_int",
+ "nig_p2_lb_tc5_pause_too_long_int",
+ "nig_p2_lb_tc6_pause_too_long_int",
+ "nig_p2_lb_tc7_pause_too_long_int",
+ "nig_p2_lb_tc8_pause_too_long_int",
+ "nig_p3_purelb_sopq_error",
+ "nig_p3_rx_macfifo_error",
+ "nig_p3_tx_macfifo_error",
+ "nig_p3_tx_bmb_fifo_error",
+ "nig_p3_lb_bmb_fifo_error",
+ "nig_p3_tx_btb_fifo_error",
+ "nig_p3_lb_btb_fifo_error",
+ "nig_p3_rx_llh_dfifo_error",
+ "nig_p3_tx_llh_dfifo_error",
+ "nig_p3_lb_llh_dfifo_error",
+ "nig_p3_rx_llh_hfifo_error",
+ "nig_p3_tx_llh_hfifo_error",
+ "nig_p3_lb_llh_hfifo_error",
+ "nig_p3_rx_llh_rfifo_error",
+ "nig_p3_tx_llh_rfifo_error",
+ "nig_p3_lb_llh_rfifo_error",
+ "nig_p3_storm_fifo_error",
+ "nig_p3_storm_dscr_fifo_error",
+ "nig_p3_tx_gnt_fifo_error",
+ "nig_p3_lb_gnt_fifo_error",
+ "nig_p3_tx_pause_too_long_int",
+ "nig_p3_tc0_pause_too_long_int",
+ "nig_p3_tc1_pause_too_long_int",
+ "nig_p3_tc2_pause_too_long_int",
+ "nig_p3_tc3_pause_too_long_int",
+ "nig_p3_tc4_pause_too_long_int",
+ "nig_p3_tc5_pause_too_long_int",
+ "nig_p3_tc6_pause_too_long_int",
+ "nig_p3_tc7_pause_too_long_int",
+ "nig_p3_lb_tc0_pause_too_long_int",
+ "nig_p3_lb_tc1_pause_too_long_int",
+ "nig_p3_lb_tc2_pause_too_long_int",
+ "nig_p3_lb_tc3_pause_too_long_int",
+ "nig_p3_lb_tc4_pause_too_long_int",
+ "nig_p3_lb_tc5_pause_too_long_int",
+ "nig_p3_lb_tc6_pause_too_long_int",
+ "nig_p3_lb_tc7_pause_too_long_int",
+ "nig_p3_lb_tc8_pause_too_long_int",
+};
+#else
+#define nig_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 nig_int0_bb_a0_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg nig_int0_bb_a0 = {
+ 0, 12, nig_int0_bb_a0_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044
+};
+
+static const u16 nig_int1_bb_a0_attn_idx[32] = {
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg nig_int1_bb_a0 = {
+ 1, 32, nig_int1_bb_a0_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054
+};
+
+static const u16 nig_int2_bb_a0_attn_idx[20] = {
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63,
+};
+
+static struct attn_hw_reg nig_int2_bb_a0 = {
+ 2, 20, nig_int2_bb_a0_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064
+};
+
+static const u16 nig_int3_bb_a0_attn_idx[18] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+};
+
+static struct attn_hw_reg nig_int3_bb_a0 = {
+ 3, 18, nig_int3_bb_a0_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074
+};
+
+static const u16 nig_int4_bb_a0_attn_idx[20] = {
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101,
+};
+
+static struct attn_hw_reg nig_int4_bb_a0 = {
+ 4, 20, nig_int4_bb_a0_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084
+};
+
+static const u16 nig_int5_bb_a0_attn_idx[18] = {
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116,
+ 117, 118, 119,
+};
+
+static struct attn_hw_reg nig_int5_bb_a0 = {
+ 5, 18, nig_int5_bb_a0_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094
+};
+
+static struct attn_hw_reg *nig_int_bb_a0_regs[6] = {
+ &nig_int0_bb_a0, &nig_int1_bb_a0, &nig_int2_bb_a0, &nig_int3_bb_a0,
+ &nig_int4_bb_a0, &nig_int5_bb_a0,
+};
+
+static const u16 nig_int0_bb_b0_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg nig_int0_bb_b0 = {
+ 0, 12, nig_int0_bb_b0_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044
+};
+
+static const u16 nig_int1_bb_b0_attn_idx[32] = {
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg nig_int1_bb_b0 = {
+ 1, 32, nig_int1_bb_b0_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054
+};
+
+static const u16 nig_int2_bb_b0_attn_idx[20] = {
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63,
+};
+
+static struct attn_hw_reg nig_int2_bb_b0 = {
+ 2, 20, nig_int2_bb_b0_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064
+};
+
+static const u16 nig_int3_bb_b0_attn_idx[18] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+};
+
+static struct attn_hw_reg nig_int3_bb_b0 = {
+ 3, 18, nig_int3_bb_b0_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074
+};
+
+static const u16 nig_int4_bb_b0_attn_idx[20] = {
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101,
+};
+
+static struct attn_hw_reg nig_int4_bb_b0 = {
+ 4, 20, nig_int4_bb_b0_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084
+};
+
+static const u16 nig_int5_bb_b0_attn_idx[18] = {
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116,
+ 117, 118, 119,
+};
+
+static struct attn_hw_reg nig_int5_bb_b0 = {
+ 5, 18, nig_int5_bb_b0_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094
+};
+
+static struct attn_hw_reg *nig_int_bb_b0_regs[6] = {
+ &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0,
+ &nig_int4_bb_b0, &nig_int5_bb_b0,
+};
+
+static const u16 nig_int0_k2_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg nig_int0_k2 = {
+ 0, 12, nig_int0_k2_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044
+};
+
+static const u16 nig_int1_k2_attn_idx[32] = {
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg nig_int1_k2 = {
+ 1, 32, nig_int1_k2_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054
+};
+
+static const u16 nig_int2_k2_attn_idx[20] = {
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63,
+};
+
+static struct attn_hw_reg nig_int2_k2 = {
+ 2, 20, nig_int2_k2_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064
+};
+
+static const u16 nig_int3_k2_attn_idx[18] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+};
+
+static struct attn_hw_reg nig_int3_k2 = {
+ 3, 18, nig_int3_k2_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074
+};
+
+static const u16 nig_int4_k2_attn_idx[20] = {
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101,
+};
+
+static struct attn_hw_reg nig_int4_k2 = {
+ 4, 20, nig_int4_k2_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084
+};
+
+static const u16 nig_int5_k2_attn_idx[18] = {
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116,
+ 117, 118, 119,
+};
+
+static struct attn_hw_reg nig_int5_k2 = {
+ 5, 18, nig_int5_k2_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094
+};
+
+static const u16 nig_int6_k2_attn_idx[20] = {
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
+ 134,
+ 135, 136, 137, 138, 139,
+};
+
+static struct attn_hw_reg nig_int6_k2 = {
+ 6, 20, nig_int6_k2_attn_idx, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4
+};
+
+static const u16 nig_int7_k2_attn_idx[18] = {
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
+ 154,
+ 155, 156, 157,
+};
+
+static struct attn_hw_reg nig_int7_k2 = {
+ 7, 18, nig_int7_k2_attn_idx, 0x5000b0, 0x5000bc, 0x5000b8, 0x5000b4
+};
+
+static const u16 nig_int8_k2_attn_idx[20] = {
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172,
+ 173, 174, 175, 176, 177,
+};
+
+static struct attn_hw_reg nig_int8_k2 = {
+ 8, 20, nig_int8_k2_attn_idx, 0x5000c0, 0x5000cc, 0x5000c8, 0x5000c4
+};
+
+static const u16 nig_int9_k2_attn_idx[18] = {
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192,
+ 193, 194, 195,
+};
+
+static struct attn_hw_reg nig_int9_k2 = {
+ 9, 18, nig_int9_k2_attn_idx, 0x5000d0, 0x5000dc, 0x5000d8, 0x5000d4
+};
+
+static struct attn_hw_reg *nig_int_k2_regs[10] = {
+ &nig_int0_k2, &nig_int1_k2, &nig_int2_k2, &nig_int3_k2, &nig_int4_k2,
+ &nig_int5_k2, &nig_int6_k2, &nig_int7_k2, &nig_int8_k2, &nig_int9_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nig_prty_attn_desc[113] = {
+ "nig_datapath_parity_error",
+ "nig_mem107_i_mem_prty",
+ "nig_mem103_i_mem_prty",
+ "nig_mem104_i_mem_prty",
+ "nig_mem105_i_mem_prty",
+ "nig_mem106_i_mem_prty",
+ "nig_mem072_i_mem_prty",
+ "nig_mem071_i_mem_prty",
+ "nig_mem074_i_mem_prty",
+ "nig_mem073_i_mem_prty",
+ "nig_mem076_i_mem_prty",
+ "nig_mem075_i_mem_prty",
+ "nig_mem078_i_mem_prty",
+ "nig_mem077_i_mem_prty",
+ "nig_mem055_i_mem_prty",
+ "nig_mem062_i_mem_prty",
+ "nig_mem063_i_mem_prty",
+ "nig_mem064_i_mem_prty",
+ "nig_mem065_i_mem_prty",
+ "nig_mem066_i_mem_prty",
+ "nig_mem067_i_mem_prty",
+ "nig_mem068_i_mem_prty",
+ "nig_mem069_i_mem_prty",
+ "nig_mem070_i_mem_prty",
+ "nig_mem056_i_mem_prty",
+ "nig_mem057_i_mem_prty",
+ "nig_mem058_i_mem_prty",
+ "nig_mem059_i_mem_prty",
+ "nig_mem060_i_mem_prty",
+ "nig_mem061_i_mem_prty",
+ "nig_mem035_i_mem_prty",
+ "nig_mem046_i_mem_prty",
+ "nig_mem051_i_mem_prty",
+ "nig_mem052_i_mem_prty",
+ "nig_mem090_i_mem_prty",
+ "nig_mem089_i_mem_prty",
+ "nig_mem092_i_mem_prty",
+ "nig_mem091_i_mem_prty",
+ "nig_mem109_i_mem_prty",
+ "nig_mem110_i_mem_prty",
+ "nig_mem001_i_mem_prty",
+ "nig_mem008_i_mem_prty",
+ "nig_mem009_i_mem_prty",
+ "nig_mem010_i_mem_prty",
+ "nig_mem011_i_mem_prty",
+ "nig_mem012_i_mem_prty",
+ "nig_mem013_i_mem_prty",
+ "nig_mem014_i_mem_prty",
+ "nig_mem015_i_mem_prty",
+ "nig_mem016_i_mem_prty",
+ "nig_mem002_i_mem_prty",
+ "nig_mem003_i_mem_prty",
+ "nig_mem004_i_mem_prty",
+ "nig_mem005_i_mem_prty",
+ "nig_mem006_i_mem_prty",
+ "nig_mem007_i_mem_prty",
+ "nig_mem080_i_mem_prty",
+ "nig_mem081_i_mem_prty",
+ "nig_mem082_i_mem_prty",
+ "nig_mem083_i_mem_prty",
+ "nig_mem048_i_mem_prty",
+ "nig_mem049_i_mem_prty",
+ "nig_mem102_i_mem_prty",
+ "nig_mem087_i_mem_prty",
+ "nig_mem086_i_mem_prty",
+ "nig_mem088_i_mem_prty",
+ "nig_mem079_i_mem_prty",
+ "nig_mem047_i_mem_prty",
+ "nig_mem050_i_mem_prty",
+ "nig_mem053_i_mem_prty",
+ "nig_mem054_i_mem_prty",
+ "nig_mem036_i_mem_prty",
+ "nig_mem037_i_mem_prty",
+ "nig_mem038_i_mem_prty",
+ "nig_mem039_i_mem_prty",
+ "nig_mem040_i_mem_prty",
+ "nig_mem041_i_mem_prty",
+ "nig_mem042_i_mem_prty",
+ "nig_mem043_i_mem_prty",
+ "nig_mem044_i_mem_prty",
+ "nig_mem045_i_mem_prty",
+ "nig_mem093_i_mem_prty",
+ "nig_mem094_i_mem_prty",
+ "nig_mem027_i_mem_prty",
+ "nig_mem028_i_mem_prty",
+ "nig_mem029_i_mem_prty",
+ "nig_mem030_i_mem_prty",
+ "nig_mem017_i_mem_prty",
+ "nig_mem018_i_mem_prty",
+ "nig_mem095_i_mem_prty",
+ "nig_mem084_i_mem_prty",
+ "nig_mem085_i_mem_prty",
+ "nig_mem099_i_mem_prty",
+ "nig_mem100_i_mem_prty",
+ "nig_mem096_i_mem_prty",
+ "nig_mem097_i_mem_prty",
+ "nig_mem098_i_mem_prty",
+ "nig_mem031_i_mem_prty",
+ "nig_mem032_i_mem_prty",
+ "nig_mem033_i_mem_prty",
+ "nig_mem034_i_mem_prty",
+ "nig_mem019_i_mem_prty",
+ "nig_mem020_i_mem_prty",
+ "nig_mem021_i_mem_prty",
+ "nig_mem022_i_mem_prty",
+ "nig_mem101_i_mem_prty",
+ "nig_mem023_i_mem_prty",
+ "nig_mem024_i_mem_prty",
+ "nig_mem025_i_mem_prty",
+ "nig_mem026_i_mem_prty",
+ "nig_mem108_i_mem_prty",
+ "nig_mem031_ext_i_mem_prty",
+ "nig_mem034_ext_i_mem_prty",
+};
+#else
+#define nig_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 nig_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 5, 12, 13, 23, 35, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 60, 61, 62, 63, 64, 65, 66,
+};
+
+static struct attn_hw_reg nig_prty1_bb_a0 = {
+ 0, 31, nig_prty1_bb_a0_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204
+};
+
+static const u16 nig_prty2_bb_a0_attn_idx[31] = {
+ 33, 69, 70, 90, 91, 8, 11, 10, 14, 17, 18, 19, 20, 21, 22, 7, 6, 24, 25,
+ 26, 27, 28, 29, 15, 16, 57, 58, 59, 9, 94, 95,
+};
+
+static struct attn_hw_reg nig_prty2_bb_a0 = {
+ 1, 31, nig_prty2_bb_a0_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214
+};
+
+static const u16 nig_prty3_bb_a0_attn_idx[31] = {
+ 96, 97, 98, 103, 104, 92, 93, 105, 106, 107, 108, 109, 80, 31, 67, 83,
+ 84,
+ 3, 68, 85, 86, 89, 77, 78, 79, 4, 32, 36, 81, 82, 87,
+};
+
+static struct attn_hw_reg nig_prty3_bb_a0 = {
+ 2, 31, nig_prty3_bb_a0_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224
+};
+
+static const u16 nig_prty4_bb_a0_attn_idx[14] = {
+ 88, 101, 102, 75, 71, 74, 76, 73, 72, 34, 37, 99, 30, 100,
+};
+
+static struct attn_hw_reg nig_prty4_bb_a0 = {
+ 3, 14, nig_prty4_bb_a0_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234
+};
+
+static struct attn_hw_reg *nig_prty_bb_a0_regs[4] = {
+ &nig_prty1_bb_a0, &nig_prty2_bb_a0, &nig_prty3_bb_a0, &nig_prty4_bb_a0,
+};
+
+static const u16 nig_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg nig_prty0_bb_b0 = {
+ 0, 1, nig_prty0_bb_b0_attn_idx, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4
+};
+
+static const u16 nig_prty1_bb_b0_attn_idx[31] = {
+ 4, 5, 9, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+};
+
+static struct attn_hw_reg nig_prty1_bb_b0 = {
+ 1, 31, nig_prty1_bb_b0_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204
+};
+
+static const u16 nig_prty2_bb_b0_attn_idx[31] = {
+ 90, 91, 64, 63, 65, 8, 11, 10, 13, 12, 66, 14, 17, 18, 19, 20, 21, 22,
+ 23,
+ 7, 6, 24, 25, 26, 27, 28, 29, 15, 16, 92, 93,
+};
+
+static struct attn_hw_reg nig_prty2_bb_b0 = {
+ 2, 31, nig_prty2_bb_b0_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214
+};
+
+static const u16 nig_prty3_bb_b0_attn_idx[31] = {
+ 94, 95, 96, 97, 99, 100, 103, 104, 105, 62, 108, 109, 80, 31, 1, 67, 60,
+ 69, 83, 84, 2, 3, 110, 61, 68, 70, 85, 86, 111, 112, 89,
+};
+
+static struct attn_hw_reg nig_prty3_bb_b0 = {
+ 3, 31, nig_prty3_bb_b0_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224
+};
+
+static const u16 nig_prty4_bb_b0_attn_idx[17] = {
+ 106, 107, 87, 88, 81, 82, 101, 102, 75, 71, 74, 76, 77, 78, 79, 73, 72,
+};
+
+static struct attn_hw_reg nig_prty4_bb_b0 = {
+ 4, 17, nig_prty4_bb_b0_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234
+};
+
+static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = {
+ &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0, &nig_prty3_bb_b0,
+ &nig_prty4_bb_b0,
+};
+
+static const u16 nig_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg nig_prty0_k2 = {
+ 0, 1, nig_prty0_k2_attn_idx, 0x5000e0, 0x5000ec, 0x5000e8, 0x5000e4
+};
+
+static const u16 nig_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg nig_prty1_k2 = {
+ 1, 31, nig_prty1_k2_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204
+};
+
+static const u16 nig_prty2_k2_attn_idx[31] = {
+ 67, 60, 61, 68, 32, 33, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 37, 36, 81, 82, 83, 84, 85, 86, 48, 49, 87, 88, 89,
+};
+
+static struct attn_hw_reg nig_prty2_k2 = {
+ 2, 31, nig_prty2_k2_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214
+};
+
+static const u16 nig_prty3_k2_attn_idx[31] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 92, 93, 105, 62, 106,
+ 107, 108, 109, 59, 90, 91, 64, 55, 41, 42, 43, 63, 65, 35, 34,
+};
+
+static struct attn_hw_reg nig_prty3_k2 = {
+ 3, 31, nig_prty3_k2_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224
+};
+
+static const u16 nig_prty4_k2_attn_idx[14] = {
+ 44, 45, 46, 47, 40, 50, 66, 56, 57, 58, 51, 52, 53, 54,
+};
+
+static struct attn_hw_reg nig_prty4_k2 = {
+ 4, 14, nig_prty4_k2_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234
+};
+
+static struct attn_hw_reg *nig_prty_k2_regs[5] = {
+ &nig_prty0_k2, &nig_prty1_k2, &nig_prty2_k2, &nig_prty3_k2,
+ &nig_prty4_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *wol_int_attn_desc[1] = {
+ "wol_address_error",
+};
+#else
+#define wol_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 wol_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg wol_int0_k2 = {
+ 0, 1, wol_int0_k2_attn_idx, 0x600040, 0x60004c, 0x600048, 0x600044
+};
+
+static struct attn_hw_reg *wol_int_k2_regs[1] = {
+ &wol_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *wol_prty_attn_desc[24] = {
+ "wol_mem017_i_mem_prty",
+ "wol_mem018_i_mem_prty",
+ "wol_mem019_i_mem_prty",
+ "wol_mem020_i_mem_prty",
+ "wol_mem021_i_mem_prty",
+ "wol_mem022_i_mem_prty",
+ "wol_mem023_i_mem_prty",
+ "wol_mem024_i_mem_prty",
+ "wol_mem001_i_mem_prty",
+ "wol_mem008_i_mem_prty",
+ "wol_mem009_i_mem_prty",
+ "wol_mem010_i_mem_prty",
+ "wol_mem011_i_mem_prty",
+ "wol_mem012_i_mem_prty",
+ "wol_mem013_i_mem_prty",
+ "wol_mem014_i_mem_prty",
+ "wol_mem015_i_mem_prty",
+ "wol_mem016_i_mem_prty",
+ "wol_mem002_i_mem_prty",
+ "wol_mem003_i_mem_prty",
+ "wol_mem004_i_mem_prty",
+ "wol_mem005_i_mem_prty",
+ "wol_mem006_i_mem_prty",
+ "wol_mem007_i_mem_prty",
+};
+#else
+#define wol_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 wol_prty1_k2_attn_idx[24] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23,
+};
+
+static struct attn_hw_reg wol_prty1_k2 = {
+ 0, 24, wol_prty1_k2_attn_idx, 0x600200, 0x60020c, 0x600208, 0x600204
+};
+
+static struct attn_hw_reg *wol_prty_k2_regs[1] = {
+ &wol_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *bmbn_int_attn_desc[1] = {
+ "bmbn_address_error",
+};
+#else
+#define bmbn_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 bmbn_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg bmbn_int0_k2 = {
+ 0, 1, bmbn_int0_k2_attn_idx, 0x610040, 0x61004c, 0x610048, 0x610044
+};
+
+static struct attn_hw_reg *bmbn_int_k2_regs[1] = {
+ &bmbn_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ipc_int_attn_desc[14] = {
+ "ipc_address_error",
+ "ipc_unused_0",
+ "ipc_vmain_por_assert",
+ "ipc_vmain_por_deassert",
+ "ipc_perst_assert",
+ "ipc_perst_deassert",
+ "ipc_otp_ecc_ded_0",
+ "ipc_otp_ecc_ded_1",
+ "ipc_otp_ecc_ded_2",
+ "ipc_otp_ecc_ded_3",
+ "ipc_otp_ecc_ded_4",
+ "ipc_otp_ecc_ded_5",
+ "ipc_otp_ecc_ded_6",
+ "ipc_otp_ecc_ded_7",
+};
+#else
+#define ipc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ipc_int0_bb_a0_attn_idx[5] = {
+ 0, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg ipc_int0_bb_a0 = {
+ 0, 5, ipc_int0_bb_a0_attn_idx, 0x2050c, 0x20518, 0x20514, 0x20510
+};
+
+static struct attn_hw_reg *ipc_int_bb_a0_regs[1] = {
+ &ipc_int0_bb_a0,
+};
+
+static const u16 ipc_int0_bb_b0_attn_idx[13] = {
+ 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg ipc_int0_bb_b0 = {
+ 0, 13, ipc_int0_bb_b0_attn_idx, 0x2050c, 0x20518, 0x20514, 0x20510
+};
+
+static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = {
+ &ipc_int0_bb_b0,
+};
+
+static const u16 ipc_int0_k2_attn_idx[5] = {
+ 0, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg ipc_int0_k2 = {
+ 0, 5, ipc_int0_k2_attn_idx, 0x202dc, 0x202e8, 0x202e4, 0x202e0
+};
+
+static struct attn_hw_reg *ipc_int_k2_regs[1] = {
+ &ipc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ipc_prty_attn_desc[1] = {
+ "ipc_fake_par_err",
+};
+#else
+#define ipc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ipc_prty0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ipc_prty0_bb_a0 = {
+ 0, 1, ipc_prty0_bb_a0_attn_idx, 0x2051c, 0x20528, 0x20524, 0x20520
+};
+
+static struct attn_hw_reg *ipc_prty_bb_a0_regs[1] = {
+ &ipc_prty0_bb_a0,
+};
+
+static const u16 ipc_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ipc_prty0_bb_b0 = {
+ 0, 1, ipc_prty0_bb_b0_attn_idx, 0x2051c, 0x20528, 0x20524, 0x20520
+};
+
+static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = {
+ &ipc_prty0_bb_b0,
+};
+
+#ifdef ATTN_DESC
+static const char *nwm_int_attn_desc[18] = {
+ "nwm_address_error",
+ "nwm_tx_overflow_0",
+ "nwm_tx_underflow_0",
+ "nwm_tx_overflow_1",
+ "nwm_tx_underflow_1",
+ "nwm_tx_overflow_2",
+ "nwm_tx_underflow_2",
+ "nwm_tx_overflow_3",
+ "nwm_tx_underflow_3",
+ "nwm_unused_0",
+ "nwm_ln0_at_10M",
+ "nwm_ln0_at_100M",
+ "nwm_ln1_at_10M",
+ "nwm_ln1_at_100M",
+ "nwm_ln2_at_10M",
+ "nwm_ln2_at_100M",
+ "nwm_ln3_at_10M",
+ "nwm_ln3_at_100M",
+};
+#else
+#define nwm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 nwm_int0_k2_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg nwm_int0_k2 = {
+ 0, 17, nwm_int0_k2_attn_idx, 0x800004, 0x800010, 0x80000c, 0x800008
+};
+
+static struct attn_hw_reg *nwm_int_k2_regs[1] = {
+ &nwm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nwm_prty_attn_desc[72] = {
+ "nwm_mem020_i_mem_prty",
+ "nwm_mem028_i_mem_prty",
+ "nwm_mem036_i_mem_prty",
+ "nwm_mem044_i_mem_prty",
+ "nwm_mem023_i_mem_prty",
+ "nwm_mem031_i_mem_prty",
+ "nwm_mem039_i_mem_prty",
+ "nwm_mem047_i_mem_prty",
+ "nwm_mem024_i_mem_prty",
+ "nwm_mem032_i_mem_prty",
+ "nwm_mem040_i_mem_prty",
+ "nwm_mem048_i_mem_prty",
+ "nwm_mem018_i_mem_prty",
+ "nwm_mem026_i_mem_prty",
+ "nwm_mem034_i_mem_prty",
+ "nwm_mem042_i_mem_prty",
+ "nwm_mem017_i_mem_prty",
+ "nwm_mem025_i_mem_prty",
+ "nwm_mem033_i_mem_prty",
+ "nwm_mem041_i_mem_prty",
+ "nwm_mem021_i_mem_prty",
+ "nwm_mem029_i_mem_prty",
+ "nwm_mem037_i_mem_prty",
+ "nwm_mem045_i_mem_prty",
+ "nwm_mem019_i_mem_prty",
+ "nwm_mem027_i_mem_prty",
+ "nwm_mem035_i_mem_prty",
+ "nwm_mem043_i_mem_prty",
+ "nwm_mem022_i_mem_prty",
+ "nwm_mem030_i_mem_prty",
+ "nwm_mem038_i_mem_prty",
+ "nwm_mem046_i_mem_prty",
+ "nwm_mem057_i_mem_prty",
+ "nwm_mem059_i_mem_prty",
+ "nwm_mem061_i_mem_prty",
+ "nwm_mem063_i_mem_prty",
+ "nwm_mem058_i_mem_prty",
+ "nwm_mem060_i_mem_prty",
+ "nwm_mem062_i_mem_prty",
+ "nwm_mem064_i_mem_prty",
+ "nwm_mem009_i_mem_prty",
+ "nwm_mem010_i_mem_prty",
+ "nwm_mem011_i_mem_prty",
+ "nwm_mem012_i_mem_prty",
+ "nwm_mem013_i_mem_prty",
+ "nwm_mem014_i_mem_prty",
+ "nwm_mem015_i_mem_prty",
+ "nwm_mem016_i_mem_prty",
+ "nwm_mem001_i_mem_prty",
+ "nwm_mem002_i_mem_prty",
+ "nwm_mem003_i_mem_prty",
+ "nwm_mem004_i_mem_prty",
+ "nwm_mem005_i_mem_prty",
+ "nwm_mem006_i_mem_prty",
+ "nwm_mem007_i_mem_prty",
+ "nwm_mem008_i_mem_prty",
+ "nwm_mem049_i_mem_prty",
+ "nwm_mem053_i_mem_prty",
+ "nwm_mem050_i_mem_prty",
+ "nwm_mem054_i_mem_prty",
+ "nwm_mem051_i_mem_prty",
+ "nwm_mem055_i_mem_prty",
+ "nwm_mem052_i_mem_prty",
+ "nwm_mem056_i_mem_prty",
+ "nwm_mem066_i_mem_prty",
+ "nwm_mem068_i_mem_prty",
+ "nwm_mem070_i_mem_prty",
+ "nwm_mem072_i_mem_prty",
+ "nwm_mem065_i_mem_prty",
+ "nwm_mem067_i_mem_prty",
+ "nwm_mem069_i_mem_prty",
+ "nwm_mem071_i_mem_prty",
+};
+#else
+#define nwm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 nwm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg nwm_prty1_k2 = {
+ 0, 31, nwm_prty1_k2_attn_idx, 0x800200, 0x80020c, 0x800208, 0x800204
+};
+
+static const u16 nwm_prty2_k2_attn_idx[31] = {
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+};
+
+static struct attn_hw_reg nwm_prty2_k2 = {
+ 1, 31, nwm_prty2_k2_attn_idx, 0x800210, 0x80021c, 0x800218, 0x800214
+};
+
+static const u16 nwm_prty3_k2_attn_idx[10] = {
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+};
+
+static struct attn_hw_reg nwm_prty3_k2 = {
+ 2, 10, nwm_prty3_k2_attn_idx, 0x800220, 0x80022c, 0x800228, 0x800224
+};
+
+static struct attn_hw_reg *nwm_prty_k2_regs[3] = {
+ &nwm_prty1_k2, &nwm_prty2_k2, &nwm_prty3_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nws_int_attn_desc[38] = {
+ "nws_address_error",
+ "nws_ln0_an_resolve_50g_cr2",
+ "nws_ln0_an_resolve_50g_kr2",
+ "nws_ln0_an_resolve_40g_cr4",
+ "nws_ln0_an_resolve_40g_kr4",
+ "nws_ln0_an_resolve_25g_gr",
+ "nws_ln0_an_resolve_25g_cr",
+ "nws_ln0_an_resolve_25g_kr",
+ "nws_ln0_an_resolve_10g_kr",
+ "nws_ln0_an_resolve_1g_kx",
+ "nws_unused_0",
+ "nws_ln1_an_resolve_50g_cr2",
+ "nws_ln1_an_resolve_50g_kr2",
+ "nws_ln1_an_resolve_40g_cr4",
+ "nws_ln1_an_resolve_40g_kr4",
+ "nws_ln1_an_resolve_25g_gr",
+ "nws_ln1_an_resolve_25g_cr",
+ "nws_ln1_an_resolve_25g_kr",
+ "nws_ln1_an_resolve_10g_kr",
+ "nws_ln1_an_resolve_1g_kx",
+ "nws_ln2_an_resolve_50g_cr2",
+ "nws_ln2_an_resolve_50g_kr2",
+ "nws_ln2_an_resolve_40g_cr4",
+ "nws_ln2_an_resolve_40g_kr4",
+ "nws_ln2_an_resolve_25g_gr",
+ "nws_ln2_an_resolve_25g_cr",
+ "nws_ln2_an_resolve_25g_kr",
+ "nws_ln2_an_resolve_10g_kr",
+ "nws_ln2_an_resolve_1g_kx",
+ "nws_ln3_an_resolve_50g_cr2",
+ "nws_ln3_an_resolve_50g_kr2",
+ "nws_ln3_an_resolve_40g_cr4",
+ "nws_ln3_an_resolve_40g_kr4",
+ "nws_ln3_an_resolve_25g_gr",
+ "nws_ln3_an_resolve_25g_cr",
+ "nws_ln3_an_resolve_25g_kr",
+ "nws_ln3_an_resolve_10g_kr",
+ "nws_ln3_an_resolve_1g_kx",
+};
+#else
+#define nws_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 nws_int0_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg nws_int0_k2 = {
+ 0, 10, nws_int0_k2_attn_idx, 0x700180, 0x70018c, 0x700188, 0x700184
+};
+
+static const u16 nws_int1_k2_attn_idx[9] = {
+ 11, 12, 13, 14, 15, 16, 17, 18, 19,
+};
+
+static struct attn_hw_reg nws_int1_k2 = {
+ 1, 9, nws_int1_k2_attn_idx, 0x700190, 0x70019c, 0x700198, 0x700194
+};
+
+static const u16 nws_int2_k2_attn_idx[9] = {
+ 20, 21, 22, 23, 24, 25, 26, 27, 28,
+};
+
+static struct attn_hw_reg nws_int2_k2 = {
+ 2, 9, nws_int2_k2_attn_idx, 0x7001a0, 0x7001ac, 0x7001a8, 0x7001a4
+};
+
+static const u16 nws_int3_k2_attn_idx[9] = {
+ 29, 30, 31, 32, 33, 34, 35, 36, 37,
+};
+
+static struct attn_hw_reg nws_int3_k2 = {
+ 3, 9, nws_int3_k2_attn_idx, 0x7001b0, 0x7001bc, 0x7001b8, 0x7001b4
+};
+
+static struct attn_hw_reg *nws_int_k2_regs[4] = {
+ &nws_int0_k2, &nws_int1_k2, &nws_int2_k2, &nws_int3_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nws_prty_attn_desc[4] = {
+ "nws_mem003_i_mem_prty",
+ "nws_mem001_i_mem_prty",
+ "nws_mem004_i_mem_prty",
+ "nws_mem002_i_mem_prty",
+};
+#else
+#define nws_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 nws_prty1_k2_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg nws_prty1_k2 = {
+ 0, 4, nws_prty1_k2_attn_idx, 0x700200, 0x70020c, 0x700208, 0x700204
+};
+
+static struct attn_hw_reg *nws_prty_k2_regs[1] = {
+ &nws_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ms_int_attn_desc[1] = {
+ "ms_address_error",
+};
+#else
+#define ms_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ms_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ms_int0_k2 = {
+ 0, 1, ms_int0_k2_attn_idx, 0x6a0180, 0x6a018c, 0x6a0188, 0x6a0184
+};
+
+static struct attn_hw_reg *ms_int_k2_regs[1] = {
+ &ms_int0_k2,
+};
+
+static struct attn_hw_block attn_blocks[] = {
+ {"grc", grc_int_attn_desc, grc_prty_attn_desc, {
+ {1, 1,
+ grc_int_bb_a0_regs,
+ grc_prty_bb_a0_regs},
+ {1, 1,
+ grc_int_bb_b0_regs,
+ grc_prty_bb_b0_regs},
+ {1, 1, grc_int_k2_regs,
+ grc_prty_k2_regs} } },
+ {"miscs", miscs_int_attn_desc, miscs_prty_attn_desc, {
+ {2, 0,
+
+ miscs_int_bb_a0_regs,
+ OSAL_NULL},
+ {2, 1,
+
+ miscs_int_bb_b0_regs,
+
+ miscs_prty_bb_b0_regs},
+ {1, 1,
+
+ miscs_int_k2_regs,
+
+ miscs_prty_k2_regs } } },
+ {"misc", misc_int_attn_desc, OSAL_NULL, {
+ {1, 0, misc_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 0, misc_int_bb_b0_regs,
+ OSAL_NULL},
+ {1, 0, misc_int_k2_regs,
+ OSAL_NULL } } },
+ {"dbu", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"pglue_b", pglue_b_int_attn_desc, pglue_b_prty_attn_desc, {
+ {1, 1,
+
+ pglue_b_int_bb_a0_regs,
+
+ pglue_b_prty_bb_a0_regs},
+ {1, 2,
+
+ pglue_b_int_bb_b0_regs,
+
+ pglue_b_prty_bb_b0_regs},
+ {1, 3,
+
+ pglue_b_int_k2_regs,
+
+ pglue_b_prty_k2_regs } } },
+ {"cnig", cnig_int_attn_desc, cnig_prty_attn_desc, {
+ {1, 0,
+ cnig_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+ cnig_int_bb_b0_regs,
+
+ cnig_prty_bb_b0_regs},
+ {1, 1,
+ cnig_int_k2_regs,
+
+ cnig_prty_k2_regs } } },
+ {"cpmu", cpmu_int_attn_desc, OSAL_NULL, {
+ {1, 0, cpmu_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 0, cpmu_int_bb_b0_regs,
+ OSAL_NULL},
+ {1, 0, cpmu_int_k2_regs,
+ OSAL_NULL } } },
+ {"ncsi", ncsi_int_attn_desc, ncsi_prty_attn_desc, {
+ {1, 1,
+ ncsi_int_bb_a0_regs,
+
+ ncsi_prty_bb_a0_regs},
+ {1, 1,
+ ncsi_int_bb_b0_regs,
+
+ ncsi_prty_bb_b0_regs},
+ {1, 1,
+ ncsi_int_k2_regs,
+
+ ncsi_prty_k2_regs } } },
+ {"opte", OSAL_NULL, opte_prty_attn_desc, {
+ {0, 1, OSAL_NULL,
+ opte_prty_bb_a0_regs},
+ {0, 2, OSAL_NULL,
+ opte_prty_bb_b0_regs},
+ {0, 2, OSAL_NULL,
+ opte_prty_k2_regs } } },
+ {"bmb", bmb_int_attn_desc, bmb_prty_attn_desc, {
+ {12, 2,
+ bmb_int_bb_a0_regs,
+ bmb_prty_bb_a0_regs},
+ {12, 3,
+ bmb_int_bb_b0_regs,
+ bmb_prty_bb_b0_regs},
+ {12, 3, bmb_int_k2_regs,
+ bmb_prty_k2_regs } } },
+ {"pcie", pcie_int_attn_desc, pcie_prty_attn_desc, {
+ {0, 1, OSAL_NULL,
+
+ pcie_prty_bb_a0_regs},
+ {0, 1, OSAL_NULL,
+
+ pcie_prty_bb_b0_regs},
+ {1, 2,
+ pcie_int_k2_regs,
+
+ pcie_prty_k2_regs } } },
+ {"mcp", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"mcp2", OSAL_NULL, mcp2_prty_attn_desc, {
+ {0, 2, OSAL_NULL,
+ mcp2_prty_bb_a0_regs},
+ {0, 2, OSAL_NULL,
+ mcp2_prty_bb_b0_regs},
+ {0, 2, OSAL_NULL,
+ mcp2_prty_k2_regs } } },
+ {"pswhst", pswhst_int_attn_desc, pswhst_prty_attn_desc, {
+ {1, 1,
+
+ pswhst_int_bb_a0_regs,
+
+ pswhst_prty_bb_a0_regs},
+ {1, 2,
+
+ pswhst_int_bb_b0_regs,
+
+ pswhst_prty_bb_b0_regs},
+ {1, 2,
+
+ pswhst_int_k2_regs,
+
+ pswhst_prty_k2_regs } } },
+ {"pswhst2", pswhst2_int_attn_desc, pswhst2_prty_attn_desc, {
+ {1, 0,
+
+ pswhst2_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pswhst2_int_bb_b0_regs,
+
+ pswhst2_prty_bb_b0_regs},
+ {1, 1,
+
+ pswhst2_int_k2_regs,
+
+ pswhst2_prty_k2_regs } } },
+ {"pswrd", pswrd_int_attn_desc, pswrd_prty_attn_desc, {
+ {1, 0,
+
+ pswrd_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pswrd_int_bb_b0_regs,
+
+ pswrd_prty_bb_b0_regs},
+ {1, 1,
+
+ pswrd_int_k2_regs,
+
+ pswrd_prty_k2_regs } } },
+ {"pswrd2", pswrd2_int_attn_desc, pswrd2_prty_attn_desc, {
+ {1, 2,
+
+ pswrd2_int_bb_a0_regs,
+
+ pswrd2_prty_bb_a0_regs},
+ {1, 3,
+
+ pswrd2_int_bb_b0_regs,
+
+ pswrd2_prty_bb_b0_regs},
+ {1, 3,
+
+ pswrd2_int_k2_regs,
+
+ pswrd2_prty_k2_regs } } },
+ {"pswwr", pswwr_int_attn_desc, pswwr_prty_attn_desc, {
+ {1, 0,
+
+ pswwr_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pswwr_int_bb_b0_regs,
+
+ pswwr_prty_bb_b0_regs},
+ {1, 1,
+
+ pswwr_int_k2_regs,
+
+ pswwr_prty_k2_regs } } },
+ {"pswwr2", pswwr2_int_attn_desc, pswwr2_prty_attn_desc, {
+ {1, 4,
+
+ pswwr2_int_bb_a0_regs,
+
+ pswwr2_prty_bb_a0_regs},
+ {1, 5,
+
+ pswwr2_int_bb_b0_regs,
+
+ pswwr2_prty_bb_b0_regs},
+ {1, 5,
+
+ pswwr2_int_k2_regs,
+
+ pswwr2_prty_k2_regs } } },
+ {"pswrq", pswrq_int_attn_desc, pswrq_prty_attn_desc, {
+ {1, 0,
+
+ pswrq_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pswrq_int_bb_b0_regs,
+
+ pswrq_prty_bb_b0_regs},
+ {1, 1,
+
+ pswrq_int_k2_regs,
+
+ pswrq_prty_k2_regs } } },
+ {"pswrq2", pswrq2_int_attn_desc, pswrq2_prty_attn_desc, {
+ {1, 1,
+
+ pswrq2_int_bb_a0_regs,
+
+ pswrq2_prty_bb_a0_regs},
+ {1, 1,
+
+ pswrq2_int_bb_b0_regs,
+
+ pswrq2_prty_bb_b0_regs},
+ {1, 1,
+
+ pswrq2_int_k2_regs,
+
+ pswrq2_prty_k2_regs } } },
+ {"pglcs", pglcs_int_attn_desc, OSAL_NULL, {
+ {1, 0, pglcs_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 0, pglcs_int_bb_b0_regs,
+ OSAL_NULL},
+ {1, 0, pglcs_int_k2_regs,
+ OSAL_NULL } } },
+ {"dmae", dmae_int_attn_desc, dmae_prty_attn_desc, {
+ {1, 1,
+ dmae_int_bb_a0_regs,
+
+ dmae_prty_bb_a0_regs},
+ {1, 1,
+ dmae_int_bb_b0_regs,
+
+ dmae_prty_bb_b0_regs},
+ {1, 1,
+ dmae_int_k2_regs,
+
+ dmae_prty_k2_regs } } },
+ {"ptu", ptu_int_attn_desc, ptu_prty_attn_desc, {
+ {1, 1,
+ ptu_int_bb_a0_regs,
+ ptu_prty_bb_a0_regs},
+ {1, 1,
+ ptu_int_bb_b0_regs,
+ ptu_prty_bb_b0_regs},
+ {1, 1, ptu_int_k2_regs,
+ ptu_prty_k2_regs } } },
+ {"tcm", tcm_int_attn_desc, tcm_prty_attn_desc, {
+ {3, 2,
+ tcm_int_bb_a0_regs,
+ tcm_prty_bb_a0_regs},
+ {3, 2,
+ tcm_int_bb_b0_regs,
+ tcm_prty_bb_b0_regs},
+ {3, 2, tcm_int_k2_regs,
+ tcm_prty_k2_regs } } },
+ {"mcm", mcm_int_attn_desc, mcm_prty_attn_desc, {
+ {3, 2,
+ mcm_int_bb_a0_regs,
+ mcm_prty_bb_a0_regs},
+ {3, 2,
+ mcm_int_bb_b0_regs,
+ mcm_prty_bb_b0_regs},
+ {3, 2, mcm_int_k2_regs,
+ mcm_prty_k2_regs } } },
+ {"ucm", ucm_int_attn_desc, ucm_prty_attn_desc, {
+ {3, 2,
+ ucm_int_bb_a0_regs,
+ ucm_prty_bb_a0_regs},
+ {3, 2,
+ ucm_int_bb_b0_regs,
+ ucm_prty_bb_b0_regs},
+ {3, 2, ucm_int_k2_regs,
+ ucm_prty_k2_regs } } },
+ {"xcm", xcm_int_attn_desc, xcm_prty_attn_desc, {
+ {3, 2,
+ xcm_int_bb_a0_regs,
+ xcm_prty_bb_a0_regs},
+ {3, 2,
+ xcm_int_bb_b0_regs,
+ xcm_prty_bb_b0_regs},
+ {3, 2, xcm_int_k2_regs,
+ xcm_prty_k2_regs } } },
+ {"ycm", ycm_int_attn_desc, ycm_prty_attn_desc, {
+ {3, 2,
+ ycm_int_bb_a0_regs,
+ ycm_prty_bb_a0_regs},
+ {3, 2,
+ ycm_int_bb_b0_regs,
+ ycm_prty_bb_b0_regs},
+ {3, 2, ycm_int_k2_regs,
+ ycm_prty_k2_regs } } },
+ {"pcm", pcm_int_attn_desc, pcm_prty_attn_desc, {
+ {3, 1,
+ pcm_int_bb_a0_regs,
+ pcm_prty_bb_a0_regs},
+ {3, 1,
+ pcm_int_bb_b0_regs,
+ pcm_prty_bb_b0_regs},
+ {3, 1, pcm_int_k2_regs,
+ pcm_prty_k2_regs } } },
+ {"qm", qm_int_attn_desc, qm_prty_attn_desc, {
+ {1, 4, qm_int_bb_a0_regs,
+ qm_prty_bb_a0_regs},
+ {1, 4, qm_int_bb_b0_regs,
+ qm_prty_bb_b0_regs},
+ {1, 4, qm_int_k2_regs,
+ qm_prty_k2_regs } } },
+ {"tm", tm_int_attn_desc, tm_prty_attn_desc, {
+ {2, 1, tm_int_bb_a0_regs,
+ tm_prty_bb_a0_regs},
+ {2, 1, tm_int_bb_b0_regs,
+ tm_prty_bb_b0_regs},
+ {2, 1, tm_int_k2_regs,
+ tm_prty_k2_regs } } },
+ {"dorq", dorq_int_attn_desc, dorq_prty_attn_desc, {
+ {1, 1,
+ dorq_int_bb_a0_regs,
+
+ dorq_prty_bb_a0_regs},
+ {1, 2,
+ dorq_int_bb_b0_regs,
+
+ dorq_prty_bb_b0_regs},
+ {1, 2,
+ dorq_int_k2_regs,
+
+ dorq_prty_k2_regs } } },
+ {"brb", brb_int_attn_desc, brb_prty_attn_desc, {
+ {12, 2,
+ brb_int_bb_a0_regs,
+ brb_prty_bb_a0_regs},
+ {12, 3,
+ brb_int_bb_b0_regs,
+ brb_prty_bb_b0_regs},
+ {12, 3, brb_int_k2_regs,
+ brb_prty_k2_regs } } },
+ {"src", src_int_attn_desc, OSAL_NULL, {
+ {1, 0, src_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 0, src_int_bb_b0_regs,
+ OSAL_NULL},
+ {1, 0, src_int_k2_regs,
+ OSAL_NULL } } },
+ {"prs", prs_int_attn_desc, prs_prty_attn_desc, {
+ {1, 3,
+ prs_int_bb_a0_regs,
+ prs_prty_bb_a0_regs},
+ {1, 3,
+ prs_int_bb_b0_regs,
+ prs_prty_bb_b0_regs},
+ {1, 3, prs_int_k2_regs,
+ prs_prty_k2_regs } } },
+ {"tsdm", tsdm_int_attn_desc, tsdm_prty_attn_desc, {
+ {1, 1,
+ tsdm_int_bb_a0_regs,
+
+ tsdm_prty_bb_a0_regs},
+ {1, 1,
+ tsdm_int_bb_b0_regs,
+
+ tsdm_prty_bb_b0_regs},
+ {1, 1,
+ tsdm_int_k2_regs,
+
+ tsdm_prty_k2_regs } } },
+ {"msdm", msdm_int_attn_desc, msdm_prty_attn_desc, {
+ {1, 1,
+ msdm_int_bb_a0_regs,
+
+ msdm_prty_bb_a0_regs},
+ {1, 1,
+ msdm_int_bb_b0_regs,
+
+ msdm_prty_bb_b0_regs},
+ {1, 1,
+ msdm_int_k2_regs,
+
+ msdm_prty_k2_regs } } },
+ {"usdm", usdm_int_attn_desc, usdm_prty_attn_desc, {
+ {1, 1,
+ usdm_int_bb_a0_regs,
+
+ usdm_prty_bb_a0_regs},
+ {1, 1,
+ usdm_int_bb_b0_regs,
+
+ usdm_prty_bb_b0_regs},
+ {1, 1,
+ usdm_int_k2_regs,
+
+ usdm_prty_k2_regs } } },
+ {"xsdm", xsdm_int_attn_desc, xsdm_prty_attn_desc, {
+ {1, 1,
+ xsdm_int_bb_a0_regs,
+
+ xsdm_prty_bb_a0_regs},
+ {1, 1,
+ xsdm_int_bb_b0_regs,
+
+ xsdm_prty_bb_b0_regs},
+ {1, 1,
+ xsdm_int_k2_regs,
+
+ xsdm_prty_k2_regs } } },
+ {"ysdm", ysdm_int_attn_desc, ysdm_prty_attn_desc, {
+ {1, 1,
+ ysdm_int_bb_a0_regs,
+
+ ysdm_prty_bb_a0_regs},
+ {1, 1,
+ ysdm_int_bb_b0_regs,
+
+ ysdm_prty_bb_b0_regs},
+ {1, 1,
+ ysdm_int_k2_regs,
+
+ ysdm_prty_k2_regs } } },
+ {"psdm", psdm_int_attn_desc, psdm_prty_attn_desc, {
+ {1, 1,
+ psdm_int_bb_a0_regs,
+
+ psdm_prty_bb_a0_regs},
+ {1, 1,
+ psdm_int_bb_b0_regs,
+
+ psdm_prty_bb_b0_regs},
+ {1, 1,
+ psdm_int_k2_regs,
+
+ psdm_prty_k2_regs } } },
+ {"tsem", tsem_int_attn_desc, tsem_prty_attn_desc, {
+ {3, 3,
+ tsem_int_bb_a0_regs,
+
+ tsem_prty_bb_a0_regs},
+ {3, 3,
+ tsem_int_bb_b0_regs,
+
+ tsem_prty_bb_b0_regs},
+ {3, 4,
+ tsem_int_k2_regs,
+
+ tsem_prty_k2_regs } } },
+ {"msem", msem_int_attn_desc, msem_prty_attn_desc, {
+ {3, 2,
+ msem_int_bb_a0_regs,
+
+ msem_prty_bb_a0_regs},
+ {3, 2,
+ msem_int_bb_b0_regs,
+
+ msem_prty_bb_b0_regs},
+ {3, 3,
+ msem_int_k2_regs,
+
+ msem_prty_k2_regs } } },
+ {"usem", usem_int_attn_desc, usem_prty_attn_desc, {
+ {3, 2,
+ usem_int_bb_a0_regs,
+
+ usem_prty_bb_a0_regs},
+ {3, 2,
+ usem_int_bb_b0_regs,
+
+ usem_prty_bb_b0_regs},
+ {3, 3,
+ usem_int_k2_regs,
+
+ usem_prty_k2_regs } } },
+ {"xsem", xsem_int_attn_desc, xsem_prty_attn_desc, {
+ {3, 2,
+ xsem_int_bb_a0_regs,
+
+ xsem_prty_bb_a0_regs},
+ {3, 2,
+ xsem_int_bb_b0_regs,
+
+ xsem_prty_bb_b0_regs},
+ {3, 3,
+ xsem_int_k2_regs,
+
+ xsem_prty_k2_regs } } },
+ {"ysem", ysem_int_attn_desc, ysem_prty_attn_desc, {
+ {3, 2,
+ ysem_int_bb_a0_regs,
+
+ ysem_prty_bb_a0_regs},
+ {3, 2,
+ ysem_int_bb_b0_regs,
+
+ ysem_prty_bb_b0_regs},
+ {3, 3,
+ ysem_int_k2_regs,
+
+ ysem_prty_k2_regs } } },
+ {"psem", psem_int_attn_desc, psem_prty_attn_desc, {
+ {3, 3,
+ psem_int_bb_a0_regs,
+
+ psem_prty_bb_a0_regs},
+ {3, 3,
+ psem_int_bb_b0_regs,
+
+ psem_prty_bb_b0_regs},
+ {3, 4,
+ psem_int_k2_regs,
+
+ psem_prty_k2_regs } } },
+ {"rss", rss_int_attn_desc, rss_prty_attn_desc, {
+ {1, 1,
+ rss_int_bb_a0_regs,
+ rss_prty_bb_a0_regs},
+ {1, 1,
+ rss_int_bb_b0_regs,
+ rss_prty_bb_b0_regs},
+ {1, 1, rss_int_k2_regs,
+ rss_prty_k2_regs } } },
+ {"tmld", tmld_int_attn_desc, tmld_prty_attn_desc, {
+ {1, 1,
+ tmld_int_bb_a0_regs,
+
+ tmld_prty_bb_a0_regs},
+ {1, 1,
+ tmld_int_bb_b0_regs,
+
+ tmld_prty_bb_b0_regs},
+ {1, 1,
+ tmld_int_k2_regs,
+
+ tmld_prty_k2_regs } } },
+ {"muld", muld_int_attn_desc, muld_prty_attn_desc, {
+ {1, 1,
+ muld_int_bb_a0_regs,
+
+ muld_prty_bb_a0_regs},
+ {1, 1,
+ muld_int_bb_b0_regs,
+
+ muld_prty_bb_b0_regs},
+ {1, 1,
+ muld_int_k2_regs,
+
+ muld_prty_k2_regs } } },
+ {"yuld", yuld_int_attn_desc, yuld_prty_attn_desc, {
+ {1, 1,
+ yuld_int_bb_a0_regs,
+
+ yuld_prty_bb_a0_regs},
+ {1, 1,
+ yuld_int_bb_b0_regs,
+
+ yuld_prty_bb_b0_regs},
+ {1, 1,
+ yuld_int_k2_regs,
+
+ yuld_prty_k2_regs } } },
+ {"xyld", xyld_int_attn_desc, xyld_prty_attn_desc, {
+ {1, 1,
+ xyld_int_bb_a0_regs,
+
+ xyld_prty_bb_a0_regs},
+ {1, 1,
+ xyld_int_bb_b0_regs,
+
+ xyld_prty_bb_b0_regs},
+ {1, 1,
+ xyld_int_k2_regs,
+
+ xyld_prty_k2_regs } } },
+ {"prm", prm_int_attn_desc, prm_prty_attn_desc, {
+ {1, 1,
+ prm_int_bb_a0_regs,
+ prm_prty_bb_a0_regs},
+ {1, 2,
+ prm_int_bb_b0_regs,
+ prm_prty_bb_b0_regs},
+ {1, 2, prm_int_k2_regs,
+ prm_prty_k2_regs } } },
+ {"pbf_pb1", pbf_pb1_int_attn_desc, pbf_pb1_prty_attn_desc, {
+ {1, 0,
+
+ pbf_pb1_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pbf_pb1_int_bb_b0_regs,
+
+ pbf_pb1_prty_bb_b0_regs},
+ {1, 1,
+
+ pbf_pb1_int_k2_regs,
+
+ pbf_pb1_prty_k2_regs } } },
+ {"pbf_pb2", pbf_pb2_int_attn_desc, pbf_pb2_prty_attn_desc, {
+ {1, 0,
+
+ pbf_pb2_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pbf_pb2_int_bb_b0_regs,
+
+ pbf_pb2_prty_bb_b0_regs},
+ {1, 1,
+
+ pbf_pb2_int_k2_regs,
+
+ pbf_pb2_prty_k2_regs } } },
+ {"rpb", rpb_int_attn_desc, rpb_prty_attn_desc, {
+ {1, 0,
+ rpb_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+ rpb_int_bb_b0_regs,
+ rpb_prty_bb_b0_regs},
+ {1, 1, rpb_int_k2_regs,
+ rpb_prty_k2_regs } } },
+ {"btb", btb_int_attn_desc, btb_prty_attn_desc, {
+ {11, 1,
+ btb_int_bb_a0_regs,
+ btb_prty_bb_a0_regs},
+ {11, 2,
+ btb_int_bb_b0_regs,
+ btb_prty_bb_b0_regs},
+ {11, 2, btb_int_k2_regs,
+ btb_prty_k2_regs } } },
+ {"pbf", pbf_int_attn_desc, pbf_prty_attn_desc, {
+ {1, 2,
+ pbf_int_bb_a0_regs,
+ pbf_prty_bb_a0_regs},
+ {1, 3,
+ pbf_int_bb_b0_regs,
+ pbf_prty_bb_b0_regs},
+ {1, 3, pbf_int_k2_regs,
+ pbf_prty_k2_regs } } },
+ {"rdif", rdif_int_attn_desc, rdif_prty_attn_desc, {
+ {1, 0,
+ rdif_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+ rdif_int_bb_b0_regs,
+
+ rdif_prty_bb_b0_regs},
+ {1, 1,
+ rdif_int_k2_regs,
+
+ rdif_prty_k2_regs } } },
+ {"tdif", tdif_int_attn_desc, tdif_prty_attn_desc, {
+ {1, 1,
+ tdif_int_bb_a0_regs,
+
+ tdif_prty_bb_a0_regs},
+ {1, 2,
+ tdif_int_bb_b0_regs,
+
+ tdif_prty_bb_b0_regs},
+ {1, 2,
+ tdif_int_k2_regs,
+
+ tdif_prty_k2_regs } } },
+ {"cdu", cdu_int_attn_desc, cdu_prty_attn_desc, {
+ {1, 1,
+ cdu_int_bb_a0_regs,
+ cdu_prty_bb_a0_regs},
+ {1, 1,
+ cdu_int_bb_b0_regs,
+ cdu_prty_bb_b0_regs},
+ {1, 1, cdu_int_k2_regs,
+ cdu_prty_k2_regs } } },
+ {"ccfc", ccfc_int_attn_desc, ccfc_prty_attn_desc, {
+ {1, 2,
+ ccfc_int_bb_a0_regs,
+
+ ccfc_prty_bb_a0_regs},
+ {1, 2,
+ ccfc_int_bb_b0_regs,
+
+ ccfc_prty_bb_b0_regs},
+ {1, 2,
+ ccfc_int_k2_regs,
+
+ ccfc_prty_k2_regs } } },
+ {"tcfc", tcfc_int_attn_desc, tcfc_prty_attn_desc, {
+ {1, 2,
+ tcfc_int_bb_a0_regs,
+
+ tcfc_prty_bb_a0_regs},
+ {1, 2,
+ tcfc_int_bb_b0_regs,
+
+ tcfc_prty_bb_b0_regs},
+ {1, 2,
+ tcfc_int_k2_regs,
+
+ tcfc_prty_k2_regs } } },
+ {"igu", igu_int_attn_desc, igu_prty_attn_desc, {
+ {1, 3,
+ igu_int_bb_a0_regs,
+ igu_prty_bb_a0_regs},
+ {1, 3,
+ igu_int_bb_b0_regs,
+ igu_prty_bb_b0_regs},
+ {1, 2, igu_int_k2_regs,
+ igu_prty_k2_regs } } },
+ {"cau", cau_int_attn_desc, cau_prty_attn_desc, {
+ {1, 1,
+ cau_int_bb_a0_regs,
+ cau_prty_bb_a0_regs},
+ {1, 1,
+ cau_int_bb_b0_regs,
+ cau_prty_bb_b0_regs},
+ {1, 1, cau_int_k2_regs,
+ cau_prty_k2_regs } } },
+ {"umac", umac_int_attn_desc, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {1, 0, umac_int_k2_regs,
+ OSAL_NULL } } },
+ {"xmac", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"dbg", dbg_int_attn_desc, dbg_prty_attn_desc, {
+ {1, 1,
+ dbg_int_bb_a0_regs,
+ dbg_prty_bb_a0_regs},
+ {1, 1,
+ dbg_int_bb_b0_regs,
+ dbg_prty_bb_b0_regs},
+ {1, 1, dbg_int_k2_regs,
+ dbg_prty_k2_regs } } },
+ {"nig", nig_int_attn_desc, nig_prty_attn_desc, {
+ {6, 4,
+ nig_int_bb_a0_regs,
+ nig_prty_bb_a0_regs},
+ {6, 5,
+ nig_int_bb_b0_regs,
+ nig_prty_bb_b0_regs},
+ {10, 5, nig_int_k2_regs,
+ nig_prty_k2_regs } } },
+ {"wol", wol_int_attn_desc, wol_prty_attn_desc, {
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {1, 1, wol_int_k2_regs,
+ wol_prty_k2_regs } } },
+ {"bmbn", bmbn_int_attn_desc, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {1, 0, bmbn_int_k2_regs,
+ OSAL_NULL } } },
+ {"ipc", ipc_int_attn_desc, ipc_prty_attn_desc, {
+ {1, 1,
+ ipc_int_bb_a0_regs,
+ ipc_prty_bb_a0_regs},
+ {1, 1,
+ ipc_int_bb_b0_regs,
+ ipc_prty_bb_b0_regs},
+ {1, 0, ipc_int_k2_regs,
+ OSAL_NULL } } },
+ {"nwm", nwm_int_attn_desc, nwm_prty_attn_desc, {
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {1, 3, nwm_int_k2_regs,
+ nwm_prty_k2_regs } } },
+ {"nws", nws_int_attn_desc, nws_prty_attn_desc, {
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {4, 1, nws_int_k2_regs,
+ nws_prty_k2_regs } } },
+ {"ms", ms_int_attn_desc, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {1, 0, ms_int_k2_regs,
+ OSAL_NULL } } },
+ {"phy_pcie", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"misc_aeu", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"bar0_map", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+};
+
+#define NUM_INT_REGS 423
+#define NUM_PRTY_REGS 378
+
+#endif /* __PREVENT_INT_ATTN__ */
+
+#endif /* __ATTN_VALUES_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_chain.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_chain.h
new file mode 100644
index 00000000..6d0382d3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_chain.h
@@ -0,0 +1,810 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_CHAIN_H__
+#define __ECORE_CHAIN_H__
+
+#include <assert.h> /* @DPDK */
+
+#include "common_hsi.h"
+#include "ecore_utils.h"
+
+enum ecore_chain_mode {
+ /* Each Page contains a next pointer at its end */
+ ECORE_CHAIN_MODE_NEXT_PTR,
+
+ /* Chain is a single page (next ptr) is unrequired */
+ ECORE_CHAIN_MODE_SINGLE,
+
+ /* Page pointers are located in a side list */
+ ECORE_CHAIN_MODE_PBL,
+};
+
+enum ecore_chain_use_mode {
+ ECORE_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
+ ECORE_CHAIN_USE_TO_CONSUME, /* Chain starts full */
+ ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
+};
+
+enum ecore_chain_cnt_type {
+ /* The chain's size/prod/cons are kept in 16-bit variables */
+ ECORE_CHAIN_CNT_TYPE_U16,
+
+ /* The chain's size/prod/cons are kept in 32-bit variables */
+ ECORE_CHAIN_CNT_TYPE_U32,
+};
+
+struct ecore_chain_next {
+ struct regpair next_phys;
+ void *next_virt;
+};
+
+struct ecore_chain_pbl_u16 {
+ u16 prod_page_idx;
+ u16 cons_page_idx;
+};
+
+struct ecore_chain_pbl_u32 {
+ u32 prod_page_idx;
+ u32 cons_page_idx;
+};
+
+struct ecore_chain_ext_pbl {
+ dma_addr_t p_pbl_phys;
+ void *p_pbl_virt;
+};
+
+struct ecore_chain_u16 {
+ /* Cyclic index of next element to produce/consme */
+ u16 prod_idx;
+ u16 cons_idx;
+};
+
+struct ecore_chain_u32 {
+ /* Cyclic index of next element to produce/consme */
+ u32 prod_idx;
+ u32 cons_idx;
+};
+
+struct ecore_chain {
+ /* fastpath portion of the chain - required for commands such
+ * as produce / consume.
+ */
+ /* Point to next element to produce/consume */
+ void *p_prod_elem;
+ void *p_cons_elem;
+
+ /* Fastpath portions of the PBL [if exists] */
+
+ struct {
+ /* Table for keeping the virtual addresses of the chain pages,
+ * respectively to the physical addresses in the pbl table.
+ */
+ void **pp_virt_addr_tbl;
+
+ union {
+ struct ecore_chain_pbl_u16 u16;
+ struct ecore_chain_pbl_u32 u32;
+ } c;
+ } pbl;
+
+ union {
+ struct ecore_chain_u16 chain16;
+ struct ecore_chain_u32 chain32;
+ } u;
+
+ /* Capacity counts only usable elements */
+ u32 capacity;
+ u32 page_cnt;
+
+ /* A u8 would suffice for mode, but it would save as a lot of headaches
+ * on castings & defaults.
+ */
+ enum ecore_chain_mode mode;
+
+ /* Elements information for fast calculations */
+ u16 elem_per_page;
+ u16 elem_per_page_mask;
+ u16 elem_size;
+ u16 next_page_mask;
+ u16 usable_per_page;
+ u8 elem_unusable;
+
+ u8 cnt_type;
+
+ /* Slowpath of the chain - required for initialization and destruction,
+ * but isn't involved in regular functionality.
+ */
+
+ /* Base address of a pre-allocated buffer for pbl */
+ struct {
+ dma_addr_t p_phys_table;
+ void *p_virt_table;
+ } pbl_sp;
+
+ /* Address of first page of the chain - the address is required
+ * for fastpath operation [consume/produce] but only for the SINGLE
+ * flavour which isn't considered fastpath [== SPQ].
+ */
+ void *p_virt_addr;
+ dma_addr_t p_phys_addr;
+
+ /* Total number of elements [for entire chain] */
+ u32 size;
+
+ u8 intended_use;
+
+ /* TBD - do we really need this? Couldn't find usage for it */
+ bool b_external_pbl;
+
+ void *dp_ctx;
+};
+
+#define ECORE_CHAIN_PBL_ENTRY_SIZE (8)
+#define ECORE_CHAIN_PAGE_SIZE (0x1000)
+#define ELEMS_PER_PAGE(elem_size) (ECORE_CHAIN_PAGE_SIZE / (elem_size))
+
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ ((mode == ECORE_CHAIN_MODE_NEXT_PTR) ? \
+ (u8)(1 + ((sizeof(struct ecore_chain_next) - 1) / \
+ (elem_size))) : 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ ((u32)(ELEMS_PER_PAGE(elem_size) - \
+ UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+
+#define ECORE_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
+ DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+
+#define is_chain_u16(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U16)
+#define is_chain_u32(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U32)
+
+/* Accessors */
+static OSAL_INLINE u16 ecore_chain_get_prod_idx(struct ecore_chain *p_chain)
+{
+ OSAL_ASSERT(is_chain_u16(p_chain));
+ return p_chain->u.chain16.prod_idx;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_prod_idx_u32(struct ecore_chain *p_chain)
+{
+ OSAL_ASSERT(is_chain_u32(p_chain));
+ return p_chain->u.chain32.prod_idx;
+}
+
+static OSAL_INLINE u16 ecore_chain_get_cons_idx(struct ecore_chain *p_chain)
+{
+ OSAL_ASSERT(is_chain_u16(p_chain));
+ return p_chain->u.chain16.cons_idx;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_cons_idx_u32(struct ecore_chain *p_chain)
+{
+ OSAL_ASSERT(is_chain_u32(p_chain));
+ return p_chain->u.chain32.cons_idx;
+}
+
+/* FIXME:
+ * Should create OSALs for the below definitions.
+ * For Linux, replace them with the existing U16_MAX and U32_MAX, and handle
+ * kernel versions that lack them.
+ */
+#define ECORE_U16_MAX ((u16)~0U)
+#define ECORE_U32_MAX ((u32)~0U)
+
+static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain)
+{
+ u16 used;
+
+ OSAL_ASSERT(is_chain_u16(p_chain));
+
+ used = (u16)(((u32)ECORE_U16_MAX + 1 +
+ (u32)(p_chain->u.chain16.prod_idx)) -
+ (u32)p_chain->u.chain16.cons_idx);
+ if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
+ used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
+ p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+
+ return (u16)(p_chain->capacity - used);
+}
+
+static OSAL_INLINE u32
+ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain)
+{
+ u32 used;
+
+ OSAL_ASSERT(is_chain_u32(p_chain));
+
+ used = (u32)(((u64)ECORE_U32_MAX + 1 +
+ (u64)(p_chain->u.chain32.prod_idx)) -
+ (u64)p_chain->u.chain32.cons_idx);
+ if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
+ used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
+ p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+
+ return p_chain->capacity - used;
+}
+
+static OSAL_INLINE u8 ecore_chain_is_full(struct ecore_chain *p_chain)
+{
+ if (is_chain_u16(p_chain))
+ return (ecore_chain_get_elem_left(p_chain) ==
+ p_chain->capacity);
+ else
+ return (ecore_chain_get_elem_left_u32(p_chain) ==
+ p_chain->capacity);
+}
+
+static OSAL_INLINE u8 ecore_chain_is_empty(struct ecore_chain *p_chain)
+{
+ if (is_chain_u16(p_chain))
+ return (ecore_chain_get_elem_left(p_chain) == 0);
+ else
+ return (ecore_chain_get_elem_left_u32(p_chain) == 0);
+}
+
+static OSAL_INLINE
+u16 ecore_chain_get_elem_per_page(struct ecore_chain *p_chain)
+{
+ return p_chain->elem_per_page;
+}
+
+static OSAL_INLINE
+u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain)
+{
+ return p_chain->usable_per_page;
+}
+
+static OSAL_INLINE
+u8 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
+{
+ return p_chain->elem_unusable;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_size(struct ecore_chain *p_chain)
+{
+ return p_chain->size;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
+{
+ return p_chain->page_cnt;
+}
+
+static OSAL_INLINE
+dma_addr_t ecore_chain_get_pbl_phys(struct ecore_chain *p_chain)
+{
+ return p_chain->pbl_sp.p_phys_table;
+}
+
+/**
+ * @brief ecore_chain_advance_page -
+ *
+ * Advance the next element accros pages for a linked chain
+ *
+ * @param p_chain
+ * @param p_next_elem
+ * @param idx_to_inc
+ * @param page_to_inc
+ */
+static OSAL_INLINE void
+ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
+ void *idx_to_inc, void *page_to_inc)
+{
+ struct ecore_chain_next *p_next = OSAL_NULL;
+ u32 page_index = 0;
+
+ switch (p_chain->mode) {
+ case ECORE_CHAIN_MODE_NEXT_PTR:
+ p_next = (struct ecore_chain_next *)(*p_next_elem);
+ *p_next_elem = p_next->next_virt;
+ if (is_chain_u16(p_chain))
+ *(u16 *)idx_to_inc += (u16)p_chain->elem_unusable;
+ else
+ *(u32 *)idx_to_inc += (u16)p_chain->elem_unusable;
+ break;
+ case ECORE_CHAIN_MODE_SINGLE:
+ *p_next_elem = p_chain->p_virt_addr;
+ break;
+ case ECORE_CHAIN_MODE_PBL:
+ if (is_chain_u16(p_chain)) {
+ if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
+ *(u16 *)page_to_inc = 0;
+ page_index = *(u16 *)page_to_inc;
+ } else {
+ if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
+ *(u32 *)page_to_inc = 0;
+ page_index = *(u32 *)page_to_inc;
+ }
+ *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
+ }
+}
+
+#define is_unusable_idx(p, idx) \
+ (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_idx_u32(p, idx) \
+ (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_next_idx(p, idx) \
+ ((((p)->u.chain16.idx + 1) & \
+ (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_next_idx_u32(p, idx) \
+ ((((p)->u.chain32.idx + 1) & \
+ (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define test_and_skip(p, idx) \
+ do { \
+ if (is_chain_u16(p)) { \
+ if (is_unusable_idx(p, idx)) \
+ (p)->u.chain16.idx += \
+ (p)->elem_unusable; \
+ } else { \
+ if (is_unusable_idx_u32(p, idx)) \
+ (p)->u.chain32.idx += \
+ (p)->elem_unusable; \
+ } \
+ } while (0)
+
+/**
+ * @brief ecore_chain_return_multi_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate previous produced elements are now consumed.
+ *
+ * @param p_chain
+ * @param num
+ */
+static OSAL_INLINE
+void ecore_chain_return_multi_produced(struct ecore_chain *p_chain, u32 num)
+{
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.cons_idx += (u16)num;
+ else
+ p_chain->u.chain32.cons_idx += num;
+ test_and_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief ecore_chain_return_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate previous produced elements are now consumed.
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE void ecore_chain_return_produced(struct ecore_chain *p_chain)
+{
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.cons_idx++;
+ else
+ p_chain->u.chain32.cons_idx++;
+ test_and_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief ecore_chain_produce -
+ *
+ * A chain in which the driver "Produces" elements should use this to get
+ * a pointer to the next element which can be "Produced". It's driver
+ * responsibility to validate that the chain has room for new element.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to next element
+ */
+static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
+{
+ void *p_ret = OSAL_NULL, *p_prod_idx, *p_prod_page_idx;
+
+ if (is_chain_u16(p_chain)) {
+ if ((p_chain->u.chain16.prod_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_prod_idx = &p_chain->u.chain16.prod_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
+ ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+ p_prod_idx, p_prod_page_idx);
+ }
+ p_chain->u.chain16.prod_idx++;
+ } else {
+ if ((p_chain->u.chain32.prod_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_prod_idx = &p_chain->u.chain32.prod_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
+ ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+ p_prod_idx, p_prod_page_idx);
+ }
+ p_chain->u.chain32.prod_idx++;
+ }
+
+ p_ret = p_chain->p_prod_elem;
+ p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
+ p_chain->elem_size);
+
+ return p_ret;
+}
+
+/**
+ * @brief ecore_chain_get_capacity -
+ *
+ * Get the maximum number of BDs in chain
+ *
+ * @param p_chain
+ * @param num
+ *
+ * @return number of unusable BDs
+ */
+static OSAL_INLINE u32 ecore_chain_get_capacity(struct ecore_chain *p_chain)
+{
+ return p_chain->capacity;
+}
+
+/**
+ * @brief ecore_chain_recycle_consumed -
+ *
+ * Returns an element which was previously consumed;
+ * Increments producers so they could be written to FW.
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE
+void ecore_chain_recycle_consumed(struct ecore_chain *p_chain)
+{
+ test_and_skip(p_chain, prod_idx);
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.prod_idx++;
+ else
+ p_chain->u.chain32.prod_idx++;
+}
+
+/**
+ * @brief ecore_chain_consume -
+ *
+ * A Chain in which the driver utilizes data written by a different source
+ * (i.e., FW) should use this to access passed buffers.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next buffer written
+ */
+static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
+{
+ void *p_ret = OSAL_NULL, *p_cons_idx, *p_cons_page_idx;
+
+ if (is_chain_u16(p_chain)) {
+ if ((p_chain->u.chain16.cons_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_cons_idx = &p_chain->u.chain16.cons_idx;
+ p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
+ ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+ p_cons_idx, p_cons_page_idx);
+ }
+ p_chain->u.chain16.cons_idx++;
+ } else {
+ if ((p_chain->u.chain32.cons_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_cons_idx = &p_chain->u.chain32.cons_idx;
+ p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
+ ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+ p_cons_idx, p_cons_page_idx);
+ }
+ p_chain->u.chain32.cons_idx++;
+ }
+
+ p_ret = p_chain->p_cons_elem;
+ p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
+ p_chain->elem_size);
+
+ return p_ret;
+}
+
+/**
+ * @brief ecore_chain_reset -
+ *
+ * Resets the chain to its start state
+ *
+ * @param p_chain pointer to a previously allocted chain
+ */
+static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
+{
+ u32 i;
+
+ if (is_chain_u16(p_chain)) {
+ p_chain->u.chain16.prod_idx = 0;
+ p_chain->u.chain16.cons_idx = 0;
+ } else {
+ p_chain->u.chain32.prod_idx = 0;
+ p_chain->u.chain32.cons_idx = 0;
+ }
+ p_chain->p_cons_elem = p_chain->p_virt_addr;
+ p_chain->p_prod_elem = p_chain->p_virt_addr;
+
+ if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
+ /* Use "page_cnt-1" as a reset value for the prod/cons page's
+ * indices, to avoid unnecessary page advancing on the first
+ * call to ecore_chain_produce/consume. Instead, the indices
+ * will be advanced to page_cnt and then will be wrapped to 0.
+ */
+ u32 reset_val = p_chain->page_cnt - 1;
+
+ if (is_chain_u16(p_chain)) {
+ p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
+ p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
+ } else {
+ p_chain->pbl.c.u32.prod_page_idx = reset_val;
+ p_chain->pbl.c.u32.cons_page_idx = reset_val;
+ }
+ }
+
+ switch (p_chain->intended_use) {
+ case ECORE_CHAIN_USE_TO_CONSUME:
+ /* produce empty elements */
+ for (i = 0; i < p_chain->capacity; i++)
+ ecore_chain_recycle_consumed(p_chain);
+ break;
+
+ case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
+ case ECORE_CHAIN_USE_TO_PRODUCE:
+ default:
+ /* Do nothing */
+ break;
+ }
+}
+
+/**
+ * @brief ecore_chain_init_params -
+ *
+ * Initalizes a basic chain struct
+ *
+ * @param p_chain
+ * @param page_cnt number of pages in the allocated buffer
+ * @param elem_size size of each element in the chain
+ * @param intended_use
+ * @param mode
+ * @param cnt_type
+ * @param dp_ctx
+ */
+static OSAL_INLINE void
+ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
+ enum ecore_chain_use_mode intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type, void *dp_ctx)
+{
+ /* chain fixed parameters */
+ p_chain->p_virt_addr = OSAL_NULL;
+ p_chain->p_phys_addr = 0;
+ p_chain->elem_size = elem_size;
+ p_chain->intended_use = (u8)intended_use;
+ p_chain->mode = mode;
+ p_chain->cnt_type = (u8)cnt_type;
+
+ p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
+ p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
+ p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
+ p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+ p_chain->next_page_mask = (p_chain->usable_per_page &
+ p_chain->elem_per_page_mask);
+
+ p_chain->page_cnt = page_cnt;
+ p_chain->capacity = p_chain->usable_per_page * page_cnt;
+ p_chain->size = p_chain->elem_per_page * page_cnt;
+ p_chain->b_external_pbl = false;
+ p_chain->pbl_sp.p_phys_table = 0;
+ p_chain->pbl_sp.p_virt_table = OSAL_NULL;
+ p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
+
+ p_chain->dp_ctx = dp_ctx;
+}
+
+/**
+ * @brief ecore_chain_init_mem -
+ *
+ * Initalizes a basic chain struct with its chain buffers
+ *
+ * @param p_chain
+ * @param p_virt_addr virtual address of allocated buffer's beginning
+ * @param p_phys_addr physical address of allocated buffer's beginning
+ *
+ */
+static OSAL_INLINE void ecore_chain_init_mem(struct ecore_chain *p_chain,
+ void *p_virt_addr,
+ dma_addr_t p_phys_addr)
+{
+ p_chain->p_virt_addr = p_virt_addr;
+ p_chain->p_phys_addr = p_phys_addr;
+}
+
+/**
+ * @brief ecore_chain_init_pbl_mem -
+ *
+ * Initalizes a basic chain struct with its pbl buffers
+ *
+ * @param p_chain
+ * @param p_virt_pbl pointer to a pre allocated side table which will hold
+ * virtual page addresses.
+ * @param p_phys_pbl pointer to a pre-allocated side table which will hold
+ * physical page addresses.
+ * @param pp_virt_addr_tbl
+ * pointer to a pre-allocated side table which will hold
+ * the virtual addresses of the chain pages.
+ *
+ */
+static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain,
+ void *p_virt_pbl,
+ dma_addr_t p_phys_pbl,
+ void **pp_virt_addr_tbl)
+{
+ p_chain->pbl_sp.p_phys_table = p_phys_pbl;
+ p_chain->pbl_sp.p_virt_table = p_virt_pbl;
+ p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
+}
+
+/**
+ * @brief ecore_chain_init_next_ptr_elem -
+ *
+ * Initalizes a next pointer element
+ *
+ * @param p_chain
+ * @param p_virt_curr virtual address of a chain page of which the next
+ * pointer element is initialized
+ * @param p_virt_next virtual address of the next chain page
+ * @param p_phys_next physical address of the next chain page
+ *
+ */
+static OSAL_INLINE void
+ecore_chain_init_next_ptr_elem(struct ecore_chain *p_chain, void *p_virt_curr,
+ void *p_virt_next, dma_addr_t p_phys_next)
+{
+ struct ecore_chain_next *p_next;
+ u32 size;
+
+ size = p_chain->elem_size * p_chain->usable_per_page;
+ p_next = (struct ecore_chain_next *)((u8 *)p_virt_curr + size);
+
+ DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
+
+ p_next->next_virt = p_virt_next;
+}
+
+/**
+ * @brief ecore_chain_get_last_elem -
+ *
+ * Returns a pointer to the last element of the chain
+ *
+ * @param p_chain
+ *
+ * @return void*
+ */
+static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain)
+{
+ struct ecore_chain_next *p_next = OSAL_NULL;
+ void *p_virt_addr = OSAL_NULL;
+ u32 size, last_page_idx;
+
+ if (!p_chain->p_virt_addr)
+ goto out;
+
+ switch (p_chain->mode) {
+ case ECORE_CHAIN_MODE_NEXT_PTR:
+ size = p_chain->elem_size * p_chain->usable_per_page;
+ p_virt_addr = p_chain->p_virt_addr;
+ p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr + size);
+ while (p_next->next_virt != p_chain->p_virt_addr) {
+ p_virt_addr = p_next->next_virt;
+ p_next =
+ (struct ecore_chain_next *)((u8 *)p_virt_addr +
+ size);
+ }
+ break;
+ case ECORE_CHAIN_MODE_SINGLE:
+ p_virt_addr = p_chain->p_virt_addr;
+ break;
+ case ECORE_CHAIN_MODE_PBL:
+ last_page_idx = p_chain->page_cnt - 1;
+ p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+ break;
+ }
+ /* p_virt_addr points at this stage to the last page of the chain */
+ size = p_chain->elem_size * (p_chain->usable_per_page - 1);
+ p_virt_addr = ((u8 *)p_virt_addr + size);
+out:
+ return p_virt_addr;
+}
+
+/**
+ * @brief ecore_chain_set_prod - sets the prod to the given value
+ *
+ * @param prod_idx
+ * @param p_prod_elem
+ */
+static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
+ u32 prod_idx, void *p_prod_elem)
+{
+ if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
+ /* Use "prod_idx-1" since ecore_chain_produce() advances the
+ * page index before the producer index when getting to
+ * "next_page_mask".
+ */
+ u32 elem_idx =
+ (prod_idx - 1 + p_chain->capacity) % p_chain->capacity;
+ u32 page_idx = elem_idx / p_chain->elem_per_page;
+
+ if (is_chain_u16(p_chain))
+ p_chain->pbl.c.u16.prod_page_idx = (u16)page_idx;
+ else
+ p_chain->pbl.c.u32.prod_page_idx = page_idx;
+ }
+
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.prod_idx = (u16)prod_idx;
+ else
+ p_chain->u.chain32.prod_idx = prod_idx;
+ p_chain->p_prod_elem = p_prod_elem;
+}
+
+/**
+ * @brief ecore_chain_set_cons - sets the cons to the given value
+ *
+ * @param cons_idx
+ * @param p_cons_elem
+ */
+static OSAL_INLINE void ecore_chain_set_cons(struct ecore_chain *p_chain,
+ u32 cons_idx, void *p_cons_elem)
+{
+ if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
+ /* Use "cons_idx-1" since ecore_chain_consume() advances the
+ * page index before the consumer index when getting to
+ * "next_page_mask".
+ */
+ u32 elem_idx =
+ (cons_idx - 1 + p_chain->capacity) % p_chain->capacity;
+ u32 page_idx = elem_idx / p_chain->elem_per_page;
+
+ if (is_chain_u16(p_chain))
+ p_chain->pbl.c.u16.cons_page_idx = (u16)page_idx;
+ else
+ p_chain->pbl.c.u32.cons_page_idx = page_idx;
+ }
+
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.cons_idx = (u16)cons_idx;
+ else
+ p_chain->u.chain32.cons_idx = cons_idx;
+
+ p_chain->p_cons_elem = p_cons_elem;
+}
+
+/**
+ * @brief ecore_chain_pbl_zero_mem - set chain memory to 0
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE void ecore_chain_pbl_zero_mem(struct ecore_chain *p_chain)
+{
+ u32 i, page_cnt;
+
+ if (p_chain->mode != ECORE_CHAIN_MODE_PBL)
+ return;
+
+ page_cnt = ecore_chain_get_page_cnt(p_chain);
+
+ for (i = 0; i < page_cnt; i++)
+ OSAL_MEM_ZERO(p_chain->pbl.pp_virt_addr_tbl[i],
+ ECORE_CHAIN_PAGE_SIZE);
+}
+
+int ecore_chain_print(struct ecore_chain *p_chain, char *buffer,
+ u32 buffer_size, u32 *element_indx, u32 stop_indx,
+ bool print_metadata,
+ int (*func_ptr_print_element)(struct ecore_chain *p_chain,
+ void *p_element,
+ char *buffer),
+ int (*func_ptr_print_metadata)(struct ecore_chain
+ *p_chain,
+ char *buffer));
+
+#endif /* __ECORE_CHAIN_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.c
new file mode 100644
index 00000000..bf36ce58
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.c
@@ -0,0 +1,2227 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "common_hsi.h"
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_rt_defs.h"
+#include "ecore_status.h"
+#include "ecore.h"
+#include "ecore_init_ops.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_hw.h"
+#include "ecore_dev_api.h"
+#include "ecore_sriov.h"
+#include "ecore_mcp.h"
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES PROTOCOLID_COMMON
+#define NUM_TASK_TYPES 2
+#define NUM_TASK_PF_SEGMENTS 4
+#define NUM_TASK_VF_SEGMENTS 1
+
+/* Doorbell-Queue constants */
+#define DQ_RANGE_SHIFT 4
+#define DQ_RANGE_ALIGN (1 << DQ_RANGE_SHIFT)
+
+/* Searcher constants */
+#define SRC_MIN_NUM_ELEMS 256
+
+/* Timers constants */
+#define TM_SHIFT 7
+#define TM_ALIGN (1 << TM_SHIFT)
+#define TM_ELEM_SIZE 4
+
+/* ILT constants */
+#define ILT_DEFAULT_HW_P_SIZE 4
+
+#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
+#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
+
+/* ILT entry structure */
+#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_SHIFT 0
+#define ILT_ENTRY_VALID_MASK 0x1ULL
+#define ILT_ENTRY_VALID_SHIFT 52
+#define ILT_ENTRY_IN_REGS 2
+#define ILT_REG_SIZE_IN_BYTES 4
+
+/* connection context union */
+union conn_context {
+ struct e4_core_conn_context core_ctx;
+ struct e4_eth_conn_context eth_ctx;
+};
+
+/* TYPE-0 task context - iSCSI, FCOE */
+union type0_task_context {
+};
+
+/* TYPE-1 task context - ROCE */
+union type1_task_context {
+ struct regpair reserved; /* @DPDK */
+};
+
+struct src_ent {
+ u8 opaque[56];
+ u64 next;
+};
+
+#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+
+#define CONN_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+
+#define SRQ_CXT_SIZE (sizeof(struct regpair) * 8) /* @DPDK */
+
+#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
+
+/* Alignment is inherent to the type1_task_context structure */
+#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
+
+/* PF per protocl configuration object */
+#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+struct ecore_tid_seg {
+ u32 count;
+ u8 type;
+ bool has_fl_mem;
+};
+
+struct ecore_conn_type_cfg {
+ u32 cid_count;
+ u32 cids_per_vf;
+ struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
+};
+
+/* ILT Client configuration,
+ * Per connection type (protocol) resources (cids, tis, vf cids etc.)
+ * 1 - for connection context (CDUC) and for each task context we need two
+ * values, for regular task context and for force load memory
+ */
+#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
+#define CDUC_BLK (0)
+#define SRQ_BLK (0)
+#define CDUT_SEG_BLK(n) (1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_##X##_SEGMENTS)
+
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
+ ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
+ ILT_CLI_MAX
+};
+
+struct ilt_cfg_pair {
+ u32 reg;
+ u32 val;
+};
+
+struct ecore_ilt_cli_blk {
+ u32 total_size; /* 0 means not active */
+ u32 real_size_in_page;
+ u32 start_line;
+ u32 dynamic_line_cnt;
+};
+
+struct ecore_ilt_client_cfg {
+ bool active;
+
+ /* ILT boundaries */
+ struct ilt_cfg_pair first;
+ struct ilt_cfg_pair last;
+ struct ilt_cfg_pair p_size;
+
+ /* ILT client blocks for PF */
+ struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+ u32 pf_total_lines;
+
+ /* ILT client blocks for VFs */
+ struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+ u32 vf_total_lines;
+};
+
+/* Per Path -
+ * ILT shadow table
+ * Protocol acquired CID lists
+ * PF start line in ILT
+ */
+struct ecore_dma_mem {
+ dma_addr_t p_phys;
+ void *p_virt;
+ osal_size_t size;
+};
+
+#define MAP_WORD_SIZE sizeof(unsigned long)
+#define BITS_PER_MAP_WORD (MAP_WORD_SIZE * 8)
+
+struct ecore_cid_acquired_map {
+ u32 start_cid;
+ u32 max_count;
+ unsigned long *cid_map;
+};
+
+struct ecore_cxt_mngr {
+ /* Per protocl configuration */
+ struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+ /* computed ILT structure */
+ struct ecore_ilt_client_cfg clients[ILT_CLI_MAX];
+
+ /* Task type sizes */
+ u32 task_type_size[NUM_TASK_TYPES];
+
+ /* total number of VFs for this hwfn -
+ * ALL VFs are symmetric in terms of HW resources
+ */
+ u32 vf_count;
+
+ /* Acquired CIDs */
+ struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
+ /* TBD - do we want this allocated to reserve space? */
+ struct ecore_cid_acquired_map
+ acquired_vf[MAX_CONN_TYPES][COMMON_MAX_NUM_VFS];
+
+ /* ILT shadow table */
+ struct ecore_dma_mem *ilt_shadow;
+ u32 pf_start_line;
+
+ /* Mutex for a dynamic ILT allocation */
+ osal_mutex_t mutex;
+
+ /* SRC T2 */
+ struct ecore_dma_mem *t2;
+ u32 t2_num_pages;
+ u64 first_free;
+ u64 last_free;
+
+ /* The infrastructure originally was very generic and context/task
+ * oriented - per connection-type we would set how many of those
+ * are needed, and later when determining how much memory we're
+ * needing for a given block we'd iterate over all the relevant
+ * connection-types.
+ * But since then we've had some additional resources, some of which
+ * require memory which is indepent of the general context/task
+ * scheme. We add those here explicitly per-feature.
+ */
+
+ /* total number of SRQ's for this hwfn */
+ u32 srq_count;
+
+ /* Maximal number of L2 steering filters */
+ u32 arfs_count;
+
+ /* TODO - VF arfs filters ? */
+};
+
+static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_TOE;
+}
+
+static bool tm_tid_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_FCOE;
+}
+
+/* counts the iids for the CDU/CDUC ILT client configuration */
+struct ecore_cdu_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
+
+static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_cdu_iids *iids)
+{
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+ }
+}
+
+/* counts the iids for the Searcher block configuration */
+struct ecore_src_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
+
+static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_src_iids *iids)
+{
+ u32 i;
+
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
+ }
+
+ /* Add L2 filtering filters in addition */
+ iids->pf_cids += p_mngr->arfs_count;
+}
+
+/* counts the iids for the Timers block configuration */
+struct ecore_tm_iids {
+ u32 pf_cids;
+ u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
+ u32 pf_tids_total;
+ u32 per_vf_cids;
+ u32 per_vf_tids;
+};
+
+static void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_tm_iids *iids)
+{
+ bool tm_vf_required = false;
+ bool tm_required = false;
+ u32 i, j;
+
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
+
+ if (tm_cid_proto(i) || tm_required) {
+ if (p_cfg->cid_count)
+ tm_required = true;
+
+ iids->pf_cids += p_cfg->cid_count;
+ }
+
+ if (tm_cid_proto(i) || tm_vf_required) {
+ if (p_cfg->cids_per_vf)
+ tm_vf_required = true;
+
+ }
+
+ if (tm_tid_proto(i)) {
+ struct ecore_tid_seg *segs = p_cfg->tid_seg;
+
+ /* for each segment there is at most one
+ * protocol for which count is not 0.
+ */
+ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+ iids->pf_tids[j] += segs[j].count;
+
+ /* The last array elelment is for the VFs. As for PF
+ * segments there can be only one protocol for
+ * which this value is not 0.
+ */
+ iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+ }
+ }
+
+ iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN);
+ iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN);
+ iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN);
+
+ for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
+ iids->pf_tids[j] = ROUNDUP(iids->pf_tids[j], TM_ALIGN);
+ iids->pf_tids_total += iids->pf_tids[j];
+ }
+}
+
+static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
+ struct ecore_qm_iids *iids)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_tid_seg *segs;
+ u32 vf_cids = 0, type, j;
+ u32 vf_tids = 0;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ iids->cids += p_mngr->conn_cfg[type].cid_count;
+ vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+
+ segs = p_mngr->conn_cfg[type].tid_seg;
+ /* for each segment there is at most one
+ * protocol for which count is not 0.
+ */
+ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+ iids->tids += segs[j].count;
+
+ /* The last array elelment is for the VFs. As for PF
+ * segments there can be only one protocol for
+ * which this value is not 0.
+ */
+ vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+ }
+
+ iids->vf_cids += vf_cids * p_mngr->vf_count;
+ iids->tids += vf_tids * p_mngr->vf_count;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
+ iids->cids, iids->vf_cids, iids->tids, vf_tids);
+}
+
+static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
+ u32 seg)
+{
+ struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
+ u32 i;
+
+ /* Find the protocol with tid count > 0 for this segment.
+ * Note: there can only be one and this is already validated.
+ */
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ if (p_cfg->conn_cfg[i].tid_seg[seg].count)
+ return &p_cfg->conn_cfg[i].tid_seg[seg];
+ }
+ return OSAL_NULL;
+}
+
+static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
+{
+ struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ p_mgr->srq_count = num_srqs;
+}
+
+u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ return p_mgr->srq_count;
+}
+
+/* set the iids (cid/tid) count per protocol */
+static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 cid_count, u32 vf_cid_cnt)
+{
+ struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+ struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
+
+ p_conn->cid_count = ROUNDUP(cid_count, DQ_RANGE_ALIGN);
+ p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN);
+}
+
+u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid)
+{
+ if (vf_cid)
+ *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
+
+ return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+}
+
+u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
+}
+
+u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ u32 cnt = 0;
+ int i;
+
+ for (i = 0; i < TASK_SEGMENTS; i++)
+ cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
+
+ return cnt;
+}
+
+static OSAL_INLINE void
+ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type proto,
+ u8 seg, u8 seg_type, u32 count, bool has_fl)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+ p_seg->count = count;
+ p_seg->has_fl_mem = has_fl;
+ p_seg->type = seg_type;
+}
+
+/* the *p_line parameter must be either 0 for the first invocation or the
+ * value returned in the previous invocation.
+ */
+static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
+ struct ecore_ilt_cli_blk *p_blk,
+ u32 start_line,
+ u32 total_size, u32 elem_size)
+{
+ u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+ /* verify that it's called once for each block */
+ if (p_blk->total_size)
+ return;
+
+ p_blk->total_size = total_size;
+ p_blk->real_size_in_page = 0;
+ if (elem_size)
+ p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
+ p_blk->start_line = start_line;
+}
+
+static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
+ struct ecore_ilt_client_cfg *p_cli,
+ struct ecore_ilt_cli_blk *p_blk,
+ u32 *p_line, enum ilt_clients client_id)
+{
+ if (!p_blk->total_size)
+ return;
+
+ if (!p_cli->active)
+ p_cli->first.val = *p_line;
+
+ p_cli->active = true;
+ *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
+ p_cli->last.val = *p_line - 1;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x"
+ " [Real %08x] Start line %d\n",
+ client_id, p_cli->first.val, p_cli->last.val,
+ p_blk->total_size, p_blk->real_size_in_page,
+ p_blk->start_line);
+}
+
+static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn,
+ enum ilt_clients ilt_client)
+{
+ u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
+ struct ecore_ilt_client_cfg *p_cli;
+ u32 lines_to_skip = 0;
+ u32 cxts_per_p;
+
+ /* TBD MK: ILT code should be simplified once PROTO enum is changed */
+
+ if (ilt_client == ILT_CLI_CDUC) {
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+
+ cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
+ (u32)CONN_CXT_SIZE(p_hwfn);
+
+ lines_to_skip = cid_count / cxts_per_p;
+ }
+
+ return lines_to_skip;
+}
+
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 curr_line, total, i, task_size, line;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_blk;
+ struct ecore_cdu_iids cdu_iids;
+ struct ecore_src_iids src_iids;
+ struct ecore_qm_iids qm_iids;
+ struct ecore_tm_iids tm_iids;
+ struct ecore_tid_seg *p_seg;
+
+ OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
+ OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
+ OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+ OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
+
+ p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "hwfn [%d] - Set context mngr starting line to be 0x%08x\n",
+ p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
+
+ /* CDUC */
+ p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+
+ curr_line = p_mngr->pf_start_line;
+
+ /* CDUC PF */
+ p_cli->pf_total_lines = 0;
+
+ /* get the counters for the CDUC,CDUC and QM clients */
+ ecore_cxt_cdu_iids(p_mngr, &cdu_iids);
+
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+
+ total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ p_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn,
+ ILT_CLI_CDUC);
+
+ /* CDUC VF */
+ p_blk = &p_cli->vf_blks[CDUC_BLK];
+ total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
+ for (i = 1; i < p_mngr->vf_count; i++)
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUC);
+
+ /* CDUT PF */
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ p_cli->first.val = curr_line;
+
+ /* first the 'working' task memory */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+
+ /* next the 'init' task memory (forced load memory) */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+
+ if (!p_seg->has_fl_mem) {
+ /* The segment is active (total size pf 'working'
+ * memory is > 0) but has no FL (forced-load, Init)
+ * memory. Thus:
+ *
+ * 1. The total-size in the corrsponding FL block of
+ * the ILT client is set to 0 - No ILT line are
+ * provisioned and no ILT memory allocated.
+ *
+ * 2. The start-line of said block is set to the
+ * start line of the matching working memory
+ * block in the ILT client. This is later used to
+ * configure the CDU segment offset registers and
+ * results in an FL command for TIDs of this
+ * segment behaves as regular load commands
+ * (loading TIDs from the working memory).
+ */
+ line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ continue;
+ }
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
+
+ /* CDUT VF */
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
+ if (p_seg && p_seg->count) {
+ /* Stricly speaking we need to iterate over all VF
+ * task segment types, but a VF has only 1 segment
+ */
+
+ /* 'working' memory */
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ /* 'init' memory */
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ if (!p_seg->has_fl_mem) {
+ /* see comment above */
+ line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ } else {
+ task_size = p_mngr->task_type_size[p_seg->type];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total, task_size);
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->vf_total_lines = curr_line -
+ p_cli->vf_blks[0].start_line;
+
+ /* Now for the rest of the VFs */
+ for (i = 1; i < p_mngr->vf_count; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ }
+
+ /* QM */
+ p_cli = &p_mngr->clients[ILT_CLI_QM];
+ p_blk = &p_cli->pf_blks[0];
+
+ ecore_cxt_qm_iids(p_hwfn, &qm_iids);
+ total = ecore_qm_pf_mem_size(qm_iids.cids,
+ qm_iids.vf_cids, qm_iids.tids,
+ p_hwfn->qm_info.num_pqs,
+ p_hwfn->qm_info.num_vf_pqs);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d,"
+ " num_vf_pqs=%d, memory_size=%d)\n",
+ qm_iids.cids, qm_iids.vf_cids, qm_iids.tids,
+ p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000,
+ QM_PQ_ELEMENT_SIZE);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ /* SRC */
+ p_cli = &p_mngr->clients[ILT_CLI_SRC];
+ ecore_cxt_src_iids(p_mngr, &src_iids);
+
+ /* Both the PF and VFs searcher connections are stored in the per PF
+ * database. Thus sum the PF searcher cids and all the VFs searcher
+ * cids.
+ */
+ total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (total) {
+ u32 local_max = OSAL_MAX_T(u32, total,
+ SRC_MIN_NUM_ELEMS);
+
+ total = OSAL_ROUNDUP_POW_OF_TWO(local_max);
+
+ p_blk = &p_cli->pf_blks[0];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * sizeof(struct src_ent),
+ sizeof(struct src_ent));
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_SRC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM PF */
+ p_cli = &p_mngr->clients[ILT_CLI_TM];
+ ecore_cxt_tm_iids(p_mngr, &tm_iids);
+ total = tm_iids.pf_cids + tm_iids.pf_tids_total;
+ if (total) {
+ p_blk = &p_cli->pf_blks[0];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM VF */
+ total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
+ if (total) {
+ p_blk = &p_cli->vf_blks[0];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
+ for (i = 1; i < p_mngr->vf_count; i++) {
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ }
+ }
+
+ /* TSDM (SRQ CONTEXT) */
+ total = ecore_cxt_get_srq_count(p_hwfn);
+
+ if (total) {
+ p_cli = &p_mngr->clients[ILT_CLI_TSDM];
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TSDM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
+ RESC_NUM(p_hwfn, ECORE_ILT)) {
+ DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
+ curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 i;
+
+ if (!p_mngr->t2)
+ return;
+
+ for (i = 0; i < p_mngr->t2_num_pages; i++)
+ if (p_mngr->t2[i].p_virt)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_mngr->t2[i].p_virt,
+ p_mngr->t2[i].p_phys,
+ p_mngr->t2[i].size);
+
+ OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
+}
+
+static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 conn_num, total_size, ent_per_page, psz, i;
+ struct ecore_ilt_client_cfg *p_src;
+ struct ecore_src_iids src_iids;
+ struct ecore_dma_mem *p_t2;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+
+ /* if the SRC ILT client is inactive - there are no connection
+ * requiring the searcer, leave.
+ */
+ p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
+ if (!p_src->active)
+ return ECORE_SUCCESS;
+
+ ecore_cxt_src_iids(p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ total_size = conn_num * sizeof(struct src_ent);
+
+ /* use the same page size as the SRC ILT client */
+ psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
+ p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+
+ /* allocate t2 */
+ p_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ p_mngr->t2_num_pages *
+ sizeof(struct ecore_dma_mem));
+ if (!p_mngr->t2) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n");
+ rc = ECORE_NOMEM;
+ goto t2_fail;
+ }
+
+ /* allocate t2 pages */
+ for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ u32 size = OSAL_MIN_T(u32, total_size, psz);
+ void **p_virt = &p_mngr->t2[i].p_virt;
+
+ *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_mngr->t2[i].p_phys, size);
+ if (!p_mngr->t2[i].p_virt) {
+ rc = ECORE_NOMEM;
+ goto t2_fail;
+ }
+ OSAL_MEM_ZERO(*p_virt, size);
+ p_mngr->t2[i].size = size;
+ total_size -= size;
+ }
+
+ /* Set the t2 pointers */
+
+ /* entries per page - must be a power of two */
+ ent_per_page = psz / sizeof(struct src_ent);
+
+ p_mngr->first_free = (u64)p_mngr->t2[0].p_phys;
+
+ p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
+ p_mngr->last_free = (u64)p_t2->p_phys +
+ ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
+
+ for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num);
+ struct src_ent *entries = p_mngr->t2[i].p_virt;
+ u64 p_ent_phys = (u64)p_mngr->t2[i].p_phys, val;
+ u32 j;
+
+ for (j = 0; j < ent_num - 1; j++) {
+ val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
+ entries[j].next = OSAL_CPU_TO_BE64(val);
+ }
+
+ if (i < p_mngr->t2_num_pages - 1)
+ val = (u64)p_mngr->t2[i + 1].p_phys;
+ else
+ val = 0;
+ entries[j].next = OSAL_CPU_TO_BE64(val);
+
+ conn_num -= ent_num;
+ }
+
+ return ECORE_SUCCESS;
+
+t2_fail:
+ ecore_cxt_src_t2_free(p_hwfn);
+ return rc;
+}
+
+#define for_each_ilt_valid_client(pos, clients) \
+ for (pos = 0; pos < ILT_CLI_MAX; pos++) \
+ if (!clients[pos].active) { \
+ continue; \
+ } else \
+
+
+/* Total number of ILT lines used by this PF */
+static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients)
+{
+ u32 size = 0;
+ u32 i;
+
+ for_each_ilt_valid_client(i, ilt_clients)
+ size += (ilt_clients[i].last.val -
+ ilt_clients[i].first.val + 1);
+
+ return size;
+}
+
+static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 ilt_size, i;
+
+ if (p_mngr->ilt_shadow == OSAL_NULL)
+ return;
+
+ ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
+
+ for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
+ struct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+
+ if (p_dma->p_virt)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_dma->p_virt,
+ p_dma->p_phys, p_dma->size);
+ p_dma->p_virt = OSAL_NULL;
+ }
+ OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
+ p_mngr->ilt_shadow = OSAL_NULL;
+}
+
+static enum _ecore_status_t
+ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ilt_cli_blk *p_blk,
+ enum ilt_clients ilt_client, u32 start_line_offset)
+{
+ struct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 lines, line, sz_left, lines_to_skip = 0;
+
+ /* Special handling for RoCE that supports dynamic allocation */
+ if (ilt_client == ILT_CLI_CDUT || ilt_client == ILT_CLI_TSDM)
+ return ECORE_SUCCESS;
+
+ lines_to_skip = p_blk->dynamic_line_cnt;
+
+ if (!p_blk->total_size)
+ return ECORE_SUCCESS;
+
+ sz_left = p_blk->total_size;
+ lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
+ line = p_blk->start_line + start_line_offset -
+ p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
+
+ for (; lines; lines--) {
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 size;
+
+ size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);
+
+/* @DPDK */
+#define ILT_BLOCK_ALIGN_SIZE 0x1000
+ p_virt = OSAL_DMA_ALLOC_COHERENT_ALIGNED(p_hwfn->p_dev,
+ &p_phys, size,
+ ILT_BLOCK_ALIGN_SIZE);
+ if (!p_virt)
+ return ECORE_NOMEM;
+ OSAL_MEM_ZERO(p_virt, size);
+
+ ilt_shadow[line].p_phys = p_phys;
+ ilt_shadow[line].p_virt = p_virt;
+ ilt_shadow[line].size = size;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "ILT shadow: Line [%d] Physical 0x%lx"
+ " Virtual %p Size %d\n",
+ line, (unsigned long)p_phys, p_virt, size);
+
+ sz_left -= size;
+ line++;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_ilt_client_cfg *clients = p_mngr->clients;
+ struct ecore_ilt_cli_blk *p_blk;
+ u32 size, i, j, k;
+ enum _ecore_status_t rc;
+
+ size = ecore_cxt_ilt_shadow_size(clients);
+ p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ size * sizeof(struct ecore_dma_mem));
+
+ if (!p_mngr->ilt_shadow) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
+ rc = ECORE_NOMEM;
+ goto ilt_shadow_fail;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "Allocated 0x%x bytes for ilt shadow\n",
+ (u32)(size * sizeof(struct ecore_dma_mem)));
+
+ for_each_ilt_valid_client(i, clients) {
+ for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
+ p_blk = &clients[i].pf_blks[j];
+ rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
+ if (rc != ECORE_SUCCESS)
+ goto ilt_shadow_fail;
+ }
+ for (k = 0; k < p_mngr->vf_count; k++) {
+ for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
+ u32 lines = clients[i].vf_total_lines * k;
+
+ p_blk = &clients[i].vf_blks[j];
+ rc = ecore_ilt_blk_alloc(p_hwfn, p_blk,
+ i, lines);
+ if (rc != ECORE_SUCCESS)
+ goto ilt_shadow_fail;
+ }
+ }
+ }
+
+ return ECORE_SUCCESS;
+
+ilt_shadow_fail:
+ ecore_ilt_shadow_free(p_hwfn);
+ return rc;
+}
+
+static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 type, vf;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
+ p_mngr->acquired[type].cid_map = OSAL_NULL;
+ p_mngr->acquired[type].max_count = 0;
+ p_mngr->acquired[type].start_cid = 0;
+
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ OSAL_FREE(p_hwfn->p_dev,
+ p_mngr->acquired_vf[type][vf].cid_map);
+ p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL;
+ p_mngr->acquired_vf[type][vf].max_count = 0;
+ p_mngr->acquired_vf[type][vf].start_cid = 0;
+ }
+ }
+}
+
+static enum _ecore_status_t
+ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
+ u32 cid_start, u32 cid_count,
+ struct ecore_cid_acquired_map *p_map)
+{
+ u32 size;
+
+ if (!cid_count)
+ return ECORE_SUCCESS;
+
+ size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_count, BITS_PER_MAP_WORD);
+ p_map->cid_map = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+ if (p_map->cid_map == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ p_map->max_count = cid_count;
+ p_map->start_cid = cid_start;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Type %08x start: %08x count %08x\n",
+ type, p_map->start_cid, p_map->max_count);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 start_cid = 0, vf_start_cid = 0;
+ u32 type, vf;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
+ struct ecore_cid_acquired_map *p_map;
+
+ /* Handle PF maps */
+ p_map = &p_mngr->acquired[type];
+ if (ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
+ p_cfg->cid_count, p_map))
+ goto cid_map_fail;
+
+ /* Handle VF maps */
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ if (ecore_cid_map_alloc_single(p_hwfn, type,
+ vf_start_cid,
+ p_cfg->cids_per_vf,
+ p_map))
+ goto cid_map_fail;
+ }
+
+ start_cid += p_cfg->cid_count;
+ vf_start_cid += p_cfg->cids_per_vf;
+ }
+
+ return ECORE_SUCCESS;
+
+cid_map_fail:
+ ecore_cid_map_free(p_hwfn);
+ return ECORE_NOMEM;
+}
+
+enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *clients;
+ struct ecore_cxt_mngr *p_mngr;
+ u32 i;
+
+ p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
+ if (!p_mngr) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* Initialize ILT client registers */
+ clients = p_mngr->clients;
+ clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+ clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+ clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+ clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+ clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+ clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+ clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
+ clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
+ clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
+
+ clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
+ clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
+ clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
+
+ clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
+ clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
+ clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
+
+ clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
+ clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
+ clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
+
+ /* default ILT page size for all clients is 64K */
+ for (i = 0; i < ILT_CLI_MAX; i++)
+ p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+
+ /* due to removal of ISCSI/FCoE files union type0_task_context
+ * task_type_size will be 0. So hardcoded for now.
+ */
+ p_mngr->task_type_size[0] = 512; /* @DPDK */
+ p_mngr->task_type_size[1] = 128; /* @DPDK */
+
+ if (p_hwfn->p_dev->p_iov_info)
+ p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ /* Initialize the dynamic ILT allocation mutex */
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex);
+#endif
+ OSAL_MUTEX_INIT(&p_mngr->mutex);
+
+ /* Set the cxt mangr pointer priori to further allocations */
+ p_hwfn->p_cxt_mngr = p_mngr;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
+{
+ enum _ecore_status_t rc;
+
+ /* Allocate the ILT shadow table */
+ rc = ecore_ilt_shadow_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n");
+ goto tables_alloc_fail;
+ }
+
+ /* Allocate the T2 table */
+ rc = ecore_cxt_src_t2_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n");
+ goto tables_alloc_fail;
+ }
+
+ /* Allocate and initialize the acquired cids bitmaps */
+ rc = ecore_cid_map_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n");
+ goto tables_alloc_fail;
+ }
+
+ return ECORE_SUCCESS;
+
+tables_alloc_fail:
+ ecore_cxt_mngr_free(p_hwfn);
+ return rc;
+}
+
+void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_cxt_mngr)
+ return;
+
+ ecore_cid_map_free(p_hwfn);
+ ecore_cxt_src_t2_free(p_hwfn);
+ ecore_ilt_shadow_free(p_hwfn);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
+#endif
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
+}
+
+void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map;
+ struct ecore_conn_type_cfg *p_cfg;
+ int type;
+ u32 len;
+
+ /* Reset acquired cids */
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ u32 vf;
+
+ p_cfg = &p_mngr->conn_cfg[type];
+ if (p_cfg->cid_count) {
+ p_map = &p_mngr->acquired[type];
+ len = DIV_ROUND_UP(p_map->max_count,
+ BITS_PER_MAP_WORD) *
+ MAP_WORD_SIZE;
+ OSAL_MEM_ZERO(p_map->cid_map, len);
+ }
+
+ if (!p_cfg->cids_per_vf)
+ continue;
+
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ len = DIV_ROUND_UP(p_map->max_count,
+ BITS_PER_MAP_WORD) *
+ MAP_WORD_SIZE;
+ OSAL_MEM_ZERO(p_map->cid_map, len);
+ }
+ }
+}
+
+/* HW initialization helper (per Block, per phase) */
+
+/* CDU Common */
+#define CDUC_CXT_SIZE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
+
+#define CDUC_CXT_SIZE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
+
+#define CDUC_BLOCK_WASTE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
+
+#define CDUC_BLOCK_WASTE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
+
+#define CDUC_NCIB_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
+
+#define CDUC_NCIB_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+
+#define CDUT_TYPE0_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
+
+#define CDUT_TYPE0_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
+ CDUT_TYPE0_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE0_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
+ CDUT_TYPE0_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE0_NCIB_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE0_NCIB_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE0_NCIB_SHIFT)
+
+#define CDUT_TYPE1_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
+
+#define CDUT_TYPE1_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
+ CDUT_TYPE1_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE1_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
+ CDUT_TYPE1_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE1_NCIB_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE1_NCIB_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE1_NCIB_SHIFT)
+
+static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn)
+{
+ u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+
+ /* CDUC - connection configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+ cxt_size = CONN_CXT_SIZE(p_hwfn);
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
+ SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
+ SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-0 tasks configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ /* cxt size and block-waste are multipes of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-1 tasks configuration */
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ /* cxt size and block-waste are multipes of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
+}
+
+/* CDU PF */
+#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
+#define CDU_SEG_REG_TYPE_MASK 0x1
+#define CDU_SEG_REG_OFFSET_SHIFT 0
+#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
+
+static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_tid_seg *p_seg;
+ u32 cdu_seg_params, offset;
+ int i;
+
+ static const u32 rt_type_offset_arr[] = {
+ CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ static const u32 rt_type_offset_fl_arr[] = {
+ CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+
+ /* There are initializations only for CDUT during pf Phase */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ /* Segment 0 */
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg)
+ continue;
+
+ /* Note: start_line is already adjusted for the CDU
+ * segment register granularity, so we just need to
+ * divide. Adjustment is implicit as we assume ILT
+ * Page size is larger than 32K!
+ */
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
+
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
+ }
+}
+
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ bool is_pf_loading)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ struct ecore_mcp_link_state *p_link;
+ struct ecore_qm_iids iids;
+
+ OSAL_MEM_ZERO(&iids, sizeof(iids));
+ ecore_cxt_qm_iids(p_hwfn, &iids);
+
+ p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+ ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ qm_info->max_phys_tcs_per_port,
+ is_pf_loading,
+ iids.cids, iids.vf_cids, iids.tids,
+ qm_info->start_pq,
+ qm_info->num_pqs - qm_info->num_vf_pqs,
+ qm_info->num_vf_pqs,
+ qm_info->start_vport,
+ qm_info->num_vports, qm_info->pf_wfq,
+ qm_info->pf_rl, p_link->speed,
+ p_hwfn->qm_info.qm_pq_params,
+ p_hwfn->qm_info.qm_vport_params);
+}
+
+/* CM PF */
+static void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
+ ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
+}
+
+/* DQ PF */
+static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
+
+ /* Connection types 6 & 7 are not in use, yet they must be configured
+ * as the highest possible connection. Not configuring them means the
+ * defaults will be used, and with a large number of cids a bug may
+ * occur, if the defaults will be smaller than dq_pf_max_cid /
+ * dq_vf_max_cid.
+ */
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
+
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
+}
+
+static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *ilt_clients;
+ int i;
+
+ ilt_clients = p_hwfn->p_cxt_mngr->clients;
+ for_each_ilt_valid_client(i, ilt_clients) {
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].first.reg,
+ ilt_clients[i].first.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].last.reg, ilt_clients[i].last.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].p_size.reg,
+ ilt_clients[i].p_size.val);
+ }
+}
+
+static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *p_cli;
+ u32 blk_factor;
+
+ /* For simplicty we set the 'block' to be an ILT page */
+ if (p_hwfn->p_dev->p_iov_info) {
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_BASE_RT_OFFSET,
+ p_iov->first_vf_in_pf);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
+ p_iov->first_vf_in_pf + p_iov->total_vfs);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
+ blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+}
+
+/* ILT (PSWRQ2) PF */
+static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *clients;
+ struct ecore_cxt_mngr *p_mngr;
+ struct ecore_dma_mem *p_shdw;
+ u32 line, rt_offst, i;
+
+ ecore_ilt_bounds_init(p_hwfn);
+ ecore_ilt_vf_bounds_init(p_hwfn);
+
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_shdw = p_mngr->ilt_shadow;
+ clients = p_hwfn->p_cxt_mngr->clients;
+
+ for_each_ilt_valid_client(i, clients) {
+ /* Client's 1st val and RT array are absolute, ILT shadows'
+ * lines are relative.
+ */
+ line = clients[i].first.val - p_mngr->pf_start_line;
+ rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
+ clients[i].first.val * ILT_ENTRY_IN_REGS;
+
+ for (; line <= clients[i].last.val - p_mngr->pf_start_line;
+ line++, rt_offst += ILT_ENTRY_IN_REGS) {
+ u64 ilt_hw_entry = 0;
+
+ /** p_virt could be OSAL_NULL incase of dynamic
+ * allocation
+ */
+ if (p_shdw[line].p_virt != OSAL_NULL) {
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+ (p_shdw[line].p_phys >> 12));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "Setting RT[0x%08x] from"
+ " ILT[0x%08x] [Client is %d] to"
+ " Physical addr: 0x%lx\n",
+ rt_offst, line, i,
+ (unsigned long)(p_shdw[line].
+ p_phys >> 12));
+ }
+
+ STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
+ }
+ }
+}
+
+/* SRC (Searcher) PF */
+static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rounded_conn_num, conn_num, conn_max;
+ struct ecore_src_iids src_iids;
+
+ OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+ ecore_cxt_src_iids(p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (!conn_num)
+ return;
+
+ conn_max = OSAL_MAX_T(u32, conn_num, SRC_MIN_NUM_ELEMS);
+ rounded_conn_num = OSAL_ROUNDUP_POW_OF_TWO(conn_max);
+
+ STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
+ STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
+ OSAL_LOG2(rounded_conn_num));
+
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->first_free);
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->last_free);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "Configured SEARCHER for 0x%08x connections\n",
+ conn_num);
+}
+
+/* Timers PF */
+#define TM_CFG_NUM_IDS_SHIFT 0
+#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
+#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
+#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
+#define TM_CFG_PARENT_PF_SHIFT 25
+#define TM_CFG_PARENT_PF_MASK 0x7ULL
+
+#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
+#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
+#define TM_CFG_TID_OFFSET_SHIFT 30
+#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
+#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
+#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
+static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 active_seg_mask = 0, tm_offset, rt_reg;
+ struct ecore_tm_iids tm_iids;
+ u64 cfg_word;
+ u8 i;
+
+ OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
+ ecore_cxt_tm_iids(p_mngr, &tm_iids);
+
+ /* @@@TBD No pre-scan for now */
+
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
+
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
+
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+
+ /* enale scan */
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
+ tm_iids.pf_cids ? 0x1 : 0x0);
+
+ /* @@@TBD how to enable the scan for the VFs */
+
+ tm_offset = tm_iids.per_vf_cids;
+
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ tm_offset = tm_iids.pf_cids;
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->p_dev) +
+ p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+
+ tm_offset += tm_iids.pf_tids[i];
+ }
+
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
+
+ /* @@@TBD how to enable the scan for the VFs */
+}
+
+static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_conn_type_cfg *p_fcoe;
+ struct ecore_tid_seg *p_tid;
+
+ p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
+
+ /* If FCoE is active set the MAX OX_ID (tid) in the Parser */
+ if (!p_fcoe->cid_count)
+ return;
+
+ p_tid = &p_fcoe->tid_seg[ECORE_CXT_FCOE_TID_SEG];
+ STORE_RT_REG_AGG(p_hwfn,
+ PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
+ p_tid->count);
+}
+
+void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
+{
+ /* CDU configuration */
+ ecore_cdu_init_common(p_hwfn);
+}
+
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ ecore_qm_init_pf(p_hwfn, p_ptt, true);
+ ecore_cm_init_pf(p_hwfn);
+ ecore_dq_init_pf(p_hwfn);
+ ecore_cdu_init_pf(p_hwfn);
+ ecore_ilt_init_pf(p_hwfn);
+ ecore_src_init_pf(p_hwfn);
+ ecore_tm_init_pf(p_hwfn);
+ ecore_prs_init_pf(p_hwfn);
+}
+
+enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid, u8 vfid)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map;
+ u32 rel_cid;
+
+ if (type >= MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
+ return ECORE_INVAL;
+ }
+
+ if (vfid >= COMMON_MAX_NUM_VFS && vfid != ECORE_CXT_PF_CID) {
+ DP_NOTICE(p_hwfn, true, "VF [%02x] is out of range\n", vfid);
+ return ECORE_INVAL;
+ }
+
+ /* Determine the right map to take this CID from */
+ if (vfid == ECORE_CXT_PF_CID)
+ p_map = &p_mngr->acquired[type];
+ else
+ p_map = &p_mngr->acquired_vf[type][vfid];
+
+ if (p_map->cid_map == OSAL_NULL) {
+ DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
+ return ECORE_INVAL;
+ }
+
+ rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_map->cid_map,
+ p_map->max_count);
+
+ if (rel_cid >= p_map->max_count) {
+ DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n",
+ type);
+ return ECORE_NORESOURCES;
+ }
+
+ OSAL_SET_BIT(rel_cid, p_map->cid_map);
+
+ *p_cid = rel_cid + p_map->start_cid;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
+ *p_cid, rel_cid, vfid, type);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid)
+{
+ return _ecore_cxt_acquire_cid(p_hwfn, type, p_cid, ECORE_CXT_PF_CID);
+}
+
+static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
+ u32 cid, u8 vfid,
+ enum protocol_type *p_type,
+ struct ecore_cid_acquired_map **pp_map)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rel_cid;
+
+ /* Iterate over protocols and find matching cid range */
+ for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
+ if (vfid == ECORE_CXT_PF_CID)
+ *pp_map = &p_mngr->acquired[*p_type];
+ else
+ *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
+
+ if (!((*pp_map)->cid_map))
+ continue;
+ if (cid >= (*pp_map)->start_cid &&
+ cid < (*pp_map)->start_cid + (*pp_map)->max_count) {
+ break;
+ }
+ }
+ if (*p_type == MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, true, "Invalid CID %d vfid %02x", cid, vfid);
+ goto fail;
+ }
+
+ rel_cid = cid - (*pp_map)->start_cid;
+ if (!OSAL_TEST_BIT(rel_cid, (*pp_map)->cid_map)) {
+ DP_NOTICE(p_hwfn, true,
+ "CID %d [vifd %02x] not acquired", cid, vfid);
+ goto fail;
+ }
+
+ return true;
+fail:
+ *p_type = MAX_CONN_TYPES;
+ *pp_map = OSAL_NULL;
+ return false;
+}
+
+void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid, u8 vfid)
+{
+ struct ecore_cid_acquired_map *p_map = OSAL_NULL;
+ enum protocol_type type;
+ bool b_acquired;
+ u32 rel_cid;
+
+ if (vfid != ECORE_CXT_PF_CID && vfid > COMMON_MAX_NUM_VFS) {
+ DP_NOTICE(p_hwfn, true,
+ "Trying to return incorrect CID belonging to VF %02x\n",
+ vfid);
+ return;
+ }
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, vfid,
+ &type, &p_map);
+
+ if (!b_acquired)
+ return;
+
+ rel_cid = cid - p_map->start_cid;
+ OSAL_CLEAR_BIT(rel_cid, p_map->cid_map);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
+ cid, rel_cid, vfid, type);
+}
+
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+{
+ _ecore_cxt_release_cid(p_hwfn, cid, ECORE_CXT_PF_CID);
+}
+
+enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_cxt_info *p_info)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map = OSAL_NULL;
+ u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
+ enum protocol_type type;
+ bool b_acquired;
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid,
+ ECORE_CXT_PF_CID,
+ &type, &p_map);
+
+ if (!b_acquired)
+ return ECORE_INVAL;
+
+ /* set the protocl type */
+ p_info->type = type;
+
+ /* compute context virtual pointer */
+ hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+
+ conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
+ cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
+ line = p_info->iid / cxts_per_p;
+
+ /* Make sure context is allocated (dynamic allocation) */
+ if (!p_mngr->ilt_shadow[line].p_virt)
+ return ECORE_INVAL;
+
+ p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].p_virt +
+ p_info->iid % cxts_per_p * conn_cxt_size;
+
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
+ "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+ (p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
+{
+ /* Set the number of required CORE connections */
+ u32 core_cids = 1; /* SPQ */
+
+ ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
+
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ {
+ u32 count = 0;
+
+ struct ecore_eth_pf_params *p_params =
+ &p_hwfn->pf_params.eth_pf_params;
+
+ if (!p_params->num_vf_cons)
+ p_params->num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT;
+ ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons,
+ p_params->num_vf_cons);
+
+ count = p_params->num_arfs_filters;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS,
+ &p_hwfn->p_dev->mf_bits))
+ p_hwfn->p_cxt_mngr->arfs_count = count;
+
+ break;
+ }
+ default:
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/* This function is very RoCE oriented, if another protocol in the future
+ * will want this feature we'll need to modify the function to be more generic
+ */
+enum _ecore_status_t
+ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
+ enum ecore_cxt_elem_type elem_type,
+ u32 iid)
+{
+ u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_blk;
+ struct ecore_ptt *p_ptt;
+ dma_addr_t p_phys;
+ u64 ilt_hw_entry;
+ void *p_virt;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ switch (elem_type) {
+ case ECORE_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case ECORE_ELEM_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case ECORE_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "ECORE_INVALID elem type = %d", elem_type);
+ return ECORE_INVAL;
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ line = p_blk->start_line + (iid / elems_per_p);
+ shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ /* If line is already allocated, do nothing, otherwise allocate it and
+ * write it to the PSWRQ2 registers.
+ * This section can be run in parallel from different contexts and thus
+ * a mutex protection is needed.
+ */
+
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex);
+
+ if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+ goto out0;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, false,
+ "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
+ rc = ECORE_TIMEOUT;
+ goto out0;
+ }
+
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_phys,
+ p_blk->real_size_in_page);
+ if (!p_virt) {
+ rc = ECORE_NOMEM;
+ goto out1;
+ }
+ OSAL_MEM_ZERO(p_virt, p_blk->real_size_in_page);
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
+ p_blk->real_size_in_page;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
+
+ ilt_hw_entry = 0;
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry,
+ ILT_ENTRY_PHY_ADDR,
+ (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+
+/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
+
+ ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry,
+ reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
+ 0 /* no flags */);
+
+ if (elem_type == ECORE_ELEM_CXT) {
+ u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
+ elems_per_p;
+
+ /* Update the relevant register in the parser */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
+ last_cid_allocated - 1);
+
+ if (!p_hwfn->b_rdma_enabled_in_prs) {
+ /* Enable RoCE search */
+ ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
+ p_hwfn->b_rdma_enabled_in_prs = true;
+ }
+ }
+
+out1:
+ ecore_ptt_release(p_hwfn, p_ptt);
+out0:
+ OSAL_MUTEX_RELEASE(&p_hwfn->p_cxt_mngr->mutex);
+
+ return rc;
+}
+
+/* This function is very RoCE oriented, if another protocol in the future
+ * will want this feature we'll need to modify the function to be more generic
+ */
+static enum _ecore_status_t
+ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
+ enum ecore_cxt_elem_type elem_type,
+ u32 start_iid, u32 count)
+{
+ u32 start_line, end_line, shadow_start_line, shadow_end_line;
+ u32 reg_offset, elem_size, hw_p_size, elems_per_p;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_blk;
+ u32 end_iid = start_iid + count;
+ struct ecore_ptt *p_ptt;
+ u64 ilt_hw_entry = 0;
+ u32 i;
+
+ switch (elem_type) {
+ case ECORE_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case ECORE_ELEM_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case ECORE_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "ECORE_INVALID elem type = %d", elem_type);
+ return ECORE_INVAL;
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ start_line = p_blk->start_line + (start_iid / elems_per_p);
+ end_line = p_blk->start_line + (end_iid / elems_per_p);
+ if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
+ end_line--;
+
+ shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
+ shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, false,
+ "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
+ return ECORE_TIMEOUT;
+ }
+
+ for (i = shadow_start_line; i < shadow_end_line; i++) {
+ if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+ continue;
+
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ ((start_line++) * ILT_REG_SIZE_IN_BYTES *
+ ILT_ENTRY_IN_REGS);
+
+ /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
+ * wide-bus.
+ */
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&ilt_hw_entry,
+ reg_offset,
+ sizeof(ilt_hw_entry) / sizeof(u32),
+ 0 /* no flags */);
+ }
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.h
new file mode 100644
index 00000000..f8c955ca
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef _ECORE_CID_
+#define _ECORE_CID_
+
+#include "ecore_hsi_common.h"
+#include "ecore_proto_if.h"
+#include "ecore_cxt_api.h"
+
+/* Tasks segments definitions */
+#define ECORE_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI /* 0 */
+#define ECORE_CXT_FCOE_TID_SEG PROTOCOLID_FCOE /* 1 */
+#define ECORE_CXT_ROCE_TID_SEG PROTOCOLID_ROCE /* 2 */
+
+enum ecore_cxt_elem_type {
+ ECORE_ELEM_CXT,
+ ECORE_ELEM_SRQ,
+ ECORE_ELEM_TASK
+};
+
+u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *vf_cid);
+
+u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type);
+
+u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type);
+u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_alloc - Allocate and init the context manager struct
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_free
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired
+ * map
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_setup - Reset the acquired CIDs
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_hw_init_common - Initailze ILT and DQ, common phase, per
+ * path.
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_qm_init_pf - Initailze the QM PF phase, per path
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param is_pf_loading
+ */
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ bool is_pf_loading);
+
+ /**
+ * @brief Reconfigures QM pf on the fly
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+#define ECORE_CXT_PF_CID (0xff)
+
+/**
+ * @brief ecore_cxt_release - Release a cid
+ *
+ * @param p_hwfn
+ * @param cid
+ */
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
+
+/**
+ * @brief ecore_cxt_release - Release a cid belonging to a vf-queue
+ *
+ * @param p_hwfn
+ * @param cid
+ * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
+ */
+void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
+ u32 cid, u8 vfid);
+
+/**
+ * @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid);
+
+/**
+ * @brief _ecore_cxt_acquire - Acquire a new cid of a specific protocol type
+ * for a vf-queue
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid, u8 vfid);
+
+/**
+ * @brief ecore_cxt_get_tid_mem_info - function checks if the
+ * page containing the iid in the ilt is already
+ * allocated, if it is not it allocates the page.
+ *
+ * @param p_hwfn
+ * @param elem_type
+ * @param iid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
+ enum ecore_cxt_elem_type elem_type,
+ u32 iid);
+
+/**
+ * @brief ecore_cxt_free_proto_ilt - function frees ilt pages
+ * associated with the protocol passed.
+ *
+ * @param p_hwfn
+ * @param proto
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
+ enum protocol_type proto);
+
+#define ECORE_CTX_WORKING_MEM 0
+#define ECORE_CTX_FL_MEM 1
+
+#endif /* _ECORE_CID_ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt_api.h
new file mode 100644
index 00000000..6c8b2831
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt_api.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_CXT_API_H__
+#define __ECORE_CXT_API_H__
+
+struct ecore_hwfn;
+
+struct ecore_cxt_info {
+ void *p_cxt;
+ u32 iid;
+ enum protocol_type type;
+};
+
+#define MAX_TID_BLOCKS 512
+struct ecore_tid_mem {
+ u32 tid_size;
+ u32 num_tids_per_block;
+ u32 waste;
+ u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
+};
+
+/**
+* @brief ecoreo_cid_get_cxt_info - Returns the context info for a specific cid
+*
+*
+* @param p_hwfn
+* @param p_info in/out
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_cxt_info *p_info);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.c
new file mode 100644
index 00000000..96678745
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.c
@@ -0,0 +1,1535 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_sp_commands.h"
+#include "ecore_dcbx.h"
+#include "ecore_cxt.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "ecore_iov_api.h"
+
+#define ECORE_DCBX_MAX_MIB_READ_TRY (100)
+#define ECORE_ETH_TYPE_DEFAULT (0)
+
+#define ECORE_DCBX_INVALID_PRIORITY 0xFF
+
+/* Get Traffic Class from priority traffic class table, 4 bits represent
+ * the traffic class corresponding to the priority.
+ */
+#define ECORE_DCBX_PRIO2TC(prio_tc_tbl, prio) \
+ ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
+
+static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap)
+{
+ return !!(GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_ETHTYPE);
+}
+
+static bool ecore_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
+{
+ u8 mfw_val = GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return ecore_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
+}
+
+static bool ecore_dcbx_app_port(u32 app_info_bitmap)
+{
+ return !!(GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_PORT);
+}
+
+static bool ecore_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
+{
+ u8 mfw_val = GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return ecore_dcbx_app_port(app_info_bitmap);
+
+ return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
+}
+
+static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool ethtype;
+
+ if (ieee)
+ ethtype = ecore_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = ecore_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == ECORE_ETH_TYPE_DEFAULT));
+}
+
+static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
+ u16 proto_id, bool ieee)
+{
+ bool port;
+
+ if (!p_hwfn->p_dcbx_info->iwarp_port)
+ return false;
+
+ if (ieee)
+ port = ecore_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_TCP_PORT);
+ else
+ port = ecore_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == p_hwfn->p_dcbx_info->iwarp_port));
+}
+
+static void
+ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_results *p_data)
+{
+ enum dcbx_protocol_type id;
+ int i;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "DCBX negotiated: %d\n",
+ p_data->dcbx_enabled);
+
+ for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
+ id = ecore_dcbx_app_update[i].id;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "%s info: update %d, enable %d, prio %d, tc %d,"
+ " num_active_tc %d dscp_enable = %d dscp_val = %d\n",
+ ecore_dcbx_app_update[i].name,
+ p_data->arr[id].update,
+ p_data->arr[id].enable, p_data->arr[id].priority,
+ p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc,
+ p_data->arr[id].dscp_enable,
+ p_data->arr[id].dscp_val);
+ }
+}
+
+u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri)
+{
+ struct ecore_dcbx_dscp_params *dscp = &p_hwfn->p_dcbx_info->get.dscp;
+ u8 i;
+
+ if (!dscp->enabled)
+ return ECORE_DCBX_DSCP_DISABLED;
+
+ for (i = 0; i < ECORE_DCBX_DSCP_SIZE; i++)
+ if (pri == dscp->dscp_pri_map[i])
+ return i;
+
+ return ECORE_DCBX_DSCP_DISABLED;
+}
+
+static void
+ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
+ struct ecore_hwfn *p_hwfn,
+ bool enable, u8 prio, u8 tc,
+ enum dcbx_protocol_type type,
+ enum ecore_pci_personality personality)
+{
+ /* PF update ramrod data */
+ p_data->arr[type].enable = enable;
+ p_data->arr[type].priority = prio;
+ p_data->arr[type].tc = tc;
+ p_data->arr[type].dscp_val = ecore_dcbx_get_dscp_value(p_hwfn, prio);
+ if (p_data->arr[type].dscp_val == ECORE_DCBX_DSCP_DISABLED) {
+ p_data->arr[type].dscp_enable = false;
+ p_data->arr[type].dscp_val = 0;
+ } else {
+ p_data->arr[type].dscp_enable = true;
+ }
+ p_data->arr[type].update = UPDATE_DCB_DSCP;
+
+ /* Do not add valn tag 0 when DCB is enabled and port is in UFP mode */
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
+ p_data->arr[type].dont_add_vlan0 = true;
+
+ /* QM reconf data */
+ if (p_hwfn->hw_info.personality == personality)
+ p_hwfn->hw_info.offload_tc = tc;
+}
+
+/* Update app protocol data and hw_info fields with the TLV info */
+static void
+ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
+ struct ecore_hwfn *p_hwfn,
+ bool enable, u8 prio, u8 tc,
+ enum dcbx_protocol_type type)
+{
+ enum ecore_pci_personality personality;
+ enum dcbx_protocol_type id;
+ int i;
+
+ for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
+ id = ecore_dcbx_app_update[i].id;
+
+ if (type != id)
+ continue;
+
+ personality = ecore_dcbx_app_update[i].personality;
+
+ ecore_dcbx_set_params(p_data, p_hwfn, enable,
+ prio, tc, type, personality);
+ }
+}
+
+static enum _ecore_status_t
+ecore_dcbx_get_app_priority(u8 pri_bitmap, u8 *priority)
+{
+ u32 pri_mask, pri = ECORE_MAX_PFC_PRIORITIES;
+ u32 index = ECORE_MAX_PFC_PRIORITIES - 1;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Bitmap 1 corresponds to priority 0, return priority 0 */
+ if (pri_bitmap == 1) {
+ *priority = 0;
+ return rc;
+ }
+
+ /* Choose the highest priority */
+ while ((pri == ECORE_MAX_PFC_PRIORITIES) && index) {
+ pri_mask = 1 << index;
+ if (pri_bitmap & pri_mask)
+ pri = index;
+ index--;
+ }
+
+ if (pri < ECORE_MAX_PFC_PRIORITIES)
+ *priority = (u8)pri;
+ else
+ rc = ECORE_INVAL;
+
+ return rc;
+}
+
+static bool
+ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
+ u32 app_prio_bitmap, u16 id,
+ enum dcbx_protocol_type *type, bool ieee)
+{
+ if (ecore_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_ETH;
+ } else {
+ *type = DCBX_MAX_PROTOCOL_TYPE;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "No action required, App TLV entry = 0x%x\n",
+ app_prio_bitmap);
+ return false;
+ }
+
+ return true;
+}
+
+/* Parse app TLV's to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static enum _ecore_status_t
+ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_results *p_data,
+ struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl,
+ int count, u8 dcbx_version)
+{
+ enum dcbx_protocol_type type;
+ bool enable, ieee, eth_tlv;
+ u8 tc, priority_map;
+ u16 protocol_id;
+ u8 priority;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Num APP entries = %d pri_tc_tbl = 0x%x dcbx_version = %u\n",
+ count, pri_tc_tbl, dcbx_version);
+
+ ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
+ eth_tlv = false;
+ /* Parse APP TLV */
+ for (i = 0; i < count; i++) {
+ protocol_id = GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ priority_map = GET_MFW_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Id = 0x%x pri_map = %u\n",
+ protocol_id, priority_map);
+ rc = ecore_dcbx_get_app_priority(priority_map, &priority);
+ if (rc == ECORE_INVAL) {
+ DP_ERR(p_hwfn, "Invalid priority\n");
+ return ECORE_INVAL;
+ }
+
+ tc = ECORE_DCBX_PRIO2TC(pri_tc_tbl, priority);
+ if (ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ protocol_id, &type,
+ ieee)) {
+ /* ETH always have the enable bit reset, as it gets
+ * vlan information per packet. For other protocols,
+ * should be set according to the dcbx_enabled
+ * indication, but we only got here if there was an
+ * app tlv for the protocol, so dcbx must be enabled.
+ */
+ if (type == DCBX_PROTOCOL_ETH) {
+ enable = false;
+ eth_tlv = true;
+ } else {
+ enable = true;
+ }
+
+ ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
+ priority, tc, type);
+ }
+ }
+
+ /* If Eth TLV is not detected, use UFP TC as default TC */
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC,
+ &p_hwfn->p_dev->mf_bits) && !eth_tlv)
+ p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc;
+
+ /* Update ramrod protocol data and hw_info fields
+ * with default info when corresponding APP TLV's are not detected.
+ * The enabled field has a different logic for ethernet as only for
+ * ethernet dcb should disabled by default, as the information arrives
+ * from the OS (unless an explicit app tlv was present).
+ */
+ tc = p_data->arr[DCBX_PROTOCOL_ETH].tc;
+ priority = p_data->arr[DCBX_PROTOCOL_ETH].priority;
+ for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) {
+ if (p_data->arr[type].update)
+ continue;
+
+ enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
+ ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
+ priority, tc, type);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/* Parse app TLV's to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static enum _ecore_status_t
+ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
+{
+ struct dcbx_app_priority_feature *p_app;
+ struct dcbx_app_priority_entry *p_tbl;
+ struct ecore_dcbx_results data = { 0 };
+ struct dcbx_ets_feature *p_ets;
+ struct ecore_hw_info *p_info;
+ u32 pri_tc_tbl, flags;
+ u8 dcbx_version;
+ int num_entries;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+ dcbx_version = GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION);
+
+ p_app = &p_hwfn->p_dcbx_info->operational.features.app;
+ p_tbl = p_app->app_pri_tbl;
+
+ p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
+ pri_tc_tbl = p_ets->pri_tc_tbl[0];
+
+ p_info = &p_hwfn->hw_info;
+ num_entries = GET_MFW_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
+
+ rc = ecore_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+ num_entries, dcbx_version);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_info->num_active_tc = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+ p_hwfn->qm_info.ooo_tc = GET_MFW_FIELD(p_ets->flags, DCBX_OOO_TC);
+ data.pf_id = p_hwfn->rel_pf_id;
+ data.dcbx_enabled = !!dcbx_version;
+
+ ecore_dcbx_dp_protocol(p_hwfn, &data);
+
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->results, &data,
+ sizeof(struct ecore_dcbx_results));
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_mib_meta_data *p_data,
+ enum ecore_mib_read_type type)
+{
+ u32 prefix_seq_num, suffix_seq_num;
+ int read_count = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* The data is considered to be valid only if both sequence numbers are
+ * the same.
+ */
+ do {
+ if (type == ECORE_DCBX_REMOTE_LLDP_MIB) {
+ ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
+ suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
+ } else if (type == ECORE_DCBX_LLDP_TLVS) {
+ ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_tlvs,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->lldp_tlvs->prefix_seq_num;
+ suffix_seq_num = p_data->lldp_tlvs->suffix_seq_num;
+
+ } else {
+ ecore_memcpy_from(p_hwfn, p_ptt, p_data->mib,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->mib->prefix_seq_num;
+ suffix_seq_num = p_data->mib->suffix_seq_num;
+ }
+ read_count++;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "mib type = %d, try count = %d prefix seq num ="
+ " %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ } while ((prefix_seq_num != suffix_seq_num) &&
+ (read_count < ECORE_DCBX_MAX_MIB_READ_TRY));
+
+ if (read_count >= ECORE_DCBX_MAX_MIB_READ_TRY) {
+ DP_ERR(p_hwfn,
+ "MIB read err, mib type = %d, try count ="
+ " %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ rc = ECORE_IO;
+ }
+
+ return rc;
+}
+
+static void
+ecore_dcbx_get_priority_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_app_prio *p_prio,
+ struct ecore_dcbx_results *p_results)
+{
+ u8 val;
+
+ if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
+ p_results->arr[DCBX_PROTOCOL_ETH].enable)
+ p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Priorities: eth %d\n",
+ p_prio->eth);
+}
+
+static void
+ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct ecore_dcbx_params *p_params, bool ieee)
+{
+ struct ecore_app_entry *entry;
+ u8 pri_map;
+ int i;
+
+ p_params->app_willing = GET_MFW_FIELD(p_app->flags, DCBX_APP_WILLING);
+ p_params->app_valid = GET_MFW_FIELD(p_app->flags, DCBX_APP_ENABLED);
+ p_params->app_error = GET_MFW_FIELD(p_app->flags, DCBX_APP_ERROR);
+ p_params->num_app_entries = GET_MFW_FIELD(p_app->flags,
+ DCBX_APP_NUM_ENTRIES);
+ for (i = 0; i < p_params->num_app_entries; i++) {
+ entry = &p_params->app_entry[i];
+ if (ieee) {
+ u8 sf_ieee;
+ u32 val;
+
+ sf_ieee = GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF_IEEE);
+ switch (sf_ieee) {
+ case DCBX_APP_SF_IEEE_RESERVED:
+ /* Old MFW */
+ val = GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF);
+ entry->sf_ieee = val ?
+ ECORE_DCBX_SF_IEEE_TCP_UDP_PORT :
+ ECORE_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_ETHTYPE:
+ entry->sf_ieee = ECORE_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_PORT:
+ entry->sf_ieee = ECORE_DCBX_SF_IEEE_TCP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_UDP_PORT:
+ entry->sf_ieee = ECORE_DCBX_SF_IEEE_UDP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
+ entry->sf_ieee =
+ ECORE_DCBX_SF_IEEE_TCP_UDP_PORT;
+ break;
+ }
+ } else {
+ entry->ethtype = !(GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF));
+ }
+
+ pri_map = GET_MFW_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
+ ecore_dcbx_get_app_priority(pri_map, &entry->prio);
+ entry->proto_id = GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ entry->proto_id,
+ &entry->proto_type, ieee);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "APP params: willing %d, valid %d error = %d\n",
+ p_params->app_willing, p_params->app_valid,
+ p_params->app_error);
+}
+
+static void
+ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
+ u32 pfc, struct ecore_dcbx_params *p_params)
+{
+ u8 pfc_map;
+
+ p_params->pfc.willing = GET_MFW_FIELD(pfc, DCBX_PFC_WILLING);
+ p_params->pfc.max_tc = GET_MFW_FIELD(pfc, DCBX_PFC_CAPS);
+ p_params->pfc.enabled = GET_MFW_FIELD(pfc, DCBX_PFC_ENABLED);
+ pfc_map = GET_MFW_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
+ p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
+ p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1);
+ p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2);
+ p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3);
+ p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4);
+ p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5);
+ p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6);
+ p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n",
+ p_params->pfc.willing, pfc_map, p_params->pfc.max_tc,
+ p_params->pfc.enabled);
+}
+
+static void
+ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct ecore_dcbx_params *p_params)
+{
+ u32 bw_map[2], tsa_map[2], pri_map;
+ int i;
+
+ p_params->ets_willing = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_WILLING);
+ p_params->ets_enabled = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_ENABLED);
+ p_params->ets_cbs = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_CBS);
+ p_params->max_ets_tc = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing, p_params->ets_enabled,
+ p_params->ets_cbs, p_ets->pri_tc_tbl[0],
+ p_params->max_ets_tc);
+
+ /* 8 bit tsa and bw data corresponding to each of the 8 TC's are
+ * encoded in a type u32 array of size 2.
+ */
+ bw_map[0] = OSAL_BE32_TO_CPU(p_ets->tc_bw_tbl[0]);
+ bw_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_bw_tbl[1]);
+ tsa_map[0] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[0]);
+ tsa_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[1]);
+ pri_map = p_ets->pri_tc_tbl[0];
+ for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
+ p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
+ p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
+ p_params->ets_pri_tc_tbl[i] = ECORE_DCBX_PRIO2TC(pri_map, i);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "elem %d bw_tbl %x tsa_tbl %x\n",
+ i, p_params->ets_tc_bw_tbl[i],
+ p_params->ets_tc_tsa_tbl[i]);
+ }
+}
+
+static void
+ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct dcbx_ets_feature *p_ets,
+ u32 pfc, struct ecore_dcbx_params *p_params,
+ bool ieee)
+{
+ ecore_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
+ ecore_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
+ ecore_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
+}
+
+static void
+ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->local.params, false);
+ params->local.valid = true;
+}
+
+static void
+ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->remote.features;
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->remote.params,
+ false);
+ params->remote.valid = true;
+}
+
+static void ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
+{
+ struct ecore_dcbx_dscp_params *p_dscp;
+ struct dcb_dscp_map *p_dscp_map;
+ int i, j, entry;
+ u32 pri_map;
+
+ p_dscp = &params->dscp;
+ p_dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
+ p_dscp->enabled = GET_MFW_FIELD(p_dscp_map->flags, DCB_DSCP_ENABLE);
+
+ /* MFW encodes 64 dscp entries into 8 element array of u32 entries,
+ * where each entry holds the 4bit priority map for 8 dscp entries.
+ */
+ for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) {
+ pri_map = OSAL_BE32_TO_CPU(p_dscp_map->dscp_pri_map[i]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n",
+ entry, pri_map);
+ for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++)
+ p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
+ (j * 4)) & 0xf;
+ }
+}
+
+static void
+ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
+{
+ struct ecore_dcbx_operational_params *p_operational;
+ struct ecore_dcbx_results *p_results;
+ struct dcbx_features *p_feat;
+ bool enabled, err;
+ u32 flags;
+ bool val;
+
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+
+ /* If DCBx version is non zero, then negotiation
+ * was successfuly performed
+ */
+ p_operational = &params->operational;
+ enabled = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) !=
+ DCBX_CONFIG_VERSION_DISABLED);
+ if (!enabled) {
+ p_operational->enabled = enabled;
+ p_operational->valid = false;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx is disabled\n");
+ return;
+ }
+
+ p_feat = &p_hwfn->p_dcbx_info->operational.features;
+ p_results = &p_hwfn->p_dcbx_info->results;
+
+ val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_IEEE);
+ p_operational->ieee = val;
+
+ val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_CEE);
+ p_operational->cee = val;
+
+ val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_STATIC);
+ p_operational->local = val;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Version support: ieee %d, cee %d, static %d\n",
+ p_operational->ieee, p_operational->cee,
+ p_operational->local);
+
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->operational.params,
+ p_operational->ieee);
+ ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio,
+ p_results);
+ err = GET_MFW_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
+ p_operational->err = err;
+ p_operational->enabled = enabled;
+ p_operational->valid = true;
+}
+
+static void ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
+{
+ struct lldp_config_params_s *p_local;
+
+ p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
+
+ OSAL_MEMCPY(params->lldp_local.local_chassis_id,
+ p_local->local_chassis_id,
+ sizeof(params->lldp_local.local_chassis_id));
+ OSAL_MEMCPY(params->lldp_local.local_port_id, p_local->local_port_id,
+ sizeof(params->lldp_local.local_port_id));
+}
+
+static void ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
+{
+ struct lldp_status_params_s *p_remote;
+
+ p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
+
+ OSAL_MEMCPY(params->lldp_remote.peer_chassis_id,
+ p_remote->peer_chassis_id,
+ sizeof(params->lldp_remote.peer_chassis_id));
+ OSAL_MEMCPY(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
+ sizeof(params->lldp_remote.peer_port_id));
+}
+
+static enum _ecore_status_t
+ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *p_params,
+ enum ecore_mib_read_type type)
+{
+ switch (type) {
+ case ECORE_DCBX_REMOTE_MIB:
+ ecore_dcbx_get_remote_params(p_hwfn, p_params);
+ break;
+ case ECORE_DCBX_LOCAL_MIB:
+ ecore_dcbx_get_local_params(p_hwfn, p_params);
+ break;
+ case ECORE_DCBX_OPERATIONAL_MIB:
+ ecore_dcbx_get_operational_params(p_hwfn, p_params);
+ break;
+ case ECORE_DCBX_REMOTE_LLDP_MIB:
+ ecore_dcbx_get_remote_lldp_params(p_hwfn, p_params);
+ break;
+ case ECORE_DCBX_LOCAL_LLDP_MIB:
+ ecore_dcbx_get_local_lldp_params(p_hwfn, p_params);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_local_lldp_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEM_ZERO(&data, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_config_params);
+ data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
+ data.size = sizeof(struct lldp_config_params_s);
+ ecore_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_remote_lldp_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEM_ZERO(&data, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_status_params);
+ data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote;
+ data.size = sizeof(struct lldp_status_params_s);
+ rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_operational_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEM_ZERO(&data, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, operational_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->operational;
+ data.size = sizeof(struct dcbx_mib);
+ rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_remote_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEM_ZERO(&data, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, remote_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->remote;
+ data.size = sizeof(struct dcbx_mib);
+ rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_local_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEM_ZERO(&data, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ ecore_memcpy_from(p_hwfn, p_ptt, data.local_admin,
+ data.addr, data.size);
+
+ return rc;
+}
+
+static void
+ecore_dcbx_read_dscp_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_dcbx_mib_meta_data data;
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, dcb_dscp_map);
+ data.dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
+ data.size = sizeof(struct dcb_dscp_map);
+ ecore_memcpy_from(p_hwfn, p_ptt, data.dscp_map, data.addr, data.size);
+}
+
+static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ enum _ecore_status_t rc = ECORE_INVAL;
+
+ switch (type) {
+ case ECORE_DCBX_OPERATIONAL_MIB:
+ ecore_dcbx_read_dscp_mib(p_hwfn, p_ptt);
+ rc = ecore_dcbx_read_operational_mib(p_hwfn, p_ptt, type);
+ break;
+ case ECORE_DCBX_REMOTE_MIB:
+ rc = ecore_dcbx_read_remote_mib(p_hwfn, p_ptt, type);
+ break;
+ case ECORE_DCBX_LOCAL_MIB:
+ rc = ecore_dcbx_read_local_mib(p_hwfn, p_ptt);
+ break;
+ case ECORE_DCBX_REMOTE_LLDP_MIB:
+ rc = ecore_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type);
+ break;
+ case ECORE_DCBX_LOCAL_LLDP_MIB:
+ rc = ecore_dcbx_read_local_lldp_mib(p_hwfn, p_ptt);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/*
+ * Read updated MIB.
+ * Reconfigure QM and invoke PF update ramrod command if operational MIB
+ * change is detected.
+ */
+enum _ecore_status_t
+ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc)
+ return rc;
+
+ if (type == ECORE_DCBX_OPERATIONAL_MIB) {
+ ecore_dcbx_get_dscp_params(p_hwfn, &p_hwfn->p_dcbx_info->get);
+
+ rc = ecore_dcbx_process_mib_info(p_hwfn);
+ if (!rc) {
+ /* reconfigure tcs of QM queues according
+ * to negotiation results
+ */
+ ecore_qm_reconf(p_hwfn, p_ptt);
+
+ /* update storm FW with negotiation results */
+ ecore_sp_pf_update_dcbx(p_hwfn);
+ }
+ }
+
+ ecore_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
+
+ /* Update the DSCP to TC mapping bit if required */
+ if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
+ p_hwfn->p_dcbx_info->dscp_nig_update) {
+ u8 val = !!p_hwfn->p_dcbx_info->get.dscp.enabled;
+
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_DSCP_TO_TC_MAP_ENABLE, val);
+ p_hwfn->p_dcbx_info->dscp_nig_update = false;
+ }
+
+ OSAL_DCBX_AEN(p_hwfn, type);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
+{
+ p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*p_hwfn->p_dcbx_info));
+ if (!p_hwfn->p_dcbx_info) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate `struct ecore_dcbx_info'");
+ return ECORE_NOMEM;
+ }
+
+ p_hwfn->p_dcbx_info->iwarp_port =
+ p_hwfn->pf_params.rdma_pf_params.iwarp_port;
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_dcbx_info);
+}
+
+static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
+ struct ecore_dcbx_results *p_src,
+ enum dcbx_protocol_type type)
+{
+ p_data->dcb_enable_flag = p_src->arr[type].enable;
+ p_data->dcb_priority = p_src->arr[type].priority;
+ p_data->dcb_tc = p_src->arr[type].tc;
+ p_data->dscp_enable_flag = p_src->arr[type].dscp_enable;
+ p_data->dscp_val = p_src->arr[type].dscp_val;
+ p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0;
+}
+
+/* Set pf update ramrod command params */
+void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest)
+{
+ struct protocol_dcb_data *p_dcb_data;
+ u8 update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
+ p_dest->update_eth_dcb_data_mode = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_IWARP].update;
+ p_dest->update_iwarp_dcb_data_mode = update_flag;
+
+ p_dcb_data = &p_dest->eth_dcb_data;
+ ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
+ p_dcb_data = &p_dest->iwarp_dcb_data;
+ ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_IWARP);
+}
+
+enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *p_get,
+ enum ecore_mib_read_type type)
+{
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_TIMEOUT;
+
+ rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ ecore_dcbx_get_dscp_params(p_hwfn, p_get);
+
+ rc = ecore_dcbx_get_params(p_hwfn, p_get, type);
+
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+static void
+ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
+ u32 *pfc, struct ecore_dcbx_params *p_params)
+{
+ u8 pfc_map = 0;
+ int i;
+
+ if (p_params->pfc.willing)
+ *pfc |= DCBX_PFC_WILLING_MASK;
+ else
+ *pfc &= ~DCBX_PFC_WILLING_MASK;
+
+ if (p_params->pfc.enabled)
+ *pfc |= DCBX_PFC_ENABLED_MASK;
+ else
+ *pfc &= ~DCBX_PFC_ENABLED_MASK;
+
+ *pfc &= ~DCBX_PFC_CAPS_MASK;
+ *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_OFFSET;
+
+ for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
+ if (p_params->pfc.prio[i])
+ pfc_map |= (1 << i);
+ *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
+ *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_OFFSET);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "pfc = 0x%x\n", *pfc);
+}
+
+static void
+ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct ecore_dcbx_params *p_params)
+{
+ u8 *bw_map, *tsa_map;
+ u32 val;
+ int i;
+
+ if (p_params->ets_willing)
+ p_ets->flags |= DCBX_ETS_WILLING_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_WILLING_MASK;
+
+ if (p_params->ets_cbs)
+ p_ets->flags |= DCBX_ETS_CBS_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_CBS_MASK;
+
+ if (p_params->ets_enabled)
+ p_ets->flags |= DCBX_ETS_ENABLED_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
+
+ p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
+ p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_OFFSET;
+
+ bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
+ tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
+ p_ets->pri_tc_tbl[0] = 0;
+ for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
+ bw_map[i] = p_params->ets_tc_bw_tbl[i];
+ tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
+ /* Copy the priority value to the corresponding 4 bits in the
+ * traffic class table.
+ */
+ val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
+ p_ets->pri_tc_tbl[0] |= val;
+ }
+ for (i = 0; i < 2; i++) {
+ p_ets->tc_bw_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_bw_tbl[i]);
+ p_ets->tc_tsa_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_tsa_tbl[i]);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "flags = 0x%x pri_tc = 0x%x tc_bwl[] = {0x%x, 0x%x} tc_tsa = {0x%x, 0x%x}\n",
+ p_ets->flags, p_ets->pri_tc_tbl[0], p_ets->tc_bw_tbl[0],
+ p_ets->tc_bw_tbl[1], p_ets->tc_tsa_tbl[0],
+ p_ets->tc_tsa_tbl[1]);
+}
+
+static void
+ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct ecore_dcbx_params *p_params, bool ieee)
+{
+ u32 *entry;
+ int i;
+
+ if (p_params->app_willing)
+ p_app->flags |= DCBX_APP_WILLING_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_WILLING_MASK;
+
+ if (p_params->app_valid)
+ p_app->flags |= DCBX_APP_ENABLED_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_ENABLED_MASK;
+
+ p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
+ p_app->flags |= (u32)p_params->num_app_entries <<
+ DCBX_APP_NUM_ENTRIES_OFFSET;
+
+ for (i = 0; i < p_params->num_app_entries; i++) {
+ entry = &p_app->app_pri_tbl[i].entry;
+ *entry = 0;
+ if (ieee) {
+ *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
+ switch (p_params->app_entry[i].sf_ieee) {
+ case ECORE_DCBX_SF_IEEE_ETHTYPE:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
+ DCBX_APP_SF_IEEE_OFFSET);
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_OFFSET);
+ break;
+ case ECORE_DCBX_SF_IEEE_TCP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
+ DCBX_APP_SF_IEEE_OFFSET);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_OFFSET);
+ break;
+ case ECORE_DCBX_SF_IEEE_UDP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
+ DCBX_APP_SF_IEEE_OFFSET);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_OFFSET);
+ break;
+ case ECORE_DCBX_SF_IEEE_TCP_UDP_PORT:
+ *entry |= (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
+ DCBX_APP_SF_IEEE_OFFSET;
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_OFFSET);
+ break;
+ }
+ } else {
+ *entry &= ~DCBX_APP_SF_MASK;
+ if (p_params->app_entry[i].ethtype)
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_OFFSET);
+ else
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_OFFSET);
+ }
+ *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
+ *entry |= ((u32)p_params->app_entry[i].proto_id <<
+ DCBX_APP_PROTOCOL_ID_OFFSET);
+ *entry &= ~DCBX_APP_PRI_MAP_MASK;
+ *entry |= ((u32)(p_params->app_entry[i].prio) <<
+ DCBX_APP_PRI_MAP_OFFSET);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_app->flags);
+}
+
+static void
+ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
+ struct dcbx_local_params *local_admin,
+ struct ecore_dcbx_set *params)
+{
+ bool ieee = false;
+
+ local_admin->flags = 0;
+ OSAL_MEMCPY(&local_admin->features,
+ &p_hwfn->p_dcbx_info->operational.features,
+ sizeof(local_admin->features));
+
+ if (params->enabled) {
+ local_admin->config = params->ver_num;
+ ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
+ } else {
+ local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx version = %d\n",
+ local_admin->config);
+
+ if (params->override_flags & ECORE_DCBX_OVERRIDE_PFC_CFG)
+ ecore_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
+ &params->config.params);
+
+ if (params->override_flags & ECORE_DCBX_OVERRIDE_ETS_CFG)
+ ecore_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets,
+ &params->config.params);
+
+ if (params->override_flags & ECORE_DCBX_OVERRIDE_APP_CFG)
+ ecore_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
+ &params->config.params, ieee);
+}
+
+static enum _ecore_status_t
+ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
+ struct dcb_dscp_map *p_dscp_map,
+ struct ecore_dcbx_set *p_params)
+{
+ int entry, i, j;
+ u32 val;
+
+ OSAL_MEMCPY(p_dscp_map, &p_hwfn->p_dcbx_info->dscp_map,
+ sizeof(*p_dscp_map));
+
+ p_dscp_map->flags &= ~DCB_DSCP_ENABLE_MASK;
+ if (p_params->dscp.enabled)
+ p_dscp_map->flags |= DCB_DSCP_ENABLE_MASK;
+
+ for (i = 0, entry = 0; i < 8; i++) {
+ val = 0;
+ for (j = 0; j < 8; j++, entry++)
+ val |= (((u32)p_params->dscp.dscp_pri_map[entry]) <<
+ (j * 4));
+
+ p_dscp_map->dscp_pri_map[i] = OSAL_CPU_TO_BE32(val);
+ }
+
+ p_hwfn->p_dcbx_info->dscp_nig_update = true;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_dscp_map->flags);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "pri_map[] = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ p_dscp_map->dscp_pri_map[0], p_dscp_map->dscp_pri_map[1],
+ p_dscp_map->dscp_pri_map[2], p_dscp_map->dscp_pri_map[3],
+ p_dscp_map->dscp_pri_map[4], p_dscp_map->dscp_pri_map[5],
+ p_dscp_map->dscp_pri_map[6], p_dscp_map->dscp_pri_map[7]);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_set *params,
+ bool hw_commit)
+{
+ struct dcbx_local_params local_admin;
+ struct ecore_dcbx_mib_meta_data data;
+ struct dcb_dscp_map dscp_map;
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
+ sizeof(p_hwfn->p_dcbx_info->set));
+ if (!hw_commit)
+ return ECORE_SUCCESS;
+
+ OSAL_MEMSET(&local_admin, 0, sizeof(local_admin));
+ ecore_dcbx_set_local_params(p_hwfn, &local_admin, params);
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ ecore_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size);
+
+ if (params->override_flags & ECORE_DCBX_OVERRIDE_DSCP_CFG) {
+ OSAL_MEMSET(&dscp_map, 0, sizeof(dscp_map));
+ ecore_dcbx_set_dscp_params(p_hwfn, &dscp_map, params);
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, dcb_dscp_map);
+ data.dscp_map = &dscp_map;
+ data.size = sizeof(struct dcb_dscp_map);
+ ecore_memcpy_to(p_hwfn, p_ptt, data.addr, data.dscp_map,
+ data.size);
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_LLDP_SEND_OFFSET, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send DCBX update request\n");
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_set *params)
+{
+ struct ecore_dcbx_get *dcbx_info;
+ int rc;
+
+ if (p_hwfn->p_dcbx_info->set.config.valid) {
+ OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
+ sizeof(struct ecore_dcbx_set));
+ return ECORE_SUCCESS;
+ }
+
+ dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*dcbx_info));
+ if (!dcbx_info)
+ return ECORE_NOMEM;
+
+ OSAL_MEMSET(dcbx_info, 0, sizeof(*dcbx_info));
+ rc = ecore_dcbx_query_params(p_hwfn, dcbx_info,
+ ECORE_DCBX_OPERATIONAL_MIB);
+ if (rc) {
+ OSAL_FREE(p_hwfn->p_dev, dcbx_info);
+ return rc;
+ }
+ p_hwfn->p_dcbx_info->set.override_flags = 0;
+
+ p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED;
+ if (dcbx_info->operational.cee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ if (dcbx_info->operational.ieee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+ if (dcbx_info->operational.local)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
+
+ p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.dscp,
+ &p_hwfn->p_dcbx_info->get.dscp,
+ sizeof(struct ecore_dcbx_dscp_params));
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.config.params,
+ &dcbx_info->operational.params,
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
+ p_hwfn->p_dcbx_info->set.config.valid = true;
+
+ OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
+ sizeof(struct ecore_dcbx_set));
+
+ OSAL_FREE(p_hwfn->p_dev, dcbx_info);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_lldp_register_tlv(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_lldp_agent agent,
+ u8 tlv_type)
+{
+ u32 mb_param = 0, mcp_resp = 0, mcp_param = 0, val = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ switch (agent) {
+ case ECORE_LLDP_NEAREST_BRIDGE:
+ val = LLDP_NEAREST_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
+ val = LLDP_NEAREST_NON_TPMR_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
+ val = LLDP_NEAREST_CUSTOMER_BRIDGE;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid agent type %d\n", agent);
+ return ECORE_INVAL;
+ }
+
+ SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_AGENT, val);
+ SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_TLV_RX_TYPE, tlv_type);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_REGISTER_LLDP_TLVS_RX,
+ mb_param, &mcp_resp, &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "Failed to register TLV\n");
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_lldp_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct lldp_received_tlvs_s tlvs;
+ int i;
+
+ for (i = 0; i < LLDP_MAX_LLDP_AGENTS; i++) {
+ OSAL_MEM_ZERO(&data, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, lldp_received_tlvs[i]);
+ data.lldp_tlvs = &tlvs;
+ data.size = sizeof(tlvs);
+ rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data,
+ ECORE_DCBX_LLDP_TLVS);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false, "Failed to read lldp TLVs\n");
+ return rc;
+ }
+
+ if (!tlvs.length)
+ continue;
+
+ for (i = 0; i < MAX_TLV_BUFFER; i++)
+ tlvs.tlvs_buffer[i] =
+ OSAL_CPU_TO_BE32(tlvs.tlvs_buffer[i]);
+
+ OSAL_LLDP_RX_TLVS(p_hwfn, tlvs.tlvs_buffer, tlvs.length);
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_lldp_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_config_params *p_params)
+{
+ struct lldp_config_params_s lldp_params;
+ u32 addr, val;
+ int i;
+
+ switch (p_params->agent) {
+ case ECORE_LLDP_NEAREST_BRIDGE:
+ val = LLDP_NEAREST_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
+ val = LLDP_NEAREST_NON_TPMR_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
+ val = LLDP_NEAREST_CUSTOMER_BRIDGE;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid agent type %d\n", p_params->agent);
+ return ECORE_INVAL;
+ }
+
+ addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, lldp_config_params[val]);
+
+ ecore_memcpy_from(p_hwfn, p_ptt, &lldp_params, addr,
+ sizeof(lldp_params));
+
+ p_params->tx_interval = GET_MFW_FIELD(lldp_params.config,
+ LLDP_CONFIG_TX_INTERVAL);
+ p_params->tx_hold = GET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_HOLD);
+ p_params->tx_credit = GET_MFW_FIELD(lldp_params.config,
+ LLDP_CONFIG_MAX_CREDIT);
+ p_params->rx_enable = GET_MFW_FIELD(lldp_params.config,
+ LLDP_CONFIG_ENABLE_RX);
+ p_params->tx_enable = GET_MFW_FIELD(lldp_params.config,
+ LLDP_CONFIG_ENABLE_TX);
+
+ OSAL_MEMCPY(p_params->chassis_id_tlv, lldp_params.local_chassis_id,
+ sizeof(p_params->chassis_id_tlv));
+ for (i = 0; i < ECORE_LLDP_CHASSIS_ID_STAT_LEN; i++)
+ p_params->chassis_id_tlv[i] =
+ OSAL_BE32_TO_CPU(p_params->chassis_id_tlv[i]);
+
+ OSAL_MEMCPY(p_params->port_id_tlv, lldp_params.local_port_id,
+ sizeof(p_params->port_id_tlv));
+ for (i = 0; i < ECORE_LLDP_PORT_ID_STAT_LEN; i++)
+ p_params->port_id_tlv[i] =
+ OSAL_BE32_TO_CPU(p_params->port_id_tlv[i]);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_lldp_set_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_config_params *p_params)
+{
+ u32 mb_param = 0, mcp_resp = 0, mcp_param = 0;
+ struct lldp_config_params_s lldp_params;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 addr, val;
+ int i;
+
+ switch (p_params->agent) {
+ case ECORE_LLDP_NEAREST_BRIDGE:
+ val = LLDP_NEAREST_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
+ val = LLDP_NEAREST_NON_TPMR_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
+ val = LLDP_NEAREST_CUSTOMER_BRIDGE;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid agent type %d\n", p_params->agent);
+ return ECORE_INVAL;
+ }
+
+ SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_AGENT, val);
+ addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, lldp_config_params[val]);
+
+ OSAL_MEMSET(&lldp_params, 0, sizeof(lldp_params));
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_TX_INTERVAL,
+ p_params->tx_interval);
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_HOLD, p_params->tx_hold);
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_MAX_CREDIT,
+ p_params->tx_credit);
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_ENABLE_RX,
+ !!p_params->rx_enable);
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_ENABLE_TX,
+ !!p_params->tx_enable);
+
+ for (i = 0; i < ECORE_LLDP_CHASSIS_ID_STAT_LEN; i++)
+ p_params->chassis_id_tlv[i] =
+ OSAL_CPU_TO_BE32(p_params->chassis_id_tlv[i]);
+ OSAL_MEMCPY(lldp_params.local_chassis_id, p_params->chassis_id_tlv,
+ sizeof(lldp_params.local_chassis_id));
+
+ for (i = 0; i < ECORE_LLDP_PORT_ID_STAT_LEN; i++)
+ p_params->port_id_tlv[i] =
+ OSAL_CPU_TO_BE32(p_params->port_id_tlv[i]);
+ OSAL_MEMCPY(lldp_params.local_port_id, p_params->port_id_tlv,
+ sizeof(lldp_params.local_port_id));
+
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &lldp_params, sizeof(lldp_params));
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LLDP,
+ mb_param, &mcp_resp, &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "SET_LLDP failed, error = %d\n", rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_sys_tlvs *p_params)
+{
+ u32 mb_param = 0, mcp_resp = 0, mcp_param = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct lldp_system_tlvs_buffer_s lld_tlv_buf;
+ u32 addr, *p_val;
+ u8 len;
+ int i;
+
+ p_val = (u32 *)p_params->buf;
+ for (i = 0; i < ECORE_LLDP_SYS_TLV_SIZE / 4; i++)
+ p_val[i] = OSAL_CPU_TO_BE32(p_val[i]);
+
+ OSAL_MEMSET(&lld_tlv_buf, 0, sizeof(lld_tlv_buf));
+ SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_VALID, 1);
+ SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_MANDATORY,
+ !!p_params->discard_mandatory_tlv);
+ SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_LENGTH,
+ p_params->buf_size);
+ len = ECORE_LLDP_SYS_TLV_SIZE / 2;
+ OSAL_MEMCPY(lld_tlv_buf.data, p_params->buf, len);
+
+ addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, system_lldp_tlvs_buf);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &lld_tlv_buf, sizeof(lld_tlv_buf));
+
+ if (p_params->buf_size > len) {
+ addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, system_lldp_tlvs_buf2);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &p_params->buf[len],
+ ECORE_LLDP_SYS_TLV_SIZE / 2);
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LLDP,
+ mb_param, &mcp_resp, &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "SET_LLDP failed, error = %d\n", rc);
+
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.h
new file mode 100644
index 00000000..519e6cea
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_DCBX_H__
+#define __ECORE_DCBX_H__
+
+#include "ecore.h"
+#include "ecore_mcp.h"
+#include "mcp_public.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_hsi_common.h"
+#include "ecore_dcbx_api.h"
+
+#define ECORE_DCBX_DSCP_DISABLED 0XFF
+
+struct ecore_dcbx_info {
+ struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
+ struct dcbx_local_params local_admin;
+ struct ecore_dcbx_results results;
+ struct dcb_dscp_map dscp_map;
+ bool dscp_nig_update;
+ struct dcbx_mib operational;
+ struct dcbx_mib remote;
+ struct ecore_dcbx_set set;
+ struct ecore_dcbx_get get;
+ u8 dcbx_cap;
+ u16 iwarp_port;
+};
+
+struct ecore_dcbx_mib_meta_data {
+ struct lldp_config_params_s *lldp_local;
+ struct lldp_status_params_s *lldp_remote;
+ struct lldp_received_tlvs_s *lldp_tlvs;
+ struct dcbx_local_params *local_admin;
+ struct dcb_dscp_map *dscp_map;
+ struct dcbx_mib *mib;
+ osal_size_t size;
+ u32 addr;
+};
+
+/* ECORE local interface routines */
+enum _ecore_status_t
+ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *,
+ enum ecore_mib_read_type);
+
+enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
+void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn);
+void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest);
+
+/* Returns TOS value for a given priority */
+u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri);
+
+enum _ecore_status_t
+ecore_lldp_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+#endif /* __ECORE_DCBX_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx_api.h
new file mode 100644
index 00000000..eaf8e082
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx_api.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_DCBX_API_H__
+#define __ECORE_DCBX_API_H__
+
+#include "ecore_status.h"
+
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
+
+enum ecore_mib_read_type {
+ ECORE_DCBX_OPERATIONAL_MIB,
+ ECORE_DCBX_REMOTE_MIB,
+ ECORE_DCBX_LOCAL_MIB,
+ ECORE_DCBX_REMOTE_LLDP_MIB,
+ ECORE_DCBX_LOCAL_LLDP_MIB,
+ ECORE_DCBX_LLDP_TLVS
+};
+
+struct ecore_dcbx_app_data {
+ bool enable; /* DCB enabled */
+ u8 update; /* Update indication */
+ u8 priority; /* Priority */
+ u8 tc; /* Traffic Class */
+ bool dscp_enable; /* DSCP enabled */
+ u8 dscp_val; /* DSCP value */
+ bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */
+};
+
+#ifndef __EXTRACT__LINUX__
+enum dcbx_protocol_type {
+ DCBX_PROTOCOL_ISCSI,
+ DCBX_PROTOCOL_FCOE,
+ DCBX_PROTOCOL_ROCE,
+ DCBX_PROTOCOL_ROCE_V2,
+ DCBX_PROTOCOL_ETH,
+ DCBX_PROTOCOL_IWARP,
+ DCBX_MAX_PROTOCOL_TYPE
+};
+
+#define ECORE_LLDP_CHASSIS_ID_STAT_LEN 4
+#define ECORE_LLDP_PORT_ID_STAT_LEN 4
+#define ECORE_DCBX_MAX_APP_PROTOCOL 32
+#define ECORE_MAX_PFC_PRIORITIES 8
+#define ECORE_DCBX_DSCP_SIZE 64
+
+struct ecore_dcbx_lldp_remote {
+ u32 peer_chassis_id[ECORE_LLDP_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[ECORE_LLDP_PORT_ID_STAT_LEN];
+ bool enable_rx;
+ bool enable_tx;
+ u32 tx_interval;
+ u32 max_credit;
+};
+
+struct ecore_dcbx_lldp_local {
+ u32 local_chassis_id[ECORE_LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[ECORE_LLDP_PORT_ID_STAT_LEN];
+};
+
+struct ecore_dcbx_app_prio {
+ u8 roce;
+ u8 roce_v2;
+ u8 fcoe;
+ u8 iscsi;
+ u8 eth;
+};
+
+struct ecore_dbcx_pfc_params {
+ bool willing;
+ bool enabled;
+ u8 prio[ECORE_MAX_PFC_PRIORITIES];
+ u8 max_tc;
+};
+
+enum ecore_dcbx_sf_ieee_type {
+ ECORE_DCBX_SF_IEEE_ETHTYPE,
+ ECORE_DCBX_SF_IEEE_TCP_PORT,
+ ECORE_DCBX_SF_IEEE_UDP_PORT,
+ ECORE_DCBX_SF_IEEE_TCP_UDP_PORT
+};
+
+struct ecore_app_entry {
+ bool ethtype;
+ enum ecore_dcbx_sf_ieee_type sf_ieee;
+ bool enabled;
+ u8 prio;
+ u16 proto_id;
+ enum dcbx_protocol_type proto_type;
+};
+
+struct ecore_dcbx_params {
+ struct ecore_app_entry app_entry[ECORE_DCBX_MAX_APP_PROTOCOL];
+ u16 num_app_entries;
+ bool app_willing;
+ bool app_valid;
+ bool app_error;
+ bool ets_willing;
+ bool ets_enabled;
+ bool ets_cbs;
+ u8 ets_pri_tc_tbl[ECORE_MAX_PFC_PRIORITIES];
+ u8 ets_tc_bw_tbl[ECORE_MAX_PFC_PRIORITIES];
+ u8 ets_tc_tsa_tbl[ECORE_MAX_PFC_PRIORITIES];
+ struct ecore_dbcx_pfc_params pfc;
+ u8 max_ets_tc;
+};
+
+struct ecore_dcbx_admin_params {
+ struct ecore_dcbx_params params;
+ bool valid; /* Indicate validity of params */
+};
+
+struct ecore_dcbx_remote_params {
+ struct ecore_dcbx_params params;
+ bool valid; /* Indicate validity of params */
+};
+
+struct ecore_dcbx_operational_params {
+ struct ecore_dcbx_app_prio app_prio;
+ struct ecore_dcbx_params params;
+ bool valid; /* Indicate validity of params */
+ bool enabled;
+ bool ieee;
+ bool cee;
+ bool local;
+ u32 err;
+};
+
+struct ecore_dcbx_dscp_params {
+ bool enabled;
+ u8 dscp_pri_map[ECORE_DCBX_DSCP_SIZE];
+};
+
+struct ecore_dcbx_get {
+ struct ecore_dcbx_operational_params operational;
+ struct ecore_dcbx_lldp_remote lldp_remote;
+ struct ecore_dcbx_lldp_local lldp_local;
+ struct ecore_dcbx_remote_params remote;
+ struct ecore_dcbx_admin_params local;
+ struct ecore_dcbx_dscp_params dscp;
+};
+#endif
+
+#define ECORE_DCBX_VERSION_DISABLED 0
+#define ECORE_DCBX_VERSION_IEEE 1
+#define ECORE_DCBX_VERSION_CEE 2
+#define ECORE_DCBX_VERSION_DYNAMIC 3
+
+struct ecore_dcbx_set {
+#define ECORE_DCBX_OVERRIDE_STATE (1 << 0)
+#define ECORE_DCBX_OVERRIDE_PFC_CFG (1 << 1)
+#define ECORE_DCBX_OVERRIDE_ETS_CFG (1 << 2)
+#define ECORE_DCBX_OVERRIDE_APP_CFG (1 << 3)
+#define ECORE_DCBX_OVERRIDE_DSCP_CFG (1 << 4)
+ u32 override_flags;
+ bool enabled;
+ struct ecore_dcbx_admin_params config;
+ u32 ver_num;
+ struct ecore_dcbx_dscp_params dscp;
+};
+
+struct ecore_dcbx_results {
+ bool dcbx_enabled;
+ u8 pf_id;
+ struct ecore_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE];
+};
+
+struct ecore_dcbx_app_metadata {
+ enum dcbx_protocol_type id;
+ const char *name; /* @DPDK */
+ enum ecore_pci_personality personality;
+};
+
+enum ecore_lldp_agent {
+ ECORE_LLDP_NEAREST_BRIDGE = 0,
+ ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE,
+ ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE,
+ ECORE_LLDP_MAX_AGENTS
+};
+
+struct ecore_lldp_config_params {
+ enum ecore_lldp_agent agent;
+ u8 tx_interval;
+ u8 tx_hold;
+ u8 tx_credit;
+ bool rx_enable;
+ bool tx_enable;
+ u32 chassis_id_tlv[ECORE_LLDP_CHASSIS_ID_STAT_LEN];
+ u32 port_id_tlv[ECORE_LLDP_PORT_ID_STAT_LEN];
+};
+
+#define ECORE_LLDP_SYS_TLV_SIZE 256
+struct ecore_lldp_sys_tlvs {
+ bool discard_mandatory_tlv;
+ u8 buf[ECORE_LLDP_SYS_TLV_SIZE];
+ u16 buf_size;
+};
+
+enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *,
+ struct ecore_dcbx_get *,
+ enum ecore_mib_read_type);
+
+enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *,
+ struct ecore_dcbx_set *);
+
+enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *,
+ struct ecore_ptt *,
+ struct ecore_dcbx_set *,
+ bool);
+
+enum _ecore_status_t ecore_lldp_register_tlv(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_lldp_agent agent,
+ u8 tlv_type);
+
+enum _ecore_status_t
+ecore_lldp_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_config_params *p_params);
+
+enum _ecore_status_t
+ecore_lldp_set_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_config_params *p_params);
+
+enum _ecore_status_t
+ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_sys_tlvs *p_params);
+
+static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
+ {DCBX_PROTOCOL_ISCSI, "ISCSI", ECORE_PCI_ISCSI},
+ {DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
+ {DCBX_PROTOCOL_ROCE, "ROCE", ECORE_PCI_ETH_ROCE},
+ {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", ECORE_PCI_ETH_ROCE},
+ {DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH},
+ {DCBX_PROTOCOL_IWARP, "IWARP", ECORE_PCI_ETH_IWARP}
+};
+
+#endif /* __ECORE_DCBX_API_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_dev.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_dev.c
new file mode 100644
index 00000000..31f1f3ee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_dev.c
@@ -0,0 +1,5715 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore.h"
+#include "ecore_chain.h"
+#include "ecore_status.h"
+#include "ecore_hw.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_ops.h"
+#include "ecore_int.h"
+#include "ecore_cxt.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_sp_commands.h"
+#include "ecore_dev_api.h"
+#include "ecore_sriov.h"
+#include "ecore_vf.h"
+#include "ecore_mcp.h"
+#include "ecore_hw_defs.h"
+#include "mcp_public.h"
+#include "ecore_iro.h"
+#include "nvm_cfg.h"
+#include "ecore_dcbx.h"
+#include "ecore_l2.h"
+
+/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
+ * registers involved are not split and thus configuration is a race where
+ * some of the PFs configuration might be lost.
+ * Eventually, this needs to move into a MFW-covered HW-lock as arbitration
+ * mechanism as this doesn't cover some cases [E.g., PDA or scenarios where
+ * there's more than a single compiled ecore component in system].
+ */
+static osal_spinlock_t qm_lock;
+static u32 qm_lock_ref_cnt;
+
+/******************** Doorbell Recovery *******************/
+/* The doorbell recovery mechanism consists of a list of entries which represent
+ * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
+ * entity needs to register with the mechanism and provide the parameters
+ * describing it's doorbell, including a location where last used doorbell data
+ * can be found. The doorbell execute function will traverse the list and
+ * doorbell all of the registered entries.
+ */
+struct ecore_db_recovery_entry {
+ osal_list_entry_t list_entry;
+ void OSAL_IOMEM *db_addr;
+ void *db_data;
+ enum ecore_db_rec_width db_width;
+ enum ecore_db_rec_space db_space;
+ u8 hwfn_idx;
+};
+
+/* display a single doorbell recovery entry */
+void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_db_recovery_entry *db_entry,
+ const char *action)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
+ action, db_entry, db_entry->db_addr, db_entry->db_data,
+ db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
+ db_entry->db_space == DB_REC_USER ? "user" : "kernel",
+ db_entry->hwfn_idx);
+}
+
+/* doorbell address sanity (address within doorbell bar range) */
+bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr,
+ void *db_data)
+{
+ /* make sure doorbell address is within the doorbell bar */
+ if (db_addr < p_dev->doorbells || (u8 *)db_addr >
+ (u8 *)p_dev->doorbells + p_dev->db_size) {
+ OSAL_WARN(true,
+ "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
+ db_addr, p_dev->doorbells,
+ (u8 *)p_dev->doorbells + p_dev->db_size);
+ return false;
+ }
+
+ /* make sure doorbell data pointer is not null */
+ if (!db_data) {
+ OSAL_WARN(true, "Illegal doorbell data pointer: %p", db_data);
+ return false;
+ }
+
+ return true;
+}
+
+/* find hwfn according to the doorbell address */
+struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr)
+{
+ struct ecore_hwfn *p_hwfn;
+
+ /* In CMT doorbell bar is split down the middle between engine 0 and
+ * enigne 1
+ */
+ if (ECORE_IS_CMT(p_dev))
+ p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
+ &p_dev->hwfns[0] : &p_dev->hwfns[1];
+ else
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+
+ return p_hwfn;
+}
+
+/* add a new entry to the doorbell recovery mechanism */
+enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data,
+ enum ecore_db_rec_width db_width,
+ enum ecore_db_rec_space db_space)
+{
+ struct ecore_db_recovery_entry *db_entry;
+ struct ecore_hwfn *p_hwfn;
+
+ /* shortcircuit VFs, for now */
+ if (IS_VF(p_dev)) {
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return ECORE_SUCCESS;
+ }
+
+ /* sanitize doorbell address */
+ if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
+ return ECORE_INVAL;
+
+ /* obtain hwfn from doorbell address */
+ p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
+
+ /* create entry */
+ db_entry = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*db_entry));
+ if (!db_entry) {
+ DP_NOTICE(p_dev, false, "Failed to allocate a db recovery entry\n");
+ return ECORE_NOMEM;
+ }
+
+ /* populate entry */
+ db_entry->db_addr = db_addr;
+ db_entry->db_data = db_data;
+ db_entry->db_width = db_width;
+ db_entry->db_space = db_space;
+ db_entry->hwfn_idx = p_hwfn->my_id;
+
+ /* display */
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_PUSH_TAIL(&db_entry->list_entry,
+ &p_hwfn->db_recovery_info.list);
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+
+ return ECORE_SUCCESS;
+}
+
+/* remove an entry from the doorbell recovery mechanism */
+enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_hwfn *p_hwfn;
+
+ /* shortcircuit VFs, for now */
+ if (IS_VF(p_dev)) {
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return ECORE_SUCCESS;
+ }
+
+ /* sanitize doorbell address */
+ if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
+ return ECORE_INVAL;
+
+ /* obtain hwfn from doorbell address */
+ p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_FOR_EACH_ENTRY(db_entry,
+ &p_hwfn->db_recovery_info.list,
+ list_entry,
+ struct ecore_db_recovery_entry) {
+ /* search according to db_data addr since db_addr is not unique
+ * (roce)
+ */
+ if (db_entry->db_data == db_data) {
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry,
+ "Deleting");
+ OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
+ &p_hwfn->db_recovery_info.list);
+ rc = ECORE_SUCCESS;
+ break;
+ }
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+
+ if (rc == ECORE_INVAL)
+ /*OSAL_WARN(true,*/
+ DP_NOTICE(p_hwfn, false,
+ "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
+ db_data, db_addr);
+ else
+ OSAL_FREE(p_dev, db_entry);
+
+ return rc;
+}
+
+/* initialize the doorbell recovery mechanism */
+enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n");
+
+ /* make sure db_size was set in p_dev */
+ if (!p_hwfn->p_dev->db_size) {
+ DP_ERR(p_hwfn->p_dev, "db_size not set\n");
+ return ECORE_INVAL;
+ }
+
+ OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock))
+ return ECORE_NOMEM;
+#endif
+ OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock);
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+
+ return ECORE_SUCCESS;
+}
+
+/* destroy the doorbell recovery mechanism */
+void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n");
+ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
+ DP_VERBOSE(p_hwfn, false, "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
+ while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
+ db_entry = OSAL_LIST_FIRST_ENTRY(
+ &p_hwfn->db_recovery_info.list,
+ struct ecore_db_recovery_entry,
+ list_entry);
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
+ OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
+ &p_hwfn->db_recovery_info.list);
+ OSAL_FREE(p_hwfn->p_dev, db_entry);
+ }
+ }
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock);
+#endif
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+}
+
+/* print the content of the doorbell recovery mechanism */
+void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+
+ DP_NOTICE(p_hwfn, false,
+ "Dispalying doorbell recovery database. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_FOR_EACH_ENTRY(db_entry,
+ &p_hwfn->db_recovery_info.list,
+ list_entry,
+ struct ecore_db_recovery_entry) {
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+}
+
+/* ring the doorbell of a single doorbell recovery entry */
+void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
+ struct ecore_db_recovery_entry *db_entry,
+ enum ecore_db_rec_exec db_exec)
+{
+ /* Print according to width */
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %x\n",
+ db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
+ db_entry->db_addr, *(u32 *)db_entry->db_data);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %lx\n",
+ db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
+ db_entry->db_addr,
+ *(unsigned long *)(db_entry->db_data));
+
+ /* Sanity */
+ if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr,
+ db_entry->db_data))
+ return;
+
+ /* Flush the write combined buffer. Since there are multiple doorbelling
+ * entities using the same address, if we don't flush, a transaction
+ * could be lost.
+ */
+ OSAL_WMB(p_hwfn->p_dev);
+
+ /* Ring the doorbell */
+ if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DIRECT_REG_WR(p_hwfn, db_entry->db_addr,
+ *(u32 *)(db_entry->db_data));
+ else
+ DIRECT_REG_WR64(p_hwfn, db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
+ }
+
+ /* Flush the write combined buffer. Next doorbell may come from a
+ * different entity to the same address...
+ */
+ OSAL_WMB(p_hwfn->p_dev);
+}
+
+/* traverse the doorbell recovery entry list and ring all the doorbells */
+void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
+ enum ecore_db_rec_exec db_exec)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+
+ if (db_exec != DB_REC_ONCE) {
+ DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+ /* track amount of times recovery was executed */
+ p_hwfn->db_recovery_info.db_recovery_counter++;
+ }
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_FOR_EACH_ENTRY(db_entry,
+ &p_hwfn->db_recovery_info.list,
+ list_entry,
+ struct ecore_db_recovery_entry) {
+ ecore_db_recovery_ring(p_hwfn, db_entry, db_exec);
+ if (db_exec == DB_REC_ONCE)
+ break;
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+}
+/******************** Doorbell Recovery end ****************/
+
+/* Configurable */
+#define ECORE_MIN_DPIS (4) /* The minimal num of DPIs required to
+ * load the driver. The number was
+ * arbitrarily set.
+ */
+
+/* Derived */
+#define ECORE_MIN_PWM_REGION (ECORE_WID_SIZE * ECORE_MIN_DPIS)
+
+static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum BAR_ID bar_id)
+{
+ u32 bar_reg = (bar_id == BAR_ID_0 ?
+ PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_hw_bar_size(p_hwfn, bar_id);
+
+ val = ecore_rd(p_hwfn, p_ptt, bar_reg);
+ if (val)
+ return 1 << (val + 15);
+
+ /* The above registers were updated in the past only in CMT mode. Since
+ * they were found to be useful MFW started updating them from 8.7.7.0.
+ * In older MFW versions they are set to 0 which means disabled.
+ */
+ if (ECORE_IS_CMT(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
+ val = BAR_ID_0 ? 256 * 1024 : 512 * 1024;
+ } else {
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
+ val = 512 * 1024;
+ }
+
+ return val;
+}
+
+void ecore_init_dp(struct ecore_dev *p_dev,
+ u32 dp_module, u8 dp_level, void *dp_ctx)
+{
+ u32 i;
+
+ p_dev->dp_level = dp_level;
+ p_dev->dp_module = dp_module;
+ p_dev->dp_ctx = dp_ctx;
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ p_hwfn->dp_level = dp_level;
+ p_hwfn->dp_module = dp_module;
+ p_hwfn->dp_ctx = dp_ctx;
+ }
+}
+
+enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev)
+{
+ u8 i;
+
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ p_hwfn->p_dev = p_dev;
+ p_hwfn->my_id = i;
+ p_hwfn->b_active = false;
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock))
+ goto handle_err;
+#endif
+ OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock);
+ }
+
+ /* hwfn 0 is always active */
+ p_dev->hwfns[0].b_active = true;
+
+ /* set the default cache alignment to 128 (may be overridden later) */
+ p_dev->cache_shift = 7;
+ return ECORE_SUCCESS;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+handle_err:
+ while (--i) {
+ struct ecore_hwfn *p_hwfn = OSAL_NULL;
+
+ p_hwfn = &p_dev->hwfns[i];
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
+ }
+ return ECORE_NOMEM;
+#endif
+}
+
+static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
+ OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
+ OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
+ OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
+}
+
+void ecore_resc_free(struct ecore_dev *p_dev)
+{
+ int i;
+
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i)
+ ecore_l2_free(&p_dev->hwfns[i]);
+ return;
+ }
+
+ OSAL_FREE(p_dev, p_dev->fw_data);
+
+ OSAL_FREE(p_dev, p_dev->reset_stats);
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ ecore_cxt_mngr_free(p_hwfn);
+ ecore_qm_info_free(p_hwfn);
+ ecore_spq_free(p_hwfn);
+ ecore_eq_free(p_hwfn);
+ ecore_consq_free(p_hwfn);
+ ecore_int_free(p_hwfn);
+ ecore_iov_free(p_hwfn);
+ ecore_l2_free(p_hwfn);
+ ecore_dmae_info_free(p_hwfn);
+ ecore_dcbx_info_free(p_hwfn);
+ /* @@@TBD Flush work-queue ? */
+
+ /* destroy doorbell recovery mechanism */
+ ecore_db_recovery_teardown(p_hwfn);
+ }
+}
+
+/******************** QM initialization *******************/
+
+/* bitmaps for indicating active traffic classes.
+ * Special case for Arrowhead 4 port
+ */
+/* 0..3 actualy used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
+#define ACTIVE_TCS_BMAP 0x9f
+/* 0..3 actually used, OOO and high priority stuff all use 3 */
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+
+/* determines the physical queue flags for a given PF. */
+static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
+{
+ u32 flags;
+
+ /* common flags */
+ flags = PQ_FLAGS_LB;
+
+ /* feature flags */
+ if (IS_ECORE_SRIOV(p_hwfn->p_dev))
+ flags |= PQ_FLAGS_VFS;
+ if (IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_RLS;
+
+ /* protocol flags */
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ if (!IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_MCOS;
+ break;
+ case ECORE_PCI_FCOE:
+ flags |= PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ISCSI:
+ flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ETH_ROCE:
+ flags |= PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+ if (!IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_MCOS;
+ break;
+ case ECORE_PCI_ETH_IWARP:
+ flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+ if (!IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_MCOS;
+ break;
+ default:
+ DP_ERR(p_hwfn, "unknown personality %d\n",
+ p_hwfn->hw_info.personality);
+ return 0;
+ }
+ return flags;
+}
+
+/* Getters for resource amounts necessary for qm initialization */
+u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.num_hw_tc;
+}
+
+u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn)
+{
+ return IS_ECORE_SRIOV(p_hwfn->p_dev) ?
+ p_hwfn->p_dev->p_iov_info->total_vfs : 0;
+}
+
+#define NUM_DEFAULT_RLS 1
+
+u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
+{
+ u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
+
+ /* @DPDK */
+ /* num RLs can't exceed resource amount of rls or vports or the
+ * dcqcn qps
+ */
+ num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
+ (u16)RESC_NUM(p_hwfn, ECORE_VPORT));
+
+ /* make sure after we reserve the default and VF rls we'll have
+ * something left
+ */
+ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) {
+ DP_NOTICE(p_hwfn, false,
+ "no rate limiters left for PF rate limiting"
+ " [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs);
+ return 0;
+ }
+
+ /* subtract rls necessary for VFs and one default one for the PF */
+ num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
+
+ return num_pf_rls;
+}
+
+u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn)
+{
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+ /* all pqs share the same vport (hence the 1 below), except for vfs
+ * and pf_rl pqs
+ */
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ ecore_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ ecore_init_qm_get_num_vfs(p_hwfn) + 1;
+}
+
+/* calc amount of PQs according to the requested flags */
+u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn)
+{
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ ecore_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_MCOS & pq_flags)) *
+ ecore_init_qm_get_num_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_LB & pq_flags)) +
+ (!!(PQ_FLAGS_OOO & pq_flags)) +
+ (!!(PQ_FLAGS_ACK & pq_flags)) +
+ (!!(PQ_FLAGS_OFLD & pq_flags)) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ ecore_init_qm_get_num_vfs(p_hwfn);
+}
+
+/* initialize the top level QM params */
+static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ bool four_port;
+
+ /* pq and vport bases for this PF */
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+
+ /* rate limiting and weighted fair queueing are always enabled */
+ qm_info->vport_rl_en = 1;
+ qm_info->vport_wfq_en = 1;
+
+ /* TC config is different for AH 4 port */
+ four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2;
+
+ /* in AH 4 port we have fewer TCs per port */
+ qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
+ NUM_OF_PHYS_TCS;
+
+ /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and
+ * 4 otherwise
+ */
+ if (!qm_info->ooo_tc)
+ qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
+ DCBX_TCP_OOO_TC;
+}
+
+/* initialize qm vport params */
+static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 i;
+
+ /* all vports participate in weighted fair queueing */
+ for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++)
+ qm_info->qm_vport_params[i].vport_wfq = 1;
+}
+
+/* initialize qm port params */
+static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
+{
+ /* Initialize qm port parameters */
+ u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine;
+
+ /* indicate how ooo and high pri traffic is dealt with */
+ active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
+ ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;
+
+ for (i = 0; i < num_ports; i++) {
+ struct init_qm_port_params *p_qm_port =
+ &p_hwfn->qm_info.qm_port_params[i];
+
+ p_qm_port->active = 1;
+ p_qm_port->active_phys_tcs = active_phys_tcs;
+ p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES_E4 / num_ports;
+ p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+ }
+}
+
+/* Reset the params which must be reset for qm init. QM init may be called as
+ * a result of flows other than driver load (e.g. dcbx renegotiation). Other
+ * params may be affected by the init but would simply recalculate to the same
+ * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
+ * affected as these amounts stay the same.
+ */
+static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_pqs = 0;
+ qm_info->num_vports = 0;
+ qm_info->num_pf_rls = 0;
+ qm_info->num_vf_pqs = 0;
+ qm_info->first_vf_pq = 0;
+ qm_info->first_mcos_pq = 0;
+ qm_info->first_rl_pq = 0;
+}
+
+static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_vports++;
+
+ if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d,"
+ " qm_init_get_num_vports() %d\n",
+ qm_info->num_vports,
+ ecore_init_qm_get_num_vports(p_hwfn));
+}
+
+/* initialize a single pq and manage qm_info resources accounting.
+ * The pq_init_flags param determines whether the PQ is rate limited
+ * (for VF or PF)
+ * and whether a new vport is allocated to the pq or not (i.e. vport will be
+ * shared)
+ */
+
+/* flags for pq init */
+#define PQ_INIT_SHARE_VPORT (1 << 0)
+#define PQ_INIT_PF_RL (1 << 1)
+#define PQ_INIT_VF_RL (1 << 2)
+
+/* defines for pq init */
+#define PQ_INIT_DEFAULT_WRR_GROUP 1
+#define PQ_INIT_DEFAULT_TC 0
+#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc)
+
+static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
+ struct ecore_qm_info *qm_info,
+ u8 tc, u32 pq_init_flags)
+{
+ u16 pq_idx = qm_info->num_pqs, max_pq =
+ ecore_init_qm_get_num_pqs(p_hwfn);
+
+ if (pq_idx > max_pq)
+ DP_ERR(p_hwfn,
+ "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+
+ /* init pq params */
+ qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
+ qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
+ qm_info->num_vports;
+ qm_info->qm_pq_params[pq_idx].tc_id = tc;
+ qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
+ qm_info->qm_pq_params[pq_idx].rl_valid =
+ (pq_init_flags & PQ_INIT_PF_RL ||
+ pq_init_flags & PQ_INIT_VF_RL);
+
+ /* qm params accounting */
+ qm_info->num_pqs++;
+ if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
+ qm_info->num_vports++;
+
+ if (pq_init_flags & PQ_INIT_PF_RL)
+ qm_info->num_pf_rls++;
+
+ if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d,"
+ " qm_init_get_num_vports() %d\n",
+ qm_info->num_vports,
+ ecore_init_qm_get_num_vports(p_hwfn));
+
+ if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn))
+ DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d,"
+ " qm_init_get_num_pf_rls() %d\n",
+ qm_info->num_pf_rls,
+ ecore_init_qm_get_num_pf_rls(p_hwfn));
+}
+
+/* get pq index according to PQ_FLAGS */
+static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
+ u32 pq_flags)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ /* Can't have multiple flags set here */
+ if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags,
+ sizeof(pq_flags)) > 1)
+ goto err;
+
+ switch (pq_flags) {
+ case PQ_FLAGS_RLS:
+ return &qm_info->first_rl_pq;
+ case PQ_FLAGS_MCOS:
+ return &qm_info->first_mcos_pq;
+ case PQ_FLAGS_LB:
+ return &qm_info->pure_lb_pq;
+ case PQ_FLAGS_OOO:
+ return &qm_info->ooo_pq;
+ case PQ_FLAGS_ACK:
+ return &qm_info->pure_ack_pq;
+ case PQ_FLAGS_OFLD:
+ return &qm_info->offload_pq;
+ case PQ_FLAGS_VFS:
+ return &qm_info->first_vf_pq;
+ default:
+ goto err;
+ }
+
+err:
+ DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
+ return OSAL_NULL;
+}
+
+/* save pq index in qm info */
+static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn,
+ u32 pq_flags, u16 pq_val)
+{
+ u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
+}
+
+/* get tx pq index, with the PQ TX base already set (ready for context init) */
+u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags)
+{
+ u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ return *base_pq_idx + CM_TX_PQ_BASE;
+}
+
+u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
+{
+ u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn);
+
+ if (tc > max_tc)
+ DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+}
+
+u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
+{
+ u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn);
+
+ if (vf > max_vf)
+ DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+}
+
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
+{
+ u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
+
+ if (rl > max_rl)
+ DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+}
+
+u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
+{
+ u16 start_pq, pq, qm_pq_idx;
+
+ pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl);
+ start_pq = p_hwfn->qm_info.start_pq;
+ qm_pq_idx = pq - start_pq - CM_TX_PQ_BASE;
+
+ if (qm_pq_idx > p_hwfn->qm_info.num_pqs) {
+ DP_ERR(p_hwfn,
+ "qm_pq_idx %d must be smaller than %d\n",
+ qm_pq_idx, p_hwfn->qm_info.num_pqs);
+ }
+
+ return p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id;
+}
+
+/* Functions for creating specific types of pqs */
+static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 tc_idx;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
+ for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
+
+ qm_info->num_vf_pqs = num_vfs;
+ for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC,
+ PQ_INIT_VF_RL);
+}
+
+static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn)
+{
+ u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn);
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
+ for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC,
+ PQ_INIT_PF_RL);
+}
+
+static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
+{
+ /* rate limited pqs, must come first (FW assumption) */
+ ecore_init_qm_rl_pqs(p_hwfn);
+
+ /* pqs for multi cos */
+ ecore_init_qm_mcos_pqs(p_hwfn);
+
+ /* pure loopback pq */
+ ecore_init_qm_lb_pq(p_hwfn);
+
+ /* out of order pq */
+ ecore_init_qm_ooo_pq(p_hwfn);
+
+ /* pure ack pq */
+ ecore_init_qm_pure_ack_pq(p_hwfn);
+
+ /* pq for offloaded protocol */
+ ecore_init_qm_offload_pq(p_hwfn);
+
+ /* done sharing vports */
+ ecore_init_qm_advance_vport(p_hwfn);
+
+ /* pqs for vfs */
+ ecore_init_qm_vf_pqs(p_hwfn);
+}
+
+/* compare values of getters against resources amounts */
+static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn)
+{
+ if (ecore_init_qm_get_num_vports(p_hwfn) >
+ RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
+ return ECORE_INVAL;
+ }
+
+ if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) {
+ DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/*
+ * Function for verbose printing of the qm initialization results
+ */
+static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_vport_params *vport;
+ struct init_qm_port_params *port;
+ struct init_qm_pq_params *pq;
+ int i, tc;
+
+ /* top level params */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "qm init top level params: start_pq %d, start_vport %d,"
+ " pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+ qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq,
+ qm_info->offload_pq, qm_info->pure_ack_pq);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d,"
+ " num_vports %d, max_phys_tcs_per_port %d\n",
+ qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs,
+ qm_info->num_vf_pqs, qm_info->num_vports,
+ qm_info->max_phys_tcs_per_port);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d,"
+ " pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
+ qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en,
+ qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl,
+ qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
+
+ /* port table */
+ for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) {
+ port = &qm_info->qm_port_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "port idx %d, active %d, active_phys_tcs %d,"
+ " num_pbf_cmd_lines %d, num_btb_blocks %d,"
+ " reserved %d\n",
+ i, port->active, port->active_phys_tcs,
+ port->num_pbf_cmd_lines, port->num_btb_blocks,
+ port->reserved);
+ }
+
+ /* vport table */
+ for (i = 0; i < qm_info->num_vports; i++) {
+ vport = &qm_info->qm_vport_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "vport idx %d, vport_rl %d, wfq %d,"
+ " first_tx_pq_id [ ",
+ qm_info->start_vport + i, vport->vport_rl,
+ vport->vport_wfq);
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ",
+ vport->first_tx_pq_id[tc]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n");
+ }
+
+ /* pq table */
+ for (i = 0; i < qm_info->num_pqs; i++) {
+ pq = &qm_info->qm_pq_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+ qm_info->start_pq + i, pq->port_id, pq->vport_id,
+ pq->tc_id, pq->wrr_group, pq->rl_valid);
+ }
+}
+
+static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn)
+{
+ /* reset params required for init run */
+ ecore_init_qm_reset_params(p_hwfn);
+
+ /* init QM top level params */
+ ecore_init_qm_params(p_hwfn);
+
+ /* init QM port params */
+ ecore_init_qm_port_params(p_hwfn);
+
+ /* init QM vport params */
+ ecore_init_qm_vport_params(p_hwfn);
+
+ /* init QM physical queue params */
+ ecore_init_qm_pq_params(p_hwfn);
+
+ /* display all that init */
+ ecore_dp_init_qm_params(p_hwfn);
+}
+
+/* This function reconfigures the QM pf on the fly.
+ * For this purpose we:
+ * 1. reconfigure the QM database
+ * 2. set new values to runtime array
+ * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
+ * 4. activate init tool in QM_PF stage
+ * 5. send an sdm_qm_cmd through rbc interface to release the QM
+ */
+enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ bool b_rc;
+ enum _ecore_status_t rc;
+
+ /* initialize ecore's qm data structure */
+ ecore_init_qm_info(p_hwfn);
+
+ /* stop PF's qm queues */
+ OSAL_SPIN_LOCK(&qm_lock);
+ b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ OSAL_SPIN_UNLOCK(&qm_lock);
+ if (!b_rc)
+ return ECORE_INVAL;
+
+ /* clear the QM_PF runtime phase leftovers from previous init */
+ ecore_init_clear_rt_data(p_hwfn);
+
+ /* prepare QM portion of runtime array */
+ ecore_qm_init_pf(p_hwfn, p_ptt, false);
+
+ /* activate init tool on runtime array */
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
+ p_hwfn->hw_info.hw_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* start PF's qm queues */
+ OSAL_SPIN_LOCK(&qm_lock);
+ b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ OSAL_SPIN_UNLOCK(&qm_lock);
+ if (!b_rc)
+ return ECORE_INVAL;
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ enum _ecore_status_t rc;
+
+ rc = ecore_init_qm_sanity(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ goto alloc_err;
+
+ qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_pq_params) *
+ ecore_init_qm_get_num_pqs(p_hwfn));
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_vport_params) *
+ ecore_init_qm_get_num_vports(p_hwfn));
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_port_params) *
+ p_hwfn->p_dev->num_ports_in_engine);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct ecore_wfq_data) *
+ ecore_init_qm_get_num_vports(p_hwfn));
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
+ return ECORE_SUCCESS;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
+ ecore_qm_info_free(p_hwfn);
+ return ECORE_NOMEM;
+}
+/******************** End QM initialization ***************/
+
+enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i) {
+ rc = ecore_l2_alloc(&p_dev->hwfns[i]);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+ return rc;
+ }
+
+ p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+ sizeof(*p_dev->fw_data));
+ if (!p_dev->fw_data)
+ return ECORE_NOMEM;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ u32 n_eqes, num_cons;
+
+ /* initialize the doorbell recovery mechanism */
+ rc = ecore_db_recovery_setup(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* First allocate the context manager structure */
+ rc = ecore_cxt_mngr_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Set the HW cid/tid numbers (in the context manager)
+ * Must be done prior to any further computations.
+ */
+ rc = ecore_cxt_set_pf_params(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_alloc_qm_data(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* init qm info */
+ ecore_init_qm_info(p_hwfn);
+
+ /* Compute the ILT client partition */
+ rc = ecore_cxt_cfg_ilt_compute(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* CID map / ILT shadow table / T2
+ * The talbes sizes are determined by the computations above
+ */
+ rc = ecore_cxt_tables_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SPQ, must follow ILT because initializes SPQ context */
+ rc = ecore_spq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SP status block allocation */
+ p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
+ RESERVED_PTT_DPC);
+
+ rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_iov_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* EQ */
+ n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain);
+ if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
+ /* Calculate the EQ size
+ * ---------------------
+ * Each ICID may generate up to one event at a time i.e.
+ * the event must be handled/cleared before a new one
+ * can be generated. We calculate the sum of events per
+ * protocol and create an EQ deep enough to handle the
+ * worst case:
+ * - Core - according to SPQ.
+ * - RoCE - per QP there are a couple of ICIDs, one
+ * responder and one requester, each can
+ * generate an EQE => n_eqes_qp = 2 * n_qp.
+ * Each CQ can generate an EQE. There are 2 CQs
+ * per QP => n_eqes_cq = 2 * n_qp.
+ * Hence the RoCE total is 4 * n_qp or
+ * 2 * num_cons.
+ * - ENet - There can be up to two events per VF. One
+ * for VF-PF channel and another for VF FLR
+ * initial cleanup. The number of VFs is
+ * bounded by MAX_NUM_VFS_BB, and is much
+ * smaller than RoCE's so we avoid exact
+ * calculation.
+ */
+ if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
+ num_cons =
+ ecore_cxt_get_proto_cid_count(
+ p_hwfn,
+ PROTOCOLID_ROCE,
+ OSAL_NULL);
+ num_cons *= 2;
+ } else {
+ num_cons = ecore_cxt_get_proto_cid_count(
+ p_hwfn,
+ PROTOCOLID_IWARP,
+ OSAL_NULL);
+ }
+ n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
+ } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
+ num_cons =
+ ecore_cxt_get_proto_cid_count(p_hwfn,
+ PROTOCOLID_ISCSI,
+ OSAL_NULL);
+ n_eqes += 2 * num_cons;
+ }
+
+ if (n_eqes > 0xFFFF) {
+ DP_ERR(p_hwfn, "Cannot allocate 0x%x EQ elements."
+ "The maximum of a u16 chain is 0x%x\n",
+ n_eqes, 0xFFFF);
+ goto alloc_no_mem;
+ }
+
+ rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_consq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_l2_alloc(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ goto alloc_err;
+
+ /* DMA info initialization */
+ rc = ecore_dmae_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dmae_info structure\n");
+ goto alloc_err;
+ }
+
+ /* DCBX initialization */
+ rc = ecore_dcbx_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate memory for dcbx structure\n");
+ goto alloc_err;
+ }
+ }
+
+ p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+ sizeof(*p_dev->reset_stats));
+ if (!p_dev->reset_stats) {
+ DP_NOTICE(p_dev, false, "Failed to allocate reset statistics\n");
+ goto alloc_no_mem;
+ }
+
+ return ECORE_SUCCESS;
+
+alloc_no_mem:
+ rc = ECORE_NOMEM;
+alloc_err:
+ ecore_resc_free(p_dev);
+ return rc;
+}
+
+void ecore_resc_setup(struct ecore_dev *p_dev)
+{
+ int i;
+
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i)
+ ecore_l2_setup(&p_dev->hwfns[i]);
+ return;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ ecore_cxt_mngr_setup(p_hwfn);
+ ecore_spq_setup(p_hwfn);
+ ecore_eq_setup(p_hwfn);
+ ecore_consq_setup(p_hwfn);
+
+ /* Read shadow of current MFW mailbox */
+ ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
+ OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+
+ ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+
+ ecore_l2_setup(p_hwfn);
+ ecore_iov_setup(p_hwfn);
+ }
+}
+
+#define FINAL_CLEANUP_POLL_CNT (100)
+#define FINAL_CLEANUP_POLL_TIME (10)
+enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 id, bool is_vf)
+{
+ u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+ enum _ecore_status_t rc = ECORE_TIMEOUT;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) ||
+ CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n");
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
+
+ if (is_vf)
+ id += 0x10;
+
+ command |= X_FINAL_CLEANUP_AGG_INT <<
+ SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
+ command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
+ command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
+ command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
+
+/* Make sure notification is not set before initiating final cleanup */
+
+ if (REG_RD(p_hwfn, addr)) {
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected; Found final cleanup notification");
+ DP_NOTICE(p_hwfn, false,
+ " before initiating final cleanup\n");
+ REG_WR(p_hwfn, addr, 0);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Sending final cleanup for PFVF[%d] [Command %08x]\n",
+ id, command);
+
+ ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
+
+ /* Poll until completion */
+ while (!REG_RD(p_hwfn, addr) && count--)
+ OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME);
+
+ if (REG_RD(p_hwfn, addr))
+ rc = ECORE_SUCCESS;
+ else
+ DP_NOTICE(p_hwfn, true,
+ "Failed to receive FW final cleanup notification\n");
+
+ /* Cleanup afterwards */
+ REG_WR(p_hwfn, addr, 0);
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
+{
+ int hw_mode = 0;
+
+ if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
+ hw_mode |= 1 << MODE_BB;
+ } else if (ECORE_IS_AH(p_hwfn->p_dev)) {
+ hw_mode |= 1 << MODE_K2;
+ } else {
+ DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n",
+ p_hwfn->p_dev->type);
+ return ECORE_INVAL;
+ }
+
+ /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
+ switch (p_hwfn->p_dev->num_ports_in_engine) {
+ case 1:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
+ break;
+ case 2:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
+ break;
+ case 4:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true,
+ "num_ports_in_engine = %d not supported\n",
+ p_hwfn->p_dev->num_ports_in_engine);
+ return ECORE_INVAL;
+ }
+
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS,
+ &p_hwfn->p_dev->mf_bits))
+ hw_mode |= 1 << MODE_MF_SD;
+ else
+ hw_mode |= 1 << MODE_MF_SI;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ hw_mode |= 1 << MODE_FPGA;
+ } else {
+ if (p_hwfn->p_dev->b_is_emul_full)
+ hw_mode |= 1 << MODE_EMUL_FULL;
+ else
+ hw_mode |= 1 << MODE_EMUL_REDUCED;
+ }
+ } else
+#endif
+ hw_mode |= 1 << MODE_ASIC;
+
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ hw_mode |= 1 << MODE_100G;
+
+ p_hwfn->hw_info.hw_mode = hw_mode;
+
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP),
+ "Configuring function for hw_mode: 0x%08x\n",
+ p_hwfn->hw_info.hw_mode);
+
+ return ECORE_SUCCESS;
+}
+
+#ifndef ASIC_ONLY
+/* MFW-replacement initializations for non-ASIC */
+static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 pl_hv = 1;
+ int i;
+
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (ECORE_IS_AH(p_dev))
+ pl_hv |= 0x600;
+ }
+
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
+
+ if (CHIP_REV_IS_EMUL(p_dev) &&
+ (ECORE_IS_AH(p_dev)))
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5,
+ 0x3ffffff);
+
+ /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
+ /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
+ if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev))
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);
+
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (ECORE_IS_AH(p_dev)) {
+ /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
+ (p_dev->num_ports_in_engine >> 1));
+
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
+ p_dev->num_ports_in_engine == 4 ? 0 : 3);
+ }
+ }
+
+ /* Poll on RBC */
+ ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
+ for (i = 0; i < 100; i++) {
+ OSAL_UDELAY(50);
+ if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
+ break;
+ }
+ if (i == 100)
+ DP_NOTICE(p_hwfn, true,
+ "RBC done failed to complete in PSWRQ2\n");
+
+ return ECORE_SUCCESS;
+}
+#endif
+
+/* Init run time data for all PFs and their VFs on an engine.
+ * TBD - for VFs - Once we have parent PF info for each VF in
+ * shmem available as CAU requires knowledge of parent PF for each VF.
+ */
+static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
+{
+ u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
+ int i, igu_sb_id;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_igu_info *p_igu_info;
+ struct ecore_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ for (igu_sb_id = 0;
+ igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
+ igu_sb_id++) {
+ p_block = &p_igu_info->entry[igu_sb_id];
+
+ if (!p_block->is_pf)
+ continue;
+
+ ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_block->function_id, 0, 0);
+ STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
+ sb_entry);
+ }
+ }
+}
+
+static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 val, wr_mbs, cache_line_size;
+
+ val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
+ switch (val) {
+ case 0:
+ wr_mbs = 128;
+ break;
+ case 1:
+ wr_mbs = 256;
+ break;
+ case 2:
+ wr_mbs = 512;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ val);
+ return;
+ }
+
+ cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs);
+ switch (cache_line_size) {
+ case 32:
+ val = 0;
+ break;
+ case 64:
+ val = 1;
+ break;
+ case 128:
+ val = 2;
+ break;
+ case 256:
+ val = 3;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ cache_line_size);
+ }
+
+ if (wr_mbs < OSAL_CACHE_LINE_SIZE)
+ DP_INFO(p_hwfn,
+ "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
+ OSAL_CACHE_LINE_SIZE, wr_mbs);
+
+ STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
+ if (val > 0) {
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
+ }
+}
+
+static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int hw_mode)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u8 vf_id, max_num_vfs;
+ u16 num_pfs, pf_id;
+ u32 concrete_fid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ ecore_init_cau_rt_data(p_dev);
+
+ /* Program GTT windows */
+ ecore_gtt_init(p_hwfn, p_ptt);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ rc = ecore_hw_init_chip(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+#endif
+
+ if (p_hwfn->mcp_info) {
+ if (p_hwfn->mcp_info->func_info.bandwidth_max)
+ qm_info->pf_rl_en = 1;
+ if (p_hwfn->mcp_info->func_info.bandwidth_min)
+ qm_info->pf_wfq_en = 1;
+ }
+
+ ecore_qm_common_rt_init(p_hwfn,
+ p_dev->num_ports_in_engine,
+ qm_info->max_phys_tcs_per_port,
+ qm_info->pf_rl_en, qm_info->pf_wfq_en,
+ qm_info->vport_rl_en, qm_info->vport_wfq_en,
+ qm_info->qm_port_params);
+
+ ecore_cxt_hw_init_common(p_hwfn);
+
+ ecore_init_cache_line_size(p_hwfn, p_ptt);
+
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ECORE_PATH_ID(p_hwfn),
+ hw_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* @@TBD MichalK - should add VALIDATE_VFID to init tool...
+ * need to decide with which value, maybe runtime
+ */
+ ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
+
+ if (ECORE_IS_BB(p_dev)) {
+ /* Workaround clears ROCE search for all functions to prevent
+ * involving non initialized function in processing ROCE packet.
+ */
+ num_pfs = NUM_OF_ENG_PFS(p_dev);
+ for (pf_id = 0; pf_id < num_pfs; pf_id++) {
+ ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ }
+ /* pretend to original PF */
+ ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+
+ /* Workaround for avoiding CCFC execution error when getting packets
+ * with CRC errors, and allowing instead the invoking of the FW error
+ * handler.
+ * This is not done inside the init tool since it currently can't
+ * perform a pretending to VFs.
+ */
+ max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
+ for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
+ concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
+ ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+ ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
+ ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
+ ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
+ }
+ /* pretend to original PF */
+ ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+
+ return rc;
+}
+
+#ifndef ASIC_ONLY
+#define MISC_REG_RESET_REG_2_XMAC_BIT (1 << 4)
+#define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1 << 5)
+
+#define PMEG_IF_BYTE_COUNT 8
+
+static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u64 data, u8 reg_type, u8 port)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
+ ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) |
+ (8 << PMEG_IF_BYTE_COUNT),
+ (reg_type << 25) | (addr << 8) | port,
+ (u32)((data >> 32) & 0xffffffff),
+ (u32)(data & 0xffffffff));
+
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB,
+ (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) &
+ 0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT));
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB,
+ (reg_type << 25) | (addr << 8) | port);
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff);
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB,
+ (data >> 32) & 0xffffffff);
+}
+
+#define XLPORT_MODE_REG (0x20a)
+#define XLPORT_MAC_CONTROL (0x210)
+#define XLPORT_FLOW_CONTROL_CONFIG (0x207)
+#define XLPORT_ENABLE_REG (0x20b)
+
+#define XLMAC_CTRL (0x600)
+#define XLMAC_MODE (0x601)
+#define XLMAC_RX_MAX_SIZE (0x608)
+#define XLMAC_TX_CTRL (0x604)
+#define XLMAC_PAUSE_CTRL (0x60d)
+#define XLMAC_PFC_CTRL (0x60e)
+
+static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 loopback = 0, port = p_hwfn->port_id * 2;
+
+ DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
+
+ /* XLPORT MAC MODE *//* 0 Quad, 4 Single... */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
+ port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
+ /* XLMAC: SOFT RESET */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port);
+ /* XLMAC: Port Speed >= 10Gbps */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port);
+ /* XLMAC: Max Size */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
+ 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
+ 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 0x7c000, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
+ 0x30ffffc000ULL, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 0,
+ port); /* XLMAC: TX_EN, RX_EN */
+ /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL,
+ 0x1003 | (loopback << 2), 0, port);
+ /* Enabled Parallel PFC interface */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port);
+
+ /* XLPORT port enable */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
+}
+
+static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 port = p_hwfn->port_id;
+ u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE;
+
+ DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
+
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2),
+ (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) |
+ (port <<
+ CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) |
+ (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT));
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5,
+ 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5,
+ 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5,
+ 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5,
+ 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5,
+ (0xA <<
+ ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) |
+ (8 <<
+ ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT));
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5,
+ 0xa853);
+}
+
+static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (ECORE_IS_AH(p_hwfn->p_dev))
+ ecore_emul_link_init_ah_e5(p_hwfn, p_ptt);
+ else /* BB */
+ ecore_emul_link_init_bb(p_hwfn, p_ptt);
+}
+
+static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 port)
+{
+ int port_offset = port ? 0x800 : 0;
+ u32 xmac_rxctrl = 0;
+
+ /* Reset of XMAC */
+ /* FIXME: move to common start */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
+ MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
+ OSAL_MSLEEP(1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
+ MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
+
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1);
+
+ /* Set the number of ports on the Warp Core to 10G */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3);
+
+ /* Soft reset of XMAC */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
+ MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
+ OSAL_MSLEEP(1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
+ MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
+
+ /* FIXME: move to common end */
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20);
+
+ /* Set Max packet size: initialize XMAC block register for port 0 */
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710);
+
+ /* CRC append for Tx packets: init XMAC block register for port 1 */
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800);
+
+ /* Enable TX and RX: initialize XMAC block register for port 1 */
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset,
+ XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB);
+ xmac_rxctrl = ecore_rd(p_hwfn, p_ptt,
+ XMAC_REG_RX_CTRL_BB + port_offset);
+ xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB;
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl);
+}
+#endif
+
+static enum _ecore_status_t
+ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
+{
+ u32 dpi_bit_shift, dpi_count, dpi_page_size;
+ u32 min_dpis;
+ u32 n_wids;
+
+ /* Calculate DPI size
+ * ------------------
+ * The PWM region contains Doorbell Pages. The first is reserverd for
+ * the kernel for, e.g, L2. The others are free to be used by non-
+ * trusted applications, typically from user space. Each page, called a
+ * doorbell page is sectioned into windows that allow doorbells to be
+ * issued in parallel by the kernel/application. The size of such a
+ * window (a.k.a. WID) is 1kB.
+ * Summary:
+ * 1kB WID x N WIDS = DPI page size
+ * DPI page size x N DPIs = PWM region size
+ * Notes:
+ * The size of the DPI page size must be in multiples of OSAL_PAGE_SIZE
+ * in order to ensure that two applications won't share the same page.
+ * It also must contain at least one WID per CPU to allow parallelism.
+ * It also must be a power of 2, since it is stored as a bit shift.
+ *
+ * The DPI page size is stored in a register as 'dpi_bit_shift' so that
+ * 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096
+ * containing 4 WIDs.
+ */
+ n_wids = OSAL_MAX_T(u32, ECORE_MIN_WIDS, n_cpus);
+ dpi_page_size = ECORE_WID_SIZE * OSAL_ROUNDUP_POW_OF_TWO(n_wids);
+ dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) &
+ ~(OSAL_PAGE_SIZE - 1);
+ dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);
+ dpi_count = pwm_region_size / dpi_page_size;
+
+ min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
+ min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis);
+
+ /* Update hwfn */
+ p_hwfn->dpi_size = dpi_page_size;
+ p_hwfn->dpi_count = dpi_count;
+
+ /* Update registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
+
+ if (dpi_count < min_dpis)
+ return ECORE_NORESOURCES;
+
+ return ECORE_SUCCESS;
+}
+
+enum ECORE_ROCE_EDPM_MODE {
+ ECORE_ROCE_EDPM_MODE_ENABLE = 0,
+ ECORE_ROCE_EDPM_MODE_FORCE_ON = 1,
+ ECORE_ROCE_EDPM_MODE_DISABLE = 2,
+};
+
+static enum _ecore_status_t
+ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 pwm_regsize, norm_regsize;
+ u32 non_pwm_conn, min_addr_reg1;
+ u32 db_bar_size, n_cpus;
+ u32 roce_edpm_mode;
+ u32 pf_dems_shift;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 cond;
+
+ db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ db_bar_size /= 2;
+
+ /* Calculate doorbell regions
+ * -----------------------------------
+ * The doorbell BAR is made of two regions. The first is called normal
+ * region and the second is called PWM region. In the normal region
+ * each ICID has its own set of addresses so that writing to that
+ * specific address identifies the ICID. In the Process Window Mode
+ * region the ICID is given in the data written to the doorbell. The
+ * above per PF register denotes the offset in the doorbell BAR in which
+ * the PWM region begins.
+ * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per
+ * non-PWM connection. The calculation below computes the total non-PWM
+ * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is
+ * in units of 4,096 bytes.
+ */
+ non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+ OSAL_NULL) +
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);
+ norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn,
+ OSAL_PAGE_SIZE);
+ min_addr_reg1 = norm_regsize / 4096;
+ pwm_regsize = db_bar_size - norm_regsize;
+
+ /* Check that the normal and PWM sizes are valid */
+ if (db_bar_size < norm_regsize) {
+ DP_ERR(p_hwfn->p_dev,
+ "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
+ db_bar_size, norm_regsize);
+ return ECORE_NORESOURCES;
+ }
+ if (pwm_regsize < ECORE_MIN_PWM_REGION) {
+ DP_ERR(p_hwfn->p_dev,
+ "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
+ pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size,
+ norm_regsize);
+ return ECORE_NORESOURCES;
+ }
+
+ /* Calculate number of DPIs */
+ roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
+ if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) ||
+ ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) {
+ /* Either EDPM is mandatory, or we are attempting to allocate a
+ * WID per CPU.
+ */
+ n_cpus = OSAL_NUM_CPUS();
+ rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+ }
+
+ cond = ((rc != ECORE_SUCCESS) &&
+ (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
+ (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE);
+ if (cond || p_hwfn->dcbx_no_edpm) {
+ /* Either EDPM is disabled from user configuration, or it is
+ * disabled via DCBx, or it is not mandatory and we failed to
+ * allocated a WID per CPU.
+ */
+ n_cpus = 1;
+ rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+
+ /* If we entered this flow due to DCBX then the DPM register is
+ * already configured.
+ */
+ }
+
+ DP_INFO(p_hwfn,
+ "doorbell bar: normal_region_size=%d, pwm_region_size=%d",
+ norm_regsize, pwm_regsize);
+ DP_INFO(p_hwfn,
+ " dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
+ p_hwfn->dpi_size, p_hwfn->dpi_count,
+ ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+ "disabled" : "enabled");
+
+ /* Check return codes from above calls */
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "Failed to allocate enough DPIs\n");
+ return ECORE_NORESOURCES;
+ }
+
+ /* Update hwfn */
+ p_hwfn->dpi_start_offset = norm_regsize;
+
+ /* Update registers */
+ /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
+ pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int hw_mode)
+{
+ u32 ppf_to_eng_sel[NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE];
+ u32 val;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 i;
+
+ /* In CMT for non-RoCE packets - use connection based classification */
+ val = ECORE_IS_CMT(p_hwfn->p_dev) ? 0x8 : 0x0;
+ for (i = 0; i < NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE; i++)
+ ppf_to_eng_sel[i] = val;
+ STORE_RT_REG_AGG(p_hwfn, NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET,
+ ppf_to_eng_sel);
+
+ /* In CMT the gate should be cleared by the 2nd hwfn */
+ if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn))
+ STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
+
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+ hw_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ if (ECORE_IS_AH(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+ else if (ECORE_IS_BB(p_hwfn->p_dev))
+ ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
+ } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (ECORE_IS_CMT(p_hwfn->p_dev)) {
+ /* Activate OPTE in CMT */
+ u32 val;
+
+ val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
+ val |= 0x10;
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
+ 0x55555555);
+ }
+
+ ecore_emul_link_init(p_hwfn, p_ptt);
+ } else {
+ DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
+ }
+#endif
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_tunnel_info *p_tunn,
+ int hw_mode,
+ bool b_hw_start,
+ enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
+{
+ u8 rel_pf_id = p_hwfn->rel_pf_id;
+ u32 prs_reg;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 ctrl;
+ int pos;
+
+ if (p_hwfn->mcp_info) {
+ struct ecore_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+ if (p_info->bandwidth_min)
+ p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
+
+ /* Update rate limit once we'll actually have a link */
+ p_hwfn->qm_info.pf_rl = 100000;
+ }
+ ecore_cxt_hw_init_pf(p_hwfn, p_ptt);
+
+ ecore_int_igu_init_rt(p_hwfn);
+
+ /* Set VLAN in NIG if needed */
+ if (hw_mode & (1 << MODE_MF_SD)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+ p_hwfn->hw_info.ovlan);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
+ 1);
+ }
+
+ /* Enable classification by MAC if needed */
+ if (hw_mode & (1 << MODE_MF_SI)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Configuring TAGMAC_CLS_TYPE\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET,
+ 1);
+ }
+
+ /* Protocl Configuration - @@@TBD - should we set 0 otherwise? */
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
+ (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
+ (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
+
+ /* perform debug configuration when chip is out of reset */
+ OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
+
+ /* Sanity check before the PF init sequence that uses DMAE */
+ rc = ecore_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+ if (rc)
+ return rc;
+
+ /* PF Init sequence */
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* Pure runtime initializations - directly to the HW */
+ ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+
+ /* PCI relaxed ordering causes a decrease in the performance on some
+ * systems. Till a root cause is found, disable this attribute in the
+ * PCI config space.
+ */
+ /* Not in use @DPDK
+ * pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
+ * if (!pos) {
+ * DP_NOTICE(p_hwfn, true,
+ * "Failed to find the PCIe Cap\n");
+ * return ECORE_IO;
+ * }
+ * OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl);
+ * ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ * OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl);
+ */
+
+ rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ if (b_hw_start) {
+ /* enable interrupts */
+ rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* send function start command */
+ rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_tunn,
+ allow_npar_tx_switch);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Function start ramrod failed\n");
+ } else {
+ return rc;
+ }
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+
+ if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
+ (1 << 2));
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
+ 0x100);
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH registers after start PFn\n");
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_SEARCH_TCP_FIRST_FRAG);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
+ prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+ }
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_enable)
+{
+ u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;
+
+ /* Configure the PF's internal FID_enable for master transactions */
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+
+ /* Wait until value is set - try for 1 second every 50us */
+ for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
+ val = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ if (val == set_val)
+ break;
+
+ OSAL_UDELAY(50);
+ }
+
+ if (val != set_val) {
+ DP_NOTICE(p_hwfn, true,
+ "PFID_ENABLE_MASTER wasn't changed after a second\n");
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_main_ptt)
+{
+ /* Read shadow of current MFW mailbox */
+ ecore_mcp_read_mb(p_hwfn, p_main_ptt);
+ OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+}
+
+static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ 1 << p_hwfn->abs_pf_id);
+}
+
+static enum _ecore_status_t
+ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_load_req_params *p_load_req,
+ struct ecore_drv_load_params *p_drv_load)
+{
+ /* Make sure that if ecore-client didn't provide inputs, all the
+ * expected defaults are indeed zero.
+ */
+ OSAL_BUILD_BUG_ON(ECORE_DRV_ROLE_OS != 0);
+ OSAL_BUILD_BUG_ON(ECORE_LOAD_REQ_LOCK_TO_DEFAULT != 0);
+ OSAL_BUILD_BUG_ON(ECORE_OVERRIDE_FORCE_LOAD_NONE != 0);
+
+ OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
+
+ if (p_drv_load == OSAL_NULL)
+ goto out;
+
+ p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+ ECORE_DRV_ROLE_KDUMP :
+ ECORE_DRV_ROLE_OS;
+ p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+ p_load_req->override_force_load = p_drv_load->override_force_load;
+
+ /* Old MFW versions don't support timeout values other than default and
+ * none, so these values are replaced according to the fall-back action.
+ */
+
+ if (p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT ||
+ p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_NONE ||
+ (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO)) {
+ p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
+ goto out;
+ }
+
+ switch (p_drv_load->mfw_timeout_fallback) {
+ case ECORE_TO_FALLBACK_TO_NONE:
+ p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_NONE;
+ break;
+ case ECORE_TO_FALLBACK_TO_DEFAULT:
+ p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
+ break;
+ case ECORE_TO_FALLBACK_FAIL_LOAD:
+ DP_NOTICE(p_hwfn, false,
+ "Received %d as a value for MFW timeout while the MFW supports only default [%d] or none [%d]. Abort.\n",
+ p_drv_load->mfw_timeout_val,
+ ECORE_LOAD_REQ_LOCK_TO_DEFAULT,
+ ECORE_LOAD_REQ_LOCK_TO_NONE);
+ return ECORE_ABORTED;
+ }
+
+ DP_INFO(p_hwfn,
+ "Modified the MFW timeout value from %d to %s [%d] due to lack of MFW support\n",
+ p_drv_load->mfw_timeout_val,
+ (p_load_req->timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT) ?
+ "default" : "none",
+ p_load_req->timeout_val);
+out:
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_hw_init_params *p_params)
+{
+ if (p_params->p_tunn) {
+ ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
+ ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
+ }
+
+ p_hwfn->b_int_enabled = 1;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
+ struct ecore_hw_init_params *p_params)
+{
+ struct ecore_load_req_params load_req_params;
+ u32 load_code, resp, param, drv_mb_param;
+ bool b_default_mtu = true;
+ struct ecore_hwfn *p_hwfn;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
+ DP_NOTICE(p_dev, false,
+ "MSI mode is not supported for CMT devices\n");
+ return ECORE_INVAL;
+ }
+
+ if (IS_PF(p_dev)) {
+ rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ p_hwfn = &p_dev->hwfns[i];
+
+ /* If management didn't provide a default, set one of our own */
+ if (!p_hwfn->hw_info.mtu) {
+ p_hwfn->hw_info.mtu = 1500;
+ b_default_mtu = false;
+ }
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_start(p_hwfn, p_params);
+ continue;
+ }
+
+ rc = ecore_calc_hw_mode(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms);
+
+ rc = ecore_fill_load_req_params(p_hwfn, &load_req_params,
+ p_params->p_drv_load_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_req_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed sending a LOAD_REQ command\n");
+ return rc;
+ }
+
+ load_code = load_req_params.load_code;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load request was sent. Load code: 0x%x\n",
+ load_code);
+
+ ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
+
+ /* CQ75580:
+ * When coming back from hiberbate state, the registers from
+ * which shadow is read initially are not initialized. It turns
+ * out that these registers get initialized during the call to
+ * ecore_mcp_load_req request. So we need to reread them here
+ * to get the proper shadow register value.
+ * Note: This is a workaround for the missing MFW
+ * initialization. It may be removed once the implementation
+ * is done.
+ */
+ ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
+
+ /* Only relevant for recovery:
+ * Clear the indication after the LOAD_REQ command is responded
+ * by the MFW.
+ */
+ p_dev->recov_in_prog = false;
+
+ p_hwfn->first_on_engine = (load_code ==
+ FW_MSG_CODE_DRV_LOAD_ENGINE);
+
+ if (!qm_lock_ref_cnt) {
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock);
+ if (rc) {
+ DP_ERR(p_hwfn, "qm_lock allocation failed\n");
+ goto qm_lock_fail;
+ }
+#endif
+ OSAL_SPIN_LOCK_INIT(&qm_lock);
+ }
+ ++qm_lock_ref_cnt;
+
+ /* Clean up chip from previous driver if such remains exist.
+ * This is not needed when the PF is the first one on the
+ * engine, since afterwards we are going to init the FW.
+ */
+ if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
+ rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->rel_pf_id, false);
+ if (rc != ECORE_SUCCESS) {
+ ecore_hw_err_notify(p_hwfn,
+ ECORE_HW_ERR_RAMROD_FAIL);
+ goto load_err;
+ }
+ }
+
+ /* Log and clear previous pglue_b errors if such exist */
+ ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);
+
+ /* Enable the PF's internal FID_enable in the PXP */
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
+ true);
+ if (rc != ECORE_SUCCESS)
+ goto load_err;
+
+ /* Clear the pglue_b was_error indication.
+ * In E4 it must be done after the BME and the internal
+ * FID_enable for the PF are set, since VDMs may cause the
+ * indication to be set again.
+ */
+ ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc != ECORE_SUCCESS)
+ break;
+ /* Fall into */
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc != ECORE_SUCCESS)
+ break;
+ /* Fall into */
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
+ p_params->p_tunn,
+ p_hwfn->hw_info.hw_mode,
+ p_params->b_hw_start,
+ p_params->int_mode,
+ p_params->allow_npar_tx_switch);
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected load code [0x%08x]", load_code);
+ rc = ECORE_NOTIMPL;
+ break;
+ }
+
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "init phase failed for loadcode 0x%x (rc %d)\n",
+ load_code, rc);
+ goto load_err;
+ }
+
+ rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Sending load done failed, rc = %d\n", rc);
+ if (rc == ECORE_NOMEM) {
+ DP_NOTICE(p_hwfn, false,
+ "Sending load done was failed due to memory allocation failure\n");
+ goto load_err;
+ }
+ return rc;
+ }
+
+ /* send DCBX attention request command */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "sending phony dcbx set command to trigger DCBx attention handling\n");
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp,
+ &param);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send DCBX attention request\n");
+ return rc;
+ }
+
+ p_hwfn->hw_init_done = true;
+ }
+
+ if (IS_PF(p_dev)) {
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ drv_mb_param = STORM_FW_VERSION;
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update firmware version\n");
+
+ if (!b_default_mtu)
+ rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.mtu);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update default mtu\n");
+
+ rc = ecore_mcp_ov_update_driver_state(p_hwfn,
+ p_hwfn->p_main_ptt,
+ ECORE_OV_DRIVER_STATE_DISABLED);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update driver state\n");
+ }
+
+ return rc;
+
+load_err:
+ --qm_lock_ref_cnt;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (!qm_lock_ref_cnt)
+ OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
+qm_lock_fail:
+#endif
+ /* The MFW load lock should be released regardless of success or failure
+ * of initialization.
+ * TODO: replace this with an attempt to send cancel_load.
+ */
+ ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
+ return rc;
+}
+
+#define ECORE_HW_STOP_RETRY_LIMIT (10)
+static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ int i;
+
+ /* close timers */
+ ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+ for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog;
+ i++) {
+ if ((!ecore_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
+
+ /* Dependent on number of connection/tasks, possibly
+ * 1ms sleep is required between polls
+ */
+ OSAL_MSLEEP(1);
+ }
+
+ if (i < ECORE_HW_STOP_RETRY_LIMIT)
+ return;
+
+ DP_NOTICE(p_hwfn, false,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+ (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
+}
+
+void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
+{
+ int j;
+
+ for_each_hwfn(p_dev, j) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
+ }
+}
+
+static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 expected_val)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt, addr);
+
+ if (val != expected_val) {
+ DP_NOTICE(p_hwfn, true,
+ "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n",
+ addr, val, expected_val);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
+{
+ struct ecore_hwfn *p_hwfn;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc, rc2 = ECORE_SUCCESS;
+ int j;
+
+ for_each_hwfn(p_dev, j) {
+ p_hwfn = &p_dev->hwfns[j];
+ p_ptt = p_hwfn->p_main_ptt;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_pf_int_cleanup(p_hwfn);
+ rc = ecore_vf_pf_reset(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_vf_pf_reset failed. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ continue;
+ }
+
+ /* mark the hw as uninitialized... */
+ p_hwfn->hw_init_done = false;
+
+ /* Send unload command to MCP */
+ if (!p_dev->recov_in_prog) {
+ rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ }
+
+ OSAL_DPC_SYNC(p_hwfn);
+
+ /* After this point no MFW attentions are expected, e.g. prevent
+ * race between pf stop and dcbx pf update.
+ */
+
+ rc = ecore_sp_pf_stop(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+
+ /* perform debug action after PF stop was sent */
+ OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
+
+ /* close NIG to BRB gate */
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ /* close parser */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ /* @@@TBD - clean transmission queues (5.b) */
+ /* @@@TBD - clean BTB (5.c) */
+
+ ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
+
+ /* @@@TBD - verify DMAE requests are done (8) */
+
+ /* Disable Attention Generation */
+ ecore_int_igu_disable_int(p_hwfn, p_ptt);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+ rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to return IGU CAM to default\n");
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ OSAL_MSLEEP(1);
+
+ if (!p_dev->recov_in_prog) {
+ ecore_verify_reg_val(p_hwfn, p_ptt,
+ QM_REG_USG_CNT_PF_TX, 0);
+ ecore_verify_reg_val(p_hwfn, p_ptt,
+ QM_REG_USG_CNT_PF_OTHER, 0);
+ /* @@@TBD - assert on incorrect xCFC values (10.b) */
+ }
+
+ /* Disable PF in HW blocks */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+
+ --qm_lock_ref_cnt;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (!qm_lock_ref_cnt)
+ OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
+#endif
+
+ if (!p_dev->recov_in_prog) {
+ rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc == ECORE_NOMEM) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed sending an UNLOAD_DONE command due to a memory allocation failure. Resending.\n");
+ rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
+ }
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ }
+ } /* hwfn loop */
+
+ if (IS_PF(p_dev) && !p_dev->recov_in_prog) {
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
+
+ /* Clear the PF's internal FID_enable in the PXP.
+ * In CMT this should only be done for first hw-function, and
+ * only after all transactions have stopped for all active
+ * hw-functions.
+ */
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
+ false);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ }
+
+ return rc2;
+}
+
+enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
+{
+ int j;
+
+ for_each_hwfn(p_dev, j) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+ struct ecore_ptt *p_ptt;
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_pf_int_cleanup(p_hwfn);
+ continue;
+ }
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
+ "Shutting down the fastpath\n");
+
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ /* @@@TBD - clean transmission queues (5.b) */
+ /* @@@TBD - clean BTB (5.c) */
+
+ /* @@@TBD - verify DMAE requests are done (8) */
+
+ ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ OSAL_MSLEEP(1);
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ /* If roce info is allocated it means roce is initialized and should
+ * be enabled in searcher.
+ */
+ if (p_hwfn->p_rdma_info) {
+ if (p_hwfn->b_rdma_enabled_in_prs)
+ ecore_wr(p_hwfn, p_ptt,
+ p_hwfn->rdma_prs_search_reg, 0x1);
+ ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1);
+ }
+
+ /* Re-open incoming traffic */
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
+}
+
+/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
+static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
+{
+ ecore_ptt_pool_free(p_hwfn);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info);
+}
+
+/* Setup bar access */
+static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
+{
+ /* clear indirect access */
+ if (ECORE_IS_AH(p_hwfn->p_dev)) {
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0);
+ } else {
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
+ }
+
+ /* Clean previous pglue_b errors if such exist */
+ ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
+ /* enable internal target-read */
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+}
+
+static void get_function_id(struct ecore_hwfn *p_hwfn)
+{
+ /* ME Register */
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
+ PXP_PF_ME_OPAQUE_ADDR);
+
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+
+ /* Bits 16-19 from the ME registers are the pf_num */
+ p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
+ p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID);
+ p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PORT);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+ "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
+ p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
+}
+
+static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
+{
+ u32 *feat_num = p_hwfn->hw_info.feat_num;
+ struct ecore_sb_cnt_info sb_cnt;
+ u32 non_l2_sbs = 0;
+
+ OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt));
+ ecore_int_get_num_sbs(p_hwfn, &sb_cnt);
+
+ /* L2 Queues require each: 1 status block. 1 L2 queue */
+ if (ECORE_IS_L2_PERSONALITY(p_hwfn)) {
+ /* Start by allocating VF queues, then PF's */
+ feat_num[ECORE_VF_L2_QUE] =
+ OSAL_MIN_T(u32,
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
+ sb_cnt.iov_cnt);
+ feat_num[ECORE_PF_L2_QUE] =
+ OSAL_MIN_T(u32,
+ sb_cnt.cnt - non_l2_sbs,
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
+ FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
+ }
+
+ if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
+ feat_num[ECORE_FCOE_CQ] =
+ OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
+ ECORE_CMDQS_CQS));
+
+ if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
+ feat_num[ECORE_ISCSI_CQ] =
+ OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
+ ECORE_CMDQS_CQS));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+ "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
+ (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ),
+ (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ),
+ (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ),
+ (int)sb_cnt.cnt);
+}
+
+const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
+{
+ switch (res_id) {
+ case ECORE_L2_QUEUE:
+ return "L2_QUEUE";
+ case ECORE_VPORT:
+ return "VPORT";
+ case ECORE_RSS_ENG:
+ return "RSS_ENG";
+ case ECORE_PQ:
+ return "PQ";
+ case ECORE_RL:
+ return "RL";
+ case ECORE_MAC:
+ return "MAC";
+ case ECORE_VLAN:
+ return "VLAN";
+ case ECORE_RDMA_CNQ_RAM:
+ return "RDMA_CNQ_RAM";
+ case ECORE_ILT:
+ return "ILT";
+ case ECORE_LL2_QUEUE:
+ return "LL2_QUEUE";
+ case ECORE_CMDQS_CQS:
+ return "CMDQS_CQS";
+ case ECORE_RDMA_STATS_QUEUE:
+ return "RDMA_STATS_QUEUE";
+ case ECORE_BDQ:
+ return "BDQ";
+ case ECORE_SB:
+ return "SB";
+ default:
+ return "UNKNOWN_RESOURCE";
+ }
+}
+
+static enum _ecore_status_t
+__ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id,
+ u32 resc_max_val,
+ u32 *p_mcp_resp)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
+ resc_max_val, p_mcp_resp);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "MFW response failure for a max value setting of resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
+ DP_INFO(p_hwfn,
+ "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
+ res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ u32 resc_max_val, mcp_resp;
+ u8 res_id;
+ enum _ecore_status_t rc;
+
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
+ /* @DPDK */
+ switch (res_id) {
+ case ECORE_LL2_QUEUE:
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_RDMA_STATS_QUEUE:
+ case ECORE_BDQ:
+ resc_max_val = 0;
+ break;
+ default:
+ continue;
+ }
+
+ rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
+ resc_max_val, &mcp_resp);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* There's no point to continue to the next resource if the
+ * command is not supported by the MFW.
+ * We do continue if the command is supported but the resource
+ * is unknown to the MFW. Such a resource will be later
+ * configured with the default allocation values.
+ */
+ if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
+ return ECORE_NOTIMPL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static
+enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
+ enum ecore_resources res_id,
+ u32 *p_resc_num, u32 *p_resc_start)
+{
+ u8 num_funcs = p_hwfn->num_funcs_on_engine;
+ bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+
+ switch (res_id) {
+ case ECORE_L2_QUEUE:
+ *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+ MAX_NUM_L2_QUEUES_BB) / num_funcs;
+ break;
+ case ECORE_VPORT:
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ MAX_NUM_VPORTS_BB) / num_funcs;
+ break;
+ case ECORE_RSS_ENG:
+ *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+ ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+ break;
+ case ECORE_PQ:
+ *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+ MAX_QM_TX_QUEUES_BB) / num_funcs;
+ break;
+ case ECORE_RL:
+ *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ break;
+ case ECORE_MAC:
+ case ECORE_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+ break;
+ case ECORE_ILT:
+ *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+ PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+ break;
+ case ECORE_LL2_QUEUE:
+ *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ break;
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ /* @DPDK */
+ *p_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs;
+ break;
+ case ECORE_RDMA_STATS_QUEUE:
+ /* @DPDK */
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ MAX_NUM_VPORTS_BB) / num_funcs;
+ break;
+ case ECORE_BDQ:
+ /* @DPDK */
+ *p_resc_num = 0;
+ break;
+ default:
+ break;
+ }
+
+
+ switch (res_id) {
+ case ECORE_BDQ:
+ if (!*p_resc_num)
+ *p_resc_start = 0;
+ break;
+ case ECORE_SB:
+ /* Since we want its value to reflect whether MFW supports
+ * the new scheme, have a default of 0.
+ */
+ *p_resc_num = 0;
+ break;
+ default:
+ *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
+ break;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+__ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
+ bool drv_resc_alloc)
+{
+ u32 dflt_resc_num = 0, dflt_resc_start = 0;
+ u32 mcp_resp, *p_resc_num, *p_resc_start;
+ enum _ecore_status_t rc;
+
+ p_resc_num = &RESC_NUM(p_hwfn, res_id);
+ p_resc_start = &RESC_START(p_hwfn, res_id);
+
+ rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
+ &dflt_resc_start);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "Failed to get default amount for resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ *p_resc_num = dflt_resc_num;
+ *p_resc_start = dflt_resc_start;
+ goto out;
+ }
+#endif
+
+ rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ &mcp_resp, p_resc_num, p_resc_start);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "MFW response failure for an allocation request for"
+ " resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ /* Default driver values are applied in the following cases:
+ * - The resource allocation MB command is not supported by the MFW
+ * - There is an internal error in the MFW while processing the request
+ * - The resource ID is unknown to the MFW
+ */
+ if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to receive allocation info for resource %d [%s]."
+ " mcp_resp = 0x%x. Applying default values"
+ " [%d,%d].\n",
+ res_id, ecore_hw_get_resc_name(res_id), mcp_resp,
+ dflt_resc_num, dflt_resc_start);
+
+ *p_resc_num = dflt_resc_num;
+ *p_resc_start = dflt_resc_start;
+ goto out;
+ }
+
+ if ((*p_resc_num != dflt_resc_num ||
+ *p_resc_start != dflt_resc_start) &&
+ res_id != ECORE_SB) {
+ DP_INFO(p_hwfn,
+ "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n",
+ res_id, ecore_hw_get_resc_name(res_id), *p_resc_num,
+ *p_resc_start, dflt_resc_num, dflt_resc_start,
+ drv_resc_alloc ? " - Applying default values" : "");
+ if (drv_resc_alloc) {
+ *p_resc_num = dflt_resc_num;
+ *p_resc_start = dflt_resc_start;
+ }
+ }
+out:
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
+ bool drv_resc_alloc)
+{
+ enum _ecore_status_t rc;
+ u8 res_id;
+
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
+ rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool drv_resc_alloc)
+{
+ struct ecore_resc_unlock_params resc_unlock_params;
+ struct ecore_resc_lock_params resc_lock_params;
+ bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ u8 res_id;
+ enum _ecore_status_t rc;
+#ifndef ASIC_ONLY
+ u32 *resc_start = p_hwfn->hw_info.resc_start;
+ u32 *resc_num = p_hwfn->hw_info.resc_num;
+ /* For AH, an equal share of the ILT lines between the maximal number of
+ * PFs is not enough for RoCE. This would be solved by the future
+ * resource allocation scheme, but isn't currently present for
+ * FPGA/emulation. For now we keep a number that is sufficient for RoCE
+ * to work - the BB number of ILT lines divided by its max PFs number.
+ */
+ u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB;
+#endif
+
+ /* Setting the max values of the soft resources and the following
+ * resources allocation queries should be atomic. Since several PFs can
+ * run in parallel - a resource lock is needed.
+ * If either the resource lock or resource set value commands are not
+ * supported - skip the max values setting, release the lock if
+ * needed, and proceed to the queries. Other failures, including a
+ * failure to acquire the lock, will cause this function to fail.
+ * Old drivers that don't acquire the lock can run in parallel, and
+ * their allocation values won't be affected by the updated max values.
+ */
+ ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
+ ECORE_RESC_LOCK_RESC_ALLOC, false);
+
+ rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
+ return rc;
+ } else if (rc == ECORE_NOTIMPL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
+ } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to acquire the resource lock for the resource allocation commands\n");
+ rc = ECORE_BUSY;
+ goto unlock_and_exit;
+ } else {
+ rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to set the max values of the soft resources\n");
+ goto unlock_and_exit;
+ } else if (rc == ECORE_NOTIMPL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
+ &resc_unlock_params);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+ }
+
+ rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS)
+ goto unlock_and_exit;
+
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
+ &resc_unlock_params);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ /* Reduced build contains less PQs */
+ if (!(p_hwfn->p_dev->b_is_emul_full)) {
+ resc_num[ECORE_PQ] = 32;
+ resc_start[ECORE_PQ] = resc_num[ECORE_PQ] *
+ p_hwfn->enabled_func_idx;
+ }
+
+ /* For AH emulation, since we have a possible maximal number of
+ * 16 enabled PFs, in case there are not enough ILT lines -
+ * allocate only first PF as RoCE and have all the other ETH
+ * only with less ILT lines.
+ */
+ if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
+ resc_num[ECORE_ILT] = OSAL_MAX_T(u32,
+ resc_num[ECORE_ILT],
+ roce_min_ilt_lines);
+ }
+
+ /* Correct the common ILT calculation if PF0 has more */
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
+ p_hwfn->p_dev->b_is_emul_full &&
+ p_hwfn->rel_pf_id && resc_num[ECORE_ILT] < roce_min_ilt_lines)
+ resc_start[ECORE_ILT] += roce_min_ilt_lines -
+ resc_num[ECORE_ILT];
+#endif
+
+ /* Sanity for ILT */
+ if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
+ (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't assign ILT pages [%08x,...,%08x]\n",
+ RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn,
+ ECORE_ILT) -
+ 1);
+ return ECORE_INVAL;
+ }
+
+ /* This will also learn the number of SBs from MFW */
+ if (ecore_int_igu_reset_cam(p_hwfn, p_ptt))
+ return ECORE_INVAL;
+
+ ecore_hw_set_feat(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+ "The numbers for each resource are:\n");
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n",
+ ecore_hw_get_resc_name(res_id),
+ RESC_NUM(p_hwfn, res_id),
+ RESC_START(p_hwfn, res_id));
+
+ return ECORE_SUCCESS;
+
+unlock_and_exit:
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
+ ecore_mcp_resc_unlock(p_hwfn, p_ptt,
+ &resc_unlock_params);
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_hw_prepare_params *p_params)
+{
+ u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode;
+ u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
+ struct ecore_mcp_link_capabilities *p_caps;
+ struct ecore_mcp_link_params *link;
+ enum _ecore_status_t rc;
+
+ /* Read global nvm_cfg address */
+ nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+
+ /* Verify MCP has initialized it */
+ if (!nvm_cfg_addr) {
+ DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM;
+ return ECORE_INVAL;
+ }
+
+/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
+
+ nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, core_cfg);
+
+ core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
+
+ switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+ NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
+ core_cfg);
+ break;
+ }
+
+ /* Read DCBX configuration */
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ dcbx_mode = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, generic_cont0));
+ dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK)
+ >> NVM_CFG1_PORT_DCBX_MODE_OFFSET;
+ switch (dcbx_mode) {
+ case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC;
+ break;
+ case NVM_CFG1_PORT_DCBX_MODE_CEE:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE;
+ break;
+ case NVM_CFG1_PORT_DCBX_MODE_IEEE:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE;
+ break;
+ default:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED;
+ }
+
+ /* Read default link configuration */
+ link = &p_hwfn->mcp_info->link_input;
+ p_caps = &p_hwfn->mcp_info->link_capabilities;
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ link_temp = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
+ link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+ link->speed.advertised_speeds = link_temp;
+ p_caps->speed_capabilities = link->speed.advertised_speeds;
+
+ link_temp = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, link_settings));
+ switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+ NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
+ link->speed.autoneg = true;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
+ link->speed.forced_speed = 1000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
+ link->speed.forced_speed = 10000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
+ link->speed.forced_speed = 25000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
+ link->speed.forced_speed = 40000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
+ link->speed.forced_speed = 50000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
+ link->speed.forced_speed = 100000;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp);
+ }
+
+ p_caps->default_speed = link->speed.forced_speed;
+ p_caps->default_speed_autoneg = link->speed.autoneg;
+
+ link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
+ link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
+ link->pause.autoneg = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+ link->pause.forced_rx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+ link->pause.forced_tx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+ link->loopback_mode = 0;
+
+ if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
+ link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, ext_phy));
+ link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
+ link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
+ p_caps->default_eee = ECORE_MCP_EEE_ENABLED;
+ link->eee.enable = true;
+ switch (link_temp) {
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
+ p_caps->default_eee = ECORE_MCP_EEE_DISABLED;
+ link->eee.enable = false;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
+ p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
+ p_caps->eee_lpi_timer =
+ EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
+ p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
+ break;
+ }
+
+ link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
+ link->eee.tx_lpi_enable = link->eee.enable;
+ link->eee.adv_caps = ECORE_EEE_1G_ADV | ECORE_EEE_10G_ADV;
+ } else {
+ p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n EEE: %02x [%08x usec]",
+ link->speed.forced_speed, link->speed.advertised_speeds,
+ link->speed.autoneg, link->pause.autoneg,
+ p_caps->default_eee, p_caps->eee_lpi_timer);
+
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
+
+ generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
+
+ mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+ NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_UFP:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
+ 1 << ECORE_MF_UFP_SPECIFIC |
+ 1 << ECORE_MF_8021Q_TAGGING;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_BD:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_8021AD_TAGGING;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_LL2_NON_UNICAST |
+ 1 << ECORE_MF_INTER_PF_SWITCH |
+ 1 << ECORE_MF_DISABLE_ARFS;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_LL2_NON_UNICAST;
+ if (ECORE_IS_BB(p_hwfn->p_dev))
+ p_hwfn->p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF;
+ break;
+ }
+ DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+ p_hwfn->p_dev->mf_bits);
+
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ p_hwfn->p_dev->mf_bits |= (1 << ECORE_MF_DISABLE_ARFS);
+
+ /* It's funny since we have another switch, but it's easier
+ * to throw this away in linux this way. Long term, it might be
+ * better to have have getters for needed ECORE_MF_* fields,
+ * convert client code and eliminate this.
+ */
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ case NVM_CFG1_GLOB_MF_MODE_BD:
+ p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_UFP:
+ p_hwfn->p_dev->mf_mode = ECORE_MF_UFP;
+ break;
+ }
+
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
+
+ device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
+ OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
+ OSAL_SET_BIT(ECORE_DEV_CAP_FCOE,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
+ OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
+ OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
+ OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
+ &p_hwfn->hw_info.device_capabilities);
+
+ rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
+ rc = ECORE_SUCCESS;
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
+ }
+
+ return rc;
+}
+
+static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
+ u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+ num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
+
+ /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
+ * in the other bits are selected.
+ * Bits 1-15 are for functions 1-15, respectively, and their value is
+ * '0' only for enabled functions (function 0 always exists and
+ * enabled).
+ * In case of CMT in BB, only the "even" functions are enabled, and thus
+ * the number of functions for both hwfns is learnt from the same bits.
+ */
+ if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) {
+ reg_function_hide = ecore_rd(p_hwfn, p_ptt,
+ MISCS_REG_FUNCTION_HIDE_BB_K2);
+ } else { /* E5 */
+ reg_function_hide = 0;
+ }
+
+ if (reg_function_hide & 0x1) {
+ if (ECORE_IS_BB(p_dev)) {
+ if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) {
+ num_funcs = 0;
+ eng_mask = 0xaaaa;
+ } else {
+ num_funcs = 1;
+ eng_mask = 0x5554;
+ }
+ } else {
+ num_funcs = 1;
+ eng_mask = 0xfffe;
+ }
+
+ /* Get the number of the enabled functions on the engine */
+ tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ num_funcs++;
+ tmp >>= 0x1;
+ }
+
+ /* Get the PF index within the enabled functions */
+ low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
+ tmp = reg_function_hide & eng_mask & low_pfs_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ enabled_func_idx--;
+ tmp >>= 0x1;
+ }
+ }
+
+ p_hwfn->num_funcs_on_engine = num_funcs;
+ p_hwfn->enabled_func_idx = enabled_func_idx;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n");
+ p_hwfn->num_funcs_on_engine = 4;
+ }
+#endif
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+ "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
+ p_hwfn->rel_pf_id, p_hwfn->abs_pf_id,
+ p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
+}
+
+static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 port_mode;
+
+#ifndef ASIC_ONLY
+ /* Read the port mode */
+ if (CHIP_REV_IS_FPGA(p_dev))
+ port_mode = 4;
+ else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev))
+ /* In CMT on emulation, assume 1 port */
+ port_mode = 1;
+ else
+#endif
+ port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
+
+ if (port_mode < 3) {
+ p_dev->num_ports_in_engine = 1;
+ } else if (port_mode <= 5) {
+ p_dev->num_ports_in_engine = 2;
+ } else {
+ DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
+ p_dev->num_ports_in_engine);
+
+ /* Default num_ports_in_engine to something */
+ p_dev->num_ports_in_engine = 1;
+ }
+}
+
+static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 port;
+ int i;
+
+ p_dev->num_ports_in_engine = 0;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
+ switch ((port & 0xf000) >> 12) {
+ case 1:
+ p_dev->num_ports_in_engine = 1;
+ break;
+ case 3:
+ p_dev->num_ports_in_engine = 2;
+ break;
+ case 0xf:
+ p_dev->num_ports_in_engine = 4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unknown port mode in ECO_RESERVED %08x\n",
+ port);
+ }
+ } else
+#endif
+ for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
+ port = ecore_rd(p_hwfn, p_ptt,
+ CNIG_REG_NIG_PORT0_CONF_K2_E5 +
+ (i * 4));
+ if (port & 1)
+ p_dev->num_ports_in_engine++;
+ }
+
+ if (!p_dev->num_ports_in_engine) {
+ DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n");
+
+ /* Default num_ports_in_engine to something */
+ p_dev->num_ports_in_engine = 1;
+ }
+}
+
+static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+ /* Determine the number of ports per engine */
+ if (ECORE_IS_BB(p_dev))
+ ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
+ else
+ ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
+
+ /* Get the total number of ports of the device */
+ if (ECORE_IS_CMT(p_dev)) {
+ /* In CMT there is always only one port */
+ p_dev->num_ports = 1;
+#ifndef ASIC_ONLY
+ } else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) {
+ p_dev->num_ports = p_dev->num_ports_in_engine *
+ ecore_device_num_engines(p_dev);
+#endif
+ } else {
+ u32 addr, global_offsize, global_addr;
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + OFFSETOF(struct public_global, max_ports);
+ p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr);
+ }
+}
+
+static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_link_capabilities *p_caps;
+ u32 eee_status;
+
+ p_caps = &p_hwfn->mcp_info->link_capabilities;
+ if (p_caps->default_eee == ECORE_MCP_EEE_UNSUPPORTED)
+ return;
+
+ p_caps->eee_speed_caps = 0;
+ eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, eee_status));
+ eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
+ EEE_SUPPORTED_SPEED_OFFSET;
+ if (eee_status & EEE_1G_SUPPORTED)
+ p_caps->eee_speed_caps |= ECORE_EEE_1G_ADV;
+ if (eee_status & EEE_10G_ADV)
+ p_caps->eee_speed_caps |= ECORE_EEE_10G_ADV;
+}
+
+static enum _ecore_status_t
+ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_pci_personality personality,
+ struct ecore_hw_prepare_params *p_params)
+{
+ bool drv_resc_alloc = p_params->drv_resc_alloc;
+ enum _ecore_status_t rc;
+
+ if (IS_ECORE_PACING(p_hwfn)) {
+ DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_IOV,
+ "Skipping IOV as packet pacing is requested\n");
+ }
+
+ /* Since all information is common, only first hwfns should do this */
+ if (IS_LEAD_HWFN(p_hwfn) && !IS_ECORE_PACING(p_hwfn)) {
+ rc = ecore_iov_hw_info(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_BAD_IOV;
+ else
+ return rc;
+ }
+ }
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ ecore_hw_info_port_num(p_hwfn, p_ptt);
+
+ ecore_mcp_get_capabilities(p_hwfn, p_ptt);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
+#endif
+ rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+#ifndef ASIC_ONLY
+ }
+#endif
+
+ rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU;
+ else
+ return rc;
+ }
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
+#endif
+ OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr,
+ p_hwfn->mcp_info->func_info.mac, ETH_ALEN);
+#ifndef ASIC_ONLY
+ } else {
+ static u8 mcp_hw_mac[6] = { 0, 2, 3, 4, 5, 6 };
+
+ OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN);
+ p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id;
+ }
+#endif
+
+ if (ecore_mcp_is_init(p_hwfn)) {
+ if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET)
+ p_hwfn->hw_info.ovlan =
+ p_hwfn->mcp_info->func_info.ovlan;
+
+ ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+
+ ecore_mcp_get_eee_caps(p_hwfn, p_ptt);
+
+ ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
+ }
+
+ if (personality != ECORE_PCI_DEFAULT) {
+ p_hwfn->hw_info.personality = personality;
+ } else if (ecore_mcp_is_init(p_hwfn)) {
+ enum ecore_pci_personality protocol;
+
+ protocol = p_hwfn->mcp_info->func_info.protocol;
+ p_hwfn->hw_info.personality = protocol;
+ }
+
+#ifndef ASIC_ONLY
+ /* To overcome ILT lack for emulation, until at least until we'll have
+ * a definite answer from system about it, allow only PF0 to be RoCE.
+ */
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
+ if (!p_hwfn->rel_pf_id)
+ p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
+ else
+ p_hwfn->hw_info.personality = ECORE_PCI_ETH;
+ }
+#endif
+
+ /* although in BB some constellations may support more than 4 tcs,
+ * that can result in performance penalty in some cases. 4
+ * represents a good tradeoff between performance and flexibility.
+ */
+ if (IS_ECORE_PACING(p_hwfn))
+ p_hwfn->hw_info.num_hw_tc = 1;
+ else
+ p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+
+ /* start out with a single active tc. This can be increased either
+ * by dcbx negotiation or by upper layer driver
+ */
+ p_hwfn->hw_info.num_active_tc = 1;
+
+ ecore_get_num_funcs(p_hwfn, p_ptt);
+
+ if (ecore_mcp_is_init(p_hwfn))
+ p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
+
+ /* In case of forcing the driver's default resource allocation, calling
+ * ecore_hw_get_resc() should come after initializing the personality
+ * and after getting the number of functions, since the calculation of
+ * the resources/features depends on them.
+ * This order is not harmful if not forcing.
+ */
+ rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
+ rc = ECORE_SUCCESS;
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u16 device_id_mask;
+ u32 tmp;
+
+ /* Read Vendor Id / Device Id */
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET,
+ &p_dev->vendor_id);
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET,
+ &p_dev->device_id);
+
+ /* Determine type */
+ device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK;
+ switch (device_id_mask) {
+ case ECORE_DEV_ID_MASK_BB:
+ p_dev->type = ECORE_DEV_TYPE_BB;
+ break;
+ case ECORE_DEV_ID_MASK_AH:
+ p_dev->type = ECORE_DEV_TYPE_AH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n",
+ p_dev->device_id);
+ return ECORE_ABORTED;
+ }
+
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
+ p_dev->chip_num = (u16)GET_FIELD(tmp, CHIP_NUM);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+ p_dev->chip_rev = (u8)GET_FIELD(tmp, CHIP_REV);
+
+ /* Learn number of HW-functions */
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
+
+ if (tmp & (1 << p_hwfn->rel_pf_id)) {
+ DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
+ p_dev->num_hwfns = 2;
+ } else {
+ p_dev->num_hwfns = 1;
+ }
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ /* For some reason we have problems with this register
+ * in B0 emulation; Simply assume no CMT
+ */
+ DP_NOTICE(p_dev->hwfns, false,
+ "device on emul - assume no CMT\n");
+ p_dev->num_hwfns = 1;
+ }
+#endif
+
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_TEST_REG);
+ p_dev->chip_bond_id = (u8)GET_FIELD(tmp, CHIP_BOND_ID);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
+ p_dev->chip_metal = (u8)GET_FIELD(tmp, CHIP_METAL);
+
+ DP_INFO(p_dev->hwfns,
+ "Chip details - %s %c%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n",
+ ECORE_IS_BB(p_dev) ? "BB" : "AH",
+ 'A' + p_dev->chip_rev, (int)p_dev->chip_metal,
+ p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
+ p_dev->chip_metal);
+
+ if (ECORE_IS_BB_A0(p_dev)) {
+ DP_NOTICE(p_dev->hwfns, false,
+ "The chip type/rev (BB A0) is not supported!\n");
+ return ECORE_ABORTED;
+ }
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
+
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
+ if (tmp & (1 << 29)) {
+ DP_NOTICE(p_hwfn, false,
+ "Emulation: Running on a FULL build\n");
+ p_dev->b_is_emul_full = true;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "Emulation: Running on a REDUCED build\n");
+ }
+ }
+#endif
+
+ return ECORE_SUCCESS;
+}
+
+#ifndef LINUX_REMOVE
+void ecore_prepare_hibernate(struct ecore_dev *p_dev)
+{
+ int j;
+
+ if (IS_VF(p_dev))
+ return;
+
+ for_each_hwfn(p_dev, j) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
+ "Mark hw/fw uninitialized\n");
+
+ p_hwfn->hw_init_done = false;
+
+ ecore_ptt_invalidate(p_hwfn);
+ }
+}
+#endif
+
+static enum _ecore_status_t
+ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM * p_regview,
+ void OSAL_IOMEM * p_doorbells,
+ struct ecore_hw_prepare_params *p_params)
+{
+ struct ecore_mdump_retain_data mdump_retain;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ struct ecore_mdump_info mdump_info;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Split PCI bars evenly between hwfns */
+ p_hwfn->regview = p_regview;
+ p_hwfn->doorbells = p_doorbells;
+
+ if (IS_VF(p_dev))
+ return ecore_vf_hw_prepare(p_hwfn);
+
+ /* Validate that chip access is feasible */
+ if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
+ DP_ERR(p_hwfn,
+ "Reading the ME register returns all Fs; Preventing further chip access\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME;
+ return ECORE_INVAL;
+ }
+
+ get_function_id(p_hwfn);
+
+ /* Allocate PTT pool */
+ rc = ecore_ptt_pool_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
+ goto err0;
+ }
+
+ /* Allocate the main PTT */
+ p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
+ /* First hwfn learns basic information, e.g., number of hwfns */
+ if (!p_hwfn->my_id) {
+ rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_FAILED_DEV;
+ goto err1;
+ }
+ }
+
+ ecore_hw_hwfn_prepare(p_hwfn);
+
+ /* Initialize MCP structure */
+ rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
+ goto err1;
+ }
+
+ /* Read the device configuration information from the HW and SHMEM */
+ rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
+ p_params->personality, p_params);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false, "Failed to get HW information\n");
+ goto err2;
+ }
+
+ /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is
+ * called, since among others it sets the ports number in an engine.
+ */
+ if (p_params->initiate_pf_flr && IS_LEAD_HWFN(p_hwfn) &&
+ !p_dev->recov_in_prog) {
+ rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+ }
+
+ /* Check if mdump logs/data are present and update the epoch value */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+#ifndef ASIC_ONLY
+ if (!CHIP_REV_IS_EMUL(p_dev)) {
+#endif
+ rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
+ &mdump_info);
+ if (rc == ECORE_SUCCESS && mdump_info.num_of_logs)
+ DP_NOTICE(p_hwfn, false,
+ "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
+
+ rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt,
+ &mdump_retain);
+ if (rc == ECORE_SUCCESS && mdump_retain.valid)
+ DP_NOTICE(p_hwfn, false,
+ "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n",
+ mdump_retain.epoch, mdump_retain.pf,
+ mdump_retain.status);
+
+ ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
+ p_params->epoch);
+#ifndef ASIC_ONLY
+ }
+#endif
+ }
+
+ /* Allocate the init RT array and initialize the init-ops engine */
+ rc = ecore_init_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
+ goto err2;
+ }
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: workaround; Prevent DMAE parities\n");
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5,
+ 7);
+
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: workaround: Set VF bar0 size\n");
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4);
+ }
+#endif
+
+ return rc;
+err2:
+ if (IS_LEAD_HWFN(p_hwfn))
+ ecore_iov_free_hw_info(p_dev);
+ ecore_mcp_free(p_hwfn);
+err1:
+ ecore_hw_hwfn_free(p_hwfn);
+err0:
+ return rc;
+}
+
+enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
+ struct ecore_hw_prepare_params *p_params)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ enum _ecore_status_t rc;
+
+ p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
+ p_dev->allow_mdump = p_params->allow_mdump;
+ p_hwfn->b_en_pacing = p_params->b_en_pacing;
+
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
+
+ /* Store the precompiled init data ptrs */
+ if (IS_PF(p_dev))
+ ecore_init_iro_array(p_dev);
+
+ /* Initialize the first hwfn - will learn number of hwfns */
+ rc = ecore_hw_prepare_single(p_hwfn,
+ p_dev->regview,
+ p_dev->doorbells, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_params->personality = p_hwfn->hw_info.personality;
+
+ /* initilalize 2nd hwfn if necessary */
+ if (ECORE_IS_CMT(p_dev)) {
+ void OSAL_IOMEM *p_regview, *p_doorbell;
+ u8 OSAL_IOMEM *addr;
+
+ /* adjust bar offset for second engine */
+ addr = (u8 OSAL_IOMEM *)p_dev->regview +
+ ecore_hw_bar_size(p_hwfn,
+ p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
+ p_regview = (void OSAL_IOMEM *)addr;
+
+ addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
+ ecore_hw_bar_size(p_hwfn,
+ p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
+ p_doorbell = (void OSAL_IOMEM *)addr;
+
+ p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing;
+ /* prepare second hw function */
+ rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
+ p_doorbell, p_params);
+
+ /* in case of error, need to free the previously
+ * initiliazed hwfn 0.
+ */
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_FAILED_ENG2;
+
+ if (IS_PF(p_dev)) {
+ ecore_init_free(p_hwfn);
+ ecore_mcp_free(p_hwfn);
+ ecore_hw_hwfn_free(p_hwfn);
+ } else {
+ DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails\n");
+ }
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+void ecore_hw_remove(struct ecore_dev *p_dev)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ int i;
+
+ if (IS_PF(p_dev))
+ ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
+ ECORE_OV_DRIVER_STATE_NOT_LOADED);
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_pf_release(p_hwfn);
+ continue;
+ }
+
+ ecore_init_free(p_hwfn);
+ ecore_hw_hwfn_free(p_hwfn);
+ ecore_mcp_free(p_hwfn);
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
+#endif
+ }
+
+ ecore_iov_free_hw_info(p_dev);
+}
+
+static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain)
+{
+ void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL;
+ dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
+ struct ecore_chain_next *p_next;
+ u32 size, i;
+
+ if (!p_virt)
+ return;
+
+ size = p_chain->elem_size * p_chain->usable_per_page;
+
+ for (i = 0; i < p_chain->page_cnt; i++) {
+ if (!p_virt)
+ break;
+
+ p_next = (struct ecore_chain_next *)((u8 *)p_virt + size);
+ p_virt_next = p_next->next_virt;
+ p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
+
+ OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys,
+ ECORE_CHAIN_PAGE_SIZE);
+
+ p_virt = p_virt_next;
+ p_phys = p_phys_next;
+ }
+}
+
+static void ecore_chain_free_single(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain)
+{
+ if (!p_chain->p_virt_addr)
+ return;
+
+ OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr,
+ p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE);
+}
+
+static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain)
+{
+ void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+ u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table;
+ u32 page_cnt = p_chain->page_cnt, i, pbl_size;
+
+ if (!pp_virt_addr_tbl)
+ return;
+
+ if (!p_pbl_virt)
+ goto out;
+
+ for (i = 0; i < page_cnt; i++) {
+ if (!pp_virt_addr_tbl[i])
+ break;
+
+ OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i],
+ *(dma_addr_t *)p_pbl_virt,
+ ECORE_CHAIN_PAGE_SIZE);
+
+ p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
+ }
+
+ pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
+
+ if (!p_chain->b_external_pbl)
+ OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
+ p_chain->pbl_sp.p_phys_table, pbl_size);
+out:
+ OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
+}
+
+void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+ switch (p_chain->mode) {
+ case ECORE_CHAIN_MODE_NEXT_PTR:
+ ecore_chain_free_next_ptr(p_dev, p_chain);
+ break;
+ case ECORE_CHAIN_MODE_SINGLE:
+ ecore_chain_free_single(p_dev, p_chain);
+ break;
+ case ECORE_CHAIN_MODE_PBL:
+ ecore_chain_free_pbl(p_dev, p_chain);
+ break;
+ }
+}
+
+static enum _ecore_status_t
+ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
+ enum ecore_chain_cnt_type cnt_type,
+ osal_size_t elem_size, u32 page_cnt)
+{
+ u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
+
+ /* The actual chain size can be larger than the maximal possible value
+ * after rounding up the requested elements number to pages, and after
+ * taking into acount the unusuable elements (next-ptr elements).
+ * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+ * size/capacity fields are of a u32 type.
+ */
+ if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 &&
+ chain_size > ((u32)ECORE_U16_MAX + 1)) ||
+ (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
+ chain_size > ECORE_U32_MAX)) {
+ DP_NOTICE(p_dev, true,
+ "The actual chain size (0x%lx) is larger than the maximal possible value\n",
+ (unsigned long)chain_size);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+ void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL;
+ dma_addr_t p_phys = 0;
+ u32 i;
+
+ for (i = 0; i < p_chain->page_cnt; i++) {
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
+ ECORE_CHAIN_PAGE_SIZE);
+ if (!p_virt) {
+ DP_NOTICE(p_dev, false,
+ "Failed to allocate chain memory\n");
+ return ECORE_NOMEM;
+ }
+
+ if (i == 0) {
+ ecore_chain_init_mem(p_chain, p_virt, p_phys);
+ ecore_chain_reset(p_chain);
+ } else {
+ ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+ p_virt, p_phys);
+ }
+
+ p_virt_prev = p_virt;
+ }
+ /* Last page's next element should point to the beginning of the
+ * chain.
+ */
+ ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+ p_chain->p_virt_addr,
+ p_chain->p_phys_addr);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+ dma_addr_t p_phys = 0;
+ void *p_virt = OSAL_NULL;
+
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
+ if (!p_virt) {
+ DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n");
+ return ECORE_NOMEM;
+ }
+
+ ecore_chain_init_mem(p_chain, p_virt, p_phys);
+ ecore_chain_reset(p_chain);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl)
+{
+ u32 page_cnt = p_chain->page_cnt, size, i;
+ dma_addr_t p_phys = 0, p_pbl_phys = 0;
+ void **pp_virt_addr_tbl = OSAL_NULL;
+ u8 *p_pbl_virt = OSAL_NULL;
+ void *p_virt = OSAL_NULL;
+
+ size = page_cnt * sizeof(*pp_virt_addr_tbl);
+ pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
+ if (!pp_virt_addr_tbl) {
+ DP_NOTICE(p_dev, false,
+ "Failed to allocate memory for the chain virtual addresses table\n");
+ return ECORE_NOMEM;
+ }
+
+ /* The allocation of the PBL table is done with its full size, since it
+ * is expected to be successive.
+ * ecore_chain_init_pbl_mem() is called even in a case of an allocation
+ * failure, since pp_virt_addr_tbl was previously allocated, and it
+ * should be saved to allow its freeing during the error flow.
+ */
+ size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
+
+ if (ext_pbl == OSAL_NULL) {
+ p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
+ } else {
+ p_pbl_virt = ext_pbl->p_pbl_virt;
+ p_pbl_phys = ext_pbl->p_pbl_phys;
+ p_chain->b_external_pbl = true;
+ }
+
+ ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
+ pp_virt_addr_tbl);
+ if (!p_pbl_virt) {
+ DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n");
+ return ECORE_NOMEM;
+ }
+
+ for (i = 0; i < page_cnt; i++) {
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
+ ECORE_CHAIN_PAGE_SIZE);
+ if (!p_virt) {
+ DP_NOTICE(p_dev, false,
+ "Failed to allocate chain memory\n");
+ return ECORE_NOMEM;
+ }
+
+ if (i == 0) {
+ ecore_chain_init_mem(p_chain, p_virt, p_phys);
+ ecore_chain_reset(p_chain);
+ }
+
+ /* Fill the PBL table with the physical address of the page */
+ *(dma_addr_t *)p_pbl_virt = p_phys;
+ /* Keep the virtual address of the page */
+ p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+
+ p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
+ enum ecore_chain_use_mode intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type,
+ u32 num_elems, osal_size_t elem_size,
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl)
+{
+ u32 page_cnt;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (mode == ECORE_CHAIN_MODE_SINGLE)
+ page_cnt = 1;
+ else
+ page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+
+ rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
+ page_cnt);
+ if (rc) {
+ DP_NOTICE(p_dev, false,
+ "Cannot allocate a chain with the given arguments:\n"
+ "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
+ intended_use, mode, cnt_type, num_elems, elem_size);
+ return rc;
+ }
+
+ ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
+ mode, cnt_type, p_dev->dp_ctx);
+
+ switch (mode) {
+ case ECORE_CHAIN_MODE_NEXT_PTR:
+ rc = ecore_chain_alloc_next_ptr(p_dev, p_chain);
+ break;
+ case ECORE_CHAIN_MODE_SINGLE:
+ rc = ecore_chain_alloc_single(p_dev, p_chain);
+ break;
+ case ECORE_CHAIN_MODE_PBL:
+ rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl);
+ break;
+ }
+ if (rc)
+ goto nomem;
+
+ return ECORE_SUCCESS;
+
+nomem:
+ ecore_chain_free(p_dev, p_chain);
+ return rc;
+}
+
+enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
+ u16 src_id, u16 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+ u16 min, max;
+
+ min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
+ max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+ DP_NOTICE(p_hwfn, true,
+ "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return ECORE_INVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+ max = min + RESC_NUM(p_hwfn, ECORE_VPORT);
+ DP_NOTICE(p_hwfn, true,
+ "vport id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return ECORE_INVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
+ max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
+ DP_NOTICE(p_hwfn, true,
+ "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return ECORE_INVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id;
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 high, u32 low,
+ u32 *p_entry_num)
+{
+ u32 en;
+ int i;
+
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32));
+ if (en)
+ continue;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32), low);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32), high);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32), 1);
+ break;
+ }
+
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_NORESOURCES;
+
+ *p_entry_num = i;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+ &p_hwfn->p_dev->mf_bits))
+ return ECORE_SUCCESS;
+
+ high = p_filter[1] | (p_filter[0] << 8);
+ low = p_filter[5] | (p_filter[4] << 8) |
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low,
+ &entry_num);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to find an empty LLH filter to utilize\n");
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at %d\n",
+ p_filter[0], p_filter[1], p_filter[2], p_filter[3],
+ p_filter[4], p_filter[5], entry_num);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 high, u32 low,
+ u32 *p_entry_num)
+{
+ int i;
+
+ /* Find the entry and clean it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32), 0);
+ break;
+ }
+
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_INVAL;
+
+ *p_entry_num = i;
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+ &p_hwfn->p_dev->mf_bits))
+ return;
+
+ high = p_filter[1] | (p_filter[0] << 8);
+ low = p_filter[5] | (p_filter[4] << 8) |
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high,
+ low, &entry_num);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Tried to remove a non-configured filter\n");
+ return;
+ }
+
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from %d\n",
+ p_filter[0], p_filter[1], p_filter[2], p_filter[3],
+ p_filter[4], p_filter[5], entry_num);
+}
+
+static enum _ecore_status_t
+ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_llh_port_filter_type_t type,
+ u32 high, u32 low, u32 *p_entry_num)
+{
+ u32 en;
+ int i;
+
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32));
+ if (en)
+ continue;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32), low);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32), high);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32), 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
+ i * sizeof(u32), 1 << type);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1);
+ break;
+ }
+
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_NORESOURCES;
+
+ *p_entry_num = i;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum ecore_llh_port_filter_type_t type)
+{
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+ &p_hwfn->p_dev->mf_bits))
+ return rc;
+
+ high = 0;
+ low = 0;
+
+ switch (type) {
+ case ECORE_LLH_FILTER_ETHERTYPE:
+ high = source_port_or_eth_type;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_PORT:
+ low = source_port_or_eth_type << 16;
+ break;
+ case ECORE_LLH_FILTER_TCP_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_DEST_PORT:
+ low = dest_port;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ low = (source_port_or_eth_type << 16) | dest_port;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true,
+ "Non valid LLH protocol filter type %d\n", type);
+ return ECORE_INVAL;
+ }
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
+ high, low, &entry_num);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to find an empty LLH filter to utilize\n");
+ return rc;
+ }
+ switch (type) {
+ case ECORE_LLH_FILTER_ETHERTYPE:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "ETH type %x is added at %d\n",
+ source_port_or_eth_type, entry_num);
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "TCP src port %x is added at %d\n",
+ source_port_or_eth_type, entry_num);
+ break;
+ case ECORE_LLH_FILTER_UDP_SRC_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "UDP src port %x is added at %d\n",
+ source_port_or_eth_type, entry_num);
+ break;
+ case ECORE_LLH_FILTER_TCP_DEST_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "TCP dst port %x is added at %d\n", dest_port,
+ entry_num);
+ break;
+ case ECORE_LLH_FILTER_UDP_DEST_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "UDP dst port %x is added at %d\n", dest_port,
+ entry_num);
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "TCP src/dst ports %x/%x are added at %d\n",
+ source_port_or_eth_type, dest_port, entry_num);
+ break;
+ case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "UDP src/dst ports %x/%x are added at %d\n",
+ source_port_or_eth_type, dest_port, entry_num);
+ break;
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_llh_port_filter_type_t type,
+ u32 high, u32 low, u32 *p_entry_num)
+{
+ int i;
+
+ /* Find the entry and clean it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32)))
+ continue;
+ if (!ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32)))
+ continue;
+ if (!(ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
+ i * sizeof(u32)) & (1 << type)))
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32), 0);
+ break;
+ }
+
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_INVAL;
+
+ *p_entry_num = i;
+
+ return ECORE_SUCCESS;
+}
+
+void
+ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum ecore_llh_port_filter_type_t type)
+{
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+ &p_hwfn->p_dev->mf_bits))
+ return;
+
+ high = 0;
+ low = 0;
+
+ switch (type) {
+ case ECORE_LLH_FILTER_ETHERTYPE:
+ high = source_port_or_eth_type;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_PORT:
+ low = source_port_or_eth_type << 16;
+ break;
+ case ECORE_LLH_FILTER_TCP_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_DEST_PORT:
+ low = dest_port;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ low = (source_port_or_eth_type << 16) | dest_port;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true,
+ "Non valid LLH protocol filter type %d\n", type);
+ return;
+ }
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
+ high, low,
+ &entry_num);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n",
+ type, source_port_or_eth_type, dest_port);
+ return;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was removed from %d\n",
+ type, source_port_or_eth_type, dest_port, entry_num);
+}
+
+static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32), 0);
+ }
+}
+
+void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+ &p_hwfn->p_dev->mf_bits) &&
+ !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+ &p_hwfn->p_dev->mf_bits))
+ return;
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt);
+}
+
+enum _ecore_status_t
+ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) {
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR,
+ 1 << p_hwfn->abs_pf_id / 2);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0);
+ return ECORE_SUCCESS;
+ }
+
+ DP_NOTICE(p_hwfn, false,
+ "This function can't be set as default\n");
+ return ECORE_INVAL;
+}
+
+static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr, void *p_eth_qzone,
+ osal_size_t eth_qzone_size,
+ u8 timeset)
+{
+ struct coalescing_timeset *p_coal_timeset;
+
+ if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
+ DP_NOTICE(p_hwfn, true,
+ "Coalescing configuration not enabled\n");
+ return ECORE_INVAL;
+ }
+
+ p_coal_timeset = p_eth_qzone;
+ OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
+ ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ void *p_handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_ptt *p_ptt;
+
+ /* TODO - Configuring a single queue's coalescing but
+ * claiming all queues are abiding same configuration
+ * for PF and VF both.
+ */
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal,
+ tx_coal, p_cid);
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ if (rx_coal) {
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc)
+ goto out;
+ p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
+ }
+
+ if (tx_coal) {
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
+ if (rc)
+ goto out;
+ p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
+ }
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ustorm_eth_queue_zone eth_qzone;
+ u8 timeset, timer_res;
+ u32 address;
+ enum _ecore_status_t rc;
+
+ /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return ECORE_INVAL;
+ }
+ timeset = (u8)(coalesce >> timer_res);
+
+ rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->sb_igu_id, false);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+
+ rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct ustorm_eth_queue_zone), timeset);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+out:
+ return rc;
+}
+
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid)
+{
+ struct xstorm_eth_queue_zone eth_qzone;
+ u8 timeset, timer_res;
+ u32 address;
+ enum _ecore_status_t rc;
+
+ /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return ECORE_INVAL;
+ }
+
+ timeset = (u8)(coalesce >> timer_res);
+
+ rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->sb_igu_id, true);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ address = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+
+ rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct xstorm_eth_queue_zone), timeset);
+out:
+ return rc;
+}
+
+/* Calculate final WFQ values for all vports and configure it.
+ * After this configuration each vport must have
+ * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT
+ */
+static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+ vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) /
+ min_pf_rate;
+ ecore_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
+
+static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn)
+{
+ int i;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
+ p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+}
+
+static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ ecore_init_wfq_default_param(p_hwfn);
+ ecore_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
+
+/* This function performs several validations for WFQ
+ * configuration and required min rate for a given vport
+ * 1. req_rate must be greater than one percent of min_pf_rate.
+ * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
+ * rates to get less than one percent of min_pf_rate.
+ * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
+ */
+static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
+ u16 vport_id, u32 req_rate,
+ u32 min_pf_rate)
+{
+ u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+ int non_requested_count = 0, req_count = 0, i, num_vports;
+
+ num_vports = p_hwfn->qm_info.num_vports;
+
+/* Accounting for the vports which are configured for WFQ explicitly */
+
+ for (i = 0; i < num_vports; i++) {
+ u32 tmp_speed;
+
+ if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) {
+ req_count++;
+ tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+ total_req_min_rate += tmp_speed;
+ }
+ }
+
+ /* Include current vport data as well */
+ req_count++;
+ total_req_min_rate += req_rate;
+ non_requested_count = num_vports - req_count;
+
+ /* validate possible error cases */
+ if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ vport_id, req_rate, min_pf_rate);
+ return ECORE_INVAL;
+ }
+
+ /* TBD - for number of vports greater than 100 */
+ if (num_vports > ECORE_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Number of vports is greater than %d\n",
+ ECORE_WFQ_UNIT);
+ return ECORE_INVAL;
+ }
+
+ if (total_req_min_rate > min_pf_rate) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+ total_req_min_rate, min_pf_rate);
+ return ECORE_INVAL;
+ }
+
+ /* Data left for non requested vports */
+ total_left_rate = min_pf_rate - total_req_min_rate;
+ left_rate_per_vp = total_left_rate / non_requested_count;
+
+ /* validate if non requested get < 1% of min bw */
+ if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ left_rate_per_vp, min_pf_rate);
+ return ECORE_INVAL;
+ }
+
+ /* now req_rate for given vport passes all scenarios.
+ * assign final wfq rates to all vports.
+ */
+ p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+ p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+ for (i = 0; i < num_vports; i++) {
+ if (p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 vp_id, u32 rate)
+{
+ struct ecore_mcp_link_state *p_link;
+ int rc = ECORE_SUCCESS;
+
+ p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;
+
+ if (!p_link->min_pf_rate) {
+ p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
+ p_hwfn->qm_info.wfq_data[vp_id].configured = true;
+ return rc;
+ }
+
+ rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
+
+ if (rc == ECORE_SUCCESS)
+ ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+ else
+ DP_NOTICE(p_hwfn, false,
+ "Validation failed while configuring min rate\n");
+
+ return rc;
+}
+
+static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ bool use_wfq = false;
+ int rc = ECORE_SUCCESS;
+ u16 i;
+
+ /* Validate all pre configured vports for wfq */
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 rate;
+
+ if (!p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+ use_wfq = true;
+
+ rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "WFQ validation failed while configuring min rate\n");
+ break;
+ }
+ }
+
+ if (rc == ECORE_SUCCESS && use_wfq)
+ ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+ else
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+/* Main API for ecore clients to configure vport min rate.
+ * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
+ * rate - Speed in Mbps needs to be assigned to a given vport.
+ */
+int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
+{
+ int i, rc = ECORE_INVAL;
+
+ /* TBD - for multiple hardware functions - that is 100 gig */
+ if (ECORE_IS_CMT(p_dev)) {
+ DP_NOTICE(p_dev, false,
+ "WFQ configuration is not supported for this device\n");
+ return rc;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_ptt *p_ptt;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_TIMEOUT;
+
+ rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
+
+ if (rc != ECORE_SUCCESS) {
+ ecore_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+/* API to configure WFQ from mcp link change */
+void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+ struct ecore_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ int i;
+
+ /* TBD - for multiple hardware functions - that is 100 gig */
+ if (ECORE_IS_CMT(p_dev)) {
+ DP_VERBOSE(p_dev, ECORE_MSG_LINK,
+ "WFQ configuration is not supported for this device\n");
+ return;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
+ min_pf_rate);
+ }
+}
+
+int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link,
+ u8 max_bw)
+{
+ int rc = ECORE_SUCCESS;
+
+ p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+ if (!p_link->line_speed && (max_bw != 100))
+ return rc;
+
+ p_link->speed = (p_link->line_speed * max_bw) / 100;
+ p_hwfn->qm_info.pf_rl = p_link->speed;
+
+ /* Since the limiter also affects Tx-switched traffic, we don't want it
+ * to limit such traffic in case there's no actual limit.
+ * In that case, set limit to imaginary high boundary.
+ */
+ if (max_bw == 100)
+ p_hwfn->qm_info.pf_rl = 100000;
+
+ rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_hwfn->qm_info.pf_rl);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Configured MAX bandwidth to be %08x Mb/sec\n",
+ p_link->speed);
+
+ return rc;
+}
+
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
+{
+ int i, rc = ECORE_INVAL;
+
+ if (max_bw < 1 || max_bw > 100) {
+ DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_link_state *p_link;
+ struct ecore_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_TIMEOUT;
+
+ rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+ p_link, max_bw);
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ if (rc != ECORE_SUCCESS)
+ break;
+ }
+
+ return rc;
+}
+
+int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link,
+ u8 min_bw)
+{
+ int rc = ECORE_SUCCESS;
+
+ p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+ p_hwfn->qm_info.pf_wfq = min_bw;
+
+ if (!p_link->line_speed)
+ return rc;
+
+ p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+ rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Configured MIN bandwidth to be %d Mb/sec\n",
+ p_link->min_pf_rate);
+
+ return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
+{
+ int i, rc = ECORE_INVAL;
+
+ if (min_bw < 1 || min_bw > 100) {
+ DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_link_state *p_link;
+ struct ecore_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_TIMEOUT;
+
+ rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+ p_link, min_bw);
+ if (rc != ECORE_SUCCESS) {
+ ecore_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ if (p_link->min_pf_rate) {
+ u32 min_rate = p_link->min_pf_rate;
+
+ rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
+ p_ptt,
+ min_rate);
+ }
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_link_state *p_link;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+
+ if (p_link->min_pf_rate)
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
+
+ OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
+ sizeof(*p_hwfn->qm_info.wfq_data) *
+ p_hwfn->qm_info.num_vports);
+}
+
+int ecore_device_num_engines(struct ecore_dev *p_dev)
+{
+ return ECORE_IS_BB(p_dev) ? 2 : 1;
+}
+
+int ecore_device_num_ports(struct ecore_dev *p_dev)
+{
+ return p_dev->num_ports;
+}
+
+void ecore_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid,
+ __le16 *fw_lsb,
+ u8 *mac)
+{
+ ((u8 *)fw_msb)[0] = mac[1];
+ ((u8 *)fw_msb)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lsb)[0] = mac[5];
+ ((u8 *)fw_lsb)[1] = mac[4];
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_dev_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_dev_api.h
new file mode 100644
index 00000000..02bacc22
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_dev_api.h
@@ -0,0 +1,704 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_DEV_API_H__
+#define __ECORE_DEV_API_H__
+
+#include "ecore_status.h"
+#include "ecore_chain.h"
+#include "ecore_int_api.h"
+
+/**
+ * @brief ecore_init_dp - initialize the debug level
+ *
+ * @param p_dev
+ * @param dp_module
+ * @param dp_level
+ * @param dp_ctx
+ */
+void ecore_init_dp(struct ecore_dev *p_dev,
+ u32 dp_module,
+ u8 dp_level,
+ void *dp_ctx);
+
+/**
+ * @brief ecore_init_struct - initialize the device structure to
+ * its defaults
+ *
+ * @param p_dev
+ */
+enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_free -
+ *
+ * @param p_dev
+ */
+void ecore_resc_free(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_alloc -
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_setup -
+ *
+ * @param p_dev
+ */
+void ecore_resc_setup(struct ecore_dev *p_dev);
+
+enum ecore_mfw_timeout_fallback {
+ ECORE_TO_FALLBACK_TO_NONE,
+ ECORE_TO_FALLBACK_TO_DEFAULT,
+ ECORE_TO_FALLBACK_FAIL_LOAD,
+};
+
+enum ecore_override_force_load {
+ ECORE_OVERRIDE_FORCE_LOAD_NONE,
+ ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
+ ECORE_OVERRIDE_FORCE_LOAD_NEVER,
+};
+
+struct ecore_drv_load_params {
+ /* Indicates whether the driver is running over a crash kernel.
+ * As part of the load request, this will be used for providing the
+ * driver role to the MFW.
+ * In case of a crash kernel over PDA - this should be set to false.
+ */
+ bool is_crash_kernel;
+
+ /* The timeout value that the MFW should use when locking the engine for
+ * the driver load process.
+ * A value of '0' means the default value, and '255' means no timeout.
+ */
+ u8 mfw_timeout_val;
+#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0
+#define ECORE_LOAD_REQ_LOCK_TO_NONE 255
+
+ /* Action to take in case the MFW doesn't support timeout values other
+ * than default and none.
+ */
+ enum ecore_mfw_timeout_fallback mfw_timeout_fallback;
+
+ /* Avoid engine reset when first PF loads on it */
+ bool avoid_eng_reset;
+
+ /* Allow overriding the default force load behavior */
+ enum ecore_override_force_load override_force_load;
+};
+
+struct ecore_hw_init_params {
+ /* Tunneling parameters */
+ struct ecore_tunnel_info *p_tunn;
+
+ bool b_hw_start;
+
+ /* Interrupt mode [msix, inta, etc.] to use */
+ enum ecore_int_mode int_mode;
+
+ /* NPAR tx switching to be used for vports configured for tx-switching
+ */
+ bool allow_npar_tx_switch;
+
+ /* Binary fw data pointer in binary fw file */
+ const u8 *bin_fw_data;
+
+ /* Driver load parameters */
+ struct ecore_drv_load_params *p_drv_load_params;
+
+ /* SPQ block timeout in msec */
+ u32 spq_timeout_ms;
+};
+
+/**
+ * @brief ecore_hw_init -
+ *
+ * @param p_dev
+ * @param p_params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
+ struct ecore_hw_init_params *p_params);
+
+/**
+ * @brief ecore_hw_timers_stop_all -
+ *
+ * @param p_dev
+ *
+ * @return void
+ */
+void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_stop -
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_stop_fastpath -should be called incase
+ * slowpath is still required for the device,
+ * but fastpath is not.
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
+
+#ifndef LINUX_REMOVE
+/**
+ * @brief ecore_prepare_hibernate -should be called when
+ * the system is going into the hibernate state
+ *
+ * @param p_dev
+ *
+ */
+void ecore_prepare_hibernate(struct ecore_dev *p_dev);
+
+enum ecore_db_rec_width {
+ DB_REC_WIDTH_32B,
+ DB_REC_WIDTH_64B,
+};
+
+enum ecore_db_rec_space {
+ DB_REC_KERNEL,
+ DB_REC_USER,
+};
+
+/**
+ * @brief db_recovery_add - add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @param p_dev
+ * @param db_addr - doorbell address
+ * @param db_data - address of where db_data is stored
+ * @param db_width - doorbell is 32b pr 64b
+ * @param db_space - doorbell recovery addresses are user or kernel space
+ */
+enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data,
+ enum ecore_db_rec_width db_width,
+ enum ecore_db_rec_space db_space);
+
+/**
+ * @brief db_recovery_del - remove doorbell information from the doorbell
+ * recovery mechanism. db_data serves as key (db_addr is not unique).
+ *
+ * @param cdev
+ * @param db_addr - doorbell address
+ * @param db_data - address where db_data is stored. Serves as key for the
+ * entry to delete.
+ */
+enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data);
+
+static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn)
+{
+ return !!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits);
+}
+
+#endif
+
+/**
+ * @brief ecore_hw_start_fastpath -restart fastpath traffic,
+ * only if hw_stop_fastpath was called
+
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
+
+enum ecore_hw_prepare_result {
+ ECORE_HW_PREPARE_SUCCESS,
+
+ /* FAILED results indicate probe has failed & cleaned up */
+ ECORE_HW_PREPARE_FAILED_ENG2,
+ ECORE_HW_PREPARE_FAILED_ME,
+ ECORE_HW_PREPARE_FAILED_MEM,
+ ECORE_HW_PREPARE_FAILED_DEV,
+ ECORE_HW_PREPARE_FAILED_NVM,
+
+ /* BAD results indicate probe is passed even though some wrongness
+ * has occurred; Trying to actually use [I.e., hw_init()] might have
+ * dire reprecautions.
+ */
+ ECORE_HW_PREPARE_BAD_IOV,
+ ECORE_HW_PREPARE_BAD_MCP,
+ ECORE_HW_PREPARE_BAD_IGU,
+};
+
+struct ecore_hw_prepare_params {
+ /* Personality to initialize */
+ int personality;
+
+ /* Force the driver's default resource allocation */
+ bool drv_resc_alloc;
+
+ /* Check the reg_fifo after any register access */
+ bool chk_reg_fifo;
+
+ /* Request the MFW to initiate PF FLR */
+ bool initiate_pf_flr;
+
+ /* The OS Epoch time in seconds */
+ u32 epoch;
+
+ /* Allow the MFW to collect a crash dump */
+ bool allow_mdump;
+
+ /* Allow prepare to pass even if some initializations are failing.
+ * If set, the `p_prepare_res' field would be set with the return,
+ * and might allow probe to pass even if there are certain issues.
+ */
+ bool b_relaxed_probe;
+ enum ecore_hw_prepare_result p_relaxed_res;
+
+ /* Enable/disable request by ecore client for pacing */
+ bool b_en_pacing;
+};
+
+/**
+ * @brief ecore_hw_prepare -
+ *
+ * @param p_dev
+ * @param p_params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
+ struct ecore_hw_prepare_params *p_params);
+
+/**
+ * @brief ecore_hw_remove -
+ *
+ * @param p_dev
+ */
+void ecore_hw_remove(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_ptt_acquire - Allocate a PTT window
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function)
+ *
+ * @param p_hwfn
+ *
+ * @return struct ecore_ptt
+ */
+struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_release - Release PTT Window
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+struct ecore_eth_stats_common {
+ u64 no_buff_discards;
+ u64 packet_too_big_discard;
+ u64 ttl0_discard;
+ u64 rx_ucast_bytes;
+ u64 rx_mcast_bytes;
+ u64 rx_bcast_bytes;
+ u64 rx_ucast_pkts;
+ u64 rx_mcast_pkts;
+ u64 rx_bcast_pkts;
+ u64 mftag_filter_discards;
+ u64 mac_filter_discards;
+ u64 tx_ucast_bytes;
+ u64 tx_mcast_bytes;
+ u64 tx_bcast_bytes;
+ u64 tx_ucast_pkts;
+ u64 tx_mcast_pkts;
+ u64 tx_bcast_pkts;
+ u64 tx_err_drop_pkts;
+ u64 tpa_coalesced_pkts;
+ u64 tpa_coalesced_events;
+ u64 tpa_aborts_num;
+ u64 tpa_not_coalesced_pkts;
+ u64 tpa_coalesced_bytes;
+
+ /* port */
+ u64 rx_64_byte_packets;
+ u64 rx_65_to_127_byte_packets;
+ u64 rx_128_to_255_byte_packets;
+ u64 rx_256_to_511_byte_packets;
+ u64 rx_512_to_1023_byte_packets;
+ u64 rx_1024_to_1518_byte_packets;
+ u64 rx_crc_errors;
+ u64 rx_mac_crtl_frames;
+ u64 rx_pause_frames;
+ u64 rx_pfc_frames;
+ u64 rx_align_errors;
+ u64 rx_carrier_errors;
+ u64 rx_oversize_packets;
+ u64 rx_jabbers;
+ u64 rx_undersize_packets;
+ u64 rx_fragments;
+ u64 tx_64_byte_packets;
+ u64 tx_65_to_127_byte_packets;
+ u64 tx_128_to_255_byte_packets;
+ u64 tx_256_to_511_byte_packets;
+ u64 tx_512_to_1023_byte_packets;
+ u64 tx_1024_to_1518_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 rx_mac_bytes;
+ u64 rx_mac_uc_packets;
+ u64 rx_mac_mc_packets;
+ u64 rx_mac_bc_packets;
+ u64 rx_mac_frames_ok;
+ u64 tx_mac_bytes;
+ u64 tx_mac_uc_packets;
+ u64 tx_mac_mc_packets;
+ u64 tx_mac_bc_packets;
+ u64 tx_mac_ctrl_frames;
+ u64 link_change_count;
+};
+
+struct ecore_eth_stats_bb {
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+};
+
+struct ecore_eth_stats_ah {
+ u64 rx_1519_to_max_byte_packets;
+ u64 tx_1519_to_max_byte_packets;
+};
+
+struct ecore_eth_stats {
+ struct ecore_eth_stats_common common;
+ union {
+ struct ecore_eth_stats_bb bb;
+ struct ecore_eth_stats_ah ah;
+ };
+};
+
+enum ecore_dmae_address_type_t {
+ ECORE_DMAE_ADDRESS_HOST_VIRT,
+ ECORE_DMAE_ADDRESS_HOST_PHYS,
+ ECORE_DMAE_ADDRESS_GRC
+};
+
+/* value of flags If ECORE_DMAE_FLAG_RW_REPL_SRC flag is set and the
+ * source is a block of length DMAE_MAX_RW_SIZE and the
+ * destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is
+ * used mostly to write a zeroed buffer to destination address
+ * using DMA
+ */
+#define ECORE_DMAE_FLAG_RW_REPL_SRC 0x00000001
+#define ECORE_DMAE_FLAG_VF_SRC 0x00000002
+#define ECORE_DMAE_FLAG_VF_DST 0x00000004
+#define ECORE_DMAE_FLAG_COMPLETION_DST 0x00000008
+
+struct ecore_dmae_params {
+ u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
+ u8 src_vfid;
+ u8 dst_vfid;
+};
+
+/**
+ * @brief ecore_dmae_host2grc - copy data from source addr to
+ * dmae registers using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param grc_addr (dmae_data_offset)
+ * @param size_in_dwords
+ * @param flags (one of the flags defined above)
+ */
+enum _ecore_status_t
+ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr,
+ u32 size_in_dwords,
+ u32 flags);
+
+/**
+ * @brief ecore_dmae_grc2host - Read data from dmae data offset
+ * to source address using the given ptt
+ *
+ * @param p_ptt
+ * @param grc_addr (dmae_data_offset)
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param flags - one of the flags defined above
+ */
+enum _ecore_status_t
+ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 grc_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords,
+ u32 flags);
+
+/**
+ * @brief ecore_dmae_host2host - copy data from to source address
+ * to a destination address (for SRIOV) using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param params
+ */
+enum _ecore_status_t
+ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords,
+ struct ecore_dmae_params *p_params);
+
+/**
+ * @brief ecore_chain_alloc - Allocate and initialize a chain
+ *
+ * @param p_hwfn
+ * @param intended_use
+ * @param mode
+ * @param num_elems
+ * @param elem_size
+ * @param p_chain
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_chain_alloc(struct ecore_dev *p_dev,
+ enum ecore_chain_use_mode intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type,
+ u32 num_elems,
+ osal_size_t elem_size,
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl);
+
+/**
+ * @brief ecore_chain_free - Free chain DMA memory
+ *
+ * @param p_hwfn
+ * @param p_chain
+ */
+void ecore_chain_free(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain);
+
+/**
+ * @@brief ecore_fw_l2_queue - Get absolute L2 queue ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
+ u16 src_id,
+ u16 *dst_id);
+
+/**
+ * @@brief ecore_fw_vport - Get absolute vport ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+/**
+ * @@brief ecore_fw_rss_eng - Get absolute RSS engine ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+/**
+ * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to add
+ */
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 *p_filter);
+
+/**
+ * @brief ecore_llh_remove_mac_filter - removes a MAC filtre from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to remove
+ */
+void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 *p_filter);
+
+enum ecore_llh_port_filter_type_t {
+ ECORE_LLH_FILTER_ETHERTYPE,
+ ECORE_LLH_FILTER_TCP_SRC_PORT,
+ ECORE_LLH_FILTER_TCP_DEST_PORT,
+ ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
+ ECORE_LLH_FILTER_UDP_SRC_PORT,
+ ECORE_LLH_FILTER_UDP_DEST_PORT,
+ ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
+};
+
+/**
+ * @brief ecore_llh_add_protocol_filter - configures a protocol filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_port_or_eth_type - source port or ethertype to add
+ * @param dest_port - destination port to add
+ * @param type - type of filters and comparing
+ */
+enum _ecore_status_t
+ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum ecore_llh_port_filter_type_t type);
+
+/**
+ * @brief ecore_llh_remove_protocol_filter - remove a protocol filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_port_or_eth_type - source port or ethertype to add
+ * @param dest_port - destination port to add
+ * @param type - type of filters and comparing
+ */
+void
+ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum ecore_llh_port_filter_type_t type);
+
+/**
+ * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_llh_set_function_as_default - set function as default per port
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t
+ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ *@brief Cleanup of previous driver remains prior to load
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ * @param is_vf - true iff cleanup is made for a VF.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 id,
+ bool is_vf);
+
+/**
+ * @brief ecore_get_queue_coalesce - Retrieve coalesce value for a given queue.
+ *
+ * @param p_hwfn
+ * @param p_coal - store coalesce value read from the hardware.
+ * @param p_handle
+ *
+ * @return enum _ecore_status_t
+ **/
+enum _ecore_status_t
+ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *coal,
+ void *handle);
+
+/**
+ * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
+ * Tx queue. The fact that we can configure coalescing to up to 511, but on
+ * varying accuracy [the bigger the value the less accurate] up to a mistake
+ * of 3usec for the highest values.
+ * While the API allows setting coalescing per-qid, all queues sharing a SB
+ * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
+ * otherwise configuration would break.
+ *
+ * @param p_hwfn
+ * @param rx_coal - Rx Coalesce value in micro seconds.
+ * @param tx_coal - TX Coalesce value in micro seconds.
+ * @param p_handle
+ *
+ * @return enum _ecore_status_t
+ **/
+enum _ecore_status_t
+ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
+ u16 tx_coal, void *p_handle);
+
+/**
+ * @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_enable - true/false
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_enable);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_reg_addr.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_reg_addr.h
new file mode 100644
index 00000000..8c8fed4e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_reg_addr.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef GTT_REG_ADDR_H
+#define GTT_REG_ADDR_H
+
+/* Win 2 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
+
+/* Win 3 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
+
+/* Win 4 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
+
+/* Win 5 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
+
+/* Win 6 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
+
+/* Win 7 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
+
+/* Win 8 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
+
+/* Win 9 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
+
+/* Win 10 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
+
+/* Win 11 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_values.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_values.h
new file mode 100644
index 00000000..adc20c0c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_values.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __PREVENT_PXP_GLOBAL_WIN__
+
+static u32 pxp_global_win[] = {
+ 0,
+ 0,
+ 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
+ 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
+ 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
+ 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
+ 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
+ 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
+ 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
+ 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
+ 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
+ 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+#endif /* __PREVENT_PXP_GLOBAL_WIN__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_common.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_common.h
new file mode 100644
index 00000000..2d761b97
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_common.h
@@ -0,0 +1,2467 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_HSI_COMMON__
+#define __ECORE_HSI_COMMON__
+/********************************/
+/* Add include to common target */
+/********************************/
+#include "common_hsi.h"
+
+
+/*
+ * opcodes for the event ring
+ */
+enum common_event_opcode {
+ COMMON_EVENT_PF_START,
+ COMMON_EVENT_PF_STOP,
+ COMMON_EVENT_VF_START,
+ COMMON_EVENT_VF_STOP,
+ COMMON_EVENT_VF_PF_CHANNEL,
+ COMMON_EVENT_VF_FLR,
+ COMMON_EVENT_PF_UPDATE,
+ COMMON_EVENT_MALICIOUS_VF,
+ COMMON_EVENT_RL_UPDATE,
+ COMMON_EVENT_EMPTY,
+ MAX_COMMON_EVENT_OPCODE
+};
+
+
+/*
+ * Common Ramrod Command IDs
+ */
+enum common_ramrod_cmd_id {
+ COMMON_RAMROD_UNUSED,
+ COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
+ COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
+ COMMON_RAMROD_VF_START /* VF Function Start */,
+ COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */,
+ COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */,
+ COMMON_RAMROD_RL_UPDATE /* QCN/DCQCN RL update Ramrod */,
+ COMMON_RAMROD_EMPTY /* Empty Ramrod */,
+ MAX_COMMON_RAMROD_CMD_ID
+};
+
+
+/*
+ * The core storm context for the Ystorm
+ */
+struct ystorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/*
+ * The core storm context for the Pstorm
+ */
+struct pstorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/*
+ * Core Slowpath Connection storm context of Xstorm
+ */
+struct xstorm_core_conn_st_ctx {
+ __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
+ __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
+/* Consolidation Ring Base Address */
+ struct regpair consolid_base_addr;
+ __le16 spq_cons /* SPQ Ring Consumer */;
+ __le16 consolid_cons /* Consolidation Ring Consumer */;
+ __le32 reserved0[55] /* Pad to 15 cycles */;
+};
+
+struct e4_xstorm_core_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 core_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+/* bit6 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+/* bit7 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+/* bit8 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+/* bit9 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+/* bit12 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+/* bit13 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+/* bit14 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+/* timer1cf */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+/* timer2cf */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+/* cf10 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+/* cf11 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+/* cf12 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+/* cf13 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+/* cf14 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+/* cf15 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+/* cf16 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+/* cf_array_cf */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+/* cf18 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+/* cf19 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+/* cf20 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+/* cf21 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+/* cf22 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+/* cf1en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+/* cf3en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+/* cf4en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+/* cf5en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+/* cf6en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+/* cf7en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+/* cf8en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+/* cf9en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+/* cf11en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+/* cf12en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+/* cf13en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+/* cf14en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+/* cf15en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+/* cf16en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+/* rule0en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+/* bit17 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+/* bit18 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+/* bit19 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+/* bit20 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+/* bit21 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+/* cf23 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2 /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 consolid_prod /* physical_q1 */;
+ __le16 reserved16 /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_or_spq_prod /* word4 */;
+ __le16 updated_qm_pq_id /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 e5_reserved /* e5_reserved */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
+
+struct e4_tstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct e4_ustorm_core_conn_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 word1 /* word1 */;
+ __le32 rx_producers /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+};
+
+/*
+ * The core storm context for the Mstorm
+ */
+struct mstorm_core_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/*
+ * The core storm context for the Ustorm
+ */
+struct ustorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/*
+ * core connection context
+ */
+struct e4_core_conn_context {
+/* ystorm storm context */
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2] /* padding */;
+/* pstorm storm context */
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2] /* padding */;
+/* xstorm storm context */
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+/* xstorm aggregative context */
+ struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
+/* tstorm aggregative context */
+ struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
+/* ustorm aggregative context */
+ struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
+/* mstorm storm context */
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+/* ustorm storm context */
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2] /* padding */;
+};
+
+
+/*
+ * How ll2 should deal with packet upon errors
+ */
+enum core_error_handle {
+ LL2_DROP_PACKET /* If error occurs drop packet */,
+ LL2_DO_NOTHING /* If error occurs do nothing */,
+ LL2_ASSERT /* If error occurs assert */,
+ MAX_CORE_ERROR_HANDLE
+};
+
+
+/*
+ * opcodes for the event ring
+ */
+enum core_event_opcode {
+ CORE_EVENT_TX_QUEUE_START,
+ CORE_EVENT_TX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_START,
+ CORE_EVENT_RX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_FLUSH,
+ CORE_EVENT_TX_QUEUE_UPDATE,
+ MAX_CORE_EVENT_OPCODE
+};
+
+
+/*
+ * The L4 pseudo checksum mode for Core
+ */
+enum core_l4_pseudo_checksum_mode {
+/* Pseudo Checksum on packet is calculated with the correct packet length. */
+ CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+/* Pseudo Checksum on packet is calculated with zero length. */
+ CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
+ MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
+};
+
+
+/*
+ * Light-L2 RX Producers in Tstorm RAM
+ */
+struct core_ll2_port_stats {
+ struct regpair gsi_invalid_hdr;
+ struct regpair gsi_invalid_pkt_length;
+ struct regpair gsi_unsupported_pkt_typ;
+ struct regpair gsi_crcchksm_error;
+};
+
+
+/*
+ * Ethernet TX Per Queue Stats
+ */
+struct core_ll2_pstorm_per_queue_stat {
+/* number of total bytes sent without errors */
+ struct regpair sent_ucast_bytes;
+/* number of total bytes sent without errors */
+ struct regpair sent_mcast_bytes;
+/* number of total bytes sent without errors */
+ struct regpair sent_bcast_bytes;
+/* number of total packets sent without errors */
+ struct regpair sent_ucast_pkts;
+/* number of total packets sent without errors */
+ struct regpair sent_mcast_pkts;
+/* number of total packets sent without errors */
+ struct regpair sent_bcast_pkts;
+};
+
+
+/*
+ * Light-L2 RX Producers in Tstorm RAM
+ */
+struct core_ll2_rx_prod {
+ __le16 bd_prod /* BD Producer */;
+ __le16 cqe_prod /* CQE Producer */;
+ __le32 reserved;
+};
+
+
+struct core_ll2_tstorm_per_queue_stat {
+/* Number of packets discarded because they are bigger than MTU */
+ struct regpair packet_too_big_discard;
+/* Number of packets discarded due to lack of host buffers */
+ struct regpair no_buff_discard;
+};
+
+
+struct core_ll2_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+
+/*
+ * Core Ramrod Command IDs (light L2)
+ */
+enum core_ramrod_cmd_id {
+ CORE_RAMROD_UNUSED,
+ CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+ CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+ CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+ CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+ CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
+ CORE_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
+ MAX_CORE_RAMROD_CMD_ID
+};
+
+
+/*
+ * Core RX CQE Type for Light L2
+ */
+enum core_roce_flavor_type {
+ CORE_ROCE,
+ CORE_RROCE,
+ MAX_CORE_ROCE_FLAVOR_TYPE
+};
+
+
+/*
+ * Specifies how ll2 should deal with packets errors: packet_too_big and no_buff
+ */
+struct core_rx_action_on_error {
+ u8 error_type;
+/* ll2 how to handle error packet_too_big (use enum core_error_handle) */
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+/* ll2 how to handle error with no_buff (use enum core_error_handle) */
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
+};
+
+
+/*
+ * Core RX BD for Light L2
+ */
+struct core_rx_bd {
+ struct regpair addr;
+ __le16 reserved[4];
+};
+
+
+/*
+ * Core RX CM offload BD for Light L2
+ */
+struct core_rx_bd_with_buff_len {
+ struct regpair addr;
+ __le16 buff_length;
+ __le16 reserved[3];
+};
+
+/*
+ * Core RX CM offload BD for Light L2
+ */
+union core_rx_bd_union {
+ struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
+/* Core Rx Bd with dynamic buffer length */
+ struct core_rx_bd_with_buff_len rx_bd_with_len;
+};
+
+
+
+/*
+ * Opaque Data for Light L2 RX CQE .
+ */
+struct core_rx_cqe_opaque_data {
+ __le32 data[2] /* Opaque CQE Data */;
+};
+
+
+/*
+ * Core RX CQE Type for Light L2
+ */
+enum core_rx_cqe_type {
+ CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */,
+ CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */,
+ CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */,
+ CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */,
+ MAX_CORE_RX_CQE_TYPE
+};
+
+
+/*
+ * Core RX CQE for Light L2 .
+ */
+struct core_rx_fast_path_cqe {
+ u8 type /* CQE type */;
+/* Offset (in bytes) of the packet from start of the buffer */
+ u8 placement_offset;
+/* Parsing and error flags from the parser */
+ struct parsing_and_err_flags parse_flags;
+ __le16 packet_length /* Total packet length (from the parser) */;
+ __le16 vlan /* 802.1q VLAN tag */;
+ struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
+/* bit- map: each bit represents a specific error. errors indications are
+ * provided by the cracker. see spec for detailed description
+ */
+ struct parsing_err_flags err_flags;
+ __le16 reserved0;
+ __le32 reserved1[3];
+};
+
+/*
+ * Core Rx CM offload CQE .
+ */
+struct core_rx_gsi_offload_cqe {
+ u8 type /* CQE type */;
+ u8 data_length_error /* set if gsi data is bigger than buff */;
+/* Parsing and error flags from the parser */
+ struct parsing_and_err_flags parse_flags;
+ __le16 data_length /* Total packet length (from the parser) */;
+ __le16 vlan /* 802.1q VLAN tag */;
+ __le32 src_mac_addrhi /* hi 4 bytes source mac address */;
+ __le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
+/* These are the lower 16 bit of QP id in RoCE BTH header */
+ __le16 qp_id;
+ __le32 src_qp /* Source QP from DETH header */;
+ __le32 reserved[3];
+};
+
+/*
+ * Core RX CQE for Light L2 .
+ */
+struct core_rx_slow_path_cqe {
+ u8 type /* CQE type */;
+ u8 ramrod_cmd_id;
+ __le16 echo;
+ struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
+ __le32 reserved1[5];
+};
+
+/*
+ * Core RX CM offload BD for Light L2
+ */
+union core_rx_cqe_union {
+ struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */;
+ struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */;
+ struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
+};
+
+
+
+
+
+/*
+ * Ramrod data for rx queue start ramrod
+ */
+struct core_rx_start_ramrod_data {
+ struct regpair bd_base /* bd address of the first bd page */;
+ struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
+ __le16 mtu /* Maximum transmission unit */;
+ __le16 sb_id /* Status block ID */;
+ u8 sb_index /* index of the protocol index */;
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
+ __le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
+/* if set, 802.1q tags will be removed and copied to CQE */
+/* if set, 802.1q tags will be removed and copied to CQE */
+ u8 inner_vlan_stripping_en;
+/* if set and inner vlan does not exist, the outer vlan will copied to CQE as
+ * inner vlan. should be used in MF_OVLAN mode only.
+ */
+ u8 report_outer_vlan;
+ u8 queue_id /* Light L2 RX Queue ID */;
+ u8 main_func_queue /* Is this the main queue for the PF */;
+/* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
+ * main_func_queue is set.
+ */
+ u8 mf_si_bcast_accept_all;
+/* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if
+ * main_func_queue is set.
+ */
+ u8 mf_si_mcast_accept_all;
+/* Specifies how ll2 should deal with packets errors: packet_too_big and
+ * no_buff
+ */
+ struct core_rx_action_on_error action_on_error;
+/* set when in GSI offload mode on ROCE connection */
+ u8 gsi_offload_flag;
+ u8 reserved[6];
+};
+
+
+/*
+ * Ramrod data for rx queue stop ramrod
+ */
+struct core_rx_stop_ramrod_data {
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 queue_id /* Light L2 RX Queue ID */;
+ u8 reserved1;
+ __le16 reserved2[2];
+};
+
+
+/*
+ * Flags for Core TX BD
+ */
+struct core_tx_bd_data {
+ __le16 as_bitfield;
+/* Do not allow additional VLAN manipulations on this packet (DCB) */
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
+/* Insert VLAN into packet. Cannot be set for LB packets
+ * (tx_dst == CORE_TX_DEST_LB)
+ */
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
+/* This is the first BD of the packet (for debug) */
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
+/* Calculate the IP checksum for the packet */
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
+/* Calculate the L4 checksum for the packet */
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
+/* Packet is IPv6 with extensions */
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
+/* If IPv6+ext, and if l4_csum is 1, than this field indicates L4 protocol:
+ * 0-TCP, 1-UDP
+ */
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
+/* The pseudo checksum mode to place in the L4 checksum field. Required only
+ * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
+ */
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+/* Number of BDs that make up one packet - width wide enough to present
+ * CORE_LL2_TX_MAX_BDS_PER_PACKET
+ */
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+/* Use roce_flavor enum - Differentiate between Roce flavors is valid when
+ * connType is ROCE (use enum core_roce_flavor_type)
+ */
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+/* Calculate ip length */
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+/* disables the STAG insertion, relevant only in MF OVLAN mode. */
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x1
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 15
+};
+
+/*
+ * Core TX BD for Light L2
+ */
+struct core_tx_bd {
+ struct regpair addr /* Buffer Address */;
+ __le16 nbytes /* Number of Bytes in Buffer */;
+/* Network packets: VLAN to insert to packet (if insertion flag set) LoopBack
+ * packets: echo data to pass to Rx
+ */
+ __le16 nw_vlan_or_lb_echo;
+ struct core_tx_bd_data bd_data /* BD Flags */;
+ __le16 bitfield1;
+/* L4 Header Offset from start of packet (in Words). This is needed if both
+ * l4_csum and ipv6_ext are set
+ */
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
+/* Packet destination - Network, Loopback or Drop (use enum core_tx_dest) */
+#define CORE_TX_BD_TX_DST_MASK 0x3
+#define CORE_TX_BD_TX_DST_SHIFT 14
+};
+
+
+
+/*
+ * Light L2 TX Destination
+ */
+enum core_tx_dest {
+ CORE_TX_DEST_NW /* TX Destination to the Network */,
+ CORE_TX_DEST_LB /* TX Destination to the Loopback */,
+ CORE_TX_DEST_RESERVED,
+ CORE_TX_DEST_DROP /* TX Drop */,
+ MAX_CORE_TX_DEST
+};
+
+
+/*
+ * Ramrod data for tx queue start ramrod
+ */
+struct core_tx_start_ramrod_data {
+ struct regpair pbl_base_addr /* Address of the pbl page */;
+ __le16 mtu /* Maximum transmission unit */;
+ __le16 sb_id /* Status block ID */;
+ u8 sb_index /* Status block protocol index */;
+ u8 stats_en /* Statistics Enable */;
+ u8 stats_id /* Statistics Counter ID */;
+ u8 conn_type /* connection type that loaded ll2 */;
+ __le16 pbl_size /* Number of BD pages pointed by PBL */;
+ __le16 qm_pq_id /* QM PQ ID */;
+/* set when in GSI offload mode on ROCE connection */
+ u8 gsi_offload_flag;
+ u8 resrved[3];
+};
+
+
+/*
+ * Ramrod data for tx queue stop ramrod
+ */
+struct core_tx_stop_ramrod_data {
+ __le32 reserved0[2];
+};
+
+
+/*
+ * Ramrod data for tx queue update ramrod
+ */
+struct core_tx_update_ramrod_data {
+ u8 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
+ u8 reserved0;
+ __le16 qm_pq_id /* Updated QM PQ ID */;
+ __le32 reserved1[1];
+};
+
+
+/*
+ * Enum flag for what type of dcb data to update
+ */
+enum dcb_dscp_update_mode {
+/* use when no change should be done to DCB data */
+ DONT_UPDATE_DCB_DSCP,
+ UPDATE_DCB /* use to update only L2 (vlan) priority */,
+ UPDATE_DSCP /* use to update only IP DSCP */,
+ UPDATE_DCB_DSCP /* update vlan pri and DSCP */,
+ MAX_DCB_DSCP_UPDATE_FLAG
+};
+
+
+struct eth_mstorm_per_pf_stat {
+ struct regpair gre_discard_pkts /* Dropped GRE RX packets */;
+ struct regpair vxlan_discard_pkts /* Dropped VXLAN RX packets */;
+ struct regpair geneve_discard_pkts /* Dropped GENEVE RX packets */;
+ struct regpair lb_discard_pkts /* Dropped Tx switched packets */;
+};
+
+
+struct eth_mstorm_per_queue_stat {
+/* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (IPv6) */
+ struct regpair ttl0_discard;
+/* Number of packets discarded because they are bigger than MTU */
+ struct regpair packet_too_big_discard;
+/* Number of packets discarded due to lack of host buffers (BDs/SGEs/CQEs) */
+ struct regpair no_buff_discard;
+/* Number of packets discarded because of no active Rx connection */
+ struct regpair not_active_discard;
+/* number of coalesced packets in all TPA aggregations */
+ struct regpair tpa_coalesced_pkts;
+/* total number of TPA aggregations */
+ struct regpair tpa_coalesced_events;
+/* number of aggregations, which abnormally ended */
+ struct regpair tpa_aborts_num;
+/* total TCP payload length in all TPA aggregations */
+ struct regpair tpa_coalesced_bytes;
+};
+
+
+/*
+ * Ethernet TX Per PF
+ */
+struct eth_pstorm_per_pf_stat {
+/* number of total ucast bytes sent on loopback port without errors */
+ struct regpair sent_lb_ucast_bytes;
+/* number of total mcast bytes sent on loopback port without errors */
+ struct regpair sent_lb_mcast_bytes;
+/* number of total bcast bytes sent on loopback port without errors */
+ struct regpair sent_lb_bcast_bytes;
+/* number of total ucast packets sent on loopback port without errors */
+ struct regpair sent_lb_ucast_pkts;
+/* number of total mcast packets sent on loopback port without errors */
+ struct regpair sent_lb_mcast_pkts;
+/* number of total bcast packets sent on loopback port without errors */
+ struct regpair sent_lb_bcast_pkts;
+ struct regpair sent_gre_bytes /* Sent GRE bytes */;
+ struct regpair sent_vxlan_bytes /* Sent VXLAN bytes */;
+ struct regpair sent_geneve_bytes /* Sent GENEVE bytes */;
+ struct regpair sent_gre_pkts /* Sent GRE packets */;
+ struct regpair sent_vxlan_pkts /* Sent VXLAN packets */;
+ struct regpair sent_geneve_pkts /* Sent GENEVE packets */;
+ struct regpair gre_drop_pkts /* Dropped GRE TX packets */;
+ struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */;
+ struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
+};
+
+
+/*
+ * Ethernet TX Per Queue Stats
+ */
+struct eth_pstorm_per_queue_stat {
+/* number of total bytes sent without errors */
+ struct regpair sent_ucast_bytes;
+/* number of total bytes sent without errors */
+ struct regpair sent_mcast_bytes;
+/* number of total bytes sent without errors */
+ struct regpair sent_bcast_bytes;
+/* number of total packets sent without errors */
+ struct regpair sent_ucast_pkts;
+/* number of total packets sent without errors */
+ struct regpair sent_mcast_pkts;
+/* number of total packets sent without errors */
+ struct regpair sent_bcast_pkts;
+/* number of total packets dropped due to errors */
+ struct regpair error_drop_pkts;
+};
+
+
+/*
+ * ETH Rx producers data
+ */
+struct eth_rx_rate_limit {
+/* Rate Limit Multiplier - (Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s)) */
+ __le16 mult;
+/* Constant term to add (or subtract from number of cycles) */
+ __le16 cnst;
+ u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
+ u8 reserved0;
+ __le16 reserved1;
+};
+
+
+struct eth_ustorm_per_pf_stat {
+/* number of total ucast bytes received on loopback port without errors */
+ struct regpair rcv_lb_ucast_bytes;
+/* number of total mcast bytes received on loopback port without errors */
+ struct regpair rcv_lb_mcast_bytes;
+/* number of total bcast bytes received on loopback port without errors */
+ struct regpair rcv_lb_bcast_bytes;
+/* number of total ucast packets received on loopback port without errors */
+ struct regpair rcv_lb_ucast_pkts;
+/* number of total mcast packets received on loopback port without errors */
+ struct regpair rcv_lb_mcast_pkts;
+/* number of total bcast packets received on loopback port without errors */
+ struct regpair rcv_lb_bcast_pkts;
+ struct regpair rcv_gre_bytes /* Received GRE bytes */;
+ struct regpair rcv_vxlan_bytes /* Received VXLAN bytes */;
+ struct regpair rcv_geneve_bytes /* Received GENEVE bytes */;
+ struct regpair rcv_gre_pkts /* Received GRE packets */;
+ struct regpair rcv_vxlan_pkts /* Received VXLAN packets */;
+ struct regpair rcv_geneve_pkts /* Received GENEVE packets */;
+};
+
+
+struct eth_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+
+/*
+ * Event Ring VF-PF Channel data
+ */
+struct vf_pf_channel_eqe_data {
+ struct regpair msg_addr /* VF-PF message address */;
+};
+
+/*
+ * Event Ring malicious VF data
+ */
+struct malicious_vf_eqe_data {
+ u8 vf_id /* Malicious VF ID */;
+ u8 err_id /* Malicious VF error (use enum malicious_vf_error_id) */;
+ __le16 reserved[3];
+};
+
+/*
+ * Event Ring initial cleanup data
+ */
+struct initial_cleanup_eqe_data {
+ u8 vf_id /* VF ID */;
+ u8 reserved[7];
+};
+
+/*
+ * Event Data Union
+ */
+union event_ring_data {
+ u8 bytes[8] /* Byte Array */;
+ struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
+ struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
+/* Dedicated fields to iscsi connect done results */
+ struct iscsi_connect_done_results iscsi_conn_done_info;
+ struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
+/* VF Initial Cleanup data */
+ struct initial_cleanup_eqe_data vf_init_cleanup;
+};
+
+
+/*
+ * Event Ring Entry
+ */
+struct event_ring_entry {
+ u8 protocol_id /* Event Protocol ID (use enum protocol_type) */;
+ u8 opcode /* Event Opcode */;
+ __le16 reserved0 /* Reserved */;
+ __le16 echo /* Echo value from ramrod data on the host */;
+ u8 fw_return_code /* FW return code for SP ramrods */;
+ u8 flags;
+/* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
+#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
+#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+ union event_ring_data data;
+};
+
+/*
+ * Event Ring Next Page Address
+ */
+struct event_ring_next_addr {
+ struct regpair addr /* Next Page Address */;
+ __le32 reserved[2] /* Reserved */;
+};
+
+/*
+ * Event Ring Element
+ */
+union event_ring_element {
+ struct event_ring_entry entry /* Event Ring Entry */;
+/* Event Ring Next Page Address */
+ struct event_ring_next_addr next_addr;
+};
+
+
+
+/*
+ * Ports mode
+ */
+enum fw_flow_ctrl_mode {
+ flow_ctrl_pause,
+ flow_ctrl_pfc,
+ MAX_FW_FLOW_CTRL_MODE
+};
+
+
+/*
+ * GFT profile type.
+ */
+enum gft_profile_type {
+/* tunnel type, inner 4 tuple, IP type and L4 type match. */
+ GFT_PROFILE_TYPE_4_TUPLE,
+/* tunnel type, inner L4 destination port, IP type and L4 type match. */
+ GFT_PROFILE_TYPE_L4_DST_PORT,
+/* tunnel type, inner IP destination address and IP type match. */
+ GFT_PROFILE_TYPE_IP_DST_ADDR,
+/* tunnel type, inner IP source address and IP type match. */
+ GFT_PROFILE_TYPE_IP_SRC_ADDR,
+ GFT_PROFILE_TYPE_TUNNEL_TYPE /* tunnel type and outer IP type match. */,
+ MAX_GFT_PROFILE_TYPE
+};
+
+
+/*
+ * Major and Minor hsi Versions
+ */
+struct hsi_fp_ver_struct {
+ u8 minor_ver_arr[2] /* Minor Version of hsi loading pf */;
+ u8 major_ver_arr[2] /* Major Version of driver loading pf */;
+};
+
+
+/*
+ * Integration Phase
+ */
+enum integ_phase {
+ INTEG_PHASE_BB_A0_LATEST = 3 /* BB A0 latest integration phase */,
+ INTEG_PHASE_BB_B0_NO_MCP = 10 /* BB B0 without MCP */,
+ INTEG_PHASE_BB_B0_WITH_MCP = 11 /* BB B0 with MCP */,
+ MAX_INTEG_PHASE
+};
+
+
+/*
+ * Ports mode
+ */
+enum iwarp_ll2_tx_queues {
+/* LL2 queue for OOO packets sent in-order by the driver */
+ IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
+/* LL2 queue for unaligned packets sent aligned by the driver */
+ IWARP_LL2_ALIGNED_TX_QUEUE,
+/* LL2 queue for unaligned packets sent aligned and was right-trimmed by the
+ * driver
+ */
+ IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
+ IWARP_LL2_ERROR /* Error indication */,
+ MAX_IWARP_LL2_TX_QUEUES
+};
+
+
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+ MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
+/* Writing to VF/PF channel when it is not ready */
+ VF_PF_CHANNEL_NOT_READY,
+ VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
+ VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
+/* TX packet is shorter then reported on BDs or from minimal size */
+ ETH_PACKET_TOO_SMALL,
+/* Tx packet with marked as insert VLAN when its illegal */
+ ETH_ILLEGAL_VLAN_MODE,
+ ETH_MTU_VIOLATION /* TX packet is greater then MTU */,
+/* TX packet has illegal inband tags marked */
+ ETH_ILLEGAL_INBAND_TAGS,
+/* Vlan cant be added to inband tag */
+ ETH_VLAN_INSERT_AND_INBAND_VLAN,
+/* indicated number of BDs for the packet is illegal */
+ ETH_ILLEGAL_NBDS,
+ ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
+/* There are not enough BDs for transmission of even one packet */
+ ETH_INSUFFICIENT_BDS,
+ ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
+ ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
+/* empty BD (which not contains control flags) is illegal */
+ ETH_ZERO_SIZE_BD,
+ ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit */,
+/* In LSO its expected that on the local BD ring there will be at least MSS
+ * bytes of data
+ */
+ ETH_INSUFFICIENT_PAYLOAD,
+ ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
+/* Tunneled packet with IPv6+Ext without a proper number of BDs */
+ ETH_TUNN_IPV6_EXT_NBD_ERR,
+ ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
+ ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
+
+
+
+/*
+ * Mstorm non-triggering VF zone
+ */
+struct mstorm_non_trigger_vf_zone {
+/* VF statistic bucket */
+ struct eth_mstorm_per_queue_stat eth_queue_stat;
+/* VF RX queues producers */
+ struct eth_rx_prod_data
+ eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
+};
+
+
+/*
+ * Mstorm VF zone
+ */
+struct mstorm_vf_zone {
+/* non-interrupt-triggering zone */
+ struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
+
+/*
+ * vlan header including TPID and TCI fields
+ */
+struct vlan_header {
+ __le16 tpid /* Tag Protocol Identifier */;
+ __le16 tci /* Tag Control Information */;
+};
+
+/*
+ * outer tag configurations
+ */
+struct outer_tag_config_struct {
+/* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette
+ * Davis, UFP with Host Control mode, and UFP with DCB over base interface.
+ * else - 0.
+ */
+ u8 enable_stag_pri_change;
+/* If inner_to_outer_pri_map is initialize then set pri_map_valid */
+ u8 pri_map_valid;
+ u8 reserved[2];
+/* In case mf_mode is MF_OVLAN, this field specifies the outer tag protocol
+ * identifier and outer tag control information
+ */
+ struct vlan_header outer_tag;
+/* Map from inner to outer priority. Set pri_map_valid when init map */
+ u8 inner_to_outer_pri_map[8];
+};
+
+
+/*
+ * personality per PF
+ */
+enum personality_type {
+ BAD_PERSONALITY_TYP,
+ PERSONALITY_ISCSI /* iSCSI and LL2 */,
+ PERSONALITY_FCOE /* Fcoe and LL2 */,
+ PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
+ PERSONALITY_RDMA /* Roce and LL2 */,
+ PERSONALITY_CORE /* CORE(LL2) */,
+ PERSONALITY_ETH /* Ethernet */,
+ PERSONALITY_TOE /* Toe and LL2 */,
+ MAX_PERSONALITY_TYPE
+};
+
+
+/*
+ * tunnel configuration
+ */
+struct pf_start_tunnel_config {
+/* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set -
+ * FW will use a default port
+ */
+ u8 set_vxlan_udp_port_flg;
+/* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set -
+ * FW will use a default port
+ */
+ u8 set_geneve_udp_port_flg;
+ u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. */;
+/* Rx classification scheme for l2 GENEVE tunnel. */
+ u8 tunnel_clss_l2geneve;
+/* Rx classification scheme for ip GENEVE tunnel. */
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. */;
+ u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. */;
+ u8 reserved;
+/* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
+ __le16 vxlan_udp_port;
+/* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
+ __le16 geneve_udp_port;
+};
+
+/*
+ * Ramrod data for PF start ramrod
+ */
+struct pf_start_ramrod_data {
+ struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
+/* PBL address of consolidation queue */
+ struct regpair consolid_q_pbl_addr;
+/* tunnel configuration. */
+ struct pf_start_tunnel_config tunnel_config;
+ __le16 event_ring_sb_id /* Status block ID */;
+/* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
+ u8 base_vf_id;
+ u8 num_vfs /* Amount of vfs owned by PF */;
+ u8 event_ring_num_pages /* Number of PBL pages in event ring */;
+ u8 event_ring_sb_index /* Status block index */;
+ u8 path_id /* HW path ID (engine ID) */;
+ u8 warning_as_error /* In FW asserts, treat warning as error */;
+/* If not set - throw a warning for each ramrod (for debug) */
+ u8 dont_log_ramrods;
+ u8 personality /* define what type of personality is new PF */;
+/* Log type mask. Each bit set enables a corresponding event type logging.
+ * Event types are defined as ASSERT_LOG_TYPE_xxx
+ */
+ __le16 log_type_mask;
+ u8 mf_mode /* Multi function mode */;
+ u8 integ_phase /* Integration phase */;
+/* If set, inter-pf tx switching is allowed in Switch Independent func mode */
+ u8 allow_npar_tx_switching;
+ u8 reserved0;
+/* FP HSI version to be used by FW */
+ struct hsi_fp_ver_struct hsi_fp_ver;
+/* Outer tag configurations */
+ struct outer_tag_config_struct outer_tag_config;
+};
+
+
+
+/*
+ * Per protocol DCB data
+ */
+struct protocol_dcb_data {
+ u8 dcb_enable_flag /* Enable DCB */;
+ u8 dscp_enable_flag /* Enable updating DSCP value */;
+ u8 dcb_priority /* DCB priority */;
+ u8 dcb_tc /* DCB TC */;
+ u8 dscp_val /* DSCP value to write if dscp_enable_flag is set */;
+/* When DCB is enabled - if this flag is set, dont add VLAN 0 tag to untagged
+ * frames
+ */
+ u8 dcb_dont_add_vlan0;
+};
+
+/*
+ * Update tunnel configuration
+ */
+struct pf_update_tunnel_config {
+/* Update RX per PF tunnel classification scheme. */
+ u8 update_rx_pf_clss;
+/* Update per PORT default tunnel RX classification scheme for traffic with
+ * unknown unicast outer MAC in NPAR mode.
+ */
+ u8 update_rx_def_ucast_clss;
+/* Update per PORT default tunnel RX classification scheme for traffic with non
+ * unicast outer MAC in NPAR mode.
+ */
+ u8 update_rx_def_non_ucast_clss;
+/* Update VXLAN tunnel UDP destination port. */
+ u8 set_vxlan_udp_port_flg;
+/* Update GENEVE tunnel UDP destination port. */
+ u8 set_geneve_udp_port_flg;
+ u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+/* Classification scheme for l2 GENEVE tunnel. */
+ u8 tunnel_clss_l2geneve;
+/* Classification scheme for ip GENEVE tunnel. */
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
+ u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+ __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+ __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+ __le16 reserved;
+};
+
+/*
+ * Data for port update ramrod
+ */
+struct pf_update_ramrod_data {
+/* Update Eth DCB data indication (use enum dcb_dscp_update_mode) */
+ u8 update_eth_dcb_data_mode;
+/* Update FCOE DCB data indication (use enum dcb_dscp_update_mode) */
+ u8 update_fcoe_dcb_data_mode;
+/* Update iSCSI DCB data indication (use enum dcb_dscp_update_mode) */
+ u8 update_iscsi_dcb_data_mode;
+ u8 update_roce_dcb_data_mode /* Update ROCE DCB data indication */;
+/* Update RROCE (RoceV2) DCB data indication */
+ u8 update_rroce_dcb_data_mode;
+ u8 update_iwarp_dcb_data_mode /* Update IWARP DCB data indication */;
+ u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
+/* Update Enable STAG Priority Change indication */
+ u8 update_enable_stag_pri_change;
+ struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
+ struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
+/* core iscsi related fields */
+ struct protocol_dcb_data iscsi_dcb_data;
+ struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
+/* core roce related fields */
+ struct protocol_dcb_data rroce_dcb_data;
+/* core iwarp related fields */
+ struct protocol_dcb_data iwarp_dcb_data;
+ __le16 mf_vlan /* new outer vlan id value */;
+/* enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette
+ * Davis, UFP with Host Control mode, and UFP with DCB over base interface.
+ * else - 0
+ */
+ u8 enable_stag_pri_change;
+ u8 reserved;
+/* tunnel configuration. */
+ struct pf_update_tunnel_config tunnel_config;
+};
+
+
+
+/*
+ * Ports mode
+ */
+enum ports_mode {
+ ENGX2_PORTX1 /* 2 engines x 1 port */,
+ ENGX2_PORTX2 /* 2 engines x 2 ports */,
+ ENGX1_PORTX1 /* 1 engine x 1 port */,
+ ENGX1_PORTX2 /* 1 engine x 2 ports */,
+ ENGX1_PORTX4 /* 1 engine x 4 ports */,
+ MAX_PORTS_MODE
+};
+
+
+
+/*
+ * use to index in hsi_fp_[major|minor]_ver_arr per protocol
+ */
+enum protocol_version_array_key {
+ ETH_VER_KEY = 0,
+ ROCE_VER_KEY,
+ MAX_PROTOCOL_VERSION_ARRAY_KEY
+};
+
+
+
+/*
+ * RDMA TX Stats
+ */
+struct rdma_sent_stats {
+ struct regpair sent_bytes /* number of total RDMA bytes sent */;
+ struct regpair sent_pkts /* number of total RDMA packets sent */;
+};
+
+/*
+ * Pstorm non-triggering VF zone
+ */
+struct pstorm_non_trigger_vf_zone {
+/* VF statistic bucket */
+ struct eth_pstorm_per_queue_stat eth_queue_stat;
+ struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
+};
+
+
+/*
+ * Pstorm VF zone
+ */
+struct pstorm_vf_zone {
+/* non-interrupt-triggering zone */
+ struct pstorm_non_trigger_vf_zone non_trigger;
+ struct regpair reserved[7] /* vf_zone size mus be power of 2 */;
+};
+
+
+/*
+ * Ramrod Header of SPQE
+ */
+struct ramrod_header {
+ __le32 cid /* Slowpath Connection CID */;
+ u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
+ u8 protocol_id /* Ramrod Protocol ID */;
+ __le16 echo /* Ramrod echo */;
+};
+
+
+/*
+ * RDMA RX Stats
+ */
+struct rdma_rcv_stats {
+ struct regpair rcv_bytes /* number of total RDMA bytes received */;
+ struct regpair rcv_pkts /* number of total RDMA packets received */;
+};
+
+
+
+/*
+ * Data for update QCN/DCQCN RL ramrod
+ */
+struct rl_update_ramrod_data {
+ u8 qcn_update_param_flg /* Update QCN global params: timeout. */;
+/* Update DCQCN global params: timeout, g, k. */
+ u8 dcqcn_update_param_flg;
+ u8 rl_init_flg /* Init RL parameters, when RL disabled. */;
+ u8 rl_start_flg /* Start RL in IDLE state. Set rate to maximum. */;
+ u8 rl_stop_flg /* Stop RL. */;
+ u8 rl_id_first /* ID of first or single RL, that will be updated. */;
+/* ID of last RL, that will be updated. If clear, single RL will updated. */
+ u8 rl_id_last;
+ u8 rl_dc_qcn_flg /* If set, RL will used for DCQCN. */;
+ __le32 rl_bc_rate /* Byte Counter Limit. */;
+ __le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
+ __le16 rl_r_ai /* Active increase rate. */;
+ __le16 rl_r_hai /* Hyper active increase rate. */;
+ __le16 dcqcn_g /* DCQCN Alpha update gain in 1/64K resolution . */;
+ __le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
+ __le32 dcqcn_timeuot_us /* DCQCN timeout. */;
+ __le32 qcn_timeuot_us /* QCN timeout. */;
+ __le32 reserved[2];
+};
+
+
+/*
+ * Slowpath Element (SPQE)
+ */
+struct slow_path_element {
+ struct ramrod_header hdr /* Ramrod Header */;
+ struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
+};
+
+
+/*
+ * Tstorm non-triggering VF zone
+ */
+struct tstorm_non_trigger_vf_zone {
+ struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
+};
+
+
+struct tstorm_per_port_stat {
+/* packet is dropped because it was truncated in NIG */
+ struct regpair trunc_error_discard;
+/* packet is dropped because of Ethernet FCS error */
+ struct regpair mac_error_discard;
+/* packet is dropped because classification was unsuccessful */
+ struct regpair mftag_filter_discard;
+/* packet was passed to Ethernet and dropped because of no mac filter match */
+ struct regpair eth_mac_filter_discard;
+/* packet passed to Light L2 and dropped because Light L2 is not configured for
+ * this PF
+ */
+ struct regpair ll2_mac_filter_discard;
+/* packet passed to Light L2 and dropped because Light L2 is not configured for
+ * this PF
+ */
+ struct regpair ll2_conn_disabled_discard;
+/* packet is an ISCSI irregular packet */
+ struct regpair iscsi_irregular_pkt;
+/* packet is an FCOE irregular packet */
+ struct regpair fcoe_irregular_pkt;
+/* packet is an ROCE irregular packet */
+ struct regpair roce_irregular_pkt;
+/* packet is an IWARP irregular packet */
+ struct regpair iwarp_irregular_pkt;
+/* packet is an ETH irregular packet */
+ struct regpair eth_irregular_pkt;
+/* packet is an TOE irregular packet */
+ struct regpair toe_irregular_pkt;
+/* packet is an PREROCE irregular packet */
+ struct regpair preroce_irregular_pkt;
+ struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */;
+/* VXLAN dropped packets */
+ struct regpair eth_vxlan_tunn_filter_discard;
+/* GENEVE dropped packets */
+ struct regpair eth_geneve_tunn_filter_discard;
+ struct regpair eth_gft_drop_pkt /* GFT dropped packets */;
+};
+
+
+/*
+ * Tstorm VF zone
+ */
+struct tstorm_vf_zone {
+/* non-interrupt-triggering zone */
+ struct tstorm_non_trigger_vf_zone non_trigger;
+};
+
+
+/*
+ * Tunnel classification scheme
+ */
+enum tunnel_clss {
+/* Use MAC and VLAN from first L2 header for vport classification. */
+ TUNNEL_CLSS_MAC_VLAN = 0,
+/* Use MAC from first L2 header and VNI from tunnel header for vport
+ * classification
+ */
+ TUNNEL_CLSS_MAC_VNI,
+/* Use MAC and VLAN from last L2 header for vport classification */
+ TUNNEL_CLSS_INNER_MAC_VLAN,
+/* Use MAC from last L2 header and VNI from tunnel header for vport
+ * classification
+ */
+ TUNNEL_CLSS_INNER_MAC_VNI,
+/* Use MAC and VLAN from last L2 header for vport classification. If no exact
+ * match, use MAC and VLAN from first L2 header for classification.
+ */
+ TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
+ MAX_TUNNEL_CLSS
+};
+
+
+
+/*
+ * Ustorm non-triggering VF zone
+ */
+struct ustorm_non_trigger_vf_zone {
+/* VF statistic bucket */
+ struct eth_ustorm_per_queue_stat eth_queue_stat;
+ struct regpair vf_pf_msg_addr /* VF-PF message address */;
+};
+
+
+/*
+ * Ustorm triggering VF zone
+ */
+struct ustorm_trigger_vf_zone {
+ u8 vf_pf_msg_valid /* VF-PF message valid flag */;
+ u8 reserved[7];
+};
+
+
+/*
+ * Ustorm VF zone
+ */
+struct ustorm_vf_zone {
+/* non-interrupt-triggering zone */
+ struct ustorm_non_trigger_vf_zone non_trigger;
+ struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
+};
+
+
+/*
+ * VF-PF channel data
+ */
+struct vf_pf_channel_data {
+/* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF Channel
+ * is ready for a new transaction.
+ */
+ __le32 ready;
+/* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel is
+ * valid.
+ */
+ u8 valid;
+ u8 reserved0;
+ __le16 reserved1;
+};
+
+
+/*
+ * Ramrod data for VF start ramrod
+ */
+struct vf_start_ramrod_data {
+ u8 vf_id /* VF ID */;
+/* If set, initial cleanup ack will be sent to parent PF SP event queue */
+ u8 enable_flr_ack;
+ __le16 opaque_fid /* VF opaque FID */;
+ u8 personality /* define what type of personality is new VF */;
+ u8 reserved[7];
+/* FP HSI version to be used by FW */
+ struct hsi_fp_ver_struct hsi_fp_ver;
+};
+
+
+/*
+ * Ramrod data for VF start ramrod
+ */
+struct vf_stop_ramrod_data {
+ u8 vf_id /* VF ID */;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 reserved2;
+};
+
+
+/*
+ * VF zone size mode.
+ */
+enum vf_zone_size_mode {
+/* Default VF zone size. Up to 192 VF supported. */
+ VF_ZONE_SIZE_MODE_DEFAULT,
+/* Doubled VF zone size. Up to 96 VF supported. */
+ VF_ZONE_SIZE_MODE_DOUBLE,
+/* Quad VF zone size. Up to 48 VF supported. */
+ VF_ZONE_SIZE_MODE_QUAD,
+ MAX_VF_ZONE_SIZE_MODE
+};
+
+
+
+
+
+/*
+ * Attentions status block
+ */
+struct atten_status_block {
+ __le32 atten_bits;
+ __le32 atten_ack;
+ __le16 reserved0;
+ __le16 sb_index /* status block running index */;
+ __le32 reserved1;
+};
+
+
+/*
+ * DMAE command
+ */
+struct dmae_cmd {
+ __le32 opcode;
+/* DMA Source. 0 - PCIe, 1 - GRC (use enum dmae_cmd_src_enum) */
+#define DMAE_CMD_SRC_MASK 0x1
+#define DMAE_CMD_SRC_SHIFT 0
+/* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None
+ * (use enum dmae_cmd_dst_enum)
+ */
+#define DMAE_CMD_DST_MASK 0x3
+#define DMAE_CMD_DST_SHIFT 1
+/* Completion destination. 0 - PCie, 1 - GRC (use enum dmae_cmd_c_dst_enum) */
+#define DMAE_CMD_C_DST_MASK 0x1
+#define DMAE_CMD_C_DST_SHIFT 3
+/* Reset the CRC result (do not use the previous result as the seed) */
+#define DMAE_CMD_CRC_RESET_MASK 0x1
+#define DMAE_CMD_CRC_RESET_SHIFT 4
+/* Reset the source address in the next go to the same source address of the
+ * previous go
+ */
+#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
+/* Reset the destination address in the next go to the same destination address
+ * of the previous go
+ */
+#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
+/* 0 completion function is the same as src function, 1 - 0 completion
+ * function is the same as dst function (use enum dmae_cmd_comp_func_enum)
+ */
+#define DMAE_CMD_COMP_FUNC_MASK 0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT 7
+/* 0 - Do not write a completion word, 1 - Write a completion word
+ * (use enum dmae_cmd_comp_word_en_enum)
+ */
+#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
+/* 0 - Do not write a CRC word, 1 - Write a CRC word
+ * (use enum dmae_cmd_comp_crc_en_enum)
+ */
+#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
+/* The CRC word should be taken from the DMAE address space from address 9+X,
+ * where X is the value in these bits.
+ */
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK 0x1
+#define DMAE_CMD_RESERVED1_SHIFT 13
+#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
+/* The field specifies how the completion word is affected by PCIe read error. 0
+ * Send a regular completion, 1 - Send a completion with an error indication,
+ * 2 do not send a completion (use enum dmae_cmd_error_handling_enum)
+ */
+#define DMAE_CMD_ERR_HANDLING_MASK 0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT 16
+/* The port ID to be placed on the RF FID field of the GRC bus. this field is
+ * used both when GRC is the destination and when it is the source of the DMAE
+ * transaction.
+ */
+#define DMAE_CMD_PORT_ID_MASK 0x3
+#define DMAE_CMD_PORT_ID_SHIFT 18
+/* Source PCI function number [3:0] */
+#define DMAE_CMD_SRC_PF_ID_MASK 0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT 20
+/* Destination PCI function number [3:0] */
+#define DMAE_CMD_DST_PF_ID_MASK 0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT 24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1 /* Source VFID valid */
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1 /* Destination VFID valid */
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK 0x3
+#define DMAE_CMD_RESERVED2_SHIFT 30
+/* PCIe source address low in bytes or GRC source address in DW */
+ __le32 src_addr_lo;
+/* PCIe source address high in bytes or reserved (if source is GRC) */
+ __le32 src_addr_hi;
+/* PCIe destination address low in bytes or GRC destination address in DW */
+ __le32 dst_addr_lo;
+/* PCIe destination address high in bytes or reserved (if destination is GRC) */
+ __le32 dst_addr_hi;
+ __le16 length_dw /* Length in DW */;
+ __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
+#define DMAE_CMD_SRC_VF_ID_SHIFT 0
+#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
+#define DMAE_CMD_DST_VF_ID_SHIFT 8
+/* PCIe completion address low in bytes or GRC completion address in DW */
+ __le32 comp_addr_lo;
+/* PCIe completion address high in bytes or reserved (if completion address is
+ * GRC)
+ */
+ __le32 comp_addr_hi;
+ __le32 comp_val /* Value to write to completion address */;
+ __le32 crc32 /* crc16 result */;
+ __le32 crc_32_c /* crc32_c result */;
+ __le16 crc16 /* crc16 result */;
+ __le16 crc16_c /* crc16_c result */;
+ __le16 crc10 /* crc_t10 result */;
+ __le16 reserved;
+ __le16 xsum16 /* checksum16 result */;
+ __le16 xsum8 /* checksum8 result */;
+};
+
+
+enum dmae_cmd_comp_crc_en_enum {
+ dmae_cmd_comp_crc_disabled /* Do not write a CRC word */,
+ dmae_cmd_comp_crc_enabled /* Write a CRC word */,
+ MAX_DMAE_CMD_COMP_CRC_EN_ENUM
+};
+
+
+enum dmae_cmd_comp_func_enum {
+/* completion word and/or CRC will be sent to SRC-PCI function/SRC VFID */
+ dmae_cmd_comp_func_to_src,
+/* completion word and/or CRC will be sent to DST-PCI function/DST VFID */
+ dmae_cmd_comp_func_to_dst,
+ MAX_DMAE_CMD_COMP_FUNC_ENUM
+};
+
+
+enum dmae_cmd_comp_word_en_enum {
+ dmae_cmd_comp_word_disabled /* Do not write a completion word */,
+ dmae_cmd_comp_word_enabled /* Write the completion word */,
+ MAX_DMAE_CMD_COMP_WORD_EN_ENUM
+};
+
+
+enum dmae_cmd_c_dst_enum {
+ dmae_cmd_c_dst_pcie,
+ dmae_cmd_c_dst_grc,
+ MAX_DMAE_CMD_C_DST_ENUM
+};
+
+
+enum dmae_cmd_dst_enum {
+ dmae_cmd_dst_none_0,
+ dmae_cmd_dst_pcie,
+ dmae_cmd_dst_grc,
+ dmae_cmd_dst_none_3,
+ MAX_DMAE_CMD_DST_ENUM
+};
+
+
+enum dmae_cmd_error_handling_enum {
+/* Send a regular completion (with no error indication) */
+ dmae_cmd_error_handling_send_regular_comp,
+/* Send a completion with an error indication (i.e. set bit 31 of the completion
+ * word)
+ */
+ dmae_cmd_error_handling_send_comp_with_err,
+ dmae_cmd_error_handling_dont_send_comp /* Do not send a completion */,
+ MAX_DMAE_CMD_ERROR_HANDLING_ENUM
+};
+
+
+enum dmae_cmd_src_enum {
+ dmae_cmd_src_pcie /* The source is the PCIe */,
+ dmae_cmd_src_grc /* The source is the GRC */,
+ MAX_DMAE_CMD_SRC_ENUM
+};
+
+
+struct e4_mstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+
+
+
+
+struct e4_ystorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+
+struct fw_asserts_ram_section {
+/* The offset of the section in the RAM in RAM lines (64-bit units) */
+ __le16 section_ram_line_offset;
+/* The size of the section in RAM lines (64-bit units) */
+ __le16 section_ram_line_size;
+/* The offset of the asserts list within the section in dwords */
+ u8 list_dword_offset;
+/* The size of an assert list element in dwords */
+ u8 list_element_dword_size;
+ u8 list_num_elements /* The number of elements in the asserts list */;
+/* The offset of the next list index field within the section in dwords */
+ u8 list_next_index_dword_offset;
+};
+
+
+struct fw_ver_num {
+ u8 major /* Firmware major version number */;
+ u8 minor /* Firmware minor version number */;
+ u8 rev /* Firmware revision version number */;
+ u8 eng /* Firmware engineering version number (for bootleg versions) */;
+};
+
+struct fw_ver_info {
+ __le16 tools_ver /* Tools version number */;
+ u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
+ u8 reserved1;
+ struct fw_ver_num num /* FW version number */;
+ __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
+ __le32 reserved2;
+};
+
+struct fw_info {
+ struct fw_ver_info ver /* FW version information */;
+/* Info regarding the FW asserts section in the Storm RAM */
+ struct fw_asserts_ram_section fw_asserts_section;
+};
+
+
+struct fw_info_location {
+ __le32 grc_addr /* GRC address where the fw_info struct is located. */;
+/* Size of the fw_info structure (thats located at the grc_addr). */
+ __le32 size;
+};
+
+
+
+
+/*
+ * IGU cleanup command
+ */
+struct igu_cleanup {
+ __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT 0
+/* cleanup clear - 0, set - 1 */
+#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+/* must always be set (use enum command_type_bit) */
+#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
+};
+
+
+/*
+ * IGU firmware driver command
+ */
+union igu_command {
+ struct igu_prod_cons_update prod_cons_update;
+ struct igu_cleanup cleanup;
+};
+
+
+/*
+ * IGU firmware driver command
+ */
+struct igu_command_reg_ctrl {
+ __le16 opaque_fid;
+ __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
+/* command typ: 0 - read, 1 - write */
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+};
+
+
+/*
+ * IGU mapping line structure
+ */
+struct igu_mapping_line {
+ __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT 0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
+/* In BB: VF-0-120, PF-0-7; In K2: VF-0-191, PF-0-15 */
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
+#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+};
+
+
+/*
+ * IGU MSIX line structure
+ */
+struct igu_msix_vector {
+ struct regpair address;
+ __le32 data;
+ __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+};
+
+
+/*
+ * per encapsulation type enabling flags
+ */
+struct prs_reg_encapsulation_type_en {
+ u8 flags;
+/* Enable bit for Ethernet-over-GRE (L2 GRE) encapsulation. */
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
+/* Enable bit for IP-over-GRE (IP GRE) encapsulation. */
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
+/* Enable bit for VXLAN encapsulation. */
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
+/* Enable bit for T-Tag encapsulation. */
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
+/* Enable bit for Ethernet-over-GENEVE (L2 GENEVE) encapsulation. */
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+/* Enable bit for IP-over-GENEVE (IP GENEVE) encapsulation. */
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
+};
+
+
+enum pxp_tph_st_hint {
+ TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
+ TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
+/* Device Write and Host Read, or Host Write and Device Read */
+ TPH_ST_HINT_TARGET,
+/* Device Write and Host Read, or Host Write and Device Read - with temporal
+ * reuse
+ */
+ TPH_ST_HINT_TARGET_PRIO,
+ MAX_PXP_TPH_ST_HINT
+};
+
+
+/*
+ * QM hardware structure of enable bypass credit mask
+ */
+struct qm_rf_bypass_mask {
+ u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
+#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+
+/*
+ * QM hardware structure of opportunistic credit mask
+ */
+struct qm_rf_opportunistic_mask {
+ __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
+};
+
+
+/*
+ * QM hardware structure of QM map memory
+ */
+struct qm_rf_pq_map_e4 {
+ __le32 reg;
+#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1 /* PQ active */
+#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF /* RL ID */
+#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1
+/* the first PQ associated with the VPORT and VOQ of this PQ */
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F /* VOQ */
+#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1 /* RL active */
+#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26
+};
+
+
+/*
+ * Completion params for aggregated interrupt completion
+ */
+struct sdm_agg_int_comp_params {
+ __le16 params;
+/* the number of aggregated interrupt, 0-31 */
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
+/* 1 - set a bit in aggregated vector, 0 - dont set */
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+/* Number of bit in the aggregated vector, 0-279 (TBD) */
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
+};
+
+
+/*
+ * SDM operation gen command (generate aggregative interrupt)
+ */
+struct sdm_op_gen {
+ __le32 command;
+/* completion parameters 0-15 */
+#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
+#define SDM_OP_GEN_RESERVED_SHIFT 20
+};
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+#endif /* __ECORE_HSI_COMMON__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_debug_tools.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_debug_tools.h
new file mode 100644
index 00000000..bf548722
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -0,0 +1,1112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_HSI_DEBUG_TOOLS__
+#define __ECORE_HSI_DEBUG_TOOLS__
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
+
+enum block_addr {
+ GRCBASE_GRC = 0x50000,
+ GRCBASE_MISCS = 0x9000,
+ GRCBASE_MISC = 0x8000,
+ GRCBASE_DBU = 0xa000,
+ GRCBASE_PGLUE_B = 0x2a8000,
+ GRCBASE_CNIG = 0x218000,
+ GRCBASE_CPMU = 0x30000,
+ GRCBASE_NCSI = 0x40000,
+ GRCBASE_OPTE = 0x53000,
+ GRCBASE_BMB = 0x540000,
+ GRCBASE_PCIE = 0x54000,
+ GRCBASE_MCP = 0xe00000,
+ GRCBASE_MCP2 = 0x52000,
+ GRCBASE_PSWHST = 0x2a0000,
+ GRCBASE_PSWHST2 = 0x29e000,
+ GRCBASE_PSWRD = 0x29c000,
+ GRCBASE_PSWRD2 = 0x29d000,
+ GRCBASE_PSWWR = 0x29a000,
+ GRCBASE_PSWWR2 = 0x29b000,
+ GRCBASE_PSWRQ = 0x280000,
+ GRCBASE_PSWRQ2 = 0x240000,
+ GRCBASE_PGLCS = 0x0,
+ GRCBASE_DMAE = 0xc000,
+ GRCBASE_PTU = 0x560000,
+ GRCBASE_TCM = 0x1180000,
+ GRCBASE_MCM = 0x1200000,
+ GRCBASE_UCM = 0x1280000,
+ GRCBASE_XCM = 0x1000000,
+ GRCBASE_YCM = 0x1080000,
+ GRCBASE_PCM = 0x1100000,
+ GRCBASE_QM = 0x2f0000,
+ GRCBASE_TM = 0x2c0000,
+ GRCBASE_DORQ = 0x100000,
+ GRCBASE_BRB = 0x340000,
+ GRCBASE_SRC = 0x238000,
+ GRCBASE_PRS = 0x1f0000,
+ GRCBASE_TSDM = 0xfb0000,
+ GRCBASE_MSDM = 0xfc0000,
+ GRCBASE_USDM = 0xfd0000,
+ GRCBASE_XSDM = 0xf80000,
+ GRCBASE_YSDM = 0xf90000,
+ GRCBASE_PSDM = 0xfa0000,
+ GRCBASE_TSEM = 0x1700000,
+ GRCBASE_MSEM = 0x1800000,
+ GRCBASE_USEM = 0x1900000,
+ GRCBASE_XSEM = 0x1400000,
+ GRCBASE_YSEM = 0x1500000,
+ GRCBASE_PSEM = 0x1600000,
+ GRCBASE_RSS = 0x238800,
+ GRCBASE_TMLD = 0x4d0000,
+ GRCBASE_MULD = 0x4e0000,
+ GRCBASE_YULD = 0x4c8000,
+ GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PTLD = 0x590000,
+ GRCBASE_YPLD = 0x5b0000,
+ GRCBASE_PRM = 0x230000,
+ GRCBASE_PBF_PB1 = 0xda0000,
+ GRCBASE_PBF_PB2 = 0xda4000,
+ GRCBASE_RPB = 0x23c000,
+ GRCBASE_BTB = 0xdb0000,
+ GRCBASE_PBF = 0xd80000,
+ GRCBASE_RDIF = 0x300000,
+ GRCBASE_TDIF = 0x310000,
+ GRCBASE_CDU = 0x580000,
+ GRCBASE_CCFC = 0x2e0000,
+ GRCBASE_TCFC = 0x2d0000,
+ GRCBASE_IGU = 0x180000,
+ GRCBASE_CAU = 0x1c0000,
+ GRCBASE_RGFS = 0xf00000,
+ GRCBASE_RGSRC = 0x320000,
+ GRCBASE_TGFS = 0xd00000,
+ GRCBASE_TGSRC = 0x322000,
+ GRCBASE_UMAC = 0x51000,
+ GRCBASE_XMAC = 0x210000,
+ GRCBASE_DBG = 0x10000,
+ GRCBASE_NIG = 0x500000,
+ GRCBASE_WOL = 0x600000,
+ GRCBASE_BMBN = 0x610000,
+ GRCBASE_IPC = 0x20000,
+ GRCBASE_NWM = 0x800000,
+ GRCBASE_NWS = 0x700000,
+ GRCBASE_MS = 0x6a0000,
+ GRCBASE_PHY_PCIE = 0x620000,
+ GRCBASE_LED = 0x6b8000,
+ GRCBASE_AVS_WRAP = 0x6b0000,
+ GRCBASE_MISC_AEU = 0x8000,
+ GRCBASE_BAR0_MAP = 0x1c00000,
+ MAX_BLOCK_ADDR
+};
+
+
+enum block_id {
+ BLOCK_GRC,
+ BLOCK_MISCS,
+ BLOCK_MISC,
+ BLOCK_DBU,
+ BLOCK_PGLUE_B,
+ BLOCK_CNIG,
+ BLOCK_CPMU,
+ BLOCK_NCSI,
+ BLOCK_OPTE,
+ BLOCK_BMB,
+ BLOCK_PCIE,
+ BLOCK_MCP,
+ BLOCK_MCP2,
+ BLOCK_PSWHST,
+ BLOCK_PSWHST2,
+ BLOCK_PSWRD,
+ BLOCK_PSWRD2,
+ BLOCK_PSWWR,
+ BLOCK_PSWWR2,
+ BLOCK_PSWRQ,
+ BLOCK_PSWRQ2,
+ BLOCK_PGLCS,
+ BLOCK_DMAE,
+ BLOCK_PTU,
+ BLOCK_TCM,
+ BLOCK_MCM,
+ BLOCK_UCM,
+ BLOCK_XCM,
+ BLOCK_YCM,
+ BLOCK_PCM,
+ BLOCK_QM,
+ BLOCK_TM,
+ BLOCK_DORQ,
+ BLOCK_BRB,
+ BLOCK_SRC,
+ BLOCK_PRS,
+ BLOCK_TSDM,
+ BLOCK_MSDM,
+ BLOCK_USDM,
+ BLOCK_XSDM,
+ BLOCK_YSDM,
+ BLOCK_PSDM,
+ BLOCK_TSEM,
+ BLOCK_MSEM,
+ BLOCK_USEM,
+ BLOCK_XSEM,
+ BLOCK_YSEM,
+ BLOCK_PSEM,
+ BLOCK_RSS,
+ BLOCK_TMLD,
+ BLOCK_MULD,
+ BLOCK_YULD,
+ BLOCK_XYLD,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
+ BLOCK_PRM,
+ BLOCK_PBF_PB1,
+ BLOCK_PBF_PB2,
+ BLOCK_RPB,
+ BLOCK_BTB,
+ BLOCK_PBF,
+ BLOCK_RDIF,
+ BLOCK_TDIF,
+ BLOCK_CDU,
+ BLOCK_CCFC,
+ BLOCK_TCFC,
+ BLOCK_IGU,
+ BLOCK_CAU,
+ BLOCK_RGFS,
+ BLOCK_RGSRC,
+ BLOCK_TGFS,
+ BLOCK_TGSRC,
+ BLOCK_UMAC,
+ BLOCK_XMAC,
+ BLOCK_DBG,
+ BLOCK_NIG,
+ BLOCK_WOL,
+ BLOCK_BMBN,
+ BLOCK_IPC,
+ BLOCK_NWM,
+ BLOCK_NWS,
+ BLOCK_MS,
+ BLOCK_PHY_PCIE,
+ BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_MISC_AEU,
+ BLOCK_BAR0_MAP,
+ MAX_BLOCK_ID
+};
+
+
+/*
+ * binary debug buffer types
+ */
+enum bin_dbg_buffer_type {
+ BIN_BUF_DBG_MODE_TREE /* init modes tree */,
+ BIN_BUF_DBG_DUMP_REG /* GRC Dump registers */,
+ BIN_BUF_DBG_DUMP_MEM /* GRC Dump memories */,
+ BIN_BUF_DBG_IDLE_CHK_REGS /* Idle Check registers */,
+ BIN_BUF_DBG_IDLE_CHK_IMMS /* Idle Check immediates */,
+ BIN_BUF_DBG_IDLE_CHK_RULES /* Idle Check rules */,
+ BIN_BUF_DBG_IDLE_CHK_PARSING_DATA /* Idle Check parsing data */,
+ BIN_BUF_DBG_ATTN_BLOCKS /* Attention blocks */,
+ BIN_BUF_DBG_ATTN_REGS /* Attention registers */,
+ BIN_BUF_DBG_ATTN_INDEXES /* Attention indexes */,
+ BIN_BUF_DBG_ATTN_NAME_OFFSETS /* Attention name offsets */,
+ BIN_BUF_DBG_BUS_BLOCKS /* Debug Bus blocks */,
+ BIN_BUF_DBG_BUS_LINES /* Debug Bus lines */,
+ BIN_BUF_DBG_BUS_BLOCKS_USER_DATA /* Debug Bus blocks user data */,
+ BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS /* Debug Bus line name offsets */,
+ BIN_BUF_DBG_PARSING_STRINGS /* Debug Tools parsing strings */,
+ MAX_BIN_DBG_BUFFER_TYPE
+};
+
+
+/*
+ * Attention bit mapping
+ */
+struct dbg_attn_bit_mapping {
+ u16 data;
+/* The index of an attention in the blocks attentions list
+ * (if is_unused_bit_cnt=0), or a number of consecutive unused attention bits
+ * (if is_unused_bit_cnt=1)
+ */
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
+/* if set, the val field indicates the number of consecutive unused attention
+ * bits
+ */
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
+};
+
+
+/*
+ * Attention block per-type data
+ */
+struct dbg_attn_block_type_data {
+/* Offset of this block attention names in the debug attention name offsets
+ * array
+ */
+ u16 names_offset;
+ u16 reserved1;
+ u8 num_regs /* Number of attention registers in this block */;
+ u8 reserved2;
+/* Offset of this blocks attention registers in the attention registers array
+ * (in dbg_attn_reg units)
+ */
+ u16 regs_offset;
+};
+
+/*
+ * Block attentions
+ */
+struct dbg_attn_block {
+/* attention block per-type data. Count must match the number of elements in
+ * dbg_attn_type.
+ */
+ struct dbg_attn_block_type_data per_type_data[2];
+};
+
+
+/*
+ * Attention register result
+ */
+struct dbg_attn_reg_result {
+ u32 data;
+/* STS attention register GRC address (in dwords) */
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
+/* Number of attention indexes in this register */
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+/* The offset of this registers attentions within the blocks attentions list
+ * (a value in the range 0..number of block attentions-1)
+ */
+ u16 block_attn_offset;
+ u16 reserved;
+ u32 sts_val /* Value read from the STS attention register */;
+ u32 mask_val /* Value read from the MASK attention register */;
+};
+
+/*
+ * Attention block result
+ */
+struct dbg_attn_block_result {
+ u8 block_id /* Registers block ID */;
+ u8 data;
+/* Value from dbg_attn_type enum */
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
+/* Number of registers in block in which at least one attention bit is set */
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
+/* Offset of this registers block attention names in the attention name offsets
+ * array
+ */
+ u16 names_offset;
+/* result data for each register in the block in which at least one attention
+ * bit is set
+ */
+ struct dbg_attn_reg_result reg_results[15];
+};
+
+
+
+/*
+ * mode header
+ */
+struct dbg_mode_hdr {
+ u16 data;
+/* indicates if a mode expression should be evaluated (0/1) */
+#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
+/* offset (in bytes) in modes expression buffer. valid only if eval_mode is
+ * set.
+ */
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
+};
+
+/*
+ * Attention register
+ */
+struct dbg_attn_reg {
+/* The offset of this registers attentions within the blocks attentions list
+ * (a value in the range 0..number of block attentions-1)
+ */
+ u16 block_attn_offset;
+ u32 data;
+/* STS attention register GRC address (in dwords) */
+#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+/* Number of attention in this register */
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
+/* STS_CLR attention register GRC address (in dwords) */
+ u32 sts_clr_address;
+ u32 mask_address /* MASK attention register GRC address (in dwords) */;
+};
+
+
+
+/*
+ * attention types
+ */
+enum dbg_attn_type {
+ ATTN_TYPE_INTERRUPT,
+ ATTN_TYPE_PARITY,
+ MAX_DBG_ATTN_TYPE
+};
+
+
+/*
+ * Debug Bus block data
+ */
+struct dbg_bus_block {
+/* Number of debug lines in this block (excluding signature & latency events) */
+ u8 num_of_lines;
+/* Indicates if this block has a latency events debug line (0/1). */
+ u8 has_latency_events;
+/* Offset of this blocks lines in the Debug Bus lines array. */
+ u16 lines_offset;
+};
+
+
+/*
+ * Debug Bus block user data
+ */
+struct dbg_bus_block_user_data {
+/* Number of debug lines in this block (excluding signature & latency events) */
+ u8 num_of_lines;
+/* Indicates if this block has a latency events debug line (0/1). */
+ u8 has_latency_events;
+/* Offset of this blocks lines in the debug bus line name offsets array. */
+ u16 names_offset;
+};
+
+
+/*
+ * Block Debug line data
+ */
+struct dbg_bus_line {
+ u8 data;
+/* Number of groups in the line (0-3) */
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+/* Indicates if this is a 128b line (0) or a 256b line (1). */
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
+/* Four 2-bit values, indicating the size of each group minus 1 (i.e.
+ * value=0 means size=1, value=1 means size=2, etc), starting from lsb.
+ * The sizes are in dwords (if is_256b=0) or in qwords (if is_256b=1).
+ */
+ u8 group_sizes;
+};
+
+
+/*
+ * condition header for registers dump
+ */
+struct dbg_dump_cond_hdr {
+ struct dbg_mode_hdr mode /* Mode header */;
+ u8 block_id /* block ID */;
+ u8 data_size /* size in dwords of the data following this header */;
+};
+
+
+/*
+ * memory data for registers dump
+ */
+struct dbg_dump_mem {
+ u32 dword0;
+/* register address (in dwords) */
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF /* memory group ID */
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+ u32 dword1;
+/* register size (in dwords) */
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
+};
+
+
+/*
+ * register data for registers dump
+ */
+struct dbg_dump_reg {
+ u32 data;
+/* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
+};
+
+
+/*
+ * split header for registers dump
+ */
+struct dbg_dump_split_hdr {
+ u32 hdr;
+/* size in dwords of the data following this header */
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF /* split type ID */
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+};
+
+
+/*
+ * condition header for idle check
+ */
+struct dbg_idle_chk_cond_hdr {
+ struct dbg_mode_hdr mode /* Mode header */;
+ u16 data_size /* size in dwords of the data following this header */;
+};
+
+
+/*
+ * Idle Check condition register
+ */
+struct dbg_idle_chk_cond_reg {
+ u32 data;
+/* Register GRC address (in dwords) */
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
+/* value from block_id enum */
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+ u16 num_entries /* number of registers entries to check */;
+ u8 entry_size /* size of registers entry (in dwords) */;
+ u8 start_entry /* index of the first entry to check */;
+};
+
+
+/*
+ * Idle Check info register
+ */
+struct dbg_idle_chk_info_reg {
+ u32 data;
+/* Register GRC address (in dwords) */
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
+/* value from block_id enum */
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+ u16 size /* register size in dwords */;
+ struct dbg_mode_hdr mode /* Mode header */;
+};
+
+
+/*
+ * Idle Check register
+ */
+union dbg_idle_chk_reg {
+ struct dbg_idle_chk_cond_reg cond_reg /* condition register */;
+ struct dbg_idle_chk_info_reg info_reg /* info register */;
+};
+
+
+/*
+ * Idle Check result header
+ */
+struct dbg_idle_chk_result_hdr {
+ u16 rule_id /* Failing rule index */;
+ u16 mem_entry_id /* Failing memory entry index */;
+ u8 num_dumped_cond_regs /* number of dumped condition registers */;
+ u8 num_dumped_info_regs /* number of dumped condition registers */;
+ u8 severity /* from dbg_idle_chk_severity_types enum */;
+ u8 reserved;
+};
+
+
+/*
+ * Idle Check result register header
+ */
+struct dbg_idle_chk_result_reg_hdr {
+ u8 data;
+/* indicates if this register is a memory */
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+/* register index within the failing rule */
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+ u8 start_entry /* index of the first checked entry */;
+ u16 size /* register size in dwords */;
+};
+
+
+/*
+ * Idle Check rule
+ */
+struct dbg_idle_chk_rule {
+ u16 rule_id /* Idle Check rule ID */;
+ u8 severity /* value from dbg_idle_chk_severity_types enum */;
+ u8 cond_id /* Condition ID */;
+ u8 num_cond_regs /* number of condition registers */;
+ u8 num_info_regs /* number of info registers */;
+ u8 num_imms /* number of immediates in the condition */;
+ u8 reserved1;
+/* offset of this rules registers in the idle check register array
+ * (in dbg_idle_chk_reg units)
+ */
+ u16 reg_offset;
+/* offset of this rules immediate values in the immediate values array
+ * (in dwords)
+ */
+ u16 imm_offset;
+};
+
+
+/*
+ * Idle Check rule parsing data
+ */
+struct dbg_idle_chk_rule_parsing_data {
+ u32 data;
+/* indicates if this register has a FW message */
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+/* Offset of this rules strings in the debug strings array (in bytes) */
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+
+/*
+ * idle check severity types
+ */
+enum dbg_idle_chk_severity_types {
+/* idle check failure should cause an error */
+ IDLE_CHK_SEVERITY_ERROR,
+/* idle check failure should cause an error only if theres no traffic */
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+/* idle check failure should cause a warning */
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+
+
+/*
+ * Debug Bus block data
+ */
+struct dbg_bus_block_data {
+ __le16 data;
+/* 4-bit value: bit i set -> dword/qword i is enabled. */
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
+/* Number of dwords/qwords to shift right the debug data (0-3) */
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
+/* 4-bit value: bit i set -> dword/qword i is forced valid. */
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
+/* 4-bit value: bit i set -> dword/qword i frame bit is forced. */
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+ u8 line_num /* Debug line number to select */;
+ u8 hw_id /* HW ID associated with the block */;
+};
+
+
+/*
+ * Debug Bus Clients
+ */
+enum dbg_bus_clients {
+ DBG_BUS_CLIENT_RBCN,
+ DBG_BUS_CLIENT_RBCP,
+ DBG_BUS_CLIENT_RBCR,
+ DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCF,
+ DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_OTHER_ENGINE,
+ DBG_BUS_CLIENT_TIMESTAMP,
+ DBG_BUS_CLIENT_CPU,
+ DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCQ,
+ DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCV,
+ MAX_DBG_BUS_CLIENTS
+};
+
+
+/*
+ * Debug Bus constraint operation types
+ */
+enum dbg_bus_constraint_ops {
+ DBG_BUS_CONSTRAINT_OP_EQ /* equal */,
+ DBG_BUS_CONSTRAINT_OP_NE /* not equal */,
+ DBG_BUS_CONSTRAINT_OP_LT /* less than */,
+ DBG_BUS_CONSTRAINT_OP_LTC /* less than (cyclic) */,
+ DBG_BUS_CONSTRAINT_OP_LE /* less than or equal */,
+ DBG_BUS_CONSTRAINT_OP_LEC /* less than or equal (cyclic) */,
+ DBG_BUS_CONSTRAINT_OP_GT /* greater than */,
+ DBG_BUS_CONSTRAINT_OP_GTC /* greater than (cyclic) */,
+ DBG_BUS_CONSTRAINT_OP_GE /* greater than or equal */,
+ DBG_BUS_CONSTRAINT_OP_GEC /* greater than or equal (cyclic) */,
+ MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
+
+/*
+ * Debug Bus trigger state data
+ */
+struct dbg_bus_trigger_state_data {
+ u8 data;
+/* 4-bit value: bit i set -> dword i of the trigger state block
+ * (after right shift) is enabled.
+ */
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
+/* 4-bit value: bit i set -> dword i is compared by a constraint */
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+};
+
+/*
+ * Debug Bus memory address
+ */
+struct dbg_bus_mem_addr {
+ u32 lo;
+ u32 hi;
+};
+
+/*
+ * Debug Bus PCI buffer data
+ */
+struct dbg_bus_pci_buf_data {
+ struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */;
+ struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */;
+ u32 size /* PCI buffer size in bytes */;
+};
+
+/*
+ * Debug Bus Storm EID range filter params
+ */
+struct dbg_bus_storm_eid_range_params {
+ u8 min /* Minimal event ID to filter on */;
+ u8 max /* Maximal event ID to filter on */;
+};
+
+/*
+ * Debug Bus Storm EID mask filter params
+ */
+struct dbg_bus_storm_eid_mask_params {
+ u8 val /* Event ID value */;
+ u8 mask /* Event ID mask. 1s in the mask = dont care bits. */;
+};
+
+/*
+ * Debug Bus Storm EID filter params
+ */
+union dbg_bus_storm_eid_params {
+/* EID range filter params */
+ struct dbg_bus_storm_eid_range_params range;
+/* EID mask filter params */
+ struct dbg_bus_storm_eid_mask_params mask;
+};
+
+/*
+ * Debug Bus Storm data
+ */
+struct dbg_bus_storm_data {
+ u8 enabled /* indicates if the Storm is enabled for recording */;
+ u8 mode /* Storm debug mode, valid only if the Storm is enabled */;
+ u8 hw_id /* HW ID associated with the Storm */;
+ u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */;
+/* 1 = EID range filter, 0 = EID mask filter. Valid only if eid_filter_en is
+ * set,
+ */
+ u8 eid_range_not_mask;
+ u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
+/* EID filter params to filter on. Valid only if eid_filter_en is set. */
+ union dbg_bus_storm_eid_params eid_filter_params;
+ u32 cid /* CID to filter on. Valid only if cid_filter_en is set. */;
+};
+
+/*
+ * Debug Bus data
+ */
+struct dbg_bus_data {
+ u32 app_version /* The tools version number of the application */;
+ u8 state /* The current debug bus state */;
+ u8 hw_dwords /* HW dwords per cycle */;
+/* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the
+ * HW ID of dword/qword i
+ */
+ u16 hw_id_mask;
+ u8 num_enabled_blocks /* Number of blocks enabled for recording */;
+ u8 num_enabled_storms /* Number of Storms enabled for recording */;
+ u8 target /* Output target */;
+ u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */;
+ u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */;
+/* Indicates if timestamp recording is enabled (0/1) */
+ u8 timestamp_input_en;
+ u8 filter_en /* Indicates if the recording filter is enabled (0/1) */;
+/* If true, the next added constraint belong to the filter. Otherwise,
+ * it belongs to the last added trigger state. Valid only if either filter or
+ * triggers are enabled.
+ */
+ u8 adding_filter;
+/* Indicates if the recording filter should be applied before the trigger.
+ * Valid only if both filter and trigger are enabled (0/1)
+ */
+ u8 filter_pre_trigger;
+/* Indicates if the recording filter should be applied after the trigger.
+ * Valid only if both filter and trigger are enabled (0/1)
+ */
+ u8 filter_post_trigger;
+ u16 reserved;
+/* Indicates if the recording trigger is enabled (0/1) */
+ u8 trigger_en;
+/* trigger states data */
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 next_trigger_state /* ID of next trigger state to be added */;
+/* ID of next filter/trigger constraint to be added */
+ u8 next_constraint_id;
+/* If true, all inputs are associated with HW ID 0. Otherwise, each input is
+ * assigned a different HW ID (0/1)
+ */
+ u8 unify_inputs;
+/* Indicates if the other engine sends it NW recording to this engine (0/1) */
+ u8 rcv_from_other_engine;
+/* Debug Bus PCI buffer data. Valid only when the target is
+ * DBG_BUS_TARGET_ID_PCI.
+ */
+ struct dbg_bus_pci_buf_data pci_buf;
+/* Debug Bus data for each block */
+ struct dbg_bus_block_data blocks[88];
+/* Debug Bus data for each block */
+ struct dbg_bus_storm_data storms[6];
+};
+
+
+/*
+ * Debug bus filter types
+ */
+enum dbg_bus_filter_types {
+ DBG_BUS_FILTER_TYPE_OFF /* filter always off */,
+ DBG_BUS_FILTER_TYPE_PRE /* filter before trigger only */,
+ DBG_BUS_FILTER_TYPE_POST /* filter after trigger only */,
+ DBG_BUS_FILTER_TYPE_ON /* filter always on */,
+ MAX_DBG_BUS_FILTER_TYPES
+};
+
+
+/*
+ * Debug bus frame modes
+ */
+enum dbg_bus_frame_modes {
+ DBG_BUS_FRAME_MODE_0HW_4ST = 0 /* 0 HW dwords, 4 Storm dwords */,
+ DBG_BUS_FRAME_MODE_4HW_0ST = 3 /* 4 HW dwords, 0 Storm dwords */,
+ DBG_BUS_FRAME_MODE_8HW_0ST = 4 /* 8 HW dwords, 0 Storm dwords */,
+ MAX_DBG_BUS_FRAME_MODES
+};
+
+
+/*
+ * Debug bus other engine mode
+ */
+enum dbg_bus_other_engine_modes {
+ DBG_BUS_OTHER_ENGINE_MODE_NONE,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+ MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+
+
+/*
+ * Debug bus post-trigger recording types
+ */
+enum dbg_bus_post_trigger_types {
+ DBG_BUS_POST_TRIGGER_RECORD /* start recording after trigger */,
+ DBG_BUS_POST_TRIGGER_DROP /* drop data after trigger */,
+ MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+
+/*
+ * Debug bus pre-trigger recording types
+ */
+enum dbg_bus_pre_trigger_types {
+ DBG_BUS_PRE_TRIGGER_START_FROM_ZERO /* start recording from time 0 */,
+/* start recording some chunks before trigger */
+ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
+ DBG_BUS_PRE_TRIGGER_DROP /* drop data before trigger */,
+ MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+
+/*
+ * Debug bus SEMI frame modes
+ */
+enum dbg_bus_semi_frame_modes {
+/* 0 slow dwords, 4 fast dwords */
+ DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+/* 4 slow dwords, 0 fast dwords */
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
+ MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
+
+/*
+ * Debug bus states
+ */
+enum dbg_bus_states {
+ DBG_BUS_STATE_IDLE /* debug bus idle state (not recording) */,
+/* debug bus is ready for configuration and recording */
+ DBG_BUS_STATE_READY,
+ DBG_BUS_STATE_RECORDING /* debug bus is currently recording */,
+ DBG_BUS_STATE_STOPPED /* debug bus recording has stopped */,
+ MAX_DBG_BUS_STATES
+};
+
+
+
+
+
+
+/*
+ * Debug Bus Storm modes
+ */
+enum dbg_bus_storm_modes {
+ DBG_BUS_STORM_MODE_PRINTF /* store data (fast debug) */,
+ DBG_BUS_STORM_MODE_PRAM_ADDR /* pram address (fast debug) */,
+ DBG_BUS_STORM_MODE_DRA_RW /* DRA read/write data (fast debug) */,
+ DBG_BUS_STORM_MODE_DRA_W /* DRA write data (fast debug) */,
+ DBG_BUS_STORM_MODE_LD_ST_ADDR /* load/store address (fast debug) */,
+ DBG_BUS_STORM_MODE_DRA_FSM /* DRA state machines (fast debug) */,
+ DBG_BUS_STORM_MODE_RH /* recording handlers (fast debug) */,
+ DBG_BUS_STORM_MODE_FOC /* FOC: FIN + DRA Rd (slow debug) */,
+ DBG_BUS_STORM_MODE_EXT_STORE /* FOC: External Store (slow) */,
+ MAX_DBG_BUS_STORM_MODES
+};
+
+
+/*
+ * Debug bus target IDs
+ */
+enum dbg_bus_targets {
+/* records debug bus to DBG block internal buffer */
+ DBG_BUS_TARGET_ID_INT_BUF,
+ DBG_BUS_TARGET_ID_NIG /* records debug bus to the NW */,
+ DBG_BUS_TARGET_ID_PCI /* records debug bus to a PCI buffer */,
+ MAX_DBG_BUS_TARGETS
+};
+
+
+
+/*
+ * GRC Dump data
+ */
+struct dbg_grc_data {
+/* Indicates if the GRC parameters were initialized */
+ u8 params_initialized;
+ u8 reserved1;
+ u16 reserved2;
+/* Value of each GRC parameter. Array size must match the enum dbg_grc_params.
+ */
+ u32 param_val[48];
+};
+
+
+/*
+ * Debug GRC params
+ */
+enum dbg_grc_params {
+ DBG_GRC_PARAM_DUMP_TSTORM /* dump Tstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_MSTORM /* dump Mstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_USTORM /* dump Ustorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_XSTORM /* dump Xstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_YSTORM /* dump Ystorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_PSTORM /* dump Pstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_REGS /* dump non-memory registers (0/1) */,
+ DBG_GRC_PARAM_DUMP_RAM /* dump Storm internal RAMs (0/1) */,
+ DBG_GRC_PARAM_DUMP_PBUF /* dump Storm passive buffer (0/1) */,
+ DBG_GRC_PARAM_DUMP_IOR /* dump Storm IORs (0/1) */,
+ DBG_GRC_PARAM_DUMP_VFC /* dump VFC memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_CM_CTX /* dump CM contexts (0/1) */,
+ DBG_GRC_PARAM_DUMP_PXP /* dump PXP memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_RSS /* dump RSS memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */,
+/* MCP Trace meta data size in bytes */
+ DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
+ DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_BTB /* dump BTB memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_BMB /* dump BMB memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_NIG /* dump NIG memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_MULD /* dump MULD memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_PRS /* dump PRS memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_DMAE /* dump PRS memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_TM /* dump TM (timers) memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_SDM /* dump SDM memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_DIF /* dump DIF memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_STATIC /* dump static debug data (0/1) */,
+ DBG_GRC_PARAM_UNSTALL /* un-stall Storms after dump (0/1) */,
+ DBG_GRC_PARAM_NUM_LCIDS /* number of LCIDs (0..320) */,
+ DBG_GRC_PARAM_NUM_LTIDS /* number of LTIDs (0..320) */,
+/* preset: exclude all memories from dump (1 only) */
+ DBG_GRC_PARAM_EXCLUDE_ALL,
+/* preset: include memories for crash dump (1 only) */
+ DBG_GRC_PARAM_CRASH,
+/* perform dump only if MFW is responding (0/1) */
+ DBG_GRC_PARAM_PARITY_SAFE,
+ DBG_GRC_PARAM_DUMP_CM /* dump CM memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_PHY /* dump PHY memories (0/1) */,
+ DBG_GRC_PARAM_NO_MCP /* dont perform MCP commands (0/1) */,
+ DBG_GRC_PARAM_NO_FW_VER /* dont read FW/MFW version (0/1) */,
+ MAX_DBG_GRC_PARAMS
+};
+
+
+/*
+ * Debug reset registers
+ */
+enum dbg_reset_regs {
+ DBG_RESET_REG_MISCS_PL_UA,
+ DBG_RESET_REG_MISCS_PL_HV,
+ DBG_RESET_REG_MISCS_PL_HV_2,
+ DBG_RESET_REG_MISC_PL_UA,
+ DBG_RESET_REG_MISC_PL_HV,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ DBG_RESET_REG_MISC_PL_PDA_VAUX,
+ MAX_DBG_RESET_REGS
+};
+
+
+/*
+ * Debug status codes
+ */
+enum dbg_status {
+ DBG_STATUS_OK,
+ DBG_STATUS_APP_VERSION_NOT_SET,
+ DBG_STATUS_UNSUPPORTED_APP_VERSION,
+ DBG_STATUS_DBG_BLOCK_NOT_RESET,
+ DBG_STATUS_INVALID_ARGS,
+ DBG_STATUS_OUTPUT_ALREADY_SET,
+ DBG_STATUS_INVALID_PCI_BUF_SIZE,
+ DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+ DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+ DBG_STATUS_TOO_MANY_INPUTS,
+ DBG_STATUS_INPUT_OVERLAP,
+ DBG_STATUS_HW_ONLY_RECORDING,
+ DBG_STATUS_STORM_ALREADY_ENABLED,
+ DBG_STATUS_STORM_NOT_ENABLED,
+ DBG_STATUS_BLOCK_ALREADY_ENABLED,
+ DBG_STATUS_BLOCK_NOT_ENABLED,
+ DBG_STATUS_NO_INPUT_ENABLED,
+ DBG_STATUS_NO_FILTER_TRIGGER_64B,
+ DBG_STATUS_FILTER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_NOT_ENABLED,
+ DBG_STATUS_CANT_ADD_CONSTRAINT,
+ DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+ DBG_STATUS_TOO_MANY_CONSTRAINTS,
+ DBG_STATUS_RECORDING_NOT_STARTED,
+ DBG_STATUS_DATA_DIDNT_TRIGGER,
+ DBG_STATUS_NO_DATA_RECORDED,
+ DBG_STATUS_DUMP_BUF_TOO_SMALL,
+ DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+ DBG_STATUS_UNKNOWN_CHIP,
+ DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+ DBG_STATUS_BLOCK_IN_RESET,
+ DBG_STATUS_INVALID_TRACE_SIGNATURE,
+ DBG_STATUS_INVALID_NVRAM_BUNDLE,
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+ DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+ DBG_STATUS_NVRAM_READ_FAILED,
+ DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+ DBG_STATUS_MCP_TRACE_BAD_DATA,
+ DBG_STATUS_MCP_TRACE_NO_META,
+ DBG_STATUS_MCP_COULD_NOT_HALT,
+ DBG_STATUS_MCP_COULD_NOT_RESUME,
+ DBG_STATUS_RESERVED2,
+ DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+ DBG_STATUS_IGU_FIFO_BAD_DATA,
+ DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+ DBG_STATUS_REG_FIFO_BAD_DATA,
+ DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+ DBG_STATUS_DBG_ARRAY_NOT_SET,
+ DBG_STATUS_FILTER_BUG,
+ DBG_STATUS_NON_MATCHING_LINES,
+ DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET,
+ DBG_STATUS_DBG_BUS_IN_USE,
+ MAX_DBG_STATUS
+};
+
+
+/*
+ * Debug Storms IDs
+ */
+enum dbg_storms {
+ DBG_TSTORM_ID,
+ DBG_MSTORM_ID,
+ DBG_USTORM_ID,
+ DBG_XSTORM_ID,
+ DBG_YSTORM_ID,
+ DBG_PSTORM_ID,
+ MAX_DBG_STORMS
+};
+
+
+/*
+ * Idle Check data
+ */
+struct idle_chk_data {
+ u32 buf_size /* Idle check buffer size in dwords */;
+/* Indicates if the idle check buffer size was set (0/1) */
+ u8 buf_size_set;
+ u8 reserved1;
+ u16 reserved2;
+};
+
+/*
+ * Debug Tools data (per HW function)
+ */
+struct dbg_tools_data {
+ struct dbg_grc_data grc /* GRC Dump data */;
+ struct dbg_bus_data bus /* Debug Bus data */;
+ struct idle_chk_data idle_chk /* Idle Check data */;
+ u8 mode_enable[40] /* Indicates if a mode is enabled (0/1) */;
+/* Indicates if a block is in reset state (0/1) */
+ u8 block_in_reset[88];
+ u8 chip_id /* Chip ID (from enum chip_ids) */;
+ u8 platform_id /* Platform ID */;
+ u8 initialized /* Indicates if the data was initialized */;
+ u8 use_dmae /* Indicates if DMAE should be used */;
+/* Numbers of registers that were read since last log */
+ u32 num_regs_read;
+};
+
+
+#endif /* __ECORE_HSI_DEBUG_TOOLS__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_eth.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_eth.h
new file mode 100644
index 00000000..6b512305
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_eth.h
@@ -0,0 +1,2414 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_HSI_ETH__
+#define __ECORE_HSI_ETH__
+/************************************************************************/
+/* Add include to common eth target for both eCore and protocol driver */
+/************************************************************************/
+#include "eth_common.h"
+
+/*
+ * The eth storm context for the Tstorm
+ */
+struct tstorm_eth_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/*
+ * The eth storm context for the Pstorm
+ */
+struct pstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/*
+ * The eth storm context for the Xstorm
+ */
+struct xstorm_eth_conn_st_ctx {
+ __le32 reserved[60];
+};
+
+struct e4_xstorm_eth_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+/* bit6 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+/* bit7 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+/* bit8 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+/* bit9 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+/* bit12 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+/* bit13 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+/* bit14 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+/* timer1cf */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+/* timer2cf */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+/* cf4 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+/* cf5 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+/* cf6 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+/* cf7 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+/* cf8 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+/* cf9 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+/* cf10 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+/* cf11 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+/* cf12 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+/* cf13 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+/* cf14 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+/* cf15 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+/* cf16 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+/* cf18 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+/* cf19 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+/* cf20 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+/* cf21 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+/* cf22 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+/* cf1en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+/* cf3en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+/* cf4en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+/* cf5en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+/* cf6en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+/* cf7en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+/* cf8en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+/* cf9en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+/* cf11en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+/* cf12en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+/* cf13en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+/* cf14en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+/* cf15en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+/* cf16en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+/* cf23 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 e5_reserved1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 updated_qm_pq_id /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 e5_reserved /* e5_reserved */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
+
+/*
+ * The eth storm context for the Ystorm
+ */
+struct ystorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+struct e4_ystorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+/* exist_in_qm1 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+/* cf0en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+/* cf1en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+/* cf2en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+/* rule0en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+/* rule1en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+/* rule2en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+/* rule3en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+/* rule4en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 tx_q0_int_coallecing_timeset /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 terminate_spqe /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 tx_bd_cons_upd /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+struct e4_tstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 rx_bd_cons /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 rx_bd_prod /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct e4_ustorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+/* exist_in_qm1 */
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+/* timer0cf */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+/* timer1cf */
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+/* timer2cf */
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+/* timer_stop_all */
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+/* cf4 */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+/* cf5 */
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+/* cf6 */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+ u8 flags2;
+/* cf0en */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+/* cf1en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+/* cf2en */
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+/* cf3en */
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+/* cf4en */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+/* cf5en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+/* cf6en */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+/* rule0en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+/* rule1en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+/* rule2en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+/* rule3en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+/* rule4en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+/* rule5en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+/* rule6en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+/* rule7en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+/* rule8en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 tx_bd_cons /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 tx_int_coallecing_timeset /* reg3 */;
+ __le16 tx_drv_bd_cons /* word2 */;
+ __le16 rx_drv_cqe_cons /* word3 */;
+};
+
+/*
+ * The eth storm context for the Ustorm
+ */
+struct ustorm_eth_conn_st_ctx {
+ __le32 reserved[40];
+};
+
+/*
+ * The eth storm context for the Mstorm
+ */
+struct mstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/*
+ * eth connection context
+ */
+struct e4_eth_conn_context {
+/* tstorm storm context */
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2] /* padding */;
+/* pstorm storm context */
+ struct pstorm_eth_conn_st_ctx pstorm_st_context;
+/* xstorm storm context */
+ struct xstorm_eth_conn_st_ctx xstorm_st_context;
+/* xstorm aggregative context */
+ struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
+/* ystorm storm context */
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+/* ystorm aggregative context */
+ struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
+/* tstorm aggregative context */
+ struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
+/* ustorm aggregative context */
+ struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
+/* ustorm storm context */
+ struct ustorm_eth_conn_st_ctx ustorm_st_context;
+/* mstorm storm context */
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
+};
+
+
+/*
+ * Ethernet filter types: mac/vlan/pair
+ */
+enum eth_error_code {
+ ETH_OK = 0x00 /* command succeeded */,
+/* mac add filters command failed due to cam full state */
+ ETH_FILTERS_MAC_ADD_FAIL_FULL,
+/* mac add filters command failed due to mtt2 full state */
+ ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2,
+/* mac add filters command failed due to duplicate mac address */
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2,
+/* mac add filters command failed due to duplicate mac address */
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2,
+/* mac delete filters command failed due to not found state */
+ ETH_FILTERS_MAC_DEL_FAIL_NOF,
+/* mac delete filters command failed due to not found state */
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2,
+/* mac delete filters command failed due to not found state */
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2,
+/* mac add filters command failed due to MAC Address of 00:00:00:00:00:00 */
+ ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC,
+/* vlan add filters command failed due to cam full state */
+ ETH_FILTERS_VLAN_ADD_FAIL_FULL,
+/* vlan add filters command failed due to duplicate VLAN filter */
+ ETH_FILTERS_VLAN_ADD_FAIL_DUP,
+/* vlan delete filters command failed due to not found state */
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF,
+/* vlan delete filters command failed due to not found state */
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1,
+/* pair add filters command failed due to duplicate request */
+ ETH_FILTERS_PAIR_ADD_FAIL_DUP,
+/* pair add filters command failed due to full state */
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL,
+/* pair add filters command failed due to full state */
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC,
+/* pair add filters command failed due not found state */
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF,
+/* pair add filters command failed due not found state */
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1,
+/* pair add filters command failed due to MAC Address of 00:00:00:00:00:00 */
+ ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC,
+/* vni add filters command failed due to cam full state */
+ ETH_FILTERS_VNI_ADD_FAIL_FULL,
+/* vni add filters command failed due to duplicate VNI filter */
+ ETH_FILTERS_VNI_ADD_FAIL_DUP,
+ ETH_FILTERS_GFT_UPDATE_FAIL /* Fail update GFT filter. */,
+ MAX_ETH_ERROR_CODE
+};
+
+
+/*
+ * opcodes for the event ring
+ */
+enum eth_event_opcode {
+ ETH_EVENT_UNUSED,
+ ETH_EVENT_VPORT_START,
+ ETH_EVENT_VPORT_UPDATE,
+ ETH_EVENT_VPORT_STOP,
+ ETH_EVENT_TX_QUEUE_START,
+ ETH_EVENT_TX_QUEUE_STOP,
+ ETH_EVENT_RX_QUEUE_START,
+ ETH_EVENT_RX_QUEUE_UPDATE,
+ ETH_EVENT_RX_QUEUE_STOP,
+ ETH_EVENT_FILTERS_UPDATE,
+ ETH_EVENT_RX_ADD_OPENFLOW_FILTER,
+ ETH_EVENT_RX_DELETE_OPENFLOW_FILTER,
+ ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
+ ETH_EVENT_RX_ADD_UDP_FILTER,
+ ETH_EVENT_RX_DELETE_UDP_FILTER,
+ ETH_EVENT_RX_CREATE_GFT_ACTION,
+ ETH_EVENT_RX_GFT_UPDATE_FILTER,
+ ETH_EVENT_TX_QUEUE_UPDATE,
+ MAX_ETH_EVENT_OPCODE
+};
+
+
+/*
+ * Classify rule types in E2/E3
+ */
+enum eth_filter_action {
+ ETH_FILTER_ACTION_UNUSED,
+ ETH_FILTER_ACTION_REMOVE,
+ ETH_FILTER_ACTION_ADD,
+/* Remove all filters of given type and vport ID. */
+ ETH_FILTER_ACTION_REMOVE_ALL,
+ MAX_ETH_FILTER_ACTION
+};
+
+
+/*
+ * Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_cmd {
+ u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
+ u8 vport_id /* the vport id */;
+ u8 action /* filter command action: add/remove/replace */;
+ u8 reserved0;
+ __le32 vni;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan_id;
+};
+
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_cmd_header {
+ u8 rx /* If set, apply these commands to the RX path */;
+ u8 tx /* If set, apply these commands to the TX path */;
+ u8 cmd_cnt /* Number of filter commands */;
+/* 0 - dont assert in case of filter configuration error. Just return an error
+ * code. 1 - assert in case of filter configuration error.
+ */
+ u8 assert_on_error;
+ u8 reserved1[4];
+};
+
+
+/*
+ * Ethernet filter types: mac/vlan/pair
+ */
+enum eth_filter_type {
+ ETH_FILTER_TYPE_UNUSED,
+ ETH_FILTER_TYPE_MAC /* Add/remove a MAC address */,
+ ETH_FILTER_TYPE_VLAN /* Add/remove a VLAN */,
+ ETH_FILTER_TYPE_PAIR /* Add/remove a MAC-VLAN pair */,
+ ETH_FILTER_TYPE_INNER_MAC /* Add/remove a inner MAC address */,
+ ETH_FILTER_TYPE_INNER_VLAN /* Add/remove a inner VLAN */,
+ ETH_FILTER_TYPE_INNER_PAIR /* Add/remove a inner MAC-VLAN pair */,
+/* Add/remove a inner MAC-VNI pair */
+ ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR,
+ ETH_FILTER_TYPE_MAC_VNI_PAIR /* Add/remove a MAC-VNI pair */,
+ ETH_FILTER_TYPE_VNI /* Add/remove a VNI */,
+ MAX_ETH_FILTER_TYPE
+};
+
+
+/*
+ * eth IPv4 Fragment Type
+ */
+enum eth_ipv4_frag_type {
+ ETH_IPV4_NOT_FRAG /* IPV4 Packet Not Fragmented */,
+/* First Fragment of IPv4 Packet (contains headers) */
+ ETH_IPV4_FIRST_FRAG,
+/* Non-First Fragment of IPv4 Packet (does not contain headers) */
+ ETH_IPV4_NON_FIRST_FRAG,
+ MAX_ETH_IPV4_FRAG_TYPE
+};
+
+
+/*
+ * eth IPv4 Fragment Type
+ */
+enum eth_ip_type {
+ ETH_IPV4 /* IPv4 */,
+ ETH_IPV6 /* IPv6 */,
+ MAX_ETH_IP_TYPE
+};
+
+
+/*
+ * Ethernet Ramrod Command IDs
+ */
+enum eth_ramrod_cmd_id {
+ ETH_RAMROD_UNUSED,
+ ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
+ ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
+ ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
+ ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+ ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+ ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+ ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+ ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
+ ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
+/* RX - Create an Openflow Action */
+ ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION,
+/* RX - Add an Openflow Filter to the Searcher */
+ ETH_RAMROD_RX_ADD_OPENFLOW_FILTER,
+/* RX - Delete an Openflow Filter to the Searcher */
+ ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER,
+/* RX - Add a UDP Filter to the Searcher */
+ ETH_RAMROD_RX_ADD_UDP_FILTER,
+/* RX - Delete a UDP Filter to the Searcher */
+ ETH_RAMROD_RX_DELETE_UDP_FILTER,
+ ETH_RAMROD_RX_CREATE_GFT_ACTION /* RX - Create a Gft Action */,
+/* RX - Add/Delete a GFT Filter to the Searcher */
+ ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
+ MAX_ETH_RAMROD_CMD_ID
+};
+
+
+/*
+ * return code from eth sp ramrods
+ */
+struct eth_return_code {
+ u8 value;
+/* error code (use enum eth_error_code) */
+#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+#define ETH_RETURN_CODE_RESERVED_MASK 0x3
+#define ETH_RETURN_CODE_RESERVED_SHIFT 5
+/* rx path - 0, tx path - 1 */
+#define ETH_RETURN_CODE_RX_TX_MASK 0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+};
+
+
+/*
+ * What to do in case an error occurs
+ */
+enum eth_tx_err {
+ ETH_TX_ERR_DROP /* Drop erroneous packet. */,
+/* Assert an interrupt for PF, declare as malicious for VF */
+ ETH_TX_ERR_ASSERT_MALICIOUS,
+ MAX_ETH_TX_ERR
+};
+
+
+/*
+ * Array of the different error type behaviors
+ */
+struct eth_tx_err_vals {
+ __le16 values;
+/* Wrong VLAN insertion mode (use enum eth_tx_err) */
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
+/* Packet is below minimal size (use enum eth_tx_err) */
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
+/* Vport has sent spoofed packet (use enum eth_tx_err) */
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
+/* Packet with illegal type of inband tag (use enum eth_tx_err) */
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
+/* Packet marked for VLAN insertion when inband tag is present
+ * (use enum eth_tx_err)
+ */
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+/* Non LSO packet larger than MTU (use enum eth_tx_err) */
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
+/* VF/PF has sent LLDP/PFC or any other type of control packet which is not
+ * allowed to (use enum eth_tx_err)
+ */
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
+};
+
+
+/*
+ * vport rss configuration data
+ */
+struct eth_vport_rss_config {
+ __le16 capabilities;
+/* configuration of the IpV4 2-tuple capability */
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
+/* configuration of the IpV6 2-tuple capability */
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
+/* configuration of the IpV4 4-tuple capability for TCP */
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
+/* configuration of the IpV6 4-tuple capability for TCP */
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
+/* configuration of the IpV4 4-tuple capability for UDP */
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
+/* configuration of the IpV6 4-tuple capability for UDP */
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
+/* configuration of the 5-tuple capability */
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+/* if set update the rss keys */
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
+/* The RSS engine ID. Must be allocated to each vport with RSS enabled.
+ * Total number of RSS engines is ETH_RSS_ENGINE_NUM_ , according to chip type.
+ */
+ u8 rss_id;
+ u8 rss_mode /* The RSS mode for this function */;
+ u8 update_rss_key /* if set update the rss key */;
+/* if set update the indirection table values */
+ u8 update_rss_ind_table;
+/* if set update the capabilities and indirection table size. */
+ u8 update_rss_capabilities;
+ u8 tbl_size /* rss mask (Tbl size) */;
+ __le32 reserved2[2];
+/* RSS indirection table */
+ __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+/* RSS key supplied to us by OS */
+ __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
+ __le32 reserved3[2];
+};
+
+
+/*
+ * eth vport RSS mode
+ */
+enum eth_vport_rss_mode {
+ ETH_VPORT_RSS_MODE_DISABLED /* RSS Disabled */,
+ ETH_VPORT_RSS_MODE_REGULAR /* Regular (ndis-like) RSS */,
+ MAX_ETH_VPORT_RSS_MODE
+};
+
+
+/*
+ * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ */
+struct eth_vport_rx_mode {
+ __le16 state;
+/* drop all unicast packets */
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
+/* accept all unicast packets (subject to vlan) */
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+/* accept all unmatched unicast packets */
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+/* drop all multicast packets */
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
+/* accept all multicast packets (subject to vlan) */
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
+/* accept all broadcast packets (subject to vlan) */
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+};
+
+
+/*
+ * Command for setting tpa parameters
+ */
+struct eth_vport_tpa_param {
+ u8 tpa_ipv4_en_flg /* Enable TPA for IPv4 packets */;
+ u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */;
+ u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */;
+ u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */;
+/* If set, start each TPA segment on new BD (GRO mode). One BD per segment
+ * allowed.
+ */
+ u8 tpa_pkt_split_flg;
+/* If set, put header of first TPA segment on first BD and data on second BD. */
+ u8 tpa_hdr_data_split_flg;
+/* If set, GRO data consistent will checked for TPA continue */
+ u8 tpa_gro_consistent_flg;
+/* maximum number of opened aggregations per v-port */
+ u8 tpa_max_aggs_num;
+ __le16 tpa_max_size /* maximal size for the aggregated TPA packets */;
+/* minimum TCP payload size for a packet to start aggregation */
+ __le16 tpa_min_size_to_start;
+/* minimum TCP payload size for a packet to continue aggregation */
+ __le16 tpa_min_size_to_cont;
+/* maximal number of buffers that can be used for one aggregation */
+ u8 max_buff_num;
+ u8 reserved;
+};
+
+
+/*
+ * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ */
+struct eth_vport_tx_mode {
+ __le16 state;
+/* drop all unicast packets */
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+/* accept all unicast packets (subject to vlan) */
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+/* drop all multicast packets */
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+/* accept all multicast packets (subject to vlan) */
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+/* accept all broadcast packets (subject to vlan) */
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
+};
+
+
+/*
+ * GFT filter update action type.
+ */
+enum gft_filter_update_action {
+ GFT_ADD_FILTER,
+ GFT_DELETE_FILTER,
+ MAX_GFT_FILTER_UPDATE_ACTION
+};
+
+
+
+
+/*
+ * Ramrod data for rx add openflow filter
+ */
+struct rx_add_openflow_filter_data {
+ __le16 action_icid /* CID of Action to run for this filter */;
+ u8 priority /* Searcher String - Packet priority */;
+ u8 reserved0;
+ __le32 tenant_id /* Searcher String - Tenant ID */;
+/* Searcher String - Destination Mac Bytes 0 to 1 */
+ __le16 dst_mac_hi;
+/* Searcher String - Destination Mac Bytes 2 to 3 */
+ __le16 dst_mac_mid;
+/* Searcher String - Destination Mac Bytes 4 to 5 */
+ __le16 dst_mac_lo;
+ __le16 src_mac_hi /* Searcher String - Source Mac 0 to 1 */;
+ __le16 src_mac_mid /* Searcher String - Source Mac 2 to 3 */;
+ __le16 src_mac_lo /* Searcher String - Source Mac 4 to 5 */;
+ __le16 vlan_id /* Searcher String - Vlan ID */;
+ __le16 l2_eth_type /* Searcher String - Last L2 Ethertype */;
+ u8 ipv4_dscp /* Searcher String - IPv4 6 MSBs of the TOS Field */;
+ u8 ipv4_frag_type /* Searcher String - IPv4 Fragmentation Type */;
+ u8 ipv4_over_ip /* Searcher String - IPv4 Over IP Type */;
+ u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
+ __le32 ipv4_dst_addr /* Searcher String - IPv4 Destination Address */;
+ __le32 ipv4_src_addr /* Searcher String - IPv4 Source Address */;
+ __le16 l4_dst_port /* Searcher String - TCP/UDP Destination Port */;
+ __le16 l4_src_port /* Searcher String - TCP/UDP Source Port */;
+};
+
+
+/*
+ * Ramrod data for rx create gft action
+ */
+struct rx_create_gft_action_data {
+ u8 vport_id /* Vport Id of GFT Action */;
+ u8 reserved[7];
+};
+
+
+/*
+ * Ramrod data for rx create openflow action
+ */
+struct rx_create_openflow_action_data {
+ u8 vport_id /* ID of RX queue */;
+ u8 reserved[7];
+};
+
+
+/*
+ * Ramrod data for rx queue start ramrod
+ */
+struct rx_queue_start_ramrod_data {
+ __le16 rx_queue_id /* ID of RX queue */;
+ __le16 num_of_pbl_pages /* Number of pages in CQE PBL */;
+ __le16 bd_max_bytes /* maximal bytes that can be places on the bd */;
+ __le16 sb_id /* Status block ID */;
+ u8 sb_index /* index of the protocol index */;
+ u8 vport_id /* ID of virtual port */;
+ u8 default_rss_queue_flg /* set queue as default rss queue if set */;
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 stats_counter_id /* Statistics counter ID */;
+ u8 pin_context /* Pin context in CCFC to improve performance */;
+ u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD/SGE fetch */;
+/* PXP command TPH Valid - for packet placement */
+ u8 pxp_tph_valid_pkt;
+/* PXP command Steering tag hint. Use enum pxp_tph_st_hint */
+ u8 pxp_st_hint;
+ __le16 pxp_st_index /* PXP command Steering tag index */;
+/* Indicates that current queue belongs to poll-mode driver */
+ u8 pmd_mode;
+/* Indicates that the current queue is using the TX notification queue
+ * mechanism - should be set only for PMD queue
+ */
+ u8 notify_en;
+/* Initial value for the toggle valid bit - used in PMD mode */
+ u8 toggle_val;
+/* Index of RX producers in VF zone. Used for VF only. */
+ u8 vf_rx_prod_index;
+/* Backward compatibility mode. If set, unprotected mStorm queue zone will used
+ * for VF RX producers instead of VF zone.
+ */
+ u8 vf_rx_prod_use_zone_a;
+ u8 reserved[5];
+ __le16 reserved1 /* FW reserved. */;
+ struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
+ struct regpair bd_base /* bd address of the first bd page */;
+ struct regpair reserved2 /* FW reserved. */;
+};
+
+
+/*
+ * Ramrod data for rx queue stop ramrod
+ */
+struct rx_queue_stop_ramrod_data {
+ __le16 rx_queue_id /* ID of RX queue */;
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 vport_id /* ID of virtual port */;
+ u8 reserved[3];
+};
+
+
+/*
+ * Ramrod data for rx queue update ramrod
+ */
+struct rx_queue_update_ramrod_data {
+ __le16 rx_queue_id /* ID of RX queue */;
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 vport_id /* ID of virtual port */;
+/* If set, update default rss queue to this RX queue. */
+ u8 set_default_rss_queue;
+ u8 reserved[3];
+ u8 reserved1 /* FW reserved. */;
+ u8 reserved2 /* FW reserved. */;
+ u8 reserved3 /* FW reserved. */;
+ __le16 reserved4 /* FW reserved. */;
+ __le16 reserved5 /* FW reserved. */;
+ struct regpair reserved6 /* FW reserved. */;
+};
+
+
+/*
+ * Ramrod data for rx Add UDP Filter
+ */
+struct rx_udp_filter_data {
+ __le16 action_icid /* CID of Action to run for this filter */;
+ __le16 vlan_id /* Searcher String - Vlan ID */;
+ u8 ip_type /* Searcher String - IP Type */;
+ u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
+ __le16 reserved1;
+/* Searcher String - IP Destination Address, for IPv4 use ip_dst_addr[0] only */
+ __le32 ip_dst_addr[4];
+/* Searcher String - IP Source Address, for IPv4 use ip_dst_addr[0] only */
+ __le32 ip_src_addr[4];
+ __le16 udp_dst_port /* Searcher String - UDP Destination Port */;
+ __le16 udp_src_port /* Searcher String - UDP Source Port */;
+ __le32 tenant_id /* Searcher String - Tenant ID */;
+};
+
+
+/*
+ * add or delete GFT filter - filter is packet header of type of packet wished
+ * to pass certain FW flow
+ */
+struct rx_update_gft_filter_data {
+/* Pointer to Packet Header That Defines GFT Filter */
+ struct regpair pkt_hdr_addr;
+ __le16 pkt_hdr_length /* Packet Header Length */;
+/* Action icid. Valid if action_icid_valid flag set. */
+ __le16 action_icid;
+ __le16 rx_qid /* RX queue ID. Valid if rx_qid_valid set. */;
+ __le16 flow_id /* RX flow ID. Valid if flow_id_valid set. */;
+/* RX vport Id. For drop flow, set to ETH_GFT_TRASHCAN_VPORT. */
+ __le16 vport_id;
+/* If set, action_icid will used for GFT filter update. */
+ u8 action_icid_valid;
+/* If set, rx_qid will used for traffic steering, in additional to vport_id.
+ * flow_id_valid must be cleared. If cleared, queue ID will selected by RSS.
+ */
+ u8 rx_qid_valid;
+/* If set, flow_id will reported by CQE, rx_qid_valid must be cleared. If
+ * cleared, flow_id 0 will reported by CQE.
+ */
+ u8 flow_id_valid;
+ u8 filter_action /* Use to set type of action on filter */;
+/* 0 - dont assert in case of error. Just return an error code. 1 - assert in
+ * case of error.
+ */
+ u8 assert_on_error;
+/* If set, inner VLAN will be removed regardless to VPORT configuration.
+ * Supported by E4 only.
+ */
+ u8 inner_vlan_removal_en;
+};
+
+
+
+/*
+ * Ramrod data for tx queue start ramrod
+ */
+struct tx_queue_start_ramrod_data {
+ __le16 sb_id /* Status block ID */;
+ u8 sb_index /* Status block protocol index */;
+ u8 vport_id /* VPort ID */;
+ u8 reserved0 /* FW reserved. (qcn_rl_en) */;
+ u8 stats_counter_id /* Statistics counter ID to use */;
+ __le16 qm_pq_id /* QM PQ ID */;
+ u8 flags;
+/* 0: Enable QM opportunistic flow. 1: Disable QM opportunistic flow */
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+/* If set, Test Mode - packets will be duplicated by Xstorm handler */
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+/* If set, Test Mode - packets destination will be determined by dest_port_mode
+ * field from Tx BD
+ */
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+/* Indicates that current queue belongs to poll-mode driver */
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+/* Indicates that the current queue is using the TX notification queue
+ * mechanism - should be set only for PMD queue
+ */
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+/* Pin context in CCFC to improve performance */
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+ u8 pxp_st_hint /* PXP command Steering tag hint */;
+ u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD fetch */;
+ u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet fetch */;
+ __le16 pxp_st_index /* PXP command Steering tag index */;
+/* TX completion min agg size - for PMD queues */
+ __le16 comp_agg_size;
+ __le16 queue_zone_id /* queue zone ID to use */;
+ __le16 reserved2 /* FW reserved. (test_dup_count) */;
+ __le16 pbl_size /* Number of BD pages pointed by PBL */;
+/* unique Queue ID - currently used only by PMD flow */
+ __le16 tx_queue_id;
+/* Unique Same-As-Last Resource ID - improves performance for same-as-last
+ * packets per connection (range 0..ETH_TX_NUM_SAME_AS_LAST_ENTRIES-1 IDs
+ * available)
+ */
+ __le16 same_as_last_id;
+ __le16 reserved[3];
+ struct regpair pbl_base_addr /* address of the pbl page */;
+/* BD consumer address in host - for PMD queues */
+ struct regpair bd_cons_address;
+};
+
+
+/*
+ * Ramrod data for tx queue stop ramrod
+ */
+struct tx_queue_stop_ramrod_data {
+ __le16 reserved[4];
+};
+
+
+/*
+ * Ramrod data for tx queue update ramrod
+ */
+struct tx_queue_update_ramrod_data {
+ __le16 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
+ __le16 qm_pq_id /* Updated QM PQ ID */;
+ __le32 reserved0;
+ struct regpair reserved1[5];
+};
+
+
+
+/*
+ * Ramrod data for vport update ramrod
+ */
+struct vport_filter_update_ramrod_data {
+/* Header for Filter Commands (RX/TX, Add/Remove/Replace, etc) */
+ struct eth_filter_cmd_header filter_cmd_hdr;
+/* Filter Commands */
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+};
+
+
+/*
+ * Ramrod data for vport start ramrod
+ */
+struct vport_start_ramrod_data {
+ u8 vport_id;
+ u8 sw_fid;
+ __le16 mtu;
+ u8 drop_ttl0_en /* if set, drop packet with ttl=0 */;
+ u8 inner_vlan_removal_en;
+ struct eth_vport_rx_mode rx_mode /* Rx filter data */;
+ struct eth_vport_tx_mode tx_mode /* Tx filter data */;
+/* TPA configuration parameters */
+ struct eth_vport_tpa_param tpa_param;
+ __le16 default_vlan /* Default Vlan value to be forced by FW */;
+ u8 tx_switching_en /* Tx switching is enabled for current Vport */;
+/* Anti-spoofing verification is set for current Vport */
+ u8 anti_spoofing_en;
+/* If set, the default Vlan value is forced by the FW */
+ u8 default_vlan_en;
+/* If set, the vport handles PTP Timesync Packets */
+ u8 handle_ptp_pkts;
+/* If enable then innerVlan will be striped and not written to cqe */
+ u8 silent_vlan_removal_en;
+/* If set untagged filter (vlan0) is added to current Vport, otherwise port is
+ * marked as any-vlan
+ */
+ u8 untagged;
+/* Desired behavior per TX error type */
+ struct eth_tx_err_vals tx_err_behav;
+/* If set, ETH header padding will not inserted. placement_offset will be zero.
+ */
+ u8 zero_placement_offset;
+/* If set, control frames will be filtered according to MAC check. */
+ u8 ctl_frame_mac_check_en;
+/* If set, control frames will be filtered according to ethtype check. */
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[1];
+};
+
+
+/*
+ * Ramrod data for vport stop ramrod
+ */
+struct vport_stop_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+
+/*
+ * Ramrod data for vport update ramrod
+ */
+struct vport_update_ramrod_data_cmn {
+ u8 vport_id;
+ u8 update_rx_active_flg /* set if rx active flag should be handled */;
+ u8 rx_active_flg /* rx active flag value */;
+ u8 update_tx_active_flg /* set if tx active flag should be handled */;
+ u8 tx_active_flg /* tx active flag value */;
+ u8 update_rx_mode_flg /* set if rx state data should be handled */;
+ u8 update_tx_mode_flg /* set if tx state data should be handled */;
+/* set if approx. mcast data should be handled */
+ u8 update_approx_mcast_flg;
+ u8 update_rss_flg /* set if rss data should be handled */;
+/* set if inner_vlan_removal_en should be handled */
+ u8 update_inner_vlan_removal_en_flg;
+ u8 inner_vlan_removal_en;
+/* set if tpa parameters should be handled, TPA must be disable before */
+ u8 update_tpa_param_flg;
+ u8 update_tpa_en_flg /* set if tpa enable changes */;
+/* set if tx switching en flag should be handled */
+ u8 update_tx_switching_en_flg;
+ u8 tx_switching_en /* tx switching en value */;
+/* set if anti spoofing flag should be handled */
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en /* Anti-spoofing verification en value */;
+/* set if handle_ptp_pkts should be handled. */
+ u8 update_handle_ptp_pkts;
+/* If set, the vport handles PTP Timesync Packets */
+ u8 handle_ptp_pkts;
+/* If set, the default Vlan enable flag is updated */
+ u8 update_default_vlan_en_flg;
+/* If set, the default Vlan value is forced by the FW */
+ u8 default_vlan_en;
+/* If set, the default Vlan value is updated */
+ u8 update_default_vlan_flg;
+ __le16 default_vlan /* Default Vlan value to be forced by FW */;
+/* set if accept_any_vlan should be handled */
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan /* accept_any_vlan updated value */;
+/* Set to remove vlan silently, update_inner_vlan_removal_en_flg must be enabled
+ * as well. If Rx is in noSgl mode send rx_queue_update_ramrod_data
+ */
+ u8 silent_vlan_removal_en;
+/* If set, MTU will be updated. Vport must be not active. */
+ u8 update_mtu_flg;
+ __le16 mtu /* New MTU value. Used if update_mtu_flg are set */;
+/* If set, ctl_frame_mac_check_en and ctl_frame_ethtype_check_en will be
+ * updated
+ */
+ u8 update_ctl_frame_checks_en_flg;
+/* If set, control frames will be filtered according to MAC check. */
+ u8 ctl_frame_mac_check_en;
+/* If set, control frames will be filtered according to ethtype check. */
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[15];
+};
+
+struct vport_update_ramrod_mcast {
+ __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS] /* multicast bins */;
+};
+
+/*
+ * Ramrod data for vport update ramrod
+ */
+struct vport_update_ramrod_data {
+/* Common data for all vport update ramrods */
+ struct vport_update_ramrod_data_cmn common;
+ struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */;
+ struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */;
+ __le32 reserved[3];
+/* TPA configuration parameters */
+ struct eth_vport_tpa_param tpa_param;
+ struct vport_update_ramrod_mcast approx_mcast;
+ struct eth_vport_rss_config rss_config /* rss config data */;
+};
+
+
+
+
+
+
+struct E4XstormEthConnAgCtxDqExtLdPart {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+/* bit6 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+/* bit7 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+ u8 flags1;
+/* bit8 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+/* bit9 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+/* bit12 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+/* bit13 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+/* bit14 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+/* timer1cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+/* timer2cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+/* cf4 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+/* cf5 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+/* cf6 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+/* cf7 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+ u8 flags4;
+/* cf8 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+/* cf9 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+/* cf10 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+/* cf11 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+/* cf12 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+/* cf13 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+/* cf14 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+/* cf15 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+/* cf16 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+/* cf18 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+/* cf19 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+/* cf20 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+/* cf21 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+/* cf22 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+/* cf1en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+/* cf3en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+/* cf4en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+/* cf5en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+/* cf6en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+/* cf7en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+/* cf8en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+/* cf9en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+/* cf11en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+/* cf12en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+/* cf13en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+/* cf14en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+/* cf15en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+/* cf16en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+/* cf23 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 e5_reserved1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 updated_qm_pq_id /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+};
+
+
+struct e4_mstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+
+
+
+
+struct e4_xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+/* bit12 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+/* bit13 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+/* bit14 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+/* timer1cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+/* timer2cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+/* cf1en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+/* cf3en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+/* cf4en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+/* cf5en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+/* cf6en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+/* cf7en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+/* cf8en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+/* cf9en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+/* cf11en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+/* cf12en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+/* cf13en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+/* cf14en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+/* cf15en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+/* cf16en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 e5_reserved1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 updated_qm_pq_id /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+};
+
+
+
+/*
+ * GFT CAM line struct
+ */
+struct gft_cam_line {
+ __le32 camline;
+/* Indication if the line is valid. */
+#define GFT_CAM_LINE_VALID_MASK 0x1
+#define GFT_CAM_LINE_VALID_SHIFT 0
+/* Data bits, the word that compared with the profile key */
+#define GFT_CAM_LINE_DATA_MASK 0x3FFF
+#define GFT_CAM_LINE_DATA_SHIFT 1
+/* Mask bits, indicate the bits in the data that are Dont-Care */
+#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
+#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
+#define GFT_CAM_LINE_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_RESERVED1_SHIFT 29
+};
+
+
+/*
+ * GFT CAM line struct (for driversim use)
+ */
+struct gft_cam_line_mapped {
+ __le32 camline;
+/* Indication if the line is valid. */
+#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
+/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
+/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
+/* use enum gft_profile_upper_protocol_type
+ * (use enum gft_profile_upper_protocol_type)
+ */
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
+/* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
+/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
+/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
+/* use enum gft_profile_upper_protocol_type
+ * (use enum gft_profile_upper_protocol_type)
+ */
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
+/* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25
+#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
+};
+
+
+union gft_cam_line_union {
+ struct gft_cam_line cam_line;
+ struct gft_cam_line_mapped cam_line_mapped;
+};
+
+
+/*
+ * Used in gft_profile_key: Indication for ip version
+ */
+enum gft_profile_ip_version {
+ GFT_PROFILE_IPV4 = 0,
+ GFT_PROFILE_IPV6 = 1,
+ MAX_GFT_PROFILE_IP_VERSION
+};
+
+
+/*
+ * Profile key stucr fot GFT logic in Prs
+ */
+struct gft_profile_key {
+ __le16 profile_key;
+/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
+#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
+/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
+/* use enum gft_profile_upper_protocol_type
+ * (use enum gft_profile_upper_protocol_type)
+ */
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+/* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
+#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
+#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
+};
+
+
+/*
+ * Used in gft_profile_key: Indication for tunnel type
+ */
+enum gft_profile_tunnel_type {
+ GFT_PROFILE_NO_TUNNEL = 0,
+ GFT_PROFILE_VXLAN_TUNNEL = 1,
+ GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2,
+ GFT_PROFILE_GRE_IP_TUNNEL = 3,
+ GFT_PROFILE_GENEVE_MAC_TUNNEL = 4,
+ GFT_PROFILE_GENEVE_IP_TUNNEL = 5,
+ MAX_GFT_PROFILE_TUNNEL_TYPE
+};
+
+
+/*
+ * Used in gft_profile_key: Indication for protocol type
+ */
+enum gft_profile_upper_protocol_type {
+ GFT_PROFILE_ROCE_PROTOCOL = 0,
+ GFT_PROFILE_RROCE_PROTOCOL = 1,
+ GFT_PROFILE_FCOE_PROTOCOL = 2,
+ GFT_PROFILE_ICMP_PROTOCOL = 3,
+ GFT_PROFILE_ARP_PROTOCOL = 4,
+ GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
+ GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
+ GFT_PROFILE_TCP_PROTOCOL = 7,
+ GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
+ GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
+ GFT_PROFILE_UDP_PROTOCOL = 10,
+ GFT_PROFILE_USER_IP_1_INNER = 11,
+ GFT_PROFILE_USER_IP_2_OUTER = 12,
+ GFT_PROFILE_USER_ETH_1_INNER = 13,
+ GFT_PROFILE_USER_ETH_2_OUTER = 14,
+ GFT_PROFILE_RAW = 15,
+ MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
+};
+
+
+/*
+ * GFT RAM line struct
+ */
+struct gft_ram_line {
+ __le32 lo;
+#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
+#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3
+#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7
+#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
+#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13
+#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17
+#define GFT_RAM_LINE_TTL_MASK 0x1
+#define GFT_RAM_LINE_TTL_SHIFT 18
+#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19
+#define GFT_RAM_LINE_RESERVED0_MASK 0x1
+#define GFT_RAM_LINE_RESERVED0_SHIFT 20
+#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21
+#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22
+#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23
+#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24
+#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25
+#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27
+#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28
+#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
+#define GFT_RAM_LINE_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_DST_PORT_SHIFT 30
+#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
+ __le32 hi;
+#define GFT_RAM_LINE_DSCP_MASK 0x1
+#define GFT_RAM_LINE_DSCP_SHIFT 0
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1
+#define GFT_RAM_LINE_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_DST_IP_SHIFT 2
+#define GFT_RAM_LINE_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_SRC_IP_SHIFT 3
+#define GFT_RAM_LINE_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_PRIORITY_SHIFT 4
+#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5
+#define GFT_RAM_LINE_VLAN_MASK 0x1
+#define GFT_RAM_LINE_VLAN_SHIFT 6
+#define GFT_RAM_LINE_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_DST_MAC_SHIFT 7
+#define GFT_RAM_LINE_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_SRC_MAC_SHIFT 8
+#define GFT_RAM_LINE_TENANT_ID_MASK 0x1
+#define GFT_RAM_LINE_TENANT_ID_SHIFT 9
+#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF
+#define GFT_RAM_LINE_RESERVED1_SHIFT 10
+};
+
+
+/*
+ * Used in the first 2 bits for gft_ram_line: Indication for vlan mask
+ */
+enum gft_vlan_select {
+ INNER_PROVIDER_VLAN = 0,
+ INNER_VLAN = 1,
+ OUTER_PROVIDER_VLAN = 2,
+ OUTER_VLAN = 3,
+ MAX_GFT_VLAN_SELECT
+};
+
+
+#endif /* __ECORE_HSI_ETH__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_func.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_func.h
new file mode 100644
index 00000000..d77edaa1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_func.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_HSI_INIT_FUNC__
+#define __ECORE_HSI_INIT_FUNC__
+/********************************/
+/* HSI Init Functions constants */
+/********************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+/* Size of CRC8 lookup table */
+#ifndef LINUX_REMOVE
+#define CRC8_TABLE_SIZE 256
+#endif
+
+/*
+ * BRB RAM init requirements
+ */
+struct init_brb_ram_req {
+ u32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
+ u32 headroom_per_tc /* headroom size per TC, in bytes */;
+ u32 min_pkt_size /* min packet size, in bytes */;
+ u32 max_ports_per_engine /* min packet size, in bytes */;
+ u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */;
+};
+
+
+/*
+ * ETS per-TC init requirements
+ */
+struct init_ets_tc_req {
+/* if set, this TC participates in the arbitration with a strict priority
+ * (the priority is equal to the TC ID)
+ */
+ u8 use_sp;
+/* if set, this TC participates in the arbitration with a WFQ weight
+ * (indicated by the weight field)
+ */
+ u8 use_wfq;
+ u16 weight /* An arbitration weight. Valid only if use_wfq is set. */;
+};
+
+/*
+ * ETS init requirements
+ */
+struct init_ets_req {
+ u32 mtu /* Max packet size (in bytes) */;
+/* ETS initialization requirements per TC. */
+ struct init_ets_tc_req tc_req[NUM_OF_TCS];
+};
+
+
+
+/*
+ * NIG LB RL init requirements
+ */
+struct init_nig_lb_rl_req {
+/* Global MAC+LB RL rate (in Mbps). If set to 0, the RL will be disabled. */
+ u16 lb_mac_rate;
+/* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */
+ u16 lb_rate;
+ u32 mtu /* Max packet size (in bytes) */;
+/* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. */
+ u16 tc_rate[NUM_OF_PHYS_TCS];
+};
+
+
+/*
+ * NIG TC mapping for each priority
+ */
+struct init_nig_pri_tc_map_entry {
+ u8 tc_id /* the mapped TC ID */;
+ u8 valid /* indicates if the mapping entry is valid */;
+};
+
+
+/*
+ * NIG priority to TC map init requirements
+ */
+struct init_nig_pri_tc_map_req {
+ struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
+};
+
+
+/*
+ * QM per-port init parameters
+ */
+struct init_qm_port_params {
+ u8 active /* Indicates if this port is active */;
+/* Vector of valid bits for active TCs used by this port */
+ u8 active_phys_tcs;
+/* number of PBF command lines that can be used by this port */
+ u16 num_pbf_cmd_lines;
+/* number of BTB blocks that can be used by this port */
+ u16 num_btb_blocks;
+ u16 reserved;
+};
+
+
+/*
+ * QM per-PQ init parameters
+ */
+struct init_qm_pq_params {
+ u8 vport_id /* VPORT ID */;
+ u8 tc_id /* TC ID */;
+ u8 wrr_group /* WRR group */;
+/* Indicates if a rate limiter should be allocated for the PQ (0/1) */
+ u8 rl_valid;
+ u8 port_id /* Port ID */;
+ u8 reserved0;
+ u16 reserved1;
+};
+
+
+/*
+ * QM per-vport init parameters
+ */
+struct init_qm_vport_params {
+/* rate limit in Mb/sec units. a value of 0 means dont configure. ignored if
+ * VPORT RL is globally disabled.
+ */
+ u32 vport_rl;
+/* WFQ weight. A value of 0 means dont configure. ignored if VPORT WFQ is
+ * globally disabled.
+ */
+ u16 vport_wfq;
+/* the first Tx PQ ID associated with this VPORT for each TC. */
+ u16 first_tx_pq_id[NUM_OF_TCS];
+};
+
+#endif /* __ECORE_HSI_INIT_FUNC__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_tool.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_tool.h
new file mode 100644
index 00000000..0e157f9b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_tool.h
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_HSI_INIT_TOOL__
+#define __ECORE_HSI_INIT_TOOL__
+/**************************************/
+/* Init Tool HSI constants and macros */
+/**************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+
+enum chip_ids {
+ CHIP_BB,
+ CHIP_K2,
+ CHIP_E5,
+ MAX_CHIP_IDS
+};
+
+
+/*
+ * Binary buffer header
+ */
+struct bin_buffer_hdr {
+/* buffer offset in bytes from the beginning of the binary file */
+ u32 offset;
+ u32 length /* buffer length in bytes */;
+};
+
+
+/*
+ * binary init buffer types
+ */
+enum bin_init_buffer_type {
+ BIN_BUF_INIT_FW_VER_INFO /* fw_ver_info struct */,
+ BIN_BUF_INIT_CMD /* init commands */,
+ BIN_BUF_INIT_VAL /* init data */,
+ BIN_BUF_INIT_MODE_TREE /* init modes tree */,
+ BIN_BUF_INIT_IRO /* internal RAM offsets */,
+ MAX_BIN_INIT_BUFFER_TYPE
+};
+
+
+/*
+ * init array header: raw
+ */
+struct init_array_raw_hdr {
+ u32 data;
+/* Init array type, from init_array_types enum */
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
+/* init array params */
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+};
+
+/*
+ * init array header: standard
+ */
+struct init_array_standard_hdr {
+ u32 data;
+/* Init array type, from init_array_types enum */
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+/* Init array size (in dwords) */
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+};
+
+/*
+ * init array header: zipped
+ */
+struct init_array_zipped_hdr {
+ u32 data;
+/* Init array type, from init_array_types enum */
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
+/* Init array zipped size (in bytes) */
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+};
+
+/*
+ * init array header: pattern
+ */
+struct init_array_pattern_hdr {
+ u32 data;
+/* Init array type, from init_array_types enum */
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
+/* pattern size in dword */
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+/* pattern repetitions */
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
+};
+
+/*
+ * init array header union
+ */
+union init_array_hdr {
+ struct init_array_raw_hdr raw /* raw init array header */;
+/* standard init array header */
+ struct init_array_standard_hdr standard;
+ struct init_array_zipped_hdr zipped /* zipped init array header */;
+ struct init_array_pattern_hdr pattern /* pattern init array header */;
+};
+
+
+enum init_modes {
+ MODE_BB_A0_DEPRECATED,
+ MODE_BB,
+ MODE_K2,
+ MODE_ASIC,
+ MODE_EMUL_REDUCED,
+ MODE_EMUL_FULL,
+ MODE_FPGA,
+ MODE_CHIPSIM,
+ MODE_SF,
+ MODE_MF_SD,
+ MODE_MF_SI,
+ MODE_PORTS_PER_ENG_1,
+ MODE_PORTS_PER_ENG_2,
+ MODE_PORTS_PER_ENG_4,
+ MODE_100G,
+ MODE_E5,
+ MAX_INIT_MODES
+};
+
+
+enum init_phases {
+ PHASE_ENGINE,
+ PHASE_PORT,
+ PHASE_PF,
+ PHASE_VF,
+ PHASE_QM_PF,
+ MAX_INIT_PHASES
+};
+
+
+enum init_split_types {
+ SPLIT_TYPE_NONE,
+ SPLIT_TYPE_PORT,
+ SPLIT_TYPE_PF,
+ SPLIT_TYPE_PORT_PF,
+ SPLIT_TYPE_VF,
+ MAX_INIT_SPLIT_TYPES
+};
+
+
+/*
+ * init array types
+ */
+enum init_array_types {
+ INIT_ARR_STANDARD /* standard init array */,
+ INIT_ARR_ZIPPED /* zipped init array */,
+ INIT_ARR_PATTERN /* a repeated pattern */,
+ MAX_INIT_ARRAY_TYPES
+};
+
+
+
+/*
+ * init operation: callback
+ */
+struct init_callback_op {
+ u32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_CALLBACK_OP_OP_MASK 0xF
+#define INIT_CALLBACK_OP_OP_SHIFT 0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+ u16 callback_id /* Callback ID */;
+ u16 block_id /* Blocks ID */;
+};
+
+
+/*
+ * init operation: delay
+ */
+struct init_delay_op {
+ u32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_DELAY_OP_OP_MASK 0xF
+#define INIT_DELAY_OP_OP_SHIFT 0
+#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+ __le32 delay /* delay in us */;
+};
+
+
+/*
+ * init operation: if_mode
+ */
+struct init_if_mode_op {
+ u32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_IF_MODE_OP_OP_MASK 0xF
+#define INIT_IF_MODE_OP_OP_SHIFT 0
+#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
+/* Commands to skip if the modes dont match */
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+ u16 reserved2;
+ u16 modes_buf_offset /* offset (in bytes) in modes expression buffer */;
+};
+
+
+/*
+ * init operation: if_phase
+ */
+struct init_if_phase_op {
+ u32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_IF_PHASE_OP_OP_MASK 0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT 0
+/* Indicates if DMAE is enabled in this phase */
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+/* Commands to skip if the phases dont match */
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
+ u32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
+#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
+};
+
+
+/*
+ * init mode operators
+ */
+enum init_mode_ops {
+ INIT_MODE_OP_NOT /* init mode not operator */,
+ INIT_MODE_OP_OR /* init mode or operator */,
+ INIT_MODE_OP_AND /* init mode and operator */,
+ MAX_INIT_MODE_OPS
+};
+
+
+/*
+ * init operation: raw
+ */
+struct init_raw_op {
+ u32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_RAW_OP_OP_MASK 0xF
+#define INIT_RAW_OP_OP_SHIFT 0
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+ u32 param2 /* Init param 2 */;
+};
+
+/*
+ * init array params
+ */
+struct init_op_array_params {
+ u16 size /* array size in dwords */;
+ u16 offset /* array start offset in dwords */;
+};
+
+/*
+ * Write init operation arguments
+ */
+union init_write_args {
+/* value to write, used when init source is INIT_SRC_INLINE */
+ u32 inline_val;
+/* number of zeros to write, used when init source is INIT_SRC_ZEROS */
+ u32 zeros_count;
+/* array offset to write, used when init source is INIT_SRC_ARRAY */
+ u32 array_offset;
+/* runtime array params to write, used when init source is INIT_SRC_RUNTIME */
+ struct init_op_array_params runtime;
+};
+
+/*
+ * init operation: write
+ */
+struct init_write_op {
+ u32 data;
+/* init operation, from init_op_types enum */
+#define INIT_WRITE_OP_OP_MASK 0xF
+#define INIT_WRITE_OP_OP_SHIFT 0
+/* init source type, taken from init_source_types enum */
+#define INIT_WRITE_OP_SOURCE_MASK 0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT 4
+#define INIT_WRITE_OP_RESERVED_MASK 0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+/* indicates if the register is wide-bus */
+#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+/* internal (absolute) GRC address, in dwords */
+#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT 9
+ union init_write_args args /* Write init operation arguments */;
+};
+
+/*
+ * init operation: read
+ */
+struct init_read_op {
+ u32 op_data;
+/* init operation, from init_op_types enum */
+#define INIT_READ_OP_OP_MASK 0xF
+#define INIT_READ_OP_OP_SHIFT 0
+/* polling type, from init_poll_types enum */
+#define INIT_READ_OP_POLL_TYPE_MASK 0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK 0x1
+#define INIT_READ_OP_RESERVED_SHIFT 8
+/* internal (absolute) GRC address, in dwords */
+#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT 9
+/* expected polling value, used only when polling is done */
+ u32 expected_val;
+};
+
+/*
+ * Init operations union
+ */
+union init_op {
+ struct init_raw_op raw /* raw init operation */;
+ struct init_write_op write /* write init operation */;
+ struct init_read_op read /* read init operation */;
+ struct init_if_mode_op if_mode /* if_mode init operation */;
+ struct init_if_phase_op if_phase /* if_phase init operation */;
+ struct init_callback_op callback /* callback init operation */;
+ struct init_delay_op delay /* delay init operation */;
+};
+
+
+
+/*
+ * Init command operation types
+ */
+enum init_op_types {
+ INIT_OP_READ /* GRC read init command */,
+ INIT_OP_WRITE /* GRC write init command */,
+/* Skip init commands if the init modes expression doesn't match */
+ INIT_OP_IF_MODE,
+/* Skip init commands if the init phase doesn't match */
+ INIT_OP_IF_PHASE,
+ INIT_OP_DELAY /* delay init command */,
+ INIT_OP_CALLBACK /* callback init command */,
+ MAX_INIT_OP_TYPES
+};
+
+
+/*
+ * init polling types
+ */
+enum init_poll_types {
+ INIT_POLL_NONE /* No polling */,
+ INIT_POLL_EQ /* init value is included in the init command */,
+ INIT_POLL_OR /* init value is all zeros */,
+ INIT_POLL_AND /* init value is an array of values */,
+ MAX_INIT_POLL_TYPES
+};
+
+
+
+
+/*
+ * init source types
+ */
+enum init_source_types {
+ INIT_SRC_INLINE /* init value is included in the init command */,
+ INIT_SRC_ZEROS /* init value is all zeros */,
+ INIT_SRC_ARRAY /* init value is an array of values */,
+ INIT_SRC_RUNTIME /* init value is provided during runtime */,
+ MAX_INIT_SOURCE_TYPES
+};
+
+
+
+
+/*
+ * Internal RAM Offsets macro data
+ */
+struct iro {
+ u32 base /* RAM field offset */;
+ u16 m1 /* multiplier 1 */;
+ u16 m2 /* multiplier 2 */;
+ u16 m3 /* multiplier 3 */;
+ u16 size /* RAM field size */;
+};
+
+#endif /* __ECORE_HSI_INIT_TOOL__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hw.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_hw.c
new file mode 100644
index 00000000..51bba27e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hw.c
@@ -0,0 +1,1032 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bcm_osal.h"
+#include "ecore_hsi_common.h"
+#include "ecore_status.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "reg_addr.h"
+#include "ecore_utils.h"
+#include "ecore_iov_api.h"
+
+#ifndef ASIC_ONLY
+#define ECORE_EMUL_FACTOR 2000
+#define ECORE_FPGA_FACTOR 200
+#endif
+
+#define ECORE_BAR_ACQUIRE_TIMEOUT 1000
+
+/* Invalid values */
+#define ECORE_BAR_INVALID_OFFSET (OSAL_CPU_TO_LE32(-1))
+
+struct ecore_ptt {
+ osal_list_entry_t list_entry;
+ unsigned int idx;
+ struct pxp_ptt_entry pxp;
+ u8 hwfn_id;
+};
+
+struct ecore_ptt_pool {
+ osal_list_t free_list;
+ osal_spinlock_t lock; /* ptt synchronized access */
+ struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+};
+
+void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
+ p_hwfn->p_ptt_pool = OSAL_NULL;
+}
+
+enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
+ GFP_KERNEL,
+ sizeof(*p_pool));
+ int i;
+
+ if (!p_pool)
+ return ECORE_NOMEM;
+
+ OSAL_LIST_INIT(&p_pool->free_list);
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_pool->ptts[i].idx = i;
+ p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
+ p_pool->ptts[i].pxp.pretend.control = 0;
+ p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
+
+ /* There are special PTT entries that are taken only by design.
+ * The rest are added ot the list for general usage.
+ */
+ if (i >= RESERVED_PTT_MAX)
+ OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
+ &p_pool->free_list);
+ }
+
+ p_hwfn->p_ptt_pool = p_pool;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) {
+ __ecore_ptt_pool_free(p_hwfn);
+ return ECORE_NOMEM;
+ }
+#endif
+ OSAL_SPIN_LOCK_INIT(&p_pool->lock);
+ return ECORE_SUCCESS;
+}
+
+void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt;
+ int i;
+
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
+ p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
+ }
+}
+
+void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
+{
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (p_hwfn->p_ptt_pool)
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
+#endif
+ __ecore_ptt_pool_free(p_hwfn);
+}
+
+struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt;
+ unsigned int i;
+
+ /* Take the free PTT from the list */
+ for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
+ OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
+ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
+ p_ptt = OSAL_LIST_FIRST_ENTRY(
+ &p_hwfn->p_ptt_pool->free_list,
+ struct ecore_ptt, list_entry);
+ OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
+ &p_hwfn->p_ptt_pool->free_list);
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "allocated ptt %d\n", p_ptt->idx);
+
+ return p_ptt;
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+ OSAL_MSLEEP(1);
+ }
+
+ DP_NOTICE(p_hwfn, true,
+ "PTT acquire timeout - failed to allocate PTT\n");
+ return OSAL_NULL;
+}
+
+void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ /* This PTT should not be set to pretend if it is being released */
+ /* TODO - add some pretend sanity checks, to make sure pretend
+ * isn't set on this ptt
+ */
+
+ OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
+ OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+}
+
+static u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
+{
+ /* The HW is using DWORDS and we need to translate it to Bytes */
+ return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
+}
+
+static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
+{
+ return PXP_PF_WINDOW_ADMIN_PER_PF_START +
+ p_ptt->idx * sizeof(struct pxp_ptt_entry);
+}
+
+u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
+{
+ return PXP_EXTERNAL_BAR_PF_WINDOW_START +
+ p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+}
+
+void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 new_hw_addr)
+{
+ u32 prev_hw_addr;
+
+ prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
+
+ if (new_hw_addr == prev_hw_addr)
+ return;
+
+ /* Update PTT entery in admin window */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Updating PTT entry %d to offset 0x%x\n",
+ p_ptt->idx, new_hw_addr);
+
+ /* The HW is using DWORDS and the address is in Bytes */
+ p_ptt->pxp.offset = OSAL_CPU_TO_LE32(new_hw_addr >> 2);
+
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, offset),
+ OSAL_LE32_TO_CPU(p_ptt->pxp.offset));
+}
+
+static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 hw_addr)
+{
+ u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
+ u32 offset;
+
+ offset = hw_addr - win_hw_addr;
+
+ if (p_ptt->hwfn_id != p_hwfn->my_id)
+ DP_NOTICE(p_hwfn, true,
+ "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
+ p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
+
+ /* Verify the address is within the window */
+ if (hw_addr < win_hw_addr ||
+ offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
+ ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
+ offset = 0;
+ }
+
+ return ecore_ptt_get_bar_addr(p_ptt) + offset;
+}
+
+struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx)
+{
+ if (ptt_idx >= RESERVED_PTT_MAX) {
+ DP_NOTICE(p_hwfn, true,
+ "Requested PTT %d is out of range\n", ptt_idx);
+ return OSAL_NULL;
+ }
+
+ return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
+}
+
+static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ bool is_empty = true;
+ u32 bar_addr;
+
+ if (!p_hwfn->p_dev->chk_reg_fifo)
+ goto out;
+
+ /* ecore_rd() cannot be used here since it calls this function */
+ bar_addr = ecore_set_ptt(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA);
+ is_empty = REG_RD(p_hwfn, bar_addr) == 0;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ OSAL_UDELAY(100);
+#endif
+
+out:
+ return is_empty;
+}
+
+void ecore_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
+{
+ bool prev_fifo_err;
+ u32 bar_addr;
+
+ prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);
+
+ bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
+ REG_WR(p_hwfn, bar_addr, val);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ OSAL_UDELAY(100);
+#endif
+
+ OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
+ "reg_fifo err was caused by a call to ecore_wr(0x%x, 0x%x)\n",
+ hw_addr, val);
+}
+
+u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
+{
+ bool prev_fifo_err;
+ u32 bar_addr, val;
+
+ prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);
+
+ bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
+ val = REG_RD(p_hwfn, bar_addr);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ OSAL_UDELAY(100);
+#endif
+
+ OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
+ "reg_fifo error was caused by a call to ecore_rd(0x%x)\n",
+ hw_addr);
+
+ return val;
+}
+
+static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ void *addr,
+ u32 hw_addr, osal_size_t n, bool to_device)
+{
+ u32 dw_count, *host_addr, hw_offset;
+ osal_size_t quota, done = 0;
+ u32 OSAL_IOMEM *reg_addr;
+
+ while (done < n) {
+ quota = OSAL_MIN_T(osal_size_t, n - done,
+ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
+
+ if (IS_PF(p_hwfn->p_dev)) {
+ ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+ hw_offset = ecore_ptt_get_bar_addr(p_ptt);
+ } else {
+ hw_offset = hw_addr + done;
+ }
+
+ dw_count = quota / 4;
+ host_addr = (u32 *)((u8 *)addr + done);
+ reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);
+
+ if (to_device)
+ while (dw_count--)
+ DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
+ else
+ while (dw_count--)
+ *host_addr++ = DIRECT_REG_RD(p_hwfn,
+ reg_addr++);
+
+ done += quota;
+ }
+}
+
+void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ void *dest, u32 hw_addr, osal_size_t n)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
+ hw_addr, dest, hw_addr, (unsigned long)n);
+
+ ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+}
+
+void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr, void *src, osal_size_t n)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
+ hw_addr, hw_addr, src, (unsigned long)n);
+
+ ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+}
+
+void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 fid)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+/* Every pretend undos prev pretends, including previous port pretend */
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+ fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+ p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+ p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);
+
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 port_id)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+ p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
+{
+ u32 concrete_fid = 0;
+
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
+
+ return concrete_fid;
+}
+
+/* Not in use @DPDK
+ * Ecore HW lock
+ * =============
+ * Although the implementation is ready, today we don't have any flow that
+ * utliizes said locks - and we want to keep it this way.
+ * If this changes, this needs to be revisted.
+ */
+
+/* Ecore DMAE
+ * =============
+ */
+static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
+ const u8 is_src_type_grc,
+ const u8 is_dst_type_grc,
+ struct ecore_dmae_params *p_params)
+{
+ u16 opcode_b = 0;
+ u32 opcode = 0;
+
+ /* Whether the source is the PCIe or the GRC.
+ * 0- The source is the PCIe
+ * 1- The source is the GRC.
+ */
+ opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
+ : DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT;
+ opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+ DMAE_CMD_SRC_PF_ID_SHIFT;
+
+ /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+ opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
+ : DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT;
+ opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+ DMAE_CMD_DST_PF_ID_SHIFT;
+
+ /* DMAE_E4_TODO need to check which value to specifiy here. */
+ /* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */
+
+ /* Whether to write a completion word to the completion destination:
+ * 0-Do not write a completion word
+ * 1-Write the completion word
+ */
+ opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
+ opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
+
+ if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
+ opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
+
+ /* swapping mode 3 - big endian there should be a define ifdefed in
+ * the HSI somewhere. Since it is currently
+ */
+ opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
+
+ opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;
+
+ /* reset source address in next go */
+ opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
+
+ /* reset dest address in next go */
+ opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;
+
+ /* SRC/DST VFID: all 1's - pf, otherwise VF id */
+ if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
+ opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
+ opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
+ } else {
+ opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
+ DMAE_CMD_SRC_VF_ID_SHIFT);
+ }
+ if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
+ opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
+ opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+ } else {
+ opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+ }
+
+ p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
+ p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
+}
+
+static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
+{
+ OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);
+
+ /* All the DMAE 'go' registers form an array in internal memory */
+ return DMAE_REG_GO_C0 + (idx << 2);
+}
+
+static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
+ u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+
+ /* verify address is not OSAL_NULL */
+ if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
+ ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
+ DP_NOTICE(p_hwfn, true,
+ "source or destination address 0 idx_cmd=%d\n"
+ "opcode = [0x%08x,0x%04x] len=0x%x"
+ " src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ OSAL_LE32_TO_CPU(p_command->opcode),
+ OSAL_LE16_TO_CPU(p_command->opcode_b),
+ OSAL_LE16_TO_CPU(p_command->length_dw),
+ OSAL_LE32_TO_CPU(p_command->src_addr_hi),
+ OSAL_LE32_TO_CPU(p_command->src_addr_lo),
+ OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
+ OSAL_LE32_TO_CPU(p_command->dst_addr_lo));
+
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]"
+ "len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ OSAL_LE32_TO_CPU(p_command->opcode),
+ OSAL_LE16_TO_CPU(p_command->opcode_b),
+ OSAL_LE16_TO_CPU(p_command->length_dw),
+ OSAL_LE32_TO_CPU(p_command->src_addr_hi),
+ OSAL_LE32_TO_CPU(p_command->src_addr_lo),
+ OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
+ OSAL_LE32_TO_CPU(p_command->dst_addr_lo));
+
+ /* Copy the command to DMAE - need to do it before every call
+ * for source/dest address no reset.
+ * The number of commands have been increased to 16 (previous was 14)
+ * The first 9 DWs are the command registers, the 10 DW is the
+ * GO register, and
+ * the rest are result registers (which are read only by the client).
+ */
+ for (i = 0; i < DMAE_CMD_SIZE; i++) {
+ u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
+ *(((u32 *)p_command) + i) : 0;
+
+ ecore_wr(p_hwfn, p_ptt,
+ DMAE_REG_CMD_MEM +
+ (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
+ (i * sizeof(u32)), data);
+ }
+
+ ecore_wr(p_hwfn, p_ptt,
+ ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
+
+ return ecore_status;
+}
+
+enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
+{
+ dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
+ struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
+ u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
+ u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
+
+ *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
+ if (*p_comp == OSAL_NULL) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate `p_completion_word'\n");
+ goto err;
+ }
+
+ p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
+ sizeof(struct dmae_cmd));
+ if (*p_cmd == OSAL_NULL) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate `struct dmae_cmd'\n");
+ goto err;
+ }
+
+ p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
+ sizeof(u32) * DMAE_MAX_RW_SIZE);
+ if (*p_buff == OSAL_NULL) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate `intermediate_buffer'\n");
+ goto err;
+ }
+
+ p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+ p_hwfn->dmae_info.b_mem_ready = true;
+
+ return ECORE_SUCCESS;
+err:
+ ecore_dmae_info_free(p_hwfn);
+ return ECORE_NOMEM;
+}
+
+void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
+{
+ dma_addr_t p_phys;
+
+ OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
+ p_hwfn->dmae_info.b_mem_ready = false;
+ OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
+
+ if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
+ p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_hwfn->dmae_info.p_completion_word,
+ p_phys, sizeof(u32));
+ p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
+ p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_hwfn->dmae_info.p_dmae_cmd,
+ p_phys, sizeof(struct dmae_cmd));
+ p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
+ p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_hwfn->dmae_info.p_intermediate_buffer,
+ p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
+ p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
+ }
+}
+
+static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
+{
+ u32 wait_cnt_limit = 10000, wait_cnt = 0;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+ u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
+ ECORE_EMUL_FACTOR :
+ (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
+ ECORE_FPGA_FACTOR : 1));
+
+ wait_cnt_limit *= factor;
+#endif
+
+ /* DMAE_E4_TODO : TODO check if we have to call any other function
+ * other than BARRIER to sync the completion_word since we are not
+ * using the volatile keyword for this
+ */
+ OSAL_BARRIER(p_hwfn->p_dev);
+ while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
+ OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
+ if (++wait_cnt > wait_cnt_limit) {
+ DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW,
+ "Timed-out waiting for operation to"
+ " complete. Completion word is 0x%08x"
+ " expected 0x%08x.\n",
+ *p_hwfn->dmae_info.p_completion_word,
+ DMAE_COMPLETION_VAL);
+ ecore_status = ECORE_TIMEOUT;
+ break;
+ }
+ /* to sync the completion_word since we are not
+ * using the volatile keyword for p_completion_word
+ */
+ OSAL_BARRIER(p_hwfn->p_dev);
+ }
+
+ if (ecore_status == ECORE_SUCCESS)
+ *p_hwfn->dmae_info.p_completion_word = 0;
+
+ return ecore_status;
+}
+
+static enum _ecore_status_t
+ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 src_addr,
+ u64 dst_addr,
+ u8 src_type, u8 dst_type, u32 length_dw)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+
+ switch (src_type) {
+ case ECORE_DMAE_ADDRESS_GRC:
+ case ECORE_DMAE_ADDRESS_HOST_PHYS:
+ cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr));
+ cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr));
+ break;
+ /* for virt source addresses we use the intermediate buffer. */
+ case ECORE_DMAE_ADDRESS_HOST_VIRT:
+ cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
+ cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
+ OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0],
+ (void *)(osal_uintptr_t)src_addr,
+ length_dw * sizeof(u32));
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ switch (dst_type) {
+ case ECORE_DMAE_ADDRESS_GRC:
+ case ECORE_DMAE_ADDRESS_HOST_PHYS:
+ cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr));
+ cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr));
+ break;
+ /* for virt destination address we use the intermediate buff. */
+ case ECORE_DMAE_ADDRESS_HOST_VIRT:
+ cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
+ cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);
+
+ if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
+ src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
+ OSAL_DMA_SYNC(p_hwfn->p_dev,
+ (void *)HILO_U64(cmd->src_addr_hi,
+ cmd->src_addr_lo),
+ length_dw * sizeof(u32), false);
+
+ ecore_dmae_post_command(p_hwfn, p_ptt);
+
+ ecore_status = ecore_dmae_operation_wait(p_hwfn);
+
+ /* TODO - is it true ? */
+ if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
+ src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
+ OSAL_DMA_SYNC(p_hwfn->p_dev,
+ (void *)HILO_U64(cmd->src_addr_hi,
+ cmd->src_addr_lo),
+ length_dw * sizeof(u32), true);
+
+ if (ecore_status != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, ECORE_MSG_HW,
+ "Wait Failed. source_addr 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x, intermediate buffer 0x%lx.\n",
+ (unsigned long)src_addr, (unsigned long)dst_addr,
+ length_dw,
+ (unsigned long)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
+ return ecore_status;
+ }
+
+ if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
+ OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
+ &p_hwfn->dmae_info.p_intermediate_buffer[0],
+ length_dw * sizeof(u32));
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 src_addr,
+ u64 dst_addr,
+ u8 src_type,
+ u8 dst_type,
+ u32 size_in_dwords,
+ struct ecore_dmae_params *p_params)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ u64 src_addr_split = 0, dst_addr_split = 0;
+ u16 length_limit = DMAE_MAX_RW_SIZE;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+ u32 offset = 0;
+
+ if (!p_hwfn->dmae_info.b_mem_ready) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
+ (unsigned long)src_addr, src_type,
+ (unsigned long)dst_addr, dst_type,
+ size_in_dwords);
+ return ECORE_NOMEM;
+ }
+
+ if (p_hwfn->p_dev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
+ (unsigned long)src_addr, src_type,
+ (unsigned long)dst_addr, dst_type,
+ size_in_dwords);
+ /* Return success to let the flow to be completed successfully
+ * w/o any error handling.
+ */
+ return ECORE_SUCCESS;
+ }
+
+ if (!cmd) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_dmae_execute_sub_operation failed. Invalid state. source_addr 0x%lx, destination addr 0x%lx, size_in_dwords 0x%x\n",
+ (unsigned long)src_addr,
+ (unsigned long)dst_addr,
+ length_cur);
+ return ECORE_INVAL;
+ }
+
+ ecore_dmae_opcode(p_hwfn,
+ (src_type == ECORE_DMAE_ADDRESS_GRC),
+ (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
+
+ cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
+ cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
+ cmd->comp_val = OSAL_CPU_TO_LE32(DMAE_COMPLETION_VAL);
+
+ /* Check if the grc_addr is valid like < MAX_GRC_OFFSET */
+ cnt_split = size_in_dwords / length_limit;
+ length_mod = size_in_dwords % length_limit;
+
+ src_addr_split = src_addr;
+ dst_addr_split = dst_addr;
+
+ for (i = 0; i <= cnt_split; i++) {
+ offset = length_limit * i;
+
+ if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
+ if (src_type == ECORE_DMAE_ADDRESS_GRC)
+ src_addr_split = src_addr + offset;
+ else
+ src_addr_split = src_addr + (offset * 4);
+ }
+
+ if (dst_type == ECORE_DMAE_ADDRESS_GRC)
+ dst_addr_split = dst_addr + offset;
+ else
+ dst_addr_split = dst_addr + (offset * 4);
+
+ length_cur = (cnt_split == i) ? length_mod : length_limit;
+
+ /* might be zero on last iteration */
+ if (!length_cur)
+ continue;
+
+ ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
+ p_ptt,
+ src_addr_split,
+ dst_addr_split,
+ src_type,
+ dst_type,
+ length_cur);
+ if (ecore_status != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "ecore_dmae_execute_sub_operation Failed"
+ " with error 0x%x. source_addr 0x%lx,"
+ " dest addr 0x%lx, size_in_dwords 0x%x\n",
+ ecore_status, (unsigned long)src_addr,
+ (unsigned long)dst_addr, length_cur);
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
+ break;
+ }
+ }
+
+ return ecore_status;
+}
+
+enum _ecore_status_t
+ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr, u32 size_in_dwords, u32 flags)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ struct ecore_dmae_params params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+ params.flags = flags;
+
+ OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
+
+ rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ grc_addr_in_dw,
+ ECORE_DMAE_ADDRESS_HOST_VIRT,
+ ECORE_DMAE_ADDRESS_GRC,
+ size_in_dwords, &params);
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 grc_addr,
+ dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ struct ecore_dmae_params params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+ params.flags = flags;
+
+ OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
+
+ rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
+ dest_addr, ECORE_DMAE_ADDRESS_GRC,
+ ECORE_DMAE_ADDRESS_HOST_VIRT,
+ size_in_dwords, &params);
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct ecore_dmae_params *p_params)
+{
+ enum _ecore_status_t rc;
+
+ OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
+
+ rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ dest_addr,
+ ECORE_DMAE_ADDRESS_HOST_PHYS,
+ ECORE_DMAE_ADDRESS_HOST_PHYS,
+ size_in_dwords, p_params);
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
+
+ return rc;
+}
+
+void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
+ enum ecore_hw_err_type err_type)
+{
+ /* Fan failure cannot be masked by handling of another HW error */
+ if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
+ "Recovery is in progress."
+ "Avoid notifying about HW error %d.\n",
+ err_type);
+ return;
+ }
+
+ OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
+}
+
+enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ const char *phase)
+{
+ u32 size = OSAL_PAGE_SIZE / 2, val;
+ struct ecore_dmae_params params;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 *p_tmp;
+
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size);
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn, false,
+ "DMAE sanity [%s]: failed to allocate memory\n",
+ phase);
+ return ECORE_NOMEM;
+ }
+
+ /* Fill the bottom half of the allocated memory with a known pattern */
+ for (p_tmp = (u32 *)p_virt;
+ p_tmp < (u32 *)((u8 *)p_virt + size);
+ p_tmp++) {
+ /* Save the address itself as the value */
+ val = (u32)(osal_uintptr_t)p_tmp;
+ *p_tmp = val;
+ }
+
+ /* Zero the top half of the allocated memory */
+ OSAL_MEM_ZERO((u8 *)p_virt + size, size);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "DMAE sanity [%s]: src_addr={phys 0x%lx, virt %p}, dst_addr={phys 0x%lx, virt %p}, size 0x%x\n",
+ phase, (unsigned long)p_phys, p_virt,
+ (unsigned long)(p_phys + size),
+ (u8 *)p_virt + size, size);
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
+ size / 4 /* size_in_dwords */, &params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "DMAE sanity [%s]: ecore_dmae_host2host() failed. rc = %d.\n",
+ phase, rc);
+ goto out;
+ }
+
+ /* Verify that the top half of the allocated memory has the pattern */
+ for (p_tmp = (u32 *)((u8 *)p_virt + size);
+ p_tmp < (u32 *)((u8 *)p_virt + (2 * size));
+ p_tmp++) {
+ /* The corresponding address in the bottom half */
+ val = (u32)(osal_uintptr_t)p_tmp - size;
+
+ if (*p_tmp != val) {
+ DP_NOTICE(p_hwfn, false,
+ "DMAE sanity [%s]: addr={phys 0x%lx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+ phase,
+ (unsigned long)p_phys +
+ ((u8 *)p_tmp - (u8 *)p_virt),
+ p_tmp, *p_tmp, val);
+ rc = ECORE_UNKNOWN_ERROR;
+ goto out;
+ }
+ }
+
+out:
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size);
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hw.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hw.h
new file mode 100644
index 00000000..394207eb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hw.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_HW_H__
+#define __ECORE_HW_H__
+
+#include "ecore.h"
+#include "ecore_dev_api.h"
+
+/* Forward decleration */
+struct ecore_ptt;
+
+enum reserved_ptts {
+ RESERVED_PTT_EDIAG,
+ RESERVED_PTT_USER_SPACE,
+ RESERVED_PTT_MAIN,
+ RESERVED_PTT_DPC,
+ RESERVED_PTT_MAX
+};
+
+/* @@@TMP - in earlier versions of the emulation, the HW lock started from 1
+ * instead of 0, this should be fixed in later HW versions.
+ */
+#ifndef MISC_REG_DRIVER_CONTROL_0
+#define MISC_REG_DRIVER_CONTROL_0 MISC_REG_DRIVER_CONTROL_1
+#endif
+#ifndef MISC_REG_DRIVER_CONTROL_0_SIZE
+#define MISC_REG_DRIVER_CONTROL_0_SIZE MISC_REG_DRIVER_CONTROL_1_SIZE
+#endif
+
+enum _dmae_cmd_dst_mask {
+ DMAE_CMD_DST_MASK_NONE = 0,
+ DMAE_CMD_DST_MASK_PCIE = 1,
+ DMAE_CMD_DST_MASK_GRC = 2
+};
+
+enum _dmae_cmd_src_mask {
+ DMAE_CMD_SRC_MASK_PCIE = 0,
+ DMAE_CMD_SRC_MASK_GRC = 1
+};
+
+enum _dmae_cmd_crc_mask {
+ DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0,
+ DMAE_CMD_COMP_CRC_EN_MASK_SET = 1
+};
+
+/* definitions for DMA constants */
+#define DMAE_GO_VALUE 0x1
+
+#ifdef __BIG_ENDIAN
+#define DMAE_COMPLETION_VAL 0xAED10000
+#define DMAE_CMD_ENDIANITY 0x3
+#else
+#define DMAE_COMPLETION_VAL 0xD1AE
+#define DMAE_CMD_ENDIANITY 0x2
+#endif
+
+#define DMAE_CMD_SIZE 14
+/* size of DMAE command structure to fill.. DMAE_CMD_SIZE-5 */
+#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5)
+/* Minimum wait for dmae opertaion to complete 2 milliseconds */
+#define DMAE_MIN_WAIT_TIME 0x2
+#define DMAE_MAX_CLIENTS 32
+
+/**
+* @brief ecore_gtt_init - Initialize GTT windows
+*
+* @param p_hwfn
+* @param p_ptt
+*/
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured
+ *
+ * @param p_hwfn
+ */
+void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_pool_alloc - Allocate and initialize PTT pool
+ *
+ * @param p_hwfn
+ *
+ * @return _ecore_status_t - success (0), negative - error.
+ */
+enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_pool_free -
+ *
+ * @param p_hwfn
+ */
+void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_ptt_set_win - Set PTT Window's GRC BAR address
+ *
+ * @param p_hwfn
+ * @param new_hw_addr
+ * @param p_ptt
+ */
+void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 new_hw_addr);
+
+/**
+ * @brief ecore_get_reserved_ptt - Get a specific reserved PTT
+ *
+ * @param p_hwfn
+ * @param ptt_idx
+ *
+ * @return struct ecore_ptt *
+ */
+struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx);
+
+/**
+ * @brief ecore_wr - Write value to BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param val
+ * @param hw_addr
+ */
+void ecore_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr,
+ u32 val);
+
+/**
+ * @brief ecore_rd - Read value from BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param val
+ * @param hw_addr
+ */
+u32 ecore_rd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr);
+
+/**
+ * @brief ecore_memcpy_from - copy n bytes from BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param dest
+ * @param hw_addr
+ * @param n
+ */
+void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ void *dest,
+ u32 hw_addr,
+ osal_size_t n);
+
+/**
+ * @brief ecore_memcpy_to - copy n bytes to BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param src
+ * @param n
+ */
+void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr,
+ void *src,
+ osal_size_t n);
+/**
+ * @brief ecore_fid_pretend - pretend to another function when
+ * accessing the ptt window. There is no way to unpretend
+ * a function. The only way to cancel a pretend is to
+ * pretend back to the original function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param fid - fid field of pxp_pretend structure. Can contain
+ * either pf / vf, port/path fields are don't care.
+ */
+void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 fid);
+
+/**
+ * @brief ecore_port_pretend - pretend to another port when
+ * accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ */
+void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 port_id);
+
+/**
+ * @brief ecore_port_unpretend - cancel any previously set port
+ * pretend
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_port_unpretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_vfid_to_concrete - build a concrete FID for a
+ * given VF ID
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ */
+u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid);
+
+/**
+* @brief ecore_dmae_info_alloc - Init the dmae_info structure
+* which is part of p_hwfn.
+* @param p_hwfn
+*/
+enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+* @brief ecore_dmae_info_free - Free the dmae_info structure
+* which is part of p_hwfn
+*
+* @param p_hwfn
+*/
+void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
+
+enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
+ const u8 *fw_data);
+
+void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
+ enum ecore_hw_err_type err_type);
+
+enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ const char *phase);
+
+#endif /* __ECORE_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hw_defs.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hw_defs.h
new file mode 100644
index 00000000..b8c2686f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hw_defs.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef _ECORE_IGU_DEF_H_
+#define _ECORE_IGU_DEF_H_
+
+/* Fields of IGU PF CONFIGRATION REGISTER */
+/* function enable */
+#define IGU_PF_CONF_FUNC_EN (0x1 << 0)
+/* MSI/MSIX enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1)
+/* INT enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2)
+/* attention enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3)
+/* single ISR mode enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)
+/* simd all ones mode */
+#define IGU_PF_CONF_SIMD_MODE (0x1 << 5)
+
+/* Fields of IGU VF CONFIGRATION REGISTER */
+/* function enable */
+#define IGU_VF_CONF_FUNC_EN (0x1 << 0)
+/* MSI/MSIX enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1)
+/* single ISR mode enable */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4)
+/* Parent PF */
+#define IGU_VF_CONF_PARENT_MASK (0xF)
+/* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 5
+
+/* Igu control commands
+ */
+enum igu_ctrl_cmd {
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register
+ */
+struct igu_ctrl_reg {
+ u32 ctrl_data;
+#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
+#define IGU_CTRL_REG_FID_SHIFT 0
+#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
+#define IGU_CTRL_REG_RESERVED_MASK 0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT 28
+#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT 31
+};
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c
new file mode 100644
index 00000000..b8496cb2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -0,0 +1,2047 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bcm_osal.h"
+#include "ecore_hw.h"
+#include "ecore_init_ops.h"
+#include "reg_addr.h"
+#include "ecore_rt_defs.h"
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_init_func.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_hsi_init_tool.h"
+#include "ecore_iro.h"
+#include "ecore_init_fw_funcs.h"
+
+#define CDU_VALIDATION_DEFAULT_CFG 61
+
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
+ { 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
+ { 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
+ { 608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
+};
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
+ { 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
+};
+
+/* General constants */
+#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
+ QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
+ 0)
+#define QM_INVALID_PQ_ID 0xffff
+
+/* Feature enable */
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+
+/* Other PQ constants */
+#define QM_OTHER_PQS_PER_PF 4
+
+/* VOQ constants */
+#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
+
+/* WFQ constants: */
+
+/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_WFQ_UPPER_BOUND 62500000
+
+/* Bit of VOQ in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+
+/* Bit of PF in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
+#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6
+
+/* 0x9000 = 4*9*1024 */
+#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+
+/* Max WFQ increment value is 0.7 * upper bound */
+#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
+
+/* Number of VOQs in E5 QmWfqCrd register */
+#define QM_WFQ_CRD_E5_NUM_VOQS 16
+
+/* RL constants: */
+
+/* Period in us */
+#define QM_RL_PERIOD 5
+
+/* Period in 25MHz cycles */
+#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+
+/* RL increment value - rate is specified in mbps. the factor of 1.01 was
+ * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
+ * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
+ * although the credit increment value was the correct one and FW calculated
+ * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
+ * this point.
+ */
+#define QM_RL_INC_VAL(rate) \
+ OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
+ (8 * 100)), 1)
+
+/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_RL_UPPER_BOUND 62500000
+
+/* Max PF RL increment value is 0.7 * upper bound */
+#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
+
+/* Vport RL Upper bound, link speed is in Mpbs */
+#define QM_VP_RL_UPPER_BOUND(speed) \
+ ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))
+
+/* Max Vport RL increment value is the Vport RL upper bound */
+#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
+
+/* Vport RL credit threshold in case of QM bypass */
+#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
+
+/* AFullOprtnstcCrdMask constants */
+#define QM_OPPOR_LINE_VOQ_DEF 1
+#define QM_OPPOR_FW_STOP_DEF 0
+#define QM_OPPOR_PQ_EMPTY_DEF 1
+
+/* Command Queue constants: */
+
+/* Pure LB CmdQ lines (+spare) */
+#define PBF_CMDQ_PURE_LB_LINES 150
+
+#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
+
+#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+ ext_voq * \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+
+#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
+ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
+ ext_voq * \
+ (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
+((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
+/* BTB: blocks constants (block size = 256B) */
+
+/* 256B blocks in 9700B packet */
+#define BTB_JUMBO_PKT_BLOCKS 38
+
+/* Headroom per-port */
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
+#define BTB_PURE_LB_FACTOR 10
+
+/* Factored (hence really 0.7) */
+#define BTB_PURE_LB_RATIO 7
+
+/* QM stop command constants */
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 2
+#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
+#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+
+/* QM command macros */
+#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+ SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
+
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, \
+ vp_pq_id, rl_id, ext_voq, wrr) \
+ do { \
+ OSAL_MEMSET(&map, 0, sizeof(map)); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
+ SET_FIELD(map.reg, \
+ QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
+ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, \
+ *((u32 *)&map)); \
+ } while (0)
+
+#define WRITE_PQ_INFO_TO_RAM 1
+#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
+ (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \
+ ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
+
+/******************** INTERNAL IMPLEMENTATION *********************/
+
+/* Returns the external VOQ number */
+static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
+ u8 port_id,
+ u8 tc,
+ u8 max_phys_tcs_per_port)
+{
+ if (tc == PURE_LB_TC)
+ return NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;
+ else
+ return port_id * (max_phys_tcs_per_port) + tc;
+}
+
+/* Prepare PF RL enable/disable runtime init values */
+static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+ if (pf_rl_en) {
+ u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
+
+ /* Enable RLs for all VOQs */
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
+ (u32)voq_bit_mask);
+#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
+ if (num_ext_voqs >= 32)
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
+ (u32)(voq_bit_mask >> 32));
+#endif
+
+ /* Write RL period */
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+
+ /* Set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
+ QM_PF_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare PF WFQ enable/disable runtime init values */
+static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
+
+ /* Set credit threshold for QM bypass flow */
+ if (pf_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare VPORT RL enable/disable runtime init values */
+static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
+ vport_rl_en ? 1 : 0);
+ if (vport_rl_en) {
+ /* Write RL period (use timer 0 only) */
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+
+ /* Set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+ QM_VP_RL_BYPASS_THRESH_SPEED);
+ }
+}
+
+/* Prepare VPORT WFQ enable/disable runtime init values */
+static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
+ vport_wfq_en ? 1 : 0);
+
+ /* Set credit threshold for QM bypass flow */
+ if (vport_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines for
+ * the specified VOQ
+ */
+static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 ext_voq,
+ u16 cmdq_lines)
+{
+ u32 qm_line_crd;
+
+ qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
+ (u32)cmdq_lines);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
+ qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
+ qm_line_crd);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines. */
+static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params
+ port_params[MAX_NUM_PORTS])
+{
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
+ u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+
+ /* Clear PBF lines of all VOQs */
+ for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
+
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ u16 phys_lines, phys_lines_per_tc;
+
+ if (!port_params[port_id].active)
+ continue;
+
+ /* Find number of command queue lines to divide between the
+ * active physical TCs. In E5, 1/8 of the lines are reserved.
+ * the lines for pure LB TC are subtracted.
+ */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines;
+ phys_lines -= PBF_CMDQ_PURE_LB_LINES;
+
+ /* Find #lines per active physical TC */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++)
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1)
+ num_tcs_in_port++;
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
+
+ /* Init registers per active TC */
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
+ max_phys_tcs_per_port);
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1)
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
+ phys_lines_per_tc);
+ }
+
+ /* Init registers for pure LB TC */
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
+ max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
+ PBF_CMDQ_PURE_LB_LINES);
+ }
+}
+
+/*
+ * Prepare runtime init values to allocate guaranteed BTB blocks for the
+ * specified port. The guaranteed BTB space is divided between the TCs as
+ * follows (shared space Is currently not used):
+ * 1. Parameters:
+ * B BTB blocks for this port
+ * C Number of physical TCs for this port
+ * 2. Calculation:
+ * a. 38 blocks (9700B jumbo frame) are allocated for global per port
+ * headroom
+ * b. B = B 38 (remainder after global headroom allocation)
+ * c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ.
+ * d. B = B MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
+ * e. B/C blocks are allocated for each physical TC.
+ * Assumptions:
+ * - MTU is up to 9700 bytes (38 blocks)
+ * - All TCs are considered symmetrical (same rate and packet size)
+ * - No optimization for lossy TC (all are considered lossless). Shared space is
+ * not enabled and allocated for each TC.
+ */
+static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params
+ port_params[MAX_NUM_PORTS])
+{
+ u32 usable_blocks, pure_lb_blocks, phys_blocks;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
+
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ if (!port_params[port_id].active)
+ continue;
+
+ /* Subtract headroom blocks */
+ usable_blocks = port_params[port_id].num_btb_blocks -
+ BTB_HEADROOM_BLOCKS;
+
+ /* Find blocks per physical TC. use factor to avoid floating
+ * arithmethic.
+ */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1)
+ num_tcs_in_port++;
+
+ pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+ (num_tcs_in_port * BTB_PURE_LB_FACTOR +
+ BTB_PURE_LB_RATIO);
+ pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
+ pure_lb_blocks /
+ BTB_PURE_LB_FACTOR);
+ phys_blocks = (usable_blocks - pure_lb_blocks) /
+ num_tcs_in_port;
+
+ /* Init physical TCs */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1) {
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
+ phys_blocks);
+ }
+ }
+
+ /* Init pure LB TC */
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
+ pure_lb_blocks);
+ }
+}
+
+/* Prepare Tx PQ mapping runtime init values for the specified PF */
+static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ bool is_pf_loading,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
+ u8 start_vport,
+ u32 base_mem_addr_4kb,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params)
+{
+ /* A bit per Tx PQ indicating if the PQ is associated with a VF */
+ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
+ u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
+ u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+
+ num_pqs = num_pf_pqs + num_vf_pqs;
+
+ first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
+
+ pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
+ vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
+ mem_addr_4kb = base_mem_addr_4kb;
+
+ /* Set mapping from PQ group to PF */
+ for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
+ STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
+ (u32)(pf_id));
+
+ /* Set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
+ QM_PQ_SIZE_256B(num_pf_cids));
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
+ QM_PQ_SIZE_256B(num_vf_cids));
+
+ /* Go over all Tx PQs */
+ for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
+ u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ u8 ext_voq, vport_id_in_pf;
+ bool is_vf_pq, rl_valid;
+ u16 first_tx_pq_id;
+
+ ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
+ pq_params[i].tc_id,
+ max_phys_tcs_per_port);
+ is_vf_pq = (i >= num_pf_pqs);
+ rl_valid = pq_params[i].rl_valid > 0;
+
+ /* Update first Tx PQ of VPORT/TC */
+ vport_id_in_pf = pq_params[i].vport_id - start_vport;
+ first_tx_pq_id =
+ vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
+ if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+ u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+ (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));
+
+ /* Create new VP PQ */
+ vport_params[vport_id_in_pf].
+ first_tx_pq_id[pq_params[i].tc_id] = pq_id;
+ first_tx_pq_id = pq_id;
+
+ /* Map VP PQ to VOQ and PF */
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
+ first_tx_pq_id, map_val);
+ }
+
+ /* Check RL ID */
+ if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT ID for rate limiter config\n");
+ rl_valid = false;
+ }
+
+ /* Prepare PQ map entry */
+ struct qm_rf_pq_map_e4 tx_pq_map;
+
+ QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ?
+ 1 : 0,
+ first_tx_pq_id, rl_valid ?
+ pq_params[i].vport_id : 0,
+ ext_voq, pq_params[i].wrr_group);
+
+ /* Set PQ base address */
+ STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+
+ /* Clear PQ pointer table entry (64 bit) */
+ if (is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
+ /* Write PQ info to RAM */
+ if (WRITE_PQ_INFO_TO_RAM != 0) {
+ u32 pq_info = 0;
+
+ pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
+ pq_params[i].tc_id,
+ pq_params[i].port_id,
+ rl_valid ? 1 : 0, rl_valid ?
+ pq_params[i].vport_id : 0);
+ ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
+ pq_info);
+ }
+
+ /* If VF PQ, add indication to PQ VF mask */
+ if (is_vf_pq) {
+ tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
+ (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
+ mem_addr_4kb += vport_pq_mem_4kb;
+ } else {
+ mem_addr_4kb += pq_mem_4kb;
+ }
+ }
+
+ /* Store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++)
+ if (tx_pq_vf_mask[i])
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
+ i, tx_pq_vf_mask[i]);
+}
+
+/* Prepare Other PQ mapping runtime init values for the specified PF */
+static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 pf_id,
+ bool is_pf_loading,
+ u32 num_pf_cids,
+ u32 num_tids,
+ u32 base_mem_addr_4kb)
+{
+ u32 pq_size, pq_mem_4kb, mem_addr_4kb;
+ u16 i, j, pq_id, pq_group;
+
+ /* A single other PQ group is used in each PF, where PQ group i is used
+ * in PF i.
+ */
+ pq_group = pf_id;
+ pq_size = num_pf_cids + num_tids;
+ pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ mem_addr_4kb = base_mem_addr_4kb;
+
+ /* Map PQ group to PF */
+ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
+ (u32)(pf_id));
+
+ /* Set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
+ QM_PQ_SIZE_256B(pq_size));
+
+ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
+ i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ /* Set PQ base address */
+ STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+
+ /* Clear PQ pointer table entry */
+ if (is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_PTRTBLOTHER_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
+ mem_addr_4kb += pq_mem_4kb;
+ }
+}
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 pf_id,
+ u16 pf_wfq,
+ u8 max_phys_tcs_per_port,
+ u16 num_tx_pqs,
+ struct init_qm_pq_params *pq_params)
+{
+ u32 inc_val, crd_reg_offset;
+ u8 ext_voq;
+ u16 i;
+
+ inc_val = QM_WFQ_INC_VAL(pf_wfq);
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF WFQ weight configuration\n");
+ return -1;
+ }
+
+ for (i = 0; i < num_tx_pqs; i++) {
+ ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
+ pq_params[i].tc_id,
+ max_phys_tcs_per_port);
+ crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
+ QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+ ext_voq * MAX_NUM_PFS_BB +
+ (pf_id % MAX_NUM_PFS_BB);
+ OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ }
+
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
+ pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
+
+ return 0;
+}
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
+{
+ u32 inc_val;
+
+ inc_val = QM_RL_INC_VAL(pf_rl);
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF rate limit configuration\n");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+
+ return 0;
+}
+
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
+static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u16 vport_pq_id;
+ u32 inc_val;
+ u8 tc, i;
+
+ /* Go over all PF VPORTs */
+ for (i = 0; i < num_vports; i++) {
+ if (!vport_params[i].vport_wfq)
+ continue;
+
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT WFQ weight configuration\n");
+ return -1;
+ }
+
+ /* Each VPORT can have several VPORT PQ IDs for various TCs */
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ vport_pq_id = vport_params[i].first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID) {
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
+ }
+ }
+ }
+ return 0;
+}
+
+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
+static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 start_vport,
+ u8 num_vports,
+ u32 link_speed,
+ struct init_qm_vport_params *vport_params)
+{
+ u8 i, vport_id;
+ u32 inc_val;
+
+ if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT ID for rate limiter configuration\n");
+ return -1;
+ }
+
+ /* Go over all PF VPORTs */
+ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+ inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
+ vport_params[i].vport_rl : link_speed);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT rate-limit configuration\n");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
+ QM_VP_RL_UPPER_BOUND(link_speed) |
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+ inc_val);
+ }
+
+ return 0;
+}
+
+static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 reg_val, i;
+
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
+ i++) {
+ OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
+ reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
+ }
+
+ /* Check if timeout while waiting for SDM command ready */
+ if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
+ "Timeout waiting for QM SDM cmd ready signal\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd_addr,
+ u32 cmd_data_lsb,
+ u32 cmd_data_msb)
+{
+ if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
+ return false;
+
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+
+ return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
+}
+
+
+/******************** INTERFACE IMPLEMENTATION *********************/
+
+u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs)
+{
+ return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
+ QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
+ QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+}
+
+int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ bool pf_rl_en,
+ bool pf_wfq_en,
+ bool vport_rl_en,
+ bool vport_wfq_en,
+ struct init_qm_port_params
+ port_params[MAX_NUM_PORTS])
+{
+ u32 mask;
+
+ /* Init AFullOprtnstcCrdMask */
+ mask = (QM_OPPOR_LINE_VOQ_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+ (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+ (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+ (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+ (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+ (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+ (QM_OPPOR_FW_STOP_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+ (QM_OPPOR_PQ_EMPTY_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+
+ /* Enable/disable PF RL */
+ ecore_enable_pf_rl(p_hwfn, pf_rl_en);
+
+ /* Enable/disable PF WFQ */
+ ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
+
+ /* Enable/disable VPORT RL */
+ ecore_enable_vport_rl(p_hwfn, vport_rl_en);
+
+ /* Enable/disable VPORT WFQ */
+ ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
+
+ /* Init PBF CMDQ line credit */
+ ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
+ max_phys_tcs_per_port, port_params);
+
+ /* Init BTB blocks in PBF */
+ ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
+ max_phys_tcs_per_port, port_params);
+
+ return 0;
+}
+
+int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ bool is_pf_loading,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
+ u8 start_vport,
+ u8 num_vports,
+ u16 pf_wfq,
+ u32 pf_rl,
+ u32 link_speed,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params)
+{
+ u32 other_mem_size_4kb;
+ u8 tc, i;
+
+ other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
+ QM_OTHER_PQS_PER_PF;
+
+ /* Clear first Tx PQ ID array for each VPORT */
+ for (i = 0; i < num_vports; i++)
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+
+ /* Map Other PQs (if any) */
+#if QM_OTHER_PQS_PER_PF > 0
+ ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
+ num_tids, 0);
+#endif
+
+ /* Map Tx PQs */
+ ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
+ is_pf_loading, num_pf_cids, num_vf_cids,
+ start_pq, num_pf_pqs, num_vf_pqs, start_vport,
+ other_mem_size_4kb, pq_params, vport_params);
+
+ /* Init PF WFQ */
+ if (pf_wfq)
+ if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
+ max_phys_tcs_per_port,
+ num_pf_pqs + num_vf_pqs, pq_params))
+ return -1;
+
+ /* Init PF RL */
+ if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
+ return -1;
+
+ /* Set VPORT WFQ */
+ if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
+ return -1;
+
+ /* Set VPORT RL */
+ if (ecore_vport_rl_rt_init
+ (p_hwfn, start_vport, num_vports, link_speed, vport_params))
+ return -1;
+
+ return 0;
+}
+
+int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
+{
+ u32 inc_val;
+
+ inc_val = QM_WFQ_INC_VAL(pf_wfq);
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF WFQ weight configuration\n");
+ return -1;
+ }
+
+ ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
+ return 0;
+}
+
+int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
+{
+ u32 inc_val;
+
+ inc_val = QM_RL_INC_VAL(pf_rl);
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF rate limit configuration\n");
+ return -1;
+ }
+
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+
+ return 0;
+}
+
+int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
+{
+ u16 vport_pq_id;
+ u32 inc_val;
+ u8 tc;
+
+ inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT WFQ weight configuration\n");
+ return -1;
+ }
+
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ vport_pq_id = first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID) {
+ ecore_wr(p_hwfn, p_ptt,
+ QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
+ }
+ }
+
+ return 0;
+}
+
+int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 vport_id,
+ u32 vport_rl,
+ u32 link_speed)
+{
+ u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+
+ if (vport_id >= max_qm_global_rls) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT ID for rate limiter configuration\n");
+ return -1;
+ }
+
+ inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT rate-limit configuration\n");
+ return -1;
+ }
+
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+
+ return 0;
+}
+
+bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq, u16 start_pq, u16 num_pqs)
+{
+ u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+ u32 pq_mask = 0, last_pq, pq_id;
+
+ last_pq = start_pq + num_pqs - 1;
+
+ /* Set command's PQ type */
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+
+ /* Go over requested PQs */
+ for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
+ /* Set PQ bit in mask (stop command only) */
+ if (!is_release_cmd)
+ pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+
+ /* If last PQ or end of PQ mask, write command */
+ if ((pq_id == last_pq) ||
+ (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+ (QM_STOP_PQ_MASK_WIDTH - 1))) {
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
+ pq_mask);
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
+ pq_id / QM_STOP_PQ_MASK_WIDTH);
+ if (!ecore_send_qm_cmd
+ (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
+ cmd_arr[1]))
+ return false;
+ pq_mask = 0;
+ }
+ }
+
+ return true;
+}
+
+
+/* NIG: ETS configuration constants */
+#define NIG_TX_ETS_CLIENT_OFFSET 4
+#define NIG_LB_ETS_CLIENT_OFFSET 1
+#define NIG_ETS_MIN_WFQ_BYTES 1600
+
+/* NIG: ETS constants */
+#define NIG_ETS_UP_BOUND(weight, mtu) \
+ (2 * ((weight) > (mtu) ? (weight) : (mtu)))
+
+/* NIG: RL constants */
+
+/* Byte base type value */
+#define NIG_RL_BASE_TYPE 1
+
+/* Period in us */
+#define NIG_RL_PERIOD 1
+
+/* Period in 25MHz cycles */
+#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)
+
+/* Rate in mbps */
+#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)
+
+#define NIG_RL_MAX_VAL(inc_val, mtu) \
+ (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
+
+/* NIG: packet prioritry configuration constants */
+#define NIG_PRIORITY_MAP_TC_BITS 4
+
+
+void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_ets_req *req, bool is_lb)
+{
+ u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
+ u32 tc_bound_base_addr, tc_bound_addr_diff;
+ u8 sp_tc_map = 0, wfq_tc_map = 0;
+ u8 tc, num_tc, tc_client_offset;
+
+ num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
+ tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
+ NIG_TX_ETS_CLIENT_OFFSET;
+ min_weight = 0xffffffff;
+ tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+ tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
+ NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+ tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+ tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
+ NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+
+ /* Update SP map */
+ if (tc_req->use_sp)
+ sp_tc_map |= (1 << tc);
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Update WFQ map */
+ wfq_tc_map |= (1 << tc);
+
+ /* Find minimal weight */
+ if (tc_req->weight < min_weight)
+ min_weight = tc_req->weight;
+ }
+
+ /* Write SP map */
+ ecore_wr(p_hwfn, p_ptt,
+ is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
+ NIG_REG_TX_ARB_CLIENT_IS_STRICT,
+ (sp_tc_map << tc_client_offset));
+
+ /* Write WFQ map */
+ ecore_wr(p_hwfn, p_ptt,
+ is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+ (wfq_tc_map << tc_client_offset));
+ /* write WFQ weights */
+ for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
+ struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+ u32 byte_weight;
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Translate weight to bytes */
+ byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+ min_weight;
+
+ /* Write WFQ weight */
+ ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
+ tc_weight_addr_diff * tc_client_offset, byte_weight);
+
+ /* Write WFQ upper bound */
+ ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
+ tc_bound_addr_diff * tc_client_offset,
+ NIG_ETS_UP_BOUND(byte_weight, req->mtu));
+ }
+}
+
+void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_nig_lb_rl_req *req)
+{
+ u32 ctrl, inc_val, reg_offset;
+ u8 tc;
+
+ /* Disable global MAC+LB RL */
+ ctrl =
+ NIG_RL_BASE_TYPE <<
+ NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
+
+ /* Configure and enable global MAC+LB RL */
+ if (req->lb_mac_rate) {
+ /* Configure */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
+ NIG_RL_PERIOD_CLK_25M);
+ inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
+ inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
+ NIG_RL_MAX_VAL(inc_val, req->mtu));
+
+ /* Enable */
+ ctrl |=
+ 1 <<
+ NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
+ }
+
+ /* Disable global LB-only RL */
+ ctrl =
+ NIG_RL_BASE_TYPE <<
+ NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
+
+ /* Configure and enable global LB-only RL */
+ if (req->lb_rate) {
+ /* Configure */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
+ NIG_RL_PERIOD_CLK_25M);
+ inc_val = NIG_RL_INC_VAL(req->lb_rate);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
+ inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
+ NIG_RL_MAX_VAL(inc_val, req->mtu));
+
+ /* Enable */
+ ctrl |=
+ 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
+ }
+
+ /* Per-TC RLs */
+ for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
+ tc++, reg_offset += 4) {
+ /* Disable TC RL */
+ ctrl =
+ NIG_RL_BASE_TYPE <<
+ NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
+
+ /* Configure and enable TC RL */
+ if (!req->tc_rate[tc])
+ continue;
+
+ /* Configure */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
+ reg_offset, NIG_RL_PERIOD_CLK_25M);
+ inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
+ reg_offset, inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
+ reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
+
+ /* Enable */
+ ctrl |= 1 <<
+ NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
+ reg_offset, ctrl);
+ }
+}
+
+void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_nig_pri_tc_map_req *req)
+{
+ u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
+ u32 pri_tc_mask = 0;
+ u8 pri, tc;
+
+ for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
+ if (!req->pri[pri].valid)
+ continue;
+
+ pri_tc_mask |= (req->pri[pri].tc_id <<
+ (pri * NIG_PRIORITY_MAP_TC_BITS));
+ tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
+ }
+
+ /* Write priority -> TC mask */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
+
+ /* Write TC -> priority mask */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
+ tc_pri_mask[tc]);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
+ tc_pri_mask[tc]);
+ }
+}
+
+
+/* PRS: ETS configuration constants */
+#define PRS_ETS_MIN_WFQ_BYTES 1600
+#define PRS_ETS_UP_BOUND(weight, mtu) \
+ (2 * ((weight) > (mtu) ? (weight) : (mtu)))
+
+
+void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct init_ets_req *req)
+{
+ u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
+ u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
+
+ tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
+ PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
+ tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
+ PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
+
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+
+ /* Update SP map */
+ if (tc_req->use_sp)
+ sp_tc_map |= (1 << tc);
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Update WFQ map */
+ wfq_tc_map |= (1 << tc);
+
+ /* Find minimal weight */
+ if (tc_req->weight < min_weight)
+ min_weight = tc_req->weight;
+ }
+
+ /* write SP map */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
+
+ /* write WFQ map */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
+ wfq_tc_map);
+
+ /* write WFQ weights */
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+ u32 byte_weight;
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Translate weight to bytes */
+ byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+ min_weight;
+
+ /* Write WFQ weight */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
+ tc_weight_addr_diff, byte_weight);
+
+ /* Write WFQ upper bound */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
+ tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
+ req->mtu));
+ }
+}
+
+
+/* BRB: RAM configuration constants */
+#define BRB_TOTAL_RAM_BLOCKS_BB 4800
+#define BRB_TOTAL_RAM_BLOCKS_K2 5632
+#define BRB_BLOCK_SIZE 128
+#define BRB_MIN_BLOCKS_PER_TC 9
+#define BRB_HYST_BYTES 10240
+#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
+
+/* Temporary big RAM allocation - should be updated */
+void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
+{
+ u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
+ u32 active_port_blocks, reg_offset = 0;
+ u8 port, active_ports = 0;
+
+ tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
+ BRB_BLOCK_SIZE);
+ min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
+ BRB_BLOCK_SIZE);
+ total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
+ BRB_TOTAL_RAM_BLOCKS_BB;
+
+ /* Find number of active ports */
+ for (port = 0; port < MAX_NUM_PORTS; port++)
+ if (req->num_active_tcs[port])
+ active_ports++;
+
+ active_port_blocks = (u32)(total_blocks / active_ports);
+
+ for (port = 0; port < req->max_ports_per_engine; port++) {
+ u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
+ u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
+ u32 tc_guaranteed_blocks;
+ u8 tc;
+
+ /* Calculate per-port sizes */
+ tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
+ BRB_BLOCK_SIZE);
+ port_blocks = req->num_active_tcs[port] ? active_port_blocks :
+ 0;
+ port_guaranteed_blocks = req->num_active_tcs[port] *
+ tc_guaranteed_blocks;
+ port_shared_blocks = port_blocks - port_guaranteed_blocks;
+ full_xoff_th = req->num_active_tcs[port] *
+ BRB_MIN_BLOCKS_PER_TC;
+ full_xon_th = full_xoff_th + min_pkt_size_blocks;
+ pause_xoff_th = tc_headroom_blocks;
+ pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
+
+ /* Init total size per port */
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
+ port_blocks);
+
+ /* Init shared size per port */
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
+ port_shared_blocks);
+
+ for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
+ /* Clear init values for non-active TCs */
+ if (tc == req->num_active_tcs[port]) {
+ tc_guaranteed_blocks = 0;
+ full_xoff_th = 0;
+ full_xon_th = 0;
+ pause_xoff_th = 0;
+ pause_xon_th = 0;
+ }
+
+ /* Init guaranteed size per TC */
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_TC_GUARANTIED_0 + reg_offset,
+ tc_guaranteed_blocks);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
+ BRB_HYST_BLOCKS);
+
+ /* Init pause/full thresholds per physical TC - for
+ * loopback traffic.
+ */
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
+ reg_offset, full_xoff_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
+ reg_offset, full_xon_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
+ reg_offset, pause_xoff_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
+ reg_offset, pause_xon_th);
+
+ /* Init pause/full thresholds per physical TC - for
+ * main traffic.
+ */
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
+ reg_offset, full_xoff_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
+ reg_offset, full_xon_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
+ reg_offset, pause_xoff_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
+ reg_offset, pause_xon_th);
+ }
+ }
+}
+
+/* In MF should be called once per port to set EtherType of OuterTag */
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
+{
+ /* Update DORQ register */
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
+}
+
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
+#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
+#define PRS_ETH_OUTPUT_FORMAT -46832
+
+void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 dest_port)
+{
+ /* Update PRS register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+
+ /* Update NIG register */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+
+ /* Update PBF register */
+ ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, bool vxlan_enable)
+{
+ u32 reg_val;
+
+ /* Update PRS register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
+ vxlan_enable);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+ }
+
+ /* Update NIG register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
+ vxlan_enable);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ /* Update DORQ register */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+ vxlan_enable ? 1 : 0);
+}
+
+void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable)
+{
+ u32 reg_val;
+
+ /* Update PRS register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
+ eth_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
+ ip_gre_enable);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+ }
+
+ /* Update NIG register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
+ eth_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
+ ip_gre_enable);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ /* Update DORQ registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
+ eth_gre_enable ? 1 : 0);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
+ ip_gre_enable ? 1 : 0);
+}
+
+void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 dest_port)
+{
+ /* Update PRS register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+
+ /* Update NIG register */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+
+ /* Update PBF register */
+ ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
+}
+
+void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool eth_geneve_enable, bool ip_geneve_enable)
+{
+ u32 reg_val;
+
+ /* Update PRS register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
+ eth_geneve_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
+ ip_geneve_enable);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+ }
+
+ /* Update NIG register */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+ eth_geneve_enable ? 1 : 0);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
+ ip_geneve_enable ? 1 : 0);
+
+ /* EDPM with geneve tunnel not supported in BB */
+ if (ECORE_IS_BB_B0(p_hwfn->p_dev))
+ return;
+
+ /* Update DORQ registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
+ eth_geneve_enable ? 1 : 0);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
+ ip_geneve_enable ? 1 : 0);
+}
+
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
+
+void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool enable)
+{
+ u32 reg_val, cfg_mask;
+
+ /* read PRS config register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
+
+ /* set VXLAN_NO_L2_ENABLE mask */
+ cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
+
+ if (enable) {
+ /* set VXLAN_NO_L2_ENABLE flag */
+ reg_val |= cfg_mask;
+
+ /* update PRS FIC register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
+ } else {
+ /* clear VXLAN_NO_L2_ENABLE flag */
+ reg_val &= ~cfg_mask;
+ }
+
+ /* write PRS config register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
+}
+
+#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
+#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
+#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
+#define PARSER_ETH_CONN_CM_HDR 0
+#define CAM_LINE_SIZE sizeof(u32)
+#define RAM_LINE_SIZE sizeof(u64)
+#define REG_SIZE sizeof(u32)
+
+void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id)
+{
+ /* disable gft search for PF */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
+
+ /* Clean ram & cam for next gft session*/
+
+ /* Zero camline */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
+
+ /* Zero ramline */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id, 0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
+}
+
+
+void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 rfs_cm_hdr_event_id;
+
+ /* Set RFS event ID to be awakened i Tstorm By Prs */
+ rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
+ PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+}
+
+void ecore_gft_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4,
+ bool ipv6,
+ enum gft_profile_type profile_type)
+{
+ u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
+
+ if (!ipv6 && !ipv4)
+ DP_NOTICE(p_hwfn, true, "gft_config: must accept at least on of - ipv4 or ipv6'\n");
+ if (!tcp && !udp)
+ DP_NOTICE(p_hwfn, true, "gft_config: must accept at least on of - udp or tcp\n");
+ if (profile_type >= MAX_GFT_PROFILE_TYPE)
+ DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");
+
+ /* Set RFS event ID to be awakened i Tstorm By Prs */
+ reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
+
+ /* Do not load context only cid in PRS on match. */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
+
+ /* Do not use tenant ID exist bit for gft search*/
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
+
+ /* Set Cam */
+ cam_line = 0;
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
+
+ /* Filters are per PF!! */
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+
+ if (!(tcp && udp)) {
+ SET_FIELD(cam_line,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
+ if (tcp)
+ SET_FIELD(cam_line,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_TCP_PROTOCOL);
+ else
+ SET_FIELD(cam_line,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_UDP_PROTOCOL);
+ }
+
+ if (!(ipv4 && ipv6)) {
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+ if (ipv4)
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV4);
+ else
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV6);
+ }
+
+ /* Write characteristics to cam */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ cam_line);
+ cam_line = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
+
+ /* Write line to RAM - compare to filter 4 tuple */
+ ram_line_lo = 0;
+ ram_line_hi = 0;
+
+ /* Tunnel type */
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
+
+ if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
+ }
+
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ ram_line_lo);
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
+ REG_SIZE, ram_line_hi);
+
+ /* Set default profile so that no filter match will happen */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+
+ /* Enable gft search */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+}
+
+/* Configure VF zone size mode */
+void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 mode,
+ bool runtime_init)
+{
+ u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
+ u32 msdm_vf_offset_mask;
+
+ if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
+ msdm_vf_size_log += 1;
+ else if (mode == VF_ZONE_SIZE_MODE_QUAD)
+ msdm_vf_size_log += 2;
+
+ msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
+
+ if (runtime_init) {
+ STORE_RT_REG(p_hwfn,
+ PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
+ msdm_vf_size_log);
+ STORE_RT_REG(p_hwfn,
+ PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
+ msdm_vf_offset_mask);
+ } else {
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
+ }
+}
+
+/* Get mstorm statistics for offset by VF zone size mode */
+u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
+ u16 stat_cnt_id,
+ u16 vf_zone_size_mode)
+{
+ u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
+
+ if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
+ (stat_cnt_id > MAX_NUM_PFS)) {
+ if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
+ offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
+ (stat_cnt_id - MAX_NUM_PFS);
+ else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
+ offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
+ (stat_cnt_id - MAX_NUM_PFS);
+ }
+
+ return offset;
+}
+
+/* Get mstorm VF producer offset by VF zone size mode */
+u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
+ u8 vf_id,
+ u8 vf_queue_id,
+ u16 vf_zone_size_mode)
+{
+ u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
+
+ if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
+ if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
+ offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
+ vf_id;
+ else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
+ offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
+ vf_id;
+ }
+
+ return offset;
+}
+
+#ifndef LINUX_REMOVE
+#define CRC8_INIT_VALUE 0xFF
+#endif
+static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
+
+/* Calculate and return CDU validation byte per connection type / region /
+ * cid
+ */
+static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
+{
+ const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+
+ static u8 crc8_table_valid; /*automatically initialized to 0*/
+ u8 crc, validation_byte = 0;
+ u32 validation_string = 0;
+ u32 data_to_crc;
+
+ if (crc8_table_valid == 0) {
+ OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
+ crc8_table_valid = 1;
+ }
+
+ /*
+ * The CRC is calculated on the String-to-compress:
+ * [31:8] = {CID[31:20],CID[11:0]}
+ * [7:4] = Region
+ * [3:0] = Type
+ */
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+ validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+ validation_string |= ((region & 0xF) << 4);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+ validation_string |= (conn_type & 0xF);
+
+ /* Convert to big-endian and calculate CRC8*/
+ data_to_crc = OSAL_BE32_TO_CPU(validation_string);
+
+ crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
+ CRC8_INIT_VALUE);
+
+ /* The validation byte [7:0] is composed:
+ * for type A validation
+ * [7] = active configuration bit
+ * [6:0] = crc[6:0]
+ *
+ * for type B validation
+ * [7] = active configuration bit
+ * [6:3] = connection_type[3:0]
+ * [2:0] = crc[2:0]
+ */
+
+ validation_byte |= ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
+
+ if ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+ validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+ else
+ validation_byte |= crc & 0x7F;
+
+ return validation_byte;
+}
+
+/* Calcualte and set validation bytes for session context */
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 cid)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
+ *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
+ *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calcualte and set validation bytes for task context */
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
+ u32 tid)
+{
+ u8 *p_ctx, *region1_val_ptr;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
+
+/* Memset session context to 0 while preserving validation bytes */
+void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+ u8 x_val, t_val, u_val;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ x_val = *x_val_ptr;
+ t_val = *t_val_ptr;
+ u_val = *u_val_ptr;
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = x_val;
+ *t_val_ptr = t_val;
+ *u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *p_ctx, *region1_val_ptr;
+ u8 region1_val;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ region1_val = *region1_val_ptr;
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = region1_val;
+}
+
+/* Enable and configure context validation */
+void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 ctx_validation;
+
+ /* Enable validation for connection region 3 - bits [31:24] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+ /* Enable validation for connection region 5 - bits [15: 8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+ /* Enable validation for connection region 1 - bits [15: 8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
+}
+
+#define RSS_IND_TABLE_BASE_ADDR 4112
+#define RSS_IND_TABLE_VPORT_SIZE 16
+#define RSS_IND_TABLE_ENTRY_PER_LINE 8
+
+/* Update RSS indirection table entry. */
+void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 rss_id,
+ u8 ind_table_index,
+ u16 ind_table_value)
+{
+ u32 cnt, rss_addr;
+ u32 *reg_val;
+ u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE];
+ u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE];
+
+ /* get entry address */
+ rss_addr = RSS_IND_TABLE_BASE_ADDR +
+ RSS_IND_TABLE_VPORT_SIZE * rss_id +
+ ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE;
+
+ /* prepare update command */
+ ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE;
+
+ for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++) {
+ if (cnt == ind_table_index) {
+ rss_ind_entry[cnt] = ind_table_value;
+ rss_ind_mask[cnt] = 0xFFFF;
+ } else {
+ rss_ind_entry[cnt] = 0;
+ rss_ind_mask[cnt] = 0;
+ }
+ }
+
+ /* Update entry in HW*/
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
+
+ reg_val = (u32 *)rss_ind_mask;
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]);
+
+ reg_val = (u32 *)rss_ind_entry;
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]);
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.h
new file mode 100644
index 00000000..1024bb26
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -0,0 +1,492 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef _INIT_FW_FUNCS_H
+#define _INIT_FW_FUNCS_H
+/* Forward declarations */
+
+struct init_qm_pq_params;
+
+/**
+ * @brief ecore_qm_pf_mem_size - Prepare QM ILT sizes
+ *
+ * Returns the required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
+ *
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
+ *
+ * @return The required host memory size in 4KB units.
+ */
+u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs);
+
+/**
+ * @brief ecore_qm_common_rt_init - Prepare QM runtime init values for engine
+ * phase
+ *
+ * @param p_hwfn
+ * @param max_ports_per_engine - max number of ports per engine in HW
+ * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
+ * @param pf_rl_en - enable per-PF rate limiters
+ * @param pf_wfq_en - enable per-PF WFQ
+ * @param vport_rl_en - enable per-VPORT rate limiters
+ * @param vport_wfq_en - enable per-VPORT WFQ
+ * @param port_params - array of size MAX_NUM_PORTS with params for each port
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ bool pf_rl_en,
+ bool pf_wfq_en,
+ bool vport_rl_en,
+ bool vport_wfq_en,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS]);
+
+/**
+ * @brief ecore_qm_pf_rt_init Prepare QM runtime init values for the PF phase
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
+ * @param is_pf_loading - indicates if the PF is currently loading,
+ * i.e. it has no allocated QM resources.
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param start_pq - first Tx PQ ID associated with this PF
+ * @param num_pf_pqs - number of Tx PQs associated with this PF
+ * (non-VF)
+ * @param num_vf_pqs - number of Tx PQs associated with a VF
+ * @param start_vport - first VPORT ID associated with this PF
+ * @param num_vports - number of VPORTs associated with this PF
+ * @param pf_wfq - WFQ weight. if PF WFQ is globally disabled, the weight must
+ * be 0. otherwise, the weight must be non-zero.
+ * @param pf_rl - rate limit in Mb/sec units. a value of 0 means don't
+ * configure. ignored if PF RL is globally disabled.
+ * @param link_speed - link speed in Mbps.
+ * @param pq_params - array of size (num_pf_pqs+num_vf_pqs) with parameters for
+ * each Tx PQ associated with the specified PF.
+ * @param vport_params - array of size num_vports with parameters for each
+ * associated VPORT.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ bool is_pf_loading,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
+ u8 start_vport,
+ u8 num_vports,
+ u16 pf_wfq,
+ u32 pf_rl,
+ u32 link_speed,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params);
+
+/**
+ * @brief ecore_init_pf_wfq Initializes the WFQ weight of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_wfq - WFQ weight. Must be non-zero.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u16 pf_wfq);
+
+/**
+ * @brief ecore_init_pf_rl - Initializes the rate limit of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u32 pf_rl);
+
+/**
+ * @brief ecore_init_vport_wfq Initializes the WFQ weight of specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param first_tx_pq_id- An array containing the first Tx PQ ID associated
+ * with the VPORT for each TC. This array is filled by
+ * ecore_qm_pf_rt_init
+ * @param vport_wfq - WFQ weight. Must be non-zero.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS],
+ u16 vport_wfq);
+
+/**
+ * @brief ecore_init_vport_rl - Initializes the rate limit of the specified
+ * VPORT.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ * @param link_speed - link speed in Mbps.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vport_id,
+ u32 vport_rl,
+ u32 link_speed);
+
+/**
+ * @brief ecore_send_qm_stop_cmd Sends a stop command to the QM
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param is_release_cmd - true for release, false for stop.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ *
+ * @return bool, true if successful, false if timeout occurred while waiting
+ * for QM command done.
+ */
+bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq,
+ u16 start_pq,
+ u16 num_pqs);
+#ifndef UNUSED_HSI_FUNC
+
+/**
+ * @brief ecore_init_nig_ets - initializes the NIG ETS arbiter
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the NIG ETS initialization requirements.
+ * @param is_lb - if set, the loopback port arbiter is initialized, otherwise
+ * the physical port arbiter is initialized. The pure-LB TC
+ * requirements are ignored when is_lb is cleared.
+ */
+void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_ets_req *req,
+ bool is_lb);
+
+/**
+ * @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs
+ *
+ * Based on global and per-TC rate requirements
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the NIG LB RLs initialization requirements.
+ */
+void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_nig_lb_rl_req *req);
+#endif /* UNUSED_HSI_FUNC */
+
+/**
+ * @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map.
+ *
+ * Assumes valid arguments.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - required mapping from prioirties to TCs.
+ */
+void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_nig_pri_tc_map_req *req);
+
+#ifndef UNUSED_HSI_FUNC
+/**
+ * @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the PRS ETS initialization requirements.
+ */
+void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_ets_req *req);
+#endif /* UNUSED_HSI_FUNC */
+
+#ifndef UNUSED_HSI_FUNC
+/**
+ * @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the BRB RAM initialization requirements.
+ */
+void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_brb_ram_req *req);
+#endif /* UNUSED_HSI_FUNC */
+
+/**
+ * @brief ecore_set_vxlan_no_l2_enable - enable or disable VXLAN no L2 parsing
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param enable - VXLAN no L2 enable flag.
+ */
+void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool enable);
+
+#ifndef UNUSED_HSI_FUNC
+/**
+ * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
+ * input ethType should Be called
+ * once per port.
+ *
+ * @param p_hwfn - HW device data
+ * @param ethType - etherType to configure
+ */
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+ u32 ethType);
+#endif /* UNUSED_HSI_FUNC */
+
+/**
+ * @brief ecore_set_vxlan_dest_port - initializes vxlan tunnel destination udp
+ * port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - vxlan destination udp port.
+ */
+void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 dest_port);
+
+/**
+ * @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
+ */
+void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool vxlan_enable);
+
+/**
+ * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_gre_enable - eth GRE enable enable flag.
+ * @param ip_gre_enable - IP GRE enable enable flag.
+ */
+void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool eth_gre_enable,
+ bool ip_gre_enable);
+
+/**
+ * @brief ecore_set_geneve_dest_port - initializes geneve tunnel destination
+ * udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - geneve destination udp port.
+ */
+void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 dest_port);
+
+/**
+ * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_geneve_enable - eth GENEVE enable enable flag.
+ * @param ip_geneve_enable - IP GENEVE enable enable flag.
+ */
+void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool eth_geneve_enable,
+ bool ip_geneve_enable);
+#ifndef UNUSED_HSI_FUNC
+
+/**
+* @brief ecore_set_gft_event_id_cm_hdr - configure GFT event id and cm header
+*
+* @param p_ptt - ptt window used for writing the registers.
+*/
+void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_gft_disable - Disable and GFT
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to disable GFT.
+ */
+void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id);
+
+/**
+ * @brief ecore_gft_config - Enable and configure HW for GFT
+*
+* @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to enable GFT.
+* @param tcp - set profile tcp packets.
+* @param udp - set profile udp packet.
+* @param ipv4 - set profile ipv4 packet.
+* @param ipv6 - set profile ipv6 packet.
+ * @param profile_type - define packet same fields. Use enum gft_profile_type.
+*/
+void ecore_gft_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4,
+ bool ipv6,
+ enum gft_profile_type profile_type);
+#endif /* UNUSED_HSI_FUNC */
+
+/**
+* @brief ecore_config_vf_zone_size_mode - Configure VF zone size mode. Must be
+* used before first ETH queue started.
+*
+*
+* @param p_ptt - ptt window used for writing the registers. Don't care
+* if runtime_init used
+* @param mode - VF zone size mode. Use enum vf_zone_size_mode.
+* @param runtime_init - Set 1 to init runtime registers in engine phase. Set 0
+* if VF zone size mode configured after engine phase.
+*/
+void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt
+ *p_ptt, u16 mode, bool runtime_init);
+
+/**
+ * @brief ecore_get_mstorm_queue_stat_offset - Get mstorm statistics offset by
+ * VF zone size mode.
+*
+* @param stat_cnt_id - statistic counter id
+* @param vf_zone_size_mode - VF zone size mode. Use enum vf_zone_size_mode.
+*/
+u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
+ u16 stat_cnt_id, u16 vf_zone_size_mode);
+
+/**
+ * @brief ecore_get_mstorm_eth_vf_prods_offset - VF producer offset by VF zone
+ * size mode.
+*
+* @param vf_id - vf id.
+* @param vf_queue_id - per VF rx queue id.
+* @param vf_zone_size_mode - vf zone size mode. Use enum vf_zone_size_mode.
+*/
+u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8
+ vf_queue_id, u16 vf_zone_size_mode);
+/**
+ * @brief ecore_enable_context_validation - Enable and configure context
+ * validation.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ */
+void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+/**
+ * @brief ecore_calc_session_ctx_validation - Calcualte validation byte for
+ * session context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
+ */
+void ecore_calc_session_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size,
+ u8 ctx_type,
+ u32 cid);
+
+/**
+ * @brief ecore_calc_task_ctx_validation - Calcualte validation byte for task
+ * context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
+ */
+void ecore_calc_task_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size,
+ u8 ctx_type,
+ u32 tid);
+
+/**
+ * @brief ecore_memset_session_ctx - Memset session context to 0 while
+ * preserving validation bytes.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialzie.
+ * @param ctx_type - context type.
+ */
+void ecore_memset_session_ctx(void *p_ctx_mem,
+ u32 ctx_size,
+ u8 ctx_type);
+/**
+ * @brief ecore_memset_task_ctx - Memset task context to 0 while preserving
+ * validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialzie.
+ * @param ctx_type - context type.
+ */
+void ecore_memset_task_ctx(void *p_ctx_mem,
+ u32 ctx_size,
+ u8 ctx_type);
+
+/**
+ * @brief ecore_update_eth_rss_ind_table_entry - Update RSS indirection table
+ * entry.
+ * The function must run in exclusive mode to prevent wrong RSS configuration.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param rss_id - RSS engine ID.
+ * @param ind_table_index - RSS indirect table index.
+ * @param ind_table_value - RSS indirect table new value.
+ */
+void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 rss_id,
+ u8 ind_table_index,
+ u16 ind_table_value);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.c
new file mode 100644
index 00000000..b7636f36
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.c
@@ -0,0 +1,621 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+/* include the precompiled configuration values - only once */
+#include "bcm_osal.h"
+#include "ecore_hsi_common.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_status.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_fw_funcs.h"
+
+#include "ecore_iro_values.h"
+#include "ecore_sriov.h"
+#include "ecore_gtt_values.h"
+#include "reg_addr.h"
+#include "ecore_init_ops.h"
+
+#define ECORE_INIT_MAX_POLL_COUNT 100
+#define ECORE_INIT_POLL_PERIOD_US 500
+
+void ecore_init_iro_array(struct ecore_dev *p_dev)
+{
+ p_dev->iro_arr = iro_arr;
+}
+
+/* Runtime configuration helpers */
+void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
+{
+ int i;
+
+ for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
+ p_hwfn->rt_data.b_valid[i] = false;
+}
+
+void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
+{
+ if (rt_offset >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
+ val, rt_offset, RUNTIME_ARRAY_SIZE);
+ return;
+ }
+
+ p_hwfn->rt_data.init_val[rt_offset] = val;
+ p_hwfn->rt_data.b_valid[rt_offset] = true;
+}
+
+void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
+ u32 rt_offset, u32 *p_val, osal_size_t size)
+{
+ osal_size_t i;
+
+ if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
+ rt_offset, (u32)(rt_offset + size - 1),
+ RUNTIME_ARRAY_SIZE);
+ return;
+ }
+
+ for (i = 0; i < size / sizeof(u32); i++) {
+ p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
+ p_hwfn->rt_data.b_valid[rt_offset + i] = true;
+ }
+}
+
+static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr,
+ u16 rt_offset,
+ u16 size, bool b_must_dmae)
+{
+ u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
+ bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
+ u16 i, segment;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Since not all RT entries are initialized, go over the RT and
+ * for each segment of initialized values use DMA.
+ */
+ for (i = 0; i < size; i++) {
+ if (!p_valid[i])
+ continue;
+
+ /* In case there isn't any wide-bus configuration here,
+ * simply write the data instead of using dmae.
+ */
+ if (!b_must_dmae) {
+ ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
+ continue;
+ }
+
+ /* Start of a new segment */
+ for (segment = 1; i + segment < size; segment++)
+ if (!p_valid[i + segment])
+ break;
+
+ rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (osal_uintptr_t)(p_init_val + i),
+ addr + (i << 2), segment, 0);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Jump over the entire segment, including invalid entry */
+ i += segment;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_rt_data *rt_data = &p_hwfn->rt_data;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(bool) * RUNTIME_ARRAY_SIZE);
+ if (!rt_data->b_valid)
+ return ECORE_NOMEM;
+
+ rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(u32) * RUNTIME_ARRAY_SIZE);
+ if (!rt_data->init_val) {
+ OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
+ return ECORE_NOMEM;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_init_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
+}
+
+static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr,
+ u32 dmae_data_offset,
+ u32 size, const u32 *p_buf,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Perform DMAE only for lengthy enough sections or for wide-bus */
+#ifndef ASIC_ONLY
+ if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
+ !b_can_dmae || (!b_must_dmae && (size < 16))) {
+#else
+ if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+#endif
+ const u32 *data = p_buf + dmae_data_offset;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+ } else {
+ rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (osal_uintptr_t)(p_buf +
+ dmae_data_offset),
+ addr, size, 0);
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 fill_count)
+{
+ static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+
+ OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
+ return ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (osal_uintptr_t)&zero_buffer[0],
+ addr, fill_count,
+ ECORE_DMAE_FLAG_RW_REPL_SRC);
+}
+
+static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 fill, u32 fill_count)
+{
+ u32 i;
+
+ for (i = 0; i < fill_count; i++, addr += sizeof(u32))
+ ecore_wr(p_hwfn, p_ptt, addr, fill);
+}
+
+static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_write_op *cmd,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
+ u32 data = OSAL_LE32_TO_CPU(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ u32 offset, output_len, input_len, max_size;
+#endif
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ union init_array_hdr *hdr;
+ const u32 *array_data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 size;
+
+ array_data = p_dev->fw_data->arr_data;
+
+ hdr = (union init_array_hdr *)
+ (uintptr_t)(array_data + dmae_array_offset);
+ data = OSAL_LE32_TO_CPU(hdr->raw.data);
+ switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
+ case INIT_ARR_ZIPPED:
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ offset = dmae_array_offset + 1;
+ input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+ max_size = MAX_ZIPPED_SIZE * 4;
+ OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);
+
+ output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
+ (u8 *)(uintptr_t)&array_data[offset],
+ max_size,
+ (u8 *)p_hwfn->unzip_buf);
+ if (output_len) {
+ rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
+ output_len,
+ p_hwfn->unzip_buf,
+ b_must_dmae, b_can_dmae);
+ } else {
+ DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
+ rc = ECORE_INVAL;
+ }
+#else
+ DP_NOTICE(p_hwfn, true,
+ "Using zipped firmware without config enabled\n");
+ rc = ECORE_INVAL;
+#endif
+ break;
+ case INIT_ARR_PATTERN:
+ {
+ u32 repeats = GET_FIELD(data,
+ INIT_ARRAY_PATTERN_HDR_REPETITIONS);
+ u32 i;
+
+ size = GET_FIELD(data,
+ INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
+ for (i = 0; i < repeats; i++, addr += size << 2) {
+ rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset +
+ 1, size, array_data,
+ b_must_dmae,
+ b_can_dmae);
+ if (rc)
+ break;
+ }
+ break;
+ }
+ case INIT_ARR_STANDARD:
+ size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+ rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ break;
+ }
+
+ return rc;
+}
+
+/* init_ops write command */
+static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_write_op *p_cmd,
+ bool b_can_dmae)
+{
+ u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
+ bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Sanitize */
+ if (b_must_dmae && !b_can_dmae) {
+ DP_NOTICE(p_hwfn, true,
+ "Need to write to %08x for Wide-bus but DMAE isn't"
+ " allowed\n",
+ addr);
+ return ECORE_INVAL;
+ }
+
+ switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
+ case INIT_SRC_INLINE:
+ data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
+ ecore_wr(p_hwfn, p_ptt, addr, data);
+ break;
+ case INIT_SRC_ZEROS:
+ data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
+ if (b_must_dmae || (b_can_dmae && (data >= 64)))
+ rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
+ else
+ ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
+ break;
+ case INIT_SRC_ARRAY:
+ rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
+ b_must_dmae, b_can_dmae);
+ break;
+ case INIT_SRC_RUNTIME:
+ rc = ecore_init_rt(p_hwfn, p_ptt, addr,
+ OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
+ OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
+ b_must_dmae);
+ break;
+ }
+
+ return rc;
+}
+
+static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
+{
+ return (val == expected_val);
+}
+
+static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
+{
+ return (val & expected_val) == expected_val;
+}
+
+static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
+{
+ return (val | expected_val) > 0;
+}
+
+/* init_ops read/poll commands */
+static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct init_read_op *cmd)
+{
+ bool (*comp_check)(u32 val, u32 expected_val);
+ u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
+ u32 data, addr, poll;
+ int i;
+
+ data = OSAL_LE32_TO_CPU(cmd->op_data);
+ addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+ poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay *= 100;
+#endif
+
+ val = ecore_rd(p_hwfn, p_ptt, addr);
+
+ if (poll == INIT_POLL_NONE)
+ return;
+
+ switch (poll) {
+ case INIT_POLL_EQ:
+ comp_check = comp_eq;
+ break;
+ case INIT_POLL_OR:
+ comp_check = comp_or;
+ break;
+ case INIT_POLL_AND:
+ comp_check = comp_and;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+ cmd->op_data);
+ return;
+ }
+
+ data = OSAL_LE32_TO_CPU(cmd->expected_val);
+ for (i = 0;
+ i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
+ OSAL_UDELAY(delay);
+ val = ecore_rd(p_hwfn, p_ptt, addr);
+ }
+
+ if (i == ECORE_INIT_MAX_POLL_COUNT)
+ DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
+ addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
+ OSAL_LE32_TO_CPU(cmd->op_data));
+}
+
+/* init_ops callbacks entry point */
+static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_callback_op *p_cmd)
+{
+ enum _ecore_status_t rc;
+
+ switch (p_cmd->callback_id) {
+ case DMAE_READY_CB:
+ rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
+ p_cmd->callback_id);
+ return ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
+ u16 *p_offset, int modes)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ const u8 *modes_tree_buf;
+ u8 arg1, arg2, tree_val;
+
+ modes_tree_buf = p_dev->fw_data->modes_tree_buf;
+ tree_val = modes_tree_buf[(*p_offset)++];
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
+ case INIT_MODE_OP_OR:
+ arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ return arg1 | arg2;
+ case INIT_MODE_OP_AND:
+ arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ return arg1 & arg2;
+ default:
+ tree_val -= MAX_INIT_MODE_OPS;
+ return (modes & (1 << tree_val)) ? 1 : 0;
+ }
+}
+
+static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
+ struct init_if_mode_op *p_cmd, int modes)
+{
+ u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);
+
+ if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
+ return 0;
+ else
+ return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
+ INIT_IF_MODE_OP_CMD_OFFSET);
+}
+
+static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
+ u32 phase, u32 phase_id)
+{
+ u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
+ u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);
+
+ if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
+ (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
+ GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
+ return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
+ else
+ return 0;
+}
+
+enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int phase, int phase_id, int modes)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 cmd_num, num_init_ops;
+ union init_op *init_ops;
+ bool b_dmae = false;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ num_init_ops = p_dev->fw_data->init_ops_size;
+ init_ops = p_dev->fw_data->init_ops;
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
+ MAX_ZIPPED_SIZE * 4);
+ if (!p_hwfn->unzip_buf) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
+ return ECORE_NOMEM;
+ }
+#endif
+
+ for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
+ union init_op *cmd = &init_ops[cmd_num];
+ u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);
+
+ switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
+ case INIT_OP_WRITE:
+ rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
+ b_dmae);
+ break;
+
+ case INIT_OP_READ:
+ ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
+ break;
+
+ case INIT_OP_IF_MODE:
+ cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
+ modes);
+ break;
+ case INIT_OP_IF_PHASE:
+ cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
+ phase_id);
+ b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
+ break;
+ case INIT_OP_DELAY:
+ /* ecore_init_run is always invoked from
+ * sleep-able context
+ */
+ OSAL_UDELAY(cmd->delay.delay);
+ break;
+
+ case INIT_OP_CALLBACK:
+ rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
+#endif
+ return rc;
+}
+
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 gtt_base;
+ u32 i;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ /* This is done by MFW on ASIC; regardless, this should only
+ * be done once per chip [i.e., common]. Implementation is
+ * not too bright, but it should work on the simple FPGA/EMUL
+ * scenarios.
+ */
+ static bool initialized;
+ int poll_cnt = 500;
+ u32 val;
+
+ /* initialize PTT/GTT (poll for completion) */
+ if (!initialized) {
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_START_INIT_PTT_GTT, 1);
+ initialized = true;
+ }
+
+ do {
+ /* ptt might be overrided by HW until this is done */
+ OSAL_UDELAY(10);
+ ecore_ptt_invalidate(p_hwfn);
+ val = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_INIT_DONE_PTT_GTT);
+ } while ((val != 1) && --poll_cnt);
+
+ if (!poll_cnt)
+ DP_ERR(p_hwfn,
+ "PGLUE_B_REG_INIT_DONE didn't complete\n");
+ }
+#endif
+
+ /* Set the global windows */
+ gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+ for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
+ if (pxp_global_win[i])
+ REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+ pxp_global_win[i]);
+}
+
+enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
+#ifdef CONFIG_ECORE_BINARY_FW
+ const u8 *fw_data)
+#else
+ const u8 OSAL_UNUSED * fw_data)
+#endif
+{
+ struct ecore_fw_data *fw = p_dev->fw_data;
+
+#ifdef CONFIG_ECORE_BINARY_FW
+ struct bin_buffer_hdr *buf_hdr;
+ u32 offset, len;
+
+ if (!fw_data) {
+ DP_NOTICE(p_dev, true, "Invalid fw data\n");
+ return ECORE_INVAL;
+ }
+
+ buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)fw_data;
+
+ offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
+ fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(fw_data + offset));
+
+ offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
+ fw->init_ops = (union init_op *)((uintptr_t)(fw_data + offset));
+
+ offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
+ fw->arr_data = (u32 *)((uintptr_t)(fw_data + offset));
+
+ offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
+ fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
+ len = buf_hdr[BIN_BUF_INIT_CMD].length;
+ fw->init_ops_size = len / sizeof(struct init_raw_op);
+#else
+ fw->init_ops = (union init_op *)init_ops;
+ fw->arr_data = (u32 *)init_val;
+ fw->modes_tree_buf = (u8 *)modes_tree_buf;
+ fw->init_ops_size = init_ops_size;
+#endif
+
+ return ECORE_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.h
new file mode 100644
index 00000000..de7846d4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_INIT_OPS__
+#define __ECORE_INIT_OPS__
+
+#include "ecore.h"
+
+/**
+ * @brief ecore_init_iro_array - init iro_arr.
+ *
+ *
+ * @param p_dev
+ */
+void ecore_init_iro_array(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_init_run - Run the init-sequence.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param phase
+ * @param phase_id
+ * @param modes
+ * @return _ecore_status_t
+ */
+enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int phase,
+ int phase_id,
+ int modes);
+
+/**
+ * @brief ecore_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs.
+ *
+ *
+ * @param p_hwfn
+ *
+ * @return _ecore_status_t
+ */
+enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_init_hwfn_deallocate
+ *
+ *
+ * @param p_hwfn
+ */
+void ecore_init_free(struct ecore_hwfn *p_hwfn);
+
+
+/**
+ * @brief ecore_init_clear_rt_data - Clears the runtime init array.
+ *
+ *
+ * @param p_hwfn
+ */
+void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_init_store_rt_reg - Store a configuration value in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ */
+void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 val);
+
+#define STORE_RT_REG(hwfn, offset, val) \
+ ecore_init_store_rt_reg(hwfn, offset, val)
+
+#define OVERWRITE_RT_REG(hwfn, offset, val) \
+ ecore_init_store_rt_reg(hwfn, offset, val)
+
+/**
+* @brief
+*
+*
+* @param p_hwfn
+* @param rt_offset
+* @param val
+* @param size
+*/
+
+void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 *val,
+ osal_size_t size);
+
+#define STORE_RT_REG_AGG(hwfn, offset, val) \
+ ecore_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+
+
+/**
+ * @brief
+ * Initialize GTT global windows and set admin window
+ * related params of GTT/PTT to default values.
+ *
+ * @param p_hwfn
+ */
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+#endif /* __ECORE_INIT_OPS__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_int.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_int.c
new file mode 100644
index 00000000..4c271d35
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_int.c
@@ -0,0 +1,2683 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include <rte_string_fns.h>
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_spq.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_init_ops.h"
+#include "ecore_rt_defs.h"
+#include "ecore_int.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_sriov.h"
+#include "ecore_vf.h"
+#include "ecore_hw_defs.h"
+#include "ecore_hsi_common.h"
+#include "ecore_mcp.h"
+
+struct ecore_pi_info {
+ ecore_int_comp_cb_t comp_cb;
+ void *cookie; /* Will be sent to the compl cb function */
+};
+
+struct ecore_sb_sp_info {
+ struct ecore_sb_info sb_info;
+ /* per protocol index data */
+ struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
+};
+
+enum ecore_attention_type {
+ ECORE_ATTN_TYPE_ATTN,
+ ECORE_ATTN_TYPE_PARITY,
+};
+
+#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
+
+struct aeu_invert_reg_bit {
+ char bit_name[30];
+
+#define ATTENTION_PARITY (1 << 0)
+
+#define ATTENTION_LENGTH_MASK (0x00000ff0)
+#define ATTENTION_LENGTH_SHIFT (4)
+#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
+ ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
+#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
+ ATTENTION_PARITY)
+
+/* Multiple bits start with this offset */
+#define ATTENTION_OFFSET_MASK (0x000ff000)
+#define ATTENTION_OFFSET_SHIFT (12)
+
+#define ATTENTION_BB_MASK (0x00700000)
+#define ATTENTION_BB_SHIFT (20)
+#define ATTENTION_BB(value) ((value) << ATTENTION_BB_SHIFT)
+#define ATTENTION_BB_DIFFERENT (1 << 23)
+
+#define ATTENTION_CLEAR_ENABLE (1 << 28)
+ unsigned int flags;
+
+ /* Callback to call if attention will be triggered */
+ enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
+
+ enum block_id block_index;
+};
+
+struct aeu_invert_reg {
+ struct aeu_invert_reg_bit bits[32];
+};
+
+#define MAX_ATTN_GRPS (8)
+#define NUM_ATTN_REGS (9)
+
+static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
+
+ DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);
+
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000)
+#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5)
+#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e)
+#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1)
+#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1)
+#define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT (0)
+#define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
+static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 tmp =
+ ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_VF_DISABLED_ERROR_VALID);
+
+ /* Disabled VF access */
+ if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
+ u32 addr, data;
+
+ addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
+ data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_VF_DISABLED_ERROR_DATA);
+ DP_INFO(p_hwfn->p_dev,
+ "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
+ " Write [0x%02x] Addr [0x%08x]\n",
+ (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
+ >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
+ (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
+ >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
+ ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
+ ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
+ ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT),
+ addr);
+ }
+
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_VALID);
+ if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
+ u32 addr, data, length;
+
+ addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
+ data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_DATA);
+ length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_LENGTH);
+
+ DP_INFO(p_hwfn->p_dev,
+ "Incorrect access to %08x of length %08x - PF [%02x]"
+ " VF [%04x] [valid %02x] client [%02x] write [%02x]"
+ " Byte-Enable [%04x] [%08x]\n",
+ addr, length,
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
+ data);
+ }
+
+ /* TODO - We know 'some' of these are legal due to virtualization,
+ * but is it true for all of them?
+ */
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0)
+#define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0)
+#define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23)
+#define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24)
+#define ECORE_GRC_ATTENTION_MASTER_SHIFT (24)
+#define ECORE_GRC_ATTENTION_PF_MASK (0xf)
+#define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4)
+#define ECORE_GRC_ATTENTION_VF_SHIFT (4)
+#define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14)
+#define ECORE_GRC_ATTENTION_PRIV_SHIFT (14)
+#define ECORE_GRC_ATTENTION_PRIV_VF (0)
+static const char *grc_timeout_attn_master_to_str(u8 master)
+{
+ switch (master) {
+ case 1:
+ return "PXP";
+ case 2:
+ return "MCP";
+ case 3:
+ return "MSDM";
+ case 4:
+ return "PSDM";
+ case 5:
+ return "YSDM";
+ case 6:
+ return "USDM";
+ case 7:
+ return "TSDM";
+ case 8:
+ return "XSDM";
+ case 9:
+ return "DBU";
+ case 10:
+ return "DMAE";
+ default:
+ return "Unknown";
+ }
+}
+
+static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 tmp, tmp2;
+
+ /* We've already cleared the timeout interrupt register, so we learn
+ * of interrupts via the validity register.
+ * Any attention which is not for a timeout event is treated as fatal.
+ */
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
+ if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) {
+ rc = ECORE_INVAL;
+ goto out;
+ }
+
+ /* Read the GRC timeout information */
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
+ tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
+
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
+ tmp2, tmp,
+ (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
+ : "Read from",
+ (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
+ grc_timeout_attn_master_to_str(
+ (tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
+ ECORE_GRC_ATTENTION_MASTER_SHIFT),
+ (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
+ (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
+ ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
+ ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
+ (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
+ ECORE_GRC_ATTENTION_VF_SHIFT);
+
+ /* Clean the validity bit */
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
+out:
+ return rc;
+}
+
+#define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
+#define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
+#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
+#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
+#define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
+#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
+#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
+#define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
+#define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22)
+#define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
+#define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
+#define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
+#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
+
+enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool is_hw_init)
+{
+ u32 tmp;
+ char str[512] = {0};
+
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+ if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
+ details = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS);
+ OSAL_SNPRINTF(str, 512,
+ "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
+ tmp,
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
+ 1 : 0));
+ if (is_hw_init)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "%s", str);
+ else
+ DP_NOTICE(p_hwfn, false, "%s", str);
+ }
+
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+ if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
+ details = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_TX_ERR_RD_DETAILS);
+
+ DP_NOTICE(p_hwfn, false,
+ "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
+ tmp,
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
+ 1 : 0));
+ }
+
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+ if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
+ DP_NOTICE(p_hwfn, false, "ICPL erorr - %08x\n", tmp);
+
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+ if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
+ u32 addr_hi, addr_lo;
+
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
+
+ DP_NOTICE(p_hwfn, false,
+ "ICPL erorr - %08x [Address %08x:%08x]\n",
+ tmp, addr_hi, addr_lo);
+ }
+
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+ if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
+ u32 addr_hi, addr_lo, details;
+
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
+ details = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_DETAILS);
+
+ DP_NOTICE(p_hwfn, false,
+ "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
+ details, tmp, addr_hi, addr_lo);
+ }
+
+ /* Clear the indications */
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
+}
+
+static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
+{
+ DP_NOTICE(p_hwfn, false, "FW assertion!\n");
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
+
+ return ECORE_INVAL;
+}
+
+static enum _ecore_status_t
+ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
+{
+ DP_INFO(p_hwfn, "General attention 35!\n");
+
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
+#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
+#define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
+#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
+#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
+
+#define ECORE_DB_REC_COUNT 10
+#define ECORE_DB_REC_INTERVAL 100
+
+/* assumes sticky overflow indication was set for this PF */
+static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 count = ECORE_DB_REC_COUNT;
+ u32 usage = 1;
+
+ /* wait for usage to zero or count to run out. This is necessary since
+ * EDPM doorbell transactions can take multiple 64b cycles, and as such
+ * can "split" over the pci. Possibly, the doorbell drop can happen with
+ * half an EDPM in the queue and other half dropped. Another EDPM
+ * doorbell to the same address (from doorbell recovery mechanism or
+ * from the doorbelling entity) could have first half dropped and second
+ * half interperted as continuation of the first. To prevent such
+ * malformed doorbells from reaching the device, flush the queue before
+ * releaseing the overflow sticky indication.
+ */
+ while (count-- && usage) {
+ usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
+ OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
+ }
+
+ /* should have been depleted by now */
+ if (usage) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
+ ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
+ return ECORE_TIMEOUT;
+ }
+
+ /* flush any pedning (e)dpm as they may never arrive */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
+ /* release overflow sticky indication (stop silently dropping
+ * everything)
+ */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+
+ /* repeat all last doorbells (doorbell drop recovery) */
+ ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 int_sts, first_drop_reason, details, address, overflow,
+ all_drops_reason;
+ struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+ enum _ecore_status_t rc;
+
+ int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
+ DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
+ int_sts);
+
+ /* int_sts may be zero since all PFs were interrupted for doorbell
+ * overflow but another one already handled it. Can abort here. If
+ * This PF also requires overflow recovery we will be interrupted again
+ */
+ if (!int_sts)
+ return ECORE_SUCCESS;
+
+ /* check if db_drop or overflow happened */
+ if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
+ /* obtain data about db drop/overflow */
+ first_drop_reason = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_REASON) &
+ ECORE_DORQ_ATTENTION_REASON_MASK;
+ details = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS);
+ address = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS_ADDRESS);
+ overflow = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_PF_OVFL_STICKY);
+ all_drops_reason = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS_REASON);
+
+ /* log info */
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Doorbell drop occurred\n"
+ "Address\t\t0x%08x\t(second BAR address)\n"
+ "FID\t\t0x%04x\t\t(Opaque FID)\n"
+ "Size\t\t0x%04x\t\t(in bytes)\n"
+ "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
+ "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
+ "Overflow\t0x%x\t\t(a per PF indication)\n",
+ address,
+ GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
+ GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
+ first_drop_reason, all_drops_reason, overflow);
+
+ /* if this PF caused overflow, initiate recovery */
+ if (overflow) {
+ rc = ecore_db_rec_attn(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ /* clear the doorbell drop details and prepare for next drop */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
+
+ /* mark interrupt as handeld (note: even if drop was due to a
+ * different reason than overflow we mark as handled)
+ */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
+ DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
+
+ /* if there are no indications otherthan drop indications,
+ * success
+ */
+ if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
+ DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
+ return ECORE_SUCCESS;
+ }
+
+ /* some other indication was present - non recoverable */
+ DP_INFO(p_hwfn, "DORQ fatal attention\n");
+
+ return ECORE_INVAL;
+}
+
+static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
+ u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ TM_REG_INT_STS_1);
+
+ if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
+ TM_REG_INT_STS_1_PEND_CONN_SCAN))
+ return ECORE_INVAL;
+
+ if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
+ TM_REG_INT_STS_1_PEND_CONN_SCAN))
+ DP_INFO(p_hwfn,
+ "TM attention on emulation - most likely"
+ " results of clock-ratios\n");
+ val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
+ val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
+ TM_REG_INT_MASK_1_PEND_TASK_SCAN;
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
+
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ return ECORE_INVAL;
+}
+
+/* Instead of major changes to the data-structure, we have a some 'special'
+ * identifiers for sources that changed meaning between adapters.
+ */
+enum aeu_invert_reg_special_type {
+ AEU_INVERT_REG_SPECIAL_CNIG_0,
+ AEU_INVERT_REG_SPECIAL_CNIG_1,
+ AEU_INVERT_REG_SPECIAL_CNIG_2,
+ AEU_INVERT_REG_SPECIAL_CNIG_3,
+ AEU_INVERT_REG_SPECIAL_MAX,
+};
+
+static struct aeu_invert_reg_bit
+aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
+ {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+};
+
+/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
+static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
+ {
+ { /* After Invert 1 */
+ {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 2 */
+ {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb,
+ BLOCK_PGLUE_B},
+ {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"SW timers #%d",
+ (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
+ OSAL_NULL, MAX_BLOCK_ID},
+ {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ BLOCK_PGLCS},
+ }
+ },
+
+ {
+ { /* After Invert 3 */
+ {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 4 */
+ {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
+ ecore_fw_assertion, MAX_BLOCK_ID},
+ {"General Attention %d",
+ (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
+ OSAL_NULL, MAX_BLOCK_ID},
+ {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
+ ecore_general_attention_35, MAX_BLOCK_ID},
+ {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
+ OSAL_NULL, BLOCK_NWS},
+ {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
+ OSAL_NULL, BLOCK_NWS},
+ {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
+ OSAL_NULL, BLOCK_NWM},
+ {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
+ OSAL_NULL, BLOCK_NWM},
+ {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
+ {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+ {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
+ {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
+ {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
+ {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
+ {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
+ }
+ },
+
+ {
+ { /* After Invert 5 */
+ {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
+ {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
+ {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
+ {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
+ {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
+ {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
+ {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
+ {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
+ {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
+ {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
+ {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
+ {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
+ {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
+ {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
+ {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
+ {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
+ }
+ },
+
+ {
+ { /* After Invert 6 */
+ {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
+ {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
+ {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
+ {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
+ {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
+ {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
+ {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
+ {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
+ {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
+ {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
+ {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
+ {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
+ {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
+ {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
+ {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
+ {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
+ }
+ },
+
+ {
+ { /* After Invert 7 */
+ {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
+ {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
+ {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
+ {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
+ {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+ {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
+ {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
+ {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
+ {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
+ {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
+ {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
+ {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
+ {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
+ {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
+ {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
+ {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
+ }
+ },
+
+ {
+ { /* After Invert 8 */
+ {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
+ {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
+ {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
+ {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
+ {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
+ {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
+ {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
+ {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
+ {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
+ {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
+ {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
+ {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 9 */
+ {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
+ MAX_BLOCK_ID},
+ {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
+
+};
+
+static struct aeu_invert_reg_bit *
+ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ if (!ECORE_IS_BB(p_hwfn->p_dev))
+ return p_bit;
+
+ if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
+ return p_bit;
+
+ return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
+ ATTENTION_BB_SHIFT];
+}
+
+static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
+ ATTENTION_PARITY);
+}
+
+#define ATTN_STATE_BITS (0xfff)
+#define ATTN_BITS_MASKABLE (0x3ff)
+struct ecore_sb_attn_info {
+ /* Virtual & Physical address of the SB */
+ struct atten_status_block *sb_attn;
+ dma_addr_t sb_phys;
+
+ /* Last seen running index */
+ u16 index;
+
+ /* A mask of the AEU bits resulting in a parity error */
+ u32 parity_mask[NUM_ATTN_REGS];
+
+ /* A pointer to the attention description structure */
+ struct aeu_invert_reg *p_aeu_desc;
+
+ /* Previously asserted attentions, which are still unasserted */
+ u16 known_attn;
+
+ /* Cleanup address for the link's general hw attention */
+ u32 mfw_attn_addr;
+};
+
+static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_attn_info *p_sb_desc)
+{
+ u16 rc = 0, index;
+
+ OSAL_MMIOWB(p_hwfn->p_dev);
+
+ index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
+ if (p_sb_desc->index != index) {
+ p_sb_desc->index = index;
+ rc = ECORE_SB_ATT_IDX;
+ }
+
+ OSAL_MMIOWB(p_hwfn->p_dev);
+
+ return rc;
+}
+
+/**
+ * @brief ecore_int_assertion - handles asserted attention bits
+ *
+ * @param p_hwfn
+ * @param asserted_bits newly asserted bits
+ * @return enum _ecore_status_t
+ */
+static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
+ u16 asserted_bits)
+{
+ struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 igu_mask;
+
+ /* Mask the source of the attention in the IGU */
+ igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
+ igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
+ igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "inner known ATTN state: 0x%04x --> 0x%04x\n",
+ sb_attn_sw->known_attn,
+ sb_attn_sw->known_attn | asserted_bits);
+ sb_attn_sw->known_attn |= asserted_bits;
+
+ /* Handle MCP events */
+ if (asserted_bits & 0x100) {
+ ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
+ /* Clean the MCP attention */
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ sb_attn_sw->mfw_attn_addr, 0);
+ }
+
+ /* FIXME - this will change once we'll have GOOD gtt definitions */
+ DIRECT_REG_WR(p_hwfn,
+ (u8 OSAL_IOMEM *) p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_SET_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
+ asserted_bits);
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
+ enum block_id id, enum dbg_attn_type type,
+ bool b_clear)
+{
+ /* @DPDK */
+ DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type);
+}
+
+/**
+ * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
+ * cause of the attention
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the attention
+ * @param aeu_en_reg - register offset of the AEU enable reg. which configured
+ * this bit to this group.
+ * @param bit_index - index of this bit in the aeu_en_reg
+ *
+ * @return enum _ecore_status_t
+ */
+static enum _ecore_status_t
+ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u32 aeu_en_reg,
+ const char *p_bit_name,
+ u32 bitmask)
+{
+ enum _ecore_status_t rc = ECORE_INVAL;
+ bool b_fatal = false;
+
+ DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
+ p_bit_name, bitmask);
+
+ /* Call callback before clearing the interrupt status */
+ if (p_aeu->cb) {
+ DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
+ p_bit_name);
+ rc = p_aeu->cb(p_hwfn);
+ }
+
+ if (rc != ECORE_SUCCESS)
+ b_fatal = true;
+
+ /* Print HW block interrupt registers */
+ if (p_aeu->block_index != MAX_BLOCK_ID) {
+ ecore_int_attn_print(p_hwfn, p_aeu->block_index,
+ ATTN_TYPE_INTERRUPT, !b_fatal);
+}
+
+ /* @DPDK */
+ /* Reach assertion if attention is fatal */
+ if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
+ DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
+ p_bit_name);
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
+ }
+
+ /* Prevent this Attention from being asserted in the future */
+ if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
+ p_hwfn->p_dev->attn_clr_en) {
+ u32 val;
+ u32 mask = ~bitmask;
+ val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
+ DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n",
+ p_bit_name);
+ }
+
+ return rc;
+}
+
+/**
+ * @brief ecore_int_deassertion_parity - handle a single parity AEU source
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the parity
+ * @param aeu_en_reg - address of the AEU enable register
+ * @param bit_index
+ */
+static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u32 aeu_en_reg, u8 bit_index)
+{
+ u32 block_id = p_aeu->block_index, mask, val;
+
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "%s parity attention is set [address 0x%08x, bit %d]\n",
+ p_aeu->bit_name, aeu_en_reg, bit_index);
+
+ if (block_id != MAX_BLOCK_ID) {
+ ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
+
+ /* In A0, there's a single parity bit for several blocks */
+ if (block_id == BLOCK_BTB) {
+ ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
+ ATTN_TYPE_PARITY, false);
+ ecore_int_attn_print(p_hwfn, BLOCK_MCP,
+ ATTN_TYPE_PARITY, false);
+ }
+ }
+
+ /* Prevent this parity error from being re-asserted */
+ mask = ~(0x1 << bit_index);
+ val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
+ DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
+ p_aeu->bit_name);
+}
+
+/**
+ * @brief - handles deassertion of previously asserted attentions.
+ *
+ * @param p_hwfn
+ * @param deasserted_bits - newly deasserted bits
+ * @return enum _ecore_status_t
+ *
+ */
+static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
+ u16 deasserted_bits)
+{
+ struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
+ u8 i, j, k, bit_idx;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Read the attention registers in the AEU */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_AFTER_INVERT_1_IGU +
+ i * 0x4);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
+ }
+
+ /* Handle parity attentions first */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
+ u32 parities;
+
+ aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
+ en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+ parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
+
+ /* Skip register in which no parity bit is currently set */
+ if (!parities)
+ continue;
+
+ for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
+
+ if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
+ !!(parities & (1 << bit_idx)))
+ ecore_int_deassertion_parity(p_hwfn, p_bit,
+ aeu_en, bit_idx);
+
+ bit_idx += ATTENTION_LENGTH(p_bit->flags);
+ }
+ }
+
+ /* Find non-parity cause for attention and act */
+ for (k = 0; k < MAX_ATTN_GRPS; k++) {
+ struct aeu_invert_reg_bit *p_aeu;
+
+ /* Handle only groups whose attention is currently deasserted */
+ if (!(deasserted_bits & (1 << k)))
+ continue;
+
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ u32 bits;
+
+ aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32) +
+ k * sizeof(u32) * NUM_ATTN_REGS;
+ en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+ bits = aeu_inv_arr[i] & en;
+
+ /* Skip if no bit from this group is currently set */
+ if (!bits)
+ continue;
+
+ /* Find all set bits from current register which belong
+ * to current group, making them responsible for the
+ * previous assertion.
+ */
+ for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ unsigned long int bitmask;
+ u8 bit, bit_len;
+
+ /* Need to account bits with changed meaning */
+ p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
+
+ bit = bit_idx;
+ bit_len = ATTENTION_LENGTH(p_aeu->flags);
+ if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
+ /* Skip Parity */
+ bit++;
+ bit_len--;
+ }
+
+ /* Find the bits relating to HW-block, then
+ * shift so they'll become LSB.
+ */
+ bitmask = bits & (((1 << bit_len) - 1) << bit);
+ bitmask >>= bit;
+
+ if (bitmask) {
+ u32 flags = p_aeu->flags;
+ char bit_name[30];
+ u8 num;
+
+ num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
+ bit_len);
+
+ /* Some bits represent more than a
+ * a single interrupt. Correctly print
+ * their name.
+ */
+ if (ATTENTION_LENGTH(flags) > 2 ||
+ ((flags & ATTENTION_PAR_INT) &&
+ ATTENTION_LENGTH(flags) > 1))
+ OSAL_SNPRINTF(bit_name, 30,
+ p_aeu->bit_name,
+ num);
+ else
+ strlcpy(bit_name,
+ p_aeu->bit_name,
+ sizeof(bit_name));
+
+ /* We now need to pass bitmask in its
+ * correct position.
+ */
+ bitmask <<= bit;
+
+ /* Handle source of the attention */
+ ecore_int_deassertion_aeu_bit(p_hwfn,
+ p_aeu,
+ aeu_en,
+ bit_name,
+ bitmask);
+ }
+
+ bit_idx += ATTENTION_LENGTH(p_aeu->flags);
+ }
+ }
+ }
+
+ /* Clear IGU indication for the deasserted bits */
+ /* FIXME - this will change once we'll have GOOD gtt definitions */
+ DIRECT_REG_WR(p_hwfn,
+ (u8 OSAL_IOMEM *) p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));
+
+ /* Unmask deasserted attentions in IGU */
+ aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
+
+ /* Clear deassertion from inner state */
+ sb_attn_sw->known_attn &= ~deasserted_bits;
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
+ struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
+ u16 index = 0, asserted_bits, deasserted_bits;
+ u32 attn_bits = 0, attn_acks = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Read current attention bits/acks - safeguard against attentions
+ * by guaranting work on a synchronized timeframe
+ */
+ do {
+ index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
+ attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
+ attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
+ } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
+ p_sb_attn->sb_index = index;
+
+ /* Attention / Deassertion are meaningful (and in correct state)
+ * only when they differ and consistent with known state - deassertion
+ * when previous attention & current ack, and assertion when current
+ * attention with no previous attention
+ */
+ asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
+ ~p_sb_attn_sw->known_attn;
+ deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
+ p_sb_attn_sw->known_attn;
+
+ if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
+ DP_INFO(p_hwfn,
+ "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
+ index, attn_bits, attn_acks, asserted_bits,
+ deasserted_bits, p_sb_attn_sw->known_attn);
+ else if (asserted_bits == 0x100)
+ DP_INFO(p_hwfn, "MFW indication via attention\n");
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "MFW indication [deassertion]\n");
+
+ if (asserted_bits) {
+ rc = ecore_int_assertion(p_hwfn, asserted_bits);
+ if (rc)
+ return rc;
+ }
+
+ if (deasserted_bits)
+ rc = ecore_int_deassertion(p_hwfn, deasserted_bits);
+
+ return rc;
+}
+
+static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM *igu_addr, u32 ack_cons)
+{
+ struct igu_prod_cons_update igu_ack = { 0 };
+
+ igu_ack.sb_id_and_flags =
+ ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_ATTN <<
+ IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+ DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
+
+ /* Both segments (interrupts & acks) are written to same place address;
+ * Need to guarantee all commands will be received (in-order) by HW.
+ */
+ OSAL_MMIOWB(p_hwfn->p_dev);
+ OSAL_BARRIER(p_hwfn->p_dev);
+}
+
+void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
+{
+ struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
+ struct ecore_pi_info *pi_info = OSAL_NULL;
+ struct ecore_sb_attn_info *sb_attn;
+ struct ecore_sb_info *sb_info;
+ int arr_size;
+ u16 rc = 0;
+
+ if (!p_hwfn)
+ return;
+
+ if (!p_hwfn->p_sp_sb) {
+ DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
+ return;
+ }
+
+ sb_info = &p_hwfn->p_sp_sb->sb_info;
+ arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
+ if (!sb_info) {
+ DP_ERR(p_hwfn->p_dev,
+ "Status block is NULL - cannot ack interrupts\n");
+ return;
+ }
+
+ if (!p_hwfn->p_sb_attn) {
+ DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
+ return;
+ }
+ sb_attn = p_hwfn->p_sb_attn;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
+ p_hwfn, p_hwfn->my_id);
+
+ /* Disable ack for def status block. Required both for msix +
+ * inta in non-mask mode, in inta does no harm.
+ */
+ ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+
+ /* Gather Interrupts/Attentions information */
+ if (!sb_info->sb_virt) {
+ DP_ERR(p_hwfn->p_dev,
+ "Interrupt Status block is NULL -"
+ " cannot check for new interrupts!\n");
+ } else {
+ u32 tmp_index = sb_info->sb_ack;
+ rc = ecore_sb_update_sb_idx(sb_info);
+ DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
+ "Interrupt indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_info->sb_ack);
+ }
+
+ if (!sb_attn || !sb_attn->sb_attn) {
+ DP_ERR(p_hwfn->p_dev,
+ "Attentions Status block is NULL -"
+ " cannot check for new attentions!\n");
+ } else {
+ u16 tmp_index = sb_attn->index;
+
+ rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
+ DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
+ "Attention indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_attn->index);
+ }
+
+ /* Check if we expect interrupts at this time. if not just ack them */
+ if (!(rc & ECORE_SB_EVENT_MASK)) {
+ ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+/* Check the validity of the DPC ptt. If not ack interrupts and fail */
+
+ if (!p_hwfn->p_dpc_ptt) {
+ DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
+ ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+ if (rc & ECORE_SB_ATT_IDX)
+ ecore_int_attentions(p_hwfn);
+
+ if (rc & ECORE_SB_IDX) {
+ int pi;
+
+ /* Since we only looked at the SB index, it's possible more
+ * than a single protocol-index on the SB incremented.
+ * Iterate over all configured protocol indices and check
+ * whether something happened for each.
+ */
+ for (pi = 0; pi < arr_size; pi++) {
+ pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+ if (pi_info->comp_cb != OSAL_NULL)
+ pi_info->comp_cb(p_hwfn, pi_info->cookie);
+ }
+ }
+
+ if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
+ /* This should be done before the interrupts are enabled,
+ * since otherwise a new attention will be generated.
+ */
+ ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
+ }
+
+ ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+}
+
+static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_attn) {
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
+ p_sb->sb_phys,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn));
+ }
+ OSAL_FREE(p_hwfn->p_dev, p_sb);
+}
+
+static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+ OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
+
+ sb_info->index = 0;
+ sb_info->known_attn = 0;
+
+ /* Configure Attention Status Block in IGU */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
+ DMA_LO(p_hwfn->p_sb_attn->sb_phys));
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
+ DMA_HI(p_hwfn->p_sb_attn->sb_phys));
+}
+
+static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ void *sb_virt_addr, dma_addr_t sb_phy_addr)
+{
+ struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+ int i, j, k;
+
+ sb_info->sb_attn = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ /* Set the pointer to the AEU descriptors */
+ sb_info->p_aeu_desc = aeu_descs;
+
+ /* Calculate Parity Masks */
+ OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ /* j is array index, k is bit index */
+ for (j = 0, k = 0; k < 32; j++) {
+ struct aeu_invert_reg_bit *p_aeu;
+
+ p_aeu = &aeu_descs[i].bits[j];
+ if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
+ sb_info->parity_mask[i] |= 1 << k;
+
+ k += ATTENTION_LENGTH(p_aeu->flags);
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Attn Mask [Reg %d]: 0x%08x\n",
+ i, sb_info->parity_mask[i]);
+ }
+
+ /* Set the address of cleanup for the mcp attention */
+ sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
+ MISC_REG_AEU_GENERAL_ATTN_0;
+
+ ecore_int_sb_attn_setup(p_hwfn, p_ptt);
+}
+
+static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ struct ecore_sb_attn_info *p_sb;
+ dma_addr_t p_phys = 0;
+ void *p_virt;
+
+ /* SB struct */
+ p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
+ if (!p_sb) {
+ DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* SB ring */
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn));
+ if (!p_virt) {
+ DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n");
+ OSAL_FREE(p_dev, p_sb);
+ return ECORE_NOMEM;
+ }
+
+ /* Attention setup */
+ p_hwfn->p_sb_attn = p_sb;
+ ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
+
+ return ECORE_SUCCESS;
+}
+
+/* coalescing timeout = timeset << (timer_res + 1) */
+#define ECORE_CAU_DEF_RX_USECS 24
+#define ECORE_CAU_DEF_TX_USECS 48
+
+void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
+ u8 pf_id, u16 vf_number, u8 vf_valid)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 cau_state;
+ u8 timer_res;
+
+ OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
+
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+
+ cau_state = CAU_HC_DISABLE_STATE;
+
+ if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
+ cau_state = CAU_HC_ENABLE_STATE;
+ if (!p_dev->rx_coalesce_usecs)
+ p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
+ if (!p_dev->tx_coalesce_usecs)
+ p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
+ }
+
+ /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
+ if (p_dev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_dev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ if (p_dev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_dev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
+}
+
+static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 igu_sb_id, u32 pi_index,
+ enum ecore_coalescing_fsm coalescing_fsm,
+ u8 timeset)
+{
+ struct cau_pi_entry pi_entry;
+ u32 sb_offset, pi_offset;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return;/* @@@TBD MichalK- VF CAU... */
+
+ sb_offset = igu_sb_id * PIS_PER_SB_E4;
+ OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+ if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+ else
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+ pi_offset = sb_offset + pi_index;
+ if (p_hwfn->hw_init_done) {
+ ecore_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+ *((u32 *)&(pi_entry)));
+ } else {
+ STORE_RT_REG(p_hwfn,
+ CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+ *((u32 *)&(pi_entry)));
+ }
+}
+
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb, u32 pi_index,
+ enum ecore_coalescing_fsm coalescing_fsm,
+ u8 timeset)
+{
+ _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
+ pi_index, coalescing_fsm, timeset);
+}
+
+void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t sb_phys, u16 igu_sb_id,
+ u16 vf_number, u8 vf_valid)
+{
+ struct cau_sb_entry sb_entry;
+
+ ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
+ vf_number, vf_valid);
+
+ if (p_hwfn->hw_init_done) {
+ /* Wide-bus, initialize via DMAE */
+ u64 phys_addr = (u64)sb_phys;
+
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&phys_addr,
+ CAU_REG_SB_ADDR_MEMORY +
+ igu_sb_id * sizeof(u64), 2, 0);
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ igu_sb_id * sizeof(u64), 2, 0);
+ } else {
+ /* Initialize Status Block Address */
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2, sb_phys);
+
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2, sb_entry);
+ }
+
+ /* Configure pi coalescing if set */
+ if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
+ /* eth will open queues for all tcs, so configure all of them
+ * properly, rather than just the active ones
+ */
+ u8 num_tc = p_hwfn->hw_info.num_hw_tc;
+
+ u8 timeset, timer_res;
+ u8 i;
+
+ /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
+ if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
+ _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+ ECORE_COAL_RX_STATE_MACHINE,
+ timeset);
+
+ if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
+ for (i = 0; i < num_tc; i++) {
+ _ecore_int_cau_conf_pi(p_hwfn, p_ptt,
+ igu_sb_id, TX_PI(i),
+ ECORE_COAL_TX_STATE_MACHINE,
+ timeset);
+ }
+ }
+}
+
+void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
+{
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ if (IS_PF(p_hwfn->p_dev))
+ ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+ sb_info->igu_sb_id, 0, 0);
+}
+
+struct ecore_igu_block *
+ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
+{
+ struct ecore_igu_block *p_block;
+ u16 igu_id;
+
+ for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_id++) {
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+ !(p_block->status & ECORE_IGU_STATUS_FREE))
+ continue;
+
+ if (!!(p_block->status & ECORE_IGU_STATUS_PF) ==
+ b_is_pf)
+ return p_block;
+ }
+
+ return OSAL_NULL;
+}
+
+static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
+ u16 vector_id)
+{
+ struct ecore_igu_block *p_block;
+ u16 igu_id;
+
+ for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_id++) {
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+ !p_block->is_pf ||
+ p_block->vector_number != vector_id)
+ continue;
+
+ return igu_id;
+ }
+
+ return ECORE_SB_INVALID_IDX;
+}
+
+u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
+{
+ u16 igu_sb_id;
+
+ /* Assuming continuous set of IGU SBs dedicated for given PF */
+ if (sb_id == ECORE_SP_SB_ID)
+ igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ else if (IS_PF(p_hwfn->p_dev))
+ igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
+ else
+ igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
+
+ if (igu_sb_id == ECORE_SB_INVALID_IDX)
+ DP_NOTICE(p_hwfn, true,
+ "Slowpath SB vector %04x doesn't exist\n",
+ sb_id);
+ else if (sb_id == ECORE_SP_SB_ID)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
+
+ return igu_sb_id;
+}
+
+enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr, u16 sb_id)
+{
+ sb_info->sb_virt = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
+
+ if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
+ return ECORE_INVAL;
+
+ /* Let the igu info reference the client's SB info */
+ if (sb_id != ECORE_SP_SB_ID) {
+ if (IS_PF(p_hwfn->p_dev)) {
+ struct ecore_igu_info *p_info;
+ struct ecore_igu_block *p_block;
+
+ p_info = p_hwfn->hw_info.p_igu_info;
+ p_block = &p_info->entry[sb_info->igu_sb_id];
+
+ p_block->sb_info = sb_info;
+ p_block->status &= ~ECORE_IGU_STATUS_FREE;
+ p_info->usage.free_cnt--;
+ } else {
+ ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
+ }
+ }
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+ sb_info->p_hwfn = p_hwfn;
+#endif
+ sb_info->p_dev = p_hwfn->p_dev;
+
+ /* The igu address will hold the absolute address that needs to be
+ * written to for a specific status block
+ */
+ if (IS_PF(p_hwfn->p_dev)) {
+ sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
+
+ } else {
+ sb_info->igu_addr =
+ (u8 OSAL_IOMEM *)p_hwfn->regview +
+ PXP_VF_BAR0_START_IGU +
+ ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
+ }
+
+ sb_info->flags |= ECORE_SB_INFO_INIT;
+
+ ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_info *sb_info,
+ u16 sb_id)
+{
+ struct ecore_igu_info *p_info;
+ struct ecore_igu_block *p_block;
+
+ if (sb_info == OSAL_NULL)
+ return ECORE_SUCCESS;
+
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
+ return ECORE_SUCCESS;
+ }
+
+ p_info = p_hwfn->hw_info.p_igu_info;
+ p_block = &p_info->entry[sb_info->igu_sb_id];
+
+ /* Vector 0 is reserved to Default SB */
+ if (p_block->vector_number == 0) {
+ DP_ERR(p_hwfn, "Do Not free sp sb using this function");
+ return ECORE_INVAL;
+ }
+
+ /* Lose reference to client's SB info, and fix counters */
+ p_block->sb_info = OSAL_NULL;
+ p_block->status |= ECORE_IGU_STATUS_FREE;
+ p_info->usage.free_cnt++;
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
+
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_info.sb_virt) {
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_sb->sb_info.sb_virt,
+ p_sb->sb_info.sb_phys,
+ SB_ALIGNED_SIZE(p_hwfn));
+ }
+
+ OSAL_FREE(p_hwfn->p_dev, p_sb);
+}
+
+static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_sb_sp_info *p_sb;
+ dma_addr_t p_phys = 0;
+ void *p_virt;
+
+ /* SB struct */
+ p_sb =
+ OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*p_sb));
+ if (!p_sb) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* SB ring */
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_phys, SB_ALIGNED_SIZE(p_hwfn));
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n");
+ OSAL_FREE(p_hwfn->p_dev, p_sb);
+ return ECORE_NOMEM;
+ }
+
+ /* Status Block setup */
+ p_hwfn->p_sp_sb = p_sb;
+ ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
+ p_virt, p_phys, ECORE_SP_SB_ID);
+
+ OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
+ ecore_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx, __le16 **p_fw_cons)
+{
+ struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+ enum _ecore_status_t rc = ECORE_NOMEM;
+ u8 pi;
+
+ /* Look for a free index */
+ for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+ if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
+ continue;
+
+ p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+ p_sp_sb->pi_info_arr[pi].cookie = cookie;
+ *sb_idx = pi;
+ *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+ rc = ECORE_SUCCESS;
+ break;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
+{
+ struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+
+ if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
+ p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
+ return ECORE_SUCCESS;
+}
+
+u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
+}
+
+void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_int_mode int_mode)
+{
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
+ igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
+ }
+#endif
+
+ p_hwfn->p_dev->int_mode = int_mode;
+ switch (p_hwfn->p_dev->int_mode) {
+ case ECORE_INT_MODE_INTA:
+ igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case ECORE_INT_MODE_MSI:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case ECORE_INT_MODE_MSIX:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ break;
+ case ECORE_INT_MODE_POLL:
+ break;
+ }
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
+}
+
+static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn,
+ "FPGA - Don't enable Attentions in IGU and MISC\n");
+ return;
+ }
+#endif
+
+ /* Configure AEU signal change to produce attentions */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
+
+ /* Flush the writes to IGU */
+ OSAL_MMIOWB(p_hwfn->p_dev);
+
+ /* Unmask AEU signals toward IGU */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+}
+
+enum _ecore_status_t
+ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_int_mode int_mode)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ ecore_int_igu_enable_attn(p_hwfn, p_ptt);
+
+ if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+ rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Slowpath IRQ request failed\n");
+ return ECORE_NORESOURCES;
+ }
+ p_hwfn->b_int_requested = true;
+ }
+
+ /* Enable interrupt Generation */
+ ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+
+ p_hwfn->b_int_enabled = 1;
+
+ return rc;
+}
+
+void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ p_hwfn->b_int_enabled = 0;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return;
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+}
+
+#define IGU_CLEANUP_SLEEP_LENGTH (1000)
+static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 igu_sb_id,
+ bool cleanup_set,
+ u16 opaque_fid)
+{
+ u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
+ u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
+ u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
+ u8 type = 0; /* FIXME MichalS type??? */
+
+ OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
+ IGU_REG_CLEANUP_STATUS_0) != 0x200);
+
+ /* USE Control Command Register to perform cleanup. There is an
+ * option to do this using IGU bar, but then it can't be used for VFs.
+ */
+
+ /* Set the data field */
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
+ SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
+
+ /* Set the control register */
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
+
+ OSAL_BARRIER(p_hwfn->p_dev);
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+
+ /* Flush the write to IGU */
+ OSAL_MMIOWB(p_hwfn->p_dev);
+
+ /* calculate where to read the status bit from */
+ sb_bit = 1 << (igu_sb_id % 32);
+ sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
+
+ sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
+
+ /* Now wait for the command to complete */
+ while (--sleep_cnt) {
+ val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
+ if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
+ break;
+ OSAL_MSLEEP(5);
+ }
+
+ if (!sleep_cnt)
+ DP_NOTICE(p_hwfn, true,
+ "Timeout waiting for clear status 0x%08x [for sb %d]\n",
+ val, igu_sb_id);
+}
+
+void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 igu_sb_id, u16 opaque, bool b_set)
+{
+ struct ecore_igu_block *p_block;
+ int pi, i;
+
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
+ igu_sb_id, p_block->function_id, p_block->is_pf,
+ p_block->vector_number);
+
+ /* Set */
+ if (b_set)
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
+
+ /* Clear */
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
+
+ /* Wait for the IGU SB to cleanup */
+ for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
+ u32 val;
+
+ val = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_WRITE_DONE_PENDING +
+ ((igu_sb_id / 32) * 4));
+ if (val & (1 << (igu_sb_id % 32)))
+ OSAL_UDELAY(10);
+ else
+ break;
+ }
+ if (i == IGU_CLEANUP_SLEEP_LENGTH)
+ DP_NOTICE(p_hwfn, true,
+ "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
+ igu_sb_id);
+
+ /* Clear the CAU for the SB */
+ for (pi = 0; pi < 12; pi++)
+ ecore_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
+}
+
+void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_set, bool b_slowpath)
+{
+ struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct ecore_igu_block *p_block;
+ u16 igu_sb_id = 0;
+ u32 val = 0;
+
+ /* @@@TBD MichalK temporary... should be moved to init-tool... */
+ val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
+ val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
+ val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
+ /* end temporary */
+
+ for (igu_sb_id = 0;
+ igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
+
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+ !p_block->is_pf ||
+ (p_block->status & ECORE_IGU_STATUS_DSB))
+ continue;
+
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+ }
+
+ if (b_slowpath)
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ p_info->igu_dsb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+}
+
+int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct ecore_igu_block *p_block;
+ int pf_sbs, vf_sbs;
+ u16 igu_sb_id;
+ u32 val, rval;
+
+ if (!RESC_NUM(p_hwfn, ECORE_SB)) {
+ /* We're using an old MFW - have to prevent any switching
+ * of SBs between PF and VFs as later driver wouldn't be
+ * able to tell which belongs to which.
+ */
+ p_info->b_allow_pf_vf_change = false;
+ } else {
+ /* Use the numbers the MFW have provided -
+ * don't forget MFW accounts for the default SB as well.
+ */
+ p_info->b_allow_pf_vf_change = true;
+
+ if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
+ DP_INFO(p_hwfn,
+ "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
+ RESC_NUM(p_hwfn, ECORE_SB) - 1,
+ p_info->usage.cnt);
+ p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
+ }
+
+ /* TODO - how do we learn about VF SBs from MFW? */
+ if (IS_PF_SRIOV(p_hwfn)) {
+ u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ if (vfs != p_info->usage.iov_cnt)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
+ p_info->usage.iov_cnt, vfs);
+
+ /* At this point we know how many SBs we have totally
+ * in IGU + number of PF SBs. So we can validate that
+ * we'd have sufficient for VF.
+ */
+ if (vfs > p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov -
+ p_info->usage.cnt) {
+ DP_NOTICE(p_hwfn, true,
+ "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
+ p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov,
+ p_info->usage.cnt, vfs);
+ return ECORE_INVAL;
+ }
+ }
+ }
+
+ /* Cap the number of VFs SBs by the number of VFs */
+ if (IS_PF_SRIOV(p_hwfn))
+ p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ /* Mark all SBs as free, now in the right PF/VFs division */
+ p_info->usage.free_cnt = p_info->usage.cnt;
+ p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
+ p_info->usage.orig = p_info->usage.cnt;
+ p_info->usage.iov_orig = p_info->usage.iov_cnt;
+
+ /* We now proceed to re-configure the IGU cam to reflect the initial
+ * configuration. We can start with the Default SB.
+ */
+ pf_sbs = p_info->usage.cnt;
+ vf_sbs = p_info->usage.iov_cnt;
+
+ for (igu_sb_id = p_info->igu_dsb_id;
+ igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
+ val = 0;
+
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID))
+ continue;
+
+ if (p_block->status & ECORE_IGU_STATUS_DSB) {
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = 0;
+ p_block->status = ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_PF |
+ ECORE_IGU_STATUS_DSB;
+ } else if (pf_sbs) {
+ pf_sbs--;
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = p_info->usage.cnt - pf_sbs;
+ p_block->status = ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_PF |
+ ECORE_IGU_STATUS_FREE;
+ } else if (vf_sbs) {
+ p_block->function_id =
+ p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
+ p_info->usage.iov_cnt - vf_sbs;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ p_block->status = ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_FREE;
+ vf_sbs--;
+ } else {
+ p_block->function_id = 0;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ }
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
+ p_block->function_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
+ p_block->vector_number);
+
+ /* VF entries would be enabled when VF is initializaed */
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
+
+ rval = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * igu_sb_id);
+
+ if (rval != val) {
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * igu_sb_id,
+ val);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
+ igu_sb_id, p_block->function_id,
+ p_block->is_pf, p_block->vector_number,
+ rval, val);
+ }
+ }
+
+ return 0;
+}
+
+int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;
+
+ /* Return all the usage indications to default prior to the reset;
+ * The reset expects the !orig to reflect the initial status of the
+ * SBs, and would re-calculate the originals based on those.
+ */
+ p_cnt->cnt = p_cnt->orig;
+ p_cnt->free_cnt = p_cnt->orig;
+ p_cnt->iov_cnt = p_cnt->iov_orig;
+ p_cnt->free_cnt_iov = p_cnt->iov_orig;
+ p_cnt->orig = 0;
+ p_cnt->iov_orig = 0;
+
+ /* TODO - we probably need to re-configure the CAU as well... */
+ return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
+}
+
+static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 igu_sb_id)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
+ struct ecore_igu_block *p_block;
+
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
+
+ /* Fill the block information */
+ p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+ p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+ p_block->igu_sb_id = igu_sb_id;
+}
+
+enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_igu_info *p_igu_info;
+ struct ecore_igu_block *p_block;
+ u32 min_vf = 0, max_vf = 0;
+ u16 igu_sb_id;
+
+ p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
+ GFP_KERNEL,
+ sizeof(*p_igu_info));
+ if (!p_hwfn->hw_info.p_igu_info)
+ return ECORE_NOMEM;
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ /* Distinguish between existent and onn-existent default SB */
+ p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;
+
+ /* Find the range of VF ids whose SB belong to this PF */
+ if (p_hwfn->p_dev->p_iov_info) {
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+
+ min_vf = p_iov->first_vf_in_pf;
+ max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
+ }
+
+ for (igu_sb_id = 0;
+ igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_sb_id++) {
+ /* Read current entry; Notice it might not belong to this PF */
+ ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
+ p_block = &p_igu_info->entry[igu_sb_id];
+
+ if ((p_block->is_pf) &&
+ (p_block->function_id == p_hwfn->rel_pf_id)) {
+ p_block->status = ECORE_IGU_STATUS_PF |
+ ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_FREE;
+
+ if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
+ p_igu_info->usage.cnt++;
+ } else if (!(p_block->is_pf) &&
+ (p_block->function_id >= min_vf) &&
+ (p_block->function_id < max_vf)) {
+ /* Available for VFs of this PF */
+ p_block->status = ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_FREE;
+
+ if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
+ p_igu_info->usage.iov_cnt++;
+ }
+
+ /* Mark the First entry belonging to the PF or its VFs
+ * as the default SB [we'll reset IGU prior to first usage].
+ */
+ if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
+ (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
+ p_igu_info->igu_dsb_id = igu_sb_id;
+ p_block->status |= ECORE_IGU_STATUS_DSB;
+ }
+
+ /* While this isn't suitable for all clients, limit number
+ * of prints by having each PF print only its entries with the
+ * exception of PF0 which would print everything.
+ */
+ if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
+ (p_hwfn->abs_pf_id == 0))
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+ igu_sb_id, p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
+ }
+
+ if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
+ DP_NOTICE(p_hwfn, true,
+ "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
+ p_igu_info->igu_dsb_id);
+ return ECORE_INVAL;
+ }
+
+ /* All non default SB are considered free at this point */
+ p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
+ p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
+ p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
+ p_igu_info->usage.iov_cnt);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u16 sb_id, bool b_to_vf)
+{
+ struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct ecore_igu_block *p_block = OSAL_NULL;
+ u16 igu_sb_id = 0, vf_num = 0;
+ u32 val = 0;
+
+ if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
+ return ECORE_INVAL;
+
+ if (sb_id == ECORE_SP_SB_ID)
+ return ECORE_INVAL;
+
+ if (!p_info->b_allow_pf_vf_change) {
+ DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
+ return ECORE_INVAL;
+ }
+
+ /* If we're moving a SB from PF to VF, the client had to specify
+ * which vector it wants to move.
+ */
+ if (b_to_vf) {
+ igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
+ if (igu_sb_id == ECORE_SB_INVALID_IDX)
+ return ECORE_INVAL;
+ }
+
+ /* If we're moving a SB from VF to PF, need to validate there isn't
+ * already a line configured for that vector.
+ */
+ if (!b_to_vf) {
+ if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
+ ECORE_SB_INVALID_IDX)
+ return ECORE_INVAL;
+ }
+
+ /* We need to validate that the SB can actually be relocated.
+ * This would also handle the previous case where we've explicitly
+ * stated which IGU SB needs to move.
+ */
+ for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
+
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+ !(p_block->status & ECORE_IGU_STATUS_FREE) ||
+ (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
+ if (b_to_vf)
+ return ECORE_INVAL;
+ else
+ continue;
+ }
+
+ break;
+ }
+
+ if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
+ "Failed to find a free SB to move\n");
+ return ECORE_INVAL;
+ }
+
+ /* At this point, p_block points to the SB we want to relocate */
+ if (b_to_vf) {
+ p_block->status &= ~ECORE_IGU_STATUS_PF;
+
+ /* It doesn't matter which VF number we choose, since we're
+ * going to disable the line; But let's keep it in range.
+ */
+ vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
+
+ p_block->function_id = (u8)vf_num;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+
+ p_info->usage.cnt--;
+ p_info->usage.free_cnt--;
+ p_info->usage.iov_cnt++;
+ p_info->usage.free_cnt_iov++;
+
+ /* TODO - if SBs aren't really the limiting factor,
+ * then it might not be accurate [in the since that
+ * we might not need decrement the feature].
+ */
+ p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
+ p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
+ } else {
+ p_block->status |= ECORE_IGU_STATUS_PF;
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = sb_id + 1;
+
+ p_info->usage.cnt++;
+ p_info->usage.free_cnt++;
+ p_info->usage.iov_cnt--;
+ p_info->usage.free_cnt_iov--;
+
+ p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
+ p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
+ }
+
+ /* Update the IGU and CAU with the new configuration */
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
+ p_block->function_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
+ p_block->vector_number);
+
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
+ val);
+
+ ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
+ igu_sb_id, vf_num,
+ p_block->is_pf ? 0 : 1);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+ igu_sb_id, p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
+
+ return ECORE_SUCCESS;
+}
+
+/**
+ * @brief Initialize igu runtime registers
+ *
+ * @param p_hwfn
+ */
+void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
+{
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
+
+ STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
+}
+
+#define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
+ IGU_CMD_INT_ACK_BASE)
+#define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
+ IGU_CMD_INT_ACK_BASE)
+u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
+{
+ u32 intr_status_hi = 0, intr_status_lo = 0;
+ u64 intr_status = 0;
+
+ intr_status_lo = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ LSB_IGU_CMD_ADDR * 8);
+ intr_status_hi = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ MSB_IGU_CMD_ADDR * 8);
+ intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
+
+ return intr_status;
+}
+
+static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
+ p_hwfn->b_sp_dpc_enabled = true;
+}
+
+static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
+{
+ p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
+ if (!p_hwfn->sp_dpc)
+ return ECORE_NOMEM;
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
+}
+
+enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ rc = ecore_int_sp_dpc_alloc(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
+ return rc;
+ }
+
+ rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
+ return rc;
+ }
+
+ rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");
+
+ return rc;
+}
+
+void ecore_int_free(struct ecore_hwfn *p_hwfn)
+{
+ ecore_int_sp_sb_free(p_hwfn);
+ ecore_int_sb_attn_free(p_hwfn);
+ ecore_int_sp_dpc_free(p_hwfn);
+}
+
+void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
+ return;
+
+ ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+ ecore_int_sb_attn_setup(p_hwfn, p_ptt);
+ ecore_int_sp_dpc_setup(p_hwfn);
+}
+
+void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_cnt_info *p_sb_cnt_info)
+{
+ struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ if (!p_igu_info || !p_sb_cnt_info)
+ return;
+
+ OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage,
+ sizeof(*p_sb_cnt_info));
+}
+
+void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
+{
+ int i;
+
+ for_each_hwfn(p_dev, i)
+ p_dev->hwfns[i].b_int_requested = false;
+}
+
+void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
+{
+ p_dev->attn_clr_en = clr_enable;
+}
+
+enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx)
+{
+ struct cau_sb_entry sb_entry;
+ enum _ecore_status_t rc;
+
+ if (!p_hwfn->hw_init_done) {
+ DP_ERR(p_hwfn, "hardware not initialized yet\n");
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64),
+ (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ if (tx)
+ SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+ else
+ SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64), 2, 0);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb,
+ struct ecore_sb_info_dbg *p_info)
+{
+ u16 sbid = p_sb->igu_sb_id;
+ int i;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_PRODUCER_MEMORY + sbid * 4);
+ p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_CONSUMER_MEM + sbid * 4);
+
+ for (i = 0; i < PIS_PER_SB_E4; i++)
+ p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY +
+ sbid * 4 * PIS_PER_SB_E4 +
+ i * 4);
+
+ return ECORE_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_int.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_int.h
new file mode 100644
index 00000000..041240d7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_int.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_INT_H__
+#define __ECORE_INT_H__
+
+#include "ecore.h"
+#include "ecore_int_api.h"
+
+#define ECORE_CAU_DEF_RX_TIMER_RES 0
+#define ECORE_CAU_DEF_TX_TIMER_RES 0
+
+#define ECORE_SB_ATT_IDX 0x0001
+#define ECORE_SB_EVENT_MASK 0x0003
+
+#define SB_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
+
+#define ECORE_SB_INVALID_IDX 0xffff
+
+struct ecore_igu_block {
+ u8 status;
+#define ECORE_IGU_STATUS_FREE 0x01
+#define ECORE_IGU_STATUS_VALID 0x02
+#define ECORE_IGU_STATUS_PF 0x04
+#define ECORE_IGU_STATUS_DSB 0x08
+
+ u8 vector_number;
+ u8 function_id;
+ u8 is_pf;
+
+ /* Index inside IGU [meant for back reference] */
+ u16 igu_sb_id;
+
+ struct ecore_sb_info *sb_info;
+};
+
+struct ecore_igu_info {
+ struct ecore_igu_block entry[MAX_TOT_SB_PER_PATH];
+ u16 igu_dsb_id;
+
+ /* The numbers can shift when using APIs to switch SBs between PF and
+ * VF.
+ */
+ struct ecore_sb_cnt_info usage;
+
+ /* Determine whether we can shift SBs between VFs and PFs */
+ bool b_allow_pf_vf_change;
+};
+
+/**
+ * @brief - Make sure the IGU CAM reflects the resources provided by MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Make sure IGU CAM reflects the default resources once again,
+ * starting with a 'dirty' SW database.
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Translate the weakly-defined client sb-id into an IGU sb-id
+ *
+ * @param p_hwfn
+ * @param sb_id - user provided sb_id
+ *
+ * @return an index inside IGU CAM where the SB resides
+ */
+u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief return a pointer to an unused valid SB
+ *
+ * @param p_hwfn
+ * @param b_is_pf - true iff we want a SB belonging to a PF
+ *
+ * @return point to an igu_block, OSAL_NULL if none is available
+ */
+struct ecore_igu_block *
+ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf);
+/* TODO Names of function may change... */
+void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_set, bool b_slowpath);
+
+void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_int_igu_read_cam - Reads the IGU CAM.
+ * This function needs to be called during hardware
+ * prepare. It reads the info from igu cam to know which
+ * status block is the default / base status block etc.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+typedef enum _ecore_status_t (*ecore_int_comp_cb_t) (struct ecore_hwfn *p_hwfn,
+ void *cookie);
+/**
+ * @brief ecore_int_register_cb - Register callback func for
+ * slowhwfn statusblock.
+ *
+ * Every protocol that uses the slowhwfn status block
+ * should register a callback function that will be called
+ * once there is an update of the sp status block.
+ *
+ * @param p_hwfn
+ * @param comp_cb - function to be called when there is an
+ * interrupt on the sp sb
+ *
+ * @param cookie - passed to the callback function
+ * @param sb_idx - OUT parameter which gives the chosen index
+ * for this protocol.
+ * @param p_fw_cons - pointer to the actual address of the
+ * consumer for this protocol.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
+ ecore_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx, __le16 **p_fw_cons);
+/**
+ * @brief ecore_int_unregister_cb - Unregisters callback
+ * function from sp sb.
+ * Partner of ecore_int_register_cb -> should be called
+ * when no longer required.
+ *
+ * @param p_hwfn
+ * @param pi
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi);
+
+/**
+ * @brief ecore_int_get_sp_sb_id - Get the slowhwfn sb id.
+ *
+ * @param p_hwfn
+ *
+ * @return u16
+ */
+u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ * block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - igu status block id
+ * @param opaque - opaque fid of the sb owner.
+ * @param cleanup_set - set(1) / clear(0)
+ */
+void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 sb_id,
+ u16 opaque,
+ bool b_set);
+
+/**
+ * @brief ecore_int_cau_conf - configure cau for a given status
+ * block
+ *
+ * @param p_hwfn
+ * @param ptt
+ * @param sb_phys
+ * @param igu_sb_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id, u16 vf_number, u8 vf_valid);
+
+/**
+* @brief ecore_int_alloc
+*
+* @param p_hwfn
+ * @param p_ptt
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+* @brief ecore_int_free
+*
+* @param p_hwfn
+*/
+void ecore_int_free(struct ecore_hwfn *p_hwfn);
+
+/**
+* @brief ecore_int_setup
+*
+* @param p_hwfn
+* @param p_ptt
+*/
+void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Enable Interrupt & Attention for hw function
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode
+ *
+* @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_igu_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_int_mode int_mode);
+
+/**
+ * @brief - Initialize CAU status block entry
+ *
+ * @param p_hwfn
+ * @param p_sb_entry
+ * @param pf_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry, u8 pf_id,
+ u16 vf_number, u8 vf_valid);
+
+enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx);
+#ifndef ASIC_ONLY
+#define ECORE_MAPPING_MEMORY_SIZE(dev) \
+ ((CHIP_REV_IS_SLOW(dev) && (!(dev)->b_is_emul_full)) ? \
+ 136 : NUM_OF_SBS(dev))
+#else
+#define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev)
+#endif
+
+enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool is_hw_init);
+
+#endif /* __ECORE_INT_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_int_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_int_api.h
new file mode 100644
index 00000000..aeaf469e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_int_api.h
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_INT_API_H__
+#define __ECORE_INT_API_H__
+
+#ifndef __EXTRACT__LINUX__
+#define ECORE_SB_IDX 0x0002
+
+#define RX_PI 0
+#define TX_PI(tc) (RX_PI + 1 + tc)
+
+#ifndef ECORE_INT_MODE
+#define ECORE_INT_MODE
+enum ecore_int_mode {
+ ECORE_INT_MODE_INTA,
+ ECORE_INT_MODE_MSIX,
+ ECORE_INT_MODE_MSI,
+ ECORE_INT_MODE_POLL,
+};
+#endif
+
+struct ecore_sb_info {
+ struct status_block_e4 *sb_virt;
+ dma_addr_t sb_phys;
+ u32 sb_ack; /* Last given ack */
+ u16 igu_sb_id;
+ void OSAL_IOMEM *igu_addr;
+ u8 flags;
+#define ECORE_SB_INFO_INIT 0x1
+#define ECORE_SB_INFO_SETUP 0x2
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+ struct ecore_hwfn *p_hwfn;
+#endif
+ struct ecore_dev *p_dev;
+};
+
+struct ecore_sb_info_dbg {
+ u32 igu_prod;
+ u32 igu_cons;
+ u16 pi[PIS_PER_SB_E4];
+};
+
+struct ecore_sb_cnt_info {
+ /* Original, current, and free SBs for PF */
+ int orig;
+ int cnt;
+ int free_cnt;
+
+ /* Original, current and free SBS for child VFs */
+ int iov_orig;
+ int iov_cnt;
+ int free_cnt_iov;
+};
+
+static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
+{
+ u32 prod = 0;
+ u16 rc = 0;
+
+ /* barrier(); status block is written to by the chip */
+ /* FIXME: need some sort of barrier. */
+ prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
+ STATUS_BLOCK_E4_PROD_INDEX_MASK;
+ if (sb_info->sb_ack != prod) {
+ sb_info->sb_ack = prod;
+ rc |= ECORE_SB_IDX;
+ }
+
+ OSAL_MMIOWB(sb_info->p_dev);
+ return rc;
+}
+
+/**
+ *
+ * @brief This function creates an update command for interrupts that is
+ * written to the IGU.
+ *
+ * @param sb_info - This is the structure allocated and
+ * initialized per status block. Assumption is
+ * that it was initialized using ecore_sb_init
+ * @param int_cmd - Enable/Disable/Nop
+ * @param upd_flg - whether igu consumer should be
+ * updated.
+ *
+ * @return OSAL_INLINE void
+ */
+static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
+ enum igu_int_cmd int_cmd, u8 upd_flg)
+{
+ struct igu_prod_cons_update igu_ack = { 0 };
+
+ igu_ack.sb_id_and_flags =
+ ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_REG << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+ DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr,
+ igu_ack.sb_id_and_flags);
+#else
+ DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags);
+#endif
+ /* Both segments (interrupts & acks) are written to same place address;
+ * Need to guarantee all commands will be received (in-order) by HW.
+ */
+ OSAL_MMIOWB(sb_info->p_dev);
+ OSAL_BARRIER(sb_info->p_dev);
+}
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM *addr,
+ int size, u32 *data)
+#else
+static OSAL_INLINE void __internal_ram_wr(__rte_unused void *p_hwfn,
+ void OSAL_IOMEM *addr,
+ int size, u32 *data)
+#endif
+{
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*data); i++)
+ DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], data[i]);
+}
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void __internal_ram_wr_relaxed(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
+#else
+static OSAL_INLINE void __internal_ram_wr_relaxed(__rte_unused void *p_hwfn,
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
+#endif
+{
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*data); i++)
+ DIRECT_REG_WR_RELAXED(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i],
+ data[i]);
+}
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
+{
+ __internal_ram_wr_relaxed(p_hwfn, addr, size, data);
+}
+#else
+static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
+ int size, u32 *data)
+{
+ __internal_ram_wr_relaxed(OSAL_NULL, addr, size, data);
+}
+#endif
+
+#endif
+
+struct ecore_hwfn;
+struct ecore_ptt;
+
+enum ecore_coalescing_fsm {
+ ECORE_COAL_RX_STATE_MACHINE,
+ ECORE_COAL_TX_STATE_MACHINE
+};
+
+/**
+ * @brief ecore_int_cau_conf_pi - configure cau for a given
+ * status block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_sb
+ * @param pi_index
+ * @param state
+ * @param timeset
+ */
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb,
+ u32 pi_index,
+ enum ecore_coalescing_fsm coalescing_fsm,
+ u8 timeset);
+
+/**
+ *
+ * @brief ecore_int_igu_enable_int - enable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode - interrupt mode to use
+ */
+void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_int_mode int_mode);
+
+/**
+ *
+ * @brief ecore_int_igu_disable_int - disable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ *
+ * @brief ecore_int_igu_read_sisr_reg - Reads the single isr multiple dpc
+ * register from igu.
+ *
+ * @param p_hwfn
+ *
+ * @return u64
+ */
+u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);
+
+#define ECORE_SP_SB_ID 0xffff
+
+/**
+ * @brief ecore_int_sb_init - Initializes the sb_info structure.
+ *
+ * once the structure is initialized it can be passed to sb related functions.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info points to an uninitialized (but
+ * allocated) sb_info structure
+ * @param sb_virt_addr
+ * @param sb_phy_addr
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should use ECORE_SP_SB_ID for SP Status block
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr, u16 sb_id);
+/**
+ * @brief ecore_int_sb_setup - Setup the sb.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info initialized sb_info structure
+ */
+void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info);
+
+/**
+ * @brief ecore_int_sb_release - releases the sb_info structure.
+ *
+ * once the structure is released, it's memory can be freed
+ *
+ * @param p_hwfn
+ * @param sb_info points to an allocated sb_info structure
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should never be equal to ECORE_SP_SB_ID
+ * (SP Status block)
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_info *sb_info,
+ u16 sb_id);
+
+/**
+ * @brief ecore_int_sp_dpc - To be called when an interrupt is received on the
+ * default status block.
+ *
+ * @param p_hwfn - pointer to hwfn
+ *
+ */
+void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie);
+
+/**
+ * @brief ecore_int_get_num_sbs - get the number of status
+ * blocks configured for this funciton in the igu.
+ *
+ * @param p_hwfn
+ * @param p_sb_cnt_info
+ *
+ * @return
+ */
+void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_cnt_info *p_sb_cnt_info);
+
+/**
+ * @brief ecore_int_disable_post_isr_release - performs the cleanup post ISR
+ * release. The API need to be called after releasing all slowpath IRQs
+ * of the device.
+ *
+ * @param p_dev
+ *
+ */
+void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_int_attn_clr_enable - sets whether the general behavior is
+ * preventing attentions from being reasserted, or following the
+ * attributes of the specific attention.
+ *
+ * @param p_dev
+ * @param clr_enable
+ *
+ */
+void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable);
+
+/**
+ * @brief Read debug information regarding a given SB.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_sb - point to Status block for which we want to get info.
+ * @param p_info - pointer to struct to fill with information regarding SB.
+ *
+ * @return ECORE_SUCCESS if pointer is filled; failure otherwise.
+ */
+enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb,
+ struct ecore_sb_info_dbg *p_info);
+
+/**
+ * @brief - Move a free Status block between PF and child VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - The PF fastpath vector to be moved [re-assigned if claiming
+ * from VF, given-up if moving to VF]
+ * @param b_to_vf - PF->VF == true, VF->PF == false
+ *
+ * @return ECORE_SUCCESS if SB successfully moved.
+ */
+enum _ecore_status_t
+ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u16 sb_id, bool b_to_vf);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_iov_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_iov_api.h
new file mode 100644
index 00000000..29001d71
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_iov_api.h
@@ -0,0 +1,753 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_SRIOV_API_H__
+#define __ECORE_SRIOV_API_H__
+
+#include "common_hsi.h"
+#include "ecore_status.h"
+
+#define ECORE_ETH_VF_NUM_MAC_FILTERS 1
+#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
+#define ECORE_VF_ARRAY_LENGTH (3)
+
+#define IS_VF(p_dev) ((p_dev)->b_is_vf)
+#define IS_PF(p_dev) (!((p_dev)->b_is_vf))
+#ifdef CONFIG_ECORE_SRIOV
+#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->p_dev->p_iov_info))
+#else
+#define IS_PF_SRIOV(p_hwfn) (0)
+#endif
+#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
+#define IS_PF_PDA(p_hwfn) 0 /* @@TBD Michalk */
+
+/* @@@ TBD MichalK - what should this number be*/
+#define ECORE_MAX_VF_CHAINS_PER_PF 16
+
+/* vport update extended feature tlvs flags */
+enum ecore_iov_vport_update_flag {
+ ECORE_IOV_VP_UPDATE_ACTIVATE = 0,
+ ECORE_IOV_VP_UPDATE_VLAN_STRIP = 1,
+ ECORE_IOV_VP_UPDATE_TX_SWITCH = 2,
+ ECORE_IOV_VP_UPDATE_MCAST = 3,
+ ECORE_IOV_VP_UPDATE_ACCEPT_PARAM = 4,
+ ECORE_IOV_VP_UPDATE_RSS = 5,
+ ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN = 6,
+ ECORE_IOV_VP_UPDATE_SGE_TPA = 7,
+ ECORE_IOV_VP_UPDATE_MAX = 8,
+};
+
+/* PF to VF STATUS is part of vfpf-channel API
+ * and must be forward compatible
+*/
+enum ecore_iov_pf_to_vf_status {
+ PFVF_STATUS_WAITING = 0,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE,
+ PFVF_STATUS_FORCED,
+ PFVF_STATUS_MALICIOUS,
+};
+
+struct ecore_mcp_link_params;
+struct ecore_mcp_link_state;
+struct ecore_mcp_link_capabilities;
+
+/* These defines are used by the hw-channel; should never change order */
+#define VFPF_ACQUIRE_OS_LINUX (0)
+#define VFPF_ACQUIRE_OS_WINDOWS (1)
+#define VFPF_ACQUIRE_OS_ESX (2)
+#define VFPF_ACQUIRE_OS_SOLARIS (3)
+#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
+
+struct ecore_vf_acquire_sw_info {
+ u32 driver_version;
+ u8 os_type;
+
+ /* We have several close releases that all use ~same FW with different
+ * versions [making it incompatible as the versioning scheme is still
+ * tied directly to FW version], allow to override the checking. Only
+ * those versions would actually support this feature [so it would not
+ * break forward compatibility with newer HV drivers that are no longer
+ * suited].
+ */
+ bool override_fw_version;
+};
+
+struct ecore_public_vf_info {
+ /* These copies will later be reflected in the bulletin board,
+ * but this copy should be newer.
+ */
+ u8 forced_mac[ETH_ALEN];
+ u16 forced_vlan;
+};
+
+struct ecore_iov_vf_init_params {
+ u16 rel_vf_id;
+
+ /* Number of requested Queues; Currently, don't support different
+ * number of Rx/Tx queues.
+ */
+ /* TODO - remove this limitation */
+ u16 num_queues;
+
+ /* Allow the client to choose which qzones to use for Rx/Tx,
+ * and which queue_base to use for Tx queues on a per-queue basis.
+ * Notice values should be relative to the PF resources.
+ */
+ u16 req_rx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+ u16 req_tx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+
+ u8 vport_id;
+
+ /* Should be set in case RSS is going to be used for VF */
+ u8 rss_eng_id;
+};
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/* This is SW channel related only... */
+enum mbx_state {
+ VF_PF_UNKNOWN_STATE = 0,
+ VF_PF_WAIT_FOR_START_REQUEST = 1,
+ VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST = 2,
+ VF_PF_REQUEST_IN_PROCESSING = 3,
+ VF_PF_RESPONSE_READY = 4,
+};
+
+struct ecore_iov_sw_mbx {
+ enum mbx_state mbx_state;
+
+ u32 request_size;
+ u32 request_offset;
+
+ u32 response_size;
+ u32 response_offset;
+};
+
+/**
+ * @brief Get the vf sw mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return struct ecore_iov_sw_mbx*
+ */
+struct ecore_iov_sw_mbx*
+ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+#endif
+
+/* This struct is part of ecore_dev and contains data relevant to all hwfns;
+ * Initialized only if SR-IOV cpabability is exposed in PCIe config space.
+ */
+struct ecore_hw_sriov_info {
+ /* standard SRIOV capability fields, mostly for debugging */
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total_vfs; /* total VFs associated with the PF */
+ u16 num_vfs; /* number of vfs that have been started */
+ u16 initial_vfs; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u16 vf_device_id; /* VF device id */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+
+ u32 first_vf_in_pf;
+};
+
+#ifdef CONFIG_ECORE_SRIOV
+#ifndef LINUX_REMOVE
+/**
+ * @brief mark/clear all VFs before/after an incoming PCIe sriov
+ * disable.
+ *
+ * @param p_dev
+ * @param to_disable
+ */
+void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
+ u8 to_disable);
+
+/**
+ * @brief mark/clear chosen VF before/after an incoming PCIe
+ * sriov disable.
+ *
+ * @param p_dev
+ * @param rel_vf_id
+ * @param to_disable
+ */
+void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
+ u16 rel_vf_id,
+ u8 to_disable);
+
+/**
+ * @brief ecore_iov_init_hw_for_vf - initialize the HW for
+ * enabling access of a VF. Also includes preparing the
+ * IGU for VF access. This needs to be called AFTER hw is
+ * initialized and BEFORE VF is loaded inside the VM.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_iov_vf_init_params
+ *p_params);
+
+/**
+ * @brief ecore_iov_process_mbx_req - process a request received
+ * from the VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ */
+void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid);
+
+/**
+ * @brief ecore_iov_release_hw_for_vf - called once upper layer
+ * knows VF is done with - can release any resources
+ * allocated for VF at this point. this must be done once
+ * we know VF is no longer loaded in VM.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param rel_vf_id
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id);
+
+/**
+ * @brief ecore_iov_set_vf_ctx - set a context for a given VF
+ *
+ * @param p_hwfn
+ * @param vf_id
+ * @param ctx
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
+ u16 vf_id,
+ void *ctx);
+
+/**
+ * @brief FLR cleanup for all VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief FLR cleanup for single VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param rel_vf_id
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id);
+
+/**
+ * @brief Update the bulletin with link information. Notice this does NOT
+ * send a bulletin update, only updates the PF's bulletin.
+ *
+ * @param p_hwfn
+ * @param p_vf
+ * @param params - the link params to use for the VF link configuration
+ * @param link - the link output to use for the VF link configuration
+ * @param p_caps - the link default capabilities.
+ */
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps);
+
+/**
+ * @brief Returns link information as perceived by VF.
+ *
+ * @param p_hwfn
+ * @param p_vf
+ * @param p_params - the link params visible to vf.
+ * @param p_link - the link state visible to vf.
+ * @param p_caps - the link default capabilities visible to vf.
+ */
+void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps);
+
+/**
+ * @brief return if the VF is pending FLR
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return bool
+ */
+bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief Check if given VF ID @vfid is valid
+ * w.r.t. @b_enabled_only value
+ * if b_enabled_only = true - only enabled VF id is valid
+ * else any VF id less than max_vfs is valid
+ *
+ * @param p_hwfn
+ * @param rel_vf_id - Relative VF ID
+ * @param b_enabled_only - consider only enabled VF
+ * @param b_non_malicious - true iff we want to validate vf isn't malicious.
+ *
+ * @return bool - true for valid VF ID
+ */
+bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
+ int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious);
+
+/**
+ * @brief Get VF's public info structure
+ *
+ * @param p_hwfn
+ * @param vfid - Relative VF ID
+ * @param b_enabled_only - false if want to access even if vf is disabled
+ *
+ * @return struct ecore_public_vf_info *
+ */
+struct ecore_public_vf_info*
+ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
+ u16 vfid, bool b_enabled_only);
+
+/**
+ * @brief fills a bitmask of all VFs which have pending unhandled
+ * messages.
+ *
+ * @param p_hwfn
+ */
+void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events);
+
+/**
+ * @brief Copy VF's message to PF's buffer
+ *
+ * @param p_hwfn
+ * @param ptt
+ * @param vfid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *ptt,
+ int vfid);
+/**
+ * @brief Set forced MAC address in PFs copy of bulletin board
+ * and configures FW/HW to support the configuration.
+ *
+ * @param p_hwfn
+ * @param mac
+ * @param vfid
+ */
+void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid);
+
+/**
+ * @brief Set MAC address in PFs copy of bulletin board without
+ * configuring FW/HW.
+ *
+ * @param p_hwfn
+ * @param mac
+ * @param vfid
+ */
+enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid);
+
+/**
+ * @brief Set default behaviour of VF in case no vlans are configured for it
+ * whether to accept only untagged traffic or all.
+ * Must be called prior to the VF vport-start.
+ *
+ * @param p_hwfn
+ * @param b_untagged_only
+ * @param vfid
+ *
+ * @return ECORE_SUCCESS if configuration would stick.
+ */
+enum _ecore_status_t
+ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
+ bool b_untagged_only,
+ int vfid);
+
+/**
+ * @brief Get VFs opaque fid.
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param opaque_fid
+ */
+void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
+ u16 *opaque_fid);
+
+/**
+ * @brief Set forced VLAN [pvid] in PFs copy of bulletin board
+ * and configures FW/HW to support the configuration.
+ * Setting of pvid 0 would clear the feature.
+ * @param p_hwfn
+ * @param pvid
+ * @param vfid
+ */
+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 pvid, int vfid);
+
+/**
+ * @brief Check if VF has VPORT instance. This can be used
+ * to check if VPORT is active.
+ *
+ * @param p_hwfn
+ */
+bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief PF posts the bulletin to the VF
+ *
+ * @param p_hwfn
+ * @param p_vf
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
+ int vfid,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Check if given VF (@vfid) is marked as stopped
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool : true if stopped
+ */
+bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Configure VF anti spoofing
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param val - spoofchk value - true/false
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+ int vfid, bool val);
+
+/**
+ * @brief Get VF's configured spoof value.
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool - spoofchk value - true/false
+ */
+bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Check for SRIOV sanity by PF.
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool - true if sanity checks passes, else false
+ */
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Get the num of VF chains.
+ *
+ * @param p_hwfn
+ *
+ * @return u8
+ */
+u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Get vf request mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ * @param pp_req_virt_addr
+ * @param p_req_virt_size
+ */
+void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_req_virt_addr,
+ u16 *p_req_virt_size);
+
+/**
+ * @brief Get vf mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ * @param pp_reply_virt_addr
+ * @param p_reply_virt_size
+ */
+void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_reply_virt_addr,
+ u16 *p_reply_virt_size);
+
+/**
+ * @brief Validate if the given length is a valid vfpf message
+ * length
+ *
+ * @param length
+ *
+ * @return bool
+ */
+bool ecore_iov_is_valid_vfpf_msg_length(u32 length);
+
+/**
+ * @brief Return the max pfvf message length
+ *
+ * @return u32
+ */
+u32 ecore_iov_pfvf_msg_length(void);
+
+/**
+ * @brief Returns MAC address if one is configured
+ *
+ * @parm p_hwfn
+ * @parm rel_vf_id
+ *
+ * @return OSAL_NULL if mac isn't set; Otherwise, returns MAC.
+ */
+u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief Returns forced MAC address if one is configured
+ *
+ * @parm p_hwfn
+ * @parm rel_vf_id
+ *
+ * @return OSAL_NULL if mac isn't forced; Otherwise, returns MAC.
+ */
+u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief Returns pvid if one is configured
+ *
+ * @parm p_hwfn
+ * @parm rel_vf_id
+ *
+ * @return 0 if no pvid is configured, otherwise the pvid.
+ */
+u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+/**
+ * @brief Configure VFs tx rate
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ * @param val - tx rate value in Mb/sec.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid, int val);
+
+/**
+ * @brief - Retrieves the statistics associated with a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ * @param p_stats - this will be filled with the VF statistics
+ *
+ * @return ECORE_SUCCESS iff statistics were retrieved. Error otherwise.
+ */
+enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid,
+ struct ecore_eth_stats *p_stats);
+
+/**
+ * @brief - Retrieves num of rxqs chains
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return num of rxqs chains.
+ */
+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves num of active rxqs chains
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves ctx pointer
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves VF`s num sbs
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Returm true if VF is waiting for acquire
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Returm true if VF is acquired but not initialized
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Returm true if VF is acquired and initialized
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Returm true if VF has started in FW
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Get VF's vport min rate configured.
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return - rate in Mbps
+ */
+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+
+#endif
+
+/**
+ * @brief ecore_pf_configure_vf_queue_coalesce - PF configure coalesce
+ * parameters of VFs for Rx and Tx queue.
+ * While the API allows setting coalescing per-qid, all queues sharing a SB
+ * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
+ * otherwise configuration would break.
+ *
+ * @param p_hwfn
+ * @param rx_coal - Rx Coalesce value in micro seconds.
+ * @param tx_coal - TX Coalesce value in micro seconds.
+ * @param vf_id
+ * @param qid
+ *
+ * @return int
+ **/
+enum _ecore_status_t
+ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ u16 vf_id, u16 qid);
+
+/**
+ * @brief - Given a VF index, return index of next [including that] active VF.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return MAX_NUM_VFS_E4 in case no further active VFs, otherwise index.
+ */
+u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
+ u16 vxlan_port, u16 geneve_port);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/**
+ * @brief Set whether PF should communicate with VF using SW/HW channel
+ * Needs to be called for an enabled VF before acquire is over
+ * [latest good point for doing that is OSAL_IOV_VF_ACQUIRE()]
+ *
+ * @param p_hwfn
+ * @param vfid - relative vf index
+ * @param b_is_hw - true iff PF is to use HW channel for communication
+ */
+void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_is_hw);
+#endif
+#endif /* CONFIG_ECORE_SRIOV */
+
+#define ecore_for_each_vf(_p_hwfn, _i) \
+ for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0); \
+ _i < MAX_NUM_VFS_E4; \
+ _i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_iro.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_iro.h
new file mode 100644
index 00000000..05693029
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_iro.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __IRO_H__
+#define __IRO_H__
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+/* Tstorm ll2 port statistics */
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) (IRO[2].base + \
+ ((port_id) * IRO[2].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[3].base + \
+ ((vf_id) * IRO[3].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) (IRO[6].base + \
+ ((queue_zone_id) * IRO[6].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) (IRO[7].base + \
+ ((queue_zone_id) * IRO[7].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[14].base + \
+ ((core_rx_queue_id) * IRO[14].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+/* Tstorm LightL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+/* Ustorm LiteL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+/* Pstorm LiteL2 queue statistics */
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[18].base + \
+ ((stat_counter_id) * IRO[18].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) (IRO[19].base + \
+ ((queue_id) * IRO[19].m1))
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
+ * mode.
+ */
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) (IRO[20].base + \
+ ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
+/* TPA agregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[22].base + ((pf_id) * IRO[22].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[23].base + \
+ ((stat_counter_id) * IRO[23].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[25].base + \
+ ((stat_counter_id) * IRO[25].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[26].base + ((pf_id) * IRO[26].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) (IRO[27].base + \
+ ((ethType_id) * IRO[27].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[29].base + ((pf_id) * IRO[29].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[30].base + \
+ ((queue_id) * IRO[30].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[31].base + \
+ ((rss_id) * IRO[31].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size)
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[32].base + \
+ ((rss_id) * IRO[32].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[33].base + \
+ ((pf_id) * IRO[33].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size)
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[34].base + \
+ ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
+ * BDqueue-id
+ */
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[35].base + \
+ ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[36].base + \
+ ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+/* Tstorm iSCSI RX stats */
+#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[37].base + \
+ ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+/* Mstorm iSCSI RX stats */
+#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[38].base + \
+ ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+/* Ustorm iSCSI RX stats */
+#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[39].base + \
+ ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+/* Xstorm iSCSI TX stats */
+#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[40].base + \
+ ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+/* Ystorm iSCSI TX stats */
+#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[41].base + \
+ ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+/* Pstorm iSCSI TX stats */
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[42].base + \
+ ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+/* Tstorm FCoE RX stats */
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) (IRO[43].base + \
+ ((pf_id) * IRO[43].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size)
+/* Pstorm FCoE TX stats */
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) (IRO[44].base + \
+ ((pf_id) * IRO[44].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size)
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[46].base + \
+ ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) (IRO[47].base + \
+ ((pf_id) * IRO[47].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size)
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[48].base + \
+ ((roce_pf_id) * IRO[48].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) (IRO[49].base + \
+ ((roce_pf_id) * IRO[49].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size)
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) (IRO[50].base + \
+ ((roce_pf_id) * IRO[50].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size)
+
+#endif /* __IRO_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_iro_values.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_iro_values.h
new file mode 100644
index 00000000..685fa2e8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_iro_values.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __IRO_VALUES_H__
+#define __IRO_VALUES_H__
+
+static const struct iro iro_arr[51] = {
+/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
+ { 0x0, 0x0, 0x0, 0x0, 0x8},
+/* TSTORM_PORT_STAT_OFFSET(port_id) */
+ { 0x4cb8, 0x88, 0x0, 0x0, 0x88},
+/* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
+ { 0x6530, 0x20, 0x0, 0x0, 0x20},
+/* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
+ { 0xb00, 0x8, 0x0, 0x0, 0x4},
+/* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
+ { 0xa80, 0x8, 0x0, 0x0, 0x4},
+/* USTORM_EQE_CONS_OFFSET(pf_id) */
+ { 0x0, 0x8, 0x0, 0x0, 0x2},
+/* USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) */
+ { 0x80, 0x8, 0x0, 0x0, 0x4},
+/* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */
+ { 0x84, 0x8, 0x0, 0x0, 0x2},
+/* XSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x4c48, 0x0, 0x0, 0x0, 0x78},
+/* YSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x3e38, 0x0, 0x0, 0x0, 0x78},
+/* PSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x2b78, 0x0, 0x0, 0x0, 0x78},
+/* TSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x4c40, 0x0, 0x0, 0x0, 0x78},
+/* MSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x4998, 0x0, 0x0, 0x0, 0x78},
+/* USTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x7f50, 0x0, 0x0, 0x0, 0x78},
+/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
+ { 0xa28, 0x8, 0x0, 0x0, 0x8},
+/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
+ { 0x6210, 0x10, 0x0, 0x0, 0x10},
+/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
+ { 0xb820, 0x30, 0x0, 0x0, 0x30},
+/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
+ { 0x96c0, 0x30, 0x0, 0x0, 0x30},
+/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
+ { 0x4b68, 0x80, 0x0, 0x0, 0x40},
+/* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */
+ { 0x1f8, 0x4, 0x0, 0x0, 0x4},
+/* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */
+ { 0x53a8, 0x80, 0x4, 0x0, 0x4},
+/* MSTORM_TPA_TIMEOUT_US_OFFSET */
+ { 0xc7d0, 0x0, 0x0, 0x0, 0x4},
+/* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
+ { 0x4ba8, 0x80, 0x0, 0x0, 0x20},
+/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
+ { 0x8158, 0x40, 0x0, 0x0, 0x30},
+/* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
+ { 0xe770, 0x60, 0x0, 0x0, 0x60},
+/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
+ { 0x2d10, 0x80, 0x0, 0x0, 0x38},
+/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
+ { 0xf2b8, 0x78, 0x0, 0x0, 0x78},
+/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
+ { 0x1f8, 0x4, 0x0, 0x0, 0x4},
+/* TSTORM_ETH_PRS_INPUT_OFFSET */
+ { 0xaf20, 0x0, 0x0, 0x0, 0xf0},
+/* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
+ { 0xb010, 0x8, 0x0, 0x0, 0x8},
+/* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */
+ { 0x1f8, 0x8, 0x0, 0x0, 0x8},
+/* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */
+ { 0xac0, 0x8, 0x0, 0x0, 0x8},
+/* USTORM_TOE_CQ_PROD_OFFSET(rss_id) */
+ { 0x2578, 0x8, 0x0, 0x0, 0x8},
+/* USTORM_TOE_GRQ_PROD_OFFSET(pf_id) */
+ { 0x24f8, 0x8, 0x0, 0x0, 0x8},
+/* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */
+ { 0x0, 0x8, 0x0, 0x0, 0x8},
+/* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
+ { 0x400, 0x18, 0x8, 0x0, 0x8},
+/* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
+ { 0xb78, 0x18, 0x8, 0x0, 0x2},
+/* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
+ { 0xd898, 0x50, 0x0, 0x0, 0x3c},
+/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
+ { 0x12908, 0x18, 0x0, 0x0, 0x10},
+/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
+ { 0x11aa8, 0x40, 0x0, 0x0, 0x18},
+/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
+ { 0xa588, 0x50, 0x0, 0x0, 0x20},
+/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
+ { 0x8700, 0x40, 0x0, 0x0, 0x28},
+/* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
+ { 0x10300, 0x18, 0x0, 0x0, 0x10},
+/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
+ { 0xde48, 0x48, 0x0, 0x0, 0x38},
+/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
+ { 0x10768, 0x20, 0x0, 0x0, 0x20},
+/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
+ { 0x2d48, 0x80, 0x0, 0x0, 0x10},
+/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
+ { 0x5048, 0x10, 0x0, 0x0, 0x10},
+/* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
+ { 0xc9b8, 0x30, 0x0, 0x0, 0x10},
+/* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
+ { 0xed90, 0x10, 0x0, 0x0, 0x10},
+/* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) */
+ { 0xa520, 0x10, 0x0, 0x0, 0x10},
+/* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) */
+ { 0x13108, 0x8, 0x0, 0x0, 0x8},
+};
+
+#endif /* __IRO_VALUES_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_l2.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_l2.c
new file mode 100644
index 00000000..d71f4616
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_l2.c
@@ -0,0 +1,2304 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bcm_osal.h"
+
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_chain.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_l2.h"
+#include "ecore_sp_commands.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_hw.h"
+#include "ecore_vf.h"
+#include "ecore_sriov.h"
+#include "ecore_mcp.h"
+
+#define ECORE_MAX_SGES_NUM 16
+#define CRC32_POLY 0x1edc6f41
+
+struct ecore_l2_info {
+ u32 queues;
+ unsigned long **pp_qid_usage;
+
+ /* The lock is meant to synchronize access to the qid usage */
+ osal_mutex_t lock;
+};
+
+enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_l2_info *p_l2_info;
+ unsigned long **pp_qids;
+ u32 i;
+
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return ECORE_SUCCESS;
+
+ p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
+ if (!p_l2_info)
+ return ECORE_NOMEM;
+ p_hwfn->p_l2_info = p_l2_info;
+
+ if (IS_PF(p_hwfn->p_dev)) {
+ p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+ } else {
+ u8 rx = 0, tx = 0;
+
+ ecore_vf_get_num_rxqs(p_hwfn, &rx);
+ ecore_vf_get_num_txqs(p_hwfn, &tx);
+
+ p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
+ }
+
+ pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
+ sizeof(unsigned long *) *
+ p_l2_info->queues);
+ if (pp_qids == OSAL_NULL)
+ return ECORE_NOMEM;
+ p_l2_info->pp_qid_usage = pp_qids;
+
+ for (i = 0; i < p_l2_info->queues; i++) {
+ pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
+ MAX_QUEUES_PER_QZONE / 8);
+ if (pp_qids[i] == OSAL_NULL)
+ return ECORE_NOMEM;
+ }
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock))
+ return ECORE_NOMEM;
+#endif
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
+{
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return;
+
+ OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
+}
+
+void ecore_l2_free(struct ecore_hwfn *p_hwfn)
+{
+ u32 i;
+
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return;
+
+ if (p_hwfn->p_l2_info == OSAL_NULL)
+ return;
+
+ if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
+ goto out_l2_info;
+
+ /* Free until hit first uninitialized entry */
+ for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
+ if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
+ break;
+ OSAL_VFREE(p_hwfn->p_dev,
+ p_hwfn->p_l2_info->pp_qid_usage[i]);
+ p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL;
+ }
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ /* Lock is last to initialize, if everything else was */
+ if (i == p_hwfn->p_l2_info->queues)
+ OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
+#endif
+
+ OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
+ p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL;
+
+out_l2_info:
+ OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
+ p_hwfn->p_l2_info = OSAL_NULL;
+}
+
+/* TODO - we'll need locking around these... */
+static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
+ u16 queue_id = p_cid->rel.queue_id;
+ bool b_rc = true;
+ u8 first;
+
+ OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);
+
+ if (queue_id > p_l2_info->queues) {
+ DP_NOTICE(p_hwfn, true,
+ "Requested to increase usage for qzone %04x out of %08x\n",
+ queue_id, p_l2_info->queues);
+ b_rc = false;
+ goto out;
+ }
+
+ first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
+ MAX_QUEUES_PER_QZONE);
+ if (first >= MAX_QUEUES_PER_QZONE) {
+ b_rc = false;
+ goto out;
+ }
+
+ OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
+ p_cid->qid_usage_idx = first;
+
+out:
+ OSAL_MUTEX_RELEASE(&p_l2_info->lock);
+ return b_rc;
+}
+
+static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);
+
+ OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
+ p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
+}
+
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ bool b_legacy_vf = !!(p_cid->vf_legacy &
+ ECORE_QCID_LEGACY_VF_CID);
+
+ /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF.
+ * For legacy vf-queues, the CID doesn't go through here.
+ */
+ if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
+ _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
+
+ /* VFs maintain the index inside queue-zone on their own */
+ if (p_cid->vfid == ECORE_QUEUE_CID_PF)
+ ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);
+
+ OSAL_VFREE(p_hwfn->p_dev, p_cid);
+}
+
+/* The internal is only meant to be directly called by PFs initializeing CIDs
+ * for their VFs.
+ */
+static struct ecore_queue_cid *
+_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid, u32 cid,
+ struct ecore_queue_start_common_params *p_params,
+ bool b_is_rx,
+ struct ecore_queue_cid_vf_params *p_vf_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
+ p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
+ if (p_cid == OSAL_NULL)
+ return OSAL_NULL;
+
+ p_cid->opaque_fid = opaque_fid;
+ p_cid->cid = cid;
+ p_cid->p_owner = p_hwfn;
+
+ /* Fill in parameters */
+ p_cid->rel.vport_id = p_params->vport_id;
+ p_cid->rel.queue_id = p_params->queue_id;
+ p_cid->rel.stats_id = p_params->stats_id;
+ p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
+ p_cid->b_is_rx = b_is_rx;
+ p_cid->sb_idx = p_params->sb_idx;
+
+ /* Fill-in bits related to VFs' queues if information was provided */
+ if (p_vf_params != OSAL_NULL) {
+ p_cid->vfid = p_vf_params->vfid;
+ p_cid->vf_qid = p_vf_params->vf_qid;
+ p_cid->vf_legacy = p_vf_params->vf_legacy;
+ } else {
+ p_cid->vfid = ECORE_QUEUE_CID_PF;
+ }
+
+ /* Don't try calculating the absolute indices for VFs */
+ if (IS_VF(p_hwfn->p_dev)) {
+ p_cid->abs = p_cid->rel;
+
+ goto out;
+ }
+
+ /* Calculate the engine-absolute indices of the resources.
+ * This would guarantee they're valid later on.
+ * In some cases [SBs] we already have the right values.
+ */
+ rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+
+ rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
+ &p_cid->abs.queue_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+
+ /* In case of a PF configuring its VF's queues, the stats-id is already
+ * absolute [since there's a single index that's suitable per-VF].
+ */
+ if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
+ rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
+ &p_cid->abs.stats_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+ } else {
+ p_cid->abs.stats_id = p_cid->rel.stats_id;
+ }
+
+out:
+ /* VF-images have provided the qid_usage_idx on their own.
+ * Otherwise, we need to allocate a unique one.
+ */
+ if (!p_vf_params) {
+ if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
+ goto fail;
+ } else {
+ p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+ p_cid->opaque_fid, p_cid->cid,
+ p_cid->rel.vport_id, p_cid->abs.vport_id,
+ p_cid->rel.queue_id, p_cid->qid_usage_idx,
+ p_cid->abs.queue_id,
+ p_cid->rel.stats_id, p_cid->abs.stats_id,
+ p_cid->sb_igu_id, p_cid->sb_idx);
+
+ return p_cid;
+
+fail:
+ OSAL_VFREE(p_hwfn->p_dev, p_cid);
+ return OSAL_NULL;
+}
+
+struct ecore_queue_cid *
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ bool b_is_rx,
+ struct ecore_queue_cid_vf_params *p_vf_params)
+{
+ struct ecore_queue_cid *p_cid;
+ u8 vfid = ECORE_CXT_PF_CID;
+ bool b_legacy_vf = false;
+ u32 cid = 0;
+
+ /* In case of legacy VFs, The CID can be derived from the additional
+ * VF parameters - the VF assumes queue X uses CID X, so we can simply
+ * use the vf_qid for this purpose as well.
+ */
+ if (p_vf_params) {
+ vfid = p_vf_params->vfid;
+
+ if (p_vf_params->vf_legacy &
+ ECORE_QCID_LEGACY_VF_CID) {
+ b_legacy_vf = true;
+ cid = p_vf_params->vf_qid;
+ }
+ }
+
+ /* Get a unique firmware CID for this queue, in case it's a PF.
+ * VF's don't need a CID as the queue configuration will be done
+ * by PF.
+ */
+ if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
+ if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &cid, vfid) != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
+ return OSAL_NULL;
+ }
+ }
+
+ p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
+ p_params, b_is_rx, p_vf_params);
+ if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
+ _ecore_cxt_release_cid(p_hwfn, cid, vfid);
+
+ return p_cid;
+}
+
+static struct ecore_queue_cid *
+ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ bool b_is_rx,
+ struct ecore_queue_start_common_params *p_params)
+{
+ return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
+ OSAL_NULL);
+}
+
+enum _ecore_status_t
+ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_start_params *p_params)
+{
+ struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ struct eth_vport_tpa_param *p_tpa;
+ u16 rx_mode = 0, tx_err = 0;
+ u8 abs_vport_id = 0;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_params->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_START,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_start;
+ p_ramrod->vport_id = abs_vport_id;
+
+ p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
+ p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
+ p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
+ p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+ p_ramrod->untagged = p_params->only_untagged;
+ p_ramrod->zero_placement_offset = p_params->zero_placement_offset;
+
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
+
+ p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);
+
+ /* Handle requests for strict behavior on transmission errors */
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
+ p_params->b_err_illegal_vlan_mode ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
+ p_params->b_err_small_pkt ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
+ p_params->b_err_anti_spoof ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
+ p_params->b_err_illegal_inband_mode ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
+ p_params->b_err_vlan_insert_with_inband ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
+ p_params->b_err_big_pkt ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
+ p_params->b_err_ctrl_frame ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);
+
+ /* TPA related fields */
+ p_tpa = &p_ramrod->tpa_param;
+ OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param));
+ p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
+
+ switch (p_params->tpa_mode) {
+ case ECORE_TPA_MODE_GRO:
+ p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ p_tpa->tpa_max_size = (u16)-1;
+ p_tpa->tpa_min_size_to_cont = p_params->mtu / 2;
+ p_tpa->tpa_min_size_to_start = p_params->mtu / 2;
+ p_tpa->tpa_ipv4_en_flg = 1;
+ p_tpa->tpa_ipv6_en_flg = 1;
+ p_tpa->tpa_ipv4_tunn_en_flg = 1;
+ p_tpa->tpa_ipv6_tunn_en_flg = 1;
+ p_tpa->tpa_pkt_split_flg = 1;
+ p_tpa->tpa_gro_consistent_flg = 1;
+ break;
+ default:
+ break;
+ }
+
+ p_ramrod->tx_switching_en = p_params->tx_switching;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ p_ramrod->tx_switching_en = 0;
+#endif
+
+ p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
+ p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
+
+ /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
+ p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t
+ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_start_params *p_params)
+{
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
+ p_params->mtu,
+ p_params->remove_inner_vlan,
+ p_params->tpa_mode,
+ p_params->max_buffers_per_cqe,
+ p_params->only_untagged);
+
+ return ecore_sp_eth_vport_start(p_hwfn, p_params);
+}
+
+static enum _ecore_status_t
+ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct ecore_rss_params *p_rss)
+{
+ struct eth_vport_rss_config *p_config;
+ u16 capabilities = 0;
+ int i, table_size;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (!p_rss) {
+ p_ramrod->common.update_rss_flg = 0;
+ return rc;
+ }
+ p_config = &p_ramrod->rss_config;
+
+ OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
+ ETH_RSS_IND_TABLE_ENTRIES_NUM);
+
+ rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
+ p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
+ p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
+ p_config->update_rss_key = p_rss->update_rss_key;
+
+ p_config->rss_mode = p_rss->rss_enable ?
+ ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;
+
+ p_config->capabilities = 0;
+
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV4));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV6));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
+ p_config->tbl_size = p_rss->rss_table_size_log;
+ p_config->capabilities = OSAL_CPU_TO_LE16(capabilities);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
+ p_ramrod->common.update_rss_flg,
+ p_config->rss_mode,
+ p_config->update_rss_capabilities,
+ p_config->capabilities,
+ p_config->update_rss_ind_table, p_config->update_rss_key);
+
+ table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
+ 1 << p_config->tbl_size);
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];
+
+ if (!p_queue)
+ return ECORE_INVAL;
+
+ p_config->indirection_table[i] =
+ OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "Configured RSS indirection table [%d entries]:\n",
+ table_size);
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
+ }
+
+ for (i = 0; i < 10; i++)
+ p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);
+
+ return rc;
+}
+
+static void
+ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct ecore_filter_accept_flags accept_flags)
+{
+ p_ramrod->common.update_rx_mode_flg =
+ accept_flags.update_rx_mode_config;
+ p_ramrod->common.update_tx_mode_flg =
+ accept_flags.update_tx_mode_config;
+
+#ifndef ASIC_ONLY
+ /* On B0 emulation we cannot enable Tx, since this would cause writes
+ * to PVFC HW block which isn't implemented in emulation.
+ */
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Non-Asic - prevent Tx mode in vport update\n");
+ p_ramrod->common.update_tx_mode_flg = 0;
+ }
+#endif
+
+ /* Set Rx mode accept flags */
+ if (p_ramrod->common.update_rx_mode_flg) {
+ u8 accept_filter = accept_flags.rx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+ !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
+ !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+ !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+ !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
+ !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & ECORE_ACCEPT_BCAST));
+
+ p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
+ p_ramrod->common.vport_id, state);
+ }
+
+ /* Set Tx mode accept flags */
+ if (p_ramrod->common.update_tx_mode_flg) {
+ u8 accept_filter = accept_flags.tx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+ !!(accept_filter & ECORE_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+ !!(accept_filter & ECORE_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & ECORE_ACCEPT_BCAST));
+
+ p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
+ p_ramrod->common.vport_id, state);
+ }
+}
+
+static void
+ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
+ struct ecore_sge_tpa_params *p_params)
+{
+ struct eth_vport_tpa_param *p_tpa;
+ u16 val;
+
+ if (!p_params) {
+ p_ramrod->common.update_tpa_param_flg = 0;
+ p_ramrod->common.update_tpa_en_flg = 0;
+ p_ramrod->common.update_tpa_param_flg = 0;
+ return;
+ }
+
+ p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
+ p_tpa = &p_ramrod->tpa_param;
+ p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
+ p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
+ p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
+ p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
+
+ p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
+ p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
+ p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
+ p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
+ p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
+ p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
+ val = p_params->tpa_max_size;
+ p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val);
+ val = p_params->tpa_min_size_to_start;
+ p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val);
+ val = p_params->tpa_min_size_to_cont;
+ p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val);
+}
+
+static void
+ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
+ struct ecore_sp_vport_update_params *p_params)
+{
+ int i;
+
+ OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
+ sizeof(p_ramrod->approx_mcast.bins));
+
+ if (!p_params->update_approx_mcast_flg)
+ return;
+
+ p_ramrod->common.update_approx_mcast_flg = 1;
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = p_params->bins;
+
+ p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
+ }
+}
+
+enum _ecore_status_t
+ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct ecore_rss_params *p_rss_params = p_params->rss_params;
+ struct vport_update_ramrod_data_cmn *p_cmn;
+ struct ecore_sp_init_data init_data;
+ struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ u8 abs_vport_id = 0, val;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
+ return rc;
+ }
+
+ rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_params->opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Copy input params to ramrod according to FW struct */
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_cmn = &p_ramrod->common;
+
+ p_cmn->vport_id = abs_vport_id;
+
+ p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
+ p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
+ p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
+ p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
+
+ p_cmn->accept_any_vlan = p_params->accept_any_vlan;
+ val = p_params->update_accept_any_vlan_flg;
+ p_cmn->update_accept_any_vlan_flg = val;
+
+ p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
+ val = p_params->update_inner_vlan_removal_flg;
+ p_cmn->update_inner_vlan_removal_en_flg = val;
+
+ p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
+ val = p_params->update_default_vlan_enable_flg;
+ p_cmn->update_default_vlan_en_flg = val;
+
+ p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
+ p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
+
+ p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
+
+ p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ if (p_ramrod->common.tx_switching_en ||
+ p_ramrod->common.update_tx_switching_en_flg) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA - why are we seeing tx-switching? Overriding it\n");
+ p_ramrod->common.tx_switching_en = 0;
+ p_ramrod->common.update_tx_switching_en_flg = 1;
+ }
+#endif
+ p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
+
+ p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
+ val = p_params->update_anti_spoofing_en_flg;
+ p_ramrod->common.update_anti_spoofing_en_flg = val;
+
+ rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
+ if (rc != ECORE_SUCCESS) {
+ /* Return spq entry which is taken in ecore_sp_init_request()*/
+ ecore_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+
+ /* Update mcast bins for VFs, PF doesn't use this functionality */
+ ecore_sp_update_mcast_bin(p_ramrod, p_params);
+
+ ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+ ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
+ if (p_params->mtu) {
+ p_ramrod->common.update_mtu_flg = 1;
+ p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
+ }
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid, u8 vport_id)
+{
+ struct vport_stop_ramrod_data *p_ramrod;
+ struct ecore_sp_init_data init_data;
+ struct ecore_spq_entry *p_ent;
+ u8 abs_vport_id = 0;
+ enum _ecore_status_t rc;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_vport_stop(p_hwfn);
+
+ rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_STOP,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_stop;
+ p_ramrod->vport_id = abs_vport_id;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+static enum _ecore_status_t
+ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_accept_flags *p_accept_flags)
+{
+ struct ecore_sp_vport_update_params s_params;
+
+ OSAL_MEMSET(&s_params, 0, sizeof(s_params));
+ OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
+ sizeof(struct ecore_filter_accept_flags));
+
+ return ecore_vf_pf_vport_update(p_hwfn, &s_params);
+}
+
+enum _ecore_status_t
+ecore_filter_accept_cmd(struct ecore_dev *p_dev,
+ u8 vport,
+ struct ecore_filter_accept_flags accept_flags,
+ u8 update_accept_any_vlan,
+ u8 accept_any_vlan,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct ecore_sp_vport_update_params vport_update_params;
+ int i, rc;
+
+ /* Prepare and send the vport rx_mode change */
+ OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = vport;
+ vport_update_params.accept_flags = accept_flags;
+ vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
+ vport_update_params.accept_any_vlan = accept_any_vlan;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (IS_VF(p_dev)) {
+ rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ continue;
+ }
+
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
+ comp_mode, p_comp_data);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
+ accept_flags.rx_accept_filter,
+ accept_flags.tx_accept_filter);
+
+ if (update_accept_any_vlan)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "accept_any_vlan=%d configured\n",
+ accept_any_vlan);
+ }
+
+ return 0;
+}
+
+enum _ecore_status_t
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size)
+{
+ struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
+ p_cid->abs.vport_id, p_cid->sb_igu_id);
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_START,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_start;
+
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
+ p_ramrod->sb_index = p_cid->sb_idx;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+ p_ramrod->complete_cqe_flg = 0;
+ p_ramrod->complete_event_flg = 1;
+
+ p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
+ DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
+
+ p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
+ DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
+
+ if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
+ bool b_legacy_vf = !!(p_cid->vf_legacy &
+ ECORE_QCID_LEGACY_VF_RX_PROD);
+
+ p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Queue%s is meant for VF rxq[%02x]\n",
+ b_legacy_vf ? " [legacy]" : "",
+ p_cid->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
+ }
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM * *pp_prod)
+{
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)(&init_prod_val));
+
+ return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size);
+}
+
+enum _ecore_status_t
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *p_ret_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
+ /* Allocate a CID for the queue */
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
+ if (p_cid == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size,
+ &p_ret_params->p_prod);
+ else
+ rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size,
+ &p_ret_params->p_prod);
+
+ /* Provide the caller with a reference to as handler */
+ if (rc != ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
+ void **pp_rxq_handles,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ u8 i;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_rxqs_update(p_hwfn,
+ (struct ecore_queue_cid **)
+ pp_rxq_handles,
+ num_rxqs,
+ complete_cqe_flg,
+ complete_event_flg);
+
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ for (i = 0; i < num_rxqs; i++) {
+ p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];
+
+ /* Get SPQ entry */
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_update;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+ p_ramrod->complete_cqe_flg = complete_cqe_flg;
+ p_ramrod->complete_event_flg = complete_event_flg;
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool b_eq_completion_only,
+ bool b_cqe_completion)
+{
+ struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_STOP,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_stop;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+
+ /* Cleaning the queue requires the completion to arrive there.
+ * In addition, VFs require the answer to come as eqe to PF.
+ */
+ p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
+ !b_eq_completion_only) ||
+ b_cqe_completion;
+ p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
+ b_eq_completion_only;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only,
+ bool cqe_completion)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
+ eq_completion_only,
+ cqe_completion);
+ else
+ rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
+
+ if (rc == ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ u16 pq_id)
+{
+ struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_TX_QUEUE_START,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.tx_queue_start;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
+ p_ramrod->sb_index = p_cid->sb_idx;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+
+ p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+ p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+
+ p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
+
+ p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u8 tc,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM * *pp_doorbell)
+{
+ enum _ecore_status_t rc;
+ u16 pq_id;
+
+ /* TODO - set tc in the pq_params for multi-cos.
+ * If pacing is enabled then select queue according to
+ * rate limiter availability otherwise select queue based
+ * on multi cos.
+ */
+ if (IS_ECORE_PACING(p_hwfn))
+ pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id);
+ else
+ pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc);
+
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr,
+ pbl_size, pq_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Provide the caller with the necessary return values */
+ *pp_doorbell = (u8 OSAL_IOMEM *)
+ p_hwfn->doorbells +
+ DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ struct ecore_txq_start_ret_params *p_ret_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
+ if (p_cid == OSAL_NULL)
+ return ECORE_INVAL;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
+ else
+ rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
+
+ if (rc != ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_TX_QUEUE_STOP,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
+ enum _ecore_status_t rc;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
+ else
+ rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);
+
+ if (rc == ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ return rc;
+}
+
+static enum eth_filter_action
+ecore_filter_action(enum ecore_filter_opcode opcode)
+{
+ enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
+
+ switch (opcode) {
+ case ECORE_FILTER_ADD:
+ action = ETH_FILTER_ACTION_ADD;
+ break;
+ case ECORE_FILTER_REMOVE:
+ action = ETH_FILTER_ACTION_REMOVE;
+ break;
+ case ECORE_FILTER_FLUSH:
+ action = ETH_FILTER_ACTION_REMOVE_ALL;
+ break;
+ default:
+ action = MAX_ETH_FILTER_ACTION;
+ }
+
+ return action;
+}
+
+static enum _ecore_status_t
+ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_filter_ucast *p_filter_cmd,
+ struct vport_filter_update_ramrod_data **pp_ramrod,
+ struct ecore_spq_entry **pp_ent,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+ struct vport_filter_update_ramrod_data *p_ramrod;
+ struct eth_filter_cmd *p_first_filter;
+ struct eth_filter_cmd *p_second_filter;
+ struct ecore_sp_init_data init_data;
+ enum eth_filter_action action;
+ enum _ecore_status_t rc;
+
+ rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+ &vport_to_remove_from);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+ &vport_to_add_to);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = ecore_sp_init_request(p_hwfn, pp_ent,
+ ETH_RAMROD_FILTERS_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
+ p_ramrod = *pp_ramrod;
+ p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
+ p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Non-Asic - prevent Tx filters\n");
+ p_ramrod->filter_cmd_hdr.tx = 0;
+ }
+#endif
+
+ switch (p_filter_cmd->opcode) {
+ case ECORE_FILTER_REPLACE:
+ case ECORE_FILTER_MOVE:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
+ break;
+ default:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
+ break;
+ }
+
+ p_first_filter = &p_ramrod->filter_cmds[0];
+ p_second_filter = &p_ramrod->filter_cmds[1];
+
+ switch (p_filter_cmd->type) {
+ case ECORE_FILTER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC;
+ break;
+ case ECORE_FILTER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_VLAN;
+ break;
+ case ECORE_FILTER_MAC_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_PAIR;
+ break;
+ case ECORE_FILTER_INNER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
+ break;
+ case ECORE_FILTER_INNER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
+ break;
+ case ECORE_FILTER_INNER_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
+ break;
+ case ECORE_FILTER_INNER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
+ break;
+ case ECORE_FILTER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
+ break;
+ case ECORE_FILTER_VNI:
+ p_first_filter->type = ETH_FILTER_TYPE_VNI;
+ break;
+ case ECORE_FILTER_UNUSED: /* @DPDK */
+ p_first_filter->type = MAX_ETH_FILTER_TYPE;
+ break;
+ }
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
+ ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
+ &p_first_filter->mac_mid,
+ &p_first_filter->mac_lsb,
+ (u8 *)p_filter_cmd->mac);
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
+ p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_VNI))
+ p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);
+
+ if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
+ p_second_filter->type = p_first_filter->type;
+ p_second_filter->mac_msb = p_first_filter->mac_msb;
+ p_second_filter->mac_mid = p_first_filter->mac_mid;
+ p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+ p_second_filter->vlan_id = p_first_filter->vlan_id;
+ p_second_filter->vni = p_first_filter->vni;
+
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
+
+ p_first_filter->vport_id = vport_to_remove_from;
+
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ p_second_filter->vport_id = vport_to_add_to;
+ } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
+ p_first_filter->vport_id = vport_to_add_to;
+ OSAL_MEMCPY(p_second_filter, p_first_filter,
+ sizeof(*p_second_filter));
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ } else {
+ action = ecore_filter_action(p_filter_cmd->opcode);
+
+ if (action == MAX_ETH_FILTER_ACTION) {
+ DP_NOTICE(p_hwfn, true,
+ "%d is not supported yet\n",
+ p_filter_cmd->opcode);
+ return ECORE_NOTIMPL;
+ }
+
+ p_first_filter->action = action;
+ p_first_filter->vport_id =
+ (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
+ vport_to_remove_from : vport_to_add_to;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct eth_filter_cmd_header *p_header;
+ enum _ecore_status_t rc;
+
+ rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
+ &p_ramrod, &p_ent,
+ comp_mode, p_comp_data);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
+ return rc;
+ }
+ p_header = &p_ramrod->filter_cmd_hdr;
+ p_header->assert_on_error = p_filter_cmd->assert_on_error;
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
+ (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
+ ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
+ "REMOVE" :
+ ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
+ "MOVE" : "REPLACE")),
+ (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
+ ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
+ "VLAN" : "MAC & VLAN"),
+ p_ramrod->filter_cmd_hdr.cmd_cnt,
+ p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
+ p_filter_cmd->vport_to_add_to,
+ p_filter_cmd->vport_to_remove_from,
+ p_filter_cmd->mac[0], p_filter_cmd->mac[1],
+ p_filter_cmd->mac[2], p_filter_cmd->mac[3],
+ p_filter_cmd->mac[4], p_filter_cmd->mac[5],
+ p_filter_cmd->vlan);
+
+ return ECORE_SUCCESS;
+}
+
+/*******************************************************************************
+ * Description:
+ * Calculates crc 32 on a buffer
+ * Note: crc32_length MUST be aligned to 8
+ * Return:
+ ******************************************************************************/
+static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed)
+{
+ u32 byte = 0, bit = 0, crc32_result = crc32_seed;
+ u8 msb = 0, current_byte = 0;
+
+ if ((crc32_packet == OSAL_NULL) ||
+ (crc32_length == 0) || ((crc32_length % 8) != 0)) {
+ return crc32_result;
+ }
+
+ for (byte = 0; byte < crc32_length; byte++) {
+ current_byte = crc32_packet[byte];
+ for (bit = 0; bit < 8; bit++) {
+ msb = (u8)(crc32_result >> 31);
+ crc32_result = crc32_result << 1;
+ if (msb != (0x1 & (current_byte >> bit))) {
+ crc32_result = crc32_result ^ CRC32_POLY;
+ crc32_result |= 1;
+ }
+ }
+ }
+
+ return crc32_result;
+}
+
+static u32 ecore_crc32c_le(u32 seed, u8 *mac)
+{
+ u32 packet_buf[2] = { 0 };
+
+ OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
+ return ecore_calc_crc32c((u8 *)packet_buf, 8, seed);
+}
+
+u8 ecore_mcast_bin_from_mac(u8 *mac)
+{
+ u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);
+
+ return crc & 0xff;
+}
+
+static enum _ecore_status_t
+ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+ u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ u8 abs_vport_id = 0;
+ enum _ecore_status_t rc;
+ int i;
+
+ if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
+ rc = ecore_fw_vport(p_hwfn,
+ p_filter_cmd->vport_to_add_to,
+ &abs_vport_id);
+ else
+ rc = ecore_fw_vport(p_hwfn,
+ p_filter_cmd->vport_to_remove_from,
+ &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_ramrod->common.update_approx_mcast_flg = 1;
+
+ /* explicitly clear out the entire vector */
+ OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
+ 0, sizeof(p_ramrod->approx_mcast.bins));
+ OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ /* filter ADD op is explicit set op and it removes
+ * any existing filters for the vport.
+ */
+ if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ bins[bit / 32] |= 1 << (bit % 32);
+ }
+
+ /* Convert to correct endianity */
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ struct vport_update_ramrod_mcast *p_ramrod_bins;
+
+ p_ramrod_bins = &p_ramrod->approx_mcast;
+ p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]);
+ }
+ }
+
+ p_ramrod->common.vport_id = abs_vport_id;
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
+ struct ecore_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ /* only ADD and REMOVE operations are supported for multi-cast */
+ if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
+ (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) ||
+ (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
+ return ECORE_INVAL;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
+ continue;
+ }
+
+ rc = ecore_sp_eth_filter_mcast(p_hwfn,
+ p_filter_cmd,
+ comp_mode, p_comp_data);
+ if (rc != ECORE_SUCCESS)
+ break;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
+ struct ecore_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ u16 opaque_fid;
+
+ if (IS_VF(p_dev)) {
+ rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
+ continue;
+ }
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_eth_filter_ucast(p_hwfn,
+ opaque_fid,
+ p_filter_cmd,
+ comp_mode, p_comp_data);
+ if (rc != ECORE_SUCCESS)
+ break;
+ }
+
+ return rc;
+}
+
+/* Statistics related code */
+static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
+ u32 *p_addr, u32 *p_len,
+ u16 statistics_bin)
+{
+ if (IS_PF(p_hwfn->p_dev)) {
+ *p_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+ } else {
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.pstats.len;
+ }
+}
+
+static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_pstorm_per_queue_stat pstats;
+ u32 pstats_addr = 0, pstats_len = 0;
+
+ __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
+ statistics_bin);
+
+ OSAL_MEMSET(&pstats, 0, sizeof(pstats));
+ ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
+
+ p_stats->common.tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->common.tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->common.tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->common.tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->common.tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->common.tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->common.tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
+}
+
+static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats)
+{
+ struct tstorm_per_port_stat tstats;
+ u32 tstats_addr, tstats_len;
+
+ if (IS_PF(p_hwfn->p_dev)) {
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ tstats_len = sizeof(struct tstorm_per_port_stat);
+ } else {
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
+ tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
+ }
+
+ OSAL_MEMSET(&tstats, 0, sizeof(tstats));
+ ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
+
+ p_stats->common.mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->common.mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+}
+
+static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
+ u32 *p_addr, u32 *p_len,
+ u16 statistics_bin)
+{
+ if (IS_PF(p_hwfn->p_dev)) {
+ *p_addr = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+ } else {
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
+ *p_len = p_resp->pfdev_info.stats_info.ustats.len;
+ }
+}
+
+static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_ustorm_per_queue_stat ustats;
+ u32 ustats_addr = 0, ustats_len = 0;
+
+ __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
+ statistics_bin);
+
+ OSAL_MEMSET(&ustats, 0, sizeof(ustats));
+ ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
+
+ p_stats->common.rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->common.rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->common.rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->common.rx_ucast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->common.rx_mcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->common.rx_bcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
+ u32 *p_addr, u32 *p_len,
+ u16 statistics_bin)
+{
+ if (IS_PF(p_hwfn->p_dev)) {
+ *p_addr = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+ } else {
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.mstats.len;
+ }
+}
+
+static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_mstorm_per_queue_stat mstats;
+ u32 mstats_addr = 0, mstats_len = 0;
+
+ __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
+ statistics_bin);
+
+ OSAL_MEMSET(&mstats, 0, sizeof(mstats));
+ ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
+
+ p_stats->common.no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->common.packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->common.ttl0_discard +=
+ HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->common.tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->common.tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->common.tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->common.tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+}
+
+static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats)
+{
+ struct ecore_eth_stats_common *p_common = &p_stats->common;
+ struct port_stats port_stats;
+ int j;
+
+ OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
+
+ ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, stats),
+ sizeof(port_stats));
+
+ p_common->rx_64_byte_packets += port_stats.eth.r64;
+ p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_common->rx_crc_errors += port_stats.eth.rfcs;
+ p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_common->rx_pause_frames += port_stats.eth.rxpf;
+ p_common->rx_pfc_frames += port_stats.eth.rxpp;
+ p_common->rx_align_errors += port_stats.eth.raln;
+ p_common->rx_carrier_errors += port_stats.eth.rfcr;
+ p_common->rx_oversize_packets += port_stats.eth.rovr;
+ p_common->rx_jabbers += port_stats.eth.rjbr;
+ p_common->rx_undersize_packets += port_stats.eth.rund;
+ p_common->rx_fragments += port_stats.eth.rfrg;
+ p_common->tx_64_byte_packets += port_stats.eth.t64;
+ p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_common->tx_pause_frames += port_stats.eth.txpf;
+ p_common->tx_pfc_frames += port_stats.eth.txpp;
+ p_common->rx_mac_bytes += port_stats.eth.rbyte;
+ p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_common->tx_mac_bytes += port_stats.eth.tbyte;
+ p_common->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_common->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_common->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
+ for (j = 0; j < 8; j++) {
+ p_common->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_common->brb_discards += port_stats.brb.brb_discard[j];
+ }
+
+ if (ECORE_IS_BB(p_hwfn->p_dev)) {
+ struct ecore_eth_stats_bb *p_bb = &p_stats->bb;
+
+ p_bb->rx_1519_to_1522_byte_packets +=
+ port_stats.eth.u0.bb0.r1522;
+ p_bb->rx_1519_to_2047_byte_packets +=
+ port_stats.eth.u0.bb0.r2047;
+ p_bb->rx_2048_to_4095_byte_packets +=
+ port_stats.eth.u0.bb0.r4095;
+ p_bb->rx_4096_to_9216_byte_packets +=
+ port_stats.eth.u0.bb0.r9216;
+ p_bb->rx_9217_to_16383_byte_packets +=
+ port_stats.eth.u0.bb0.r16383;
+ p_bb->tx_1519_to_2047_byte_packets +=
+ port_stats.eth.u1.bb1.t2047;
+ p_bb->tx_2048_to_4095_byte_packets +=
+ port_stats.eth.u1.bb1.t4095;
+ p_bb->tx_4096_to_9216_byte_packets +=
+ port_stats.eth.u1.bb1.t9216;
+ p_bb->tx_9217_to_16383_byte_packets +=
+ port_stats.eth.u1.bb1.t16383;
+ p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
+ p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
+ } else {
+ struct ecore_eth_stats_ah *p_ah = &p_stats->ah;
+
+ p_ah->rx_1519_to_max_byte_packets +=
+ port_stats.eth.u0.ah0.r1519_to_max;
+ p_ah->tx_1519_to_max_byte_packets =
+ port_stats.eth.u1.ah1.t1519_to_max;
+ }
+
+ p_common->link_change_count = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ link_change_count));
+}
+
+void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *stats,
+ u16 statistics_bin, bool b_get_port_stats)
+{
+ __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
+ __ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
+ __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
+
+#ifndef ASIC_ONLY
+ /* Avoid getting PORT stats for emulation. */
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ return;
+#endif
+
+ if (b_get_port_stats && p_hwfn->mcp_info)
+ __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
+}
+
+static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
+ struct ecore_eth_stats *stats)
+{
+ u8 fw_vport = 0;
+ int i;
+
+ OSAL_MEMSET(stats, 0, sizeof(*stats));
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
+ ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
+ bool b_get_port_stats;
+
+ if (IS_PF(p_dev)) {
+ /* The main vport index is relative first */
+ if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
+ DP_ERR(p_hwfn, "No vport available!\n");
+ goto out;
+ }
+ }
+
+ if (IS_PF(p_dev) && !p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn);
+ __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
+ b_get_port_stats);
+
+out:
+ if (IS_PF(p_dev) && p_ptt)
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+}
+
+void ecore_get_vport_stats(struct ecore_dev *p_dev,
+ struct ecore_eth_stats *stats)
+{
+ u32 i;
+
+ if (!p_dev) {
+ OSAL_MEMSET(stats, 0, sizeof(*stats));
+ return;
+ }
+
+ _ecore_get_vport_stats(p_dev, stats);
+
+ if (!p_dev->reset_stats)
+ return;
+
+ /* Reduce the statistics baseline */
+ for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
+ ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
+}
+
+/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
+void ecore_reset_vport_stats(struct ecore_dev *p_dev)
+{
+ int i;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
+ ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
+ u32 addr = 0, len = 0;
+
+ if (IS_PF(p_dev) && !p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ OSAL_MEMSET(&mstats, 0, sizeof(mstats));
+ __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
+
+ OSAL_MEMSET(&ustats, 0, sizeof(ustats));
+ __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
+
+ OSAL_MEMSET(&pstats, 0, sizeof(pstats));
+ __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
+
+ if (IS_PF(p_dev))
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+
+ /* PORT statistics are not necessarily reset, so we need to
+ * read and create a baseline for future statistics.
+ * Link change stat is maintained by MFW, return its value as is.
+ */
+ if (!p_dev->reset_stats)
+ DP_INFO(p_dev, "Reset stats not allocated\n");
+ else {
+ _ecore_get_vport_stats(p_dev, p_dev->reset_stats);
+ p_dev->reset_stats->common.link_change_count = 0;
+ }
+}
+
+void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_arfs_config_params *p_cfg_params)
+{
+ if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
+ return;
+
+ if (p_cfg_params->arfs_enable) {
+ ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp,
+ p_cfg_params->udp,
+ p_cfg_params->ipv4,
+ p_cfg_params->ipv6,
+ GFT_PROFILE_TYPE_4_TUPLE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+ p_cfg_params->tcp ? "Enable" : "Disable",
+ p_cfg_params->udp ? "Enable" : "Disable",
+ p_cfg_params->ipv4 ? "Enable" : "Disable",
+ p_cfg_params->ipv6 ? "Enable" : "Disable");
+ } else {
+ ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
+ p_cfg_params->arfs_enable ? "Enable" : "Disable");
+}
+
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length,
+ u16 qid, u8 vport_id,
+ bool b_is_add)
+{
+ struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (p_cb) {
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+ init_data.p_comp_data = p_cb;
+ } else {
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+ }
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_GFT_UPDATE_FILTER,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_update_gft;
+
+ DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
+ p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
+
+ p_ramrod->action_icid_valid = 0;
+ p_ramrod->action_icid = 0;
+
+ p_ramrod->rx_qid_valid = 1;
+ p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id);
+
+ p_ramrod->flow_id_valid = 0;
+ p_ramrod->flow_id = 0;
+
+ p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id);
+ p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
+ : GFT_DELETE_FILTER;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "V[%0x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n",
+ abs_vport_id, abs_rx_q_id,
+ b_is_add ? "Adding" : "Removing",
+ (unsigned long)p_addr, length);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_rx_coal)
+{
+ u32 coalesce, address, is_valid;
+ struct cau_sb_entry sb_entry;
+ u8 timer_res;
+ enum _ecore_status_t rc;
+
+ rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ p_cid->sb_igu_id * sizeof(u64),
+ (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
+
+ address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ coalesce = ecore_rd(p_hwfn, p_ptt, address);
+
+ is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+ if (!is_valid)
+ return ECORE_INVAL;
+
+ coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
+ *p_rx_coal = (u16)(coalesce << timer_res);
+
+ return ECORE_SUCCESS;
+}
+
+int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_tx_coal)
+{
+ u32 coalesce, address, is_valid;
+ struct cau_sb_entry sb_entry;
+ u8 timer_res;
+ enum _ecore_status_t rc;
+
+ rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ p_cid->sb_igu_id * sizeof(u64),
+ (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
+
+ address = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ coalesce = ecore_rd(p_hwfn, p_ptt, address);
+
+ is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+ if (!is_valid)
+ return ECORE_INVAL;
+
+ coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
+ *p_tx_coal = (u16)(coalesce << timer_res);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
+ void *handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_ptt *p_ptt;
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Unable to read queue calescing\n");
+
+ return rc;
+ }
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ if (p_cid->b_is_rx) {
+ rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+ } else {
+ rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+ }
+
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid, u32 rate)
+{
+ struct ecore_mcp_link_state *p_link;
+ u8 vport;
+
+ vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id);
+ p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "About to rate limit qm vport %d for queue %d with rate %d\n",
+ vport, p_cid->rel.queue_id, rate);
+
+ return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,
+ p_link->speed);
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_l2.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_l2.h
new file mode 100644
index 00000000..8fa40302
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_l2.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_L2_H__
+#define __ECORE_L2_H__
+
+
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_spq.h"
+#include "ecore_l2_api.h"
+
+#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
+#define ECORE_QUEUE_CID_PF (0xff)
+
+/* Almost identical to the ecore_queue_start_common_params,
+ * but here we maintain the SB index in IGU CAM.
+ */
+struct ecore_queue_cid_params {
+ u8 vport_id;
+ u16 queue_id;
+ u8 stats_id;
+};
+
+ /* Additional parameters required for initialization of the queue_cid
+ * and are relevant only for a PF initializing one for its VFs.
+ */
+struct ecore_queue_cid_vf_params {
+ /* Should match the VF's relative index */
+ u8 vfid;
+
+ /* 0-based queue index. Should reflect the relative qzone the
+ * VF thinks is associated with it [in its range].
+ */
+ u8 vf_qid;
+
+ /* Indicates a VF is legacy, making it differ in several things:
+ * - Producers would be placed in a different place.
+ * - Makes assumptions regarding the CIDs.
+ */
+ u8 vf_legacy;
+
+ /* For VFs, this index arrives via TLV to diffrentiate between
+ * different queues opened on the same qzone, and is passed
+ * [where the PF would have allocated it internally for its own].
+ */
+ u8 qid_usage_idx;
+};
+
+struct ecore_queue_cid {
+ /* For stats-id, the `rel' is actually absolute as well */
+ struct ecore_queue_cid_params rel;
+ struct ecore_queue_cid_params abs;
+
+ /* These have no 'relative' meaning */
+ u16 sb_igu_id;
+ u8 sb_idx;
+
+ u32 cid;
+ u16 opaque_fid;
+
+ bool b_is_rx;
+
+ /* VFs queues are mapped differently, so we need to know the
+ * relative queue associated with them [0-based].
+ * Notice this is relevant on the *PF* queue-cid of its VF's queues,
+ * and not on the VF itself.
+ */
+ u8 vfid;
+ u8 vf_qid;
+
+ /* We need an additional index to diffrentiate between queues opened
+ * for same queue-zone, as VFs would have to communicate the info
+ * to the PF [otherwise PF has no way to diffrentiate].
+ */
+ u8 qid_usage_idx;
+
+ /* Legacy VFs might have Rx producer located elsewhere */
+ u8 vf_legacy;
+#define ECORE_QCID_LEGACY_VF_RX_PROD (1 << 0)
+#define ECORE_QCID_LEGACY_VF_CID (1 << 1)
+
+ struct ecore_hwfn *p_owner;
+};
+
+enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn);
+void ecore_l2_setup(struct ecore_hwfn *p_hwfn);
+void ecore_l2_free(struct ecore_hwfn *p_hwfn);
+
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid);
+
+struct ecore_queue_cid *
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ bool b_is_rx,
+ struct ecore_queue_cid_vf_params *p_vf_params);
+
+enum _ecore_status_t
+ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_start_params *p_params);
+
+/**
+ * @brief - Starts an Rx queue, when queue_cid is already prepared
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param bd_max_bytes
+ * @param bd_chain_phys_addr
+ * @param cqe_pbl_addr
+ * @param cqe_pbl_size
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size);
+
+/**
+ * @brief - Starts a Tx queue, where queue_cid is already prepared
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param pbl_addr
+ * @param pbl_size
+ * @param p_pq_params - parameters for choosing the PQ for this Tx queue
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ u16 pq_id);
+
+u8 ecore_mcast_bin_from_mac(u8 *mac);
+
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+
+enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_hw_coal);
+
+enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_hw_coal);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_l2_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_l2_api.h
new file mode 100644
index 00000000..575b9e3a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_l2_api.h
@@ -0,0 +1,463 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_L2_API_H__
+#define __ECORE_L2_API_H__
+
+#include "ecore_status.h"
+#include "ecore_sp_api.h"
+#include "ecore_int_api.h"
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_rss_caps {
+ ECORE_RSS_IPV4 = 0x1,
+ ECORE_RSS_IPV6 = 0x2,
+ ECORE_RSS_IPV4_TCP = 0x4,
+ ECORE_RSS_IPV6_TCP = 0x8,
+ ECORE_RSS_IPV4_UDP = 0x10,
+ ECORE_RSS_IPV6_UDP = 0x20,
+};
+
+/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
+#define ECORE_RSS_IND_TABLE_SIZE 128
+#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
+#endif
+
+struct ecore_queue_start_common_params {
+ /* Should always be relative to entity sending this. */
+ u8 vport_id;
+ u16 queue_id;
+
+ /* Relative, but relevant only for PFs */
+ u8 stats_id;
+
+ struct ecore_sb_info *p_sb;
+ u8 sb_idx;
+};
+
+struct ecore_rxq_start_ret_params {
+ void OSAL_IOMEM *p_prod;
+ void *p_handle;
+};
+
+struct ecore_txq_start_ret_params {
+ void OSAL_IOMEM *p_doorbell;
+ void *p_handle;
+};
+
+struct ecore_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+
+ /* Indirection table consist of rx queue handles */
+ void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+ u32 rss_key[ECORE_RSS_KEY_SIZE];
+};
+
+struct ecore_sge_tpa_params {
+ u8 max_buffers_per_cqe;
+
+ u8 update_tpa_en_flg;
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+
+ u8 update_tpa_param_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+ u8 tpa_max_aggs_num;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+};
+
+enum ecore_filter_opcode {
+ ECORE_FILTER_ADD,
+ ECORE_FILTER_REMOVE,
+ ECORE_FILTER_MOVE,
+ ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
+ ECORE_FILTER_FLUSH, /* Removes all filters */
+};
+
+enum ecore_filter_ucast_type {
+ ECORE_FILTER_MAC,
+ ECORE_FILTER_VLAN,
+ ECORE_FILTER_MAC_VLAN,
+ ECORE_FILTER_INNER_MAC,
+ ECORE_FILTER_INNER_VLAN,
+ ECORE_FILTER_INNER_PAIR,
+ ECORE_FILTER_INNER_MAC_VNI_PAIR,
+ ECORE_FILTER_MAC_VNI_PAIR,
+ ECORE_FILTER_VNI,
+ ECORE_FILTER_UNUSED, /* @DPDK */
+};
+
+struct ecore_filter_ucast {
+ enum ecore_filter_opcode opcode;
+ enum ecore_filter_ucast_type type;
+ u8 is_rx_filter;
+ u8 is_tx_filter;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ unsigned char mac[ETH_ALEN];
+ u8 assert_on_error;
+ u16 vlan;
+ u32 vni;
+};
+
+struct ecore_filter_mcast {
+ /* MOVE is not supported for multicast */
+ enum ecore_filter_opcode opcode;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ u8 num_mc_addrs;
+#define ECORE_MAX_MC_ADDRS 64
+ unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+struct ecore_filter_accept_flags {
+ u8 update_rx_mode_config;
+ u8 update_tx_mode_config;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+#define ECORE_ACCEPT_NONE 0x01
+#define ECORE_ACCEPT_UCAST_MATCHED 0x02
+#define ECORE_ACCEPT_UCAST_UNMATCHED 0x04
+#define ECORE_ACCEPT_MCAST_MATCHED 0x08
+#define ECORE_ACCEPT_MCAST_UNMATCHED 0x10
+#define ECORE_ACCEPT_BCAST 0x20
+};
+
+struct ecore_arfs_config_params {
+ bool tcp;
+ bool udp;
+ bool ipv4;
+ bool ipv6;
+ bool arfs_enable; /* Enable or disable arfs mode */
+};
+
+/* Add / remove / move / remove-all unicast MAC-VLAN filters.
+ * FW will assert in the following cases, so driver should take care...:
+ * 1. Adding a filter to a full table.
+ * 2. Adding a filter which already exists on that vport.
+ * 3. Removing a filter which doesn't exist.
+ */
+
+enum _ecore_status_t
+ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
+ struct ecore_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
+
+/* Add / remove / move multicast MAC filters. */
+enum _ecore_status_t
+ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
+ struct ecore_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
+
+/* Set "accept" filters */
+enum _ecore_status_t
+ecore_filter_accept_cmd(
+ struct ecore_dev *p_dev,
+ u8 vport,
+ struct ecore_filter_accept_flags accept_flags,
+ u8 update_accept_any_vlan,
+ u8 accept_any_vlan,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
+ *
+ * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
+ * the VPort ID is not currently initialized.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @p_params Inputs; Relative for PF [SB being an exception]
+ * @param bd_max_bytes Maximum bytes that can be placed on a BD
+ * @param bd_chain_phys_addr Physical address of BDs for receive.
+ * @param cqe_pbl_addr Physical address of the CQE PBL Table.
+ * @param cqe_pbl_size Size of the CQE PBL Table
+ * @param p_ret_params Pointed struct to be filled with outputs.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *p_ret_params);
+
+/**
+ * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
+ *
+ * @param p_hwfn
+ * @param p_rxq Handler of queue to close
+ * @param eq_completion_only If True completion will be on
+ * EQe, if False completion will be
+ * on EQe if p_hwfn opaque
+ * different from the RXQ opaque
+ * otherwise on CQe.
+ * @param cqe_completion If True completion will be
+ * receive on CQe.
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only,
+ bool cqe_completion);
+
+/**
+ * @brief - TX Queue Start Ramrod
+ *
+ * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
+ * the VPort is not currently initialized.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @p_params
+ * @param tc traffic class to use with this L2 txq
+ * @param pbl_addr address of the pbl array
+ * @param pbl_size number of entries in pbl
+ * @param p_ret_params Pointer to fill the return parameters in.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ struct ecore_txq_start_ret_params *p_ret_params);
+
+/**
+ * @brief ecore_eth_tx_queue_stop - closes a Tx queue
+ *
+ * @param p_hwfn
+ * @param p_txq - handle to Tx queue needed to be closed
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_txq);
+
+enum ecore_tpa_mode {
+ ECORE_TPA_MODE_NONE,
+ ECORE_TPA_MODE_RSC,
+ ECORE_TPA_MODE_GRO,
+ ECORE_TPA_MODE_MAX
+};
+
+struct ecore_sp_vport_start_params {
+ enum ecore_tpa_mode tpa_mode;
+ bool remove_inner_vlan; /* Inner VLAN removal is enabled */
+ bool tx_switching; /* Vport supports tx-switching */
+ bool handle_ptp_pkts; /* Handle PTP packets */
+ bool only_untagged; /* Untagged pkt control */
+ bool drop_ttl0; /* Drop packets with TTL = 0 */
+ u8 max_buffers_per_cqe;
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u8 vport_id; /* VPORT ID */
+ u16 mtu; /* VPORT MTU */
+ bool zero_placement_offset;
+ bool check_mac;
+ bool check_ethtype;
+
+ /* Strict behavior on transmission errors */
+ bool b_err_illegal_vlan_mode;
+ bool b_err_illegal_inband_mode;
+ bool b_err_vlan_insert_with_inband;
+ bool b_err_small_pkt;
+ bool b_err_big_pkt;
+ bool b_err_anti_spoof;
+ bool b_err_ctrl_frame;
+};
+
+/**
+ * @brief ecore_sp_vport_start -
+ *
+ * This ramrod initializes a VPort. An Assert if generated if the Function ID
+ * of the VPort is not enabled.
+ *
+ * @param p_hwfn
+ * @param p_params VPORT start params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_start_params *p_params);
+
+struct ecore_sp_vport_update_params {
+ u16 opaque_fid;
+ u8 vport_id;
+ u8 update_vport_active_rx_flg;
+ u8 vport_active_rx_flg;
+ u8 update_vport_active_tx_flg;
+ u8 vport_active_tx_flg;
+ u8 update_inner_vlan_removal_flg;
+ u8 inner_vlan_removal_flg;
+ u8 silent_vlan_removal_flg;
+ u8 update_default_vlan_enable_flg;
+ u8 default_vlan_enable_flg;
+ u8 update_default_vlan_flg;
+ u16 default_vlan;
+ u8 update_tx_switching_flg;
+ u8 tx_switching_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ u32 bins[8];
+ struct ecore_rss_params *rss_params;
+ struct ecore_filter_accept_flags accept_flags;
+ struct ecore_sge_tpa_params *sge_tpa_params;
+ /* MTU change - notice this requires the vport to be disabled.
+ * If non-zero, value would be used.
+ */
+ u16 mtu;
+};
+
+/**
+ * @brief ecore_sp_vport_update -
+ *
+ * This ramrod updates the parameters of the VPort. Every field can be updated
+ * independently, according to flags.
+ *
+ * This ramrod is also used to set the VPort state to active after creation.
+ * An Assert is generated if the VPort does not contain an RX queue.
+ *
+ * @param p_hwfn
+ * @param p_params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
+/**
+ * @brief ecore_sp_vport_stop -
+ *
+ * This ramrod closes a VPort after all its RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param vport_id VPort ID
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u8 vport_id);
+
+enum _ecore_status_t
+ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief ecore_sp_rx_eth_queues_update -
+ *
+ * This ramrod updates an RX queue. It is used for setting the active state
+ * of the queue and updating the TPA and SGE parameters.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ * @param pp_rxq_handlers An array of queue handlers to be updated.
+ * @param num_rxqs number of queues to update.
+ * @param complete_cqe_flg Post completion to the CQE Ring if set
+ * @param complete_event_flg Post completion to the Event Ring if set
+ * @param comp_mode
+ * @param p_comp_data
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t
+ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
+ void **pp_rxq_handlers,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
+
+void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *stats,
+ u16 statistics_bin, bool b_get_port_stats);
+
+void ecore_get_vport_stats(struct ecore_dev *p_dev,
+ struct ecore_eth_stats *stats);
+
+void ecore_reset_vport_stats(struct ecore_dev *p_dev);
+
+/**
+ *@brief ecore_arfs_mode_configure -
+ *
+ *Enable or disable rfs mode. It must accept atleast one of tcp or udp true
+ *and atleast one of ipv4 or ipv6 true to enable rfs mode.
+ *
+ *@param p_hwfn
+ *@param p_ptt
+ *@param p_cfg_params arfs mode configuration parameters.
+ *
+ */
+void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - ecore_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove arfs hw filter
+ *
+ * @params p_hwfn
+ * @params p_cb Used for ECORE_SPQ_MODE_CB,where client would initialize
+ * it with cookie and callback function address, if not
+ * using this mode then client must pass NULL.
+ * @params p_addr p_addr is an actual packet header that needs to be
+ * filter. It has to mapped with IO to read prior to
+ * calling this, [contains 4 tuples- src ip, dest ip,
+ * src port, dest port].
+ * @params length length of p_addr header up to past the transport header.
+ * @params qid receive packet will be directed to this queue.
+ * @params vport_id
+ * @params b_is_add flag to add or remove filter.
+ *
+ */
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length,
+ u16 qid, u8 vport_id,
+ bool b_is_add);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.c
new file mode 100644
index 00000000..ea14c172
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.c
@@ -0,0 +1,4029 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_status.h"
+#include "nvm_cfg.h"
+#include "ecore_mcp.h"
+#include "mcp_public.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_sriov.h"
+#include "ecore_vf.h"
+#include "ecore_iov_api.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "ecore_dcbx.h"
+#include "ecore_sp_commands.h"
+#include "ecore_cxt.h"
+
+#define CHIP_MCP_RESP_ITER_US 10
+#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
+
+#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
+#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
+
+#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
+ ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+ _val)
+
+#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
+ ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+
+#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
+ DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
+ OFFSETOF(struct public_drv_mb, _field), _val)
+
+#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
+ DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
+ OFFSETOF(struct public_drv_mb, _field))
+
+#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
+ DRV_ID_PDA_COMP_VER_OFFSET)
+
+#define MCP_BYTES_PER_MBIT_OFFSET 17
+
+#ifndef ASIC_ONLY
+static int loaded;
+static int loaded_port[MAX_NUM_PORTS] = { 0 };
+#endif
+
+bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
+ return false;
+ return true;
+}
+
+void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PORT);
+ u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+
+ p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
+ MFW_PORT(p_hwfn));
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "port_addr = 0x%x, port_id 0x%02x\n",
+ p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
+}
+
+void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
+ OSAL_BE32 tmp;
+ u32 i;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
+ return;
+#endif
+
+ if (!p_hwfn->mcp_info->public_base)
+ return;
+
+ for (i = 0; i < length; i++) {
+ tmp = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->mfw_mb_addr +
+ (i << 2) + sizeof(u32));
+
+ ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
+ OSAL_BE32_TO_CPU(tmp);
+ }
+}
+
+struct ecore_mcp_cmd_elem {
+ osal_list_entry_t list;
+ struct ecore_mcp_mb_params *p_mb_params;
+ u16 expected_seq_num;
+ bool b_is_completed;
+};
+
+/* Must be called while cmd_lock is acquired */
+static struct ecore_mcp_cmd_elem *
+ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u16 expected_seq_num)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
+ sizeof(*p_cmd_elem));
+ if (!p_cmd_elem) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
+ goto out;
+ }
+
+ p_cmd_elem->p_mb_params = p_mb_params;
+ p_cmd_elem->expected_seq_num = expected_seq_num;
+ OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+out:
+ return p_cmd_elem;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_cmd_elem *p_cmd_elem)
+{
+ OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+ OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
+}
+
+/* Must be called while cmd_lock is acquired */
+static struct ecore_mcp_cmd_elem *
+ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
+ struct ecore_mcp_cmd_elem) {
+ if (p_cmd_elem->expected_seq_num == seq_num)
+ return p_cmd_elem;
+ }
+
+ return OSAL_NULL;
+}
+
+enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
+{
+ if (p_hwfn->mcp_info) {
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
+
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
+ &p_hwfn->mcp_info->cmd_list, list,
+ struct ecore_mcp_cmd_elem) {
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ }
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
+#endif
+ }
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
+ u32 drv_mb_offsize, mfw_mb_offsize;
+ u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
+ p_info->public_base = 0;
+ return ECORE_INVAL;
+ }
+#endif
+
+ p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+ if (!p_info->public_base)
+ return ECORE_INVAL;
+
+ p_info->public_base |= GRCBASE_MCP;
+
+ /* Calculate the driver and MFW mailbox address */
+ drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_DRV_MB));
+ p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
+ " mcp_pf_id = 0x%x\n",
+ drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+ /* Set the MFW MB address */
+ mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr);
+
+ /* Get the current driver mailbox sequence before sending
+ * the first command
+ */
+ p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Get current FW pulse sequence */
+ p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK;
+
+ p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_info *p_info;
+ u32 size;
+
+ /* Allocate mcp_info structure */
+ p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*p_hwfn->mcp_info));
+ if (!p_hwfn->mcp_info) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
+ return ECORE_NOMEM;
+ }
+ p_info = p_hwfn->mcp_info;
+
+ /* Initialize the MFW spinlocks */
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+ return ECORE_NOMEM;
+ }
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
+ OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+ return ECORE_NOMEM;
+ }
+#endif
+ OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
+ OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
+
+ OSAL_LIST_INIT(&p_info->cmd_list);
+
+ if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
+ /* Do not free mcp_info here, since public_base indicate that
+ * the MCP is not initialized
+ */
+ return ECORE_SUCCESS;
+ }
+
+ size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
+ p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+ p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+ if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+ goto err;
+
+ return ECORE_SUCCESS;
+
+err:
+ DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
+ ecore_mcp_free(p_hwfn);
+ return ECORE_NOMEM;
+}
+
+static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ /* Use MCP history register to check if MCP reset occurred between init
+ * time and now.
+ */
+ if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
+ p_hwfn->mcp_info->mcp_hist, generic_por_0);
+
+ ecore_load_mcp_offsets(p_hwfn, p_ptt);
+ ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
+}
+
+enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay = EMUL_MCP_RESP_ITER_US;
+#endif
+
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+ return ECORE_ABORTED;
+ }
+
+ /* Ensure that only a single thread is accessing the mailbox */
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ /* Set drv command along with the updated sequence */
+ ecore_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
+
+ do {
+ /* Wait for MFW response */
+ OSAL_UDELAY(delay);
+ /* Give the FW up to 500 second (50*1000*10usec) */
+ } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
+ MISCS_REG_GENERIC_POR_0)) &&
+ (cnt++ < ECORE_MCP_RESET_RETRIES));
+
+ if (org_mcp_reset_seq !=
+ ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MCP was reset after %d usec\n", cnt * delay);
+ } else {
+ DP_ERR(p_hwfn, "Failed to reset MCP\n");
+ rc = ECORE_AGAIN;
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ return rc;
+}
+
+/* Must be called while cmd_lock is acquired */
+static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ /* There is at most one pending command at a certain time, and if it
+ * exists - it is placed at the HEAD of the list.
+ */
+ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
+ p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
+ struct ecore_mcp_cmd_elem,
+ list);
+ return !p_cmd_elem->b_is_completed;
+ }
+
+ return false;
+}
+
+/* Must be called while cmd_lock is acquired */
+static enum _ecore_status_t
+ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_mb_params *p_mb_params;
+ struct ecore_mcp_cmd_elem *p_cmd_elem;
+ u32 mcp_resp;
+ u16 seq_num;
+
+ mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
+
+ /* Return if no new non-handled response has been received */
+ if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
+ return ECORE_AGAIN;
+
+ p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
+ if (!p_cmd_elem) {
+ DP_ERR(p_hwfn,
+ "Failed to find a pending mailbox cmd that expects sequence number %d\n",
+ seq_num);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ p_mb_params = p_cmd_elem->p_mb_params;
+
+ /* Get the MFW response along with the sequence number */
+ p_mb_params->mcp_resp = mcp_resp;
+
+ /* Get the MFW param */
+ p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+
+ /* Get the union data */
+ if (p_mb_params->p_data_dst != OSAL_NULL &&
+ p_mb_params->data_dst_size) {
+ u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb,
+ union_data);
+ ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr, p_mb_params->data_dst_size);
+ }
+
+ p_cmd_elem->b_is_completed = true;
+
+ return ECORE_SUCCESS;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u16 seq_num)
+{
+ union drv_union_data union_data;
+ u32 union_data_addr;
+
+ /* Set the union data */
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb, union_data);
+ OSAL_MEM_ZERO(&union_data, sizeof(union_data));
+ if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
+ OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
+ p_mb_params->data_src_size);
+ ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+ sizeof(union_data));
+
+ /* Set the drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
+
+ /* Set the drv command along with the sequence number */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mailbox: command 0x%08x param 0x%08x\n",
+ (p_mb_params->cmd | seq_num), p_mb_params->param);
+}
+
+static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
+ bool block_cmd)
+{
+ p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+ DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+ block_cmd ? "Block" : "Unblock");
+}
+
+void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+
+ cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+ cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+ cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+ DP_NOTICE(p_hwfn, false,
+ "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+ cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
+static enum _ecore_status_t
+_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u32 max_retries, u32 delay)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem;
+ u32 cnt = 0;
+ u16 seq_num;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Wait until the mailbox is non-occupied */
+ do {
+ /* Exit the loop if there is no pending command, or if the
+ * pending command is completed during this iteration.
+ * The spinlock stays locked until the command is sent.
+ */
+
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ if (!ecore_mcp_has_pending_cmd(p_hwfn))
+ break;
+
+ rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (rc == ECORE_SUCCESS)
+ break;
+ else if (rc != ECORE_AGAIN)
+ goto err;
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_UDELAY(delay);
+ OSAL_MFW_CMD_PREEMPT(p_hwfn);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return ECORE_AGAIN;
+ }
+
+ /* Send the mailbox command */
+ ecore_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
+ p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
+ if (!p_cmd_elem) {
+ rc = ECORE_NOMEM;
+ goto err;
+ }
+
+ __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ /* Wait for the MFW response */
+ do {
+ /* Exit the loop if the command is already completed, or if the
+ * command is completed during this iteration.
+ * The spinlock stays locked until the list element is removed.
+ */
+
+ OSAL_UDELAY(delay);
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ if (p_cmd_elem->b_is_completed)
+ break;
+
+ rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (rc == ECORE_SUCCESS)
+ break;
+ else if (rc != ECORE_AGAIN)
+ goto err;
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MFW_CMD_PREEMPT(p_hwfn);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
+
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ ecore_mcp_cmd_set_blocking(p_hwfn, true);
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
+ return ECORE_AGAIN;
+ }
+
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+ p_mb_params->mcp_resp, p_mb_params->mcp_param,
+ (cnt * delay) / 1000, (cnt * delay) % 1000);
+
+ /* Clear the sequence number from the MFW response */
+ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+
+ return ECORE_SUCCESS;
+
+err:
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_mb_params *p_mb_params)
+{
+ osal_size_t union_data_size = sizeof(union drv_union_data);
+ u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
+ u32 delay = CHIP_MCP_RESP_ITER_US;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay = EMUL_MCP_RESP_ITER_US;
+ /* There is a built-in delay of 100usec in each MFW response read */
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ max_retries /= 10;
+#endif
+
+ /* MCP not initialized */
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
+ return ECORE_BUSY;
+ }
+
+ if (p_mb_params->data_src_size > union_data_size ||
+ p_mb_params->data_dst_size > union_data_size) {
+ DP_ERR(p_hwfn,
+ "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+ p_mb_params->data_src_size, p_mb_params->data_dst_size,
+ union_data_size);
+ return ECORE_INVAL;
+ }
+
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return ECORE_ABORTED;
+ }
+
+ return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+ delay);
+}
+
+enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 cmd, u32 param,
+ u32 *o_mcp_resp, u32 *o_mcp_param)
+{
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
+ loaded--;
+ loaded_port[p_hwfn->port_id]--;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
+ loaded);
+ }
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 i_txn_size, u32 *i_buf)
+{
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ mb_params.p_data_src = i_buf;
+ mb_params.data_src_size = (u8)i_txn_size;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf)
+{
+ struct ecore_mcp_mb_params mb_params;
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ mb_params.p_data_dst = raw_data;
+
+ /* Use the maximal value since the actual one is part of the response */
+ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ *o_txn_size = *o_mcp_param;
+ /* @DPDK */
+ OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
+
+ return ECORE_SUCCESS;
+}
+
+#ifndef ASIC_ONLY
+static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
+ u32 *p_load_code)
+{
+ static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
+
+ if (!loaded)
+ load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
+ else if (!loaded_port[p_hwfn->port_id])
+ load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
+ else
+ load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+
+ /* On CMT, always tell that it's engine */
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
+
+ *p_load_code = load_phase;
+ loaded++;
+ loaded_port[p_hwfn->port_id]++;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
+ *p_load_code, loaded, p_hwfn->port_id,
+ loaded_port[p_hwfn->port_id]);
+}
+#endif
+
+static bool
+ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
+ enum ecore_override_force_load override_force_load)
+{
+ bool can_force_load = false;
+
+ switch (override_force_load) {
+ case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
+ can_force_load = true;
+ break;
+ case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
+ can_force_load = false;
+ break;
+ default:
+ can_force_load = (drv_role == DRV_ROLE_OS &&
+ exist_drv_role == DRV_ROLE_PREBOOT) ||
+ (drv_role == DRV_ROLE_KDUMP &&
+ exist_drv_role == DRV_ROLE_OS);
+ break;
+ }
+
+ return can_force_load;
+}
+
+static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
+ &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send cancel load request, rc = %d\n", rc);
+
+ return rc;
+}
+
+#define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
+#define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
+#define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
+#define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
+#define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
+#define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
+#define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
+
+static u32 ecore_get_config_bitmap(void)
+{
+ u32 config_bitmap = 0x0;
+
+#ifdef CONFIG_ECORE_L2
+ config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_SRIOV
+ config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_ROCE
+ config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_IWARP
+ config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_FCOE
+ config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_ISCSI
+ config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_LL2
+ config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
+#endif
+
+ return config_bitmap;
+}
+
+struct ecore_load_req_in_params {
+ u8 hsi_ver;
+#define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
+#define ECORE_LOAD_REQ_HSI_VER_1 1
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u8 drv_role;
+ u8 timeout_val;
+ u8 force_cmd;
+ bool avoid_eng_reset;
+};
+
+struct ecore_load_req_out_params {
+ u32 load_code;
+ u32 exist_drv_ver_0;
+ u32 exist_drv_ver_1;
+ u32 exist_fw_ver;
+ u8 exist_drv_role;
+ u8 mfw_hsi_ver;
+ bool drv_exists;
+};
+
+static enum _ecore_status_t
+__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_load_req_in_params *p_in_params,
+ struct ecore_load_req_out_params *p_out_params)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ u32 hsi_ver;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&load_req, sizeof(load_req));
+ load_req.drv_ver_0 = p_in_params->drv_ver_0;
+ load_req.drv_ver_1 = p_in_params->drv_ver_1;
+ load_req.fw_ver = p_in_params->fw_ver;
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+ p_in_params->timeout_val);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+ p_in_params->avoid_eng_reset);
+
+ hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
+ DRV_ID_MCP_HSI_VER_CURRENT :
+ (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
+ mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
+ mb_params.p_data_src = &load_req;
+ mb_params.data_src_size = sizeof(load_req);
+ mb_params.p_data_dst = &load_rsp;
+ mb_params.data_dst_size = sizeof(load_rsp);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+ mb_params.param,
+ GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+
+ if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
+ load_req.drv_ver_0, load_req.drv_ver_1,
+ load_req.fw_ver, load_req.misc0,
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send load request, rc = %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
+ p_out_params->load_code = mb_params.mcp_resp;
+
+ if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
+ p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
+ load_rsp.drv_ver_0, load_rsp.drv_ver_1,
+ load_rsp.fw_ver, load_rsp.misc0,
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
+
+ p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
+ p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
+ p_out_params->exist_fw_ver = load_rsp.fw_ver;
+ p_out_params->exist_drv_role =
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+ p_out_params->mfw_hsi_ver =
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+ p_out_params->drv_exists =
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+ LOAD_RSP_FLAGS0_DRV_EXISTS;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
+ u8 *p_mfw_drv_role)
+{
+ switch (drv_role) {
+ case ECORE_DRV_ROLE_OS:
+ *p_mfw_drv_role = DRV_ROLE_OS;
+ break;
+ case ECORE_DRV_ROLE_KDUMP:
+ *p_mfw_drv_role = DRV_ROLE_KDUMP;
+ break;
+ }
+}
+
+enum ecore_load_req_force {
+ ECORE_LOAD_REQ_FORCE_NONE,
+ ECORE_LOAD_REQ_FORCE_PF,
+ ECORE_LOAD_REQ_FORCE_ALL,
+};
+
+static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
+ u8 *p_mfw_force_cmd)
+{
+ switch (force_cmd) {
+ case ECORE_LOAD_REQ_FORCE_NONE:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
+ break;
+ case ECORE_LOAD_REQ_FORCE_PF:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
+ break;
+ case ECORE_LOAD_REQ_FORCE_ALL:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
+ break;
+ }
+}
+
+enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_load_req_params *p_params)
+{
+ struct ecore_load_req_out_params out_params;
+ struct ecore_load_req_in_params in_params;
+ u8 mfw_drv_role = 0, mfw_force_cmd;
+ enum _ecore_status_t rc;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
+ in_params.drv_ver_0 = ECORE_VERSION;
+ in_params.drv_ver_1 = ecore_get_config_bitmap();
+ in_params.fw_ver = STORM_FW_VERSION;
+ ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
+ in_params.drv_role = mfw_drv_role;
+ in_params.timeout_val = p_params->timeout_val;
+ ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
+ in_params.force_cmd = mfw_force_cmd;
+ in_params.avoid_eng_reset = p_params->avoid_eng_reset;
+
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* First handle cases where another load request should/might be sent:
+ * - MFW expects the old interface [HSI version = 1]
+ * - MFW responds that a force load request is required
+ */
+ if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_INFO(p_hwfn,
+ "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
+
+ in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ } else if (out_params.load_code ==
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
+ if (ecore_mcp_can_force_load(in_params.drv_role,
+ out_params.exist_drv_role,
+ p_params->override_force_load)) {
+ DP_INFO(p_hwfn,
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
+ &mfw_force_cmd);
+
+ in_params.force_cmd = mfw_force_cmd;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
+ return ECORE_BUSY;
+ }
+ }
+
+ /* Now handle the other types of responses.
+ * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
+ * expected here after the additional revised load requests were sent.
+ */
+ switch (out_params.load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
+ out_params.drv_exists) {
+ /* The role and fw/driver version match, but the PF is
+ * already loaded and has not been unloaded gracefully.
+ * This is unexpected since a quasi-FLR request was
+ * previously sent as part of ecore_hw_prepare().
+ */
+ DP_NOTICE(p_hwfn, false,
+ "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
+ return ECORE_INVAL;
+ }
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
+ out_params.load_code);
+ return ECORE_BUSY;
+ }
+
+ p_params->load_code = out_params.load_code;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
+ &param);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a LOAD_DONE command, rc = %d\n", rc);
+ return rc;
+ }
+
+ /* Check if there is a DID mismatch between nvm-cfg/efuse */
+ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+ DP_NOTICE(p_hwfn, false,
+ "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 wol_param, mcp_resp, mcp_param;
+
+ /* @DPDK */
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
+ &mcp_resp, &mcp_param);
+}
+
+enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct mcp_mac wol_mac;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
+
+ return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
+
+static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+ ECORE_PATH_ID(p_hwfn));
+ u32 disabled_vfs[VF_MAX_STATIC / 32];
+ int i;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Reading Disabled VF information from [offset %08x],"
+ " path_addr %08x\n",
+ mfw_path_offsize, path_addr);
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+ disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
+ path_addr +
+ OFFSETOF(struct public_path,
+ mcp_vf_disabled) +
+ sizeof(u32) * i);
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+ "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+ }
+
+ if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+ OSAL_VF_FLR_UPDATE(p_hwfn);
+}
+
+enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *vfs_to_ack)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+ MCP_PF_ID(p_hwfn));
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+ int i;
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+ "Acking VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
+ mb_params.p_data_src = vfs_to_ack;
+ mb_params.data_src_size = VF_MAX_STATIC / 8;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
+ &mb_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to pass ACK for VF flr to MFW\n");
+ return ECORE_TIMEOUT;
+ }
+
+ /* TMP - clear the ACK bits; should be done by MFW */
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ ecore_wr(p_hwfn, p_ptt,
+ func_addr +
+ OFFSETOF(struct public_func, drv_ack_vf_disabled) +
+ i * sizeof(u32), 0);
+
+ return rc;
+}
+
+static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 transceiver_state;
+
+ transceiver_state = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ transceiver_data));
+
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
+ "Received transceiver state update [0x%08x] from mfw"
+ " [Addr 0x%x]\n",
+ transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ transceiver_data)));
+
+ transceiver_state = GET_MFW_FIELD(transceiver_state,
+ ETH_TRANSCEIVER_STATE);
+
+ if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
+ DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
+ else
+ DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
+
+ OSAL_TRANSCEIVER_UPDATE(p_hwfn);
+}
+
+static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link)
+{
+ u32 eee_status, val;
+
+ p_link->eee_adv_caps = 0;
+ p_link->eee_lp_adv_caps = 0;
+ eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, eee_status));
+ p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
+ val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
+ if (val & EEE_1G_ADV)
+ p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
+ if (val & EEE_10G_ADV)
+ p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
+ val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
+ if (val & EEE_1G_ADV)
+ p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
+ if (val & EEE_10G_ADV)
+ p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
+}
+
+static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ OSAL_MEM_ZERO(p_data, sizeof(*p_data));
+
+ size = OSAL_MIN_T(u32, sizeof(*p_data),
+ SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+
+ return size;
+}
+
+static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_reset)
+{
+ struct ecore_mcp_link_state *p_link;
+ u8 max_bw, min_bw;
+ u32 status = 0;
+
+ /* Prevent SW/attentions from doing this at the same time */
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
+
+ p_link = &p_hwfn->mcp_info->link_output;
+ OSAL_MEMSET(p_link, 0, sizeof(*p_link));
+ if (!b_reset) {
+ status = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, link_status));
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
+ "Received link update [0x%08x] from mfw"
+ " [Addr 0x%x]\n",
+ status, (u32)(p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ link_status)));
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Resetting link indications\n");
+ goto out;
+ }
+
+ if (p_hwfn->b_drv_link_init) {
+ /* Link indication with modern MFW arrives as per-PF
+ * indication.
+ */
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
+ struct public_func shmem_info;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ p_link->link_up = !!(shmem_info.status &
+ FUNC_STATUS_VIRTUAL_LINK_UP);
+ } else {
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ }
+ } else {
+ p_link->link_up = false;
+ }
+
+ p_link->full_duplex = true;
+ switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
+ case LINK_STATUS_SPEED_AND_DUPLEX_100G:
+ p_link->speed = 100000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_50G:
+ p_link->speed = 50000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_40G:
+ p_link->speed = 40000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_25G:
+ p_link->speed = 25000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_20G:
+ p_link->speed = 20000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_10G:
+ p_link->speed = 10000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
+ p_link->full_duplex = false;
+ /* Fall-through */
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
+ p_link->speed = 1000;
+ break;
+ default:
+ p_link->speed = 0;
+ }
+
+ /* We never store total line speed as p_link->speed is
+ * again changes according to bandwidth allocation.
+ */
+ if (p_link->link_up && p_link->speed)
+ p_link->line_speed = p_link->speed;
+ else
+ p_link->line_speed = 0;
+
+ max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+ min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+ /* Max bandwidth configuration */
+ __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+ p_link, max_bw);
+
+ /* Min bandwidth configuration */
+ __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+ p_link, min_bw);
+ ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
+ p_link->min_pf_rate);
+
+ p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+ p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
+ p_link->parallel_detection = !!(status &
+ LINK_STATUS_PARALLEL_DETECTION_USED);
+ p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
+
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_10G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_20G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_25G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_40G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_50G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_100G : 0;
+
+ p_link->partner_tx_flow_ctrl_en =
+ !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
+ p_link->partner_rx_flow_ctrl_en =
+ !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+
+ switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
+ case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
+ p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
+ break;
+ default:
+ p_link->partner_adv_pause = 0;
+ }
+
+ p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+
+ if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
+ ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
+
+ OSAL_LINK_UPDATE(p_hwfn);
+out:
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
+}
+
+enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, bool b_up)
+{
+ struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+ struct ecore_mcp_mb_params mb_params;
+ struct eth_phy_cfg phy_cfg;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 cmd;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+#endif
+
+ /* Set the shmem configuration according to params */
+ OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
+ cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
+ if (!params->speed.autoneg)
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
+
+ /* There are MFWs that share this capability regardless of whether
+ * this is feasible or not. And given that at the very least adv_caps
+ * would be set internally by ecore, we want to make sure LFA would
+ * still work.
+ */
+ if ((p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
+ params->eee.enable) {
+ phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
+ if (params->eee.tx_lpi_enable)
+ phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
+ if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
+ phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
+ if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
+ phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
+ phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
+ EEE_TX_TIMER_USEC_OFFSET) &
+ EEE_TX_TIMER_USEC_MASK;
+ }
+
+ p_hwfn->b_drv_link_init = b_up;
+
+ if (b_up)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
+ phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
+ phy_cfg.loopback_mode);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.p_data_src = &phy_cfg;
+ mb_params.data_src_size = sizeof(phy_cfg);
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+
+ /* if mcp fails to respond we must abort */
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ /* Mimic link-change attention, done for several reasons:
+ * - On reset, there's no guarantee MFW would trigger
+ * an attention.
+ * - On initialization, older MFWs might not indicate link change
+ * during LFA, so we'll never get an UP indication.
+ */
+ ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
+
+ return ECORE_SUCCESS;
+}
+
+u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
+ path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
+
+ proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
+ path_addr +
+ OFFSETOF(struct public_path, process_kill)) &
+ PROCESS_KILL_COUNTER_MASK;
+
+ return proc_kill_cnt;
+}
+
+static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 proc_kill_cnt;
+
+ /* Prevent possible attentions/interrupts during the recovery handling
+ * and till its load phase, during which they will be re-enabled.
+ */
+ ecore_int_igu_disable_int(p_hwfn, p_ptt);
+
+ DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
+
+ /* The following operations should be done once, and thus in CMT mode
+ * are carried out by only the first HW function.
+ */
+ if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
+ return;
+
+ if (p_dev->recov_in_prog) {
+ DP_NOTICE(p_hwfn, false,
+ "Ignoring the indication since a recovery"
+ " process is already in progress\n");
+ return;
+ }
+
+ p_dev->recov_in_prog = true;
+
+ proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
+ DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
+
+ OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
+}
+
+static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum MFW_DRV_MSG_TYPE type)
+{
+ enum ecore_mcp_protocol_type stats_type;
+ union ecore_mcp_protocol_stats stats;
+ struct ecore_mcp_mb_params mb_params;
+ u32 hsi_param;
+ enum _ecore_status_t rc;
+
+ switch (type) {
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ stats_type = ECORE_MCP_LAN_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
+ break;
+ default:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Invalid protocol type %d\n", type);
+ return;
+ }
+
+ OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_STATS;
+ mb_params.param = hsi_param;
+ mb_params.p_data_src = &stats;
+ mb_params.data_src_size = sizeof(stats);
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
+}
+
+static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
+{
+ struct ecore_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ /* TODO - bandwidth min/max should have valid values of 1-100,
+ * as well as some indication that the feature is disabled.
+ * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
+ * limit and correct value to min `1' and max `100' if limit isn't in
+ * range.
+ */
+ p_info->bandwidth_min = (p_shmem_info->config &
+ FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_OFFSET;
+ if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ p_info->bandwidth_min);
+ p_info->bandwidth_min = 1;
+ }
+
+ p_info->bandwidth_max = (p_shmem_info->config &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_OFFSET;
+ if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ p_info->bandwidth_max);
+ p_info->bandwidth_max = 100;
+ }
+}
+
+static void
+ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_function_info *p_info;
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+
+ ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
+
+ ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
+
+ /* Acknowledge the MFW */
+ ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+ &param);
+}
+
+static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
+{
+ /* A single notification should be sent to upper driver in CMT mode */
+ if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
+ return;
+
+ DP_NOTICE(p_hwfn, false,
+ "Fan failure was detected on the network interface card"
+ " and it's going to be shut down.\n");
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
+}
+
+struct ecore_mdump_cmd_params {
+ u32 cmd;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+};
+
+static enum _ecore_status_t
+ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_cmd_params *p_mdump_cmd_params)
+{
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
+ mb_params.param = p_mdump_cmd_params->cmd;
+ mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
+ mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
+ mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
+ mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+
+ if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+ DP_INFO(p_hwfn,
+ "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
+ p_mdump_cmd_params->cmd);
+ rc = ECORE_NOTIMPL;
+ } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
+ rc = ECORE_NOTIMPL;
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 epoch)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
+ mdump_cmd_params.p_data_src = &epoch;
+ mdump_cmd_params.data_src_size = sizeof(epoch);
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+static enum _ecore_status_t
+ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct mdump_config_stc *p_mdump_config)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
+ mdump_cmd_params.p_data_dst = p_mdump_config;
+ mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
+
+ rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
+ rc = ECORE_UNKNOWN_ERROR;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_info *p_mdump_info)
+{
+ u32 addr, global_offsize, global_addr;
+ struct mdump_config_stc mdump_config;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
+ global_addr +
+ OFFSETOF(struct public_global,
+ mdump_reason));
+
+ if (p_mdump_info->reason) {
+ rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_mdump_info->version = mdump_config.version;
+ p_mdump_info->config = mdump_config.config;
+ p_mdump_info->epoch = mdump_config.epoc;
+ p_mdump_info->num_of_logs = mdump_config.num_of_logs;
+ p_mdump_info->valid_logs = mdump_config.valid_logs;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
+ p_mdump_info->reason, p_mdump_info->version,
+ p_mdump_info->config, p_mdump_info->epoch,
+ p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mdump info: reason %d\n", p_mdump_info->reason);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+enum _ecore_status_t
+ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_retain_data *p_mdump_retain)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+ struct mdump_retain_data_stc mfw_mdump_retain;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
+ mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
+ mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
+
+ rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ p_mdump_retain->valid = mfw_mdump_retain.valid;
+ p_mdump_retain->epoch = mfw_mdump_retain.epoch;
+ p_mdump_retain->pf = mfw_mdump_retain.pf;
+ p_mdump_retain->status = mfw_mdump_retain.status;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mdump_retain_data mdump_retain;
+ enum _ecore_status_t rc;
+
+ /* In CMT mode - no need for more than a single acknowledgment to the
+ * MFW, and no more than a single notification to the upper driver.
+ */
+ if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
+ return;
+
+ rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
+ if (rc == ECORE_SUCCESS && mdump_retain.valid) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
+ mdump_retain.epoch, mdump_retain.pf,
+ mdump_retain.status);
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW notified that a critical error occurred in the device\n");
+ }
+
+ if (p_hwfn->p_dev->allow_mdump) {
+ DP_NOTICE(p_hwfn, false,
+ "Not acknowledging the notification to allow the MFW crash dump\n");
+ return;
+ }
+
+ DP_NOTICE(p_hwfn, false,
+ "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
+ ecore_mcp_mdump_ack(p_hwfn, p_ptt);
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
+}
+
+void
+ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 port_cfg, val;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
+ return;
+
+ OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
+ port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, oem_cfg_port));
+ val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
+ if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
+ DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
+ val);
+
+ val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
+ if (val == OEM_CFG_SCHED_TYPE_ETS)
+ p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
+ else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
+ p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
+ else
+ DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
+ val);
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
+ p_hwfn->ufp_info.tc = (u8)val;
+ val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
+ OEM_CFG_FUNC_HOST_PRI_CTRL);
+ if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
+ p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
+ else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
+ p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
+ else
+ DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
+ val);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
+ p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
+ p_hwfn->ufp_info.pri_type);
+}
+
+static enum _ecore_status_t
+ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
+
+ if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
+ p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
+ p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
+
+ ecore_qm_reconf(p_hwfn, p_ptt);
+ } else {
+ /* Merge UFP TC with the dcbx TC data */
+ ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+ ECORE_DCBX_OPERATIONAL_MIB);
+ }
+
+ /* update storm FW with negotiation results */
+ ecore_sp_pf_update_ufp(p_hwfn);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_info *info = p_hwfn->mcp_info;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ bool found = false;
+ u16 i;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
+
+ /* Read Messages from MFW */
+ ecore_mcp_read_mb(p_hwfn, p_ptt);
+
+ /* Compare current messages to old ones */
+ for (i = 0; i < info->mfw_mb_length; i++) {
+ if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
+ continue;
+
+ found = true;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
+ i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+
+ switch (i) {
+ case MFW_DRV_MSG_LINK_CHANGE:
+ ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
+ break;
+ case MFW_DRV_MSG_VF_DISABLED:
+ ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_LLDP_DATA_UPDATED:
+ ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+ ECORE_DCBX_REMOTE_LLDP_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
+ ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+ ECORE_DCBX_REMOTE_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
+ ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+ ECORE_DCBX_OPERATIONAL_MIB);
+ /* clear the user-config cache */
+ OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
+ sizeof(struct ecore_dcbx_set));
+ break;
+ case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
+ ecore_lldp_mib_update_event(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_OEM_CFG_UPDATE:
+ ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
+ ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_ERROR_RECOVERY:
+ ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ case MFW_DRV_MSG_GET_FCOE_STATS:
+ case MFW_DRV_MSG_GET_ISCSI_STATS:
+ case MFW_DRV_MSG_GET_RDMA_STATS:
+ ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
+ break;
+ case MFW_DRV_MSG_BW_UPDATE:
+ ecore_mcp_update_bw(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_FAILURE_DETECTED:
+ ecore_mcp_handle_fan_failure(p_hwfn);
+ break;
+ case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
+ ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
+ break;
+ default:
+ DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
+ rc = ECORE_INVAL;
+ }
+ }
+
+ /* ACK everything */
+ for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
+ OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
+
+ /* MFW expect answer in BE, so we force write in that format */
+ ecore_wr(p_hwfn, p_ptt,
+ info->mfw_mb_addr + sizeof(u32) +
+ MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
+ sizeof(u32) + i * sizeof(u32), val);
+ }
+
+ if (!found) {
+ DP_NOTICE(p_hwfn, false,
+ "Received an MFW message indication but no"
+ " new message!\n");
+ rc = ECORE_INVAL;
+ }
+
+ /* Copy the new mfw messages into the shadow */
+ OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_mfw_ver,
+ u32 *p_running_bundle_id)
+{
+ u32 global_offsize;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ if (p_hwfn->vf_iov_info) {
+ struct pfvf_acquire_resp_tlv *p_resp;
+
+ p_resp = &p_hwfn->vf_iov_info->acquire_resp;
+ *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
+ return ECORE_SUCCESS;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF requested MFW version prior to ACQUIRE\n");
+ return ECORE_INVAL;
+ }
+ }
+
+ global_offsize = ecore_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
+ public_base,
+ PUBLIC_GLOBAL));
+ *p_mfw_ver =
+ ecore_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize,
+ 0) + OFFSETOF(struct public_global, mfw_ver));
+
+ if (p_running_bundle_id != OSAL_NULL) {
+ *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize,
+ 0) +
+ OFFSETOF(struct public_global,
+ running_bundle_id));
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_media_type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+ return ECORE_BUSY;
+ }
+
+ if (!p_ptt) {
+ *p_media_type = MEDIA_UNSPECIFIED;
+ rc = ECORE_INVAL;
+ } else {
+ *p_media_type = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ media_type));
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_tranceiver_type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+ return ECORE_BUSY;
+ }
+ if (!p_ptt) {
+ *p_tranceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+ rc = ECORE_INVAL;
+ } else {
+ *p_tranceiver_type = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
+ }
+
+ return rc;
+}
+
+static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
+{
+ if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
+ ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
+ (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
+ return 1;
+
+ return 0;
+}
+
+enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_speed_mask)
+{
+ u32 transceiver_data, transceiver_type, transceiver_state;
+
+ ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data);
+
+ transceiver_state = GET_MFW_FIELD(transceiver_data,
+ ETH_TRANSCEIVER_STATE);
+
+ transceiver_type = GET_MFW_FIELD(transceiver_data,
+ ETH_TRANSCEIVER_TYPE);
+
+ if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
+ return ECORE_INVAL;
+
+ switch (transceiver_type) {
+ case ETH_TRANSCEIVER_TYPE_1G_LX:
+ case ETH_TRANSCEIVER_TYPE_1G_SX:
+ case ETH_TRANSCEIVER_TYPE_1G_PCC:
+ case ETH_TRANSCEIVER_TYPE_1G_ACC:
+ case ETH_TRANSCEIVER_TYPE_1000BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_10G_SR:
+ case ETH_TRANSCEIVER_TYPE_10G_LR:
+ case ETH_TRANSCEIVER_TYPE_10G_LRM:
+ case ETH_TRANSCEIVER_TYPE_10G_ER:
+ case ETH_TRANSCEIVER_TYPE_10G_PCC:
+ case ETH_TRANSCEIVER_TYPE_10G_ACC:
+ case ETH_TRANSCEIVER_TYPE_4x10G:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_40G_LR4:
+ case ETH_TRANSCEIVER_TYPE_40G_SR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_100G_AOC:
+ case ETH_TRANSCEIVER_TYPE_100G_SR4:
+ case ETH_TRANSCEIVER_TYPE_100G_LR4:
+ case ETH_TRANSCEIVER_TYPE_100G_ER4:
+ case ETH_TRANSCEIVER_TYPE_100G_ACC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_25G_SR:
+ case ETH_TRANSCEIVER_TYPE_25G_LR:
+ case ETH_TRANSCEIVER_TYPE_25G_AOC:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_25G_CA_N:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_S:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_L:
+ case ETH_TRANSCEIVER_TYPE_4x25G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_40G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_100G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_XLPPI:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_10G_BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ default:
+ DP_INFO(p_hwfn, "Unknown transcevier type 0x%x\n",
+ transceiver_type);
+ *p_speed_mask = 0xff;
+ break;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_board_config)
+{
+ u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+ return ECORE_BUSY;
+ }
+ if (!p_ptt) {
+ *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+ rc = ECORE_INVAL;
+ } else {
+ nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
+ MISC_REG_GEN_PURP_CR0);
+ nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
+ nvm_cfg_addr + 4);
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ *p_board_config = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port,
+ board_cfg));
+ }
+
+ return rc;
+}
+
+/* @DPDK */
+/* Old MFW has a global configuration for all PFs regarding RDMA support */
+static void
+ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
+ enum ecore_pci_personality *p_proto)
+{
+ *p_proto = ECORE_PCI_ETH;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "According to Legacy capabilities, L2 personality is %08x\n",
+ (u32)*p_proto);
+}
+
+/* @DPDK */
+static enum _ecore_status_t
+ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_pci_personality *p_proto)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
+ (u32)*p_proto, resp, param);
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
+ struct public_func *p_info,
+ struct ecore_ptt *p_ptt,
+ enum ecore_pci_personality *p_proto)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+ case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+ if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
+ ECORE_SUCCESS)
+ ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
+ break;
+ default:
+ rc = ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_function_info *info;
+ struct public_func shmem_info;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+ info = &p_hwfn->mcp_info->func_info;
+
+ info->pause_on_host = (shmem_info.config &
+ FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
+
+ if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
+ &info->protocol)) {
+ DP_ERR(p_hwfn, "Unknown personality %08x\n",
+ (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
+ return ECORE_INVAL;
+ }
+
+ ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+ if (shmem_info.mac_upper || shmem_info.mac_lower) {
+ info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
+ info->mac[1] = (u8)(shmem_info.mac_upper);
+ info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
+ info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
+ info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
+ info->mac[5] = (u8)(shmem_info.mac_lower);
+ } else {
+ /* TODO - are there protocols for which there's no MAC? */
+ DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
+ }
+
+ /* TODO - are these calculations true for BE machine? */
+ info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
+ (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
+ info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
+ (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
+
+ info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+
+ info->mtu = (u16)shmem_info.mtu_size;
+
+ if (info->mtu == 0)
+ info->mtu = 1500;
+
+ info->mtu = (u16)shmem_info.mtu_size;
+
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
+ "Read configuration from shmem: pause_on_host %02x"
+ " protocol %02x BW [%02x - %02x]"
+ " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
+ " node %lx ovlan %04x\n",
+ info->pause_on_host, info->protocol,
+ info->bandwidth_min, info->bandwidth_max,
+ info->mac[0], info->mac[1], info->mac[2],
+ info->mac[3], info->mac[4], info->mac[5],
+ (unsigned long)info->wwn_port,
+ (unsigned long)info->wwn_node, info->ovlan);
+
+ return ECORE_SUCCESS;
+}
+
+struct ecore_mcp_link_params
+*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return OSAL_NULL;
+ return &p_hwfn->mcp_info->link_input;
+}
+
+struct ecore_mcp_link_state
+*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return OSAL_NULL;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
+ p_hwfn->mcp_info->link_output.link_up = true;
+ }
+#endif
+
+ return &p_hwfn->mcp_info->link_output;
+}
+
+struct ecore_mcp_link_capabilities
+*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return OSAL_NULL;
+ return &p_hwfn->mcp_info->link_capabilities;
+}
+
+enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
+
+ /* Wait for the drain to complete before returning */
+ OSAL_MSLEEP(1020);
+
+ return rc;
+}
+
+const struct ecore_mcp_function_info
+*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return OSAL_NULL;
+ return &p_hwfn->mcp_info->func_info;
+}
+
+int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 personalities)
+{
+ enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
+ struct public_func shmem_info;
+ int i, count = 0, num_pfs;
+
+ num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
+
+ for (i = 0; i < num_pfs; i++) {
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID_BY_REL(p_hwfn, i));
+ if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
+ continue;
+
+ if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
+ &protocol) !=
+ ECORE_SUCCESS)
+ continue;
+
+ if ((1 << ((u32)protocol)) & personalities)
+ count++;
+ }
+
+ return count;
+}
+
+enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_flash_size)
+{
+ u32 flash_size;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
+ return ECORE_INVAL;
+ }
+#endif
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
+ flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
+ MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+ flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
+
+ *p_flash_size = flash_size;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+ if (p_dev->recov_in_prog) {
+ DP_NOTICE(p_hwfn, false,
+ "Avoid triggering a recovery since such a process"
+ " is already in progress\n");
+ return ECORE_AGAIN;
+ }
+
+ DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num)
+{
+ u32 resp = 0, param = 0, rc_param = 0;
+ enum _ecore_status_t rc;
+
+/* Only Leader can configure MSIX, and need to take CMT into account */
+
+ if (!IS_LEAD_HWFN(p_hwfn))
+ return ECORE_SUCCESS;
+ num *= p_hwfn->p_dev->num_hwfns;
+
+ param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
+ DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
+ param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
+ DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
+ &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
+ vf_id);
+ rc = ECORE_INVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
+ num, vf_id);
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 num)
+{
+ u32 resp = 0, param = num, rc_param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
+ param, &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
+ rc = ECORE_INVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts for VFs\n",
+ num);
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num)
+{
+ if (ECORE_IS_BB(p_hwfn->p_dev))
+ return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
+ else
+ return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
+}
+
+enum _ecore_status_t
+ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_version *p_ver)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct drv_version_stc drv_version;
+ u32 num_words, i;
+ void *p_name;
+ OSAL_BE32 val;
+ enum _ecore_status_t rc;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+#endif
+
+ OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
+ drv_version.version = p_ver->version;
+ num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
+ for (i = 0; i < num_words; i++) {
+ /* The driver name is expected to be in a big-endian format */
+ p_name = &p_ver->name[i * sizeof(u32)];
+ val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
+ *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
+ }
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
+ mb_params.p_data_src = &drv_version;
+ mb_params.data_src_size = sizeof(drv_version);
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+/* A maximal 100 msec waiting time for the MCP to halt */
+#define ECORE_MCP_HALT_SLEEP_MS 10
+#define ECORE_MCP_HALT_MAX_RETRIES 10
+
+enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0, cpu_state, cnt = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
+ &param);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ do {
+ OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+ break;
+ } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
+
+ if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+ return ECORE_BUSY;
+ }
+
+ ecore_mcp_cmd_set_blocking(p_hwfn, true);
+
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_MCP_RESUME_SLEEP_MS 10
+
+enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 cpu_mode, cpu_state;
+
+ ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
+
+ cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+
+ OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ cpu_mode, cpu_state);
+ return ECORE_BUSY;
+ }
+
+ ecore_mcp_cmd_set_blocking(p_hwfn, false);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_client client)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ enum _ecore_status_t rc;
+
+ switch (client) {
+ case ECORE_OV_CLIENT_DRV:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
+ break;
+ case ECORE_OV_CLIENT_USER:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
+ break;
+ case ECORE_OV_CLIENT_VENDOR_SPEC:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_driver_state drv_state)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ enum _ecore_status_t rc;
+
+ switch (drv_state) {
+ case ECORE_OV_DRIVER_STATE_NOT_LOADED:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
+ break;
+ case ECORE_OV_DRIVER_STATE_DISABLED:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
+ break;
+ case ECORE_OV_DRIVER_STATE_ACTIVE:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send driver state\n");
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_fc_npiv_tbl *p_table)
+{
+ return 0;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 mtu)
+{
+ return 0;
+}
+
+enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_led_mode mode)
+{
+ u32 resp = 0, param = 0, drv_mb_param;
+ enum _ecore_status_t rc;
+
+ switch (mode) {
+ case ECORE_LED_MODE_ON:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
+ break;
+ case ECORE_LED_MODE_OFF:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
+ break;
+ case ECORE_LED_MODE_RESTORE:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 mask_parities)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
+ mask_parities, &resp, &param);
+
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "MCP response failure for mask parities, aborting\n");
+ } else if (resp != FW_MSG_CODE_OK) {
+ DP_ERR(p_hwfn,
+ "MCP did not ack mask parity request. Old MFW?\n");
+ rc = ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
+ u8 *p_buf, u32 len)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ u32 bytes_left, offset, bytes_to_copy, buf_size;
+ u32 nvm_offset, resp, param;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ bytes_left = len;
+ offset = 0;
+ while (bytes_left > 0) {
+ bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
+ MCP_DRV_NVM_BUF_LEN);
+ nvm_offset = (addr + offset) | (bytes_to_copy <<
+ DRV_MB_PARAM_NVM_LEN_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)(p_buf + offset));
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
+ rc);
+ resp = FW_MSG_CODE_ERROR;
+ break;
+ }
+
+ if (resp != FW_MSG_CODE_NVM_OK) {
+ DP_NOTICE(p_dev, false,
+ "nvm read failed, resp = 0x%08x\n", resp);
+ rc = ECORE_UNKNOWN_ERROR;
+ break;
+ }
+
+ /* This can be a lengthy process, and it's possible scheduler
+ * isn't preemptible. Sleep a bit to prevent CPU hogging.
+ */
+ if (bytes_left % 0x1000 <
+ (bytes_left - buf_size) % 0x1000)
+ OSAL_MSLEEP(1);
+
+ offset += buf_size;
+ bytes_left -= buf_size;
+ }
+
+ p_dev->mcp_nvm_resp = resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt;
+ u32 resp, param;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ (cmd == ECORE_PHY_CORE_READ) ?
+ DRV_MSG_CODE_PHY_CORE_READ :
+ DRV_MSG_CODE_PHY_RAW_READ,
+ addr, &resp, &param, &len, (u32 *)p_buf);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+
+ p_dev->mcp_nvm_resp = resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt;
+ u32 resp, param;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
+ &resp, &param);
+ p_dev->mcp_nvm_resp = resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
+ u32 addr)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt;
+ u32 resp, param;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
+ &resp, &param);
+ p_dev->mcp_nvm_resp = resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+/* rc receives ECORE_INVAL as default parameter because
+ * it might not enter the while loop if the len is 0
+ */
+enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len)
+{
+ u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_ptt *p_ptt;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ switch (cmd) {
+ case ECORE_PUT_FILE_DATA:
+ nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
+ break;
+ case ECORE_NVM_WRITE_NVRAM:
+ nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+ break;
+ case ECORE_EXT_PHY_FW_UPGRADE:
+ nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
+ cmd);
+ rc = ECORE_INVAL;
+ goto out;
+ }
+
+ buf_idx = 0;
+ while (buf_idx < len) {
+ buf_size = OSAL_MIN_T(u32, (len - buf_idx),
+ MCP_DRV_NVM_BUF_LEN);
+ nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
+ addr) +
+ buf_idx;
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
+ &resp, &param, buf_size,
+ (u32 *)&p_buf[buf_idx]);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "ecore_mcp_nvm_write() failed, rc = %d\n",
+ rc);
+ resp = FW_MSG_CODE_ERROR;
+ break;
+ }
+
+ if (resp != FW_MSG_CODE_OK &&
+ resp != FW_MSG_CODE_NVM_OK &&
+ resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
+ DP_NOTICE(p_dev, false,
+ "nvm write failed, resp = 0x%08x\n", resp);
+ rc = ECORE_UNKNOWN_ERROR;
+ break;
+ }
+
+ /* This can be a lengthy process, and it's possible scheduler
+ * isn't preemptible. Sleep a bit to prevent CPU hogging.
+ */
+ if (buf_idx % 0x1000 >
+ (buf_idx + buf_size) % 0x1000)
+ OSAL_MSLEEP(1);
+
+ buf_idx += buf_size;
+ }
+
+ p_dev->mcp_nvm_resp = resp;
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt;
+ u32 resp, param, nvm_cmd;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
+ DRV_MSG_CODE_PHY_RAW_WRITE;
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
+ &resp, &param, len, (u32 *)p_buf);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ p_dev->mcp_nvm_resp = resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
+ u32 addr)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt;
+ u32 resp, param;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
+ &resp, &param);
+ p_dev->mcp_nvm_resp = resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset,
+ u32 len, u8 *p_buf)
+{
+ u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
+ u32 resp, param;
+ enum _ecore_status_t rc;
+
+ nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
+ addr = offset;
+ offset = 0;
+ bytes_left = len;
+ while (bytes_left > 0) {
+ bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
+ MAX_I2C_TRANSACTION_SIZE);
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((addr + offset) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
+ nvm_offset |= (bytes_to_copy <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_READ,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)(p_buf + offset));
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a transceiver read command to the MFW. rc = %d.\n",
+ rc);
+ return rc;
+ }
+
+ if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
+ return ECORE_NODEV;
+ else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ return ECORE_UNKNOWN_ERROR;
+
+ offset += buf_size;
+ bytes_left -= buf_size;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset,
+ u32 len, u8 *p_buf)
+{
+ u32 buf_idx, buf_size, nvm_offset, resp, param;
+ enum _ecore_status_t rc;
+
+ nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
+ buf_idx = 0;
+ while (buf_idx < len) {
+ buf_size = OSAL_MIN_T(u32, (len - buf_idx),
+ MAX_I2C_TRANSACTION_SIZE);
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((offset + buf_idx) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
+ nvm_offset |= (buf_size <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_WRITE,
+ nvm_offset, &resp, &param, buf_size,
+ (u32 *)&p_buf[buf_idx]);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a transceiver write command to the MFW. rc = %d.\n",
+ rc);
+ return rc;
+ }
+
+ if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
+ return ECORE_NODEV;
+ else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ return ECORE_UNKNOWN_ERROR;
+
+ buf_idx += buf_size;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u32 *gpio_val)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 drv_mb_param = 0, rsp;
+
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
+ drv_mb_param, &rsp, gpio_val);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
+ return ECORE_UNKNOWN_ERROR;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u16 gpio_val)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 drv_mb_param = 0, param, rsp;
+
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
+ (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
+ drv_mb_param, &rsp, &param);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
+ return ECORE_UNKNOWN_ERROR;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u32 *gpio_direction,
+ u32 *gpio_ctrl)
+{
+ u32 drv_mb_param = 0, rsp, val = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
+ drv_mb_param, &rsp, &val);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
+ DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
+ *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
+ DRV_MB_PARAM_GPIO_CTRL_OFFSET;
+
+ if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
+ return ECORE_UNKNOWN_ERROR;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 drv_mb_param = 0, rsp, param;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 drv_mb_param, rsp, param;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
+ struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
+{
+ u32 drv_mb_param = 0, rsp;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, num_images);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
+ struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct bist_nvm_image_att *p_image_att, u32 image_index)
+{
+ u32 buf_size, nvm_offset, resp, param;
+ enum _ecore_status_t rc;
+
+ nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
+ nvm_offset |= (image_index <<
+ DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)p_image_att);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (p_image_att->return_code != 1))
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_temperature_info *p_temp_info)
+{
+ struct ecore_temperature_sensor *p_temp_sensor;
+ struct temperature_status_stc mfw_temp_info;
+ struct ecore_mcp_mb_params mb_params;
+ u32 val;
+ enum _ecore_status_t rc;
+ u8 i;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
+ mb_params.p_data_dst = &mfw_temp_info;
+ mb_params.data_dst_size = sizeof(mfw_temp_info);
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
+ p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
+ ECORE_MAX_NUM_OF_SENSORS);
+ for (i = 0; i < p_temp_info->num_sensors; i++) {
+ val = mfw_temp_info.sensor[i];
+ p_temp_sensor = &p_temp_info->sensors[i];
+ p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
+ SENSOR_LOCATION_OFFSET;
+ p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
+ THRESHOLD_HIGH_OFFSET;
+ p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
+ CRITICAL_TEMPERATURE_OFFSET;
+ p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
+ CURRENT_TEMP_OFFSET;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_mba_versions(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mba_vers *p_mba_vers)
+{
+ u32 buf_size, resp, param;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
+ 0, &resp, &param, &buf_size,
+ &p_mba_vers->mba_vers[0]);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ rc = ECORE_UNKNOWN_ERROR;
+
+ if (buf_size != MCP_DRV_NVM_BUF_LEN)
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 *num_events)
+{
+ u32 rsp;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
+ 0, &rsp, (u32 *)num_events);
+}
+
+static enum resource_id_enum
+ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
+{
+ enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
+
+ switch (res_id) {
+ case ECORE_SB:
+ mfw_res_id = RESOURCE_NUM_SB_E;
+ break;
+ case ECORE_L2_QUEUE:
+ mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
+ break;
+ case ECORE_VPORT:
+ mfw_res_id = RESOURCE_NUM_VPORT_E;
+ break;
+ case ECORE_RSS_ENG:
+ mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
+ break;
+ case ECORE_PQ:
+ mfw_res_id = RESOURCE_NUM_PQ_E;
+ break;
+ case ECORE_RL:
+ mfw_res_id = RESOURCE_NUM_RL_E;
+ break;
+ case ECORE_MAC:
+ case ECORE_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ mfw_res_id = RESOURCE_VFC_FILTER_E;
+ break;
+ case ECORE_ILT:
+ mfw_res_id = RESOURCE_ILT_E;
+ break;
+ case ECORE_LL2_QUEUE:
+ mfw_res_id = RESOURCE_LL2_QUEUE_E;
+ break;
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ mfw_res_id = RESOURCE_CQS_E;
+ break;
+ case ECORE_RDMA_STATS_QUEUE:
+ mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
+ break;
+ case ECORE_BDQ:
+ mfw_res_id = RESOURCE_BDQ_E;
+ break;
+ default:
+ break;
+ }
+
+ return mfw_res_id;
+}
+
+#define ECORE_RESC_ALLOC_VERSION_MAJOR 2
+#define ECORE_RESC_ALLOC_VERSION_MINOR 0
+#define ECORE_RESC_ALLOC_VERSION \
+ ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
+ (ECORE_RESC_ALLOC_VERSION_MINOR << \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
+
+struct ecore_resc_alloc_in_params {
+ u32 cmd;
+ enum ecore_resources res_id;
+ u32 resc_max_val;
+};
+
+struct ecore_resc_alloc_out_params {
+ u32 mcp_resp;
+ u32 mcp_param;
+ u32 resc_num;
+ u32 resc_start;
+ u32 vf_resc_num;
+ u32 vf_resc_start;
+ u32 flags;
+};
+
+#define ECORE_RECOVERY_PROLOG_SLEEP_MS 100
+
+enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ enum _ecore_status_t rc;
+
+ /* Allow ongoing PCIe transactions to complete */
+ OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
+
+ /* Clear the PF's internal FID_enable in the PXP */
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
+ rc);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_resc_alloc_in_params *p_in_params,
+ struct ecore_resc_alloc_out_params *p_out_params)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct resource_info mfw_resc_info;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
+
+ mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
+ if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
+ DP_ERR(p_hwfn,
+ "Failed to match resource %d [%s] with the MFW resources\n",
+ p_in_params->res_id,
+ ecore_hw_get_resc_name(p_in_params->res_id));
+ return ECORE_INVAL;
+ }
+
+ switch (p_in_params->cmd) {
+ case DRV_MSG_SET_RESOURCE_VALUE_MSG:
+ mfw_resc_info.size = p_in_params->resc_max_val;
+ /* Fallthrough */
+ case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
+ p_in_params->cmd);
+ return ECORE_INVAL;
+ }
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = p_in_params->cmd;
+ mb_params.param = ECORE_RESC_ALLOC_VERSION;
+ mb_params.p_data_src = &mfw_resc_info;
+ mb_params.data_src_size = sizeof(mfw_resc_info);
+ mb_params.p_data_dst = mb_params.p_data_src;
+ mb_params.data_dst_size = mb_params.data_src_size;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
+ p_in_params->cmd, p_in_params->res_id,
+ ecore_hw_get_resc_name(p_in_params->res_id),
+ GET_MFW_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ GET_MFW_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_in_params->resc_max_val);
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_out_params->mcp_resp = mb_params.mcp_resp;
+ p_out_params->mcp_param = mb_params.mcp_param;
+ p_out_params->resc_num = mfw_resc_info.size;
+ p_out_params->resc_start = mfw_resc_info.offset;
+ p_out_params->vf_resc_num = mfw_resc_info.vf_size;
+ p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
+ p_out_params->flags = mfw_resc_info.flags;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
+ GET_MFW_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ GET_MFW_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_out_params->resc_num, p_out_params->resc_start,
+ p_out_params->vf_resc_num, p_out_params->vf_resc_start,
+ p_out_params->flags);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp)
+{
+ struct ecore_resc_alloc_out_params out_params;
+ struct ecore_resc_alloc_in_params in_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
+ in_params.res_id = res_id;
+ in_params.resc_max_val = resc_max_val;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 *p_mcp_resp,
+ u32 *p_resc_num, u32 *p_resc_start)
+{
+ struct ecore_resc_alloc_out_params out_params;
+ struct ecore_resc_alloc_in_params in_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ in_params.res_id = res_id;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ *p_resc_num = out_params.resc_num;
+ *p_resc_start = out_params.resc_start;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp, mcp_param;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
+ &mcp_resp, &mcp_param);
+}
+
+static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 param, u32 *p_mcp_resp,
+ u32 *p_mcp_param)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
+ p_mcp_resp, p_mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The resource command is unsupported by the MFW\n");
+ return ECORE_NOTIMPL;
+ }
+
+ if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
+ u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+
+ DP_NOTICE(p_hwfn, false,
+ "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
+ param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+__ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ enum _ecore_status_t rc;
+
+ switch (p_params->timeout) {
+ case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
+ opcode = RESOURCE_OPCODE_REQ;
+ p_params->timeout = 0;
+ break;
+ case ECORE_MCP_RESC_LOCK_TO_NONE:
+ opcode = RESOURCE_OPCODE_REQ_WO_AGING;
+ p_params->timeout = 0;
+ break;
+ default:
+ opcode = RESOURCE_OPCODE_REQ_W_AGING;
+ break;
+ }
+
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
+ param, p_params->timeout, opcode, p_params->resource);
+
+ /* Attempt to acquire the resource */
+ rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
+ &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Analyze the response */
+ p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
+ opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
+ mcp_param, opcode, p_params->owner);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_GNT:
+ p_params->b_granted = true;
+ break;
+ case RESOURCE_OPCODE_BUSY:
+ p_params->b_granted = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params)
+{
+ u32 retry_cnt = 0;
+ enum _ecore_status_t rc;
+
+ do {
+ /* No need for an interval before the first iteration */
+ if (retry_cnt) {
+ if (p_params->sleep_b4_retry) {
+ u16 retry_interval_in_ms =
+ DIV_ROUND_UP(p_params->retry_interval,
+ 1000);
+
+ OSAL_MSLEEP(retry_interval_in_ms);
+ } else {
+ OSAL_UDELAY(p_params->retry_interval);
+ }
+ }
+
+ rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (p_params->b_granted)
+ break;
+ } while (retry_cnt++ < p_params->retry_num);
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
+ struct ecore_resc_unlock_params *p_unlock,
+ enum ecore_resc_lock resource,
+ bool b_is_permanent)
+{
+ if (p_lock != OSAL_NULL) {
+ OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
+
+ /* Permanent resources don't require aging, and there's no
+ * point in trying to acquire them more than once since it's
+ * unexpected another entity would release them.
+ */
+ if (b_is_permanent) {
+ p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
+ } else {
+ p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
+ p_lock->retry_interval =
+ ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
+ p_lock->sleep_b4_retry = true;
+ }
+
+ p_lock->resource = resource;
+ }
+
+ if (p_unlock != OSAL_NULL) {
+ OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
+ p_unlock->resource = resource;
+ }
+}
+
+enum _ecore_status_t
+ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_unlock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ enum _ecore_status_t rc;
+
+ opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
+ : RESOURCE_OPCODE_RELEASE;
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
+ param, opcode, p_params->resource);
+
+ /* Attempt to release the resource */
+ rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
+ &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Analyze the response */
+ opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
+ mcp_param, opcode);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_RELEASED_PREVIOUS:
+ DP_INFO(p_hwfn,
+ "Resource unlock request for an already released resource [%d]\n",
+ p_params->resource);
+ /* Fallthrough */
+ case RESOURCE_OPCODE_RELEASED:
+ p_params->b_released = true;
+ break;
+ case RESOURCE_OPCODE_WRONG_OWNER:
+ p_params->b_released = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
+{
+ return !!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
+}
+
+enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
+ 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
+ if (rc == ECORE_SUCCESS)
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
+ "MFW supported features: %08x\n",
+ p_hwfn->mcp_info->capabilities);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp, mcp_param, features;
+
+ features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
+ DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+ DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
+ features, &mcp_resp, &mcp_param);
+}
+
+enum _ecore_status_t
+ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_attr *p_drv_attr)
+{
+ struct attribute_cmd_write_stc attr_cmd_write;
+ enum _attribute_commands_e mfw_attr_cmd;
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
+ switch (p_drv_attr->attr_cmd) {
+ case ECORE_MCP_DRV_ATTR_CMD_READ:
+ mfw_attr_cmd = ATTRIBUTE_CMD_READ;
+ break;
+ case ECORE_MCP_DRV_ATTR_CMD_WRITE:
+ mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
+ break;
+ case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
+ mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
+ break;
+ case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
+ mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
+ p_drv_attr->attr_cmd);
+ return ECORE_INVAL;
+ }
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
+ SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
+ p_drv_attr->attr_num);
+ SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
+ mfw_attr_cmd);
+ if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
+ OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
+ attr_cmd_write.val = p_drv_attr->val;
+ attr_cmd_write.mask = p_drv_attr->mask;
+ attr_cmd_write.offset = p_drv_attr->offset;
+
+ mb_params.p_data_src = &attr_cmd_write;
+ mb_params.data_src_size = sizeof(attr_cmd_write);
+ }
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The attribute command is not supported by the MFW\n");
+ return ECORE_NOTIMPL;
+ } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
+ mb_params.mcp_resp, p_drv_attr->attr_cmd,
+ p_drv_attr->attr_num);
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
+ p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
+ p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
+ mb_params.mcp_param);
+
+ if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
+ p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
+ p_drv_attr->val = mb_params.mcp_param;
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u32 offset, u32 val)
+{
+ struct ecore_mcp_mb_params mb_params = {0};
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 dword = val;
+
+ mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
+ mb_params.param = offset;
+ mb_params.p_data_src = &dword;
+ mb_params.data_src_size = sizeof(dword);
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to wol write request, rc = %d\n", rc);
+ }
+
+ if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
+ val, offset, mb_params.mcp_resp);
+ rc = ECORE_UNKNOWN_ERROR;
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.h
new file mode 100644
index 00000000..8e125310
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.h
@@ -0,0 +1,569 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_MCP_H__
+#define __ECORE_MCP_H__
+
+#include "bcm_osal.h"
+#include "mcp_public.h"
+#include "ecore.h"
+#include "ecore_mcp_api.h"
+#include "ecore_dev_api.h"
+
+/* Using hwfn number (and not pf_num) is required since in CMT mode,
+ * same pf_num may be used by two different hwfn
+ * TODO - this shouldn't really be in .h file, but until all fields
+ * required during hw-init will be placed in their correct place in shmem
+ * we need it in ecore_dev.c [for readin the nvram reflection in shmem].
+ */
+#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (ECORE_IS_BB((p_hwfn)->p_dev) ? \
+ ((rel_pfid) | \
+ ((p_hwfn)->abs_pf_id & 1) << 3) : \
+ rel_pfid)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
+
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ecore_device_num_ports((_p_hwfn)->p_dev))
+
+struct ecore_mcp_info {
+ /* List for mailbox commands which were sent and wait for a response */
+ osal_list_t cmd_list;
+
+ /* Spinlock used for protecting the access to the mailbox commands list
+ * and the sending of the commands.
+ */
+ osal_spinlock_t cmd_lock;
+
+ /* Flag to indicate whether sending a MFW mailbox command is blocked */
+ bool b_block_cmd;
+
+ /* Spinlock used for syncing SW link-changes and link-changes
+ * originating from attention context.
+ */
+ osal_spinlock_t link_lock;
+
+ /* Address of the MCP public area */
+ u32 public_base;
+ /* Address of the driver mailbox */
+ u32 drv_mb_addr;
+ /* Address of the MFW mailbox */
+ u32 mfw_mb_addr;
+ /* Address of the port configuration (link) */
+ u32 port_addr;
+
+ /* Current driver mailbox sequence */
+ u16 drv_mb_seq;
+ /* Current driver pulse sequence */
+ u16 drv_pulse_seq;
+
+ struct ecore_mcp_link_params link_input;
+ struct ecore_mcp_link_state link_output;
+ struct ecore_mcp_link_capabilities link_capabilities;
+
+ struct ecore_mcp_function_info func_info;
+
+ u8 *mfw_mb_cur;
+ u8 *mfw_mb_shadow;
+ u16 mfw_mb_length;
+ u32 mcp_hist;
+
+ /* Capabilties negotiated with the MFW */
+ u32 capabilities;
+};
+
+struct ecore_mcp_mb_params {
+ u32 cmd;
+ u32 param;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+ u32 mcp_param;
+};
+
+struct ecore_drv_tlv_hdr {
+ u8 tlv_type; /* According to the enum below */
+ u8 tlv_length; /* In dwords - not including this header */
+ u8 tlv_reserved;
+#define ECORE_DRV_TLV_FLAGS_CHANGED 0x01
+ u8 tlv_flags;
+};
+
+/**
+ * @brief Initialize the interface with the MCP
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Initialize the port interface with the MCP
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * Can only be called after `num_ports_in_engine' is set
+ */
+void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+/**
+ * @brief Releases resources allocated during the init process.
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief This function is called from the DPC context. After
+ * pointing PTT to the mfw mb, check for events sent by the MCP
+ * to the driver and ack them. In case a critical event
+ * detected, it will be handled here, otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @param p_hwfn - HW function
+ * @param p_ptt - PTT required for register access
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation
+ * was successul.
+ */
+enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief When MFW doesn't get driver pulse for couple of seconds, at some
+ * threshold before timeout expires, it will generate interrupt
+ * through a dedicated status block (DPSB - Driver Pulse Status
+ * Block), which the driver should respond immediately, by
+ * providing keepalive indication after setting the PTT to the
+ * driver-MFW mailbox. This function is called directly from the
+ * DPC upon receiving the DPSB attention.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation
+ * was successful.
+ */
+enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+enum ecore_drv_role {
+ ECORE_DRV_ROLE_OS,
+ ECORE_DRV_ROLE_KDUMP,
+};
+
+struct ecore_load_req_params {
+ /* Input params */
+ enum ecore_drv_role drv_role;
+ u8 timeout_val; /* 1..254, '0' - default value, '255' - no timeout */
+ bool avoid_eng_reset;
+ enum ecore_override_force_load override_force_load;
+
+ /* Output params */
+ u32 load_code;
+};
+
+/**
+ * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
+ * returns whether this PF is the first on the engine/port or function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_load_req_params *p_params);
+
+/**
+ * @brief Sends a LOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Sends a UNLOAD_REQ message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Sends a UNLOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Read the MFW mailbox into Current buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Ack to mfw that driver finished FLR process for VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ *
+ * @param return enum _ecore_status_t - ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *vfs_to_ack);
+
+/**
+ * @brief - calls during init to read shmem of all function-related info.
+ *
+ * @param p_hwfn
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Reset the MCP using mailbox command.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ *
+ * @param p_hwfn
+ *
+ * @return true iff MFW is running and mcp_info is initialized
+ */
+bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief request MFW to configure MSI-X for a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf_id - absolute inside engine
+ * @param num_sbs - number of entries to request
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num);
+
+/**
+ * @brief - Halt the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Wake up the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link,
+ u8 max_bw);
+int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link,
+ u8 min_bw);
+enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 mask_parities);
+/**
+ * @brief - Sends crash mdump related info to the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 epoch);
+
+/**
+ * @brief - Triggers a MFW crash dump procedure.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param epoch
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+struct ecore_mdump_retain_data {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
+/**
+ * @brief - Gets the mdump retained data from the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mdump_retain
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t
+ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_retain_data *p_mdump_retain);
+
+/**
+ * @brief - Sets the MFW's max value for the given resource
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param resc_max_val
+ * @param p_mcp_resp
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp);
+
+/**
+ * @brief - Gets the MFW allocation info for the given resource
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param p_mcp_resp
+ * @param p_resc_num
+ * @param p_resc_start
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 *p_mcp_resp,
+ u32 *p_resc_num, u32 *p_resc_start);
+
+/**
+ * @brief - Initiates PF FLR
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+#define ECORE_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP /* 0 */
+#define ECORE_MCP_RESC_LOCK_MAX_VAL 31
+
+enum ecore_resc_lock {
+ ECORE_RESC_LOCK_DBG_DUMP = ECORE_MCP_RESC_LOCK_MIN_VAL,
+ /* Locks that the MFW is aware of should be added here downwards */
+
+ /* Ecore only locks should be added here upwards */
+ ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL,
+
+ /* A dummy value to be used for auxiliary functions in need of
+ * returning an 'error' value.
+ */
+ ECORE_RESC_LOCK_RESC_INVALID,
+};
+
+struct ecore_resc_lock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Lock timeout value in seconds [default, none or 1..254] */
+ u8 timeout;
+#define ECORE_MCP_RESC_LOCK_TO_DEFAULT 0
+#define ECORE_MCP_RESC_LOCK_TO_NONE 255
+
+ /* Number of times to retry locking */
+ u8 retry_num;
+#define ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT 10
+
+ /* The interval in usec between retries */
+ u16 retry_interval;
+#define ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000
+
+ /* Use sleep or delay between retries */
+ bool sleep_b4_retry;
+
+ /* Will be set as true if the resource is free and granted */
+ bool b_granted;
+
+ /* Will be filled with the resource owner.
+ * [0..15 = PF0-15, 16 = MFW, 17 = diag over serial]
+ */
+ u8 owner;
+};
+
+/**
+ * @brief Acquires MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params);
+
+struct ecore_resc_unlock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Allow to release a resource even if belongs to another PF */
+ bool b_force;
+
+ /* Will be set as true if the resource is released */
+ bool b_released;
+};
+
+/**
+ * @brief Releases MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_unlock_params *p_params);
+
+/**
+ * @brief - default initialization for lock/unlock resource structs
+ *
+ * @param p_lock - lock params struct to be initialized; Can be OSAL_NULL
+ * @param p_unlock - unlock params struct to be initialized; Can be OSAL_NULL
+ * @param resource - the requested resource
+ * @paral b_is_permanent - disable retries & aging when set
+ */
+void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
+ struct ecore_resc_unlock_params *p_unlock,
+ enum ecore_resc_lock resource,
+ bool b_is_permanent);
+
+/**
+ * @brief Learn of supported MFW features; To be done during early init
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Inform MFW of set of features supported by driver. Should be done
+ * inside the contet of the LOAD_REQ.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+enum ecore_mcp_drv_attr_cmd {
+ ECORE_MCP_DRV_ATTR_CMD_READ,
+ ECORE_MCP_DRV_ATTR_CMD_WRITE,
+ ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR,
+ ECORE_MCP_DRV_ATTR_CMD_CLEAR,
+};
+
+struct ecore_mcp_drv_attr {
+ enum ecore_mcp_drv_attr_cmd attr_cmd;
+ u32 attr_num;
+
+ /* R/RC - will be set with the read value
+ * W - should hold the required value to be written
+ * C - DC
+ */
+ u32 val;
+
+ /* W - mask/offset to be applied on the given value
+ * R/RC/C - DC
+ */
+ u32 mask;
+ u32 offset;
+};
+
+/**
+ * @brief Handle the drivers' attributes that are kept by the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_drv_attr
+ */
+enum _ecore_status_t
+ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_attr *p_drv_attr);
+
+/**
+ * @brief Read ufp config from the shared memory.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void
+ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u32 offset, u32 val);
+
+#endif /* __ECORE_MCP_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp_api.h
new file mode 100644
index 00000000..cfb9f99d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp_api.h
@@ -0,0 +1,1215 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_MCP_API_H__
+#define __ECORE_MCP_API_H__
+
+#include "ecore_status.h"
+
+struct ecore_mcp_link_speed_params {
+ bool autoneg;
+ u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */
+ u32 forced_speed; /* In Mb/s */
+};
+
+struct ecore_mcp_link_pause_params {
+ bool autoneg;
+ bool forced_rx;
+ bool forced_tx;
+};
+
+enum ecore_mcp_eee_mode {
+ ECORE_MCP_EEE_DISABLED,
+ ECORE_MCP_EEE_ENABLED,
+ ECORE_MCP_EEE_UNSUPPORTED
+};
+
+struct ecore_link_eee_params {
+ u32 tx_lpi_timer;
+#define ECORE_EEE_1G_ADV (1 << 0)
+#define ECORE_EEE_10G_ADV (1 << 1)
+ /* Capabilities are represented using ECORE_EEE_*_ADV values */
+ u8 adv_caps;
+ u8 lp_adv_caps;
+ bool enable;
+ bool tx_lpi_enable;
+};
+
+struct ecore_mcp_link_params {
+ struct ecore_mcp_link_speed_params speed;
+ struct ecore_mcp_link_pause_params pause;
+ u32 loopback_mode; /* in PMM_LOOPBACK values */
+ struct ecore_link_eee_params eee;
+};
+
+struct ecore_mcp_link_capabilities {
+ u32 speed_capabilities;
+ bool default_speed_autoneg; /* In Mb/s */
+ u32 default_speed; /* In Mb/s */
+ enum ecore_mcp_eee_mode default_eee;
+ u32 eee_lpi_timer;
+ u8 eee_speed_caps;
+};
+
+struct ecore_mcp_link_state {
+ bool link_up;
+
+ u32 min_pf_rate; /* In Mb/s */
+
+ /* Actual link speed in Mb/s */
+ u32 line_speed;
+
+ /* PF max speed in MB/s, deduced from line_speed
+ * according to PF max bandwidth configuration.
+ */
+ u32 speed;
+ bool full_duplex;
+
+ bool an;
+ bool an_complete;
+ bool parallel_detection;
+ bool pfc_enabled;
+
+#define ECORE_LINK_PARTNER_SPEED_1G_HD (1 << 0)
+#define ECORE_LINK_PARTNER_SPEED_1G_FD (1 << 1)
+#define ECORE_LINK_PARTNER_SPEED_10G (1 << 2)
+#define ECORE_LINK_PARTNER_SPEED_20G (1 << 3)
+#define ECORE_LINK_PARTNER_SPEED_25G (1 << 4)
+#define ECORE_LINK_PARTNER_SPEED_40G (1 << 5)
+#define ECORE_LINK_PARTNER_SPEED_50G (1 << 6)
+#define ECORE_LINK_PARTNER_SPEED_100G (1 << 7)
+ u32 partner_adv_speed;
+
+ bool partner_tx_flow_ctrl_en;
+ bool partner_rx_flow_ctrl_en;
+
+#define ECORE_LINK_PARTNER_SYMMETRIC_PAUSE (1)
+#define ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
+#define ECORE_LINK_PARTNER_BOTH_PAUSE (3)
+ u8 partner_adv_pause;
+
+ bool sfp_tx_fault;
+
+ bool eee_active;
+ u8 eee_adv_caps;
+ u8 eee_lp_adv_caps;
+};
+
+struct ecore_mcp_function_info {
+ u8 pause_on_host;
+
+ enum ecore_pci_personality protocol;
+
+ u8 bandwidth_min;
+ u8 bandwidth_max;
+
+ u8 mac[ETH_ALEN];
+
+ u64 wwn_port;
+ u64 wwn_node;
+
+#define ECORE_MCP_VLAN_UNSET (0xffff)
+ u16 ovlan;
+
+ u16 mtu;
+};
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_nvm_images {
+ ECORE_NVM_IMAGE_ISCSI_CFG,
+ ECORE_NVM_IMAGE_FCOE_CFG,
+};
+#endif
+
+struct ecore_mcp_drv_version {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct ecore_mcp_lan_stats {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+};
+
+#ifndef ECORE_PROTO_STATS
+#define ECORE_PROTO_STATS
+struct ecore_mcp_fcoe_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct ecore_mcp_iscsi_stats {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct ecore_mcp_rdma_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_byts;
+};
+
+enum ecore_mcp_protocol_type {
+ ECORE_MCP_LAN_STATS,
+ ECORE_MCP_FCOE_STATS,
+ ECORE_MCP_ISCSI_STATS,
+ ECORE_MCP_RDMA_STATS
+};
+
+union ecore_mcp_protocol_stats {
+ struct ecore_mcp_lan_stats lan_stats;
+ struct ecore_mcp_fcoe_stats fcoe_stats;
+ struct ecore_mcp_iscsi_stats iscsi_stats;
+ struct ecore_mcp_rdma_stats rdma_stats;
+};
+#endif
+
+enum ecore_ov_client {
+ ECORE_OV_CLIENT_DRV,
+ ECORE_OV_CLIENT_USER,
+ ECORE_OV_CLIENT_VENDOR_SPEC
+};
+
+enum ecore_ov_driver_state {
+ ECORE_OV_DRIVER_STATE_NOT_LOADED,
+ ECORE_OV_DRIVER_STATE_DISABLED,
+ ECORE_OV_DRIVER_STATE_ACTIVE
+};
+
+#define ECORE_MAX_NPIV_ENTRIES 128
+#define ECORE_WWN_SIZE 8
+struct ecore_fc_npiv_tbl {
+ u32 count;
+ u8 wwpn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE];
+ u8 wwnn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE];
+};
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_led_mode {
+ ECORE_LED_MODE_OFF,
+ ECORE_LED_MODE_ON,
+ ECORE_LED_MODE_RESTORE
+};
+#endif
+
+struct ecore_temperature_sensor {
+ u8 sensor_location;
+ u8 threshold_high;
+ u8 critical;
+ u8 current_temp;
+};
+
+#define ECORE_MAX_NUM_OF_SENSORS 7
+struct ecore_temperature_info {
+ u32 num_sensors;
+ struct ecore_temperature_sensor sensors[ECORE_MAX_NUM_OF_SENSORS];
+};
+
+enum ecore_mba_img_idx {
+ ECORE_MBA_LEGACY_IDX,
+ ECORE_MBA_PCI3CLP_IDX,
+ ECORE_MBA_PCI3_IDX,
+ ECORE_MBA_FCODE_IDX,
+ ECORE_EFI_X86_IDX,
+ ECORE_EFI_IPF_IDX,
+ ECORE_EFI_EBC_IDX,
+ ECORE_EFI_X64_IDX,
+ ECORE_MAX_NUM_OF_ROMIMG
+};
+
+struct ecore_mba_vers {
+ u32 mba_vers[ECORE_MAX_NUM_OF_ROMIMG];
+};
+
+enum ecore_mfw_tlv_type {
+ ECORE_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */
+ ECORE_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */
+ ECORE_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */
+ ECORE_MFW_TLV_ISCSI = 0x8, /* SCSI protocol TLVs */
+ ECORE_MFW_TLV_MAX = 0x16,
+};
+
+struct ecore_mfw_tlv_generic {
+ u16 feat_flags;
+ bool feat_flags_set;
+ u64 local_mac;
+ bool local_mac_set;
+ u64 additional_mac1;
+ bool additional_mac1_set;
+ u64 additional_mac2;
+ bool additional_mac2_set;
+ u8 drv_state;
+ bool drv_state_set;
+ u8 pxe_progress;
+ bool pxe_progress_set;
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
+struct ecore_mfw_tlv_eth {
+ u16 lso_maxoff_size;
+ bool lso_maxoff_size_set;
+ u16 lso_minseg_size;
+ bool lso_minseg_size_set;
+ u8 prom_mode;
+ bool prom_mode_set;
+ u16 tx_descr_size;
+ bool tx_descr_size_set;
+ u16 rx_descr_size;
+ bool rx_descr_size_set;
+ u16 netq_count;
+ bool netq_count_set;
+ u32 tcp4_offloads;
+ bool tcp4_offloads_set;
+ u32 tcp6_offloads;
+ bool tcp6_offloads_set;
+ u16 tx_descr_qdepth;
+ bool tx_descr_qdepth_set;
+ u16 rx_descr_qdepth;
+ bool rx_descr_qdepth_set;
+ u8 iov_offload;
+ bool iov_offload_set;
+ u8 txqs_empty;
+ bool txqs_empty_set;
+ u8 rxqs_empty;
+ bool rxqs_empty_set;
+ u8 num_txqs_full;
+ bool num_txqs_full_set;
+ u8 num_rxqs_full;
+ bool num_rxqs_full_set;
+};
+
+struct ecore_mfw_tlv_fcoe {
+ u8 scsi_timeout;
+ bool scsi_timeout_set;
+ u32 rt_tov;
+ bool rt_tov_set;
+ u32 ra_tov;
+ bool ra_tov_set;
+ u32 ed_tov;
+ bool ed_tov_set;
+ u32 cr_tov;
+ bool cr_tov_set;
+ u8 boot_type;
+ bool boot_type_set;
+ u8 npiv_state;
+ bool npiv_state_set;
+ u32 num_npiv_ids;
+ bool num_npiv_ids_set;
+ u8 switch_name[8];
+ bool switch_name_set;
+ u16 switch_portnum;
+ bool switch_portnum_set;
+ u8 switch_portid[3];
+ bool switch_portid_set;
+ u8 vendor_name[8];
+ bool vendor_name_set;
+ u8 switch_model[8];
+ bool switch_model_set;
+ u8 switch_fw_version[8];
+ bool switch_fw_version_set;
+ u8 qos_pri;
+ bool qos_pri_set;
+ u8 port_alias[3];
+ bool port_alias_set;
+ u8 port_state;
+ bool port_state_set;
+ u16 fip_tx_descr_size;
+ bool fip_tx_descr_size_set;
+ u16 fip_rx_descr_size;
+ bool fip_rx_descr_size_set;
+ u16 link_failures;
+ bool link_failures_set;
+ u8 fcoe_boot_progress;
+ bool fcoe_boot_progress_set;
+ u64 rx_bcast;
+ bool rx_bcast_set;
+ u64 tx_bcast;
+ bool tx_bcast_set;
+ u16 fcoe_txq_depth;
+ bool fcoe_txq_depth_set;
+ u16 fcoe_rxq_depth;
+ bool fcoe_rxq_depth_set;
+ u64 fcoe_rx_frames;
+ bool fcoe_rx_frames_set;
+ u64 fcoe_rx_bytes;
+ bool fcoe_rx_bytes_set;
+ u64 fcoe_tx_frames;
+ bool fcoe_tx_frames_set;
+ u64 fcoe_tx_bytes;
+ bool fcoe_tx_bytes_set;
+ u16 crc_count;
+ bool crc_count_set;
+ u32 crc_err_src_fcid[5];
+ bool crc_err_src_fcid_set[5];
+ u8 crc_err_tstamp[5][14];
+ bool crc_err_tstamp_set[5];
+ u16 losync_err;
+ bool losync_err_set;
+ u16 losig_err;
+ bool losig_err_set;
+ u16 primtive_err;
+ bool primtive_err_set;
+ u16 disparity_err;
+ bool disparity_err_set;
+ u16 code_violation_err;
+ bool code_violation_err_set;
+ u32 flogi_param[4];
+ bool flogi_param_set[4];
+ u8 flogi_tstamp[14];
+ bool flogi_tstamp_set;
+ u32 flogi_acc_param[4];
+ bool flogi_acc_param_set[4];
+ u8 flogi_acc_tstamp[14];
+ bool flogi_acc_tstamp_set;
+ u32 flogi_rjt;
+ bool flogi_rjt_set;
+ u8 flogi_rjt_tstamp[14];
+ bool flogi_rjt_tstamp_set;
+ u32 fdiscs;
+ bool fdiscs_set;
+ u8 fdisc_acc;
+ bool fdisc_acc_set;
+ u8 fdisc_rjt;
+ bool fdisc_rjt_set;
+ u8 plogi;
+ bool plogi_set;
+ u8 plogi_acc;
+ bool plogi_acc_set;
+ u8 plogi_rjt;
+ bool plogi_rjt_set;
+ u32 plogi_dst_fcid[5];
+ bool plogi_dst_fcid_set[5];
+ u8 plogi_tstamp[5][14];
+ bool plogi_tstamp_set[5];
+ u32 plogi_acc_src_fcid[5];
+ bool plogi_acc_src_fcid_set[5];
+ u8 plogi_acc_tstamp[5][14];
+ bool plogi_acc_tstamp_set[5];
+ u8 tx_plogos;
+ bool tx_plogos_set;
+ u8 plogo_acc;
+ bool plogo_acc_set;
+ u8 plogo_rjt;
+ bool plogo_rjt_set;
+ u32 plogo_src_fcid[5];
+ bool plogo_src_fcid_set[5];
+ u8 plogo_tstamp[5][14];
+ bool plogo_tstamp_set[5];
+ u8 rx_logos;
+ bool rx_logos_set;
+ u8 tx_accs;
+ bool tx_accs_set;
+ u8 tx_prlis;
+ bool tx_prlis_set;
+ u8 rx_accs;
+ bool rx_accs_set;
+ u8 tx_abts;
+ bool tx_abts_set;
+ u8 rx_abts_acc;
+ bool rx_abts_acc_set;
+ u8 rx_abts_rjt;
+ bool rx_abts_rjt_set;
+ u32 abts_dst_fcid[5];
+ bool abts_dst_fcid_set[5];
+ u8 abts_tstamp[5][14];
+ bool abts_tstamp_set[5];
+ u8 rx_rscn;
+ bool rx_rscn_set;
+ u32 rx_rscn_nport[4];
+ bool rx_rscn_nport_set[4];
+ u8 tx_lun_rst;
+ bool tx_lun_rst_set;
+ u8 abort_task_sets;
+ bool abort_task_sets_set;
+ u8 tx_tprlos;
+ bool tx_tprlos_set;
+ u8 tx_nos;
+ bool tx_nos_set;
+ u8 rx_nos;
+ bool rx_nos_set;
+ u8 ols;
+ bool ols_set;
+ u8 lr;
+ bool lr_set;
+ u8 lrr;
+ bool lrr_set;
+ u8 tx_lip;
+ bool tx_lip_set;
+ u8 rx_lip;
+ bool rx_lip_set;
+ u8 eofa;
+ bool eofa_set;
+ u8 eofni;
+ bool eofni_set;
+ u8 scsi_chks;
+ bool scsi_chks_set;
+ u8 scsi_cond_met;
+ bool scsi_cond_met_set;
+ u8 scsi_busy;
+ bool scsi_busy_set;
+ u8 scsi_inter;
+ bool scsi_inter_set;
+ u8 scsi_inter_cond_met;
+ bool scsi_inter_cond_met_set;
+ u8 scsi_rsv_conflicts;
+ bool scsi_rsv_conflicts_set;
+ u8 scsi_tsk_full;
+ bool scsi_tsk_full_set;
+ u8 scsi_aca_active;
+ bool scsi_aca_active_set;
+ u8 scsi_tsk_abort;
+ bool scsi_tsk_abort_set;
+ u32 scsi_rx_chk[5];
+ bool scsi_rx_chk_set[5];
+ u8 scsi_chk_tstamp[5][14];
+ bool scsi_chk_tstamp_set[5];
+};
+
+struct ecore_mfw_tlv_iscsi {
+ u8 target_llmnr;
+ bool target_llmnr_set;
+ u8 header_digest;
+ bool header_digest_set;
+ u8 data_digest;
+ bool data_digest_set;
+ u8 auth_method;
+ bool auth_method_set;
+ u16 boot_taget_portal;
+ bool boot_taget_portal_set;
+ u16 frame_size;
+ bool frame_size_set;
+ u16 tx_desc_size;
+ bool tx_desc_size_set;
+ u16 rx_desc_size;
+ bool rx_desc_size_set;
+ u8 boot_progress;
+ bool boot_progress_set;
+ u16 tx_desc_qdepth;
+ bool tx_desc_qdepth_set;
+ u16 rx_desc_qdepth;
+ bool rx_desc_qdepth_set;
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
+union ecore_mfw_tlv_data {
+ struct ecore_mfw_tlv_generic generic;
+ struct ecore_mfw_tlv_eth eth;
+ struct ecore_mfw_tlv_fcoe fcoe;
+ struct ecore_mfw_tlv_iscsi iscsi;
+};
+
+/**
+ * @brief - returns the link params of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link params
+ */
+struct ecore_mcp_link_params *ecore_mcp_get_link_params(struct ecore_hwfn *);
+
+/**
+ * @brief - return the link state of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link state
+ */
+struct ecore_mcp_link_state *ecore_mcp_get_link_state(struct ecore_hwfn *);
+
+/**
+ * @brief - return the link capabilities of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link capabilities
+ */
+struct ecore_mcp_link_capabilities
+*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Request the MFW to set the link according to 'link_input'.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_up - raise link if `true'. Reset link if `false'.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_up);
+
+/**
+ * @brief Get the management firmware version value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mfw_ver - mfw version value
+ * @param p_running_bundle_id - image id in nvram; Optional.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_mfw_ver,
+ u32 *p_running_bundle_id);
+
+/**
+ * @brief Get media type value of the port.
+ *
+ * @param p_dev - ecore dev pointer
+ * @param p_ptt
+ * @param mfw_ver - media type value
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful.
+ * ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *media_type);
+
+/**
+ * @brief Get transceiver data of the port.
+ *
+ * @param p_dev - ecore dev pointer
+ * @param p_ptt
+ * @param p_transceiver_type - media type value
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful.
+ * ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_tranceiver_type);
+
+/**
+ * @brief Get transceiver supported speed mask.
+ *
+ * @param p_dev - ecore dev pointer
+ * @param p_ptt
+ * @param p_speed_mask - Bit mask of all supported speeds.
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful.
+ * ECORE_BUSY - Operation failed
+ */
+
+enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_speed_mask);
+
+/**
+ * @brief Get board configuration.
+ *
+ * @param p_dev - ecore dev pointer
+ * @param p_ptt
+ * @param p_board_config - Board config.
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful.
+ * ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_board_config);
+
+/**
+ * @brief - Sends a command to the MCP mailbox.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param cmd - command to be sent to the MCP
+ * @param param - Optional param
+ * @param o_mcp_resp - The MCP response code (exclude sequence)
+ * @param o_mcp_param - Optional parameter provided by the MCP response
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - operation was successful
+ * ECORE_BUSY - operation failed
+ */
+enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 cmd, u32 param,
+ u32 *o_mcp_resp, u32 *o_mcp_param);
+
+/**
+ * @brief - drains the nig, allowing completion to pass in case of pauses.
+ * (Should be called only from sleepable context)
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+#ifndef LINUX_REMOVE
+/**
+ * @brief - return the mcp function info of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to mcp function info
+ */
+const struct ecore_mcp_function_info
+*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn);
+#endif
+
+#ifndef LINUX_REMOVE
+/**
+ * @brief - count number of function with a matching personality on engine.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param personalities - a bitmask of ecore_pci_personality values
+ *
+ * @returns the count of all devices on engine whose personality match one of
+ * the bitsmasks.
+ */
+int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 personalities);
+#endif
+
+/**
+ * @brief Get the flash size value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_flash_size - flash size in bytes to be filled.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_flash_size);
+
+/**
+ * @brief Send driver version to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param version - Version value
+ * @param name - Protocol driver name
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_version *p_ver);
+
+/**
+ * @brief Read the MFW process kill counter
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Trigger a recovery process
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief A recovery handler must call this function as its first step.
+ * It is assumed that the handler is not run from an interrupt context.
+ *
+ * @param p_dev
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev);
+
+/**
+ * @brief Notify MFW about the change in base device properties
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param client - ecore client type
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_client client);
+
+/**
+ * @brief Notify MFW about the driver state
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param drv_state - Driver state
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_driver_state drv_state);
+
+/**
+ * @brief Read NPIV settings form the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_table - Array to hold the FC NPIV data. Client need allocate the
+ * required buffer. The field 'count' specifies number of NPIV
+ * entries. A value of 0 means the table was not populated.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_fc_npiv_tbl *p_table);
+
+/**
+ * @brief Send MTU size to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param mtu - MTU size
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 mtu);
+
+/**
+ * @brief Set LED status
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param mode - LED mode
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_led_mode mode);
+
+/**
+ * @brief Set secure mode
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
+ u32 addr);
+
+/**
+ * @brief Write to phy
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ * @param cmd - nvm command
+ * @param p_buf - nvm write buffer
+ * @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Write to nvm
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ * @param cmd - nvm command
+ * @param p_buf - nvm write buffer
+ * @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Put file begin
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
+ u32 addr);
+
+/**
+ * @brief Delete file
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev,
+ u32 addr);
+
+/**
+ * @brief Check latest response
+ *
+ * @param p_dev
+ * @param p_buf - nvm write buffer
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf);
+
+/**
+ * @brief Read from phy
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ * @param cmd - nvm command
+ * @param p_buf - nvm read buffer
+ * @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Read from nvm
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ * @param p_buf - nvm read buffer
+ * @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
+ u8 *p_buf, u32 len);
+
+/**
+ * @brief - Sends an NVM write command request to the MFW with
+ * payload.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or
+ * DRV_MSG_CODE_NVM_PUT_FILE_DATA
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param i_txn_size - Buffer size
+ * @param i_buf - Pointer to the buffer
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 i_txn_size,
+ u32 *i_buf);
+
+/**
+ * @brief - Sends an NVM read command request to the MFW to get
+ * a buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ * DRV_MSG_CODE_NVM_READ_NVRAM commands
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param o_txn_size - Buffer size output
+ * @param o_buf - Pointer to the buffer returned by the MFW.
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 *o_txn_size,
+ u32 *o_buf);
+
+/**
+ * @brief Read from sfp
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param port - transceiver port
+ * @param addr - I2C address
+ * @param offset - offset in sfp
+ * @param len - buffer length
+ * @param p_buf - buffer to read into
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset,
+ u32 len, u8 *p_buf);
+
+/**
+ * @brief Write to sfp
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param port - transceiver port
+ * @param addr - I2C address
+ * @param offset - offset in sfp
+ * @param len - buffer length
+ * @param p_buf - buffer to write from
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset,
+ u32 len, u8 *p_buf);
+
+/**
+ * @brief Gpio read
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param gpio - gpio number
+ * @param gpio_val - value read from gpio
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u32 *gpio_val);
+
+/**
+ * @brief Gpio write
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param gpio - gpio number
+ * @param gpio_val - value to write to gpio
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u16 gpio_val);
+
+/**
+ * @brief Gpio get information
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param gpio - gpio number
+ * @param gpio_direction - gpio is output (0) or input (1)
+ * @param gpio_ctrl - gpio control is uninitialized (0),
+ * path 0 (1), path 1 (2) or shared(3)
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u32 *gpio_direction,
+ u32 *gpio_ctrl);
+
+/**
+ * @brief Bist register test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Bist clock test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Bist nvm test - get number of images
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param num_images - number of images if operation was
+ * successful. 0 if not.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *num_images);
+
+/**
+ * @brief Bist nvm test - get image attributes by index
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_image_att - Attributes of image
+ * @param image_index - Index of image to get information for
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct bist_nvm_image_att *p_image_att,
+ u32 image_index);
+
+/**
+ * @brief ecore_mcp_get_temperature_info - get the status of the temperature
+ * sensors
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_temp_status - A pointer to an ecore_temperature_info structure to
+ * be filled with the temperature data
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_temperature_info *p_temp_info);
+
+/**
+ * @brief Get MBA versions - get MBA sub images versions
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_mba_vers - MBA versions array to fill
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_mba_versions(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mba_vers *p_mba_vers);
+
+/**
+ * @brief Count memory ecc events
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param num_events - number of memory ecc events
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 *num_events);
+
+struct ecore_mdump_info {
+ u32 reason;
+ u32 version;
+ u32 config;
+ u32 epoch;
+ u32 num_of_logs;
+ u32 valid_logs;
+};
+
+/**
+ * @brief - Gets the MFW crash dump configuration and logs info.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mdump_info
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t
+ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_info *p_mdump_info);
+
+/**
+ * @brief - Clears the MFW crash dump logs.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Clear the mdump retained data.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Processes the TLV request from MFW i.e., get the required TLV info
+ * from the ecore client and send it to the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+
+/**
+ * @brief - Return whether management firmware support smart AN
+ *
+ * @param p_hwfn
+ *
+ * @return bool - true iff feature is supported.
+ */
+bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_mng_tlv.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_mng_tlv.c
new file mode 100644
index 00000000..f7666472
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_mng_tlv.c
@@ -0,0 +1,1540 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_mcp.h"
+#include "ecore_hw.h"
+#include "reg_addr.h"
+
+#define TLV_TYPE(p) (p[0])
+#define TLV_LENGTH(p) (p[1])
+#define TLV_FLAGS(p) (p[3])
+
+static enum _ecore_status_t
+ecore_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
+{
+ switch (tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ case DRV_TLV_OS_DRIVER_STATES:
+ case DRV_TLV_PXE_BOOT_PROGRESS:
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ case DRV_TLV_TX_FRAMES_SENT:
+ case DRV_TLV_TX_BYTES_SENT:
+ *tlv_group |= ECORE_MFW_TLV_GENERIC;
+ break;
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ case DRV_TLV_PROMISCUOUS_MODE:
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_IOV_OFFLOAD:
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ case DRV_TLV_TX_QUEUES_FULL:
+ case DRV_TLV_RX_QUEUES_FULL:
+ *tlv_group |= ECORE_MFW_TLV_ETH;
+ break;
+ case DRV_TLV_SCSI_TO:
+ case DRV_TLV_R_T_TOV:
+ case DRV_TLV_R_A_TOV:
+ case DRV_TLV_E_D_TOV:
+ case DRV_TLV_CR_TOV:
+ case DRV_TLV_BOOT_TYPE:
+ case DRV_TLV_NPIV_STATE:
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ case DRV_TLV_SWITCH_NAME:
+ case DRV_TLV_SWITCH_PORT_NUM:
+ case DRV_TLV_SWITCH_PORT_ID:
+ case DRV_TLV_VENDOR_NAME:
+ case DRV_TLV_SWITCH_MODEL:
+ case DRV_TLV_SWITCH_FW_VER:
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ case DRV_TLV_PORT_ALIAS:
+ case DRV_TLV_PORT_STATE:
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ case DRV_TLV_CRC_ERROR_COUNT:
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_RJT:
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ case DRV_TLV_LOGOS_ISSUED:
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ case DRV_TLV_LOGOS_RECEIVED:
+ case DRV_TLV_ACCS_ISSUED:
+ case DRV_TLV_PRLIS_ISSUED:
+ case DRV_TLV_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_SENT_COUNT:
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ case DRV_TLV_RSCNS_RECEIVED:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ case DRV_TLV_TPRLOS_SENT:
+ case DRV_TLV_NOS_SENT_COUNT:
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ case DRV_TLV_OLS_COUNT:
+ case DRV_TLV_LR_COUNT:
+ case DRV_TLV_LRR_COUNT:
+ case DRV_TLV_LIP_SENT_COUNT:
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ case DRV_TLV_EOFA_COUNT:
+ case DRV_TLV_EOFNI_COUNT:
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ *tlv_group = ECORE_MFW_TLV_FCOE;
+ break;
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ case DRV_TLV_MAX_FRAME_SIZE:
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ *tlv_group |= ECORE_MFW_TLV_ISCSI;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int
+ecore_mfw_get_gen_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_generic *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ if (p_drv_buf->feat_flags_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->feat_flags;
+ return sizeof(p_drv_buf->feat_flags);
+ }
+ break;
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ if (p_drv_buf->local_mac_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->local_mac;
+ return sizeof(p_drv_buf->local_mac);
+ }
+ break;
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ if (p_drv_buf->additional_mac1_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->additional_mac1;
+ return sizeof(p_drv_buf->additional_mac1);
+ }
+ break;
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ if (p_drv_buf->additional_mac2_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->additional_mac2;
+ return sizeof(p_drv_buf->additional_mac2);
+ }
+ break;
+ case DRV_TLV_OS_DRIVER_STATES:
+ if (p_drv_buf->drv_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->drv_state;
+ return sizeof(p_drv_buf->drv_state);
+ }
+ break;
+ case DRV_TLV_PXE_BOOT_PROGRESS:
+ if (p_drv_buf->pxe_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->pxe_progress;
+ return sizeof(p_drv_buf->pxe_progress);
+ }
+ break;
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_eth_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_eth *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ if (p_drv_buf->lso_maxoff_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lso_maxoff_size;
+ return sizeof(p_drv_buf->lso_maxoff_size);
+ }
+ break;
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ if (p_drv_buf->lso_minseg_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lso_minseg_size;
+ return sizeof(p_drv_buf->lso_minseg_size);
+ }
+ break;
+ case DRV_TLV_PROMISCUOUS_MODE:
+ if (p_drv_buf->prom_mode_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->prom_mode;
+ return sizeof(p_drv_buf->prom_mode);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_descr_size;
+ return sizeof(p_drv_buf->tx_descr_size);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_descr_size;
+ return sizeof(p_drv_buf->rx_descr_size);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ if (p_drv_buf->netq_count_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->netq_count;
+ return sizeof(p_drv_buf->netq_count);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ if (p_drv_buf->tcp4_offloads_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tcp4_offloads;
+ return sizeof(p_drv_buf->tcp4_offloads);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ if (p_drv_buf->tcp6_offloads_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tcp6_offloads;
+ return sizeof(p_drv_buf->tcp6_offloads);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_descr_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_descr_qdepth;
+ return sizeof(p_drv_buf->tx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_descr_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_descr_qdepth;
+ return sizeof(p_drv_buf->rx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_IOV_OFFLOAD:
+ if (p_drv_buf->iov_offload_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->iov_offload;
+ return sizeof(p_drv_buf->iov_offload);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ if (p_drv_buf->txqs_empty_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->txqs_empty;
+ return sizeof(p_drv_buf->txqs_empty);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ if (p_drv_buf->rxqs_empty_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rxqs_empty;
+ return sizeof(p_drv_buf->rxqs_empty);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_FULL:
+ if (p_drv_buf->num_txqs_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_txqs_full;
+ return sizeof(p_drv_buf->num_txqs_full);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_FULL:
+ if (p_drv_buf->num_rxqs_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_rxqs_full;
+ return sizeof(p_drv_buf->num_rxqs_full);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_fcoe_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_fcoe *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_SCSI_TO:
+ if (p_drv_buf->scsi_timeout_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_timeout;
+ return sizeof(p_drv_buf->scsi_timeout);
+ }
+ break;
+ case DRV_TLV_R_T_TOV:
+ if (p_drv_buf->rt_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rt_tov;
+ return sizeof(p_drv_buf->rt_tov);
+ }
+ break;
+ case DRV_TLV_R_A_TOV:
+ if (p_drv_buf->ra_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ra_tov;
+ return sizeof(p_drv_buf->ra_tov);
+ }
+ break;
+ case DRV_TLV_E_D_TOV:
+ if (p_drv_buf->ed_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ed_tov;
+ return sizeof(p_drv_buf->ed_tov);
+ }
+ break;
+ case DRV_TLV_CR_TOV:
+ if (p_drv_buf->cr_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->cr_tov;
+ return sizeof(p_drv_buf->cr_tov);
+ }
+ break;
+ case DRV_TLV_BOOT_TYPE:
+ if (p_drv_buf->boot_type_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_type;
+ return sizeof(p_drv_buf->boot_type);
+ }
+ break;
+ case DRV_TLV_NPIV_STATE:
+ if (p_drv_buf->npiv_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->npiv_state;
+ return sizeof(p_drv_buf->npiv_state);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ if (p_drv_buf->num_npiv_ids_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_npiv_ids;
+ return sizeof(p_drv_buf->num_npiv_ids);
+ }
+ break;
+ case DRV_TLV_SWITCH_NAME:
+ if (p_drv_buf->switch_name_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_name;
+ return sizeof(p_drv_buf->switch_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_NUM:
+ if (p_drv_buf->switch_portnum_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_portnum;
+ return sizeof(p_drv_buf->switch_portnum);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_ID:
+ if (p_drv_buf->switch_portid_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_portid;
+ return sizeof(p_drv_buf->switch_portid);
+ }
+ break;
+ case DRV_TLV_VENDOR_NAME:
+ if (p_drv_buf->vendor_name_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->vendor_name;
+ return sizeof(p_drv_buf->vendor_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_MODEL:
+ if (p_drv_buf->switch_model_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_model;
+ return sizeof(p_drv_buf->switch_model);
+ }
+ break;
+ case DRV_TLV_SWITCH_FW_VER:
+ if (p_drv_buf->switch_fw_version_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_fw_version;
+ return sizeof(p_drv_buf->switch_fw_version);
+ }
+ break;
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ if (p_drv_buf->qos_pri_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->qos_pri;
+ return sizeof(p_drv_buf->qos_pri);
+ }
+ break;
+ case DRV_TLV_PORT_ALIAS:
+ if (p_drv_buf->port_alias_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->port_alias;
+ return sizeof(p_drv_buf->port_alias);
+ }
+ break;
+ case DRV_TLV_PORT_STATE:
+ if (p_drv_buf->port_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->port_state;
+ return sizeof(p_drv_buf->port_state);
+ }
+ break;
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_tx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fip_tx_descr_size;
+ return sizeof(p_drv_buf->fip_tx_descr_size);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_rx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fip_rx_descr_size;
+ return sizeof(p_drv_buf->fip_rx_descr_size);
+ }
+ break;
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ if (p_drv_buf->link_failures_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->link_failures;
+ return sizeof(p_drv_buf->link_failures);
+ }
+ break;
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ if (p_drv_buf->fcoe_boot_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_boot_progress;
+ return sizeof(p_drv_buf->fcoe_boot_progress);
+ }
+ break;
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ if (p_drv_buf->rx_bcast_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bcast;
+ return sizeof(p_drv_buf->rx_bcast);
+ }
+ break;
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ if (p_drv_buf->tx_bcast_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bcast;
+ return sizeof(p_drv_buf->tx_bcast);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_txq_depth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_txq_depth;
+ return sizeof(p_drv_buf->fcoe_txq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_rxq_depth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rxq_depth;
+ return sizeof(p_drv_buf->fcoe_rxq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rx_frames;
+ return sizeof(p_drv_buf->fcoe_rx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rx_bytes;
+ return sizeof(p_drv_buf->fcoe_rx_bytes);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ if (p_drv_buf->fcoe_tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_tx_frames;
+ return sizeof(p_drv_buf->fcoe_tx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ if (p_drv_buf->fcoe_tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_tx_bytes;
+ return sizeof(p_drv_buf->fcoe_tx_bytes);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_COUNT:
+ if (p_drv_buf->crc_count_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_count;
+ return sizeof(p_drv_buf->crc_count);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[0];
+ return sizeof(p_drv_buf->crc_err_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[1];
+ return sizeof(p_drv_buf->crc_err_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[2];
+ return sizeof(p_drv_buf->crc_err_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[3];
+ return sizeof(p_drv_buf->crc_err_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[4];
+ return sizeof(p_drv_buf->crc_err_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[0];
+ return sizeof(p_drv_buf->crc_err_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[1];
+ return sizeof(p_drv_buf->crc_err_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[2];
+ return sizeof(p_drv_buf->crc_err_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[3];
+ return sizeof(p_drv_buf->crc_err_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[4];
+ return sizeof(p_drv_buf->crc_err_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ if (p_drv_buf->losync_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->losync_err;
+ return sizeof(p_drv_buf->losync_err);
+ }
+ break;
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ if (p_drv_buf->losig_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->losig_err;
+ return sizeof(p_drv_buf->losig_err);
+ }
+ break;
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ if (p_drv_buf->primtive_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->primtive_err;
+ return sizeof(p_drv_buf->primtive_err);
+ }
+ break;
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ if (p_drv_buf->disparity_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->disparity_err;
+ return sizeof(p_drv_buf->disparity_err);
+ }
+ break;
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ if (p_drv_buf->code_violation_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->code_violation_err;
+ return sizeof(p_drv_buf->code_violation_err);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ if (p_drv_buf->flogi_param_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[0];
+ return sizeof(p_drv_buf->flogi_param[0]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ if (p_drv_buf->flogi_param_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[1];
+ return sizeof(p_drv_buf->flogi_param[1]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ if (p_drv_buf->flogi_param_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[2];
+ return sizeof(p_drv_buf->flogi_param[2]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ if (p_drv_buf->flogi_param_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[3];
+ return sizeof(p_drv_buf->flogi_param[3]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ if (p_drv_buf->flogi_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_tstamp;
+ return sizeof(p_drv_buf->flogi_tstamp);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ if (p_drv_buf->flogi_acc_param_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[0];
+ return sizeof(p_drv_buf->flogi_acc_param[0]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ if (p_drv_buf->flogi_acc_param_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[1];
+ return sizeof(p_drv_buf->flogi_acc_param[1]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ if (p_drv_buf->flogi_acc_param_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[2];
+ return sizeof(p_drv_buf->flogi_acc_param[2]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ if (p_drv_buf->flogi_acc_param_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[3];
+ return sizeof(p_drv_buf->flogi_acc_param[3]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ if (p_drv_buf->flogi_acc_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_tstamp;
+ return sizeof(p_drv_buf->flogi_acc_tstamp);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_RJT:
+ if (p_drv_buf->flogi_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_rjt;
+ return sizeof(p_drv_buf->flogi_rjt);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ if (p_drv_buf->flogi_rjt_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_rjt_tstamp;
+ return sizeof(p_drv_buf->flogi_rjt_tstamp);
+ }
+ break;
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ if (p_drv_buf->fdiscs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdiscs;
+ return sizeof(p_drv_buf->fdiscs);
+ }
+ break;
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ if (p_drv_buf->fdisc_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdisc_acc;
+ return sizeof(p_drv_buf->fdisc_acc);
+ }
+ break;
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ if (p_drv_buf->fdisc_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdisc_rjt;
+ return sizeof(p_drv_buf->fdisc_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ if (p_drv_buf->plogi_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi;
+ return sizeof(p_drv_buf->plogi);
+ }
+ break;
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ if (p_drv_buf->plogi_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc;
+ return sizeof(p_drv_buf->plogi_acc);
+ }
+ break;
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ if (p_drv_buf->plogi_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_rjt;
+ return sizeof(p_drv_buf->plogi_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[0];
+ return sizeof(p_drv_buf->plogi_dst_fcid[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[1];
+ return sizeof(p_drv_buf->plogi_dst_fcid[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[2];
+ return sizeof(p_drv_buf->plogi_dst_fcid[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[3];
+ return sizeof(p_drv_buf->plogi_dst_fcid[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[4];
+ return sizeof(p_drv_buf->plogi_dst_fcid[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[0];
+ return sizeof(p_drv_buf->plogi_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[1];
+ return sizeof(p_drv_buf->plogi_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[2];
+ return sizeof(p_drv_buf->plogi_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[3];
+ return sizeof(p_drv_buf->plogi_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[4];
+ return sizeof(p_drv_buf->plogi_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[0];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[1];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[2];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[3];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[4];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[0];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[1];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[2];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[3];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[4];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOGOS_ISSUED:
+ if (p_drv_buf->tx_plogos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_plogos;
+ return sizeof(p_drv_buf->tx_plogos);
+ }
+ break;
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ if (p_drv_buf->plogo_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_acc;
+ return sizeof(p_drv_buf->plogo_acc);
+ }
+ break;
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ if (p_drv_buf->plogo_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_rjt;
+ return sizeof(p_drv_buf->plogo_rjt);
+ }
+ break;
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[0];
+ return sizeof(p_drv_buf->plogo_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[1];
+ return sizeof(p_drv_buf->plogo_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[2];
+ return sizeof(p_drv_buf->plogo_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[3];
+ return sizeof(p_drv_buf->plogo_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[4];
+ return sizeof(p_drv_buf->plogo_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[0];
+ return sizeof(p_drv_buf->plogo_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[1];
+ return sizeof(p_drv_buf->plogo_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[2];
+ return sizeof(p_drv_buf->plogo_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[3];
+ return sizeof(p_drv_buf->plogo_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[4];
+ return sizeof(p_drv_buf->plogo_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOGOS_RECEIVED:
+ if (p_drv_buf->rx_logos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_logos;
+ return sizeof(p_drv_buf->rx_logos);
+ }
+ break;
+ case DRV_TLV_ACCS_ISSUED:
+ if (p_drv_buf->tx_accs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_accs;
+ return sizeof(p_drv_buf->tx_accs);
+ }
+ break;
+ case DRV_TLV_PRLIS_ISSUED:
+ if (p_drv_buf->tx_prlis_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_prlis;
+ return sizeof(p_drv_buf->tx_prlis);
+ }
+ break;
+ case DRV_TLV_ACCS_RECEIVED:
+ if (p_drv_buf->rx_accs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_accs;
+ return sizeof(p_drv_buf->rx_accs);
+ }
+ break;
+ case DRV_TLV_ABTS_SENT_COUNT:
+ if (p_drv_buf->tx_abts_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_abts;
+ return sizeof(p_drv_buf->tx_abts);
+ }
+ break;
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ if (p_drv_buf->rx_abts_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_abts_acc;
+ return sizeof(p_drv_buf->rx_abts_acc);
+ }
+ break;
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ if (p_drv_buf->rx_abts_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_abts_rjt;
+ return sizeof(p_drv_buf->rx_abts_rjt);
+ }
+ break;
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[0];
+ return sizeof(p_drv_buf->abts_dst_fcid[0]);
+ }
+ break;
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[1];
+ return sizeof(p_drv_buf->abts_dst_fcid[1]);
+ }
+ break;
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[2];
+ return sizeof(p_drv_buf->abts_dst_fcid[2]);
+ }
+ break;
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[3];
+ return sizeof(p_drv_buf->abts_dst_fcid[3]);
+ }
+ break;
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[4];
+ return sizeof(p_drv_buf->abts_dst_fcid[4]);
+ }
+ break;
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[0];
+ return sizeof(p_drv_buf->abts_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[1];
+ return sizeof(p_drv_buf->abts_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[2];
+ return sizeof(p_drv_buf->abts_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[3];
+ return sizeof(p_drv_buf->abts_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[4];
+ return sizeof(p_drv_buf->abts_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_RSCNS_RECEIVED:
+ if (p_drv_buf->rx_rscn_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn;
+ return sizeof(p_drv_buf->rx_rscn);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ if (p_drv_buf->rx_rscn_nport_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[0];
+ return sizeof(p_drv_buf->rx_rscn_nport[0]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ if (p_drv_buf->rx_rscn_nport_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[1];
+ return sizeof(p_drv_buf->rx_rscn_nport[1]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ if (p_drv_buf->rx_rscn_nport_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[2];
+ return sizeof(p_drv_buf->rx_rscn_nport[2]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ if (p_drv_buf->rx_rscn_nport_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[3];
+ return sizeof(p_drv_buf->rx_rscn_nport[3]);
+ }
+ break;
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ if (p_drv_buf->tx_lun_rst_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_lun_rst;
+ return sizeof(p_drv_buf->tx_lun_rst);
+ }
+ break;
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ if (p_drv_buf->abort_task_sets_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abort_task_sets;
+ return sizeof(p_drv_buf->abort_task_sets);
+ }
+ break;
+ case DRV_TLV_TPRLOS_SENT:
+ if (p_drv_buf->tx_tprlos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_tprlos;
+ return sizeof(p_drv_buf->tx_tprlos);
+ }
+ break;
+ case DRV_TLV_NOS_SENT_COUNT:
+ if (p_drv_buf->tx_nos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_nos;
+ return sizeof(p_drv_buf->tx_nos);
+ }
+ break;
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ if (p_drv_buf->rx_nos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_nos;
+ return sizeof(p_drv_buf->rx_nos);
+ }
+ break;
+ case DRV_TLV_OLS_COUNT:
+ if (p_drv_buf->ols_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ols;
+ return sizeof(p_drv_buf->ols);
+ }
+ break;
+ case DRV_TLV_LR_COUNT:
+ if (p_drv_buf->lr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lr;
+ return sizeof(p_drv_buf->lr);
+ }
+ break;
+ case DRV_TLV_LRR_COUNT:
+ if (p_drv_buf->lrr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lrr;
+ return sizeof(p_drv_buf->lrr);
+ }
+ break;
+ case DRV_TLV_LIP_SENT_COUNT:
+ if (p_drv_buf->tx_lip_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_lip;
+ return sizeof(p_drv_buf->tx_lip);
+ }
+ break;
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ if (p_drv_buf->rx_lip_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_lip;
+ return sizeof(p_drv_buf->rx_lip);
+ }
+ break;
+ case DRV_TLV_EOFA_COUNT:
+ if (p_drv_buf->eofa_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->eofa;
+ return sizeof(p_drv_buf->eofa);
+ }
+ break;
+ case DRV_TLV_EOFNI_COUNT:
+ if (p_drv_buf->eofni_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->eofni;
+ return sizeof(p_drv_buf->eofni);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ if (p_drv_buf->scsi_chks_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chks;
+ return sizeof(p_drv_buf->scsi_chks);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_cond_met_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_cond_met;
+ return sizeof(p_drv_buf->scsi_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ if (p_drv_buf->scsi_busy_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_busy;
+ return sizeof(p_drv_buf->scsi_busy);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ if (p_drv_buf->scsi_inter_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_inter;
+ return sizeof(p_drv_buf->scsi_inter);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_inter_cond_met_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_inter_cond_met;
+ return sizeof(p_drv_buf->scsi_inter_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ if (p_drv_buf->scsi_rsv_conflicts_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rsv_conflicts;
+ return sizeof(p_drv_buf->scsi_rsv_conflicts);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ if (p_drv_buf->scsi_tsk_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_tsk_full;
+ return sizeof(p_drv_buf->scsi_tsk_full);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ if (p_drv_buf->scsi_aca_active_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_aca_active;
+ return sizeof(p_drv_buf->scsi_aca_active);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ if (p_drv_buf->scsi_tsk_abort_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_tsk_abort;
+ return sizeof(p_drv_buf->scsi_tsk_abort);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[0];
+ return sizeof(p_drv_buf->scsi_rx_chk[0]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[1];
+ return sizeof(p_drv_buf->scsi_rx_chk[1]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[2];
+ return sizeof(p_drv_buf->scsi_rx_chk[2]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[3];
+ return sizeof(p_drv_buf->scsi_rx_chk[4]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[4];
+ return sizeof(p_drv_buf->scsi_rx_chk[4]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[0];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[1];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[2];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[3];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[4];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[4]);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_iscsi_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_iscsi *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ if (p_drv_buf->target_llmnr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->target_llmnr;
+ return sizeof(p_drv_buf->target_llmnr);
+ }
+ break;
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->header_digest_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->header_digest;
+ return sizeof(p_drv_buf->header_digest);
+ }
+ break;
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->data_digest_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->data_digest;
+ return sizeof(p_drv_buf->data_digest);
+ }
+ break;
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ if (p_drv_buf->auth_method_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->auth_method;
+ return sizeof(p_drv_buf->auth_method);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ if (p_drv_buf->boot_taget_portal_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_taget_portal;
+ return sizeof(p_drv_buf->boot_taget_portal);
+ }
+ break;
+ case DRV_TLV_MAX_FRAME_SIZE:
+ if (p_drv_buf->frame_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->frame_size;
+ return sizeof(p_drv_buf->frame_size);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_desc_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_desc_size;
+ return sizeof(p_drv_buf->tx_desc_size);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_desc_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_desc_size;
+ return sizeof(p_drv_buf->rx_desc_size);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ if (p_drv_buf->boot_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_progress;
+ return sizeof(p_drv_buf->boot_progress);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_desc_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_desc_qdepth;
+ return sizeof(p_drv_buf->tx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_desc_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_desc_qdepth;
+ return sizeof(p_drv_buf->rx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static enum _ecore_status_t ecore_mfw_update_tlvs(struct ecore_hwfn *p_hwfn,
+ u8 tlv_group, u8 *p_mfw_buf,
+ u32 size)
+{
+ union ecore_mfw_tlv_data *p_tlv_data;
+ struct ecore_drv_tlv_hdr tlv;
+ u8 *p_tlv_ptr = OSAL_NULL, *p_temp;
+ u32 offset;
+ int len;
+
+ p_tlv_data = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_tlv_data));
+ if (!p_tlv_data)
+ return ECORE_NOMEM;
+
+ if (OSAL_MFW_FILL_TLV_DATA(p_hwfn, tlv_group, p_tlv_data)) {
+ OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);
+ return ECORE_INVAL;
+ }
+
+ offset = 0;
+ OSAL_MEMSET(&tlv, 0, sizeof(tlv));
+ while (offset < size) {
+ p_temp = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_temp);
+ tlv.tlv_length = TLV_LENGTH(p_temp);
+ tlv.tlv_flags = TLV_FLAGS(p_temp);
+ DP_INFO(p_hwfn, "Type %d length = %d flags = 0x%x\n",
+ tlv.tlv_type, tlv.tlv_length, tlv.tlv_flags);
+
+ offset += sizeof(tlv);
+ if (tlv_group == ECORE_MFW_TLV_GENERIC)
+ len = ecore_mfw_get_gen_tlv_value(&tlv,
+ &p_tlv_data->generic, &p_tlv_ptr);
+ else if (tlv_group == ECORE_MFW_TLV_ETH)
+ len = ecore_mfw_get_eth_tlv_value(&tlv,
+ &p_tlv_data->eth, &p_tlv_ptr);
+ else if (tlv_group == ECORE_MFW_TLV_FCOE)
+ len = ecore_mfw_get_fcoe_tlv_value(&tlv,
+ &p_tlv_data->fcoe, &p_tlv_ptr);
+ else
+ len = ecore_mfw_get_iscsi_tlv_value(&tlv,
+ &p_tlv_data->iscsi, &p_tlv_ptr);
+
+ if (len > 0) {
+ OSAL_WARN(len > 4 * tlv.tlv_length,
+ "Incorrect MFW TLV length");
+ len = OSAL_MIN_T(int, len, 4 * tlv.tlv_length);
+ tlv.tlv_flags |= ECORE_DRV_TLV_FLAGS_CHANGED;
+ /* TODO: Endianness handling? */
+ OSAL_MEMCPY(p_mfw_buf, &tlv, sizeof(tlv));
+ OSAL_MEMCPY(p_mfw_buf + offset, p_tlv_ptr, len);
+ }
+
+ offset += sizeof(u32) * tlv.tlv_length;
+ }
+
+ OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ u32 addr, size, offset, resp, param, val;
+ u8 tlv_group = 0, id, *p_mfw_buf = OSAL_NULL, *p_temp;
+ u32 global_offsize, global_addr;
+ enum _ecore_status_t rc;
+ struct ecore_drv_tlv_hdr tlv;
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + OFFSETOF(struct public_global, data_ptr);
+ size = ecore_rd(p_hwfn, p_ptt, global_addr +
+ OFFSETOF(struct public_global, data_size));
+
+ if (!size) {
+ DP_NOTICE(p_hwfn, false, "Invalid TLV req size = %d\n", size);
+ goto drv_done;
+ }
+
+ p_mfw_buf = (void *)OSAL_VZALLOC(p_hwfn->p_dev, size);
+ if (!p_mfw_buf) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed allocate memory for p_mfw_buf\n");
+ goto drv_done;
+ }
+
+ /* Read the TLV request to local buffer */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ val = ecore_rd(p_hwfn, p_ptt, addr + offset);
+ OSAL_MEMCPY(&p_mfw_buf[offset], &val, sizeof(u32));
+ }
+
+ /* Parse the headers to enumerate the requested TLV groups */
+ for (offset = 0; offset < size;
+ offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
+ p_temp = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_temp);
+ tlv.tlv_length = TLV_LENGTH(p_temp);
+ if (ecore_mfw_get_tlv_group(tlv.tlv_type, &tlv_group))
+ goto drv_done;
+ }
+
+ /* Update the TLV values in the local buffer */
+ for (id = ECORE_MFW_TLV_GENERIC; id < ECORE_MFW_TLV_MAX; id <<= 1) {
+ if (tlv_group & id) {
+ if (ecore_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size))
+ goto drv_done;
+ }
+ }
+
+ /* Write the TLV data to shared memory */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ val = (u32)p_mfw_buf[offset];
+ ecore_wr(p_hwfn, p_ptt, addr + offset, val);
+ offset += sizeof(u32);
+ }
+
+drv_done:
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp,
+ &param);
+
+ OSAL_VFREE(p_hwfn->p_dev, p_mfw_buf);
+
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_proto_if.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_proto_if.h
new file mode 100644
index 00000000..f91b25e2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_proto_if.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_PROTO_IF_H__
+#define __ECORE_PROTO_IF_H__
+
+/*
+ * PF parameters (according to personality/protocol)
+ */
+
+#define ECORE_ROCE_PROTOCOL_INDEX (3)
+
+struct ecore_eth_pf_params {
+ /* The following parameters are used during HW-init
+ * and these parameters need to be passed as arguments
+ * to update_pf_params routine invoked before slowpath start
+ */
+ u16 num_cons;
+
+ /* per-VF number of CIDs */
+ u8 num_vf_cons;
+#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32)
+
+ /* To enable arfs, previous to HW-init a positive number needs to be
+ * set [as filters require allocated searcher ILT memory].
+ * This will set the maximal number of configured steering-filters.
+ */
+ u32 num_arfs_filters;
+
+ /* To allow VF to change its MAC despite of PF set forced MAC. */
+ bool allow_vf_mac_change;
+};
+
+/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
+struct ecore_iscsi_pf_params {
+ u64 glbl_q_params_addr;
+ u64 bdq_pbl_base_addr[2];
+ u16 cq_num_entries;
+ u16 cmdq_num_entries;
+ u32 two_msl_timer;
+ u16 tx_sws_timer;
+ /* The following parameters are used during HW-init
+ * and these parameters need to be passed as arguments
+ * to update_pf_params routine invoked before slowpath start
+ */
+ u16 num_cons;
+ u16 num_tasks;
+
+ /* The following parameters are used during protocol-init */
+ u16 half_way_close_timeout;
+ u16 bdq_xoff_threshold[2];
+ u16 bdq_xon_threshold[2];
+ u16 cmdq_xoff_threshold;
+ u16 cmdq_xon_threshold;
+ u16 rq_buffer_size;
+
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 num_queues;
+ u8 log_page_size;
+ u8 rqe_log_size;
+ u8 max_fin_rt;
+ u8 gl_rq_pi;
+ u8 gl_cmd_pi;
+ u8 debug_mode;
+ u8 ll2_ooo_queue_id;
+ u8 ooo_enable;
+
+ u8 is_target;
+ u8 bdq_pbl_num_entries[2];
+ u8 disable_stats_collection;
+};
+
+enum ecore_rdma_protocol {
+ ECORE_RDMA_PROTOCOL_DEFAULT,
+ ECORE_RDMA_PROTOCOL_ROCE,
+ ECORE_RDMA_PROTOCOL_IWARP,
+};
+
+struct ecore_rdma_pf_params {
+ /* Supplied to ECORE during resource allocation (may affect the ILT and
+ * the doorbell BAR).
+ */
+ u32 min_dpis; /* number of requested DPIs */
+ u32 num_mrs; /* number of requested memory regions*/
+ u32 num_qps; /* number of requested Queue Pairs */
+ u32 num_srqs; /* number of requested SRQ */
+ u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */
+ u8 gl_pi; /* protocol index */
+
+ /* Will allocate rate limiters to be used with QPs */
+ u8 enable_dcqcn;
+
+ /* TCP port number used for the iwarp traffic */
+ u16 iwarp_port;
+ enum ecore_rdma_protocol rdma_protocol;
+};
+
+struct ecore_pf_params {
+ struct ecore_eth_pf_params eth_pf_params;
+ struct ecore_iscsi_pf_params iscsi_pf_params;
+ struct ecore_rdma_pf_params rdma_pf_params;
+};
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_rt_defs.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_rt_defs.h
new file mode 100644
index 00000000..721b8c15
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_rt_defs.h
@@ -0,0 +1,538 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __RT_DEFS_H__
+#define __RT_DEFS_H__
+
+/* Runtime array offsets */
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET 18
+#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET 19
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET 20
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET 21
+#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET 22
+#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET 23
+#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET 24
+#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET 25
+#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET 26
+#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET 27
+#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET 28
+#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET 29
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET 30
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET 31
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET 32
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET 33
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET 34
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET 35
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET 36
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET 37
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 38
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 39
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 40
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 41
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 42
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 43
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 44
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 45
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1069
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2093
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6509
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6510
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6511
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6512
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6513
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6514
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6515
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6516
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6517
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6518
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6519
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6520
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6521
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6522
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6523
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6524
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6525
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6527
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6529
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6530
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6531
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6532
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6533
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6534
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6535
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6536
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6537
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6538
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6539
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6540
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6541
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6542
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6543
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6544
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6545
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6546
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6547
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6548
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6549
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6550
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6551
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6552
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6553
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6554
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6555
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6556
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6557
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6558
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6559
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6560
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6561
+#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET 6562
+#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET 6563
+#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET 6564
+#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET 6565
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6566
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 26414
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 32980
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 32981
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 32982
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 32983
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 32984
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 32985
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 32986
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 32987
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 32988
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 32989
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 32990
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 32991
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 32992
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 33408
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 34016
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 34017
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 34018
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 34019
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 34020
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 34021
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 34022
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 34023
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 34024
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 34025
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 34026
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 34027
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 34028
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 34029
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 34030
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 34031
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 34032
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 34033
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 34034
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 34035
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 34036
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 34037
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 34038
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 34039
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 34040
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 34041
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 34042
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 34043
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 34044
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 34045
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 34046
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 34047
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 34048
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 34049
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 34050
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 34051
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 34052
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 34053
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 34054
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 34055
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 34056
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 34057
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 34058
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 34059
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 34060
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 34061
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 34062
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 34063
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 34064
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 34065
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 34066
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 34067
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 34068
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 34069
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 34070
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 34071
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 34072
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 34073
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 34074
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 34075
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 34076
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 34077
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 34078
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 34079
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 34080
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 34081
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211
+#define QM_REG_PTRTBLOTHER_RT_SIZE 256
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493
+#define QM_REG_PQTX2PF_0_RT_OFFSET 34494
+#define QM_REG_PQTX2PF_1_RT_OFFSET 34495
+#define QM_REG_PQTX2PF_2_RT_OFFSET 34496
+#define QM_REG_PQTX2PF_3_RT_OFFSET 34497
+#define QM_REG_PQTX2PF_4_RT_OFFSET 34498
+#define QM_REG_PQTX2PF_5_RT_OFFSET 34499
+#define QM_REG_PQTX2PF_6_RT_OFFSET 34500
+#define QM_REG_PQTX2PF_7_RT_OFFSET 34501
+#define QM_REG_PQTX2PF_8_RT_OFFSET 34502
+#define QM_REG_PQTX2PF_9_RT_OFFSET 34503
+#define QM_REG_PQTX2PF_10_RT_OFFSET 34504
+#define QM_REG_PQTX2PF_11_RT_OFFSET 34505
+#define QM_REG_PQTX2PF_12_RT_OFFSET 34506
+#define QM_REG_PQTX2PF_13_RT_OFFSET 34507
+#define QM_REG_PQTX2PF_14_RT_OFFSET 34508
+#define QM_REG_PQTX2PF_15_RT_OFFSET 34509
+#define QM_REG_PQTX2PF_16_RT_OFFSET 34510
+#define QM_REG_PQTX2PF_17_RT_OFFSET 34511
+#define QM_REG_PQTX2PF_18_RT_OFFSET 34512
+#define QM_REG_PQTX2PF_19_RT_OFFSET 34513
+#define QM_REG_PQTX2PF_20_RT_OFFSET 34514
+#define QM_REG_PQTX2PF_21_RT_OFFSET 34515
+#define QM_REG_PQTX2PF_22_RT_OFFSET 34516
+#define QM_REG_PQTX2PF_23_RT_OFFSET 34517
+#define QM_REG_PQTX2PF_24_RT_OFFSET 34518
+#define QM_REG_PQTX2PF_25_RT_OFFSET 34519
+#define QM_REG_PQTX2PF_26_RT_OFFSET 34520
+#define QM_REG_PQTX2PF_27_RT_OFFSET 34521
+#define QM_REG_PQTX2PF_28_RT_OFFSET 34522
+#define QM_REG_PQTX2PF_29_RT_OFFSET 34523
+#define QM_REG_PQTX2PF_30_RT_OFFSET 34524
+#define QM_REG_PQTX2PF_31_RT_OFFSET 34525
+#define QM_REG_PQTX2PF_32_RT_OFFSET 34526
+#define QM_REG_PQTX2PF_33_RT_OFFSET 34527
+#define QM_REG_PQTX2PF_34_RT_OFFSET 34528
+#define QM_REG_PQTX2PF_35_RT_OFFSET 34529
+#define QM_REG_PQTX2PF_36_RT_OFFSET 34530
+#define QM_REG_PQTX2PF_37_RT_OFFSET 34531
+#define QM_REG_PQTX2PF_38_RT_OFFSET 34532
+#define QM_REG_PQTX2PF_39_RT_OFFSET 34533
+#define QM_REG_PQTX2PF_40_RT_OFFSET 34534
+#define QM_REG_PQTX2PF_41_RT_OFFSET 34535
+#define QM_REG_PQTX2PF_42_RT_OFFSET 34536
+#define QM_REG_PQTX2PF_43_RT_OFFSET 34537
+#define QM_REG_PQTX2PF_44_RT_OFFSET 34538
+#define QM_REG_PQTX2PF_45_RT_OFFSET 34539
+#define QM_REG_PQTX2PF_46_RT_OFFSET 34540
+#define QM_REG_PQTX2PF_47_RT_OFFSET 34541
+#define QM_REG_PQTX2PF_48_RT_OFFSET 34542
+#define QM_REG_PQTX2PF_49_RT_OFFSET 34543
+#define QM_REG_PQTX2PF_50_RT_OFFSET 34544
+#define QM_REG_PQTX2PF_51_RT_OFFSET 34545
+#define QM_REG_PQTX2PF_52_RT_OFFSET 34546
+#define QM_REG_PQTX2PF_53_RT_OFFSET 34547
+#define QM_REG_PQTX2PF_54_RT_OFFSET 34548
+#define QM_REG_PQTX2PF_55_RT_OFFSET 34549
+#define QM_REG_PQTX2PF_56_RT_OFFSET 34550
+#define QM_REG_PQTX2PF_57_RT_OFFSET 34551
+#define QM_REG_PQTX2PF_58_RT_OFFSET 34552
+#define QM_REG_PQTX2PF_59_RT_OFFSET 34553
+#define QM_REG_PQTX2PF_60_RT_OFFSET 34554
+#define QM_REG_PQTX2PF_61_RT_OFFSET 34555
+#define QM_REG_PQTX2PF_62_RT_OFFSET 34556
+#define QM_REG_PQTX2PF_63_RT_OFFSET 34557
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 35098
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354
+#define QM_REG_RLPFPERIOD_RT_OFFSET 35355
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356
+#define QM_REG_RLPFINCVAL_RT_OFFSET 35357
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 35389
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 35405
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 35439
+#define QM_REG_WFQPFCRD_RT_SIZE 256
+#define QM_REG_WFQPFENABLE_RT_OFFSET 35695
+#define QM_REG_WFQVPENABLE_RT_OFFSET 35696
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 36209
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 37233
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 37745
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_PTRTBLTX_RT_OFFSET 38257
+#define QM_REG_PTRTBLTX_RT_SIZE 1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
+#define QM_REG_VOQCRDLINE_RT_OFFSET 39601
+#define QM_REG_VOQCRDLINE_RT_SIZE 36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
+#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022
+
+#define RUNTIME_ARRAY_SIZE 43023
+
+/* Init Callbacks */
+#define DMAE_READY_CB 0
+
+#endif /* __RT_DEFS_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_api.h
new file mode 100644
index 00000000..4633dbeb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_api.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_SP_API_H__
+#define __ECORE_SP_API_H__
+
+#include "ecore_status.h"
+
+enum spq_mode {
+ ECORE_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */
+ ECORE_SPQ_MODE_CB, /* Client supplies a callback */
+ ECORE_SPQ_MODE_EBLOCK, /* ECORE should block until completion */
+};
+
+struct ecore_hwfn;
+union event_ring_data;
+struct eth_slow_path_rx_cqe;
+
+struct ecore_spq_comp_cb {
+ void (*function)(struct ecore_hwfn *,
+ void *,
+ union event_ring_data *,
+ u8 fw_return_code);
+ void *cookie;
+};
+
+
+/**
+ * @brief ecore_eth_cqe_completion - handles the completion of a
+ * ramrod on the cqe ring
+ *
+ * @param p_hwfn
+ * @param cqe
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe);
+/**
+ * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
+ * update Ramrod
+ *
+ * This ramrod is sent to update a tunneling configuration
+ * for a physical function (PF).
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_tunn - pf update tunneling parameters
+ * @param comp_mode - completion mode
+ * @param p_comp_data - callback function
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t
+ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_tunnel_info *p_tunn,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.c
new file mode 100644
index 00000000..b43baf9d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.c
@@ -0,0 +1,660 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bcm_osal.h"
+
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_chain.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_sp_commands.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_hw.h"
+#include "ecore_dcbx.h"
+#include "ecore_sriov.h"
+#include "ecore_vf.h"
+
+enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry **pp_ent,
+ u8 cmd,
+ u8 protocol,
+ struct ecore_sp_init_data *p_data)
+{
+ u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc;
+
+ if (!pp_ent)
+ return ECORE_INVAL;
+
+ /* Get an SPQ entry */
+ rc = ecore_spq_get_entry(p_hwfn, pp_ent);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Fill the SPQ entry */
+ p_ent = *pp_ent;
+ p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
+ p_ent->elem.hdr.cmd_id = cmd;
+ p_ent->elem.hdr.protocol_id = protocol;
+ p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
+ p_ent->comp_mode = p_data->comp_mode;
+ p_ent->comp_done.done = 0;
+
+ switch (p_ent->comp_mode) {
+ case ECORE_SPQ_MODE_EBLOCK:
+ p_ent->comp_cb.cookie = &p_ent->comp_done;
+ break;
+
+ case ECORE_SPQ_MODE_BLOCK:
+ if (!p_data->p_comp_data)
+ return ECORE_INVAL;
+
+ p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
+ break;
+
+ case ECORE_SPQ_MODE_CB:
+ if (!p_data->p_comp_data)
+ p_ent->comp_cb.function = OSAL_NULL;
+ else
+ p_ent->comp_cb = *p_data->p_comp_data;
+ break;
+
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
+ opaque_cid, cmd, protocol,
+ (unsigned long)&p_ent->ramrod,
+ D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
+ ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+
+ OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
+
+ return ECORE_SUCCESS;
+}
+
+static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
+{
+ switch (type) {
+ case ECORE_TUNN_CLSS_MAC_VLAN:
+ return TUNNEL_CLSS_MAC_VLAN;
+ case ECORE_TUNN_CLSS_MAC_VNI:
+ return TUNNEL_CLSS_MAC_VNI;
+ case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
+ return TUNNEL_CLSS_INNER_MAC_VLAN;
+ case ECORE_TUNN_CLSS_INNER_MAC_VNI:
+ return TUNNEL_CLSS_INNER_MAC_VNI;
+ case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
+ return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
+ default:
+ return TUNNEL_CLSS_MAC_VLAN;
+ }
+}
+
+static void
+ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src,
+ bool b_pf_start)
+{
+ if (p_src->vxlan.b_update_mode || b_pf_start)
+ p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
+
+ if (p_src->l2_gre.b_update_mode || b_pf_start)
+ p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
+
+ if (p_src->ip_gre.b_update_mode || b_pf_start)
+ p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
+
+ if (p_src->l2_geneve.b_update_mode || b_pf_start)
+ p_tun->l2_geneve.b_mode_enabled =
+ p_src->l2_geneve.b_mode_enabled;
+
+ if (p_src->ip_geneve.b_update_mode || b_pf_start)
+ p_tun->ip_geneve.b_mode_enabled =
+ p_src->ip_geneve.b_mode_enabled;
+}
+
+static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src)
+{
+ enum tunnel_clss type;
+
+ p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+ p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+ /* @DPDK - typecast tunnul class */
+ type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+ p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+ p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+ p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+ p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+ p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
+}
+
+static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src)
+{
+ p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+ p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
+
+ if (p_src->geneve_port.b_update_port)
+ p_tun->geneve_port.port = p_src->geneve_port.port;
+
+ if (p_src->vxlan_port.b_update_port)
+ p_tun->vxlan_port.port = p_src->vxlan_port.port;
+}
+
+static void
+__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
+ struct ecore_tunn_update_type *tun_type)
+{
+ *p_tunn_cls = tun_type->tun_cls;
+}
+
+static void
+ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
+ struct ecore_tunn_update_type *tun_type,
+ u8 *p_update_port, __le16 *p_port,
+ struct ecore_tunn_update_udp_port *p_udp_port)
+{
+ __ecore_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
+ if (p_udp_port->b_update_port) {
+ *p_update_port = 1;
+ *p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
+ }
+}
+
+static void
+ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+
+ ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
+ ecore_set_tunn_cls_info(p_tun, p_src);
+ ecore_set_tunn_ports(p_tun, p_src);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tun->l2_gre);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tun->ip_gre);
+
+ p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
+}
+
+static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_tunnel_info *p_tun)
+{
+ ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+ ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
+
+ ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled);
+}
+
+static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_tunnel_info *p_tunn)
+{
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel hw config is not supported\n");
+ return;
+ }
+
+ if (p_tunn->vxlan_port.b_update_port)
+ ecore_set_vxlan_dest_port(p_hwfn, p_ptt,
+ p_tunn->vxlan_port.port);
+
+ if (p_tunn->geneve_port.b_update_port)
+ ecore_set_geneve_dest_port(p_hwfn, p_ptt,
+ p_tunn->geneve_port.port);
+
+ ecore_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
+}
+
+static void
+ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_src,
+ struct pf_start_tunnel_config *p_tunn_cfg)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel pf start config is not supported\n");
+ return;
+ }
+
+ if (!p_src)
+ return;
+
+ ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
+ ecore_set_tunn_cls_info(p_tun, p_src);
+ ecore_set_tunn_ports(p_tun, p_src);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tun->l2_gre);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tun->ip_gre);
+}
+
+#define ETH_P_8021Q 0x8100
+#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
+
+enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_tunnel_info *p_tunn,
+ bool allow_npar_tx_switch)
+{
+ struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
+ u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
+ u8 sb_index = p_hwfn->p_eq->eq_sb_index;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ u8 page_cnt;
+ u8 i;
+
+ /* update initial eq producer */
+ ecore_eq_prod_update(p_hwfn,
+ ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));
+
+ /* Initialize the SPQ entry for the ramrod */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_START,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Fill the ramrod data */
+ p_ramrod = &p_ent->ramrod.pf_start;
+ p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
+ p_ramrod->event_ring_sb_index = sb_index;
+ p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);
+
+ /* For easier debugging */
+ p_ramrod->dont_log_ramrods = 0;
+ p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);
+
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
+ p_ramrod->mf_mode = MF_OVLAN;
+ else
+ p_ramrod->mf_mode = MF_NPAR;
+
+ p_ramrod->outer_tag_config.outer_tag.tci =
+ OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
+ if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, &p_hwfn->p_dev->mf_bits)) {
+ p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
+ } else if (OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
+ &p_hwfn->p_dev->mf_bits)) {
+ p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+ }
+
+ p_ramrod->outer_tag_config.pri_map_valid = 1;
+ for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
+ p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;
+
+ /* enable_stag_pri_change should be set if port is in BD mode or,
+ * UFP with Host Control mode or, UFP with DCB over base interface.
+ */
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
+ if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
+ (p_hwfn->p_dcbx_info->results.dcbx_enabled))
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+ else
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
+ }
+
+ /* Place EQ address in RAMROD */
+ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
+ p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
+ page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
+ p_ramrod->event_ring_num_pages = page_cnt;
+ DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
+ p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
+
+ ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
+ &p_ramrod->tunnel_config);
+
+ if (OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH,
+ &p_hwfn->p_dev->mf_bits))
+ p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
+ p_hwfn->hw_info.personality);
+ p_ramrod->personality = PERSONALITY_ETH;
+ }
+
+ if (p_hwfn->p_dev->p_iov_info) {
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+
+ p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
+ p_ramrod->num_vfs = (u8)p_iov->total_vfs;
+ }
+ /* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
+ * version is available.
+ */
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Setting event_ring_sb [id %04x index %02x], outer_tag.tpid [%d], outer_tag.tci [%d]\n",
+ sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tpid,
+ p_ramrod->outer_tag_config.outer_tag.tci);
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+
+ if (p_tunn)
+ ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt,
+ &p_hwfn->p_dev->tunnel);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
+ &p_ent->ramrod.pf_update);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
+ if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
+ (p_hwfn->p_dcbx_info->results.dcbx_enabled))
+ p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
+ else
+ p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+
+/* QM rate limiter resolution is 1.6Mbps */
+#define QM_RL_RESOLUTION(mb_val) ((mb_val) * 10 / 16)
+
+/* FW uses 1/64k to express gd */
+#define FW_GD_RESOLUTION(gd) (64 * 1024 / (gd))
+
+u16 ecore_sp_rl_mb_to_qm(u32 mb_val)
+{
+ return (u16)OSAL_MIN_T(u32, (u16)(~0U), QM_RL_RESOLUTION(mb_val));
+}
+
+u16 ecore_sp_rl_gd_denom(u32 gd)
+{
+ return gd ? (u16)OSAL_MIN_T(u32, (u16)(~0U), FW_GD_RESOLUTION(gd)) : 0;
+}
+
+enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_rl_update_params *params)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct rl_update_ramrod_data *rl_update;
+ struct ecore_sp_init_data init_data;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rl_update = &p_ent->ramrod.rl_update;
+
+ rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
+ rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
+ rl_update->rl_init_flg = params->rl_init_flg;
+ rl_update->rl_start_flg = params->rl_start_flg;
+ rl_update->rl_stop_flg = params->rl_stop_flg;
+ rl_update->rl_id_first = params->rl_id_first;
+ rl_update->rl_id_last = params->rl_id_last;
+ rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
+ rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
+ rl_update->rl_max_rate =
+ OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate));
+ rl_update->rl_r_ai =
+ OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_ai));
+ rl_update->rl_r_hai =
+ OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_hai));
+ rl_update->dcqcn_g =
+ OSAL_CPU_TO_LE16(ecore_sp_rl_gd_denom(params->dcqcn_gd));
+ rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
+ rl_update->dcqcn_timeuot_us =
+ OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us);
+ rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
+ rl_update->qcn_update_param_flg,
+ rl_update->dcqcn_update_param_flg,
+ rl_update->rl_init_flg, rl_update->rl_start_flg,
+ rl_update->rl_stop_flg, rl_update->rl_id_first,
+ rl_update->rl_id_last, rl_update->rl_dc_qcn_flg,
+ rl_update->rl_bc_rate, rl_update->rl_max_rate,
+ rl_update->rl_r_ai, rl_update->rl_r_hai,
+ rl_update->dcqcn_g, rl_update->dcqcn_k_us,
+ rl_update->dcqcn_timeuot_us, rl_update->qcn_timeuot_us);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+/* Set pf update ramrod command params */
+enum _ecore_status_t
+ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_tunnel_info *p_tunn,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel pf update config is not supported\n");
+ return rc;
+ }
+
+ if (!p_tunn)
+ return ECORE_INVAL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
+ &p_ent->ramrod.pf_update.tunnel_config);
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->p_dev->tunnel);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
+ p_ent->ramrod.pf_update.mf_vlan =
+ OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.h
new file mode 100644
index 00000000..e57414cf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_SP_COMMANDS_H__
+#define __ECORE_SP_COMMANDS_H__
+
+#include "ecore.h"
+#include "ecore_spq.h"
+#include "ecore_sp_api.h"
+
+#define ECORE_SP_EQ_COMPLETION 0x01
+#define ECORE_SP_CQE_COMPLETION 0x02
+
+struct ecore_sp_init_data {
+ /* The CID and FID aren't necessarily derived from hwfn,
+ * e.g., in IOV scenarios. CID might defer between SPQ and
+ * other elements.
+ */
+ u32 cid;
+ u16 opaque_fid;
+
+ /* Information regarding operation upon sending & completion */
+ enum spq_mode comp_mode;
+ struct ecore_spq_comp_cb *p_comp_data;
+
+};
+
+/**
+ * @brief Acquire and initialize and SPQ entry for a given ramrod.
+ *
+ * @param p_hwfn
+ * @param pp_ent - will be filled with a pointer to an entry upon success
+ * @param cmd - dependent upon protocol
+ * @param protocol
+ * @param p_data - various configuration required for ramrod
+ *
+ * @return ECORE_SUCCESS upon success, otherwise failure.
+ */
+enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry **pp_ent,
+ u8 cmd,
+ u8 protocol,
+ struct ecore_sp_init_data *p_data);
+
+/**
+ * @brief ecore_sp_pf_start - PF Function Start Ramrod
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function related parameters and write its completion to the
+ * event ring specified in the parameters.
+ *
+ * Ramrods complete on the common event ring for the PF. This ring is
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_tunn - pf start tunneling configuration
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ * for vports configured for tx-switching.
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_tunnel_info *p_tunn,
+ bool allow_npar_tx_switch);
+
+/**
+ * @brief ecore_sp_pf_update - PF Function Update Ramrod
+ *
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_pf_stop - PF Function Stop Ramrod
+ *
+ * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
+ * sent and the last completion written to the PFs Event Ring. This ramrod also
+ * deletes the context for the Slowhwfn connection on this PF.
+ *
+ * @note Not required for first packet.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_heartbeat_ramrod - Send empty Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn);
+
+struct ecore_rl_update_params {
+ u8 qcn_update_param_flg;
+ u8 dcqcn_update_param_flg;
+ u8 rl_init_flg;
+ u8 rl_start_flg;
+ u8 rl_stop_flg;
+ u8 rl_id_first;
+ u8 rl_id_last;
+ u8 rl_dc_qcn_flg; /* If set, RL will used for DCQCN */
+ u32 rl_bc_rate; /* Byte Counter Limit */
+ u32 rl_max_rate; /* Maximum rate in Mbps resolution */
+ u32 rl_r_ai; /* Active increase rate */
+ u32 rl_r_hai; /* Hyper active increase rate */
+ u32 dcqcn_gd; /* DCQCN Alpha update gain */
+ u32 dcqcn_k_us; /* DCQCN Alpha update interval */
+ u32 dcqcn_timeuot_us;
+ u32 qcn_timeuot_us;
+};
+
+/**
+ * @brief ecore_sp_rl_update - Update rate limiters
+ *
+ * @param p_hwfn
+ * @param params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_rl_update_params *params);
+
+/**
+ * @brief ecore_sp_pf_update_stag - PF STAG value update Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_pf_update_ufp - PF ufp update Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn);
+
+#endif /*__ECORE_SP_COMMANDS_H__*/
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_spq.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_spq.c
new file mode 100644
index 00000000..776c86f7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_spq.c
@@ -0,0 +1,1061 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_hsi_common.h"
+#include "ecore.h"
+#include "ecore_sp_api.h"
+#include "ecore_spq.h"
+#include "ecore_iro.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_int.h"
+#include "ecore_dev_api.h"
+#include "ecore_mcp.h"
+#include "ecore_hw.h"
+#include "ecore_sriov.h"
+
+/***************************************************************************
+ * Structures & Definitions
+ ***************************************************************************/
+
+#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
+
+#define SPQ_BLOCK_DELAY_MAX_ITER (10)
+#define SPQ_BLOCK_DELAY_US (10)
+#define SPQ_BLOCK_SLEEP_MAX_ITER (200)
+#define SPQ_BLOCK_SLEEP_MS (5)
+
+/***************************************************************************
+ * Blocking Imp. (BLOCK/EBLOCK mode)
+ ***************************************************************************/
+static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
+ union event_ring_data OSAL_UNUSED * data,
+ u8 fw_return_code)
+{
+ struct ecore_spq_comp_done *comp_done;
+
+ comp_done = (struct ecore_spq_comp_done *)cookie;
+
+ comp_done->done = 0x1;
+ comp_done->fw_return_code = fw_return_code;
+
+ /* make update visible to waiting thread */
+ OSAL_SMP_WMB(p_hwfn->p_dev);
+}
+
+static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent,
+ u8 *p_fw_ret,
+ bool sleep_between_iter)
+{
+ struct ecore_spq_comp_done *comp_done;
+ u32 iter_cnt;
+
+ comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
+ iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter
+ : SPQ_BLOCK_DELAY_MAX_ITER;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
+ iter_cnt *= 5;
+#endif
+
+ while (iter_cnt--) {
+ OSAL_POLL_MODE_DPC(p_hwfn);
+ OSAL_SMP_RMB(p_hwfn->p_dev);
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return ECORE_SUCCESS;
+ }
+
+ if (sleep_between_iter)
+ OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
+ else
+ OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
+ }
+
+ return ECORE_TIMEOUT;
+}
+
+static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent,
+ u8 *p_fw_ret, bool skip_quick_poll)
+{
+ struct ecore_spq_comp_done *comp_done;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ /* A relatively short polling period w/o sleeping, to allow the FW to
+ * complete the ramrod and thus possibly to avoid the following sleeps.
+ */
+ if (!skip_quick_poll) {
+ rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
+ if (rc == ECORE_SUCCESS)
+ return ECORE_SUCCESS;
+ }
+
+ /* Move to polling with a sleeping period between iterations */
+ rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+ if (rc == ECORE_SUCCESS)
+ return ECORE_SUCCESS;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+ rc = ecore_mcp_drain(p_hwfn, p_ptt);
+ ecore_ptt_release(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
+ goto err;
+ }
+
+ /* Retry after drain */
+ rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+ if (rc == ECORE_SUCCESS)
+ return ECORE_SUCCESS;
+
+ comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return ECORE_SUCCESS;
+ }
+err:
+ DP_NOTICE(p_hwfn, true,
+ "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
+ OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
+ p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
+ OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
+
+ return ECORE_BUSY;
+}
+
+void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
+ u32 spq_timeout_ms)
+{
+ p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ?
+ spq_timeout_ms / SPQ_BLOCK_SLEEP_MS :
+ SPQ_BLOCK_SLEEP_MAX_ITER;
+}
+
+/***************************************************************************
+ * SPQ entries inner API
+ ***************************************************************************/
+static enum _ecore_status_t
+ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
+{
+ p_ent->flags = 0;
+
+ switch (p_ent->comp_mode) {
+ case ECORE_SPQ_MODE_EBLOCK:
+ case ECORE_SPQ_MODE_BLOCK:
+ p_ent->comp_cb.function = ecore_spq_blocking_cb;
+ break;
+ case ECORE_SPQ_MODE_CB:
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
+ " Data pointer: [%08x:%08x] Completion Mode: %s\n",
+ p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
+ p_ent->elem.hdr.protocol_id,
+ p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
+ D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
+ ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+
+ return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * HSI access
+ ***************************************************************************/
+static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq *p_spq)
+{
+ struct e4_core_conn_context *p_cxt;
+ struct ecore_cxt_info cxt_info;
+ u16 physical_q;
+ enum _ecore_status_t rc;
+
+ cxt_info.iid = p_spq->cid;
+
+ rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
+
+ if (rc < 0) {
+ DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
+ p_spq->cid);
+ return;
+ }
+
+ p_cxt = cxt_info.p_cxt;
+
+ /* @@@TBD we zero the context until we have ilt_reset implemented. */
+ OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
+ SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ * E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
+ */
+ SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+ E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ }
+
+ /* CDU validation - FIXME currently disabled */
+
+ /* QM physical queue */
+ physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
+
+ p_cxt->xstorm_st_context.spq_base_lo =
+ DMA_LO_LE(p_spq->chain.p_phys_addr);
+ p_cxt->xstorm_st_context.spq_base_hi =
+ DMA_HI_LE(p_spq->chain.p_phys_addr);
+
+ DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
+ p_hwfn->p_consq->chain.p_phys_addr);
+}
+
+static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq *p_spq,
+ struct ecore_spq_entry *p_ent)
+{
+ struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
+ struct core_db_data *p_db_data = &p_spq->db_data;
+ u16 echo = ecore_chain_get_prod_idx(p_chain);
+ struct slow_path_element *elem;
+
+ p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
+ elem = ecore_chain_produce(p_chain);
+ if (!elem) {
+ DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
+ return ECORE_INVAL;
+ }
+
+ *elem = p_ent->elem; /* Struct assignment */
+
+ p_db_data->spq_prod =
+ OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
+
+ /* Make sure the SPQE is updated before the doorbell */
+ OSAL_WMB(p_hwfn->p_dev);
+
+ DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
+
+ /* Make sure doorbell is rang */
+ OSAL_WMB(p_hwfn->p_dev);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
+ " agg_params: %02x, prod: %04x\n",
+ p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
+ p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));
+
+ return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * Asynchronous events
+ ***************************************************************************/
+
+static enum _ecore_status_t
+ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe)
+{
+ ecore_spq_async_comp_cb cb;
+
+ if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+ return ECORE_INVAL;
+
+ cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
+ if (cb) {
+ return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+ &p_eqe->data, p_eqe->fw_return_code);
+ } else {
+ DP_NOTICE(p_hwfn,
+ true, "Unknown Async completion for protocol: %d\n",
+ p_eqe->protocol_id);
+ return ECORE_INVAL;
+ }
+}
+
+enum _ecore_status_t
+ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ ecore_spq_async_comp_cb cb)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return ECORE_INVAL;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
+ return ECORE_SUCCESS;
+}
+
+void
+ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
+}
+
+/***************************************************************************
+ * EQ API
+ ***************************************************************************/
+void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
+{
+ u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ OSAL_MMIOWB(p_hwfn->p_dev);
+}
+
+enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
+ void *cookie)
+{
+ struct ecore_eq *p_eq = cookie;
+ struct ecore_chain *p_chain = &p_eq->chain;
+ enum _ecore_status_t rc = 0;
+
+ /* take a snapshot of the FW consumer */
+ u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
+
+ /* Need to guarantee the fw_cons index we use points to a usuable
+ * element (to comply with our chain), so our macros would comply
+ */
+ if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
+ ecore_chain_get_usable_per_page(p_chain)) {
+ fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
+ }
+
+ /* Complete current segment of eq entries */
+ while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
+ struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
+ if (!p_eqe) {
+ rc = ECORE_INVAL;
+ break;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
+ p_eqe->opcode, /* Event Opcode */
+ p_eqe->protocol_id, /* Event Protocol ID */
+ p_eqe->reserved0, /* Reserved */
+ /* Echo value from ramrod data on the host */
+ OSAL_LE16_TO_CPU(p_eqe->echo),
+ p_eqe->fw_return_code, /* FW return code for SP
+ * ramrods
+ */
+ p_eqe->flags);
+
+ if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
+ if (ecore_async_event_completion(p_hwfn, p_eqe))
+ rc = ECORE_INVAL;
+ } else if (ecore_spq_completion(p_hwfn,
+ p_eqe->echo,
+ p_eqe->fw_return_code,
+ &p_eqe->data)) {
+ rc = ECORE_INVAL;
+ }
+
+ ecore_chain_recycle_consumed(p_chain);
+ }
+
+ ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
+{
+ struct ecore_eq *p_eq;
+
+ /* Allocate EQ struct */
+ p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
+ if (!p_eq) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate `struct ecore_eq'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* Allocate and initialize EQ chain*/
+ if (ecore_chain_alloc(p_hwfn->p_dev,
+ ECORE_CHAIN_USE_TO_PRODUCE,
+ ECORE_CHAIN_MODE_PBL,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ num_elem,
+ sizeof(union event_ring_element),
+ &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
+ goto eq_allocate_fail;
+ }
+
+ /* register EQ completion on the SP SB */
+ ecore_int_register_cb(p_hwfn, ecore_eq_completion,
+ p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
+
+ p_hwfn->p_eq = p_eq;
+ return ECORE_SUCCESS;
+
+eq_allocate_fail:
+ OSAL_FREE(p_hwfn->p_dev, p_eq);
+ return ECORE_NOMEM;
+}
+
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
+{
+ ecore_chain_reset(&p_hwfn->p_eq->chain);
+}
+
+void ecore_eq_free(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_eq)
+ return;
+
+ ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
+ p_hwfn->p_eq = OSAL_NULL;
+}
+
+/***************************************************************************
+* CQE API - manipulate EQ functionality
+***************************************************************************/
+static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe
+ *cqe,
+ enum protocol_type protocol)
+{
+ if (IS_VF(p_hwfn->p_dev))
+ return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);
+
+ /* @@@tmp - it's possible we'll eventually want to handle some
+ * actual commands that can arrive here, but for now this is only
+ * used to complete the ramrod using the echo value on the cqe
+ */
+ return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to handle RXQ CQE [cmd 0x%02x]\n",
+ cqe->ramrod_cmd_id);
+ }
+
+ return rc;
+}
+
+/***************************************************************************
+ * Slow hwfn Queue (spq)
+ ***************************************************************************/
+void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ struct ecore_spq_entry *p_virt = OSAL_NULL;
+ struct core_db_data *p_db_data;
+ void OSAL_IOMEM *db_addr;
+ dma_addr_t p_phys = 0;
+ u32 i, capacity;
+ enum _ecore_status_t rc;
+
+ OSAL_LIST_INIT(&p_spq->pending);
+ OSAL_LIST_INIT(&p_spq->completion_pending);
+ OSAL_LIST_INIT(&p_spq->free_pool);
+ OSAL_LIST_INIT(&p_spq->unlimited_pending);
+ OSAL_SPIN_LOCK_INIT(&p_spq->lock);
+
+ /* SPQ empty pool */
+ p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
+ p_virt = p_spq->p_virt;
+
+ capacity = ecore_chain_get_capacity(&p_spq->chain);
+ for (i = 0; i < capacity; i++) {
+ DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
+
+ OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
+
+ p_virt++;
+ p_phys += sizeof(struct ecore_spq_entry);
+ }
+
+ /* Statistics */
+ p_spq->normal_count = 0;
+ p_spq->comp_count = 0;
+ p_spq->comp_sent_count = 0;
+ p_spq->unlimited_pending_count = 0;
+
+ OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
+ SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
+ p_spq->comp_bitmap_idx = 0;
+
+ /* SPQ cid, cannot fail */
+ ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
+ ecore_spq_hw_initialize(p_hwfn, p_spq);
+
+ /* reset the chain itself */
+ ecore_chain_reset(&p_spq->chain);
+
+ /* Initialize the address/data of the SPQ doorbell */
+ p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
+ p_db_data = &p_spq->db_data;
+ OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_SPQ_PROD_CMD);
+ p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+ /* Register the SPQ doorbell with the doorbell recovery mechanism */
+ db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
+ rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn,
+ "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
+}
+
+enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_virt = OSAL_NULL;
+ struct ecore_spq *p_spq = OSAL_NULL;
+ dma_addr_t p_phys = 0;
+ u32 capacity;
+
+ /* SPQ struct */
+ p_spq =
+ OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
+ if (!p_spq) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* SPQ ring */
+ if (ecore_chain_alloc(p_hwfn->p_dev,
+ ECORE_CHAIN_USE_TO_PRODUCE,
+ ECORE_CHAIN_MODE_SINGLE,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ 0, /* N/A when the mode is SINGLE */
+ sizeof(struct slow_path_element),
+ &p_spq->chain, OSAL_NULL)) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
+ goto spq_allocate_fail;
+ }
+
+ /* allocate and fill the SPQ elements (incl. ramrod data list) */
+ capacity = ecore_chain_get_capacity(&p_spq->chain);
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
+ capacity *
+ sizeof(struct ecore_spq_entry));
+ if (!p_virt)
+ goto spq_allocate_fail;
+
+ p_spq->p_virt = p_virt;
+ p_spq->p_phys = p_phys;
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
+ goto spq_allocate_fail;
+#endif
+
+ p_hwfn->p_spq = p_spq;
+ return ECORE_SUCCESS;
+
+spq_allocate_fail:
+ ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+ OSAL_FREE(p_hwfn->p_dev, p_spq);
+ return ECORE_NOMEM;
+}
+
+void ecore_spq_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ void OSAL_IOMEM *db_addr;
+ u32 capacity;
+
+ if (!p_spq)
+ return;
+
+ /* Delete the SPQ doorbell from the doorbell recovery mechanism */
+ db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
+ ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);
+
+ if (p_spq->p_virt) {
+ capacity = ecore_chain_get_capacity(&p_spq->chain);
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_spq->p_virt,
+ p_spq->p_phys,
+ capacity *
+ sizeof(struct ecore_spq_entry));
+ }
+
+ ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
+#endif
+
+ OSAL_FREE(p_hwfn->p_dev, p_spq);
+}
+
+enum _ecore_status_t
+ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_SPIN_LOCK(&p_spq->lock);
+
+ if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+ p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
+ if (!p_ent) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
+ rc = ECORE_NOMEM;
+ goto out_unlock;
+ }
+ p_ent->queue = &p_spq->unlimited_pending;
+ } else {
+ p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
+ struct ecore_spq_entry, list);
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
+ p_ent->queue = &p_spq->pending;
+ }
+
+ *pp_ent = p_ent;
+
+out_unlock:
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+ return rc;
+}
+
+/* Locked variant; Should be called while the SPQ lock is taken */
+static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent)
+{
+ OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
+}
+
+void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent)
+{
+ OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
+ __ecore_spq_return_entry(p_hwfn, p_ent);
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
+}
+
+/**
+ * @brief ecore_spq_add_entry - adds a new entry to the pending
+ * list. Should be used while lock is being held.
+ *
+ * Addes an entry to the pending list is there is room (en empty
+ * element is available in the free_pool), or else places the
+ * entry in the unlimited_pending pool.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param priority
+ *
+ * @return enum _ecore_status_t
+ */
+static enum _ecore_status_t
+ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent, enum spq_priority priority)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+ OSAL_LIST_PUSH_TAIL(&p_ent->list,
+ &p_spq->unlimited_pending);
+ p_spq->unlimited_pending_count++;
+
+ return ECORE_SUCCESS;
+
+ } else {
+ struct ecore_spq_entry *p_en2;
+
+ p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
+ struct ecore_spq_entry,
+ list);
+ OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
+
+ /* Copy the ring element physical pointer to the new
+ * entry, since we are about to override the entire ring
+ * entry and don't want to lose the pointer.
+ */
+ p_ent->elem.data_ptr = p_en2->elem.data_ptr;
+
+ *p_en2 = *p_ent;
+
+ /* EBLOCK responsible to free the allocated p_ent */
+ if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
+ OSAL_FREE(p_hwfn->p_dev, p_ent);
+
+ p_ent = p_en2;
+ }
+ }
+
+ /* entry is to be placed in 'pending' queue */
+ switch (priority) {
+ case ECORE_SPQ_PRIORITY_NORMAL:
+ OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
+ p_spq->normal_count++;
+ break;
+ case ECORE_SPQ_PRIORITY_HIGH:
+ OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
+ p_spq->high_count++;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * Accessor
+ ***************************************************************************/
+
+u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_spq)
+ return 0xffffffff; /* illegal */
+ return p_hwfn->p_spq->cid;
+}
+
+/***************************************************************************
+ * Posting new Ramrods
+ ***************************************************************************/
+
+static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
+ osal_list_t *head,
+ u32 keep_reserve)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ enum _ecore_status_t rc;
+
+ /* TODO - implementation might be wasteful; will always keep room
+ * for an additional high priority ramrod (even if one is already
+ * pending FW)
+ */
+ while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
+ !OSAL_LIST_IS_EMPTY(head)) {
+ struct ecore_spq_entry *p_ent =
+ OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
+ if (p_ent != OSAL_NULL) {
+#if defined(_NTDDK_)
+#pragma warning(suppress : 6011 28182)
+#endif
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
+ OSAL_LIST_PUSH_TAIL(&p_ent->list,
+ &p_spq->completion_pending);
+ p_spq->comp_sent_count++;
+
+ rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
+ if (rc) {
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
+ &p_spq->completion_pending);
+ __ecore_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+ }
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+
+ while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+ if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
+ break;
+
+ p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
+ struct ecore_spq_entry, list);
+ if (!p_ent)
+ return ECORE_INVAL;
+
+#if defined(_NTDDK_)
+#pragma warning(suppress : 6011)
+#endif
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
+
+ ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ }
+
+ return ecore_spq_post_list(p_hwfn,
+ &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
+}
+
+enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent,
+ u8 *fw_return_code)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
+ bool b_ret_ent = true;
+
+ if (!p_hwfn)
+ return ECORE_INVAL;
+
+ if (!p_ent) {
+ DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
+ return ECORE_INVAL;
+ }
+
+ if (p_hwfn->p_dev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Recovery is in progress -> skip spq post"
+ " [cmd %02x protocol %02x]\n",
+ p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
+ /* Return success to let the flows to be completed successfully
+ * w/o any error handling.
+ */
+ return ECORE_SUCCESS;
+ }
+
+ OSAL_SPIN_LOCK(&p_spq->lock);
+
+ /* Complete the entry */
+ rc = ecore_spq_fill_entry(p_hwfn, p_ent);
+
+ /* Check return value after LOCK is taken for cleaner error flow */
+ if (rc)
+ goto spq_post_fail;
+
+ /* Add the request to the pending queue */
+ rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ if (rc)
+ goto spq_post_fail;
+
+ rc = ecore_spq_pend_post(p_hwfn);
+ if (rc) {
+ /* Since it's possible that pending failed for a different
+ * entry [although unlikely], the failed entry was already
+ * dealt with; No need to return it here.
+ */
+ b_ret_ent = false;
+ goto spq_post_fail;
+ }
+
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+ if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
+ /* For entries in ECORE BLOCK mode, the completion code cannot
+ * perform the necessary cleanup - if it did, we couldn't
+ * access p_ent here to see whether it's successful or not.
+ * Thus, after gaining the answer perform the cleanup here.
+ */
+ rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
+ p_ent->queue == &p_spq->unlimited_pending);
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ /* This is an allocated p_ent which does not need to
+ * return to pool.
+ */
+ OSAL_FREE(p_hwfn->p_dev, p_ent);
+
+ /* TBD: handle error flow and remove p_ent from
+ * completion pending
+ */
+ return rc;
+ }
+
+ if (rc)
+ goto spq_post_fail2;
+
+ /* return to pool */
+ ecore_spq_return_entry(p_hwfn, p_ent);
+ }
+ return rc;
+
+spq_post_fail2:
+ OSAL_SPIN_LOCK(&p_spq->lock);
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
+ ecore_chain_return_produced(&p_spq->chain);
+
+spq_post_fail:
+ /* return to the free pool */
+ if (b_ret_ent)
+ __ecore_spq_return_entry(p_hwfn, p_ent);
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data)
+{
+ struct ecore_spq *p_spq;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_spq_entry *tmp;
+ struct ecore_spq_entry *found = OSAL_NULL;
+ enum _ecore_status_t rc;
+
+ if (!p_hwfn)
+ return ECORE_INVAL;
+
+ p_spq = p_hwfn->p_spq;
+ if (!p_spq)
+ return ECORE_INVAL;
+
+ OSAL_SPIN_LOCK(&p_spq->lock);
+ OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
+ tmp,
+ &p_spq->completion_pending,
+ list, struct ecore_spq_entry) {
+ if (p_ent->elem.hdr.echo == echo) {
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
+ &p_spq->completion_pending);
+
+ /* Avoid overriding of SPQ entries when getting
+ * out-of-order completions, by marking the completions
+ * in a bitmap and increasing the chain consumer only
+ * for the first successive completed entries.
+ */
+ SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
+ while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
+ p_spq->comp_bitmap_idx)) {
+ SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
+ p_spq->comp_bitmap_idx);
+ p_spq->comp_bitmap_idx++;
+ ecore_chain_return_produced(&p_spq->chain);
+ }
+
+ p_spq->comp_count++;
+ found = p_ent;
+ break;
+ }
+
+ /* This is debug and should be relatively uncommon - depends
+ * on scenarios which have mutliple per-PF sent ramrods.
+ */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Got completion for echo %04x - doesn't match"
+ " echo %04x in completion pending list\n",
+ OSAL_LE16_TO_CPU(echo),
+ OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
+ }
+
+ /* Release lock before callback, as callback may post
+ * an additional ramrod.
+ */
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+ if (!found) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to find an entry this"
+ " EQE [echo %04x] completes\n",
+ OSAL_LE16_TO_CPU(echo));
+ return ECORE_EXISTS;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Complete EQE [echo %04x]: func %p cookie %p)\n",
+ OSAL_LE16_TO_CPU(echo),
+ p_ent->comp_cb.function, p_ent->comp_cb.cookie);
+ if (found->comp_cb.function)
+ found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
+ fw_return_code);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Got a completion without a callback function\n");
+
+ if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
+ (found->queue == &p_spq->unlimited_pending))
+ /* EBLOCK is responsible for returning its own entry into the
+ * free list, unless it originally added the entry into the
+ * unlimited pending list.
+ */
+ ecore_spq_return_entry(p_hwfn, found);
+
+ /* Attempt to post pending requests */
+ OSAL_SPIN_LOCK(&p_spq->lock);
+ rc = ecore_spq_pend_post(p_hwfn);
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_consq *p_consq;
+
+ /* Allocate ConsQ struct */
+ p_consq =
+ OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
+ if (!p_consq) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate `struct ecore_consq'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* Allocate and initialize EQ chain */
+ if (ecore_chain_alloc(p_hwfn->p_dev,
+ ECORE_CHAIN_USE_TO_PRODUCE,
+ ECORE_CHAIN_MODE_PBL,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ ECORE_CHAIN_PAGE_SIZE / 0x80,
+ 0x80,
+ &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain");
+ goto consq_allocate_fail;
+ }
+
+ p_hwfn->p_consq = p_consq;
+ return ECORE_SUCCESS;
+
+consq_allocate_fail:
+ OSAL_FREE(p_hwfn->p_dev, p_consq);
+ return ECORE_NOMEM;
+}
+
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
+{
+ ecore_chain_reset(&p_hwfn->p_consq->chain);
+}
+
+void ecore_consq_free(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_consq)
+ return;
+
+ ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_spq.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_spq.h
new file mode 100644
index 00000000..6142c399
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_spq.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_SPQ_H__
+#define __ECORE_SPQ_H__
+
+#include "ecore_hsi_common.h"
+#include "ecore_status.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_chain.h"
+#include "ecore_sp_api.h"
+
+union ramrod_data {
+ struct pf_start_ramrod_data pf_start;
+ struct pf_update_ramrod_data pf_update;
+ struct rl_update_ramrod_data rl_update;
+ struct rx_queue_start_ramrod_data rx_queue_start;
+ struct rx_queue_update_ramrod_data rx_queue_update;
+ struct rx_queue_stop_ramrod_data rx_queue_stop;
+ struct tx_queue_start_ramrod_data tx_queue_start;
+ struct tx_queue_stop_ramrod_data tx_queue_stop;
+ struct vport_start_ramrod_data vport_start;
+ struct vport_stop_ramrod_data vport_stop;
+ struct rx_update_gft_filter_data rx_update_gft;
+ struct vport_update_ramrod_data vport_update;
+ struct core_rx_start_ramrod_data core_rx_queue_start;
+ struct core_rx_stop_ramrod_data core_rx_queue_stop;
+ struct core_tx_start_ramrod_data core_tx_queue_start;
+ struct core_tx_stop_ramrod_data core_tx_queue_stop;
+ struct vport_filter_update_ramrod_data vport_filter_update;
+
+ struct vf_start_ramrod_data vf_start;
+ struct vf_stop_ramrod_data vf_stop;
+};
+
+#define EQ_MAX_CREDIT 0xffffffff
+
+enum spq_priority {
+ ECORE_SPQ_PRIORITY_NORMAL,
+ ECORE_SPQ_PRIORITY_HIGH,
+};
+
+union ecore_spq_req_comp {
+ struct ecore_spq_comp_cb cb;
+ u64 *done_addr;
+};
+
+/* SPQ_MODE_EBLOCK */
+struct ecore_spq_comp_done {
+ u64 done;
+ u8 fw_return_code;
+};
+
+struct ecore_spq_entry {
+ osal_list_entry_t list;
+
+ u8 flags;
+
+ /* HSI slow path element */
+ struct slow_path_element elem;
+
+ union ramrod_data ramrod;
+
+ enum spq_priority priority;
+
+ /* pending queue for this entry */
+ osal_list_t *queue;
+
+ enum spq_mode comp_mode;
+ struct ecore_spq_comp_cb comp_cb;
+ struct ecore_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
+};
+
+struct ecore_eq {
+ struct ecore_chain chain;
+ u8 eq_sb_index; /* index within the SB */
+ __le16 *p_fw_cons; /* ptr to index value */
+};
+
+struct ecore_consq {
+ struct ecore_chain chain;
+};
+
+typedef enum _ecore_status_t
+(*ecore_spq_async_comp_cb)(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ u16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code);
+
+enum _ecore_status_t
+ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ ecore_spq_async_comp_cb cb);
+
+void
+ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id);
+
+struct ecore_spq {
+ osal_spinlock_t lock;
+
+ osal_list_t unlimited_pending;
+ osal_list_t pending;
+ osal_list_t completion_pending;
+ osal_list_t free_pool;
+
+ struct ecore_chain chain;
+
+ /* allocated dma-able memory for spq entries (+ramrod data) */
+ dma_addr_t p_phys;
+ struct ecore_spq_entry *p_virt;
+
+ /* SPQ max sleep iterations used in __ecore_spq_block() */
+ u32 block_sleep_max_iter;
+
+ /* Bitmap for handling out-of-order completions */
+#define SPQ_RING_SIZE \
+ (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
+/* BITS_PER_LONG */
+#define SPQ_COMP_BMAP_SIZE (SPQ_RING_SIZE / (sizeof(unsigned long) * 8))
+ unsigned long p_comp_bitmap[SPQ_COMP_BMAP_SIZE];
+ u8 comp_bitmap_idx;
+#define SPQ_COMP_BMAP_SET_BIT(p_spq, idx) \
+ (OSAL_SET_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+
+#define SPQ_COMP_BMAP_CLEAR_BIT(p_spq, idx) \
+ (OSAL_CLEAR_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+
+#define SPQ_COMP_BMAP_TEST_BIT(p_spq, idx) \
+ (OSAL_TEST_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+
+ /* Statistics */
+ u32 unlimited_pending_count;
+ u32 normal_count;
+ u32 high_count;
+ u32 comp_sent_count;
+ u32 comp_count;
+
+ u32 cid;
+
+ u32 db_addr_offset;
+ struct core_db_data db_data;
+ ecore_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
+};
+
+struct ecore_port;
+struct ecore_hwfn;
+
+/**
+ * @brief ecore_set_spq_block_timeout - calculates the maximum sleep
+ * iterations used in __ecore_spq_block();
+ *
+ * @param p_hwfn
+ * @param spq_timeout_ms
+ */
+void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
+ u32 spq_timeout_ms);
+
+/**
+ * @brief ecore_spq_post - Posts a Slow hwfn request to FW, or lacking that
+ * Pends it to the future list.
+ *
+ * @param p_hwfn
+ * @param p_req
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent,
+ u8 *fw_return_code);
+
+/**
+ * @brief ecore_spq_allocate - Alloocates & initializes the SPQ and EQ.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_setup - Reset the SPQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void ecore_spq_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_deallocate - Deallocates the given SPQ struct.
+ *
+ * @param p_hwfn
+ */
+void ecore_spq_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_get_entry - Obtain an entrry from the spq
+ * free pool list.
+ *
+ *
+ *
+ * @param p_hwfn
+ * @param pp_ent
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_spq_get_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry **pp_ent);
+
+/**
+ * @brief ecore_spq_return_entry - Return an entry to spq free
+ * pool list
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent);
+/**
+ * @brief ecore_eq_allocate - Allocates & initializes an EQ struct
+ *
+ * @param p_hwfn
+ * @param num_elem number of elements in the eq
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
+
+/**
+ * @brief ecore_eq_setup - Reset the EQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_eq_free - deallocates the given EQ struct.
+ *
+ * @param p_hwfn
+ */
+void ecore_eq_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_eq_prod_update - update the FW with default EQ producer
+ *
+ * @param p_hwfn
+ * @param prod
+ */
+void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn,
+ u16 prod);
+
+/**
+ * @brief ecore_eq_completion - Completes currently pending EQ elements
+ *
+ * @param p_hwfn
+ * @param cookie
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
+ void *cookie);
+
+/**
+ * @brief ecore_spq_completion - Completes a single event
+ *
+ * @param p_hwfn
+ * @param echo - echo value from cookie (used for determining completion)
+ * @param p_data - data from cookie (used in callback function if applicable)
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data);
+
+/**
+ * @brief ecore_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ *
+ * @param p_hwfn
+ *
+ * @return u32 - SPQ CID
+ */
+u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_consq_alloc - Allocates & initializes an ConsQ struct
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_consq_setup - Reset the ConsQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_consq_free - deallocates the given ConsQ struct.
+ *
+ * @param p_hwfn
+ */
+void ecore_consq_free(struct ecore_hwfn *p_hwfn);
+
+#endif /* __ECORE_SPQ_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.c
new file mode 100644
index 00000000..f7ebf7ad
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.c
@@ -0,0 +1,4923 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "reg_addr.h"
+#include "ecore_sriov.h"
+#include "ecore_status.h"
+#include "ecore_hw.h"
+#include "ecore_hw_defs.h"
+#include "ecore_int.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_l2.h"
+#include "ecore_vfpf_if.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_ops.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "ecore_mcp.h"
+#include "ecore_cxt.h"
+#include "ecore_vf.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_sp_commands.h"
+
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code);
+
+const char *ecore_channel_tlvs_string[] = {
+ "CHANNEL_TLV_NONE", /* ends tlv sequence */
+ "CHANNEL_TLV_ACQUIRE",
+ "CHANNEL_TLV_VPORT_START",
+ "CHANNEL_TLV_VPORT_UPDATE",
+ "CHANNEL_TLV_VPORT_TEARDOWN",
+ "CHANNEL_TLV_START_RXQ",
+ "CHANNEL_TLV_START_TXQ",
+ "CHANNEL_TLV_STOP_RXQ",
+ "CHANNEL_TLV_STOP_TXQ",
+ "CHANNEL_TLV_UPDATE_RXQ",
+ "CHANNEL_TLV_INT_CLEANUP",
+ "CHANNEL_TLV_CLOSE",
+ "CHANNEL_TLV_RELEASE",
+ "CHANNEL_TLV_LIST_END",
+ "CHANNEL_TLV_UCAST_FILTER",
+ "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
+ "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
+ "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
+ "CHANNEL_TLV_VPORT_UPDATE_MCAST",
+ "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
+ "CHANNEL_TLV_VPORT_UPDATE_RSS",
+ "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
+ "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
+ "CHANNEL_TLV_UPDATE_TUNN_PARAM",
+ "CHANNEL_TLV_COALESCE_UPDATE",
+ "CHANNEL_TLV_QID",
+ "CHANNEL_TLV_COALESCE_READ",
+ "CHANNEL_TLV_BULLETIN_UPDATE_MAC",
+ "CHANNEL_TLV_UPDATE_MTU",
+ "CHANNEL_TLV_MAX"
+};
+
+static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
+{
+ u8 legacy = 0;
+
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;
+
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ legacy |= ECORE_QCID_LEGACY_VF_CID;
+
+ return legacy;
+}
+
+/* IOV ramrods */
+static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ u8 fp_minor;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_vf->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_START,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_start;
+
+ p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
+ p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);
+
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ case ECORE_PCI_ETH_ROCE:
+ case ECORE_PCI_ETH_IWARP:
+ p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
+ p_hwfn->hw_info.personality);
+ return ECORE_INVAL;
+ }
+
+ fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
+ if (fp_minor > ETH_HSI_VER_MINOR &&
+ fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%d] - Requested fp hsi %02x.%02x which is"
+ " slightly newer than PF's %02x.%02x; Configuring"
+ " PFs version\n",
+ p_vf->abs_vf_id,
+ ETH_HSI_VER_MAJOR, fp_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+ fp_minor = ETH_HSI_VER_MINOR;
+ }
+
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Starting using HSI %02x.%02x\n",
+ p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
+ u32 concrete_vfid,
+ u16 opaque_vfid)
+{
+ struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_vfid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_STOP,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_stop;
+
+ p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious)
+{
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
+ return false;
+ }
+
+ if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
+ (rel_vf_id < 0))
+ return false;
+
+ if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
+ b_enabled_only)
+ return false;
+
+ if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
+ b_non_malicious)
+ return false;
+
+ return true;
+}
+
+struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct ecore_vf_info *vf = OSAL_NULL;
+
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
+ return OSAL_NULL;
+ }
+
+ if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
+ b_enabled_only, false))
+ vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
+ else
+ DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
+ relative_vf_id);
+
+ return vf;
+}
+
+static struct ecore_queue_cid *
+ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
+{
+ int i;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid &&
+ !p_queue->cids[i].b_is_tx)
+ return p_queue->cids[i].p_cid;
+ }
+
+ return OSAL_NULL;
+}
+
+enum ecore_iov_validate_q_mode {
+ ECORE_IOV_VALIDATE_Q_NA,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ ECORE_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
+ u16 qid,
+ enum ecore_iov_validate_q_mode mode,
+ bool b_is_tx)
+{
+ int i;
+
+ if (mode == ECORE_IOV_VALIDATE_Q_NA)
+ return true;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ struct ecore_vf_queue_cid *p_qcid;
+
+ p_qcid = &p_vf->vf_queues[qid].cids[i];
+
+ if (p_qcid->p_cid == OSAL_NULL)
+ continue;
+
+ if (p_qcid->b_is_tx != b_is_tx)
+ continue;
+
+ /* Found. It's enabled. */
+ return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
+ }
+
+ /* In case we haven't found any valid cid, then its disabled */
+ return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
+}
+
+static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u16 rx_qid,
+ enum ecore_iov_validate_q_mode mode)
+{
+ if (rx_qid >= p_vf->num_rxqs) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[0x%02x] - can't touch Rx queue[%04x];"
+ " Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
+ return false;
+ }
+
+ return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
+}
+
+static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u16 tx_qid,
+ enum ecore_iov_validate_q_mode mode)
+{
+ if (tx_qid >= p_vf->num_txqs) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[0x%02x] - can't touch Tx queue[%04x];"
+ " Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
+ return false;
+ }
+
+ return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
+}
+
+static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u16 sb_idx)
+{
+ int i;
+
+ for (i = 0; i < p_vf->num_sbs; i++)
+ if (p_vf->igu_sbs[i] == sb_idx)
+ return true;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[0%02x] - tried using sb_idx %04x which doesn't exist as"
+ " one of its 0x%02x SBs\n",
+ p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
+
+ return false;
+}
+
+/* Is there at least 1 queue open? */
+static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_rxqs; i++)
+ if (ecore_iov_validate_queue_mode(p_vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ false))
+ return true;
+
+ return false;
+}
+
+static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_txqs; i++)
+ if (ecore_iov_validate_queue_mode(p_vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ true))
+ return true;
+
+ return false;
+}
+
+enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
+ int vfid,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_bulletin_content *p_bulletin;
+ int crc_size = sizeof(p_bulletin->crc);
+ struct ecore_dmae_params params;
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf)
+ return ECORE_INVAL;
+
+ /* TODO - check VF is in a state where it can accept message */
+ if (!p_vf->vf_bulletin)
+ return ECORE_INVAL;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ /* Increment bulletin board version and compute crc */
+ p_bulletin->version++;
+ p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
+ p_vf->bulletin.size - crc_size);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
+ p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
+
+ /* propagate bulletin board via dmae to vm memory */
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.flags = ECORE_DMAE_FLAG_VF_DST;
+ params.dst_vfid = p_vf->abs_vf_id;
+ return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
+ p_vf->vf_bulletin, p_vf->bulletin.size / 4,
+ &params);
+}
+
+static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
+{
+ struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
+ int pos = iov->pos;
+
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_INITIAL_VF,
+ &iov->initial_vfs);
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
+ if (iov->num_vfs) {
+ /* @@@TODO - in future we might want to add an OSAL here to
+ * allow each OS to decide on its own how to act.
+ */
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV,
+ "Number of VFs are already set to non-zero value."
+ " Ignoring PCI configuration value\n");
+ iov->num_vfs = 0;
+ }
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
+
+ OSAL_PCI_READ_CONFIG_DWORD(p_dev,
+ pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+
+ OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);
+
+ OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
+ "ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
+ " stride %d, page size 0x%x\n",
+ iov->nres, iov->cap, iov->ctrl,
+ iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
+ iov->offset, iov->stride, iov->pgsz);
+
+ /* Some sanity checks */
+ if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
+ iov->total_vfs > NUM_OF_VFS(p_dev)) {
+ /* This can happen only due to a bug. In this case we set
+ * num_vfs to zero to avoid memory corruption in the code that
+ * assumes max number of vfs
+ */
+ DP_NOTICE(p_dev, false,
+ "IOV: Unexpected number of vfs set: %d"
+ " setting num_vf to zero\n",
+ iov->num_vfs);
+
+ iov->num_vfs = 0;
+ iov->total_vfs = 0;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+ struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ struct ecore_bulletin_content *p_bulletin_virt;
+ dma_addr_t req_p, rply_p, bulletin_p;
+ union pfvf_tlvs *p_reply_virt_addr;
+ union vfpf_tlvs *p_req_virt_addr;
+ u8 idx = 0;
+
+ OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
+
+ p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
+ req_p = p_iov_info->mbx_msg_phys_addr;
+ p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
+ rply_p = p_iov_info->mbx_reply_phys_addr;
+ p_bulletin_virt = p_iov_info->p_bulletins;
+ bulletin_p = p_iov_info->bulletins_phys;
+ if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
+ DP_ERR(p_hwfn,
+ "ecore_iov_setup_vfdb called without alloc mem first\n");
+ return;
+ }
+
+ for (idx = 0; idx < p_iov->total_vfs; idx++) {
+ struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
+ u32 concrete;
+
+ vf->vf_mbx.req_virt = p_req_virt_addr + idx;
+ vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
+ vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
+ vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
+ vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
+#endif
+ vf->state = VF_STOPPED;
+ vf->b_init = false;
+
+ vf->bulletin.phys = idx *
+ sizeof(struct ecore_bulletin_content) + bulletin_p;
+ vf->bulletin.p_virt = p_bulletin_virt + idx;
+ vf->bulletin.size = sizeof(struct ecore_bulletin_content);
+
+ vf->relative_vf_id = idx;
+ vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
+ concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
+ vf->concrete_fid = concrete;
+ /* TODO - need to devise a better way of getting opaque */
+ vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
+ (vf->abs_vf_id << 8);
+
+ vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
+ vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
+ }
+}
+
+static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ void **p_v_addr;
+ u16 num_vfs = 0;
+
+ num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
+
+ /* Allocate PF Mailbox buffer (per-VF) */
+ p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_msg_virt_addr;
+ *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_msg_size);
+ if (!*p_v_addr)
+ return ECORE_NOMEM;
+
+ /* Allocate PF Mailbox Reply buffer (per-VF) */
+ p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_reply_virt_addr;
+ *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->mbx_reply_size);
+ if (!*p_v_addr)
+ return ECORE_NOMEM;
+
+ p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
+ num_vfs;
+ p_v_addr = &p_iov_info->p_bulletins;
+ *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov_info->bulletins_phys,
+ p_iov_info->bulletins_size);
+ if (!*p_v_addr)
+ return ECORE_NOMEM;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "PF's Requests mailbox [%p virt 0x%lx phys], "
+ "Response mailbox [%p virt 0x%lx phys] Bulletinsi"
+ " [%p virt 0x%lx phys]\n",
+ p_iov_info->mbx_msg_virt_addr,
+ (unsigned long)p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_reply_virt_addr,
+ (unsigned long)p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->p_bulletins,
+ (unsigned long)p_iov_info->bulletins_phys);
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+
+ if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov_info->mbx_msg_virt_addr,
+ p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_msg_size);
+
+ if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov_info->mbx_reply_virt_addr,
+ p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->mbx_reply_size);
+
+ if (p_iov_info->p_bulletins)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov_info->p_bulletins,
+ p_iov_info->bulletins_phys,
+ p_iov_info->bulletins_size);
+}
+
+enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_pf_iov *p_sriov;
+
+ if (!IS_PF_SRIOV(p_hwfn)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No SR-IOV - no need for IOV db\n");
+ return ECORE_SUCCESS;
+ }
+
+ p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
+ if (!p_sriov) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");
+ return ECORE_NOMEM;
+ }
+
+ p_hwfn->pf_iov_info = p_sriov;
+
+ ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ ecore_sriov_eqe_event);
+
+ return ecore_iov_allocate_vfdb(p_hwfn);
+}
+
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
+{
+ if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return;
+
+ ecore_iov_setup_vfdb(p_hwfn);
+}
+
+void ecore_iov_free(struct ecore_hwfn *p_hwfn)
+{
+ ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
+ if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
+ ecore_iov_free_vfdb(p_hwfn);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
+ }
+}
+
+void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
+{
+ OSAL_FREE(p_dev, p_dev->p_iov_info);
+}
+
+enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ int pos;
+ enum _ecore_status_t rc;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ /* Learn the PCI configuration */
+ pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
+ PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
+ return ECORE_SUCCESS;
+ }
+
+ /* Allocate a new struct for IOV information */
+ /* TODO - can change to VALLOC when its available */
+ p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+ sizeof(*p_dev->p_iov_info));
+ if (!p_dev->p_iov_info) {
+ DP_NOTICE(p_hwfn, false,
+ "Can't support IOV due to lack of memory\n");
+ return ECORE_NOMEM;
+ }
+ p_dev->p_iov_info->pos = pos;
+
+ rc = ecore_iov_pci_cfg_info(p_dev);
+ if (rc)
+ return rc;
+
+ /* We want PF IOV to be synonemous with the existence of p_iov_info;
+ * In case the capability is published but there are no VFs, simply
+ * de-allocate the struct.
+ */
+ if (!p_dev->p_iov_info->total_vfs) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "IOV capabilities, but no VFs are published\n");
+ OSAL_FREE(p_dev, p_dev->p_iov_info);
+ return ECORE_SUCCESS;
+ }
+
+ /* First VF index based on offset is tricky:
+ * - If ARI is supported [likely], offset - (16 - pf_id) would
+ * provide the number for eng0. 2nd engine Vfs would begin
+ * after the first engine's VFs.
+ * - If !ARI, VFs would start on next device.
+ * so offset - (256 - pf_id) would provide the number.
+ * Utilize the fact that (256 - pf_id) is achieved only be later
+ * to diffrentiate between the two.
+ */
+
+ if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+ u32 first = p_hwfn->p_dev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+
+ p_dev->p_iov_info->first_vf_in_pf = first;
+
+ if (ECORE_PATH_ID(p_hwfn))
+ p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+ } else {
+ u32 first = p_hwfn->p_dev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 256;
+
+ p_dev->p_iov_info->first_vf_in_pf = first;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "First VF in hwfn 0x%08x\n",
+ p_dev->p_iov_info->first_vf_in_pf);
+
+ return ECORE_SUCCESS;
+}
+
+static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_fail_malicious)
+{
+ /* Check PF supports sriov */
+ if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
+ !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return false;
+
+ /* Check VF validity */
+ if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
+ return false;
+
+ return true;
+}
+
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
+}
+
+void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
+ u16 rel_vf_id, u8 to_disable)
+{
+ struct ecore_vf_info *vf;
+ int i;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf)
+ continue;
+
+ vf->to_disable = to_disable;
+ }
+}
+
+void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
+ u8 to_disable)
+{
+ u16 i;
+
+ if (!IS_ECORE_SRIOV(p_dev))
+ return;
+
+ for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
+ ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
+}
+
+#ifndef LINUX_REMOVE
+/* @@@TBD Consider taking outside of ecore... */
+enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
+ u16 vf_id,
+ void *ctx)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
+
+ if (vf != OSAL_NULL) {
+ vf->ctx = ctx;
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
+#endif
+ } else {
+ rc = ECORE_UNKNOWN_ERROR;
+ }
+ return rc;
+}
+#endif
+
+static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 abs_vfid)
+{
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
+ 1 << (abs_vfid & 0x1f));
+}
+
+static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ int i;
+
+ /* Set VF masks and configuration - pretend */
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
+
+ /* unpretend */
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+
+ /* iterate over all queues, clear sb consumer */
+ for (i = 0; i < vf->num_sbs; i++)
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, true);
+}
+
+static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf, bool enable)
+{
+ u32 igu_vf_conf;
+
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
+
+ igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
+
+ if (enable)
+ igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
+ else
+ igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
+
+ /* unpretend */
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+}
+
+static enum _ecore_status_t
+ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 abs_vf_id,
+ u8 num_sbs)
+{
+ u8 current_max = 0;
+ int i;
+
+ /* If client overrides this, don't do anything */
+ if (p_hwfn->p_dev->b_dont_override_vf_msix)
+ return ECORE_SUCCESS;
+
+ /* For AH onward, configuration is per-PF. Find maximum of all
+ * the currently enabled child VFs, and set the number to be that.
+ */
+ if (!ECORE_IS_BB(p_hwfn->p_dev)) {
+ ecore_for_each_vf(p_hwfn, i) {
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
+ if (!p_vf)
+ continue;
+
+ current_max = OSAL_MAX_T(u8, current_max,
+ p_vf->num_sbs);
+ }
+ }
+
+ if (num_sbs > current_max)
+ return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
+ abs_vf_id, num_sbs);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
+{
+ u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* It's possible VF was previously considered malicious -
+ * clear the indication even if we're only going to disable VF.
+ */
+ vf->b_malicious = false;
+
+ if (vf->to_disable)
+ return ECORE_SUCCESS;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
+ ECORE_VF_ABS_ID(p_hwfn, vf));
+
+ ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
+ ECORE_VF_ABS_ID(p_hwfn, vf));
+
+ ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
+ vf->abs_vf_id, vf->num_sbs);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
+
+ SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
+ STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
+
+ ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
+ p_hwfn->hw_info.hw_mode);
+
+ /* unpretend */
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+
+ vf->state = VF_FREE;
+
+ return rc;
+}
+
+/**
+ *
+ * @brief ecore_iov_config_perm_table - configure the permission
+ * zone table.
+ * In E4, queue zone permission table size is 320x9. There
+ * are 320 VF queues for single engine device (256 for dual
+ * engine device), and each entry has the following format:
+ * {Valid, VF[7:0]}
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ * @param enable
+ */
+static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf, u8 enable)
+{
+ u32 reg_addr, val;
+ u16 qzone_id = 0;
+ int qid;
+
+ for (qid = 0; qid < vf->num_rxqs; qid++) {
+ ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
+ &qzone_id);
+
+ reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
+ val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+ ecore_wr(p_hwfn, p_ptt, reg_addr, val);
+ }
+}
+
+static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ /* Reset vf in IGU - interrupts are still disabled */
+ ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
+
+ /* Permission Table */
+ ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
+}
+
+static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf,
+ u16 num_rx_queues)
+{
+ struct ecore_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+ int qid = 0;
+ u32 val = 0;
+
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
+ num_rx_queues =
+ (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
+
+ for (qid = 0; qid < num_rx_queues; qid++) {
+ p_block = ecore_get_igu_free_sb(p_hwfn, false);
+ vf->igu_sbs[qid] = p_block->igu_sb_id;
+ p_block->status &= ~ECORE_IGU_STATUS_FREE;
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * p_block->igu_sb_id, val);
+
+ /* Configure igu sb in CAU which were marked valid */
+ ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_hwfn->rel_pf_id,
+ vf->abs_vf_id, 1);
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ p_block->igu_sb_id * sizeof(u64), 2, 0);
+ }
+
+ vf->num_sbs = (u8)num_rx_queues;
+
+ return vf->num_sbs;
+}
+
+/**
+ *
+ * @brief The function invalidates all the VF entries,
+ * technically this isn't required, but added for
+ * cleaness and ease of debugging incase a VF attempts to
+ * produce an interrupt after it has been taken down.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ */
+static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ int idx, igu_id;
+ u32 addr, val;
+
+ /* Invalidate igu CAM lines and mark them as free */
+ for (idx = 0; idx < vf->num_sbs; idx++) {
+ igu_id = vf->igu_sbs[idx];
+ addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
+
+ val = ecore_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ ecore_wr(p_hwfn, p_ptt, addr, val);
+
+ p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
+ }
+
+ vf->num_sbs = 0;
+}
+
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps)
+{
+ struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+ struct ecore_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+enum _ecore_status_t
+ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_iov_vf_init_params *p_params)
+{
+ struct ecore_mcp_link_capabilities link_caps;
+ struct ecore_mcp_link_params link_params;
+ struct ecore_mcp_link_state link_state;
+ u8 num_of_vf_available_chains = 0;
+ struct ecore_vf_info *vf = OSAL_NULL;
+ u16 qid, num_irqs;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 cids;
+ u8 i;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
+ if (!vf) {
+ DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ if (vf->b_init) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
+ p_params->rel_vf_id);
+ return ECORE_INVAL;
+ }
+
+ /* Perform sanity checking on the requested vport/rss */
+ if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
+ p_params->rel_vf_id, p_params->vport_id);
+ return ECORE_INVAL;
+ }
+
+ if ((p_params->num_queues > 1) &&
+ (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
+ p_params->rel_vf_id, p_params->rss_eng_id);
+ return ECORE_INVAL;
+ }
+
+ /* TODO - remove this once we get confidence of change */
+ if (!p_params->vport_id) {
+ DP_NOTICE(p_hwfn, false,
+ "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
+ p_params->rel_vf_id);
+ }
+ if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
+ DP_NOTICE(p_hwfn, false,
+ "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
+ p_params->rel_vf_id);
+ }
+ vf->vport_id = p_params->vport_id;
+ vf->rss_eng_id = p_params->rss_eng_id;
+
+ /* Since it's possible to relocate SBs, it's a bit difficult to check
+ * things here. Simply check whether the index falls in the range
+ * belonging to the PF.
+ */
+ for (i = 0; i < p_params->num_queues; i++) {
+ qid = p_params->req_rx_queue[i];
+ if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Rx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
+ qid, p_params->rel_vf_id,
+ (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
+ return ECORE_INVAL;
+ }
+
+ qid = p_params->req_tx_queue[i];
+ if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Tx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
+ qid, p_params->rel_vf_id,
+ (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
+ return ECORE_INVAL;
+ }
+ }
+
+ /* Limit number of queues according to number of CIDs */
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - requesting to initialize for 0x%04x queues"
+ " [0x%04x CIDs available]\n",
+ vf->relative_vf_id, p_params->num_queues, (u16)cids);
+ num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
+
+ num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
+ p_ptt,
+ vf,
+ num_irqs);
+ if (num_of_vf_available_chains == 0) {
+ DP_ERR(p_hwfn, "no available igu sbs\n");
+ return ECORE_NOMEM;
+ }
+
+ /* Choose queue number and index ranges */
+ vf->num_rxqs = num_of_vf_available_chains;
+ vf->num_txqs = num_of_vf_available_chains;
+
+ for (i = 0; i < vf->num_rxqs; i++) {
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
+
+ p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+ p_queue->fw_tx_qid = p_params->req_tx_queue[i];
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i],
+ p_queue->fw_rx_qid, p_queue->fw_tx_qid);
+ }
+
+ /* Update the link configuration in bulletin.
+ */
+ OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
+ sizeof(link_params));
+ OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
+ sizeof(link_state));
+ OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
+ sizeof(link_caps));
+ ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
+ &link_params, &link_state, &link_caps);
+
+ rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+
+ if (rc == ECORE_SUCCESS) {
+ vf->b_init = true;
+ p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
+ (1ULL << (vf->relative_vf_id % 64));
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->p_dev->p_iov_info->num_vfs++;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id)
+{
+ struct ecore_mcp_link_capabilities caps;
+ struct ecore_mcp_link_params params;
+ struct ecore_mcp_link_state link;
+ struct ecore_vf_info *vf = OSAL_NULL;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!vf) {
+ DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ if (vf->bulletin.p_virt)
+ OSAL_MEMSET(vf->bulletin.p_virt, 0,
+ sizeof(*vf->bulletin.p_virt));
+
+ OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
+
+ /* Get the link configuration back in bulletin so
+ * that when VFs are re-enabled they get the actual
+ * link configuration.
+ */
+ OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
+ OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
+ OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
+ sizeof(caps));
+ ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
+
+ /* Forget the VF's acquisition message */
+ OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));
+
+ /* disablng interrupts and resetting permission table was done during
+ * vf-close, however, we could get here without going through vf_close
+ */
+ /* Disable Interrupts for VF */
+ ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ vf->num_rxqs = 0;
+ vf->num_txqs = 0;
+ ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
+
+ if (vf->b_init) {
+ vf->b_init = false;
+ p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
+ ~(1ULL << (vf->relative_vf_id / 64));
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->p_dev->p_iov_info->num_vfs--;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static bool ecore_iov_tlv_supported(u16 tlvtype)
+{
+ return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
+}
+
+static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf, u16 tlv)
+{
+ /* lock the channel */
+ /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
+
+ /* record the locking op */
+ /* vf->op_current = tlv; @@@TBD MichalK */
+
+ /* log the lock */
+ if (ecore_iov_tlv_supported(tlv))
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel locked by %s\n",
+ vf->abs_vf_id,
+ ecore_channel_tlvs_string[tlv]);
+ else
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel locked by %04x\n",
+ vf->abs_vf_id, tlv);
+}
+
+static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ u16 expected_tlv)
+{
+ /* log the unlock */
+ if (ecore_iov_tlv_supported(expected_tlv))
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel unlocked by %s\n",
+ vf->abs_vf_id,
+ ecore_channel_tlvs_string[expected_tlv]);
+ else
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel unlocked by %04x\n",
+ vf->abs_vf_id, expected_tlv);
+
+ /* record the locking op */
+ /* vf->op_current = CHANNEL_TLV_NONE; */
+}
+
+/* place a given tlv on the tlv buffer, continuing current tlv list */
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
+{
+ struct channel_tlv *tl = (struct channel_tlv *)*offset;
+
+ tl->type = type;
+ tl->length = length;
+
+ /* Offset should keep pointing to next TLV (the end of the last) */
+ *offset += length;
+
+ /* Return a pointer to the start of the added tlv */
+ return *offset - length;
+}
+
+/* list the types and lengths of the tlvs on the buffer */
+void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
+{
+ u16 i = 1, total_length = 0;
+ struct channel_tlv *tlv;
+
+ do {
+ /* cast current tlv list entry to channel tlv header */
+ tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
+
+ /* output tlv */
+ if (ecore_iov_tlv_supported(tlv->type))
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "TLV number %d: type %s, length %d\n",
+ i, ecore_channel_tlvs_string[tlv->type],
+ tlv->length);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "TLV number %d: type %d, length %d\n",
+ i, tlv->type, tlv->length);
+
+ if (tlv->type == CHANNEL_TLV_LIST_END)
+ return;
+
+ /* Validate entry - protect against malicious VFs */
+ if (!tlv->length) {
+ DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
+ return;
+ }
+ total_length += tlv->length;
+ if (total_length >= sizeof(struct tlv_buffer_size)) {
+ DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
+ return;
+ }
+
+ i++;
+ } while (1);
+}
+
+static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ u16 length,
+#else
+ u16 OSAL_UNUSED length,
+#endif
+ u8 status)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct ecore_dmae_params params;
+ u8 eng_vf_id;
+
+ mbx->reply_virt->default_resp.hdr.status = status;
+
+ ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ mbx->sw_mbx.response_size =
+ length + sizeof(struct channel_list_end_tlv);
+
+ if (!p_vf->b_hw_channel)
+ return;
+#endif
+
+ eng_vf_id = p_vf->abs_vf_id;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+ params.flags = ECORE_DMAE_FLAG_VF_DST;
+ params.dst_vfid = eng_vf_id;
+
+ ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
+ mbx->req_virt->first_tlv.reply_address +
+ sizeof(u64),
+ (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
+ &params);
+
+ /* Once PF copies the rc to the VF, the latter can continue and
+ * and send an additional message. So we have to make sure the
+ * channel would be re-set to ready prior to that.
+ */
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
+ ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+ mbx->req_virt->first_tlv.reply_address,
+ sizeof(u64) / 4, &params);
+
+ OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
+}
+
+static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
+{
+ switch (flag) {
+ case ECORE_IOV_VP_UPDATE_ACTIVATE:
+ return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
+ return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+ case ECORE_IOV_VP_UPDATE_TX_SWITCH:
+ return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ case ECORE_IOV_VP_UPDATE_MCAST:
+ return CHANNEL_TLV_VPORT_UPDATE_MCAST;
+ case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ case ECORE_IOV_VP_UPDATE_RSS:
+ return CHANNEL_TLV_VPORT_UPDATE_RSS;
+ case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ case ECORE_IOV_VP_UPDATE_SGE_TPA:
+ return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+ default:
+ return 0;
+ }
+}
+
+static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u8 status, u16 tlvs_mask,
+ u16 tlvs_accepted)
+{
+ struct pfvf_def_resp_tlv *resp;
+ u16 size, total_len, i;
+
+ OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
+ p_mbx->offset = (u8 *)p_mbx->reply_virt;
+ size = sizeof(struct pfvf_def_resp_tlv);
+ total_len = size;
+
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+
+ /* Prepare response for all extended tlvs if they are found by PF */
+ for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
+ if (!(tlvs_mask & (1 << i)))
+ continue;
+
+ resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
+ size);
+
+ if (tlvs_accepted & (1 << i))
+ resp->hdr.status = status;
+ else
+ resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - vport_update resp: TLV %d, status %02x\n",
+ p_vf->relative_vf_id,
+ ecore_iov_vport_to_tlv(i),
+ resp->hdr.status);
+
+ total_len += size;
+ }
+
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ return total_len;
+}
+
+static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf_info,
+ u16 type, u16 length, u8 status)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ ecore_add_tlv(&mbx->offset, type, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
+}
+
+struct ecore_public_vf_info
+*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct ecore_vf_info *vf = OSAL_NULL;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
+ if (!vf)
+ return OSAL_NULL;
+
+ return &vf->p_vf_info;
+}
+
+static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ u32 i, j;
+ p_vf->vf_bulletin = 0;
+ p_vf->vport_instance = 0;
+ p_vf->configured_features = 0;
+
+ /* If VF previously requested less resources, go back to default */
+ p_vf->num_rxqs = p_vf->num_sbs;
+ p_vf->num_txqs = p_vf->num_sbs;
+
+ p_vf->num_active_rxqs = 0;
+
+ for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
+ struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+
+ for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+ if (!p_queue->cids[j].p_cid)
+ continue;
+
+ ecore_eth_queue_cid_release(p_hwfn,
+ p_queue->cids[j].p_cid);
+ p_queue->cids[j].p_cid = OSAL_NULL;
+ }
+ }
+
+ OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+ OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
+ OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
+}
+
+/* Returns either 0, or log(size) */
+static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
+
+ if (val)
+ return val + 11;
+ return 0;
+}
+
+static void
+ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
+ u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
+ DB_ADDR_VF(0, DQ_DEMS_LEGACY);
+ u32 bar_size;
+
+ p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
+
+ /* If VF didn't bother asking for QIDs than don't bother limiting
+ * number of CIDs. The VF doesn't care about the number, and this
+ * has the likely result of causing an additional acquisition.
+ */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
+ * that would make sure doorbells for all CIDs fall within the bar.
+ * If it doesn't, make sure regview window is sufficient.
+ */
+ if (p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
+ bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
+ if (bar_size)
+ bar_size = 1 << bar_size;
+
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ bar_size /= 2;
+ } else {
+ bar_size = PXP_VF_BAR0_DQ_LENGTH;
+ }
+
+ if (bar_size / db_size < 256)
+ p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
+ (u8)(bar_size / db_size));
+}
+
+static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ u8 i;
+
+ /* Queue related information */
+ p_resp->num_rxqs = p_vf->num_rxqs;
+ p_resp->num_txqs = p_vf->num_txqs;
+ p_resp->num_sbs = p_vf->num_sbs;
+
+ for (i = 0; i < p_resp->num_sbs; i++) {
+ p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
+ /* TODO - what's this sb_qid field? Is it deprecated?
+ * or is there an ecore_client that looks at this?
+ */
+ p_resp->hw_sbs[i].sb_qid = 0;
+ }
+
+ /* These fields are filled for backward compatibility.
+ * Unused by modern vfs.
+ */
+ for (i = 0; i < p_resp->num_rxqs; i++) {
+ ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
+ (u16 *)&p_resp->hw_qid[i]);
+ p_resp->cid[i] = i;
+ }
+
+ /* Filter related information */
+ p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
+ p_req->num_mac_filters);
+ p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
+ p_req->num_vlan_filters);
+
+ ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
+
+ /* This isn't really needed/enforced, but some legacy VFs might depend
+ * on the correct filling of this field.
+ */
+ p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
+
+ /* Validate sufficient resources for VF */
+ if (p_resp->num_rxqs < p_req->num_rxqs ||
+ p_resp->num_txqs < p_req->num_txqs ||
+ p_resp->num_sbs < p_req->num_sbs ||
+ p_resp->num_mac_filters < p_req->num_mac_filters ||
+ p_resp->num_vlan_filters < p_req->num_vlan_filters ||
+ p_resp->num_mc_filters < p_req->num_mc_filters ||
+ p_resp->num_cids < p_req->num_cids) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
+ p_vf->abs_vf_id,
+ p_req->num_rxqs, p_resp->num_rxqs,
+ p_req->num_rxqs, p_resp->num_txqs,
+ p_req->num_sbs, p_resp->num_sbs,
+ p_req->num_mac_filters, p_resp->num_mac_filters,
+ p_req->num_vlan_filters, p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters,
+ p_req->num_cids, p_resp->num_cids);
+
+ /* Some legacy OSes are incapable of correctly handling this
+ * failure.
+ */
+ if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
+ (p_vf->acquire.vfdev_info.os_type ==
+ VFPF_ACQUIRE_OS_WINDOWS))
+ return PFVF_STATUS_SUCCESS;
+
+ return PFVF_STATUS_NO_RESOURCE;
+ }
+
+ return PFVF_STATUS_SUCCESS;
+}
+
+static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
+{
+ p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ OFFSETOF(struct mstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+ p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
+ OFFSETOF(struct ustorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+ p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
+ OFFSETOF(struct pstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+ p_stats->tstats.address = 0;
+ p_stats->tstats.len = 0;
+}
+
+static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
+ u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ struct pf_vf_resc *resc = &resp->resc;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(resp, 0, sizeof(*resp));
+
+ /* Write the PF version so that VF would know which version
+ * is supported - might be later overridden. This guarantees that
+ * VF could recognize legacy PF based on lack of versions in reply.
+ */
+ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+ pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
+ /* TODO - not doing anything is bad since we'll assert, but this isn't
+ * necessarily the right behavior - perhaps we should have allowed some
+ * versatility here.
+ */
+ if (vf->state != VF_FREE &&
+ vf->state != VF_STOPPED) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
+ vf->abs_vf_id, vf->state);
+ goto out;
+ }
+
+ /* Validate FW compatibility */
+ if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
+
+ /* This legacy support would need to be removed once
+ * the major has changed.
+ */
+ OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] is pre-fastpath HSI\n",
+ vf->abs_vf_id);
+ p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF[%d] needs fastpath HSI %02x.%02x, which is"
+ " incompatible with loaded FW's faspath"
+ " HSI %02x.%02x\n",
+ vf->abs_vf_id,
+ req->vfdev_info.eth_fp_hsi_major,
+ req->vfdev_info.eth_fp_hsi_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+ goto out;
+ }
+ }
+
+ /* On 100g PFs, prevent old VFs from loading */
+ if (ECORE_IS_CMT(p_hwfn->p_dev) &&
+ !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
+ DP_INFO(p_hwfn,
+ "VF[%d] is running an old driver that doesn't support"
+ " 100g\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+
+#ifndef __EXTRACT__LINUX__
+ if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
+ vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+#endif
+
+ /* Store the acquire message */
+ OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
+
+ vf->opaque_fid = req->vfdev_info.opaque_fid;
+
+ vf->vf_bulletin = req->bulletin_addr;
+ vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
+ vf->bulletin.size : req->bulletin_size;
+
+ /* fill in pfdev info */
+ pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
+ pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
+ pfdev_info->indices_per_sb = PIS_PER_SB_E4;
+
+ pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
+
+ /* Share our ability to use multiple queue-ids only with VFs
+ * that request it.
+ */
+ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
+
+ /* Share the sizes of the bars with VF */
+ resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
+ p_ptt);
+
+ ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
+
+ OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
+ ETH_ALEN);
+
+ pfdev_info->fw_major = FW_MAJOR_VERSION;
+ pfdev_info->fw_minor = FW_MINOR_VERSION;
+ pfdev_info->fw_rev = FW_REVISION_VERSION;
+ pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+
+ /* Incorrect when legacy, but doesn't matter as legacy isn't reading
+ * this field.
+ */
+ pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
+ req->vfdev_info.eth_fp_hsi_minor);
+ pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
+ ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
+ OSAL_NULL);
+
+ pfdev_info->dev_type = p_hwfn->p_dev->type;
+ pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
+
+ /* Fill resources available to VF; Make sure there are enough to
+ * satisfy the VF's request.
+ */
+ vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
+ &req->resc_request, resc);
+ if (vfpf_status != PFVF_STATUS_SUCCESS)
+ goto out;
+
+ /* Start the VF in FW */
+ rc = ecore_sp_vf_start(p_hwfn, vf);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
+ vf->abs_vf_id);
+ vfpf_status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Fill agreed size of bulletin board in response, and post
+ * an initial image to the bulletin board.
+ */
+ resp->bulletin_size = vf->bulletin.size;
+ ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
+ " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
+ "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
+ " n_vlans-%d\n",
+ vf->abs_vf_id, resp->pfdev_info.chip_num,
+ resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
+ (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
+ resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
+ resc->num_vlan_filters);
+
+ vf->state = VF_ACQUIRED;
+
+out:
+ /* Prepare Response */
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
+ sizeof(struct pfvf_acquire_resp_tlv),
+ vfpf_status);
+}
+
+static enum _ecore_status_t
+__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, bool val)
+{
+ struct ecore_sp_vport_update_params params;
+ enum _ecore_status_t rc;
+
+ if (val == p_vf->spoof_chk) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Spoofchk value[%d] is already configured\n", val);
+ return ECORE_SUCCESS;
+ }
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.update_anti_spoofing_en_flg = 1;
+ params.anti_spoofing_en = val;
+
+ rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc == ECORE_SUCCESS) {
+ p_vf->spoof_chk = val;
+ p_vf->req_spoofchk_val = p_vf->spoof_chk;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Spoofchk val[%d] configured\n", val);
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Spoofchk configuration[val:%d] failed for VF[%d]\n",
+ val, p_vf->relative_vf_id);
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_filter_ucast filter;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ OSAL_MEMSET(&filter, 0, sizeof(filter));
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.opcode = ECORE_FILTER_ADD;
+
+ /* Reconfigure vlans */
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (!p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ filter.type = ECORE_FILTER_VLAN;
+ filter.vlan = p_vf->shadow_config.vlans[i].vid;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, ECORE_SPQ_MODE_CB,
+ OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to configure VLAN [%04x]"
+ " to VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, u64 events)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /*TODO - what about MACs? */
+
+ if ((events & (1 << VLAN_ADDR_FORCED)) &&
+ !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
+ rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u64 events)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_filter_ucast filter;
+
+ if (!p_vf->vport_instance)
+ return ECORE_INVAL;
+
+ if ((events & (1 << MAC_ADDR_FORCED)) ||
+ p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) {
+ /* Since there's no way [currently] of removing the MAC,
+ * we can always assume this means we need to force it.
+ */
+ OSAL_MEMSET(&filter, 0, sizeof(filter));
+ filter.type = ECORE_FILTER_MAC;
+ filter.opcode = ECORE_FILTER_REPLACE;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
+
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter,
+ ECORE_SPQ_MODE_CB, OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "PF failed to configure MAC for VF\n");
+ return rc;
+ }
+
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ p_vf->configured_features |=
+ 1 << VFPF_BULLETIN_MAC_ADDR;
+ else
+ p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ }
+
+ if (events & (1 << VLAN_ADDR_FORCED)) {
+ struct ecore_sp_vport_update_params vport_update;
+ u8 removal;
+ int i;
+
+ OSAL_MEMSET(&filter, 0, sizeof(filter));
+ filter.type = ECORE_FILTER_VLAN;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.vlan = p_vf->bulletin.p_virt->pvid;
+ filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
+ ECORE_FILTER_FLUSH;
+
+ /* Send the ramrod */
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter,
+ ECORE_SPQ_MODE_CB, OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "PF failed to configure VLAN for VF\n");
+ return rc;
+ }
+
+ /* Update the default-vlan & silent vlan stripping */
+ OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
+ vport_update.opaque_fid = p_vf->opaque_fid;
+ vport_update.vport_id = p_vf->vport_id;
+ vport_update.update_default_vlan_enable_flg = 1;
+ vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
+ vport_update.update_default_vlan_flg = 1;
+ vport_update.default_vlan = filter.vlan;
+
+ vport_update.update_inner_vlan_removal_flg = 1;
+ removal = filter.vlan ?
+ 1 : p_vf->shadow_config.inner_vlan_removal;
+ vport_update.inner_vlan_removal_flg = removal;
+ vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update,
+ ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "PF failed to configure VF vport for vlan\n");
+ return rc;
+ }
+
+ /* Update all the Rx queues */
+ for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
+ struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+ struct ecore_queue_cid *p_cid = OSAL_NULL;
+
+ /* There can be at most 1 Rx queue on qzone. Find it */
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
+ if (p_cid == OSAL_NULL)
+ continue;
+
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn,
+ (void **)&p_cid,
+ 1, 0, 1,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to send Rx update"
+ " fo queue[0x%04x]\n",
+ p_cid->rel.queue_id);
+ return rc;
+ }
+ }
+
+ if (filter.vlan)
+ p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
+ else
+ p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+ }
+
+ /* If forced features are terminated, we need to configure the shadow
+ * configuration back again.
+ */
+ if (events)
+ ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
+
+ return rc;
+}
+
+static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_sp_vport_start_params params = { 0 };
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_vport_start_tlv *start;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct ecore_vf_info *vf_info;
+ u64 *p_bitmap;
+ int sb_id;
+ enum _ecore_status_t rc;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Failed to get VF info, invalid vfid [%d]\n",
+ vf->relative_vf_id);
+ return;
+ }
+
+ vf->state = VF_ENABLED;
+ start = &mbx->req_virt->start_vport;
+
+ ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
+ /* Initialize Status block in CAU */
+ for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
+ if (!start->sb_addr[sb_id]) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] did not fill the address of SB %d\n",
+ vf->relative_vf_id, sb_id);
+ break;
+ }
+
+ ecore_int_cau_conf_sb(p_hwfn, p_ptt,
+ start->sb_addr[sb_id],
+ vf->igu_sbs[sb_id],
+ vf->abs_vf_id, 1);
+ }
+
+ vf->mtu = start->mtu;
+ vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
+
+ /* Take into consideration configuration forced by hypervisor;
+ * If none is configured, use the supplied VF values [for old
+ * vfs that would still be fine, since they passed '0' as padding].
+ */
+ p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
+ if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+ u8 vf_req = start->only_untagged;
+
+ vf_info->bulletin.p_virt->default_only_untagged = vf_req;
+ *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
+ }
+
+ params.tpa_mode = start->tpa_mode;
+ params.remove_inner_vlan = start->inner_vlan_removal;
+ params.tx_switching = true;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
+ params.tx_switching = false;
+ }
+#endif
+
+ params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
+ params.drop_ttl0 = false;
+ params.concrete_fid = vf->concrete_fid;
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+ params.mtu = vf->mtu;
+ params.check_mac = true;
+
+ rc = ecore_sp_eth_vport_start(p_hwfn, &params);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ vf->vport_instance++;
+
+ /* Force configuration if needed on the newly opened vport */
+ ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
+ OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
+ vf->vport_id, vf->opaque_fid);
+ __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
+ }
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ u8 status = PFVF_STATUS_SUCCESS;
+ enum _ecore_status_t rc;
+
+ OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
+ vf->vport_instance--;
+ vf->spoof_chk = false;
+
+ if ((ecore_iov_validate_active_rxq(vf)) ||
+ (ecore_iov_validate_active_txq(vf))) {
+ vf->b_malicious = true;
+ DP_NOTICE(p_hwfn, false,
+ "VF [%02x] - considered malicious;"
+ " Unable to stop RX/TX queuess\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_MALICIOUS;
+ goto out;
+ }
+
+ rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ /* Forget the configuration on the vport */
+ vf->configured_features = 0;
+ OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf,
+ u8 status, bool b_legacy)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+ struct vfpf_start_rxq_tlv *req;
+ u16 length;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ /* Taking a bigger struct instead of adding a TLV to list was a
+ * mistake, but one which we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
+ req = &mbx->req_virt->start_rxq;
+ p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ OFFSETOF(struct mstorm_vf_zone,
+ non_trigger.eth_rx_queue_producers) +
+ sizeof(struct eth_rx_prod_data) * req->rx_qid;
+ }
+
+ ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, bool b_is_tx)
+{
+ struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
+ struct vfpf_qid_tlv *p_qid_tlv;
+
+ /* Search for the qid if the VF published if its going to provide it */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
+ if (b_is_tx)
+ return ECORE_IOV_LEGACY_QID_TX;
+ else
+ return ECORE_IOV_LEGACY_QID_RX;
+ }
+
+ p_qid_tlv = (struct vfpf_qid_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ CHANNEL_TLV_QID);
+ if (p_qid_tlv == OSAL_NULL) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%2x]: Failed to provide qid\n",
+ p_vf->relative_vf_id);
+
+ return ECORE_IOV_QID_INVALID;
+ }
+
+ if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%02x]: Provided qid out-of-bounds %02x\n",
+ p_vf->relative_vf_id, p_qid_tlv->qid);
+ return ECORE_IOV_QID_INVALID;
+ }
+
+ return p_qid_tlv->qid;
+}
+
+static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_queue_start_common_params params;
+ struct ecore_queue_cid_vf_params vf_params;
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
+ u8 qid_usage_idx, vf_legacy = 0;
+ struct ecore_vf_queue *p_queue;
+ struct vfpf_start_rxq_tlv *req;
+ struct ecore_queue_cid *p_cid;
+ struct ecore_sb_info sb_dummy;
+ enum _ecore_status_t rc;
+
+ req = &mbx->req_virt->start_rxq;
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+ ECORE_IOV_VALIDATE_Q_DISABLE) ||
+ !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+
+ p_queue = &vf->vf_queues[req->rx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = ecore_vf_calculate_legacy(vf);
+
+ /* Acquire a new queue-cid */
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.queue_id = (u8)p_queue->fw_rx_qid;
+ params.vport_id = vf->vport_id;
+ params.stats_id = vf->abs_vf_id + 0x10;
+
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
+ params.sb_idx = req->sb_index;
+
+ OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->rx_qid;
+ vf_params.vf_legacy = vf_legacy;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, true, &vf_params);
+ if (p_cid == OSAL_NULL)
+ goto out;
+
+ /* Legacy VFs have their Producers in a different location, which they
+ * calculate on their own and clean the producer prior to this.
+ */
+ if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
+ 0);
+
+ rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr,
+ req->cqe_pbl_size);
+ if (rc != ECORE_SUCCESS) {
+ status = PFVF_STATUS_FAILURE;
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ } else {
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = false;
+ status = PFVF_STATUS_SUCCESS;
+ vf->num_active_rxqs++;
+ }
+
+out:
+ ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
+ !!(vf_legacy &
+ ECORE_QCID_LEGACY_VF_RX_PROD));
+}
+
+static void
+ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
+ struct ecore_tunnel_info *p_tun,
+ u16 tunn_feature_mask)
+{
+ p_resp->tunn_feature_mask = tunn_feature_mask;
+ p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
+ p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
+ p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
+ p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
+ p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
+ p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
+ p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
+ p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
+ p_resp->geneve_udp_port = p_tun->geneve_port.port;
+ p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
+}
+
+static void
+__ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_tun,
+ enum ecore_tunn_mode mask, u8 tun_cls)
+{
+ if (p_req->tun_mode_update_mask & (1 << mask)) {
+ p_tun->b_update_mode = true;
+
+ if (p_req->tunn_mode & (1 << mask))
+ p_tun->b_mode_enabled = true;
+ }
+
+ p_tun->tun_cls = tun_cls;
+}
+
+static void
+ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_tun,
+ struct ecore_tunn_update_udp_port *p_port,
+ enum ecore_tunn_mode mask,
+ u8 tun_cls, u8 update_port, u16 port)
+{
+ if (update_port) {
+ p_port->b_update_port = true;
+ p_port->port = port;
+ }
+
+ __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
+}
+
+static bool
+ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
+{
+ bool b_update_requested = false;
+
+ if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
+ p_req->update_geneve_port || p_req->update_vxlan_port)
+ b_update_requested = true;
+
+ return b_update_requested;
+}
+
+static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 status = PFVF_STATUS_SUCCESS;
+ bool b_update_required = false;
+ struct ecore_tunnel_info tunn;
+ u16 tunn_feature_mask = 0;
+ int i;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ OSAL_MEM_ZERO(&tunn, sizeof(tunn));
+ p_req = &mbx->req_virt->tunn_param_update;
+
+ if (!ecore_iov_pf_validate_tunn_param(p_req)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No tunnel update requested by VF\n");
+ status = PFVF_STATUS_FAILURE;
+ goto send_resp;
+ }
+
+ tunn.b_update_rx_cls = p_req->update_tun_cls;
+ tunn.b_update_tx_cls = p_req->update_tun_cls;
+
+ ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
+ ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
+ p_req->update_vxlan_port,
+ p_req->vxlan_port);
+ ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
+ ECORE_MODE_L2GENEVE_TUNN,
+ p_req->l2geneve_clss,
+ p_req->update_geneve_port,
+ p_req->geneve_port);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
+ ECORE_MODE_IPGENEVE_TUNN,
+ p_req->ipgeneve_clss);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
+ ECORE_MODE_L2GRE_TUNN,
+ p_req->l2gre_clss);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
+ ECORE_MODE_IPGRE_TUNN,
+ p_req->ipgre_clss);
+
+ /* If PF modifies VF's req then it should
+ * still return an error in case of partial configuration
+ * or modified configuration as opposed to requested one.
+ */
+ rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
+ &b_update_required, &tunn);
+
+ if (rc != ECORE_SUCCESS)
+ status = PFVF_STATUS_FAILURE;
+
+ /* If ECORE client is willing to update anything ? */
+ if (b_update_required) {
+ u16 geneve_port;
+
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ status = PFVF_STATUS_FAILURE;
+
+ geneve_port = p_tun->geneve_port.port;
+ ecore_for_each_vf(p_hwfn, i) {
+ ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
+ p_tun->vxlan_port.port,
+ geneve_port);
+ }
+ }
+
+send_resp:
+ p_resp = ecore_add_tlv(&mbx->offset,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
+
+ ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
+static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ u32 cid,
+ u8 status)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+ bool b_legacy = false;
+ u16 length;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ /* Taking a bigger struct instead of adding a TLV to list was a
+ * mistake, but one which we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy = true;
+
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
+ p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
+}
+
+static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_queue_start_common_params params;
+ struct ecore_queue_cid_vf_params vf_params;
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
+ struct ecore_vf_queue *p_queue;
+ struct vfpf_start_txq_tlv *req;
+ struct ecore_queue_cid *p_cid;
+ struct ecore_sb_info sb_dummy;
+ u8 qid_usage_idx, vf_legacy;
+ u32 cid = 0;
+ enum _ecore_status_t rc;
+ u16 pq;
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ req = &mbx->req_virt->start_txq;
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+ ECORE_IOV_VALIDATE_Q_NA) ||
+ !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+
+ p_queue = &vf->vf_queues[req->tx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = ecore_vf_calculate_legacy(vf);
+
+ /* Acquire a new queue-cid */
+ params.queue_id = p_queue->fw_tx_qid;
+ params.vport_id = vf->vport_id;
+ params.stats_id = vf->abs_vf_id + 0x10;
+
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
+ params.sb_idx = req->sb_index;
+
+ OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->tx_qid;
+ vf_params.vf_legacy = vf_legacy;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, false, &vf_params);
+ if (p_cid == OSAL_NULL)
+ goto out;
+
+ pq = ecore_get_cm_pq_idx_vf(p_hwfn,
+ vf->relative_vf_id);
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
+ req->pbl_addr, req->pbl_size, pq);
+ if (rc != ECORE_SUCCESS) {
+ status = PFVF_STATUS_FAILURE;
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ } else {
+ status = PFVF_STATUS_SUCCESS;
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = true;
+ cid = p_cid->cid;
+ }
+
+out:
+ ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
+ cid, status);
+}
+
+static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ u16 rxq_id,
+ u8 qid_usage_idx,
+ bool cqe_completion)
+{
+ struct ecore_vf_queue *p_queue;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
+ ECORE_IOV_VALIDATE_Q_NA)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
+ vf->relative_vf_id, rxq_id, qid_usage_idx);
+ return ECORE_INVAL;
+ }
+
+ p_queue = &vf->vf_queues[rxq_id];
+
+ /* We've validated the index and the existence of the active RXQ -
+ * now we need to make sure that it's using the correct qid.
+ */
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ p_queue->cids[qid_usage_idx].b_is_tx) {
+ struct ecore_queue_cid *p_cid;
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
+ vf->relative_vf_id, rxq_id, qid_usage_idx,
+ rxq_id, p_cid->qid_usage_idx);
+ return ECORE_INVAL;
+ }
+
+ /* Now that we know we have a valid Rx-queue - close it */
+ rc = ecore_eth_rx_queue_stop(p_hwfn,
+ p_queue->cids[qid_usage_idx].p_cid,
+ false, cqe_completion);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+ vf->num_active_rxqs--;
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ u16 txq_id,
+ u8 qid_usage_idx)
+{
+ struct ecore_vf_queue *p_queue;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
+ ECORE_IOV_VALIDATE_Q_NA))
+ return ECORE_INVAL;
+
+ p_queue = &vf->vf_queues[txq_id];
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ !p_queue->cids[qid_usage_idx].b_is_tx)
+ return ECORE_INVAL;
+
+ rc = ecore_eth_tx_queue_stop(p_hwfn,
+ p_queue->cids[qid_usage_idx].p_cid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+ return ECORE_SUCCESS;
+}
+
+static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct vfpf_stop_rxqs_tlv *req;
+ u8 qid_usage_idx;
+ enum _ecore_status_t rc;
+
+ /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs'
+ * would be one. Since no older ecore passed multiple queues
+ * using this API, sanitize on the value.
+ */
+ req = &mbx->req_virt->stop_rxqs;
+ if (req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Rx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+
+ rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ qid_usage_idx, req->cqe_completion);
+ if (rc == ECORE_SUCCESS)
+ status = PFVF_STATUS_SUCCESS;
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
+ length, status);
+}
+
+static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct vfpf_stop_txqs_tlv *req;
+ u8 qid_usage_idx;
+ enum _ecore_status_t rc;
+
+ /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs'
+ * would be one. Since no older ecore passed multiple queues
+ * using this API, sanitize on the value.
+ */
+ req = &mbx->req_virt->stop_txqs;
+ if (req->num_txqs != 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Tx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+
+ rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
+ qid_usage_idx);
+ if (rc == ECORE_SUCCESS)
+ status = PFVF_STATUS_SUCCESS;
+
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
+ length, status);
+}
+
+static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_update_rxq_tlv *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ u8 complete_event_flg;
+ u8 complete_cqe_flg;
+ u8 qid_usage_idx;
+ enum _ecore_status_t rc;
+ u16 i;
+
+ req = &mbx->req_virt->update_rxq;
+ complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
+ complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+
+ /* Starting with the addition of CHANNEL_TLV_QID, this API started
+ * expecting a single queue at a time. Validate this.
+ */
+ if ((vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
+ req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] supports QIDs but sends multiple queues\n",
+ vf->relative_vf_id);
+ goto out;
+ }
+
+ /* Validate inputs - for the legacy case this is still true since
+ * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
+ */
+ for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
+ ECORE_IOV_VALIDATE_Q_NA) ||
+ !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
+ vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid,
+ req->num_rxqs);
+ goto out;
+ }
+ }
+
+ for (i = 0; i < req->num_rxqs; i++) {
+ u16 qid = req->rx_qid + i;
+
+ handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
+ }
+
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+ req->num_rxqs,
+ complete_cqe_flg,
+ complete_event_flg,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ status = PFVF_STATUS_SUCCESS;
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
+ length, status);
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct ecore_sp_vport_update_params params;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct vfpf_update_mtu_tlv *p_req;
+ u8 status = PFVF_STATUS_SUCCESS;
+
+ /* Valiate PF can send such a request */
+ if (!p_vf->vport_instance) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing MTU update\n",
+ p_vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto send_status;
+ }
+
+ p_req = &mbx->req_virt->update_mtu;
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.mtu = p_req->mtu;
+ rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+send_status:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ CHANNEL_TLV_UPDATE_MTU,
+ sizeof(struct pfvf_def_resp_tlv),
+ status);
+ return rc;
+}
+
+void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type)
+{
+ struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
+ int len = 0;
+
+ do {
+ if (!p_tlv->length) {
+ DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
+ return OSAL_NULL;
+ }
+
+ if (p_tlv->type == req_type) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Extended tlv type %s, length %d found\n",
+ ecore_channel_tlvs_string[p_tlv->type],
+ p_tlv->length);
+ return p_tlv;
+ }
+
+ len += p_tlv->length;
+ p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
+
+ if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
+ DP_NOTICE(p_hwfn, true,
+ "TLVs has overrun the buffer size\n");
+ return OSAL_NULL;
+ }
+ } while (p_tlv->type != CHANNEL_TLV_LIST_END);
+
+ return OSAL_NULL;
+}
+
+static void
+ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+
+ p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_act_tlv)
+ return;
+
+ p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+ p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+ p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+ p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
+}
+
+static void
+ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_vf_info *p_vf,
+ struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+
+ p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_vlan_tlv)
+ return;
+
+ p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
+
+ /* Ignore the VF request if we're forcing a vlan */
+ if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+ p_data->update_inner_vlan_removal_flg = 1;
+ p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
+ }
+
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
+}
+
+static void
+ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+
+ p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_tx_switch_tlv)
+ return;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: Ignore tx-switching configuration originating"
+ " from VFs\n");
+ return;
+ }
+#endif
+
+ p_data->update_tx_switching_flg = 1;
+ p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
+}
+
+static void
+ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
+
+ p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_mcast_tlv)
+ return;
+
+ p_data->update_approx_mcast_flg = 1;
+ OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
+}
+
+static void
+ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+
+ p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_accept_tlv)
+ return;
+
+ p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
+ p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
+ p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
+ p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
+}
+
+static void
+ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+
+ p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_accept_any_vlan)
+ return;
+
+ p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
+ p_data->update_accept_any_vlan_flg =
+ p_accept_any_vlan->update_accept_any_vlan_flg;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
+}
+
+static void
+ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_rss_params *p_rss,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask, u16 *tlvs_accepted)
+{
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+ bool b_reject = false;
+ u16 table_size;
+ u16 i, q_idx;
+
+ p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_rss_tlv) {
+ p_data->rss_params = OSAL_NULL;
+ return;
+ }
+
+ OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
+
+ p_rss->update_rss_config =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CONFIG_FLAG);
+ p_rss->update_rss_capabilities =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CAPS_FLAG);
+ p_rss->update_rss_ind_table =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG);
+ p_rss->update_rss_key =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_KEY_FLAG);
+
+ p_rss->rss_enable = p_rss_tlv->rss_enable;
+ p_rss->rss_eng_id = vf->rss_eng_id;
+ p_rss->rss_caps = p_rss_tlv->rss_caps;
+ p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+ OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
+ sizeof(p_rss->rss_key));
+
+ table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
+ (1 << p_rss_tlv->rss_table_size_log));
+
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_cid;
+
+ q_idx = p_rss_tlv->rss_ind_table[i];
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+ vf->relative_vf_id, q_idx);
+ b_reject = true;
+ goto out;
+ }
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
+ p_rss->rss_ind_table[i] = p_cid;
+ }
+
+ p_data->rss_params = p_rss;
+out:
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
+ if (!b_reject)
+ *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
+}
+
+static void
+ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_sge_tpa_params *p_sge_tpa,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+
+ p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+ if (!p_sge_tpa_tlv) {
+ p_data->sge_tpa_params = OSAL_NULL;
+ return;
+ }
+
+ OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
+
+ p_sge_tpa->update_tpa_en_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
+ p_sge_tpa->update_tpa_param_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags &
+ VFPF_UPDATE_TPA_PARAM_FLAG);
+
+ p_sge_tpa->tpa_ipv4_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
+ p_sge_tpa->tpa_ipv6_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
+ p_sge_tpa->tpa_pkt_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
+ p_sge_tpa->tpa_hdr_data_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
+ p_sge_tpa->tpa_gro_consistent_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
+
+ p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
+ p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
+ p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
+ p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
+ p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
+
+ p_data->sge_tpa_params = p_sge_tpa;
+
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
+}
+
+static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_rss_params *p_rss_params = OSAL_NULL;
+ struct ecore_sp_vport_update_params params;
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct ecore_sge_tpa_params sge_tpa_params;
+ u16 tlvs_mask = 0, tlvs_accepted = 0;
+ u8 status = PFVF_STATUS_SUCCESS;
+ u16 length;
+ enum _ecore_status_t rc;
+
+ /* Valiate PF can send such a request */
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No VPORT instance available for VF[%d],"
+ " failing vport update\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
+ if (p_rss_params == OSAL_NULL) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.rss_params = OSAL_NULL;
+
+ /* Search for extended tlvs list and update values
+ * from VF in struct ecore_sp_vport_update_params.
+ */
+ ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
+ ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
+ &sge_tpa_params, mbx, &tlvs_mask);
+
+ tlvs_accepted = tlvs_mask;
+
+ /* Some of the extended TLVs need to be validated first; In that case,
+ * they can update the mask without updating the accepted [so that
+ * PF could communicate to VF it has rejected request].
+ */
+ ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+ mbx, &tlvs_mask, &tlvs_accepted);
+
+ /* Just log a message if there is no single extended tlv in buffer.
+ * When all features of vport update ramrod would be requested by VF
+ * as extended TLVs in buffer then an error can be returned in response
+ * if there is no extended TLV present in buffer.
+ */
+ if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
+ &params, &tlvs_accepted) !=
+ ECORE_SUCCESS) {
+ tlvs_accepted = 0;
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ if (!tlvs_accepted) {
+ if (tlvs_mask)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Upper-layer prevents said VF"
+ " configuration\n");
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No feature tlvs found for vport update\n");
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
+ length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+ tlvs_mask, tlvs_accepted);
+ ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_filter_ucast *p_params)
+{
+ int i;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == ECORE_FILTER_REMOVE) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ if (p_vf->shadow_config.vlans[i].used &&
+ p_vf->shadow_config.vlans[i].vid ==
+ p_params->vlan) {
+ p_vf->shadow_config.vlans[i].used = false;
+ break;
+ }
+ if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%d] - Tries to remove a non-existing"
+ " vlan\n",
+ p_vf->relative_vf_id);
+ return ECORE_INVAL;
+ }
+ } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
+ p_params->opcode == ECORE_FILTER_FLUSH) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ p_vf->shadow_config.vlans[i].used = false;
+ }
+
+ /* In forced mode, we're willing to remove entries - but we don't add
+ * new ones.
+ */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+ return ECORE_SUCCESS;
+
+ if (p_params->opcode == ECORE_FILTER_ADD ||
+ p_params->opcode == ECORE_FILTER_REPLACE) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ p_vf->shadow_config.vlans[i].used = true;
+ p_vf->shadow_config.vlans[i].vid = p_params->vlan;
+ break;
+ }
+
+ if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%d] - Tries to configure more than %d"
+ " vlan filters\n",
+ p_vf->relative_vf_id,
+ ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
+ return ECORE_INVAL;
+ }
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_filter_ucast *p_params)
+{
+ char empty_mac[ETH_ALEN];
+ int i;
+
+ OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
+
+ /* If we're in forced-mode, we don't allow any change */
+ /* TODO - this would change if we were ever to implement logic for
+ * removing a forced MAC altogether [in which case, like for vlans,
+ * we should be able to re-trace previous configuration.
+ */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+ return ECORE_SUCCESS;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == ECORE_FILTER_REMOVE) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
+ p_params->mac, ETH_ALEN)) {
+ OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
+ ETH_ALEN);
+ break;
+ }
+ }
+
+ if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "MAC isn't configured\n");
+ return ECORE_INVAL;
+ }
+ } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
+ p_params->opcode == ECORE_FILTER_FLUSH) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
+ OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
+ }
+
+ /* List the new MAC address */
+ if (p_params->opcode != ECORE_FILTER_ADD &&
+ p_params->opcode != ECORE_FILTER_REPLACE)
+ return ECORE_SUCCESS;
+
+ for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
+ empty_mac, ETH_ALEN)) {
+ OSAL_MEMCPY(p_vf->shadow_config.macs[i],
+ p_params->mac, ETH_ALEN);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Added MAC at %d entry in shadow\n", i);
+ break;
+ }
+ }
+
+ if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No available place for MAC\n");
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_filter_ucast *p_params)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (p_params->type == ECORE_FILTER_MAC) {
+ rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ if (p_params->type == ECORE_FILTER_VLAN)
+ rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
+
+ return rc;
+}
+
+static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_ucast_filter_tlv *req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct ecore_filter_ucast params;
+ enum _ecore_status_t rc;
+
+ /* Prepare the unicast filter params */
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
+ req = &mbx->req_virt->ucast_filter;
+ params.opcode = (enum ecore_filter_opcode)req->opcode;
+ params.type = (enum ecore_filter_ucast_type)req->type;
+
+ /* @@@TBD - We might need logic on HV side in determining this */
+ params.is_rx_filter = 1;
+ params.is_tx_filter = 1;
+ params.vport_to_remove_from = vf->vport_id;
+ params.vport_to_add_to = vf->vport_id;
+ OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
+ params.vlan = req->vlan;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
+ " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+ vf->abs_vf_id, params.opcode, params.type,
+ params.is_rx_filter ? "RX" : "",
+ params.is_tx_filter ? "TX" : "",
+ params.vport_to_add_to,
+ params.mac[0], params.mac[1], params.mac[2],
+ params.mac[3], params.mac[4], params.mac[5], params.vlan);
+
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No VPORT instance available for VF[%d],"
+ " failing ucast MAC configuration\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Update shadow copy of the VF configuration. In case shadow indicates
+ * the action should be blocked return success to VF to imitate the
+ * firmware behaviour in such case.
+ */
+ if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
+ ECORE_SUCCESS)
+ goto out;
+
+ /* Determine if the unicast filtering is acceptible by PF */
+ if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+ (params.type == ECORE_FILTER_VLAN ||
+ params.type == ECORE_FILTER_MAC_VLAN)) {
+ /* Once VLAN is forced or PVID is set, do not allow
+ * to add/replace any further VLANs.
+ */
+ if (params.opcode == ECORE_FILTER_ADD ||
+ params.opcode == ECORE_FILTER_REPLACE)
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+ (params.type == ECORE_FILTER_MAC ||
+ params.type == ECORE_FILTER_MAC_VLAN)) {
+ if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
+ (params.opcode != ECORE_FILTER_ADD &&
+ params.opcode != ECORE_FILTER_REPLACE))
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
+ if (rc == ECORE_EXISTS) {
+ goto out;
+ } else if (rc == ECORE_INVAL) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
+ ECORE_SPQ_MODE_CB, OSAL_NULL);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ int i;
+
+ /* Reset the SBs */
+ for (i = 0; i < vf->num_sbs; i++)
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, false);
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_SUCCESS);
+}
+
+static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+
+ /* Disable Interrupts for VF */
+ ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
+ length, status);
+}
+
+static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ ecore_iov_vf_cleanup(p_hwfn, p_vf);
+
+ if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
+ /* Stopping the VF */
+ rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
+ p_vf->opaque_fid);
+
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
+ rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ p_vf->state = VF_STOPPED;
+ }
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
+ length, status);
+}
+
+static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_read_coal_resp_tlv *p_resp;
+ struct vfpf_read_coal_req_tlv *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct ecore_vf_queue *p_queue;
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 coal = 0, qid, i;
+ bool b_is_rx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+ req = &mbx->req_virt->read_coal_req;
+
+ qid = req->qid;
+ b_is_rx = req->is_rx ? true : false;
+
+ if (b_is_rx) {
+ if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Invalid Rx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
+ rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
+ if (rc != ECORE_SUCCESS)
+ goto send_resp;
+ } else {
+ if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Invalid Tx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ p_queue = &p_vf->vf_queues[qid];
+ if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
+ (!p_queue->cids[i].b_is_tx))
+ continue;
+
+ p_cid = p_queue->cids[i].p_cid;
+
+ rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
+ p_cid, &coal);
+ if (rc != ECORE_SUCCESS)
+ goto send_resp;
+ break;
+ }
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+
+send_resp:
+ p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
+ sizeof(*p_resp));
+ p_resp->coal = coal;
+
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
+static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct vfpf_update_coalesce *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct ecore_queue_cid *p_cid;
+ u16 rx_coal, tx_coal;
+ u16 qid;
+ int i;
+
+ req = &mbx->req_virt->update_coalesce;
+
+ rx_coal = req->rx_coal;
+ tx_coal = req->tx_coal;
+ qid = req->qid;
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ rx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ tx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+ vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+ if (rx_coal) {
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
+
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set rx queue = %d coalesce\n",
+ vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+ goto out;
+ }
+ vf->rx_coal = rx_coal;
+ }
+
+ /* TODO - in future, it might be possible to pass this in a per-cid
+ * granularity. For now, do this for all Tx queues.
+ */
+ if (tx_coal) {
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[i].b_is_tx)
+ continue;
+
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+ p_queue->cids[i].p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set tx queue coalesce\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+ }
+ vf->tx_coal = tx_coal;
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+enum _ecore_status_t
+ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ u16 vf_id, u16 qid)
+{
+ struct ecore_queue_cid *p_cid;
+ struct ecore_vf_info *vf;
+ struct ecore_ptt *p_ptt;
+ int i, rc = 0;
+
+ if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
+ DP_NOTICE(p_hwfn, true,
+ "VF[%d] - Can not set coalescing: VF is not active\n",
+ vf_id);
+ return ECORE_INVAL;
+ }
+
+ vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ rx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ tx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+ vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+ if (rx_coal) {
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
+
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set rx queue = %d coalesce\n",
+ vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+ goto out;
+ }
+ vf->rx_coal = rx_coal;
+ }
+
+ /* TODO - in future, it might be possible to pass this in a per-cid
+ * granularity. For now, do this for all Tx queues.
+ */
+ if (tx_coal) {
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[i].b_is_tx)
+ continue;
+
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+ p_queue->cids[i].p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set tx queue coalesce\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+ }
+ vf->tx_coal = tx_coal;
+ }
+
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
+{
+ int cnt;
+ u32 val;
+
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
+
+ for (cnt = 0; cnt < 50; cnt++) {
+ val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
+ if (!val)
+ break;
+ OSAL_MSLEEP(20);
+ }
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn,
+ "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
+ p_vf->abs_vf_id, val);
+ return ECORE_TIMEOUT;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
+{
+ u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
+ int i, cnt;
+
+ /* Read initial consumers & producers */
+ for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
+ u32 prod;
+
+ cons[i] = ecore_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ prod = ecore_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
+ i * 0x40);
+ distance[i] = prod - cons[i];
+ }
+
+ /* Wait for consumers to pass the producers */
+ i = 0;
+ for (cnt = 0; cnt < 50; cnt++) {
+ for (; i < MAX_NUM_VOQS_E4; i++) {
+ u32 tmp;
+
+ tmp = ecore_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ if (distance[i] > tmp - cons[i])
+ break;
+ }
+
+ if (i == MAX_NUM_VOQS_E4)
+ break;
+
+ OSAL_MSLEEP(20);
+ }
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
+ p_vf->abs_vf_id, i);
+ return ECORE_TIMEOUT;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_ptt *p_ptt)
+{
+ enum _ecore_status_t rc;
+
+ /* TODO - add SRC and TM polling once we add storage IOV */
+
+ rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id, u32 *ack_vfs)
+{
+ struct ecore_vf_info *p_vf;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!p_vf)
+ return ECORE_SUCCESS;
+
+ if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+ (1ULL << (rel_vf_id % 64))) {
+ u16 vfid = p_vf->abs_vf_id;
+
+ /* TODO - should we lock channel? */
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Handling FLR\n", vfid);
+
+ ecore_iov_vf_cleanup(p_hwfn, p_vf);
+
+ /* If VF isn't active, no need for anything but SW */
+ if (!p_vf->b_init)
+ goto cleanup;
+
+ /* TODO - what to do in case of failure? */
+ rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ goto cleanup;
+
+ rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
+ if (rc) {
+ /* TODO - what's now? What a mess.... */
+ DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
+ return rc;
+ }
+
+ /* Workaround to make VF-PF channel ready, as FW
+ * doesn't do that as a part of FLR.
+ */
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
+
+ /* VF_STOPPED has to be set only after final cleanup
+ * but prior to re-enabling the VF.
+ */
+ p_vf->state = VF_STOPPED;
+
+ rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
+ if (rc) {
+ /* TODO - again, a mess... */
+ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
+ vfid);
+ return rc;
+ }
+cleanup:
+ /* Mark VF for ack and clean pending state */
+ if (p_vf->state == VF_RESET)
+ p_vf->state = VF_STOPPED;
+ ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+ p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ p_vf->vf_mbx.b_pending_msg = false;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 ack_vfs[VF_MAX_STATIC / 32];
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 i;
+
+ OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+ /* Since BRB <-> PRS interface can't be tested as part of the flr
+ * polling due to HW limitations, simply sleep a bit. And since
+ * there's no need to wait per-vf, do it before looping.
+ */
+ OSAL_MSLEEP(100);
+
+ for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
+ ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
+
+ rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 rel_vf_id)
+{
+ u32 ack_vfs[VF_MAX_STATIC / 32];
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+ /* Wait instead of polling the BRB <-> PRS interface */
+ OSAL_MSLEEP(100);
+
+ ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
+
+ rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+ return rc;
+}
+
+bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
+{
+ bool found = false;
+ u16 i;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "[%08x,...,%08x]: %08x\n",
+ i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
+
+ if (!p_hwfn->p_dev->p_iov_info) {
+ DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
+ return false;
+ }
+
+ /* Mark VFs */
+ for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
+ struct ecore_vf_info *p_vf;
+ u8 vfid;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
+ if (!p_vf)
+ continue;
+
+ vfid = p_vf->abs_vf_id;
+ if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+ u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
+ u16 rel_vf_id = p_vf->relative_vf_id;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] [rel %d] got FLR-ed\n",
+ vfid, rel_vf_id);
+
+ p_vf->state = VF_RESET;
+
+ /* No need to lock here, since pending_flr should
+ * only change here and before ACKing MFw. Since
+ * MFW will not trigger an additional attention for
+ * VF flr until ACKs, we're safe.
+ */
+ p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
+ found = true;
+ }
+ }
+
+ return found;
+}
+
+void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *p_params,
+ struct ecore_mcp_link_state *p_link,
+ struct ecore_mcp_link_capabilities *p_caps)
+{
+ struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+ struct ecore_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ if (p_params)
+ __ecore_vf_get_link_params(p_params, p_bulletin);
+ if (p_link)
+ __ecore_vf_get_link_state(p_link, p_bulletin);
+ if (p_caps)
+ __ecore_vf_get_link_caps(p_caps, p_bulletin);
+}
+
+void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, int vfid)
+{
+ struct ecore_iov_vf_mbx *mbx;
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf)
+ return;
+
+ mbx = &p_vf->vf_mbx;
+
+ /* ecore_iov_process_mbx_request */
+#ifndef CONFIG_ECORE_SW_CHANNEL
+ if (!mbx->b_pending_msg) {
+ DP_NOTICE(p_hwfn, true,
+ "VF[%02x]: Trying to process mailbox message when none is pending\n",
+ p_vf->abs_vf_id);
+ return;
+ }
+ mbx->b_pending_msg = false;
+#endif
+
+ mbx->first_tlv = mbx->req_virt->first_tlv;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%02x]: Processing mailbox message [type %04x]\n",
+ p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
+ OSAL_IOV_VF_MSG_TYPE(p_hwfn,
+ p_vf->relative_vf_id,
+ mbx->first_tlv.tl.type);
+
+ /* Lock the per vf op mutex and note the locker's identity.
+ * The unlock will take place in mbx response.
+ */
+ ecore_iov_lock_vf_pf_channel(p_hwfn,
+ p_vf, mbx->first_tlv.tl.type);
+
+ /* check if tlv type is known */
+ if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
+ !p_vf->b_malicious) {
+ /* switch on the opcode */
+ switch (mbx->first_tlv.tl.type) {
+ case CHANNEL_TLV_ACQUIRE:
+ ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_START:
+ ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_TEARDOWN:
+ ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_RXQ:
+ ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_TXQ:
+ ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_RXQS:
+ ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_TXQS:
+ ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UPDATE_RXQ:
+ ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_UPDATE:
+ ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UCAST_FILTER:
+ ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_CLOSE:
+ ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_INT_CLEANUP:
+ ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_RELEASE:
+ ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+ ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_COALESCE_UPDATE:
+ ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_COALESCE_READ:
+ ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UPDATE_MTU:
+ ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf);
+ break;
+ }
+ } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ /* If we've received a message from a VF we consider malicious
+ * we ignore the messasge unless it's one for RELEASE, in which
+ * case we'll let it have the benefit of doubt, allowing the
+ * next loaded driver to start again.
+ */
+ if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
+ /* TODO - initiate FLR, remove malicious indication */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
+ p_vf->abs_vf_id);
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
+ p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+ }
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_MALICIOUS);
+ } else {
+ /* unknown TLV - this may belong to a VF driver from the future
+ * - a version written after this PF driver was written, which
+ * supports features unknown as of yet. Too bad since we don't
+ * support them. Or this may be because someone wrote a crappy
+ * VF driver and is sending garbage over the channel.
+ */
+ DP_NOTICE(p_hwfn, false,
+ "VF[%02x]: unknown TLV. type %04x length %04x"
+ " padding %08x reply address %lu\n",
+ p_vf->abs_vf_id,
+ mbx->first_tlv.tl.type,
+ mbx->first_tlv.tl.length,
+ mbx->first_tlv.padding,
+ (unsigned long)mbx->first_tlv.reply_address);
+
+ /* Try replying in case reply address matches the acquisition's
+ * posted address.
+ */
+ if (p_vf->acquire.first_tlv.reply_address &&
+ (mbx->first_tlv.reply_address ==
+ p_vf->acquire.first_tlv.reply_address))
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_NOT_SUPPORTED);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%02x]: Can't respond to TLV -"
+ " no valid reply address\n",
+ p_vf->abs_vf_id);
+ }
+
+ ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
+ mbx->first_tlv.tl.type);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
+ mbx->sw_mbx.response_offset = 0;
+#endif
+}
+
+void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events)
+{
+ int i;
+
+ OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+
+ ecore_for_each_vf(p_hwfn, i) {
+ struct ecore_vf_info *p_vf;
+
+ p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+ if (p_vf->vf_mbx.b_pending_msg)
+ events[i / 64] |= 1ULL << (i % 64);
+ }
+}
+
+static struct ecore_vf_info *
+ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
+{
+ u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
+
+ if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Got indication for VF [abs 0x%08x] that cannot be"
+ " handled by PF\n",
+ abs_vfid);
+ return OSAL_NULL;
+ }
+
+ return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+}
+
+static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
+ u16 abs_vfid,
+ struct regpair *vf_msg)
+{
+ struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
+ abs_vfid);
+
+ if (!p_vf)
+ return ECORE_SUCCESS;
+
+ /* List the physical address of the request so that handler
+ * could later on copy the message from it.
+ */
+ p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+
+ p_vf->vf_mbx.b_pending_msg = true;
+
+ return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
+}
+
+static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
+ struct malicious_vf_eqe_data *p_data)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
+
+ if (!p_vf)
+ return;
+
+ if (!p_vf->b_malicious) {
+ DP_NOTICE(p_hwfn, false,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
+
+ p_vf->b_malicious = true;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
+ }
+
+ OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
+}
+
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 OSAL_UNUSED fw_return_code)
+{
+ switch (opcode) {
+ case COMMON_EVENT_VF_PF_CHANNEL:
+ return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
+ &data->vf_pf_channel.msg_addr);
+ case COMMON_EVENT_VF_FLR:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF-FLR is still not supported\n");
+ return ECORE_SUCCESS;
+ case COMMON_EVENT_MALICIOUS_VF:
+ ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
+ return ECORE_SUCCESS;
+ default:
+ DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
+ opcode);
+ return ECORE_INVAL;
+ }
+}
+
+bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+ (1ULL << (rel_vf_id % 64)));
+}
+
+u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+ u16 i;
+
+ if (!p_iov)
+ goto out;
+
+ for (i = rel_vf_id; i < p_iov->total_vfs; i++)
+ if (ecore_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false))
+ return i;
+
+out:
+ return MAX_NUM_VFS_E4;
+}
+
+enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *ptt, int vfid)
+{
+ struct ecore_dmae_params params;
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return ECORE_INVAL;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+ params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
+ params.src_vfid = vf_info->abs_vf_id;
+
+ if (ecore_dmae_host2host(p_hwfn, ptt,
+ vf_info->vf_mbx.pending_req,
+ vf_info->vf_mbx.req_phys,
+ sizeof(union vfpf_tlvs) / 4, &params)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Failed to copy message from VF 0x%02x\n", vfid);
+
+ return ECORE_IO;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set forced MAC to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ feature = 1 << VFPF_BULLETIN_MAC_ADDR;
+ else
+ feature = 1 << MAC_ADDR_FORCED;
+
+ OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << VFPF_BULLETIN_MAC_ADDR);
+
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set MAC, invalid vfid [%d]\n", vfid);
+ return ECORE_INVAL;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set MAC to malicious VF [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+
+ if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Can not set MAC, Forced MAC is configured\n");
+ return ECORE_INVAL;
+ }
+
+ feature = 1 << VFPF_BULLETIN_MAC_ADDR;
+ OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+
+ return ECORE_SUCCESS;
+}
+
+#ifndef LINUX_REMOVE
+enum _ecore_status_t
+ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
+ bool b_untagged_only, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set untagged default, invalid vfid [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set untagged default to malicious VF [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+
+ /* Since this is configurable only during vport-start, don't take it
+ * if we're past that point.
+ */
+ if (vf_info->state == VF_ENABLED) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Can't support untagged change for vfid[%d] -"
+ " VF is already active\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+
+ /* Set configuration; This will later be taken into account during the
+ * VF initialization.
+ */
+ feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
+ (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+ vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
+ : 0;
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
+ u16 *opaque_fid)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return;
+
+ *opaque_fid = vf_info->opaque_fid;
+}
+#endif
+
+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 pvid, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set forced MAC, invalid vfid [%d]\n",
+ vfid);
+ return;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set forced vlan to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ feature = 1 << VLAN_ADDR_FORCED;
+ vf_info->bulletin.p_virt->pvid = pvid;
+ if (pvid)
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ else
+ vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set udp ports, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Can not set udp ports to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
+ vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
+}
+
+bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ struct ecore_vf_info *p_vf_info;
+
+ p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf_info)
+ return false;
+
+ return !!p_vf_info->vport_instance;
+}
+
+bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ struct ecore_vf_info *p_vf_info;
+
+ p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf_info)
+ return true;
+
+ return p_vf_info->state == VF_STOPPED;
+}
+
+bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return false;
+
+ return vf_info->spoof_chk;
+}
+
+enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+ int vfid, bool val)
+{
+ struct ecore_vf_info *vf;
+ enum _ecore_status_t rc = ECORE_INVAL;
+
+ if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn, true,
+ "SR-IOV sanity check failed, can't set spoofchk\n");
+ goto out;
+ }
+
+ vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf)
+ goto out;
+
+ if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
+ /* After VF VPORT start PF will configure spoof check */
+ vf->req_spoofchk_val = val;
+ rc = ECORE_SUCCESS;
+ goto out;
+ }
+
+ rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
+
+out:
+ return rc;
+}
+
+u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
+{
+ u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;
+
+ max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
+ : ECORE_MAX_VF_CHAINS_PER_PF;
+
+ return max_chains_per_vf;
+}
+
+void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_req_virt_addr,
+ u16 *p_req_virt_size)
+{
+ struct ecore_vf_info *vf_info =
+ ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+
+ if (!vf_info)
+ return;
+
+ if (pp_req_virt_addr)
+ *pp_req_virt_addr = vf_info->vf_mbx.req_virt;
+
+ if (p_req_virt_size)
+ *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
+}
+
+void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_reply_virt_addr,
+ u16 *p_reply_virt_size)
+{
+ struct ecore_vf_info *vf_info =
+ ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+
+ if (!vf_info)
+ return;
+
+ if (pp_reply_virt_addr)
+ *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
+
+ if (p_reply_virt_size)
+ *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
+}
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *vf_info =
+ ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+
+ if (!vf_info)
+ return OSAL_NULL;
+
+ return &vf_info->vf_mbx.sw_mbx;
+}
+#endif
+
+bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
+{
+ return (length >= sizeof(struct vfpf_first_tlv) &&
+ (length <= sizeof(union vfpf_tlvs)));
+}
+
+u32 ecore_iov_pfvf_msg_length(void)
+{
+ return sizeof(union pfvf_tlvs);
+}
+
+u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return OSAL_NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap &
+ (1 << VFPF_BULLETIN_MAC_ADDR)))
+ return OSAL_NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
+u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return OSAL_NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return OSAL_NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
+u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return 0;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ return 0;
+
+ return p_vf->bulletin.p_virt->pvid;
+}
+
+enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid, int val)
+{
+ struct ecore_mcp_link_state *p_link;
+ struct ecore_vf_info *vf;
+ u8 abs_vp_id = 0;
+ enum _ecore_status_t rc;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+
+ if (!vf)
+ return ECORE_INVAL;
+
+ rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+ return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+ p_link->speed);
+}
+
+enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid,
+ struct ecore_eth_stats *p_stats)
+{
+ struct ecore_vf_info *vf;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf)
+ return ECORE_INVAL;
+
+ if (vf->state != VF_ENABLED)
+ return ECORE_INVAL;
+
+ __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
+ vf->abs_vf_id + 0x10, false);
+
+ return ECORE_SUCCESS;
+}
+
+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return 0;
+
+ return p_vf->num_rxqs;
+}
+
+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return 0;
+
+ return p_vf->num_active_rxqs;
+}
+
+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return OSAL_NULL;
+
+ return p_vf->ctx;
+}
+
+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return 0;
+
+ return p_vf->num_sbs;
+}
+
+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state == VF_FREE);
+}
+
+bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state == VF_ACQUIRED);
+}
+
+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state == VF_ENABLED);
+}
+
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
+}
+
+enum _ecore_status_t
+ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ struct ecore_wfq_data *vf_vp_wfq;
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return 0;
+
+ vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
+
+ if (vf_vp_wfq->configured)
+ return vf_vp_wfq->min_speed;
+ else
+ return 0;
+}
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_is_hw)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return;
+
+ vf_info->b_hw_channel = b_is_hw;
+}
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.h
new file mode 100644
index 00000000..50c7d2c9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.h
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_SRIOV_H__
+#define __ECORE_SRIOV_H__
+
+#include "ecore_status.h"
+#include "ecore_vfpf_if.h"
+#include "ecore_iov_api.h"
+#include "ecore_hsi_common.h"
+#include "ecore_l2.h"
+
+#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
+ (MAX_NUM_VFS_E4 * ECORE_ETH_VF_NUM_VLAN_FILTERS)
+
+/* Represents a full message. Both the request filled by VF
+ * and the response filled by the PF. The VF needs one copy
+ * of this message, it fills the request part and sends it to
+ * the PF. The PF will copy the response to the response part for
+ * the VF to later read it. The PF needs to hold a message like this
+ * per VF, the request that is copied to the PF is placed in the
+ * request size, and the response is filled by the PF before sending
+ * it to the VF.
+ */
+struct ecore_vf_mbx_msg {
+ union vfpf_tlvs req;
+ union pfvf_tlvs resp;
+};
+
+/* This mailbox is maintained per VF in its PF
+ * contains all information required for sending / receiving
+ * a message
+ */
+struct ecore_iov_vf_mbx {
+ union vfpf_tlvs *req_virt;
+ dma_addr_t req_phys;
+ union pfvf_tlvs *reply_virt;
+ dma_addr_t reply_phys;
+
+ /* Address in VF where a pending message is located */
+ dma_addr_t pending_req;
+
+ /* Message from VF awaits handling */
+ bool b_pending_msg;
+
+ u8 *offset;
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ struct ecore_iov_sw_mbx sw_mbx;
+#endif
+
+ /* VF GPA address */
+ u32 vf_addr_lo;
+ u32 vf_addr_hi;
+
+ struct vfpf_first_tlv first_tlv; /* saved VF request header */
+
+ u8 flags;
+#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
+ * more then one pending msg
+ */
+};
+
+#define ECORE_IOV_LEGACY_QID_RX (0)
+#define ECORE_IOV_LEGACY_QID_TX (1)
+#define ECORE_IOV_QID_INVALID (0xFE)
+
+struct ecore_vf_queue_cid {
+ bool b_is_tx;
+ struct ecore_queue_cid *p_cid;
+};
+
+/* Describes a qzone associated with the VF */
+struct ecore_vf_queue {
+ /* Input from upper-layer, mapping relateive queue to queue-zone */
+ u16 fw_rx_qid;
+ u16 fw_tx_qid;
+
+ struct ecore_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
+};
+
+enum vf_state {
+ VF_FREE = 0, /* VF ready to be acquired holds no resc */
+ VF_ACQUIRED = 1, /* VF, acquired, but not initalized */
+ VF_ENABLED = 2, /* VF, Enabled */
+ VF_RESET = 3, /* VF, FLR'd, pending cleanup */
+ VF_STOPPED = 4 /* VF, Stopped */
+};
+
+struct ecore_vf_vlan_shadow {
+ bool used;
+ u16 vid;
+};
+
+struct ecore_vf_shadow_config {
+ /* Shadow copy of all guest vlans */
+ struct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1];
+
+ /* Shadow copy of all configured MACs; Empty if forcing MACs */
+ u8 macs[ECORE_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
+ u8 inner_vlan_removal;
+};
+
+/* PFs maintain an array of this structure, per VF */
+struct ecore_vf_info {
+ struct ecore_iov_vf_mbx vf_mbx;
+ enum vf_state state;
+ bool b_init;
+ bool b_malicious;
+ u8 to_disable;
+
+ struct ecore_bulletin bulletin;
+ dma_addr_t vf_bulletin;
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ /* Determine whether PF communicate with VF using HW/SW channel */
+ bool b_hw_channel;
+#endif
+
+ /* PF saves a copy of the last VF acquire message */
+ struct vfpf_acquire_tlv acquire;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 rss_eng_id;
+ u8 relative_vf_id;
+ u8 abs_vf_id;
+#define ECORE_VF_ABS_ID(p_hwfn, p_vf) (ECORE_PATH_ID(p_hwfn) ? \
+ (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
+ (p_vf)->abs_vf_id)
+
+ u8 vport_instance; /* Number of active vports */
+ u8 num_rxqs;
+ u8 num_txqs;
+
+ u16 rx_coal;
+ u16 tx_coal;
+
+ u8 num_sbs;
+
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+
+ struct ecore_vf_queue vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
+ u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];
+
+ /* TODO - Only windows is using it - should be removed */
+ u8 was_malicious;
+ u8 num_active_rxqs;
+ void *ctx;
+ struct ecore_public_vf_info p_vf_info;
+ bool spoof_chk; /* Current configured on HW */
+ bool req_spoofchk_val; /* Requested value */
+
+ /* Stores the configuration requested by VF */
+ struct ecore_vf_shadow_config shadow_config;
+
+ /* A bitfield using bulletin's valid-map bits, used to indicate
+ * which of the bulletin board features have been configured.
+ */
+ u64 configured_features;
+#define ECORE_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
+ (1 << VLAN_ADDR_FORCED))
+};
+
+/* This structure is part of ecore_hwfn and used only for PFs that have sriov
+ * capability enabled.
+ */
+struct ecore_pf_iov {
+ struct ecore_vf_info vfs_array[MAX_NUM_VFS_E4];
+ u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
+
+#ifndef REMOVE_DBG
+ /* This doesn't serve anything functionally, but it makes windows
+ * debugging of IOV related issues easier.
+ */
+ u64 active_vfs[ECORE_VF_ARRAY_LENGTH];
+#endif
+
+ /* Allocate message address continuosuly and split to each VF */
+ void *mbx_msg_virt_addr;
+ dma_addr_t mbx_msg_phys_addr;
+ u32 mbx_msg_size;
+ void *mbx_reply_virt_addr;
+ dma_addr_t mbx_reply_phys_addr;
+ u32 mbx_reply_size;
+ void *p_bulletins;
+ dma_addr_t bulletins_phys;
+ u32 bulletins_size;
+};
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief Read sriov related information and allocated resources
+ * reads from configuraiton space, shmem, etc.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
+ *
+ * @param offset
+ * @param type
+ * @param length
+ *
+ * @return pointer to the newly placed tlv
+ */
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length);
+
+/**
+ * @brief list the types and lengths of the tlvs on the buffer
+ *
+ * @param p_hwfn
+ * @param tlvs_list
+ */
+void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
+ void *tlvs_list);
+
+/**
+ * @brief ecore_iov_alloc - allocate sriov related resources
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_iov_setup - setup sriov related resources
+ *
+ * @param p_hwfn
+ */
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_iov_free - free sriov related resources
+ *
+ * @param p_hwfn
+ */
+void ecore_iov_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief free sriov related memory that was allocated during hw_prepare
+ *
+ * @param p_dev
+ */
+void ecore_iov_free_hw_info(struct ecore_dev *p_dev);
+
+/**
+ * @brief Mark structs of vfs that have been FLR-ed.
+ *
+ * @param p_hwfn
+ * @param disabled_vfs - bitmask of all VFs on path that were FLRed
+ *
+ * @return true iff one of the PF's vfs got FLRed. false otherwise.
+ */
+bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
+ u32 *disabled_vfs);
+
+/**
+ * @brief Search extended TLVs in request/reply buffer.
+ *
+ * @param p_hwfn
+ * @param p_tlvs_list - Pointer to tlvs list
+ * @param req_type - Type of TLV
+ *
+ * @return pointer to tlv type if found, otherwise returns NULL.
+ */
+void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type);
+
+/**
+ * @brief ecore_iov_get_vf_info - return the database of a
+ * specific VF
+ *
+ * @param p_hwfn
+ * @param relative_vf_id - relative id of the VF for which info
+ * is requested
+ * @param b_enabled_only - false iff want to access even if vf is disabled
+ *
+ * @return struct ecore_vf_info*
+ */
+struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only);
+#endif
+#endif /* __ECORE_SRIOV_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_status.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_status.h
new file mode 100644
index 00000000..b893f1d4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_status.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_STATUS_H__
+#define __ECORE_STATUS_H__
+
+enum _ecore_status_t {
+ ECORE_CONN_RESET = -13,
+ ECORE_UNKNOWN_ERROR = -12,
+ ECORE_NORESOURCES = -11,
+ ECORE_NODEV = -10,
+ ECORE_ABORTED = -9,
+ ECORE_AGAIN = -8,
+ ECORE_NOTIMPL = -7,
+ ECORE_EXISTS = -6,
+ ECORE_IO = -5,
+ ECORE_TIMEOUT = -4,
+ ECORE_INVAL = -3,
+ ECORE_BUSY = -2,
+ ECORE_NOMEM = -1,
+ ECORE_SUCCESS = 0,
+ /* PENDING is not an error and should be positive */
+ ECORE_PENDING = 1,
+};
+
+#endif /* __ECORE_STATUS_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_utils.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_utils.h
new file mode 100644
index 00000000..249136b0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_utils.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_UTILS_H__
+#define __ECORE_UTILS_H__
+
+/* dma_addr_t manip */
+/* Suppress "right shift count >= width of type" warning when that quantity is
+ * 32-bits rquires the >> 16) >> 16)
+ */
+#define PTR_LO(x) ((u32)(((osal_uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x) ((u32)((((osal_uintptr_t)(x)) >> 16) >> 16))
+
+#define DMA_LO(x) ((u32)(((dma_addr_t)(x)) & 0xffffffff))
+#define DMA_HI(x) ((u32)(((dma_addr_t)(x)) >> 32))
+
+#define DMA_LO_LE(x) OSAL_CPU_TO_LE32(DMA_LO(x))
+#define DMA_HI_LE(x) OSAL_CPU_TO_LE32(DMA_HI(x))
+
+/* It's assumed that whoever includes this has previously included an hsi
+ * file defining the regpair.
+ */
+#define DMA_REGPAIR_LE(x, val) (x).hi = DMA_HI_LE((val)); \
+ (x).lo = DMA_LO_LE((val))
+
+#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
+#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
+#define HILO_64(hi, lo) HILO_GEN(hi, lo, u64)
+#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo))
+#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_vf.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_vf.c
new file mode 100644
index 00000000..d2213f79
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_vf.c
@@ -0,0 +1,1920 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_sriov.h"
+#include "ecore_l2_api.h"
+#include "ecore_vf.h"
+#include "ecore_vfpf_if.h"
+#include "ecore_status.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_l2.h"
+#include "ecore_mcp_api.h"
+#include "ecore_vf_api.h"
+
+static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ void *p_tlv;
+
+ /* This lock is released when we receive PF's response
+ * in ecore_send_msg2pf().
+ * So, ecore_vf_pf_prep() and ecore_send_msg2pf()
+ * must come in sequence.
+ */
+ OSAL_MUTEX_ACQUIRE(&p_iov->mutex);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "preparing to send %s tlv over vf pf channel\n",
+ ecore_channel_tlvs_string[type]);
+
+ /* Reset Request offset */
+ p_iov->offset = (u8 *)(p_iov->vf2pf_request);
+
+ /* Clear mailbox - both request and reply */
+ OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
+ OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
+ /* Init type and length */
+ p_tlv = ecore_add_tlv(&p_iov->offset, type, length);
+
+ /* Init first tlv header */
+ ((struct vfpf_first_tlv *)p_tlv)->reply_address =
+ (u64)p_iov->pf2vf_reply_phys;
+
+ return p_tlv;
+}
+
+static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
+ enum _ecore_status_t req_status)
+{
+ union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF request status = 0x%x, PF reply status = 0x%x\n",
+ req_status, resp->default_resp.hdr.status);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
+}
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/* The SW channel implementation of Windows needs to know the 'exact'
+ * response size of any given message. That means that for future
+ * messages we'd be unable to send TLVs to PF if he'll be unable to
+ * answer them if the |response| != |default response|.
+ * We'd need to handshake in acquire capabilities for any such.
+ */
+#endif
+static enum _ecore_status_t
+ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
+ u8 *done, u32 resp_size)
+{
+ union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
+ struct ustorm_trigger_vf_zone trigger;
+ struct ustorm_vf_zone *zone_data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int time = 100;
+
+ zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
+
+ /* output tlvs list */
+ ecore_dp_tlv_list(p_hwfn, p_req);
+
+ /* need to add the END TLV to the message size */
+ resp_size += sizeof(struct channel_list_end_tlv);
+
+ /* Send TLVs over HW channel */
+ OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
+ trigger.vf_pf_msg_valid = 1;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF -> PF [%02x] message: [%08x, %08x] --> %p,"
+ " %08x --> %p\n",
+ GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID),
+ U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ &zone_data->non_trigger.vf_pf_msg_addr,
+ *((u32 *)&trigger), &zone_data->trigger);
+
+ REG_WR(p_hwfn,
+ (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
+ U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ REG_WR(p_hwfn,
+ (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
+ U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ /* The message data must be written first, to prevent trigger before
+ * data is written.
+ */
+ OSAL_WMB(p_hwfn->p_dev);
+
+ REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger,
+ *((u32 *)&trigger));
+
+ /* When PF would be done with the response, it would write back to the
+ * `done' address. Poll until then.
+ */
+ while ((!*done) && time) {
+ OSAL_MSLEEP(25);
+ time--;
+ }
+
+ if (!*done) {
+ DP_NOTICE(p_hwfn, true,
+ "VF <-- PF Timeout [Type %d]\n",
+ p_req->first_tlv.tl.type);
+ rc = ECORE_TIMEOUT;
+ } else {
+ if ((*done != PFVF_STATUS_SUCCESS) &&
+ (*done != PFVF_STATUS_NO_RESOURCE))
+ DP_NOTICE(p_hwfn, false,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ }
+
+ return rc;
+}
+
+static void ecore_vf_pf_add_qid(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_qid_tlv *p_qid_tlv;
+
+ /* Only add QIDs for the queue if it was negotiated with PF */
+ if (!(p_iov->acquire_resp.pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ p_qid_tlv = ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
+ p_qid_tlv->qid = p_cid->qid_usage_idx;
+}
+
+enum _ecore_status_t _ecore_vf_pf_release(struct ecore_hwfn *p_hwfn,
+ bool b_final)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ u32 size;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
+ rc = ECORE_AGAIN;
+
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ if (!b_final)
+ return rc;
+
+ p_hwfn->b_int_enabled = 0;
+
+ if (p_iov->vf2pf_request)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys,
+ sizeof(union vfpf_tlvs));
+ if (p_iov->pf2vf_reply)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->pf2vf_reply,
+ p_iov->pf2vf_reply_phys,
+ sizeof(union pfvf_tlvs));
+
+ if (p_iov->bulletin.p_virt) {
+ size = sizeof(struct ecore_bulletin_content);
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->bulletin.p_virt,
+ p_iov->bulletin.phys,
+ size);
+ }
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_MUTEX_DEALLOC(&p_iov->mutex);
+#endif
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
+ p_hwfn->vf_iov_info = OSAL_NULL;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
+{
+ return _ecore_vf_pf_release(p_hwfn, true);
+}
+
+#define VF_ACQUIRE_THRESH 3
+static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
+ p_req->num_rxqs, p_resp->num_rxqs,
+ p_req->num_rxqs, p_resp->num_txqs,
+ p_req->num_sbs, p_resp->num_sbs,
+ p_req->num_mac_filters, p_resp->num_mac_filters,
+ p_req->num_vlan_filters, p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters,
+ p_req->num_cids, p_resp->num_cids);
+
+ /* humble our request */
+ p_req->num_txqs = p_resp->num_txqs;
+ p_req->num_rxqs = p_resp->num_rxqs;
+ p_req->num_sbs = p_resp->num_sbs;
+ p_req->num_mac_filters = p_resp->num_mac_filters;
+ p_req->num_vlan_filters = p_resp->num_vlan_filters;
+ p_req->num_mc_filters = p_resp->num_mc_filters;
+ p_req->num_cids = p_resp->num_cids;
+}
+
+static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct ecore_vf_acquire_sw_info vf_sw_info;
+ struct vf_pf_resc_request *p_resc;
+ bool resources_acquired = false;
+ struct vfpf_acquire_tlv *req;
+ int attempts = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+ p_resc = &req->resc_request;
+
+ /* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */
+ req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ p_resc->num_rxqs = ECORE_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_txqs = ECORE_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
+ p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
+ p_resc->num_cids = ECORE_ETH_VF_DEFAULT_NUM_CIDS;
+
+ OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
+ OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);
+
+ req->vfdev_info.os_type = vf_sw_info.os_type;
+ req->vfdev_info.driver_version = vf_sw_info.driver_version;
+ req->vfdev_info.fw_major = FW_MAJOR_VERSION;
+ req->vfdev_info.fw_minor = FW_MINOR_VERSION;
+ req->vfdev_info.fw_revision = FW_REVISION_VERSION;
+ req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+ req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
+
+ /* Fill capability field with any non-deprecated config we support */
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
+
+ /* If we've mapped the doorbell bar, try using queue qids */
+ if (p_iov->b_doorbell_bar)
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS;
+
+ /* pf 2 vf bulletin board address */
+ req->bulletin_addr = p_iov->bulletin.phys;
+ req->bulletin_size = p_iov->bulletin.size;
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ while (!resources_acquired) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "attempting to acquire resources\n");
+
+ /* Clear response buffer, as this might be a re-send */
+ OSAL_MEMSET(p_iov->pf2vf_reply, 0,
+ sizeof(union pfvf_tlvs));
+
+ /* send acquire request */
+ rc = ecore_send_msg2pf(p_hwfn,
+ &resp->hdr.status, sizeof(*resp));
+ if (rc != ECORE_SUCCESS)
+ goto exit;
+
+ /* copy acquire response from buffer to p_hwfn */
+ OSAL_MEMCPY(&p_iov->acquire_resp,
+ resp, sizeof(p_iov->acquire_resp));
+
+ attempts++;
+
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF agrees to allocate our resources */
+ if (!(resp->pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
+ /* It's possible legacy PF mistakenly accepted;
+ * but we don't care - simply mark it as
+ * legacy and continue.
+ */
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "resources acquired\n");
+ resources_acquired = true;
+ } /* PF refuses to allocate our resources */
+ else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
+ attempts < VF_ACQUIRE_THRESH) {
+ ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
+ &resp->resc);
+
+ } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
+ if (pfdev_info->major_fp_hsi &&
+ (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+ DP_NOTICE(p_hwfn, false,
+ "PF uses an incompatible fastpath HSI"
+ " %02x.%02x [VF requires %02x.%02x]."
+ " Please change to a VF driver using"
+ " %02x.xx.\n",
+ pfdev_info->major_fp_hsi,
+ pfdev_info->minor_fp_hsi,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR,
+ pfdev_info->major_fp_hsi);
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+ if (!pfdev_info->major_fp_hsi) {
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ DP_NOTICE(p_hwfn, false,
+ "PF uses very old drivers."
+ " Please change to a VF"
+ " driver using no later than"
+ " 8.8.x.x.\n");
+ rc = ECORE_INVAL;
+ goto exit;
+ } else {
+ DP_INFO(p_hwfn,
+ "PF is old - try re-acquire to"
+ " see if it supports FW-version"
+ " override\n");
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ continue;
+ }
+ }
+
+ /* If PF/VF are using same Major, PF must have had
+ * it's reasons. Simply fail.
+ */
+ DP_NOTICE(p_hwfn, false,
+ "PF rejected acquisition by VF\n");
+ rc = ECORE_INVAL;
+ goto exit;
+ } else {
+ DP_ERR(p_hwfn,
+ "PF returned err %d to VF acquisition request\n",
+ resp->hdr.status);
+ rc = ECORE_AGAIN;
+ goto exit;
+ }
+ }
+
+ /* Mark the PF as legacy, if needed */
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI)
+ p_iov->b_pre_fp_hsi = true;
+
+ /* In case PF doesn't support multi-queue Tx, update the number of
+ * CIDs to reflect the number of queues [older PFs didn't fill that
+ * field].
+ */
+ if (!(resp->pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+ resp->resc.num_cids = resp->resc.num_rxqs +
+ resp->resc.num_txqs;
+
+ rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "VF_UPDATE_ACQUIRE_RESC_RESP Failed:"
+ " status = 0x%x.\n",
+ rc);
+ rc = ECORE_AGAIN;
+ goto exit;
+ }
+
+ /* Update bulletin board size with response from PF */
+ p_iov->bulletin.size = resp->bulletin_size;
+
+ /* get HW info */
+ p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
+ p_hwfn->p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;
+
+ DP_INFO(p_hwfn, "Chip details - %s%d\n",
+ ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
+ CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);
+
+ p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;
+
+ /* Learn of the possibility of CMT */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
+ DP_INFO(p_hwfn, "100g VF\n");
+ p_hwfn->p_dev->num_hwfns = 2;
+ }
+ }
+
+ /* @DPDK */
+ if ((~p_iov->b_pre_fp_hsi &
+ ETH_HSI_VER_MINOR) &&
+ (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR))
+ DP_INFO(p_hwfn,
+ "PF is using older fastpath HSI;"
+ " %02x.%02x is configured\n",
+ ETH_HSI_VER_MAJOR,
+ resp->pfdev_info.minor_fp_hsi);
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
+ enum BAR_ID bar_id)
+{
+ u32 bar_size;
+
+ /* Regview size is fixed */
+ if (bar_id == BAR_ID_0)
+ return 1 << 17;
+
+ /* Doorbell is received from PF */
+ bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
+ if (bar_size)
+ return 1 << bar_size;
+ return 0;
+}
+
+enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev);
+ struct ecore_vf_iov *p_iov;
+ u32 reg;
+ enum _ecore_status_t rc;
+
+ /* Set number of hwfns - might be overridden once leading hwfn learns
+ * actual configuration from PF.
+ */
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->p_dev->num_hwfns = 1;
+
+ reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
+
+ reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
+
+ /* Allocate vf sriov info */
+ p_iov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_iov));
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_sriov'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* Doorbells are tricky; Upper-layer has alreday set the hwfn doorbell
+ * value, but there are several incompatibily scenarios where that
+ * would be incorrect and we'd need to override it.
+ */
+ if (p_hwfn->doorbells == OSAL_NULL) {
+ p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ } else if (p_hwfn == p_lead) {
+ /* For leading hw-function, value is always correct, but need
+ * to handle scenario where legacy PF would not support 100g
+ * mapped bars later.
+ */
+ p_iov->b_doorbell_bar = true;
+ } else {
+ /* here, value would be correct ONLY if the leading hwfn
+ * received indication that mapped-bars are supported.
+ */
+ if (p_lead->vf_iov_info->b_doorbell_bar)
+ p_iov->b_doorbell_bar = true;
+ else
+ p_hwfn->doorbells = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ }
+
+ /* Allocate vf2pf msg */
+ p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov->
+ vf2pf_request_phys,
+ sizeof(union
+ vfpf_tlvs));
+ if (!p_iov->vf2pf_request) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `vf2pf_request' DMA memory\n");
+ goto free_p_iov;
+ }
+
+ p_iov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov->
+ pf2vf_reply_phys,
+ sizeof(union pfvf_tlvs));
+ if (!p_iov->pf2vf_reply) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `pf2vf_reply' DMA memory\n");
+ goto free_vf2pf_request;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF's Request mailbox [%p virt 0x%lx phys], "
+ "Response mailbox [%p virt 0x%lx phys]\n",
+ p_iov->vf2pf_request,
+ (unsigned long)p_iov->vf2pf_request_phys,
+ p_iov->pf2vf_reply,
+ (unsigned long)p_iov->pf2vf_reply_phys);
+
+ /* Allocate Bulletin board */
+ p_iov->bulletin.size = sizeof(struct ecore_bulletin_content);
+ p_iov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov->bulletin.
+ phys,
+ p_iov->bulletin.
+ size);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n",
+ p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
+ p_iov->bulletin.size);
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
+#endif
+ OSAL_MUTEX_INIT(&p_iov->mutex);
+
+ p_hwfn->vf_iov_info = p_iov;
+
+ p_hwfn->hw_info.personality = ECORE_PCI_ETH;
+
+ rc = ecore_vf_pf_acquire(p_hwfn);
+
+ /* If VF is 100g using a mapped bar and PF is too old to support that,
+ * acquisition would succeed - but the VF would have no way knowing
+ * the size of the doorbell bar configured in HW and thus will not
+ * know how to split it for 2nd hw-function.
+ * In this case we re-try without the indication of the mapped
+ * doorbell.
+ */
+ if (rc == ECORE_SUCCESS &&
+ p_iov->b_doorbell_bar &&
+ !ecore_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
+ ECORE_IS_CMT(p_hwfn->p_dev)) {
+ rc = _ecore_vf_pf_release(p_hwfn, false);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_iov->b_doorbell_bar = false;
+ p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ rc = ecore_vf_pf_acquire(p_hwfn);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
+ p_hwfn->regview, p_hwfn->doorbells,
+ p_hwfn->p_dev->doorbells);
+
+ return rc;
+
+free_vf2pf_request:
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys,
+ sizeof(union vfpf_tlvs));
+free_p_iov:
+ OSAL_FREE(p_hwfn->p_dev, p_iov);
+
+ return ECORE_NOMEM;
+}
+
+#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
+ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
+static void
+__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_src,
+ enum ecore_tunn_mode mask, u8 *p_cls)
+{
+ if (p_src->b_update_mode) {
+ p_req->tun_mode_update_mask |= (1 << mask);
+
+ if (p_src->b_mode_enabled)
+ p_req->tunn_mode |= (1 << mask);
+ }
+
+ *p_cls = p_src->tun_cls;
+}
+
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
+static void
+ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_src,
+ enum ecore_tunn_mode mask, u8 *p_cls,
+ struct ecore_tunn_update_udp_port *p_port,
+ u8 *p_update_port, u16 *p_udp_port)
+{
+ if (p_port->b_update_port) {
+ *p_update_port = 1;
+ *p_udp_port = p_port->port;
+ }
+
+ __ecore_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
+}
+
+void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun)
+{
+ if (p_tun->vxlan.b_mode_enabled)
+ p_tun->vxlan.b_update_mode = true;
+ if (p_tun->l2_geneve.b_mode_enabled)
+ p_tun->l2_geneve.b_update_mode = true;
+ if (p_tun->ip_geneve.b_mode_enabled)
+ p_tun->ip_geneve.b_update_mode = true;
+ if (p_tun->l2_gre.b_mode_enabled)
+ p_tun->l2_gre.b_update_mode = true;
+ if (p_tun->ip_gre.b_mode_enabled)
+ p_tun->ip_gre.b_update_mode = true;
+
+ p_tun->b_update_rx_cls = true;
+ p_tun->b_update_tx_cls = true;
+}
+
+static void
+__ecore_vf_update_tunn_param(struct ecore_tunn_update_type *p_tun,
+ u16 feature_mask, u8 tunn_mode, u8 tunn_cls,
+ enum ecore_tunn_mode val)
+{
+ if (feature_mask & (1 << val)) {
+ p_tun->b_mode_enabled = tunn_mode;
+ p_tun->tun_cls = tunn_cls;
+ } else {
+ p_tun->b_mode_enabled = false;
+ }
+}
+
+static void
+ecore_vf_update_tunn_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tun,
+ struct pfvf_update_tunn_param_tlv *p_resp)
+{
+ /* Update mode and classes provided by PF */
+ u16 feat_mask = p_resp->tunn_feature_mask;
+
+ __ecore_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
+ p_resp->vxlan_mode, p_resp->vxlan_clss,
+ ECORE_MODE_VXLAN_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
+ p_resp->l2geneve_mode,
+ p_resp->l2geneve_clss,
+ ECORE_MODE_L2GENEVE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
+ p_resp->ipgeneve_mode,
+ p_resp->ipgeneve_clss,
+ ECORE_MODE_IPGENEVE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
+ p_resp->l2gre_mode, p_resp->l2gre_clss,
+ ECORE_MODE_L2GRE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
+ p_resp->ipgre_mode, p_resp->ipgre_clss,
+ ECORE_MODE_IPGRE_TUNN);
+ p_tun->geneve_port.port = p_resp->geneve_udp_port;
+ p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
+ p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled,
+ p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+}
+
+enum _ecore_status_t
+ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_src)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ enum _ecore_status_t rc;
+
+ p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ sizeof(*p_req));
+
+ if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
+ p_req->update_tun_cls = 1;
+
+ ecore_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, ECORE_MODE_VXLAN_TUNN,
+ &p_req->vxlan_clss, &p_src->vxlan_port,
+ &p_req->update_vxlan_port,
+ &p_req->vxlan_port);
+ ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
+ ECORE_MODE_L2GENEVE_TUNN,
+ &p_req->l2geneve_clss, &p_src->geneve_port,
+ &p_req->update_geneve_port,
+ &p_req->geneve_port);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
+ ECORE_MODE_IPGENEVE_TUNN,
+ &p_req->ipgeneve_clss);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
+ ECORE_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
+ ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+
+ if (rc)
+ goto exit;
+
+ if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Failed to update tunnel parameters\n");
+ rc = ECORE_INVAL;
+ }
+
+ ecore_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM **pp_prod)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
+ struct vfpf_start_rxq_tlv *req;
+ u16 rx_qid = p_cid->rel.queue_id;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
+
+ req->rx_qid = rx_qid;
+ req->cqe_pbl_addr = cqe_pbl_addr;
+ req->cqe_pbl_size = cqe_pbl_size;
+ req->rxq_addr = bd_chain_phys_addr;
+ req->hw_sb = p_cid->sb_igu_id;
+ req->sb_index = p_cid->sb_idx;
+ req->bd_max_bytes = bd_max_bytes;
+ req->stat_id = -1; /* Keep initialized, for future compatibility */
+
+ /* If PF is legacy, we'll need to calculate producers ourselves
+ * as well as clean them.
+ */
+ if (p_iov->b_pre_fp_hsi) {
+ u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
+ MSTORM_QZONE_START(p_hwfn->p_dev) +
+ (hw_qid) * MSTORM_QZONE_SIZE;
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)(&init_prod_val));
+ }
+
+ ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->queue_start;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+ /* Learn the address of the producer from the response */
+ if (!p_iov->b_pre_fp_hsi) {
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
+ rx_qid, *pp_prod, resp->offset);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0.
+ * It was actually the PF's responsibility, but since some
+ * old PFs might fail to do so, we do this as well.
+ */
+ OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)&init_prod_val);
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool cqe_completion)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_rxqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
+
+ req->rx_qid = p_cid->rel.queue_id;
+ req->num_rxqs = 1;
+ req->cqe_completion = cqe_completion;
+
+ ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM **pp_doorbell)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
+ struct vfpf_start_txq_tlv *req;
+ u16 qid = p_cid->rel.queue_id;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
+
+ req->tx_qid = qid;
+
+ /* Tx */
+ req->pbl_addr = pbl_addr;
+ req->pbl_size = pbl_size;
+ req->hw_sb = p_cid->sb_igu_id;
+ req->sb_index = p_cid->sb_idx;
+
+ ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->queue_start;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+ /* Modern PFs provide the actual offsets, while legacy
+ * provided only the queue id.
+ */
+ if (!p_iov->b_pre_fp_hsi) {
+ *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
+ resp->offset;
+ } else {
+ u8 cid = p_iov->acquire_resp.resc.cid[qid];
+
+ *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
+ DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+ qid, *pp_doorbell, resp->offset);
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_txqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
+
+ req->tx_qid = p_cid->rel.queue_id;
+ req->num_txqs = 1;
+
+ ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid **pp_cid,
+ u8 num_rxqs,
+ u8 comp_cqe_flg,
+ u8 comp_event_flg)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ struct vfpf_update_rxq_tlv *req;
+ enum _ecore_status_t rc;
+
+ /* Starting with CHANNEL_TLV_QID and the need for additional queue
+ * information, this API stopped supporting multiple rxqs.
+ * TODO - remove this and change the API to accept a single queue-cid
+ * in a follow-up patch.
+ */
+ if (num_rxqs != 1) {
+ DP_NOTICE(p_hwfn, true,
+ "VFs can no longer update more than a single queue\n");
+ return ECORE_INVAL;
+ }
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));
+
+ req->rx_qid = (*pp_cid)->rel.queue_id;
+ req->num_rxqs = 1;
+
+ if (comp_cqe_flg)
+ req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
+ if (comp_event_flg)
+ req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;
+
+ ecore_vf_pf_add_qid(p_hwfn, *pp_cid);
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
+ u16 mtu, u8 inner_vlan_removal,
+ enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe,
+ u8 only_untagged)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_start_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ enum _ecore_status_t rc;
+ int i;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
+
+ req->mtu = mtu;
+ req->vport_id = vport_id;
+ req->inner_vlan_removal = inner_vlan_removal;
+ req->tpa_mode = tpa_mode;
+ req->max_buffers_per_cqe = max_buffers_per_cqe;
+ req->only_untagged = only_untagged;
+
+ /* status blocks */
+ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
+ struct ecore_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];
+
+ if (p_sb)
+ req->sb_addr[i] = p_sb->sb_phys;
+ }
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+static bool
+ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ u16 tlv)
+{
+ switch (tlv) {
+ case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
+ return !!(p_data->update_vport_active_rx_flg ||
+ p_data->update_vport_active_tx_flg);
+ case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
+#ifndef ASIC_ONLY
+ /* FPGA doesn't have PVFC and so can't support tx-switching */
+ return !!(p_data->update_tx_switching_flg &&
+ !CHIP_REV_IS_FPGA(p_hwfn->p_dev));
+#else
+ return !!p_data->update_tx_switching_flg;
+#endif
+ case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
+ return !!p_data->update_inner_vlan_removal_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
+ return !!p_data->update_accept_any_vlan_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_MCAST:
+ return !!p_data->update_approx_mcast_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
+ return !!(p_data->accept_flags.update_rx_mode_config ||
+ p_data->accept_flags.update_tx_mode_config);
+ case CHANNEL_TLV_VPORT_UPDATE_RSS:
+ return !!p_data->rss_params;
+ case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
+ return !!p_data->sge_tpa_params;
+ default:
+ DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
+ tlv, ecore_channel_tlvs_string[tlv]);
+ return false;
+ }
+}
+
+static void
+ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *p_resp;
+ u16 tlv;
+
+ for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ tlv < CHANNEL_TLV_VPORT_UPDATE_MAX;
+ tlv++) {
+ if (!ecore_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
+ continue;
+
+ p_resp = (struct pfvf_def_resp_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+ if (p_resp && p_resp->hdr.status)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "TLV[%d] type %s Configuration %s\n",
+ tlv, ecore_channel_tlvs_string[tlv],
+ (p_resp && p_resp->hdr.status) ? "succeeded"
+ : "failed");
+ }
+}
+
+enum _ecore_status_t
+ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_update_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ u8 update_rx, update_tx;
+ u32 resp_size = 0;
+ u16 size, tlv;
+ enum _ecore_status_t rc;
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ resp_size = sizeof(*resp);
+
+ update_rx = p_params->update_vport_active_rx_flg;
+ update_tx = p_params->update_vport_active_tx_flg;
+
+ /* clear mailbox and prep header tlv */
+ ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
+
+ /* Prepare extended tlvs */
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+
+ size = sizeof(struct vfpf_vport_update_activate_tlv);
+ p_act_tlv = ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_act_tlv->update_rx = update_rx;
+ p_act_tlv->active_rx = p_params->vport_active_rx_flg;
+ }
+
+ if (update_tx) {
+ p_act_tlv->update_tx = update_tx;
+ p_act_tlv->active_tx = p_params->vport_active_tx_flg;
+ }
+ }
+
+ if (p_params->update_inner_vlan_removal_flg) {
+ struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+
+ size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
+ p_vlan_tlv = ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg;
+ }
+
+ if (p_params->update_tx_switching_flg) {
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+
+ size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ p_tx_switch_tlv = ecore_add_tlv(&p_iov->offset,
+ tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
+ }
+
+ if (p_params->update_approx_mcast_flg) {
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+
+ size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
+ p_mcast_tlv = ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins,
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ }
+
+ update_rx = p_params->accept_flags.update_rx_mode_config;
+ update_tx = p_params->accept_flags.update_tx_mode_config;
+
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ size = sizeof(struct vfpf_vport_update_accept_param_tlv);
+ p_accept_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_accept_tlv->update_rx_mode = update_rx;
+ p_accept_tlv->rx_accept_filter =
+ p_params->accept_flags.rx_accept_filter;
+ }
+
+ if (update_tx) {
+ p_accept_tlv->update_tx_mode = update_tx;
+ p_accept_tlv->tx_accept_filter =
+ p_params->accept_flags.tx_accept_filter;
+ }
+ }
+
+ if (p_params->rss_params) {
+ struct ecore_rss_params *rss_params = p_params->rss_params;
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ int i, table_size;
+
+ size = sizeof(struct vfpf_vport_update_rss_tlv);
+ p_rss_tlv = ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_RSS, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (rss_params->update_rss_config)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CONFIG_FLAG;
+ if (rss_params->update_rss_capabilities)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CAPS_FLAG;
+ if (rss_params->update_rss_ind_table)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG;
+ if (rss_params->update_rss_key)
+ p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
+
+ p_rss_tlv->rss_enable = rss_params->rss_enable;
+ p_rss_tlv->rss_caps = rss_params->rss_caps;
+ p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
+
+ table_size = OSAL_MIN_T(int, T_ETH_INDIRECTION_TABLE_SIZE,
+ 1 << p_rss_tlv->rss_table_size_log);
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_queue;
+
+ p_queue = rss_params->rss_ind_table[i];
+ p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
+ }
+
+ OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
+ sizeof(rss_params->rss_key));
+ }
+
+ if (p_params->update_accept_any_vlan_flg) {
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
+
+ size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ p_any_vlan_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);
+
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+ p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
+ p_any_vlan_tlv->update_accept_any_vlan_flg =
+ p_params->update_accept_any_vlan_flg;
+ }
+
+ if (p_params->sge_tpa_params) {
+ struct ecore_sge_tpa_params *sge_tpa_params;
+ struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+
+ sge_tpa_params = p_params->sge_tpa_params;
+ size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
+ p_sge_tpa_tlv = ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (sge_tpa_params->update_tpa_en_flg)
+ p_sge_tpa_tlv->update_sge_tpa_flags |=
+ VFPF_UPDATE_TPA_EN_FLAG;
+ if (sge_tpa_params->update_tpa_param_flg)
+ p_sge_tpa_tlv->update_sge_tpa_flags |=
+ VFPF_UPDATE_TPA_PARAM_FLAG;
+
+ if (sge_tpa_params->tpa_ipv4_en_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV4_EN_FLAG;
+ if (sge_tpa_params->tpa_ipv6_en_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV6_EN_FLAG;
+ if (sge_tpa_params->tpa_pkt_split_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_PKT_SPLIT_FLAG;
+ if (sge_tpa_params->tpa_hdr_data_split_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |=
+ VFPF_TPA_HDR_DATA_SPLIT_FLAG;
+ if (sge_tpa_params->tpa_gro_consistent_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |=
+ VFPF_TPA_GRO_CONSIST_FLAG;
+ if (sge_tpa_params->tpa_ipv4_tunn_en_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |=
+ VFPF_TPA_TUNN_IPV4_EN_FLAG;
+ if (sge_tpa_params->tpa_ipv6_tunn_en_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |=
+ VFPF_TPA_TUNN_IPV6_EN_FLAG;
+
+ p_sge_tpa_tlv->tpa_max_aggs_num =
+ sge_tpa_params->tpa_max_aggs_num;
+ p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size;
+ p_sge_tpa_tlv->tpa_min_size_to_start =
+ sge_tpa_params->tpa_min_size_to_start;
+ p_sge_tpa_tlv->tpa_min_size_to_cont =
+ sge_tpa_params->tpa_min_size_to_cont;
+
+ p_sge_tpa_tlv->max_buffers_per_cqe =
+ sge_tpa_params->max_buffers_per_cqe;
+ }
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+ ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_AGAIN;
+ goto exit;
+ }
+
+ p_hwfn->b_int_enabled = 0;
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_mcast *p_filter_cmd)
+{
+ struct ecore_sp_vport_update_params sp_params;
+ int i;
+
+ OSAL_MEMSET(&sp_params, 0, sizeof(sp_params));
+ sp_params.update_approx_mcast_flg = 1;
+
+ if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ sp_params.bins[bit / 32] |= 1 << (bit % 32);
+ }
+ }
+
+ ecore_vf_pf_vport_update(p_hwfn, &sp_params);
+}
+
+enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_ucast
+ *p_ucast)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_ucast_filter_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ enum _ecore_status_t rc;
+
+ /* Sanitize */
+ if (p_ucast->opcode == ECORE_FILTER_MOVE) {
+ DP_NOTICE(p_hwfn, true,
+ "VFs don't support Moving of filters\n");
+ return ECORE_INVAL;
+ }
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
+ req->opcode = (u8)p_ucast->opcode;
+ req->type = (u8)p_ucast->type;
+ OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN);
+ req->vlan = p_ucast->vlan;
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_AGAIN;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 *p_coal,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_read_coal_resp_tlv *resp;
+ struct vfpf_read_coal_req_tlv *req;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep header tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ,
+ sizeof(*req));
+ req->qid = p_cid->rel.queue_id;
+ req->is_rx = p_cid->b_is_rx ? 1 : 0;
+
+ ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+ resp = &p_iov->pf2vf_reply->read_coal_resp;
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc != ECORE_SUCCESS)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ goto exit;
+
+ *p_coal = resp->coal;
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_update_coalesce *req;
+ struct pfvf_def_resp_tlv *resp;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep header tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE,
+ sizeof(*req));
+
+ req->rx_coal = rx_coal;
+ req->tx_coal = tx_coal;
+ req->qid = p_cid->rel.queue_id;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
+ rx_coal, tx_coal, req->qid);
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (rc != ECORE_SUCCESS)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ goto exit;
+
+ p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
+ p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_update_mtu_tlv *p_req;
+ struct pfvf_def_resp_tlv *p_resp;
+ enum _ecore_status_t rc;
+
+ if (!mtu)
+ return ECORE_INVAL;
+
+ /* clear mailbox and prep header tlv */
+ p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_MTU,
+ sizeof(*p_req));
+ p_req->mtu = mtu;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Requesting MTU update to %d\n", mtu);
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+ if (p_resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED)
+ rc = ECORE_INVAL;
+
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
+ u16 sb_id)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
+ return 0;
+ }
+
+ return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
+}
+
+void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn,
+ u16 sb_id, struct ecore_sb_info *p_sb)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
+ return;
+ }
+
+ if (sb_id >= PFVF_MAX_SBS_PER_VF) {
+ DP_NOTICE(p_hwfn, true, "Can't configure SB %04x\n", sb_id);
+ return;
+ }
+
+ p_iov->sbs_info[sb_id] = p_sb;
+}
+
+enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
+ u8 *p_change)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct ecore_bulletin_content shadow;
+ u32 crc, crc_size;
+
+ crc_size = sizeof(p_iov->bulletin.p_virt->crc);
+ *p_change = 0;
+
+ /* Need to guarantee PF is not in the middle of writing it */
+ OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
+
+ /* If version did not update, no need to do anything */
+ if (shadow.version == p_iov->bulletin_shadow.version)
+ return ECORE_SUCCESS;
+
+ /* Verify the bulletin we see is valid */
+ crc = OSAL_CRC32(0, (u8 *)&shadow + crc_size,
+ p_iov->bulletin.size - crc_size);
+ if (crc != shadow.crc)
+ return ECORE_AGAIN;
+
+ /* Set the shadow bulletin and process it */
+ OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Read a bulletin update %08x\n", shadow.version);
+
+ *p_change = 1;
+
+ return ECORE_SUCCESS;
+}
+
+void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
+ struct ecore_bulletin_content *p_bulletin)
+{
+ OSAL_MEMSET(p_params, 0, sizeof(*p_params));
+
+ p_params->speed.autoneg = p_bulletin->req_autoneg;
+ p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
+ p_params->speed.forced_speed = p_bulletin->req_forced_speed;
+ p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
+ p_params->pause.forced_rx = p_bulletin->req_forced_rx;
+ p_params->pause.forced_tx = p_bulletin->req_forced_tx;
+ p_params->loopback_mode = p_bulletin->req_loopback;
+}
+
+void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_params *params)
+{
+ __ecore_vf_get_link_params(params,
+ &p_hwfn->vf_iov_info->bulletin_shadow);
+}
+
+void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
+ struct ecore_bulletin_content *p_bulletin)
+{
+ OSAL_MEMSET(p_link, 0, sizeof(*p_link));
+
+ p_link->link_up = p_bulletin->link_up;
+ p_link->speed = p_bulletin->speed;
+ p_link->full_duplex = p_bulletin->full_duplex;
+ p_link->an = p_bulletin->autoneg;
+ p_link->an_complete = p_bulletin->autoneg_complete;
+ p_link->parallel_detection = p_bulletin->parallel_detection;
+ p_link->pfc_enabled = p_bulletin->pfc_enabled;
+ p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
+ p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
+ p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
+ p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
+ p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
+}
+
+void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_state *link)
+{
+ __ecore_vf_get_link_state(link,
+ &p_hwfn->vf_iov_info->bulletin_shadow);
+}
+
+void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
+ struct ecore_bulletin_content *p_bulletin)
+{
+ OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
+ p_link_caps->speed_capabilities = p_bulletin->capability_speed;
+}
+
+void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_capabilities *p_link_caps)
+{
+ __ecore_vf_get_link_caps(p_link_caps,
+ &p_hwfn->vf_iov_info->bulletin_shadow);
+}
+
+void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
+{
+ *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
+}
+
+void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_txqs)
+{
+ *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
+}
+
+void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
+{
+ OSAL_MEMCPY(port_mac,
+ p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac,
+ ETH_ALEN);
+}
+
+void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
+ u8 *num_vlan_filters)
+{
+ struct ecore_vf_iov *p_vf;
+
+ p_vf = p_hwfn->vf_iov_info;
+ *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
+}
+
+void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ u32 *num_sbs)
+{
+ struct ecore_vf_iov *p_vf;
+
+ p_vf = p_hwfn->vf_iov_info;
+ *num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
+}
+
+void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
+ u32 *num_mac_filters)
+{
+ struct ecore_vf_iov *p_vf = p_hwfn->vf_iov_info;
+
+ *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
+}
+
+bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
+{
+ struct ecore_bulletin_content *bulletin;
+
+ bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+ if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return true;
+
+ /* Forbid VF from changing a MAC enforced by PF */
+ if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN))
+ return false;
+
+ return false;
+}
+
+bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
+ u8 *p_is_forced)
+{
+ struct ecore_bulletin_content *bulletin;
+
+ bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+ if (p_is_forced)
+ *p_is_forced = 1;
+ } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
+ if (p_is_forced)
+ *p_is_forced = 0;
+ } else {
+ return false;
+ }
+
+ OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN);
+
+ return true;
+}
+
+void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
+ u16 *p_vxlan_port,
+ u16 *p_geneve_port)
+{
+ struct ecore_bulletin_content *p_bulletin;
+
+ p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+
+ *p_vxlan_port = p_bulletin->vxlan_udp_port;
+ *p_geneve_port = p_bulletin->geneve_udp_port;
+}
+
+bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
+{
+ struct ecore_bulletin_content *bulletin;
+
+ bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+ if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ return false;
+
+ if (dst_pvid)
+ *dst_pvid = bulletin->pvid;
+
+ return true;
+}
+
+bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->vf_iov_info->b_pre_fp_hsi;
+}
+
+void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor, u16 *fw_rev,
+ u16 *fw_eng)
+{
+ struct pf_vf_pfdev_info *info;
+
+ info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
+
+ *fw_major = info->fw_major;
+ *fw_minor = info->fw_minor;
+ *fw_rev = info->fw_rev;
+ *fw_eng = info->fw_eng;
+}
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw)
+{
+ p_hwfn->vf_iov_info->b_hw_channel = b_is_hw;
+}
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_vf.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_vf.h
new file mode 100644
index 00000000..a07f82eb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_vf.h
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_VF_H__
+#define __ECORE_VF_H__
+
+#include "ecore_status.h"
+#include "ecore_vf_api.h"
+#include "ecore_l2_api.h"
+#include "ecore_vfpf_if.h"
+
+/* Default number of CIDs [total of both Rx and Tx] to be requested
+ * by default.
+ */
+#define ECORE_ETH_VF_DEFAULT_NUM_CIDS (32)
+
+/* This data is held in the ecore_hwfn structure for VFs only. */
+struct ecore_vf_iov {
+ union vfpf_tlvs *vf2pf_request;
+ dma_addr_t vf2pf_request_phys;
+ union pfvf_tlvs *pf2vf_reply;
+ dma_addr_t pf2vf_reply_phys;
+
+ /* Should be taken whenever the mailbox buffers are accessed */
+ osal_mutex_t mutex;
+ u8 *offset;
+
+ /* Bulletin Board */
+ struct ecore_bulletin bulletin;
+ struct ecore_bulletin_content bulletin_shadow;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+
+ /* In case PF originates prior to the fp-hsi version comparison,
+ * this has to be propagated as it affects the fastpath.
+ */
+ bool b_pre_fp_hsi;
+
+ /* Current day VFs are passing the SBs physical address on vport
+ * start, and as they lack an IGU mapping they need to store the
+ * addresses of previously registered SBs.
+ * Even if we were to change configuration flow, due to backward
+ * compatibility [with older PFs] we'd still need to store these.
+ */
+ struct ecore_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ /* Would be set if the VF is to try communicating with it PF
+ * using a hw channel.
+ */
+ bool b_hw_channel;
+#endif
+
+ /* Determines whether VF utilizes doorbells via limited register
+ * bar or via the doorbell bar.
+ */
+ bool b_doorbell_bar;
+};
+
+/**
+ * @brief VF - Get coalesce per VF's relative queue.
+ *
+ * @param p_hwfn
+ * @param p_coal - coalesce value in micro second for VF queues.
+ * @param p_cid - queue cid
+ *
+ **/
+enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 *p_coal,
+ struct ecore_queue_cid *p_cid);
+/**
+ * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
+ * Coalesce value '0' will omit the configuration.
+ *
+ * @param p_hwfn
+ * @param rx_coal - coalesce value in micro second for rx queue
+ * @param tx_coal - coalesce value in micro second for tx queue
+ * @param p_cid - queue cid
+ *
+ **/
+enum _ecore_status_t ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ struct ecore_queue_cid *p_cid);
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief hw preparation for VF
+ * sends ACQUIRE message
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief VF - start the RX Queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param p_cid - Only relative fields are relevant
+ * @param bd_max_bytes - maximum number of bytes per bd
+ * @param bd_chain_phys_addr - physical address of bd chain
+ * @param cqe_pbl_addr - physical address of pbl
+ * @param cqe_pbl_size - pbl size
+ * @param pp_prod - pointer to the producer to be
+ * used in fasthpath
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM **pp_prod);
+
+/**
+ * @brief VF - start the TX queue by sending a message to the
+ * PF.
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param bd_chain_phys_addr - physical address of tx chain
+ * @param pp_doorbell - pointer to address to which to
+ * write the doorbell too..
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM **pp_doorbell);
+
+/**
+ * @brief VF - stop the RX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param cqe_completion
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool cqe_completion);
+
+/**
+ * @brief VF - stop the TX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param p_cid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid);
+
+/* TODO - fix all the !SRIOV prototypes */
+
+#ifndef LINUX_REMOVE
+/**
+ * @brief VF - update the RX queue by sending a message to the
+ * PF
+ *
+ * @param p_hwfn
+ * @param pp_cid - list of queue-cids which we want to update
+ * @param num_rxqs
+ * @param comp_cqe_flg
+ * @param comp_event_flg
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid **pp_cid,
+ u8 num_rxqs,
+ u8 comp_cqe_flg,
+ u8 comp_event_flg);
+#endif
+
+/**
+ * @brief VF - send a vport update command
+ *
+ * @param p_hwfn
+ * @param params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params);
+
+/**
+ * @brief VF - send a close message to PF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief VF - free vf`s memories
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_vf_get_igu_sb_id - Get the IGU SB ID for a given
+ * sb_id. For VFs igu sbs don't have to be contiguous
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return INLINE u16
+ */
+u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
+ u16 sb_id);
+
+/**
+ * @brief Stores [or removes] a configured sb_info.
+ *
+ * @param p_hwfn
+ * @param sb_id - zero-based SB index [for fastpath]
+ * @param sb_info - may be OSAL_NULL [during removal].
+ */
+void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn,
+ u16 sb_id, struct ecore_sb_info *p_sb);
+
+/**
+ * @brief ecore_vf_pf_vport_start - perform vport start for VF.
+ *
+ * @param p_hwfn
+ * @param vport_id
+ * @param mtu
+ * @param inner_vlan_removal
+ * @param tpa_mode
+ * @param max_buffers_per_cqe,
+ * @param only_untagged - default behavior regarding vlan acceptance
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_pf_vport_start(
+ struct ecore_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum ecore_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe,
+ u8 only_untagged);
+
+/**
+ * @brief ecore_vf_pf_vport_stop - stop the VF's vport
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn);
+
+enum _ecore_status_t ecore_vf_pf_filter_ucast(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_ucast *p_param);
+
+void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_mcast *p_filter_cmd);
+
+/**
+ * @brief ecore_vf_pf_int_cleanup - clean the SB of the VF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief - return the link params in a given bulletin board
+ *
+ * @param p_params - pointer to a struct to fill with link params
+ * @param p_bulletin
+ */
+void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
+ struct ecore_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link state in a given bulletin board
+ *
+ * @param p_link - pointer to a struct to fill with link state
+ * @param p_bulletin
+ */
+void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
+ struct ecore_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link capabilities in a given bulletin board
+ *
+ * @param p_link - pointer to a struct to fill with link capabilities
+ * @param p_bulletin
+ */
+void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
+ struct ecore_bulletin_content *p_bulletin);
+
+enum _ecore_status_t
+ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn);
+
+void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
+
+u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
+ enum BAR_ID bar_id);
+
+/**
+ * @brief - ecore_vf_pf_update_mtu Update MTU for VF.
+ *
+ * @param p_hwfn
+ * @param - mtu
+ */
+enum _ecore_status_t
+ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu);
+#endif
+#endif /* __ECORE_VF_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_vf_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_vf_api.h
new file mode 100644
index 00000000..1a9fb3b1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_vf_api.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_VF_API_H__
+#define __ECORE_VF_API_H__
+
+#include "ecore_sp_api.h"
+#include "ecore_mcp_api.h"
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief Read the VF bulletin and act on it if needed
+ *
+ * @param p_hwfn
+ * @param p_change - ecore fills 1 iff bulletin board has changed, 0 otherwise.
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
+ u8 *p_change);
+
+/**
+ * @brief Get link parameters for VF from ecore
+ *
+ * @param p_hwfn
+ * @param params - the link params structure to be filled for the VF
+ */
+void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_params *params);
+
+/**
+ * @brief Get link state for VF from ecore
+ *
+ * @param p_hwfn
+ * @param link - the link state structure to be filled for the VF
+ */
+void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_state *link);
+
+/**
+ * @brief Get link capabilities for VF from ecore
+ *
+ * @param p_hwfn
+ * @param p_link_caps - the link capabilities structure to be filled for the VF
+ */
+void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_capabilities *p_link_caps);
+
+/**
+ * @brief Get number of Rx queues allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_rxqs - allocated RX queues
+ */
+void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_rxqs);
+
+/**
+ * @brief Get number of Rx queues allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_txqs - allocated RX queues
+ */
+void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_txqs);
+
+/**
+ * @brief Get port mac address for VF
+ *
+ * @param p_hwfn
+ * @param port_mac - destination location for port mac
+ */
+void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,
+ u8 *port_mac);
+
+/**
+ * @brief Get number of VLAN filters allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_rxqs - allocated VLAN filters
+ */
+void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
+ u8 *num_vlan_filters);
+
+void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ u32 *num_sbs);
+
+/**
+ * @brief Get number of MAC filters allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_rxqs - allocated MAC filters
+ */
+void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
+ u32 *num_mac_filters);
+
+/**
+ * @brief Check if VF can set a MAC address
+ *
+ * @param p_hwfn
+ * @param mac
+ *
+ * @return bool
+ */
+bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac);
+
+#ifndef LINUX_REMOVE
+/**
+ * @brief Copy forced MAC address from bulletin board
+ *
+ * @param hwfn
+ * @param dst_mac
+ * @param p_is_forced - out param which indicate in case mac
+ * exist if it forced or not.
+ *
+ * @return bool - return true if mac exist and false if
+ * not.
+ */
+bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
+ u8 *p_is_forced);
+
+/**
+ * @brief Check if force vlan is set and copy the forced vlan
+ * from bulletin board
+ *
+ * @param hwfn
+ * @param dst_pvid
+ * @return bool
+ */
+bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid);
+
+/**
+ * @brief Check if VF is based on PF whose driver is pre-fp-hsi version;
+ * This affects the fastpath implementation of the driver.
+ *
+ * @param p_hwfn
+ *
+ * @return bool - true iff PF is pre-fp-hsi version.
+ */
+bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn);
+
+#endif
+
+/**
+ * @brief Set firmware version information in dev_info from VFs acquire
+ * response tlv
+ *
+ * @param p_hwfn
+ * @param fw_major
+ * @param fw_minor
+ * @param fw_rev
+ * @param fw_eng
+ */
+void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
+ u16 *fw_major,
+ u16 *fw_minor,
+ u16 *fw_rev,
+ u16 *fw_eng);
+void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
+ u16 *p_vxlan_port, u16 *p_geneve_port);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/**
+ * @brief set the VF to use a SW/HW channel when communicating with PF.
+ * NOTICE: today the likely first place to call this from VF
+ * would be OSAL_VF_FILL_ACQUIRE_RESC_REQ(); Might want to consider
+ * something a bit more appropriate.
+ *
+ * @param p_hwfn
+ * @param b_is_hw - true iff VF is to use a HW-channel
+ */
+void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw);
+#endif
+#endif
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_vfpf_if.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_vfpf_if.h
new file mode 100644
index 00000000..c30677ab
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_vfpf_if.h
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ECORE_VF_PF_IF_H__
+#define __ECORE_VF_PF_IF_H__
+
+/* @@@ TBD MichalK this should be HSI? */
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY_SIZE 10 /* @@@ TBD this should be HSI? */
+
+/***********************************************
+ *
+ * Common definitions for all HVs
+ *
+ **/
+struct vf_pf_resc_request {
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters; /* No limit so superfluous */
+ u8 num_cids;
+ u8 padding;
+};
+
+struct hw_sb_info {
+ u16 hw_sb_id; /* aka absolute igu id, used to ack the sb */
+ u8 sb_qid; /* used to update DHC for sb */
+ u8 padding[5];
+};
+
+/***********************************************
+ *
+ * HW VF-PF channel definitions
+ *
+ * A.K.A VF-PF mailbox
+ *
+ **/
+#define TLV_BUFFER_SIZE 1024
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ u16 type;
+ u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+ struct channel_tlv tl;
+ u32 padding;
+ u64 reply_address;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+ struct channel_tlv tl;
+ u8 status;
+ u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_def_resp_tlv {
+ struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+};
+
+/* Acquire */
+struct vfpf_acquire_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_vfdev_info {
+#ifndef LINUX_REMOVE
+ /* First bit was used on 8.7.x and 8.8.x versions, which had different
+ * FWs used but with the same faspath HSI. As this was prior to the
+ * fastpath versioning, wanted to have ability to override fw matching
+ * and allow them to interact.
+ */
+#endif
+/* VF pre-FP hsi version */
+#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0)
+#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+
+ /* A requirement for supporting multi-Tx queues on a single queue-zone,
+ * VF would pass qids as additional information whenever passing queue
+ * references.
+ * TODO - due to the CID limitations in Bar0, VFs currently don't pass
+ * this, and use the legacy CID scheme.
+ */
+#define VFPF_ACQUIRE_CAP_QUEUE_QIDS (1 << 2)
+
+ /* The VF is using the physical bar. While this is mostly internal
+ * to the VF, might affect the number of CIDs supported assuming
+ * QUEUE_QIDS is set.
+ */
+#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR (1 << 3)
+ u64 capabilities;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_revision;
+ u8 fw_engineering;
+ u32 driver_version;
+ u16 opaque_fid; /* ME register value */
+ u8 os_type; /* VFPF_ACQUIRE_OS_* value */
+ u8 eth_fp_hsi_major;
+ u8 eth_fp_hsi_minor;
+ u8 padding[3];
+ } vfdev_info;
+
+ struct vf_pf_resc_request resc_request;
+
+ u64 bulletin_addr;
+ u32 bulletin_size;
+ u32 padding;
+};
+
+/* receive side scaling tlv */
+struct vfpf_vport_update_rss_tlv {
+ struct channel_tlv tl;
+
+ u8 update_rss_flags;
+ #define VFPF_UPDATE_RSS_CONFIG_FLAG (1 << 0)
+ #define VFPF_UPDATE_RSS_CAPS_FLAG (1 << 1)
+ #define VFPF_UPDATE_RSS_IND_TABLE_FLAG (1 << 2)
+ #define VFPF_UPDATE_RSS_KEY_FLAG (1 << 3)
+
+ u8 rss_enable;
+ u8 rss_caps;
+ u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+ u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY_SIZE];
+};
+
+struct pfvf_storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct pfvf_stats_info {
+ struct pfvf_storm_stats mstats;
+ struct pfvf_storm_stats pstats;
+ struct pfvf_storm_stats tstats;
+ struct pfvf_storm_stats ustats;
+};
+
+/* acquire response tlv - carries the allocated resources */
+struct pfvf_acquire_resp_tlv {
+ struct pfvf_tlv hdr;
+
+ struct pf_vf_pfdev_info {
+ u32 chip_num;
+ u32 mfw_ver;
+
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_rev;
+ u16 fw_eng;
+
+ u64 capabilities;
+#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED (1 << 0)
+#define PFVF_ACQUIRE_CAP_100G (1 << 1) /* If set, 100g PF */
+/* There are old PF versions where the PF might mistakenly override the sanity
+ * mechanism [version-based] and allow a VF that can't be supported to pass
+ * the acquisition phase.
+ * To overcome this, PFs now indicate that they're past that point and the new
+ * VFs would fail probe on the older PFs that fail to do so.
+ */
+#ifndef LINUX_REMOVE
+/* Said bug was in quest/serpens; Can't be certain no official release included
+ * the bug since the fix arrived very late in the programs.
+ */
+#endif
+#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE (1 << 2)
+
+ /* PF expects queues to be received with additional qids */
+#define PFVF_ACQUIRE_CAP_QUEUE_QIDS (1 << 3)
+
+ u16 db_size;
+ u8 indices_per_sb;
+ u8 os_type;
+
+ /* These should match the PF's ecore_dev values */
+ u16 chip_rev;
+ u8 dev_type;
+
+ /* Doorbell bar size configured in HW: log(size) or 0 */
+ u8 bar_size;
+
+ struct pfvf_stats_info stats_info;
+
+ u8 port_mac[ETH_ALEN];
+
+ /* It's possible PF had to configure an older fastpath HSI
+ * [in case VF is newer than PF]. This is communicated back
+ * to the VF. It can also be used in case of error due to
+ * non-matching versions to shed light in VF about failure.
+ */
+ u8 major_fp_hsi;
+ u8 minor_fp_hsi;
+ } pfdev_info;
+
+ struct pf_vf_resc {
+ /* in case of status NO_RESOURCE in message hdr, pf will fill
+ * this struct with suggested amount of resources for next
+ * acquire request
+ */
+ #define PFVF_MAX_QUEUES_PER_VF 16
+ #define PFVF_MAX_SBS_PER_VF 16
+ struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 cid[PFVF_MAX_QUEUES_PER_VF];
+
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 num_cids;
+ u8 padding;
+ } resc;
+
+ u32 bulletin_size;
+ u32 padding;
+};
+
+struct pfvf_start_queue_resp_tlv {
+ struct pfvf_tlv hdr;
+ u32 offset; /* offset to consumer/producer of queue */
+ u8 padding[4];
+};
+
+/* Extended queue information - additional index for reference inside qzone.
+ * If commmunicated between VF/PF, each TLV relating to queues should be
+ * extended by one such [or have a future base TLV that already contains info].
+ */
+struct vfpf_qid_tlv {
+ struct channel_tlv tl;
+ u8 qid;
+ u8 padding[3];
+};
+
+/* Setup Queue */
+struct vfpf_start_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 rxq_addr;
+ u64 deprecated_sge_addr;
+ u64 cqe_pbl_addr;
+
+ u16 cqe_pbl_size;
+ u16 hw_sb;
+ u16 rx_qid;
+ u16 hc_rate; /* desired interrupts per sec. */
+
+ u16 bd_max_bytes;
+ u16 stat_id;
+ u8 sb_index;
+ u8 padding[3];
+
+};
+
+struct vfpf_start_txq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 pbl_addr;
+ u16 pbl_size;
+ u16 stat_id;
+ u16 tx_qid;
+ u16 hw_sb;
+
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 hc_rate; /* desired interrupts per sec. */
+ u8 sb_index;
+ u8 padding[3];
+};
+
+/* Stop RX Queue */
+struct vfpf_stop_rxqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 rx_qid;
+
+ /* While the API supports multiple Rx-queues on a single TLV
+ * message, in practice older VFs always used it as one [ecore].
+ * And there are PFs [starting with the CHANNEL_TLV_QID] which
+ * would start assuming this is always a '1'. So in practice this
+ * field should be considered deprecated and *Always* set to '1'.
+ */
+ u8 num_rxqs;
+
+ u8 cqe_completion;
+ u8 padding[4];
+};
+
+/* Stop TX Queues */
+struct vfpf_stop_txqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 tx_qid;
+
+ /* While the API supports multiple Tx-queues on a single TLV
+ * message, in practice older VFs always used it as one [ecore].
+ * And there are PFs [starting with the CHANNEL_TLV_QID] which
+ * would start assuming this is always a '1'. So in practice this
+ * field should be considered deprecated and *Always* set to '1'.
+ */
+ u8 num_txqs;
+ u8 padding[5];
+};
+
+struct vfpf_update_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 flags;
+ #define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG (1 << 0)
+ #define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG (1 << 1)
+ #define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG (1 << 2)
+
+ u8 padding[4];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+ u32 flags;
+ #define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+ #define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+ #define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+
+ u8 mac[ETH_ALEN];
+ u16 vlan_tag;
+
+ u8 padding[4];
+};
+
+/* Start a vport */
+struct vfpf_vport_start_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 sb_addr[PFVF_MAX_SBS_PER_VF];
+
+ u32 tpa_mode;
+ u16 dep1;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 inner_vlan_removal;
+
+ u8 only_untagged;
+ u8 max_buffers_per_cqe;
+
+ u8 padding[4];
+};
+
+/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
+struct vfpf_vport_update_activate_tlv {
+ struct channel_tlv tl;
+ u8 update_rx;
+ u8 update_tx;
+ u8 active_rx;
+ u8 active_tx;
+};
+
+struct vfpf_vport_update_tx_switch_tlv {
+ struct channel_tlv tl;
+ u8 tx_switching;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_vlan_strip_tlv {
+ struct channel_tlv tl;
+ u8 remove_vlan;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_mcast_bin_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+
+ /* This was a mistake; There are only 256 approx bins,
+ * and in HSI they're divided into 32-bit values.
+ * As old VFs used to set-bit to the values on its side,
+ * the upper half of the array is never expected to contain any data.
+ */
+ u64 bins[4];
+ u64 obsolete_bins[4];
+};
+
+struct vfpf_vport_update_accept_param_tlv {
+ struct channel_tlv tl;
+ u8 update_rx_mode;
+ u8 update_tx_mode;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+};
+
+struct vfpf_vport_update_accept_any_vlan_tlv {
+ struct channel_tlv tl;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+
+ u8 padding[2];
+};
+
+struct vfpf_vport_update_sge_tpa_tlv {
+ struct channel_tlv tl;
+
+ u16 sge_tpa_flags;
+ #define VFPF_TPA_IPV4_EN_FLAG (1 << 0)
+ #define VFPF_TPA_IPV6_EN_FLAG (1 << 1)
+ #define VFPF_TPA_PKT_SPLIT_FLAG (1 << 2)
+ #define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
+ #define VFPF_TPA_GRO_CONSIST_FLAG (1 << 4)
+ #define VFPF_TPA_TUNN_IPV4_EN_FLAG (1 << 5)
+ #define VFPF_TPA_TUNN_IPV6_EN_FLAG (1 << 6)
+
+ u8 update_sge_tpa_flags;
+ #define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0)
+ #define VFPF_UPDATE_TPA_EN_FLAG (1 << 1)
+ #define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2)
+
+ u8 max_buffers_per_cqe;
+
+ u16 deprecated_sge_buff_size;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+
+ u8 tpa_max_aggs_num;
+ u8 padding[7];
+
+};
+
+/* Primary tlv as a header for various extended tlvs for
+ * various functionalities in vport update ramrod.
+ */
+struct vfpf_vport_update_tlv {
+ struct vfpf_first_tlv first_tlv;
+};
+
+struct vfpf_ucast_filter_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 opcode;
+ u8 type;
+
+ u8 mac[ETH_ALEN];
+
+ u16 vlan;
+ u16 padding[3];
+};
+
+/* tunnel update param tlv */
+struct vfpf_update_tunn_param_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 tun_mode_update_mask;
+ u8 tunn_mode;
+ u8 update_tun_cls;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u8 update_geneve_port;
+ u8 update_vxlan_port;
+ u16 geneve_port;
+ u16 vxlan_port;
+ u8 padding[2];
+};
+
+struct pfvf_update_tunn_param_tlv {
+ struct pfvf_tlv hdr;
+
+ u16 tunn_feature_mask;
+ u8 vxlan_mode;
+ u8 l2geneve_mode;
+ u8 ipgeneve_mode;
+ u8 l2gre_mode;
+ u8 ipgre_mode;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+};
+
+struct tlv_buffer_size {
+ u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+struct vfpf_update_coalesce {
+ struct vfpf_first_tlv first_tlv;
+ u16 rx_coal;
+ u16 tx_coal;
+ u16 qid;
+ u8 padding[2];
+};
+
+struct vfpf_read_coal_req_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 qid;
+ u8 is_rx;
+ u8 padding[5];
+};
+
+struct pfvf_read_coal_resp_tlv {
+ struct pfvf_tlv hdr;
+ u16 coal;
+ u8 padding[6];
+};
+
+struct vfpf_bulletin_update_mac_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u8 mac[ETH_ALEN];
+ u8 padding[2];
+};
+
+struct vfpf_update_mtu_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 mtu;
+ u8 padding[6];
+};
+
+union vfpf_tlvs {
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_start_rxq_tlv start_rxq;
+ struct vfpf_start_txq_tlv start_txq;
+ struct vfpf_stop_rxqs_tlv stop_rxqs;
+ struct vfpf_stop_txqs_tlv stop_txqs;
+ struct vfpf_update_rxq_tlv update_rxq;
+ struct vfpf_vport_start_tlv start_vport;
+ struct vfpf_vport_update_tlv vport_update;
+ struct vfpf_ucast_filter_tlv ucast_filter;
+ struct vfpf_update_tunn_param_tlv tunn_param_update;
+ struct vfpf_update_coalesce update_coalesce;
+ struct vfpf_read_coal_req_tlv read_coal_req;
+ struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
+ struct vfpf_update_mtu_tlv update_mtu;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+ struct pfvf_def_resp_tlv default_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct tlv_buffer_size tlv_buf_size;
+ struct pfvf_start_queue_resp_tlv queue_start;
+ struct pfvf_update_tunn_param_tlv tunn_param_resp;
+ struct pfvf_read_coal_resp_tlv read_coal_resp;
+};
+
+/* This is a structure which is allocated in the VF, which the PF may update
+ * when it deems it necessary to do so. The bulletin board is sampled
+ * periodically by the VF. A copy per VF is maintained in the PF (to prevent
+ * loss of data upon multiple updates (or the need for read modify write)).
+ */
+enum ecore_bulletin_bit {
+ /* Alert the VF that a forced MAC was set by the PF */
+ MAC_ADDR_FORCED = 0,
+
+ /* The VF should not access the vfpf channel */
+ VFPF_CHANNEL_INVALID = 1,
+
+ /* Alert the VF that a forced VLAN was set by the PF */
+ VLAN_ADDR_FORCED = 2,
+
+ /* Indicate that `default_only_untagged' contains actual data */
+ VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
+ VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
+
+ /* Alert the VF that suggested mac was sent by the PF.
+ * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set
+ */
+ VFPF_BULLETIN_MAC_ADDR = 5
+};
+
+struct ecore_bulletin_content {
+ /* crc of structure to ensure is not in mid-update */
+ u32 crc;
+
+ u32 version;
+
+ /* bitmap indicating which fields hold valid values */
+ u64 valid_bitmap;
+
+ /* used for MAC_ADDR or MAC_ADDR_FORCED */
+ u8 mac[ETH_ALEN];
+
+ /* If valid, 1 => only untagged Rx if no vlan is configured */
+ u8 default_only_untagged;
+ u8 padding;
+
+ /* The following is a 'copy' of ecore_mcp_link_state,
+ * ecore_mcp_link_params and ecore_mcp_link_capabilities. Since it's
+ * possible the structs will increase further along the road we cannot
+ * have it here; Instead we need to have all of its fields.
+ */
+ u8 req_autoneg;
+ u8 req_autoneg_pause;
+ u8 req_forced_rx;
+ u8 req_forced_tx;
+ u8 padding2[4];
+
+ u32 req_adv_speed;
+ u32 req_forced_speed;
+ u32 req_loopback;
+ u32 padding3;
+
+ u8 link_up;
+ u8 full_duplex;
+ u8 autoneg;
+ u8 autoneg_complete;
+ u8 parallel_detection;
+ u8 pfc_enabled;
+ u8 partner_tx_flow_ctrl_en;
+ u8 partner_rx_flow_ctrl_en;
+
+ u8 partner_adv_pause;
+ u8 sfp_tx_fault;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 padding4[2];
+
+ u32 speed;
+ u32 partner_adv_speed;
+
+ u32 capability_speed;
+
+ /* Forced vlan */
+ u16 pvid;
+ u16 padding5;
+};
+
+struct ecore_bulletin {
+ dma_addr_t phys;
+ struct ecore_bulletin_content *p_virt;
+ u32 size;
+};
+
+enum {
+/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
+
+ CHANNEL_TLV_NONE, /* ends tlv sequence */
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_VPORT_START,
+ CHANNEL_TLV_VPORT_UPDATE,
+ CHANNEL_TLV_VPORT_TEARDOWN,
+ CHANNEL_TLV_START_RXQ,
+ CHANNEL_TLV_START_TXQ,
+ CHANNEL_TLV_STOP_RXQS,
+ CHANNEL_TLV_STOP_TXQS,
+ CHANNEL_TLV_UPDATE_RXQ,
+ CHANNEL_TLV_INT_CLEANUP,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_UCAST_FILTER,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
+ CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
+ CHANNEL_TLV_VPORT_UPDATE_RSS,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
+ CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ CHANNEL_TLV_COALESCE_UPDATE,
+ CHANNEL_TLV_QID,
+ CHANNEL_TLV_COALESCE_READ,
+ CHANNEL_TLV_BULLETIN_UPDATE_MAC,
+ CHANNEL_TLV_UPDATE_MTU,
+ CHANNEL_TLV_MAX,
+
+ /* Required for iterating over vport-update tlvs.
+ * Will break in case non-sequential vport-update tlvs.
+ */
+ CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
+
+/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
+};
+extern const char *ecore_channel_tlvs_string[];
+
+#endif /* __ECORE_VF_PF_IF_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/eth_common.h b/src/spdk/dpdk/drivers/net/qede/base/eth_common.h
new file mode 100644
index 00000000..abfa6854
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/eth_common.h
@@ -0,0 +1,684 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef __ETH_COMMON__
+#define __ETH_COMMON__
+/********************/
+/* ETH FW CONSTANTS */
+/********************/
+
+/* FP HSI version. FP HSI is compatible if (fwVer.major == drvVer.major &&
+ * fwVer.minor >= drvVer.minor)
+ */
+/* ETH FP HSI Major version */
+#define ETH_HSI_VER_MAJOR 3
+/* ETH FP HSI Minor version */
+#define ETH_HSI_VER_MINOR 10
+
+/* Alias for 8.7.x.x/8.8.x.x ETH FP HSI MINOR version. In this version driver
+ * is not required to set pkt_len field in eth_tx_1st_bd struct, and tunneling
+ * offload is not supported.
+ */
+#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
+
+#define ETH_CACHE_LINE_SIZE 64
+#define ETH_RX_CQE_GAP 32
+#define ETH_MAX_RAMROD_PER_CON 8
+#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+
+/* Limitation for Tunneled LSO Packets on the offset (in bytes) of the inner IP
+ * header (relevant to LSO for tunneled packet):
+ */
+/* Offset is limited to 253 bytes (inclusive). */
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
+/* Offset is limited to 251 bytes (inclusive). */
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
+#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
+#define ETH_TX_MAX_LSO_HDR_NBD 4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
+/* (QM_REG_TASKBYTECRDCOST_0, QM_VOQ_BYTE_CRD_TASK_COST) -
+ * (VLAN-TAG + CRC + IPG + PREAMBLE)
+ */
+#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES 510
+/* Number of BDs to consider for LSO sliding window restriction is
+ * (ETH_TX_LSO_WINDOW_BDS_NUM - hdr_nbd)
+ */
+#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
+/* Minimum data length (in bytes) in LSO sliding window */
+#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
+/* Maximum LSO packet TCP payload length (in bytes) */
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
+/* Number of same-as-last resources in tx switching */
+#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
+/* Value for a connection for which same as last feature is disabled */
+#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
+
+/* Maximum number of statistics counters */
+#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+/* Maximum number of statistics counters when doubled VF zone used */
+#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
+ (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
+/* Maximum number of statistics counters when quad VF zone used */
+#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
+ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
+
+/* Maximum number of buffers, used for RX packet placement */
+#define ETH_RX_MAX_BUFF_PER_PKT 5
+/* Minimum number of free BDs in RX ring, that guarantee receiving of at least
+ * one RX packet.
+ */
+#define ETH_RX_BD_THRESHOLD 12
+
+/* num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS 512
+#define ETH_NUM_VLAN_FILTERS 512
+
+/* approx. multicast constants */
+/* CRC seed for multicast bin calculation */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
+#define ETH_MULTICAST_MAC_BINS 256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
+
+/* ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT 10
+/* number of RSS indirection table entries, per Vport) */
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+/* Length of RSS key (in regs) */
+#define ETH_RSS_KEY_SIZE_REGS 10
+/* number of available RSS engines in K2 */
+#define ETH_RSS_ENGINE_NUM_K2 207
+/* number of available RSS engines in BB */
+#define ETH_RSS_ENGINE_NUM_BB 127
+
+/* TPA constants */
+/* Maximum number of open TPA aggregations */
+#define ETH_TPA_MAX_AGGS_NUM 64
+/* Maximum number of additional buffers, reported by TPA-start CQE */
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
+/* Maximum number of buffers, reported by TPA-continue CQE */
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
+/* Maximum number of buffers, reported by TPA-end CQE */
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
+
+/* Control frame check constants */
+/* Number of etherType values configured by driver for control frame check */
+#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
+
+/* GFS constants */
+#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
+
+
+
+/*
+ * Destination port mode
+ */
+enum dest_port_mode {
+ DEST_PORT_PHY /* Send to physical port. */,
+ DEST_PORT_LOOPBACK /* Send to loopback port. */,
+ DEST_PORT_PHY_LOOPBACK /* Send to physical and loopback port. */,
+ DEST_PORT_DROP /* Drop the packet in PBF. */,
+ MAX_DEST_PORT_MODE
+};
+
+
+/*
+ * Ethernet address type
+ */
+enum eth_addr_type {
+ BROADCAST_ADDRESS,
+ MULTICAST_ADDRESS,
+ UNICAST_ADDRESS,
+ UNKNOWN_ADDRESS,
+ MAX_ETH_ADDR_TYPE
+};
+
+
+struct eth_tx_1st_bd_flags {
+ u8 bitfields;
+/* Set to 1 in the first BD. (for debug) */
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
+/* Do not allow additional VLAN manipulations on this packet. */
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
+/* Recalculate IP checksum. For tunneled packet - relevant to inner header. */
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
+/* Recalculate TCP/UDP checksum.
+ * For tunneled packet - relevant to inner header.
+ */
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
+/* If set, insert VLAN tag from vlan field to the packet.
+ * For tunneled packet - relevant to outer header.
+ */
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
+/* If set, this is an LSO packet. Note: For Tunneled LSO packets, the offset of
+ * the inner IPV4 (and IPV6) header is limited to 253 (and 251 respectively)
+ * bytes, inclusive.
+ */
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
+/* Recalculate Tunnel IP Checksum (if Tunnel IP Header is IPv4) */
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
+/* Recalculate Tunnel UDP/GRE Checksum (Depending on Tunnel Type) */
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
+};
+
+/*
+ * The parsing information data for the first tx bd of a given packet.
+ */
+struct eth_tx_data_1st_bd {
+/* VLAN tag to insert to packet (if enabled by vlan_insertion flag). */
+ __le16 vlan;
+/* Number of BDs in packet. Should be at least 1 in non-LSO packet and at least
+ * 3 in LSO (or Tunnel with IPv6+ext) packet.
+ */
+ u8 nbds;
+ struct eth_tx_1st_bd_flags bd_flags;
+ __le16 bitfields;
+/* Indicates a tunneled packet. Must be set for encapsulated packet. */
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
+/* Total packet length - must be filled for non-LSO packets. */
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
+};
+
+/*
+ * The parsing information data for the second tx bd of a given packet.
+ */
+struct eth_tx_data_2nd_bd {
+/* For tunnel with IPv6+ext - Tunnel header IP datagram length (in BYTEs) */
+ __le16 tunn_ip_size;
+ __le16 bitfields1;
+/* For Tunnel header with IPv6 ext. - Inner L2 Header Size (in 2-byte WORDs) */
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+/* For Tunnel header with IPv6 ext. - Inner L2 Header MAC DA Type
+ * (use enum eth_addr_type)
+ */
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
+/* Destination port mode. (use enum dest_port_mode) */
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+/* Should be 0 in all the BDs, except the first one. (for debug) */
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
+/* For Tunnel header with IPv6 ext. - Tunnel Type (use enum eth_tx_tunn_type) */
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
+/* For LSO / Tunnel header with IPv6+ext - Set if inner header is IPv6 */
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
+/* In tunneling mode - Set to 1 when the Inner header is IPv6 with extension.
+ * Otherwise set to 1 if the header is IPv6 with extension.
+ */
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
+/* Set to 1 if Tunnel (outer = encapsulating) header has IPv6 ext. (Note: 3rd BD
+ * is required, hence EDPM does not support Tunnel [outer] header with Ipv6Ext)
+ */
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
+/* Set if (inner) L4 protocol is UDP. (Required when IPv6+ext (or tunnel with
+ * inner or outer Ipv6+ext) and l4_csum is set)
+ */
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
+/* The pseudo header checksum type in the L4 checksum field. Required when
+ * IPv6+ext and l4_csum is set. (use enum eth_l4_pseudo_checksum_mode)
+ */
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
+ __le16 bitfields2;
+/* For inner/outer header IPv6+ext - (inner) L4 header offset (in 2-byte WORDs).
+ * For regular packet - offset from the beginning of the packet. For tunneled
+ * packet - offset from the beginning of the inner header
+ */
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
+};
+
+/*
+ * Firmware data for L2-EDPM packet.
+ */
+struct eth_edpm_fw_data {
+/* Parsing information data from the 1st BD. */
+ struct eth_tx_data_1st_bd data_1st_bd;
+/* Parsing information data from the 2nd BD. */
+ struct eth_tx_data_2nd_bd data_2nd_bd;
+ __le32 reserved;
+};
+
+
+/*
+ * FW debug.
+ */
+struct eth_fast_path_cqe_fw_debug {
+ __le16 reserved2 /* FW reserved. */;
+};
+
+
+/*
+ * tunneling parsing flags
+ */
+struct eth_tunnel_parsing_flags {
+ u8 flags;
+/* 0 - no tunneling, 1 - GENEVE, 2 - GRE, 3 - VXLAN
+ * (use enum eth_rx_tunn_type)
+ */
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
+/* If it s not an encapsulated packet then put 0x0. If it s an encapsulated
+ * packet but the tenant-id doesn t exist then put 0x0. Else put 0x1
+ *
+ */
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
+/* Type of the next header above the tunneling: 0 - unknown, 1 - L2, 2 - Ipv4,
+ * 3 - IPv6 (use enum tunnel_next_protocol)
+ */
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
+/* The result of comparing the DA-ip of the tunnel header. */
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
+};
+
+/*
+ * PMD flow control bits
+ */
+struct eth_pmd_flow_flags {
+ u8 flags;
+#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 /* CQE valid bit */
+#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 /* CQE ring toggle bit */
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
+#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
+#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
+};
+
+/*
+ * Regular ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_reg_cqe {
+ u8 type /* CQE type */;
+ u8 bitfields;
+/* Type of calculated RSS hash (use enum rss_hash_type) */
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+/* Traffic Class */
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
+ __le16 pkt_len /* Total packet length (from the parser) */;
+/* Parsing and error flags from the parser */
+ struct parsing_and_err_flags pars_flags;
+ __le16 vlan_tag /* 802.1q VLAN tag */;
+ __le32 rss_hash /* RSS hash result */;
+ __le16 len_on_first_bd /* Number of bytes placed on first BD */;
+ u8 placement_offset /* Offset of placement from BD start */;
+/* Tunnel Parsing Flags */
+ struct eth_tunnel_parsing_flags tunnel_pars_flags;
+ u8 bd_num /* Number of BDs, used for packet */;
+ u8 reserved[9];
+ struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
+ u8 reserved1[3];
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
+};
+
+
+/*
+ * TPA-continue ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_tpa_cont_cqe {
+ u8 type /* CQE type */;
+ u8 tpa_agg_index /* TPA aggregation index */;
+/* List of the segment sizes */
+ __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 reserved;
+ u8 reserved1 /* FW reserved. */;
+ __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE] /* FW reserved. */;
+ u8 reserved3[3];
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
+};
+
+
+/*
+ * TPA-end ETH Rx FP CQE .
+ */
+struct eth_fast_path_rx_tpa_end_cqe {
+ u8 type /* CQE type */;
+ u8 tpa_agg_index /* TPA aggregation index */;
+ __le16 total_packet_len /* Total aggregated packet length */;
+ u8 num_of_bds /* Total number of BDs comprising the packet */;
+/* Aggregation end reason. Use enum eth_tpa_end_reason */
+ u8 end_reason;
+ __le16 num_of_coalesced_segs /* Number of coalesced TCP segments */;
+ __le32 ts_delta /* TCP timestamp delta */;
+/* List of the segment sizes */
+ __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE] /* FW reserved. */;
+ __le16 reserved1;
+ u8 reserved2 /* FW reserved. */;
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
+};
+
+
+/*
+ * TPA-start ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_tpa_start_cqe {
+ u8 type /* CQE type */;
+ u8 bitfields;
+/* Type of calculated RSS hash (use enum rss_hash_type) */
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
+/* Traffic Class */
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
+ __le16 seg_len /* Segment length (packetLen from the parser) */;
+/* Parsing and error flags from the parser */
+ struct parsing_and_err_flags pars_flags;
+ __le16 vlan_tag /* 802.1q VLAN tag */;
+ __le32 rss_hash /* RSS hash result */;
+ __le16 len_on_first_bd /* Number of bytes placed on first BD */;
+ u8 placement_offset /* Offset of placement from BD start */;
+/* Tunnel Parsing Flags */
+ struct eth_tunnel_parsing_flags tunnel_pars_flags;
+ u8 tpa_agg_index /* TPA aggregation index */;
+ u8 header_len /* Packet L2+L3+L4 header length */;
+/* Additional BDs length list. */
+ __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
+ struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
+ u8 reserved;
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
+};
+
+
+/*
+ * The L4 pseudo checksum mode for Ethernet
+ */
+enum eth_l4_pseudo_checksum_mode {
+/* Pseudo Header checksum on packet is calculated with the correct packet length
+ * field.
+ */
+ ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+/* Pseudo Header checksum on packet is calculated with zero length field. */
+ ETH_L4_PSEUDO_CSUM_ZERO_LENGTH,
+ MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
+};
+
+
+
+struct eth_rx_bd {
+ struct regpair addr /* single continues buffer */;
+};
+
+
+/*
+ * regular ETH Rx SP CQE
+ */
+struct eth_slow_path_rx_cqe {
+ u8 type /* CQE type */;
+ u8 ramrod_cmd_id;
+ u8 error_flag;
+ u8 reserved[25];
+ __le16 echo;
+ u8 reserved1;
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
+};
+
+/*
+ * union for all ETH Rx CQE types
+ */
+union eth_rx_cqe {
+/* Regular FP CQE */
+ struct eth_fast_path_rx_reg_cqe fast_path_regular;
+/* TPA-start CQE */
+ struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
+/* TPA-continue CQE */
+ struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
+/* TPA-end CQE */
+ struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
+ struct eth_slow_path_rx_cqe slow_path /* SP CQE */;
+};
+
+
+/*
+ * ETH Rx CQE type
+ */
+enum eth_rx_cqe_type {
+ ETH_RX_CQE_TYPE_UNUSED,
+ ETH_RX_CQE_TYPE_REGULAR /* Regular FP ETH Rx CQE */,
+ ETH_RX_CQE_TYPE_SLOW_PATH /* Slow path ETH Rx CQE */,
+ ETH_RX_CQE_TYPE_TPA_START /* TPA start ETH Rx CQE */,
+ ETH_RX_CQE_TYPE_TPA_CONT /* TPA Continue ETH Rx CQE */,
+ ETH_RX_CQE_TYPE_TPA_END /* TPA end ETH Rx CQE */,
+ MAX_ETH_RX_CQE_TYPE
+};
+
+
+/*
+ * Wrapper for PD RX CQE - used in order to cover full cache line when writing
+ * CQE
+ */
+struct eth_rx_pmd_cqe {
+ union eth_rx_cqe cqe /* CQE data itself */;
+ u8 reserved[ETH_RX_CQE_GAP];
+};
+
+
+/*
+ * Eth RX Tunnel Type
+ */
+enum eth_rx_tunn_type {
+ ETH_RX_NO_TUNN /* No Tunnel. */,
+ ETH_RX_TUNN_GENEVE /* GENEVE Tunnel. */,
+ ETH_RX_TUNN_GRE /* GRE Tunnel. */,
+ ETH_RX_TUNN_VXLAN /* VXLAN Tunnel. */,
+ MAX_ETH_RX_TUNN_TYPE
+};
+
+
+
+/*
+ * Aggregation end reason.
+ */
+enum eth_tpa_end_reason {
+ ETH_AGG_END_UNUSED,
+ ETH_AGG_END_SP_UPDATE /* SP configuration update */,
+/* Maximum aggregation length or maximum buffer number used. */
+ ETH_AGG_END_MAX_LEN,
+/* TCP PSH flag or TCP payload length below continue threshold. */
+ ETH_AGG_END_LAST_SEG,
+ ETH_AGG_END_TIMEOUT /* Timeout expiration. */,
+/* Packet header not consistency: different IPv4 TOS, TTL or flags, IPv6 TC,
+ * Hop limit or Flow label, TCP header length or TS options. In GRO different
+ * TS value, SMAC, DMAC, ackNum, windowSize or VLAN
+ */
+ ETH_AGG_END_NOT_CONSISTENT,
+/* Out of order or retransmission packet: sequence, ack or timestamp not
+ * consistent with previous segment.
+ */
+ ETH_AGG_END_OUT_OF_ORDER,
+/* Next segment cant be aggregated due to LLC/SNAP, IP error, IP fragment, IPv4
+ * options, IPv6 extension, IP ECN = CE, TCP errors, TCP options, zero TCP
+ * payload length , TCP flags or not supported tunnel header options.
+ */
+ ETH_AGG_END_NON_TPA_SEG,
+ MAX_ETH_TPA_END_REASON
+};
+
+
+
+/*
+ * The first tx bd of a given packet
+ */
+struct eth_tx_1st_bd {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ struct eth_tx_data_1st_bd data /* Parsing information data. */;
+};
+
+
+
+/*
+ * The second tx bd of a given packet
+ */
+struct eth_tx_2nd_bd {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ struct eth_tx_data_2nd_bd data /* Parsing information data. */;
+};
+
+
+/*
+ * The parsing information data for the third tx bd of a given packet.
+ */
+struct eth_tx_data_3rd_bd {
+ __le16 lso_mss /* For LSO packet - the MSS in bytes. */;
+ __le16 bitfields;
+/* For LSO with inner/outer IPv6+ext - TCP header length (in 4-byte WORDs) */
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+/* LSO - number of BDs which contain headers. value should be in range
+ * (1..ETH_TX_MAX_LSO_HDR_NBD).
+ */
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
+/* Should be 0 in all the BDs, except the first one. (for debug) */
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
+/* For tunnel with IPv6+ext - Pointer to tunnel L4 Header (in 2-byte WORDs) */
+ u8 tunn_l4_hdr_start_offset_w;
+/* For tunnel with IPv6+ext - Total size of Tunnel Header (in 2-byte WORDs) */
+ u8 tunn_hdr_size_w;
+};
+
+/*
+ * The third tx bd of a given packet
+ */
+struct eth_tx_3rd_bd {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ struct eth_tx_data_3rd_bd data /* Parsing information data. */;
+};
+
+
+/*
+ * Complementary information for the regular tx bd of a given packet.
+ */
+struct eth_tx_data_bd {
+ __le16 reserved0;
+ __le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
+/* Should be 0 in all the BDs, except the first one. (for debug) */
+#define ETH_TX_DATA_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+ __le16 reserved3;
+};
+
+/*
+ * The common regular TX BD ring element
+ */
+struct eth_tx_bd {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ struct eth_tx_data_bd data /* Complementary information. */;
+};
+
+
+union eth_tx_bd_types {
+ struct eth_tx_1st_bd first_bd /* The first tx bd of a given packet */;
+/* The second tx bd of a given packet */
+ struct eth_tx_2nd_bd second_bd;
+ struct eth_tx_3rd_bd third_bd /* The third tx bd of a given packet */;
+ struct eth_tx_bd reg_bd /* The common non-special bd */;
+};
+
+
+
+
+
+
+/*
+ * Eth Tx Tunnel Type
+ */
+enum eth_tx_tunn_type {
+ ETH_TX_TUNN_GENEVE /* GENEVE Tunnel. */,
+ ETH_TX_TUNN_TTAG /* T-Tag Tunnel. */,
+ ETH_TX_TUNN_GRE /* GRE Tunnel. */,
+ ETH_TX_TUNN_VXLAN /* VXLAN Tunnel. */,
+ MAX_ETH_TX_TUNN_TYPE
+};
+
+
+/*
+ * Ystorm Queue Zone
+ */
+struct xstorm_eth_queue_zone {
+/* Tx interrupt coalescing TimeSet */
+ struct coalescing_timeset int_coalescing_timeset;
+ u8 reserved[7];
+};
+
+
+/*
+ * ETH doorbell data
+ */
+struct eth_db_data {
+ u8 params;
+/* destination of doorbell (use enum db_dest) */
+#define ETH_DB_DATA_DEST_MASK 0x3
+#define ETH_DB_DATA_DEST_SHIFT 0
+/* aggregative command to CM (use enum db_agg_cmd_sel) */
+#define ETH_DB_DATA_AGG_CMD_MASK 0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT 2
+#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
+#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
+#define ETH_DB_DATA_RESERVED_MASK 0x1
+#define ETH_DB_DATA_RESERVED_SHIFT 5
+/* aggregative value selection */
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+/* bit for every DQ counter flags in CM context that DQ can increment */
+ u8 agg_flags;
+ __le16 bd_prod;
+};
+
+#endif /* __ETH_COMMON__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/mcp_public.h b/src/spdk/dpdk/drivers/net/qede/base/mcp_public.h
new file mode 100644
index 00000000..81aa88e7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/mcp_public.h
@@ -0,0 +1,1924 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+/****************************************************************************
+ *
+ * Name: mcp_public.h
+ *
+ * Description: MCP public data
+ *
+ * Created: 13/01/2013 yanivr
+ *
+ ****************************************************************************/
+
+#ifndef MCP_PUBLIC_H
+#define MCP_PUBLIC_H
+
+#define VF_MAX_STATIC 192 /* In case of AH */
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2 /* Global */
+#define MCP_GLOB_PORT_MAX 4 /* Global */
+#define MCP_GLOB_FUNC_MAX 16 /* Global */
+
+typedef u32 offsize_t; /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_OFFSET 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_OFFSET 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
+
+/* SECTION_OFFSET is calculating the offset in bytes out of offsize */
+#define SECTION_OFFSET(_offsize) \
+ ((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_OFFSET) << 2))
+
+/* SECTION_SIZE is calculating the size in bytes out of offsize */
+#define SECTION_SIZE(_offsize) \
+ (((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_OFFSET) << 2)
+
+/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
+ * within section
+ */
+#define SECTION_ADDR(_offsize, idx) \
+ (MCP_REG_SCRATCH + \
+ SECTION_OFFSET(_offsize) + (SECTION_SIZE(_offsize) * idx))
+
+/* SECTION_OFFSIZE_ADDR returns the GRC addr to the offsize address. Use
+ * offsetof, since the OFFSETUP collide with the firmware definition
+ */
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+ (_pub_base + offsetof(struct mcp_public_data, sections[_section]))
+/* PHY configuration */
+struct eth_phy_cfg {
+/* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
+ u32 speed;
+#define ETH_SPEED_AUTONEG 0
+#define ETH_SPEED_SMARTLINQ 0x8 /* deprecated - use link_modes field instead */
+
+ u32 pause; /* bitmask */
+#define ETH_PAUSE_NONE 0x0
+#define ETH_PAUSE_AUTONEG 0x1
+#define ETH_PAUSE_RX 0x2
+#define ETH_PAUSE_TX 0x4
+
+ u32 adv_speed; /* Default should be the speed_cap_mask */
+ u32 loopback_mode;
+#define ETH_LOOPBACK_NONE (0)
+/* Serdes loopback. In AH, it refers to Near End */
+#define ETH_LOOPBACK_INT_PHY (1)
+#define ETH_LOOPBACK_EXT_PHY (2) /* External PHY Loopback */
+/* External Loopback (Require loopback plug) */
+#define ETH_LOOPBACK_EXT (3)
+#define ETH_LOOPBACK_MAC (4) /* MAC Loopback - not supported */
+#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 (5) /* Port to itself */
+#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 (6) /* Port to Port */
+#define ETH_LOOPBACK_PCS_AH_ONLY (7) /* PCS loopback (TX to RX) */
+/* Loop RX packet from PCS to TX */
+#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY (8)
+/* Remote Serdes Loopback (RX to TX) */
+#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY (9)
+
+ u32 eee_cfg;
+/* EEE is enabled (configuration). Refer to eee_status->active for negotiated
+ * status
+ */
+#define EEE_CFG_EEE_ENABLED (1 << 0)
+#define EEE_CFG_TX_LPI (1 << 1)
+#define EEE_CFG_ADV_SPEED_1G (1 << 2)
+#define EEE_CFG_ADV_SPEED_10G (1 << 3)
+#define EEE_TX_TIMER_USEC_MASK (0xfffffff0)
+#define EEE_TX_TIMER_USEC_OFFSET 4
+#define EEE_TX_TIMER_USEC_BALANCED_TIME (0xa00)
+#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME (0x100)
+#define EEE_TX_TIMER_USEC_LATENCY_TIME (0x6000)
+
+ u32 link_modes; /* Additional link modes */
+#define LINK_MODE_SMARTLINQ_ENABLE 0x1 /* XXX Deprecate */
+};
+
+struct port_mf_cfg {
+ u32 dynamic_cfg; /* device control channel */
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_OFFSET 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+/* DO NOT add new fields in the middle
+ * MUST be synced with struct pmm_stats_map
+ */
+struct eth_stats {
+ u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/
+ u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/
+ u64 r255; /* 0x02 (Offset 0x10 ) RX 128 to 255 byte frame counter*/
+ u64 r511; /* 0x03 (Offset 0x18 ) RX 256 to 511 byte frame counter*/
+ u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter*/
+/* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */
+ u64 r1518;
+ union {
+ struct { /* bb */
+/* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged frame counter */
+ u64 r1522;
+/* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter*/
+ u64 r2047;
+/* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter*/
+ u64 r4095;
+/* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter*/
+ u64 r9216;
+/* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame counter */
+ u64 r16383;
+ } bb0;
+ struct { /* ah */
+ u64 unused1;
+/* 0x07 (Offset 0x38 ) RX 1519 to max byte frame counter*/
+ u64 r1519_to_max;
+ u64 unused2;
+ u64 unused3;
+ u64 unused4;
+ } ah0;
+ } u0;
+ u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
+ u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/
+ u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/
+ u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter*/
+ u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter*/
+ u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
+ u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter*/
+ u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
+ u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
+ u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
+ u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
+ u64 t127; /* 0x41 (Offset 0xb0 ) TX 65 to 127 byte frame counter */
+ u64 t255; /* 0x42 (Offset 0xb8 ) TX 128 to 255 byte frame counter*/
+ u64 t511; /* 0x43 (Offset 0xc0 ) TX 256 to 511 byte frame counter*/
+ u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter*/
+/* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */
+ u64 t1518;
+ union {
+ struct { /* bb */
+/* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */
+ u64 t2047;
+/* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */
+ u64 t4095;
+/* 0x49 (Offset 0xe8 ) TX 4096 to 9216 byte frame counter */
+ u64 t9216;
+/* 0x4A (Offset 0xf0 ) TX 9217 to 16383 byte frame counter */
+ u64 t16383;
+ } bb1;
+ struct { /* ah */
+/* 0x47 (Offset 0xd8 ) TX 1519 to max byte frame counter */
+ u64 t1519_to_max;
+ u64 unused6;
+ u64 unused7;
+ u64 unused8;
+ } ah1;
+ } u1;
+ u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
+ u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
+/* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */
+ union {
+ struct { /* bb */
+/* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */
+ u64 tlpiec;
+/* 0x6E (Offset 0x110) Transmit Total Collision Counter */
+ u64 tncl;
+ } bb2;
+ struct { /* ah */
+ u64 unused9;
+ u64 unused10;
+ } ah2;
+ } u2;
+ u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
+ u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
+ u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
+ u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
+/* 0x22 (Offset 0x138) RX good frame (good CRC, not oversized, no ERROR) */
+ u64 rxpok;
+ u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
+ u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
+ u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
+ u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
+ u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+/* HSI - Cannot add more stats to this struct. If needed, then need to open new
+ * struct
+ */
+
+};
+
+struct brb_stats {
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
+};
+
+struct port_stats {
+ struct brb_stats brb;
+ struct eth_stats eth;
+};
+
+/*----+------------------------------------------------------------------------
+ * C | Number and | Ports in| Ports in|2 PHY-s |# of ports|# of engines
+ * h | rate of | team #1 | team #2 |are used|per path | (paths)
+ * i | physical | | | | | enabled
+ * p | ports | | | | |
+ *====+============+=========+=========+========+==========+===================
+ * BB | 1x100G | This is special mode, where there are actually 2 HW func
+ * BB | 2x10/20Gbps| 0,1 | NA | No | 1 | 1
+ * BB | 2x40 Gbps | 0,1 | NA | Yes | 1 | 1
+ * BB | 2x50Gbps | 0,1 | NA | No | 1 | 1
+ * BB | 4x10Gbps | 0,2 | 1,3 | No | 1/2 | 1,2 (2 is optional)
+ * BB | 4x10Gbps | 0,1 | 2,3 | No | 1/2 | 1,2 (2 is optional)
+ * BB | 4x10Gbps | 0,3 | 1,2 | No | 1/2 | 1,2 (2 is optional)
+ * BB | 4x10Gbps | 0,1,2,3 | NA | No | 1 | 1
+ * AH | 2x10/20Gbps| 0,1 | NA | NA | 1 | NA
+ * AH | 4x10Gbps | 0,1 | 2,3 | NA | 2 | NA
+ * AH | 4x10Gbps | 0,2 | 1,3 | NA | 2 | NA
+ * AH | 4x10Gbps | 0,3 | 1,2 | NA | 2 | NA
+ * AH | 4x10Gbps | 0,1,2,3 | NA | NA | 1 | NA
+ *====+============+=========+=========+========+==========+===================
+ */
+
+#define CMT_TEAM0 0
+#define CMT_TEAM1 1
+#define CMT_TEAM_MAX 2
+
+struct couple_mode_teaming {
+ u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM (1 << 0)
+
+#define PORT_CMT_PORT_ROLE (1 << 1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE (1 << 1)
+
+#define PORT_CMT_TEAM_MASK (1 << 2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 (1 << 2)
+};
+
+/**************************************
+ * LLDP and DCBX HSI structures
+ **************************************/
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32 /* In dwords. 128 in bytes*/
+#define MAX_TLV_BUFFER 128 /* In dwords. 512 in bytes*/
+typedef enum _lldp_agent_e {
+ LLDP_NEAREST_BRIDGE = 0,
+ LLDP_NEAREST_NON_TPMR_BRIDGE,
+ LLDP_NEAREST_CUSTOMER_BRIDGE,
+ LLDP_MAX_LLDP_AGENTS
+} lldp_agent_e;
+
+struct lldp_config_params_s {
+ u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_OFFSET 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_OFFSET 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_OFFSET 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_OFFSET 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_OFFSET 31
+ /* Holds local Chassis ID TLV header, subtype and 9B of payload.
+ * If firtst byte is 0, then we will use default chassis ID
+ */
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ /* Holds local Port ID TLV header, subtype and 9B of payload.
+ * If firtst byte is 0, then we will use default port ID
+ */
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+ u32 prefix_seq_num;
+ u32 status; /* TBD */
+ /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ /* Holds remote Port ID TLV header, subtype and 9B of payload. */
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+ u32 flags;
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_OFFSET 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_OFFSET 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_OFFSET 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_OFFSET 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_OFFSET 4
+#define DCBX_OOO_TC_MASK 0x00000f00
+#define DCBX_OOO_TC_OFFSET 8
+/* Entries in tc table are orginized that the left most is pri 0, right most is
+ * prio 7
+ */
+
+ u32 pri_tc_tbl[1];
+/* Fixed TCP OOO TC usage is deprecated and used only for driver backward
+ * compatibility
+ */
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
+
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
+#define DCBX_CEE_STRICT_PRIORITY 0xf
+/* Entries in tc table are orginized that the left most is pri 0, right most is
+ * prio 7
+ */
+
+ u32 tc_bw_tbl[2];
+/* Entries in tc table are orginized that the left most is pri 0, right most is
+ * prio 7
+ */
+
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
+};
+
+struct dcbx_app_priority_entry {
+ u32 entry;
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_OFFSET 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_OFFSET 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_SF_IEEE_MASK 0x0000f000
+#define DCBX_APP_SF_IEEE_OFFSET 12
+#define DCBX_APP_SF_IEEE_RESERVED 0
+#define DCBX_APP_SF_IEEE_ETHTYPE 1
+#define DCBX_APP_SF_IEEE_TCP_PORT 2
+#define DCBX_APP_SF_IEEE_UDP_PORT 3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
+
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_OFFSET 16
+};
+
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+ u32 flags;
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_OFFSET 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_OFFSET 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_OFFSET 2
+ /* Not in use
+ #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
+ #define DCBX_APP_DEFAULT_PRI_OFFSET 8
+ */
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_OFFSET 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_OFFSET 16
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+ /* PG feature */
+ struct dcbx_ets_feature ets;
+ /* PFC feature */
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_OFFSET 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_OFFSET 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_OFFSET 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_OFFSET 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_OFFSET 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_OFFSET 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_OFFSET 17
+
+ /* APP feature */
+ struct dcbx_app_priority_feature app;
+};
+
+struct dcbx_local_params {
+ u32 config;
+#define DCBX_CONFIG_VERSION_MASK 0x00000007
+#define DCBX_CONFIG_VERSION_OFFSET 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_DYNAMIC \
+ (DCBX_CONFIG_VERSION_IEEE | DCBX_CONFIG_VERSION_CEE)
+#define DCBX_CONFIG_VERSION_STATIC 4
+
+ u32 flags;
+ struct dcbx_features features;
+};
+
+struct dcbx_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+ /*
+ #define DCBX_CONFIG_VERSION_MASK 0x00000007
+ #define DCBX_CONFIG_VERSION_OFFSET 0
+ #define DCBX_CONFIG_VERSION_DISABLED 0
+ #define DCBX_CONFIG_VERSION_IEEE 1
+ #define DCBX_CONFIG_VERSION_CEE 2
+ #define DCBX_CONFIG_VERSION_STATIC 4
+ */
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+ u32 flags;
+#define LLDP_SYSTEM_TLV_VALID_MASK 0x1
+#define LLDP_SYSTEM_TLV_VALID_OFFSET 0
+/* This bit defines if system TLVs are instead of mandatory TLVS or in
+ * addition to them. Set 1 for replacing mandatory TLVs
+ */
+#define LLDP_SYSTEM_TLV_MANDATORY_MASK 0x2
+#define LLDP_SYSTEM_TLV_MANDATORY_OFFSET 1
+#define LLDP_SYSTEM_TLV_LENGTH_MASK 0xffff0000
+#define LLDP_SYSTEM_TLV_LENGTH_OFFSET 16
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+/* Since this struct is written by MFW and read by driver need to add
+ * sequence guards (as in case of DCBX MIB)
+ */
+struct lldp_received_tlvs_s {
+ u32 prefix_seq_num;
+ u32 length;
+ u32 tlvs_buffer[MAX_TLV_BUFFER];
+ u32 suffix_seq_num;
+};
+
+struct dcb_dscp_map {
+ u32 flags;
+#define DCB_DSCP_ENABLE_MASK 0x1
+#define DCB_DSCP_ENABLE_OFFSET 0
+#define DCB_DSCP_ENABLE 1
+ u32 dscp_pri_map[8];
+};
+
+/**************************************
+ * Attributes commands
+ **************************************/
+
+enum _attribute_commands_e {
+ ATTRIBUTE_CMD_READ = 0,
+ ATTRIBUTE_CMD_WRITE,
+ ATTRIBUTE_CMD_READ_CLEAR,
+ ATTRIBUTE_CMD_CLEAR,
+ ATTRIBUTE_NUM_OF_COMMANDS
+};
+
+/**************************************/
+/* */
+/* P U B L I C G L O B A L */
+/* */
+/**************************************/
+struct public_global {
+ u32 max_path; /* 32bit is wasty, but this will be used often */
+/* (Global) 32bit is wasty, but this will be used often */
+ u32 max_ports;
+#define MODE_1P 1 /* TBD - NEED TO THINK OF A BETTER NAME */
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+/* Temperature in Celcius (-255C / +255C), measured every second. */
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+ s32 external_temperature;
+ u32 mdump_reason;
+#define MDUMP_REASON_INTERNAL_ERROR (1 << 0)
+#define MDUMP_REASON_EXTERNAL_TRIGGER (1 << 1)
+#define MDUMP_REASON_DUMP_AGED (1 << 2)
+ u32 ext_phy_upgrade_fw;
+#define EXT_PHY_FW_UPGRADE_STATUS_MASK (0x0000ffff)
+#define EXT_PHY_FW_UPGRADE_STATUS_OFFSET (0)
+#define EXT_PHY_FW_UPGRADE_STATUS_IN_PROGRESS (1)
+#define EXT_PHY_FW_UPGRADE_STATUS_FAILED (2)
+#define EXT_PHY_FW_UPGRADE_STATUS_SUCCESS (3)
+#define EXT_PHY_FW_UPGRADE_TYPE_MASK (0xffff0000)
+#define EXT_PHY_FW_UPGRADE_TYPE_OFFSET (16)
+};
+
+/**************************************/
+/* */
+/* P U B L I C P A T H */
+/* */
+/**************************************/
+
+/****************************************************************************
+ * Shared Memory 2 Region *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way: */
+/* 8 bit: PF ack */
+/* 128 bit: VF ack */
+/* 8 bit: ios_dis_ack */
+/* In order to maintain endianity in the mailbox hsi, we want to keep using */
+/* u32. The fw must have the VF right after the PF since this is how it */
+/* access arrays(it expects always the VF to reside after the PF, and that */
+/* makes the calculation much easier for it. ) */
+/* In order to answer both limitations, and keep the struct small, the code */
+/* will abuse the structure defined here to achieve the actual partition */
+/* above */
+/****************************************************************************/
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
+#define ACCUM_ACK_PF_BASE 0
+#define ACCUM_ACK_PF_SHIFT 0
+
+#define ACCUM_ACK_VF_BASE 8
+#define ACCUM_ACK_VF_SHIFT 3
+
+#define ACCUM_ACK_IOV_DIS_BASE 256
+#define ACCUM_ACK_IOV_DIS_SHIFT 8
+
+};
+
+struct public_path {
+ struct fw_flr_mb flr_mb;
+ /*
+ * mcp_vf_disabled is set by the MCP to indicate the driver about VFs
+ * which were disabled/flred
+ */
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; /* 0x003c */
+
+/* Reset on mcp reset, and incremented for eveny process kill event. */
+ u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_OFFSET 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_OFFSET 16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
+};
+
+/**************************************/
+/* */
+/* P U B L I C P O R T */
+/* */
+/**************************************/
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct dci_npiv_settings {
+ u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+ u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct dci_fc_npiv_cfg {
+ /* hdr used internally by the MFW */
+ u32 hdr;
+ u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct dci_fc_npiv_tbl {
+ struct dci_fc_npiv_cfg fc_npiv_cfg;
+ struct dci_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+/****************************************************************************
+ * Driver <-> FW Mailbox *
+ ****************************************************************************/
+
+struct public_port {
+ u32 validity_map; /* 0x0 (4*2 = 0x8) */
+
+ /* validity bits */
+#define MCP_VALIDITY_PCI_CFG 0x00100000
+#define MCP_VALIDITY_MB 0x00200000
+#define MCP_VALIDITY_DEV_INFO 0x00400000
+#define MCP_VALIDITY_RESERVED 0x00000007
+
+ /* One licensing bit should be set */
+/* yaniv - tbd ? license */
+#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
+#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+
+ /* Active MFW */
+#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+#define MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040
+#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+
+ u32 link_status;
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+#define LINK_STATUS_FEC_MODE_MASK 0x38000000
+#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
+#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27)
+#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
+#define LINK_STATUS_EXT_PHY_LINK_UP 0x40000000
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+/* Points to struct eth_phy_cfg (For READ-ONLY) */
+ u32 drv_phy_cfg_addr;
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1 /* Use MEDIA_MODULE_FIBER instead */
+#define MEDIA_XFP_FIBER 0x2 /* Use MEDIA_MODULE_FIBER instead */
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5 /* Use MEDIA_MODULE_FIBER instead */
+#define MEDIA_MODULE_FIBER 0x6
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
+
+ u32 lfa_status;
+#define LFA_LINK_FLAP_REASON_OFFSET 0
+#define LFA_LINK_FLAP_REASON_MASK 0x000000ff
+#define LFA_NO_REASON (0 << 0)
+#define LFA_LINK_DOWN (1 << 0)
+#define LFA_FORCE_INIT (1 << 1)
+#define LFA_LOOPBACK_MISMATCH (1 << 2)
+#define LFA_SPEED_MISMATCH (1 << 3)
+#define LFA_FLOW_CTRL_MISMATCH (1 << 4)
+#define LFA_ADV_SPEED_MISMATCH (1 << 5)
+#define LFA_EEE_MISMATCH (1 << 6)
+#define LFA_LINK_MODES_MISMATCH (1 << 7)
+#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
+#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
+#define LINK_FLAP_COUNT_OFFSET 16
+#define LINK_FLAP_COUNT_MASK 0x00ff0000
+
+ u32 link_change_count;
+
+ /* LLDP params */
+/* offset: 536 bytes? */
+ struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+
+ /* DCBX related MIB */
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
+
+/* FC_NPIV table offset & size in NVRAM value of 0 means not present */
+
+ u32 fc_npiv_nvram_tbl_addr;
+ u32 fc_npiv_nvram_tbl_size;
+ u32 transceiver_data;
+#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
+#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
+#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
+#define ETH_TRANSCEIVER_TYPE_OFFSET 0x00000008
+#define ETH_TRANSCEIVER_TYPE_NONE 0x00000000
+#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF
+/* 1G Passive copper cable */
+#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
+/* 1G Active copper cable */
+#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
+#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
+#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
+#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
+#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
+#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
+#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
+/* 10G Passive copper cable */
+#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
+/* 10G Active copper cable */
+#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
+#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
+#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
+#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
+#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
+/* Active optical cable */
+#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
+#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
+#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
+#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
+/* Active copper cable */
+#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
+#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
+#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
+/* 25G Passive copper cable - short */
+#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
+/* 25G Active copper cable - short */
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+/* 25G Passive copper cable - medium */
+#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
+/* 25G Active copper cable - medium */
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19
+/* 25G Passive copper cable - long */
+#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a
+/* 25G Active copper cable - long */
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
+#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c
+#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d
+#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
+
+#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
+#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
+#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
+#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
+ u32 wol_info;
+ u32 wol_pkt_len;
+ u32 wol_pkt_details;
+ struct dcb_dscp_map dcb_dscp_map;
+
+ u32 eee_status;
+/* Set when EEE negotiation is complete. */
+#define EEE_ACTIVE_BIT (1 << 0)
+
+/* Shows the Local Device EEE capabilities */
+#define EEE_LD_ADV_STATUS_MASK 0x000000f0
+#define EEE_LD_ADV_STATUS_OFFSET 4
+ #define EEE_1G_ADV (1 << 1)
+ #define EEE_10G_ADV (1 << 2)
+/* Same values as in EEE_LD_ADV, but for Link Parter */
+#define EEE_LP_ADV_STATUS_MASK 0x00000f00
+#define EEE_LP_ADV_STATUS_OFFSET 8
+
+/* Supported speeds for EEE */
+#define EEE_SUPPORTED_SPEED_MASK 0x0000f000
+#define EEE_SUPPORTED_SPEED_OFFSET 12
+ #define EEE_1G_SUPPORTED (1 << 1)
+ #define EEE_10G_SUPPORTED (1 << 2)
+
+ u32 eee_remote; /* Used for EEE in LLDP */
+#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
+#define EEE_REMOTE_TW_TX_OFFSET 0
+#define EEE_REMOTE_TW_RX_MASK 0xffff0000
+#define EEE_REMOTE_TW_RX_OFFSET 16
+
+ u32 module_info;
+#define ETH_TRANSCEIVER_MONITORING_TYPE_MASK 0x000000FF
+#define ETH_TRANSCEIVER_MONITORING_TYPE_OFFSET 0
+#define ETH_TRANSCEIVER_ADDR_CHNG_REQUIRED (1 << 2)
+#define ETH_TRANSCEIVER_RCV_PWR_MEASURE_TYPE (1 << 3)
+#define ETH_TRANSCEIVER_EXTERNALLY_CALIBRATED (1 << 4)
+#define ETH_TRANSCEIVER_INTERNALLY_CALIBRATED (1 << 5)
+#define ETH_TRANSCEIVER_HAS_DIAGNOSTIC (1 << 6)
+#define ETH_TRANSCEIVER_IDENT_MASK 0x0000ff00
+#define ETH_TRANSCEIVER_IDENT_OFFSET 8
+
+ u32 oem_cfg_port;
+#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
+#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
+#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
+#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
+
+#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
+#define OEM_CFG_SCHED_TYPE_OFFSET 2
+#define OEM_CFG_SCHED_TYPE_ETS 0x1
+#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
+
+ struct lldp_received_tlvs_s lldp_received_tlvs[LLDP_MAX_LLDP_AGENTS];
+ u32 system_lldp_tlvs_buf2[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+/**************************************/
+/* */
+/* P U B L I C F U N C */
+/* */
+/**************************************/
+
+struct public_func {
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
+
+ /* MTU size per funciton is needed for the OV feature */
+ u32 mtu_size;
+/* 9 entires for the C2S PCP map for each inner VLAN PCP + 1 default */
+
+ /* For PCP values 0-3 use the map lower */
+ /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+ * 0x0000FF00 - PCP 2, 0x000000FF PCP 3
+ */
+ u32 c2s_pcp_map_lower;
+ /* For PCP values 4-7 use the map upper */
+ /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+ * 0x0000FF00 - PCP 6, 0x000000FF PCP 7
+ */
+ u32 c2s_pcp_map_upper;
+
+ /* For PCP default value get the MSB byte of the map default */
+ u32 c2s_pcp_map_default;
+
+ u32 reserved[4];
+
+ /* replace old mf_cfg */
+ u32 config;
+ /* E/R/I/D */
+ /* function 0 of each port cannot be hidden */
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_OFFSET 0x00000001
+
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_OFFSET 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+
+ /* MINBW, MAXBW */
+ /* value range - 0..100, increments in 1 % */
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_OFFSET 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_OFFSET 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+
+ u32 status;
+#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
+#define FUNC_STATUS_LOGICAL_LINK_UP 0x00000002
+#define FUNC_STATUS_FORCED_LINK 0x00000004
+
+ u32 mac_upper; /* MAC */
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_OFFSET 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 ovlan_stag; /* tags */
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_OFFSET 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+
+ u32 pf_allocation; /* vf per pf */
+
+ u32 preserve_data; /* Will be used bt CCM */
+
+ u32 driver_last_activity_ts;
+
+ /*
+ * drv_ack_vf_disabled is set by the PF driver to ack handled disabled
+ * VFs
+ */
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
+
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_OFFSET 0
+
+#define LOAD_REQ_HSI_VERSION 2
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_OFFSET 16
+#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
+ DRV_ID_MCP_HSI_VER_OFFSET)
+
+#define DRV_ID_DRV_TYPE_MASK 0x7f000000
+#define DRV_ID_DRV_TYPE_OFFSET 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_OFFSET)
+
+#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
+#define DRV_ID_DRV_INIT_HW_OFFSET 31
+#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_OFFSET)
+
+ u32 oem_cfg_func;
+#define OEM_CFG_FUNC_TC_MASK 0x0000000F
+#define OEM_CFG_FUNC_TC_OFFSET 0
+#define OEM_CFG_FUNC_TC_0 0x0
+#define OEM_CFG_FUNC_TC_1 0x1
+#define OEM_CFG_FUNC_TC_2 0x2
+#define OEM_CFG_FUNC_TC_3 0x3
+#define OEM_CFG_FUNC_TC_4 0x4
+#define OEM_CFG_FUNC_TC_5 0x5
+#define OEM_CFG_FUNC_TC_6 0x6
+#define OEM_CFG_FUNC_TC_7 0x7
+
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
+};
+
+/**************************************/
+/* */
+/* P U B L I C M B */
+/* */
+/**************************************/
+/* This is the only section that the driver can write to, and each */
+/* Basically each driver request to set feature parameters,
+ * will be done using a different command, which will be linked
+ * to a specific data structure from the union below.
+ * For huge strucuture, the common blank structure should be used.
+ */
+
+struct mcp_mac {
+ u32 mac_upper; /* Upper 16 bits are always zeroes */
+ u32 mac_lower;
+};
+
+struct mcp_val64 {
+ u32 lo;
+ u32 hi;
+};
+
+struct mcp_file_att {
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+struct bist_nvm_image_att {
+ u32 return_code;
+ u32 image_type; /* Image type */
+ u32 nvm_start_addr; /* NVM address of the image */
+ u32 len; /* Include CRC */
+};
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+/* statistics for ncsi */
+struct lan_stats_stc {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+ u32 rserved;
+};
+
+struct fcoe_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct iscsi_stats_stc {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct rdma_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct ocbb_data_stc {
+ u32 ocbb_host_addr;
+ u32 ocsd_host_addr;
+ u32 ocsd_req_update_interval;
+};
+
+#define MAX_NUM_OF_SENSORS 7
+#define MFW_SENSOR_LOCATION_INTERNAL 1
+#define MFW_SENSOR_LOCATION_EXTERNAL 2
+#define MFW_SENSOR_LOCATION_SFP 3
+
+#define SENSOR_LOCATION_OFFSET 0
+#define SENSOR_LOCATION_MASK 0x000000ff
+#define THRESHOLD_HIGH_OFFSET 8
+#define THRESHOLD_HIGH_MASK 0x0000ff00
+#define CRITICAL_TEMPERATURE_OFFSET 16
+#define CRITICAL_TEMPERATURE_MASK 0x00ff0000
+#define CURRENT_TEMP_OFFSET 24
+#define CURRENT_TEMP_MASK 0xff000000
+struct temperature_status_stc {
+ u32 num_of_sensors;
+ u32 sensor[MAX_NUM_OF_SENSORS];
+};
+
+/* crash dump configuration header */
+struct mdump_config_stc {
+ u32 version;
+ u32 config;
+ u32 epoc;
+ u32 num_of_logs;
+ u32 valid_logs;
+};
+
+enum resource_id_enum {
+ RESOURCE_NUM_SB_E = 0,
+ RESOURCE_NUM_L2_QUEUE_E = 1,
+ RESOURCE_NUM_VPORT_E = 2,
+ RESOURCE_NUM_VMQ_E = 3,
+/* Not a real resource!! it's a factor used to calculate others */
+ RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
+/* Not a real resource!! it's a factor used to calculate others */
+ RESOURCE_FACTOR_RSS_PER_VF_E = 5,
+ RESOURCE_NUM_RL_E = 6,
+ RESOURCE_NUM_PQ_E = 7,
+ RESOURCE_NUM_VF_E = 8,
+ RESOURCE_VFC_FILTER_E = 9,
+ RESOURCE_ILT_E = 10,
+ RESOURCE_CQS_E = 11,
+ RESOURCE_GFT_PROFILES_E = 12,
+ RESOURCE_NUM_TC_E = 13,
+ RESOURCE_NUM_RSS_ENGINES_E = 14,
+ RESOURCE_LL2_QUEUE_E = 15,
+ RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_BDQ_E = 17,
+ RESOURCE_MAX_NUM,
+ RESOURCE_NUM_INVALID = 0xFFFFFFFF
+};
+
+/* Resource ID is to be filled by the driver in the MB request
+ * Size, offset & flags to be filled by the MFW in the MB response
+ */
+struct resource_info {
+ enum resource_id_enum res_id;
+ u32 size; /* number of allocated resources */
+ u32 offset; /* Offset of the 1st resource */
+ u32 vf_size;
+ u32 vf_offset;
+ u32 flags;
+#define RESOURCE_ELEMENT_STRICT (1 << 0)
+};
+
+#define DRV_ROLE_NONE 0
+#define DRV_ROLE_PREBOOT 1
+#define DRV_ROLE_OS 2
+#define DRV_ROLE_KDUMP 3
+
+struct load_req_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_REQ_ROLE_MASK 0x000000FF
+#define LOAD_REQ_ROLE_OFFSET 0
+#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
+#define LOAD_REQ_LOCK_TO_OFFSET 8
+#define LOAD_REQ_LOCK_TO_DEFAULT 0
+#define LOAD_REQ_LOCK_TO_NONE 255
+#define LOAD_REQ_FORCE_MASK 0x000F0000
+#define LOAD_REQ_FORCE_OFFSET 16
+#define LOAD_REQ_FORCE_NONE 0
+#define LOAD_REQ_FORCE_PF 1
+#define LOAD_REQ_FORCE_ALL 2
+#define LOAD_REQ_FLAGS0_MASK 0x00F00000
+#define LOAD_REQ_FLAGS0_OFFSET 20
+#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
+};
+
+struct load_rsp_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_RSP_ROLE_MASK 0x000000FF
+#define LOAD_RSP_ROLE_OFFSET 0
+#define LOAD_RSP_HSI_MASK 0x0000FF00
+#define LOAD_RSP_HSI_OFFSET 8
+#define LOAD_RSP_FLAGS0_MASK 0x000F0000
+#define LOAD_RSP_FLAGS0_OFFSET 16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
+};
+
+struct mdump_retain_data_stc {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
+struct attribute_cmd_write_stc {
+ u32 val;
+ u32 mask;
+ u32 offset;
+};
+
+union drv_union_data {
+ struct mcp_mac wol_mac; /* UNLOAD_DONE */
+
+/* This configuration should be set by the driver for the LINK_SET command. */
+
+ struct eth_phy_cfg drv_phy_cfg;
+
+ struct mcp_val64 val64; /* For PHY / AVS commands */
+
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+
+ struct mcp_file_att file_att;
+
+ u32 ack_vf_disabled[VF_MAX_STATIC / 32];
+
+ struct drv_version_stc drv_version;
+
+ struct lan_stats_stc lan_stats;
+ struct fcoe_stats_stc fcoe_stats;
+ struct iscsi_stats_stc iscsi_stats;
+ struct rdma_stats_stc rdma_stats;
+ struct ocbb_data_stc ocbb_info;
+ struct temperature_status_stc temp_info;
+ struct resource_info resource;
+ struct bist_nvm_image_att nvm_image_att;
+ struct mdump_config_stc mdump_config;
+ u32 dword;
+
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ struct mdump_retain_data_stc mdump_retain;
+ struct attribute_cmd_write_stc attribute_cmd_write;
+ /* ... */
+};
+
+struct public_drv_mb {
+ u32 drv_mb_header;
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ 0x10000000
+#define DRV_MSG_CODE_LOAD_DONE 0x11000000
+#define DRV_MSG_CODE_INIT_HW 0x12000000
+#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
+#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+#define DRV_MSG_CODE_INIT_PHY 0x22000000
+ /* Params - FORCE - Reinitialize the link regardless of LFA */
+ /* - DONT_CARE - Don't flap the link if up */
+#define DRV_MSG_CODE_LINK_RESET 0x23000000
+
+#define DRV_MSG_CODE_SET_LLDP 0x24000000
+#define DRV_MSG_CODE_REGISTER_LLDP_TLVS_RX 0x24100000
+#define DRV_MSG_CODE_SET_DCBX 0x25000000
+ /* OneView feature driver HSI*/
+#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
+#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
+#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
+#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000
+#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
+#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
+#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
+/* DRV_MB Param: driver version supp, FW_MB param: MFW version supp,
+ * data: struct resource_info
+ */
+#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
+#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
+
+/*deprecated don't use*/
+#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000
+#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
+#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000
+/* Param is either DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW/IMAGE */
+#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
+/* Param should be set to the transaction size (up to 64 bytes) */
+#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
+/* MFW will place the file offset and len in file_att struct */
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
+/* Read 32bytes of nvram data. Param is [0:23] ??? Offset [24:31] -
+ * ??? Len in Bytes
+ */
+#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
+/* Writes up to 32Bytes to nvram. Param is [0:23] ??? Offset [24:31]
+ * ??? Len in Bytes. In case this address is in the range of secured file in
+ * secured mode, the operation will fail
+ */
+#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
+/* Delete a file from nvram. Param is image_type. */
+#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000
+/* Reset MCP when no NVM operation is going on, and no drivers are loaded.
+ * In case operation succeed, MCP will not ack back.
+ */
+#define DRV_MSG_CODE_MCP_RESET 0x00090000
+/* Temporary command to set secure mode, where the param is 0 (None secure) /
+ * 1 (Secure) / 2 (Full-Secure)
+ */
+#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000
+/* Param: [0:15] - Address, [16:18] - lane# (0/1/2/3 - for single lane,
+ * 4/5 - for dual lanes, 6 - for all lanes, [28] - PMD reg, [29] - select port,
+ * [30:31] - port
+ */
+#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000
+/* Param: [0:15] - Address, [16:18] - lane# (0/1/2/3 - for single lane,
+ * 4/5 - for dual lanes, 6 - for all lanes, [28] - PMD reg, [29] - select port,
+ * [30:31] - port
+ */
+#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000
+/* Param: [0:15] - Address, [30:31] - port */
+#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000
+/* Param: [0:15] - Address, [30:31] - port */
+#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
+/* Param: [0:3] - version, [4:15] - name (null terminated) */
+#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+/* Halts the MCP. To resume MCP, user will need to use
+ * MCP_REG_CPU_STATE/MCP_REG_CPU_MODE registers.
+ */
+#define DRV_MSG_CODE_MCP_HALT 0x00100000
+/* Set virtual mac address, params [31:6] - reserved, [5:4] - type,
+ * [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
+ */
+#define DRV_MSG_CODE_SET_VMAC 0x00110000
+/* Set virtual mac address, params [31:6] - reserved, [5:4] - type,
+ * [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
+ */
+#define DRV_MSG_CODE_GET_VMAC 0x00120000
+#define DRV_MSG_CODE_VMAC_TYPE_OFFSET 4
+#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
+#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
+#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
+#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
+/* Get statistics from pf, params [31:4] - reserved, [3:0] - stats type */
+#define DRV_MSG_CODE_GET_STATS 0x00130000
+#define DRV_MSG_CODE_STATS_TYPE_LAN 1
+#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
+#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+/* Host shall provide buffer and size for MFW */
+#define DRV_MSG_CODE_PMD_DIAG_DUMP 0x00140000
+/* Host shall provide buffer and size for MFW */
+#define DRV_MSG_CODE_PMD_DIAG_EYE 0x00150000
+/* Param: [0:1] - Port, [2:7] - read size, [8:15] - I2C address,
+ * [16:31] - offset
+ */
+#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
+/* Param: [0:1] - Port, [2:7] - write size, [8:15] - I2C address,
+ * [16:31] - offset
+ */
+#define DRV_MSG_CODE_TRANSCEIVER_WRITE 0x00170000
+/* indicate OCBB related information */
+#define DRV_MSG_CODE_OCBB_DATA 0x00180000
+/* Set function BW, params[15:8] - min, params[7:0] - max */
+#define DRV_MSG_CODE_SET_BW 0x00190000
+#define BW_MAX_MASK 0x000000ff
+#define BW_MAX_OFFSET 0
+#define BW_MIN_MASK 0x0000ff00
+#define BW_MIN_OFFSET 8
+
+/* When param is set to 1, all parities will be masked(disabled). When params
+ * are set to 0, parities will be unmasked again.
+ */
+#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
+/* param[0] - Simulate fan failure, param[1] - simulate over temp. */
+#define DRV_MSG_CODE_INDUCE_FAILURE 0x001b0000
+#define DRV_MSG_FAN_FAILURE_TYPE (1 << 0)
+#define DRV_MSG_TEMPERATURE_FAILURE_TYPE (1 << 1)
+/* Param: [0:15] - gpio number */
+#define DRV_MSG_CODE_GPIO_READ 0x001c0000
+/* Param: [0:15] - gpio number, [16:31] - gpio value */
+#define DRV_MSG_CODE_GPIO_WRITE 0x001d0000
+/* Param: [0:7] - test enum, [8:15] - image index, [16:31] - reserved */
+#define DRV_MSG_CODE_BIST_TEST 0x001e0000
+#define DRV_MSG_CODE_GET_TEMPERATURE 0x001f0000
+
+/* Set LED mode params :0 operational, 1 LED turn ON, 2 LED turn OFF */
+#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
+/* drv_data[7:0] - EPOC in seconds, drv_data[15:8] -
+ * driver version (MAJ MIN BUILD SUB)
+ */
+#define DRV_MSG_CODE_TIMESTAMP 0x00210000
+/* This is an empty mailbox just return OK*/
+#define DRV_MSG_CODE_EMPTY_MB 0x00220000
+
+/* Param[0:4] - resource number (0-31), Param[5:7] - opcode,
+ * param[15:8] - age
+ */
+#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+
+#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
+#define RESOURCE_CMD_REQ_RESC_OFFSET 0
+#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_OFFSET 5
+/* request resource ownership with default aging */
+#define RESOURCE_OPCODE_REQ 1
+/* request resource ownership without aging */
+#define RESOURCE_OPCODE_REQ_WO_AGING 2
+/* request resource ownership with specific aging timer (in seconds) */
+#define RESOURCE_OPCODE_REQ_W_AGING 3
+#define RESOURCE_OPCODE_RELEASE 4 /* release resource */
+/* force resource release */
+#define RESOURCE_OPCODE_FORCE_RELEASE 5
+#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_OFFSET 8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_OFFSET 0
+#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_OFFSET 8
+/* resource is free and granted to requester */
+#define RESOURCE_OPCODE_GNT 1
+/* resource is busy, param[7:0] indicates owner as follow 0-15 = PF0-15,
+ * 16 = MFW, 17 = diag over serial
+ */
+#define RESOURCE_OPCODE_BUSY 2
+/* indicate release request was acknowledged */
+#define RESOURCE_OPCODE_RELEASED 3
+/* indicate release request was previously received by other owner */
+#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
+/* indicate wrong owner during release */
+#define RESOURCE_OPCODE_WRONG_OWNER 5
+#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+
+/* dedicate resource 0 for dump */
+#define RESOURCE_DUMP 0
+
+#define DRV_MSG_CODE_GET_MBA_VERSION 0x00240000 /* Get MBA version */
+/* Send crash dump commands with param[3:0] - opcode */
+#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
+#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
+/* acknowledge reception of error indication */
+#define DRV_MSG_CODE_MDUMP_ACK 0x01
+/* set epoc and personality as follow: drv_data[3:0] - epoch,
+ * drv_data[7:4] - personality
+ */
+#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
+/* trigger crash dump procedure */
+#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
+/* Request valid logs and config words */
+#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
+/* Set triggers mask. drv_mb_param should indicate (bitwise) which
+ * trigger enabled
+ */
+#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
+/* Clear all logs */
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
+#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07 /* Get retained data */
+#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08 /* Clear retain data */
+#define DRV_MSG_CODE_MEM_ECC_EVENTS 0x00260000 /* Param: None */
+/* Param: [0:15] - gpio number */
+#define DRV_MSG_CODE_GPIO_INFO 0x00270000
+/* Value will be placed in union */
+#define DRV_MSG_CODE_EXT_PHY_READ 0x00280000
+/* Value should be placed in union */
+#define DRV_MSG_CODE_EXT_PHY_WRITE 0x00290000
+#define DRV_MB_PARAM_ADDR_OFFSET 0
+#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_DEVAD_OFFSET 16
+#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000
+#define DRV_MB_PARAM_PORT_OFFSET 21
+#define DRV_MB_PARAM_PORT_MASK 0x00600000
+#define DRV_MSG_CODE_EXT_PHY_FW_UPGRADE 0x002a0000
+
+#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000 /* Param: None */
+/* Param: Set DRV_MB_PARAM_FEATURE_SUPPORT_* */
+#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000
+/* return FW_MB_PARAM_FEATURE_SUPPORT_* */
+#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000
+#define DRV_MSG_CODE_READ_WOL_REG 0X00320000
+#define DRV_MSG_CODE_WRITE_WOL_REG 0X00330000
+#define DRV_MSG_CODE_GET_WOL_BUFFER 0X00340000
+/* Param: [0:23] Attribute key, [24:31] Attribute sub command */
+#define DRV_MSG_CODE_ATTRIBUTE 0x00350000
+
+/* Param: Password len. Union: Plain Password */
+#define DRV_MSG_CODE_ENCRYPT_PASSWORD 0x00360000
+
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 drv_mb_param;
+ /* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
+
+ /* UNLOAD_DONE_params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
+
+ /* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
+
+ /* LLDP / DCBX params*/
+ /* To be used with SET_LLDP command */
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_OFFSET 0
+ /* To be used with SET_LLDP and REGISTER_LLDP_TLVS_RX commands */
+#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_OFFSET 1
+ /* To be used with REGISTER_LLDP_TLVS_RX command */
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_OFFSET 0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_MASK 0x000007f0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_OFFSET 4
+ /* To be used with SET_DCBX command */
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_OFFSET 3
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_OFFSET 0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
+
+#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
+
+#define DRV_MB_PARAM_PHY_ADDR_OFFSET 0
+#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF
+#define DRV_MB_PARAM_PHY_LANE_OFFSET 16
+#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000
+#define DRV_MB_PARAM_PHY_SELECT_PORT_OFFSET 29
+#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000
+#define DRV_MB_PARAM_PHY_PORT_OFFSET 30
+#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000
+
+#define DRV_MB_PARAM_PHYMOD_LANE_OFFSET 0
+#define DRV_MB_PARAM_PHYMOD_LANE_MASK 0x000000FF
+#define DRV_MB_PARAM_PHYMOD_SIZE_OFFSET 8
+#define DRV_MB_PARAM_PHYMOD_SIZE_MASK 0x000FFF00
+ /* configure vf MSIX params BB */
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+ /* configure vf MSIX for PF params AH*/
+#define DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_OFFSET 0
+#define DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_MASK 0x000000FF
+
+ /* OneView configuration parametres */
+#define DRV_MB_PARAM_OV_CURR_CFG_OFFSET 0
+#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
+#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
+#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
+#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
+#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
+#define DRV_MB_PARAM_OV_CURR_CFG_VC_CLP 4
+#define DRV_MB_PARAM_OV_CURR_CFG_CNU 5
+#define DRV_MB_PARAM_OV_CURR_CFG_DCI 6
+#define DRV_MB_PARAM_OV_CURR_CFG_HII 7
+
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OFFSET 0
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_MASK 0x000000FF
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_NONE (1 << 0)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_ISCSI_IP_ACQUIRED (1 << 1)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS (1 << 1)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_TRARGET_FOUND (1 << 2)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_ISCSI_CHAP_SUCCESS (1 << 3)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_FCOE_LUN_FOUND (1 << 3)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_LOGGED_INTO_TGT (1 << 4)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_IMG_DOWNLOADED (1 << 5)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OS_HANDOFF (1 << 6)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_COMPLETED 0
+
+#define DRV_MB_PARAM_OV_PCI_BUS_NUM_OFFSET 0
+#define DRV_MB_PARAM_OV_PCI_BUS_NUM_MASK 0x000000FF
+
+#define DRV_MB_PARAM_OV_STORM_FW_VER_OFFSET 0
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
+#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
+
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_OFFSET 0
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
+/* Not Installed*/
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
+/* installed but disabled by user/admin/OS */
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
+/* installed and active */
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
+
+#define DRV_MB_PARAM_OV_MTU_SIZE_OFFSET 0
+#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000
+
+#define DRV_MB_PARAM_GPIO_NUMBER_OFFSET 0
+#define DRV_MB_PARAM_GPIO_NUMBER_MASK 0x0000FFFF
+#define DRV_MB_PARAM_GPIO_VALUE_OFFSET 16
+#define DRV_MB_PARAM_GPIO_VALUE_MASK 0xFFFF0000
+#define DRV_MB_PARAM_GPIO_DIRECTION_OFFSET 16
+#define DRV_MB_PARAM_GPIO_DIRECTION_MASK 0x00FF0000
+#define DRV_MB_PARAM_GPIO_CTRL_OFFSET 24
+#define DRV_MB_PARAM_GPIO_CTRL_MASK 0xFF000000
+
+ /* Resource Allocation params - Driver version support*/
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET 16
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET 0
+
+#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
+#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
+#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
+#define DRV_MB_PARAM_BIST_RC_PASSED 1
+#define DRV_MB_PARAM_BIST_RC_FAILED 2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET 8
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00
+
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
+/* driver supports SmartLinQ parameter */
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001
+/* driver supports EEE parameter */
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_MASK 0xFFFF0000
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_OFFSET 16
+/* driver supports virtual link parameter */
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
+ /* Driver attributes params */
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xFF000000
+
+ u32 fw_mb_header;
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_UNSUPPORTED 0x00000000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
+#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000
+#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000
+#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000
+#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000
+#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000
+#define FW_MSG_CODE_REGISTER_LLDP_TLVS_RX_DONE 0x24100000
+#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000
+#define FW_MSG_CODE_UPDATE_CURR_CFG_DONE 0x26000000
+#define FW_MSG_CODE_UPDATE_BUS_NUM_DONE 0x27000000
+#define FW_MSG_CODE_UPDATE_BOOT_PROGRESS_DONE 0x28000000
+#define FW_MSG_CODE_UPDATE_STORM_FW_VER_DONE 0x29000000
+#define FW_MSG_CODE_UPDATE_DRIVER_STATE_DONE 0x31000000
+#define FW_MSG_CODE_DRV_MSG_CODE_BW_UPDATE_DONE 0x32000000
+#define FW_MSG_CODE_DRV_MSG_CODE_MTU_SIZE_DONE 0x33000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_GEN_ERR 0x37000000
+#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
+#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+#define FW_MSG_CODE_FLR_ACK 0x02000000
+#define FW_MSG_CODE_FLR_NACK 0x02100000
+#define FW_MSG_CODE_SET_DRIVER_DONE 0x02200000
+#define FW_MSG_CODE_SET_VMAC_SUCCESS 0x02300000
+#define FW_MSG_CODE_SET_VMAC_FAIL 0x02400000
+
+#define FW_MSG_CODE_NVM_OK 0x00010000
+#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000
+#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000
+#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000
+#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000
+#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
+#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000
+#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000
+#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000
+#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000
+#define FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000
+#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000
+#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000
+#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000
+#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000
+#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000
+#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
+/* MFW reject "mcp reset" command if one of the drivers is up */
+#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
+#define FW_MSG_CODE_NVM_FAILED_CALC_HASH 0x00310000
+#define FW_MSG_CODE_NVM_PUBLIC_KEY_MISSING 0x00320000
+#define FW_MSG_CODE_NVM_INVALID_PUBLIC_KEY 0x00330000
+
+#define FW_MSG_CODE_PHY_OK 0x00110000
+#define FW_MSG_CODE_PHY_ERROR 0x00120000
+#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
+#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
+#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
+#define FW_MSG_CODE_OK 0x00160000
+#define FW_MSG_CODE_ERROR 0x00170000
+#define FW_MSG_CODE_LED_MODE_INVALID 0x00170000
+#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000
+#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_INIT_HW_FAILED_TO_ALLOCATE_PAGE 0x00040000
+#define FW_MSG_CODE_INIT_HW_FAILED_BAD_STATE 0x00170000
+#define FW_MSG_CODE_INIT_HW_FAILED_TO_SET_WINDOW 0x000d0000
+#define FW_MSG_CODE_INIT_HW_FAILED_NO_IMAGE 0x000c0000
+#define FW_MSG_CODE_INIT_HW_FAILED_VERSION_MISMATCH 0x00100000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
+#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE 0x000f0000
+#define FW_MSG_CODE_GPIO_OK 0x00160000
+#define FW_MSG_CODE_GPIO_DIRECTION_ERR 0x00170000
+#define FW_MSG_CODE_GPIO_CTRL_ERR 0x00020000
+#define FW_MSG_CODE_GPIO_INVALID 0x000f0000
+#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000
+#define FW_MSG_CODE_BIST_TEST_INVALID 0x000f0000
+#define FW_MSG_CODE_EXTPHY_INVALID_IMAGE_HEADER 0x00700000
+#define FW_MSG_CODE_EXTPHY_INVALID_PHY_TYPE 0x00710000
+#define FW_MSG_CODE_EXTPHY_OPERATION_FAILED 0x00720000
+#define FW_MSG_CODE_EXTPHY_NO_PHY_DETECTED 0x00730000
+#define FW_MSG_CODE_RECOVERY_MODE 0x00740000
+
+ /* mdump related response codes */
+#define FW_MSG_CODE_MDUMP_NO_IMAGE_FOUND 0x00010000
+#define FW_MSG_CODE_MDUMP_ALLOC_FAILED 0x00020000
+#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
+#define FW_MSG_CODE_MDUMP_IN_PROGRESS 0x00040000
+#define FW_MSG_CODE_MDUMP_WRITE_FAILED 0x00050000
+
+
+#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
+#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_BAD_ASIC 0x00880000
+
+#define FW_MSG_CODE_WOL_READ_WRITE_OK 0x00820000
+#define FW_MSG_CODE_WOL_READ_WRITE_INVALID_VAL 0x00830000
+#define FW_MSG_CODE_WOL_READ_WRITE_INVALID_ADDR 0x00840000
+#define FW_MSG_CODE_WOL_READ_BUFFER_OK 0x00850000
+#define FW_MSG_CODE_WOL_READ_BUFFER_INVALID_VAL 0x00860000
+
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+#define FW_MSG_CODE_ATTRIBUTE_INVALID_KEY 0x00020000
+#define FW_MSG_CODE_ATTRIBUTE_INVALID_CMD 0x00030000
+
+ u32 fw_mb_param;
+/* Resource Allocation params - MFW version support */
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET 0
+
+/* get MFW feature support response */
+/* MFW supports SmartLinQ */
+#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001
+/* MFW supports EEE */
+#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
+/* MFW supports DRV_LOAD Timeout */
+#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO 0x00000004
+/* MFW supports virtual link */
+#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000
+
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+ /*
+ * The system time is in the format of
+ * (year-2001)*12*32 + month*32 + day.
+ */
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+ /*
+ * Indicate to the firmware not to go into the
+ * OS-absent when it is not getting driver pulse.
+ * This is used for debugging as well for PXE(MBA).
+ */
+
+ u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+ /* Indicates to the driver not to assert due to lack
+ * of MCP response
+ */
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+/* The union data is used by the driver to pass parameters to the scratchpad. */
+
+ union drv_union_data union_data;
+
+};
+
+/* MFW - DRV MB */
+/**********************************************************************
+ * Description
+ * Incremental Aggregative
+ * 8-bit MFW counter per message
+ * 8-bit ack-counter per message
+ * Capabilities
+ * Provides up to 256 aggregative message per type
+ * Provides 4 message types in dword
+ * Message type pointers to byte offset
+ * Backward Compatibility by using sizeof for the counters.
+ * No lock requires for 32bit messages
+ * Limitations:
+ * In case of messages greater than 32bit, a dedicated mechanism(e.g lock)
+ * is required to prevent data corruption.
+ **********************************************************************/
+enum MFW_DRV_MSG_TYPE {
+ MFW_DRV_MSG_LINK_CHANGE,
+ MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+ MFW_DRV_MSG_VF_DISABLED,
+ MFW_DRV_MSG_LLDP_DATA_UPDATED,
+ MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+ MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+ MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_BW_UPDATE,
+ MFW_DRV_MSG_S_TAG_UPDATE,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
+ MFW_DRV_MSG_FAILURE_DETECTED,
+ MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+ MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
+ MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE,
+ MFW_DRV_MSG_GET_TLV_REQ,
+ MFW_DRV_MSG_OEM_CFG_UPDATE,
+ MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED,
+ MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+
+#ifdef BIG_ENDIAN /* Like MFW */
+#define DRV_ACK_MSG(msg_p, msg_id) \
+((u8)((u8 *)msg_p)[msg_id]++;)
+#else
+#define DRV_ACK_MSG(msg_p, msg_id) \
+((u8)((u8 *)msg_p)[((msg_id & ~3) | ((~msg_id) & 3))]++;)
+#endif
+
+#define MFW_DRV_UPDATE(shmem_func, msg_id) \
+((u8)((u8 *)(MFW_MB_P(shmem_func)->msg))[msg_id]++;)
+
+struct public_mfw_mb {
+ u32 sup_msgs; /* Assigend with MFW_DRV_MSG_MAX */
+/* Incremented by the MFW */
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+/* Incremented by the driver */
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
+
+/**************************************/
+/* */
+/* P U B L I C D A T A */
+/* */
+/**************************************/
+enum public_sections {
+ PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */
+ PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */
+ PUBLIC_GLOBAL,
+ PUBLIC_PATH,
+ PUBLIC_PORT,
+ PUBLIC_FUNC,
+ PUBLIC_MAX_SECTIONS
+};
+
+struct drv_ver_info_stc {
+ u32 ver;
+ u8 name[32];
+};
+
+/* Runtime data needs about 1/2K. We use 2K to be on the safe side.
+ * Please make sure data does not exceed this size.
+ */
+#define NUM_RUNTIME_DWORDS 16
+struct drv_init_hw_stc {
+ u32 init_hw_bitmask[NUM_RUNTIME_DWORDS];
+ u32 init_hw_data[NUM_RUNTIME_DWORDS * 32];
+};
+
+struct mcp_public_data {
+ /* The sections fields is an array */
+ u32 num_sections;
+ offsize_t sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
+};
+
+#define I2C_TRANSCEIVER_ADDR 0xa0
+#define MAX_I2C_TRANSACTION_SIZE 16
+#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256
+
+#endif /* MCP_PUBLIC_H */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/nvm_cfg.h b/src/spdk/dpdk/drivers/net/qede/base/nvm_cfg.h
new file mode 100644
index 00000000..ab86260e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/nvm_cfg.h
@@ -0,0 +1,1983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+/****************************************************************************
+ *
+ * Name: nvm_cfg.h
+ *
+ * Description: NVM config file - Generated file from nvm cfg excel.
+ * DO NOT MODIFY !!!
+ *
+ * Created: 5/8/2017
+ *
+ ****************************************************************************/
+
+#ifndef NVM_CFG_H
+#define NVM_CFG_H
+
+#define NVM_CFG_version 0x83000
+
+#define NVM_CFG_new_option_seq 23
+
+#define NVM_CFG_removed_option_seq 1
+
+#define NVM_CFG_updated_value_seq 4
+
+struct nvm_cfg_mac_address {
+ u32 mac_addr_hi;
+ #define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
+ #define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+ u32 mac_addr_lo;
+};
+
+/******************************************
+ * nvm_cfg1 structs
+ ******************************************/
+struct nvm_cfg1_glob {
+ u32 generic_cont0; /* 0x0 */
+ #define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0
+ #define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0
+ #define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1
+ #define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2
+ #define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3
+ #define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
+ #define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+ #define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+ #define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
+ #define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+ #define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+ #define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+ #define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+ #define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+ #define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+ #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000
+ #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12
+ #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0
+ #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1
+ #define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000
+ #define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13
+ #define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000
+ #define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21
+ #define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000
+ #define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29
+ #define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0
+ #define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1
+ #define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000
+ #define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
+ #define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
+ #define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_MASK \
+ 0x80000000
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_OFFSET 31
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_DISABLED \
+ 0x0
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_ENABLED 0x1
+ u32 engineering_change[3]; /* 0x4 */
+ u32 manufacturing_id; /* 0x10 */
+ u32 serial_number[4]; /* 0x14 */
+ u32 pcie_cfg; /* 0x24 */
+ #define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003
+ #define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0
+ #define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0
+ #define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1
+ #define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2
+ #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004
+ #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2
+ #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0
+ #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3
+ #define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_MASK \
+ 0x00000020
+ #define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_OFFSET 5
+ #define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
+ #define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
+ #define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
+ #define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
+ #define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
+ #define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
+ /* Set the duration, in sec, fan failure signal should be sampled */
+ #define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_MASK \
+ 0x80000000
+ #define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_OFFSET 31
+ u32 mgmt_traffic; /* 0x28 */
+ #define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
+ #define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
+ #define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
+ #define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
+ #define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
+ #define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
+ #define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
+ #define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
+ #define NVM_CFG1_GLOB_AUX_MODE_MASK 0x78000000
+ #define NVM_CFG1_GLOB_AUX_MODE_OFFSET 27
+ #define NVM_CFG1_GLOB_AUX_MODE_DEFAULT 0x0
+ #define NVM_CFG1_GLOB_AUX_MODE_SMBUS_ONLY 0x1
+ /* Indicates whether external thermal sonsor is available */
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_MASK 0x80000000
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_OFFSET 31
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_DISABLED 0x0
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ENABLED 0x1
+ u32 core_cfg; /* 0x2C */
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xB
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xF
+ #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_MASK 0x00000100
+ #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_OFFSET 8
+ #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+ #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+ #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_MASK 0x00000200
+ #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_OFFSET 9
+ #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+ #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+ #define NVM_CFG1_GLOB_MPS10_CORE_ADDR_MASK 0x0003FC00
+ #define NVM_CFG1_GLOB_MPS10_CORE_ADDR_OFFSET 10
+ #define NVM_CFG1_GLOB_MPS25_CORE_ADDR_MASK 0x03FC0000
+ #define NVM_CFG1_GLOB_MPS25_CORE_ADDR_OFFSET 18
+ #define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
+ #define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
+ #define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
+ #define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_CFG 0x1
+ #define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_OTP 0x2
+ #define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
+ #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
+ #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
+ #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
+ u32 e_lane_cfg1; /* 0x30 */
+ #define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+ #define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+ #define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+ #define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+ #define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+ #define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+ #define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+ #define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+ #define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+ #define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+ #define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+ #define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+ #define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+ u32 e_lane_cfg2; /* 0x34 */
+ #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+ #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+ #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+ #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+ #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+ #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+ #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+ #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+ #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+ #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+ #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+ #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+ #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+ #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+ #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+ #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+ #define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
+ #define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
+ #define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
+ #define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_NCSI_OFFSET 12
+ #define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
+ #define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
+ /* Maximum advertised pcie link width */
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_MASK 0x000F0000
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_OFFSET 16
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_BB_16_LANES 0x0
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_1_LANE 0x1
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_2_LANES 0x2
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_4_LANES 0x3
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_8_LANES 0x4
+ /* ASPM L1 mode */
+ #define NVM_CFG1_GLOB_ASPM_L1_MODE_MASK 0x00300000
+ #define NVM_CFG1_GLOB_ASPM_L1_MODE_OFFSET 20
+ #define NVM_CFG1_GLOB_ASPM_L1_MODE_FORCED 0x0
+ #define NVM_CFG1_GLOB_ASPM_L1_MODE_DYNAMIC_LOW_LATENCY 0x1
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_MASK 0x01C00000
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_OFFSET 22
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_I2C 0x1
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_ONLY 0x2
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_SMBUS 0x3
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_MASK \
+ 0x06000000
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_OFFSET 25
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_DISABLE 0x0
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_INTERNAL 0x1
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_EXTERNAL 0x2
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_BOTH 0x3
+ /* Set the PLDM sensor modes */
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_MASK 0x38000000
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_OFFSET 27
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_INTERNAL 0x0
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_EXTERNAL 0x1
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_BOTH 0x2
+ /* ROL enable */
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_MASK 0x80000000
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_OFFSET 31
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_DISABLED 0x0
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_ENABLED 0x1
+ u32 f_lane_cfg1; /* 0x38 */
+ #define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+ #define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+ #define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+ #define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+ #define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+ #define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+ #define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+ #define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+ #define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+ #define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+ #define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+ #define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+ #define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+ u32 f_lane_cfg2; /* 0x3C */
+ #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+ #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+ #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+ #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+ #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+ #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+ #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+ #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+ #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+ #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+ #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+ #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+ #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+ #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+ #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+ #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+ /* Control the period between two successive checks */
+ #define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_MASK \
+ 0x0000FF00
+ #define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_OFFSET 8
+ /* Set shutdown temperature */
+ #define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_OFFSET 16
+ /* Set max. count for over operational temperature */
+ #define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_OFFSET 24
+ u32 mps10_preemphasis; /* 0x40 */
+ #define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+ u32 mps10_driver_current; /* 0x44 */
+ #define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+ u32 mps25_preemphasis; /* 0x48 */
+ #define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+ u32 mps25_driver_current; /* 0x4C */
+ #define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+ u32 pci_id; /* 0x50 */
+ #define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
+ #define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
+ /* Set caution temperature */
+ #define NVM_CFG1_GLOB_DEAD_TEMP_TH_TEMPERATURE_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_DEAD_TEMP_TH_TEMPERATURE_OFFSET 16
+ /* Set external thermal sensor I2C address */
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK \
+ 0xFF000000
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_OFFSET 24
+ u32 pci_subsys_id; /* 0x54 */
+ #define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
+ #define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
+ #define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
+ #define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
+ u32 bar; /* 0x58 */
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
+ /* BB VF BAR2 size */
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
+ /* BB BAR2 size (global) */
+ #define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
+ #define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
+ #define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
+ #define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
+ #define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
+ #define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
+ #define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
+ #define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
+ #define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
+ #define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
+ #define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
+ #define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
+ #define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
+ #define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
+ #define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
+ #define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
+ /* Set the duration, in secs, fan failure signal should be sampled */
+ #define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_OFFSET 12
+ /* This field defines the board total budget for bar2 when disabled
+ * the regular bar size is used.
+ */
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_OFFSET 16
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_DISABLED 0x0
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_64K 0x1
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_128K 0x2
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_256K 0x3
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_512K 0x4
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_1M 0x5
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_2M 0x6
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_4M 0x7
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_8M 0x8
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_16M 0x9
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_32M 0xA
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_64M 0xB
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_128M 0xC
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_256M 0xD
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_512M 0xE
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_1G 0xF
+ /* Enable/Disable Crash dump triggers */
+ #define NVM_CFG1_GLOB_CRASH_DUMP_TRIGGER_ENABLE_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_CRASH_DUMP_TRIGGER_ENABLE_OFFSET 24
+ u32 mps10_txfir_main; /* 0x5C */
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+ u32 mps10_txfir_post; /* 0x60 */
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+ u32 mps25_txfir_main; /* 0x64 */
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+ u32 mps25_txfir_post; /* 0x68 */
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+ u32 manufacture_ver; /* 0x6C */
+ #define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
+ #define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
+ #define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
+ #define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
+ #define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
+ #define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
+ #define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
+ #define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
+ #define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
+ #define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
+ /* Select package id method */
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_MASK 0x40000000
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_OFFSET 30
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_NVRAM 0x0
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_IO_PINS 0x1
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_MASK 0x80000000
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_OFFSET 31
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_ENABLED 0x1
+ u32 manufacture_time; /* 0x70 */
+ #define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
+ #define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
+ #define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
+ #define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
+ #define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
+ #define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
+ /* Max MSIX for Ethernet in default mode */
+ #define NVM_CFG1_GLOB_MAX_MSIX_MASK 0x03FC0000
+ #define NVM_CFG1_GLOB_MAX_MSIX_OFFSET 18
+ /* PF Mapping */
+ #define NVM_CFG1_GLOB_PF_MAPPING_MASK 0x0C000000
+ #define NVM_CFG1_GLOB_PF_MAPPING_OFFSET 26
+ #define NVM_CFG1_GLOB_PF_MAPPING_CONTINUOUS 0x0
+ #define NVM_CFG1_GLOB_PF_MAPPING_FIXED 0x1
+ #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_MASK 0x30000000
+ #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_OFFSET 28
+ #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_TI 0x1
+ u32 led_global_settings; /* 0x74 */
+ #define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
+ #define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
+ #define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
+ #define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
+ #define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
+ /* Max. continues operating temperature */
+ #define NVM_CFG1_GLOB_MAX_CONT_OPERATING_TEMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_MAX_CONT_OPERATING_TEMP_OFFSET 16
+ /* GPIO which triggers run-time port swap according to the map
+ * specified in option 205
+ */
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO31 0x20
+ u32 generic_cont1; /* 0x78 */
+ #define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
+ #define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE0_SWAP_MASK 0x00000C00
+ #define NVM_CFG1_GLOB_LANE0_SWAP_OFFSET 10
+ #define NVM_CFG1_GLOB_LANE1_SWAP_MASK 0x00003000
+ #define NVM_CFG1_GLOB_LANE1_SWAP_OFFSET 12
+ #define NVM_CFG1_GLOB_LANE2_SWAP_MASK 0x0000C000
+ #define NVM_CFG1_GLOB_LANE2_SWAP_OFFSET 14
+ #define NVM_CFG1_GLOB_LANE3_SWAP_MASK 0x00030000
+ #define NVM_CFG1_GLOB_LANE3_SWAP_OFFSET 16
+ /* Enable option 195 - Overriding the PCIe Preset value */
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_MASK 0x00040000
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_OFFSET 18
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_DISABLED 0x0
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_ENABLED 0x1
+ /* PCIe Preset value - applies only if option 194 is enabled */
+ #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_MASK 0x00780000
+ #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_OFFSET 19
+ /* Port mapping to be used when the run-time GPIO for port-swap is
+ * defined and set.
+ */
+ #define NVM_CFG1_GLOB_RUNTIME_PORT0_SWAP_MAP_MASK 0x01800000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT0_SWAP_MAP_OFFSET 23
+ #define NVM_CFG1_GLOB_RUNTIME_PORT1_SWAP_MAP_MASK 0x06000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT1_SWAP_MAP_OFFSET 25
+ #define NVM_CFG1_GLOB_RUNTIME_PORT2_SWAP_MAP_MASK 0x18000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT2_SWAP_MAP_OFFSET 27
+ #define NVM_CFG1_GLOB_RUNTIME_PORT3_SWAP_MAP_MASK 0x60000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT3_SWAP_MAP_OFFSET 29
+ u32 mbi_version; /* 0x7C */
+ #define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+ #define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+ #define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+ /* If set to other than NA, 0 - Normal operation, 1 - Thermal event
+ * occurred
+ */
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO31 0x20
+ u32 mbi_date; /* 0x80 */
+ u32 misc_sig; /* 0x84 */
+ /* Define the GPIO mapping to switch i2c mux */
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
+ /* Interrupt signal used for SMBus/I2C management interface
+ * 0 = Interrupt event occurred
+ * 1 = Normal
+ */
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_OFFSET 16
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO31 0x20
+ /* Set aLOM FAN on GPIO */
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO31 0x20
+ u32 device_capabilities; /* 0x88 */
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP 0x10
+ u32 power_dissipated; /* 0x8C */
+ #define NVM_CFG1_GLOB_POWER_DIS_D0_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_POWER_DIS_D0_OFFSET 0
+ #define NVM_CFG1_GLOB_POWER_DIS_D1_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_POWER_DIS_D1_OFFSET 8
+ #define NVM_CFG1_GLOB_POWER_DIS_D2_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_POWER_DIS_D2_OFFSET 16
+ #define NVM_CFG1_GLOB_POWER_DIS_D3_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_POWER_DIS_D3_OFFSET 24
+ u32 power_consumed; /* 0x90 */
+ #define NVM_CFG1_GLOB_POWER_CONS_D0_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_POWER_CONS_D0_OFFSET 0
+ #define NVM_CFG1_GLOB_POWER_CONS_D1_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_POWER_CONS_D1_OFFSET 8
+ #define NVM_CFG1_GLOB_POWER_CONS_D2_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_POWER_CONS_D2_OFFSET 16
+ #define NVM_CFG1_GLOB_POWER_CONS_D3_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_POWER_CONS_D3_OFFSET 24
+ u32 efi_version; /* 0x94 */
+ u32 multi_network_modes_capability; /* 0x98 */
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_4X10G 0x1
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_1X25G 0x2
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X25G 0x4
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_4X25G 0x8
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_1X40G 0x10
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X40G 0x20
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X50G 0x40
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_BB_1X100G \
+ 0x80
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X10G 0x100
+ /* @DPDK */
+ u32 reserved1[12]; /* 0x9C */
+ u32 oem1_number[8]; /* 0xCC */
+ u32 oem2_number[8]; /* 0xEC */
+ u32 mps25_active_txfir_pre; /* 0x10C */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_PRE_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_PRE_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_PRE_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_PRE_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_PRE_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_PRE_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_PRE_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_PRE_OFFSET 24
+ u32 mps25_active_txfir_main; /* 0x110 */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_MAIN_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_MAIN_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_MAIN_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_MAIN_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_MAIN_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_MAIN_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_MAIN_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_MAIN_OFFSET 24
+ u32 mps25_active_txfir_post; /* 0x114 */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_POST_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_POST_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_POST_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_POST_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_POST_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_POST_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_POST_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_POST_OFFSET 24
+ u32 features; /* 0x118 */
+ /* Set the Aux Fan on temperature */
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_VALUE_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_VALUE_OFFSET 0
+ /* Set NC-SI package ID */
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_OFFSET 8
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO31 0x20
+ /* PMBUS Clock GPIO */
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_OFFSET 16
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO31 0x20
+ /* PMBUS Data GPIO */
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO31 0x20
+ u32 tx_rx_eq_25g_hlpc; /* 0x11C */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_HLPC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_HLPC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_HLPC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_HLPC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_HLPC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_HLPC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_HLPC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_HLPC_OFFSET 24
+ u32 tx_rx_eq_25g_llpc; /* 0x120 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_LLPC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_LLPC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_LLPC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_LLPC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_LLPC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_LLPC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_LLPC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_LLPC_OFFSET 24
+ u32 tx_rx_eq_25g_ac; /* 0x124 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_AC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_AC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_AC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_AC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_AC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_AC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_AC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_AC_OFFSET 24
+ u32 tx_rx_eq_10g_pc; /* 0x128 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_PC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_PC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_PC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_PC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_PC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_PC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_PC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_PC_OFFSET 24
+ u32 tx_rx_eq_10g_ac; /* 0x12C */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_AC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_AC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_AC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_AC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_AC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_AC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_AC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_AC_OFFSET 24
+ u32 tx_rx_eq_1g; /* 0x130 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_1G_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_1G_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_1G_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_1G_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_1G_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_1G_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_1G_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_1G_OFFSET 24
+ u32 tx_rx_eq_25g_bt; /* 0x134 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_BT_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_BT_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_BT_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_BT_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_BT_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_BT_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_BT_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_BT_OFFSET 24
+ u32 tx_rx_eq_10g_bt; /* 0x138 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_BT_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_BT_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_BT_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_BT_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_BT_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_BT_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_BT_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_BT_OFFSET 24
+ u32 generic_cont4; /* 0x13C */
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_OFFSET 0
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO31 0x20
+ u32 preboot_debug_mode_std; /* 0x140 */
+ u32 preboot_debug_mode_ext; /* 0x144 */
+ u32 ext_phy_cfg1; /* 0x148 */
+ /* Ext PHY MDI pair swap value */
+ #define NVM_CFG1_GLOB_EXT_PHY_MDI_PAIR_SWAP_MASK 0x0000FFFF
+ #define NVM_CFG1_GLOB_EXT_PHY_MDI_PAIR_SWAP_OFFSET 0
+ u32 reserved[55]; /* 0x14C */
+};
+
+struct nvm_cfg1_path {
+ u32 reserved[1]; /* 0x0 */
+};
+
+struct nvm_cfg1_port {
+ u32 reserved__m_relocated_to_option_123; /* 0x0 */
+ u32 reserved__m_relocated_to_option_124; /* 0x4 */
+ u32 generic_cont0; /* 0x8 */
+ #define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
+ #define NVM_CFG1_PORT_LED_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_LED_MODE_MAC1 0x0
+ #define NVM_CFG1_PORT_LED_MODE_PHY1 0x1
+ #define NVM_CFG1_PORT_LED_MODE_PHY2 0x2
+ #define NVM_CFG1_PORT_LED_MODE_PHY3 0x3
+ #define NVM_CFG1_PORT_LED_MODE_MAC2 0x4
+ #define NVM_CFG1_PORT_LED_MODE_PHY4 0x5
+ #define NVM_CFG1_PORT_LED_MODE_PHY5 0x6
+ #define NVM_CFG1_PORT_LED_MODE_PHY6 0x7
+ #define NVM_CFG1_PORT_LED_MODE_MAC3 0x8
+ #define NVM_CFG1_PORT_LED_MODE_PHY7 0x9
+ #define NVM_CFG1_PORT_LED_MODE_PHY8 0xA
+ #define NVM_CFG1_PORT_LED_MODE_PHY9 0xB
+ #define NVM_CFG1_PORT_LED_MODE_MAC4 0xC
+ #define NVM_CFG1_PORT_LED_MODE_PHY10 0xD
+ #define NVM_CFG1_PORT_LED_MODE_PHY11 0xE
+ #define NVM_CFG1_PORT_LED_MODE_PHY12 0xF
+ #define NVM_CFG1_PORT_LED_MODE_BREAKOUT 0x10
+ #define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8
+ #define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
+ #define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+ #define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+ #define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+ #define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+ #define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+ /* GPIO for HW reset the PHY. In case it is the same for all ports,
+ * need to set same value for all ports
+ */
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_MASK 0xFF000000
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_OFFSET 24
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_NA 0x0
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO0 0x1
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO1 0x2
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO2 0x3
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO3 0x4
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO4 0x5
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO5 0x6
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO6 0x7
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO7 0x8
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO8 0x9
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO9 0xA
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO10 0xB
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO11 0xC
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO12 0xD
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO13 0xE
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO14 0xF
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO15 0x10
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO16 0x11
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO17 0x12
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO18 0x13
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO19 0x14
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO20 0x15
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO21 0x16
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO22 0x17
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO23 0x18
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO24 0x19
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO25 0x1A
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO26 0x1B
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO27 0x1C
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO28 0x1D
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO29 0x1E
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO30 0x1F
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO31 0x20
+ u32 pcie_cfg; /* 0xC */
+ #define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
+ #define NVM_CFG1_PORT_RESERVED15_OFFSET 0
+ u32 features; /* 0x10 */
+ #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001
+ #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0
+ #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0
+ #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1
+ #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002
+ #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1
+ #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0
+ #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1
+ u32 speed_cap_mask; /* 0x14 */
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_20G 0x4
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 link_settings; /* 0x18 */
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_20G 0x3
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
+ #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK \
+ 0x00004000
+ #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
+ #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED \
+ 0x0
+ #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED \
+ 0x1
+ #define NVM_CFG1_PORT_AN_25G_50G_OUI_MASK 0x00018000
+ #define NVM_CFG1_PORT_AN_25G_50G_OUI_OFFSET 15
+ #define NVM_CFG1_PORT_AN_25G_50G_OUI_CONSORTIUM 0x0
+ #define NVM_CFG1_PORT_AN_25G_50G_OUI_BAM 0x1
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000E0000
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
+ #define NVM_CFG1_PORT_FEC_AN_MODE_MASK 0x00700000
+ #define NVM_CFG1_PORT_FEC_AN_MODE_OFFSET 20
+ #define NVM_CFG1_PORT_FEC_AN_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_FEC_AN_MODE_10G_FIRECODE 0x1
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE 0x2
+ #define NVM_CFG1_PORT_FEC_AN_MODE_10G_AND_25G_FIRECODE 0x3
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_RS 0x4
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE_AND_RS 0x5
+ #define NVM_CFG1_PORT_FEC_AN_MODE_ALL 0x6
+ #define NVM_CFG1_PORT_SMARTLINQ_MODE_MASK 0x00800000
+ #define NVM_CFG1_PORT_SMARTLINQ_MODE_OFFSET 23
+ #define NVM_CFG1_PORT_SMARTLINQ_MODE_DISABLED 0x0
+ #define NVM_CFG1_PORT_SMARTLINQ_MODE_ENABLED 0x1
+ #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_MASK 0x01000000
+ #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_OFFSET 24
+ #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_DISABLED 0x0
+ #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_ENABLED 0x1
+ u32 phy_cfg; /* 0x1C */
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
+ #define NVM_CFG1_PORT_AN_MODE_OFFSET 24
+ #define NVM_CFG1_PORT_AN_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_AN_MODE_CL73 0x1
+ #define NVM_CFG1_PORT_AN_MODE_CL37 0x2
+ #define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
+ #define NVM_CFG1_PORT_AN_MODE_BB_CL37_BAM 0x4
+ #define NVM_CFG1_PORT_AN_MODE_BB_HPAM 0x5
+ #define NVM_CFG1_PORT_AN_MODE_BB_SGMII 0x6
+ u32 mgmt_traffic; /* 0x20 */
+ #define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
+ #define NVM_CFG1_PORT_RESERVED61_OFFSET 0
+ u32 ext_phy; /* 0x24 */
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM8485X 0x1
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM5422X 0x2
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
+ /* EEE power saving mode */
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
+ u32 mba_cfg1; /* 0x28 */
+ #define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
+ #define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
+ #define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0
+ #define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1
+ #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006
+ #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1
+ #define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
+ #define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
+ #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
+ #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
+ #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
+ #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
+ #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
+ #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
+ #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
+ #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
+ #define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
+ #define NVM_CFG1_PORT_RESERVED5_OFFSET 9
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_20G 0x3
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK \
+ 0x00E00000
+ #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
+ #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_MASK \
+ 0x01000000
+ #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_OFFSET 24
+ #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_DISABLED \
+ 0x0
+ #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_ENABLED 0x1
+ u32 mba_cfg2; /* 0x2C */
+ #define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
+ #define NVM_CFG1_PORT_RESERVED65_OFFSET 0
+ #define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
+ #define NVM_CFG1_PORT_RESERVED66_OFFSET 16
+ #define NVM_CFG1_PORT_PREBOOT_LINK_UP_DELAY_MASK 0x01FE0000
+ #define NVM_CFG1_PORT_PREBOOT_LINK_UP_DELAY_OFFSET 17
+ u32 vf_cfg; /* 0x30 */
+ #define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
+ #define NVM_CFG1_PORT_RESERVED8_OFFSET 0
+ #define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
+ #define NVM_CFG1_PORT_RESERVED6_OFFSET 16
+ struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
+ u32 led_port_settings; /* 0x3C */
+ #define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
+ #define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
+ #define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
+ #define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_AH_25G 0x4
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_25G 0x8
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_AH_40G 0x8
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_40G 0x10
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_AH_50G 0x10
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_50G 0x20
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_100G 0x40
+ u32 transceiver_00; /* 0x40 */
+ /* Define for mapping of transceiver signal module absent */
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
+ /* Define the GPIO mux settings to switch i2c mux to this port */
+ #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
+ #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
+ #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
+ #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
+ u32 device_ids; /* 0x44 */
+ #define NVM_CFG1_PORT_ETH_DID_SUFFIX_MASK 0x000000FF
+ #define NVM_CFG1_PORT_ETH_DID_SUFFIX_OFFSET 0
+ #define NVM_CFG1_PORT_FCOE_DID_SUFFIX_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_FCOE_DID_SUFFIX_OFFSET 8
+ #define NVM_CFG1_PORT_ISCSI_DID_SUFFIX_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_ISCSI_DID_SUFFIX_OFFSET 16
+ #define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_OFFSET 24
+ u32 board_cfg; /* 0x48 */
+ /* This field defines the board technology
+ * (backpane,transceiver,external PHY)
+ */
+ #define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000FF
+ #define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
+ #define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
+ /* This field defines the GPIO mapped to tx_disable signal in SFP */
+ #define NVM_CFG1_PORT_TX_DISABLE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_TX_DISABLE_OFFSET 8
+ #define NVM_CFG1_PORT_TX_DISABLE_NA 0x0
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO0 0x1
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO1 0x2
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO2 0x3
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO3 0x4
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO4 0x5
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO5 0x6
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO6 0x7
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO7 0x8
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO8 0x9
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO9 0xA
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO10 0xB
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO11 0xC
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO12 0xD
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO13 0xE
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO14 0xF
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO15 0x10
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO16 0x11
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO17 0x12
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO18 0x13
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO19 0x14
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO20 0x15
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO21 0x16
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO22 0x17
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO23 0x18
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO24 0x19
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO25 0x1A
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO26 0x1B
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO27 0x1C
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO28 0x1D
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO29 0x1E
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO30 0x1F
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO31 0x20
+ u32 mnm_10g_cap; /* 0x4C */
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_OFFSET \
+ 16
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 mnm_10g_ctrl; /* 0x50 */
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_20G 0x3
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_20G 0x3
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_BB_100G 0x7
+ /* This field defines the board technology
+ * (backpane,transceiver,external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_10G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_10G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_10g_misc; /* 0x54 */
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_AUTO 0x7
+ u32 mnm_25g_cap; /* 0x58 */
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_OFFSET \
+ 16
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 mnm_25g_ctrl; /* 0x5C */
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_20G 0x3
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_20G 0x3
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_BB_100G 0x7
+ /* This field defines the board technology
+ * (backpane,transceiver,external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_25G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_25G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_25g_misc; /* 0x60 */
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_AUTO 0x7
+ u32 mnm_40g_cap; /* 0x64 */
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_OFFSET \
+ 16
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 mnm_40g_ctrl; /* 0x68 */
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_20G 0x3
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_20G 0x3
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_BB_100G 0x7
+ /* This field defines the board technology
+ * (backpane,transceiver,external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_40G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_40G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_40g_misc; /* 0x6C */
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_AUTO 0x7
+ u32 mnm_50g_cap; /* 0x70 */
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_BB_100G \
+ 0x40
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_OFFSET \
+ 16
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_BB_100G \
+ 0x40
+ u32 mnm_50g_ctrl; /* 0x74 */
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_20G 0x3
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_20G 0x3
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_BB_100G 0x7
+ /* This field defines the board technology
+ * (backpane,transceiver,external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_50G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_50G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_50g_misc; /* 0x78 */
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_AUTO 0x7
+ u32 mnm_100g_cap; /* 0x7C */
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_20G 0x4
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_50G 0x20
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_20G 0x4
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_50G 0x20
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_BB_100G 0x40
+ u32 mnm_100g_ctrl; /* 0x80 */
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_20G 0x3
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_20G 0x3
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_BB_100G 0x7
+ /* This field defines the board technology
+ * (backpane,transceiver,external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_100G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_100G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_100g_misc; /* 0x84 */
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_AUTO 0x7
+ u32 temperature; /* 0x88 */
+ #define NVM_CFG1_PORT_PHY_MODULE_DEAD_TEMP_TH_MASK 0x000000FF
+ #define NVM_CFG1_PORT_PHY_MODULE_DEAD_TEMP_TH_OFFSET 0
+ #define NVM_CFG1_PORT_PHY_MODULE_ALOM_FAN_ON_TEMP_TH_MASK \
+ 0x0000FF00
+ #define NVM_CFG1_PORT_PHY_MODULE_ALOM_FAN_ON_TEMP_TH_OFFSET 8
+ u32 reserved[115]; /* 0x8C */
+};
+
+struct nvm_cfg1_func {
+ struct nvm_cfg_mac_address mac_address; /* 0x0 */
+ u32 rsrv1; /* 0x8 */
+ #define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF
+ #define NVM_CFG1_FUNC_RESERVED1_OFFSET 0
+ #define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000
+ #define NVM_CFG1_FUNC_RESERVED2_OFFSET 16
+ u32 rsrv2; /* 0xC */
+ #define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF
+ #define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
+ #define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
+ #define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
+ u32 device_id; /* 0x10 */
+ #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
+ #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
+ #define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000
+ #define NVM_CFG1_FUNC_RESERVED77_OFFSET 16
+ u32 cmn_cfg; /* 0x14 */
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7
+ #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
+ #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
+ #define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
+ #define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
+ #define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
+ #define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1
+ #define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2
+ #define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3
+ #define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
+ #define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
+ #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
+ #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
+ #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
+ #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
+ u32 pci_cfg; /* 0x18 */
+ #define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
+ #define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
+ /* AH VF BAR2 size */
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_MASK 0x00003F80
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_OFFSET 7
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_DISABLED 0x0
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_4K 0x1
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_8K 0x2
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_16K 0x3
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_32K 0x4
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_64K 0x5
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_128K 0x6
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_256K 0x7
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_512K 0x8
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_1M 0x9
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_2M 0xA
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_4M 0xB
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_8M 0xC
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_16M 0xD
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_32M 0xE
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_64M 0xF
+ #define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
+ #define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
+ #define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
+ #define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
+ #define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
+ #define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
+ #define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
+ #define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
+ #define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
+ #define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
+ #define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
+ #define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
+ #define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
+ #define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
+ #define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
+ #define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
+ #define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
+ #define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
+ #define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
+ #define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
+ /* Hide function in npar mode */
+ #define NVM_CFG1_FUNC_FUNCTION_HIDE_MASK 0x04000000
+ #define NVM_CFG1_FUNC_FUNCTION_HIDE_OFFSET 26
+ #define NVM_CFG1_FUNC_FUNCTION_HIDE_DISABLED 0x0
+ #define NVM_CFG1_FUNC_FUNCTION_HIDE_ENABLED 0x1
+ /* AH BAR2 size (per function) */
+ #define NVM_CFG1_FUNC_BAR2_SIZE_MASK 0x78000000
+ #define NVM_CFG1_FUNC_BAR2_SIZE_OFFSET 27
+ #define NVM_CFG1_FUNC_BAR2_SIZE_DISABLED 0x0
+ #define NVM_CFG1_FUNC_BAR2_SIZE_1M 0x5
+ #define NVM_CFG1_FUNC_BAR2_SIZE_2M 0x6
+ #define NVM_CFG1_FUNC_BAR2_SIZE_4M 0x7
+ #define NVM_CFG1_FUNC_BAR2_SIZE_8M 0x8
+ #define NVM_CFG1_FUNC_BAR2_SIZE_16M 0x9
+ #define NVM_CFG1_FUNC_BAR2_SIZE_32M 0xA
+ #define NVM_CFG1_FUNC_BAR2_SIZE_64M 0xB
+ #define NVM_CFG1_FUNC_BAR2_SIZE_128M 0xC
+ #define NVM_CFG1_FUNC_BAR2_SIZE_256M 0xD
+ #define NVM_CFG1_FUNC_BAR2_SIZE_512M 0xE
+ #define NVM_CFG1_FUNC_BAR2_SIZE_1G 0xF
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */
+ u32 preboot_generic_cfg; /* 0x2C */
+ #define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_MASK 0x0000FFFF
+ #define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_OFFSET 0
+ #define NVM_CFG1_FUNC_PREBOOT_VLAN_MASK 0x00010000
+ #define NVM_CFG1_FUNC_PREBOOT_VLAN_OFFSET 16
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_MASK 0x001E0000
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_OFFSET 17
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ETHERNET 0x1
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_FCOE 0x2
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ISCSI 0x4
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_RDMA 0x8
+ u32 reserved[8]; /* 0x30 */
+};
+
+struct nvm_cfg1 {
+ struct nvm_cfg1_glob glob; /* 0x0 */
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x228 */
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
+};
+
+/******************************************
+ * nvm_cfg structs
+ ******************************************/
+enum nvm_cfg_sections {
+ NVM_CFG_SECTION_NVM_CFG1,
+ NVM_CFG_SECTION_MAX
+};
+
+struct nvm_cfg {
+ u32 num_sections;
+ u32 sections_offset[NVM_CFG_SECTION_MAX];
+ struct nvm_cfg1 cfg1;
+};
+
+#endif /* NVM_CFG_H */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/reg_addr.h b/src/spdk/dpdk/drivers/net/qede/base/reg_addr.h
new file mode 100644
index 00000000..402f6204
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/reg_addr.h
@@ -0,0 +1,1216 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
+ 0
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \
+ 0xfff << 0)
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
+ 12
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \
+ 0xfff << 12)
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
+ 24
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
+ 0xffUL << 24) /* @DPDK */
+
+#define XSDM_REG_OPERATION_GEN \
+ 0xf80408UL
+#define NIG_REG_RX_BRB_OUT_EN \
+ 0x500e18UL
+#define NIG_REG_STORM_OUT_EN \
+ 0x500e08UL
+#define PSWRQ2_REG_L2P_VALIDATE_VFID \
+ 0x240c50UL
+#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \
+ 0x2aae04UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
+ 0x2aa16cUL
+#define BAR0_MAP_REG_MSDM_RAM \
+ 0x1d00000UL
+#define BAR0_MAP_REG_USDM_RAM \
+ 0x1d80000UL
+#define BAR0_MAP_REG_PSDM_RAM \
+ 0x1f00000UL
+#define BAR0_MAP_REG_TSDM_RAM \
+ 0x1c80000UL
+#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
+ 0x5011f4UL
+#define PRS_REG_SEARCH_TCP \
+ 0x1f0400UL
+#define PRS_REG_SEARCH_UDP \
+ 0x1f0404UL
+#define PRS_REG_SEARCH_OPENFLOW \
+ 0x1f0434UL
+#define TM_REG_PF_ENABLE_CONN \
+ 0x2c043cUL
+#define TM_REG_PF_ENABLE_TASK \
+ 0x2c0444UL
+#define TM_REG_PF_SCAN_ACTIVE_CONN \
+ 0x2c04fcUL
+#define TM_REG_PF_SCAN_ACTIVE_TASK \
+ 0x2c0500UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x18082cUL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x180830UL
+#define QM_REG_USG_CNT_PF_TX \
+ 0x2f2eacUL
+#define QM_REG_USG_CNT_PF_OTHER \
+ 0x2f2eb0UL
+#define DORQ_REG_PF_DB_ENABLE \
+ 0x100508UL
+#define QM_REG_PF_EN \
+ 0x2f2ea4UL
+#define TCFC_REG_STRONG_ENABLE_PF \
+ 0x2d0708UL
+#define CCFC_REG_STRONG_ENABLE_PF \
+ 0x2e0708UL
+#define PGLUE_B_REG_PGL_ADDR_88_F0 \
+ 0x2aa404UL
+#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
+ 0x2aa408UL
+#define PGLUE_B_REG_PGL_ADDR_90_F0 \
+ 0x2aa40cUL
+#define PGLUE_B_REG_PGL_ADDR_94_F0 \
+ 0x2aa410UL
+#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
+ 0x2aa138UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+ 0x2aa174UL
+#define MISC_REG_GEN_PURP_CR0 \
+ 0x008c80UL
+#define MCP_REG_SCRATCH \
+ 0xe20000UL
+#define CNIG_REG_NW_PORT_MODE_BB_B0 \
+ 0x218200UL
+#define MISCS_REG_CHIP_NUM \
+ 0x00976cUL
+#define MISCS_REG_CHIP_REV \
+ 0x009770UL
+#define MISCS_REG_CMT_ENABLED_FOR_PAIR \
+ 0x00971cUL
+#define MISCS_REG_CHIP_TEST_REG \
+ 0x009778UL
+#define MISCS_REG_CHIP_METAL \
+ 0x009774UL
+#define BRB_REG_HEADER_SIZE \
+ 0x340804UL
+#define BTB_REG_HEADER_SIZE \
+ 0xdb0804UL
+#define CAU_REG_LONG_TIMEOUT_THRESHOLD \
+ 0x1c0708UL
+#define CCFC_REG_ACTIVITY_COUNTER \
+ 0x2e8800UL
+#define CDU_REG_CID_ADDR_PARAMS \
+ 0x580900UL
+#define DBG_REG_CLIENT_ENABLE \
+ 0x010004UL
+#define DMAE_REG_INIT \
+ 0x00c000UL
+#define DORQ_REG_IFEN \
+ 0x100040UL
+#define GRC_REG_TIMEOUT_EN \
+ 0x050404UL
+#define IGU_REG_BLOCK_CONFIGURATION \
+ 0x180040UL
+#define MCM_REG_INIT \
+ 0x1200000UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+ 0x052404UL
+#define MISC_REG_PORT_MODE \
+ 0x008c00UL
+#define MISC_REG_BLOCK_256B_EN \
+ 0x008c14UL
+#define MISCS_REG_RESET_PL_HV \
+ 0x009060UL
+#define MISCS_REG_CLK_100G_MODE \
+ 0x009070UL
+#define MISCS_REG_RESET_PL_HV_2 \
+ 0x009150UL
+#define MSDM_REG_ENABLE_IN1 \
+ 0xfc0004UL
+#define MSEM_REG_ENABLE_IN \
+ 0x1800004UL
+#define NIG_REG_CM_HDR \
+ 0x500840UL
+#define NCSI_REG_CONFIG \
+ 0x040200UL
+#define PSWRQ2_REG_RBC_DONE \
+ 0x240000UL
+#define PSWRQ2_REG_CFG_DONE \
+ 0x240004UL
+#define PBF_REG_INIT \
+ 0xd80000UL
+#define PTU_REG_ATC_INIT_ARRAY \
+ 0x560000UL
+#define PCM_REG_INIT \
+ 0x1100000UL
+#define PGLUE_B_REG_ADMIN_PER_PF_REGION \
+ 0x2a9000UL
+#define PRM_REG_DISABLE_PRM \
+ 0x230000UL
+#define PRS_REG_SOFT_RST \
+ 0x1f0000UL
+#define PSDM_REG_ENABLE_IN1 \
+ 0xfa0004UL
+#define PSEM_REG_ENABLE_IN \
+ 0x1600004UL
+#define PSWRQ_REG_DBG_SELECT \
+ 0x280020UL
+#define PSWRQ2_REG_CDUT_P_SIZE \
+ 0x24000cUL
+#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
+ 0x2a0040UL
+#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
+ 0x29e050UL
+#define PSWRD_REG_DBG_SELECT \
+ 0x29c040UL
+#define PSWRD2_REG_CONF11 \
+ 0x29d064UL
+#define PSWWR_REG_USDM_FULL_TH \
+ 0x29a040UL
+#define PSWWR2_REG_CDU_FULL_TH2 \
+ 0x29b040UL
+#define QM_REG_MAXPQSIZE_0 \
+ 0x2f0434UL
+#define RSS_REG_RSS_INIT_EN \
+ 0x238804UL
+#define RDIF_REG_STOP_ON_ERROR \
+ 0x300040UL
+#define SRC_REG_SOFT_RST \
+ 0x23874cUL
+#define TCFC_REG_ACTIVITY_COUNTER \
+ 0x2d8800UL
+#define TCM_REG_INIT \
+ 0x1180000UL
+#define TM_REG_PXP_READ_DATA_FIFO_INIT \
+ 0x2c0014UL
+#define TSDM_REG_ENABLE_IN1 \
+ 0xfb0004UL
+#define TSEM_REG_ENABLE_IN \
+ 0x1700004UL
+#define TDIF_REG_STOP_ON_ERROR \
+ 0x310040UL
+#define UCM_REG_INIT \
+ 0x1280000UL
+#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
+ 0x051004UL
+#define USDM_REG_ENABLE_IN1 \
+ 0xfd0004UL
+#define USEM_REG_ENABLE_IN \
+ 0x1900004UL
+#define XCM_REG_INIT \
+ 0x1000000UL
+#define XSDM_REG_ENABLE_IN1 \
+ 0xf80004UL
+#define XSEM_REG_ENABLE_IN \
+ 0x1400004UL
+#define YCM_REG_INIT \
+ 0x1080000UL
+#define YSDM_REG_ENABLE_IN1 \
+ 0xf90004UL
+#define YSEM_REG_ENABLE_IN \
+ 0x1500004UL
+#define XYLD_REG_SCBD_STRICT_PRIO \
+ 0x4c0000UL
+#define TMLD_REG_SCBD_STRICT_PRIO \
+ 0x4d0000UL
+#define MULD_REG_SCBD_STRICT_PRIO \
+ 0x4e0000UL
+#define YULD_REG_SCBD_STRICT_PRIO \
+ 0x4c8000UL
+#define MISC_REG_SHARED_MEM_ADDR \
+ 0x008c20UL
+#define DMAE_REG_GO_C0 \
+ 0x00c048UL
+#define DMAE_REG_GO_C1 \
+ 0x00c04cUL
+#define DMAE_REG_GO_C2 \
+ 0x00c050UL
+#define DMAE_REG_GO_C3 \
+ 0x00c054UL
+#define DMAE_REG_GO_C4 \
+ 0x00c058UL
+#define DMAE_REG_GO_C5 \
+ 0x00c05cUL
+#define DMAE_REG_GO_C6 \
+ 0x00c060UL
+#define DMAE_REG_GO_C7 \
+ 0x00c064UL
+#define DMAE_REG_GO_C8 \
+ 0x00c068UL
+#define DMAE_REG_GO_C9 \
+ 0x00c06cUL
+#define DMAE_REG_GO_C10 \
+ 0x00c070UL
+#define DMAE_REG_GO_C11 \
+ 0x00c074UL
+#define DMAE_REG_GO_C12 \
+ 0x00c078UL
+#define DMAE_REG_GO_C13 \
+ 0x00c07cUL
+#define DMAE_REG_GO_C14 \
+ 0x00c080UL
+#define DMAE_REG_GO_C15 \
+ 0x00c084UL
+#define DMAE_REG_GO_C16 \
+ 0x00c088UL
+#define DMAE_REG_GO_C17 \
+ 0x00c08cUL
+#define DMAE_REG_GO_C18 \
+ 0x00c090UL
+#define DMAE_REG_GO_C19 \
+ 0x00c094UL
+#define DMAE_REG_GO_C20 \
+ 0x00c098UL
+#define DMAE_REG_GO_C21 \
+ 0x00c09cUL
+#define DMAE_REG_GO_C22 \
+ 0x00c0a0UL
+#define DMAE_REG_GO_C23 \
+ 0x00c0a4UL
+#define DMAE_REG_GO_C24 \
+ 0x00c0a8UL
+#define DMAE_REG_GO_C25 \
+ 0x00c0acUL
+#define DMAE_REG_GO_C26 \
+ 0x00c0b0UL
+#define DMAE_REG_GO_C27 \
+ 0x00c0b4UL
+#define DMAE_REG_GO_C28 \
+ 0x00c0b8UL
+#define DMAE_REG_GO_C29 \
+ 0x00c0bcUL
+#define DMAE_REG_GO_C30 \
+ 0x00c0c0UL
+#define DMAE_REG_GO_C31 \
+ 0x00c0c4UL
+#define DMAE_REG_CMD_MEM \
+ 0x00c800UL
+#define QM_REG_MAXPQSIZETXSEL_0 \
+ 0x2f0440UL
+#define QM_REG_SDMCMDREADY \
+ 0x2f1e10UL
+#define QM_REG_SDMCMDADDR \
+ 0x2f1e04UL
+#define QM_REG_SDMCMDDATALSB \
+ 0x2f1e08UL
+#define QM_REG_SDMCMDDATAMSB \
+ 0x2f1e0cUL
+#define QM_REG_SDMCMDGO \
+ 0x2f1e14UL
+#define QM_REG_RLPFCRD \
+ 0x2f4d80UL
+#define QM_REG_RLPFINCVAL \
+ 0x2f4c80UL
+#define QM_REG_RLGLBLCRD \
+ 0x2f4400UL
+#define QM_REG_RLGLBLINCVAL \
+ 0x2f3400UL
+#define IGU_REG_ATTENTION_ENABLE \
+ 0x18083cUL
+#define IGU_REG_ATTN_MSG_ADDR_L \
+ 0x180820UL
+#define IGU_REG_ATTN_MSG_ADDR_H \
+ 0x180824UL
+#define MISC_REG_AEU_GENERAL_ATTN_0 \
+ 0x008400UL
+#define CAU_REG_SB_ADDR_MEMORY \
+ 0x1c8000UL
+#define CAU_REG_SB_VAR_MEMORY \
+ 0x1c6000UL
+#define CAU_REG_PI_MEMORY \
+ 0x1d0000UL
+#define IGU_REG_PF_CONFIGURATION \
+ 0x180800UL
+#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
+ 0x00849cUL
+#define MISC_REG_AEU_MASK_ATTN_IGU \
+ 0x008494UL
+#define IGU_REG_CLEANUP_STATUS_0 \
+ 0x180980UL
+#define IGU_REG_CLEANUP_STATUS_1 \
+ 0x180a00UL
+#define IGU_REG_CLEANUP_STATUS_2 \
+ 0x180a80UL
+#define IGU_REG_CLEANUP_STATUS_3 \
+ 0x180b00UL
+#define IGU_REG_CLEANUP_STATUS_4 \
+ 0x180b80UL
+#define IGU_REG_COMMAND_REG_32LSB_DATA \
+ 0x180840UL
+#define IGU_REG_COMMAND_REG_CTRL \
+ 0x180848UL
+#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \
+ 0x1 << 1)
+#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
+ 0x1 << 0)
+#define IGU_REG_MAPPING_MEMORY \
+ 0x184000UL
+#define MISCS_REG_GENERIC_POR_0 \
+ 0x0096d4UL
+#define MCP_REG_NVM_CFG4 \
+ 0xe0642cUL
+#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \
+ 0x7 << 0)
+#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
+ 0
+#define CCFC_REG_STRONG_ENABLE_VF 0x2e070cUL
+#define CNIG_REG_PMEG_IF_CMD_BB_B0 0x21821cUL
+#define CNIG_REG_PMEG_IF_ADDR_BB_B0 0x218224UL
+#define CNIG_REG_PMEG_IF_WRDATA_BB_B0 0x218228UL
+#define NWM_REG_MAC0 0x800400UL
+#define NWM_REG_MAC0_SIZE 256
+#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT 0
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT 1
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT 3
+#define ETH_MAC_REG_XIF_MODE 0x000080UL
+#define ETH_MAC_REG_XIF_MODE_XGMII_SHIFT 0
+#define ETH_MAC_REG_FRM_LENGTH 0x000014UL
+#define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT 0
+#define ETH_MAC_REG_TX_IPG_LENGTH 0x000044UL
+#define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT 0
+#define ETH_MAC_REG_RX_FIFO_SECTIONS 0x00001cUL
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT 0
+#define ETH_MAC_REG_TX_FIFO_SECTIONS 0x000020UL
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT 16
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT 0
+#define ETH_MAC_REG_COMMAND_CONFIG 0x000008UL
+#define MISC_REG_RESET_PL_PDA_VAUX 0x008090UL
+#define MISC_REG_XMAC_CORE_PORT_MODE 0x008c08UL
+#define MISC_REG_XMAC_PHY_PORT_MODE 0x008c04UL
+#define XMAC_REG_MODE 0x210008UL
+#define XMAC_REG_RX_MAX_SIZE 0x210040UL
+#define XMAC_REG_TX_CTRL_LO 0x210020UL
+#define XMAC_REG_CTRL 0x210000UL
+#define XMAC_REG_RX_CTRL 0x210030UL
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE (0x1 << 12)
+#define MISC_REG_CLK_100G_MODE 0x008c10UL
+#define MISC_REG_OPTE_MODE 0x008c0cUL
+#define NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH 0x501b84UL
+#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
+#define PRS_REG_SEARCH_TAG1 0x1f0444UL
+#define PRS_REG_SEARCH_TCP_FIRST_FRAG 0x1f0410UL
+#define MISCS_REG_PLL_MAIN_CTRL_4 0x00974cUL
+#define MISCS_REG_ECO_RESERVED 0x0097b4UL
+#define PGLUE_B_REG_PF_BAR0_SIZE 0x2aae60UL
+#define PGLUE_B_REG_PF_BAR1_SIZE 0x2aae64UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE 0x501b00UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE 0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE 0x501b00UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
+#define XMAC_REG_CTRL_TX_EN (0x1 << 0)
+#define XMAC_REG_CTRL_RX_EN (0x1 << 1)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xffUL << 24) /* @DPDK */
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xff << 16)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT 16
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xff << 16)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xffUL << 24) /* @DPDK */
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfff << 0)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT 0
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfff << 0)
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT 0
+#define PSWRQ2_REG_ILT_MEMORY 0x260000UL
+#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT 0x2fa000UL
+#define NIG_REG_LB_ARB_CREDIT_WEIGHT_0 0x50160cUL
+#define NIG_REG_TX_ARB_CREDIT_WEIGHT_0 0x501f88UL
+#define NIG_REG_LB_ARB_CREDIT_WEIGHT_1 0x501610UL
+#define NIG_REG_TX_ARB_CREDIT_WEIGHT_1 0x501f8cUL
+#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 0x5015e4UL
+#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0 0x501f58UL
+#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 0x5015e8UL
+#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 0x501f5cUL
+#define NIG_REG_LB_ARB_CLIENT_IS_STRICT 0x5015c0UL
+#define NIG_REG_TX_ARB_CLIENT_IS_STRICT 0x501f34UL
+#define NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ 0x5015c4UL
+#define NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x501f38UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT 1
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL 0x501f1cUL
+#define NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD 0x501f20UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE 0x501f24UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE 0x501f28UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT 0
+#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT 1
+#define NIG_REG_LB_BRBRATELIMIT_CTRL 0x50150cUL
+#define NIG_REG_LB_BRBRATELIMIT_INC_PERIOD 0x501510UL
+#define NIG_REG_LB_BRBRATELIMIT_INC_VALUE 0x501514UL
+#define NIG_REG_LB_BRBRATELIMIT_MAX_VALUE 0x501518UL
+#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT 0
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT 1
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0 0x501520UL
+#define NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 0x501540UL
+#define NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 0x501560UL
+#define NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 0x501580UL
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT 0
+#define NIG_REG_PRIORITY_FOR_TC_0 0x501bccUL
+#define NIG_REG_RX_TC0_PRIORITY_MASK 0x501becUL
+#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 0x1f0540UL
+#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 0x1f0534UL
+#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 0x1f053cUL
+#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 0x1f0530UL
+#define PRS_REG_ETS_ARB_CLIENT_IS_STRICT 0x1f0514UL
+#define PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ 0x1f0518UL
+#define BRB_REG_TOTAL_MAC_SIZE 0x3408c0UL
+#define BRB_REG_SHARED_HR_AREA 0x340880UL
+#define BRB_REG_TC_GUARANTIED_0 0x340900UL
+#define BRB_REG_MAIN_TC_GUARANTIED_HYST_0 0x340978UL
+#define BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 0x340c60UL
+#define BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 0x340d38UL
+#define BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 0x340ab0UL
+#define BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 0x340b88UL
+#define BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 0x340c00UL
+#define BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 0x340cd8UL
+#define BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 0x340a50UL
+#define BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 0x340b28UL
+#define PRS_REG_VXLAN_PORT 0x1f0738UL
+#define NIG_REG_VXLAN_PORT 0x50105cUL
+#define PBF_REG_VXLAN_PORT 0xd80518UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
+#define PRS_REG_NGE_PORT 0x1f086cUL
+#define NIG_REG_NGE_PORT 0x508b38UL
+#define PBF_REG_NGE_PORT 0xd8051cUL
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
+#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
+#define NIG_REG_NGE_COMP_VER 0x508b30UL
+#define PBF_REG_NGE_COMP_VER 0xd80524UL
+#define PRS_REG_NGE_COMP_VER 0x1f0878UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
+#define NIG_REG_PKT_PRIORITY_TO_TC 0x501ba4UL
+#define PGLUE_B_REG_START_INIT_PTT_GTT 0x2a8008UL
+#define PGLUE_B_REG_INIT_DONE_PTT_GTT 0x2a800cUL
+#define MISC_REG_AEU_GENERAL_ATTN_35 0x00848cUL
+#define MCP_REG_CPU_STATE 0xe05004UL
+#define MCP_REG_CPU_MODE 0xe05000UL
+#define MCP_REG_CPU_MODE_SOFT_HALT (0x1 << 10)
+#define MCP_REG_CPU_EVENT_MASK 0xe05008UL
+#define PSWHST_REG_VF_DISABLED_ERROR_VALID 0x2a0060UL
+#define PSWHST_REG_VF_DISABLED_ERROR_ADDRESS 0x2a0064UL
+#define PSWHST_REG_VF_DISABLED_ERROR_DATA 0x2a005cUL
+#define PSWHST_REG_INCORRECT_ACCESS_VALID 0x2a0070UL
+#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS 0x2a0074UL
+#define PSWHST_REG_INCORRECT_ACCESS_DATA 0x2a0068UL
+#define PSWHST_REG_INCORRECT_ACCESS_LENGTH 0x2a006cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID 0x050054UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 0x05004cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 0x050050UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x2aa150UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x2aa144UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x2aa148UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x2aa14cUL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x2aa160UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x2aa154UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x2aa158UL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x2aa15cUL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL 0x2aa164UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS 0x2aa54cUL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 0x2aa544UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 0x2aa548UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 0x2aae80UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 0x2aae74UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 0x2aae78UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS 0x2aae7cUL
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x2aa3bcUL
+#define NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT (0x1 << 10)
+#define DORQ_REG_DB_DROP_REASON 0x100a2cUL
+#define DORQ_REG_DB_DROP_DETAILS 0x100a24UL
+#define TM_REG_INT_STS_1 0x2c0190UL
+#define TM_REG_INT_STS_1_PEND_TASK_SCAN (0x1 << 6)
+#define TM_REG_INT_STS_1_PEND_CONN_SCAN (0x1 << 5)
+#define TM_REG_INT_MASK_1 0x2c0194UL
+#define TM_REG_INT_MASK_1_PEND_CONN_SCAN (0x1 << 5)
+#define TM_REG_INT_MASK_1_PEND_TASK_SCAN (0x1 << 6)
+#define MISC_REG_AEU_AFTER_INVERT_1_IGU 0x0087b4UL
+#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 0x0084a8UL
+#define MISC_REG_AEU_ENABLE3_IGU_OUT_0 0x0084a4UL
+#define YSEM_REG_FAST_MEMORY 0x1540000UL
+#define NIG_REG_FLOWCTRL_MODE 0x501ba0UL
+#define TSEM_REG_FAST_MEMORY 0x1740000UL
+#define TSEM_REG_DBG_FRAME_MODE 0x1701408UL
+#define TSEM_REG_SLOW_DBG_ACTIVE 0x1701400UL
+#define TSEM_REG_SLOW_DBG_MODE 0x1701404UL
+#define TSEM_REG_DBG_MODE1_CFG 0x1701420UL
+#define TSEM_REG_SYNC_DBG_EMPTY 0x1701160UL
+#define TSEM_REG_SLOW_DBG_EMPTY 0x1701140UL
+#define TCM_REG_CTX_RBC_ACCS 0x11814c0UL
+#define TCM_REG_AGG_CON_CTX 0x11814c4UL
+#define TCM_REG_SM_CON_CTX 0x11814ccUL
+#define TCM_REG_AGG_TASK_CTX 0x11814c8UL
+#define TCM_REG_SM_TASK_CTX 0x11814d0UL
+#define MSEM_REG_FAST_MEMORY 0x1840000UL
+#define MSEM_REG_DBG_FRAME_MODE 0x1801408UL
+#define MSEM_REG_SLOW_DBG_ACTIVE 0x1801400UL
+#define MSEM_REG_SLOW_DBG_MODE 0x1801404UL
+#define MSEM_REG_DBG_MODE1_CFG 0x1801420UL
+#define MSEM_REG_SYNC_DBG_EMPTY 0x1801160UL
+#define MSEM_REG_SLOW_DBG_EMPTY 0x1801140UL
+#define MCM_REG_CTX_RBC_ACCS 0x1201800UL
+#define MCM_REG_AGG_CON_CTX 0x1201804UL
+#define MCM_REG_SM_CON_CTX 0x120180cUL
+#define MCM_REG_AGG_TASK_CTX 0x1201808UL
+#define MCM_REG_SM_TASK_CTX 0x1201810UL
+#define USEM_REG_FAST_MEMORY 0x1940000UL
+#define USEM_REG_DBG_FRAME_MODE 0x1901408UL
+#define USEM_REG_SLOW_DBG_ACTIVE 0x1901400UL
+#define USEM_REG_SLOW_DBG_MODE 0x1901404UL
+#define USEM_REG_DBG_MODE1_CFG 0x1901420UL
+#define USEM_REG_SYNC_DBG_EMPTY 0x1901160UL
+#define USEM_REG_SLOW_DBG_EMPTY 0x1901140UL
+#define UCM_REG_CTX_RBC_ACCS 0x1281700UL
+#define UCM_REG_AGG_CON_CTX 0x1281704UL
+#define UCM_REG_SM_CON_CTX 0x128170cUL
+#define UCM_REG_AGG_TASK_CTX 0x1281708UL
+#define UCM_REG_SM_TASK_CTX 0x1281710UL
+#define XSEM_REG_FAST_MEMORY 0x1440000UL
+#define XSEM_REG_DBG_FRAME_MODE 0x1401408UL
+#define XSEM_REG_SLOW_DBG_ACTIVE 0x1401400UL
+#define XSEM_REG_SLOW_DBG_MODE 0x1401404UL
+#define XSEM_REG_DBG_MODE1_CFG 0x1401420UL
+#define XSEM_REG_SYNC_DBG_EMPTY 0x1401160UL
+#define XSEM_REG_SLOW_DBG_EMPTY 0x1401140UL
+#define XCM_REG_CTX_RBC_ACCS 0x1001800UL
+#define XCM_REG_AGG_CON_CTX 0x1001804UL
+#define XCM_REG_SM_CON_CTX 0x1001808UL
+#define YSEM_REG_DBG_FRAME_MODE 0x1501408UL
+#define YSEM_REG_SLOW_DBG_ACTIVE 0x1501400UL
+#define YSEM_REG_SLOW_DBG_MODE 0x1501404UL
+#define YSEM_REG_DBG_MODE1_CFG 0x1501420UL
+#define YSEM_REG_SYNC_DBG_EMPTY 0x1501160UL
+#define YCM_REG_CTX_RBC_ACCS 0x1081800UL
+#define YCM_REG_AGG_CON_CTX 0x1081804UL
+#define YCM_REG_SM_CON_CTX 0x108180cUL
+#define YCM_REG_AGG_TASK_CTX 0x1081808UL
+#define YCM_REG_SM_TASK_CTX 0x1081810UL
+#define PSEM_REG_FAST_MEMORY 0x1640000UL
+#define PSEM_REG_DBG_FRAME_MODE 0x1601408UL
+#define PSEM_REG_SLOW_DBG_ACTIVE 0x1601400UL
+#define PSEM_REG_SLOW_DBG_MODE 0x1601404UL
+#define PSEM_REG_DBG_MODE1_CFG 0x1601420UL
+#define PSEM_REG_SYNC_DBG_EMPTY 0x1601160UL
+#define PSEM_REG_SLOW_DBG_EMPTY 0x1601140UL
+#define PCM_REG_CTX_RBC_ACCS 0x1101440UL
+#define PCM_REG_SM_CON_CTX 0x1101444UL
+#define GRC_REG_DBG_SELECT 0x0500a4UL
+#define GRC_REG_DBG_DWORD_ENABLE 0x0500a8UL
+#define GRC_REG_DBG_SHIFT 0x0500acUL
+#define GRC_REG_DBG_FORCE_VALID 0x0500b0UL
+#define GRC_REG_DBG_FORCE_FRAME 0x0500b4UL
+#define PGLUE_B_REG_DBG_SELECT 0x2a8400UL
+#define PGLUE_B_REG_DBG_DWORD_ENABLE 0x2a8404UL
+#define PGLUE_B_REG_DBG_SHIFT 0x2a8408UL
+#define PGLUE_B_REG_DBG_FORCE_VALID 0x2a840cUL
+#define PGLUE_B_REG_DBG_FORCE_FRAME 0x2a8410UL
+#define CNIG_REG_DBG_SELECT_K2 0x218254UL
+#define CNIG_REG_DBG_DWORD_ENABLE_K2 0x218258UL
+#define CNIG_REG_DBG_SHIFT_K2 0x21825cUL
+#define CNIG_REG_DBG_FORCE_VALID_K2 0x218260UL
+#define CNIG_REG_DBG_FORCE_FRAME_K2 0x218264UL
+#define NCSI_REG_DBG_SELECT 0x040474UL
+#define NCSI_REG_DBG_DWORD_ENABLE 0x040478UL
+#define NCSI_REG_DBG_SHIFT 0x04047cUL
+#define NCSI_REG_DBG_FORCE_VALID 0x040480UL
+#define NCSI_REG_DBG_FORCE_FRAME 0x040484UL
+#define BMB_REG_DBG_SELECT 0x540a7cUL
+#define BMB_REG_DBG_DWORD_ENABLE 0x540a80UL
+#define BMB_REG_DBG_SHIFT 0x540a84UL
+#define BMB_REG_DBG_FORCE_VALID 0x540a88UL
+#define BMB_REG_DBG_FORCE_FRAME 0x540a8cUL
+#define PCIE_REG_DBG_SELECT 0x0547e8UL
+#define PHY_PCIE_REG_DBG_SELECT 0x629fe8UL
+#define PCIE_REG_DBG_DWORD_ENABLE 0x0547ecUL
+#define PHY_PCIE_REG_DBG_DWORD_ENABLE 0x629fecUL
+#define PCIE_REG_DBG_SHIFT 0x0547f0UL
+#define PHY_PCIE_REG_DBG_SHIFT 0x629ff0UL
+#define PCIE_REG_DBG_FORCE_VALID 0x0547f4UL
+#define PHY_PCIE_REG_DBG_FORCE_VALID 0x629ff4UL
+#define PCIE_REG_DBG_FORCE_FRAME 0x0547f8UL
+#define PHY_PCIE_REG_DBG_FORCE_FRAME 0x629ff8UL
+#define MCP2_REG_DBG_SELECT 0x052400UL
+#define MCP2_REG_DBG_SHIFT 0x052408UL
+#define MCP2_REG_DBG_FORCE_VALID 0x052440UL
+#define MCP2_REG_DBG_FORCE_FRAME 0x052444UL
+#define PSWHST_REG_DBG_SELECT 0x2a0100UL
+#define PSWHST_REG_DBG_DWORD_ENABLE 0x2a0104UL
+#define PSWHST_REG_DBG_SHIFT 0x2a0108UL
+#define PSWHST_REG_DBG_FORCE_VALID 0x2a010cUL
+#define PSWHST_REG_DBG_FORCE_FRAME 0x2a0110UL
+#define PSWHST2_REG_DBG_SELECT 0x29e058UL
+#define PSWHST2_REG_DBG_DWORD_ENABLE 0x29e05cUL
+#define PSWHST2_REG_DBG_SHIFT 0x29e060UL
+#define PSWHST2_REG_DBG_FORCE_VALID 0x29e064UL
+#define PSWHST2_REG_DBG_FORCE_FRAME 0x29e068UL
+#define PSWRD_REG_DBG_DWORD_ENABLE 0x29c044UL
+#define PSWRD_REG_DBG_SHIFT 0x29c048UL
+#define PSWRD_REG_DBG_FORCE_VALID 0x29c04cUL
+#define PSWRD_REG_DBG_FORCE_FRAME 0x29c050UL
+#define PSWRD2_REG_DBG_SELECT 0x29d400UL
+#define PSWRD2_REG_DBG_DWORD_ENABLE 0x29d404UL
+#define PSWRD2_REG_DBG_SHIFT 0x29d408UL
+#define PSWRD2_REG_DBG_FORCE_VALID 0x29d40cUL
+#define PSWRD2_REG_DBG_FORCE_FRAME 0x29d410UL
+#define PSWWR_REG_DBG_SELECT 0x29a084UL
+#define PSWWR_REG_DBG_DWORD_ENABLE 0x29a088UL
+#define PSWWR_REG_DBG_SHIFT 0x29a08cUL
+#define PSWWR_REG_DBG_FORCE_VALID 0x29a090UL
+#define PSWWR_REG_DBG_FORCE_FRAME 0x29a094UL
+#define PSWRQ_REG_DBG_DWORD_ENABLE 0x280024UL
+#define PSWRQ_REG_DBG_SHIFT 0x280028UL
+#define PSWRQ_REG_DBG_FORCE_VALID 0x28002cUL
+#define PSWRQ_REG_DBG_FORCE_FRAME 0x280030UL
+#define PSWRQ2_REG_DBG_SELECT 0x240100UL
+#define PSWRQ2_REG_DBG_DWORD_ENABLE 0x240104UL
+#define PSWRQ2_REG_DBG_SHIFT 0x240108UL
+#define PSWRQ2_REG_DBG_FORCE_VALID 0x24010cUL
+#define PSWRQ2_REG_DBG_FORCE_FRAME 0x240110UL
+#define PGLCS_REG_DBG_SELECT 0x001d14UL
+#define PGLCS_REG_DBG_DWORD_ENABLE 0x001d18UL
+#define PGLCS_REG_DBG_SHIFT 0x001d1cUL
+#define PGLCS_REG_DBG_FORCE_VALID 0x001d20UL
+#define PGLCS_REG_DBG_FORCE_FRAME 0x001d24UL
+#define PTU_REG_DBG_SELECT 0x560100UL
+#define PTU_REG_DBG_DWORD_ENABLE 0x560104UL
+#define PTU_REG_DBG_SHIFT 0x560108UL
+#define PTU_REG_DBG_FORCE_VALID 0x56010cUL
+#define PTU_REG_DBG_FORCE_FRAME 0x560110UL
+#define DMAE_REG_DBG_SELECT 0x00c510UL
+#define DMAE_REG_DBG_DWORD_ENABLE 0x00c514UL
+#define DMAE_REG_DBG_SHIFT 0x00c518UL
+#define DMAE_REG_DBG_FORCE_VALID 0x00c51cUL
+#define DMAE_REG_DBG_FORCE_FRAME 0x00c520UL
+#define TCM_REG_DBG_SELECT 0x1180040UL
+#define TCM_REG_DBG_DWORD_ENABLE 0x1180044UL
+#define TCM_REG_DBG_SHIFT 0x1180048UL
+#define TCM_REG_DBG_FORCE_VALID 0x118004cUL
+#define TCM_REG_DBG_FORCE_FRAME 0x1180050UL
+#define MCM_REG_DBG_SELECT 0x1200040UL
+#define MCM_REG_DBG_DWORD_ENABLE 0x1200044UL
+#define MCM_REG_DBG_SHIFT 0x1200048UL
+#define MCM_REG_DBG_FORCE_VALID 0x120004cUL
+#define MCM_REG_DBG_FORCE_FRAME 0x1200050UL
+#define UCM_REG_DBG_SELECT 0x1280050UL
+#define UCM_REG_DBG_DWORD_ENABLE 0x1280054UL
+#define UCM_REG_DBG_SHIFT 0x1280058UL
+#define UCM_REG_DBG_FORCE_VALID 0x128005cUL
+#define UCM_REG_DBG_FORCE_FRAME 0x1280060UL
+#define XCM_REG_DBG_SELECT 0x1000040UL
+#define XCM_REG_DBG_DWORD_ENABLE 0x1000044UL
+#define XCM_REG_DBG_SHIFT 0x1000048UL
+#define XCM_REG_DBG_FORCE_VALID 0x100004cUL
+#define XCM_REG_DBG_FORCE_FRAME 0x1000050UL
+#define YCM_REG_DBG_SELECT 0x1080040UL
+#define YCM_REG_DBG_DWORD_ENABLE 0x1080044UL
+#define YCM_REG_DBG_SHIFT 0x1080048UL
+#define YCM_REG_DBG_FORCE_VALID 0x108004cUL
+#define YCM_REG_DBG_FORCE_FRAME 0x1080050UL
+#define PCM_REG_DBG_SELECT 0x1100040UL
+#define PCM_REG_DBG_DWORD_ENABLE 0x1100044UL
+#define PCM_REG_DBG_SHIFT 0x1100048UL
+#define PCM_REG_DBG_FORCE_VALID 0x110004cUL
+#define PCM_REG_DBG_FORCE_FRAME 0x1100050UL
+#define QM_REG_DBG_SELECT 0x2f2e74UL
+#define QM_REG_DBG_DWORD_ENABLE 0x2f2e78UL
+#define QM_REG_DBG_SHIFT 0x2f2e7cUL
+#define QM_REG_DBG_FORCE_VALID 0x2f2e80UL
+#define QM_REG_DBG_FORCE_FRAME 0x2f2e84UL
+#define TM_REG_DBG_SELECT 0x2c07a8UL
+#define TM_REG_DBG_DWORD_ENABLE 0x2c07acUL
+#define TM_REG_DBG_SHIFT 0x2c07b0UL
+#define TM_REG_DBG_FORCE_VALID 0x2c07b4UL
+#define TM_REG_DBG_FORCE_FRAME 0x2c07b8UL
+#define DORQ_REG_DBG_SELECT 0x100ad0UL
+#define DORQ_REG_DBG_DWORD_ENABLE 0x100ad4UL
+#define DORQ_REG_DBG_SHIFT 0x100ad8UL
+#define DORQ_REG_DBG_FORCE_VALID 0x100adcUL
+#define DORQ_REG_DBG_FORCE_FRAME 0x100ae0UL
+#define BRB_REG_DBG_SELECT 0x340ed0UL
+#define BRB_REG_DBG_DWORD_ENABLE 0x340ed4UL
+#define BRB_REG_DBG_SHIFT 0x340ed8UL
+#define BRB_REG_DBG_FORCE_VALID 0x340edcUL
+#define BRB_REG_DBG_FORCE_FRAME 0x340ee0UL
+#define SRC_REG_DBG_SELECT 0x238700UL
+#define SRC_REG_DBG_DWORD_ENABLE 0x238704UL
+#define SRC_REG_DBG_SHIFT 0x238708UL
+#define SRC_REG_DBG_FORCE_VALID 0x23870cUL
+#define SRC_REG_DBG_FORCE_FRAME 0x238710UL
+#define PRS_REG_DBG_SELECT 0x1f0b6cUL
+#define PRS_REG_DBG_DWORD_ENABLE 0x1f0b70UL
+#define PRS_REG_DBG_SHIFT 0x1f0b74UL
+#define PRS_REG_DBG_FORCE_VALID 0x1f0ba0UL
+#define PRS_REG_DBG_FORCE_FRAME 0x1f0ba4UL
+#define TSDM_REG_DBG_SELECT 0xfb0e28UL
+#define TSDM_REG_DBG_DWORD_ENABLE 0xfb0e2cUL
+#define TSDM_REG_DBG_SHIFT 0xfb0e30UL
+#define TSDM_REG_DBG_FORCE_VALID 0xfb0e34UL
+#define TSDM_REG_DBG_FORCE_FRAME 0xfb0e38UL
+#define MSDM_REG_DBG_SELECT 0xfc0e28UL
+#define MSDM_REG_DBG_DWORD_ENABLE 0xfc0e2cUL
+#define MSDM_REG_DBG_SHIFT 0xfc0e30UL
+#define MSDM_REG_DBG_FORCE_VALID 0xfc0e34UL
+#define MSDM_REG_DBG_FORCE_FRAME 0xfc0e38UL
+#define USDM_REG_DBG_SELECT 0xfd0e28UL
+#define USDM_REG_DBG_DWORD_ENABLE 0xfd0e2cUL
+#define USDM_REG_DBG_SHIFT 0xfd0e30UL
+#define USDM_REG_DBG_FORCE_VALID 0xfd0e34UL
+#define USDM_REG_DBG_FORCE_FRAME 0xfd0e38UL
+#define XSDM_REG_DBG_SELECT 0xf80e28UL
+#define XSDM_REG_DBG_DWORD_ENABLE 0xf80e2cUL
+#define XSDM_REG_DBG_SHIFT 0xf80e30UL
+#define XSDM_REG_DBG_FORCE_VALID 0xf80e34UL
+#define XSDM_REG_DBG_FORCE_FRAME 0xf80e38UL
+#define YSDM_REG_DBG_SELECT 0xf90e28UL
+#define YSDM_REG_DBG_DWORD_ENABLE 0xf90e2cUL
+#define YSDM_REG_DBG_SHIFT 0xf90e30UL
+#define YSDM_REG_DBG_FORCE_VALID 0xf90e34UL
+#define YSDM_REG_DBG_FORCE_FRAME 0xf90e38UL
+#define PSDM_REG_DBG_SELECT 0xfa0e28UL
+#define PSDM_REG_DBG_DWORD_ENABLE 0xfa0e2cUL
+#define PSDM_REG_DBG_SHIFT 0xfa0e30UL
+#define PSDM_REG_DBG_FORCE_VALID 0xfa0e34UL
+#define PSDM_REG_DBG_FORCE_FRAME 0xfa0e38UL
+#define TSEM_REG_DBG_SELECT 0x1701528UL
+#define TSEM_REG_DBG_DWORD_ENABLE 0x170152cUL
+#define TSEM_REG_DBG_SHIFT 0x1701530UL
+#define TSEM_REG_DBG_FORCE_VALID 0x1701534UL
+#define TSEM_REG_DBG_FORCE_FRAME 0x1701538UL
+#define MSEM_REG_DBG_SELECT 0x1801528UL
+#define MSEM_REG_DBG_DWORD_ENABLE 0x180152cUL
+#define MSEM_REG_DBG_SHIFT 0x1801530UL
+#define MSEM_REG_DBG_FORCE_VALID 0x1801534UL
+#define MSEM_REG_DBG_FORCE_FRAME 0x1801538UL
+#define USEM_REG_DBG_SELECT 0x1901528UL
+#define USEM_REG_DBG_DWORD_ENABLE 0x190152cUL
+#define USEM_REG_DBG_SHIFT 0x1901530UL
+#define USEM_REG_DBG_FORCE_VALID 0x1901534UL
+#define USEM_REG_DBG_FORCE_FRAME 0x1901538UL
+#define XSEM_REG_DBG_SELECT 0x1401528UL
+#define XSEM_REG_DBG_DWORD_ENABLE 0x140152cUL
+#define XSEM_REG_DBG_SHIFT 0x1401530UL
+#define XSEM_REG_DBG_FORCE_VALID 0x1401534UL
+#define XSEM_REG_DBG_FORCE_FRAME 0x1401538UL
+#define YSEM_REG_DBG_SELECT 0x1501528UL
+#define YSEM_REG_DBG_DWORD_ENABLE 0x150152cUL
+#define YSEM_REG_DBG_SHIFT 0x1501530UL
+#define YSEM_REG_DBG_FORCE_VALID 0x1501534UL
+#define YSEM_REG_DBG_FORCE_FRAME 0x1501538UL
+#define PSEM_REG_DBG_SELECT 0x1601528UL
+#define PSEM_REG_DBG_DWORD_ENABLE 0x160152cUL
+#define PSEM_REG_DBG_SHIFT 0x1601530UL
+#define PSEM_REG_DBG_FORCE_VALID 0x1601534UL
+#define PSEM_REG_DBG_FORCE_FRAME 0x1601538UL
+#define RSS_REG_DBG_SELECT 0x238c4cUL
+#define RSS_REG_DBG_DWORD_ENABLE 0x238c50UL
+#define RSS_REG_DBG_SHIFT 0x238c54UL
+#define RSS_REG_DBG_FORCE_VALID 0x238c58UL
+#define RSS_REG_DBG_FORCE_FRAME 0x238c5cUL
+#define TMLD_REG_DBG_SELECT 0x4d1600UL
+#define TMLD_REG_DBG_DWORD_ENABLE 0x4d1604UL
+#define TMLD_REG_DBG_SHIFT 0x4d1608UL
+#define TMLD_REG_DBG_FORCE_VALID 0x4d160cUL
+#define TMLD_REG_DBG_FORCE_FRAME 0x4d1610UL
+#define MULD_REG_DBG_SELECT 0x4e1600UL
+#define MULD_REG_DBG_DWORD_ENABLE 0x4e1604UL
+#define MULD_REG_DBG_SHIFT 0x4e1608UL
+#define MULD_REG_DBG_FORCE_VALID 0x4e160cUL
+#define MULD_REG_DBG_FORCE_FRAME 0x4e1610UL
+#define YULD_REG_DBG_SELECT 0x4c9600UL
+#define YULD_REG_DBG_DWORD_ENABLE 0x4c9604UL
+#define YULD_REG_DBG_SHIFT 0x4c9608UL
+#define YULD_REG_DBG_FORCE_VALID 0x4c960cUL
+#define YULD_REG_DBG_FORCE_FRAME 0x4c9610UL
+#define XYLD_REG_DBG_SELECT 0x4c1600UL
+#define XYLD_REG_DBG_DWORD_ENABLE 0x4c1604UL
+#define XYLD_REG_DBG_SHIFT 0x4c1608UL
+#define XYLD_REG_DBG_FORCE_VALID 0x4c160cUL
+#define XYLD_REG_DBG_FORCE_FRAME 0x4c1610UL
+#define PRM_REG_DBG_SELECT 0x2306a8UL
+#define PRM_REG_DBG_DWORD_ENABLE 0x2306acUL
+#define PRM_REG_DBG_SHIFT 0x2306b0UL
+#define PRM_REG_DBG_FORCE_VALID 0x2306b4UL
+#define PRM_REG_DBG_FORCE_FRAME 0x2306b8UL
+#define PBF_PB1_REG_DBG_SELECT 0xda0728UL
+#define PBF_PB1_REG_DBG_DWORD_ENABLE 0xda072cUL
+#define PBF_PB1_REG_DBG_SHIFT 0xda0730UL
+#define PBF_PB1_REG_DBG_FORCE_VALID 0xda0734UL
+#define PBF_PB1_REG_DBG_FORCE_FRAME 0xda0738UL
+#define PBF_PB2_REG_DBG_SELECT 0xda4728UL
+#define PBF_PB2_REG_DBG_DWORD_ENABLE 0xda472cUL
+#define PBF_PB2_REG_DBG_SHIFT 0xda4730UL
+#define PBF_PB2_REG_DBG_FORCE_VALID 0xda4734UL
+#define PBF_PB2_REG_DBG_FORCE_FRAME 0xda4738UL
+#define RPB_REG_DBG_SELECT 0x23c728UL
+#define RPB_REG_DBG_DWORD_ENABLE 0x23c72cUL
+#define RPB_REG_DBG_SHIFT 0x23c730UL
+#define RPB_REG_DBG_FORCE_VALID 0x23c734UL
+#define RPB_REG_DBG_FORCE_FRAME 0x23c738UL
+#define BTB_REG_DBG_SELECT 0xdb08c8UL
+#define BTB_REG_DBG_DWORD_ENABLE 0xdb08ccUL
+#define BTB_REG_DBG_SHIFT 0xdb08d0UL
+#define BTB_REG_DBG_FORCE_VALID 0xdb08d4UL
+#define BTB_REG_DBG_FORCE_FRAME 0xdb08d8UL
+#define PBF_REG_DBG_SELECT 0xd80060UL
+#define PBF_REG_DBG_DWORD_ENABLE 0xd80064UL
+#define PBF_REG_DBG_SHIFT 0xd80068UL
+#define PBF_REG_DBG_FORCE_VALID 0xd8006cUL
+#define PBF_REG_DBG_FORCE_FRAME 0xd80070UL
+#define RDIF_REG_DBG_SELECT 0x300500UL
+#define RDIF_REG_DBG_DWORD_ENABLE 0x300504UL
+#define RDIF_REG_DBG_SHIFT 0x300508UL
+#define RDIF_REG_DBG_FORCE_VALID 0x30050cUL
+#define RDIF_REG_DBG_FORCE_FRAME 0x300510UL
+#define TDIF_REG_DBG_SELECT 0x310500UL
+#define TDIF_REG_DBG_DWORD_ENABLE 0x310504UL
+#define TDIF_REG_DBG_SHIFT 0x310508UL
+#define TDIF_REG_DBG_FORCE_VALID 0x31050cUL
+#define TDIF_REG_DBG_FORCE_FRAME 0x310510UL
+#define CDU_REG_DBG_SELECT 0x580704UL
+#define CDU_REG_DBG_DWORD_ENABLE 0x580708UL
+#define CDU_REG_DBG_SHIFT 0x58070cUL
+#define CDU_REG_DBG_FORCE_VALID 0x580710UL
+#define CDU_REG_DBG_FORCE_FRAME 0x580714UL
+#define CCFC_REG_DBG_SELECT 0x2e0500UL
+#define CCFC_REG_DBG_DWORD_ENABLE 0x2e0504UL
+#define CCFC_REG_DBG_SHIFT 0x2e0508UL
+#define CCFC_REG_DBG_FORCE_VALID 0x2e050cUL
+#define CCFC_REG_DBG_FORCE_FRAME 0x2e0510UL
+#define TCFC_REG_DBG_SELECT 0x2d0500UL
+#define TCFC_REG_DBG_DWORD_ENABLE 0x2d0504UL
+#define TCFC_REG_DBG_SHIFT 0x2d0508UL
+#define TCFC_REG_DBG_FORCE_VALID 0x2d050cUL
+#define TCFC_REG_DBG_FORCE_FRAME 0x2d0510UL
+#define IGU_REG_DBG_SELECT 0x181578UL
+#define IGU_REG_DBG_DWORD_ENABLE 0x18157cUL
+#define IGU_REG_DBG_SHIFT 0x181580UL
+#define IGU_REG_DBG_FORCE_VALID 0x181584UL
+#define IGU_REG_DBG_FORCE_FRAME 0x181588UL
+#define CAU_REG_DBG_SELECT 0x1c0ea8UL
+#define CAU_REG_DBG_DWORD_ENABLE 0x1c0eacUL
+#define CAU_REG_DBG_SHIFT 0x1c0eb0UL
+#define CAU_REG_DBG_FORCE_VALID 0x1c0eb4UL
+#define CAU_REG_DBG_FORCE_FRAME 0x1c0eb8UL
+#define UMAC_REG_DBG_SELECT 0x051094UL
+#define UMAC_REG_DBG_DWORD_ENABLE 0x051098UL
+#define UMAC_REG_DBG_SHIFT 0x05109cUL
+#define UMAC_REG_DBG_FORCE_VALID 0x0510a0UL
+#define UMAC_REG_DBG_FORCE_FRAME 0x0510a4UL
+#define NIG_REG_DBG_SELECT 0x502140UL
+#define NIG_REG_DBG_DWORD_ENABLE 0x502144UL
+#define NIG_REG_DBG_SHIFT 0x502148UL
+#define NIG_REG_DBG_FORCE_VALID 0x50214cUL
+#define NIG_REG_DBG_FORCE_FRAME 0x502150UL
+#define WOL_REG_DBG_SELECT 0x600140UL
+#define WOL_REG_DBG_DWORD_ENABLE 0x600144UL
+#define WOL_REG_DBG_SHIFT 0x600148UL
+#define WOL_REG_DBG_FORCE_VALID 0x60014cUL
+#define WOL_REG_DBG_FORCE_FRAME 0x600150UL
+#define BMBN_REG_DBG_SELECT 0x610140UL
+#define BMBN_REG_DBG_DWORD_ENABLE 0x610144UL
+#define BMBN_REG_DBG_SHIFT 0x610148UL
+#define BMBN_REG_DBG_FORCE_VALID 0x61014cUL
+#define BMBN_REG_DBG_FORCE_FRAME 0x610150UL
+#define NWM_REG_DBG_SELECT 0x8000ecUL
+#define NWM_REG_DBG_DWORD_ENABLE 0x8000f0UL
+#define NWM_REG_DBG_SHIFT 0x8000f4UL
+#define NWM_REG_DBG_FORCE_VALID 0x8000f8UL
+#define NWM_REG_DBG_FORCE_FRAME 0x8000fcUL
+#define BRB_REG_BIG_RAM_ADDRESS 0x340800UL
+#define BRB_REG_BIG_RAM_DATA 0x341500UL
+#define BTB_REG_BIG_RAM_ADDRESS 0xdb0800UL
+#define BTB_REG_BIG_RAM_DATA 0xdb0c00UL
+#define BMB_REG_BIG_RAM_ADDRESS 0x540800UL
+#define BMB_REG_BIG_RAM_DATA 0x540f00UL
+#define MISCS_REG_RESET_PL_UA 0x009050UL
+#define MISC_REG_RESET_PL_UA 0x008050UL
+#define MISC_REG_RESET_PL_HV 0x008060UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_1 0x008070UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_2 0x008080UL
+#define SEM_FAST_REG_INT_RAM 0x020000UL
+#define DBG_REG_DBG_BLOCK_ON 0x010454UL
+#define DBG_REG_FRAMING_MODE 0x010058UL
+#define SEM_FAST_REG_DEBUG_MODE 0x000744UL
+#define SEM_FAST_REG_DEBUG_ACTIVE 0x000740UL
+#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE 0x000750UL
+#define SEM_FAST_REG_FILTER_CID 0x000754UL
+#define SEM_FAST_REG_EVENT_ID_RANGE_STRT 0x000760UL
+#define SEM_FAST_REG_EVENT_ID_RANGE_END 0x000764UL
+#define SEM_FAST_REG_FILTER_EVENT_ID 0x000758UL
+#define SEM_FAST_REG_EVENT_ID_MASK 0x00075cUL
+#define SEM_FAST_REG_RECORD_FILTER_ENABLE 0x000768UL
+#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE 0x000750UL
+#define SEM_FAST_REG_DEBUG_ACTIVE 0x000740UL
+#define SEM_FAST_REG_RECORD_FILTER_ENABLE 0x000768UL
+#define DBG_REG_TIMESTAMP_VALID_EN 0x010b58UL
+#define DBG_REG_FILTER_ENABLE 0x0109d0UL
+#define DBG_REG_TRIGGER_ENABLE 0x01054cUL
+#define DBG_REG_FILTER_CNSTR_OPRTN_0 0x010a28UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0 0x01071cUL
+#define DBG_REG_FILTER_CNSTR_DATA_0 0x0109d8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0 0x01059cUL
+#define DBG_REG_FILTER_CNSTR_DATA_MASK_0 0x0109f8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0 0x01065cUL
+#define DBG_REG_FILTER_CNSTR_FRAME_0 0x0109e8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0 0x0105fcUL
+#define DBG_REG_FILTER_CNSTR_FRAME_MASK_0 0x010a08UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0 0x0106bcUL
+#define DBG_REG_FILTER_CNSTR_OFFSET_0 0x010a18UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0 0x0107dcUL
+#define DBG_REG_FILTER_CNSTR_RANGE_0 0x010a38UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0 0x01077cUL
+#define DBG_REG_FILTER_CNSTR_CYCLIC_0 0x010a68UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0 0x0108fcUL
+#define DBG_REG_FILTER_CNSTR_MUST_0 0x010a48UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0 0x01083cUL
+#define DBG_REG_INTR_BUFFER 0x014000UL
+#define DBG_REG_INTR_BUFFER_WR_PTR 0x010404UL
+#define DBG_REG_WRAP_ON_INT_BUFFER 0x010418UL
+#define DBG_REG_INTR_BUFFER_RD_PTR 0x010400UL
+#define DBG_REG_EXT_BUFFER_WR_PTR 0x010410UL
+#define DBG_REG_WRAP_ON_EXT_BUFFER 0x01041cUL
+#define SEM_FAST_REG_STALL_0 0x000488UL
+#define SEM_FAST_REG_STALLED 0x000494UL
+#define SEM_FAST_REG_STORM_REG_FILE 0x008000UL
+#define SEM_FAST_REG_VFC_DATA_WR 0x000b40UL
+#define SEM_FAST_REG_VFC_ADDR 0x000b44UL
+#define SEM_FAST_REG_VFC_DATA_RD 0x000b48UL
+#define SEM_FAST_REG_VFC_DATA_WR 0x000b40UL
+#define SEM_FAST_REG_VFC_ADDR 0x000b44UL
+#define SEM_FAST_REG_VFC_DATA_RD 0x000b48UL
+#define RSS_REG_RSS_RAM_ADDR 0x238c30UL
+#define RSS_REG_RSS_RAM_DATA 0x238c20UL
+#define MISCS_REG_BLOCK_256B_EN 0x009074UL
+#define MCP_REG_CPU_REG_FILE 0xe05200UL
+#define MCP_REG_CPU_REG_FILE_SIZE 32
+#define DBG_REG_CALENDAR_OUT_DATA 0x010480UL
+#define DBG_REG_FULL_MODE 0x010060UL
+#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB 0x010430UL
+#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB 0x010434UL
+#define DBG_REG_TARGET_PACKET_SIZE 0x010b3cUL
+#define DBG_REG_PCI_EXT_BUFFER_SIZE 0x010438UL
+#define DBG_REG_PCI_FUNC_NUM 0x010a98UL
+#define DBG_REG_PCI_LOGIC_ADDR 0x010460UL
+#define DBG_REG_PCI_REQ_CREDIT 0x010440UL
+#define DBG_REG_DEBUG_TARGET 0x01005cUL
+#define DBG_REG_OUTPUT_ENABLE 0x01000cUL
+#define DBG_REG_OUTPUT_ENABLE 0x01000cUL
+#define DBG_REG_DEBUG_TARGET 0x01005cUL
+#define DBG_REG_OTHER_ENGINE_MODE 0x010010UL
+#define NIG_REG_DEBUG_PORT 0x5020d0UL
+#define DBG_REG_ETHERNET_HDR_WIDTH 0x010b38UL
+#define DBG_REG_ETHERNET_HDR_7 0x010b34UL
+#define DBG_REG_ETHERNET_HDR_6 0x010b30UL
+#define DBG_REG_ETHERNET_HDR_5 0x010b2cUL
+#define DBG_REG_ETHERNET_HDR_4 0x010b28UL
+#define DBG_REG_TARGET_PACKET_SIZE 0x010b3cUL
+#define DBG_REG_NIG_DATA_LIMIT_SIZE 0x01043cUL
+#define DBG_REG_TIMESTAMP_VALID_EN 0x010b58UL
+#define DBG_REG_TIMESTAMP_FRAME_EN 0x010b54UL
+#define DBG_REG_TIMESTAMP_TICK 0x010b50UL
+#define DBG_REG_FILTER_ID_NUM 0x0109d4UL
+#define DBG_REG_FILTER_MSG_LENGTH_ENABLE 0x010a78UL
+#define DBG_REG_FILTER_MSG_LENGTH 0x010a7cUL
+#define DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS 0x010a90UL
+#define DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES 0x010a94UL
+#define DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE 0x010a88UL
+#define DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE 0x010a8cUL
+#define DBG_REG_TRIGGER_ENABLE 0x01054cUL
+#define DBG_REG_TRIGGER_STATE_ID_0 0x010554UL
+#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 0x01095cUL
+#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 0x010968UL
+#define DBG_REG_TRIGGER_STATE_SET_COUNT_0 0x010584UL
+#define DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 0x01056cUL
+#define DBG_REG_NO_GRANT_ON_FULL 0x010458UL
+#define DBG_REG_STORM_ID_NUM 0x010b14UL
+#define DBG_REG_CALENDAR_SLOT0 0x010014UL
+#define DBG_REG_HW_ID_NUM 0x010b10UL
+#define DBG_REG_FILTER_ENABLE 0x0109d0UL
+#define DBG_REG_TIMESTAMP 0x010b4cUL
+#define DBG_REG_CPU_TIMEOUT 0x010450UL
+#define DBG_REG_TRIGGER_STATUS_CUR_STATE 0x010b60UL
+#define GRC_REG_TRACE_FIFO_VALID_DATA 0x050064UL
+#define GRC_REG_TRACE_FIFO 0x050068UL
+#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x181530UL
+#define IGU_REG_ERROR_HANDLING_MEMORY 0x181520UL
+#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW 0x05040cUL
+#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW 0x05040cUL
+#define GRC_REG_PROTECTION_OVERRIDE_WINDOW 0x050500UL
+#define TSEM_REG_VF_ERROR 0x1700408UL
+#define USEM_REG_VF_ERROR 0x1900408UL
+#define MSEM_REG_VF_ERROR 0x1800408UL
+#define XSEM_REG_VF_ERROR 0x1400408UL
+#define YSEM_REG_VF_ERROR 0x1500408UL
+#define PSEM_REG_VF_ERROR 0x1600408UL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x2aa118UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT 0x180408UL
+#define IGU_REG_VF_CONFIGURATION 0x180804UL
+#define PSWHST_REG_ZONE_PERMISSION_TABLE 0x2a0800UL
+#define DORQ_REG_VF_USAGE_CNT 0x1009c4UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 0xd806ccUL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 0xd806c8UL
+#define PRS_REG_MSG_CT_MAIN_0 0x1f0a24UL
+#define PRS_REG_MSG_CT_LB_0 0x1f0a28UL
+#define BRB_REG_PER_TC_COUNTERS 0x341a00UL
+
+/* added */
+#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
+#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL
+#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
+#define MISCS_REG_FUNCTION_HIDE 0x0096f0UL
+#define PCIE_REG_PRTY_MASK 0x0547b4UL
+#define PGLUE_B_REG_VF_BAR0_SIZE 0x2aaeb4UL
+#define BAR0_MAP_REG_YSDM_RAM 0x1e80000UL
+#define SEM_FAST_REG_INT_RAM_SIZE 20480
+#define MCP_REG_SCRATCH_SIZE 57344
+
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT 24
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT 24
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT 16
+#define DORQ_REG_DB_DROP_DETAILS_ADDRESS 0x100a1cUL
+
+/* 8.10.9.0 FW */
+#define NIG_REG_VXLAN_CTRL 0x50105cUL
+#define PRS_REG_SEARCH_ROCE 0x1f040cUL
+#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
+#define PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT 0
+#define PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT 8
+#define CCFC_REG_WEAK_ENABLE_VF 0x2e0704UL
+#define TCFC_REG_STRONG_ENABLE_VF 0x2d070cUL
+#define TCFC_REG_WEAK_ENABLE_VF 0x2d0704UL
+#define PRS_REG_SEARCH_GFT 0x1f11bcUL
+#define PRS_REG_LOAD_L2_FILTER 0x1f0198UL
+#define PRS_REG_GFT_CAM 0x1f1100UL
+#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL
+#define PGLUE_B_REG_MSDM_VF_SHIFT_B 0x2aa1c4UL
+#define PGLUE_B_REG_MSDM_OFFSET_MASK_B 0x2aa1c0UL
+#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST 0x1f0a0cUL
+#define PRS_REG_SEARCH_FCOE 0x1f0408UL
+#define PGLUE_B_REG_PGL_ADDR_E8_F0 0x2aaf98UL
+#define NIG_REG_DSCP_TO_TC_MAP_ENABLE 0x5088f8UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0 0x2aafa0UL
+#define PRS_REG_ROCE_DEST_QP_MAX_PF 0x1f0430UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0 0x2aafa4UL
+#define IGU_REG_WRITE_DONE_PENDING 0x180900UL
+#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR 0x50196cUL
+#define PRS_REG_MSG_INFO 0x1f0a1cUL
+#define BAR0_MAP_REG_XSDM_RAM 0x1e00000UL
+
+/* 8.18.7.0 FW */
+#define BRB_REG_INT_MASK_10 0x3401b8UL
+
+#define IGU_REG_PRODUCER_MEMORY 0x182000UL
+#define IGU_REG_CONSUMER_MEM 0x183000UL
+
+#define CDU_REG_CCFC_CTX_VALID0 0x580400UL
+#define CDU_REG_CCFC_CTX_VALID1 0x580404UL
+#define CDU_REG_TCFC_CTX_VALID0 0x580408UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL
+#define MISCS_REG_RESET_PL_HV_2_K2_E5 0x009150UL
+#define CNIG_REG_NW_PORT_MODE_BB 0x218200UL
+#define CNIG_REG_PMEG_IF_CMD_BB 0x21821cUL
+#define CNIG_REG_PMEG_IF_ADDR_BB 0x218224UL
+#define CNIG_REG_PMEG_IF_WRDATA_BB 0x218228UL
+#define NWM_REG_MAC0_K2_E5 0x800400UL
+#define CNIG_REG_NIG_PORT0_CONF_K2_E5 0x218200UL
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT 0
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT 1
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT 3
+#define ETH_MAC_REG_XIF_MODE_K2_E5 0x000080UL
+#define ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT 0
+#define ETH_MAC_REG_FRM_LENGTH_K2_E5 0x000014UL
+#define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT 0
+#define ETH_MAC_REG_TX_IPG_LENGTH_K2_E5 0x000044UL
+#define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT 0
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5 0x00001cUL
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT 0
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5 0x000020UL
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT 16
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT 0
+#define ETH_MAC_REG_COMMAND_CONFIG_K2_E5 0x000008UL
+#define MISC_REG_XMAC_CORE_PORT_MODE_BB 0x008c08UL
+#define MISC_REG_XMAC_PHY_PORT_MODE_BB 0x008c04UL
+#define XMAC_REG_MODE_BB 0x210008UL
+#define XMAC_REG_RX_MAX_SIZE_BB 0x210040UL
+#define XMAC_REG_TX_CTRL_LO_BB 0x210020UL
+#define XMAC_REG_CTRL_BB 0x210000UL
+#define XMAC_REG_CTRL_TX_EN_BB (0x1 << 0)
+#define XMAC_REG_CTRL_RX_EN_BB (0x1 << 1)
+#define XMAC_REG_RX_CTRL_BB 0x210030UL
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB (0x1 << 12)
+
+#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5 0x2aaf98UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5 0x2aafa0UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5 0x2aafa4UL
+#define PGLUE_B_REG_PGL_ADDR_88_F0_BB 0x2aa404UL
+#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB 0x2aa408UL
+#define PGLUE_B_REG_PGL_ADDR_90_F0_BB 0x2aa40cUL
+#define PGLUE_B_REG_PGL_ADDR_94_F0_BB 0x2aa410UL
+#define MISCS_REG_FUNCTION_HIDE_BB_K2 0x0096f0UL
+#define PCIE_REG_PRTY_MASK_K2_E5 0x0547b4UL
+#define PGLUE_B_REG_VF_BAR0_SIZE_K2_E5 0x2aaeb4UL
+
+#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
+
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 0x501b00UL
+
+#define PSWRQ2_REG_WR_MBS0 0x240400UL
+#define PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE 0x2aae30UL
+#define DORQ_REG_PF_USAGE_CNT 0x1009c0UL
+#define DORQ_REG_DPM_FORCE_ABORT 0x1009d8UL
+#define DORQ_REG_PF_OVFL_STICKY 0x1009d0UL
+#define DORQ_REG_INT_STS 0x100180UL
+ #define DORQ_REG_INT_STS_DB_DROP (0x1 << 1)
+ #define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR (0x1 << 2)
+ #define DORQ_REG_INT_STS_DORQ_FIFO_AFULL (0x1 << 3)
+#define DORQ_REG_DB_DROP_DETAILS_REL 0x100a28UL
+#define DORQ_REG_INT_STS_WR 0x100188UL
+#define DORQ_REG_DB_DROP_DETAILS_REASON 0x100a20UL
+#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL
+ #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1 << 10)
+#define PRS_REG_SEARCH_TENANT_ID 0x1f044cUL
+#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
+
+#define RSS_REG_RSS_RAM_MASK 0x238c10UL
diff --git a/src/spdk/dpdk/drivers/net/qede/qede_ethdev.c b/src/spdk/dpdk/drivers/net/qede/qede_ethdev.c
new file mode 100644
index 00000000..df52ea92
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/qede_ethdev.c
@@ -0,0 +1,3452 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include "qede_ethdev.h"
+#include <rte_alarm.h>
+#include <rte_version.h>
+#include <rte_kvargs.h>
+
+/* Globals */
+int qede_logtype_init;
+int qede_logtype_driver;
+
+static const struct qed_eth_ops *qed_ops;
+#define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */
+
+/* VXLAN tunnel classification mapping */
+const struct _qede_udp_tunn_types {
+ uint16_t rte_filter_type;
+ enum ecore_filter_ucast_type qede_type;
+ enum ecore_tunn_clss qede_tunn_clss;
+ const char *string;
+} qede_tunn_types[] = {
+ {
+ ETH_TUNNEL_FILTER_OMAC,
+ ECORE_FILTER_MAC,
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ "outer-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_VNI,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_VLAN,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "outer-mac and vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VNI,
+ "vni and inner-mac",
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "vni and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_OIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-IP"
+ },
+ {
+ ETH_TUNNEL_FILTER_IIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "inner-IP"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "OMAC_TENID_IMAC"
+ },
+};
+
+struct rte_qede_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
+
+static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
+ {"rx_unicast_bytes",
+ offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
+ {"rx_multicast_bytes",
+ offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
+ {"rx_broadcast_bytes",
+ offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
+ {"rx_unicast_packets",
+ offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
+ {"rx_multicast_packets",
+ offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
+ {"rx_broadcast_packets",
+ offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
+
+ {"tx_unicast_bytes",
+ offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
+ {"tx_multicast_bytes",
+ offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
+ {"tx_broadcast_bytes",
+ offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
+ {"tx_unicast_packets",
+ offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
+ {"tx_multicast_packets",
+ offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
+ {"tx_broadcast_packets",
+ offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
+
+ {"rx_64_byte_packets",
+ offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
+ {"rx_65_to_127_byte_packets",
+ offsetof(struct ecore_eth_stats_common,
+ rx_65_to_127_byte_packets)},
+ {"rx_128_to_255_byte_packets",
+ offsetof(struct ecore_eth_stats_common,
+ rx_128_to_255_byte_packets)},
+ {"rx_256_to_511_byte_packets",
+ offsetof(struct ecore_eth_stats_common,
+ rx_256_to_511_byte_packets)},
+ {"rx_512_to_1023_byte_packets",
+ offsetof(struct ecore_eth_stats_common,
+ rx_512_to_1023_byte_packets)},
+ {"rx_1024_to_1518_byte_packets",
+ offsetof(struct ecore_eth_stats_common,
+ rx_1024_to_1518_byte_packets)},
+ {"tx_64_byte_packets",
+ offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
+ {"tx_65_to_127_byte_packets",
+ offsetof(struct ecore_eth_stats_common,
+ tx_65_to_127_byte_packets)},
+ {"tx_128_to_255_byte_packets",
+ offsetof(struct ecore_eth_stats_common,
+ tx_128_to_255_byte_packets)},
+ {"tx_256_to_511_byte_packets",
+ offsetof(struct ecore_eth_stats_common,
+ tx_256_to_511_byte_packets)},
+ {"tx_512_to_1023_byte_packets",
+ offsetof(struct ecore_eth_stats_common,
+ tx_512_to_1023_byte_packets)},
+ {"tx_1024_to_1518_byte_packets",
+ offsetof(struct ecore_eth_stats_common,
+ tx_1024_to_1518_byte_packets)},
+
+ {"rx_mac_crtl_frames",
+ offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
+ {"tx_mac_control_frames",
+ offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
+ {"rx_pause_frames",
+ offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
+ {"tx_pause_frames",
+ offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
+ {"rx_priority_flow_control_frames",
+ offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
+ {"tx_priority_flow_control_frames",
+ offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
+
+ {"rx_crc_errors",
+ offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
+ {"rx_align_errors",
+ offsetof(struct ecore_eth_stats_common, rx_align_errors)},
+ {"rx_carrier_errors",
+ offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
+ {"rx_oversize_packet_errors",
+ offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
+ {"rx_jabber_errors",
+ offsetof(struct ecore_eth_stats_common, rx_jabbers)},
+ {"rx_undersize_packet_errors",
+ offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
+ {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
+ {"rx_host_buffer_not_available",
+ offsetof(struct ecore_eth_stats_common, no_buff_discards)},
+ /* Number of packets discarded because they are bigger than MTU */
+ {"rx_packet_too_big_discards",
+ offsetof(struct ecore_eth_stats_common,
+ packet_too_big_discard)},
+ {"rx_ttl_zero_discards",
+ offsetof(struct ecore_eth_stats_common, ttl0_discard)},
+ {"rx_multi_function_tag_filter_discards",
+ offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
+ {"rx_mac_filter_discards",
+ offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
+ {"rx_hw_buffer_truncates",
+ offsetof(struct ecore_eth_stats_common, brb_truncates)},
+ {"rx_hw_buffer_discards",
+ offsetof(struct ecore_eth_stats_common, brb_discards)},
+ {"tx_error_drop_packets",
+ offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
+
+ {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
+ {"rx_mac_unicast_packets",
+ offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
+ {"rx_mac_multicast_packets",
+ offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
+ {"rx_mac_broadcast_packets",
+ offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
+ {"rx_mac_frames_ok",
+ offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
+ {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
+ {"tx_mac_unicast_packets",
+ offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
+ {"tx_mac_multicast_packets",
+ offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
+ {"tx_mac_broadcast_packets",
+ offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
+
+ {"lro_coalesced_packets",
+ offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
+ {"lro_coalesced_events",
+ offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
+ {"lro_aborts_num",
+ offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
+ {"lro_not_coalesced_packets",
+ offsetof(struct ecore_eth_stats_common,
+ tpa_not_coalesced_pkts)},
+ {"lro_coalesced_bytes",
+ offsetof(struct ecore_eth_stats_common,
+ tpa_coalesced_bytes)},
+};
+
+static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
+ {"rx_1519_to_1522_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_1519_to_1522_byte_packets)},
+ {"rx_1519_to_2047_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_1519_to_2047_byte_packets)},
+ {"rx_2048_to_4095_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_2048_to_4095_byte_packets)},
+ {"rx_4096_to_9216_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_4096_to_9216_byte_packets)},
+ {"rx_9217_to_16383_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_9217_to_16383_byte_packets)},
+
+ {"tx_1519_to_2047_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_1519_to_2047_byte_packets)},
+ {"tx_2048_to_4095_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_2048_to_4095_byte_packets)},
+ {"tx_4096_to_9216_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_4096_to_9216_byte_packets)},
+ {"tx_9217_to_16383_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_9217_to_16383_byte_packets)},
+
+ {"tx_lpi_entry_count",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
+ {"tx_total_collisions",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
+};
+
+static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
+ {"rx_1519_to_max_byte_packets",
+ offsetof(struct ecore_eth_stats, ah) +
+ offsetof(struct ecore_eth_stats_ah,
+ rx_1519_to_max_byte_packets)},
+ {"tx_1519_to_max_byte_packets",
+ offsetof(struct ecore_eth_stats, ah) +
+ offsetof(struct ecore_eth_stats_ah,
+ tx_1519_to_max_byte_packets)},
+};
+
+static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
+ {"rx_q_segments",
+ offsetof(struct qede_rx_queue, rx_segs)},
+ {"rx_q_hw_errors",
+ offsetof(struct qede_rx_queue, rx_hw_errors)},
+ {"rx_q_allocation_errors",
+ offsetof(struct qede_rx_queue, rx_alloc_errors)}
+};
+
+static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
+{
+ ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
+}
+
+static void
+qede_interrupt_handler_intx(void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ u64 status;
+
+ /* Check if our device actually raised an interrupt */
+ status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
+ if (status & 0x1) {
+ qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+
+ if (rte_intr_enable(eth_dev->intr_handle))
+ DP_ERR(edev, "rte_intr_enable failed\n");
+ }
+}
+
+static void
+qede_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+ if (rte_intr_enable(eth_dev->intr_handle))
+ DP_ERR(edev, "rte_intr_enable failed\n");
+}
+
+static void
+qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
+{
+ rte_memcpy(&qdev->dev_info, info, sizeof(*info));
+ qdev->ops = qed_ops;
+}
+
+static void qede_print_adapter_info(struct qede_dev *qdev)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_dev_info *info = &qdev->dev_info.common;
+ static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
+ static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
+
+ DP_INFO(edev, "*********************************\n");
+ DP_INFO(edev, " DPDK version:%s\n", rte_version());
+ DP_INFO(edev, " Chip details : %s %c%d\n",
+ ECORE_IS_BB(edev) ? "BB" : "AH",
+ 'A' + edev->chip_rev,
+ (int)edev->chip_metal);
+ snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
+ info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
+ snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
+ ver_str, QEDE_PMD_VERSION);
+ DP_INFO(edev, " Driver version : %s\n", drv_ver);
+ DP_INFO(edev, " Firmware version : %s\n", ver_str);
+
+ snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
+ "%d.%d.%d.%d",
+ (info->mfw_rev >> 24) & 0xff,
+ (info->mfw_rev >> 16) & 0xff,
+ (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
+ DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
+ DP_INFO(edev, " Firmware file : %s\n", fw_file);
+ DP_INFO(edev, "*********************************\n");
+}
+
+static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
+{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ unsigned int i = 0, j = 0, qid;
+ unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+ struct qede_tx_queue *txq;
+
+ DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
+
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+ for_each_rss(qid) {
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rcv_pkts), 0,
+ sizeof(uint64_t));
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rx_hw_errors), 0,
+ sizeof(uint64_t));
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
+ sizeof(uint64_t));
+
+ if (xstats)
+ for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
+ OSAL_MEMSET((((char *)
+ (qdev->fp_array[qid].rxq)) +
+ qede_rxq_xstats_strings[j].offset),
+ 0,
+ sizeof(uint64_t));
+
+ i++;
+ if (i == rxq_stat_cntrs)
+ break;
+ }
+
+ i = 0;
+
+ for_each_tss(qid) {
+ txq = qdev->fp_array[qid].txq;
+
+ OSAL_MEMSET((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue, xmit_pkts)), 0,
+ sizeof(uint64_t));
+
+ i++;
+ if (i == txq_stat_cntrs)
+ break;
+ }
+}
+
+static int
+qede_stop_vport(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ uint8_t vport_id;
+ int rc;
+ int i;
+
+ vport_id = 0;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
+ vport_id);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ DP_INFO(edev, "vport stopped\n");
+
+ return 0;
+}
+
+static int
+qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
+{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_start_params params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ if (qdev->vport_started)
+ qede_stop_vport(edev);
+
+ memset(&params, 0, sizeof(params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ /* @DPDK - Disable FW placement */
+ params.zero_placement_offset = 1;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_start(p_hwfn, &params);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ return rc;
+ }
+ }
+ ecore_reset_vport_stats(edev);
+ qdev->vport_started = true;
+ DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
+
+ return 0;
+}
+
+#define QEDE_NPAR_TX_SWITCHING "npar_tx_switching"
+#define QEDE_VF_TX_SWITCHING "vf_tx_switching"
+
+/* Activate or deactivate vport via vport-update */
+int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
+ int rc = -1;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_vport_active_rx_flg = 1;
+ params.update_vport_active_tx_flg = 1;
+ params.vport_active_rx_flg = flg;
+ params.vport_active_tx_flg = flg;
+ if (~qdev->enable_tx_switching & flg) {
+ params.update_tx_switching_flg = 1;
+ params.tx_switching_flg = !flg;
+ }
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update vport\n");
+ break;
+ }
+ }
+ DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
+
+ return rc;
+}
+
+static void
+qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
+ uint16_t mtu, bool enable)
+{
+ /* Enable LRO in split mode */
+ sge_tpa_params->tpa_ipv4_en_flg = enable;
+ sge_tpa_params->tpa_ipv6_en_flg = enable;
+ sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
+ sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
+ /* set if tpa enable changes */
+ sge_tpa_params->update_tpa_en_flg = 1;
+ /* set if tpa parameters should be handled */
+ sge_tpa_params->update_tpa_param_flg = enable;
+
+ sge_tpa_params->max_buffers_per_cqe = 20;
+ /* Enable TPA in split mode. In this mode each TPA segment
+ * starts on the new BD, so there is one BD per segment.
+ */
+ sge_tpa_params->tpa_pkt_split_flg = 1;
+ sge_tpa_params->tpa_hdr_data_split_flg = 0;
+ sge_tpa_params->tpa_gro_consistent_flg = 0;
+ sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ sge_tpa_params->tpa_max_size = 0x7FFF;
+ sge_tpa_params->tpa_min_size_to_start = mtu / 2;
+ sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
+}
+
+/* Enable/disable LRO via vport-update */
+int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_sge_tpa_params tpa_params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+ qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
+ params.vport_id = 0;
+ params.sge_tpa_params = &tpa_params;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update LRO\n");
+ return -1;
+ }
+ }
+ qdev->enable_lro = flg;
+ eth_dev->data->lro = flg;
+
+ DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
+{
+ memset(ucast, 0, sizeof(struct ecore_filter_ucast));
+ ucast->is_rx_filter = true;
+ ucast->is_tx_filter = true;
+ /* ucast->assert_on_error = true; - For debug */
+}
+
+static int
+qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
+ enum qed_filter_rx_mode_type type)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_filter_accept_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+
+ flags.update_rx_mode_config = 1;
+ flags.update_tx_mode_config = 1;
+ flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
+ flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ if (IS_VF(edev)) {
+ flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
+ }
+ } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+ } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
+ QED_FILTER_RX_MODE_TYPE_PROMISC)) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
+ ECORE_ACCEPT_MCAST_UNMATCHED;
+ }
+
+ return ecore_filter_accept_cmd(edev, 0, flags, false, false,
+ ECORE_SPQ_MODE_CB, NULL);
+}
+
+static int
+qede_tunnel_update(struct qede_dev *qdev,
+ struct ecore_tunnel_info *tunn_info)
+{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_hwfn *p_hwfn;
+ struct ecore_ptt *p_ptt;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ if (IS_PF(edev)) {
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Can't acquire PTT\n");
+ return -EAGAIN;
+ }
+ } else {
+ p_ptt = NULL;
+ }
+
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
+ tunn_info, ECORE_SPQ_MODE_CB, NULL);
+ if (IS_PF(edev))
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ if (rc != ECORE_SUCCESS)
+ break;
+ }
+
+ return rc;
+}
+
+static int
+qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ if (qdev->vxlan.enable == enable)
+ return ECORE_SUCCESS;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.vxlan.b_update_mode = true;
+ tunn.vxlan.b_mode_enabled = enable;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+ tunn.vxlan.tun_cls = clss;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->vxlan.enable = enable;
+ qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
+ DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
+ enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ }
+
+ return rc;
+}
+
+static int
+qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.l2_geneve.b_update_mode = true;
+ tunn.l2_geneve.b_mode_enabled = enable;
+ tunn.ip_geneve.b_update_mode = true;
+ tunn.ip_geneve.b_mode_enabled = enable;
+ tunn.l2_geneve.tun_cls = clss;
+ tunn.ip_geneve.tun_cls = clss;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->geneve.enable = enable;
+ qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
+ DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
+ enable ? "enabled" : "disabled", qdev->geneve.udp_port);
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ clss);
+ }
+
+ return rc;
+}
+
+static int
+qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.ip_gre.b_update_mode = true;
+ tunn.ip_gre.b_mode_enabled = enable;
+ tunn.ip_gre.tun_cls = clss;
+ tunn.ip_gre.tun_cls = clss;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->ipgre.enable = enable;
+ DP_INFO(edev, "IPGRE is %s\n",
+ enable ? "enabled" : "disabled");
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ clss);
+ }
+
+ return rc;
+}
+
+static int
+qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ enum rte_eth_tunnel_type tunn_type, bool enable)
+{
+ int rc = -EINVAL;
+
+ switch (tunn_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ rc = qede_vxlan_enable(eth_dev, clss, enable);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ rc = qede_geneve_enable(eth_dev, clss, enable);
+ break;
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ rc = qede_ipgre_enable(eth_dev, clss, enable);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qede_ucast_entry *tmp = NULL;
+ struct qede_ucast_entry *u;
+ struct ether_addr *mac_addr;
+
+ mac_addr = (struct ether_addr *)ucast->mac;
+ if (add) {
+ SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
+ if ((memcmp(mac_addr, &tmp->mac,
+ ETHER_ADDR_LEN) == 0) &&
+ ucast->vni == tmp->vni &&
+ ucast->vlan == tmp->vlan) {
+ DP_INFO(edev, "Unicast MAC is already added"
+ " with vlan = %u, vni = %u\n",
+ ucast->vlan, ucast->vni);
+ return 0;
+ }
+ }
+ u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!u) {
+ DP_ERR(edev, "Did not allocate memory for ucast\n");
+ return -ENOMEM;
+ }
+ ether_addr_copy(mac_addr, &u->mac);
+ u->vlan = ucast->vlan;
+ u->vni = ucast->vni;
+ SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
+ qdev->num_uc_addr++;
+ } else {
+ SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
+ if ((memcmp(mac_addr, &tmp->mac,
+ ETHER_ADDR_LEN) == 0) &&
+ ucast->vlan == tmp->vlan &&
+ ucast->vni == tmp->vni)
+ break;
+ }
+ if (tmp == NULL) {
+ DP_INFO(edev, "Unicast MAC is not found\n");
+ return -EINVAL;
+ }
+ SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
+ qdev->num_uc_addr--;
+ }
+
+ return 0;
+}
+
+static int
+qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_filter_mcast mcast;
+ struct qede_mcast_entry *m = NULL;
+ uint8_t i;
+ int rc;
+
+ for (i = 0; i < mc_addrs_num; i++) {
+ m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!m) {
+ DP_ERR(edev, "Did not allocate memory for mcast\n");
+ return -ENOMEM;
+ }
+ ether_addr_copy(&mc_addrs[i], &m->mac);
+ SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
+ }
+ memset(&mcast, 0, sizeof(mcast));
+ mcast.num_mc_addrs = mc_addrs_num;
+ mcast.opcode = ECORE_FILTER_ADD;
+ for (i = 0; i < mc_addrs_num; i++)
+ ether_addr_copy(&mc_addrs[i], (struct ether_addr *)
+ &mcast.mac[i]);
+ rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to add multicast filter (rc = %d\n)", rc);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qede_mcast_entry *tmp = NULL;
+ struct ecore_filter_mcast mcast;
+ int j;
+ int rc;
+
+ memset(&mcast, 0, sizeof(mcast));
+ mcast.num_mc_addrs = qdev->num_mc_addr;
+ mcast.opcode = ECORE_FILTER_REMOVE;
+ j = 0;
+ SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+ ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]);
+ j++;
+ }
+ rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to delete multicast filter\n");
+ return -1;
+ }
+ /* Init the list */
+ while (!SLIST_EMPTY(&qdev->mc_list_head)) {
+ tmp = SLIST_FIRST(&qdev->mc_list_head);
+ SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
+ }
+ SLIST_INIT(&qdev->mc_list_head);
+
+ return 0;
+}
+
+static enum _ecore_status_t
+qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+
+ if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
+ DP_ERR(edev, "Ucast filter table limit exceeded,"
+ " Please enable promisc mode\n");
+ return ECORE_INVAL;
+ }
+
+ rc = qede_ucast_filter(eth_dev, ucast, add);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ /* Indicate error only for add filter operation.
+ * Delete filter operations are not severe.
+ */
+ if ((rc != ECORE_SUCCESS) && add)
+ DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
+ rc, add);
+
+ return rc;
+}
+
+static int
+qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
+ __rte_unused uint32_t index, __rte_unused uint32_t pool)
+{
+ struct ecore_filter_ucast ucast;
+ int re;
+
+ if (!is_valid_assigned_ether_addr(mac_addr))
+ return -EINVAL;
+
+ qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_ADD;
+ ucast.type = ECORE_FILTER_MAC;
+ ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
+ re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
+ return re;
+}
+
+static void
+qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct ecore_filter_ucast ucast;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ if (index >= qdev->dev_info.num_mac_filters) {
+ DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
+ index, qdev->dev_info.num_mac_filters);
+ return;
+ }
+
+ if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
+ return;
+
+ qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_REMOVE;
+ ucast.type = ECORE_FILTER_MAC;
+
+ /* Use the index maintained by rte */
+ ether_addr_copy(&eth_dev->data->mac_addrs[index],
+ (struct ether_addr *)&ucast.mac);
+
+ qede_mac_int_ops(eth_dev, &ucast, false);
+}
+
+static int
+qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
+ mac_addr->addr_bytes)) {
+ DP_ERR(edev, "Setting MAC address is not allowed\n");
+ return -EPERM;
+ }
+
+ qede_mac_addr_remove(eth_dev, 0);
+
+ return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
+}
+
+static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
+{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
+ int rc;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_accept_any_vlan_flg = 1;
+ params.accept_any_vlan = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to configure accept-any-vlan\n");
+ return;
+ }
+ }
+
+ DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
+}
+
+static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
+ int rc;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_inner_vlan_removal_flg = 1;
+ params.inner_vlan_removal_flg = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update vport\n");
+ return -1;
+ }
+ }
+
+ DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
+ return 0;
+}
+
+static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
+ uint16_t vlan_id, int on)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qed_dev_eth_info *dev_info = &qdev->dev_info;
+ struct qede_vlan_entry *tmp = NULL;
+ struct qede_vlan_entry *vlan;
+ struct ecore_filter_ucast ucast;
+ int rc;
+
+ if (on) {
+ if (qdev->configured_vlans == dev_info->num_vlan_filters) {
+ DP_ERR(edev, "Reached max VLAN filter limit"
+ " enabling accept_any_vlan\n");
+ qede_config_accept_any_vlan(qdev, true);
+ return 0;
+ }
+
+ SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
+ if (tmp->vid == vlan_id) {
+ DP_INFO(edev, "VLAN %u already configured\n",
+ vlan_id);
+ return 0;
+ }
+ }
+
+ vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
+ RTE_CACHE_LINE_SIZE);
+
+ if (!vlan) {
+ DP_ERR(edev, "Did not allocate memory for VLAN\n");
+ return -ENOMEM;
+ }
+
+ qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_ADD;
+ ucast.type = ECORE_FILTER_VLAN;
+ ucast.vlan = vlan_id;
+ rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
+ NULL);
+ if (rc != 0) {
+ DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
+ rc);
+ rte_free(vlan);
+ } else {
+ vlan->vid = vlan_id;
+ SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
+ qdev->configured_vlans++;
+ DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
+ vlan_id, qdev->configured_vlans);
+ }
+ } else {
+ SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
+ if (tmp->vid == vlan_id)
+ break;
+ }
+
+ if (!tmp) {
+ if (qdev->configured_vlans == 0) {
+ DP_INFO(edev,
+ "No VLAN filters configured yet\n");
+ return 0;
+ }
+
+ DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
+ return -EINVAL;
+ }
+
+ SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
+
+ qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_REMOVE;
+ ucast.type = ECORE_FILTER_VLAN;
+ ucast.vlan = vlan_id;
+ rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
+ NULL);
+ if (rc != 0) {
+ DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
+ vlan_id, rc);
+ } else {
+ qdev->configured_vlans--;
+ DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
+ vlan_id, qdev->configured_vlans);
+ }
+ }
+
+ return rc;
+}
+
+static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ (void)qede_vlan_stripping(eth_dev, 1);
+ else
+ (void)qede_vlan_stripping(eth_dev, 0);
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ /* VLAN filtering kicks in when a VLAN is added */
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ qede_vlan_filter_set(eth_dev, 0, 1);
+ } else {
+ if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
+ DP_ERR(edev,
+ " Please remove existing VLAN filters"
+ " before disabling VLAN filtering\n");
+ /* Signal app that VLAN filtering is still
+ * enabled
+ */
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+ } else {
+ qede_vlan_filter_set(eth_dev, 0, 0);
+ }
+ }
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK)
+ DP_ERR(edev, "Extend VLAN not supported\n");
+
+ qdev->vlan_offload_mask = mask;
+
+ DP_INFO(edev, "VLAN offload mask %d\n", mask);
+
+ return 0;
+}
+
+static void qede_prandom_bytes(uint32_t *buff)
+{
+ uint8_t i;
+
+ srand((unsigned int)time(NULL));
+ for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
+ buff[i] = rand();
+}
+
+int qede_config_rss(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
+ struct rte_eth_rss_reta_entry64 reta_conf[2];
+ struct rte_eth_rss_conf rss_conf;
+ uint32_t i, id, pos, q;
+
+ rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if (!rss_conf.rss_key) {
+ DP_INFO(edev, "Applying driver default key\n");
+ rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
+ qede_prandom_bytes(&def_rss_key[0]);
+ rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
+ }
+
+ /* Configure RSS hash */
+ if (qede_rss_hash_update(eth_dev, &rss_conf))
+ return -EINVAL;
+
+ /* Configure default RETA */
+ memset(reta_conf, 0, sizeof(reta_conf));
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
+ reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ id = i / RTE_RETA_GROUP_SIZE;
+ pos = i % RTE_RETA_GROUP_SIZE;
+ q = i % QEDE_RSS_COUNT(qdev);
+ reta_conf[id].reta[pos] = q;
+ }
+ if (qede_rss_reta_update(eth_dev, &reta_conf[0],
+ ECORE_RSS_IND_TABLE_SIZE))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void qede_fastpath_start(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ ecore_hw_start_fastpath(p_hwfn);
+ }
+}
+
+static int qede_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Update MTU only if it has changed */
+ if (eth_dev->data->mtu != qdev->mtu) {
+ if (qede_update_mtu(eth_dev, qdev->mtu))
+ goto err;
+ }
+
+ /* Configure TPA parameters */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (qede_enable_tpa(eth_dev, true))
+ return -EINVAL;
+ /* Enable scatter mode for LRO */
+ if (!eth_dev->data->scattered_rx)
+ rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
+ }
+
+ /* Start queues */
+ if (qede_start_queues(eth_dev))
+ goto err;
+
+ if (IS_PF(edev))
+ qede_reset_queue_stats(qdev, true);
+
+ /* Newer SR-IOV PF driver expects RX/TX queues to be started before
+ * enabling RSS. Hence RSS configuration is deferred upto this point.
+ * Also, we would like to retain similar behavior in PF case, so we
+ * don't do PF/VF specific check here.
+ */
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (qede_config_rss(eth_dev))
+ goto err;
+
+ /* Enable vport*/
+ if (qede_activate_vport(eth_dev, true))
+ goto err;
+
+ /* Update link status */
+ qede_link_update(eth_dev, 0);
+
+ /* Start/resume traffic */
+ qede_fastpath_start(edev);
+
+ DP_INFO(edev, "Device started\n");
+
+ return 0;
+err:
+ DP_ERR(edev, "Device start fails\n");
+ return -1; /* common error code is < 0 */
+}
+
+static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Disable vport */
+ if (qede_activate_vport(eth_dev, false))
+ return;
+
+ if (qdev->enable_lro)
+ qede_enable_tpa(eth_dev, false);
+
+ /* Stop queues */
+ qede_stop_queues(eth_dev);
+
+ /* Disable traffic */
+ ecore_hw_stop_fastpath(edev); /* TBD - loop */
+
+ DP_INFO(edev, "Device is stopped\n");
+}
+
+const char *valid_args[] = {
+ QEDE_NPAR_TX_SWITCHING,
+ QEDE_VF_TX_SWITCHING,
+ NULL,
+};
+
+static int qede_args_check(const char *key, const char *val, void *opaque)
+{
+ unsigned long tmp;
+ int ret = 0;
+ struct rte_eth_dev *eth_dev = opaque;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ errno = 0;
+ tmp = strtoul(val, NULL, 0);
+ if (errno) {
+ DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
+ return errno;
+ }
+
+ if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
+ ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
+ qdev->enable_tx_switching = !!tmp;
+ DP_INFO(edev, "Disabling %s tx-switching\n",
+ strcmp(QEDE_NPAR_TX_SWITCHING, key) ?
+ "VF" : "NPAR");
+ }
+
+ return ret;
+}
+
+static int qede_args(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_kvargs *kvlist;
+ struct rte_devargs *devargs;
+ int ret;
+ int i;
+
+ devargs = pci_dev->device.devargs;
+ if (!devargs)
+ return 0; /* return success */
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (kvlist == NULL)
+ return -EINVAL;
+
+ /* Process parameters. */
+ for (i = 0; (valid_args[i] != NULL); ++i) {
+ if (rte_kvargs_count(kvlist, valid_args[i])) {
+ ret = rte_kvargs_process(kvlist, valid_args[i],
+ qede_args_check, eth_dev);
+ if (ret != ECORE_SUCCESS) {
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+ }
+ }
+ rte_kvargs_free(kvlist);
+
+ return 0;
+}
+
+static int qede_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Check requirements for 100G mode */
+ if (ECORE_IS_CMT(edev)) {
+ if (eth_dev->data->nb_rx_queues < 2 ||
+ eth_dev->data->nb_tx_queues < 2) {
+ DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
+ return -EINVAL;
+ }
+
+ if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
+ (eth_dev->data->nb_tx_queues % 2 != 0)) {
+ DP_ERR(edev,
+ "100G mode needs even no. of RX/TX queues\n");
+ return -EINVAL;
+ }
+ }
+
+ /* We need to have min 1 RX queue.There is no min check in
+ * rte_eth_dev_configure(), so we are checking it here.
+ */
+ if (eth_dev->data->nb_rx_queues == 0) {
+ DP_ERR(edev, "Minimum one RX queue is required\n");
+ return -EINVAL;
+ }
+
+ /* Enable Tx switching by default */
+ qdev->enable_tx_switching = 1;
+
+ /* Parse devargs and fix up rxmode */
+ if (qede_args(eth_dev))
+ DP_NOTICE(edev, false,
+ "Invalid devargs supplied, requested change will not take effect\n");
+
+ if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
+ rxmode->mq_mode == ETH_MQ_RX_RSS)) {
+ DP_ERR(edev, "Unsupported multi-queue mode\n");
+ return -ENOTSUP;
+ }
+ /* Flow director mode check */
+ if (qede_check_fdir_support(eth_dev))
+ return -ENOTSUP;
+
+ qede_dealloc_fp_resc(eth_dev);
+ qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
+ qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
+ if (qede_alloc_fp_resc(qdev))
+ return -ENOMEM;
+
+ /* If jumbo enabled adjust MTU */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ eth_dev->data->mtu =
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ ETHER_HDR_LEN - ETHER_CRC_LEN;
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+ eth_dev->data->scattered_rx = 1;
+
+ if (qede_start_vport(qdev, eth_dev->data->mtu))
+ return -1;
+
+ qdev->mtu = eth_dev->data->mtu;
+
+ /* Enable VLAN offloads by default */
+ ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK);
+ if (ret)
+ return ret;
+
+ DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
+ QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
+
+ return 0;
+}
+
+/* Info about HW descriptor ring limitations */
+static const struct rte_eth_desc_lim qede_rx_desc_lim = {
+ .nb_max = 0x8000, /* 32K */
+ .nb_min = 128,
+ .nb_align = 128 /* lowest common multiple */
+};
+
+static const struct rte_eth_desc_lim qede_tx_desc_lim = {
+ .nb_max = 0x8000, /* 32K */
+ .nb_min = 256,
+ .nb_align = 256,
+ .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
+ .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
+};
+
+static void
+qede_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_link_output link;
+ uint32_t speed_cap = 0;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
+ dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
+ dev_info->rx_desc_lim = qede_rx_desc_lim;
+ dev_info->tx_desc_lim = qede_tx_desc_lim;
+
+ if (IS_PF(edev))
+ dev_info->max_rx_queues = (uint16_t)RTE_MIN(
+ QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
+ else
+ dev_info->max_rx_queues = (uint16_t)RTE_MIN(
+ QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
+ dev_info->max_tx_queues = dev_info->max_rx_queues;
+
+ dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
+ dev_info->max_vfs = 0;
+ dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
+ dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
+ dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
+ dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_STRIP);
+ dev_info->rx_queue_offload_capa = 0;
+
+ /* TX offloads are on a per-packet basis, so it is applicable
+ * to both at port and queue levels.
+ */
+ dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+ dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+ };
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ /* Packets are always dropped if no descriptors are available */
+ .rx_drop_en = 1,
+ .offloads = 0,
+ };
+
+ memset(&link, 0, sizeof(struct qed_link_output));
+ qdev->ops->common->get_link(edev, &link);
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ speed_cap |= ETH_LINK_SPEED_1G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ speed_cap |= ETH_LINK_SPEED_10G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ speed_cap |= ETH_LINK_SPEED_25G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ speed_cap |= ETH_LINK_SPEED_40G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ speed_cap |= ETH_LINK_SPEED_50G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+ speed_cap |= ETH_LINK_SPEED_100G;
+ dev_info->speed_capa = speed_cap;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+int
+qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_link_output q_link;
+ struct rte_eth_link link;
+ uint16_t link_duplex;
+
+ memset(&q_link, 0, sizeof(q_link));
+ memset(&link, 0, sizeof(link));
+
+ qdev->ops->common->get_link(edev, &q_link);
+
+ /* Link Speed */
+ link.link_speed = q_link.speed;
+
+ /* Link Mode */
+ switch (q_link.duplex) {
+ case QEDE_DUPLEX_HALF:
+ link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case QEDE_DUPLEX_FULL:
+ link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case QEDE_DUPLEX_UNKNOWN:
+ default:
+ link_duplex = -1;
+ }
+ link.link_duplex = link_duplex;
+
+ /* Link Status */
+ link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+ /* AN */
+ link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
+ ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+
+ DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
+ link.link_speed, link.link_duplex,
+ link.link_autoneg, link.link_status);
+
+ return rte_eth_linkstatus_set(eth_dev, &link);
+}
+
+static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+#endif
+
+ enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
+
+ if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
+ type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+
+ qed_configure_filter_rx_mode(eth_dev, type);
+}
+
+static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+#endif
+
+ if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
+ qed_configure_filter_rx_mode(eth_dev,
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
+ else
+ qed_configure_filter_rx_mode(eth_dev,
+ QED_FILTER_RX_MODE_TYPE_REGULAR);
+}
+
+static void qede_poll_sp_sb_cb(void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int rc;
+
+ qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+ qede_interrupt_action(&edev->hwfns[1]);
+
+ rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
+ qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ if (rc != 0) {
+ DP_ERR(edev, "Unable to start periodic"
+ " timer rc %d\n", rc);
+ assert(false && "Unable to start periodic timer");
+ }
+}
+
+static void qede_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* dev_stop() shall cleanup fp resources in hw but without releasing
+ * dma memories and sw structures so that dev_start() can be called
+ * by the app without reconfiguration. However, in dev_close() we
+ * can release all the resources and device can be brought up newly
+ */
+ if (eth_dev->data->dev_started)
+ qede_dev_stop(eth_dev);
+
+ qede_stop_vport(edev);
+ qdev->vport_started = false;
+ qede_fdir_dealloc_resc(eth_dev);
+ qede_dealloc_fp_resc(eth_dev);
+
+ eth_dev->data->nb_rx_queues = 0;
+ eth_dev->data->nb_tx_queues = 0;
+
+ /* Bring the link down */
+ qede_dev_set_link_state(eth_dev, false);
+ qdev->ops->common->slowpath_stop(edev);
+ qdev->ops->common->remove(edev);
+ rte_intr_disable(&pci_dev->intr_handle);
+
+ switch (pci_dev->intr_handle.type) {
+ case RTE_INTR_HANDLE_UIO_INTX:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ qede_interrupt_handler_intx,
+ (void *)eth_dev);
+ break;
+ default:
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ qede_interrupt_handler,
+ (void *)eth_dev);
+ }
+
+ if (ECORE_IS_CMT(edev))
+ rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
+}
+
+static int
+qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct ecore_eth_stats stats;
+ unsigned int i = 0, j = 0, qid;
+ unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+ struct qede_tx_queue *txq;
+
+ ecore_get_vport_stats(edev, &stats);
+
+ /* RX Stats */
+ eth_stats->ipackets = stats.common.rx_ucast_pkts +
+ stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
+
+ eth_stats->ibytes = stats.common.rx_ucast_bytes +
+ stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
+
+ eth_stats->ierrors = stats.common.rx_crc_errors +
+ stats.common.rx_align_errors +
+ stats.common.rx_carrier_errors +
+ stats.common.rx_oversize_packets +
+ stats.common.rx_jabbers + stats.common.rx_undersize_packets;
+
+ eth_stats->rx_nombuf = stats.common.no_buff_discards;
+
+ eth_stats->imissed = stats.common.mftag_filter_discards +
+ stats.common.mac_filter_discards +
+ stats.common.no_buff_discards +
+ stats.common.brb_truncates + stats.common.brb_discards;
+
+ /* TX stats */
+ eth_stats->opackets = stats.common.tx_ucast_pkts +
+ stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
+
+ eth_stats->obytes = stats.common.tx_ucast_bytes +
+ stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
+
+ eth_stats->oerrors = stats.common.tx_err_drop_pkts;
+
+ /* Queue stats */
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
+ (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
+ DP_VERBOSE(edev, ECORE_MSG_DEBUG,
+ "Not all the queue stats will be displayed. Set"
+ " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
+ " appropriately and retry.\n");
+
+ for_each_rss(qid) {
+ eth_stats->q_ipackets[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rcv_pkts));
+ eth_stats->q_errors[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_hw_errors)) +
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_alloc_errors));
+ i++;
+ if (i == rxq_stat_cntrs)
+ break;
+ }
+
+ for_each_tss(qid) {
+ txq = qdev->fp_array[qid].txq;
+ eth_stats->q_opackets[j] =
+ *((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue,
+ xmit_pkts)));
+ j++;
+ if (j == txq_stat_cntrs)
+ break;
+ }
+
+ return 0;
+}
+
+static unsigned
+qede_get_xstats_count(struct qede_dev *qdev) {
+ if (ECORE_IS_BB(&qdev->edev))
+ return RTE_DIM(qede_xstats_strings) +
+ RTE_DIM(qede_bb_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
+ else
+ return RTE_DIM(qede_xstats_strings) +
+ RTE_DIM(qede_ah_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
+}
+
+static int
+qede_get_xstats_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ const unsigned int stat_cnt = qede_get_xstats_count(qdev);
+ unsigned int i, qid, stat_idx = 0;
+ unsigned int rxq_stat_cntrs;
+
+ if (xstats_names != NULL) {
+ for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%s",
+ qede_xstats_strings[i].name);
+ stat_idx++;
+ }
+
+ if (ECORE_IS_BB(edev)) {
+ for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%s",
+ qede_bb_xstats_strings[i].name);
+ stat_idx++;
+ }
+ } else {
+ for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%s",
+ qede_ah_xstats_strings[i].name);
+ stat_idx++;
+ }
+ }
+
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (qid = 0; qid < rxq_stat_cntrs; qid++) {
+ for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%.4s%d%s",
+ qede_rxq_xstats_strings[i].name, qid,
+ qede_rxq_xstats_strings[i].name + 4);
+ stat_idx++;
+ }
+ }
+ }
+
+ return stat_cnt;
+}
+
+static int
+qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct ecore_eth_stats stats;
+ const unsigned int num = qede_get_xstats_count(qdev);
+ unsigned int i, qid, stat_idx = 0;
+ unsigned int rxq_stat_cntrs;
+
+ if (n < num)
+ return num;
+
+ ecore_get_vport_stats(edev, &stats);
+
+ for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+ xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
+ qede_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+
+ if (ECORE_IS_BB(edev)) {
+ for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+ xstats[stat_idx].value =
+ *(uint64_t *)(((char *)&stats) +
+ qede_bb_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ } else {
+ for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+ xstats[stat_idx].value =
+ *(uint64_t *)(((char *)&stats) +
+ qede_ah_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ }
+
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (qid = 0; qid < rxq_stat_cntrs; qid++) {
+ for_each_rss(qid) {
+ for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+ xstats[stat_idx].value = *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ qede_rxq_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ }
+ }
+
+ return stat_idx;
+}
+
+static void
+qede_reset_xstats(struct rte_eth_dev *dev)
+{
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ ecore_reset_vport_stats(edev);
+ qede_reset_queue_stats(qdev, true);
+}
+
+int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qed_link_params link_params;
+ int rc;
+
+ DP_INFO(edev, "setting link state %d\n", link_up);
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = link_up;
+ rc = qdev->ops->common->set_link(edev, &link_params);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(edev, "Unable to set link state %d\n", link_up);
+
+ return rc;
+}
+
+static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
+{
+ return qede_dev_set_link_state(eth_dev, true);
+}
+
+static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
+{
+ return qede_dev_set_link_state(eth_dev, false);
+}
+
+static void qede_reset_stats(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ ecore_reset_vport_stats(edev);
+ qede_reset_queue_stats(qdev, false);
+}
+
+static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ enum qed_filter_rx_mode_type type =
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+
+ if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+ type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
+
+ qed_configure_filter_rx_mode(eth_dev, type);
+}
+
+static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+ qed_configure_filter_rx_mode(eth_dev,
+ QED_FILTER_RX_MODE_TYPE_PROMISC);
+ else
+ qed_configure_filter_rx_mode(eth_dev,
+ QED_FILTER_RX_MODE_TYPE_REGULAR);
+}
+
+static int
+qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ uint8_t i;
+
+ if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
+ DP_ERR(edev, "Reached max multicast filters limit,"
+ "Please enable multicast promisc mode\n");
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < mc_addrs_num; i++) {
+ if (!is_multicast_ether_addr(&mc_addrs[i])) {
+ DP_ERR(edev, "Not a valid multicast MAC\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Flush all existing entries */
+ if (qede_del_mcast_filters(eth_dev))
+ return -1;
+
+ /* Set new mcast list */
+ return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
+}
+
+/* Update MTU via vport-update without doing port restart.
+ * The vport must be deactivated before calling this API.
+ */
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ if (IS_PF(edev)) {
+ struct ecore_sp_vport_update_params params;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ params.vport_id = 0;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+ }
+ } else {
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
+ if (rc == ECORE_INVAL) {
+ DP_INFO(edev, "VF MTU Update TLV not supported\n");
+ /* Recreate vport */
+ rc = qede_start_vport(qdev, mtu);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ /* Restore config lost due to vport stop */
+ if (eth_dev->data->promiscuous)
+ qede_promiscuous_enable(eth_dev);
+ else
+ qede_promiscuous_disable(eth_dev);
+
+ if (eth_dev->data->all_multicast)
+ qede_allmulticast_enable(eth_dev);
+ else
+ qede_allmulticast_disable(eth_dev);
+
+ qede_vlan_offload_set(eth_dev,
+ qdev->vlan_offload_mask);
+ } else if (rc != ECORE_SUCCESS) {
+ goto err;
+ }
+ }
+ }
+ DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);
+
+ return 0;
+
+err:
+ DP_ERR(edev, "Failed to update MTU\n");
+ return -1;
+}
+
+static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qed_link_output current_link;
+ struct qed_link_params params;
+
+ memset(&current_link, 0, sizeof(current_link));
+ qdev->ops->common->get_link(edev, &current_link);
+
+ memset(&params, 0, sizeof(params));
+ params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
+ if (fc_conf->autoneg) {
+ if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
+ DP_ERR(edev, "Autoneg not supported\n");
+ return -EINVAL;
+ }
+ params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+ }
+
+ /* Pause is assumed to be supported (SUPPORTED_Pause) */
+ if (fc_conf->mode == RTE_FC_FULL)
+ params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
+ QED_LINK_PAUSE_RX_ENABLE);
+ if (fc_conf->mode == RTE_FC_TX_PAUSE)
+ params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+ if (fc_conf->mode == RTE_FC_RX_PAUSE)
+ params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+
+ params.link_up = true;
+ (void)qdev->ops->common->set_link(edev, &params);
+
+ return 0;
+}
+
+static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qed_link_output current_link;
+
+ memset(&current_link, 0, sizeof(current_link));
+ qdev->ops->common->get_link(edev, &current_link);
+
+ if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+ fc_conf->autoneg = true;
+
+ if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
+ QED_LINK_PAUSE_TX_ENABLE))
+ fc_conf->mode = RTE_FC_FULL;
+ else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static const uint32_t *
+qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_VXLAN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_TUNNEL_GENEVE,
+ RTE_PTYPE_TUNNEL_GRE,
+ /* Inner */
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ RTE_PTYPE_INNER_L3_IPV4,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (eth_dev->rx_pkt_burst == qede_recv_pkts)
+ return ptypes;
+
+ return NULL;
+}
+
+static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
+{
+ *rss_caps = 0;
+ *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
+ *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
+ *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
+}
+
+int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params vport_update_params;
+ struct ecore_rss_params rss_params;
+ struct ecore_hwfn *p_hwfn;
+ uint32_t *key = (uint32_t *)rss_conf->rss_key;
+ uint64_t hf = rss_conf->rss_hf;
+ uint8_t len = rss_conf->rss_key_len;
+ uint8_t idx;
+ uint8_t i;
+ int rc;
+
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ memset(&rss_params, 0, sizeof(rss_params));
+
+ DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
+ (unsigned long)hf, len, key);
+
+ if (hf != 0) {
+ /* Enabling RSS */
+ DP_INFO(edev, "Enabling rss\n");
+
+ /* RSS caps */
+ qede_init_rss_caps(&rss_params.rss_caps, hf);
+ rss_params.update_rss_capabilities = 1;
+
+ /* RSS hash key */
+ if (key) {
+ if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
+ DP_ERR(edev, "RSS key length exceeds limit\n");
+ return -EINVAL;
+ }
+ DP_INFO(edev, "Applying user supplied hash key\n");
+ rss_params.update_rss_key = 1;
+ memcpy(&rss_params.rss_key, key, len);
+ }
+ rss_params.rss_enable = 1;
+ }
+
+ rss_params.update_rss_config = 1;
+ /* tbl_size has to be set with capabilities */
+ rss_params.rss_table_size_log = 7;
+ vport_update_params.vport_id = 0;
+ /* pass the L2 handles instead of qids */
+ for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
+ idx = i % QEDE_RSS_COUNT(qdev);
+ rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
+ }
+ vport_update_params.rss_params = &rss_params;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_ERR(edev, "vport-update for RSS failed\n");
+ return rc;
+ }
+ }
+ qdev->rss_enable = rss_params.rss_enable;
+
+ /* Update local structure for hash query */
+ qdev->rss_conf.rss_hf = hf;
+ qdev->rss_conf.rss_key_len = len;
+ if (qdev->rss_enable) {
+ if (qdev->rss_conf.rss_key == NULL) {
+ qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
+ if (qdev->rss_conf.rss_key == NULL) {
+ DP_ERR(edev, "No memory to store RSS key\n");
+ return -ENOMEM;
+ }
+ }
+ if (key && len) {
+ DP_INFO(edev, "Storing RSS key\n");
+ memcpy(qdev->rss_conf.rss_key, key, len);
+ }
+ } else if (!qdev->rss_enable && len == 0) {
+ if (qdev->rss_conf.rss_key) {
+ free(qdev->rss_conf.rss_key);
+ qdev->rss_conf.rss_key = NULL;
+ DP_INFO(edev, "Free RSS key\n");
+ }
+ }
+
+ return 0;
+}
+
+static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+
+ rss_conf->rss_hf = qdev->rss_conf.rss_hf;
+ rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
+
+ if (rss_conf->rss_key && qdev->rss_conf.rss_key)
+ memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
+ rss_conf->rss_key_len);
+ return 0;
+}
+
+static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
+ struct ecore_rss_params *rss)
+{
+ int i, fn;
+ bool rss_mode = 1; /* enable */
+ struct ecore_queue_cid *cid;
+ struct ecore_rss_params *t_rss;
+
+ /* In regular scenario, we'd simply need to take input handlers.
+ * But in CMT, we'd have to split the handlers according to the
+ * engine they were configured on. We'd then have to understand
+ * whether RSS is really required, since 2-queues on CMT doesn't
+ * require RSS.
+ */
+
+ /* CMT should be round-robin */
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ cid = rss->rss_ind_table[i];
+
+ if (cid->p_owner == ECORE_LEADING_HWFN(edev))
+ t_rss = &rss[0];
+ else
+ t_rss = &rss[1];
+
+ t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
+ }
+
+ t_rss = &rss[1];
+ t_rss->update_rss_ind_table = 1;
+ t_rss->rss_table_size_log = 7;
+ t_rss->update_rss_config = 1;
+
+ /* Make sure RSS is actually required */
+ for_each_hwfn(edev, fn) {
+ for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
+ i++) {
+ if (rss[fn].rss_ind_table[i] !=
+ rss[fn].rss_ind_table[0])
+ break;
+ }
+
+ if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
+ DP_INFO(edev,
+ "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ rss_mode = 0;
+ goto out;
+ }
+ }
+
+out:
+ t_rss->rss_enable = rss_mode;
+
+ return rss_mode;
+}
+
+int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params vport_update_params;
+ struct ecore_rss_params *params;
+ struct ecore_hwfn *p_hwfn;
+ uint16_t i, idx, shift;
+ uint8_t entry;
+ int rc = 0;
+
+ if (reta_size > ETH_RSS_RETA_SIZE_128) {
+ DP_ERR(edev, "reta_size %d is not supported by hardware\n",
+ reta_size);
+ return -EINVAL;
+ }
+
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
+ RTE_CACHE_LINE_SIZE);
+ if (params == NULL) {
+ DP_ERR(edev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift)) {
+ entry = reta_conf[idx].reta[shift];
+ /* Pass rxq handles to ecore */
+ params->rss_ind_table[i] =
+ qdev->fp_array[entry].rxq->handle;
+ /* Update the local copy for RETA query command */
+ qdev->rss_ind_table[i] = entry;
+ }
+ }
+
+ params->update_rss_ind_table = 1;
+ params->rss_table_size_log = 7;
+ params->update_rss_config = 1;
+
+ /* Fix up RETA for CMT mode device */
+ if (ECORE_IS_CMT(edev))
+ qdev->rss_enable = qede_update_rss_parm_cmt(edev,
+ params);
+ vport_update_params.vport_id = 0;
+ /* Use the current value of rss_enable */
+ params->rss_enable = qdev->rss_enable;
+ vport_update_params.rss_params = params;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_ERR(edev, "vport-update for RSS failed\n");
+ goto out;
+ }
+ }
+
+out:
+ rte_free(params);
+ return rc;
+}
+
+static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ uint16_t i, idx, shift;
+ uint8_t entry;
+
+ if (reta_size > ETH_RSS_RETA_SIZE_128) {
+ DP_ERR(edev, "reta_size %d is not supported\n",
+ reta_size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift)) {
+ entry = qdev->rss_ind_table[i];
+ reta_conf[idx].reta[shift] = entry;
+ }
+ }
+
+ return 0;
+}
+
+
+
+static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_dev_info dev_info = {0};
+ struct qede_fastpath *fp;
+ uint32_t max_rx_pkt_len;
+ uint32_t frame_size;
+ uint16_t rx_buf_size;
+ uint16_t bufsz;
+ bool restart = false;
+ int i;
+
+ PMD_INIT_FUNC_TRACE(edev);
+ qede_dev_info_get(dev, &dev_info);
+ max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+ DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
+ mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+ ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
+ return -EINVAL;
+ }
+ if (!dev->data->scattered_rx &&
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+ DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
+ dev->data->min_rx_buf_size);
+ return -EINVAL;
+ }
+ /* Temporarily replace I/O functions with dummy ones. It cannot
+ * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+ */
+ dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+ dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+ if (dev->data->dev_started) {
+ dev->data->dev_started = 0;
+ qede_dev_stop(dev);
+ restart = true;
+ }
+ rte_delay_ms(1000);
+ qdev->mtu = mtu;
+
+ /* Fix up RX buf size for all queues of the port */
+ for_each_rss(i) {
+ fp = &qdev->fp_array[i];
+ if (fp->rxq != NULL) {
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (dev->data->scattered_rx)
+ rx_buf_size = bufsz + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
+ else
+ rx_buf_size = frame_size;
+ rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+ fp->rxq->rx_buf_size = rx_buf_size;
+ DP_INFO(edev, "RX buffer size %u\n", rx_buf_size);
+ }
+ }
+ if (max_rx_pkt_len > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ if (!dev->data->dev_started && restart) {
+ qede_dev_start(dev);
+ dev->data->dev_started = 1;
+ }
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
+ /* Reassign back */
+ dev->rx_pkt_burst = qede_recv_pkts;
+ dev->tx_pkt_burst = qede_xmit_pkts;
+
+ return 0;
+}
+
+static int
+qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn; /* @DPDK */
+ uint16_t udp_port;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ memset(&tunn, 0, sizeof(tunn));
+
+ switch (tunnel_udp->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
+ DP_ERR(edev, "UDP port %u doesn't exist\n",
+ tunnel_udp->udp_port);
+ return ECORE_INVAL;
+ }
+ udp_port = 0;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+ tunn.vxlan_port.port);
+ return rc;
+ }
+
+ qdev->vxlan.udp_port = udp_port;
+ /* If the request is to delete UDP port and if the number of
+ * VXLAN filters have reached 0 then VxLAN offload can be be
+ * disabled.
+ */
+ if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
+ return qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
+ DP_ERR(edev, "UDP port %u doesn't exist\n",
+ tunnel_udp->udp_port);
+ return ECORE_INVAL;
+ }
+
+ udp_port = 0;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+ tunn.vxlan_port.port);
+ return rc;
+ }
+
+ qdev->vxlan.udp_port = udp_port;
+ /* If the request is to delete UDP port and if the number of
+ * GENEVE filters have reached 0 then GENEVE offload can be be
+ * disabled.
+ */
+ if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
+ return qede_geneve_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+ break;
+
+ default:
+ return ECORE_INVAL;
+ }
+
+ return 0;
+
+}
+static int
+qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn; /* @DPDK */
+ uint16_t udp_port;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ memset(&tunn, 0, sizeof(tunn));
+
+ switch (tunnel_udp->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
+ DP_INFO(edev,
+ "UDP port %u for VXLAN was already configured\n",
+ tunnel_udp->udp_port);
+ return ECORE_SUCCESS;
+ }
+
+ /* Enable VxLAN tunnel with default MAC/VLAN classification if
+ * it was not enabled while adding VXLAN filter before UDP port
+ * update.
+ */
+ if (!qdev->vxlan.enable) {
+ rc = qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to enable VXLAN "
+ "prior to updating UDP port\n");
+ return rc;
+ }
+ }
+ udp_port = tunnel_udp->udp_port;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
+ udp_port);
+ return rc;
+ }
+
+ DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
+
+ qdev->vxlan.udp_port = udp_port;
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
+ DP_INFO(edev,
+ "UDP port %u for GENEVE was already configured\n",
+ tunnel_udp->udp_port);
+ return ECORE_SUCCESS;
+ }
+
+ /* Enable GENEVE tunnel with default MAC/VLAN classification if
+ * it was not enabled while adding GENEVE filter before UDP port
+ * update.
+ */
+ if (!qdev->geneve.enable) {
+ rc = qede_geneve_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to enable GENEVE "
+ "prior to updating UDP port\n");
+ return rc;
+ }
+ }
+ udp_port = tunnel_udp->udp_port;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
+ udp_port);
+ return rc;
+ }
+
+ DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
+
+ qdev->geneve.udp_port = udp_port;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ return 0;
+}
+
+static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
+ uint32_t *clss, char *str)
+{
+ uint16_t j;
+ *clss = MAX_ECORE_TUNN_CLSS;
+
+ for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
+ if (filter == qede_tunn_types[j].rte_filter_type) {
+ *type = qede_tunn_types[j].qede_type;
+ *clss = qede_tunn_types[j].qede_tunn_clss;
+ strcpy(str, qede_tunn_types[j].string);
+ return;
+ }
+ }
+}
+
+static int
+qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
+ const struct rte_eth_tunnel_filter_conf *conf,
+ uint32_t type)
+{
+ /* Init commmon ucast params first */
+ qede_set_ucast_cmn_params(ucast);
+
+ /* Copy out the required fields based on classification type */
+ ucast->type = type;
+
+ switch (type) {
+ case ECORE_FILTER_VNI:
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_VLAN:
+ ucast->vlan = conf->inner_vlan;
+ break;
+ case ECORE_FILTER_MAC:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_INNER_MAC:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vlan = conf->inner_vlan;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int
+_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
+ const struct rte_eth_tunnel_filter_conf *conf,
+ __attribute__((unused)) enum rte_filter_op filter_op,
+ enum ecore_tunn_clss *clss,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_filter_ucast ucast = {0};
+ enum ecore_filter_ucast_type type;
+ uint16_t filter_type = 0;
+ char str[80];
+ int rc;
+
+ filter_type = conf->filter_type;
+ /* Determine if the given filter classification is supported */
+ qede_get_ecore_tunn_params(filter_type, &type, clss, str);
+ if (*clss == MAX_ECORE_TUNN_CLSS) {
+ DP_ERR(edev, "Unsupported filter type\n");
+ return -EINVAL;
+ }
+ /* Init tunnel ucast params */
+ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
+ conf->filter_type);
+ return rc;
+ }
+ DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+ str, filter_op, ucast.type);
+
+ ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
+
+ /* Skip MAC/VLAN if filter is based on VNI */
+ if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+ rc = qede_mac_int_ops(eth_dev, &ucast, add);
+ if ((rc == 0) && add) {
+ /* Enable accept anyvlan */
+ qede_config_accept_any_vlan(qdev, true);
+ }
+ } else {
+ rc = qede_ucast_filter(eth_dev, &ucast, add);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, &ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+
+ return rc;
+}
+
+static int
+qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ const struct rte_eth_tunnel_filter_conf *conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
+ bool add;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ add = true;
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ add = false;
+ break;
+ default:
+ DP_ERR(edev, "Unsupported operation %d\n", filter_op);
+ return -EINVAL;
+ }
+
+ if (IS_VF(edev))
+ return qede_tunn_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ conf->tunnel_type, add);
+
+ rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (add) {
+ if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
+ qdev->vxlan.num_filters++;
+ qdev->vxlan.filter_type = conf->filter_type;
+ } else { /* GENEVE */
+ qdev->geneve.num_filters++;
+ qdev->geneve.filter_type = conf->filter_type;
+ }
+
+ if (!qdev->vxlan.enable || !qdev->geneve.enable ||
+ !qdev->ipgre.enable)
+ return qede_tunn_enable(eth_dev, clss,
+ conf->tunnel_type,
+ true);
+ } else {
+ if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
+ qdev->vxlan.num_filters--;
+ else /*GENEVE*/
+ qdev->geneve.num_filters--;
+
+ /* Disable VXLAN if VXLAN filters become 0 */
+ if ((qdev->vxlan.num_filters == 0) ||
+ (qdev->geneve.num_filters == 0))
+ return qede_tunn_enable(eth_dev, clss,
+ conf->tunnel_type,
+ false);
+ }
+
+ return 0;
+}
+
+int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_tunnel_filter_conf *filter_conf =
+ (struct rte_eth_tunnel_filter_conf *)arg;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_TUNNEL:
+ switch (filter_conf->tunnel_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ DP_INFO(edev,
+ "Packet steering to the specified Rx queue"
+ " is not supported with UDP tunneling");
+ return(qede_tunn_filter_config(eth_dev, filter_op,
+ filter_conf));
+ case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_TUNNEL_TYPE_NVGRE:
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ DP_ERR(edev, "Unsupported tunnel type %d\n",
+ filter_conf->tunnel_type);
+ return -EINVAL;
+ case RTE_TUNNEL_TYPE_NONE:
+ default:
+ return 0;
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ return qede_fdir_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_NTUPLE:
+ return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_MACVLAN:
+ case RTE_ETH_FILTER_ETHERTYPE:
+ case RTE_ETH_FILTER_FLEXIBLE:
+ case RTE_ETH_FILTER_SYN:
+ case RTE_ETH_FILTER_HASH:
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ case RTE_ETH_FILTER_MAX:
+ default:
+ DP_ERR(edev, "Unsupported filter type %d\n",
+ filter_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct eth_dev_ops qede_eth_dev_ops = {
+ .dev_configure = qede_dev_configure,
+ .dev_infos_get = qede_dev_info_get,
+ .rx_queue_setup = qede_rx_queue_setup,
+ .rx_queue_release = qede_rx_queue_release,
+ .tx_queue_setup = qede_tx_queue_setup,
+ .tx_queue_release = qede_tx_queue_release,
+ .dev_start = qede_dev_start,
+ .dev_set_link_up = qede_dev_set_link_up,
+ .dev_set_link_down = qede_dev_set_link_down,
+ .link_update = qede_link_update,
+ .promiscuous_enable = qede_promiscuous_enable,
+ .promiscuous_disable = qede_promiscuous_disable,
+ .allmulticast_enable = qede_allmulticast_enable,
+ .allmulticast_disable = qede_allmulticast_disable,
+ .set_mc_addr_list = qede_set_mc_addr_list,
+ .dev_stop = qede_dev_stop,
+ .dev_close = qede_dev_close,
+ .stats_get = qede_get_stats,
+ .stats_reset = qede_reset_stats,
+ .xstats_get = qede_get_xstats,
+ .xstats_reset = qede_reset_xstats,
+ .xstats_get_names = qede_get_xstats_names,
+ .mac_addr_add = qede_mac_addr_add,
+ .mac_addr_remove = qede_mac_addr_remove,
+ .mac_addr_set = qede_mac_addr_set,
+ .vlan_offload_set = qede_vlan_offload_set,
+ .vlan_filter_set = qede_vlan_filter_set,
+ .flow_ctrl_set = qede_flow_ctrl_set,
+ .flow_ctrl_get = qede_flow_ctrl_get,
+ .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
+ .rss_hash_update = qede_rss_hash_update,
+ .rss_hash_conf_get = qede_rss_hash_conf_get,
+ .reta_update = qede_rss_reta_update,
+ .reta_query = qede_rss_reta_query,
+ .mtu_set = qede_set_mtu,
+ .filter_ctrl = qede_dev_filter_ctrl,
+ .udp_tunnel_port_add = qede_udp_dst_port_add,
+ .udp_tunnel_port_del = qede_udp_dst_port_del,
+};
+
+static const struct eth_dev_ops qede_eth_vf_dev_ops = {
+ .dev_configure = qede_dev_configure,
+ .dev_infos_get = qede_dev_info_get,
+ .rx_queue_setup = qede_rx_queue_setup,
+ .rx_queue_release = qede_rx_queue_release,
+ .tx_queue_setup = qede_tx_queue_setup,
+ .tx_queue_release = qede_tx_queue_release,
+ .dev_start = qede_dev_start,
+ .dev_set_link_up = qede_dev_set_link_up,
+ .dev_set_link_down = qede_dev_set_link_down,
+ .link_update = qede_link_update,
+ .promiscuous_enable = qede_promiscuous_enable,
+ .promiscuous_disable = qede_promiscuous_disable,
+ .allmulticast_enable = qede_allmulticast_enable,
+ .allmulticast_disable = qede_allmulticast_disable,
+ .set_mc_addr_list = qede_set_mc_addr_list,
+ .dev_stop = qede_dev_stop,
+ .dev_close = qede_dev_close,
+ .stats_get = qede_get_stats,
+ .stats_reset = qede_reset_stats,
+ .xstats_get = qede_get_xstats,
+ .xstats_reset = qede_reset_xstats,
+ .xstats_get_names = qede_get_xstats_names,
+ .vlan_offload_set = qede_vlan_offload_set,
+ .vlan_filter_set = qede_vlan_filter_set,
+ .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
+ .rss_hash_update = qede_rss_hash_update,
+ .rss_hash_conf_get = qede_rss_hash_conf_get,
+ .reta_update = qede_rss_reta_update,
+ .reta_query = qede_rss_reta_query,
+ .mtu_set = qede_set_mtu,
+ .udp_tunnel_port_add = qede_udp_dst_port_add,
+ .udp_tunnel_port_del = qede_udp_dst_port_del,
+ .mac_addr_add = qede_mac_addr_add,
+ .mac_addr_remove = qede_mac_addr_remove,
+ .mac_addr_set = qede_mac_addr_set,
+};
+
+static void qede_update_pf_params(struct ecore_dev *edev)
+{
+ struct ecore_pf_params pf_params;
+
+ memset(&pf_params, 0, sizeof(struct ecore_pf_params));
+ pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
+ pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
+ qed_ops->common->update_pf_params(edev, &pf_params);
+}
+
+static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
+{
+ struct rte_pci_device *pci_dev;
+ struct rte_pci_addr pci_addr;
+ struct qede_dev *adapter;
+ struct ecore_dev *edev;
+ struct qed_dev_eth_info dev_info;
+ struct qed_slowpath_params params;
+ static bool do_once = true;
+ uint8_t bulletin_change;
+ uint8_t vf_mac[ETHER_ADDR_LEN];
+ uint8_t is_mac_forced;
+ bool is_mac_exist;
+ /* Fix up ecore debug level */
+ uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
+ uint8_t dp_level = ECORE_LEVEL_VERBOSE;
+ uint32_t int_mode;
+ int rc;
+
+ /* Extract key data structures */
+ adapter = eth_dev->data->dev_private;
+ adapter->ethdev = eth_dev;
+ edev = &adapter->edev;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ pci_addr = pci_dev->addr;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
+ pci_addr.bus, pci_addr.devid, pci_addr.function,
+ eth_dev->data->port_id);
+
+ eth_dev->rx_pkt_burst = qede_recv_pkts;
+ eth_dev->tx_pkt_burst = qede_xmit_pkts;
+ eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DP_ERR(edev, "Skipping device init from secondary process\n");
+ return 0;
+ }
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ /* @DPDK */
+ edev->vendor_id = pci_dev->id.vendor_id;
+ edev->device_id = pci_dev->id.device_id;
+
+ qed_ops = qed_get_eth_ops();
+ if (!qed_ops) {
+ DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
+ return -EINVAL;
+ }
+
+ DP_INFO(edev, "Starting qede probe\n");
+ rc = qed_ops->common->probe(edev, pci_dev, dp_module,
+ dp_level, is_vf);
+ if (rc != 0) {
+ DP_ERR(edev, "qede probe failed rc %d\n", rc);
+ return -ENODEV;
+ }
+ qede_update_pf_params(edev);
+
+ switch (pci_dev->intr_handle.type) {
+ case RTE_INTR_HANDLE_UIO_INTX:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ int_mode = ECORE_INT_MODE_INTA;
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ qede_interrupt_handler_intx,
+ (void *)eth_dev);
+ break;
+ default:
+ int_mode = ECORE_INT_MODE_MSIX;
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ qede_interrupt_handler,
+ (void *)eth_dev);
+ }
+
+ if (rte_intr_enable(&pci_dev->intr_handle)) {
+ DP_ERR(edev, "rte_intr_enable() failed\n");
+ return -ENODEV;
+ }
+
+ /* Start the Slowpath-process */
+ memset(&params, 0, sizeof(struct qed_slowpath_params));
+
+ params.int_mode = int_mode;
+ params.drv_major = QEDE_PMD_VERSION_MAJOR;
+ params.drv_minor = QEDE_PMD_VERSION_MINOR;
+ params.drv_rev = QEDE_PMD_VERSION_REVISION;
+ params.drv_eng = QEDE_PMD_VERSION_PATCH;
+ strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
+ QEDE_PMD_DRV_VER_STR_SIZE);
+
+ /* For CMT mode device do periodic polling for slowpath events.
+ * This is required since uio device uses only one MSI-x
+ * interrupt vector but we need one for each engine.
+ */
+ if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
+ rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
+ qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ if (rc != 0) {
+ DP_ERR(edev, "Unable to start periodic"
+ " timer rc %d\n", rc);
+ return -EINVAL;
+ }
+ }
+
+ rc = qed_ops->common->slowpath_start(edev, &params);
+ if (rc) {
+ DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
+ rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ return -ENODEV;
+ }
+
+ rc = qed_ops->fill_dev_info(edev, &dev_info);
+ if (rc) {
+ DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
+ qed_ops->common->slowpath_stop(edev);
+ qed_ops->common->remove(edev);
+ rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ return -ENODEV;
+ }
+
+ qede_alloc_etherdev(adapter, &dev_info);
+
+ adapter->ops->common->set_name(edev, edev->name);
+
+ if (!is_vf)
+ adapter->dev_info.num_mac_filters =
+ (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
+ ECORE_MAC);
+ else
+ ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
+ (uint32_t *)&adapter->dev_info.num_mac_filters);
+
+ /* Allocate memory for storing MAC addr */
+ eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
+ (ETHER_ADDR_LEN *
+ adapter->dev_info.num_mac_filters),
+ RTE_CACHE_LINE_SIZE);
+
+ if (eth_dev->data->mac_addrs == NULL) {
+ DP_ERR(edev, "Failed to allocate MAC address\n");
+ qed_ops->common->slowpath_stop(edev);
+ qed_ops->common->remove(edev);
+ rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ return -ENOMEM;
+ }
+
+ if (!is_vf) {
+ ether_addr_copy((struct ether_addr *)edev->hwfns[0].
+ hw_info.hw_mac_addr,
+ &eth_dev->data->mac_addrs[0]);
+ ether_addr_copy(&eth_dev->data->mac_addrs[0],
+ &adapter->primary_mac);
+ } else {
+ ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
+ &bulletin_change);
+ if (bulletin_change) {
+ is_mac_exist =
+ ecore_vf_bulletin_get_forced_mac(
+ ECORE_LEADING_HWFN(edev),
+ vf_mac,
+ &is_mac_forced);
+ if (is_mac_exist) {
+ DP_INFO(edev, "VF macaddr received from PF\n");
+ ether_addr_copy((struct ether_addr *)&vf_mac,
+ &eth_dev->data->mac_addrs[0]);
+ ether_addr_copy(&eth_dev->data->mac_addrs[0],
+ &adapter->primary_mac);
+ } else {
+ DP_ERR(edev, "No VF macaddr assigned\n");
+ }
+ }
+ }
+
+ eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
+
+ if (do_once) {
+ qede_print_adapter_info(adapter);
+ do_once = false;
+ }
+
+ /* Bring-up the link */
+ qede_dev_set_link_state(eth_dev, true);
+
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ SLIST_INIT(&adapter->fdir_info.fdir_list_head);
+ SLIST_INIT(&adapter->vlan_list_head);
+ SLIST_INIT(&adapter->uc_list_head);
+ SLIST_INIT(&adapter->mc_list_head);
+ adapter->mtu = ETHER_MTU;
+ adapter->vport_started = false;
+
+ /* VF tunnel offloads is enabled by default in PF driver */
+ adapter->vxlan.num_filters = 0;
+ adapter->geneve.num_filters = 0;
+ adapter->ipgre.num_filters = 0;
+ if (is_vf) {
+ adapter->vxlan.enable = true;
+ adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
+ ETH_TUNNEL_FILTER_IVLAN;
+ adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
+ adapter->geneve.enable = true;
+ adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
+ ETH_TUNNEL_FILTER_IVLAN;
+ adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
+ adapter->ipgre.enable = true;
+ adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
+ ETH_TUNNEL_FILTER_IVLAN;
+ } else {
+ adapter->vxlan.enable = false;
+ adapter->geneve.enable = false;
+ adapter->ipgre.enable = false;
+ }
+
+ DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+ adapter->primary_mac.addr_bytes[0],
+ adapter->primary_mac.addr_bytes[1],
+ adapter->primary_mac.addr_bytes[2],
+ adapter->primary_mac.addr_bytes[3],
+ adapter->primary_mac.addr_bytes[4],
+ adapter->primary_mac.addr_bytes[5]);
+
+ DP_INFO(edev, "Device initialized\n");
+
+ return 0;
+}
+
+static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+ return qede_common_dev_init(eth_dev, 1);
+}
+
+static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+ return qede_common_dev_init(eth_dev, 0);
+}
+
+static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
+{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+#endif
+
+ /* only uninitialize in the primary process */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* safe to close dev here */
+ qede_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ if (eth_dev->data->mac_addrs)
+ rte_free(eth_dev->data->mac_addrs);
+
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
+static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ return qede_dev_common_uninit(eth_dev);
+}
+
+static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ return qede_dev_common_uninit(eth_dev);
+}
+
+static const struct rte_pci_id pci_id_qedevf_map[] = {
+#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
+ {
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
+ },
+ {
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
+ },
+ {
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
+ },
+ {.vendor_id = 0,}
+};
+
+static const struct rte_pci_id pci_id_qede_map[] = {
+#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
+ },
+ {.vendor_id = 0,}
+};
+
+static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct qede_dev), qedevf_eth_dev_init);
+}
+
+static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qedevf_pmd = {
+ .id_table = pci_id_qedevf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = qedevf_eth_dev_pci_probe,
+ .remove = qedevf_eth_dev_pci_remove,
+};
+
+static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct qede_dev), qede_eth_dev_init);
+}
+
+static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qede_pmd = {
+ .id_table = pci_id_qede_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = qede_eth_dev_pci_probe,
+ .remove = qede_eth_dev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
+
+RTE_INIT(qede_init_log)
+{
+ qede_logtype_init = rte_log_register("pmd.net.qede.init");
+ if (qede_logtype_init >= 0)
+ rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
+ qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
+ if (qede_logtype_driver >= 0)
+ rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/qede_ethdev.h b/src/spdk/dpdk/drivers/net/qede/qede_ethdev.h
new file mode 100644
index 00000000..6e9a5b4b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/qede_ethdev.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+
+#ifndef _QEDE_ETHDEV_H_
+#define _QEDE_ETHDEV_H_
+
+#include <sys/queue.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_dev.h>
+#include <rte_ip.h>
+
+/* ecore includes */
+#include "base/bcm_osal.h"
+#include "base/ecore.h"
+#include "base/ecore_dev_api.h"
+#include "base/ecore_l2_api.h"
+#include "base/ecore_vf_api.h"
+#include "base/ecore_hsi_common.h"
+#include "base/ecore_int_api.h"
+#include "base/ecore_chain.h"
+#include "base/ecore_status.h"
+#include "base/ecore_hsi_eth.h"
+#include "base/ecore_iov_api.h"
+#include "base/ecore_cxt.h"
+#include "base/nvm_cfg.h"
+#include "base/ecore_sp_commands.h"
+#include "base/ecore_l2.h"
+#include "base/ecore_vf.h"
+
+#include "qede_logs.h"
+#include "qede_if.h"
+#include "qede_rxtx.h"
+
+#define qede_stringify1(x...) #x
+#define qede_stringify(x...) qede_stringify1(x)
+
+/* Driver versions */
+#define QEDE_PMD_VER_PREFIX "QEDE PMD"
+#define QEDE_PMD_VERSION_MAJOR 2
+#define QEDE_PMD_VERSION_MINOR 9
+#define QEDE_PMD_VERSION_REVISION 0
+#define QEDE_PMD_VERSION_PATCH 1
+
+#define QEDE_PMD_VERSION qede_stringify(QEDE_PMD_VERSION_MAJOR) "." \
+ qede_stringify(QEDE_PMD_VERSION_MINOR) "." \
+ qede_stringify(QEDE_PMD_VERSION_REVISION) "." \
+ qede_stringify(QEDE_PMD_VERSION_PATCH)
+
+#define QEDE_PMD_DRV_VER_STR_SIZE NAME_SIZE
+#define QEDE_PMD_VER_PREFIX "QEDE PMD"
+
+
+#define QEDE_RSS_INDIR_INITED (1 << 0)
+#define QEDE_RSS_KEY_INITED (1 << 1)
+#define QEDE_RSS_CAPS_INITED (1 << 2)
+
+#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \
+ (edev)->dev_info.num_tc)
+
+#define QEDE_QUEUE_CNT(qdev) ((qdev)->num_queues)
+#define QEDE_RSS_COUNT(qdev) ((qdev)->num_rx_queues)
+#define QEDE_TSS_COUNT(qdev) ((qdev)->num_tx_queues)
+
+#define QEDE_DUPLEX_FULL 1
+#define QEDE_DUPLEX_HALF 2
+#define QEDE_DUPLEX_UNKNOWN 0xff
+
+#define QEDE_SUPPORTED_AUTONEG (1 << 6)
+#define QEDE_SUPPORTED_PAUSE (1 << 13)
+
+#define QEDE_INIT_QDEV(eth_dev) (eth_dev->data->dev_private)
+
+#define QEDE_INIT_EDEV(adapter) (&((struct qede_dev *)adapter)->edev)
+
+#define QEDE_INIT(eth_dev) { \
+ struct qede_dev *qdev = eth_dev->data->dev_private; \
+ struct ecore_dev *edev = &qdev->edev; \
+}
+
+/************* QLogic 10G/25G/40G/50G/100G vendor/devices ids *************/
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+
+#define CHIP_NUM_57980E 0x1634
+#define CHIP_NUM_57980S 0x1629
+#define CHIP_NUM_VF 0x1630
+#define CHIP_NUM_57980S_40 0x1634
+#define CHIP_NUM_57980S_25 0x1656
+#define CHIP_NUM_57980S_IOV 0x1664
+#define CHIP_NUM_57980S_100 0x1644
+#define CHIP_NUM_57980S_50 0x1654
+#define CHIP_NUM_AH_50G 0x8070
+#define CHIP_NUM_AH_10G 0x8071
+#define CHIP_NUM_AH_40G 0x8072
+#define CHIP_NUM_AH_25G 0x8073
+#define CHIP_NUM_AH_IOV 0x8090
+
+#define PCI_DEVICE_ID_QLOGIC_NX2_57980E CHIP_NUM_57980E
+#define PCI_DEVICE_ID_QLOGIC_NX2_57980S CHIP_NUM_57980S
+#define PCI_DEVICE_ID_QLOGIC_NX2_VF CHIP_NUM_VF
+#define PCI_DEVICE_ID_QLOGIC_57980S_40 CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_QLOGIC_57980S_25 CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_QLOGIC_57980S_IOV CHIP_NUM_57980S_IOV
+#define PCI_DEVICE_ID_QLOGIC_57980S_100 CHIP_NUM_57980S_100
+#define PCI_DEVICE_ID_QLOGIC_57980S_50 CHIP_NUM_57980S_50
+#define PCI_DEVICE_ID_QLOGIC_AH_50G CHIP_NUM_AH_50G
+#define PCI_DEVICE_ID_QLOGIC_AH_10G CHIP_NUM_AH_10G
+#define PCI_DEVICE_ID_QLOGIC_AH_40G CHIP_NUM_AH_40G
+#define PCI_DEVICE_ID_QLOGIC_AH_25G CHIP_NUM_AH_25G
+#define PCI_DEVICE_ID_QLOGIC_AH_IOV CHIP_NUM_AH_IOV
+
+
+
+extern char fw_file[];
+
+/* Number of PF connections - 32 RX + 32 TX */
+#define QEDE_PF_NUM_CONNS (64)
+
+/* Maximum number of flowdir filters */
+#define QEDE_RFS_MAX_FLTR (256)
+
+#define QEDE_MAX_MCAST_FILTERS (64)
+
+enum qed_filter_rx_mode_type {
+ QED_FILTER_RX_MODE_TYPE_REGULAR,
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+ QED_FILTER_RX_MODE_TYPE_PROMISC,
+};
+
+struct qede_vlan_entry {
+ SLIST_ENTRY(qede_vlan_entry) list;
+ uint16_t vid;
+};
+
+struct qede_mcast_entry {
+ struct ether_addr mac;
+ SLIST_ENTRY(qede_mcast_entry) list;
+};
+
+struct qede_ucast_entry {
+ struct ether_addr mac;
+ uint16_t vlan;
+ uint16_t vni;
+ SLIST_ENTRY(qede_ucast_entry) list;
+};
+
+struct qede_fdir_entry {
+ uint32_t soft_id; /* unused for now */
+ uint16_t pkt_len; /* actual packet length to match */
+ uint16_t rx_queue; /* queue to be steered to */
+ const struct rte_memzone *mz; /* mz used to hold L2 frame */
+ SLIST_ENTRY(qede_fdir_entry) list;
+};
+
+struct qede_fdir_info {
+ struct ecore_arfs_config_params arfs;
+ uint16_t filter_count;
+ SLIST_HEAD(fdir_list_head, qede_fdir_entry)fdir_list_head;
+};
+
+/* IANA assigned default UDP ports for encapsulation protocols */
+#define QEDE_VXLAN_DEF_PORT (4789)
+#define QEDE_GENEVE_DEF_PORT (6081)
+
+struct qede_tunn_params {
+ bool enable;
+ uint16_t num_filters;
+ uint16_t filter_type;
+ uint16_t udp_port;
+};
+
+/*
+ * Structure to store private data for each port.
+ */
+struct qede_dev {
+ struct ecore_dev edev;
+ const struct qed_eth_ops *ops;
+ struct qed_dev_eth_info dev_info;
+ struct ecore_sb_info *sb_array;
+ struct qede_fastpath *fp_array;
+ uint16_t mtu;
+ bool enable_tx_switching;
+ bool rss_enable;
+ struct rte_eth_rss_conf rss_conf;
+ uint16_t rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+ uint64_t rss_hf;
+ uint8_t rss_key_len;
+ bool enable_lro;
+ uint8_t num_rx_queues;
+ uint8_t num_tx_queues;
+ SLIST_HEAD(vlan_list_head, qede_vlan_entry)vlan_list_head;
+ uint16_t configured_vlans;
+ bool accept_any_vlan;
+ struct ether_addr primary_mac;
+ SLIST_HEAD(mc_list_head, qede_mcast_entry) mc_list_head;
+ uint16_t num_mc_addr;
+ SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
+ uint16_t num_uc_addr;
+ bool handle_hw_err;
+ struct qede_tunn_params vxlan;
+ struct qede_tunn_params geneve;
+ struct qede_tunn_params ipgre;
+ struct qede_fdir_info fdir_info;
+ bool vlan_strip_flg;
+ char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
+ bool vport_started;
+ int vlan_offload_mask;
+ void *ethdev;
+};
+
+/* Non-static functions */
+int qede_config_rss(struct rte_eth_dev *eth_dev);
+
+int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+
+int qed_fill_eth_dev_info(struct ecore_dev *edev,
+ struct qed_dev_eth_info *info);
+int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up);
+
+int qede_link_update(struct rte_eth_dev *eth_dev,
+ __rte_unused int wait_to_complete);
+
+int qede_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type type,
+ enum rte_filter_op op, void *arg);
+
+int qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op, void *arg);
+
+int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op, void *arg);
+
+int qede_check_fdir_support(struct rte_eth_dev *eth_dev);
+
+uint16_t qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ void *buff,
+ struct ecore_arfs_config_params *params);
+
+void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev);
+
+int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg);
+
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu);
+
+int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg);
+
+#endif /* _QEDE_ETHDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/qede/qede_fdir.c b/src/spdk/dpdk/drivers/net/qede/qede_fdir.c
new file mode 100644
index 00000000..83580d04
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/qede_fdir.c
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_errno.h>
+
+#include "qede_ethdev.h"
+
+#define IP_VERSION (0x40)
+#define IP_HDRLEN (0x5)
+#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
+#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
+#define QEDE_FDIR_IPV4_DEF_TTL (64)
+
+/* Sum of length of header types of L2, L3, L4.
+ * L2 : ether_hdr + vlan_hdr + vxlan_hdr
+ * L3 : ipv6_hdr
+ * L4 : tcp_hdr
+ */
+#define QEDE_MAX_FDIR_PKT_LEN (86)
+
+#ifndef IPV6_ADDR_LEN
+#define IPV6_ADDR_LEN (16)
+#endif
+
+#define QEDE_VALID_FLOW(flow_type) \
+ ((flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
+
+/* Note: Flowdir support is only partial.
+ * For ex: drop_queue, FDIR masks, flex_conf are not supported.
+ * Parameters like pballoc/status fields are irrelevant here.
+ */
+int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+
+ /* check FDIR modes */
+ switch (fdir->mode) {
+ case RTE_FDIR_MODE_NONE:
+ qdev->fdir_info.arfs.arfs_enable = false;
+ DP_INFO(edev, "flowdir is disabled\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT:
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ qdev->fdir_info.arfs.arfs_enable = false;
+ return -ENOTSUP;
+ }
+ qdev->fdir_info.arfs.arfs_enable = true;
+ DP_INFO(edev, "flowdir is enabled\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT_TUNNEL:
+ case RTE_FDIR_MODE_SIGNATURE:
+ case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
+ DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct qede_fdir_entry *tmp = NULL;
+
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (tmp) {
+ if (tmp->mz)
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
+ qede_fdir_entry, list);
+ rte_free(tmp);
+ }
+ }
+}
+
+static int
+qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir_filter,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
+ struct qede_fdir_entry *tmp = NULL;
+ struct qede_fdir_entry *fdir = NULL;
+ const struct rte_memzone *mz;
+ struct ecore_hwfn *p_hwfn;
+ enum _ecore_status_t rc;
+ uint16_t pkt_len;
+ void *pkt;
+
+ if (add) {
+ if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
+ DP_ERR(edev, "Reached max flowdir filter limit\n");
+ return -EINVAL;
+ }
+ fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!fdir) {
+ DP_ERR(edev, "Did not allocate memory for fdir\n");
+ return -ENOMEM;
+ }
+ }
+ /* soft_id could have been used as memzone string, but soft_id is
+ * not currently used so it has no significance.
+ */
+ snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
+ SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+ if (!mz) {
+ DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
+ rte_strerror(rte_errno));
+ rc = -rte_errno;
+ goto err1;
+ }
+
+ pkt = mz->addr;
+ memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
+ pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
+ &qdev->fdir_info.arfs);
+ if (pkt_len == 0) {
+ rc = -EINVAL;
+ goto err2;
+ }
+ DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
+ if (add) {
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
+ DP_INFO(edev, "flowdir filter exist\n");
+ rc = 0;
+ goto err2;
+ }
+ }
+ } else {
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
+ break;
+ }
+ if (!tmp) {
+ DP_ERR(edev, "flowdir filter does not exist\n");
+ rc = -EEXIST;
+ goto err2;
+ }
+ }
+ p_hwfn = ECORE_LEADING_HWFN(edev);
+ if (add) {
+ if (!qdev->fdir_info.arfs.arfs_enable) {
+ /* Force update */
+ eth_dev->data->dev_conf.fdir_conf.mode =
+ RTE_FDIR_MODE_PERFECT;
+ qdev->fdir_info.arfs.arfs_enable = true;
+ DP_INFO(edev, "Force enable flowdir in perfect mode\n");
+ }
+ /* Enable ARFS searcher with updated flow_types */
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->fdir_info.arfs);
+ }
+ /* configure filter with ECORE_SPQ_MODE_EBLOCK */
+ rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
+ (dma_addr_t)mz->iova,
+ pkt_len,
+ fdir_filter->action.rx_queue,
+ 0, add);
+ if (rc == ECORE_SUCCESS) {
+ if (add) {
+ fdir->rx_queue = fdir_filter->action.rx_queue;
+ fdir->pkt_len = pkt_len;
+ fdir->mz = mz;
+ SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
+ fdir, list);
+ qdev->fdir_info.filter_count++;
+ DP_INFO(edev, "flowdir filter added, count = %d\n",
+ qdev->fdir_info.filter_count);
+ } else {
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
+ qede_fdir_entry, list);
+ rte_free(tmp); /* the node deleted */
+ rte_memzone_free(mz); /* temp node allocated */
+ qdev->fdir_info.filter_count--;
+ DP_INFO(edev, "Fdir filter deleted, count = %d\n",
+ qdev->fdir_info.filter_count);
+ }
+ } else {
+ DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
+ rc, qdev->fdir_info.filter_count);
+ }
+
+ /* Disable ARFS searcher if there are no more filters */
+ if (qdev->fdir_info.filter_count == 0) {
+ memset(&qdev->fdir_info.arfs, 0,
+ sizeof(struct ecore_arfs_config_params));
+ DP_INFO(edev, "Disabling flowdir\n");
+ qdev->fdir_info.arfs.arfs_enable = false;
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->fdir_info.arfs);
+ }
+ return 0;
+
+err2:
+ rte_memzone_free(mz);
+err1:
+ if (add)
+ rte_free(fdir);
+ return rc;
+}
+
+static int
+qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ if (!QEDE_VALID_FLOW(fdir->input.flow_type)) {
+ DP_ERR(edev, "invalid flow_type input\n");
+ return -EINVAL;
+ }
+
+ if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
+ DP_ERR(edev, "invalid queue number %u\n",
+ fdir->action.rx_queue);
+ return -EINVAL;
+ }
+
+ if (fdir->input.flow_ext.is_vf) {
+ DP_ERR(edev, "flowdir is not supported over VF\n");
+ return -EINVAL;
+ }
+
+ return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
+}
+
+/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
+uint16_t
+qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ void *buff,
+ struct ecore_arfs_config_params *params)
+
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ uint16_t *ether_type;
+ uint8_t *raw_pkt;
+ struct rte_eth_fdir_input *input;
+ static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+ struct ipv4_hdr *ip;
+ struct ipv6_hdr *ip6;
+ struct udp_hdr *udp;
+ struct tcp_hdr *tcp;
+ uint16_t len;
+ static const uint8_t next_proto[] = {
+ [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
+ };
+ raw_pkt = (uint8_t *)buff;
+ input = &fdir->input;
+ DP_INFO(edev, "flow_type %d\n", input->flow_type);
+
+ len = 2 * sizeof(struct ether_addr);
+ raw_pkt += 2 * sizeof(struct ether_addr);
+ if (input->flow_ext.vlan_tci) {
+ DP_INFO(edev, "adding VLAN header\n");
+ rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+ rte_memcpy(raw_pkt + sizeof(uint16_t),
+ &input->flow_ext.vlan_tci,
+ sizeof(uint16_t));
+ raw_pkt += sizeof(vlan_frame);
+ len += sizeof(vlan_frame);
+ }
+ ether_type = (uint16_t *)raw_pkt;
+ raw_pkt += sizeof(uint16_t);
+ len += sizeof(uint16_t);
+
+ switch (input->flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ /* fill the common ip header */
+ ip = (struct ipv4_hdr *)raw_pkt;
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
+ ip->total_length = sizeof(struct ipv4_hdr);
+ ip->next_proto_id = input->flow.ip4_flow.proto ?
+ input->flow.ip4_flow.proto :
+ next_proto[input->flow_type];
+ ip->time_to_live = input->flow.ip4_flow.ttl ?
+ input->flow.ip4_flow.ttl :
+ QEDE_FDIR_IPV4_DEF_TTL;
+ ip->type_of_service = input->flow.ip4_flow.tos;
+ ip->dst_addr = input->flow.ip4_flow.dst_ip;
+ ip->src_addr = input->flow.ip4_flow.src_ip;
+ len += sizeof(struct ipv4_hdr);
+ params->ipv4 = true;
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->dst_port = input->flow.udp4_flow.dst_port;
+ udp->src_port = input->flow.udp4_flow.src_port;
+ udp->dgram_len = sizeof(struct udp_hdr);
+ len += sizeof(struct udp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp->src_port = input->flow.tcp4_flow.src_port;
+ tcp->dst_port = input->flow.tcp4_flow.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ ip6 = (struct ipv6_hdr *)raw_pkt;
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ ip6->proto = input->flow.ipv6_flow.proto ?
+ input->flow.ipv6_flow.proto :
+ next_proto[input->flow_type];
+ rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.dst_ip,
+ IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.src_ip,
+ IPV6_ADDR_LEN);
+ len += sizeof(struct ipv6_hdr);
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->src_port = input->flow.udp6_flow.dst_port;
+ udp->dst_port = input->flow.udp6_flow.src_port;
+ len += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp->src_port = input->flow.tcp4_flow.src_port;
+ tcp->dst_port = input->flow.tcp4_flow.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported flow_type %u\n",
+ input->flow_type);
+ return 0;
+ }
+
+ return len;
+}
+
+int
+qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_fdir_filter *fdir;
+ int ret;
+
+ fdir = (struct rte_eth_fdir_filter *)arg;
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query flowdir support */
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 1);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 0);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_INFO:
+ return -ENOTSUP;
+ break;
+ default:
+ DP_ERR(edev, "unknown operation %u", filter_op);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_ntuple_filter *ntuple;
+ struct rte_eth_fdir_filter fdir_entry;
+ struct rte_eth_tcpv4_flow *tcpv4_flow;
+ struct rte_eth_udpv4_flow *udpv4_flow;
+ bool add = false;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query fdir support */
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ add = true;
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ break;
+ case RTE_ETH_FILTER_INFO:
+ case RTE_ETH_FILTER_GET:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_SET:
+ case RTE_ETH_FILTER_STATS:
+ case RTE_ETH_FILTER_OP_MAX:
+ DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
+ return -ENOTSUP;
+ }
+ ntuple = (struct rte_eth_ntuple_filter *)arg;
+ /* Internally convert ntuple to fdir entry */
+ memset(&fdir_entry, 0, sizeof(fdir_entry));
+ if (ntuple->proto == IPPROTO_TCP) {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
+ tcpv4_flow->ip.src_ip = ntuple->src_ip;
+ tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
+ tcpv4_flow->ip.proto = IPPROTO_TCP;
+ tcpv4_flow->src_port = ntuple->src_port;
+ tcpv4_flow->dst_port = ntuple->dst_port;
+ } else {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ udpv4_flow = &fdir_entry.input.flow.udp4_flow;
+ udpv4_flow->ip.src_ip = ntuple->src_ip;
+ udpv4_flow->ip.dst_ip = ntuple->dst_ip;
+ udpv4_flow->ip.proto = IPPROTO_TCP;
+ udpv4_flow->src_port = ntuple->src_port;
+ udpv4_flow->dst_port = ntuple->dst_port;
+ }
+
+ fdir_entry.action.rx_queue = ntuple->queue;
+
+ return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/qede_if.h b/src/spdk/dpdk/drivers/net/qede/qede_if.h
new file mode 100644
index 00000000..ee5e54c1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/qede_if.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef _QEDE_IF_H
+#define _QEDE_IF_H
+
+#include "qede_ethdev.h"
+
+/* forward */
+struct ecore_dev;
+struct qed_sb_info;
+struct qed_pf_params;
+enum ecore_int_mode;
+
+struct qed_dev_info {
+ uint8_t num_hwfns;
+ uint8_t hw_mac[ETHER_ADDR_LEN];
+ bool is_mf_default;
+
+ /* FW version */
+ uint16_t fw_major;
+ uint16_t fw_minor;
+ uint16_t fw_rev;
+ uint16_t fw_eng;
+
+ /* MFW version */
+ uint32_t mfw_rev;
+#define QED_MFW_VERSION_0_MASK 0x000000FF
+#define QED_MFW_VERSION_0_OFFSET 0
+#define QED_MFW_VERSION_1_MASK 0x0000FF00
+#define QED_MFW_VERSION_1_OFFSET 8
+#define QED_MFW_VERSION_2_MASK 0x00FF0000
+#define QED_MFW_VERSION_2_OFFSET 16
+#define QED_MFW_VERSION_3_MASK 0xFF000000
+#define QED_MFW_VERSION_3_OFFSET 24
+
+ uint32_t flash_size;
+ bool b_arfs_capable;
+ bool b_inter_pf_switch;
+ bool tx_switching;
+ u16 mtu;
+
+ bool smart_an;
+
+ /* Out param for qede */
+ bool vxlan_enable;
+ bool gre_enable;
+ bool geneve_enable;
+
+ enum ecore_dev_type dev_type;
+};
+
+struct qed_dev_eth_info {
+ struct qed_dev_info common;
+
+ uint8_t num_queues;
+ uint8_t num_tc;
+
+ struct ether_addr port_mac;
+ uint16_t num_vlan_filters;
+ uint32_t num_mac_filters;
+
+ /* Legacy VF - this affects the datapath */
+ bool is_legacy;
+};
+
+#define INIT_STRUCT_FIELD(field, value) .field = value
+
+struct qed_eth_ops {
+ const struct qed_common_ops *common;
+ int (*fill_dev_info)(struct ecore_dev *edev,
+ struct qed_dev_eth_info *info);
+};
+
+struct qed_link_params {
+ bool link_up;
+
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG (1 << 0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS (1 << 1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED (1 << 2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG (1 << 3)
+#define QED_LINK_OVERRIDE_EEE_CONFIG (1 << 5)
+ uint32_t override_flags;
+ bool autoneg;
+ uint32_t adv_speeds;
+ uint32_t forced_speed;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE (1 << 0)
+#define QED_LINK_PAUSE_RX_ENABLE (1 << 1)
+#define QED_LINK_PAUSE_TX_ENABLE (1 << 2)
+ uint32_t pause_config;
+ struct ecore_link_eee_params eee;
+};
+
+struct qed_link_output {
+ bool link_up;
+ uint32_t supported_caps; /* In SUPPORTED defs */
+ uint32_t advertised_caps; /* In ADVERTISED defs */
+ uint32_t lp_caps; /* In ADVERTISED defs */
+ uint32_t speed; /* In Mb/s */
+ uint32_t adv_speed; /* Speed mask */
+ uint8_t duplex; /* In DUPLEX defs */
+ uint16_t port; /* In PORT defs */
+ bool autoneg;
+ uint32_t pause_config;
+
+ /* EEE - capability & param */
+ bool eee_supported;
+ bool eee_active;
+ u8 sup_caps;
+ struct ecore_link_eee_params eee;
+};
+
+struct qed_slowpath_params {
+ uint32_t int_mode;
+ uint8_t drv_major;
+ uint8_t drv_minor;
+ uint8_t drv_rev;
+ uint8_t drv_eng;
+ uint8_t name[NAME_SIZE];
+};
+
+struct qed_common_cb_ops {
+ void (*link_update)(void *dev, struct qed_link_output *link);
+};
+
+struct qed_common_ops {
+ int (*probe)(struct ecore_dev *edev,
+ struct rte_pci_device *pci_dev,
+ uint32_t dp_module, uint8_t dp_level, bool is_vf);
+ void (*set_name)(struct ecore_dev *edev, char name[]);
+ enum _ecore_status_t
+ (*chain_alloc)(struct ecore_dev *edev,
+ enum ecore_chain_use_mode
+ intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type,
+ uint32_t num_elems,
+ osal_size_t elem_size,
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl);
+
+ void (*chain_free)(struct ecore_dev *edev,
+ struct ecore_chain *p_chain);
+
+ void (*get_link)(struct ecore_dev *edev,
+ struct qed_link_output *if_link);
+ int (*set_link)(struct ecore_dev *edev,
+ struct qed_link_params *params);
+
+ int (*drain)(struct ecore_dev *edev);
+
+ void (*remove)(struct ecore_dev *edev);
+
+ int (*slowpath_stop)(struct ecore_dev *edev);
+
+ void (*update_pf_params)(struct ecore_dev *edev,
+ struct ecore_pf_params *params);
+
+ int (*slowpath_start)(struct ecore_dev *edev,
+ struct qed_slowpath_params *params);
+
+ int (*set_fp_int)(struct ecore_dev *edev, uint16_t cnt);
+
+ uint32_t (*sb_init)(struct ecore_dev *edev,
+ struct ecore_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ uint16_t sb_id);
+
+ int (*get_sb_info)(struct ecore_dev *edev,
+ struct ecore_sb_info *sb, u16 qid,
+ struct ecore_sb_info_dbg *sb_dbg);
+
+ bool (*can_link_change)(struct ecore_dev *edev);
+
+ void (*update_msglvl)(struct ecore_dev *edev,
+ uint32_t dp_module, uint8_t dp_level);
+
+ int (*send_drv_state)(struct ecore_dev *edev, bool active);
+};
+
+/* Externs */
+
+const struct qed_eth_ops *qed_get_eth_ops(void);
+
+#endif /* _QEDE_IF_H */
diff --git a/src/spdk/dpdk/drivers/net/qede/qede_logs.h b/src/spdk/dpdk/drivers/net/qede/qede_logs.h
new file mode 100644
index 00000000..3187d97b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/qede_logs.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef _QEDE_LOGS_H_
+#define _QEDE_LOGS_H_
+
+extern int qede_logtype_driver;
+
+#define DP_ERR(p_dev, fmt, ...) \
+ rte_log(RTE_LOG_ERR, qede_logtype_driver, \
+ "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ ##__VA_ARGS__)
+
+#define DP_NOTICE(p_dev, is_assert, fmt, ...) \
+do { \
+ if (is_assert) \
+ rte_log(RTE_LOG_ERR, qede_logtype_driver,\
+ "[QEDE PMD: (%s)]%s:" fmt, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ __func__, \
+ ##__VA_ARGS__); \
+ else \
+ rte_log(RTE_LOG_NOTICE, qede_logtype_driver,\
+ "[QEDE PMD: (%s)]%s:" fmt, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ __func__, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define DP_INFO(p_dev, fmt, ...) \
+ rte_log(RTE_LOG_INFO, qede_logtype_driver, \
+ "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ ##__VA_ARGS__)
+
+#define DP_VERBOSE(p_dev, module, fmt, ...) \
+ do { \
+ if ((p_dev)->dp_module & module) \
+ rte_log(RTE_LOG_DEBUG, qede_logtype_driver, \
+ "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ ##__VA_ARGS__); \
+ } while (0)
+
+extern int qede_logtype_init;
+#define PMD_INIT_LOG(level, edev, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, qede_logtype_init, \
+ "[qede_pmd: %s] %s() " fmt "\n", \
+ (edev)->name, __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE(edev) PMD_INIT_LOG(DEBUG, edev, " >>")
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+#define PMD_TX_LOG(level, q, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n", \
+ __func__, q->port_id, q->queue_id, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+#define PMD_RX_LOG(level, q, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n", \
+ __func__, q->port_id, q->queue_id, ## args)
+#else
+#define PMD_RX_LOG(level, q, fmt, args...) do { } while (0)
+#endif
+
+#endif /* _QEDE_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/qede/qede_main.c b/src/spdk/dpdk/drivers/net/qede/qede_main.c
new file mode 100644
index 00000000..46fa8371
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/qede_main.c
@@ -0,0 +1,784 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include <limits.h>
+#include <time.h>
+#include <rte_alarm.h>
+#include <rte_string_fns.h>
+
+#include "qede_ethdev.h"
+
+/* Alarm timeout. */
+#define QEDE_ALARM_TIMEOUT_US 100000
+
+/* Global variable to hold absolute path of fw file */
+char fw_file[PATH_MAX];
+
+const char *QEDE_DEFAULT_FIRMWARE =
+ "/lib/firmware/qed/qed_init_values-8.33.12.0.bin";
+
+static void
+qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
+{
+ int i;
+
+ for (i = 0; i < edev->num_hwfns; i++) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+ p_hwfn->pf_params = *params;
+ }
+}
+
+static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
+{
+ edev->regview = pci_dev->mem_resource[0].addr;
+ edev->doorbells = pci_dev->mem_resource[2].addr;
+ edev->db_size = pci_dev->mem_resource[2].len;
+}
+
+static int
+qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
+ uint32_t dp_module, uint8_t dp_level, bool is_vf)
+{
+ struct ecore_hw_prepare_params hw_prepare_params;
+ int rc;
+
+ ecore_init_struct(edev);
+ edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+ /* Protocol type is always fixed to PROTOCOL_ETH */
+
+ if (is_vf)
+ edev->b_is_vf = true;
+
+ ecore_init_dp(edev, dp_module, dp_level, NULL);
+ qed_init_pci(edev, pci_dev);
+
+ memset(&hw_prepare_params, 0, sizeof(hw_prepare_params));
+ hw_prepare_params.personality = ECORE_PCI_ETH;
+ hw_prepare_params.drv_resc_alloc = false;
+ hw_prepare_params.chk_reg_fifo = false;
+ hw_prepare_params.initiate_pf_flr = true;
+ hw_prepare_params.allow_mdump = false;
+ hw_prepare_params.b_en_pacing = false;
+ hw_prepare_params.epoch = (u32)time(NULL);
+ rc = ecore_hw_prepare(edev, &hw_prepare_params);
+ if (rc) {
+ DP_ERR(edev, "hw prepare failed\n");
+ return rc;
+ }
+
+ return rc;
+}
+
+static int qed_nic_setup(struct ecore_dev *edev)
+{
+ int rc;
+
+ rc = ecore_resc_alloc(edev);
+ if (rc)
+ return rc;
+
+ DP_INFO(edev, "Allocated qed resources\n");
+ ecore_resc_setup(edev);
+
+ return rc;
+}
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+static int qed_alloc_stream_mem(struct ecore_dev *edev)
+{
+ int i;
+
+ for_each_hwfn(edev, i) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+ p_hwfn->stream = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*p_hwfn->stream));
+ if (!p_hwfn->stream)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void qed_free_stream_mem(struct ecore_dev *edev)
+{
+ int i;
+
+ for_each_hwfn(edev, i) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+ if (!p_hwfn->stream)
+ return;
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->stream);
+ }
+}
+#endif
+
+#ifdef CONFIG_ECORE_BINARY_FW
+static int qed_load_firmware_data(struct ecore_dev *edev)
+{
+ int fd;
+ struct stat st;
+ const char *fw = RTE_LIBRTE_QEDE_FW;
+
+ if (strcmp(fw, "") == 0)
+ strcpy(fw_file, QEDE_DEFAULT_FIRMWARE);
+ else
+ strcpy(fw_file, fw);
+
+ fd = open(fw_file, O_RDONLY);
+ if (fd < 0) {
+ DP_ERR(edev, "Can't open firmware file\n");
+ return -ENOENT;
+ }
+
+ if (fstat(fd, &st) < 0) {
+ DP_ERR(edev, "Can't stat firmware file\n");
+ close(fd);
+ return -1;
+ }
+
+ edev->firmware = rte_zmalloc("qede_fw", st.st_size,
+ RTE_CACHE_LINE_SIZE);
+ if (!edev->firmware) {
+ DP_ERR(edev, "Can't allocate memory for firmware\n");
+ close(fd);
+ return -ENOMEM;
+ }
+
+ if (read(fd, edev->firmware, st.st_size) != st.st_size) {
+ DP_ERR(edev, "Can't read firmware data\n");
+ close(fd);
+ return -1;
+ }
+
+ edev->fw_len = st.st_size;
+ if (edev->fw_len < 104) {
+ DP_ERR(edev, "Invalid fw size: %" PRIu64 "\n",
+ edev->fw_len);
+ close(fd);
+ return -EINVAL;
+ }
+
+ close(fd);
+ return 0;
+}
+#endif
+
+static void qed_handle_bulletin_change(struct ecore_hwfn *hwfn)
+{
+ uint8_t mac[ETH_ALEN], is_mac_exist, is_mac_forced;
+
+ is_mac_exist = ecore_vf_bulletin_get_forced_mac(hwfn, mac,
+ &is_mac_forced);
+ if (is_mac_exist && is_mac_forced)
+ rte_memcpy(hwfn->hw_info.hw_mac_addr, mac, ETH_ALEN);
+
+ /* Always update link configuration according to bulletin */
+ qed_link_update(hwfn);
+}
+
+static void qede_vf_task(void *arg)
+{
+ struct ecore_hwfn *p_hwfn = arg;
+ uint8_t change = 0;
+
+ /* Read the bulletin board, and re-schedule the task */
+ ecore_vf_read_bulletin(p_hwfn, &change);
+ if (change)
+ qed_handle_bulletin_change(p_hwfn);
+
+ rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task, p_hwfn);
+}
+
+static void qed_start_iov_task(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ if (!IS_PF(edev))
+ rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task,
+ p_hwfn);
+ }
+}
+
+static void qed_stop_iov_task(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ if (!IS_PF(edev))
+ rte_eal_alarm_cancel(qede_vf_task, p_hwfn);
+ }
+}
+static int qed_slowpath_start(struct ecore_dev *edev,
+ struct qed_slowpath_params *params)
+{
+ struct ecore_drv_load_params drv_load_params;
+ struct ecore_hw_init_params hw_init_params;
+ struct ecore_mcp_drv_version drv_version;
+ const uint8_t *data = NULL;
+ struct ecore_hwfn *hwfn;
+ struct ecore_ptt *p_ptt;
+ int rc;
+
+ if (IS_PF(edev)) {
+#ifdef CONFIG_ECORE_BINARY_FW
+ rc = qed_load_firmware_data(edev);
+ if (rc) {
+ DP_ERR(edev, "Failed to find fw file %s\n", fw_file);
+ goto err;
+ }
+#endif
+ hwfn = ECORE_LEADING_HWFN(edev);
+ if (edev->num_hwfns == 1) { /* skip aRFS for 100G device */
+ p_ptt = ecore_ptt_acquire(hwfn);
+ if (p_ptt) {
+ ECORE_LEADING_HWFN(edev)->p_arfs_ptt = p_ptt;
+ } else {
+ DP_ERR(edev, "Failed to acquire PTT for flowdir\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+ }
+
+ rc = qed_nic_setup(edev);
+ if (rc)
+ goto err;
+
+ /* set int_coalescing_mode */
+ edev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ if (IS_PF(edev)) {
+ /* Allocate stream for unzipping */
+ rc = qed_alloc_stream_mem(edev);
+ if (rc) {
+ DP_ERR(edev, "Failed to allocate stream memory\n");
+ goto err1;
+ }
+ }
+#endif
+
+ qed_start_iov_task(edev);
+
+#ifdef CONFIG_ECORE_BINARY_FW
+ if (IS_PF(edev))
+ data = (const uint8_t *)edev->firmware + sizeof(u32);
+#endif
+
+ /* Start the slowpath */
+ memset(&hw_init_params, 0, sizeof(hw_init_params));
+ hw_init_params.b_hw_start = true;
+ hw_init_params.int_mode = params->int_mode;
+ hw_init_params.allow_npar_tx_switch = true;
+ hw_init_params.bin_fw_data = data;
+
+ memset(&drv_load_params, 0, sizeof(drv_load_params));
+ drv_load_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
+ drv_load_params.avoid_eng_reset = false;
+ drv_load_params.override_force_load = ECORE_OVERRIDE_FORCE_LOAD_ALWAYS;
+ hw_init_params.p_drv_load_params = &drv_load_params;
+
+ rc = ecore_hw_init(edev, &hw_init_params);
+ if (rc) {
+ DP_ERR(edev, "ecore_hw_init failed\n");
+ goto err2;
+ }
+
+ DP_INFO(edev, "HW inited and function started\n");
+
+ if (IS_PF(edev)) {
+ hwfn = ECORE_LEADING_HWFN(edev);
+ drv_version.version = (params->drv_major << 24) |
+ (params->drv_minor << 16) |
+ (params->drv_rev << 8) | (params->drv_eng);
+ strlcpy((char *)drv_version.name, (const char *)params->name,
+ sizeof(drv_version.name));
+ rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+ &drv_version);
+ if (rc) {
+ DP_ERR(edev, "Failed sending drv version command\n");
+ goto err3;
+ }
+ }
+
+ ecore_reset_vport_stats(edev);
+
+ return 0;
+
+err3:
+ ecore_hw_stop(edev);
+err2:
+ qed_stop_iov_task(edev);
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ qed_free_stream_mem(edev);
+err1:
+#endif
+ ecore_resc_free(edev);
+err:
+#ifdef CONFIG_ECORE_BINARY_FW
+ if (IS_PF(edev)) {
+ if (edev->firmware)
+ rte_free(edev->firmware);
+ edev->firmware = NULL;
+ }
+#endif
+ qed_stop_iov_task(edev);
+
+ return rc;
+}
+
+static int
+qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(edev);
+ struct ecore_ptt *ptt = NULL;
+ struct ecore_tunnel_info *tun = &edev->tunnel;
+
+ memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+ if (tun->vxlan.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+ tun->vxlan.b_mode_enabled)
+ dev_info->vxlan_enable = true;
+
+ if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+ tun->l2_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
+ dev_info->gre_enable = true;
+
+ if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+ tun->l2_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
+ dev_info->geneve_enable = true;
+
+ dev_info->num_hwfns = edev->num_hwfns;
+ dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]);
+ dev_info->mtu = ECORE_LEADING_HWFN(edev)->hw_info.mtu;
+ dev_info->dev_type = edev->type;
+
+ rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
+ ETHER_ADDR_LEN);
+
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+
+ if (IS_PF(edev)) {
+ dev_info->b_inter_pf_switch =
+ OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH, &edev->mf_bits);
+ if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &edev->mf_bits))
+ dev_info->b_arfs_capable = true;
+ dev_info->tx_switching = false;
+
+ dev_info->smart_an = ecore_mcp_is_smart_an_supported(p_hwfn);
+
+ ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
+ if (ptt) {
+ ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
+ &dev_info->mfw_rev, NULL);
+
+ ecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt,
+ &dev_info->flash_size);
+
+ /* Workaround to allow PHY-read commands for
+ * B0 bringup.
+ */
+ if (ECORE_IS_BB_B0(edev))
+ dev_info->flash_size = 0xffffffff;
+
+ ecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt);
+ }
+ } else {
+ ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
+ &dev_info->mfw_rev, NULL);
+ }
+
+ return 0;
+}
+
+int
+qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
+{
+ uint8_t queues = 0;
+ int i;
+
+ memset(info, 0, sizeof(*info));
+
+ info->num_tc = 1 /* @@@TBD aelior MULTI_COS */;
+
+ if (IS_PF(edev)) {
+ int max_vf_vlan_filters = 0;
+
+ info->num_queues = 0;
+ for_each_hwfn(edev, i)
+ info->num_queues +=
+ FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE);
+
+ if (IS_ECORE_SRIOV(edev))
+ max_vf_vlan_filters = edev->p_iov_info->total_vfs *
+ ECORE_ETH_VF_NUM_VLAN_FILTERS;
+ info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN) -
+ max_vf_vlan_filters;
+
+ rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
+ ETHER_ADDR_LEN);
+ } else {
+ ecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev),
+ &info->num_queues);
+ if (ECORE_IS_CMT(edev)) {
+ ecore_vf_get_num_rxqs(&edev->hwfns[1], &queues);
+ info->num_queues += queues;
+ }
+
+ ecore_vf_get_num_vlan_filters(&edev->hwfns[0],
+ (u8 *)&info->num_vlan_filters);
+
+ ecore_vf_get_port_mac(&edev->hwfns[0],
+ (uint8_t *)&info->port_mac);
+
+ info->is_legacy = ecore_vf_get_pre_fp_hsi(&edev->hwfns[0]);
+ }
+
+ qed_fill_dev_info(edev, &info->common);
+
+ if (IS_VF(edev))
+ memset(&info->common.hw_mac, 0, ETHER_ADDR_LEN);
+
+ return 0;
+}
+
+static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE])
+{
+ int i;
+
+ rte_memcpy(edev->name, name, NAME_SIZE);
+ for_each_hwfn(edev, i) {
+ snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+ }
+}
+
+static uint32_t
+qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
+ void *sb_virt_addr, dma_addr_t sb_phy_addr, uint16_t sb_id)
+{
+ struct ecore_hwfn *p_hwfn;
+ int hwfn_index;
+ uint16_t rel_sb_id;
+ uint8_t n_hwfns = edev->num_hwfns;
+ uint32_t rc;
+
+ hwfn_index = sb_id % n_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ rel_sb_id = sb_id / n_hwfns;
+
+ DP_INFO(edev, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+ hwfn_index, rel_sb_id, sb_id);
+
+ rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
+ sb_virt_addr, sb_phy_addr, rel_sb_id);
+
+ return rc;
+}
+
+static void qed_fill_link(struct ecore_hwfn *hwfn,
+ __rte_unused struct ecore_ptt *ptt,
+ struct qed_link_output *if_link)
+{
+ struct ecore_mcp_link_params params;
+ struct ecore_mcp_link_state link;
+ struct ecore_mcp_link_capabilities link_caps;
+ uint8_t change = 0;
+
+ memset(if_link, 0, sizeof(*if_link));
+
+ /* Prepare source inputs */
+ if (IS_PF(hwfn->p_dev)) {
+ rte_memcpy(&params, ecore_mcp_get_link_params(hwfn),
+ sizeof(params));
+ rte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link));
+ rte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn),
+ sizeof(link_caps));
+ } else {
+ ecore_vf_read_bulletin(hwfn, &change);
+ ecore_vf_get_link_params(hwfn, &params);
+ ecore_vf_get_link_state(hwfn, &link);
+ ecore_vf_get_link_caps(hwfn, &link_caps);
+ }
+
+ /* Set the link parameters to pass to protocol driver */
+ if (link.link_up)
+ if_link->link_up = true;
+
+ if (link.link_up)
+ if_link->speed = link.speed;
+
+ if_link->duplex = QEDE_DUPLEX_FULL;
+
+ /* Fill up the native advertised speed cap mask */
+ if_link->adv_speed = params.speed.advertised_speeds;
+
+ if (params.speed.autoneg)
+ if_link->supported_caps |= QEDE_SUPPORTED_AUTONEG;
+
+ if (params.pause.autoneg || params.pause.forced_rx ||
+ params.pause.forced_tx)
+ if_link->supported_caps |= QEDE_SUPPORTED_PAUSE;
+
+ if (params.pause.autoneg)
+ if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+
+ if (params.pause.forced_rx)
+ if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+
+ if (params.pause.forced_tx)
+ if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+
+ if (link_caps.default_eee == ECORE_MCP_EEE_UNSUPPORTED) {
+ if_link->eee_supported = false;
+ } else {
+ if_link->eee_supported = true;
+ if_link->eee_active = link.eee_active;
+ if_link->sup_caps = link_caps.eee_speed_caps;
+ /* MFW clears adv_caps on eee disable; use configured value */
+ if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
+ params.eee.adv_caps;
+ if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
+ if_link->eee.enable = params.eee.enable;
+ if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
+ if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
+ }
+}
+
+static void
+qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link)
+{
+ struct ecore_hwfn *hwfn;
+ struct ecore_ptt *ptt;
+
+ hwfn = &edev->hwfns[0];
+ if (IS_PF(edev)) {
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt)
+ DP_NOTICE(hwfn, true, "Failed to fill link; No PTT\n");
+
+ qed_fill_link(hwfn, ptt, if_link);
+
+ if (ptt)
+ ecore_ptt_release(hwfn, ptt);
+ } else {
+ qed_fill_link(hwfn, NULL, if_link);
+ }
+}
+
+static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
+{
+ struct ecore_hwfn *hwfn;
+ struct ecore_ptt *ptt;
+ struct ecore_mcp_link_params *link_params;
+ int rc;
+
+ if (IS_VF(edev))
+ return 0;
+
+ /* The link should be set only once per PF */
+ hwfn = &edev->hwfns[0];
+
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ link_params = ecore_mcp_get_link_params(hwfn);
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+ link_params->speed.autoneg = params->autoneg;
+
+ if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
+ if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+ link_params->pause.autoneg = true;
+ else
+ link_params->pause.autoneg = false;
+ if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
+ link_params->pause.forced_rx = true;
+ else
+ link_params->pause.forced_rx = false;
+ if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
+ link_params->pause.forced_tx = true;
+ else
+ link_params->pause.forced_tx = false;
+ }
+
+ if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
+ memcpy(&link_params->eee, &params->eee,
+ sizeof(link_params->eee));
+
+ rc = ecore_mcp_set_link(hwfn, ptt, params->link_up);
+
+ ecore_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+void qed_link_update(struct ecore_hwfn *hwfn)
+{
+ struct ecore_dev *edev = hwfn->p_dev;
+ struct qede_dev *qdev = (struct qede_dev *)edev;
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
+
+ if (!qede_link_update(dev, 0))
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
+static int qed_drain(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *hwfn;
+ struct ecore_ptt *ptt;
+ int i, rc;
+
+ if (IS_VF(edev))
+ return 0;
+
+ for_each_hwfn(edev, i) {
+ hwfn = &edev->hwfns[i];
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to drain NIG; No PTT\n");
+ return -EBUSY;
+ }
+ rc = ecore_mcp_drain(hwfn, ptt);
+ if (rc)
+ return rc;
+ ecore_ptt_release(hwfn, ptt);
+ }
+
+ return 0;
+}
+
+static int qed_nic_stop(struct ecore_dev *edev)
+{
+ int i, rc;
+
+ rc = ecore_hw_stop(edev);
+ for (i = 0; i < edev->num_hwfns; i++) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+ if (p_hwfn->b_sp_dpc_enabled)
+ p_hwfn->b_sp_dpc_enabled = false;
+ }
+ return rc;
+}
+
+static int qed_slowpath_stop(struct ecore_dev *edev)
+{
+#ifdef CONFIG_QED_SRIOV
+ int i;
+#endif
+
+ if (!edev)
+ return -ENODEV;
+
+ if (IS_PF(edev)) {
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ qed_free_stream_mem(edev);
+#endif
+
+#ifdef CONFIG_QED_SRIOV
+ if (IS_QED_ETH_IF(edev))
+ qed_sriov_disable(edev, true);
+#endif
+ }
+
+ qed_nic_stop(edev);
+
+ ecore_resc_free(edev);
+ qed_stop_iov_task(edev);
+
+ return 0;
+}
+
+static void qed_remove(struct ecore_dev *edev)
+{
+ if (!edev)
+ return;
+
+ ecore_hw_remove(edev);
+}
+
+static int qed_send_drv_state(struct ecore_dev *edev, bool active)
+{
+ struct ecore_hwfn *hwfn = ECORE_LEADING_HWFN(edev);
+ struct ecore_ptt *ptt;
+ int status = 0;
+
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = ecore_mcp_ov_update_driver_state(hwfn, ptt, active ?
+ ECORE_OV_DRIVER_STATE_ACTIVE :
+ ECORE_OV_DRIVER_STATE_DISABLED);
+
+ ecore_ptt_release(hwfn, ptt);
+
+ return status;
+}
+
+static int qed_get_sb_info(struct ecore_dev *edev, struct ecore_sb_info *sb,
+ u16 qid, struct ecore_sb_info_dbg *sb_dbg)
+{
+ struct ecore_hwfn *hwfn = &edev->hwfns[qid % edev->num_hwfns];
+ struct ecore_ptt *ptt;
+ int rc;
+
+ if (IS_VF(edev))
+ return -EINVAL;
+
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_ERR(hwfn, "Can't acquire PTT\n");
+ return -EAGAIN;
+ }
+
+ memset(sb_dbg, 0, sizeof(*sb_dbg));
+ rc = ecore_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);
+
+ ecore_ptt_release(hwfn, ptt);
+ return rc;
+}
+
+const struct qed_common_ops qed_common_ops_pass = {
+ INIT_STRUCT_FIELD(probe, &qed_probe),
+ INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params),
+ INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start),
+ INIT_STRUCT_FIELD(set_name, &qed_set_name),
+ INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc),
+ INIT_STRUCT_FIELD(chain_free, &ecore_chain_free),
+ INIT_STRUCT_FIELD(sb_init, &qed_sb_init),
+ INIT_STRUCT_FIELD(get_sb_info, &qed_get_sb_info),
+ INIT_STRUCT_FIELD(get_link, &qed_get_current_link),
+ INIT_STRUCT_FIELD(set_link, &qed_set_link),
+ INIT_STRUCT_FIELD(drain, &qed_drain),
+ INIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop),
+ INIT_STRUCT_FIELD(remove, &qed_remove),
+ INIT_STRUCT_FIELD(send_drv_state, &qed_send_drv_state),
+};
+
+const struct qed_eth_ops qed_eth_ops_pass = {
+ INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
+ INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(void)
+{
+ return &qed_eth_ops_pass;
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/qede_rxtx.c b/src/spdk/dpdk/drivers/net/qede/qede_rxtx.c
new file mode 100644
index 00000000..0f157ded
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/qede_rxtx.c
@@ -0,0 +1,2108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include <rte_net.h>
+#include "qede_rxtx.h"
+
+static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
+{
+ struct rte_mbuf *new_mb = NULL;
+ struct eth_rx_bd *rx_bd;
+ dma_addr_t mapping;
+ uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
+
+ new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (unlikely(!new_mb)) {
+ PMD_RX_LOG(ERR, rxq,
+ "Failed to allocate rx buffer "
+ "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
+ idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
+ rte_mempool_avail_count(rxq->mb_pool),
+ rte_mempool_in_use_count(rxq->mb_pool));
+ return -ENOMEM;
+ }
+ rxq->sw_rx_ring[idx].mbuf = new_mb;
+ rxq->sw_rx_ring[idx].page_offset = 0;
+ mapping = rte_mbuf_data_iova_default(new_mb);
+ /* Advance PROD and get BD pointer */
+ rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
+ rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
+ rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
+ rxq->sw_rx_prod++;
+ return 0;
+}
+
+int
+qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct qede_rx_queue *rxq;
+ uint16_t max_rx_pkt_len;
+ uint16_t bufsz;
+ size_t size;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
+ if (!rte_is_power_of_2(nb_desc)) {
+ DP_ERR(edev, "Ring size %u is not power of 2\n",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (!rxq) {
+ DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
+ socket_id);
+ return -ENOMEM;
+ }
+
+ rxq->qdev = qdev;
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+
+ max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+
+ /* Fix up RX buffer size */
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+ if (!dev->data->scattered_rx) {
+ DP_INFO(edev, "Forcing scatter-gather mode\n");
+ dev->data->scattered_rx = 1;
+ }
+ }
+
+ if (dev->data->scattered_rx)
+ rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
+ else
+ rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+ /* Align to cache-line size if needed */
+ rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
+
+ DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
+ qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
+
+ /* Allocate the parallel driver ring for Rx buffers */
+ size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
+ rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->sw_rx_ring) {
+ DP_ERR(edev, "Memory allocation fails for sw_rx_ring on"
+ " socket %u\n", socket_id);
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+
+ /* Allocate FW Rx ring */
+ rc = qdev->ops->common->chain_alloc(edev,
+ ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
+ ECORE_CHAIN_MODE_NEXT_PTR,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ rxq->nb_rx_desc,
+ sizeof(struct eth_rx_bd),
+ &rxq->rx_bd_ring,
+ NULL);
+
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Memory allocation fails for RX BD ring"
+ " on socket %u\n", socket_id);
+ rte_free(rxq->sw_rx_ring);
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+
+ /* Allocate FW completion ring */
+ rc = qdev->ops->common->chain_alloc(edev,
+ ECORE_CHAIN_USE_TO_CONSUME,
+ ECORE_CHAIN_MODE_PBL,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ rxq->nb_rx_desc,
+ sizeof(union eth_rx_cqe),
+ &rxq->rx_comp_ring,
+ NULL);
+
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Memory allocation fails for RX CQE ring"
+ " on socket %u\n", socket_id);
+ qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
+ rte_free(rxq->sw_rx_ring);
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+
+ dev->data->rx_queues[queue_idx] = rxq;
+ qdev->fp_array[queue_idx].rxq = rxq;
+
+ DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
+ queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
+
+ return 0;
+}
+
+static void
+qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
+ struct qede_rx_queue *rxq)
+{
+ DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
+ ecore_chain_reset(&rxq->rx_bd_ring);
+ ecore_chain_reset(&rxq->rx_comp_ring);
+ rxq->sw_rx_prod = 0;
+ rxq->sw_rx_cons = 0;
+ *rxq->hw_cons_ptr = 0;
+}
+
+static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
+{
+ uint16_t i;
+
+ if (rxq->sw_rx_ring) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_rx_ring[i].mbuf) {
+ rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
+ rxq->sw_rx_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+void qede_rx_queue_release(void *rx_queue)
+{
+ struct qede_rx_queue *rxq = rx_queue;
+ struct qede_dev *qdev = rxq->qdev;
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ if (rxq) {
+ qede_rx_queue_release_mbufs(rxq);
+ qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
+ qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
+ rte_free(rxq->sw_rx_ring);
+ rte_free(rxq);
+ }
+}
+
+/* Stops a given RX queue in the HW */
+static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_hwfn *p_hwfn;
+ struct qede_rx_queue *rxq;
+ int hwfn_index;
+ int rc;
+
+ if (rx_queue_id < eth_dev->data->nb_rx_queues) {
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ hwfn_index = rx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
+ true, false);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "RX queue %u stop fails\n", rx_queue_id);
+ return -1;
+ }
+ qede_rx_queue_release_mbufs(rxq);
+ qede_rx_queue_reset(qdev, rxq);
+ eth_dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
+ } else {
+ DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int
+qede_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct qede_tx_queue *txq;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ DP_ERR(edev, "Ring size %u is not power of 2\n",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->tx_queues[queue_idx] != NULL) {
+ qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (txq == NULL) {
+ DP_ERR(edev,
+ "Unable to allocate memory for txq on socket %u",
+ socket_id);
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->qdev = qdev;
+ txq->port_id = dev->data->port_id;
+
+ rc = qdev->ops->common->chain_alloc(edev,
+ ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
+ ECORE_CHAIN_MODE_PBL,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ txq->nb_tx_desc,
+ sizeof(union eth_tx_bd_types),
+ &txq->tx_pbl,
+ NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev,
+ "Unable to allocate memory for txbd ring on socket %u",
+ socket_id);
+ qede_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ /* Allocate software ring */
+ txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
+ (sizeof(struct qede_tx_entry) *
+ txq->nb_tx_desc),
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (!txq->sw_tx_ring) {
+ DP_ERR(edev,
+ "Unable to allocate memory for txbd ring on socket %u",
+ socket_id);
+ qdev->ops->common->chain_free(edev, &txq->tx_pbl);
+ qede_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ txq->queue_id = queue_idx;
+
+ txq->nb_tx_avail = txq->nb_tx_desc;
+
+ txq->tx_free_thresh =
+ tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
+ (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
+
+ dev->data->tx_queues[queue_idx] = txq;
+ qdev->fp_array[queue_idx].txq = txq;
+
+ DP_INFO(edev,
+ "txq %u num_desc %u tx_free_thresh %u socket %u\n",
+ queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
+
+ return 0;
+}
+
+static void
+qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
+ struct qede_tx_queue *txq)
+{
+ DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
+ ecore_chain_reset(&txq->tx_pbl);
+ txq->sw_tx_cons = 0;
+ txq->sw_tx_prod = 0;
+ *txq->hw_cons_ptr = 0;
+}
+
+static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
+{
+ uint16_t i;
+
+ if (txq->sw_tx_ring) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_tx_ring[i].mbuf) {
+ rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
+ txq->sw_tx_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+void qede_tx_queue_release(void *tx_queue)
+{
+ struct qede_tx_queue *txq = tx_queue;
+ struct qede_dev *qdev = txq->qdev;
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ if (txq) {
+ qede_tx_queue_release_mbufs(txq);
+ qdev->ops->common->chain_free(edev, &txq->tx_pbl);
+ rte_free(txq->sw_tx_ring);
+ rte_free(txq);
+ }
+}
+
+/* This function allocates fast-path status block memory */
+static int
+qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
+ uint16_t sb_id)
+{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct status_block_e4 *sb_virt;
+ dma_addr_t sb_phys;
+ int rc;
+
+ sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
+ sizeof(struct status_block_e4));
+ if (!sb_virt) {
+ DP_ERR(edev, "Status block allocation failed\n");
+ return -ENOMEM;
+ }
+ rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
+ sb_phys, sb_id);
+ if (rc) {
+ DP_ERR(edev, "Status block initialization failed\n");
+ OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
+ sizeof(struct status_block_e4));
+ return rc;
+ }
+
+ return 0;
+}
+
+int qede_alloc_fp_resc(struct qede_dev *qdev)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ struct qede_fastpath *fp;
+ uint32_t num_sbs;
+ uint16_t sb_idx;
+
+ if (IS_VF(edev))
+ ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
+ else
+ num_sbs = ecore_cxt_get_proto_cid_count
+ (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
+
+ if (num_sbs == 0) {
+ DP_ERR(edev, "No status blocks available\n");
+ return -EINVAL;
+ }
+
+ qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
+ sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
+
+ if (!qdev->fp_array) {
+ DP_ERR(edev, "fp array allocation failed\n");
+ return -ENOMEM;
+ }
+
+ memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
+ sizeof(*qdev->fp_array));
+
+ for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
+ fp = &qdev->fp_array[sb_idx];
+ if (!fp)
+ continue;
+ fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
+ RTE_CACHE_LINE_SIZE);
+ if (!fp->sb_info) {
+ DP_ERR(edev, "FP sb_info allocation fails\n");
+ return -1;
+ }
+ if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
+ DP_ERR(edev, "FP status block allocation fails\n");
+ return -1;
+ }
+ DP_INFO(edev, "sb_info idx 0x%x initialized\n",
+ fp->sb_info->igu_sb_id);
+ }
+
+ return 0;
+}
+
+void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qede_fastpath *fp;
+ uint16_t sb_idx;
+ uint8_t i;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
+ fp = &qdev->fp_array[sb_idx];
+ if (!fp)
+ continue;
+ DP_INFO(edev, "Free sb_info index 0x%x\n",
+ fp->sb_info->igu_sb_id);
+ if (fp->sb_info) {
+ OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
+ fp->sb_info->sb_phys,
+ sizeof(struct status_block_e4));
+ rte_free(fp->sb_info);
+ fp->sb_info = NULL;
+ }
+ }
+
+ /* Free packet buffers and ring memories */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ if (eth_dev->data->rx_queues[i]) {
+ qede_rx_queue_release(eth_dev->data->rx_queues[i]);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ if (eth_dev->data->tx_queues[i]) {
+ qede_tx_queue_release(eth_dev->data->tx_queues[i]);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+ }
+
+ if (qdev->fp_array)
+ rte_free(qdev->fp_array);
+ qdev->fp_array = NULL;
+}
+
+static inline void
+qede_update_rx_prod(__rte_unused struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
+ uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
+ struct eth_rx_prod_data rx_prods = { 0 };
+
+ /* Update producers */
+ rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
+ rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ */
+ rte_wmb();
+
+ internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+ (uint32_t *)&rx_prods);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the napi lock is released and another qede_poll is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ rte_wmb();
+
+ PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
+}
+
+/* Starts a given RX queue in HW */
+static int
+qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_queue_start_common_params params;
+ struct ecore_rxq_start_ret_params ret_params;
+ struct qede_rx_queue *rxq;
+ struct qede_fastpath *fp;
+ struct ecore_hwfn *p_hwfn;
+ dma_addr_t p_phys_table;
+ uint16_t page_cnt;
+ uint16_t j;
+ int hwfn_index;
+ int rc;
+
+ if (rx_queue_id < eth_dev->data->nb_rx_queues) {
+ fp = &qdev->fp_array[rx_queue_id];
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ /* Allocate buffers for the Rx ring */
+ for (j = 0; j < rxq->nb_rx_desc; j++) {
+ rc = qede_alloc_rx_buffer(rxq);
+ if (rc) {
+ DP_ERR(edev, "RX buffer allocation failed"
+ " for rxq = %u\n", rx_queue_id);
+ return -ENOMEM;
+ }
+ }
+ /* disable interrupts */
+ ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
+ /* Prepare ramrod */
+ memset(&params, 0, sizeof(params));
+ params.queue_id = rx_queue_id / edev->num_hwfns;
+ params.vport_id = 0;
+ params.stats_id = params.vport_id;
+ params.p_sb = fp->sb_info;
+ DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
+ fp->rxq->queue_id, fp->sb_info->igu_sb_id);
+ params.sb_idx = RX_PI;
+ hwfn_index = rx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
+ page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
+ memset(&ret_params, 0, sizeof(ret_params));
+ rc = ecore_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ &params, fp->rxq->rx_buf_size,
+ fp->rxq->rx_bd_ring.p_phys_addr,
+ p_phys_table, page_cnt,
+ &ret_params);
+ if (rc) {
+ DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
+ rx_queue_id, rc);
+ return -1;
+ }
+ /* Update with the returned parameters */
+ fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
+ fp->rxq->handle = ret_params.p_handle;
+
+ fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+ qede_update_rx_prod(qdev, fp->rxq);
+ eth_dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
+ } else {
+ DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int
+qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_queue_start_common_params params;
+ struct ecore_txq_start_ret_params ret_params;
+ struct ecore_hwfn *p_hwfn;
+ dma_addr_t p_phys_table;
+ struct qede_tx_queue *txq;
+ struct qede_fastpath *fp;
+ uint16_t page_cnt;
+ int hwfn_index;
+ int rc;
+
+ if (tx_queue_id < eth_dev->data->nb_tx_queues) {
+ txq = eth_dev->data->tx_queues[tx_queue_id];
+ fp = &qdev->fp_array[tx_queue_id];
+ memset(&params, 0, sizeof(params));
+ params.queue_id = tx_queue_id / edev->num_hwfns;
+ params.vport_id = 0;
+ params.stats_id = params.vport_id;
+ params.p_sb = fp->sb_info;
+ DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
+ fp->txq->queue_id, fp->sb_info->igu_sb_id);
+ params.sb_idx = TX_PI(0); /* tc = 0 */
+ p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
+ page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
+ hwfn_index = tx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ if (qdev->dev_info.is_legacy)
+ fp->txq->is_legacy = true;
+ rc = ecore_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ &params, 0 /* tc */,
+ p_phys_table, page_cnt,
+ &ret_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
+ tx_queue_id, rc);
+ return -1;
+ }
+ txq->doorbell_addr = ret_params.p_doorbell;
+ txq->handle = ret_params.p_handle;
+
+ txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
+ DB_DEST_XCM);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+ DB_AGG_CMD_SET);
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ETH_TX_BD_PROD_CMD);
+ txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ eth_dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
+ } else {
+ DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static inline void
+qede_free_tx_pkt(struct qede_tx_queue *txq)
+{
+ struct rte_mbuf *mbuf;
+ uint16_t nb_segs;
+ uint16_t idx;
+
+ idx = TX_CONS(txq);
+ mbuf = txq->sw_tx_ring[idx].mbuf;
+ if (mbuf) {
+ nb_segs = mbuf->nb_segs;
+ PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
+ while (nb_segs) {
+ /* It's like consuming rxbuf in recv() */
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+ nb_segs--;
+ }
+ rte_pktmbuf_free(mbuf);
+ txq->sw_tx_ring[idx].mbuf = NULL;
+ txq->sw_tx_cons++;
+ PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
+ } else {
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+ }
+}
+
+static inline void
+qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ uint16_t hw_bd_cons;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ uint16_t sw_tx_cons;
+#endif
+
+ rte_compiler_barrier();
+ hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
+ PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
+ abs(hw_bd_cons - sw_tx_cons));
+#endif
+ while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
+ qede_free_tx_pkt(txq);
+}
+
+static int qede_drain_txq(struct qede_dev *qdev,
+ struct qede_tx_queue *txq, bool allow_drain)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ int rc, cnt = 1000;
+
+ while (txq->sw_tx_cons != txq->sw_tx_prod) {
+ qede_process_tx_compl(edev, txq);
+ if (!cnt) {
+ if (allow_drain) {
+ DP_ERR(edev, "Tx queue[%u] is stuck,"
+ "requesting MCP to drain\n",
+ txq->queue_id);
+ rc = qdev->ops->common->drain(edev);
+ if (rc)
+ return rc;
+ return qede_drain_txq(qdev, txq, false);
+ }
+ DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
+ "PROD=%d, CONS=%d\n",
+ txq->queue_id, txq->sw_tx_prod,
+ txq->sw_tx_cons);
+ return -1;
+ }
+ cnt--;
+ DELAY(1000);
+ rte_compiler_barrier();
+ }
+
+ /* FW finished processing, wait for HW to transmit all tx packets */
+ DELAY(2000);
+
+ return 0;
+}
+
+/* Stops a given TX queue in the HW */
+static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_hwfn *p_hwfn;
+ struct qede_tx_queue *txq;
+ int hwfn_index;
+ int rc;
+
+ if (tx_queue_id < eth_dev->data->nb_tx_queues) {
+ txq = eth_dev->data->tx_queues[tx_queue_id];
+ /* Drain txq */
+ if (qede_drain_txq(qdev, txq, true))
+ return -1; /* For the lack of retcodes */
+ /* Stop txq */
+ hwfn_index = tx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id);
+ return -1;
+ }
+ qede_tx_queue_release_mbufs(txq);
+ qede_tx_queue_reset(qdev, txq);
+ eth_dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
+ } else {
+ DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int qede_start_queues(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ uint8_t id;
+ int rc = -1;
+
+ for_each_rss(id) {
+ rc = qede_rx_queue_start(eth_dev, id);
+ if (rc != ECORE_SUCCESS)
+ return -1;
+ }
+
+ for_each_tss(id) {
+ rc = qede_tx_queue_start(eth_dev, id);
+ if (rc != ECORE_SUCCESS)
+ return -1;
+ }
+
+ return rc;
+}
+
+void qede_stop_queues(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ uint8_t id;
+
+ /* Stopping RX/TX queues */
+ for_each_tss(id) {
+ qede_tx_queue_stop(eth_dev, id);
+ }
+
+ for_each_rss(id) {
+ qede_rx_queue_stop(eth_dev, id);
+ }
+}
+
+static inline bool qede_tunn_exist(uint16_t flag)
+{
+ return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
+}
+
+static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
+{
+ return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
+}
+
+/*
+ * qede_check_tunn_csum_l4:
+ * Returns:
+ * 1 : If L4 csum is enabled AND if the validation has failed.
+ * 0 : Otherwise
+ */
+static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
+{
+ if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
+ return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
+
+ return 0;
+}
+
+static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
+{
+ if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
+ return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
+
+ return 0;
+}
+
+/* Returns outer L2, L3 and L4 packet_type for tunneled packets */
+static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
+{
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ struct ether_hdr *eth_hdr;
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ struct vlan_hdr *vlan_hdr;
+ uint16_t ethertype;
+ bool vlan_tagged = 0;
+ uint16_t len;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ len = sizeof(struct ether_hdr);
+ ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
+
+ /* Note: Valid only if VLAN stripping is disabled */
+ if (ethertype == ETHER_TYPE_VLAN) {
+ vlan_tagged = 1;
+ vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+ len += sizeof(struct vlan_hdr);
+ ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
+ }
+
+ if (ethertype == ETHER_TYPE_IPv4) {
+ packet_type |= RTE_PTYPE_L3_IPV4;
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, len);
+ if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
+ packet_type |= RTE_PTYPE_L4_TCP;
+ else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
+ packet_type |= RTE_PTYPE_L4_UDP;
+ } else if (ethertype == ETHER_TYPE_IPv6) {
+ packet_type |= RTE_PTYPE_L3_IPV6;
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, len);
+ if (ipv6_hdr->proto == IPPROTO_TCP)
+ packet_type |= RTE_PTYPE_L4_TCP;
+ else if (ipv6_hdr->proto == IPPROTO_UDP)
+ packet_type |= RTE_PTYPE_L4_UDP;
+ }
+
+ if (vlan_tagged)
+ packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+ else
+ packet_type |= RTE_PTYPE_L2_ETHER;
+
+ return packet_type;
+}
+
+static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
+{
+ uint16_t val;
+
+ /* Lookup table */
+ static const uint32_t
+ ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
+ [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_TCP |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_TCP |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_UDP |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_UDP |
+ RTE_PTYPE_INNER_L2_ETHER,
+ /* Frags with no VLAN */
+ [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L2_ETHER,
+ /* VLANs */
+ [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_TCP |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_TCP |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_UDP |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_UDP |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ /* Frags with VLAN */
+ [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ };
+
+ /* Bits (0..3) provides L3/L4 protocol type */
+ /* Bits (4,5) provides frag and VLAN info */
+ val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
+ PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
+ PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+ PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
+
+ if (val < QEDE_PKT_TYPE_MAX)
+ return ptype_lkup_tbl[val];
+
+ return RTE_PTYPE_UNKNOWN;
+}
+
+static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
+{
+ uint16_t val;
+
+ /* Lookup table */
+ static const uint32_t
+ ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
+ [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_L2_ETHER,
+ /* Frags with no VLAN */
+ [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_L2_ETHER,
+ /* VLANs */
+ [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ /* Frags with VLAN */
+ [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ };
+
+ /* Bits (0..3) provides L3/L4 protocol type */
+ /* Bits (4,5) provides frag and VLAN info */
+ val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
+ PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
+ PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+ PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
+
+ if (val < QEDE_PKT_TYPE_MAX)
+ return ptype_lkup_tbl[val];
+
+ return RTE_PTYPE_UNKNOWN;
+}
+
+static inline uint8_t
+qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
+{
+ struct ipv4_hdr *ip;
+ uint16_t pkt_csum;
+ uint16_t calc_csum;
+ uint16_t val;
+
+ val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
+
+ if (unlikely(val)) {
+ m->packet_type = qede_rx_cqe_to_pkt_type(flag);
+ if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
+ ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ pkt_csum = ip->hdr_checksum;
+ ip->hdr_checksum = 0;
+ calc_csum = rte_ipv4_cksum(ip);
+ ip->hdr_checksum = pkt_csum;
+ return (calc_csum != pkt_csum);
+ } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+ ecore_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+}
+
+static inline void
+qede_reuse_page(__rte_unused struct qede_dev *qdev,
+ struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
+{
+ struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
+ uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ struct qede_rx_entry *curr_prod;
+ dma_addr_t new_mapping;
+
+ curr_prod = &rxq->sw_rx_ring[idx];
+ *curr_prod = *curr_cons;
+
+ new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
+ curr_prod->page_offset;
+
+ rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
+ rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
+
+ rxq->sw_rx_prod++;
+}
+
+static inline void
+qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+ struct qede_dev *qdev, uint8_t count)
+{
+ struct qede_rx_entry *curr_cons;
+
+ for (; count > 0; count--) {
+ curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
+ qede_reuse_page(qdev, rxq, curr_cons);
+ qede_rx_bd_ring_consume(rxq);
+ }
+}
+
+static inline void
+qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ uint8_t agg_index, uint16_t len)
+{
+ struct qede_agg_info *tpa_info;
+ struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
+ uint16_t cons_idx;
+
+ /* Under certain conditions it is possible that FW may not consume
+ * additional or new BD. So decision to consume the BD must be made
+ * based on len_list[0].
+ */
+ if (rte_le_to_cpu_16(len)) {
+ tpa_info = &rxq->tpa_info[agg_index];
+ cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
+ assert(curr_frag);
+ curr_frag->nb_segs = 1;
+ curr_frag->pkt_len = rte_le_to_cpu_16(len);
+ curr_frag->data_len = curr_frag->pkt_len;
+ tpa_info->tpa_tail->next = curr_frag;
+ tpa_info->tpa_tail = curr_frag;
+ qede_rx_bd_ring_consume(rxq);
+ if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
+ PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ }
+ }
+}
+
+static inline void
+qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+ PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
+ cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
+ /* only len_list[0] will have value */
+ qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+ cqe->len_list[0]);
+}
+
+static inline void
+qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+ struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
+
+ qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+ cqe->len_list[0]);
+ /* Update total length and frags based on end TPA */
+ rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
+ /* TODO: Add Sanity Checks */
+ rx_mb->nb_segs = cqe->num_of_bds;
+ rx_mb->pkt_len = cqe->total_packet_len;
+
+ PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
+ " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
+ rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
+ rx_mb->pkt_len);
+}
+
+static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
+{
+ uint32_t val;
+
+ /* Lookup table */
+ static const uint32_t
+ ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
+ [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
+ [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
+ };
+
+ /* Cover bits[4-0] to include tunn_type and next protocol */
+ val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
+ (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
+
+ if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
+ return ptype_tunn_lkup_tbl[val];
+ else
+ return RTE_PTYPE_UNKNOWN;
+}
+
+static inline int
+qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
+ uint8_t num_segs, uint16_t pkt_len)
+{
+ struct qede_rx_queue *rxq = p_rxq;
+ struct qede_dev *qdev = rxq->qdev;
+ register struct rte_mbuf *seg1 = NULL;
+ register struct rte_mbuf *seg2 = NULL;
+ uint16_t sw_rx_index;
+ uint16_t cur_size;
+
+ seg1 = rx_mb;
+ while (num_segs) {
+ cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
+ pkt_len;
+ if (unlikely(!cur_size)) {
+ PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
+ " left for mapping jumbo\n", num_segs);
+ qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
+ return -EINVAL;
+ }
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ qede_rx_bd_ring_consume(rxq);
+ pkt_len -= cur_size;
+ seg2->data_len = cur_size;
+ seg1->next = seg2;
+ seg1 = seg1->next;
+ num_segs--;
+ rxq->rx_segs++;
+ }
+
+ return 0;
+}
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+static inline void
+print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
+ uint8_t bitfield)
+{
+ PMD_RX_LOG(INFO, rxq,
+ "len 0x%04x bf 0x%04x hash_val 0x%x"
+ " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
+ " inner_l2=%s inner_l3=%s inner_l4=%s\n",
+ m->data_len, bitfield, m->hash.rss,
+ (unsigned long)m->ol_flags,
+ rte_get_ptype_l2_name(m->packet_type),
+ rte_get_ptype_l3_name(m->packet_type),
+ rte_get_ptype_l4_name(m->packet_type),
+ rte_get_ptype_tunnel_name(m->packet_type),
+ rte_get_ptype_inner_l2_name(m->packet_type),
+ rte_get_ptype_inner_l3_name(m->packet_type),
+ rte_get_ptype_inner_l4_name(m->packet_type));
+}
+#endif
+
+uint16_t
+qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct qede_rx_queue *rxq = p_rxq;
+ struct qede_dev *qdev = rxq->qdev;
+ struct ecore_dev *edev = &qdev->edev;
+ uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
+ uint16_t rx_pkt = 0;
+ union eth_rx_cqe *cqe;
+ struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
+ register struct rte_mbuf *rx_mb = NULL;
+ register struct rte_mbuf *seg1 = NULL;
+ enum eth_rx_cqe_type cqe_type;
+ uint16_t pkt_len = 0; /* Sum of all BD segments */
+ uint16_t len; /* Length of first BD */
+ uint8_t num_segs = 1;
+ uint16_t preload_idx;
+ uint16_t parse_flag;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ uint8_t bitfield_val;
+#endif
+ uint8_t tunn_parse_flag;
+ uint8_t j;
+ struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
+ uint64_t ol_flags;
+ uint32_t packet_type;
+ uint16_t vlan_tci;
+ bool tpa_start_flg;
+ uint8_t offset, tpa_agg_idx, flags;
+ struct qede_agg_info *tpa_info = NULL;
+ uint32_t rss_hash;
+
+ hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ rte_rmb();
+
+ if (hw_comp_cons == sw_comp_cons)
+ return 0;
+
+ while (sw_comp_cons != hw_comp_cons) {
+ ol_flags = 0;
+ packet_type = RTE_PTYPE_UNKNOWN;
+ vlan_tci = 0;
+ tpa_start_flg = false;
+ rss_hash = 0;
+
+ /* Get the CQE from the completion ring */
+ cqe =
+ (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+ cqe_type = cqe->fast_path_regular.type;
+ PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
+
+ switch (cqe_type) {
+ case ETH_RX_CQE_TYPE_REGULAR:
+ fp_cqe = &cqe->fast_path_regular;
+ break;
+ case ETH_RX_CQE_TYPE_TPA_START:
+ cqe_start_tpa = &cqe->fast_path_tpa_start;
+ tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
+ tpa_start_flg = true;
+ /* Mark it as LRO packet */
+ ol_flags |= PKT_RX_LRO;
+ /* In split mode, seg_len is same as len_on_first_bd
+ * and ext_bd_len_list will be empty since there are
+ * no additional buffers
+ */
+ PMD_RX_LOG(INFO, rxq,
+ "TPA start[%d] - len_on_first_bd %d header %d"
+ " [bd_list[0] %d], [seg_len %d]\n",
+ cqe_start_tpa->tpa_agg_index,
+ rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
+ cqe_start_tpa->header_len,
+ rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
+ rte_le_to_cpu_16(cqe_start_tpa->seg_len));
+
+ break;
+ case ETH_RX_CQE_TYPE_TPA_CONT:
+ qede_rx_process_tpa_cont_cqe(qdev, rxq,
+ &cqe->fast_path_tpa_cont);
+ goto next_cqe;
+ case ETH_RX_CQE_TYPE_TPA_END:
+ qede_rx_process_tpa_end_cqe(qdev, rxq,
+ &cqe->fast_path_tpa_end);
+ tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
+ tpa_info = &rxq->tpa_info[tpa_agg_idx];
+ rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
+ goto tpa_end;
+ case ETH_RX_CQE_TYPE_SLOW_PATH:
+ PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
+ ecore_eth_cqe_completion(
+ &edev->hwfns[rxq->queue_id % edev->num_hwfns],
+ (struct eth_slow_path_rx_cqe *)cqe);
+ /* fall-thru */
+ default:
+ goto next_cqe;
+ }
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ assert(rx_mb != NULL);
+
+ /* Handle regular CQE or TPA start CQE */
+ if (!tpa_start_flg) {
+ parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
+ offset = fp_cqe->placement_offset;
+ len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
+ pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+ vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+ rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ bitfield_val = fp_cqe->bitfields;
+#endif
+ } else {
+ parse_flag =
+ rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
+ offset = cqe_start_tpa->placement_offset;
+ /* seg_len = len_on_first_bd */
+ len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
+ vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ bitfield_val = cqe_start_tpa->bitfields;
+#endif
+ rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
+ }
+ if (qede_tunn_exist(parse_flag)) {
+ PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
+ if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "L4 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+
+ if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "Outer L3 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_EIP_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ }
+
+ if (tpa_start_flg)
+ flags = cqe_start_tpa->tunnel_pars_flags.flags;
+ else
+ flags = fp_cqe->tunnel_pars_flags.flags;
+ tunn_parse_flag = flags;
+
+ /* Tunnel_type */
+ packet_type =
+ qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
+
+ /* Inner header */
+ packet_type |=
+ qede_rx_cqe_to_pkt_type_inner(parse_flag);
+
+ /* Outer L3/L4 types is not available in CQE */
+ packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+
+ /* Outer L3/L4 types is not available in CQE.
+ * Need to add offset to parse correctly,
+ */
+ rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
+ packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+ } else {
+ packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
+ }
+
+ /* Common handling for non-tunnel packets and for inner
+ * headers in the case of tunnel.
+ */
+ if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "L4 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+ if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
+ PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ }
+
+ if (CQE_HAS_VLAN(parse_flag) ||
+ CQE_HAS_OUTER_VLAN(parse_flag)) {
+ /* Note: FW doesn't indicate Q-in-Q packet */
+ ol_flags |= PKT_RX_VLAN;
+ if (qdev->vlan_strip_flg) {
+ ol_flags |= PKT_RX_VLAN_STRIPPED;
+ rx_mb->vlan_tci = vlan_tci;
+ }
+ }
+
+ /* RSS Hash */
+ if (qdev->rss_enable) {
+ ol_flags |= PKT_RX_RSS_HASH;
+ rx_mb->hash.rss = rss_hash;
+ }
+
+ if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
+ PMD_RX_LOG(ERR, rxq,
+ "New buffer allocation failed,"
+ "dropping incoming packet\n");
+ qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
+ rte_eth_devices[rxq->port_id].
+ data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ break;
+ }
+ qede_rx_bd_ring_consume(rxq);
+
+ if (!tpa_start_flg && fp_cqe->bd_num > 1) {
+ PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
+ " len on first: %04x Total Len: %04x",
+ fp_cqe->bd_num, len, pkt_len);
+ num_segs = fp_cqe->bd_num - 1;
+ seg1 = rx_mb;
+ if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
+ pkt_len - len))
+ goto next_cqe;
+ for (j = 0; j < num_segs; j++) {
+ if (qede_alloc_rx_buffer(rxq)) {
+ PMD_RX_LOG(ERR, rxq,
+ "Buffer allocation failed");
+ rte_eth_devices[rxq->port_id].
+ data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ break;
+ }
+ rxq->rx_segs++;
+ }
+ }
+ rxq->rx_segs++; /* for the first segment */
+
+ /* Prefetch next mbuf while processing current one. */
+ preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+
+ /* Update rest of the MBUF fields */
+ rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
+ rx_mb->port = rxq->port_id;
+ rx_mb->ol_flags = ol_flags;
+ rx_mb->data_len = len;
+ rx_mb->packet_type = packet_type;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ print_rx_bd_info(rx_mb, rxq, bitfield_val);
+#endif
+ if (!tpa_start_flg) {
+ rx_mb->nb_segs = fp_cqe->bd_num;
+ rx_mb->pkt_len = pkt_len;
+ } else {
+ /* store ref to the updated mbuf */
+ tpa_info->tpa_head = rx_mb;
+ tpa_info->tpa_tail = tpa_info->tpa_head;
+ }
+ rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
+tpa_end:
+ if (!tpa_start_flg) {
+ rx_pkts[rx_pkt] = rx_mb;
+ rx_pkt++;
+ }
+next_cqe:
+ ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+ if (rx_pkt == nb_pkts) {
+ PMD_RX_LOG(DEBUG, rxq,
+ "Budget reached nb_pkts=%u received=%u",
+ rx_pkt, nb_pkts);
+ break;
+ }
+ }
+
+ qede_update_rx_prod(qdev, rxq);
+
+ rxq->rcv_pkts += rx_pkt;
+
+ PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
+
+ return rx_pkt;
+}
+
+
+/* Populate scatter gather buffer descriptor fields */
+static inline uint16_t
+qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
+ struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
+ uint16_t start_seg)
+{
+ struct qede_tx_queue *txq = p_txq;
+ struct eth_tx_bd *tx_bd = NULL;
+ dma_addr_t mapping;
+ uint16_t nb_segs = 0;
+
+ /* Check for scattered buffers */
+ while (m_seg) {
+ if (start_seg == 0) {
+ if (!*bd2) {
+ *bd2 = (struct eth_tx_2nd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
+ nb_segs++;
+ }
+ mapping = rte_mbuf_data_iova(m_seg);
+ QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
+ } else if (start_seg == 1) {
+ if (!*bd3) {
+ *bd3 = (struct eth_tx_3rd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
+ nb_segs++;
+ }
+ mapping = rte_mbuf_data_iova(m_seg);
+ QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
+ } else {
+ tx_bd = (struct eth_tx_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(tx_bd, 0, sizeof(*tx_bd));
+ nb_segs++;
+ mapping = rte_mbuf_data_iova(m_seg);
+ QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
+ }
+ start_seg++;
+ m_seg = m_seg->next;
+ }
+
+ /* Return total scattered buffers */
+ return nb_segs;
+}
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+static inline void
+print_tx_bd_info(struct qede_tx_queue *txq,
+ struct eth_tx_1st_bd *bd1,
+ struct eth_tx_2nd_bd *bd2,
+ struct eth_tx_3rd_bd *bd3,
+ uint64_t tx_ol_flags)
+{
+ char ol_buf[256] = { 0 }; /* for verbose prints */
+
+ if (bd1)
+ PMD_TX_LOG(INFO, txq,
+ "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
+ rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
+ bd1->data.bd_flags.bitfields,
+ rte_cpu_to_le_16(bd1->data.bitfields));
+ if (bd2)
+ PMD_TX_LOG(INFO, txq,
+ "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
+ rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
+ bd2->data.bitfields2, bd2->data.tunn_ip_size);
+ if (bd3)
+ PMD_TX_LOG(INFO, txq,
+ "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
+ "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
+ rte_cpu_to_le_16(bd3->nbytes),
+ rte_cpu_to_le_16(bd3->data.bitfields),
+ rte_cpu_to_le_16(bd3->data.lso_mss),
+ bd3->data.tunn_l4_hdr_start_offset_w,
+ bd3->data.tunn_hdr_size_w);
+
+ rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
+ PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
+}
+#endif
+
+/* TX prepare to check packets meets TX conditions */
+uint16_t
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct qede_tx_queue *txq = p_txq;
+#else
+qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+#endif
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+ uint16_t i;
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ int ret;
+#endif
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
+ rte_errno = -EINVAL;
+ break;
+ }
+ /* TBD: confirm its ~9700B for both ? */
+ if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
+ rte_errno = -EINVAL;
+ break;
+ }
+ } else {
+ if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
+ rte_errno = -EINVAL;
+ break;
+ }
+ }
+ if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ break;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ break;
+ }
+#endif
+ }
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ if (unlikely(i != nb_pkts))
+ PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
+ nb_pkts - i);
+#endif
+ return i;
+}
+
+#define MPLSINUDP_HDR_SIZE (12)
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+static inline void
+qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
+ struct qede_tx_queue *txq)
+{
+ if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
+ PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
+ if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
+ MPLSINUDP_HDR_SIZE) / 2) > 0xff)
+ PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
+ if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
+ PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
+ if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+ PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
+}
+#endif
+
+uint16_t
+qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct qede_tx_queue *txq = p_txq;
+ struct qede_dev *qdev = txq->qdev;
+ struct ecore_dev *edev = &qdev->edev;
+ struct rte_mbuf *mbuf;
+ struct rte_mbuf *m_seg = NULL;
+ uint16_t nb_tx_pkts;
+ uint16_t bd_prod;
+ uint16_t idx;
+ uint16_t nb_frags;
+ uint16_t nb_pkt_sent = 0;
+ uint8_t nbds;
+ bool lso_flg;
+ bool mplsoudp_flg;
+ __rte_unused bool tunn_flg;
+ bool tunn_ipv6_ext_flg;
+ struct eth_tx_1st_bd *bd1;
+ struct eth_tx_2nd_bd *bd2;
+ struct eth_tx_3rd_bd *bd3;
+ uint64_t tx_ol_flags;
+ uint16_t hdr_size;
+ /* BD1 */
+ uint16_t bd1_bf;
+ uint8_t bd1_bd_flags_bf;
+ uint16_t vlan;
+ /* BD2 */
+ uint16_t bd2_bf1;
+ uint16_t bd2_bf2;
+ /* BD3 */
+ uint16_t mss;
+ uint16_t bd3_bf;
+
+ uint8_t tunn_l4_hdr_start_offset;
+ uint8_t tunn_hdr_size;
+ uint8_t inner_l2_hdr_size;
+ uint16_t inner_l4_hdr_offset;
+
+ if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
+ PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
+ nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
+ qede_process_tx_compl(edev, txq);
+ }
+
+ nb_tx_pkts = nb_pkts;
+ bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+ while (nb_tx_pkts--) {
+ /* Init flags/values */
+ tunn_flg = false;
+ lso_flg = false;
+ nbds = 0;
+ vlan = 0;
+ bd1 = NULL;
+ bd2 = NULL;
+ bd3 = NULL;
+ hdr_size = 0;
+ bd1_bf = 0;
+ bd1_bd_flags_bf = 0;
+ bd2_bf1 = 0;
+ bd2_bf2 = 0;
+ mss = 0;
+ bd3_bf = 0;
+ mplsoudp_flg = false;
+ tunn_ipv6_ext_flg = false;
+ tunn_hdr_size = 0;
+ tunn_l4_hdr_start_offset = 0;
+
+ mbuf = *tx_pkts++;
+ assert(mbuf);
+
+ /* Check minimum TX BDS availability against available BDs */
+ if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
+ break;
+
+ tx_ol_flags = mbuf->ol_flags;
+ bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
+ /* TX prepare would have already checked supported tunnel Tx
+ * offloads. Don't rely on pkt_type marked by Rx, instead use
+ * tx_ol_flags to decide.
+ */
+ tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
+
+ if (tunn_flg) {
+ /* Check against max which is Tunnel IPv6 + ext */
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
+ break;
+
+ /* First indicate its a tunnel pkt */
+ bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+ /* Legacy FW had flipped behavior in regard to this bit
+ * i.e. it needed to set to prevent FW from touching
+ * encapsulated packets when it didn't need to.
+ */
+ if (unlikely(txq->is_legacy)) {
+ bd1_bf ^= 1 <<
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+ }
+
+ /* Outer IP checksum offload */
+ if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_OUTER_IPV4)) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+ }
+
+ /**
+ * Currently, only inner checksum offload in MPLS-in-UDP
+ * tunnel with one MPLS label is supported. Both outer
+ * and inner layers lengths need to be provided in
+ * mbuf.
+ */
+ if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
+ PKT_TX_TUNNEL_MPLSINUDP) {
+ mplsoudp_flg = true;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ qede_mpls_tunn_tx_sanity_check(mbuf, txq);
+#endif
+ /* Outer L4 offset in two byte words */
+ tunn_l4_hdr_start_offset =
+ (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
+ /* Tunnel header size in two byte words */
+ tunn_hdr_size = (mbuf->outer_l2_len +
+ mbuf->outer_l3_len +
+ MPLSINUDP_HDR_SIZE) / 2;
+ /* Inner L2 header size in two byte words */
+ inner_l2_hdr_size = (mbuf->l2_len -
+ MPLSINUDP_HDR_SIZE) / 2;
+ /* Inner L4 header offset from the beggining
+ * of inner packet in two byte words
+ */
+ inner_l4_hdr_offset = (mbuf->l2_len -
+ MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
+
+ /* Inner L2 size and address type */
+ bd2_bf1 |= (inner_l2_hdr_size &
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
+ bd2_bf1 |= (UNICAST_ADDRESS &
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
+ /* Treated as IPv6+Ext */
+ bd2_bf1 |=
+ 1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
+
+ /* Mark inner IPv6 if present */
+ if (tx_ol_flags & PKT_TX_IPV6)
+ bd2_bf1 |=
+ 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
+
+ /* Inner L4 offsets */
+ if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
+ (tx_ol_flags & (PKT_TX_UDP_CKSUM |
+ PKT_TX_TCP_CKSUM))) {
+ /* Determines if BD3 is needed */
+ tunn_ipv6_ext_flg = true;
+ if ((tx_ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_UDP_CKSUM) {
+ bd2_bf1 |=
+ 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+ }
+
+ /* TODO other pseudo checksum modes are
+ * not supported
+ */
+ bd2_bf1 |=
+ ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+ ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
+ bd2_bf2 |= (inner_l4_hdr_offset &
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+ }
+ } /* End MPLSoUDP */
+ } /* End Tunnel handling */
+
+ if (tx_ol_flags & PKT_TX_TCP_SEG) {
+ lso_flg = true;
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_LSO_PKT))
+ break;
+ /* For LSO, packet header and payload must reside on
+ * buffers pointed by different BDs. Using BD1 for HDR
+ * and BD2 onwards for data.
+ */
+ hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+ if (tunn_flg)
+ hdr_size += mbuf->outer_l2_len +
+ mbuf->outer_l3_len;
+
+ bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+ mss = rte_cpu_to_le_16(mbuf->tso_segsz);
+ /* Using one header BD */
+ bd3_bf |= rte_cpu_to_le_16(1 <<
+ ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+ } else {
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
+ break;
+ bd1_bf |=
+ (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
+ << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ }
+
+ /* Descriptor based VLAN insertion */
+ if (tx_ol_flags & PKT_TX_VLAN_PKT) {
+ vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+ }
+
+ /* Offload the IP checksum in the hardware */
+ if (tx_ol_flags & PKT_TX_IP_CKSUM) {
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ /* There's no DPDK flag to request outer-L4 csum
+ * offload. But in the case of tunnel if inner L3 or L4
+ * csum offload is requested then we need to force
+ * recalculation of L4 tunnel header csum also.
+ */
+ if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
+ PKT_TX_TUNNEL_GRE)) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+ }
+ }
+
+ /* L4 checksum offload (tcp or udp) */
+ if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
+ (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+ /* There's no DPDK flag to request outer-L4 csum
+ * offload. But in the case of tunnel if inner L3 or L4
+ * csum offload is requested then we need to force
+ * recalculation of L4 tunnel header csum also.
+ */
+ if (tunn_flg) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+ }
+ }
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = TX_PROD(txq);
+ txq->sw_tx_ring[idx].mbuf = mbuf;
+
+ /* BD1 */
+ bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
+ memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
+ nbds++;
+
+ /* Map MBUF linear data for DMA and set in the BD1 */
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
+ mbuf->data_len);
+ bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
+ bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
+ bd1->data.vlan = vlan;
+
+ if (lso_flg || mplsoudp_flg) {
+ bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
+ (&txq->tx_pbl);
+ memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
+ nbds++;
+
+ /* BD1 */
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
+ hdr_size);
+ /* BD2 */
+ QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
+ rte_mbuf_data_iova(mbuf)),
+ mbuf->data_len - hdr_size);
+ bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
+ if (mplsoudp_flg) {
+ bd2->data.bitfields2 =
+ rte_cpu_to_le_16(bd2_bf2);
+ /* Outer L3 size */
+ bd2->data.tunn_ip_size =
+ rte_cpu_to_le_16(mbuf->outer_l3_len);
+ }
+ /* BD3 */
+ if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
+ bd3 = (struct eth_tx_3rd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
+ nbds++;
+ bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
+ if (lso_flg)
+ bd3->data.lso_mss = mss;
+ if (mplsoudp_flg) {
+ bd3->data.tunn_l4_hdr_start_offset_w =
+ tunn_l4_hdr_start_offset;
+ bd3->data.tunn_hdr_size_w =
+ tunn_hdr_size;
+ }
+ }
+ }
+
+ /* Handle fragmented MBUF */
+ m_seg = mbuf->next;
+
+ /* Encode scatter gather buffer descriptors if required */
+ nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
+ bd1->data.nbds = nbds + nb_frags;
+
+ txq->nb_tx_avail -= bd1->data.nbds;
+ txq->sw_tx_prod++;
+ rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
+ bd_prod =
+ rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
+#endif
+ nb_pkt_sent++;
+ txq->xmit_pkts++;
+ }
+
+ /* Write value of prod idx into bd_prod */
+ txq->tx_db.data.bd_prod = bd_prod;
+ rte_wmb();
+ rte_compiler_barrier();
+ DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
+ rte_wmb();
+
+ /* Check again for Tx completions */
+ qede_process_tx_compl(edev, txq);
+
+ PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
+ nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
+
+ return nb_pkt_sent;
+}
+
+uint16_t
+qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+ __rte_unused struct rte_mbuf **pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/qede/qede_rxtx.h b/src/spdk/dpdk/drivers/net/qede/qede_rxtx.h
new file mode 100644
index 00000000..e710fbae
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/qede_rxtx.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+
+#ifndef _QEDE_RXTX_H_
+#define _QEDE_RXTX_H_
+
+#include "qede_ethdev.h"
+
+/* Ring Descriptors */
+#define RX_RING_SIZE_POW 16 /* 64K */
+#define RX_RING_SIZE (1ULL << RX_RING_SIZE_POW)
+#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
+#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
+#define NUM_RX_BDS(q) (q->nb_rx_desc - 1)
+
+#define TX_RING_SIZE_POW 16 /* 64K */
+#define TX_RING_SIZE (1ULL << TX_RING_SIZE_POW)
+#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
+#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
+#define NUM_TX_BDS(q) (q->nb_tx_desc - 1)
+
+#define TX_CONS(txq) (txq->sw_tx_cons & NUM_TX_BDS(txq))
+#define TX_PROD(txq) (txq->sw_tx_prod & NUM_TX_BDS(txq))
+
+#define QEDE_DEFAULT_TX_FREE_THRESH 32
+
+#define QEDE_CSUM_ERROR (1 << 0)
+#define QEDE_CSUM_UNNECESSARY (1 << 1)
+#define QEDE_TUNN_CSUM_UNNECESSARY (1 << 2)
+
+#define QEDE_BD_SET_ADDR_LEN(bd, maddr, len) \
+ do { \
+ (bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
+ (bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
+ (bd)->nbytes = rte_cpu_to_le_16(len); \
+ } while (0)
+
+#define CQE_HAS_VLAN(flags) \
+ ((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
+ << PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
+
+#define CQE_HAS_OUTER_VLAN(flags) \
+ ((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
+ << PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
+
+#define QEDE_MIN_RX_BUFF_SIZE (1024)
+#define QEDE_VLAN_TAG_SIZE (4)
+#define QEDE_LLC_SNAP_HDR_LEN (8)
+
+/* Max supported alignment is 256 (8 shift)
+ * minimal alignment shift 6 is optimal for 57xxx HW performance
+ */
+#define QEDE_L1_CACHE_SHIFT 6
+#define QEDE_RX_ALIGN_SHIFT (RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
+#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
+#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
+ ~(QEDE_FW_RX_ALIGN_END - 1))
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
+#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
+ + (QEDE_LLC_SNAP_HDR_LEN))
+
+#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
+ ETH_RSS_NONFRAG_IPV4_TCP |\
+ ETH_RSS_NONFRAG_IPV4_UDP |\
+ ETH_RSS_IPV6 |\
+ ETH_RSS_NONFRAG_IPV6_TCP |\
+ ETH_RSS_NONFRAG_IPV6_UDP |\
+ ETH_RSS_VXLAN |\
+ ETH_RSS_GENEVE)
+
+#define for_each_rss(i) for (i = 0; i < qdev->num_rx_queues; i++)
+#define for_each_tss(i) for (i = 0; i < qdev->num_tx_queues; i++)
+#define QEDE_RXTX_MAX(qdev) \
+ (RTE_MAX(QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)))
+
+/* Macros for non-tunnel packet types lkup table */
+#define QEDE_PKT_TYPE_UNKNOWN 0x0
+#define QEDE_PKT_TYPE_MAX 0x3f
+
+#define QEDE_PKT_TYPE_IPV4 0x1
+#define QEDE_PKT_TYPE_IPV6 0x2
+#define QEDE_PKT_TYPE_IPV4_TCP 0x5
+#define QEDE_PKT_TYPE_IPV6_TCP 0x6
+#define QEDE_PKT_TYPE_IPV4_UDP 0x9
+#define QEDE_PKT_TYPE_IPV6_UDP 0xa
+
+/* For frag pkts, corresponding IP bits is set */
+#define QEDE_PKT_TYPE_IPV4_FRAG 0x11
+#define QEDE_PKT_TYPE_IPV6_FRAG 0x12
+
+#define QEDE_PKT_TYPE_IPV4_VLAN 0x21
+#define QEDE_PKT_TYPE_IPV6_VLAN 0x22
+#define QEDE_PKT_TYPE_IPV4_TCP_VLAN 0x25
+#define QEDE_PKT_TYPE_IPV6_TCP_VLAN 0x26
+#define QEDE_PKT_TYPE_IPV4_UDP_VLAN 0x29
+#define QEDE_PKT_TYPE_IPV6_UDP_VLAN 0x2a
+
+#define QEDE_PKT_TYPE_IPV4_VLAN_FRAG 0x31
+#define QEDE_PKT_TYPE_IPV6_VLAN_FRAG 0x32
+
+/* Macros for tunneled packets with next protocol lkup table */
+#define QEDE_PKT_TYPE_TUNN_GENEVE 0x1
+#define QEDE_PKT_TYPE_TUNN_GRE 0x2
+#define QEDE_PKT_TYPE_TUNN_VXLAN 0x3
+
+/* Bit 2 is don't care bit */
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE 0x9
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE 0xa
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN 0xb
+
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE 0xd
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE 0xe
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN 0xf
+
+
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE 0x11
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE 0x12
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN 0x13
+
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE 0x15
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE 0x16
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN 0x17
+
+
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE 0x19
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE 0x1a
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN 0x1b
+
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE 0x1d
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE 0x1e
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN 0x1f
+
+#define QEDE_PKT_TYPE_TUNN_MAX_TYPE 0x20 /* 2^5 */
+
+#define QEDE_TX_CSUM_OFFLOAD_MASK (PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_CKSUM | \
+ PKT_TX_UDP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG)
+
+#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_TUNNEL_VXLAN | \
+ PKT_TX_TUNNEL_GENEVE | \
+ PKT_TX_TUNNEL_MPLSINUDP | \
+ PKT_TX_TUNNEL_GRE)
+
+#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
+
+/*
+ * RX BD descriptor ring
+ */
+struct qede_rx_entry {
+ struct rte_mbuf *mbuf;
+ uint32_t page_offset;
+ /* allows expansion .. */
+};
+
+/* TPA related structures */
+struct qede_agg_info {
+ struct rte_mbuf *tpa_head; /* Pointer to first TPA segment */
+ struct rte_mbuf *tpa_tail; /* Pointer to last TPA segment */
+};
+
+/*
+ * Structure associated with each RX queue.
+ */
+struct qede_rx_queue {
+ struct rte_mempool *mb_pool;
+ struct ecore_chain rx_bd_ring;
+ struct ecore_chain rx_comp_ring;
+ uint16_t *hw_cons_ptr;
+ void OSAL_IOMEM *hw_rxq_prod_addr;
+ struct qede_rx_entry *sw_rx_ring;
+ struct ecore_sb_info *sb_info;
+ uint16_t sw_rx_cons;
+ uint16_t sw_rx_prod;
+ uint16_t nb_rx_desc;
+ uint16_t queue_id;
+ uint16_t port_id;
+ uint16_t rx_buf_size;
+ uint64_t rcv_pkts;
+ uint64_t rx_segs;
+ uint64_t rx_hw_errors;
+ uint64_t rx_alloc_errors;
+ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
+ struct qede_dev *qdev;
+ void *handle;
+};
+
+/*
+ * TX BD descriptor ring
+ */
+struct qede_tx_entry {
+ struct rte_mbuf *mbuf;
+ uint8_t flags;
+};
+
+union db_prod {
+ struct eth_db_data data;
+ uint32_t raw;
+};
+
+struct qede_tx_queue {
+ struct ecore_chain tx_pbl;
+ struct qede_tx_entry *sw_tx_ring;
+ uint16_t nb_tx_desc;
+ uint16_t nb_tx_avail;
+ uint16_t tx_free_thresh;
+ uint16_t queue_id;
+ uint16_t *hw_cons_ptr;
+ uint16_t sw_tx_cons;
+ uint16_t sw_tx_prod;
+ void OSAL_IOMEM *doorbell_addr;
+ volatile union db_prod tx_db;
+ uint16_t port_id;
+ uint64_t xmit_pkts;
+ bool is_legacy;
+ struct qede_dev *qdev;
+ void *handle;
+};
+
+struct qede_fastpath {
+ struct ecore_sb_info *sb_info;
+ struct qede_rx_queue *rxq;
+ struct qede_tx_queue *txq;
+};
+
+/*
+ * RX/TX function prototypes
+ */
+int qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+
+int qede_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+void qede_rx_queue_release(void *rx_queue);
+
+void qede_tx_queue_release(void *tx_queue);
+
+uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
+ struct rte_mbuf **pkts,
+ uint16_t nb_pkts);
+
+int qede_start_queues(struct rte_eth_dev *eth_dev);
+
+void qede_stop_queues(struct rte_eth_dev *eth_dev);
+
+/* Fastpath resource alloc/dealloc helpers */
+int qede_alloc_fp_resc(struct qede_dev *qdev);
+
+void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev);
+
+#endif /* _QEDE_RXTX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/qede/rte_pmd_qede_version.map b/src/spdk/dpdk/drivers/net/qede/rte_pmd_qede_version.map
new file mode 100644
index 00000000..349c6e1c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/rte_pmd_qede_version.map
@@ -0,0 +1,4 @@
+DPDK_16.04 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/ring/Makefile b/src/spdk/dpdk/drivers/net/ring/Makefile
new file mode 100644
index 00000000..517312e0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ring/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ring.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vdev
+
+EXPORT_MAP := rte_pmd_ring_version.map
+
+LIBABIVER := 2
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += rte_eth_ring.c
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_eth_ring.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/ring/meson.build b/src/spdk/dpdk/drivers/net/ring/meson.build
new file mode 100644
index 00000000..7659b04f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ring/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+version = 2
+sources = files('rte_eth_ring.c')
+install_headers('rte_eth_ring.h')
diff --git a/src/spdk/dpdk/drivers/net/ring/rte_eth_ring.c b/src/spdk/dpdk/drivers/net/ring/rte_eth_ring.c
new file mode 100644
index 00000000..791deb0b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ring/rte_eth_ring.c
@@ -0,0 +1,693 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include "rte_eth_ring.h"
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_bus_vdev.h>
+#include <rte_kvargs.h>
+#include <rte_errno.h>
+
+#define ETH_RING_NUMA_NODE_ACTION_ARG "nodeaction"
+#define ETH_RING_ACTION_CREATE "CREATE"
+#define ETH_RING_ACTION_ATTACH "ATTACH"
+#define ETH_RING_INTERNAL_ARG "internal"
+
+static const char *valid_arguments[] = {
+ ETH_RING_NUMA_NODE_ACTION_ARG,
+ ETH_RING_INTERNAL_ARG,
+ NULL
+};
+
+struct ring_internal_args {
+ struct rte_ring * const *rx_queues;
+ const unsigned int nb_rx_queues;
+ struct rte_ring * const *tx_queues;
+ const unsigned int nb_tx_queues;
+ const unsigned int numa_node;
+ void *addr; /* self addr for sanity check */
+};
+
+enum dev_action {
+ DEV_CREATE,
+ DEV_ATTACH
+};
+
+struct ring_queue {
+ struct rte_ring *rng;
+ rte_atomic64_t rx_pkts;
+ rte_atomic64_t tx_pkts;
+ rte_atomic64_t err_pkts;
+};
+
+struct pmd_internals {
+ unsigned max_rx_queues;
+ unsigned max_tx_queues;
+
+ struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
+ struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
+
+ struct ether_addr address;
+ enum dev_action action;
+};
+
+
+static struct rte_eth_link pmd_link = {
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_FIXED,
+};
+
+static int eth_ring_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, eth_ring_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+static uint16_t
+eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ void **ptrs = (void *)&bufs[0];
+ struct ring_queue *r = q;
+ const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
+ ptrs, nb_bufs, NULL);
+ if (r->rng->flags & RING_F_SC_DEQ)
+ r->rx_pkts.cnt += nb_rx;
+ else
+ rte_atomic64_add(&(r->rx_pkts), nb_rx);
+ return nb_rx;
+}
+
+static uint16_t
+eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ void **ptrs = (void *)&bufs[0];
+ struct ring_queue *r = q;
+ const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
+ ptrs, nb_bufs, NULL);
+ if (r->rng->flags & RING_F_SP_ENQ) {
+ r->tx_pkts.cnt += nb_tx;
+ r->err_pkts.cnt += nb_bufs - nb_tx;
+ } else {
+ rte_atomic64_add(&(r->tx_pkts), nb_tx);
+ rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
+ }
+ return nb_tx;
+}
+
+static int
+eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
+
+static int
+eth_dev_start(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = ETH_LINK_UP;
+ return 0;
+}
+
+static void
+eth_dev_stop(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+}
+
+static int
+eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ return 0;
+}
+
+static int
+eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = ETH_LINK_UP;
+ return 0;
+}
+
+static int
+eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool __rte_unused)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
+ return 0;
+}
+
+static int
+eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
+ return 0;
+}
+
+
+static void
+eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t)-1;
+ dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
+}
+
+static int
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ unsigned i;
+ unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
+ const struct pmd_internals *internal = dev->data->dev_private;
+
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+ i < dev->data->nb_rx_queues; i++) {
+ stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
+ rx_total += stats->q_ipackets[i];
+ }
+
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+ i < dev->data->nb_tx_queues; i++) {
+ stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
+ stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
+ tx_total += stats->q_opackets[i];
+ tx_err_total += stats->q_errors[i];
+ }
+
+ stats->ipackets = rx_total;
+ stats->opackets = tx_total;
+ stats->oerrors = tx_err_total;
+
+ return 0;
+}
+
+static void
+eth_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned i;
+ struct pmd_internals *internal = dev->data->dev_private;
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ internal->rx_ring_queues[i].rx_pkts.cnt = 0;
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ internal->tx_ring_queues[i].tx_pkts.cnt = 0;
+ internal->tx_ring_queues[i].err_pkts.cnt = 0;
+ }
+}
+
+static void
+eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
+ uint32_t index __rte_unused)
+{
+}
+
+static int
+eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
+ struct ether_addr *mac_addr __rte_unused,
+ uint32_t index __rte_unused,
+ uint32_t vmdq __rte_unused)
+{
+ return 0;
+}
+
+static void
+eth_queue_release(void *q __rte_unused) { ; }
+static int
+eth_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused) { return 0; }
+
+static const struct eth_dev_ops ops = {
+ .dev_start = eth_dev_start,
+ .dev_stop = eth_dev_stop,
+ .dev_set_link_up = eth_dev_set_link_up,
+ .dev_set_link_down = eth_dev_set_link_down,
+ .dev_configure = eth_dev_configure,
+ .dev_infos_get = eth_dev_info,
+ .rx_queue_setup = eth_rx_queue_setup,
+ .tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_release = eth_queue_release,
+ .tx_queue_release = eth_queue_release,
+ .link_update = eth_link_update,
+ .stats_get = eth_stats_get,
+ .stats_reset = eth_stats_reset,
+ .mac_addr_remove = eth_mac_addr_remove,
+ .mac_addr_add = eth_mac_addr_add,
+};
+
+static struct rte_vdev_driver pmd_ring_drv;
+
+static int
+do_eth_dev_ring_create(const char *name,
+ struct rte_ring * const rx_queues[], const unsigned nb_rx_queues,
+ struct rte_ring *const tx_queues[], const unsigned nb_tx_queues,
+ const unsigned int numa_node, enum dev_action action,
+ struct rte_eth_dev **eth_dev_p)
+{
+ struct rte_eth_dev_data *data = NULL;
+ struct pmd_internals *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ void **rx_queues_local = NULL;
+ void **tx_queues_local = NULL;
+ unsigned i;
+
+ PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
+ numa_node);
+
+ rx_queues_local = rte_zmalloc_socket(name,
+ sizeof(void *) * nb_rx_queues, 0, numa_node);
+ if (rx_queues_local == NULL) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
+
+ tx_queues_local = rte_zmalloc_socket(name,
+ sizeof(void *) * nb_tx_queues, 0, numa_node);
+ if (tx_queues_local == NULL) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
+
+ internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
+ if (internals == NULL) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (eth_dev == NULL) {
+ rte_errno = ENOSPC;
+ goto error;
+ }
+
+ /* now put it all together
+ * - store queue data in internals,
+ * - store numa_node info in eth_dev_data
+ * - point eth_dev_data to internals
+ * - and point eth_dev structure to new eth_dev_data structure
+ */
+
+ data = eth_dev->data;
+ data->rx_queues = rx_queues_local;
+ data->tx_queues = tx_queues_local;
+
+ internals->action = action;
+ internals->max_rx_queues = nb_rx_queues;
+ internals->max_tx_queues = nb_tx_queues;
+ for (i = 0; i < nb_rx_queues; i++) {
+ internals->rx_ring_queues[i].rng = rx_queues[i];
+ data->rx_queues[i] = &internals->rx_ring_queues[i];
+ }
+ for (i = 0; i < nb_tx_queues; i++) {
+ internals->tx_ring_queues[i].rng = tx_queues[i];
+ data->tx_queues[i] = &internals->tx_ring_queues[i];
+ }
+
+ data->dev_private = internals;
+ data->nb_rx_queues = (uint16_t)nb_rx_queues;
+ data->nb_tx_queues = (uint16_t)nb_tx_queues;
+ data->dev_link = pmd_link;
+ data->mac_addrs = &internals->address;
+
+ eth_dev->dev_ops = &ops;
+ data->kdrv = RTE_KDRV_NONE;
+ data->numa_node = numa_node;
+
+ /* finally assign rx and tx ops */
+ eth_dev->rx_pkt_burst = eth_ring_rx;
+ eth_dev->tx_pkt_burst = eth_ring_tx;
+
+ rte_eth_dev_probing_finish(eth_dev);
+ *eth_dev_p = eth_dev;
+
+ return data->port_id;
+
+error:
+ rte_free(rx_queues_local);
+ rte_free(tx_queues_local);
+ rte_free(internals);
+
+ return -1;
+}
+
+int
+rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
+ const unsigned nb_rx_queues,
+ struct rte_ring *const tx_queues[],
+ const unsigned nb_tx_queues,
+ const unsigned numa_node)
+{
+ struct ring_internal_args args = {
+ .rx_queues = rx_queues,
+ .nb_rx_queues = nb_rx_queues,
+ .tx_queues = tx_queues,
+ .nb_tx_queues = nb_tx_queues,
+ .numa_node = numa_node,
+ .addr = &args,
+ };
+ char args_str[32] = { 0 };
+ char ring_name[32] = { 0 };
+ uint16_t port_id = RTE_MAX_ETHPORTS;
+ int ret;
+
+ /* do some parameter checking */
+ if (rx_queues == NULL && nb_rx_queues > 0) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ if (tx_queues == NULL && nb_tx_queues > 0) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ snprintf(args_str, 32, "%s=%p", ETH_RING_INTERNAL_ARG, &args);
+ snprintf(ring_name, 32, "net_ring_%s", name);
+
+ ret = rte_vdev_init(ring_name, args_str);
+ if (ret) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ rte_eth_dev_get_port_by_name(ring_name, &port_id);
+
+ return port_id;
+}
+
+int
+rte_eth_from_ring(struct rte_ring *r)
+{
+ return rte_eth_from_rings(r->name, &r, 1, &r, 1,
+ r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
+}
+
+static int
+eth_dev_ring_create(const char *name, const unsigned numa_node,
+ enum dev_action action, struct rte_eth_dev **eth_dev)
+{
+ /* rx and tx are so-called from point of view of first port.
+ * They are inverted from the point of view of second port
+ */
+ struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
+ unsigned i;
+ char rng_name[RTE_RING_NAMESIZE];
+ unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
+ RTE_PMD_RING_MAX_TX_RINGS);
+
+ for (i = 0; i < num_rings; i++) {
+ snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
+ rxtx[i] = (action == DEV_CREATE) ?
+ rte_ring_create(rng_name, 1024, numa_node,
+ RING_F_SP_ENQ|RING_F_SC_DEQ) :
+ rte_ring_lookup(rng_name);
+ if (rxtx[i] == NULL)
+ return -1;
+ }
+
+ if (do_eth_dev_ring_create(name, rxtx, num_rings, rxtx, num_rings,
+ numa_node, action, eth_dev) < 0)
+ return -1;
+
+ return 0;
+}
+
+struct node_action_pair {
+ char name[PATH_MAX];
+ unsigned node;
+ enum dev_action action;
+};
+
+struct node_action_list {
+ unsigned total;
+ unsigned count;
+ struct node_action_pair *list;
+};
+
+static int parse_kvlist (const char *key __rte_unused, const char *value, void *data)
+{
+ struct node_action_list *info = data;
+ int ret;
+ char *name;
+ char *action;
+ char *node;
+ char *end;
+
+ name = strdup(value);
+
+ ret = -EINVAL;
+
+ if (!name) {
+ PMD_LOG(WARNING, "command line parameter is empty for ring pmd!");
+ goto out;
+ }
+
+ node = strchr(name, ':');
+ if (!node) {
+ PMD_LOG(WARNING, "could not parse node value from %s",
+ name);
+ goto out;
+ }
+
+ *node = '\0';
+ node++;
+
+ action = strchr(node, ':');
+ if (!action) {
+ PMD_LOG(WARNING, "could not parse action value from %s",
+ node);
+ goto out;
+ }
+
+ *action = '\0';
+ action++;
+
+ /*
+ * Need to do some sanity checking here
+ */
+
+ if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
+ info->list[info->count].action = DEV_ATTACH;
+ else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
+ info->list[info->count].action = DEV_CREATE;
+ else
+ goto out;
+
+ errno = 0;
+ info->list[info->count].node = strtol(node, &end, 10);
+
+ if ((errno != 0) || (*end != '\0')) {
+ PMD_LOG(WARNING,
+ "node value %s is unparseable as a number", node);
+ goto out;
+ }
+
+ snprintf(info->list[info->count].name, sizeof(info->list[info->count].name), "%s", name);
+
+ info->count++;
+
+ ret = 0;
+out:
+ free(name);
+ return ret;
+}
+
+static int
+parse_internal_args(const char *key __rte_unused, const char *value,
+ void *data)
+{
+ struct ring_internal_args **internal_args = data;
+ void *args;
+
+ sscanf(value, "%p", &args);
+
+ *internal_args = args;
+
+ if ((*internal_args)->addr != args)
+ return -1;
+
+ return 0;
+}
+
+static int
+rte_pmd_ring_probe(struct rte_vdev_device *dev)
+{
+ const char *name, *params;
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+ struct node_action_list *info = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ struct ring_internal_args *internal_args;
+
+ name = rte_vdev_device_name(dev);
+ params = rte_vdev_device_args(dev);
+
+ PMD_LOG(INFO, "Initializing pmd_ring for %s", name);
+
+ if (params == NULL || params[0] == '\0') {
+ ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE,
+ &eth_dev);
+ if (ret == -1) {
+ PMD_LOG(INFO,
+ "Attach to pmd_ring for %s", name);
+ ret = eth_dev_ring_create(name, rte_socket_id(),
+ DEV_ATTACH, &eth_dev);
+ }
+ } else {
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+
+ if (!kvlist) {
+ PMD_LOG(INFO, "Ignoring unsupported parameters when creating"
+ " rings-backed ethernet device");
+ ret = eth_dev_ring_create(name, rte_socket_id(),
+ DEV_CREATE, &eth_dev);
+ if (ret == -1) {
+ PMD_LOG(INFO,
+ "Attach to pmd_ring for %s",
+ name);
+ ret = eth_dev_ring_create(name, rte_socket_id(),
+ DEV_ATTACH, &eth_dev);
+ }
+
+ if (eth_dev)
+ eth_dev->device = &dev->device;
+
+ return ret;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
+ parse_internal_args,
+ &internal_args);
+ if (ret < 0)
+ goto out_free;
+
+ ret = do_eth_dev_ring_create(name,
+ internal_args->rx_queues,
+ internal_args->nb_rx_queues,
+ internal_args->tx_queues,
+ internal_args->nb_tx_queues,
+ internal_args->numa_node,
+ DEV_ATTACH,
+ &eth_dev);
+ if (ret >= 0)
+ ret = 0;
+ } else {
+ ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
+ info = rte_zmalloc("struct node_action_list",
+ sizeof(struct node_action_list) +
+ (sizeof(struct node_action_pair) * ret),
+ 0);
+ if (!info)
+ goto out_free;
+
+ info->total = ret;
+ info->list = (struct node_action_pair*)(info + 1);
+
+ ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
+ parse_kvlist, info);
+
+ if (ret < 0)
+ goto out_free;
+
+ for (info->count = 0; info->count < info->total; info->count++) {
+ ret = eth_dev_ring_create(info->list[info->count].name,
+ info->list[info->count].node,
+ info->list[info->count].action,
+ &eth_dev);
+ if ((ret == -1) &&
+ (info->list[info->count].action == DEV_CREATE)) {
+ PMD_LOG(INFO,
+ "Attach to pmd_ring for %s",
+ name);
+ ret = eth_dev_ring_create(name,
+ info->list[info->count].node,
+ DEV_ATTACH,
+ &eth_dev);
+ }
+ }
+ }
+ }
+
+ if (eth_dev)
+ eth_dev->device = &dev->device;
+
+out_free:
+ rte_kvargs_free(kvlist);
+ rte_free(info);
+ return ret;
+}
+
+static int
+rte_pmd_ring_remove(struct rte_vdev_device *dev)
+{
+ const char *name = rte_vdev_device_name(dev);
+ struct rte_eth_dev *eth_dev = NULL;
+ struct pmd_internals *internals = NULL;
+ struct ring_queue *r = NULL;
+ uint16_t i;
+
+ PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name);
+
+ if (name == NULL)
+ return -EINVAL;
+
+ /* find an ethdev entry */
+ eth_dev = rte_eth_dev_allocated(name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ eth_dev_stop(eth_dev);
+
+ internals = eth_dev->data->dev_private;
+ if (internals->action == DEV_CREATE) {
+ /*
+ * it is only necessary to delete the rings in rx_queues because
+ * they are the same used in tx_queues
+ */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ r = eth_dev->data->rx_queues[i];
+ rte_ring_free(r->rng);
+ }
+ }
+
+ rte_free(eth_dev->data->rx_queues);
+ rte_free(eth_dev->data->tx_queues);
+ rte_free(eth_dev->data->dev_private);
+
+ rte_eth_dev_release_port(eth_dev);
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_ring_drv = {
+ .probe = rte_pmd_ring_probe,
+ .remove = rte_pmd_ring_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
+RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
+RTE_PMD_REGISTER_PARAM_STRING(net_ring,
+ ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");
+
+RTE_INIT(eth_ring_init_log)
+{
+ eth_ring_logtype = rte_log_register("pmd.net.ring");
+ if (eth_ring_logtype >= 0)
+ rte_log_set_level(eth_ring_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/ring/rte_eth_ring.h b/src/spdk/dpdk/drivers/net/ring/rte_eth_ring.h
new file mode 100644
index 00000000..59e074d0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ring/rte_eth_ring.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_ETH_RING_H_
+#define _RTE_ETH_RING_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_ring.h>
+
+/**
+ * Create a new ethdev port from a set of rings
+ *
+ * @param name
+ * name to be given to the new ethdev port
+ * @param rx_queues
+ * pointer to array of rte_rings to be used as RX queues
+ * @param nb_rx_queues
+ * number of elements in the rx_queues array
+ * @param tx_queues
+ * pointer to array of rte_rings to be used as TX queues
+ * @param nb_tx_queues
+ * number of elements in the tx_queues array
+ * @param numa_node
+ * the numa node on which the memory for this port is to be allocated
+ * @return
+ * the port number of the newly created the ethdev or -1 on error.
+ */
+int rte_eth_from_rings(const char *name,
+ struct rte_ring * const rx_queues[],
+ const unsigned nb_rx_queues,
+ struct rte_ring *const tx_queues[],
+ const unsigned nb_tx_queues,
+ const unsigned numa_node);
+
+/**
+ * Create a new ethdev port from a ring
+ *
+ * This function is a shortcut call for rte_eth_from_rings for the
+ * case where one wants to take a single rte_ring and use it as though
+ * it were an ethdev
+ *
+ * @param ring
+ * the ring to be used as an ethdev
+ * @return
+ * the port number of the newly created ethdev, or -1 on error
+ */
+int rte_eth_from_ring(struct rte_ring *r);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ring/rte_pmd_ring_version.map b/src/spdk/dpdk/drivers/net/ring/rte_pmd_ring_version.map
new file mode 100644
index 00000000..1f785d94
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ring/rte_pmd_ring_version.map
@@ -0,0 +1,14 @@
+DPDK_2.0 {
+ global:
+
+ rte_eth_from_rings;
+
+ local: *;
+};
+
+DPDK_2.2 {
+ global:
+
+ rte_eth_from_ring;
+
+} DPDK_2.0;
diff --git a/src/spdk/dpdk/drivers/net/sfc/Makefile b/src/spdk/dpdk/drivers/net/sfc/Makefile
new file mode 100644
index 00000000..3bb41a00
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/Makefile
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Copyright (c) 2016-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_sfc_efx.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -I$(SRCDIR)/base/
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+# Strict-aliasing rules are violated by rte_eth_link to uint64_t casts
+CFLAGS += -Wno-strict-aliasing
+
+# Enable extra warnings
+CFLAGS += -Wextra
+
+# More warnings not enabled by above aggregators
+CFLAGS += -Wdisabled-optimization
+
+# Extra CFLAGS for base driver files
+CFLAGS_BASE_DRIVER += -Wno-sign-compare
+CFLAGS_BASE_DRIVER += -Wno-unused-parameter
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+
+# Compiler and version dependent flags
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS += -Waggregate-return
+CFLAGS += -Wnested-externs
+CFLAGS_BASE_DRIVER += -Wno-empty-body
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+CFLAGS += -Waggregate-return
+CFLAGS += -Wbad-function-cast
+CFLAGS_BASE_DRIVER += -Wno-empty-body
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+# Suppress ICC false positive warning on 'bulk' may be used before its
+# value is set
+CFLAGS_sfc_ef10_tx.o += -diag-disable 3656
+endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci -lrte_pci
+
+#
+# List of base driver object files for which
+# special CFLAGS above should be applied
+#
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(BASE_DRIVER_OBJS), \
+ $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+EXPORT_MAP := rte_pmd_sfc_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_kvargs.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_port.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tso.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_dp.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_essb_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_tx.c
+
+VPATH += $(SRCDIR)/base
+
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_bootcfg.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_crc32.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_ev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_hash.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_lic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mon.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_nvram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_port.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_sram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_tunnel.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_vpd.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += mcdi_mon.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_nvram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_sram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_vpd.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_ev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_image.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nvram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_vpd.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += hunt_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford2_nic.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/README b/src/spdk/dpdk/drivers/net/sfc/base/README
new file mode 100644
index 00000000..685c502c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/README
@@ -0,0 +1,16 @@
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2006-2018 Solarflare Communications Inc.
+ All rights reserved.
+
+Solarflare libefx driver library
+================================
+
+This directory contains source code of Solarflare Communications libefx
+driver library of version v4.10.0.1012.
+
+Updating
+========
+
+The source code in this directory should not be modified.
+Please contact the driver maintainers to request changes.
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_ev.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_ev.c
new file mode 100644
index 00000000..7f89a7bf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_ev.c
@@ -0,0 +1,1449 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_STATS
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+#if EFSYS_OPT_QSTATS
+#define EFX_EV_QSTAT_INCR(_eep, _stat) \
+ do { \
+ (_eep)->ee_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_EV_QSTAT_INCR(_eep, _stat)
+#endif
+
+/*
+ * Non-interrupting event queue requires interrrupting event queue to
+ * refer to for wake-up events even if wake ups are never used.
+ * It could be even non-allocated event queue.
+ */
+#define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0)
+
+static __checkReturn boolean_t
+ef10_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_set_evq_tmr(
+ __in efx_nic_t *enp,
+ __in uint32_t instance,
+ __in uint32_t mode,
+ __in uint32_t timer_ns)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
+ MC_CMD_SET_EVQ_TMR_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_EVQ_TMR;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_evq(
+ __in efx_nic_t *enp,
+ __in unsigned int instance,
+ __in efsys_mem_t *esmp,
+ __in size_t nevs,
+ __in uint32_t irq,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in boolean_t low_latency)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[
+ MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+ MC_CMD_INIT_EVQ_OUT_LEN)];
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ boolean_t interrupting;
+ int ev_cut_through;
+ efx_rc_t rc;
+
+ npages = EFX_EVQ_NBUFS(nevs);
+ if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);
+
+ interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+ /*
+ * On Huntington RX and TX event batching can only be requested together
+ * (even if the datapath firmware doesn't actually support RX
+ * batching). If event cut through is enabled no RX batching will occur.
+ *
+ * So always enable RX and TX event batching, and enable event cut
+ * through if we want low latency operation.
+ */
+ switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
+ case EFX_EVQ_FLAGS_TYPE_AUTO:
+ ev_cut_through = low_latency ? 1 : 0;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
+ ev_cut_through = 0;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
+ ev_cut_through = 1;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+ MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
+ INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
+ INIT_EVQ_IN_FLAG_INT_ARMD, 0,
+ INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1);
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail3;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
+ }
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail4;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail5;
+ }
+
+ /* NOTE: ignore the returned IRQ param as firmware does not set it. */
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_evq_v2(
+ __in efx_nic_t *enp,
+ __in unsigned int instance,
+ __in efsys_mem_t *esmp,
+ __in size_t nevs,
+ __in uint32_t irq,
+ __in uint32_t us,
+ __in uint32_t flags)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[
+ MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+ MC_CMD_INIT_EVQ_V2_OUT_LEN)];
+ boolean_t interrupting;
+ unsigned int evq_type;
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ efx_rc_t rc;
+
+ npages = EFX_EVQ_NBUFS(nevs);
+ if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);
+
+ interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+ switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
+ case EFX_EVQ_FLAGS_TYPE_AUTO:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+ MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
+ INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
+ INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
+ INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
+ INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail3;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
+ }
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail4;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail5;
+ }
+
+ /* NOTE: ignore the returned IRQ param as firmware does not set it. */
+
+ EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
+ MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_evq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
+ MC_CMD_FINI_EVQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ /*
+ * EALREADY is not an error, but indicates that the MC has rebooted and
+ * that the EVQ has already been destroyed.
+ */
+ if (rc != EALREADY)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+ __checkReturn efx_rc_t
+ef10_ev_init(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+}
+
+ void
+ef10_ev_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t irq;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
+
+ if (!ISP2(ndescs) ||
+ (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (index >= encp->enc_evq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ /* Set up the handler table */
+ eep->ee_rx = ef10_ev_rx;
+ eep->ee_tx = ef10_ev_tx;
+ eep->ee_driver = ef10_ev_driver;
+ eep->ee_drv_gen = ef10_ev_drv_gen;
+ eep->ee_mcdi = ef10_ev_mcdi;
+
+ /* Set up the event queue */
+ /* INIT_EVQ expects function-relative vector number */
+ if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
+ irq = index;
+ } else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
+ irq = index;
+ flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
+ } else {
+ irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
+ }
+
+ /*
+ * Interrupts may be raised for events immediately after the queue is
+ * created. See bug58606.
+ */
+
+ if (encp->enc_init_evq_v2_supported) {
+ /*
+ * On Medford the low latency license is required to enable RX
+ * and event cut through and to disable RX batching. If event
+ * queue type in flags is auto, we let the firmware decide the
+ * settings to use. If the adapter has a low latency license,
+ * it will choose the best settings for low latency, otherwise
+ * it will choose the best settings for throughput.
+ */
+ rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us,
+ flags);
+ if (rc != 0)
+ goto fail4;
+ } else {
+ /*
+ * On Huntington we need to specify the settings to use.
+ * If event queue type in flags is auto, we favour throughput
+ * if the adapter is running virtualization supporting firmware
+ * (i.e. the full featured firmware variant)
+ * and latency otherwise. The Ethernet Virtual Bridging
+ * capability is used to make this decision. (Note though that
+ * the low latency firmware variant is also best for
+ * throughput and corresponding type should be specified
+ * to choose it.)
+ */
+ boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
+ rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
+ low_latency);
+ if (rc != 0)
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ (void) efx_mcdi_fini_evq(enp, eep->ee_index);
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t rptr;
+ efx_dword_t dword;
+
+ rptr = count & eep->ee_mask;
+
+ if (enp->en_nic_cfg.enc_bug35388_workaround) {
+ EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
+ (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
+ EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
+ (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
+
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
+ ERF_DD_EVQ_IND_RPTR,
+ (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
+ EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ &dword, B_FALSE);
+
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
+ ERF_DD_EVQ_IND_RPTR,
+ rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+ EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ &dword, B_FALSE);
+ } else {
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
+ EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
+ &dword, B_FALSE);
+ }
+
+ return (0);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_driver_event(
+ __in efx_nic_t *enp,
+ __in uint32_t evq,
+ __in efx_qword_t data)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
+ MC_CMD_DRIVER_EVENT_OUT_LEN)];
+ efx_rc_t rc;
+
+ req.emr_cmd = MC_CMD_DRIVER_EVENT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);
+
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
+ EFX_QWORD_FIELD(data, EFX_DWORD_0));
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
+ EFX_QWORD_FIELD(data, EFX_DWORD_1));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_3(event,
+ ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
+ ESF_DZ_DRV_SUB_CODE, 0,
+ ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);
+
+ (void) efx_mcdi_driver_event(enp, eep->ee_index, event);
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_dword_t dword;
+ uint32_t mode;
+ efx_rc_t rc;
+
+ /* Check that hardware and MCDI use the same timer MODE values */
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ mode = FFE_CZ_TIMER_MODE_DIS;
+ } else {
+ mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
+ }
+
+ if (encp->enc_bug61265_workaround) {
+ uint32_t ns = us * 1000;
+
+ rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
+ if (rc != 0)
+ goto fail2;
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail3;
+
+ if (encp->enc_bug35388_workaround) {
+ EFX_POPULATE_DWORD_3(dword,
+ ERF_DD_EVQ_IND_TIMER_FLAGS,
+ EFE_DD_EVQ_IND_TIMER_FLAGS,
+ ERF_DD_EVQ_IND_TIMER_MODE, mode,
+ ERF_DD_EVQ_IND_TIMER_VAL, ticks);
+ EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
+ eep->ee_index, &dword, 0);
+ } else {
+ /*
+ * NOTE: The TMR_REL field introduced in Medford2 is
+ * ignored on earlier EF10 controllers. See bug66418
+ * comment 9 for details.
+ */
+ EFX_POPULATE_DWORD_3(dword,
+ ERF_DZ_TC_TIMER_MODE, mode,
+ ERF_DZ_TC_TIMER_VAL, ticks,
+ ERF_FZ_TC_TMR_REL_VAL, ticks);
+ EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
+ eep->ee_index, &dword, 0);
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < EV_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
+ eep->ee_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+
+static __checkReturn boolean_t
+ef10_ev_rx_packed_stream(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t label;
+ uint32_t pkt_count_lbits;
+ uint16_t flags;
+ boolean_t should_abort;
+ efx_evq_rxq_state_t *eersp;
+ unsigned int pkt_count;
+ unsigned int current_id;
+ boolean_t new_buffer;
+
+ pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
+ new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);
+
+ flags = 0;
+
+ eersp = &eep->ee_rxq_state[label];
+
+ /*
+ * RX_DSC_PTR_LBITS has least significant bits of the global
+ * (not per-buffer) packet counter. It is guaranteed that
+ * maximum number of completed packets fits in lbits-mask.
+ * So, modulo lbits-mask arithmetic should be used to calculate
+ * packet counter increment.
+ */
+ pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ eersp->eers_rx_stream_npackets += pkt_count;
+
+ if (new_buffer) {
+ flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ /*
+ * If both packed stream and equal stride super-buffer
+ * modes are compiled in, in theory credits should be
+ * be maintained for packed stream only, but right now
+ * these modes are not distinguished in the event queue
+ * Rx queue state and it is OK to increment the counter
+ * regardless (it might be event cheaper than branching
+ * since neighbour structure member are updated as well).
+ */
+ eersp->eers_rx_packed_stream_credits++;
+#endif
+ eersp->eers_rx_read_ptr++;
+ }
+ current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;
+
+ /* Check for errors that invalidate checksum and L3/L4 fields */
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
+ /* RX frame truncated */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
+ /* Bad Ethernet frame CRC */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
+ flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
+ goto deliver;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+
+deliver:
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
+ should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
+ flags);
+
+ return (should_abort);
+}
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */
+
+static __checkReturn boolean_t
+ef10_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t size;
+ uint32_t label;
+ uint32_t mac_class;
+ uint32_t eth_tag_class;
+ uint32_t l3_class;
+ uint32_t l4_class;
+ uint32_t next_read_lbits;
+ uint16_t flags;
+ boolean_t cont;
+ boolean_t should_abort;
+ efx_evq_rxq_state_t *eersp;
+ unsigned int desc_count;
+ unsigned int last_used_id;
+
+ EFX_EV_QSTAT_INCR(eep, EV_RX);
+
+ /* Discard events after RXQ/TXQ errors */
+ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+ return (B_FALSE);
+
+ /* Basic packet information */
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
+ eersp = &eep->ee_rxq_state[label];
+
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+ /*
+ * Packed stream events are very different,
+ * so handle them separately
+ */
+ if (eersp->eers_rx_packed_stream)
+ return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
+#endif
+
+ size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
+ cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
+ next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
+ eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
+ mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
+ l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
+
+ /*
+ * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
+ * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
+ * and values for all EF10 controllers.
+ */
+ EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);
+
+ l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
+ /* Drop this event */
+ return (B_FALSE);
+ }
+ flags = 0;
+
+ if (cont != 0) {
+ /*
+ * This may be part of a scattered frame, or it may be a
+ * truncated frame if scatter is disabled on this RXQ.
+ * Overlength frames can be received if e.g. a VF is configured
+ * for 1500 MTU but connected to a port set to 9000 MTU
+ * (see bug56567).
+ * FIXME: There is not yet any driver that supports scatter on
+ * Huntington. Scatter support is required for OSX.
+ */
+ flags |= EFX_PKT_CONT;
+ }
+
+ if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
+ flags |= EFX_PKT_UNICAST;
+
+ /* Increment the count of descriptors read */
+ desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ eersp->eers_rx_read_ptr += desc_count;
+
+ /*
+ * FIXME: add error checking to make sure this a batched event.
+ * This could also be an aborted scatter, see Bug36629.
+ */
+ if (desc_count > 1) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
+ flags |= EFX_PKT_PREFIX_LEN;
+ }
+
+ /* Calculate the index of the last descriptor consumed */
+ last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;
+
+ /* Check for errors that invalidate checksum and L3/L4 fields */
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
+ /* RX frame truncated */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
+ /* Bad Ethernet frame CRC */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
+ /*
+ * Hardware parse failed, due to malformed headers
+ * or headers that are too long for the parser.
+ * Headers and checksums must be validated by the host.
+ */
+ /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
+ goto deliver;
+ }
+
+ if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
+ (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
+ flags |= EFX_PKT_VLAN_TAGGED;
+ }
+
+ switch (l3_class) {
+ case ESE_DZ_L3_CLASS_IP4:
+ case ESE_DZ_L3_CLASS_IP4_FRAG:
+ flags |= EFX_PKT_IPV4;
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+ } else {
+ flags |= EFX_CKSUM_IPV4;
+ }
+
+ /*
+ * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
+ * only 2 bits wide on Medford2. Check it is safe to use the
+ * Medford2 field and values for all EF10 controllers.
+ */
+ EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
+ ESF_DE_RX_L4_CLASS_LBN);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
+ ESE_DE_L4_CLASS_UNKNOWN);
+
+ if (l4_class == ESE_FZ_L4_CLASS_TCP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
+ flags |= EFX_PKT_TCP;
+ } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
+ flags |= EFX_PKT_UDP;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
+ }
+ break;
+
+ case ESE_DZ_L3_CLASS_IP6:
+ case ESE_DZ_L3_CLASS_IP6_FRAG:
+ flags |= EFX_PKT_IPV6;
+
+ /*
+ * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
+ * only 2 bits wide on Medford2. Check it is safe to use the
+ * Medford2 field and values for all EF10 controllers.
+ */
+ EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
+ ESF_DE_RX_L4_CLASS_LBN);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
+ ESE_DE_L4_CLASS_UNKNOWN);
+
+ if (l4_class == ESE_FZ_L4_CLASS_TCP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
+ flags |= EFX_PKT_TCP;
+ } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
+ flags |= EFX_PKT_UDP;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
+ }
+ break;
+
+ default:
+ EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
+ break;
+ }
+
+ if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+ } else {
+ flags |= EFX_CKSUM_TCPUDP;
+ }
+ }
+
+deliver:
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ EFSYS_ASSERT(eecp->eec_rx != NULL);
+ should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t id;
+ uint32_t label;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX);
+
+ /* Discard events after RXQ/TXQ errors */
+ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+ return (B_FALSE);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
+ /* Drop this event */
+ return (B_FALSE);
+ }
+
+ /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
+
+ EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
+
+ EFSYS_ASSERT(eecp->eec_tx != NULL);
+ should_abort = eecp->eec_tx(arg, label, id);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ unsigned int code;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
+ should_abort = B_FALSE;
+
+ code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
+ switch (code) {
+ case ESE_DZ_DRV_TIMER_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);
+
+ EFSYS_ASSERT(eecp->eec_timer != NULL);
+ should_abort = eecp->eec_timer(arg, id);
+ break;
+ }
+
+ case ESE_DZ_DRV_WAKE_UP_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);
+
+ EFSYS_ASSERT(eecp->eec_wake_up != NULL);
+ should_abort = eecp->eec_wake_up(arg, id);
+ break;
+ }
+
+ case ESE_DZ_DRV_START_UP_EV:
+ EFSYS_ASSERT(eecp->eec_initialized != NULL);
+ should_abort = eecp->eec_initialized(arg);
+ break;
+
+ default:
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ break;
+ }
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t data;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
+ should_abort = B_FALSE;
+
+ data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
+ if (data >= ((uint32_t)1 << 16)) {
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ return (B_TRUE);
+ }
+
+ EFSYS_ASSERT(eecp->eec_software != NULL);
+ should_abort = eecp->eec_software(arg, (uint16_t)data);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ unsigned int code;
+ boolean_t should_abort = B_FALSE;
+
+ EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
+
+ code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ efx_mcdi_ev_death(enp, EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(enp,
+ MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
+ MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
+ MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
+ break;
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ case MCDI_EVENT_CODE_PROXY_RESPONSE:
+ /*
+ * This event notifies a function that an authorization request
+ * has been processed. If the request was authorized then the
+ * function can now re-send the original MCDI request.
+ * See SF-113652-SW "SR-IOV Proxied Network Access Control".
+ */
+ efx_mcdi_ev_proxy_response(enp,
+ MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
+ MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
+ break;
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+ case MCDI_EVENT_CODE_LINKCHANGE: {
+ efx_link_mode_t link_mode;
+
+ ef10_phy_link_ev(enp, eqp, &link_mode);
+ should_abort = eecp->eec_link_change(arg, link_mode);
+ break;
+ }
+
+ case MCDI_EVENT_CODE_SENSOREVT: {
+#if EFSYS_OPT_MON_STATS
+ efx_mon_stat_t id;
+ efx_mon_stat_value_t value;
+ efx_rc_t rc;
+
+ /* Decode monitor stat for MCDI sensor (if supported) */
+ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
+ /* Report monitor stat change */
+ should_abort = eecp->eec_monitor(arg, id, value);
+ } else if (rc == ENOTSUP) {
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_SENSOREVT,
+ MCDI_EV_FIELD(eqp, DATA));
+ } else {
+ EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
+ }
+#endif
+ break;
+ }
+
+ case MCDI_EVENT_CODE_SCHEDERR:
+ /* Informational only */
+ break;
+
+ case MCDI_EVENT_CODE_REBOOT:
+ /* Falcon/Siena only (should not been seen with Huntington). */
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MC_REBOOT:
+ /* MC_REBOOT event is used for Huntington (EF10) and later. */
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+#if EFSYS_OPT_MAC_STATS
+ if (eecp->eec_mac_stats != NULL) {
+ eecp->eec_mac_stats(arg,
+ MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
+ }
+#endif
+ break;
+
+ case MCDI_EVENT_CODE_FWALERT: {
+ uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
+
+ if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_FWALERT_SRAM,
+ MCDI_EV_FIELD(eqp, FWALERT_DATA));
+ else
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_FWALERT,
+ MCDI_EV_FIELD(eqp, DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_TX_ERR: {
+ /*
+ * After a TXQ error is detected, firmware sends a TX_ERR event.
+ * This may be followed by TX completions (which we discard),
+ * and then finally by a TX_FLUSH event. Firmware destroys the
+ * TXQ automatically after sending the TX_FLUSH event.
+ */
+ enp->en_reset_flags |= EFX_RESET_TXQ_ERR;
+
+ EFSYS_PROBE2(tx_descq_err,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ /* Inform the driver that a reset is required. */
+ eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
+ MCDI_EV_FIELD(eqp, TX_ERR_DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_TX_FLUSH: {
+ uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);
+
+ /*
+ * EF10 firmware sends two TX_FLUSH events: one to the txq's
+ * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
+ * We want to wait for all completions, so ignore the events
+ * with TX_FLUSH_TO_DRIVER.
+ */
+ if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
+ should_abort = B_FALSE;
+ break;
+ }
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
+
+ EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
+ should_abort = eecp->eec_txq_flush_done(arg, txq_index);
+ break;
+ }
+
+ case MCDI_EVENT_CODE_RX_ERR: {
+ /*
+ * After an RXQ error is detected, firmware sends an RX_ERR
+ * event. This may be followed by RX events (which we discard),
+ * and then finally by an RX_FLUSH event. Firmware destroys the
+ * RXQ automatically after sending the RX_FLUSH event.
+ */
+ enp->en_reset_flags |= EFX_RESET_RXQ_ERR;
+
+ EFSYS_PROBE2(rx_descq_err,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ /* Inform the driver that a reset is required. */
+ eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
+ MCDI_EV_FIELD(eqp, RX_ERR_DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_RX_FLUSH: {
+ uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);
+
+ /*
+ * EF10 firmware sends two RX_FLUSH events: one to the rxq's
+ * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
+ * We want to wait for all completions, so ignore the events
+ * with RX_FLUSH_TO_DRIVER.
+ */
+ if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
+ should_abort = B_FALSE;
+ break;
+ }
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
+
+ EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
+ should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
+ break;
+ }
+
+ default:
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ break;
+ }
+
+ return (should_abort);
+}
+
+ void
+ef10_ev_rxlabel_init(
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp,
+ __in unsigned int label,
+ __in efx_rxq_type_t type)
+{
+ efx_evq_rxq_state_t *eersp;
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+ boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
+ boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);
+#endif
+
+ _NOTE(ARGUNUSED(type))
+ EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+ eersp = &eep->ee_rxq_state[label];
+
+ EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+ /*
+ * For packed stream modes, the very first event will
+ * have a new buffer flag set, so it will be incremented,
+ * yielding the correct pointer. That results in a simpler
+ * code than trying to detect start-of-the-world condition
+ * in the event handler.
+ */
+ eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
+#else
+ eersp->eers_rx_read_ptr = 0;
+#endif
+ eersp->eers_rx_mask = erp->er_mask;
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+ eersp->eers_rx_stream_npackets = 0;
+ eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
+#endif
+#if EFSYS_OPT_RX_PACKED_STREAM
+ if (packed_stream) {
+ eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
+ EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
+ EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
+ EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
+ /*
+ * A single credit is allocated to the queue when it is started.
+ * It is immediately spent by the first packet which has NEW
+ * BUFFER flag set, though, but still we shall take into
+ * account, as to not wrap around the maximum number of credits
+ * accidentally
+ */
+ eersp->eers_rx_packed_stream_credits--;
+ EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
+ EFX_RX_PACKED_STREAM_MAX_CREDITS);
+ }
+#endif
+}
+
+ void
+ef10_ev_rxlabel_fini(
+ __in efx_evq_t *eep,
+ __in unsigned int label)
+{
+ efx_evq_rxq_state_t *eersp;
+
+ EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+ eersp = &eep->ee_rxq_state[label];
+
+ EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
+
+ eersp->eers_rx_read_ptr = 0;
+ eersp->eers_rx_mask = 0;
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+ eersp->eers_rx_stream_npackets = 0;
+ eersp->eers_rx_packed_stream = B_FALSE;
+#endif
+#if EFSYS_OPT_RX_PACKED_STREAM
+ eersp->eers_rx_packed_stream_credits = 0;
+#endif
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_filter.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_filter.c
new file mode 100644
index 00000000..ae872853
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_filter.c
@@ -0,0 +1,1750 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+#if EFSYS_OPT_FILTER
+
+#define EFE_SPEC(eftp, index) ((eftp)->eft_entry[(index)].efe_spec)
+
+static efx_filter_spec_t *
+ef10_filter_entry_spec(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ return ((efx_filter_spec_t *)(EFE_SPEC(eftp, index) &
+ ~(uintptr_t)EFX_EF10_FILTER_FLAGS));
+}
+
+static boolean_t
+ef10_filter_entry_is_busy(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_BUSY)
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+static boolean_t
+ef10_filter_entry_is_auto_old(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_AUTO_OLD)
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+static void
+ef10_filter_set_entry(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index,
+ __in_opt const efx_filter_spec_t *efsp)
+{
+ EFE_SPEC(eftp, index) = (uintptr_t)efsp;
+}
+
+static void
+ef10_filter_set_entry_busy(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
+}
+
+static void
+ef10_filter_set_entry_not_busy(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
+}
+
+static void
+ef10_filter_set_entry_auto_old(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
+ EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
+}
+
+static void
+ef10_filter_set_entry_not_auto_old(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_init(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *eftp;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+#define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match))
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_VNI_OR_VSID ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_LOC_MAC ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_UNKNOWN_MCAST_DST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST));
+ EFX_STATIC_ASSERT((uint32_t)EFX_FILTER_MATCH_UNKNOWN_UCAST_DST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST));
+#undef MATCH_MASK
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp);
+
+ if (!eftp) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_filter.ef_ef10_filter_table = eftp;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_filter_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ if (enp->en_filter.ef_ef10_filter_table != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t),
+ enp->en_filter.ef_ef10_filter_table);
+ }
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_filter_op_add(
+ __in efx_nic_t *enp,
+ __in efx_filter_spec_t *spec,
+ __in unsigned int filter_op,
+ __inout ef10_filter_handle_t *handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_V3_IN_LEN,
+ MC_CMD_FILTER_OP_EXT_OUT_LEN)];
+ efx_filter_match_flags_t match_flags;
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FILTER_OP;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FILTER_OP_V3_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN;
+
+ /*
+ * Remove match flag for encapsulated filters that does not correspond
+ * to the MCDI match flags
+ */
+ match_flags = spec->efs_match_flags & ~EFX_FILTER_MATCH_ENCAP_TYPE;
+
+ switch (filter_op) {
+ case MC_CMD_FILTER_OP_IN_OP_REPLACE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO,
+ handle->efh_lo);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_HI,
+ handle->efh_hi);
+ /* Fall through */
+ case MC_CMD_FILTER_OP_IN_OP_INSERT:
+ case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, filter_op);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_MATCH_FIELDS,
+ match_flags);
+ if (spec->efs_dmaq_id == EFX_FILTER_SPEC_RX_DMAQ_ID_DROP) {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
+ MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP);
+ } else {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
+ MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE,
+ spec->efs_dmaq_id);
+ }
+
+#if EFSYS_OPT_RX_SCALE
+ if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) {
+ uint32_t rss_context;
+
+ if (spec->efs_rss_context == EFX_RSS_CONTEXT_DEFAULT)
+ rss_context = enp->en_rss_context;
+ else
+ rss_context = spec->efs_rss_context;
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_CONTEXT,
+ rss_context);
+ }
+#endif
+
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_MODE,
+ spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ?
+ MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_TX_DEST,
+ MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT);
+
+ if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) {
+ /*
+ * NOTE: Unlike most MCDI requests, the filter fields
+ * are presented in network (big endian) byte order.
+ */
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_MAC),
+ spec->efs_rem_mac, EFX_MAC_ADDR_LEN);
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_MAC),
+ spec->efs_loc_mac, EFX_MAC_ADDR_LEN);
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_SRC_PORT,
+ __CPU_TO_BE_16(spec->efs_rem_port));
+ MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_DST_PORT,
+ __CPU_TO_BE_16(spec->efs_loc_port));
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_ETHER_TYPE,
+ __CPU_TO_BE_16(spec->efs_ether_type));
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_INNER_VLAN,
+ __CPU_TO_BE_16(spec->efs_inner_vid));
+ MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_OUTER_VLAN,
+ __CPU_TO_BE_16(spec->efs_outer_vid));
+
+ /* IP protocol (in low byte, high byte is zero) */
+ MCDI_IN_SET_BYTE(req, FILTER_OP_EXT_IN_IP_PROTO,
+ spec->efs_ip_proto);
+
+ EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) ==
+ MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN);
+ EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) ==
+ MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN);
+
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_IP),
+ &spec->efs_rem_host.eo_byte[0],
+ MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN);
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_IP),
+ &spec->efs_loc_host.eo_byte[0],
+ MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN);
+
+ /*
+ * On Medford, filters for encapsulated packets match based on
+ * the ether type and IP protocol in the outer frame. In
+ * addition we need to fill in the VNI or VSID type field.
+ */
+ switch (spec->efs_encap_type) {
+ case EFX_TUNNEL_PROTOCOL_NONE:
+ break;
+ case EFX_TUNNEL_PROTOCOL_VXLAN:
+ case EFX_TUNNEL_PROTOCOL_GENEVE:
+ MCDI_IN_POPULATE_DWORD_1(req,
+ FILTER_OP_EXT_IN_VNI_OR_VSID,
+ FILTER_OP_EXT_IN_VNI_TYPE,
+ spec->efs_encap_type == EFX_TUNNEL_PROTOCOL_VXLAN ?
+ MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN :
+ MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE);
+ break;
+ case EFX_TUNNEL_PROTOCOL_NVGRE:
+ MCDI_IN_POPULATE_DWORD_1(req,
+ FILTER_OP_EXT_IN_VNI_OR_VSID,
+ FILTER_OP_EXT_IN_VSID_TYPE,
+ MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_VNI_OR_VSID),
+ spec->efs_vni_or_vsid, EFX_VNI_OR_VSID_LEN);
+
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_IFRM_DST_MAC),
+ spec->efs_ifrm_loc_mac, EFX_MAC_ADDR_LEN);
+ }
+
+ /*
+ * Set the "MARK" or "FLAG" action for all packets matching this filter
+ * if necessary (only useful with equal stride packed stream Rx mode
+ * which provide the information in pseudo-header).
+ * These actions require MC_CMD_FILTER_OP_V3_IN msgrequest.
+ */
+ if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) &&
+ (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION,
+ MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_MARK_VALUE,
+ spec->efs_mark);
+ } else if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG) {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION,
+ MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail4;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail5;
+ }
+
+ handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_LO);
+ handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_HI);
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_filter_op_delete(
+ __in efx_nic_t *enp,
+ __in unsigned int filter_op,
+ __inout ef10_filter_handle_t *handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN,
+ MC_CMD_FILTER_OP_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FILTER_OP;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN;
+
+ switch (filter_op) {
+ case MC_CMD_FILTER_OP_IN_OP_REMOVE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE);
+ break;
+ case MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO, handle->efh_lo);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_HI, handle->efh_hi);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn boolean_t
+ef10_filter_equal(
+ __in const efx_filter_spec_t *left,
+ __in const efx_filter_spec_t *right)
+{
+ /* FIXME: Consider rx vs tx filters (look at efs_flags) */
+ if (left->efs_match_flags != right->efs_match_flags)
+ return (B_FALSE);
+ if (!EFX_OWORD_IS_EQUAL(left->efs_rem_host, right->efs_rem_host))
+ return (B_FALSE);
+ if (!EFX_OWORD_IS_EQUAL(left->efs_loc_host, right->efs_loc_host))
+ return (B_FALSE);
+ if (memcmp(left->efs_rem_mac, right->efs_rem_mac, EFX_MAC_ADDR_LEN))
+ return (B_FALSE);
+ if (memcmp(left->efs_loc_mac, right->efs_loc_mac, EFX_MAC_ADDR_LEN))
+ return (B_FALSE);
+ if (left->efs_rem_port != right->efs_rem_port)
+ return (B_FALSE);
+ if (left->efs_loc_port != right->efs_loc_port)
+ return (B_FALSE);
+ if (left->efs_inner_vid != right->efs_inner_vid)
+ return (B_FALSE);
+ if (left->efs_outer_vid != right->efs_outer_vid)
+ return (B_FALSE);
+ if (left->efs_ether_type != right->efs_ether_type)
+ return (B_FALSE);
+ if (left->efs_ip_proto != right->efs_ip_proto)
+ return (B_FALSE);
+ if (left->efs_encap_type != right->efs_encap_type)
+ return (B_FALSE);
+ if (memcmp(left->efs_vni_or_vsid, right->efs_vni_or_vsid,
+ EFX_VNI_OR_VSID_LEN))
+ return (B_FALSE);
+ if (memcmp(left->efs_ifrm_loc_mac, right->efs_ifrm_loc_mac,
+ EFX_MAC_ADDR_LEN))
+ return (B_FALSE);
+
+ return (B_TRUE);
+
+}
+
+static __checkReturn boolean_t
+ef10_filter_same_dest(
+ __in const efx_filter_spec_t *left,
+ __in const efx_filter_spec_t *right)
+{
+ if ((left->efs_flags & EFX_FILTER_FLAG_RX_RSS) &&
+ (right->efs_flags & EFX_FILTER_FLAG_RX_RSS)) {
+ if (left->efs_rss_context == right->efs_rss_context)
+ return (B_TRUE);
+ } else if ((~(left->efs_flags) & EFX_FILTER_FLAG_RX_RSS) &&
+ (~(right->efs_flags) & EFX_FILTER_FLAG_RX_RSS)) {
+ if (left->efs_dmaq_id == right->efs_dmaq_id)
+ return (B_TRUE);
+ }
+ return (B_FALSE);
+}
+
+static __checkReturn uint32_t
+ef10_filter_hash(
+ __in efx_filter_spec_t *spec)
+{
+ EFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t))
+ == 0);
+ EFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) %
+ sizeof (uint32_t)) == 0);
+
+ /*
+ * As the area of the efx_filter_spec_t we need to hash is DWORD
+ * aligned and an exact number of DWORDs in size we can use the
+ * optimised efx_hash_dwords() rather than efx_hash_bytes()
+ */
+ return (efx_hash_dwords((const uint32_t *)&spec->efs_outer_vid,
+ (sizeof (efx_filter_spec_t) -
+ EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) /
+ sizeof (uint32_t), 0));
+}
+
+/*
+ * Decide whether a filter should be exclusive or else should allow
+ * delivery to additional recipients. Currently we decide that
+ * filters for specific local unicast MAC and IP addresses are
+ * exclusive.
+ */
+static __checkReturn boolean_t
+ef10_filter_is_exclusive(
+ __in efx_filter_spec_t *spec)
+{
+ if ((spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC) &&
+ !EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac))
+ return (B_TRUE);
+
+ if ((spec->efs_match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV4) &&
+ ((spec->efs_loc_host.eo_u8[0] & 0xf) != 0xe))
+ return (B_TRUE);
+ if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV6) &&
+ (spec->efs_loc_host.eo_u8[0] != 0xff))
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_restore(
+ __in efx_nic_t *enp)
+{
+ int tbl_id;
+ efx_filter_spec_t *spec;
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ boolean_t restoring;
+ efsys_lock_state_t state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) {
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ spec = ef10_filter_entry_spec(eftp, tbl_id);
+ if (spec == NULL) {
+ restoring = B_FALSE;
+ } else if (ef10_filter_entry_is_busy(eftp, tbl_id)) {
+ /* Ignore busy entries. */
+ restoring = B_FALSE;
+ } else {
+ ef10_filter_set_entry_busy(eftp, tbl_id);
+ restoring = B_TRUE;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (restoring == B_FALSE)
+ continue;
+
+ if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_INSERT,
+ &eftp->eft_entry[tbl_id].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,
+ &eftp->eft_entry[tbl_id].efe_handle);
+ }
+
+ if (rc != 0)
+ goto fail1;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ ef10_filter_set_entry_not_busy(eftp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * An arbitrary search limit for the software hash table. As per the linux net
+ * driver.
+ */
+#define EF10_FILTER_SEARCH_LIMIT 200
+
+static __checkReturn efx_rc_t
+ef10_filter_add_internal(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace,
+ __out_opt uint32_t *filter_id)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *saved_spec;
+ uint32_t hash;
+ unsigned int depth;
+ int ins_index;
+ boolean_t replacing = B_FALSE;
+ unsigned int i;
+ efsys_lock_state_t state;
+ boolean_t locked = B_FALSE;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ hash = ef10_filter_hash(spec);
+
+ /*
+ * FIXME: Add support for inserting filters of different priorities
+ * and removing lower priority multicast filters (bug 42378)
+ */
+
+ /*
+ * Find any existing filters with the same match tuple or
+ * else a free slot to insert at. If any of them are busy,
+ * we have to wait and retry.
+ */
+ for (;;) {
+ ins_index = -1;
+ depth = 1;
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ for (;;) {
+ i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
+ saved_spec = ef10_filter_entry_spec(eftp, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0) {
+ ins_index = i;
+ }
+ } else if (ef10_filter_equal(spec, saved_spec)) {
+ if (ef10_filter_entry_is_busy(eftp, i))
+ break;
+ if (saved_spec->efs_priority
+ == EFX_FILTER_PRI_AUTO) {
+ ins_index = i;
+ goto found;
+ } else if (ef10_filter_is_exclusive(spec)) {
+ if (may_replace) {
+ ins_index = i;
+ goto found;
+ } else {
+ rc = EEXIST;
+ goto fail1;
+ }
+ }
+
+ /* Leave existing */
+ }
+
+ /*
+ * Once we reach the maximum search depth, use
+ * the first suitable slot or return EBUSY if
+ * there was none.
+ */
+ if (depth == EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = EBUSY;
+ goto fail2;
+ }
+ goto found;
+ }
+ depth++;
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+ }
+
+found:
+ /*
+ * Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = ef10_filter_entry_spec(eftp, ins_index);
+ if (saved_spec) {
+ if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) {
+ /* This is a filter we are refreshing */
+ ef10_filter_set_entry_not_auto_old(eftp, ins_index);
+ goto out_unlock;
+
+ }
+ replacing = B_TRUE;
+ } else {
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), saved_spec);
+ if (!saved_spec) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+ *saved_spec = *spec;
+ ef10_filter_set_entry(eftp, ins_index, saved_spec);
+ }
+ ef10_filter_set_entry_busy(eftp, ins_index);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ /*
+ * On replacing the filter handle may change after after a successful
+ * replace operation.
+ */
+ if (replacing) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_REPLACE,
+ &eftp->eft_entry[ins_index].efe_handle);
+ } else if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_INSERT,
+ &eftp->eft_entry[ins_index].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,
+ &eftp->eft_entry[ins_index].efe_handle);
+ }
+
+ if (rc != 0)
+ goto fail4;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ if (replacing) {
+ /* Update the fields that may differ */
+ saved_spec->efs_priority = spec->efs_priority;
+ saved_spec->efs_flags = spec->efs_flags;
+ saved_spec->efs_rss_context = spec->efs_rss_context;
+ saved_spec->efs_dmaq_id = spec->efs_dmaq_id;
+ }
+
+ ef10_filter_set_entry_not_busy(eftp, ins_index);
+
+out_unlock:
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ if (filter_id)
+ *filter_id = ins_index;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+ if (!replacing) {
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), saved_spec);
+ saved_spec = NULL;
+ }
+ ef10_filter_set_entry_not_busy(eftp, ins_index);
+ ef10_filter_set_entry(eftp, ins_index, NULL);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ if (locked)
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace)
+{
+ efx_rc_t rc;
+
+ rc = ef10_filter_add_internal(enp, spec, may_replace, NULL);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+ef10_filter_delete_internal(
+ __in efx_nic_t *enp,
+ __in uint32_t filter_id)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *spec;
+ efsys_lock_state_t state;
+ uint32_t filter_idx = filter_id % EFX_EF10_FILTER_TBL_ROWS;
+
+ /*
+ * Find the software table entry and mark it busy. Don't
+ * remove it yet; any attempt to update while we're waiting
+ * for the firmware must find the busy entry.
+ *
+ * FIXME: What if the busy flag is never cleared?
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ while (ef10_filter_entry_is_busy(table, filter_idx)) {
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_SPIN(1);
+ EFSYS_LOCK(enp->en_eslp, state);
+ }
+ if ((spec = ef10_filter_entry_spec(table, filter_idx)) != NULL) {
+ ef10_filter_set_entry_busy(table, filter_idx);
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (spec == NULL) {
+ rc = ENOENT;
+ goto fail1;
+ }
+
+ /*
+ * Try to remove the hardware filter. This may fail if the MC has
+ * rebooted (which frees all hardware filter resources).
+ */
+ if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_delete(enp,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE,
+ &table->eft_entry[filter_idx].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_delete(enp,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE,
+ &table->eft_entry[filter_idx].efe_handle);
+ }
+
+ /* Free the software table entry */
+ EFSYS_LOCK(enp->en_eslp, state);
+ ef10_filter_set_entry_not_busy(table, filter_idx);
+ ef10_filter_set_entry(table, filter_idx, NULL);
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec);
+
+ /* Check result of hardware filter removal */
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *saved_spec;
+ unsigned int hash;
+ unsigned int depth;
+ unsigned int i;
+ efsys_lock_state_t state;
+ boolean_t locked = B_FALSE;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ hash = ef10_filter_hash(spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ depth = 1;
+ for (;;) {
+ i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
+ saved_spec = ef10_filter_entry_spec(table, i);
+ if (saved_spec && ef10_filter_equal(spec, saved_spec) &&
+ ef10_filter_same_dest(spec, saved_spec)) {
+ break;
+ }
+ if (depth == EF10_FILTER_SEARCH_LIMIT) {
+ rc = ENOENT;
+ goto fail1;
+ }
+ depth++;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ rc = ef10_filter_delete_internal(enp, i);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ if (locked)
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_parser_disp_info(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __in boolean_t encap,
+ __out size_t *list_lengthp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN,
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)];
+ size_t matches_count;
+ size_t list_size;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP, encap ?
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ matches_count = MCDI_OUT_DWORD(req,
+ GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES);
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(matches_count)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *list_lengthp = matches_count;
+
+ if (buffer_length < matches_count) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ /*
+ * Check that the elements in the list in the MCDI response are the size
+ * we expect, so we can just copy them directly. Any conversion of the
+ * flags is handled by the caller.
+ */
+ EFX_STATIC_ASSERT(sizeof (uint32_t) ==
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN);
+
+ list_size = matches_count *
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN;
+ memcpy(buffer,
+ MCDI_OUT2(req, uint32_t,
+ GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES),
+ list_size);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ size_t mcdi_list_length;
+ size_t mcdi_encap_list_length;
+ size_t list_length;
+ uint32_t i;
+ uint32_t next_buf_idx;
+ size_t next_buf_length;
+ efx_rc_t rc;
+ boolean_t no_space = B_FALSE;
+ efx_filter_match_flags_t all_filter_flags =
+ (EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST |
+ EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_REM_PORT |
+ EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_INNER_VID |
+ EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_VNI_OR_VSID |
+ EFX_FILTER_MATCH_IFRM_LOC_MAC |
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST |
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST |
+ EFX_FILTER_MATCH_ENCAP_TYPE |
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST |
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST);
+
+ /*
+ * Two calls to MC_CMD_GET_PARSER_DISP_INFO are needed: one to get the
+ * list of supported filters for ordinary packets, and then another to
+ * get the list of supported filters for encapsulated packets. To
+ * distinguish the second list from the first, the
+ * EFX_FILTER_MATCH_ENCAP_TYPE flag is added to each filter for
+ * encapsulated packets.
+ */
+ rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length, B_FALSE,
+ &mcdi_list_length);
+ if (rc != 0) {
+ if (rc == ENOSPC)
+ no_space = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if (no_space) {
+ next_buf_idx = 0;
+ next_buf_length = 0;
+ } else {
+ EFSYS_ASSERT(mcdi_list_length <= buffer_length);
+ next_buf_idx = mcdi_list_length;
+ next_buf_length = buffer_length - mcdi_list_length;
+ }
+
+ if (encp->enc_tunnel_encapsulations_supported != 0) {
+ rc = efx_mcdi_get_parser_disp_info(enp, &buffer[next_buf_idx],
+ next_buf_length, B_TRUE, &mcdi_encap_list_length);
+ if (rc != 0) {
+ if (rc == ENOSPC)
+ no_space = B_TRUE;
+ else
+ goto fail2;
+ } else {
+ for (i = next_buf_idx;
+ i < next_buf_idx + mcdi_encap_list_length; i++)
+ buffer[i] |= EFX_FILTER_MATCH_ENCAP_TYPE;
+ }
+ } else {
+ mcdi_encap_list_length = 0;
+ }
+
+ if (no_space) {
+ *list_lengthp = mcdi_list_length + mcdi_encap_list_length;
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ /*
+ * The static assertions in ef10_filter_init() ensure that the values of
+ * the EFX_FILTER_MATCH flags match those used by MCDI, so they don't
+ * need to be converted.
+ *
+ * In case support is added to MCDI for additional flags, remove any
+ * matches from the list which include flags we don't support. The order
+ * of the matches is preserved as they are ordered from highest to
+ * lowest priority.
+ */
+ EFSYS_ASSERT(mcdi_list_length + mcdi_encap_list_length <=
+ buffer_length);
+ list_length = 0;
+ for (i = 0; i < mcdi_list_length + mcdi_encap_list_length; i++) {
+ if ((buffer[i] & ~all_filter_flags) == 0) {
+ buffer[list_length] = buffer[i];
+ list_length++;
+ }
+ }
+
+ *list_lengthp = list_length;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_unicast(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *addr,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the filter for the local station address */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_unicst_filter_count++;
+ EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_all_unicast(
+ __in efx_nic_t *enp,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the unknown unicast filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ efx_filter_spec_set_uc_def(&spec);
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_unicst_filter_count++;
+ EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_multicast_list(
+ __in efx_nic_t *enp,
+ __in boolean_t mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count,
+ __in efx_filter_flags_t filter_flags,
+ __in boolean_t rollback)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ uint8_t addr[6];
+ uint32_t i;
+ uint32_t filter_index;
+ uint32_t filter_count;
+ efx_rc_t rc;
+
+ if (mulcst == B_FALSE)
+ count = 0;
+
+ if (count + (brdcst ? 1 : 0) >
+ EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) {
+ /* Too many MAC addresses */
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Insert/renew multicast address list filters */
+ filter_count = 0;
+ for (i = 0; i < count; i++) {
+ efx_filter_spec_init_rx(&spec,
+ EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+
+ efx_filter_spec_set_eth_local(&spec,
+ EFX_FILTER_SPEC_VID_UNSPEC,
+ &addrs[i * EFX_MAC_ADDR_LEN]);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &filter_index);
+
+ if (rc == 0) {
+ eftp->eft_mulcst_filter_indexes[filter_count] =
+ filter_index;
+ filter_count++;
+ } else if (rollback == B_TRUE) {
+ /* Only stop upon failure if told to rollback */
+ goto rollback;
+ }
+
+ }
+
+ if (brdcst == B_TRUE) {
+ /* Insert/renew broadcast address filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+
+ EFX_MAC_BROADCAST_ADDR_SET(addr);
+ efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC,
+ addr);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &filter_index);
+
+ if (rc == 0) {
+ eftp->eft_mulcst_filter_indexes[filter_count] =
+ filter_index;
+ filter_count++;
+ } else if (rollback == B_TRUE) {
+ /* Only stop upon failure if told to rollback */
+ goto rollback;
+ }
+ }
+
+ eftp->eft_mulcst_filter_count = filter_count;
+ eftp->eft_using_all_mulcst = B_FALSE;
+
+ return (0);
+
+rollback:
+ /* Remove any filters we have inserted */
+ i = filter_count;
+ while (i--) {
+ (void) ef10_filter_delete_internal(enp,
+ eftp->eft_mulcst_filter_indexes[i]);
+ }
+ eftp->eft_mulcst_filter_count = 0;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_all_multicast(
+ __in efx_nic_t *enp,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the unknown multicast filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ efx_filter_spec_set_mc_def(&spec);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_mulcst_filter_indexes[0]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_mulcst_filter_count = 1;
+ eftp->eft_using_all_mulcst = B_TRUE;
+
+ /*
+ * FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic.
+ */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+typedef struct ef10_filter_encap_entry_s {
+ uint16_t ether_type;
+ efx_tunnel_protocol_t encap_type;
+ uint32_t inner_frame_match;
+} ef10_filter_encap_entry_t;
+
+#define EF10_ENCAP_FILTER_ENTRY(ipv, encap_type, inner_frame_match) \
+ { EFX_ETHER_TYPE_##ipv, EFX_TUNNEL_PROTOCOL_##encap_type, \
+ EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_##inner_frame_match }
+
+static ef10_filter_encap_entry_t ef10_filter_encap_list[] = {
+ EF10_ENCAP_FILTER_ENTRY(IPV4, VXLAN, UCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV4, VXLAN, MCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV6, VXLAN, UCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV6, VXLAN, MCAST_DST),
+
+ EF10_ENCAP_FILTER_ENTRY(IPV4, GENEVE, UCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV4, GENEVE, MCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV6, GENEVE, UCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV6, GENEVE, MCAST_DST),
+
+ EF10_ENCAP_FILTER_ENTRY(IPV4, NVGRE, UCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV4, NVGRE, MCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV6, NVGRE, UCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV6, NVGRE, MCAST_DST),
+};
+
+#undef EF10_ENCAP_FILTER_ENTRY
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_encap_filters(
+ __in efx_nic_t *enp,
+ __in boolean_t mulcst,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ uint32_t i;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(ef10_filter_encap_list) <=
+ EFX_ARRAY_SIZE(table->eft_encap_filter_indexes));
+
+ /*
+ * On Medford, full-featured firmware can identify packets as being
+ * tunnel encapsulated, even if no encapsulated packet offloads are in
+ * use. When packets are identified as such, ordinary filters are not
+ * applied, only ones specific to encapsulated packets. Hence we need to
+ * insert filters for encapsulated packets in order to receive them.
+ *
+ * Separate filters need to be inserted for each ether type,
+ * encapsulation type, and inner frame type (unicast or multicast). To
+ * keep things simple and reduce the number of filters needed, catch-all
+ * filters for all combinations of types are inserted, even if
+ * all_unicst or all_mulcst have not been set. (These catch-all filters
+ * may well, however, fail to insert on unprivileged functions.)
+ */
+ table->eft_encap_filter_count = 0;
+ for (i = 0; i < EFX_ARRAY_SIZE(ef10_filter_encap_list); i++) {
+ efx_filter_spec_t spec;
+ ef10_filter_encap_entry_t *encap_filter =
+ &ef10_filter_encap_list[i];
+
+ /*
+ * Skip multicast filters if we've not been asked for
+ * any multicast traffic.
+ */
+ if ((mulcst == B_FALSE) &&
+ (encap_filter->inner_frame_match ==
+ EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST))
+ continue;
+
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ table->eft_default_rxq);
+ efx_filter_spec_set_ether_type(&spec, encap_filter->ether_type);
+ rc = efx_filter_spec_set_encap_type(&spec,
+ encap_filter->encap_type,
+ encap_filter->inner_frame_match);
+ if (rc != 0)
+ goto fail1;
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &table->eft_encap_filter_indexes[
+ table->eft_encap_filter_count]);
+ if (rc != 0) {
+ if (rc != EACCES)
+ goto fail2;
+ } else {
+ table->eft_encap_filter_count++;
+ }
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+ef10_filter_remove_old(
+ __in efx_nic_t *enp)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ uint32_t i;
+
+ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ if (ef10_filter_entry_is_auto_old(table, i)) {
+ (void) ef10_filter_delete_internal(enp, i);
+ }
+ }
+}
+
+
+static __checkReturn efx_rc_t
+ef10_filter_get_workarounds(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ uint32_t implemented = 0;
+ uint32_t enabled = 0;
+ efx_rc_t rc;
+
+ rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled);
+ if (rc == 0) {
+ /* Check if chained multicast filter support is enabled */
+ if (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807)
+ encp->enc_bug26807_workaround = B_TRUE;
+ else
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else if (rc == ENOTSUP) {
+ /*
+ * Firmware is too old to support GET_WORKAROUNDS, and support
+ * for this workaround was implemented later.
+ */
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+
+/*
+ * Reconfigure all filters.
+ * If all_unicst and/or all mulcst filters cannot be applied then
+ * return ENOTSUP (Note the filters for the specified addresses are
+ * still applied in this case).
+ */
+ __checkReturn efx_rc_t
+ef10_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_flags_t filter_flags;
+ unsigned int i;
+ efx_rc_t all_unicst_rc = 0;
+ efx_rc_t all_mulcst_rc = 0;
+ efx_rc_t rc;
+
+ if (table->eft_default_rxq == NULL) {
+ /*
+ * Filters direct traffic to the default RXQ, and so cannot be
+ * inserted until it is available. Any currently configured
+ * filters must be removed (ignore errors in case the MC
+ * has rebooted, which removes hardware filters).
+ */
+ for (i = 0; i < table->eft_unicst_filter_count; i++) {
+ (void) ef10_filter_delete_internal(enp,
+ table->eft_unicst_filter_indexes[i]);
+ }
+ table->eft_unicst_filter_count = 0;
+
+ for (i = 0; i < table->eft_mulcst_filter_count; i++) {
+ (void) ef10_filter_delete_internal(enp,
+ table->eft_mulcst_filter_indexes[i]);
+ }
+ table->eft_mulcst_filter_count = 0;
+
+ for (i = 0; i < table->eft_encap_filter_count; i++) {
+ (void) ef10_filter_delete_internal(enp,
+ table->eft_encap_filter_indexes[i]);
+ }
+ table->eft_encap_filter_count = 0;
+
+ return (0);
+ }
+
+ if (table->eft_using_rss)
+ filter_flags = EFX_FILTER_FLAG_RX_RSS;
+ else
+ filter_flags = 0;
+
+ /* Mark old filters which may need to be removed */
+ for (i = 0; i < table->eft_unicst_filter_count; i++) {
+ ef10_filter_set_entry_auto_old(table,
+ table->eft_unicst_filter_indexes[i]);
+ }
+ for (i = 0; i < table->eft_mulcst_filter_count; i++) {
+ ef10_filter_set_entry_auto_old(table,
+ table->eft_mulcst_filter_indexes[i]);
+ }
+ for (i = 0; i < table->eft_encap_filter_count; i++) {
+ ef10_filter_set_entry_auto_old(table,
+ table->eft_encap_filter_indexes[i]);
+ }
+
+ /*
+ * Insert or renew unicast filters.
+ *
+ * Frimware does not perform chaining on unicast filters. As traffic is
+ * therefore only delivered to the first matching filter, we should
+ * always insert the specific filter for our MAC address, to try and
+ * ensure we get that traffic.
+ *
+ * (If the filter for our MAC address has already been inserted by
+ * another function, we won't receive traffic sent to us, even if we
+ * insert a unicast mismatch filter. To prevent traffic stealing, this
+ * therefore relies on the privilege model only allowing functions to
+ * insert filters for their own MAC address unless explicitly given
+ * additional privileges by the user. This also means that, even on a
+ * priviliged function, inserting a unicast mismatch filter may not
+ * catch all traffic in multi PCI function scenarios.)
+ */
+ table->eft_unicst_filter_count = 0;
+ rc = ef10_filter_insert_unicast(enp, mac_addr, filter_flags);
+ if (all_unicst || (rc != 0)) {
+ all_unicst_rc = ef10_filter_insert_all_unicast(enp,
+ filter_flags);
+ if ((rc != 0) && (all_unicst_rc != 0))
+ goto fail1;
+ }
+
+ /*
+ * WORKAROUND_BUG26807 controls firmware support for chained multicast
+ * filters, and can only be enabled or disabled when the hardware filter
+ * table is empty.
+ *
+ * Chained multicast filters require support from the datapath firmware,
+ * and may not be available (e.g. low-latency variants or old Huntington
+ * firmware).
+ *
+ * Firmware will reset (FLR) functions which have inserted filters in
+ * the hardware filter table when the workaround is enabled/disabled.
+ * Functions without any hardware filters are not reset.
+ *
+ * Re-check if the workaround is enabled after adding unicast hardware
+ * filters. This ensures that encp->enc_bug26807_workaround matches the
+ * firmware state, and that later changes to enable/disable the
+ * workaround will result in this function seeing a reset (FLR).
+ *
+ * In common-code drivers, we only support multiple PCI function
+ * scenarios with firmware that supports multicast chaining, so we can
+ * assume it is enabled for such cases and hence simplify the filter
+ * insertion logic. Firmware that does not support multicast chaining
+ * does not support multiple PCI function configurations either, so
+ * filter insertion is much simpler and the same strategies can still be
+ * used.
+ */
+ if ((rc = ef10_filter_get_workarounds(enp)) != 0)
+ goto fail2;
+
+ if ((table->eft_using_all_mulcst != all_mulcst) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is enabled, so traffic that matches
+ * more than one multicast filter will be replicated and
+ * delivered to multiple recipients. To avoid this duplicate
+ * delivery, remove old multicast filters before inserting new
+ * multicast filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
+
+ /* Insert or renew multicast filters */
+ if (all_mulcst == B_TRUE) {
+ /*
+ * Insert the all multicast filter. If that fails, try to insert
+ * all of our multicast filters (but without rollback on
+ * failure).
+ */
+ all_mulcst_rc = ef10_filter_insert_all_multicast(enp,
+ filter_flags);
+ if (all_mulcst_rc != 0) {
+ rc = ef10_filter_insert_multicast_list(enp, B_TRUE,
+ brdcst, addrs, count, filter_flags, B_FALSE);
+ if (rc != 0)
+ goto fail3;
+ }
+ } else {
+ /*
+ * Insert filters for multicast addresses.
+ * If any insertion fails, then rollback and try to insert the
+ * all multicast filter instead.
+ * If that also fails, try to insert all of the multicast
+ * filters (but without rollback on failure).
+ */
+ rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst,
+ addrs, count, filter_flags, B_TRUE);
+ if (rc != 0) {
+ if ((table->eft_using_all_mulcst == B_FALSE) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is on, so remove
+ * old filters before inserting the multicast
+ * all filter to avoid duplicate delivery caused
+ * by packets matching multiple filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
+
+ rc = ef10_filter_insert_all_multicast(enp,
+ filter_flags);
+ if (rc != 0) {
+ rc = ef10_filter_insert_multicast_list(enp,
+ mulcst, brdcst,
+ addrs, count, filter_flags, B_FALSE);
+ if (rc != 0)
+ goto fail4;
+ }
+ }
+ }
+
+ if (encp->enc_tunnel_encapsulations_supported != 0) {
+ /* Try to insert filters for encapsulated packets. */
+ (void) ef10_filter_insert_encap_filters(enp,
+ mulcst || all_mulcst || brdcst,
+ filter_flags);
+ }
+
+ /* Remove old filters which were not renewed */
+ ef10_filter_remove_old(enp);
+
+ /* report if any optional flags were rejected */
+ if (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) ||
+ ((all_mulcst != B_FALSE) && (all_mulcst_rc != 0))) {
+ rc = ENOTSUP;
+ }
+
+ return (rc);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Clear auto old flags */
+ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ if (ef10_filter_entry_is_auto_old(table, i)) {
+ ef10_filter_set_entry_not_auto_old(table, i);
+ }
+ }
+
+ return (rc);
+}
+
+ void
+ef10_filter_get_default_rxq(
+ __in efx_nic_t *enp,
+ __out efx_rxq_t **erpp,
+ __out boolean_t *using_rss)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+ *erpp = table->eft_default_rxq;
+ *using_rss = table->eft_using_rss;
+}
+
+
+ void
+ef10_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+#if EFSYS_OPT_RX_SCALE
+ EFSYS_ASSERT((using_rss == B_FALSE) ||
+ (enp->en_rss_context != EF10_RSS_CONTEXT_INVALID));
+ table->eft_using_rss = using_rss;
+#else
+ EFSYS_ASSERT(using_rss == B_FALSE);
+ table->eft_using_rss = B_FALSE;
+#endif
+ table->eft_default_rxq = erp;
+}
+
+ void
+ef10_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+ table->eft_default_rxq = NULL;
+ table->eft_using_rss = B_FALSE;
+}
+
+
+#endif /* EFSYS_OPT_FILTER */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_image.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_image.c
new file mode 100644
index 00000000..6fb7e476
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_image.c
@@ -0,0 +1,885 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+#if EFSYS_OPT_IMAGE_LAYOUT
+
+/*
+ * Utility routines to support limited parsing of ASN.1 tags. This is not a
+ * general purpose ASN.1 parser, but is sufficient to locate the required
+ * objects in a signed image with CMS headers.
+ */
+
+/* DER encodings for ASN.1 tags (see ITU-T X.690) */
+#define ASN1_TAG_INTEGER (0x02)
+#define ASN1_TAG_OCTET_STRING (0x04)
+#define ASN1_TAG_OBJ_ID (0x06)
+#define ASN1_TAG_SEQUENCE (0x30)
+#define ASN1_TAG_SET (0x31)
+
+#define ASN1_TAG_IS_PRIM(tag) ((tag & 0x20) == 0)
+
+#define ASN1_TAG_PRIM_CONTEXT(n) (0x80 + (n))
+#define ASN1_TAG_CONS_CONTEXT(n) (0xA0 + (n))
+
+typedef struct efx_asn1_cursor_s {
+ uint8_t *buffer;
+ uint32_t length;
+
+ uint8_t tag;
+ uint32_t hdr_size;
+ uint32_t val_size;
+} efx_asn1_cursor_t;
+
+
+/* Parse header of DER encoded ASN.1 TLV and match tag */
+static __checkReturn efx_rc_t
+efx_asn1_parse_header_match_tag(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL || cursor->buffer == NULL || cursor->length < 2) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ cursor->tag = cursor->buffer[0];
+ if (cursor->tag != tag) {
+ /* Tag not matched */
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ if ((cursor->tag & 0x1F) == 0x1F) {
+ /* Long tag format not used in CMS syntax */
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ if ((cursor->buffer[1] & 0x80) == 0) {
+ /* Short form: length is 0..127 */
+ cursor->hdr_size = 2;
+ cursor->val_size = cursor->buffer[1];
+ } else {
+ /* Long form: length encoded as [0x80+nbytes][length bytes] */
+ uint32_t nbytes = cursor->buffer[1] & 0x7F;
+ uint32_t offset;
+
+ if (nbytes == 0) {
+ /* Indefinite length not allowed in DER encoding */
+ rc = EINVAL;
+ goto fail4;
+ }
+ if (2 + nbytes > cursor->length) {
+ /* Header length overflows image buffer */
+ rc = EINVAL;
+ goto fail6;
+ }
+ if (nbytes > sizeof (uint32_t)) {
+ /* Length encoding too big */
+ rc = E2BIG;
+ goto fail5;
+ }
+ cursor->hdr_size = 2 + nbytes;
+ cursor->val_size = 0;
+ for (offset = 2; offset < cursor->hdr_size; offset++) {
+ cursor->val_size =
+ (cursor->val_size << 8) | cursor->buffer[offset];
+ }
+ }
+
+ if ((cursor->hdr_size + cursor->val_size) > cursor->length) {
+ /* Length overflows image buffer */
+ rc = E2BIG;
+ goto fail7;
+ }
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Enter nested ASN.1 TLV (contained in value of current TLV) */
+static __checkReturn efx_rc_t
+efx_asn1_enter_tag(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (ASN1_TAG_IS_PRIM(tag)) {
+ /* Cannot enter a primitive tag */
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ rc = efx_asn1_parse_header_match_tag(cursor, tag);
+ if (rc != 0) {
+ /* Invalid TLV or wrong tag */
+ goto fail3;
+ }
+
+ /* Limit cursor range to nested TLV */
+ cursor->buffer += cursor->hdr_size;
+ cursor->length = cursor->val_size;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Check that the current ASN.1 TLV matches the given tag and value.
+ * Advance cursor to next TLV on a successful match.
+ */
+static __checkReturn efx_rc_t
+efx_asn1_match_tag_value(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag,
+ __in const void *valp,
+ __in uint32_t val_size)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ rc = efx_asn1_parse_header_match_tag(cursor, tag);
+ if (rc != 0) {
+ /* Invalid TLV or wrong tag */
+ goto fail2;
+ }
+ if (cursor->val_size != val_size) {
+ /* Value size is different */
+ rc = EINVAL;
+ goto fail3;
+ }
+ if (memcmp(cursor->buffer + cursor->hdr_size, valp, val_size) != 0) {
+ /* Value content is different */
+ rc = EINVAL;
+ goto fail4;
+ }
+ cursor->buffer += cursor->hdr_size + cursor->val_size;
+ cursor->length -= cursor->hdr_size + cursor->val_size;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Advance cursor to next TLV */
+static __checkReturn efx_rc_t
+efx_asn1_skip_tag(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = efx_asn1_parse_header_match_tag(cursor, tag);
+ if (rc != 0) {
+ /* Invalid TLV or wrong tag */
+ goto fail2;
+ }
+ cursor->buffer += cursor->hdr_size + cursor->val_size;
+ cursor->length -= cursor->hdr_size + cursor->val_size;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Return pointer to value octets and value size from current TLV */
+static __checkReturn efx_rc_t
+efx_asn1_get_tag_value(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag,
+ __out uint8_t **valp,
+ __out uint32_t *val_sizep)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL || valp == NULL || val_sizep == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = efx_asn1_parse_header_match_tag(cursor, tag);
+ if (rc != 0) {
+ /* Invalid TLV or wrong tag */
+ goto fail2;
+ }
+ *valp = cursor->buffer + cursor->hdr_size;
+ *val_sizep = cursor->val_size;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Utility routines for parsing CMS headers (see RFC2315, PKCS#7)
+ */
+
+/* OID 1.2.840.113549.1.7.2 */
+static const uint8_t PKCS7_SignedData[] =
+{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07, 0x02 };
+
+/* OID 1.2.840.113549.1.7.1 */
+static const uint8_t PKCS7_Data[] =
+{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07, 0x01 };
+
+/* SignedData structure version */
+static const uint8_t SignedData_Version[] =
+{ 0x03 };
+
+/*
+ * Check for a valid image in signed image format. This uses CMS syntax
+ * (see RFC2315, PKCS#7) to provide signatures, and certificates required
+ * to validate the signatures. The encapsulated content is in unsigned image
+ * format (reflash header, image code, trailer checksum).
+ */
+static __checkReturn efx_rc_t
+efx_check_signed_image_header(
+ __in void *bufferp,
+ __in uint32_t buffer_size,
+ __out uint32_t *content_offsetp,
+ __out uint32_t *content_lengthp)
+{
+ efx_asn1_cursor_t cursor;
+ uint8_t *valp;
+ uint32_t val_size;
+ efx_rc_t rc;
+
+ if (content_offsetp == NULL || content_lengthp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ cursor.buffer = (uint8_t *)bufferp;
+ cursor.length = buffer_size;
+
+ /* ContextInfo */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE);
+ if (rc != 0)
+ goto fail2;
+
+ /* ContextInfo.contentType */
+ rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_OBJ_ID,
+ PKCS7_SignedData, sizeof (PKCS7_SignedData));
+ if (rc != 0)
+ goto fail3;
+
+ /* ContextInfo.content */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_CONS_CONTEXT(0));
+ if (rc != 0)
+ goto fail4;
+
+ /* SignedData */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE);
+ if (rc != 0)
+ goto fail5;
+
+ /* SignedData.version */
+ rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_INTEGER,
+ SignedData_Version, sizeof (SignedData_Version));
+ if (rc != 0)
+ goto fail6;
+
+ /* SignedData.digestAlgorithms */
+ rc = efx_asn1_skip_tag(&cursor, ASN1_TAG_SET);
+ if (rc != 0)
+ goto fail7;
+
+ /* SignedData.encapContentInfo */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE);
+ if (rc != 0)
+ goto fail8;
+
+ /* SignedData.encapContentInfo.econtentType */
+ rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_OBJ_ID,
+ PKCS7_Data, sizeof (PKCS7_Data));
+ if (rc != 0)
+ goto fail9;
+
+ /* SignedData.encapContentInfo.econtent */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_CONS_CONTEXT(0));
+ if (rc != 0)
+ goto fail10;
+
+ /*
+ * The octet string contains the image header, image code bytes and
+ * image trailer CRC (same as unsigned image layout).
+ */
+ valp = NULL;
+ val_size = 0;
+ rc = efx_asn1_get_tag_value(&cursor, ASN1_TAG_OCTET_STRING,
+ &valp, &val_size);
+ if (rc != 0)
+ goto fail11;
+
+ if ((valp == NULL) || (val_size == 0)) {
+ rc = EINVAL;
+ goto fail12;
+ }
+ if (valp < (uint8_t *)bufferp) {
+ rc = EINVAL;
+ goto fail13;
+ }
+ if ((valp + val_size) > ((uint8_t *)bufferp + buffer_size)) {
+ rc = EINVAL;
+ goto fail14;
+ }
+
+ *content_offsetp = (uint32_t)(valp - (uint8_t *)bufferp);
+ *content_lengthp = val_size;
+
+ return (0);
+
+fail14:
+ EFSYS_PROBE(fail14);
+fail13:
+ EFSYS_PROBE(fail13);
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_check_unsigned_image(
+ __in void *bufferp,
+ __in uint32_t buffer_size)
+{
+ efx_image_header_t *header;
+ efx_image_trailer_t *trailer;
+ uint32_t crc;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(sizeof (*header) == EFX_IMAGE_HEADER_SIZE);
+ EFX_STATIC_ASSERT(sizeof (*trailer) == EFX_IMAGE_TRAILER_SIZE);
+
+ /* Must have at least enough space for required image header fields */
+ if (buffer_size < (EFX_FIELD_OFFSET(efx_image_header_t, eih_size) +
+ sizeof (header->eih_size))) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ header = (efx_image_header_t *)bufferp;
+
+ if (header->eih_magic != EFX_IMAGE_HEADER_MAGIC) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /*
+ * Check image header version is same or higher than lowest required
+ * version.
+ */
+ if (header->eih_version < EFX_IMAGE_HEADER_VERSION) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ /* Buffer must have space for image header, code and image trailer. */
+ if (buffer_size < (header->eih_size + header->eih_code_size +
+ EFX_IMAGE_TRAILER_SIZE)) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ /* Check CRC from image buffer matches computed CRC. */
+ trailer = (efx_image_trailer_t *)((uint8_t *)header +
+ header->eih_size + header->eih_code_size);
+
+ crc = efx_crc32_calculate(0, (uint8_t *)header,
+ (header->eih_size + header->eih_code_size));
+
+ if (trailer->eit_crc != crc) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_check_reflash_image(
+ __in void *bufferp,
+ __in uint32_t buffer_size,
+ __out efx_image_info_t *infop)
+{
+ efx_image_format_t format = EFX_IMAGE_FORMAT_NO_IMAGE;
+ uint32_t image_offset;
+ uint32_t image_size;
+ void *imagep;
+ efx_rc_t rc;
+
+
+ EFSYS_ASSERT(infop != NULL);
+ if (infop == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ memset(infop, 0, sizeof (*infop));
+
+ if (bufferp == NULL || buffer_size == 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /*
+ * Check if the buffer contains an image in signed format, and if so,
+ * locate the image header.
+ */
+ rc = efx_check_signed_image_header(bufferp, buffer_size,
+ &image_offset, &image_size);
+ if (rc == 0) {
+ /*
+ * Buffer holds signed image format. Check that the encapsulated
+ * content is in unsigned image format.
+ */
+ format = EFX_IMAGE_FORMAT_SIGNED;
+ } else {
+ /* Check if the buffer holds image in unsigned image format */
+ format = EFX_IMAGE_FORMAT_UNSIGNED;
+ image_offset = 0;
+ image_size = buffer_size;
+ }
+ if (image_offset + image_size > buffer_size) {
+ rc = E2BIG;
+ goto fail3;
+ }
+ imagep = (uint8_t *)bufferp + image_offset;
+
+ /* Check unsigned image layout (image header, code, image trailer) */
+ rc = efx_check_unsigned_image(imagep, image_size);
+ if (rc != 0)
+ goto fail4;
+
+ /* Return image details */
+ infop->eii_format = format;
+ infop->eii_imagep = bufferp;
+ infop->eii_image_size = buffer_size;
+ infop->eii_headerp = (efx_image_header_t *)imagep;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+ infop->eii_format = EFX_IMAGE_FORMAT_INVALID;
+ infop->eii_imagep = NULL;
+ infop->eii_image_size = 0;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_build_signed_image_write_buffer(
+ __out uint8_t *bufferp,
+ __in uint32_t buffer_size,
+ __in efx_image_info_t *infop,
+ __out efx_image_header_t **headerpp)
+{
+ signed_image_chunk_hdr_t chunk_hdr;
+ uint32_t hdr_offset;
+ struct {
+ uint32_t offset;
+ uint32_t size;
+ } cms_header, image_header, code, image_trailer, signature;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT((infop != NULL) && (headerpp != NULL));
+
+ if ((bufferp == NULL) || (buffer_size == 0) ||
+ (infop == NULL) || (headerpp == NULL)) {
+ /* Invalid arguments */
+ rc = EINVAL;
+ goto fail1;
+ }
+ if ((infop->eii_format != EFX_IMAGE_FORMAT_SIGNED) ||
+ (infop->eii_imagep == NULL) ||
+ (infop->eii_headerp == NULL) ||
+ ((uint8_t *)infop->eii_headerp < (uint8_t *)infop->eii_imagep) ||
+ (infop->eii_image_size < EFX_IMAGE_HEADER_SIZE) ||
+ ((size_t)((uint8_t *)infop->eii_headerp - infop->eii_imagep) >
+ (infop->eii_image_size - EFX_IMAGE_HEADER_SIZE))) {
+ /* Invalid image info */
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /* Locate image chunks in original signed image */
+ cms_header.offset = 0;
+ cms_header.size =
+ (uint32_t)((uint8_t *)infop->eii_headerp - infop->eii_imagep);
+ if ((cms_header.size > buffer_size) ||
+ (cms_header.offset > (buffer_size - cms_header.size))) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ image_header.offset = cms_header.offset + cms_header.size;
+ image_header.size = infop->eii_headerp->eih_size;
+ if ((image_header.size > buffer_size) ||
+ (image_header.offset > (buffer_size - image_header.size))) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ code.offset = image_header.offset + image_header.size;
+ code.size = infop->eii_headerp->eih_code_size;
+ if ((code.size > buffer_size) ||
+ (code.offset > (buffer_size - code.size))) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ image_trailer.offset = code.offset + code.size;
+ image_trailer.size = EFX_IMAGE_TRAILER_SIZE;
+ if ((image_trailer.size > buffer_size) ||
+ (image_trailer.offset > (buffer_size - image_trailer.size))) {
+ rc = EINVAL;
+ goto fail6;
+ }
+
+ signature.offset = image_trailer.offset + image_trailer.size;
+ signature.size = (uint32_t)(infop->eii_image_size - signature.offset);
+ if ((signature.size > buffer_size) ||
+ (signature.offset > (buffer_size - signature.size))) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ EFSYS_ASSERT3U(infop->eii_image_size, ==, cms_header.size +
+ image_header.size + code.size + image_trailer.size +
+ signature.size);
+
+ /* BEGIN CSTYLED */
+ /*
+ * Build signed image partition, inserting chunk headers.
+ *
+ * Signed Image: Image in NVRAM partition:
+ *
+ * +-----------------+ +-----------------+
+ * | CMS header | | mcfw.update |<----+
+ * +-----------------+ | | |
+ * | reflash header | +-----------------+ |
+ * +-----------------+ | chunk header: |-->--|-+
+ * | mcfw.update | | REFLASH_TRAILER | | |
+ * | | +-----------------+ | |
+ * +-----------------+ +-->| CMS header | | |
+ * | reflash trailer | | +-----------------+ | |
+ * +-----------------+ | | chunk header: |->-+ | |
+ * | signature | | | REFLASH_HEADER | | | |
+ * +-----------------+ | +-----------------+ | | |
+ * | | reflash header |<--+ | |
+ * | +-----------------+ | |
+ * | | chunk header: |-->--+ |
+ * | | IMAGE | |
+ * | +-----------------+ |
+ * | | reflash trailer |<------+
+ * | +-----------------+
+ * | | chunk header: |
+ * | | SIGNATURE |->-+
+ * | +-----------------+ |
+ * | | signature |<--+
+ * | +-----------------+
+ * | | ...unused... |
+ * | +-----------------+
+ * +-<-| chunk header: |
+ * >-->| CMS_HEADER |
+ * +-----------------+
+ *
+ * Each chunk header gives the partition offset and length of the image
+ * chunk's data. The image chunk data is immediately followed by the
+ * chunk header for the next chunk.
+ *
+ * The data chunk for the firmware code must be at the start of the
+ * partition (needed for the bootloader). The first chunk header in the
+ * chain (for the CMS header) is stored at the end of the partition. The
+ * chain of chunk headers maintains the same logical order of image
+ * chunks as the original signed image file. This set of constraints
+ * results in the layout used for the data chunks and chunk headers.
+ */
+ /* END CSTYLED */
+ memset(bufferp, buffer_size, 0xFF);
+
+ EFX_STATIC_ASSERT(sizeof (chunk_hdr) == SIGNED_IMAGE_CHUNK_HDR_LEN);
+ memset(&chunk_hdr, 0, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ /*
+ * CMS header
+ */
+ if (buffer_size < SIGNED_IMAGE_CHUNK_HDR_LEN) {
+ rc = ENOSPC;
+ goto fail8;
+ }
+ hdr_offset = buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN;
+
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_CMS_HEADER;
+ chunk_hdr.offset = code.size + SIGNED_IMAGE_CHUNK_HDR_LEN;
+ chunk_hdr.len = cms_header.size;
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, sizeof (chunk_hdr));
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail9;
+ }
+ memcpy(bufferp + chunk_hdr.offset,
+ infop->eii_imagep + cms_header.offset,
+ cms_header.size);
+
+ /*
+ * Image header
+ */
+ hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+ if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+ rc = ENOSPC;
+ goto fail10;
+ }
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_REFLASH_HEADER;
+ chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN;
+ chunk_hdr.len = image_header.size;
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail11;
+ }
+ memcpy(bufferp + chunk_hdr.offset,
+ infop->eii_imagep + image_header.offset,
+ image_header.size);
+
+ *headerpp = (efx_image_header_t *)(bufferp + chunk_hdr.offset);
+
+ /*
+ * Firmware code
+ */
+ hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+ if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+ rc = ENOSPC;
+ goto fail12;
+ }
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_IMAGE;
+ chunk_hdr.offset = 0;
+ chunk_hdr.len = code.size;
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail13;
+ }
+ memcpy(bufferp + chunk_hdr.offset,
+ infop->eii_imagep + code.offset,
+ code.size);
+
+ /*
+ * Image trailer (CRC)
+ */
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_REFLASH_TRAILER;
+ chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN;
+ chunk_hdr.len = image_trailer.size;
+
+ hdr_offset = code.size;
+ if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+ rc = ENOSPC;
+ goto fail14;
+ }
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail15;
+ }
+ memcpy((uint8_t *)bufferp + chunk_hdr.offset,
+ infop->eii_imagep + image_trailer.offset,
+ image_trailer.size);
+
+ /*
+ * Signature
+ */
+ hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+ if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+ rc = ENOSPC;
+ goto fail16;
+ }
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_SIGNATURE;
+ chunk_hdr.offset = chunk_hdr.offset + SIGNED_IMAGE_CHUNK_HDR_LEN;
+ chunk_hdr.len = signature.size;
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail17;
+ }
+ memcpy(bufferp + chunk_hdr.offset,
+ infop->eii_imagep + signature.offset,
+ signature.size);
+
+ return (0);
+
+fail17:
+ EFSYS_PROBE(fail17);
+fail16:
+ EFSYS_PROBE(fail16);
+fail15:
+ EFSYS_PROBE(fail15);
+fail14:
+ EFSYS_PROBE(fail14);
+fail13:
+ EFSYS_PROBE(fail13);
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+#endif /* EFSYS_OPT_IMAGE_LAYOUT */
+
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_impl.h b/src/spdk/dpdk/drivers/net/sfc/base/ef10_impl.h
new file mode 100644
index 00000000..4751faf1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_impl.h
@@ -0,0 +1,1233 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_EF10_IMPL_H
+#define _SYS_EF10_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Number of hardware PIO buffers (for compile-time resource dimensions) */
+#define EF10_MAX_PIOBUF_NBUFS (16)
+
+#if EFSYS_OPT_HUNTINGTON
+# if (EF10_MAX_PIOBUF_NBUFS < HUNT_PIOBUF_NBUFS)
+# error "EF10_MAX_PIOBUF_NBUFS too small"
+# endif
+#endif /* EFSYS_OPT_HUNTINGTON */
+#if EFSYS_OPT_MEDFORD
+# if (EF10_MAX_PIOBUF_NBUFS < MEDFORD_PIOBUF_NBUFS)
+# error "EF10_MAX_PIOBUF_NBUFS too small"
+# endif
+#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+# if (EF10_MAX_PIOBUF_NBUFS < MEDFORD2_PIOBUF_NBUFS)
+# error "EF10_MAX_PIOBUF_NBUFS too small"
+# endif
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+
+
+/*
+ * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could
+ * possibly be increased, or the write size reported by newer firmware used
+ * instead.
+ */
+#define EF10_NVRAM_CHUNK 0x80
+
+/*
+ * Alignment requirement for value written to RX WPTR: the WPTR must be aligned
+ * to an 8 descriptor boundary.
+ */
+#define EF10_RX_WPTR_ALIGN 8
+
+/*
+ * Max byte offset into the packet the TCP header must start for the hardware
+ * to be able to parse the packet correctly.
+ */
+#define EF10_TCP_HEADER_OFFSET_LIMIT 208
+
+/* Invalid RSS context handle */
+#define EF10_RSS_CONTEXT_INVALID (0xffffffff)
+
+
+/* EV */
+
+ __checkReturn efx_rc_t
+ef10_ev_init(
+ __in efx_nic_t *enp);
+
+ void
+ef10_ev_fini(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep);
+
+ void
+ef10_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+ __checkReturn efx_rc_t
+ef10_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+ void
+ef10_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+ __checkReturn efx_rc_t
+ef10_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+#endif /* EFSYS_OPT_QSTATS */
+
+ void
+ef10_ev_rxlabel_init(
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp,
+ __in unsigned int label,
+ __in efx_rxq_type_t type);
+
+ void
+ef10_ev_rxlabel_fini(
+ __in efx_evq_t *eep,
+ __in unsigned int label);
+
+/* INTR */
+
+ __checkReturn efx_rc_t
+ef10_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+ void
+ef10_intr_enable(
+ __in efx_nic_t *enp);
+
+ void
+ef10_intr_disable(
+ __in efx_nic_t *enp);
+
+ void
+ef10_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+ void
+ef10_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp);
+
+ void
+ef10_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+ void
+ef10_intr_fatal(
+ __in efx_nic_t *enp);
+ void
+ef10_intr_fini(
+ __in efx_nic_t *enp);
+
+/* NIC */
+
+extern __checkReturn efx_rc_t
+ef10_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *vi_countp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_reset(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+ef10_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+ef10_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_nic_unprobe(
+ __in efx_nic_t *enp);
+
+
+/* MAC */
+
+extern __checkReturn efx_rc_t
+ef10_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+ef10_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_addr_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_pdu_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+extern __checkReturn efx_rc_t
+ef10_mac_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_multicast_list_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+ef10_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+extern __checkReturn efx_rc_t
+ef10_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type);
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+extern __checkReturn efx_rc_t
+ef10_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size);
+
+extern __checkReturn efx_rc_t
+ef10_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+
+/* MCDI */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+ef10_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern void
+ef10_mcdi_fini(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len);
+
+extern __checkReturn boolean_t
+ef10_mcdi_poll_response(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length);
+
+extern efx_rc_t
+ef10_mcdi_poll_reboot(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp);
+
+extern void
+ef10_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+/* NVRAM */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buf_read_tlv(
+ __in efx_nic_t *enp,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buf_write_tlv(
+ __inout_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size,
+ __in uint32_t tag,
+ __in_bcount(tag_size) caddr_t tag_data,
+ __in size_t tag_size,
+ __out size_t *total_lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write_segment_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t all_segments);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *resultp);
+
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+ef10_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+ef10_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_backup(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *verify_resultp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_validate(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_create(
+ __in efx_nic_t *enp,
+ __in uint16_t partn_type,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_find_item_start(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_find_end(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp);
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+ef10_nvram_buffer_find_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_get_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(item_max_size, *lengthp)
+ caddr_t itemp,
+ __in size_t item_max_size,
+ __out uint32_t *lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_insert_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_delete_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_finish(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+#endif /* EFSYS_OPT_NVRAM */
+
+
+/* PHY */
+
+typedef struct ef10_link_state_s {
+ uint32_t els_adv_cap_mask;
+ uint32_t els_lp_cap_mask;
+ unsigned int els_fcntl;
+ efx_link_mode_t els_link_mode;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t els_loopback;
+#endif
+ boolean_t els_mac_up;
+} ef10_link_state_t;
+
+extern void
+ef10_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+ef10_phy_get_link(
+ __in efx_nic_t *enp,
+ __out ef10_link_state_t *elsp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t on);
+
+extern __checkReturn efx_rc_t
+ef10_phy_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_verify(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+#if EFSYS_OPT_PHY_STATS
+
+extern __checkReturn efx_rc_t
+ef10_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+extern __checkReturn efx_rc_t
+ef10_bist_enable_offline(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+ef10_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+ef10_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+/* TX */
+
+extern __checkReturn efx_rc_t
+ef10_tx_init(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_tx_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp);
+
+extern void
+ef10_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_buffer_t *ebp,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+ef10_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+extern void
+ef10_rx_qpush_ps_credits(
+ __in efx_rxq_t *erp);
+
+extern __checkReturn uint8_t *
+ef10_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp);
+#endif
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qflush(
+ __in efx_txq_t *etp);
+
+extern void
+ef10_tx_qenable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_enable(
+ __in efx_txq_t *etp);
+
+extern void
+ef10_tx_qpio_disable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+ef10_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+extern void
+ef10_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp);
+
+extern void
+ef10_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint16_t outer_ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count);
+
+extern void
+ef10_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t vlan_tci,
+ __out efx_desc_t *edp);
+
+extern void
+ef10_tx_qdesc_checksum_create(
+ __in efx_txq_t *etp,
+ __in uint16_t flags,
+ __out efx_desc_t *edp);
+
+#if EFSYS_OPT_QSTATS
+
+extern void
+ef10_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+typedef uint32_t efx_piobuf_handle_t;
+
+#define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t)-1)
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_alloc(
+ __inout efx_nic_t *enp,
+ __out uint32_t *bufnump,
+ __out efx_piobuf_handle_t *handlep,
+ __out uint32_t *blknump,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_free(
+ __inout efx_nic_t *enp,
+ __in uint32_t bufnum,
+ __in uint32_t blknum);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_link(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_unlink(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index);
+
+
+/* VPD */
+
+#if EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+ef10_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+ef10_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+
+/* RX */
+
+extern __checkReturn efx_rc_t
+ef10_rx_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+extern __checkReturn efx_rc_t
+ef10_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+
+#if EFSYS_OPT_RX_SCALE
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_context_alloc(
+ __in efx_nic_t *enp,
+ __in efx_rx_scale_context_type_t type,
+ __in uint32_t num_queues,
+ __out uint32_t *rss_contextp);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_context_free(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+extern __checkReturn uint32_t
+ef10_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+extern __checkReturn efx_rc_t
+ef10_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp);
+
+extern void
+ef10_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(ndescs) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+extern void
+ef10_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+extern __checkReturn efx_rc_t
+ef10_rx_qflush(
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_qenable(
+ __in efx_rxq_t *erp);
+
+union efx_rxq_type_data_u;
+
+extern __checkReturn efx_rc_t
+ef10_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in const union efx_rxq_type_data_u *type_data,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_FILTER
+
+typedef struct ef10_filter_handle_s {
+ uint32_t efh_lo;
+ uint32_t efh_hi;
+} ef10_filter_handle_t;
+
+typedef struct ef10_filter_entry_s {
+ uintptr_t efe_spec; /* pointer to filter spec plus busy bit */
+ ef10_filter_handle_t efe_handle;
+} ef10_filter_entry_t;
+
+/*
+ * BUSY flag indicates that an update is in progress.
+ * AUTO_OLD flag is used to mark and sweep MAC packet filters.
+ */
+#define EFX_EF10_FILTER_FLAG_BUSY 1U
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2U
+#define EFX_EF10_FILTER_FLAGS 3U
+
+/*
+ * Size of the hash table used by the driver. Doesn't need to be the
+ * same size as the hardware's table.
+ */
+#define EFX_EF10_FILTER_TBL_ROWS 8192
+
+/* Only need to allow for one directed and one unknown unicast filter */
+#define EFX_EF10_FILTER_UNICAST_FILTERS_MAX 2
+
+/* Allow for the broadcast address to be added to the multicast list */
+#define EFX_EF10_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1)
+
+/*
+ * For encapsulated packets, there is one filter each for each combination of
+ * IPv4 or IPv6 outer frame, VXLAN, GENEVE or NVGRE packet type, and unicast or
+ * multicast inner frames.
+ */
+#define EFX_EF10_FILTER_ENCAP_FILTERS_MAX 12
+
+typedef struct ef10_filter_table_s {
+ ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS];
+ efx_rxq_t *eft_default_rxq;
+ boolean_t eft_using_rss;
+ uint32_t eft_unicst_filter_indexes[
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX];
+ uint32_t eft_unicst_filter_count;
+ uint32_t eft_mulcst_filter_indexes[
+ EFX_EF10_FILTER_MULTICAST_FILTERS_MAX];
+ uint32_t eft_mulcst_filter_count;
+ boolean_t eft_using_all_mulcst;
+ uint32_t eft_encap_filter_indexes[
+ EFX_EF10_FILTER_ENCAP_FILTERS_MAX];
+ uint32_t eft_encap_filter_count;
+} ef10_filter_table_t;
+
+ __checkReturn efx_rc_t
+ef10_filter_init(
+ __in efx_nic_t *enp);
+
+ void
+ef10_filter_fini(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_filter_restore(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace);
+
+ __checkReturn efx_rc_t
+ef10_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+ef10_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count);
+
+extern void
+ef10_filter_get_default_rxq(
+ __in efx_nic_t *enp,
+ __out efx_rxq_t **erpp,
+ __out boolean_t *using_rss);
+
+extern void
+ef10_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+ef10_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+
+#endif /* EFSYS_OPT_FILTER */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_function_info(
+ __in efx_nic_t *enp,
+ __out uint32_t *pfp,
+ __out_opt uint32_t *vfp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_privilege_mask(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __in uint32_t vf,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_port_assignment(
+ __in efx_nic_t *enp,
+ __out uint32_t *portp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_port_modes(
+ __in efx_nic_t *enp,
+ __out uint32_t *modesp,
+ __out_opt uint32_t *current_modep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_port_mode_bandwidth(
+ __in uint32_t port_mode,
+ __out uint32_t *bandwidth_mbpsp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_pf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_vf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_clock(
+ __in efx_nic_t *enp,
+ __out uint32_t *sys_freqp,
+ __out uint32_t *dpcpu_freqp);
+
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_rxdp_config(
+ __in efx_nic_t *enp,
+ __out uint32_t *end_paddingp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_vector_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *vec_basep,
+ __out_opt uint32_t *pf_nvecp,
+ __out_opt uint32_t *vf_nvecp);
+
+extern __checkReturn efx_rc_t
+ef10_get_privilege_mask(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp);
+
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_nic_global(
+ __in efx_nic_t *enp,
+ __in uint32_t key,
+ __out uint32_t *valuep);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_set_nic_global(
+ __in efx_nic_t *enp,
+ __in uint32_t key,
+ __in uint32_t value);
+
+#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
+
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+/* Data space per credit in packed stream mode */
+#define EFX_RX_PACKED_STREAM_MEM_PER_CREDIT (1 << 16)
+
+/*
+ * Received packets are always aligned at this boundary. Also there always
+ * exists a gap of this size between packets.
+ * (see SF-112241-TC, 4.5)
+ */
+#define EFX_RX_PACKED_STREAM_ALIGNMENT 64
+
+/*
+ * Size of a pseudo-header prepended to received packets
+ * in packed stream mode
+ */
+#define EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE 8
+
+/* Minimum space for packet in packed stream mode */
+#define EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE \
+ P2ROUNDUP(EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE + \
+ EFX_MAC_PDU_MIN + \
+ EFX_RX_PACKED_STREAM_ALIGNMENT, \
+ EFX_RX_PACKED_STREAM_ALIGNMENT)
+
+/* Maximum number of credits */
+#define EFX_RX_PACKED_STREAM_MAX_CREDITS 127
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+
+/*
+ * Maximum DMA length and buffer stride alignment.
+ * (see SF-119419-TC, 3.2)
+ */
+#define EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT 64
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EF10_IMPL_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_intr.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_intr.c
new file mode 100644
index 00000000..1ffe266b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_intr.c
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+ __checkReturn efx_rc_t
+ef10_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ _NOTE(ARGUNUSED(enp, type, esmp))
+ return (0);
+}
+
+
+ void
+ef10_intr_enable(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+ void
+ef10_intr_disable(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+ void
+ef10_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_trigger_interrupt(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_TRIGGER_INTERRUPT_IN_LEN,
+ MC_CMD_TRIGGER_INTERRUPT_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ if (level >= enp->en_nic_cfg.enc_intr_limit) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_TRIGGER_INTERRUPT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_TRIGGER_INTERRUPT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_TRIGGER_INTERRUPT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, TRIGGER_INTERRUPT_IN_INTR_LEVEL, level);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ if (encp->enc_bug41750_workaround) {
+ /*
+ * bug 41750: Test interrupts don't work on Greenport
+ * bug 50084: Test interrupts don't work on VFs
+ */
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_trigger_interrupt(enp, level)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_dword_t dword;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ /* Read the queue mask and implicitly acknowledge the interrupt. */
+ EFX_BAR_READD(enp, ER_DZ_BIU_INT_ISR_REG, &dword, B_FALSE);
+ *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
+
+ *fatalp = B_FALSE;
+}
+
+ void
+ef10_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ _NOTE(ARGUNUSED(enp, message))
+
+ /* EF10 fatal errors are reported via events */
+ *fatalp = B_FALSE;
+}
+
+ void
+ef10_intr_fatal(
+ __in efx_nic_t *enp)
+{
+ /* EF10 fatal errors are reported via events */
+ _NOTE(ARGUNUSED(enp))
+}
+
+ void
+ef10_intr_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_mac.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_mac.c
new file mode 100644
index 00000000..1031e836
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_mac.c
@@ -0,0 +1,1048 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+ __checkReturn efx_rc_t
+ef10_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ ef10_link_state_t els;
+ efx_rc_t rc;
+
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail1;
+
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_fcntl = els.els_fcntl;
+
+ *link_modep = els.els_link_mode;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ ef10_link_state_t els;
+ efx_rc_t rc;
+
+ /*
+ * Because EF10 doesn't *require* polling, we can't rely on
+ * ef10_mac_poll() being executed to populate epp->ep_mac_up.
+ */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail1;
+
+ *mac_upp = els.els_mac_up;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * EF10 adapters use MC_CMD_VADAPTOR_SET_MAC to set the
+ * MAC address; the address field in MC_CMD_SET_MAC has no
+ * effect.
+ * MC_CMD_VADAPTOR_SET_MAC requires mac-spoofing privilege and
+ * the port to have no filters or queues active.
+ */
+static __checkReturn efx_rc_t
+efx_mcdi_vadapter_set_mac(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_SET_MAC_IN_LEN,
+ MC_CMD_VADAPTOR_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
+ enp->en_vport_id);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, VADAPTOR_SET_MAC_IN_MACADDR),
+ epp->ep_mac_addr);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_addr_set(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_vadapter_set_mac(enp)) != 0) {
+ if (rc != ENOTSUP)
+ goto fail1;
+
+ /*
+ * Fallback for older Huntington firmware without Vadapter
+ * support.
+ */
+ if ((rc = ef10_mac_reconfigure(enp)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_mtu_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mtu)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ /* Only configure the MTU in this call to MC_CMD_SET_MAC */
+ MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_MTU, mtu);
+ MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_EXT_IN_CONTROL,
+ SET_MAC_EXT_IN_CFG_MTU, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_mtu_get(
+ __in efx_nic_t *enp,
+ __out size_t *mtu)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_V2_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_V2_OUT_LEN;
+
+ /*
+ * With MC_CMD_SET_MAC_EXT_IN_CONTROL set to 0, this just queries the
+ * MTU. This should always be supported on Medford, but it is not
+ * supported on older Huntington firmware.
+ */
+ MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_CONTROL, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ if (req.emr_out_length_used < MC_CMD_SET_MAC_V2_OUT_MTU_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *mtu = MCDI_OUT_DWORD(req, SET_MAC_V2_OUT_MTU);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_pdu_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ if (encp->enc_enhanced_set_mac_supported) {
+ if ((rc = efx_mcdi_mtu_set(enp, epp->ep_mac_pdu)) != 0)
+ goto fail1;
+ } else {
+ /*
+ * Fallback for older Huntington firmware, which always
+ * configure all of the parameters to MC_CMD_SET_MAC. This isn't
+ * suitable for setting the MTU on unpriviliged functions.
+ */
+ if ((rc = ef10_mac_reconfigure(enp)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_mtu_get(enp, pdu)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+__checkReturn efx_rc_t
+ef10_mac_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
+ epp->ep_mac_addr);
+
+ /*
+ * Note: The Huntington MAC does not support REJECT_BRDCST.
+ * The REJECT_UNCST flag will also prevent multicast traffic
+ * from reaching the filters. As Huntington filters drop any
+ * traffic that does not match a filter it is ok to leave the
+ * MAC running in promiscuous mode. See bug41141.
+ *
+ * FIXME: Does REJECT_UNCST behave the same way on Medford?
+ */
+ MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, 0,
+ SET_MAC_IN_REJECT_BRDCST, 0);
+
+ /*
+ * Flow control, whether it is auto-negotiated or not,
+ * is set via the PHY advertised capabilities. When set to
+ * automatic the MAC will use the PHY settings to determine
+ * the flow control settings.
+ */
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, MC_CMD_FCNTL_AUTO);
+
+ /* Do not include the Ethernet frame checksum in RX packets */
+ MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_IN_FLAGS,
+ SET_MAC_IN_FLAG_INCLUDE_FCS, 0);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ /*
+ * Unprivileged functions cannot control link state,
+ * but still need to configure filters.
+ */
+ if (req.emr_rc != EACCES) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ }
+
+ /*
+ * Apply the filters for the MAC configuration.
+ * If the NIC isn't ready to accept filters this may
+ * return success without setting anything.
+ */
+ rc = efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_multicast_list_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_rxq_t *old_rxq;
+ boolean_t old_using_rss;
+ efx_rc_t rc;
+
+ ef10_filter_get_default_rxq(enp, &old_rxq, &old_using_rss);
+
+ ef10_filter_default_rxq_set(enp, erp, using_rss);
+
+ rc = efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ ef10_filter_default_rxq_set(enp, old_rxq, old_using_rss);
+
+ return (rc);
+}
+
+ void
+ef10_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ ef10_filter_default_rxq_clear(enp);
+
+ efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+}
+
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+ef10_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_loopback_type_t old_loopback_type;
+ efx_link_mode_t old_loopback_link_mode;
+ efx_rc_t rc;
+
+ /* The PHY object handles this on EF10 */
+ old_loopback_type = epp->ep_loopback_type;
+ old_loopback_link_mode = epp->ep_loopback_link_mode;
+ epp->ep_loopback_type = loopback_type;
+ epp->ep_loopback_link_mode = link_mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_loopback_type = old_loopback_type;
+ epp->ep_loopback_link_mode = old_loopback_link_mode;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+ __checkReturn efx_rc_t
+ef10_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size)
+{
+ const struct efx_mac_stats_range ef10_common[] = {
+ { EFX_MAC_RX_OCTETS, EFX_MAC_RX_GE_15XX_PKTS },
+ { EFX_MAC_RX_FCS_ERRORS, EFX_MAC_RX_DROP_EVENTS },
+ { EFX_MAC_RX_JABBER_PKTS, EFX_MAC_RX_JABBER_PKTS },
+ { EFX_MAC_RX_NODESC_DROP_CNT, EFX_MAC_TX_PAUSE_PKTS },
+ };
+ const struct efx_mac_stats_range ef10_tx_size_bins[] = {
+ { EFX_MAC_TX_LE_64_PKTS, EFX_MAC_TX_GE_15XX_PKTS },
+ };
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ efx_rc_t rc;
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_common, EFX_ARRAY_SIZE(ef10_common))) != 0)
+ goto fail1;
+
+ if (epp->ep_phy_cap_mask & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
+ const struct efx_mac_stats_range ef10_40g_extra[] = {
+ { EFX_MAC_RX_ALIGN_ERRORS, EFX_MAC_RX_ALIGN_ERRORS },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_40g_extra, EFX_ARRAY_SIZE(ef10_40g_extra))) != 0)
+ goto fail2;
+
+ if (encp->enc_mac_stats_40g_tx_size_bins) {
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp,
+ mask_size, ef10_tx_size_bins,
+ EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0)
+ goto fail3;
+ }
+ } else {
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_tx_size_bins, EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0)
+ goto fail4;
+ }
+
+ if (encp->enc_pm_and_rxdp_counters) {
+ const struct efx_mac_stats_range ef10_pm_and_rxdp[] = {
+ { EFX_MAC_PM_TRUNC_BB_OVERFLOW, EFX_MAC_RXDP_HLB_WAIT },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_pm_and_rxdp, EFX_ARRAY_SIZE(ef10_pm_and_rxdp))) != 0)
+ goto fail5;
+ }
+
+ if (encp->enc_datapath_cap_evb) {
+ const struct efx_mac_stats_range ef10_vadaptor[] = {
+ { EFX_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_OVERFLOW },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_vadaptor, EFX_ARRAY_SIZE(ef10_vadaptor))) != 0)
+ goto fail6;
+ }
+
+ if (encp->enc_fec_counters) {
+ const struct efx_mac_stats_range ef10_fec[] = {
+ { EFX_MAC_FEC_UNCORRECTED_ERRORS,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3 },
+ };
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_fec, EFX_ARRAY_SIZE(ef10_fec))) != 0)
+ goto fail7;
+ }
+
+ if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V4) {
+ const struct efx_mac_stats_range ef10_rxdp_sdt[] = {
+ { EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC,
+ EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_rxdp_sdt, EFX_ARRAY_SIZE(ef10_rxdp_sdt))) != 0)
+ goto fail8;
+ }
+
+ if (encp->enc_hlb_counters) {
+ const struct efx_mac_stats_range ef10_hlb[] = {
+ { EFX_MAC_RXDP_HLB_IDLE, EFX_MAC_RXDP_HLB_TIMEOUT },
+ };
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_hlb, EFX_ARRAY_SIZE(ef10_hlb))) != 0)
+ goto fail9;
+ }
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#define EF10_MAC_STAT_READ(_esmp, _field, _eqp) \
+ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp)
+
+
+ __checkReturn efx_rc_t
+ef10_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp)
+{
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_qword_t generation_start;
+ efx_qword_t generation_end;
+ efx_qword_t value;
+ efx_rc_t rc;
+
+ /*
+ * The MAC_STATS contain start and end generation counters used to
+ * detect when the DMA buffer has been updated during stats decode.
+ * All stats counters are 64bit unsigned values.
+ *
+ * Siena-compatible MAC stats contain MC_CMD_MAC_NSTATS 64bit counters.
+ * The generation end counter is at index MC_CMD_MAC_GENERATION_END
+ * (same as MC_CMD_MAC_NSTATS-1).
+ *
+ * Medford2 and later use a larger DMA buffer: MAC_STATS_NUM_STATS from
+ * MC_CMD_GET_CAPABILITIES_V4_OUT reports the number of 64bit counters.
+ *
+ * Firmware writes the generation end counter as the last counter in the
+ * DMA buffer. Do not use MC_CMD_MAC_GENERATION_END, as that is only
+ * correct for legacy Siena-compatible MAC stats.
+ */
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) {
+ /* MAC stats count too small for legacy MAC stats */
+ rc = ENOSPC;
+ goto fail1;
+ }
+ if (EFSYS_MEM_SIZE(esmp) <
+ (encp->enc_mac_stats_nstats * sizeof (efx_qword_t))) {
+ /* DMA buffer too small */
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ /* Read END first so we don't race with the MC */
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp));
+ EF10_MAC_STAT_READ(esmp, (encp->enc_mac_stats_nstats - 1),
+ &generation_end);
+ EFSYS_MEM_READ_BARRIER();
+
+ /* TX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value);
+ EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value);
+
+ /* RX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);
+
+ /* Packet memory (EF10 only) */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_BB_OVERFLOW]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_BB_OVERFLOW]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_VFIFO_FULL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_VFIFO_FULL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_VFIFO_FULL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_VFIFO_FULL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_QBB, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_QBB]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_QBB, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_QBB]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_MAPPING, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_MAPPING]), &value);
+
+ /* RX datapath */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_Q_DISABLED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_Q_DISABLED_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_DI_DROPPED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_DI_DROPPED_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_STREAMING_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_STREAMING_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_FETCH]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_WAIT]), &value);
+
+
+ /* VADAPTER RX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_BYTES]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_OVERFLOW]), &value);
+
+ /* VADAPTER TX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_PACKETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_BYTES]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_OVERFLOW]), &value);
+
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V2)
+ goto done;
+
+ /* FEC */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_UNCORRECTED_ERRORS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_UNCORRECTED_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_ERRORS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE0]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE1]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE2]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3]),
+ &value);
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V3)
+ goto done;
+
+ /* CTPIO exceptions */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_VI_BUSY_FALLBACK]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_LONG_WRITE_SUCCESS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_MISSING_DBELL_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_OVERFLOW_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_OVERFLOW_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_UNDERFLOW_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_TIMEOUT_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_TIMEOUT_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_NONCONTIG_WR_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_FRM_CLOBBER_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_INVALID_WR_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_INVALID_WR_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_VI_CLOBBER_FALLBACK]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_UNQUALIFIED_FALLBACK]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_RUNT_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_RUNT_FALLBACK]), &value);
+
+ /* CTPIO per-port stats */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_SUCCESS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_SUCCESS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_FALLBACK]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_POISON, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_POISON]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_ERASE, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_ERASE]), &value);
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V4)
+ goto done;
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC]),
+ &value);
+
+ /* Head-of-line blocking */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_IDLE, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_IDLE]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_TIMEOUT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_TIMEOUT]), &value);
+
+done:
+ /* Read START generation counter */
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp));
+ EFSYS_MEM_READ_BARRIER();
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
+ &generation_start);
+
+ /* Check that we didn't read the stats in the middle of a DMA */
+ /* Not a good enough check ? */
+ if (memcmp(&generation_start, &generation_end,
+ sizeof (generation_start)))
+ return (EAGAIN);
+
+ if (generationp)
+ *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_mcdi.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_mcdi.c
new file mode 100644
index 00000000..8a3fc3b4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_mcdi.c
@@ -0,0 +1,325 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+#if EFSYS_OPT_MCDI
+
+#ifndef WITH_MCDI_V2
+#error "WITH_MCDI_V2 required for EF10 MCDIv2 commands."
+#endif
+
+
+ __checkReturn efx_rc_t
+ef10_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *emtp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t dword;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+ EFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA);
+
+ /*
+ * All EF10 firmware supports MCDIv2 and MCDIv1.
+ * Medford BootROM supports MCDIv2 and MCDIv1.
+ * Huntington BootROM supports MCDIv1 only.
+ */
+ emip->emi_max_version = 2;
+
+ /* A host DMA buffer is required for EF10 MCDI */
+ if (esmp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Ensure that the MC doorbell is in a known state before issuing MCDI
+ * commands. The recovery algorithm requires that the MC command buffer
+ * must be 256 byte aligned. See bug24769.
+ */
+ if ((EFSYS_MEM_ADDR(esmp) & 0xFF) != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 1);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
+
+ /* Save initial MC reboot status */
+ (void) ef10_mcdi_poll_reboot(enp);
+
+ /* Start a new epoch (allow fresh MCDI requests to succeed) */
+ efx_mcdi_new_epoch(enp);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+
+ emip->emi_new_epoch = B_FALSE;
+}
+
+/*
+ * In older firmware all commands are processed in a single thread, so a long
+ * running command for one PCIe function can block processing for another
+ * function (see bug 61269).
+ *
+ * In newer firmware that supports multithreaded MCDI processing, we can extend
+ * the timeout for long-running requests which we know firmware may choose to
+ * process in a background thread.
+ */
+#define EF10_MCDI_CMD_TIMEOUT_US (10 * 1000 * 1000)
+#define EF10_MCDI_CMD_LONG_TIMEOUT_US (60 * 1000 * 1000)
+
+ void
+ef10_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ switch (emrp->emr_cmd) {
+ case MC_CMD_POLL_BIST:
+ case MC_CMD_NVRAM_ERASE:
+ case MC_CMD_LICENSING_V3:
+ case MC_CMD_NVRAM_UPDATE_FINISH:
+ if (encp->enc_nvram_update_verify_result_supported != B_FALSE) {
+ /*
+ * Potentially longer running commands, which firmware
+ * may choose to process in a background thread.
+ */
+ *timeoutp = EF10_MCDI_CMD_LONG_TIMEOUT_US;
+ break;
+ }
+ /* FALLTHRU */
+ default:
+ *timeoutp = EF10_MCDI_CMD_TIMEOUT_US;
+ break;
+ }
+}
+
+ void
+ef10_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t dword;
+ unsigned int pos;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ /* Write the header */
+ for (pos = 0; pos < hdr_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)hdrp + pos);
+ EFSYS_MEM_WRITED(esmp, pos, &dword);
+ }
+
+ /* Write the payload */
+ for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)sdup + pos);
+ EFSYS_MEM_WRITED(esmp, hdr_len + pos, &dword);
+ }
+
+ /* Guarantee ordering of memory (MCDI request) and PIO (MC doorbell) */
+ EFSYS_DMA_SYNC_FOR_DEVICE(esmp, 0, hdr_len + sdu_len);
+ EFSYS_PIO_WRITE_BARRIER();
+
+ /* Ring the doorbell to post the command DMA address to the MC */
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_LWRD_REG, &dword, B_FALSE);
+
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
+}
+
+ __checkReturn boolean_t
+ef10_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t hdr;
+
+ EFSYS_MEM_READD(esmp, 0, &hdr);
+ EFSYS_MEM_READ_BARRIER();
+
+ return (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE);
+}
+
+ void
+ef10_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ unsigned int pos = 0;
+ efx_dword_t data;
+ size_t remaining = length;
+
+ while (remaining > 0) {
+ size_t chunk = MIN(remaining, sizeof (data));
+
+ EFSYS_MEM_READD(esmp, offset + pos, &data);
+ memcpy((uint8_t *)bufferp + pos, &data, chunk);
+ pos += chunk;
+ remaining -= chunk;
+ }
+}
+
+ efx_rc_t
+ef10_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t dword;
+ uint32_t old_status;
+ uint32_t new_status;
+ efx_rc_t rc;
+
+ old_status = emip->emi_mc_reboot_status;
+
+ /* Update MC reboot status word */
+ EFX_BAR_TBL_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, 0, &dword, B_FALSE);
+ new_status = dword.ed_u32[0];
+
+ /* MC has rebooted if the value has changed */
+ if (new_status != old_status) {
+ emip->emi_mc_reboot_status = new_status;
+
+ /*
+ * FIXME: Ignore detected MC REBOOT for now.
+ *
+ * The Siena support for checking for MC reboot from status
+ * flags is broken - see comments in siena_mcdi_poll_reboot().
+ * As the generic MCDI code is shared the EF10 reboot
+ * detection suffers similar problems.
+ *
+ * Do not report an error when the boot status changes until
+ * this can be handled by common code drivers (and reworked to
+ * support Siena too).
+ */
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = EIO;
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t privilege_mask = encp->enc_privilege_mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ /*
+ * Use privilege mask state at MCDI attach.
+ */
+
+ switch (id) {
+ case EFX_MCDI_FEATURE_FW_UPDATE:
+ /*
+ * Admin privilege must be used prior to introduction of
+ * specific flag.
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_LINK_CONTROL:
+ /*
+ * Admin privilege used prior to introduction of
+ * specific flag.
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, LINK) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_MACADDR_CHANGE:
+ /*
+ * Admin privilege must be used prior to introduction of
+ * mac spoofing privilege (at v4.6), which is used up to
+ * introduction of change mac spoofing privilege (at v4.7)
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, CHANGE_MAC) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_MAC_SPOOFING:
+ /*
+ * Admin privilege must be used prior to introduction of
+ * mac spoofing privilege (at v4.6), which is used up to
+ * introduction of mac spoofing TX privilege (at v4.7)
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING_TX) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MCDI */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_nic.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_nic.c
new file mode 100644
index 00000000..7dbf843b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_nic.c
@@ -0,0 +1,2463 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+#include "ef10_tlv_layout.h"
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_assignment(
+ __in efx_nic_t *enp,
+ __out uint32_t *portp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
+ MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_modes(
+ __in efx_nic_t *enp,
+ __out uint32_t *modesp,
+ __out_opt uint32_t *current_modep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
+ MC_CMD_GET_PORT_MODES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_MODES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /*
+ * Require only Modes and DefaultMode fields, unless the current mode
+ * was requested (CurrentMode field was added for Medford).
+ */
+ if (req.emr_out_length_used <
+ MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ if ((current_modep != NULL) && (req.emr_out_length_used <
+ MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
+
+ if (current_modep != NULL) {
+ *current_modep = MCDI_OUT_DWORD(req,
+ GET_PORT_MODES_OUT_CURRENT_MODE);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_port_mode_bandwidth(
+ __in uint32_t port_mode,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ switch (port_mode) {
+ case TLV_PORT_MODE_10G:
+ bandwidth = 10000;
+ break;
+ case TLV_PORT_MODE_10G_10G:
+ bandwidth = 10000 * 2;
+ break;
+ case TLV_PORT_MODE_10G_10G_10G_10G:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q2:
+ bandwidth = 10000 * 4;
+ break;
+ case TLV_PORT_MODE_40G:
+ bandwidth = 40000;
+ break;
+ case TLV_PORT_MODE_40G_40G:
+ bandwidth = 40000 * 2;
+ break;
+ case TLV_PORT_MODE_40G_10G_10G:
+ case TLV_PORT_MODE_10G_10G_40G:
+ bandwidth = 40000 + (10000 * 2);
+ break;
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_alloc(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
+ MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
+ VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
+ enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_free(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
+ MC_CMD_VADAPTOR_FREE_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_FREE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_pf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_vf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_clock(
+ __in efx_nic_t *enp,
+ __out uint32_t *sys_freqp,
+ __out uint32_t *dpcpu_freqp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
+ MC_CMD_GET_CLOCK_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CLOCK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
+ if (*sys_freqp == 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
+ if (*dpcpu_freqp == 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_rxdp_config(
+ __in efx_nic_t *enp,
+ __out uint32_t *end_paddingp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN,
+ MC_CMD_GET_RXDP_CONFIG_OUT_LEN)];
+ uint32_t end_padding;
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
+ /* RX DMA end padding is disabled */
+ end_padding = 0;
+ } else {
+ switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
+ end_padding = 64;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
+ end_padding = 128;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
+ end_padding = 256;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ }
+
+ *end_paddingp = end_padding;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_vector_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *vec_basep,
+ __out_opt uint32_t *pf_nvecp,
+ __out_opt uint32_t *vf_nvecp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
+ MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (vec_basep != NULL)
+ *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
+ if (pf_nvecp != NULL)
+ *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
+ if (vf_nvecp != NULL)
+ *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_vis(
+ __in efx_nic_t *enp,
+ __in uint32_t min_vi_count,
+ __in uint32_t max_vi_count,
+ __out uint32_t *vi_basep,
+ __out uint32_t *vi_countp,
+ __out uint32_t *vi_shiftp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
+ MC_CMD_ALLOC_VIS_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (vi_countp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_VIS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
+ *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
+
+ /* Report VI_SHIFT if available (always zero for Huntington) */
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
+ *vi_shiftp = 0;
+ else
+ *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_vis(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_FREE_VIS;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ /* Ignore ELREADY (no allocated VIs, so nothing to free) */
+ if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_piobuf(
+ __in efx_nic_t *enp,
+ __out efx_piobuf_handle_t *handlep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
+ MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (handlep == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_piobuf(
+ __in efx_nic_t *enp,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
+ MC_CMD_FREE_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FREE_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_link_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_LINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_unlink_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+ef10_nic_alloc_piobufs(
+ __in efx_nic_t *enp,
+ __in uint32_t max_piobuf_count)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+
+ EFSYS_ASSERT3U(max_piobuf_count, <=,
+ EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
+
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+
+ for (i = 0; i < max_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
+ goto fail1;
+
+ enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
+ enp->en_arch.ef10.ena_piobuf_count++;
+ }
+
+ return;
+
+fail1:
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+
+static void
+ef10_nic_free_piobufs(
+ __in efx_nic_t *enp)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+/* Sub-allocate a block from a piobuf */
+ __checkReturn efx_rc_t
+ef10_nic_pio_alloc(
+ __inout efx_nic_t *enp,
+ __out uint32_t *bufnump,
+ __out efx_piobuf_handle_t *handlep,
+ __out uint32_t *blknump,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
+ uint32_t blk_per_buf;
+ uint32_t buf, blk;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+ EFSYS_ASSERT(bufnump);
+ EFSYS_ASSERT(handlep);
+ EFSYS_ASSERT(blknump);
+ EFSYS_ASSERT(offsetp);
+ EFSYS_ASSERT(sizep);
+
+ if ((edcp->edc_pio_alloc_size == 0) ||
+ (enp->en_arch.ef10.ena_piobuf_count == 0)) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+ blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
+
+ for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
+ uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
+
+ if (~(*map) == 0)
+ continue;
+
+ EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
+ for (blk = 0; blk < blk_per_buf; blk++) {
+ if ((*map & (1u << blk)) == 0) {
+ *map |= (1u << blk);
+ goto done;
+ }
+ }
+ }
+ rc = ENOMEM;
+ goto fail2;
+
+done:
+ *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
+ *bufnump = buf;
+ *blknump = blk;
+ *sizep = edcp->edc_pio_alloc_size;
+ *offsetp = blk * (*sizep);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Free a piobuf sub-allocated block */
+ __checkReturn efx_rc_t
+ef10_nic_pio_free(
+ __inout efx_nic_t *enp,
+ __in uint32_t bufnum,
+ __in uint32_t blknum)
+{
+ uint32_t *map;
+ efx_rc_t rc;
+
+ if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
+ (blknum >= (8 * sizeof (*map)))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
+ if ((*map & (1u << blknum)) == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+ *map &= ~(1u << blknum);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_link(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ return (efx_mcdi_link_piobuf(enp, vi_index, handle));
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_unlink(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ return (efx_mcdi_unlink_piobuf(enp, vi_index));
+}
+
+static __checkReturn efx_rc_t
+ef10_mcdi_get_pf_count(
+ __in efx_nic_t *enp,
+ __out uint32_t *pf_countp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PF_COUNT_IN_LEN,
+ MC_CMD_GET_PF_COUNT_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PF_COUNT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *pf_countp = *MCDI_OUT(req, uint8_t,
+ MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
+
+ EFSYS_ASSERT(*pf_countp != 0);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_get_datapath_caps(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
+ MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)];
+ efx_rc_t rc;
+
+ if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
+ goto fail1;
+
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CAPABILITIES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CAPABILITIES_V5_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+#define CAP_FLAGS1(_req, _flag) \
+ (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \
+ (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))
+
+#define CAP_FLAGS2(_req, _flag) \
+ (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \
+ (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \
+ (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))
+
+ /*
+ * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
+ * We only support the 14 byte prefix here.
+ */
+ if (CAP_FLAGS1(req, RX_PREFIX_LEN_14) == 0) {
+ rc = ENOTSUP;
+ goto fail4;
+ }
+ encp->enc_rx_prefix_size = 14;
+
+ /* Check if the firmware supports additional RSS modes */
+ if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES))
+ encp->enc_rx_scale_additional_modes_supported = B_TRUE;
+ else
+ encp->enc_rx_scale_additional_modes_supported = B_FALSE;
+
+ /* Check if the firmware supports TSO */
+ if (CAP_FLAGS1(req, TX_TSO))
+ encp->enc_fw_assisted_tso_enabled = B_TRUE;
+ else
+ encp->enc_fw_assisted_tso_enabled = B_FALSE;
+
+ /* Check if the firmware supports FATSOv2 */
+ if (CAP_FLAGS2(req, TX_TSO_V2)) {
+ encp->enc_fw_assisted_tso_v2_enabled = B_TRUE;
+ encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
+ } else {
+ encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_v2_n_contexts = 0;
+ }
+
+ /* Check if the firmware supports FATSOv2 encap */
+ if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP))
+ encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE;
+ else
+ encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE;
+
+ /* Check if the firmware has vadapter/vport/vswitch support */
+ if (CAP_FLAGS1(req, EVB))
+ encp->enc_datapath_cap_evb = B_TRUE;
+ else
+ encp->enc_datapath_cap_evb = B_FALSE;
+
+ /* Check if the firmware supports VLAN insertion */
+ if (CAP_FLAGS1(req, TX_VLAN_INSERTION))
+ encp->enc_hw_tx_insert_vlan_enabled = B_TRUE;
+ else
+ encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
+
+ /* Check if the firmware supports RX event batching */
+ if (CAP_FLAGS1(req, RX_BATCHING))
+ encp->enc_rx_batching_enabled = B_TRUE;
+ else
+ encp->enc_rx_batching_enabled = B_FALSE;
+
+ /*
+ * Even if batching isn't reported as supported, we may still get
+ * batched events (see bug61153).
+ */
+ encp->enc_rx_batch_max = 16;
+
+ /* Check if the firmware supports disabling scatter on RXQs */
+ if (CAP_FLAGS1(req, RX_DISABLE_SCATTER))
+ encp->enc_rx_disable_scatter_supported = B_TRUE;
+ else
+ encp->enc_rx_disable_scatter_supported = B_FALSE;
+
+ /* Check if the firmware supports packed stream mode */
+ if (CAP_FLAGS1(req, RX_PACKED_STREAM))
+ encp->enc_rx_packed_stream_supported = B_TRUE;
+ else
+ encp->enc_rx_packed_stream_supported = B_FALSE;
+
+ /*
+ * Check if the firmware supports configurable buffer sizes
+ * for packed stream mode (otherwise buffer size is 1Mbyte)
+ */
+ if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS))
+ encp->enc_rx_var_packed_stream_supported = B_TRUE;
+ else
+ encp->enc_rx_var_packed_stream_supported = B_FALSE;
+
+ /* Check if the firmware supports equal stride super-buffer mode */
+ if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER))
+ encp->enc_rx_es_super_buffer_supported = B_TRUE;
+ else
+ encp->enc_rx_es_super_buffer_supported = B_FALSE;
+
+ /* Check if the firmware supports FW subvariant w/o Tx checksumming */
+ if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM))
+ encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE;
+ else
+ encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
+
+ /* Check if the firmware supports set mac with running filters */
+ if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED))
+ encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
+ else
+ encp->enc_allow_set_mac_with_installed_filters = B_FALSE;
+
+ /*
+ * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
+ * specifying which parameters to configure.
+ */
+ if (CAP_FLAGS1(req, SET_MAC_ENHANCED))
+ encp->enc_enhanced_set_mac_supported = B_TRUE;
+ else
+ encp->enc_enhanced_set_mac_supported = B_FALSE;
+
+ /*
+ * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
+ * us to let the firmware choose the settings to use on an EVQ.
+ */
+ if (CAP_FLAGS2(req, INIT_EVQ_V2))
+ encp->enc_init_evq_v2_supported = B_TRUE;
+ else
+ encp->enc_init_evq_v2_supported = B_FALSE;
+
+ /*
+ * Check if firmware-verified NVRAM updates must be used.
+ *
+ * The firmware trusted installer requires all NVRAM updates to use
+ * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
+ * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
+ * partition and report the result).
+ */
+ if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT))
+ encp->enc_nvram_update_verify_result_supported = B_TRUE;
+ else
+ encp->enc_nvram_update_verify_result_supported = B_FALSE;
+
+ /*
+ * Check if firmware provides packet memory and Rx datapath
+ * counters.
+ */
+ if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS))
+ encp->enc_pm_and_rxdp_counters = B_TRUE;
+ else
+ encp->enc_pm_and_rxdp_counters = B_FALSE;
+
+ /*
+ * Check if the 40G MAC hardware is capable of reporting
+ * statistics for Tx size bins.
+ */
+ if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS))
+ encp->enc_mac_stats_40g_tx_size_bins = B_TRUE;
+ else
+ encp->enc_mac_stats_40g_tx_size_bins = B_FALSE;
+
+ /*
+ * Check if firmware supports VXLAN and NVGRE tunnels.
+ * The capability indicates Geneve protocol support as well.
+ */
+ if (CAP_FLAGS1(req, VXLAN_NVGRE)) {
+ encp->enc_tunnel_encapsulations_supported =
+ (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
+ (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
+ (1u << EFX_TUNNEL_PROTOCOL_NVGRE);
+
+ EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
+ encp->enc_tunnel_config_udp_entries_max =
+ EFX_TUNNEL_MAXNENTRIES;
+ } else {
+ encp->enc_tunnel_config_udp_entries_max = 0;
+ }
+
+ /*
+ * Check if firmware reports the VI window mode.
+ * Medford2 has a variable VI window size (8K, 16K or 64K).
+ * Medford and Huntington have a fixed 8K VI window size.
+ */
+ if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
+ uint8_t mode =
+ MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
+
+ switch (mode) {
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K;
+ break;
+ default:
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
+ break;
+ }
+ } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) ||
+ (enp->en_family == EFX_FAMILY_MEDFORD)) {
+ /* Huntington and Medford have fixed 8K window size */
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
+ } else {
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
+ }
+
+ /* Check if firmware supports extended MAC stats. */
+ if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
+ /* Extended stats buffer supported */
+ encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
+ } else {
+ /* Use Siena-compatible legacy MAC stats */
+ encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
+ }
+
+ if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2)
+ encp->enc_fec_counters = B_TRUE;
+ else
+ encp->enc_fec_counters = B_FALSE;
+
+ /* Check if the firmware provides head-of-line blocking counters */
+ if (CAP_FLAGS2(req, RXDP_HLB_IDLE))
+ encp->enc_hlb_counters = B_TRUE;
+ else
+ encp->enc_hlb_counters = B_FALSE;
+
+ if (CAP_FLAGS1(req, RX_RSS_LIMITED)) {
+ /* Only one exclusive RSS context is available per port. */
+ encp->enc_rx_scale_max_exclusive_contexts = 1;
+
+ switch (enp->en_family) {
+ case EFX_FAMILY_MEDFORD2:
+ encp->enc_rx_scale_hash_alg_mask =
+ (1U << EFX_RX_HASHALG_TOEPLITZ);
+ break;
+
+ case EFX_FAMILY_MEDFORD:
+ case EFX_FAMILY_HUNTINGTON:
+ /*
+ * Packed stream firmware variant maintains a
+ * non-standard algorithm for hash computation.
+ * It implies explicit XORing together
+ * source + destination IP addresses (or last
+ * four bytes in the case of IPv6) and using the
+ * resulting value as the input to a Toeplitz hash.
+ */
+ encp->enc_rx_scale_hash_alg_mask =
+ (1U << EFX_RX_HASHALG_PACKED_STREAM);
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ /* Port numbers cannot contribute to the hash value */
+ encp->enc_rx_scale_l4_hash_supported = B_FALSE;
+ } else {
+ /*
+ * Maximum number of exclusive RSS contexts.
+ * EF10 hardware supports 64 in total, but 6 are reserved
+ * for shared contexts. They are a global resource so
+ * not all may be available.
+ */
+ encp->enc_rx_scale_max_exclusive_contexts = 64 - 6;
+
+ encp->enc_rx_scale_hash_alg_mask =
+ (1U << EFX_RX_HASHALG_TOEPLITZ);
+
+ /*
+ * It is possible to use port numbers as
+ * the input data for hash computation.
+ */
+ encp->enc_rx_scale_l4_hash_supported = B_TRUE;
+ }
+ /* Check if the firmware supports "FLAG" and "MARK" filter actions */
+ if (CAP_FLAGS2(req, FILTER_ACTION_FLAG))
+ encp->enc_filter_action_flag_supported = B_TRUE;
+ else
+ encp->enc_filter_action_flag_supported = B_FALSE;
+
+ if (CAP_FLAGS2(req, FILTER_ACTION_MARK))
+ encp->enc_filter_action_mark_supported = B_TRUE;
+ else
+ encp->enc_filter_action_mark_supported = B_FALSE;
+
+ /* Get maximum supported value for "MARK" filter action */
+ if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)
+ encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req,
+ GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX);
+ else
+ encp->enc_filter_action_mark_max = 0;
+
+#undef CAP_FLAGS1
+#undef CAP_FLAGS2
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#define EF10_LEGACY_PF_PRIVILEGE_MASK \
+ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
+
+#define EF10_LEGACY_VF_PRIVILEGE_MASK 0
+
+
+ __checkReturn efx_rc_t
+ef10_get_privilege_mask(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t mask;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
+ &mask)) != 0) {
+ if (rc != ENOTSUP)
+ goto fail1;
+
+ /* Fallback for old firmware without privilege mask support */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ /* Assume PF has admin privilege */
+ mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
+ } else {
+ /* VF is always unprivileged by default */
+ mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
+ }
+ }
+
+ *maskp = mask;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Table of mapping schemes from port number to external number.
+ *
+ * Each port number ultimately corresponds to a connector: either as part of
+ * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on
+ * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T
+ * "Salina"). In general:
+ *
+ * Port number (0-based)
+ * |
+ * port mapping (n:1)
+ * |
+ * v
+ * External port number (normally 1-based)
+ * |
+ * fixed (1:1) or cable assembly (1:m)
+ * |
+ * v
+ * Connector
+ *
+ * The external numbering refers to the cages or magjacks on the board,
+ * as visibly annotated on the board or back panel. This table describes
+ * how to determine which external cage/magjack corresponds to the port
+ * numbers used by the driver.
+ *
+ * The count of adjacent port numbers that map to each external number,
+ * and the offset in the numbering, is determined by the chip family and
+ * current port mode.
+ *
+ * For the Huntington family, the current port mode cannot be discovered,
+ * but a single mapping is used by all modes for a given chip variant,
+ * so the mapping used is instead the last match in the table to the full
+ * set of port modes to which the NIC can be configured. Therefore the
+ * ordering of entries in the mapping table is significant.
+ */
+static struct ef10_external_port_map_s {
+ efx_family_t family;
+ uint32_t modes_mask;
+ int32_t count;
+ int32_t offset;
+} __ef10_external_port_mappings[] = {
+ /*
+ * Modes used by Huntington family controllers where each port
+ * number maps to a separate cage.
+ * SFN7x22F (Torino):
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * SFN7xx4F (Pavia):
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * port 2 -> cage 3
+ * port 3 -> cage 4
+ */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1U << TLV_PORT_MODE_10G) | /* mode 0 */
+ (1U << TLV_PORT_MODE_10G_10G) | /* mode 2 */
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G), /* mode 4 */
+ 1, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * Modes which for Huntington identify a chip variant where 2
+ * adjacent port numbers map to each cage.
+ * SFN7x42Q (Monza):
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1U << TLV_PORT_MODE_40G) | /* mode 1 */
+ (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */
+ (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */
+ (1U << TLV_PORT_MODE_10G_10G_40G), /* mode 7 */
+ 2, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * Modes that on Medford allocate each port number to a separate
+ * cage.
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * port 2 -> cage 3
+ * port 3 -> cage 4
+ */
+ {
+ EFX_FAMILY_MEDFORD,
+ (1U << TLV_PORT_MODE_10G) | /* mode 0 */
+ (1U << TLV_PORT_MODE_10G_10G), /* mode 2 */
+ 1, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * Modes that on Medford allocate 2 adjacent port numbers to each
+ * cage.
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
+ {
+ EFX_FAMILY_MEDFORD,
+ (1U << TLV_PORT_MODE_40G) | /* mode 1 */
+ (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */
+ (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */
+ (1U << TLV_PORT_MODE_10G_10G_40G) | /* mode 7 */
+ /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), /* mode 9 */
+ 2, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * Modes that on Medford allocate 4 adjacent port numbers to each
+ * connector, starting on cage 1.
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 1
+ * port 3 -> cage 1
+ */
+ {
+ EFX_FAMILY_MEDFORD,
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q) | /* mode 5 */
+ /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1), /* mode 4 */
+ 4, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * Modes that on Medford allocate 4 adjacent port numbers to each
+ * connector, starting on cage 2.
+ * port 0 -> cage 2
+ * port 1 -> cage 2
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
+ {
+ EFX_FAMILY_MEDFORD,
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q2), /* mode 8 */
+ 4, /* ports per cage */
+ 2 /* first cage */
+ },
+ /*
+ * Modes that on Medford2 allocate each port number to a separate
+ * cage.
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * port 2 -> cage 3
+ * port 3 -> cage 4
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
+ (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
+ (1U << TLV_PORT_MODE_1x1_1x1) | /* mode 2 */
+ (1U << TLV_PORT_MODE_1x2_NA) | /* mode 10 */
+ (1U << TLV_PORT_MODE_1x2_1x2) | /* mode 12 */
+ (1U << TLV_PORT_MODE_1x4_1x2) | /* mode 15 */
+ (1U << TLV_PORT_MODE_1x2_1x4), /* mode 16 */
+ 1, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * FIXME: Some port modes are not representable in this mapping:
+ * - TLV_PORT_MODE_1x2_2x1 (mode 17):
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * port 2 -> cage 2
+ */
+ /*
+ * Modes that on Medford2 allocate 2 adjacent port numbers to each
+ * cage, starting on cage 1.
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */
+ (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 4 */
+ (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */
+ (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */
+ (1U << TLV_PORT_MODE_2x2_NA) | /* mode 13 */
+ (1U << TLV_PORT_MODE_2x1_1x2), /* mode 18 */
+ 2, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * Modes that on Medford2 allocate 2 adjacent port numbers to each
+ * cage, starting on cage 2.
+ * port 0 -> cage 2
+ * port 1 -> cage 2
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_NA_2x2), /* mode 14 */
+ 2, /* ports per cage */
+ 2 /* first cage */
+ },
+ /*
+ * Modes that on Medford2 allocate 4 adjacent port numbers to each
+ * connector, starting on cage 1.
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 1
+ * port 3 -> cage 1
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_4x1_NA), /* mode 5 */
+ 4, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * Modes that on Medford2 allocate 4 adjacent port numbers to each
+ * connector, starting on cage 2.
+ * port 0 -> cage 2
+ * port 1 -> cage 2
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_NA_4x1) | /* mode 8 */
+ (1U << TLV_PORT_MODE_NA_1x2), /* mode 11 */
+ 4, /* ports per cage */
+ 2 /* first cage */
+ },
+};
+
+static __checkReturn efx_rc_t
+ef10_external_port_mapping(
+ __in efx_nic_t *enp,
+ __in uint32_t port,
+ __out uint8_t *external_portp)
+{
+ efx_rc_t rc;
+ int i;
+ uint32_t port_modes;
+ uint32_t matches;
+ uint32_t current;
+ int32_t count = 1; /* Default 1-1 mapping */
+ int32_t offset = 1; /* Default starting external port number */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current)) != 0) {
+ /*
+ * No current port mode information (i.e. Huntington)
+ * - infer mapping from available modes
+ */
+ if ((rc = efx_mcdi_get_port_modes(enp,
+ &port_modes, NULL)) != 0) {
+ /*
+ * No port mode information available
+ * - use default mapping
+ */
+ goto out;
+ }
+ } else {
+ /* Only need to scan the current mode */
+ port_modes = 1 << current;
+ }
+
+ /*
+ * Infer the internal port -> external number mapping from
+ * the possible port modes for this NIC.
+ */
+ for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
+ struct ef10_external_port_map_s *eepmp =
+ &__ef10_external_port_mappings[i];
+ if (eepmp->family != enp->en_family)
+ continue;
+ matches = (eepmp->modes_mask & port_modes);
+ if (matches != 0) {
+ /*
+ * Some modes match. For some Huntington boards
+ * there will be multiple matches. The mapping on the
+ * last match is used.
+ */
+ count = eepmp->count;
+ offset = eepmp->offset;
+ port_modes &= ~matches;
+ }
+ }
+
+ if (port_modes != 0) {
+ /* Some advertised modes are not supported */
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+out:
+ /*
+ * Scale as required by last matched mode and then convert to
+ * correctly offset numbering
+ */
+ *external_portp = (uint8_t)((port / count) + offset);
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_nic_board_cfg(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ ef10_link_state_t els;
+ efx_port_t *epp = &(enp->en_port);
+ uint32_t board_type = 0;
+ uint32_t base, nvec;
+ uint32_t port;
+ uint32_t mask;
+ uint32_t pf;
+ uint32_t vf;
+ uint8_t mac_addr[6] = { 0 };
+ efx_rc_t rc;
+
+ /* Get the (zero-based) MCDI port number */
+ if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
+ goto fail1;
+
+ /* EFX MCDI interface uses one-based port numbers */
+ emip->emi_port = port + 1;
+
+ if ((rc = ef10_external_port_mapping(enp, port,
+ &encp->enc_external_port)) != 0)
+ goto fail2;
+
+ /*
+ * Get PCIe function number from firmware (used for
+ * per-function privilege and dynamic config info).
+ * - PCIe PF: pf = PF number, vf = 0xffff.
+ * - PCIe VF: pf = parent PF, vf = VF number.
+ */
+ if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
+ goto fail3;
+
+ encp->enc_pf = pf;
+ encp->enc_vf = vf;
+
+ /* MAC address for this function */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
+#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
+ /*
+ * Disable static config checking, ONLY for manufacturing test
+ * and setup at the factory, to allow the static config to be
+ * installed.
+ */
+#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+ if ((rc == 0) && (mac_addr[0] & 0x02)) {
+ /*
+ * If the static config does not include a global MAC
+ * address pool then the board may return a locally
+ * administered MAC address (this should only happen on
+ * incorrectly programmed boards).
+ */
+ rc = EINVAL;
+ }
+#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+ } else {
+ rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
+ }
+ if (rc != 0)
+ goto fail4;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ /* Board configuration (legacy) */
+ rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
+ if (rc != 0) {
+ /* Unprivileged functions may not be able to read board cfg */
+ if (rc == EACCES)
+ board_type = 0;
+ else
+ goto fail5;
+ }
+
+ encp->enc_board_type = board_type;
+ encp->enc_clk_mult = 1; /* not used for EF10 */
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail7;
+ epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+
+ /* Check capabilities of running datapath firmware */
+ if ((rc = ef10_get_datapath_caps(enp)) != 0)
+ goto fail8;
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+
+ encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
+ /* No boundary crossing limits */
+ encp->enc_tx_dma_desc_boundary = 0;
+
+ /*
+ * Maximum number of bytes into the frame the TCP header can start for
+ * firmware assisted TSO to work.
+ */
+ encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+
+ /*
+ * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
+ * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
+ * resources (allocated to this PCIe function), which is zero until
+ * after we have allocated VIs.
+ */
+ encp->enc_evq_limit = 1024;
+ encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
+ encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
+
+ encp->enc_buftbl_limit = 0xFFFFFFFF;
+
+ /* Get interrupt vector limits */
+ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(encp))
+ goto fail9;
+
+ /* Ignore error (cannot query vector limits from a VF). */
+ base = 0;
+ nvec = 1024;
+ }
+ encp->enc_intr_vec_base = base;
+ encp->enc_intr_limit = nvec;
+
+ /*
+ * Get the current privilege mask. Note that this may be modified
+ * dynamically, so this value is informational only. DO NOT use
+ * the privilege mask to check for sufficient privileges, as that
+ * can result in time-of-check/time-of-use bugs.
+ */
+ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
+ goto fail10;
+ encp->enc_privilege_mask = mask;
+
+ /* Get remaining controller-specific board config */
+ if ((rc = enop->eno_board_cfg(enp)) != 0)
+ if (rc != EACCES)
+ goto fail11;
+
+ return (0);
+
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_probe(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ /* Read and clear any assertion state */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+
+ /* Exit the assertion handler */
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ if (rc != EACCES)
+ goto fail2;
+
+ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
+ goto fail3;
+
+ if ((rc = ef10_nic_board_cfg(enp)) != 0)
+ goto fail4;
+
+ /*
+ * Set default driver config limits (based on board config).
+ *
+ * FIXME: For now allocate a fixed number of VIs which is likely to be
+ * sufficient and small enough to allow multiple functions on the same
+ * port.
+ */
+ edcp->edc_min_vi_count = edcp->edc_max_vi_count =
+ MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
+
+ /* The client driver must configure and enable PIO buffer support */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+
+#if EFSYS_OPT_MAC_STATS
+ /* Wipe the MAC statistics */
+ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
+ goto fail5;
+#endif
+
+#if EFSYS_OPT_LOOPBACK
+ if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
+ goto fail6;
+#endif
+
+#if EFSYS_OPT_MON_STATS
+ if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
+ /* Unprivileged functions do not have access to sensors */
+ if (rc != EACCES)
+ goto fail7;
+ }
+#endif
+
+ encp->enc_features = enp->en_features;
+
+ return (0);
+
+#if EFSYS_OPT_MON_STATS
+fail7:
+ EFSYS_PROBE(fail7);
+#endif
+#if EFSYS_OPT_LOOPBACK
+fail6:
+ EFSYS_PROBE(fail6);
+#endif
+#if EFSYS_OPT_MAC_STATS
+fail5:
+ EFSYS_PROBE(fail5);
+#endif
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_evq_count, max_evq_count;
+ uint32_t min_rxq_count, max_rxq_count;
+ uint32_t min_txq_count, max_txq_count;
+ efx_rc_t rc;
+
+ if (edlp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Get minimum required and maximum usable VI limits */
+ min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
+ min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
+ min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_min_vi_count =
+ MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
+
+ max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
+ max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
+ max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_max_vi_count =
+ MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
+
+ /*
+ * Check limits for sub-allocated piobuf blocks.
+ * PIO is optional, so don't fail if the limits are incorrect.
+ */
+ if ((encp->enc_piobuf_size == 0) ||
+ (encp->enc_piobuf_limit == 0) ||
+ (edlp->edl_min_pio_alloc_size == 0) ||
+ (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
+ /* Disable PIO */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+ } else {
+ uint32_t blk_size, blk_count, blks_per_piobuf;
+
+ blk_size =
+ MAX(edlp->edl_min_pio_alloc_size,
+ encp->enc_piobuf_min_alloc_size);
+
+ blks_per_piobuf = encp->enc_piobuf_size / blk_size;
+ EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
+
+ blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
+
+ /* A zero max pio alloc count means unlimited */
+ if ((edlp->edl_max_pio_alloc_count > 0) &&
+ (edlp->edl_max_pio_alloc_count < blk_count)) {
+ blk_count = edlp->edl_max_pio_alloc_count;
+ }
+
+ edcp->edc_pio_alloc_size = blk_size;
+ edcp->edc_max_piobuf_count =
+ (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
+ MC_CMD_ENTITY_RESET_OUT_LEN)];
+ efx_rc_t rc;
+
+ /* ef10_nic_reset() is called to recover from BADASSERT failures. */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ENTITY_RESET;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
+
+ MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
+ ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ /* Clear RX/TX DMA queue errors */
+ enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_init(
+ __in efx_nic_t *enp)
+{
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_vi_count, max_vi_count;
+ uint32_t vi_count, vi_base, vi_shift;
+ uint32_t i;
+ uint32_t retry;
+ uint32_t delay_us;
+ uint32_t vi_window_size;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ /* Enable reporting of some events (e.g. link change) */
+ if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
+ goto fail1;
+
+ /* Allocate (optional) on-chip PIO buffers */
+ ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
+
+ /*
+ * For best performance, PIO writes should use a write-combined
+ * (WC) memory mapping. Using a separate WC mapping for the PIO
+ * aperture of each VI would be a burden to drivers (and not
+ * possible if the host page size is >4Kbyte).
+ *
+ * To avoid this we use a single uncached (UC) mapping for VI
+ * register access, and a single WC mapping for extra VIs used
+ * for PIO writes.
+ *
+ * Each piobuf must be linked to a VI in the WC mapping, and to
+ * each VI that is using a sub-allocated block from the piobuf.
+ */
+ min_vi_count = edcp->edc_min_vi_count;
+ max_vi_count =
+ edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
+
+ /* Ensure that the previously attached driver's VIs are freed */
+ if ((rc = efx_mcdi_free_vis(enp)) != 0)
+ goto fail2;
+
+ /*
+ * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
+ * fails then retrying the request for fewer VI resources may succeed.
+ */
+ vi_count = 0;
+ if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
+ &vi_base, &vi_count, &vi_shift)) != 0)
+ goto fail3;
+
+ EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
+
+ if (vi_count < min_vi_count) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+
+ enp->en_arch.ef10.ena_vi_base = vi_base;
+ enp->en_arch.ef10.ena_vi_count = vi_count;
+ enp->en_arch.ef10.ena_vi_shift = vi_shift;
+
+ if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
+ /* Not enough extra VIs to map piobufs */
+ ef10_nic_free_piobufs(enp);
+ }
+
+ enp->en_arch.ef10.ena_pio_write_vi_base =
+ vi_count - enp->en_arch.ef10.ena_piobuf_count;
+
+ EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
+ EFX_VI_WINDOW_SHIFT_INVALID);
+ EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
+ EFX_VI_WINDOW_SHIFT_64K);
+ vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;
+
+ /* Save UC memory mapping details */
+ enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (vi_window_size *
+ enp->en_arch.ef10.ena_pio_write_vi_base);
+ } else {
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (vi_window_size *
+ enp->en_arch.ef10.ena_vi_count);
+ }
+
+ /* Save WC memory mapping details */
+ enp->en_arch.ef10.ena_wc_mem_map_offset =
+ enp->en_arch.ef10.ena_uc_mem_map_offset +
+ enp->en_arch.ef10.ena_uc_mem_map_size;
+
+ enp->en_arch.ef10.ena_wc_mem_map_size =
+ (vi_window_size *
+ enp->en_arch.ef10.ena_piobuf_count);
+
+ /* Link piobufs to extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_link_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i,
+ enp->en_arch.ef10.ena_piobuf_handle[i]);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ /*
+ * Allocate a vAdaptor attached to our upstream vPort/pPort.
+ *
+ * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
+ * driver has yet to bring up the EVB port. See bug 56147. In this case,
+ * retry the request several times after waiting a while. The wait time
+ * between retries starts small (10ms) and exponentially increases.
+ * Total wait time is a little over two seconds. Retry logic in the
+ * client driver may mean this whole loop is repeated if it continues to
+ * fail.
+ */
+ retry = 0;
+ delay_us = 10000;
+ while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
+ (rc != ENOENT)) {
+ /*
+ * Do not retry alloc for PF, or for other errors on
+ * a VF.
+ */
+ goto fail5;
+ }
+
+ /* VF startup before PF is ready. Retry allocation. */
+ if (retry > 5) {
+ /* Too many attempts */
+ rc = EINVAL;
+ goto fail6;
+ }
+ EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
+ EFSYS_SLEEP(delay_us);
+ retry++;
+ if (delay_us < 500000)
+ delay_us <<= 2;
+ }
+
+ enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
+ enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
+
+ return (0);
+
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+
+ ef10_nic_free_piobufs(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *vi_countp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ /*
+ * Report VIs that the client driver can use.
+ * Do not include VIs used for PIO buffer writes.
+ */
+ *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
+
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ /*
+ * TODO: Specify host memory mapping alignment and granularity
+ * in efx_drv_limits_t so that they can be taken into account
+ * when allocating extra VIs for PIO writes.
+ */
+ switch (region) {
+ case EFX_REGION_VI:
+ /* UC mapped memory BAR region for VI registers */
+ *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
+ break;
+
+ case EFX_REGION_PIO_WRITE_VI:
+ /* WC mapped memory BAR region for piobuf writes */
+ *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_nic_fini(
+ __in efx_nic_t *enp)
+{
+ uint32_t i;
+ efx_rc_t rc;
+
+ (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
+ enp->en_vport_id = 0;
+
+ /* Unlink piobufs from extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_unlink_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ ef10_nic_free_piobufs(enp);
+
+ (void) efx_mcdi_free_vis(enp);
+ enp->en_arch.ef10.ena_vi_count = 0;
+}
+
+ void
+ef10_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_cfg_free(enp);
+#endif /* EFSYS_OPT_MON_STATS */
+ (void) efx_mcdi_drv_attach(enp, B_FALSE);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+ef10_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ /* FIXME */
+ _NOTE(ARGUNUSED(enp))
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ /* FIXME */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_nic_global(
+ __in efx_nic_t *enp,
+ __in uint32_t key,
+ __out uint32_t *valuep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_NIC_GLOBAL_IN_LEN,
+ MC_CMD_GET_NIC_GLOBAL_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_NIC_GLOBAL;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_set_nic_global(
+ __in efx_nic_t *enp,
+ __in uint32_t key,
+ __in uint32_t value)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_SET_NIC_GLOBAL_IN_LEN];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_NIC_GLOBAL;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key);
+ MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_nvram.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_nvram.c
new file mode 100644
index 00000000..2883ec8f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_nvram.c
@@ -0,0 +1,2388 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+#include "ef10_tlv_layout.h"
+
+/* Cursor for TLV partition format */
+typedef struct tlv_cursor_s {
+ uint32_t *block; /* Base of data block */
+ uint32_t *current; /* Cursor position */
+ uint32_t *end; /* End tag position */
+ uint32_t *limit; /* Last dword of data block */
+} tlv_cursor_t;
+
+typedef struct nvram_partition_s {
+ uint16_t type;
+ uint8_t chip_select;
+ uint8_t flags;
+ /*
+ * The full length of the NVRAM partition.
+ * This is different from tlv_partition_header.total_length,
+ * which can be smaller.
+ */
+ uint32_t length;
+ uint32_t erase_size;
+ uint32_t *data;
+ tlv_cursor_t tlv_cursor;
+} nvram_partition_t;
+
+
+static __checkReturn efx_rc_t
+tlv_validate_state(
+ __inout tlv_cursor_t *cursor);
+
+
+static void
+tlv_init_block(
+ __out uint32_t *block)
+{
+ *block = __CPU_TO_LE_32(TLV_TAG_END);
+}
+
+static uint32_t
+tlv_tag(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t dword, tag;
+
+ dword = cursor->current[0];
+ tag = __LE_TO_CPU_32(dword);
+
+ return (tag);
+}
+
+static size_t
+tlv_length(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t dword, length;
+
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (0);
+
+ dword = cursor->current[1];
+ length = __LE_TO_CPU_32(dword);
+
+ return ((size_t)length);
+}
+
+static uint8_t *
+tlv_value(
+ __in tlv_cursor_t *cursor)
+{
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (NULL);
+
+ return ((uint8_t *)(&cursor->current[2]));
+}
+
+static uint8_t *
+tlv_item(
+ __in tlv_cursor_t *cursor)
+{
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (NULL);
+
+ return ((uint8_t *)cursor->current);
+}
+
+/*
+ * TLV item DWORD length is tag + length + value (rounded up to DWORD)
+ * equivalent to tlv_n_words_for_len in mc-comms tlv.c
+ */
+#define TLV_DWORD_COUNT(length) \
+ (1 + 1 + (((length) + sizeof (uint32_t) - 1) / sizeof (uint32_t)))
+
+
+static uint32_t *
+tlv_next_item_ptr(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t length;
+
+ length = tlv_length(cursor);
+
+ return (cursor->current + TLV_DWORD_COUNT(length));
+}
+
+static __checkReturn efx_rc_t
+tlv_advance(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (cursor->current == cursor->end) {
+ /* No more tags after END tag */
+ cursor->current = NULL;
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ /* Advance to next item and validate */
+ cursor->current = tlv_next_item_ptr(cursor);
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_rewind(
+ __in tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ cursor->current = cursor->block;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_find(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag)
+{
+ efx_rc_t rc;
+
+ rc = tlv_rewind(cursor);
+ while (rc == 0) {
+ if (tlv_tag(cursor) == tag)
+ break;
+
+ rc = tlv_advance(cursor);
+ }
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_validate_state(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ /* Check cursor position */
+ if (cursor->current < cursor->block) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (cursor->current > cursor->limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (tlv_tag(cursor) != TLV_TAG_END) {
+ /* Check current item has space for tag and length */
+ if (cursor->current > (cursor->limit - 2)) {
+ cursor->current = NULL;
+ rc = EFAULT;
+ goto fail3;
+ }
+
+ /* Check we have value data for current item and another tag */
+ if (tlv_next_item_ptr(cursor) > (cursor->limit - 1)) {
+ cursor->current = NULL;
+ rc = EFAULT;
+ goto fail4;
+ }
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_init_cursor(
+ __out tlv_cursor_t *cursor,
+ __in uint32_t *block,
+ __in uint32_t *limit,
+ __in uint32_t *current)
+{
+ cursor->block = block;
+ cursor->limit = limit;
+
+ cursor->current = current;
+ cursor->end = NULL;
+
+ return (tlv_validate_state(cursor));
+}
+
+static __checkReturn efx_rc_t
+tlv_init_cursor_from_size(
+ __out tlv_cursor_t *cursor,
+ __in_bcount(size)
+ uint8_t *block,
+ __in size_t size)
+{
+ uint32_t *limit;
+ limit = (uint32_t *)(block + size - sizeof (uint32_t));
+ return (tlv_init_cursor(cursor, (uint32_t *)block,
+ limit, (uint32_t *)block));
+}
+
+static __checkReturn efx_rc_t
+tlv_init_cursor_at_offset(
+ __out tlv_cursor_t *cursor,
+ __in_bcount(size)
+ uint8_t *block,
+ __in size_t size,
+ __in size_t offset)
+{
+ uint32_t *limit;
+ uint32_t *current;
+ limit = (uint32_t *)(block + size - sizeof (uint32_t));
+ current = (uint32_t *)(block + offset);
+ return (tlv_init_cursor(cursor, (uint32_t *)block, limit, current));
+}
+
+static __checkReturn efx_rc_t
+tlv_require_end(
+ __inout tlv_cursor_t *cursor)
+{
+ uint32_t *pos;
+ efx_rc_t rc;
+
+ if (cursor->end == NULL) {
+ pos = cursor->current;
+ if ((rc = tlv_find(cursor, TLV_TAG_END)) != 0)
+ goto fail1;
+
+ cursor->end = cursor->current;
+ cursor->current = pos;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static size_t
+tlv_block_length_used(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail2;
+
+ /* Return space used (including the END tag) */
+ return (cursor->end + 1 - cursor->block) * sizeof (uint32_t);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (0);
+}
+
+static uint32_t *
+tlv_last_segment_end(
+ __in tlv_cursor_t *cursor)
+{
+ tlv_cursor_t segment_cursor;
+ uint32_t *last_segment_end = cursor->block;
+ uint32_t *segment_start = cursor->block;
+
+ /*
+ * Go through each segment and check that it has an end tag. If there
+ * is no end tag then the previous segment was the last valid one,
+ * so return the pointer to its end tag.
+ */
+ for (;;) {
+ if (tlv_init_cursor(&segment_cursor, segment_start,
+ cursor->limit, segment_start) != 0)
+ break;
+ if (tlv_require_end(&segment_cursor) != 0)
+ break;
+ last_segment_end = segment_cursor.end;
+ segment_start = segment_cursor.end + 1;
+ }
+
+ return (last_segment_end);
+}
+
+
+static uint32_t *
+tlv_write(
+ __in tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size) uint8_t *data,
+ __in size_t size)
+{
+ uint32_t len = size;
+ uint32_t *ptr;
+
+ ptr = cursor->current;
+
+ *ptr++ = __CPU_TO_LE_32(tag);
+ *ptr++ = __CPU_TO_LE_32(len);
+
+ if (len > 0) {
+ ptr[(len - 1) / sizeof (uint32_t)] = 0;
+ memcpy(ptr, data, len);
+ ptr += P2ROUNDUP(len, sizeof (uint32_t)) / sizeof (*ptr);
+ }
+
+ return (ptr);
+}
+
+static __checkReturn efx_rc_t
+tlv_insert(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size)
+ uint8_t *data,
+ __in size_t size)
+{
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail2;
+
+ if (tag == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ delta = TLV_DWORD_COUNT(size);
+ if (last_segment_end + 1 + delta > cursor->limit) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ /* Move data up: new space at cursor->current */
+ memmove(cursor->current + delta, cursor->current,
+ (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end += delta;
+
+ /* Write new TLV item */
+ tlv_write(cursor, tag, data, size);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_delete(
+ __inout tlv_cursor_t *cursor)
+{
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (tlv_tag(cursor) == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ delta = TLV_DWORD_COUNT(tlv_length(cursor));
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail3;
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ /* Shuffle things down, destroying the item at cursor->current */
+ memmove(cursor->current, cursor->current + delta,
+ (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
+ /* Zero the new space at the end of the TLV chain */
+ memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t));
+ /* Adjust the end pointer */
+ cursor->end -= delta;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_modify(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size)
+ uint8_t *data,
+ __in size_t size)
+{
+ uint32_t *pos;
+ unsigned int old_ndwords;
+ unsigned int new_ndwords;
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (tlv_tag(cursor) == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ if (tlv_tag(cursor) != tag) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ old_ndwords = TLV_DWORD_COUNT(tlv_length(cursor));
+ new_ndwords = TLV_DWORD_COUNT(size);
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail4;
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ if (new_ndwords > old_ndwords) {
+ /* Expand space used for TLV item */
+ delta = new_ndwords - old_ndwords;
+ pos = cursor->current + old_ndwords;
+
+ if (last_segment_end + 1 + delta > cursor->limit) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Move up: new space at (cursor->current + old_ndwords) */
+ memmove(pos + delta, pos,
+ (last_segment_end + 1 - pos) * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end += delta;
+
+ } else if (new_ndwords < old_ndwords) {
+ /* Shrink space used for TLV item */
+ delta = old_ndwords - new_ndwords;
+ pos = cursor->current + new_ndwords;
+
+ /* Move down: remove words at (cursor->current + new_ndwords) */
+ memmove(pos, pos + delta,
+ (last_segment_end + 1 - pos) * sizeof (uint32_t));
+
+ /* Zero the new space at the end of the TLV chain */
+ memset(last_segment_end + 1 - delta, 0,
+ delta * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end -= delta;
+ }
+
+ /* Write new data */
+ tlv_write(cursor, tag, data, size);
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint32_t checksum_tlv_partition(
+ __in nvram_partition_t *partition)
+{
+ tlv_cursor_t *cursor;
+ uint32_t *ptr;
+ uint32_t *end;
+ uint32_t csum;
+ size_t len;
+
+ cursor = &partition->tlv_cursor;
+ len = tlv_block_length_used(cursor);
+ EFSYS_ASSERT3U((len & 3), ==, 0);
+
+ csum = 0;
+ ptr = partition->data;
+ end = &ptr[len >> 2];
+
+ while (ptr < end)
+ csum += __LE_TO_CPU_32(*ptr++);
+
+ return (csum);
+}
+
+static __checkReturn efx_rc_t
+tlv_update_partition_len_and_cks(
+ __in tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+ nvram_partition_t partition;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t new_len;
+
+ /*
+ * We just modified the partition, so the total length may not be
+ * valid. Don't use tlv_find(), which performs some sanity checks
+ * that may fail here.
+ */
+ partition.data = cursor->block;
+ memcpy(&partition.tlv_cursor, cursor, sizeof (*cursor));
+ header = (struct tlv_partition_header *)partition.data;
+ /* Sanity check. */
+ if (__LE_TO_CPU_32(header->tag) != TLV_TAG_PARTITION_HEADER) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ new_len = tlv_block_length_used(&partition.tlv_cursor);
+ if (new_len == 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ header->total_length = __CPU_TO_LE_32(new_len);
+ /* Ensure the modified partition always has a new generation count. */
+ header->generation = __CPU_TO_LE_32(
+ __LE_TO_CPU_32(header->generation) + 1);
+
+ trailer = (struct tlv_partition_trailer *)((uint8_t *)header +
+ new_len - sizeof (*trailer) - sizeof (uint32_t));
+ trailer->generation = header->generation;
+ trailer->checksum = __CPU_TO_LE_32(
+ __LE_TO_CPU_32(trailer->checksum) -
+ checksum_tlv_partition(&partition));
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Validate buffer contents (before writing to flash) */
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_validate(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t total_length;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp, partn))
+ EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
+
+ if ((partn_data == NULL) || (partn_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* The partition header must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)partn_data,
+ partn_size)) != 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Check TLV partition length (includes the END tag) */
+ total_length = __LE_TO_CPU_32(header->total_length);
+ if (total_length > partn_size) {
+ rc = EFBIG;
+ goto fail4;
+ }
+
+ /* Check partition ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ /* Check generation counts are consistent */
+ if (trailer->generation != header->generation) {
+ rc = EINVAL;
+ goto fail8;
+ }
+
+ /* Verify partition checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(partn_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_create(
+ __in efx_nic_t *enp,
+ __in uint16_t partn_type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ uint32_t *buf = (uint32_t *)partn_data;
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ struct tlv_partition_header header;
+ struct tlv_partition_trailer trailer;
+
+ unsigned int min_buf_size = sizeof (struct tlv_partition_header) +
+ sizeof (struct tlv_partition_trailer);
+ if (partn_size < min_buf_size) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ memset(buf, 0xff, partn_size);
+
+ tlv_init_block(buf);
+ if ((rc = tlv_init_cursor(&cursor, buf,
+ (uint32_t *)((uint8_t *)buf + partn_size),
+ buf)) != 0) {
+ goto fail2;
+ }
+
+ header.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_HEADER);
+ header.length = __CPU_TO_LE_32(sizeof (header) - 8);
+ header.type_id = __CPU_TO_LE_16(partn_type);
+ header.preset = 0;
+ header.generation = __CPU_TO_LE_32(1);
+ header.total_length = 0; /* This will be fixed below. */
+ if ((rc = tlv_insert(
+ &cursor, TLV_TAG_PARTITION_HEADER,
+ (uint8_t *)&header.type_id, sizeof (header) - 8)) != 0)
+ goto fail3;
+ if ((rc = tlv_advance(&cursor)) != 0)
+ goto fail4;
+
+ trailer.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_TRAILER);
+ trailer.length = __CPU_TO_LE_32(sizeof (trailer) - 8);
+ trailer.generation = header.generation;
+ trailer.checksum = 0; /* This will be fixed below. */
+ if ((rc = tlv_insert(&cursor, TLV_TAG_PARTITION_TRAILER,
+ (uint8_t *)&trailer.generation, sizeof (trailer) - 8)) != 0)
+ goto fail5;
+
+ if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
+ goto fail6;
+
+ /* Check that the partition is valid. */
+ if ((rc = ef10_nvram_buffer_validate(enp, partn_type,
+ partn_data, partn_size)) != 0)
+ goto fail7;
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint32_t
+byte_offset(
+ __in uint32_t *position,
+ __in uint32_t *base)
+{
+ return (uint32_t)((uint8_t *)position - (uint8_t *)base);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_find_item_start(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp)
+{
+ /* Read past partition header to find start address of the first key */
+ tlv_cursor_t cursor;
+ efx_rc_t rc;
+
+ /* A PARTITION_HEADER tag must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ *startp = byte_offset(cursor.current, cursor.block);
+
+ if ((rc = tlv_require_end(&cursor)) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_find_end(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp)
+{
+ /* Read to end of partition */
+ tlv_cursor_t cursor;
+ efx_rc_t rc;
+ uint32_t *segment_used;
+
+ _NOTE(ARGUNUSED(offset))
+
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ segment_used = cursor.block;
+
+ /*
+ * Go through each segment and check that it has an end tag. If there
+ * is no end tag then the previous segment was the last valid one,
+ * so return the used space including that end tag.
+ */
+ while (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) {
+ if (tlv_require_end(&cursor) != 0) {
+ if (segment_used == cursor.block) {
+ /*
+ * First segment is corrupt, so there is
+ * no valid data in partition.
+ */
+ rc = EINVAL;
+ goto fail2;
+ }
+ break;
+ }
+ segment_used = cursor.end + 1;
+
+ cursor.current = segment_used;
+ }
+ /* Return space used (including the END tag) */
+ *endp = (segment_used - cursor.block) * sizeof (uint32_t);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+ef10_nvram_buffer_find_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp)
+{
+ /* Find TLV at offset and return key start and length */
+ tlv_cursor_t cursor;
+ uint8_t *key;
+ uint32_t tag;
+
+ if (tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset) != 0) {
+ return (B_FALSE);
+ }
+
+ while ((key = tlv_item(&cursor)) != NULL) {
+ tag = tlv_tag(&cursor);
+ if (tag == TLV_TAG_PARTITION_HEADER ||
+ tag == TLV_TAG_PARTITION_TRAILER) {
+ if (tlv_advance(&cursor) != 0) {
+ break;
+ }
+ continue;
+ }
+ *startp = byte_offset(cursor.current, cursor.block);
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_get_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(item_max_size, *lengthp)
+ caddr_t itemp,
+ __in size_t item_max_size,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ uint32_t item_length;
+
+ if (item_max_size < length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail2;
+ }
+
+ item_length = tlv_length(&cursor);
+ if (length < item_length) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+ memcpy(itemp, tlv_value(&cursor), item_length);
+
+ *lengthp = item_length;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_insert_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ rc = tlv_insert(&cursor, TLV_TAG_LICENSE, (uint8_t *)keyp, length);
+
+ if (rc != 0) {
+ goto fail2;
+ }
+
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_delete_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ _NOTE(ARGUNUSED(length, end))
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ if ((rc = tlv_delete(&cursor)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_finish(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ if ((rc = tlv_require_end(&cursor)) != 0)
+ goto fail2;
+
+ if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+/*
+ * Read and validate a segment from a partition. A segment is a complete
+ * tlv chain between PARTITION_HEADER and PARTITION_END tags. There may
+ * be multiple segments in a partition, so seg_offset allows segments
+ * beyond the first to be read.
+ */
+static __checkReturn efx_rc_t
+ef10_nvram_read_tlv_segment(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in size_t seg_offset,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t total_length;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
+
+ if ((seg_data == NULL) || (max_seg_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Read initial chunk of the segment, starting at offset */
+ if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset, seg_data,
+ EF10_NVRAM_CHUNK,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) {
+ goto fail2;
+ }
+
+ /* A PARTITION_HEADER tag must be the first item at the given offset */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Check TLV segment length (includes the END tag) */
+ total_length = __LE_TO_CPU_32(header->total_length);
+ if (total_length > max_seg_size) {
+ rc = EFBIG;
+ goto fail5;
+ }
+
+ /* Read the remaining segment content */
+ if (total_length > EF10_NVRAM_CHUNK) {
+ if ((rc = ef10_nvram_partn_read_mode(enp, partn,
+ seg_offset + EF10_NVRAM_CHUNK,
+ seg_data + EF10_NVRAM_CHUNK,
+ total_length - EF10_NVRAM_CHUNK,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0)
+ goto fail6;
+ }
+
+ /* Check segment ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+ trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail8;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ /* Check data read from segment is consistent */
+ if (trailer->generation != header->generation) {
+ /*
+ * The partition data may have been modified between successive
+ * MCDI NVRAM_READ requests by the MC or another PCI function.
+ *
+ * The caller must retry to obtain consistent partition data.
+ */
+ rc = EAGAIN;
+ goto fail10;
+ }
+
+ /* Verify segment checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(seg_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail11;
+ }
+
+ return (0);
+
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Read a single TLV item from a host memory
+ * buffer containing a TLV formatted segment.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buf_read_tlv(
+ __in efx_nic_t *enp,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep)
+{
+ tlv_cursor_t cursor;
+ caddr_t data;
+ size_t length;
+ caddr_t value;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((seg_data == NULL) || (max_seg_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Find requested TLV tag in segment data */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ if ((rc = tlv_find(&cursor, tag)) != 0) {
+ rc = ENOENT;
+ goto fail3;
+ }
+ value = (caddr_t)tlv_value(&cursor);
+ length = tlv_length(&cursor);
+
+ if (length == 0)
+ data = NULL;
+ else {
+ /* Copy out data from TLV item */
+ EFSYS_KMEM_ALLOC(enp->en_esip, length, data);
+ if (data == NULL) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+ memcpy(data, value, length);
+ }
+
+ *datap = data;
+ *sizep = length;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Read a single TLV item from the first segment in a TLV formatted partition */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*seg_sizep) caddr_t *seg_datap,
+ __out size_t *seg_sizep)
+{
+ caddr_t seg_data = NULL;
+ size_t partn_size = 0;
+ size_t length;
+ caddr_t data;
+ int retry;
+ efx_rc_t rc;
+
+ /* Allocate sufficient memory for the entire partition */
+ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0)
+ goto fail1;
+
+ if (partn_size == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, seg_data);
+ if (seg_data == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ /*
+ * Read the first segment in a TLV partition. Retry until consistent
+ * segment contents are returned. Inconsistent data may be read if:
+ * a) the segment contents are invalid
+ * b) the MC has rebooted while we were reading the partition
+ * c) the partition has been modified while we were reading it
+ * Limit retry attempts to ensure forward progress.
+ */
+ retry = 10;
+ do {
+ if ((rc = ef10_nvram_read_tlv_segment(enp, partn, 0,
+ seg_data, partn_size)) != 0)
+ --retry;
+ } while ((rc == EAGAIN) && (retry > 0));
+
+ if (rc != 0) {
+ /* Failed to obtain consistent segment data */
+ if (rc == EAGAIN)
+ rc = EIO;
+
+ goto fail4;
+ }
+
+ if ((rc = ef10_nvram_buf_read_tlv(enp, seg_data, partn_size,
+ tag, &data, &length)) != 0)
+ goto fail5;
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data);
+
+ *seg_datap = data;
+ *seg_sizep = length;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Compute the size of a segment. */
+ static __checkReturn efx_rc_t
+ef10_nvram_buf_segment_size(
+ __in caddr_t seg_data,
+ __in size_t max_seg_size,
+ __out size_t *seg_sizep)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ uint32_t cksum;
+ int pos;
+ uint32_t *end_tag_position;
+ uint32_t segment_length;
+
+ /* A PARTITION_HEADER tag must be the first item at the given offset */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Check TLV segment length (includes the END tag) */
+ *seg_sizep = __LE_TO_CPU_32(header->total_length);
+ if (*seg_sizep > max_seg_size) {
+ rc = EFBIG;
+ goto fail3;
+ }
+
+ /* Check segment ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ end_tag_position = cursor.current;
+
+ /* Verify segment checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < *seg_sizep; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(seg_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ /*
+ * Calculate total length from HEADER to END tags and compare to
+ * max_seg_size and the total_length field in the HEADER tag.
+ */
+ segment_length = tlv_block_length_used(&cursor);
+
+ if (segment_length > max_seg_size) {
+ rc = EINVAL;
+ goto fail8;
+ }
+
+ if (segment_length != *seg_sizep) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ /* Skip over the first HEADER tag. */
+ rc = tlv_rewind(&cursor);
+ rc = tlv_advance(&cursor);
+
+ while (rc == 0) {
+ if (tlv_tag(&cursor) == TLV_TAG_END) {
+ /* Check that the END tag is the one found earlier. */
+ if (cursor.current != end_tag_position)
+ goto fail10;
+ break;
+ }
+ /* Check for duplicate HEADER tags before the END tag. */
+ if (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail11;
+ }
+
+ rc = tlv_advance(&cursor);
+ }
+ if (rc != 0)
+ goto fail12;
+
+ return (0);
+
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in a host memory buffer containing a TLV
+ * formatted segment. Historically partitions consisted of only one segment.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buf_write_tlv(
+ __inout_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __in_bcount(tag_size) caddr_t tag_data,
+ __in size_t tag_size,
+ __out size_t *total_lengthp)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ uint32_t generation;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ /* A PARTITION_HEADER tag must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Update the TLV chain to contain the new data */
+ if ((rc = tlv_find(&cursor, tag)) == 0) {
+ /* Modify existing TLV item */
+ if ((rc = tlv_modify(&cursor, tag,
+ (uint8_t *)tag_data, tag_size)) != 0)
+ goto fail3;
+ } else {
+ /* Insert a new TLV item before the PARTITION_TRAILER */
+ rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER);
+ if (rc != 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ if ((rc = tlv_insert(&cursor, tag,
+ (uint8_t *)tag_data, tag_size)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ }
+
+ /* Find the trailer tag */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
+
+ /* Update PARTITION_HEADER and PARTITION_TRAILER fields */
+ *total_lengthp = tlv_block_length_used(&cursor);
+ if (*total_lengthp > max_seg_size) {
+ rc = ENOSPC;
+ goto fail7;
+ }
+ generation = __LE_TO_CPU_32(header->generation) + 1;
+
+ header->total_length = __CPU_TO_LE_32(*total_lengthp);
+ header->generation = __CPU_TO_LE_32(generation);
+ trailer->generation = __CPU_TO_LE_32(generation);
+
+ /* Recompute PARTITION_TRAILER checksum */
+ trailer->checksum = 0;
+ cksum = 0;
+ for (pos = 0; (size_t)pos < *total_lengthp; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(seg_data + pos));
+ }
+ trailer->checksum = ~cksum + 1;
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in the first segment of a TLV formatted
+ * dynamic config partition. The first segment is the current active
+ * configuration.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ return ef10_nvram_partn_write_segment_tlv(enp, partn, tag, data,
+ size, B_FALSE);
+}
+
+/*
+ * Read a segment from nvram at the given offset into a buffer (segment_data)
+ * and optionally write a new tag to it.
+ */
+static __checkReturn efx_rc_t
+ef10_nvram_segment_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout caddr_t *seg_datap,
+ __inout size_t *partn_offsetp,
+ __inout size_t *src_remain_lenp,
+ __inout size_t *dest_remain_lenp,
+ __in boolean_t write)
+{
+ efx_rc_t rc;
+ efx_rc_t status;
+ size_t original_segment_size;
+ size_t modified_segment_size;
+
+ /*
+ * Read the segment from NVRAM into the segment_data buffer and validate
+ * it, returning if it does not validate. This is not a failure unless
+ * this is the first segment in a partition. In this case the caller
+ * must propagate the error.
+ */
+ status = ef10_nvram_read_tlv_segment(enp, partn, *partn_offsetp,
+ *seg_datap, *src_remain_lenp);
+ if (status != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ status = ef10_nvram_buf_segment_size(*seg_datap,
+ *src_remain_lenp, &original_segment_size);
+ if (status != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (write) {
+ /* Update the contents of the segment in the buffer */
+ if ((rc = ef10_nvram_buf_write_tlv(*seg_datap,
+ *dest_remain_lenp, tag, data, size,
+ &modified_segment_size)) != 0) {
+ goto fail3;
+ }
+ *dest_remain_lenp -= modified_segment_size;
+ *seg_datap += modified_segment_size;
+ } else {
+ /*
+ * We won't modify this segment, but still need to update the
+ * remaining lengths and pointers.
+ */
+ *dest_remain_lenp -= original_segment_size;
+ *seg_datap += original_segment_size;
+ }
+
+ *partn_offsetp += original_segment_size;
+ *src_remain_lenp -= original_segment_size;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in either the first segment or in all
+ * segments in a TLV formatted dynamic config partition. Dynamic config
+ * partitions on boards that support RFID are divided into a number of segments,
+ * each formatted like a partition, with header, trailer and end tags. The first
+ * segment is the current active configuration.
+ *
+ * The segments are initialised by manftest and each contain a different
+ * configuration e.g. firmware variant. The firmware can be instructed
+ * via RFID to copy a segment to replace the first segment, hence changing the
+ * active configuration. This allows ops to change the configuration of a board
+ * prior to shipment using RFID.
+ *
+ * Changes to the dynamic config may need to be written to all segments (e.g.
+ * firmware versions) or just the first segment (changes to the active
+ * configuration). See SF-111324-SW "The use of RFID in Solarflare Products".
+ * If only the first segment is written the code still needs to be aware of the
+ * possible presence of subsequent segments as writing to a segment may cause
+ * its size to increase, which would overwrite the subsequent segments and
+ * invalidate them.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write_segment_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t all_segments)
+{
+ size_t partn_size = 0;
+ caddr_t partn_data;
+ size_t total_length = 0;
+ efx_rc_t rc;
+ size_t current_offset = 0;
+ size_t remaining_original_length;
+ size_t remaining_modified_length;
+ caddr_t segment_data;
+
+ EFSYS_ASSERT3U(partn, ==, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG);
+
+ /* Allocate sufficient memory for the entire partition */
+ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, partn_data);
+ if (partn_data == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ remaining_original_length = partn_size;
+ remaining_modified_length = partn_size;
+ segment_data = partn_data;
+
+ /* Lock the partition */
+ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
+ goto fail3;
+
+ /* Iterate over each (potential) segment to update it. */
+ do {
+ boolean_t write = all_segments || current_offset == 0;
+
+ rc = ef10_nvram_segment_write_tlv(enp, partn, tag, data, size,
+ &segment_data, &current_offset, &remaining_original_length,
+ &remaining_modified_length, write);
+ if (rc != 0) {
+ if (current_offset == 0) {
+ /*
+ * If no data has been read then the first
+ * segment is invalid, which is an error.
+ */
+ goto fail4;
+ }
+ break;
+ }
+ } while (current_offset < partn_size);
+
+ total_length = segment_data - partn_data;
+
+ /*
+ * We've run out of space. This should actually be dealt with by
+ * ef10_nvram_buf_write_tlv returning ENOSPC.
+ */
+ if (total_length > partn_size) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Erase the whole partition in NVRAM */
+ if ((rc = ef10_nvram_partn_erase(enp, partn, 0, partn_size)) != 0)
+ goto fail6;
+
+ /* Write new partition contents from the buffer to NVRAM */
+ if ((rc = ef10_nvram_partn_write(enp, partn, 0, partn_data,
+ total_length)) != 0)
+ goto fail7;
+
+ /* Unlock the partition */
+ ef10_nvram_partn_unlock(enp, partn, NULL);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ ef10_nvram_partn_unlock(enp, partn, NULL);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Get the size of a NVRAM partition. This is the total size allocated in nvram,
+ * not the data used by the segments in the partition.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, sizep,
+ NULL, NULL, NULL)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
+ while (size > 0) {
+ chunk = MIN(size, EF10_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_read(enp, partn, offset,
+ data, chunk, mode)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ /*
+ * An A/B partition has two data stores (current and backup).
+ * Read requests which come in through the EFX API expect to read the
+ * current, active store of an A/B partition. For non A/B partitions,
+ * there is only a single store and so the mode param is ignored.
+ */
+ return ef10_nvram_partn_read_mode(enp, partn, offset, data, size,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read_backup(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ /*
+ * An A/B partition has two data stores (current and backup).
+ * Read the backup store of an A/B partition (i.e. the store currently
+ * being written to if the partition is locked).
+ *
+ * This is needed when comparing the existing partition content to avoid
+ * unnecessary writes, or to read back what has been written to check
+ * that the writes have succeeded.
+ */
+ return ef10_nvram_partn_read_mode(enp, partn, offset, data, size,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size)
+{
+ efx_rc_t rc;
+ uint32_t erase_size;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
+ &erase_size, NULL)) != 0)
+ goto fail1;
+
+ if (erase_size == 0) {
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0)
+ goto fail2;
+ } else {
+ if (size % erase_size != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ while (size > 0) {
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset,
+ erase_size)) != 0)
+ goto fail4;
+ offset += erase_size;
+ size -= erase_size;
+ }
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ uint32_t write_size;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
+ NULL, &write_size)) != 0)
+ goto fail1;
+
+ if (write_size != 0) {
+ /*
+ * Check that the size is a multiple of the write chunk size if
+ * the write chunk size is available.
+ */
+ if (size % write_size != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ } else {
+ write_size = EF10_NVRAM_CHUNK;
+ }
+
+ while (size > 0) {
+ chunk = MIN(size, write_size);
+
+ if ((rc = efx_mcdi_nvram_write(enp, partn, offset,
+ data, chunk)) != 0) {
+ goto fail3;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *verify_resultp)
+{
+ boolean_t reboot = B_FALSE;
+ efx_rc_t rc;
+
+ if (verify_resultp != NULL)
+ *verify_resultp = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN;
+
+ rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, verify_resultp);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4])
+{
+ struct tlv_partition_version partn_version;
+ size_t size;
+ efx_rc_t rc;
+
+ /* Add or modify partition version TLV item */
+ partn_version.version_w = __CPU_TO_LE_16(version[0]);
+ partn_version.version_x = __CPU_TO_LE_16(version[1]);
+ partn_version.version_y = __CPU_TO_LE_16(version[2]);
+ partn_version.version_z = __CPU_TO_LE_16(version[3]);
+
+ size = sizeof (partn_version) - (2 * sizeof (uint32_t));
+
+ /* Write the version number to all segments in the partition */
+ if ((rc = ef10_nvram_partn_write_segment_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ TLV_TAG_PARTITION_VERSION(partn),
+ (caddr_t)&partn_version.version_w, size, B_TRUE)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef struct ef10_parttbl_entry_s {
+ unsigned int partn;
+ unsigned int port_mask;
+ efx_nvram_type_t nvtype;
+} ef10_parttbl_entry_t;
+
+/* Port mask values */
+#define PORT_1 (1u << 1)
+#define PORT_2 (1u << 2)
+#define PORT_3 (1u << 3)
+#define PORT_4 (1u << 4)
+#define PORT_ALL (0xffffffffu)
+
+#define PARTN_MAP_ENTRY(partn, port_mask, nvtype) \
+{ (NVRAM_PARTITION_TYPE_##partn), (PORT_##port_mask), (EFX_NVRAM_##nvtype) }
+
+/* Translate EFX NVRAM types to firmware partition types */
+static ef10_parttbl_entry_t hunt_parttbl[] = {
+ /* partn ports nvtype */
+ PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE),
+ PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN),
+ PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM),
+ PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT0, 1, BOOTROM_CFG),
+ PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT1, 2, BOOTROM_CFG),
+ PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT2, 3, BOOTROM_CFG),
+ PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT3, 4, BOOTROM_CFG),
+ PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG),
+ PARTN_MAP_ENTRY(FPGA, ALL, FPGA),
+ PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP),
+ PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE),
+};
+
+static ef10_parttbl_entry_t medford_parttbl[] = {
+ /* partn ports nvtype */
+ PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE),
+ PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN),
+ PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM),
+ PARTN_MAP_ENTRY(EXPROM_CONFIG, ALL, BOOTROM_CFG),
+ PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG),
+ PARTN_MAP_ENTRY(FPGA, ALL, FPGA),
+ PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP),
+ PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE),
+ PARTN_MAP_ENTRY(EXPANSION_UEFI, ALL, UEFIROM),
+ PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE),
+};
+
+static ef10_parttbl_entry_t medford2_parttbl[] = {
+ /* partn ports nvtype */
+ PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE),
+ PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN),
+ PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM),
+ PARTN_MAP_ENTRY(EXPROM_CONFIG, ALL, BOOTROM_CFG),
+ PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG),
+ PARTN_MAP_ENTRY(FPGA, ALL, FPGA),
+ PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP),
+ PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE),
+ PARTN_MAP_ENTRY(EXPANSION_UEFI, ALL, UEFIROM),
+ PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE),
+};
+
+static __checkReturn efx_rc_t
+ef10_parttbl_get(
+ __in efx_nic_t *enp,
+ __out ef10_parttbl_entry_t **parttblp,
+ __out size_t *parttbl_rowsp)
+{
+ switch (enp->en_family) {
+ case EFX_FAMILY_HUNTINGTON:
+ *parttblp = hunt_parttbl;
+ *parttbl_rowsp = EFX_ARRAY_SIZE(hunt_parttbl);
+ break;
+
+ case EFX_FAMILY_MEDFORD:
+ *parttblp = medford_parttbl;
+ *parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl);
+ break;
+
+ case EFX_FAMILY_MEDFORD2:
+ *parttblp = medford2_parttbl;
+ *parttbl_rowsp = EFX_ARRAY_SIZE(medford2_parttbl);
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ return (EINVAL);
+ }
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ ef10_parttbl_entry_t *parttbl = NULL;
+ size_t parttbl_rows = 0;
+ unsigned int i;
+
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT(partnp != NULL);
+
+ if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) {
+ for (i = 0; i < parttbl_rows; i++) {
+ ef10_parttbl_entry_t *entry = &parttbl[i];
+
+ if ((entry->nvtype == type) &&
+ (entry->port_mask & (1u << emip->emi_port))) {
+ *partnp = entry->partn;
+ return (0);
+ }
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_DIAG
+
+static __checkReturn efx_rc_t
+ef10_nvram_partn_to_type(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out efx_nvram_type_t *typep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ ef10_parttbl_entry_t *parttbl = NULL;
+ size_t parttbl_rows = 0;
+ unsigned int i;
+
+ EFSYS_ASSERT(typep != NULL);
+
+ if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) {
+ for (i = 0; i < parttbl_rows; i++) {
+ ef10_parttbl_entry_t *entry = &parttbl[i];
+
+ if ((entry->partn == partn) &&
+ (entry->port_mask & (1u << emip->emi_port))) {
+ *typep = entry->nvtype;
+ return (0);
+ }
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_test(
+ __in efx_nic_t *enp)
+{
+ efx_nvram_type_t type;
+ unsigned int npartns = 0;
+ uint32_t *partns = NULL;
+ size_t size;
+ unsigned int i;
+ efx_rc_t rc;
+
+ /* Read available partitions from NVRAM partition map */
+ size = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM * sizeof (uint32_t);
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, partns);
+ if (partns == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_nvram_partitions(enp, (caddr_t)partns, size,
+ &npartns)) != 0) {
+ goto fail2;
+ }
+
+ for (i = 0; i < npartns; i++) {
+ /* Check if the partition is supported for this port */
+ if ((rc = ef10_nvram_partn_to_type(enp, partns[i], &type)) != 0)
+ continue;
+
+ if ((rc = efx_mcdi_nvram_test(enp, partns[i])) != 0)
+ goto fail3;
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, partns);
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+ EFSYS_KMEM_FREE(enp->en_esip, size, partns);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ efx_rc_t rc;
+
+ /* FIXME: get highest partn version from all ports */
+ /* FIXME: return partn description if available */
+
+ if ((rc = efx_mcdi_nvram_metadata(enp, partn, subtypep,
+ version, NULL, 0)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep)
+{
+ uint32_t write_size = 0;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
+ NULL, &write_size)) != 0)
+ goto fail1;
+
+ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
+ goto fail2;
+
+ if (chunk_sizep != NULL) {
+ if (write_size == 0)
+ *chunk_sizep = EF10_NVRAM_CHUNK;
+ else
+ *chunk_sizep = write_size;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *verify_resultp)
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_partn_unlock(enp, partn, verify_resultp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_phy.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_phy.c
new file mode 100644
index 00000000..84acb70a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_phy.c
@@ -0,0 +1,691 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+static void
+mcdi_phy_decode_cap(
+ __in uint32_t mcdi_cap,
+ __out uint32_t *maskp)
+{
+ uint32_t mask;
+
+#define CHECK_CAP(_cap) \
+ EFX_STATIC_ASSERT(EFX_PHY_CAP_##_cap == MC_CMD_PHY_CAP_##_cap##_LBN)
+
+ CHECK_CAP(10HDX);
+ CHECK_CAP(10FDX);
+ CHECK_CAP(100HDX);
+ CHECK_CAP(100FDX);
+ CHECK_CAP(1000HDX);
+ CHECK_CAP(1000FDX);
+ CHECK_CAP(10000FDX);
+ CHECK_CAP(25000FDX);
+ CHECK_CAP(40000FDX);
+ CHECK_CAP(50000FDX);
+ CHECK_CAP(100000FDX);
+ CHECK_CAP(PAUSE);
+ CHECK_CAP(ASYM);
+ CHECK_CAP(AN);
+ CHECK_CAP(DDM);
+ CHECK_CAP(BASER_FEC);
+ CHECK_CAP(BASER_FEC_REQUESTED);
+ CHECK_CAP(RS_FEC);
+ CHECK_CAP(RS_FEC_REQUESTED);
+ CHECK_CAP(25G_BASER_FEC);
+ CHECK_CAP(25G_BASER_FEC_REQUESTED);
+#undef CHECK_CAP
+
+ mask = 0;
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_25000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_40000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_50000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100000FDX);
+
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ mask |= (1 << EFX_PHY_CAP_PAUSE);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ mask |= (1 << EFX_PHY_CAP_ASYM);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mask |= (1 << EFX_PHY_CAP_AN);
+
+ /* FEC caps (supported on Medford2 and later) */
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN))
+ mask |= (1 << EFX_PHY_CAP_BASER_FEC);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN))
+ mask |= (1 << EFX_PHY_CAP_BASER_FEC_REQUESTED);
+
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN))
+ mask |= (1 << EFX_PHY_CAP_RS_FEC);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN))
+ mask |= (1 << EFX_PHY_CAP_RS_FEC_REQUESTED);
+
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN))
+ mask |= (1 << EFX_PHY_CAP_25G_BASER_FEC);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN))
+ mask |= (1 << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED);
+
+ *maskp = mask;
+}
+
+static void
+mcdi_phy_decode_link_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t link_flags,
+ __in unsigned int speed,
+ __in unsigned int fcntl,
+ __out efx_link_mode_t *link_modep,
+ __out unsigned int *fcntlp)
+{
+ boolean_t fd = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ boolean_t up = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (!up)
+ *link_modep = EFX_LINK_DOWN;
+ else if (speed == 100000 && fd)
+ *link_modep = EFX_LINK_100000FDX;
+ else if (speed == 50000 && fd)
+ *link_modep = EFX_LINK_50000FDX;
+ else if (speed == 40000 && fd)
+ *link_modep = EFX_LINK_40000FDX;
+ else if (speed == 25000 && fd)
+ *link_modep = EFX_LINK_25000FDX;
+ else if (speed == 10000 && fd)
+ *link_modep = EFX_LINK_10000FDX;
+ else if (speed == 1000)
+ *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+ else if (speed == 100)
+ *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+ else if (speed == 10)
+ *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+ else
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ if (fcntl == MC_CMD_FCNTL_OFF)
+ *fcntlp = 0;
+ else if (fcntl == MC_CMD_FCNTL_RESPOND)
+ *fcntlp = EFX_FCNTL_RESPOND;
+ else if (fcntl == MC_CMD_FCNTL_GENERATE)
+ *fcntlp = EFX_FCNTL_GENERATE;
+ else if (fcntl == MC_CMD_FCNTL_BIDIR)
+ *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ else {
+ EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+ *fcntlp = 0;
+ }
+}
+
+
+ void
+ef10_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int link_flags;
+ unsigned int speed;
+ unsigned int fcntl;
+ efx_link_mode_t link_mode;
+ uint32_t lp_cap_mask;
+
+ /*
+ * Convert the LINKCHANGE speed enumeration into mbit/s, in the
+ * same way as GET_LINK encodes the speed
+ */
+ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
+ case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+ speed = 100;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+ speed = 1000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+ speed = 10000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_25G:
+ speed = 25000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_40G:
+ speed = 40000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_50G:
+ speed = 50000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_100G:
+ speed = 100000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
+ mcdi_phy_decode_link_mode(enp, link_flags, speed,
+ MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
+ &link_mode, &fcntl);
+ mcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
+ &lp_cap_mask);
+
+ /*
+ * It's safe to update ep_lp_cap_mask without the driver's port lock
+ * because presumably any concurrently running efx_port_poll() is
+ * only going to arrive at the same value.
+ *
+ * ep_fcntl has two meanings. It's either the link common fcntl
+ * (if the PHY supports AN), or it's the forced link state. If
+ * the former, it's safe to update the value for the same reason as
+ * for ep_lp_cap_mask. If the latter, then just ignore the value,
+ * because we can race with efx_mac_fcntl_set().
+ */
+ epp->ep_lp_cap_mask = lp_cap_mask;
+ epp->ep_fcntl = fcntl;
+
+ *link_modep = link_mode;
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t power)
+{
+ efx_rc_t rc;
+
+ if (!power)
+ return (0);
+
+ /* Check if the PHY is a zombie */
+ if ((rc = ef10_phy_verify(enp)) != 0)
+ goto fail1;
+
+ enp->en_reset_flags |= EFX_RESET_PHY;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_get_link(
+ __in efx_nic_t *enp,
+ __out ef10_link_state_t *elsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+ &elsp->els_adv_cap_mask);
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+ &elsp->els_lp_cap_mask);
+
+ mcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+ &elsp->els_link_mode, &elsp->els_fcntl);
+
+#if EFSYS_OPT_LOOPBACK
+ /*
+ * MC_CMD_LOOPBACK and EFX_LOOPBACK names are equivalent, so use the
+ * MCDI value directly. Agreement is checked in efx_loopback_mask().
+ */
+ elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ elsp->els_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN)];
+ uint32_t cap_mask;
+#if EFSYS_OPT_PHY_LED_CONTROL
+ unsigned int led_mode;
+#endif
+ unsigned int speed;
+ boolean_t supported;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_link_control_supported(enp, &supported)) != 0)
+ goto fail1;
+ if (supported == B_FALSE)
+ goto out;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
+
+ cap_mask = epp->ep_adv_cap_mask;
+ MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+ PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+ PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+ PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+ PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+ PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+ PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+ PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+ PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+ PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+ PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+ /* Too many fields for for POPULATE macros, so insert this afterwards */
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_25000FDX, (cap_mask >> EFX_PHY_CAP_25000FDX) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_50000FDX, (cap_mask >> EFX_PHY_CAP_50000FDX) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_100000FDX, (cap_mask >> EFX_PHY_CAP_100000FDX) & 0x1);
+
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_BASER_FEC, (cap_mask >> EFX_PHY_CAP_BASER_FEC) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_BASER_FEC_REQUESTED,
+ (cap_mask >> EFX_PHY_CAP_BASER_FEC_REQUESTED) & 0x1);
+
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_RS_FEC, (cap_mask >> EFX_PHY_CAP_RS_FEC) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_RS_FEC_REQUESTED,
+ (cap_mask >> EFX_PHY_CAP_RS_FEC_REQUESTED) & 0x1);
+
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_25G_BASER_FEC,
+ (cap_mask >> EFX_PHY_CAP_25G_BASER_FEC) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_25G_BASER_FEC_REQUESTED,
+ (cap_mask >> EFX_PHY_CAP_25G_BASER_FEC_REQUESTED) & 0x1);
+
+#if EFSYS_OPT_LOOPBACK
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
+ epp->ep_loopback_type);
+ switch (epp->ep_loopback_link_mode) {
+ case EFX_LINK_100FDX:
+ speed = 100;
+ break;
+ case EFX_LINK_1000FDX:
+ speed = 1000;
+ break;
+ case EFX_LINK_10000FDX:
+ speed = 10000;
+ break;
+ case EFX_LINK_25000FDX:
+ speed = 25000;
+ break;
+ case EFX_LINK_40000FDX:
+ speed = 40000;
+ break;
+ case EFX_LINK_50000FDX:
+ speed = 50000;
+ break;
+ case EFX_LINK_100000FDX:
+ speed = 100000;
+ break;
+ default:
+ speed = 0;
+ }
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+ speed = 0;
+#endif /* EFSYS_OPT_LOOPBACK */
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+#if EFSYS_OPT_PHY_FLAGS
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ /* And set the blink mode */
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_ID_LED;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+ switch (epp->ep_phy_led_mode) {
+ case EFX_PHY_LED_DEFAULT:
+ led_mode = MC_CMD_LED_DEFAULT;
+ break;
+ case EFX_PHY_LED_OFF:
+ led_mode = MC_CMD_LED_OFF;
+ break;
+ case EFX_PHY_LED_ON:
+ led_mode = MC_CMD_LED_ON;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ led_mode = MC_CMD_LED_DEFAULT;
+ }
+
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
+#else
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+out:
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ uint32_t state;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+ if (state != MC_CMD_PHY_STATE_OK) {
+ if (state != MC_CMD_PHY_STATE_ZOMBIE)
+ EFSYS_PROBE1(mc_pcol_error, int, state);
+ rc = ENOTACTIVE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ _NOTE(ARGUNUSED(enp, ouip))
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+ __checkReturn efx_rc_t
+ef10_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ /* TBD: no stats support in firmware yet */
+ _NOTE(ARGUNUSED(enp, esmp))
+ memset(stat, 0, EFX_PHY_NSTATS * sizeof (*stat));
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+ef10_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_enable_offline(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_start(enp, type)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX)];
+ uint32_t value_mask = 0;
+ uint32_t result;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(type))
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_POLL_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MCDI_CTL_SDU_LEN_MAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (count > 0)
+ (void) memset(valuesp, '\0', count * sizeof (unsigned long));
+
+ result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT);
+
+ if (result == MC_CMD_POLL_BIST_FAILED &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MEM_LEN &&
+ count > EFX_BIST_MEM_ECC_FATAL) {
+ if (valuesp != NULL) {
+ valuesp[EFX_BIST_MEM_TEST] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_TEST);
+ valuesp[EFX_BIST_MEM_ADDR] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ADDR);
+ valuesp[EFX_BIST_MEM_BUS] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_BUS);
+ valuesp[EFX_BIST_MEM_EXPECT] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_EXPECT);
+ valuesp[EFX_BIST_MEM_ACTUAL] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ACTUAL);
+ valuesp[EFX_BIST_MEM_ECC] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC);
+ valuesp[EFX_BIST_MEM_ECC_PARITY] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_PARITY);
+ valuesp[EFX_BIST_MEM_ECC_FATAL] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_FATAL);
+ }
+ value_mask |= (1 << EFX_BIST_MEM_TEST) |
+ (1 << EFX_BIST_MEM_ADDR) |
+ (1 << EFX_BIST_MEM_BUS) |
+ (1 << EFX_BIST_MEM_EXPECT) |
+ (1 << EFX_BIST_MEM_ACTUAL) |
+ (1 << EFX_BIST_MEM_ECC) |
+ (1 << EFX_BIST_MEM_ECC_PARITY) |
+ (1 << EFX_BIST_MEM_ECC_FATAL);
+ } else if (result == MC_CMD_POLL_BIST_FAILED &&
+ encp->enc_phy_type == EFX_PHY_XFI_FARMI &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN &&
+ count > EFX_BIST_FAULT_CODE) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_FAULT_CODE] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST);
+ value_mask |= 1 << EFX_BIST_FAULT_CODE;
+ }
+
+ if (value_maskp != NULL)
+ *value_maskp = value_mask;
+
+ EFSYS_ASSERT(resultp != NULL);
+ if (result == MC_CMD_POLL_BIST_RUNNING)
+ *resultp = EFX_BIST_RESULT_RUNNING;
+ else if (result == MC_CMD_POLL_BIST_PASSED)
+ *resultp = EFX_BIST_RESULT_PASSED;
+ else
+ *resultp = EFX_BIST_RESULT_FAILED;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ /* There is no way to stop BIST on EF10. */
+ _NOTE(ARGUNUSED(enp, type))
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_rx.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_rx.c
new file mode 100644
index 00000000..313a3691
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_rx.c
@@ -0,0 +1,1230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_rxq(
+ __in efx_nic_t *enp,
+ __in uint32_t ndescs,
+ __in uint32_t target_evq,
+ __in uint32_t label,
+ __in uint32_t instance,
+ __in efsys_mem_t *esmp,
+ __in boolean_t disable_scatter,
+ __in boolean_t want_inner_classes,
+ __in uint32_t ps_bufsize,
+ __in uint32_t es_bufs_per_desc,
+ __in uint32_t es_max_dma_len,
+ __in uint32_t es_buf_stride,
+ __in uint32_t hol_block_timeout)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_INIT_RXQ_V3_IN_LEN,
+ MC_CMD_INIT_RXQ_V3_OUT_LEN)];
+ int npages = EFX_RXQ_NBUFS(ndescs);
+ int i;
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ efx_rc_t rc;
+ uint32_t dma_mode;
+ boolean_t want_outer_classes;
+
+ EFSYS_ASSERT3U(ndescs, <=, EFX_RXQ_MAXNDESCS);
+
+ if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_RXQ_SIZE(ndescs))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (ps_bufsize > 0)
+ dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
+ else if (es_bufs_per_desc > 0)
+ dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER;
+ else
+ dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
+
+ if (encp->enc_tunnel_encapsulations_supported != 0 &&
+ !want_inner_classes) {
+ /*
+ * WANT_OUTER_CLASSES can only be specified on hardware which
+ * supports tunnel encapsulation offloads, even though it is
+ * effectively the behaviour the hardware gives.
+ *
+ * Also, on hardware which does support such offloads, older
+ * firmware rejects the flag if the offloads are not supported
+ * by the current firmware variant, which means this may fail if
+ * the capabilities are not updated when the firmware variant
+ * changes. This is not an issue on newer firmware, as it was
+ * changed in bug 69842 (v6.4.2.1007) to permit this flag to be
+ * specified on all firmware variants.
+ */
+ want_outer_classes = B_TRUE;
+ } else {
+ want_outer_classes = B_FALSE;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_RXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_RXQ_V3_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_RXQ_V3_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
+ MCDI_IN_POPULATE_DWORD_9(req, INIT_RXQ_EXT_IN_FLAGS,
+ INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
+ INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
+ INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
+ INIT_RXQ_EXT_IN_CRC_MODE, 0,
+ INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter,
+ INIT_RXQ_EXT_IN_DMA_MODE,
+ dma_mode,
+ INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize,
+ INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ if (es_bufs_per_desc > 0) {
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET,
+ es_bufs_per_desc);
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, es_max_dma_len);
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_PACKET_STRIDE, es_buf_stride);
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT,
+ hol_block_timeout);
+ }
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_rxq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN,
+ MC_CMD_FINI_RXQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_RXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ /*
+ * EALREADY is not an error, but indicates that the MC has rebooted and
+ * that the RXQ has already been destroyed.
+ */
+ if (rc != EALREADY)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+efx_mcdi_rss_context_alloc(
+ __in efx_nic_t *enp,
+ __in efx_rx_scale_context_type_t type,
+ __in uint32_t num_queues,
+ __out uint32_t *rss_contextp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN,
+ MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)];
+ uint32_t rss_context;
+ uint32_t context_type;
+ efx_rc_t rc;
+
+ if (num_queues > EFX_MAXRSS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (type) {
+ case EFX_RX_SCALE_EXCLUSIVE:
+ context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE;
+ break;
+ case EFX_RX_SCALE_SHARED:
+ context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, context_type);
+
+ /*
+ * For exclusive contexts, NUM_QUEUES is only used to validate
+ * indirection table offsets.
+ * For shared contexts, the provided context will spread traffic over
+ * NUM_QUEUES many queues.
+ */
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail4;
+ }
+
+ rss_context = MCDI_OUT_DWORD(req, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = ENOENT;
+ goto fail5;
+ }
+
+ *rss_contextp = rss_context;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_free(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_FREE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_FREE_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_FREE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_flags(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in efx_rx_hash_type_t type)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_rx_hash_type_t type_ipv4;
+ efx_rx_hash_type_t type_ipv4_tcp;
+ efx_rx_hash_type_t type_ipv6;
+ efx_rx_hash_type_t type_ipv6_tcp;
+ efx_rx_hash_type_t modes;
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_LBN ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_WIDTH ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_LBN ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_WIDTH ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_LBN ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_WIDTH ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_LBN ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_WIDTH ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH);
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE) | EFX_RX_HASH(IPV4_TCP, 2TUPLE) |
+ EFX_RX_HASH(IPV4_UDP, 2TUPLE);
+ type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
+ type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE) | EFX_RX_HASH(IPV6_TCP, 2TUPLE) |
+ EFX_RX_HASH(IPV6_UDP, 2TUPLE);
+ type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
+
+ /*
+ * Create a copy of the original hash type.
+ * The copy will be used to fill in RSS_MODE bits and
+ * may be cleared beforehand. The original variable
+ * and, thus, EN bits will remain unaffected.
+ */
+ modes = type;
+
+ /*
+ * If the firmware lacks support for additional modes, RSS_MODE
+ * fields must contain zeros, otherwise the operation will fail.
+ */
+ if (encp->enc_rx_scale_additional_modes_supported == B_FALSE)
+ modes = 0;
+
+#define EXTRACT_RSS_MODE(_type, _class) \
+ (EFX_EXTRACT_NATIVE(_type, 0, 31, \
+ EFX_LOW_BIT(EFX_RX_CLASS_##_class), \
+ EFX_HIGH_BIT(EFX_RX_CLASS_##_class)) & \
+ EFX_MASK32(EFX_RX_CLASS_##_class))
+
+ MCDI_IN_POPULATE_DWORD_10(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN,
+ ((type & type_ipv4) == type_ipv4) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN,
+ ((type & type_ipv4_tcp) == type_ipv4_tcp) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN,
+ ((type & type_ipv6) == type_ipv6) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN,
+ ((type & type_ipv6_tcp) == type_ipv6_tcp) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV4_TCP),
+ RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV4_UDP),
+ RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV4),
+ RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV6_TCP),
+ RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV6_UDP),
+ RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV6));
+
+#undef EXTRACT_RSS_MODE
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_key(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ EFSYS_ASSERT3U(n, ==, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ if (n != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ memcpy(MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY),
+ key, n);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_table(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN)];
+ uint8_t *req_table;
+ int i, rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ req_table =
+ MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE);
+
+ for (i = 0;
+ i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN;
+ i++) {
+ req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+
+ __checkReturn efx_rc_t
+ef10_rx_init(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_RX_SCALE
+
+ if (efx_mcdi_rss_context_alloc(enp, EFX_RX_SCALE_EXCLUSIVE, EFX_MAXRSS,
+ &enp->en_rss_context) == 0) {
+ /*
+ * Allocated an exclusive RSS context, which allows both the
+ * indirection table and key to be modified.
+ */
+ enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE;
+ enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
+ } else {
+ /*
+ * Failed to allocate an exclusive RSS context. Continue
+ * operation without support for RSS. The pseudo-header in
+ * received packets will not contain a Toeplitz hash value.
+ */
+ enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE;
+ enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE;
+ }
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+ef10_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ _NOTE(ARGUNUSED(enp, buf_size))
+ return (0);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_context_alloc(
+ __in efx_nic_t *enp,
+ __in efx_rx_scale_context_type_t type,
+ __in uint32_t num_queues,
+ __out uint32_t *rss_contextp)
+{
+ efx_rc_t rc;
+
+ rc = efx_mcdi_rss_context_alloc(enp, type, num_queues, rss_contextp);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_context_free(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context)
+{
+ efx_rc_t rc;
+
+ rc = efx_mcdi_rss_context_free(enp, rss_context);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(insert, ==, B_TRUE);
+
+ if ((encp->enc_rx_scale_hash_alg_mask & (1U << alg)) == 0 ||
+ insert == B_FALSE) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
+ if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ rss_context = enp->en_rss_context;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_flags(enp,
+ rss_context, type)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(EFX_RSS_KEY_SIZE ==
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+
+ if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
+ if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ rss_context = enp->en_rss_context;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_key(enp, rss_context, key, n)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_rc_t rc;
+
+
+ if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
+ if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ rss_context = enp->en_rss_context;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_table(enp,
+ rss_context, table, n)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+
+/*
+ * EF10 RX pseudo-header
+ * ---------------------
+ *
+ * Receive packets are prefixed by an (optional) 14 byte pseudo-header:
+ *
+ * +00: Toeplitz hash value.
+ * (32bit little-endian)
+ * +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag.
+ * (16bit big-endian)
+ * +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag.
+ * (16bit big-endian)
+ * +08: Packet Length. Zero if the RX datapath was in cut-through mode.
+ * (16bit little-endian)
+ * +10: MAC timestamp. Zero if timestamping is not enabled.
+ * (32bit little-endian)
+ *
+ * See "The RX Pseudo-header" in SF-109306-TC.
+ */
+
+ __checkReturn efx_rc_t
+ef10_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ /*
+ * The RX pseudo-header contains the packet length, excluding the
+ * pseudo-header. If the hardware receive datapath was operating in
+ * cut-through mode then the length in the RX pseudo-header will be
+ * zero, and the packet length must be obtained from the DMA length
+ * reported in the RX event.
+ */
+ *lengthp = buffer[8] | (buffer[9] << 8);
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn uint32_t
+ef10_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ switch (func) {
+ case EFX_RX_HASHALG_PACKED_STREAM:
+ case EFX_RX_HASHALG_TOEPLITZ:
+ return (buffer[0] |
+ (buffer[1] << 8) |
+ (buffer[2] << 16) |
+ (buffer[3] << 24));
+
+ default:
+ EFSYS_ASSERT(0);
+ return (0);
+ }
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+/*
+ * Fake length for RXQ descriptors in packed stream mode
+ * to make hardware happy
+ */
+#define EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE 32
+#endif
+
+ void
+ef10_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(ndescs) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_qword_t qword;
+ unsigned int i;
+ unsigned int offset;
+ unsigned int id;
+
+ _NOTE(ARGUNUSED(completed))
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+ /*
+ * Real size of the buffer does not fit into ESF_DZ_RX_KER_BYTE_CNT
+ * and equal to 0 after applying mask. Hardware does not like it.
+ */
+ if (erp->er_ev_qstate->eers_rx_packed_stream)
+ size = EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE;
+#endif
+
+ /* The client driver must not overfill the queue */
+ EFSYS_ASSERT3U(added - completed + ndescs, <=,
+ EFX_RXQ_LIMIT(erp->er_mask + 1));
+
+ id = added & (erp->er_mask);
+ for (i = 0; i < ndescs; i++) {
+ EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
+ unsigned int, id, efsys_dma_addr_t, addrp[i],
+ size_t, size);
+
+ EFX_POPULATE_QWORD_3(qword,
+ ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_RX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addrp[i] & 0xffffffff),
+ ESF_DZ_RX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addrp[i] >> 32));
+
+ offset = id * sizeof (efx_qword_t);
+ EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
+
+ id = (id + 1) & (erp->er_mask);
+ }
+}
+
+ void
+ef10_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ unsigned int pushed = *pushedp;
+ uint32_t wptr;
+ efx_dword_t dword;
+
+ /* Hardware has alignment restriction for WPTR */
+ wptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN);
+ if (pushed == wptr)
+ return;
+
+ *pushedp = wptr;
+
+ /* Push the populated descriptors out */
+ wptr &= erp->er_mask;
+
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr);
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
+ wptr, pushed & erp->er_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+ erp->er_index, &dword, B_FALSE);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+ void
+ef10_rx_qpush_ps_credits(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_dword_t dword;
+ efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate;
+ uint32_t credits;
+
+ EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
+
+ if (rxq_state->eers_rx_packed_stream_credits == 0)
+ return;
+
+ /*
+ * It is a bug if we think that FW has utilized more
+ * credits than it is allowed to have (maximum). However,
+ * make sure that we do not credit more than maximum anyway.
+ */
+ credits = MIN(rxq_state->eers_rx_packed_stream_credits,
+ EFX_RX_PACKED_STREAM_MAX_CREDITS);
+ EFX_POPULATE_DWORD_3(dword,
+ ERF_DZ_RX_DESC_MAGIC_DOORBELL, 1,
+ ERF_DZ_RX_DESC_MAGIC_CMD,
+ ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS,
+ ERF_DZ_RX_DESC_MAGIC_DATA, credits);
+ EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+ erp->er_index, &dword, B_FALSE);
+
+ rxq_state->eers_rx_packed_stream_credits = 0;
+}
+
+/*
+ * In accordance with SF-112241-TC the received data has the following layout:
+ * - 8 byte pseudo-header which consist of:
+ * - 4 byte little-endian timestamp
+ * - 2 byte little-endian captured length in bytes
+ * - 2 byte little-endian original packet length in bytes
+ * - captured packet bytes
+ * - optional padding to align to 64 bytes boundary
+ * - 64 bytes scratch space for the host software
+ */
+ __checkReturn uint8_t *
+ef10_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp)
+{
+ uint16_t buf_len;
+ uint8_t *pkt_start;
+ efx_qword_t *qwordp;
+ efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate;
+
+ EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
+
+ buffer += current_offset;
+ pkt_start = buffer + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE;
+
+ qwordp = (efx_qword_t *)buffer;
+ *timestamp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_TSTAMP);
+ *lengthp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_ORIG_LEN);
+ buf_len = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_CAP_LEN);
+
+ buf_len = P2ROUNDUP(buf_len + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE,
+ EFX_RX_PACKED_STREAM_ALIGNMENT);
+ *next_offsetp =
+ current_offset + buf_len + EFX_RX_PACKED_STREAM_ALIGNMENT;
+
+ EFSYS_ASSERT3U(*next_offsetp, <=, buffer_length);
+ EFSYS_ASSERT3U(current_offset + *lengthp, <, *next_offsetp);
+
+ if ((*next_offsetp ^ current_offset) &
+ EFX_RX_PACKED_STREAM_MEM_PER_CREDIT)
+ rxq_state->eers_rx_packed_stream_credits++;
+
+ return (pkt_start);
+}
+
+
+#endif
+
+ __checkReturn efx_rc_t
+ef10_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ /*
+ * EALREADY is not an error, but indicates that the MC has rebooted and
+ * that the RXQ has already been destroyed. Callers need to know that
+ * the RXQ flush has completed to avoid waiting until timeout for a
+ * flush done event that will not be delivered.
+ */
+ if (rc != EALREADY)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(erp))
+ /* FIXME */
+}
+
+ __checkReturn efx_rc_t
+ef10_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in const efx_rxq_type_data_t *type_data,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+ boolean_t disable_scatter;
+ boolean_t want_inner_classes;
+ unsigned int ps_buf_size;
+ uint32_t es_bufs_per_desc = 0;
+ uint32_t es_max_dma_len = 0;
+ uint32_t es_buf_stride = 0;
+ uint32_t hol_block_timeout = 0;
+
+ _NOTE(ARGUNUSED(id, erp, type_data))
+
+ EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
+ EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
+
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
+
+ if (!ISP2(ndescs) ||
+ (ndescs < EFX_RXQ_MINNDESCS) || (ndescs > EFX_RXQ_MAXNDESCS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_rxq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ ps_buf_size = 0;
+ break;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ case EFX_RXQ_TYPE_PACKED_STREAM:
+ switch (type_data->ertd_packed_stream.eps_buf_size) {
+ case EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M;
+ break;
+ case EFX_RXQ_PACKED_STREAM_BUF_SIZE_512K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K;
+ break;
+ case EFX_RXQ_PACKED_STREAM_BUF_SIZE_256K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K;
+ break;
+ case EFX_RXQ_PACKED_STREAM_BUF_SIZE_128K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K;
+ break;
+ case EFX_RXQ_PACKED_STREAM_BUF_SIZE_64K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail3;
+ }
+ break;
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+ case EFX_RXQ_TYPE_ES_SUPER_BUFFER:
+ ps_buf_size = 0;
+ es_bufs_per_desc =
+ type_data->ertd_es_super_buffer.eessb_bufs_per_desc;
+ es_max_dma_len =
+ type_data->ertd_es_super_buffer.eessb_max_dma_len;
+ es_buf_stride =
+ type_data->ertd_es_super_buffer.eessb_buf_stride;
+ hol_block_timeout =
+ type_data->ertd_es_super_buffer.eessb_hol_block_timeout;
+ break;
+#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
+ default:
+ rc = ENOTSUP;
+ goto fail4;
+ }
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+ if (ps_buf_size != 0) {
+ /* Check if datapath firmware supports packed stream mode */
+ if (encp->enc_rx_packed_stream_supported == B_FALSE) {
+ rc = ENOTSUP;
+ goto fail5;
+ }
+ /* Check if packed stream allows configurable buffer sizes */
+ if ((ps_buf_size != MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M) &&
+ (encp->enc_rx_var_packed_stream_supported == B_FALSE)) {
+ rc = ENOTSUP;
+ goto fail6;
+ }
+ }
+#else /* EFSYS_OPT_RX_PACKED_STREAM */
+ EFSYS_ASSERT(ps_buf_size == 0);
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+ if (es_bufs_per_desc > 0) {
+ if (encp->enc_rx_es_super_buffer_supported == B_FALSE) {
+ rc = ENOTSUP;
+ goto fail7;
+ }
+ if (!IS_P2ALIGNED(es_max_dma_len,
+ EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) {
+ rc = EINVAL;
+ goto fail8;
+ }
+ if (!IS_P2ALIGNED(es_buf_stride,
+ EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) {
+ rc = EINVAL;
+ goto fail9;
+ }
+ }
+#else /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
+ EFSYS_ASSERT(es_bufs_per_desc == 0);
+#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
+
+ /* Scatter can only be disabled if the firmware supports doing so */
+ if (flags & EFX_RXQ_FLAG_SCATTER)
+ disable_scatter = B_FALSE;
+ else
+ disable_scatter = encp->enc_rx_disable_scatter_supported;
+
+ if (flags & EFX_RXQ_FLAG_INNER_CLASSES)
+ want_inner_classes = B_TRUE;
+ else
+ want_inner_classes = B_FALSE;
+
+ if ((rc = efx_mcdi_init_rxq(enp, ndescs, eep->ee_index, label, index,
+ esmp, disable_scatter, want_inner_classes,
+ ps_buf_size, es_bufs_per_desc, es_max_dma_len,
+ es_buf_stride, hol_block_timeout)) != 0)
+ goto fail10;
+
+ erp->er_eep = eep;
+ erp->er_label = label;
+
+ ef10_ev_rxlabel_init(eep, erp, label, type);
+
+ erp->er_ev_qstate = &erp->er_eep->ee_rxq_state[label];
+
+ return (0);
+
+fail10:
+ EFSYS_PROBE(fail10);
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
+#if EFSYS_OPT_RX_PACKED_STREAM
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+fail4:
+ EFSYS_PROBE(fail4);
+#if EFSYS_OPT_RX_PACKED_STREAM
+fail3:
+ EFSYS_PROBE(fail3);
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_evq_t *eep = erp->er_eep;
+ unsigned int label = erp->er_label;
+
+ ef10_ev_rxlabel_fini(eep, label);
+
+ EFSYS_ASSERT(enp->en_rx_qcount != 0);
+ --enp->en_rx_qcount;
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+}
+
+ void
+ef10_rx_fini(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_RX_SCALE
+ if (enp->en_rss_context_type != EFX_RX_SCALE_UNAVAILABLE)
+ (void) efx_mcdi_rss_context_free(enp, enp->en_rss_context);
+ enp->en_rss_context = 0;
+ enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE;
+#else
+ _NOTE(ARGUNUSED(enp))
+#endif /* EFSYS_OPT_RX_SCALE */
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_signed_image_layout.h b/src/spdk/dpdk/drivers/net/sfc/base/ef10_signed_image_layout.h
new file mode 100644
index 00000000..a35d1601
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_signed_image_layout.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+/* These structures define the layouts for the signed firmware image binary
+ * saved in NVRAM. The original image is in the Cryptographic message
+ * syntax (CMS) format which contains the bootable firmware binary plus the
+ * signatures. The entire image is written into NVRAM to enable the firmware
+ * to validate the signatures. However, the bootrom still requires the
+ * bootable-image to start at offset 0 of the NVRAM partition. Hence the image
+ * is parsed upfront by host utilities (sfupdate) and written into nvram as
+ * 'signed_image_chunks' described by a header.
+ *
+ * This file is used by the MC as well as host-utilities (sfupdate).
+ */
+
+
+#ifndef CI_MGMT_SIGNED_IMAGE_LAYOUT_H
+#define CI_MGMT_SIGNED_IMAGE_LAYOUT_H
+
+/* Signed image chunk type identifiers */
+enum {
+ SIGNED_IMAGE_CHUNK_CMS_HEADER, /* CMS header describing the signed data */
+ SIGNED_IMAGE_CHUNK_REFLASH_HEADER, /* Reflash header */
+ SIGNED_IMAGE_CHUNK_IMAGE, /* Bootable binary image */
+ SIGNED_IMAGE_CHUNK_REFLASH_TRAILER, /* Reflash trailer */
+ SIGNED_IMAGE_CHUNK_SIGNATURE, /* Remaining contents of the signed image,
+ * including the certifiates and signature */
+ NUM_SIGNED_IMAGE_CHUNKS,
+};
+
+/* Magic */
+#define SIGNED_IMAGE_CHUNK_HDR_MAGIC 0xEF105161 /* EF10 SIGned Image */
+
+/* Initial version definition - version 1 */
+#define SIGNED_IMAGE_CHUNK_HDR_VERSION 0x1
+
+/* Header length is 32 bytes */
+#define SIGNED_IMAGE_CHUNK_HDR_LEN 32
+/* Structure describing the header of each chunk of signed image
+ * as stored in nvram
+ */
+typedef struct signed_image_chunk_hdr_e {
+ /* Magic field to recognise a valid entry
+ * should match SIGNED_IMAGE_CHUNK_HDR_MAGIC
+ */
+ uint32_t magic;
+ /* Version number of this header */
+ uint32_t version;
+ /* Chunk type identifier */
+ uint32_t id;
+ /* Chunk offset */
+ uint32_t offset;
+ /* Chunk length */
+ uint32_t len;
+ /* Reserved for future expansion of this structure - always set to zeros */
+ uint32_t reserved[3];
+} signed_image_chunk_hdr_t;
+
+#endif /* CI_MGMT_SIGNED_IMAGE_LAYOUT_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_tlv_layout.h b/src/spdk/dpdk/drivers/net/sfc/base/ef10_tlv_layout.h
new file mode 100644
index 00000000..56cffaee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_tlv_layout.h
@@ -0,0 +1,1011 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+/*
+ * This is NOT the original source file. Do NOT edit it.
+ * To update the tlv layout, please edit the copy in
+ * the sfregistry repo and then, in that repo,
+ * "make tlv_headers" or "make export" to
+ * regenerate and export all types of headers.
+ */
+
+/* These structures define the layouts for the TLV items stored in static and
+ * dynamic configuration partitions in NVRAM for EF10 (Huntington etc.).
+ *
+ * They contain the same sort of information that was kept in the
+ * siena_mc_static_config_hdr_t and siena_mc_dynamic_config_hdr_t structures
+ * (defined in <ci/mgmt/mc_flash_layout.h> and <ci/mgmt/mc_dynamic_cfg.h>) for
+ * Siena.
+ *
+ * These are used directly by the MC and should also be usable directly on host
+ * systems which are little-endian and do not do strange things with structure
+ * padding. (Big-endian host systems will require some byte-swapping.)
+ *
+ * -----
+ *
+ * Please refer to SF-108797-SW for a general overview of the TLV partition
+ * format.
+ *
+ * -----
+ *
+ * The current tag IDs have a general structure: with the exception of the
+ * special values defined in the document, they are of the form 0xLTTTNNNN,
+ * where:
+ *
+ * - L is a location, indicating where this tag is expected to be found:
+ * 0: static configuration
+ * 1: dynamic configuration
+ * 2: firmware internal use
+ * 3: license partition
+ * 4: tsa configuration
+ *
+ * - TTT is a type, which is just a unique value. The same type value
+ * might appear in both locations, indicating a relationship between
+ * the items (e.g. static and dynamic VPD below).
+ *
+ * - NNNN is an index of some form. Some item types are per-port, some
+ * are per-PF, some are per-partition-type.
+ *
+ * -----
+ *
+ * As with the previous Siena structures, each structure here is laid out
+ * carefully: values are aligned to their natural boundary, with explicit
+ * padding fields added where necessary. (No, technically this does not
+ * absolutely guarantee portability. But, in practice, compilers are generally
+ * sensible enough not to introduce completely pointless padding, and it works
+ * well enough.)
+ */
+
+
+#ifndef CI_MGMT_TLV_LAYOUT_H
+#define CI_MGMT_TLV_LAYOUT_H
+
+
+/* ----------------------------------------------------------------------------
+ * General structure (defined by SF-108797-SW)
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* The "end" tag.
+ *
+ * (Note that this is *not* followed by length or value fields: anything after
+ * the tag itself is irrelevant.)
+ */
+
+#define TLV_TAG_END (0xEEEEEEEE)
+
+
+/* Other special reserved tag values.
+ */
+
+#define TLV_TAG_SKIP (0x00000000)
+#define TLV_TAG_INVALID (0xFFFFFFFF)
+
+
+/* TLV partition header.
+ *
+ * In a TLV partition, this must be the first item in the sequence, at offset
+ * 0.
+ */
+
+#define TLV_TAG_PARTITION_HEADER (0xEF10DA7A)
+
+struct tlv_partition_header {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t type_id;
+/* 0 indicates the default segment (always located at offset 0), while other values
+ * are for RFID-selectable presets that should immediately follow the default segment.
+ * The default segment may also have preset > 0, which means that it is a preset
+ * selected through an RFID command and copied by FW to the location at offset 0. */
+ uint16_t preset;
+ uint32_t generation;
+ uint32_t total_length;
+};
+
+
+/* TLV partition trailer.
+ *
+ * In a TLV partition, this must be the last item in the sequence, immediately
+ * preceding the TLV_TAG_END word.
+ */
+
+#define TLV_TAG_PARTITION_TRAILER (0xEF101A57)
+
+struct tlv_partition_trailer {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t generation;
+ uint32_t checksum;
+};
+
+
+/* Appendable TLV partition header.
+ *
+ * In an appendable TLV partition, this must be the first item in the sequence,
+ * at offset 0. (Note that, unlike the configuration partitions, there is no
+ * trailer before the TLV_TAG_END word.)
+ */
+
+#define TLV_TAG_APPENDABLE_PARTITION_HEADER (0xEF10ADA7)
+
+struct tlv_appendable_partition_header {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t type_id;
+ uint16_t reserved;
+};
+
+
+/* ----------------------------------------------------------------------------
+ * Configuration items
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* NIC global capabilities.
+ */
+
+#define TLV_TAG_GLOBAL_CAPABILITIES (0x00010000)
+
+struct tlv_global_capabilities {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t flags;
+};
+
+
+/* Siena-style per-port MAC address allocation.
+ *
+ * There are <count> addresses, starting at <base_address> and incrementing
+ * by adding <stride> to the low-order byte(s).
+ *
+ * (See also TLV_TAG_GLOBAL_MAC for an alternative, specifying a global pool
+ * of contiguous MAC addresses for the firmware to allocate as it sees fit.)
+ */
+
+#define TLV_TAG_PORT_MAC(port) (0x00020000 + (port))
+
+struct tlv_port_mac {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t base_address[6];
+ uint16_t reserved;
+ uint16_t count;
+ uint16_t stride;
+};
+
+
+/* Static VPD.
+ *
+ * This is the portion of VPD which is set at manufacturing time and not
+ * expected to change. It is formatted as a standard PCI VPD block. There are
+ * global and per-pf TLVs for this, the global TLV is new for Medford and is
+ * used in preference to the per-pf TLV.
+ */
+
+#define TLV_TAG_PF_STATIC_VPD(pf) (0x00030000 + (pf))
+
+struct tlv_pf_static_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+#define TLV_TAG_GLOBAL_STATIC_VPD (0x001f0000)
+
+struct tlv_global_static_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+
+/* Dynamic VPD.
+ *
+ * This is the portion of VPD which may be changed (e.g. by firmware updates).
+ * It is formatted as a standard PCI VPD block. There are global and per-pf TLVs
+ * for this, the global TLV is new for Medford and is used in preference to the
+ * per-pf TLV.
+ */
+
+#define TLV_TAG_PF_DYNAMIC_VPD(pf) (0x10030000 + (pf))
+
+struct tlv_pf_dynamic_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+#define TLV_TAG_GLOBAL_DYNAMIC_VPD (0x10200000)
+
+struct tlv_global_dynamic_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+
+/* "DBI" PCI config space changes.
+ *
+ * This is a set of edits made to the default PCI config space values before
+ * the device is allowed to enumerate. There are global and per-pf TLVs for
+ * this, the global TLV is new for Medford and is used in preference to the
+ * per-pf TLV.
+ */
+
+#define TLV_TAG_PF_DBI(pf) (0x00040000 + (pf))
+
+struct tlv_pf_dbi {
+ uint32_t tag;
+ uint32_t length;
+ struct {
+ uint16_t addr;
+ uint16_t byte_enables;
+ uint32_t value;
+ } items[];
+};
+
+
+#define TLV_TAG_GLOBAL_DBI (0x00210000)
+
+struct tlv_global_dbi {
+ uint32_t tag;
+ uint32_t length;
+ struct {
+ uint16_t addr;
+ uint16_t byte_enables;
+ uint32_t value;
+ } items[];
+};
+
+
+/* Partition subtype codes.
+ *
+ * A subtype may optionally be stored for each type of partition present in
+ * the NVRAM. For example, this may be used to allow a generic firmware update
+ * utility to select a specific variant of firmware for a specific variant of
+ * board.
+ *
+ * The description[] field is an optional string which is returned in the
+ * MC_CMD_NVRAM_METADATA response if present.
+ */
+
+#define TLV_TAG_PARTITION_SUBTYPE(type) (0x00050000 + (type))
+
+struct tlv_partition_subtype {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t subtype;
+ uint8_t description[];
+};
+
+
+/* Partition version codes.
+ *
+ * A version may optionally be stored for each type of partition present in
+ * the NVRAM. This provides a standard way of tracking the currently stored
+ * version of each of the various component images.
+ */
+
+#define TLV_TAG_PARTITION_VERSION(type) (0x10060000 + (type))
+
+struct tlv_partition_version {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t version_w;
+ uint16_t version_x;
+ uint16_t version_y;
+ uint16_t version_z;
+};
+
+/* Global PCIe configuration */
+
+#define TLV_TAG_GLOBAL_PCIE_CONFIG (0x10070000)
+
+struct tlv_pcie_config {
+ uint32_t tag;
+ uint32_t length;
+ int16_t max_pf_number; /**< Largest PF RID (lower PFs may be hidden) */
+ uint16_t pf_aper; /**< BIU aperture for PF BAR2 */
+ uint16_t vf_aper; /**< BIU aperture for VF BAR0 */
+ uint16_t int_aper; /**< BIU aperture for PF BAR4 and VF BAR2 */
+#define TLV_MAX_PF_DEFAULT (-1) /* Use FW default for largest PF RID */
+#define TLV_APER_DEFAULT (0xFFFF) /* Use FW default for a given aperture */
+};
+
+/* Per-PF configuration. Note that not all these fields are necessarily useful
+ * as the apertures are constrained by the BIU settings (the one case we do
+ * use is to make BAR2 bigger than the BIU thinks to reserve space), but we can
+ * tidy things up later */
+
+#define TLV_TAG_PF_PCIE_CONFIG(pf) (0x10080000 + (pf))
+
+struct tlv_per_pf_pcie_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t vfs_total;
+ uint8_t port_allocation;
+ uint16_t vectors_per_pf;
+ uint16_t vectors_per_vf;
+ uint8_t pf_bar0_aperture;
+ uint8_t pf_bar2_aperture;
+ uint8_t vf_bar0_aperture;
+ uint8_t vf_base;
+ uint16_t supp_pagesz;
+ uint16_t msix_vec_base;
+};
+
+
+/* Development ONLY. This is a single TLV tag for all the gubbins
+ * that can be set through the MC command-line other than the PCIe
+ * settings. This is a temporary measure. */
+#define TLV_TAG_TMP_GUBBINS (0x10090000) /* legacy symbol - do not use */
+#define TLV_TAG_TMP_GUBBINS_HUNT TLV_TAG_TMP_GUBBINS
+
+struct tlv_tmp_gubbins {
+ uint32_t tag;
+ uint32_t length;
+ /* Consumed by dpcpu.c */
+ uint64_t tx0_tags; /* Bitmap */
+ uint64_t tx1_tags; /* Bitmap */
+ uint64_t dl_tags; /* Bitmap */
+ uint32_t flags;
+#define TLV_DPCPU_TX_STRIPE (1) /* No longer used, has no effect */
+#define TLV_DPCPU_BIU_TAGS (2) /* Use BIU tag manager */
+#define TLV_DPCPU_TX0_TAGS (4) /* tx0_tags is valid */
+#define TLV_DPCPU_TX1_TAGS (8) /* tx1_tags is valid */
+#define TLV_DPCPU_DL_TAGS (16) /* dl_tags is valid */
+ /* Consumed by features.c */
+ uint32_t dut_features; /* All 1s -> leave alone */
+ int8_t with_rmon; /* 0 -> off, 1 -> on, -1 -> leave alone */
+ /* Consumed by clocks_hunt.c */
+ int8_t clk_mode; /* 0 -> off, 1 -> on, -1 -> leave alone */
+ /* No longer used, superseded by TLV_TAG_DESCRIPTOR_CACHE_CONFIG. */
+ int8_t rx_dc_size; /* -1 -> leave alone */
+ int8_t tx_dc_size;
+ int16_t num_q_allocs;
+};
+
+/* Global port configuration
+ *
+ * This is now deprecated in favour of a platform-provided default
+ * and dynamic config override via tlv_global_port_options.
+ */
+#define TLV_TAG_GLOBAL_PORT_CONFIG (0x000a0000)
+
+struct tlv_global_port_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t ports_per_core;
+ uint32_t max_port_speed;
+};
+
+
+/* Firmware options.
+ *
+ * This is intended for user-configurable selection of optional firmware
+ * features and variants.
+ *
+ * Initially, this consists only of the satellite CPU firmware variant
+ * selection, but this tag could be extended in the future (using the
+ * tag length to determine whether additional fields are present).
+ */
+
+#define TLV_TAG_FIRMWARE_OPTIONS (0x100b0000)
+
+struct tlv_firmware_options {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t firmware_variant;
+#define TLV_FIRMWARE_VARIANT_DRIVER_SELECTED (0xffffffff)
+
+/* These are the values for overriding the driver's choice; the definitions
+ * are taken from MCDI so that they don't get out of step. Include
+ * <ci/mgmt/mc_driver_pcol.h> or the equivalent from your driver's tree if
+ * you need to use these constants.
+ */
+#define TLV_FIRMWARE_VARIANT_FULL_FEATURED MC_CMD_FW_FULL_FEATURED
+#define TLV_FIRMWARE_VARIANT_LOW_LATENCY MC_CMD_FW_LOW_LATENCY
+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM MC_CMD_FW_PACKED_STREAM
+#define TLV_FIRMWARE_VARIANT_HIGH_TX_RATE MC_CMD_FW_HIGH_TX_RATE
+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM_HASH_MODE_1 \
+ MC_CMD_FW_PACKED_STREAM_HASH_MODE_1
+#define TLV_FIRMWARE_VARIANT_RULES_ENGINE MC_CMD_FW_RULES_ENGINE
+#define TLV_FIRMWARE_VARIANT_DPDK MC_CMD_FW_DPDK
+#define TLV_FIRMWARE_VARIANT_L3XUDP MC_CMD_FW_L3XUDP
+};
+
+/* Voltage settings
+ *
+ * Intended for boards with A0 silicon where the core voltage may
+ * need tweaking. Most likely set once when the pass voltage is
+ * determined. */
+
+#define TLV_TAG_0V9_SETTINGS (0x000c0000)
+
+struct tlv_0v9_settings {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t flags; /* Boards with high 0v9 settings may need active cooling */
+#define TLV_TAG_0V9_REQUIRES_FAN (1)
+ uint16_t target_voltage; /* In millivolts */
+ /* Since the limits are meant to be centred to the target (and must at least
+ * contain it) they need setting as well. */
+ uint16_t warn_low; /* In millivolts */
+ uint16_t warn_high; /* In millivolts */
+ uint16_t panic_low; /* In millivolts */
+ uint16_t panic_high; /* In millivolts */
+};
+
+
+/* Clock configuration */
+
+#define TLV_TAG_CLOCK_CONFIG (0x000d0000) /* legacy symbol - do not use */
+#define TLV_TAG_CLOCK_CONFIG_HUNT TLV_TAG_CLOCK_CONFIG
+
+struct tlv_clock_config {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t clk_sys; /* MHz */
+ uint16_t clk_dpcpu; /* MHz */
+ uint16_t clk_icore; /* MHz */
+ uint16_t clk_pcs; /* MHz */
+};
+
+#define TLV_TAG_CLOCK_CONFIG_MEDFORD (0x00100000)
+
+struct tlv_clock_config_medford {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t clk_sys; /* MHz */
+ uint16_t clk_mc; /* MHz */
+ uint16_t clk_rmon; /* MHz */
+ uint16_t clk_vswitch; /* MHz */
+ uint16_t clk_dpcpu; /* MHz */
+ uint16_t clk_pcs; /* MHz */
+};
+
+
+/* EF10-style global pool of MAC addresses.
+ *
+ * There are <count> addresses, starting at <base_address>, which are
+ * contiguous. Firmware is responsible for allocating addresses from this
+ * pool to ports / PFs as appropriate.
+ */
+
+#define TLV_TAG_GLOBAL_MAC (0x000e0000)
+
+struct tlv_global_mac {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t base_address[6];
+ uint16_t reserved1;
+ uint16_t count;
+ uint16_t reserved2;
+};
+
+#define TLV_TAG_ATB_0V9_TARGET (0x000f0000) /* legacy symbol - do not use */
+#define TLV_TAG_ATB_0V9_TARGET_HUNT TLV_TAG_ATB_0V9_TARGET
+
+/* The target value for the 0v9 power rail measured on-chip at the
+ * analogue test bus */
+struct tlv_0v9_atb_target {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t millivolts;
+ uint16_t reserved;
+};
+
+/* Factory settings for amplitude calibration of the PCIE TX serdes */
+#define TLV_TAG_TX_PCIE_AMP_CONFIG (0x00220000)
+struct tlv_pcie_tx_amp_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t quad_tx_imp2k[4];
+ uint8_t quad_tx_imp50[4];
+ uint8_t lane_amp[16];
+};
+
+
+/* Global PCIe configuration, second revision. This represents the visible PFs
+ * by a bitmap rather than having the number of the highest visible one. As such
+ * it can (for a 16-PF chip) represent a superset of what TLV_TAG_GLOBAL_PCIE_CONFIG
+ * can and it should be used in place of that tag in future (but compatibility with
+ * the old tag will be left in the firmware indefinitely). */
+
+#define TLV_TAG_GLOBAL_PCIE_CONFIG_R2 (0x10100000)
+
+struct tlv_pcie_config_r2 {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t visible_pfs; /**< Bitmap of visible PFs */
+ uint16_t pf_aper; /**< BIU aperture for PF BAR2 */
+ uint16_t vf_aper; /**< BIU aperture for VF BAR0 */
+ uint16_t int_aper; /**< BIU aperture for PF BAR4 and VF BAR2 */
+};
+
+/* Dynamic port mode.
+ *
+ * Allows selecting alternate port configuration for platforms that support it
+ * (e.g. 1x40G vs 2x10G on Milano, 1x40G vs 4x10G on Medford). This affects the
+ * number of externally visible ports (and, hence, PF to port mapping), so must
+ * be done at boot time.
+ *
+ * Port mode naming convention is
+ *
+ * [nports_on_cage0]x[port_lane_width]_[nports_on_cage1]x[port_lane_width]
+ *
+ * Port lane width determines the capabilities (speeds) of the ports, subject
+ * to architecture capabilities (e.g. 25G support) and switch bandwidth
+ * constraints:
+ * - single lane ports can do 25G/10G/1G
+ * - dual lane ports can do 50G/25G/10G/1G (with fallback to 1 lane)
+ * - quad lane ports can do 100G/40G/50G/25G/10G/1G (with fallback to 2 or 1 lanes)
+
+ * This tag supercedes tlv_global_port_config.
+ */
+
+#define TLV_TAG_GLOBAL_PORT_MODE (0x10110000)
+
+struct tlv_global_port_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t port_mode;
+#define TLV_PORT_MODE_DEFAULT (0xffffffff) /* Default for given platform */
+
+/* Huntington port modes */
+#define TLV_PORT_MODE_10G (0)
+#define TLV_PORT_MODE_40G (1)
+#define TLV_PORT_MODE_10G_10G (2)
+#define TLV_PORT_MODE_40G_40G (3)
+#define TLV_PORT_MODE_10G_10G_10G_10G (4)
+#define TLV_PORT_MODE_40G_10G_10G (6)
+#define TLV_PORT_MODE_10G_10G_40G (7)
+
+/* Medford (and later) port modes */
+#define TLV_PORT_MODE_1x1_NA (0) /* Single 10G/25G on mdi0 */
+#define TLV_PORT_MODE_1x4_NA (1) /* Single 100G/40G on mdi0 */
+#define TLV_PORT_MODE_NA_1x4 (22) /* Single 100G/40G on mdi1 */
+#define TLV_PORT_MODE_1x2_NA (10) /* Single 50G on mdi0 */
+#define TLV_PORT_MODE_NA_1x2 (11) /* Single 50G on mdi1 */
+#define TLV_PORT_MODE_1x1_1x1 (2) /* Single 10G/25G on mdi0, single 10G/25G on mdi1 */
+#define TLV_PORT_MODE_1x4_1x4 (3) /* Single 40G on mdi0, single 40G on mdi1 */
+#define TLV_PORT_MODE_2x1_2x1 (5) /* Dual 10G/25G on mdi0, dual 10G/25G on mdi1 */
+#define TLV_PORT_MODE_4x1_NA (4) /* Quad 10G/25G on mdi0 */
+#define TLV_PORT_MODE_NA_4x1 (8) /* Quad 10G/25G on mdi1 */
+#define TLV_PORT_MODE_1x4_2x1 (6) /* Single 40G on mdi0, dual 10G/25G on mdi1 */
+#define TLV_PORT_MODE_2x1_1x4 (7) /* Dual 10G/25G on mdi0, single 40G on mdi1 */
+#define TLV_PORT_MODE_1x2_1x2 (12) /* Single 50G on mdi0, single 50G on mdi1 */
+#define TLV_PORT_MODE_2x2_NA (13) /* Dual 50G on mdi0 */
+#define TLV_PORT_MODE_NA_2x2 (14) /* Dual 50G on mdi1 */
+#define TLV_PORT_MODE_1x4_1x2 (15) /* Single 40G on mdi0, single 50G on mdi1 */
+#define TLV_PORT_MODE_1x2_1x4 (16) /* Single 50G on mdi0, single 40G on mdi1 */
+#define TLV_PORT_MODE_1x2_2x1 (17) /* Single 50G on mdi0, dual 10G/25G on mdi1 */
+#define TLV_PORT_MODE_2x1_1x2 (18) /* Dual 10G/25G on mdi0, single 50G on mdi1 */
+
+/* Snapper-only Medford2 port modes.
+ * These modes are eftest only, to allow snapper explicit
+ * selection between multi-channel and LLPCS. In production,
+ * this selection is automatic and outside world should not
+ * care about LLPCS.
+ */
+#define TLV_PORT_MODE_2x1_2x1_LL (19) /* Dual 10G/25G on mdi0, dual 10G/25G on mdi1, low-latency PCS */
+#define TLV_PORT_MODE_4x1_NA_LL (20) /* Quad 10G/25G on mdi0, low-latency PCS */
+#define TLV_PORT_MODE_NA_4x1_LL (21) /* Quad 10G/25G on mdi1, low-latency PCS */
+#define TLV_PORT_MODE_1x1_NA_LL (23) /* Single 10G/25G on mdi0, low-latency PCS */
+#define TLV_PORT_MODE_1x1_1x1_LL (24) /* Single 10G/25G on mdi0, single 10G/25G on mdi1, low-latency PCS */
+#define TLV_PORT_MODE_BUG63720_DO_NOT_USE (9) /* bug63720: Do not use */
+#define TLV_PORT_MODE_MAX TLV_PORT_MODE_1x1_1x1_LL
+
+/* Deprecated Medford aliases - DO NOT USE IN NEW CODE */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5)
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1 (4)
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8)
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 (9)
+
+#define TLV_PORT_MODE_MAX TLV_PORT_MODE_1x1_1x1_LL
+};
+
+/* Type of the v-switch created implicitly by the firmware */
+
+#define TLV_TAG_VSWITCH_TYPE(port) (0x10120000 + (port))
+
+struct tlv_vswitch_type {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t vswitch_type;
+#define TLV_VSWITCH_TYPE_DEFAULT (0xffffffff) /* Firmware default; equivalent to no TLV present for a given port */
+#define TLV_VSWITCH_TYPE_NONE (0)
+#define TLV_VSWITCH_TYPE_VLAN (1)
+#define TLV_VSWITCH_TYPE_VEB (2)
+#define TLV_VSWITCH_TYPE_VEPA (3)
+#define TLV_VSWITCH_TYPE_MUX (4)
+#define TLV_VSWITCH_TYPE_TEST (5)
+};
+
+/* A VLAN tag for the v-port created implicitly by the firmware */
+
+#define TLV_TAG_VPORT_VLAN_TAG(pf) (0x10130000 + (pf))
+
+struct tlv_vport_vlan_tag {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t vlan_tag;
+#define TLV_VPORT_NO_VLAN_TAG (0xFFFFFFFF) /* Default in the absence of TLV for a given PF */
+};
+
+/* Offset to be applied to the 0v9 setting, wherever it came from */
+
+#define TLV_TAG_ATB_0V9_OFFSET (0x10140000)
+
+struct tlv_0v9_atb_offset {
+ uint32_t tag;
+ uint32_t length;
+ int16_t offset_millivolts;
+ uint16_t reserved;
+};
+
+/* A privilege mask given on reset to all non-admin PCIe functions (that is other than first-PF-per-port).
+ * The meaning of particular bits is defined in mcdi_ef10.yml under MC_CMD_PRIVILEGE_MASK, see also bug 44583.
+ * TLV_TAG_PRIVILEGE_MASK_ADD specifies bits that should be added (ORed) to firmware default while
+ * TLV_TAG_PRIVILEGE_MASK_REM specifies bits that should be removed (ANDed) from firmware default:
+ * Initial_privilege_mask = (firmware_default_mask | privilege_mask_add) & ~privilege_mask_rem */
+
+#define TLV_TAG_PRIVILEGE_MASK (0x10150000) /* legacy symbol - do not use */
+
+struct tlv_privilege_mask { /* legacy structure - do not use */
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask;
+};
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD (0x10150000)
+
+struct tlv_privilege_mask_add {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+#define TLV_TAG_PRIVILEGE_MASK_REM (0x10160000)
+
+struct tlv_privilege_mask_rem {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_rem;
+};
+
+/* Additional privileges given to all PFs.
+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD_ALL_PFS (0x10190000)
+
+struct tlv_privilege_mask_add_all_pfs {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+/* Additional privileges given to a selected PF.
+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD_SINGLE_PF(pf) (0x101A0000 + (pf))
+
+struct tlv_privilege_mask_add_single_pf {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+/* Turning on/off the PFIOV mode.
+ * This tag only takes effect if TLV_TAG_VSWITCH_TYPE is missing or set to DEFAULT. */
+
+#define TLV_TAG_PFIOV(port) (0x10170000 + (port))
+
+struct tlv_pfiov {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t pfiov;
+#define TLV_PFIOV_OFF (0) /* Default */
+#define TLV_PFIOV_ON (1)
+};
+
+/* Multicast filter chaining mode selection.
+ *
+ * When enabled, multicast packets are delivered to all recipients of all
+ * matching multicast filters, with the exception that IP multicast filters
+ * will steal traffic from MAC multicast filters on a per-function basis.
+ * (New behaviour.)
+ *
+ * When disabled, multicast packets will always be delivered only to the
+ * recipients of the highest priority matching multicast filter.
+ * (Legacy behaviour.)
+ *
+ * The DEFAULT mode (which is the same as the tag not being present at all)
+ * is equivalent to ENABLED in production builds, and DISABLED in eftest
+ * builds.
+ *
+ * This option is intended to provide run-time control over this feature
+ * while it is being stabilised and may be withdrawn at some point in the
+ * future; the new behaviour is intended to become the standard behaviour.
+ */
+
+#define TLV_TAG_MCAST_FILTER_CHAINING (0x10180000)
+
+struct tlv_mcast_filter_chaining {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+#define TLV_MCAST_FILTER_CHAINING_DEFAULT (0xffffffff)
+#define TLV_MCAST_FILTER_CHAINING_DISABLED (0)
+#define TLV_MCAST_FILTER_CHAINING_ENABLED (1)
+};
+
+/* Pacer rate limit per PF */
+#define TLV_TAG_RATE_LIMIT(pf) (0x101b0000 + (pf))
+
+struct tlv_rate_limit {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t rate_mbps;
+};
+
+/* OCSD Enable/Disable
+ *
+ * This setting allows OCSD to be disabled. This is a requirement for HP
+ * servers to support PCI passthrough for virtualization.
+ *
+ * The DEFAULT mode (which is the same as the tag not being present) is
+ * equivalent to ENABLED.
+ *
+ * This option is not used by the MCFW, and is entirely handled by the various
+ * drivers that support OCSD, by reading the setting before they attempt
+ * to enable OCSD.
+ *
+ * bit0: OCSD Disabled/Enabled
+ */
+
+#define TLV_TAG_OCSD (0x101C0000)
+
+struct tlv_ocsd {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+#define TLV_OCSD_DISABLED 0
+#define TLV_OCSD_ENABLED 1 /* Default */
+};
+
+/* Descriptor cache config.
+ *
+ * Sets the sizes of the TX and RX descriptor caches as a power of 2. It also
+ * sets the total number of VIs. When the number of VIs is reduced VIs are taken
+ * away from the highest numbered port first, so a vi_count of 1024 means 1024
+ * VIs on the first port and 0 on the second (on a Torino).
+ */
+
+#define TLV_TAG_DESCRIPTOR_CACHE_CONFIG (0x101d0000)
+
+struct tlv_descriptor_cache_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t rx_desc_cache_size;
+ uint8_t tx_desc_cache_size;
+ uint16_t vi_count;
+};
+#define TLV_DESC_CACHE_DEFAULT (0xff)
+#define TLV_VI_COUNT_DEFAULT (0xffff)
+
+/* RX event merging config (read batching).
+ *
+ * Sets the global maximum number of events for the merging bins, and the
+ * global timeout configuration for the bins.
+ */
+
+#define TLV_TAG_RX_EVENT_MERGING_CONFIG (0x101e0000)
+
+struct tlv_rx_event_merging_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t max_events;
+#define TLV_RX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)
+ uint32_t timeout_ns;
+};
+#define TLV_RX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff)
+#define TLV_RX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff)
+
+#define TLV_TAG_PCIE_LINK_SETTINGS (0x101f0000)
+struct tlv_pcie_link_settings {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t gen; /* Target PCIe generation: 1, 2, 3 */
+ uint16_t width; /* Number of lanes */
+};
+
+/* TX event merging config.
+ *
+ * Sets the global maximum number of events for the merging bins, and the
+ * global timeout configuration for the bins, and the global timeout for
+ * empty queues.
+ */
+#define TLV_TAG_TX_EVENT_MERGING_CONFIG (0x10210000)
+struct tlv_tx_event_merging_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t max_events;
+#define TLV_TX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)
+ uint32_t timeout_ns;
+ uint32_t qempty_timeout_ns; /* Medford only */
+};
+#define TLV_TX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff)
+#define TLV_TX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff)
+#define TLV_TX_EVENT_MERGING_QEMPTY_TIMEOUT_NS_DEFAULT (0xffffffff)
+
+#define TLV_TAG_LICENSE (0x30800000)
+
+typedef struct tlv_license {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t data[];
+} tlv_license_t;
+
+/* TSA NIC IP address configuration (DEPRECATED)
+ *
+ * Sets the TSA NIC IP address statically via configuration tool or dynamically
+ * via DHCP via snooping based on the mode selection (0=Static, 1=DHCP, 2=Snoop)
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000) /* DEPRECATED */
+
+#define TLV_TSAN_IP_MODE_STATIC (0)
+#define TLV_TSAN_IP_MODE_DHCP (1)
+#define TLV_TSAN_IP_MODE_SNOOP (2)
+typedef struct tlv_tsan_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+ uint32_t ip;
+ uint32_t netmask;
+ uint32_t gateway;
+ uint32_t port;
+ uint32_t bind_retry; /* DEPRECATED */
+ uint32_t bind_bkout; /* DEPRECATED */
+} tlv_tsan_config_t;
+
+/* TSA Controller IP address configuration (DEPRECATED)
+ *
+ * Sets the TSA Controller IP address statically via configuration tool
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000) /* DEPRECATED */
+
+#define TLV_MAX_TSACS (4)
+typedef struct tlv_tsac_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t num_tsacs;
+ uint32_t ip[TLV_MAX_TSACS];
+ uint32_t port[TLV_MAX_TSACS];
+} tlv_tsac_config_t;
+
+/* Binding ticket (DEPRECATED)
+ *
+ * Sets the TSA NIC binding ticket used for binding process between the TSA NIC
+ * and the TSA Controller
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_BINDING_TICKET (0x10240000) /* DEPRECATED */
+
+typedef struct tlv_binding_ticket {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_binding_ticket_t;
+
+/* Solarflare private key (DEPRECATED)
+ *
+ * Sets the Solareflare private key used for signing during the binding process
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_PIK_SF (0x10250000) /* DEPRECATED */
+
+typedef struct tlv_pik_sf {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_pik_sf_t;
+
+/* CA root certificate (DEPRECATED)
+ *
+ * Sets the CA root certificate used for TSA Controller verfication during
+ * TLS connection setup between the TSA NIC and the TSA Controller
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000) /* DEPRECATED */
+
+typedef struct tlv_ca_root_cert {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_ca_root_cert_t;
+
+/* Tx vFIFO Low latency configuration
+ *
+ * To keep the desired booting behaviour for the switch, it just requires to
+ * know if the low latency mode is enabled.
+ */
+
+#define TLV_TAG_TX_VFIFO_ULL_MODE (0x10270000)
+struct tlv_tx_vfifo_ull_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t mode;
+#define TLV_TX_VFIFO_ULL_MODE_DEFAULT 0
+};
+
+/* BIU mode
+ *
+ * Medford2 tag for selecting VI window decode (see values below)
+ */
+#define TLV_TAG_BIU_VI_WINDOW_MODE (0x10280000)
+struct tlv_biu_vi_window_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t mode;
+#define TLV_BIU_VI_WINDOW_MODE_8K 0 /* 8k per VI, CTPIO not mapped, medford/hunt compatible */
+#define TLV_BIU_VI_WINDOW_MODE_16K 1 /* 16k per VI, CTPIO mapped */
+#define TLV_BIU_VI_WINDOW_MODE_64K 2 /* 64k per VI, CTPIO mapped, POWER-friendly */
+};
+
+/* FastPD mode
+ *
+ * Medford2 tag for configuring the FastPD mode (see values below)
+ */
+#define TLV_TAG_FASTPD_MODE(port) (0x10290000 + (port))
+struct tlv_fastpd_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t mode;
+#define TLV_FASTPD_MODE_SOFT_ALL 0 /* All packets to the SoftPD */
+#define TLV_FASTPD_MODE_FAST_ALL 1 /* All packets to the FastPD */
+#define TLV_FASTPD_MODE_FAST_SUPPORTED 2 /* Supported packet types to the FastPD; everything else to the SoftPD */
+};
+
+/* L3xUDP datapath firmware UDP port configuration
+ *
+ * Sets the list of UDP ports on which the encapsulation will be handled.
+ * The number of ports in the list is implied by the length of the TLV item.
+ */
+#define TLV_TAG_L3XUDP_PORTS (0x102a0000)
+struct tlv_l3xudp_ports {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t ports[];
+#define TLV_TAG_L3XUDP_PORTS_MAX_NUM_PORTS 16
+};
+
+#endif /* CI_MGMT_TLV_LAYOUT_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_tx.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_tx.c
new file mode 100644
index 00000000..7d27f710
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_tx.c
@@ -0,0 +1,778 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+#if EFSYS_OPT_QSTATS
+#define EFX_TX_QSTAT_INCR(_etp, _stat) \
+ do { \
+ (_etp)->et_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_TX_QSTAT_INCR(_etp, _stat)
+#endif
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_txq(
+ __in efx_nic_t *enp,
+ __in uint32_t ndescs,
+ __in uint32_t target_evq,
+ __in uint32_t label,
+ __in uint32_t instance,
+ __in uint16_t flags,
+ __in efsys_mem_t *esmp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
+ MC_CMD_INIT_TXQ_OUT_LEN)];
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
+ EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));
+
+ if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_TXQ_SIZE(ndescs))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ npages = EFX_TXQ_NBUFS(ndescs);
+ if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_TXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
+
+ MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS,
+ INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
+ INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN,
+ (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0,
+ INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN,
+ (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
+ INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
+ INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
+ INIT_TXQ_IN_CRC_MODE, 0,
+ INIT_TXQ_IN_FLAG_TIMESTAMP, 0);
+
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_txq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
+ MC_CMD_FINI_TXQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_TXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ /*
+ * EALREADY is not an error, but indicates that the MC has rebooted and
+ * that the TXQ has already been destroyed.
+ */
+ if (rc != EALREADY)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_init(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+}
+
+ void
+ef10_tx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ uint16_t inner_csum;
+ efx_desc_t desc;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(id))
+
+ inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP;
+ if (((flags & inner_csum) != 0) &&
+ (encp->enc_tunnel_encapsulations_supported == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_init_txq(enp, ndescs, eep->ee_index, label, index,
+ flags, esmp)) != 0)
+ goto fail2;
+
+ /*
+ * A previous user of this TX queue may have written a descriptor to the
+ * TX push collector, but not pushed the doorbell (e.g. after a crash).
+ * The next doorbell write would then push the stale descriptor.
+ *
+ * Ensure the (per network port) TX push collector is cleared by writing
+ * a no-op TX option descriptor. See bug29981 for details.
+ */
+ *addedp = 1;
+ ef10_tx_qdesc_checksum_create(etp, flags, &desc);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq);
+ ef10_tx_qpush(etp, *addedp, 0);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp))
+ /* FIXME */
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_enable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_piobuf_handle_t handle;
+ efx_rc_t rc;
+
+ if (etp->et_pio_size != 0) {
+ rc = EALREADY;
+ goto fail1;
+ }
+
+ /* Sub-allocate a PIO block from a piobuf */
+ if ((rc = ef10_nic_pio_alloc(enp,
+ &etp->et_pio_bufnum,
+ &handle,
+ &etp->et_pio_blknum,
+ &etp->et_pio_offset,
+ &etp->et_pio_size)) != 0) {
+ goto fail2;
+ }
+ EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);
+
+ /* Link the piobuf to this TXQ */
+ if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
+ goto fail3;
+ }
+
+ /*
+ * et_pio_offset is the offset of the sub-allocated block within the
+ * hardware PIO buffer. It is used as the buffer address in the PIO
+ * option descriptor.
+ *
+ * et_pio_write_offset is the offset of the sub-allocated block from the
+ * start of the write-combined memory mapping, and is used for writing
+ * data into the PIO buffer.
+ */
+ etp->et_pio_write_offset =
+ (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
+ ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+ ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+fail2:
+ EFSYS_PROBE(fail2);
+ etp->et_pio_size = 0;
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qpio_disable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+
+ if (etp->et_pio_size != 0) {
+ /* Unlink the piobuf from this TXQ */
+ ef10_nic_pio_unlink(enp, etp->et_index);
+
+ /* Free the sub-allocated PIO block */
+ ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+ etp->et_pio_size = 0;
+ etp->et_pio_write_offset = 0;
+ }
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(length) uint8_t *buffer,
+ __in size_t length,
+ __in size_t offset)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efsys_bar_t *esbp = enp->en_esbp;
+ uint32_t write_offset;
+ uint32_t write_offset_limit;
+ efx_qword_t *eqp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);
+
+ if (etp->et_pio_size == 0) {
+ rc = ENOENT;
+ goto fail1;
+ }
+ if (offset + length > etp->et_pio_size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ /*
+ * Writes to PIO buffers must be 64 bit aligned, and multiples of
+ * 64 bits.
+ */
+ write_offset = etp->et_pio_write_offset + offset;
+ write_offset_limit = write_offset + length;
+ eqp = (efx_qword_t *)buffer;
+ while (write_offset < write_offset_limit) {
+ EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
+ eqp++;
+ write_offset += sizeof (efx_qword_t);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_qword_t pio_desc;
+ unsigned int id;
+ size_t offset;
+ unsigned int added = *addedp;
+ efx_rc_t rc;
+
+
+ if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if (etp->et_pio_size == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
+ unsigned int, id, uint32_t, etp->et_pio_offset,
+ size_t, pkt_length);
+
+ EFX_POPULATE_QWORD_5(pio_desc,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, 1,
+ ESF_DZ_TX_PIO_CONT, 0,
+ ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
+ ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);
+
+ *addedp = added;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_buffer_t *eb,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < ndescs; i++) {
+ efx_buffer_t *ebp = &eb[i];
+ efsys_dma_addr_t addr = ebp->eb_addr;
+ size_t size = ebp->eb_size;
+ boolean_t eop = ebp->eb_eop;
+ unsigned int id;
+ size_t offset;
+ efx_qword_t qword;
+
+ /* No limitations on boundary crossing */
+ EFSYS_ASSERT(size <=
+ etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
+ unsigned int, id, efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_5(qword,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
+ ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
+ ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
+ }
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * This improves performance by, when possible, pushing a TX descriptor at the
+ * same time as the doorbell. The descriptor must be added to the TXQ, so that
+ * can be used if the hardware decides not to use the pushed descriptor.
+ */
+ void
+ef10_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ unsigned int wptr;
+ unsigned int id;
+ size_t offset;
+ efx_qword_t desc;
+ efx_oword_t oword;
+
+ wptr = added & etp->et_mask;
+ id = pushed & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
+
+ /*
+ * Bug 65776: TSO option descriptors cannot be pushed if pacer bypass is
+ * enabled on the event queue this transmit queue is attached to.
+ *
+ * To ensure the code is safe, it is easiest to simply test the type of
+ * the descriptor to push, and only push it is if it not a TSO option
+ * descriptor.
+ */
+ if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) ||
+ (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) !=
+ ESE_DZ_TX_OPTION_DESC_TSO)) {
+ /* Push the descriptor and update the wptr. */
+ EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr,
+ ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
+ ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
+
+ /* Ensure ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
+ wptr, id);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_VI_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
+ etp->et_index, &oword);
+ } else {
+ efx_dword_t dword;
+
+ /*
+ * Only update the wptr. This is signalled to the hardware by
+ * only writing one DWORD of the doorbell register.
+ */
+ EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr);
+ dword = oword.eo_dword[2];
+
+ /* Ensure ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
+ wptr, id);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_VI_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
+ etp->et_index, &dword, B_FALSE);
+ }
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_desc_t *ed,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < ndescs; i++) {
+ efx_desc_t *edp = &ed[i];
+ unsigned int id;
+ size_t offset;
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_desc_t);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
+ }
+
+ EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
+ unsigned int, added, unsigned int, ndescs);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ _NOTE(ARGUNUSED(etp))
+
+ /* No limitations on boundary crossing */
+ EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
+
+ EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
+ efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_5(edp->ed_eq,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
+ ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
+ ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
+}
+
+ void
+ef10_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp)
+{
+ _NOTE(ARGUNUSED(etp))
+
+ EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
+ uint16_t, ipv4_id, uint32_t, tcp_seq,
+ uint8_t, tcp_flags);
+
+ EFX_POPULATE_QWORD_5(edp->ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+ ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
+}
+
+ void
+ef10_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint16_t outer_ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count)
+{
+ _NOTE(ARGUNUSED(etp, count))
+
+ EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
+ uint16_t, ipv4_id, uint32_t, tcp_seq,
+ uint16_t, tcp_mss);
+
+ EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
+
+ EFX_POPULATE_QWORD_6(edp[0].ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
+ ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
+ EFX_POPULATE_QWORD_4(edp[1].ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
+ ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
+}
+
+ void
+ef10_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp)
+{
+ _NOTE(ARGUNUSED(etp))
+
+ EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
+ uint16_t, tci);
+
+ EFX_POPULATE_QWORD_4(edp->ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_VLAN,
+ ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
+ ESF_DZ_TX_VLAN_TAG1, tci);
+}
+
+ void
+ef10_tx_qdesc_checksum_create(
+ __in efx_txq_t *etp,
+ __in uint16_t flags,
+ __out efx_desc_t *edp)
+{
+ _NOTE(ARGUNUSED(etp));
+
+ EFSYS_PROBE2(tx_desc_checksum_create, unsigned int, etp->et_index,
+ uint32_t, flags);
+
+ EFX_POPULATE_QWORD_6(edp->ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
+ ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
+ ESF_DZ_TX_OPTION_IP_CSUM,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0,
+ ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM,
+ (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
+ ESF_DZ_TX_OPTION_INNER_IP_CSUM,
+ (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_rc_t rc;
+
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp, ns))
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ /* FIXME */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ /*
+ * EALREADY is not an error, but indicates that the MC has rebooted and
+ * that the TXQ has already been destroyed. Callers need to know that
+ * the TXQ flush has completed to avoid waiting until timeout for a
+ * flush done event that will not be delivered.
+ */
+ if (rc != EALREADY)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp))
+ /* FIXME */
+}
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < TX_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, etp->et_stat[id]);
+ etp->et_stat[id] = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_QSTATS */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_vpd.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_vpd.c
new file mode 100644
index 00000000..097fe1d4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_vpd.c
@@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_VPD
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+#include "ef10_tlv_layout.h"
+
+ __checkReturn efx_rc_t
+ef10_vpd_init(
+ __in efx_nic_t *enp)
+{
+ caddr_t svpd;
+ size_t svpd_size;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_STATIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_STATIC_VPD(pci_pf);
+ }
+
+ /*
+ * The VPD interface exposes VPD resources from the combined static and
+ * dynamic VPD storage. As the static VPD configuration should *never*
+ * change, we can cache it.
+ */
+ svpd = NULL;
+ svpd_size = 0;
+ rc = ef10_nvram_partn_read_tlv(enp,
+ NVRAM_PARTITION_TYPE_STATIC_CONFIG,
+ tag, &svpd, &svpd_size);
+ if (rc != 0) {
+ if (rc == EACCES) {
+ /* Unprivileged functions cannot access VPD */
+ goto out;
+ }
+ goto fail1;
+ }
+
+ if (svpd != NULL && svpd_size > 0) {
+ if ((rc = efx_vpd_hunk_verify(svpd, svpd_size, NULL)) != 0)
+ goto fail2;
+ }
+
+ enp->en_arch.ef10.ena_svpd = svpd;
+ enp->en_arch.ef10.ena_svpd_length = svpd_size;
+
+out:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, svpd_size, svpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ /*
+ * This function returns the total size the user should allocate
+ * for all VPD operations. We've already cached the static vpd,
+ * so we just need to return an upper bound on the dynamic vpd,
+ * which is the size of the DYNAMIC_CONFIG partition.
+ */
+ if ((rc = efx_mcdi_nvram_info(enp, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ sizep, NULL, NULL, NULL)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ caddr_t dvpd;
+ size_t dvpd_size;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_DYNAMIC_VPD(pci_pf);
+ }
+
+ if ((rc = ef10_nvram_partn_read_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ tag, &dvpd, &dvpd_size)) != 0)
+ goto fail1;
+
+ if (dvpd_size > size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+ if (dvpd != NULL)
+ memcpy(data, dvpd, dvpd_size);
+
+ /* Pad data with all-1s, consistent with update operations */
+ memset(data + dvpd_size, 0xff, size - dvpd_size);
+
+ if (dvpd != NULL)
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ if (dvpd != NULL)
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_tag_t stag;
+ efx_vpd_tag_t dtag;
+ efx_vpd_keyword_t skey;
+ efx_vpd_keyword_t dkey;
+ unsigned int scont;
+ unsigned int dcont;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ /*
+ * Strictly you could take the view that dynamic vpd is optional.
+ * Instead, to conform more closely to the read/verify/reinit()
+ * paradigm, we require dynamic vpd. ef10_vpd_reinit() will
+ * reinitialize it as required.
+ */
+ if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0)
+ goto fail1;
+
+ /*
+ * Verify that there is no duplication between the static and
+ * dynamic cfg sectors.
+ */
+ if (enp->en_arch.ef10.ena_svpd_length == 0)
+ goto done;
+
+ dcont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(data, size, &dtag,
+ &dkey, NULL, NULL, &dcont)) != 0)
+ goto fail2;
+ if (dcont == 0)
+ break;
+
+ /*
+ * Skip the RV keyword. It should be present in both the static
+ * and dynamic cfg sectors.
+ */
+ if (dtag == EFX_VPD_RO && dkey == EFX_VPD_KEYWORD('R', 'V'))
+ continue;
+
+ scont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(
+ enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, &stag, &skey,
+ NULL, NULL, &scont)) != 0)
+ goto fail3;
+ if (scont == 0)
+ break;
+
+ if (stag == dtag && skey == dkey) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ boolean_t wantpid;
+ efx_rc_t rc;
+
+ /*
+ * Only create an ID string if the dynamic cfg doesn't have one
+ */
+ if (enp->en_arch.ef10.ena_svpd_length == 0)
+ wantpid = B_TRUE;
+ else {
+ unsigned int offset;
+ uint8_t length;
+
+ rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length,
+ EFX_VPD_ID, 0, &offset, &length);
+ if (rc == 0)
+ wantpid = B_FALSE;
+ else if (rc == ENOENT)
+ wantpid = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ unsigned int offset;
+ uint8_t length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ /* Attempt to satisfy the request from svpd first */
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ if ((rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value,
+ enp->en_arch.ef10.ena_svpd + offset, length);
+ return (0);
+ } else if (rc != ENOENT)
+ goto fail1;
+ }
+
+ /* And then from the provided data buffer */
+ if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+ goto fail2;
+ }
+
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value, data + offset, length);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ /* If the provided (tag,keyword) exists in svpd, then it is readonly */
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ unsigned int offset;
+ uint8_t length;
+
+ if ((rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ rc = EACCES;
+ goto fail1;
+ }
+ }
+
+ if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ _NOTE(ARGUNUSED(enp, data, size, evvp, contp))
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t vpd_length;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_DYNAMIC_VPD(pci_pf);
+ }
+
+ /* Determine total length of new dynamic VPD */
+ if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0)
+ goto fail1;
+
+ /* Store new dynamic VPD in all segments in DYNAMIC_CONFIG partition */
+ if ((rc = ef10_nvram_partn_write_segment_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ tag, data, vpd_length, B_TRUE)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
+
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip, enp->en_arch.ef10.ena_svpd_length,
+ enp->en_arch.ef10.ena_svpd);
+
+ enp->en_arch.ef10.ena_svpd = NULL;
+ enp->en_arch.ef10.ena_svpd_length = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx.h b/src/spdk/dpdk/drivers/net/sfc/base/efx.h
new file mode 100644
index 00000000..5108b9b1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx.h
@@ -0,0 +1,3064 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2006-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_EFX_H
+#define _SYS_EFX_H
+
+#include "efsys.h"
+#include "efx_check.h"
+#include "efx_phy_ids.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFX_STATIC_ASSERT(_cond) \
+ ((void)sizeof (char[(_cond) ? 1 : -1]))
+
+#define EFX_ARRAY_SIZE(_array) \
+ (sizeof (_array) / sizeof ((_array)[0]))
+
+#define EFX_FIELD_OFFSET(_type, _field) \
+ ((size_t)&(((_type *)0)->_field))
+
+/* The macro expands divider twice */
+#define EFX_DIV_ROUND_UP(_n, _d) (((_n) + (_d) - 1) / (_d))
+
+/* Return codes */
+
+typedef __success(return == 0) int efx_rc_t;
+
+
+/* Chip families */
+
+typedef enum efx_family_e {
+ EFX_FAMILY_INVALID,
+ EFX_FAMILY_FALCON, /* Obsolete and not supported */
+ EFX_FAMILY_SIENA,
+ EFX_FAMILY_HUNTINGTON,
+ EFX_FAMILY_MEDFORD,
+ EFX_FAMILY_MEDFORD2,
+ EFX_FAMILY_NTYPES
+} efx_family_t;
+
+extern __checkReturn efx_rc_t
+efx_family(
+ __in uint16_t venid,
+ __in uint16_t devid,
+ __out efx_family_t *efp,
+ __out unsigned int *membarp);
+
+
+#define EFX_PCI_VENID_SFC 0x1924
+
+#define EFX_PCI_DEVID_FALCON 0x0710 /* SFC4000 */
+
+#define EFX_PCI_DEVID_BETHPAGE 0x0803 /* SFC9020 */
+#define EFX_PCI_DEVID_SIENA 0x0813 /* SFL9021 */
+#define EFX_PCI_DEVID_SIENA_F1_UNINIT 0x0810
+
+#define EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT 0x0901
+#define EFX_PCI_DEVID_FARMINGDALE 0x0903 /* SFC9120 PF */
+#define EFX_PCI_DEVID_GREENPORT 0x0923 /* SFC9140 PF */
+
+#define EFX_PCI_DEVID_FARMINGDALE_VF 0x1903 /* SFC9120 VF */
+#define EFX_PCI_DEVID_GREENPORT_VF 0x1923 /* SFC9140 VF */
+
+#define EFX_PCI_DEVID_MEDFORD_PF_UNINIT 0x0913
+#define EFX_PCI_DEVID_MEDFORD 0x0A03 /* SFC9240 PF */
+#define EFX_PCI_DEVID_MEDFORD_VF 0x1A03 /* SFC9240 VF */
+
+#define EFX_PCI_DEVID_MEDFORD2_PF_UNINIT 0x0B13
+#define EFX_PCI_DEVID_MEDFORD2 0x0B03 /* SFC9250 PF */
+#define EFX_PCI_DEVID_MEDFORD2_VF 0x1B03 /* SFC9250 VF */
+
+
+#define EFX_MEM_BAR_SIENA 2
+
+#define EFX_MEM_BAR_HUNTINGTON_PF 2
+#define EFX_MEM_BAR_HUNTINGTON_VF 0
+
+#define EFX_MEM_BAR_MEDFORD_PF 2
+#define EFX_MEM_BAR_MEDFORD_VF 0
+
+#define EFX_MEM_BAR_MEDFORD2 0
+
+
+/* Error codes */
+
+enum {
+ EFX_ERR_INVALID,
+ EFX_ERR_SRAM_OOB,
+ EFX_ERR_BUFID_DC_OOB,
+ EFX_ERR_MEM_PERR,
+ EFX_ERR_RBUF_OWN,
+ EFX_ERR_TBUF_OWN,
+ EFX_ERR_RDESQ_OWN,
+ EFX_ERR_TDESQ_OWN,
+ EFX_ERR_EVQ_OWN,
+ EFX_ERR_EVFF_OFLO,
+ EFX_ERR_ILL_ADDR,
+ EFX_ERR_SRAM_PERR,
+ EFX_ERR_NCODES
+};
+
+/* Calculate the IEEE 802.3 CRC32 of a MAC addr */
+extern __checkReturn uint32_t
+efx_crc32_calculate(
+ __in uint32_t crc_init,
+ __in_ecount(length) uint8_t const *input,
+ __in int length);
+
+
+/* Type prototypes */
+
+typedef struct efx_rxq_s efx_rxq_t;
+
+/* NIC */
+
+typedef struct efx_nic_s efx_nic_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_create(
+ __in efx_family_t family,
+ __in efsys_identifier_t *esip,
+ __in efsys_bar_t *esbp,
+ __in efsys_lock_t *eslp,
+ __deref_out efx_nic_t **enpp);
+
+/* EFX_FW_VARIANT codes map one to one on MC_CMD_FW codes */
+typedef enum efx_fw_variant_e {
+ EFX_FW_VARIANT_FULL_FEATURED,
+ EFX_FW_VARIANT_LOW_LATENCY,
+ EFX_FW_VARIANT_PACKED_STREAM,
+ EFX_FW_VARIANT_HIGH_TX_RATE,
+ EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1,
+ EFX_FW_VARIANT_RULES_ENGINE,
+ EFX_FW_VARIANT_DPDK,
+ EFX_FW_VARIANT_DONT_CARE = 0xffffffff
+} efx_fw_variant_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_probe(
+ __in efx_nic_t *enp,
+ __in efx_fw_variant_t efv);
+
+extern __checkReturn efx_rc_t
+efx_nic_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_nic_reset(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+efx_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+efx_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+efx_nic_unprobe(
+ __in efx_nic_t *enp);
+
+extern void
+efx_nic_destroy(
+ __in efx_nic_t *enp);
+
+#define EFX_PCIE_LINK_SPEED_GEN1 1
+#define EFX_PCIE_LINK_SPEED_GEN2 2
+#define EFX_PCIE_LINK_SPEED_GEN3 3
+
+typedef enum efx_pcie_link_performance_e {
+ EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH,
+ EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH,
+ EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY,
+ EFX_PCIE_LINK_PERFORMANCE_OPTIMAL
+} efx_pcie_link_performance_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_calculate_pcie_link_bandwidth(
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out uint32_t *bandwidth_mbpsp);
+
+extern __checkReturn efx_rc_t
+efx_nic_check_pcie_link_speed(
+ __in efx_nic_t *enp,
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out efx_pcie_link_performance_t *resultp);
+
+#if EFSYS_OPT_MCDI
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+/* Huntington and Medford require MCDIv2 commands */
+#define WITH_MCDI_V2 1
+#endif
+
+typedef struct efx_mcdi_req_s efx_mcdi_req_t;
+
+typedef enum efx_mcdi_exception_e {
+ EFX_MCDI_EXCEPTION_MC_REBOOT,
+ EFX_MCDI_EXCEPTION_MC_BADASSERT,
+} efx_mcdi_exception_t;
+
+#if EFSYS_OPT_MCDI_LOGGING
+typedef enum efx_log_msg_e {
+ EFX_LOG_INVALID,
+ EFX_LOG_MCDI_REQUEST,
+ EFX_LOG_MCDI_RESPONSE,
+} efx_log_msg_t;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+typedef struct efx_mcdi_transport_s {
+ void *emt_context;
+ efsys_mem_t *emt_dma_mem;
+ void (*emt_execute)(void *, efx_mcdi_req_t *);
+ void (*emt_ev_cpl)(void *);
+ void (*emt_exception)(void *, efx_mcdi_exception_t);
+#if EFSYS_OPT_MCDI_LOGGING
+ void (*emt_logger)(void *, efx_log_msg_t,
+ void *, size_t, void *, size_t);
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ void (*emt_ev_proxy_response)(void *, uint32_t, efx_rc_t);
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+} efx_mcdi_transport_t;
+
+extern __checkReturn efx_rc_t
+efx_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_reboot(
+ __in efx_nic_t *enp);
+
+ void
+efx_mcdi_new_epoch(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *usec_timeoutp);
+
+extern void
+efx_mcdi_request_start(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __in boolean_t ev_cpl);
+
+extern __checkReturn boolean_t
+efx_mcdi_request_poll(
+ __in efx_nic_t *enp);
+
+extern __checkReturn boolean_t
+efx_mcdi_request_abort(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mcdi_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+/* INTR */
+
+#define EFX_NINTR_SIENA 1024
+
+typedef enum efx_intr_type_e {
+ EFX_INTR_INVALID = 0,
+ EFX_INTR_LINE,
+ EFX_INTR_MESSAGE,
+ EFX_INTR_NTYPES
+} efx_intr_type_t;
+
+#define EFX_INTR_SIZE (sizeof (efx_oword_t))
+
+extern __checkReturn efx_rc_t
+efx_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+extern void
+efx_intr_enable(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_disable(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+#define EFX_INTR_NEVQS 32
+
+extern __checkReturn efx_rc_t
+efx_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+extern void
+efx_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *maskp);
+
+extern void
+efx_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+extern void
+efx_intr_fatal(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_fini(
+ __in efx_nic_t *enp);
+
+/* MAC */
+
+#if EFSYS_OPT_MAC_STATS
+
+/* START MKCONFIG GENERATED EfxHeaderMacBlock ea466a9bc8789994 */
+typedef enum efx_mac_stat_e {
+ EFX_MAC_RX_OCTETS,
+ EFX_MAC_RX_PKTS,
+ EFX_MAC_RX_UNICST_PKTS,
+ EFX_MAC_RX_MULTICST_PKTS,
+ EFX_MAC_RX_BRDCST_PKTS,
+ EFX_MAC_RX_PAUSE_PKTS,
+ EFX_MAC_RX_LE_64_PKTS,
+ EFX_MAC_RX_65_TO_127_PKTS,
+ EFX_MAC_RX_128_TO_255_PKTS,
+ EFX_MAC_RX_256_TO_511_PKTS,
+ EFX_MAC_RX_512_TO_1023_PKTS,
+ EFX_MAC_RX_1024_TO_15XX_PKTS,
+ EFX_MAC_RX_GE_15XX_PKTS,
+ EFX_MAC_RX_ERRORS,
+ EFX_MAC_RX_FCS_ERRORS,
+ EFX_MAC_RX_DROP_EVENTS,
+ EFX_MAC_RX_FALSE_CARRIER_ERRORS,
+ EFX_MAC_RX_SYMBOL_ERRORS,
+ EFX_MAC_RX_ALIGN_ERRORS,
+ EFX_MAC_RX_INTERNAL_ERRORS,
+ EFX_MAC_RX_JABBER_PKTS,
+ EFX_MAC_RX_LANE0_CHAR_ERR,
+ EFX_MAC_RX_LANE1_CHAR_ERR,
+ EFX_MAC_RX_LANE2_CHAR_ERR,
+ EFX_MAC_RX_LANE3_CHAR_ERR,
+ EFX_MAC_RX_LANE0_DISP_ERR,
+ EFX_MAC_RX_LANE1_DISP_ERR,
+ EFX_MAC_RX_LANE2_DISP_ERR,
+ EFX_MAC_RX_LANE3_DISP_ERR,
+ EFX_MAC_RX_MATCH_FAULT,
+ EFX_MAC_RX_NODESC_DROP_CNT,
+ EFX_MAC_TX_OCTETS,
+ EFX_MAC_TX_PKTS,
+ EFX_MAC_TX_UNICST_PKTS,
+ EFX_MAC_TX_MULTICST_PKTS,
+ EFX_MAC_TX_BRDCST_PKTS,
+ EFX_MAC_TX_PAUSE_PKTS,
+ EFX_MAC_TX_LE_64_PKTS,
+ EFX_MAC_TX_65_TO_127_PKTS,
+ EFX_MAC_TX_128_TO_255_PKTS,
+ EFX_MAC_TX_256_TO_511_PKTS,
+ EFX_MAC_TX_512_TO_1023_PKTS,
+ EFX_MAC_TX_1024_TO_15XX_PKTS,
+ EFX_MAC_TX_GE_15XX_PKTS,
+ EFX_MAC_TX_ERRORS,
+ EFX_MAC_TX_SGL_COL_PKTS,
+ EFX_MAC_TX_MULT_COL_PKTS,
+ EFX_MAC_TX_EX_COL_PKTS,
+ EFX_MAC_TX_LATE_COL_PKTS,
+ EFX_MAC_TX_DEF_PKTS,
+ EFX_MAC_TX_EX_DEF_PKTS,
+ EFX_MAC_PM_TRUNC_BB_OVERFLOW,
+ EFX_MAC_PM_DISCARD_BB_OVERFLOW,
+ EFX_MAC_PM_TRUNC_VFIFO_FULL,
+ EFX_MAC_PM_DISCARD_VFIFO_FULL,
+ EFX_MAC_PM_TRUNC_QBB,
+ EFX_MAC_PM_DISCARD_QBB,
+ EFX_MAC_PM_DISCARD_MAPPING,
+ EFX_MAC_RXDP_Q_DISABLED_PKTS,
+ EFX_MAC_RXDP_DI_DROPPED_PKTS,
+ EFX_MAC_RXDP_STREAMING_PKTS,
+ EFX_MAC_RXDP_HLB_FETCH,
+ EFX_MAC_RXDP_HLB_WAIT,
+ EFX_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_UNICAST_BYTES,
+ EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_MULTICAST_BYTES,
+ EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_BROADCAST_BYTES,
+ EFX_MAC_VADAPTER_RX_BAD_PACKETS,
+ EFX_MAC_VADAPTER_RX_BAD_BYTES,
+ EFX_MAC_VADAPTER_RX_OVERFLOW,
+ EFX_MAC_VADAPTER_TX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_UNICAST_BYTES,
+ EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_MULTICAST_BYTES,
+ EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_BROADCAST_BYTES,
+ EFX_MAC_VADAPTER_TX_BAD_PACKETS,
+ EFX_MAC_VADAPTER_TX_BAD_BYTES,
+ EFX_MAC_VADAPTER_TX_OVERFLOW,
+ EFX_MAC_FEC_UNCORRECTED_ERRORS,
+ EFX_MAC_FEC_CORRECTED_ERRORS,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE0,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE1,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE2,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3,
+ EFX_MAC_CTPIO_VI_BUSY_FALLBACK,
+ EFX_MAC_CTPIO_LONG_WRITE_SUCCESS,
+ EFX_MAC_CTPIO_MISSING_DBELL_FAIL,
+ EFX_MAC_CTPIO_OVERFLOW_FAIL,
+ EFX_MAC_CTPIO_UNDERFLOW_FAIL,
+ EFX_MAC_CTPIO_TIMEOUT_FAIL,
+ EFX_MAC_CTPIO_NONCONTIG_WR_FAIL,
+ EFX_MAC_CTPIO_FRM_CLOBBER_FAIL,
+ EFX_MAC_CTPIO_INVALID_WR_FAIL,
+ EFX_MAC_CTPIO_VI_CLOBBER_FALLBACK,
+ EFX_MAC_CTPIO_UNQUALIFIED_FALLBACK,
+ EFX_MAC_CTPIO_RUNT_FALLBACK,
+ EFX_MAC_CTPIO_SUCCESS,
+ EFX_MAC_CTPIO_FALLBACK,
+ EFX_MAC_CTPIO_POISON,
+ EFX_MAC_CTPIO_ERASE,
+ EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC,
+ EFX_MAC_RXDP_HLB_IDLE,
+ EFX_MAC_RXDP_HLB_TIMEOUT,
+ EFX_MAC_NSTATS
+} efx_mac_stat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderMacBlock */
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+typedef enum efx_link_mode_e {
+ EFX_LINK_UNKNOWN = 0,
+ EFX_LINK_DOWN,
+ EFX_LINK_10HDX,
+ EFX_LINK_10FDX,
+ EFX_LINK_100HDX,
+ EFX_LINK_100FDX,
+ EFX_LINK_1000HDX,
+ EFX_LINK_1000FDX,
+ EFX_LINK_10000FDX,
+ EFX_LINK_40000FDX,
+ EFX_LINK_25000FDX,
+ EFX_LINK_50000FDX,
+ EFX_LINK_100000FDX,
+ EFX_LINK_NMODES
+} efx_link_mode_t;
+
+#define EFX_MAC_ADDR_LEN 6
+
+#define EFX_VNI_OR_VSID_LEN 3
+
+#define EFX_MAC_ADDR_IS_MULTICAST(_address) (((uint8_t *)_address)[0] & 0x01)
+
+#define EFX_MAC_MULTICAST_LIST_MAX 256
+
+#define EFX_MAC_SDU_MAX 9202
+
+#define EFX_MAC_PDU_ADJUSTMENT \
+ (/* EtherII */ 14 \
+ + /* VLAN */ 4 \
+ + /* CRC */ 4 \
+ + /* bug16011 */ 16) \
+
+#define EFX_MAC_PDU(_sdu) \
+ P2ROUNDUP((_sdu) + EFX_MAC_PDU_ADJUSTMENT, 8)
+
+/*
+ * Due to the P2ROUNDUP in EFX_MAC_PDU(), EFX_MAC_SDU_FROM_PDU() may give
+ * the SDU rounded up slightly.
+ */
+#define EFX_MAC_SDU_FROM_PDU(_pdu) ((_pdu) - EFX_MAC_PDU_ADJUSTMENT)
+
+#define EFX_MAC_PDU_MIN 60
+#define EFX_MAC_PDU_MAX EFX_MAC_PDU(EFX_MAC_SDU_MAX)
+
+extern __checkReturn efx_rc_t
+efx_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+extern __checkReturn efx_rc_t
+efx_mac_pdu_set(
+ __in efx_nic_t *enp,
+ __in size_t pdu);
+
+extern __checkReturn efx_rc_t
+efx_mac_addr_set(
+ __in efx_nic_t *enp,
+ __in uint8_t *addr);
+
+extern __checkReturn efx_rc_t
+efx_mac_filter_set(
+ __in efx_nic_t *enp,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst);
+
+extern __checkReturn efx_rc_t
+efx_mac_multicast_list_set(
+ __in efx_nic_t *enp,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count);
+
+extern __checkReturn efx_rc_t
+efx_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+efx_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mac_drain(
+ __in efx_nic_t *enp,
+ __in boolean_t enabled);
+
+extern __checkReturn efx_rc_t
+efx_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+#define EFX_FCNTL_RESPOND 0x00000001
+#define EFX_FCNTL_GENERATE 0x00000002
+
+extern __checkReturn efx_rc_t
+efx_mac_fcntl_set(
+ __in efx_nic_t *enp,
+ __in unsigned int fcntl,
+ __in boolean_t autoneg);
+
+extern void
+efx_mac_fcntl_get(
+ __in efx_nic_t *enp,
+ __out unsigned int *fcntl_wantedp,
+ __out unsigned int *fcntl_linkp);
+
+
+#if EFSYS_OPT_MAC_STATS
+
+#if EFSYS_OPT_NAMES
+
+extern __checkReturn const char *
+efx_mac_stat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#define EFX_MAC_STATS_MASK_BITS_PER_PAGE (8 * sizeof (uint32_t))
+
+#define EFX_MAC_STATS_MASK_NPAGES \
+ (P2ROUNDUP(EFX_MAC_NSTATS, EFX_MAC_STATS_MASK_BITS_PER_PAGE) / \
+ EFX_MAC_STATS_MASK_BITS_PER_PAGE)
+
+/*
+ * Get mask of MAC statistics supported by the hardware.
+ *
+ * If mask_size is insufficient to return the mask, EINVAL error is
+ * returned. EFX_MAC_STATS_MASK_NPAGES multiplied by size of the page
+ * (which is sizeof (uint32_t)) is sufficient.
+ */
+extern __checkReturn efx_rc_t
+efx_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __out_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size);
+
+#define EFX_MAC_STAT_SUPPORTED(_mask, _stat) \
+ ((_mask)[(_stat) / EFX_MAC_STATS_MASK_BITS_PER_PAGE] & \
+ (1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1))))
+
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_clear(
+ __in efx_nic_t *enp);
+
+/*
+ * Upload mac statistics supported by the hardware into the given buffer.
+ *
+ * The DMA buffer must be 4Kbyte aligned and sized to hold at least
+ * efx_nic_cfg_t::enc_mac_stats_nstats 64bit counters.
+ *
+ * The hardware will only DMA statistics that it understands (of course).
+ * Drivers should not make any assumptions about which statistics are
+ * supported, especially when the statistics are generated by firmware.
+ *
+ * Thus, drivers should zero this buffer before use, so that not-understood
+ * statistics read back as zero.
+ */
+extern __checkReturn efx_rc_t
+efx_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp);
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events);
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+/* MON */
+
+typedef enum efx_mon_type_e {
+ EFX_MON_INVALID = 0,
+ EFX_MON_SFC90X0,
+ EFX_MON_SFC91X0,
+ EFX_MON_SFC92X0,
+ EFX_MON_NTYPES
+} efx_mon_type_t;
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_mon_name(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn efx_rc_t
+efx_mon_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_MON_STATS
+
+#define EFX_MON_STATS_PAGE_SIZE 0x100
+#define EFX_MON_MASK_ELEMENT_SIZE 32
+
+/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 400fdb0517af1fca */
+typedef enum efx_mon_stat_e {
+ EFX_MON_STAT_2_5V,
+ EFX_MON_STAT_VCCP1,
+ EFX_MON_STAT_VCC,
+ EFX_MON_STAT_5V,
+ EFX_MON_STAT_12V,
+ EFX_MON_STAT_VCCP2,
+ EFX_MON_STAT_EXT_TEMP,
+ EFX_MON_STAT_INT_TEMP,
+ EFX_MON_STAT_AIN1,
+ EFX_MON_STAT_AIN2,
+ EFX_MON_STAT_INT_COOLING,
+ EFX_MON_STAT_EXT_COOLING,
+ EFX_MON_STAT_1V,
+ EFX_MON_STAT_1_2V,
+ EFX_MON_STAT_1_8V,
+ EFX_MON_STAT_3_3V,
+ EFX_MON_STAT_1_2VA,
+ EFX_MON_STAT_VREF,
+ EFX_MON_STAT_VAOE,
+ EFX_MON_STAT_AOE_TEMP,
+ EFX_MON_STAT_PSU_AOE_TEMP,
+ EFX_MON_STAT_PSU_TEMP,
+ EFX_MON_STAT_FAN0,
+ EFX_MON_STAT_FAN1,
+ EFX_MON_STAT_FAN2,
+ EFX_MON_STAT_FAN3,
+ EFX_MON_STAT_FAN4,
+ EFX_MON_STAT_VAOE_IN,
+ EFX_MON_STAT_IAOE,
+ EFX_MON_STAT_IAOE_IN,
+ EFX_MON_STAT_NIC_POWER,
+ EFX_MON_STAT_0_9V,
+ EFX_MON_STAT_I0_9V,
+ EFX_MON_STAT_I1_2V,
+ EFX_MON_STAT_0_9V_ADC,
+ EFX_MON_STAT_INT_TEMP2,
+ EFX_MON_STAT_VREG_TEMP,
+ EFX_MON_STAT_VREG_0_9V_TEMP,
+ EFX_MON_STAT_VREG_1_2V_TEMP,
+ EFX_MON_STAT_INT_VPTAT,
+ EFX_MON_STAT_INT_ADC_TEMP,
+ EFX_MON_STAT_EXT_VPTAT,
+ EFX_MON_STAT_EXT_ADC_TEMP,
+ EFX_MON_STAT_AMBIENT_TEMP,
+ EFX_MON_STAT_AIRFLOW,
+ EFX_MON_STAT_VDD08D_VSS08D_CSR,
+ EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC,
+ EFX_MON_STAT_HOTPOINT_TEMP,
+ EFX_MON_STAT_PHY_POWER_SWITCH_PORT0,
+ EFX_MON_STAT_PHY_POWER_SWITCH_PORT1,
+ EFX_MON_STAT_MUM_VCC,
+ EFX_MON_STAT_0V9_A,
+ EFX_MON_STAT_I0V9_A,
+ EFX_MON_STAT_0V9_A_TEMP,
+ EFX_MON_STAT_0V9_B,
+ EFX_MON_STAT_I0V9_B,
+ EFX_MON_STAT_0V9_B_TEMP,
+ EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY,
+ EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXT_ADC,
+ EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY,
+ EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_MASTER_VPTAT,
+ EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP,
+ EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT,
+ EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP,
+ EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC,
+ EFX_MON_STAT_SODIMM_VOUT,
+ EFX_MON_STAT_SODIMM_0_TEMP,
+ EFX_MON_STAT_SODIMM_1_TEMP,
+ EFX_MON_STAT_PHY0_VCC,
+ EFX_MON_STAT_PHY1_VCC,
+ EFX_MON_STAT_CONTROLLER_TDIODE_TEMP,
+ EFX_MON_STAT_BOARD_FRONT_TEMP,
+ EFX_MON_STAT_BOARD_BACK_TEMP,
+ EFX_MON_STAT_I1V8,
+ EFX_MON_STAT_I2V5,
+ EFX_MON_STAT_I3V3,
+ EFX_MON_STAT_I12V0,
+ EFX_MON_STAT_1_3V,
+ EFX_MON_STAT_I1V3,
+ EFX_MON_NSTATS
+} efx_mon_stat_t;
+
+/* END MKCONFIG GENERATED MonitorHeaderStatsBlock */
+
+typedef enum efx_mon_stat_state_e {
+ EFX_MON_STAT_STATE_OK = 0,
+ EFX_MON_STAT_STATE_WARNING = 1,
+ EFX_MON_STAT_STATE_FATAL = 2,
+ EFX_MON_STAT_STATE_BROKEN = 3,
+ EFX_MON_STAT_STATE_NO_READING = 4,
+} efx_mon_stat_state_t;
+
+typedef struct efx_mon_stat_value_s {
+ uint16_t emsv_value;
+ uint16_t emsv_state;
+} efx_mon_stat_value_t;
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_mon_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn efx_rc_t
+efx_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+extern void
+efx_mon_fini(
+ __in efx_nic_t *enp);
+
+/* PHY */
+
+extern __checkReturn efx_rc_t
+efx_phy_verify(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+
+typedef enum efx_phy_led_mode_e {
+ EFX_PHY_LED_DEFAULT = 0,
+ EFX_PHY_LED_OFF,
+ EFX_PHY_LED_ON,
+ EFX_PHY_LED_FLASH,
+ EFX_PHY_LED_NMODES
+} efx_phy_led_mode_t;
+
+extern __checkReturn efx_rc_t
+efx_phy_led_set(
+ __in efx_nic_t *enp,
+ __in efx_phy_led_mode_t mode);
+
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+extern __checkReturn efx_rc_t
+efx_port_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+typedef enum efx_loopback_type_e {
+ EFX_LOOPBACK_OFF = 0,
+ EFX_LOOPBACK_DATA = 1,
+ EFX_LOOPBACK_GMAC = 2,
+ EFX_LOOPBACK_XGMII = 3,
+ EFX_LOOPBACK_XGXS = 4,
+ EFX_LOOPBACK_XAUI = 5,
+ EFX_LOOPBACK_GMII = 6,
+ EFX_LOOPBACK_SGMII = 7,
+ EFX_LOOPBACK_XGBR = 8,
+ EFX_LOOPBACK_XFI = 9,
+ EFX_LOOPBACK_XAUI_FAR = 10,
+ EFX_LOOPBACK_GMII_FAR = 11,
+ EFX_LOOPBACK_SGMII_FAR = 12,
+ EFX_LOOPBACK_XFI_FAR = 13,
+ EFX_LOOPBACK_GPHY = 14,
+ EFX_LOOPBACK_PHY_XS = 15,
+ EFX_LOOPBACK_PCS = 16,
+ EFX_LOOPBACK_PMA_PMD = 17,
+ EFX_LOOPBACK_XPORT = 18,
+ EFX_LOOPBACK_XGMII_WS = 19,
+ EFX_LOOPBACK_XAUI_WS = 20,
+ EFX_LOOPBACK_XAUI_WS_FAR = 21,
+ EFX_LOOPBACK_XAUI_WS_NEAR = 22,
+ EFX_LOOPBACK_GMII_WS = 23,
+ EFX_LOOPBACK_XFI_WS = 24,
+ EFX_LOOPBACK_XFI_WS_FAR = 25,
+ EFX_LOOPBACK_PHYXS_WS = 26,
+ EFX_LOOPBACK_PMA_INT = 27,
+ EFX_LOOPBACK_SD_NEAR = 28,
+ EFX_LOOPBACK_SD_FAR = 29,
+ EFX_LOOPBACK_PMA_INT_WS = 30,
+ EFX_LOOPBACK_SD_FEP2_WS = 31,
+ EFX_LOOPBACK_SD_FEP1_5_WS = 32,
+ EFX_LOOPBACK_SD_FEP_WS = 33,
+ EFX_LOOPBACK_SD_FES_WS = 34,
+ EFX_LOOPBACK_AOE_INT_NEAR = 35,
+ EFX_LOOPBACK_DATA_WS = 36,
+ EFX_LOOPBACK_FORCE_EXT_LINK = 37,
+ EFX_LOOPBACK_NTYPES
+} efx_loopback_type_t;
+
+typedef enum efx_loopback_kind_e {
+ EFX_LOOPBACK_KIND_OFF = 0,
+ EFX_LOOPBACK_KIND_ALL,
+ EFX_LOOPBACK_KIND_MAC,
+ EFX_LOOPBACK_KIND_PHY,
+ EFX_LOOPBACK_NKINDS
+} efx_loopback_kind_t;
+
+extern void
+efx_loopback_mask(
+ __in efx_loopback_kind_t loopback_kind,
+ __out efx_qword_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_port_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t type);
+
+#if EFSYS_OPT_NAMES
+
+extern __checkReturn const char *
+efx_loopback_type_name(
+ __in efx_nic_t *enp,
+ __in efx_loopback_type_t type);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+extern __checkReturn efx_rc_t
+efx_port_poll(
+ __in efx_nic_t *enp,
+ __out_opt efx_link_mode_t *link_modep);
+
+extern void
+efx_port_fini(
+ __in efx_nic_t *enp);
+
+typedef enum efx_phy_cap_type_e {
+ EFX_PHY_CAP_INVALID = 0,
+ EFX_PHY_CAP_10HDX,
+ EFX_PHY_CAP_10FDX,
+ EFX_PHY_CAP_100HDX,
+ EFX_PHY_CAP_100FDX,
+ EFX_PHY_CAP_1000HDX,
+ EFX_PHY_CAP_1000FDX,
+ EFX_PHY_CAP_10000FDX,
+ EFX_PHY_CAP_PAUSE,
+ EFX_PHY_CAP_ASYM,
+ EFX_PHY_CAP_AN,
+ EFX_PHY_CAP_40000FDX,
+ EFX_PHY_CAP_DDM,
+ EFX_PHY_CAP_100000FDX,
+ EFX_PHY_CAP_25000FDX,
+ EFX_PHY_CAP_50000FDX,
+ EFX_PHY_CAP_BASER_FEC,
+ EFX_PHY_CAP_BASER_FEC_REQUESTED,
+ EFX_PHY_CAP_RS_FEC,
+ EFX_PHY_CAP_RS_FEC_REQUESTED,
+ EFX_PHY_CAP_25G_BASER_FEC,
+ EFX_PHY_CAP_25G_BASER_FEC_REQUESTED,
+ EFX_PHY_CAP_NTYPES
+} efx_phy_cap_type_t;
+
+
+#define EFX_PHY_CAP_CURRENT 0x00000000
+#define EFX_PHY_CAP_DEFAULT 0x00000001
+#define EFX_PHY_CAP_PERM 0x00000002
+
+extern void
+efx_phy_adv_cap_get(
+ __in efx_nic_t *enp,
+ __in uint32_t flag,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_phy_adv_cap_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mask);
+
+extern void
+efx_phy_lp_cap_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+typedef enum efx_phy_media_type_e {
+ EFX_PHY_MEDIA_INVALID = 0,
+ EFX_PHY_MEDIA_XAUI,
+ EFX_PHY_MEDIA_CX4,
+ EFX_PHY_MEDIA_KX4,
+ EFX_PHY_MEDIA_XFP,
+ EFX_PHY_MEDIA_SFP_PLUS,
+ EFX_PHY_MEDIA_BASE_T,
+ EFX_PHY_MEDIA_QSFP_PLUS,
+ EFX_PHY_MEDIA_NTYPES
+} efx_phy_media_type_t;
+
+/*
+ * Get the type of medium currently used. If the board has ports for
+ * modules, a module is present, and we recognise the media type of
+ * the module, then this will be the media type of the module.
+ * Otherwise it will be the media type of the port.
+ */
+extern void
+efx_phy_media_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_media_type_t *typep);
+
+extern __checkReturn efx_rc_t
+efx_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data);
+
+#if EFSYS_OPT_PHY_STATS
+
+/* START MKCONFIG GENERATED PhyHeaderStatsBlock 30ed56ad501f8e36 */
+typedef enum efx_phy_stat_e {
+ EFX_PHY_STAT_OUI,
+ EFX_PHY_STAT_PMA_PMD_LINK_UP,
+ EFX_PHY_STAT_PMA_PMD_RX_FAULT,
+ EFX_PHY_STAT_PMA_PMD_TX_FAULT,
+ EFX_PHY_STAT_PMA_PMD_REV_A,
+ EFX_PHY_STAT_PMA_PMD_REV_B,
+ EFX_PHY_STAT_PMA_PMD_REV_C,
+ EFX_PHY_STAT_PMA_PMD_REV_D,
+ EFX_PHY_STAT_PCS_LINK_UP,
+ EFX_PHY_STAT_PCS_RX_FAULT,
+ EFX_PHY_STAT_PCS_TX_FAULT,
+ EFX_PHY_STAT_PCS_BER,
+ EFX_PHY_STAT_PCS_BLOCK_ERRORS,
+ EFX_PHY_STAT_PHY_XS_LINK_UP,
+ EFX_PHY_STAT_PHY_XS_RX_FAULT,
+ EFX_PHY_STAT_PHY_XS_TX_FAULT,
+ EFX_PHY_STAT_PHY_XS_ALIGN,
+ EFX_PHY_STAT_PHY_XS_SYNC_A,
+ EFX_PHY_STAT_PHY_XS_SYNC_B,
+ EFX_PHY_STAT_PHY_XS_SYNC_C,
+ EFX_PHY_STAT_PHY_XS_SYNC_D,
+ EFX_PHY_STAT_AN_LINK_UP,
+ EFX_PHY_STAT_AN_MASTER,
+ EFX_PHY_STAT_AN_LOCAL_RX_OK,
+ EFX_PHY_STAT_AN_REMOTE_RX_OK,
+ EFX_PHY_STAT_CL22EXT_LINK_UP,
+ EFX_PHY_STAT_SNR_A,
+ EFX_PHY_STAT_SNR_B,
+ EFX_PHY_STAT_SNR_C,
+ EFX_PHY_STAT_SNR_D,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_A,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_B,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_C,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_D,
+ EFX_PHY_STAT_AN_COMPLETE,
+ EFX_PHY_STAT_PMA_PMD_REV_MAJOR,
+ EFX_PHY_STAT_PMA_PMD_REV_MINOR,
+ EFX_PHY_STAT_PMA_PMD_REV_MICRO,
+ EFX_PHY_STAT_PCS_FW_VERSION_0,
+ EFX_PHY_STAT_PCS_FW_VERSION_1,
+ EFX_PHY_STAT_PCS_FW_VERSION_2,
+ EFX_PHY_STAT_PCS_FW_VERSION_3,
+ EFX_PHY_STAT_PCS_FW_BUILD_YY,
+ EFX_PHY_STAT_PCS_FW_BUILD_MM,
+ EFX_PHY_STAT_PCS_FW_BUILD_DD,
+ EFX_PHY_STAT_PCS_OP_MODE,
+ EFX_PHY_NSTATS
+} efx_phy_stat_t;
+
+/* END MKCONFIG GENERATED PhyHeaderStatsBlock */
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_phy_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_phy_stat_t stat);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#define EFX_PHY_STATS_SIZE 0x100
+
+extern __checkReturn efx_rc_t
+efx_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+
+#if EFSYS_OPT_BIST
+
+typedef enum efx_bist_type_e {
+ EFX_BIST_TYPE_UNKNOWN,
+ EFX_BIST_TYPE_PHY_NORMAL,
+ EFX_BIST_TYPE_PHY_CABLE_SHORT,
+ EFX_BIST_TYPE_PHY_CABLE_LONG,
+ EFX_BIST_TYPE_MC_MEM, /* Test the MC DMEM and IMEM */
+ EFX_BIST_TYPE_SAT_MEM, /* Test the DMEM and IMEM of satellite cpus */
+ EFX_BIST_TYPE_REG, /* Test the register memories */
+ EFX_BIST_TYPE_NTYPES,
+} efx_bist_type_t;
+
+typedef enum efx_bist_result_e {
+ EFX_BIST_RESULT_UNKNOWN,
+ EFX_BIST_RESULT_RUNNING,
+ EFX_BIST_RESULT_PASSED,
+ EFX_BIST_RESULT_FAILED,
+} efx_bist_result_t;
+
+typedef enum efx_phy_cable_status_e {
+ EFX_PHY_CABLE_STATUS_OK,
+ EFX_PHY_CABLE_STATUS_INVALID,
+ EFX_PHY_CABLE_STATUS_OPEN,
+ EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT,
+ EFX_PHY_CABLE_STATUS_INTERPAIRSHORT,
+ EFX_PHY_CABLE_STATUS_BUSY,
+} efx_phy_cable_status_t;
+
+typedef enum efx_bist_value_e {
+ EFX_BIST_PHY_CABLE_LENGTH_A,
+ EFX_BIST_PHY_CABLE_LENGTH_B,
+ EFX_BIST_PHY_CABLE_LENGTH_C,
+ EFX_BIST_PHY_CABLE_LENGTH_D,
+ EFX_BIST_PHY_CABLE_STATUS_A,
+ EFX_BIST_PHY_CABLE_STATUS_B,
+ EFX_BIST_PHY_CABLE_STATUS_C,
+ EFX_BIST_PHY_CABLE_STATUS_D,
+ EFX_BIST_FAULT_CODE,
+ /*
+ * Memory BIST specific values. These match to the MC_CMD_BIST_POLL
+ * response.
+ */
+ EFX_BIST_MEM_TEST,
+ EFX_BIST_MEM_ADDR,
+ EFX_BIST_MEM_BUS,
+ EFX_BIST_MEM_EXPECT,
+ EFX_BIST_MEM_ACTUAL,
+ EFX_BIST_MEM_ECC,
+ EFX_BIST_MEM_ECC_PARITY,
+ EFX_BIST_MEM_ECC_FATAL,
+ EFX_BIST_NVALUES,
+} efx_bist_value_t;
+
+extern __checkReturn efx_rc_t
+efx_bist_enable_offline(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt uint32_t *value_maskp,
+ __out_ecount_opt(count) unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+efx_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+#define EFX_FEATURE_IPV6 0x00000001
+#define EFX_FEATURE_LFSR_HASH_INSERT 0x00000002
+#define EFX_FEATURE_LINK_EVENTS 0x00000004
+#define EFX_FEATURE_PERIODIC_MAC_STATS 0x00000008
+#define EFX_FEATURE_MCDI 0x00000020
+#define EFX_FEATURE_LOOKAHEAD_SPLIT 0x00000040
+#define EFX_FEATURE_MAC_HEADER_FILTERS 0x00000080
+#define EFX_FEATURE_TURBO 0x00000100
+#define EFX_FEATURE_MCDI_DMA 0x00000200
+#define EFX_FEATURE_TX_SRC_FILTERS 0x00000400
+#define EFX_FEATURE_PIO_BUFFERS 0x00000800
+#define EFX_FEATURE_FW_ASSISTED_TSO 0x00001000
+#define EFX_FEATURE_FW_ASSISTED_TSO_V2 0x00002000
+#define EFX_FEATURE_PACKED_STREAM 0x00004000
+
+typedef enum efx_tunnel_protocol_e {
+ EFX_TUNNEL_PROTOCOL_NONE = 0,
+ EFX_TUNNEL_PROTOCOL_VXLAN,
+ EFX_TUNNEL_PROTOCOL_GENEVE,
+ EFX_TUNNEL_PROTOCOL_NVGRE,
+ EFX_TUNNEL_NPROTOS
+} efx_tunnel_protocol_t;
+
+typedef enum efx_vi_window_shift_e {
+ EFX_VI_WINDOW_SHIFT_INVALID = 0,
+ EFX_VI_WINDOW_SHIFT_8K = 13,
+ EFX_VI_WINDOW_SHIFT_16K = 14,
+ EFX_VI_WINDOW_SHIFT_64K = 16,
+} efx_vi_window_shift_t;
+
+typedef struct efx_nic_cfg_s {
+ uint32_t enc_board_type;
+ uint32_t enc_phy_type;
+#if EFSYS_OPT_NAMES
+ char enc_phy_name[21];
+#endif
+ char enc_phy_revision[21];
+ efx_mon_type_t enc_mon_type;
+#if EFSYS_OPT_MON_STATS
+ uint32_t enc_mon_stat_dma_buf_size;
+ uint32_t enc_mon_stat_mask[(EFX_MON_NSTATS + 31) / 32];
+#endif
+ unsigned int enc_features;
+ efx_vi_window_shift_t enc_vi_window_shift;
+ uint8_t enc_mac_addr[6];
+ uint8_t enc_port; /* PHY port number */
+ uint32_t enc_intr_vec_base;
+ uint32_t enc_intr_limit;
+ uint32_t enc_evq_limit;
+ uint32_t enc_txq_limit;
+ uint32_t enc_rxq_limit;
+ uint32_t enc_txq_max_ndescs;
+ uint32_t enc_buftbl_limit;
+ uint32_t enc_piobuf_limit;
+ uint32_t enc_piobuf_size;
+ uint32_t enc_piobuf_min_alloc_size;
+ uint32_t enc_evq_timer_quantum_ns;
+ uint32_t enc_evq_timer_max_us;
+ uint32_t enc_clk_mult;
+ uint32_t enc_rx_prefix_size;
+ uint32_t enc_rx_buf_align_start;
+ uint32_t enc_rx_buf_align_end;
+ uint32_t enc_rx_scale_max_exclusive_contexts;
+ /*
+ * Mask of supported hash algorithms.
+ * Hash algorithm types are used as the bit indices.
+ */
+ uint32_t enc_rx_scale_hash_alg_mask;
+ /*
+ * Indicates whether port numbers can be included to the
+ * input data for hash computation.
+ */
+ boolean_t enc_rx_scale_l4_hash_supported;
+ boolean_t enc_rx_scale_additional_modes_supported;
+#if EFSYS_OPT_LOOPBACK
+ efx_qword_t enc_loopback_types[EFX_LINK_NMODES];
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_PHY_FLAGS
+ uint32_t enc_phy_flags_mask;
+#endif /* EFSYS_OPT_PHY_FLAGS */
+#if EFSYS_OPT_PHY_LED_CONTROL
+ uint32_t enc_led_mask;
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+#if EFSYS_OPT_PHY_STATS
+ uint64_t enc_phy_stat_mask;
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_MCDI
+ uint8_t enc_mcdi_mdio_channel;
+#if EFSYS_OPT_PHY_STATS
+ uint32_t enc_mcdi_phy_stat_mask;
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_MON_STATS
+ uint32_t *enc_mcdi_sensor_maskp;
+ uint32_t enc_mcdi_sensor_mask_size;
+#endif /* EFSYS_OPT_MON_STATS */
+#endif /* EFSYS_OPT_MCDI */
+#if EFSYS_OPT_BIST
+ uint32_t enc_bist_mask;
+#endif /* EFSYS_OPT_BIST */
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+ uint32_t enc_pf;
+ uint32_t enc_vf;
+ uint32_t enc_privilege_mask;
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+ boolean_t enc_bug26807_workaround;
+ boolean_t enc_bug35388_workaround;
+ boolean_t enc_bug41750_workaround;
+ boolean_t enc_bug61265_workaround;
+ boolean_t enc_rx_batching_enabled;
+ /* Maximum number of descriptors completed in an rx event. */
+ uint32_t enc_rx_batch_max;
+ /* Number of rx descriptors the hardware requires for a push. */
+ uint32_t enc_rx_push_align;
+ /* Maximum amount of data in DMA descriptor */
+ uint32_t enc_tx_dma_desc_size_max;
+ /*
+ * Boundary which DMA descriptor data must not cross or 0 if no
+ * limitation.
+ */
+ uint32_t enc_tx_dma_desc_boundary;
+ /*
+ * Maximum number of bytes into the packet the TCP header can start for
+ * the hardware to apply TSO packet edits.
+ */
+ uint32_t enc_tx_tso_tcp_header_offset_limit;
+ boolean_t enc_fw_assisted_tso_enabled;
+ boolean_t enc_fw_assisted_tso_v2_enabled;
+ boolean_t enc_fw_assisted_tso_v2_encap_enabled;
+ /* Number of TSO contexts on the NIC (FATSOv2) */
+ uint32_t enc_fw_assisted_tso_v2_n_contexts;
+ boolean_t enc_hw_tx_insert_vlan_enabled;
+ /* Number of PFs on the NIC */
+ uint32_t enc_hw_pf_count;
+ /* Datapath firmware vadapter/vport/vswitch support */
+ boolean_t enc_datapath_cap_evb;
+ boolean_t enc_rx_disable_scatter_supported;
+ boolean_t enc_allow_set_mac_with_installed_filters;
+ boolean_t enc_enhanced_set_mac_supported;
+ boolean_t enc_init_evq_v2_supported;
+ boolean_t enc_rx_packed_stream_supported;
+ boolean_t enc_rx_var_packed_stream_supported;
+ boolean_t enc_rx_es_super_buffer_supported;
+ boolean_t enc_fw_subvariant_no_tx_csum_supported;
+ boolean_t enc_pm_and_rxdp_counters;
+ boolean_t enc_mac_stats_40g_tx_size_bins;
+ uint32_t enc_tunnel_encapsulations_supported;
+ /*
+ * NIC global maximum for unique UDP tunnel ports shared by all
+ * functions.
+ */
+ uint32_t enc_tunnel_config_udp_entries_max;
+ /* External port identifier */
+ uint8_t enc_external_port;
+ uint32_t enc_mcdi_max_payload_length;
+ /* VPD may be per-PF or global */
+ boolean_t enc_vpd_is_global;
+ /* Minimum unidirectional bandwidth in Mb/s to max out all ports */
+ uint32_t enc_required_pcie_bandwidth_mbps;
+ uint32_t enc_max_pcie_link_gen;
+ /* Firmware verifies integrity of NVRAM updates */
+ uint32_t enc_nvram_update_verify_result_supported;
+ /* Firmware support for extended MAC_STATS buffer */
+ uint32_t enc_mac_stats_nstats;
+ boolean_t enc_fec_counters;
+ boolean_t enc_hlb_counters;
+ /* Firmware support for "FLAG" and "MARK" filter actions */
+ boolean_t enc_filter_action_flag_supported;
+ boolean_t enc_filter_action_mark_supported;
+ uint32_t enc_filter_action_mark_max;
+} efx_nic_cfg_t;
+
+#define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff)
+#define EFX_PCI_FUNCTION_IS_VF(_encp) ((_encp)->enc_vf != 0xffff)
+
+#define EFX_PCI_FUNCTION(_encp) \
+ (EFX_PCI_FUNCTION_IS_PF(_encp) ? (_encp)->enc_pf : (_encp)->enc_vf)
+
+#define EFX_PCI_VF_PARENT(_encp) ((_encp)->enc_pf)
+
+extern const efx_nic_cfg_t *
+efx_nic_cfg_get(
+ __in efx_nic_t *enp);
+
+/* RxDPCPU firmware id values by which FW variant can be identified */
+#define EFX_RXDP_FULL_FEATURED_FW_ID 0x0
+#define EFX_RXDP_LOW_LATENCY_FW_ID 0x1
+#define EFX_RXDP_PACKED_STREAM_FW_ID 0x2
+#define EFX_RXDP_RULES_ENGINE_FW_ID 0x5
+#define EFX_RXDP_DPDK_FW_ID 0x6
+
+typedef struct efx_nic_fw_info_s {
+ /* Basic FW version information */
+ uint16_t enfi_mc_fw_version[4];
+ /*
+ * If datapath capabilities can be detected,
+ * additional FW information is to be shown
+ */
+ boolean_t enfi_dpcpu_fw_ids_valid;
+ /* Rx and Tx datapath CPU FW IDs */
+ uint16_t enfi_rx_dpcpu_fw_id;
+ uint16_t enfi_tx_dpcpu_fw_id;
+} efx_nic_fw_info_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_get_fw_version(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_info_t *enfip);
+
+/* Driver resource limits (minimum required/maximum usable). */
+typedef struct efx_drv_limits_s {
+ uint32_t edl_min_evq_count;
+ uint32_t edl_max_evq_count;
+
+ uint32_t edl_min_rxq_count;
+ uint32_t edl_max_rxq_count;
+
+ uint32_t edl_min_txq_count;
+ uint32_t edl_max_txq_count;
+
+ /* PIO blocks (sub-allocated from piobuf) */
+ uint32_t edl_min_pio_alloc_size;
+ uint32_t edl_max_pio_alloc_count;
+} efx_drv_limits_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp);
+
+typedef enum efx_nic_region_e {
+ EFX_REGION_VI, /* Memory BAR UC mapping */
+ EFX_REGION_PIO_WRITE_VI, /* Memory BAR WC mapping */
+} efx_nic_region_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *evq_countp,
+ __out uint32_t *rxq_countp,
+ __out uint32_t *txq_countp);
+
+
+#if EFSYS_OPT_VPD
+
+typedef enum efx_vpd_tag_e {
+ EFX_VPD_ID = 0x02,
+ EFX_VPD_END = 0x0f,
+ EFX_VPD_RO = 0x10,
+ EFX_VPD_RW = 0x11,
+} efx_vpd_tag_t;
+
+typedef uint16_t efx_vpd_keyword_t;
+
+typedef struct efx_vpd_value_s {
+ efx_vpd_tag_t evv_tag;
+ efx_vpd_keyword_t evv_keyword;
+ uint8_t evv_length;
+ uint8_t evv_value[0x100];
+} efx_vpd_value_t;
+
+
+#define EFX_VPD_KEYWORD(x, y) ((x) | ((y) << 8))
+
+extern __checkReturn efx_rc_t
+efx_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_set(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_next(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+efx_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+/* NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef enum efx_nvram_type_e {
+ EFX_NVRAM_INVALID = 0,
+ EFX_NVRAM_BOOTROM,
+ EFX_NVRAM_BOOTROM_CFG,
+ EFX_NVRAM_MC_FIRMWARE,
+ EFX_NVRAM_MC_GOLDEN,
+ EFX_NVRAM_PHY,
+ EFX_NVRAM_NULLPHY,
+ EFX_NVRAM_FPGA,
+ EFX_NVRAM_FCFW,
+ EFX_NVRAM_CPLD,
+ EFX_NVRAM_FPGA_BACKUP,
+ EFX_NVRAM_DYNAMIC_CFG,
+ EFX_NVRAM_LICENSE,
+ EFX_NVRAM_UEFIROM,
+ EFX_NVRAM_MUM_FIRMWARE,
+ EFX_NVRAM_NTYPES,
+} efx_nvram_type_t;
+
+extern __checkReturn efx_rc_t
+efx_nvram_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+efx_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+efx_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt size_t *pref_chunkp);
+
+extern __checkReturn efx_rc_t
+efx_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt uint32_t *verify_resultp);
+
+extern __checkReturn efx_rc_t
+efx_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+efx_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_nvram_read_backup(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+efx_nvram_validate(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size);
+
+extern __checkReturn efx_rc_t
+efx_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+efx_nvram_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_BOOTCFG
+
+/* Report size and offset of bootcfg sector in NVRAM partition. */
+extern __checkReturn efx_rc_t
+efx_bootcfg_sector_info(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __out_opt uint32_t *sector_countp,
+ __out size_t *offsetp,
+ __out size_t *max_sizep);
+
+/*
+ * Copy bootcfg sector data to a target buffer which may differ in size.
+ * Optionally corrects format errors in source buffer.
+ */
+extern efx_rc_t
+efx_bootcfg_copy_sector(
+ __in efx_nic_t *enp,
+ __inout_bcount(sector_length)
+ uint8_t *sector,
+ __in size_t sector_length,
+ __out_bcount(data_size) uint8_t *data,
+ __in size_t data_size,
+ __in boolean_t handle_format_errors);
+
+extern efx_rc_t
+efx_bootcfg_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) uint8_t *data,
+ __in size_t size);
+
+extern efx_rc_t
+efx_bootcfg_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) uint8_t *data,
+ __in size_t size);
+
+#endif /* EFSYS_OPT_BOOTCFG */
+
+#if EFSYS_OPT_IMAGE_LAYOUT
+
+#include "ef10_signed_image_layout.h"
+
+/*
+ * Image header used in unsigned and signed image layouts (see SF-102785-PS).
+ *
+ * NOTE:
+ * The image header format is extensible. However, older drivers require an
+ * exact match of image header version and header length when validating and
+ * writing firmware images.
+ *
+ * To avoid breaking backward compatibility, we use the upper bits of the
+ * controller version fields to contain an extra version number used for
+ * combined bootROM and UEFI ROM images on EF10 and later (to hold the UEFI ROM
+ * version). See bug39254 and SF-102785-PS for details.
+ */
+typedef struct efx_image_header_s {
+ uint32_t eih_magic;
+ uint32_t eih_version;
+ uint32_t eih_type;
+ uint32_t eih_subtype;
+ uint32_t eih_code_size;
+ uint32_t eih_size;
+ union {
+ uint32_t eih_controller_version_min;
+ struct {
+ uint16_t eih_controller_version_min_short;
+ uint8_t eih_extra_version_a;
+ uint8_t eih_extra_version_b;
+ };
+ };
+ union {
+ uint32_t eih_controller_version_max;
+ struct {
+ uint16_t eih_controller_version_max_short;
+ uint8_t eih_extra_version_c;
+ uint8_t eih_extra_version_d;
+ };
+ };
+ uint16_t eih_code_version_a;
+ uint16_t eih_code_version_b;
+ uint16_t eih_code_version_c;
+ uint16_t eih_code_version_d;
+} efx_image_header_t;
+
+#define EFX_IMAGE_HEADER_SIZE (40)
+#define EFX_IMAGE_HEADER_VERSION (4)
+#define EFX_IMAGE_HEADER_MAGIC (0x106F1A5)
+
+
+typedef struct efx_image_trailer_s {
+ uint32_t eit_crc;
+} efx_image_trailer_t;
+
+#define EFX_IMAGE_TRAILER_SIZE (4)
+
+typedef enum efx_image_format_e {
+ EFX_IMAGE_FORMAT_NO_IMAGE,
+ EFX_IMAGE_FORMAT_INVALID,
+ EFX_IMAGE_FORMAT_UNSIGNED,
+ EFX_IMAGE_FORMAT_SIGNED,
+} efx_image_format_t;
+
+typedef struct efx_image_info_s {
+ efx_image_format_t eii_format;
+ uint8_t * eii_imagep;
+ size_t eii_image_size;
+ efx_image_header_t * eii_headerp;
+} efx_image_info_t;
+
+extern __checkReturn efx_rc_t
+efx_check_reflash_image(
+ __in void *bufferp,
+ __in uint32_t buffer_size,
+ __out efx_image_info_t *infop);
+
+extern __checkReturn efx_rc_t
+efx_build_signed_image_write_buffer(
+ __out uint8_t *bufferp,
+ __in uint32_t buffer_size,
+ __in efx_image_info_t *infop,
+ __out efx_image_header_t **headerpp);
+
+#endif /* EFSYS_OPT_IMAGE_LAYOUT */
+
+#if EFSYS_OPT_DIAG
+
+typedef enum efx_pattern_type_t {
+ EFX_PATTERN_BYTE_INCREMENT = 0,
+ EFX_PATTERN_ALL_THE_SAME,
+ EFX_PATTERN_BIT_ALTERNATE,
+ EFX_PATTERN_BYTE_ALTERNATE,
+ EFX_PATTERN_BYTE_CHANGING,
+ EFX_PATTERN_BIT_SWEEP,
+ EFX_PATTERN_NTYPES
+} efx_pattern_type_t;
+
+typedef void
+(*efx_sram_pattern_fn_t)(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp);
+
+extern __checkReturn efx_rc_t
+efx_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_pattern_type_t type);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+efx_sram_buf_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in efsys_mem_t *esmp,
+ __in size_t n);
+
+extern void
+efx_sram_buf_tbl_clear(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in size_t n);
+
+#define EFX_BUF_TBL_SIZE 0x20000
+
+#define EFX_BUF_SIZE 4096
+
+/* EV */
+
+typedef struct efx_evq_s efx_evq_t;
+
+#if EFSYS_OPT_QSTATS
+
+/* START MKCONFIG GENERATED EfxHeaderEventQueueBlock 6f3843f5fe7cc843 */
+typedef enum efx_ev_qstat_e {
+ EV_ALL,
+ EV_RX,
+ EV_RX_OK,
+ EV_RX_FRM_TRUNC,
+ EV_RX_TOBE_DISC,
+ EV_RX_PAUSE_FRM_ERR,
+ EV_RX_BUF_OWNER_ID_ERR,
+ EV_RX_IPV4_HDR_CHKSUM_ERR,
+ EV_RX_TCP_UDP_CHKSUM_ERR,
+ EV_RX_ETH_CRC_ERR,
+ EV_RX_IP_FRAG_ERR,
+ EV_RX_MCAST_PKT,
+ EV_RX_MCAST_HASH_MATCH,
+ EV_RX_TCP_IPV4,
+ EV_RX_TCP_IPV6,
+ EV_RX_UDP_IPV4,
+ EV_RX_UDP_IPV6,
+ EV_RX_OTHER_IPV4,
+ EV_RX_OTHER_IPV6,
+ EV_RX_NON_IP,
+ EV_RX_BATCH,
+ EV_TX,
+ EV_TX_WQ_FF_FULL,
+ EV_TX_PKT_ERR,
+ EV_TX_PKT_TOO_BIG,
+ EV_TX_UNEXPECTED,
+ EV_GLOBAL,
+ EV_GLOBAL_MNT,
+ EV_DRIVER,
+ EV_DRIVER_SRM_UPD_DONE,
+ EV_DRIVER_TX_DESCQ_FLS_DONE,
+ EV_DRIVER_RX_DESCQ_FLS_DONE,
+ EV_DRIVER_RX_DESCQ_FLS_FAILED,
+ EV_DRIVER_RX_DSC_ERROR,
+ EV_DRIVER_TX_DSC_ERROR,
+ EV_DRV_GEN,
+ EV_MCDI_RESPONSE,
+ EV_NQSTATS
+} efx_ev_qstat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderEventQueueBlock */
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern __checkReturn efx_rc_t
+efx_ev_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_ev_fini(
+ __in efx_nic_t *enp);
+
+#define EFX_EVQ_MAXNEVS 32768
+#define EFX_EVQ_MINNEVS 512
+
+#define EFX_EVQ_SIZE(_nevs) ((_nevs) * sizeof (efx_qword_t))
+#define EFX_EVQ_NBUFS(_nevs) (EFX_EVQ_SIZE(_nevs) / EFX_BUF_SIZE)
+
+#define EFX_EVQ_FLAGS_TYPE_MASK (0x3)
+#define EFX_EVQ_FLAGS_TYPE_AUTO (0x0)
+#define EFX_EVQ_FLAGS_TYPE_THROUGHPUT (0x1)
+#define EFX_EVQ_FLAGS_TYPE_LOW_LATENCY (0x2)
+
+#define EFX_EVQ_FLAGS_NOTIFY_MASK (0xC)
+#define EFX_EVQ_FLAGS_NOTIFY_INTERRUPT (0x0) /* Interrupting (default) */
+#define EFX_EVQ_FLAGS_NOTIFY_DISABLED (0x4) /* Non-interrupting */
+
+extern __checkReturn efx_rc_t
+efx_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __deref_out efx_evq_t **eepp);
+
+extern void
+efx_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+typedef __checkReturn boolean_t
+(*efx_initialized_ev_t)(
+ __in_opt void *arg);
+
+#define EFX_PKT_UNICAST 0x0004
+#define EFX_PKT_START 0x0008
+
+#define EFX_PKT_VLAN_TAGGED 0x0010
+#define EFX_CKSUM_TCPUDP 0x0020
+#define EFX_CKSUM_IPV4 0x0040
+#define EFX_PKT_CONT 0x0080
+
+#define EFX_CHECK_VLAN 0x0100
+#define EFX_PKT_TCP 0x0200
+#define EFX_PKT_UDP 0x0400
+#define EFX_PKT_IPV4 0x0800
+
+#define EFX_PKT_IPV6 0x1000
+#define EFX_PKT_PREFIX_LEN 0x2000
+#define EFX_ADDR_MISMATCH 0x4000
+#define EFX_DISCARD 0x8000
+
+/*
+ * The following flags are used only for packed stream
+ * mode. The values for the flags are reused to fit into 16 bit,
+ * since EFX_PKT_START and EFX_PKT_CONT are never used in
+ * packed stream mode
+ */
+#define EFX_PKT_PACKED_STREAM_NEW_BUFFER EFX_PKT_START
+#define EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE EFX_PKT_CONT
+
+
+#define EFX_EV_RX_NLABELS 32
+#define EFX_EV_TX_NLABELS 32
+
+typedef __checkReturn boolean_t
+(*efx_rx_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id,
+ __in uint32_t size,
+ __in uint16_t flags);
+
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+
+/*
+ * Packed stream mode is documented in SF-112241-TC.
+ * The general idea is that, instead of putting each incoming
+ * packet into a separate buffer which is specified in a RX
+ * descriptor, a large buffer is provided to the hardware and
+ * packets are put there in a continuous stream.
+ * The main advantage of such an approach is that RX queue refilling
+ * happens much less frequently.
+ *
+ * Equal stride packed stream mode is documented in SF-119419-TC.
+ * The general idea is to utilize advantages of the packed stream,
+ * but avoid indirection in packets representation.
+ * The main advantage of such an approach is that RX queue refilling
+ * happens much less frequently and packets buffers are independent
+ * from upper layers point of view.
+ */
+
+typedef __checkReturn boolean_t
+(*efx_rx_ps_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id,
+ __in uint32_t pkt_count,
+ __in uint16_t flags);
+
+#endif
+
+typedef __checkReturn boolean_t
+(*efx_tx_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id);
+
+#define EFX_EXCEPTION_RX_RECOVERY 0x00000001
+#define EFX_EXCEPTION_RX_DSC_ERROR 0x00000002
+#define EFX_EXCEPTION_TX_DSC_ERROR 0x00000003
+#define EFX_EXCEPTION_UNKNOWN_SENSOREVT 0x00000004
+#define EFX_EXCEPTION_FWALERT_SRAM 0x00000005
+#define EFX_EXCEPTION_UNKNOWN_FWALERT 0x00000006
+#define EFX_EXCEPTION_RX_ERROR 0x00000007
+#define EFX_EXCEPTION_TX_ERROR 0x00000008
+#define EFX_EXCEPTION_EV_ERROR 0x00000009
+
+typedef __checkReturn boolean_t
+(*efx_exception_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t data);
+
+typedef __checkReturn boolean_t
+(*efx_rxq_flush_done_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t rxq_index);
+
+typedef __checkReturn boolean_t
+(*efx_rxq_flush_failed_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t rxq_index);
+
+typedef __checkReturn boolean_t
+(*efx_txq_flush_done_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t txq_index);
+
+typedef __checkReturn boolean_t
+(*efx_software_ev_t)(
+ __in_opt void *arg,
+ __in uint16_t magic);
+
+typedef __checkReturn boolean_t
+(*efx_sram_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t code);
+
+#define EFX_SRAM_CLEAR 0
+#define EFX_SRAM_UPDATE 1
+#define EFX_SRAM_ILLEGAL_CLEAR 2
+
+typedef __checkReturn boolean_t
+(*efx_wake_up_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_timer_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_link_change_ev_t)(
+ __in_opt void *arg,
+ __in efx_link_mode_t link_mode);
+
+#if EFSYS_OPT_MON_STATS
+
+typedef __checkReturn boolean_t
+(*efx_monitor_ev_t)(
+ __in_opt void *arg,
+ __in efx_mon_stat_t id,
+ __in efx_mon_stat_value_t value);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#if EFSYS_OPT_MAC_STATS
+
+typedef __checkReturn boolean_t
+(*efx_mac_stats_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t generation);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+typedef struct efx_ev_callbacks_s {
+ efx_initialized_ev_t eec_initialized;
+ efx_rx_ev_t eec_rx;
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+ efx_rx_ps_ev_t eec_rx_ps;
+#endif
+ efx_tx_ev_t eec_tx;
+ efx_exception_ev_t eec_exception;
+ efx_rxq_flush_done_ev_t eec_rxq_flush_done;
+ efx_rxq_flush_failed_ev_t eec_rxq_flush_failed;
+ efx_txq_flush_done_ev_t eec_txq_flush_done;
+ efx_software_ev_t eec_software;
+ efx_sram_ev_t eec_sram;
+ efx_wake_up_ev_t eec_wake_up;
+ efx_timer_ev_t eec_timer;
+ efx_link_change_ev_t eec_link_change;
+#if EFSYS_OPT_MON_STATS
+ efx_monitor_ev_t eec_monitor;
+#endif /* EFSYS_OPT_MON_STATS */
+#if EFSYS_OPT_MAC_STATS
+ efx_mac_stats_ev_t eec_mac_stats;
+#endif /* EFSYS_OPT_MAC_STATS */
+} efx_ev_callbacks_t;
+
+extern __checkReturn boolean_t
+efx_ev_qpending(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#if EFSYS_OPT_EV_PREFETCH
+
+extern void
+efx_ev_qprefetch(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+extern void
+efx_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+extern __checkReturn efx_rc_t
+efx_ev_usecs_to_ticks(
+ __in efx_nic_t *enp,
+ __in unsigned int usecs,
+ __out unsigned int *ticksp);
+
+extern __checkReturn efx_rc_t
+efx_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+extern __checkReturn efx_rc_t
+efx_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#if EFSYS_OPT_QSTATS
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_ev_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern void
+efx_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern void
+efx_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+/* RX */
+
+extern __checkReturn efx_rc_t
+efx_rx_init(
+ __inout efx_nic_t *enp);
+
+extern void
+efx_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+efx_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+/* Handle to represent use of the default RSS context. */
+#define EFX_RSS_CONTEXT_DEFAULT 0xffffffff
+
+#if EFSYS_OPT_RX_SCALE
+
+typedef enum efx_rx_hash_alg_e {
+ EFX_RX_HASHALG_LFSR = 0,
+ EFX_RX_HASHALG_TOEPLITZ,
+ EFX_RX_HASHALG_PACKED_STREAM,
+ EFX_RX_NHASHALGS
+} efx_rx_hash_alg_t;
+
+/*
+ * Legacy hash type flags.
+ *
+ * They represent standard tuples for distinct traffic classes.
+ */
+#define EFX_RX_HASH_IPV4 (1U << 0)
+#define EFX_RX_HASH_TCPIPV4 (1U << 1)
+#define EFX_RX_HASH_IPV6 (1U << 2)
+#define EFX_RX_HASH_TCPIPV6 (1U << 3)
+
+#define EFX_RX_HASH_LEGACY_MASK \
+ (EFX_RX_HASH_IPV4 | \
+ EFX_RX_HASH_TCPIPV4 | \
+ EFX_RX_HASH_IPV6 | \
+ EFX_RX_HASH_TCPIPV6)
+
+/*
+ * The type of the argument used by efx_rx_scale_mode_set() to
+ * provide a means for the client drivers to configure hashing.
+ *
+ * A properly constructed value can either be:
+ * - a combination of legacy flags
+ * - a combination of EFX_RX_HASH() flags
+ */
+typedef unsigned int efx_rx_hash_type_t;
+
+typedef enum efx_rx_hash_support_e {
+ EFX_RX_HASH_UNAVAILABLE = 0, /* Hardware hash not inserted */
+ EFX_RX_HASH_AVAILABLE /* Insert hash with/without RSS */
+} efx_rx_hash_support_t;
+
+#define EFX_RSS_KEY_SIZE 40 /* RSS key size (bytes) */
+#define EFX_RSS_TBL_SIZE 128 /* Rows in RX indirection table */
+#define EFX_MAXRSS 64 /* RX indirection entry range */
+#define EFX_MAXRSS_LEGACY 16 /* See bug16611 and bug17213 */
+
+typedef enum efx_rx_scale_context_type_e {
+ EFX_RX_SCALE_UNAVAILABLE = 0, /* No RX scale context */
+ EFX_RX_SCALE_EXCLUSIVE, /* Writable key/indirection table */
+ EFX_RX_SCALE_SHARED /* Read-only key/indirection table */
+} efx_rx_scale_context_type_t;
+
+/*
+ * Traffic classes eligible for hash computation.
+ *
+ * Select packet headers used in computing the receive hash.
+ * This uses the same encoding as the RSS_MODES field of
+ * MC_CMD_RSS_CONTEXT_SET_FLAGS.
+ */
+#define EFX_RX_CLASS_IPV4_TCP_LBN 8
+#define EFX_RX_CLASS_IPV4_TCP_WIDTH 4
+#define EFX_RX_CLASS_IPV4_UDP_LBN 12
+#define EFX_RX_CLASS_IPV4_UDP_WIDTH 4
+#define EFX_RX_CLASS_IPV4_LBN 16
+#define EFX_RX_CLASS_IPV4_WIDTH 4
+#define EFX_RX_CLASS_IPV6_TCP_LBN 20
+#define EFX_RX_CLASS_IPV6_TCP_WIDTH 4
+#define EFX_RX_CLASS_IPV6_UDP_LBN 24
+#define EFX_RX_CLASS_IPV6_UDP_WIDTH 4
+#define EFX_RX_CLASS_IPV6_LBN 28
+#define EFX_RX_CLASS_IPV6_WIDTH 4
+
+#define EFX_RX_NCLASSES 6
+
+/*
+ * Ancillary flags used to construct generic hash tuples.
+ * This uses the same encoding as RSS_MODE_HASH_SELECTOR.
+ */
+#define EFX_RX_CLASS_HASH_SRC_ADDR (1U << 0)
+#define EFX_RX_CLASS_HASH_DST_ADDR (1U << 1)
+#define EFX_RX_CLASS_HASH_SRC_PORT (1U << 2)
+#define EFX_RX_CLASS_HASH_DST_PORT (1U << 3)
+
+/*
+ * Generic hash tuples.
+ *
+ * They express combinations of packet fields
+ * which can contribute to the hash value for
+ * a particular traffic class.
+ */
+#define EFX_RX_CLASS_HASH_DISABLE 0
+
+#define EFX_RX_CLASS_HASH_1TUPLE_SRC EFX_RX_CLASS_HASH_SRC_ADDR
+#define EFX_RX_CLASS_HASH_1TUPLE_DST EFX_RX_CLASS_HASH_DST_ADDR
+
+#define EFX_RX_CLASS_HASH_2TUPLE \
+ (EFX_RX_CLASS_HASH_SRC_ADDR | \
+ EFX_RX_CLASS_HASH_DST_ADDR)
+
+#define EFX_RX_CLASS_HASH_2TUPLE_SRC \
+ (EFX_RX_CLASS_HASH_SRC_ADDR | \
+ EFX_RX_CLASS_HASH_SRC_PORT)
+
+#define EFX_RX_CLASS_HASH_2TUPLE_DST \
+ (EFX_RX_CLASS_HASH_DST_ADDR | \
+ EFX_RX_CLASS_HASH_DST_PORT)
+
+#define EFX_RX_CLASS_HASH_4TUPLE \
+ (EFX_RX_CLASS_HASH_SRC_ADDR | \
+ EFX_RX_CLASS_HASH_DST_ADDR | \
+ EFX_RX_CLASS_HASH_SRC_PORT | \
+ EFX_RX_CLASS_HASH_DST_PORT)
+
+#define EFX_RX_CLASS_HASH_NTUPLES 7
+
+/*
+ * Hash flag constructor.
+ *
+ * Resulting flags encode hash tuples for specific traffic classes.
+ * The client drivers are encouraged to use these flags to form
+ * a hash type value.
+ */
+#define EFX_RX_HASH(_class, _tuple) \
+ EFX_INSERT_FIELD_NATIVE32(0, 31, \
+ EFX_RX_CLASS_##_class, EFX_RX_CLASS_HASH_##_tuple)
+
+/*
+ * The maximum number of EFX_RX_HASH() flags.
+ */
+#define EFX_RX_HASH_NFLAGS (EFX_RX_NCLASSES * EFX_RX_CLASS_HASH_NTUPLES)
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_hash_flags_get(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t hash_alg,
+ __inout_ecount(EFX_RX_HASH_NFLAGS) unsigned int *flags,
+ __out unsigned int *nflagsp);
+
+extern __checkReturn efx_rc_t
+efx_rx_hash_default_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_hash_support_t *supportp);
+
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_default_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_scale_context_type_t *typep);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_context_alloc(
+ __in efx_nic_t *enp,
+ __in efx_rx_scale_context_type_t type,
+ __in uint32_t num_queues,
+ __out uint32_t *rss_contextp);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_context_free(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+extern __checkReturn uint32_t
+efx_pseudo_hdr_hash_get(
+ __in efx_rxq_t *erp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+extern __checkReturn efx_rc_t
+efx_pseudo_hdr_pkt_length_get(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __out uint16_t *pkt_lengthp);
+
+#define EFX_RXQ_MAXNDESCS 4096
+#define EFX_RXQ_MINNDESCS 512
+
+#define EFX_RXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t))
+#define EFX_RXQ_NBUFS(_ndescs) (EFX_RXQ_SIZE(_ndescs) / EFX_BUF_SIZE)
+#define EFX_RXQ_LIMIT(_ndescs) ((_ndescs) - 16)
+#define EFX_RXQ_DC_NDESCS(_dcsize) (8 << _dcsize)
+
+typedef enum efx_rxq_type_e {
+ EFX_RXQ_TYPE_DEFAULT,
+ EFX_RXQ_TYPE_PACKED_STREAM,
+ EFX_RXQ_TYPE_ES_SUPER_BUFFER,
+ EFX_RXQ_NTYPES
+} efx_rxq_type_t;
+
+/*
+ * Dummy flag to be used instead of 0 to make it clear that the argument
+ * is receive queue flags.
+ */
+#define EFX_RXQ_FLAG_NONE 0x0
+#define EFX_RXQ_FLAG_SCATTER 0x1
+/*
+ * If tunnels are supported and Rx event can provide information about
+ * either outer or inner packet classes (e.g. SFN8xxx adapters with
+ * full-feature firmware variant running), outer classes are requested by
+ * default. However, if the driver supports tunnels, the flag allows to
+ * request inner classes which are required to be able to interpret inner
+ * Rx checksum offload results.
+ */
+#define EFX_RXQ_FLAG_INNER_CLASSES 0x2
+
+extern __checkReturn efx_rc_t
+efx_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M (1U * 1024 * 1024)
+#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_512K (512U * 1024)
+#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_256K (256U * 1024)
+#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_128K (128U * 1024)
+#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_64K (64U * 1024)
+
+extern __checkReturn efx_rc_t
+efx_rx_qcreate_packed_stream(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in uint32_t ps_buf_size,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp);
+
+#endif
+
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+
+/* Maximum head-of-line block timeout in nanoseconds */
+#define EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX (400U * 1000 * 1000)
+
+extern __checkReturn efx_rc_t
+efx_rx_qcreate_es_super_buffer(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in uint32_t n_bufs_per_desc,
+ __in uint32_t max_dma_len,
+ __in uint32_t buf_stride,
+ __in uint32_t hol_block_timeout,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp);
+
+#endif
+
+typedef struct efx_buffer_s {
+ efsys_dma_addr_t eb_addr;
+ size_t eb_size;
+ boolean_t eb_eop;
+} efx_buffer_t;
+
+typedef struct efx_desc_s {
+ efx_qword_t ed_eq;
+} efx_desc_t;
+
+extern void
+efx_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(ndescs) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+extern void
+efx_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+extern void
+efx_rx_qpush_ps_credits(
+ __in efx_rxq_t *erp);
+
+extern __checkReturn uint8_t *
+efx_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp);
+#endif
+
+extern __checkReturn efx_rc_t
+efx_rx_qflush(
+ __in efx_rxq_t *erp);
+
+extern void
+efx_rx_qenable(
+ __in efx_rxq_t *erp);
+
+extern void
+efx_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+/* TX */
+
+typedef struct efx_txq_s efx_txq_t;
+
+#if EFSYS_OPT_QSTATS
+
+/* START MKCONFIG GENERATED EfxHeaderTransmitQueueBlock 12dff8778598b2db */
+typedef enum efx_tx_qstat_e {
+ TX_POST,
+ TX_POST_PIO,
+ TX_NQSTATS
+} efx_tx_qstat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderTransmitQueueBlock */
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern __checkReturn efx_rc_t
+efx_tx_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_tx_fini(
+ __in efx_nic_t *enp);
+
+#define EFX_TXQ_MINNDESCS 512
+
+#define EFX_TXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t))
+#define EFX_TXQ_NBUFS(_ndescs) (EFX_TXQ_SIZE(_ndescs) / EFX_BUF_SIZE)
+#define EFX_TXQ_LIMIT(_ndescs) ((_ndescs) - 16)
+
+#define EFX_TXQ_MAX_BUFS 8 /* Maximum independent of EFX_BUG35388_WORKAROUND. */
+
+#define EFX_TXQ_CKSUM_IPV4 0x0001
+#define EFX_TXQ_CKSUM_TCPUDP 0x0002
+#define EFX_TXQ_FATSOV2 0x0004
+#define EFX_TXQ_CKSUM_INNER_IPV4 0x0008
+#define EFX_TXQ_CKSUM_INNER_TCPUDP 0x0010
+
+extern __checkReturn efx_rc_t
+efx_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_txq_t **etpp,
+ __out unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_buffer_t *eb,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+extern void
+efx_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+extern __checkReturn efx_rc_t
+efx_tx_qflush(
+ __in efx_txq_t *etp);
+
+extern void
+efx_tx_qenable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_enable(
+ __in efx_txq_t *etp);
+
+extern void
+efx_tx_qpio_disable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+efx_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+extern void
+efx_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp);
+
+/* Number of FATSOv2 option descriptors */
+#define EFX_TX_FATSOV2_OPT_NDESCS 2
+
+/* Maximum number of DMA segments per TSO packet (not superframe) */
+#define EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX 24
+
+extern void
+efx_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint16_t outer_ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count);
+
+extern void
+efx_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp);
+
+extern void
+efx_tx_qdesc_checksum_create(
+ __in efx_txq_t *etp,
+ __in uint16_t flags,
+ __out efx_desc_t *edp);
+
+#if EFSYS_OPT_QSTATS
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_tx_qstat_name(
+ __in efx_nic_t *etp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern void
+efx_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern void
+efx_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+
+/* FILTER */
+
+#if EFSYS_OPT_FILTER
+
+#define EFX_ETHER_TYPE_IPV4 0x0800
+#define EFX_ETHER_TYPE_IPV6 0x86DD
+
+#define EFX_IPPROTO_TCP 6
+#define EFX_IPPROTO_UDP 17
+#define EFX_IPPROTO_GRE 47
+
+/* Use RSS to spread across multiple queues */
+#define EFX_FILTER_FLAG_RX_RSS 0x01
+/* Enable RX scatter */
+#define EFX_FILTER_FLAG_RX_SCATTER 0x02
+/*
+ * Override an automatic filter (priority EFX_FILTER_PRI_AUTO).
+ * May only be set by the filter implementation for each type.
+ * A removal request will restore the automatic filter in its place.
+ */
+#define EFX_FILTER_FLAG_RX_OVER_AUTO 0x04
+/* Filter is for RX */
+#define EFX_FILTER_FLAG_RX 0x08
+/* Filter is for TX */
+#define EFX_FILTER_FLAG_TX 0x10
+/* Set match flag on the received packet */
+#define EFX_FILTER_FLAG_ACTION_FLAG 0x20
+/* Set match mark on the received packet */
+#define EFX_FILTER_FLAG_ACTION_MARK 0x40
+
+typedef uint8_t efx_filter_flags_t;
+
+/*
+ * Flags which specify the fields to match on. The values are the same as in the
+ * MC_CMD_FILTER_OP/MC_CMD_FILTER_OP_EXT commands.
+ */
+
+/* Match by remote IP host address */
+#define EFX_FILTER_MATCH_REM_HOST 0x00000001
+/* Match by local IP host address */
+#define EFX_FILTER_MATCH_LOC_HOST 0x00000002
+/* Match by remote MAC address */
+#define EFX_FILTER_MATCH_REM_MAC 0x00000004
+/* Match by remote TCP/UDP port */
+#define EFX_FILTER_MATCH_REM_PORT 0x00000008
+/* Match by remote TCP/UDP port */
+#define EFX_FILTER_MATCH_LOC_MAC 0x00000010
+/* Match by local TCP/UDP port */
+#define EFX_FILTER_MATCH_LOC_PORT 0x00000020
+/* Match by Ether-type */
+#define EFX_FILTER_MATCH_ETHER_TYPE 0x00000040
+/* Match by inner VLAN ID */
+#define EFX_FILTER_MATCH_INNER_VID 0x00000080
+/* Match by outer VLAN ID */
+#define EFX_FILTER_MATCH_OUTER_VID 0x00000100
+/* Match by IP transport protocol */
+#define EFX_FILTER_MATCH_IP_PROTO 0x00000200
+/* Match by VNI or VSID */
+#define EFX_FILTER_MATCH_VNI_OR_VSID 0x00000800
+/* For encapsulated packets, match by inner frame local MAC address */
+#define EFX_FILTER_MATCH_IFRM_LOC_MAC 0x00010000
+/* For encapsulated packets, match all multicast inner frames */
+#define EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST 0x01000000
+/* For encapsulated packets, match all unicast inner frames */
+#define EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST 0x02000000
+/*
+ * Match by encap type, this flag does not correspond to
+ * the MCDI match flags and any unoccupied value may be used
+ */
+#define EFX_FILTER_MATCH_ENCAP_TYPE 0x20000000
+/* Match otherwise-unmatched multicast and broadcast packets */
+#define EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 0x40000000
+/* Match otherwise-unmatched unicast packets */
+#define EFX_FILTER_MATCH_UNKNOWN_UCAST_DST 0x80000000
+
+typedef uint32_t efx_filter_match_flags_t;
+
+typedef enum efx_filter_priority_s {
+ EFX_FILTER_PRI_HINT = 0, /* Performance hint */
+ EFX_FILTER_PRI_AUTO, /* Automatic filter based on device
+ * address list or hardware
+ * requirements. This may only be used
+ * by the filter implementation for
+ * each NIC type. */
+ EFX_FILTER_PRI_MANUAL, /* Manually configured filter */
+ EFX_FILTER_PRI_REQUIRED, /* Required for correct behaviour of the
+ * client (e.g. SR-IOV, HyperV VMQ etc.)
+ */
+} efx_filter_priority_t;
+
+/*
+ * FIXME: All these fields are assumed to be in little-endian byte order.
+ * It may be better for some to be big-endian. See bug42804.
+ */
+
+typedef struct efx_filter_spec_s {
+ efx_filter_match_flags_t efs_match_flags;
+ uint8_t efs_priority;
+ efx_filter_flags_t efs_flags;
+ uint16_t efs_dmaq_id;
+ uint32_t efs_rss_context;
+ uint16_t efs_outer_vid;
+ uint16_t efs_inner_vid;
+ uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN];
+ uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN];
+ uint16_t efs_ether_type;
+ uint8_t efs_ip_proto;
+ efx_tunnel_protocol_t efs_encap_type;
+ uint16_t efs_loc_port;
+ uint16_t efs_rem_port;
+ efx_oword_t efs_rem_host;
+ efx_oword_t efs_loc_host;
+ uint8_t efs_vni_or_vsid[EFX_VNI_OR_VSID_LEN];
+ uint8_t efs_ifrm_loc_mac[EFX_MAC_ADDR_LEN];
+ uint32_t efs_mark;
+} efx_filter_spec_t;
+
+
+/* Default values for use in filter specifications */
+#define EFX_FILTER_SPEC_RX_DMAQ_ID_DROP 0xfff
+#define EFX_FILTER_SPEC_VID_UNSPEC 0xffff
+
+extern __checkReturn efx_rc_t
+efx_filter_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_filter_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_filter_insert(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_remove(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_restore(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp);
+
+extern void
+efx_filter_spec_init_rx(
+ __out efx_filter_spec_t *spec,
+ __in efx_filter_priority_t priority,
+ __in efx_filter_flags_t flags,
+ __in efx_rxq_t *erp);
+
+extern void
+efx_filter_spec_init_tx(
+ __out efx_filter_spec_t *spec,
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t host,
+ __in uint16_t port);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t lhost,
+ __in uint16_t lport,
+ __in uint32_t rhost,
+ __in uint16_t rport);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_eth_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t vid,
+ __in const uint8_t *addr);
+
+extern void
+efx_filter_spec_set_ether_type(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t ether_type);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_uc_def(
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_mc_def(
+ __inout efx_filter_spec_t *spec);
+
+typedef enum efx_filter_inner_frame_match_e {
+ EFX_FILTER_INNER_FRAME_MATCH_OTHER = 0,
+ EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST,
+ EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_UCAST_DST
+} efx_filter_inner_frame_match_t;
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_encap_type(
+ __inout efx_filter_spec_t *spec,
+ __in efx_tunnel_protocol_t encap_type,
+ __in efx_filter_inner_frame_match_t inner_frame_match);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_vxlan_full(
+ __inout efx_filter_spec_t *spec,
+ __in const uint8_t *vxlan_id,
+ __in const uint8_t *inner_addr,
+ __in const uint8_t *outer_addr);
+
+#if EFSYS_OPT_RX_SCALE
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_rss_context(
+ __inout efx_filter_spec_t *spec,
+ __in uint32_t rss_context);
+#endif
+#endif /* EFSYS_OPT_FILTER */
+
+/* HASH */
+
+extern __checkReturn uint32_t
+efx_hash_dwords(
+ __in_ecount(count) uint32_t const *input,
+ __in size_t count,
+ __in uint32_t init);
+
+extern __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init);
+
+#if EFSYS_OPT_LICENSING
+
+/* LICENSING */
+
+typedef struct efx_key_stats_s {
+ uint32_t eks_valid;
+ uint32_t eks_invalid;
+ uint32_t eks_blacklisted;
+ uint32_t eks_unverifiable;
+ uint32_t eks_wrong_node;
+ uint32_t eks_licensed_apps_lo;
+ uint32_t eks_licensed_apps_hi;
+ uint32_t eks_licensed_features_lo;
+ uint32_t eks_licensed_features_hi;
+} efx_key_stats_t;
+
+extern __checkReturn efx_rc_t
+efx_lic_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_lic_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn boolean_t
+efx_lic_check_support(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_lic_update_licenses(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_lic_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *ksp);
+
+extern __checkReturn efx_rc_t
+efx_lic_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+extern __checkReturn efx_rc_t
+efx_lic_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_opt uint8_t *bufferp);
+
+
+extern __checkReturn efx_rc_t
+efx_lic_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp);
+
+extern __checkReturn efx_rc_t
+efx_lic_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp);
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp);
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length);
+
+extern __checkReturn efx_rc_t
+efx_lic_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp);
+
+extern __checkReturn efx_rc_t
+efx_lic_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp);
+
+ __checkReturn efx_rc_t
+efx_lic_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap);
+
+extern __checkReturn efx_rc_t
+efx_lic_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+extern __checkReturn efx_rc_t
+efx_lic_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+#endif /* EFSYS_OPT_LICENSING */
+
+/* TUNNEL */
+
+#if EFSYS_OPT_TUNNEL
+
+extern __checkReturn efx_rc_t
+efx_tunnel_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_tunnel_fini(
+ __in efx_nic_t *enp);
+
+/*
+ * For overlay network encapsulation using UDP, the firmware needs to know
+ * the configured UDP port for the overlay so it can decode encapsulated
+ * frames correctly.
+ * The UDP port/protocol list is global.
+ */
+
+extern __checkReturn efx_rc_t
+efx_tunnel_config_udp_add(
+ __in efx_nic_t *enp,
+ __in uint16_t port /* host/cpu-endian */,
+ __in efx_tunnel_protocol_t protocol);
+
+extern __checkReturn efx_rc_t
+efx_tunnel_config_udp_remove(
+ __in efx_nic_t *enp,
+ __in uint16_t port /* host/cpu-endian */,
+ __in efx_tunnel_protocol_t protocol);
+
+extern void
+efx_tunnel_config_clear(
+ __in efx_nic_t *enp);
+
+/**
+ * Apply tunnel UDP ports configuration to hardware.
+ *
+ * EAGAIN is returned if hardware will be reset (datapath and managment CPU
+ * reboot).
+ */
+extern __checkReturn efx_rc_t
+efx_tunnel_reconfigure(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_TUNNEL */
+
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+
+/**
+ * Firmware subvariant choice options.
+ *
+ * It may be switched to no Tx checksum if attached drivers are either
+ * preboot or firmware subvariant aware and no VIS are allocated.
+ * If may be always switched to default explicitly using set request or
+ * implicitly if unaware driver is attaching. If switching is done when
+ * a driver is attached, it gets MC_REBOOT event and should recreate its
+ * datapath.
+ *
+ * See SF-119419-TC DPDK Firmware Driver Interface and
+ * SF-109306-TC EF10 for Driver Writers for details.
+ */
+typedef enum efx_nic_fw_subvariant_e {
+ EFX_NIC_FW_SUBVARIANT_DEFAULT = 0,
+ EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM = 1,
+ EFX_NIC_FW_SUBVARIANT_NTYPES
+} efx_nic_fw_subvariant_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_get_fw_subvariant(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_subvariant_t *subvariantp);
+
+extern __checkReturn efx_rc_t
+efx_nic_set_fw_subvariant(
+ __in efx_nic_t *enp,
+ __in efx_nic_fw_subvariant_t subvariant);
+
+#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_bootcfg.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_bootcfg.c
new file mode 100644
index 00000000..715e18e8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_bootcfg.c
@@ -0,0 +1,586 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_BOOTCFG
+
+/*
+ * Maximum size of BOOTCFG block across all nics as understood by SFCgPXE.
+ * NOTE: This is larger than the Medford per-PF bootcfg sector.
+ */
+#define BOOTCFG_MAX_SIZE 0x1000
+
+/* Medford per-PF bootcfg sector */
+#define BOOTCFG_PER_PF 0x800
+#define BOOTCFG_PF_COUNT 16
+
+#define DHCP_END ((uint8_t)0xff)
+#define DHCP_PAD ((uint8_t)0)
+
+
+/* Report the layout of bootcfg sectors in NVRAM partition. */
+ __checkReturn efx_rc_t
+efx_bootcfg_sector_info(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __out_opt uint32_t *sector_countp,
+ __out size_t *offsetp,
+ __out size_t *max_sizep)
+{
+ uint32_t count;
+ size_t max_size;
+ size_t offset;
+ int rc;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ max_size = BOOTCFG_MAX_SIZE;
+ offset = 0;
+ count = 1;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ max_size = BOOTCFG_MAX_SIZE;
+ offset = 0;
+ count = 1;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD: {
+ /* Shared partition (array indexed by PF) */
+ max_size = BOOTCFG_PER_PF;
+ count = BOOTCFG_PF_COUNT;
+ if (pf >= count) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ offset = max_size * pf;
+ break;
+ }
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2: {
+ /* Shared partition (array indexed by PF) */
+ max_size = BOOTCFG_PER_PF;
+ count = BOOTCFG_PF_COUNT;
+ if (pf >= count) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ offset = max_size * pf;
+ break;
+ }
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ EFSYS_ASSERT3U(max_size, <=, BOOTCFG_MAX_SIZE);
+
+ if (sector_countp != NULL)
+ *sector_countp = count;
+ *offsetp = offset;
+ *max_sizep = max_size;
+
+ return (0);
+
+#if EFSYS_OPT_MEDFORD2
+fail3:
+ EFSYS_PROBE(fail3);
+#endif
+#if EFSYS_OPT_MEDFORD
+fail2:
+ EFSYS_PROBE(fail2);
+#endif
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+
+static __checkReturn uint8_t
+efx_bootcfg_csum(
+ __in efx_nic_t *enp,
+ __in_bcount(size) uint8_t const *data,
+ __in size_t size)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ unsigned int pos;
+ uint8_t checksum = 0;
+
+ for (pos = 0; pos < size; pos++)
+ checksum += data[pos];
+ return (checksum);
+}
+
+static __checkReturn efx_rc_t
+efx_bootcfg_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) uint8_t const *data,
+ __in size_t size,
+ __out_opt size_t *usedp)
+{
+ size_t offset = 0;
+ size_t used = 0;
+ efx_rc_t rc;
+
+ /* Start parsing tags immediately after the checksum */
+ for (offset = 1; offset < size; ) {
+ uint8_t tag;
+ uint8_t length;
+
+ /* Consume tag */
+ tag = data[offset];
+ if (tag == DHCP_END) {
+ offset++;
+ used = offset;
+ break;
+ }
+ if (tag == DHCP_PAD) {
+ offset++;
+ continue;
+ }
+
+ /* Consume length */
+ if (offset + 1 >= size) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ length = data[offset + 1];
+
+ /* Consume *length */
+ if (offset + 1 + length >= size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ offset += 2 + length;
+ used = offset;
+ }
+
+ /* Checksum the entire sector, including bytes after any DHCP_END */
+ if (efx_bootcfg_csum(enp, data, size) != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ if (usedp != NULL)
+ *usedp = used;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Copy bootcfg sector data to a target buffer which may differ in size.
+ * Optionally corrects format errors in source buffer.
+ */
+ efx_rc_t
+efx_bootcfg_copy_sector(
+ __in efx_nic_t *enp,
+ __inout_bcount(sector_length)
+ uint8_t *sector,
+ __in size_t sector_length,
+ __out_bcount(data_size) uint8_t *data,
+ __in size_t data_size,
+ __in boolean_t handle_format_errors)
+{
+ size_t used_bytes;
+ efx_rc_t rc;
+
+ /* Minimum buffer is checksum byte and DHCP_END terminator */
+ if (data_size < 2) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ /* Verify that the area is correctly formatted and checksummed */
+ rc = efx_bootcfg_verify(enp, sector, sector_length,
+ &used_bytes);
+
+ if (!handle_format_errors) {
+ if (rc != 0)
+ goto fail2;
+
+ if ((used_bytes < 2) ||
+ (sector[used_bytes - 1] != DHCP_END)) {
+ /* Block too short, or DHCP_END missing */
+ rc = ENOENT;
+ goto fail3;
+ }
+ }
+
+ /* Synthesize empty format on verification failure */
+ if (rc != 0 || used_bytes == 0) {
+ sector[0] = 0;
+ sector[1] = DHCP_END;
+ used_bytes = 2;
+ }
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ EFSYS_ASSERT(used_bytes <= sector_length);
+ EFSYS_ASSERT(sector_length >= 2);
+
+ /*
+ * Legacy bootcfg sectors don't terminate with a DHCP_END character.
+ * Modify the returned payload so it does.
+ * Reinitialise the sector if there isn't room for the character.
+ */
+ if (sector[used_bytes - 1] != DHCP_END) {
+ if (used_bytes >= sector_length) {
+ sector[0] = 0;
+ used_bytes = 1;
+ }
+ sector[used_bytes] = DHCP_END;
+ ++used_bytes;
+ }
+
+ /*
+ * Verify that the target buffer is large enough for the
+ * entire used bootcfg area, then copy into the target buffer.
+ */
+ if (used_bytes > data_size) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ data[0] = 0; /* checksum, updated below */
+
+ /* Copy all after the checksum to the target buffer */
+ memcpy(data + 1, sector + 1, used_bytes - 1);
+
+ /* Zero out the unused portion of the target buffer */
+ if (used_bytes < data_size)
+ (void) memset(data + used_bytes, 0, data_size - used_bytes);
+
+ /*
+ * The checksum includes trailing data after any DHCP_END character,
+ * which we've just modified (by truncation or appending DHCP_END).
+ */
+ data[0] -= efx_bootcfg_csum(enp, data, data_size);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ efx_rc_t
+efx_bootcfg_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) uint8_t *data,
+ __in size_t size)
+{
+ uint8_t *payload = NULL;
+ size_t used_bytes;
+ size_t partn_length;
+ size_t sector_length;
+ size_t sector_offset;
+ efx_rc_t rc;
+ uint32_t sector_number;
+
+ /* Minimum buffer is checksum byte and DHCP_END terminator */
+ if (size < 2) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+ sector_number = enp->en_nic_cfg.enc_pf;
+#else
+ sector_number = 0;
+#endif
+ rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length);
+ if (rc != 0)
+ goto fail2;
+
+ /* The bootcfg sector may be stored in a (larger) shared partition */
+ rc = efx_bootcfg_sector_info(enp, sector_number,
+ NULL, &sector_offset, &sector_length);
+ if (rc != 0)
+ goto fail3;
+
+ if (sector_length < 2) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ if (sector_length > BOOTCFG_MAX_SIZE)
+ sector_length = BOOTCFG_MAX_SIZE;
+
+ if (sector_offset + sector_length > partn_length) {
+ /* Partition is too small */
+ rc = EFBIG;
+ goto fail5;
+ }
+
+ /*
+ * We need to read the entire BOOTCFG sector to ensure we read all the
+ * tags, because legacy bootcfg sectors are not guaranteed to end with
+ * a DHCP_END character. If the user hasn't supplied a sufficiently
+ * large buffer then use our own buffer.
+ */
+ if (sector_length > size) {
+ EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload);
+ if (payload == NULL) {
+ rc = ENOMEM;
+ goto fail6;
+ }
+ } else
+ payload = (uint8_t *)data;
+
+ if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
+ goto fail7;
+
+ if ((rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
+ sector_offset, (caddr_t)payload, sector_length)) != 0) {
+ (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL);
+ goto fail8;
+ }
+
+ if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
+ goto fail9;
+
+ /* Verify that the area is correctly formatted and checksummed */
+ rc = efx_bootcfg_verify(enp, payload, sector_length,
+ &used_bytes);
+ if (rc != 0 || used_bytes == 0) {
+ payload[0] = 0;
+ payload[1] = DHCP_END;
+ used_bytes = 2;
+ }
+
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ EFSYS_ASSERT(used_bytes <= sector_length);
+
+ /*
+ * Legacy bootcfg sectors don't terminate with a DHCP_END character.
+ * Modify the returned payload so it does. BOOTCFG_MAX_SIZE is by
+ * definition large enough for any valid (per-port) bootcfg sector,
+ * so reinitialise the sector if there isn't room for the character.
+ */
+ if (payload[used_bytes - 1] != DHCP_END) {
+ if (used_bytes >= sector_length)
+ used_bytes = 1;
+
+ payload[used_bytes] = DHCP_END;
+ ++used_bytes;
+ }
+
+ /*
+ * Verify that the user supplied buffer is large enough for the
+ * entire used bootcfg area, then copy into the user supplied buffer.
+ */
+ if (used_bytes > size) {
+ rc = ENOSPC;
+ goto fail10;
+ }
+
+ data[0] = 0; /* checksum, updated below */
+
+ if (sector_length > size) {
+ /* Copy all after the checksum to the target buffer */
+ memcpy(data + 1, payload + 1, used_bytes - 1);
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
+ }
+
+ /* Zero out the unused portion of the user buffer */
+ if (used_bytes < size)
+ (void) memset(data + used_bytes, 0, size - used_bytes);
+
+ /*
+ * The checksum includes trailing data after any DHCP_END character,
+ * which we've just modified (by truncation or appending DHCP_END).
+ */
+ data[0] -= efx_bootcfg_csum(enp, data, size);
+
+ return (0);
+
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+ if (sector_length > size)
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ efx_rc_t
+efx_bootcfg_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) uint8_t *data,
+ __in size_t size)
+{
+ uint8_t *partn_data;
+ uint8_t checksum;
+ size_t partn_length;
+ size_t sector_length;
+ size_t sector_offset;
+ size_t used_bytes;
+ efx_rc_t rc;
+ uint32_t sector_number;
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+ sector_number = enp->en_nic_cfg.enc_pf;
+#else
+ sector_number = 0;
+#endif
+
+ rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length);
+ if (rc != 0)
+ goto fail1;
+
+ /* The bootcfg sector may be stored in a (larger) shared partition */
+ rc = efx_bootcfg_sector_info(enp, sector_number,
+ NULL, &sector_offset, &sector_length);
+ if (rc != 0)
+ goto fail2;
+
+ if (sector_length > BOOTCFG_MAX_SIZE)
+ sector_length = BOOTCFG_MAX_SIZE;
+
+ if (sector_offset + sector_length > partn_length) {
+ /* Partition is too small */
+ rc = EFBIG;
+ goto fail3;
+ }
+
+ if ((rc = efx_bootcfg_verify(enp, data, size, &used_bytes)) != 0)
+ goto fail4;
+
+ /* The caller *must* terminate their block with a DHCP_END character */
+ if ((used_bytes < 2) || ((uint8_t)data[used_bytes - 1] != DHCP_END)) {
+ /* Block too short or DHCP_END missing */
+ rc = ENOENT;
+ goto fail5;
+ }
+
+ /* Check that the hardware has support for this much data */
+ if (used_bytes > MIN(sector_length, BOOTCFG_MAX_SIZE)) {
+ rc = ENOSPC;
+ goto fail6;
+ }
+
+ /*
+ * If the BOOTCFG sector is stored in a shared partition, then we must
+ * read the whole partition and insert the updated bootcfg sector at the
+ * correct offset.
+ */
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_length, partn_data);
+ if (partn_data == NULL) {
+ rc = ENOMEM;
+ goto fail7;
+ }
+
+ rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL);
+ if (rc != 0)
+ goto fail8;
+
+ /* Read the entire partition */
+ rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, 0,
+ (caddr_t)partn_data, partn_length);
+ if (rc != 0)
+ goto fail9;
+
+ /*
+ * Insert the BOOTCFG sector into the partition, Zero out all data after
+ * the DHCP_END tag, and adjust the checksum.
+ */
+ (void) memset(partn_data + sector_offset, 0x0, sector_length);
+ (void) memcpy(partn_data + sector_offset, data, used_bytes);
+
+ checksum = efx_bootcfg_csum(enp, data, used_bytes);
+ partn_data[sector_offset] -= checksum;
+
+ if ((rc = efx_nvram_erase(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ goto fail10;
+
+ if ((rc = efx_nvram_write_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
+ 0, (caddr_t)partn_data, partn_length)) != 0)
+ goto fail11;
+
+ if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
+ goto fail12;
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_length, partn_data);
+
+ return (0);
+
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+
+ (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL);
+fail8:
+ EFSYS_PROBE(fail8);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_length, partn_data);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_BOOTCFG */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_check.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_check.h
new file mode 100644
index 00000000..ef5eadc6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_check.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_EFX_CHECK_H
+#define _SYS_EFX_CHECK_H
+
+#include "efsys.h"
+
+/*
+ * Check that the efsys.h header in client code has a valid combination of
+ * EFSYS_OPT_xxx options.
+ *
+ * NOTE: Keep checks for obsolete options here to ensure that they are removed
+ * from client code (and do not reappear in merges from other branches).
+ */
+
+#ifdef EFSYS_OPT_FALCON
+# error "FALCON is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_BOOTCFG
+/* Support NVRAM based boot config */
+# if !EFSYS_OPT_NVRAM
+# error "BOOTCFG requires NVRAM"
+# endif
+#endif /* EFSYS_OPT_BOOTCFG */
+
+#if EFSYS_OPT_CHECK_REG
+/* Verify chip implements accessed registers */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_CHECK_REG */
+
+#if EFSYS_OPT_DECODE_INTR_FATAL
+/* Decode fatal errors */
+# if !EFSYS_OPT_SIENA
+# error "INTR_FATAL requires SIENA"
+# endif
+#endif /* EFSYS_OPT_DECODE_INTR_FATAL */
+
+#if EFSYS_OPT_DIAG
+/* Support diagnostic hardware tests */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "DIAG requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_EV_PREFETCH
+/* Support optimized EVQ data access */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+#ifdef EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE
+# error "FALCON_NIC_CFG_OVERRIDE is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_FILTER
+/* Support hardware packet filters */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "FILTER requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_FILTER */
+
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# if !EFSYS_OPT_FILTER
+# error "HUNTINGTON or MEDFORD or MEDFORD2 requires FILTER"
+# endif
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_LOOPBACK
+/* Support hardware loopback modes */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#ifdef EFSYS_OPT_MAC_FALCON_GMAC
+# error "MAC_FALCON_GMAC is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MAC_FALCON_XMAC
+# error "MAC_FALCON_XMAC is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_MAC_STATS
+/* Support MAC statistics */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#if EFSYS_OPT_MCDI
+/* Support management controller messages */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "MCDI requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_MCDI */
+
+#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# if !EFSYS_OPT_MCDI
+# error "SIENA or HUNTINGTON or MEDFORD or MEDFORD2 requires MCDI"
+# endif
+#endif
+
+#if EFSYS_OPT_MCDI_LOGGING
+/* Support MCDI logging */
+# if !EFSYS_OPT_MCDI
+# error "MCDI_LOGGING requires MCDI"
+# endif
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+/* Support MCDI proxy authorization */
+# if !EFSYS_OPT_MCDI
+# error "MCDI_PROXY_AUTH requires MCDI"
+# endif
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+#ifdef EFSYS_OPT_MON_LM87
+# error "MON_LM87 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_MAX6647
+# error "MON_MAX6647 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_NULL
+# error "MON_NULL is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_SIENA
+# error "MON_SIENA is obsolete (replaced by MON_MCDI)."
+#endif
+
+#ifdef EFSYS_OPT_MON_HUNTINGTON
+# error "MON_HUNTINGTON is obsolete (replaced by MON_MCDI)."
+#endif
+
+#if EFSYS_OPT_MON_STATS
+/* Support monitor statistics (voltage/temperature) */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_MON_STATS */
+
+#if EFSYS_OPT_MON_MCDI
+/* Support Monitor via mcdi */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_MON_MCDI*/
+
+#if EFSYS_OPT_NAMES
+/* Support printable names for statistics */
+# if !(EFSYS_OPT_LOOPBACK || EFSYS_OPT_MAC_STATS || EFSYS_OPT_MCDI || \
+ EFSYS_MON_STATS || EFSYS_OPT_PHY_STATS || EFSYS_OPT_QSTATS)
+# error "NAMES requires LOOPBACK or xxxSTATS or MCDI"
+# endif
+#endif /* EFSYS_OPT_NAMES */
+
+#if EFSYS_OPT_NVRAM
+/* Support non volatile configuration */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_IMAGE_LAYOUT
+/* Support signed image layout handling */
+# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "IMAGE_LAYOUT requires MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_IMAGE_LAYOUT */
+
+#ifdef EFSYS_OPT_NVRAM_FALCON_BOOTROM
+# error "NVRAM_FALCON_BOOTROM is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_NVRAM_SFT9001
+# error "NVRAM_SFT9001 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_NVRAM_SFX7101
+# error "NVRAM_SFX7101 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PCIE_TUNE
+# error "PCIE_TUNE is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_BIST
+# error "PHY_BIST is obsolete (replaced by BIST)."
+#endif
+
+#if EFSYS_OPT_PHY_FLAGS
+/* Support PHY flags */
+# if !EFSYS_OPT_SIENA
+# error "PHY_FLAGS requires SIENA"
+# endif
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+/* Support for PHY LED control */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "PHY_LED_CONTROL requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+#ifdef EFSYS_OPT_PHY_NULL
+# error "PHY_NULL is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_PM8358
+# error "PHY_PM8358 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_PROPS
+# error "PHY_PROPS is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_QT2022C2
+# error "PHY_QT2022C2 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_QT2025C
+# error "PHY_QT2025C is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_SFT9001
+# error "PHY_SFT9001 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_SFX7101
+# error "PHY_SFX7101 is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_PHY_STATS
+/* Support PHY statistics */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "PHY_STATS requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#ifdef EFSYS_OPT_PHY_TXC43128
+# error "PHY_TXC43128 is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_QSTATS
+/* Support EVQ/RXQ/TXQ statistics */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_QSTATS */
+
+#ifdef EFSYS_OPT_RX_HDR_SPLIT
+# error "RX_HDR_SPLIT is obsolete and is not supported"
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+/* Support receive scaling (RSS) */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCATTER
+/* Support receive scatter DMA */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#ifdef EFSYS_OPT_STAT_NAME
+# error "STAT_NAME is obsolete (replaced by NAMES)."
+#endif
+
+#if EFSYS_OPT_VPD
+/* Support PCI Vital Product Data (VPD) */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "VPD requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_VPD */
+
+#ifdef EFSYS_OPT_WOL
+# error "WOL is obsolete and is not supported"
+#endif /* EFSYS_OPT_WOL */
+
+#ifdef EFSYS_OPT_MCAST_FILTER_LIST
+# error "MCAST_FILTER_LIST is obsolete and is not supported"
+#endif
+
+#if EFSYS_OPT_BIST
+/* Support BIST */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "BIST requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_BIST */
+
+#if EFSYS_OPT_LICENSING
+/* Support MCDI licensing API */
+# if !EFSYS_OPT_MCDI
+# error "LICENSING requires MCDI"
+# endif
+# if !EFSYS_HAS_UINT64
+# error "LICENSING requires UINT64"
+# endif
+#endif /* EFSYS_OPT_LICENSING */
+
+#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
+/* Support adapters with missing static config (for factory use only) */
+# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+/* Support packed stream mode */
+# if !(EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "PACKED_STREAM requires HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif
+
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+/* Support equal stride super-buffer mode */
+# if !(EFSYS_OPT_MEDFORD2)
+# error "ES_SUPER_BUFFER requires MEDFORD2"
+# endif
+#endif
+
+/* Support hardware assistance for tunnels */
+#if EFSYS_OPT_TUNNEL
+# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "TUNNEL requires MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_TUNNEL */
+
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+/* Advertise that the driver is firmware subvariant aware */
+# if !(EFSYS_OPT_MEDFORD2)
+# error "FW_SUBVARIANT_AWARE requires MEDFORD2"
+# endif
+#endif
+
+#endif /* _SYS_EFX_CHECK_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_crc32.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_crc32.c
new file mode 100644
index 00000000..29c02e19
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_crc32.c
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2013-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+/*
+ * Precomputed table for computing IEEE 802.3 CRC32
+ * with polynomial 0x04c11db7 (bit-reversed 0xedb88320)
+ */
+
+static const uint32_t efx_crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+/* Calculate the IEEE 802.3 CRC32 of a MAC addr */
+ __checkReturn uint32_t
+efx_crc32_calculate(
+ __in uint32_t crc_init,
+ __in_ecount(length) uint8_t const *input,
+ __in int length)
+{
+ int index;
+ uint32_t crc = crc_init;
+
+ for (index = 0; index < length; index++) {
+ uint32_t data = *(input++);
+ crc = (crc >> 8) ^ efx_crc32_table[(crc ^ data) & 0xff];
+ }
+
+ return (crc);
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_ev.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_ev.c
new file mode 100644
index 00000000..1139cc26
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_ev.c
@@ -0,0 +1,1455 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_QSTATS
+#define EFX_EV_QSTAT_INCR(_eep, _stat) \
+ do { \
+ (_eep)->ee_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_EV_QSTAT_INCR(_eep, _stat)
+#endif
+
+#define EFX_EV_PRESENT(_qword) \
+ (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
+ EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
+
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_ev_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_ev_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep);
+
+static void
+siena_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+static __checkReturn efx_rc_t
+siena_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+static void
+siena_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+static __checkReturn efx_rc_t
+siena_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+
+#endif
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_ev_ops_t __efx_ev_siena_ops = {
+ siena_ev_init, /* eevo_init */
+ siena_ev_fini, /* eevo_fini */
+ siena_ev_qcreate, /* eevo_qcreate */
+ siena_ev_qdestroy, /* eevo_qdestroy */
+ siena_ev_qprime, /* eevo_qprime */
+ siena_ev_qpost, /* eevo_qpost */
+ siena_ev_qmoderate, /* eevo_qmoderate */
+#if EFSYS_OPT_QSTATS
+ siena_ev_qstats_update, /* eevo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+static const efx_ev_ops_t __efx_ev_ef10_ops = {
+ ef10_ev_init, /* eevo_init */
+ ef10_ev_fini, /* eevo_fini */
+ ef10_ev_qcreate, /* eevo_qcreate */
+ ef10_ev_qdestroy, /* eevo_qdestroy */
+ ef10_ev_qprime, /* eevo_qprime */
+ ef10_ev_qpost, /* eevo_qpost */
+ ef10_ev_qmoderate, /* eevo_qmoderate */
+#if EFSYS_OPT_QSTATS
+ ef10_ev_qstats_update, /* eevo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+
+ __checkReturn efx_rc_t
+efx_ev_init(
+ __in efx_nic_t *enp)
+{
+ const efx_ev_ops_t *eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ if (enp->en_mod_flags & EFX_MOD_EV) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ eevop = &__efx_ev_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ eevop = &__efx_ev_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ eevop = &__efx_ev_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ eevop = &__efx_ev_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
+
+ if ((rc = eevop->eevo_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_eevop = eevop;
+ enp->en_mod_flags |= EFX_MOD_EV;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_eevop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_EV;
+ return (rc);
+}
+
+ void
+efx_ev_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+ EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
+
+ eevop->eevo_fini(enp);
+
+ enp->en_eevop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_EV;
+}
+
+
+ __checkReturn efx_rc_t
+efx_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __deref_out efx_evq_t **eepp)
+{
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_evq_t *eep;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
+
+ EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <,
+ enp->en_nic_cfg.enc_evq_limit);
+
+ switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
+ case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
+ break;
+ case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
+ if (us != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /* Allocate an EVQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
+ if (eep == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ eep->ee_magic = EFX_EVQ_MAGIC;
+ eep->ee_enp = enp;
+ eep->ee_index = index;
+ eep->ee_mask = ndescs - 1;
+ eep->ee_flags = flags;
+ eep->ee_esmp = esmp;
+
+ /*
+ * Set outputs before the queue is created because interrupts may be
+ * raised for events immediately after the queue is created, before the
+ * function call below returns. See bug58606.
+ *
+ * The eepp pointer passed in by the client must therefore point to data
+ * shared with the client's event processing context.
+ */
+ enp->en_ev_qcount++;
+ *eepp = eep;
+
+ if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags,
+ eep)) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+ *eepp = NULL;
+ enp->en_ev_qcount--;
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ EFSYS_ASSERT(enp->en_ev_qcount != 0);
+ --enp->en_ev_qcount;
+
+ eevop->eevo_qdestroy(eep);
+
+ /* Free the EVQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
+}
+
+ __checkReturn efx_rc_t
+efx_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = eevop->eevo_qprime(eep, count)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn boolean_t
+efx_ev_qpending(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ size_t offset;
+ efx_qword_t qword;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
+
+ return (EFX_EV_PRESENT(qword));
+}
+
+#if EFSYS_OPT_EV_PREFETCH
+
+ void
+efx_ev_qprefetch(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ unsigned int offset;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+}
+
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+#define EFX_EV_BATCH 8
+
+ void
+efx_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_qword_t ev[EFX_EV_BATCH];
+ unsigned int batch;
+ unsigned int total;
+ unsigned int count;
+ unsigned int index;
+ size_t offset;
+
+ /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */
+ EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
+ EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
+
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
+ FSE_AZ_EV_CODE_DRV_GEN_EV);
+#if EFSYS_OPT_MCDI
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
+ FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
+#endif
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+ EFSYS_ASSERT(countp != NULL);
+ EFSYS_ASSERT(eecp != NULL);
+
+ count = *countp;
+ do {
+ /* Read up until the end of the batch period */
+ batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ for (total = 0; total < batch; ++total) {
+ EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
+
+ if (!EFX_EV_PRESENT(ev[total]))
+ break;
+
+ EFSYS_PROBE3(event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
+
+ offset += sizeof (efx_qword_t);
+ }
+
+#if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
+ /*
+ * Prefetch the next batch when we get within PREFETCH_PERIOD
+ * of a completed batch. If the batch is smaller, then prefetch
+ * immediately.
+ */
+ if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+ /* Process the batch of events */
+ for (index = 0; index < total; ++index) {
+ boolean_t should_abort;
+ uint32_t code;
+
+#if EFSYS_OPT_EV_PREFETCH
+ /* Prefetch if we've now reached the batch period */
+ if (total == batch &&
+ index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
+ offset = (count + batch) & eep->ee_mask;
+ offset *= sizeof (efx_qword_t);
+
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+ }
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+ EFX_EV_QSTAT_INCR(eep, EV_ALL);
+
+ code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
+ switch (code) {
+ case FSE_AZ_EV_CODE_RX_EV:
+ should_abort = eep->ee_rx(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_TX_EV:
+ should_abort = eep->ee_tx(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_DRIVER_EV:
+ should_abort = eep->ee_driver(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_DRV_GEN_EV:
+ should_abort = eep->ee_drv_gen(eep,
+ &(ev[index]), eecp, arg);
+ break;
+#if EFSYS_OPT_MCDI
+ case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
+ should_abort = eep->ee_mcdi(eep,
+ &(ev[index]), eecp, arg);
+ break;
+#endif
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (eep->ee_global) {
+ should_abort = eep->ee_global(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ }
+ /* else fallthrough */
+ default:
+ EFSYS_PROBE3(bad_event,
+ unsigned int, eep->ee_index,
+ uint32_t,
+ EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
+ uint32_t,
+ EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ (void) eecp->eec_exception(arg,
+ EFX_EXCEPTION_EV_ERROR, code);
+ should_abort = B_TRUE;
+ }
+ if (should_abort) {
+ /* Ignore subsequent events */
+ total = index + 1;
+ break;
+ }
+ }
+
+ /*
+ * Now that the hardware has most likely moved onto dma'ing
+ * into the next cache line, clear the processed events. Take
+ * care to only clear out events that we've processed
+ */
+ EFX_SET_QWORD(ev[0]);
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ for (index = 0; index < total; ++index) {
+ EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
+ offset += sizeof (efx_qword_t);
+ }
+
+ count += total;
+
+ } while (total == batch);
+
+ *countp = count;
+}
+
+ void
+efx_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ EFSYS_ASSERT(eevop != NULL &&
+ eevop->eevo_qpost != NULL);
+
+ eevop->eevo_qpost(eep, data);
+}
+
+ __checkReturn efx_rc_t
+efx_ev_usecs_to_ticks(
+ __in efx_nic_t *enp,
+ __in unsigned int us,
+ __out unsigned int *ticksp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ unsigned int ticks;
+
+ /* Convert microseconds to a timer tick count */
+ if (us == 0)
+ ticks = 0;
+ else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
+ ticks = 1; /* Never round down to zero */
+ else
+ ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
+
+ *ticksp = ticks;
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#if EFSYS_OPT_QSTATS
+ void
+efx_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+
+{ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ eevop->eevo_qstats_update(eep, stat);
+}
+
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_ev_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * Program the event queue for receive and transmit queue
+ * flush events.
+ */
+ EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
+
+ return (0);
+
+}
+
+static __checkReturn boolean_t
+siena_ev_rx_not_ok(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in uint32_t label,
+ __in uint32_t id,
+ __inout uint16_t *flagsp)
+{
+ boolean_t ignore = B_FALSE;
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
+ EFSYS_PROBE(tobe_disc);
+ /*
+ * Assume this is a unicast address mismatch, unless below
+ * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
+ * EV_RX_PAUSE_FRM_ERR is set.
+ */
+ (*flagsp) |= EFX_ADDR_MISMATCH;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
+ EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ (*flagsp) |= EFX_DISCARD;
+
+#if EFSYS_OPT_RX_SCATTER
+ /*
+ * Lookout for payload queue ran dry errors and ignore them.
+ *
+ * Sadly for the header/data split cases, the descriptor
+ * pointer in this event refers to the header queue and
+ * therefore cannot be easily detected as duplicate.
+ * So we drop these and rely on the receive processing seeing
+ * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
+ * the partially received packet.
+ */
+ if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
+ (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
+ (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
+ ignore = B_TRUE;
+#endif /* EFSYS_OPT_RX_SCATTER */
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ EFSYS_PROBE(crc_err);
+ (*flagsp) &= ~EFX_ADDR_MISMATCH;
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
+ EFSYS_PROBE(pause_frm_err);
+ (*flagsp) &= ~EFX_ADDR_MISMATCH;
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
+ EFSYS_PROBE(owner_id_err);
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+ EFSYS_PROBE(ipv4_err);
+ (*flagsp) &= ~EFX_CKSUM_IPV4;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+ EFSYS_PROBE(udp_chk_err);
+ (*flagsp) &= ~EFX_CKSUM_TCPUDP;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
+
+ /*
+ * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
+ * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
+ * condition.
+ */
+ (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
+ }
+
+ return (ignore);
+}
+
+static __checkReturn boolean_t
+siena_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t id;
+ uint32_t size;
+ uint32_t label;
+ boolean_t ok;
+#if EFSYS_OPT_RX_SCATTER
+ boolean_t sop;
+ boolean_t jumbo_cont;
+#endif /* EFSYS_OPT_RX_SCATTER */
+ uint32_t hdr_type;
+ boolean_t is_v6;
+ uint16_t flags;
+ boolean_t ignore;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_RX);
+
+ /* Basic packet information */
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
+ size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
+ ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
+
+#if EFSYS_OPT_RX_SCATTER
+ sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
+ jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
+
+ is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
+
+ /*
+ * If packet is marked as OK and packet type is TCP/IP or
+ * UDP/IP or other IP, then we can rely on the hardware checksums.
+ */
+ switch (hdr_type) {
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
+ flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
+ flags |= EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
+ flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
+ flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
+ flags |= EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
+ flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
+ flags = EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
+ flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
+ EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
+ flags = 0;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ flags = 0;
+ break;
+ }
+
+#if EFSYS_OPT_RX_SCATTER
+ /* Report scatter and header/lookahead split buffer flags */
+ if (sop)
+ flags |= EFX_PKT_START;
+ if (jumbo_cont)
+ flags |= EFX_PKT_CONT;
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
+ if (!ok) {
+ ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
+ if (ignore) {
+ EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
+ uint32_t, size, uint16_t, flags);
+
+ return (B_FALSE);
+ }
+ }
+
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ /* Detect multicast packets that didn't match the filter */
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
+ } else {
+ EFSYS_PROBE(mcast_mismatch);
+ flags |= EFX_ADDR_MISMATCH;
+ }
+ } else {
+ flags |= EFX_PKT_UNICAST;
+ }
+
+ /*
+ * The packet parser in Siena can abort parsing packets under
+ * certain error conditions, setting the PKT_NOT_PARSED bit
+ * (which clears PKT_OK). If this is set, then don't trust
+ * the PKT_TYPE field.
+ */
+ if (!ok) {
+ uint32_t parse_err;
+
+ parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
+ if (parse_err != 0)
+ flags |= EFX_CHECK_VLAN;
+ }
+
+ if (~flags & EFX_CHECK_VLAN) {
+ uint32_t pkt_type;
+
+ pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
+ if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
+ flags |= EFX_PKT_VLAN_TAGGED;
+ }
+
+ EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
+ uint32_t, size, uint16_t, flags);
+
+ EFSYS_ASSERT(eecp->eec_rx != NULL);
+ should_abort = eecp->eec_rx(arg, label, id, size, flags);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+siena_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t id;
+ uint32_t label;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
+
+ EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
+
+ EFSYS_ASSERT(eecp->eec_tx != NULL);
+ should_abort = eecp->eec_tx(arg, label, id);
+
+ return (should_abort);
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
+ return (B_FALSE);
+}
+
+static __checkReturn boolean_t
+siena_ev_global(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ _NOTE(ARGUNUSED(eqp, eecp, arg))
+
+ EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
+
+ return (B_FALSE);
+}
+
+static __checkReturn boolean_t
+siena_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
+ should_abort = B_FALSE;
+
+ switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
+ uint32_t txq_index;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
+
+ txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
+
+ EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
+ should_abort = eecp->eec_txq_flush_done(arg, txq_index);
+
+ break;
+ }
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
+ uint32_t rxq_index;
+ uint32_t failed;
+
+ rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+
+ EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
+ EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
+
+ if (failed) {
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
+
+ EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
+
+ should_abort = eecp->eec_rxq_flush_failed(arg,
+ rxq_index);
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
+
+ should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
+ }
+
+ break;
+ }
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ EFSYS_ASSERT(eecp->eec_initialized != NULL);
+ should_abort = eecp->eec_initialized(arg);
+
+ break;
+
+ case FSE_AZ_EVQ_NOT_EN_EV:
+ EFSYS_PROBE(evq_not_en);
+ break;
+
+ case FSE_AZ_SRM_UPD_DONE_EV: {
+ uint32_t code;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
+
+ code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_sram != NULL);
+ should_abort = eecp->eec_sram(arg, code);
+
+ break;
+ }
+ case FSE_AZ_WAKE_UP_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_wake_up != NULL);
+ should_abort = eecp->eec_wake_up(arg, id);
+
+ break;
+ }
+ case FSE_AZ_TX_PKT_NON_TCP_UDP:
+ EFSYS_PROBE(tx_pkt_non_tcp_udp);
+ break;
+
+ case FSE_AZ_TIMER_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_timer != NULL);
+ should_abort = eecp->eec_timer(arg, id);
+
+ break;
+ }
+ case FSE_AZ_RX_DSC_ERROR_EV:
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
+
+ EFSYS_PROBE(rx_dsc_error);
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_RX_DSC_ERROR, 0);
+
+ break;
+
+ case FSE_AZ_TX_DSC_ERROR_EV:
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
+
+ EFSYS_PROBE(tx_dsc_error);
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_TX_DSC_ERROR, 0);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+siena_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t data;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
+
+ data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
+ if (data >= ((uint32_t)1 << 16)) {
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ return (B_TRUE);
+ }
+
+ EFSYS_ASSERT(eecp->eec_software != NULL);
+ should_abort = eecp->eec_software(arg, (uint16_t)data);
+
+ return (should_abort);
+}
+
+#if EFSYS_OPT_MCDI
+
+static __checkReturn boolean_t
+siena_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ unsigned int code;
+ boolean_t should_abort = B_FALSE;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ if (enp->en_family != EFX_FAMILY_SIENA)
+ goto out;
+
+ EFSYS_ASSERT(eecp->eec_link_change != NULL);
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+#if EFSYS_OPT_MON_STATS
+ EFSYS_ASSERT(eecp->eec_monitor != NULL);
+#endif
+
+ EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
+
+ code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ efx_mcdi_ev_death(enp, EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(enp,
+ MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
+ MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
+ MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
+ break;
+
+ case MCDI_EVENT_CODE_LINKCHANGE: {
+ efx_link_mode_t link_mode;
+
+ siena_phy_link_ev(enp, eqp, &link_mode);
+ should_abort = eecp->eec_link_change(arg, link_mode);
+ break;
+ }
+ case MCDI_EVENT_CODE_SENSOREVT: {
+#if EFSYS_OPT_MON_STATS
+ efx_mon_stat_t id;
+ efx_mon_stat_value_t value;
+ efx_rc_t rc;
+
+ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
+ should_abort = eecp->eec_monitor(arg, id, value);
+ else if (rc == ENOTSUP) {
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_SENSOREVT,
+ MCDI_EV_FIELD(eqp, DATA));
+ } else
+ EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
+#else
+ should_abort = B_FALSE;
+#endif
+ break;
+ }
+ case MCDI_EVENT_CODE_SCHEDERR:
+ /* Informational only */
+ break;
+
+ case MCDI_EVENT_CODE_REBOOT:
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+#if EFSYS_OPT_MAC_STATS
+ if (eecp->eec_mac_stats != NULL) {
+ eecp->eec_mac_stats(arg,
+ MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
+ }
+#endif
+ break;
+
+ case MCDI_EVENT_CODE_FWALERT: {
+ uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
+
+ if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_FWALERT_SRAM,
+ MCDI_EV_FIELD(eqp, FWALERT_DATA));
+ else
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_FWALERT,
+ MCDI_EV_FIELD(eqp, DATA));
+ break;
+ }
+
+ default:
+ EFSYS_PROBE1(mc_pcol_error, int, code);
+ break;
+ }
+
+out:
+ return (should_abort);
+}
+
+#endif /* EFSYS_OPT_MCDI */
+
+static __checkReturn efx_rc_t
+siena_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t rptr;
+ efx_dword_t dword;
+
+ rptr = count & eep->ee_mask;
+
+ EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
+
+ EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
+ &dword, B_FALSE);
+
+ return (0);
+}
+
+static void
+siena_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_qword_t ev;
+ efx_oword_t oword;
+
+ EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_EV_DATA_DW0, (uint32_t)data);
+
+ EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
+ EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
+ EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
+
+ EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
+}
+
+static __checkReturn efx_rc_t
+siena_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ unsigned int locked;
+ efx_dword_t dword;
+ efx_rc_t rc;
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
+ FRF_CZ_TC_TIMER_VAL, 0);
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail2;
+
+ EFSYS_ASSERT(ticks > 0);
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
+ FRF_CZ_TC_TIMER_VAL, ticks - 1);
+ }
+
+ locked = (eep->ee_index == 0) ? 1 : 0;
+
+ EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
+ eep->ee_index, &dword, locked);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t size;
+ efx_oword_t oword;
+ efx_rc_t rc;
+ boolean_t notify_mode;
+
+ _NOTE(ARGUNUSED(esmp))
+
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
+
+ if (!ISP2(ndescs) ||
+ (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_evq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+#if EFSYS_OPT_RX_SCALE
+ if (enp->en_intr.ei_type == EFX_INTR_LINE &&
+ index >= EFX_MAXRSS_LEGACY) {
+ rc = EINVAL;
+ goto fail3;
+ }
+#endif
+ for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
+ size++)
+ if ((1 << size) == (int)(ndescs / EFX_EVQ_MINNEVS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* Set up the handler table */
+ eep->ee_rx = siena_ev_rx;
+ eep->ee_tx = siena_ev_tx;
+ eep->ee_driver = siena_ev_driver;
+ eep->ee_global = siena_ev_global;
+ eep->ee_drv_gen = siena_ev_drv_gen;
+#if EFSYS_OPT_MCDI
+ eep->ee_mcdi = siena_ev_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+
+ notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+ /* Set up the new event queue */
+ EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
+ FRF_AZ_EVQ_BUF_BASE_ID, id);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
+
+ /* Set initial interrupt moderation */
+ siena_ev_qmoderate(eep, us);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+#if EFSYS_OPT_RX_SCALE
+fail3:
+ EFSYS_PROBE(fail3);
+#endif
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_QSTATS
+#if EFSYS_OPT_NAMES
+/* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock c0f3bc5083b40532 */
+static const char * const __efx_ev_qstat_name[] = {
+ "all",
+ "rx",
+ "rx_ok",
+ "rx_frm_trunc",
+ "rx_tobe_disc",
+ "rx_pause_frm_err",
+ "rx_buf_owner_id_err",
+ "rx_ipv4_hdr_chksum_err",
+ "rx_tcp_udp_chksum_err",
+ "rx_eth_crc_err",
+ "rx_ip_frag_err",
+ "rx_mcast_pkt",
+ "rx_mcast_hash_match",
+ "rx_tcp_ipv4",
+ "rx_tcp_ipv6",
+ "rx_udp_ipv4",
+ "rx_udp_ipv6",
+ "rx_other_ipv4",
+ "rx_other_ipv6",
+ "rx_non_ip",
+ "rx_batch",
+ "tx",
+ "tx_wq_ff_full",
+ "tx_pkt_err",
+ "tx_pkt_too_big",
+ "tx_unexpected",
+ "global",
+ "global_mnt",
+ "driver",
+ "driver_srm_upd_done",
+ "driver_tx_descq_fls_done",
+ "driver_rx_descq_fls_done",
+ "driver_rx_descq_fls_failed",
+ "driver_rx_dsc_error",
+ "driver_tx_dsc_error",
+ "drv_gen",
+ "mcdi_response",
+};
+/* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
+
+ const char *
+efx_ev_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(id, <, EV_NQSTATS);
+
+ return (__efx_ev_qstat_name[id]);
+}
+#endif /* EFSYS_OPT_NAMES */
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < EV_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
+ eep->ee_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+static void
+siena_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_oword_t oword;
+
+ /* Purge event queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
+ eep->ee_index, &oword, B_TRUE);
+
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
+}
+
+static void
+siena_ev_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_filter.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_filter.c
new file mode 100644
index 00000000..412298ac
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_filter.c
@@ -0,0 +1,1554 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_FILTER
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_filter_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_filter_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_filter_restore(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace);
+
+static __checkReturn efx_rc_t
+siena_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+static __checkReturn efx_rc_t
+siena_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_filter_ops_t __efx_filter_siena_ops = {
+ siena_filter_init, /* efo_init */
+ siena_filter_fini, /* efo_fini */
+ siena_filter_restore, /* efo_restore */
+ siena_filter_add, /* efo_add */
+ siena_filter_delete, /* efo_delete */
+ siena_filter_supported_filters, /* efo_supported_filters */
+ NULL, /* efo_reconfigure */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+static const efx_filter_ops_t __efx_filter_ef10_ops = {
+ ef10_filter_init, /* efo_init */
+ ef10_filter_fini, /* efo_fini */
+ ef10_filter_restore, /* efo_restore */
+ ef10_filter_add, /* efo_add */
+ ef10_filter_delete, /* efo_delete */
+ ef10_filter_supported_filters, /* efo_supported_filters */
+ ef10_filter_reconfigure, /* efo_reconfigure */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+ __checkReturn efx_rc_t
+efx_filter_insert(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ const efx_filter_ops_t *efop = enp->en_efop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+ if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) &&
+ !encp->enc_filter_action_mark_supported) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG) &&
+ !encp->enc_filter_action_flag_supported) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ return (efop->efo_add(enp, spec, B_FALSE));
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_filter_remove(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ const efx_filter_ops_t *efop = enp->en_efop;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+ return (efop->efo_delete(enp, spec));
+}
+
+ __checkReturn efx_rc_t
+efx_filter_restore(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ if ((rc = enp->en_efop->efo_restore(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_filter_init(
+ __in efx_nic_t *enp)
+{
+ const efx_filter_ops_t *efop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_FILTER));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ efop = &__efx_filter_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ efop = &__efx_filter_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ efop = &__efx_filter_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ efop = &__efx_filter_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efop->efo_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_efop = efop;
+ enp->en_mod_flags |= EFX_MOD_FILTER;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_efop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_FILTER;
+ return (rc);
+}
+
+ void
+efx_filter_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ enp->en_efop->efo_fini(enp);
+
+ enp->en_efop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_FILTER;
+}
+
+/*
+ * Query the possible combinations of match flags which can be filtered on.
+ * These are returned as a list, of which each 32 bit element is a bitmask
+ * formed of EFX_FILTER_MATCH flags.
+ *
+ * The combinations are ordered in priority from highest to lowest.
+ *
+ * If the provided buffer is too short to hold the list, the call with fail with
+ * ENOSPC and *list_lengthp will be set to the buffer length required.
+ */
+ __checkReturn efx_rc_t
+efx_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT(enp->en_efop->efo_supported_filters != NULL);
+
+ if (buffer == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = enp->en_efop->efo_supported_filters(enp, buffer, buffer_length,
+ list_lengthp);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ if (enp->en_efop->efo_reconfigure != NULL) {
+ if ((rc = enp->en_efop->efo_reconfigure(enp, mac_addr,
+ all_unicst, mulcst,
+ all_mulcst, brdcst,
+ addrs, count)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_filter_spec_init_rx(
+ __out efx_filter_spec_t *spec,
+ __in efx_filter_priority_t priority,
+ __in efx_filter_flags_t flags,
+ __in efx_rxq_t *erp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(erp, !=, NULL);
+ EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_SCATTER)) == 0);
+
+ memset(spec, 0, sizeof (*spec));
+ spec->efs_priority = priority;
+ spec->efs_flags = EFX_FILTER_FLAG_RX | flags;
+ spec->efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+ spec->efs_dmaq_id = (uint16_t)erp->er_index;
+}
+
+ void
+efx_filter_spec_init_tx(
+ __out efx_filter_spec_t *spec,
+ __in efx_txq_t *etp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(etp, !=, NULL);
+
+ memset(spec, 0, sizeof (*spec));
+ spec->efs_priority = EFX_FILTER_PRI_REQUIRED;
+ spec->efs_flags = EFX_FILTER_FLAG_TX;
+ spec->efs_dmaq_id = (uint16_t)etp->et_index;
+}
+
+
+/*
+ * Specify IPv4 host, transport protocol and port in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t host,
+ __in uint16_t port)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->efs_ether_type = EFX_ETHER_TYPE_IPV4;
+ spec->efs_ip_proto = proto;
+ spec->efs_loc_host.eo_u32[0] = host;
+ spec->efs_loc_port = port;
+ return (0);
+}
+
+/*
+ * Specify IPv4 hosts, transport protocol and ports in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t lhost,
+ __in uint16_t lport,
+ __in uint32_t rhost,
+ __in uint16_t rport)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec->efs_ether_type = EFX_ETHER_TYPE_IPV4;
+ spec->efs_ip_proto = proto;
+ spec->efs_loc_host.eo_u32[0] = lhost;
+ spec->efs_loc_port = lport;
+ spec->efs_rem_host.eo_u32[0] = rhost;
+ spec->efs_rem_port = rport;
+ return (0);
+}
+
+/*
+ * Specify local Ethernet address and/or VID in filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_eth_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t vid,
+ __in const uint8_t *addr)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(addr, !=, NULL);
+
+ if (vid == EFX_FILTER_SPEC_VID_UNSPEC && addr == NULL)
+ return (EINVAL);
+
+ if (vid != EFX_FILTER_SPEC_VID_UNSPEC) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec->efs_outer_vid = vid;
+ }
+ if (addr != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ memcpy(spec->efs_loc_mac, addr, EFX_MAC_ADDR_LEN);
+ }
+ return (0);
+}
+
+ void
+efx_filter_spec_set_ether_type(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t ether_type)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_ether_type = ether_type;
+ spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+}
+
+/*
+ * Specify matching otherwise-unmatched unicast in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_uc_def(
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
+ return (0);
+}
+
+/*
+ * Specify matching otherwise-unmatched multicast in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_mc_def(
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
+ return (0);
+}
+
+
+__checkReturn efx_rc_t
+efx_filter_spec_set_encap_type(
+ __inout efx_filter_spec_t *spec,
+ __in efx_tunnel_protocol_t encap_type,
+ __in efx_filter_inner_frame_match_t inner_frame_match)
+{
+ uint32_t match_flags = EFX_FILTER_MATCH_ENCAP_TYPE;
+ uint8_t ip_proto;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ switch (encap_type) {
+ case EFX_TUNNEL_PROTOCOL_VXLAN:
+ case EFX_TUNNEL_PROTOCOL_GENEVE:
+ ip_proto = EFX_IPPROTO_UDP;
+ break;
+ case EFX_TUNNEL_PROTOCOL_NVGRE:
+ ip_proto = EFX_IPPROTO_GRE;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (inner_frame_match) {
+ case EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST:
+ match_flags |= EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
+ break;
+ case EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_UCAST_DST:
+ match_flags |= EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST;
+ break;
+ case EFX_FILTER_INNER_FRAME_MATCH_OTHER:
+ /* This is for when specific inner frames are to be matched. */
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ spec->efs_encap_type = encap_type;
+ spec->efs_ip_proto = ip_proto;
+ spec->efs_match_flags |= (match_flags | EFX_FILTER_MATCH_IP_PROTO);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Specify inner and outer Ethernet address and VXLAN ID in filter
+ * specification.
+ */
+ __checkReturn efx_rc_t
+efx_filter_spec_set_vxlan_full(
+ __inout efx_filter_spec_t *spec,
+ __in const uint8_t *vxlan_id,
+ __in const uint8_t *inner_addr,
+ __in const uint8_t *outer_addr)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(vxlan_id, !=, NULL);
+ EFSYS_ASSERT3P(inner_addr, !=, NULL);
+ EFSYS_ASSERT3P(outer_addr, !=, NULL);
+
+ if ((inner_addr == NULL) && (outer_addr == NULL))
+ return (EINVAL);
+
+ if (vxlan_id != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
+ memcpy(spec->efs_vni_or_vsid, vxlan_id, EFX_VNI_OR_VSID_LEN);
+ }
+ if (outer_addr != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ memcpy(spec->efs_loc_mac, outer_addr, EFX_MAC_ADDR_LEN);
+ }
+ if (inner_addr != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_IFRM_LOC_MAC;
+ memcpy(spec->efs_ifrm_loc_mac, inner_addr, EFX_MAC_ADDR_LEN);
+ }
+ spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+ spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
+
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_filter_spec_set_rss_context(
+ __inout efx_filter_spec_t *spec,
+ __in uint32_t rss_context)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ /* The filter must have been created with EFX_FILTER_FLAG_RX_RSS. */
+ if ((spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ spec->efs_rss_context = rss_context;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_SIENA
+
+/*
+ * "Fudge factors" - difference between programmed value and actual depth.
+ * Due to pipelined implementation we need to program H/W with a value that
+ * is larger than the hop limit we want.
+ */
+#define FILTER_CTL_SRCH_FUDGE_WILD 3
+#define FILTER_CTL_SRCH_FUDGE_FULL 1
+
+/*
+ * Hard maximum hop limit. Hardware will time-out beyond 200-something.
+ * We also need to avoid infinite loops in efx_filter_search() when the
+ * table is full.
+ */
+#define FILTER_CTL_SRCH_MAX 200
+
+static __checkReturn efx_rc_t
+siena_filter_spec_from_gen_spec(
+ __out siena_filter_spec_t *sf_spec,
+ __in efx_filter_spec_t *gen_spec)
+{
+ efx_rc_t rc;
+ boolean_t is_full = B_FALSE;
+
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX)
+ EFSYS_ASSERT3U(gen_spec->efs_flags, ==, EFX_FILTER_FLAG_TX);
+ else
+ EFSYS_ASSERT3U(gen_spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+ /* Siena only has one RSS context */
+ if ((gen_spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) &&
+ gen_spec->efs_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ sf_spec->sfs_flags = gen_spec->efs_flags;
+ sf_spec->sfs_dmaq_id = gen_spec->efs_dmaq_id;
+
+ switch (gen_spec->efs_match_flags) {
+ case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT:
+ is_full = B_TRUE;
+ /* Fall through */
+ case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT: {
+ uint32_t rhost, host1, host2;
+ uint16_t rport, port1, port2;
+
+ if (gen_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ if (gen_spec->efs_loc_port == 0 ||
+ (is_full && gen_spec->efs_rem_port == 0)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ switch (gen_spec->efs_ip_proto) {
+ case EFX_IPPROTO_TCP:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_TCP_FULL :
+ EFX_SIENA_FILTER_TX_TCP_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_TCP_FULL :
+ EFX_SIENA_FILTER_RX_TCP_WILD);
+ }
+ break;
+ case EFX_IPPROTO_UDP:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_UDP_FULL :
+ EFX_SIENA_FILTER_TX_UDP_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_UDP_FULL :
+ EFX_SIENA_FILTER_RX_UDP_WILD);
+ }
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail4;
+ }
+ /*
+ * The filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * addresses (zero for a wildcard).
+ */
+ rhost = is_full ? gen_spec->efs_rem_host.eo_u32[0] : 0;
+ rport = is_full ? gen_spec->efs_rem_port : 0;
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ host1 = gen_spec->efs_loc_host.eo_u32[0];
+ host2 = rhost;
+ } else {
+ host1 = rhost;
+ host2 = gen_spec->efs_loc_host.eo_u32[0];
+ }
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ if (sf_spec->sfs_type ==
+ EFX_SIENA_FILTER_TX_UDP_WILD) {
+ port1 = rport;
+ port2 = gen_spec->efs_loc_port;
+ } else {
+ port1 = gen_spec->efs_loc_port;
+ port2 = rport;
+ }
+ } else {
+ if (sf_spec->sfs_type ==
+ EFX_SIENA_FILTER_RX_UDP_WILD) {
+ port1 = gen_spec->efs_loc_port;
+ port2 = rport;
+ } else {
+ port1 = rport;
+ port2 = gen_spec->efs_loc_port;
+ }
+ }
+ sf_spec->sfs_dword[0] = (host1 << 16) | port1;
+ sf_spec->sfs_dword[1] = (port2 << 16) | (host1 >> 16);
+ sf_spec->sfs_dword[2] = host2;
+ break;
+ }
+
+ case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
+ is_full = B_TRUE;
+ /* Fall through */
+ case EFX_FILTER_MATCH_LOC_MAC:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_MAC_FULL :
+ EFX_SIENA_FILTER_TX_MAC_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_MAC_FULL :
+ EFX_SIENA_FILTER_RX_MAC_WILD);
+ }
+ sf_spec->sfs_dword[0] = is_full ? gen_spec->efs_outer_vid : 0;
+ sf_spec->sfs_dword[1] =
+ gen_spec->efs_loc_mac[2] << 24 |
+ gen_spec->efs_loc_mac[3] << 16 |
+ gen_spec->efs_loc_mac[4] << 8 |
+ gen_spec->efs_loc_mac[5];
+ sf_spec->sfs_dword[2] =
+ gen_spec->efs_loc_mac[0] << 8 |
+ gen_spec->efs_loc_mac[1];
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple.
+ */
+static uint16_t
+siena_filter_tbl_hash(
+ __in uint32_t key)
+{
+ uint16_t tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ (uint16_t)(key >> 16);
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ (uint16_t)(key & 0xffff);
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+
+ return (tmp);
+}
+
+/*
+ * To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash.
+ */
+static uint16_t
+siena_filter_tbl_increment(
+ __in uint32_t key)
+{
+ return ((uint16_t)(key * 2 - 1));
+}
+
+static __checkReturn boolean_t
+siena_filter_test_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ return ((sftp->sft_bitmap[index / 32] & (1 << (index % 32))) != 0);
+}
+
+static void
+siena_filter_set_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ sftp->sft_bitmap[index / 32] |= (1 << (index % 32));
+ ++sftp->sft_used;
+}
+
+static void
+siena_filter_clear_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ sftp->sft_bitmap[index / 32] &= ~(1 << (index % 32));
+
+ --sftp->sft_used;
+ EFSYS_ASSERT3U(sftp->sft_used, >=, 0);
+}
+
+
+static siena_filter_tbl_id_t
+siena_filter_tbl_id(
+ __in siena_filter_type_t type)
+{
+ siena_filter_tbl_id_t tbl_id;
+
+ switch (type) {
+ case EFX_SIENA_FILTER_RX_TCP_FULL:
+ case EFX_SIENA_FILTER_RX_TCP_WILD:
+ case EFX_SIENA_FILTER_RX_UDP_FULL:
+ case EFX_SIENA_FILTER_RX_UDP_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_RX_IP;
+ break;
+
+ case EFX_SIENA_FILTER_RX_MAC_FULL:
+ case EFX_SIENA_FILTER_RX_MAC_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_RX_MAC;
+ break;
+
+ case EFX_SIENA_FILTER_TX_TCP_FULL:
+ case EFX_SIENA_FILTER_TX_TCP_WILD:
+ case EFX_SIENA_FILTER_TX_UDP_FULL:
+ case EFX_SIENA_FILTER_TX_UDP_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_TX_IP;
+ break;
+
+ case EFX_SIENA_FILTER_TX_MAC_FULL:
+ case EFX_SIENA_FILTER_TX_MAC_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_TX_MAC;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ tbl_id = EFX_SIENA_FILTER_NTBLS;
+ break;
+ }
+ return (tbl_id);
+}
+
+static void
+siena_filter_reset_search_depth(
+ __inout siena_filter_t *sfp,
+ __in siena_filter_tbl_id_t tbl_id)
+{
+ switch (tbl_id) {
+ case EFX_SIENA_FILTER_TBL_RX_IP:
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_RX_MAC:
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_TX_IP:
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_TX_MAC:
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] = 0;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ break;
+ }
+}
+
+static void
+siena_filter_push_rx_limits(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_FULL_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_WILD_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_FULL_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_WILD_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC].sft_size) {
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+}
+
+static void
+siena_filter_push_tx_limits(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP].sft_size != 0) {
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC].sft_size != 0) {
+ EFX_SET_OWORD_FIELD(
+ oword, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ oword, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);
+}
+
+/* Build a filter entry and return its n-tuple key. */
+static __checkReturn uint32_t
+siena_filter_build(
+ __out efx_oword_t *filter,
+ __in siena_filter_spec_t *spec)
+{
+ uint32_t dword3;
+ uint32_t key;
+ uint8_t type = spec->sfs_type;
+ uint32_t flags = spec->sfs_flags;
+
+ switch (siena_filter_tbl_id(type)) {
+ case EFX_SIENA_FILTER_TBL_RX_IP: {
+ boolean_t is_udp = (type == EFX_SIENA_FILTER_RX_UDP_FULL ||
+ type == EFX_SIENA_FILTER_RX_UDP_WILD);
+ EFX_POPULATE_OWORD_7(*filter,
+ FRF_BZ_RSS_EN,
+ (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
+ FRF_BZ_SCATTER_EN,
+ (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
+ FRF_AZ_TCP_UDP, is_udp,
+ FRF_AZ_RXQ_ID, spec->sfs_dmaq_id,
+ EFX_DWORD_2, spec->sfs_dword[2],
+ EFX_DWORD_1, spec->sfs_dword[1],
+ EFX_DWORD_0, spec->sfs_dword[0]);
+ dword3 = is_udp;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_RX_MAC: {
+ boolean_t is_wild = (type == EFX_SIENA_FILTER_RX_MAC_WILD);
+ EFX_POPULATE_OWORD_7(*filter,
+ FRF_CZ_RMFT_RSS_EN,
+ (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
+ FRF_CZ_RMFT_SCATTER_EN,
+ (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
+ FRF_CZ_RMFT_RXQ_ID, spec->sfs_dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_DW1, spec->sfs_dword[2],
+ FRF_CZ_RMFT_DEST_MAC_DW0, spec->sfs_dword[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->sfs_dword[0]);
+ dword3 = is_wild;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_TX_IP: {
+ boolean_t is_udp = (type == EFX_SIENA_FILTER_TX_UDP_FULL ||
+ type == EFX_SIENA_FILTER_TX_UDP_WILD);
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TIFT_TCP_UDP, is_udp,
+ FRF_CZ_TIFT_TXQ_ID, spec->sfs_dmaq_id,
+ EFX_DWORD_2, spec->sfs_dword[2],
+ EFX_DWORD_1, spec->sfs_dword[1],
+ EFX_DWORD_0, spec->sfs_dword[0]);
+ dword3 = is_udp | spec->sfs_dmaq_id << 1;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_TX_MAC: {
+ boolean_t is_wild = (type == EFX_SIENA_FILTER_TX_MAC_WILD);
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TMFT_TXQ_ID, spec->sfs_dmaq_id,
+ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_TMFT_SRC_MAC_DW1, spec->sfs_dword[2],
+ FRF_CZ_TMFT_SRC_MAC_DW0, spec->sfs_dword[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->sfs_dword[0]);
+ dword3 = is_wild | spec->sfs_dmaq_id << 1;
+ break;
+ }
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ EFX_ZERO_OWORD(*filter);
+ return (0);
+ }
+
+ key =
+ spec->sfs_dword[0] ^
+ spec->sfs_dword[1] ^
+ spec->sfs_dword[2] ^
+ dword3;
+
+ return (key);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_push_entry(
+ __inout efx_nic_t *enp,
+ __in siena_filter_type_t type,
+ __in int index,
+ __in efx_oword_t *eop)
+{
+ efx_rc_t rc;
+
+ switch (type) {
+ case EFX_SIENA_FILTER_RX_TCP_FULL:
+ case EFX_SIENA_FILTER_RX_TCP_WILD:
+ case EFX_SIENA_FILTER_RX_UDP_FULL:
+ case EFX_SIENA_FILTER_RX_UDP_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_RX_MAC_FULL:
+ case EFX_SIENA_FILTER_RX_MAC_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_RX_MAC_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_TX_TCP_FULL:
+ case EFX_SIENA_FILTER_TX_TCP_WILD:
+ case EFX_SIENA_FILTER_TX_UDP_FULL:
+ case EFX_SIENA_FILTER_TX_UDP_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_TX_MAC_FULL:
+ case EFX_SIENA_FILTER_TX_MAC_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_MAC_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ return (0);
+
+fail1:
+ return (rc);
+}
+
+
+static __checkReturn boolean_t
+siena_filter_equal(
+ __in const siena_filter_spec_t *left,
+ __in const siena_filter_spec_t *right)
+{
+ siena_filter_tbl_id_t tbl_id;
+
+ tbl_id = siena_filter_tbl_id(left->sfs_type);
+
+
+ if (left->sfs_type != right->sfs_type)
+ return (B_FALSE);
+
+ if (memcmp(left->sfs_dword, right->sfs_dword,
+ sizeof (left->sfs_dword)))
+ return (B_FALSE);
+
+ if ((tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC) &&
+ left->sfs_dmaq_id != right->sfs_dmaq_id)
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_search(
+ __in siena_filter_tbl_t *sftp,
+ __in siena_filter_spec_t *spec,
+ __in uint32_t key,
+ __in boolean_t for_insert,
+ __out int *filter_index,
+ __out unsigned int *depth_required)
+{
+ unsigned int hash, incr, filter_idx, depth;
+
+ hash = siena_filter_tbl_hash(key);
+ incr = siena_filter_tbl_increment(key);
+
+ filter_idx = hash & (sftp->sft_size - 1);
+ depth = 1;
+
+ for (;;) {
+ /*
+ * Return success if entry is used and matches this spec
+ * or entry is unused and we are trying to insert.
+ */
+ if (siena_filter_test_used(sftp, filter_idx) ?
+ siena_filter_equal(spec,
+ &sftp->sft_spec[filter_idx]) :
+ for_insert) {
+ *filter_index = filter_idx;
+ *depth_required = depth;
+ return (0);
+ }
+
+ /* Return failure if we reached the maximum search depth */
+ if (depth == FILTER_CTL_SRCH_MAX)
+ return (for_insert ? EBUSY : ENOENT);
+
+ filter_idx = (filter_idx + incr) & (sftp->sft_size - 1);
+ ++depth;
+ }
+}
+
+static void
+siena_filter_clear_entry(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_t *sftp,
+ __in int index)
+{
+ efx_oword_t filter;
+
+ if (siena_filter_test_used(sftp, index)) {
+ siena_filter_clear_used(sftp, index);
+
+ EFX_ZERO_OWORD(filter);
+ siena_filter_push_entry(enp,
+ sftp->sft_spec[index].sfs_type,
+ index, &filter);
+
+ memset(&sftp->sft_spec[index],
+ 0, sizeof (sftp->sft_spec[0]));
+ }
+}
+
+ void
+siena_filter_tbl_clear(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_id_t tbl_id)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
+ int index;
+ efsys_lock_state_t state;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ for (index = 0; index < sftp->sft_size; ++index) {
+ siena_filter_clear_entry(enp, sftp, index);
+ }
+
+ if (sftp->sft_used == 0)
+ siena_filter_reset_search_depth(sfp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_init(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp;
+ siena_filter_tbl_t *sftp;
+ int tbl_id;
+ efx_rc_t rc;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (siena_filter_t), sfp);
+
+ if (!sfp) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_filter.ef_siena_filter = sfp;
+
+ switch (enp->en_family) {
+ case EFX_FAMILY_SIENA:
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_IP];
+ sftp->sft_size = FR_AZ_RX_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC];
+ sftp->sft_size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP];
+ sftp->sft_size = FR_CZ_TX_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC];
+ sftp->sft_size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ break;
+
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ unsigned int bitmap_size;
+
+ sftp = &sfp->sf_tbl[tbl_id];
+ if (sftp->sft_size == 0)
+ continue;
+
+ EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
+ sizeof (uint32_t));
+ bitmap_size =
+ (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, bitmap_size, sftp->sft_bitmap);
+ if (!sftp->sft_bitmap) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ sftp->sft_size * sizeof (*sftp->sft_spec),
+ sftp->sft_spec);
+ if (!sftp->sft_spec) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+ memset(sftp->sft_spec, 0,
+ sftp->sft_size * sizeof (*sftp->sft_spec));
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ siena_filter_fini(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static void
+siena_filter_fini(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (sfp == NULL)
+ return;
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
+ unsigned int bitmap_size;
+
+ EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
+ sizeof (uint32_t));
+ bitmap_size =
+ (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;
+
+ if (sftp->sft_bitmap != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, bitmap_size,
+ sftp->sft_bitmap);
+ sftp->sft_bitmap = NULL;
+ }
+
+ if (sftp->sft_spec != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, sftp->sft_size *
+ sizeof (*sftp->sft_spec), sftp->sft_spec);
+ sftp->sft_spec = NULL;
+ }
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (siena_filter_t),
+ enp->en_filter.ef_siena_filter);
+}
+
+/* Restore filter state after a reset */
+static __checkReturn efx_rc_t
+siena_filter_restore(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ siena_filter_spec_t *spec;
+ efx_oword_t filter;
+ int filter_idx;
+ efsys_lock_state_t state;
+ uint32_t key;
+ efx_rc_t rc;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ sftp = &sfp->sf_tbl[tbl_id];
+ for (filter_idx = 0;
+ filter_idx < sftp->sft_size;
+ filter_idx++) {
+ if (!siena_filter_test_used(sftp, filter_idx))
+ continue;
+
+ spec = &sftp->sft_spec[filter_idx];
+ if ((key = siena_filter_build(&filter, spec)) == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if ((rc = siena_filter_push_entry(enp,
+ spec->sfs_type, filter_idx, &filter)) != 0)
+ goto fail2;
+ }
+ }
+
+ siena_filter_push_rx_limits(enp);
+ siena_filter_push_tx_limits(enp);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace)
+{
+ efx_rc_t rc;
+ siena_filter_spec_t sf_spec;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ siena_filter_spec_t *saved_sf_spec;
+ efx_oword_t filter;
+ int filter_idx;
+ unsigned int depth;
+ efsys_lock_state_t state;
+ uint32_t key;
+
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
+ goto fail1;
+
+ tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
+ sftp = &sfp->sf_tbl[tbl_id];
+
+ if (sftp->sft_size == 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ key = siena_filter_build(&filter, &sf_spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = siena_filter_search(sftp, &sf_spec, key, B_TRUE,
+ &filter_idx, &depth);
+ if (rc != 0)
+ goto fail3;
+
+ EFSYS_ASSERT3U(filter_idx, <, sftp->sft_size);
+ saved_sf_spec = &sftp->sft_spec[filter_idx];
+
+ if (siena_filter_test_used(sftp, filter_idx)) {
+ if (may_replace == B_FALSE) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ siena_filter_set_used(sftp, filter_idx);
+ *saved_sf_spec = sf_spec;
+
+ if (sfp->sf_depth[sf_spec.sfs_type] < depth) {
+ sfp->sf_depth[sf_spec.sfs_type] = depth;
+ if (tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC)
+ siena_filter_push_tx_limits(enp);
+ else
+ siena_filter_push_rx_limits(enp);
+ }
+
+ siena_filter_push_entry(enp, sf_spec.sfs_type,
+ filter_idx, &filter);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+fail3:
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ efx_rc_t rc;
+ siena_filter_spec_t sf_spec;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ efx_oword_t filter;
+ int filter_idx;
+ unsigned int depth;
+ efsys_lock_state_t state;
+ uint32_t key;
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
+ goto fail1;
+
+ tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
+ sftp = &sfp->sf_tbl[tbl_id];
+
+ key = siena_filter_build(&filter, &sf_spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = siena_filter_search(sftp, &sf_spec, key, B_FALSE,
+ &filter_idx, &depth);
+ if (rc != 0)
+ goto fail2;
+
+ siena_filter_clear_entry(enp, sftp, filter_idx);
+ if (sftp->sft_used == 0)
+ siena_filter_reset_search_depth(sfp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (0);
+
+fail2:
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#define SIENA_MAX_SUPPORTED_MATCHES 4
+
+static __checkReturn efx_rc_t
+siena_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+ uint32_t index = 0;
+ uint32_t rx_matches[SIENA_MAX_SUPPORTED_MATCHES];
+ size_t list_length;
+ efx_rc_t rc;
+
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+
+ if (enp->en_features & EFX_FEATURE_MAC_HEADER_FILTERS) {
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC;
+
+ rx_matches[index++] = EFX_FILTER_MATCH_LOC_MAC;
+ }
+
+ EFSYS_ASSERT3U(index, <=, SIENA_MAX_SUPPORTED_MATCHES);
+ list_length = index;
+
+ *list_lengthp = list_length;
+
+ if (buffer_length < list_length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ memcpy(buffer, rx_matches, list_length * sizeof (rx_matches[0]));
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#undef MAX_SUPPORTED
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_FILTER */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_hash.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_hash.c
new file mode 100644
index 00000000..43e6bc05
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_hash.c
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2006 Bob Jenkins
+ *
+ * Derived from public domain source, see
+ * <http://burtleburtle.net/bob/c/lookup3.c>:
+ *
+ * "lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
+ * These are functions for producing 32-bit hashes for hash table lookup...
+ * ...You can use this free for any purpose. It's in the public domain.
+ * It has no warranty."
+ *
+ * Copyright (c) 2014-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+/* Hash initial value */
+#define EFX_HASH_INITIAL_VALUE 0xdeadbeef
+
+/*
+ * Rotate a 32-bit value left
+ *
+ * Allow platform to provide an intrinsic or optimised routine and
+ * fall-back to a simple shift based implementation.
+ */
+#if EFSYS_HAS_ROTL_DWORD
+
+#define EFX_HASH_ROTATE(_value, _shift) \
+ EFSYS_ROTL_DWORD(_value, _shift)
+
+#else
+
+#define EFX_HASH_ROTATE(_value, _shift) \
+ (((_value) << (_shift)) | ((_value) >> (32 - (_shift))))
+
+#endif
+
+/* Mix three 32-bit values reversibly */
+#define EFX_HASH_MIX(_a, _b, _c) \
+ do { \
+ _a -= _c; \
+ _a ^= EFX_HASH_ROTATE(_c, 4); \
+ _c += _b; \
+ _b -= _a; \
+ _b ^= EFX_HASH_ROTATE(_a, 6); \
+ _a += _c; \
+ _c -= _b; \
+ _c ^= EFX_HASH_ROTATE(_b, 8); \
+ _b += _a; \
+ _a -= _c; \
+ _a ^= EFX_HASH_ROTATE(_c, 16); \
+ _c += _b; \
+ _b -= _a; \
+ _b ^= EFX_HASH_ROTATE(_a, 19); \
+ _a += _c; \
+ _c -= _b; \
+ _c ^= EFX_HASH_ROTATE(_b, 4); \
+ _b += _a; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* Final mixing of three 32-bit values into one (_c) */
+#define EFX_HASH_FINALISE(_a, _b, _c) \
+ do { \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 14); \
+ _a ^= _c; \
+ _a -= EFX_HASH_ROTATE(_c, 11); \
+ _b ^= _a; \
+ _b -= EFX_HASH_ROTATE(_a, 25); \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 16); \
+ _a ^= _c; \
+ _a -= EFX_HASH_ROTATE(_c, 4); \
+ _b ^= _a; \
+ _b -= EFX_HASH_ROTATE(_a, 14); \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 24); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+
+/* Produce a 32-bit hash from 32-bit aligned input */
+ __checkReturn uint32_t
+efx_hash_dwords(
+ __in_ecount(count) uint32_t const *input,
+ __in size_t count,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE +
+ (((uint32_t)count) * sizeof (uint32_t)) + init;
+
+ /* Handle all but the last three dwords of the input */
+ while (count > 3) {
+ a += input[0];
+ b += input[1];
+ c += input[2];
+ EFX_HASH_MIX(a, b, c);
+
+ count -= 3;
+ input += 3;
+ }
+
+ /* Handle the left-overs */
+ switch (count) {
+ case 3:
+ c += input[2];
+ /* Fall-through */
+ case 2:
+ b += input[1];
+ /* Fall-through */
+ case 1:
+ a += input[0];
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if count parameter was zero */
+ break;
+ }
+
+ return (c);
+}
+
+#if EFSYS_IS_BIG_ENDIAN
+
+/* Produce a 32-bit hash from arbitrarily aligned input */
+ __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE + (uint32_t)length + init;
+
+ /* Handle all but the last twelve bytes of the input */
+ while (length > 12) {
+ a += ((uint32_t)input[0]) << 24;
+ a += ((uint32_t)input[1]) << 16;
+ a += ((uint32_t)input[2]) << 8;
+ a += ((uint32_t)input[3]);
+ b += ((uint32_t)input[4]) << 24;
+ b += ((uint32_t)input[5]) << 16;
+ b += ((uint32_t)input[6]) << 8;
+ b += ((uint32_t)input[7]);
+ c += ((uint32_t)input[8]) << 24;
+ c += ((uint32_t)input[9]) << 16;
+ c += ((uint32_t)input[10]) << 8;
+ c += ((uint32_t)input[11]);
+ EFX_HASH_MIX(a, b, c);
+ length -= 12;
+ input += 12;
+ }
+
+ /* Handle the left-overs */
+ switch (length) {
+ case 12:
+ c += ((uint32_t)input[11]);
+ /* Fall-through */
+ case 11:
+ c += ((uint32_t)input[10]) << 8;
+ /* Fall-through */
+ case 10:
+ c += ((uint32_t)input[9]) << 16;
+ /* Fall-through */
+ case 9:
+ c += ((uint32_t)input[8]) << 24;
+ /* Fall-through */
+ case 8:
+ b += ((uint32_t)input[7]);
+ /* Fall-through */
+ case 7:
+ b += ((uint32_t)input[6]) << 8;
+ /* Fall-through */
+ case 6:
+ b += ((uint32_t)input[5]) << 16;
+ /* Fall-through */
+ case 5:
+ b += ((uint32_t)input[4]) << 24;
+ /* Fall-through */
+ case 4:
+ a += ((uint32_t)input[3]);
+ /* Fall-through */
+ case 3:
+ a += ((uint32_t)input[2]) << 8;
+ /* Fall-through */
+ case 2:
+ a += ((uint32_t)input[1]) << 16;
+ /* Fall-through */
+ case 1:
+ a += ((uint32_t)input[0]) << 24;
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if length parameter was zero */
+ break;
+ }
+
+ return (c);
+}
+
+#elif EFSYS_IS_LITTLE_ENDIAN
+
+/* Produce a 32-bit hash from arbitrarily aligned input */
+ __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE + (uint32_t)length + init;
+
+ /* Handle all but the last twelve bytes of the input */
+ while (length > 12) {
+ a += ((uint32_t)input[0]);
+ a += ((uint32_t)input[1]) << 8;
+ a += ((uint32_t)input[2]) << 16;
+ a += ((uint32_t)input[3]) << 24;
+ b += ((uint32_t)input[4]);
+ b += ((uint32_t)input[5]) << 8;
+ b += ((uint32_t)input[6]) << 16;
+ b += ((uint32_t)input[7]) << 24;
+ c += ((uint32_t)input[8]);
+ c += ((uint32_t)input[9]) << 8;
+ c += ((uint32_t)input[10]) << 16;
+ c += ((uint32_t)input[11]) << 24;
+ EFX_HASH_MIX(a, b, c);
+ length -= 12;
+ input += 12;
+ }
+
+ /* Handle the left-overs */
+ switch (length) {
+ case 12:
+ c += ((uint32_t)input[11]) << 24;
+ /* Fall-through */
+ case 11:
+ c += ((uint32_t)input[10]) << 16;
+ /* Fall-through */
+ case 10:
+ c += ((uint32_t)input[9]) << 8;
+ /* Fall-through */
+ case 9:
+ c += ((uint32_t)input[8]);
+ /* Fall-through */
+ case 8:
+ b += ((uint32_t)input[7]) << 24;
+ /* Fall-through */
+ case 7:
+ b += ((uint32_t)input[6]) << 16;
+ /* Fall-through */
+ case 6:
+ b += ((uint32_t)input[5]) << 8;
+ /* Fall-through */
+ case 5:
+ b += ((uint32_t)input[4]);
+ /* Fall-through */
+ case 4:
+ a += ((uint32_t)input[3]) << 24;
+ /* Fall-through */
+ case 3:
+ a += ((uint32_t)input[2]) << 16;
+ /* Fall-through */
+ case 2:
+ a += ((uint32_t)input[1]) << 8;
+ /* Fall-through */
+ case 1:
+ a += ((uint32_t)input[0]);
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if length parameter was zero */
+ break;
+ }
+
+ return (c);
+}
+
+#else
+
+#error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set"
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_impl.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_impl.h
new file mode 100644
index 00000000..548834f9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_impl.h
@@ -0,0 +1,1267 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_EFX_IMPL_H
+#define _SYS_EFX_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+/* FIXME: Add definition for driver generated software events */
+#ifndef ESE_DZ_EV_CODE_DRV_GEN_EV
+#define ESE_DZ_EV_CODE_DRV_GEN_EV FSE_AZ_EV_CODE_DRV_GEN_EV
+#endif
+
+
+#if EFSYS_OPT_SIENA
+#include "siena_impl.h"
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+#include "hunt_impl.h"
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+#include "medford_impl.h"
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+#include "medford2_impl.h"
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+#include "ef10_impl.h"
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFX_MOD_MCDI 0x00000001
+#define EFX_MOD_PROBE 0x00000002
+#define EFX_MOD_NVRAM 0x00000004
+#define EFX_MOD_VPD 0x00000008
+#define EFX_MOD_NIC 0x00000010
+#define EFX_MOD_INTR 0x00000020
+#define EFX_MOD_EV 0x00000040
+#define EFX_MOD_RX 0x00000080
+#define EFX_MOD_TX 0x00000100
+#define EFX_MOD_PORT 0x00000200
+#define EFX_MOD_MON 0x00000400
+#define EFX_MOD_FILTER 0x00001000
+#define EFX_MOD_LIC 0x00002000
+#define EFX_MOD_TUNNEL 0x00004000
+
+#define EFX_RESET_PHY 0x00000001
+#define EFX_RESET_RXQ_ERR 0x00000002
+#define EFX_RESET_TXQ_ERR 0x00000004
+
+typedef enum efx_mac_type_e {
+ EFX_MAC_INVALID = 0,
+ EFX_MAC_SIENA,
+ EFX_MAC_HUNTINGTON,
+ EFX_MAC_MEDFORD,
+ EFX_MAC_MEDFORD2,
+ EFX_MAC_NTYPES
+} efx_mac_type_t;
+
+typedef struct efx_ev_ops_s {
+ efx_rc_t (*eevo_init)(efx_nic_t *);
+ void (*eevo_fini)(efx_nic_t *);
+ efx_rc_t (*eevo_qcreate)(efx_nic_t *, unsigned int,
+ efsys_mem_t *, size_t, uint32_t,
+ uint32_t, uint32_t, efx_evq_t *);
+ void (*eevo_qdestroy)(efx_evq_t *);
+ efx_rc_t (*eevo_qprime)(efx_evq_t *, unsigned int);
+ void (*eevo_qpost)(efx_evq_t *, uint16_t);
+ efx_rc_t (*eevo_qmoderate)(efx_evq_t *, unsigned int);
+#if EFSYS_OPT_QSTATS
+ void (*eevo_qstats_update)(efx_evq_t *, efsys_stat_t *);
+#endif
+} efx_ev_ops_t;
+
+typedef struct efx_tx_ops_s {
+ efx_rc_t (*etxo_init)(efx_nic_t *);
+ void (*etxo_fini)(efx_nic_t *);
+ efx_rc_t (*etxo_qcreate)(efx_nic_t *,
+ unsigned int, unsigned int,
+ efsys_mem_t *, size_t,
+ uint32_t, uint16_t,
+ efx_evq_t *, efx_txq_t *,
+ unsigned int *);
+ void (*etxo_qdestroy)(efx_txq_t *);
+ efx_rc_t (*etxo_qpost)(efx_txq_t *, efx_buffer_t *,
+ unsigned int, unsigned int,
+ unsigned int *);
+ void (*etxo_qpush)(efx_txq_t *, unsigned int, unsigned int);
+ efx_rc_t (*etxo_qpace)(efx_txq_t *, unsigned int);
+ efx_rc_t (*etxo_qflush)(efx_txq_t *);
+ void (*etxo_qenable)(efx_txq_t *);
+ efx_rc_t (*etxo_qpio_enable)(efx_txq_t *);
+ void (*etxo_qpio_disable)(efx_txq_t *);
+ efx_rc_t (*etxo_qpio_write)(efx_txq_t *, uint8_t *, size_t,
+ size_t);
+ efx_rc_t (*etxo_qpio_post)(efx_txq_t *, size_t, unsigned int,
+ unsigned int *);
+ efx_rc_t (*etxo_qdesc_post)(efx_txq_t *, efx_desc_t *,
+ unsigned int, unsigned int,
+ unsigned int *);
+ void (*etxo_qdesc_dma_create)(efx_txq_t *, efsys_dma_addr_t,
+ size_t, boolean_t,
+ efx_desc_t *);
+ void (*etxo_qdesc_tso_create)(efx_txq_t *, uint16_t,
+ uint32_t, uint8_t,
+ efx_desc_t *);
+ void (*etxo_qdesc_tso2_create)(efx_txq_t *, uint16_t,
+ uint16_t, uint32_t, uint16_t,
+ efx_desc_t *, int);
+ void (*etxo_qdesc_vlantci_create)(efx_txq_t *, uint16_t,
+ efx_desc_t *);
+ void (*etxo_qdesc_checksum_create)(efx_txq_t *, uint16_t,
+ efx_desc_t *);
+#if EFSYS_OPT_QSTATS
+ void (*etxo_qstats_update)(efx_txq_t *,
+ efsys_stat_t *);
+#endif
+} efx_tx_ops_t;
+
+typedef union efx_rxq_type_data_u {
+ /* Dummy member to have non-empty union if no options are enabled */
+ uint32_t ertd_dummy;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ struct {
+ uint32_t eps_buf_size;
+ } ertd_packed_stream;
+#endif
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+ struct {
+ uint32_t eessb_bufs_per_desc;
+ uint32_t eessb_max_dma_len;
+ uint32_t eessb_buf_stride;
+ uint32_t eessb_hol_block_timeout;
+ } ertd_es_super_buffer;
+#endif
+} efx_rxq_type_data_t;
+
+typedef struct efx_rx_ops_s {
+ efx_rc_t (*erxo_init)(efx_nic_t *);
+ void (*erxo_fini)(efx_nic_t *);
+#if EFSYS_OPT_RX_SCATTER
+ efx_rc_t (*erxo_scatter_enable)(efx_nic_t *, unsigned int);
+#endif
+#if EFSYS_OPT_RX_SCALE
+ efx_rc_t (*erxo_scale_context_alloc)(efx_nic_t *,
+ efx_rx_scale_context_type_t,
+ uint32_t, uint32_t *);
+ efx_rc_t (*erxo_scale_context_free)(efx_nic_t *, uint32_t);
+ efx_rc_t (*erxo_scale_mode_set)(efx_nic_t *, uint32_t,
+ efx_rx_hash_alg_t,
+ efx_rx_hash_type_t, boolean_t);
+ efx_rc_t (*erxo_scale_key_set)(efx_nic_t *, uint32_t,
+ uint8_t *, size_t);
+ efx_rc_t (*erxo_scale_tbl_set)(efx_nic_t *, uint32_t,
+ unsigned int *, size_t);
+ uint32_t (*erxo_prefix_hash)(efx_nic_t *, efx_rx_hash_alg_t,
+ uint8_t *);
+#endif /* EFSYS_OPT_RX_SCALE */
+ efx_rc_t (*erxo_prefix_pktlen)(efx_nic_t *, uint8_t *,
+ uint16_t *);
+ void (*erxo_qpost)(efx_rxq_t *, efsys_dma_addr_t *, size_t,
+ unsigned int, unsigned int,
+ unsigned int);
+ void (*erxo_qpush)(efx_rxq_t *, unsigned int, unsigned int *);
+#if EFSYS_OPT_RX_PACKED_STREAM
+ void (*erxo_qpush_ps_credits)(efx_rxq_t *);
+ uint8_t * (*erxo_qps_packet_info)(efx_rxq_t *, uint8_t *,
+ uint32_t, uint32_t,
+ uint16_t *, uint32_t *, uint32_t *);
+#endif
+ efx_rc_t (*erxo_qflush)(efx_rxq_t *);
+ void (*erxo_qenable)(efx_rxq_t *);
+ efx_rc_t (*erxo_qcreate)(efx_nic_t *enp, unsigned int,
+ unsigned int, efx_rxq_type_t,
+ const efx_rxq_type_data_t *,
+ efsys_mem_t *, size_t, uint32_t,
+ unsigned int,
+ efx_evq_t *, efx_rxq_t *);
+ void (*erxo_qdestroy)(efx_rxq_t *);
+} efx_rx_ops_t;
+
+typedef struct efx_mac_ops_s {
+ efx_rc_t (*emo_poll)(efx_nic_t *, efx_link_mode_t *);
+ efx_rc_t (*emo_up)(efx_nic_t *, boolean_t *);
+ efx_rc_t (*emo_addr_set)(efx_nic_t *);
+ efx_rc_t (*emo_pdu_set)(efx_nic_t *);
+ efx_rc_t (*emo_pdu_get)(efx_nic_t *, size_t *);
+ efx_rc_t (*emo_reconfigure)(efx_nic_t *);
+ efx_rc_t (*emo_multicast_list_set)(efx_nic_t *);
+ efx_rc_t (*emo_filter_default_rxq_set)(efx_nic_t *,
+ efx_rxq_t *, boolean_t);
+ void (*emo_filter_default_rxq_clear)(efx_nic_t *);
+#if EFSYS_OPT_LOOPBACK
+ efx_rc_t (*emo_loopback_set)(efx_nic_t *, efx_link_mode_t,
+ efx_loopback_type_t);
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ efx_rc_t (*emo_stats_get_mask)(efx_nic_t *, uint32_t *, size_t);
+ efx_rc_t (*emo_stats_clear)(efx_nic_t *);
+ efx_rc_t (*emo_stats_upload)(efx_nic_t *, efsys_mem_t *);
+ efx_rc_t (*emo_stats_periodic)(efx_nic_t *, efsys_mem_t *,
+ uint16_t, boolean_t);
+ efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ efsys_stat_t *, uint32_t *);
+#endif /* EFSYS_OPT_MAC_STATS */
+} efx_mac_ops_t;
+
+typedef struct efx_phy_ops_s {
+ efx_rc_t (*epo_power)(efx_nic_t *, boolean_t); /* optional */
+ efx_rc_t (*epo_reset)(efx_nic_t *);
+ efx_rc_t (*epo_reconfigure)(efx_nic_t *);
+ efx_rc_t (*epo_verify)(efx_nic_t *);
+ efx_rc_t (*epo_oui_get)(efx_nic_t *, uint32_t *);
+#if EFSYS_OPT_PHY_STATS
+ efx_rc_t (*epo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ uint32_t *);
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ efx_rc_t (*epo_bist_enable_offline)(efx_nic_t *);
+ efx_rc_t (*epo_bist_start)(efx_nic_t *, efx_bist_type_t);
+ efx_rc_t (*epo_bist_poll)(efx_nic_t *, efx_bist_type_t,
+ efx_bist_result_t *, uint32_t *,
+ unsigned long *, size_t);
+ void (*epo_bist_stop)(efx_nic_t *, efx_bist_type_t);
+#endif /* EFSYS_OPT_BIST */
+} efx_phy_ops_t;
+
+#if EFSYS_OPT_FILTER
+typedef struct efx_filter_ops_s {
+ efx_rc_t (*efo_init)(efx_nic_t *);
+ void (*efo_fini)(efx_nic_t *);
+ efx_rc_t (*efo_restore)(efx_nic_t *);
+ efx_rc_t (*efo_add)(efx_nic_t *, efx_filter_spec_t *,
+ boolean_t may_replace);
+ efx_rc_t (*efo_delete)(efx_nic_t *, efx_filter_spec_t *);
+ efx_rc_t (*efo_supported_filters)(efx_nic_t *, uint32_t *,
+ size_t, size_t *);
+ efx_rc_t (*efo_reconfigure)(efx_nic_t *, uint8_t const *, boolean_t,
+ boolean_t, boolean_t, boolean_t,
+ uint8_t const *, uint32_t);
+} efx_filter_ops_t;
+
+extern __checkReturn efx_rc_t
+efx_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count);
+
+#endif /* EFSYS_OPT_FILTER */
+
+#if EFSYS_OPT_TUNNEL
+typedef struct efx_tunnel_ops_s {
+ boolean_t (*eto_udp_encap_supported)(efx_nic_t *);
+ efx_rc_t (*eto_reconfigure)(efx_nic_t *);
+} efx_tunnel_ops_t;
+#endif /* EFSYS_OPT_TUNNEL */
+
+typedef struct efx_port_s {
+ efx_mac_type_t ep_mac_type;
+ uint32_t ep_phy_type;
+ uint8_t ep_port;
+ uint32_t ep_mac_pdu;
+ uint8_t ep_mac_addr[6];
+ efx_link_mode_t ep_link_mode;
+ boolean_t ep_all_unicst;
+ boolean_t ep_mulcst;
+ boolean_t ep_all_mulcst;
+ boolean_t ep_brdcst;
+ unsigned int ep_fcntl;
+ boolean_t ep_fcntl_autoneg;
+ efx_oword_t ep_multicst_hash[2];
+ uint8_t ep_mulcst_addr_list[EFX_MAC_ADDR_LEN *
+ EFX_MAC_MULTICAST_LIST_MAX];
+ uint32_t ep_mulcst_addr_count;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t ep_loopback_type;
+ efx_link_mode_t ep_loopback_link_mode;
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_PHY_FLAGS
+ uint32_t ep_phy_flags;
+#endif /* EFSYS_OPT_PHY_FLAGS */
+#if EFSYS_OPT_PHY_LED_CONTROL
+ efx_phy_led_mode_t ep_phy_led_mode;
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+ efx_phy_media_type_t ep_fixed_port_type;
+ efx_phy_media_type_t ep_module_type;
+ uint32_t ep_adv_cap_mask;
+ uint32_t ep_lp_cap_mask;
+ uint32_t ep_default_adv_cap_mask;
+ uint32_t ep_phy_cap_mask;
+ boolean_t ep_mac_drain;
+#if EFSYS_OPT_BIST
+ efx_bist_type_t ep_current_bist;
+#endif
+ const efx_mac_ops_t *ep_emop;
+ const efx_phy_ops_t *ep_epop;
+} efx_port_t;
+
+typedef struct efx_mon_ops_s {
+#if EFSYS_OPT_MON_STATS
+ efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ efx_mon_stat_value_t *);
+#endif /* EFSYS_OPT_MON_STATS */
+} efx_mon_ops_t;
+
+typedef struct efx_mon_s {
+ efx_mon_type_t em_type;
+ const efx_mon_ops_t *em_emop;
+} efx_mon_t;
+
+typedef struct efx_intr_ops_s {
+ efx_rc_t (*eio_init)(efx_nic_t *, efx_intr_type_t, efsys_mem_t *);
+ void (*eio_enable)(efx_nic_t *);
+ void (*eio_disable)(efx_nic_t *);
+ void (*eio_disable_unlocked)(efx_nic_t *);
+ efx_rc_t (*eio_trigger)(efx_nic_t *, unsigned int);
+ void (*eio_status_line)(efx_nic_t *, boolean_t *, uint32_t *);
+ void (*eio_status_message)(efx_nic_t *, unsigned int,
+ boolean_t *);
+ void (*eio_fatal)(efx_nic_t *);
+ void (*eio_fini)(efx_nic_t *);
+} efx_intr_ops_t;
+
+typedef struct efx_intr_s {
+ const efx_intr_ops_t *ei_eiop;
+ efsys_mem_t *ei_esmp;
+ efx_intr_type_t ei_type;
+ unsigned int ei_level;
+} efx_intr_t;
+
+typedef struct efx_nic_ops_s {
+ efx_rc_t (*eno_probe)(efx_nic_t *);
+ efx_rc_t (*eno_board_cfg)(efx_nic_t *);
+ efx_rc_t (*eno_set_drv_limits)(efx_nic_t *, efx_drv_limits_t*);
+ efx_rc_t (*eno_reset)(efx_nic_t *);
+ efx_rc_t (*eno_init)(efx_nic_t *);
+ efx_rc_t (*eno_get_vi_pool)(efx_nic_t *, uint32_t *);
+ efx_rc_t (*eno_get_bar_region)(efx_nic_t *, efx_nic_region_t,
+ uint32_t *, size_t *);
+#if EFSYS_OPT_DIAG
+ efx_rc_t (*eno_register_test)(efx_nic_t *);
+#endif /* EFSYS_OPT_DIAG */
+ void (*eno_fini)(efx_nic_t *);
+ void (*eno_unprobe)(efx_nic_t *);
+} efx_nic_ops_t;
+
+#ifndef EFX_TXQ_LIMIT_TARGET
+#define EFX_TXQ_LIMIT_TARGET 259
+#endif
+#ifndef EFX_RXQ_LIMIT_TARGET
+#define EFX_RXQ_LIMIT_TARGET 512
+#endif
+
+
+#if EFSYS_OPT_FILTER
+
+#if EFSYS_OPT_SIENA
+
+typedef struct siena_filter_spec_s {
+ uint8_t sfs_type;
+ uint32_t sfs_flags;
+ uint32_t sfs_dmaq_id;
+ uint32_t sfs_dword[3];
+} siena_filter_spec_t;
+
+typedef enum siena_filter_type_e {
+ EFX_SIENA_FILTER_RX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_RX_TCP_WILD, /* TCP/IPv4 {dIP,dTCP, -, -} */
+ EFX_SIENA_FILTER_RX_UDP_FULL, /* UDP/IPv4 {dIP,dUDP,sIP,sUDP} */
+ EFX_SIENA_FILTER_RX_UDP_WILD, /* UDP/IPv4 {dIP,dUDP, -, -} */
+ EFX_SIENA_FILTER_RX_MAC_FULL, /* Ethernet {dMAC,VLAN} */
+ EFX_SIENA_FILTER_RX_MAC_WILD, /* Ethernet {dMAC, -} */
+
+ EFX_SIENA_FILTER_TX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_TCP_WILD, /* TCP/IPv4 { -, -,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_UDP_FULL, /* UDP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_UDP_WILD, /* UDP/IPv4 { -, -,sIP,sUDP} */
+ EFX_SIENA_FILTER_TX_MAC_FULL, /* Ethernet {sMAC,VLAN} */
+ EFX_SIENA_FILTER_TX_MAC_WILD, /* Ethernet {sMAC, -} */
+
+ EFX_SIENA_FILTER_NTYPES
+} siena_filter_type_t;
+
+typedef enum siena_filter_tbl_id_e {
+ EFX_SIENA_FILTER_TBL_RX_IP = 0,
+ EFX_SIENA_FILTER_TBL_RX_MAC,
+ EFX_SIENA_FILTER_TBL_TX_IP,
+ EFX_SIENA_FILTER_TBL_TX_MAC,
+ EFX_SIENA_FILTER_NTBLS
+} siena_filter_tbl_id_t;
+
+typedef struct siena_filter_tbl_s {
+ int sft_size; /* number of entries */
+ int sft_used; /* active count */
+ uint32_t *sft_bitmap; /* active bitmap */
+ siena_filter_spec_t *sft_spec; /* array of saved specs */
+} siena_filter_tbl_t;
+
+typedef struct siena_filter_s {
+ siena_filter_tbl_t sf_tbl[EFX_SIENA_FILTER_NTBLS];
+ unsigned int sf_depth[EFX_SIENA_FILTER_NTYPES];
+} siena_filter_t;
+
+#endif /* EFSYS_OPT_SIENA */
+
+typedef struct efx_filter_s {
+#if EFSYS_OPT_SIENA
+ siena_filter_t *ef_siena_filter;
+#endif /* EFSYS_OPT_SIENA */
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+ ef10_filter_table_t *ef_ef10_filter_table;
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+} efx_filter_t;
+
+#if EFSYS_OPT_SIENA
+
+extern void
+siena_filter_tbl_clear(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_id_t tbl);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_FILTER */
+
+#if EFSYS_OPT_MCDI
+
+#define EFX_TUNNEL_MAXNENTRIES (16)
+
+#if EFSYS_OPT_TUNNEL
+
+typedef struct efx_tunnel_udp_entry_s {
+ uint16_t etue_port; /* host/cpu-endian */
+ uint16_t etue_protocol;
+} efx_tunnel_udp_entry_t;
+
+typedef struct efx_tunnel_cfg_s {
+ efx_tunnel_udp_entry_t etc_udp_entries[EFX_TUNNEL_MAXNENTRIES];
+ unsigned int etc_udp_entries_num;
+} efx_tunnel_cfg_t;
+
+#endif /* EFSYS_OPT_TUNNEL */
+
+typedef struct efx_mcdi_ops_s {
+ efx_rc_t (*emco_init)(efx_nic_t *, const efx_mcdi_transport_t *);
+ void (*emco_send_request)(efx_nic_t *, void *, size_t,
+ void *, size_t);
+ efx_rc_t (*emco_poll_reboot)(efx_nic_t *);
+ boolean_t (*emco_poll_response)(efx_nic_t *);
+ void (*emco_read_response)(efx_nic_t *, void *, size_t, size_t);
+ void (*emco_fini)(efx_nic_t *);
+ efx_rc_t (*emco_feature_supported)(efx_nic_t *,
+ efx_mcdi_feature_id_t, boolean_t *);
+ void (*emco_get_timeout)(efx_nic_t *, efx_mcdi_req_t *,
+ uint32_t *);
+} efx_mcdi_ops_t;
+
+typedef struct efx_mcdi_s {
+ const efx_mcdi_ops_t *em_emcop;
+ const efx_mcdi_transport_t *em_emtp;
+ efx_mcdi_iface_t em_emip;
+} efx_mcdi_t;
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_NVRAM
+
+/* Invalid partition ID for en_nvram_partn_locked field of efx_nc_t */
+#define EFX_NVRAM_PARTN_INVALID (0xffffffffu)
+
+typedef struct efx_nvram_ops_s {
+#if EFSYS_OPT_DIAG
+ efx_rc_t (*envo_test)(efx_nic_t *);
+#endif /* EFSYS_OPT_DIAG */
+ efx_rc_t (*envo_type_to_partn)(efx_nic_t *, efx_nvram_type_t,
+ uint32_t *);
+ efx_rc_t (*envo_partn_size)(efx_nic_t *, uint32_t, size_t *);
+ efx_rc_t (*envo_partn_rw_start)(efx_nic_t *, uint32_t, size_t *);
+ efx_rc_t (*envo_partn_read)(efx_nic_t *, uint32_t,
+ unsigned int, caddr_t, size_t);
+ efx_rc_t (*envo_partn_read_backup)(efx_nic_t *, uint32_t,
+ unsigned int, caddr_t, size_t);
+ efx_rc_t (*envo_partn_erase)(efx_nic_t *, uint32_t,
+ unsigned int, size_t);
+ efx_rc_t (*envo_partn_write)(efx_nic_t *, uint32_t,
+ unsigned int, caddr_t, size_t);
+ efx_rc_t (*envo_partn_rw_finish)(efx_nic_t *, uint32_t,
+ uint32_t *);
+ efx_rc_t (*envo_partn_get_version)(efx_nic_t *, uint32_t,
+ uint32_t *, uint16_t *);
+ efx_rc_t (*envo_partn_set_version)(efx_nic_t *, uint32_t,
+ uint16_t *);
+ efx_rc_t (*envo_buffer_validate)(efx_nic_t *, uint32_t,
+ caddr_t, size_t);
+} efx_nvram_ops_t;
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_VPD
+typedef struct efx_vpd_ops_s {
+ efx_rc_t (*evpdo_init)(efx_nic_t *);
+ efx_rc_t (*evpdo_size)(efx_nic_t *, size_t *);
+ efx_rc_t (*evpdo_read)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_verify)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_reinit)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_get)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *);
+ efx_rc_t (*evpdo_set)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *);
+ efx_rc_t (*evpdo_next)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *, unsigned int *);
+ efx_rc_t (*evpdo_write)(efx_nic_t *, caddr_t, size_t);
+ void (*evpdo_fini)(efx_nic_t *);
+} efx_vpd_ops_t;
+#endif /* EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_partitions(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __out unsigned int *npartnp);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_metadata(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4],
+ __out_bcount_opt(size) char *descp,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_info(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt size_t *sizep,
+ __out_opt uint32_t *addressp,
+ __out_opt uint32_t *erase_sizep,
+ __out_opt uint32_t *write_sizep);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t reboot,
+ __out_opt uint32_t *verify_resultp);
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_test(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_LICENSING
+
+typedef struct efx_lic_ops_s {
+ efx_rc_t (*elo_update_licenses)(efx_nic_t *);
+ efx_rc_t (*elo_get_key_stats)(efx_nic_t *, efx_key_stats_t *);
+ efx_rc_t (*elo_app_state)(efx_nic_t *, uint64_t, boolean_t *);
+ efx_rc_t (*elo_get_id)(efx_nic_t *, size_t, uint32_t *,
+ size_t *, uint8_t *);
+ efx_rc_t (*elo_find_start)
+ (efx_nic_t *, caddr_t, size_t, uint32_t *);
+ efx_rc_t (*elo_find_end)(efx_nic_t *, caddr_t, size_t,
+ uint32_t, uint32_t *);
+ boolean_t (*elo_find_key)(efx_nic_t *, caddr_t, size_t,
+ uint32_t, uint32_t *, uint32_t *);
+ boolean_t (*elo_validate_key)(efx_nic_t *,
+ caddr_t, uint32_t);
+ efx_rc_t (*elo_read_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t, uint32_t,
+ caddr_t, size_t, uint32_t *);
+ efx_rc_t (*elo_write_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t,
+ caddr_t, uint32_t, uint32_t *);
+ efx_rc_t (*elo_delete_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t,
+ uint32_t, uint32_t, uint32_t *);
+ efx_rc_t (*elo_create_partition)(efx_nic_t *,
+ caddr_t, size_t);
+ efx_rc_t (*elo_finish_partition)(efx_nic_t *,
+ caddr_t, size_t);
+} efx_lic_ops_t;
+
+#endif
+
+typedef struct efx_drv_cfg_s {
+ uint32_t edc_min_vi_count;
+ uint32_t edc_max_vi_count;
+
+ uint32_t edc_max_piobuf_count;
+ uint32_t edc_pio_alloc_size;
+} efx_drv_cfg_t;
+
+struct efx_nic_s {
+ uint32_t en_magic;
+ efx_family_t en_family;
+ uint32_t en_features;
+ efsys_identifier_t *en_esip;
+ efsys_lock_t *en_eslp;
+ efsys_bar_t *en_esbp;
+ unsigned int en_mod_flags;
+ unsigned int en_reset_flags;
+ efx_nic_cfg_t en_nic_cfg;
+ efx_drv_cfg_t en_drv_cfg;
+ efx_port_t en_port;
+ efx_mon_t en_mon;
+ efx_intr_t en_intr;
+ uint32_t en_ev_qcount;
+ uint32_t en_rx_qcount;
+ uint32_t en_tx_qcount;
+ const efx_nic_ops_t *en_enop;
+ const efx_ev_ops_t *en_eevop;
+ const efx_tx_ops_t *en_etxop;
+ const efx_rx_ops_t *en_erxop;
+ efx_fw_variant_t efv;
+#if EFSYS_OPT_FILTER
+ efx_filter_t en_filter;
+ const efx_filter_ops_t *en_efop;
+#endif /* EFSYS_OPT_FILTER */
+#if EFSYS_OPT_TUNNEL
+ efx_tunnel_cfg_t en_tunnel_cfg;
+ const efx_tunnel_ops_t *en_etop;
+#endif /* EFSYS_OPT_TUNNEL */
+#if EFSYS_OPT_MCDI
+ efx_mcdi_t en_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+#if EFSYS_OPT_NVRAM
+ uint32_t en_nvram_partn_locked;
+ const efx_nvram_ops_t *en_envop;
+#endif /* EFSYS_OPT_NVRAM */
+#if EFSYS_OPT_VPD
+ const efx_vpd_ops_t *en_evpdop;
+#endif /* EFSYS_OPT_VPD */
+#if EFSYS_OPT_RX_SCALE
+ efx_rx_hash_support_t en_hash_support;
+ efx_rx_scale_context_type_t en_rss_context_type;
+ uint32_t en_rss_context;
+#endif /* EFSYS_OPT_RX_SCALE */
+ uint32_t en_vport_id;
+#if EFSYS_OPT_LICENSING
+ const efx_lic_ops_t *en_elop;
+ boolean_t en_licensing_supported;
+#endif
+ union {
+#if EFSYS_OPT_SIENA
+ struct {
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+ unsigned int enu_partn_mask;
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
+#if EFSYS_OPT_VPD
+ caddr_t enu_svpd;
+ size_t enu_svpd_length;
+#endif /* EFSYS_OPT_VPD */
+ int enu_unused;
+ } siena;
+#endif /* EFSYS_OPT_SIENA */
+ int enu_unused;
+ } en_u;
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+ union en_arch {
+ struct {
+ int ena_vi_base;
+ int ena_vi_count;
+ int ena_vi_shift;
+#if EFSYS_OPT_VPD
+ caddr_t ena_svpd;
+ size_t ena_svpd_length;
+#endif /* EFSYS_OPT_VPD */
+ efx_piobuf_handle_t ena_piobuf_handle[EF10_MAX_PIOBUF_NBUFS];
+ uint32_t ena_piobuf_count;
+ uint32_t ena_pio_alloc_map[EF10_MAX_PIOBUF_NBUFS];
+ uint32_t ena_pio_write_vi_base;
+ /* Memory BAR mapping regions */
+ uint32_t ena_uc_mem_map_offset;
+ size_t ena_uc_mem_map_size;
+ uint32_t ena_wc_mem_map_offset;
+ size_t ena_wc_mem_map_size;
+ } ef10;
+ } en_arch;
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) */
+};
+
+
+#define EFX_NIC_MAGIC 0x02121996
+
+typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *,
+ const efx_ev_callbacks_t *, void *);
+
+typedef struct efx_evq_rxq_state_s {
+ unsigned int eers_rx_read_ptr;
+ unsigned int eers_rx_mask;
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+ unsigned int eers_rx_stream_npackets;
+ boolean_t eers_rx_packed_stream;
+#endif
+#if EFSYS_OPT_RX_PACKED_STREAM
+ unsigned int eers_rx_packed_stream_credits;
+#endif
+} efx_evq_rxq_state_t;
+
+struct efx_evq_s {
+ uint32_t ee_magic;
+ efx_nic_t *ee_enp;
+ unsigned int ee_index;
+ unsigned int ee_mask;
+ efsys_mem_t *ee_esmp;
+#if EFSYS_OPT_QSTATS
+ uint32_t ee_stat[EV_NQSTATS];
+#endif /* EFSYS_OPT_QSTATS */
+
+ efx_ev_handler_t ee_rx;
+ efx_ev_handler_t ee_tx;
+ efx_ev_handler_t ee_driver;
+ efx_ev_handler_t ee_global;
+ efx_ev_handler_t ee_drv_gen;
+#if EFSYS_OPT_MCDI
+ efx_ev_handler_t ee_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+
+ efx_evq_rxq_state_t ee_rxq_state[EFX_EV_RX_NLABELS];
+
+ uint32_t ee_flags;
+};
+
+#define EFX_EVQ_MAGIC 0x08081997
+
+#define EFX_EVQ_SIENA_TIMER_QUANTUM_NS 6144 /* 768 cycles */
+
+struct efx_rxq_s {
+ uint32_t er_magic;
+ efx_nic_t *er_enp;
+ efx_evq_t *er_eep;
+ unsigned int er_index;
+ unsigned int er_label;
+ unsigned int er_mask;
+ efsys_mem_t *er_esmp;
+ efx_evq_rxq_state_t *er_ev_qstate;
+};
+
+#define EFX_RXQ_MAGIC 0x15022005
+
+struct efx_txq_s {
+ uint32_t et_magic;
+ efx_nic_t *et_enp;
+ unsigned int et_index;
+ unsigned int et_mask;
+ efsys_mem_t *et_esmp;
+#if EFSYS_OPT_HUNTINGTON
+ uint32_t et_pio_bufnum;
+ uint32_t et_pio_blknum;
+ uint32_t et_pio_write_offset;
+ uint32_t et_pio_offset;
+ size_t et_pio_size;
+#endif
+#if EFSYS_OPT_QSTATS
+ uint32_t et_stat[TX_NQSTATS];
+#endif /* EFSYS_OPT_QSTATS */
+};
+
+#define EFX_TXQ_MAGIC 0x05092005
+
+#define EFX_MAC_ADDR_COPY(_dst, _src) \
+ do { \
+ (_dst)[0] = (_src)[0]; \
+ (_dst)[1] = (_src)[1]; \
+ (_dst)[2] = (_src)[2]; \
+ (_dst)[3] = (_src)[3]; \
+ (_dst)[4] = (_src)[4]; \
+ (_dst)[5] = (_src)[5]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_MAC_BROADCAST_ADDR_SET(_dst) \
+ do { \
+ uint16_t *_d = (uint16_t *)(_dst); \
+ _d[0] = 0xffff; \
+ _d[1] = 0xffff; \
+ _d[2] = 0xffff; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#if EFSYS_OPT_CHECK_REG
+#define EFX_CHECK_REG(_enp, _reg) \
+ do { \
+ const char *name = #_reg; \
+ char min = name[4]; \
+ char max = name[5]; \
+ char rev; \
+ \
+ switch ((_enp)->en_family) { \
+ case EFX_FAMILY_SIENA: \
+ rev = 'C'; \
+ break; \
+ \
+ case EFX_FAMILY_HUNTINGTON: \
+ rev = 'D'; \
+ break; \
+ \
+ case EFX_FAMILY_MEDFORD: \
+ rev = 'E'; \
+ break; \
+ \
+ case EFX_FAMILY_MEDFORD2: \
+ rev = 'F'; \
+ break; \
+ \
+ default: \
+ rev = '?'; \
+ break; \
+ } \
+ \
+ EFSYS_ASSERT3S(rev, >=, min); \
+ EFSYS_ASSERT3S(rev, <=, max); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_CHECK_REG(_enp, _reg) do { \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#endif
+
+#define EFX_BAR_READD(_enp, _reg, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, _reg ## _OFST, \
+ (_edp), (_lock)); \
+ EFSYS_PROBE3(efx_bar_readd, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITED(_enp, _reg, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE3(efx_bar_writed, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, _reg ## _OFST, \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_READQ(_enp, _reg, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READQ((_enp)->en_esbp, _reg ## _OFST, \
+ (_eqp)); \
+ EFSYS_PROBE4(efx_bar_readq, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITEQ(_enp, _reg, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_writeq, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ EFSYS_BAR_WRITEQ((_enp)->en_esbp, _reg ## _OFST, \
+ (_eqp)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_READO(_enp, _reg, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READO((_enp)->en_esbp, _reg ## _OFST, \
+ (_eop), B_TRUE); \
+ EFSYS_PROBE6(efx_bar_reado, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITEO(_enp, _reg, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE6(efx_bar_writeo, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_WRITEO((_enp)->en_esbp, _reg ## _OFST, \
+ (_eop), B_TRUE); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Accessors for memory BAR non-VI tables.
+ *
+ * Code used on EF10 *must* use EFX_BAR_VI_*() macros for per-VI registers,
+ * to ensure the correct runtime VI window size is used on Medford2.
+ *
+ * Siena-only code may continue using EFX_BAR_TBL_*() macros for VI registers.
+ */
+
+#define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ EFSYS_PROBE4(efx_bar_tbl_readd, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + \
+ (3 * sizeof (efx_dword_t)) + \
+ ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READQ(_enp, _reg, _index, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READQ((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eqp)); \
+ EFSYS_PROBE5(efx_bar_tbl_readq, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITEQ(_enp, _reg, _index, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE5(efx_bar_tbl_writeq, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ EFSYS_BAR_WRITEQ((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eqp)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READO(_enp, _reg, _index, _eop, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop), (_lock)); \
+ EFSYS_PROBE7(efx_bar_tbl_reado, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITEO(_enp, _reg, _index, _eop, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE7(efx_bar_tbl_writeo, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_WRITEO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Accessors for memory BAR per-VI registers.
+ *
+ * The VI window size is 8KB for Medford and all earlier controllers.
+ * For Medford2, the VI window size can be 8KB, 16KB or 64KB.
+ */
+
+#define EFX_BAR_VI_READD(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, \
+ ((_reg ## _OFST) + \
+ ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \
+ (_edp), (_lock)); \
+ EFSYS_PROBE4(efx_bar_vi_readd, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_VI_WRITED(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_vi_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ ((_reg ## _OFST) + \
+ ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_VI_WRITED2(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_vi_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ ((_reg ## _OFST) + \
+ (2 * sizeof (efx_dword_t)) + \
+ ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Allow drivers to perform optimised 128-bit VI doorbell writes.
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * special-cased in the BIU on the Falcon/Siena and EF10 architectures to avoid
+ * the need for locking in the host, and are the only ones known to be safe to
+ * use 128-bites write with.
+ */
+#define EFX_BAR_VI_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE7(efx_bar_vi_doorbell_writeo, \
+ const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_DOORBELL_WRITEO((_enp)->en_esbp, \
+ (_reg ## _OFST + \
+ ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \
+ (_eop)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_DMA_SYNC_QUEUE_FOR_DEVICE(_esmp, _entries, _wptr, _owptr) \
+ do { \
+ unsigned int _new = (_wptr); \
+ unsigned int _old = (_owptr); \
+ \
+ if ((_new) >= (_old)) \
+ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \
+ (_old) * sizeof (efx_desc_t), \
+ ((_new) - (_old)) * sizeof (efx_desc_t)); \
+ else \
+ /* \
+ * It is cheaper to sync entire map than sync \
+ * two parts especially when offset/size are \
+ * ignored and entire map is synced in any case.\
+ */ \
+ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \
+ 0, \
+ (_entries) * sizeof (efx_desc_t)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+extern __checkReturn efx_rc_t
+efx_mac_select(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mac_multicast_hash_compute(
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count,
+ __out efx_oword_t *hash_low,
+ __out efx_oword_t *hash_high);
+
+extern __checkReturn efx_rc_t
+efx_phy_probe(
+ __in efx_nic_t *enp);
+
+extern void
+efx_phy_unprobe(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_VPD
+
+/* VPD utility functions */
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_length(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out size_t *lengthp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_verify(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out_opt boolean_t *cksummedp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_reinit(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t wantpid);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_get(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_tag_t tag,
+ __in efx_vpd_keyword_t keyword,
+ __out unsigned int *payloadp,
+ __out uint8_t *paylenp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_next(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_tag_t *tagp,
+ __out efx_vpd_keyword_t *keyword,
+ __out_opt unsigned int *payloadp,
+ __out_opt uint8_t *paylenp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_set(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+#endif /* EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+efx_mcdi_set_workaround(
+ __in efx_nic_t *enp,
+ __in uint32_t type,
+ __in boolean_t enabled,
+ __out_opt uint32_t *flagsp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_workarounds(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *implementedp,
+ __out_opt uint32_t *enabledp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_MAC_STATS
+
+/*
+ * Closed range of stats (i.e. the first and the last are included).
+ * The last must be greater or equal (if the range is one item only) to
+ * the first.
+ */
+struct efx_mac_stats_range {
+ efx_mac_stat_t first;
+ efx_mac_stat_t last;
+};
+
+extern efx_rc_t
+efx_mac_stats_mask_add_ranges(
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size,
+ __in_ecount(rng_count) const struct efx_mac_stats_range *rngp,
+ __in unsigned int rng_count);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_IMPL_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_intr.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_intr.c
new file mode 100644
index 00000000..b518916d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_intr.c
@@ -0,0 +1,565 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+static void
+siena_intr_enable(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_disable(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+static void
+siena_intr_fini(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp);
+
+static void
+siena_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+static void
+siena_intr_fatal(
+ __in efx_nic_t *enp);
+
+static __checkReturn boolean_t
+siena_intr_check_fatal(
+ __in efx_nic_t *enp);
+
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_intr_ops_t __efx_intr_siena_ops = {
+ siena_intr_init, /* eio_init */
+ siena_intr_enable, /* eio_enable */
+ siena_intr_disable, /* eio_disable */
+ siena_intr_disable_unlocked, /* eio_disable_unlocked */
+ siena_intr_trigger, /* eio_trigger */
+ siena_intr_status_line, /* eio_status_line */
+ siena_intr_status_message, /* eio_status_message */
+ siena_intr_fatal, /* eio_fatal */
+ siena_intr_fini, /* eio_fini */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+static const efx_intr_ops_t __efx_intr_ef10_ops = {
+ ef10_intr_init, /* eio_init */
+ ef10_intr_enable, /* eio_enable */
+ ef10_intr_disable, /* eio_disable */
+ ef10_intr_disable_unlocked, /* eio_disable_unlocked */
+ ef10_intr_trigger, /* eio_trigger */
+ ef10_intr_status_line, /* eio_status_line */
+ ef10_intr_status_message, /* eio_status_message */
+ ef10_intr_fatal, /* eio_fatal */
+ ef10_intr_fini, /* eio_fini */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+ __checkReturn efx_rc_t
+efx_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enp->en_mod_flags & EFX_MOD_INTR) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ eip->ei_esmp = esmp;
+ eip->ei_type = type;
+ eip->ei_level = 0;
+
+ enp->en_mod_flags |= EFX_MOD_INTR;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ eiop = &__efx_intr_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ eiop = &__efx_intr_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ eiop = &__efx_intr_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ eiop = &__efx_intr_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ if ((rc = eiop->eio_init(enp, type, esmp)) != 0)
+ goto fail3;
+
+ eip->ei_eiop = eiop;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_intr_fini(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_fini(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_INTR;
+}
+
+ void
+efx_intr_enable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_enable(enp);
+}
+
+ void
+efx_intr_disable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_disable(enp);
+}
+
+ void
+efx_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_disable_unlocked(enp);
+}
+
+
+ __checkReturn efx_rc_t
+efx_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ return (eiop->eio_trigger(enp, level));
+}
+
+ void
+efx_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_status_line(enp, fatalp, qmaskp);
+}
+
+ void
+efx_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_status_message(enp, message, fatalp);
+}
+
+ void
+efx_intr_fatal(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_fatal(enp);
+}
+
+
+/* ************************************************************************* */
+/* ************************************************************************* */
+/* ************************************************************************* */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_INTR_SIZE)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * bug17213 workaround.
+ *
+ * Under legacy interrupts, don't share a level between fatal
+ * interrupts and event queue interrupts. Under MSI-X, they
+ * must share, or we won't get an interrupt.
+ */
+ if (enp->en_family == EFX_FAMILY_SIENA &&
+ eip->ei_type == EFX_INTR_LINE)
+ eip->ei_level = 0x1f;
+ else
+ eip->ei_level = 0;
+
+ /* Enable all the genuinely fatal interrupts */
+ EFX_SET_OWORD(oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_ILL_ADR_INT_KER_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RBUF_OWN_INT_KER_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TBUF_OWN_INT_KER_EN, 0);
+ if (enp->en_family >= EFX_FAMILY_SIENA)
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_FATAL_INTR_REG_KER, &oword);
+
+ /* Set up the interrupt address register */
+ EFX_POPULATE_OWORD_3(oword,
+ FRF_AZ_NORM_INT_VEC_DIS_KER, (type == EFX_INTR_MESSAGE) ? 1 : 0,
+ FRF_AZ_INT_ADR_KER_DW0, EFSYS_MEM_ADDR(esmp) & 0xffffffff,
+ FRF_AZ_INT_ADR_KER_DW1, EFSYS_MEM_ADDR(esmp) >> 32);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+siena_intr_enable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+}
+
+static void
+siena_intr_disable(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ EFSYS_SPIN(10);
+}
+
+static void
+siena_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFSYS_BAR_READO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
+ &oword, B_FALSE);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
+ EFSYS_BAR_WRITEO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
+ &oword, B_FALSE);
+}
+
+static __checkReturn efx_rc_t
+siena_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+ unsigned int count;
+ uint32_t sel;
+ efx_rc_t rc;
+
+ /* bug16757: No event queues can be initialized */
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+
+ if (level >= EFX_NINTR_SIENA) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (level > EFX_MASK32(FRF_AZ_KER_INT_LEVE_SEL))
+ return (ENOTSUP); /* avoid EFSYS_PROBE() */
+
+ sel = level;
+
+ /* Trigger a test interrupt */
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, sel);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ /*
+ * Wait up to 100ms for the interrupt to be raised before restoring
+ * KER_INT_LEVE_SEL. Ignore a failure to raise (the caller will
+ * observe this soon enough anyway), but always reset KER_INT_LEVE_SEL
+ */
+ count = 0;
+ do {
+ EFSYS_SPIN(100); /* 100us */
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ } while (EFX_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER) && ++count < 1000);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn boolean_t
+siena_intr_check_fatal(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efsys_mem_t *esmp = eip->ei_esmp;
+ efx_oword_t oword;
+
+ /* Read the syndrome */
+ EFSYS_MEM_READO(esmp, 0, &oword);
+
+ if (EFX_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT) != 0) {
+ EFSYS_PROBE(fatal);
+
+ /* Clear the fatal interrupt condition */
+ EFX_SET_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT, 0);
+ EFSYS_MEM_WRITEO(esmp, 0, &oword);
+
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+static void
+siena_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_dword_t dword;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ /*
+ * Read the queue mask and implicitly acknowledge the
+ * interrupt.
+ */
+ EFX_BAR_READD(enp, FR_BZ_INT_ISR0_REG, &dword, B_FALSE);
+ *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
+
+ if (*qmaskp & (1U << eip->ei_level))
+ *fatalp = siena_intr_check_fatal(enp);
+ else
+ *fatalp = B_FALSE;
+}
+
+static void
+siena_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ if (message == eip->ei_level)
+ *fatalp = siena_intr_check_fatal(enp);
+ else
+ *fatalp = B_FALSE;
+}
+
+
+static void
+siena_intr_fatal(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_DECODE_INTR_FATAL
+ efx_oword_t fatal;
+ efx_oword_t mem_per;
+
+ EFX_BAR_READO(enp, FR_AZ_FATAL_INTR_REG_KER, &fatal);
+ EFX_ZERO_OWORD(mem_per);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0 ||
+ EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
+ EFX_BAR_READO(enp, FR_AZ_MEM_STAT_REG, &mem_per);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRAM_OOB_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_OOB, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_BUFID_DC_OOB_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_BUFID_DC_OOB, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_MEM_PERR,
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_RBUF_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_RBUF_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_TBUF_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_TBUF_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_RDESCQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_RDESQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_TDESCQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_TDESQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_EVQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVF_OFLO_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_EVFF_OFLO, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_ILL_ADR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_ILL_ADDR, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_PERR,
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));
+#else
+ EFSYS_ASSERT(0);
+#endif
+}
+
+static void
+siena_intr_fini(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /* Clear the interrupt address register */
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_lic.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_lic.c
new file mode 100644
index 00000000..49c00347
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_lic.c
@@ -0,0 +1,1703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_LICENSING
+
+#include "ef10_tlv_layout.h"
+#if EFSYS_OPT_SIENA
+#include "efx_regs_mcdi_aoe.h"
+#endif
+
+#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp);
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp);
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp);
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length);
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp);
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp);
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap);
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_update_license(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static const efx_lic_ops_t __efx_lic_v1_ops = {
+ efx_mcdi_fc_license_update_license, /* elo_update_licenses */
+ efx_mcdi_fc_license_get_key_stats, /* elo_get_key_stats */
+ NULL, /* elo_app_state */
+ NULL, /* elo_get_id */
+ efx_lic_v1v2_find_start, /* elo_find_start */
+ efx_lic_v1v2_find_end, /* elo_find_end */
+ efx_lic_v1v2_find_key, /* elo_find_key */
+ efx_lic_v1v2_validate_key, /* elo_validate_key */
+ efx_lic_v1v2_read_key, /* elo_read_key */
+ efx_lic_v1v2_write_key, /* elo_write_key */
+ efx_lic_v1v2_delete_key, /* elo_delete_key */
+ efx_lic_v1v2_create_partition, /* elo_create_partition */
+ efx_lic_v1v2_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_update_licenses(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensed_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+static const efx_lic_ops_t __efx_lic_v2_ops = {
+ efx_mcdi_licensing_update_licenses, /* elo_update_licenses */
+ efx_mcdi_licensing_get_key_stats, /* elo_get_key_stats */
+ efx_mcdi_licensed_app_state, /* elo_app_state */
+ NULL, /* elo_get_id */
+ efx_lic_v1v2_find_start, /* elo_find_start */
+ efx_lic_v1v2_find_end, /* elo_find_end */
+ efx_lic_v1v2_find_key, /* elo_find_key */
+ efx_lic_v1v2_validate_key, /* elo_validate_key */
+ efx_lic_v1v2_read_key, /* elo_read_key */
+ efx_lic_v1v2_write_key, /* elo_write_key */
+ efx_lic_v1v2_delete_key, /* elo_delete_key */
+ efx_lic_v1v2_create_partition, /* elo_create_partition */
+ efx_lic_v1v2_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_update_licenses(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_report_license(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_bcount_part_opt(buffer_size, *lengthp)
+ uint8_t *bufferp);
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp);
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp);
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp);
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length);
+
+ __checkReturn efx_rc_t
+efx_lic_v3_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp);
+
+ __checkReturn efx_rc_t
+efx_lic_v3_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp);
+
+ __checkReturn efx_rc_t
+efx_lic_v3_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap);
+
+ __checkReturn efx_rc_t
+efx_lic_v3_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+ __checkReturn efx_rc_t
+efx_lic_v3_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+static const efx_lic_ops_t __efx_lic_v3_ops = {
+ efx_mcdi_licensing_v3_update_licenses, /* elo_update_licenses */
+ efx_mcdi_licensing_v3_report_license, /* elo_get_key_stats */
+ efx_mcdi_licensing_v3_app_state, /* elo_app_state */
+ efx_mcdi_licensing_v3_get_id, /* elo_get_id */
+ efx_lic_v3_find_start, /* elo_find_start */
+ efx_lic_v3_find_end, /* elo_find_end */
+ efx_lic_v3_find_key, /* elo_find_key */
+ efx_lic_v3_validate_key, /* elo_validate_key */
+ efx_lic_v3_read_key, /* elo_read_key */
+ efx_lic_v3_write_key, /* elo_write_key */
+ efx_lic_v3_delete_key, /* elo_delete_key */
+ efx_lic_v3_create_partition, /* elo_create_partition */
+ efx_lic_v3_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+
+/* V1 Licensing - used in Siena Modena only */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_update_license(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_FC_IN_LICENSE_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, FC_IN_CMD,
+ MC_CMD_FC_OP_LICENSE);
+
+ MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
+ MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used != 0) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FC_IN_LICENSE_LEN,
+ MC_CMD_FC_OUT_LICENSE_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FC_OUT_LICENSE_LEN;
+
+ MCDI_IN_SET_DWORD(req, FC_IN_CMD,
+ MC_CMD_FC_OP_LICENSE);
+
+ MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
+ MC_CMD_FC_IN_LICENSE_GET_KEY_STATS);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FC_OUT_LICENSE_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_VALID_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_INVALID_KEYS);
+ eksp->eks_blacklisted =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_BLACKLISTED_KEYS);
+ eksp->eks_unverifiable = 0;
+ eksp->eks_wrong_node = 0;
+ eksp->eks_licensed_apps_lo = 0;
+ eksp->eks_licensed_apps_hi = 0;
+ eksp->eks_licensed_features_lo = 0;
+ eksp->eks_licensed_features_hi = 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+/* V1 and V2 Partition format - based on a 16-bit TLV format */
+
+#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
+
+/*
+ * V1/V2 format - defined in SF-108542-TC section 4.2:
+ * Type (T): 16bit - revision/HMAC algorithm
+ * Length (L): 16bit - value length in bytes
+ * Value (V): L bytes - payload
+ */
+#define EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX (256)
+#define EFX_LICENSE_V1V2_HEADER_LENGTH (2 * sizeof (uint16_t))
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp)
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ *startp = 0;
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp)
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ *endp = offset + EFX_LICENSE_V1V2_HEADER_LENGTH;
+ return (0);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp)
+{
+ boolean_t found;
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((size_t)buffer_size - offset < EFX_LICENSE_V1V2_HEADER_LENGTH)
+ goto fail1;
+
+ tlv_type = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[0]);
+ tlv_length = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[1]);
+ if ((tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) ||
+ (tlv_type == 0 && tlv_length == 0)) {
+ found = B_FALSE;
+ } else {
+ *startp = offset;
+ *lengthp = tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH;
+ found = B_TRUE;
+ }
+ return (found);
+
+fail1:
+ EFSYS_PROBE1(fail1, boolean_t, B_FALSE);
+
+ return (B_FALSE);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length)
+{
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (length < EFX_LICENSE_V1V2_HEADER_LENGTH) {
+ goto fail1;
+ }
+
+ tlv_type = __LE_TO_CPU_16(((uint16_t *)keyp)[0]);
+ tlv_length = __LE_TO_CPU_16(((uint16_t *)keyp)[1]);
+
+ if (tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) {
+ goto fail2;
+ }
+ if (tlv_type == 0) {
+ goto fail3;
+ }
+ if ((tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH) != length) {
+ goto fail4;
+ }
+
+ return (B_TRUE);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, boolean_t, B_FALSE);
+
+ return (B_FALSE);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp, buffer_size))
+ EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
+ EFX_LICENSE_V1V2_HEADER_LENGTH));
+
+ if (key_max_size < length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ memcpy(keyp, &bufferp[offset], length);
+
+ *lengthp = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
+ EFX_LICENSE_V1V2_HEADER_LENGTH));
+
+ /* Ensure space for terminator remains */
+ if ((offset + length) >
+ (buffer_size - EFX_LICENSE_V1V2_HEADER_LENGTH)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ memcpy(bufferp + offset, keyp, length);
+
+ *lengthp = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap)
+{
+ uint32_t move_start = offset + length;
+ uint32_t move_length = end - move_start;
+
+ _NOTE(ARGUNUSED(enp, buffer_size))
+ EFSYS_ASSERT(end <= buffer_size);
+
+ /* Shift everything after the key down */
+ memmove(bufferp + offset, bufferp + move_start, move_length);
+
+ *deltap = length;
+
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size)
+{
+ _NOTE(ARGUNUSED(enp, buffer_size))
+ EFSYS_ASSERT(EFX_LICENSE_V1V2_HEADER_LENGTH <= buffer_size);
+
+ /* Write terminator */
+ memset(bufferp, '\0', EFX_LICENSE_V1V2_HEADER_LENGTH);
+ return (0);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size)
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
+
+
+/* V2 Licensing - used by Huntington family only. See SF-113611-TC */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensed_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LICENSED_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)];
+ uint32_t app_state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ /* V2 licensing supports 32bit app id only */
+ if ((app_id >> 32) != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LICENSED_APP_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LICENSED_APP_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_APP_STATE_IN_APP_ID,
+ app_id & 0xffffffff);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_APP_STATE_OUT_STATE));
+ if (app_state != MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED) {
+ *licensedp = B_TRUE;
+ } else {
+ *licensedp = B_FALSE;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_update_licenses(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_LICENSING_IN_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_IN_OP,
+ MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used != 0) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_IN_LEN,
+ MC_CMD_LICENSING_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LICENSING_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_IN_OP,
+ MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_VALID_APP_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_INVALID_APP_KEYS);
+ eksp->eks_blacklisted =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_BLACKLISTED_APP_KEYS);
+ eksp->eks_unverifiable =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_UNVERIFIABLE_APP_KEYS);
+ eksp->eks_wrong_node =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_WRONG_NODE_APP_KEYS);
+ eksp->eks_licensed_apps_lo = 0;
+ eksp->eks_licensed_apps_hi = 0;
+ eksp->eks_licensed_features_lo = 0;
+ eksp->eks_licensed_features_hi = 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+/* V3 Licensing - used starting from Medford family. See SF-114884-SW */
+
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_update_licenses(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) ||
+ (enp->en_family == EFX_FAMILY_MEDFORD2));
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING_V3;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP,
+ MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_report_license(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_V3_IN_LEN,
+ MC_CMD_LICENSING_V3_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) ||
+ (enp->en_family == EFX_FAMILY_MEDFORD2));
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING_V3;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LICENSING_V3_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP,
+ MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_V3_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_VALID_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_INVALID_KEYS);
+ eksp->eks_blacklisted = 0;
+ eksp->eks_unverifiable =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_UNVERIFIABLE_KEYS);
+ eksp->eks_wrong_node =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_WRONG_NODE_KEYS);
+ eksp->eks_licensed_apps_lo =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_LO);
+ eksp->eks_licensed_apps_hi =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_HI);
+ eksp->eks_licensed_features_lo =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_LO);
+ eksp->eks_licensed_features_hi =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_HI);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN)];
+ uint32_t app_state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) ||
+ (enp->en_family == EFX_FAMILY_MEDFORD2));
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO,
+ app_id & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI,
+ app_id >> 32);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_V3_APP_STATE_OUT_STATE));
+ if (app_state != MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED) {
+ *licensedp = B_TRUE;
+ } else {
+ *licensedp = B_FALSE;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_bcount_part_opt(buffer_size, *lengthp)
+ uint8_t *bufferp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_GET_ID_V3_IN_LEN,
+ MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)];
+ efx_rc_t rc;
+
+ req.emr_cmd = MC_CMD_LICENSING_GET_ID_V3;
+
+ if (bufferp == NULL) {
+ /* Request id type and length only */
+ req.emr_in_buf = bufferp;
+ req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
+ req.emr_out_buf = bufferp;
+ req.emr_out_length = MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
+ (void) memset(payload, 0, sizeof (payload));
+ } else {
+ /* Request full buffer */
+ req.emr_in_buf = bufferp;
+ req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
+ req.emr_out_buf = bufferp;
+ req.emr_out_length =
+ MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
+ (void) memset(bufferp, 0, req.emr_out_length);
+ }
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *typep = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_TYPE);
+ *lengthp =
+ MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH);
+
+ if (bufferp == NULL) {
+ /*
+ * Modify length requirements to indicate to caller the extra
+ * buffering needed to read the complete output.
+ */
+ *lengthp += MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
+ } else {
+ /* Shift ID down to start of buffer */
+ memmove(bufferp,
+ bufferp + MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST,
+ *lengthp);
+ memset(bufferp + (*lengthp), 0,
+ MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* V3 format uses Huntington TLV format partition. See SF-108797-SW */
+#define EFX_LICENSE_V3_KEY_LENGTH_MIN (64)
+#define EFX_LICENSE_V3_KEY_LENGTH_MAX (160)
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return (ef10_nvram_buffer_find_item_start(bufferp, buffer_size,
+ startp));
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return (ef10_nvram_buffer_find_end(bufferp, buffer_size, offset, endp));
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_item(bufferp, buffer_size,
+ offset, startp, lengthp);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length)
+{
+ /* Check key is a valid V3 key */
+ uint8_t key_type;
+ uint8_t key_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (length < EFX_LICENSE_V3_KEY_LENGTH_MIN) {
+ goto fail1;
+ }
+
+ if (length > EFX_LICENSE_V3_KEY_LENGTH_MAX) {
+ goto fail2;
+ }
+
+ key_type = ((uint8_t *)keyp)[0];
+ key_length = ((uint8_t *)keyp)[1];
+
+ if (key_type < 3) {
+ goto fail3;
+ }
+ if (key_length > length) {
+ goto fail4;
+ }
+ return (B_TRUE);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, boolean_t, B_FALSE);
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_get_item(bufferp, buffer_size,
+ offset, length, keyp, key_max_size, lengthp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= EFX_LICENSE_V3_KEY_LENGTH_MAX);
+
+ return ef10_nvram_buffer_insert_item(bufferp, buffer_size,
+ offset, keyp, length, lengthp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap)
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((rc = ef10_nvram_buffer_delete_item(bufferp,
+ buffer_size, offset, length, end)) != 0) {
+ goto fail1;
+ }
+
+ *deltap = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size)
+{
+ efx_rc_t rc;
+
+ /* Construct empty partition */
+ if ((rc = ef10_nvram_buffer_create(enp,
+ NVRAM_PARTITION_TYPE_LICENSE,
+ bufferp, buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size)
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_buffer_finish(bufferp,
+ buffer_size)) != 0) {
+ goto fail1;
+ }
+
+ /* Validate completed partition */
+ if ((rc = ef10_nvram_buffer_validate(enp, NVRAM_PARTITION_TYPE_LICENSE,
+ bufferp, buffer_size)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+ __checkReturn efx_rc_t
+efx_lic_init(
+ __in efx_nic_t *enp)
+{
+ const efx_lic_ops_t *elop;
+ efx_key_stats_t eks;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_LIC));
+
+ switch (enp->en_family) {
+
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ elop = &__efx_lic_v1_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ elop = &__efx_lic_v2_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ elop = &__efx_lic_v3_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ elop = &__efx_lic_v3_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ enp->en_elop = elop;
+ enp->en_mod_flags |= EFX_MOD_LIC;
+
+ /* Probe for support */
+ if (efx_lic_get_key_stats(enp, &eks) == 0) {
+ enp->en_licensing_supported = B_TRUE;
+ } else {
+ enp->en_licensing_supported = B_FALSE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+extern __checkReturn boolean_t
+efx_lic_check_support(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ return (enp->en_licensing_supported);
+}
+
+ void
+efx_lic_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ enp->en_elop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_LIC;
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_update_licenses(
+ __in efx_nic_t *enp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_update_licenses(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_get_key_stats(enp, eksp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if (elop->elo_app_state == NULL)
+ return (ENOTSUP);
+
+ if ((rc = elop->elo_app_state(enp, app_id, licensedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_opt uint8_t *bufferp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if (elop->elo_get_id == NULL)
+ return (ENOTSUP);
+
+ if ((rc = elop->elo_get_id(enp, buffer_size, typep,
+ lengthp, bufferp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Buffer management API - abstracts varying TLV format used for License
+ * partition.
+ */
+
+ __checkReturn efx_rc_t
+efx_lic_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_find_start(enp, bufferp, buffer_size, startp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ rc = elop->elo_find_end(enp, bufferp, buffer_size, offset, endp);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ EFSYS_ASSERT(bufferp);
+ EFSYS_ASSERT(startp);
+ EFSYS_ASSERT(lengthp);
+
+ return (elop->elo_find_key(enp, bufferp, buffer_size, offset,
+ startp, lengthp));
+}
+
+
+/*
+ * Validate that the buffer contains a single key in a recognised format.
+ * An empty or terminator buffer is not accepted as a valid key.
+ */
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ boolean_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_validate_key(enp, keyp, length)) == B_FALSE)
+ goto fail1;
+
+ return (B_TRUE);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_read_key(enp, bufferp, buffer_size, offset,
+ length, keyp, key_max_size, lengthp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_write_key(enp, bufferp, buffer_size, offset,
+ keyp, length, lengthp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_delete_key(enp, bufferp, buffer_size, offset,
+ length, end, deltap)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_create_partition(enp, bufferp, buffer_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_finish_partition(enp, bufferp, buffer_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LICENSING */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_mac.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_mac.c
new file mode 100644
index 00000000..57436b95
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_mac.c
@@ -0,0 +1,950 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_mac_multicast_list_set(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_mac_ops_t __efx_mac_siena_ops = {
+ siena_mac_poll, /* emo_poll */
+ siena_mac_up, /* emo_up */
+ siena_mac_reconfigure, /* emo_addr_set */
+ siena_mac_reconfigure, /* emo_pdu_set */
+ siena_mac_pdu_get, /* emo_pdu_get */
+ siena_mac_reconfigure, /* emo_reconfigure */
+ siena_mac_multicast_list_set, /* emo_multicast_list_set */
+ NULL, /* emo_filter_set_default_rxq */
+ NULL, /* emo_filter_default_rxq_clear */
+#if EFSYS_OPT_LOOPBACK
+ siena_mac_loopback_set, /* emo_loopback_set */
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ siena_mac_stats_get_mask, /* emo_stats_get_mask */
+ efx_mcdi_mac_stats_clear, /* emo_stats_clear */
+ efx_mcdi_mac_stats_upload, /* emo_stats_upload */
+ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */
+ siena_mac_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MAC_STATS */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+static const efx_mac_ops_t __efx_mac_ef10_ops = {
+ ef10_mac_poll, /* emo_poll */
+ ef10_mac_up, /* emo_up */
+ ef10_mac_addr_set, /* emo_addr_set */
+ ef10_mac_pdu_set, /* emo_pdu_set */
+ ef10_mac_pdu_get, /* emo_pdu_get */
+ ef10_mac_reconfigure, /* emo_reconfigure */
+ ef10_mac_multicast_list_set, /* emo_multicast_list_set */
+ ef10_mac_filter_default_rxq_set, /* emo_filter_default_rxq_set */
+ ef10_mac_filter_default_rxq_clear,
+ /* emo_filter_default_rxq_clear */
+#if EFSYS_OPT_LOOPBACK
+ ef10_mac_loopback_set, /* emo_loopback_set */
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ ef10_mac_stats_get_mask, /* emo_stats_get_mask */
+ efx_mcdi_mac_stats_clear, /* emo_stats_clear */
+ efx_mcdi_mac_stats_upload, /* emo_stats_upload */
+ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */
+ ef10_mac_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MAC_STATS */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+ __checkReturn efx_rc_t
+efx_mac_pdu_set(
+ __in efx_nic_t *enp,
+ __in size_t pdu)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint32_t old_pdu;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if (pdu < EFX_MAC_PDU_MIN) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (pdu > EFX_MAC_PDU_MAX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ old_pdu = epp->ep_mac_pdu;
+ epp->ep_mac_pdu = (uint32_t)pdu;
+ if ((rc = emop->emo_pdu_set(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ epp->ep_mac_pdu = old_pdu;
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ if ((rc = emop->emo_pdu_get(enp, pdu)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_addr_set(
+ __in efx_nic_t *enp,
+ __in uint8_t *addr)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint8_t old_addr[6];
+ uint32_t oui;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (EFX_MAC_ADDR_IS_MULTICAST(addr)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ oui = addr[0] << 16 | addr[1] << 8 | addr[2];
+ if (oui == 0x000000) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ EFX_MAC_ADDR_COPY(old_addr, epp->ep_mac_addr);
+ EFX_MAC_ADDR_COPY(epp->ep_mac_addr, addr);
+ if ((rc = emop->emo_addr_set(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFX_MAC_ADDR_COPY(epp->ep_mac_addr, old_addr);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_filter_set(
+ __in efx_nic_t *enp,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ boolean_t old_all_unicst;
+ boolean_t old_mulcst;
+ boolean_t old_all_mulcst;
+ boolean_t old_brdcst;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ old_all_unicst = epp->ep_all_unicst;
+ old_mulcst = epp->ep_mulcst;
+ old_all_mulcst = epp->ep_all_mulcst;
+ old_brdcst = epp->ep_brdcst;
+
+ epp->ep_all_unicst = all_unicst;
+ epp->ep_mulcst = mulcst;
+ epp->ep_all_mulcst = all_mulcst;
+ epp->ep_brdcst = brdcst;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_all_unicst = old_all_unicst;
+ epp->ep_mulcst = old_mulcst;
+ epp->ep_all_mulcst = old_all_mulcst;
+ epp->ep_brdcst = old_brdcst;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_drain(
+ __in efx_nic_t *enp,
+ __in boolean_t enabled)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if (epp->ep_mac_drain == enabled)
+ return (0);
+
+ epp->ep_mac_drain = enabled;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((rc = emop->emo_up(enp, mac_upp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_fcntl_set(
+ __in efx_nic_t *enp,
+ __in unsigned int fcntl,
+ __in boolean_t autoneg)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ unsigned int old_fcntl;
+ boolean_t old_autoneg;
+ unsigned int old_adv_cap;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((fcntl & ~(EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE)) != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Ignore a request to set flow control auto-negotiation
+ * if the PHY doesn't support it.
+ */
+ if (~epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
+ autoneg = B_FALSE;
+
+ old_fcntl = epp->ep_fcntl;
+ old_autoneg = epp->ep_fcntl_autoneg;
+ old_adv_cap = epp->ep_adv_cap_mask;
+
+ epp->ep_fcntl = fcntl;
+ epp->ep_fcntl_autoneg = autoneg;
+
+ /*
+ * Always encode the flow control settings in the advertised
+ * capabilities even if we are not trying to auto-negotiate
+ * them and reconfigure both the PHY and the MAC.
+ */
+ if (fcntl & EFX_FCNTL_RESPOND)
+ epp->ep_adv_cap_mask |= (1 << EFX_PHY_CAP_PAUSE |
+ 1 << EFX_PHY_CAP_ASYM);
+ else
+ epp->ep_adv_cap_mask &= ~(1 << EFX_PHY_CAP_PAUSE |
+ 1 << EFX_PHY_CAP_ASYM);
+
+ if (fcntl & EFX_FCNTL_GENERATE)
+ epp->ep_adv_cap_mask ^= (1 << EFX_PHY_CAP_ASYM);
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ epp->ep_fcntl = old_fcntl;
+ epp->ep_fcntl_autoneg = old_autoneg;
+ epp->ep_adv_cap_mask = old_adv_cap;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_mac_fcntl_get(
+ __in efx_nic_t *enp,
+ __out unsigned int *fcntl_wantedp,
+ __out unsigned int *fcntl_linkp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int wanted = 0;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ /*
+ * Decode the requested flow control settings from the PHY
+ * advertised capabilities.
+ */
+ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_PAUSE))
+ wanted = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_ASYM))
+ wanted ^= EFX_FCNTL_GENERATE;
+
+ *fcntl_linkp = epp->ep_fcntl;
+ *fcntl_wantedp = wanted;
+}
+
+ __checkReturn efx_rc_t
+efx_mac_multicast_list_set(
+ __in efx_nic_t *enp,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint8_t *old_mulcst_addr_list = NULL;
+ uint32_t old_mulcst_addr_count;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (count > EFX_MAC_MULTICAST_LIST_MAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ old_mulcst_addr_count = epp->ep_mulcst_addr_count;
+ if (old_mulcst_addr_count > 0) {
+ /* Allocate memory to store old list (instead of using stack) */
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ if (old_mulcst_addr_list == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ /* Save the old list in case we need to rollback */
+ memcpy(old_mulcst_addr_list, epp->ep_mulcst_addr_list,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN);
+ }
+
+ /* Store the new list */
+ memcpy(epp->ep_mulcst_addr_list, addrs,
+ count * EFX_MAC_ADDR_LEN);
+ epp->ep_mulcst_addr_count = count;
+
+ if ((rc = emop->emo_multicast_list_set(enp)) != 0)
+ goto fail3;
+
+ if (old_mulcst_addr_count > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ /* Restore original list on failure */
+ epp->ep_mulcst_addr_count = old_mulcst_addr_count;
+ if (old_mulcst_addr_count > 0) {
+ memcpy(epp->ep_mulcst_addr_list, old_mulcst_addr_list,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN);
+
+ EFSYS_KMEM_FREE(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ }
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+ __checkReturn efx_rc_t
+efx_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (emop->emo_filter_default_rxq_set != NULL) {
+ rc = emop->emo_filter_default_rxq_set(enp, erp, using_rss);
+ if (rc != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (emop->emo_filter_default_rxq_clear != NULL)
+ emop->emo_filter_default_rxq_clear(enp);
+}
+
+
+#if EFSYS_OPT_MAC_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED EfxMacStatNamesBlock 1a45a82fcfb30c1b */
+static const char * const __efx_mac_stat_name[] = {
+ "rx_octets",
+ "rx_pkts",
+ "rx_unicst_pkts",
+ "rx_multicst_pkts",
+ "rx_brdcst_pkts",
+ "rx_pause_pkts",
+ "rx_le_64_pkts",
+ "rx_65_to_127_pkts",
+ "rx_128_to_255_pkts",
+ "rx_256_to_511_pkts",
+ "rx_512_to_1023_pkts",
+ "rx_1024_to_15xx_pkts",
+ "rx_ge_15xx_pkts",
+ "rx_errors",
+ "rx_fcs_errors",
+ "rx_drop_events",
+ "rx_false_carrier_errors",
+ "rx_symbol_errors",
+ "rx_align_errors",
+ "rx_internal_errors",
+ "rx_jabber_pkts",
+ "rx_lane0_char_err",
+ "rx_lane1_char_err",
+ "rx_lane2_char_err",
+ "rx_lane3_char_err",
+ "rx_lane0_disp_err",
+ "rx_lane1_disp_err",
+ "rx_lane2_disp_err",
+ "rx_lane3_disp_err",
+ "rx_match_fault",
+ "rx_nodesc_drop_cnt",
+ "tx_octets",
+ "tx_pkts",
+ "tx_unicst_pkts",
+ "tx_multicst_pkts",
+ "tx_brdcst_pkts",
+ "tx_pause_pkts",
+ "tx_le_64_pkts",
+ "tx_65_to_127_pkts",
+ "tx_128_to_255_pkts",
+ "tx_256_to_511_pkts",
+ "tx_512_to_1023_pkts",
+ "tx_1024_to_15xx_pkts",
+ "tx_ge_15xx_pkts",
+ "tx_errors",
+ "tx_sgl_col_pkts",
+ "tx_mult_col_pkts",
+ "tx_ex_col_pkts",
+ "tx_late_col_pkts",
+ "tx_def_pkts",
+ "tx_ex_def_pkts",
+ "pm_trunc_bb_overflow",
+ "pm_discard_bb_overflow",
+ "pm_trunc_vfifo_full",
+ "pm_discard_vfifo_full",
+ "pm_trunc_qbb",
+ "pm_discard_qbb",
+ "pm_discard_mapping",
+ "rxdp_q_disabled_pkts",
+ "rxdp_di_dropped_pkts",
+ "rxdp_streaming_pkts",
+ "rxdp_hlb_fetch",
+ "rxdp_hlb_wait",
+ "vadapter_rx_unicast_packets",
+ "vadapter_rx_unicast_bytes",
+ "vadapter_rx_multicast_packets",
+ "vadapter_rx_multicast_bytes",
+ "vadapter_rx_broadcast_packets",
+ "vadapter_rx_broadcast_bytes",
+ "vadapter_rx_bad_packets",
+ "vadapter_rx_bad_bytes",
+ "vadapter_rx_overflow",
+ "vadapter_tx_unicast_packets",
+ "vadapter_tx_unicast_bytes",
+ "vadapter_tx_multicast_packets",
+ "vadapter_tx_multicast_bytes",
+ "vadapter_tx_broadcast_packets",
+ "vadapter_tx_broadcast_bytes",
+ "vadapter_tx_bad_packets",
+ "vadapter_tx_bad_bytes",
+ "vadapter_tx_overflow",
+ "fec_uncorrected_errors",
+ "fec_corrected_errors",
+ "fec_corrected_symbols_lane0",
+ "fec_corrected_symbols_lane1",
+ "fec_corrected_symbols_lane2",
+ "fec_corrected_symbols_lane3",
+ "ctpio_vi_busy_fallback",
+ "ctpio_long_write_success",
+ "ctpio_missing_dbell_fail",
+ "ctpio_overflow_fail",
+ "ctpio_underflow_fail",
+ "ctpio_timeout_fail",
+ "ctpio_noncontig_wr_fail",
+ "ctpio_frm_clobber_fail",
+ "ctpio_invalid_wr_fail",
+ "ctpio_vi_clobber_fallback",
+ "ctpio_unqualified_fallback",
+ "ctpio_runt_fallback",
+ "ctpio_success",
+ "ctpio_fallback",
+ "ctpio_poison",
+ "ctpio_erase",
+ "rxdp_scatter_disabled_trunc",
+ "rxdp_hlb_idle",
+ "rxdp_hlb_timeout",
+};
+/* END MKCONFIG GENERATED EfxMacStatNamesBlock */
+
+ __checkReturn const char *
+efx_mac_stat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(id, <, EFX_MAC_NSTATS);
+ return (__efx_mac_stat_name[id]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+static efx_rc_t
+efx_mac_stats_mask_add_range(
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size,
+ __in const struct efx_mac_stats_range *rngp)
+{
+ unsigned int mask_npages = mask_size / sizeof (*maskp);
+ unsigned int el;
+ unsigned int el_min;
+ unsigned int el_max;
+ unsigned int low;
+ unsigned int high;
+ unsigned int width;
+ efx_rc_t rc;
+
+ if ((mask_npages * EFX_MAC_STATS_MASK_BITS_PER_PAGE) <=
+ (unsigned int)rngp->last) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(rngp->first, <=, rngp->last);
+ EFSYS_ASSERT3U(rngp->last, <, EFX_MAC_NSTATS);
+
+ for (el = 0; el < mask_npages; ++el) {
+ el_min = el * EFX_MAC_STATS_MASK_BITS_PER_PAGE;
+ el_max =
+ el_min + (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1);
+ if ((unsigned int)rngp->first > el_max ||
+ (unsigned int)rngp->last < el_min)
+ continue;
+ low = MAX((unsigned int)rngp->first, el_min);
+ high = MIN((unsigned int)rngp->last, el_max);
+ width = high - low + 1;
+ maskp[el] |=
+ (width == EFX_MAC_STATS_MASK_BITS_PER_PAGE) ?
+ (~0ULL) : (((1ULL << width) - 1) << (low - el_min));
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ efx_rc_t
+efx_mac_stats_mask_add_ranges(
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size,
+ __in_ecount(rng_count) const struct efx_mac_stats_range *rngp,
+ __in unsigned int rng_count)
+{
+ unsigned int i;
+ efx_rc_t rc;
+
+ for (i = 0; i < rng_count; ++i) {
+ if ((rc = efx_mac_stats_mask_add_range(maskp, mask_size,
+ &rngp[i])) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __out_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(maskp != NULL);
+ EFSYS_ASSERT(mask_size % sizeof (maskp[0]) == 0);
+
+ (void) memset(maskp, 0, mask_size);
+
+ if ((rc = emop->emo_stats_get_mask(enp, maskp, mask_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if ((rc = emop->emo_stats_clear(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if ((rc = emop->emo_stats_upload(enp, esmp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(emop != NULL);
+
+ if (emop->emo_stats_periodic == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = emop->emo_stats_periodic(enp, esmp, period_ms, events)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *essp,
+ __inout_opt uint32_t *generationp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ rc = emop->emo_stats_update(enp, esmp, essp, generationp);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+ __checkReturn efx_rc_t
+efx_mac_select(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_type_t type = EFX_MAC_INVALID;
+ const efx_mac_ops_t *emop;
+ int rc = EINVAL;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ emop = &__efx_mac_siena_ops;
+ type = EFX_MAC_SIENA;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ emop = &__efx_mac_ef10_ops;
+ type = EFX_MAC_HUNTINGTON;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ emop = &__efx_mac_ef10_ops;
+ type = EFX_MAC_MEDFORD;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ emop = &__efx_mac_ef10_ops;
+ type = EFX_MAC_MEDFORD2;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT(type != EFX_MAC_INVALID);
+ EFSYS_ASSERT3U(type, <, EFX_MAC_NTYPES);
+ EFSYS_ASSERT(emop != NULL);
+
+ epp->ep_emop = emop;
+ epp->ep_mac_type = type;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_SIENA
+
+#define EFX_MAC_HASH_BITS (1 << 8)
+
+/* Compute the multicast hash as used on Falcon and Siena. */
+static void
+siena_mac_multicast_hash_compute(
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count,
+ __out efx_oword_t *hash_low,
+ __out efx_oword_t *hash_high)
+{
+ uint32_t crc, index;
+ int i;
+
+ EFSYS_ASSERT(hash_low != NULL);
+ EFSYS_ASSERT(hash_high != NULL);
+
+ EFX_ZERO_OWORD(*hash_low);
+ EFX_ZERO_OWORD(*hash_high);
+
+ for (i = 0; i < count; i++) {
+ /* Calculate hash bucket (IEEE 802.3 CRC32 of the MAC addr) */
+ crc = efx_crc32_calculate(0xffffffff, addrs, EFX_MAC_ADDR_LEN);
+ index = crc % EFX_MAC_HASH_BITS;
+ if (index < 128) {
+ EFX_SET_OWORD_BIT(*hash_low, index);
+ } else {
+ EFX_SET_OWORD_BIT(*hash_high, index - 128);
+ }
+
+ addrs += EFX_MAC_ADDR_LEN;
+ }
+}
+
+static __checkReturn efx_rc_t
+siena_mac_multicast_list_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_oword_t old_hash[2];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ memcpy(old_hash, epp->ep_multicst_hash, sizeof (old_hash));
+
+ siena_mac_multicast_hash_compute(
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count,
+ &epp->ep_multicst_hash[0],
+ &epp->ep_multicst_hash[1]);
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ memcpy(epp->ep_multicst_hash, old_hash, sizeof (old_hash));
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.c
new file mode 100644
index 00000000..d4ebcf26
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.c
@@ -0,0 +1,2367 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2008-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MCDI
+
+/*
+ * There are three versions of the MCDI interface:
+ * - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers.
+ * - MCDIv1: Siena firmware and Huntington BootROM.
+ * - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM.
+ * Transport uses MCDIv2 headers.
+ *
+ * MCDIv2 Header NOT_EPOCH flag
+ * ----------------------------
+ * A new epoch begins at initial startup or after an MC reboot, and defines when
+ * the MC should reject stale MCDI requests.
+ *
+ * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all
+ * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1.
+ *
+ * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a
+ * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0.
+ */
+
+
+
+#if EFSYS_OPT_SIENA
+
+static const efx_mcdi_ops_t __efx_mcdi_siena_ops = {
+ siena_mcdi_init, /* emco_init */
+ siena_mcdi_send_request, /* emco_send_request */
+ siena_mcdi_poll_reboot, /* emco_poll_reboot */
+ siena_mcdi_poll_response, /* emco_poll_response */
+ siena_mcdi_read_response, /* emco_read_response */
+ siena_mcdi_fini, /* emco_fini */
+ siena_mcdi_feature_supported, /* emco_feature_supported */
+ siena_mcdi_get_timeout, /* emco_get_timeout */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = {
+ ef10_mcdi_init, /* emco_init */
+ ef10_mcdi_send_request, /* emco_send_request */
+ ef10_mcdi_poll_reboot, /* emco_poll_reboot */
+ ef10_mcdi_poll_response, /* emco_poll_response */
+ ef10_mcdi_read_response, /* emco_read_response */
+ ef10_mcdi_fini, /* emco_fini */
+ ef10_mcdi_feature_supported, /* emco_feature_supported */
+ ef10_mcdi_get_timeout, /* emco_get_timeout */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+
+
+ __checkReturn efx_rc_t
+efx_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *emtp)
+{
+ const efx_mcdi_ops_t *emcop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ emcop = &__efx_mcdi_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ emcop = &__efx_mcdi_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ emcop = &__efx_mcdi_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ emcop = &__efx_mcdi_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (enp->en_features & EFX_FEATURE_MCDI_DMA) {
+ /* MCDI requires a DMA buffer in host memory */
+ if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ }
+ enp->en_mcdi.em_emtp = emtp;
+
+ if (emcop != NULL && emcop->emco_init != NULL) {
+ if ((rc = emcop->emco_init(enp, emtp)) != 0)
+ goto fail3;
+ }
+
+ enp->en_mcdi.em_emcop = emcop;
+ enp->en_mod_flags |= EFX_MOD_MCDI;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_mcdi.em_emcop = NULL;
+ enp->en_mcdi.em_emtp = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_MCDI;
+
+ return (rc);
+}
+
+ void
+efx_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI);
+
+ if (emcop != NULL && emcop->emco_fini != NULL)
+ emcop->emco_fini(enp);
+
+ emip->emi_port = 0;
+ emip->emi_aborted = 0;
+
+ enp->en_mcdi.em_emcop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_MCDI;
+}
+
+ void
+efx_mcdi_new_epoch(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efsys_lock_state_t state;
+
+ /* Start a new epoch (allow fresh MCDI requests to succeed) */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emip->emi_new_epoch = B_TRUE;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+static void
+efx_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in void *hdrp,
+ __in size_t hdr_len,
+ __in void *sdup,
+ __in size_t sdu_len)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len);
+}
+
+static efx_rc_t
+efx_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ rc = emcop->emco_poll_reboot(enp);
+ return (rc);
+}
+
+static boolean_t
+efx_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ boolean_t available;
+
+ available = emcop->emco_poll_response(enp);
+ return (available);
+}
+
+static void
+efx_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_read_response(enp, bufferp, offset, length);
+}
+
+ void
+efx_mcdi_request_start(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __in boolean_t ev_cpl)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr[2];
+ size_t hdr_len;
+ unsigned int max_version;
+ unsigned int seq;
+ unsigned int xflags;
+ boolean_t new_epoch;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * efx_mcdi_request_start() is naturally serialised against both
+ * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(),
+ * by virtue of there only being one outstanding MCDI request.
+ * Unfortunately, upper layers may also call efx_mcdi_request_abort()
+ * at any time, to timeout a pending mcdi request, That request may
+ * then subsequently complete, meaning efx_mcdi_ev_cpl() or
+ * efx_mcdi_ev_death() may end up running in parallel with
+ * efx_mcdi_request_start(). This race is handled by ensuring that
+ * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the
+ * en_eslp lock.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ EFSYS_ASSERT(emip->emi_pending_req == NULL);
+ emip->emi_pending_req = emrp;
+ emip->emi_ev_cpl = ev_cpl;
+ emip->emi_poll_cnt = 0;
+ seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ);
+ new_epoch = emip->emi_new_epoch;
+ max_version = emip->emi_max_version;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ xflags = 0;
+ if (ev_cpl)
+ xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+
+ /*
+ * Huntington firmware supports MCDIv2, but the Huntington BootROM only
+ * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where
+ * possible to support this.
+ */
+ if ((max_version >= 2) &&
+ ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) ||
+ (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1) ||
+ (emrp->emr_out_length > MCDI_CTL_SDU_LEN_MAX_V1))) {
+ /* Construct MCDI v2 header */
+ hdr_len = sizeof (hdr);
+ EFX_POPULATE_DWORD_8(hdr[0],
+ MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_DATALEN, 0,
+ MCDI_HEADER_SEQ, seq,
+ MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
+ MCDI_HEADER_ERROR, 0,
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_XFLAGS, xflags);
+
+ EFX_POPULATE_DWORD_2(hdr[1],
+ MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd,
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length);
+ } else {
+ /* Construct MCDI v1 header */
+ hdr_len = sizeof (hdr[0]);
+ EFX_POPULATE_DWORD_8(hdr[0],
+ MCDI_HEADER_CODE, emrp->emr_cmd,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_DATALEN, emrp->emr_in_length,
+ MCDI_HEADER_SEQ, seq,
+ MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
+ MCDI_HEADER_ERROR, 0,
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_XFLAGS, xflags);
+ }
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST,
+ &hdr[0], hdr_len,
+ emrp->emr_in_buf, emrp->emr_in_length);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+ efx_mcdi_send_request(enp, &hdr[0], hdr_len,
+ emrp->emr_in_buf, emrp->emr_in_length);
+}
+
+
+static void
+efx_mcdi_read_response_header(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr[2];
+ unsigned int hdr_len;
+ unsigned int data_len;
+ unsigned int seq;
+ unsigned int cmd;
+ unsigned int error;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(emrp != NULL);
+
+ efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0]));
+ hdr_len = sizeof (hdr[0]);
+
+ cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE);
+ seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ);
+ error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR);
+
+ if (cmd != MC_CMD_V2_EXTN) {
+ data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN);
+ } else {
+ efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
+ hdr_len += sizeof (hdr[1]);
+
+ cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
+ data_len =
+ EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+ if (error && (data_len == 0)) {
+ /* The MC has rebooted since the request was sent. */
+ EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
+ efx_mcdi_poll_reboot(enp);
+ rc = EIO;
+ goto fail1;
+ }
+ if ((cmd != emrp->emr_cmd) ||
+ (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
+ /* Response is for a different request */
+ rc = EIO;
+ goto fail2;
+ }
+ if (error) {
+ efx_dword_t err[2];
+ unsigned int err_len = MIN(data_len, sizeof (err));
+ int err_code = MC_CMD_ERR_EPROTO;
+ int err_arg = 0;
+
+ /* Read error code (and arg num for MCDI v2 commands) */
+ efx_mcdi_read_response(enp, &err, hdr_len, err_len);
+
+ if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t)))
+ err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0);
+#ifdef WITH_MCDI_V2
+ if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t)))
+ err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0);
+#endif
+ emrp->emr_err_code = err_code;
+ emrp->emr_err_arg = err_arg;
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ if ((err_code == MC_CMD_ERR_PROXY_PENDING) &&
+ (err_len == sizeof (err))) {
+ /*
+ * The MCDI request would normally fail with EPERM, but
+ * firmware has forwarded it to an authorization agent
+ * attached to a privileged PF.
+ *
+ * Save the authorization request handle. The client
+ * must wait for a PROXY_RESPONSE event, or timeout.
+ */
+ emrp->emr_proxy_handle = err_arg;
+ }
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context,
+ EFX_LOG_MCDI_RESPONSE,
+ &hdr[0], hdr_len,
+ &err[0], err_len);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+ if (!emrp->emr_quiet) {
+ EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd,
+ int, err_code, int, err_arg);
+ }
+
+ rc = efx_mcdi_request_errcode(err_code);
+ goto fail3;
+ }
+
+ emrp->emr_rc = 0;
+ emrp->emr_out_length_used = data_len;
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ emrp->emr_proxy_handle = 0;
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+ return;
+
+fail3:
+fail2:
+fail1:
+ emrp->emr_rc = rc;
+ emrp->emr_out_length_used = 0;
+}
+
+static void
+efx_mcdi_finish_response(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+ efx_dword_t hdr[2];
+ unsigned int hdr_len;
+ size_t bytes;
+
+ if (emrp->emr_out_buf == NULL)
+ return;
+
+ /* Read the command header to detect MCDI response format */
+ hdr_len = sizeof (hdr[0]);
+ efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len);
+ if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) {
+ /*
+ * Read the actual payload length. The length given in the event
+ * is only correct for responses with the V1 format.
+ */
+ efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
+ hdr_len += sizeof (hdr[1]);
+
+ emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1],
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+ /* Copy payload out into caller supplied buffer */
+ bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length);
+ efx_mcdi_read_response(enp, emrp->emr_out_buf, hdr_len, bytes);
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context,
+ EFX_LOG_MCDI_RESPONSE,
+ &hdr[0], hdr_len,
+ emrp->emr_out_buf, bytes);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+}
+
+
+ __checkReturn boolean_t
+efx_mcdi_request_poll(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t *emrp;
+ efsys_lock_state_t state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /* Serialise against post-watchdog efx_mcdi_ev* */
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ EFSYS_ASSERT(emip->emi_pending_req != NULL);
+ EFSYS_ASSERT(!emip->emi_ev_cpl);
+ emrp = emip->emi_pending_req;
+
+ /* Check for reboot atomically w.r.t efx_mcdi_request_start */
+ if (emip->emi_poll_cnt++ == 0) {
+ if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
+ emip->emi_pending_req = NULL;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ /* Reboot/Assertion */
+ if (rc == EIO || rc == EINTR)
+ efx_mcdi_raise_exception(enp, emrp, rc);
+
+ goto fail1;
+ }
+ }
+
+ /* Check if a response is available */
+ if (efx_mcdi_poll_response(enp) == B_FALSE) {
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (B_FALSE);
+ }
+
+ /* Read the response header */
+ efx_mcdi_read_response_header(enp, emrp);
+
+ /* Request complete */
+ emip->emi_pending_req = NULL;
+
+ /* Ensure stale MCDI requests fail after an MC reboot. */
+ emip->emi_new_epoch = B_FALSE;
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if ((rc = emrp->emr_rc) != 0)
+ goto fail2;
+
+ efx_mcdi_finish_response(enp, emrp);
+ return (B_TRUE);
+
+fail2:
+ if (!emrp->emr_quiet)
+ EFSYS_PROBE(fail2);
+fail1:
+ if (!emrp->emr_quiet)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (B_TRUE);
+}
+
+ __checkReturn boolean_t
+efx_mcdi_request_abort(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t *emrp;
+ boolean_t aborted;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * efx_mcdi_ev_* may have already completed this event, and be
+ * spinning/blocked on the upper layer lock. So it *is* legitimate
+ * to for emi_pending_req to be NULL. If there is a pending event
+ * completed request, then provide a "credit" to allow
+ * efx_mcdi_ev_cpl() to accept a single spurious completion.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emrp = emip->emi_pending_req;
+ aborted = (emrp != NULL);
+ if (aborted) {
+ emip->emi_pending_req = NULL;
+
+ /* Error the request */
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = ETIMEDOUT;
+
+ /* Provide a credit for seqno/emr_pending_req mismatches */
+ if (emip->emi_ev_cpl)
+ ++emip->emi_aborted;
+
+ /*
+ * The upper layer has called us, so we don't
+ * need to complete the request.
+ */
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (aborted);
+}
+
+ void
+efx_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_get_timeout(enp, emrp, timeoutp);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_request_errcode(
+ __in unsigned int err)
+{
+
+ switch (err) {
+ /* MCDI v1 */
+ case MC_CMD_ERR_EPERM:
+ return (EACCES);
+ case MC_CMD_ERR_ENOENT:
+ return (ENOENT);
+ case MC_CMD_ERR_EINTR:
+ return (EINTR);
+ case MC_CMD_ERR_EACCES:
+ return (EACCES);
+ case MC_CMD_ERR_EBUSY:
+ return (EBUSY);
+ case MC_CMD_ERR_EINVAL:
+ return (EINVAL);
+ case MC_CMD_ERR_EDEADLK:
+ return (EDEADLK);
+ case MC_CMD_ERR_ENOSYS:
+ return (ENOTSUP);
+ case MC_CMD_ERR_ETIME:
+ return (ETIMEDOUT);
+ case MC_CMD_ERR_ENOTSUP:
+ return (ENOTSUP);
+ case MC_CMD_ERR_EALREADY:
+ return (EALREADY);
+
+ /* MCDI v2 */
+ case MC_CMD_ERR_EEXIST:
+ return (EEXIST);
+#ifdef MC_CMD_ERR_EAGAIN
+ case MC_CMD_ERR_EAGAIN:
+ return (EAGAIN);
+#endif
+#ifdef MC_CMD_ERR_ENOSPC
+ case MC_CMD_ERR_ENOSPC:
+ return (ENOSPC);
+#endif
+ case MC_CMD_ERR_ERANGE:
+ return (ERANGE);
+
+ case MC_CMD_ERR_ALLOC_FAIL:
+ return (ENOMEM);
+ case MC_CMD_ERR_NO_VADAPTOR:
+ return (ENOENT);
+ case MC_CMD_ERR_NO_EVB_PORT:
+ return (ENOENT);
+ case MC_CMD_ERR_NO_VSWITCH:
+ return (ENODEV);
+ case MC_CMD_ERR_VLAN_LIMIT:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_PCI_FUNC:
+ return (ENODEV);
+ case MC_CMD_ERR_BAD_VLAN_MODE:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_VSWITCH_TYPE:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_VPORT_TYPE:
+ return (EINVAL);
+ case MC_CMD_ERR_MAC_EXIST:
+ return (EEXIST);
+
+ case MC_CMD_ERR_PROXY_PENDING:
+ return (EAGAIN);
+
+ default:
+ EFSYS_PROBE1(mc_pcol_error, int, err);
+ return (EIO);
+ }
+}
+
+ void
+efx_mcdi_raise_exception(
+ __in efx_nic_t *enp,
+ __in_opt efx_mcdi_req_t *emrp,
+ __in int rc)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_exception_t exception;
+
+ /* Reboot or Assertion failure only */
+ EFSYS_ASSERT(rc == EIO || rc == EINTR);
+
+ /*
+ * If MC_CMD_REBOOT causes a reboot (dependent on parameters),
+ * then the EIO is not worthy of an exception.
+ */
+ if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO)
+ return;
+
+ exception = (rc == EIO)
+ ? EFX_MCDI_EXCEPTION_MC_REBOOT
+ : EFX_MCDI_EXCEPTION_MC_BADASSERT;
+
+ emtp->emt_exception(emtp->emt_context, exception);
+}
+
+ void
+efx_mcdi_execute(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ emrp->emr_quiet = B_FALSE;
+ emtp->emt_execute(emtp->emt_context, emrp);
+}
+
+ void
+efx_mcdi_execute_quiet(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ emrp->emr_quiet = B_TRUE;
+ emtp->emt_execute(emtp->emt_context, emrp);
+}
+
+ void
+efx_mcdi_ev_cpl(
+ __in efx_nic_t *enp,
+ __in unsigned int seq,
+ __in unsigned int outlen,
+ __in int errcode)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_req_t *emrp;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start()
+ * when we're completing an aborted request.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl ||
+ (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
+ EFSYS_ASSERT(emip->emi_aborted > 0);
+ if (emip->emi_aborted > 0)
+ --emip->emi_aborted;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return;
+ }
+
+ emrp = emip->emi_pending_req;
+ emip->emi_pending_req = NULL;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (emip->emi_max_version >= 2) {
+ /* MCDIv2 response details do not fit into an event. */
+ efx_mcdi_read_response_header(enp, emrp);
+ } else {
+ if (errcode != 0) {
+ if (!emrp->emr_quiet) {
+ EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd,
+ int, errcode);
+ }
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = efx_mcdi_request_errcode(errcode);
+ } else {
+ emrp->emr_out_length_used = outlen;
+ emrp->emr_rc = 0;
+ }
+ }
+ if (emrp->emr_rc == 0)
+ efx_mcdi_finish_response(enp, emrp);
+
+ emtp->emt_ev_cpl(emtp->emt_context);
+}
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_proxy_handle(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *handlep)
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+
+ /*
+ * Return proxy handle from MCDI request that returned with error
+ * MC_MCD_ERR_PROXY_PENDING. This handle is used to wait for a matching
+ * PROXY_RESPONSE event.
+ */
+ if ((emrp == NULL) || (handlep == NULL)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if ((emrp->emr_rc != 0) &&
+ (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) {
+ *handlep = emrp->emr_proxy_handle;
+ rc = 0;
+ } else {
+ *handlep = 0;
+ rc = ENOENT;
+ }
+ return (rc);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_mcdi_ev_proxy_response(
+ __in efx_nic_t *enp,
+ __in unsigned int handle,
+ __in unsigned int status)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_rc_t rc;
+
+ /*
+ * Handle results of an authorization request for a privileged MCDI
+ * command. If authorization was granted then we must re-issue the
+ * original MCDI request. If authorization failed or timed out,
+ * then the original MCDI request should be completed with the
+ * result code from this event.
+ */
+ rc = (status == 0) ? 0 : efx_mcdi_request_errcode(status);
+
+ emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc);
+}
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+ void
+efx_mcdi_ev_death(
+ __in efx_nic_t *enp,
+ __in int rc)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_req_t *emrp = NULL;
+ boolean_t ev_cpl;
+ efsys_lock_state_t state;
+
+ /*
+ * The MCDI request (if there is one) has been terminated, either
+ * by a BADASSERT or REBOOT event.
+ *
+ * If there is an outstanding event-completed MCDI operation, then we
+ * will never receive the completion event (because both MCDI
+ * completions and BADASSERT events are sent to the same evq). So
+ * complete this MCDI op.
+ *
+ * This function might run in parallel with efx_mcdi_request_poll()
+ * for poll completed mcdi requests, and also with
+ * efx_mcdi_request_start() for post-watchdog completions.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emrp = emip->emi_pending_req;
+ ev_cpl = emip->emi_ev_cpl;
+ if (emrp != NULL && emip->emi_ev_cpl) {
+ emip->emi_pending_req = NULL;
+
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = rc;
+ ++emip->emi_aborted;
+ }
+
+ /*
+ * Since we're running in parallel with a request, consume the
+ * status word before dropping the lock.
+ */
+ if (rc == EIO || rc == EINTR) {
+ EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
+ (void) efx_mcdi_poll_reboot(enp);
+ emip->emi_new_epoch = B_TRUE;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ efx_mcdi_raise_exception(enp, emrp, rc);
+
+ if (emrp != NULL && ev_cpl)
+ emtp->emt_ev_cpl(emtp->emt_context);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_version(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(4) uint16_t versionp[4],
+ __out_opt uint32_t *buildp,
+ __out_opt efx_mcdi_boot_t *statusp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_GET_VERSION_IN_LEN,
+ MC_CMD_GET_VERSION_OUT_LEN),
+ MAX(MC_CMD_GET_BOOT_STATUS_IN_LEN,
+ MC_CMD_GET_BOOT_STATUS_OUT_LEN))];
+ efx_word_t *ver_words;
+ uint16_t version[4];
+ uint32_t build;
+ efx_mcdi_boot_t status;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_VERSION;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* bootrom support */
+ if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) {
+ version[0] = version[1] = version[2] = version[3] = 0;
+ build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
+
+ goto version;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_VERSION_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION);
+ version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0);
+ version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0);
+ version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0);
+ version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0);
+ build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
+
+version:
+ /* The bootrom doesn't understand BOOT_STATUS */
+ if (MC_FW_VERSION_IS_BOOTLOADER(build)) {
+ status = EFX_MCDI_BOOT_ROM;
+ goto out;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOOT_STATUS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot access BOOT_STATUS */
+ status = EFX_MCDI_BOOT_PRIMARY;
+ version[0] = version[1] = version[2] = version[3] = 0;
+ build = 0;
+ goto out;
+ }
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail4;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS,
+ GET_BOOT_STATUS_OUT_FLAGS_PRIMARY))
+ status = EFX_MCDI_BOOT_PRIMARY;
+ else
+ status = EFX_MCDI_BOOT_SECONDARY;
+
+out:
+ if (versionp != NULL)
+ memcpy(versionp, version, sizeof (version));
+ if (buildp != NULL)
+ *buildp = build;
+ if (statusp != NULL)
+ *statusp = status;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_capabilities(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *flagsp,
+ __out_opt uint16_t *rx_dpcpu_fw_idp,
+ __out_opt uint16_t *tx_dpcpu_fw_idp,
+ __out_opt uint32_t *flags2p,
+ __out_opt uint32_t *tso2ncp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
+ MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)];
+ boolean_t v2_capable;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CAPABILITIES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (flagsp != NULL)
+ *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);
+
+ if (rx_dpcpu_fw_idp != NULL)
+ *rx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
+
+ if (tx_dpcpu_fw_idp != NULL)
+ *tx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ v2_capable = B_FALSE;
+ else
+ v2_capable = B_TRUE;
+
+ if (flags2p != NULL) {
+ *flags2p = (v2_capable) ?
+ MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2) :
+ 0;
+ }
+
+ if (tso2ncp != NULL) {
+ *tso2ncp = (v2_capable) ?
+ MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS) :
+ 0;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_do_reboot(
+ __in efx_nic_t *enp,
+ __in boolean_t after_assertion)
+{
+ uint8_t payload[MAX(MC_CMD_REBOOT_IN_LEN, MC_CMD_REBOOT_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ /*
+ * We could require the caller to have caused en_mod_flags=0 to
+ * call this function. This doesn't help the other port though,
+ * who's about to get the MC ripped out from underneath them.
+ * Since they have to cope with the subsequent fallout of MCDI
+ * failures, we should as well.
+ */
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_REBOOT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_REBOOT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS,
+ (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0));
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot reboot the MC. */
+ goto out;
+ }
+
+ /* A successful reboot request returns EIO. */
+ if (req.emr_rc != 0 && req.emr_rc != EIO) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+out:
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_reboot(
+ __in efx_nic_t *enp)
+{
+ return (efx_mcdi_do_reboot(enp, B_FALSE));
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_exit_assertion_handler(
+ __in efx_nic_t *enp)
+{
+ return (efx_mcdi_do_reboot(enp, B_TRUE));
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_read_assertion(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_ASSERTS_IN_LEN,
+ MC_CMD_GET_ASSERTS_OUT_LEN)];
+ const char *reason;
+ unsigned int flags;
+ unsigned int index;
+ unsigned int ofst;
+ int retry;
+ efx_rc_t rc;
+
+ /*
+ * Before we attempt to chat to the MC, we should verify that the MC
+ * isn't in it's assertion handler, either due to a previous reboot,
+ * or because we're reinitializing due to an eec_exception().
+ *
+ * Use GET_ASSERTS to read any assertion state that may be present.
+ * Retry this command twice. Once because a boot-time assertion failure
+ * might cause the 1st MCDI request to fail. And once again because
+ * we might race with efx_mcdi_exit_assertion_handler() running on
+ * partner port(s) on the same NIC.
+ */
+ retry = 2;
+ do {
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_ASSERTS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1);
+ efx_mcdi_execute_quiet(enp, &req);
+
+ } while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0);
+
+ if (req.emr_rc != 0) {
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot clear assertions. */
+ goto out;
+ }
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ /* Print out any assertion state recorded */
+ flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+ return (0);
+
+ reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
+ ? "system-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
+ ? "thread-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
+ ? "watchdog reset"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP)
+ ? "illegal address trap"
+ : "unknown assertion";
+ EFSYS_PROBE3(mcpu_assertion,
+ const char *, reason, unsigned int,
+ MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ unsigned int,
+ MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS));
+
+ /* Print out the registers (r1 ... r31) */
+ ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
+ for (index = 1;
+ index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
+ index++) {
+ EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int,
+ EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst),
+ EFX_DWORD_0));
+ ofst += sizeof (efx_dword_t);
+ }
+ EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN);
+
+out:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Internal routines for for specific MCDI requests.
+ */
+
+ __checkReturn efx_rc_t
+efx_mcdi_drv_attach(
+ __in efx_nic_t *enp,
+ __in boolean_t attach)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_DRV_ATTACH_IN_LEN,
+ MC_CMD_DRV_ATTACH_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_DRV_ATTACH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN;
+
+ /*
+ * Typically, client drivers use DONT_CARE for the datapath firmware
+ * type to ensure that the driver can attach to an unprivileged
+ * function. The datapath firmware type to use is controlled by the
+ * 'sfboot' utility.
+ * If a client driver wishes to attach with a specific datapath firmware
+ * type, that can be passed in second argument of efx_nic_probe API. One
+ * such example is the ESXi native driver that attempts attaching with
+ * FULL_FEATURED datapath firmware type first and fall backs to
+ * DONT_CARE datapath firmware type if MC_CMD_DRV_ATTACH fails.
+ */
+ MCDI_IN_POPULATE_DWORD_2(req, DRV_ATTACH_IN_NEW_STATE,
+ DRV_ATTACH_IN_ATTACH, attach ? 1 : 0,
+ DRV_ATTACH_IN_SUBVARIANT_AWARE, EFSYS_OPT_FW_SUBVARIANT_AWARE);
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1);
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, enp->efv);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_board_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *board_typep,
+ __out_opt efx_dword_t *capabilitiesp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMIN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOARD_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ if (emip->emi_port == 1) {
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0);
+ } else if (emip->emi_port == 2) {
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1);
+ } else {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ if (capabilitiesp != NULL) {
+ if (emip->emi_port == 1) {
+ *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
+ } else if (emip->emi_port == 2) {
+ *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
+ } else {
+ rc = EINVAL;
+ goto fail4;
+ }
+ }
+
+ if (board_typep != NULL) {
+ *board_typep = MCDI_OUT_DWORD(req,
+ GET_BOARD_CFG_OUT_BOARD_TYPE);
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_resource_limits(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *nevqp,
+ __out_opt uint32_t *nrxqp,
+ __out_opt uint32_t *ntxqp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_RESOURCE_LIMITS_IN_LEN,
+ MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (nevqp != NULL)
+ *nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ);
+ if (nrxqp != NULL)
+ *nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ);
+ if (ntxqp != NULL)
+ *ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_phy_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN,
+ MC_CMD_GET_PHY_CFG_OUT_LEN)];
+#if EFSYS_OPT_NAMES
+ const char *namep;
+ size_t namelen;
+#endif
+ uint32_t phy_media_type;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE);
+#if EFSYS_OPT_NAMES
+ namep = MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME);
+ namelen = MIN(sizeof (encp->enc_phy_name) - 1,
+ strnlen(namep, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
+ (void) memset(encp->enc_phy_name, 0,
+ sizeof (encp->enc_phy_name));
+ memcpy(encp->enc_phy_name, namep, namelen);
+#endif /* EFSYS_OPT_NAMES */
+ (void) memset(encp->enc_phy_revision, 0,
+ sizeof (encp->enc_phy_revision));
+ memcpy(encp->enc_phy_revision,
+ MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION),
+ MIN(sizeof (encp->enc_phy_revision) - 1,
+ MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN));
+#if EFSYS_OPT_PHY_LED_CONTROL
+ encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) |
+ (1 << EFX_PHY_LED_OFF) |
+ (1 << EFX_PHY_LED_ON));
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ /* Get the media type of the fixed port, if recognised. */
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS);
+ phy_media_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ epp->ep_fixed_port_type = (efx_phy_media_type_t)phy_media_type;
+ if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES)
+ epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID;
+
+ epp->ep_phy_cap_mask =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+#if EFSYS_OPT_PHY_FLAGS
+ encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT);
+
+ /* Populate internal state */
+ encp->enc_mcdi_mdio_channel =
+ (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL);
+
+#if EFSYS_OPT_PHY_STATS
+ encp->enc_mcdi_phy_stat_mask =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK);
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+ encp->enc_bist_mask = 0;
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST_CABLE_SHORT))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT);
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST_CABLE_LONG))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG);
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL);
+#endif /* EFSYS_OPT_BIST */
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_firmware_update_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported updates */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_macaddr_change_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported MAC changes */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_link_control_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported link control */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_spoofing_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported MAC spoofing */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_BIST
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+/*
+ * Enter bist offline mode. This is a fw mode which puts the NIC into a state
+ * where memory BIST tests can be run and not much else can interfere or happen.
+ * A reboot is required to exit this mode.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0);
+ EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+ __checkReturn efx_rc_t
+efx_mcdi_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_START_BIST_IN_LEN,
+ MC_CMD_START_BIST_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_START_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_START_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_START_BIST_OUT_LEN;
+
+ switch (type) {
+ case EFX_BIST_TYPE_PHY_NORMAL:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST);
+ break;
+ case EFX_BIST_TYPE_PHY_CABLE_SHORT:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PHY_BIST_CABLE_SHORT);
+ break;
+ case EFX_BIST_TYPE_PHY_CABLE_LONG:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PHY_BIST_CABLE_LONG);
+ break;
+ case EFX_BIST_TYPE_MC_MEM:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_MC_MEM_BIST);
+ break;
+ case EFX_BIST_TYPE_SAT_MEM:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PORT_MEM_BIST);
+ break;
+ case EFX_BIST_TYPE_REG:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_REG_BIST);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+
+/* Enable logging of some events (e.g. link state changes) */
+ __checkReturn efx_rc_t
+efx_mcdi_log_ctrl(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LOG_CTRL_IN_LEN,
+ MC_CMD_LOG_CTRL_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LOG_CTRL;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST,
+ MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
+ MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_MAC_STATS
+
+typedef enum efx_stats_action_e {
+ EFX_STATS_CLEAR,
+ EFX_STATS_UPLOAD,
+ EFX_STATS_ENABLE_NOEVENTS,
+ EFX_STATS_ENABLE_EVENTS,
+ EFX_STATS_DISABLE,
+} efx_stats_action_t;
+
+static __checkReturn efx_rc_t
+efx_mcdi_mac_stats(
+ __in efx_nic_t *enp,
+ __in_opt efsys_mem_t *esmp,
+ __in efx_stats_action_t action,
+ __in uint16_t period_ms)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN,
+ MC_CMD_MAC_STATS_V2_OUT_DMA_LEN)];
+ int clear = (action == EFX_STATS_CLEAR);
+ int upload = (action == EFX_STATS_UPLOAD);
+ int enable = (action == EFX_STATS_ENABLE_NOEVENTS);
+ int events = (action == EFX_STATS_ENABLE_EVENTS);
+ int disable = (action == EFX_STATS_DISABLE);
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_MAC_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_MAC_STATS_V2_OUT_DMA_LEN;
+
+ MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, upload,
+ MAC_STATS_IN_CLEAR, clear,
+ MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable,
+ MAC_STATS_IN_PERIODIC_ENABLE, enable | events,
+ MAC_STATS_IN_PERIODIC_NOEVENT, !events,
+ MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0);
+
+ if (enable || events || upload) {
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ uint32_t bytes;
+
+ /* Periodic stats or stats upload require a DMA buffer */
+ if (esmp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) {
+ /* MAC stats count too small for legacy MAC stats */
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ bytes = encp->enc_mac_stats_nstats * sizeof (efx_qword_t);
+
+ if (EFSYS_MEM_SIZE(esmp) < bytes) {
+ /* DMA buffer too small */
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);
+ }
+
+ /*
+ * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats,
+ * as this may fail (and leave periodic DMA enabled) if the
+ * vadapter has already been deleted.
+ */
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID,
+ (disable ? EVB_PORT_ID_NULL : enp->en_vport_id));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ /* EF10: Expect ENOENT if no DMA queues are initialised */
+ if ((req.emr_rc != ENOENT) ||
+ (enp->en_rx_qcount + enp->en_tx_qcount != 0)) {
+ rc = req.emr_rc;
+ goto fail4;
+ }
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_clear(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR, 0)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp)
+{
+ efx_rc_t rc;
+
+ /*
+ * The MC DMAs aggregate statistics for our convenience, so we can
+ * avoid having to pull the statistics buffer into the cache to
+ * maintain cumulative statistics.
+ */
+ if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD, 0)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events)
+{
+ efx_rc_t rc;
+
+ /*
+ * The MC DMAs aggregate statistics for our convenience, so we can
+ * avoid having to pull the statistics buffer into the cache to
+ * maintain cumulative statistics.
+ * Huntington uses a fixed 1sec period.
+ * Medford uses a fixed 1sec period before v6.2.1.1033 firmware.
+ */
+ if (period_ms == 0)
+ rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE, 0);
+ else if (events)
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS,
+ period_ms);
+ else
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS,
+ period_ms);
+
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+/*
+ * This function returns the pf and vf number of a function. If it is a pf the
+ * vf number is 0xffff. The vf number is the index of the vf on that
+ * function. So if you have 3 vfs on pf 0 the 3 vfs will return (pf=0,vf=0),
+ * (pf=0,vf=1), (pf=0,vf=2) aand the pf will return (pf=0, vf=0xffff).
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_get_function_info(
+ __in efx_nic_t *enp,
+ __out uint32_t *pfp,
+ __out_opt uint32_t *vfp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_FUNCTION_INFO_IN_LEN,
+ MC_CMD_GET_FUNCTION_INFO_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_FUNCTION_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF);
+ if (vfp != NULL)
+ *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_privilege_mask(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __in uint32_t vf,
+ __out uint32_t *maskp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_PRIVILEGE_MASK_IN_LEN,
+ MC_CMD_PRIVILEGE_MASK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_PRIVILEGE_MASK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN;
+
+ MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION,
+ PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
+ PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+ __checkReturn efx_rc_t
+efx_mcdi_set_workaround(
+ __in efx_nic_t *enp,
+ __in uint32_t type,
+ __in boolean_t enabled,
+ __out_opt uint32_t *flagsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_WORKAROUND_IN_LEN,
+ MC_CMD_WORKAROUND_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_WORKAROUND;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_WORKAROUND_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type);
+ MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ? 1 : 0);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (flagsp != NULL) {
+ if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+ *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS);
+ else
+ *flagsp = 0;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_workarounds(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *implementedp,
+ __out_opt uint32_t *enabledp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_GET_WORKAROUNDS_OUT_LEN];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_WORKAROUNDS;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (implementedp != NULL) {
+ *implementedp =
+ MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED);
+ }
+
+ if (enabledp != NULL) {
+ *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED);
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Size of media information page in accordance with SFF-8472 and SFF-8436.
+ * It is used in MCDI interface as well.
+ */
+#define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_phy_media_info(
+ __in efx_nic_t *enp,
+ __in uint32_t mcdi_page,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE))];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length =
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used !=
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) !=
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
+ rc = EIO;
+ goto fail3;
+ }
+
+ memcpy(data,
+ MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
+ len);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * 2-wire device address of the base information in accordance with SFF-8472
+ * Diagnostic Monitoring Interface for Optical Transceivers section
+ * 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE 0xA0
+
+/*
+ * 2-wire device address of the digital diagnostics monitoring interface
+ * in accordance with SFF-8472 Diagnostic Monitoring Interface for Optical
+ * Transceivers section 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM 0xA2
+
+/*
+ * Hard wired 2-wire device address for QSFP+ in accordance with SFF-8436
+ * QSFP+ 10 Gbs 4X PLUGGABLE TRANSCEIVER section 7.4 Device Addressing and
+ * Operation.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP 0xA0
+
+ __checkReturn efx_rc_t
+efx_mcdi_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_rc_t rc;
+ uint32_t mcdi_lower_page;
+ uint32_t mcdi_upper_page;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ /*
+ * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages.
+ * Offset plus length interface allows to access page 0 only.
+ * I.e. non-zero upper pages are not accessible.
+ * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6
+ * QSFP+ Memory Map for details on how information is structured
+ * and accessible.
+ */
+ switch (epp->ep_fixed_port_type) {
+ case EFX_PHY_MEDIA_SFP_PLUS:
+ /*
+ * In accordance with SFF-8472 Diagnostic Monitoring
+ * Interface for Optical Transceivers section 4 Memory
+ * Organization two 2-wire addresses are defined.
+ */
+ switch (dev_addr) {
+ /* Base information */
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE:
+ /*
+ * MCDI page 0 should be used to access lower
+ * page 0 (0x00 - 0x7f) at the device address 0xA0.
+ */
+ mcdi_lower_page = 0;
+ /*
+ * MCDI page 1 should be used to access upper
+ * page 0 (0x80 - 0xff) at the device address 0xA0.
+ */
+ mcdi_upper_page = 1;
+ break;
+ /* Diagnostics */
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM:
+ /*
+ * MCDI page 2 should be used to access lower
+ * page 0 (0x00 - 0x7f) at the device address 0xA2.
+ */
+ mcdi_lower_page = 2;
+ /*
+ * MCDI page 3 should be used to access upper
+ * page 0 (0x80 - 0xff) at the device address 0xA2.
+ */
+ mcdi_upper_page = 3;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+ case EFX_PHY_MEDIA_QSFP_PLUS:
+ switch (dev_addr) {
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP:
+ /*
+ * MCDI page -1 should be used to access lower page 0
+ * (0x00 - 0x7f).
+ */
+ mcdi_lower_page = (uint32_t)-1;
+ /*
+ * MCDI page 0 should be used to access upper page 0
+ * (0x80h - 0xff).
+ */
+ mcdi_upper_page = 0;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
+ uint8_t read_len =
+ MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset);
+
+ rc = efx_mcdi_get_phy_media_info(enp,
+ mcdi_lower_page, offset, read_len, data);
+ if (rc != 0)
+ goto fail2;
+
+ data += read_len;
+ len -= read_len;
+
+ offset = 0;
+ } else {
+ offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE;
+ }
+
+ if (len > 0) {
+ EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+ EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ rc = efx_mcdi_get_phy_media_info(enp,
+ mcdi_upper_page, offset, len, data);
+ if (rc != 0)
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MCDI */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.h
new file mode 100644
index 00000000..253a9e60
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.h
@@ -0,0 +1,395 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_EFX_MCDI_H
+#define _SYS_EFX_MCDI_H
+
+#include "efx.h"
+#include "efx_regs_mcdi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * A reboot/assertion causes the MCDI status word to be set after the
+ * command word is set or a REBOOT event is sent. If we notice a reboot
+ * via these mechanisms then wait 10ms for the status word to be set.
+ */
+#define EFX_MCDI_STATUS_SLEEP_US 10000
+
+struct efx_mcdi_req_s {
+ boolean_t emr_quiet;
+ /* Inputs: Command #, input buffer and length */
+ unsigned int emr_cmd;
+ uint8_t *emr_in_buf;
+ size_t emr_in_length;
+ /* Outputs: retcode, buffer, length, and length used */
+ efx_rc_t emr_rc;
+ uint8_t *emr_out_buf;
+ size_t emr_out_length;
+ size_t emr_out_length_used;
+ /* Internals: low level transport details */
+ unsigned int emr_err_code;
+ unsigned int emr_err_arg;
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ uint32_t emr_proxy_handle;
+#endif
+};
+
+typedef struct efx_mcdi_iface_s {
+ unsigned int emi_port;
+ unsigned int emi_max_version;
+ unsigned int emi_seq;
+ efx_mcdi_req_t *emi_pending_req;
+ boolean_t emi_ev_cpl;
+ boolean_t emi_new_epoch;
+ int emi_aborted;
+ uint32_t emi_poll_cnt;
+ uint32_t emi_mc_reboot_status;
+} efx_mcdi_iface_t;
+
+extern void
+efx_mcdi_execute(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp);
+
+extern void
+efx_mcdi_execute_quiet(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp);
+
+extern void
+efx_mcdi_ev_cpl(
+ __in efx_nic_t *enp,
+ __in unsigned int seq,
+ __in unsigned int outlen,
+ __in int errcode);
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+extern __checkReturn efx_rc_t
+efx_mcdi_get_proxy_handle(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *handlep);
+
+extern void
+efx_mcdi_ev_proxy_response(
+ __in efx_nic_t *enp,
+ __in unsigned int handle,
+ __in unsigned int status);
+#endif
+
+extern void
+efx_mcdi_ev_death(
+ __in efx_nic_t *enp,
+ __in int rc);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_request_errcode(
+ __in unsigned int err);
+
+extern void
+efx_mcdi_raise_exception(
+ __in efx_nic_t *enp,
+ __in_opt efx_mcdi_req_t *emrp,
+ __in int rc);
+
+typedef enum efx_mcdi_boot_e {
+ EFX_MCDI_BOOT_PRIMARY,
+ EFX_MCDI_BOOT_SECONDARY,
+ EFX_MCDI_BOOT_ROM,
+} efx_mcdi_boot_t;
+
+extern __checkReturn efx_rc_t
+efx_mcdi_version(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(4) uint16_t versionp[4],
+ __out_opt uint32_t *buildp,
+ __out_opt efx_mcdi_boot_t *statusp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_capabilities(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *flagsp,
+ __out_opt uint16_t *rx_dpcpu_fw_idp,
+ __out_opt uint16_t *tx_dpcpu_fw_idp,
+ __out_opt uint32_t *flags2p,
+ __out_opt uint32_t *tso2ncp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_read_assertion(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_exit_assertion_handler(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_drv_attach(
+ __in efx_nic_t *enp,
+ __in boolean_t attach);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_board_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *board_typep,
+ __out_opt efx_dword_t *capabilitiesp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_phy_cfg(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_firmware_update_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_macaddr_change_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_link_control_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_spoofing_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+
+#if EFSYS_OPT_BIST
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+extern __checkReturn efx_rc_t
+efx_mcdi_bist_enable_offline(
+ __in efx_nic_t *enp);
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+extern __checkReturn efx_rc_t
+efx_mcdi_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+#endif /* EFSYS_OPT_BIST */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_resource_limits(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *nevqp,
+ __out_opt uint32_t *nrxqp,
+ __out_opt uint32_t *ntxqp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_log_ctrl(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_clear(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events);
+
+
+#if EFSYS_OPT_LOOPBACK
+extern __checkReturn efx_rc_t
+efx_mcdi_get_loopback_modes(
+ __in efx_nic_t *enp);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data);
+
+#define MCDI_IN(_emr, _type, _ofst) \
+ ((_type *)((_emr).emr_in_buf + (_ofst)))
+
+#define MCDI_IN2(_emr, _type, _ofst) \
+ MCDI_IN(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_IN_SET_BYTE(_emr, _ofst, _value) \
+ EFX_POPULATE_BYTE_1(*MCDI_IN2(_emr, efx_byte_t, _ofst), \
+ EFX_BYTE_0, _value)
+
+#define MCDI_IN_SET_WORD(_emr, _ofst, _value) \
+ EFX_POPULATE_WORD_1(*MCDI_IN2(_emr, efx_word_t, _ofst), \
+ EFX_WORD_0, _value)
+
+#define MCDI_IN_SET_DWORD(_emr, _ofst, _value) \
+ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ EFX_DWORD_0, _value)
+
+#define MCDI_IN_SET_DWORD_FIELD(_emr, _ofst, _field, _value) \
+ EFX_SET_DWORD_FIELD(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field, _value)
+
+#define MCDI_IN_POPULATE_DWORD_1(_emr, _ofst, _field1, _value1) \
+ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1)
+
+#define MCDI_IN_POPULATE_DWORD_2(_emr, _ofst, _field1, _value1, \
+ _field2, _value2) \
+ EFX_POPULATE_DWORD_2(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2)
+
+#define MCDI_IN_POPULATE_DWORD_3(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_DWORD_3(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3)
+
+#define MCDI_IN_POPULATE_DWORD_4(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4) \
+ EFX_POPULATE_DWORD_4(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4)
+
+#define MCDI_IN_POPULATE_DWORD_5(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5) \
+ EFX_POPULATE_DWORD_5(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5)
+
+#define MCDI_IN_POPULATE_DWORD_6(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_DWORD_6(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6)
+
+#define MCDI_IN_POPULATE_DWORD_7(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7) \
+ EFX_POPULATE_DWORD_7(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7)
+
+#define MCDI_IN_POPULATE_DWORD_8(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8) \
+ EFX_POPULATE_DWORD_8(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8)
+
+#define MCDI_IN_POPULATE_DWORD_9(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_DWORD_9(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8, \
+ MC_CMD_ ## _field9, _value9)
+
+#define MCDI_IN_POPULATE_DWORD_10(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8, _field9, _value9, _field10, _value10) \
+ EFX_POPULATE_DWORD_10(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8, \
+ MC_CMD_ ## _field9, _value9, \
+ MC_CMD_ ## _field10, _value10)
+
+#define MCDI_OUT(_emr, _type, _ofst) \
+ ((_type *)((_emr).emr_out_buf + (_ofst)))
+
+#define MCDI_OUT2(_emr, _type, _ofst) \
+ MCDI_OUT(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_OUT_BYTE(_emr, _ofst) \
+ EFX_BYTE_FIELD(*MCDI_OUT2(_emr, efx_byte_t, _ofst), \
+ EFX_BYTE_0)
+
+#define MCDI_OUT_WORD(_emr, _ofst) \
+ EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \
+ EFX_WORD_0)
+
+#define MCDI_OUT_WORD_FIELD(_emr, _ofst, _field) \
+ EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \
+ MC_CMD_ ## _field)
+
+#define MCDI_OUT_DWORD(_emr, _ofst) \
+ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
+ EFX_DWORD_0)
+
+#define MCDI_OUT_DWORD_FIELD(_emr, _ofst, _field) \
+ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field)
+
+#define MCDI_EV_FIELD(_eqp, _field) \
+ EFX_QWORD_FIELD(*_eqp, MCDI_EVENT_ ## _field)
+
+#define MCDI_CMD_DWORD_FIELD(_edp, _field) \
+ EFX_DWORD_FIELD(*_edp, MC_CMD_ ## _field)
+
+#define EFX_MCDI_HAVE_PRIVILEGE(mask, priv) \
+ (((mask) & (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv)) == \
+ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv))
+
+typedef enum efx_mcdi_feature_id_e {
+ EFX_MCDI_FEATURE_FW_UPDATE = 0,
+ EFX_MCDI_FEATURE_LINK_CONTROL,
+ EFX_MCDI_FEATURE_MACADDR_CHANGE,
+ EFX_MCDI_FEATURE_MAC_SPOOFING,
+ EFX_MCDI_FEATURE_NIDS
+} efx_mcdi_feature_id_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_MCDI_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_mon.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_mon.c
new file mode 100644
index 00000000..9fc268ec
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_mon.c
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_NAMES
+
+static const char * const __efx_mon_name[] = {
+ "",
+ "sfx90x0",
+ "sfx91x0",
+ "sfx92x0"
+};
+
+ const char *
+efx_mon_name(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
+ EFSYS_ASSERT3U(encp->enc_mon_type, <, EFX_MON_NTYPES);
+ return (__efx_mon_name[encp->enc_mon_type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+#if EFSYS_OPT_MON_MCDI
+static const efx_mon_ops_t __efx_mon_mcdi_ops = {
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MON_STATS */
+};
+#endif
+
+
+ __checkReturn efx_rc_t
+efx_mon_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mon_t *emp = &(enp->en_mon);
+ const efx_mon_ops_t *emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enp->en_mod_flags & EFX_MOD_MON) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ enp->en_mod_flags |= EFX_MOD_MON;
+
+ emp->em_type = encp->enc_mon_type;
+
+ EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
+ switch (emp->em_type) {
+#if EFSYS_OPT_MON_MCDI
+ case EFX_MON_SFC90X0:
+ case EFX_MON_SFC91X0:
+ case EFX_MON_SFC92X0:
+ emop = &__efx_mon_mcdi_ops;
+ break;
+#endif
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ emp->em_emop = emop;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ emp->em_type = EFX_MON_INVALID;
+
+ enp->en_mod_flags &= ~EFX_MOD_MON;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_MON_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED MonitorStatNamesBlock 8150a068198c0f96 */
+static const char * const __mon_stat_name[] = {
+ "value_2_5v",
+ "value_vccp1",
+ "value_vcc",
+ "value_5v",
+ "value_12v",
+ "value_vccp2",
+ "value_ext_temp",
+ "value_int_temp",
+ "value_ain1",
+ "value_ain2",
+ "controller_cooling",
+ "ext_cooling",
+ "1v",
+ "1_2v",
+ "1_8v",
+ "3_3v",
+ "1_2va",
+ "vref",
+ "vaoe",
+ "aoe_temperature",
+ "psu_aoe_temperature",
+ "psu_temperature",
+ "fan0",
+ "fan1",
+ "fan2",
+ "fan3",
+ "fan4",
+ "vaoe_in",
+ "iaoe",
+ "iaoe_in",
+ "nic_power",
+ "0_9v",
+ "i0_9v",
+ "i1_2v",
+ "0_9v_adc",
+ "controller_temperature2",
+ "vreg_temperature",
+ "vreg_0_9v_temperature",
+ "vreg_1_2v_temperature",
+ "int_vptat",
+ "controller_internal_adc_temperature",
+ "ext_vptat",
+ "controller_external_adc_temperature",
+ "ambient_temperature",
+ "airflow",
+ "vdd08d_vss08d_csr",
+ "vdd08d_vss08d_csr_extadc",
+ "hotpoint_temperature",
+ "phy_power_switch_port0",
+ "phy_power_switch_port1",
+ "mum_vcc",
+ "0v9_a",
+ "i0v9_a",
+ "0v9_a_temp",
+ "0v9_b",
+ "i0v9_b",
+ "0v9_b_temp",
+ "ccom_avreg_1v2_supply",
+ "ccom_avreg_1v2_supply_ext_adc",
+ "ccom_avreg_1v8_supply",
+ "ccom_avreg_1v8_supply_ext_adc",
+ "controller_master_vptat",
+ "controller_master_internal_temp",
+ "controller_master_vptat_ext_adc",
+ "controller_master_internal_temp_ext_adc",
+ "controller_slave_vptat",
+ "controller_slave_internal_temp",
+ "controller_slave_vptat_ext_adc",
+ "controller_slave_internal_temp_ext_adc",
+ "sodimm_vout",
+ "sodimm_0_temp",
+ "sodimm_1_temp",
+ "phy0_vcc",
+ "phy1_vcc",
+ "controller_tdiode_temp",
+ "board_front_temp",
+ "board_back_temp",
+ "i1v8",
+ "i2v5",
+ "i3v3",
+ "i12v0",
+ "1v3",
+ "i1v3",
+};
+
+/* END MKCONFIG GENERATED MonitorStatNamesBlock */
+
+extern const char *
+efx_mon_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(id, <, EFX_MON_NSTATS);
+ return (__mon_stat_name[id]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn efx_rc_t
+efx_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values)
+{
+ efx_mon_t *emp = &(enp->en_mon);
+ const efx_mon_ops_t *emop = emp->em_emop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
+
+ return (emop->emo_stats_update(enp, esmp, values));
+}
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+ void
+efx_mon_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mon_t *emp = &(enp->en_mon);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
+
+ emp->em_emop = NULL;
+
+ emp->em_type = EFX_MON_INVALID;
+
+ enp->en_mod_flags &= ~EFX_MOD_MON;
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_nic.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_nic.c
new file mode 100644
index 00000000..6c162e03
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_nic.c
@@ -0,0 +1,1072 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+ __checkReturn efx_rc_t
+efx_family(
+ __in uint16_t venid,
+ __in uint16_t devid,
+ __out efx_family_t *efp,
+ __out unsigned int *membarp)
+{
+ if (venid == EFX_PCI_VENID_SFC) {
+ switch (devid) {
+#if EFSYS_OPT_SIENA
+ case EFX_PCI_DEVID_SIENA_F1_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Siena.
+ * manftest must be able to cope with this device id.
+ */
+ case EFX_PCI_DEVID_BETHPAGE:
+ case EFX_PCI_DEVID_SIENA:
+ *efp = EFX_FAMILY_SIENA;
+ *membarp = EFX_MEM_BAR_SIENA;
+ return (0);
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Huntington.
+ * manftest must be able to cope with this device id.
+ */
+ case EFX_PCI_DEVID_FARMINGDALE:
+ case EFX_PCI_DEVID_GREENPORT:
+ *efp = EFX_FAMILY_HUNTINGTON;
+ *membarp = EFX_MEM_BAR_HUNTINGTON_PF;
+ return (0);
+
+ case EFX_PCI_DEVID_FARMINGDALE_VF:
+ case EFX_PCI_DEVID_GREENPORT_VF:
+ *efp = EFX_FAMILY_HUNTINGTON;
+ *membarp = EFX_MEM_BAR_HUNTINGTON_VF;
+ return (0);
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_PCI_DEVID_MEDFORD_PF_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Medford.
+ * manftest must be able to cope with this device id.
+ */
+ case EFX_PCI_DEVID_MEDFORD:
+ *efp = EFX_FAMILY_MEDFORD;
+ *membarp = EFX_MEM_BAR_MEDFORD_PF;
+ return (0);
+
+ case EFX_PCI_DEVID_MEDFORD_VF:
+ *efp = EFX_FAMILY_MEDFORD;
+ *membarp = EFX_MEM_BAR_MEDFORD_VF;
+ return (0);
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_PCI_DEVID_MEDFORD2_PF_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Medford2.
+ * manftest must be able to cope with this device id.
+ */
+ case EFX_PCI_DEVID_MEDFORD2:
+ case EFX_PCI_DEVID_MEDFORD2_VF:
+ *efp = EFX_FAMILY_MEDFORD2;
+ *membarp = EFX_MEM_BAR_MEDFORD2;
+ return (0);
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ case EFX_PCI_DEVID_FALCON: /* Obsolete, not supported */
+ default:
+ break;
+ }
+ }
+
+ *efp = EFX_FAMILY_INVALID;
+ return (ENOTSUP);
+}
+
+
+#if EFSYS_OPT_SIENA
+
+static const efx_nic_ops_t __efx_nic_siena_ops = {
+ siena_nic_probe, /* eno_probe */
+ NULL, /* eno_board_cfg */
+ NULL, /* eno_set_drv_limits */
+ siena_nic_reset, /* eno_reset */
+ siena_nic_init, /* eno_init */
+ NULL, /* eno_get_vi_pool */
+ NULL, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ siena_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ siena_nic_fini, /* eno_fini */
+ siena_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static const efx_nic_ops_t __efx_nic_hunt_ops = {
+ ef10_nic_probe, /* eno_probe */
+ hunt_board_cfg, /* eno_board_cfg */
+ ef10_nic_set_drv_limits, /* eno_set_drv_limits */
+ ef10_nic_reset, /* eno_reset */
+ ef10_nic_init, /* eno_init */
+ ef10_nic_get_vi_pool, /* eno_get_vi_pool */
+ ef10_nic_get_bar_region, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ ef10_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nic_fini, /* eno_fini */
+ ef10_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+
+static const efx_nic_ops_t __efx_nic_medford_ops = {
+ ef10_nic_probe, /* eno_probe */
+ medford_board_cfg, /* eno_board_cfg */
+ ef10_nic_set_drv_limits, /* eno_set_drv_limits */
+ ef10_nic_reset, /* eno_reset */
+ ef10_nic_init, /* eno_init */
+ ef10_nic_get_vi_pool, /* eno_get_vi_pool */
+ ef10_nic_get_bar_region, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ ef10_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nic_fini, /* eno_fini */
+ ef10_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+
+static const efx_nic_ops_t __efx_nic_medford2_ops = {
+ ef10_nic_probe, /* eno_probe */
+ medford2_board_cfg, /* eno_board_cfg */
+ ef10_nic_set_drv_limits, /* eno_set_drv_limits */
+ ef10_nic_reset, /* eno_reset */
+ ef10_nic_init, /* eno_init */
+ ef10_nic_get_vi_pool, /* eno_get_vi_pool */
+ ef10_nic_get_bar_region, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ ef10_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nic_fini, /* eno_fini */
+ ef10_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+
+ __checkReturn efx_rc_t
+efx_nic_create(
+ __in efx_family_t family,
+ __in efsys_identifier_t *esip,
+ __in efsys_bar_t *esbp,
+ __in efsys_lock_t *eslp,
+ __deref_out efx_nic_t **enpp)
+{
+ efx_nic_t *enp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(family, >, EFX_FAMILY_INVALID);
+ EFSYS_ASSERT3U(family, <, EFX_FAMILY_NTYPES);
+
+ /* Allocate a NIC object */
+ EFSYS_KMEM_ALLOC(esip, sizeof (efx_nic_t), enp);
+
+ if (enp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_magic = EFX_NIC_MAGIC;
+
+ switch (family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ enp->en_enop = &__efx_nic_siena_ops;
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LFSR_HASH_INSERT |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_LOOKAHEAD_SPLIT |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_TX_SRC_FILTERS;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ enp->en_enop = &__efx_nic_hunt_ops;
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_MCDI_DMA |
+ EFX_FEATURE_PIO_BUFFERS |
+ EFX_FEATURE_FW_ASSISTED_TSO |
+ EFX_FEATURE_FW_ASSISTED_TSO_V2 |
+ EFX_FEATURE_PACKED_STREAM;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ enp->en_enop = &__efx_nic_medford_ops;
+ /*
+ * FW_ASSISTED_TSO omitted as Medford only supports firmware
+ * assisted TSO version 2, not the v1 scheme used on Huntington.
+ */
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_MCDI_DMA |
+ EFX_FEATURE_PIO_BUFFERS |
+ EFX_FEATURE_FW_ASSISTED_TSO_V2 |
+ EFX_FEATURE_PACKED_STREAM;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ enp->en_enop = &__efx_nic_medford2_ops;
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_MCDI_DMA |
+ EFX_FEATURE_PIO_BUFFERS |
+ EFX_FEATURE_FW_ASSISTED_TSO_V2 |
+ EFX_FEATURE_PACKED_STREAM;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ enp->en_family = family;
+ enp->en_esip = esip;
+ enp->en_esbp = esbp;
+ enp->en_eslp = eslp;
+
+ *enpp = enp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ enp->en_magic = 0;
+
+ /* Free the NIC object */
+ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_probe(
+ __in efx_nic_t *enp,
+ __in efx_fw_variant_t efv)
+{
+ const efx_nic_ops_t *enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+#if EFSYS_OPT_MCDI
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+#endif /* EFSYS_OPT_MCDI */
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE));
+
+ /* Ensure FW variant codes match with MC_CMD_FW codes */
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_FULL_FEATURED ==
+ MC_CMD_FW_FULL_FEATURED);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_LOW_LATENCY ==
+ MC_CMD_FW_LOW_LATENCY);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM ==
+ MC_CMD_FW_PACKED_STREAM);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_HIGH_TX_RATE ==
+ MC_CMD_FW_HIGH_TX_RATE);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1 ==
+ MC_CMD_FW_PACKED_STREAM_HASH_MODE_1);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_RULES_ENGINE ==
+ MC_CMD_FW_RULES_ENGINE);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_DPDK ==
+ MC_CMD_FW_DPDK);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_DONT_CARE ==
+ (int)MC_CMD_FW_DONT_CARE);
+
+ enop = enp->en_enop;
+ enp->efv = efv;
+
+ if ((rc = enop->eno_probe(enp)) != 0)
+ goto fail1;
+
+ if ((rc = efx_phy_probe(enp)) != 0)
+ goto fail2;
+
+ enp->en_mod_flags |= EFX_MOD_PROBE;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ enop->eno_unprobe(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enop->eno_set_drv_limits != NULL) {
+ if ((rc = enop->eno_set_drv_limits(enp, edlp)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enop->eno_get_bar_region == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if ((rc = (enop->eno_get_bar_region)(enp,
+ region, offsetp, sizep)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *evq_countp,
+ __out uint32_t *rxq_countp,
+ __out uint32_t *txq_countp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enop->eno_get_vi_pool != NULL) {
+ uint32_t vi_count = 0;
+
+ if ((rc = (enop->eno_get_vi_pool)(enp, &vi_count)) != 0)
+ goto fail1;
+
+ *evq_countp = vi_count;
+ *rxq_countp = vi_count;
+ *txq_countp = vi_count;
+ } else {
+ /* Use NIC limits as default value */
+ *evq_countp = encp->enc_evq_limit;
+ *rxq_countp = encp->enc_rxq_limit;
+ *txq_countp = encp->enc_txq_limit;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_init(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enp->en_mod_flags & EFX_MOD_NIC) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = enop->eno_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_mod_flags |= EFX_MOD_NIC;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_nic_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_NIC);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+
+ enop->eno_fini(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_NIC;
+}
+
+ void
+efx_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+#if EFSYS_OPT_MCDI
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+#endif /* EFSYS_OPT_MCDI */
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+
+ efx_phy_unprobe(enp);
+
+ enop->eno_unprobe(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_PROBE;
+}
+
+ void
+efx_nic_destroy(
+ __in efx_nic_t *enp)
+{
+ efsys_identifier_t *esip = enp->en_esip;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
+
+ enp->en_family = EFX_FAMILY_INVALID;
+ enp->en_esip = NULL;
+ enp->en_esbp = NULL;
+ enp->en_eslp = NULL;
+
+ enp->en_enop = NULL;
+
+ enp->en_magic = 0;
+
+ /* Free the NIC object */
+ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_reset(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ unsigned int mod_flags;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
+ /*
+ * All modules except the MCDI, PROBE, NVRAM, VPD, MON
+ * (which we do not reset here) must have been shut down or never
+ * initialized.
+ *
+ * A rule of thumb here is: If the controller or MC reboots, is *any*
+ * state lost. If it's lost and needs reapplying, then the module
+ * *must* not be initialised during the reset.
+ */
+ mod_flags = enp->en_mod_flags;
+ mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM |
+ EFX_MOD_VPD | EFX_MOD_MON);
+ EFSYS_ASSERT3U(mod_flags, ==, 0);
+ if (mod_flags != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = enop->eno_reset(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ const efx_nic_cfg_t *
+efx_nic_cfg_get(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ return (&(enp->en_nic_cfg));
+}
+
+ __checkReturn efx_rc_t
+efx_nic_get_fw_version(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_info_t *enfip)
+{
+ uint16_t mc_fw_version[4];
+ efx_rc_t rc;
+
+ if (enfip == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /* Ensure RXDP_FW_ID codes match with MC_CMD_GET_CAPABILITIES codes */
+ EFX_STATIC_ASSERT(EFX_RXDP_FULL_FEATURED_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP);
+ EFX_STATIC_ASSERT(EFX_RXDP_LOW_LATENCY_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY);
+ EFX_STATIC_ASSERT(EFX_RXDP_PACKED_STREAM_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM);
+ EFX_STATIC_ASSERT(EFX_RXDP_RULES_ENGINE_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE);
+ EFX_STATIC_ASSERT(EFX_RXDP_DPDK_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK);
+
+ rc = efx_mcdi_version(enp, mc_fw_version, NULL, NULL);
+ if (rc != 0)
+ goto fail2;
+
+ rc = efx_mcdi_get_capabilities(enp, NULL,
+ &enfip->enfi_rx_dpcpu_fw_id,
+ &enfip->enfi_tx_dpcpu_fw_id,
+ NULL, NULL);
+ if (rc == 0) {
+ enfip->enfi_dpcpu_fw_ids_valid = B_TRUE;
+ } else if (rc == ENOTSUP) {
+ enfip->enfi_dpcpu_fw_ids_valid = B_FALSE;
+ enfip->enfi_rx_dpcpu_fw_id = 0;
+ enfip->enfi_tx_dpcpu_fw_id = 0;
+ } else {
+ goto fail3;
+ }
+
+ memcpy(enfip->enfi_mc_fw_version, mc_fw_version,
+ sizeof (mc_fw_version));
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+
+ if ((rc = enop->eno_register_test(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_LOOPBACK
+
+extern void
+efx_loopback_mask(
+ __in efx_loopback_kind_t loopback_kind,
+ __out efx_qword_t *maskp)
+{
+ efx_qword_t mask;
+
+ EFSYS_ASSERT3U(loopback_kind, <, EFX_LOOPBACK_NKINDS);
+ EFSYS_ASSERT(maskp != NULL);
+
+ /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */
+#define LOOPBACK_CHECK(_mcdi, _efx) \
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_##_mcdi == EFX_LOOPBACK_##_efx)
+
+ LOOPBACK_CHECK(NONE, OFF);
+ LOOPBACK_CHECK(DATA, DATA);
+ LOOPBACK_CHECK(GMAC, GMAC);
+ LOOPBACK_CHECK(XGMII, XGMII);
+ LOOPBACK_CHECK(XGXS, XGXS);
+ LOOPBACK_CHECK(XAUI, XAUI);
+ LOOPBACK_CHECK(GMII, GMII);
+ LOOPBACK_CHECK(SGMII, SGMII);
+ LOOPBACK_CHECK(XGBR, XGBR);
+ LOOPBACK_CHECK(XFI, XFI);
+ LOOPBACK_CHECK(XAUI_FAR, XAUI_FAR);
+ LOOPBACK_CHECK(GMII_FAR, GMII_FAR);
+ LOOPBACK_CHECK(SGMII_FAR, SGMII_FAR);
+ LOOPBACK_CHECK(XFI_FAR, XFI_FAR);
+ LOOPBACK_CHECK(GPHY, GPHY);
+ LOOPBACK_CHECK(PHYXS, PHY_XS);
+ LOOPBACK_CHECK(PCS, PCS);
+ LOOPBACK_CHECK(PMAPMD, PMA_PMD);
+ LOOPBACK_CHECK(XPORT, XPORT);
+ LOOPBACK_CHECK(XGMII_WS, XGMII_WS);
+ LOOPBACK_CHECK(XAUI_WS, XAUI_WS);
+ LOOPBACK_CHECK(XAUI_WS_FAR, XAUI_WS_FAR);
+ LOOPBACK_CHECK(XAUI_WS_NEAR, XAUI_WS_NEAR);
+ LOOPBACK_CHECK(GMII_WS, GMII_WS);
+ LOOPBACK_CHECK(XFI_WS, XFI_WS);
+ LOOPBACK_CHECK(XFI_WS_FAR, XFI_WS_FAR);
+ LOOPBACK_CHECK(PHYXS_WS, PHYXS_WS);
+ LOOPBACK_CHECK(PMA_INT, PMA_INT);
+ LOOPBACK_CHECK(SD_NEAR, SD_NEAR);
+ LOOPBACK_CHECK(SD_FAR, SD_FAR);
+ LOOPBACK_CHECK(PMA_INT_WS, PMA_INT_WS);
+ LOOPBACK_CHECK(SD_FEP2_WS, SD_FEP2_WS);
+ LOOPBACK_CHECK(SD_FEP1_5_WS, SD_FEP1_5_WS);
+ LOOPBACK_CHECK(SD_FEP_WS, SD_FEP_WS);
+ LOOPBACK_CHECK(SD_FES_WS, SD_FES_WS);
+ LOOPBACK_CHECK(AOE_INT_NEAR, AOE_INT_NEAR);
+ LOOPBACK_CHECK(DATA_WS, DATA_WS);
+ LOOPBACK_CHECK(FORCE_EXT_LINK, FORCE_EXT_LINK);
+#undef LOOPBACK_CHECK
+
+ /* Build bitmask of possible loopback types */
+ EFX_ZERO_QWORD(mask);
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_OFF) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_OFF);
+ }
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_MAC) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ /*
+ * The "MAC" grouping has historically been used by drivers to
+ * mean loopbacks supported by on-chip hardware. Keep that
+ * meaning here, and include on-chip PHY layer loopbacks.
+ */
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_DATA);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMAC);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGXS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGBR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_INT);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_NEAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_FAR);
+ }
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_PHY) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ /*
+ * The "PHY" grouping has historically been used by drivers to
+ * mean loopbacks supported by off-chip hardware. Keep that
+ * meaning here.
+ */
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GPHY);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PHY_XS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PCS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_PMD);
+ }
+
+ *maskp = mask;
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_loopback_modes(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LOOPBACK_MODES_IN_LEN,
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN)];
+ efx_qword_t mask;
+ efx_qword_t modes;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LOOPBACK_MODES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ /*
+ * We assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree
+ * in efx_loopback_mask() and in siena_phy.c:siena_phy_get_link().
+ */
+ efx_loopback_mask(EFX_LOOPBACK_KIND_ALL, &mask);
+
+ EFX_AND_QWORD(mask,
+ *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_SUGGESTED));
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_100M);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_100FDX] = modes;
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_1G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_1000FDX] = modes;
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_10G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_10000FDX] = modes;
+
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN) {
+ /* Response includes 40G loopback modes */
+ modes = *MCDI_OUT2(req, efx_qword_t,
+ GET_LOOPBACK_MODES_OUT_40G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_40000FDX] = modes;
+ }
+
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN) {
+ /* Response includes 25G loopback modes */
+ modes = *MCDI_OUT2(req, efx_qword_t,
+ GET_LOOPBACK_MODES_OUT_V2_25G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_25000FDX] = modes;
+ }
+
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN) {
+ /* Response includes 50G loopback modes */
+ modes = *MCDI_OUT2(req, efx_qword_t,
+ GET_LOOPBACK_MODES_OUT_V2_50G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_50000FDX] = modes;
+ }
+
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN) {
+ /* Response includes 100G loopback modes */
+ modes = *MCDI_OUT2(req, efx_qword_t,
+ GET_LOOPBACK_MODES_OUT_V2_100G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_100000FDX] = modes;
+ }
+
+ EFX_ZERO_QWORD(modes);
+ EFX_SET_QWORD_BIT(modes, EFX_LOOPBACK_OFF);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_1000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_10000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_40000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_25000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_50000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100000FDX]);
+ encp->enc_loopback_types[EFX_LINK_UNKNOWN] = modes;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ __checkReturn efx_rc_t
+efx_nic_calculate_pcie_link_bandwidth(
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t lane_bandwidth;
+ uint32_t total_bandwidth;
+ efx_rc_t rc;
+
+ if ((pcie_link_width == 0) || (pcie_link_width > 16) ||
+ !ISP2(pcie_link_width)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (pcie_link_gen) {
+ case EFX_PCIE_LINK_SPEED_GEN1:
+ /* 2.5 Gb/s raw bandwidth with 8b/10b encoding */
+ lane_bandwidth = 2000;
+ break;
+ case EFX_PCIE_LINK_SPEED_GEN2:
+ /* 5.0 Gb/s raw bandwidth with 8b/10b encoding */
+ lane_bandwidth = 4000;
+ break;
+ case EFX_PCIE_LINK_SPEED_GEN3:
+ /* 8.0 Gb/s raw bandwidth with 128b/130b encoding */
+ lane_bandwidth = 7877;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ total_bandwidth = lane_bandwidth * pcie_link_width;
+ *bandwidth_mbpsp = total_bandwidth;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+
+ __checkReturn efx_rc_t
+efx_nic_get_fw_subvariant(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_subvariant_t *subvariantp)
+{
+ efx_rc_t rc;
+ uint32_t value;
+
+ rc = efx_mcdi_get_nic_global(enp,
+ MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, &value);
+ if (rc != 0)
+ goto fail1;
+
+ /* Mapping is not required since values match MCDI */
+ EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_DEFAULT ==
+ MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT);
+ EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM ==
+ MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM);
+
+ switch (value) {
+ case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT:
+ case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM:
+ *subvariantp = value;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_set_fw_subvariant(
+ __in efx_nic_t *enp,
+ __in efx_nic_fw_subvariant_t subvariant)
+{
+ efx_rc_t rc;
+
+ switch (subvariant) {
+ case EFX_NIC_FW_SUBVARIANT_DEFAULT:
+ case EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM:
+ /* Mapping is not required since values match MCDI */
+ break;
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = efx_mcdi_set_nic_global(enp,
+ MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, subvariant);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
+
+ __checkReturn efx_rc_t
+efx_nic_check_pcie_link_speed(
+ __in efx_nic_t *enp,
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out efx_pcie_link_performance_t *resultp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t bandwidth;
+ efx_pcie_link_performance_t result;
+ efx_rc_t rc;
+
+ if ((encp->enc_required_pcie_bandwidth_mbps == 0) ||
+ (pcie_link_width == 0) || (pcie_link_width == 32) ||
+ (pcie_link_gen == 0)) {
+ /*
+ * No usable info on what is required and/or in use. In virtual
+ * machines, sometimes the PCIe link width is reported as 0 or
+ * 32, or the speed as 0.
+ */
+ result = EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH;
+ goto out;
+ }
+
+ /* Calculate the available bandwidth in megabits per second */
+ rc = efx_nic_calculate_pcie_link_bandwidth(pcie_link_width,
+ pcie_link_gen, &bandwidth);
+ if (rc != 0)
+ goto fail1;
+
+ if (bandwidth < encp->enc_required_pcie_bandwidth_mbps) {
+ result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH;
+ } else if (pcie_link_gen < encp->enc_max_pcie_link_gen) {
+ /* The link provides enough bandwidth but not optimal latency */
+ result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY;
+ } else {
+ result = EFX_PCIE_LINK_PERFORMANCE_OPTIMAL;
+ }
+
+out:
+ *resultp = result;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_nvram.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_nvram.c
new file mode 100644
index 00000000..be409c3a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_nvram.c
@@ -0,0 +1,1054 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_SIENA
+
+static const efx_nvram_ops_t __efx_nvram_siena_ops = {
+#if EFSYS_OPT_DIAG
+ siena_nvram_test, /* envo_test */
+#endif /* EFSYS_OPT_DIAG */
+ siena_nvram_type_to_partn, /* envo_type_to_partn */
+ siena_nvram_partn_size, /* envo_partn_size */
+ siena_nvram_partn_rw_start, /* envo_partn_rw_start */
+ siena_nvram_partn_read, /* envo_partn_read */
+ siena_nvram_partn_read, /* envo_partn_read_backup */
+ siena_nvram_partn_erase, /* envo_partn_erase */
+ siena_nvram_partn_write, /* envo_partn_write */
+ siena_nvram_partn_rw_finish, /* envo_partn_rw_finish */
+ siena_nvram_partn_get_version, /* envo_partn_get_version */
+ siena_nvram_partn_set_version, /* envo_partn_set_version */
+ NULL, /* envo_partn_validate */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+static const efx_nvram_ops_t __efx_nvram_ef10_ops = {
+#if EFSYS_OPT_DIAG
+ ef10_nvram_test, /* envo_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nvram_type_to_partn, /* envo_type_to_partn */
+ ef10_nvram_partn_size, /* envo_partn_size */
+ ef10_nvram_partn_rw_start, /* envo_partn_rw_start */
+ ef10_nvram_partn_read, /* envo_partn_read */
+ ef10_nvram_partn_read_backup, /* envo_partn_read_backup */
+ ef10_nvram_partn_erase, /* envo_partn_erase */
+ ef10_nvram_partn_write, /* envo_partn_write */
+ ef10_nvram_partn_rw_finish, /* envo_partn_rw_finish */
+ ef10_nvram_partn_get_version, /* envo_partn_get_version */
+ ef10_nvram_partn_set_version, /* envo_partn_set_version */
+ ef10_nvram_buffer_validate, /* envo_buffer_validate */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+ __checkReturn efx_rc_t
+efx_nvram_init(
+ __in efx_nic_t *enp)
+{
+ const efx_nvram_ops_t *envop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NVRAM));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ envop = &__efx_nvram_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ envop = &__efx_nvram_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ envop = &__efx_nvram_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ envop = &__efx_nvram_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ enp->en_envop = envop;
+ enp->en_mod_flags |= EFX_MOD_NVRAM;
+
+ enp->en_nvram_partn_locked = EFX_NVRAM_PARTN_INVALID;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_nvram_test(
+ __in efx_nic_t *enp)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_test(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+ __checkReturn efx_rc_t
+efx_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_size(enp, partn, sizep)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ *sizep = 0;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_get_version(enp, partn,
+ subtypep, version)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt size_t *chunk_sizep)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, EFX_NVRAM_PARTN_INVALID);
+
+ if ((rc = envop->envo_partn_rw_start(enp, partn, chunk_sizep)) != 0)
+ goto fail2;
+
+ enp->en_nvram_partn_locked = partn;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn);
+
+ if ((rc = envop->envo_partn_read(enp, partn, offset, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Read from the backup (writeable) store of an A/B partition.
+ * For non A/B partitions, there is only a single store, and so this
+ * function has the same behaviour as efx_nvram_read_chunk().
+ */
+ __checkReturn efx_rc_t
+efx_nvram_read_backup(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn);
+
+ if ((rc = envop->envo_partn_read_backup(enp, partn, offset,
+ data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ unsigned int offset = 0;
+ size_t size = 0;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn);
+
+ if ((rc = envop->envo_partn_size(enp, partn, &size)) != 0)
+ goto fail2;
+
+ if ((rc = envop->envo_partn_erase(enp, partn, offset, size)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn);
+
+ if ((rc = envop->envo_partn_write(enp, partn, offset, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt uint32_t *verify_resultp)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ uint32_t verify_result = 0;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn);
+
+ if ((rc = envop->envo_partn_rw_finish(enp, partn, &verify_result)) != 0)
+ goto fail2;
+
+ enp->en_nvram_partn_locked = EFX_NVRAM_PARTN_INVALID;
+
+ if (verify_resultp != NULL)
+ *verify_resultp = verify_result;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ enp->en_nvram_partn_locked = EFX_NVRAM_PARTN_INVALID;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Always report verification result */
+ if (verify_resultp != NULL)
+ *verify_resultp = verify_result;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_ecount(4) uint16_t version[4])
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ /*
+ * The Siena implementation of envo_set_version() will attempt to
+ * acquire the NVRAM_UPDATE lock for the DYNAMIC_CONFIG partition.
+ * Therefore, you can't have already acquired the NVRAM_UPDATE lock.
+ */
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, EFX_NVRAM_PARTN_INVALID);
+
+ if ((rc = envop->envo_partn_set_version(enp, partn, version)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Validate buffer contents (before writing to flash) */
+ __checkReturn efx_rc_t
+efx_nvram_validate(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if (envop->envo_buffer_validate != NULL) {
+ if ((rc = envop->envo_buffer_validate(enp, partn,
+ partn_data, partn_size)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+void
+efx_nvram_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, EFX_NVRAM_PARTN_INVALID);
+
+ enp->en_envop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_NVRAM;
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+/*
+ * Internal MCDI request handling
+ */
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_partitions(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __out unsigned int *npartnp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_PARTITIONS_IN_LEN,
+ MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX)];
+ unsigned int npartn;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_PARTITIONS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_PARTITIONS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ npartn = MCDI_OUT_DWORD(req, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LEN(npartn)) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (size < npartn * sizeof (uint32_t)) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ *npartnp = npartn;
+
+ memcpy(data,
+ MCDI_OUT2(req, uint32_t, NVRAM_PARTITIONS_OUT_TYPE_ID),
+ (npartn * sizeof (uint32_t)));
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_metadata(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4],
+ __out_bcount_opt(size) char *descp,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_METADATA_IN_LEN,
+ MC_CMD_NVRAM_METADATA_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_METADATA;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_METADATA_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_METADATA_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_METADATA_IN_TYPE, partn);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_METADATA_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_SUBTYPE_VALID)) {
+ *subtypep = MCDI_OUT_DWORD(req, NVRAM_METADATA_OUT_SUBTYPE);
+ } else {
+ *subtypep = 0;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_VERSION_VALID)) {
+ version[0] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_W);
+ version[1] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_X);
+ version[2] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Y);
+ version[3] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Z);
+ } else {
+ version[0] = version[1] = version[2] = version[3] = 0;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_DESCRIPTION_VALID)) {
+ /* Return optional descrition string */
+ if ((descp != NULL) && (size > 0)) {
+ size_t desclen;
+
+ descp[0] = '\0';
+ desclen = (req.emr_out_length_used
+ - MC_CMD_NVRAM_METADATA_OUT_LEN(0));
+
+ EFSYS_ASSERT3U(desclen, <=,
+ MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM);
+
+ if (size < desclen) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ memcpy(descp, MCDI_OUT2(req, char,
+ NVRAM_METADATA_OUT_DESCRIPTION),
+ desclen);
+
+ /* Ensure string is NUL terminated */
+ descp[desclen] = '\0';
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_info(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt size_t *sizep,
+ __out_opt uint32_t *addressp,
+ __out_opt uint32_t *erase_sizep,
+ __out_opt uint32_t *write_sizep)
+{
+ uint8_t payload[MAX(MC_CMD_NVRAM_INFO_IN_LEN,
+ MC_CMD_NVRAM_INFO_V2_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_INFO_V2_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_INFO_IN_TYPE, partn);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (sizep)
+ *sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_SIZE);
+
+ if (addressp)
+ *addressp = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_PHYSADDR);
+
+ if (erase_sizep)
+ *erase_sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_ERASESIZE);
+
+ if (write_sizep) {
+ *write_sizep =
+ (req.emr_out_length_used <
+ MC_CMD_NVRAM_INFO_V2_OUT_LEN) ?
+ 0 : MCDI_OUT_DWORD(req, NVRAM_INFO_V2_OUT_WRITESIZE);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * MC_CMD_NVRAM_UPDATE_START_V2 must be used to support firmware-verified
+ * NVRAM updates. Older firmware will ignore the flags field in the request.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_START_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_UPDATE_START;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_UPDATE_START_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_START_V2_IN_TYPE, partn);
+
+ MCDI_IN_POPULATE_DWORD_1(req, NVRAM_UPDATE_START_V2_IN_FLAGS,
+ NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_READ_IN_V2_LEN,
+ MC_CMD_NVRAM_READ_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ if (size > MC_CMD_NVRAM_READ_OUT_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_READ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_READ_IN_V2_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_READ_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_LENGTH, size);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_MODE, mode);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_READ_OUT_LEN(size)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ memcpy(data,
+ MCDI_OUT2(req, uint8_t, NVRAM_READ_OUT_READ_BUFFER),
+ size);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_ERASE_IN_LEN,
+ MC_CMD_NVRAM_ERASE_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_ERASE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_ERASE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_ERASE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_LENGTH, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * The NVRAM_WRITE MCDI command is a V1 command and so is supported by both
+ * Sienna and EF10 based boards. However EF10 based boards support the use
+ * of this command with payloads up to the maximum MCDI V2 payload length.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MCDI_CTL_SDU_LEN_MAX_V1,
+ MCDI_CTL_SDU_LEN_MAX_V2)];
+ efx_rc_t rc;
+ size_t max_data_size;
+
+ max_data_size = enp->en_nic_cfg.enc_mcdi_max_payload_length
+ - MC_CMD_NVRAM_WRITE_IN_LEN(0);
+ EFSYS_ASSERT3U(enp->en_nic_cfg.enc_mcdi_max_payload_length, >, 0);
+ EFSYS_ASSERT3U(max_data_size, <,
+ enp->en_nic_cfg.enc_mcdi_max_payload_length);
+
+ if (size > max_data_size) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_WRITE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_WRITE_IN_LEN(size);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_WRITE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_LENGTH, size);
+
+ memcpy(MCDI_IN2(req, uint8_t, NVRAM_WRITE_IN_WRITE_BUFFER),
+ data, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2 must be used to support firmware-verified
+ * NVRAM updates. Older firmware will ignore the flags field in the request.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t reboot,
+ __out_opt uint32_t *verify_resultp)
+{
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN)];
+ uint32_t verify_result = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_UPDATE_FINISH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_V2_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_V2_IN_REBOOT, reboot);
+
+ MCDI_IN_POPULATE_DWORD_1(req, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
+ verify_result = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN;
+ if (encp->enc_nvram_update_verify_result_supported) {
+ /* Result of update verification is missing */
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ } else {
+ verify_result =
+ MCDI_OUT_DWORD(req, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
+ }
+
+ if ((encp->enc_nvram_update_verify_result_supported) &&
+ (verify_result != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)) {
+ /* Update verification failed */
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ if (verify_resultp != NULL)
+ *verify_resultp = verify_result;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Always report verification result */
+ if (verify_resultp != NULL)
+ *verify_resultp = verify_result;
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_test(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_TEST_IN_LEN,
+ MC_CMD_NVRAM_TEST_OUT_LEN)];
+ int result;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_TEST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_TEST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_TEST_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_TEST_IN_TYPE, partn);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_TEST_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ result = MCDI_OUT_DWORD(req, NVRAM_TEST_OUT_RESULT);
+ if (result == MC_CMD_NVRAM_TEST_FAIL) {
+
+ EFSYS_PROBE1(nvram_test_failure, int, partn);
+
+ rc = (EINVAL);
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_phy.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_phy.c
new file mode 100644
index 00000000..ba2f51c1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_phy.c
@@ -0,0 +1,547 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+static const efx_phy_ops_t __efx_phy_siena_ops = {
+ siena_phy_power, /* epo_power */
+ NULL, /* epo_reset */
+ siena_phy_reconfigure, /* epo_reconfigure */
+ siena_phy_verify, /* epo_verify */
+ siena_phy_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ siena_phy_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ NULL, /* epo_bist_enable_offline */
+ siena_phy_bist_start, /* epo_bist_start */
+ siena_phy_bist_poll, /* epo_bist_poll */
+ siena_phy_bist_stop, /* epo_bist_stop */
+#endif /* EFSYS_OPT_BIST */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+static const efx_phy_ops_t __efx_phy_ef10_ops = {
+ ef10_phy_power, /* epo_power */
+ NULL, /* epo_reset */
+ ef10_phy_reconfigure, /* epo_reconfigure */
+ ef10_phy_verify, /* epo_verify */
+ ef10_phy_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ ef10_phy_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ ef10_bist_enable_offline, /* epo_bist_enable_offline */
+ ef10_bist_start, /* epo_bist_start */
+ ef10_bist_poll, /* epo_bist_poll */
+ ef10_bist_stop, /* epo_bist_stop */
+#endif /* EFSYS_OPT_BIST */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+ __checkReturn efx_rc_t
+efx_phy_probe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ const efx_phy_ops_t *epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ epp->ep_port = encp->enc_port;
+ epp->ep_phy_type = encp->enc_phy_type;
+
+ /* Hook in operations structure */
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ epop = &__efx_phy_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ epop = &__efx_phy_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ epop = &__efx_phy_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ epop = &__efx_phy_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ epp->ep_epop = epop;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_port = 0;
+ epp->ep_phy_type = 0;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_verify(enp));
+}
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+
+ __checkReturn efx_rc_t
+efx_phy_led_set(
+ __in efx_nic_t *enp,
+ __in efx_phy_led_mode_t mode)
+{
+ efx_nic_cfg_t *encp = (&enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ uint32_t mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (epp->ep_phy_led_mode == mode)
+ goto done;
+
+ mask = (1 << EFX_PHY_LED_DEFAULT);
+ mask |= encp->enc_led_mask;
+
+ if (!((1 << mode) & mask)) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(mode, <, EFX_PHY_LED_NMODES);
+ epp->ep_phy_led_mode = mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+done:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ void
+efx_phy_adv_cap_get(
+ __in efx_nic_t *enp,
+ __in uint32_t flag,
+ __out uint32_t *maskp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ switch (flag) {
+ case EFX_PHY_CAP_CURRENT:
+ *maskp = epp->ep_adv_cap_mask;
+ break;
+ case EFX_PHY_CAP_DEFAULT:
+ *maskp = epp->ep_default_adv_cap_mask;
+ break;
+ case EFX_PHY_CAP_PERM:
+ *maskp = epp->ep_phy_cap_mask;
+ break;
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ *maskp = 0;
+ break;
+ }
+}
+
+ __checkReturn efx_rc_t
+efx_phy_adv_cap_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mask)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ uint32_t old_mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((mask & ~epp->ep_phy_cap_mask) != 0) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (epp->ep_adv_cap_mask == mask)
+ goto done;
+
+ old_mask = epp->ep_adv_cap_mask;
+ epp->ep_adv_cap_mask = mask;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+done:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ epp->ep_adv_cap_mask = old_mask;
+ /* Reconfigure for robustness */
+ if (epop->epo_reconfigure(enp) != 0) {
+ /*
+ * We may have an inconsistent view of our advertised speed
+ * capabilities.
+ */
+ EFSYS_ASSERT(0);
+ }
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_phy_lp_cap_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ *maskp = epp->ep_lp_cap_mask;
+}
+
+ __checkReturn efx_rc_t
+efx_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_oui_get(enp, ouip));
+}
+
+ void
+efx_phy_media_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_media_type_t *typep)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (epp->ep_module_type != EFX_PHY_MEDIA_INVALID)
+ *typep = epp->ep_module_type;
+ else
+ *typep = epp->ep_fixed_port_type;
+}
+
+ __checkReturn efx_rc_t
+efx_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(data != NULL);
+
+ if ((uint32_t)offset + len > 0xff) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_phy_module_get_info(enp, dev_addr,
+ offset, len, data)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED PhyStatNamesBlock af9ffa24da3bc100 */
+static const char * const __efx_phy_stat_name[] = {
+ "oui",
+ "pma_pmd_link_up",
+ "pma_pmd_rx_fault",
+ "pma_pmd_tx_fault",
+ "pma_pmd_rev_a",
+ "pma_pmd_rev_b",
+ "pma_pmd_rev_c",
+ "pma_pmd_rev_d",
+ "pcs_link_up",
+ "pcs_rx_fault",
+ "pcs_tx_fault",
+ "pcs_ber",
+ "pcs_block_errors",
+ "phy_xs_link_up",
+ "phy_xs_rx_fault",
+ "phy_xs_tx_fault",
+ "phy_xs_align",
+ "phy_xs_sync_a",
+ "phy_xs_sync_b",
+ "phy_xs_sync_c",
+ "phy_xs_sync_d",
+ "an_link_up",
+ "an_master",
+ "an_local_rx_ok",
+ "an_remote_rx_ok",
+ "cl22ext_link_up",
+ "snr_a",
+ "snr_b",
+ "snr_c",
+ "snr_d",
+ "pma_pmd_signal_a",
+ "pma_pmd_signal_b",
+ "pma_pmd_signal_c",
+ "pma_pmd_signal_d",
+ "an_complete",
+ "pma_pmd_rev_major",
+ "pma_pmd_rev_minor",
+ "pma_pmd_rev_micro",
+ "pcs_fw_version_0",
+ "pcs_fw_version_1",
+ "pcs_fw_version_2",
+ "pcs_fw_version_3",
+ "pcs_fw_build_yy",
+ "pcs_fw_build_mm",
+ "pcs_fw_build_dd",
+ "pcs_op_mode",
+};
+
+/* END MKCONFIG GENERATED PhyStatNamesBlock */
+
+ const char *
+efx_phy_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_phy_stat_t type)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(type, <, EFX_PHY_NSTATS);
+
+ return (__efx_phy_stat_name[type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn efx_rc_t
+efx_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_stats_update(enp, esmp, stat));
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+efx_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ if (epop->epo_bist_enable_offline == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_enable_offline(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+ __checkReturn efx_rc_t
+efx_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, EFX_BIST_TYPE_UNKNOWN);
+
+ if (epop->epo_bist_start == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_start(enp, type)) != 0)
+ goto fail2;
+
+ epp->ep_current_bist = type;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt uint32_t *value_maskp,
+ __out_ecount_opt(count) unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
+
+ EFSYS_ASSERT(epop->epo_bist_poll != NULL);
+ if (epop->epo_bist_poll == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_poll(enp, type, resultp, value_maskp,
+ valuesp, count)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
+
+ EFSYS_ASSERT(epop->epo_bist_stop != NULL);
+
+ if (epop->epo_bist_stop != NULL)
+ epop->epo_bist_stop(enp, type);
+
+ epp->ep_current_bist = EFX_BIST_TYPE_UNKNOWN;
+}
+
+#endif /* EFSYS_OPT_BIST */
+ void
+efx_phy_unprobe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ epp->ep_epop = NULL;
+
+ epp->ep_adv_cap_mask = 0;
+
+ epp->ep_port = 0;
+ epp->ep_phy_type = 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_phy_ids.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_phy_ids.h
new file mode 100644
index 00000000..9fb1c6c4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_phy_ids.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2013-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_EFX_PHY_IDS_H
+#define _SYS_EFX_PHY_IDS_H
+
+#define EFX_PHY_NULL 0
+
+typedef enum efx_phy_type_e { /* GENERATED BY scripts/genfwdef */
+ EFX_PHY_TXC43128 = 1,
+ EFX_PHY_SFX7101 = 3,
+ EFX_PHY_QT2022C2 = 4,
+ EFX_PHY_PM8358 = 6,
+ EFX_PHY_SFT9001A = 8,
+ EFX_PHY_QT2025C = 9,
+ EFX_PHY_SFT9001B = 10,
+ EFX_PHY_QLX111V = 12,
+ EFX_PHY_QT2025_KR = 17,
+ EFX_PHY_AEL3020 = 18,
+ EFX_PHY_XFI_FARMI = 19,
+} efx_phy_type_t;
+
+
+#endif /* _SYS_EFX_PHY_IDS_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_port.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_port.c
new file mode 100644
index 00000000..33a1a084
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_port.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_port_init(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enp->en_mod_flags & EFX_MOD_PORT) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ enp->en_mod_flags |= EFX_MOD_PORT;
+
+ epp->ep_mac_type = EFX_MAC_INVALID;
+ epp->ep_link_mode = EFX_LINK_UNKNOWN;
+ epp->ep_mac_drain = B_TRUE;
+
+ /* Configure the MAC */
+ if ((rc = efx_mac_select(enp)) != 0)
+ goto fail1;
+
+ epp->ep_emop->emo_reconfigure(enp);
+
+ /* Pick up current phy capababilities */
+ efx_port_poll(enp, NULL);
+
+ /*
+ * Turn on the PHY if available, otherwise reset it, and
+ * reconfigure it with the current configuration.
+ */
+ if (epop->epo_power != NULL) {
+ if ((rc = epop->epo_power(enp, B_TRUE)) != 0)
+ goto fail2;
+ } else {
+ if ((rc = epop->epo_reset(enp)) != 0)
+ goto fail2;
+ }
+
+ EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_PHY);
+ enp->en_reset_flags &= ~EFX_RESET_PHY;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_mod_flags &= ~EFX_MOD_PORT;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_port_poll(
+ __in efx_nic_t *enp,
+ __out_opt efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_link_mode_t ignore_link_mode;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(emop != NULL);
+
+ if (link_modep == NULL)
+ link_modep = &ignore_link_mode;
+
+ if ((rc = emop->emo_poll(enp, link_modep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+efx_port_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ EFSYS_ASSERT(link_mode < EFX_LINK_NMODES);
+
+ if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[link_mode],
+ (int)loopback_type) == 0) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (epp->ep_loopback_type == loopback_type &&
+ epp->ep_loopback_link_mode == link_mode)
+ return (0);
+
+ if ((rc = emop->emo_loopback_set(enp, link_mode, loopback_type)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_NAMES
+
+static const char * const __efx_loopback_type_name[] = {
+ "OFF",
+ "DATA",
+ "GMAC",
+ "XGMII",
+ "XGXS",
+ "XAUI",
+ "GMII",
+ "SGMII",
+ "XGBR",
+ "XFI",
+ "XAUI_FAR",
+ "GMII_FAR",
+ "SGMII_FAR",
+ "XFI_FAR",
+ "GPHY",
+ "PHY_XS",
+ "PCS",
+ "PMA_PMD",
+ "XPORT",
+ "XGMII_WS",
+ "XAUI_WS",
+ "XAUI_WS_FAR",
+ "XAUI_WS_NEAR",
+ "GMII_WS",
+ "XFI_WS",
+ "XFI_WS_FAR",
+ "PHYXS_WS",
+ "PMA_INT",
+ "SD_NEAR",
+ "SD_FAR",
+ "PMA_INT_WS",
+ "SD_FEP2_WS",
+ "SD_FEP1_5_WS",
+ "SD_FEP_WS",
+ "SD_FES_WS",
+ "AOE_INT_NEAR",
+ "DATA_WS",
+ "FORCE_EXT_LINK",
+};
+
+ __checkReturn const char *
+efx_loopback_type_name(
+ __in efx_nic_t *enp,
+ __in efx_loopback_type_t type)
+{
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__efx_loopback_type_name) ==
+ EFX_LOOPBACK_NTYPES);
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(type, <, EFX_LOOPBACK_NTYPES);
+
+ return (__efx_loopback_type_name[type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ void
+efx_port_fini(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(epp->ep_mac_drain);
+
+ epp->ep_emop = NULL;
+ epp->ep_mac_type = EFX_MAC_INVALID;
+ epp->ep_mac_drain = B_FALSE;
+
+ /* Turn off the PHY */
+ if (epop->epo_power != NULL)
+ (void) epop->epo_power(enp, B_FALSE);
+
+ enp->en_mod_flags &= ~EFX_MOD_PORT;
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_regs.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs.h
new file mode 100644
index 00000000..aef212e7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs.h
@@ -0,0 +1,3846 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_EFX_REGS_H
+#define _SYS_EFX_REGS_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**************************************************************************
+ *
+ * Falcon/Siena registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/*
+ * FR_AB_EE_VPD_CFG0_REG_SF(128bit):
+ * SPI/VPD configuration register 0
+ */
+#define FR_AB_EE_VPD_CFG0_REG_SF_OFST 0x00000300
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_EE_VPD_CFG0_REG(128bit):
+ * SPI/VPD configuration register 0
+ */
+#define FR_AB_EE_VPD_CFG0_REG_OFST 0x00000140
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
+#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
+#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
+#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
+#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
+#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
+#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
+#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
+#define FRF_AB_EE_VPDW_LENGTH_LBN 80
+#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPDW_BASE_LBN 64
+#define FRF_AB_EE_VPDW_BASE_WIDTH 15
+#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
+#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
+#define FRF_AB_EE_VPD_BASE_LBN 32
+#define FRF_AB_EE_VPD_BASE_WIDTH 24
+#define FRF_AB_EE_VPD_LENGTH_LBN 16
+#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
+#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
+#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
+#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
+#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
+#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
+#define FRF_AB_EE_VPD_EN_LBN 0
+#define FRF_AB_EE_VPD_EN_WIDTH 1
+
+
+/*
+ * FR_AB_PCIE_SD_CTL0123_REG_SF(128bit):
+ * PCIE SerDes control register 0 to 3
+ */
+#define FR_AB_PCIE_SD_CTL0123_REG_SF_OFST 0x00000320
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_SD_CTL0123_REG(128bit):
+ * PCIE SerDes control register 0 to 3
+ */
+#define FR_AB_PCIE_SD_CTL0123_REG_OFST 0x00000320
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define FRF_AB_PCIE_OFFSET_LBN 56
+#define FRF_AB_PCIE_OFFSET_WIDTH 8
+#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_H_LBN 51
+#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_L_LBN 50
+#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define FRF_AB_PCIE_LPBK_LBN 40
+#define FRF_AB_PCIE_LPBK_WIDTH 8
+#define FRF_AB_PCIE_PARLPBK_LBN 32
+#define FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define FFE_AB_PCIE_RXEQCTL_OFF 2
+#define FFE_AB_PCIE_RXEQCTL_MIN 1
+#define FFE_AB_PCIE_RXEQCTL_MAX 0
+#define FRF_AB_PCIE_HIDRV_LBN 8
+#define FRF_AB_PCIE_HIDRV_WIDTH 8
+#define FRF_AB_PCIE_LODRV_LBN 0
+#define FRF_AB_PCIE_LODRV_WIDTH 8
+
+
+/*
+ * FR_AB_PCIE_SD_CTL45_REG_SF(128bit):
+ * PCIE SerDes control register 4 and 5
+ */
+#define FR_AB_PCIE_SD_CTL45_REG_SF_OFST 0x00000330
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_SD_CTL45_REG(128bit):
+ * PCIE SerDes control register 4 and 5
+ */
+#define FR_AB_PCIE_SD_CTL45_REG_OFST 0x00000330
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_DTX7_LBN 60
+#define FRF_AB_PCIE_DTX7_WIDTH 4
+#define FRF_AB_PCIE_DTX6_LBN 56
+#define FRF_AB_PCIE_DTX6_WIDTH 4
+#define FRF_AB_PCIE_DTX5_LBN 52
+#define FRF_AB_PCIE_DTX5_WIDTH 4
+#define FRF_AB_PCIE_DTX4_LBN 48
+#define FRF_AB_PCIE_DTX4_WIDTH 4
+#define FRF_AB_PCIE_DTX3_LBN 44
+#define FRF_AB_PCIE_DTX3_WIDTH 4
+#define FRF_AB_PCIE_DTX2_LBN 40
+#define FRF_AB_PCIE_DTX2_WIDTH 4
+#define FRF_AB_PCIE_DTX1_LBN 36
+#define FRF_AB_PCIE_DTX1_WIDTH 4
+#define FRF_AB_PCIE_DTX0_LBN 32
+#define FRF_AB_PCIE_DTX0_WIDTH 4
+#define FRF_AB_PCIE_DEQ7_LBN 28
+#define FRF_AB_PCIE_DEQ7_WIDTH 4
+#define FRF_AB_PCIE_DEQ6_LBN 24
+#define FRF_AB_PCIE_DEQ6_WIDTH 4
+#define FRF_AB_PCIE_DEQ5_LBN 20
+#define FRF_AB_PCIE_DEQ5_WIDTH 4
+#define FRF_AB_PCIE_DEQ4_LBN 16
+#define FRF_AB_PCIE_DEQ4_WIDTH 4
+#define FRF_AB_PCIE_DEQ3_LBN 12
+#define FRF_AB_PCIE_DEQ3_WIDTH 4
+#define FRF_AB_PCIE_DEQ2_LBN 8
+#define FRF_AB_PCIE_DEQ2_WIDTH 4
+#define FRF_AB_PCIE_DEQ1_LBN 4
+#define FRF_AB_PCIE_DEQ1_WIDTH 4
+#define FRF_AB_PCIE_DEQ0_LBN 0
+#define FRF_AB_PCIE_DEQ0_WIDTH 4
+
+
+/*
+ * FR_AB_PCIE_PCS_CTL_STAT_REG_SF(128bit):
+ * PCIE PCS control and status register
+ */
+#define FR_AB_PCIE_PCS_CTL_STAT_REG_SF_OFST 0x00000340
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_PCS_CTL_STAT_REG(128bit):
+ * PCIE PCS control and status register
+ */
+#define FR_AB_PCIE_PCS_CTL_STAT_REG_OFST 0x00000340
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define FRF_AB_PCIE_PRBSERR_LBN 40
+#define FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSEL_LBN 0
+#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+
+/*
+ * FR_AB_HW_INIT_REG_SF(128bit):
+ * Hardware initialization register
+ */
+#define FR_AB_HW_INIT_REG_SF_OFST 0x00000350
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AZ_HW_INIT_REG(128bit):
+ * Hardware initialization register
+ */
+#define FR_AZ_HW_INIT_REG_OFST 0x000000c0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
+#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
+#define FRF_CZ_TX_MRG_TAGS_LBN 120
+#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
+#define FRF_AZ_TRGT_MASK_ALL_LBN 100
+#define FRF_AZ_TRGT_MASK_ALL_WIDTH 1
+#define FRF_AZ_DOORBELL_DROP_LBN 92
+#define FRF_AZ_DOORBELL_DROP_WIDTH 8
+#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
+#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
+#define FRF_AB_PE_EIDLE_DIS_LBN 75
+#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
+#define FRF_AZ_FC_BLOCKING_EN_LBN 45
+#define FRF_AZ_FC_BLOCKING_EN_WIDTH 1
+#define FRF_AZ_B2B_REQ_EN_LBN 44
+#define FRF_AZ_B2B_REQ_EN_WIDTH 1
+#define FRF_AZ_POST_WR_MASK_LBN 40
+#define FRF_AZ_POST_WR_MASK_WIDTH 4
+#define FRF_AZ_TLP_TC_LBN 34
+#define FRF_AZ_TLP_TC_WIDTH 3
+#define FRF_AZ_TLP_ATTR_LBN 32
+#define FRF_AZ_TLP_ATTR_WIDTH 2
+#define FRF_AB_INTB_VEC_LBN 24
+#define FRF_AB_INTB_VEC_WIDTH 5
+#define FRF_AB_INTA_VEC_LBN 16
+#define FRF_AB_INTA_VEC_WIDTH 5
+#define FRF_AZ_WD_TIMER_LBN 8
+#define FRF_AZ_WD_TIMER_WIDTH 8
+#define FRF_AZ_US_DISABLE_LBN 5
+#define FRF_AZ_US_DISABLE_WIDTH 1
+#define FRF_AZ_TLP_EP_LBN 4
+#define FRF_AZ_TLP_EP_WIDTH 1
+#define FRF_AZ_ATTR_SEL_LBN 3
+#define FRF_AZ_ATTR_SEL_WIDTH 1
+#define FRF_AZ_TD_SEL_LBN 1
+#define FRF_AZ_TD_SEL_WIDTH 1
+#define FRF_AZ_TLP_TD_LBN 0
+#define FRF_AZ_TLP_TD_WIDTH 1
+
+
+/*
+ * FR_AB_NIC_STAT_REG_SF(128bit):
+ * NIC status register
+ */
+#define FR_AB_NIC_STAT_REG_SF_OFST 0x00000360
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_NIC_STAT_REG(128bit):
+ * NIC status register
+ */
+#define FR_AB_NIC_STAT_REG_OFST 0x00000200
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_BB_AER_DIS_LBN 34
+#define FRF_BB_AER_DIS_WIDTH 1
+#define FRF_BB_EE_STRAP_EN_LBN 31
+#define FRF_BB_EE_STRAP_EN_WIDTH 1
+#define FRF_BB_EE_STRAP_LBN 24
+#define FRF_BB_EE_STRAP_WIDTH 4
+#define FRF_BB_REVISION_ID_LBN 17
+#define FRF_BB_REVISION_ID_WIDTH 7
+#define FRF_AB_ONCHIP_SRAM_LBN 16
+#define FRF_AB_ONCHIP_SRAM_WIDTH 1
+#define FRF_AB_SF_PRST_LBN 9
+#define FRF_AB_SF_PRST_WIDTH 1
+#define FRF_AB_EE_PRST_LBN 8
+#define FRF_AB_EE_PRST_WIDTH 1
+#define FRF_AB_ATE_MODE_LBN 3
+#define FRF_AB_ATE_MODE_WIDTH 1
+#define FRF_AB_STRAP_PINS_LBN 0
+#define FRF_AB_STRAP_PINS_WIDTH 3
+
+
+/*
+ * FR_AB_GLB_CTL_REG_SF(128bit):
+ * Global control register
+ */
+#define FR_AB_GLB_CTL_REG_SF_OFST 0x00000370
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_GLB_CTL_REG(128bit):
+ * Global control register
+ */
+#define FR_AB_GLB_CTL_REG_OFST 0x00000220
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define FRF_AA_PCIX_RST_CTL_LBN 60
+#define FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define FRF_BB_BIU_RST_CTL_LBN 60
+#define FRF_BB_BIU_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define FRF_AB_XGRX_RST_CTL_LBN 56
+#define FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define FRF_AB_XGTX_RST_CTL_LBN 55
+#define FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define FRF_AB_EM_RST_CTL_LBN 54
+#define FRF_AB_EM_RST_CTL_WIDTH 1
+#define FRF_AB_EV_RST_CTL_LBN 53
+#define FRF_AB_EV_RST_CTL_WIDTH 1
+#define FRF_AB_SR_RST_CTL_LBN 52
+#define FRF_AB_SR_RST_CTL_WIDTH 1
+#define FRF_AB_RX_RST_CTL_LBN 51
+#define FRF_AB_RX_RST_CTL_WIDTH 1
+#define FRF_AB_TX_RST_CTL_LBN 50
+#define FRF_AB_TX_RST_CTL_WIDTH 1
+#define FRF_AB_EE_RST_CTL_LBN 49
+#define FRF_AB_EE_RST_CTL_WIDTH 1
+#define FRF_AB_CS_RST_CTL_LBN 48
+#define FRF_AB_CS_RST_CTL_WIDTH 1
+#define FRF_AB_HOT_RST_CTL_LBN 40
+#define FRF_AB_HOT_RST_CTL_WIDTH 2
+#define FRF_AB_RST_EXT_PHY_LBN 31
+#define FRF_AB_RST_EXT_PHY_WIDTH 1
+#define FRF_AB_RST_XAUI_SD_LBN 30
+#define FRF_AB_RST_XAUI_SD_WIDTH 1
+#define FRF_AB_RST_PCIE_SD_LBN 29
+#define FRF_AB_RST_PCIE_SD_WIDTH 1
+#define FRF_AA_RST_PCIX_LBN 28
+#define FRF_AA_RST_PCIX_WIDTH 1
+#define FRF_BB_RST_BIU_LBN 28
+#define FRF_BB_RST_BIU_WIDTH 1
+#define FRF_AB_RST_PCIE_STKY_LBN 27
+#define FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define FRF_AB_RST_PCIE_CORE_LBN 25
+#define FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define FRF_AB_RST_XGRX_LBN 24
+#define FRF_AB_RST_XGRX_WIDTH 1
+#define FRF_AB_RST_XGTX_LBN 23
+#define FRF_AB_RST_XGTX_WIDTH 1
+#define FRF_AB_RST_EM_LBN 22
+#define FRF_AB_RST_EM_WIDTH 1
+#define FRF_AB_RST_EV_LBN 21
+#define FRF_AB_RST_EV_WIDTH 1
+#define FRF_AB_RST_SR_LBN 20
+#define FRF_AB_RST_SR_WIDTH 1
+#define FRF_AB_RST_RX_LBN 19
+#define FRF_AB_RST_RX_WIDTH 1
+#define FRF_AB_RST_TX_LBN 18
+#define FRF_AB_RST_TX_WIDTH 1
+#define FRF_AB_RST_SF_LBN 17
+#define FRF_AB_RST_SF_WIDTH 1
+#define FRF_AB_RST_CS_LBN 16
+#define FRF_AB_RST_CS_WIDTH 1
+#define FRF_AB_INT_RST_DUR_LBN 4
+#define FRF_AB_INT_RST_DUR_WIDTH 3
+#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define FRF_AB_SWRST_LBN 0
+#define FRF_AB_SWRST_WIDTH 1
+
+
+/*
+ * FR_AZ_IOM_IND_ADR_REG(32bit):
+ * IO-mapped indirect access address register
+ */
+#define FR_AZ_IOM_IND_ADR_REG_OFST 0x00000000
+/* falcona0,falconb0,sienaa0=net_func_bar0 */
+
+#define FRF_AZ_IOM_AUTO_ADR_INC_EN_LBN 24
+#define FRF_AZ_IOM_AUTO_ADR_INC_EN_WIDTH 1
+#define FRF_AZ_IOM_IND_ADR_LBN 0
+#define FRF_AZ_IOM_IND_ADR_WIDTH 24
+
+
+/*
+ * FR_AZ_IOM_IND_DAT_REG(32bit):
+ * IO-mapped indirect access data register
+ */
+#define FR_AZ_IOM_IND_DAT_REG_OFST 0x00000004
+/* falcona0,falconb0,sienaa0=net_func_bar0 */
+
+#define FRF_AZ_IOM_IND_DAT_LBN 0
+#define FRF_AZ_IOM_IND_DAT_WIDTH 32
+
+
+/*
+ * FR_AZ_ADR_REGION_REG(128bit):
+ * Address region register
+ */
+#define FR_AZ_ADR_REGION_REG_OFST 0x00000000
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_ADR_REGION3_LBN 96
+#define FRF_AZ_ADR_REGION3_WIDTH 18
+#define FRF_AZ_ADR_REGION2_LBN 64
+#define FRF_AZ_ADR_REGION2_WIDTH 18
+#define FRF_AZ_ADR_REGION1_LBN 32
+#define FRF_AZ_ADR_REGION1_WIDTH 18
+#define FRF_AZ_ADR_REGION0_LBN 0
+#define FRF_AZ_ADR_REGION0_WIDTH 18
+
+
+/*
+ * FR_AZ_INT_EN_REG_KER(128bit):
+ * Kernel driver Interrupt enable register
+ */
+#define FR_AZ_INT_EN_REG_KER_OFST 0x00000010
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_KER_INT_CHAR_LBN 4
+#define FRF_AZ_KER_INT_CHAR_WIDTH 1
+#define FRF_AZ_KER_INT_KER_LBN 3
+#define FRF_AZ_KER_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_KER_LBN 0
+#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
+
+
+/*
+ * FR_AZ_INT_EN_REG_CHAR(128bit):
+ * Char Driver interrupt enable register
+ */
+#define FR_AZ_INT_EN_REG_CHAR_OFST 0x00000020
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_CHAR_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_CHAR_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_CHAR_INT_CHAR_LBN 4
+#define FRF_AZ_CHAR_INT_CHAR_WIDTH 1
+#define FRF_AZ_CHAR_INT_KER_LBN 3
+#define FRF_AZ_CHAR_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_CHAR_LBN 0
+#define FRF_AZ_DRV_INT_EN_CHAR_WIDTH 1
+
+
+/*
+ * FR_AZ_INT_ADR_REG_KER(128bit):
+ * Interrupt host address for Kernel driver
+ */
+#define FR_AZ_INT_ADR_REG_KER_OFST 0x00000030
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
+#define FRF_AZ_INT_ADR_KER_LBN 0
+#define FRF_AZ_INT_ADR_KER_WIDTH 64
+#define FRF_AZ_INT_ADR_KER_DW0_LBN 0
+#define FRF_AZ_INT_ADR_KER_DW0_WIDTH 32
+#define FRF_AZ_INT_ADR_KER_DW1_LBN 32
+#define FRF_AZ_INT_ADR_KER_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_INT_ADR_REG_CHAR(128bit):
+ * Interrupt host address for Char driver
+ */
+#define FR_AZ_INT_ADR_REG_CHAR_OFST 0x00000040
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
+#define FRF_AZ_INT_ADR_CHAR_LBN 0
+#define FRF_AZ_INT_ADR_CHAR_WIDTH 64
+#define FRF_AZ_INT_ADR_CHAR_DW0_LBN 0
+#define FRF_AZ_INT_ADR_CHAR_DW0_WIDTH 32
+#define FRF_AZ_INT_ADR_CHAR_DW1_LBN 32
+#define FRF_AZ_INT_ADR_CHAR_DW1_WIDTH 32
+
+
+/*
+ * FR_AA_INT_ACK_KER(32bit):
+ * Kernel interrupt acknowledge register
+ */
+#define FR_AA_INT_ACK_KER_OFST 0x00000050
+/* falcona0=net_func_bar2 */
+
+#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
+#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
+
+
+/*
+ * FR_BZ_INT_ISR0_REG(128bit):
+ * Function 0 Interrupt Acknowlege Status register
+ */
+#define FR_BZ_INT_ISR0_REG_OFST 0x00000090
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_INT_ISR_REG_LBN 0
+#define FRF_BZ_INT_ISR_REG_WIDTH 64
+#define FRF_BZ_INT_ISR_REG_DW0_LBN 0
+#define FRF_BZ_INT_ISR_REG_DW0_WIDTH 32
+#define FRF_BZ_INT_ISR_REG_DW1_LBN 32
+#define FRF_BZ_INT_ISR_REG_DW1_WIDTH 32
+
+
+/*
+ * FR_AB_EE_SPI_HCMD_REG(128bit):
+ * SPI host command register
+ */
+#define FR_AB_EE_SPI_HCMD_REG_OFST 0x00000100
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
+#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
+#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
+#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
+#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
+#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
+
+
+/*
+ * FR_CZ_USR_EV_CFG(32bit):
+ * User Level Event Configuration register
+ */
+#define FR_CZ_USR_EV_CFG_OFST 0x00000100
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_USREV_DIS_LBN 16
+#define FRF_CZ_USREV_DIS_WIDTH 1
+#define FRF_CZ_DFLT_EVQ_LBN 0
+#define FRF_CZ_DFLT_EVQ_WIDTH 10
+
+
+/*
+ * FR_AB_EE_SPI_HADR_REG(128bit):
+ * SPI host address register
+ */
+#define FR_AB_EE_SPI_HADR_REG_OFST 0x00000110
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
+#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
+#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
+#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
+
+
+/*
+ * FR_AB_EE_SPI_HDATA_REG(128bit):
+ * SPI host data register
+ */
+#define FR_AB_EE_SPI_HDATA_REG_OFST 0x00000120
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HDATA3_LBN 96
+#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA2_LBN 64
+#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA1_LBN 32
+#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA0_LBN 0
+#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
+
+
+/*
+ * FR_AB_EE_BASE_PAGE_REG(128bit):
+ * Expansion ROM base mirror register
+ */
+#define FR_AB_EE_BASE_PAGE_REG_OFST 0x00000130
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_EXPROM_MASK_LBN 16
+#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
+
+
+/*
+ * FR_AB_EE_VPD_SW_CNTL_REG(128bit):
+ * VPD access SW control register
+ */
+#define FR_AB_EE_VPD_SW_CNTL_REG_OFST 0x00000150
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
+#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
+#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
+#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
+
+
+/*
+ * FR_AB_EE_VPD_SW_DATA_REG(128bit):
+ * VPD access SW data register
+ */
+#define FR_AB_EE_VPD_SW_DATA_REG_OFST 0x00000160
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
+#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
+
+
+/*
+ * FR_BB_PCIE_CORE_INDIRECT_REG(64bit):
+ * Indirect Access to PCIE Core registers
+ */
+#define FR_BB_PCIE_CORE_INDIRECT_REG_OFST 0x000001f0
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
+#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
+
+
+/*
+ * FR_AB_GPIO_CTL_REG(128bit):
+ * GPIO control register
+ */
+#define FR_AB_GPIO_CTL_REG_OFST 0x00000210
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GPIO15_OEN_LBN 63
+#define FRF_AB_GPIO15_OEN_WIDTH 1
+#define FRF_AB_GPIO14_OEN_LBN 62
+#define FRF_AB_GPIO14_OEN_WIDTH 1
+#define FRF_AB_GPIO13_OEN_LBN 61
+#define FRF_AB_GPIO13_OEN_WIDTH 1
+#define FRF_AB_GPIO12_OEN_LBN 60
+#define FRF_AB_GPIO12_OEN_WIDTH 1
+#define FRF_AB_GPIO11_OEN_LBN 59
+#define FRF_AB_GPIO11_OEN_WIDTH 1
+#define FRF_AB_GPIO10_OEN_LBN 58
+#define FRF_AB_GPIO10_OEN_WIDTH 1
+#define FRF_AB_GPIO9_OEN_LBN 57
+#define FRF_AB_GPIO9_OEN_WIDTH 1
+#define FRF_AB_GPIO8_OEN_LBN 56
+#define FRF_AB_GPIO8_OEN_WIDTH 1
+#define FRF_AB_GPIO15_OUT_LBN 55
+#define FRF_AB_GPIO15_OUT_WIDTH 1
+#define FRF_AB_GPIO14_OUT_LBN 54
+#define FRF_AB_GPIO14_OUT_WIDTH 1
+#define FRF_AB_GPIO13_OUT_LBN 53
+#define FRF_AB_GPIO13_OUT_WIDTH 1
+#define FRF_AB_GPIO12_OUT_LBN 52
+#define FRF_AB_GPIO12_OUT_WIDTH 1
+#define FRF_AB_GPIO11_OUT_LBN 51
+#define FRF_AB_GPIO11_OUT_WIDTH 1
+#define FRF_AB_GPIO10_OUT_LBN 50
+#define FRF_AB_GPIO10_OUT_WIDTH 1
+#define FRF_AB_GPIO9_OUT_LBN 49
+#define FRF_AB_GPIO9_OUT_WIDTH 1
+#define FRF_AB_GPIO8_OUT_LBN 48
+#define FRF_AB_GPIO8_OUT_WIDTH 1
+#define FRF_AB_GPIO15_IN_LBN 47
+#define FRF_AB_GPIO15_IN_WIDTH 1
+#define FRF_AB_GPIO14_IN_LBN 46
+#define FRF_AB_GPIO14_IN_WIDTH 1
+#define FRF_AB_GPIO13_IN_LBN 45
+#define FRF_AB_GPIO13_IN_WIDTH 1
+#define FRF_AB_GPIO12_IN_LBN 44
+#define FRF_AB_GPIO12_IN_WIDTH 1
+#define FRF_AB_GPIO11_IN_LBN 43
+#define FRF_AB_GPIO11_IN_WIDTH 1
+#define FRF_AB_GPIO10_IN_LBN 42
+#define FRF_AB_GPIO10_IN_WIDTH 1
+#define FRF_AB_GPIO9_IN_LBN 41
+#define FRF_AB_GPIO9_IN_WIDTH 1
+#define FRF_AB_GPIO8_IN_LBN 40
+#define FRF_AB_GPIO8_IN_WIDTH 1
+#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
+#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
+#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
+#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
+#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
+#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
+#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
+#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
+#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
+#define FRF_BB_CLK156_OUT_EN_LBN 31
+#define FRF_BB_CLK156_OUT_EN_WIDTH 1
+#define FRF_BB_USE_NIC_CLK_LBN 30
+#define FRF_BB_USE_NIC_CLK_WIDTH 1
+#define FRF_AB_GPIO5_OEN_LBN 29
+#define FRF_AB_GPIO5_OEN_WIDTH 1
+#define FRF_AB_GPIO4_OEN_LBN 28
+#define FRF_AB_GPIO4_OEN_WIDTH 1
+#define FRF_AB_GPIO3_OEN_LBN 27
+#define FRF_AB_GPIO3_OEN_WIDTH 1
+#define FRF_AB_GPIO2_OEN_LBN 26
+#define FRF_AB_GPIO2_OEN_WIDTH 1
+#define FRF_AB_GPIO1_OEN_LBN 25
+#define FRF_AB_GPIO1_OEN_WIDTH 1
+#define FRF_AB_GPIO0_OEN_LBN 24
+#define FRF_AB_GPIO0_OEN_WIDTH 1
+#define FRF_AB_GPIO5_OUT_LBN 21
+#define FRF_AB_GPIO5_OUT_WIDTH 1
+#define FRF_AB_GPIO4_OUT_LBN 20
+#define FRF_AB_GPIO4_OUT_WIDTH 1
+#define FRF_AB_GPIO3_OUT_LBN 19
+#define FRF_AB_GPIO3_OUT_WIDTH 1
+#define FRF_AB_GPIO2_OUT_LBN 18
+#define FRF_AB_GPIO2_OUT_WIDTH 1
+#define FRF_AB_GPIO1_OUT_LBN 17
+#define FRF_AB_GPIO1_OUT_WIDTH 1
+#define FRF_AB_GPIO0_OUT_LBN 16
+#define FRF_AB_GPIO0_OUT_WIDTH 1
+#define FRF_AB_GPIO5_IN_LBN 13
+#define FRF_AB_GPIO5_IN_WIDTH 1
+#define FRF_AB_GPIO4_IN_LBN 12
+#define FRF_AB_GPIO4_IN_WIDTH 1
+#define FRF_AB_GPIO3_IN_LBN 11
+#define FRF_AB_GPIO3_IN_WIDTH 1
+#define FRF_AB_GPIO2_IN_LBN 10
+#define FRF_AB_GPIO2_IN_WIDTH 1
+#define FRF_AB_GPIO1_IN_LBN 9
+#define FRF_AB_GPIO1_IN_WIDTH 1
+#define FRF_AB_GPIO0_IN_LBN 8
+#define FRF_AB_GPIO0_IN_WIDTH 1
+#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
+#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
+#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
+#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
+#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
+#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
+
+
+/*
+ * FR_AZ_FATAL_INTR_REG_KER(128bit):
+ * Fatal interrupt register for Kernel
+ */
+#define FR_AZ_FATAL_INTR_REG_KER_OFST 0x00000230
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+
+/*
+ * FR_AZ_FATAL_INTR_REG_CHAR(128bit):
+ * Fatal interrupt register for Char
+ */
+#define FR_AZ_FATAL_INTR_REG_CHAR_OFST 0x00000240
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_CHAR_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_CHAR_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_CHAR_LBN 8
+#define FRF_AZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_CHAR_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_CHAR_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_CHAR_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_CHAR_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_LBN 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_CHAR_LBN 0
+#define FRF_AZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+
+/*
+ * FR_AZ_DP_CTRL_REG(128bit):
+ * Datapath control register
+ */
+#define FR_AZ_DP_CTRL_REG_OFST 0x00000250
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_FLS_EVQ_ID_LBN 0
+#define FRF_AZ_FLS_EVQ_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_MEM_STAT_REG(128bit):
+ * Memory status register
+ */
+#define FR_AZ_MEM_STAT_REG_OFST 0x00000260
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MEM_PERR_VEC_LBN 53
+#define FRF_AB_MEM_PERR_VEC_WIDTH 40
+#define FRF_AB_MEM_PERR_VEC_DW0_LBN 53
+#define FRF_AB_MEM_PERR_VEC_DW0_WIDTH 32
+#define FRF_AB_MEM_PERR_VEC_DW1_LBN 85
+#define FRF_AB_MEM_PERR_VEC_DW1_WIDTH 6
+#define FRF_AB_MBIST_CORR_LBN 38
+#define FRF_AB_MBIST_CORR_WIDTH 15
+#define FRF_AB_MBIST_ERR_LBN 0
+#define FRF_AB_MBIST_ERR_WIDTH 40
+#define FRF_AB_MBIST_ERR_DW0_LBN 0
+#define FRF_AB_MBIST_ERR_DW0_WIDTH 32
+#define FRF_AB_MBIST_ERR_DW1_LBN 32
+#define FRF_AB_MBIST_ERR_DW1_WIDTH 6
+#define FRF_CZ_MEM_PERR_VEC_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
+#define FRF_CZ_MEM_PERR_VEC_DW0_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_DW0_WIDTH 32
+#define FRF_CZ_MEM_PERR_VEC_DW1_LBN 32
+#define FRF_CZ_MEM_PERR_VEC_DW1_WIDTH 3
+
+
+/*
+ * FR_PORT0_CS_DEBUG_REG(128bit):
+ * Debug register
+ */
+
+#define FR_AZ_CS_DEBUG_REG_OFST 0x00000270
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define FRF_CZ_CS_PORT_NUM_LBN 40
+#define FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_RESERVED_LBN 36
+#define FRF_CZ_CS_RESERVED_WIDTH 4
+#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_PORT_FPE_DW0_LBN 1
+#define FRF_CZ_CS_PORT_FPE_DW0_WIDTH 32
+#define FRF_CZ_CS_PORT_FPE_DW1_LBN 33
+#define FRF_CZ_CS_PORT_FPE_DW1_WIDTH 3
+#define FRF_CZ_CS_PORT_FPE_LBN 1
+#define FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define FRF_AZ_CS_DEBUG_EN_LBN 0
+#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_DRIVER_REG(128bit):
+ * Driver scratch register [0-7]
+ */
+#define FR_AZ_DRIVER_REG_OFST 0x00000280
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_DRIVER_REG_STEP 16
+#define FR_AZ_DRIVER_REG_ROWS 8
+
+#define FRF_AZ_DRIVER_DW0_LBN 0
+#define FRF_AZ_DRIVER_DW0_WIDTH 32
+
+
+/*
+ * FR_AZ_ALTERA_BUILD_REG(128bit):
+ * Altera build register
+ */
+#define FR_AZ_ALTERA_BUILD_REG_OFST 0x00000300
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+
+/*
+ * FR_AZ_CSR_SPARE_REG(128bit):
+ * Spare register
+ */
+#define FR_AZ_CSR_SPARE_REG_OFST 0x00000310
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_MEM_PERR_EN_TX_DATA_LBN 72
+#define FRF_AZ_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define FRF_AZ_MEM_PERR_EN_LBN 64
+#define FRF_AZ_MEM_PERR_EN_WIDTH 38
+#define FRF_AZ_MEM_PERR_EN_DW0_LBN 64
+#define FRF_AZ_MEM_PERR_EN_DW0_WIDTH 32
+#define FRF_AZ_MEM_PERR_EN_DW1_LBN 96
+#define FRF_AZ_MEM_PERR_EN_DW1_WIDTH 6
+#define FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+
+/*
+ * FR_BZ_DEBUG_DATA_OUT_REG(128bit):
+ * Live Debug and Debug 2 out ports
+ */
+#define FR_BZ_DEBUG_DATA_OUT_REG_OFST 0x00000350
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_DEBUG2_PORT_LBN 25
+#define FRF_BZ_DEBUG2_PORT_WIDTH 15
+#define FRF_BZ_DEBUG1_PORT_LBN 0
+#define FRF_BZ_DEBUG1_PORT_WIDTH 25
+
+
+/*
+ * FR_BZ_EVQ_RPTR_REGP0(32bit):
+ * Event queue read pointer register
+ */
+#define FR_BZ_EVQ_RPTR_REGP0_OFST 0x00000400
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_EVQ_RPTR_REGP0_STEP 8192
+#define FR_BZ_EVQ_RPTR_REGP0_ROWS 1024
+/*
+ * FR_AA_EVQ_RPTR_REG_KER(32bit):
+ * Event queue read pointer register
+ */
+#define FR_AA_EVQ_RPTR_REG_KER_OFST 0x00011b00
+/* falcona0=net_func_bar2 */
+#define FR_AA_EVQ_RPTR_REG_KER_STEP 4
+#define FR_AA_EVQ_RPTR_REG_KER_ROWS 4
+/*
+ * FR_AZ_EVQ_RPTR_REG(32bit):
+ * Event queue read pointer register
+ */
+#define FR_AZ_EVQ_RPTR_REG_OFST 0x00fa0000
+/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_EVQ_RPTR_REG_STEP 16
+#define FR_AB_EVQ_RPTR_REG_ROWS 4096
+#define FR_CZ_EVQ_RPTR_REG_ROWS 1024
+/*
+ * FR_BB_EVQ_RPTR_REGP123(32bit):
+ * Event queue read pointer register
+ */
+#define FR_BB_EVQ_RPTR_REGP123_OFST 0x01000400
+/* falconb0=net_func_bar2 */
+#define FR_BB_EVQ_RPTR_REGP123_STEP 8192
+#define FR_BB_EVQ_RPTR_REGP123_ROWS 3072
+
+#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define FRF_AZ_EVQ_RPTR_LBN 0
+#define FRF_AZ_EVQ_RPTR_WIDTH 15
+
+
+/*
+ * FR_BZ_TIMER_COMMAND_REGP0(128bit):
+ * Timer Command Registers
+ */
+#define FR_BZ_TIMER_COMMAND_REGP0_OFST 0x00000420
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_TIMER_COMMAND_REGP0_STEP 8192
+#define FR_BZ_TIMER_COMMAND_REGP0_ROWS 1024
+/*
+ * FR_AA_TIMER_COMMAND_REG_KER(128bit):
+ * Timer Command Registers
+ */
+#define FR_AA_TIMER_COMMAND_REG_KER_OFST 0x00000420
+/* falcona0=net_func_bar2 */
+#define FR_AA_TIMER_COMMAND_REG_KER_STEP 8192
+#define FR_AA_TIMER_COMMAND_REG_KER_ROWS 4
+/*
+ * FR_AB_TIMER_COMMAND_REGP123(128bit):
+ * Timer Command Registers
+ */
+#define FR_AB_TIMER_COMMAND_REGP123_OFST 0x01000420
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TIMER_COMMAND_REGP123_STEP 8192
+#define FR_AB_TIMER_COMMAND_REGP123_ROWS 3072
+/*
+ * FR_AA_TIMER_COMMAND_REGP0(128bit):
+ * Timer Command Registers
+ */
+#define FR_AA_TIMER_COMMAND_REGP0_OFST 0x00008420
+/* falcona0=char_func_bar0 */
+#define FR_AA_TIMER_COMMAND_REGP0_STEP 8192
+#define FR_AA_TIMER_COMMAND_REGP0_ROWS 1020
+
+#define FRF_CZ_TC_TIMER_MODE_LBN 14
+#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define FRF_AB_TC_TIMER_MODE_LBN 12
+#define FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define FRF_CZ_TC_TIMER_VAL_LBN 0
+#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define FRF_AB_TC_TIMER_VAL_LBN 0
+#define FRF_AB_TC_TIMER_VAL_WIDTH 12
+
+
+/*
+ * FR_AZ_DRV_EV_REG(128bit):
+ * Driver generated event register
+ */
+#define FR_AZ_DRV_EV_REG_OFST 0x00000440
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_DRV_EV_QID_LBN 64
+#define FRF_AZ_DRV_EV_QID_WIDTH 12
+#define FRF_AZ_DRV_EV_DATA_LBN 0
+#define FRF_AZ_DRV_EV_DATA_WIDTH 64
+#define FRF_AZ_DRV_EV_DATA_DW0_LBN 0
+#define FRF_AZ_DRV_EV_DATA_DW0_WIDTH 32
+#define FRF_AZ_DRV_EV_DATA_DW1_LBN 32
+#define FRF_AZ_DRV_EV_DATA_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_EVQ_CTL_REG(128bit):
+ * Event queue control register
+ */
+#define FR_AZ_EVQ_CTL_REG_OFST 0x00000450
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+
+/*
+ * FR_AZ_EVQ_CNT1_REG(128bit):
+ * Event counter 1 register
+ */
+#define FR_AZ_EVQ_CNT1_REG_OFST 0x00000460
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+
+/*
+ * FR_AZ_EVQ_CNT2_REG(128bit):
+ * Event counter 2 register
+ */
+#define FR_AZ_EVQ_CNT2_REG_OFST 0x00000470
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+
+/*
+ * FR_CZ_USR_EV_REG(32bit):
+ * Event mailbox register
+ */
+#define FR_CZ_USR_EV_REG_OFST 0x00000540
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_USR_EV_REG_STEP 8192
+#define FR_CZ_USR_EV_REG_ROWS 1024
+
+#define FRF_CZ_USR_EV_DATA_LBN 0
+#define FRF_CZ_USR_EV_DATA_WIDTH 32
+
+
+/*
+ * FR_AZ_BUF_TBL_CFG_REG(128bit):
+ * Buffer table configuration register
+ */
+#define FR_AZ_BUF_TBL_CFG_REG_OFST 0x00000600
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_BUF_TBL_MODE_LBN 3
+#define FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+
+/*
+ * FR_AZ_SRM_RX_DC_CFG_REG(128bit):
+ * SRAM receive descriptor cache configuration register
+ */
+#define FR_AZ_SRM_RX_DC_CFG_REG_OFST 0x00000610
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+
+/*
+ * FR_AZ_SRM_TX_DC_CFG_REG(128bit):
+ * SRAM transmit descriptor cache configuration register
+ */
+#define FR_AZ_SRM_TX_DC_CFG_REG_OFST 0x00000620
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+
+/*
+ * FR_AZ_SRM_CFG_REG(128bit):
+ * SRAM configuration register
+ */
+#define FR_AZ_SRM_CFG_REG_SF_OFST 0x00000380
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AZ_SRM_CFG_REG(128bit):
+ * SRAM configuration register
+ */
+#define FR_AZ_SRM_CFG_REG_OFST 0x00000630
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define FRF_AZ_SRM_INIT_EN_LBN 3
+#define FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define FRF_AZ_SRM_NUM_BANK_LBN 2
+#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+
+/*
+ * FR_AZ_BUF_TBL_UPD_REG(128bit):
+ * Buffer table update register
+ */
+#define FR_AZ_BUF_TBL_UPD_REG_OFST 0x00000650
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_BUF_UPD_CMD_LBN 63
+#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_CMD_LBN 62
+#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20
+
+
+/*
+ * FR_AZ_SRM_UPD_EVQ_REG(128bit):
+ * Buffer table update register
+ */
+#define FR_AZ_SRM_UPD_EVQ_REG_OFST 0x00000660
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_SRAM_PARITY_REG(128bit):
+ * SRAM parity register.
+ */
+#define FR_AZ_SRAM_PARITY_REG_OFST 0x00000670
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_BYPASS_ECC_LBN 3
+#define FRF_CZ_BYPASS_ECC_WIDTH 1
+#define FRF_CZ_SEC_INT_LBN 2
+#define FRF_CZ_SEC_INT_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+#define FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+
+
+/*
+ * FR_AZ_RX_CFG_REG(128bit):
+ * Receive configuration register
+ */
+#define FR_AZ_RX_CFG_REG_OFST 0x00000800
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define FRF_BZ_RX_TCP_SUP_LBN 48
+#define FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define FRF_BZ_RX_INGR_EN_LBN 47
+#define FRF_BZ_RX_INGR_EN_WIDTH 1
+#define FRF_BZ_RX_IP_HASH_LBN 46
+#define FRF_BZ_RX_IP_HASH_WIDTH 1
+#define FRF_BZ_RX_HASH_ALG_LBN 45
+#define FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define FRF_BZ_RX_XON_TX_TH_LBN 33
+#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_OWNERR_CTL_LBN 30
+#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_RX_XON_TX_TH_LBN 25
+#define FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XON_MAC_TH_LBN 6
+#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_RX_FILTER_CTL_REG(128bit):
+ * Receive filter control registers
+ */
+#define FR_AZ_RX_FILTER_CTL_REG_OFST 0x00000810
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define FRF_AZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define FRF_AZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_NUM_KER_LBN 24
+#define FRF_AZ_NUM_KER_WIDTH 2
+#define FRF_AZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define FRF_AZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define FRF_AZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define FRF_AZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+
+/*
+ * FR_AZ_RX_FLUSH_DESCQ_REG(128bit):
+ * Receive flush descriptor queue register
+ */
+#define FR_AZ_RX_FLUSH_DESCQ_REG_OFST 0x00000820
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+
+/*
+ * FR_BZ_RX_DESC_UPD_REGP0(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_BZ_RX_DESC_UPD_REGP0_OFST 0x00000830
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_RX_DESC_UPD_REGP0_STEP 8192
+#define FR_BZ_RX_DESC_UPD_REGP0_ROWS 1024
+/*
+ * FR_AA_RX_DESC_UPD_REG_KER(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AA_RX_DESC_UPD_REG_KER_OFST 0x00000830
+/* falcona0=net_func_bar2 */
+#define FR_AA_RX_DESC_UPD_REG_KER_STEP 8192
+#define FR_AA_RX_DESC_UPD_REG_KER_ROWS 4
+/*
+ * FR_AB_RX_DESC_UPD_REGP123(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AB_RX_DESC_UPD_REGP123_OFST 0x01000830
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_RX_DESC_UPD_REGP123_STEP 8192
+#define FR_AB_RX_DESC_UPD_REGP123_ROWS 3072
+/*
+ * FR_AA_RX_DESC_UPD_REGP0(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AA_RX_DESC_UPD_REGP0_OFST 0x00008830
+/* falcona0=char_func_bar0 */
+#define FR_AA_RX_DESC_UPD_REGP0_STEP 8192
+#define FR_AA_RX_DESC_UPD_REGP0_ROWS 1020
+
+#define FRF_AZ_RX_DESC_WPTR_LBN 96
+#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_RX_DESC_LBN 0
+#define FRF_AZ_RX_DESC_WIDTH 64
+#define FRF_AZ_RX_DESC_DW0_LBN 0
+#define FRF_AZ_RX_DESC_DW0_WIDTH 32
+#define FRF_AZ_RX_DESC_DW1_LBN 32
+#define FRF_AZ_RX_DESC_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_DC_CFG_REG(128bit):
+ * Receive descriptor cache configuration register
+ */
+#define FR_AZ_RX_DC_CFG_REG_OFST 0x00000840
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_MAX_PF_LBN 2
+#define FRF_AZ_RX_MAX_PF_WIDTH 2
+#define FRF_AZ_RX_DC_SIZE_LBN 0
+#define FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define FFE_AZ_RX_DC_SIZE_64 3
+#define FFE_AZ_RX_DC_SIZE_32 2
+#define FFE_AZ_RX_DC_SIZE_16 1
+#define FFE_AZ_RX_DC_SIZE_8 0
+
+
+/*
+ * FR_AZ_RX_DC_PF_WM_REG(128bit):
+ * Receive descriptor cache pre-fetch watermark register
+ */
+#define FR_AZ_RX_DC_PF_WM_REG_OFST 0x00000850
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+
+/*
+ * FR_BZ_RX_RSS_TKEY_REG(128bit):
+ * RSS Toeplitz hash key
+ */
+#define FR_BZ_RX_RSS_TKEY_REG_OFST 0x00000860
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_RX_RSS_TKEY_LBN 96
+#define FRF_BZ_RX_RSS_TKEY_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW3_LBN 96
+#define FRF_BZ_RX_RSS_TKEY_DW3_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW2_LBN 64
+#define FRF_BZ_RX_RSS_TKEY_DW2_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW1_LBN 32
+#define FRF_BZ_RX_RSS_TKEY_DW1_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW0_LBN 0
+#define FRF_BZ_RX_RSS_TKEY_DW0_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_NODESC_DROP_REG(128bit):
+ * Receive dropped packet counter register
+ */
+#define FR_AZ_RX_NODESC_DROP_REG_OFST 0x00000880
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_AZ_RX_NODESC_DROP_CNT_WIDTH 16
+
+
+/*
+ * FR_AZ_RX_SELF_RST_REG(128bit):
+ * Receive self reset register
+ */
+#define FR_AZ_RX_SELF_RST_REG_OFST 0x00000890
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_ISCSI_DIS_LBN 17
+#define FRF_AZ_RX_ISCSI_DIS_WIDTH 1
+#define FRF_AB_RX_SW_RST_REG_LBN 16
+#define FRF_AB_RX_SW_RST_REG_WIDTH 1
+#define FRF_AB_RX_SELF_RST_EN_LBN 8
+#define FRF_AB_RX_SELF_RST_EN_WIDTH 1
+#define FRF_AZ_RX_MAX_PF_LAT_LBN 4
+#define FRF_AZ_RX_MAX_PF_LAT_WIDTH 4
+#define FRF_AZ_RX_MAX_LU_LAT_LBN 0
+#define FRF_AZ_RX_MAX_LU_LAT_WIDTH 4
+
+
+/*
+ * FR_AZ_RX_DEBUG_REG(128bit):
+ * undocumented register
+ */
+#define FR_AZ_RX_DEBUG_REG_OFST 0x000008a0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_DEBUG_LBN 0
+#define FRF_AZ_RX_DEBUG_WIDTH 64
+#define FRF_AZ_RX_DEBUG_DW0_LBN 0
+#define FRF_AZ_RX_DEBUG_DW0_WIDTH 32
+#define FRF_AZ_RX_DEBUG_DW1_LBN 32
+#define FRF_AZ_RX_DEBUG_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_PUSH_DROP_REG(128bit):
+ * Receive descriptor push dropped counter register
+ */
+#define FR_AZ_RX_PUSH_DROP_REG_OFST 0x000008b0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG1(128bit):
+ * IPv6 RSS Toeplitz hash key low bytes
+ */
+#define FR_CZ_RX_RSS_IPV6_REG1_OFST 0x000008d0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_LBN 96
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG2(128bit):
+ * IPv6 RSS Toeplitz hash key middle bytes
+ */
+#define FR_CZ_RX_RSS_IPV6_REG2_OFST 0x000008e0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_LBN 96
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG3(128bit):
+ * IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings
+ */
+#define FR_CZ_RX_RSS_IPV6_REG3_OFST 0x000008f0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_FLUSH_DESCQ_REG(128bit):
+ * Transmit flush descriptor queue register
+ */
+#define FR_AZ_TX_FLUSH_DESCQ_REG_OFST 0x00000a00
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+
+/*
+ * FR_BZ_TX_DESC_UPD_REGP0(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_BZ_TX_DESC_UPD_REGP0_OFST 0x00000a10
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_TX_DESC_UPD_REGP0_STEP 8192
+#define FR_BZ_TX_DESC_UPD_REGP0_ROWS 1024
+/*
+ * FR_AA_TX_DESC_UPD_REG_KER(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AA_TX_DESC_UPD_REG_KER_OFST 0x00000a10
+/* falcona0=net_func_bar2 */
+#define FR_AA_TX_DESC_UPD_REG_KER_STEP 8192
+#define FR_AA_TX_DESC_UPD_REG_KER_ROWS 8
+/*
+ * FR_AB_TX_DESC_UPD_REGP123(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AB_TX_DESC_UPD_REGP123_OFST 0x01000a10
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TX_DESC_UPD_REGP123_STEP 8192
+#define FR_AB_TX_DESC_UPD_REGP123_ROWS 3072
+/*
+ * FR_AA_TX_DESC_UPD_REGP0(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AA_TX_DESC_UPD_REGP0_OFST 0x00008a10
+/* falcona0=char_func_bar0 */
+#define FR_AA_TX_DESC_UPD_REGP0_STEP 8192
+#define FR_AA_TX_DESC_UPD_REGP0_ROWS 1020
+
+#define FRF_AZ_TX_DESC_WPTR_LBN 96
+#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_TX_DESC_LBN 0
+#define FRF_AZ_TX_DESC_WIDTH 95
+#define FRF_AZ_TX_DESC_DW0_LBN 0
+#define FRF_AZ_TX_DESC_DW0_WIDTH 32
+#define FRF_AZ_TX_DESC_DW1_LBN 32
+#define FRF_AZ_TX_DESC_DW1_WIDTH 32
+#define FRF_AZ_TX_DESC_DW2_LBN 64
+#define FRF_AZ_TX_DESC_DW2_WIDTH 31
+
+
+/*
+ * FR_AZ_TX_DC_CFG_REG(128bit):
+ * Transmit descriptor cache configuration register
+ */
+#define FR_AZ_TX_DC_CFG_REG_OFST 0x00000a20
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_DC_SIZE_LBN 0
+#define FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define FFE_AZ_TX_DC_SIZE_32 2
+#define FFE_AZ_TX_DC_SIZE_16 1
+#define FFE_AZ_TX_DC_SIZE_8 0
+
+
+/*
+ * FR_AA_TX_CHKSM_CFG_REG(128bit):
+ * Transmit checksum configuration register
+ */
+#define FR_AA_TX_CHKSM_CFG_REG_OFST 0x00000a30
+/* falcona0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_CFG_REG(128bit):
+ * Transmit configuration register
+ */
+#define FR_AZ_TX_CFG_REG_OFST 0x00000a50
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_TX_PUSH_DROP_REG(128bit):
+ * Transmit push dropped register
+ */
+#define FR_AZ_TX_PUSH_DROP_REG_OFST 0x00000a60
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_RESERVED_REG(128bit):
+ * Transmit configuration register
+ */
+#define FR_AZ_TX_RESERVED_REG_OFST 0x00000a80
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_EVT_CNT_LBN 121
+#define FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define FRF_AZ_TX_PUSH_EN_LBN 89
+#define FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define FRF_AZ_TX_DMAQ_ST_LBN 78
+#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_LBN 64
+#define FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define FRF_AZ_TX_XP_TIMER_LBN 52
+#define FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define FRF_AZ_TX_PREF_SPACER_LBN 44
+#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define FRF_AZ_TX_ONLY1TAG_LBN 21
+#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define FRF_AA_TX_DMA_FF_THR_LBN 16
+#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define FRF_AZ_TX_DMA_SPACER_LBN 8
+#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define FRF_AA_TX_TCP_DIS_LBN 7
+#define FRF_AA_TX_TCP_DIS_WIDTH 1
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define FRF_AA_TX_IP_DIS_LBN 6
+#define FRF_AA_TX_IP_DIS_WIDTH 1
+#define FRF_AZ_TX_MAX_CPL_LBN 2
+#define FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define FFE_AZ_TX_MAX_CPL_16 3
+#define FFE_AZ_TX_MAX_CPL_8 2
+#define FFE_AZ_TX_MAX_CPL_4 1
+#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define FRF_AZ_TX_MAX_PREF_LBN 0
+#define FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define FFE_AZ_TX_MAX_PREF_32 3
+#define FFE_AZ_TX_MAX_PREF_16 2
+#define FFE_AZ_TX_MAX_PREF_8 1
+#define FFE_AZ_TX_MAX_PREF_OFF 0
+
+
+/*
+ * FR_BZ_TX_PACE_REG(128bit):
+ * Transmit pace control register
+ */
+#define FR_BZ_TX_PACE_REG_OFST 0x00000a90
+/* falconb0,sienaa0=net_func_bar2 */
+/*
+ * FR_AA_TX_PACE_REG(128bit):
+ * Transmit pace control register
+ */
+#define FR_AA_TX_PACE_REG_OFST 0x00f80000
+/* falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PACE_SB_NOT_AF_LBN 19
+#define FRF_AZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define FRF_AZ_TX_PACE_SB_AF_LBN 9
+#define FRF_AZ_TX_PACE_SB_AF_WIDTH 10
+#define FRF_AZ_TX_PACE_FB_BASE_LBN 5
+#define FRF_AZ_TX_PACE_FB_BASE_WIDTH 4
+#define FRF_AZ_TX_PACE_BIN_TH_LBN 0
+#define FRF_AZ_TX_PACE_BIN_TH_WIDTH 5
+
+
+/*
+ * FR_AZ_TX_PACE_DROP_QID_REG(128bit):
+ * PACE Drop QID Counter
+ */
+#define FR_AZ_TX_PACE_DROP_QID_REG_OFST 0x00000aa0
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define FRF_AZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+
+/*
+ * FR_AB_TX_VLAN_REG(128bit):
+ * Transmit VLAN tag register
+ */
+#define FR_AB_TX_VLAN_REG_OFST 0x00000ae0
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_TX_VLAN_EN_LBN 127
+#define FRF_AB_TX_VLAN_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_PORT1_EN_LBN 125
+#define FRF_AB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_PORT0_EN_LBN 124
+#define FRF_AB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_LBN 112
+#define FRF_AB_TX_VLAN7_WIDTH 12
+#define FRF_AB_TX_VLAN6_PORT1_EN_LBN 109
+#define FRF_AB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN6_PORT0_EN_LBN 108
+#define FRF_AB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN6_LBN 96
+#define FRF_AB_TX_VLAN6_WIDTH 12
+#define FRF_AB_TX_VLAN5_PORT1_EN_LBN 93
+#define FRF_AB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN5_PORT0_EN_LBN 92
+#define FRF_AB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN5_LBN 80
+#define FRF_AB_TX_VLAN5_WIDTH 12
+#define FRF_AB_TX_VLAN4_PORT1_EN_LBN 77
+#define FRF_AB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN4_PORT0_EN_LBN 76
+#define FRF_AB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN4_LBN 64
+#define FRF_AB_TX_VLAN4_WIDTH 12
+#define FRF_AB_TX_VLAN3_PORT1_EN_LBN 61
+#define FRF_AB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN3_PORT0_EN_LBN 60
+#define FRF_AB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN3_LBN 48
+#define FRF_AB_TX_VLAN3_WIDTH 12
+#define FRF_AB_TX_VLAN2_PORT1_EN_LBN 45
+#define FRF_AB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN2_PORT0_EN_LBN 44
+#define FRF_AB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN2_LBN 32
+#define FRF_AB_TX_VLAN2_WIDTH 12
+#define FRF_AB_TX_VLAN1_PORT1_EN_LBN 29
+#define FRF_AB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN1_PORT0_EN_LBN 28
+#define FRF_AB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN1_LBN 16
+#define FRF_AB_TX_VLAN1_WIDTH 12
+#define FRF_AB_TX_VLAN0_PORT1_EN_LBN 13
+#define FRF_AB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN0_PORT0_EN_LBN 12
+#define FRF_AB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN0_LBN 0
+#define FRF_AB_TX_VLAN0_WIDTH 12
+
+
+/*
+ * FR_AZ_TX_IPFIL_PORTEN_REG(128bit):
+ * Transmit filter control register
+ */
+#define FR_AZ_TX_IPFIL_PORTEN_REG_OFST 0x00000af0
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_MADR0_FIL_EN_LBN 64
+#define FRF_AZ_TX_MADR0_FIL_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL31_PORT_EN_LBN 62
+#define FRF_AB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL30_PORT_EN_LBN 60
+#define FRF_AB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL29_PORT_EN_LBN 58
+#define FRF_AB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL28_PORT_EN_LBN 56
+#define FRF_AB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL27_PORT_EN_LBN 54
+#define FRF_AB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL26_PORT_EN_LBN 52
+#define FRF_AB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL25_PORT_EN_LBN 50
+#define FRF_AB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL24_PORT_EN_LBN 48
+#define FRF_AB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL23_PORT_EN_LBN 46
+#define FRF_AB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL22_PORT_EN_LBN 44
+#define FRF_AB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL21_PORT_EN_LBN 42
+#define FRF_AB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL20_PORT_EN_LBN 40
+#define FRF_AB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL19_PORT_EN_LBN 38
+#define FRF_AB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL18_PORT_EN_LBN 36
+#define FRF_AB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL17_PORT_EN_LBN 34
+#define FRF_AB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL16_PORT_EN_LBN 32
+#define FRF_AB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL15_PORT_EN_LBN 30
+#define FRF_AB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL14_PORT_EN_LBN 28
+#define FRF_AB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL13_PORT_EN_LBN 26
+#define FRF_AB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL12_PORT_EN_LBN 24
+#define FRF_AB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL11_PORT_EN_LBN 22
+#define FRF_AB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL10_PORT_EN_LBN 20
+#define FRF_AB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL9_PORT_EN_LBN 18
+#define FRF_AB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL8_PORT_EN_LBN 16
+#define FRF_AB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL7_PORT_EN_LBN 14
+#define FRF_AB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL6_PORT_EN_LBN 12
+#define FRF_AB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL5_PORT_EN_LBN 10
+#define FRF_AB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL4_PORT_EN_LBN 8
+#define FRF_AB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL3_PORT_EN_LBN 6
+#define FRF_AB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL2_PORT_EN_LBN 4
+#define FRF_AB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL1_PORT_EN_LBN 2
+#define FRF_AB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL0_PORT_EN_LBN 0
+#define FRF_AB_TX_IPFIL0_PORT_EN_WIDTH 1
+
+
+/*
+ * FR_AB_TX_IPFIL_TBL(128bit):
+ * Transmit IP source address filter table
+ */
+#define FR_AB_TX_IPFIL_TBL_OFST 0x00000b00
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TX_IPFIL_TBL_STEP 16
+#define FR_AB_TX_IPFIL_TBL_ROWS 16
+
+#define FRF_AB_TX_IPFIL_MASK_1_LBN 96
+#define FRF_AB_TX_IPFIL_MASK_1_WIDTH 32
+#define FRF_AB_TX_IP_SRC_ADR_1_LBN 64
+#define FRF_AB_TX_IP_SRC_ADR_1_WIDTH 32
+#define FRF_AB_TX_IPFIL_MASK_0_LBN 32
+#define FRF_AB_TX_IPFIL_MASK_0_WIDTH 32
+#define FRF_AB_TX_IP_SRC_ADR_0_LBN 0
+#define FRF_AB_TX_IP_SRC_ADR_0_WIDTH 32
+
+
+/*
+ * FR_AB_MD_TXD_REG(128bit):
+ * PHY management transmit data register
+ */
+#define FR_AB_MD_TXD_REG_OFST 0x00000c00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_TXD_LBN 0
+#define FRF_AB_MD_TXD_WIDTH 16
+
+
+/*
+ * FR_AB_MD_RXD_REG(128bit):
+ * PHY management receive data register
+ */
+#define FR_AB_MD_RXD_REG_OFST 0x00000c10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_RXD_LBN 0
+#define FRF_AB_MD_RXD_WIDTH 16
+
+
+/*
+ * FR_AB_MD_CS_REG(128bit):
+ * PHY management configuration & status register
+ */
+#define FR_AB_MD_CS_REG_OFST 0x00000c20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_RD_EN_LBN 15
+#define FRF_AB_MD_RD_EN_WIDTH 1
+#define FRF_AB_MD_WR_EN_LBN 14
+#define FRF_AB_MD_WR_EN_WIDTH 1
+#define FRF_AB_MD_ADDR_CMD_LBN 13
+#define FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define FRF_AB_MD_PT_LBN 7
+#define FRF_AB_MD_PT_WIDTH 3
+#define FRF_AB_MD_PL_LBN 6
+#define FRF_AB_MD_PL_WIDTH 1
+#define FRF_AB_MD_INT_CLR_LBN 5
+#define FRF_AB_MD_INT_CLR_WIDTH 1
+#define FRF_AB_MD_GC_LBN 4
+#define FRF_AB_MD_GC_WIDTH 1
+#define FRF_AB_MD_PRSP_LBN 3
+#define FRF_AB_MD_PRSP_WIDTH 1
+#define FRF_AB_MD_RIC_LBN 2
+#define FRF_AB_MD_RIC_WIDTH 1
+#define FRF_AB_MD_RDC_LBN 1
+#define FRF_AB_MD_RDC_WIDTH 1
+#define FRF_AB_MD_WRC_LBN 0
+#define FRF_AB_MD_WRC_WIDTH 1
+
+
+/*
+ * FR_AB_MD_PHY_ADR_REG(128bit):
+ * PHY management PHY address register
+ */
+#define FR_AB_MD_PHY_ADR_REG_OFST 0x00000c30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PHY_ADR_LBN 0
+#define FRF_AB_MD_PHY_ADR_WIDTH 16
+
+
+/*
+ * FR_AB_MD_ID_REG(128bit):
+ * PHY management ID register
+ */
+#define FR_AB_MD_ID_REG_OFST 0x00000c40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PRT_ADR_LBN 11
+#define FRF_AB_MD_PRT_ADR_WIDTH 5
+#define FRF_AB_MD_DEV_ADR_LBN 6
+#define FRF_AB_MD_DEV_ADR_WIDTH 5
+
+
+/*
+ * FR_AB_MD_STAT_REG(128bit):
+ * PHY management status & mask register
+ */
+#define FR_AB_MD_STAT_REG_OFST 0x00000c50
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PINT_LBN 4
+#define FRF_AB_MD_PINT_WIDTH 1
+#define FRF_AB_MD_DONE_LBN 3
+#define FRF_AB_MD_DONE_WIDTH 1
+#define FRF_AB_MD_BSERR_LBN 2
+#define FRF_AB_MD_BSERR_WIDTH 1
+#define FRF_AB_MD_LNFL_LBN 1
+#define FRF_AB_MD_LNFL_WIDTH 1
+#define FRF_AB_MD_BSY_LBN 0
+#define FRF_AB_MD_BSY_WIDTH 1
+
+
+/*
+ * FR_AB_MAC_STAT_DMA_REG(128bit):
+ * Port MAC statistical counter DMA register
+ */
+#define FR_AB_MAC_STAT_DMA_REG_OFST 0x00000c60
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+#define FRF_AB_MAC_STAT_DMA_ADR_DW0_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_DW0_WIDTH 32
+#define FRF_AB_MAC_STAT_DMA_ADR_DW1_LBN 32
+#define FRF_AB_MAC_STAT_DMA_ADR_DW1_WIDTH 16
+
+
+/*
+ * FR_AB_MAC_CTRL_REG(128bit):
+ * Port MAC control register
+ */
+#define FR_AB_MAC_CTRL_REG_OFST 0x00000c80
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_XOFF_VAL_LBN 16
+#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define FRF_AB_MAC_UC_PROM_LBN 3
+#define FRF_AB_MAC_UC_PROM_WIDTH 1
+#define FRF_AB_MAC_LINK_STATUS_LBN 2
+#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define FRF_AB_MAC_SPEED_LBN 0
+#define FRF_AB_MAC_SPEED_WIDTH 2
+#define FRF_AB_MAC_SPEED_10M 0
+#define FRF_AB_MAC_SPEED_100M 1
+#define FRF_AB_MAC_SPEED_1G 2
+#define FRF_AB_MAC_SPEED_10G 3
+
+/*
+ * FR_BB_GEN_MODE_REG(128bit):
+ * General Purpose mode register (external interrupt mask)
+ */
+#define FR_BB_GEN_MODE_REG_OFST 0x00000c90
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+
+/*
+ * FR_AB_MAC_MC_HASH_REG0(128bit):
+ * Multicast address hash table
+ */
+#define FR_AB_MAC_MC_HASH0_REG_OFST 0x00000ca0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+#define FRF_AB_MAC_MCAST_HASH0_DW0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_DW0_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW1_LBN 32
+#define FRF_AB_MAC_MCAST_HASH0_DW1_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW2_LBN 64
+#define FRF_AB_MAC_MCAST_HASH0_DW2_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW3_LBN 96
+#define FRF_AB_MAC_MCAST_HASH0_DW3_WIDTH 32
+
+
+/*
+ * FR_AB_MAC_MC_HASH_REG1(128bit):
+ * Multicast address hash table
+ */
+#define FR_AB_MAC_MC_HASH1_REG_OFST 0x00000cb0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+#define FRF_AB_MAC_MCAST_HASH1_DW0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_DW0_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW1_LBN 32
+#define FRF_AB_MAC_MCAST_HASH1_DW1_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW2_LBN 64
+#define FRF_AB_MAC_MCAST_HASH1_DW2_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW3_LBN 96
+#define FRF_AB_MAC_MCAST_HASH1_DW3_WIDTH 32
+
+
+/*
+ * FR_AB_GM_CFG1_REG(32bit):
+ * GMAC configuration register 1
+ */
+#define FR_AB_GM_CFG1_REG_OFST 0x00000e00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_SW_RST_LBN 31
+#define FRF_AB_GM_SW_RST_WIDTH 1
+#define FRF_AB_GM_SIM_RST_LBN 30
+#define FRF_AB_GM_SIM_RST_WIDTH 1
+#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define FRF_AB_GM_LOOP_LBN 8
+#define FRF_AB_GM_LOOP_WIDTH 1
+#define FRF_AB_GM_RX_FC_EN_LBN 5
+#define FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define FRF_AB_GM_TX_FC_EN_LBN 4
+#define FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_RXEN_LBN 3
+#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define FRF_AB_GM_RX_EN_LBN 2
+#define FRF_AB_GM_RX_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_TXEN_LBN 1
+#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define FRF_AB_GM_TX_EN_LBN 0
+#define FRF_AB_GM_TX_EN_WIDTH 1
+
+
+/*
+ * FR_AB_GM_CFG2_REG(32bit):
+ * GMAC configuration register 2
+ */
+#define FR_AB_GM_CFG2_REG_OFST 0x00000e10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_PAMBL_LEN_LBN 12
+#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define FRF_AB_GM_IF_MODE_LBN 8
+#define FRF_AB_GM_IF_MODE_WIDTH 2
+#define FRF_AB_GM_IF_MODE_BYTE_MODE 2
+#define FRF_AB_GM_IF_MODE_NIBBLE_MODE 1
+#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define FRF_AB_GM_LEN_CHK_LBN 4
+#define FRF_AB_GM_LEN_CHK_WIDTH 1
+#define FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define FRF_AB_GM_CRC_EN_LBN 1
+#define FRF_AB_GM_CRC_EN_WIDTH 1
+#define FRF_AB_GM_FD_LBN 0
+#define FRF_AB_GM_FD_WIDTH 1
+
+
+/*
+ * FR_AB_GM_IPG_REG(32bit):
+ * GMAC IPG register
+ */
+#define FR_AB_GM_IPG_REG_OFST 0x00000e20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define FRF_AB_GM_B2B_IPG_LBN 0
+#define FRF_AB_GM_B2B_IPG_WIDTH 7
+
+
+/*
+ * FR_AB_GM_HD_REG(32bit):
+ * GMAC half duplex register
+ */
+#define FR_AB_GM_HD_REG_OFST 0x00000e30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define FRF_AB_GM_DIS_BOFF_LBN 17
+#define FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define FRF_AB_GM_COL_WIN_LBN 0
+#define FRF_AB_GM_COL_WIN_WIDTH 10
+
+
+/*
+ * FR_AB_GM_MAX_FLEN_REG(32bit):
+ * GMAC maximum frame length register
+ */
+#define FR_AB_GM_MAX_FLEN_REG_OFST 0x00000e40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_MAX_FLEN_LBN 0
+#define FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+
+/*
+ * FR_AB_GM_TEST_REG(32bit):
+ * GMAC test register
+ */
+#define FR_AB_GM_TEST_REG_OFST 0x00000e70
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_MAX_BOFF_LBN 3
+#define FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define FRF_AB_GM_TEST_PAUSE_LBN 1
+#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define FRF_AB_GM_SHORT_SLOT_LBN 0
+#define FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+
+/*
+ * FR_AB_GM_ADR1_REG(32bit):
+ * GMAC station address register 1
+ */
+#define FR_AB_GM_ADR1_REG_OFST 0x00000f00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ADR_B0_LBN 24
+#define FRF_AB_GM_ADR_B0_WIDTH 8
+#define FRF_AB_GM_ADR_B1_LBN 16
+#define FRF_AB_GM_ADR_B1_WIDTH 8
+#define FRF_AB_GM_ADR_B2_LBN 8
+#define FRF_AB_GM_ADR_B2_WIDTH 8
+#define FRF_AB_GM_ADR_B3_LBN 0
+#define FRF_AB_GM_ADR_B3_WIDTH 8
+
+
+/*
+ * FR_AB_GM_ADR2_REG(32bit):
+ * GMAC station address register 2
+ */
+#define FR_AB_GM_ADR2_REG_OFST 0x00000f10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ADR_B4_LBN 24
+#define FRF_AB_GM_ADR_B4_WIDTH 8
+#define FRF_AB_GM_ADR_B5_LBN 16
+#define FRF_AB_GM_ADR_B5_WIDTH 8
+
+
+/*
+ * FR_AB_GMF_CFG0_REG(32bit):
+ * GMAC FIFO configuration register 0
+ */
+#define FR_AB_GMF_CFG0_REG_OFST 0x00000f20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_FTFENRPLY_LBN 20
+#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define FRF_AB_GMF_STFENRPLY_LBN 19
+#define FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define FRF_AB_GMF_FRFENRPLY_LBN 18
+#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_SRFENRPLY_LBN 17
+#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_WTMENRPLY_LBN 16
+#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define FRF_AB_GMF_FTFENREQ_LBN 12
+#define FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define FRF_AB_GMF_STFENREQ_LBN 11
+#define FRF_AB_GMF_STFENREQ_WIDTH 1
+#define FRF_AB_GMF_FRFENREQ_LBN 10
+#define FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define FRF_AB_GMF_SRFENREQ_LBN 9
+#define FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define FRF_AB_GMF_WTMENREQ_LBN 8
+#define FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFT_LBN 4
+#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define FRF_AB_GMF_HSTRSTST_LBN 3
+#define FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFR_LBN 2
+#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTSR_LBN 1
+#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTWT_LBN 0
+#define FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+
+/*
+ * FR_AB_GMF_CFG1_REG(32bit):
+ * GMAC FIFO configuration register 1
+ */
+#define FR_AB_GMF_CFG1_REG_OFST 0x00000f30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGFRTH_LBN 16
+#define FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+
+/*
+ * FR_AB_GMF_CFG2_REG(32bit):
+ * GMAC FIFO configuration register 2
+ */
+#define FR_AB_GMF_CFG2_REG_OFST 0x00000f40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHWM_LBN 16
+#define FRF_AB_GMF_CFGHWM_WIDTH 6
+#define FRF_AB_GMF_CFGLWM_LBN 0
+#define FRF_AB_GMF_CFGLWM_WIDTH 6
+
+
+/*
+ * FR_AB_GMF_CFG3_REG(32bit):
+ * GMAC FIFO configuration register 3
+ */
+#define FR_AB_GMF_CFG3_REG_OFST 0x00000f50
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHWMFT_LBN 16
+#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define FRF_AB_GMF_CFGFTTH_LBN 0
+#define FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+
+/*
+ * FR_AB_GMF_CFG4_REG(32bit):
+ * GMAC FIFO configuration register 4
+ */
+#define FR_AB_GMF_CFG4_REG_OFST 0x00000f60
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+
+/*
+ * FR_AB_GMF_CFG5_REG(32bit):
+ * GMAC FIFO configuration register 5
+ */
+#define FR_AB_GMF_CFG5_REG_OFST 0x00000f70
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHDPLX_LBN 22
+#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define FRF_AB_GMF_SRFULL_LBN 21
+#define FRF_AB_GMF_SRFULL_WIDTH 1
+#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+
+/*
+ * FR_BB_TX_SRC_MAC_TBL(128bit):
+ * Transmit IP source address filter table
+ */
+#define FR_BB_TX_SRC_MAC_TBL_OFST 0x00001000
+/* falconb0=net_func_bar2 */
+#define FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
+
+#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_WIDTH 32
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_LBN 96
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_WIDTH 16
+#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_WIDTH 32
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_LBN 32
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_WIDTH 16
+
+
+/*
+ * FR_BB_TX_SRC_MAC_CTL_REG(128bit):
+ * Transmit MAC source address filter control
+ */
+#define FR_BB_TX_SRC_MAC_CTL_REG_OFST 0x00001100
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+
+/*
+ * FR_AB_XM_ADR_LO_REG(128bit):
+ * XGMAC address register low
+ */
+#define FR_AB_XM_ADR_LO_REG_OFST 0x00001200
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_ADR_LO_LBN 0
+#define FRF_AB_XM_ADR_LO_WIDTH 32
+
+
+/*
+ * FR_AB_XM_ADR_HI_REG(128bit):
+ * XGMAC address register high
+ */
+#define FR_AB_XM_ADR_HI_REG_OFST 0x00001210
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_ADR_HI_LBN 0
+#define FRF_AB_XM_ADR_HI_WIDTH 16
+
+
+/*
+ * FR_AB_XM_GLB_CFG_REG(128bit):
+ * XGMAC global configuration
+ */
+#define FR_AB_XM_GLB_CFG_REG_OFST 0x00001220
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define FRF_AB_XM_DEBUG_MODE_LBN 16
+#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define FRF_AB_XM_RX_STAT_EN_LBN 11
+#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_TX_STAT_EN_LBN 10
+#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_WAN_MODE_LBN 5
+#define FRF_AB_XM_WAN_MODE_WIDTH 1
+#define FRF_AB_XM_INTCLR_MODE_LBN 3
+#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define FRF_AB_XM_CORE_RST_LBN 0
+#define FRF_AB_XM_CORE_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_TX_CFG_REG(128bit):
+ * XGMAC transmit configuration
+ */
+#define FR_AB_XM_TX_CFG_REG_OFST 0x00001230
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_PROG_LBN 24
+#define FRF_AB_XM_TX_PROG_WIDTH 1
+#define FRF_AB_XM_IPG_LBN 16
+#define FRF_AB_XM_IPG_WIDTH 4
+#define FRF_AB_XM_FCNTL_LBN 10
+#define FRF_AB_XM_FCNTL_WIDTH 1
+#define FRF_AB_XM_TXCRC_LBN 8
+#define FRF_AB_XM_TXCRC_WIDTH 1
+#define FRF_AB_XM_EDRC_LBN 6
+#define FRF_AB_XM_EDRC_WIDTH 1
+#define FRF_AB_XM_AUTO_PAD_LBN 5
+#define FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define FRF_AB_XM_TX_PRMBL_LBN 2
+#define FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define FRF_AB_XM_TXEN_LBN 1
+#define FRF_AB_XM_TXEN_WIDTH 1
+#define FRF_AB_XM_TX_RST_LBN 0
+#define FRF_AB_XM_TX_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_RX_CFG_REG(128bit):
+ * XGMAC receive configuration
+ */
+#define FR_AB_XM_RX_CFG_REG_OFST 0x00001240
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_PASS_LENERR_LBN 26
+#define FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_REJ_BCAST_LBN 20
+#define FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define FRF_AB_XM_RXCRC_LBN 3
+#define FRF_AB_XM_RXCRC_WIDTH 1
+#define FRF_AB_XM_RX_PRMBL_LBN 2
+#define FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define FRF_AB_XM_RXEN_LBN 1
+#define FRF_AB_XM_RXEN_WIDTH 1
+#define FRF_AB_XM_RX_RST_LBN 0
+#define FRF_AB_XM_RX_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_MGT_INT_MASK(128bit):
+ * documentation to be written for sum_XM_MGT_INT_MASK
+ */
+#define FR_AB_XM_MGT_INT_MASK_OFST 0x00001250
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+
+/*
+ * FR_AB_XM_FC_REG(128bit):
+ * XGMAC flow control register
+ */
+#define FR_AB_XM_FC_REG_OFST 0x00001270
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_PAUSE_TIME_LBN 16
+#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_MCNTL_PASS_LBN 8
+#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define FRF_AB_XM_ZPAUSE_LBN 2
+#define FRF_AB_XM_ZPAUSE_WIDTH 1
+#define FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define FRF_AB_XM_DIS_FCNTL_LBN 0
+#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+
+/*
+ * FR_AB_XM_PAUSE_TIME_REG(128bit):
+ * XGMAC pause time register
+ */
+#define FR_AB_XM_PAUSE_TIME_REG_OFST 0x00001290
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+
+/*
+ * FR_AB_XM_TX_PARAM_REG(128bit):
+ * XGMAC transmit parameter register
+ */
+#define FR_AB_XM_TX_PARAM_REG_OFST 0x000012d0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define FRF_AB_XM_PAD_CHAR_LBN 0
+#define FRF_AB_XM_PAD_CHAR_WIDTH 8
+
+
+/*
+ * FR_AB_XM_RX_PARAM_REG(128bit):
+ * XGMAC receive parameter register
+ */
+#define FR_AB_XM_RX_PARAM_REG_OFST 0x000012e0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+
+/*
+ * FR_AB_XM_MGT_INT_MSK_REG(128bit):
+ * XGMAC management interrupt mask register
+ */
+#define FR_AB_XM_MGT_INT_REG_OFST 0x000012f0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_RMTFLT_LBN 1
+#define FRF_AB_XM_RMTFLT_WIDTH 1
+#define FRF_AB_XM_LCLFLT_LBN 0
+#define FRF_AB_XM_LCLFLT_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PWR_RST_REG(128bit):
+ * XGXS/XAUI powerdown/reset register
+ */
+#define FR_AB_XX_PWR_RST_REG_OFST 0x00001300
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_PWRDND_SIG_LBN 31
+#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define FRF_AB_XX_SIM_MODE_LBN 27
+#define FRF_AB_XX_SIM_MODE_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETD_SIG_LBN 23
+#define FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define FRF_AB_XX_RESETC_SIG_LBN 22
+#define FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define FRF_AB_XX_RESETB_SIG_LBN 21
+#define FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETA_SIG_LBN 20
+#define FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define FRF_AB_XX_SD_RST_ACT_LBN 16
+#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define FRF_AB_XX_PWRDND_EN_LBN 15
+#define FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNC_EN_LBN 14
+#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNB_EN_LBN 13
+#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNA_EN_LBN 12
+#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define FRF_AB_XX_RESETD_EN_LBN 7
+#define FRF_AB_XX_RESETD_EN_WIDTH 1
+#define FRF_AB_XX_RESETC_EN_LBN 6
+#define FRF_AB_XX_RESETC_EN_WIDTH 1
+#define FRF_AB_XX_RESETB_EN_LBN 5
+#define FRF_AB_XX_RESETB_EN_WIDTH 1
+#define FRF_AB_XX_RESETA_EN_LBN 4
+#define FRF_AB_XX_RESETA_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define FRF_AB_XX_RST_XX_EN_LBN 0
+#define FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+
+/*
+ * FR_AB_XX_SD_CTL_REG(128bit):
+ * XGXS/XAUI powerdown/reset control register
+ */
+#define FR_AB_XX_SD_CTL_REG_OFST 0x00001310
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_TERMADJ1_LBN 17
+#define FRF_AB_XX_TERMADJ1_WIDTH 1
+#define FRF_AB_XX_TERMADJ0_LBN 16
+#define FRF_AB_XX_TERMADJ0_WIDTH 1
+#define FRF_AB_XX_HIDRVD_LBN 15
+#define FRF_AB_XX_HIDRVD_WIDTH 1
+#define FRF_AB_XX_LODRVD_LBN 14
+#define FRF_AB_XX_LODRVD_WIDTH 1
+#define FRF_AB_XX_HIDRVC_LBN 13
+#define FRF_AB_XX_HIDRVC_WIDTH 1
+#define FRF_AB_XX_LODRVC_LBN 12
+#define FRF_AB_XX_LODRVC_WIDTH 1
+#define FRF_AB_XX_HIDRVB_LBN 11
+#define FRF_AB_XX_HIDRVB_WIDTH 1
+#define FRF_AB_XX_LODRVB_LBN 10
+#define FRF_AB_XX_LODRVB_WIDTH 1
+#define FRF_AB_XX_HIDRVA_LBN 9
+#define FRF_AB_XX_HIDRVA_WIDTH 1
+#define FRF_AB_XX_LODRVA_LBN 8
+#define FRF_AB_XX_LODRVA_WIDTH 1
+#define FRF_AB_XX_LPBKD_LBN 3
+#define FRF_AB_XX_LPBKD_WIDTH 1
+#define FRF_AB_XX_LPBKC_LBN 2
+#define FRF_AB_XX_LPBKC_WIDTH 1
+#define FRF_AB_XX_LPBKB_LBN 1
+#define FRF_AB_XX_LPBKB_WIDTH 1
+#define FRF_AB_XX_LPBKA_LBN 0
+#define FRF_AB_XX_LPBKA_WIDTH 1
+
+
+/*
+ * FR_AB_XX_TXDRV_CTL_REG(128bit):
+ * XAUI SerDes transmit drive control register
+ */
+#define FR_AB_XX_TXDRV_CTL_REG_OFST 0x00001320
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_DEQD_LBN 28
+#define FRF_AB_XX_DEQD_WIDTH 4
+#define FRF_AB_XX_DEQC_LBN 24
+#define FRF_AB_XX_DEQC_WIDTH 4
+#define FRF_AB_XX_DEQB_LBN 20
+#define FRF_AB_XX_DEQB_WIDTH 4
+#define FRF_AB_XX_DEQA_LBN 16
+#define FRF_AB_XX_DEQA_WIDTH 4
+#define FRF_AB_XX_DTXD_LBN 12
+#define FRF_AB_XX_DTXD_WIDTH 4
+#define FRF_AB_XX_DTXC_LBN 8
+#define FRF_AB_XX_DTXC_WIDTH 4
+#define FRF_AB_XX_DTXB_LBN 4
+#define FRF_AB_XX_DTXB_WIDTH 4
+#define FRF_AB_XX_DTXA_LBN 0
+#define FRF_AB_XX_DTXA_WIDTH 4
+
+
+/*
+ * FR_AB_XX_PRBS_CTL_REG(128bit):
+ * documentation to be written for sum_XX_PRBS_CTL_REG
+ */
+#define FR_AB_XX_PRBS_CTL_REG_OFST 0x00001330
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PRBS_CHK_REG(128bit):
+ * documentation to be written for sum_XX_PRBS_CHK_REG
+ */
+#define FR_AB_XX_PRBS_CHK_REG_OFST 0x00001340
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_REV_LB_EN_LBN 16
+#define FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PRBS_ERR_REG(128bit):
+ * documentation to be written for sum_XX_PRBS_ERR_REG
+ */
+#define FR_AB_XX_PRBS_ERR_REG_OFST 0x00001350
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+
+/*
+ * FR_AB_XX_CORE_STAT_REG(128bit):
+ * XAUI XGXS core status register
+ */
+#define FR_AB_XX_CORE_STAT_REG_OFST 0x00001360
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_FORCE_SIG3_LBN 31
+#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_LBN 29
+#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_LBN 27
+#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_LBN 25
+#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define FRF_AB_XX_MATCH_FAULT_LBN 21
+#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define FRF_AB_XX_ALIGN_DONE_LBN 20
+#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT3_LBN 19
+#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT2_LBN 18
+#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT1_LBN 17
+#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT0_LBN 16
+#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH3_LBN 3
+#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH2_LBN 2
+#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH1_LBN 1
+#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH0_LBN 0
+#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+
+/*
+ * FR_AA_RX_DESC_PTR_TBL_KER(128bit):
+ * Receive descriptor pointer table
+ */
+#define FR_AA_RX_DESC_PTR_TBL_KER_OFST 0x00011800
+/* falcona0=net_func_bar2 */
+#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/*
+ * FR_AZ_RX_DESC_PTR_TBL(128bit):
+ * Receive descriptor pointer table
+ */
+#define FR_AZ_RX_DESC_PTR_TBL_OFST 0x00f40000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_RX_DESC_PTR_TBL_STEP 16
+#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define FR_AB_RX_DESC_PTR_TBL_ROWS 4096
+
+#define FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define FRF_AZ_RX_RESET_LBN 89
+#define FRF_AZ_RX_RESET_WIDTH 1
+#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_RX_DESCQ_SIZE_4K 3
+#define FFE_AZ_RX_DESCQ_SIZE_2K 2
+#define FFE_AZ_RX_DESCQ_SIZE_1K 1
+#define FFE_AZ_RX_DESCQ_SIZE_512 0
+#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
+#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
+#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
+#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
+#define FRF_AZ_RX_DESCQ_EN_LBN 0
+#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
+
+
+/*
+ * FR_AA_TX_DESC_PTR_TBL_KER(128bit):
+ * Transmit descriptor pointer
+ */
+#define FR_AA_TX_DESC_PTR_TBL_KER_OFST 0x00011900
+/* falcona0=net_func_bar2 */
+#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
+/*
+ * FR_AZ_TX_DESC_PTR_TBL(128bit):
+ * Transmit descriptor pointer
+ */
+#define FR_AZ_TX_DESC_PTR_TBL_OFST 0x00f50000
+/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_TX_DESC_PTR_TBL_STEP 16
+#define FR_AB_TX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
+
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
+#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
+#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
+#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
+#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
+#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
+#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
+#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
+#define FRF_AZ_TX_DESCQ_EN_LBN 88
+#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
+#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
+#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_TX_DESCQ_SIZE_4K 3
+#define FFE_AZ_TX_DESCQ_SIZE_2K 2
+#define FFE_AZ_TX_DESCQ_SIZE_1K 1
+#define FFE_AZ_TX_DESCQ_SIZE_512 0
+#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
+#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
+#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
+#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
+
+
+/*
+ * FR_AA_EVQ_PTR_TBL_KER(128bit):
+ * Event queue pointer table
+ */
+#define FR_AA_EVQ_PTR_TBL_KER_OFST 0x00011a00
+/* falcona0=net_func_bar2 */
+#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
+#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
+/*
+ * FR_AZ_EVQ_PTR_TBL(128bit):
+ * Event queue pointer table
+ */
+#define FR_AZ_EVQ_PTR_TBL_OFST 0x00f60000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_EVQ_PTR_TBL_STEP 16
+#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
+#define FR_AB_EVQ_PTR_TBL_ROWS 4096
+
+#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
+#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
+#define FRF_AZ_EVQ_WKUP_OR_INT_EN_LBN 39
+#define FRF_AZ_EVQ_WKUP_OR_INT_EN_WIDTH 1
+#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
+#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
+#define FRF_AZ_EVQ_EN_LBN 23
+#define FRF_AZ_EVQ_EN_WIDTH 1
+#define FRF_AZ_EVQ_SIZE_LBN 20
+#define FRF_AZ_EVQ_SIZE_WIDTH 3
+#define FFE_AZ_EVQ_SIZE_32K 6
+#define FFE_AZ_EVQ_SIZE_16K 5
+#define FFE_AZ_EVQ_SIZE_8K 4
+#define FFE_AZ_EVQ_SIZE_4K 3
+#define FFE_AZ_EVQ_SIZE_2K 2
+#define FFE_AZ_EVQ_SIZE_1K 1
+#define FFE_AZ_EVQ_SIZE_512 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
+
+
+/*
+ * FR_AA_BUF_HALF_TBL_KER(64bit):
+ * Buffer table in half buffer table mode direct access by driver
+ */
+#define FR_AA_BUF_HALF_TBL_KER_OFST 0x00018000
+/* falcona0=net_func_bar2 */
+#define FR_AA_BUF_HALF_TBL_KER_STEP 8
+#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
+/*
+ * FR_AZ_BUF_HALF_TBL(64bit):
+ * Buffer table in half buffer table mode direct access by driver
+ */
+#define FR_AZ_BUF_HALF_TBL_OFST 0x00800000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_BUF_HALF_TBL_STEP 8
+#define FR_CZ_BUF_HALF_TBL_ROWS 147456
+#define FR_AB_BUF_HALF_TBL_ROWS 524288
+
+#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
+#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
+
+
+/*
+ * FR_AA_BUF_FULL_TBL_KER(64bit):
+ * Buffer table in full buffer table mode direct access by driver
+ */
+#define FR_AA_BUF_FULL_TBL_KER_OFST 0x00018000
+/* falcona0=net_func_bar2 */
+#define FR_AA_BUF_FULL_TBL_KER_STEP 8
+#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
+/*
+ * FR_AZ_BUF_FULL_TBL(64bit):
+ * Buffer table in full buffer table mode direct access by driver
+ */
+#define FR_AZ_BUF_FULL_TBL_OFST 0x00800000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_BUF_FULL_TBL_STEP 8
+
+#define FR_CZ_BUF_FULL_TBL_ROWS 147456
+#define FR_AB_BUF_FULL_TBL_ROWS 917504
+
+#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
+#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
+#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
+#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
+#define FRF_AZ_BUF_ADR_REGION_LBN 48
+#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
+#define FFE_AZ_BUF_ADR_REGN3 3
+#define FFE_AZ_BUF_ADR_REGN2 2
+#define FFE_AZ_BUF_ADR_REGN1 1
+#define FFE_AZ_BUF_ADR_REGN0 0
+#define FRF_AZ_BUF_ADR_FBUF_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
+#define FRF_AZ_BUF_ADR_FBUF_DW0_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_DW0_WIDTH 32
+#define FRF_AZ_BUF_ADR_FBUF_DW1_LBN 46
+#define FRF_AZ_BUF_ADR_FBUF_DW1_WIDTH 2
+#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
+
+
+/*
+ * FR_AZ_RX_FILTER_TBL0(128bit):
+ * TCP/IPv4 Receive filter table
+ */
+#define FR_AZ_RX_FILTER_TBL0_OFST 0x00f00000
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_RX_FILTER_TBL0_STEP 32
+#define FR_AZ_RX_FILTER_TBL0_ROWS 8192
+/*
+ * FR_AB_RX_FILTER_TBL1(128bit):
+ * TCP/IPv4 Receive filter table
+ */
+#define FR_AB_RX_FILTER_TBL1_OFST 0x00f00010
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_RX_FILTER_TBL1_STEP 32
+#define FR_AB_RX_FILTER_TBL1_ROWS 8192
+
+#define FRF_BZ_RSS_EN_LBN 110
+#define FRF_BZ_RSS_EN_WIDTH 1
+#define FRF_BZ_SCATTER_EN_LBN 109
+#define FRF_BZ_SCATTER_EN_WIDTH 1
+#define FRF_AZ_TCP_UDP_LBN 108
+#define FRF_AZ_TCP_UDP_WIDTH 1
+#define FRF_AZ_RXQ_ID_LBN 96
+#define FRF_AZ_RXQ_ID_WIDTH 12
+#define FRF_AZ_DEST_IP_LBN 64
+#define FRF_AZ_DEST_IP_WIDTH 32
+#define FRF_AZ_DEST_PORT_TCP_LBN 48
+#define FRF_AZ_DEST_PORT_TCP_WIDTH 16
+#define FRF_AZ_SRC_IP_LBN 16
+#define FRF_AZ_SRC_IP_WIDTH 32
+#define FRF_AZ_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_AZ_SRC_TCP_DEST_UDP_WIDTH 16
+
+
+/*
+ * FR_CZ_RX_MAC_FILTER_TBL0(128bit):
+ * Receive Ethernet filter table
+ */
+#define FR_CZ_RX_MAC_FILTER_TBL0_OFST 0x00f00010
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
+#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
+
+#define FRF_CZ_RMFT_RSS_EN_LBN 75
+#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
+#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
+#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
+#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
+#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_RMFT_RXQ_ID_LBN 61
+#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
+#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_RMFT_DEST_MAC_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48
+#define FRF_CZ_RMFT_DEST_MAC_DW0_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_DW0_WIDTH 32
+#define FRF_CZ_RMFT_DEST_MAC_DW1_LBN 44
+#define FRF_CZ_RMFT_DEST_MAC_DW1_WIDTH 16
+#define FRF_CZ_RMFT_VLAN_ID_LBN 0
+#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_TIMER_TBL(128bit):
+ * Timer table
+ */
+#define FR_AZ_TIMER_TBL_OFST 0x00f70000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_TIMER_TBL_STEP 16
+#define FR_CZ_TIMER_TBL_ROWS 1024
+#define FR_AB_TIMER_TBL_ROWS 4096
+
+#define FRF_CZ_TIMER_Q_EN_LBN 33
+#define FRF_CZ_TIMER_Q_EN_WIDTH 1
+#define FRF_CZ_INT_ARMD_LBN 32
+#define FRF_CZ_INT_ARMD_WIDTH 1
+#define FRF_CZ_INT_PEND_LBN 31
+#define FRF_CZ_INT_PEND_WIDTH 1
+#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
+#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
+#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
+#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
+#define FRF_CZ_TIMER_MODE_LBN 14
+#define FRF_CZ_TIMER_MODE_WIDTH 2
+#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
+#define FFE_CZ_TIMER_MODE_TRIG_START 2
+#define FFE_CZ_TIMER_MODE_IMMED_START 1
+#define FFE_CZ_TIMER_MODE_DIS 0
+#define FRF_AB_TIMER_MODE_LBN 12
+#define FRF_AB_TIMER_MODE_WIDTH 2
+#define FFE_AB_TIMER_MODE_INT_HLDOFF 2
+#define FFE_AB_TIMER_MODE_TRIG_START 2
+#define FFE_AB_TIMER_MODE_IMMED_START 1
+#define FFE_AB_TIMER_MODE_DIS 0
+#define FRF_CZ_TIMER_VAL_LBN 0
+#define FRF_CZ_TIMER_VAL_WIDTH 14
+#define FRF_AB_TIMER_VAL_LBN 0
+#define FRF_AB_TIMER_VAL_WIDTH 12
+
+
+/*
+ * FR_BZ_TX_PACE_TBL(128bit):
+ * Transmit pacing table
+ */
+#define FR_BZ_TX_PACE_TBL_OFST 0x00f80000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2 */
+#define FR_AZ_TX_PACE_TBL_STEP 16
+#define FR_CZ_TX_PACE_TBL_ROWS 1024
+#define FR_BB_TX_PACE_TBL_ROWS 4096
+/*
+ * FR_AA_TX_PACE_TBL(128bit):
+ * Transmit pacing table
+ */
+#define FR_AA_TX_PACE_TBL_OFST 0x00f80040
+/* falcona0=char_func_bar0 */
+/* FR_AZ_TX_PACE_TBL_STEP 16 */
+#define FR_AA_TX_PACE_TBL_ROWS 4092
+
+#define FRF_AZ_TX_PACE_LBN 0
+#define FRF_AZ_TX_PACE_WIDTH 5
+
+
+/*
+ * FR_BZ_RX_INDIRECTION_TBL(7bit):
+ * RX Indirection Table
+ */
+#define FR_BZ_RX_INDIRECTION_TBL_OFST 0x00fb0000
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
+#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
+
+#define FRF_BZ_IT_QUEUE_LBN 0
+#define FRF_BZ_IT_QUEUE_WIDTH 6
+
+
+/*
+ * FR_CZ_TX_FILTER_TBL0(128bit):
+ * TCP/IPv4 Transmit filter table
+ */
+#define FR_CZ_TX_FILTER_TBL0_OFST 0x00fc0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_TX_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
+
+#define FRF_CZ_TIFT_TCP_UDP_LBN 108
+#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
+#define FRF_CZ_TIFT_TXQ_ID_LBN 96
+#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TIFT_DEST_IP_LBN 64
+#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
+#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
+#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
+#define FRF_CZ_TIFT_SRC_IP_LBN 16
+#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
+
+
+/*
+ * FR_CZ_TX_MAC_FILTER_TBL0(128bit):
+ * Transmit Ethernet filter table
+ */
+#define FR_CZ_TX_MAC_FILTER_TBL0_OFST 0x00fe0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
+
+#define FRF_CZ_TMFT_TXQ_ID_LBN 61
+#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_TMFT_SRC_MAC_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48
+#define FRF_CZ_TMFT_SRC_MAC_DW0_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_DW0_WIDTH 32
+#define FRF_CZ_TMFT_SRC_MAC_DW1_LBN 44
+#define FRF_CZ_TMFT_SRC_MAC_DW1_WIDTH 16
+#define FRF_CZ_TMFT_VLAN_ID_LBN 0
+#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
+
+
+/*
+ * FR_CZ_MC_TREG_SMEM(32bit):
+ * MC Shared Memory
+ */
+#define FR_CZ_MC_TREG_SMEM_OFST 0x00ff0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_MC_TREG_SMEM_STEP 4
+#define FR_CZ_MC_TREG_SMEM_ROWS 512
+
+#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
+#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
+
+
+/*
+ * FR_BB_MSIX_VECTOR_TABLE(128bit):
+ * MSIX Vector Table
+ */
+#define FR_BB_MSIX_VECTOR_TABLE_OFST 0x00ff0000
+/* falconb0=net_func_bar2 */
+#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
+#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
+/*
+ * FR_CZ_MSIX_VECTOR_TABLE(128bit):
+ * MSIX Vector Table
+ */
+#define FR_CZ_MSIX_VECTOR_TABLE_OFST 0x00000000
+/* sienaa0=pci_f0_bar4 */
+/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
+#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
+
+#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
+#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
+#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
+#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
+#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
+#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
+
+
+/*
+ * FR_BB_MSIX_PBA_TABLE(32bit):
+ * MSIX Pending Bit Array
+ */
+#define FR_BB_MSIX_PBA_TABLE_OFST 0x00ff2000
+/* falconb0=net_func_bar2 */
+#define FR_BZ_MSIX_PBA_TABLE_STEP 4
+#define FR_BB_MSIX_PBA_TABLE_ROWS 2
+/*
+ * FR_CZ_MSIX_PBA_TABLE(32bit):
+ * MSIX Pending Bit Array
+ */
+#define FR_CZ_MSIX_PBA_TABLE_OFST 0x00008000
+/* sienaa0=pci_f0_bar4 */
+/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
+#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
+
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+
+/*
+ * FR_AZ_SRM_DBG_REG(64bit):
+ * SRAM debug access
+ */
+#define FR_AZ_SRM_DBG_REG_OFST 0x03000000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_SRM_DBG_REG_STEP 8
+
+#define FR_CZ_SRM_DBG_REG_ROWS 262144
+#define FR_AB_SRM_DBG_REG_ROWS 2097152
+
+#define FRF_AZ_SRM_DBG_LBN 0
+#define FRF_AZ_SRM_DBG_WIDTH 64
+#define FRF_AZ_SRM_DBG_DW0_LBN 0
+#define FRF_AZ_SRM_DBG_DW0_WIDTH 32
+#define FRF_AZ_SRM_DBG_DW1_LBN 32
+#define FRF_AZ_SRM_DBG_DW1_WIDTH 32
+
+
+/*
+ * FR_AA_INT_ACK_CHAR(32bit):
+ * CHAR interrupt acknowledge register
+ */
+#define FR_AA_INT_ACK_CHAR_OFST 0x00000060
+/* falcona0=char_func_bar0 */
+
+#define FRF_AA_INT_ACK_CHAR_FIELD_LBN 0
+#define FRF_AA_INT_ACK_CHAR_FIELD_WIDTH 32
+
+
+/* FS_DRIVER_EV */
+#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
+#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
+#define FSE_AZ_TX_DSC_ERROR_EV 15
+#define FSE_AZ_RX_DSC_ERROR_EV 14
+#define FSE_AZ_RX_RECOVER_EV 11
+#define FSE_AZ_TIMER_EV 10
+#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
+#define FSE_AZ_WAKE_UP_EV 6
+#define FSE_AZ_SRM_UPD_DONE_EV 5
+#define FSE_AZ_EVQ_NOT_EN_EV 3
+#define FSE_AZ_EVQ_INIT_DONE_EV 2
+#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
+#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
+
+
+/* FS_EVENT_ENTRY */
+#define FSF_AZ_EV_CODE_LBN 60
+#define FSF_AZ_EV_CODE_WIDTH 4
+#define FSE_AZ_EV_CODE_USER_EV 8
+#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
+#define FSE_AZ_EV_CODE_GLOBAL_EV 6
+#define FSE_AZ_EV_CODE_DRIVER_EV 5
+#define FSE_AZ_EV_CODE_TX_EV 2
+#define FSE_AZ_EV_CODE_RX_EV 0
+#define FSF_AZ_EV_DATA_LBN 0
+#define FSF_AZ_EV_DATA_WIDTH 60
+#define FSF_AZ_EV_DATA_DW0_LBN 0
+#define FSF_AZ_EV_DATA_DW0_WIDTH 32
+#define FSF_AZ_EV_DATA_DW1_LBN 32
+#define FSF_AZ_EV_DATA_DW1_WIDTH 28
+
+
+/* FS_GLOBAL_EV */
+#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 12
+#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_BZ_GLB_EV_XG_MNT_INTR_LBN 11
+#define FSF_BZ_GLB_EV_XG_MNT_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_LBN 10
+#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_XG_PHY0_INTR_LBN 9
+#define FSF_AZ_GLB_EV_XG_PHY0_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_G_PHY0_INTR_LBN 7
+#define FSF_AZ_GLB_EV_G_PHY0_INTR_WIDTH 1
+
+
+/* FS_RX_EV */
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
+#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
+#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_OK_LBN 56
+#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
+#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
+#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
+#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
+#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
+#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
+#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
+#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
+#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
+#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
+#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_OTHER 2
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_UDP 1
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_TCP 0
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
+#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
+#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
+#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
+#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
+#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
+#define FSF_AZ_RX_EV_PORT_LBN 30
+#define FSF_AZ_RX_EV_PORT_WIDTH 1
+#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
+#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
+#define FSF_AZ_RX_EV_SOP_LBN 15
+#define FSF_AZ_RX_EV_SOP_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
+
+
+/* FS_RX_KER_DESC */
+#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
+#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
+#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
+#define FSF_AZ_RX_KER_BUF_ADDR_DW0_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_DW0_WIDTH 32
+#define FSF_AZ_RX_KER_BUF_ADDR_DW1_LBN 32
+#define FSF_AZ_RX_KER_BUF_ADDR_DW1_WIDTH 14
+
+
+/* FS_RX_USER_DESC */
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
+#define FSF_AZ_RX_USER_BUF_ID_LBN 0
+#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
+
+
+/* FS_TX_EV */
+#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
+#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
+#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_TX_EV_PORT_LBN 16
+#define FSF_AZ_TX_EV_PORT_WIDTH 1
+#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
+#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_COMP_LBN 12
+#define FSF_AZ_TX_EV_COMP_WIDTH 1
+#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
+
+
+/* FS_TX_KER_DESC */
+#define FSF_AZ_TX_KER_CONT_LBN 62
+#define FSF_AZ_TX_KER_CONT_WIDTH 1
+#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
+#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
+#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
+#define FSF_AZ_TX_KER_BUF_ADDR_DW0_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_DW0_WIDTH 32
+#define FSF_AZ_TX_KER_BUF_ADDR_DW1_LBN 32
+#define FSF_AZ_TX_KER_BUF_ADDR_DW1_WIDTH 14
+
+
+/* FS_TX_USER_DESC */
+#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
+#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
+#define FSF_AZ_TX_USER_CONT_LBN 46
+#define FSF_AZ_TX_USER_CONT_WIDTH 1
+#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
+#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
+#define FSF_AZ_TX_USER_BUF_ID_LBN 13
+#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
+#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
+#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
+
+
+/* FS_USER_EV */
+#define FSF_CZ_USER_QID_LBN 32
+#define FSF_CZ_USER_QID_WIDTH 10
+#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
+#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
+
+
+/* FS_NET_IVEC */
+#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
+#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
+#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
+#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
+#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
+#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
+
+
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+
+
+/**************************************************************************
+ *
+ * Falcon non-volatile configuration
+ *
+ **************************************************************************
+ */
+
+
+#define FR_AZ_TX_PACE_TBL_OFST FR_BZ_TX_PACE_TBL_OFST
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+
+#endif /* _SYS_EFX_REGS_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_ef10.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_ef10.h
new file mode 100644
index 00000000..968aaaca
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_ef10.h
@@ -0,0 +1,727 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_EFX_EF10_REGS_H
+#define _SYS_EFX_EF10_REGS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**************************************************************************
+ * NOTE: the line below marks the start of the autogenerated section
+ * EF10 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/*
+ * BIU_HW_REV_ID_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_HW_REV_ID_REG_OFST 0x00000000
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
+#define ER_DZ_BIU_HW_REV_ID_REG_RESET 0xeb14face
+
+
+#define ERF_DZ_HW_REV_ID_LBN 0
+#define ERF_DZ_HW_REV_ID_WIDTH 32
+
+
+/*
+ * BIU_MC_SFT_STATUS_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_OFST 0x00000010
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_STEP 4
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_ROWS 8
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_RESET 0x1111face
+
+
+#define ERF_DZ_MC_SFT_STATUS_LBN 0
+#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
+
+
+/*
+ * BIU_INT_ISR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_INT_ISR_REG_OFST 0x00000090
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
+#define ER_DZ_BIU_INT_ISR_REG_RESET 0x0
+
+
+#define ERF_DZ_ISR_REG_LBN 0
+#define ERF_DZ_ISR_REG_WIDTH 32
+
+
+/*
+ * MC_DB_LWRD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_MC_DB_LWRD_REG_OFST 0x00000200
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
+#define ER_DZ_MC_DB_LWRD_REG_RESET 0x0
+
+
+#define ERF_DZ_MC_DOORBELL_L_LBN 0
+#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
+
+
+/*
+ * MC_DB_HWRD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_MC_DB_HWRD_REG_OFST 0x00000204
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
+#define ER_DZ_MC_DB_HWRD_REG_RESET 0x0
+
+
+#define ERF_DZ_MC_DOORBELL_H_LBN 0
+#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
+
+
+/*
+ * EVQ_RPTR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_EVQ_RPTR_REG_OFST 0x00000400
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
+#define ER_DZ_EVQ_RPTR_REG_STEP 8192
+#define ER_DZ_EVQ_RPTR_REG_ROWS 2048
+#define ER_DZ_EVQ_RPTR_REG_RESET 0x0
+
+
+#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_DZ_EVQ_RPTR_LBN 0
+#define ERF_DZ_EVQ_RPTR_WIDTH 15
+
+
+/*
+ * EVQ_RPTR_REG_64K(32bit):
+ *
+ */
+
+#define ER_FZ_EVQ_RPTR_REG_64K_OFST 0x00000400
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_EVQ_RPTR_REG_64K_STEP 65536
+#define ER_FZ_EVQ_RPTR_REG_64K_ROWS 2048
+#define ER_FZ_EVQ_RPTR_REG_64K_RESET 0x0
+
+
+#define ERF_FZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_FZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_FZ_EVQ_RPTR_LBN 0
+#define ERF_FZ_EVQ_RPTR_WIDTH 15
+
+
+/*
+ * EVQ_RPTR_REG_16K(32bit):
+ *
+ */
+
+#define ER_FZ_EVQ_RPTR_REG_16K_OFST 0x00000400
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_EVQ_RPTR_REG_16K_STEP 16384
+#define ER_FZ_EVQ_RPTR_REG_16K_ROWS 2048
+#define ER_FZ_EVQ_RPTR_REG_16K_RESET 0x0
+
+
+/* defined as ERF_FZ_EVQ_RPTR_VLD_LBN 15; */
+/* defined as ERF_FZ_EVQ_RPTR_VLD_WIDTH 1 */
+/* defined as ERF_FZ_EVQ_RPTR_LBN 0; */
+/* defined as ERF_FZ_EVQ_RPTR_WIDTH 15 */
+
+
+/*
+ * EVQ_TMR_REG_64K(32bit):
+ *
+ */
+
+#define ER_FZ_EVQ_TMR_REG_64K_OFST 0x00000420
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_EVQ_TMR_REG_64K_STEP 65536
+#define ER_FZ_EVQ_TMR_REG_64K_ROWS 2048
+#define ER_FZ_EVQ_TMR_REG_64K_RESET 0x0
+
+
+#define ERF_FZ_TC_TMR_REL_VAL_LBN 16
+#define ERF_FZ_TC_TMR_REL_VAL_WIDTH 14
+#define ERF_FZ_TC_TIMER_MODE_LBN 14
+#define ERF_FZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_FZ_TC_TIMER_VAL_LBN 0
+#define ERF_FZ_TC_TIMER_VAL_WIDTH 14
+
+
+/*
+ * EVQ_TMR_REG_16K(32bit):
+ *
+ */
+
+#define ER_FZ_EVQ_TMR_REG_16K_OFST 0x00000420
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_EVQ_TMR_REG_16K_STEP 16384
+#define ER_FZ_EVQ_TMR_REG_16K_ROWS 2048
+#define ER_FZ_EVQ_TMR_REG_16K_RESET 0x0
+
+
+/* defined as ERF_FZ_TC_TMR_REL_VAL_LBN 16; */
+/* defined as ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 */
+/* defined as ERF_FZ_TC_TIMER_MODE_LBN 14; */
+/* defined as ERF_FZ_TC_TIMER_MODE_WIDTH 2 */
+/* defined as ERF_FZ_TC_TIMER_VAL_LBN 0; */
+/* defined as ERF_FZ_TC_TIMER_VAL_WIDTH 14 */
+
+
+/*
+ * EVQ_TMR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_EVQ_TMR_REG_OFST 0x00000420
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
+#define ER_DZ_EVQ_TMR_REG_STEP 8192
+#define ER_DZ_EVQ_TMR_REG_ROWS 2048
+#define ER_DZ_EVQ_TMR_REG_RESET 0x0
+
+
+/* defined as ERF_FZ_TC_TMR_REL_VAL_LBN 16; */
+/* defined as ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 */
+#define ERF_DZ_TC_TIMER_MODE_LBN 14
+#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_DZ_TC_TIMER_VAL_LBN 0
+#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
+
+
+/*
+ * RX_DESC_UPD_REG_16K(32bit):
+ *
+ */
+
+#define ER_FZ_RX_DESC_UPD_REG_16K_OFST 0x00000830
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_RX_DESC_UPD_REG_16K_STEP 16384
+#define ER_FZ_RX_DESC_UPD_REG_16K_ROWS 2048
+#define ER_FZ_RX_DESC_UPD_REG_16K_RESET 0x0
+
+
+#define ERF_FZ_RX_DESC_WPTR_LBN 0
+#define ERF_FZ_RX_DESC_WPTR_WIDTH 12
+
+
+/*
+ * RX_DESC_UPD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_RX_DESC_UPD_REG_OFST 0x00000830
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
+#define ER_DZ_RX_DESC_UPD_REG_STEP 8192
+#define ER_DZ_RX_DESC_UPD_REG_ROWS 2048
+#define ER_DZ_RX_DESC_UPD_REG_RESET 0x0
+
+
+#define ERF_DZ_RX_DESC_WPTR_LBN 0
+#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+
+/*
+ * RX_DESC_UPD_REG_64K(32bit):
+ *
+ */
+
+#define ER_FZ_RX_DESC_UPD_REG_64K_OFST 0x00000830
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_RX_DESC_UPD_REG_64K_STEP 65536
+#define ER_FZ_RX_DESC_UPD_REG_64K_ROWS 2048
+#define ER_FZ_RX_DESC_UPD_REG_64K_RESET 0x0
+
+
+/* defined as ERF_FZ_RX_DESC_WPTR_LBN 0; */
+/* defined as ERF_FZ_RX_DESC_WPTR_WIDTH 12 */
+
+
+/*
+ * TX_DESC_UPD_REG_64K(96bit):
+ *
+ */
+
+#define ER_FZ_TX_DESC_UPD_REG_64K_OFST 0x00000a10
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_TX_DESC_UPD_REG_64K_STEP 65536
+#define ER_FZ_TX_DESC_UPD_REG_64K_ROWS 2048
+#define ER_FZ_TX_DESC_UPD_REG_64K_RESET 0x0
+
+
+#define ERF_FZ_RSVD_LBN 76
+#define ERF_FZ_RSVD_WIDTH 20
+#define ERF_FZ_TX_DESC_WPTR_LBN 64
+#define ERF_FZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_FZ_TX_DESC_HWORD_LBN 32
+#define ERF_FZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_FZ_TX_DESC_LWORD_LBN 0
+#define ERF_FZ_TX_DESC_LWORD_WIDTH 32
+
+
+/*
+ * TX_DESC_UPD_REG_16K(96bit):
+ *
+ */
+
+#define ER_FZ_TX_DESC_UPD_REG_16K_OFST 0x00000a10
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_TX_DESC_UPD_REG_16K_STEP 16384
+#define ER_FZ_TX_DESC_UPD_REG_16K_ROWS 2048
+#define ER_FZ_TX_DESC_UPD_REG_16K_RESET 0x0
+
+
+/* defined as ERF_FZ_RSVD_LBN 76; */
+/* defined as ERF_FZ_RSVD_WIDTH 20 */
+/* defined as ERF_FZ_TX_DESC_WPTR_LBN 64; */
+/* defined as ERF_FZ_TX_DESC_WPTR_WIDTH 12 */
+/* defined as ERF_FZ_TX_DESC_HWORD_LBN 32; */
+/* defined as ERF_FZ_TX_DESC_HWORD_WIDTH 32 */
+/* defined as ERF_FZ_TX_DESC_LWORD_LBN 0; */
+/* defined as ERF_FZ_TX_DESC_LWORD_WIDTH 32 */
+
+
+/*
+ * TX_DESC_UPD_REG(96bit):
+ *
+ */
+
+#define ER_DZ_TX_DESC_UPD_REG_OFST 0x00000a10
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
+#define ER_DZ_TX_DESC_UPD_REG_STEP 8192
+#define ER_DZ_TX_DESC_UPD_REG_ROWS 2048
+#define ER_DZ_TX_DESC_UPD_REG_RESET 0x0
+
+
+#define ERF_DZ_RSVD_LBN 76
+#define ERF_DZ_RSVD_WIDTH 20
+#define ERF_DZ_TX_DESC_WPTR_LBN 64
+#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_DZ_TX_DESC_HWORD_LBN 32
+#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_DZ_TX_DESC_LWORD_LBN 0
+#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
+
+
+/* ES_DRIVER_EV */
+#define ESF_DZ_DRV_CODE_LBN 60
+#define ESF_DZ_DRV_CODE_WIDTH 4
+#define ESF_DZ_DRV_SUB_CODE_LBN 56
+#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
+#define ESE_DZ_DRV_TIMER_EV 3
+#define ESE_DZ_DRV_START_UP_EV 2
+#define ESE_DZ_DRV_WAKE_UP_EV 1
+#define ESF_DZ_DRV_SUB_DATA_DW0_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_DW0_WIDTH 32
+#define ESF_DZ_DRV_SUB_DATA_DW1_LBN 32
+#define ESF_DZ_DRV_SUB_DATA_DW1_WIDTH 24
+#define ESF_DZ_DRV_SUB_DATA_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
+#define ESF_DZ_DRV_EVQ_ID_LBN 0
+#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
+#define ESF_DZ_DRV_TMR_ID_LBN 0
+#define ESF_DZ_DRV_TMR_ID_WIDTH 14
+
+
+/* ES_EVENT_ENTRY */
+#define ESF_DZ_EV_CODE_LBN 60
+#define ESF_DZ_EV_CODE_WIDTH 4
+#define ESE_DZ_EV_CODE_MCDI_EV 12
+#define ESE_DZ_EV_CODE_DRIVER_EV 5
+#define ESE_DZ_EV_CODE_TX_EV 2
+#define ESE_DZ_EV_CODE_RX_EV 0
+#define ESE_DZ_OTHER other
+#define ESF_DZ_EV_DATA_DW0_LBN 0
+#define ESF_DZ_EV_DATA_DW0_WIDTH 32
+#define ESF_DZ_EV_DATA_DW1_LBN 32
+#define ESF_DZ_EV_DATA_DW1_WIDTH 28
+#define ESF_DZ_EV_DATA_LBN 0
+#define ESF_DZ_EV_DATA_WIDTH 60
+
+
+/* ES_MC_EVENT */
+#define ESF_DZ_MC_CODE_LBN 60
+#define ESF_DZ_MC_CODE_WIDTH 4
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_MC_DROP_EVENT_LBN 58
+#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
+#define ESF_DZ_MC_SOFT_DW0_LBN 0
+#define ESF_DZ_MC_SOFT_DW0_WIDTH 32
+#define ESF_DZ_MC_SOFT_DW1_LBN 32
+#define ESF_DZ_MC_SOFT_DW1_WIDTH 26
+#define ESF_DZ_MC_SOFT_LBN 0
+#define ESF_DZ_MC_SOFT_WIDTH 58
+
+
+/* ES_RX_EVENT */
+#define ESF_DZ_RX_CODE_LBN 60
+#define ESF_DZ_RX_CODE_WIDTH 4
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_RX_DROP_EVENT_LBN 58
+#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
+#define ESF_DD_RX_EV_RSVD2_LBN 54
+#define ESF_DD_RX_EV_RSVD2_WIDTH 4
+#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN 56
+#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_RX_EV_RSVD2_LBN 54
+#define ESF_EZ_RX_EV_RSVD2_WIDTH 2
+#define ESF_DZ_RX_EV_SOFT2_LBN 52
+#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
+#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
+#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
+#define ESF_DE_RX_L4_CLASS_LBN 45
+#define ESF_DE_RX_L4_CLASS_WIDTH 3
+#define ESE_DE_L4_CLASS_RSVD7 7
+#define ESE_DE_L4_CLASS_RSVD6 6
+#define ESE_DE_L4_CLASS_RSVD5 5
+#define ESE_DE_L4_CLASS_RSVD4 4
+#define ESE_DE_L4_CLASS_RSVD3 3
+#define ESE_DE_L4_CLASS_UDP 2
+#define ESE_DE_L4_CLASS_TCP 1
+#define ESE_DE_L4_CLASS_UNKNOWN 0
+#define ESF_FZ_RX_FASTPD_INDCTR_LBN 47
+#define ESF_FZ_RX_FASTPD_INDCTR_WIDTH 1
+#define ESF_FZ_RX_L4_CLASS_LBN 45
+#define ESF_FZ_RX_L4_CLASS_WIDTH 2
+#define ESE_FZ_L4_CLASS_RSVD3 3
+#define ESE_FZ_L4_CLASS_UDP 2
+#define ESE_FZ_L4_CLASS_TCP 1
+#define ESE_FZ_L4_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_L3_CLASS_LBN 42
+#define ESF_DZ_RX_L3_CLASS_WIDTH 3
+#define ESE_DZ_L3_CLASS_RSVD7 7
+#define ESE_DZ_L3_CLASS_IP6_FRAG 6
+#define ESE_DZ_L3_CLASS_ARP 5
+#define ESE_DZ_L3_CLASS_IP4_FRAG 4
+#define ESE_DZ_L3_CLASS_FCOE 3
+#define ESE_DZ_L3_CLASS_IP6 2
+#define ESE_DZ_L3_CLASS_IP4 1
+#define ESE_DZ_L3_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
+#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
+#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
+#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
+#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
+#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
+#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
+#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
+#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
+#define ESE_DZ_ETH_TAG_CLASS_NONE 0
+#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
+#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
+#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
+#define ESE_DZ_ETH_BASE_CLASS_LLC 1
+#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
+#define ESF_DZ_RX_MAC_CLASS_LBN 35
+#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
+#define ESE_DZ_MAC_CLASS_MCAST 1
+#define ESE_DZ_MAC_CLASS_UCAST 0
+#define ESF_DD_RX_EV_SOFT1_LBN 32
+#define ESF_DD_RX_EV_SOFT1_WIDTH 3
+#define ESF_EZ_RX_EV_SOFT1_LBN 34
+#define ESF_EZ_RX_EV_SOFT1_WIDTH 1
+#define ESF_EZ_RX_ENCAP_HDR_LBN 32
+#define ESF_EZ_RX_ENCAP_HDR_WIDTH 2
+#define ESE_EZ_ENCAP_HDR_GRE 2
+#define ESE_EZ_ENCAP_HDR_VXLAN 1
+#define ESE_EZ_ENCAP_HDR_NONE 0
+#define ESF_DD_RX_EV_RSVD1_LBN 30
+#define ESF_DD_RX_EV_RSVD1_WIDTH 2
+#define ESF_EZ_RX_EV_RSVD1_LBN 31
+#define ESF_EZ_RX_EV_RSVD1_WIDTH 1
+#define ESF_EZ_RX_ABORT_LBN 30
+#define ESF_EZ_RX_ABORT_WIDTH 1
+#define ESF_DZ_RX_ECC_ERR_LBN 29
+#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_TRUNC_ERR_LBN 29
+#define ESF_DZ_RX_TRUNC_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC1_ERR_LBN 28
+#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC0_ERR_LBN 27
+#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
+#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_ECRC_ERR_LBN 24
+#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
+#define ESF_DZ_RX_QLABEL_LBN 16
+#define ESF_DZ_RX_QLABEL_WIDTH 5
+#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
+#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
+#define ESF_DZ_RX_CONT_LBN 14
+#define ESF_DZ_RX_CONT_WIDTH 1
+#define ESF_DZ_RX_BYTES_LBN 0
+#define ESF_DZ_RX_BYTES_WIDTH 14
+
+
+/* ES_RX_KER_DESC */
+#define ESF_DZ_RX_KER_RESERVED_LBN 62
+#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
+#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_KER_BUF_ADDR_DW0_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_DW0_WIDTH 32
+#define ESF_DZ_RX_KER_BUF_ADDR_DW1_LBN 32
+#define ESF_DZ_RX_KER_BUF_ADDR_DW1_WIDTH 16
+#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
+
+
+/* ES_TX_CSUM_TSTAMP_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_OPTION_TS_AT_TXDP_LBN 8
+#define ESF_DZ_TX_OPTION_TS_AT_TXDP_WIDTH 1
+#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_LBN 7
+#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_LBN 6
+#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_WIDTH 1
+#define ESF_DZ_TX_TIMESTAMP_LBN 5
+#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
+#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
+#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
+#define ESE_DZ_TX_OPTION_CRC_FCOE 1
+#define ESE_DZ_TX_OPTION_CRC_OFF 0
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
+#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
+
+
+/* ES_TX_EVENT */
+#define ESF_DZ_TX_CODE_LBN 60
+#define ESF_DZ_TX_CODE_WIDTH 4
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_TX_DROP_EVENT_LBN 58
+#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
+#define ESF_DD_TX_EV_RSVD_LBN 48
+#define ESF_DD_TX_EV_RSVD_WIDTH 10
+#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_IP_INNER_CHKSUM_ERR_LBN 56
+#define ESF_EZ_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_TX_EV_RSVD_LBN 48
+#define ESF_EZ_TX_EV_RSVD_WIDTH 8
+#define ESF_DZ_TX_SOFT2_LBN 32
+#define ESF_DZ_TX_SOFT2_WIDTH 16
+#define ESF_DD_TX_SOFT1_LBN 24
+#define ESF_DD_TX_SOFT1_WIDTH 8
+#define ESF_EZ_TX_CAN_MERGE_LBN 31
+#define ESF_EZ_TX_CAN_MERGE_WIDTH 1
+#define ESF_EZ_TX_SOFT1_LBN 24
+#define ESF_EZ_TX_SOFT1_WIDTH 7
+#define ESF_DZ_TX_QLABEL_LBN 16
+#define ESF_DZ_TX_QLABEL_WIDTH 5
+#define ESF_DZ_TX_DESCR_INDX_LBN 0
+#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
+
+
+/* ES_TX_KER_DESC */
+#define ESF_DZ_TX_KER_TYPE_LBN 63
+#define ESF_DZ_TX_KER_TYPE_WIDTH 1
+#define ESF_DZ_TX_KER_CONT_LBN 62
+#define ESF_DZ_TX_KER_CONT_WIDTH 1
+#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_KER_BUF_ADDR_DW0_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_DW0_WIDTH 32
+#define ESF_DZ_TX_KER_BUF_ADDR_DW1_LBN 32
+#define ESF_DZ_TX_KER_BUF_ADDR_DW1_WIDTH 16
+#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
+
+
+/* ES_TX_PIO_DESC */
+#define ESF_DZ_TX_PIO_TYPE_LBN 63
+#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
+#define ESF_DZ_TX_PIO_OPT_LBN 60
+#define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESF_DZ_TX_PIO_CONT_LBN 59
+#define ESF_DZ_TX_PIO_CONT_WIDTH 1
+#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
+#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
+#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
+
+
+/* ES_TX_TSO_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
+#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+
+/* ES_TX_TSO_V2_DESC_A */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+
+/* ES_TX_TSO_V2_DESC_B */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
+#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
+#define ESF_DZ_TX_TSO_OUTER_IPID_LBN 0
+#define ESF_DZ_TX_TSO_OUTER_IPID_WIDTH 16
+
+
+/* ES_TX_VLAN_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_VLAN_OP_LBN 32
+#define ESF_DZ_TX_VLAN_OP_WIDTH 2
+#define ESF_DZ_TX_VLAN_TAG2_LBN 16
+#define ESF_DZ_TX_VLAN_TAG2_WIDTH 16
+#define ESF_DZ_TX_VLAN_TAG1_LBN 0
+#define ESF_DZ_TX_VLAN_TAG1_WIDTH 16
+
+
+/*************************************************************************
+ * NOTE: the comment line above marks the end of the autogenerated section
+ */
+
+/*
+ * The workaround for bug 35388 requires multiplexing writes through
+ * the ERF_DZ_TX_DESC_WPTR address.
+ * TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
+ * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
+ * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
+ */
+#define ER_DD_EVQ_INDIRECT_OFST (ER_DZ_TX_DESC_UPD_REG_OFST + 2 * 4)
+#define ER_DD_EVQ_INDIRECT_STEP ER_DZ_TX_DESC_UPD_REG_STEP
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
+#define ERF_DD_EVQ_IND_RPTR_LBN 0
+#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
+#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
+#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
+#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
+#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
+#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
+
+/* Packed stream magic doorbell command */
+#define ERF_DZ_RX_DESC_MAGIC_DOORBELL_LBN 11
+#define ERF_DZ_RX_DESC_MAGIC_DOORBELL_WIDTH 1
+
+#define ERF_DZ_RX_DESC_MAGIC_CMD_LBN 8
+#define ERF_DZ_RX_DESC_MAGIC_CMD_WIDTH 3
+#define ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS 0
+
+#define ERF_DZ_RX_DESC_MAGIC_DATA_LBN 0
+#define ERF_DZ_RX_DESC_MAGIC_DATA_WIDTH 8
+
+/* Packed stream RX packet prefix */
+#define ES_DZ_PS_RX_PREFIX_TSTAMP_LBN 0
+#define ES_DZ_PS_RX_PREFIX_TSTAMP_WIDTH 32
+#define ES_DZ_PS_RX_PREFIX_CAP_LEN_LBN 32
+#define ES_DZ_PS_RX_PREFIX_CAP_LEN_WIDTH 16
+#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_LBN 48
+#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_WIDTH 16
+
+/* Equal stride super-buffer RX packet prefix (see SF-119419-TC) */
+#define ES_EZ_ESSB_RX_PREFIX_LEN 8
+#define ES_EZ_ESSB_RX_PREFIX_DATA_LEN_LBN 0
+#define ES_EZ_ESSB_RX_PREFIX_DATA_LEN_WIDTH 16
+#define ES_EZ_ESSB_RX_PREFIX_MARK_LBN 16
+#define ES_EZ_ESSB_RX_PREFIX_MARK_WIDTH 8
+#define ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN 28
+#define ES_EZ_ESSB_RX_PREFIX_HASH_VALID_WIDTH 1
+#define ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN 29
+#define ES_EZ_ESSB_RX_PREFIX_MARK_VALID_WIDTH 1
+#define ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN 30
+#define ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_WIDTH 1
+#define ES_EZ_ESSB_RX_PREFIX_HASH_LBN 32
+#define ES_EZ_ESSB_RX_PREFIX_HASH_WIDTH 32
+
+/*
+ * An extra flag for the packed stream mode,
+ * signalling the start of a new buffer
+ */
+#define ESF_DZ_RX_EV_ROTATE_LBN 53
+#define ESF_DZ_RX_EV_ROTATE_WIDTH 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_EF10_REGS_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi.h
new file mode 100644
index 00000000..cf8a7936
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi.h
@@ -0,0 +1,18144 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2008-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+/*! \cidoxg_firmware_mc_cmd */
+
+#ifndef _SIENA_MC_DRIVER_PCOL_H
+#define _SIENA_MC_DRIVER_PCOL_H
+
+
+/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
+/* Power-on reset state */
+#define MC_FW_STATE_POR (1)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash. */
+#define MC_FW_WARM_BOOT_OK (2)
+/* The MC main image has started to boot. */
+#define MC_FW_STATE_BOOTING (4)
+/* The Scheduler has started. */
+#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so that
+ * the MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode. This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
+
+/* Siena MC shared memmory offsets */
+/* The 'doorbell' addresses are hard-wired to alert the MC when written */
+#define MC_SMEM_P0_DOORBELL_OFST 0x000
+#define MC_SMEM_P1_DOORBELL_OFST 0x004
+/* The rest of these are firmware-defined */
+#define MC_SMEM_P0_PDU_OFST 0x008
+#define MC_SMEM_P1_PDU_OFST 0x108
+#define MC_SMEM_PDU_LEN 0x100
+#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0
+#define MC_SMEM_P0_STATUS_OFST 0x7f8
+#define MC_SMEM_P1_STATUS_OFST 0x7fc
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
+/* The current version of the MCDI protocol.
+ *
+ * Note that the ROM burnt into the card only talks V0, so at the very
+ * least every driver must support version 0 and MCDI_PCOL_VERSION
+ */
+#ifdef WITH_MCDI_V2
+#define MCDI_PCOL_VERSION 2
+#else
+#define MCDI_PCOL_VERSION 1
+#endif
+
+/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
+
+/* MCDI version 1
+ *
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
+ * structure, filled in by the client.
+ *
+ * 0 7 8 16 20 22 23 24 31
+ * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ * | | |
+ * | | \--- Response
+ * | \------- Error
+ * \------------------------------ Resync (always set)
+ *
+ * The client writes it's request into MC shared memory, and rings the
+ * doorbell. Each request is completed by either by the MC writting
+ * back into shared memory, or by writting out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some response's may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which any associated
+ * response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request, a
+ * request should not be sent unless the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/** Request/Response structure */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
+
+/* Maximum number of payload bytes */
+#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#ifdef WITH_MCDI_V2
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+#else
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V1
+#endif
+
+
+/* The MC can generate events for two reasons:
+ * - To advance a shared memory request if XFLAGS_EVREQ was set
+ * - As a notification (link state, i2c event), controlled
+ * via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ * 0 32 33 36 44 52 60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ * |
+ * \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ * - LEVEL==INFO Command succeeded
+ * - LEVEL==ERR Command failed
+ *
+ * 0 8 16 24 32
+ * | Seq | Datalen | Errno | Rsvd |
+ *
+ * These fields are taken directly out of the standard MCDI header, i.e.,
+ * LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without a
+ * MCDI header. An event can be distinguished from a MCDI response by
+ * examining the first byte which is 0xc0. This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ * 0 7 8
+ * | command | Resync | = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * providing bits 56-63 of the event are 0xc0.
+ *
+ * 56 60 63
+ * | Rsvd | Code | = 0xc0
+ *
+ * Which means for convenience the event code is 0xc for all MC
+ * generated events.
+ */
+#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+
+
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 1
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 12
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 13
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 35
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 38
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* The requested operation might require the
+ command to be passed between MCs, and the
+ transport doesn't support that. Should
+ only ever been seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps matching it with the respective PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The drvier should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807.
+ * May also returned for other operations such as sub-variant switching. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set set
+ * doesn't exist on this NIC */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/* Returned by MC_CMD_TESTASSERT if the action that should
+ * have caused an assertion failed to do so. */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary*/
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/* The operation could not complete because some PIO buffers are allocated */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
+
+#define MC_CMD_ERR_CODE_OFST 0
+
+/* We define 8 "escape" commands to allow
+ for command number space extension */
+
+#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78
+#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79
+#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A
+#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B
+#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C
+#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D
+#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E
+#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F
+
+/* Vectors in the boot ROM */
+/* Point to the copycode entry point. */
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
+#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
+/* Points to the recovery mode entry point. Misnamed but kept for compatibility. */
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
+/* Points to the recovery mode entry point. Same as above, but the right name. */
+#define SIENA_MC_BOOTROM_RECOVERY_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_RECOVERY_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_RECOVERY_VEC (0x10000 - 2 * 0x4)
+
+/* Points to noflash mode entry point. */
+#define MEDFORD_MC_BOOTROM_REAL_NOFLASH_VEC (0x10000 - 4 * 0x4)
+
+/* The command set exported by the boot ROM (MCDI v0) */
+#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
+ (1 << MC_CMD_READ32) | \
+ (1 << MC_CMD_WRITE32) | \
+ (1 << MC_CMD_COPYCODE) | \
+ (1 << MC_CMD_GET_VERSION), \
+ 0, 0, 0 }
+
+#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
+ (MC_CMD_SENSOR_ENTRY_OFST + (_x))
+
+#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n) (((n) & 0xff) << 16)
+
+
+#ifdef WITH_MCDI_V2
+
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
+
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
+#endif
+
+/* MCDI_EVENT structuredef */
+#define MCDI_EVENT_LEN 8
+#define MCDI_EVENT_CONT_LBN 32
+#define MCDI_EVENT_CONT_WIDTH 1
+#define MCDI_EVENT_LEVEL_LBN 33
+#define MCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MCDI_EVENT_LEVEL_FATAL 0x3
+#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_DATA_LEN 4
+#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
+#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
+#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
+#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0
+/* enum: 100Mbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+/* enum: 1Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+/* enum: 10Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+/* enum: 40Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+/* enum: 25Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5
+/* enum: 50Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6
+/* enum: 100Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7
+#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
+#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
+#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
+#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
+#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define MCDI_EVENT_FWALERT_DATA_LBN 8
+#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
+#define MCDI_EVENT_FWALERT_REASON_LBN 0
+#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
+/* enum: SRAM Access. */
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
+#define MCDI_EVENT_FLR_VF_LBN 0
+#define MCDI_EVENT_FLR_VF_WIDTH 8
+#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
+#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
+#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
+/* enum: Descriptor loader reported failure */
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
+/* enum: Descriptor ring empty and no EOP seen for packet */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
+/* enum: Overlength packet */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3
+/* enum: Malformed option descriptor */
+#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
+/* enum: Option descriptor part way through a packet */
+#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
+/* enum: DMA or PIO data access error */
+#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
+#define MCDI_EVENT_TX_ERR_INFO_LBN 16
+#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
+#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
+#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
+#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
+/* enum: PLL lost lock */
+#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
+/* enum: Filter overflow (PDMA) */
+#define MCDI_EVENT_PTP_ERR_FILTER 0x2
+/* enum: FIFO overflow (FPGA) */
+#define MCDI_EVENT_PTP_ERR_FIFO 0x3
+/* enum: Merge queue overflow */
+#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
+#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
+/* enum: AOE failed to load - no valid image? */
+#define MCDI_EVENT_AOE_NO_LOAD 0x1
+/* enum: AOE FC reported an exception */
+#define MCDI_EVENT_AOE_FC_ASSERT 0x2
+/* enum: AOE FC watchdogged */
+#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
+/* enum: AOE FC failed to start */
+#define MCDI_EVENT_AOE_FC_NO_START 0x4
+/* enum: Generic AOE fault - likely to have been reported via other means too
+ * but intended for use by aoex driver.
+ */
+#define MCDI_EVENT_AOE_FAULT 0x5
+/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
+/* enum: AOE loaded successfully */
+#define MCDI_EVENT_AOE_LOAD 0x7
+/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_DMA 0x8
+/* enum: AOE byteblaster connected/disconnected (Connection status in
+ * AOE_ERR_DATA)
+ */
+#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that invalid flash type detected */
+#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timedout */
+#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define MCDI_EVENT_AOE_FC_RUNNING 0x14
+#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
+#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for readinng by driver
+ */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid Magic Number if FPGA header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
+#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
+#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_RX_ERR_INFO_LBN 16
+#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define MCDI_EVENT_MUM_WATCHDOG 0x3
+#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_DBRET_SEQ_LBN 0
+#define MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define MCDI_EVENT_SUC_WATCHDOG 0x4
+#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
+#define MCDI_EVENT_DATA_LBN 0
+#define MCDI_EVENT_DATA_WIDTH 32
+#define MCDI_EVENT_SRC_LBN 36
+#define MCDI_EVENT_SRC_WIDTH 8
+#define MCDI_EVENT_EV_CODE_LBN 60
+#define MCDI_EVENT_EV_CODE_WIDTH 4
+#define MCDI_EVENT_CODE_LBN 44
+#define MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define MCDI_EVENT_SW_EVENT 0x0
+/* enum: Bad assert. */
+#define MCDI_EVENT_CODE_BADSSERT 0x1
+/* enum: PM Notice. */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2
+/* enum: Command done. */
+#define MCDI_EVENT_CODE_CMDDONE 0x3
+/* enum: Link change. */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4
+/* enum: Sensor Event. */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5
+/* enum: Schedule error. */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6
+/* enum: Reboot. */
+#define MCDI_EVENT_CODE_REBOOT 0x7
+/* enum: Mac stats DMA. */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
+/* enum: Firmware alert. */
+#define MCDI_EVENT_CODE_FWALERT 0x9
+/* enum: Function level reset. */
+#define MCDI_EVENT_CODE_FLR 0xa
+/* enum: Transmit error */
+#define MCDI_EVENT_CODE_TX_ERR 0xb
+/* enum: Tx flush has completed */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+/* enum: PTP packet received timestamp */
+#define MCDI_EVENT_CODE_PTP_RX 0xd
+/* enum: PTP NIC failure */
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+/* enum: PTP PPS event */
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
+/* enum: Rx flush has completed */
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+/* enum: Receive error */
+#define MCDI_EVENT_CODE_RX_ERR 0x11
+/* enum: AOE fault */
+#define MCDI_EVENT_CODE_AOE 0x12
+/* enum: Network port calibration failed (VCAL). */
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+/* enum: HW PPS event */
+#define MCDI_EVENT_CODE_HW_PPS 0x14
+/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
+ * a different format)
+ */
+#define MCDI_EVENT_CODE_MC_REBOOT 0x15
+/* enum: the MC has detected a parity error */
+#define MCDI_EVENT_CODE_PAR_ERR 0x16
+/* enum: the MC has detected a correctable error */
+#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
+/* enum: the MC has detected an uncorrectable error */
+#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define MCDI_EVENT_CODE_SUC 0x1f
+/* enum: Artificial event generated by host and posted via MC for test
+ * purposes.
+ */
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
+#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LEN 4
+#define MCDI_EVENT_CMDDONE_DATA_LBN 0
+#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
+#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
+#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
+#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
+#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
+#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LEN 4
+#define MCDI_EVENT_TX_ERR_DATA_LBN 0
+#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LEN 4
+#define MCDI_EVENT_PTP_SECONDS_LBN 0
+#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LEN 4
+#define MCDI_EVENT_PTP_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
+#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4
+#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
+#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LEN 4
+#define MCDI_EVENT_PTP_MINOR_LBN 0
+#define MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
+#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LEN 4
+#define MCDI_EVENT_PTP_UUID_LBN 0
+#define MCDI_EVENT_PTP_UUID_WIDTH 32
+#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LEN 4
+#define MCDI_EVENT_RX_ERR_DATA_LBN 0
+#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LEN 4
+#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
+#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4
+#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+#define MCDI_EVENT_DBRET_DATA_OFST 0
+#define MCDI_EVENT_DBRET_DATA_LEN 4
+#define MCDI_EVENT_DBRET_DATA_LBN 0
+#define MCDI_EVENT_DBRET_DATA_WIDTH 32
+
+/* FCDI_EVENT structuredef */
+#define FCDI_EVENT_LEN 8
+#define FCDI_EVENT_CONT_LBN 32
+#define FCDI_EVENT_CONT_WIDTH 1
+#define FCDI_EVENT_LEVEL_LBN 33
+#define FCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define FCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define FCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define FCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define FCDI_EVENT_LEVEL_FATAL 0x3
+#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_DATA_LEN 4
+#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
+#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
+#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
+#define FCDI_EVENT_LINK_UP 0x1 /* enum */
+#define FCDI_EVENT_DATA_LBN 0
+#define FCDI_EVENT_DATA_WIDTH 32
+#define FCDI_EVENT_SRC_LBN 36
+#define FCDI_EVENT_SRC_WIDTH 8
+#define FCDI_EVENT_EV_CODE_LBN 60
+#define FCDI_EVENT_EV_CODE_WIDTH 4
+#define FCDI_EVENT_CODE_LBN 44
+#define FCDI_EVENT_CODE_WIDTH 8
+/* enum: The FC was rebooted. */
+#define FCDI_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define FCDI_EVENT_CODE_ASSERT 0x2
+/* enum: DDR3 test result. */
+#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
+/* enum: Link status. */
+#define FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define FCDI_EVENT_REBOOT_SRC_LBN 36
+#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_STATE_LEN 4
+#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define FCDI_EVENT_PTP_STATE_LBN 0
+#define FCDI_EVENT_PTP_STATE_WIDTH 32
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
+#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define FCDI_EVENT_BOOT_RESULT_OFST 0
+#define FCDI_EVENT_BOOT_RESULT_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define FCDI_EVENT_BOOT_RESULT_LBN 0
+#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure | is overlayed over a normal FCDI event
+ * such that bits 32-63 containing | event code, level, source etc remain the
+ * same. In this case the data | field of the header is defined to be the
+ * number of timestamps
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
+
+/* MUM_EVENT structuredef */
+#define MUM_EVENT_LEN 8
+#define MUM_EVENT_CONT_LBN 32
+#define MUM_EVENT_CONT_WIDTH 1
+#define MUM_EVENT_LEVEL_LBN 33
+#define MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MUM_EVENT_LEVEL_FATAL 0x3
+#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_DATA_LEN 4
+#define MUM_EVENT_SENSOR_ID_LBN 0
+#define MUM_EVENT_SENSOR_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MUM_EVENT_SENSOR_STATE_LBN 8
+#define MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define MUM_EVENT_PORT_PHY_READY_LBN 0
+#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define MUM_EVENT_DATA_LBN 0
+#define MUM_EVENT_DATA_WIDTH 32
+#define MUM_EVENT_SRC_LBN 36
+#define MUM_EVENT_SRC_WIDTH 8
+#define MUM_EVENT_EV_CODE_LBN 60
+#define MUM_EVENT_EV_CODE_WIDTH 4
+#define MUM_EVENT_CODE_LBN 44
+#define MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LEN 4
+#define MUM_EVENT_SENSOR_DATA_LBN 0
+#define MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
+#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
+#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_TECH_LEN 4
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_READ32
+ * Read multiple 32byte words from MC memory. Note - this command really
+ * belongs to INSECURE category but is required by shmboot. The command handler
+ * has additional checks to reject insecure calls.
+ */
+#define MC_CMD_READ32 0x1
+#undef MC_CMD_0x1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ32_IN msgrequest */
+#define MC_CMD_READ32_IN_LEN 8
+#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_ADDR_LEN 4
+#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define MC_CMD_READ32_IN_NUMWORDS_LEN 4
+
+/* MC_CMD_READ32_OUT msgresponse */
+#define MC_CMD_READ32_OUT_LENMIN 4
+#define MC_CMD_READ32_OUT_LENMAX 252
+#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_WRITE32
+ * Write multiple 32byte words to MC memory.
+ */
+#define MC_CMD_WRITE32 0x2
+#undef MC_CMD_0x2_PRIVILEGE_CTG
+
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_WRITE32_IN msgrequest */
+#define MC_CMD_WRITE32_IN_LENMIN 8
+#define MC_CMD_WRITE32_IN_LENMAX 252
+#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
+#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_ADDR_LEN 4
+#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
+#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
+
+/* MC_CMD_WRITE32_OUT msgresponse */
+#define MC_CMD_WRITE32_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_COPYCODE
+ * Copy MC code between two locations and jump. Note - this command really
+ * belongs to INSECURE category but is required by shmboot. The command handler
+ * has additional checks to reject insecure calls.
+ */
+#define MC_CMD_COPYCODE 0x3
+#undef MC_CMD_0x3_PRIVILEGE_CTG
+
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_COPYCODE_IN msgrequest */
+#define MC_CMD_COPYCODE_IN_LEN 16
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
+ */
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
+#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
+ */
+#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
+/* Destination address */
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
+#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
+/* Address of where to jump after copy. */
+#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
+/* enum: Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1
+
+/* MC_CMD_COPYCODE_OUT msgresponse */
+#define MC_CMD_COPYCODE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
+ */
+#define MC_CMD_SET_FUNC 0x4
+#undef MC_CMD_0x4_PRIVILEGE_CTG
+
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_FUNC_IN msgrequest */
+#define MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
+#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
+
+/* MC_CMD_SET_FUNC_OUT msgresponse */
+#define MC_CMD_SET_FUNC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x5
+#undef MC_CMD_0x5_PRIVILEGE_CTG
+
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
+#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
+#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* ?? */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
+/* enum: indicates that the MC wasn't flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_GET_ASSERTS
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
+ */
+#define MC_CMD_GET_ASSERTS 0x6
+#undef MC_CMD_0x6_PRIVILEGE_CTG
+
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_ASSERTS_IN msgrequest */
+#define MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
+
+/* MC_CMD_GET_ASSERTS_OUT msgresponse */
+#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
+/* enum: No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
+/* enum: An illegal address trap stopped the system (huntington and later) */
+#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
+
+
+/***********************************/
+/* MC_CMD_LOG_CTRL
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
+ */
+#define MC_CMD_LOG_CTRL 0x7
+#undef MC_CMD_0x7_PRIVILEGE_CTG
+
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LOG_CTRL_IN msgrequest */
+#define MC_CMD_LOG_CTRL_IN_LEN 8
+/* Log destination */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
+/* enum: UART. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
+/* enum: Event queue. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4
+
+/* MC_CMD_LOG_CTRL_OUT msgresponse */
+#define MC_CMD_LOG_CTRL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VERSION
+ * Get version information about the MC firmware.
+ */
+#define MC_CMD_GET_VERSION 0x8
+#undef MC_CMD_0x8_PRIVILEGE_CTG
+
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VERSION_IN msgrequest */
+#define MC_CMD_GET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
+#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
+/* placeholder, set to 0 */
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
+#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4
+/* enum: Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* enum: Bootrom version value for Siena. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
+/* enum: Bootrom version value for Huntington. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+/* enum: Bootrom version value for Medford2. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002
+
+/* MC_CMD_GET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
+
+/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
+#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
+
+
+/***********************************/
+/* MC_CMD_PTP
+ * Perform PTP operation
+ */
+#define MC_CMD_PTP 0xb
+#undef MC_CMD_0xb_PRIVILEGE_CTG
+
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PTP_IN msgrequest */
+#define MC_CMD_PTP_IN_LEN 1
+/* PTP operation code */
+#define MC_CMD_PTP_IN_OP_OFST 0
+#define MC_CMD_PTP_IN_OP_LEN 1
+/* enum: Enable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_ENABLE 0x1
+/* enum: Disable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_DISABLE 0x2
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
+#define MC_CMD_PTP_OP_TRANSMIT 0x3
+/* enum: Read the current NIC time. */
+#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666).
+ */
+#define MC_CMD_PTP_OP_STATUS 0x5
+/* enum: Adjust the PTP NIC's time. */
+#define MC_CMD_PTP_OP_ADJUST 0x6
+/* enum: Synchronize host and NIC time. */
+#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
+#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
+#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
+/* enum: Reset some of the PTP related statistics */
+#define MC_CMD_PTP_OP_RESET_STATS 0xa
+/* enum: Debug operations to MC. */
+#define MC_CMD_PTP_OP_DEBUG 0xb
+/* enum: Read an FPGA register. Siena PTP adapters only. */
+#define MC_CMD_PTP_OP_FPGAREAD 0xc
+/* enum: Write an FPGA register. Siena PTP adapters only. */
+#define MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Change the frequency correction applied to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
+#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
+#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
+#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
+#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. Not implemented. */
+#define MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE- extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS based manfacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC. Siena PTP adapters only.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
+/* enum: Above this for future use. */
+#define MC_CMD_PTP_OP_MAX 0x1c
+
+/* MC_CMD_PTP_IN_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_ENABLE_LEN 16
+#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_CMD_LEN 4
+#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function relative queue 0. */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
+#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
+/* enum: PTP, version 1 */
+#define MC_CMD_PTP_MODE_V1 0x0
+/* enum: PTP, version 1, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V1_VLAN 0x1
+/* enum: PTP, version 2 */
+#define MC_CMD_PTP_MODE_V2 0x2
+/* enum: PTP, version 2, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V2_VLAN 0x3
+/* enum: PTP, version 2, with improved UUID filtering */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
+/* enum: FCoE (seconds and microseconds) */
+#define MC_CMD_PTP_MODE_FCOE 0x5
+
+/* MC_CMD_PTP_IN_DISABLE msgrequest */
+#define MC_CMD_PTP_IN_DISABLE_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
+#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
+#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
+#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Transmit packet length */
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
+/* Transmit packet data */
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_STATUS msgrequest */
+#define MC_CMD_PTP_IN_STATUS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
+
+/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Number of time readings to capture */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
+/* Host address in which to write "synchronization started" indication (64
+ * bits)
+ */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
+
+/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Enable or disable packet testing */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
+
+/* MC_CMD_PTP_IN_RESET_STATS msgrequest: Reset PTP statistics */
+#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_DEBUG msgrequest */
+#define MC_CMD_PTP_IN_DEBUG_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Debug operations */
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
+
+/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
+#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
+
+/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+/* Enum values, see field(s): */
+/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
+
+/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Number of VLAN tags, 0 if not VLAN */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
+/* Set of VLAN tags to filter against */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
+
+/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* 1 to enable UUID filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
+/* UUID to filter against */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+
+/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* 1 to enable Domain filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
+/* Domain number to filter against */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
+
+/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Set the clock source. */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
+/* enum: Internal. */
+#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
+
+/* MC_CMD_PTP_IN_RST_CLK msgrequest: Reset value of Timer Reg. */
+#define MC_CMD_PTP_IN_RST_CLK_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* Enable or disable */
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4
+/* enum: Enable */
+#define MC_CMD_PTP_ENABLE_PPS 0x0
+/* enum: Disable */
+#define MC_CMD_PTP_DISABLE_PPS 0x1
+/* Not used. Events are always sent to function relative queue 0. */
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
+
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Original field containing queue ID. Now extended to include flags. */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Unsubscribe options */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4
+/* enum: Unsubscribe a single queue */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4
+
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* NIC - Host System Clock Synchronization status */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4
+/* enum: Host System clock and NIC clock are not in sync */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
+
+/* MC_CMD_PTP_OUT msgresponse */
+#define MC_CMD_PTP_OUT_LEN 0
+
+/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
+#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4
+/* Upper 32bits of major timestamp value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4
+
+/* MC_CMD_PTP_OUT_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_STATUS_LEN 64
+/* Frequency of NIC's hardware clock */
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4
+/* Number of packets transmitted and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4
+/* Number of packets received and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4
+/* Number of packets timestamped by the FPGA */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4
+/* Number of packets filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4
+/* Number of packets not filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4
+/* Number of PPS overflows (noise on input?) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4
+/* Number of PPS bad periods */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4
+/* Minimum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4
+/* Maximum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4
+/* Last period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4
+/* Mean period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4
+/* Last offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4
+/* Mean offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4
+
+/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+/* A set of host and NIC times */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+/* Host time immediately before NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4
+/* Host time immediately after NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4
+/* Number of nanoseconds waited after reading NIC's hardware clock */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
+/* enum: Successful test */
+#define MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event nS reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
+/* Presence of external oscillator */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
+/* Number of packets received by FPGA */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
+/* Number of packets received by Siena filters */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
+
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by for this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed. Note this enum is deprecated. Do not add to it- use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
+/* Time format required/used by for this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
+/* Various PTP capabilities */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
+/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
+/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CSR_READ32
+ * Read 32bit words from the indirect memory map.
+ */
+#define MC_CMD_CSR_READ32 0xc
+#undef MC_CMD_0xc_PRIVILEGE_CTG
+
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_CSR_READ32_IN msgrequest */
+#define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
+#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
+#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
+
+/* MC_CMD_CSR_READ32_OUT msgresponse */
+#define MC_CMD_CSR_READ32_OUT_LENMIN 4
+#define MC_CMD_CSR_READ32_OUT_LENMAX 252
+#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+/* The last dword is the status, not a value read */
+#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_CSR_WRITE32
+ * Write 32bit dwords to the indirect memory map.
+ */
+#define MC_CMD_CSR_WRITE32 0xd
+#undef MC_CMD_0xd_PRIVILEGE_CTG
+
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_CSR_WRITE32_IN msgrequest */
+#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
+#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
+#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+/* Address */
+#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
+
+/* MC_CMD_CSR_WRITE32_OUT msgresponse */
+#define MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+#undef MC_CMD_0x54_PRIVILEGE_CTG
+
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
+ * the specified address with the specified interval.When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
+ * state / 2: (debug) Show temperature reported by one of the supported
+ * sensors.
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+#define MC_CMD_HP_IN_SUBCMD_LEN 4
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
+
+
+/***********************************/
+/* MC_CMD_STACKINFO
+ * Get stack information.
+ */
+#define MC_CMD_STACKINFO 0xf
+#undef MC_CMD_0xf_PRIVILEGE_CTG
+
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_STACKINFO_IN msgrequest */
+#define MC_CMD_STACKINFO_IN_LEN 0
+
+/* MC_CMD_STACKINFO_OUT msgresponse */
+#define MC_CMD_STACKINFO_OUT_LENMIN 12
+#define MC_CMD_STACKINFO_OUT_LENMAX 252
+#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+/* (thread ptr, stack size, free space) for each thread in system */
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_MDIO_READ
+ * MDIO register read.
+ */
+#define MC_CMD_MDIO_READ 0x10
+#undef MC_CMD_0x10_PRIVILEGE_CTG
+
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MDIO_READ_IN msgrequest */
+#define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
+#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
+/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
+ * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
+#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
+
+/* MC_CMD_MDIO_READ_OUT msgresponse */
+#define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
+#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
+/* Status the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
+/* enum: Good. */
+#define MC_CMD_MDIO_STATUS_GOOD 0x8
+
+
+/***********************************/
+/* MC_CMD_MDIO_WRITE
+ * MDIO register write.
+ */
+#define MC_CMD_MDIO_WRITE 0x11
+#undef MC_CMD_0x11_PRIVILEGE_CTG
+
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MDIO_WRITE_IN msgrequest */
+#define MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
+/* enum: Internal. */
+/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
+/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
+/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
+ * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+/* MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
+#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
+/* Value */
+#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
+
+/* MC_CMD_MDIO_WRITE_OUT msgresponse */
+#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
+/* enum: Good. */
+/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
+
+
+/***********************************/
+/* MC_CMD_DBI_WRITE
+ * Write DBI register(s).
+ */
+#define MC_CMD_DBI_WRITE 0x12
+#undef MC_CMD_0x12_PRIVILEGE_CTG
+
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DBI_WRITE_IN msgrequest */
+#define MC_CMD_DBI_WRITE_IN_LENMIN 12
+#define MC_CMD_DBI_WRITE_IN_LENMAX 252
+#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
+
+/* MC_CMD_DBI_WRITE_OUT msgresponse */
+#define MC_CMD_DBI_WRITE_OUT_LEN 0
+
+/* MC_CMD_DBIWROP_TYPEDEF structuredef */
+#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PORT_READ32
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ32 0x14
+
+/* MC_CMD_PORT_READ32_IN msgrequest */
+#define MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
+
+/* MC_CMD_PORT_READ32_OUT msgresponse */
+#define MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
+#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
+/* Status */
+#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE32
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE32 0x15
+
+/* MC_CMD_PORT_WRITE32_IN msgrequest */
+#define MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
+#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
+/* Value */
+#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
+
+/* MC_CMD_PORT_WRITE32_OUT msgresponse */
+#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PORT_READ128
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ128 0x16
+
+/* MC_CMD_PORT_READ128_IN msgrequest */
+#define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
+
+/* MC_CMD_PORT_READ128_OUT msgresponse */
+#define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
+#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
+#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE128
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE128 0x17
+
+/* MC_CMD_PORT_WRITE128_IN msgrequest */
+#define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
+#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
+/* Value */
+#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
+
+/* MC_CMD_PORT_WRITE128_OUT msgresponse */
+#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
+
+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_CFG
+ * Returns the MC firmware configuration structure.
+ */
+#define MC_CMD_GET_BOARD_CFG 0x18
+#undef MC_CMD_0x18_PRIVILEGE_CTG
+
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
+#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
+#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4
+/* Base MAC address for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+/* Base MAC address for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4
+/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port0. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port1. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4
+/* Siena only. This field contains a 16-bit value for each of the types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a
+ * specific board type, but otherwise have no meaning to the MC; they are used
+ * by the driver to manage selection of appropriate firmware updates. Unused on
+ * EF10 and later (use MC_CMD_NVRAM_METADATA).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_DBI_READX
+ * Read DBI register(s) -- extended functionality
+ */
+#define MC_CMD_DBI_READX 0x19
+#undef MC_CMD_0x19_PRIVILEGE_CTG
+
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DBI_READX_IN msgrequest */
+#define MC_CMD_DBI_READX_IN_LENMIN 8
+#define MC_CMD_DBI_READX_IN_LENMAX 248
+#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+/* Each Read op consists of an address (offset 0), VF/CS2) */
+#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
+
+/* MC_CMD_DBI_READX_OUT msgresponse */
+#define MC_CMD_DBI_READX_OUT_LENMIN 4
+#define MC_CMD_DBI_READX_OUT_LENMAX 252
+#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+/* Value */
+#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
+#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
+#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_RAND_SEED
+ * Set the 16byte seed for the MC pseudo-random generator.
+ */
+#define MC_CMD_SET_RAND_SEED 0x1a
+#undef MC_CMD_0x1a_PRIVILEGE_CTG
+
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_RAND_SEED_IN msgrequest */
+#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
+#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
+
+/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
+#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LTSSM_HIST
+ * Retrieve the history of the LTSSM, if the build supports it.
+ */
+#define MC_CMD_LTSSM_HIST 0x1b
+
+/* MC_CMD_LTSSM_HIST_IN msgrequest */
+#define MC_CMD_LTSSM_HIST_IN_LEN 0
+
+/* MC_CMD_LTSSM_HIST_OUT msgresponse */
+#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
+#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
+#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
+#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_DRV_ATTACH
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
+ */
+#define MC_CMD_DRV_ATTACH 0x1c
+#undef MC_CMD_0x1c_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRV_ATTACH_IN msgrequest */
+#define MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state to set if UPDATE=1 */
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
+#define MC_CMD_DRV_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1
+#define MC_CMD_DRV_PREBOOT_LBN 1
+#define MC_CMD_DRV_PREBOOT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1
+#define MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2
+#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3
+#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1
+/* 1 to set new state, or 0 to just report the existing state */
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4
+/* enum: Prefer to use full featured firmware */
+#define MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
+/* enum: Prefer to use firmware with additional "rules engine" filtering
+ * support
+ */
+#define MC_CMD_FW_RULES_ENGINE 0x5
+/* enum: Prefer to use firmware with additional DPDK support */
+#define MC_CMD_FW_DPDK 0x6
+/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and
+ * bug69716)
+ */
+#define MC_CMD_FW_L3XUDP 0x7
+/* enum: Requests that the MC keep whatever datapath firmware is currently
+ * running. It's used for test purposes, where we want to be able to shmboot
+ * special test firmware variants. This option is only recognised in eftest
+ * (i.e. non-production) builds.
+ */
+#define MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe
+/* enum: Only this option is allowed for non-admin functions */
+#define MC_CMD_FW_DONT_CARE 0xffffffff
+
+/* MC_CMD_DRV_ATTACH_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4
+
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4
+/* Flags associated with this function */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
+/* enum: The function does not have an active port associated with it. The port
+ * refers to the Sorrento external FPGA port.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
+/* enum: If set, indicates that VI spreading is currently enabled. Will always
+ * indicate the current state, regardless of the value in the WANT_VI_SPREADING
+ * input.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4
+
+
+/***********************************/
+/* MC_CMD_SHMUART
+ * Route UART output to circular buffer in shared memory instead.
+ */
+#define MC_CMD_SHMUART 0x1f
+
+/* MC_CMD_SHMUART_IN msgrequest */
+#define MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
+#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define MC_CMD_SHMUART_IN_FLAG_LEN 4
+
+/* MC_CMD_SHMUART_OUT msgresponse */
+#define MC_CMD_SHMUART_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+#undef MC_CMD_0x20_PRIVILEGE_CTG
+
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ENTITY_RESET
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
+ */
+#define MC_CMD_ENTITY_RESET 0x20
+/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
+
+/* MC_CMD_ENTITY_RESET_IN msgrequest */
+#define MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
+#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
+
+/* MC_CMD_ENTITY_RESET_OUT msgresponse */
+#define MC_CMD_ENTITY_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_CREDITS
+ * Read instantaneous and minimum flow control thresholds.
+ */
+#define MC_CMD_PCIE_CREDITS 0x21
+
+/* MC_CMD_PCIE_CREDITS_IN msgrequest */
+#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* poll period. 0 is disabled */
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
+/* wipe statistics */
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
+
+/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
+#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
+
+
+/***********************************/
+/* MC_CMD_RXD_MONITOR
+ * Get histogram of RX queue fill level.
+ */
+#define MC_CMD_RXD_MONITOR 0x22
+
+/* MC_CMD_RXD_MONITOR_IN msgrequest */
+#define MC_CMD_RXD_MONITOR_IN_LEN 12
+#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
+#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
+
+/* MC_CMD_RXD_MONITOR_OUT msgresponse */
+#define MC_CMD_RXD_MONITOR_OUT_LEN 80
+#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PUTS
+ * Copy the given ASCII string out onto UART and/or out of the network port.
+ */
+#define MC_CMD_PUTS 0x23
+#undef MC_CMD_0x23_PRIVILEGE_CTG
+
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_PUTS_IN msgrequest */
+#define MC_CMD_PUTS_IN_LENMIN 13
+#define MC_CMD_PUTS_IN_LENMAX 252
+#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
+#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_DEST_LEN 4
+#define MC_CMD_PUTS_IN_UART_LBN 0
+#define MC_CMD_PUTS_IN_UART_WIDTH 1
+#define MC_CMD_PUTS_IN_PORT_LBN 1
+#define MC_CMD_PUTS_IN_PORT_WIDTH 1
+#define MC_CMD_PUTS_IN_DHOST_OFST 4
+#define MC_CMD_PUTS_IN_DHOST_LEN 6
+#define MC_CMD_PUTS_IN_STRING_OFST 12
+#define MC_CMD_PUTS_IN_STRING_LEN 1
+#define MC_CMD_PUTS_IN_STRING_MINNUM 1
+#define MC_CMD_PUTS_IN_STRING_MAXNUM 240
+
+/* MC_CMD_PUTS_OUT msgresponse */
+#define MC_CMD_PUTS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_CFG
+ * Report PHY configuration. This guarantees to succeed even if the PHY is in a
+ * 'zombie' state. Locks required: None
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+#undef MC_CMD_0x24_PRIVILEGE_CTG
+
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_CFG_IN msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+
+/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
+/* Bitmask of supported capabilities */
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
+#define MC_CMD_PHY_CAP_10HDX_LBN 1
+#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10FDX_LBN 2
+#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100HDX_LBN 3
+#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100FDX_LBN 4
+#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define MC_CMD_PHY_CAP_ASYM_LBN 9
+#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define MC_CMD_PHY_CAP_AN_LBN 10
+#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
+/* enum: Xaui. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
+#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
+#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
+#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
+#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
+#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
+
+
+/***********************************/
+/* MC_CMD_START_BIST
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_START_BIST 0x25
+#undef MC_CMD_0x25_PRIVILEGE_CTG
+
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_START_BIST_IN msgrequest */
+#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_LEN 4
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI) . */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
+
+/* MC_CMD_START_BIST_OUT msgresponse */
+#define MC_CMD_START_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_BIST
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY specific bist output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/Fail in
+ * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
+ * EACCES (if PHY_LOCK is not held).
+ */
+#define MC_CMD_POLL_BIST 0x26
+#undef MC_CMD_0x26_PRIVILEGE_CTG
+
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_POLL_BIST_IN msgrequest */
+#define MC_CMD_POLL_BIST_IN_LEN 0
+
+/* MC_CMD_POLL_BIST_OUT msgresponse */
+#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
+
+/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
+/* Status of each channel A */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of each channel B */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of each channel C */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of each channel D */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+
+/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX0 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* enum: RX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7
+/* enum: RX1 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FLUSH_RX_QUEUES
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation, rather than via
+ * directly writing FLUSH_CMD.
+ *
+ * The flush is completed (either done/fail) asynchronously (after this command
+ * returns). The driver must still wait for flush done/failure events as usual.
+ */
+#define MC_CMD_FLUSH_RX_QUEUES 0x27
+
+/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
+
+/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
+#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LOOPBACK_MODES
+ * Returns a bitmask of loopback modes available at each speed.
+ */
+#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#undef MC_CMD_0x28_PRIVILEGE_CTG
+
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+/* enum: Medford Wireside datapath loopback */
+#define MC_CMD_LOOPBACK_DATA_WS 0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50 loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/* Enum values, see field(s): */
+/* 100M */
+
+/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */
+#define AN_TYPE_LEN 4
+#define AN_TYPE_TYPE_OFST 0
+#define AN_TYPE_TYPE_LEN 4
+/* enum: None, AN disabled or not supported */
+#define MC_CMD_AN_NONE 0x0
+/* enum: Clause 28 - BASE-T */
+#define MC_CMD_AN_CLAUSE28 0x1
+/* enum: Clause 37 - BASE-X */
+#define MC_CMD_AN_CLAUSE37 0x2
+/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable
+ * assemblies. Includes Clause 72/Clause 92 link-training.
+ */
+#define MC_CMD_AN_CLAUSE73 0x3
+#define AN_TYPE_TYPE_LBN 0
+#define AN_TYPE_TYPE_WIDTH 32
+
+/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3
+ */
+#define FEC_TYPE_LEN 4
+#define FEC_TYPE_TYPE_OFST 0
+#define FEC_TYPE_TYPE_LEN 4
+/* enum: No FEC */
+#define MC_CMD_FEC_NONE 0x0
+/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */
+#define MC_CMD_FEC_BASER 0x1
+/* enum: Clause 91/Clause 108 Reed-Solomon FEC */
+#define MC_CMD_FEC_RS 0x2
+#define FEC_TYPE_TYPE_LBN 0
+#define FEC_TYPE_TYPE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_GET_LINK
+ * Read the unified MAC/PHY link state. Locks required: None Return code: 0,
+ * ETIME.
+ */
+#define MC_CMD_GET_LINK 0x29
+#undef MC_CMD_0x29_PRIVILEGE_CTG
+
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LINK_IN msgrequest */
+#define MC_CMD_GET_LINK_IN_LEN 0
+
+/* MC_CMD_GET_LINK_OUT msgresponse */
+#define MC_CMD_GET_LINK_OUT_LEN 28
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
+
+/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */
+#define MC_CMD_GET_LINK_OUT_V2_LEN 44
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */
+/* True local device capabilities (taking into account currently used PMD/MDI,
+ * e.g. plugged-in module). In general, subset of
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP, but may include extra _FEC_REQUEST
+ * bits, if the PMD requires FEC. 0 if unknown (e.g. module unplugged). Equal
+ * to SUPPORTED_CAP for non-pluggable PMDs. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_OFST 28
+#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_LEN 4
+/* Auto-negotiation type used on the link */
+#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_OFST 32
+#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+/* Forward error correction used on the link */
+#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST 36
+#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_SET_LINK
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME
+ */
+#define MC_CMD_SET_LINK 0x2a
+#undef MC_CMD_0x2a_PRIVILEGE_CTG
+
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_LINK_IN msgrequest */
+#define MC_CMD_SET_LINK_IN_LEN 16
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_CAP_LEN 4
+/* Flags */
+#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
+
+/* MC_CMD_SET_LINK_OUT msgresponse */
+#define MC_CMD_SET_LINK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_ID_LED
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_ID_LED 0x2b
+#undef MC_CMD_0x2b_PRIVILEGE_CTG
+
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_ID_LED_IN msgrequest */
+#define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
+#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4
+#define MC_CMD_LED_OFF 0x0 /* enum */
+#define MC_CMD_LED_ON 0x1 /* enum */
+#define MC_CMD_LED_DEFAULT 0x2 /* enum */
+
+/* MC_CMD_SET_ID_LED_OUT msgresponse */
+#define MC_CMD_SET_ID_LED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MAC
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_MAC 0x2c
+#undef MC_CMD_0x2c_PRIVILEGE_CTG
+
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_MAC_IN msgrequest */
+#define MC_CMD_SET_MAC_IN_LEN 28
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_MTU_LEN 4
+#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4
+#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_LEN 4
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto neg flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_EXT_IN msgrequest */
+#define MC_CMD_SET_MAC_EXT_IN_LEN 32
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_OUT msgresponse */
+#define MC_CMD_SET_MAC_OUT_LEN 0
+
+/* MC_CMD_SET_MAC_V2_OUT msgresponse */
+#define MC_CMD_SET_MAC_V2_OUT_LEN 4
+/* MTU as configured after processing the request. See comment at
+ * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL
+ * to 0.
+ */
+#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PHY_STATS
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumerate). Each value is represented
+ * by a 32bit number. If the DMA_ADDR is 0, then no DMA is performed, and the
+ * statistics may be read from the message response. If DMA_ADDR != 0, then the
+ * statistics are dmad to that (page-aligned location). Locks required: None.
+ * Returns: 0, ETIME
+ */
+#define MC_CMD_PHY_STATS 0x2d
+#undef MC_CMD_0x2d_PRIVILEGE_CTG
+
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_PHY_STATS_IN msgrequest */
+#define MC_CMD_PHY_STATS_IN_LEN 8
+/* ??? */
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
+/* enum: OUI. */
+#define MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define MC_CMD_PHY_NSTATS 0x17
+
+
+/***********************************/
+/* MC_CMD_MAC_STATS
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero initialise the buffer to
+ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are dmad to that (page-aligned location).
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
+ */
+#define MC_CMD_MAC_STATS 0x2e
+#undef MC_CMD_0x2e_PRIVILEGE_CTG
+
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATS_IN msgrequest */
+#define MC_CMD_MAC_STATS_IN_LEN 20
+/* ??? */
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
+
+/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
+#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMAd data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/ reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define MC_CMD_MAC_GENERATION_END 0x60
+#define MC_CMD_MAC_NSTATS 0x61 /* enum */
+
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_FEC_DMABUF_START 0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66
+/* enum: This includes the space at offset 103 which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V2 0x68
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
+#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f
+/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits
+ * or not 32-bit aligned
+ */
+#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70
+/* enum: Number of CTPIO fallbacks because another VI on the same port was
+ * sending a CTPIO frame
+ */
+#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71
+/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled
+ */
+#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72
+/* enum: Number of CTPIO fallbacks because length in header was less than 29
+ * bytes
+ */
+#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73
+/* enum: Total number of successful CTPIO sends on this port */
+#define MC_CMD_MAC_CTPIO_SUCCESS 0x74
+/* enum: Total number of CTPIO fallbacks on this port */
+#define MC_CMD_MAC_CTPIO_FALLBACK 0x75
+/* enum: Total number of CTPIO poisoned frames on this port, whether erased or
+ * not
+ */
+#define MC_CMD_MAC_CTPIO_POISON 0x76
+/* enum: Total number of CTPIO erased frames on this port */
+#define MC_CMD_MAC_CTPIO_ERASE 0x77
+/* enum: This includes the space at offset 120 which is the final
+ * GENERATION_END in a MAC_STATS_V3 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V3 0x79
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3)
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
+/* enum: Start of V4 stats buffer space */
+#define MC_CMD_MAC_V4_DMABUF_START 0x79
+/* enum: RXDP counter: Number of packets truncated because scattering was
+ * disabled.
+ */
+#define MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79
+/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting
+ * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define MC_CMD_MAC_RXDP_HLB_IDLE 0x7a
+/* enum: RXDP counter: Number of times the RXDP timed out while head of line
+ * blocking. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b
+/* enum: This includes the space at offset 124 which is the final
+ * GENERATION_END in a MAC_STATS_V4 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V4 0x7d
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
+
+
+/***********************************/
+/* MC_CMD_SRIOV
+ * to be documented
+ */
+#define MC_CMD_SRIOV 0x30
+
+/* MC_CMD_SRIOV_IN msgrequest */
+#define MC_CMD_SRIOV_IN_LEN 12
+#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
+#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
+#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
+
+/* MC_CMD_SRIOV_OUT msgresponse */
+#define MC_CMD_SRIOV_OUT_LEN 8
+#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
+
+/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MEMCPY
+ * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
+ * embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a dma
+ * update of a datastructure. To facilitate this, this MCDI operation can
+ * contain multiple requests which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * requests record, which is ignored in all but the first structure)
+ *
+ * The source data can either come from a DMA from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It's the callers responsibility to ensure that the embedded data
+ * doesn't overlap the records.
+ *
+ * Returns: 0, EINVAL (invalid RID)
+ */
+#define MC_CMD_MEMCPY 0x31
+
+/* MC_CMD_MEMCPY_IN msgrequest */
+#define MC_CMD_MEMCPY_IN_LENMIN 32
+#define MC_CMD_MEMCPY_IN_LENMAX 224
+#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
+#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
+#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
+#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
+#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
+
+/* MC_CMD_MEMCPY_OUT msgresponse */
+#define MC_CMD_MEMCPY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_SET
+ * Set a WoL filter.
+ */
+#define MC_CMD_WOL_FILTER_SET 0x32
+#undef MC_CMD_0x32_PRIVILEGE_CTG
+
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4
+#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
+#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+/* A type value of 1 is unused. */
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4
+/* enum: Magic */
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
+/* enum: MS Windows Magic */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+/* enum: IPv4 Syn */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+/* enum: IPv6 Syn */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+/* enum: Bitmap */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
+/* enum: Link */
+#define MC_CMD_WOL_TYPE_LINK 0x6
+/* enum: (Above this for future use) */
+#define MC_CMD_WOL_TYPE_MAX 0x7
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
+
+/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1
+
+/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
+/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_REMOVE
+ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#undef MC_CMD_0x33_PRIVILEGE_CTG
+
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
+#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4
+
+/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_RESET
+ * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
+ * ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_RESET 0x34
+#undef MC_CMD_0x34_PRIVILEGE_CTG
+
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
+#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
+#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
+
+/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MCAST_HASH
+ * Set the MCAST hash value without otherwise reconfiguring the MAC
+ */
+#define MC_CMD_SET_MCAST_HASH 0x35
+
+/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
+#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
+
+/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
+#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TYPES
+ * Return bitfield indicating available types of virtual NVRAM partitions.
+ * Locks required: none. Returns: 0
+ */
+#define MC_CMD_NVRAM_TYPES 0x36
+#undef MC_CMD_0x36_PRIVILEGE_CTG
+
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TYPES_IN msgrequest */
+#define MC_CMD_NVRAM_TYPES_IN_LEN 0
+
+/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
+#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+/* Bit mask of supported types. */
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
+/* enum: Disabled callisto. */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
+/* enum: MC firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
+/* enum: MC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
+/* enum: Static configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
+/* enum: Static configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
+/* enum: Dynamic configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
+/* enum: Dynamic configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
+/* enum: Expansion Rom. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
+/* enum: Expansion Rom Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
+/* enum: Expansion Rom Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
+/* enum: Phy Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
+/* enum: Phy Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
+/* enum: Log. */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc
+/* enum: FPGA image. */
+#define MC_CMD_NVRAM_TYPE_FPGA 0xd
+/* enum: FPGA backup image */
+#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
+/* enum: FC firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
+/* enum: FC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
+/* enum: CPLD image. */
+#define MC_CMD_NVRAM_TYPE_CPLD 0x11
+/* enum: Licensing information. */
+#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
+/* enum: FC Log. */
+#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
+
+
+/***********************************/
+/* MC_CMD_NVRAM_INFO
+ * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
+ * EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_INFO 0x37
+#undef MC_CMD_0x37_PRIVILEGE_CTG
+
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_INFO_IN msgrequest */
+#define MC_CMD_NVRAM_INFO_IN_LEN 4
+#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_INFO_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_OUT_LEN 24
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4
+
+/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4
+/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
+ */
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_START
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held). In an adapter bound to a TSA controller,
+ * MC_CMD_NVRAM_UPDATE_START can only be used on a subset of partition types
+ * i.e. static config, dynamic config and expansion ROM config. Attempting to
+ * perform this operation on a restricted partition will return the error
+ * EPERM.
+ */
+#define MC_CMD_NVRAM_UPDATE_START 0x38
+#undef MC_CMD_0x38_PRIVILEGE_CTG
+
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest: Legacy NVRAM_UPDATE_START request.
+ * Use NVRAM_UPDATE_START_V2_IN in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_UPDATE_START_V2_IN msgrequest: Extended NVRAM_UPDATE_START
+ * request with additional flags indicating version of command in use. See
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended functionality. Use
+ * paired up with NVRAM_UPDATE_FINISH_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_READ
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_READ 0x39
+#undef MC_CMD_0x39_PRIVILEGE_CTG
+
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_READ_IN msgrequest */
+#define MC_CMD_NVRAM_READ_IN_LEN 12
+#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
+
+/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
+#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
+/* Optional control info. If a partition is stored with an A/B versioning
+ * scheme (i.e. in more than one physical partition in NVRAM) the host can set
+ * this to control which underlying physical partition is used to read data
+ * from. This allows it to perform a read-modify-write-verify with the write
+ * lock continuously held by calling NVRAM_UPDATE_START, reading the old
+ * contents using MODE=TARGET_CURRENT, overwriting the old partition and then
+ * verifying by reading with MODE=TARGET_BACKUP.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
+/* enum: Same as omitting MODE: caller sees data in current partition unless it
+ * holds the write lock in which case it sees data in the partition it is
+ * updating.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0
+/* enum: Read from the current partition of an A/B pair, even if holding the
+ * write lock.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1
+/* enum: Read from the non-current (i.e. to be updated) partition of an A/B
+ * pair
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2
+
+/* MC_CMD_NVRAM_READ_OUT msgresponse */
+#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
+#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
+#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_NVRAM_WRITE
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_WRITE 0x3a
+#undef MC_CMD_0x3a_PRIVILEGE_CTG
+
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_WRITE_IN msgrequest */
+#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
+#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
+#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
+#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_ERASE
+ * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_ERASE 0x3b
+#undef MC_CMD_0x3b_PRIVILEGE_CTG
+
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_ERASE_IN msgrequest */
+#define MC_CMD_NVRAM_ERASE_IN_LEN 12
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4
+
+/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
+#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_FINISH
+ * Finish a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type/offset/
+ * length), EACCES (if PHY_LOCK required and not held). In an adapter bound to
+ * a TSA controller, MC_CMD_NVRAM_UPDATE_FINISH can only be used on a subset of
+ * partition types i.e. static config, dynamic config and expansion ROM config.
+ * Attempting to perform this operation on a restricted partition will return
+ * the error EPERM.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#undef MC_CMD_0x3c_PRIVILEGE_CTG
+
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest: Legacy NVRAM_UPDATE_FINISH
+ * request. Use NVRAM_UPDATE_FINISH_V2_IN in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
+ * request with additional flags indicating version of NVRAM_UPDATE commands in
+ * use. See MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended
+ * functionality. Use paired up with NVRAM_UPDATE_START_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
+ * response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT msgresponse:
+ *
+ * Extended NVRAM_UPDATE_FINISH response that communicates the result of secure
+ * firmware validation where applicable back to the host.
+ *
+ * Medford only: For signed firmware images, such as those for medford, the MC
+ * firmware verifies the signature before marking the firmware image as valid.
+ * This process takes a few seconds to complete. So is likely to take more than
+ * the MCDI timeout. Hence signature verification is initiated when
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, however, the
+ * MCDI command is run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
+/* Result of nvram update completion processing */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
+/* enum: Verify succeeded without any errors. */
+#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
+/* enum: CMS format verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2
+/* enum: Invalid CMS format in image metadata. */
+#define MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3
+/* enum: Message digest verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5
+/* enum: Signature verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6
+/* enum: There are no valid signatures in the image. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7
+/* enum: Trusted approvers verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8
+/* enum: The Trusted approver's list is empty. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9
+/* enum: Signature chain verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
+
+
+/***********************************/
+/* MC_CMD_REBOOT
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practise an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares with
+ * REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0
+ */
+#define MC_CMD_REBOOT 0x3d
+#undef MC_CMD_0x3d_PRIVILEGE_CTG
+
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_REBOOT_IN msgrequest */
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_IN_FLAGS_LEN 4
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
+
+/* MC_CMD_REBOOT_OUT msgresponse */
+#define MC_CMD_REBOOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SCHEDINFO
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice,maximum overrun), one for each thread, in ascending order of
+ * thread address.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+#undef MC_CMD_0x3e_PRIVILEGE_CTG
+
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SCHEDINFO_IN msgrequest */
+#define MC_CMD_SCHEDINFO_IN_LEN 0
+
+/* MC_CMD_SCHEDINFO_OUT msgresponse */
+#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
+#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
+#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
+#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+#undef MC_CMD_0x3f_PRIVILEGE_CTG
+
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_REBOOT_MODE_IN msgrequest */
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
+
+/* MC_CMD_REBOOT_MODE_OUT msgresponse */
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SENSOR_INFO
+ * Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 32 in the sensor mask was previously unused, and is no reserved for use
+ * as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None Returns: 0
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+#undef MC_CMD_0x41_PRIVILEGE_CTG
+
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SENSOR_INFO_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
+
+/* MC_CMD_SENSOR_INFO_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
+/* enum: Controller temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VSS08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage: mV */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (interval ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (interval ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: CCOM RTS temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+/* enum: Voltage supplied to the QSFP #0 from their power supply: mV */
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d
+/* enum: Controller die temperature (TDIODE): degC */
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+/* enum: Board temperature (front): degC */
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+/* enum: Board temperature (back): degC */
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* enum: 1.8v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V8 0x51
+/* enum: 2.5v power current: mA */
+#define MC_CMD_SENSOR_IN_I2V5 0x52
+/* enum: 3.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I3V3 0x53
+/* enum: 12v power current: mA */
+#define MC_CMD_SENSOR_IN_I12V0 0x54
+/* enum: 1.3v power: mV */
+#define MC_CMD_SENSOR_IN_1V3 0x55
+/* enum: 1.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V3 0x56
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+#define MC_CMD_SENSOR_ENTRY_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LEN 8
+#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
+#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO_OUT */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/* MC_CMD_SENSOR_ENTRY_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LEN 8 */
+/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_SENSORS
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor reading require more space than the LENGTH allows, then return
+ * EINVAL.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
+ */
+#define MC_CMD_READ_SENSORS 0x42
+#undef MC_CMD_0x42_PRIVILEGE_CTG
+
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_READ_SENSORS_IN msgrequest */
+#define MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
+
+/* MC_CMD_READ_SENSORS_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_OUT_LEN 0
+
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
+
+/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
+/* enum: Ok. */
+#define MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+/* enum: Sensor initialisation failed. */
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_STATE
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
+ */
+#define MC_CMD_GET_PHY_STATE 0x43
+#undef MC_CMD_0x43_PRIVILEGE_CTG
+
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_STATE_IN msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+
+/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
+
+
+/***********************************/
+/* MC_CMD_SETUP_8021QBB
+ * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.Qbb for a given priority.
+ */
+#define MC_CMD_SETUP_8021QBB 0x44
+
+/* MC_CMD_SETUP_8021QBB_IN msgrequest */
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
+
+/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_GET
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+#undef MC_CMD_0x45_PRIVILEGE_CTG
+
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
+
+/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#undef MC_CMD_0x46_PRIVILEGE_CTG
+
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#undef MC_CMD_0x47_PRIVILEGE_CTG
+
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAC_RESET_RESTORE
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
+ */
+#define MC_CMD_MAC_RESET_RESTORE 0x48
+
+/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
+#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+
+/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
+#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow tests that the driver copes gracefully). Locks
+ * required: None Returns: 0
+ */
+#define MC_CMD_TESTASSERT 0x49
+#undef MC_CMD_0x49_PRIVILEGE_CTG
+
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TESTASSERT_IN msgrequest */
+#define MC_CMD_TESTASSERT_IN_LEN 0
+
+/* MC_CMD_TESTASSERT_OUT msgresponse */
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_TESTASSERT_V2_IN msgrequest */
+#define MC_CMD_TESTASSERT_V2_IN_LEN 4
+/* How to provoke the assertion */
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
+/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
+ * you're testing firmware, this is what you want.
+ */
+#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
+/* enum: Assert using assert(0); */
+#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
+/* enum: Deliberately trigger a watchdog */
+#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
+/* enum: Deliberately trigger a trap by loading from an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
+/* enum: Deliberately trigger a trap by storing to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
+/* enum: Jump to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
+
+/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
+#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WORKAROUND
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround, that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL .
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#undef MC_CMD_0x4a_PRIVILEGE_CTG
+
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WORKAROUND_IN msgrequest */
+#define MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_WORKAROUND_BUG61265 0x7
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
+
+/* MC_CMD_WORKAROUND_OUT msgresponse */
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_MEDIA_INFO
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#undef MC_CMD_0x4b_PRIVILEGE_CTG
+
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TEST
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+#undef MC_CMD_0x4c_PRIVILEGE_CTG
+
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_NVRAM_TEST_IN msgrequest */
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_TEST_OUT msgresponse */
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4
+/* enum: Passed. */
+#define MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2
+
+
+/***********************************/
+/* MC_CMD_MRSFP_TWEAK
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+
+/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
+/* 0-8 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
+
+/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
+
+/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
+/* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
+/* direction */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
+
+
+/***********************************/
+/* MC_CMD_SENSOR_SET_LIMS
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#undef MC_CMD_0x4e_PRIVILEGE_CTG
+
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
+/* interpretation is is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
+/* interpretation is is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
+/* interpretation is is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
+
+/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
+#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RESOURCE_LIMITS
+ */
+#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
+
+/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
+#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
+
+/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+#undef MC_CMD_0x51_PRIVILEGE_CTG
+
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+#undef MC_CMD_0x52_PRIVILEGE_CTG
+
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+#undef MC_CMD_0x55_PRIVILEGE_CTG
+
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation
+ */
+#define MC_CMD_CLP 0x56
+#undef MC_CMD_0x56_PRIVILEGE_CTG
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_CLP_IN msgrequest */
+#define MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define MC_CMD_CLP_IN_OP_OFST 0
+#define MC_CMD_CLP_IN_OP_LEN 4
+/* enum: Return to factory default settings */
+#define MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define MC_CMD_CLP_IN_DEFAULT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_LEN 12
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+/* MAC address assigned to port */
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+/* Boot flag */
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+#undef MC_CMD_0x57_PRIVILEGE_CTG
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_MUM_IN msgrequest */
+#define MC_CMD_MUM_IN_LEN 4
+#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_HDR_LEN 4
+#define MC_CMD_MUM_IN_OP_LBN 0
+#define MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define MC_CMD_MUM_OP_QSFP 0xc
+/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
+ * level) from MUM
+ */
+#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define MC_CMD_MUM_IN_CMD_OFST 0
+#define MC_CMD_MUM_IN_CMD_LEN 4
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* ID of (device connected to MUM) to read from registers of */
+#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
+/* Number of words to read. */
+#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* ID of (device connected to MUM) to write to registers of */
+#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/* MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
+/* Words to write */
+#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* MUM I2C cmd code */
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
+/* Number of bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
+/* Number of bytes to read */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
+/* Bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define MC_CMD_MUM_IN_LOG_OP_LEN 4
+#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
+/* Enable/disable debug output to UART */
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* Bit-mask of clocks to be programmed */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
+#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* Enable/Disable FPGA config from flash */
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
+#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+/* returned data */
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_READ_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
+/* The second 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
+/* The second 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
+
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
+
+/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
+/* Discrete (soldered) DDR resistor strap info */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
+/* Number of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
+/* Array of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
+/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
+/* enum: SODIMM bank 2 (Bottom SODDIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
+/* enum: Total number of SODIMM banks */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
+/* enum: Values 5-15 are reserved for future usage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
+/* enum: No module present */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
+/* enum: Module present supported and powered on */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
+/* enum: Module present but bad type */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
+/* enum: Module present but incompatible voltage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
+/* enum: Module present but unknown SPD */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
+/* enum: Module present but slot cannot support it */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
+/* enum: Modules may or may not be present, but cannot establish contact by I2C
+ */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
+
+/* MC_CMD_RESOURCE_SPECIFIER enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+/* enum: None */
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+#define EVB_PORT_ID_PORT_ID_LEN 4
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function.. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output of second core on dual-core device */
+#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Primary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA 0xb00
+/* enum: Secondary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
+/* enum: FC firmware partition */
+#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
+/* enum: FC License partition */
+#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
+/* enum: Non-volatile log output partition for FC */
+#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: MUM firmware partition */
+#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: SUC firmware partition (this is intentionally an alias of
+ * MUM_FIRMWARE)
+ */
+#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: MUM Application table partition. */
+#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
+/* enum: MUM boot rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
+/* enum: UEFI expansion ROM if separate from PXE */
+#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
+/* enum: Used by the expansion ROM for logging */
+#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000
+/* enum: Used for XIP code of shmbooted images */
+#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
+/* enum: Spare partition 2 */
+#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
+/* enum: Manufacturing partition. Used during manufacture to pass information
+ * between XJTAG and Manftest.
+ */
+#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
+/* enum: Spare partition 4 */
+#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
+/* enum: Spare partition 5 */
+#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define NVRAM_PARTITION_TYPE_STATUS 0x1600
+/* enum: Spare partition 13 */
+#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700
+/* enum: Spare partition 14 */
+#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800
+/* enum: Spare partition 15 */
+#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900
+/* enum: Spare partition 16 */
+#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00
+/* enum: Factory defaults for dynamic configuration */
+#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00
+/* enum: Factory defaults for expansion ROM configuration */
+#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00
+/* enum: Field Replaceable Unit inventory information for use on IPMI
+ * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a
+ * subset of the information stored in this partition.
+ */
+#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_ID_LBN 0
+#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+
+/* LICENSED_APP_ID structuredef */
+#define LICENSED_APP_ID_LEN 4
+#define LICENSED_APP_ID_ID_OFST 0
+#define LICENSED_APP_ID_ID_LEN 4
+/* enum: OpenOnload */
+#define LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+/* enum: SolarSecure filter engine */
+#define LICENSED_APP_ID_SOLARSECURE 0x8
+/* enum: Performance monitor */
+#define LICENSED_APP_ID_PERF_MONITOR 0x10
+/* enum: SolarCapture Live */
+#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
+/* enum: Capture SolarSystem */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
+/* enum: Network Access Control */
+#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
+/* enum: TCP Direct */
+#define LICENSED_APP_ID_TCP_DIRECT 0x100
+/* enum: Low Latency */
+#define LICENSED_APP_ID_LOW_LATENCY 0x200
+/* enum: SolarCapture Tap */
+#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
+/* enum: Capture SolarSystem 40G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+/* enum: ScaleOut Onload */
+#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000
+/* enum: SCS Network Analytics Dashboard */
+#define LICENSED_APP_ID_DSHBRD 0x4000
+/* enum: SolarCapture Trading Analytics */
+#define LICENSED_APP_ID_SCATRD 0x8000
+#define LICENSED_APP_ID_ID_LBN 0
+#define LICENSED_APP_ID_ID_WIDTH 32
+
+/* LICENSED_FEATURES structuredef */
+#define LICENSED_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_FEATURES_MASK_OFST 0
+#define LICENSED_FEATURES_MASK_LEN 8
+#define LICENSED_FEATURES_MASK_LO_OFST 0
+#define LICENSED_FEATURES_MASK_HI_OFST 4
+#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_PIO_LBN 1
+#define LICENSED_FEATURES_PIO_WIDTH 1
+#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_FEATURES_CLOCK_LBN 3
+#define LICENSED_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_MASK_LBN 0
+#define LICENSED_FEATURES_MASK_WIDTH 64
+
+/* LICENSED_V3_APPS structuredef */
+#define LICENSED_V3_APPS_LEN 8
+/* Bitmask of licensed applications */
+#define LICENSED_V3_APPS_MASK_OFST 0
+#define LICENSED_V3_APPS_MASK_LEN 8
+#define LICENSED_V3_APPS_MASK_LO_OFST 0
+#define LICENSED_V3_APPS_MASK_HI_OFST 4
+#define LICENSED_V3_APPS_ONLOAD_LBN 0
+#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_PTP_LBN 1
+#define LICENSED_V3_APPS_PTP_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
+#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
+#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
+#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
+#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
+#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8
+#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
+#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9
+#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_DSHBRD_LBN 14
+#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define LICENSED_V3_APPS_SCATRD_LBN 15
+#define LICENSED_V3_APPS_SCATRD_WIDTH 1
+#define LICENSED_V3_APPS_MASK_LBN 0
+#define LICENSED_V3_APPS_MASK_WIDTH 64
+
+/* LICENSED_V3_FEATURES structuredef */
+#define LICENSED_V3_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_V3_FEATURES_MASK_OFST 0
+#define LICENSED_V3_FEATURES_MASK_LEN 8
+#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_PIO_LBN 1
+#define LICENSED_V3_FEATURES_PIO_WIDTH 1
+#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_V3_FEATURES_CLOCK_LBN 3
+#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_MASK_LBN 0
+#define LICENSED_V3_FEATURES_MASK_WIDTH 64
+
+/* TX_TIMESTAMP_EVENT structuredef */
+#define TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event, ordinary TX completion, low or high part of TX timestamp
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is a TX completion event for a CTPIO transmit. The event format
+ * is the same as for TX_EV_COMPLETION.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11
+/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_LO
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12
+/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_HI
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13
+/* enum: This is the low part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+/* enum: This is the high part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
+
+/* RSS_MODE structuredef */
+#define RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 - 15 which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value 0 effectively disables RSS spreading for the packet type.) The YAML
+ * generation tools require this structure to be a whole number of bytes wide,
+ * but only 4 bits are relevant.
+ */
+#define RSS_MODE_HASH_SELECTOR_OFST 0
+#define RSS_MODE_HASH_SELECTOR_LEN 1
+#define RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define RSS_MODE_HASH_DST_ADDR_LBN 1
+#define RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define RSS_MODE_HASH_SRC_PORT_LBN 2
+#define RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define RSS_MODE_HASH_DST_PORT_LBN 3
+#define RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define RSS_MODE_HASH_SELECTOR_LBN 0
+#define RSS_MODE_HASH_SELECTOR_WIDTH 8
+
+/* CTPIO_STATS_MAP structuredef */
+#define CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define CTPIO_STATS_MAP_VI_OFST 0
+#define CTPIO_STATS_MAP_VI_LEN 2
+#define CTPIO_STATS_MAP_VI_LBN 0
+#define CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define CTPIO_STATS_MAP_BUCKET_OFST 2
+#define CTPIO_STATS_MAP_BUCKET_LEN 2
+#define CTPIO_STATS_MAP_BUCKET_LBN 16
+#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
+/* MESSAGE_TYPE structuredef: When present this defines the meaning of a
+ * message, and is used to protect against chosen message attacks in signed
+ * messages, regardless their origin. The message type also defines the
+ * signature cryptographic algorithm, encoding, and message fields included in
+ * the signature. The values are used in different commands but must be unique
+ * across all commands, e.g. MC_CMD_TSA_BIND_IN_SECURE_UNBIND uses different
+ * message type than MC_CMD_SECURE_NIC_INFO_IN_STATUS.
+ */
+#define MESSAGE_TYPE_LEN 4
+#define MESSAGE_TYPE_MESSAGE_TYPE_OFST 0
+#define MESSAGE_TYPE_MESSAGE_TYPE_LEN 4
+#define MESSAGE_TYPE_UNUSED 0x0 /* enum */
+/* enum: Message type value for the response to a
+ * MC_CMD_TSA_BIND_IN_SECURE_UNBIND message. TSA_SECURE_UNBIND messages are
+ * ECDSA SECP384R1 signed using SHA384 message digest algorithm over fields
+ * MESSAGE_TYPE, TSANID, TSAID, and UNBINDTOKEN, and encoded as suggested by
+ * RFC6979 (section 2.4).
+ */
+#define MESSAGE_TYPE_TSA_SECURE_UNBIND 0x1
+/* enum: Message type value for the response to a
+ * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION message. TSA_SECURE_DECOMMISSION
+ * messages are ECDSA SECP384R1 signed using SHA384 message digest algorithm
+ * over fields MESSAGE_TYPE, TSAID, USER, and REASON, and encoded as suggested
+ * by RFC6979 (section 2.4).
+ */
+#define MESSAGE_TYPE_TSA_SECURE_DECOMMISSION 0x2
+/* enum: Message type value for the response to a
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS message. This enum value is not sequential
+ * to other message types for backwards compatibility as the message type for
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS was defined before the existence of this
+ * global enum.
+ */
+#define MESSAGE_TYPE_SECURE_NIC_INFO_STATUS 0xdb4
+#define MESSAGE_TYPE_MESSAGE_TYPE_LBN 0
+#define MESSAGE_TYPE_MESSAGE_TYPE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+#undef MC_CMD_0x50_PRIVILEGE_CTG
+
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+#undef MC_CMD_0x80_PRIVILEGE_CTG
+
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4
+
+/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
+#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4
+/* tbd */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4
+/* Actual configuration applied on the card */
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fiber channel over ethernet. */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fiber channel over IP over ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+#undef MC_CMD_0x81_PRIVILEGE_CTG
+
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10
+#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
+
+/* MC_CMD_INIT_RXQ_V3_IN msgrequest */
+#define MC_CMD_INIT_RXQ_V3_IN_LEN 560
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_V3_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_V3_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_V3_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548
+#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V3_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_V3_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x82
+#undef MC_CMD_0x82_PRIVILEGE_CTG
+
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+/* Flags related to Qbb flow control mode. */
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY
+ */
+#define MC_CMD_FINI_EVQ 0x83
+#undef MC_CMD_0x83_PRIVILEGE_CTG
+
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown a RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+#undef MC_CMD_0x84_PRIVILEGE_CTG
+
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+#undef MC_CMD_0x85_PRIVILEGE_CTG
+
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+#undef MC_CMD_0x86_PRIVILEGE_CTG
+
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supercedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+#undef MC_CMD_0x5b_PRIVILEGE_CTG
+
+#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_LEN 4
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define MC_CMD_PROXY_CMD_OUT_LEN 0
+
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
+/* enum: An invalid handle. */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
+#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+#undef MC_CMD_0x58_PRIVILEGE_CTG
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+
+/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
+ */
+#define MC_CMD_PROXY_COMPLETE 0x5f
+#undef MC_CMD_0x5f_PRIVILEGE_CTG
+
+#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
+#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
+/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
+ * is stored in the REPLY_BUFF.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
+/* enum: The operation has been authorized. The originating function may now
+ * try again.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
+/* enum: The operation has been declined. */
+#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
+/* enum: The authorization failed because the relevant application did not
+ * respond in time.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
+
+/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
+#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#undef MC_CMD_0x87_PRIVILEGE_CTG
+
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#undef MC_CMD_0x88_PRIVILEGE_CTG
+
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
+/* ID */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
+/* Num entries */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#undef MC_CMD_0x89_PRIVILEGE_CTG
+
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+#undef MC_CMD_0x8a_PRIVILEGE_CTG
+
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_IN_OP_LEN 4
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional
+ * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via
+ * its rte_flow API. This extension is only useful with the sfc_efx driver
+ * included as part of DPDK, used in conjunction with the dpdk datapath
+ * firmware variant.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_LEN 180
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_V3_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_V3_IN_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_LEN 4
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_V3_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_LEN 4
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_LEN 4
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16
+/* Set an action for all packets matching this filter. The DPDK driver and dpdk
+ * f/w variant use their own specific delivery structures, which are documented
+ * in the DPDK Firmware Driver Interface (SF-119419-TC). Requesting anything
+ * other than MATCH_ACTION_NONE when the NIC is running another f/w variant
+ * will cause the filter insertion to fail with ENOTSUP.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4
+/* enum: do nothing extra */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_NONE 0x0
+/* enum: Set the match flag in the packet prefix for packets matching the
+ * filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "FLAG" action.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG 0x1
+/* enum: Insert MATCH_MARK_VALUE into the packet prefix for packets matching
+ * the filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "MARK" action.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2
+/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the
+ * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX)
+ * will cause the filter insertion to fail with EINVAL.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4
+
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_OUT/HANDLE */
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#undef MC_CMD_0xe4_PRIVILEGE_CTG
+
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
+ * encapsulated frames, which follow a different match sequence to normal
+ * frames (Medford only)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
+/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse:
+ * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* a version number representing the set of rule lookups that are implemented
+ * by the currently running firmware
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 4
+/* enum: implements lookup sequences described in SF-114946-SW draft C */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+/* the number of nodes in the subnet map */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4
+/* the number of entries in one subnet map node */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4
+/* minimum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4
+/* maximum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4
+/* the number of entries in the local and remote port range maps */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4
+/* minimum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4
+/* maximum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PARSER_DISP_RW
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
+ * Please note that this interface is only of use to debug tools which have
+ * knowledge of firmware and hardware data structures; nothing here is intended
+ * for use by normal driver code. Note that although this command is in the
+ * Admin privilege group, in tamperproof adapters, only read operations are
+ * permitted.
+ */
+#define MC_CMD_PARSER_DISP_RW 0xe5
+#undef MC_CMD_0xe5_PRIVILEGE_CTG
+
+#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
+#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
+/* identifies the target of the operation */
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
+/* enum: RX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+/* enum: TX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine (with original metadata format). Deprecated; used only
+ * by cmdclient as a fallback for very old Huntington firmware, and not
+ * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
+ * instead.
+ */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
+/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
+/* enum: RX1 dispatcher CPU (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
+/* enum: Miscellaneous other state (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
+/* identifies the type of operation requested */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
+#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
+/* enum: Read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on
+ * tamperproof adapters.
+ */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not
+ * permitted on tamperproof adapters.
+ */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+/* data memory address (DICPU targets) or LUE index (LUE targets) */
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
+/* selector (for MISC_STATE target) */
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
+/* enum: Port to datapath mapping */
+#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
+/* value to write (for DMEM writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
+/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
+/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
+/* value to write (for LUE writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
+
+/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
+#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
+/* value read (for DMEM reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
+/* value read (for LUE reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
+/* up to 8 32-bit words of additional soft state from the LUE manager (the
+ * exact content is firmware-dependent and intended only for debug use)
+ */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
+/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
+#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
+#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
+
+
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ * Get number of PFs on the device.
+ */
+#define MC_CMD_GET_PF_COUNT 0xb6
+#undef MC_CMD_0xb6_PRIVILEGE_CTG
+
+#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
+/* Identifies the number of PFs on the device. */
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ * Set number of PFs on the device.
+ */
+#define MC_CMD_SET_PF_COUNT 0xb7
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define MC_CMD_SET_PF_COUNT_IN_LEN 4
+/* New number of PFs on the device. */
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#undef MC_CMD_0xb8_PRIVILEGE_CTG
+
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#undef MC_CMD_0xb9_PRIVILEGE_CTG
+
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+#undef MC_CMD_0x8b_PRIVILEGE_CTG
+
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
+ * Use extended version in new code.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
+
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+#undef MC_CMD_0x8c_PRIVILEGE_CTG
+
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+#undef MC_CMD_0xba_PRIVILEGE_CTG
+
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+#undef MC_CMD_0xbb_PRIVILEGE_CTG
+
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about number of VI's and base VI number allocated to this
+ * function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#undef MC_CMD_0x8d_PRIVILEGE_CTG
+
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+#undef MC_CMD_0x8e_PRIVILEGE_CTG
+
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+#undef MC_CMD_0x8f_PRIVILEGE_CTG
+
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+#undef MC_CMD_0x90_PRIVILEGE_CTG
+
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_TLP_PROCESSING
+ * Get TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+#undef MC_CMD_0xb0_PRIVILEGE_CTG
+
+#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
+/* VI number to get information for. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
+/* Set no snoop bit for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_VI_TLP_PROCESSING
+ * Set TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+#undef MC_CMD_0xb1_PRIVILEGE_CTG
+
+#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
+/* VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
+/* Set the no snoop bit for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
+ * Get global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+#undef MC_CMD_0xbc_PRIVILEGE_CTG
+
+#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
+/* enum: MISC. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+/* enum: IDO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+/* enum: RO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+/* enum: TPH Type. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
+
+
+/***********************************/
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
+ * Set global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+#undef MC_CMD_0xbd_PRIVILEGE_CTG
+
+#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SATELLITE_DOWNLOAD
+ * Download a new set of images to the satellite CPUs from the host.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+#undef MC_CMD_0x91_PRIVILEGE_CTG
+
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
+ * are subtle, and so downloads must proceed in a number of phases.
+ *
+ * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
+ * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
+ * be a checksum (a simple 32-bit sum) of the transferred data. An individual
+ * download may be aborted using CHUNK_ID_ABORT.
+ *
+ * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
+ * similar to PHASE_IMEMS.
+ *
+ * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * After any error (a requested abort is not considered to be an error) the
+ * sequence must be restarted from PHASE_RESET.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
+/* Download phase. (Note: the IDLE phase is used internally and is never valid
+ * in a command from the host.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+/* Target for download. (These match the blob numbers defined in
+ * mc_flash_layout.h.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
+/* enum: Last chunk, containing checksum rather than data */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+/* enum: Abort download of this item */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+/* Length of this chunk in bytes */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
+/* Data for this chunk */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
+
+/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
+/* Extra status information */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
+/* enum: Code download OK, completed. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+/* enum: Code download aborted as requested. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+/* enum: Code download OK so far, send next chunk. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+/* enum: Download phases out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+/* enum: Bad target for this phase */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+/* enum: Chunk ID out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+/* enum: Chunk length zero or too large */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+/* enum: Checksum was incorrect */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
+
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities.
+ *
+ * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
+ * reference inherent device capabilities as opposed to current NVRAM config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+#undef MC_CMD_0xbe_PRIVILEGE_CTG
+
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
+
+/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2
+/* Type of command/response */
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4
+/* enum: MCDI command directed to or response originating from the MC. */
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0
+/* enum: MCDI command directed to a TSA controller. MCDI responses of this type
+ * are not defined.
+ */
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_ALLOC
+ * Allocate a pacer bucket (for qau rp or a snapper test)
+ */
+#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+#undef MC_CMD_0xb2_PRIVILEGE_CTG
+
+#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
+
+/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_FREE
+ * Free a pacer bucket
+ */
+#define MC_CMD_TCM_BUCKET_FREE 0xb3
+#undef MC_CMD_0xb3_PRIVILEGE_CTG
+
+#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
+
+/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_INIT
+ * Initialise pacer bucket with a given rate
+ */
+#define MC_CMD_TCM_BUCKET_INIT 0xb4
+#undef MC_CMD_0xb4_PRIVILEGE_CTG
+
+#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
+
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
+/* the desired maximum fill level */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
+
+/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_TXQ_INIT
+ * Initialise txq in pacer with given options or set options
+ */
+#define MC_CMD_TCM_TXQ_INIT 0xb5
+#undef MC_CMD_0xb5_PRIVILEGE_CTG
+
+#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
+/* bitmask of the priority queues this txq is inserted into when inserted. */
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
+
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
+/* bitmask of the priority queues this txq is inserted into when inserted. */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
+
+/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
+#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LINK_PIOBUF
+ * Link a push I/O buffer to a TxQ
+ */
+#define MC_CMD_LINK_PIOBUF 0x92
+#undef MC_CMD_0x92_PRIVILEGE_CTG
+
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_LINK_PIOBUF_IN msgrequest */
+#define MC_CMD_LINK_PIOBUF_IN_LEN 8
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
+
+/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UNLINK_PIOBUF
+ * Unlink a push I/O buffer from a TxQ
+ */
+#define MC_CMD_UNLINK_PIOBUF 0x93
+#undef MC_CMD_0x93_PRIVILEGE_CTG
+
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
+#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
+
+/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_ALLOC
+ * allocate and initialise a v-switch.
+ */
+#define MC_CMD_VSWITCH_ALLOC 0x94
+#undef MC_CMD_0x94_PRIVILEGE_CTG
+
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
+#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
+/* The port to connect to the v-switch's upstream port. */
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of v-switch to create. */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greated, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero of greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+#undef MC_CMD_0x95_PRIVILEGE_CTG
+
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some config of v-switch. For now this command is an empty placeholder.
+ * It may be used to check if a v-switch is connected to a given EVB port (if
+ * not, then the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+#undef MC_CMD_0x63_PRIVILEGE_CTG
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+#undef MC_CMD_0x96_PRIVILEGE_CTG
+
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of the new v-port. */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port packets traffic which don't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: An test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+#undef MC_CMD_0x97_PRIVILEGE_CTG
+
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+#undef MC_CMD_0x98_PRIVILEGE_CTG
+
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
+/* The port to connect to the v-adaptor's port. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* Flags controlling v-adaptor creation */
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4
+/* The number of VLAN tags to transparently insert/remove. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * de-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+#undef MC_CMD_0x99_PRIVILEGE_CTG
+
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+#undef MC_CMD_0x5d_PRIVILEGE_CTG
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The new MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+#undef MC_CMD_0x5e_PRIVILEGE_CTG
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_QUERY
+ * read some config of v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_QUERY 0x61
+#undef MC_CMD_0x61_PRIVILEGE_CTG
+
+#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
+#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
+#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
+/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
+/* The number of VLAN tags that may still be added */
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#undef MC_CMD_0x9a_PRIVILEGE_CTG
+
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+#undef MC_CMD_0x9b_PRIVILEGE_CTG
+
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
+/* Write enable bits 0-3, set to write, clear to read. */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data always included
+ * regardless of state of write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#undef MC_CMD_0x9c_PRIVILEGE_CTG
+
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#undef MC_CMD_0x9d_PRIVILEGE_CTG
+
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#undef MC_CMD_0x9e_PRIVILEGE_CTG
+
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* Number of queues spanned by this context, in the range 1-64; valid offsets
+ * in the indirection table will be in the range 0 to NUM_QUEUES-1.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4
+/* enum: guaranteed invalid RSS context handle value */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+#undef MC_CMD_0x9f_PRIVILEGE_CTG
+
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+#undef MC_CMD_0xa0_PRIVILEGE_CTG
+
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+#undef MC_CMD_0xa1_PRIVILEGE_CTG
+
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+#undef MC_CMD_0xa2_PRIVILEGE_CTG
+
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_TABLE
+ * Get the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+#undef MC_CMD_0xa3_PRIVILEGE_CTG
+
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS
+ * Set various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+#undef MC_CMD_0xe1_PRIVILEGE_CTG
+
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
+/* Hash control flags. The _EN bits are always supported, but new modes are
+ * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
+ * in this case, the MODE fields may be set to non-zero values, and will take
+ * effect regardless of the settings of the _EN flags. See the RSS_MODE
+ * structure for the meaning of the mode bits. Drivers must check the
+ * capability before trying to set any _MODE fields, as older firmware will
+ * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In
+ * the case where all the _MODE flags are zero, the _EN flags take effect,
+ * providing backward compatibility for existing drivers. (Setting all _MODE
+ * *and* all _EN flags to zero is valid, to disable RSS spreading for that
+ * particular packet type.)
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#undef MC_CMD_0xe2_PRIVILEGE_CTG
+
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags. If all _MODE bits are zero (which will always be true
+ * for older firmware which does not report the ADDITIONAL_RSS_MODES
+ * capability), the _EN bits report the state. If any _MODE bits are non-zero
+ * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
+ * then the _EN bits should be disregarded, although the _MODE flags are
+ * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
+ * context and in the case where the _EN flags were used in the SET. This
+ * provides backward compatibility: old drivers will not be attempting to
+ * derive any meaning from the _MODE bits (and can never set them to any value
+ * not representable by the _EN bits); new drivers can always determine the
+ * mode by looking only at the _MODE bits; the value returned by a GET can
+ * always be used for a SET regardless of old/new driver vs. old/new firmware.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_ALLOC
+ * Allocate a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+#undef MC_CMD_0xa4_PRIVILEGE_CTG
+
+#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
+/* The handle of the owning upstream port */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
+ * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
+ * referenced RSS contexts must span no more than this number.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
+/* The handle of the new .1p mapping. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
+/* enum: guaranteed invalid .1p mapping handle value */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_FREE
+ * Free a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+#undef MC_CMD_0xa5_PRIVILEGE_CTG
+
+#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
+
+/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE
+ * Set the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+#undef MC_CMD_0xa6_PRIVILEGE_CTG
+
+#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE
+ * Get the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+#undef MC_CMD_0xa7_PRIVILEGE_CTG
+
+#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_VECTOR_CFG
+ * Get Interrupt Vector config for this PF.
+ */
+#define MC_CMD_GET_VECTOR_CFG 0xbf
+#undef MC_CMD_0xbf_PRIVILEGE_CTG
+
+#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
+
+/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
+/* Base absolute interrupt vector number. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
+/* Number of interrupt vectors allocate to this PF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ * Set Interrupt Vector config for this PF.
+ */
+#define MC_CMD_SET_VECTOR_CFG 0xc0
+#undef MC_CMD_0xc0_PRIVILEGE_CTG
+
+#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
+ * let the system find a suitable base.
+ */
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
+/* Number of interrupt vectors allocate to this PF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#undef MC_CMD_0xa8_PRIVILEGE_CTG
+
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#undef MC_CMD_0xa9_PRIVILEGE_CTG
+
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to add */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#undef MC_CMD_0xaa_PRIVILEGE_CTG
+
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+
+
+/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+#undef MC_CMD_0xeb_PRIVILEGE_CTG
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
+/* Flags requesting what should be changed. */
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
+/* MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * read some config of v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+#undef MC_CMD_0x62_PRIVILEGE_CTG
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries, and does not use chunk handles. All entries must be in
+ * range, and used for q page mapping, Although the latter restriction may be
+ * lifted in future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+#undef MC_CMD_0xab_PRIVILEGE_CTG
+
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+/* Raw buffer table entries, layed out as BUFTBL_ENTRY. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_SET_RXDP_CONFIG
+ * Set global RXDP configuration settings
+ */
+#define MC_CMD_SET_RXDP_CONFIG 0xc1
+#undef MC_CMD_0xc1_PRIVILEGE_CTG
+
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
+/* enum: pad to 64 bytes */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
+/* enum: pad to 128 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
+/* enum: pad to 256 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
+
+/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RXDP_CONFIG
+ * Get global RXDP configuration settings
+ */
+#define MC_CMD_GET_RXDP_CONFIG 0xc2
+#undef MC_CMD_0xc2_PRIVILEGE_CTG
+
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
+/* Enum values, see field(s): */
+/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and PDCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+#undef MC_CMD_0xac_PRIVILEGE_CTG
+
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost reboot.
+ */
+#define MC_CMD_SET_CLOCK 0xad
+#undef MC_CMD_0xad_PRIVILEGE_CTG
+
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_CLOCK_IN msgrequest */
+#define MC_CMD_SET_CLOCK_IN_LEN 28
+/* Requested frequency in MHz for system clock domain */
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
+/* enum: Leave the system clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for inter-core clock domain */
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
+/* enum: Leave the inter-core clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for DPCPU clock domain */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
+/* enum: Leave the DPCPU clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for PCS clock domain */
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
+/* enum: Leave the PCS clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for MC clock domain */
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
+/* enum: Leave the MC clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for rmon clock domain */
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
+/* enum: Leave the rmon clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for vswitch clock domain */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
+/* enum: Leave the vswitch clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
+
+/* MC_CMD_SET_CLOCK_OUT msgresponse */
+#define MC_CMD_SET_CLOCK_OUT_LEN 28
+/* Resulting system frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
+/* enum: The system clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting inter-core frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
+/* enum: The inter-core clock domain doesn't exist / isn't used */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
+/* Resulting DPCPU frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
+/* enum: The dpcpu clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
+/* Resulting PCS frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
+/* enum: The PCS clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting MC frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
+/* enum: The MC clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
+/* Resulting rmon frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
+/* enum: The rmon clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
+/* Resulting vswitch frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
+/* enum: The vswitch clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
+
+
+/***********************************/
+/* MC_CMD_DPCPU_RPC
+ * Send an arbitrary DPCPU message.
+ */
+#define MC_CMD_DPCPU_RPC 0xae
+#undef MC_CMD_0xae_PRIVILEGE_CTG
+
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DPCPU_RPC_IN msgrequest */
+#define MC_CMD_DPCPU_RPC_IN_LEN 36
+#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
+/* enum: RxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
+/* enum: TxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
+/* enum: TxDPCPU1 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* enum: RxDPCPU1 (Medford only) */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
+/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_RX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
+/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_TX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
+/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
+ * initialised to zero
+ */
+#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
+/* Register data to write. Only valid in write/write-read. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
+/* Register address. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
+
+/* MC_CMD_DPCPU_RPC_OUT msgresponse */
+#define MC_CMD_DPCPU_RPC_OUT_LEN 36
+#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
+/* DATA */
+#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#undef MC_CMD_0xe3_PRIVILEGE_CTG
+
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to base for function. */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+#undef MC_CMD_0xe6_PRIVILEGE_CTG
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CAP_BLK_READ
+ * Read multiple 64bit words from capture block memory
+ */
+#define MC_CMD_CAP_BLK_READ 0xe7
+#undef MC_CMD_0xe7_PRIVILEGE_CTG
+
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_CAP_BLK_READ_IN msgrequest */
+#define MC_CMD_CAP_BLK_READ_IN_LEN 12
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
+
+/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
+#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
+#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
+
+
+/***********************************/
+/* MC_CMD_DUMP_DO
+ * Take a dump of the DUT state
+ */
+#define MC_CMD_DUMP_DO 0xe8
+#undef MC_CMD_0xe8_PRIVILEGE_CTG
+
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DUMP_DO_IN msgrequest */
+#define MC_CMD_DUMP_DO_IN_LEN 52
+#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_PADDING_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
+/* enum: The uart port this command was received over (if using a uart
+ * transport)
+ */
+#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
+
+/* MC_CMD_DUMP_DO_OUT msgresponse */
+#define MC_CMD_DUMP_DO_OUT_LEN 4
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED
+ * Configure unsolicited dumps
+ */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+#undef MC_CMD_0xe9_PRIVILEGE_CTG
+
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+#undef MC_CMD_0xea_PRIVILEGE_CTG
+
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, eg voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information. PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+#undef MC_CMD_0xec_PRIVILEGE_CTG
+
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, chip enters quiescent
+ * mode, calling function gets exclusive MCDI ownership. The only way out is
+ * reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#undef MC_CMD_0xed_PRIVILEGE_CTG
+
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_SEND_DATA
+ * Send checksummed[sic] block of data over the uart. Response is a placeholder
+ * should we wish to make this reliable; currently requests are fire-and-
+ * forget.
+ */
+#define MC_CMD_UART_SEND_DATA 0xee
+#undef MC_CMD_0xee_PRIVILEGE_CTG
+
+#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
+#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
+#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
+/* Offset at which to write the data */
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
+/* Length of data */
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
+/* Reserved for future use */
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+
+/* MC_CMD_UART_SEND_DATA_IN msgresponse */
+#define MC_CMD_UART_SEND_DATA_IN_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_RECV_DATA
+ * Request checksummed[sic] block of data over the uart. Only a placeholder,
+ * subject to change and not currently implemented.
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+#undef MC_CMD_0xef_PRIVILEGE_CTG
+
+#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
+/* Offset from which to read the data */
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
+/* Offset at which to write the data */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
+#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device One-Time-Programmable (OTP) Fuses
+ */
+#define MC_CMD_READ_FUSES 0xf0
+#undef MC_CMD_0xf0_PRIVILEGE_CTG
+
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
+
+
+/***********************************/
+/* MC_CMD_KR_TUNE
+ * Get or set KR Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_KR_TUNE 0xf1
+#undef MC_CMD_0xf1_PRIVILEGE_CTG
+
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_KR_TUNE_IN msgrequest */
+#define MC_CMD_KR_TUNE_IN_LENMIN 4
+#define MC_CMD_KR_TUNE_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
+/* enum: Force KR Serdes reset / recalibration */
+#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Read Figure Of Merit (eye quality, higher is better). */
+#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7
+/* enum: Start/stop link training frames */
+#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_RUN 0x8
+/* enum: Issue KR link training command (control training coefficients) */
+#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_CMD 0x9
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_KR_TUNE_OUT msgresponse */
+#define MC_CMD_KR_TUNE_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15, Huntington) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15, Huntington) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max
+ * positive, Medford - 0-31)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
+/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-31)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
+/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+/* enum: Edge DFE DLEV (0-128 for Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
+/* enum: Variable Gain Amplifier (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
+/* enum: CTLE EQ Capacitor (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (0-7, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+/* enum: CTLE gain (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb
+/* enum: CTLE pole (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc
+/* enum: CTLE peaking (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd
+/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe
+/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf
+/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10
+/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11
+/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12
+/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13
+/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14
+/* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15
+/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16
+/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17
+/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18
+/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19
+/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a
+/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5))) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b
+/* enum: Negative h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29)))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c
+/* enum: Negative h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29)))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d
+/* enum: Positive h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29)))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e
+/* enum: Positive h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29)))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f
+/* enum: CDR calibration loop code (Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20
+/* enum: CDR integral loop code (Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TX Amplitude (Huntington, Medford, Medford2) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+/* enum: De-Emphasis Tap1 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+/* enum: De-Emphasis Tap2 Fine (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+/* enum: Pre-Emphasis Magnitude (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+/* enum: Pre-Emphasis Fine (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+/* enum: TX Slew Rate Coarse control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+/* enum: TX Slew Rate Fine control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+/* enum: TX Termination Impedance control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
+/* enum: TX Amplitude Fine control (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
+/* enum: Pre-shoot Tap (Medford, Medford2) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
+/* enum: De-emphasis Tap (Medford, Medford2) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
+#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_LBN 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_WIDTH 8
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_LBN 31
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_WIDTH 1
+/* Scan duration / cycle count */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
+/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_LBN 0
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_WIDTH 8
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_LBN 31
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_WIDTH 1
+
+/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4
+
+/* MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN msgrequest */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_OFST 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_LEN 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_STOP 0x0 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_START 0x1 /* enum */
+
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN msgrequest */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LEN 28
+/* Requested operation */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_LEN 4
+/* Set INITIALIZE state */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_OFST 8
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_LEN 4
+/* Set PRESET state */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_OFST 12
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_LEN 4
+/* C(-1) request */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_OFST 16
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_LEN 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD 0x0 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_INCREMENT 0x1 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_DECREMENT 0x2 /* enum */
+/* C(0) request */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_OFST 20
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(+1) request */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_OFST 24
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT msgresponse */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN 24
+/* C(-1) status */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_OFST 0
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_LEN 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_NOT_UPDATED 0x0 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_UPDATED 0x1 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MINIMUM 0x2 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MAXIMUM 0x3 /* enum */
+/* C(0) status */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_OFST 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(+1) status */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_OFST 8
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(-1) value */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_OFST 12
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_LEN 4
+/* C(0) value */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_OFST 16
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_LEN 4
+/* C(+1) status */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_OFST 20
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PCIE_TUNE
+ * Get or set PCIE Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_PCIE_TUNE 0xf2
+#undef MC_CMD_0xf2_PRIVILEGE_CTG
+
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_PCIE_TUNE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
+#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
+#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_PCIE_TUNE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
+/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
+/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
+/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
+/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+/* enum: DFE DLev */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
+/* enum: Figure of Merit */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
+/* enum: CTLE EQ Capacitor (HF Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (DC Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TxMargin (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+/* enum: TxSwing (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+/* enum: De-emphasis coefficient C(-1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+/* enum: De-emphasis coefficient C(0) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+/* enum: De-emphasis coefficient C(+1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
+
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgrequest */
+#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - not used for V3 licensing
+ */
+#define MC_CMD_LICENSING 0xf3
+#undef MC_CMD_0xf3_PRIVILEGE_CTG
+
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+#define MC_CMD_LICENSING_IN_OP_LEN 4
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field are private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_V3 0xd0
+#undef MC_CMD_0xd0_PRIVILEGE_CTG
+
+#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_IN msgrequest */
+#define MC_CMD_LICENSING_V3_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_IN_OP_LEN 4
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses Returns EAGAIN if license
+ * processing (updating) has been started but not yet completed.
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+
+/* MC_CMD_LICENSING_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_V3_OUT_LEN 88
+/* count of keys which are valid */
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
+/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
+/* count of keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
+/* count of keys which are invalid due to being for the wrong node */
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field are private to the firmware)
+ */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
+
+
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+#undef MC_CMD_0xd1_PRIVILEGE_CTG
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+/* type of license (eg 3) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
+/* length of the license ID (in bytes) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
+/* the unique license ID of the adapter */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+
+
+/***********************************/
+/* MC_CMD_MC2MC_PROXY
+ * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
+ * This will fail on a single-core system.
+ */
+#define MC_CMD_MC2MC_PROXY 0xf4
+#undef MC_CMD_0xf4_PRIVILEGE_CTG
+
+#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.) Not used for V3 licensing
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#undef MC_CMD_0xf5_PRIVILEGE_CTG
+
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+#undef MC_CMD_0xd2_PRIVILEGE_CTG
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of an one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+#undef MC_CMD_0xd3_PRIVILEGE_CTG
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+#undef MC_CMD_0xf6_PRIVILEGE_CTG
+
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* enum: mask application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
+/* flag */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_VALIDATE_APP
+ * Perform validation for an individual licensed application - V3 licensing
+ * (Medford)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+#undef MC_CMD_0xd4_PRIVILEGE_CTG
+
+#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
+/* challenge for validation (384 bits) */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
+/* application ID expressed as a single bit mask */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
+/* validation response to challenge in the form of ECDSA signature consisting
+ * of two 384-bit integers, r and s, in big-endian order. The signature signs a
+ * SHA-384 digest of a message constructed from the concatenation of the input
+ * message and the remaining fields of this output message, e.g. challenge[48
+ * bytes] ... expiry_time[4 bytes] ...
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
+/* application expiry time */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
+/* application expiry units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
+/* enum: expiry units are accounting units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+/* enum: expiry units are calendar days */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+/* base MAC address of the NIC stored in NVRAM (note that this is a constant
+ * value for a given NIC regardless which function is calling, effectively this
+ * is PF0 base MAC address)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
+/* MAC address of v-adaptor associated with the client. If no such v-adapator
+ * exists, then the field is filled with 0xFF.
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_MASK_FEATURES
+ * Mask features - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
+#undef MC_CMD_0xd5_PRIVILEGE_CTG
+
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
+/* mask to be applied to features to be changed */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+/* whether to turn on or turn off the masked features */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
+/* enum: turn the features off */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+/* enum: turn the features back on */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3_TEMPORARY
+ * Perform operations to support installation of a single temporary license in
+ * the adapter, in addition to those found in the licensing partition. See
+ * SF-116124-SW for an overview of how this could be used. The license is
+ * stored in MC persistent data and so will survive a MC reboot, but will be
+ * erased when the adapter is power cycled
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
+#undef MC_CMD_0xd6_PRIVILEGE_CTG
+
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
+/* operation code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
+/* enum: install a new license, overwriting any existing temporary license.
+ * This is an asynchronous operation owing to the time taken to validate an
+ * ECDSA license
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
+/* enum: clear the license immediately rather than waiting for the next power
+ * cycle
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
+/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
+ * operation
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
+/* ECDSA license and signature */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
+/* status code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
+/* enum: finished validating and installing license */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
+/* enum: license validation and installation in progress */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
+/* enum: licensing error. More specific error messages are not provided to
+ * avoid exposing details of the licensing system to the client
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_SNIFF_CONFIG
+ * Configure RX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic delivered to the host (non-promiscuous
+ * mode) or all traffic arriving at the port (promiscuous mode) may be
+ * delivered to a specific queue, or a set of queues with RSS.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+#undef MC_CMD_0xf7_PRIVILEGE_CTG
+
+#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_SNIFF_CONFIG
+ * Obtain the current RX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+#undef MC_CMD_0xf8_PRIVILEGE_CTG
+
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+#undef MC_CMD_0xf9_PRIVILEGE_CTG
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+/* the type of configuration setting to change */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+#undef MC_CMD_0xfa_PRIVILEGE_CTG
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
+ * Configure TX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic transmitted through the port may be
+ * delivered to a specific queue, or a set of queues with RSS. Note that these
+ * packets are delivered with transmit timestamps in the packet prefix, not
+ * receive timestamps, so it is likely that the queue(s) will need to be
+ * dedicated as TX sniff receivers.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+#undef MC_CMD_0xfb_PRIVILEGE_CTG
+
+#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
+ * Obtain the current TX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+#undef MC_CMD_0xfc_PRIVILEGE_CTG
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per queue rx error stats.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+#undef MC_CMD_0xfe_PRIVILEGE_CTG
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIE resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+#undef MC_CMD_0xfd_PRIVILEGE_CTG
+
+#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
+/* The maximum number of VFs the device can expose in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+#undef MC_CMD_0xff_PRIVILEGE_CTG
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+#undef MC_CMD_0x100_PRIVILEGE_CTG
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define MC_CMD_READ_ATB_IN_LEN 16
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
+#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define MC_CMD_READ_ATB_OUT_LEN 4
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+#undef MC_CMD_0x59_PRIVILEGE_CTG
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+#undef MC_CMD_0x5a_PRIVILEGE_CTG
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set e.g. PF 0 = 0xFFFF0000, VF
+ * 1,3 = 0x00030001
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows to set the TX packets' source MAC address to any arbitrary MAC
+ * adress.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000
+/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for
+ * administrator-level operations that are not allowed from the local host once
+ * an adapter has Bound to a remote ServerLock Controller (see doxbox
+ * SF-117064-DG for background).
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, always all the privileges are reported. */
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+#undef MC_CMD_0x5c_PRIVILEGE_CTG
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set, must be a VF
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+#undef MC_CMD_0x101_PRIVILEGE_CTG
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
+/* Maximum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+#undef MC_CMD_0x102_PRIVILEGE_CTG
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
+/* Total number of mismatched bits between pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
+/* Total number of mismatched bits between pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only effects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+#undef MC_CMD_0x60_PRIVILEGE_CTG
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_BYTES
+ * Read XPM memory
+ */
+#define MC_CMD_XPM_READ_BYTES 0x103
+#undef MC_CMD_0x103_PRIVILEGE_CTG
+
+#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
+#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
+/* Count (bytes) */
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
+
+/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+/* Data */
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_BYTES
+ * Write XPM memory
+ */
+#define MC_CMD_XPM_WRITE_BYTES 0x104
+#undef MC_CMD_0x104_PRIVILEGE_CTG
+
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+/* Start address (byte) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
+/* Count (bytes) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
+/* Data */
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+#undef MC_CMD_0x105_PRIVILEGE_CTG
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
+/* Sector size */
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+/* Sector type */
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
+/* Sector data */
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+#undef MC_CMD_0x106_PRIVILEGE_CTG
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space available). If 0, only one write attempt is
+ * made. Note that uncorrectable errors are unlikely, thanks to XPM self-repair
+ * mechanism.
+ */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
+/* Sector type */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* Sector size */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
+/* Sector data */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+
+/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
+/* New sector index */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
+
+
+/***********************************/
+/* MC_CMD_XPM_INVALIDATE_SECTOR
+ * Invalidate XPM sector
+ */
+#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+#undef MC_CMD_0x107_PRIVILEGE_CTG
+
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
+/* Sector index */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_BLANK_CHECK
+ * Blank-check XPM memory and report bad locations
+ */
+#define MC_CMD_XPM_BLANK_CHECK 0x108
+#undef MC_CMD_0x108_PRIVILEGE_CTG
+
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
+#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
+/* Count (bytes) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
+
+/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+/* Total number of bad (non-blank) locations */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
+/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit
+ * into MCDI response)
+ */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
+
+
+/***********************************/
+/* MC_CMD_XPM_REPAIR
+ * Blank-check and repair XPM memory
+ */
+#define MC_CMD_XPM_REPAIR 0x109
+#undef MC_CMD_0x109_PRIVILEGE_CTG
+
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_REPAIR_IN msgrequest */
+#define MC_CMD_XPM_REPAIR_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
+/* Count (bytes) */
+#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+#undef MC_CMD_0x10a_PRIVILEGE_CTG
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. can be run at most 16 times). The test will pick the
+ * first available location to use, or fail with ENOSPC if none left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+#undef MC_CMD_0x10b_PRIVILEGE_CTG
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_EXEC_SIGNED
+ * Check the CMAC of the contents of IMEM and DMEM against the value supplied
+ * and if correct begin execution from the start of IMEM. The caller supplies a
+ * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
+ * computation runs from the start of IMEM, and from the start of DMEM + 16k,
+ * to match flash booting. The command will respond with EINVAL if the CMAC
+ * does match, otherwise it will respond with success before it jumps to IMEM.
+ */
+#define MC_CMD_EXEC_SIGNED 0x10c
+#undef MC_CMD_0x10c_PRIVILEGE_CTG
+
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_EXEC_SIGNED_IN msgrequest */
+#define MC_CMD_EXEC_SIGNED_IN_LEN 28
+/* the length of code to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
+/* the length of date to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
+/* the XPM sector containing the key to use */
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
+/* the expected CMAC value */
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
+
+/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
+#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PREPARE_SIGNED
+ * Prepare to upload a signed image. This will scrub the specified length of
+ * the data region, which must be at least as large as the DATALEN supplied to
+ * MC_CMD_EXEC_SIGNED.
+ */
+#define MC_CMD_PREPARE_SIGNED 0x10d
+#undef MC_CMD_0x10d_PRIVILEGE_CTG
+
+#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
+#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
+/* the length of data area to clear */
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
+
+/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
+#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_SECURITY_RULE
+ * Set blacklist and/or whitelist action for a particular match criteria.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SET_SECURITY_RULE 0x10f
+#undef MC_CMD_0x10f_PRIVILEGE_CTG
+
+#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SET_SECURITY_RULE_IN msgrequest */
+#define MC_CMD_SET_SECURITY_RULE_IN_LEN 92
+/* fields to include in match criteria */
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_LBN 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_LBN 2
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_LBN 3
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_LBN 4
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_LBN 5
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_LBN 10
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_LBN 11
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_LBN 12
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_LBN 13
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_LBN 14
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_WIDTH 1
+/* remote MAC address to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_OFST 4
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_LEN 6
+/* remote port to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_OFST 10
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_LEN 2
+/* local MAC address to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_OFST 12
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_LEN 6
+/* local port to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_OFST 18
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_OFST 20
+#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_OFST 22
+#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_OFST 24
+#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_OFST 26
+#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_LEN 2
+/* Physical port to match (as little-endian 32-bit value) */
+#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_OFST 28
+#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_LEN 4
+/* Reserved; set to 0 */
+#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_OFST 32
+#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_LEN 4
+/* remote IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_OFST 36
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_LEN 16
+/* local IP address to match (as bytes in network order; set last 12 bytes to 0
+ * for IPv4 address)
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_OFST 52
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_LEN 16
+/* remote subnet ID to match (as little-endian 32-bit value); note that remote
+ * subnets are matched by mapping the remote IP address to a "subnet ID" via a
+ * data structure which must already have been configured using
+ * MC_CMD_SUBNET_MAP_SET_NODE appropriately
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_OFST 68
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_LEN 4
+/* remote portrange ID to match (as little-endian 32-bit value); note that
+ * remote port ranges are matched by mapping the remote port to a "portrange
+ * ID" via a data structure which must already have been configured using
+ * MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_OFST 72
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_LEN 4
+/* local portrange ID to match (as little-endian 32-bit value); note that local
+ * port ranges are matched by mapping the local port to a "portrange ID" via a
+ * data structure which must already have been configured using
+ * MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_OFST 76
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_LEN 4
+/* set the action for transmitted packets matching this rule */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST 80
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_LEN 4
+/* enum: make no decision */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0
+/* enum: decide to accept the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1
+/* enum: decide to drop the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2
+/* enum: inform the TSA controller about some sample of packets matching this
+ * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with
+ * either the WHITELIST or BLACKLIST action
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_SAMPLE 0x4
+/* enum: do not change the current TX action */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff
+/* set the action for received packets matching this rule */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST 84
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_LEN 4
+/* enum: make no decision */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0
+/* enum: decide to accept the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1
+/* enum: decide to drop the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2
+/* enum: inform the TSA controller about some sample of packets matching this
+ * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with
+ * either the WHITELIST or BLACKLIST action
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_SAMPLE 0x4
+/* enum: do not change the current RX action */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff
+/* counter ID to associate with this rule; IDs are allocated using
+ * MC_CMD_SECURITY_RULE_COUNTER_ALLOC
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST 88
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_LEN 4
+/* enum: special value for the null counter ID */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0
+/* enum: special value to tell the MC to allocate an available counter */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_SW_AUTO 0xeeeeeeee
+/* enum: special value to request use of hardware counter (Medford2 only) */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_HW 0xffffffff
+
+/* MC_CMD_SET_SECURITY_RULE_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 32
+/* new reference count for uses of counter ID */
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_OFST 0
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_LEN 4
+/* constructed match bits for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_OFST 4
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_LEN 12
+/* constructed discriminator bits for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_OFST 16
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_LEN 4
+/* base location for probes for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_OFST 20
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_LEN 4
+/* step for probes for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_OFST 24
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_LEN 4
+/* ID for reading back the counter */
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_OFST 28
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_RESET_SECURITY_RULES
+ * Reset all blacklist and whitelist actions for a particular physical port, or
+ * all ports. (Medford-only; for use by SolarSecure apps, not directly by
+ * drivers. See SF-114946-SW.) NOTE - this message definition is provisional.
+ * It has not yet been used in any released code and may change during
+ * development. This note will be removed once it is regarded as stable.
+ */
+#define MC_CMD_RESET_SECURITY_RULES 0x110
+#undef MC_CMD_0x110_PRIVILEGE_CTG
+
+#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_RESET_SECURITY_RULES_IN msgrequest */
+#define MC_CMD_RESET_SECURITY_RULES_IN_LEN 4
+/* index of physical port to reset (or ALL_PHYSICAL_PORTS to reset all) */
+#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_OFST 0
+#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_LEN 4
+/* enum: special value to reset all physical ports */
+#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff
+
+/* MC_CMD_RESET_SECURITY_RULES_OUT msgresponse */
+#define MC_CMD_RESET_SECURITY_RULES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SECURITY_RULESET_VERSION
+ * Return a large hash value representing a "version" of the complete set of
+ * currently active blacklist / whitelist rules and associated data structures.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION 0x111
+#undef MC_CMD_0x111_PRIVILEGE_CTG
+
+#define MC_CMD_0x111_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_SECURITY_RULESET_VERSION_IN msgrequest */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_SECURITY_RULESET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMIN 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMAX 252
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LEN(num) (0+1*(num))
+/* Opaque hash value; length may vary depending on the hash scheme used */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_LEN 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MINNUM 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC
+ * Allocate counters for use with blacklist / whitelist rules. (Medford-only;
+ * for use by SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ * NOTE - this message definition is provisional. It has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC 0x112
+#undef MC_CMD_0x112_PRIVILEGE_CTG
+
+#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_LEN 4
+/* the number of new counter IDs to request */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_LEN 4
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LEN(num) (4+4*(num))
+/* the number of new counter IDs allocated (may be less than the number
+ * requested if resources are unavailable)
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_LEN 4
+/* new counter ID(s) */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE
+ * Allocate counters for use with blacklist / whitelist rules. (Medford-only;
+ * for use by SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ * NOTE - this message definition is provisional. It has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE 0x113
+#undef MC_CMD_0x113_PRIVILEGE_CTG
+
+#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
+/* the number of counter IDs to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_LEN 4
+/* the counter ID(s) to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MAXNUM 62
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUBNET_MAP_SET_NODE
+ * Atomically update a trie node in the map of subnets to subnet IDs. The
+ * constants in the descriptions of the fields of this message may be retrieved
+ * by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO. (Medford-
+ * only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SUBNET_MAP_SET_NODE 0x114
+#undef MC_CMD_0x114_PRIVILEGE_CTG
+
+#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SUBNET_MAP_SET_NODE_IN msgrequest */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMIN 6
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMAX 252
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LEN(num) (4+2*(num))
+/* node to update in the range 0 .. SUBNET_MAP_NUM_NODES-1 */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_OFST 0
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_LEN 4
+/* SUBNET_MAP_NUM_ENTRIES_PER_NODE new entries; each entry is either a pointer
+ * to the next node, expressed as an offset in the trie memory (i.e. node ID
+ * multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE), or a leaf value in the range
+ * SUBNET_ID_MIN .. SUBNET_ID_MAX
+ */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_OFST 4
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_LEN 2
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MINNUM 1
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MAXNUM 124
+
+/* MC_CMD_SUBNET_MAP_SET_NODE_OUT msgresponse */
+#define MC_CMD_SUBNET_MAP_SET_NODE_OUT_LEN 0
+
+/* PORTRANGE_TREE_ENTRY structuredef */
+#define PORTRANGE_TREE_ENTRY_LEN 4
+/* key for branch nodes (<= key takes left branch, > key takes right branch),
+ * or magic value for leaf nodes
+ */
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_OFST 0
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LEN 2
+#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN 0
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_WIDTH 16
+/* final portrange ID for leaf nodes (don't care for branch nodes) */
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_OFST 2
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LEN 2
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LBN 16
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping remote port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE 0x115
+#undef MC_CMD_0x115_PRIVILEGE_CTG
+
+#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping remote port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE 0x116
+#undef MC_CMD_0x116_PRIVILEGE_CTG
+
+#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
+ * Configure UDP ports for tunnel encapsulation hardware acceleration. The
+ * parser-dispatcher will attempt to parse traffic on these ports as tunnel
+ * encapsulation PDUs and filter them using the tunnel encapsulation filter
+ * chain rather than the standard filter chain. Note that this command can
+ * cause all functions to see a reset. (Available on Medford only.)
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+#undef MC_CMD_0x117_PRIVILEGE_CTG
+
+#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
+/* The number of entries in the ENTRIES array */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
+/* Entries defining the UDP port to protocol mapping, each laid out as a
+ * TUNNEL_ENCAP_UDP_PORT_ENTRY
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_RX_BALANCING
+ * Configure a port upconverter to distribute the packets on both RX engines.
+ * Packets are distributed based on a table with the destination vFIFO. The
+ * index of the table is a hash of source and destination of IPV4 and VLAN
+ * priority.
+ */
+#define MC_CMD_RX_BALANCING 0x118
+#undef MC_CMD_0x118_PRIVILEGE_CTG
+
+#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_RX_BALANCING_IN msgrequest */
+#define MC_CMD_RX_BALANCING_IN_LEN 16
+/* The RX port whose upconverter table will be modified */
+#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4
+/* The VLAN priority associated to the table index and vFIFO */
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
+/* The resulting bit of SRC^DST for indexing the table */
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
+/* The RX engine to which the vFIFO in the table entry will point to */
+#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4
+
+/* MC_CMD_RX_BALANCING_OUT msgresponse */
+#define MC_CMD_RX_BALANCING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_BIND
+ * TSAN - TSAC binding communication protocol. Refer to SF-115479-TC for more
+ * info in respect to the binding protocol.
+ */
+#define MC_CMD_TSA_BIND 0x119
+#undef MC_CMD_0x119_PRIVILEGE_CTG
+
+#define MC_CMD_0x119_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TSA_BIND_IN msgrequest: Protocol operation code */
+#define MC_CMD_TSA_BIND_IN_LEN 4
+#define MC_CMD_TSA_BIND_IN_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_OP_LEN 4
+/* enum: Obsolete. Use MC_CMD_SECURE_NIC_INFO_IN_STATUS. */
+#define MC_CMD_TSA_BIND_OP_GET_ID 0x1
+/* enum: Get a binding ticket from the TSAN. The binding ticket is used as part
+ * of the binding procedure to authorize the binding of an adapter to a TSAID.
+ * Refer to SF-114946-SW for more information. This sub-command is only
+ * available over a TLS secure connection between the TSAN and TSAC.
+ */
+#define MC_CMD_TSA_BIND_OP_GET_TICKET 0x2
+/* enum: Opcode associated with the propagation of a private key that TSAN uses
+ * as part of post-binding authentication procedure. More specifically, TSAN
+ * uses this key for a signing operation. TSAC uses the counterpart public key
+ * to verify the signature. Note - The post-binding authentication occurs when
+ * the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer to
+ * SF-114946-SW for more information. This sub-command is only available over a
+ * TLS secure connection between the TSAN and TSAC.
+ */
+#define MC_CMD_TSA_BIND_OP_SET_KEY 0x3
+/* enum: Request an insecure unbinding operation. This sub-command is available
+ * for any privileged client.
+ */
+#define MC_CMD_TSA_BIND_OP_UNBIND 0x4
+/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */
+#define MC_CMD_TSA_BIND_OP_UNBIND_EXT 0x5
+/* enum: Opcode associated with the propagation of the unbinding secret token.
+ * TSAN persists the unbinding secret token. Refer to SF-115479-TC for more
+ * information. This sub-command is only available over a TLS secure connection
+ * between the TSAN and TSAC.
+ */
+#define MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN 0x6
+/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */
+#define MC_CMD_TSA_BIND_OP_DECOMMISSION 0x7
+/* enum: Obsolete. Use MC_CMD_GET_CERTIFICATE. */
+#define MC_CMD_TSA_BIND_OP_GET_CERTIFICATE 0x8
+/* enum: Request a secure unbinding operation using unbinding token. This sub-
+ * command is available for any privileged client.
+ */
+#define MC_CMD_TSA_BIND_OP_SECURE_UNBIND 0x9
+/* enum: Request a secure decommissioning operation. This sub-command is
+ * available for any privileged client.
+ */
+#define MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION 0xa
+/* enum: Test facility that allows an adapter to be configured to behave as if
+ * Bound to a TSA controller with restricted MCDI administrator operations.
+ * This operation is primarily intended to aid host driver development.
+ */
+#define MC_CMD_TSA_BIND_OP_TEST_MCDI 0xb
+
+/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest: Obsolete. Use
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_ID_LEN 20
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_GET_ID_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_GET_ID_OP_LEN 4
+/* Cryptographic nonce that TSAC generates and sends to TSAN. TSAC generates
+ * the nonce every time as part of the TSAN post-binding authentication
+ * procedure when the TSAN-TSAC connection terminates and TSAN does need to re-
+ * connect to the TSAC. Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_OFST 4
+#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_LEN 16
+
+/* MC_CMD_TSA_BIND_IN_GET_TICKET msgrequest */
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_LEN 4
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_LEN 4
+
+/* MC_CMD_TSA_BIND_IN_SET_KEY msgrequest */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMIN 5
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LEN(num) (4+1*(num))
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_LEN 4
+/* This data blob contains the private key generated by the TSAC. TSAN uses
+ * this key for a signing operation. Note- This private key is used in
+ * conjunction with the post-binding TSAN authentication procedure that occurs
+ * when the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer
+ * to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_OFST 4
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_LEN 1
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MAXNUM 248
+
+/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Request an insecure unbinding
+ * operation.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_LEN 10
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_UNBIND_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_UNBIND_OP_LEN 4
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_OFST 4
+#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_LEN 6
+
+/* MC_CMD_TSA_BIND_IN_UNBIND_EXT msgrequest: Obsolete. Use
+ * MC_CMD_TSA_BIND_IN_SECURE_UNBIND.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMIN 93
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LEN(num) (92+1*(num))
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_LEN 4
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_OFST 4
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_LEN 6
+/* Align the arguments to 32 bits */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_OFST 10
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_LEN 2
+/* This attribute identifies the TSA infrastructure domain. The length of the
+ * TSAID attribute is limited to 64 bytes. This is how TSA SDK defines the max
+ * length. Note- The TSAID is the Organizational Unit Name filed as part of the
+ * root and server certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_OFST 12
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_NUM 64
+/* Unbinding secret token. The adapter validates this unbinding token by
+ * comparing it against the one stored on the adapter as part of the
+ * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for
+ * more information.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_OFST 76
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_LEN 16
+/* This is the signature of the above mentioned fields- TSANID, TSAID and
+ * UNBINDTOKEN. As per current requirements, the SIG opaque data blob contains
+ * ECDSA ECC-384 based signature. The ECC curve is secp384r1. The signature is
+ * also ASN-1 encoded. Note- The signature is verified based on the public key
+ * stored into the root certificate that is provisioned on the adapter side.
+ * This key is known as the PUKtsaid. Refer to SF-115479-TC for more
+ * information.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_OFST 92
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MAXNUM 160
+
+/* MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_LEN 20
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_LEN 4
+/* Unbinding secret token. TSAN persists the unbinding secret token. Refer to
+ * SF-115479-TC for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_OFST 4
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_LEN 16
+/* enum: There are situations when the binding process does not complete
+ * successfully due to key, other attributes corruption at the database level
+ * (Controller). Adapter can't connect to the controller anymore. To recover,
+ * make usage of the decommission command that forces the adapter into
+ * unbinding state.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_ADAPTER_BINDING_FAILURE 0x1
+
+/* MC_CMD_TSA_BIND_IN_DECOMMISSION msgrequest: Obsolete. Use
+ * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMIN 109
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LEN(num) (108+1*(num))
+/* This is the signature of the above mentioned fields- TSAID, USER and REASON.
+ * As per current requirements, the SIG opaque data blob contains ECDSA ECC-384
+ * based signature. The ECC curve is secp384r1. The signature is also ASN-1
+ * encoded . Note- The signature is verified based on the public key stored
+ * into the root certificate that is provisioned on the adapter side. This key
+ * is known as the PUKtsaid. Refer to SF-115479-TC for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_OFST 108
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MAXNUM 144
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_LEN 4
+/* This attribute identifies the TSA infrastructure domain. The length of the
+ * TSAID attribute is limited to 64 bytes. This is how TSA SDK defines the max
+ * length. Note- The TSAID is the Organizational Unit Name filed as part of the
+ * root and server certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_OFST 4
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_NUM 64
+/* User ID that comes, as an example, from the Controller. Note- The 33 byte
+ * length of this attribute is max length of the linux user name plus null
+ * character.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_OFST 68
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_LEN 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_NUM 33
+/* Align the arguments to 32 bits */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_OFST 101
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_LEN 3
+/* Reason of why decommissioning happens Note- The list of reasons, defined as
+ * part of the enumeration below, can be extended.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_OFST 104
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_LEN 4
+
+/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE msgrequest: Obsolete. Use
+ * MC_CMD_GET_CERTIFICATE.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_LEN 8
+/* The operation requested, must be MC_CMD_TSA_BIND_OP_GET_CERTIFICATE. */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_LEN 4
+/* Type of the certificate to be retrieved. */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_OFST 4
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_LEN 4
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_UNUSED 0x0 /* enum */
+/* enum: Adapter Authentication Certificate (AAC). The AAC is used by the
+ * controller to verify the authenticity of the adapter.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AAC 0x1
+/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is used by
+ * the controller to verify the validity of AAC.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AASC 0x2
+
+/* MC_CMD_TSA_BIND_IN_SECURE_UNBIND msgrequest: Request a secure unbinding
+ * operation using unbinding token.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMIN 97
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMAX 200
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LEN(num) (96+1*(num))
+/* The operation requested, must be MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_LEN 4
+/* Type of the message. (MESSAGE_TYPE_xxx) Must be
+ * MESSAGE_TYPE_TSA_SECURE_UNBIND.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_OFST 4
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_LEN 4
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_OFST 8
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_LEN 6
+/* Align the arguments to 32 bits */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_OFST 14
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_LEN 2
+/* A NUL padded US-ASCII string identifying the TSA infrastructure domain. This
+ * field is for information only, and not used by the firmware. Note- The TSAID
+ * is the Organizational Unit Name field as part of the root and server
+ * certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_OFST 16
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_NUM 64
+/* Unbinding secret token. The adapter validates this unbinding token by
+ * comparing it against the one stored on the adapter as part of the
+ * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for
+ * more information.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_OFST 80
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_LEN 16
+/* The signature computed and encoded as specified by MESSAGE_TYPE. */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_OFST 96
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MAXNUM 104
+
+/* MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION msgrequest: Request a secure
+ * decommissioning operation.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMIN 113
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMAX 216
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LEN(num) (112+1*(num))
+/* The operation requested, must be MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_LEN 4
+/* Type of the message. (MESSAGE_TYPE_xxx) Must be
+ * MESSAGE_TYPE_SECURE_DECOMMISSION.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_OFST 4
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_LEN 4
+/* A NUL padded US-ASCII string identifying the TSA infrastructure domain. This
+ * field is for information only, and not used by the firmware. Note- The TSAID
+ * is the Organizational Unit Name field as part of the root and server
+ * certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_OFST 8
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_NUM 64
+/* A NUL padded US-ASCII string containing user name of the creator of the
+ * decommissioning ticket. This field is for information only, and not used by
+ * the firmware.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_OFST 72
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_NUM 36
+/* Reason of why decommissioning happens */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_OFST 108
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_LEN 4
+/* enum: There are situations when the binding process does not complete
+ * successfully due to key, other attributes corruption at the database level
+ * (Controller). Adapter can't connect to the controller anymore. To recover,
+ * use the decommission command to force the adapter into unbound state.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_ADAPTER_BINDING_FAILURE 0x1
+/* The signature computed and encoded as specified by MESSAGE_TYPE. */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_OFST 112
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MAXNUM 104
+
+/* MC_CMD_TSA_BIND_IN_TEST_MCDI msgrequest: Test mode that emulates MCDI
+ * interface restrictions of a bound adapter. This operation is intended for
+ * test use on adapters that are not deployed and bound to a TSA Controller.
+ * Using it on a Bound adapter will succeed but will not alter the MCDI
+ * privileges as MCDI operations will already be restricted.
+ */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_LEN 8
+/* The operation requested must be MC_CMD_TSA_BIND_OP_TEST_MCDI. */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_LEN 4
+/* Enable or disable emulation of bound adapter */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_OFST 4
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_LEN 4
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_DISABLE 0x0 /* enum */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_ENABLE 0x1 /* enum */
+
+/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse: Obsolete. Use
+ * MC_CMD_SECURE_NIC_INFO_OUT_STATUS.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMIN 15
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LEN(num) (14+1*(num))
+/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_ID that is sent back to
+ * the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_LEN 4
+/* Rules engine type. Note- The rules engine type allows TSAC to further
+ * identify the connected endpoint (e.g. TSAN, NIC Emulator) type and take the
+ * proper action accordingly. As an example, TSAC uses the rules engine type to
+ * select the SF key that differs in the case of TSAN vs. NIC Emulator.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_OFST 4
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_LEN 4
+/* enum: Hardware rules engine. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_TSAN 0x1
+/* enum: Nic emulator rules engine. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_NEMU 0x2
+/* enum: SSFE. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_SSFE 0x3
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_OFST 8
+#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_LEN 6
+/* The signature data blob. The signature is computed against the message
+ * formed by TSAN ID concatenated with the NONCE value. Refer to SF-115479-TC
+ * for more information also in respect to the private keys that are used to
+ * sign the message based on TSAN pre/post-binding authentication procedure.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_OFST 14
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_LEN 1
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MAXNUM 238
+
+/* MC_CMD_TSA_BIND_OUT_GET_TICKET msgresponse */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMIN 5
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LEN(num) (4+1*(num))
+/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_TICKET that is sent back
+ * to the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_LEN 4
+/* The ticket represents the data blob construct that TSAN sends to TSAC as
+ * part of the binding protocol. From the TSAN perspective the ticket is an
+ * opaque construct. For more info refer to SF-115479-TC.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_OFST 4
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_LEN 1
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MINNUM 1
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MAXNUM 248
+
+/* MC_CMD_TSA_BIND_OUT_SET_KEY msgresponse */
+#define MC_CMD_TSA_BIND_OUT_SET_KEY_LEN 4
+/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_KEY that is sent back to
+ * the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_LEN 4
+
+/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse: Response to insecure unbind request.
+ */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_OFST 0
+#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_LEN 4
+/* Extra status information */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_OFST 4
+#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_LEN 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a binding ticket. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3
+
+/* MC_CMD_TSA_BIND_OUT_UNBIND_EXT msgresponse: Obsolete. Use
+ * MC_CMD_TSA_BIND_OUT_SECURE_UNBIND.
+ */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_OFST 0
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_LEN 4
+/* Extra status information */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_OFST 4
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_LEN 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a binding ticket. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_NOT_BOUND 0x3
+/* enum: Invalid unbind token */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TOKEN 0x4
+/* enum: Invalid signature */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_SIGNATURE 0x5
+
+/* MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN msgresponse */
+#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_LEN 4
+/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN that is sent
+ * back to the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_LEN 4
+
+/* MC_CMD_TSA_BIND_OUT_DECOMMISSION msgresponse: Obsolete. Use
+ * MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION.
+ */
+#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_LEN 4
+/* The protocol operation code MC_CMD_TSA_BIND_OP_DECOMMISSION that is sent
+ * back to the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_LEN 4
+
+/* MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE msgresponse */
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMIN 9
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LEN(num) (8+1*(num))
+/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_CERTIFICATE that is sent
+ * back to the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_LEN 4
+/* Type of the certificate. */
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_OFST 4
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE/TYPE */
+/* The certificate data. */
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_OFST 8
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_LEN 1
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MINNUM 1
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MAXNUM 244
+
+/* MC_CMD_TSA_BIND_OUT_SECURE_UNBIND msgresponse: Response to secure unbind
+ * request.
+ */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_LEN 8
+/* The protocol operation code that is sent back to the caller. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_LEN 4
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_OFST 4
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_LEN 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a domain. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_NOT_BOUND 0x3
+/* enum: Invalid unbind token */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TOKEN 0x4
+/* enum: Invalid signature */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_SIGNATURE 0x5
+
+/* MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION msgresponse: Response to secure
+ * decommission request.
+ */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_LEN 8
+/* The protocol operation code that is sent back to the caller. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_LEN 4
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_OFST 4
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_LEN 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a domain. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_NOT_BOUND 0x3
+/* enum: Invalid unbind token */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TOKEN 0x4
+/* enum: Invalid signature */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_SIGNATURE 0x5
+
+/* MC_CMD_TSA_BIND_OUT_TEST_MCDI msgrequest */
+#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_LEN 4
+/* The protocol operation code MC_CMD_TSA_BIND_OP_TEST_MCDI that is sent back
+ * to the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_LEN 4
+
+
+/***********************************/
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE
+ * Manage the persistent NVRAM cache of security rules created with
+ * MC_CMD_SET_SECURITY_RULE. Note that the cache is not automatically updated
+ * as rules are added or removed; the active ruleset must be explicitly
+ * committed to the cache. The cache may also be explicitly invalidated,
+ * without affecting the currently active ruleset. When the cache is valid, it
+ * will be loaded at power on or MC reboot, instead of the default ruleset.
+ * Rollback of the currently active ruleset to the cached version (when it is
+ * valid) is also supported. (Medford-only; for use by SolarSecure apps, not
+ * directly by drivers. See SF-114946-SW.) NOTE - The only sub-operation
+ * allowed in an adapter bound to a TSA controller from the local host is
+ * OP_GET_CACHED_VERSION. All other sub-operations are prohibited.
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE 0x11a
+#undef MC_CMD_0x11a_PRIVILEGE_CTG
+
+#define MC_CMD_0x11a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN msgrequest */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_LEN 4
+/* the operation to perform */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_OFST 0
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_LEN 4
+/* enum: reports the ruleset version that is cached in persistent storage but
+ * performs no other action
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0
+/* enum: rolls back the active state to the cached version. (May fail with
+ * ENOENT if there is no valid cached version.)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1
+/* enum: commits the active state to the persistent cache */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2
+/* enum: invalidates the persistent cache without affecting the active state */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3
+
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT msgresponse */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMIN 5
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMAX 252
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LEN(num) (4+1*(num))
+/* indicates whether the persistent cache is valid (after completion of the
+ * requested operation in the case of rollback, commit, or invalidate)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_OFST 0
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_LEN 4
+/* enum: persistent cache is invalid (the VERSION field will be empty in this
+ * case)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0
+/* enum: persistent cache is valid */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1
+/* cached ruleset version (after completion of the requested operation, in the
+ * case of rollback, commit, or invalidate) as an opaque hash value in the same
+ * form as MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_OFST 4
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_LEN 1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MINNUM 1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MAXNUM 248
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PRIVATE_APPEND
+ * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST
+ * if the tag is already present.
+ */
+#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
+#undef MC_CMD_0x11c_PRIVILEGE_CTG
+
+#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
+/* The tag to be appended */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
+/* The length of the data */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
+/* The data to be contained in the TLV structure */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244
+
+/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_VERIFY_CONTENTS
+ * Verify that the contents of the XPM memory is correct (Medford only). This
+ * is used during manufacture to check that the XPM memory has been programmed
+ * correctly at ATE.
+ */
+#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b
+#undef MC_CMD_0x11b_PRIVILEGE_CTG
+
+#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
+/* Data type to be checked */
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
+
+/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
+/* Number of sectors found (test builds only) */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
+/* Number of bytes found (test builds only) */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
+/* Length of signature */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
+/* Signature */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240
+
+
+/***********************************/
+/* MC_CMD_SET_EVQ_TMR
+ * Update the timer load, timer reload and timer mode values for a given EVQ.
+ * The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will
+ * be rounded up to the granularity supported by the hardware, then truncated
+ * to the range supported by the hardware. The resulting value after the
+ * rounding and truncation will be returned to the caller (in TMR_LOAD_ACT_NS
+ * and TMR_RELOAD_ACT_NS).
+ */
+#define MC_CMD_SET_EVQ_TMR 0x120
+#undef MC_CMD_0x120_PRIVILEGE_CTG
+
+#define MC_CMD_0x120_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_EVQ_TMR_IN msgrequest */
+#define MC_CMD_SET_EVQ_TMR_IN_LEN 16
+/* Function-relative queue instance */
+#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4
+/* Requested value for timer load (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4
+/* Requested value for timer reload (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4
+/* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */
+
+/* MC_CMD_SET_EVQ_TMR_OUT msgresponse */
+#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8
+/* Actual value for timer load (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4
+/* Actual value for timer reload (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES
+ * Query properties about the event queue timers.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES 0x122
+#undef MC_CMD_0x122_PRIVILEGE_CTG
+
+#define MC_CMD_0x122_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_IN msgrequest */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_IN_LEN 0
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
+/* Reserved for future use. */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4
+/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
+ * nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4
+/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
+ * allowed for timer load/reload counts.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4
+/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
+ * multiple of this step size will be rounded in an implementation defined
+ * manner.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4
+/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
+ * meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4
+/* Timer durations requested via MCDI that are not a multiple of this step size
+ * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4
+/* For timers updated using the bug35388 workaround, this is the time interval
+ * (in nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count. This field is only meaningful if the bug35388 workaround
+ * is enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4
+/* For timers updated using the bug35388 workaround, this is the maximum value
+ * allowed for timer load/reload counts. This field is only meaningful if the
+ * bug35388 workaround is enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4
+/* For timers updated using the bug35388 workaround, timer load/reload counts
+ * not a multiple of this step size will be rounded in an implementation
+ * defined manner. This field is only meaningful if the bug35388 workaround is
+ * enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP
+ * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the
+ * non used switch buffers.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
+#undef MC_CMD_0x11d_PRIVILEGE_CTG
+
+#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
+/* Will the common pool be used as TX_vFIFO_ULL (1) */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
+/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
+/* Number of buffers to reserve for the common pool */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
+/* TX datapath to which the Common Pool is connected to. */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
+/* enum: Extracts information from function */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
+/* Network port or RX Engine to which the common pool connects. */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
+/* enum: Extracts information from function */
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
+/* enum: To enable Switch loopback with Rx engine 0 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
+/* enum: To enable Switch loopback with Rx engine 1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
+/* ID of the common pool allocated */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO
+ * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the
+ * previously allocated common pools.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
+#undef MC_CMD_0x11e_PRIVILEGE_CTG
+
+#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
+/* Common pool previously allocated to which the new vFIFO will be associated
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
+/* Port or RX engine to associate the vFIFO egress */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
+/* enum: Extracts information from common pool */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
+/* enum: To enable Switch loopback with Rx engine 0 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
+/* enum: To enable Switch loopback with Rx engine 1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
+/* Minimum number of buffers that the pool must have */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
+/* enum: Do not check the space available */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
+/* Will the vFIFO be used as TX_vFIFO_ULL */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
+/* Network priority of the vFIFO,if applicable */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
+/* enum: Search for the lowest unused priority */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
+/* Short vFIFO ID */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
+/* Network priority of the vFIFO */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF
+ * This interface clears the configuration of the given vFIFO and leaves it
+ * ready to be re-used.
+ */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
+#undef MC_CMD_0x11f_PRIVILEGE_CTG
+
+#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
+/* Short vFIFO ID */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
+
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP
+ * This interface clears the configuration of the given common pool and leaves
+ * it ready to be re-used.
+ */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
+#undef MC_CMD_0x121_PRIVILEGE_CTG
+
+#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
+/* Common pool ID given when pool allocated */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
+
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_REKEY
+ * This request causes the NIC to generate a new per-NIC key and program it
+ * into the write-once memory. During the process all flash partitions that are
+ * protected with a CMAC are verified with the old per-NIC key and then signed
+ * with the new per-NIC key. If the NIC has already reached its rekey limit the
+ * REKEY op will return MC_CMD_ERR_ERANGE. The REKEY op may block until
+ * completion or it may return 0 and continue processing, therefore the caller
+ * must poll at least once to confirm that the rekeying has completed. The POLL
+ * operation returns MC_CMD_ERR_EBUSY if the rekey process is still running
+ * otherwise it will return the result of the last completed rekey operation,
+ * or 0 if there has not been a previous rekey.
+ */
+#define MC_CMD_REKEY 0x123
+#undef MC_CMD_0x123_PRIVILEGE_CTG
+
+#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_REKEY_IN msgrequest */
+#define MC_CMD_REKEY_IN_LEN 4
+/* the type of operation requested */
+#define MC_CMD_REKEY_IN_OP_OFST 0
+#define MC_CMD_REKEY_IN_OP_LEN 4
+/* enum: Start the rekeying operation */
+#define MC_CMD_REKEY_IN_OP_REKEY 0x0
+/* enum: Poll for completion of the rekeying operation */
+#define MC_CMD_REKEY_IN_OP_POLL 0x1
+
+/* MC_CMD_REKEY_OUT msgresponse */
+#define MC_CMD_REKEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS
+ * This interface allows the host to find out how many common pool buffers are
+ * not yet assigned.
+ */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
+#undef MC_CMD_0x124_PRIVILEGE_CTG
+
+#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
+
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
+/* Available buffers for the ENG to NET vFIFOs. */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
+/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_SECURITY_FUSES
+ * Change the security level of the adapter by setting bits in the write-once
+ * memory. The firmware maps each flag in the message to a set of one or more
+ * hardware-defined or software-defined bits and sets these bits in the write-
+ * once memory. For Medford the hardware-defined bits are defined in
+ * SF-112079-PS 5.3, the software-defined bits are defined in xpm.h. Returns 0
+ * if all of the required bits were set and returns MC_CMD_ERR_EIO if any of
+ * the required bits were not set.
+ */
+#define MC_CMD_SET_SECURITY_FUSES 0x126
+#undef MC_CMD_0x126_PRIVILEGE_CTG
+
+#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SET_SECURITY_FUSES_IN msgrequest */
+#define MC_CMD_SET_SECURITY_FUSES_IN_LEN 4
+/* Flags specifying what type of security features are being set */
+#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_LEN 4
+#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_LBN 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_WIDTH 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_LBN 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_WIDTH 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_LBN 31
+#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_WIDTH 1
+
+/* MC_CMD_SET_SECURITY_FUSES_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_FUSES_OUT_LEN 0
+
+/* MC_CMD_SET_SECURITY_FUSES_V2_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_LEN 4
+/* Flags specifying which security features are enforced on the NIC after the
+ * flags in the request have been applied. See
+ * MC_CMD_SET_SECURITY_FUSES_IN/FLAGS for flag definitions.
+ */
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TSA_INFO
+ * Messages sent from TSA adapter to TSA controller. This command is only valid
+ * when the MCDI header has MESSAGE_TYPE set to MCDI_MESSAGE_TYPE_TSA. This
+ * command is not sent by the driver to the MC; it is sent from the MC to a TSA
+ * controller, being treated more like an alert message rather than a command;
+ * hence the MC does not expect a response in return. Doxbox reference
+ * SF-117371-SW
+ */
+#define MC_CMD_TSA_INFO 0x127
+#undef MC_CMD_0x127_PRIVILEGE_CTG
+
+#define MC_CMD_0x127_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_INFO_IN msgrequest */
+#define MC_CMD_TSA_INFO_IN_LEN 4
+#define MC_CMD_TSA_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_TSA_INFO_IN_OP_LBN 0
+#define MC_CMD_TSA_INFO_IN_OP_WIDTH 16
+/* enum: Information about recently discovered local IP address of the adapter
+ */
+#define MC_CMD_TSA_INFO_OP_LOCAL_IP 0x1
+/* enum: Information about a sampled packet that either - did not match any
+ * black/white-list filters and was allowed by the default filter or - did not
+ * match any black/white-list filters and was denied by the default filter
+ */
+#define MC_CMD_TSA_INFO_OP_PKT_SAMPLE 0x2
+
+/* MC_CMD_TSA_INFO_IN_LOCAL_IP msgrequest:
+ *
+ * The TSA controller maintains a list of IP addresses valid for each port of a
+ * TSA adapter. The TSA controller requires information from the adapter
+ * inorder to learn new IP addresses assigned to a physical port and to
+ * identify those that are no longer assigned to the physical port. For this
+ * purpose, the TSA adapter snoops ARP replys, gratuitous ARP requests and ARP
+ * probe packets seen on each physical port. This definition describes the
+ * format of the notification message sent from a TSA adapter to a TSA
+ * controller related to any information related to a change in IP address
+ * assignment for a port. Doxbox reference SF-117371.
+ *
+ * There may be a possibility of combining multiple notifications in a single
+ * message in future. When that happens, a new flag can be defined using the
+ * reserved bits to describe the extended format of this notification.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_LEN 18
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_LEN 4
+/* Additional metadata describing the IP address information such as source of
+ * information retrieval, type of IP address, physical port number.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_OFST 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_LEN 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_LBN 0
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_LBN 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_LBN 16
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_WIDTH 8
+/* enum: ARP reply sent out of the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_TX_ARP 0x0
+/* enum: ARP probe packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_ARP_PROBE 0x1
+/* enum: Gratuitous ARP packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_GRATUITOUS_ARP 0x2
+/* enum: DHCP ACK packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_DHCP_ACK 0x3
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_LBN 24
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_LBN 25
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_WIDTH 7
+/* IPV4 address retrieved from the sampled packets. This field is relevant only
+ * when META_IPV4 is set to 1.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_OFST 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_LEN 4
+/* Target MAC address retrieved from the sampled packet. */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_OFST 12
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_LEN 1
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_NUM 6
+
+/* MC_CMD_TSA_INFO_IN_PKT_SAMPLE msgrequest:
+ *
+ * It is desireable for the TSA controller to learn the traffic pattern of
+ * packets seen at the network port being monitored. In order to learn about
+ * the traffic pattern, the TSA controller may want to sample packets seen at
+ * the network port. Based on the packet samples that the TSA controller
+ * receives from the adapter, the controller may choose to configure additional
+ * black-list or white-list rules to allow or block packets as required.
+ *
+ * Although the entire sampled packet as seen on the network port is available
+ * to the MC the length of sampled packet sent to controller is restricted by
+ * MCDI payload size. Besides, the TSA controller does not require the entire
+ * packet to make decisions about filter updates. Hence the packet sample being
+ * passed to the controller is truncated to 128 bytes. This length is large
+ * enough to hold the ethernet header, IP header and maximum length of
+ * supported L4 protocol headers (IPv4 only, but can hold IPv6 header too, if
+ * required in future).
+ *
+ * The intention is that any future changes to this message format that are not
+ * backwards compatible will be defined with a new operation code.
+ */
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_LEN 136
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_LEN 4
+/* Additional metadata describing the sampled packet */
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_OFST 4
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_LEN 4
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_LBN 0
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_LBN 8
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_LBN 9
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_WIDTH 7
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_LBN 16
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_WIDTH 4
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_LBN 16
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_LBN 17
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_LBN 18
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_WIDTH 1
+/* 128-byte raw prefix of the sampled packet which includes the ethernet
+ * header, IP header and L4 protocol header (only IPv4 supported initially).
+ * This provides the controller enough information about the packet sample to
+ * report traffic patterns seen on a network port and to make decisions
+ * concerning rule-set updates.
+ */
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_OFST 8
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_LEN 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_NUM 128
+
+/* MC_CMD_TSA_INFO_OUT msgresponse */
+#define MC_CMD_TSA_INFO_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_HOST_INFO
+ * Commands to appply or retrieve host-related information from an adapter.
+ * Doxbox reference SF-117371-SW
+ */
+#define MC_CMD_HOST_INFO 0x128
+#undef MC_CMD_0x128_PRIVILEGE_CTG
+
+#define MC_CMD_0x128_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_HOST_INFO_IN msgrequest */
+#define MC_CMD_HOST_INFO_IN_LEN 4
+/* sub-operation code info */
+#define MC_CMD_HOST_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_HOST_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_HOST_INFO_IN_OP_LBN 0
+#define MC_CMD_HOST_INFO_IN_OP_WIDTH 16
+/* enum: Read a 16-byte unique host identifier from the adapter. This UUID
+ * helps to identify the host that an adapter is plugged into. This identifier
+ * is ideally the system UUID retrieved and set by the UEFI driver. If the UEFI
+ * driver is unable to extract the system UUID, it would still set a random
+ * 16-byte value into each supported SF adapter plugged into it. Host UUIDs may
+ * change if the system is power-cycled, however, they persist across adapter
+ * resets. If the host UUID was not set on an adapter, due to an unsupported
+ * version of UEFI driver, then this command returns an error. Doxbox reference
+ * - SF-117371-SW section 'Host UUID'.
+ */
+#define MC_CMD_HOST_INFO_OP_GET_UUID 0x0
+/* enum: Set a 16-byte unique host identifier on the adapter to identify the
+ * host that the adapter is plugged into. See MC_CMD_HOST_INFO_OP_GET_UUID for
+ * further details.
+ */
+#define MC_CMD_HOST_INFO_OP_SET_UUID 0x1
+
+/* MC_CMD_HOST_INFO_IN_GET_UUID msgrequest */
+#define MC_CMD_HOST_INFO_IN_GET_UUID_LEN 4
+/* sub-operation code info */
+#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_OFST 0
+#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_LEN 4
+
+/* MC_CMD_HOST_INFO_OUT_GET_UUID msgresponse */
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_LEN 16
+/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID
+ * for further details.
+ */
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_OFST 0
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_LEN 1
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_NUM 16
+
+/* MC_CMD_HOST_INFO_IN_SET_UUID msgrequest */
+#define MC_CMD_HOST_INFO_IN_SET_UUID_LEN 20
+/* sub-operation code info */
+#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_OFST 0
+#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_LEN 4
+/* 16-byte host UUID set on the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID for
+ * further details.
+ */
+#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_OFST 4
+#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_LEN 1
+#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_NUM 16
+
+/* MC_CMD_HOST_INFO_OUT_SET_UUID msgresponse */
+#define MC_CMD_HOST_INFO_OUT_SET_UUID_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSAN_INFO
+ * Get TSA adapter information. TSA controllers query each TSA adapter to learn
+ * some configuration parameters of each adapter. Doxbox reference SF-117371-SW
+ * section 'Adapter Information'
+ */
+#define MC_CMD_TSAN_INFO 0x129
+#undef MC_CMD_0x129_PRIVILEGE_CTG
+
+#define MC_CMD_0x129_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TSAN_INFO_IN msgrequest */
+#define MC_CMD_TSAN_INFO_IN_LEN 4
+/* sub-operation code info */
+#define MC_CMD_TSAN_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_TSAN_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_TSAN_INFO_IN_OP_LBN 0
+#define MC_CMD_TSAN_INFO_IN_OP_WIDTH 16
+/* enum: Read configuration parameters and IDs that uniquely identify an
+ * adapter. The parameters include - host identification, adapter
+ * identification string and number of physical ports on the adapter.
+ */
+#define MC_CMD_TSAN_INFO_OP_GET_CFG 0x0
+
+/* MC_CMD_TSAN_INFO_IN_GET_CFG msgrequest */
+#define MC_CMD_TSAN_INFO_IN_GET_CFG_LEN 4
+/* sub-operation code info */
+#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_OFST 0
+#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_LEN 4
+
+/* MC_CMD_TSAN_INFO_OUT_GET_CFG msgresponse */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_LEN 26
+/* Information about the configuration parameters returned in this response. */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_OFST 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_LEN 4
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_LBN 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_WIDTH 16
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_LBN 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_WIDTH 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_LBN 16
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_WIDTH 8
+/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID
+ * for further details.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_OFST 4
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_LEN 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_NUM 16
+/* A unique identifier per adapter. The base MAC address of the card is used
+ * for this purpose.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_OFST 20
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_LEN 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_NUM 6
+
+/* MC_CMD_TSAN_INFO_OUT_GET_CFG_V2 msgresponse */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_LEN 36
+/* Information about the configuration parameters returned in this response. */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_OFST 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_LEN 4
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_LBN 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_WIDTH 16
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_LBN 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_WIDTH 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_LBN 16
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_WIDTH 8
+/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID
+ * for further details.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_OFST 4
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_LEN 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_NUM 16
+/* A unique identifier per adapter. The base MAC address of the card is used
+ * for this purpose.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_OFST 20
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_LEN 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_NUM 6
+/* Unused bytes, defined for 32-bit alignment of new fields. */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_OFST 26
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_LEN 2
+/* Maximum number of TSA statistics counters in each direction of dataflow
+ * supported on the card. Note that the statistics counters are always
+ * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx
+ * counter.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_OFST 28
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_LEN 4
+/* Width of each statistics counter (represented in bits). This gives an
+ * indication of wrap point to the user.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_OFST 32
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TSA_STATISTICS
+ * TSA adapter statistics operations.
+ */
+#define MC_CMD_TSA_STATISTICS 0x130
+#undef MC_CMD_0x130_PRIVILEGE_CTG
+
+#define MC_CMD_0x130_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_STATISTICS_IN msgrequest */
+#define MC_CMD_TSA_STATISTICS_IN_LEN 4
+/* TSA statistics sub-operation code */
+#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_OFST 0
+#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_LEN 4
+/* enum: Get the configuration parameters that describe the TSA statistics
+ * layout on the adapter.
+ */
+#define MC_CMD_TSA_STATISTICS_OP_GET_CONFIG 0x0
+/* enum: Read and/or clear TSA statistics counters. */
+#define MC_CMD_TSA_STATISTICS_OP_READ_CLEAR 0x1
+
+/* MC_CMD_TSA_STATISTICS_IN_GET_CONFIG msgrequest */
+#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_LEN 4
+/* TSA statistics sub-operation code */
+#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_OFST 0
+#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_LEN 4
+
+/* MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG msgresponse */
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_LEN 8
+/* Maximum number of TSA statistics counters in each direction of dataflow
+ * supported on the card. Note that the statistics counters are always
+ * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx
+ * counter.
+ */
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_OFST 0
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_LEN 4
+/* Width of each statistics counter (represented in bits). This gives an
+ * indication of wrap point to the user.
+ */
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_OFST 4
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_LEN 4
+
+/* MC_CMD_TSA_STATISTICS_IN_READ_CLEAR msgrequest */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMIN 20
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMAX 252
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LEN(num) (16+4*(num))
+/* TSA statistics sub-operation code */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_OFST 0
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_LEN 4
+/* Parameters describing the statistics operation */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_OFST 4
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_LEN 4
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_LBN 0
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_WIDTH 1
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_LBN 1
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_WIDTH 1
+/* Counter ID list specification type */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_OFST 8
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_LEN 4
+/* enum: The statistics counters are specified as an unordered list of
+ * individual counter ID.
+ */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LIST 0x0
+/* enum: The statistics counters are specified as a range of consecutive
+ * counter IDs.
+ */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_RANGE 0x1
+/* Number of statistics counters */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_OFST 12
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_LEN 4
+/* Counter IDs to be read/cleared. When mode is set to LIST, this entry holds a
+ * list of counter IDs to be operated on. When mode is set to RANGE, this entry
+ * holds a single counter ID representing the start of the range of counter IDs
+ * to be operated on.
+ */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_OFST 16
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_LEN 4
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MINNUM 1
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MAXNUM 59
+
+/* MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR msgresponse */
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMIN 24
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMAX 248
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LEN(num) (8+16*(num))
+/* Number of statistics counters returned in this response */
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_OFST 0
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_LEN 4
+/* MC_TSA_STATISTICS_ENTRY Note that this field is expected to start at a
+ * 64-bit aligned offset
+ */
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_OFST 8
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_LEN 16
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MINNUM 1
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MAXNUM 15
+
+/* MC_TSA_STATISTICS_ENTRY structuredef */
+#define MC_TSA_STATISTICS_ENTRY_LEN 16
+/* Tx statistics counter */
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_OFST 0
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LEN 8
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LO_OFST 0
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_HI_OFST 4
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LBN 0
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_WIDTH 64
+/* Rx statistics counter */
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_OFST 8
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LEN 8
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LO_OFST 8
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_HI_OFST 12
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LBN 64
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_WIDTH 64
+
+
+/***********************************/
+/* MC_CMD_ERASE_INITIAL_NIC_SECRET
+ * This request causes the NIC to find the initial NIC secret (programmed
+ * during ATE) in XPM memory and if and only if the NIC has already been
+ * rekeyed with MC_CMD_REKEY, erase it. This is used by manftest after
+ * installing TSA binding certificates. See SF-117631-TC.
+ */
+#define MC_CMD_ERASE_INITIAL_NIC_SECRET 0x131
+#undef MC_CMD_0x131_PRIVILEGE_CTG
+
+#define MC_CMD_0x131_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_ERASE_INITIAL_NIC_SECRET_IN msgrequest */
+#define MC_CMD_ERASE_INITIAL_NIC_SECRET_IN_LEN 0
+
+/* MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT msgresponse */
+#define MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_CONFIG
+ * TSA adapter configuration operations. This command is used to prepare the
+ * NIC for TSA binding.
+ */
+#define MC_CMD_TSA_CONFIG 0x64
+#undef MC_CMD_0x64_PRIVILEGE_CTG
+
+#define MC_CMD_0x64_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TSA_CONFIG_IN msgrequest */
+#define MC_CMD_TSA_CONFIG_IN_LEN 4
+/* TSA configuration sub-operation code */
+#define MC_CMD_TSA_CONFIG_IN_OP_OFST 0
+#define MC_CMD_TSA_CONFIG_IN_OP_LEN 4
+/* enum: Append a single item to the tsa_config partition. Items will be
+ * encrypted unless they are declared as non-sensitive. Returns
+ * MC_CMD_ERR_EEXIST if the tag is already present.
+ */
+#define MC_CMD_TSA_CONFIG_OP_APPEND 0x1
+/* enum: Reset the tsa_config partition to a clean state. */
+#define MC_CMD_TSA_CONFIG_OP_RESET 0x2
+/* enum: Read back a configured item from tsa_config partition. Returns
+ * MC_CMD_ERR_ENOENT if the item doesn't exist, or MC_CMD_ERR_EPERM if the item
+ * is declared as sensitive (i.e. is encrypted).
+ */
+#define MC_CMD_TSA_CONFIG_OP_READ 0x3
+
+/* MC_CMD_TSA_CONFIG_IN_APPEND msgrequest */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMIN 12
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMAX 252
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LEN(num) (12+1*(num))
+/* TSA configuration sub-operation code. The value shall be
+ * MC_CMD_TSA_CONFIG_OP_APPEND.
+ */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_OFST 0
+#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_LEN 4
+/* The tag to be appended */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_OFST 4
+#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_LEN 4
+/* The length of the data in bytes */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_OFST 8
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_LEN 4
+/* The item data */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_OFST 12
+#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_LEN 1
+#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MINNUM 0
+#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MAXNUM 240
+
+/* MC_CMD_TSA_CONFIG_OUT_APPEND msgresponse */
+#define MC_CMD_TSA_CONFIG_OUT_APPEND_LEN 0
+
+/* MC_CMD_TSA_CONFIG_IN_RESET msgrequest */
+#define MC_CMD_TSA_CONFIG_IN_RESET_LEN 4
+/* TSA configuration sub-operation code. The value shall be
+ * MC_CMD_TSA_CONFIG_OP_RESET.
+ */
+#define MC_CMD_TSA_CONFIG_IN_RESET_OP_OFST 0
+#define MC_CMD_TSA_CONFIG_IN_RESET_OP_LEN 4
+
+/* MC_CMD_TSA_CONFIG_OUT_RESET msgresponse */
+#define MC_CMD_TSA_CONFIG_OUT_RESET_LEN 0
+
+/* MC_CMD_TSA_CONFIG_IN_READ msgrequest */
+#define MC_CMD_TSA_CONFIG_IN_READ_LEN 8
+/* TSA configuration sub-operation code. The value shall be
+ * MC_CMD_TSA_CONFIG_OP_READ.
+ */
+#define MC_CMD_TSA_CONFIG_IN_READ_OP_OFST 0
+#define MC_CMD_TSA_CONFIG_IN_READ_OP_LEN 4
+/* The tag to be read */
+#define MC_CMD_TSA_CONFIG_IN_READ_TAG_OFST 4
+#define MC_CMD_TSA_CONFIG_IN_READ_TAG_LEN 4
+
+/* MC_CMD_TSA_CONFIG_OUT_READ msgresponse */
+#define MC_CMD_TSA_CONFIG_OUT_READ_LENMIN 8
+#define MC_CMD_TSA_CONFIG_OUT_READ_LENMAX 252
+#define MC_CMD_TSA_CONFIG_OUT_READ_LEN(num) (8+1*(num))
+/* The tag that was read */
+#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_OFST 0
+#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_LEN 4
+/* The length of the data in bytes */
+#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_OFST 4
+#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_LEN 4
+/* The data of the item. */
+#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_OFST 8
+#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_LEN 1
+#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MINNUM 0
+#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MAXNUM 244
+
+/* MC_TSA_IPV4_ITEM structuredef */
+#define MC_TSA_IPV4_ITEM_LEN 8
+/* Additional metadata describing the IP address information such as the
+ * physical port number the address is being used on. Unused space in this
+ * field is reserved for future expansion.
+ */
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_OFST 0
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LEN 4
+#define MC_TSA_IPV4_ITEM_PORT_IDX_LBN 0
+#define MC_TSA_IPV4_ITEM_PORT_IDX_WIDTH 8
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LBN 0
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_WIDTH 32
+/* The IPv4 address in little endian byte order. */
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_OFST 4
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LEN 4
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LBN 32
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_TSA_IPADDR
+ * TSA operations relating to the monitoring and expiry of local IP addresses
+ * discovered by the controller. These commands are sent from a TSA controller
+ * to a TSA adapter.
+ */
+#define MC_CMD_TSA_IPADDR 0x65
+#undef MC_CMD_0x65_PRIVILEGE_CTG
+
+#define MC_CMD_0x65_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_IPADDR_IN msgrequest */
+#define MC_CMD_TSA_IPADDR_IN_LEN 4
+/* Header containing information to identify which sub-operation of this
+ * command to perform. The header contains a 16-bit op-code. Unused space in
+ * this field is reserved for future expansion.
+ */
+#define MC_CMD_TSA_IPADDR_IN_OP_HDR_OFST 0
+#define MC_CMD_TSA_IPADDR_IN_OP_HDR_LEN 4
+#define MC_CMD_TSA_IPADDR_IN_OP_LBN 0
+#define MC_CMD_TSA_IPADDR_IN_OP_WIDTH 16
+/* enum: Request that the adapter verifies that the IPv4 addresses supplied are
+ * still in use by the host by sending ARP probes to the host. The MC does not
+ * wait for a response to the probes and sends an MCDI response to the
+ * controller once the probes have been sent to the host. The response to the
+ * probes (if there are any) will be forwarded to the controller using
+ * MC_CMD_TSA_INFO alerts.
+ */
+#define MC_CMD_TSA_IPADDR_OP_VALIDATE_IPV4 0x1
+/* enum: Notify the adapter that one or more IPv4 addresses are no longer valid
+ * for the host of the adapter. The adapter should remove the IPv4 addresses
+ * from its local cache.
+ */
+#define MC_CMD_TSA_IPADDR_OP_REMOVE_IPV4 0x2
+
+/* MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4 msgrequest */
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMIN 16
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMAX 248
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LEN(num) (8+8*(num))
+/* Header containing information to identify which sub-operation of this
+ * command to perform. The header contains a 16-bit op-code. Unused space in
+ * this field is reserved for future expansion.
+ */
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_OFST 0
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_LEN 4
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_LBN 0
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_WIDTH 16
+/* Number of IPv4 addresses to validate. */
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_OFST 4
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_LEN 4
+/* The IPv4 addresses to validate, in struct MC_TSA_IPV4_ITEM format. */
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_OFST 8
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LEN 8
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LO_OFST 8
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_HI_OFST 12
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MINNUM 1
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MAXNUM 30
+
+/* MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4 msgresponse */
+#define MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4_LEN 0
+
+/* MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4 msgrequest */
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMIN 16
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMAX 248
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LEN(num) (8+8*(num))
+/* Header containing information to identify which sub-operation of this
+ * command to perform. The header contains a 16-bit op-code. Unused space in
+ * this field is reserved for future expansion.
+ */
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_OFST 0
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_LEN 4
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_LBN 0
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_WIDTH 16
+/* Number of IPv4 addresses to remove. */
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_OFST 4
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_LEN 4
+/* The IPv4 addresses that have expired, in struct MC_TSA_IPV4_ITEM format. */
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_OFST 8
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LEN 8
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LO_OFST 8
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_HI_OFST 12
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MINNUM 1
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MAXNUM 30
+
+/* MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4 msgresponse */
+#define MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SECURE_NIC_INFO
+ * Get secure NIC information. While many of the features reported by these
+ * commands are related to TSA, they must be supported in firmware where TSA is
+ * disabled.
+ */
+#define MC_CMD_SECURE_NIC_INFO 0x132
+#undef MC_CMD_0x132_PRIVILEGE_CTG
+
+#define MC_CMD_0x132_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SECURE_NIC_INFO_IN msgrequest */
+#define MC_CMD_SECURE_NIC_INFO_IN_LEN 4
+/* sub-operation code info */
+#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_SECURE_NIC_INFO_IN_OP_LBN 0
+#define MC_CMD_SECURE_NIC_INFO_IN_OP_WIDTH 16
+/* enum: Get the status of various security settings, all signed along with a
+ * challenge chosen by the host.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OP_STATUS 0x0
+
+/* MC_CMD_SECURE_NIC_INFO_IN_STATUS msgrequest */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_LEN 24
+/* sub-operation code, must be MC_CMD_SECURE_NIC_INFO_OP_STATUS */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_OFST 0
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_LEN 4
+/* Type of key to be used to sign response. */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_OFST 4
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_LEN 4
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_UNUSED 0x0 /* enum */
+/* enum: Solarflare adapter authentication key, installed by Manftest. */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_SF_ADAPTER_AUTH 0x1
+/* enum: TSA binding key, installed after adapter is bound to a TSA controller.
+ * This is not supported in firmware which does not support TSA.
+ */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_TSA_BINDING 0x2
+/* enum: Customer adapter authentication key. Installed by the customer in the
+ * field, but otherwise similar to the Solarflare adapter authentication key.
+ */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CUSTOMER_ADAPTER_AUTH 0x3
+/* Random challenge generated by the host. */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_OFST 8
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_LEN 16
+
+/* MC_CMD_SECURE_NIC_INFO_OUT_STATUS msgresponse */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_LEN 420
+/* Length of the signature in MSG_SIGNATURE. */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_OFST 0
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_LEN 4
+/* Signature over the message, starting at MESSAGE_TYPE and continuing to the
+ * end of the MCDI response, allowing the message format to be extended. The
+ * signature uses ECDSA 384 encoding in ASN.1 format. It has variable length,
+ * with a maximum of 384 bytes.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_OFST 4
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN 384
+/* Enum value indicating the type of response. This protects against chosen
+ * message attacks. The enum values are random rather than sequential to make
+ * it unlikely that values will be reused should other commands in a different
+ * namespace need to create signed messages.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_OFST 388
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_LEN 4
+/* enum: Message type value for the response to a
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS message.
+ */
+#define MC_CMD_SECURE_NIC_INFO_STATUS 0xdb4
+/* The challenge provided by the host in the MC_CMD_SECURE_NIC_INFO_IN_STATUS
+ * message
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_OFST 392
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_LEN 16
+/* The first 32 bits of XPM memory, which include security and flag bits, die
+ * ID and chip ID revision. The meaning of these bits is defined in
+ * mc/include/mc/xpm.h in the firmwaresrc repository.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_OFST 408
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_LEN 4
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_OFST 412
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_OFST 414
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_OFST 416
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_OFST 418
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_LEN 2
+
+
+/***********************************/
+/* MC_CMD_TSA_TEST
+ * A simple ping-pong command just to test the adapter<>controller MCDI
+ * communication channel. This command makes not changes to the TSA adapter's
+ * internal state. It is used by the controller just to verify that the MCDI
+ * communication channel is working fine. This command takes no additonal
+ * parameters in request or response.
+ */
+#define MC_CMD_TSA_TEST 0x125
+#undef MC_CMD_0x125_PRIVILEGE_CTG
+
+#define MC_CMD_0x125_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_TEST_IN msgrequest */
+#define MC_CMD_TSA_TEST_IN_LEN 0
+
+/* MC_CMD_TSA_TEST_OUT msgresponse */
+#define MC_CMD_TSA_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_RULESET_OVERRIDE
+ * Override TSA ruleset that is currently active on the adapter. This operation
+ * does not modify the ruleset itself. This operation provides a mechanism to
+ * apply an allow-all or deny-all operation on all packets, thereby completely
+ * ignoring the rule-set configured on the adapter. The main purpose of this
+ * operation is to provide a deterministic state to the TSA firewall during
+ * rule-set transitions.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE 0x12a
+#undef MC_CMD_0x12a_PRIVILEGE_CTG
+
+#define MC_CMD_0x12a_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_RULESET_OVERRIDE_IN msgrequest */
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_LEN 4
+/* The override state to apply. */
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_OFST 0
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_LEN 4
+/* enum: No override in place - the existing ruleset is in operation. */
+#define MC_CMD_TSA_RULESET_OVERRIDE_NONE 0x0
+/* enum: Block all packets seen on all datapath channel except those packets
+ * required for basic configuration of the TSA NIC such as ARPs and TSA-
+ * communication traffic. Such exceptional traffic is handled differently
+ * compared to TSA rulesets.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE_BLOCK 0x1
+/* enum: Allow all packets through all datapath channel. The TSA adapter
+ * behaves like a normal NIC without any firewalls.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE_ALLOW 0x2
+
+/* MC_CMD_TSA_RULESET_OVERRIDE_OUT msgresponse */
+#define MC_CMD_TSA_RULESET_OVERRIDE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSAC_REQUEST
+ * Generic command to send requests from a TSA controller to a TSA adapter.
+ * Specific usage is determined by the TYPE field.
+ */
+#define MC_CMD_TSAC_REQUEST 0x12b
+#undef MC_CMD_0x12b_PRIVILEGE_CTG
+
+#define MC_CMD_0x12b_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSAC_REQUEST_IN msgrequest */
+#define MC_CMD_TSAC_REQUEST_IN_LEN 4
+/* The type of request from the controller. */
+#define MC_CMD_TSAC_REQUEST_IN_TYPE_OFST 0
+#define MC_CMD_TSAC_REQUEST_IN_TYPE_LEN 4
+/* enum: Request the adapter to resend localIP information from it's cache. The
+ * command does not return any IP address information; IP addresses are sent as
+ * TSA notifications as descibed in MC_CMD_TSA_INFO_IN_LOCAL_IP.
+ */
+#define MC_CMD_TSAC_REQUEST_LOCALIP 0x0
+
+/* MC_CMD_TSAC_REQUEST_OUT msgresponse */
+#define MC_CMD_TSAC_REQUEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUC_VERSION
+ * Get the version of the SUC
+ */
+#define MC_CMD_SUC_VERSION 0x134
+#undef MC_CMD_0x134_PRIVILEGE_CTG
+
+#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SUC_VERSION_IN msgrequest */
+#define MC_CMD_SUC_VERSION_IN_LEN 0
+
+/* MC_CMD_SUC_VERSION_OUT msgresponse */
+#define MC_CMD_SUC_VERSION_OUT_LEN 24
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
+#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
+/* The date, in seconds since the Unix epoch, when the firmware image was
+ * built.
+ */
+#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16
+#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20
+#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4
+
+/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot
+ * loader.
+ */
+#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4
+#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0
+#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4
+/* enum: Requests the SUC boot version. */
+#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b
+
+/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */
+#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4
+/* The SUC boot version */
+#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SUC_MANFTEST
+ * Operations to support manftest on SUC based systems.
+ */
+#define MC_CMD_SUC_MANFTEST 0x135
+#undef MC_CMD_0x135_PRIVILEGE_CTG
+
+#define MC_CMD_0x135_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SUC_MANFTEST_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_IN_LEN 4
+/* The manftest operation to be performed. */
+#define MC_CMD_SUC_MANFTEST_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_IN_OP_LEN 4
+/* enum: Read serial number and use count. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ 0x0
+/* enum: Update use count on wearout adapter. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE 0x1
+/* enum: Start an ADC calibration. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START 0x2
+/* enum: Read the status of an ADC calibration. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS 0x3
+/* enum: Read the results of an ADC calibration. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT 0x4
+/* enum: Read the PCIe configuration. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ 0x5
+/* enum: Write the PCIe configuration. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE 0x6
+/* enum: Write FRU information to SUC. The FRU information is taken from the
+ * FRU_INFORMATION partition. Attempts to write to read-only FRUs are rejected.
+ */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE 0x7
+
+/* MC_CMD_SUC_MANFTEST_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_OUT_LEN 0
+
+/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_WEAROUT_READ.
+ */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_LEN 20
+/* The serial number of the wearout adapter, see SF-112717-PR for format. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_OFST 0
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_LEN 16
+/* The use count of the wearout adapter. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_OFST 16
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE.
+ */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT_LEN 0
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START.
+ */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT_LEN 0
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS.
+ */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_LEN 4
+/* The combined status of the calibration operation. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_LEN 4
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_LBN 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_WIDTH 1
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_LBN 1
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_WIDTH 1
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_LBN 2
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_WIDTH 4
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_LBN 6
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_WIDTH 2
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT.
+ */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_LEN 12
+/* The set of calibration results. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_LEN 4
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_NUM 3
+
+/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ.
+ */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_LEN 4
+/* The PCIe vendor ID. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_OFST 0
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_LEN 2
+/* The PCIe device ID. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_OFST 2
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_LEN 2
+
+/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_LEN 8
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE.
+ */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_LEN 4
+/* The PCIe vendor ID. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_OFST 4
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_LEN 2
+/* The PCIe device ID. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_OFST 6
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_LEN 2
+
+/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT_LEN 0
+
+/* MC_CMD_SUC_MANFTEST_FRU_WRITE_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_FRU_WRITE
+ */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_CERTIFICATE
+ * Request a certificate.
+ */
+#define MC_CMD_GET_CERTIFICATE 0x12c
+#undef MC_CMD_0x12c_PRIVILEGE_CTG
+
+#define MC_CMD_0x12c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CERTIFICATE_IN msgrequest */
+#define MC_CMD_GET_CERTIFICATE_IN_LEN 8
+/* Type of the certificate to be retrieved. */
+#define MC_CMD_GET_CERTIFICATE_IN_TYPE_OFST 0
+#define MC_CMD_GET_CERTIFICATE_IN_TYPE_LEN 4
+#define MC_CMD_GET_CERTIFICATE_IN_UNUSED 0x0 /* enum */
+#define MC_CMD_GET_CERTIFICATE_IN_AAC 0x1 /* enum */
+/* enum: Adapter Authentication Certificate (AAC). The AAC is unique to each
+ * adapter and is used to verify its authenticity. It is installed by Manftest.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH 0x1
+#define MC_CMD_GET_CERTIFICATE_IN_AASC 0x2 /* enum */
+/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is shared
+ * by a group of adapters (typically a purchase order) and is used to verify
+ * the validity of AAC along with the SF root certificate. It is installed by
+ * Manftest.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH_SIGNING 0x2
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AAC 0x3 /* enum */
+/* enum: Customer Adapter Authentication Certificate. The Customer AAC is
+ * unique to each adapter and is used to verify its authenticity in cases where
+ * either the AAC is not installed or a customer desires to use their own
+ * certificate chain. It is installed by the customer.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH 0x3
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AASC 0x4 /* enum */
+/* enum: Customer Adapter Authentication Certificate. The Customer AASC is
+ * shared by a group of adapters and is used to verify the validity of the
+ * Customer AAC along with the customers root certificate. It is installed by
+ * the customer.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH_SIGNING 0x4
+/* Offset, measured in bytes, relative to the start of the certificate data
+ * from which the certificate is to be retrieved.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_OFST 4
+#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_LEN 4
+
+/* MC_CMD_GET_CERTIFICATE_OUT msgresponse */
+#define MC_CMD_GET_CERTIFICATE_OUT_LENMIN 13
+#define MC_CMD_GET_CERTIFICATE_OUT_LENMAX 252
+#define MC_CMD_GET_CERTIFICATE_OUT_LEN(num) (12+1*(num))
+/* Type of the certificate. */
+#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_OFST 0
+#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_CERTIFICATE_IN/TYPE */
+/* Offset, measured in bytes, relative to the start of the certificate data
+ * from which data in this message starts.
+ */
+#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_OFST 4
+#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_LEN 4
+/* Total length of the certificate data. */
+#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_OFST 8
+#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_LEN 4
+/* The certificate data. */
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_OFST 12
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_LEN 1
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MAXNUM 240
+
+
+/***********************************/
+/* MC_CMD_GET_NIC_GLOBAL
+ * Get a global value which applies to all PCI functions
+ */
+#define MC_CMD_GET_NIC_GLOBAL 0x12d
+#undef MC_CMD_0x12d_PRIVILEGE_CTG
+
+#define MC_CMD_0x12d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NIC_GLOBAL_IN msgrequest */
+#define MC_CMD_GET_NIC_GLOBAL_IN_LEN 4
+/* Key to request value for, see enum values in MC_CMD_SET_NIC_GLOBAL. If the
+ * given key is unknown to the current firmware, the call will fail with
+ * ENOENT.
+ */
+#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_OFST 0
+#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_LEN 4
+
+/* MC_CMD_GET_NIC_GLOBAL_OUT msgresponse */
+#define MC_CMD_GET_NIC_GLOBAL_OUT_LEN 4
+/* Value of requested key, see key descriptions below. */
+#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_OFST 0
+#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_NIC_GLOBAL
+ * Set a global value which applies to all PCI functions. Most global values
+ * can only be changed under specific conditions, and this call will return an
+ * appropriate error otherwise (see key descriptions).
+ */
+#define MC_CMD_SET_NIC_GLOBAL 0x12e
+#undef MC_CMD_0x12e_PRIVILEGE_CTG
+
+#define MC_CMD_0x12e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_NIC_GLOBAL_IN msgrequest */
+#define MC_CMD_SET_NIC_GLOBAL_IN_LEN 8
+/* Key to change value of. Firmware will return ENOENT for keys it doesn't know
+ * about.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_OFST 0
+#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_LEN 4
+/* enum: Request switching the datapath firmware sub-variant. Currently only
+ * useful when running the DPDK f/w variant. See key values below, and the DPDK
+ * section of the EF10 Driver Writers Guide. Note that any driver attaching
+ * with the SUBVARIANT_AWARE flag cleared is implicitly considered as a request
+ * to switch back to the default sub-variant, and will thus reset this value.
+ * If a sub-variant switch happens, all other PCI functions will get their
+ * resources reset (they will see an MC reboot).
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT 0x1
+/* New value to set, see key descriptions above. */
+#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_OFST 4
+#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_LEN 4
+/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Default sub-variant with support
+ * for maximum features for the current f/w variant. A request from a
+ * privileged function to set this particular value will always succeed.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT 0x0
+/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Increases packet rate at the cost
+ * of not supporting any TX checksum offloads. Only supported when running some
+ * f/w variants, others will return ENOTSUP (as reported by the homonymous bit
+ * in MC_CMD_GET_CAPABILITIES_V2). Can only be set when no other drivers are
+ * attached, and the calling driver must have no resources allocated. See the
+ * DPDK section of the EF10 Driver Writers Guide for a more detailed
+ * description with possible error codes.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM 0x1
+
+
+/***********************************/
+/* MC_CMD_LTSSM_TRACE_POLL
+ * Medford2 hardware has support for logging all LTSSM state transitions to a
+ * hardware buffer. When built with WITH_LTSSM_TRACE=1, the firmware will
+ * periodially dump the contents of this hardware buffer to an internal
+ * firmware buffer for later extraction.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL 0x12f
+#undef MC_CMD_0x12f_PRIVILEGE_CTG
+
+#define MC_CMD_0x12f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LTSSM_TRACE_POLL_IN msgrequest: Read transitions from the firmware
+ * internal buffer.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_IN_LEN 4
+/* The maximum number of row that the caller can accept. The format of each row
+ * is defined in MC_CMD_LTSSM_TRACE_POLL_OUT.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_LEN 4
+
+/* MC_CMD_LTSSM_TRACE_POLL_OUT msgresponse */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMIN 16
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMAX 248
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LEN(num) (8+8*(num))
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_LEN 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_LBN 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_LBN 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_LBN 31
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_WIDTH 1
+/* The number of rows present in this response. */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_OFST 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_LEN 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LEN 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LO_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_HI_OFST 12
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MINNUM 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MAXNUM 30
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_LBN 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_WIDTH 6
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_LBN 6
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_LBN 7
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_LBN 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_WIDTH 24
+/* The time of the LTSSM transition. Times are reported as fractional
+ * microseconds since MC boot (wrapping at 2^32us). The fractional part is
+ * reported in picoseconds. 0 <= TIMESTAMP_PS < 1000000 timestamp in seconds =
+ * ((TIMESTAMP_US + TIMESTAMP_PS / 1000000) / 1000000)
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_OFST 12
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_LEN 4
+
+#endif /* _SIENA_MC_DRIVER_PCOL_H */
+/*! \cidoxg_end */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi_aoe.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi_aoe.h
new file mode 100644
index 00000000..6aaf212f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi_aoe.h
@@ -0,0 +1,2914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2008-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+/*! \cidoxg_firmware_mc_cmd */
+
+#ifndef _SIENA_MC_DRIVER_PCOL_AOE_H
+#define _SIENA_MC_DRIVER_PCOL_AOE_H
+
+
+
+/***********************************/
+/* MC_CMD_FC
+ * Perform an FC operation
+ */
+#define MC_CMD_FC 0x9
+
+/* MC_CMD_FC_IN msgrequest */
+#define MC_CMD_FC_IN_LEN 4
+#define MC_CMD_FC_IN_OP_HDR_OFST 0
+#define MC_CMD_FC_IN_OP_HDR_LEN 4
+#define MC_CMD_FC_IN_OP_LBN 0
+#define MC_CMD_FC_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to FC. */
+#define MC_CMD_FC_OP_NULL 0x1
+/* enum: Unused opcode */
+#define MC_CMD_FC_OP_UNUSED 0x2
+/* enum: MAC driver commands */
+#define MC_CMD_FC_OP_MAC 0x3
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_READ32 0x4
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_WRITE32 0x5
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_READ 0x6
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_WRITE 0x7
+/* enum: FC firmware Version */
+#define MC_CMD_FC_OP_GET_VERSION 0x8
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_RX_READ 0x9
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa
+/* enum: SFP parameters */
+#define MC_CMD_FC_OP_SFP 0xb
+/* enum: DDR3 test */
+#define MC_CMD_FC_OP_DDR_TEST 0xc
+/* enum: Get Crash context from FC */
+#define MC_CMD_FC_OP_GET_ASSERT 0xd
+/* enum: Get FPGA Build registers */
+#define MC_CMD_FC_OP_FPGA_BUILD 0xe
+/* enum: Read map support commands */
+#define MC_CMD_FC_OP_READ_MAP 0xf
+/* enum: FC Capabilities */
+#define MC_CMD_FC_OP_CAPABILITIES 0x10
+/* enum: FC Global flags */
+#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11
+/* enum: FC IO using relative addressing modes */
+#define MC_CMD_FC_OP_IO_REL 0x12
+/* enum: FPGA link information */
+#define MC_CMD_FC_OP_UHLINK 0x13
+/* enum: Configure loopbacks and link on FPGA ports */
+#define MC_CMD_FC_OP_SET_LINK 0x14
+/* enum: Licensing operations relating to AOE */
+#define MC_CMD_FC_OP_LICENSE 0x15
+/* enum: Startup information to the FC */
+#define MC_CMD_FC_OP_STARTUP 0x16
+/* enum: Configure a DMA read */
+#define MC_CMD_FC_OP_DMA 0x17
+/* enum: Configure a timed read */
+#define MC_CMD_FC_OP_TIMED_READ 0x18
+/* enum: Control UART logging */
+#define MC_CMD_FC_OP_LOG 0x19
+/* enum: Get the value of a given clock_id */
+#define MC_CMD_FC_OP_CLOCK 0x1a
+/* enum: DDR3/QDR3 parameters */
+#define MC_CMD_FC_OP_DDR 0x1b
+/* enum: PTP and timestamp control */
+#define MC_CMD_FC_OP_TIMESTAMP 0x1c
+/* enum: Commands for SPI Flash interface */
+#define MC_CMD_FC_OP_SPI 0x1d
+/* enum: Commands for diagnostic components */
+#define MC_CMD_FC_OP_DIAG 0x1e
+/* enum: External AOE port. */
+#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0
+/* enum: Internal AOE port. */
+#define MC_CMD_FC_IN_PORT_INT_OFST 0x40
+
+/* MC_CMD_FC_IN_NULL msgrequest */
+#define MC_CMD_FC_IN_NULL_LEN 4
+#define MC_CMD_FC_IN_CMD_OFST 0
+#define MC_CMD_FC_IN_CMD_LEN 4
+
+/* MC_CMD_FC_IN_PHY msgrequest */
+#define MC_CMD_FC_IN_PHY_LEN 5
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* FC PHY driver operation code */
+#define MC_CMD_FC_IN_PHY_OP_OFST 4
+#define MC_CMD_FC_IN_PHY_OP_LEN 1
+/* enum: PHY init handler */
+#define MC_CMD_FC_OP_PHY_OP_INIT 0x1
+/* enum: PHY reconfigure handler */
+#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2
+/* enum: PHY reboot handler */
+#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3
+/* enum: PHY get_supported_cap handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4
+/* enum: PHY get_config handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5
+/* enum: PHY get_media_info handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6
+/* enum: PHY set_led handler */
+#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7
+/* enum: PHY lasi_interrupt handler */
+#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8
+/* enum: PHY check_link handler */
+#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9
+/* enum: PHY fill_stats handler */
+#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa
+/* enum: PHY bpx_link_state_changed handler */
+#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb
+/* enum: PHY get_state handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc
+/* enum: PHY start_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd
+/* enum: PHY poll_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe
+/* enum: PHY nvram_test handler */
+#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf
+/* enum: PHY relinquish handler */
+#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10
+/* enum: PHY read connection from FC - may be not required */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11
+/* enum: PHY read flags from FC - may be not required */
+#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12
+
+/* MC_CMD_FC_IN_PHY_INIT msgrequest */
+#define MC_CMD_FC_IN_PHY_INIT_LEN 4
+#define MC_CMD_FC_IN_PHY_CMD_OFST 0
+#define MC_CMD_FC_IN_PHY_CMD_LEN 4
+
+/* MC_CMD_FC_IN_MAC msgrequest */
+#define MC_CMD_FC_IN_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_MAC_HEADER_OFST 4
+#define MC_CMD_FC_IN_MAC_HEADER_LEN 4
+#define MC_CMD_FC_IN_MAC_OP_LBN 0
+#define MC_CMD_FC_IN_MAC_OP_WIDTH 8
+/* enum: MAC reconfigure handler */
+#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1
+/* enum: MAC Set command - same as MC_CMD_SET_MAC */
+#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2
+/* enum: MAC statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3
+/* enum: MAC RX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6
+/* enum: MAC TX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7
+/* enum: MAC Read status */
+#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8
+/* enum: External FPGA port. */
+#define MC_CMD_FC_PORT_EXT 0x0
+/* enum: Internal Siena-facing FPGA ports. */
+#define MC_CMD_FC_PORT_INT 0x1
+#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1
+
+/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */
+#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+/* MTU size */
+#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8
+#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_LEN 4
+/* Drain Tx FIFO */
+#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12
+#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_LEN 4
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_LEN 4
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28
+#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_LEN 4
+
+/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */
+#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+/* MC Statistics index */
+#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8
+#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_LEN 4
+#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12
+#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_LEN 4
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1
+/* Number of statistics to read */
+#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16
+#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_LEN 4
+#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */
+#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */
+
+/* MC_CMD_FC_IN_READ32 msgrequest */
+#define MC_CMD_FC_IN_READ32_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_READ32_ADDR_HI_LEN 4
+#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_READ32_ADDR_LO_LEN 4
+#define MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12
+#define MC_CMD_FC_IN_READ32_NUMWORDS_LEN 4
+
+/* MC_CMD_FC_IN_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_WRITE32_LENMIN 16
+#define MC_CMD_FC_IN_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_WRITE32_ADDR_HI_LEN 4
+#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_WRITE32_ADDR_LO_LEN 4
+#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12
+#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60
+
+/* MC_CMD_FC_IN_TRC_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_READ_TRC_LEN 4
+#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_READ_CHANNEL_LEN 4
+
+/* MC_CMD_FC_IN_TRC_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_WRITE_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_WRITE_TRC_LEN 4
+#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_LEN 4
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4
+
+/* MC_CMD_FC_IN_GET_VERSION msgrequest */
+#define MC_CMD_FC_IN_GET_VERSION_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+
+/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_READ_TRC_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_LEN 4
+
+/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2
+
+/* MC_CMD_FC_IN_SFP msgrequest */
+#define MC_CMD_FC_IN_SFP_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* Link speed is 100, 1000, 10000, 40000 */
+#define MC_CMD_FC_IN_SFP_SPEED_OFST 4
+#define MC_CMD_FC_IN_SFP_SPEED_LEN 4
+/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */
+#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8
+#define MC_CMD_FC_IN_SFP_COPPER_LEN_LEN 4
+/* Not relevant for cards with QSFP modules. For older cards, true if module is
+ * a dual speed SFP+ module.
+ */
+#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12
+#define MC_CMD_FC_IN_SFP_DUAL_SPEED_LEN 4
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16
+#define MC_CMD_FC_IN_SFP_PRESENT_LEN 4
+/* The type of the SFP+ Module. For later cards with QSFP modules, this field
+ * is unused and the type is communicated by other means.
+ */
+#define MC_CMD_FC_IN_SFP_TYPE_OFST 20
+#define MC_CMD_FC_IN_SFP_TYPE_LEN 4
+/* Capabilities corresponding to 1 bits. */
+#define MC_CMD_FC_IN_SFP_CAPS_OFST 24
+#define MC_CMD_FC_IN_SFP_CAPS_LEN 4
+
+/* MC_CMD_FC_IN_DDR_TEST msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4
+#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8
+/* enum: DRAM Test Start */
+#define MC_CMD_FC_OP_DDR_TEST_START 0x1
+/* enum: DRAM Test Poll */
+#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2
+
+/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_LEN 4
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1
+
+/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12
+#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0
+#define MC_CMD_FC_IN_DDR_TEST_CMD_LEN 4
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4 */
+/* Clear previous test result and prepare for restarting DDR test */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_LEN 4
+
+/* MC_CMD_FC_IN_GET_ASSERT msgrequest */
+#define MC_CMD_FC_IN_GET_ASSERT_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+
+/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */
+#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* FPGA build info operation code */
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_LEN 4
+/* enum: Get the build registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1
+/* enum: Get the services registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2
+/* enum: Get the BSP version */
+#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3
+/* enum: Get build register for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4
+/* enum: GEt the services register for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5
+
+/* MC_CMD_FC_IN_READ_MAP msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4
+#define MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4
+#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0
+#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8
+/* enum: Get the number of map regions */
+#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1
+/* enum: Get the specified map */
+#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2
+
+/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_MAP_INDEX_OFST 8
+#define MC_CMD_FC_IN_MAP_INDEX_LEN 4
+
+/* MC_CMD_FC_IN_CAPABILITIES msgrequest */
+#define MC_CMD_FC_IN_CAPABILITIES_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+
+/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_LEN 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1
+
+/* MC_CMD_FC_IN_IO_REL msgrequest */
+#define MC_CMD_FC_IN_IO_REL_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4
+#define MC_CMD_FC_IN_IO_REL_HEADER_LEN 4
+#define MC_CMD_FC_IN_IO_REL_OP_LBN 0
+#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8
+/* enum: Get the base address that the FC applies to relative commands */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1
+/* enum: Read data */
+#define MC_CMD_FC_IN_IO_REL_READ32 0x2
+/* enum: Write data */
+#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8
+/* enum: Application address space */
+#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1
+/* enum: Flash address space */
+#define MC_CMD_FC_COMP_TYPE_FLASH 0x2
+
+/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_LEN 4
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_LEN 4
+#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16
+#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_LEN 4
+
+/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_LEN 4
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_LEN 4
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59
+
+/* MC_CMD_FC_IN_UHLINK msgrequest */
+#define MC_CMD_FC_IN_UHLINK_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4
+#define MC_CMD_FC_IN_UHLINK_HEADER_LEN 4
+#define MC_CMD_FC_IN_UHLINK_OP_LBN 0
+#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8
+/* enum: Get PHY configuration info */
+#define MC_CMD_FC_OP_UHLINK_PHY 0x1
+/* enum: Get MAC configuration info */
+#define MC_CMD_FC_OP_UHLINK_MAC 0x2
+/* enum: Get Rx eye table */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3
+/* enum: Get Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4
+/* enum: Get Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5
+/* enum: Retune Rx settings */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6
+/* enum: Set loopback mode on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7
+/* enum: Get loopback mode config state on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1
+
+/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */
+#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */
+#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_LEN 4
+#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_LEN 4
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_LEN 4
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_LEN 4
+#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_LEN 4
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_LEN 4
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_LEN 4
+
+/* MC_CMD_FC_IN_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_SET_LINK_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4
+#define MC_CMD_FC_IN_SET_LINK_MODE_LEN 4
+#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8
+#define MC_CMD_FC_IN_SET_LINK_SPEED_LEN 4
+#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12
+#define MC_CMD_FC_IN_SET_LINK_FLAGS_LEN 4
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1
+
+/* MC_CMD_FC_IN_LICENSE msgrequest */
+#define MC_CMD_FC_IN_LICENSE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_LICENSE_OP_OFST 4
+#define MC_CMD_FC_IN_LICENSE_OP_LEN 4
+#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */
+#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */
+
+/* MC_CMD_FC_IN_STARTUP msgrequest */
+#define MC_CMD_FC_IN_STARTUP_LEN 40
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4
+#define MC_CMD_FC_IN_STARTUP_BASE_LEN 4
+#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8
+#define MC_CMD_FC_IN_STARTUP_LENGTH_LEN 4
+/* Length of identifier */
+#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12
+#define MC_CMD_FC_IN_STARTUP_IDLENGTH_LEN 4
+/* Identifier for AOE FPGA */
+#define MC_CMD_FC_IN_STARTUP_ID_OFST 16
+#define MC_CMD_FC_IN_STARTUP_ID_LEN 1
+#define MC_CMD_FC_IN_STARTUP_ID_NUM 24
+
+/* MC_CMD_FC_IN_DMA msgrequest */
+#define MC_CMD_FC_IN_DMA_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DMA_OP_OFST 4
+#define MC_CMD_FC_IN_DMA_OP_LEN 4
+#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */
+#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DMA_STOP msgrequest */
+#define MC_CMD_FC_IN_DMA_STOP_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+/* MC_CMD_FC_IN_DMA_OP_LEN 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8
+#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_LEN 4
+
+/* MC_CMD_FC_IN_DMA_READ msgrequest */
+#define MC_CMD_FC_IN_DMA_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+/* MC_CMD_FC_IN_DMA_OP_LEN 4 */
+#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8
+#define MC_CMD_FC_IN_DMA_READ_OFFSET_LEN 4
+#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12
+#define MC_CMD_FC_IN_DMA_READ_LENGTH_LEN 4
+
+/* MC_CMD_FC_IN_TIMED_READ msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4
+#define MC_CMD_FC_IN_TIMED_READ_OP_LEN 4
+#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */
+
+/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */
+/* Host supplied handle (unique) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_LEN 4
+/* Address into which to transfer data in host */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_LEN 4
+/* Length of host transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_LEN 4
+/* Offset back from aoe_address to apply operation to */
+#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36
+#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_LEN 4
+/* Data to apply at offset */
+#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40
+#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_LEN 4
+#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44
+#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_LEN 4
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */
+/* Period at which reads are performed (100ms units) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48
+#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_LEN 4
+
+/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8
+#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_LEN 4
+
+/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_LEN 4
+
+/* MC_CMD_FC_IN_LOG msgrequest */
+#define MC_CMD_FC_IN_LOG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_LOG_OP_OFST 4
+#define MC_CMD_FC_IN_LOG_OP_LEN 4
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */
+#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */
+
+/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* MC_CMD_FC_IN_LOG_OP_LEN 4 */
+/* Partition offset into flash */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_LEN 4
+/* Partition length */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_LEN 4
+/* Partition erase size */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_LEN 4
+
+/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* MC_CMD_FC_IN_LOG_OP_LEN 4 */
+/* Enable/disable printing to JTAG UART */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8
+#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_LEN 4
+
+/* MC_CMD_FC_IN_CLOCK msgrequest: Perform a clock operation */
+#define MC_CMD_FC_IN_CLOCK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_CLOCK_OP_OFST 4
+#define MC_CMD_FC_IN_CLOCK_OP_LEN 4
+#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */
+#define MC_CMD_FC_IN_CLOCK_ID_OFST 8
+#define MC_CMD_FC_IN_CLOCK_ID_LEN 4
+#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */
+
+/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest: Retrieve the clock value of the
+ * specified clock
+ */
+#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* MC_CMD_FC_IN_CLOCK_OP_LEN 4 */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+/* MC_CMD_FC_IN_CLOCK_ID_LEN 4 */
+
+/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest: Set the clock value of the specified
+ * clock
+ */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* MC_CMD_FC_IN_CLOCK_OP_LEN 4 */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+/* MC_CMD_FC_IN_CLOCK_ID_LEN 4 */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_LEN 4
+
+/* MC_CMD_FC_IN_DDR msgrequest */
+#define MC_CMD_FC_IN_DDR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DDR_OP_OFST 4
+#define MC_CMD_FC_IN_DDR_OP_LEN 4
+#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_OFST 8
+#define MC_CMD_FC_IN_DDR_BANK_LEN 4
+#define MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */
+#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* MC_CMD_FC_IN_DDR_OP_LEN 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */
+/* Flags */
+#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12
+#define MC_CMD_FC_IN_DDR_FLAGS_LEN 4
+#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */
+/* 128-byte page of serial presence detect data read from module's EEPROM */
+#define MC_CMD_FC_IN_DDR_SPD_OFST 16
+#define MC_CMD_FC_IN_DDR_SPD_LEN 1
+#define MC_CMD_FC_IN_DDR_SPD_NUM 128
+/* Page index of the spd data copied into MC_CMD_FC_IN_DDR_SPD */
+#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144
+#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_LEN 4
+
+/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* MC_CMD_FC_IN_DDR_OP_LEN 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */
+/* Size of DDR */
+#define MC_CMD_FC_IN_DDR_SIZE_OFST 12
+#define MC_CMD_FC_IN_DDR_SIZE_LEN 4
+
+/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */
+#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* MC_CMD_FC_IN_DDR_OP_LEN 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */
+
+/* MC_CMD_FC_IN_TIMESTAMP msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* FC timestamp operation code */
+#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4
+#define MC_CMD_FC_IN_TIMESTAMP_OP_LEN 4
+/* enum: Read transmit timestamp(s) */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0
+/* enum: Read snapshot timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1
+/* enum: Clear all transmit timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_LEN 4
+/* Control filtering of the returned timestamp and sequence number specified
+ * here
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_LEN 4
+/* enum: Return most recent timestamp. No filtering */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0
+/* enum: Match timestamp against the PTP clock ID, port number and sequence
+ * number specified
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1
+/* Clock identity of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16
+/* Port number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_LEN 4
+/* Sequence number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_LEN 4
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_LEN 4
+
+/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_LEN 4
+
+/* MC_CMD_FC_IN_SPI msgrequest */
+#define MC_CMD_FC_IN_SPI_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* Basic commands for SPI Flash. */
+#define MC_CMD_FC_IN_SPI_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_OP_LEN 4
+/* enum: SPI Flash read */
+#define MC_CMD_FC_IN_SPI_READ 0x0
+/* enum: SPI Flash write */
+#define MC_CMD_FC_IN_SPI_WRITE 0x1
+/* enum: SPI Flash erase */
+#define MC_CMD_FC_IN_SPI_ERASE 0x2
+
+/* MC_CMD_FC_IN_SPI_READ msgrequest */
+#define MC_CMD_FC_IN_SPI_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_READ_OP_LEN 4
+#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_READ_ADDR_LEN 4
+#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12
+#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_LEN 4
+
+/* MC_CMD_FC_IN_SPI_WRITE msgrequest */
+#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16
+#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252
+#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_WRITE_OP_LEN 4
+#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_WRITE_ADDR_LEN 4
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_FC_IN_SPI_ERASE msgrequest */
+#define MC_CMD_FC_IN_SPI_ERASE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_ERASE_OP_LEN 4
+#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_ERASE_ADDR_LEN 4
+#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12
+#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_LEN 4
+
+/* MC_CMD_FC_IN_DIAG msgrequest */
+#define MC_CMD_FC_IN_DIAG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* Operation code indicating component type */
+#define MC_CMD_FC_IN_DIAG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_OP_LEN 4
+/* enum: Power noise generator. */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0
+/* enum: DDR soak test component. */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1
+/* enum: Diagnostics datapath control component. */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_LEN 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_LEN 4
+/* enum: Read the configuration (the 32-bit values in each of the clock enable
+ * count and toggle count registers)
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0
+/* enum: Write a new configuration to the clock enable count and toggle count
+ * registers
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_LEN 4
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_LEN 4
+/* The 32-bit value to be written to the toggle count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_LEN 4
+/* The 32-bit value to be written to the clock enable count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_LEN 4
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_LEN 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_LEN 4
+/* enum: Starts DDR soak test on selected banks */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0
+/* enum: Read status of DDR soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1
+/* enum: Stop test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2
+/* enum: Set or clear bit that triggers fake errors. These cause subsequent
+ * tests to fail until the bit is cleared.
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_LEN 4
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_LEN 4
+/* Pattern to use in the soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */
+/* Either multiple automatic tests until a STOP command is issued, or one
+ * single test
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_LEN 4
+/* DDR bank to read status from */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_LEN 4
+#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */
+#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */
+#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */
+#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */
+#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_LEN 4
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_LEN 4
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_LEN 4
+/* Mask of DDR banks to set/clear error flag on */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_LEN 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_LEN 4
+/* enum: Set a known datapath configuration */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0
+/* enum: Apply raw config to datapath control registers */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_LEN 4
+/* Datapath configuration identifier */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_LEN 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_LEN 4
+/* Value to write into control register 1 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_LEN 4
+/* Value to write into control register 2 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_LEN 4
+/* Value to write into control register 3 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_LEN 4
+
+/* MC_CMD_FC_OUT msgresponse */
+#define MC_CMD_FC_OUT_LEN 0
+
+/* MC_CMD_FC_OUT_NULL msgresponse */
+#define MC_CMD_FC_OUT_NULL_LEN 0
+
+/* MC_CMD_FC_OUT_READ32 msgresponse */
+#define MC_CMD_FC_OUT_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_TRC_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_READ_LEN 16
+#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4
+
+/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_GET_VERSION msgresponse */
+#define MC_CMD_FC_OUT_GET_VERSION_LEN 12
+#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_LEN 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2
+
+/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */
+#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_LEN 4
+
+/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS
+#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */
+#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_RX_NSTATS 0x19
+
+/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS
+#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */
+#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_TX_NSTATS 0x16
+
+/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3)
+/* MAC Statistics */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_MAC msgresponse */
+#define MC_CMD_FC_OUT_MAC_LEN 0
+
+/* MC_CMD_FC_OUT_SFP msgresponse */
+#define MC_CMD_FC_OUT_SFP_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8
+/* enum: Test not yet initiated */
+#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0
+/* enum: Test is in progress */
+#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1
+/* enum: Timed completed */
+#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2
+/* enum: Test did not complete in specified time */
+#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1
+/* Test result from FPGA */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_LEN 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */
+
+/* MC_CMD_FC_OUT_DDR_TEST msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_LEN 0
+
+/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */
+#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144
+/* Assertion status flag. */
+#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8
+/* enum: No crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0
+/* enum: New crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1
+/* enum: Crash data has been sent */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8
+/* enum: No crash has been recorded. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0
+/* enum: Crash due to exception. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1
+/* enum: Crash due to assertion. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2
+/* Failing PC value */
+#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31
+/* Exception Type */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_LEN 4
+/* Instruction at which exception occurred */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_LEN 4
+/* BAD Address that triggered address-based exception */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_LEN 4
+
+/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8
+#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */
+#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */
+#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4
+#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */
+#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4
+/* Qsys system ID */
+#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0
+#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_LEN 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4
+
+/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4
+/* Number of maps */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_LEN 4
+
+/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164
+/* Index of the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_LEN 4
+/* Options for the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_LEN 4
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */
+/* Address of start of map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12
+/* Length of address map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20
+/* Component information field */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_LEN 4
+/* License expiry data for map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32
+/* Name of the component */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128
+
+/* MC_CMD_FC_OUT_READ_MAP msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_LEN 0
+
+/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */
+#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8
+/* Number of internal ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0
+#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_LEN 4
+/* Number of external ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4
+#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_LEN 4
+
+/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_LEN 4
+
+/* MC_CMD_FC_OUT_IO_REL msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_LEN 0
+
+/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_LEN 4
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_LEN 4
+
+/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16
+/* Transceiver Transmit settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16
+/* Transceiver Receive settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16
+/* Rx eye opening */
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 16
+/* PCS status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_LEN 4
+/* Link status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1
+/* Current SFp parameters applied */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20
+/* Link speed is 100, 1000, 10000 */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_LEN 4
+/* Length of copper cable - zero when not relevant */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_LEN 4
+/* True if a dual speed SFP+ module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_LEN 4
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_LEN 4
+/* The type of the SFP+ Module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_LEN 4
+/* PHY config flags */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1
+
+/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20
+/* MAC configuration applied */
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_LEN 4
+/* MTU size */
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_LEN 4
+/* IF Mode status */
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_LEN 4
+/* MAC address configured */
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16
+
+/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3)
+/* Rx Eye measurements */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3)
+/* Has the eye plot dump completed and data returned is valid? */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_LEN 4
+/* Rx Eye binary plot */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_LEN 4
+
+/* MC_CMD_FC_OUT_UHLINK msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LEN 0
+
+/* MC_CMD_FC_OUT_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_LICENSE msgresponse */
+#define MC_CMD_FC_OUT_LICENSE_LEN 12
+/* Count of valid keys */
+#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0
+#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_LEN 4
+/* Count of invalid keys */
+#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4
+#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_LEN 4
+/* Count of blacklisted keys */
+#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8
+#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_LEN 4
+
+/* MC_CMD_FC_OUT_STARTUP msgresponse */
+#define MC_CMD_FC_OUT_STARTUP_LEN 4
+/* Capabilities of the FPGA/FC */
+#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0
+#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_LEN 4
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1
+
+/* MC_CMD_FC_OUT_DMA_READ msgresponse */
+#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1
+#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252
+#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num))
+/* The data read */
+#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252
+
+/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4
+/* Timer handle */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0
+#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_LEN 4
+
+/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52
+/* Host supplied handle (unique) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_LEN 4
+/* Address into which to transfer data in host */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_LEN 4
+/* Length of host transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_LEN 4
+/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28
+#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32
+#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_LEN 4
+/* When active, start read time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40
+/* When active, end read time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48
+
+/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */
+#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0
+
+/* MC_CMD_FC_OUT_LOG msgresponse */
+#define MC_CMD_FC_OUT_LOG_LEN 0
+
+/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_LEN 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_LEN 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_LEN 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_LEN 4
+
+/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_LEN 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_LEN 4
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num))
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_LEN 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_LEN 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31
+
+/* MC_CMD_FC_OUT_SPI_READ msgresponse */
+#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4
+#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252
+#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */
+#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */
+#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8
+/* The 32-bit value read from the toggle count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_LEN 4
+/* The 32-bit value read from the clock enable count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_LEN 4
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8
+/* DDR soak test status word; bits [4:0] are relevant. */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1
+/* DDR soak test error count */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_LEN 4
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0
+
+
+/***********************************/
+/* MC_CMD_AOE
+ * AOE operations on MC
+ */
+#define MC_CMD_AOE 0xa
+
+/* MC_CMD_AOE_IN msgrequest */
+#define MC_CMD_AOE_IN_LEN 4
+#define MC_CMD_AOE_IN_OP_HDR_OFST 0
+#define MC_CMD_AOE_IN_OP_HDR_LEN 4
+#define MC_CMD_AOE_IN_OP_LBN 0
+#define MC_CMD_AOE_IN_OP_WIDTH 8
+/* enum: FPGA and CPLD information */
+#define MC_CMD_AOE_OP_INFO 0x1
+/* enum: Currents and voltages read from MCP3424s; DEBUG */
+#define MC_CMD_AOE_OP_CURRENTS 0x2
+/* enum: Temperatures at locations around the PCB; DEBUG */
+#define MC_CMD_AOE_OP_TEMPERATURES 0x3
+/* enum: Set CPLD to idle */
+#define MC_CMD_AOE_OP_CPLD_IDLE 0x4
+/* enum: Read from CPLD register */
+#define MC_CMD_AOE_OP_CPLD_READ 0x5
+/* enum: Write to CPLD register */
+#define MC_CMD_AOE_OP_CPLD_WRITE 0x6
+/* enum: Execute CPLD instruction */
+#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7
+/* enum: Reprogram the CPLD on the AOE device */
+#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8
+/* enum: AOE power control */
+#define MC_CMD_AOE_OP_POWER 0x9
+/* enum: AOE image loading */
+#define MC_CMD_AOE_OP_LOAD 0xa
+/* enum: Fan monitoring */
+#define MC_CMD_AOE_OP_FAN_CONTROL 0xb
+/* enum: Fan failures since last reset */
+#define MC_CMD_AOE_OP_FAN_FAILURES 0xc
+/* enum: Get generic AOE MAC statistics */
+#define MC_CMD_AOE_OP_MAC_STATS 0xd
+/* enum: Retrieve PHY specific information */
+#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe
+/* enum: Write a number of JTAG primitive commands, return will give data */
+#define MC_CMD_AOE_OP_JTAG_WRITE 0xf
+/* enum: Control access to the FPGA via the Siena JTAG Chain */
+#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10
+/* enum: Set the MTU offset between Siena and AOE MACs */
+#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11
+/* enum: How link state is handled */
+#define MC_CMD_AOE_OP_LINK_STATE 0x12
+/* enum: How Siena MAC statistics are reported (deprecated - use
+ * MC_CMD_AOE_OP_ASIC_STATS)
+ */
+#define MC_CMD_AOE_OP_SIENA_STATS 0x13
+/* enum: How native ASIC MAC statistics are reported - replaces the deprecated
+ * command MC_CMD_AOE_OP_SIENA_STATS
+ */
+#define MC_CMD_AOE_OP_ASIC_STATS 0x13
+/* enum: DDR memory information */
+#define MC_CMD_AOE_OP_DDR 0x14
+/* enum: FC control */
+#define MC_CMD_AOE_OP_FC 0x15
+/* enum: DDR ECC status reads */
+#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16
+/* enum: Commands for MC-SPI Master emulation */
+#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17
+/* enum: Commands for FC boot control */
+#define MC_CMD_AOE_OP_FC_BOOT 0x18
+/* enum: Get number of internal ports */
+#define MC_CMD_AOE_OP_GET_ASIC_PORTS 0x19
+/* enum: Get FC assert information and register dump */
+#define MC_CMD_AOE_OP_GET_FC_ASSERT_INFO 0x1a
+
+/* MC_CMD_AOE_OUT msgresponse */
+#define MC_CMD_AOE_OUT_LEN 0
+
+/* MC_CMD_AOE_IN_INFO msgrequest */
+#define MC_CMD_AOE_IN_INFO_LEN 4
+#define MC_CMD_AOE_IN_CMD_OFST 0
+#define MC_CMD_AOE_IN_CMD_LEN 4
+
+/* MC_CMD_AOE_IN_CURRENTS msgrequest */
+#define MC_CMD_AOE_IN_CURRENTS_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */
+#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_CPLD_READ msgrequest */
+#define MC_CMD_AOE_IN_CPLD_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_LEN 4
+#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8
+#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_LEN 4
+
+/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_LEN 4
+#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8
+#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_LEN 4
+#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12
+#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_LEN 4
+
+/* MC_CMD_AOE_IN_CPLD_INSTRUCTION msgrequest */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_LEN 4
+
+/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_LEN 4
+/* enum: Reprogram CPLD, poll for completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1
+/* enum: Reprogram CPLD, send event on completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3
+/* enum: Get status of reprogramming operation */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4
+
+/* MC_CMD_AOE_IN_POWER msgrequest */
+#define MC_CMD_AOE_IN_POWER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Turn on or off AOE power */
+#define MC_CMD_AOE_IN_POWER_OP_OFST 4
+#define MC_CMD_AOE_IN_POWER_OP_LEN 4
+/* enum: Turn off FPGA power */
+#define MC_CMD_AOE_IN_POWER_OFF 0x0
+/* enum: Turn on FPGA power */
+#define MC_CMD_AOE_IN_POWER_ON 0x1
+/* enum: Clear peak power measurement */
+#define MC_CMD_AOE_IN_POWER_CLEAR 0x2
+/* enum: Show current power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3
+/* enum: Show peak power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4
+/* enum: Show current DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5
+/* enum: Show peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6
+/* enum: Clear peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7
+
+/* MC_CMD_AOE_IN_LOAD msgrequest */
+#define MC_CMD_AOE_IN_LOAD_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Image to be loaded (0 - main or 1 - diagnostic) to load in normal sequence
+ */
+#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4
+#define MC_CMD_AOE_IN_LOAD_IMAGE_LEN 4
+
+/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */
+#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* If non zero report measured fan RPM rather than nominal */
+#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4
+#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_LEN 4
+
+/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */
+#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_MAC_STATS msgrequest */
+#define MC_CMD_AOE_IN_MAC_STATS_LEN 24
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* AOE port */
+#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4
+#define MC_CMD_AOE_IN_MAC_STATS_PORT_LEN 4
+/* Host memory address for statistics */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12
+#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16
+#define MC_CMD_AOE_IN_MAC_STATS_CMD_LEN 4
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16
+/* Length of DMA data (optional) */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_LEN 4
+
+/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* AOE port */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_LEN 4
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_LEN 4
+
+/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num))
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_LEN 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Enable or disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4
+#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_LEN 4
+/* enum: Enable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1
+/* enum: Disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2
+
+/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* AOE port - when not ALL_EXTERNAL or ALL_INTERNAL specifies port number */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_LEN 4
+/* enum: Apply to all external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000
+/* enum: Apply to all internal ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000
+/* The MTU offset to be applied to the external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_LEN 4
+
+/* MC_CMD_AOE_IN_LINK_STATE msgrequest */
+#define MC_CMD_AOE_IN_LINK_STATE_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4
+#define MC_CMD_AOE_IN_LINK_STATE_MODE_LEN 4
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8
+/* enum: AOE and associated external port */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0
+/* enum: AOE and OR of all external ports */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1
+/* enum: Individual ports */
+#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2
+/* enum: Configure link state mode on given AOE port */
+#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8
+/* enum: No-op */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0
+/* enum: logical OR of all SFP ports link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1
+/* enum: logical AND of all SFP ports link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16
+
+/* MC_CMD_AOE_IN_GET_ASIC_PORTS msgrequest */
+#define MC_CMD_AOE_IN_GET_ASIC_PORTS_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_GET_FC_ASSERT_INFO msgrequest */
+#define MC_CMD_AOE_IN_GET_FC_ASSERT_INFO_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */
+#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4
+#define MC_CMD_AOE_IN_SIENA_STATS_MODE_LEN 4
+/* enum: Statistics from Siena (default) */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */
+#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4
+#define MC_CMD_AOE_IN_ASIC_STATS_MODE_LEN 4
+/* enum: Statistics from the ASIC (default) */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_DDR msgrequest */
+#define MC_CMD_AOE_IN_DDR_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_DDR_BANK_OFST 4
+#define MC_CMD_AOE_IN_DDR_BANK_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+/* Page index of SPD data */
+#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8
+#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_LEN 4
+
+/* MC_CMD_AOE_IN_FC msgrequest */
+#define MC_CMD_AOE_IN_FC_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Basic commands for MC SPI Master emulation. */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_LEN 4
+/* enum: MC SPI read */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0
+/* enum: MC SPI write */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_LEN 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_LEN 4
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_LEN 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_LEN 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_LEN 4
+
+/* MC_CMD_AOE_IN_FC_BOOT msgrequest */
+#define MC_CMD_AOE_IN_FC_BOOT_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* FC boot control flags */
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_LEN 4
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1
+
+/* MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO msgresponse */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_LEN 144
+/* Assertion status flag. */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_LEN 4
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_LBN 8
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_WIDTH 8
+/* enum: No crash data available */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0 */
+/* enum: New crash data available */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1 */
+/* enum: Crash data has been sent */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2 */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_TYPE_LBN 0
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_TYPE_WIDTH 8
+/* enum: No crash has been recorded. */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0 */
+/* enum: Crash due to exception. */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1 */
+/* enum: Crash due to assertion. */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2 */
+/* Failing PC value */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_OFST 8
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_LEN 4
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_NUM 31
+/* Exception Type */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_TYPE_OFFS_OFST 132
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_TYPE_OFFS_LEN 4
+/* Instruction at which exception occurred */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_PC_ADDR_OFFS_OFST 136
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_PC_ADDR_OFFS_LEN 4
+/* BAD Address that triggered address-based exception */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_BAD_ADDR_OFFS_OFST 140
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_BAD_ADDR_OFFS_LEN 4
+
+/* MC_CMD_AOE_OUT_INFO msgresponse */
+#define MC_CMD_AOE_OUT_INFO_LEN 44
+/* JTAG IDCODE of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0
+#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_LEN 4
+/* Version of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4
+#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_LEN 4
+/* JTAG IDCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8
+#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_LEN 4
+/* JTAG USERCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12
+#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_LEN 4
+/* FPGA type - read from CPLD straps */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_LEN 4
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */
+/* FPGA state (debug) */
+#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20
+#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_LEN 4
+/* FPGA image - partition from which loaded */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24
+#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_LEN 4
+/* FC state */
+#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28
+#define MC_CMD_AOE_OUT_INFO_FC_STATE_LEN 4
+/* enum: Set if watchdog working */
+#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1
+/* enum: Set if MC-FC communications working */
+#define MC_CMD_AOE_OUT_INFO_COMMS 0x2
+/* Random pieces of information */
+#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32
+#define MC_CMD_AOE_OUT_INFO_FLAGS_LEN 4
+/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */
+#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1
+/* enum: CPLD apparently good */
+#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2
+/* enum: FPGA working normally */
+#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4
+/* enum: FPGA is powered */
+#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8
+/* enum: Board has incompatible SODIMMs fitted */
+#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10
+/* enum: Board has ByteBlaster connected */
+#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20
+/* enum: FPGA Boot flash has an invalid header. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40
+/* enum: FPGA Application flash is accessible. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80
+/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3. */
+#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36
+#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_LEN 4
+#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */
+/* Result of FC booting - not valid while a ByteBlaster is connected. */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_LEN 4
+/* enum: No error */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0
+/* enum: Bad address set in CPLD */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1
+/* enum: Bad header */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2
+/* enum: Bad text section details */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3
+/* enum: Bad checksum */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4
+/* enum: Bad BSP */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5
+/* enum: Flash mode is invalid */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6
+/* enum: FC application loaded and execution attempted */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80
+/* enum: FC application Started */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81
+/* enum: No bootrom in FPGA */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff
+
+/* MC_CMD_AOE_OUT_CURRENTS msgresponse */
+#define MC_CMD_AOE_OUT_CURRENTS_LEN 68
+/* Set of currents and voltages (mA or mV as appropriate) */
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17
+#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */
+
+/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */
+#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40
+/* Set of temperatures */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10
+/* enum: The first set of enum values are for Modena code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */
+/* enum: The second set of enum values are for Sorrento code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */
+
+/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4
+/* The value read from the CPLD */
+#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0
+#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_LEN 4
+
+/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num))
+/* Failure counts for each fan */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63
+
+/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4
+/* Results of status command (only) */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_LEN 4
+
+/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */
+#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0
+
+/* MC_CMD_AOE_OUT_POWER_ON msgresponse */
+#define MC_CMD_AOE_OUT_POWER_ON_LEN 0
+
+/* MC_CMD_AOE_OUT_LOAD msgresponse */
+#define MC_CMD_AOE_OUT_LOAD_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */
+#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA
+ * for details
+ */
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+
+/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_LEN 4
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num))
+/* Used to align the in and out data blocks so the MC can re-use the cmd */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_LEN 4
+/* out bytes */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_LEN 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */
+#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0
+
+/* MC_CMD_AOE_OUT_DDR msgresponse */
+#define MC_CMD_AOE_OUT_DDR_LENMIN 17
+#define MC_CMD_AOE_OUT_DDR_LENMAX 252
+#define MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num))
+/* Information on the module. */
+#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_FLAGS_LEN 4
+#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0
+#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1
+/* Memory size, in MB. */
+#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4
+#define MC_CMD_AOE_OUT_DDR_CAPACITY_LEN 4
+/* The memory type, as reported from SPD information */
+#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8
+#define MC_CMD_AOE_OUT_DDR_TYPE_LEN 4
+/* Nominal voltage of the module (as applied) */
+#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12
+#define MC_CMD_AOE_OUT_DDR_VOLTAGE_LEN 4
+/* SPD data read from the module */
+#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16
+#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236
+
+/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */
+#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0
+
+/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */
+#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0
+
+/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */
+#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */
+#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_FC msgresponse */
+#define MC_CMD_AOE_OUT_FC_LEN 0
+
+/* MC_CMD_AOE_OUT_GET_ASIC_PORTS msgresponse */
+#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_LEN 4
+/* get the number of internal ports */
+#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_COUNT_PORTS_OFST 0
+#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_COUNT_PORTS_LEN 4
+
+/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8
+/* Flags describing status info on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_LEN 4
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1
+/* DDR ECC status on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_LEN 4
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_LEN 4
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0
+
+/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */
+#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0
+
+#endif /* _SIENA_MC_DRIVER_PCOL_AOE_H */
+/*! \cidoxg_end */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_pci.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_pci.h
new file mode 100644
index 00000000..29f51385
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_pci.h
@@ -0,0 +1,2332 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_EFX_REGS_PCI_H
+#define _SYS_EFX_REGS_PCI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PC_VEND_ID_REG(16bit):
+ * Vendor ID register
+ */
+
+#define PCR_AZ_VEND_ID_REG 0x00000000
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_VEND_ID_LBN 0
+#define PCRF_AZ_VEND_ID_WIDTH 16
+
+
+/*
+ * PC_DEV_ID_REG(16bit):
+ * Device ID register
+ */
+
+#define PCR_AZ_DEV_ID_REG 0x00000002
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_DEV_ID_LBN 0
+#define PCRF_AZ_DEV_ID_WIDTH 16
+
+
+/*
+ * PC_CMD_REG(16bit):
+ * Command register
+ */
+
+#define PCR_AZ_CMD_REG 0x00000004
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INTX_DIS_LBN 10
+#define PCRF_AZ_INTX_DIS_WIDTH 1
+#define PCRF_AZ_FB2B_EN_LBN 9
+#define PCRF_AZ_FB2B_EN_WIDTH 1
+#define PCRF_AZ_SERR_EN_LBN 8
+#define PCRF_AZ_SERR_EN_WIDTH 1
+#define PCRF_AZ_IDSEL_CTL_LBN 7
+#define PCRF_AZ_IDSEL_CTL_WIDTH 1
+#define PCRF_AZ_PERR_EN_LBN 6
+#define PCRF_AZ_PERR_EN_WIDTH 1
+#define PCRF_AZ_VGA_PAL_SNP_LBN 5
+#define PCRF_AZ_VGA_PAL_SNP_WIDTH 1
+#define PCRF_AZ_MWI_EN_LBN 4
+#define PCRF_AZ_MWI_EN_WIDTH 1
+#define PCRF_AZ_SPEC_CYC_LBN 3
+#define PCRF_AZ_SPEC_CYC_WIDTH 1
+#define PCRF_AZ_MST_EN_LBN 2
+#define PCRF_AZ_MST_EN_WIDTH 1
+#define PCRF_AZ_MEM_EN_LBN 1
+#define PCRF_AZ_MEM_EN_WIDTH 1
+#define PCRF_AZ_IO_EN_LBN 0
+#define PCRF_AZ_IO_EN_WIDTH 1
+
+
+/*
+ * PC_STAT_REG(16bit):
+ * Status register
+ */
+
+#define PCR_AZ_STAT_REG 0x00000006
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_DET_PERR_LBN 15
+#define PCRF_AZ_DET_PERR_WIDTH 1
+#define PCRF_AZ_SIG_SERR_LBN 14
+#define PCRF_AZ_SIG_SERR_WIDTH 1
+#define PCRF_AZ_GOT_MABRT_LBN 13
+#define PCRF_AZ_GOT_MABRT_WIDTH 1
+#define PCRF_AZ_GOT_TABRT_LBN 12
+#define PCRF_AZ_GOT_TABRT_WIDTH 1
+#define PCRF_AZ_SIG_TABRT_LBN 11
+#define PCRF_AZ_SIG_TABRT_WIDTH 1
+#define PCRF_AZ_DEVSEL_TIM_LBN 9
+#define PCRF_AZ_DEVSEL_TIM_WIDTH 2
+#define PCRF_AZ_MDAT_PERR_LBN 8
+#define PCRF_AZ_MDAT_PERR_WIDTH 1
+#define PCRF_AZ_FB2B_CAP_LBN 7
+#define PCRF_AZ_FB2B_CAP_WIDTH 1
+#define PCRF_AZ_66MHZ_CAP_LBN 5
+#define PCRF_AZ_66MHZ_CAP_WIDTH 1
+#define PCRF_AZ_CAP_LIST_LBN 4
+#define PCRF_AZ_CAP_LIST_WIDTH 1
+#define PCRF_AZ_INTX_STAT_LBN 3
+#define PCRF_AZ_INTX_STAT_WIDTH 1
+
+
+/*
+ * PC_REV_ID_REG(8bit):
+ * Class code & revision ID register
+ */
+
+#define PCR_AZ_REV_ID_REG 0x00000008
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_REV_ID_LBN 0
+#define PCRF_AZ_REV_ID_WIDTH 8
+
+
+/*
+ * PC_CC_REG(24bit):
+ * Class code register
+ */
+
+#define PCR_AZ_CC_REG 0x00000009
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BASE_CC_LBN 16
+#define PCRF_AZ_BASE_CC_WIDTH 8
+#define PCRF_AZ_SUB_CC_LBN 8
+#define PCRF_AZ_SUB_CC_WIDTH 8
+#define PCRF_AZ_PROG_IF_LBN 0
+#define PCRF_AZ_PROG_IF_WIDTH 8
+
+
+/*
+ * PC_CACHE_LSIZE_REG(8bit):
+ * Cache line size
+ */
+
+#define PCR_AZ_CACHE_LSIZE_REG 0x0000000c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_CACHE_LSIZE_LBN 0
+#define PCRF_AZ_CACHE_LSIZE_WIDTH 8
+
+
+/*
+ * PC_MST_LAT_REG(8bit):
+ * Master latency timer register
+ */
+
+#define PCR_AZ_MST_LAT_REG 0x0000000d
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MST_LAT_LBN 0
+#define PCRF_AZ_MST_LAT_WIDTH 8
+
+
+/*
+ * PC_HDR_TYPE_REG(8bit):
+ * Header type register
+ */
+
+#define PCR_AZ_HDR_TYPE_REG 0x0000000e
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MULT_FUNC_LBN 7
+#define PCRF_AZ_MULT_FUNC_WIDTH 1
+#define PCRF_AZ_TYPE_LBN 0
+#define PCRF_AZ_TYPE_WIDTH 7
+
+
+/*
+ * PC_BIST_REG(8bit):
+ * BIST register
+ */
+
+#define PCR_AZ_BIST_REG 0x0000000f
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BIST_LBN 0
+#define PCRF_AZ_BIST_WIDTH 8
+
+
+/*
+ * PC_BAR0_REG(32bit):
+ * Primary function base address register 0
+ */
+
+#define PCR_AZ_BAR0_REG 0x00000010
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR0_LBN 4
+#define PCRF_AZ_BAR0_WIDTH 28
+#define PCRF_AZ_BAR0_PREF_LBN 3
+#define PCRF_AZ_BAR0_PREF_WIDTH 1
+#define PCRF_AZ_BAR0_TYPE_LBN 1
+#define PCRF_AZ_BAR0_TYPE_WIDTH 2
+#define PCRF_AZ_BAR0_IOM_LBN 0
+#define PCRF_AZ_BAR0_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR1_REG(32bit):
+ * Primary function base address register 1, BAR1 is not implemented so read only.
+ */
+
+#define PCR_DZ_BAR1_REG 0x00000014
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_BAR1_LBN 0
+#define PCRF_DZ_BAR1_WIDTH 32
+
+
+/*
+ * PC_BAR2_LO_REG(32bit):
+ * Primary function base address register 2 low bits
+ */
+
+#define PCR_AZ_BAR2_LO_REG 0x00000018
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR2_LO_LBN 4
+#define PCRF_AZ_BAR2_LO_WIDTH 28
+#define PCRF_AZ_BAR2_PREF_LBN 3
+#define PCRF_AZ_BAR2_PREF_WIDTH 1
+#define PCRF_AZ_BAR2_TYPE_LBN 1
+#define PCRF_AZ_BAR2_TYPE_WIDTH 2
+#define PCRF_AZ_BAR2_IOM_LBN 0
+#define PCRF_AZ_BAR2_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR2_HI_REG(32bit):
+ * Primary function base address register 2 high bits
+ */
+
+#define PCR_AZ_BAR2_HI_REG 0x0000001c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR2_HI_LBN 0
+#define PCRF_AZ_BAR2_HI_WIDTH 32
+
+
+/*
+ * PC_BAR4_LO_REG(32bit):
+ * Primary function base address register 2 low bits
+ */
+
+#define PCR_CZ_BAR4_LO_REG 0x00000020
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_BAR4_LO_LBN 4
+#define PCRF_CZ_BAR4_LO_WIDTH 28
+#define PCRF_CZ_BAR4_PREF_LBN 3
+#define PCRF_CZ_BAR4_PREF_WIDTH 1
+#define PCRF_CZ_BAR4_TYPE_LBN 1
+#define PCRF_CZ_BAR4_TYPE_WIDTH 2
+#define PCRF_CZ_BAR4_IOM_LBN 0
+#define PCRF_CZ_BAR4_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR4_HI_REG(32bit):
+ * Primary function base address register 2 high bits
+ */
+
+#define PCR_CZ_BAR4_HI_REG 0x00000024
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_BAR4_HI_LBN 0
+#define PCRF_CZ_BAR4_HI_WIDTH 32
+
+
+/*
+ * PC_SS_VEND_ID_REG(16bit):
+ * Sub-system vendor ID register
+ */
+
+#define PCR_AZ_SS_VEND_ID_REG 0x0000002c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SS_VEND_ID_LBN 0
+#define PCRF_AZ_SS_VEND_ID_WIDTH 16
+
+
+/*
+ * PC_SS_ID_REG(16bit):
+ * Sub-system ID register
+ */
+
+#define PCR_AZ_SS_ID_REG 0x0000002e
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SS_ID_LBN 0
+#define PCRF_AZ_SS_ID_WIDTH 16
+
+
+/*
+ * PC_EXPROM_BAR_REG(32bit):
+ * Expansion ROM base address register
+ */
+
+#define PCR_AZ_EXPROM_BAR_REG 0x00000030
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_EXPROM_BAR_LBN 11
+#define PCRF_AZ_EXPROM_BAR_WIDTH 21
+#define PCRF_AB_EXPROM_MIN_SIZE_LBN 2
+#define PCRF_AB_EXPROM_MIN_SIZE_WIDTH 9
+#define PCRF_CZ_EXPROM_MIN_SIZE_LBN 1
+#define PCRF_CZ_EXPROM_MIN_SIZE_WIDTH 10
+#define PCRF_AB_EXPROM_FEATURE_ENABLE_LBN 1
+#define PCRF_AB_EXPROM_FEATURE_ENABLE_WIDTH 1
+#define PCRF_AZ_EXPROM_EN_LBN 0
+#define PCRF_AZ_EXPROM_EN_WIDTH 1
+
+
+/*
+ * PC_CAP_PTR_REG(8bit):
+ * Capability pointer register
+ */
+
+#define PCR_AZ_CAP_PTR_REG 0x00000034
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_CAP_PTR_LBN 0
+#define PCRF_AZ_CAP_PTR_WIDTH 8
+
+
+/*
+ * PC_INT_LINE_REG(8bit):
+ * Interrupt line register
+ */
+
+#define PCR_AZ_INT_LINE_REG 0x0000003c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INT_LINE_LBN 0
+#define PCRF_AZ_INT_LINE_WIDTH 8
+
+
+/*
+ * PC_INT_PIN_REG(8bit):
+ * Interrupt pin register
+ */
+
+#define PCR_AZ_INT_PIN_REG 0x0000003d
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INT_PIN_LBN 0
+#define PCRF_AZ_INT_PIN_WIDTH 8
+#define PCFE_DZ_INTPIN_INTD 4
+#define PCFE_DZ_INTPIN_INTC 3
+#define PCFE_DZ_INTPIN_INTB 2
+#define PCFE_DZ_INTPIN_INTA 1
+
+
+/*
+ * PC_PM_CAP_ID_REG(8bit):
+ * Power management capability ID
+ */
+
+#define PCR_AZ_PM_CAP_ID_REG 0x00000040
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_CAP_ID_LBN 0
+#define PCRF_AZ_PM_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_PM_NXT_PTR_REG(8bit):
+ * Power management next item pointer
+ */
+
+#define PCR_AZ_PM_NXT_PTR_REG 0x00000041
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_NXT_PTR_LBN 0
+#define PCRF_AZ_PM_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_PM_CAP_REG(16bit):
+ * Power management capabilities register
+ */
+
+#define PCR_AZ_PM_CAP_REG 0x00000042
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_PME_SUPT_LBN 11
+#define PCRF_AZ_PM_PME_SUPT_WIDTH 5
+#define PCRF_AZ_PM_D2_SUPT_LBN 10
+#define PCRF_AZ_PM_D2_SUPT_WIDTH 1
+#define PCRF_AZ_PM_D1_SUPT_LBN 9
+#define PCRF_AZ_PM_D1_SUPT_WIDTH 1
+#define PCRF_AZ_PM_AUX_CURR_LBN 6
+#define PCRF_AZ_PM_AUX_CURR_WIDTH 3
+#define PCRF_AZ_PM_DSI_LBN 5
+#define PCRF_AZ_PM_DSI_WIDTH 1
+#define PCRF_AZ_PM_PME_CLK_LBN 3
+#define PCRF_AZ_PM_PME_CLK_WIDTH 1
+#define PCRF_AZ_PM_PME_VER_LBN 0
+#define PCRF_AZ_PM_PME_VER_WIDTH 3
+
+
+/*
+ * PC_PM_CS_REG(16bit):
+ * Power management control & status register
+ */
+
+#define PCR_AZ_PM_CS_REG 0x00000044
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_PME_STAT_LBN 15
+#define PCRF_AZ_PM_PME_STAT_WIDTH 1
+#define PCRF_AZ_PM_DAT_SCALE_LBN 13
+#define PCRF_AZ_PM_DAT_SCALE_WIDTH 2
+#define PCRF_AZ_PM_DAT_SEL_LBN 9
+#define PCRF_AZ_PM_DAT_SEL_WIDTH 4
+#define PCRF_AZ_PM_PME_EN_LBN 8
+#define PCRF_AZ_PM_PME_EN_WIDTH 1
+#define PCRF_CZ_NO_SOFT_RESET_LBN 3
+#define PCRF_CZ_NO_SOFT_RESET_WIDTH 1
+#define PCRF_AZ_PM_PWR_ST_LBN 0
+#define PCRF_AZ_PM_PWR_ST_WIDTH 2
+
+
+/*
+ * PC_MSI_CAP_ID_REG(8bit):
+ * MSI capability ID
+ */
+
+#define PCR_AZ_MSI_CAP_ID_REG 0x00000050
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_CAP_ID_LBN 0
+#define PCRF_AZ_MSI_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_MSI_NXT_PTR_REG(8bit):
+ * MSI next item pointer
+ */
+
+#define PCR_AZ_MSI_NXT_PTR_REG 0x00000051
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_NXT_PTR_LBN 0
+#define PCRF_AZ_MSI_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_MSI_CTL_REG(16bit):
+ * MSI control register
+ */
+
+#define PCR_AZ_MSI_CTL_REG 0x00000052
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_64_EN_LBN 7
+#define PCRF_AZ_MSI_64_EN_WIDTH 1
+#define PCRF_AZ_MSI_MULT_MSG_EN_LBN 4
+#define PCRF_AZ_MSI_MULT_MSG_EN_WIDTH 3
+#define PCRF_AZ_MSI_MULT_MSG_CAP_LBN 1
+#define PCRF_AZ_MSI_MULT_MSG_CAP_WIDTH 3
+#define PCRF_AZ_MSI_EN_LBN 0
+#define PCRF_AZ_MSI_EN_WIDTH 1
+
+
+/*
+ * PC_MSI_ADR_LO_REG(32bit):
+ * MSI low 32 bits address register
+ */
+
+#define PCR_AZ_MSI_ADR_LO_REG 0x00000054
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_ADR_LO_LBN 2
+#define PCRF_AZ_MSI_ADR_LO_WIDTH 30
+
+
+/*
+ * PC_MSI_ADR_HI_REG(32bit):
+ * MSI high 32 bits address register
+ */
+
+#define PCR_AZ_MSI_ADR_HI_REG 0x00000058
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_ADR_HI_LBN 0
+#define PCRF_AZ_MSI_ADR_HI_WIDTH 32
+
+
+/*
+ * PC_MSI_DAT_REG(16bit):
+ * MSI data register
+ */
+
+#define PCR_AZ_MSI_DAT_REG 0x0000005c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_DAT_LBN 0
+#define PCRF_AZ_MSI_DAT_WIDTH 16
+
+
+/*
+ * PC_PCIE_CAP_LIST_REG(16bit):
+ * PCIe capability list register
+ */
+
+#define PCR_AB_PCIE_CAP_LIST_REG 0x00000060
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_PCIE_CAP_LIST_REG 0x00000070
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PCIE_NXT_PTR_LBN 8
+#define PCRF_AZ_PCIE_NXT_PTR_WIDTH 8
+#define PCRF_AZ_PCIE_CAP_ID_LBN 0
+#define PCRF_AZ_PCIE_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_PCIE_CAP_REG(16bit):
+ * PCIe capability register
+ */
+
+#define PCR_AB_PCIE_CAP_REG 0x00000062
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_PCIE_CAP_REG 0x00000072
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PCIE_INT_MSG_NUM_LBN 9
+#define PCRF_AZ_PCIE_INT_MSG_NUM_WIDTH 5
+#define PCRF_AZ_PCIE_SLOT_IMP_LBN 8
+#define PCRF_AZ_PCIE_SLOT_IMP_WIDTH 1
+#define PCRF_AZ_PCIE_DEV_PORT_TYPE_LBN 4
+#define PCRF_AZ_PCIE_DEV_PORT_TYPE_WIDTH 4
+#define PCRF_AZ_PCIE_CAP_VER_LBN 0
+#define PCRF_AZ_PCIE_CAP_VER_WIDTH 4
+
+
+/*
+ * PC_DEV_CAP_REG(32bit):
+ * PCIe device capabilities register
+ */
+
+#define PCR_AB_DEV_CAP_REG 0x00000064
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_CAP_REG 0x00000074
+/* sienaa0=pci_f0_config,hunta0=pci_f0_config */
+
+#define PCRF_CZ_CAP_FN_LEVEL_RESET_LBN 28
+#define PCRF_CZ_CAP_FN_LEVEL_RESET_WIDTH 1
+#define PCRF_AZ_CAP_SLOT_PWR_SCL_LBN 26
+#define PCRF_AZ_CAP_SLOT_PWR_SCL_WIDTH 2
+#define PCRF_AZ_CAP_SLOT_PWR_VAL_LBN 18
+#define PCRF_AZ_CAP_SLOT_PWR_VAL_WIDTH 8
+#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_LBN 15
+#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_WIDTH 1
+#define PCRF_AB_PWR_IND_LBN 14
+#define PCRF_AB_PWR_IND_WIDTH 1
+#define PCRF_AB_ATTN_IND_LBN 13
+#define PCRF_AB_ATTN_IND_WIDTH 1
+#define PCRF_AB_ATTN_BUTTON_LBN 12
+#define PCRF_AB_ATTN_BUTTON_WIDTH 1
+#define PCRF_AZ_ENDPT_L1_LAT_LBN 9
+#define PCRF_AZ_ENDPT_L1_LAT_WIDTH 3
+#define PCRF_AZ_ENDPT_L0_LAT_LBN 6
+#define PCRF_AZ_ENDPT_L0_LAT_WIDTH 3
+#define PCRF_AZ_TAG_FIELD_LBN 5
+#define PCRF_AZ_TAG_FIELD_WIDTH 1
+#define PCRF_AZ_PHAN_FUNC_LBN 3
+#define PCRF_AZ_PHAN_FUNC_WIDTH 2
+#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_LBN 0
+#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_WIDTH 3
+
+
+/*
+ * PC_DEV_CTL_REG(16bit):
+ * PCIe device control register
+ */
+
+#define PCR_AB_DEV_CTL_REG 0x00000068
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_CTL_REG 0x00000078
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_FN_LEVEL_RESET_LBN 15
+#define PCRF_CZ_FN_LEVEL_RESET_WIDTH 1
+#define PCRF_AZ_MAX_RD_REQ_SIZE_LBN 12
+#define PCRF_AZ_MAX_RD_REQ_SIZE_WIDTH 3
+#define PCFE_AZ_MAX_RD_REQ_SIZE_4096 5
+#define PCFE_AZ_MAX_RD_REQ_SIZE_2048 4
+#define PCFE_AZ_MAX_RD_REQ_SIZE_1024 3
+#define PCFE_AZ_MAX_RD_REQ_SIZE_512 2
+#define PCFE_AZ_MAX_RD_REQ_SIZE_256 1
+#define PCFE_AZ_MAX_RD_REQ_SIZE_128 0
+#define PCRF_AZ_EN_NO_SNOOP_LBN 11
+#define PCRF_AZ_EN_NO_SNOOP_WIDTH 1
+#define PCRF_AZ_AUX_PWR_PM_EN_LBN 10
+#define PCRF_AZ_AUX_PWR_PM_EN_WIDTH 1
+#define PCRF_AZ_PHAN_FUNC_EN_LBN 9
+#define PCRF_AZ_PHAN_FUNC_EN_WIDTH 1
+#define PCRF_AB_DEV_CAP_REG_RSVD0_LBN 8
+#define PCRF_AB_DEV_CAP_REG_RSVD0_WIDTH 1
+#define PCRF_CZ_EXTENDED_TAG_EN_LBN 8
+#define PCRF_CZ_EXTENDED_TAG_EN_WIDTH 1
+#define PCRF_AZ_MAX_PAYL_SIZE_LBN 5
+#define PCRF_AZ_MAX_PAYL_SIZE_WIDTH 3
+#define PCFE_AZ_MAX_PAYL_SIZE_4096 5
+#define PCFE_AZ_MAX_PAYL_SIZE_2048 4
+#define PCFE_AZ_MAX_PAYL_SIZE_1024 3
+#define PCFE_AZ_MAX_PAYL_SIZE_512 2
+#define PCFE_AZ_MAX_PAYL_SIZE_256 1
+#define PCFE_AZ_MAX_PAYL_SIZE_128 0
+#define PCRF_AZ_EN_RELAX_ORDER_LBN 4
+#define PCRF_AZ_EN_RELAX_ORDER_WIDTH 1
+#define PCRF_AZ_UNSUP_REQ_RPT_EN_LBN 3
+#define PCRF_AZ_UNSUP_REQ_RPT_EN_WIDTH 1
+#define PCRF_AZ_FATAL_ERR_RPT_EN_LBN 2
+#define PCRF_AZ_FATAL_ERR_RPT_EN_WIDTH 1
+#define PCRF_AZ_NONFATAL_ERR_RPT_EN_LBN 1
+#define PCRF_AZ_NONFATAL_ERR_RPT_EN_WIDTH 1
+#define PCRF_AZ_CORR_ERR_RPT_EN_LBN 0
+#define PCRF_AZ_CORR_ERR_RPT_EN_WIDTH 1
+
+
+/*
+ * PC_DEV_STAT_REG(16bit):
+ * PCIe device status register
+ */
+
+#define PCR_AB_DEV_STAT_REG 0x0000006a
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_STAT_REG 0x0000007a
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_TRNS_PEND_LBN 5
+#define PCRF_AZ_TRNS_PEND_WIDTH 1
+#define PCRF_AZ_AUX_PWR_DET_LBN 4
+#define PCRF_AZ_AUX_PWR_DET_WIDTH 1
+#define PCRF_AZ_UNSUP_REQ_DET_LBN 3
+#define PCRF_AZ_UNSUP_REQ_DET_WIDTH 1
+#define PCRF_AZ_FATAL_ERR_DET_LBN 2
+#define PCRF_AZ_FATAL_ERR_DET_WIDTH 1
+#define PCRF_AZ_NONFATAL_ERR_DET_LBN 1
+#define PCRF_AZ_NONFATAL_ERR_DET_WIDTH 1
+#define PCRF_AZ_CORR_ERR_DET_LBN 0
+#define PCRF_AZ_CORR_ERR_DET_WIDTH 1
+
+
+/*
+ * PC_LNK_CAP_REG(32bit):
+ * PCIe link capabilities register
+ */
+
+#define PCR_AB_LNK_CAP_REG 0x0000006c
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_CAP_REG 0x0000007c
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PORT_NUM_LBN 24
+#define PCRF_AZ_PORT_NUM_WIDTH 8
+#define PCRF_DZ_ASPM_OPTIONALITY_CAP_LBN 22
+#define PCRF_DZ_ASPM_OPTIONALITY_CAP_WIDTH 1
+#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_LBN 21
+#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_WIDTH 1
+#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_LBN 20
+#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_WIDTH 1
+#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_LBN 19
+#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_WIDTH 1
+#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_LBN 18
+#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_WIDTH 1
+#define PCRF_AZ_DEF_L1_EXIT_LAT_LBN 15
+#define PCRF_AZ_DEF_L1_EXIT_LAT_WIDTH 3
+#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_LBN 12
+#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_WIDTH 3
+#define PCRF_AZ_AS_LNK_PM_SUPT_LBN 10
+#define PCRF_AZ_AS_LNK_PM_SUPT_WIDTH 2
+#define PCRF_AZ_MAX_LNK_WIDTH_LBN 4
+#define PCRF_AZ_MAX_LNK_WIDTH_WIDTH 6
+#define PCRF_AZ_MAX_LNK_SP_LBN 0
+#define PCRF_AZ_MAX_LNK_SP_WIDTH 4
+
+
+/*
+ * PC_LNK_CTL_REG(16bit):
+ * PCIe link control register
+ */
+
+#define PCR_AB_LNK_CTL_REG 0x00000070
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_CTL_REG 0x00000080
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_EXT_SYNC_LBN 7
+#define PCRF_AZ_EXT_SYNC_WIDTH 1
+#define PCRF_AZ_COMM_CLK_CFG_LBN 6
+#define PCRF_AZ_COMM_CLK_CFG_WIDTH 1
+#define PCRF_AB_LNK_CTL_REG_RSVD0_LBN 5
+#define PCRF_AB_LNK_CTL_REG_RSVD0_WIDTH 1
+#define PCRF_CZ_LNK_RETRAIN_LBN 5
+#define PCRF_CZ_LNK_RETRAIN_WIDTH 1
+#define PCRF_AZ_LNK_DIS_LBN 4
+#define PCRF_AZ_LNK_DIS_WIDTH 1
+#define PCRF_AZ_RD_COM_BDRY_LBN 3
+#define PCRF_AZ_RD_COM_BDRY_WIDTH 1
+#define PCRF_AZ_ACT_ST_LNK_PM_CTL_LBN 0
+#define PCRF_AZ_ACT_ST_LNK_PM_CTL_WIDTH 2
+
+
+/*
+ * PC_LNK_STAT_REG(16bit):
+ * PCIe link status register
+ */
+
+#define PCR_AB_LNK_STAT_REG 0x00000072
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_STAT_REG 0x00000082
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SLOT_CLK_CFG_LBN 12
+#define PCRF_AZ_SLOT_CLK_CFG_WIDTH 1
+#define PCRF_AZ_LNK_TRAIN_LBN 11
+#define PCRF_AZ_LNK_TRAIN_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_LBN 10
+#define PCRF_AB_TRAIN_ERR_WIDTH 1
+#define PCRF_AZ_LNK_WIDTH_LBN 4
+#define PCRF_AZ_LNK_WIDTH_WIDTH 6
+#define PCRF_AZ_LNK_SP_LBN 0
+#define PCRF_AZ_LNK_SP_WIDTH 4
+
+
+/*
+ * PC_SLOT_CAP_REG(32bit):
+ * PCIe slot capabilities register
+ */
+
+#define PCR_AB_SLOT_CAP_REG 0x00000074
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_SLOT_NUM_LBN 19
+#define PCRF_AB_SLOT_NUM_WIDTH 13
+#define PCRF_AB_SLOT_PWR_LIM_SCL_LBN 15
+#define PCRF_AB_SLOT_PWR_LIM_SCL_WIDTH 2
+#define PCRF_AB_SLOT_PWR_LIM_VAL_LBN 7
+#define PCRF_AB_SLOT_PWR_LIM_VAL_WIDTH 8
+#define PCRF_AB_SLOT_HP_CAP_LBN 6
+#define PCRF_AB_SLOT_HP_CAP_WIDTH 1
+#define PCRF_AB_SLOT_HP_SURP_LBN 5
+#define PCRF_AB_SLOT_HP_SURP_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_PRST_LBN 4
+#define PCRF_AB_SLOT_PWR_IND_PRST_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_IND_PRST_LBN 3
+#define PCRF_AB_SLOT_ATTN_IND_PRST_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_PRST_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_PRST_WIDTH 1
+#define PCRF_AB_SLOT_PWR_CTL_PRST_LBN 1
+#define PCRF_AB_SLOT_PWR_CTL_PRST_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_BUT_PRST_LBN 0
+#define PCRF_AB_SLOT_ATTN_BUT_PRST_WIDTH 1
+
+
+/*
+ * PC_SLOT_CTL_REG(16bit):
+ * PCIe slot control register
+ */
+
+#define PCR_AB_SLOT_CTL_REG 0x00000078
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_SLOT_PWR_CTLR_CTL_LBN 10
+#define PCRF_AB_SLOT_PWR_CTLR_CTL_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_CTL_LBN 8
+#define PCRF_AB_SLOT_PWR_IND_CTL_WIDTH 2
+#define PCRF_AB_SLOT_ATT_IND_CTL_LBN 6
+#define PCRF_AB_SLOT_ATT_IND_CTL_WIDTH 2
+#define PCRF_AB_SLOT_HP_INT_EN_LBN 5
+#define PCRF_AB_SLOT_HP_INT_EN_WIDTH 1
+#define PCRF_AB_SLOT_CMD_COMP_INT_EN_LBN 4
+#define PCRF_AB_SLOT_CMD_COMP_INT_EN_WIDTH 1
+#define PCRF_AB_SLOT_PRES_DET_CHG_EN_LBN 3
+#define PCRF_AB_SLOT_PRES_DET_CHG_EN_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_WIDTH 1
+#define PCRF_AB_SLOT_PWR_FLTDET_EN_LBN 1
+#define PCRF_AB_SLOT_PWR_FLTDET_EN_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_BUT_EN_LBN 0
+#define PCRF_AB_SLOT_ATTN_BUT_EN_WIDTH 1
+
+
+/*
+ * PC_SLOT_STAT_REG(16bit):
+ * PCIe slot status register
+ */
+
+#define PCR_AB_SLOT_STAT_REG 0x0000007a
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_PRES_DET_ST_LBN 6
+#define PCRF_AB_PRES_DET_ST_WIDTH 1
+#define PCRF_AB_MRL_SENS_ST_LBN 5
+#define PCRF_AB_MRL_SENS_ST_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_LBN 4
+#define PCRF_AB_SLOT_PWR_IND_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_IND_LBN 3
+#define PCRF_AB_SLOT_ATTN_IND_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_WIDTH 1
+#define PCRF_AB_PWR_FLTDET_LBN 1
+#define PCRF_AB_PWR_FLTDET_WIDTH 1
+#define PCRF_AB_ATTN_BUTDET_LBN 0
+#define PCRF_AB_ATTN_BUTDET_WIDTH 1
+
+
+/*
+ * PC_MSIX_CAP_ID_REG(8bit):
+ * MSIX Capability ID
+ */
+
+#define PCR_BB_MSIX_CAP_ID_REG 0x00000090
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_CAP_ID_REG 0x000000b0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_CAP_ID_LBN 0
+#define PCRF_BZ_MSIX_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_MSIX_NXT_PTR_REG(8bit):
+ * MSIX Capability Next Capability Ptr
+ */
+
+#define PCR_BB_MSIX_NXT_PTR_REG 0x00000091
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_NXT_PTR_REG 0x000000b1
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_NXT_PTR_LBN 0
+#define PCRF_BZ_MSIX_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_MSIX_CTL_REG(16bit):
+ * MSIX control register
+ */
+
+#define PCR_BB_MSIX_CTL_REG 0x00000092
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_CTL_REG 0x000000b2
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_EN_LBN 15
+#define PCRF_BZ_MSIX_EN_WIDTH 1
+#define PCRF_BZ_MSIX_FUNC_MASK_LBN 14
+#define PCRF_BZ_MSIX_FUNC_MASK_WIDTH 1
+#define PCRF_BZ_MSIX_TBL_SIZE_LBN 0
+#define PCRF_BZ_MSIX_TBL_SIZE_WIDTH 11
+
+
+/*
+ * PC_MSIX_TBL_BASE_REG(32bit):
+ * MSIX Capability Vector Table Base
+ */
+
+#define PCR_BB_MSIX_TBL_BASE_REG 0x00000094
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_TBL_BASE_REG 0x000000b4
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_TBL_OFF_LBN 3
+#define PCRF_BZ_MSIX_TBL_OFF_WIDTH 29
+#define PCRF_BZ_MSIX_TBL_BIR_LBN 0
+#define PCRF_BZ_MSIX_TBL_BIR_WIDTH 3
+
+
+/*
+ * PC_DEV_CAP2_REG(32bit):
+ * PCIe Device Capabilities 2
+ */
+
+#define PCR_CZ_DEV_CAP2_REG 0x00000094
+/* sienaa0=pci_f0_config,hunta0=pci_f0_config */
+
+#define PCRF_DZ_OBFF_SUPPORTED_LBN 18
+#define PCRF_DZ_OBFF_SUPPORTED_WIDTH 2
+#define PCRF_DZ_TPH_CMPL_SUPPORTED_LBN 12
+#define PCRF_DZ_TPH_CMPL_SUPPORTED_WIDTH 2
+#define PCRF_DZ_LTR_M_SUPPORTED_LBN 11
+#define PCRF_DZ_LTR_M_SUPPORTED_WIDTH 1
+#define PCRF_CC_CMPL_TIMEOUT_DIS_LBN 4
+#define PCRF_CC_CMPL_TIMEOUT_DIS_WIDTH 1
+#define PCRF_DZ_CMPL_TIMEOUT_DIS_SUPPORTED_LBN 4
+#define PCRF_DZ_CMPL_TIMEOUT_DIS_SUPPORTED_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_LBN 0
+#define PCRF_CZ_CMPL_TIMEOUT_WIDTH 4
+#define PCFE_CZ_CMPL_TIMEOUT_17000_TO_6400MS 14
+#define PCFE_CZ_CMPL_TIMEOUT_4000_TO_1300MS 13
+#define PCFE_CZ_CMPL_TIMEOUT_1000_TO_3500MS 10
+#define PCFE_CZ_CMPL_TIMEOUT_260_TO_900MS 9
+#define PCFE_CZ_CMPL_TIMEOUT_65_TO_210MS 6
+#define PCFE_CZ_CMPL_TIMEOUT_16_TO_55MS 5
+#define PCFE_CZ_CMPL_TIMEOUT_1_TO_10MS 2
+#define PCFE_CZ_CMPL_TIMEOUT_50_TO_100US 1
+#define PCFE_CZ_CMPL_TIMEOUT_DEFAULT 0
+
+
+/*
+ * PC_DEV_CTL2_REG(16bit):
+ * PCIe Device Control 2
+ */
+
+#define PCR_CZ_DEV_CTL2_REG 0x00000098
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_DZ_OBFF_ENABLE_LBN 13
+#define PCRF_DZ_OBFF_ENABLE_WIDTH 2
+#define PCRF_DZ_LTR_ENABLE_LBN 10
+#define PCRF_DZ_LTR_ENABLE_WIDTH 1
+#define PCRF_DZ_IDO_COMPLETION_ENABLE_LBN 9
+#define PCRF_DZ_IDO_COMPLETION_ENABLE_WIDTH 1
+#define PCRF_DZ_IDO_REQUEST_ENABLE_LBN 8
+#define PCRF_DZ_IDO_REQUEST_ENABLE_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_LBN 4
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_CTL_LBN 0
+#define PCRF_CZ_CMPL_TIMEOUT_CTL_WIDTH 4
+
+
+/*
+ * PC_MSIX_PBA_BASE_REG(32bit):
+ * MSIX Capability PBA Base
+ */
+
+#define PCR_BB_MSIX_PBA_BASE_REG 0x00000098
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_PBA_BASE_REG 0x000000b8
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_PBA_OFF_LBN 3
+#define PCRF_BZ_MSIX_PBA_OFF_WIDTH 29
+#define PCRF_BZ_MSIX_PBA_BIR_LBN 0
+#define PCRF_BZ_MSIX_PBA_BIR_WIDTH 3
+
+
+/*
+ * PC_LNK_CAP2_REG(32bit):
+ * PCIe Link Capability 2
+ */
+
+#define PCR_DZ_LNK_CAP2_REG 0x0000009c
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LNK_SPEED_SUP_LBN 1
+#define PCRF_DZ_LNK_SPEED_SUP_WIDTH 7
+
+
+/*
+ * PC_LNK_CTL2_REG(16bit):
+ * PCIe Link Control 2
+ */
+
+#define PCR_CZ_LNK_CTL2_REG 0x000000a0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_POLLING_DEEMPH_LVL_LBN 12
+#define PCRF_CZ_POLLING_DEEMPH_LVL_WIDTH 1
+#define PCRF_CZ_COMPLIANCE_SOS_CTL_LBN 11
+#define PCRF_CZ_COMPLIANCE_SOS_CTL_WIDTH 1
+#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_LBN 10
+#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_WIDTH 1
+#define PCRF_CZ_TRANSMIT_MARGIN_LBN 7
+#define PCRF_CZ_TRANSMIT_MARGIN_WIDTH 3
+#define PCRF_CZ_SELECT_DEEMPH_LBN 6
+#define PCRF_CZ_SELECT_DEEMPH_WIDTH 1
+#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_LBN 5
+#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_WIDTH 1
+#define PCRF_CZ_ENTER_COMPLIANCE_CTL_LBN 4
+#define PCRF_CZ_ENTER_COMPLIANCE_CTL_WIDTH 1
+#define PCRF_CZ_TGT_LNK_SPEED_CTL_LBN 0
+#define PCRF_CZ_TGT_LNK_SPEED_CTL_WIDTH 4
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN3 3
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN2 2
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN1 1
+
+
+/*
+ * PC_LNK_STAT2_REG(16bit):
+ * PCIe Link Status 2
+ */
+
+#define PCR_CZ_LNK_STAT2_REG 0x000000a2
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_CURRENT_DEEMPH_LBN 0
+#define PCRF_CZ_CURRENT_DEEMPH_WIDTH 1
+
+
+/*
+ * PC_VPD_CAP_ID_REG(8bit):
+ * VPD data register
+ */
+
+#define PCR_AB_VPD_CAP_ID_REG 0x000000b0
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_CAP_ID_LBN 0
+#define PCRF_AB_VPD_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_VPD_NXT_PTR_REG(8bit):
+ * VPD next item pointer
+ */
+
+#define PCR_AB_VPD_NXT_PTR_REG 0x000000b1
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_NXT_PTR_LBN 0
+#define PCRF_AB_VPD_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_VPD_ADDR_REG(16bit):
+ * VPD address register
+ */
+
+#define PCR_AB_VPD_ADDR_REG 0x000000b2
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_FLAG_LBN 15
+#define PCRF_AB_VPD_FLAG_WIDTH 1
+#define PCRF_AB_VPD_ADDR_LBN 0
+#define PCRF_AB_VPD_ADDR_WIDTH 15
+
+
+/*
+ * PC_VPD_CAP_DATA_REG(32bit):
+ * documentation to be written for sum_PC_VPD_CAP_DATA_REG
+ */
+
+#define PCR_AB_VPD_CAP_DATA_REG 0x000000b4
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_VPD_CAP_DATA_REG 0x000000d4
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_VPD_DATA_LBN 0
+#define PCRF_AZ_VPD_DATA_WIDTH 32
+
+
+/*
+ * PC_VPD_CAP_CTL_REG(8bit):
+ * VPD control and capabilities register
+ */
+
+#define PCR_CZ_VPD_CAP_CTL_REG 0x000000d0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_VPD_FLAG_LBN 31
+#define PCRF_CZ_VPD_FLAG_WIDTH 1
+#define PCRF_CZ_VPD_ADDR_LBN 16
+#define PCRF_CZ_VPD_ADDR_WIDTH 15
+#define PCRF_CZ_VPD_NXT_PTR_LBN 8
+#define PCRF_CZ_VPD_NXT_PTR_WIDTH 8
+#define PCRF_CZ_VPD_CAP_ID_LBN 0
+#define PCRF_CZ_VPD_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_AER_CAP_HDR_REG(32bit):
+ * AER capability header register
+ */
+
+#define PCR_AZ_AER_CAP_HDR_REG 0x00000100
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_AERCAPHDR_NXT_PTR_LBN 20
+#define PCRF_AZ_AERCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_AZ_AERCAPHDR_VER_LBN 16
+#define PCRF_AZ_AERCAPHDR_VER_WIDTH 4
+#define PCRF_AZ_AERCAPHDR_ID_LBN 0
+#define PCRF_AZ_AERCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_AER_UNCORR_ERR_STAT_REG(32bit):
+ * AER Uncorrectable error status register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_STAT_REG 0x00000104
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_UNSUPT_REQ_ERR_STAT_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_STAT_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_STAT_LBN 19
+#define PCRF_AZ_ECRC_ERR_STAT_WIDTH 1
+#define PCRF_AZ_MALF_TLP_STAT_LBN 18
+#define PCRF_AZ_MALF_TLP_STAT_WIDTH 1
+#define PCRF_AZ_RX_OVF_STAT_LBN 17
+#define PCRF_AZ_RX_OVF_STAT_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_STAT_LBN 16
+#define PCRF_AZ_UNEXP_COMP_STAT_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_STAT_LBN 15
+#define PCRF_AZ_COMP_ABRT_STAT_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_STAT_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_STAT_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_STAT_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_STAT_WIDTH 1
+#define PCRF_AZ_PSON_TLP_STAT_LBN 12
+#define PCRF_AZ_PSON_TLP_STAT_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_STAT_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_STAT_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_STAT_LBN 0
+#define PCRF_AB_TRAIN_ERR_STAT_WIDTH 1
+
+
+/*
+ * PC_AER_UNCORR_ERR_MASK_REG(32bit):
+ * AER Uncorrectable error mask register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_MASK_REG 0x00000108
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_DZ_ATOMIC_OP_EGR_BLOCKED_MASK_LBN 24
+#define PCRF_DZ_ATOMIC_OP_EGR_BLOCKED_MASK_WIDTH 1
+#define PCRF_DZ_UNCORR_INT_ERR_MASK_LBN 22
+#define PCRF_DZ_UNCORR_INT_ERR_MASK_WIDTH 1
+#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_MASK_LBN 19
+#define PCRF_AZ_ECRC_ERR_MASK_WIDTH 1
+#define PCRF_AZ_MALF_TLP_MASK_LBN 18
+#define PCRF_AZ_MALF_TLP_MASK_WIDTH 1
+#define PCRF_AZ_RX_OVF_MASK_LBN 17
+#define PCRF_AZ_RX_OVF_MASK_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_MASK_LBN 16
+#define PCRF_AZ_UNEXP_COMP_MASK_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_MASK_LBN 15
+#define PCRF_AZ_COMP_ABRT_MASK_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_MASK_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_MASK_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_MASK_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_MASK_WIDTH 1
+#define PCRF_AZ_PSON_TLP_MASK_LBN 12
+#define PCRF_AZ_PSON_TLP_MASK_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_MASK_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_MASK_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_MASK_LBN 0
+#define PCRF_AB_TRAIN_ERR_MASK_WIDTH 1
+
+
+/*
+ * PC_AER_UNCORR_ERR_SEV_REG(32bit):
+ * AER Uncorrectable error severity register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_SEV_REG 0x0000010c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_SEV_LBN 19
+#define PCRF_AZ_ECRC_ERR_SEV_WIDTH 1
+#define PCRF_AZ_MALF_TLP_SEV_LBN 18
+#define PCRF_AZ_MALF_TLP_SEV_WIDTH 1
+#define PCRF_AZ_RX_OVF_SEV_LBN 17
+#define PCRF_AZ_RX_OVF_SEV_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_SEV_LBN 16
+#define PCRF_AZ_UNEXP_COMP_SEV_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_SEV_LBN 15
+#define PCRF_AZ_COMP_ABRT_SEV_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_SEV_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_SEV_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_SEV_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_SEV_WIDTH 1
+#define PCRF_AZ_PSON_TLP_SEV_LBN 12
+#define PCRF_AZ_PSON_TLP_SEV_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_SEV_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_SEV_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_SEV_LBN 0
+#define PCRF_AB_TRAIN_ERR_SEV_WIDTH 1
+
+
+/*
+ * PC_AER_CORR_ERR_STAT_REG(32bit):
+ * AER Correctable error status register
+ */
+
+#define PCR_AZ_AER_CORR_ERR_STAT_REG 0x00000110
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ADVSY_NON_FATAL_STAT_LBN 13
+#define PCRF_CZ_ADVSY_NON_FATAL_STAT_WIDTH 1
+#define PCRF_AZ_RPLY_TMR_TOUT_STAT_LBN 12
+#define PCRF_AZ_RPLY_TMR_TOUT_STAT_WIDTH 1
+#define PCRF_AZ_RPLAY_NUM_RO_STAT_LBN 8
+#define PCRF_AZ_RPLAY_NUM_RO_STAT_WIDTH 1
+#define PCRF_AZ_BAD_DLLP_STAT_LBN 7
+#define PCRF_AZ_BAD_DLLP_STAT_WIDTH 1
+#define PCRF_AZ_BAD_TLP_STAT_LBN 6
+#define PCRF_AZ_BAD_TLP_STAT_WIDTH 1
+#define PCRF_AZ_RX_ERR_STAT_LBN 0
+#define PCRF_AZ_RX_ERR_STAT_WIDTH 1
+
+
+/*
+ * PC_AER_CORR_ERR_MASK_REG(32bit):
+ * AER Correctable error status register
+ */
+
+#define PCR_AZ_AER_CORR_ERR_MASK_REG 0x00000114
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ADVSY_NON_FATAL_MASK_LBN 13
+#define PCRF_CZ_ADVSY_NON_FATAL_MASK_WIDTH 1
+#define PCRF_AZ_RPLY_TMR_TOUT_MASK_LBN 12
+#define PCRF_AZ_RPLY_TMR_TOUT_MASK_WIDTH 1
+#define PCRF_AZ_RPLAY_NUM_RO_MASK_LBN 8
+#define PCRF_AZ_RPLAY_NUM_RO_MASK_WIDTH 1
+#define PCRF_AZ_BAD_DLLP_MASK_LBN 7
+#define PCRF_AZ_BAD_DLLP_MASK_WIDTH 1
+#define PCRF_AZ_BAD_TLP_MASK_LBN 6
+#define PCRF_AZ_BAD_TLP_MASK_WIDTH 1
+#define PCRF_AZ_RX_ERR_MASK_LBN 0
+#define PCRF_AZ_RX_ERR_MASK_WIDTH 1
+
+
+/*
+ * PC_AER_CAP_CTL_REG(32bit):
+ * AER capability and control register
+ */
+
+#define PCR_AZ_AER_CAP_CTL_REG 0x00000118
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_ECRC_CHK_EN_LBN 8
+#define PCRF_AZ_ECRC_CHK_EN_WIDTH 1
+#define PCRF_AZ_ECRC_CHK_CAP_LBN 7
+#define PCRF_AZ_ECRC_CHK_CAP_WIDTH 1
+#define PCRF_AZ_ECRC_GEN_EN_LBN 6
+#define PCRF_AZ_ECRC_GEN_EN_WIDTH 1
+#define PCRF_AZ_ECRC_GEN_CAP_LBN 5
+#define PCRF_AZ_ECRC_GEN_CAP_WIDTH 1
+#define PCRF_AZ_1ST_ERR_PTR_LBN 0
+#define PCRF_AZ_1ST_ERR_PTR_WIDTH 5
+
+
+/*
+ * PC_AER_HDR_LOG_REG(128bit):
+ * AER Header log register
+ */
+
+#define PCR_AZ_AER_HDR_LOG_REG 0x0000011c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_HDR_LOG_LBN 0
+#define PCRF_AZ_HDR_LOG_WIDTH 128
+
+
+/*
+ * PC_DEVSN_CAP_HDR_REG(32bit):
+ * Device serial number capability header register
+ */
+
+#define PCR_CZ_DEVSN_CAP_HDR_REG 0x00000140
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_DEVSNCAPHDR_VER_LBN 16
+#define PCRF_CZ_DEVSNCAPHDR_VER_WIDTH 4
+#define PCRF_CZ_DEVSNCAPHDR_ID_LBN 0
+#define PCRF_CZ_DEVSNCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_DEVSN_DWORD0_REG(32bit):
+ * Device serial number DWORD0
+ */
+
+#define PCR_CZ_DEVSN_DWORD0_REG 0x00000144
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSN_DWORD0_LBN 0
+#define PCRF_CZ_DEVSN_DWORD0_WIDTH 32
+
+
+/*
+ * PC_DEVSN_DWORD1_REG(32bit):
+ * Device serial number DWORD0
+ */
+
+#define PCR_CZ_DEVSN_DWORD1_REG 0x00000148
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSN_DWORD1_LBN 0
+#define PCRF_CZ_DEVSN_DWORD1_WIDTH 32
+
+
+/*
+ * PC_ARI_CAP_HDR_REG(32bit):
+ * ARI capability header register
+ */
+
+#define PCR_CZ_ARI_CAP_HDR_REG 0x00000150
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARICAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_ARICAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_ARICAPHDR_VER_LBN 16
+#define PCRF_CZ_ARICAPHDR_VER_WIDTH 4
+#define PCRF_CZ_ARICAPHDR_ID_LBN 0
+#define PCRF_CZ_ARICAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_ARI_CAP_REG(16bit):
+ * ARI Capabilities
+ */
+
+#define PCR_CZ_ARI_CAP_REG 0x00000154
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARI_NXT_FN_NUM_LBN 8
+#define PCRF_CZ_ARI_NXT_FN_NUM_WIDTH 8
+#define PCRF_CZ_ARI_ACS_FNGRP_CAP_LBN 1
+#define PCRF_CZ_ARI_ACS_FNGRP_CAP_WIDTH 1
+#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_LBN 0
+#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_WIDTH 1
+
+
+/*
+ * PC_ARI_CTL_REG(16bit):
+ * ARI Control
+ */
+
+#define PCR_CZ_ARI_CTL_REG 0x00000156
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARI_FN_GRP_LBN 4
+#define PCRF_CZ_ARI_FN_GRP_WIDTH 3
+#define PCRF_CZ_ARI_ACS_FNGRP_EN_LBN 1
+#define PCRF_CZ_ARI_ACS_FNGRP_EN_WIDTH 1
+#define PCRF_CZ_ARI_MFVC_FNGRP_EN_LBN 0
+#define PCRF_CZ_ARI_MFVC_FNGRP_EN_WIDTH 1
+
+
+/*
+ * PC_SEC_PCIE_CAP_REG(32bit):
+ * Secondary PCIE Capability Register
+ */
+
+#define PCR_DZ_SEC_PCIE_CAP_REG 0x00000160
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_SEC_NXT_PTR_LBN 20
+#define PCRF_DZ_SEC_NXT_PTR_WIDTH 12
+#define PCRF_DZ_SEC_VERSION_LBN 16
+#define PCRF_DZ_SEC_VERSION_WIDTH 4
+#define PCRF_DZ_SEC_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_SEC_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_CAP_HDR_REG(32bit):
+ * SRIOV capability header register
+ */
+
+#define PCR_CC_SRIOV_CAP_HDR_REG 0x00000160
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CAP_HDR_REG 0x00000180
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_SRIOVCAPHDR_VER_LBN 16
+#define PCRF_CZ_SRIOVCAPHDR_VER_WIDTH 4
+#define PCRF_CZ_SRIOVCAPHDR_ID_LBN 0
+#define PCRF_CZ_SRIOVCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_CAP_REG(32bit):
+ * SRIOV Capabilities
+ */
+
+#define PCR_CC_SRIOV_CAP_REG 0x00000164
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CAP_REG 0x00000184
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_LBN 21
+#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_WIDTH 11
+#define PCRF_DZ_VF_ARI_CAP_PRESV_LBN 1
+#define PCRF_DZ_VF_ARI_CAP_PRESV_WIDTH 1
+#define PCRF_CZ_VF_MIGR_CAP_LBN 0
+#define PCRF_CZ_VF_MIGR_CAP_WIDTH 1
+
+
+/*
+ * PC_LINK_CONTROL3_REG(32bit):
+ * Link Control 3.
+ */
+
+#define PCR_DZ_LINK_CONTROL3_REG 0x00000164
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LINK_EQ_INT_EN_LBN 1
+#define PCRF_DZ_LINK_EQ_INT_EN_WIDTH 1
+#define PCRF_DZ_PERFORM_EQL_LBN 0
+#define PCRF_DZ_PERFORM_EQL_WIDTH 1
+
+
+/*
+ * PC_LANE_ERROR_STAT_REG(32bit):
+ * Lane Error Status Register.
+ */
+
+#define PCR_DZ_LANE_ERROR_STAT_REG 0x00000168
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE_STATUS_LBN 0
+#define PCRF_DZ_LANE_STATUS_WIDTH 8
+
+
+/*
+ * PC_SRIOV_CTL_REG(16bit):
+ * SRIOV Control
+ */
+
+#define PCR_CC_SRIOV_CTL_REG 0x00000168
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CTL_REG 0x00000188
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_ARI_CAP_HRCHY_LBN 4
+#define PCRF_CZ_VF_ARI_CAP_HRCHY_WIDTH 1
+#define PCRF_CZ_VF_MSE_LBN 3
+#define PCRF_CZ_VF_MSE_WIDTH 1
+#define PCRF_CZ_VF_MIGR_INT_EN_LBN 2
+#define PCRF_CZ_VF_MIGR_INT_EN_WIDTH 1
+#define PCRF_CZ_VF_MIGR_EN_LBN 1
+#define PCRF_CZ_VF_MIGR_EN_WIDTH 1
+#define PCRF_CZ_VF_EN_LBN 0
+#define PCRF_CZ_VF_EN_WIDTH 1
+
+
+/*
+ * PC_SRIOV_STAT_REG(16bit):
+ * SRIOV Status
+ */
+
+#define PCR_CC_SRIOV_STAT_REG 0x0000016a
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_STAT_REG 0x0000018a
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_STAT_LBN 0
+#define PCRF_CZ_VF_MIGR_STAT_WIDTH 1
+
+
+/*
+ * PC_LANE01_EQU_CONTROL_REG(32bit):
+ * Lanes 0,1 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE01_EQU_CONTROL_REG 0x0000016c
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE1_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE1_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE0_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE0_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_INITIALVFS_REG(16bit):
+ * SRIOV Initial VFs
+ */
+
+#define PCR_CC_SRIOV_INITIALVFS_REG 0x0000016c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_INITIALVFS_REG 0x0000018c
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_INITIALVFS_LBN 0
+#define PCRF_CZ_VF_INITIALVFS_WIDTH 16
+
+
+/*
+ * PC_SRIOV_TOTALVFS_REG(10bit):
+ * SRIOV Total VFs
+ */
+
+#define PCR_CC_SRIOV_TOTALVFS_REG 0x0000016e
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_TOTALVFS_REG 0x0000018e
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_TOTALVFS_LBN 0
+#define PCRF_CZ_VF_TOTALVFS_WIDTH 16
+
+
+/*
+ * PC_SRIOV_NUMVFS_REG(16bit):
+ * SRIOV Number of VFs
+ */
+
+#define PCR_CC_SRIOV_NUMVFS_REG 0x00000170
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_NUMVFS_REG 0x00000190
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_NUMVFS_LBN 0
+#define PCRF_CZ_VF_NUMVFS_WIDTH 16
+
+
+/*
+ * PC_LANE23_EQU_CONTROL_REG(32bit):
+ * Lanes 2,3 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE23_EQU_CONTROL_REG 0x00000170
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE3_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE3_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE2_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE2_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_FN_DPND_LNK_REG(16bit):
+ * SRIOV Function dependency link
+ */
+
+#define PCR_CC_SRIOV_FN_DPND_LNK_REG 0x00000172
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_FN_DPND_LNK_REG 0x00000192
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_SRIOV_FN_DPND_LNK_LBN 0
+#define PCRF_CZ_SRIOV_FN_DPND_LNK_WIDTH 8
+
+
+/*
+ * PC_SRIOV_1STVF_OFFSET_REG(16bit):
+ * SRIOV First VF Offset
+ */
+
+#define PCR_CC_SRIOV_1STVF_OFFSET_REG 0x00000174
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_1STVF_OFFSET_REG 0x00000194
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_1STVF_OFFSET_LBN 0
+#define PCRF_CZ_VF_1STVF_OFFSET_WIDTH 16
+
+
+/*
+ * PC_LANE45_EQU_CONTROL_REG(32bit):
+ * Lanes 4,5 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE45_EQU_CONTROL_REG 0x00000174
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE5_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE5_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE4_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE4_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_VFSTRIDE_REG(16bit):
+ * SRIOV VF Stride
+ */
+
+#define PCR_CC_SRIOV_VFSTRIDE_REG 0x00000176
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_VFSTRIDE_REG 0x00000196
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_VFSTRIDE_LBN 0
+#define PCRF_CZ_VF_VFSTRIDE_WIDTH 16
+
+
+/*
+ * PC_LANE67_EQU_CONTROL_REG(32bit):
+ * Lanes 6,7 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE67_EQU_CONTROL_REG 0x00000178
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE7_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE7_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE6_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE6_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_DEVID_REG(16bit):
+ * SRIOV VF Device ID
+ */
+
+#define PCR_CC_SRIOV_DEVID_REG 0x0000017a
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_DEVID_REG 0x0000019a
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_DEVID_LBN 0
+#define PCRF_CZ_VF_DEVID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_SUP_PAGESZ_REG(16bit):
+ * SRIOV Supported Page Sizes
+ */
+
+#define PCR_CC_SRIOV_SUP_PAGESZ_REG 0x0000017c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_SUP_PAGESZ_REG 0x0000019c
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_SUP_PAGESZ_LBN 0
+#define PCRF_CZ_VF_SUP_PAGESZ_WIDTH 16
+
+
+/*
+ * PC_SRIOV_SYS_PAGESZ_REG(32bit):
+ * SRIOV System Page Size
+ */
+
+#define PCR_CC_SRIOV_SYS_PAGESZ_REG 0x00000180
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_SYS_PAGESZ_REG 0x000001a0
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_SYS_PAGESZ_LBN 0
+#define PCRF_CZ_VF_SYS_PAGESZ_WIDTH 16
+
+
+/*
+ * PC_SRIOV_BAR0_REG(32bit):
+ * SRIOV VF Bar0
+ */
+
+#define PCR_CC_SRIOV_BAR0_REG 0x00000184
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR0_REG 0x000001a4
+/* hunta0=pci_f0_config */
+
+#define PCRF_CC_VF_BAR_ADDRESS_LBN 0
+#define PCRF_CC_VF_BAR_ADDRESS_WIDTH 32
+#define PCRF_DZ_VF_BAR0_ADDRESS_LBN 4
+#define PCRF_DZ_VF_BAR0_ADDRESS_WIDTH 28
+#define PCRF_DZ_VF_BAR0_PREF_LBN 3
+#define PCRF_DZ_VF_BAR0_PREF_WIDTH 1
+#define PCRF_DZ_VF_BAR0_TYPE_LBN 1
+#define PCRF_DZ_VF_BAR0_TYPE_WIDTH 2
+#define PCRF_DZ_VF_BAR0_IOM_LBN 0
+#define PCRF_DZ_VF_BAR0_IOM_WIDTH 1
+
+
+/*
+ * PC_SRIOV_BAR1_REG(32bit):
+ * SRIOV Bar1
+ */
+
+#define PCR_CC_SRIOV_BAR1_REG 0x00000188
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR1_REG 0x000001a8
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR1_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR1_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR2_REG(32bit):
+ * SRIOV Bar2
+ */
+
+#define PCR_CC_SRIOV_BAR2_REG 0x0000018c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR2_REG 0x000001ac
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR2_ADDRESS_LBN 4
+#define PCRF_DZ_VF_BAR2_ADDRESS_WIDTH 28
+#define PCRF_DZ_VF_BAR2_PREF_LBN 3
+#define PCRF_DZ_VF_BAR2_PREF_WIDTH 1
+#define PCRF_DZ_VF_BAR2_TYPE_LBN 1
+#define PCRF_DZ_VF_BAR2_TYPE_WIDTH 2
+#define PCRF_DZ_VF_BAR2_IOM_LBN 0
+#define PCRF_DZ_VF_BAR2_IOM_WIDTH 1
+
+
+/*
+ * PC_SRIOV_BAR3_REG(32bit):
+ * SRIOV Bar3
+ */
+
+#define PCR_CC_SRIOV_BAR3_REG 0x00000190
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR3_REG 0x000001b0
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR3_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR3_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR4_REG(32bit):
+ * SRIOV Bar4
+ */
+
+#define PCR_CC_SRIOV_BAR4_REG 0x00000194
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR4_REG 0x000001b4
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR4_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR4_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR5_REG(32bit):
+ * SRIOV Bar5
+ */
+
+#define PCR_CC_SRIOV_BAR5_REG 0x00000198
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR5_REG 0x000001b8
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR5_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR5_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_RSVD_REG(16bit):
+ * Reserved register
+ */
+
+#define PCR_DZ_SRIOV_RSVD_REG 0x00000198
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_VF_RSVD_LBN 0
+#define PCRF_DZ_VF_RSVD_WIDTH 16
+
+
+/*
+ * PC_SRIOV_MIBR_SARRAY_OFFSET_REG(32bit):
+ * SRIOV VF Migration State Array Offset
+ */
+
+#define PCR_CC_SRIOV_MIBR_SARRAY_OFFSET_REG 0x0000019c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_MIBR_SARRAY_OFFSET_REG 0x000001bc
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_OFFSET_LBN 3
+#define PCRF_CZ_VF_MIGR_OFFSET_WIDTH 29
+#define PCRF_CZ_VF_MIGR_BIR_LBN 0
+#define PCRF_CZ_VF_MIGR_BIR_WIDTH 3
+
+
+/*
+ * PC_TPH_CAP_HDR_REG(32bit):
+ * TPH Capability Header Register
+ */
+
+#define PCR_DZ_TPH_CAP_HDR_REG 0x000001c0
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_TPH_NXT_PTR_LBN 20
+#define PCRF_DZ_TPH_NXT_PTR_WIDTH 12
+#define PCRF_DZ_TPH_VERSION_LBN 16
+#define PCRF_DZ_TPH_VERSION_WIDTH 4
+#define PCRF_DZ_TPH_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_TPH_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_TPH_REQ_CAP_REG(32bit):
+ * TPH Requester Capability Register
+ */
+
+#define PCR_DZ_TPH_REQ_CAP_REG 0x000001c4
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_ST_TBLE_SIZE_LBN 16
+#define PCRF_DZ_ST_TBLE_SIZE_WIDTH 11
+#define PCRF_DZ_ST_TBLE_LOC_LBN 9
+#define PCRF_DZ_ST_TBLE_LOC_WIDTH 2
+#define PCRF_DZ_EXT_TPH_MODE_SUP_LBN 8
+#define PCRF_DZ_EXT_TPH_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_DEV_MODE_SUP_LBN 2
+#define PCRF_DZ_TPH_DEV_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_INT_MODE_SUP_LBN 1
+#define PCRF_DZ_TPH_INT_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_NOST_MODE_SUP_LBN 0
+#define PCRF_DZ_TPH_NOST_MODE_SUP_WIDTH 1
+
+
+/*
+ * PC_TPH_REQ_CTL_REG(32bit):
+ * TPH Requester Control Register
+ */
+
+#define PCR_DZ_TPH_REQ_CTL_REG 0x000001c8
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_TPH_REQ_ENABLE_LBN 8
+#define PCRF_DZ_TPH_REQ_ENABLE_WIDTH 2
+#define PCRF_DZ_TPH_ST_MODE_LBN 0
+#define PCRF_DZ_TPH_ST_MODE_WIDTH 3
+
+
+/*
+ * PC_LTR_CAP_HDR_REG(32bit):
+ * Latency Tolerance Reporting Cap Header Reg
+ */
+
+#define PCR_DZ_LTR_CAP_HDR_REG 0x00000290
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LTR_NXT_PTR_LBN 20
+#define PCRF_DZ_LTR_NXT_PTR_WIDTH 12
+#define PCRF_DZ_LTR_VERSION_LBN 16
+#define PCRF_DZ_LTR_VERSION_WIDTH 4
+#define PCRF_DZ_LTR_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_LTR_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_LTR_MAX_SNOOP_REG(32bit):
+ * LTR Maximum Snoop/No Snoop Register
+ */
+
+#define PCR_DZ_LTR_MAX_SNOOP_REG 0x00000294
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_LBN 26
+#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_WIDTH 3
+#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_LBN 16
+#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_WIDTH 10
+#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_LBN 10
+#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_WIDTH 3
+#define PCRF_DZ_LTR_MAX_SNOOP_LAT_LBN 0
+#define PCRF_DZ_LTR_MAX_SNOOP_LAT_WIDTH 10
+
+
+/*
+ * PC_ACK_LAT_TMR_REG(32bit):
+ * ACK latency timer & replay timer register
+ */
+
+#define PCR_AC_ACK_LAT_TMR_REG 0x00000700
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_RT_LBN 16
+#define PCRF_AC_RT_WIDTH 16
+#define PCRF_AC_ALT_LBN 0
+#define PCRF_AC_ALT_WIDTH 16
+
+
+/*
+ * PC_OTHER_MSG_REG(32bit):
+ * Other message register
+ */
+
+#define PCR_AC_OTHER_MSG_REG 0x00000704
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_OM_CRPT3_LBN 24
+#define PCRF_AC_OM_CRPT3_WIDTH 8
+#define PCRF_AC_OM_CRPT2_LBN 16
+#define PCRF_AC_OM_CRPT2_WIDTH 8
+#define PCRF_AC_OM_CRPT1_LBN 8
+#define PCRF_AC_OM_CRPT1_WIDTH 8
+#define PCRF_AC_OM_CRPT0_LBN 0
+#define PCRF_AC_OM_CRPT0_WIDTH 8
+
+
+/*
+ * PC_FORCE_LNK_REG(24bit):
+ * Port force link register
+ */
+
+#define PCR_AC_FORCE_LNK_REG 0x00000708
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_LFS_LBN 16
+#define PCRF_AC_LFS_WIDTH 6
+#define PCRF_AC_FL_LBN 15
+#define PCRF_AC_FL_WIDTH 1
+#define PCRF_AC_LN_LBN 0
+#define PCRF_AC_LN_WIDTH 8
+
+
+/*
+ * PC_ACK_FREQ_REG(32bit):
+ * ACK frequency register
+ */
+
+#define PCR_AC_ACK_FREQ_REG 0x0000070c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_LBN 30
+#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_WIDTH 1
+#define PCRF_AC_L1_ENTR_LAT_LBN 27
+#define PCRF_AC_L1_ENTR_LAT_WIDTH 3
+#define PCRF_AC_L0_ENTR_LAT_LBN 24
+#define PCRF_AC_L0_ENTR_LAT_WIDTH 3
+#define PCRF_CC_COMM_NFTS_LBN 16
+#define PCRF_CC_COMM_NFTS_WIDTH 8
+#define PCRF_AB_ACK_FREQ_REG_RSVD0_LBN 16
+#define PCRF_AB_ACK_FREQ_REG_RSVD0_WIDTH 3
+#define PCRF_AC_MAX_FTS_LBN 8
+#define PCRF_AC_MAX_FTS_WIDTH 8
+#define PCRF_AC_ACK_FREQ_LBN 0
+#define PCRF_AC_ACK_FREQ_WIDTH 8
+
+
+/*
+ * PC_PORT_LNK_CTL_REG(32bit):
+ * Port link control register
+ */
+
+#define PCR_AC_PORT_LNK_CTL_REG 0x00000710
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AB_LRE_LBN 27
+#define PCRF_AB_LRE_WIDTH 1
+#define PCRF_AB_ESYNC_LBN 26
+#define PCRF_AB_ESYNC_WIDTH 1
+#define PCRF_AB_CRPT_LBN 25
+#define PCRF_AB_CRPT_WIDTH 1
+#define PCRF_AB_XB_LBN 24
+#define PCRF_AB_XB_WIDTH 1
+#define PCRF_AC_LC_LBN 16
+#define PCRF_AC_LC_WIDTH 6
+#define PCRF_AC_LDR_LBN 8
+#define PCRF_AC_LDR_WIDTH 4
+#define PCRF_AC_FLM_LBN 7
+#define PCRF_AC_FLM_WIDTH 1
+#define PCRF_AC_LKD_LBN 6
+#define PCRF_AC_LKD_WIDTH 1
+#define PCRF_AC_DLE_LBN 5
+#define PCRF_AC_DLE_WIDTH 1
+#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_LBN 4
+#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_WIDTH 1
+#define PCRF_AC_RA_LBN 3
+#define PCRF_AC_RA_WIDTH 1
+#define PCRF_AC_LE_LBN 2
+#define PCRF_AC_LE_WIDTH 1
+#define PCRF_AC_SD_LBN 1
+#define PCRF_AC_SD_WIDTH 1
+#define PCRF_AC_OMR_LBN 0
+#define PCRF_AC_OMR_WIDTH 1
+
+
+/*
+ * PC_LN_SKEW_REG(32bit):
+ * Lane skew register
+ */
+
+#define PCR_AC_LN_SKEW_REG 0x00000714
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_DIS_LBN 31
+#define PCRF_AC_DIS_WIDTH 1
+#define PCRF_AB_RST_LBN 30
+#define PCRF_AB_RST_WIDTH 1
+#define PCRF_AC_AD_LBN 25
+#define PCRF_AC_AD_WIDTH 1
+#define PCRF_AC_FCD_LBN 24
+#define PCRF_AC_FCD_WIDTH 1
+#define PCRF_AC_LS2_LBN 16
+#define PCRF_AC_LS2_WIDTH 8
+#define PCRF_AC_LS1_LBN 8
+#define PCRF_AC_LS1_WIDTH 8
+#define PCRF_AC_LS0_LBN 0
+#define PCRF_AC_LS0_WIDTH 8
+
+
+/*
+ * PC_SYM_NUM_REG(16bit):
+ * Symbol number register
+ */
+
+#define PCR_AC_SYM_NUM_REG 0x00000718
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_CC_MAX_FUNCTIONS_LBN 29
+#define PCRF_CC_MAX_FUNCTIONS_WIDTH 3
+#define PCRF_CC_FC_WATCHDOG_TMR_LBN 24
+#define PCRF_CC_FC_WATCHDOG_TMR_WIDTH 5
+#define PCRF_CC_ACK_NAK_TMR_MOD_LBN 19
+#define PCRF_CC_ACK_NAK_TMR_MOD_WIDTH 5
+#define PCRF_CC_REPLAY_TMR_MOD_LBN 14
+#define PCRF_CC_REPLAY_TMR_MOD_WIDTH 5
+#define PCRF_AB_ES_LBN 12
+#define PCRF_AB_ES_WIDTH 3
+#define PCRF_AB_SYM_NUM_REG_RSVD0_LBN 11
+#define PCRF_AB_SYM_NUM_REG_RSVD0_WIDTH 1
+#define PCRF_CC_NUM_SKP_SYMS_LBN 8
+#define PCRF_CC_NUM_SKP_SYMS_WIDTH 3
+#define PCRF_AB_TS2_LBN 4
+#define PCRF_AB_TS2_WIDTH 4
+#define PCRF_AC_TS1_LBN 0
+#define PCRF_AC_TS1_WIDTH 4
+
+
+/*
+ * PC_SYM_TMR_FLT_MSK_REG(16bit):
+ * Symbol timer and Filter Mask Register
+ */
+
+#define PCR_CC_SYM_TMR_FLT_MSK_REG 0x0000071c
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_DEFAULT_FLT_MSK1_LBN 16
+#define PCRF_CC_DEFAULT_FLT_MSK1_WIDTH 16
+#define PCRF_CC_FC_WDOG_TMR_DIS_LBN 15
+#define PCRF_CC_FC_WDOG_TMR_DIS_WIDTH 1
+#define PCRF_CC_SI1_LBN 8
+#define PCRF_CC_SI1_WIDTH 3
+#define PCRF_CC_SKIP_INT_VAL_LBN 0
+#define PCRF_CC_SKIP_INT_VAL_WIDTH 11
+#define PCRF_CC_SI0_LBN 0
+#define PCRF_CC_SI0_WIDTH 8
+
+
+/*
+ * PC_SYM_TMR_REG(16bit):
+ * Symbol timer register
+ */
+
+#define PCR_AB_SYM_TMR_REG 0x0000071c
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_ET_LBN 11
+#define PCRF_AB_ET_WIDTH 4
+#define PCRF_AB_SI1_LBN 8
+#define PCRF_AB_SI1_WIDTH 3
+#define PCRF_AB_SI0_LBN 0
+#define PCRF_AB_SI0_WIDTH 8
+
+
+/*
+ * PC_FLT_MSK_REG(32bit):
+ * Filter Mask Register 2
+ */
+
+#define PCR_CC_FLT_MSK_REG 0x00000720
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_DEFAULT_FLT_MSK2_LBN 0
+#define PCRF_CC_DEFAULT_FLT_MSK2_WIDTH 32
+
+
+/*
+ * PC_PHY_STAT_REG(32bit):
+ * PHY status register
+ */
+
+#define PCR_AB_PHY_STAT_REG 0x00000720
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PHY_STAT_REG 0x00000810
+/* sienaa0=pci_f0_config */
+
+#define PCRF_AC_SSL_LBN 3
+#define PCRF_AC_SSL_WIDTH 1
+#define PCRF_AC_SSR_LBN 2
+#define PCRF_AC_SSR_WIDTH 1
+#define PCRF_AC_SSCL_LBN 1
+#define PCRF_AC_SSCL_WIDTH 1
+#define PCRF_AC_SSCD_LBN 0
+#define PCRF_AC_SSCD_WIDTH 1
+
+
+/*
+ * PC_PHY_CTL_REG(32bit):
+ * PHY control register
+ */
+
+#define PCR_AB_PHY_CTL_REG 0x00000724
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PHY_CTL_REG 0x00000814
+/* sienaa0=pci_f0_config */
+
+#define PCRF_AC_BD_LBN 31
+#define PCRF_AC_BD_WIDTH 1
+#define PCRF_AC_CDS_LBN 30
+#define PCRF_AC_CDS_WIDTH 1
+#define PCRF_AC_DWRAP_LB_LBN 29
+#define PCRF_AC_DWRAP_LB_WIDTH 1
+#define PCRF_AC_EBD_LBN 28
+#define PCRF_AC_EBD_WIDTH 1
+#define PCRF_AC_SNR_LBN 27
+#define PCRF_AC_SNR_WIDTH 1
+#define PCRF_AC_RX_NOT_DET_LBN 2
+#define PCRF_AC_RX_NOT_DET_WIDTH 1
+#define PCRF_AC_FORCE_LOS_VAL_LBN 1
+#define PCRF_AC_FORCE_LOS_VAL_WIDTH 1
+#define PCRF_AC_FORCE_LOS_EN_LBN 0
+#define PCRF_AC_FORCE_LOS_EN_WIDTH 1
+
+
+/*
+ * PC_DEBUG0_REG(32bit):
+ * Debug register 0
+ */
+
+#define PCR_AC_DEBUG0_REG 0x00000728
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_CDI03_LBN 24
+#define PCRF_AC_CDI03_WIDTH 8
+#define PCRF_AC_CDI0_LBN 0
+#define PCRF_AC_CDI0_WIDTH 32
+#define PCRF_AC_CDI02_LBN 16
+#define PCRF_AC_CDI02_WIDTH 8
+#define PCRF_AC_CDI01_LBN 8
+#define PCRF_AC_CDI01_WIDTH 8
+#define PCRF_AC_CDI00_LBN 0
+#define PCRF_AC_CDI00_WIDTH 8
+
+
+/*
+ * PC_DEBUG1_REG(32bit):
+ * Debug register 1
+ */
+
+#define PCR_AC_DEBUG1_REG 0x0000072c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_CDI13_LBN 24
+#define PCRF_AC_CDI13_WIDTH 8
+#define PCRF_AC_CDI1_LBN 0
+#define PCRF_AC_CDI1_WIDTH 32
+#define PCRF_AC_CDI12_LBN 16
+#define PCRF_AC_CDI12_WIDTH 8
+#define PCRF_AC_CDI11_LBN 8
+#define PCRF_AC_CDI11_WIDTH 8
+#define PCRF_AC_CDI10_LBN 0
+#define PCRF_AC_CDI10_WIDTH 8
+
+
+/*
+ * PC_XPFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XPFCC_STAT_REG
+ */
+
+#define PCR_AC_XPFCC_STAT_REG 0x00000730
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XPDC_LBN 12
+#define PCRF_AC_XPDC_WIDTH 8
+#define PCRF_AC_XPHC_LBN 0
+#define PCRF_AC_XPHC_WIDTH 12
+
+
+/*
+ * PC_XNPFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XNPFCC_STAT_REG
+ */
+
+#define PCR_AC_XNPFCC_STAT_REG 0x00000734
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XNPDC_LBN 12
+#define PCRF_AC_XNPDC_WIDTH 8
+#define PCRF_AC_XNPHC_LBN 0
+#define PCRF_AC_XNPHC_WIDTH 12
+
+
+/*
+ * PC_XCFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XCFCC_STAT_REG
+ */
+
+#define PCR_AC_XCFCC_STAT_REG 0x00000738
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XCDC_LBN 12
+#define PCRF_AC_XCDC_WIDTH 8
+#define PCRF_AC_XCHC_LBN 0
+#define PCRF_AC_XCHC_WIDTH 12
+
+
+/*
+ * PC_Q_STAT_REG(8bit):
+ * documentation to be written for sum_PC_Q_STAT_REG
+ */
+
+#define PCR_AC_Q_STAT_REG 0x0000073c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_RQNE_LBN 2
+#define PCRF_AC_RQNE_WIDTH 1
+#define PCRF_AC_XRNE_LBN 1
+#define PCRF_AC_XRNE_WIDTH 1
+#define PCRF_AC_RCNR_LBN 0
+#define PCRF_AC_RCNR_WIDTH 1
+
+
+/*
+ * PC_VC_XMIT_ARB1_REG(32bit):
+ * VC Transmit Arbitration Register 1
+ */
+
+#define PCR_CC_VC_XMIT_ARB1_REG 0x00000740
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC_XMIT_ARB2_REG(32bit):
+ * VC Transmit Arbitration Register 2
+ */
+
+#define PCR_CC_VC_XMIT_ARB2_REG 0x00000744
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_P_RQ_CTL_REG(32bit):
+ * VC0 Posted Receive Queue Control
+ */
+
+#define PCR_CC_VC0_P_RQ_CTL_REG 0x00000748
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_NP_RQ_CTL_REG(32bit):
+ * VC0 Non-Posted Receive Queue Control
+ */
+
+#define PCR_CC_VC0_NP_RQ_CTL_REG 0x0000074c
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_C_RQ_CTL_REG(32bit):
+ * VC0 Completion Receive Queue Control
+ */
+
+#define PCR_CC_VC0_C_RQ_CTL_REG 0x00000750
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_GEN2_REG(32bit):
+ * Gen2 Register
+ */
+
+#define PCR_CC_GEN2_REG 0x0000080c
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_SET_DE_EMPHASIS_LBN 20
+#define PCRF_CC_SET_DE_EMPHASIS_WIDTH 1
+#define PCRF_CC_CFG_TX_COMPLIANCE_LBN 19
+#define PCRF_CC_CFG_TX_COMPLIANCE_WIDTH 1
+#define PCRF_CC_CFG_TX_SWING_LBN 18
+#define PCRF_CC_CFG_TX_SWING_WIDTH 1
+#define PCRF_CC_DIR_SPEED_CHANGE_LBN 17
+#define PCRF_CC_DIR_SPEED_CHANGE_WIDTH 1
+#define PCRF_CC_LANE_ENABLE_LBN 8
+#define PCRF_CC_LANE_ENABLE_WIDTH 9
+#define PCRF_CC_NUM_FTS_LBN 0
+#define PCRF_CC_NUM_FTS_WIDTH 8
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_REGS_PCI_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_rx.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_rx.c
new file mode 100644
index 00000000..4fd73bab
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_rx.c
@@ -0,0 +1,1661 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_rx_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+static __checkReturn efx_rc_t
+siena_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+static __checkReturn efx_rc_t
+siena_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+static __checkReturn efx_rc_t
+siena_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+static __checkReturn uint32_t
+siena_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+static __checkReturn efx_rc_t
+siena_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp);
+
+static void
+siena_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(ndescs) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+static void
+siena_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+static void
+siena_rx_qpush_ps_credits(
+ __in efx_rxq_t *erp);
+
+static __checkReturn uint8_t *
+siena_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp);
+#endif
+
+static __checkReturn efx_rc_t
+siena_rx_qflush(
+ __in efx_rxq_t *erp);
+
+static void
+siena_rx_qenable(
+ __in efx_rxq_t *erp);
+
+static __checkReturn efx_rc_t
+siena_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in const efx_rxq_type_data_t *type_data,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp);
+
+static void
+siena_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_rx_ops_t __efx_rx_siena_ops = {
+ siena_rx_init, /* erxo_init */
+ siena_rx_fini, /* erxo_fini */
+#if EFSYS_OPT_RX_SCATTER
+ siena_rx_scatter_enable, /* erxo_scatter_enable */
+#endif
+#if EFSYS_OPT_RX_SCALE
+ NULL, /* erxo_scale_context_alloc */
+ NULL, /* erxo_scale_context_free */
+ siena_rx_scale_mode_set, /* erxo_scale_mode_set */
+ siena_rx_scale_key_set, /* erxo_scale_key_set */
+ siena_rx_scale_tbl_set, /* erxo_scale_tbl_set */
+ siena_rx_prefix_hash, /* erxo_prefix_hash */
+#endif
+ siena_rx_prefix_pktlen, /* erxo_prefix_pktlen */
+ siena_rx_qpost, /* erxo_qpost */
+ siena_rx_qpush, /* erxo_qpush */
+#if EFSYS_OPT_RX_PACKED_STREAM
+ siena_rx_qpush_ps_credits, /* erxo_qpush_ps_credits */
+ siena_rx_qps_packet_info, /* erxo_qps_packet_info */
+#endif
+ siena_rx_qflush, /* erxo_qflush */
+ siena_rx_qenable, /* erxo_qenable */
+ siena_rx_qcreate, /* erxo_qcreate */
+ siena_rx_qdestroy, /* erxo_qdestroy */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+static const efx_rx_ops_t __efx_rx_ef10_ops = {
+ ef10_rx_init, /* erxo_init */
+ ef10_rx_fini, /* erxo_fini */
+#if EFSYS_OPT_RX_SCATTER
+ ef10_rx_scatter_enable, /* erxo_scatter_enable */
+#endif
+#if EFSYS_OPT_RX_SCALE
+ ef10_rx_scale_context_alloc, /* erxo_scale_context_alloc */
+ ef10_rx_scale_context_free, /* erxo_scale_context_free */
+ ef10_rx_scale_mode_set, /* erxo_scale_mode_set */
+ ef10_rx_scale_key_set, /* erxo_scale_key_set */
+ ef10_rx_scale_tbl_set, /* erxo_scale_tbl_set */
+ ef10_rx_prefix_hash, /* erxo_prefix_hash */
+#endif
+ ef10_rx_prefix_pktlen, /* erxo_prefix_pktlen */
+ ef10_rx_qpost, /* erxo_qpost */
+ ef10_rx_qpush, /* erxo_qpush */
+#if EFSYS_OPT_RX_PACKED_STREAM
+ ef10_rx_qpush_ps_credits, /* erxo_qpush_ps_credits */
+ ef10_rx_qps_packet_info, /* erxo_qps_packet_info */
+#endif
+ ef10_rx_qflush, /* erxo_qflush */
+ ef10_rx_qenable, /* erxo_qenable */
+ ef10_rx_qcreate, /* erxo_qcreate */
+ ef10_rx_qdestroy, /* erxo_qdestroy */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+
+ __checkReturn efx_rc_t
+efx_rx_init(
+ __inout efx_nic_t *enp)
+{
+ const efx_rx_ops_t *erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_EV)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_mod_flags & EFX_MOD_RX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ erxop = &__efx_rx_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ erxop = &__efx_rx_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ erxop = &__efx_rx_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ erxop = &__efx_rx_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+ if ((rc = erxop->erxo_init(enp)) != 0)
+ goto fail4;
+
+ enp->en_erxop = erxop;
+ enp->en_mod_flags |= EFX_MOD_RX;
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_erxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_RX;
+ return (rc);
+}
+
+ void
+efx_rx_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+ EFSYS_ASSERT3U(enp->en_rx_qcount, ==, 0);
+
+ erxop->erxo_fini(enp);
+
+ enp->en_erxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_RX;
+}
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+efx_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scatter_enable(enp, buf_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_hash_flags_get(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t hash_alg,
+ __inout_ecount(EFX_RX_HASH_NFLAGS) unsigned int *flags,
+ __out unsigned int *nflagsp)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ boolean_t l4;
+ boolean_t additional_modes;
+ unsigned int *entryp = flags;
+ efx_rc_t rc;
+
+ if (flags == NULL || nflagsp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ l4 = encp->enc_rx_scale_l4_hash_supported;
+ additional_modes = encp->enc_rx_scale_additional_modes_supported;
+
+#define LIST_FLAGS(_entryp, _class, _l4_hashing, _additional_modes) \
+ do { \
+ if (_l4_hashing) { \
+ *(_entryp++) = EFX_RX_HASH(_class, 4TUPLE); \
+ \
+ if (_additional_modes) { \
+ *(_entryp++) = \
+ EFX_RX_HASH(_class, 2TUPLE_DST); \
+ *(_entryp++) = \
+ EFX_RX_HASH(_class, 2TUPLE_SRC); \
+ } \
+ } \
+ \
+ *(_entryp++) = EFX_RX_HASH(_class, 2TUPLE); \
+ \
+ if (_additional_modes) { \
+ *(_entryp++) = EFX_RX_HASH(_class, 1TUPLE_DST); \
+ *(_entryp++) = EFX_RX_HASH(_class, 1TUPLE_SRC); \
+ } \
+ \
+ *(_entryp++) = EFX_RX_HASH(_class, DISABLE); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+ switch (hash_alg) {
+ case EFX_RX_HASHALG_PACKED_STREAM:
+ if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0)
+ break;
+ /* FALLTHRU */
+ case EFX_RX_HASHALG_TOEPLITZ:
+ if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0)
+ break;
+
+ LIST_FLAGS(entryp, IPV4_TCP, l4, additional_modes);
+ LIST_FLAGS(entryp, IPV6_TCP, l4, additional_modes);
+
+ if (additional_modes) {
+ LIST_FLAGS(entryp, IPV4_UDP, l4, additional_modes);
+ LIST_FLAGS(entryp, IPV6_UDP, l4, additional_modes);
+ }
+
+ LIST_FLAGS(entryp, IPV4, B_FALSE, additional_modes);
+ LIST_FLAGS(entryp, IPV6, B_FALSE, additional_modes);
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+#undef LIST_FLAGS
+
+ *nflagsp = (unsigned int)(entryp - flags);
+ EFSYS_ASSERT3U(*nflagsp, <=, EFX_RX_HASH_NFLAGS);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_hash_default_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_hash_support_t *supportp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (supportp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Report the hashing support the client gets by default if it
+ * does not allocate an RSS context itself.
+ */
+ *supportp = enp->en_hash_support;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_scale_default_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_scale_context_type_t *typep)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (typep == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Report the RSS support the client gets by default if it
+ * does not allocate an RSS context itself.
+ */
+ *typep = enp->en_rss_context_type;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_context_alloc(
+ __in efx_nic_t *enp,
+ __in efx_rx_scale_context_type_t type,
+ __in uint32_t num_queues,
+ __out uint32_t *rss_contextp)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (erxop->erxo_scale_context_alloc == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if ((rc = erxop->erxo_scale_context_alloc(enp, type,
+ num_queues, rss_contextp)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_context_free(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (erxop->erxo_scale_context_free == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if ((rc = erxop->erxo_scale_context_free(enp, rss_context)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ unsigned int type_flags[EFX_RX_HASH_NFLAGS];
+ unsigned int type_nflags;
+ efx_rx_hash_type_t type_check;
+ unsigned int i;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ /*
+ * Legacy flags and modern bits cannot be
+ * used at the same time in the hash type.
+ */
+ if ((type & EFX_RX_HASH_LEGACY_MASK) &&
+ (type & ~EFX_RX_HASH_LEGACY_MASK)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Translate legacy flags to the new representation
+ * so that chip-specific handlers will consider the
+ * new flags only.
+ */
+ if (type & EFX_RX_HASH_IPV4) {
+ type |= EFX_RX_HASH(IPV4, 2TUPLE);
+ type |= EFX_RX_HASH(IPV4_TCP, 2TUPLE);
+ type |= EFX_RX_HASH(IPV4_UDP, 2TUPLE);
+ }
+
+ if (type & EFX_RX_HASH_TCPIPV4)
+ type |= EFX_RX_HASH(IPV4_TCP, 4TUPLE);
+
+ if (type & EFX_RX_HASH_IPV6) {
+ type |= EFX_RX_HASH(IPV6, 2TUPLE);
+ type |= EFX_RX_HASH(IPV6_TCP, 2TUPLE);
+ type |= EFX_RX_HASH(IPV6_UDP, 2TUPLE);
+ }
+
+ if (type & EFX_RX_HASH_TCPIPV6)
+ type |= EFX_RX_HASH(IPV6_TCP, 4TUPLE);
+
+ type &= ~EFX_RX_HASH_LEGACY_MASK;
+ type_check = type;
+
+ /*
+ * Get the list of supported hash flags and sanitise the input.
+ */
+ rc = efx_rx_scale_hash_flags_get(enp, alg, type_flags, &type_nflags);
+ if (rc != 0)
+ goto fail2;
+
+ for (i = 0; i < type_nflags; ++i) {
+ if ((type_check & type_flags[i]) == type_flags[i])
+ type_check &= ~(type_flags[i]);
+ }
+
+ if (type_check != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ if (erxop->erxo_scale_mode_set != NULL) {
+ if ((rc = erxop->erxo_scale_mode_set(enp, rss_context, alg,
+ type, insert)) != 0)
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scale_key_set(enp, rss_context, key, n)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scale_tbl_set(enp, rss_context, table, n)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ void
+efx_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(ndescs) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qpost(erp, addrp, size, ndescs, completed, added);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+ void
+efx_rx_qpush_ps_credits(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qpush_ps_credits(erp);
+}
+
+ __checkReturn uint8_t *
+efx_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ return (erxop->erxo_qps_packet_info(erp, buffer,
+ buffer_length, current_offset, lengthp,
+ next_offsetp, timestamp));
+}
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+ void
+efx_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qpush(erp, added, pushedp);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ if ((rc = erxop->erxo_qflush(erp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qenable(erp);
+}
+
+static __checkReturn efx_rc_t
+efx_rx_qcreate_internal(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in const efx_rxq_type_data_t *type_data,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rxq_t *erp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ /* Allocate an RXQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_rxq_t), erp);
+
+ if (erp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ erp->er_magic = EFX_RXQ_MAGIC;
+ erp->er_enp = enp;
+ erp->er_index = index;
+ erp->er_mask = ndescs - 1;
+ erp->er_esmp = esmp;
+
+ if ((rc = erxop->erxo_qcreate(enp, index, label, type, type_data, esmp,
+ ndescs, id, flags, eep, erp)) != 0)
+ goto fail2;
+
+ enp->en_rx_qcount++;
+ *erpp = erp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp)
+{
+ return efx_rx_qcreate_internal(enp, index, label, type, NULL,
+ esmp, ndescs, id, flags, eep, erpp);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+ __checkReturn efx_rc_t
+efx_rx_qcreate_packed_stream(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in uint32_t ps_buf_size,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp)
+{
+ efx_rxq_type_data_t type_data;
+
+ memset(&type_data, 0, sizeof(type_data));
+
+ type_data.ertd_packed_stream.eps_buf_size = ps_buf_size;
+
+ return efx_rx_qcreate_internal(enp, index, label,
+ EFX_RXQ_TYPE_PACKED_STREAM, &type_data, esmp, ndescs,
+ 0 /* id unused on EF10 */, EFX_RXQ_FLAG_NONE, eep, erpp);
+}
+
+#endif
+
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+
+ __checkReturn efx_rc_t
+efx_rx_qcreate_es_super_buffer(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in uint32_t n_bufs_per_desc,
+ __in uint32_t max_dma_len,
+ __in uint32_t buf_stride,
+ __in uint32_t hol_block_timeout,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp)
+{
+ efx_rc_t rc;
+ efx_rxq_type_data_t type_data;
+
+ if (hol_block_timeout > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ memset(&type_data, 0, sizeof(type_data));
+
+ type_data.ertd_es_super_buffer.eessb_bufs_per_desc = n_bufs_per_desc;
+ type_data.ertd_es_super_buffer.eessb_max_dma_len = max_dma_len;
+ type_data.ertd_es_super_buffer.eessb_buf_stride = buf_stride;
+ type_data.ertd_es_super_buffer.eessb_hol_block_timeout =
+ hol_block_timeout;
+
+ rc = efx_rx_qcreate_internal(enp, index, label,
+ EFX_RXQ_TYPE_ES_SUPER_BUFFER, &type_data, esmp, ndescs,
+ 0 /* id unused on EF10 */, flags, eep, erpp);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif
+
+
+ void
+efx_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qdestroy(erp);
+}
+
+ __checkReturn efx_rc_t
+efx_pseudo_hdr_pkt_length_get(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ return (erxop->erxo_prefix_pktlen(enp, buffer, lengthp));
+}
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn uint32_t
+efx_pseudo_hdr_hash_get(
+ __in efx_rxq_t *erp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ EFSYS_ASSERT3U(enp->en_hash_support, ==, EFX_RX_HASH_AVAILABLE);
+ return (erxop->erxo_prefix_hash(enp, func, buffer));
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_rx_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ unsigned int index;
+
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_DESC_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, 0x3000 / 32);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Zero the RSS table */
+ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS;
+ index++) {
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+ }
+
+#if EFSYS_OPT_RX_SCALE
+ /* The RSS key and indirection table are writable. */
+ enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE;
+
+ /* Hardware can insert RX hash with/without RSS */
+ enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCATTER
+static __checkReturn efx_rc_t
+siena_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ unsigned int nbuf32;
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ nbuf32 = buf_size / 32;
+ if ((nbuf32 == 0) ||
+ (nbuf32 >= (1 << FRF_BZ_RX_USR_BUF_SIZE_WIDTH)) ||
+ ((buf_size % 32) != 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_rx_qcount > 0) {
+ rc = EBUSY;
+ goto fail2;
+ }
+
+ /* Set scatter buffer size */
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, nbuf32);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Enable scatter for packets not matching a filter */
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+
+#define EFX_RX_LFSR_HASH(_enp, _insert) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
+ (_insert) ? 1 : 0); \
+ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ \
+ if ((_enp)->en_family == EFX_FAMILY_SIENA) { \
+ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
+ &oword); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 0); \
+ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
+ &oword); \
+ } \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_RX_TOEPLITZ_IPV4_HASH(_enp, _insert, _ip, _tcp) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 1); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, \
+ (_ip) ? 1 : 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, \
+ (_tcp) ? 0 : 1); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
+ (_insert) ? 1 : 0); \
+ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_RX_TOEPLITZ_IPV6_HASH(_enp, _ip, _tcp, _rc) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, (_ip) ? 1 : 0); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS, (_tcp) ? 0 : 1); \
+ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
+ \
+ (_rc) = 0; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+
+#if EFSYS_OPT_RX_SCALE
+
+static __checkReturn efx_rc_t
+siena_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ efx_rx_hash_type_t type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE);
+ efx_rx_hash_type_t type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
+ efx_rx_hash_type_t type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE);
+ efx_rx_hash_type_t type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
+ efx_rc_t rc;
+
+ if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (alg) {
+ case EFX_RX_HASHALG_LFSR:
+ EFX_RX_LFSR_HASH(enp, insert);
+ break;
+
+ case EFX_RX_HASHALG_TOEPLITZ:
+ EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert,
+ (type & type_ipv4) == type_ipv4,
+ (type & type_ipv4_tcp) == type_ipv4_tcp);
+
+ EFX_RX_TOEPLITZ_IPV6_HASH(enp,
+ (type & type_ipv6) == type_ipv6,
+ (type & type_ipv6_tcp) == type_ipv6_tcp,
+ rc);
+ if (rc != 0)
+ goto fail2;
+
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ EFX_RX_LFSR_HASH(enp, B_FALSE);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ unsigned int byte;
+ unsigned int offset;
+ efx_rc_t rc;
+
+ if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ byte = 0;
+
+ /* Write Toeplitz IPv4 hash key */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
+
+ byte = 0;
+
+ /* Verify Toeplitz IPv4 hash key */
+ EFX_BAR_READO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
+ for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+
+ if ((enp->en_features & EFX_FEATURE_IPV6) == 0)
+ goto done;
+
+ byte = 0;
+
+ /* Write Toeplitz IPv6 hash key 3 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+
+ /* Write Toeplitz IPv6 hash key 2 */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
+
+ /* Write Toeplitz IPv6 hash key 1 */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
+
+ byte = 0;
+
+ /* Verify Toeplitz IPv6 hash key 3 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ }
+
+ /* Verify Toeplitz IPv6 hash key 2 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail4;
+ }
+ }
+
+ /* Verify Toeplitz IPv6 hash key 1 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail5;
+ }
+ }
+
+done:
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ int index;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS);
+ EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH));
+
+ if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) {
+ uint32_t byte;
+
+ /* Calculate the entry to place in the table */
+ byte = (n > 0) ? (uint32_t)table[index % n] : 0;
+
+ EFSYS_PROBE2(table, int, index, uint32_t, byte);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_BZ_IT_QUEUE, byte);
+
+ /* Write the table */
+ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+ }
+
+ for (index = FR_BZ_RX_INDIRECTION_TBL_ROWS - 1; index >= 0; --index) {
+ uint32_t byte;
+
+ /* Determine if we're starting a new batch */
+ byte = (n > 0) ? (uint32_t)table[index % n] : 0;
+
+ /* Read the table */
+ EFX_BAR_TBL_READO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+
+ /* Verify the entry */
+ if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif
+
+/*
+ * Falcon/Siena pseudo-header
+ * --------------------------
+ *
+ * Receive packets are prefixed by an optional 16 byte pseudo-header.
+ * The pseudo-header is a byte array of one of the forms:
+ *
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.TT.TT.TT.TT
+ * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.LL.LL
+ *
+ * where:
+ * TT.TT.TT.TT Toeplitz hash (32-bit big-endian)
+ * LL.LL LFSR hash (16-bit big-endian)
+ */
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn uint32_t
+siena_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ switch (func) {
+ case EFX_RX_HASHALG_TOEPLITZ:
+ return ((buffer[12] << 24) |
+ (buffer[13] << 16) |
+ (buffer[14] << 8) |
+ buffer[15]);
+
+ case EFX_RX_HASHALG_LFSR:
+ return ((buffer[14] << 8) | buffer[15]);
+
+ default:
+ EFSYS_ASSERT(0);
+ return (0);
+ }
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+static __checkReturn efx_rc_t
+siena_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ _NOTE(ARGUNUSED(enp, buffer, lengthp))
+
+ /* Not supported by Falcon/Siena hardware */
+ EFSYS_ASSERT(0);
+ return (ENOTSUP);
+}
+
+
+static void
+siena_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(ndescs) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_qword_t qword;
+ unsigned int i;
+ unsigned int offset;
+ unsigned int id;
+
+ /* The client driver must not overfill the queue */
+ EFSYS_ASSERT3U(added - completed + ndescs, <=,
+ EFX_RXQ_LIMIT(erp->er_mask + 1));
+
+ id = added & (erp->er_mask);
+ for (i = 0; i < ndescs; i++) {
+ EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
+ unsigned int, id, efsys_dma_addr_t, addrp[i],
+ size_t, size);
+
+ EFX_POPULATE_QWORD_3(qword,
+ FSF_AZ_RX_KER_BUF_SIZE, (uint32_t)(size),
+ FSF_AZ_RX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addrp[i] & 0xffffffff),
+ FSF_AZ_RX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addrp[i] >> 32));
+
+ offset = id * sizeof (efx_qword_t);
+ EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
+
+ id = (id + 1) & (erp->er_mask);
+ }
+}
+
+static void
+siena_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ unsigned int pushed = *pushedp;
+ uint32_t wptr;
+ efx_oword_t oword;
+ efx_dword_t dword;
+
+ /* All descriptors are pushed */
+ *pushedp = added;
+
+ /* Push the populated descriptors out */
+ wptr = added & erp->er_mask;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DESC_WPTR, wptr);
+
+ /* Only write the third DWORD */
+ EFX_POPULATE_DWORD_1(dword,
+ EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
+ wptr, pushed & erp->er_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED3(enp, FR_BZ_RX_DESC_UPD_REGP0,
+ erp->er_index, &dword, B_FALSE);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+static void
+siena_rx_qpush_ps_credits(
+ __in efx_rxq_t *erp)
+{
+ /* Not supported by Siena hardware */
+ EFSYS_ASSERT(0);
+}
+
+static uint8_t *
+siena_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp)
+{
+ /* Not supported by Siena hardware */
+ EFSYS_ASSERT(0);
+
+ return (NULL);
+}
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+static __checkReturn efx_rc_t
+siena_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+ uint32_t label;
+
+ label = erp->er_index;
+
+ /* Flush the queue */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ, label);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FLUSH_DESCQ_REG, &oword);
+
+ return (0);
+}
+
+static void
+siena_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DC_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_EN, 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in const efx_rxq_type_data_t *type_data,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t size;
+ boolean_t jumbo = B_FALSE;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(esmp))
+ _NOTE(ARGUNUSED(type_data))
+
+ EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS ==
+ (1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
+ EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
+
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
+
+ if (!ISP2(ndescs) ||
+ (ndescs < EFX_RXQ_MINNDESCS) || (ndescs > EFX_RXQ_MAXNDESCS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_rxq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ for (size = 0; (1 << size) <= (EFX_RXQ_MAXNDESCS / EFX_RXQ_MINNDESCS);
+ size++)
+ if ((1 << size) == (int)(ndescs / EFX_RXQ_MINNDESCS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ switch (type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ if (flags & EFX_RXQ_FLAG_SCATTER) {
+#if EFSYS_OPT_RX_SCATTER
+ jumbo = B_TRUE;
+#else
+ rc = EINVAL;
+ goto fail5;
+#endif /* EFSYS_OPT_RX_SCATTER */
+ }
+
+ /* Set up the new descriptor queue */
+ EFX_POPULATE_OWORD_7(oword,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, id,
+ FRF_AZ_RX_DESCQ_EVQ_ID, eep->ee_index,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL, label,
+ FRF_AZ_RX_DESCQ_SIZE, size,
+ FRF_AZ_RX_DESCQ_TYPE, 0,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ return (0);
+
+#if !EFSYS_OPT_RX_SCATTER
+fail5:
+ EFSYS_PROBE(fail5);
+#endif
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+siena_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT(enp->en_rx_qcount != 0);
+ --enp->en_rx_qcount;
+
+ /* Purge descriptor queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ /* Free the RXQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+}
+
+static void
+siena_rx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_sram.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_sram.c
new file mode 100644
index 00000000..7851ff13
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_sram.c
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_sram_buf_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in efsys_mem_t *esmp,
+ __in size_t n)
+{
+ efx_qword_t qword;
+ uint32_t start = id;
+ uint32_t stop = start + n;
+ efsys_dma_addr_t addr;
+ efx_oword_t oword;
+ unsigned int count;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+ if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2) {
+ /*
+ * FIXME: the efx_sram_buf_tbl_*() functionality needs to be
+ * pulled inside the Falcon/Siena queue create/destroy code,
+ * and then the original functions can be removed (see bug30834
+ * comment #1). But, for now, we just ensure that they are
+ * no-ops for EF10, to allow bringing up existing drivers
+ * without modification.
+ */
+
+ return (0);
+ }
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+ if (stop >= EFX_BUF_TBL_SIZE) {
+ rc = EFBIG;
+ goto fail1;
+ }
+
+ /* Add the entries into the buffer table */
+ addr = EFSYS_MEM_ADDR(esmp);
+ for (id = start; id != stop; id++) {
+ EFX_POPULATE_QWORD_5(qword,
+ FRF_AZ_IP_DAT_BUF_SIZE, 0, FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF_DW0,
+ (uint32_t)((addr >> 12) & 0xffffffff),
+ FRF_AZ_BUF_ADR_FBUF_DW1,
+ (uint32_t)((addr >> 12) >> 32),
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_FULL_TBL,
+ id, &qword);
+
+ addr += EFX_BUF_SIZE;
+ }
+
+ EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);
+
+ /* Flush the write buffer */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_BUF_UPD_CMD, 1,
+ FRF_AZ_BUF_CLR_CMD, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+
+ /* Poll for the last entry being written to the buffer table */
+ EFSYS_ASSERT3U(id, ==, stop);
+ addr -= EFX_BUF_SIZE;
+
+ count = 0;
+ do {
+ EFSYS_PROBE1(wait, unsigned int, count);
+
+ /* Spin for 1 ms */
+ EFSYS_SPIN(1000);
+
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
+ id - 1, &qword);
+
+ if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) ==
+ (uint32_t)((addr >> 12) & 0xffffffff) &&
+ EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) ==
+ (uint32_t)((addr >> 12) >> 32))
+ goto verify;
+
+ } while (++count < 100);
+
+ rc = ETIMEDOUT;
+ goto fail2;
+
+verify:
+ /* Verify the rest of the entries in the buffer table */
+ while (--id != start) {
+ addr -= EFX_BUF_SIZE;
+
+ /* Read the buffer table entry */
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
+ id - 1, &qword);
+
+ if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) !=
+ (uint32_t)((addr >> 12) & 0xffffffff) ||
+ EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) !=
+ (uint32_t)((addr >> 12) >> 32)) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ id = stop;
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, id - 1,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_sram_buf_tbl_clear(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ uint32_t start = id;
+ uint32_t stop = start + n;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+ if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2) {
+ /*
+ * FIXME: the efx_sram_buf_tbl_*() functionality needs to be
+ * pulled inside the Falcon/Siena queue create/destroy code,
+ * and then the original functions can be removed (see bug30834
+ * comment #1). But, for now, we just ensure that they are
+ * no-ops for EF10, to allow bringing up existing drivers
+ * without modification.
+ */
+
+ return;
+ }
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+ EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE);
+
+ EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);
+
+ EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, stop - 1,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+}
+
+
+#if EFSYS_OPT_DIAG
+
+static void
+efx_sram_byte_increment_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+ unsigned int index;
+
+ _NOTE(ARGUNUSED(negate))
+
+ for (index = 0; index < sizeof (efx_qword_t); index++)
+ eqp->eq_u8[index] = offset + index;
+}
+
+static void
+efx_sram_all_the_same_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ if (negate)
+ EFX_SET_QWORD(*eqp);
+ else
+ EFX_ZERO_QWORD(*eqp);
+}
+
+static void
+efx_sram_bit_alternate_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ EFX_POPULATE_QWORD_2(*eqp,
+ EFX_DWORD_0, (negate) ? 0x55555555 : 0xaaaaaaaa,
+ EFX_DWORD_1, (negate) ? 0x55555555 : 0xaaaaaaaa);
+}
+
+static void
+efx_sram_byte_alternate_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ EFX_POPULATE_QWORD_2(*eqp,
+ EFX_DWORD_0, (negate) ? 0x00ff00ff : 0xff00ff00,
+ EFX_DWORD_1, (negate) ? 0x00ff00ff : 0xff00ff00);
+}
+
+static void
+efx_sram_byte_changing_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+ unsigned int index;
+
+ for (index = 0; index < sizeof (efx_qword_t); index++) {
+ uint8_t byte;
+
+ if (offset / 256 == 0)
+ byte = (uint8_t)((offset % 257) % 256);
+ else
+ byte = (uint8_t)(~((offset - 8) % 257) % 256);
+
+ eqp->eq_u8[index] = (negate) ? ~byte : byte;
+ }
+}
+
+static void
+efx_sram_bit_sweep_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+
+ if (negate) {
+ EFX_SET_QWORD(*eqp);
+ EFX_CLEAR_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
+ } else {
+ EFX_ZERO_QWORD(*eqp);
+ EFX_SET_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
+ }
+}
+
+efx_sram_pattern_fn_t __efx_sram_pattern_fns[] = {
+ efx_sram_byte_increment_set,
+ efx_sram_all_the_same_set,
+ efx_sram_bit_alternate_set,
+ efx_sram_byte_alternate_set,
+ efx_sram_byte_changing_set,
+ efx_sram_bit_sweep_set
+};
+
+ __checkReturn efx_rc_t
+efx_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_pattern_type_t type)
+{
+ efx_sram_pattern_fn_t func;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+
+ /* SRAM testing is only available on Siena. */
+ if (enp->en_family != EFX_FAMILY_SIENA)
+ return (0);
+
+ /* Select pattern generator */
+ EFSYS_ASSERT3U(type, <, EFX_PATTERN_NTYPES);
+ func = __efx_sram_pattern_fns[type];
+
+ return (siena_sram_test(enp, func));
+}
+
+#endif /* EFSYS_OPT_DIAG */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_tunnel.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_tunnel.c
new file mode 100644
index 00000000..399fd540
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_tunnel.c
@@ -0,0 +1,469 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_TUNNEL
+
+#if EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON
+static const efx_tunnel_ops_t __efx_tunnel_dummy_ops = {
+ NULL, /* eto_udp_encap_supported */
+ NULL, /* eto_reconfigure */
+};
+#endif /* EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+static __checkReturn boolean_t
+ef10_udp_encap_supported(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+ef10_tunnel_reconfigure(
+ __in efx_nic_t *enp);
+
+static const efx_tunnel_ops_t __efx_tunnel_ef10_ops = {
+ ef10_udp_encap_supported, /* eto_udp_encap_supported */
+ ef10_tunnel_reconfigure, /* eto_reconfigure */
+};
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+static __checkReturn efx_rc_t
+efx_mcdi_set_tunnel_encap_udp_ports(
+ __in efx_nic_t *enp,
+ __in efx_tunnel_cfg_t *etcp,
+ __in boolean_t unloading,
+ __out boolean_t *resetting)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX,
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN)];
+ efx_word_t flags;
+ efx_rc_t rc;
+ unsigned int i;
+ unsigned int entries_num;
+
+ if (etcp == NULL)
+ entries_num = 0;
+ else
+ entries_num = etcp->etc_udp_entries_num;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS;
+ req.emr_in_buf = payload;
+ req.emr_in_length =
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(entries_num);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN;
+
+ EFX_POPULATE_WORD_1(flags,
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING,
+ (unloading == B_TRUE) ? 1 : 0);
+ MCDI_IN_SET_WORD(req, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS,
+ EFX_WORD_FIELD(flags, EFX_WORD_0));
+
+ MCDI_IN_SET_WORD(req, SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES,
+ entries_num);
+
+ for (i = 0; i < entries_num; ++i) {
+ uint16_t mcdi_udp_protocol;
+
+ switch (etcp->etc_udp_entries[i].etue_protocol) {
+ case EFX_TUNNEL_PROTOCOL_VXLAN:
+ mcdi_udp_protocol = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
+ break;
+ case EFX_TUNNEL_PROTOCOL_GENEVE:
+ mcdi_udp_protocol = TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * UDP port is MCDI native little-endian in the request
+ * and EFX_POPULATE_DWORD cares about conversion from
+ * host/CPU byte order to little-endian.
+ */
+ EFX_STATIC_ASSERT(sizeof (efx_dword_t) ==
+ TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN);
+ EFX_POPULATE_DWORD_2(
+ MCDI_IN2(req, efx_dword_t,
+ SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES)[i],
+ TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
+ etcp->etc_udp_entries[i].etue_port,
+ TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
+ mcdi_udp_protocol);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used !=
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *resetting = MCDI_OUT_WORD_FIELD(req,
+ SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS,
+ SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tunnel_init(
+ __in efx_nic_t *enp)
+{
+ efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg;
+ const efx_tunnel_ops_t *etop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TUNNEL));
+
+ EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ etop = &__efx_tunnel_dummy_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ etop = &__efx_tunnel_dummy_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ etop = &__efx_tunnel_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ etop = &__efx_tunnel_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ memset(etcp->etc_udp_entries, 0, sizeof (etcp->etc_udp_entries));
+ etcp->etc_udp_entries_num = 0;
+
+ enp->en_etop = etop;
+ enp->en_mod_flags |= EFX_MOD_TUNNEL;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_etop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TUNNEL;
+
+ return (rc);
+}
+
+ void
+efx_tunnel_fini(
+ __in efx_nic_t *enp)
+{
+ boolean_t resetting;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL);
+
+ if ((enp->en_etop->eto_udp_encap_supported != NULL) &&
+ enp->en_etop->eto_udp_encap_supported(enp)) {
+ /*
+ * The UNLOADING flag allows the MC to suppress the datapath
+ * reset if it was set on the last call to
+ * MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS by all functions
+ */
+ (void) efx_mcdi_set_tunnel_encap_udp_ports(enp, NULL, B_TRUE,
+ &resetting);
+ }
+
+ enp->en_etop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TUNNEL;
+}
+
+static __checkReturn efx_rc_t
+efx_tunnel_config_find_udp_tunnel_entry(
+ __in efx_tunnel_cfg_t *etcp,
+ __in uint16_t port,
+ __out unsigned int *entryp)
+{
+ unsigned int i;
+
+ for (i = 0; i < etcp->etc_udp_entries_num; ++i) {
+ efx_tunnel_udp_entry_t *p = &etcp->etc_udp_entries[i];
+
+ if (p->etue_port == port) {
+ *entryp = i;
+ return (0);
+ }
+ }
+
+ return (ENOENT);
+}
+
+ __checkReturn efx_rc_t
+efx_tunnel_config_udp_add(
+ __in efx_nic_t *enp,
+ __in uint16_t port /* host/cpu-endian */,
+ __in efx_tunnel_protocol_t protocol)
+{
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg;
+ efsys_lock_state_t state;
+ efx_rc_t rc;
+ unsigned int entry;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL);
+
+ if (protocol >= EFX_TUNNEL_NPROTOS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((encp->enc_tunnel_encapsulations_supported &
+ (1u << protocol)) == 0) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = efx_tunnel_config_find_udp_tunnel_entry(etcp, port, &entry);
+ if (rc == 0) {
+ rc = EEXIST;
+ goto fail3;
+ }
+
+ if (etcp->etc_udp_entries_num ==
+ encp->enc_tunnel_config_udp_entries_max) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ etcp->etc_udp_entries[etcp->etc_udp_entries_num].etue_port = port;
+ etcp->etc_udp_entries[etcp->etc_udp_entries_num].etue_protocol =
+ protocol;
+
+ etcp->etc_udp_entries_num++;
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+fail3:
+ EFSYS_PROBE(fail3);
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tunnel_config_udp_remove(
+ __in efx_nic_t *enp,
+ __in uint16_t port /* host/cpu-endian */,
+ __in efx_tunnel_protocol_t protocol)
+{
+ efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg;
+ efsys_lock_state_t state;
+ unsigned int entry;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = efx_tunnel_config_find_udp_tunnel_entry(etcp, port, &entry);
+ if (rc != 0)
+ goto fail1;
+
+ if (etcp->etc_udp_entries[entry].etue_protocol != protocol) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ EFSYS_ASSERT3U(etcp->etc_udp_entries_num, >, 0);
+ etcp->etc_udp_entries_num--;
+
+ if (entry < etcp->etc_udp_entries_num) {
+ memmove(&etcp->etc_udp_entries[entry],
+ &etcp->etc_udp_entries[entry + 1],
+ (etcp->etc_udp_entries_num - entry) *
+ sizeof (etcp->etc_udp_entries[0]));
+ }
+
+ memset(&etcp->etc_udp_entries[etcp->etc_udp_entries_num], 0,
+ sizeof (etcp->etc_udp_entries[0]));
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+ void
+efx_tunnel_config_clear(
+ __in efx_nic_t *enp)
+{
+ efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ etcp->etc_udp_entries_num = 0;
+ memset(etcp->etc_udp_entries, 0, sizeof (etcp->etc_udp_entries));
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+ __checkReturn efx_rc_t
+efx_tunnel_reconfigure(
+ __in efx_nic_t *enp)
+{
+ const efx_tunnel_ops_t *etop = enp->en_etop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL);
+
+ if (etop->eto_reconfigure == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = enp->en_etop->eto_reconfigure(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+static __checkReturn boolean_t
+ef10_udp_encap_supported(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ uint32_t udp_tunnels_mask = 0;
+
+ udp_tunnels_mask |= (1u << EFX_TUNNEL_PROTOCOL_VXLAN);
+ udp_tunnels_mask |= (1u << EFX_TUNNEL_PROTOCOL_GENEVE);
+
+ return ((encp->enc_tunnel_encapsulations_supported &
+ udp_tunnels_mask) == 0 ? B_FALSE : B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+ef10_tunnel_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg;
+ efx_rc_t rc;
+ boolean_t resetting;
+ efsys_lock_state_t state;
+ efx_tunnel_cfg_t etc;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+ memcpy(&etc, etcp, sizeof (etc));
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (ef10_udp_encap_supported(enp) == B_FALSE) {
+ /*
+ * It is OK to apply empty UDP tunnel ports when UDP
+ * tunnel encapsulations are not supported - just nothing
+ * should be done.
+ */
+ if (etc.etc_udp_entries_num == 0)
+ return (0);
+ rc = ENOTSUP;
+ goto fail1;
+ } else {
+ /*
+ * All PCI functions can see a reset upon the
+ * MCDI request completion
+ */
+ rc = efx_mcdi_set_tunnel_encap_udp_ports(enp, &etc, B_FALSE,
+ &resetting);
+ if (rc != 0)
+ goto fail2;
+
+ /*
+ * Although the caller should be able to handle MC reboot,
+ * it might come in handy to report the impending reboot
+ * by returning EAGAIN
+ */
+ return ((resetting) ? EAGAIN : 0);
+ }
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+#endif /* EFSYS_OPT_TUNNEL */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_tx.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_tx.c
new file mode 100644
index 00000000..da37580a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_tx.c
@@ -0,0 +1,1136 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_QSTATS
+#define EFX_TX_QSTAT_INCR(_etp, _stat) \
+ do { \
+ (_etp)->et_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_TX_QSTAT_INCR(_etp, _stat)
+#endif
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_tx_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_tx_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp);
+
+static void
+siena_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+static __checkReturn efx_rc_t
+siena_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_buffer_t *eb,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+static void
+siena_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+static __checkReturn efx_rc_t
+siena_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+static __checkReturn efx_rc_t
+siena_tx_qflush(
+ __in efx_txq_t *etp);
+
+static void
+siena_tx_qenable(
+ __in efx_txq_t *etp);
+
+ __checkReturn efx_rc_t
+siena_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_desc_t *ed,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+ void
+siena_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+#endif
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_tx_ops_t __efx_tx_siena_ops = {
+ siena_tx_init, /* etxo_init */
+ siena_tx_fini, /* etxo_fini */
+ siena_tx_qcreate, /* etxo_qcreate */
+ siena_tx_qdestroy, /* etxo_qdestroy */
+ siena_tx_qpost, /* etxo_qpost */
+ siena_tx_qpush, /* etxo_qpush */
+ siena_tx_qpace, /* etxo_qpace */
+ siena_tx_qflush, /* etxo_qflush */
+ siena_tx_qenable, /* etxo_qenable */
+ NULL, /* etxo_qpio_enable */
+ NULL, /* etxo_qpio_disable */
+ NULL, /* etxo_qpio_write */
+ NULL, /* etxo_qpio_post */
+ siena_tx_qdesc_post, /* etxo_qdesc_post */
+ siena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ NULL, /* etxo_qdesc_tso_create */
+ NULL, /* etxo_qdesc_tso2_create */
+ NULL, /* etxo_qdesc_vlantci_create */
+ NULL, /* etxo_qdesc_checksum_create */
+#if EFSYS_OPT_QSTATS
+ siena_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+static const efx_tx_ops_t __efx_tx_hunt_ops = {
+ ef10_tx_init, /* etxo_init */
+ ef10_tx_fini, /* etxo_fini */
+ ef10_tx_qcreate, /* etxo_qcreate */
+ ef10_tx_qdestroy, /* etxo_qdestroy */
+ ef10_tx_qpost, /* etxo_qpost */
+ ef10_tx_qpush, /* etxo_qpush */
+ ef10_tx_qpace, /* etxo_qpace */
+ ef10_tx_qflush, /* etxo_qflush */
+ ef10_tx_qenable, /* etxo_qenable */
+ ef10_tx_qpio_enable, /* etxo_qpio_enable */
+ ef10_tx_qpio_disable, /* etxo_qpio_disable */
+ ef10_tx_qpio_write, /* etxo_qpio_write */
+ ef10_tx_qpio_post, /* etxo_qpio_post */
+ ef10_tx_qdesc_post, /* etxo_qdesc_post */
+ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ ef10_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
+ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+ ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */
+#if EFSYS_OPT_QSTATS
+ ef10_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+static const efx_tx_ops_t __efx_tx_medford_ops = {
+ ef10_tx_init, /* etxo_init */
+ ef10_tx_fini, /* etxo_fini */
+ ef10_tx_qcreate, /* etxo_qcreate */
+ ef10_tx_qdestroy, /* etxo_qdestroy */
+ ef10_tx_qpost, /* etxo_qpost */
+ ef10_tx_qpush, /* etxo_qpush */
+ ef10_tx_qpace, /* etxo_qpace */
+ ef10_tx_qflush, /* etxo_qflush */
+ ef10_tx_qenable, /* etxo_qenable */
+ ef10_tx_qpio_enable, /* etxo_qpio_enable */
+ ef10_tx_qpio_disable, /* etxo_qpio_disable */
+ ef10_tx_qpio_write, /* etxo_qpio_write */
+ ef10_tx_qpio_post, /* etxo_qpio_post */
+ ef10_tx_qdesc_post, /* etxo_qdesc_post */
+ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ NULL, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
+ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+ ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */
+#if EFSYS_OPT_QSTATS
+ ef10_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+static const efx_tx_ops_t __efx_tx_medford2_ops = {
+ ef10_tx_init, /* etxo_init */
+ ef10_tx_fini, /* etxo_fini */
+ ef10_tx_qcreate, /* etxo_qcreate */
+ ef10_tx_qdestroy, /* etxo_qdestroy */
+ ef10_tx_qpost, /* etxo_qpost */
+ ef10_tx_qpush, /* etxo_qpush */
+ ef10_tx_qpace, /* etxo_qpace */
+ ef10_tx_qflush, /* etxo_qflush */
+ ef10_tx_qenable, /* etxo_qenable */
+ ef10_tx_qpio_enable, /* etxo_qpio_enable */
+ ef10_tx_qpio_disable, /* etxo_qpio_disable */
+ ef10_tx_qpio_write, /* etxo_qpio_write */
+ ef10_tx_qpio_post, /* etxo_qpio_post */
+ ef10_tx_qdesc_post, /* etxo_qdesc_post */
+ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ NULL, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
+ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+ ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */
+#if EFSYS_OPT_QSTATS
+ ef10_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+
+ __checkReturn efx_rc_t
+efx_tx_init(
+ __in efx_nic_t *enp)
+{
+ const efx_tx_ops_t *etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_EV)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_mod_flags & EFX_MOD_TX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ etxop = &__efx_tx_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ etxop = &__efx_tx_hunt_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ etxop = &__efx_tx_medford_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ etxop = &__efx_tx_medford2_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+ EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);
+
+ if ((rc = etxop->etxo_init(enp)) != 0)
+ goto fail4;
+
+ enp->en_etxop = etxop;
+ enp->en_mod_flags |= EFX_MOD_TX;
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_etxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TX;
+ return (rc);
+}
+
+ void
+efx_tx_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
+ EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);
+
+ etxop->etxo_fini(enp);
+
+ enp->en_etxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TX;
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_txq_t **etpp,
+ __out unsigned int *addedp)
+{
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_txq_t *etp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
+
+ EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <,
+ enp->en_nic_cfg.enc_txq_limit);
+
+ /* Allocate an TXQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_txq_t), etp);
+
+ if (etp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ etp->et_magic = EFX_TXQ_MAGIC;
+ etp->et_enp = enp;
+ etp->et_index = index;
+ etp->et_mask = ndescs - 1;
+ etp->et_esmp = esmp;
+
+ /* Initial descriptor index may be modified by etxo_qcreate */
+ *addedp = 0;
+
+ if ((rc = etxop->etxo_qcreate(enp, index, label, esmp,
+ ndescs, id, flags, eep, etp, addedp)) != 0)
+ goto fail2;
+
+ enp->en_tx_qcount++;
+ *etpp = etp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ EFSYS_ASSERT(enp->en_tx_qcount != 0);
+ --enp->en_tx_qcount;
+
+ etxop->etxo_qdestroy(etp);
+
+ /* Free the TXQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_buffer_t *eb,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qpost(etp, eb, ndescs, completed, addedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qpush(etp, added, pushed);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qpace(etp, ns)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qflush(etp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qenable(etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_enable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (~enp->en_features & EFX_FEATURE_PIO_BUFFERS) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if (etxop->etxo_qpio_enable == NULL) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ if ((rc = etxop->etxo_qpio_enable(etp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qpio_disable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_disable != NULL)
+ etxop->etxo_qpio_disable(etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_write != NULL) {
+ if ((rc = etxop->etxo_qpio_write(etp, buffer, buf_length,
+ pio_buf_offset)) != 0)
+ goto fail1;
+ return (0);
+ }
+
+ return (ENOTSUP);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_post != NULL) {
+ if ((rc = etxop->etxo_qpio_post(etp, pkt_length, completed,
+ addedp)) != 0)
+ goto fail1;
+ return (0);
+ }
+
+ return (ENOTSUP);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_desc_t *ed,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qdesc_post(etp, ed,
+ ndescs, completed, addedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_dma_create != NULL);
+
+ etxop->etxo_qdesc_dma_create(etp, addr, size, eop, edp);
+}
+
+ void
+efx_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_tso_create != NULL);
+
+ etxop->etxo_qdesc_tso_create(etp, ipv4_id, tcp_seq, tcp_flags, edp);
+}
+
+ void
+efx_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint16_t outer_ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_tso2_create != NULL);
+
+ etxop->etxo_qdesc_tso2_create(etp, ipv4_id, outer_ipv4_id,
+ tcp_seq, mss, edp, count);
+}
+
+ void
+efx_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_vlantci_create != NULL);
+
+ etxop->etxo_qdesc_vlantci_create(etp, tci, edp);
+}
+
+ void
+efx_tx_qdesc_checksum_create(
+ __in efx_txq_t *etp,
+ __in uint16_t flags,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_checksum_create != NULL);
+
+ etxop->etxo_qdesc_checksum_create(etp, flags, edp);
+}
+
+
+#if EFSYS_OPT_QSTATS
+ void
+efx_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qstats_update(etp, stat);
+}
+#endif
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_tx_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * Disable the timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level (although always allow a
+ * minimal trickle).
+ */
+ EFX_BAR_READO(enp, FR_AZ_TX_RESERVED_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+
+ /*
+ * Filter all packets less than 14 bytes to avoid parsing
+ * errors.
+ */
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_RESERVED_REG, &oword);
+
+ /*
+ * Do not set TX_NO_EOP_DISC_EN, since it limits packets to 16
+ * descriptors (which is bad).
+ */
+ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);
+
+ return (0);
+}
+
+#define EFX_TX_DESC(_etp, _addr, _size, _eop, _added) \
+ do { \
+ unsigned int id; \
+ size_t offset; \
+ efx_qword_t qword; \
+ \
+ id = (_added)++ & (_etp)->et_mask; \
+ offset = id * sizeof (efx_qword_t); \
+ \
+ EFSYS_PROBE5(tx_post, unsigned int, (_etp)->et_index, \
+ unsigned int, id, efsys_dma_addr_t, (_addr), \
+ size_t, (_size), boolean_t, (_eop)); \
+ \
+ EFX_POPULATE_QWORD_4(qword, \
+ FSF_AZ_TX_KER_CONT, (_eop) ? 0 : 1, \
+ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)(_size), \
+ FSF_AZ_TX_KER_BUF_ADDR_DW0, \
+ (uint32_t)((_addr) & 0xffffffff), \
+ FSF_AZ_TX_KER_BUF_ADDR_DW1, \
+ (uint32_t)((_addr) >> 32)); \
+ EFSYS_MEM_WRITEQ((_etp)->et_esmp, offset, &qword); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+static __checkReturn efx_rc_t
+siena_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_buffer_t *eb,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ int rc = ENOSPC;
+
+ if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1))
+ goto fail1;
+
+ for (i = 0; i < ndescs; i++) {
+ efx_buffer_t *ebp = &eb[i];
+ efsys_dma_addr_t start = ebp->eb_addr;
+ size_t size = ebp->eb_size;
+ efsys_dma_addr_t end = start + size;
+
+ /*
+ * Fragments must not span 4k boundaries.
+ * Here it is a stricter requirement than the maximum length.
+ */
+ EFSYS_ASSERT(P2ROUNDUP(start + 1,
+ etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= end);
+
+ EFX_TX_DESC(etp, start, size, ebp->eb_eop, added);
+ }
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+siena_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ uint32_t wptr;
+ efx_dword_t dword;
+ efx_oword_t oword;
+
+ /* Push the populated descriptors out */
+ wptr = added & etp->et_mask;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DESC_WPTR, wptr);
+
+ /* Only write the third DWORD */
+ EFX_POPULATE_DWORD_1(dword,
+ EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
+ wptr, pushed & etp->et_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED3(enp, FR_BZ_TX_DESC_UPD_REGP0,
+ etp->et_index, &dword, B_FALSE);
+}
+
+#define EFX_MAX_PACE_VALUE 20
+
+static __checkReturn efx_rc_t
+siena_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ unsigned int pace_val;
+ unsigned int timer_period;
+ efx_rc_t rc;
+
+ if (ns == 0) {
+ pace_val = 0;
+ } else {
+ /*
+ * The pace_val to write into the table is s.t
+ * ns <= timer_period * (2 ^ pace_val)
+ */
+ timer_period = 104 / encp->enc_clk_mult;
+ for (pace_val = 1; pace_val <= EFX_MAX_PACE_VALUE; pace_val++) {
+ if ((timer_period << pace_val) >= ns)
+ break;
+ }
+ }
+ if (pace_val > EFX_MAX_PACE_VALUE) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Update the pacing table */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_PACE, pace_val);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_PACE_TBL, etp->et_index,
+ &oword, B_TRUE);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+ uint32_t label;
+
+ efx_tx_qpace(etp, 0);
+
+ label = etp->et_index;
+
+ /* Flush the queue */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, label);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_FLUSH_DESCQ_REG, &oword);
+
+ return (0);
+}
+
+static void
+siena_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+
+ EFSYS_PROBE5(tx_descq_ptr, unsigned int, etp->et_index,
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_3),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_2),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_1),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_0));
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DC_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_EN, 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t size;
+ uint16_t inner_csum;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(esmp))
+
+ EFX_STATIC_ASSERT(EFX_EV_TX_NLABELS ==
+ (1 << FRF_AZ_TX_DESCQ_LABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_TX_NLABELS);
+
+ EFSYS_ASSERT(ISP2(encp->enc_txq_max_ndescs));
+ EFX_STATIC_ASSERT(ISP2(EFX_TXQ_MINNDESCS));
+
+ if (!ISP2(ndescs) ||
+ (ndescs < EFX_TXQ_MINNDESCS) || (ndescs > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_txq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ for (size = 0;
+ (1 << size) <= (int)(encp->enc_txq_max_ndescs / EFX_TXQ_MINNDESCS);
+ size++)
+ if ((1 << size) == (int)(ndescs / EFX_TXQ_MINNDESCS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP;
+ if ((flags & inner_csum) != 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* Set up the new descriptor queue */
+ *addedp = 0;
+
+ EFX_POPULATE_OWORD_6(oword,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, id,
+ FRF_AZ_TX_DESCQ_EVQ_ID, eep->ee_index,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, label,
+ FRF_AZ_TX_DESCQ_SIZE, size,
+ FRF_AZ_TX_DESCQ_TYPE, 0);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_IP_CHKSM_DIS,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_TCP_CHKSM_DIS,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_desc_t *ed,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < ndescs; i++) {
+ efx_desc_t *edp = &ed[i];
+ unsigned int id;
+ size_t offset;
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_desc_t);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
+ }
+
+ EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
+ unsigned int, added, unsigned int, ndescs);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+siena_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ /*
+ * Fragments must not span 4k boundaries.
+ * Here it is a stricter requirement than the maximum length.
+ */
+ EFSYS_ASSERT(P2ROUNDUP(addr + 1,
+ etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= addr + size);
+
+ EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
+ efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_4(edp->ed_eq,
+ FSF_AZ_TX_KER_CONT, eop ? 0 : 1,
+ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)size,
+ FSF_AZ_TX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addr & 0xffffffff),
+ FSF_AZ_TX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addr >> 32));
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_QSTATS
+#if EFSYS_OPT_NAMES
+/* START MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock 2866874ecd7a363b */
+static const char * const __efx_tx_qstat_name[] = {
+ "post",
+ "post_pio",
+};
+/* END MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock */
+
+ const char *
+efx_tx_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(id, <, TX_NQSTATS);
+
+ return (__efx_tx_qstat_name[id]);
+}
+#endif /* EFSYS_OPT_NAMES */
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < TX_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, etp->et_stat[id]);
+ etp->et_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+static void
+siena_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+
+ /* Purge descriptor queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+}
+
+static void
+siena_tx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_types.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_types.h
new file mode 100644
index 00000000..65168ab7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_types.h
@@ -0,0 +1,1634 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Ackowledgement to Fen Systems Ltd.
+ */
+
+#ifndef _SYS_EFX_TYPES_H
+#define _SYS_EFX_TYPES_H
+
+#include "efsys.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Bitfield access
+ *
+ * Solarflare NICs make extensive use of bitfields up to 128 bits
+ * wide. Since there is no native 128-bit datatype on most systems,
+ * and since 64-bit datatypes are inefficient on 32-bit systems and
+ * vice versa, we wrap accesses in a way that uses the most efficient
+ * datatype.
+ *
+ * The NICs are PCI devices and therefore little-endian. Since most
+ * of the quantities that we deal with are DMAed to/from host memory,
+ * we define our datatypes (efx_oword_t, efx_qword_t and efx_dword_t)
+ * to be little-endian.
+ *
+ * In the less common case of using PIO for individual register
+ * writes, we construct the little-endian datatype in host memory and
+ * then use non-swapping register access primitives, rather than
+ * constructing a native-endian datatype and relying on implicit
+ * byte-swapping. (We use a similar strategy for register reads.)
+ */
+
+/*
+ * NOTE: Field definitions here and elsewhere are done in terms of a lowest
+ * bit number (LBN) and a width.
+ */
+
+#define EFX_DUMMY_FIELD_LBN 0
+#define EFX_DUMMY_FIELD_WIDTH 0
+
+#define EFX_BYTE_0_LBN 0
+#define EFX_BYTE_0_WIDTH 8
+
+#define EFX_BYTE_1_LBN 8
+#define EFX_BYTE_1_WIDTH 8
+
+#define EFX_BYTE_2_LBN 16
+#define EFX_BYTE_2_WIDTH 8
+
+#define EFX_BYTE_3_LBN 24
+#define EFX_BYTE_3_WIDTH 8
+
+#define EFX_BYTE_4_LBN 32
+#define EFX_BYTE_4_WIDTH 8
+
+#define EFX_BYTE_5_LBN 40
+#define EFX_BYTE_5_WIDTH 8
+
+#define EFX_BYTE_6_LBN 48
+#define EFX_BYTE_6_WIDTH 8
+
+#define EFX_BYTE_7_LBN 56
+#define EFX_BYTE_7_WIDTH 8
+
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
+
+#define EFX_WORD_2_LBN 32
+#define EFX_WORD_2_WIDTH 16
+
+#define EFX_WORD_3_LBN 48
+#define EFX_WORD_3_WIDTH 16
+
+#define EFX_DWORD_0_LBN 0
+#define EFX_DWORD_0_WIDTH 32
+
+#define EFX_DWORD_1_LBN 32
+#define EFX_DWORD_1_WIDTH 32
+
+#define EFX_DWORD_2_LBN 64
+#define EFX_DWORD_2_WIDTH 32
+
+#define EFX_DWORD_3_LBN 96
+#define EFX_DWORD_3_WIDTH 32
+
+/*
+ * There are intentionally no EFX_QWORD_0 or EFX_QWORD_1 field definitions
+ * here as the implementaion of EFX_QWORD_FIELD and EFX_OWORD_FIELD do not
+ * support field widths larger than 32 bits.
+ */
+
+/* Specified attribute (i.e. LBN ow WIDTH) of the specified field */
+#define EFX_VAL(_field, _attribute) \
+ _field ## _ ## _attribute
+
+/* Lowest bit number of the specified field */
+#define EFX_LOW_BIT(_field) \
+ EFX_VAL(_field, LBN)
+
+/* Width of the specified field */
+#define EFX_WIDTH(_field) \
+ EFX_VAL(_field, WIDTH)
+
+/* Highest bit number of the specified field */
+#define EFX_HIGH_BIT(_field) \
+ (EFX_LOW_BIT(_field) + EFX_WIDTH(_field) - 1)
+
+/*
+ * 64-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x000000000000001f.
+ */
+#define EFX_MASK64(_field) \
+ ((EFX_WIDTH(_field) == 64) ? ~((uint64_t)0) : \
+ (((((uint64_t)1) << EFX_WIDTH(_field))) - 1))
+/*
+ * 32-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x0000001f.
+ */
+#define EFX_MASK32(_field) \
+ ((EFX_WIDTH(_field) == 32) ? ~((uint32_t)0) : \
+ (((((uint32_t)1) << EFX_WIDTH(_field))) - 1))
+
+/*
+ * 16-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x001f.
+ */
+#define EFX_MASK16(_field) \
+ ((EFX_WIDTH(_field) == 16) ? 0xffffu : \
+ (uint16_t)((1 << EFX_WIDTH(_field)) - 1))
+
+/*
+ * 8-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ */
+#define EFX_MASK8(_field) \
+ ((uint8_t)((1 << EFX_WIDTH(_field)) - 1))
+
+#pragma pack(1)
+
+/*
+ * A byte (i.e. 8-bit) datatype
+ */
+typedef union efx_byte_u {
+ uint8_t eb_u8[1];
+} efx_byte_t;
+
+/*
+ * A word (i.e. 16-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_word_u {
+ efx_byte_t ew_byte[2];
+ uint16_t ew_u16[1];
+ uint8_t ew_u8[2];
+} efx_word_t;
+
+/*
+ * A doubleword (i.e. 32-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_dword_u {
+ efx_byte_t ed_byte[4];
+ efx_word_t ed_word[2];
+ uint32_t ed_u32[1];
+ uint16_t ed_u16[2];
+ uint8_t ed_u8[4];
+} efx_dword_t;
+
+/*
+ * A quadword (i.e. 64-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_qword_u {
+ efx_byte_t eq_byte[8];
+ efx_word_t eq_word[4];
+ efx_dword_t eq_dword[2];
+#if EFSYS_HAS_UINT64
+ uint64_t eq_u64[1];
+#endif
+ uint32_t eq_u32[2];
+ uint16_t eq_u16[4];
+ uint8_t eq_u8[8];
+} efx_qword_t;
+
+/*
+ * An octword (i.e. 128-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_oword_u {
+ efx_byte_t eo_byte[16];
+ efx_word_t eo_word[8];
+ efx_dword_t eo_dword[4];
+ efx_qword_t eo_qword[2];
+#if EFSYS_HAS_SSE2_M128
+ __m128i eo_u128[1];
+#endif
+#if EFSYS_HAS_UINT64
+ uint64_t eo_u64[2];
+#endif
+ uint32_t eo_u32[4];
+ uint16_t eo_u16[8];
+ uint8_t eo_u8[16];
+} efx_oword_t;
+
+#pragma pack()
+
+#define __SWAP16(_x) \
+ ((((_x) & 0xff) << 8) | \
+ (((_x) >> 8) & 0xff))
+
+#define __SWAP32(_x) \
+ ((__SWAP16((_x) & 0xffff) << 16) | \
+ __SWAP16(((_x) >> 16) & 0xffff))
+
+#define __SWAP64(_x) \
+ ((__SWAP32((_x) & 0xffffffff) << 32) | \
+ __SWAP32(((_x) >> 32) & 0xffffffff))
+
+#define __NOSWAP16(_x) (_x)
+#define __NOSWAP32(_x) (_x)
+#define __NOSWAP64(_x) (_x)
+
+#if EFSYS_IS_BIG_ENDIAN
+
+#define __CPU_TO_LE_16(_x) ((uint16_t)__SWAP16(_x))
+#define __LE_TO_CPU_16(_x) ((uint16_t)__SWAP16(_x))
+#define __CPU_TO_BE_16(_x) ((uint16_t)__NOSWAP16(_x))
+#define __BE_TO_CPU_16(_x) ((uint16_t)__NOSWAP16(_x))
+
+#define __CPU_TO_LE_32(_x) ((uint32_t)__SWAP32(_x))
+#define __LE_TO_CPU_32(_x) ((uint32_t)__SWAP32(_x))
+#define __CPU_TO_BE_32(_x) ((uint32_t)__NOSWAP32(_x))
+#define __BE_TO_CPU_32(_x) ((uint32_t)__NOSWAP32(_x))
+
+#define __CPU_TO_LE_64(_x) ((uint64_t)__SWAP64(_x))
+#define __LE_TO_CPU_64(_x) ((uint64_t)__SWAP64(_x))
+#define __CPU_TO_BE_64(_x) ((uint64_t)__NOSWAP64(_x))
+#define __BE_TO_CPU_64(_x) ((uint64_t)__NOSWAP64(_x))
+
+#elif EFSYS_IS_LITTLE_ENDIAN
+
+#define __CPU_TO_LE_16(_x) ((uint16_t)__NOSWAP16(_x))
+#define __LE_TO_CPU_16(_x) ((uint16_t)__NOSWAP16(_x))
+#define __CPU_TO_BE_16(_x) ((uint16_t)__SWAP16(_x))
+#define __BE_TO_CPU_16(_x) ((uint16_t)__SWAP16(_x))
+
+#define __CPU_TO_LE_32(_x) ((uint32_t)__NOSWAP32(_x))
+#define __LE_TO_CPU_32(_x) ((uint32_t)__NOSWAP32(_x))
+#define __CPU_TO_BE_32(_x) ((uint32_t)__SWAP32(_x))
+#define __BE_TO_CPU_32(_x) ((uint32_t)__SWAP32(_x))
+
+#define __CPU_TO_LE_64(_x) ((uint64_t)__NOSWAP64(_x))
+#define __LE_TO_CPU_64(_x) ((uint64_t)__NOSWAP64(_x))
+#define __CPU_TO_BE_64(_x) ((uint64_t)__SWAP64(_x))
+#define __BE_TO_CPU_64(_x) ((uint64_t)__SWAP64(_x))
+
+#else
+
+#error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set"
+
+#endif
+
+#define __NATIVE_8(_x) (uint8_t)(_x)
+
+/* Format string for printing an efx_byte_t */
+#define EFX_BYTE_FMT "0x%02x"
+
+/* Format string for printing an efx_word_t */
+#define EFX_WORD_FMT "0x%04x"
+
+/* Format string for printing an efx_dword_t */
+#define EFX_DWORD_FMT "0x%08x"
+
+/* Format string for printing an efx_qword_t */
+#define EFX_QWORD_FMT "0x%08x:%08x"
+
+/* Format string for printing an efx_oword_t */
+#define EFX_OWORD_FMT "0x%08x:%08x:%08x:%08x"
+
+/* Parameters for printing an efx_byte_t */
+#define EFX_BYTE_VAL(_byte) \
+ ((unsigned int)__NATIVE_8((_byte).eb_u8[0]))
+
+/* Parameters for printing an efx_word_t */
+#define EFX_WORD_VAL(_word) \
+ ((unsigned int)__LE_TO_CPU_16((_word).ew_u16[0]))
+
+/* Parameters for printing an efx_dword_t */
+#define EFX_DWORD_VAL(_dword) \
+ ((unsigned int)__LE_TO_CPU_32((_dword).ed_u32[0]))
+
+/* Parameters for printing an efx_qword_t */
+#define EFX_QWORD_VAL(_qword) \
+ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[1])), \
+ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[0]))
+
+/* Parameters for printing an efx_oword_t */
+#define EFX_OWORD_VAL(_oword) \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[3])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[2])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[1])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[0]))
+
+/*
+ * Stop lint complaining about some shifts.
+ */
+#ifdef __lint
+extern int fix_lint;
+#define FIX_LINT(_x) (_x + fix_lint)
+#else
+#define FIX_LINT(_x) (_x)
+#endif
+
+/*
+ * Saturation arithmetic subtract with minimum equal to zero.
+ *
+ * Use saturating arithmetic to ensure a non-negative result. This
+ * avoids undefined behaviour (and compiler warnings) when used as a
+ * shift count.
+ */
+#define EFX_SSUB(_val, _sub) \
+ ((_val) > (_sub) ? ((_val) - (_sub)) : 0)
+
+/*
+ * Extract bit field portion [low,high) from the native-endian element
+ * which contains bits [min,max).
+ *
+ * For example, suppose "element" represents the high 32 bits of a
+ * 64-bit value, and we wish to extract the bits belonging to the bit
+ * field occupying bits 28-45 of this 64-bit value.
+ *
+ * Then EFX_EXTRACT(_element, 32, 63, 28, 45) would give
+ *
+ * (_element) << 4
+ *
+ * The result will contain the relevant bits filled in in the range
+ * [0,high-low), with garbage in bits [high-low+1,...).
+ */
+#define EFX_EXTRACT_NATIVE(_element, _min, _max, _low, _high) \
+ ((FIX_LINT(_low > _max) || FIX_LINT(_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ ((_element) >> EFX_SSUB(_low, _min)) : \
+ ((_element) << EFX_SSUB(_min, _low))))
+
+/*
+ * Extract bit field portion [low,high) from the 64-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT64(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_64(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT32(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_32(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 16-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT16(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_16(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 8-bit
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT8(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__NATIVE_8(_element), _min, _max, _low, _high)
+
+#define EFX_EXTRACT_OWORD64(_oword, _low, _high) \
+ (EFX_EXTRACT64((_oword).eo_u64[0], FIX_LINT(0), FIX_LINT(63), \
+ _low, _high) | \
+ EFX_EXTRACT64((_oword).eo_u64[1], FIX_LINT(64), FIX_LINT(127), \
+ _low, _high))
+
+#define EFX_EXTRACT_OWORD32(_oword, _low, _high) \
+ (EFX_EXTRACT32((_oword).eo_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[1], FIX_LINT(32), FIX_LINT(63), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[2], FIX_LINT(64), FIX_LINT(95), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[3], FIX_LINT(96), FIX_LINT(127), \
+ _low, _high))
+
+#define EFX_EXTRACT_QWORD64(_qword, _low, _high) \
+ (EFX_EXTRACT64((_qword).eq_u64[0], FIX_LINT(0), FIX_LINT(63), \
+ _low, _high))
+
+#define EFX_EXTRACT_QWORD32(_qword, _low, _high) \
+ (EFX_EXTRACT32((_qword).eq_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high) | \
+ EFX_EXTRACT32((_qword).eq_u32[1], FIX_LINT(32), FIX_LINT(63), \
+ _low, _high))
+
+#define EFX_EXTRACT_DWORD(_dword, _low, _high) \
+ (EFX_EXTRACT32((_dword).ed_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high))
+
+#define EFX_EXTRACT_WORD(_word, _low, _high) \
+ (EFX_EXTRACT16((_word).ew_u16[0], FIX_LINT(0), FIX_LINT(15), \
+ _low, _high))
+
+#define EFX_EXTRACT_BYTE(_byte, _low, _high) \
+ (EFX_EXTRACT8((_byte).eb_u8[0], FIX_LINT(0), FIX_LINT(7), \
+ _low, _high))
+
+
+#define EFX_OWORD_FIELD64(_oword, _field) \
+ ((uint32_t)EFX_EXTRACT_OWORD64(_oword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_OWORD_FIELD32(_oword, _field) \
+ (EFX_EXTRACT_OWORD32(_oword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_QWORD_FIELD64(_qword, _field) \
+ ((uint32_t)EFX_EXTRACT_QWORD64(_qword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_QWORD_FIELD32(_qword, _field) \
+ (EFX_EXTRACT_QWORD32(_qword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_DWORD_FIELD(_dword, _field) \
+ (EFX_EXTRACT_DWORD(_dword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_WORD_FIELD(_word, _field) \
+ (EFX_EXTRACT_WORD(_word, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK16(_field))
+
+#define EFX_BYTE_FIELD(_byte, _field) \
+ (EFX_EXTRACT_BYTE(_byte, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK8(_field))
+
+
+#define EFX_OWORD_IS_EQUAL64(_oword_a, _oword_b) \
+ ((_oword_a).eo_u64[0] == (_oword_b).eo_u64[0] && \
+ (_oword_a).eo_u64[1] == (_oword_b).eo_u64[1])
+
+#define EFX_OWORD_IS_EQUAL32(_oword_a, _oword_b) \
+ ((_oword_a).eo_u32[0] == (_oword_b).eo_u32[0] && \
+ (_oword_a).eo_u32[1] == (_oword_b).eo_u32[1] && \
+ (_oword_a).eo_u32[2] == (_oword_b).eo_u32[2] && \
+ (_oword_a).eo_u32[3] == (_oword_b).eo_u32[3])
+
+#define EFX_QWORD_IS_EQUAL64(_qword_a, _qword_b) \
+ ((_qword_a).eq_u64[0] == (_qword_b).eq_u64[0])
+
+#define EFX_QWORD_IS_EQUAL32(_qword_a, _qword_b) \
+ ((_qword_a).eq_u32[0] == (_qword_b).eq_u32[0] && \
+ (_qword_a).eq_u32[1] == (_qword_b).eq_u32[1])
+
+#define EFX_DWORD_IS_EQUAL(_dword_a, _dword_b) \
+ ((_dword_a).ed_u32[0] == (_dword_b).ed_u32[0])
+
+#define EFX_WORD_IS_EQUAL(_word_a, _word_b) \
+ ((_word_a).ew_u16[0] == (_word_b).ew_u16[0])
+
+#define EFX_BYTE_IS_EQUAL(_byte_a, _byte_b) \
+ ((_byte_a).eb_u8[0] == (_byte_b).eb_u8[0])
+
+
+#define EFX_OWORD_IS_ZERO64(_oword) \
+ (((_oword).eo_u64[0] | \
+ (_oword).eo_u64[1]) == 0)
+
+#define EFX_OWORD_IS_ZERO32(_oword) \
+ (((_oword).eo_u32[0] | \
+ (_oword).eo_u32[1] | \
+ (_oword).eo_u32[2] | \
+ (_oword).eo_u32[3]) == 0)
+
+#define EFX_QWORD_IS_ZERO64(_qword) \
+ (((_qword).eq_u64[0]) == 0)
+
+#define EFX_QWORD_IS_ZERO32(_qword) \
+ (((_qword).eq_u32[0] | \
+ (_qword).eq_u32[1]) == 0)
+
+#define EFX_DWORD_IS_ZERO(_dword) \
+ (((_dword).ed_u32[0]) == 0)
+
+#define EFX_WORD_IS_ZERO(_word) \
+ (((_word).ew_u16[0]) == 0)
+
+#define EFX_BYTE_IS_ZERO(_byte) \
+ (((_byte).eb_u8[0]) == 0)
+
+
+#define EFX_OWORD_IS_SET64(_oword) \
+ (((_oword).eo_u64[0] & \
+ (_oword).eo_u64[1]) == ~((uint64_t)0))
+
+#define EFX_OWORD_IS_SET32(_oword) \
+ (((_oword).eo_u32[0] & \
+ (_oword).eo_u32[1] & \
+ (_oword).eo_u32[2] & \
+ (_oword).eo_u32[3]) == ~((uint32_t)0))
+
+#define EFX_QWORD_IS_SET64(_qword) \
+ (((_qword).eq_u64[0]) == ~((uint64_t)0))
+
+#define EFX_QWORD_IS_SET32(_qword) \
+ (((_qword).eq_u32[0] & \
+ (_qword).eq_u32[1]) == ~((uint32_t)0))
+
+#define EFX_DWORD_IS_SET(_dword) \
+ ((_dword).ed_u32[0] == ~((uint32_t)0))
+
+#define EFX_WORD_IS_SET(_word) \
+ ((_word).ew_u16[0] == ~((uint16_t)0))
+
+#define EFX_BYTE_IS_SET(_byte) \
+ ((_byte).eb_u8[0] == ~((uint8_t)0))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the bit field [low,high) that lies within
+ * the range [min,max).
+ */
+
+#define EFX_INSERT_NATIVE64(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ (((uint64_t)(_value)) << EFX_SSUB(_low, _min)) :\
+ (((uint64_t)(_value)) >> EFX_SSUB(_min, _low))))
+
+#define EFX_INSERT_NATIVE32(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ (((uint32_t)(_value)) << EFX_SSUB(_low, _min)) :\
+ (((uint32_t)(_value)) >> EFX_SSUB(_min, _low))))
+
+#define EFX_INSERT_NATIVE16(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ (uint16_t)((_low > _min) ? \
+ ((_value) << EFX_SSUB(_low, _min)) : \
+ ((_value) >> EFX_SSUB(_min, _low))))
+
+#define EFX_INSERT_NATIVE8(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ (uint8_t)((_low > _min) ? \
+ ((_value) << EFX_SSUB(_low, _min)) : \
+ ((_value) >> EFX_SSUB(_min, _low))))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE64(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE32(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE16(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE8(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+/*
+ * Construct bit field
+ *
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELDS64(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_64( \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS32(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_32( \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS16(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_16( \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS8(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __NATIVE_8( \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field10, _value10))
+
+#define EFX_POPULATE_OWORD64(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] = EFX_INSERT_FIELDS64(0, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[1] = EFX_INSERT_FIELDS64(64, 127, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_OWORD32(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[1] = EFX_INSERT_FIELDS32(32, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[2] = EFX_INSERT_FIELDS32(64, 95, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[3] = EFX_INSERT_FIELDS32(96, 127, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_QWORD64(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] = EFX_INSERT_FIELDS64(0, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_QWORD32(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[1] = EFX_INSERT_FIELDS32(32, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_DWORD(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_dword).ed_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_WORD(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_word).ew_u16[0] = EFX_INSERT_FIELDS16(0, 15, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_BYTE(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_byte).eb_u8[0] = EFX_INSERT_FIELDS8(0, 7, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* Populate an octword field with various numbers of arguments */
+#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
+
+#define EFX_POPULATE_OWORD_9(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_OWORD_10(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_OWORD_8(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_OWORD_9(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_OWORD_7(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_OWORD_8(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_OWORD_6(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_OWORD_7(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_OWORD_5(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_OWORD_6(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_OWORD_4(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_OWORD_5(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_OWORD_3(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_OWORD_4(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_OWORD_2(_oword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_OWORD_3(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_OWORD_1(_oword, \
+ _field1, _value1) \
+ EFX_POPULATE_OWORD_2(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_OWORD(_oword) \
+ EFX_POPULATE_OWORD_1(_oword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_OWORD(_oword) \
+ EFX_POPULATE_OWORD_4(_oword, \
+ EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff, \
+ EFX_DWORD_2, 0xffffffff, EFX_DWORD_3, 0xffffffff)
+
+/* Populate a quadword field with various numbers of arguments */
+#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
+
+#define EFX_POPULATE_QWORD_9(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_QWORD_10(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_QWORD_8(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_QWORD_9(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_QWORD_7(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_QWORD_8(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_QWORD_6(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_QWORD_7(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_QWORD_5(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_QWORD_6(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_QWORD_4(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_QWORD_5(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_QWORD_3(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_QWORD_4(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_QWORD_2(_qword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_QWORD_3(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_QWORD_1(_qword, \
+ _field1, _value1) \
+ EFX_POPULATE_QWORD_2(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_QWORD(_qword) \
+ EFX_POPULATE_QWORD_1(_qword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_QWORD(_qword) \
+ EFX_POPULATE_QWORD_2(_qword, \
+ EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff)
+
+/* Populate a dword field with various numbers of arguments */
+#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
+
+#define EFX_POPULATE_DWORD_9(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_DWORD_10(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_DWORD_8(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_DWORD_9(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_DWORD_7(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_DWORD_8(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_DWORD_6(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_DWORD_7(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_DWORD_5(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_DWORD_6(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_DWORD_4(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_DWORD_5(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_DWORD_3(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_DWORD_4(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_DWORD_2(_dword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_DWORD_3(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_DWORD_1(_dword, \
+ _field1, _value1) \
+ EFX_POPULATE_DWORD_2(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_DWORD(_dword) \
+ EFX_POPULATE_DWORD_1(_dword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_DWORD(_dword) \
+ EFX_POPULATE_DWORD_1(_dword, \
+ EFX_DWORD_0, 0xffffffff)
+
+/* Populate a word field with various numbers of arguments */
+#define EFX_POPULATE_WORD_10 EFX_POPULATE_WORD
+
+#define EFX_POPULATE_WORD_9(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_WORD_10(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_WORD_8(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_WORD_9(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_WORD_7(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_WORD_8(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_WORD_6(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_WORD_7(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_WORD_5(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_WORD_6(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_WORD_4(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_WORD_5(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_WORD_3(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_WORD_4(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_WORD_2(_word, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_WORD_3(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_WORD_1(_word, \
+ _field1, _value1) \
+ EFX_POPULATE_WORD_2(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_WORD(_word) \
+ EFX_POPULATE_WORD_1(_word, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_WORD(_word) \
+ EFX_POPULATE_WORD_1(_word, \
+ EFX_WORD_0, 0xffff)
+
+/* Populate a byte field with various numbers of arguments */
+#define EFX_POPULATE_BYTE_10 EFX_POPULATE_BYTE
+
+#define EFX_POPULATE_BYTE_9(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_BYTE_10(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_BYTE_8(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_BYTE_9(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_BYTE_7(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_BYTE_8(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_BYTE_6(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_BYTE_7(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_BYTE_5(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_BYTE_6(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_BYTE_4(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_BYTE_5(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_BYTE_3(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_BYTE_4(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_BYTE_2(_byte, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_BYTE_3(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_BYTE_1(_byte, \
+ _field1, _value1) \
+ EFX_POPULATE_BYTE_2(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_BYTE(_byte) \
+ EFX_POPULATE_BYTE_1(_byte, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_BYTE(_byte) \
+ EFX_POPULATE_BYTE_1(_byte, \
+ EFX_BYTE_0, 0xff)
+
+/*
+ * Modify a named field within an already-populated structure. Used
+ * for read-modify-write operations.
+ */
+
+#define EFX_INSERT_FIELD64(_min, _max, _field, _value) \
+ __CPU_TO_LE_64(EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD32(_min, _max, _field, _value) \
+ __CPU_TO_LE_32(EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD16(_min, _max, _field, _value) \
+ __CPU_TO_LE_16(EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD8(_min, _max, _field, _value) \
+ __NATIVE_8(EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value))
+
+#define EFX_INPLACE_MASK64(_min, _max, _field) \
+ EFX_INSERT_FIELD64(_min, _max, _field, EFX_MASK64(_field))
+
+#define EFX_INPLACE_MASK32(_min, _max, _field) \
+ EFX_INSERT_FIELD32(_min, _max, _field, EFX_MASK32(_field))
+
+#define EFX_INPLACE_MASK16(_min, _max, _field) \
+ EFX_INSERT_FIELD16(_min, _max, _field, EFX_MASK16(_field))
+
+#define EFX_INPLACE_MASK8(_min, _max, _field) \
+ EFX_INSERT_FIELD8(_min, _max, _field, EFX_MASK8(_field))
+
+#define EFX_SET_OWORD_FIELD64(_oword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] = (((_oword).eo_u64[0] & \
+ ~EFX_INPLACE_MASK64(0, 63, _field)) | \
+ EFX_INSERT_FIELD64(0, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[1] = (((_oword).eo_u64[1] & \
+ ~EFX_INPLACE_MASK64(64, 127, _field)) | \
+ EFX_INSERT_FIELD64(64, 127, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_OWORD_FIELD32(_oword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] = (((_oword).eo_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[1] = (((_oword).eo_u32[1] & \
+ ~EFX_INPLACE_MASK32(32, 63, _field)) | \
+ EFX_INSERT_FIELD32(32, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[2] = (((_oword).eo_u32[2] & \
+ ~EFX_INPLACE_MASK32(64, 95, _field)) | \
+ EFX_INSERT_FIELD32(64, 95, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[3] = (((_oword).eo_u32[3] & \
+ ~EFX_INPLACE_MASK32(96, 127, _field)) | \
+ EFX_INSERT_FIELD32(96, 127, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_FIELD64(_qword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] = (((_qword).eq_u64[0] & \
+ ~EFX_INPLACE_MASK64(0, 63, _field)) | \
+ EFX_INSERT_FIELD64(0, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_FIELD32(_qword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] = (((_qword).eq_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[1] = (((_qword).eq_u32[1] & \
+ ~EFX_INPLACE_MASK32(32, 63, _field)) | \
+ EFX_INSERT_FIELD32(32, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_DWORD_FIELD(_dword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_dword).ed_u32[0] = (((_dword).ed_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_WORD_FIELD(_word, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_word).ew_u16[0] = (((_word).ew_u16[0] & \
+ ~EFX_INPLACE_MASK16(0, 15, _field)) | \
+ EFX_INSERT_FIELD16(0, 15, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_BYTE_FIELD(_byte, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_byte).eb_u8[0] = (((_byte).eb_u8[0] & \
+ ~EFX_INPLACE_MASK8(0, 7, _field)) | \
+ EFX_INSERT_FIELD8(0, 7, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Set or clear a numbered bit within an octword.
+ */
+
+#define EFX_SHIFT64(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 64) ? \
+ ((uint64_t)1 << EFX_SSUB((_bit), (_base))) : \
+ 0U)
+
+#define EFX_SHIFT32(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 32) ? \
+ ((uint32_t)1 << EFX_SSUB((_bit),(_base))) : \
+ 0U)
+
+#define EFX_SHIFT16(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 16) ? \
+ (uint16_t)(1 << EFX_SSUB((_bit), (_base))) : \
+ 0U)
+
+#define EFX_SHIFT8(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 8) ? \
+ (uint8_t)(1 << EFX_SSUB((_bit), (_base))) : \
+ 0U)
+
+#define EFX_SET_OWORD_BIT64(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ (_oword).eo_u64[1] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_OWORD_BIT32(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_oword).eo_u32[1] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ (_oword).eo_u32[2] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64))); \
+ (_oword).eo_u32[3] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_OWORD_BIT64(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ (_oword).eo_u64[1] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(64))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_OWORD_BIT32(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_oword).eo_u32[1] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ (_oword).eo_u32[2] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(64))); \
+ (_oword).eo_u32[3] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(96))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_OWORD_BIT64(_oword, _bit) \
+ (((_oword).eo_u64[0] & \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0)))) || \
+ ((_oword).eo_u64[1] & \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64)))))
+
+#define EFX_TEST_OWORD_BIT32(_oword, _bit) \
+ (((_oword).eo_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) || \
+ ((_oword).eo_u32[1] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32)))) || \
+ ((_oword).eo_u32[2] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64)))) || \
+ ((_oword).eo_u32[3] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96)))))
+
+
+#define EFX_SET_QWORD_BIT64(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_BIT32(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_qword).eq_u32[1] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_QWORD_BIT64(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_QWORD_BIT32(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_qword).eq_u32[1] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_QWORD_BIT64(_qword, _bit) \
+ (((_qword).eq_u64[0] & \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0)))) != 0)
+
+#define EFX_TEST_QWORD_BIT32(_qword, _bit) \
+ (((_qword).eq_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) || \
+ ((_qword).eq_u32[1] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32)))))
+
+
+#define EFX_SET_DWORD_BIT(_dword, _bit) \
+ do { \
+ (_dword).ed_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_DWORD_BIT(_dword, _bit) \
+ do { \
+ (_dword).ed_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_DWORD_BIT(_dword, _bit) \
+ (((_dword).ed_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_SET_WORD_BIT(_word, _bit) \
+ do { \
+ (_word).ew_u16[0] |= \
+ __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_WORD_BIT(_word, _bit) \
+ do { \
+ (_word).ew_u32[0] &= \
+ __CPU_TO_LE_16(~EFX_SHIFT16(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_WORD_BIT(_word, _bit) \
+ (((_word).ew_u16[0] & \
+ __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_SET_BYTE_BIT(_byte, _bit) \
+ do { \
+ (_byte).eb_u8[0] |= \
+ __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_BYTE_BIT(_byte, _bit) \
+ do { \
+ (_byte).eb_u8[0] &= \
+ __NATIVE_8(~EFX_SHIFT8(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_BYTE_BIT(_byte, _bit) \
+ (((_byte).eb_u8[0] & \
+ __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_OR_OWORD64(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u64[0] |= (_oword2).eo_u64[0]; \
+ (_oword1).eo_u64[1] |= (_oword2).eo_u64[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_OWORD32(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u32[0] |= (_oword2).eo_u32[0]; \
+ (_oword1).eo_u32[1] |= (_oword2).eo_u32[1]; \
+ (_oword1).eo_u32[2] |= (_oword2).eo_u32[2]; \
+ (_oword1).eo_u32[3] |= (_oword2).eo_u32[3]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_OWORD64(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u64[0] &= (_oword2).eo_u64[0]; \
+ (_oword1).eo_u64[1] &= (_oword2).eo_u64[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_OWORD32(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u32[0] &= (_oword2).eo_u32[0]; \
+ (_oword1).eo_u32[1] &= (_oword2).eo_u32[1]; \
+ (_oword1).eo_u32[2] &= (_oword2).eo_u32[2]; \
+ (_oword1).eo_u32[3] &= (_oword2).eo_u32[3]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_QWORD64(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u64[0] |= (_qword2).eq_u64[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_QWORD32(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u32[0] |= (_qword2).eq_u32[0]; \
+ (_qword1).eq_u32[1] |= (_qword2).eq_u32[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_QWORD64(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u64[0] &= (_qword2).eq_u64[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_QWORD32(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u32[0] &= (_qword2).eq_u32[0]; \
+ (_qword1).eq_u32[1] &= (_qword2).eq_u32[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_DWORD(_dword1, _dword2) \
+ do { \
+ (_dword1).ed_u32[0] |= (_dword2).ed_u32[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_DWORD(_dword1, _dword2) \
+ do { \
+ (_dword1).ed_u32[0] &= (_dword2).ed_u32[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_WORD(_word1, _word2) \
+ do { \
+ (_word1).ew_u16[0] |= (_word2).ew_u16[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_WORD(_word1, _word2) \
+ do { \
+ (_word1).ew_u16[0] &= (_word2).ew_u16[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_BYTE(_byte1, _byte2) \
+ do { \
+ (_byte1).eb_u8[0] |= (_byte2).eb_u8[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_BYTE(_byte1, _byte2) \
+ do { \
+ (_byte1).eb_u8[0] &= (_byte2).eb_u8[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#if EFSYS_USE_UINT64
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
+#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL64
+#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL64
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
+#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET64
+#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET64
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
+#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT64
+#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT64
+#define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT64
+#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT64
+#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT64
+#define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT64
+#define EFX_OR_OWORD EFX_OR_OWORD64
+#define EFX_AND_OWORD EFX_AND_OWORD64
+#define EFX_OR_QWORD EFX_OR_QWORD64
+#define EFX_AND_QWORD EFX_AND_QWORD64
+#else
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
+#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL32
+#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL32
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
+#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET32
+#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET32
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
+#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT32
+#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT32
+#define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT32
+#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT32
+#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT32
+#define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT32
+#define EFX_OR_OWORD EFX_OR_OWORD32
+#define EFX_AND_OWORD EFX_AND_OWORD32
+#define EFX_OR_QWORD EFX_OR_QWORD32
+#define EFX_AND_QWORD EFX_AND_QWORD32
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_TYPES_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_vpd.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_vpd.c
new file mode 100644
index 00000000..6d783d74
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_vpd.c
@@ -0,0 +1,998 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_VPD
+
+#define TAG_TYPE_LBN 7
+#define TAG_TYPE_WIDTH 1
+#define TAG_TYPE_LARGE_ITEM_DECODE 1
+#define TAG_TYPE_SMALL_ITEM_DECODE 0
+
+#define TAG_SMALL_ITEM_NAME_LBN 3
+#define TAG_SMALL_ITEM_NAME_WIDTH 4
+#define TAG_SMALL_ITEM_SIZE_LBN 0
+#define TAG_SMALL_ITEM_SIZE_WIDTH 3
+
+#define TAG_LARGE_ITEM_NAME_LBN 0
+#define TAG_LARGE_ITEM_NAME_WIDTH 7
+
+#define TAG_NAME_END_DECODE 0x0f
+#define TAG_NAME_ID_STRING_DECODE 0x02
+#define TAG_NAME_VPD_R_DECODE 0x10
+#define TAG_NAME_VPD_W_DECODE 0x11
+
+#if EFSYS_OPT_SIENA
+
+static const efx_vpd_ops_t __efx_vpd_siena_ops = {
+ siena_vpd_init, /* evpdo_init */
+ siena_vpd_size, /* evpdo_size */
+ siena_vpd_read, /* evpdo_read */
+ siena_vpd_verify, /* evpdo_verify */
+ siena_vpd_reinit, /* evpdo_reinit */
+ siena_vpd_get, /* evpdo_get */
+ siena_vpd_set, /* evpdo_set */
+ siena_vpd_next, /* evpdo_next */
+ siena_vpd_write, /* evpdo_write */
+ siena_vpd_fini, /* evpdo_fini */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+static const efx_vpd_ops_t __efx_vpd_ef10_ops = {
+ ef10_vpd_init, /* evpdo_init */
+ ef10_vpd_size, /* evpdo_size */
+ ef10_vpd_read, /* evpdo_read */
+ ef10_vpd_verify, /* evpdo_verify */
+ ef10_vpd_reinit, /* evpdo_reinit */
+ ef10_vpd_get, /* evpdo_get */
+ ef10_vpd_set, /* evpdo_set */
+ ef10_vpd_next, /* evpdo_next */
+ ef10_vpd_write, /* evpdo_write */
+ ef10_vpd_fini, /* evpdo_fini */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
+
+ __checkReturn efx_rc_t
+efx_vpd_init(
+ __in efx_nic_t *enp)
+{
+ const efx_vpd_ops_t *evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_VPD));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ evpdop = &__efx_vpd_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ evpdop = &__efx_vpd_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ evpdop = &__efx_vpd_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ evpdop = &__efx_vpd_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (evpdop->evpdo_init != NULL) {
+ if ((rc = evpdop->evpdo_init(enp)) != 0)
+ goto fail2;
+ }
+
+ enp->en_evpdop = evpdop;
+ enp->en_mod_flags |= EFX_MOD_VPD;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_size(enp, sizep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_read(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_verify(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if (evpdop->evpdo_reinit == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = evpdop->evpdo_reinit(enp, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_get(enp, data, size, evvp)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_set(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_set(enp, data, size, evvp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_next(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_next(enp, data, size, evvp, contp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_write(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_vpd_next_tag(
+ __in caddr_t data,
+ __in size_t size,
+ __inout unsigned int *offsetp,
+ __out efx_vpd_tag_t *tagp,
+ __out uint16_t *lengthp)
+{
+ efx_byte_t byte;
+ efx_word_t word;
+ uint8_t name;
+ uint16_t length;
+ size_t headlen;
+ efx_rc_t rc;
+
+ if (*offsetp >= size) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ EFX_POPULATE_BYTE_1(byte, EFX_BYTE_0, data[*offsetp]);
+
+ switch (EFX_BYTE_FIELD(byte, TAG_TYPE)) {
+ case TAG_TYPE_SMALL_ITEM_DECODE:
+ headlen = 1;
+
+ name = EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_NAME);
+ length = (uint16_t)EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_SIZE);
+
+ break;
+
+ case TAG_TYPE_LARGE_ITEM_DECODE:
+ headlen = 3;
+
+ if (*offsetp + headlen > size) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ name = EFX_BYTE_FIELD(byte, TAG_LARGE_ITEM_NAME);
+ EFX_POPULATE_WORD_2(word,
+ EFX_BYTE_0, data[*offsetp + 1],
+ EFX_BYTE_1, data[*offsetp + 2]);
+ length = EFX_WORD_FIELD(word, EFX_WORD_0);
+
+ break;
+
+ default:
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ if (*offsetp + headlen + length > size) {
+ rc = EFAULT;
+ goto fail3;
+ }
+
+ EFX_STATIC_ASSERT(TAG_NAME_END_DECODE == EFX_VPD_END);
+ EFX_STATIC_ASSERT(TAG_NAME_ID_STRING_DECODE == EFX_VPD_ID);
+ EFX_STATIC_ASSERT(TAG_NAME_VPD_R_DECODE == EFX_VPD_RO);
+ EFX_STATIC_ASSERT(TAG_NAME_VPD_W_DECODE == EFX_VPD_RW);
+ if (name != EFX_VPD_END && name != EFX_VPD_ID &&
+ name != EFX_VPD_RO) {
+ rc = EFAULT;
+ goto fail4;
+ }
+
+ *tagp = name;
+ *lengthp = length;
+ *offsetp += headlen;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_vpd_next_keyword(
+ __in_bcount(size) caddr_t tag,
+ __in size_t size,
+ __in unsigned int pos,
+ __out efx_vpd_keyword_t *keywordp,
+ __out uint8_t *lengthp)
+{
+ efx_vpd_keyword_t keyword;
+ uint8_t length;
+ efx_rc_t rc;
+
+ if (pos + 3U > size) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ keyword = EFX_VPD_KEYWORD(tag[pos], tag[pos + 1]);
+ length = tag[pos + 2];
+
+ if (length == 0 || pos + 3U + length > size) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ *keywordp = keyword;
+ *lengthp = length;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_length(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out size_t *lengthp)
+{
+ efx_vpd_tag_t tag;
+ unsigned int offset;
+ uint16_t taglen;
+ efx_rc_t rc;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+ offset += taglen;
+ if (tag == EFX_VPD_END)
+ break;
+ }
+
+ *lengthp = offset;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_verify(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out_opt boolean_t *cksummedp)
+{
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int i;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t cksum;
+ boolean_t cksummed = B_FALSE;
+ efx_rc_t rc;
+
+ /*
+ * Parse every tag,keyword in the existing VPD. If the csum is present,
+ * the assert it is correct, and is the final keyword in the RO block.
+ */
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+ if (tag == EFX_VPD_END)
+ break;
+ else if (tag == EFX_VPD_ID)
+ goto done;
+
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ /* RV keyword must be the last in the block */
+ if (cksummed) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail3;
+
+ if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ cksum = 0;
+ for (i = 0; i < offset + pos + 4; i++)
+ cksum += data[i];
+
+ if (cksum != 0) {
+ rc = EFAULT;
+ goto fail4;
+ }
+
+ cksummed = B_TRUE;
+ }
+ }
+
+ done:
+ offset += taglen;
+ }
+
+ if (!cksummed) {
+ rc = EFAULT;
+ goto fail5;
+ }
+
+ if (cksummedp != NULL)
+ *cksummedp = cksummed;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint8_t __efx_vpd_blank_pid[] = {
+ /* Large resource type ID length 1 */
+ 0x82, 0x01, 0x00,
+ /* Product name ' ' */
+ 0x32,
+};
+
+static uint8_t __efx_vpd_blank_r[] = {
+ /* Large resource type VPD-R length 4 */
+ 0x90, 0x04, 0x00,
+ /* RV keyword length 1 */
+ 'R', 'V', 0x01,
+ /* RV payload checksum */
+ 0x00,
+};
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_reinit(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t wantpid)
+{
+ unsigned int offset = 0;
+ unsigned int pos;
+ efx_byte_t byte;
+ uint8_t cksum;
+ efx_rc_t rc;
+
+ if (size < 0x100) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if (wantpid) {
+ memcpy(data + offset, __efx_vpd_blank_pid,
+ sizeof (__efx_vpd_blank_pid));
+ offset += sizeof (__efx_vpd_blank_pid);
+ }
+
+ memcpy(data + offset, __efx_vpd_blank_r, sizeof (__efx_vpd_blank_r));
+ offset += sizeof (__efx_vpd_blank_r);
+
+ /* Update checksum */
+ cksum = 0;
+ for (pos = 0; pos < offset; pos++)
+ cksum += data[pos];
+ data[offset - 1] -= cksum;
+
+ /* Append trailing tag */
+ EFX_POPULATE_BYTE_3(byte,
+ TAG_TYPE, TAG_TYPE_SMALL_ITEM_DECODE,
+ TAG_SMALL_ITEM_NAME, TAG_NAME_END_DECODE,
+ TAG_SMALL_ITEM_SIZE, 0);
+ data[offset] = EFX_BYTE_FIELD(byte, EFX_BYTE_0);
+ offset++;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_next(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_tag_t *tagp,
+ __out efx_vpd_keyword_t *keywordp,
+ __out_opt unsigned int *payloadp,
+ __out_opt uint8_t *paylenp,
+ __inout unsigned int *contp)
+{
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword = 0;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int index;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t paylen;
+ efx_rc_t rc;
+
+ offset = index = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+
+ if (tag == EFX_VPD_END) {
+ keyword = 0;
+ paylen = 0;
+ index = 0;
+ break;
+ }
+
+ if (tag == EFX_VPD_ID) {
+ if (index++ == *contp) {
+ EFSYS_ASSERT3U(taglen, <, 0x100);
+ keyword = 0;
+ paylen = (uint8_t)MIN(taglen, 0xff);
+
+ goto done;
+ }
+ } else {
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail2;
+
+ if (index++ == *contp) {
+ offset += pos + 3;
+ paylen = keylen;
+
+ goto done;
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+done:
+ *tagp = tag;
+ *keywordp = keyword;
+ if (payloadp != NULL)
+ *payloadp = offset;
+ if (paylenp != NULL)
+ *paylenp = paylen;
+
+ *contp = index;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_get(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_tag_t tag,
+ __in efx_vpd_keyword_t keyword,
+ __out unsigned int *payloadp,
+ __out uint8_t *paylenp)
+{
+ efx_vpd_tag_t itag;
+ efx_vpd_keyword_t ikeyword;
+ unsigned int offset;
+ unsigned int pos;
+ uint16_t taglen;
+ uint8_t keylen;
+ efx_rc_t rc;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &itag, &taglen)) != 0)
+ goto fail1;
+ if (itag == EFX_VPD_END)
+ break;
+
+ if (itag == tag) {
+ if (itag == EFX_VPD_ID) {
+ EFSYS_ASSERT3U(taglen, <, 0x100);
+
+ *paylenp = (uint8_t)MIN(taglen, 0xff);
+ *payloadp = offset;
+ return (0);
+ }
+
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &ikeyword, &keylen)) != 0)
+ goto fail2;
+
+ if (ikeyword == keyword) {
+ *paylenp = keylen;
+ *payloadp = offset + pos + 3;
+ return (0);
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+ /* Not an error */
+ return (ENOENT);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_set(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_word_t word;
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int taghead;
+ unsigned int source;
+ unsigned int dest;
+ unsigned int i;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t cksum;
+ size_t used;
+ efx_rc_t rc;
+
+ switch (evvp->evv_tag) {
+ case EFX_VPD_ID:
+ if (evvp->evv_keyword != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Can't delete the ID keyword */
+ if (evvp->evv_length == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ case EFX_VPD_RO:
+ if (evvp->evv_keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Determine total size of all current tags */
+ if ((rc = efx_vpd_hunk_length(data, size, &used)) != 0)
+ goto fail2;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ taghead = offset;
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail3;
+ if (tag == EFX_VPD_END)
+ break;
+ else if (tag != evvp->evv_tag) {
+ offset += taglen;
+ continue;
+ }
+
+ /* We only support modifying large resource tags */
+ if (offset - taghead != 3) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /*
+ * Work out the offset of the byte immediately after the
+ * old (=source) and new (=dest) new keyword/tag
+ */
+ pos = 0;
+ if (tag == EFX_VPD_ID) {
+ source = offset + taglen;
+ dest = offset + evvp->evv_length;
+ goto check_space;
+ }
+
+ EFSYS_ASSERT3U(tag, ==, EFX_VPD_RO);
+ source = dest = 0;
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail5;
+
+ if (keyword == evvp->evv_keyword &&
+ evvp->evv_length == 0) {
+ /* Deleting this keyword */
+ source = offset + pos + 3 + keylen;
+ dest = offset + pos;
+ break;
+
+ } else if (keyword == evvp->evv_keyword) {
+ /* Adjusting this keyword */
+ source = offset + pos + 3 + keylen;
+ dest = offset + pos + 3 + evvp->evv_length;
+ break;
+
+ } else if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ /* The RV keyword must be at the end */
+ EFSYS_ASSERT3U(pos + 3 + keylen, ==, taglen);
+
+ /*
+ * The keyword doesn't already exist. If the
+ * user deleting a non-existant keyword then
+ * this is a no-op.
+ */
+ if (evvp->evv_length == 0)
+ return (0);
+
+ /* Insert this keyword before the RV keyword */
+ source = offset + pos;
+ dest = offset + pos + 3 + evvp->evv_length;
+ break;
+ }
+ }
+
+ check_space:
+ if (used + dest > size + source) {
+ rc = ENOSPC;
+ goto fail6;
+ }
+
+ /* Move trailing data */
+ (void) memmove(data + dest, data + source, used - source);
+
+ /* Copy contents */
+ memcpy(data + dest - evvp->evv_length, evvp->evv_value,
+ evvp->evv_length);
+
+ /* Insert new keyword header if required */
+ if (tag != EFX_VPD_ID && evvp->evv_length > 0) {
+ EFX_POPULATE_WORD_1(word, EFX_WORD_0,
+ evvp->evv_keyword);
+ data[offset + pos + 0] =
+ EFX_WORD_FIELD(word, EFX_BYTE_0);
+ data[offset + pos + 1] =
+ EFX_WORD_FIELD(word, EFX_BYTE_1);
+ data[offset + pos + 2] = evvp->evv_length;
+ }
+
+ /* Modify tag length (large resource type) */
+ taglen += (uint16_t)(dest - source);
+ EFX_POPULATE_WORD_1(word, EFX_WORD_0, taglen);
+ data[offset - 2] = EFX_WORD_FIELD(word, EFX_BYTE_0);
+ data[offset - 1] = EFX_WORD_FIELD(word, EFX_BYTE_1);
+
+ goto checksum;
+ }
+
+ /* Unable to find the matching tag */
+ rc = ENOENT;
+ goto fail7;
+
+checksum:
+ /* Find the RV tag, and update the checksum */
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail8;
+ if (tag == EFX_VPD_END)
+ break;
+ if (tag == EFX_VPD_RO) {
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail9;
+
+ if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ cksum = 0;
+ for (i = 0; i < offset + pos + 3; i++)
+ cksum += data[i];
+ data[i] = -cksum;
+ break;
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+ /* Zero out the unused portion */
+ (void) memset(data + offset + taglen, 0xff, size - offset - taglen);
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if (evpdop->evpdo_fini != NULL)
+ evpdop->evpdo_fini(enp);
+
+ enp->en_evpdop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_VPD;
+}
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/hunt_impl.h b/src/spdk/dpdk/drivers/net/sfc/base/hunt_impl.h
new file mode 100644
index 00000000..d8dddce8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/hunt_impl.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_HUNT_IMPL_H
+#define _SYS_HUNT_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+#include "efx_mcdi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Missing register definitions */
+#ifndef ER_DZ_TX_PIOBUF_OFST
+#define ER_DZ_TX_PIOBUF_OFST 0x00001000
+#endif
+#ifndef ER_DZ_TX_PIOBUF_STEP
+#define ER_DZ_TX_PIOBUF_STEP 8192
+#endif
+#ifndef ER_DZ_TX_PIOBUF_ROWS
+#define ER_DZ_TX_PIOBUF_ROWS 2048
+#endif
+
+#ifndef ER_DZ_TX_PIOBUF_SIZE
+#define ER_DZ_TX_PIOBUF_SIZE 2048
+#endif
+
+#define HUNT_PIOBUF_NBUFS (16)
+#define HUNT_PIOBUF_SIZE (ER_DZ_TX_PIOBUF_SIZE)
+
+#define HUNT_MIN_PIO_ALLOC_SIZE (HUNT_PIOBUF_SIZE / 32)
+
+
+/* NIC */
+
+extern __checkReturn efx_rc_t
+hunt_board_cfg(
+ __in efx_nic_t *enp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HUNT_IMPL_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/hunt_nic.c b/src/spdk/dpdk/drivers/net/sfc/base/hunt_nic.c
new file mode 100644
index 00000000..16ea81d2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/hunt_nic.c
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON
+
+#include "ef10_tlv_layout.h"
+
+static __checkReturn efx_rc_t
+hunt_nic_get_required_pcie_bandwidth(
+ __in efx_nic_t *enp,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t port_modes;
+ uint32_t max_port_mode;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /*
+ * On Huntington, the firmware may not give us the current port mode, so
+ * we need to go by the set of available port modes and assume the most
+ * capable mode is in use.
+ */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, NULL)) != 0) {
+ /* No port mode info available */
+ bandwidth = 0;
+ goto out;
+ }
+
+ if (port_modes & (1U << TLV_PORT_MODE_40G_40G)) {
+ /*
+ * This needs the full PCIe bandwidth (and could use
+ * more) - roughly 64 Gbit/s for 8 lanes of Gen3.
+ */
+ if ((rc = efx_nic_calculate_pcie_link_bandwidth(8,
+ EFX_PCIE_LINK_SPEED_GEN3, &bandwidth)) != 0)
+ goto fail1;
+ } else {
+ if (port_modes & (1U << TLV_PORT_MODE_40G)) {
+ max_port_mode = TLV_PORT_MODE_40G;
+ } else if (port_modes & (1U << TLV_PORT_MODE_10G_10G_10G_10G)) {
+ max_port_mode = TLV_PORT_MODE_10G_10G_10G_10G;
+ } else {
+ /* Assume two 10G ports */
+ max_port_mode = TLV_PORT_MODE_10G_10G;
+ }
+
+ if ((rc = ef10_nic_get_port_mode_bandwidth(max_port_mode,
+ &bandwidth)) != 0)
+ goto fail2;
+ }
+
+out:
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+hunt_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ uint32_t flags;
+ uint32_t sysclk, dpcpu_clk;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /*
+ * Enable firmware workarounds for hardware errata.
+ * Expected responses are:
+ * - 0 (zero):
+ * Success: workaround enabled or disabled as requested.
+ * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
+ * Firmware does not support the MC_CMD_WORKAROUND request.
+ * (assume that the workaround is not supported).
+ * - MC_CMD_ERR_ENOENT (reported as ENOENT):
+ * Firmware does not support the requested workaround.
+ * - MC_CMD_ERR_EPERM (reported as EACCES):
+ * Unprivileged function cannot enable/disable workarounds.
+ *
+ * See efx_mcdi_request_errcode() for MCDI error translations.
+ */
+
+ /*
+ * If the bug35388 workaround is enabled, then use an indirect access
+ * method to avoid unsafe EVQ writes.
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG35388, B_TRUE,
+ NULL);
+ if ((rc == 0) || (rc == EACCES))
+ encp->enc_bug35388_workaround = B_TRUE;
+ else if ((rc == ENOTSUP) || (rc == ENOENT))
+ encp->enc_bug35388_workaround = B_FALSE;
+ else
+ goto fail1;
+
+ /*
+ * If the bug41750 workaround is enabled, then do not test interrupts,
+ * as the test will fail (seen with Greenport controllers).
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG41750, B_TRUE,
+ NULL);
+ if (rc == 0) {
+ encp->enc_bug41750_workaround = B_TRUE;
+ } else if (rc == EACCES) {
+ /* Assume a controller with 40G ports needs the workaround. */
+ if (epp->ep_default_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+ encp->enc_bug41750_workaround = B_TRUE;
+ else
+ encp->enc_bug41750_workaround = B_FALSE;
+ } else if ((rc == ENOTSUP) || (rc == ENOENT)) {
+ encp->enc_bug41750_workaround = B_FALSE;
+ } else {
+ goto fail2;
+ }
+ if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+ /* Interrupt testing does not work for VFs. See bug50084. */
+ encp->enc_bug41750_workaround = B_TRUE;
+ }
+
+ /*
+ * If the bug26807 workaround is enabled, then firmware has enabled
+ * support for chained multicast filters. Firmware will reset (FLR)
+ * functions which have filters in the hardware filter table when the
+ * workaround is enabled/disabled.
+ *
+ * We must recheck if the workaround is enabled after inserting the
+ * first hardware filter, in case it has been changed since this check.
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807,
+ B_TRUE, &flags);
+ if (rc == 0) {
+ encp->enc_bug26807_workaround = B_TRUE;
+ if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) {
+ /*
+ * Other functions had installed filters before the
+ * workaround was enabled, and they have been reset
+ * by firmware.
+ */
+ EFSYS_PROBE(bug26807_workaround_flr_done);
+ /* FIXME: bump MC warm boot count ? */
+ }
+ } else if (rc == EACCES) {
+ /*
+ * Unprivileged functions cannot enable the workaround in older
+ * firmware.
+ */
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else if ((rc == ENOTSUP) || (rc == ENOENT)) {
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else {
+ goto fail3;
+ }
+
+ /* Get clock frequencies (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
+ goto fail4;
+
+ /*
+ * The Huntington timer quantum is 1536 sysclk cycles, documented for
+ * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ */
+ encp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */
+ if (encp->enc_bug35388_workaround) {
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ ERF_DD_EVQ_IND_TIMER_VAL_WIDTH) / 1000;
+ } else {
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+ }
+
+ encp->enc_bug61265_workaround = B_FALSE; /* Medford only */
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+ encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */
+
+ /*
+ * The workaround for bug35388 uses the top bit of transmit queue
+ * descriptor writes, preventing the use of 4096 descriptor TXQs.
+ */
+ encp->enc_txq_max_ndescs = encp->enc_bug35388_workaround ? 2048 : 4096;
+
+ EFX_STATIC_ASSERT(HUNT_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
+ encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS;
+ encp->enc_piobuf_size = HUNT_PIOBUF_SIZE;
+ encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE;
+
+ if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0)
+ goto fail5;
+ encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+
+ /* All Huntington devices have a PCIe Gen3, 8 lane connector */
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#endif /* EFSYS_OPT_HUNTINGTON */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.c b/src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.c
new file mode 100644
index 00000000..940bd026
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.c
@@ -0,0 +1,560 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MON_MCDI
+
+#if EFSYS_OPT_MON_STATS
+
+#define MCDI_MON_NEXT_PAGE ((uint16_t)0xfffe)
+#define MCDI_MON_INVALID_SENSOR ((uint16_t)0xfffd)
+#define MCDI_MON_PAGE_SIZE 0x20
+
+/* Bitmasks of valid port(s) for each sensor */
+#define MCDI_MON_PORT_NONE (0x00)
+#define MCDI_MON_PORT_P1 (0x01)
+#define MCDI_MON_PORT_P2 (0x02)
+#define MCDI_MON_PORT_P3 (0x04)
+#define MCDI_MON_PORT_P4 (0x08)
+#define MCDI_MON_PORT_Px (0xFFFF)
+
+/* Get port mask from one-based MCDI port number */
+#define MCDI_MON_PORT_MASK(_emip) (1U << ((_emip)->emi_port - 1))
+
+/* Entry for MCDI sensor in sensor map */
+#define STAT(portmask, stat) \
+ { (MCDI_MON_PORT_##portmask), (EFX_MON_STAT_##stat) }
+
+/* Entry for sensor next page flag in sensor map */
+#define STAT_NEXT_PAGE() \
+ { MCDI_MON_PORT_NONE, MCDI_MON_NEXT_PAGE }
+
+/* Placeholder for gaps in the array */
+#define STAT_NO_SENSOR() \
+ { MCDI_MON_PORT_NONE, MCDI_MON_INVALID_SENSOR }
+
+/* Map from MC sensors to monitor statistics */
+static const struct mcdi_sensor_map_s {
+ uint16_t msm_port_mask;
+ uint16_t msm_stat;
+} mcdi_sensor_map[] = {
+ /* Sensor page 0 MC_CMD_SENSOR_xxx */
+ STAT(Px, INT_TEMP), /* 0x00 CONTROLLER_TEMP */
+ STAT(Px, EXT_TEMP), /* 0x01 PHY_COMMON_TEMP */
+ STAT(Px, INT_COOLING), /* 0x02 CONTROLLER_COOLING */
+ STAT(P1, EXT_TEMP), /* 0x03 PHY0_TEMP */
+ STAT(P1, EXT_COOLING), /* 0x04 PHY0_COOLING */
+ STAT(P2, EXT_TEMP), /* 0x05 PHY1_TEMP */
+ STAT(P2, EXT_COOLING), /* 0x06 PHY1_COOLING */
+ STAT(Px, 1V), /* 0x07 IN_1V0 */
+ STAT(Px, 1_2V), /* 0x08 IN_1V2 */
+ STAT(Px, 1_8V), /* 0x09 IN_1V8 */
+ STAT(Px, 2_5V), /* 0x0a IN_2V5 */
+ STAT(Px, 3_3V), /* 0x0b IN_3V3 */
+ STAT(Px, 12V), /* 0x0c IN_12V0 */
+ STAT(Px, 1_2VA), /* 0x0d IN_1V2A */
+ STAT(Px, VREF), /* 0x0e IN_VREF */
+ STAT(Px, VAOE), /* 0x0f OUT_VAOE */
+ STAT(Px, AOE_TEMP), /* 0x10 AOE_TEMP */
+ STAT(Px, PSU_AOE_TEMP), /* 0x11 PSU_AOE_TEMP */
+ STAT(Px, PSU_TEMP), /* 0x12 PSU_TEMP */
+ STAT(Px, FAN0), /* 0x13 FAN_0 */
+ STAT(Px, FAN1), /* 0x14 FAN_1 */
+ STAT(Px, FAN2), /* 0x15 FAN_2 */
+ STAT(Px, FAN3), /* 0x16 FAN_3 */
+ STAT(Px, FAN4), /* 0x17 FAN_4 */
+ STAT(Px, VAOE_IN), /* 0x18 IN_VAOE */
+ STAT(Px, IAOE), /* 0x19 OUT_IAOE */
+ STAT(Px, IAOE_IN), /* 0x1a IN_IAOE */
+ STAT(Px, NIC_POWER), /* 0x1b NIC_POWER */
+ STAT(Px, 0_9V), /* 0x1c IN_0V9 */
+ STAT(Px, I0_9V), /* 0x1d IN_I0V9 */
+ STAT(Px, I1_2V), /* 0x1e IN_I1V2 */
+ STAT_NEXT_PAGE(), /* 0x1f Next page flag (not a sensor) */
+
+ /* Sensor page 1 MC_CMD_SENSOR_xxx */
+ STAT(Px, 0_9V_ADC), /* 0x20 IN_0V9_ADC */
+ STAT(Px, INT_TEMP2), /* 0x21 CONTROLLER_2_TEMP */
+ STAT(Px, VREG_TEMP), /* 0x22 VREG_INTERNAL_TEMP */
+ STAT(Px, VREG_0_9V_TEMP), /* 0x23 VREG_0V9_TEMP */
+ STAT(Px, VREG_1_2V_TEMP), /* 0x24 VREG_1V2_TEMP */
+ STAT(Px, INT_VPTAT), /* 0x25 CTRLR. VPTAT */
+ STAT(Px, INT_ADC_TEMP), /* 0x26 CTRLR. INTERNAL_TEMP */
+ STAT(Px, EXT_VPTAT), /* 0x27 CTRLR. VPTAT_EXTADC */
+ STAT(Px, EXT_ADC_TEMP), /* 0x28 CTRLR. INTERNAL_TEMP_EXTADC */
+ STAT(Px, AMBIENT_TEMP), /* 0x29 AMBIENT_TEMP */
+ STAT(Px, AIRFLOW), /* 0x2a AIRFLOW */
+ STAT(Px, VDD08D_VSS08D_CSR), /* 0x2b VDD08D_VSS08D_CSR */
+ STAT(Px, VDD08D_VSS08D_CSR_EXTADC), /* 0x2c VDD08D_VSS08D_CSR_EXTADC */
+ STAT(Px, HOTPOINT_TEMP), /* 0x2d HOTPOINT_TEMP */
+ STAT(P1, PHY_POWER_SWITCH_PORT0), /* 0x2e PHY_POWER_SWITCH_PORT0 */
+ STAT(P2, PHY_POWER_SWITCH_PORT1), /* 0x2f PHY_POWER_SWITCH_PORT1 */
+ STAT(Px, MUM_VCC), /* 0x30 MUM_VCC */
+ STAT(Px, 0V9_A), /* 0x31 0V9_A */
+ STAT(Px, I0V9_A), /* 0x32 I0V9_A */
+ STAT(Px, 0V9_A_TEMP), /* 0x33 0V9_A_TEMP */
+ STAT(Px, 0V9_B), /* 0x34 0V9_B */
+ STAT(Px, I0V9_B), /* 0x35 I0V9_B */
+ STAT(Px, 0V9_B_TEMP), /* 0x36 0V9_B_TEMP */
+ STAT(Px, CCOM_AVREG_1V2_SUPPLY), /* 0x37 CCOM_AVREG_1V2_SUPPLY */
+ STAT(Px, CCOM_AVREG_1V2_SUPPLY_EXT_ADC),
+ /* 0x38 CCOM_AVREG_1V2_SUPPLY_EXT_ADC */
+ STAT(Px, CCOM_AVREG_1V8_SUPPLY), /* 0x39 CCOM_AVREG_1V8_SUPPLY */
+ STAT(Px, CCOM_AVREG_1V8_SUPPLY_EXT_ADC),
+ /* 0x3a CCOM_AVREG_1V8_SUPPLY_EXT_ADC */
+ STAT_NO_SENSOR(), /* 0x3b (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3c (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3d (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3e (no sensor) */
+ STAT_NEXT_PAGE(), /* 0x3f Next page flag (not a sensor) */
+
+ /* Sensor page 2 MC_CMD_SENSOR_xxx */
+ STAT(Px, CONTROLLER_MASTER_VPTAT), /* 0x40 MASTER_VPTAT */
+ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP), /* 0x41 MASTER_INT_TEMP */
+ STAT(Px, CONTROLLER_MASTER_VPTAT_EXT_ADC), /* 0x42 MAST_VPTAT_EXT_ADC */
+ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC),
+ /* 0x43 MASTER_INTERNAL_TEMP_EXT_ADC */
+ STAT(Px, CONTROLLER_SLAVE_VPTAT), /* 0x44 SLAVE_VPTAT */
+ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP), /* 0x45 SLAVE_INTERNAL_TEMP */
+ STAT(Px, CONTROLLER_SLAVE_VPTAT_EXT_ADC), /* 0x46 SLAVE_VPTAT_EXT_ADC */
+ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC),
+ /* 0x47 SLAVE_INTERNAL_TEMP_EXT_ADC */
+ STAT_NO_SENSOR(), /* 0x48 (no sensor) */
+ STAT(Px, SODIMM_VOUT), /* 0x49 SODIMM_VOUT */
+ STAT(Px, SODIMM_0_TEMP), /* 0x4a SODIMM_0_TEMP */
+ STAT(Px, SODIMM_1_TEMP), /* 0x4b SODIMM_1_TEMP */
+ STAT(Px, PHY0_VCC), /* 0x4c PHY0_VCC */
+ STAT(Px, PHY1_VCC), /* 0x4d PHY1_VCC */
+ STAT(Px, CONTROLLER_TDIODE_TEMP), /* 0x4e CONTROLLER_TDIODE_TEMP */
+ STAT(Px, BOARD_FRONT_TEMP), /* 0x4f BOARD_FRONT_TEMP */
+ STAT(Px, BOARD_BACK_TEMP), /* 0x50 BOARD_BACK_TEMP */
+ STAT(Px, I1V8), /* 0x51 IN_I1V8 */
+ STAT(Px, I2V5), /* 0x52 IN_I2V5 */
+ STAT(Px, I3V3), /* 0x53 IN_I3V3 */
+ STAT(Px, I12V0), /* 0x54 IN_I12V0 */
+ STAT(Px, 1_3V), /* 0x55 IN_1V3 */
+ STAT(Px, I1V3), /* 0x56 IN_I1V3 */
+};
+
+#define MCDI_STATIC_SENSOR_ASSERT(_field) \
+ EFX_STATIC_ASSERT(MC_CMD_SENSOR_STATE_ ## _field \
+ == EFX_MON_STAT_STATE_ ## _field)
+
+static void
+mcdi_mon_decode_stats(
+ __in efx_nic_t *enp,
+ __in_bcount(sensor_mask_size) uint32_t *sensor_mask,
+ __in size_t sensor_mask_size,
+ __in_opt efsys_mem_t *esmp,
+ __out_bcount_opt(sensor_mask_size) uint32_t *stat_maskp,
+ __inout_ecount_opt(EFX_MON_NSTATS) efx_mon_stat_value_t *stat)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ uint16_t port_mask;
+ uint16_t sensor;
+ size_t sensor_max;
+ uint32_t stat_mask[(EFX_ARRAY_SIZE(mcdi_sensor_map) + 31) / 32];
+ uint32_t idx = 0;
+ uint32_t page = 0;
+
+ /* Assert the MC_CMD_SENSOR and EFX_MON_STATE namespaces agree */
+ MCDI_STATIC_SENSOR_ASSERT(OK);
+ MCDI_STATIC_SENSOR_ASSERT(WARNING);
+ MCDI_STATIC_SENSOR_ASSERT(FATAL);
+ MCDI_STATIC_SENSOR_ASSERT(BROKEN);
+ MCDI_STATIC_SENSOR_ASSERT(NO_READING);
+
+ EFX_STATIC_ASSERT(sizeof (stat_mask[0]) * 8 ==
+ EFX_MON_MASK_ELEMENT_SIZE);
+ sensor_max =
+ MIN((8 * sensor_mask_size), EFX_ARRAY_SIZE(mcdi_sensor_map));
+
+ EFSYS_ASSERT(emip->emi_port > 0); /* MCDI port number is one-based */
+ port_mask = MCDI_MON_PORT_MASK(emip);
+
+ memset(stat_mask, 0, sizeof (stat_mask));
+
+ /*
+ * The MCDI sensor readings in the DMA buffer are a packed array of
+ * MC_CMD_SENSOR_VALUE_ENTRY structures, which only includes entries for
+ * supported sensors (bit set in sensor_mask). The sensor_mask and
+ * sensor readings do not include entries for the per-page NEXT_PAGE
+ * flag.
+ *
+ * sensor_mask may legitimately contain MCDI sensors that the driver
+ * does not understand.
+ */
+ for (sensor = 0; sensor < sensor_max; ++sensor) {
+ efx_mon_stat_t id = mcdi_sensor_map[sensor].msm_stat;
+
+ if ((sensor % MCDI_MON_PAGE_SIZE) == MC_CMD_SENSOR_PAGE0_NEXT) {
+ EFSYS_ASSERT3U(id, ==, MCDI_MON_NEXT_PAGE);
+ page++;
+ continue;
+ }
+ if (~(sensor_mask[page]) & (1U << sensor))
+ continue;
+ idx++;
+
+ if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0)
+ continue;
+ EFSYS_ASSERT(id < EFX_MON_NSTATS);
+
+ /*
+ * stat_mask is a bitmask indexed by EFX_MON_* monitor statistic
+ * identifiers from efx_mon_stat_t (without NEXT_PAGE bits).
+ *
+ * If there is an entry in the MCDI sensor to monitor statistic
+ * map then the sensor reading is used for the value of the
+ * monitor statistic.
+ */
+ stat_mask[id / EFX_MON_MASK_ELEMENT_SIZE] |=
+ (1U << (id % EFX_MON_MASK_ELEMENT_SIZE));
+
+ if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+
+ /* Get MCDI sensor reading from DMA buffer */
+ EFSYS_MEM_READD(esmp, 4 * (idx - 1), &dword);
+
+ /* Update EFX monitor stat from MCDI sensor reading */
+ stat[id].emsv_value = (uint16_t)EFX_DWORD_FIELD(dword,
+ MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
+
+ stat[id].emsv_state = (uint16_t)EFX_DWORD_FIELD(dword,
+ MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ }
+ }
+
+ if (stat_maskp != NULL) {
+ memcpy(stat_maskp, stat_mask, sizeof (stat_mask));
+ }
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_mon_stat_t *idp,
+ __out efx_mon_stat_value_t *valuep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ uint16_t port_mask;
+ uint16_t sensor;
+ uint16_t state;
+ uint16_t value;
+ efx_mon_stat_t id;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(emip->emi_port > 0); /* MCDI port number is one-based */
+ port_mask = MCDI_MON_PORT_MASK(emip);
+
+ sensor = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_MONITOR);
+ state = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_STATE);
+ value = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_VALUE);
+
+ /* Hardware must support this MCDI sensor */
+ EFSYS_ASSERT3U(sensor, <,
+ (8 * enp->en_nic_cfg.enc_mcdi_sensor_mask_size));
+ EFSYS_ASSERT((sensor % MCDI_MON_PAGE_SIZE) != MC_CMD_SENSOR_PAGE0_NEXT);
+ EFSYS_ASSERT(enp->en_nic_cfg.enc_mcdi_sensor_maskp != NULL);
+ EFSYS_ASSERT(
+ (enp->en_nic_cfg.enc_mcdi_sensor_maskp[sensor/MCDI_MON_PAGE_SIZE] &
+ (1U << (sensor % MCDI_MON_PAGE_SIZE))) != 0);
+
+ /* But we don't have to understand it */
+ if (sensor >= EFX_ARRAY_SIZE(mcdi_sensor_map)) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ id = mcdi_sensor_map[sensor].msm_stat;
+ if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0)
+ return (ENODEV);
+ EFSYS_ASSERT(id < EFX_MON_NSTATS);
+
+ *idp = id;
+ valuep->emsv_value = value;
+ valuep->emsv_state = state;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_read_sensors(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint32_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_READ_SENSORS_EXT_IN_LEN,
+ MC_CMD_READ_SENSORS_EXT_OUT_LEN)];
+ uint32_t addr_lo, addr_hi;
+
+ req.emr_cmd = MC_CMD_READ_SENSORS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_READ_SENSORS_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_READ_SENSORS_EXT_OUT_LEN;
+
+ addr_lo = (uint32_t)(EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ addr_hi = (uint32_t)(EFSYS_MEM_ADDR(esmp) >> 32);
+
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_LO, addr_lo);
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_HI, addr_hi);
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_LENGTH, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ return (req.emr_rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_sensor_info_npages(
+ __in efx_nic_t *enp,
+ __out uint32_t *npagesp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ int page;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(npagesp != NULL);
+
+ page = 0;
+ do {
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SENSOR_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page++);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ } while (MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK) &
+ (1U << MC_CMD_SENSOR_PAGE0_NEXT));
+
+ *npagesp = page;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_sensor_info(
+ __in efx_nic_t *enp,
+ __out_ecount(npages) uint32_t *sensor_maskp,
+ __in size_t npages)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ uint32_t page;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(sensor_maskp != NULL);
+
+ if (npages < 1) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ for (page = 0; page < npages; page++) {
+ uint32_t mask;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SENSOR_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ mask = MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK);
+
+ if ((page != (npages - 1)) &&
+ ((mask & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) == 0)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ sensor_maskp[page] = mask;
+ }
+
+ if (sensor_maskp[npages - 1] & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t size = encp->enc_mon_stat_dma_buf_size;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_read_sensors(enp, esmp, size)) != 0)
+ goto fail1;
+
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, size);
+
+ mcdi_mon_decode_stats(enp,
+ encp->enc_mcdi_sensor_maskp,
+ encp->enc_mcdi_sensor_mask_size,
+ esmp, NULL, values);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_cfg_build(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t npages;
+ efx_rc_t rc;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ encp->enc_mon_type = EFX_MON_SFC90X0;
+ break;
+#endif
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ encp->enc_mon_type = EFX_MON_SFC91X0;
+ break;
+#endif
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ encp->enc_mon_type = EFX_MON_SFC92X0;
+ break;
+#endif
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ encp->enc_mon_type = EFX_MON_SFC92X0;
+ break;
+#endif
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Get mc sensor mask size */
+ npages = 0;
+ if ((rc = efx_mcdi_sensor_info_npages(enp, &npages)) != 0)
+ goto fail2;
+
+ encp->enc_mon_stat_dma_buf_size = npages * EFX_MON_STATS_PAGE_SIZE;
+ encp->enc_mcdi_sensor_mask_size = npages * sizeof (uint32_t);
+
+ /* Allocate mc sensor mask */
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+
+ if (encp->enc_mcdi_sensor_maskp == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ /* Read mc sensor mask */
+ if ((rc = efx_mcdi_sensor_info(enp,
+ encp->enc_mcdi_sensor_maskp,
+ npages)) != 0)
+ goto fail4;
+
+ /* Build monitor statistics mask */
+ mcdi_mon_decode_stats(enp,
+ encp->enc_mcdi_sensor_maskp,
+ encp->enc_mcdi_sensor_mask_size,
+ NULL, encp->enc_mon_stat_mask, NULL);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+ EFSYS_KMEM_FREE(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+mcdi_mon_cfg_free(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ if (encp->enc_mcdi_sensor_maskp != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+ }
+}
+
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#endif /* EFSYS_OPT_MON_MCDI */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.h b/src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.h
new file mode 100644
index 00000000..5aa6a6a2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_MCDI_MON_H
+#define _SYS_MCDI_MON_H
+
+#include "efx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if EFSYS_OPT_MON_MCDI
+
+#if EFSYS_OPT_MON_STATS
+
+ __checkReturn efx_rc_t
+mcdi_mon_cfg_build(
+ __in efx_nic_t *enp);
+
+ void
+mcdi_mon_cfg_free(
+ __in efx_nic_t *enp);
+
+
+extern __checkReturn efx_rc_t
+mcdi_mon_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_mon_stat_t *idp,
+ __out efx_mon_stat_value_t *valuep);
+
+extern __checkReturn efx_rc_t
+mcdi_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#endif /* EFSYS_OPT_MON_MCDI */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MCDI_MON_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/medford2_impl.h b/src/spdk/dpdk/drivers/net/sfc/base/medford2_impl.h
new file mode 100644
index 00000000..6259a700
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/medford2_impl.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_MEDFORD2_IMPL_H
+#define _SYS_MEDFORD2_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifndef ER_EZ_TX_PIOBUF_SIZE
+#define ER_EZ_TX_PIOBUF_SIZE 4096
+#endif
+
+
+#define MEDFORD2_PIOBUF_NBUFS (16)
+#define MEDFORD2_PIOBUF_SIZE (ER_EZ_TX_PIOBUF_SIZE)
+
+#define MEDFORD2_MIN_PIO_ALLOC_SIZE (MEDFORD2_PIOBUF_SIZE / 32)
+
+
+extern __checkReturn efx_rc_t
+medford2_board_cfg(
+ __in efx_nic_t *enp);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MEDFORD2_IMPL_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/medford2_nic.c b/src/spdk/dpdk/drivers/net/sfc/base/medford2_nic.c
new file mode 100644
index 00000000..7f5ad175
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/medford2_nic.c
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_MEDFORD2
+
+static __checkReturn efx_rc_t
+medford2_nic_get_required_pcie_bandwidth(
+ __in efx_nic_t *enp,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t port_modes;
+ uint32_t current_mode;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /* FIXME: support new Medford2 dynamic port modes */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
+ &current_mode)) != 0) {
+ /* No port mode info available. */
+ bandwidth = 0;
+ goto out;
+ }
+
+ if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode,
+ &bandwidth)) != 0)
+ goto fail1;
+
+out:
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+medford2_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t sysclk, dpcpu_clk;
+ uint32_t end_padding;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /*
+ * Enable firmware workarounds for hardware errata.
+ * Expected responses are:
+ * - 0 (zero):
+ * Success: workaround enabled or disabled as requested.
+ * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
+ * Firmware does not support the MC_CMD_WORKAROUND request.
+ * (assume that the workaround is not supported).
+ * - MC_CMD_ERR_ENOENT (reported as ENOENT):
+ * Firmware does not support the requested workaround.
+ * - MC_CMD_ERR_EPERM (reported as EACCES):
+ * Unprivileged function cannot enable/disable workarounds.
+ *
+ * See efx_mcdi_request_errcode() for MCDI error translations.
+ */
+
+
+ if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+ /*
+ * Interrupt testing does not work for VFs on Medford2.
+ * See bug50084 and bug71432 comment 21.
+ */
+ encp->enc_bug41750_workaround = B_TRUE;
+ }
+
+ /* Chained multicast is always enabled on Medford2 */
+ encp->enc_bug26807_workaround = B_TRUE;
+
+ /*
+ * If the bug61265 workaround is enabled, then interrupt holdoff timers
+ * cannot be controlled by timer table writes, so MCDI must be used
+ * (timer table writes can still be used for wakeup timers).
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE,
+ NULL);
+ if ((rc == 0) || (rc == EACCES))
+ encp->enc_bug61265_workaround = B_TRUE;
+ else if ((rc == ENOTSUP) || (rc == ENOENT))
+ encp->enc_bug61265_workaround = B_FALSE;
+ else
+ goto fail1;
+
+ /* Get clock frequencies (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
+ goto fail2;
+
+ /*
+ * The Medford2 timer quantum is 1536 dpcpu_clk cycles, documented for
+ * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ */
+ encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+
+ /* Get the RX DMA end padding alignment configuration */
+ if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
+ if (rc != EACCES)
+ goto fail3;
+
+ /* Assume largest tail padding size supported by hardware */
+ end_padding = 256;
+ }
+ encp->enc_rx_buf_align_end = end_padding;
+
+ /*
+ * The maximum supported transmit queue size is 2048. TXQs with 4096
+ * descriptors are not supported as the top bit is used for vfifo
+ * stuffing.
+ */
+ encp->enc_txq_max_ndescs = 2048;
+
+ EFX_STATIC_ASSERT(MEDFORD2_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
+ encp->enc_piobuf_limit = MEDFORD2_PIOBUF_NBUFS;
+ encp->enc_piobuf_size = MEDFORD2_PIOBUF_SIZE;
+ encp->enc_piobuf_min_alloc_size = MEDFORD2_MIN_PIO_ALLOC_SIZE;
+
+ /*
+ * Medford2 stores a single global copy of VPD, not per-PF as on
+ * Huntington.
+ */
+ encp->enc_vpd_is_global = B_TRUE;
+
+ rc = medford2_nic_get_required_pcie_bandwidth(enp, &bandwidth);
+ if (rc != 0)
+ goto fail4;
+ encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MEDFORD2 */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/medford_impl.h b/src/spdk/dpdk/drivers/net/sfc/base/medford_impl.h
new file mode 100644
index 00000000..d076afa2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/medford_impl.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_MEDFORD_IMPL_H
+#define _SYS_MEDFORD_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifndef ER_EZ_TX_PIOBUF_SIZE
+#define ER_EZ_TX_PIOBUF_SIZE 4096
+#endif
+
+
+#define MEDFORD_PIOBUF_NBUFS (16)
+#define MEDFORD_PIOBUF_SIZE (ER_EZ_TX_PIOBUF_SIZE)
+
+#define MEDFORD_MIN_PIO_ALLOC_SIZE (MEDFORD_PIOBUF_SIZE / 32)
+
+
+extern __checkReturn efx_rc_t
+medford_board_cfg(
+ __in efx_nic_t *enp);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MEDFORD_IMPL_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/medford_nic.c b/src/spdk/dpdk/drivers/net/sfc/base/medford_nic.c
new file mode 100644
index 00000000..6dc895f5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/medford_nic.c
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_MEDFORD
+
+static __checkReturn efx_rc_t
+medford_nic_get_required_pcie_bandwidth(
+ __in efx_nic_t *enp,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t port_modes;
+ uint32_t current_mode;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
+ &current_mode)) != 0) {
+ /* No port mode info available. */
+ bandwidth = 0;
+ goto out;
+ }
+
+ if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode,
+ &bandwidth)) != 0)
+ goto fail1;
+
+out:
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+medford_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t sysclk, dpcpu_clk;
+ uint32_t end_padding;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /*
+ * Enable firmware workarounds for hardware errata.
+ * Expected responses are:
+ * - 0 (zero):
+ * Success: workaround enabled or disabled as requested.
+ * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
+ * Firmware does not support the MC_CMD_WORKAROUND request.
+ * (assume that the workaround is not supported).
+ * - MC_CMD_ERR_ENOENT (reported as ENOENT):
+ * Firmware does not support the requested workaround.
+ * - MC_CMD_ERR_EPERM (reported as EACCES):
+ * Unprivileged function cannot enable/disable workarounds.
+ *
+ * See efx_mcdi_request_errcode() for MCDI error translations.
+ */
+
+
+ if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+ /*
+ * Interrupt testing does not work for VFs. See bug50084 and
+ * bug71432 comment 21.
+ */
+ encp->enc_bug41750_workaround = B_TRUE;
+ }
+
+ /* Chained multicast is always enabled on Medford */
+ encp->enc_bug26807_workaround = B_TRUE;
+
+ /*
+ * If the bug61265 workaround is enabled, then interrupt holdoff timers
+ * cannot be controlled by timer table writes, so MCDI must be used
+ * (timer table writes can still be used for wakeup timers).
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE,
+ NULL);
+ if ((rc == 0) || (rc == EACCES))
+ encp->enc_bug61265_workaround = B_TRUE;
+ else if ((rc == ENOTSUP) || (rc == ENOENT))
+ encp->enc_bug61265_workaround = B_FALSE;
+ else
+ goto fail1;
+
+ /* Get clock frequencies (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
+ goto fail2;
+
+ /*
+ * The Medford timer quantum is 1536 dpcpu_clk cycles, documented for
+ * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ */
+ encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+
+ /* Get the RX DMA end padding alignment configuration */
+ if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
+ if (rc != EACCES)
+ goto fail3;
+
+ /* Assume largest tail padding size supported by hardware */
+ end_padding = 256;
+ }
+ encp->enc_rx_buf_align_end = end_padding;
+
+ /*
+ * The maximum supported transmit queue size is 2048. TXQs with 4096
+ * descriptors are not supported as the top bit is used for vfifo
+ * stuffing.
+ */
+ encp->enc_txq_max_ndescs = 2048;
+
+ EFX_STATIC_ASSERT(MEDFORD_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
+ encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS;
+ encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE;
+ encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE;
+
+ /*
+ * Medford stores a single global copy of VPD, not per-PF as on
+ * Huntington.
+ */
+ encp->enc_vpd_is_global = B_TRUE;
+
+ rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth);
+ if (rc != 0)
+ goto fail4;
+ encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MEDFORD */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/meson.build b/src/spdk/dpdk/drivers/net/sfc/base/meson.build
new file mode 100644
index 00000000..da2bf44d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/meson.build
@@ -0,0 +1,76 @@
+# Copyright (c) 2016-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+sources = [
+ 'efx_bootcfg.c',
+ 'efx_crc32.c',
+ 'efx_ev.c',
+ 'efx_filter.c',
+ 'efx_hash.c',
+ 'efx_intr.c',
+ 'efx_lic.c',
+ 'efx_mac.c',
+ 'efx_mcdi.c',
+ 'efx_mon.c',
+ 'efx_nic.c',
+ 'efx_nvram.c',
+ 'efx_phy.c',
+ 'efx_port.c',
+ 'efx_rx.c',
+ 'efx_sram.c',
+ 'efx_tunnel.c',
+ 'efx_tx.c',
+ 'efx_vpd.c',
+ 'mcdi_mon.c',
+ 'siena_mac.c',
+ 'siena_mcdi.c',
+ 'siena_nic.c',
+ 'siena_nvram.c',
+ 'siena_phy.c',
+ 'siena_sram.c',
+ 'siena_vpd.c',
+ 'ef10_ev.c',
+ 'ef10_filter.c',
+ 'ef10_image.c',
+ 'ef10_intr.c',
+ 'ef10_mac.c',
+ 'ef10_mcdi.c',
+ 'ef10_nic.c',
+ 'ef10_nvram.c',
+ 'ef10_phy.c',
+ 'ef10_rx.c',
+ 'ef10_tx.c',
+ 'ef10_vpd.c',
+ 'hunt_nic.c',
+ 'medford_nic.c',
+ 'medford2_nic.c'
+]
+
+extra_flags = [
+ '-Wno-sign-compare',
+ '-Wno-unused-parameter',
+ '-Wno-unused-variable',
+ '-Wno-empty-body',
+ '-Wno-unused-but-set-variable'
+]
+
+c_args = cflags
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+if build
+ base_lib = static_library('sfc_base', sources,
+ include_directories: includes,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+
+ base_objs = base_lib.extract_all_objects()
+else
+ base_objs = []
+endif
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_flash.h b/src/spdk/dpdk/drivers/net/sfc/base/siena_flash.h
new file mode 100644
index 00000000..74bb9496
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_flash.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_SIENA_FLASH_H
+#define _SYS_SIENA_FLASH_H
+
+#pragma pack(1)
+
+/* Fixed locations near the start of flash (which may be in the internal PHY
+ * firmware header) point to the boot header.
+ *
+ * - parsed by MC boot ROM and firmware
+ * - reserved (but not parsed) by PHY firmware
+ * - opaque to driver
+ */
+
+#define SIENA_MC_BOOT_PHY_FW_HDR_LEN (0x20)
+
+#define SIENA_MC_BOOT_PTR_LOCATION (0x18) /* First thing we try to boot */
+#define SIENA_MC_BOOT_ALT_PTR_LOCATION (0x1c) /* Alternative if that fails */
+
+#define SIENA_MC_BOOT_HDR_LEN (0x200)
+
+#define SIENA_MC_BOOT_MAGIC (0x51E4A001)
+#define SIENA_MC_BOOT_VERSION (1)
+
+
+/*Structures supporting an arbitrary number of binary blobs in the flash image
+ intended to house code and tables for the satellite cpus*/
+/*thanks to random.org for:*/
+#define BLOBS_HEADER_MAGIC (0xBDA3BBD4)
+#define BLOB_HEADER_MAGIC (0xA1478A91)
+
+typedef struct blobs_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic;
+ efx_dword_t no_of_blobs;
+} blobs_hdr_t;
+
+typedef struct blob_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic;
+ efx_dword_t cpu_type;
+ efx_dword_t build_variant;
+ efx_dword_t offset;
+ efx_dword_t length;
+ efx_dword_t checksum;
+} blob_hdr_t;
+
+#define BLOB_CPU_TYPE_TXDI_TEXT (0)
+#define BLOB_CPU_TYPE_RXDI_TEXT (1)
+#define BLOB_CPU_TYPE_TXDP_TEXT (2)
+#define BLOB_CPU_TYPE_RXDP_TEXT (3)
+#define BLOB_CPU_TYPE_RXHRSL_HR_LUT (4)
+#define BLOB_CPU_TYPE_RXHRSL_HR_LUT_CFG (5)
+#define BLOB_CPU_TYPE_TXHRSL_HR_LUT (6)
+#define BLOB_CPU_TYPE_TXHRSL_HR_LUT_CFG (7)
+#define BLOB_CPU_TYPE_RXHRSL_HR_PGM (8)
+#define BLOB_CPU_TYPE_RXHRSL_SL_PGM (9)
+#define BLOB_CPU_TYPE_TXHRSL_HR_PGM (10)
+#define BLOB_CPU_TYPE_TXHRSL_SL_PGM (11)
+#define BLOB_CPU_TYPE_RXDI_VTBL0 (12)
+#define BLOB_CPU_TYPE_TXDI_VTBL0 (13)
+#define BLOB_CPU_TYPE_RXDI_VTBL1 (14)
+#define BLOB_CPU_TYPE_TXDI_VTBL1 (15)
+#define BLOB_CPU_TYPE_DUMPSPEC (32)
+#define BLOB_CPU_TYPE_MC_XIP (33)
+
+#define BLOB_CPU_TYPE_INVALID (31)
+
+/*
+ * The upper four bits of the CPU type field specify the compression
+ * algorithm used for this blob.
+ */
+#define BLOB_COMPRESSION_MASK (0xf0000000)
+#define BLOB_CPU_TYPE_MASK (0x0fffffff)
+
+#define BLOB_COMPRESSION_NONE (0x00000000) /* Stored as is */
+#define BLOB_COMPRESSION_LZ (0x10000000) /* see lib/lzdecoder.c */
+
+typedef struct siena_mc_boot_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_BOOT_MAGIC */
+ efx_word_t hdr_version; /* this structure definition is version 1 */
+ efx_byte_t board_type;
+ efx_byte_t firmware_version_a;
+ efx_byte_t firmware_version_b;
+ efx_byte_t firmware_version_c;
+ efx_word_t checksum; /* of whole header area + firmware image */
+ efx_word_t firmware_version_d;
+ efx_byte_t mcfw_subtype;
+ efx_byte_t generation; /* MC (Medford and later): MC partition generation when */
+ /* written to NVRAM. */
+ /* MUM & SUC images: subtype. */
+ /* (Otherwise set to 0) */
+ efx_dword_t firmware_text_offset; /* offset to firmware .text */
+ efx_dword_t firmware_text_size; /* length of firmware .text, in bytes */
+ efx_dword_t firmware_data_offset; /* offset to firmware .data */
+ efx_dword_t firmware_data_size; /* length of firmware .data, in bytes */
+ efx_byte_t spi_rate; /* SPI rate for reading image, 0 is BootROM default */
+ efx_byte_t spi_phase_adj; /* SPI SDO/SCL phase adjustment, 0 is default (no adj) */
+ efx_word_t xpm_sector; /* XPM (MEDFORD and later): The sector that contains */
+ /* the key, or 0xffff if unsigned. (Otherwise set to 0) */
+ efx_byte_t mumfw_subtype; /* MUM & SUC images: subtype. (Otherwise set to 0) */
+ efx_byte_t reserved_b[3]; /* (set to 0) */
+ efx_dword_t security_level; /* This number increases every time a serious security flaw */
+ /* is fixed. A secure NIC may not downgrade to any image */
+ /* with a lower security level than the current image. */
+ /* Note: The number in this header should only be used for */
+ /* determining the level of new images, not to determine */
+ /* the level of the current image as this header is not */
+ /* protected by a CMAC. */
+ efx_dword_t reserved_c[5]; /* (set to 0) */
+} siena_mc_boot_hdr_t;
+
+#define SIENA_MC_BOOT_HDR_PADDING \
+ (SIENA_MC_BOOT_HDR_LEN - sizeof(siena_mc_boot_hdr_t))
+
+#define SIENA_MC_STATIC_CONFIG_MAGIC (0xBDCF5555)
+#define SIENA_MC_STATIC_CONFIG_VERSION (0)
+
+typedef struct siena_mc_static_config_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_STATIC_CONFIG_MAGIC */
+ efx_word_t length; /* of header area (i.e. not including VPD) */
+ efx_byte_t version;
+ efx_byte_t csum; /* over header area (i.e. not including VPD) */
+ efx_dword_t static_vpd_offset;
+ efx_dword_t static_vpd_length;
+ efx_dword_t capabilities;
+ efx_byte_t mac_addr_base[6];
+ efx_byte_t green_mode_cal; /* Green mode calibration result */
+ efx_byte_t green_mode_valid; /* Whether cal holds a valid value */
+ efx_word_t mac_addr_count;
+ efx_word_t mac_addr_stride;
+ efx_word_t calibrated_vref; /* Vref as measured during production */
+ efx_word_t adc_vref; /* Vref as read by ADC */
+ efx_dword_t reserved2[1]; /* (write as zero) */
+ efx_dword_t num_dbi_items;
+ struct {
+ efx_word_t addr;
+ efx_word_t byte_enables;
+ efx_dword_t value;
+ } dbi[];
+} siena_mc_static_config_hdr_t;
+
+/* This prefixes a valid XIP partition */
+#define XIP_PARTITION_MAGIC (0x51DEC0DE)
+
+#define SIENA_MC_DYNAMIC_CONFIG_MAGIC (0xBDCFDDDD)
+#define SIENA_MC_DYNAMIC_CONFIG_VERSION (0)
+
+typedef struct siena_mc_fw_version_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t fw_subtype;
+ efx_word_t version_w;
+ efx_word_t version_x;
+ efx_word_t version_y;
+ efx_word_t version_z;
+} siena_mc_fw_version_t;
+
+typedef struct siena_mc_dynamic_config_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_DYNAMIC_CONFIG_MAGIC */
+ efx_word_t length; /* of header area (i.e. not including VPD) */
+ efx_byte_t version;
+ efx_byte_t csum; /* over header area (i.e. not including VPD) */
+ efx_dword_t dynamic_vpd_offset;
+ efx_dword_t dynamic_vpd_length;
+ efx_dword_t num_fw_version_items;
+ siena_mc_fw_version_t fw_version[];
+} siena_mc_dynamic_config_hdr_t;
+
+#define SIENA_MC_EXPROM_SINGLE_MAGIC (0xAA55) /* little-endian uint16_t */
+
+#define SIENA_MC_EXPROM_COMBO_MAGIC (0xB0070102) /* little-endian uint32_t */
+#define SIENA_MC_EXPROM_COMBO_V2_MAGIC (0xB0070103) /* little-endian uint32_t */
+
+typedef struct siena_mc_combo_rom_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_EXPROM_COMBO_MAGIC or SIENA_MC_EXPROM_COMBO_V2_MAGIC */
+ union {
+ struct {
+ efx_dword_t len1; /* length of first image */
+ efx_dword_t len2; /* length of second image */
+ efx_dword_t off1; /* offset of first byte to edit to combine images */
+ efx_dword_t off2; /* offset of second byte to edit to combine images */
+ efx_word_t infoblk0_off;/* infoblk offset */
+ efx_word_t infoblk1_off;/* infoblk offset */
+ efx_byte_t infoblk_len;/* length of space reserved for one infoblk structure */
+ efx_byte_t reserved[7];/* (set to 0) */
+ } v1;
+ struct {
+ efx_dword_t len1; /* length of first image */
+ efx_dword_t len2; /* length of second image */
+ efx_dword_t off1; /* offset of first byte to edit to combine images */
+ efx_dword_t off2; /* offset of second byte to edit to combine images */
+ efx_word_t infoblk_off;/* infoblk start offset */
+ efx_word_t infoblk_count;/* infoblk count */
+ efx_byte_t infoblk_len;/* length of space reserved for one infoblk structure */
+ efx_byte_t reserved[7];/* (set to 0) */
+ } v2;
+ } data;
+} siena_mc_combo_rom_hdr_t;
+
+#pragma pack()
+
+#endif /* _SYS_SIENA_FLASH_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_impl.h b/src/spdk/dpdk/drivers/net/sfc/base/siena_impl.h
new file mode 100644
index 00000000..d70bbff8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_impl.h
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_SIENA_IMPL_H
+#define _SYS_SIENA_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_mcdi.h"
+#include "siena_flash.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef EFX_TXQ_DC_SIZE
+#define EFX_TXQ_DC_SIZE 1 /* 16 descriptors */
+#endif
+#ifndef EFX_RXQ_DC_SIZE
+#define EFX_RXQ_DC_SIZE 3 /* 64 descriptors */
+#endif
+#define EFX_TXQ_DC_NDESCS(_dcsize) (8 << (_dcsize))
+
+#define SIENA_NVRAM_CHUNK 0x80
+
+
+extern __checkReturn efx_rc_t
+siena_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_nic_reset(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_nic_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern efx_sram_pattern_fn_t __efx_sram_pattern_fns[];
+
+typedef struct siena_register_set_s {
+ unsigned int address;
+ unsigned int step;
+ unsigned int rows;
+ efx_oword_t mask;
+} siena_register_set_t;
+
+extern __checkReturn efx_rc_t
+siena_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+siena_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+siena_nic_unprobe(
+ __in efx_nic_t *enp);
+
+#define SIENA_SRAM_ROWS 0x12000
+
+extern void
+siena_sram_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_sram_pattern_fn_t func);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+siena_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern void
+siena_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len);
+
+extern __checkReturn boolean_t
+siena_mcdi_poll_response(
+ __in efx_nic_t *enp);
+
+extern void
+siena_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length);
+
+extern efx_rc_t
+siena_mcdi_poll_reboot(
+ __in efx_nic_t *enp);
+
+extern void
+siena_mcdi_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp);
+
+extern void
+siena_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *verify_resultp);
+
+extern __checkReturn efx_rc_t
+siena_nvram_get_dynamic_cfg(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t vpd,
+ __out siena_mc_dynamic_config_hdr_t **dcfgp,
+ __out size_t *sizep);
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+siena_nvram_get_subtype(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *verify_resultp);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4]);
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+siena_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+siena_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+siena_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+typedef struct siena_link_state_s {
+ uint32_t sls_adv_cap_mask;
+ uint32_t sls_lp_cap_mask;
+ unsigned int sls_fcntl;
+ efx_link_mode_t sls_link_mode;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t sls_loopback;
+#endif
+ boolean_t sls_mac_up;
+} siena_link_state_t;
+
+extern void
+siena_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+siena_phy_get_link(
+ __in efx_nic_t *enp,
+ __out siena_link_state_t *slsp);
+
+extern __checkReturn efx_rc_t
+siena_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t on);
+
+extern __checkReturn efx_rc_t
+siena_phy_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_phy_verify(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+#if EFSYS_OPT_PHY_STATS
+
+extern void
+siena_phy_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t vmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint64_t *smaskp,
+ __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat);
+
+extern __checkReturn efx_rc_t
+siena_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+extern __checkReturn efx_rc_t
+siena_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+siena_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+siena_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+extern __checkReturn efx_rc_t
+siena_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+siena_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+extern __checkReturn efx_rc_t
+siena_mac_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+#if EFSYS_OPT_LOOPBACK
+
+extern __checkReturn efx_rc_t
+siena_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type);
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+extern __checkReturn efx_rc_t
+siena_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size);
+
+extern __checkReturn efx_rc_t
+siena_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SIENA_IMPL_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_mac.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_mac.c
new file mode 100644
index 00000000..f8857cdd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_mac.c
@@ -0,0 +1,471 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+ __checkReturn efx_rc_t
+siena_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ siena_link_state_t sls;
+ efx_rc_t rc;
+
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail1;
+
+ epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;
+ epp->ep_fcntl = sls.sls_fcntl;
+
+ *link_modep = sls.sls_link_mode;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ siena_link_state_t sls;
+ efx_rc_t rc;
+
+ /*
+ * Because Siena doesn't *require* polling, we can't rely on
+ * siena_mac_poll() being executed to populate epp->ep_mac_up.
+ */
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail1;
+
+ *mac_upp = sls.sls_mac_up;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_mac_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_oword_t multicast_hash[2];
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_SET_MAC_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN),
+ MAX(MC_CMD_SET_MCAST_HASH_IN_LEN,
+ MC_CMD_SET_MCAST_HASH_OUT_LEN))];
+ unsigned int fcntl;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
+ epp->ep_mac_addr);
+ MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, !epp->ep_all_unicst,
+ SET_MAC_IN_REJECT_BRDCST, !epp->ep_brdcst);
+
+ if (epp->ep_fcntl_autoneg)
+ /* efx_fcntl_set() has already set the phy capabilities */
+ fcntl = MC_CMD_FCNTL_AUTO;
+ else if (epp->ep_fcntl & EFX_FCNTL_RESPOND)
+ fcntl = (epp->ep_fcntl & EFX_FCNTL_GENERATE)
+ ? MC_CMD_FCNTL_BIDIR
+ : MC_CMD_FCNTL_RESPOND;
+ else
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, fcntl);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* Push multicast hash */
+
+ if (epp->ep_all_mulcst) {
+ /* A hash matching all multicast is all 1s */
+ EFX_SET_OWORD(multicast_hash[0]);
+ EFX_SET_OWORD(multicast_hash[1]);
+ } else if (epp->ep_mulcst) {
+ /* Use the hash set by the multicast list */
+ multicast_hash[0] = epp->ep_multicst_hash[0];
+ multicast_hash[1] = epp->ep_multicst_hash[1];
+ } else {
+ /* A hash matching no traffic is simply 0 */
+ EFX_ZERO_OWORD(multicast_hash[0]);
+ EFX_ZERO_OWORD(multicast_hash[1]);
+ }
+
+ /*
+ * Broadcast packets go through the multicast hash filter.
+ * The IEEE 802.3 CRC32 of the broadcast address is 0xbe2612ff
+ * so we always add bit 0xff to the mask (bit 0x7f in the
+ * second octword).
+ */
+ if (epp->ep_brdcst) {
+ /*
+ * NOTE: due to constant folding, some of this evaluates
+ * to null expressions, giving E_EXPR_NULL_EFFECT during
+ * lint on Illumos. No good way to fix this without
+ * explicit coding the individual word/bit setting.
+ * So just suppress lint for this one line.
+ */
+ /* LINTED */
+ EFX_SET_OWORD_BIT(multicast_hash[1], 0x7f);
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MCAST_HASH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MCAST_HASH_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MCAST_HASH_OUT_LEN;
+
+ memcpy(MCDI_IN2(req, uint8_t, SET_MCAST_HASH_IN_HASH0),
+ multicast_hash, sizeof (multicast_hash));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+siena_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_loopback_type_t old_loopback_type;
+ efx_link_mode_t old_loopback_link_mode;
+ efx_rc_t rc;
+
+ /* The PHY object handles this on Siena */
+ old_loopback_type = epp->ep_loopback_type;
+ old_loopback_link_mode = epp->ep_loopback_link_mode;
+ epp->ep_loopback_type = loopback_type;
+ epp->ep_loopback_link_mode = link_mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_loopback_type = old_loopback_type;
+ epp->ep_loopback_link_mode = old_loopback_link_mode;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+ __checkReturn efx_rc_t
+siena_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size)
+{
+ const struct efx_mac_stats_range siena_stats[] = {
+ { EFX_MAC_RX_OCTETS, EFX_MAC_RX_GE_15XX_PKTS },
+ /* EFX_MAC_RX_ERRORS is not supported */
+ { EFX_MAC_RX_FCS_ERRORS, EFX_MAC_TX_EX_DEF_PKTS },
+ };
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ siena_stats, EFX_ARRAY_SIZE(siena_stats))) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#define SIENA_MAC_STAT_READ(_esmp, _field, _eqp) \
+ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp)
+
+ __checkReturn efx_rc_t
+siena_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp)
+{
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_qword_t generation_start;
+ efx_qword_t generation_end;
+ efx_qword_t value;
+ efx_rc_t rc;
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) {
+ /* MAC stats count too small */
+ rc = ENOSPC;
+ goto fail1;
+ }
+ if (EFSYS_MEM_SIZE(esmp) <
+ (encp->enc_mac_stats_nstats * sizeof (efx_qword_t))) {
+ /* DMA buffer too small */
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ /* Read END first so we don't race with the MC */
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp));
+ SIENA_MAC_STAT_READ(esmp, (encp->enc_mac_stats_nstats - 1),
+ &generation_end);
+ EFSYS_MEM_READ_BARRIER();
+
+ /* TX */
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value);
+ EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value);
+
+ /* RX */
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);
+
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp));
+ EFSYS_MEM_READ_BARRIER();
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
+ &generation_start);
+
+ /* Check that we didn't read the stats in the middle of a DMA */
+ /* Not a good enough check ? */
+ if (memcmp(&generation_start, &generation_end,
+ sizeof (generation_start)))
+ return (EAGAIN);
+
+ if (generationp)
+ *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+ __checkReturn efx_rc_t
+siena_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ return (ENOTSUP);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_mcdi.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_mcdi.c
new file mode 100644
index 00000000..d727c187
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_mcdi.c
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA && EFSYS_OPT_MCDI
+
+#define SIENA_MCDI_PDU(_emip) \
+ (((emip)->emi_port == 1) \
+ ? MC_SMEM_P0_PDU_OFST >> 2 \
+ : MC_SMEM_P1_PDU_OFST >> 2)
+
+#define SIENA_MCDI_DOORBELL(_emip) \
+ (((emip)->emi_port == 1) \
+ ? MC_SMEM_P0_DOORBELL_OFST >> 2 \
+ : MC_SMEM_P1_DOORBELL_OFST >> 2)
+
+#define SIENA_MCDI_STATUS(_emip) \
+ (((emip)->emi_port == 1) \
+ ? MC_SMEM_P0_STATUS_OFST >> 2 \
+ : MC_SMEM_P1_STATUS_OFST >> 2)
+
+
+ void
+siena_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t dword;
+ unsigned int pdur;
+ unsigned int dbr;
+ unsigned int pos;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+ dbr = SIENA_MCDI_DOORBELL(emip);
+
+ /* Write the header */
+ EFSYS_ASSERT3U(hdr_len, ==, sizeof (efx_dword_t));
+ dword = *(efx_dword_t *)hdrp;
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, pdur, &dword, B_TRUE);
+
+ /* Write the payload */
+ for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)sdup + pos);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM,
+ pdur + 1 + (pos >> 2), &dword, B_FALSE);
+ }
+
+ /* Ring the doorbell */
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xd004be11);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, dbr, &dword, B_FALSE);
+}
+
+ efx_rc_t
+siena_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+#if 1
+ /*
+ * XXX Bug 25922, bug 26099: This function is not being used
+ * properly. Until its callers are fixed, it should always
+ * return 0.
+ */
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+#else
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int rebootr;
+ efx_dword_t dword;
+ uint32_t value;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ rebootr = SIENA_MCDI_STATUS(emip);
+
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);
+ value = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ if (value == 0)
+ return (0);
+
+ EFX_ZERO_DWORD(dword);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);
+
+ if (value == MC_STATUS_DWORD_ASSERT)
+ return (EINTR);
+ else
+ return (EIO);
+#endif
+}
+
+extern __checkReturn boolean_t
+siena_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr;
+ unsigned int pdur;
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, pdur, &hdr, B_FALSE);
+ return (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE);
+}
+
+ void
+siena_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int pdur;
+ unsigned int pos = 0;
+ efx_dword_t data;
+ size_t remaining = length;
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+
+ while (remaining > 0) {
+ size_t chunk = MIN(remaining, sizeof (data));
+
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM,
+ pdur + ((offset + pos) >> 2), &data, B_FALSE);
+ memcpy((uint8_t *)bufferp + pos, &data, chunk);
+ pos += chunk;
+ remaining -= chunk;
+ }
+}
+
+ __checkReturn efx_rc_t
+siena_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_oword_t oword;
+ unsigned int portnum;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(mtp))
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Determine the port number to use for MCDI */
+ EFX_BAR_READO(enp, FR_AZ_CS_DEBUG_REG, &oword);
+ portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM);
+
+ if (portnum == 0) {
+ /* Presumably booted from ROM; only MCDI port 1 will work */
+ emip->emi_port = 1;
+ } else if (portnum <= 2) {
+ emip->emi_port = portnum;
+ } else {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Siena BootROM and firmware only support MCDIv1 */
+ emip->emi_max_version = 1;
+
+ /*
+ * Wipe the atomic reboot status so subsequent MCDI requests succeed.
+ * BOOT_STATUS is preserved so eno_nic_probe() can boot out of the
+ * assertion handler.
+ */
+ (void) siena_mcdi_poll_reboot(enp);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+siena_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ switch (id) {
+ case EFX_MCDI_FEATURE_FW_UPDATE:
+ case EFX_MCDI_FEATURE_LINK_CONTROL:
+ case EFX_MCDI_FEATURE_MACADDR_CHANGE:
+ case EFX_MCDI_FEATURE_MAC_SPOOFING:
+ *supportedp = B_TRUE;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Default timeout for MCDI command processing. */
+#define SIENA_MCDI_CMD_TIMEOUT_US (10 * 1000 * 1000)
+
+ void
+siena_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp)
+{
+ _NOTE(ARGUNUSED(enp, emrp))
+
+ *timeoutp = SIENA_MCDI_CMD_TIMEOUT_US;
+}
+
+
+#endif /* EFSYS_OPT_SIENA && EFSYS_OPT_MCDI */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_nic.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_nic.c
new file mode 100644
index 00000000..31eef80b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_nic.c
@@ -0,0 +1,795 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#include "mcdi_mon.h"
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+static __checkReturn efx_rc_t
+siena_nic_get_partn_mask(
+ __in efx_nic_t *enp,
+ __out unsigned int *maskp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_TYPES_IN_LEN,
+ MC_CMD_NVRAM_TYPES_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_TYPES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_TYPES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_TYPES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *maskp = MCDI_OUT_DWORD(req, NVRAM_TYPES_OUT_TYPES);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+static __checkReturn efx_rc_t
+siena_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t mac_addr[6];
+ efx_dword_t capabilities;
+ uint32_t board_type;
+ uint32_t nevq, nrxq, ntxq;
+ efx_rc_t rc;
+
+ /* Siena has a fixed 8Kbyte VI window size */
+ EFX_STATIC_ASSERT(1U << EFX_VI_WINDOW_SHIFT_8K == 8192);
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
+
+ /* External port identifier using one-based port numbering */
+ encp->enc_external_port = (uint8_t)enp->en_mcdi.em_emip.emi_port;
+
+ /* Board configuration */
+ if ((rc = efx_mcdi_get_board_cfg(enp, &board_type,
+ &capabilities, mac_addr)) != 0)
+ goto fail1;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ encp->enc_board_type = board_type;
+
+ /*
+ * There is no possibility to determine the number of PFs on Siena
+ * by issuing MCDI request, and it is not an easy task to find the
+ * value based on the board type, so 'enc_hw_pf_count' is set to 1
+ */
+ encp->enc_hw_pf_count = 1;
+
+ /* Additional capabilities */
+ encp->enc_clk_mult = 1;
+ if (EFX_DWORD_FIELD(capabilities, MC_CMD_CAPABILITIES_TURBO)) {
+ enp->en_features |= EFX_FEATURE_TURBO;
+
+ if (EFX_DWORD_FIELD(capabilities,
+ MC_CMD_CAPABILITIES_TURBO_ACTIVE)) {
+ encp->enc_clk_mult = 2;
+ }
+ }
+
+ encp->enc_evq_timer_quantum_ns =
+ EFX_EVQ_SIENA_TIMER_QUANTUM_NS / encp->enc_clk_mult;
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+
+ /* When hash header insertion is enabled, Siena inserts 16 bytes */
+ encp->enc_rx_prefix_size = 16;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+ encp->enc_rx_buf_align_end = 1;
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = 1;
+
+ /* There is one RSS context per function */
+ encp->enc_rx_scale_max_exclusive_contexts = 1;
+
+ encp->enc_rx_scale_hash_alg_mask |= (1U << EFX_RX_HASHALG_LFSR);
+ encp->enc_rx_scale_hash_alg_mask |= (1U << EFX_RX_HASHALG_TOEPLITZ);
+
+ /*
+ * It is always possible to use port numbers
+ * as the input data for hash computation.
+ */
+ encp->enc_rx_scale_l4_hash_supported = B_TRUE;
+
+ /* There is no support for additional RSS modes */
+ encp->enc_rx_scale_additional_modes_supported = B_FALSE;
+
+ encp->enc_tx_dma_desc_size_max = EFX_MASK32(FSF_AZ_TX_KER_BYTE_COUNT);
+ /* Fragments must not span 4k boundaries. */
+ encp->enc_tx_dma_desc_boundary = 4096;
+
+ /* Resource limits */
+ rc = efx_mcdi_get_resource_limits(enp, &nevq, &nrxq, &ntxq);
+ if (rc != 0) {
+ if (rc != ENOTSUP)
+ goto fail2;
+
+ nevq = 1024;
+ nrxq = EFX_RXQ_LIMIT_TARGET;
+ ntxq = EFX_TXQ_LIMIT_TARGET;
+ }
+ encp->enc_evq_limit = nevq;
+ encp->enc_rxq_limit = MIN(EFX_RXQ_LIMIT_TARGET, nrxq);
+ encp->enc_txq_limit = MIN(EFX_TXQ_LIMIT_TARGET, ntxq);
+
+ encp->enc_txq_max_ndescs = 4096;
+
+ encp->enc_buftbl_limit = SIENA_SRAM_ROWS -
+ (encp->enc_txq_limit * EFX_TXQ_DC_NDESCS(EFX_TXQ_DC_SIZE)) -
+ (encp->enc_rxq_limit * EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE));
+
+ encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_v2_n_contexts = 0;
+ encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
+ encp->enc_rx_packed_stream_supported = B_FALSE;
+ encp->enc_rx_var_packed_stream_supported = B_FALSE;
+ encp->enc_rx_es_super_buffer_supported = B_FALSE;
+ encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
+
+ /* Siena supports two 10G ports, and 8 lanes of PCIe Gen2 */
+ encp->enc_required_pcie_bandwidth_mbps = 2 * 10000;
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN2;
+
+ encp->enc_nvram_update_verify_result_supported = B_FALSE;
+
+ encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
+
+ encp->enc_filter_action_flag_supported = B_FALSE;
+ encp->enc_filter_action_mark_supported = B_FALSE;
+ encp->enc_filter_action_mark_max = 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_phy_cfg(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_PHY_STATS
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+#endif /* EFSYS_OPT_PHY_STATS */
+ efx_rc_t rc;
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail1;
+
+#if EFSYS_OPT_PHY_STATS
+ /* Convert the MCDI statistic mask into the EFX_PHY_STAT mask */
+ siena_phy_decode_stats(enp, encp->enc_mcdi_phy_stat_mask,
+ NULL, &encp->enc_phy_stat_mask, NULL);
+#endif /* EFSYS_OPT_PHY_STATS */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#define SIENA_BIU_MAGIC0 0x01234567
+#define SIENA_BIU_MAGIC1 0xfedcba98
+
+static __checkReturn efx_rc_t
+siena_nic_biu_test(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ /*
+ * Write magic values to scratch registers 0 and 1, then
+ * verify that the values were written correctly. Interleave
+ * the accesses to ensure that the BIU is not just reading
+ * back the cached value that was last written.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ /*
+ * Perform the same test, with the values swapped. This
+ * ensures that subsequent tests don't start with the correct
+ * values already written into the scratch registers.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail3;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_probe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ siena_link_state_t sls;
+ unsigned int mask;
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* Test BIU */
+ if ((rc = siena_nic_biu_test(enp)) != 0)
+ goto fail1;
+
+ /* Clear the region register */
+ EFX_POPULATE_OWORD_4(oword,
+ FRF_AZ_ADR_REGION0, 0,
+ FRF_AZ_ADR_REGION1, (1 << 16),
+ FRF_AZ_ADR_REGION2, (2 << 16),
+ FRF_AZ_ADR_REGION3, (3 << 16));
+ EFX_BAR_WRITEO(enp, FR_AZ_ADR_REGION_REG, &oword);
+
+ /* Read clear any assertion state */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail2;
+
+ /* Exit the assertion handler */
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail3;
+
+ /* Wrestle control from the BMC */
+ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
+ goto fail4;
+
+ if ((rc = siena_board_cfg(enp)) != 0)
+ goto fail5;
+
+ if ((rc = siena_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = siena_nic_reset(enp)) != 0)
+ goto fail7;
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail8;
+ epp->ep_default_adv_cap_mask = sls.sls_adv_cap_mask;
+ epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+ if ((rc = siena_nic_get_partn_mask(enp, &mask)) != 0)
+ goto fail9;
+ enp->en_u.siena.enu_partn_mask = mask;
+#endif
+
+#if EFSYS_OPT_MAC_STATS
+ /* Wipe the MAC statistics */
+ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
+ goto fail10;
+#endif
+
+#if EFSYS_OPT_LOOPBACK
+ if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
+ goto fail11;
+#endif
+
+#if EFSYS_OPT_MON_STATS
+ if ((rc = mcdi_mon_cfg_build(enp)) != 0)
+ goto fail12;
+#endif
+
+ encp->enc_features = enp->en_features;
+
+ return (0);
+
+#if EFSYS_OPT_MON_STATS
+fail12:
+ EFSYS_PROBE(fail12);
+#endif
+#if EFSYS_OPT_LOOPBACK
+fail11:
+ EFSYS_PROBE(fail11);
+#endif
+#if EFSYS_OPT_MAC_STATS
+fail10:
+ EFSYS_PROBE(fail10);
+#endif
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+fail9:
+ EFSYS_PROBE(fail9);
+#endif
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* siena_nic_reset() is called to recover from BADASSERT failures. */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ /*
+ * Bug24908: ENTITY_RESET_IN_LEN is non zero but zero may be supplied
+ * for backwards compatibility with PORT_RESET_IN_LEN.
+ */
+ EFX_STATIC_ASSERT(MC_CMD_ENTITY_RESET_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_ENTITY_RESET;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (0);
+}
+
+static void
+siena_nic_rx_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * RX_INGR_EN is always enabled on Siena, because we rely on
+ * the RX parser to be resiliant to missing SOP/EOP.
+ */
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_INGR_EN, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Disable parsing of additional 802.1Q in Q packets */
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+}
+
+static void
+siena_nic_usrev_dis(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_CZ_USREV_DIS, 1);
+ EFX_BAR_WRITEO(enp, FR_CZ_USR_EV_CFG, &oword);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_init(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* Enable reporting of some events (e.g. link change) */
+ if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
+ goto fail1;
+
+ siena_sram_init(enp);
+
+ /* Configure Siena's RX block */
+ siena_nic_rx_cfg(enp);
+
+ /* Disable USR_EVents for now */
+ siena_nic_usrev_dis(enp);
+
+ /* bug17057: Ensure set_link is called */
+ if ((rc = siena_phy_reconfigure(enp)) != 0)
+ goto fail2;
+
+ enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V1;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_nic_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ void
+siena_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_cfg_free(enp);
+#endif /* EFSYS_OPT_MON_STATS */
+ (void) efx_mcdi_drv_attach(enp, B_FALSE);
+}
+
+#if EFSYS_OPT_DIAG
+
+static siena_register_set_t __siena_registers[] = {
+ { FR_AZ_ADR_REGION_REG_OFST, 0, 1 },
+ { FR_CZ_USR_EV_CFG_OFST, 0, 1 },
+ { FR_AZ_RX_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_TX_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_TX_RESERVED_REG_OFST, 0, 1 },
+ { FR_AZ_SRM_TX_DC_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_RX_DC_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_RX_DC_PF_WM_REG_OFST, 0, 1 },
+ { FR_AZ_DP_CTRL_REG_OFST, 0, 1 },
+ { FR_BZ_RX_RSS_TKEY_REG_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG1_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG2_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG3_OFST, 0, 1}
+};
+
+static const uint32_t __siena_register_masks[] = {
+ 0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF,
+ 0x000103FF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000,
+ 0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF,
+ 0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF,
+ 0x001FFFFF, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000003, 0x00000000, 0x00000000, 0x00000000,
+ 0x000003FF, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000FFF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000
+};
+
+static siena_register_set_t __siena_tables[] = {
+ { FR_AZ_RX_FILTER_TBL0_OFST, FR_AZ_RX_FILTER_TBL0_STEP,
+ FR_AZ_RX_FILTER_TBL0_ROWS },
+ { FR_CZ_RX_MAC_FILTER_TBL0_OFST, FR_CZ_RX_MAC_FILTER_TBL0_STEP,
+ FR_CZ_RX_MAC_FILTER_TBL0_ROWS },
+ { FR_AZ_RX_DESC_PTR_TBL_OFST,
+ FR_AZ_RX_DESC_PTR_TBL_STEP, FR_CZ_RX_DESC_PTR_TBL_ROWS },
+ { FR_AZ_TX_DESC_PTR_TBL_OFST,
+ FR_AZ_TX_DESC_PTR_TBL_STEP, FR_CZ_TX_DESC_PTR_TBL_ROWS },
+ { FR_AZ_TIMER_TBL_OFST, FR_AZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS },
+ { FR_CZ_TX_FILTER_TBL0_OFST,
+ FR_CZ_TX_FILTER_TBL0_STEP, FR_CZ_TX_FILTER_TBL0_ROWS },
+ { FR_CZ_TX_MAC_FILTER_TBL0_OFST,
+ FR_CZ_TX_MAC_FILTER_TBL0_STEP, FR_CZ_TX_MAC_FILTER_TBL0_ROWS }
+};
+
+static const uint32_t __siena_table_masks[] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000003FF,
+ 0xFFFF0FFF, 0xFFFFFFFF, 0x00000E7F, 0x00000000,
+ 0xFFFFFFFE, 0x0FFFFFFF, 0x01800000, 0x00000000,
+ 0xFFFFFFFE, 0x0FFFFFFF, 0x0C000000, 0x00000000,
+ 0x3FFFFFFF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000013FF,
+ 0xFFFF07FF, 0xFFFFFFFF, 0x0000007F, 0x00000000,
+};
+
+ __checkReturn efx_rc_t
+siena_nic_test_registers(
+ __in efx_nic_t *enp,
+ __in siena_register_set_t *rsp,
+ __in size_t count)
+{
+ unsigned int bit;
+ efx_oword_t original;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ efx_rc_t rc;
+
+ while (count > 0) {
+ /* This function is only suitable for registers */
+ EFSYS_ASSERT(rsp->rows == 1);
+
+ /* bit sweep on and off */
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+ for (bit = 0; bit < 128; bit++) {
+ /* Is this bit in the mask? */
+ if (~(rsp->mask.eo_u32[bit >> 5]) & (1 << bit))
+ continue;
+
+ /* Test this bit can be set in isolation */
+ reg = original;
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFX_SET_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ /* Test this bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, rsp->mask);
+ EFX_CLEAR_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail2;
+ }
+ }
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+
+ --count;
+ ++rsp;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_test_tables(
+ __in efx_nic_t *enp,
+ __in siena_register_set_t *rsp,
+ __in efx_pattern_type_t pattern,
+ __in size_t count)
+{
+ efx_sram_pattern_fn_t func;
+ unsigned int index;
+ unsigned int address;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(pattern < EFX_PATTERN_NTYPES);
+ func = __efx_sram_pattern_fns[pattern];
+
+ while (count > 0) {
+ /* Write */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_WRITEO(enp->en_esbp, address, &reg, B_TRUE);
+
+ address += rsp->step;
+ }
+
+ /* Read */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_READO(enp->en_esbp, address, &buf, B_TRUE);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ address += rsp->step;
+ }
+
+ ++rsp;
+ --count;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+siena_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ siena_register_set_t *rsp;
+ const uint32_t *dwordp;
+ unsigned int nitems;
+ unsigned int count;
+ efx_rc_t rc;
+
+ /* Fill out the register mask entries */
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_register_masks)
+ == EFX_ARRAY_SIZE(__siena_registers) * 4);
+
+ nitems = EFX_ARRAY_SIZE(__siena_registers);
+ dwordp = __siena_register_masks;
+ for (count = 0; count < nitems; ++count) {
+ rsp = __siena_registers + count;
+ rsp->mask.eo_u32[0] = *dwordp++;
+ rsp->mask.eo_u32[1] = *dwordp++;
+ rsp->mask.eo_u32[2] = *dwordp++;
+ rsp->mask.eo_u32[3] = *dwordp++;
+ }
+
+ /* Fill out the register table entries */
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_table_masks)
+ == EFX_ARRAY_SIZE(__siena_tables) * 4);
+
+ nitems = EFX_ARRAY_SIZE(__siena_tables);
+ dwordp = __siena_table_masks;
+ for (count = 0; count < nitems; ++count) {
+ rsp = __siena_tables + count;
+ rsp->mask.eo_u32[0] = *dwordp++;
+ rsp->mask.eo_u32[1] = *dwordp++;
+ rsp->mask.eo_u32[2] = *dwordp++;
+ rsp->mask.eo_u32[3] = *dwordp++;
+ }
+
+ if ((rc = siena_nic_test_registers(enp, __siena_registers,
+ EFX_ARRAY_SIZE(__siena_registers))) != 0)
+ goto fail1;
+
+ if ((rc = siena_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BYTE_ALTERNATE,
+ EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail2;
+
+ if ((rc = siena_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BYTE_CHANGING,
+ EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail3;
+
+ if ((rc = siena_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BIT_SWEEP, EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_nvram.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_nvram.c
new file mode 100644
index 00000000..8cdd2df7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_nvram.c
@@ -0,0 +1,720 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, sizep,
+ NULL, NULL, NULL)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0) {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
+ while (size > 0) {
+ chunk = MIN(size, SIENA_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_read(enp, partn, offset, data, chunk,
+ MC_CMD_NVRAM_READ_IN_V2_DEFAULT)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0) {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
+ while (size > 0) {
+ chunk = MIN(size, SIENA_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_write(enp, partn, offset,
+ data, chunk)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *verify_resultp)
+{
+ boolean_t reboot;
+ efx_rc_t rc;
+
+ /*
+ * Reboot into the new image only for PHYs. The driver has to
+ * explicitly cope with an MC reboot after a firmware update.
+ */
+ reboot = (partn == MC_CMD_NVRAM_TYPE_PHY_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_PHY_PORT1 ||
+ partn == MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO);
+
+ rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, verify_resultp);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef struct siena_parttbl_entry_s {
+ unsigned int partn;
+ unsigned int port;
+ efx_nvram_type_t nvtype;
+} siena_parttbl_entry_t;
+
+static siena_parttbl_entry_t siena_parttbl[] = {
+ {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 1, EFX_NVRAM_NULLPHY},
+ {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 2, EFX_NVRAM_NULLPHY},
+ {MC_CMD_NVRAM_TYPE_MC_FW, 1, EFX_NVRAM_MC_FIRMWARE},
+ {MC_CMD_NVRAM_TYPE_MC_FW, 2, EFX_NVRAM_MC_FIRMWARE},
+ {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM, 1, EFX_NVRAM_BOOTROM},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM, 2, EFX_NVRAM_BOOTROM},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG},
+ {MC_CMD_NVRAM_TYPE_PHY_PORT0, 1, EFX_NVRAM_PHY},
+ {MC_CMD_NVRAM_TYPE_PHY_PORT1, 2, EFX_NVRAM_PHY},
+ {MC_CMD_NVRAM_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
+ {MC_CMD_NVRAM_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
+ {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
+ {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
+ {MC_CMD_NVRAM_TYPE_FC_FW, 1, EFX_NVRAM_FCFW},
+ {MC_CMD_NVRAM_TYPE_FC_FW, 2, EFX_NVRAM_FCFW},
+ {MC_CMD_NVRAM_TYPE_CPLD, 1, EFX_NVRAM_CPLD},
+ {MC_CMD_NVRAM_TYPE_CPLD, 2, EFX_NVRAM_CPLD},
+ {MC_CMD_NVRAM_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
+ {MC_CMD_NVRAM_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE}
+};
+
+ __checkReturn efx_rc_t
+siena_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int i;
+
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT(partnp != NULL);
+
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ siena_parttbl_entry_t *entry = &siena_parttbl[i];
+
+ if (entry->port == emip->emi_port && entry->nvtype == type) {
+ *partnp = entry->partn;
+ return (0);
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+siena_nvram_test(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_parttbl_entry_t *entry;
+ unsigned int i;
+ efx_rc_t rc;
+
+ /*
+ * Iterate over the list of supported partition types
+ * applicable to *this* port
+ */
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ entry = &siena_parttbl[i];
+
+ if (entry->port != emip->emi_port ||
+ !(enp->en_u.siena.enu_partn_mask & (1 << entry->partn)))
+ continue;
+
+ if ((rc = efx_mcdi_nvram_test(enp, entry->partn)) != 0) {
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#define SIENA_DYNAMIC_CFG_SIZE(_nitems) \
+ (sizeof (siena_mc_dynamic_config_hdr_t) + ((_nitems) * \
+ sizeof (((siena_mc_dynamic_config_hdr_t *)NULL)->fw_version[0])))
+
+ __checkReturn efx_rc_t
+siena_nvram_get_dynamic_cfg(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t vpd,
+ __out siena_mc_dynamic_config_hdr_t **dcfgp,
+ __out size_t *sizep)
+{
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ size_t size;
+ uint8_t cksum;
+ unsigned int vpd_offset;
+ unsigned int vpd_length;
+ unsigned int hdr_length;
+ unsigned int nversions;
+ unsigned int pos;
+ unsigned int region;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1);
+
+ /*
+ * Allocate sufficient memory for the entire dynamiccfg area, even
+ * if we're not actually going to read in the VPD.
+ */
+ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
+ goto fail1;
+
+ if (size < SIENA_NVRAM_CHUNK) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, dcfg);
+ if (dcfg == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ if ((rc = siena_nvram_partn_read(enp, partn, 0,
+ (caddr_t)dcfg, SIENA_NVRAM_CHUNK)) != 0)
+ goto fail4;
+
+ /* Verify the magic */
+ if (EFX_DWORD_FIELD(dcfg->magic, EFX_DWORD_0)
+ != SIENA_MC_DYNAMIC_CONFIG_MAGIC)
+ goto invalid1;
+
+ /* All future versions of the structure must be backwards compatible */
+ EFX_STATIC_ASSERT(SIENA_MC_DYNAMIC_CONFIG_VERSION == 0);
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+ nversions = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+
+ /* Verify the hdr doesn't overflow the partn size */
+ if (hdr_length > size || vpd_offset > size || vpd_length > size ||
+ vpd_length + vpd_offset > size)
+ goto invalid2;
+
+ /* Verify the header has room for all it's versions */
+ if (hdr_length < SIENA_DYNAMIC_CFG_SIZE(0) ||
+ hdr_length < SIENA_DYNAMIC_CFG_SIZE(nversions))
+ goto invalid3;
+
+ /*
+ * Read the remaining portion of the dcfg, either including
+ * the whole of VPD (there is no vpd length in this structure,
+ * so we have to parse each tag), or just the dcfg header itself
+ */
+ region = vpd ? vpd_offset + vpd_length : hdr_length;
+ if (region > SIENA_NVRAM_CHUNK) {
+ if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
+ (caddr_t)dcfg + SIENA_NVRAM_CHUNK,
+ region - SIENA_NVRAM_CHUNK)) != 0)
+ goto fail5;
+ }
+
+ /* Verify checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ if (cksum != 0)
+ goto invalid4;
+
+ goto done;
+
+invalid4:
+ EFSYS_PROBE(invalid4);
+invalid3:
+ EFSYS_PROBE(invalid3);
+invalid2:
+ EFSYS_PROBE(invalid2);
+invalid1:
+ EFSYS_PROBE(invalid1);
+
+ /*
+ * Construct a new "null" dcfg, with an empty version vector,
+ * and an empty VPD chunk trailing. This has the neat side effect
+ * of testing the exception paths in the write path.
+ */
+ EFX_POPULATE_DWORD_1(dcfg->magic,
+ EFX_DWORD_0, SIENA_MC_DYNAMIC_CONFIG_MAGIC);
+ EFX_POPULATE_WORD_1(dcfg->length, EFX_WORD_0, sizeof (*dcfg));
+ EFX_POPULATE_BYTE_1(dcfg->version, EFX_BYTE_0,
+ SIENA_MC_DYNAMIC_CONFIG_VERSION);
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
+ EFX_DWORD_0, sizeof (*dcfg));
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, 0);
+ EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items, EFX_DWORD_0, 0);
+
+done:
+ *dcfgp = dcfg;
+ *sizep = size;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, dcfg);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_get_subtype(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMAX)];
+ efx_word_t *fw_list;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOARD_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST +
+ (partn + 1) * sizeof (efx_word_t)) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ fw_list = MCDI_OUT2(req, efx_word_t,
+ GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+ *subtypep = EFX_WORD_FIELD(fw_list[partn], EFX_WORD_0);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ siena_mc_dynamic_config_hdr_t *dcfg;
+ siena_parttbl_entry_t *entry;
+ uint32_t dcfg_partn;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = siena_nvram_get_subtype(enp, partn, subtypep)) != 0)
+ goto fail2;
+
+ /*
+ * Some partitions are accessible from both ports (for instance BOOTROM)
+ * Find the highest version reported by all dcfg structures on ports
+ * that have access to this partition.
+ */
+ version[0] = version[1] = version[2] = version[3] = 0;
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ siena_mc_fw_version_t *verp;
+ unsigned int nitems;
+ uint16_t temp[4];
+ size_t length;
+
+ entry = &siena_parttbl[i];
+ if (entry->partn != partn)
+ continue;
+
+ dcfg_partn = (entry->port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+ /*
+ * Ingore missing partitions on port 2, assuming they're due
+ * to to running on a single port part.
+ */
+ if ((1 << dcfg_partn) & ~enp->en_u.siena.enu_partn_mask) {
+ if (entry->port == 2)
+ continue;
+ }
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_FALSE, &dcfg, &length)) != 0)
+ goto fail3;
+
+ nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items,
+ EFX_DWORD_0);
+ if (nitems < entry->partn)
+ goto done;
+
+ verp = &dcfg->fw_version[partn];
+ temp[0] = EFX_WORD_FIELD(verp->version_w, EFX_WORD_0);
+ temp[1] = EFX_WORD_FIELD(verp->version_x, EFX_WORD_0);
+ temp[2] = EFX_WORD_FIELD(verp->version_y, EFX_WORD_0);
+ temp[3] = EFX_WORD_FIELD(verp->version_z, EFX_WORD_0);
+ if (memcmp(version, temp, sizeof (temp)) < 0)
+ memcpy(version, temp, sizeof (temp));
+
+done:
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = siena_nvram_partn_lock(enp, partn)) != 0)
+ goto fail1;
+
+ if (chunk_sizep != NULL)
+ *chunk_sizep = SIENA_NVRAM_CHUNK;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *verify_resultp)
+{
+ efx_rc_t rc;
+
+ if ((rc = siena_nvram_partn_unlock(enp, partn, verify_resultp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4])
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ siena_mc_fw_version_t *fwverp;
+ uint32_t dcfg_partn;
+ size_t dcfg_size;
+ unsigned int hdr_length;
+ unsigned int vpd_length;
+ unsigned int vpd_offset;
+ unsigned int nitems;
+ unsigned int required_hdr_length;
+ unsigned int pos;
+ uint8_t cksum;
+ uint32_t subtype;
+ size_t length;
+ efx_rc_t rc;
+
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &dcfg_size)) != 0)
+ goto fail1;
+
+ if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_TRUE, &dcfg, &length)) != 0)
+ goto fail3;
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+ nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+
+ /*
+ * NOTE: This function will blatt any fields trailing the version
+ * vector, or the VPD chunk.
+ */
+ required_hdr_length = SIENA_DYNAMIC_CFG_SIZE(partn + 1);
+ if (required_hdr_length + vpd_length > length) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ if (vpd_offset < required_hdr_length) {
+ (void) memmove((caddr_t)dcfg + required_hdr_length,
+ (caddr_t)dcfg + vpd_offset, vpd_length);
+ vpd_offset = required_hdr_length;
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
+ EFX_DWORD_0, vpd_offset);
+ }
+
+ if (hdr_length < required_hdr_length) {
+ (void) memset((caddr_t)dcfg + hdr_length, 0,
+ required_hdr_length - hdr_length);
+ hdr_length = required_hdr_length;
+ EFX_POPULATE_WORD_1(dcfg->length,
+ EFX_WORD_0, hdr_length);
+ }
+
+ /* Get the subtype to insert into the fw_subtype array */
+ if ((rc = siena_nvram_get_subtype(enp, partn, &subtype)) != 0)
+ goto fail5;
+
+ /* Fill out the new version */
+ fwverp = &dcfg->fw_version[partn];
+ EFX_POPULATE_DWORD_1(fwverp->fw_subtype, EFX_DWORD_0, subtype);
+ EFX_POPULATE_WORD_1(fwverp->version_w, EFX_WORD_0, version[0]);
+ EFX_POPULATE_WORD_1(fwverp->version_x, EFX_WORD_0, version[1]);
+ EFX_POPULATE_WORD_1(fwverp->version_y, EFX_WORD_0, version[2]);
+ EFX_POPULATE_WORD_1(fwverp->version_z, EFX_WORD_0, version[3]);
+
+ /* Update the version count */
+ if (nitems < partn + 1) {
+ nitems = partn + 1;
+ EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items,
+ EFX_DWORD_0, nitems);
+ }
+
+ /* Update the checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ dcfg->csum.eb_u8[0] -= cksum;
+
+ /* Erase and write the new partition */
+ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, dcfg_size)) != 0)
+ goto fail6;
+
+ /* Write out the new structure to nvram */
+ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0,
+ (caddr_t)dcfg, vpd_offset + vpd_length)) != 0)
+ goto fail7;
+
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn, NULL);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_phy.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_phy.c
new file mode 100644
index 00000000..4b2190d3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_phy.c
@@ -0,0 +1,782 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+static void
+siena_phy_decode_cap(
+ __in uint32_t mcdi_cap,
+ __out uint32_t *maskp)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ mask |= (1 << EFX_PHY_CAP_PAUSE);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ mask |= (1 << EFX_PHY_CAP_ASYM);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mask |= (1 << EFX_PHY_CAP_AN);
+
+ *maskp = mask;
+}
+
+static void
+siena_phy_decode_link_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t link_flags,
+ __in unsigned int speed,
+ __in unsigned int fcntl,
+ __out efx_link_mode_t *link_modep,
+ __out unsigned int *fcntlp)
+{
+ boolean_t fd = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ boolean_t up = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (!up)
+ *link_modep = EFX_LINK_DOWN;
+ else if (speed == 10000 && fd)
+ *link_modep = EFX_LINK_10000FDX;
+ else if (speed == 1000)
+ *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+ else if (speed == 100)
+ *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+ else if (speed == 10)
+ *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+ else
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ if (fcntl == MC_CMD_FCNTL_OFF)
+ *fcntlp = 0;
+ else if (fcntl == MC_CMD_FCNTL_RESPOND)
+ *fcntlp = EFX_FCNTL_RESPOND;
+ else if (fcntl == MC_CMD_FCNTL_BIDIR)
+ *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ else {
+ EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+ *fcntlp = 0;
+ }
+}
+
+ void
+siena_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int link_flags;
+ unsigned int speed;
+ unsigned int fcntl;
+ efx_link_mode_t link_mode;
+ uint32_t lp_cap_mask;
+
+ /*
+ * Convert the LINKCHANGE speed enumeration into mbit/s, in the
+ * same way as GET_LINK encodes the speed
+ */
+ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
+ case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+ speed = 100;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+ speed = 1000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+ speed = 10000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
+ siena_phy_decode_link_mode(enp, link_flags, speed,
+ MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
+ &link_mode, &fcntl);
+ siena_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
+ &lp_cap_mask);
+
+ /*
+ * It's safe to update ep_lp_cap_mask without the driver's port lock
+ * because presumably any concurrently running efx_port_poll() is
+ * only going to arrive at the same value.
+ *
+ * ep_fcntl has two meanings. It's either the link common fcntl
+ * (if the PHY supports AN), or it's the forced link state. If
+ * the former, it's safe to update the value for the same reason as
+ * for ep_lp_cap_mask. If the latter, then just ignore the value,
+ * because we can race with efx_mac_fcntl_set().
+ */
+ epp->ep_lp_cap_mask = lp_cap_mask;
+ if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
+ epp->ep_fcntl = fcntl;
+
+ *link_modep = link_mode;
+}
+
+ __checkReturn efx_rc_t
+siena_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t power)
+{
+ efx_rc_t rc;
+
+ if (!power)
+ return (0);
+
+ /* Check if the PHY is a zombie */
+ if ((rc = siena_phy_verify(enp)) != 0)
+ goto fail1;
+
+ enp->en_reset_flags |= EFX_RESET_PHY;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_get_link(
+ __in efx_nic_t *enp,
+ __out siena_link_state_t *slsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+ &slsp->sls_adv_cap_mask);
+ siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+ &slsp->sls_lp_cap_mask);
+
+ siena_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+ &slsp->sls_link_mode, &slsp->sls_fcntl);
+
+#if EFSYS_OPT_LOOPBACK
+ /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+
+ slsp->sls_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ slsp->sls_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_SET_ID_LED_IN_LEN,
+ MC_CMD_SET_ID_LED_OUT_LEN),
+ MAX(MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN))];
+ uint32_t cap_mask;
+#if EFSYS_OPT_PHY_LED_CONTROL
+ unsigned int led_mode;
+#endif
+ unsigned int speed;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
+
+ cap_mask = epp->ep_adv_cap_mask;
+ MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+ PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+ PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+ PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+ PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+ PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+ PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+ PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+ PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+ PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+ PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+
+#if EFSYS_OPT_LOOPBACK
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
+ epp->ep_loopback_type);
+ switch (epp->ep_loopback_link_mode) {
+ case EFX_LINK_100FDX:
+ speed = 100;
+ break;
+ case EFX_LINK_1000FDX:
+ speed = 1000;
+ break;
+ case EFX_LINK_10000FDX:
+ speed = 10000;
+ break;
+ default:
+ speed = 0;
+ }
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+ speed = 0;
+#endif /* EFSYS_OPT_LOOPBACK */
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+#if EFSYS_OPT_PHY_FLAGS
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* And set the blink mode */
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_ID_LED;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+ switch (epp->ep_phy_led_mode) {
+ case EFX_PHY_LED_DEFAULT:
+ led_mode = MC_CMD_LED_DEFAULT;
+ break;
+ case EFX_PHY_LED_OFF:
+ led_mode = MC_CMD_LED_OFF;
+ break;
+ case EFX_PHY_LED_ON:
+ led_mode = MC_CMD_LED_ON;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ led_mode = MC_CMD_LED_DEFAULT;
+ }
+
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
+#else
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ uint32_t state;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+ if (state != MC_CMD_PHY_STATE_OK) {
+ if (state != MC_CMD_PHY_STATE_ZOMBIE)
+ EFSYS_PROBE1(mc_pcol_error, int, state);
+ rc = ENOTACTIVE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ _NOTE(ARGUNUSED(enp, ouip))
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+#define SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \
+ _mc_record, _efx_record) \
+ if ((_vmask) & (1ULL << (_mc_record))) { \
+ (_smask) |= (1ULL << (_efx_record)); \
+ if ((_stat) != NULL && !EFSYS_MEM_IS_NULL(_esmp)) { \
+ efx_dword_t dword; \
+ EFSYS_MEM_READD(_esmp, (_mc_record) * 4, &dword);\
+ (_stat)[_efx_record] = \
+ EFX_DWORD_FIELD(dword, EFX_DWORD_0); \
+ } \
+ }
+
+#define SIENA_SIMPLE_STAT_SET2(_vmask, _esmp, _smask, _stat, _record) \
+ SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \
+ MC_CMD_ ## _record, \
+ EFX_PHY_STAT_ ## _record)
+
+ void
+siena_phy_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t vmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint64_t *smaskp,
+ __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ uint64_t smask = 0;
+
+ _NOTE(ARGUNUSED(enp))
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, OUI);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_TX_FAULT);
+
+ if (vmask & (1 << MC_CMD_PMA_PMD_SIGNAL)) {
+ smask |= ((1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_A) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_B) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_C) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_D));
+ if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+ uint32_t sig;
+ EFSYS_MEM_READD(esmp, 4 * MC_CMD_PMA_PMD_SIGNAL,
+ &dword);
+ sig = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_A] = (sig >> 1) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_B] = (sig >> 2) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_C] = (sig >> 3) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_D] = (sig >> 4) & 1;
+ }
+ }
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_A,
+ EFX_PHY_STAT_SNR_A);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_B,
+ EFX_PHY_STAT_SNR_B);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_C,
+ EFX_PHY_STAT_SNR_C);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_D,
+ EFX_PHY_STAT_SNR_D);
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_TX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BER);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BLOCK_ERRORS);
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_LINK_UP,
+ EFX_PHY_STAT_PHY_XS_LINK_UP);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_RX_FAULT,
+ EFX_PHY_STAT_PHY_XS_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_TX_FAULT,
+ EFX_PHY_STAT_PHY_XS_TX_FAULT);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_ALIGN,
+ EFX_PHY_STAT_PHY_XS_ALIGN);
+
+ if (vmask & (1 << MC_CMD_PHYXS_SYNC)) {
+ smask |= ((1 << EFX_PHY_STAT_PHY_XS_SYNC_A) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_B) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_C) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_D));
+ if (stat != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+ uint32_t sync;
+ EFSYS_MEM_READD(esmp, 4 * MC_CMD_PHYXS_SYNC, &dword);
+ sync = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_A] = (sync >> 0) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_B] = (sync >> 1) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_C] = (sync >> 2) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_D] = (sync >> 3) & 1;
+ }
+ }
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_COMPLETE);
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_CL22_LINK_UP,
+ EFX_PHY_STAT_CL22EXT_LINK_UP);
+
+ if (smaskp != NULL)
+ *smaskp = smask;
+}
+
+ __checkReturn efx_rc_t
+siena_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t vmask = encp->enc_mcdi_phy_stat_mask;
+ uint64_t smask;
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_PHY_STATS_IN_LEN,
+ MC_CMD_PHY_STATS_OUT_DMA_LEN)];
+ efx_rc_t rc;
+
+ if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_PHY_STATS_SIZE)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_PHY_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_PHY_STATS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_PHY_STATS_OUT_DMA_LEN;
+
+ MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+ EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN);
+
+ siena_phy_decode_stats(enp, vmask, esmp, &smask, stat);
+ EFSYS_ASSERT(smask == encp->enc_phy_stat_mask);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+siena_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_start(enp, type)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn unsigned long
+siena_phy_sft9001_bist_status(
+ __in uint16_t code)
+{
+ switch (code) {
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY:
+ return (EFX_PHY_CABLE_STATUS_BUSY);
+ case MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT:
+ return (EFX_PHY_CABLE_STATUS_INTERPAIRSHORT);
+ case MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT:
+ return (EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT);
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN:
+ return (EFX_PHY_CABLE_STATUS_OPEN);
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_OK:
+ return (EFX_PHY_CABLE_STATUS_OK);
+ default:
+ return (EFX_PHY_CABLE_STATUS_INVALID);
+ }
+}
+
+ __checkReturn efx_rc_t
+siena_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX)];
+ uint32_t value_mask = 0;
+ efx_mcdi_req_t req;
+ uint32_t result;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_POLL_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MCDI_CTL_SDU_LEN_MAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (count > 0)
+ (void) memset(valuesp, '\0', count * sizeof (unsigned long));
+
+ result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT);
+
+ /* Extract PHY specific results */
+ if (result == MC_CMD_POLL_BIST_PASSED &&
+ encp->enc_phy_type == EFX_PHY_SFT9001B &&
+ req.emr_out_length_used >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN &&
+ (type == EFX_BIST_TYPE_PHY_CABLE_SHORT ||
+ type == EFX_BIST_TYPE_PHY_CABLE_LONG)) {
+ uint16_t word;
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_A) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_A] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_A);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_B) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_B] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_B);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_C) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_C] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_C);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_D) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_D] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_D);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_A) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_A);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_A] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_A);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_B) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_B);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_B] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_B);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_C) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_C);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_C] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_C);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_D) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_D);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_D] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_D);
+ }
+
+ } else if (result == MC_CMD_POLL_BIST_FAILED &&
+ encp->enc_phy_type == EFX_PHY_QLX111V &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN &&
+ count > EFX_BIST_FAULT_CODE) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_FAULT_CODE] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST);
+ value_mask |= 1 << EFX_BIST_FAULT_CODE;
+ }
+
+ if (value_maskp != NULL)
+ *value_maskp = value_mask;
+
+ EFSYS_ASSERT(resultp != NULL);
+ if (result == MC_CMD_POLL_BIST_RUNNING)
+ *resultp = EFX_BIST_RESULT_RUNNING;
+ else if (result == MC_CMD_POLL_BIST_PASSED)
+ *resultp = EFX_BIST_RESULT_PASSED;
+ else
+ *resultp = EFX_BIST_RESULT_FAILED;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ /* There is no way to stop BIST on Siena */
+ _NOTE(ARGUNUSED(enp, type))
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_sram.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_sram.c
new file mode 100644
index 00000000..c9ef786c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_sram.c
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+ void
+siena_sram_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t rx_base, tx_base;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ rx_base = encp->enc_buftbl_limit;
+ tx_base = rx_base + (encp->enc_rxq_limit *
+ EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE));
+
+ /* Initialize the transmit descriptor cache */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, tx_base);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DC_SIZE, EFX_TXQ_DC_SIZE);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_DC_CFG_REG, &oword);
+
+ /* Initialize the receive descriptor cache */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rx_base);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_SIZE, EFX_RXQ_DC_SIZE);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_CFG_REG, &oword);
+
+ /* Set receive descriptor pre-fetch low water mark */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_PF_LWM, 56);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_PF_WM_REG, &oword);
+
+ /* Set the event queue to use for SRAM updates */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_UPD_EVQ_ID, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_UPD_EVQ_REG, &oword);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+siena_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_sram_pattern_fn_t func)
+{
+ efx_oword_t oword;
+ efx_qword_t qword;
+ efx_qword_t verify;
+ size_t rows;
+ unsigned int wptr;
+ unsigned int rptr;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Reconfigure into HALF buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ /*
+ * Move the descriptor caches up to the top of SRAM, and test
+ * all of SRAM below them. We only miss out one row here.
+ */
+ rows = SIENA_SRAM_ROWS - 1;
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rows);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, rows + 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);
+
+ /*
+ * Write the pattern through BUF_HALF_TBL. Write
+ * in 64 entry batches, waiting 1us in between each batch
+ * to guarantee not to overflow the SRAM fifo
+ */
+ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
+ func(wptr, B_FALSE, &qword);
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);
+
+ if ((wptr - rptr) < 64 && wptr < rows - 1)
+ continue;
+
+ EFSYS_SPIN(1);
+
+ for (; rptr <= wptr; ++rptr) {
+ func(rptr, B_FALSE, &qword);
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
+ &verify);
+
+ if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ }
+ }
+
+ /* And do the same negated */
+ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
+ func(wptr, B_TRUE, &qword);
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);
+
+ if ((wptr - rptr) < 64 && wptr < rows - 1)
+ continue;
+
+ EFSYS_SPIN(1);
+
+ for (; rptr <= wptr; ++rptr) {
+ func(rptr, B_TRUE, &qword);
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
+ &verify);
+
+ if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+ }
+
+ /* Restore back to FULL buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ /*
+ * We don't need to reconfigure SRAM again because the API
+ * requires efx_nic_fini() to be called after an sram test.
+ */
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Restore back to FULL buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_vpd.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_vpd.c
new file mode 100644
index 00000000..ebb12abf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_vpd.c
@@ -0,0 +1,601 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_VPD
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_vpd_get_static(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __deref_out_bcount_opt(*sizep) caddr_t *svpdp,
+ __out size_t *sizep)
+{
+ siena_mc_static_config_hdr_t *scfg;
+ caddr_t svpd;
+ size_t size;
+ uint8_t cksum;
+ unsigned int vpd_offset;
+ unsigned int vpd_length;
+ unsigned int hdr_length;
+ unsigned int pos;
+ unsigned int region;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1);
+
+ /* Allocate sufficient memory for the entire static cfg area */
+ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
+ goto fail1;
+
+ if (size < SIENA_NVRAM_CHUNK) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg);
+ if (scfg == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ if ((rc = siena_nvram_partn_read(enp, partn, 0,
+ (caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0)
+ goto fail4;
+
+ /* Verify the magic number */
+ if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) !=
+ SIENA_MC_STATIC_CONFIG_MAGIC) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ /* All future versions of the structure must be backwards compatible */
+ EFX_STATIC_ASSERT(SIENA_MC_STATIC_CONFIG_VERSION == 0);
+
+ hdr_length = EFX_WORD_FIELD(scfg->length, EFX_WORD_0);
+ vpd_offset = EFX_DWORD_FIELD(scfg->static_vpd_offset, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(scfg->static_vpd_length, EFX_DWORD_0);
+
+ /* Verify the hdr doesn't overflow the sector size */
+ if (hdr_length > size || vpd_offset > size || vpd_length > size ||
+ vpd_length + vpd_offset > size) {
+ rc = EINVAL;
+ goto fail6;
+ }
+
+ /* Read the remainder of scfg + static vpd */
+ region = vpd_offset + vpd_length;
+ if (region > SIENA_NVRAM_CHUNK) {
+ if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
+ (caddr_t)scfg + SIENA_NVRAM_CHUNK,
+ region - SIENA_NVRAM_CHUNK)) != 0)
+ goto fail7;
+ }
+
+ /* Verify checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)scfg)[pos];
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail8;
+ }
+
+ if (vpd_length == 0)
+ svpd = NULL;
+ else {
+ /* Copy the vpd data out */
+ EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd);
+ if (svpd == NULL) {
+ rc = ENOMEM;
+ goto fail9;
+ }
+ memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length);
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+
+ *svpdp = svpd;
+ *sizep = vpd_length;
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_init(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ caddr_t svpd = NULL;
+ unsigned int partn;
+ size_t size = 0;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1;
+
+ /*
+ * We need the static VPD sector to present a unified static+dynamic
+ * VPD, that is, basically on every read, write, verify cycle. Since
+ * it should *never* change we can just cache it here.
+ */
+ if ((rc = siena_vpd_get_static(enp, partn, &svpd, &size)) != 0)
+ goto fail1;
+
+ if (svpd != NULL && size > 0) {
+ if ((rc = efx_vpd_hunk_verify(svpd, size, NULL)) != 0)
+ goto fail2;
+ }
+
+ enp->en_u.siena.enu_svpd = svpd;
+ enp->en_u.siena.enu_svpd_length = size;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, svpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /*
+ * This function returns the total size the user should allocate
+ * for all VPD operations. We've already cached the static vpd,
+ * so we just need to return an upper bound on the dynamic vpd.
+ * Since the dynamic_config structure can change under our feet,
+ * (as version numbers are inserted), just be safe and return the
+ * total size of the dynamic_config *sector*
+ */
+ partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, partn, sizep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ unsigned int vpd_length;
+ unsigned int vpd_offset;
+ unsigned int dcfg_partn;
+ size_t dcfg_size;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_TRUE, &dcfg, &dcfg_size)) != 0)
+ goto fail1;
+
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+
+ if (vpd_length > size) {
+ rc = EFAULT; /* Invalid dcfg: header bigger than sector */
+ goto fail2;
+ }
+
+ EFSYS_ASSERT3U(vpd_length, <=, size);
+ memcpy(data, (caddr_t)dcfg + vpd_offset, vpd_length);
+
+ /* Pad data with all-1s, consistent with update operations */
+ memset(data + vpd_length, 0xff, size - vpd_length);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_tag_t stag;
+ efx_vpd_tag_t dtag;
+ efx_vpd_keyword_t skey;
+ efx_vpd_keyword_t dkey;
+ unsigned int scont;
+ unsigned int dcont;
+
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /*
+ * Strictly you could take the view that dynamic vpd is optional.
+ * Instead, to conform more closely to the read/verify/reinit()
+ * paradigm, we require dynamic vpd. siena_vpd_reinit() will
+ * reinitialize it as required.
+ */
+ if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0)
+ goto fail1;
+
+ /*
+ * Verify that there is no duplication between the static and
+ * dynamic cfg sectors.
+ */
+ if (enp->en_u.siena.enu_svpd_length == 0)
+ goto done;
+
+ dcont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(data, size, &dtag,
+ &dkey, NULL, NULL, &dcont)) != 0)
+ goto fail2;
+ if (dcont == 0)
+ break;
+
+ /*
+ * Skip the RV keyword. It should be present in both the static
+ * and dynamic cfg sectors.
+ */
+ if (dtag == EFX_VPD_RO && dkey == EFX_VPD_KEYWORD('R', 'V'))
+ continue;
+
+ scont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(
+ enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, &stag, &skey,
+ NULL, NULL, &scont)) != 0)
+ goto fail3;
+ if (scont == 0)
+ break;
+
+ if (stag == dtag && skey == dkey) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ boolean_t wantpid;
+ efx_rc_t rc;
+
+ /*
+ * Only create a PID if the dynamic cfg doesn't have one
+ */
+ if (enp->en_u.siena.enu_svpd_length == 0)
+ wantpid = B_TRUE;
+ else {
+ unsigned int offset;
+ uint8_t length;
+
+ rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length,
+ EFX_VPD_ID, 0, &offset, &length);
+ if (rc == 0)
+ wantpid = B_FALSE;
+ else if (rc == ENOENT)
+ wantpid = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ unsigned int offset;
+ uint8_t length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Attempt to satisfy the request from svpd first */
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value,
+ enp->en_u.siena.enu_svpd + offset, length);
+ return (0);
+ } else if (rc != ENOENT)
+ goto fail1;
+ }
+
+ /* And then from the provided data buffer */
+ if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+
+ goto fail2;
+ }
+
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value, data + offset, length);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* If the provided (tag,keyword) exists in svpd, then it is readonly */
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ unsigned int offset;
+ uint8_t length;
+
+ if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ rc = EACCES;
+ goto fail1;
+ }
+ }
+
+ if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ _NOTE(ARGUNUSED(enp, data, size, evvp, contp))
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ unsigned int vpd_offset;
+ unsigned int dcfg_partn;
+ unsigned int hdr_length;
+ unsigned int pos;
+ uint8_t cksum;
+ size_t partn_size, dcfg_size;
+ size_t vpd_length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Determine total length of all tags */
+ if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0)
+ goto fail1;
+
+ /* Lock dynamic config sector for write, and read structure only */
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &partn_size)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
+ goto fail3;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_FALSE, &dcfg, &dcfg_size)) != 0)
+ goto fail4;
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+
+ /* Allocated memory should have room for the new VPD */
+ if (hdr_length + vpd_length > dcfg_size) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Copy in new vpd and update header */
+ vpd_offset = dcfg_size - vpd_length;
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset, EFX_DWORD_0, vpd_offset);
+ memcpy((caddr_t)dcfg + vpd_offset, data, vpd_length);
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, vpd_length);
+
+ /* Update the checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ dcfg->csum.eb_u8[0] -= cksum;
+
+ /* Erase and write the new sector */
+ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, partn_size)) != 0)
+ goto fail6;
+
+ /* Write out the new structure to nvram */
+ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0, (caddr_t)dcfg,
+ vpd_offset + vpd_length)) != 0)
+ goto fail7;
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn, NULL);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn, NULL);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip, enp->en_u.siena.enu_svpd_length,
+ enp->en_u.siena.enu_svpd);
+
+ enp->en_u.siena.enu_svpd = NULL;
+ enp->en_u.siena.enu_svpd_length = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/src/spdk/dpdk/drivers/net/sfc/efsys.h b/src/spdk/dpdk/drivers/net/sfc/efsys.h
new file mode 100644
index 00000000..b9d2df58
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/efsys.h
@@ -0,0 +1,773 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_COMMON_EFSYS_H
+#define _SFC_COMMON_EFSYS_H
+
+#include <stdbool.h>
+
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_memzone.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#include "sfc_debug.h"
+#include "sfc_log.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFSYS_HAS_UINT64 1
+#define EFSYS_USE_UINT64 1
+#define EFSYS_HAS_SSE2_M128 1
+
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#define EFSYS_IS_BIG_ENDIAN 1
+#define EFSYS_IS_LITTLE_ENDIAN 0
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define EFSYS_IS_BIG_ENDIAN 0
+#define EFSYS_IS_LITTLE_ENDIAN 1
+#else
+#error "Cannot determine system endianness"
+#endif
+#include "efx_types.h"
+
+
+#ifndef _NOTE
+#define _NOTE(s)
+#endif
+
+typedef bool boolean_t;
+
+#ifndef B_FALSE
+#define B_FALSE false
+#endif
+#ifndef B_TRUE
+#define B_TRUE true
+#endif
+
+/*
+ * RTE_MAX() and RTE_MIN() cannot be used since braced-group within
+ * expression allowed only inside a function, but MAX() is used as
+ * a number of elements in array.
+ */
+#ifndef MAX
+#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
+#endif
+#ifndef MIN
+#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))
+#endif
+
+/* There are macros for alignment in DPDK, but we need to make a proper
+ * correspondence here, if we want to re-use them at all
+ */
+#ifndef IS_P2ALIGNED
+#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
+#endif
+
+#ifndef P2ROUNDUP
+#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
+#endif
+
+#ifndef P2ALIGN
+#define P2ALIGN(_x, _a) ((_x) & -(_a))
+#endif
+
+#ifndef IS2P
+#define ISP2(x) rte_is_power_of_2(x)
+#endif
+
+#define ENOTACTIVE ENOTCONN
+
+static inline void
+prefetch_read_many(const volatile void *addr)
+{
+ rte_prefetch0(addr);
+}
+
+static inline void
+prefetch_read_once(const volatile void *addr)
+{
+ rte_prefetch_non_temporal(addr);
+}
+
+/* Modifiers used for Windows builds */
+#define __in
+#define __in_opt
+#define __in_ecount(_n)
+#define __in_ecount_opt(_n)
+#define __in_bcount(_n)
+#define __in_bcount_opt(_n)
+
+#define __out
+#define __out_opt
+#define __out_ecount(_n)
+#define __out_ecount_opt(_n)
+#define __out_bcount(_n)
+#define __out_bcount_opt(_n)
+#define __out_bcount_part(_n, _l)
+#define __out_bcount_part_opt(_n, _l)
+
+#define __deref_out
+
+#define __inout
+#define __inout_opt
+#define __inout_ecount(_n)
+#define __inout_ecount_opt(_n)
+#define __inout_bcount(_n)
+#define __inout_bcount_opt(_n)
+#define __inout_bcount_full_opt(_n)
+
+#define __deref_out_bcount_opt(n)
+
+#define __checkReturn
+#define __success(_x)
+
+#define __drv_when(_p, _c)
+
+/* Code inclusion options */
+
+
+#define EFSYS_OPT_NAMES 1
+
+/* Disable SFN5xxx/SFN6xxx since it requires specific support in the PMD */
+#define EFSYS_OPT_SIENA 0
+/* Enable SFN7xxx support */
+#define EFSYS_OPT_HUNTINGTON 1
+/* Enable SFN8xxx support */
+#define EFSYS_OPT_MEDFORD 1
+/* Enable SFN2xxx support */
+#define EFSYS_OPT_MEDFORD2 1
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+#define EFSYS_OPT_CHECK_REG 1
+#else
+#define EFSYS_OPT_CHECK_REG 0
+#endif
+
+/* MCDI is required for SFN7xxx and SFN8xx */
+#define EFSYS_OPT_MCDI 1
+#define EFSYS_OPT_MCDI_LOGGING 1
+#define EFSYS_OPT_MCDI_PROXY_AUTH 1
+
+#define EFSYS_OPT_MAC_STATS 1
+
+#define EFSYS_OPT_LOOPBACK 1
+
+#define EFSYS_OPT_MON_MCDI 0
+#define EFSYS_OPT_MON_STATS 0
+
+#define EFSYS_OPT_PHY_STATS 0
+#define EFSYS_OPT_BIST 0
+#define EFSYS_OPT_PHY_LED_CONTROL 0
+#define EFSYS_OPT_PHY_FLAGS 0
+
+#define EFSYS_OPT_VPD 0
+#define EFSYS_OPT_NVRAM 0
+#define EFSYS_OPT_BOOTCFG 0
+#define EFSYS_OPT_IMAGE_LAYOUT 0
+
+#define EFSYS_OPT_DIAG 0
+#define EFSYS_OPT_RX_SCALE 1
+#define EFSYS_OPT_QSTATS 0
+/* Filters support is required for SFN7xxx and SFN8xx */
+#define EFSYS_OPT_FILTER 1
+#define EFSYS_OPT_RX_SCATTER 0
+
+#define EFSYS_OPT_EV_PREFETCH 0
+
+#define EFSYS_OPT_DECODE_INTR_FATAL 0
+
+#define EFSYS_OPT_LICENSING 0
+
+#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0
+
+#define EFSYS_OPT_RX_PACKED_STREAM 0
+
+#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1
+
+#define EFSYS_OPT_TUNNEL 1
+
+#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1
+
+/* ID */
+
+typedef struct __efsys_identifier_s efsys_identifier_t;
+
+
+#define EFSYS_PROBE(_name) \
+ do { } while (0)
+
+#define EFSYS_PROBE1(_name, _type1, _arg1) \
+ do { } while (0)
+
+#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
+ do { } while (0)
+
+#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3) \
+ do { } while (0)
+
+#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4) \
+ do { } while (0)
+
+#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5) \
+ do { } while (0)
+
+#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6) \
+ do { } while (0)
+
+#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6, _type7, _arg7) \
+ do { } while (0)
+
+
+/* DMA */
+
+typedef rte_iova_t efsys_dma_addr_t;
+
+typedef struct efsys_mem_s {
+ const struct rte_memzone *esm_mz;
+ /*
+ * Ideally it should have volatile qualifier to denote that
+ * the memory may be updated by someone else. However, it adds
+ * qualifier discard warnings when the pointer or its derivative
+ * is passed to memset() or rte_mov16().
+ * So, skip the qualifier here, but make sure that it is added
+ * below in access macros.
+ */
+ void *esm_base;
+ efsys_dma_addr_t esm_addr;
+} efsys_mem_t;
+
+
+#define EFSYS_MEM_ZERO(_esmp, _size) \
+ do { \
+ (void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ (_edp)->ed_u32[0] = _addr[0]; \
+ \
+ EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ (_eqp)->eq_u64[0] = _addr[0]; \
+ \
+ EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ (_eop)->eo_u128[0] = _addr[0]; \
+ \
+ EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+
+#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ \
+ EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ _addr[0] = (_edp)->ed_u32[0]; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ _addr[0] = (_eqp)->eq_u64[0]; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ \
+ EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ _addr[0] = (_eop)->eo_u128[0]; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+
+#define EFSYS_MEM_SIZE(_esmp) \
+ ((_esmp)->esm_mz->len)
+
+#define EFSYS_MEM_ADDR(_esmp) \
+ ((_esmp)->esm_addr)
+
+#define EFSYS_MEM_IS_NULL(_esmp) \
+ ((_esmp)->esm_base == NULL)
+
+#define EFSYS_MEM_PREFETCH(_esmp, _offset) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ \
+ rte_prefetch0(_base + (_offset)); \
+ } while (0)
+
+
+/* BAR */
+
+typedef struct efsys_bar_s {
+ rte_spinlock_t esb_lock;
+ int esb_rid;
+ struct rte_pci_device *esb_dev;
+ /*
+ * Ideally it should have volatile qualifier to denote that
+ * the memory may be updated by someone else. However, it adds
+ * qualifier discard warnings when the pointer or its derivative
+ * is passed to memset() or rte_mov16().
+ * So, skip the qualifier here, but make sure that it is added
+ * below in access macros.
+ */
+ void *esb_base;
+} efsys_bar_t;
+
+#define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
+ do { \
+ rte_spinlock_init(&(_esbp)->esb_lock); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
+#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
+#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
+
+#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ rte_rmb(); \
+ (_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
+ \
+ EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ rte_rmb(); \
+ (_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
+ \
+ EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ rte_rmb(); \
+ /* There is no rte_read128_relaxed() yet */ \
+ (_eop)->eo_u128[0] = _addr[0]; \
+ \
+ EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+
+#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
+ rte_wmb(); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
+ rte_wmb(); \
+ \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/*
+ * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
+ * (required by PIO hardware).
+ *
+ * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) support
+ * write-combined memory mapped to user-land, so just abort if used.
+ */
+#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
+ do { \
+ rte_panic("Write-combined BAR access not supported"); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ /* There is no rte_write128_relaxed() yet */ \
+ _addr[0] = (_eop)->eo_u128[0]; \
+ rte_wmb(); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* Use the standard octo-word write for doorbell writes */
+#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
+ do { \
+ EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* SPIN */
+
+#define EFSYS_SPIN(_us) \
+ do { \
+ rte_delay_us(_us); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_SLEEP EFSYS_SPIN
+
+/* BARRIERS */
+
+#define EFSYS_MEM_READ_BARRIER() rte_rmb()
+#define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()
+
+/* DMA SYNC */
+
+/*
+ * DPDK does not provide any DMA syncing API, and no PMD drivers
+ * have any traces of explicit DMA syncing.
+ * DMA mapping is assumed to be coherent.
+ */
+
+#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)
+
+/* Just avoid store and compiler (impliciltly) reordering */
+#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
+
+/* TIMESTAMP */
+
+typedef uint64_t efsys_timestamp_t;
+
+#define EFSYS_TIMESTAMP(_usp) \
+ do { \
+ *(_usp) = rte_get_timer_cycles() * 1000000 / \
+ rte_get_timer_hz(); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* KMEM */
+
+#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
+ do { \
+ (_esip) = (_esip); \
+ (_p) = rte_zmalloc("sfc", (_size), 0); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_KMEM_FREE(_esip, _size, _p) \
+ do { \
+ (void)(_esip); \
+ (void)(_size); \
+ rte_free((_p)); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* LOCK */
+
+typedef rte_spinlock_t efsys_lock_t;
+
+#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
+ rte_spinlock_init((_eslp))
+#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
+#define SFC_EFSYS_LOCK(_eslp) \
+ rte_spinlock_lock((_eslp))
+#define SFC_EFSYS_UNLOCK(_eslp) \
+ rte_spinlock_unlock((_eslp))
+#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
+ SFC_ASSERT(rte_spinlock_is_locked((_eslp)))
+
+typedef int efsys_lock_state_t;
+
+#define EFSYS_LOCK_MAGIC 0x000010c4
+
+#define EFSYS_LOCK(_lockp, _state) \
+ do { \
+ SFC_EFSYS_LOCK(_lockp); \
+ (_state) = EFSYS_LOCK_MAGIC; \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_UNLOCK(_lockp, _state) \
+ do { \
+ SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
+ SFC_EFSYS_UNLOCK(_lockp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* STAT */
+
+typedef uint64_t efsys_stat_t;
+
+#define EFSYS_STAT_INCR(_knp, _delta) \
+ do { \
+ *(_knp) += (_delta); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_DECR(_knp, _delta) \
+ do { \
+ *(_knp) -= (_delta); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET(_knp, _val) \
+ do { \
+ *(_knp) = (_val); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
+ do { \
+ *(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* ERR */
+
+#if EFSYS_OPT_DECODE_INTR_FATAL
+#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
+ do { \
+ (void)(_esip); \
+ SFC_GENERIC_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \
+ (_code), (_dword0), (_dword1)); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+#endif
+
+/* ASSERT */
+
+/* RTE_VERIFY from DPDK treats expressions with % operator incorrectly,
+ * so we re-implement it here
+ */
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+#define EFSYS_ASSERT(_exp) \
+ do { \
+ if (unlikely(!(_exp))) \
+ rte_panic("line %d\tassert \"%s\" failed\n", \
+ __LINE__, (#_exp)); \
+ } while (0)
+#else
+#define EFSYS_ASSERT(_exp) (void)(_exp)
+#endif
+
+#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))
+
+#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
+#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
+#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
+
+/* ROTATE */
+
+#define EFSYS_HAS_ROTL_DWORD 0
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_COMMON_EFSYS_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/meson.build b/src/spdk/dpdk/drivers/net/sfc/meson.build
new file mode 100644
index 00000000..2d34e869
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/meson.build
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Copyright (c) 2016-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+if arch_subdir != 'x86' or cc.sizeof('void *') == 4
+ build = false
+endif
+
+allow_experimental_apis = true
+
+extra_flags = []
+
+# Strict-aliasing rules are violated by rte_eth_link to uint64_t casts
+extra_flags += '-Wno-strict-aliasing'
+
+# Enable more warnings
+extra_flags += [
+ '-Wextra',
+ '-Wdisabled-optimization'
+]
+
+# Compiler and version dependent flags
+extra_flags += [
+ '-Waggregate-return',
+ '-Wnested-externs',
+ '-Wbad-function-cast'
+]
+
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'sfc_ethdev.c',
+ 'sfc_kvargs.c',
+ 'sfc.c',
+ 'sfc_mcdi.c',
+ 'sfc_intr.c',
+ 'sfc_ev.c',
+ 'sfc_port.c',
+ 'sfc_rx.c',
+ 'sfc_tx.c',
+ 'sfc_tso.c',
+ 'sfc_filter.c',
+ 'sfc_flow.c',
+ 'sfc_dp.c',
+ 'sfc_ef10_rx.c',
+ 'sfc_ef10_essb_rx.c',
+ 'sfc_ef10_tx.c'
+)
+
+includes += include_directories('base')
diff --git a/src/spdk/dpdk/drivers/net/sfc/rte_pmd_sfc_version.map b/src/spdk/dpdk/drivers/net/sfc/rte_pmd_sfc_version.map
new file mode 100644
index 00000000..31eca32e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/rte_pmd_sfc_version.map
@@ -0,0 +1,4 @@
+DPDK_17.02 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc.c b/src/spdk/dpdk/drivers/net/sfc/sfc.c
new file mode 100644
index 00000000..6690053f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc.c
@@ -0,0 +1,1103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+/* sysconf() */
+#include <unistd.h>
+
+#include <rte_errno.h>
+#include <rte_alarm.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_tx.h"
+#include "sfc_kvargs.h"
+#include "sfc_tweak.h"
+
+
+int
+sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
+ size_t len, int socket_id, efsys_mem_t *esmp)
+{
+ const struct rte_memzone *mz;
+
+ sfc_log_init(sa, "name=%s id=%u len=%lu socket_id=%d",
+ name, id, len, socket_id);
+
+ mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
+ sysconf(_SC_PAGESIZE), socket_id);
+ if (mz == NULL) {
+ sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
+ name, (unsigned int)id, (unsigned int)len, socket_id,
+ rte_strerror(rte_errno));
+ return ENOMEM;
+ }
+
+ esmp->esm_addr = mz->iova;
+ if (esmp->esm_addr == RTE_BAD_IOVA) {
+ (void)rte_memzone_free(mz);
+ return EFAULT;
+ }
+
+ esmp->esm_mz = mz;
+ esmp->esm_base = mz->addr;
+
+ return 0;
+}
+
+void
+sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
+{
+ int rc;
+
+ sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
+
+ rc = rte_memzone_free(esmp->esm_mz);
+ if (rc != 0)
+ sfc_err(sa, "rte_memzone_free(() failed: %d", rc);
+
+ memset(esmp, 0, sizeof(*esmp));
+}
+
+static uint32_t
+sfc_phy_cap_from_link_speeds(uint32_t speeds)
+{
+ uint32_t phy_caps = 0;
+
+ if (~speeds & ETH_LINK_SPEED_FIXED) {
+ phy_caps |= (1 << EFX_PHY_CAP_AN);
+ /*
+ * If no speeds are specified in the mask, any supported
+ * may be negotiated
+ */
+ if (speeds == ETH_LINK_SPEED_AUTONEG)
+ phy_caps |=
+ (1 << EFX_PHY_CAP_1000FDX) |
+ (1 << EFX_PHY_CAP_10000FDX) |
+ (1 << EFX_PHY_CAP_25000FDX) |
+ (1 << EFX_PHY_CAP_40000FDX) |
+ (1 << EFX_PHY_CAP_50000FDX) |
+ (1 << EFX_PHY_CAP_100000FDX);
+ }
+ if (speeds & ETH_LINK_SPEED_1G)
+ phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
+ if (speeds & ETH_LINK_SPEED_10G)
+ phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
+ if (speeds & ETH_LINK_SPEED_25G)
+ phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
+ if (speeds & ETH_LINK_SPEED_40G)
+ phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
+ if (speeds & ETH_LINK_SPEED_50G)
+ phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
+ if (speeds & ETH_LINK_SPEED_100G)
+ phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
+
+ return phy_caps;
+}
+
+/*
+ * Check requested device level configuration.
+ * Receive and transmit configuration is checked in corresponding
+ * modules.
+ */
+static int
+sfc_check_conf(struct sfc_adapter *sa)
+{
+ const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
+ int rc = 0;
+
+ sa->port.phy_adv_cap =
+ sfc_phy_cap_from_link_speeds(conf->link_speeds) &
+ sa->port.phy_adv_cap_mask;
+ if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
+ sfc_err(sa, "No link speeds from mask %#x are supported",
+ conf->link_speeds);
+ rc = EINVAL;
+ }
+
+#if !EFSYS_OPT_LOOPBACK
+ if (conf->lpbk_mode != 0) {
+ sfc_err(sa, "Loopback not supported");
+ rc = EINVAL;
+ }
+#endif
+
+ if (conf->dcb_capability_en != 0) {
+ sfc_err(sa, "Priority-based flow control not supported");
+ rc = EINVAL;
+ }
+
+ if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ sfc_err(sa, "Flow Director not supported");
+ rc = EINVAL;
+ }
+
+ if ((conf->intr_conf.lsc != 0) &&
+ (sa->intr.type != EFX_INTR_LINE) &&
+ (sa->intr.type != EFX_INTR_MESSAGE)) {
+ sfc_err(sa, "Link status change interrupt not supported");
+ rc = EINVAL;
+ }
+
+ if (conf->intr_conf.rxq != 0) {
+ sfc_err(sa, "Receive queue interrupt not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+/*
+ * Find out maximum number of receive and transmit queues which could be
+ * advertised.
+ *
+ * NIC is kept initialized on success to allow other modules acquire
+ * defaults and capabilities.
+ */
+static int
+sfc_estimate_resource_limits(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ efx_drv_limits_t limits;
+ int rc;
+ uint32_t evq_allocated;
+ uint32_t rxq_allocated;
+ uint32_t txq_allocated;
+
+ memset(&limits, 0, sizeof(limits));
+
+ /* Request at least one Rx and Tx queue */
+ limits.edl_min_rxq_count = 1;
+ limits.edl_min_txq_count = 1;
+ /* Management event queue plus event queue for each Tx and Rx queue */
+ limits.edl_min_evq_count =
+ 1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;
+
+ /* Divide by number of functions to guarantee that all functions
+ * will get promised resources
+ */
+ /* FIXME Divide by number of functions (not 2) below */
+ limits.edl_max_evq_count = encp->enc_evq_limit / 2;
+ SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);
+
+ /* Split equally between receive and transmit */
+ limits.edl_max_rxq_count =
+ MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
+ SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);
+
+ limits.edl_max_txq_count =
+ MIN(encp->enc_txq_limit,
+ limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);
+
+ if (sa->tso)
+ limits.edl_max_txq_count =
+ MIN(limits.edl_max_txq_count,
+ encp->enc_fw_assisted_tso_v2_n_contexts /
+ encp->enc_hw_pf_count);
+
+ SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);
+
+ /* Configure the minimum required resources needed for the
+ * driver to operate, and the maximum desired resources that the
+ * driver is capable of using.
+ */
+ efx_nic_set_drv_limits(sa->nic, &limits);
+
+ sfc_log_init(sa, "init nic");
+ rc = efx_nic_init(sa->nic);
+ if (rc != 0)
+ goto fail_nic_init;
+
+ /* Find resource dimensions assigned by firmware to this function */
+ rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
+ &txq_allocated);
+ if (rc != 0)
+ goto fail_get_vi_pool;
+
+ /* It still may allocate more than maximum, ensure limit */
+ evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
+ rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
+ txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
+
+ /* Subtract management EVQ not used for traffic */
+ SFC_ASSERT(evq_allocated > 0);
+ evq_allocated--;
+
+ /* Right now we use separate EVQ for Rx and Tx */
+ sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
+ sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
+
+ /* Keep NIC initialized */
+ return 0;
+
+fail_get_vi_pool:
+fail_nic_init:
+ efx_nic_fini(sa->nic);
+ return rc;
+}
+
+static int
+sfc_set_drv_limits(struct sfc_adapter *sa)
+{
+ const struct rte_eth_dev_data *data = sa->eth_dev->data;
+ efx_drv_limits_t lim;
+
+ memset(&lim, 0, sizeof(lim));
+
+ /* Limits are strict since take into account initial estimation */
+ lim.edl_min_evq_count = lim.edl_max_evq_count =
+ 1 + data->nb_rx_queues + data->nb_tx_queues;
+ lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
+ lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
+
+ return efx_nic_set_drv_limits(sa->nic, &lim);
+}
+
+static int
+sfc_set_fw_subvariant(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
+ unsigned int txq_index;
+ efx_nic_fw_subvariant_t req_fw_subvariant;
+ efx_nic_fw_subvariant_t cur_fw_subvariant;
+ int rc;
+
+ if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
+ sfc_info(sa, "no-Tx-checksum subvariant not supported");
+ return 0;
+ }
+
+ for (txq_index = 0; txq_index < sa->txq_count; ++txq_index) {
+ struct sfc_txq_info *txq_info = &sa->txq_info[txq_index];
+
+ if (txq_info->txq != NULL)
+ tx_offloads |= txq_info->txq->offloads;
+ }
+
+ if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+ req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
+ else
+ req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
+
+ rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
+ if (rc != 0) {
+ sfc_err(sa, "failed to get FW subvariant: %d", rc);
+ return rc;
+ }
+ sfc_info(sa, "FW subvariant is %u vs required %u",
+ cur_fw_subvariant, req_fw_subvariant);
+
+ if (cur_fw_subvariant == req_fw_subvariant)
+ return 0;
+
+ rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
+ if (rc != 0) {
+ sfc_err(sa, "failed to set FW subvariant %u: %d",
+ req_fw_subvariant, rc);
+ return rc;
+ }
+ sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);
+
+ return 0;
+}
+
+static int
+sfc_try_start(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+ SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);
+
+ sfc_log_init(sa, "set FW subvariant");
+ rc = sfc_set_fw_subvariant(sa);
+ if (rc != 0)
+ goto fail_set_fw_subvariant;
+
+ sfc_log_init(sa, "set resource limits");
+ rc = sfc_set_drv_limits(sa);
+ if (rc != 0)
+ goto fail_set_drv_limits;
+
+ sfc_log_init(sa, "init nic");
+ rc = efx_nic_init(sa->nic);
+ if (rc != 0)
+ goto fail_nic_init;
+
+ encp = efx_nic_cfg_get(sa->nic);
+ if (encp->enc_tunnel_encapsulations_supported != 0) {
+ sfc_log_init(sa, "apply tunnel config");
+ rc = efx_tunnel_reconfigure(sa->nic);
+ if (rc != 0)
+ goto fail_tunnel_reconfigure;
+ }
+
+ rc = sfc_intr_start(sa);
+ if (rc != 0)
+ goto fail_intr_start;
+
+ rc = sfc_ev_start(sa);
+ if (rc != 0)
+ goto fail_ev_start;
+
+ rc = sfc_port_start(sa);
+ if (rc != 0)
+ goto fail_port_start;
+
+ rc = sfc_rx_start(sa);
+ if (rc != 0)
+ goto fail_rx_start;
+
+ rc = sfc_tx_start(sa);
+ if (rc != 0)
+ goto fail_tx_start;
+
+ rc = sfc_flow_start(sa);
+ if (rc != 0)
+ goto fail_flows_insert;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_flows_insert:
+ sfc_tx_stop(sa);
+
+fail_tx_start:
+ sfc_rx_stop(sa);
+
+fail_rx_start:
+ sfc_port_stop(sa);
+
+fail_port_start:
+ sfc_ev_stop(sa);
+
+fail_ev_start:
+ sfc_intr_stop(sa);
+
+fail_intr_start:
+fail_tunnel_reconfigure:
+ efx_nic_fini(sa->nic);
+
+fail_nic_init:
+fail_set_drv_limits:
+fail_set_fw_subvariant:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+int
+sfc_start(struct sfc_adapter *sa)
+{
+ unsigned int start_tries = 3;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ switch (sa->state) {
+ case SFC_ADAPTER_CONFIGURED:
+ break;
+ case SFC_ADAPTER_STARTED:
+ sfc_notice(sa, "already started");
+ return 0;
+ default:
+ rc = EINVAL;
+ goto fail_bad_state;
+ }
+
+ sa->state = SFC_ADAPTER_STARTING;
+
+ do {
+ rc = sfc_try_start(sa);
+ } while ((--start_tries > 0) &&
+ (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));
+
+ if (rc != 0)
+ goto fail_try_start;
+
+ sa->state = SFC_ADAPTER_STARTED;
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_try_start:
+ sa->state = SFC_ADAPTER_CONFIGURED;
+fail_bad_state:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_stop(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ switch (sa->state) {
+ case SFC_ADAPTER_STARTED:
+ break;
+ case SFC_ADAPTER_CONFIGURED:
+ sfc_notice(sa, "already stopped");
+ return;
+ default:
+ sfc_err(sa, "stop in unexpected state %u", sa->state);
+ SFC_ASSERT(B_FALSE);
+ return;
+ }
+
+ sa->state = SFC_ADAPTER_STOPPING;
+
+ sfc_flow_stop(sa);
+ sfc_tx_stop(sa);
+ sfc_rx_stop(sa);
+ sfc_port_stop(sa);
+ sfc_ev_stop(sa);
+ sfc_intr_stop(sa);
+ efx_nic_fini(sa->nic);
+
+ sa->state = SFC_ADAPTER_CONFIGURED;
+ sfc_log_init(sa, "done");
+}
+
+static int
+sfc_restart(struct sfc_adapter *sa)
+{
+ int rc;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ if (sa->state != SFC_ADAPTER_STARTED)
+ return EINVAL;
+
+ sfc_stop(sa);
+
+ rc = sfc_start(sa);
+ if (rc != 0)
+ sfc_err(sa, "restart failed");
+
+ return rc;
+}
+
+static void
+sfc_restart_if_required(void *arg)
+{
+ struct sfc_adapter *sa = arg;
+
+ /* If restart is scheduled, clear the flag and do it */
+ if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
+ 1, 0)) {
+ sfc_adapter_lock(sa);
+ if (sa->state == SFC_ADAPTER_STARTED)
+ (void)sfc_restart(sa);
+ sfc_adapter_unlock(sa);
+ }
+}
+
+void
+sfc_schedule_restart(struct sfc_adapter *sa)
+{
+ int rc;
+
+ /* Schedule restart alarm if it is not scheduled yet */
+ if (!rte_atomic32_test_and_set(&sa->restart_required))
+ return;
+
+ rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
+ if (rc == -ENOTSUP)
+ sfc_warn(sa, "alarms are not supported, restart is pending");
+ else if (rc != 0)
+ sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
+ else
+ sfc_notice(sa, "restart scheduled");
+}
+
+int
+sfc_configure(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
+ sa->state == SFC_ADAPTER_CONFIGURED);
+ sa->state = SFC_ADAPTER_CONFIGURING;
+
+ rc = sfc_check_conf(sa);
+ if (rc != 0)
+ goto fail_check_conf;
+
+ rc = sfc_intr_configure(sa);
+ if (rc != 0)
+ goto fail_intr_configure;
+
+ rc = sfc_port_configure(sa);
+ if (rc != 0)
+ goto fail_port_configure;
+
+ rc = sfc_rx_configure(sa);
+ if (rc != 0)
+ goto fail_rx_configure;
+
+ rc = sfc_tx_configure(sa);
+ if (rc != 0)
+ goto fail_tx_configure;
+
+ sa->state = SFC_ADAPTER_CONFIGURED;
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_tx_configure:
+ sfc_rx_close(sa);
+
+fail_rx_configure:
+ sfc_port_close(sa);
+
+fail_port_configure:
+ sfc_intr_close(sa);
+
+fail_intr_configure:
+fail_check_conf:
+ sa->state = SFC_ADAPTER_INITIALIZED;
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_close(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
+ sa->state = SFC_ADAPTER_CLOSING;
+
+ sfc_tx_close(sa);
+ sfc_rx_close(sa);
+ sfc_port_close(sa);
+ sfc_intr_close(sa);
+
+ sa->state = SFC_ADAPTER_INITIALIZED;
+ sfc_log_init(sa, "done");
+}
+
+static int
+sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
+{
+ struct rte_eth_dev *eth_dev = sa->eth_dev;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ efsys_bar_t *ebp = &sa->mem_bar;
+ struct rte_mem_resource *res = &pci_dev->mem_resource[membar];
+
+ SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
+ ebp->esb_rid = membar;
+ ebp->esb_dev = pci_dev;
+ ebp->esb_base = res->addr;
+ return 0;
+}
+
+static void
+sfc_mem_bar_fini(struct sfc_adapter *sa)
+{
+ efsys_bar_t *ebp = &sa->mem_bar;
+
+ SFC_BAR_LOCK_DESTROY(ebp);
+ memset(ebp, 0, sizeof(*ebp));
+}
+
+/*
+ * A fixed RSS key which has a property of being symmetric
+ * (symmetrical flows are distributed to the same CPU)
+ * and also known to give a uniform distribution
+ * (a good distribution of traffic between different CPUs)
+ */
+static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+};
+
+static int
+sfc_rss_attach(struct sfc_adapter *sa)
+{
+ struct sfc_rss *rss = &sa->rss;
+ int rc;
+
+ rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
+ if (rc != 0)
+ goto fail_intr_init;
+
+ rc = efx_ev_init(sa->nic);
+ if (rc != 0)
+ goto fail_ev_init;
+
+ rc = efx_rx_init(sa->nic);
+ if (rc != 0)
+ goto fail_rx_init;
+
+ rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
+ if (rc != 0)
+ goto fail_scale_support_get;
+
+ rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
+ if (rc != 0)
+ goto fail_hash_support_get;
+
+ rc = sfc_rx_hash_init(sa);
+ if (rc != 0)
+ goto fail_rx_hash_init;
+
+ efx_rx_fini(sa->nic);
+ efx_ev_fini(sa->nic);
+ efx_intr_fini(sa->nic);
+
+ rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));
+
+ return 0;
+
+fail_rx_hash_init:
+fail_hash_support_get:
+fail_scale_support_get:
+ efx_rx_fini(sa->nic);
+
+fail_rx_init:
+ efx_ev_fini(sa->nic);
+
+fail_ev_init:
+ efx_intr_fini(sa->nic);
+
+fail_intr_init:
+ return rc;
+}
+
+static void
+sfc_rss_detach(struct sfc_adapter *sa)
+{
+ sfc_rx_hash_fini(sa);
+}
+
+int
+sfc_attach(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp;
+ efx_nic_t *enp = sa->nic;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ efx_mcdi_new_epoch(enp);
+
+ sfc_log_init(sa, "reset nic");
+ rc = efx_nic_reset(enp);
+ if (rc != 0)
+ goto fail_nic_reset;
+
+ /*
+ * Probed NIC is sufficient for tunnel init.
+ * Initialize tunnel support to be able to use libefx
+ * efx_tunnel_config_udp_{add,remove}() in any state and
+ * efx_tunnel_reconfigure() on start up.
+ */
+ rc = efx_tunnel_init(enp);
+ if (rc != 0)
+ goto fail_tunnel_init;
+
+ encp = efx_nic_cfg_get(sa->nic);
+
+ if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
+ sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
+ if (!sa->tso)
+ sfc_warn(sa,
+ "TSO support isn't available on this adapter");
+ }
+
+ sfc_log_init(sa, "estimate resource limits");
+ rc = sfc_estimate_resource_limits(sa);
+ if (rc != 0)
+ goto fail_estimate_rsrc_limits;
+
+ sa->txq_max_entries = encp->enc_txq_max_ndescs;
+ SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
+
+ rc = sfc_intr_attach(sa);
+ if (rc != 0)
+ goto fail_intr_attach;
+
+ rc = sfc_ev_attach(sa);
+ if (rc != 0)
+ goto fail_ev_attach;
+
+ rc = sfc_port_attach(sa);
+ if (rc != 0)
+ goto fail_port_attach;
+
+ rc = sfc_rss_attach(sa);
+ if (rc != 0)
+ goto fail_rss_attach;
+
+ rc = sfc_filter_attach(sa);
+ if (rc != 0)
+ goto fail_filter_attach;
+
+ sfc_log_init(sa, "fini nic");
+ efx_nic_fini(enp);
+
+ sfc_flow_init(sa);
+
+ sa->state = SFC_ADAPTER_INITIALIZED;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_filter_attach:
+ sfc_rss_detach(sa);
+
+fail_rss_attach:
+ sfc_port_detach(sa);
+
+fail_port_attach:
+ sfc_ev_detach(sa);
+
+fail_ev_attach:
+ sfc_intr_detach(sa);
+
+fail_intr_attach:
+ efx_nic_fini(sa->nic);
+
+fail_estimate_rsrc_limits:
+fail_tunnel_init:
+ efx_tunnel_fini(sa->nic);
+
+fail_nic_reset:
+
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_detach(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ sfc_flow_fini(sa);
+
+ sfc_filter_detach(sa);
+ sfc_rss_detach(sa);
+ sfc_port_detach(sa);
+ sfc_ev_detach(sa);
+ sfc_intr_detach(sa);
+ efx_tunnel_fini(sa->nic);
+
+ sa->state = SFC_ADAPTER_UNINITIALIZED;
+}
+
+static int
+sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ uint32_t *value = opaque;
+
+ if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
+ *value = EFX_FW_VARIANT_DONT_CARE;
+ else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
+ *value = EFX_FW_VARIANT_FULL_FEATURED;
+ else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
+ *value = EFX_FW_VARIANT_LOW_LATENCY;
+ else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
+ *value = EFX_FW_VARIANT_PACKED_STREAM;
+ else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
+ *value = EFX_FW_VARIANT_DPDK;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
+{
+ efx_nic_fw_info_t enfi;
+ int rc;
+
+ rc = efx_nic_get_fw_version(sa->nic, &enfi);
+ if (rc != 0)
+ return rc;
+ else if (!enfi.enfi_dpcpu_fw_ids_valid)
+ return ENOTSUP;
+
+ /*
+ * Firmware variant can be uniquely identified by the RxDPCPU
+ * firmware id
+ */
+ switch (enfi.enfi_rx_dpcpu_fw_id) {
+ case EFX_RXDP_FULL_FEATURED_FW_ID:
+ *efv = EFX_FW_VARIANT_FULL_FEATURED;
+ break;
+
+ case EFX_RXDP_LOW_LATENCY_FW_ID:
+ *efv = EFX_FW_VARIANT_LOW_LATENCY;
+ break;
+
+ case EFX_RXDP_PACKED_STREAM_FW_ID:
+ *efv = EFX_FW_VARIANT_PACKED_STREAM;
+ break;
+
+ case EFX_RXDP_DPDK_FW_ID:
+ *efv = EFX_FW_VARIANT_DPDK;
+ break;
+
+ default:
+ /*
+ * Other firmware variants are not considered, since they are
+ * not supported in the device parameters
+ */
+ *efv = EFX_FW_VARIANT_DONT_CARE;
+ break;
+ }
+
+ return 0;
+}
+
+static const char *
+sfc_fw_variant2str(efx_fw_variant_t efv)
+{
+ switch (efv) {
+ case EFX_RXDP_FULL_FEATURED_FW_ID:
+ return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
+ case EFX_RXDP_LOW_LATENCY_FW_ID:
+ return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
+ case EFX_RXDP_PACKED_STREAM_FW_ID:
+ return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
+ case EFX_RXDP_DPDK_FW_ID:
+ return SFC_KVARG_FW_VARIANT_DPDK;
+ default:
+ return "unknown";
+ }
+}
+
+static int
+sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)
+{
+ int rc;
+ long value;
+
+ value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
+ sfc_kvarg_long_handler, &value);
+ if (rc != 0)
+ return rc;
+
+ if (value < 0 ||
+ (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
+ sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
+ "was set (%ld);", value);
+ sfc_err(sa, "it must not be less than 0 or greater than %u",
+ EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);
+ return EINVAL;
+ }
+
+ sa->rxd_wait_timeout_ns = value;
+ return 0;
+}
+
+static int
+sfc_nic_probe(struct sfc_adapter *sa)
+{
+ efx_nic_t *enp = sa->nic;
+ efx_fw_variant_t preferred_efv;
+ efx_fw_variant_t efv;
+ int rc;
+
+ preferred_efv = EFX_FW_VARIANT_DONT_CARE;
+ rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
+ sfc_kvarg_fv_variant_handler,
+ &preferred_efv);
+ if (rc != 0) {
+ sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
+ return rc;
+ }
+
+ rc = sfc_kvarg_rxd_wait_timeout_ns(sa);
+ if (rc != 0)
+ return rc;
+
+ rc = efx_nic_probe(enp, preferred_efv);
+ if (rc == EACCES) {
+ /* Unprivileged functions cannot set FW variant */
+ rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
+ }
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_get_fw_variant(sa, &efv);
+ if (rc == ENOTSUP) {
+ sfc_warn(sa, "FW variant can not be obtained");
+ return 0;
+ }
+ if (rc != 0)
+ return rc;
+
+ /* Check that firmware variant was changed to the requested one */
+ if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
+ sfc_warn(sa, "FW variant has not changed to the requested %s",
+ sfc_fw_variant2str(preferred_efv));
+ }
+
+ sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
+
+ return 0;
+}
+
+int
+sfc_probe(struct sfc_adapter *sa)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
+ unsigned int membar;
+ efx_nic_t *enp;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ sa->socket_id = rte_socket_id();
+ rte_atomic32_init(&sa->restart_required);
+
+ sfc_log_init(sa, "get family");
+ rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
+ &sa->family, &membar);
+ if (rc != 0)
+ goto fail_family;
+ sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);
+
+ sfc_log_init(sa, "init mem bar");
+ rc = sfc_mem_bar_init(sa, membar);
+ if (rc != 0)
+ goto fail_mem_bar_init;
+
+ sfc_log_init(sa, "create nic");
+ rte_spinlock_init(&sa->nic_lock);
+ rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
+ &sa->mem_bar, &sa->nic_lock, &enp);
+ if (rc != 0)
+ goto fail_nic_create;
+ sa->nic = enp;
+
+ rc = sfc_mcdi_init(sa);
+ if (rc != 0)
+ goto fail_mcdi_init;
+
+ sfc_log_init(sa, "probe nic");
+ rc = sfc_nic_probe(sa);
+ if (rc != 0)
+ goto fail_nic_probe;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_nic_probe:
+ sfc_mcdi_fini(sa);
+
+fail_mcdi_init:
+ sfc_log_init(sa, "destroy nic");
+ sa->nic = NULL;
+ efx_nic_destroy(enp);
+
+fail_nic_create:
+ sfc_mem_bar_fini(sa);
+
+fail_mem_bar_init:
+fail_family:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_unprobe(struct sfc_adapter *sa)
+{
+ efx_nic_t *enp = sa->nic;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ sfc_log_init(sa, "unprobe nic");
+ efx_nic_unprobe(enp);
+
+ sfc_mcdi_fini(sa);
+
+ /*
+ * Make sure there is no pending alarm to restart since we are
+ * going to free device private which is passed as the callback
+ * opaque data. A new alarm cannot be scheduled since MCDI is
+ * shut down.
+ */
+ rte_eal_alarm_cancel(sfc_restart_if_required, sa);
+
+ sfc_log_init(sa, "destroy nic");
+ sa->nic = NULL;
+ efx_nic_destroy(enp);
+
+ sfc_mem_bar_fini(sa);
+
+ sfc_flow_fini(sa);
+ sa->state = SFC_ADAPTER_UNINITIALIZED;
+}
+
+uint32_t
+sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,
+ uint32_t ll_default)
+{
+ size_t lt_prefix_str_size = strlen(lt_prefix_str);
+ size_t lt_str_size_max;
+ char *lt_str = NULL;
+ int ret;
+
+ if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
+ ++lt_prefix_str_size; /* Reserve space for prefix separator */
+ lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
+ } else {
+ return RTE_LOGTYPE_PMD;
+ }
+
+ lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
+ if (lt_str == NULL)
+ return RTE_LOGTYPE_PMD;
+
+ strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
+ lt_str[lt_prefix_str_size - 1] = '.';
+ rte_pci_device_name(&sa->pci_addr, lt_str + lt_prefix_str_size,
+ lt_str_size_max - lt_prefix_str_size);
+ lt_str[lt_str_size_max - 1] = '\0';
+
+ ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
+ rte_free(lt_str);
+
+ return (ret < 0) ? RTE_LOGTYPE_PMD : ret;
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc.h b/src/spdk/dpdk/drivers/net/sfc/sfc.h
new file mode 100644
index 00000000..51be4403
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_H
+#define _SFC_H
+
+#include <stdbool.h>
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_ethdev_driver.h>
+#include <rte_kvargs.h>
+#include <rte_spinlock.h>
+#include <rte_atomic.h>
+
+#include "efx.h"
+
+#include "sfc_filter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * +---------------+
+ * | UNINITIALIZED |<-----------+
+ * +---------------+ |
+ * |.eth_dev_init |.eth_dev_uninit
+ * V |
+ * +---------------+------------+
+ * | INITIALIZED |
+ * +---------------+<-----------<---------------+
+ * |.dev_configure | |
+ * V |failed |
+ * +---------------+------------+ |
+ * | CONFIGURING | |
+ * +---------------+----+ |
+ * |success | |
+ * | | +---------------+
+ * | | | CLOSING |
+ * | | +---------------+
+ * | | ^
+ * V |.dev_configure |
+ * +---------------+----+ |.dev_close
+ * | CONFIGURED |----------------------------+
+ * +---------------+<-----------+
+ * |.dev_start |
+ * V |
+ * +---------------+ |
+ * | STARTING |------------^
+ * +---------------+ failed |
+ * |success |
+ * | +---------------+
+ * | | STOPPING |
+ * | +---------------+
+ * | ^
+ * V |.dev_stop
+ * +---------------+------------+
+ * | STARTED |
+ * +---------------+
+ */
+enum sfc_adapter_state {
+ SFC_ADAPTER_UNINITIALIZED = 0,
+ SFC_ADAPTER_INITIALIZED,
+ SFC_ADAPTER_CONFIGURING,
+ SFC_ADAPTER_CONFIGURED,
+ SFC_ADAPTER_CLOSING,
+ SFC_ADAPTER_STARTING,
+ SFC_ADAPTER_STARTED,
+ SFC_ADAPTER_STOPPING,
+
+ SFC_ADAPTER_NSTATES
+};
+
+enum sfc_dev_filter_mode {
+ SFC_DEV_FILTER_MODE_PROMISC = 0,
+ SFC_DEV_FILTER_MODE_ALLMULTI,
+
+ SFC_DEV_FILTER_NMODES
+};
+
+enum sfc_mcdi_state {
+ SFC_MCDI_UNINITIALIZED = 0,
+ SFC_MCDI_INITIALIZED,
+ SFC_MCDI_BUSY,
+ SFC_MCDI_COMPLETED,
+
+ SFC_MCDI_NSTATES
+};
+
+struct sfc_mcdi {
+ rte_spinlock_t lock;
+ efsys_mem_t mem;
+ enum sfc_mcdi_state state;
+ efx_mcdi_transport_t transport;
+ uint32_t logtype;
+ uint32_t proxy_handle;
+ efx_rc_t proxy_result;
+};
+
+struct sfc_intr {
+ efx_intr_type_t type;
+ rte_intr_callback_fn handler;
+ boolean_t lsc_intr;
+};
+
+struct sfc_rxq_info;
+struct sfc_txq_info;
+struct sfc_dp_rx;
+
+struct sfc_port {
+ unsigned int lsc_seq;
+
+ uint32_t phy_adv_cap_mask;
+ uint32_t phy_adv_cap;
+
+ unsigned int flow_ctrl;
+ boolean_t flow_ctrl_autoneg;
+ size_t pdu;
+
+ /*
+ * Flow API isolated mode overrides promisc and allmulti settings;
+ * they won't be applied if isolated mode is active
+ */
+ boolean_t isolated;
+ boolean_t promisc;
+ boolean_t allmulti;
+
+ struct ether_addr default_mac_addr;
+
+ unsigned int max_mcast_addrs;
+ unsigned int nb_mcast_addrs;
+ uint8_t *mcast_addrs;
+
+ rte_spinlock_t mac_stats_lock;
+ uint64_t *mac_stats_buf;
+ unsigned int mac_stats_nb_supported;
+ efsys_mem_t mac_stats_dma_mem;
+ boolean_t mac_stats_reset_pending;
+ uint16_t mac_stats_update_period_ms;
+ uint32_t mac_stats_update_generation;
+ boolean_t mac_stats_periodic_dma_supported;
+ uint64_t mac_stats_last_request_timestamp;
+
+ uint32_t mac_stats_mask[EFX_MAC_STATS_MASK_NPAGES];
+};
+
+struct sfc_rss_hf_rte_to_efx {
+ uint64_t rte;
+ efx_rx_hash_type_t efx;
+};
+
+struct sfc_rss {
+ unsigned int channels;
+ efx_rx_scale_context_type_t context_type;
+ efx_rx_hash_support_t hash_support;
+ efx_rx_hash_alg_t hash_alg;
+ unsigned int hf_map_nb_entries;
+ struct sfc_rss_hf_rte_to_efx *hf_map;
+
+ efx_rx_hash_type_t hash_types;
+ unsigned int tbl[EFX_RSS_TBL_SIZE];
+ uint8_t key[EFX_RSS_KEY_SIZE];
+};
+
+/* Adapter private data */
+struct sfc_adapter {
+ /*
+ * PMD setup and configuration is not thread safe. Since it is not
+ * performance sensitive, it is better to guarantee thread-safety
+ * and add device level lock. Adapter control operations which
+ * change its state should acquire the lock.
+ */
+ rte_spinlock_t lock;
+ enum sfc_adapter_state state;
+ struct rte_pci_addr pci_addr;
+ uint16_t port_id;
+ struct rte_eth_dev *eth_dev;
+ struct rte_kvargs *kvargs;
+ uint32_t logtype_main;
+ int socket_id;
+ efsys_bar_t mem_bar;
+ efx_family_t family;
+ efx_nic_t *nic;
+ rte_spinlock_t nic_lock;
+ rte_atomic32_t restart_required;
+
+ struct sfc_mcdi mcdi;
+ struct sfc_intr intr;
+ struct sfc_port port;
+ struct sfc_filter filter;
+
+ unsigned int rxq_max;
+ unsigned int txq_max;
+
+ unsigned int txq_max_entries;
+
+ uint32_t evq_flags;
+ unsigned int evq_count;
+
+ unsigned int mgmt_evq_index;
+ /*
+ * The lock is used to serialise management event queue polling
+ * which can be done from different context. Also the lock
+ * guarantees that mgmt_evq_running is preserved while the lock
+ * is held. It is used to serialise polling and start/stop
+ * operations.
+ *
+ * Locks which may be held when the lock is acquired:
+ * - adapter lock, when:
+ * - device start/stop to change mgmt_evq_running
+ * - any control operations in client side MCDI proxy handling to
+ * poll management event queue waiting for proxy response
+ * - MCDI lock, when:
+ * - any control operations in client side MCDI proxy handling to
+ * poll management event queue waiting for proxy response
+ *
+ * Locks which are acquired with the lock held:
+ * - nic_lock, when:
+ * - MC event processing on management event queue polling
+ * (e.g. MC REBOOT or BADASSERT events)
+ */
+ rte_spinlock_t mgmt_evq_lock;
+ bool mgmt_evq_running;
+ struct sfc_evq *mgmt_evq;
+
+ unsigned int rxq_count;
+ struct sfc_rxq_info *rxq_info;
+
+ unsigned int txq_count;
+ struct sfc_txq_info *txq_info;
+
+ boolean_t tso;
+
+ uint32_t rxd_wait_timeout_ns;
+
+ struct sfc_rss rss;
+
+ /*
+ * Shared memory copy of the Rx datapath name to be used by
+ * the secondary process to find Rx datapath to be used.
+ */
+ char *dp_rx_name;
+ const struct sfc_dp_rx *dp_rx;
+
+ /*
+ * Shared memory copy of the Tx datapath name to be used by
+ * the secondary process to find Rx datapath to be used.
+ */
+ char *dp_tx_name;
+ const struct sfc_dp_tx *dp_tx;
+};
+
+/*
+ * Add wrapper functions to acquire/release lock to be able to remove or
+ * change the lock in one place.
+ */
+
+static inline void
+sfc_adapter_lock_init(struct sfc_adapter *sa)
+{
+ rte_spinlock_init(&sa->lock);
+}
+
+static inline int
+sfc_adapter_is_locked(struct sfc_adapter *sa)
+{
+ return rte_spinlock_is_locked(&sa->lock);
+}
+
+static inline void
+sfc_adapter_lock(struct sfc_adapter *sa)
+{
+ rte_spinlock_lock(&sa->lock);
+}
+
+static inline int
+sfc_adapter_trylock(struct sfc_adapter *sa)
+{
+ return rte_spinlock_trylock(&sa->lock);
+}
+
+static inline void
+sfc_adapter_unlock(struct sfc_adapter *sa)
+{
+ rte_spinlock_unlock(&sa->lock);
+}
+
+static inline void
+sfc_adapter_lock_fini(__rte_unused struct sfc_adapter *sa)
+{
+ /* Just for symmetry of the API */
+}
+
+/** Get the number of milliseconds since boot from the default timer */
+static inline uint64_t
+sfc_get_system_msecs(void)
+{
+ return rte_get_timer_cycles() * MS_PER_S / rte_get_timer_hz();
+}
+
+int sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
+ size_t len, int socket_id, efsys_mem_t *esmp);
+void sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp);
+
+uint32_t sfc_register_logtype(struct sfc_adapter *sa,
+ const char *lt_prefix_str,
+ uint32_t ll_default);
+
+int sfc_probe(struct sfc_adapter *sa);
+void sfc_unprobe(struct sfc_adapter *sa);
+int sfc_attach(struct sfc_adapter *sa);
+void sfc_detach(struct sfc_adapter *sa);
+int sfc_start(struct sfc_adapter *sa);
+void sfc_stop(struct sfc_adapter *sa);
+
+void sfc_schedule_restart(struct sfc_adapter *sa);
+
+int sfc_mcdi_init(struct sfc_adapter *sa);
+void sfc_mcdi_fini(struct sfc_adapter *sa);
+
+int sfc_configure(struct sfc_adapter *sa);
+void sfc_close(struct sfc_adapter *sa);
+
+int sfc_intr_attach(struct sfc_adapter *sa);
+void sfc_intr_detach(struct sfc_adapter *sa);
+int sfc_intr_configure(struct sfc_adapter *sa);
+void sfc_intr_close(struct sfc_adapter *sa);
+int sfc_intr_start(struct sfc_adapter *sa);
+void sfc_intr_stop(struct sfc_adapter *sa);
+
+int sfc_port_attach(struct sfc_adapter *sa);
+void sfc_port_detach(struct sfc_adapter *sa);
+int sfc_port_configure(struct sfc_adapter *sa);
+void sfc_port_close(struct sfc_adapter *sa);
+int sfc_port_start(struct sfc_adapter *sa);
+void sfc_port_stop(struct sfc_adapter *sa);
+void sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
+ struct rte_eth_link *link_info);
+int sfc_port_update_mac_stats(struct sfc_adapter *sa);
+int sfc_port_reset_mac_stats(struct sfc_adapter *sa);
+int sfc_set_rx_mode(struct sfc_adapter *sa);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_debug.h b/src/spdk/dpdk/drivers/net/sfc/sfc_debug.h
new file mode 100644
index 00000000..6b600ff4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_debug.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_DEBUG_H_
+#define _SFC_DEBUG_H_
+
+#include <rte_debug.h>
+
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+/* Avoid dependency from RTE_LOG_DP_LEVEL to be able to enable debug check
+ * in the driver only.
+ */
+#define SFC_ASSERT(exp) RTE_VERIFY(exp)
+#else
+/* If the driver debug is not enabled, follow DPDK debug/non-debug */
+#define SFC_ASSERT(exp) RTE_ASSERT(exp)
+#endif
+
+/* Log PMD message, automatically add prefix and \n */
+#define sfc_panic(sa, fmt, args...) \
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ rte_panic("sfc " PCI_PRI_FMT " #%" PRIu8 ": " fmt "\n", \
+ _sa->pci_addr.domain, _sa->pci_addr.bus, \
+ _sa->pci_addr.devid, _sa->pci_addr.function, \
+ _sa->port_id, ##args); \
+ } while (0)
+
+#endif /* _SFC_DEBUG_H_ */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_dp.c b/src/spdk/dpdk/drivers/net/sfc/sfc_dp.c
new file mode 100644
index 00000000..b121dc09
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_dp.c
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <sys/queue.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_log.h>
+
+#include "sfc_dp.h"
+#include "sfc_log.h"
+
+void
+sfc_dp_queue_init(struct sfc_dp_queue *dpq, uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr)
+{
+ dpq->port_id = port_id;
+ dpq->queue_id = queue_id;
+ dpq->pci_addr = *pci_addr;
+}
+
+struct sfc_dp *
+sfc_dp_find_by_name(struct sfc_dp_list *head, enum sfc_dp_type type,
+ const char *name)
+{
+ struct sfc_dp *entry;
+
+ TAILQ_FOREACH(entry, head, links) {
+ if (entry->type != type)
+ continue;
+
+ if (strcmp(entry->name, name) == 0)
+ return entry;
+ }
+
+ return NULL;
+}
+
+struct sfc_dp *
+sfc_dp_find_by_caps(struct sfc_dp_list *head, enum sfc_dp_type type,
+ unsigned int avail_caps)
+{
+ struct sfc_dp *entry;
+
+ TAILQ_FOREACH(entry, head, links) {
+ if (entry->type != type)
+ continue;
+
+ /* Take the first matching */
+ if (sfc_dp_match_hw_fw_caps(entry, avail_caps))
+ return entry;
+ }
+
+ return NULL;
+}
+
+int
+sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry)
+{
+ if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) {
+ SFC_GENERIC_LOG(ERR,
+ "sfc %s dapapath '%s' already registered",
+ entry->type == SFC_DP_RX ? "Rx" :
+ entry->type == SFC_DP_TX ? "Tx" :
+ "unknown",
+ entry->name);
+ return EEXIST;
+ }
+
+ TAILQ_INSERT_TAIL(head, entry, links);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_dp.h b/src/spdk/dpdk/drivers/net/sfc/sfc_dp.h
new file mode 100644
index 00000000..3da65abe
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_dp.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_DP_H
+#define _SFC_DP_H
+
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_pci.h>
+
+#include "sfc_log.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SFC_DIV_ROUND_UP(a, b) \
+ __extension__ ({ \
+ typeof(a) _a = (a); \
+ typeof(b) _b = (b); \
+ \
+ (_a + (_b - 1)) / _b; \
+ })
+
+/**
+ * Datapath exception handler to be provided by the control path.
+ */
+typedef void (sfc_dp_exception_t)(void *ctrl);
+
+enum sfc_dp_type {
+ SFC_DP_RX = 0, /**< Receive datapath */
+ SFC_DP_TX, /**< Transmit datapath */
+};
+
+
+/** Datapath queue run-time information */
+struct sfc_dp_queue {
+ uint16_t port_id;
+ uint16_t queue_id;
+ struct rte_pci_addr pci_addr;
+};
+
+void sfc_dp_queue_init(struct sfc_dp_queue *dpq,
+ uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr);
+
+/*
+ * Helper macro to define datapath logging macros and have uniform
+ * logging.
+ */
+#define SFC_DP_LOG(dp_name, level, dpq, ...) \
+ do { \
+ const struct sfc_dp_queue *_dpq = (dpq); \
+ const struct rte_pci_addr *_addr = &(_dpq)->pci_addr; \
+ \
+ SFC_GENERIC_LOG(level, \
+ RTE_FMT("%s " PCI_PRI_FMT \
+ " #%" PRIu16 ".%" PRIu16 ": " \
+ RTE_FMT_HEAD(__VA_ARGS__ ,), \
+ dp_name, \
+ _addr->domain, _addr->bus, \
+ _addr->devid, _addr->function, \
+ _dpq->port_id, _dpq->queue_id, \
+ RTE_FMT_TAIL(__VA_ARGS__,))); \
+ } while (0)
+
+
+/** Datapath definition */
+struct sfc_dp {
+ TAILQ_ENTRY(sfc_dp) links;
+ const char *name;
+ enum sfc_dp_type type;
+ /* Mask of required hardware/firmware capabilities */
+ unsigned int hw_fw_caps;
+#define SFC_DP_HW_FW_CAP_EF10 0x1
+#define SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER 0x2
+};
+
+/** List of datapath variants */
+TAILQ_HEAD(sfc_dp_list, sfc_dp);
+
+/* Check if available HW/FW capabilities are sufficient for the datapath */
+static inline bool
+sfc_dp_match_hw_fw_caps(const struct sfc_dp *dp, unsigned int avail_caps)
+{
+ return (dp->hw_fw_caps & avail_caps) == dp->hw_fw_caps;
+}
+
+struct sfc_dp *sfc_dp_find_by_name(struct sfc_dp_list *head,
+ enum sfc_dp_type type, const char *name);
+struct sfc_dp *sfc_dp_find_by_caps(struct sfc_dp_list *head,
+ enum sfc_dp_type type,
+ unsigned int avail_caps);
+int sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_dp_rx.h b/src/spdk/dpdk/drivers/net/sfc/sfc_dp_rx.h
new file mode 100644
index 00000000..ce96e83f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_dp_rx.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_DP_RX_H
+#define _SFC_DP_RX_H
+
+#include <rte_mempool.h>
+#include <rte_ethdev_driver.h>
+
+#include "sfc_dp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Generic receive queue information used on data path.
+ * It must be kept as small as it is possible since it is built into
+ * the structure used on datapath.
+ */
+struct sfc_dp_rxq {
+ struct sfc_dp_queue dpq;
+};
+
+/**
+ * Datapath receive queue creation information.
+ *
+ * The structure is used just to pass information from control path to
+ * datapath. It could be just function arguments, but it would be hardly
+ * readable.
+ */
+struct sfc_dp_rx_qcreate_info {
+ /** Memory pool to allocate Rx buffer from */
+ struct rte_mempool *refill_mb_pool;
+ /** Maximum number of pushed Rx descriptors in the queue */
+ unsigned int max_fill_level;
+ /** Minimum number of unused Rx descriptors to do refill */
+ unsigned int refill_threshold;
+ /**
+ * Usable mbuf data space in accordance with alignment and
+ * padding requirements imposed by HW.
+ */
+ unsigned int buf_size;
+
+ /**
+ * Maximum number of Rx descriptors completed in one Rx event.
+ * Just for sanity checks if datapath would like to do.
+ */
+ unsigned int batch_max;
+
+ /** Pseudo-header size */
+ unsigned int prefix_size;
+
+ /** Receive queue flags initializer */
+ unsigned int flags;
+#define SFC_RXQ_FLAG_RSS_HASH 0x1
+
+ /** Rx queue size */
+ unsigned int rxq_entries;
+ /** DMA-mapped Rx descriptors ring */
+ void *rxq_hw_ring;
+
+ /** Associated event queue size */
+ unsigned int evq_entries;
+ /** Hardware event ring */
+ void *evq_hw_ring;
+
+ /** The queue index in hardware (required to push right doorbell) */
+ unsigned int hw_index;
+ /**
+ * Virtual address of the memory-mapped BAR to push Rx refill
+ * doorbell
+ */
+ volatile void *mem_bar;
+ /** VI window size shift */
+ unsigned int vi_window_shift;
+};
+
+/**
+ * Get Rx datapath specific device info.
+ *
+ * @param dev_info Device info to be adjusted
+ */
+typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
+
+/**
+ * Test if an Rx datapath supports specific mempool ops.
+ *
+ * @param pool The name of the pool operations to test.
+ *
+ * @return Check status.
+ * @retval 0 Best mempool ops choice.
+ * @retval 1 Mempool ops are supported.
+ * @retval -ENOTSUP Mempool ops not supported.
+ */
+typedef int (sfc_dp_rx_pool_ops_supported_t)(const char *pool);
+
+/**
+ * Get size of receive and event queue rings by the number of Rx
+ * descriptors and mempool configuration.
+ *
+ * @param nb_rx_desc Number of Rx descriptors
+ * @param mb_pool mbuf pool with Rx buffers
+ * @param rxq_entries Location for number of Rx ring entries
+ * @param evq_entries Location for number of event ring entries
+ * @param rxq_max_fill_level Location for maximum Rx ring fill level
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc,
+ struct rte_mempool *mb_pool,
+ unsigned int *rxq_entries,
+ unsigned int *evq_entries,
+ unsigned int *rxq_max_fill_level);
+
+/**
+ * Allocate and initialize datapath receive queue.
+ *
+ * @param port_id The port identifier
+ * @param queue_id The queue identifier
+ * @param pci_addr PCI function address
+ * @param socket_id Socket identifier to allocate memory
+ * @param info Receive queue information
+ * @param dp_rxqp Location for generic datapath receive queue pointer
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp);
+
+/**
+ * Free resources allocated for datapath recevie queue.
+ */
+typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);
+
+/**
+ * Receive queue start callback.
+ *
+ * It handovers EvQ to the datapath.
+ */
+typedef int (sfc_dp_rx_qstart_t)(struct sfc_dp_rxq *dp_rxq,
+ unsigned int evq_read_ptr);
+
+/**
+ * Receive queue stop function called before flush.
+ */
+typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq,
+ unsigned int *evq_read_ptr);
+
+/**
+ * Receive event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);
+
+/**
+ * Packed stream receive event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq,
+ unsigned int id);
+
+/**
+ * Receive queue purge function called after queue flush.
+ *
+ * Should be used to free unused recevie buffers.
+ */
+typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
+
+/** Get packet types recognized/classified */
+typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(
+ uint32_t tunnel_encaps);
+
+/** Get number of pending Rx descriptors */
+typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
+
+/** Check Rx descriptor status */
+typedef int (sfc_dp_rx_qdesc_status_t)(struct sfc_dp_rxq *dp_rxq,
+ uint16_t offset);
+
+/** Receive datapath definition */
+struct sfc_dp_rx {
+ struct sfc_dp dp;
+
+ unsigned int features;
+#define SFC_DP_RX_FEAT_SCATTER 0x1
+#define SFC_DP_RX_FEAT_MULTI_PROCESS 0x2
+#define SFC_DP_RX_FEAT_TUNNELS 0x4
+#define SFC_DP_RX_FEAT_FLOW_FLAG 0x8
+#define SFC_DP_RX_FEAT_FLOW_MARK 0x10
+#define SFC_DP_RX_FEAT_CHECKSUM 0x20
+ sfc_dp_rx_get_dev_info_t *get_dev_info;
+ sfc_dp_rx_pool_ops_supported_t *pool_ops_supported;
+ sfc_dp_rx_qsize_up_rings_t *qsize_up_rings;
+ sfc_dp_rx_qcreate_t *qcreate;
+ sfc_dp_rx_qdestroy_t *qdestroy;
+ sfc_dp_rx_qstart_t *qstart;
+ sfc_dp_rx_qstop_t *qstop;
+ sfc_dp_rx_qrx_ev_t *qrx_ev;
+ sfc_dp_rx_qrx_ps_ev_t *qrx_ps_ev;
+ sfc_dp_rx_qpurge_t *qpurge;
+ sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
+ sfc_dp_rx_qdesc_npending_t *qdesc_npending;
+ sfc_dp_rx_qdesc_status_t *qdesc_status;
+ eth_rx_burst_t pkt_burst;
+};
+
+static inline struct sfc_dp_rx *
+sfc_dp_find_rx_by_name(struct sfc_dp_list *head, const char *name)
+{
+ struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_RX, name);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
+}
+
+static inline struct sfc_dp_rx *
+sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
+{
+ struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_RX, avail_caps);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
+}
+
+extern struct sfc_dp_rx sfc_efx_rx;
+extern struct sfc_dp_rx sfc_ef10_rx;
+extern struct sfc_dp_rx sfc_ef10_essb_rx;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_RX_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_dp_tx.h b/src/spdk/dpdk/drivers/net/sfc/sfc_dp_tx.h
new file mode 100644
index 00000000..eda9676c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_dp_tx.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_DP_TX_H
+#define _SFC_DP_TX_H
+
+#include <rte_ethdev_driver.h>
+
+#include "sfc_dp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Generic transmit queue information used on data path.
+ * It must be kept as small as it is possible since it is built into
+ * the structure used on datapath.
+ */
+struct sfc_dp_txq {
+ struct sfc_dp_queue dpq;
+};
+
+/**
+ * Datapath transmit queue creation information.
+ *
+ * The structure is used just to pass information from control path to
+ * datapath. It could be just function arguments, but it would be hardly
+ * readable.
+ */
+struct sfc_dp_tx_qcreate_info {
+ /** Maximum number of pushed Tx descriptors */
+ unsigned int max_fill_level;
+ /** Minimum number of unused Tx descriptors to do reap */
+ unsigned int free_thresh;
+ /** Offloads enabled on the transmit queue */
+ uint64_t offloads;
+ /** Tx queue size */
+ unsigned int txq_entries;
+ /** Maximum size of data in the DMA descriptor */
+ uint16_t dma_desc_size_max;
+ /** DMA-mapped Tx descriptors ring */
+ void *txq_hw_ring;
+ /** Associated event queue size */
+ unsigned int evq_entries;
+ /** Hardware event ring */
+ void *evq_hw_ring;
+ /** The queue index in hardware (required to push right doorbell) */
+ unsigned int hw_index;
+ /** Virtual address of the memory-mapped BAR to push Tx doorbell */
+ volatile void *mem_bar;
+ /** VI window size shift */
+ unsigned int vi_window_shift;
+};
+
+/**
+ * Get Tx datapath specific device info.
+ *
+ * @param dev_info Device info to be adjusted
+ */
+typedef void (sfc_dp_tx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
+
+/**
+ * Get size of transmit and event queue rings by the number of Tx
+ * descriptors.
+ *
+ * @param nb_tx_desc Number of Tx descriptors
+ * @param txq_entries Location for number of Tx ring entries
+ * @param evq_entries Location for number of event ring entries
+ * @param txq_max_fill_level Location for maximum Tx ring fill level
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_tx_qsize_up_rings_t)(uint16_t nb_tx_desc,
+ unsigned int *txq_entries,
+ unsigned int *evq_entries,
+ unsigned int *txq_max_fill_level);
+
+/**
+ * Allocate and initialize datapath transmit queue.
+ *
+ * @param port_id The port identifier
+ * @param queue_id The queue identifier
+ * @param pci_addr PCI function address
+ * @param socket_id Socket identifier to allocate memory
+ * @param info Tx queue details wrapped in structure
+ * @param dp_txqp Location for generic datapath transmit queue pointer
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_tx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_tx_qcreate_info *info,
+ struct sfc_dp_txq **dp_txqp);
+
+/**
+ * Free resources allocated for datapath transmit queue.
+ */
+typedef void (sfc_dp_tx_qdestroy_t)(struct sfc_dp_txq *dp_txq);
+
+/**
+ * Transmit queue start callback.
+ *
+ * It handovers EvQ to the datapath.
+ */
+typedef int (sfc_dp_tx_qstart_t)(struct sfc_dp_txq *dp_txq,
+ unsigned int evq_read_ptr,
+ unsigned int txq_desc_index);
+
+/**
+ * Transmit queue stop function called before the queue flush.
+ *
+ * It returns EvQ to the control path.
+ */
+typedef void (sfc_dp_tx_qstop_t)(struct sfc_dp_txq *dp_txq,
+ unsigned int *evq_read_ptr);
+
+/**
+ * Transmit event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_tx_qtx_ev_t)(struct sfc_dp_txq *dp_txq, unsigned int id);
+
+/**
+ * Transmit queue function called after the queue flush.
+ */
+typedef void (sfc_dp_tx_qreap_t)(struct sfc_dp_txq *dp_txq);
+
+/**
+ * Check Tx descriptor status
+ */
+typedef int (sfc_dp_tx_qdesc_status_t)(struct sfc_dp_txq *dp_txq,
+ uint16_t offset);
+
+/** Transmit datapath definition */
+struct sfc_dp_tx {
+ struct sfc_dp dp;
+
+ unsigned int features;
+#define SFC_DP_TX_FEAT_VLAN_INSERT 0x1
+#define SFC_DP_TX_FEAT_TSO 0x2
+#define SFC_DP_TX_FEAT_MULTI_SEG 0x4
+#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x8
+#define SFC_DP_TX_FEAT_MULTI_POOL 0x10
+#define SFC_DP_TX_FEAT_REFCNT 0x20
+ sfc_dp_tx_get_dev_info_t *get_dev_info;
+ sfc_dp_tx_qsize_up_rings_t *qsize_up_rings;
+ sfc_dp_tx_qcreate_t *qcreate;
+ sfc_dp_tx_qdestroy_t *qdestroy;
+ sfc_dp_tx_qstart_t *qstart;
+ sfc_dp_tx_qstop_t *qstop;
+ sfc_dp_tx_qtx_ev_t *qtx_ev;
+ sfc_dp_tx_qreap_t *qreap;
+ sfc_dp_tx_qdesc_status_t *qdesc_status;
+ eth_tx_burst_t pkt_burst;
+};
+
+static inline struct sfc_dp_tx *
+sfc_dp_find_tx_by_name(struct sfc_dp_list *head, const char *name)
+{
+ struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_TX, name);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
+}
+
+static inline struct sfc_dp_tx *
+sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
+{
+ struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_TX, avail_caps);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
+}
+
+extern struct sfc_dp_tx sfc_efx_tx;
+extern struct sfc_dp_tx sfc_ef10_tx;
+extern struct sfc_dp_tx sfc_ef10_simple_tx;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_TX_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ef10.h b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10.h
new file mode 100644
index 00000000..a73e0bde
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_EF10_H
+#define _SFC_EF10_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Number of events in one cache line */
+#define SFC_EF10_EV_PER_CACHE_LINE \
+ (RTE_CACHE_LINE_SIZE / sizeof(efx_qword_t))
+
+#define SFC_EF10_EV_QCLEAR_MASK (~(SFC_EF10_EV_PER_CACHE_LINE - 1))
+
+#if defined(SFC_EF10_EV_QCLEAR_USE_EFX)
+static inline void
+sfc_ef10_ev_qclear_cache_line(void *ptr)
+{
+ efx_qword_t *entry = ptr;
+ unsigned int i;
+
+ for (i = 0; i < SFC_EF10_EV_PER_CACHE_LINE; ++i)
+ EFX_SET_QWORD(entry[i]);
+}
+#else
+/*
+ * It is possible to do it using AVX2 and AVX512F, but it shows less
+ * performance.
+ */
+static inline void
+sfc_ef10_ev_qclear_cache_line(void *ptr)
+{
+ const __m128i val = _mm_set1_epi64x(UINT64_MAX);
+ __m128i *addr = ptr;
+ unsigned int i;
+
+ RTE_BUILD_BUG_ON(sizeof(val) > RTE_CACHE_LINE_SIZE);
+ RTE_BUILD_BUG_ON(RTE_CACHE_LINE_SIZE % sizeof(val) != 0);
+
+ for (i = 0; i < RTE_CACHE_LINE_SIZE / sizeof(val); ++i)
+ _mm_store_si128(&addr[i], val);
+}
+#endif
+
+static inline void
+sfc_ef10_ev_qclear(efx_qword_t *hw_ring, unsigned int ptr_mask,
+ unsigned int old_read_ptr, unsigned int read_ptr)
+{
+ const unsigned int clear_ptr = read_ptr & SFC_EF10_EV_QCLEAR_MASK;
+ unsigned int old_clear_ptr = old_read_ptr & SFC_EF10_EV_QCLEAR_MASK;
+
+ while (old_clear_ptr != clear_ptr) {
+ sfc_ef10_ev_qclear_cache_line(
+ &hw_ring[old_clear_ptr & ptr_mask]);
+ old_clear_ptr += SFC_EF10_EV_PER_CACHE_LINE;
+ }
+
+ /*
+ * No barriers here.
+ * Functions which push doorbell should care about correct
+ * ordering: store instructions which fill in EvQ ring should be
+ * retired from CPU and DMA sync before doorbell which will allow
+ * to use these event entries.
+ */
+}
+
+static inline bool
+sfc_ef10_ev_present(const efx_qword_t ev)
+{
+ return ~EFX_QWORD_FIELD(ev, EFX_DWORD_0) |
+ ~EFX_QWORD_FIELD(ev, EFX_DWORD_1);
+}
+
+
+/**
+ * Alignment requirement for value written to RX WPTR:
+ * the WPTR must be aligned to an 8 descriptor boundary.
+ */
+#define SFC_EF10_RX_WPTR_ALIGN 8u
+
+static inline void
+sfc_ef10_rx_qpush(volatile void *doorbell, unsigned int added,
+ unsigned int ptr_mask)
+{
+ efx_dword_t dword;
+
+ /* Hardware has alignment restriction for WPTR */
+ RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
+ SFC_ASSERT(RTE_ALIGN(added, SFC_EF10_RX_WPTR_ALIGN) == added);
+
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, added & ptr_mask);
+
+ /* DMA sync to device is not required */
+
+ /*
+ * rte_write32() has rte_io_wmb() which guarantees that the STORE
+ * operations (i.e. Rx and event descriptor updates) that precede
+ * the rte_io_wmb() call are visible to NIC before the STORE
+ * operations that follow it (i.e. doorbell write).
+ */
+ rte_write32(dword.ed_u32[0], doorbell);
+}
+
+
+const uint32_t * sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps);
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_EF10_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c
new file mode 100644
index 00000000..81c8f7fb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c
@@ -0,0 +1,723 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+/* EF10 equal stride packed stream receive native datapath implementation */
+
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_mbuf_ptype.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#include "sfc_tweak.h"
+#include "sfc_dp_rx.h"
+#include "sfc_kvargs.h"
+#include "sfc_ef10.h"
+
+/* Tunnels are not supported */
+#define SFC_EF10_RX_EV_ENCAP_SUPPORT 0
+#include "sfc_ef10_rx_ev.h"
+
+#define sfc_ef10_essb_rx_err(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, ERR, dpq, __VA_ARGS__)
+
+#define sfc_ef10_essb_rx_info(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, INFO, dpq, __VA_ARGS__)
+
+/*
+ * Fake length for RXQ descriptors in equal stride super-buffer mode
+ * to make hardware happy.
+ */
+#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE 32
+
+/**
+ * Minimum number of Rx buffers the datapath allows to use.
+ *
+ * Each HW Rx descriptor has many Rx buffers. The number of buffers
+ * in one HW Rx descriptor is equal to size of contiguous block
+ * provided by Rx buffers memory pool. The contiguous block size
+ * depends on CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB and rte_mbuf
+ * data size specified on the memory pool creation. Typical rte_mbuf
+ * data size is about 2k which makes a bit less than 32 buffers in
+ * contiguous block with default bucket size equal to 64k.
+ * Since HW Rx descriptors are pushed by 8 (see SFC_EF10_RX_WPTR_ALIGN),
+ * it makes about 256 as required minimum. Double it in advertised
+ * minimum to allow for at least 2 refill blocks.
+ */
+#define SFC_EF10_ESSB_RX_DESCS_MIN 512
+
+/**
+ * Number of Rx buffers should be aligned to.
+ *
+ * There are no extra requirements on alignment since actual number of
+ * pushed Rx buffers will be multiple by contiguous block size which
+ * is unknown beforehand.
+ */
+#define SFC_EF10_ESSB_RX_DESCS_ALIGN 1
+
+/**
+ * Maximum number of descriptors/buffers in the Rx ring.
+ * It should guarantee that corresponding event queue never overfill.
+ */
+#define SFC_EF10_ESSB_RXQ_LIMIT(_nevs) \
+ ((_nevs) - 1 /* head must not step on tail */ - \
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
+ 1 /* Rx error */ - 1 /* flush */)
+
+struct sfc_ef10_essb_rx_sw_desc {
+ struct rte_mbuf *first_mbuf;
+};
+
+struct sfc_ef10_essb_rxq {
+ /* Used on data path */
+ unsigned int flags;
+#define SFC_EF10_ESSB_RXQ_STARTED 0x1
+#define SFC_EF10_ESSB_RXQ_NOT_RUNNING 0x2
+#define SFC_EF10_ESSB_RXQ_EXCEPTION 0x4
+ unsigned int rxq_ptr_mask;
+ unsigned int block_size;
+ unsigned int buf_stride;
+ unsigned int bufs_ptr;
+ unsigned int completed;
+ unsigned int pending_id;
+ unsigned int bufs_pending;
+ unsigned int left_in_completed;
+ unsigned int left_in_pending;
+ unsigned int evq_read_ptr;
+ unsigned int evq_ptr_mask;
+ efx_qword_t *evq_hw_ring;
+ struct sfc_ef10_essb_rx_sw_desc *sw_ring;
+ uint16_t port_id;
+
+ /* Used on refill */
+ unsigned int added;
+ unsigned int max_fill_level;
+ unsigned int refill_threshold;
+ struct rte_mempool *refill_mb_pool;
+ efx_qword_t *rxq_hw_ring;
+ volatile void *doorbell;
+
+ /* Datapath receive queue anchor */
+ struct sfc_dp_rxq dp;
+};
+
+static inline struct sfc_ef10_essb_rxq *
+sfc_ef10_essb_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+{
+ return container_of(dp_rxq, struct sfc_ef10_essb_rxq, dp);
+}
+
+static struct rte_mbuf *
+sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
+ struct rte_mbuf *mbuf)
+{
+ return (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
+}
+
+static struct rte_mbuf *
+sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
+ struct rte_mbuf *mbuf, unsigned int idx)
+{
+ return (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
+}
+
+static struct rte_mbuf *
+sfc_ef10_essb_maybe_next_completed(struct sfc_ef10_essb_rxq *rxq)
+{
+ const struct sfc_ef10_essb_rx_sw_desc *rxd;
+
+ if (rxq->left_in_completed != 0) {
+ rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
+ return sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+ rxq->block_size - rxq->left_in_completed);
+ } else {
+ rxq->completed++;
+ rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
+ rxq->left_in_completed = rxq->block_size;
+ return rxd->first_mbuf;
+ }
+}
+
+static void
+sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq)
+{
+ const unsigned int rxq_ptr_mask = rxq->rxq_ptr_mask;
+ unsigned int free_space;
+ unsigned int bulks;
+ void *mbuf_blocks[SFC_EF10_RX_WPTR_ALIGN];
+ unsigned int added = rxq->added;
+
+ free_space = rxq->max_fill_level - (added - rxq->completed);
+
+ if (free_space < rxq->refill_threshold)
+ return;
+
+ bulks = free_space / RTE_DIM(mbuf_blocks);
+ /* refill_threshold guarantees that bulks is positive */
+ SFC_ASSERT(bulks > 0);
+
+ do {
+ unsigned int id;
+ unsigned int i;
+
+ if (unlikely(rte_mempool_get_contig_blocks(rxq->refill_mb_pool,
+ mbuf_blocks, RTE_DIM(mbuf_blocks)) < 0)) {
+ struct rte_eth_dev_data *dev_data =
+ rte_eth_devices[rxq->port_id].data;
+
+ /*
+ * It is hardly a safe way to increment counter
+ * from different contexts, but all PMDs do it.
+ */
+ dev_data->rx_mbuf_alloc_failed += RTE_DIM(mbuf_blocks);
+ /* Return if we have posted nothing yet */
+ if (added == rxq->added)
+ return;
+ /* Push posted */
+ break;
+ }
+
+ for (i = 0, id = added & rxq_ptr_mask;
+ i < RTE_DIM(mbuf_blocks);
+ ++i, ++id) {
+ struct rte_mbuf *m = mbuf_blocks[i];
+ struct sfc_ef10_essb_rx_sw_desc *rxd;
+
+ SFC_ASSERT((id & ~rxq_ptr_mask) == 0);
+ rxd = &rxq->sw_ring[id];
+ rxd->first_mbuf = m;
+
+ /* RX_KER_BYTE_CNT is ignored by firmware */
+ EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
+ ESF_DZ_RX_KER_BYTE_CNT,
+ SFC_EF10_ESSB_RX_FAKE_BUF_SIZE,
+ ESF_DZ_RX_KER_BUF_ADDR,
+ rte_mbuf_data_iova_default(m));
+ }
+
+ added += RTE_DIM(mbuf_blocks);
+
+ } while (--bulks > 0);
+
+ SFC_ASSERT(rxq->added != added);
+ rxq->added = added;
+ sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask);
+}
+
+static bool
+sfc_ef10_essb_rx_event_get(struct sfc_ef10_essb_rxq *rxq, efx_qword_t *rx_ev)
+{
+ *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->evq_ptr_mask];
+
+ if (!sfc_ef10_ev_present(*rx_ev))
+ return false;
+
+ if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
+ FSE_AZ_EV_CODE_RX_EV)) {
+ /*
+ * Do not move read_ptr to keep the event for exception
+ * handling
+ */
+ rxq->flags |= SFC_EF10_ESSB_RXQ_EXCEPTION;
+ sfc_ef10_essb_rx_err(&rxq->dp.dpq,
+ "RxQ exception at EvQ read ptr %#x",
+ rxq->evq_read_ptr);
+ return false;
+ }
+
+ rxq->evq_read_ptr++;
+ return true;
+}
+
+static void
+sfc_ef10_essb_rx_process_ev(struct sfc_ef10_essb_rxq *rxq, efx_qword_t rx_ev)
+{
+ unsigned int ready;
+
+ ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
+ rxq->bufs_ptr) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+
+ rxq->bufs_ptr += ready;
+ rxq->bufs_pending += ready;
+
+ SFC_ASSERT(ready > 0);
+ do {
+ const struct sfc_ef10_essb_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ unsigned int todo_bufs;
+ struct rte_mbuf *m0;
+
+ rxd = &rxq->sw_ring[rxq->pending_id];
+ m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+ rxq->block_size - rxq->left_in_pending);
+
+ if (ready < rxq->left_in_pending) {
+ todo_bufs = ready;
+ ready = 0;
+ rxq->left_in_pending -= todo_bufs;
+ } else {
+ todo_bufs = rxq->left_in_pending;
+ ready -= todo_bufs;
+ rxq->left_in_pending = rxq->block_size;
+ if (rxq->pending_id != rxq->rxq_ptr_mask)
+ rxq->pending_id++;
+ else
+ rxq->pending_id = 0;
+ }
+
+ SFC_ASSERT(todo_bufs > 0);
+ --todo_bufs;
+
+ sfc_ef10_rx_ev_to_offloads(rx_ev, m, ~0ull);
+
+ /* Prefetch pseudo-header */
+ rte_prefetch0((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
+
+ m0 = m;
+ while (todo_bufs-- > 0) {
+ m = sfc_ef10_essb_next_mbuf(rxq, m);
+ m->ol_flags = m0->ol_flags;
+ m->packet_type = m0->packet_type;
+ /* Prefetch pseudo-header */
+ rte_prefetch0((uint8_t *)m->buf_addr +
+ RTE_PKTMBUF_HEADROOM);
+ }
+ } while (ready > 0);
+}
+
+static unsigned int
+sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ unsigned int n_rx_pkts = 0;
+ unsigned int todo_bufs;
+ struct rte_mbuf *m;
+
+ while ((todo_bufs = RTE_MIN(nb_pkts - n_rx_pkts,
+ rxq->bufs_pending)) > 0) {
+ m = sfc_ef10_essb_maybe_next_completed(rxq);
+
+ todo_bufs = RTE_MIN(todo_bufs, rxq->left_in_completed);
+
+ rxq->bufs_pending -= todo_bufs;
+ rxq->left_in_completed -= todo_bufs;
+
+ SFC_ASSERT(todo_bufs > 0);
+ todo_bufs--;
+
+ do {
+ const efx_qword_t *qwordp;
+ uint16_t pkt_len;
+
+ /* Buffers to be discarded have 0 in packet type */
+ if (unlikely(m->packet_type == 0)) {
+ rte_mempool_put(rxq->refill_mb_pool, m);
+ goto next_buf;
+ }
+
+ rx_pkts[n_rx_pkts++] = m;
+
+ /* Parse pseudo-header */
+ qwordp = (const efx_qword_t *)
+ ((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
+ pkt_len =
+ EFX_QWORD_FIELD(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_DATA_LEN);
+
+ m->data_off = RTE_PKTMBUF_HEADROOM +
+ ES_EZ_ESSB_RX_PREFIX_LEN;
+ m->port = rxq->port_id;
+
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+ rte_pktmbuf_data_len(m) = pkt_len;
+
+ m->ol_flags |=
+ (PKT_RX_RSS_HASH *
+ !!EFX_TEST_QWORD_BIT(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) |
+ (PKT_RX_FDIR_ID *
+ !!EFX_TEST_QWORD_BIT(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) |
+ (PKT_RX_FDIR *
+ !!EFX_TEST_QWORD_BIT(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN));
+
+ /* EFX_QWORD_FIELD converts little-endian to CPU */
+ m->hash.rss =
+ EFX_QWORD_FIELD(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_HASH);
+ m->hash.fdir.hi =
+ EFX_QWORD_FIELD(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_MARK);
+
+next_buf:
+ m = sfc_ef10_essb_next_mbuf(rxq, m);
+ } while (todo_bufs-- > 0);
+ }
+
+ return n_rx_pkts;
+}
+
+
+static uint16_t
+sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(rx_queue);
+ const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+ uint16_t n_rx_pkts;
+ efx_qword_t rx_ev;
+
+ if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
+ SFC_EF10_ESSB_RXQ_EXCEPTION)))
+ return 0;
+
+ n_rx_pkts = sfc_ef10_essb_rx_get_pending(rxq, rx_pkts, nb_pkts);
+
+ while (n_rx_pkts != nb_pkts &&
+ sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
+ /*
+ * DROP_EVENT is an internal to the NIC, software should
+ * never see it and, therefore, may ignore it.
+ */
+
+ sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
+ n_rx_pkts += sfc_ef10_essb_rx_get_pending(rxq,
+ rx_pkts + n_rx_pkts,
+ nb_pkts - n_rx_pkts);
+ }
+
+ sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
+ evq_old_read_ptr, rxq->evq_read_ptr);
+
+ /* It is not a problem if we refill in the case of exception */
+ sfc_ef10_essb_rx_qrefill(rxq);
+
+ return n_rx_pkts;
+}
+
+static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
+static unsigned int
+sfc_ef10_essb_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+ efx_qword_t rx_ev;
+
+ if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
+ SFC_EF10_ESSB_RXQ_EXCEPTION)))
+ return rxq->bufs_pending;
+
+ while (sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
+ /*
+ * DROP_EVENT is an internal to the NIC, software should
+ * never see it and, therefore, may ignore it.
+ */
+ sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
+ }
+
+ sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
+ evq_old_read_ptr, rxq->evq_read_ptr);
+
+ return rxq->bufs_pending;
+}
+
+static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status;
+static int
+sfc_ef10_essb_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ unsigned int pending = sfc_ef10_essb_rx_qdesc_npending(dp_rxq);
+
+ if (offset < pending)
+ return RTE_ETH_RX_DESC_DONE;
+
+ if (offset < (rxq->added - rxq->completed) * rxq->block_size +
+ rxq->left_in_completed - rxq->block_size)
+ return RTE_ETH_RX_DESC_AVAIL;
+
+ return RTE_ETH_RX_DESC_UNAVAIL;
+}
+
+static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
+static void
+sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
+{
+ /*
+ * Number of descriptors just defines maximum number of pushed
+ * descriptors (fill level).
+ */
+ dev_info->rx_desc_lim.nb_min = SFC_EF10_ESSB_RX_DESCS_MIN;
+ dev_info->rx_desc_lim.nb_align = SFC_EF10_ESSB_RX_DESCS_ALIGN;
+}
+
+static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported;
+static int
+sfc_ef10_essb_rx_pool_ops_supported(const char *pool)
+{
+ SFC_ASSERT(pool != NULL);
+
+ if (strcmp(pool, "bucket") == 0)
+ return 0;
+
+ return -ENOTSUP;
+}
+
+static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
+static int
+sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ struct rte_mempool *mb_pool,
+ unsigned int *rxq_entries,
+ unsigned int *evq_entries,
+ unsigned int *rxq_max_fill_level)
+{
+ int rc;
+ struct rte_mempool_info mp_info;
+ unsigned int nb_hw_rx_desc;
+ unsigned int max_events;
+
+ rc = rte_mempool_ops_get_info(mb_pool, &mp_info);
+ if (rc != 0)
+ return -rc;
+ if (mp_info.contig_block_size == 0)
+ return EINVAL;
+
+ /*
+ * Calculate required number of hardware Rx descriptors each
+ * carrying contig block size Rx buffers.
+ * It cannot be less than Rx write pointer alignment plus 1
+ * in order to avoid cases when the ring is guaranteed to be
+ * empty.
+ */
+ nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
+ mp_info.contig_block_size),
+ SFC_EF10_RX_WPTR_ALIGN + 1);
+ if (nb_hw_rx_desc <= EFX_RXQ_MINNDESCS) {
+ *rxq_entries = EFX_RXQ_MINNDESCS;
+ } else {
+ *rxq_entries = rte_align32pow2(nb_hw_rx_desc);
+ if (*rxq_entries > EFX_RXQ_MAXNDESCS)
+ return EINVAL;
+ }
+
+ max_events = RTE_ALIGN_FLOOR(nb_hw_rx_desc, SFC_EF10_RX_WPTR_ALIGN) *
+ mp_info.contig_block_size +
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ +
+ 1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;
+
+ *evq_entries = rte_align32pow2(max_events);
+ *evq_entries = RTE_MAX(*evq_entries, (unsigned int)EFX_EVQ_MINNEVS);
+ *evq_entries = RTE_MIN(*evq_entries, (unsigned int)EFX_EVQ_MAXNEVS);
+
+ /*
+ * May be even maximum event queue size is insufficient to handle
+ * so many Rx descriptors. If so, we should limit Rx queue fill level.
+ */
+ *rxq_max_fill_level = RTE_MIN(nb_rx_desc,
+ SFC_EF10_ESSB_RXQ_LIMIT(*evq_entries));
+ return 0;
+}
+
+static sfc_dp_rx_qcreate_t sfc_ef10_essb_rx_qcreate;
+static int
+sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp)
+{
+ struct rte_mempool * const mp = info->refill_mb_pool;
+ struct rte_mempool_info mp_info;
+ struct sfc_ef10_essb_rxq *rxq;
+ int rc;
+
+ rc = rte_mempool_ops_get_info(mp, &mp_info);
+ if (rc != 0) {
+ /* Positive errno is used in the driver */
+ rc = -rc;
+ goto fail_get_contig_block_size;
+ }
+
+ /* Check if the mempool provides block dequeue */
+ rc = EINVAL;
+ if (mp_info.contig_block_size == 0)
+ goto fail_no_block_dequeue;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
+ info->rxq_entries,
+ sizeof(*rxq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_ring == NULL)
+ goto fail_desc_alloc;
+
+ rxq->block_size = mp_info.contig_block_size;
+ rxq->buf_stride = mp->header_size + mp->elt_size + mp->trailer_size;
+ rxq->rxq_ptr_mask = info->rxq_entries - 1;
+ rxq->evq_ptr_mask = info->evq_entries - 1;
+ rxq->evq_hw_ring = info->evq_hw_ring;
+ rxq->port_id = port_id;
+
+ rxq->max_fill_level = info->max_fill_level / mp_info.contig_block_size;
+ rxq->refill_threshold =
+ RTE_MAX(info->refill_threshold / mp_info.contig_block_size,
+ SFC_EF10_RX_WPTR_ALIGN);
+ rxq->refill_mb_pool = mp;
+ rxq->rxq_hw_ring = info->rxq_hw_ring;
+
+ rxq->doorbell = (volatile uint8_t *)info->mem_bar +
+ ER_DZ_RX_DESC_UPD_REG_OFST +
+ (info->hw_index << info->vi_window_shift);
+
+ sfc_ef10_essb_rx_info(&rxq->dp.dpq,
+ "block size is %u, buf stride is %u",
+ rxq->block_size, rxq->buf_stride);
+ sfc_ef10_essb_rx_info(&rxq->dp.dpq,
+ "max fill level is %u descs (%u bufs), "
+ "refill threashold %u descs (%u bufs)",
+ rxq->max_fill_level,
+ rxq->max_fill_level * rxq->block_size,
+ rxq->refill_threshold,
+ rxq->refill_threshold * rxq->block_size);
+
+ *dp_rxqp = &rxq->dp;
+ return 0;
+
+fail_desc_alloc:
+ rte_free(rxq);
+
+fail_rxq_alloc:
+fail_no_block_dequeue:
+fail_get_contig_block_size:
+ return rc;
+}
+
+static sfc_dp_rx_qdestroy_t sfc_ef10_essb_rx_qdestroy;
+static void
+sfc_ef10_essb_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+}
+
+static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart;
+static int
+sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->evq_read_ptr = evq_read_ptr;
+
+ /* Initialize before refill */
+ rxq->completed = rxq->pending_id = rxq->added = 0;
+ rxq->left_in_completed = rxq->left_in_pending = rxq->block_size;
+ rxq->bufs_ptr = UINT_MAX;
+ rxq->bufs_pending = 0;
+
+ sfc_ef10_essb_rx_qrefill(rxq);
+
+ rxq->flags |= SFC_EF10_ESSB_RXQ_STARTED;
+ rxq->flags &=
+ ~(SFC_EF10_ESSB_RXQ_NOT_RUNNING | SFC_EF10_ESSB_RXQ_EXCEPTION);
+
+ return 0;
+}
+
+static sfc_dp_rx_qstop_t sfc_ef10_essb_rx_qstop;
+static void
+sfc_ef10_essb_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->flags |= SFC_EF10_ESSB_RXQ_NOT_RUNNING;
+
+ *evq_read_ptr = rxq->evq_read_ptr;
+}
+
+static sfc_dp_rx_qrx_ev_t sfc_ef10_essb_rx_qrx_ev;
+static bool
+sfc_ef10_essb_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
+{
+ __rte_unused struct sfc_ef10_essb_rxq *rxq;
+
+ rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ SFC_ASSERT(rxq->flags & SFC_EF10_ESSB_RXQ_NOT_RUNNING);
+
+ /*
+ * It is safe to ignore Rx event since we free all mbufs on
+ * queue purge anyway.
+ */
+
+ return false;
+}
+
+static sfc_dp_rx_qpurge_t sfc_ef10_essb_rx_qpurge;
+static void
+sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ unsigned int i;
+ const struct sfc_ef10_essb_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+
+ for (i = rxq->completed; i != rxq->added; ++i) {
+ rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
+ m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+ rxq->block_size - rxq->left_in_completed);
+ while (rxq->left_in_completed > 0) {
+ rte_mempool_put(rxq->refill_mb_pool, m);
+ m = sfc_ef10_essb_next_mbuf(rxq, m);
+ rxq->left_in_completed--;
+ }
+ rxq->left_in_completed = rxq->block_size;
+ }
+
+ rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
+}
+
+struct sfc_dp_rx sfc_ef10_essb_rx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10_ESSB,
+ .type = SFC_DP_RX,
+ .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10 |
+ SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
+ },
+ .features = SFC_DP_RX_FEAT_FLOW_FLAG |
+ SFC_DP_RX_FEAT_FLOW_MARK |
+ SFC_DP_RX_FEAT_CHECKSUM,
+ .get_dev_info = sfc_ef10_essb_rx_get_dev_info,
+ .pool_ops_supported = sfc_ef10_essb_rx_pool_ops_supported,
+ .qsize_up_rings = sfc_ef10_essb_rx_qsize_up_rings,
+ .qcreate = sfc_ef10_essb_rx_qcreate,
+ .qdestroy = sfc_ef10_essb_rx_qdestroy,
+ .qstart = sfc_ef10_essb_rx_qstart,
+ .qstop = sfc_ef10_essb_rx_qstop,
+ .qrx_ev = sfc_ef10_essb_rx_qrx_ev,
+ .qpurge = sfc_ef10_essb_rx_qpurge,
+ .supported_ptypes_get = sfc_ef10_supported_ptypes_get,
+ .qdesc_npending = sfc_ef10_essb_rx_qdesc_npending,
+ .qdesc_status = sfc_ef10_essb_rx_qdesc_status,
+ .pkt_burst = sfc_ef10_essb_recv_pkts,
+};
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx.c b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx.c
new file mode 100644
index 00000000..6a5052b9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx.c
@@ -0,0 +1,675 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+/* EF10 native datapath implementation */
+
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_mbuf_ptype.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#include "sfc_tweak.h"
+#include "sfc_dp_rx.h"
+#include "sfc_kvargs.h"
+#include "sfc_ef10.h"
+
+#define SFC_EF10_RX_EV_ENCAP_SUPPORT 1
+#include "sfc_ef10_rx_ev.h"
+
+#define sfc_ef10_rx_err(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
+
+/**
+ * Maximum number of descriptors/buffers in the Rx ring.
+ * It should guarantee that corresponding event queue never overfill.
+ * EF10 native datapath uses event queue of the same size as Rx queue.
+ * Maximum number of events on datapath can be estimated as number of
+ * Rx queue entries (one event per Rx buffer in the worst case) plus
+ * Rx error and flush events.
+ */
+#define SFC_EF10_RXQ_LIMIT(_ndesc) \
+ ((_ndesc) - 1 /* head must not step on tail */ - \
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
+ 1 /* Rx error */ - 1 /* flush */)
+
+struct sfc_ef10_rx_sw_desc {
+ struct rte_mbuf *mbuf;
+};
+
+struct sfc_ef10_rxq {
+ /* Used on data path */
+ unsigned int flags;
+#define SFC_EF10_RXQ_STARTED 0x1
+#define SFC_EF10_RXQ_NOT_RUNNING 0x2
+#define SFC_EF10_RXQ_EXCEPTION 0x4
+#define SFC_EF10_RXQ_RSS_HASH 0x8
+ unsigned int ptr_mask;
+ unsigned int prepared;
+ unsigned int completed;
+ unsigned int evq_read_ptr;
+ efx_qword_t *evq_hw_ring;
+ struct sfc_ef10_rx_sw_desc *sw_ring;
+ uint64_t rearm_data;
+ uint16_t prefix_size;
+
+ /* Used on refill */
+ uint16_t buf_size;
+ unsigned int added;
+ unsigned int max_fill_level;
+ unsigned int refill_threshold;
+ struct rte_mempool *refill_mb_pool;
+ efx_qword_t *rxq_hw_ring;
+ volatile void *doorbell;
+
+ /* Datapath receive queue anchor */
+ struct sfc_dp_rxq dp;
+};
+
+static inline struct sfc_ef10_rxq *
+sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+{
+ return container_of(dp_rxq, struct sfc_ef10_rxq, dp);
+}
+
+static void
+sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
+{
+ const unsigned int ptr_mask = rxq->ptr_mask;
+ const uint32_t buf_size = rxq->buf_size;
+ unsigned int free_space;
+ unsigned int bulks;
+ void *objs[SFC_RX_REFILL_BULK];
+ unsigned int added = rxq->added;
+
+ RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
+
+ free_space = rxq->max_fill_level - (added - rxq->completed);
+
+ if (free_space < rxq->refill_threshold)
+ return;
+
+ bulks = free_space / RTE_DIM(objs);
+ /* refill_threshold guarantees that bulks is positive */
+ SFC_ASSERT(bulks > 0);
+
+ do {
+ unsigned int id;
+ unsigned int i;
+
+ if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
+ RTE_DIM(objs)) < 0)) {
+ struct rte_eth_dev_data *dev_data =
+ rte_eth_devices[rxq->dp.dpq.port_id].data;
+
+ /*
+ * It is hardly a safe way to increment counter
+ * from different contexts, but all PMDs do it.
+ */
+ dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);
+ /* Return if we have posted nothing yet */
+ if (added == rxq->added)
+ return;
+ /* Push posted */
+ break;
+ }
+
+ for (i = 0, id = added & ptr_mask;
+ i < RTE_DIM(objs);
+ ++i, ++id) {
+ struct rte_mbuf *m = objs[i];
+ struct sfc_ef10_rx_sw_desc *rxd;
+ rte_iova_t phys_addr;
+
+ SFC_ASSERT((id & ~ptr_mask) == 0);
+ rxd = &rxq->sw_ring[id];
+ rxd->mbuf = m;
+
+ /*
+ * Avoid writing to mbuf. It is cheaper to do it
+ * when we receive packet and fill in nearby
+ * structure members.
+ */
+
+ phys_addr = rte_mbuf_data_iova_default(m);
+ EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
+ ESF_DZ_RX_KER_BYTE_CNT, buf_size,
+ ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
+ }
+
+ added += RTE_DIM(objs);
+ } while (--bulks > 0);
+
+ SFC_ASSERT(rxq->added != added);
+ rxq->added = added;
+ sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask);
+}
+
+static void
+sfc_ef10_rx_prefetch_next(struct sfc_ef10_rxq *rxq, unsigned int next_id)
+{
+ struct rte_mbuf *next_mbuf;
+
+ /* Prefetch next bunch of software descriptors */
+ if ((next_id % (RTE_CACHE_LINE_SIZE / sizeof(rxq->sw_ring[0]))) == 0)
+ rte_prefetch0(&rxq->sw_ring[next_id]);
+
+ /*
+ * It looks strange to prefetch depending on previous prefetch
+ * data, but measurements show that it is really efficient and
+ * increases packet rate.
+ */
+ next_mbuf = rxq->sw_ring[next_id].mbuf;
+ if (likely(next_mbuf != NULL)) {
+ /* Prefetch the next mbuf structure */
+ rte_mbuf_prefetch_part1(next_mbuf);
+
+ /* Prefetch pseudo header of the next packet */
+ /* data_off is not filled in yet */
+ /* Yes, data could be not ready yet, but we hope */
+ rte_prefetch0((uint8_t *)next_mbuf->buf_addr +
+ RTE_PKTMBUF_HEADROOM);
+ }
+}
+
+static uint16_t
+sfc_ef10_rx_prepared(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->prepared);
+ unsigned int completed = rxq->completed;
+ unsigned int i;
+
+ rxq->prepared -= n_rx_pkts;
+ rxq->completed = completed + n_rx_pkts;
+
+ for (i = 0; i < n_rx_pkts; ++i, ++completed)
+ rx_pkts[i] = rxq->sw_ring[completed & rxq->ptr_mask].mbuf;
+
+ return n_rx_pkts;
+}
+
+static uint16_t
+sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr)
+{
+ return rte_le_to_cpu_16(*(const uint16_t *)&pseudo_hdr[8]);
+}
+
+static uint32_t
+sfc_ef10_rx_pseudo_hdr_get_hash(const uint8_t *pseudo_hdr)
+{
+ return rte_le_to_cpu_32(*(const uint32_t *)pseudo_hdr);
+}
+
+static uint16_t
+sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ const unsigned int ptr_mask = rxq->ptr_mask;
+ unsigned int completed = rxq->completed;
+ unsigned int ready;
+ struct sfc_ef10_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ struct rte_mbuf *m0;
+ uint16_t n_rx_pkts;
+ const uint8_t *pseudo_hdr;
+ uint16_t pkt_len;
+
+ ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - completed) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ SFC_ASSERT(ready > 0);
+
+ if (rx_ev.eq_u64[0] &
+ rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
+ (1ull << ESF_DZ_RX_ECRC_ERR_LBN))) {
+ SFC_ASSERT(rxq->prepared == 0);
+ rxq->completed += ready;
+ while (ready-- > 0) {
+ rxd = &rxq->sw_ring[completed++ & ptr_mask];
+ rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ }
+ return 0;
+ }
+
+ n_rx_pkts = RTE_MIN(ready, nb_pkts);
+ rxq->prepared = ready - n_rx_pkts;
+ rxq->completed += n_rx_pkts;
+
+ rxd = &rxq->sw_ring[completed++ & ptr_mask];
+
+ sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);
+
+ m = rxd->mbuf;
+
+ *rx_pkts++ = m;
+
+ RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
+ m->rearm_data[0] = rxq->rearm_data;
+
+ /* Classify packet based on Rx event */
+ /* Mask RSS hash offload flag if RSS is not enabled */
+ sfc_ef10_rx_ev_to_offloads(rx_ev, m,
+ (rxq->flags & SFC_EF10_RXQ_RSS_HASH) ?
+ ~0ull : ~PKT_RX_RSS_HASH);
+
+ /* data_off already moved past pseudo header */
+ pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * Always get RSS hash from pseudo header to avoid
+ * condition/branching. If it is valid or not depends on
+ * PKT_RX_RSS_HASH in m->ol_flags.
+ */
+ m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
+
+ if (ready == 1)
+ pkt_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
+ rxq->prefix_size;
+ else
+ pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+ SFC_ASSERT(pkt_len > 0);
+ rte_pktmbuf_data_len(m) = pkt_len;
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+
+ SFC_ASSERT(m->next == NULL);
+
+ /* Remember mbuf to copy offload flags and packet type from */
+ m0 = m;
+ for (--ready; ready > 0; --ready) {
+ rxd = &rxq->sw_ring[completed++ & ptr_mask];
+
+ sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);
+
+ m = rxd->mbuf;
+
+ if (ready > rxq->prepared)
+ *rx_pkts++ = m;
+
+ RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) !=
+ sizeof(rxq->rearm_data));
+ m->rearm_data[0] = rxq->rearm_data;
+
+ /* Event-dependent information is the same */
+ m->ol_flags = m0->ol_flags;
+ m->packet_type = m0->packet_type;
+
+ /* data_off already moved past pseudo header */
+ pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * Always get RSS hash from pseudo header to avoid
+ * condition/branching. If it is valid or not depends on
+ * PKT_RX_RSS_HASH in m->ol_flags.
+ */
+ m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
+
+ pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+ SFC_ASSERT(pkt_len > 0);
+ rte_pktmbuf_data_len(m) = pkt_len;
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+
+ SFC_ASSERT(m->next == NULL);
+ }
+
+ return n_rx_pkts;
+}
+
+static bool
+sfc_ef10_rx_get_event(struct sfc_ef10_rxq *rxq, efx_qword_t *rx_ev)
+{
+ *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];
+
+ if (!sfc_ef10_ev_present(*rx_ev))
+ return false;
+
+ if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
+ FSE_AZ_EV_CODE_RX_EV)) {
+ /*
+ * Do not move read_ptr to keep the event for exception
+ * handling by the control path.
+ */
+ rxq->flags |= SFC_EF10_RXQ_EXCEPTION;
+ sfc_ef10_rx_err(&rxq->dp.dpq,
+ "RxQ exception at EvQ read ptr %#x",
+ rxq->evq_read_ptr);
+ return false;
+ }
+
+ rxq->evq_read_ptr++;
+ return true;
+}
+
+static uint16_t
+sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(rx_queue);
+ unsigned int evq_old_read_ptr;
+ uint16_t n_rx_pkts;
+ efx_qword_t rx_ev;
+
+ if (unlikely(rxq->flags &
+ (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
+ return 0;
+
+ n_rx_pkts = sfc_ef10_rx_prepared(rxq, rx_pkts, nb_pkts);
+
+ evq_old_read_ptr = rxq->evq_read_ptr;
+ while (n_rx_pkts != nb_pkts && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
+ /*
+ * DROP_EVENT is an internal to the NIC, software should
+ * never see it and, therefore, may ignore it.
+ */
+
+ n_rx_pkts += sfc_ef10_rx_process_event(rxq, rx_ev,
+ rx_pkts + n_rx_pkts,
+ nb_pkts - n_rx_pkts);
+ }
+
+ sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->ptr_mask, evq_old_read_ptr,
+ rxq->evq_read_ptr);
+
+ /* It is not a problem if we refill in the case of exception */
+ sfc_ef10_rx_qrefill(rxq);
+
+ return n_rx_pkts;
+}
+
+const uint32_t *
+sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
+{
+ static const uint32_t ef10_native_ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L2_ETHER_QINQ,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+ static const uint32_t ef10_overlay_ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L2_ETHER_QINQ,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_VXLAN,
+ RTE_PTYPE_TUNNEL_NVGRE,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ RTE_PTYPE_INNER_L2_ETHER_QINQ,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ /*
+ * The function returns static set of supported packet types,
+ * so we can't build it dynamically based on supported tunnel
+ * encapsulations and should limit to known sets.
+ */
+ switch (tunnel_encaps) {
+ case (1u << EFX_TUNNEL_PROTOCOL_VXLAN |
+ 1u << EFX_TUNNEL_PROTOCOL_GENEVE |
+ 1u << EFX_TUNNEL_PROTOCOL_NVGRE):
+ return ef10_overlay_ptypes;
+ default:
+ SFC_GENERIC_LOG(ERR,
+ "Unexpected set of supported tunnel encapsulations: %#x",
+ tunnel_encaps);
+ /* FALLTHROUGH */
+ case 0:
+ return ef10_native_ptypes;
+ }
+}
+
+static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
+static unsigned int
+sfc_ef10_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+{
+ /*
+ * Correct implementation requires EvQ polling and events
+ * processing (keeping all ready mbufs in prepared).
+ */
+ return -ENOTSUP;
+}
+
+static sfc_dp_rx_qdesc_status_t sfc_ef10_rx_qdesc_status;
+static int
+sfc_ef10_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
+ __rte_unused uint16_t offset)
+{
+ return -ENOTSUP;
+}
+
+
+static sfc_dp_rx_get_dev_info_t sfc_ef10_rx_get_dev_info;
+static void
+sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
+{
+ /*
+ * Number of descriptors just defines maximum number of pushed
+ * descriptors (fill level).
+ */
+ dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
+ dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
+}
+
+
+static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings;
+static int
+sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ __rte_unused struct rte_mempool *mb_pool,
+ unsigned int *rxq_entries,
+ unsigned int *evq_entries,
+ unsigned int *rxq_max_fill_level)
+{
+ /*
+ * rte_ethdev API guarantees that the number meets min, max and
+ * alignment requirements.
+ */
+ if (nb_rx_desc <= EFX_RXQ_MINNDESCS)
+ *rxq_entries = EFX_RXQ_MINNDESCS;
+ else
+ *rxq_entries = rte_align32pow2(nb_rx_desc);
+
+ *evq_entries = *rxq_entries;
+
+ *rxq_max_fill_level = RTE_MIN(nb_rx_desc,
+ SFC_EF10_RXQ_LIMIT(*evq_entries));
+ return 0;
+}
+
+
+static uint64_t
+sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
+{
+ struct rte_mbuf m;
+
+ memset(&m, 0, sizeof(m));
+
+ rte_mbuf_refcnt_set(&m, 1);
+ m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
+ m.nb_segs = 1;
+ m.port = port_id;
+
+ /* rearm_data covers structure members filled in above */
+ rte_compiler_barrier();
+ RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
+ return m.rearm_data[0];
+}
+
+static sfc_dp_rx_qcreate_t sfc_ef10_rx_qcreate;
+static int
+sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp)
+{
+ struct sfc_ef10_rxq *rxq;
+ int rc;
+
+ rc = EINVAL;
+ if (info->rxq_entries != info->evq_entries)
+ goto fail_rxq_args;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
+ info->rxq_entries,
+ sizeof(*rxq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_ring == NULL)
+ goto fail_desc_alloc;
+
+ rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
+ if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
+ rxq->flags |= SFC_EF10_RXQ_RSS_HASH;
+ rxq->ptr_mask = info->rxq_entries - 1;
+ rxq->evq_hw_ring = info->evq_hw_ring;
+ rxq->max_fill_level = info->max_fill_level;
+ rxq->refill_threshold = info->refill_threshold;
+ rxq->rearm_data =
+ sfc_ef10_mk_mbuf_rearm_data(port_id, info->prefix_size);
+ rxq->prefix_size = info->prefix_size;
+ rxq->buf_size = info->buf_size;
+ rxq->refill_mb_pool = info->refill_mb_pool;
+ rxq->rxq_hw_ring = info->rxq_hw_ring;
+ rxq->doorbell = (volatile uint8_t *)info->mem_bar +
+ ER_DZ_RX_DESC_UPD_REG_OFST +
+ (info->hw_index << info->vi_window_shift);
+
+ *dp_rxqp = &rxq->dp;
+ return 0;
+
+fail_desc_alloc:
+ rte_free(rxq);
+
+fail_rxq_alloc:
+fail_rxq_args:
+ return rc;
+}
+
+static sfc_dp_rx_qdestroy_t sfc_ef10_rx_qdestroy;
+static void
+sfc_ef10_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+}
+
+static sfc_dp_rx_qstart_t sfc_ef10_rx_qstart;
+static int
+sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->prepared = 0;
+ rxq->completed = rxq->added = 0;
+
+ sfc_ef10_rx_qrefill(rxq);
+
+ rxq->evq_read_ptr = evq_read_ptr;
+
+ rxq->flags |= SFC_EF10_RXQ_STARTED;
+ rxq->flags &= ~(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION);
+
+ return 0;
+}
+
+static sfc_dp_rx_qstop_t sfc_ef10_rx_qstop;
+static void
+sfc_ef10_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
+
+ *evq_read_ptr = rxq->evq_read_ptr;
+}
+
+static sfc_dp_rx_qrx_ev_t sfc_ef10_rx_qrx_ev;
+static bool
+sfc_ef10_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
+{
+ __rte_unused struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ SFC_ASSERT(rxq->flags & SFC_EF10_RXQ_NOT_RUNNING);
+
+ /*
+ * It is safe to ignore Rx event since we free all mbufs on
+ * queue purge anyway.
+ */
+
+ return false;
+}
+
+static sfc_dp_rx_qpurge_t sfc_ef10_rx_qpurge;
+static void
+sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+ unsigned int i;
+ struct sfc_ef10_rx_sw_desc *rxd;
+
+ for (i = rxq->completed; i != rxq->added; ++i) {
+ rxd = &rxq->sw_ring[i & rxq->ptr_mask];
+ rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ rxd->mbuf = NULL;
+ }
+
+ rxq->flags &= ~SFC_EF10_RXQ_STARTED;
+}
+
+struct sfc_dp_rx sfc_ef10_rx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10,
+ .type = SFC_DP_RX,
+ .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
+ },
+ .features = SFC_DP_RX_FEAT_MULTI_PROCESS |
+ SFC_DP_RX_FEAT_TUNNELS |
+ SFC_DP_RX_FEAT_CHECKSUM,
+ .get_dev_info = sfc_ef10_rx_get_dev_info,
+ .qsize_up_rings = sfc_ef10_rx_qsize_up_rings,
+ .qcreate = sfc_ef10_rx_qcreate,
+ .qdestroy = sfc_ef10_rx_qdestroy,
+ .qstart = sfc_ef10_rx_qstart,
+ .qstop = sfc_ef10_rx_qstop,
+ .qrx_ev = sfc_ef10_rx_qrx_ev,
+ .qpurge = sfc_ef10_rx_qpurge,
+ .supported_ptypes_get = sfc_ef10_supported_ptypes_get,
+ .qdesc_npending = sfc_ef10_rx_qdesc_npending,
+ .qdesc_status = sfc_ef10_rx_qdesc_status,
+ .pkt_burst = sfc_ef10_recv_pkts,
+};
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx_ev.h b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx_ev.h
new file mode 100644
index 00000000..868c755f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx_ev.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_EF10_RX_EV_H
+#define _SFC_EF10_RX_EV_H
+
+#include <rte_mbuf.h>
+
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline void
+sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m,
+ uint64_t ol_mask)
+{
+ uint32_t tun_ptype = 0;
+ /* Which event bit is mapped to PKT_RX_IP_CKSUM_* */
+ int8_t ip_csum_err_bit;
+ /* Which event bit is mapped to PKT_RX_L4_CKSUM_* */
+ int8_t l4_csum_err_bit;
+ uint32_t l2_ptype = 0;
+ uint32_t l3_ptype = 0;
+ uint32_t l4_ptype = 0;
+ uint64_t ol_flags = 0;
+
+ if (unlikely(rx_ev.eq_u64[0] &
+ rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
+ (1ull << ESF_DZ_RX_ECRC_ERR_LBN) |
+ (1ull << ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))) {
+ /* Zero packet type is used as a marker to dicard bad packets */
+ goto done;
+ }
+
+#if SFC_EF10_RX_EV_ENCAP_SUPPORT
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) {
+ default:
+ /* Unexpected encapsulation tag class */
+ SFC_ASSERT(false);
+ /* FALLTHROUGH */
+ case ESE_EZ_ENCAP_HDR_NONE:
+ break;
+ case ESE_EZ_ENCAP_HDR_VXLAN:
+ /*
+ * It is definitely UDP, but we have no information
+ * about IPv4 vs IPv6 and VLAN tagging.
+ */
+ tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
+ break;
+ case ESE_EZ_ENCAP_HDR_GRE:
+ /*
+ * We have no information about IPv4 vs IPv6 and VLAN tagging.
+ */
+ tun_ptype = RTE_PTYPE_TUNNEL_NVGRE;
+ break;
+ }
+#endif
+
+ if (tun_ptype == 0) {
+ ip_csum_err_bit = ESF_DZ_RX_IPCKSUM_ERR_LBN;
+ l4_csum_err_bit = ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN;
+ } else {
+ ip_csum_err_bit = ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN;
+ l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN;
+ if (unlikely(EFX_TEST_QWORD_BIT(rx_ev,
+ ESF_DZ_RX_IPCKSUM_ERR_LBN)))
+ ol_flags |= PKT_RX_EIP_CKSUM_BAD;
+ }
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
+ case ESE_DZ_ETH_TAG_CLASS_NONE:
+ l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER :
+ RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case ESE_DZ_ETH_TAG_CLASS_VLAN1:
+ l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN :
+ RTE_PTYPE_INNER_L2_ETHER_VLAN;
+ break;
+ case ESE_DZ_ETH_TAG_CLASS_VLAN2:
+ l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_QINQ :
+ RTE_PTYPE_INNER_L2_ETHER_QINQ;
+ break;
+ default:
+ /* Unexpected Eth tag class */
+ SFC_ASSERT(false);
+ }
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) {
+ case ESE_DZ_L3_CLASS_IP4_FRAG:
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
+ RTE_PTYPE_INNER_L4_FRAG;
+ /* FALLTHROUGH */
+ case ESE_DZ_L3_CLASS_IP4:
+ l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN :
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ ol_flags |= PKT_RX_RSS_HASH |
+ ((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ?
+ PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
+ break;
+ case ESE_DZ_L3_CLASS_IP6_FRAG:
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
+ RTE_PTYPE_INNER_L4_FRAG;
+ /* FALLTHROUGH */
+ case ESE_DZ_L3_CLASS_IP6:
+ l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ ol_flags |= PKT_RX_RSS_HASH;
+ break;
+ case ESE_DZ_L3_CLASS_ARP:
+ /* Override Layer 2 packet type */
+ /* There is no ARP classification for inner packets */
+ if (tun_ptype == 0)
+ l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
+ break;
+ case ESE_DZ_L3_CLASS_UNKNOWN:
+ break;
+ default:
+ /* Unexpected Layer 3 class */
+ SFC_ASSERT(false);
+ }
+
+ /*
+ * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
+ * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
+ * and values for all EF10 controllers.
+ */
+ RTE_BUILD_BUG_ON(ESF_FZ_RX_L4_CLASS_LBN != ESF_DE_RX_L4_CLASS_LBN);
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_FZ_RX_L4_CLASS)) {
+ case ESE_FZ_L4_CLASS_TCP:
+ RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_TCP != ESE_DE_L4_CLASS_TCP);
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP :
+ RTE_PTYPE_INNER_L4_TCP;
+ ol_flags |=
+ (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
+ PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case ESE_FZ_L4_CLASS_UDP:
+ RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UDP != ESE_DE_L4_CLASS_UDP);
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP :
+ RTE_PTYPE_INNER_L4_UDP;
+ ol_flags |=
+ (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
+ PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case ESE_FZ_L4_CLASS_UNKNOWN:
+ RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UNKNOWN !=
+ ESE_DE_L4_CLASS_UNKNOWN);
+ break;
+ default:
+ /* Unexpected Layer 4 class */
+ SFC_ASSERT(false);
+ }
+
+ SFC_ASSERT(l2_ptype != 0);
+
+done:
+ m->ol_flags = ol_flags & ol_mask;
+ m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_EF10_RX_EV_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_tx.c b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_tx.c
new file mode 100644
index 00000000..d0daa3b3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_tx.c
@@ -0,0 +1,667 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <stdbool.h>
+
+#include <rte_mbuf.h>
+#include <rte_io.h>
+
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#include "sfc_dp_tx.h"
+#include "sfc_tweak.h"
+#include "sfc_kvargs.h"
+#include "sfc_ef10.h"
+
+#define sfc_ef10_tx_err(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
+
+/** Maximum length of the DMA descriptor data */
+#define SFC_EF10_TX_DMA_DESC_LEN_MAX \
+ ((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)
+
+/**
+ * Maximum number of descriptors/buffers in the Tx ring.
+ * It should guarantee that corresponding event queue never overfill.
+ * EF10 native datapath uses event queue of the same size as Tx queue.
+ * Maximum number of events on datapath can be estimated as number of
+ * Tx queue entries (one event per Tx buffer in the worst case) plus
+ * Tx error and flush events.
+ */
+#define SFC_EF10_TXQ_LIMIT(_ndesc) \
+ ((_ndesc) - 1 /* head must not step on tail */ - \
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
+ 1 /* Rx error */ - 1 /* flush */)
+
+struct sfc_ef10_tx_sw_desc {
+ struct rte_mbuf *mbuf;
+};
+
+struct sfc_ef10_txq {
+ unsigned int flags;
+#define SFC_EF10_TXQ_STARTED 0x1
+#define SFC_EF10_TXQ_NOT_RUNNING 0x2
+#define SFC_EF10_TXQ_EXCEPTION 0x4
+
+ unsigned int ptr_mask;
+ unsigned int added;
+ unsigned int completed;
+ unsigned int max_fill_level;
+ unsigned int free_thresh;
+ unsigned int evq_read_ptr;
+ struct sfc_ef10_tx_sw_desc *sw_ring;
+ efx_qword_t *txq_hw_ring;
+ volatile void *doorbell;
+ efx_qword_t *evq_hw_ring;
+
+ /* Datapath transmit queue anchor */
+ struct sfc_dp_txq dp;
+};
+
+static inline struct sfc_ef10_txq *
+sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
+{
+ return container_of(dp_txq, struct sfc_ef10_txq, dp);
+}
+
+static bool
+sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
+{
+ volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;
+
+ /*
+ * Exception flag is set when reap is done.
+ * It is never done twice per packet burst get and absence of
+ * the flag is checked on burst get entry.
+ */
+ SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0);
+
+ *tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];
+
+ if (!sfc_ef10_ev_present(*tx_ev))
+ return false;
+
+ if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
+ FSE_AZ_EV_CODE_TX_EV)) {
+ /*
+ * Do not move read_ptr to keep the event for exception
+ * handling by the control path.
+ */
+ txq->flags |= SFC_EF10_TXQ_EXCEPTION;
+ sfc_ef10_tx_err(&txq->dp.dpq,
+ "TxQ exception at EvQ read ptr %#x",
+ txq->evq_read_ptr);
+ return false;
+ }
+
+ txq->evq_read_ptr++;
+ return true;
+}
+
+static unsigned int
+sfc_ef10_tx_process_events(struct sfc_ef10_txq *txq)
+{
+ const unsigned int curr_done = txq->completed - 1;
+ unsigned int anew_done = curr_done;
+ efx_qword_t tx_ev;
+
+ while (sfc_ef10_tx_get_event(txq, &tx_ev)) {
+ /*
+ * DROP_EVENT is an internal to the NIC, software should
+ * never see it and, therefore, may ignore it.
+ */
+
+ /* Update the latest done descriptor */
+ anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
+ }
+ return (anew_done - curr_done) & txq->ptr_mask;
+}
+
+static void
+sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
+{
+ const unsigned int old_read_ptr = txq->evq_read_ptr;
+ const unsigned int ptr_mask = txq->ptr_mask;
+ unsigned int completed = txq->completed;
+ unsigned int pending = completed;
+
+ pending += sfc_ef10_tx_process_events(txq);
+
+ if (pending != completed) {
+ struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
+ unsigned int nb = 0;
+
+ do {
+ struct sfc_ef10_tx_sw_desc *txd;
+ struct rte_mbuf *m;
+
+ txd = &txq->sw_ring[completed & ptr_mask];
+ if (txd->mbuf == NULL)
+ continue;
+
+ m = rte_pktmbuf_prefree_seg(txd->mbuf);
+ txd->mbuf = NULL;
+ if (m == NULL)
+ continue;
+
+ if ((nb == RTE_DIM(bulk)) ||
+ ((nb != 0) && (m->pool != bulk[0]->pool))) {
+ rte_mempool_put_bulk(bulk[0]->pool,
+ (void *)bulk, nb);
+ nb = 0;
+ }
+
+ bulk[nb++] = m;
+ } while (++completed != pending);
+
+ if (nb != 0)
+ rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
+
+ txq->completed = completed;
+ }
+
+ sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
+ txq->evq_read_ptr);
+}
+
+static void
+sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop,
+ efx_qword_t *edp)
+{
+ EFX_POPULATE_QWORD_4(*edp,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, !eop,
+ ESF_DZ_TX_KER_BYTE_CNT, size,
+ ESF_DZ_TX_KER_BUF_ADDR, addr);
+}
+
+static inline void
+sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
+ unsigned int pushed)
+{
+ efx_qword_t desc;
+ efx_oword_t oword;
+
+ /*
+ * This improves performance by pushing a TX descriptor at the same
+ * time as the doorbell. The descriptor must be added to the TXQ,
+ * so that can be used if the hardware decides not to use the pushed
+ * descriptor.
+ */
+ desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
+ EFX_POPULATE_OWORD_3(oword,
+ ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
+ ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
+ ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
+
+ /* DMA sync to device is not required */
+
+ /*
+ * rte_io_wmb() which guarantees that the STORE operations
+ * (i.e. Tx and event descriptor updates) that precede
+ * the rte_io_wmb() call are visible to NIC before the STORE
+ * operations that follow it (i.e. doorbell write).
+ */
+ rte_io_wmb();
+
+ *(volatile __m128i *)txq->doorbell = oword.eo_u128[0];
+}
+
+static unsigned int
+sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m)
+{
+ unsigned int extra_descs_per_seg;
+ unsigned int extra_descs_per_pkt;
+
+ /*
+ * VLAN offload is not supported yet, so no extra descriptors
+ * are required for VLAN option descriptor.
+ */
+
+/** Maximum length of the mbuf segment data */
+#define SFC_MBUF_SEG_LEN_MAX UINT16_MAX
+ RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);
+
+ /*
+ * Each segment is already counted once below. So, calculate
+ * how many extra DMA descriptors may be required per segment in
+ * the worst case because of maximum DMA descriptor length limit.
+ * If maximum segment length is less or equal to maximum DMA
+ * descriptor length, no extra DMA descriptors are required.
+ */
+ extra_descs_per_seg =
+ (SFC_MBUF_SEG_LEN_MAX - 1) / SFC_EF10_TX_DMA_DESC_LEN_MAX;
+
+/** Maximum length of the packet */
+#define SFC_MBUF_PKT_LEN_MAX UINT32_MAX
+ RTE_BUILD_BUG_ON(sizeof(m->pkt_len) != 4);
+
+ /*
+ * One more limitation on maximum number of extra DMA descriptors
+ * comes from slicing entire packet because of DMA descriptor length
+ * limit taking into account that there is at least one segment
+ * which is already counted below (so division of the maximum
+ * packet length minus one with round down).
+ * TSO is not supported yet, so packet length is limited by
+ * maximum PDU size.
+ */
+ extra_descs_per_pkt =
+ (RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
+ SFC_MBUF_PKT_LEN_MAX) - 1) /
+ SFC_EF10_TX_DMA_DESC_LEN_MAX;
+
+ return m->nb_segs + RTE_MIN(m->nb_segs * extra_descs_per_seg,
+ extra_descs_per_pkt);
+}
+
+static uint16_t
+sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
+ unsigned int added;
+ unsigned int dma_desc_space;
+ bool reap_done;
+ struct rte_mbuf **pktp;
+ struct rte_mbuf **pktp_end;
+
+ if (unlikely(txq->flags &
+ (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
+ return 0;
+
+ added = txq->added;
+ dma_desc_space = txq->max_fill_level - (added - txq->completed);
+
+ reap_done = (dma_desc_space < txq->free_thresh);
+ if (reap_done) {
+ sfc_ef10_tx_reap(txq);
+ dma_desc_space = txq->max_fill_level - (added - txq->completed);
+ }
+
+ for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
+ pktp != pktp_end;
+ ++pktp) {
+ struct rte_mbuf *m_seg = *pktp;
+ unsigned int pkt_start = added;
+ uint32_t pkt_len;
+
+ if (likely(pktp + 1 != pktp_end))
+ rte_mbuf_prefetch_part1(pktp[1]);
+
+ if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) {
+ if (reap_done)
+ break;
+
+ /* Push already prepared descriptors before polling */
+ if (added != txq->added) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+ sfc_ef10_tx_reap(txq);
+ reap_done = true;
+ dma_desc_space = txq->max_fill_level -
+ (added - txq->completed);
+ if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space)
+ break;
+ }
+
+ pkt_len = m_seg->pkt_len;
+ do {
+ rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg);
+ unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
+ unsigned int id = added & txq->ptr_mask;
+
+ SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);
+
+ pkt_len -= seg_len;
+
+ sfc_ef10_tx_qdesc_dma_create(seg_addr,
+ seg_len, (pkt_len == 0),
+ &txq->txq_hw_ring[id]);
+
+ /*
+ * rte_pktmbuf_free() is commonly used in DPDK for
+ * recycling packets - the function checks every
+ * segment's reference counter and returns the
+ * buffer to its pool whenever possible;
+ * nevertheless, freeing mbuf segments one by one
+ * may entail some performance decline;
+ * from this point, sfc_efx_tx_reap() does the same job
+ * on its own and frees buffers in bulks (all mbufs
+ * within a bulk belong to the same pool);
+ * from this perspective, individual segment pointers
+ * must be associated with the corresponding SW
+ * descriptors independently so that only one loop
+ * is sufficient on reap to inspect all the buffers
+ */
+ txq->sw_ring[id].mbuf = m_seg;
+
+ ++added;
+
+ } while ((m_seg = m_seg->next) != 0);
+
+ dma_desc_space -= (added - pkt_start);
+ }
+
+ if (likely(added != txq->added)) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
+ if (!reap_done)
+ sfc_ef10_tx_reap(txq);
+#endif
+
+ return pktp - &tx_pkts[0];
+}
+
+static void
+sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
+{
+ const unsigned int old_read_ptr = txq->evq_read_ptr;
+ const unsigned int ptr_mask = txq->ptr_mask;
+ unsigned int completed = txq->completed;
+ unsigned int pending = completed;
+
+ pending += sfc_ef10_tx_process_events(txq);
+
+ if (pending != completed) {
+ struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
+ unsigned int nb = 0;
+
+ do {
+ struct sfc_ef10_tx_sw_desc *txd;
+
+ txd = &txq->sw_ring[completed & ptr_mask];
+
+ if (nb == RTE_DIM(bulk)) {
+ rte_mempool_put_bulk(bulk[0]->pool,
+ (void *)bulk, nb);
+ nb = 0;
+ }
+
+ bulk[nb++] = txd->mbuf;
+ } while (++completed != pending);
+
+ rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
+
+ txq->completed = completed;
+ }
+
+ sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
+ txq->evq_read_ptr);
+}
+
+
+static uint16_t
+sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
+ unsigned int ptr_mask;
+ unsigned int added;
+ unsigned int dma_desc_space;
+ bool reap_done;
+ struct rte_mbuf **pktp;
+ struct rte_mbuf **pktp_end;
+
+ if (unlikely(txq->flags &
+ (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
+ return 0;
+
+ ptr_mask = txq->ptr_mask;
+ added = txq->added;
+ dma_desc_space = txq->max_fill_level - (added - txq->completed);
+
+ reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
+ if (reap_done) {
+ sfc_ef10_simple_tx_reap(txq);
+ dma_desc_space = txq->max_fill_level - (added - txq->completed);
+ }
+
+ pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)];
+ for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) {
+ struct rte_mbuf *pkt = *pktp;
+ unsigned int id = added & ptr_mask;
+
+ SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
+ SFC_EF10_TX_DMA_DESC_LEN_MAX);
+
+ sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
+ rte_pktmbuf_data_len(pkt),
+ true, &txq->txq_hw_ring[id]);
+
+ txq->sw_ring[id].mbuf = pkt;
+
+ ++added;
+ }
+
+ if (likely(added != txq->added)) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
+ if (!reap_done)
+ sfc_ef10_simple_tx_reap(txq);
+#endif
+
+ return pktp - &tx_pkts[0];
+}
+
+static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info;
+static void
+sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info)
+{
+ /*
+ * Number of descriptors just defines maximum number of pushed
+ * descriptors (fill level).
+ */
+ dev_info->tx_desc_lim.nb_min = 1;
+ dev_info->tx_desc_lim.nb_align = 1;
+}
+
+static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
+static int
+sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
+ unsigned int *txq_entries,
+ unsigned int *evq_entries,
+ unsigned int *txq_max_fill_level)
+{
+ /*
+ * rte_ethdev API guarantees that the number meets min, max and
+ * alignment requirements.
+ */
+ if (nb_tx_desc <= EFX_TXQ_MINNDESCS)
+ *txq_entries = EFX_TXQ_MINNDESCS;
+ else
+ *txq_entries = rte_align32pow2(nb_tx_desc);
+
+ *evq_entries = *txq_entries;
+
+ *txq_max_fill_level = RTE_MIN(nb_tx_desc,
+ SFC_EF10_TXQ_LIMIT(*evq_entries));
+ return 0;
+}
+
+static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
+static int
+sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_tx_qcreate_info *info,
+ struct sfc_dp_txq **dp_txqp)
+{
+ struct sfc_ef10_txq *txq;
+ int rc;
+
+ rc = EINVAL;
+ if (info->txq_entries != info->evq_entries)
+ goto fail_bad_args;
+
+ rc = ENOMEM;
+ txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ goto fail_txq_alloc;
+
+ sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
+ info->txq_entries,
+ sizeof(*txq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL)
+ goto fail_sw_ring_alloc;
+
+ txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
+ txq->ptr_mask = info->txq_entries - 1;
+ txq->max_fill_level = info->max_fill_level;
+ txq->free_thresh = info->free_thresh;
+ txq->txq_hw_ring = info->txq_hw_ring;
+ txq->doorbell = (volatile uint8_t *)info->mem_bar +
+ ER_DZ_TX_DESC_UPD_REG_OFST +
+ (info->hw_index << info->vi_window_shift);
+ txq->evq_hw_ring = info->evq_hw_ring;
+
+ *dp_txqp = &txq->dp;
+ return 0;
+
+fail_sw_ring_alloc:
+ rte_free(txq);
+
+fail_txq_alloc:
+fail_bad_args:
+ return rc;
+}
+
+static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
+static void
+sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+}
+
+static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
+static int
+sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
+ unsigned int txq_desc_index)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ txq->evq_read_ptr = evq_read_ptr;
+ txq->added = txq->completed = txq_desc_index;
+
+ txq->flags |= SFC_EF10_TXQ_STARTED;
+ txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);
+
+ return 0;
+}
+
+static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
+static void
+sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;
+
+ *evq_read_ptr = txq->evq_read_ptr;
+}
+
+static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
+static bool
+sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
+{
+ __rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);
+
+ /*
+ * It is safe to ignore Tx event since we reap all mbufs on
+ * queue purge anyway.
+ */
+
+ return false;
+}
+
+static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
+static void
+sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+ unsigned int completed;
+
+ for (completed = txq->completed; completed != txq->added; ++completed) {
+ struct sfc_ef10_tx_sw_desc *txd;
+
+ txd = &txq->sw_ring[completed & txq->ptr_mask];
+ if (txd->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txd->mbuf);
+ txd->mbuf = NULL;
+ }
+ }
+
+ txq->flags &= ~SFC_EF10_TXQ_STARTED;
+}
+
+static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
+static int
+sfc_ef10_tx_qdesc_status(__rte_unused struct sfc_dp_txq *dp_txq,
+ __rte_unused uint16_t offset)
+{
+ return -ENOTSUP;
+}
+
+struct sfc_dp_tx sfc_ef10_tx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10,
+ .type = SFC_DP_TX,
+ .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
+ },
+ .features = SFC_DP_TX_FEAT_MULTI_SEG |
+ SFC_DP_TX_FEAT_MULTI_POOL |
+ SFC_DP_TX_FEAT_REFCNT |
+ SFC_DP_TX_FEAT_MULTI_PROCESS,
+ .get_dev_info = sfc_ef10_get_dev_info,
+ .qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
+ .qcreate = sfc_ef10_tx_qcreate,
+ .qdestroy = sfc_ef10_tx_qdestroy,
+ .qstart = sfc_ef10_tx_qstart,
+ .qtx_ev = sfc_ef10_tx_qtx_ev,
+ .qstop = sfc_ef10_tx_qstop,
+ .qreap = sfc_ef10_tx_qreap,
+ .qdesc_status = sfc_ef10_tx_qdesc_status,
+ .pkt_burst = sfc_ef10_xmit_pkts,
+};
+
+struct sfc_dp_tx sfc_ef10_simple_tx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10_SIMPLE,
+ .type = SFC_DP_TX,
+ },
+ .features = SFC_DP_TX_FEAT_MULTI_PROCESS,
+ .get_dev_info = sfc_ef10_get_dev_info,
+ .qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
+ .qcreate = sfc_ef10_tx_qcreate,
+ .qdestroy = sfc_ef10_tx_qdestroy,
+ .qstart = sfc_ef10_tx_qstart,
+ .qtx_ev = sfc_ef10_tx_qtx_ev,
+ .qstop = sfc_ef10_tx_qstop,
+ .qreap = sfc_ef10_tx_qreap,
+ .qdesc_status = sfc_ef10_tx_qdesc_status,
+ .pkt_burst = sfc_ef10_simple_xmit_pkts,
+};
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ethdev.c b/src/spdk/dpdk/drivers/net/sfc/sfc_ethdev.c
new file mode 100644
index 00000000..9decbf5a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ethdev.c
@@ -0,0 +1,2102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <rte_dev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_kvargs.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_tx.h"
+#include "sfc_flow.h"
+#include "sfc_dp.h"
+#include "sfc_dp_rx.h"
+
+uint32_t sfc_logtype_driver;
+
+static struct sfc_dp_list sfc_dp_head =
+ TAILQ_HEAD_INITIALIZER(sfc_dp_head);
+
+static int
+sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ efx_nic_fw_info_t enfi;
+ int ret;
+ int rc;
+
+ /*
+ * Return value of the callback is likely supposed to be
+ * equal to or greater than 0, nevertheless, if an error
+ * occurs, it will be desirable to pass it to the caller
+ */
+ if ((fw_version == NULL) || (fw_size == 0))
+ return -EINVAL;
+
+ rc = efx_nic_get_fw_version(sa->nic, &enfi);
+ if (rc != 0)
+ return -rc;
+
+ ret = snprintf(fw_version, fw_size,
+ "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
+ enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
+ enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
+ if (ret < 0)
+ return ret;
+
+ if (enfi.enfi_dpcpu_fw_ids_valid) {
+ size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
+ int ret_extra;
+
+ ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
+ fw_size - dpcpu_fw_ids_offset,
+ " rx%" PRIx16 " tx%" PRIx16,
+ enfi.enfi_rx_dpcpu_fw_id,
+ enfi.enfi_tx_dpcpu_fw_id);
+ if (ret_extra < 0)
+ return ret_extra;
+
+ ret += ret_extra;
+ }
+
+ if (fw_size < (size_t)(++ret))
+ return ret;
+ else
+ return 0;
+}
+
+static void
+sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rss *rss = &sa->rss;
+ uint64_t txq_offloads_def = 0;
+
+ sfc_log_init(sa, "entry");
+
+ dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
+
+ /* Autonegotiation may be disabled */
+ dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_1G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_25000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_25G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_50000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_100000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+
+ dev_info->max_rx_queues = sa->rxq_max;
+ dev_info->max_tx_queues = sa->txq_max;
+
+ /* By default packets are dropped if no descriptors are available */
+ dev_info->default_rxconf.rx_drop_en = 1;
+
+ dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa);
+
+ /*
+ * rx_offload_capa includes both device and queue offloads since
+ * the latter may be requested on a per device basis which makes
+ * sense when some offloads are needed to be set on all queues.
+ */
+ dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) |
+ dev_info->rx_queue_offload_capa;
+
+ dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);
+
+ /*
+ * tx_offload_capa includes both device and queue offloads since
+ * the latter may be requested on a per device basis which makes
+ * sense when some offloads are needed to be set on all queues.
+ */
+ dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
+ dev_info->tx_queue_offload_capa;
+
+ if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ dev_info->default_txconf.offloads |= txq_offloads_def;
+
+ if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
+ uint64_t rte_hf = 0;
+ unsigned int i;
+
+ for (i = 0; i < rss->hf_map_nb_entries; ++i)
+ rte_hf |= rss->hf_map[i].rte;
+
+ dev_info->reta_size = EFX_RSS_TBL_SIZE;
+ dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
+ dev_info->flow_type_rss_offloads = rte_hf;
+ }
+
+ /* Initialize to hardware limits */
+ dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
+ dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
+ /* The RXQ hardware requires that the descriptor count is a power
+ * of 2, but rx_desc_lim cannot properly describe that constraint.
+ */
+ dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
+
+ /* Initialize to hardware limits */
+ dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
+ dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
+ /*
+ * The TXQ hardware requires that the descriptor count is a power
+ * of 2, but tx_desc_lim cannot properly describe that constraint
+ */
+ dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
+
+ if (sa->dp_rx->get_dev_info != NULL)
+ sa->dp_rx->get_dev_info(dev_info);
+ if (sa->dp_tx->get_dev_info != NULL)
+ sa->dp_tx->get_dev_info(dev_info);
+}
+
+static const uint32_t *
+sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint32_t tunnel_encaps = encp->enc_tunnel_encapsulations_supported;
+
+ return sa->dp_rx->supported_ptypes_get(tunnel_encaps);
+}
+
+static int
+sfc_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *dev_data = dev->data;
+ struct sfc_adapter *sa = dev_data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
+ dev_data->nb_rx_queues, dev_data->nb_tx_queues);
+
+ sfc_adapter_lock(sa);
+ switch (sa->state) {
+ case SFC_ADAPTER_CONFIGURED:
+ /* FALLTHROUGH */
+ case SFC_ADAPTER_INITIALIZED:
+ rc = sfc_configure(sa);
+ break;
+ default:
+ sfc_err(sa, "unexpected adapter state %u to configure",
+ sa->state);
+ rc = EINVAL;
+ break;
+ }
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done %d", rc);
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_dev_start(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ rc = sfc_start(sa);
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done %d", rc);
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_eth_link current_link;
+ int ret;
+
+ sfc_log_init(sa, "entry");
+
+ if (sa->state != SFC_ADAPTER_STARTED) {
+ sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
+ } else if (wait_to_complete) {
+ efx_link_mode_t link_mode;
+
+ if (efx_port_poll(sa->nic, &link_mode) != 0)
+ link_mode = EFX_LINK_UNKNOWN;
+ sfc_port_link_mode_to_info(link_mode, &current_link);
+
+ } else {
+ sfc_ev_mgmt_qpoll(sa);
+ rte_eth_linkstatus_get(dev, &current_link);
+ }
+
+ ret = rte_eth_linkstatus_set(dev, &current_link);
+ if (ret == 0)
+ sfc_notice(sa, "Link status is %s",
+ current_link.link_status ? "UP" : "DOWN");
+
+ return ret;
+}
+
+static void
+sfc_dev_stop(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ sfc_stop(sa);
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+}
+
+static int
+sfc_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ rc = sfc_start(sa);
+ sfc_adapter_unlock(sa);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ sfc_stop(sa);
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static void
+sfc_dev_close(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ switch (sa->state) {
+ case SFC_ADAPTER_STARTED:
+ sfc_stop(sa);
+ SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
+ /* FALLTHROUGH */
+ case SFC_ADAPTER_CONFIGURED:
+ sfc_close(sa);
+ SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
+ /* FALLTHROUGH */
+ case SFC_ADAPTER_INITIALIZED:
+ break;
+ default:
+ sfc_err(sa, "unexpected adapter state %u on close", sa->state);
+ break;
+ }
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+}
+
+static void
+sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
+ boolean_t enabled)
+{
+ struct sfc_port *port;
+ boolean_t *toggle;
+ struct sfc_adapter *sa = dev->data->dev_private;
+ boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
+ const char *desc = (allmulti) ? "all-multi" : "promiscuous";
+
+ sfc_adapter_lock(sa);
+
+ port = &sa->port;
+ toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);
+
+ if (*toggle != enabled) {
+ *toggle = enabled;
+
+ if (port->isolated) {
+ sfc_warn(sa, "isolated mode is active on the port");
+ sfc_warn(sa, "the change is to be applied on the next "
+ "start provided that isolated mode is "
+ "disabled prior the next start");
+ } else if ((sa->state == SFC_ADAPTER_STARTED) &&
+ (sfc_set_rx_mode(sa) != 0)) {
+ *toggle = !(enabled);
+ sfc_warn(sa, "Failed to %s %s mode",
+ ((enabled) ? "enable" : "disable"), desc);
+ }
+ }
+
+ sfc_adapter_unlock(sa);
+}
+
+static void
+sfc_dev_promisc_enable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
+}
+
+static void
+sfc_dev_promisc_disable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
+}
+
+static void
+sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
+}
+
+static void
+sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
+}
+
+static int
+sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
+ rx_queue_id, nb_rx_desc, socket_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
+ rx_conf, mb_pool);
+ if (rc != 0)
+ goto fail_rx_qinit;
+
+ dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq->dp;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_rx_qinit:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static void
+sfc_rx_queue_release(void *queue)
+{
+ struct sfc_dp_rxq *dp_rxq = queue;
+ struct sfc_rxq *rxq;
+ struct sfc_adapter *sa;
+ unsigned int sw_index;
+
+ if (dp_rxq == NULL)
+ return;
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ sa = rxq->evq->sa;
+ sfc_adapter_lock(sa);
+
+ sw_index = sfc_rxq_sw_index(rxq);
+
+ sfc_log_init(sa, "RxQ=%u", sw_index);
+
+ sa->eth_dev->data->rx_queues[sw_index] = NULL;
+
+ sfc_rx_qfini(sa, sw_index);
+
+ sfc_adapter_unlock(sa);
+}
+
+static int
+sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
+ tx_queue_id, nb_tx_desc, socket_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+ if (rc != 0)
+ goto fail_tx_qinit;
+
+ dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq->dp;
+
+ sfc_adapter_unlock(sa);
+ return 0;
+
+fail_tx_qinit:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static void
+sfc_tx_queue_release(void *queue)
+{
+ struct sfc_dp_txq *dp_txq = queue;
+ struct sfc_txq *txq;
+ unsigned int sw_index;
+ struct sfc_adapter *sa;
+
+ if (dp_txq == NULL)
+ return;
+
+ txq = sfc_txq_by_dp_txq(dp_txq);
+ sw_index = sfc_txq_sw_index(txq);
+
+ SFC_ASSERT(txq->evq != NULL);
+ sa = txq->evq->sa;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ sfc_adapter_lock(sa);
+
+ SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
+ sa->eth_dev->data->tx_queues[sw_index] = NULL;
+
+ sfc_tx_qfini(sa, sw_index);
+
+ sfc_adapter_unlock(sa);
+}
+
+static int
+sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint64_t *mac_stats;
+ int ret;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+
+ ret = sfc_port_update_mac_stats(sa);
+ if (ret != 0)
+ goto unlock;
+
+ mac_stats = port->mac_stats_buf;
+
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
+ EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
+ stats->ipackets =
+ mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
+ stats->opackets =
+ mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
+ stats->ibytes =
+ mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
+ stats->obytes =
+ mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
+ stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
+ stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
+ stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
+ } else {
+ stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
+ stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
+ stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
+ stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
+ /*
+ * Take into account stats which are whenever supported
+ * on EF10. If some stat is not supported by current
+ * firmware variant or HW revision, it is guaranteed
+ * to be zero in mac_stats.
+ */
+ stats->imissed =
+ mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
+ mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
+ mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
+ mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
+ mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
+ mac_stats[EFX_MAC_PM_TRUNC_QBB] +
+ mac_stats[EFX_MAC_PM_DISCARD_QBB] +
+ mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
+ mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
+ mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
+ stats->ierrors =
+ mac_stats[EFX_MAC_RX_FCS_ERRORS] +
+ mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
+ mac_stats[EFX_MAC_RX_JABBER_PKTS];
+ /* no oerrors counters supported on EF10 */
+ }
+
+unlock:
+ rte_spinlock_unlock(&port->mac_stats_lock);
+ SFC_ASSERT(ret >= 0);
+ return -ret;
+}
+
+static void
+sfc_stats_reset(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ if (sa->state != SFC_ADAPTER_STARTED) {
+ /*
+ * The operation cannot be done if port is not started; it
+ * will be scheduled to be done during the next port start
+ */
+ port->mac_stats_reset_pending = B_TRUE;
+ return;
+ }
+
+ rc = sfc_port_reset_mac_stats(sa);
+ if (rc != 0)
+ sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
+}
+
+static int
+sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int xstats_count)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint64_t *mac_stats;
+ int rc;
+ unsigned int i;
+ int nstats = 0;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+
+ rc = sfc_port_update_mac_stats(sa);
+ if (rc != 0) {
+ SFC_ASSERT(rc > 0);
+ nstats = -rc;
+ goto unlock;
+ }
+
+ mac_stats = port->mac_stats_buf;
+
+ for (i = 0; i < EFX_MAC_NSTATS; ++i) {
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
+ if (xstats != NULL && nstats < (int)xstats_count) {
+ xstats[nstats].id = nstats;
+ xstats[nstats].value = mac_stats[i];
+ }
+ nstats++;
+ }
+ }
+
+unlock:
+ rte_spinlock_unlock(&port->mac_stats_lock);
+
+ return nstats;
+}
+
+static int
+sfc_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int xstats_count)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ unsigned int i;
+ unsigned int nstats = 0;
+
+ for (i = 0; i < EFX_MAC_NSTATS; ++i) {
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
+ if (xstats_names != NULL && nstats < xstats_count)
+ strlcpy(xstats_names[nstats].name,
+ efx_mac_stat_name(sa->nic, i),
+ sizeof(xstats_names[0].name));
+ nstats++;
+ }
+ }
+
+ return nstats;
+}
+
+static int
+sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint64_t *mac_stats;
+ unsigned int nb_supported = 0;
+ unsigned int nb_written = 0;
+ unsigned int i;
+ int ret;
+ int rc;
+
+ if (unlikely(values == NULL) ||
+ unlikely((ids == NULL) && (n < port->mac_stats_nb_supported)))
+ return port->mac_stats_nb_supported;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+
+ rc = sfc_port_update_mac_stats(sa);
+ if (rc != 0) {
+ SFC_ASSERT(rc > 0);
+ ret = -rc;
+ goto unlock;
+ }
+
+ mac_stats = port->mac_stats_buf;
+
+ for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
+ if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
+ continue;
+
+ if ((ids == NULL) || (ids[nb_written] == nb_supported))
+ values[nb_written++] = mac_stats[i];
+
+ ++nb_supported;
+ }
+
+ ret = nb_written;
+
+unlock:
+ rte_spinlock_unlock(&port->mac_stats_lock);
+
+ return ret;
+}
+
+static int
+sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids, unsigned int size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ unsigned int nb_supported = 0;
+ unsigned int nb_written = 0;
+ unsigned int i;
+
+ if (unlikely(xstats_names == NULL) ||
+ unlikely((ids == NULL) && (size < port->mac_stats_nb_supported)))
+ return port->mac_stats_nb_supported;
+
+ for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
+ if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
+ continue;
+
+ if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
+ char *name = xstats_names[nb_written++].name;
+
+ strlcpy(name, efx_mac_stat_name(sa->nic, i),
+ sizeof(xstats_names[0].name));
+ }
+
+ ++nb_supported;
+ }
+
+ return nb_written;
+}
+
+static int
+sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int wanted_fc, link_fc;
+
+ memset(fc_conf, 0, sizeof(*fc_conf));
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state == SFC_ADAPTER_STARTED)
+ efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
+ else
+ link_fc = sa->port.flow_ctrl;
+
+ switch (link_fc) {
+ case 0:
+ fc_conf->mode = RTE_FC_NONE;
+ break;
+ case EFX_FCNTL_RESPOND:
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ break;
+ case EFX_FCNTL_GENERATE:
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ break;
+ case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
+ fc_conf->mode = RTE_FC_FULL;
+ break;
+ default:
+ sfc_err(sa, "%s: unexpected flow control value %#x",
+ __func__, link_fc);
+ }
+
+ fc_conf->autoneg = sa->port.flow_ctrl_autoneg;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ unsigned int fcntl;
+ int rc;
+
+ if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
+ fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
+ fc_conf->mac_ctrl_frame_fwd != 0) {
+ sfc_err(sa, "unsupported flow control settings specified");
+ rc = EINVAL;
+ goto fail_inval;
+ }
+
+ switch (fc_conf->mode) {
+ case RTE_FC_NONE:
+ fcntl = 0;
+ break;
+ case RTE_FC_RX_PAUSE:
+ fcntl = EFX_FCNTL_RESPOND;
+ break;
+ case RTE_FC_TX_PAUSE:
+ fcntl = EFX_FCNTL_GENERATE;
+ break;
+ case RTE_FC_FULL:
+ fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail_inval;
+ }
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
+ if (rc != 0)
+ goto fail_mac_fcntl_set;
+ }
+
+ port->flow_ctrl = fcntl;
+ port->flow_ctrl_autoneg = fc_conf->autoneg;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_mac_fcntl_set:
+ sfc_adapter_unlock(sa);
+fail_inval:
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ size_t pdu = EFX_MAC_PDU(mtu);
+ size_t old_pdu;
+ int rc;
+
+ sfc_log_init(sa, "mtu=%u", mtu);
+
+ rc = EINVAL;
+ if (pdu < EFX_MAC_PDU_MIN) {
+ sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
+ (unsigned int)mtu, (unsigned int)pdu,
+ EFX_MAC_PDU_MIN);
+ goto fail_inval;
+ }
+ if (pdu > EFX_MAC_PDU_MAX) {
+ sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
+ (unsigned int)mtu, (unsigned int)pdu,
+ EFX_MAC_PDU_MAX);
+ goto fail_inval;
+ }
+
+ sfc_adapter_lock(sa);
+
+ if (pdu != sa->port.pdu) {
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ sfc_stop(sa);
+
+ old_pdu = sa->port.pdu;
+ sa->port.pdu = pdu;
+ rc = sfc_start(sa);
+ if (rc != 0)
+ goto fail_start;
+ } else {
+ sa->port.pdu = pdu;
+ }
+ }
+
+ /*
+ * The driver does not use it, but other PMDs update jumbo frame
+ * flag and max_rx_pkt_len when MTU is set.
+ */
+ if (mtu > ETHER_MAX_LEN) {
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ }
+
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
+
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_start:
+ sa->port.pdu = old_pdu;
+ if (sfc_start(sa) != 0)
+ sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
+ "PDU max size - port is stopped",
+ (unsigned int)pdu, (unsigned int)old_pdu);
+ sfc_adapter_unlock(sa);
+
+fail_inval:
+ sfc_log_init(sa, "failed %d", rc);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+static int
+sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ struct sfc_port *port = &sa->port;
+ struct ether_addr *old_addr = &dev->data->mac_addrs[0];
+ int rc = 0;
+
+ sfc_adapter_lock(sa);
+
+ /*
+ * Copy the address to the device private data so that
+ * it could be recalled in the case of adapter restart.
+ */
+ ether_addr_copy(mac_addr, &port->default_mac_addr);
+
+ /*
+ * Neither of the two following checks can return
+ * an error. The new MAC address is preserved in
+ * the device private data and can be activated
+ * on the next port start if the user prevents
+ * isolated mode from being enabled.
+ */
+ if (port->isolated) {
+ sfc_warn(sa, "isolated mode is active on the port");
+ sfc_warn(sa, "will not set MAC address");
+ goto unlock;
+ }
+
+ if (sa->state != SFC_ADAPTER_STARTED) {
+ sfc_notice(sa, "the port is not started");
+ sfc_notice(sa, "the new MAC address will be set on port start");
+
+ goto unlock;
+ }
+
+ if (encp->enc_allow_set_mac_with_installed_filters) {
+ rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
+ if (rc != 0) {
+ sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
+ goto unlock;
+ }
+
+ /*
+ * Changing the MAC address by means of MCDI request
+ * has no effect on received traffic, therefore
+ * we also need to update unicast filters
+ */
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0) {
+ sfc_err(sa, "cannot set filter (rc = %u)", rc);
+ /* Rollback the old address */
+ (void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
+ (void)sfc_set_rx_mode(sa);
+ }
+ } else {
+ sfc_warn(sa, "cannot set MAC address with filters installed");
+ sfc_warn(sa, "adapter will be restarted to pick the new MAC");
+ sfc_warn(sa, "(some traffic may be dropped)");
+
+ /*
+ * Since setting MAC address with filters installed is not
+ * allowed on the adapter, the new MAC address will be set
+ * by means of adapter restart. sfc_start() shall retrieve
+ * the new address from the device private data and set it.
+ */
+ sfc_stop(sa);
+ rc = sfc_start(sa);
+ if (rc != 0)
+ sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
+ }
+
+unlock:
+ if (rc != 0)
+ ether_addr_copy(old_addr, &port->default_mac_addr);
+
+ sfc_adapter_unlock(sa);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+
+static int
+sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint8_t *mc_addrs = port->mcast_addrs;
+ int rc;
+ unsigned int i;
+
+ if (port->isolated) {
+ sfc_err(sa, "isolated mode is active on the port");
+ sfc_err(sa, "will not set multicast address list");
+ return -ENOTSUP;
+ }
+
+ if (mc_addrs == NULL)
+ return -ENOBUFS;
+
+ if (nb_mc_addr > port->max_mcast_addrs) {
+ sfc_err(sa, "too many multicast addresses: %u > %u",
+ nb_mc_addr, port->max_mcast_addrs);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nb_mc_addr; ++i) {
+ rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
+ EFX_MAC_ADDR_LEN);
+ mc_addrs += EFX_MAC_ADDR_LEN;
+ }
+
+ port->nb_mcast_addrs = nb_mc_addr;
+
+ if (sa->state != SFC_ADAPTER_STARTED)
+ return 0;
+
+ rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
+ port->nb_mcast_addrs);
+ if (rc != 0)
+ sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
+static void
+sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+
+ sfc_adapter_lock(sa);
+
+ SFC_ASSERT(rx_queue_id < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[rx_queue_id];
+ rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq != NULL);
+
+ qinfo->mp = rxq->refill_mb_pool;
+ qinfo->conf.rx_free_thresh = rxq->refill_threshold;
+ qinfo->conf.rx_drop_en = 1;
+ qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
+ qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
+ if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
+ qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
+ qinfo->scattered_rx = 1;
+ }
+ qinfo->nb_desc = rxq_info->entries;
+
+ sfc_adapter_unlock(sa);
+}
+
+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
+static void
+sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_txq_info *txq_info;
+
+ sfc_adapter_lock(sa);
+
+ SFC_ASSERT(tx_queue_id < sa->txq_count);
+
+ txq_info = &sa->txq_info[tx_queue_id];
+ SFC_ASSERT(txq_info->txq != NULL);
+
+ memset(qinfo, 0, sizeof(*qinfo));
+
+ qinfo->conf.offloads = txq_info->txq->offloads;
+ qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
+ qinfo->conf.tx_deferred_start = txq_info->deferred_start;
+ qinfo->nb_desc = txq_info->entries;
+
+ sfc_adapter_unlock(sa);
+}
+
+static uint32_t
+sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+
+ return sfc_rx_qdesc_npending(sa, rx_queue_id);
+}
+
+static int
+sfc_rx_descriptor_done(void *queue, uint16_t offset)
+{
+ struct sfc_dp_rxq *dp_rxq = queue;
+
+ return sfc_rx_qdesc_done(dp_rxq, offset);
+}
+
+static int
+sfc_rx_descriptor_status(void *queue, uint16_t offset)
+{
+ struct sfc_dp_rxq *dp_rxq = queue;
+ struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+
+ return rxq->evq->sa->dp_rx->qdesc_status(dp_rxq, offset);
+}
+
+static int
+sfc_tx_descriptor_status(void *queue, uint16_t offset)
+{
+ struct sfc_dp_txq *dp_txq = queue;
+ struct sfc_txq *txq = sfc_txq_by_dp_txq(dp_txq);
+
+ return txq->evq->sa->dp_tx->qdesc_status(dp_txq, offset);
+}
+
+static int
+sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = EINVAL;
+ if (sa->state != SFC_ADAPTER_STARTED)
+ goto fail_not_started;
+
+ rc = sfc_rx_qstart(sa, rx_queue_id);
+ if (rc != 0)
+ goto fail_rx_qstart;
+
+ sa->rxq_info[rx_queue_id].deferred_started = B_TRUE;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_rx_qstart:
+fail_not_started:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+
+ sfc_adapter_lock(sa);
+ sfc_rx_qstop(sa, rx_queue_id);
+
+ sa->rxq_info[rx_queue_id].deferred_started = B_FALSE;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = EINVAL;
+ if (sa->state != SFC_ADAPTER_STARTED)
+ goto fail_not_started;
+
+ rc = sfc_tx_qstart(sa, tx_queue_id);
+ if (rc != 0)
+ goto fail_tx_qstart;
+
+ sa->txq_info[tx_queue_id].deferred_started = B_TRUE;
+
+ sfc_adapter_unlock(sa);
+ return 0;
+
+fail_tx_qstart:
+
+fail_not_started:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+
+ sfc_adapter_lock(sa);
+
+ sfc_tx_qstop(sa, tx_queue_id);
+
+ sa->txq_info[tx_queue_id].deferred_started = B_FALSE;
+
+ sfc_adapter_unlock(sa);
+ return 0;
+}
+
+static efx_tunnel_protocol_t
+sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
+{
+ switch (rte_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ return EFX_TUNNEL_PROTOCOL_VXLAN;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ return EFX_TUNNEL_PROTOCOL_GENEVE;
+ default:
+ return EFX_TUNNEL_NPROTOS;
+ }
+}
+
+enum sfc_udp_tunnel_op_e {
+ SFC_UDP_TUNNEL_ADD_PORT,
+ SFC_UDP_TUNNEL_DEL_PORT,
+};
+
+static int
+sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *tunnel_udp,
+ enum sfc_udp_tunnel_op_e op)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ efx_tunnel_protocol_t tunnel_proto;
+ int rc;
+
+ sfc_log_init(sa, "%s udp_port=%u prot_type=%u",
+ (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" :
+ (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown",
+ tunnel_udp->udp_port, tunnel_udp->prot_type);
+
+ tunnel_proto =
+ sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type);
+ if (tunnel_proto >= EFX_TUNNEL_NPROTOS) {
+ rc = ENOTSUP;
+ goto fail_bad_proto;
+ }
+
+ sfc_adapter_lock(sa);
+
+ switch (op) {
+ case SFC_UDP_TUNNEL_ADD_PORT:
+ rc = efx_tunnel_config_udp_add(sa->nic,
+ tunnel_udp->udp_port,
+ tunnel_proto);
+ break;
+ case SFC_UDP_TUNNEL_DEL_PORT:
+ rc = efx_tunnel_config_udp_remove(sa->nic,
+ tunnel_udp->udp_port,
+ tunnel_proto);
+ break;
+ default:
+ rc = EINVAL;
+ goto fail_bad_op;
+ }
+
+ if (rc != 0)
+ goto fail_op;
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_tunnel_reconfigure(sa->nic);
+ if (rc == EAGAIN) {
+ /*
+ * Configuration is accepted by FW and MC reboot
+ * is initiated to apply the changes. MC reboot
+ * will be handled in a usual way (MC reboot
+ * event on management event queue and adapter
+ * restart).
+ */
+ rc = 0;
+ } else if (rc != 0) {
+ goto fail_reconfigure;
+ }
+ }
+
+ sfc_adapter_unlock(sa);
+ return 0;
+
+fail_reconfigure:
+ /* Remove/restore entry since the change makes the trouble */
+ switch (op) {
+ case SFC_UDP_TUNNEL_ADD_PORT:
+ (void)efx_tunnel_config_udp_remove(sa->nic,
+ tunnel_udp->udp_port,
+ tunnel_proto);
+ break;
+ case SFC_UDP_TUNNEL_DEL_PORT:
+ (void)efx_tunnel_config_udp_add(sa->nic,
+ tunnel_udp->udp_port,
+ tunnel_proto);
+ break;
+ }
+
+fail_op:
+fail_bad_op:
+ sfc_adapter_unlock(sa);
+
+fail_bad_proto:
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT);
+}
+
+static int
+sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
+}
+
+static int
+sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rss *rss = &sa->rss;
+ struct sfc_port *port = &sa->port;
+
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated)
+ return -ENOTSUP;
+
+ if (rss->channels == 0)
+ return -EINVAL;
+
+ sfc_adapter_lock(sa);
+
+ /*
+ * Mapping of hash configuration between RTE and EFX is not one-to-one,
+ * hence, conversion is done here to derive a correct set of ETH_RSS
+ * flags which corresponds to the active EFX configuration stored
+ * locally in 'sfc_adapter' and kept up-to-date
+ */
+ rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(sa, rss->hash_types);
+ rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
+ if (rss_conf->rss_key != NULL)
+ rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE);
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rss *rss = &sa->rss;
+ struct sfc_port *port = &sa->port;
+ unsigned int efx_hash_types;
+ int rc = 0;
+
+ if (port->isolated)
+ return -ENOTSUP;
+
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
+ sfc_err(sa, "RSS is not available");
+ return -ENOTSUP;
+ }
+
+ if (rss->channels == 0) {
+ sfc_err(sa, "RSS is not configured");
+ return -EINVAL;
+ }
+
+ if ((rss_conf->rss_key != NULL) &&
+ (rss_conf->rss_key_len != sizeof(rss->key))) {
+ sfc_err(sa, "RSS key size is wrong (should be %lu)",
+ sizeof(rss->key));
+ return -EINVAL;
+ }
+
+ sfc_adapter_lock(sa);
+
+ rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types);
+ if (rc != 0)
+ goto fail_rx_hf_rte_to_efx;
+
+ rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ rss->hash_alg, efx_hash_types, B_TRUE);
+ if (rc != 0)
+ goto fail_scale_mode_set;
+
+ if (rss_conf->rss_key != NULL) {
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_rx_scale_key_set(sa->nic,
+ EFX_RSS_CONTEXT_DEFAULT,
+ rss_conf->rss_key,
+ sizeof(rss->key));
+ if (rc != 0)
+ goto fail_scale_key_set;
+ }
+
+ rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
+ }
+
+ rss->hash_types = efx_hash_types;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_scale_key_set:
+ if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ EFX_RX_HASHALG_TOEPLITZ,
+ rss->hash_types, B_TRUE) != 0)
+ sfc_err(sa, "failed to restore RSS mode");
+
+fail_scale_mode_set:
+fail_rx_hf_rte_to_efx:
+ sfc_adapter_unlock(sa);
+ return -rc;
+}
+
+static int
+sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rss *rss = &sa->rss;
+ struct sfc_port *port = &sa->port;
+ int entry;
+
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated)
+ return -ENOTSUP;
+
+ if (rss->channels == 0)
+ return -EINVAL;
+
+ if (reta_size != EFX_RSS_TBL_SIZE)
+ return -EINVAL;
+
+ sfc_adapter_lock(sa);
+
+ for (entry = 0; entry < reta_size; entry++) {
+ int grp = entry / RTE_RETA_GROUP_SIZE;
+ int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+
+ if ((reta_conf[grp].mask >> grp_idx) & 1)
+ reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
+ }
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rss *rss = &sa->rss;
+ struct sfc_port *port = &sa->port;
+ unsigned int *rss_tbl_new;
+ uint16_t entry;
+ int rc = 0;
+
+
+ if (port->isolated)
+ return -ENOTSUP;
+
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
+ sfc_err(sa, "RSS is not available");
+ return -ENOTSUP;
+ }
+
+ if (rss->channels == 0) {
+ sfc_err(sa, "RSS is not configured");
+ return -EINVAL;
+ }
+
+ if (reta_size != EFX_RSS_TBL_SIZE) {
+ sfc_err(sa, "RETA size is wrong (should be %u)",
+ EFX_RSS_TBL_SIZE);
+ return -EINVAL;
+ }
+
+ rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0);
+ if (rss_tbl_new == NULL)
+ return -ENOMEM;
+
+ sfc_adapter_lock(sa);
+
+ rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
+
+ for (entry = 0; entry < reta_size; entry++) {
+ int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+ struct rte_eth_rss_reta_entry64 *grp;
+
+ grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
+
+ if (grp->mask & (1ull << grp_idx)) {
+ if (grp->reta[grp_idx] >= rss->channels) {
+ rc = EINVAL;
+ goto bad_reta_entry;
+ }
+ rss_tbl_new[entry] = grp->reta[grp_idx];
+ }
+ }
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ rss_tbl_new, EFX_RSS_TBL_SIZE);
+ if (rc != 0)
+ goto fail_scale_tbl_set;
+ }
+
+ rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));
+
+fail_scale_tbl_set:
+bad_reta_entry:
+ sfc_adapter_unlock(sa);
+
+ rte_free(rss_tbl_new);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc = ENOTSUP;
+
+ sfc_log_init(sa, "entry");
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NONE:
+ sfc_err(sa, "Global filters configuration not supported");
+ break;
+ case RTE_ETH_FILTER_MACVLAN:
+ sfc_err(sa, "MACVLAN filters not supported");
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ sfc_err(sa, "EtherType filters not supported");
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ sfc_err(sa, "Flexible filters not supported");
+ break;
+ case RTE_ETH_FILTER_SYN:
+ sfc_err(sa, "SYN filters not supported");
+ break;
+ case RTE_ETH_FILTER_NTUPLE:
+ sfc_err(sa, "NTUPLE filters not supported");
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ sfc_err(sa, "Tunnel filters not supported");
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ sfc_err(sa, "Flow Director filters not supported");
+ break;
+ case RTE_ETH_FILTER_HASH:
+ sfc_err(sa, "Hash filters not supported");
+ break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET) {
+ rc = EINVAL;
+ } else {
+ *(const void **)arg = &sfc_flow_ops;
+ rc = 0;
+ }
+ break;
+ default:
+ sfc_err(sa, "Unknown filter type %u", filter_type);
+ break;
+ }
+
+ sfc_log_init(sa, "exit: %d", -rc);
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ /*
+ * If Rx datapath does not provide callback to check mempool,
+ * all pools are supported.
+ */
+ if (sa->dp_rx->pool_ops_supported == NULL)
+ return 1;
+
+ return sa->dp_rx->pool_ops_supported(pool);
+}
+
+static const struct eth_dev_ops sfc_eth_dev_ops = {
+ .dev_configure = sfc_dev_configure,
+ .dev_start = sfc_dev_start,
+ .dev_stop = sfc_dev_stop,
+ .dev_set_link_up = sfc_dev_set_link_up,
+ .dev_set_link_down = sfc_dev_set_link_down,
+ .dev_close = sfc_dev_close,
+ .promiscuous_enable = sfc_dev_promisc_enable,
+ .promiscuous_disable = sfc_dev_promisc_disable,
+ .allmulticast_enable = sfc_dev_allmulti_enable,
+ .allmulticast_disable = sfc_dev_allmulti_disable,
+ .link_update = sfc_dev_link_update,
+ .stats_get = sfc_stats_get,
+ .stats_reset = sfc_stats_reset,
+ .xstats_get = sfc_xstats_get,
+ .xstats_reset = sfc_stats_reset,
+ .xstats_get_names = sfc_xstats_get_names,
+ .dev_infos_get = sfc_dev_infos_get,
+ .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
+ .mtu_set = sfc_dev_set_mtu,
+ .rx_queue_start = sfc_rx_queue_start,
+ .rx_queue_stop = sfc_rx_queue_stop,
+ .tx_queue_start = sfc_tx_queue_start,
+ .tx_queue_stop = sfc_tx_queue_stop,
+ .rx_queue_setup = sfc_rx_queue_setup,
+ .rx_queue_release = sfc_rx_queue_release,
+ .rx_queue_count = sfc_rx_queue_count,
+ .rx_descriptor_done = sfc_rx_descriptor_done,
+ .rx_descriptor_status = sfc_rx_descriptor_status,
+ .tx_descriptor_status = sfc_tx_descriptor_status,
+ .tx_queue_setup = sfc_tx_queue_setup,
+ .tx_queue_release = sfc_tx_queue_release,
+ .flow_ctrl_get = sfc_flow_ctrl_get,
+ .flow_ctrl_set = sfc_flow_ctrl_set,
+ .mac_addr_set = sfc_mac_addr_set,
+ .udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del,
+ .reta_update = sfc_dev_rss_reta_update,
+ .reta_query = sfc_dev_rss_reta_query,
+ .rss_hash_update = sfc_dev_rss_hash_update,
+ .rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
+ .filter_ctrl = sfc_dev_filter_ctrl,
+ .set_mc_addr_list = sfc_set_mc_addr_list,
+ .rxq_info_get = sfc_rx_queue_info_get,
+ .txq_info_get = sfc_tx_queue_info_get,
+ .fw_version_get = sfc_fw_version_get,
+ .xstats_get_by_id = sfc_xstats_get_by_id,
+ .xstats_get_names_by_id = sfc_xstats_get_names_by_id,
+ .pool_ops_supported = sfc_pool_ops_supported,
+};
+
+/**
+ * Duplicate a string in potentially shared memory required for
+ * multi-process support.
+ *
+ * strdup() allocates from process-local heap/memory.
+ */
+static char *
+sfc_strdup(const char *str)
+{
+ size_t size;
+ char *copy;
+
+ if (str == NULL)
+ return NULL;
+
+ size = strlen(str) + 1;
+ copy = rte_malloc(__func__, size, 0);
+ if (copy != NULL)
+ rte_memcpy(copy, str, size);
+
+ return copy;
+}
+
+static int
+sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ const efx_nic_cfg_t *encp;
+ unsigned int avail_caps = 0;
+ const char *rx_name = NULL;
+ const char *tx_name = NULL;
+ int rc;
+
+ switch (sa->family) {
+ case EFX_FAMILY_HUNTINGTON:
+ case EFX_FAMILY_MEDFORD:
+ case EFX_FAMILY_MEDFORD2:
+ avail_caps |= SFC_DP_HW_FW_CAP_EF10;
+ break;
+ default:
+ break;
+ }
+
+ encp = efx_nic_cfg_get(sa->nic);
+ if (encp->enc_rx_es_super_buffer_supported)
+ avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
+ sfc_kvarg_string_handler, &rx_name);
+ if (rc != 0)
+ goto fail_kvarg_rx_datapath;
+
+ if (rx_name != NULL) {
+ sa->dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
+ if (sa->dp_rx == NULL) {
+ sfc_err(sa, "Rx datapath %s not found", rx_name);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ if (!sfc_dp_match_hw_fw_caps(&sa->dp_rx->dp, avail_caps)) {
+ sfc_err(sa,
+ "Insufficient Hw/FW capabilities to use Rx datapath %s",
+ rx_name);
+ rc = EINVAL;
+ goto fail_dp_rx_caps;
+ }
+ } else {
+ sa->dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
+ if (sa->dp_rx == NULL) {
+ sfc_err(sa, "Rx datapath by caps %#x not found",
+ avail_caps);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ }
+
+ sa->dp_rx_name = sfc_strdup(sa->dp_rx->dp.name);
+ if (sa->dp_rx_name == NULL) {
+ rc = ENOMEM;
+ goto fail_dp_rx_name;
+ }
+
+ sfc_notice(sa, "use %s Rx datapath", sa->dp_rx_name);
+
+ dev->rx_pkt_burst = sa->dp_rx->pkt_burst;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
+ sfc_kvarg_string_handler, &tx_name);
+ if (rc != 0)
+ goto fail_kvarg_tx_datapath;
+
+ if (tx_name != NULL) {
+ sa->dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
+ if (sa->dp_tx == NULL) {
+ sfc_err(sa, "Tx datapath %s not found", tx_name);
+ rc = ENOENT;
+ goto fail_dp_tx;
+ }
+ if (!sfc_dp_match_hw_fw_caps(&sa->dp_tx->dp, avail_caps)) {
+ sfc_err(sa,
+ "Insufficient Hw/FW capabilities to use Tx datapath %s",
+ tx_name);
+ rc = EINVAL;
+ goto fail_dp_tx_caps;
+ }
+ } else {
+ sa->dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
+ if (sa->dp_tx == NULL) {
+ sfc_err(sa, "Tx datapath by caps %#x not found",
+ avail_caps);
+ rc = ENOENT;
+ goto fail_dp_tx;
+ }
+ }
+
+ sa->dp_tx_name = sfc_strdup(sa->dp_tx->dp.name);
+ if (sa->dp_tx_name == NULL) {
+ rc = ENOMEM;
+ goto fail_dp_tx_name;
+ }
+
+ sfc_notice(sa, "use %s Tx datapath", sa->dp_tx_name);
+
+ dev->tx_pkt_burst = sa->dp_tx->pkt_burst;
+
+ dev->dev_ops = &sfc_eth_dev_ops;
+
+ return 0;
+
+fail_dp_tx_name:
+fail_dp_tx_caps:
+ sa->dp_tx = NULL;
+
+fail_dp_tx:
+fail_kvarg_tx_datapath:
+ rte_free(sa->dp_rx_name);
+ sa->dp_rx_name = NULL;
+
+fail_dp_rx_name:
+fail_dp_rx_caps:
+ sa->dp_rx = NULL;
+
+fail_dp_rx:
+fail_kvarg_rx_datapath:
+ return rc;
+}
+
+static void
+sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ rte_free(sa->dp_tx_name);
+ sa->dp_tx_name = NULL;
+ sa->dp_tx = NULL;
+
+ rte_free(sa->dp_rx_name);
+ sa->dp_rx_name = NULL;
+ sa->dp_rx = NULL;
+}
+
+static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
+ .rxq_info_get = sfc_rx_queue_info_get,
+ .txq_info_get = sfc_tx_queue_info_get,
+};
+
+static int
+sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev)
+{
+ /*
+ * Device private data has really many process-local pointers.
+ * Below code should be extremely careful to use data located
+ * in shared memory only.
+ */
+ struct sfc_adapter *sa = dev->data->dev_private;
+ const struct sfc_dp_rx *dp_rx;
+ const struct sfc_dp_tx *dp_tx;
+ int rc;
+
+ dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sa->dp_rx_name);
+ if (dp_rx == NULL) {
+ sfc_err(sa, "cannot find %s Rx datapath", sa->dp_tx_name);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
+ sfc_err(sa, "%s Rx datapath does not support multi-process",
+ sa->dp_tx_name);
+ rc = EINVAL;
+ goto fail_dp_rx_multi_process;
+ }
+
+ dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sa->dp_tx_name);
+ if (dp_tx == NULL) {
+ sfc_err(sa, "cannot find %s Tx datapath", sa->dp_tx_name);
+ rc = ENOENT;
+ goto fail_dp_tx;
+ }
+ if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
+ sfc_err(sa, "%s Tx datapath does not support multi-process",
+ sa->dp_tx_name);
+ rc = EINVAL;
+ goto fail_dp_tx_multi_process;
+ }
+
+ dev->rx_pkt_burst = dp_rx->pkt_burst;
+ dev->tx_pkt_burst = dp_tx->pkt_burst;
+ dev->dev_ops = &sfc_eth_dev_secondary_ops;
+
+ return 0;
+
+fail_dp_tx_multi_process:
+fail_dp_tx:
+fail_dp_rx_multi_process:
+fail_dp_rx:
+ return rc;
+}
+
+static void
+sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
+{
+ dev->dev_ops = NULL;
+ dev->tx_pkt_burst = NULL;
+ dev->rx_pkt_burst = NULL;
+}
+
+static void
+sfc_register_dp(void)
+{
+ /* Register once */
+ if (TAILQ_EMPTY(&sfc_dp_head)) {
+ /* Prefer EF10 datapath */
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
+
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
+ }
+}
+
+static int
+sfc_eth_dev_init(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ int rc;
+ const efx_nic_cfg_t *encp;
+ const struct ether_addr *from;
+
+ sfc_register_dp();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -sfc_eth_dev_secondary_set_ops(dev);
+
+ /* Required for logging */
+ sa->pci_addr = pci_dev->addr;
+ sa->port_id = dev->data->port_id;
+
+ sa->eth_dev = dev;
+
+ /* Copy PCI device info to the dev->data */
+ rte_eth_copy_pci_info(dev, pci_dev);
+
+ sa->logtype_main = sfc_register_logtype(sa, SFC_LOGTYPE_MAIN_STR,
+ RTE_LOG_NOTICE);
+
+ rc = sfc_kvargs_parse(sa);
+ if (rc != 0)
+ goto fail_kvargs_parse;
+
+ sfc_log_init(sa, "entry");
+
+ dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
+ if (dev->data->mac_addrs == NULL) {
+ rc = ENOMEM;
+ goto fail_mac_addrs;
+ }
+
+ sfc_adapter_lock_init(sa);
+ sfc_adapter_lock(sa);
+
+ sfc_log_init(sa, "probing");
+ rc = sfc_probe(sa);
+ if (rc != 0)
+ goto fail_probe;
+
+ sfc_log_init(sa, "set device ops");
+ rc = sfc_eth_dev_set_ops(dev);
+ if (rc != 0)
+ goto fail_set_ops;
+
+ sfc_log_init(sa, "attaching");
+ rc = sfc_attach(sa);
+ if (rc != 0)
+ goto fail_attach;
+
+ encp = efx_nic_cfg_get(sa->nic);
+
+ /*
+ * The arguments are really reverse order in comparison to
+ * Linux kernel. Copy from NIC config to Ethernet device data.
+ */
+ from = (const struct ether_addr *)(encp->enc_mac_addr);
+ ether_addr_copy(from, &dev->data->mac_addrs[0]);
+
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_attach:
+ sfc_eth_dev_clear_ops(dev);
+
+fail_set_ops:
+ sfc_unprobe(sa);
+
+fail_probe:
+ sfc_adapter_unlock(sa);
+ sfc_adapter_lock_fini(sa);
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+fail_mac_addrs:
+ sfc_kvargs_cleanup(sa);
+
+fail_kvargs_parse:
+ sfc_log_init(sa, "failed %d", rc);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_eth_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ sfc_eth_dev_secondary_clear_ops(dev);
+ return 0;
+ }
+
+ sa = dev->data->dev_private;
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+
+ sfc_eth_dev_clear_ops(dev);
+
+ sfc_detach(sa);
+ sfc_unprobe(sa);
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ sfc_kvargs_cleanup(sa);
+
+ sfc_adapter_unlock(sa);
+ sfc_adapter_lock_fini(sa);
+
+ sfc_log_init(sa, "done");
+
+ /* Required for logging, so cleanup last */
+ sa->eth_dev = NULL;
+ return 0;
+}
+
+static const struct rte_pci_id pci_id_sfc_efx_map[] = {
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) },
+ { .vendor_id = 0 /* sentinel */ }
+};
+
+static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct sfc_adapter), sfc_eth_dev_init);
+}
+
+static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
+}
+
+static struct rte_pci_driver sfc_efx_pmd = {
+ .id_table = pci_id_sfc_efx_map,
+ .drv_flags =
+ RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_NEED_MAPPING,
+ .probe = sfc_eth_dev_pci_probe,
+ .remove = sfc_eth_dev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
+ SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
+ SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
+ SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
+ SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " "
+ SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
+ SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");
+
+RTE_INIT(sfc_driver_register_logtype)
+{
+ int ret;
+
+ ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver",
+ RTE_LOG_NOTICE);
+ sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret;
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ev.c b/src/spdk/dpdk/drivers/net/sfc/sfc_ev.c
new file mode 100644
index 00000000..f93d30e5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ev.c
@@ -0,0 +1,921 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_alarm.h>
+#include <rte_branch_prediction.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_tx.h"
+#include "sfc_kvargs.h"
+
+
+/* Initial delay when waiting for event queue init complete event */
+#define SFC_EVQ_INIT_BACKOFF_START_US (1)
+/* Maximum delay between event queue polling attempts */
+#define SFC_EVQ_INIT_BACKOFF_MAX_US (10 * 1000)
+/* Event queue init approx timeout */
+#define SFC_EVQ_INIT_TIMEOUT_US (2 * US_PER_S)
+
+/* Management event queue polling period in microseconds */
+#define SFC_MGMT_EV_QPOLL_PERIOD_US (US_PER_S)
+
+static const char *
+sfc_evq_type2str(enum sfc_evq_type type)
+{
+ switch (type) {
+ case SFC_EVQ_TYPE_MGMT:
+ return "mgmt-evq";
+ case SFC_EVQ_TYPE_RX:
+ return "rx-evq";
+ case SFC_EVQ_TYPE_TX:
+ return "tx-evq";
+ default:
+ SFC_ASSERT(B_FALSE);
+ return NULL;
+ }
+}
+
+static boolean_t
+sfc_ev_initialized(void *arg)
+{
+ struct sfc_evq *evq = arg;
+
+ /* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
+ SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
+ evq->init_state == SFC_EVQ_STARTED);
+
+ evq->init_state = SFC_EVQ_STARTED;
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
+ uint32_t size, uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa,
+ "EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
+ evq->evq_index, label, id, size, flags);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
+ uint32_t size, uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_efx_rxq *rxq;
+ unsigned int stop;
+ unsigned int pending_id;
+ unsigned int delta;
+ unsigned int i;
+ struct sfc_efx_rx_sw_desc *rxd;
+
+ if (unlikely(evq->exception))
+ goto done;
+
+ rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);
+
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->evq == evq);
+ SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);
+
+ stop = (id + 1) & rxq->ptr_mask;
+ pending_id = rxq->pending & rxq->ptr_mask;
+ delta = (stop >= pending_id) ? (stop - pending_id) :
+ (rxq->ptr_mask + 1 - pending_id + stop);
+
+ if (delta == 0) {
+ /*
+ * Rx event with no new descriptors done and zero length
+ * is used to abort scattered packet when there is no room
+ * for the tail.
+ */
+ if (unlikely(size != 0)) {
+ evq->exception = B_TRUE;
+ sfc_err(evq->sa,
+ "EVQ %u RxQ %u invalid RX abort "
+ "(id=%#x size=%u flags=%#x); needs restart",
+ evq->evq_index, rxq->dp.dpq.queue_id,
+ id, size, flags);
+ goto done;
+ }
+
+ /* Add discard flag to the first fragment */
+ rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
+ /* Remove continue flag from the last fragment */
+ rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
+ } else if (unlikely(delta > rxq->batch_max)) {
+ evq->exception = B_TRUE;
+
+ sfc_err(evq->sa,
+ "EVQ %u RxQ %u completion out of order "
+ "(id=%#x delta=%u flags=%#x); needs restart",
+ evq->evq_index, rxq->dp.dpq.queue_id,
+ id, delta, flags);
+
+ goto done;
+ }
+
+ for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
+ rxd = &rxq->sw_desc[i];
+
+ rxd->flags = flags;
+
+ SFC_ASSERT(size < (1 << 16));
+ rxd->size = (uint16_t)size;
+ }
+
+ rxq->pending += delta;
+
+done:
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
+ __rte_unused uint32_t size, __rte_unused uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL);
+ return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
+}
+
+static boolean_t
+sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id,
+ uint32_t pkt_count, uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa,
+ "EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
+ evq->evq_index, label, id, pkt_count, flags);
+ return B_TRUE;
+}
+
+/* It is not actually used on datapath, but required on RxQ flush */
+static boolean_t
+sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id,
+ __rte_unused uint32_t pkt_count, __rte_unused uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ if (evq->sa->dp_rx->qrx_ps_ev != NULL)
+ return evq->sa->dp_rx->qrx_ps_ev(dp_rxq, id);
+ else
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
+ evq->evq_index, label, id);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_txq *dp_txq;
+ struct sfc_efx_txq *txq;
+ unsigned int stop;
+ unsigned int delta;
+
+ dp_txq = evq->dp_txq;
+ SFC_ASSERT(dp_txq != NULL);
+
+ txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ SFC_ASSERT(txq->evq == evq);
+
+ if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
+ goto done;
+
+ stop = (id + 1) & txq->ptr_mask;
+ id = txq->pending & txq->ptr_mask;
+
+ delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);
+
+ txq->pending += delta;
+
+done:
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_txq *dp_txq;
+
+ dp_txq = evq->dp_txq;
+ SFC_ASSERT(dp_txq != NULL);
+
+ SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL);
+ return evq->sa->dp_tx->qtx_ev(dp_txq, id);
+}
+
+static boolean_t
+sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
+{
+ struct sfc_evq *evq = arg;
+
+ if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
+ return B_FALSE;
+
+ evq->exception = B_TRUE;
+ sfc_warn(evq->sa,
+ "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
+ " needs recovery",
+ (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
+ (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
+ (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
+ (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
+ (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
+ (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
+ (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
+ (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
+ "UNKNOWN",
+ code, data, evq->evq_index);
+
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
+ evq->evq_index, rxq_hw_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+ struct sfc_rxq *rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->hw_index == rxq_hw_index);
+ SFC_ASSERT(rxq->evq == evq);
+ sfc_rx_qflush_done(rxq);
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
+ evq->evq_index, rxq_hw_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+ struct sfc_rxq *rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->hw_index == rxq_hw_index);
+ SFC_ASSERT(rxq->evq == evq);
+ sfc_rx_qflush_failed(rxq);
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
+ evq->evq_index, txq_hw_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_txq *dp_txq;
+ struct sfc_txq *txq;
+
+ dp_txq = evq->dp_txq;
+ SFC_ASSERT(dp_txq != NULL);
+
+ txq = sfc_txq_by_dp_txq(dp_txq);
+ SFC_ASSERT(txq != NULL);
+ SFC_ASSERT(txq->hw_index == txq_hw_index);
+ SFC_ASSERT(txq->evq == evq);
+ sfc_tx_qflush_done(txq);
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_software(void *arg, uint16_t magic)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
+ evq->evq_index, magic);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_sram(void *arg, uint32_t code)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
+ evq->evq_index, code);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_wake_up(void *arg, uint32_t index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
+ evq->evq_index, index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_timer(void *arg, uint32_t index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
+ evq->evq_index, index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected link change event",
+ evq->evq_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_adapter *sa = evq->sa;
+ struct rte_eth_link new_link;
+
+ sfc_port_link_mode_to_info(link_mode, &new_link);
+ if (rte_eth_linkstatus_set(sa->eth_dev, &new_link))
+ evq->sa->port.lsc_seq++;
+
+ return B_FALSE;
+}
+
+static const efx_ev_callbacks_t sfc_ev_callbacks = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_nop_rx,
+ .eec_rx_ps = sfc_ev_nop_rx_ps,
+ .eec_tx = sfc_ev_nop_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_efx_rx,
+ .eec_rx_ps = sfc_ev_nop_rx_ps,
+ .eec_tx = sfc_ev_nop_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_dp_rx,
+ .eec_rx_ps = sfc_ev_dp_rx_ps,
+ .eec_tx = sfc_ev_nop_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_nop_rx,
+ .eec_rx_ps = sfc_ev_nop_rx_ps,
+ .eec_tx = sfc_ev_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_nop_rx,
+ .eec_rx_ps = sfc_ev_nop_rx_ps,
+ .eec_tx = sfc_ev_dp_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+
+void
+sfc_ev_qpoll(struct sfc_evq *evq)
+{
+ SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
+ evq->init_state == SFC_EVQ_STARTING);
+
+ /* Synchronize the DMA memory for reading not required */
+
+ efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);
+
+ if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
+ struct sfc_adapter *sa = evq->sa;
+ int rc;
+
+ if (evq->dp_rxq != NULL) {
+ unsigned int rxq_sw_index;
+
+ rxq_sw_index = evq->dp_rxq->dpq.queue_id;
+
+ sfc_warn(sa,
+ "restart RxQ %u because of exception on its EvQ %u",
+ rxq_sw_index, evq->evq_index);
+
+ sfc_rx_qstop(sa, rxq_sw_index);
+ rc = sfc_rx_qstart(sa, rxq_sw_index);
+ if (rc != 0)
+ sfc_err(sa, "cannot restart RxQ %u",
+ rxq_sw_index);
+ }
+
+ if (evq->dp_txq != NULL) {
+ unsigned int txq_sw_index;
+
+ txq_sw_index = evq->dp_txq->dpq.queue_id;
+
+ sfc_warn(sa,
+ "restart TxQ %u because of exception on its EvQ %u",
+ txq_sw_index, evq->evq_index);
+
+ sfc_tx_qstop(sa, txq_sw_index);
+ rc = sfc_tx_qstart(sa, txq_sw_index);
+ if (rc != 0)
+ sfc_err(sa, "cannot restart TxQ %u",
+ txq_sw_index);
+ }
+
+ if (evq->exception)
+ sfc_panic(sa, "unrecoverable exception on EvQ %u",
+ evq->evq_index);
+
+ sfc_adapter_unlock(sa);
+ }
+
+ /* Poll-mode driver does not re-prime the event queue for interrupts */
+}
+
+void
+sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
+{
+ if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
+ if (sa->mgmt_evq_running)
+ sfc_ev_qpoll(sa->mgmt_evq);
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ }
+}
+
+int
+sfc_ev_qprime(struct sfc_evq *evq)
+{
+ SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
+ return efx_ev_qprime(evq->common, evq->read_ptr);
+}
+
+/* Event queue HW index allocation scheme is described in sfc_ev.h. */
+int
+sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
+{
+ struct sfc_adapter *sa = evq->sa;
+ efsys_mem_t *esmp;
+ uint32_t evq_flags = sa->evq_flags;
+ unsigned int total_delay_us;
+ unsigned int delay_us;
+ int rc;
+
+ sfc_log_init(sa, "hw_index=%u", hw_index);
+
+ esmp = &evq->mem;
+
+ evq->evq_index = hw_index;
+
+ /* Clear all events */
+ (void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));
+
+ if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
+ evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
+ else
+ evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;
+
+ /* Create the common code event queue */
+ rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
+ 0 /* unused on EF10 */, 0, evq_flags,
+ &evq->common);
+ if (rc != 0)
+ goto fail_ev_qcreate;
+
+ SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
+ if (evq->dp_rxq != 0) {
+ if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+ evq->callbacks = &sfc_ev_callbacks_efx_rx;
+ else
+ evq->callbacks = &sfc_ev_callbacks_dp_rx;
+ } else if (evq->dp_txq != 0) {
+ if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+ evq->callbacks = &sfc_ev_callbacks_efx_tx;
+ else
+ evq->callbacks = &sfc_ev_callbacks_dp_tx;
+ } else {
+ evq->callbacks = &sfc_ev_callbacks;
+ }
+
+ evq->init_state = SFC_EVQ_STARTING;
+
+ /* Wait for the initialization event */
+ total_delay_us = 0;
+ delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
+ do {
+ (void)sfc_ev_qpoll(evq);
+
+ /* Check to see if the initialization complete indication
+ * posted by the hardware.
+ */
+ if (evq->init_state == SFC_EVQ_STARTED)
+ goto done;
+
+ /* Give event queue some time to init */
+ rte_delay_us(delay_us);
+
+ total_delay_us += delay_us;
+
+ /* Exponential backoff */
+ delay_us *= 2;
+ if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
+ delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;
+
+ } while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);
+
+ rc = ETIMEDOUT;
+ goto fail_timedout;
+
+done:
+ return 0;
+
+fail_timedout:
+ evq->init_state = SFC_EVQ_INITIALIZED;
+ efx_ev_qdestroy(evq->common);
+
+fail_ev_qcreate:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_qstop(struct sfc_evq *evq)
+{
+ if (evq == NULL)
+ return;
+
+ sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);
+
+ if (evq->init_state != SFC_EVQ_STARTED)
+ return;
+
+ evq->init_state = SFC_EVQ_INITIALIZED;
+ evq->callbacks = NULL;
+ evq->read_ptr = 0;
+ evq->exception = B_FALSE;
+
+ efx_ev_qdestroy(evq->common);
+
+ evq->evq_index = 0;
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll(void *arg)
+{
+ struct sfc_adapter *sa = arg;
+ int rc;
+
+ sfc_ev_mgmt_qpoll(sa);
+
+ rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
+ sfc_ev_mgmt_periodic_qpoll, sa);
+ if (rc == -ENOTSUP) {
+ sfc_warn(sa, "alarms are not supported");
+ sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
+ } else if (rc != 0) {
+ sfc_err(sa,
+ "cannot rearm management EVQ polling alarm (rc=%d)",
+ rc);
+ }
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
+{
+ sfc_ev_mgmt_periodic_qpoll(sa);
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
+{
+ rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
+}
+
+int
+sfc_ev_start(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ rc = efx_ev_init(sa->nic);
+ if (rc != 0)
+ goto fail_ev_init;
+
+ /* Start management EVQ used for global events */
+
+ /*
+ * Management event queue start polls the queue, but it cannot
+ * interfere with other polling contexts since mgmt_evq_running
+ * is false yet.
+ */
+ rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
+ if (rc != 0)
+ goto fail_mgmt_evq_start;
+
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+ sa->mgmt_evq_running = true;
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
+ if (sa->intr.lsc_intr) {
+ rc = sfc_ev_qprime(sa->mgmt_evq);
+ if (rc != 0)
+ goto fail_mgmt_evq_prime;
+ }
+
+ /*
+ * Start management EVQ polling. If interrupts are disabled
+ * (not used), it is required to process link status change
+ * and other device level events to avoid unrecoverable
+ * error because the event queue overflow.
+ */
+ sfc_ev_mgmt_periodic_qpoll_start(sa);
+
+ /*
+ * Rx/Tx event queues are started/stopped when corresponding
+ * Rx/Tx queue is started/stopped.
+ */
+
+ return 0;
+
+fail_mgmt_evq_prime:
+ sfc_ev_qstop(sa->mgmt_evq);
+
+fail_mgmt_evq_start:
+ efx_ev_fini(sa->nic);
+
+fail_ev_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_stop(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sfc_ev_mgmt_periodic_qpoll_stop(sa);
+
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+ sa->mgmt_evq_running = false;
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
+ sfc_ev_qstop(sa->mgmt_evq);
+
+ efx_ev_fini(sa->nic);
+}
+
+int
+sfc_ev_qinit(struct sfc_adapter *sa,
+ enum sfc_evq_type type, unsigned int type_index,
+ unsigned int entries, int socket_id, struct sfc_evq **evqp)
+{
+ struct sfc_evq *evq;
+ int rc;
+
+ sfc_log_init(sa, "type=%s type_index=%u",
+ sfc_evq_type2str(type), type_index);
+
+ SFC_ASSERT(rte_is_power_of_2(entries));
+
+ rc = ENOMEM;
+ evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (evq == NULL)
+ goto fail_evq_alloc;
+
+ evq->sa = sa;
+ evq->type = type;
+ evq->entries = entries;
+
+ /* Allocate DMA space */
+ rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
+ EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ evq->init_state = SFC_EVQ_INITIALIZED;
+
+ sa->evq_count++;
+
+ *evqp = evq;
+
+ return 0;
+
+fail_dma_alloc:
+ rte_free(evq);
+
+fail_evq_alloc:
+
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_qfini(struct sfc_evq *evq)
+{
+ struct sfc_adapter *sa = evq->sa;
+
+ SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
+
+ sfc_dma_free(sa, &evq->mem);
+
+ rte_free(evq);
+
+ SFC_ASSERT(sa->evq_count > 0);
+ sa->evq_count--;
+}
+
+static int
+sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ uint32_t *value = opaque;
+
+ if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
+ *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
+ else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
+ *value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
+ else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
+ *value = EFX_EVQ_FLAGS_TYPE_AUTO;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+sfc_ev_attach(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
+ rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
+ sfc_kvarg_perf_profile_handler,
+ &sa->evq_flags);
+ if (rc != 0) {
+ sfc_err(sa, "invalid %s parameter value",
+ SFC_KVARG_PERF_PROFILE);
+ goto fail_kvarg_perf_profile;
+ }
+
+ sa->mgmt_evq_index = 0;
+ rte_spinlock_init(&sa->mgmt_evq_lock);
+
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
+ sa->socket_id, &sa->mgmt_evq);
+ if (rc != 0)
+ goto fail_mgmt_evq_init;
+
+ /*
+ * Rx/Tx event queues are created/destroyed when corresponding
+ * Rx/Tx queue is created/destroyed.
+ */
+
+ return 0;
+
+fail_mgmt_evq_init:
+
+fail_kvarg_perf_profile:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_detach(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sfc_ev_qfini(sa->mgmt_evq);
+
+ if (sa->evq_count != 0)
+ sfc_err(sa, "%u EvQs are not destroyed before detach",
+ sa->evq_count);
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ev.h b/src/spdk/dpdk/drivers/net/sfc/sfc_ev.h
new file mode 100644
index 00000000..872f79b9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ev.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_EV_H_
+#define _SFC_EV_H_
+
+#include <rte_ethdev_driver.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Number of entries in the management event queue */
+#define SFC_MGMT_EVQ_ENTRIES (EFX_EVQ_MINNEVS)
+
+struct sfc_adapter;
+struct sfc_dp_rxq;
+struct sfc_dp_txq;
+
+enum sfc_evq_state {
+ SFC_EVQ_UNINITIALIZED = 0,
+ SFC_EVQ_INITIALIZED,
+ SFC_EVQ_STARTING,
+ SFC_EVQ_STARTED,
+
+ SFC_EVQ_NSTATES
+};
+
+enum sfc_evq_type {
+ SFC_EVQ_TYPE_MGMT = 0,
+ SFC_EVQ_TYPE_RX,
+ SFC_EVQ_TYPE_TX,
+
+ SFC_EVQ_NTYPES
+};
+
+struct sfc_evq {
+ /* Used on datapath */
+ efx_evq_t *common;
+ const efx_ev_callbacks_t *callbacks;
+ unsigned int read_ptr;
+ boolean_t exception;
+ efsys_mem_t mem;
+ struct sfc_dp_rxq *dp_rxq;
+ struct sfc_dp_txq *dp_txq;
+
+ /* Not used on datapath */
+ struct sfc_adapter *sa;
+ unsigned int evq_index;
+ enum sfc_evq_state init_state;
+ enum sfc_evq_type type;
+ unsigned int entries;
+};
+
+/*
+ * Functions below define event queue to transmit/receive queue and vice
+ * versa mapping.
+ * Own event queue is allocated for management, each Rx and each Tx queue.
+ * Zero event queue is used for management events.
+ * Rx event queues from 1 to RxQ number follow management event queue.
+ * Tx event queues follow Rx event queues.
+ */
+
+static inline unsigned int
+sfc_evq_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa,
+ unsigned int rxq_sw_index)
+{
+ return 1 + rxq_sw_index;
+}
+
+static inline unsigned int
+sfc_evq_index_by_txq_sw_index(struct sfc_adapter *sa, unsigned int txq_sw_index)
+{
+ return 1 + sa->eth_dev->data->nb_rx_queues + txq_sw_index;
+}
+
+int sfc_ev_attach(struct sfc_adapter *sa);
+void sfc_ev_detach(struct sfc_adapter *sa);
+int sfc_ev_start(struct sfc_adapter *sa);
+void sfc_ev_stop(struct sfc_adapter *sa);
+
+int sfc_ev_qinit(struct sfc_adapter *sa,
+ enum sfc_evq_type type, unsigned int type_index,
+ unsigned int entries, int socket_id, struct sfc_evq **evqp);
+void sfc_ev_qfini(struct sfc_evq *evq);
+int sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index);
+void sfc_ev_qstop(struct sfc_evq *evq);
+
+int sfc_ev_qprime(struct sfc_evq *evq);
+void sfc_ev_qpoll(struct sfc_evq *evq);
+
+void sfc_ev_mgmt_qpoll(struct sfc_adapter *sa);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_EV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_filter.c b/src/spdk/dpdk/drivers/net/sfc/sfc_filter.c
new file mode 100644
index 00000000..6ff380a3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_filter.c
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <rte_common.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+
+boolean_t
+sfc_filter_is_match_supported(struct sfc_adapter *sa, uint32_t match)
+{
+ struct sfc_filter *filter = &sa->filter;
+ size_t i;
+
+ for (i = 0; i < filter->supported_match_num; ++i) {
+ if (match == filter->supported_match[i])
+ return B_TRUE;
+ }
+
+ return B_FALSE;
+}
+
+static int
+sfc_filter_cache_match_supported(struct sfc_adapter *sa)
+{
+ struct sfc_filter *filter = &sa->filter;
+ size_t num = filter->supported_match_num;
+ uint32_t *buf = filter->supported_match;
+ unsigned int retry;
+ int rc;
+
+ /* Just a guess of possibly sufficient entries */
+ if (num == 0)
+ num = 16;
+
+ for (retry = 0; retry < 2; ++retry) {
+ if (num != filter->supported_match_num) {
+ rc = ENOMEM;
+ buf = rte_realloc(buf, num * sizeof(*buf), 0);
+ if (buf == NULL)
+ goto fail_realloc;
+ }
+
+ rc = efx_filter_supported_filters(sa->nic, buf, num, &num);
+ if (rc == 0) {
+ filter->supported_match_num = num;
+ filter->supported_match = buf;
+
+ return 0;
+ } else if (rc != ENOSPC) {
+ goto fail_efx_filter_supported_filters;
+ }
+ }
+
+ SFC_ASSERT(rc == ENOSPC);
+
+fail_efx_filter_supported_filters:
+fail_realloc:
+ /* Original pointer is not freed by rte_realloc() on failure */
+ rte_free(buf);
+ filter->supported_match = NULL;
+ filter->supported_match_num = 0;
+ return rc;
+}
+
+int
+sfc_filter_attach(struct sfc_adapter *sa)
+{
+ int rc;
+ unsigned int i;
+
+ sfc_log_init(sa, "entry");
+
+ rc = efx_filter_init(sa->nic);
+ if (rc != 0)
+ goto fail_filter_init;
+
+ rc = sfc_filter_cache_match_supported(sa);
+ if (rc != 0)
+ goto fail_cache_match_supported;
+
+ efx_filter_fini(sa->nic);
+
+ sa->filter.supports_ip_proto_or_addr_filter = B_FALSE;
+ sa->filter.supports_rem_or_local_port_filter = B_FALSE;
+ for (i = 0; i < sa->filter.supported_match_num; ++i) {
+ if (sa->filter.supported_match[i] &
+ (EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST |
+ EFX_FILTER_MATCH_REM_HOST))
+ sa->filter.supports_ip_proto_or_addr_filter = B_TRUE;
+
+ if (sa->filter.supported_match[i] &
+ (EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))
+ sa->filter.supports_rem_or_local_port_filter = B_TRUE;
+ }
+
+ sfc_log_init(sa, "done");
+
+ return 0;
+
+fail_cache_match_supported:
+ efx_filter_fini(sa->nic);
+
+fail_filter_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_filter_detach(struct sfc_adapter *sa)
+{
+ struct sfc_filter *filter = &sa->filter;
+
+ sfc_log_init(sa, "entry");
+
+ rte_free(filter->supported_match);
+ filter->supported_match = NULL;
+ filter->supported_match_num = 0;
+
+ sfc_log_init(sa, "done");
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_filter.h b/src/spdk/dpdk/drivers/net/sfc/sfc_filter.h
new file mode 100644
index 00000000..64ab114e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_filter.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_FILTER_H
+#define _SFC_FILTER_H
+
+#include "efx.h"
+
+#include "sfc_flow.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_filter {
+ /** Number of elements in match_supported array */
+ size_t supported_match_num;
+ /** Driver cache of supported filter match masks */
+ uint32_t *supported_match;
+ /** List of flow rules */
+ struct sfc_flow_list flow_list;
+ /**
+ * Supports any of ip_proto, remote host or local host
+ * filters. This flag is used for filter match exceptions
+ */
+ boolean_t supports_ip_proto_or_addr_filter;
+ /**
+ * Supports any of remote port or local port filters.
+ * This flag is used for filter match exceptions
+ */
+ boolean_t supports_rem_or_local_port_filter;
+};
+
+struct sfc_adapter;
+
+int sfc_filter_attach(struct sfc_adapter *sa);
+void sfc_filter_detach(struct sfc_adapter *sa);
+
+boolean_t sfc_filter_is_match_supported(struct sfc_adapter *sa, uint32_t match);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_FILTER_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_flow.c b/src/spdk/dpdk/drivers/net/sfc/sfc_flow.c
new file mode 100644
index 00000000..371648b0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_flow.c
@@ -0,0 +1,2504 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <rte_byteorder.h>
+#include <rte_tailq.h>
+#include <rte_common.h>
+#include <rte_ethdev_driver.h>
+#include <rte_eth_ctrl.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_rx.h"
+#include "sfc_filter.h"
+#include "sfc_flow.h"
+#include "sfc_log.h"
+#include "sfc_dp_rx.h"
+
+/*
+ * At now flow API is implemented in such a manner that each
+ * flow rule is converted to one or more hardware filters.
+ * All elements of flow rule (attributes, pattern items, actions)
+ * correspond to one or more fields in the efx_filter_spec_s structure
+ * that is responsible for the hardware filter.
+ * If some required field is unset in the flow rule, then a handful
+ * of filter copies will be created to cover all possible values
+ * of such a field.
+ */
+
+enum sfc_flow_item_layers {
+ SFC_FLOW_ITEM_ANY_LAYER,
+ SFC_FLOW_ITEM_START_LAYER,
+ SFC_FLOW_ITEM_L2,
+ SFC_FLOW_ITEM_L3,
+ SFC_FLOW_ITEM_L4,
+};
+
+typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
+ efx_filter_spec_t *spec,
+ struct rte_flow_error *error);
+
+struct sfc_flow_item {
+ enum rte_flow_item_type type; /* Type of item */
+ enum sfc_flow_item_layers layer; /* Layer of item */
+ enum sfc_flow_item_layers prev_layer; /* Previous layer of item */
+ sfc_flow_item_parse *parse; /* Parsing function */
+};
+
+static sfc_flow_item_parse sfc_flow_parse_void;
+static sfc_flow_item_parse sfc_flow_parse_eth;
+static sfc_flow_item_parse sfc_flow_parse_vlan;
+static sfc_flow_item_parse sfc_flow_parse_ipv4;
+static sfc_flow_item_parse sfc_flow_parse_ipv6;
+static sfc_flow_item_parse sfc_flow_parse_tcp;
+static sfc_flow_item_parse sfc_flow_parse_udp;
+static sfc_flow_item_parse sfc_flow_parse_vxlan;
+static sfc_flow_item_parse sfc_flow_parse_geneve;
+static sfc_flow_item_parse sfc_flow_parse_nvgre;
+
+typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error);
+
+typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
+ efx_filter_spec_t *spec,
+ struct sfc_filter *filter);
+
+struct sfc_flow_copy_flag {
+ /* EFX filter specification match flag */
+ efx_filter_match_flags_t flag;
+ /* Number of values of corresponding field */
+ unsigned int vals_count;
+ /* Function to set values in specifications */
+ sfc_flow_spec_set_vals *set_vals;
+ /*
+ * Function to check that the specification is suitable
+ * for adding this match flag
+ */
+ sfc_flow_spec_check *spec_check;
+};
+
+static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
+static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
+static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
+static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
+static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
+static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
+static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
+
+static boolean_t
+sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
+{
+ uint8_t sum = 0;
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ sum |= buf[i];
+
+ return (sum == 0) ? B_TRUE : B_FALSE;
+}
+
+/*
+ * Validate item and prepare structures spec and mask for parsing
+ */
+static int
+sfc_flow_parse_init(const struct rte_flow_item *item,
+ const void **spec_ptr,
+ const void **mask_ptr,
+ const void *supp_mask,
+ const void *def_mask,
+ unsigned int size,
+ struct rte_flow_error *error)
+{
+ const uint8_t *spec;
+ const uint8_t *mask;
+ const uint8_t *last;
+ uint8_t supp;
+ unsigned int i;
+
+ if (item == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "NULL item");
+ return -rte_errno;
+ }
+
+ if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Mask or last is set without spec");
+ return -rte_errno;
+ }
+
+ /*
+ * If "mask" is not set, default mask is used,
+ * but if default mask is NULL, "mask" should be set
+ */
+ if (item->mask == NULL) {
+ if (def_mask == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Mask should be specified");
+ return -rte_errno;
+ }
+
+ mask = def_mask;
+ } else {
+ mask = item->mask;
+ }
+
+ spec = item->spec;
+ last = item->last;
+
+ if (spec == NULL)
+ goto exit;
+
+ /*
+ * If field values in "last" are either 0 or equal to the corresponding
+ * values in "spec" then they are ignored
+ */
+ if (last != NULL &&
+ !sfc_flow_is_zero(last, size) &&
+ memcmp(last, spec, size) != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Ranging is not supported");
+ return -rte_errno;
+ }
+
+ if (supp_mask == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Supported mask for item should be specified");
+ return -rte_errno;
+ }
+
+ /* Check that mask does not ask for more match than supp_mask */
+ for (i = 0; i < size; i++) {
+ supp = ((const uint8_t *)supp_mask)[i];
+
+ if (~supp & mask[i]) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Item's field is not supported");
+ return -rte_errno;
+ }
+ }
+
+exit:
+ *spec_ptr = spec;
+ *mask_ptr = mask;
+ return 0;
+}
+
+/*
+ * Protocol parsers.
+ * Masking is not supported, so masks in items should be either
+ * full or empty (zeroed) and set only for supported fields which
+ * are specified in the supp_mask.
+ */
+
+static int
+sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
+ __rte_unused efx_filter_spec_t *efx_spec,
+ __rte_unused struct rte_flow_error *error)
+{
+ return 0;
+}
+
+/**
+ * Convert Ethernet item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Outer frame specification may only comprise
+ * source/destination addresses and Ethertype field.
+ * Inner frame specification may contain destination address only.
+ * There is support for individual/group mask as well as for empty and full.
+ * If the mask is NULL, default mask will be used. Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_eth(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_eth *spec = NULL;
+ const struct rte_flow_item_eth *mask = NULL;
+ const struct rte_flow_item_eth supp_mask = {
+ .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .type = 0xffff,
+ };
+ const struct rte_flow_item_eth ifrm_supp_mask = {
+ .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ };
+ const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+ const struct rte_flow_item_eth *supp_mask_p;
+ const struct rte_flow_item_eth *def_mask_p;
+ uint8_t *loc_mac = NULL;
+ boolean_t is_ifrm = (efx_spec->efs_encap_type !=
+ EFX_TUNNEL_PROTOCOL_NONE);
+
+ if (is_ifrm) {
+ supp_mask_p = &ifrm_supp_mask;
+ def_mask_p = &ifrm_supp_mask;
+ loc_mac = efx_spec->efs_ifrm_loc_mac;
+ } else {
+ supp_mask_p = &supp_mask;
+ def_mask_p = &rte_flow_item_eth_mask;
+ loc_mac = efx_spec->efs_loc_mac;
+ }
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ supp_mask_p, def_mask_p,
+ sizeof(struct rte_flow_item_eth),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /* If "spec" is not set, could be any Ethernet */
+ if (spec == NULL)
+ return 0;
+
+ if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
+ efx_spec->efs_match_flags |= is_ifrm ?
+ EFX_FILTER_MATCH_IFRM_LOC_MAC :
+ EFX_FILTER_MATCH_LOC_MAC;
+ rte_memcpy(loc_mac, spec->dst.addr_bytes,
+ EFX_MAC_ADDR_LEN);
+ } else if (memcmp(mask->dst.addr_bytes, ig_mask,
+ EFX_MAC_ADDR_LEN) == 0) {
+ if (is_unicast_ether_addr(&spec->dst))
+ efx_spec->efs_match_flags |= is_ifrm ?
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
+ else
+ efx_spec->efs_match_flags |= is_ifrm ?
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
+ } else if (!is_zero_ether_addr(&mask->dst)) {
+ goto fail_bad_mask;
+ }
+
+ /*
+ * ifrm_supp_mask ensures that the source address and
+ * ethertype masks are equal to zero in inner frame,
+ * so these fields are filled in only for the outer frame
+ */
+ if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
+ rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
+ EFX_MAC_ADDR_LEN);
+ } else if (!is_zero_ether_addr(&mask->src)) {
+ goto fail_bad_mask;
+ }
+
+ /*
+ * Ether type is in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used
+ */
+ if (mask->type == supp_mask.type) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = rte_bswap16(spec->type);
+ } else if (mask->type != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the ETH pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert VLAN item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only VID field is supported.
+ * The mask can not be NULL. Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_vlan(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ uint16_t vid;
+ const struct rte_flow_item_vlan *spec = NULL;
+ const struct rte_flow_item_vlan *mask = NULL;
+ const struct rte_flow_item_vlan supp_mask = {
+ .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+ .inner_type = RTE_BE16(0xffff),
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ NULL,
+ sizeof(struct rte_flow_item_vlan),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * VID is in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used.
+ * If two VLAN items are included, the first matches
+ * the outer tag and the next matches the inner tag.
+ */
+ if (mask->tci == supp_mask.tci) {
+ /* Apply mask to keep VID only */
+ vid = rte_bswap16(spec->tci & mask->tci);
+
+ if (!(efx_spec->efs_match_flags &
+ EFX_FILTER_MATCH_OUTER_VID)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ efx_spec->efs_outer_vid = vid;
+ } else if (!(efx_spec->efs_match_flags &
+ EFX_FILTER_MATCH_INNER_VID)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
+ efx_spec->efs_inner_vid = vid;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "More than two VLAN items");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN ID in TCI match is required");
+ return -rte_errno;
+ }
+
+ if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN TPID matching is not supported");
+ return -rte_errno;
+ }
+ if (mask->inner_type == supp_mask.inner_type) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
+ } else if (mask->inner_type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask for VLAN inner_type");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Convert IPv4 item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only source and destination addresses and
+ * protocol fields are supported. If the mask is NULL, default
+ * mask will be used. Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_ipv4(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_ipv4 *spec = NULL;
+ const struct rte_flow_item_ipv4 *mask = NULL;
+ const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
+ const struct rte_flow_item_ipv4 supp_mask = {
+ .hdr = {
+ .src_addr = 0xffffffff,
+ .dst_addr = 0xffffffff,
+ .next_proto_id = 0xff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_ipv4_mask,
+ sizeof(struct rte_flow_item_ipv4),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by IPv4 source and destination addresses requires
+ * the appropriate ETHER_TYPE in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = ether_type_ipv4;
+ } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Ethertype in pattern with IPV4 item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * IPv4 addresses are in big-endian byte order in item and in
+ * efx_spec
+ */
+ if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
+ } else if (mask->hdr.src_addr != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
+ } else if (mask->hdr.dst_addr != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
+ } else if (mask->hdr.next_proto_id != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the IPV4 pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert IPv6 item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only source and destination addresses and
+ * next header fields are supported. If the mask is NULL, default
+ * mask will be used. Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_ipv6(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_ipv6 *spec = NULL;
+ const struct rte_flow_item_ipv6 *mask = NULL;
+ const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
+ const struct rte_flow_item_ipv6 supp_mask = {
+ .hdr = {
+ .src_addr = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff },
+ .dst_addr = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff },
+ .proto = 0xff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_ipv6_mask,
+ sizeof(struct rte_flow_item_ipv6),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by IPv6 source and destination addresses requires
+ * the appropriate ETHER_TYPE in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = ether_type_ipv6;
+ } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Ethertype in pattern with IPV6 item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * IPv6 addresses are in big-endian byte order in item and in
+ * efx_spec
+ */
+ if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
+ sizeof(mask->hdr.src_addr)) == 0) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
+
+ RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
+ sizeof(spec->hdr.src_addr));
+ rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
+ sizeof(efx_spec->efs_rem_host));
+ } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
+ sizeof(mask->hdr.src_addr))) {
+ goto fail_bad_mask;
+ }
+
+ if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
+ sizeof(mask->hdr.dst_addr)) == 0) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+
+ RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
+ sizeof(spec->hdr.dst_addr));
+ rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
+ sizeof(efx_spec->efs_loc_host));
+ } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
+ sizeof(mask->hdr.dst_addr))) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.proto == supp_mask.hdr.proto) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = spec->hdr.proto;
+ } else if (mask->hdr.proto != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the IPV6 pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert TCP item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only source and destination ports fields
+ * are supported. If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_tcp(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_tcp *spec = NULL;
+ const struct rte_flow_item_tcp *mask = NULL;
+ const struct rte_flow_item_tcp supp_mask = {
+ .hdr = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_tcp_mask,
+ sizeof(struct rte_flow_item_tcp),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by TCP source and destination ports requires
+ * the appropriate IP_PROTO in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
+ } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IP proto in pattern with TCP item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * Source and destination ports are in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used
+ */
+ if (mask->hdr.src_port == supp_mask.hdr.src_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
+ } else if (mask->hdr.src_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
+ } else if (mask->hdr.dst_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the TCP pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert UDP item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only source and destination ports fields
+ * are supported. If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_udp(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_udp *spec = NULL;
+ const struct rte_flow_item_udp *mask = NULL;
+ const struct rte_flow_item_udp supp_mask = {
+ .hdr = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_udp_mask,
+ sizeof(struct rte_flow_item_udp),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by UDP source and destination ports requires
+ * the appropriate IP_PROTO in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
+ } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IP proto in pattern with UDP item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * Source and destination ports are in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used
+ */
+ if (mask->hdr.src_port == supp_mask.hdr.src_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
+ } else if (mask->hdr.src_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
+ } else if (mask->hdr.dst_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the UDP pattern item");
+ return -rte_errno;
+}
+
+/*
+ * Filters for encapsulated packets match based on the EtherType and IP
+ * protocol in the outer frame.
+ */
+static int
+sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ uint8_t ip_proto,
+ struct rte_flow_error *error)
+{
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = ip_proto;
+ } else if (efx_spec->efs_ip_proto != ip_proto) {
+ switch (ip_proto) {
+ case EFX_IPPROTO_UDP:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Outer IP header protocol must be UDP "
+ "in VxLAN/GENEVE pattern");
+ return -rte_errno;
+
+ case EFX_IPPROTO_GRE:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Outer IP header protocol must be GRE "
+ "in NVGRE pattern");
+ return -rte_errno;
+
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Only VxLAN/GENEVE/NVGRE tunneling patterns "
+ "are supported");
+ return -rte_errno;
+ }
+ }
+
+ if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
+ efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
+ efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Outer frame EtherType in pattern with tunneling "
+ "must be IPv4 or IPv6");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
+ const uint8_t *vni_or_vsid_val,
+ const uint8_t *vni_or_vsid_mask,
+ const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
+ 0xff, 0xff, 0xff
+ };
+
+ if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
+ EFX_VNI_OR_VSID_LEN) == 0) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
+ rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
+ EFX_VNI_OR_VSID_LEN);
+ } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported VNI/VSID mask");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Convert VXLAN item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only VXLAN network identifier field is supported.
+ * If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_vxlan(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_vxlan *spec = NULL;
+ const struct rte_flow_item_vxlan *mask = NULL;
+ const struct rte_flow_item_vxlan supp_mask = {
+ .vni = { 0xff, 0xff, 0xff }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_vxlan_mask,
+ sizeof(struct rte_flow_item_vxlan),
+ error);
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
+ EFX_IPPROTO_UDP, error);
+ if (rc != 0)
+ return rc;
+
+ efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+
+ if (spec == NULL)
+ return 0;
+
+ rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
+ mask->vni, item, error);
+
+ return rc;
+}
+
+/**
+ * Convert GENEVE item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only Virtual Network Identifier and protocol type
+ * fields are supported. But protocol type can be only Ethernet (0x6558).
+ * If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_geneve(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_geneve *spec = NULL;
+ const struct rte_flow_item_geneve *mask = NULL;
+ const struct rte_flow_item_geneve supp_mask = {
+ .protocol = RTE_BE16(0xffff),
+ .vni = { 0xff, 0xff, 0xff }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_geneve_mask,
+ sizeof(struct rte_flow_item_geneve),
+ error);
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
+ EFX_IPPROTO_UDP, error);
+ if (rc != 0)
+ return rc;
+
+ efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+
+ if (spec == NULL)
+ return 0;
+
+ if (mask->protocol == supp_mask.protocol) {
+ if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "GENEVE encap. protocol must be Ethernet "
+ "(0x6558) in the GENEVE pattern item");
+ return -rte_errno;
+ }
+ } else if (mask->protocol != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported mask for GENEVE encap. protocol");
+ return -rte_errno;
+ }
+
+ rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
+ mask->vni, item, error);
+
+ return rc;
+}
+
+/**
+ * Convert NVGRE item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only virtual subnet ID field is supported.
+ * If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_nvgre(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_nvgre *spec = NULL;
+ const struct rte_flow_item_nvgre *mask = NULL;
+ const struct rte_flow_item_nvgre supp_mask = {
+ .tni = { 0xff, 0xff, 0xff }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_nvgre_mask,
+ sizeof(struct rte_flow_item_nvgre),
+ error);
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
+ EFX_IPPROTO_GRE, error);
+ if (rc != 0)
+ return rc;
+
+ efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+
+ if (spec == NULL)
+ return 0;
+
+ rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
+ mask->tni, item, error);
+
+ return rc;
+}
+
+static const struct sfc_flow_item sfc_flow_items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VOID,
+ .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
+ .layer = SFC_FLOW_ITEM_ANY_LAYER,
+ .parse = sfc_flow_parse_void,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .prev_layer = SFC_FLOW_ITEM_START_LAYER,
+ .layer = SFC_FLOW_ITEM_L2,
+ .parse = sfc_flow_parse_eth,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .prev_layer = SFC_FLOW_ITEM_L2,
+ .layer = SFC_FLOW_ITEM_L2,
+ .parse = sfc_flow_parse_vlan,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .prev_layer = SFC_FLOW_ITEM_L2,
+ .layer = SFC_FLOW_ITEM_L3,
+ .parse = sfc_flow_parse_ipv4,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .prev_layer = SFC_FLOW_ITEM_L2,
+ .layer = SFC_FLOW_ITEM_L3,
+ .parse = sfc_flow_parse_ipv6,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .prev_layer = SFC_FLOW_ITEM_L3,
+ .layer = SFC_FLOW_ITEM_L4,
+ .parse = sfc_flow_parse_tcp,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .prev_layer = SFC_FLOW_ITEM_L3,
+ .layer = SFC_FLOW_ITEM_L4,
+ .parse = sfc_flow_parse_udp,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+ .prev_layer = SFC_FLOW_ITEM_L4,
+ .layer = SFC_FLOW_ITEM_START_LAYER,
+ .parse = sfc_flow_parse_vxlan,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_GENEVE,
+ .prev_layer = SFC_FLOW_ITEM_L4,
+ .layer = SFC_FLOW_ITEM_START_LAYER,
+ .parse = sfc_flow_parse_geneve,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_NVGRE,
+ .prev_layer = SFC_FLOW_ITEM_L3,
+ .layer = SFC_FLOW_ITEM_START_LAYER,
+ .parse = sfc_flow_parse_nvgre,
+ },
+};
+
+/*
+ * Protocol-independent flow API support
+ */
+static int
+sfc_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ if (attr == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+ "NULL attribute");
+ return -rte_errno;
+ }
+ if (attr->group != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+ "Groups are not supported");
+ return -rte_errno;
+ }
+ if (attr->priority != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
+ "Priorities are not supported");
+ return -rte_errno;
+ }
+ if (attr->egress != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
+ "Egress is not supported");
+ return -rte_errno;
+ }
+ if (attr->transfer != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
+ "Transfer is not supported");
+ return -rte_errno;
+ }
+ if (attr->ingress == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+ "Only ingress is supported");
+ return -rte_errno;
+ }
+
+ flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
+ flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+
+ return 0;
+}
+
+/* Get item from array sfc_flow_items */
+static const struct sfc_flow_item *
+sfc_flow_get_item(enum rte_flow_item_type type)
+{
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
+ if (sfc_flow_items[i].type == type)
+ return &sfc_flow_items[i];
+
+ return NULL;
+}
+
+static int
+sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc;
+ unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
+ boolean_t is_ifrm = B_FALSE;
+ const struct sfc_flow_item *item;
+
+ if (pattern == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+ "NULL pattern");
+ return -rte_errno;
+ }
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ item = sfc_flow_get_item(pattern->type);
+ if (item == NULL) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+ "Unsupported pattern item");
+ return -rte_errno;
+ }
+
+ /*
+ * Omitting one or several protocol layers at the beginning
+ * of pattern is supported
+ */
+ if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
+ prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
+ item->prev_layer != prev_layer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+ "Unexpected sequence of pattern items");
+ return -rte_errno;
+ }
+
+ /*
+ * Allow only VOID and ETH pattern items in the inner frame.
+ * Also check that there is only one tunneling protocol.
+ */
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ if (is_ifrm) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "More than one tunneling protocol");
+ return -rte_errno;
+ }
+ is_ifrm = B_TRUE;
+ break;
+
+ default:
+ if (is_ifrm) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "There is an unsupported pattern item "
+ "in the inner frame");
+ return -rte_errno;
+ }
+ break;
+ }
+
+ rc = item->parse(pattern, &flow->spec.template, error);
+ if (rc != 0)
+ return rc;
+
+ if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
+ prev_layer = item->layer;
+ }
+
+ return 0;
+}
+
+static int
+sfc_flow_parse_queue(struct sfc_adapter *sa,
+ const struct rte_flow_action_queue *queue,
+ struct rte_flow *flow)
+{
+ struct sfc_rxq *rxq;
+
+ if (queue->index >= sa->rxq_count)
+ return -EINVAL;
+
+ rxq = sa->rxq_info[queue->index].rxq;
+ flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;
+
+ return 0;
+}
+
+static int
+sfc_flow_parse_rss(struct sfc_adapter *sa,
+ const struct rte_flow_action_rss *action_rss,
+ struct rte_flow *flow)
+{
+ struct sfc_rss *rss = &sa->rss;
+ unsigned int rxq_sw_index;
+ struct sfc_rxq *rxq;
+ unsigned int rxq_hw_index_min;
+ unsigned int rxq_hw_index_max;
+ efx_rx_hash_type_t efx_hash_types;
+ const uint8_t *rss_key;
+ struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
+ unsigned int i;
+
+ if (action_rss->queue_num == 0)
+ return -EINVAL;
+
+ rxq_sw_index = sa->rxq_count - 1;
+ rxq = sa->rxq_info[rxq_sw_index].rxq;
+ rxq_hw_index_min = rxq->hw_index;
+ rxq_hw_index_max = 0;
+
+ for (i = 0; i < action_rss->queue_num; ++i) {
+ rxq_sw_index = action_rss->queue[i];
+
+ if (rxq_sw_index >= sa->rxq_count)
+ return -EINVAL;
+
+ rxq = sa->rxq_info[rxq_sw_index].rxq;
+
+ if (rxq->hw_index < rxq_hw_index_min)
+ rxq_hw_index_min = rxq->hw_index;
+
+ if (rxq->hw_index > rxq_hw_index_max)
+ rxq_hw_index_max = rxq->hw_index;
+ }
+
+ switch (action_rss->func) {
+ case RTE_ETH_HASH_FUNCTION_DEFAULT:
+ case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (action_rss->level)
+ return -EINVAL;
+
+ /*
+ * Dummy RSS action with only one queue and no specific settings
+ * for hash types and key does not require dedicated RSS context
+ * and may be simplified to single queue action.
+ */
+ if (action_rss->queue_num == 1 && action_rss->types == 0 &&
+ action_rss->key_len == 0) {
+ flow->spec.template.efs_dmaq_id = rxq_hw_index_min;
+ return 0;
+ }
+
+ if (action_rss->types) {
+ int rc;
+
+ rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
+ &efx_hash_types);
+ if (rc != 0)
+ return -rc;
+ } else {
+ unsigned int i;
+
+ efx_hash_types = 0;
+ for (i = 0; i < rss->hf_map_nb_entries; ++i)
+ efx_hash_types |= rss->hf_map[i].efx;
+ }
+
+ if (action_rss->key_len) {
+ if (action_rss->key_len != sizeof(rss->key))
+ return -EINVAL;
+
+ rss_key = action_rss->key;
+ } else {
+ rss_key = rss->key;
+ }
+
+ flow->rss = B_TRUE;
+
+ sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
+ sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
+ sfc_rss_conf->rss_hash_types = efx_hash_types;
+ rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
+
+ for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
+ unsigned int nb_queues = action_rss->queue_num;
+ unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
+ struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
+
+ sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
+ }
+
+ return 0;
+}
+
+static int
+sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
+ unsigned int filters_count)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < filters_count; i++) {
+ int rc;
+
+ rc = efx_filter_remove(sa->nic, &spec->filters[i]);
+ if (ret == 0 && rc != 0) {
+ sfc_err(sa, "failed to remove filter specification "
+ "(rc = %d)", rc);
+ ret = rc;
+ }
+ }
+
+ return ret;
+}
+
+static int
+sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
+{
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < spec->count; i++) {
+ rc = efx_filter_insert(sa->nic, &spec->filters[i]);
+ if (rc != 0) {
+ sfc_flow_spec_flush(sa, spec, i);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int
+sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
+{
+ return sfc_flow_spec_flush(sa, spec, spec->count);
+}
+
+static int
+sfc_flow_filter_insert(struct sfc_adapter *sa,
+ struct rte_flow *flow)
+{
+ struct sfc_rss *rss = &sa->rss;
+ struct sfc_flow_rss *flow_rss = &flow->rss_conf;
+ uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+ unsigned int i;
+ int rc = 0;
+
+ if (flow->rss) {
+ unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
+ flow_rss->rxq_hw_index_min + 1,
+ EFX_MAXRSS);
+
+ rc = efx_rx_scale_context_alloc(sa->nic,
+ EFX_RX_SCALE_EXCLUSIVE,
+ rss_spread,
+ &efs_rss_context);
+ if (rc != 0)
+ goto fail_scale_context_alloc;
+
+ rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
+ rss->hash_alg,
+ flow_rss->rss_hash_types, B_TRUE);
+ if (rc != 0)
+ goto fail_scale_mode_set;
+
+ rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
+ flow_rss->rss_key,
+ sizeof(rss->key));
+ if (rc != 0)
+ goto fail_scale_key_set;
+
+ /*
+ * At this point, fully elaborated filter specifications
+ * have been produced from the template. To make sure that
+ * RSS behaviour is consistent between them, set the same
+ * RSS context value everywhere.
+ */
+ for (i = 0; i < flow->spec.count; i++) {
+ efx_filter_spec_t *spec = &flow->spec.filters[i];
+
+ spec->efs_rss_context = efs_rss_context;
+ spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
+ spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+ }
+ }
+
+ rc = sfc_flow_spec_insert(sa, &flow->spec);
+ if (rc != 0)
+ goto fail_filter_insert;
+
+ if (flow->rss) {
+ /*
+ * Scale table is set after filter insertion because
+ * the table entries are relative to the base RxQ ID
+ * and the latter is submitted to the HW by means of
+ * inserting a filter, so by the time of the request
+ * the HW knows all the information needed to verify
+ * the table entries, and the operation will succeed
+ */
+ rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
+ flow_rss->rss_tbl,
+ RTE_DIM(flow_rss->rss_tbl));
+ if (rc != 0)
+ goto fail_scale_tbl_set;
+ }
+
+ return 0;
+
+fail_scale_tbl_set:
+ sfc_flow_spec_remove(sa, &flow->spec);
+
+fail_filter_insert:
+fail_scale_key_set:
+fail_scale_mode_set:
+ if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
+ efx_rx_scale_context_free(sa->nic, efs_rss_context);
+
+fail_scale_context_alloc:
+ return rc;
+}
+
+static int
+sfc_flow_filter_remove(struct sfc_adapter *sa,
+ struct rte_flow *flow)
+{
+ int rc = 0;
+
+ rc = sfc_flow_spec_remove(sa, &flow->spec);
+ if (rc != 0)
+ return rc;
+
+ if (flow->rss) {
+ /*
+ * All specifications for a given flow rule have the same RSS
+ * context, so that RSS context value is taken from the first
+ * filter specification
+ */
+ efx_filter_spec_t *spec = &flow->spec.filters[0];
+
+ rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
+ }
+
+ return rc;
+}
+
+static int
+sfc_flow_parse_mark(struct sfc_adapter *sa,
+ const struct rte_flow_action_mark *mark,
+ struct rte_flow *flow)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+
+ if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
+ return EINVAL;
+
+ flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
+ flow->spec.template.efs_mark = mark->id;
+
+ return 0;
+}
+
+static int
+sfc_flow_parse_actions(struct sfc_adapter *sa,
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const unsigned int dp_rx_features = sa->dp_rx->features;
+ uint32_t actions_set = 0;
+ const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
+ (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
+ (1UL << RTE_FLOW_ACTION_TYPE_DROP);
+ const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
+ (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
+
+ if (actions == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+ "NULL actions");
+ return -rte_errno;
+ }
+
+#define SFC_BUILD_SET_OVERFLOW(_action, _set) \
+ RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
+ actions_set);
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
+ actions_set);
+ if ((actions_set & fate_actions_mask) != 0)
+ goto fail_fate_actions;
+
+ rc = sfc_flow_parse_queue(sa, actions->conf, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Bad QUEUE action");
+ return -rte_errno;
+ }
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
+ actions_set);
+ if ((actions_set & fate_actions_mask) != 0)
+ goto fail_fate_actions;
+
+ rc = sfc_flow_parse_rss(sa, actions->conf, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, -rc,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Bad RSS action");
+ return -rte_errno;
+ }
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
+ actions_set);
+ if ((actions_set & fate_actions_mask) != 0)
+ goto fail_fate_actions;
+
+ flow->spec.template.efs_dmaq_id =
+ EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
+ actions_set);
+ if ((actions_set & mark_actions_mask) != 0)
+ goto fail_actions_overlap;
+
+ if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "FLAG action is not supported on the current Rx datapath");
+ return -rte_errno;
+ }
+
+ flow->spec.template.efs_flags |=
+ EFX_FILTER_FLAG_ACTION_FLAG;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
+ actions_set);
+ if ((actions_set & mark_actions_mask) != 0)
+ goto fail_actions_overlap;
+
+ if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "MARK action is not supported on the current Rx datapath");
+ return -rte_errno;
+ }
+
+ rc = sfc_flow_parse_mark(sa, actions->conf, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Bad MARK action");
+ return -rte_errno;
+ }
+ break;
+
+ default:
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Action is not supported");
+ return -rte_errno;
+ }
+
+ actions_set |= (1UL << actions->type);
+ }
+#undef SFC_BUILD_SET_OVERFLOW
+
+ /* When fate is unknown, drop traffic. */
+ if ((actions_set & fate_actions_mask) == 0) {
+ flow->spec.template.efs_dmaq_id =
+ EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
+ }
+
+ return 0;
+
+fail_fate_actions:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Cannot combine several fate-deciding actions, "
+ "choose between QUEUE, RSS or DROP");
+ return -rte_errno;
+
+fail_actions_overlap:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Overlapping actions are not supported");
+ return -rte_errno;
+}
+
+/**
+ * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
+ * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
+ * specifications after copying.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param filters_count_for_one_val[in]
+ * How many specifications should have the same match flag, what is the
+ * number of specifications before copying.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ static const efx_filter_match_flags_t vals[] = {
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
+ };
+
+ if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Number of specifications is incorrect while copying "
+ "by unknown destination flags");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < spec->count; i++) {
+ /* The check above ensures that divisor can't be zero here */
+ spec->filters[i].efs_match_flags |=
+ vals[i / filters_count_for_one_val];
+ }
+
+ return 0;
+}
+
+/**
+ * Check that the following conditions are met:
+ * - the list of supported filters has a filter
+ * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
+ * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
+ * be inserted.
+ *
+ * @param match[in]
+ * The match flags of filter.
+ * @param spec[in]
+ * Specification to be supplemented.
+ * @param filter[in]
+ * SFC filter with list of supported filters.
+ */
+static boolean_t
+sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
+ __rte_unused efx_filter_spec_t *spec,
+ struct sfc_filter *filter)
+{
+ unsigned int i;
+ efx_filter_match_flags_t match_mcast_dst;
+
+ match_mcast_dst =
+ (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
+ for (i = 0; i < filter->supported_match_num; i++) {
+ if (match_mcast_dst == filter->supported_match[i])
+ return B_TRUE;
+ }
+
+ return B_FALSE;
+}
+
+/**
+ * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
+ * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
+ * specifications after copying.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param filters_count_for_one_val[in]
+ * How many specifications should have the same EtherType value, what is the
+ * number of specifications before copying.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ static const uint16_t vals[] = {
+ EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
+ };
+
+ if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Number of specifications is incorrect "
+ "while copying by Ethertype");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < spec->count; i++) {
+ spec->filters[i].efs_match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE;
+
+ /*
+ * The check above ensures that
+ * filters_count_for_one_val is not 0
+ */
+ spec->filters[i].efs_ether_type =
+ vals[i / filters_count_for_one_val];
+ }
+
+ return 0;
+}
+
+/**
+ * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
+ * in the same specifications after copying.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param filters_count_for_one_val[in]
+ * How many specifications should have the same match flag, what is the
+ * number of specifications before copying.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+
+ if (filters_count_for_one_val != spec->count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Number of specifications is incorrect "
+ "while copying by outer VLAN ID");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < spec->count; i++) {
+ spec->filters[i].efs_match_flags |=
+ EFX_FILTER_MATCH_OUTER_VID;
+
+ spec->filters[i].efs_outer_vid = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
+ * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
+ * specifications after copying.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param filters_count_for_one_val[in]
+ * How many specifications should have the same match flag, what is the
+ * number of specifications before copying.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ static const efx_filter_match_flags_t vals[] = {
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
+ };
+
+ if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Number of specifications is incorrect while copying "
+ "by inner frame unknown destination flags");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < spec->count; i++) {
+ /* The check above ensures that divisor can't be zero here */
+ spec->filters[i].efs_match_flags |=
+ vals[i / filters_count_for_one_val];
+ }
+
+ return 0;
+}
+
+/**
+ * Check that the following conditions are met:
+ * - the specification corresponds to a filter for encapsulated traffic
+ * - the list of supported filters has a filter
+ * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
+ * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
+ * be inserted.
+ *
+ * @param match[in]
+ * The match flags of filter.
+ * @param spec[in]
+ * Specification to be supplemented.
+ * @param filter[in]
+ * SFC filter with list of supported filters.
+ */
+static boolean_t
+sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
+ efx_filter_spec_t *spec,
+ struct sfc_filter *filter)
+{
+ unsigned int i;
+ efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
+ efx_filter_match_flags_t match_mcast_dst;
+
+ if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
+ return B_FALSE;
+
+ match_mcast_dst =
+ (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
+ for (i = 0; i < filter->supported_match_num; i++) {
+ if (match_mcast_dst == filter->supported_match[i])
+ return B_TRUE;
+ }
+
+ return B_FALSE;
+}
+
+/**
+ * Check that the list of supported filters has a filter that differs
+ * from @p match in that it has no flag EFX_FILTER_MATCH_OUTER_VID
+ * in this case that filter will be used and the flag
+ * EFX_FILTER_MATCH_OUTER_VID is not needed.
+ *
+ * @param match[in]
+ * The match flags of filter.
+ * @param spec[in]
+ * Specification to be supplemented.
+ * @param filter[in]
+ * SFC filter with list of supported filters.
+ */
+static boolean_t
+sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
+ __rte_unused efx_filter_spec_t *spec,
+ struct sfc_filter *filter)
+{
+ unsigned int i;
+ efx_filter_match_flags_t match_without_vid =
+ match & ~EFX_FILTER_MATCH_OUTER_VID;
+
+ for (i = 0; i < filter->supported_match_num; i++) {
+ if (match_without_vid == filter->supported_match[i])
+ return B_FALSE;
+ }
+
+ return B_TRUE;
+}
+
+/*
+ * Match flags that can be automatically added to filters.
+ * Selecting the last minimum when searching for the copy flag ensures that the
+ * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
+ * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
+ * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
+ * filters.
+ */
+static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
+ {
+ .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
+ .vals_count = 2,
+ .set_vals = sfc_flow_set_unknown_dst_flags,
+ .spec_check = sfc_flow_check_unknown_dst_flags,
+ },
+ {
+ .flag = EFX_FILTER_MATCH_ETHER_TYPE,
+ .vals_count = 2,
+ .set_vals = sfc_flow_set_ethertypes,
+ .spec_check = NULL,
+ },
+ {
+ .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
+ .vals_count = 2,
+ .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
+ .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
+ },
+ {
+ .flag = EFX_FILTER_MATCH_OUTER_VID,
+ .vals_count = 1,
+ .set_vals = sfc_flow_set_outer_vid_flag,
+ .spec_check = sfc_flow_check_outer_vid_flag,
+ },
+};
+
+/* Get item from array sfc_flow_copy_flags */
+static const struct sfc_flow_copy_flag *
+sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
+{
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
+ if (sfc_flow_copy_flags[i].flag == flag)
+ return &sfc_flow_copy_flags[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * Make copies of the specifications, set match flag and values
+ * of the field that corresponds to it.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param flag[in]
+ * The match flag to add.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
+ efx_filter_match_flags_t flag,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ unsigned int new_filters_count;
+ unsigned int filters_count_for_one_val;
+ const struct sfc_flow_copy_flag *copy_flag;
+ int rc;
+
+ copy_flag = sfc_flow_get_copy_flag(flag);
+ if (copy_flag == NULL) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Unsupported spec field for copying");
+ return -rte_errno;
+ }
+
+ new_filters_count = spec->count * copy_flag->vals_count;
+ if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Too much EFX specifications in the flow rule");
+ return -rte_errno;
+ }
+
+ /* Copy filters specifications */
+ for (i = spec->count; i < new_filters_count; i++)
+ spec->filters[i] = spec->filters[i - spec->count];
+
+ filters_count_for_one_val = spec->count;
+ spec->count = new_filters_count;
+
+ rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * Check that the given set of match flags missing in the original filter spec
+ * could be covered by adding spec copies which specify the corresponding
+ * flags and packet field values to match.
+ *
+ * @param miss_flags[in]
+ * Flags that are missing until the supported filter.
+ * @param spec[in]
+ * Specification to be supplemented.
+ * @param filter[in]
+ * SFC filter.
+ *
+ * @return
+ * Number of specifications after copy or 0, if the flags can not be added.
+ */
+static unsigned int
+sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
+ efx_filter_spec_t *spec,
+ struct sfc_filter *filter)
+{
+ unsigned int i;
+ efx_filter_match_flags_t copy_flags = 0;
+ efx_filter_match_flags_t flag;
+ efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
+ sfc_flow_spec_check *check;
+ unsigned int multiplier = 1;
+
+ for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
+ flag = sfc_flow_copy_flags[i].flag;
+ check = sfc_flow_copy_flags[i].spec_check;
+ if ((flag & miss_flags) == flag) {
+ if (check != NULL && (!check(match, spec, filter)))
+ continue;
+
+ copy_flags |= flag;
+ multiplier *= sfc_flow_copy_flags[i].vals_count;
+ }
+ }
+
+ if (copy_flags == miss_flags)
+ return multiplier;
+
+ return 0;
+}
+
+/**
+ * Attempt to supplement the specification template to the minimally
+ * supported set of match flags. To do this, it is necessary to copy
+ * the specifications, filling them with the values of fields that
+ * correspond to the missing flags.
+ * The necessary and sufficient filter is built from the fewest number
+ * of copies which could be made to cover the minimally required set
+ * of flags.
+ *
+ * @param sa[in]
+ * SFC adapter.
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
+ struct sfc_flow_spec *spec,
+ struct rte_flow_error *error)
+{
+ struct sfc_filter *filter = &sa->filter;
+ efx_filter_match_flags_t miss_flags;
+ efx_filter_match_flags_t min_miss_flags = 0;
+ efx_filter_match_flags_t match;
+ unsigned int min_multiplier = UINT_MAX;
+ unsigned int multiplier;
+ unsigned int i;
+ int rc;
+
+ match = spec->template.efs_match_flags;
+ for (i = 0; i < filter->supported_match_num; i++) {
+ if ((match & filter->supported_match[i]) == match) {
+ miss_flags = filter->supported_match[i] & (~match);
+ multiplier = sfc_flow_check_missing_flags(miss_flags,
+ &spec->template, filter);
+ if (multiplier > 0) {
+ if (multiplier <= min_multiplier) {
+ min_multiplier = multiplier;
+ min_miss_flags = miss_flags;
+ }
+ }
+ }
+ }
+
+ if (min_multiplier == UINT_MAX) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "The flow rule pattern is unsupported");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
+ efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
+
+ if ((flag & min_miss_flags) == flag) {
+ rc = sfc_flow_spec_add_match_flag(spec, flag, error);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Check that set of match flags is referred to by a filter. Filter is
+ * described by match flags with the ability to add OUTER_VID and INNER_VID
+ * flags.
+ *
+ * @param match_flags[in]
+ * Set of match flags.
+ * @param flags_pattern[in]
+ * Pattern of filter match flags.
+ */
+static boolean_t
+sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
+ efx_filter_match_flags_t flags_pattern)
+{
+ if ((match_flags & flags_pattern) != flags_pattern)
+ return B_FALSE;
+
+ switch (match_flags & ~flags_pattern) {
+ case 0:
+ case EFX_FILTER_MATCH_OUTER_VID:
+ case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
+ return B_TRUE;
+ default:
+ return B_FALSE;
+ }
+}
+
+/**
+ * Check whether the spec maps to a hardware filter which is known to be
+ * ineffective despite being valid.
+ *
+ * @param filter[in]
+ * SFC filter with list of supported filters.
+ * @param spec[in]
+ * SFC flow specification.
+ */
+static boolean_t
+sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
+ struct sfc_flow_spec *spec)
+{
+ unsigned int i;
+ uint16_t ether_type;
+ uint8_t ip_proto;
+ efx_filter_match_flags_t match_flags;
+
+ for (i = 0; i < spec->count; i++) {
+ match_flags = spec->filters[i].efs_match_flags;
+
+ if (sfc_flow_is_match_with_vids(match_flags,
+ EFX_FILTER_MATCH_ETHER_TYPE) ||
+ sfc_flow_is_match_with_vids(match_flags,
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_LOC_MAC)) {
+ ether_type = spec->filters[i].efs_ether_type;
+ if (filter->supports_ip_proto_or_addr_filter &&
+ (ether_type == EFX_ETHER_TYPE_IPV4 ||
+ ether_type == EFX_ETHER_TYPE_IPV6))
+ return B_TRUE;
+ } else if (sfc_flow_is_match_with_vids(match_flags,
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO) ||
+ sfc_flow_is_match_with_vids(match_flags,
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_MAC)) {
+ ip_proto = spec->filters[i].efs_ip_proto;
+ if (filter->supports_rem_or_local_port_filter &&
+ (ip_proto == EFX_IPPROTO_TCP ||
+ ip_proto == EFX_IPPROTO_UDP))
+ return B_TRUE;
+ }
+ }
+
+ return B_FALSE;
+}
+
+static int
+sfc_flow_validate_match_flags(struct sfc_adapter *sa,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ efx_filter_spec_t *spec_tmpl = &flow->spec.template;
+ efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
+ int rc;
+
+ /* Initialize the first filter spec with template */
+ flow->spec.filters[0] = *spec_tmpl;
+ flow->spec.count = 1;
+
+ if (!sfc_filter_is_match_supported(sa, match_flags)) {
+ rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
+ if (rc != 0)
+ return rc;
+ }
+
+ if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "The flow rule pattern is unsupported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+sfc_flow_parse(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ rc = sfc_flow_parse_attr(attr, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ rc = sfc_flow_parse_pattern(pattern, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ rc = sfc_flow_parse_actions(sa, actions, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ rc = sfc_flow_validate_match_flags(sa, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ return 0;
+
+fail_bad_value:
+ return rc;
+}
+
+static int
+sfc_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow flow;
+
+ memset(&flow, 0, sizeof(flow));
+
+ return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
+}
+
+static struct rte_flow *
+sfc_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_flow *flow = NULL;
+ int rc;
+
+ flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to allocate memory");
+ goto fail_no_mem;
+ }
+
+ rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = sfc_flow_filter_insert(sa, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to insert filter");
+ goto fail_filter_insert;
+ }
+ }
+
+ sfc_adapter_unlock(sa);
+
+ return flow;
+
+fail_filter_insert:
+ TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+
+fail_bad_value:
+ rte_free(flow);
+ sfc_adapter_unlock(sa);
+
+fail_no_mem:
+ return NULL;
+}
+
+static int
+sfc_flow_remove(struct sfc_adapter *sa,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc = 0;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = sfc_flow_filter_remove(sa, flow);
+ if (rc != 0)
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to destroy flow rule");
+ }
+
+ TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+ rte_free(flow);
+
+ return rc;
+}
+
+static int
+sfc_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_flow *flow_ptr;
+ int rc = EINVAL;
+
+ sfc_adapter_lock(sa);
+
+ TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
+ if (flow_ptr == flow)
+ rc = 0;
+ }
+ if (rc != 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to find flow rule to destroy");
+ goto fail_bad_value;
+ }
+
+ rc = sfc_flow_remove(sa, flow, error);
+
+fail_bad_value:
+ sfc_adapter_unlock(sa);
+
+ return -rc;
+}
+
+static int
+sfc_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_flow *flow;
+ int rc = 0;
+ int ret = 0;
+
+ sfc_adapter_lock(sa);
+
+ while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
+ rc = sfc_flow_remove(sa, flow, error);
+ if (rc != 0)
+ ret = rc;
+ }
+
+ sfc_adapter_unlock(sa);
+
+ return -ret;
+}
+
+static int
+sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ int ret = 0;
+
+ sfc_adapter_lock(sa);
+ if (sa->state != SFC_ADAPTER_INITIALIZED) {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "please close the port first");
+ ret = -rte_errno;
+ } else {
+ port->isolated = (enable) ? B_TRUE : B_FALSE;
+ }
+ sfc_adapter_unlock(sa);
+
+ return ret;
+}
+
+const struct rte_flow_ops sfc_flow_ops = {
+ .validate = sfc_flow_validate,
+ .create = sfc_flow_create,
+ .destroy = sfc_flow_destroy,
+ .flush = sfc_flow_flush,
+ .query = NULL,
+ .isolate = sfc_flow_isolate,
+};
+
+void
+sfc_flow_init(struct sfc_adapter *sa)
+{
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ TAILQ_INIT(&sa->filter.flow_list);
+}
+
+void
+sfc_flow_fini(struct sfc_adapter *sa)
+{
+ struct rte_flow *flow;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
+ TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+ rte_free(flow);
+ }
+}
+
+void
+sfc_flow_stop(struct sfc_adapter *sa)
+{
+ struct rte_flow *flow;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
+ sfc_flow_filter_remove(sa, flow);
+}
+
+int
+sfc_flow_start(struct sfc_adapter *sa)
+{
+ struct rte_flow *flow;
+ int rc = 0;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
+ rc = sfc_flow_filter_insert(sa, flow);
+ if (rc != 0)
+ goto fail_bad_flow;
+ }
+
+ sfc_log_init(sa, "done");
+
+fail_bad_flow:
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_flow.h b/src/spdk/dpdk/drivers/net/sfc/sfc_flow.h
new file mode 100644
index 00000000..71ec18cb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_flow.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_FLOW_H
+#define _SFC_FLOW_H
+
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "efx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The maximum number of fully elaborated hardware filter specifications
+ * which can be produced from a template by means of multiplication, if
+ * missing match flags are needed to be taken into account
+ */
+#define SF_FLOW_SPEC_NB_FILTERS_MAX 8
+
+/* RSS configuration storage */
+struct sfc_flow_rss {
+ unsigned int rxq_hw_index_min;
+ unsigned int rxq_hw_index_max;
+ unsigned int rss_hash_types;
+ uint8_t rss_key[EFX_RSS_KEY_SIZE];
+ unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
+};
+
+/* Filter specification storage */
+struct sfc_flow_spec {
+ /* partial specification from flow rule */
+ efx_filter_spec_t template;
+ /* fully elaborated hardware filters specifications */
+ efx_filter_spec_t filters[SF_FLOW_SPEC_NB_FILTERS_MAX];
+ /* number of complete specifications */
+ unsigned int count;
+};
+
+/* PMD-specific definition of the opaque type from rte_flow.h */
+struct rte_flow {
+ struct sfc_flow_spec spec; /* flow spec for hardware filter(s) */
+ boolean_t rss; /* RSS toggle */
+ struct sfc_flow_rss rss_conf; /* RSS configuration */
+ TAILQ_ENTRY(rte_flow) entries; /* flow list entries */
+};
+
+TAILQ_HEAD(sfc_flow_list, rte_flow);
+
+extern const struct rte_flow_ops sfc_flow_ops;
+
+struct sfc_adapter;
+
+void sfc_flow_init(struct sfc_adapter *sa);
+void sfc_flow_fini(struct sfc_adapter *sa);
+int sfc_flow_start(struct sfc_adapter *sa);
+void sfc_flow_stop(struct sfc_adapter *sa);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_FLOW_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_intr.c b/src/spdk/dpdk/drivers/net/sfc/sfc_intr.c
new file mode 100644
index 00000000..fbdc7eea
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_intr.c
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+/*
+ * At the momemt of writing DPDK v16.07 has notion of two types of
+ * interrupts: LSC (link status change) and RXQ (receive indication).
+ * It allows to register interrupt callback for entire device which is
+ * not intended to be used for receive indication (i.e. link status
+ * change indication only). The handler has no information which HW
+ * interrupt has triggered it, so we don't know which event queue should
+ * be polled/reprimed (except qmask in the case of legacy line interrupt).
+ */
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+
+static void
+sfc_intr_handle_mgmt_evq(struct sfc_adapter *sa)
+{
+ struct sfc_evq *evq;
+
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+
+ evq = sa->mgmt_evq;
+
+ if (!sa->mgmt_evq_running) {
+ sfc_log_init(sa, "interrupt on not running management EVQ %u",
+ evq->evq_index);
+ } else {
+ sfc_ev_qpoll(evq);
+
+ if (sfc_ev_qprime(evq) != 0)
+ sfc_err(sa, "cannot prime EVQ %u", evq->evq_index);
+ }
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+}
+
+static void
+sfc_intr_line_handler(void *cb_arg)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)cb_arg;
+ efx_nic_t *enp = sa->nic;
+ boolean_t fatal;
+ uint32_t qmask;
+ unsigned int lsc_seq = sa->port.lsc_seq;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ if (sa->state != SFC_ADAPTER_STARTED &&
+ sa->state != SFC_ADAPTER_STARTING &&
+ sa->state != SFC_ADAPTER_STOPPING) {
+ sfc_log_init(sa,
+ "interrupt on stopped adapter, don't reenable");
+ goto exit;
+ }
+
+ efx_intr_status_line(enp, &fatal, &qmask);
+ if (fatal) {
+ (void)efx_intr_disable(enp);
+ (void)efx_intr_fatal(enp);
+ sfc_err(sa, "fatal, interrupts disabled");
+ goto exit;
+ }
+
+ if (qmask & (1 << sa->mgmt_evq_index))
+ sfc_intr_handle_mgmt_evq(sa);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) != 0)
+ sfc_err(sa, "cannot reenable interrupts");
+
+ sfc_log_init(sa, "done");
+
+exit:
+ if (lsc_seq != sa->port.lsc_seq) {
+ sfc_notice(sa, "link status change event: link %s",
+ sa->eth_dev->data->dev_link.link_status ?
+ "UP" : "DOWN");
+ _rte_eth_dev_callback_process(sa->eth_dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+}
+
+static void
+sfc_intr_message_handler(void *cb_arg)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)cb_arg;
+ efx_nic_t *enp = sa->nic;
+ boolean_t fatal;
+ unsigned int lsc_seq = sa->port.lsc_seq;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ if (sa->state != SFC_ADAPTER_STARTED &&
+ sa->state != SFC_ADAPTER_STARTING &&
+ sa->state != SFC_ADAPTER_STOPPING) {
+ sfc_log_init(sa, "adapter not-started, don't reenable");
+ goto exit;
+ }
+
+ efx_intr_status_message(enp, sa->mgmt_evq_index, &fatal);
+ if (fatal) {
+ (void)efx_intr_disable(enp);
+ (void)efx_intr_fatal(enp);
+ sfc_err(sa, "fatal, interrupts disabled");
+ goto exit;
+ }
+
+ sfc_intr_handle_mgmt_evq(sa);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) != 0)
+ sfc_err(sa, "cannot reenable interrupts");
+
+ sfc_log_init(sa, "done");
+
+exit:
+ if (lsc_seq != sa->port.lsc_seq) {
+ sfc_notice(sa, "link status change event");
+ _rte_eth_dev_callback_process(sa->eth_dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+}
+
+int
+sfc_intr_start(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+ struct rte_intr_handle *intr_handle;
+ struct rte_pci_device *pci_dev;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ /*
+ * The EFX common code event queue module depends on the interrupt
+ * module. Ensure that the interrupt module is always initialized
+ * (even if interrupts are not used). Status memory is required
+ * for Siena only and may be NULL for EF10.
+ */
+ sfc_log_init(sa, "efx_intr_init");
+ rc = efx_intr_init(sa->nic, intr->type, NULL);
+ if (rc != 0)
+ goto fail_intr_init;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
+ intr_handle = &pci_dev->intr_handle;
+
+ if (intr->handler != NULL) {
+ sfc_log_init(sa, "rte_intr_callback_register");
+ rc = rte_intr_callback_register(intr_handle, intr->handler,
+ (void *)sa);
+ if (rc != 0) {
+ sfc_err(sa,
+ "cannot register interrupt handler (rc=%d)",
+ rc);
+ /*
+ * Convert error code from negative returned by RTE API
+ * to positive used in the driver.
+ */
+ rc = -rc;
+ goto fail_rte_intr_cb_reg;
+ }
+
+ sfc_log_init(sa, "rte_intr_enable");
+ rc = rte_intr_enable(intr_handle);
+ if (rc != 0) {
+ sfc_err(sa, "cannot enable interrupts (rc=%d)", rc);
+ /*
+ * Convert error code from negative returned by RTE API
+ * to positive used in the driver.
+ */
+ rc = -rc;
+ goto fail_rte_intr_enable;
+ }
+
+ sfc_log_init(sa, "efx_intr_enable");
+ efx_intr_enable(sa->nic);
+ }
+
+ sfc_log_init(sa, "done type=%u max_intr=%d nb_efd=%u vec=%p",
+ intr_handle->type, intr_handle->max_intr,
+ intr_handle->nb_efd, intr_handle->intr_vec);
+ return 0;
+
+fail_rte_intr_enable:
+ rte_intr_callback_unregister(intr_handle, intr->handler, (void *)sa);
+
+fail_rte_intr_cb_reg:
+ efx_intr_fini(sa->nic);
+
+fail_intr_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_intr_stop(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ if (intr->handler != NULL) {
+ struct rte_intr_handle *intr_handle;
+ int rc;
+
+ efx_intr_disable(sa->nic);
+
+ intr_handle = &pci_dev->intr_handle;
+ if (rte_intr_disable(intr_handle) != 0)
+ sfc_err(sa, "cannot disable interrupts");
+
+ while ((rc = rte_intr_callback_unregister(intr_handle,
+ intr->handler, (void *)sa)) == -EAGAIN)
+ ;
+ if (rc != 1)
+ sfc_err(sa,
+ "cannot unregister interrupt handler %d",
+ rc);
+ }
+
+ efx_intr_fini(sa->nic);
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_intr_configure(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+
+ sfc_log_init(sa, "entry");
+
+ intr->handler = NULL;
+ intr->lsc_intr = (sa->eth_dev->data->dev_conf.intr_conf.lsc != 0);
+ if (!intr->lsc_intr) {
+ sfc_notice(sa, "LSC tracking using interrupts is disabled");
+ goto done;
+ }
+
+ switch (intr->type) {
+ case EFX_INTR_MESSAGE:
+ intr->handler = sfc_intr_message_handler;
+ break;
+ case EFX_INTR_LINE:
+ intr->handler = sfc_intr_line_handler;
+ break;
+ case EFX_INTR_INVALID:
+ sfc_warn(sa, "interrupts are not supported");
+ break;
+ default:
+ sfc_panic(sa, "unexpected EFX interrupt type %u\n", intr->type);
+ break;
+ }
+
+done:
+ sfc_log_init(sa, "done");
+ return 0;
+}
+
+void
+sfc_intr_close(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_intr_attach(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ switch (pci_dev->intr_handle.type) {
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ case RTE_INTR_HANDLE_UIO_INTX:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ intr->type = EFX_INTR_LINE;
+ break;
+ case RTE_INTR_HANDLE_UIO:
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ intr->type = EFX_INTR_MESSAGE;
+ break;
+#endif
+ default:
+ intr->type = EFX_INTR_INVALID;
+ break;
+ }
+
+ sfc_log_init(sa, "done");
+ return 0;
+}
+
+void
+sfc_intr_detach(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sa->intr.type = EFX_INTR_INVALID;
+
+ sfc_log_init(sa, "done");
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.c b/src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.c
new file mode 100644
index 00000000..7a89c769
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.c
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <stdbool.h>
+#include <strings.h>
+
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+
+#include "sfc.h"
+#include "sfc_kvargs.h"
+
+int
+sfc_kvargs_parse(struct sfc_adapter *sa)
+{
+ struct rte_eth_dev *eth_dev = (sa)->eth_dev;
+ struct rte_devargs *devargs = eth_dev->device->devargs;
+ const char **params = (const char *[]){
+ SFC_KVARG_STATS_UPDATE_PERIOD_MS,
+ SFC_KVARG_PERF_PROFILE,
+ SFC_KVARG_RX_DATAPATH,
+ SFC_KVARG_TX_DATAPATH,
+ SFC_KVARG_FW_VARIANT,
+ SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
+ NULL,
+ };
+
+ if (devargs == NULL)
+ return 0;
+
+ sa->kvargs = rte_kvargs_parse(devargs->args, params);
+ if (sa->kvargs == NULL)
+ return EINVAL;
+
+ return 0;
+}
+
+void
+sfc_kvargs_cleanup(struct sfc_adapter *sa)
+{
+ rte_kvargs_free(sa->kvargs);
+}
+
+static int
+sfc_kvarg_match_value(const char *value, const char * const *values,
+ unsigned int n_values)
+{
+ unsigned int i;
+
+ for (i = 0; i < n_values; ++i)
+ if (strcasecmp(value, values[i]) == 0)
+ return 1;
+
+ return 0;
+}
+
+int
+sfc_kvargs_process(struct sfc_adapter *sa, const char *key_match,
+ arg_handler_t handler, void *opaque_arg)
+{
+ if (sa->kvargs == NULL)
+ return 0;
+
+ return -rte_kvargs_process(sa->kvargs, key_match, handler, opaque_arg);
+}
+
+int
+sfc_kvarg_bool_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ const char * const true_strs[] = {
+ "1", "y", "yes", "on", "true"
+ };
+ const char * const false_strs[] = {
+ "0", "n", "no", "off", "false"
+ };
+ bool *value = opaque;
+
+ if (sfc_kvarg_match_value(value_str, true_strs,
+ RTE_DIM(true_strs)))
+ *value = true;
+ else if (sfc_kvarg_match_value(value_str, false_strs,
+ RTE_DIM(false_strs)))
+ *value = false;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+sfc_kvarg_long_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ long value;
+ char *endptr;
+
+ if (!value_str || !opaque)
+ return -EINVAL;
+
+ value = strtol(value_str, &endptr, 0);
+ if (endptr == value_str)
+ return -EINVAL;
+
+ *(long *)opaque = value;
+
+ return 0;
+}
+
+int
+sfc_kvarg_string_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ *(const char **)opaque = value_str;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.h b/src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.h
new file mode 100644
index 00000000..4506667a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_KVARGS_H
+#define _SFC_KVARGS_H
+
+#include <rte_kvargs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SFC_KVARG_VALUES_BOOL "[1|y|yes|on|0|n|no|off]"
+
+#define SFC_KVARG_PERF_PROFILE "perf_profile"
+
+#define SFC_KVARG_PERF_PROFILE_AUTO "auto"
+#define SFC_KVARG_PERF_PROFILE_THROUGHPUT "throughput"
+#define SFC_KVARG_PERF_PROFILE_LOW_LATENCY "low-latency"
+#define SFC_KVARG_VALUES_PERF_PROFILE \
+ "[" SFC_KVARG_PERF_PROFILE_AUTO "|" \
+ SFC_KVARG_PERF_PROFILE_THROUGHPUT "|" \
+ SFC_KVARG_PERF_PROFILE_LOW_LATENCY "]"
+
+#define SFC_KVARG_STATS_UPDATE_PERIOD_MS "stats_update_period_ms"
+
+#define SFC_KVARG_DATAPATH_EFX "efx"
+#define SFC_KVARG_DATAPATH_EF10 "ef10"
+#define SFC_KVARG_DATAPATH_EF10_SIMPLE "ef10_simple"
+#define SFC_KVARG_DATAPATH_EF10_ESSB "ef10_essb"
+
+#define SFC_KVARG_RX_DATAPATH "rx_datapath"
+#define SFC_KVARG_VALUES_RX_DATAPATH \
+ "[" SFC_KVARG_DATAPATH_EFX "|" \
+ SFC_KVARG_DATAPATH_EF10 "|" \
+ SFC_KVARG_DATAPATH_EF10_ESSB "]"
+
+#define SFC_KVARG_TX_DATAPATH "tx_datapath"
+#define SFC_KVARG_VALUES_TX_DATAPATH \
+ "[" SFC_KVARG_DATAPATH_EFX "|" \
+ SFC_KVARG_DATAPATH_EF10 "|" \
+ SFC_KVARG_DATAPATH_EF10_SIMPLE "]"
+
+#define SFC_KVARG_FW_VARIANT "fw_variant"
+
+#define SFC_KVARG_FW_VARIANT_DONT_CARE "dont-care"
+#define SFC_KVARG_FW_VARIANT_FULL_FEATURED "full-feature"
+#define SFC_KVARG_FW_VARIANT_LOW_LATENCY "ultra-low-latency"
+#define SFC_KVARG_FW_VARIANT_PACKED_STREAM "capture-packed-stream"
+#define SFC_KVARG_FW_VARIANT_DPDK "dpdk"
+#define SFC_KVARG_VALUES_FW_VARIANT \
+ "[" SFC_KVARG_FW_VARIANT_DONT_CARE "|" \
+ SFC_KVARG_FW_VARIANT_FULL_FEATURED "|" \
+ SFC_KVARG_FW_VARIANT_LOW_LATENCY "|" \
+ SFC_KVARG_FW_VARIANT_PACKED_STREAM "|" \
+ SFC_KVARG_FW_VARIANT_DPDK "]"
+
+#define SFC_KVARG_RXD_WAIT_TIMEOUT_NS "rxd_wait_timeout_ns"
+
+struct sfc_adapter;
+
+int sfc_kvargs_parse(struct sfc_adapter *sa);
+void sfc_kvargs_cleanup(struct sfc_adapter *sa);
+
+int sfc_kvargs_process(struct sfc_adapter *sa, const char *key_match,
+ arg_handler_t handler, void *opaque_arg);
+
+int sfc_kvarg_bool_handler(const char *key, const char *value_str,
+ void *opaque);
+int sfc_kvarg_long_handler(const char *key, const char *value_str,
+ void *opaque);
+int sfc_kvarg_string_handler(const char *key, const char *value_str,
+ void *opaque);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_KVARGS_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_log.h b/src/spdk/dpdk/drivers/net/sfc/sfc_log.h
new file mode 100644
index 00000000..d6f34352
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_log.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_LOG_H_
+#define _SFC_LOG_H_
+
+/** Generic driver log type */
+extern uint32_t sfc_logtype_driver;
+
+/** Common log type name prefix */
+#define SFC_LOGTYPE_PREFIX "pmd.net.sfc."
+
+/** Log PMD generic message, add a prefix and a line break */
+#define SFC_GENERIC_LOG(level, ...) \
+ rte_log(RTE_LOG_ ## level, sfc_logtype_driver, \
+ RTE_FMT("PMD: " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \
+ RTE_FMT_TAIL(__VA_ARGS__ ,)))
+
+/** Name prefix for the per-device log type used to report basic information */
+#define SFC_LOGTYPE_MAIN_STR SFC_LOGTYPE_PREFIX "main"
+
+/** Device MCDI log type name prefix */
+#define SFC_LOGTYPE_MCDI_STR SFC_LOGTYPE_PREFIX "mcdi"
+
+/** Level value used by MCDI log statements */
+#define SFC_LOG_LEVEL_MCDI RTE_LOG_INFO
+
+/* Log PMD message, automatically add prefix and \n */
+#define SFC_LOG(sa, level, type, ...) \
+ do { \
+ const struct sfc_adapter *__sa = (sa); \
+ \
+ rte_log(level, type, \
+ RTE_FMT("PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu8 \
+ ": " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \
+ __sa->pci_addr.domain, \
+ __sa->pci_addr.bus, \
+ __sa->pci_addr.devid, \
+ __sa->pci_addr.function, \
+ __sa->port_id, \
+ RTE_FMT_TAIL(__VA_ARGS__,))); \
+ } while (0)
+
+#define sfc_err(sa, ...) \
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, RTE_LOG_ERR, _sa->logtype_main, \
+ __VA_ARGS__); \
+ } while (0)
+
+#define sfc_warn(sa, ...) \
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, RTE_LOG_WARNING, _sa->logtype_main, \
+ __VA_ARGS__); \
+ } while (0)
+
+#define sfc_notice(sa, ...) \
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, RTE_LOG_NOTICE, _sa->logtype_main, \
+ __VA_ARGS__); \
+ } while (0)
+
+#define sfc_info(sa, ...) \
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, RTE_LOG_INFO, _sa->logtype_main, \
+ __VA_ARGS__); \
+ } while (0)
+
+#define sfc_log_init(sa, ...) \
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, RTE_LOG_INFO, _sa->logtype_main, \
+ RTE_FMT("%s(): " \
+ RTE_FMT_HEAD(__VA_ARGS__ ,), \
+ __func__, \
+ RTE_FMT_TAIL(__VA_ARGS__ ,))); \
+ } while (0)
+
+#define sfc_log_mcdi(sa, ...) \
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, SFC_LOG_LEVEL_MCDI, _sa->mcdi.logtype, \
+ __VA_ARGS__); \
+ } while (0)
+
+
+#endif /* _SFC_LOG_H_ */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_mcdi.c b/src/spdk/dpdk/drivers/net/sfc/sfc_mcdi.c
new file mode 100644
index 00000000..007506b4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_mcdi.c
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <rte_cycles.h>
+
+#include "efx.h"
+#include "efx_mcdi.h"
+#include "efx_regs_mcdi.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+
+#define SFC_MCDI_POLL_INTERVAL_MIN_US 10 /* 10us in 1us units */
+#define SFC_MCDI_POLL_INTERVAL_MAX_US (US_PER_S / 10) /* 100ms in 1us units */
+#define SFC_MCDI_WATCHDOG_INTERVAL_US (10 * US_PER_S) /* 10s in 1us units */
+
+static void
+sfc_mcdi_timeout(struct sfc_adapter *sa)
+{
+ sfc_warn(sa, "MC TIMEOUT");
+
+ sfc_panic(sa, "MCDI timeout handling is not implemented\n");
+}
+
+static inline boolean_t
+sfc_mcdi_proxy_event_available(struct sfc_adapter *sa)
+{
+ struct sfc_mcdi *mcdi = &sa->mcdi;
+
+ mcdi->proxy_handle = 0;
+ mcdi->proxy_result = ETIMEDOUT;
+ sfc_ev_mgmt_qpoll(sa);
+ if (mcdi->proxy_result != ETIMEDOUT)
+ return B_TRUE;
+
+ return B_FALSE;
+}
+
+static void
+sfc_mcdi_poll(struct sfc_adapter *sa, boolean_t proxy)
+{
+ efx_nic_t *enp;
+ unsigned int delay_total;
+ unsigned int delay_us;
+ boolean_t aborted __rte_unused;
+
+ delay_total = 0;
+ delay_us = SFC_MCDI_POLL_INTERVAL_MIN_US;
+ enp = sa->nic;
+
+ do {
+ boolean_t poll_completed;
+
+ poll_completed = (proxy) ? sfc_mcdi_proxy_event_available(sa) :
+ efx_mcdi_request_poll(enp);
+ if (poll_completed)
+ return;
+
+ if (delay_total > SFC_MCDI_WATCHDOG_INTERVAL_US) {
+ if (!proxy) {
+ aborted = efx_mcdi_request_abort(enp);
+ SFC_ASSERT(aborted);
+ sfc_mcdi_timeout(sa);
+ }
+
+ return;
+ }
+
+ rte_delay_us(delay_us);
+
+ delay_total += delay_us;
+
+ /* Exponentially back off the poll frequency */
+ RTE_BUILD_BUG_ON(SFC_MCDI_POLL_INTERVAL_MAX_US > UINT_MAX / 2);
+ delay_us *= 2;
+ if (delay_us > SFC_MCDI_POLL_INTERVAL_MAX_US)
+ delay_us = SFC_MCDI_POLL_INTERVAL_MAX_US;
+
+ } while (1);
+}
+
+static void
+sfc_mcdi_execute(void *arg, efx_mcdi_req_t *emrp)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ struct sfc_mcdi *mcdi = &sa->mcdi;
+ uint32_t proxy_handle;
+
+ rte_spinlock_lock(&mcdi->lock);
+
+ SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
+
+ efx_mcdi_request_start(sa->nic, emrp, B_FALSE);
+ sfc_mcdi_poll(sa, B_FALSE);
+
+ if (efx_mcdi_get_proxy_handle(sa->nic, emrp, &proxy_handle) == 0) {
+ /*
+ * Authorization is required for the MCDI request;
+ * wait for an MCDI proxy response event to bring
+ * a non-zero proxy handle (should be the same as
+ * the value obtained above) and operation status
+ */
+ sfc_mcdi_poll(sa, B_TRUE);
+
+ if ((mcdi->proxy_handle != 0) &&
+ (mcdi->proxy_handle != proxy_handle)) {
+ sfc_err(sa, "Unexpected MCDI proxy event");
+ emrp->emr_rc = EFAULT;
+ } else if (mcdi->proxy_result == 0) {
+ /*
+ * Authorization succeeded; re-issue the original
+ * request and poll for an ordinary MCDI response
+ */
+ efx_mcdi_request_start(sa->nic, emrp, B_FALSE);
+ sfc_mcdi_poll(sa, B_FALSE);
+ } else {
+ emrp->emr_rc = mcdi->proxy_result;
+ sfc_err(sa, "MCDI proxy authorization failed "
+ "(handle=%08x, result=%d)",
+ proxy_handle, mcdi->proxy_result);
+ }
+ }
+
+ rte_spinlock_unlock(&mcdi->lock);
+}
+
+static void
+sfc_mcdi_ev_cpl(void *arg)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ struct sfc_mcdi *mcdi __rte_unused;
+
+ mcdi = &sa->mcdi;
+ SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
+
+ /* MCDI is polled, completions are not expected */
+ SFC_ASSERT(0);
+}
+
+static void
+sfc_mcdi_exception(void *arg, efx_mcdi_exception_t eme)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+
+ sfc_warn(sa, "MC %s",
+ (eme == EFX_MCDI_EXCEPTION_MC_REBOOT) ? "REBOOT" :
+ (eme == EFX_MCDI_EXCEPTION_MC_BADASSERT) ? "BADASSERT" : "UNKNOWN");
+
+ sfc_schedule_restart(sa);
+}
+
+#define SFC_MCDI_LOG_BUF_SIZE 128
+
+static size_t
+sfc_mcdi_do_log(const struct sfc_adapter *sa,
+ char *buffer, void *data, size_t data_size,
+ size_t pfxsize, size_t position)
+{
+ uint32_t *words = data;
+ /* Space separator plus 2 characters per byte */
+ const size_t word_str_space = 1 + 2 * sizeof(*words);
+ size_t i;
+
+ for (i = 0; i < data_size; i += sizeof(*words)) {
+ if (position + word_str_space >=
+ SFC_MCDI_LOG_BUF_SIZE) {
+ /* Flush at SFC_MCDI_LOG_BUF_SIZE with backslash
+ * at the end which is required by netlogdecode.
+ */
+ buffer[position] = '\0';
+ sfc_log_mcdi(sa, "%s \\", buffer);
+ /* Preserve prefix for the next log message */
+ position = pfxsize;
+ }
+ position += snprintf(buffer + position,
+ SFC_MCDI_LOG_BUF_SIZE - position,
+ " %08x", *words);
+ words++;
+ }
+ return position;
+}
+
+static void
+sfc_mcdi_logger(void *arg, efx_log_msg_t type,
+ void *header, size_t header_size,
+ void *data, size_t data_size)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ char buffer[SFC_MCDI_LOG_BUF_SIZE];
+ size_t pfxsize;
+ size_t start;
+
+ /*
+ * Unlike the other cases, MCDI logging implies more onerous work
+ * needed to produce a message. If the dynamic log level prevents
+ * the end result from being printed, the CPU time will be wasted.
+ *
+ * To avoid wasting time, the actual level is examined in advance.
+ */
+ if (rte_log_get_level(sa->mcdi.logtype) < (int)SFC_LOG_LEVEL_MCDI)
+ return;
+
+ /* The format including prefix added by sfc_log_mcdi() is the format
+ * consumed by the Solarflare netlogdecode tool.
+ */
+ pfxsize = snprintf(buffer, sizeof(buffer), "MCDI RPC %s:",
+ type == EFX_LOG_MCDI_REQUEST ? "REQ" :
+ type == EFX_LOG_MCDI_RESPONSE ? "RESP" : "???");
+ start = sfc_mcdi_do_log(sa, buffer, header, header_size,
+ pfxsize, pfxsize);
+ start = sfc_mcdi_do_log(sa, buffer, data, data_size, pfxsize, start);
+ if (start != pfxsize) {
+ buffer[start] = '\0';
+ sfc_log_mcdi(sa, "%s", buffer);
+ }
+}
+
+static void
+sfc_mcdi_ev_proxy_response(void *arg, uint32_t handle, efx_rc_t result)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ struct sfc_mcdi *mcdi = &sa->mcdi;
+
+ mcdi->proxy_handle = handle;
+ mcdi->proxy_result = result;
+}
+
+int
+sfc_mcdi_init(struct sfc_adapter *sa)
+{
+ struct sfc_mcdi *mcdi;
+ size_t max_msg_size;
+ efx_mcdi_transport_t *emtp;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ mcdi = &sa->mcdi;
+
+ SFC_ASSERT(mcdi->state == SFC_MCDI_UNINITIALIZED);
+
+ rte_spinlock_init(&mcdi->lock);
+
+ mcdi->state = SFC_MCDI_INITIALIZED;
+
+ max_msg_size = sizeof(uint32_t) + MCDI_CTL_SDU_LEN_MAX_V2;
+ rc = sfc_dma_alloc(sa, "mcdi", 0, max_msg_size, sa->socket_id,
+ &mcdi->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ mcdi->logtype = sfc_register_logtype(sa, SFC_LOGTYPE_MCDI_STR,
+ RTE_LOG_NOTICE);
+
+ emtp = &mcdi->transport;
+ emtp->emt_context = sa;
+ emtp->emt_dma_mem = &mcdi->mem;
+ emtp->emt_execute = sfc_mcdi_execute;
+ emtp->emt_ev_cpl = sfc_mcdi_ev_cpl;
+ emtp->emt_exception = sfc_mcdi_exception;
+ emtp->emt_logger = sfc_mcdi_logger;
+ emtp->emt_ev_proxy_response = sfc_mcdi_ev_proxy_response;
+
+ sfc_log_init(sa, "init MCDI");
+ rc = efx_mcdi_init(sa->nic, emtp);
+ if (rc != 0)
+ goto fail_mcdi_init;
+
+ return 0;
+
+fail_mcdi_init:
+ memset(emtp, 0, sizeof(*emtp));
+ sfc_dma_free(sa, &mcdi->mem);
+
+fail_dma_alloc:
+ mcdi->state = SFC_MCDI_UNINITIALIZED;
+ return rc;
+}
+
+void
+sfc_mcdi_fini(struct sfc_adapter *sa)
+{
+ struct sfc_mcdi *mcdi;
+ efx_mcdi_transport_t *emtp;
+
+ sfc_log_init(sa, "entry");
+
+ mcdi = &sa->mcdi;
+ emtp = &mcdi->transport;
+
+ rte_spinlock_lock(&mcdi->lock);
+
+ SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
+ mcdi->state = SFC_MCDI_UNINITIALIZED;
+
+ sfc_log_init(sa, "fini MCDI");
+ efx_mcdi_fini(sa->nic);
+ memset(emtp, 0, sizeof(*emtp));
+
+ rte_spinlock_unlock(&mcdi->lock);
+
+ sfc_dma_free(sa, &mcdi->mem);
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_port.c b/src/spdk/dpdk/drivers/net/sfc/sfc_port.c
new file mode 100644
index 00000000..5384dbbd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_port.c
@@ -0,0 +1,554 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_kvargs.h"
+
+/** Default MAC statistics update period is 1 second */
+#define SFC_MAC_STATS_UPDATE_PERIOD_MS_DEF MS_PER_S
+
+/** The number of microseconds to sleep on attempt to get statistics update */
+#define SFC_MAC_STATS_UPDATE_RETRY_INTERVAL_US 10
+
+/** The number of attempts to await arrival of freshly generated statistics */
+#define SFC_MAC_STATS_UPDATE_NB_ATTEMPTS 50
+
+/**
+ * Update MAC statistics in the buffer.
+ *
+ * @param sa Adapter
+ *
+ * @return Status code
+ * @retval 0 Success
+ * @retval EAGAIN Try again
+ * @retval ENOMEM Memory allocation failure
+ */
+int
+sfc_port_update_mac_stats(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ efsys_mem_t *esmp = &port->mac_stats_dma_mem;
+ uint32_t *genp = NULL;
+ uint32_t gen_old;
+ unsigned int nb_attempts = 0;
+ int rc;
+
+ SFC_ASSERT(rte_spinlock_is_locked(&port->mac_stats_lock));
+
+ if (sa->state != SFC_ADAPTER_STARTED)
+ return EINVAL;
+
+ /*
+ * If periodic statistics DMA'ing is off or if not supported,
+ * make a manual request and keep an eye on timer if need be
+ */
+ if (!port->mac_stats_periodic_dma_supported ||
+ (port->mac_stats_update_period_ms == 0)) {
+ if (port->mac_stats_update_period_ms != 0) {
+ uint64_t timestamp = sfc_get_system_msecs();
+
+ if ((timestamp -
+ port->mac_stats_last_request_timestamp) <
+ port->mac_stats_update_period_ms)
+ return 0;
+
+ port->mac_stats_last_request_timestamp = timestamp;
+ }
+
+ rc = efx_mac_stats_upload(sa->nic, esmp);
+ if (rc != 0)
+ return rc;
+
+ genp = &port->mac_stats_update_generation;
+ gen_old = *genp;
+ }
+
+ do {
+ if (nb_attempts > 0)
+ rte_delay_us(SFC_MAC_STATS_UPDATE_RETRY_INTERVAL_US);
+
+ rc = efx_mac_stats_update(sa->nic, esmp,
+ port->mac_stats_buf, genp);
+ if (rc != 0)
+ return rc;
+
+ } while ((genp != NULL) && (*genp == gen_old) &&
+ (++nb_attempts < SFC_MAC_STATS_UPDATE_NB_ATTEMPTS));
+
+ return 0;
+}
+
+int
+sfc_port_reset_mac_stats(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+ rc = efx_mac_stats_clear(sa->nic);
+ rte_spinlock_unlock(&port->mac_stats_lock);
+
+ return rc;
+}
+
+static int
+sfc_port_init_dev_link(struct sfc_adapter *sa)
+{
+ struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
+ int rc;
+ efx_link_mode_t link_mode;
+ struct rte_eth_link current_link;
+
+ rc = efx_port_poll(sa->nic, &link_mode);
+ if (rc != 0)
+ return rc;
+
+ sfc_port_link_mode_to_info(link_mode, &current_link);
+
+ EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
+ rte_atomic64_set((rte_atomic64_t *)dev_link,
+ *(uint64_t *)&current_link);
+
+ return 0;
+}
+
+#if EFSYS_OPT_LOOPBACK
+
+static efx_link_mode_t
+sfc_port_phy_caps_to_max_link_speed(uint32_t phy_caps)
+{
+ if (phy_caps & (1u << EFX_PHY_CAP_100000FDX))
+ return EFX_LINK_100000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_50000FDX))
+ return EFX_LINK_50000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_40000FDX))
+ return EFX_LINK_40000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_25000FDX))
+ return EFX_LINK_25000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_10000FDX))
+ return EFX_LINK_10000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_1000FDX))
+ return EFX_LINK_1000FDX;
+ return EFX_LINK_UNKNOWN;
+}
+
+#endif
+
+int
+sfc_port_start(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ int rc;
+ uint32_t phy_adv_cap;
+ const uint32_t phy_pause_caps =
+ ((1u << EFX_PHY_CAP_PAUSE) | (1u << EFX_PHY_CAP_ASYM));
+ unsigned int i;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_log_init(sa, "init filters");
+ rc = efx_filter_init(sa->nic);
+ if (rc != 0)
+ goto fail_filter_init;
+
+ sfc_log_init(sa, "init port");
+ rc = efx_port_init(sa->nic);
+ if (rc != 0)
+ goto fail_port_init;
+
+#if EFSYS_OPT_LOOPBACK
+ if (sa->eth_dev->data->dev_conf.lpbk_mode != 0) {
+ efx_link_mode_t link_mode;
+
+ link_mode =
+ sfc_port_phy_caps_to_max_link_speed(port->phy_adv_cap);
+ sfc_log_init(sa, "set loopback link_mode=%u type=%u", link_mode,
+ sa->eth_dev->data->dev_conf.lpbk_mode);
+ rc = efx_port_loopback_set(sa->nic, link_mode,
+ sa->eth_dev->data->dev_conf.lpbk_mode);
+ if (rc != 0)
+ goto fail_loopback_set;
+ }
+#endif
+
+ sfc_log_init(sa, "set flow control to %#x autoneg=%u",
+ port->flow_ctrl, port->flow_ctrl_autoneg);
+ rc = efx_mac_fcntl_set(sa->nic, port->flow_ctrl,
+ port->flow_ctrl_autoneg);
+ if (rc != 0)
+ goto fail_mac_fcntl_set;
+
+ /* Preserve pause capabilities set by above efx_mac_fcntl_set() */
+ efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_CURRENT, &phy_adv_cap);
+ SFC_ASSERT((port->phy_adv_cap & phy_pause_caps) == 0);
+ phy_adv_cap = port->phy_adv_cap | (phy_adv_cap & phy_pause_caps);
+
+ /*
+ * No controls for FEC yet. Use default FEC mode.
+ * I.e. advertise everything supported (*_FEC=1), but do not request
+ * anything explicitly (*_FEC_REQUESTED=0).
+ */
+ phy_adv_cap |= port->phy_adv_cap_mask &
+ (1u << EFX_PHY_CAP_BASER_FEC |
+ 1u << EFX_PHY_CAP_RS_FEC |
+ 1u << EFX_PHY_CAP_25G_BASER_FEC);
+
+ sfc_log_init(sa, "set phy adv caps to %#x", phy_adv_cap);
+ rc = efx_phy_adv_cap_set(sa->nic, phy_adv_cap);
+ if (rc != 0)
+ goto fail_phy_adv_cap_set;
+
+ sfc_log_init(sa, "set MAC PDU %u", (unsigned int)port->pdu);
+ rc = efx_mac_pdu_set(sa->nic, port->pdu);
+ if (rc != 0)
+ goto fail_mac_pdu_set;
+
+ if (!port->isolated) {
+ struct ether_addr *addr = &port->default_mac_addr;
+
+ sfc_log_init(sa, "set MAC address");
+ rc = efx_mac_addr_set(sa->nic, addr->addr_bytes);
+ if (rc != 0)
+ goto fail_mac_addr_set;
+
+ sfc_log_init(sa, "set MAC filters");
+ port->promisc = (sa->eth_dev->data->promiscuous != 0) ?
+ B_TRUE : B_FALSE;
+ port->allmulti = (sa->eth_dev->data->all_multicast != 0) ?
+ B_TRUE : B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ goto fail_mac_filter_set;
+
+ sfc_log_init(sa, "set multicast address list");
+ rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
+ port->nb_mcast_addrs);
+ if (rc != 0)
+ goto fail_mcast_address_list_set;
+ }
+
+ if (port->mac_stats_reset_pending) {
+ rc = sfc_port_reset_mac_stats(sa);
+ if (rc != 0)
+ sfc_err(sa, "statistics reset failed (requested "
+ "before the port was started)");
+
+ port->mac_stats_reset_pending = B_FALSE;
+ }
+
+ efx_mac_stats_get_mask(sa->nic, port->mac_stats_mask,
+ sizeof(port->mac_stats_mask));
+
+ for (i = 0, port->mac_stats_nb_supported = 0; i < EFX_MAC_NSTATS; ++i)
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
+ port->mac_stats_nb_supported++;
+
+ port->mac_stats_update_generation = 0;
+
+ if (port->mac_stats_update_period_ms != 0) {
+ /*
+ * Update MAC stats using periodic DMA;
+ * any positive update interval different from
+ * 1000 ms can be set only on SFN8xxx provided
+ * that FW version is 6.2.1.1033 or higher
+ */
+ sfc_log_init(sa, "request MAC stats DMA'ing");
+ rc = efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
+ port->mac_stats_update_period_ms,
+ B_FALSE);
+ if (rc == 0) {
+ port->mac_stats_periodic_dma_supported = B_TRUE;
+ } else if (rc == EOPNOTSUPP) {
+ port->mac_stats_periodic_dma_supported = B_FALSE;
+ port->mac_stats_last_request_timestamp = 0;
+ } else {
+ goto fail_mac_stats_periodic;
+ }
+ }
+
+ if ((port->mac_stats_update_period_ms != 0) &&
+ port->mac_stats_periodic_dma_supported) {
+ /*
+ * Request an explicit MAC stats upload immediately to
+ * preclude bogus figures readback if the user decides
+ * to read stats before periodic DMA is really started
+ */
+ rc = efx_mac_stats_upload(sa->nic, &port->mac_stats_dma_mem);
+ if (rc != 0)
+ goto fail_mac_stats_upload;
+ }
+
+ sfc_log_init(sa, "disable MAC drain");
+ rc = efx_mac_drain(sa->nic, B_FALSE);
+ if (rc != 0)
+ goto fail_mac_drain;
+
+ /* Synchronize link status knowledge */
+ rc = sfc_port_init_dev_link(sa);
+ if (rc != 0)
+ goto fail_port_init_dev_link;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_port_init_dev_link:
+ (void)efx_mac_drain(sa->nic, B_TRUE);
+
+fail_mac_drain:
+fail_mac_stats_upload:
+ (void)efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
+ 0, B_FALSE);
+
+fail_mac_stats_periodic:
+fail_mcast_address_list_set:
+fail_mac_filter_set:
+fail_mac_addr_set:
+fail_mac_pdu_set:
+fail_phy_adv_cap_set:
+fail_mac_fcntl_set:
+#if EFSYS_OPT_LOOPBACK
+fail_loopback_set:
+#endif
+ efx_port_fini(sa->nic);
+
+fail_port_init:
+ efx_filter_fini(sa->nic);
+
+fail_filter_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_port_stop(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ efx_mac_drain(sa->nic, B_TRUE);
+
+ (void)efx_mac_stats_periodic(sa->nic, &sa->port.mac_stats_dma_mem,
+ 0, B_FALSE);
+
+ efx_port_fini(sa->nic);
+ efx_filter_fini(sa->nic);
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_port_configure(struct sfc_adapter *sa)
+{
+ const struct rte_eth_dev_data *dev_data = sa->eth_dev->data;
+ struct sfc_port *port = &sa->port;
+ const struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
+
+ sfc_log_init(sa, "entry");
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ port->pdu = rxmode->max_rx_pkt_len;
+ else
+ port->pdu = EFX_MAC_PDU(dev_data->mtu);
+
+ return 0;
+}
+
+void
+sfc_port_close(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+}
+
+int
+sfc_port_attach(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ const struct ether_addr *from;
+ uint32_t mac_nstats;
+ size_t mac_stats_size;
+ long kvarg_stats_update_period_ms;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_PERM, &port->phy_adv_cap_mask);
+
+ /* Enable flow control by default */
+ port->flow_ctrl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ port->flow_ctrl_autoneg = B_TRUE;
+
+ RTE_BUILD_BUG_ON(sizeof(encp->enc_mac_addr) != sizeof(*from));
+ from = (const struct ether_addr *)(encp->enc_mac_addr);
+ ether_addr_copy(from, &port->default_mac_addr);
+
+ port->max_mcast_addrs = EFX_MAC_MULTICAST_LIST_MAX;
+ port->nb_mcast_addrs = 0;
+ port->mcast_addrs = rte_calloc_socket("mcast_addr_list_buf",
+ port->max_mcast_addrs,
+ EFX_MAC_ADDR_LEN, 0,
+ sa->socket_id);
+ if (port->mcast_addrs == NULL) {
+ rc = ENOMEM;
+ goto fail_mcast_addr_list_buf_alloc;
+ }
+
+ rte_spinlock_init(&port->mac_stats_lock);
+
+ rc = ENOMEM;
+ port->mac_stats_buf = rte_calloc_socket("mac_stats_buf", EFX_MAC_NSTATS,
+ sizeof(uint64_t), 0,
+ sa->socket_id);
+ if (port->mac_stats_buf == NULL)
+ goto fail_mac_stats_buf_alloc;
+
+ mac_nstats = efx_nic_cfg_get(sa->nic)->enc_mac_stats_nstats;
+ mac_stats_size = RTE_ALIGN(mac_nstats * sizeof(uint64_t), EFX_BUF_SIZE);
+ rc = sfc_dma_alloc(sa, "mac_stats", 0, mac_stats_size,
+ sa->socket_id, &port->mac_stats_dma_mem);
+ if (rc != 0)
+ goto fail_mac_stats_dma_alloc;
+
+ port->mac_stats_reset_pending = B_FALSE;
+
+ kvarg_stats_update_period_ms = SFC_MAC_STATS_UPDATE_PERIOD_MS_DEF;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_STATS_UPDATE_PERIOD_MS,
+ sfc_kvarg_long_handler,
+ &kvarg_stats_update_period_ms);
+ if ((rc == 0) &&
+ ((kvarg_stats_update_period_ms < 0) ||
+ (kvarg_stats_update_period_ms > UINT16_MAX))) {
+ sfc_err(sa, "wrong '" SFC_KVARG_STATS_UPDATE_PERIOD_MS "' "
+ "was set (%ld);", kvarg_stats_update_period_ms);
+ sfc_err(sa, "it must not be less than 0 "
+ "or greater than %" PRIu16, UINT16_MAX);
+ rc = EINVAL;
+ goto fail_kvarg_stats_update_period_ms;
+ } else if (rc != 0) {
+ goto fail_kvarg_stats_update_period_ms;
+ }
+
+ port->mac_stats_update_period_ms = kvarg_stats_update_period_ms;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_kvarg_stats_update_period_ms:
+ sfc_dma_free(sa, &port->mac_stats_dma_mem);
+
+fail_mac_stats_dma_alloc:
+ rte_free(port->mac_stats_buf);
+
+fail_mac_stats_buf_alloc:
+ rte_free(port->mcast_addrs);
+
+fail_mcast_addr_list_buf_alloc:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_port_detach(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_dma_free(sa, &port->mac_stats_dma_mem);
+ rte_free(port->mac_stats_buf);
+
+ rte_free(port->mcast_addrs);
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_set_rx_mode(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ rc = efx_mac_filter_set(sa->nic, port->promisc, B_TRUE,
+ port->promisc || port->allmulti, B_TRUE);
+
+ return rc;
+}
+
+void
+sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
+ struct rte_eth_link *link_info)
+{
+ SFC_ASSERT(link_mode < EFX_LINK_NMODES);
+
+ memset(link_info, 0, sizeof(*link_info));
+ if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN))
+ link_info->link_status = ETH_LINK_DOWN;
+ else
+ link_info->link_status = ETH_LINK_UP;
+
+ switch (link_mode) {
+ case EFX_LINK_10HDX:
+ link_info->link_speed = ETH_SPEED_NUM_10M;
+ link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case EFX_LINK_10FDX:
+ link_info->link_speed = ETH_SPEED_NUM_10M;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_100HDX:
+ link_info->link_speed = ETH_SPEED_NUM_100M;
+ link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case EFX_LINK_100FDX:
+ link_info->link_speed = ETH_SPEED_NUM_100M;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_1000HDX:
+ link_info->link_speed = ETH_SPEED_NUM_1G;
+ link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case EFX_LINK_1000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_1G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_10000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_10G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_25000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_25G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_40000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_40G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_50000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_50G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_100000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_100G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ default:
+ SFC_ASSERT(B_FALSE);
+ /* FALLTHROUGH */
+ case EFX_LINK_UNKNOWN:
+ case EFX_LINK_DOWN:
+ link_info->link_speed = ETH_SPEED_NUM_NONE;
+ link_info->link_duplex = 0;
+ break;
+ }
+
+ link_info->link_autoneg = ETH_LINK_AUTONEG;
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_rx.c b/src/spdk/dpdk/drivers/net/sfc/sfc_rx.c
new file mode 100644
index 00000000..d8503e20
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_rx.c
@@ -0,0 +1,1597 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <rte_mempool.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_kvargs.h"
+#include "sfc_tweak.h"
+
+/*
+ * Maximum number of Rx queue flush attempt in the case of failure or
+ * flush timeout
+ */
+#define SFC_RX_QFLUSH_ATTEMPTS (3)
+
+/*
+ * Time to wait between event queue polling attempts when waiting for Rx
+ * queue flush done or failed events.
+ */
+#define SFC_RX_QFLUSH_POLL_WAIT_MS (1)
+
+/*
+ * Maximum number of event queue polling attempts when waiting for Rx queue
+ * flush done or failed events. It defines Rx queue flush attempt timeout
+ * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
+ */
+#define SFC_RX_QFLUSH_POLL_ATTEMPTS (2000)
+
+void
+sfc_rx_qflush_done(struct sfc_rxq *rxq)
+{
+ rxq->state |= SFC_RXQ_FLUSHED;
+ rxq->state &= ~SFC_RXQ_FLUSHING;
+}
+
+void
+sfc_rx_qflush_failed(struct sfc_rxq *rxq)
+{
+ rxq->state |= SFC_RXQ_FLUSH_FAILED;
+ rxq->state &= ~SFC_RXQ_FLUSHING;
+}
+
+static void
+sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
+{
+ unsigned int free_space;
+ unsigned int bulks;
+ void *objs[SFC_RX_REFILL_BULK];
+ efsys_dma_addr_t addr[RTE_DIM(objs)];
+ unsigned int added = rxq->added;
+ unsigned int id;
+ unsigned int i;
+ struct sfc_efx_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ uint16_t port_id = rxq->dp.dpq.port_id;
+
+ free_space = rxq->max_fill_level - (added - rxq->completed);
+
+ if (free_space < rxq->refill_threshold)
+ return;
+
+ bulks = free_space / RTE_DIM(objs);
+ /* refill_threshold guarantees that bulks is positive */
+ SFC_ASSERT(bulks > 0);
+
+ id = added & rxq->ptr_mask;
+ do {
+ if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
+ RTE_DIM(objs)) < 0)) {
+ /*
+ * It is hardly a safe way to increment counter
+ * from different contexts, but all PMDs do it.
+ */
+ rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
+ RTE_DIM(objs);
+ /* Return if we have posted nothing yet */
+ if (added == rxq->added)
+ return;
+ /* Push posted */
+ break;
+ }
+
+ for (i = 0; i < RTE_DIM(objs);
+ ++i, id = (id + 1) & rxq->ptr_mask) {
+ m = objs[i];
+
+ rxd = &rxq->sw_desc[id];
+ rxd->mbuf = m;
+
+ SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
+ m->data_off = RTE_PKTMBUF_HEADROOM;
+ SFC_ASSERT(m->next == NULL);
+ SFC_ASSERT(m->nb_segs == 1);
+ m->port = port_id;
+
+ addr[i] = rte_pktmbuf_iova(m);
+ }
+
+ efx_rx_qpost(rxq->common, addr, rxq->buf_size,
+ RTE_DIM(objs), rxq->completed, added);
+ added += RTE_DIM(objs);
+ } while (--bulks > 0);
+
+ SFC_ASSERT(added != rxq->added);
+ rxq->added = added;
+ efx_rx_qpush(rxq->common, added, &rxq->pushed);
+}
+
+static uint64_t
+sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
+{
+ uint64_t mbuf_flags = 0;
+
+ switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
+ case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
+ mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
+ break;
+ case EFX_PKT_IPV4:
+ mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
+ break;
+ default:
+ RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
+ SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
+ PKT_RX_IP_CKSUM_UNKNOWN);
+ break;
+ }
+
+ switch ((desc_flags &
+ (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
+ case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
+ case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
+ mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case EFX_PKT_TCP:
+ case EFX_PKT_UDP:
+ mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
+ break;
+ default:
+ RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
+ SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
+ PKT_RX_L4_CKSUM_UNKNOWN);
+ break;
+ }
+
+ return mbuf_flags;
+}
+
+static uint32_t
+sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
+{
+ return RTE_PTYPE_L2_ETHER |
+ ((desc_flags & EFX_PKT_IPV4) ?
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
+ ((desc_flags & EFX_PKT_IPV6) ?
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
+ ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
+ ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
+}
+
+static const uint32_t *
+sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes;
+}
+
+static void
+sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
+ struct rte_mbuf *m)
+{
+ uint8_t *mbuf_data;
+
+
+ if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
+ return;
+
+ mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);
+
+ if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
+ m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
+ EFX_RX_HASHALG_TOEPLITZ,
+ mbuf_data);
+
+ m->ol_flags |= PKT_RX_RSS_HASH;
+ }
+}
+
+static uint16_t
+sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_dp_rxq *dp_rxq = rx_queue;
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ unsigned int completed;
+ unsigned int prefix_size = rxq->prefix_size;
+ unsigned int done_pkts = 0;
+ boolean_t discard_next = B_FALSE;
+ struct rte_mbuf *scatter_pkt = NULL;
+
+ if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
+ return 0;
+
+ sfc_ev_qpoll(rxq->evq);
+
+ completed = rxq->completed;
+ while (completed != rxq->pending && done_pkts < nb_pkts) {
+ unsigned int id;
+ struct sfc_efx_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ unsigned int seg_len;
+ unsigned int desc_flags;
+
+ id = completed++ & rxq->ptr_mask;
+ rxd = &rxq->sw_desc[id];
+ m = rxd->mbuf;
+ desc_flags = rxd->flags;
+
+ if (discard_next)
+ goto discard;
+
+ if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
+ goto discard;
+
+ if (desc_flags & EFX_PKT_PREFIX_LEN) {
+ uint16_t tmp_size;
+ int rc __rte_unused;
+
+ rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
+ rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
+ SFC_ASSERT(rc == 0);
+ seg_len = tmp_size;
+ } else {
+ seg_len = rxd->size - prefix_size;
+ }
+
+ rte_pktmbuf_data_len(m) = seg_len;
+ rte_pktmbuf_pkt_len(m) = seg_len;
+
+ if (scatter_pkt != NULL) {
+ if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
+ rte_pktmbuf_free(scatter_pkt);
+ goto discard;
+ }
+ /* The packet to deliver */
+ m = scatter_pkt;
+ }
+
+ if (desc_flags & EFX_PKT_CONT) {
+ /* The packet is scattered, more fragments to come */
+ scatter_pkt = m;
+ /* Further fragments have no prefix */
+ prefix_size = 0;
+ continue;
+ }
+
+ /* Scattered packet is done */
+ scatter_pkt = NULL;
+ /* The first fragment of the packet has prefix */
+ prefix_size = rxq->prefix_size;
+
+ m->ol_flags =
+ sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
+ m->packet_type =
+ sfc_efx_rx_desc_flags_to_packet_type(desc_flags);
+
+ /*
+ * Extract RSS hash from the packet prefix and
+ * set the corresponding field (if needed and possible)
+ */
+ sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);
+
+ m->data_off += prefix_size;
+
+ *rx_pkts++ = m;
+ done_pkts++;
+ continue;
+
+discard:
+ discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
+ rte_mempool_put(rxq->refill_mb_pool, m);
+ rxd->mbuf = NULL;
+ }
+
+ /* pending is only moved when entire packet is received */
+ SFC_ASSERT(scatter_pkt == NULL);
+
+ rxq->completed = completed;
+
+ sfc_efx_rx_qrefill(rxq);
+
+ return done_pkts;
+}
+
+static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
+static unsigned int
+sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
+ return 0;
+
+ sfc_ev_qpoll(rxq->evq);
+
+ return rxq->pending - rxq->completed;
+}
+
+static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
+static int
+sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ if (unlikely(offset > rxq->ptr_mask))
+ return -EINVAL;
+
+ /*
+ * Poll EvQ to derive up-to-date 'rxq->pending' figure;
+ * it is required for the queue to be running, but the
+ * check is omitted because API design assumes that it
+ * is the duty of the caller to satisfy all conditions
+ */
+ SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
+ SFC_EFX_RXQ_FLAG_RUNNING);
+ sfc_ev_qpoll(rxq->evq);
+
+ /*
+ * There is a handful of reserved entries in the ring,
+ * but an explicit check whether the offset points to
+ * a reserved entry is neglected since the two checks
+ * below rely on the figures which take the HW limits
+ * into account and thus if an entry is reserved, the
+ * checks will fail and UNAVAIL code will be returned
+ */
+
+ if (offset < (rxq->pending - rxq->completed))
+ return RTE_ETH_RX_DESC_DONE;
+
+ if (offset < (rxq->added - rxq->completed))
+ return RTE_ETH_RX_DESC_AVAIL;
+
+ return RTE_ETH_RX_DESC_UNAVAIL;
+}
+
+struct sfc_rxq *
+sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
+{
+ const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
+ struct rte_eth_dev *eth_dev;
+ struct sfc_adapter *sa;
+ struct sfc_rxq *rxq;
+
+ SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
+ eth_dev = &rte_eth_devices[dpq->port_id];
+
+ sa = eth_dev->data->dev_private;
+
+ SFC_ASSERT(dpq->queue_id < sa->rxq_count);
+ rxq = sa->rxq_info[dpq->queue_id].rxq;
+
+ SFC_ASSERT(rxq != NULL);
+ return rxq;
+}
+
+static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
+static int
+sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ __rte_unused struct rte_mempool *mb_pool,
+ unsigned int *rxq_entries,
+ unsigned int *evq_entries,
+ unsigned int *rxq_max_fill_level)
+{
+ *rxq_entries = nb_rx_desc;
+ *evq_entries = nb_rx_desc;
+ *rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
+ return 0;
+}
+
+static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
+static int
+sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp)
+{
+ struct sfc_efx_rxq *rxq;
+ int rc;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
+ info->rxq_entries,
+ sizeof(*rxq->sw_desc),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_desc == NULL)
+ goto fail_desc_alloc;
+
+ /* efx datapath is bound to efx control path */
+ rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
+ if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
+ rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
+ rxq->ptr_mask = info->rxq_entries - 1;
+ rxq->batch_max = info->batch_max;
+ rxq->prefix_size = info->prefix_size;
+ rxq->max_fill_level = info->max_fill_level;
+ rxq->refill_threshold = info->refill_threshold;
+ rxq->buf_size = info->buf_size;
+ rxq->refill_mb_pool = info->refill_mb_pool;
+
+ *dp_rxqp = &rxq->dp;
+ return 0;
+
+fail_desc_alloc:
+ rte_free(rxq);
+
+fail_rxq_alloc:
+ return rc;
+}
+
+static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
+static void
+sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ rte_free(rxq->sw_desc);
+ rte_free(rxq);
+}
+
+static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
+static int
+sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
+ __rte_unused unsigned int evq_read_ptr)
+{
+ /* libefx-based datapath is specific to libefx-based PMD */
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->common = crxq->common;
+
+ rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;
+
+ sfc_efx_rx_qrefill(rxq);
+
+ rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);
+
+ return 0;
+}
+
+static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
+static void
+sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
+ __rte_unused unsigned int *evq_read_ptr)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;
+
+ /* libefx-based datapath is bound to libefx-based PMD and uses
+ * event queue structure directly. So, there is no necessity to
+ * return EvQ read pointer.
+ */
+}
+
+static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
+static void
+sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ unsigned int i;
+ struct sfc_efx_rx_sw_desc *rxd;
+
+ for (i = rxq->completed; i != rxq->added; ++i) {
+ rxd = &rxq->sw_desc[i & rxq->ptr_mask];
+ rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ rxd->mbuf = NULL;
+ /* Packed stream relies on 0 in inactive SW desc.
+ * Rx queue stop is not performance critical, so
+ * there is no harm to do it always.
+ */
+ rxd->flags = 0;
+ rxd->size = 0;
+ }
+
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
+}
+
+struct sfc_dp_rx sfc_efx_rx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EFX,
+ .type = SFC_DP_RX,
+ .hw_fw_caps = 0,
+ },
+ .features = SFC_DP_RX_FEAT_SCATTER |
+ SFC_DP_RX_FEAT_CHECKSUM,
+ .qsize_up_rings = sfc_efx_rx_qsize_up_rings,
+ .qcreate = sfc_efx_rx_qcreate,
+ .qdestroy = sfc_efx_rx_qdestroy,
+ .qstart = sfc_efx_rx_qstart,
+ .qstop = sfc_efx_rx_qstop,
+ .qpurge = sfc_efx_rx_qpurge,
+ .supported_ptypes_get = sfc_efx_supported_ptypes_get,
+ .qdesc_npending = sfc_efx_rx_qdesc_npending,
+ .qdesc_status = sfc_efx_rx_qdesc_status,
+ .pkt_burst = sfc_efx_recv_pkts,
+};
+
+unsigned int
+sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq *rxq;
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+ rxq = sa->rxq_info[sw_index].rxq;
+
+ if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
+ return 0;
+
+ return sa->dp_rx->qdesc_npending(rxq->dp);
+}
+
+int
+sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
+{
+ struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+
+ return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
+}
+
+static void
+sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq *rxq;
+ unsigned int retry_count;
+ unsigned int wait_count;
+ int rc;
+
+ rxq = sa->rxq_info[sw_index].rxq;
+ SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
+
+ /*
+ * Retry Rx queue flushing in the case of flush failed or
+ * timeout. In the worst case it can delay for 6 seconds.
+ */
+ for (retry_count = 0;
+ ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
+ (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
+ ++retry_count) {
+ rc = efx_rx_qflush(rxq->common);
+ if (rc != 0) {
+ rxq->state |= (rc == EALREADY) ?
+ SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
+ break;
+ }
+ rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
+ rxq->state |= SFC_RXQ_FLUSHING;
+
+ /*
+ * Wait for Rx queue flush done or failed event at least
+ * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
+ * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
+ * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
+ */
+ wait_count = 0;
+ do {
+ rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
+ sfc_ev_qpoll(rxq->evq);
+ } while ((rxq->state & SFC_RXQ_FLUSHING) &&
+ (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));
+
+ if (rxq->state & SFC_RXQ_FLUSHING)
+ sfc_err(sa, "RxQ %u flush timed out", sw_index);
+
+ if (rxq->state & SFC_RXQ_FLUSH_FAILED)
+ sfc_err(sa, "RxQ %u flush failed", sw_index);
+
+ if (rxq->state & SFC_RXQ_FLUSHED)
+ sfc_notice(sa, "RxQ %u flushed", sw_index);
+ }
+
+ sa->dp_rx->qpurge(rxq->dp);
+}
+
+static int
+sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
+{
+ struct sfc_rss *rss = &sa->rss;
+ boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ /*
+ * If promiscuous or all-multicast mode has been requested, setting
+ * filter for the default Rx queue might fail, in particular, while
+ * running over PCI function which is not a member of corresponding
+ * privilege groups; if this occurs, few iterations will be made to
+ * repeat this step without promiscuous and all-multicast flags set
+ */
+retry:
+ rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss);
+ if (rc == 0)
+ return 0;
+ else if (rc != EOPNOTSUPP)
+ return rc;
+
+ if (port->promisc) {
+ sfc_warn(sa, "promiscuous mode has been requested, "
+ "but the HW rejects it");
+ sfc_warn(sa, "promiscuous mode will be disabled");
+
+ port->promisc = B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ return rc;
+
+ goto retry;
+ }
+
+ if (port->allmulti) {
+ sfc_warn(sa, "all-multicast mode has been requested, "
+ "but the HW rejects it");
+ sfc_warn(sa, "all-multicast mode will be disabled");
+
+ port->allmulti = B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ return rc;
+
+ goto retry;
+ }
+
+ return rc;
+}
+
+int
+sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_port *port = &sa->port;
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+ struct sfc_evq *evq;
+ int rc;
+
+ sfc_log_init(sa, "sw_index=%u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[sw_index];
+ rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
+
+ evq = rxq->evq;
+
+ rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
+ if (rc != 0)
+ goto fail_ev_qstart;
+
+ switch (rxq_info->type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
+ &rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
+ rxq_info->type_flags, evq->common, &rxq->common);
+ break;
+ case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
+ struct rte_mempool *mp = rxq->refill_mb_pool;
+ struct rte_mempool_info mp_info;
+
+ rc = rte_mempool_ops_get_info(mp, &mp_info);
+ if (rc != 0) {
+ /* Positive errno is used in the driver */
+ rc = -rc;
+ goto fail_mp_get_info;
+ }
+ if (mp_info.contig_block_size <= 0) {
+ rc = EINVAL;
+ goto fail_bad_contig_block_size;
+ }
+ rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0,
+ mp_info.contig_block_size, rxq->buf_size,
+ mp->header_size + mp->elt_size + mp->trailer_size,
+ sa->rxd_wait_timeout_ns,
+ &rxq->mem, rxq_info->entries, rxq_info->type_flags,
+ evq->common, &rxq->common);
+ break;
+ }
+ default:
+ rc = ENOTSUP;
+ }
+ if (rc != 0)
+ goto fail_rx_qcreate;
+
+ efx_rx_qenable(rxq->common);
+
+ rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
+ if (rc != 0)
+ goto fail_dp_qstart;
+
+ rxq->state |= SFC_RXQ_STARTED;
+
+ if ((sw_index == 0) && !port->isolated) {
+ rc = sfc_rx_default_rxq_set_filter(sa, rxq);
+ if (rc != 0)
+ goto fail_mac_filter_default_rxq_set;
+ }
+
+ /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
+ sa->eth_dev->data->rx_queue_state[sw_index] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+
+fail_mac_filter_default_rxq_set:
+ sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
+
+fail_dp_qstart:
+ sfc_rx_qflush(sa, sw_index);
+
+fail_rx_qcreate:
+fail_bad_contig_block_size:
+fail_mp_get_info:
+ sfc_ev_qstop(evq);
+
+fail_ev_qstart:
+ return rc;
+}
+
+void
+sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+
+ sfc_log_init(sa, "sw_index=%u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[sw_index];
+ rxq = rxq_info->rxq;
+
+ if (rxq->state == SFC_RXQ_INITIALIZED)
+ return;
+ SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
+
+ /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
+ sa->eth_dev->data->rx_queue_state[sw_index] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+
+ sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
+
+ if (sw_index == 0)
+ efx_mac_filter_default_rxq_clear(sa->nic);
+
+ sfc_rx_qflush(sa, sw_index);
+
+ rxq->state = SFC_RXQ_INITIALIZED;
+
+ efx_rx_qdestroy(rxq->common);
+
+ sfc_ev_qstop(rxq->evq);
+}
+
+uint64_t
+sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint64_t caps = 0;
+
+ caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ caps |= DEV_RX_OFFLOAD_CRC_STRIP;
+
+ if (sa->dp_rx->features & SFC_DP_RX_FEAT_CHECKSUM) {
+ caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+ caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
+ caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
+ }
+
+ if (encp->enc_tunnel_encapsulations_supported &&
+ (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
+ caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+ return caps;
+}
+
+uint64_t
+sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
+{
+ uint64_t caps = 0;
+
+ if (sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)
+ caps |= DEV_RX_OFFLOAD_SCATTER;
+
+ return caps;
+}
+
+static int
+sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
+ const struct rte_eth_rxconf *rx_conf,
+ __rte_unused uint64_t offloads)
+{
+ int rc = 0;
+
+ if (rx_conf->rx_thresh.pthresh != 0 ||
+ rx_conf->rx_thresh.hthresh != 0 ||
+ rx_conf->rx_thresh.wthresh != 0) {
+ sfc_warn(sa,
+ "RxQ prefetch/host/writeback thresholds are not supported");
+ }
+
+ if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
+ sfc_err(sa,
+ "RxQ free threshold too large: %u vs maximum %u",
+ rx_conf->rx_free_thresh, rxq_max_fill_level);
+ rc = EINVAL;
+ }
+
+ if (rx_conf->rx_drop_en == 0) {
+ sfc_err(sa, "RxQ drop disable is not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+static unsigned int
+sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
+{
+ uint32_t data_off;
+ uint32_t order;
+
+ /* The mbuf object itself is always cache line aligned */
+ order = rte_bsf32(RTE_CACHE_LINE_SIZE);
+
+ /* Data offset from mbuf object start */
+ data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
+ RTE_PKTMBUF_HEADROOM;
+
+ order = MIN(order, rte_bsf32(data_off));
+
+ return 1u << order;
+}
+
+static uint16_t
+sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
+ const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
+ uint16_t buf_size;
+ unsigned int buf_aligned;
+ unsigned int start_alignment;
+ unsigned int end_padding_alignment;
+
+ /* Below it is assumed that both alignments are power of 2 */
+ SFC_ASSERT(rte_is_power_of_2(nic_align_start));
+ SFC_ASSERT(rte_is_power_of_2(nic_align_end));
+
+ /*
+ * mbuf is always cache line aligned, double-check
+ * that it meets rx buffer start alignment requirements.
+ */
+
+ /* Start from mbuf pool data room size */
+ buf_size = rte_pktmbuf_data_room_size(mb_pool);
+
+ /* Remove headroom */
+ if (buf_size <= RTE_PKTMBUF_HEADROOM) {
+ sfc_err(sa,
+ "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
+ mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
+ return 0;
+ }
+ buf_size -= RTE_PKTMBUF_HEADROOM;
+
+ /* Calculate guaranteed data start alignment */
+ buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);
+
+ /* Reserve space for start alignment */
+ if (buf_aligned < nic_align_start) {
+ start_alignment = nic_align_start - buf_aligned;
+ if (buf_size <= start_alignment) {
+ sfc_err(sa,
+ "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
+ mb_pool->name,
+ rte_pktmbuf_data_room_size(mb_pool),
+ RTE_PKTMBUF_HEADROOM, start_alignment);
+ return 0;
+ }
+ buf_aligned = nic_align_start;
+ buf_size -= start_alignment;
+ } else {
+ start_alignment = 0;
+ }
+
+ /* Make sure that end padding does not write beyond the buffer */
+ if (buf_aligned < nic_align_end) {
+ /*
+ * Estimate space which can be lost. If guarnteed buffer
+ * size is odd, lost space is (nic_align_end - 1). More
+ * accurate formula is below.
+ */
+ end_padding_alignment = nic_align_end -
+ MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
+ if (buf_size <= end_padding_alignment) {
+ sfc_err(sa,
+ "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
+ mb_pool->name,
+ rte_pktmbuf_data_room_size(mb_pool),
+ RTE_PKTMBUF_HEADROOM, start_alignment,
+ end_padding_alignment);
+ return 0;
+ }
+ buf_size -= end_padding_alignment;
+ } else {
+ /*
+ * Start is aligned the same or better than end,
+ * just align length.
+ */
+ buf_size = P2ALIGN(buf_size, nic_align_end);
+ }
+
+ return buf_size;
+}
+
+int
+sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ struct sfc_rss *rss = &sa->rss;
+ int rc;
+ unsigned int rxq_entries;
+ unsigned int evq_entries;
+ unsigned int rxq_max_fill_level;
+ uint64_t offloads;
+ uint16_t buf_size;
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_evq *evq;
+ struct sfc_rxq *rxq;
+ struct sfc_dp_rx_qcreate_info info;
+
+ rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries,
+ &evq_entries, &rxq_max_fill_level);
+ if (rc != 0)
+ goto fail_size_up_rings;
+ SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS);
+ SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
+ SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
+
+ offloads = rx_conf->offloads |
+ sa->eth_dev->data->dev_conf.rxmode.offloads;
+ rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
+ if (rc != 0)
+ goto fail_bad_conf;
+
+ buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
+ if (buf_size == 0) {
+ sfc_err(sa, "RxQ %u mbuf pool object size is too small",
+ sw_index);
+ rc = EINVAL;
+ goto fail_bad_conf;
+ }
+
+ if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
+ (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
+ "object size is too small", sw_index);
+ sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
+ "PDU size %u plus Rx prefix %u bytes",
+ sw_index, buf_size, (unsigned int)sa->port.pdu,
+ encp->enc_rx_prefix_size);
+ rc = EINVAL;
+ goto fail_bad_conf;
+ }
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+ rxq_info = &sa->rxq_info[sw_index];
+
+ SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
+ rxq_info->entries = rxq_entries;
+
+ if (sa->dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
+ rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
+ else
+ rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
+
+ rxq_info->type_flags =
+ (offloads & DEV_RX_OFFLOAD_SCATTER) ?
+ EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
+
+ if ((encp->enc_tunnel_encapsulations_supported != 0) &&
+ (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
+ rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
+
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
+ evq_entries, socket_id, &evq);
+ if (rc != 0)
+ goto fail_ev_qinit;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ rxq_info->rxq = rxq;
+
+ rxq->evq = evq;
+ rxq->hw_index = sw_index;
+ rxq->refill_threshold =
+ RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
+ rxq->refill_mb_pool = mb_pool;
+ rxq->buf_size = buf_size;
+
+ rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
+ socket_id, &rxq->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ memset(&info, 0, sizeof(info));
+ info.refill_mb_pool = rxq->refill_mb_pool;
+ info.max_fill_level = rxq_max_fill_level;
+ info.refill_threshold = rxq->refill_threshold;
+ info.buf_size = buf_size;
+ info.batch_max = encp->enc_rx_batch_max;
+ info.prefix_size = encp->enc_rx_prefix_size;
+
+ if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0)
+ info.flags |= SFC_RXQ_FLAG_RSS_HASH;
+
+ info.rxq_entries = rxq_info->entries;
+ info.rxq_hw_ring = rxq->mem.esm_base;
+ info.evq_entries = evq_entries;
+ info.evq_hw_ring = evq->mem.esm_base;
+ info.hw_index = rxq->hw_index;
+ info.mem_bar = sa->mem_bar.esb_base;
+ info.vi_window_shift = encp->enc_vi_window_shift;
+
+ rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
+ &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
+ socket_id, &info, &rxq->dp);
+ if (rc != 0)
+ goto fail_dp_rx_qcreate;
+
+ evq->dp_rxq = rxq->dp;
+
+ rxq->state = SFC_RXQ_INITIALIZED;
+
+ rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);
+
+ return 0;
+
+fail_dp_rx_qcreate:
+ sfc_dma_free(sa, &rxq->mem);
+
+fail_dma_alloc:
+ rxq_info->rxq = NULL;
+ rte_free(rxq);
+
+fail_rxq_alloc:
+ sfc_ev_qfini(evq);
+
+fail_ev_qinit:
+ rxq_info->entries = 0;
+
+fail_bad_conf:
+fail_size_up_rings:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[sw_index];
+
+ rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
+
+ sa->dp_rx->qdestroy(rxq->dp);
+ rxq->dp = NULL;
+
+ rxq_info->rxq = NULL;
+ rxq_info->entries = 0;
+
+ sfc_dma_free(sa, &rxq->mem);
+
+ sfc_ev_qfini(rxq->evq);
+ rxq->evq = NULL;
+
+ rte_free(rxq);
+}
+
+/*
+ * Mapping between RTE RSS hash functions and their EFX counterparts.
+ */
+struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
+ { ETH_RSS_NONFRAG_IPV4_TCP,
+ EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
+ { ETH_RSS_NONFRAG_IPV4_UDP,
+ EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
+ { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
+ EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
+ { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+ EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
+ { ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
+ EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
+ EFX_RX_HASH(IPV4, 2TUPLE) },
+ { ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
+ ETH_RSS_IPV6_EX,
+ EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
+ EFX_RX_HASH(IPV6, 2TUPLE) }
+};
+
+static efx_rx_hash_type_t
+sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type,
+ unsigned int *hash_type_flags_supported,
+ unsigned int nb_hash_type_flags_supported)
+{
+ efx_rx_hash_type_t hash_type_masked = 0;
+ unsigned int i, j;
+
+ for (i = 0; i < nb_hash_type_flags_supported; ++i) {
+ unsigned int class_tuple_lbn[] = {
+ EFX_RX_CLASS_IPV4_TCP_LBN,
+ EFX_RX_CLASS_IPV4_UDP_LBN,
+ EFX_RX_CLASS_IPV4_LBN,
+ EFX_RX_CLASS_IPV6_TCP_LBN,
+ EFX_RX_CLASS_IPV6_UDP_LBN,
+ EFX_RX_CLASS_IPV6_LBN
+ };
+
+ for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) {
+ unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE;
+ unsigned int flag;
+
+ tuple_mask <<= class_tuple_lbn[j];
+ flag = hash_type & tuple_mask;
+
+ if (flag == hash_type_flags_supported[i])
+ hash_type_masked |= flag;
+ }
+ }
+
+ return hash_type_masked;
+}
+
+int
+sfc_rx_hash_init(struct sfc_adapter *sa)
+{
+ struct sfc_rss *rss = &sa->rss;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
+ efx_rx_hash_alg_t alg;
+ unsigned int flags_supp[EFX_RX_HASH_NFLAGS];
+ unsigned int nb_flags_supp;
+ struct sfc_rss_hf_rte_to_efx *hf_map;
+ struct sfc_rss_hf_rte_to_efx *entry;
+ efx_rx_hash_type_t efx_hash_types;
+ unsigned int i;
+ int rc;
+
+ if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
+ alg = EFX_RX_HASHALG_TOEPLITZ;
+ else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM))
+ alg = EFX_RX_HASHALG_PACKED_STREAM;
+ else
+ return EINVAL;
+
+ rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
+ &nb_flags_supp);
+ if (rc != 0)
+ return rc;
+
+ hf_map = rte_calloc_socket("sfc-rss-hf-map",
+ RTE_DIM(sfc_rss_hf_map),
+ sizeof(*hf_map), 0, sa->socket_id);
+ if (hf_map == NULL)
+ return ENOMEM;
+
+ entry = hf_map;
+ efx_hash_types = 0;
+ for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
+ efx_rx_hash_type_t ht;
+
+ ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx,
+ flags_supp, nb_flags_supp);
+ if (ht != 0) {
+ entry->rte = sfc_rss_hf_map[i].rte;
+ entry->efx = ht;
+ efx_hash_types |= ht;
+ ++entry;
+ }
+ }
+
+ rss->hash_alg = alg;
+ rss->hf_map_nb_entries = (unsigned int)(entry - hf_map);
+ rss->hf_map = hf_map;
+ rss->hash_types = efx_hash_types;
+
+ return 0;
+}
+
+void
+sfc_rx_hash_fini(struct sfc_adapter *sa)
+{
+ struct sfc_rss *rss = &sa->rss;
+
+ rte_free(rss->hf_map);
+}
+
+int
+sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
+ efx_rx_hash_type_t *efx)
+{
+ struct sfc_rss *rss = &sa->rss;
+ efx_rx_hash_type_t hash_types = 0;
+ unsigned int i;
+
+ for (i = 0; i < rss->hf_map_nb_entries; ++i) {
+ uint64_t rte_mask = rss->hf_map[i].rte;
+
+ if ((rte & rte_mask) != 0) {
+ rte &= ~rte_mask;
+ hash_types |= rss->hf_map[i].efx;
+ }
+ }
+
+ if (rte != 0) {
+ sfc_err(sa, "unsupported hash functions requested");
+ return EINVAL;
+ }
+
+ *efx = hash_types;
+
+ return 0;
+}
+
+uint64_t
+sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa, efx_rx_hash_type_t efx)
+{
+ struct sfc_rss *rss = &sa->rss;
+ uint64_t rte = 0;
+ unsigned int i;
+
+ for (i = 0; i < rss->hf_map_nb_entries; ++i) {
+ efx_rx_hash_type_t hash_type = rss->hf_map[i].efx;
+
+ if ((efx & hash_type) == hash_type)
+ rte |= rss->hf_map[i].rte;
+ }
+
+ return rte;
+}
+
+static int
+sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
+ struct rte_eth_rss_conf *conf)
+{
+ struct sfc_rss *rss = &sa->rss;
+ efx_rx_hash_type_t efx_hash_types = rss->hash_types;
+ uint64_t rss_hf = sfc_rx_hf_efx_to_rte(sa, efx_hash_types);
+ int rc;
+
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
+ if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) ||
+ conf->rss_key != NULL)
+ return EINVAL;
+ }
+
+ if (conf->rss_hf != 0) {
+ rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types);
+ if (rc != 0)
+ return rc;
+ }
+
+ if (conf->rss_key != NULL) {
+ if (conf->rss_key_len != sizeof(rss->key)) {
+ sfc_err(sa, "RSS key size is wrong (should be %lu)",
+ sizeof(rss->key));
+ return EINVAL;
+ }
+ rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key));
+ }
+
+ rss->hash_types = efx_hash_types;
+
+ return 0;
+}
+
+static int
+sfc_rx_rss_config(struct sfc_adapter *sa)
+{
+ struct sfc_rss *rss = &sa->rss;
+ int rc = 0;
+
+ if (rss->channels > 0) {
+ rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ rss->hash_alg, rss->hash_types,
+ B_TRUE);
+ if (rc != 0)
+ goto finish;
+
+ rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ rss->key, sizeof(rss->key));
+ if (rc != 0)
+ goto finish;
+
+ rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ rss->tbl, RTE_DIM(rss->tbl));
+ }
+
+finish:
+ return rc;
+}
+
+int
+sfc_rx_start(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+ int rc;
+
+ sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
+
+ rc = efx_rx_init(sa->nic);
+ if (rc != 0)
+ goto fail_rx_init;
+
+ rc = sfc_rx_rss_config(sa);
+ if (rc != 0)
+ goto fail_rss_config;
+
+ for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
+ if ((!sa->rxq_info[sw_index].deferred_start ||
+ sa->rxq_info[sw_index].deferred_started)) {
+ rc = sfc_rx_qstart(sa, sw_index);
+ if (rc != 0)
+ goto fail_rx_qstart;
+ }
+ }
+
+ return 0;
+
+fail_rx_qstart:
+ while (sw_index-- > 0)
+ sfc_rx_qstop(sa, sw_index);
+
+fail_rss_config:
+ efx_rx_fini(sa->nic);
+
+fail_rx_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_rx_stop(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+
+ sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
+
+ sw_index = sa->rxq_count;
+ while (sw_index-- > 0) {
+ if (sa->rxq_info[sw_index].rxq != NULL)
+ sfc_rx_qstop(sa, sw_index);
+ }
+
+ efx_rx_fini(sa->nic);
+}
+
+static int
+sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
+ unsigned int max_entries;
+
+ max_entries = EFX_RXQ_MAXNDESCS;
+ SFC_ASSERT(rte_is_power_of_2(max_entries));
+
+ rxq_info->max_entries = max_entries;
+
+ return 0;
+}
+
+static int
+sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
+{
+ uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
+ sfc_rx_get_queue_offload_caps(sa);
+ struct sfc_rss *rss = &sa->rss;
+ int rc = 0;
+
+ switch (rxmode->mq_mode) {
+ case ETH_MQ_RX_NONE:
+ /* No special checks are required */
+ break;
+ case ETH_MQ_RX_RSS:
+ if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
+ sfc_err(sa, "RSS is not available");
+ rc = EINVAL;
+ }
+ break;
+ default:
+ sfc_err(sa, "Rx multi-queue mode %u not supported",
+ rxmode->mq_mode);
+ rc = EINVAL;
+ }
+
+ /* KEEP_CRC offload flag is not supported by PMD
+ * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
+ */
+ if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
+ sfc_warn(sa, "FCS stripping cannot be disabled - always on");
+ rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ }
+
+ /*
+ * Requested offloads are validated against supported by ethdev,
+ * so unsupported offloads cannot be added as the result of
+ * below check.
+ */
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+ (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
+ sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
+ rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ }
+
+ if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+ (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+ sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
+ rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ }
+
+ return rc;
+}
+
+/**
+ * Destroy excess queues that are no longer needed after reconfiguration
+ * or complete close.
+ */
+static void
+sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
+{
+ int sw_index;
+
+ SFC_ASSERT(nb_rx_queues <= sa->rxq_count);
+
+ sw_index = sa->rxq_count;
+ while (--sw_index >= (int)nb_rx_queues) {
+ if (sa->rxq_info[sw_index].rxq != NULL)
+ sfc_rx_qfini(sa, sw_index);
+ }
+
+ sa->rxq_count = nb_rx_queues;
+}
+
+/**
+ * Initialize Rx subsystem.
+ *
+ * Called at device (re)configuration stage when number of receive queues is
+ * specified together with other device level receive configuration.
+ *
+ * It should be used to allocate NUMA-unaware resources.
+ */
+int
+sfc_rx_configure(struct sfc_adapter *sa)
+{
+ struct sfc_rss *rss = &sa->rss;
+ struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
+ const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
+ int rc;
+
+ sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
+ nb_rx_queues, sa->rxq_count);
+
+ rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
+ if (rc != 0)
+ goto fail_check_mode;
+
+ if (nb_rx_queues == sa->rxq_count)
+ goto done;
+
+ if (sa->rxq_info == NULL) {
+ rc = ENOMEM;
+ sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
+ sizeof(sa->rxq_info[0]), 0,
+ sa->socket_id);
+ if (sa->rxq_info == NULL)
+ goto fail_rxqs_alloc;
+ } else {
+ struct sfc_rxq_info *new_rxq_info;
+
+ if (nb_rx_queues < sa->rxq_count)
+ sfc_rx_fini_queues(sa, nb_rx_queues);
+
+ rc = ENOMEM;
+ new_rxq_info =
+ rte_realloc(sa->rxq_info,
+ nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
+ if (new_rxq_info == NULL && nb_rx_queues > 0)
+ goto fail_rxqs_realloc;
+
+ sa->rxq_info = new_rxq_info;
+ if (nb_rx_queues > sa->rxq_count)
+ memset(&sa->rxq_info[sa->rxq_count], 0,
+ (nb_rx_queues - sa->rxq_count) *
+ sizeof(sa->rxq_info[0]));
+ }
+
+ while (sa->rxq_count < nb_rx_queues) {
+ rc = sfc_rx_qinit_info(sa, sa->rxq_count);
+ if (rc != 0)
+ goto fail_rx_qinit_info;
+
+ sa->rxq_count++;
+ }
+
+ rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+ MIN(sa->rxq_count, EFX_MAXRSS) : 0;
+
+ if (rss->channels > 0) {
+ struct rte_eth_rss_conf *adv_conf_rss;
+ unsigned int sw_index;
+
+ for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
+ rss->tbl[sw_index] = sw_index % rss->channels;
+
+ adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf;
+ rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss);
+ if (rc != 0)
+ goto fail_rx_process_adv_conf_rss;
+ }
+
+done:
+ return 0;
+
+fail_rx_process_adv_conf_rss:
+fail_rx_qinit_info:
+fail_rxqs_realloc:
+fail_rxqs_alloc:
+ sfc_rx_close(sa);
+
+fail_check_mode:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+/**
+ * Shutdown Rx subsystem.
+ *
+ * Called at device close stage, for example, before device shutdown.
+ */
+void
+sfc_rx_close(struct sfc_adapter *sa)
+{
+ struct sfc_rss *rss = &sa->rss;
+
+ sfc_rx_fini_queues(sa, 0);
+
+ rss->channels = 0;
+
+ rte_free(sa->rxq_info);
+ sa->rxq_info = NULL;
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_rx.h b/src/spdk/dpdk/drivers/net/sfc/sfc_rx.h
new file mode 100644
index 00000000..3fba7d8a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_rx.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_RX_H
+#define _SFC_RX_H
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_ethdev_driver.h>
+
+#include "efx.h"
+
+#include "sfc_dp_rx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_adapter;
+struct sfc_evq;
+
+/**
+ * Software Rx descriptor information associated with hardware Rx
+ * descriptor.
+ */
+struct sfc_efx_rx_sw_desc {
+ struct rte_mbuf *mbuf;
+ unsigned int flags;
+ unsigned int size;
+};
+
+/** Receive queue state bits */
+enum sfc_rxq_state_bit {
+ SFC_RXQ_INITIALIZED_BIT = 0,
+#define SFC_RXQ_INITIALIZED (1 << SFC_RXQ_INITIALIZED_BIT)
+ SFC_RXQ_STARTED_BIT,
+#define SFC_RXQ_STARTED (1 << SFC_RXQ_STARTED_BIT)
+ SFC_RXQ_FLUSHING_BIT,
+#define SFC_RXQ_FLUSHING (1 << SFC_RXQ_FLUSHING_BIT)
+ SFC_RXQ_FLUSHED_BIT,
+#define SFC_RXQ_FLUSHED (1 << SFC_RXQ_FLUSHED_BIT)
+ SFC_RXQ_FLUSH_FAILED_BIT,
+#define SFC_RXQ_FLUSH_FAILED (1 << SFC_RXQ_FLUSH_FAILED_BIT)
+};
+
+/**
+ * Receive queue control information.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_rxq {
+ struct sfc_evq *evq;
+ efx_rxq_t *common;
+ efsys_mem_t mem;
+ unsigned int hw_index;
+ unsigned int refill_threshold;
+ struct rte_mempool *refill_mb_pool;
+ uint16_t buf_size;
+ struct sfc_dp_rxq *dp;
+ unsigned int state;
+};
+
+static inline unsigned int
+sfc_rxq_sw_index_by_hw_index(unsigned int hw_index)
+{
+ return hw_index;
+}
+
+static inline unsigned int
+sfc_rxq_sw_index(const struct sfc_rxq *rxq)
+{
+ return sfc_rxq_sw_index_by_hw_index(rxq->hw_index);
+}
+
+struct sfc_rxq *sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);
+
+/**
+ * Receive queue information used on libefx-based data path.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_efx_rxq {
+ /* Used on data path */
+ struct sfc_evq *evq;
+ unsigned int flags;
+#define SFC_EFX_RXQ_FLAG_STARTED 0x1
+#define SFC_EFX_RXQ_FLAG_RUNNING 0x2
+#define SFC_EFX_RXQ_FLAG_RSS_HASH 0x4
+ unsigned int ptr_mask;
+ unsigned int pending;
+ unsigned int completed;
+ uint16_t batch_max;
+ uint16_t prefix_size;
+ struct sfc_efx_rx_sw_desc *sw_desc;
+
+ /* Used on refill */
+ unsigned int added;
+ unsigned int pushed;
+ unsigned int max_fill_level;
+ unsigned int refill_threshold;
+ uint16_t buf_size;
+ struct rte_mempool *refill_mb_pool;
+ efx_rxq_t *common;
+
+ /* Datapath receive queue anchor */
+ struct sfc_dp_rxq dp;
+};
+
+static inline struct sfc_efx_rxq *
+sfc_efx_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+{
+ return container_of(dp_rxq, struct sfc_efx_rxq, dp);
+}
+
+/**
+ * Receive queue information used during setup/release only.
+ * Allocated on the same socket as adapter data.
+ */
+struct sfc_rxq_info {
+ unsigned int max_entries;
+ unsigned int entries;
+ efx_rxq_type_t type;
+ unsigned int type_flags;
+ struct sfc_rxq *rxq;
+ boolean_t deferred_start;
+ boolean_t deferred_started;
+};
+
+int sfc_rx_configure(struct sfc_adapter *sa);
+void sfc_rx_close(struct sfc_adapter *sa);
+int sfc_rx_start(struct sfc_adapter *sa);
+void sfc_rx_stop(struct sfc_adapter *sa);
+
+int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
+int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
+void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+
+uint64_t sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa);
+uint64_t sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa);
+
+void sfc_rx_qflush_done(struct sfc_rxq *rxq);
+void sfc_rx_qflush_failed(struct sfc_rxq *rxq);
+
+unsigned int sfc_rx_qdesc_npending(struct sfc_adapter *sa,
+ unsigned int sw_index);
+int sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset);
+
+int sfc_rx_hash_init(struct sfc_adapter *sa);
+void sfc_rx_hash_fini(struct sfc_adapter *sa);
+int sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
+ efx_rx_hash_type_t *efx);
+uint64_t sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa,
+ efx_rx_hash_type_t efx);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_RX_H */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_tso.c b/src/spdk/dpdk/drivers/net/sfc/sfc_tso.c
new file mode 100644
index 00000000..effe9853
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_tso.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <rte_ip.h>
+#include <rte_tcp.h>
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_tx.h"
+#include "sfc_ev.h"
+
+/** Standard TSO header length */
+#define SFC_TSOH_STD_LEN 256
+
+/** The number of TSO option descriptors that precede the packet descriptors */
+#define SFC_TSO_OPDESCS_IDX_SHIFT 2
+
+int
+sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries, unsigned int socket_id)
+{
+ unsigned int i;
+
+ for (i = 0; i < txq_entries; ++i) {
+ sw_ring[i].tsoh = rte_malloc_socket("sfc-efx-txq-tsoh-obj",
+ SFC_TSOH_STD_LEN,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (sw_ring[i].tsoh == NULL)
+ goto fail_alloc_tsoh_objs;
+ }
+
+ return 0;
+
+fail_alloc_tsoh_objs:
+ while (i > 0)
+ rte_free(sw_ring[--i].tsoh);
+
+ return ENOMEM;
+}
+
+void
+sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries)
+{
+ unsigned int i;
+
+ for (i = 0; i < txq_entries; ++i) {
+ rte_free(sw_ring[i].tsoh);
+ sw_ring[i].tsoh = NULL;
+ }
+}
+
+static void
+sfc_efx_tso_prepare_header(struct sfc_efx_txq *txq, struct rte_mbuf **in_seg,
+ size_t *in_off, unsigned int idx, size_t bytes_left)
+{
+ struct rte_mbuf *m = *in_seg;
+ size_t bytes_to_copy = 0;
+ uint8_t *tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
+
+ do {
+ bytes_to_copy = MIN(bytes_left, m->data_len);
+
+ rte_memcpy(tsoh, rte_pktmbuf_mtod(m, uint8_t *),
+ bytes_to_copy);
+
+ bytes_left -= bytes_to_copy;
+ tsoh += bytes_to_copy;
+
+ if (bytes_left > 0) {
+ m = m->next;
+ SFC_ASSERT(m != NULL);
+ }
+ } while (bytes_left > 0);
+
+ if (bytes_to_copy == m->data_len) {
+ *in_seg = m->next;
+ *in_off = 0;
+ } else {
+ *in_seg = m;
+ *in_off = bytes_to_copy;
+ }
+}
+
+int
+sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
+ struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
+ unsigned int *pkt_descs, size_t *pkt_len)
+{
+ uint8_t *tsoh;
+ const struct tcp_hdr *th;
+ efsys_dma_addr_t header_paddr;
+ uint16_t packet_id;
+ uint32_t sent_seq;
+ struct rte_mbuf *m = *in_seg;
+ size_t nh_off = m->l2_len; /* IP header offset */
+ size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
+ size_t header_len = m->l2_len + m->l3_len + m->l4_len;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
+
+ idx += SFC_TSO_OPDESCS_IDX_SHIFT;
+
+ /* Packets which have too big headers should be discarded */
+ if (unlikely(header_len > SFC_TSOH_STD_LEN))
+ return EMSGSIZE;
+
+ /*
+ * The TCP header must start at most 208 bytes into the frame.
+ * If it starts later than this then the NIC won't realise
+ * it's a TCP packet and TSO edits won't be applied
+ */
+ if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
+ return EMSGSIZE;
+
+ header_paddr = rte_pktmbuf_iova(m);
+
+ /*
+ * Sometimes headers may be split across multiple mbufs. In such cases
+ * we need to glue those pieces and store them in some temporary place.
+ * Also, packet headers must be contiguous in memory, so that
+ * they can be referred to with a single DMA descriptor. EF10 has no
+ * limitations on address boundaries crossing by DMA descriptor data.
+ */
+ if (m->data_len < header_len) {
+ sfc_efx_tso_prepare_header(txq, in_seg, in_off, idx,
+ header_len);
+ tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
+
+ header_paddr = rte_malloc_virt2iova((void *)tsoh);
+ } else {
+ if (m->data_len == header_len) {
+ *in_off = 0;
+ *in_seg = m->next;
+ } else {
+ *in_off = header_len;
+ }
+
+ tsoh = rte_pktmbuf_mtod(m, uint8_t *);
+ }
+
+ /* Handle IP header */
+ if (m->ol_flags & PKT_TX_IPV4) {
+ const struct ipv4_hdr *iphe4;
+
+ iphe4 = (const struct ipv4_hdr *)(tsoh + nh_off);
+ rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
+ packet_id = rte_be_to_cpu_16(packet_id);
+ } else if (m->ol_flags & PKT_TX_IPV6) {
+ packet_id = 0;
+ } else {
+ return EINVAL;
+ }
+
+ /* Handle TCP header */
+ th = (const struct tcp_hdr *)(tsoh + tcph_off);
+
+ rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
+ sent_seq = rte_be_to_cpu_32(sent_seq);
+
+ efx_tx_qdesc_tso2_create(txq->common, packet_id, 0, sent_seq,
+ m->tso_segsz,
+ *pend, EFX_TX_FATSOV2_OPT_NDESCS);
+
+ *pend += EFX_TX_FATSOV2_OPT_NDESCS;
+ *pkt_descs += EFX_TX_FATSOV2_OPT_NDESCS;
+
+ efx_tx_qdesc_dma_create(txq->common, header_paddr, header_len,
+ B_FALSE, (*pend)++);
+ (*pkt_descs)++;
+ *pkt_len -= header_len;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_tweak.h b/src/spdk/dpdk/drivers/net/sfc/sfc_tweak.h
new file mode 100644
index 00000000..4d543f68
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_tweak.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_TWEAK_H_
+#define _SFC_TWEAK_H_
+
+/*
+ * The header is intended to collect defines/constants which could be
+ * tweaked to improve the PMD performance characteristics depending on
+ * the usecase or requirements (CPU load, packet rate, latency).
+ */
+
+/**
+ * Number of Rx descriptors in the bulk submitted on Rx ring refill.
+ */
+#define SFC_RX_REFILL_BULK (RTE_CACHE_LINE_SIZE / sizeof(efx_qword_t))
+
+/**
+ * Make the transmit path reap at least one time per a burst;
+ * this improves cache locality because the same mbufs may be used to send
+ * subsequent bursts in certain cases because of well-timed reap
+ */
+#define SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE 0
+
+/** Default free threshold follows recommendations from DPDK documentation */
+#define SFC_TX_DEFAULT_FREE_THRESH 32
+
+/** Number of mbufs to be freed in bulk in a single call */
+#define SFC_TX_REAP_BULK_SIZE 32
+
+/**
+ * Default head-of-line block timeout to wait for Rx descriptor before
+ * packet drop because of no descriptors available.
+ *
+ * DPDK FW variant only with equal stride super-buffer Rx mode.
+ */
+#define SFC_RXD_WAIT_TIMEOUT_NS_DEF (200U * 1000)
+
+#endif /* _SFC_TWEAK_H_ */
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_tx.c b/src/spdk/dpdk/drivers/net/sfc/sfc_tx.c
new file mode 100644
index 00000000..6d42a1a6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_tx.c
@@ -0,0 +1,1064 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_tx.h"
+#include "sfc_tweak.h"
+#include "sfc_kvargs.h"
+
+/*
+ * Maximum number of TX queue flush attempts in case of
+ * failure or flush timeout
+ */
+#define SFC_TX_QFLUSH_ATTEMPTS (3)
+
+/*
+ * Time to wait between event queue polling attempts when waiting for TX
+ * queue flush done or flush failed events
+ */
+#define SFC_TX_QFLUSH_POLL_WAIT_MS (1)
+
+/*
+ * Maximum number of event queue polling attempts when waiting for TX queue
+ * flush done or flush failed events; it defines TX queue flush attempt timeout
+ * together with SFC_TX_QFLUSH_POLL_WAIT_MS
+ */
+#define SFC_TX_QFLUSH_POLL_ATTEMPTS (2000)
+
+uint64_t
+sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint64_t caps = 0;
+
+ if ((sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) &&
+ encp->enc_hw_tx_insert_vlan_enabled)
+ caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+
+ if (sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
+ caps |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) &&
+ (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT))
+ caps |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ return caps;
+}
+
+uint64_t
+sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint64_t caps = 0;
+
+ caps |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ caps |= DEV_TX_OFFLOAD_UDP_CKSUM;
+ caps |= DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if (encp->enc_tunnel_encapsulations_supported)
+ caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+ if (sa->tso)
+ caps |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ return caps;
+}
+
+static int
+sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
+ const struct rte_eth_txconf *tx_conf,
+ uint64_t offloads)
+{
+ int rc = 0;
+
+ if (tx_conf->tx_rs_thresh != 0) {
+ sfc_err(sa, "RS bit in transmit descriptor is not supported");
+ rc = EINVAL;
+ }
+
+ if (tx_conf->tx_free_thresh > txq_max_fill_level) {
+ sfc_err(sa,
+ "TxQ free threshold too large: %u vs maximum %u",
+ tx_conf->tx_free_thresh, txq_max_fill_level);
+ rc = EINVAL;
+ }
+
+ if (tx_conf->tx_thresh.pthresh != 0 ||
+ tx_conf->tx_thresh.hthresh != 0 ||
+ tx_conf->tx_thresh.wthresh != 0) {
+ sfc_warn(sa,
+ "prefetch/host/writeback thresholds are not supported");
+ }
+
+ /* We either perform both TCP and UDP offload, or no offload at all */
+ if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+ ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+ sfc_err(sa, "TCP and UDP offloads can't be set independently");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+void
+sfc_tx_qflush_done(struct sfc_txq *txq)
+{
+ txq->state |= SFC_TXQ_FLUSHED;
+ txq->state &= ~SFC_TXQ_FLUSHING;
+}
+
+int
+sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ unsigned int txq_entries;
+ unsigned int evq_entries;
+ unsigned int txq_max_fill_level;
+ struct sfc_txq_info *txq_info;
+ struct sfc_evq *evq;
+ struct sfc_txq *txq;
+ int rc = 0;
+ struct sfc_dp_tx_qcreate_info info;
+ uint64_t offloads;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ rc = sa->dp_tx->qsize_up_rings(nb_tx_desc, &txq_entries, &evq_entries,
+ &txq_max_fill_level);
+ if (rc != 0)
+ goto fail_size_up_rings;
+ SFC_ASSERT(txq_entries >= EFX_TXQ_MINNDESCS);
+ SFC_ASSERT(txq_entries <= sa->txq_max_entries);
+ SFC_ASSERT(txq_entries >= nb_tx_desc);
+ SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
+
+ offloads = tx_conf->offloads |
+ sa->eth_dev->data->dev_conf.txmode.offloads;
+ rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
+ if (rc != 0)
+ goto fail_bad_conf;
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ txq_info->entries = txq_entries;
+
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
+ evq_entries, socket_id, &evq);
+ if (rc != 0)
+ goto fail_ev_qinit;
+
+ rc = ENOMEM;
+ txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
+ if (txq == NULL)
+ goto fail_txq_alloc;
+
+ txq_info->txq = txq;
+
+ txq->hw_index = sw_index;
+ txq->evq = evq;
+ txq->free_thresh =
+ (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
+ SFC_TX_DEFAULT_FREE_THRESH;
+ txq->offloads = offloads;
+
+ rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
+ socket_id, &txq->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ memset(&info, 0, sizeof(info));
+ info.max_fill_level = txq_max_fill_level;
+ info.free_thresh = txq->free_thresh;
+ info.offloads = offloads;
+ info.txq_entries = txq_info->entries;
+ info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
+ info.txq_hw_ring = txq->mem.esm_base;
+ info.evq_entries = evq_entries;
+ info.evq_hw_ring = evq->mem.esm_base;
+ info.hw_index = txq->hw_index;
+ info.mem_bar = sa->mem_bar.esb_base;
+ info.vi_window_shift = encp->enc_vi_window_shift;
+
+ rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
+ &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
+ socket_id, &info, &txq->dp);
+ if (rc != 0)
+ goto fail_dp_tx_qinit;
+
+ evq->dp_txq = txq->dp;
+
+ txq->state = SFC_TXQ_INITIALIZED;
+
+ txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);
+
+ return 0;
+
+fail_dp_tx_qinit:
+ sfc_dma_free(sa, &txq->mem);
+
+fail_dma_alloc:
+ txq_info->txq = NULL;
+ rte_free(txq);
+
+fail_txq_alloc:
+ sfc_ev_qfini(evq);
+
+fail_ev_qinit:
+ txq_info->entries = 0;
+
+fail_bad_conf:
+fail_size_up_rings:
+ sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
+ return rc;
+}
+
+void
+sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_txq_info *txq_info;
+ struct sfc_txq *txq;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ txq = txq_info->txq;
+ SFC_ASSERT(txq != NULL);
+ SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
+
+ sa->dp_tx->qdestroy(txq->dp);
+ txq->dp = NULL;
+
+ txq_info->txq = NULL;
+ txq_info->entries = 0;
+
+ sfc_dma_free(sa, &txq->mem);
+
+ sfc_ev_qfini(txq->evq);
+ txq->evq = NULL;
+
+ rte_free(txq);
+}
+
+static int
+sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ return 0;
+}
+
+static int
+sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
+{
+ int rc = 0;
+
+ switch (txmode->mq_mode) {
+ case ETH_MQ_TX_NONE:
+ break;
+ default:
+ sfc_err(sa, "Tx multi-queue mode %u not supported",
+ txmode->mq_mode);
+ rc = EINVAL;
+ }
+
+ /*
+ * These features are claimed to be i40e-specific,
+ * but it does make sense to double-check their absence
+ */
+ if (txmode->hw_vlan_reject_tagged) {
+ sfc_err(sa, "Rejecting tagged packets not supported");
+ rc = EINVAL;
+ }
+
+ if (txmode->hw_vlan_reject_untagged) {
+ sfc_err(sa, "Rejecting untagged packets not supported");
+ rc = EINVAL;
+ }
+
+ if (txmode->hw_vlan_insert_pvid) {
+ sfc_err(sa, "Port-based VLAN insertion not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+/**
+ * Destroy excess queues that are no longer needed after reconfiguration
+ * or complete close.
+ */
+static void
+sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
+{
+ int sw_index;
+
+ SFC_ASSERT(nb_tx_queues <= sa->txq_count);
+
+ sw_index = sa->txq_count;
+ while (--sw_index >= (int)nb_tx_queues) {
+ if (sa->txq_info[sw_index].txq != NULL)
+ sfc_tx_qfini(sa, sw_index);
+ }
+
+ sa->txq_count = nb_tx_queues;
+}
+
+int
+sfc_tx_configure(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
+ const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
+ int rc = 0;
+
+ sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
+ nb_tx_queues, sa->txq_count);
+
+ /*
+ * The datapath implementation assumes absence of boundary
+ * limits on Tx DMA descriptors. Addition of these checks on
+ * datapath would simply make the datapath slower.
+ */
+ if (encp->enc_tx_dma_desc_boundary != 0) {
+ rc = ENOTSUP;
+ goto fail_tx_dma_desc_boundary;
+ }
+
+ rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
+ if (rc != 0)
+ goto fail_check_mode;
+
+ if (nb_tx_queues == sa->txq_count)
+ goto done;
+
+ if (sa->txq_info == NULL) {
+ sa->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
+ sizeof(sa->txq_info[0]), 0,
+ sa->socket_id);
+ if (sa->txq_info == NULL)
+ goto fail_txqs_alloc;
+ } else {
+ struct sfc_txq_info *new_txq_info;
+
+ if (nb_tx_queues < sa->txq_count)
+ sfc_tx_fini_queues(sa, nb_tx_queues);
+
+ new_txq_info =
+ rte_realloc(sa->txq_info,
+ nb_tx_queues * sizeof(sa->txq_info[0]), 0);
+ if (new_txq_info == NULL && nb_tx_queues > 0)
+ goto fail_txqs_realloc;
+
+ sa->txq_info = new_txq_info;
+ if (nb_tx_queues > sa->txq_count)
+ memset(&sa->txq_info[sa->txq_count], 0,
+ (nb_tx_queues - sa->txq_count) *
+ sizeof(sa->txq_info[0]));
+ }
+
+ while (sa->txq_count < nb_tx_queues) {
+ rc = sfc_tx_qinit_info(sa, sa->txq_count);
+ if (rc != 0)
+ goto fail_tx_qinit_info;
+
+ sa->txq_count++;
+ }
+
+done:
+ return 0;
+
+fail_tx_qinit_info:
+fail_txqs_realloc:
+fail_txqs_alloc:
+ sfc_tx_close(sa);
+
+fail_check_mode:
+fail_tx_dma_desc_boundary:
+ sfc_log_init(sa, "failed (rc = %d)", rc);
+ return rc;
+}
+
+void
+sfc_tx_close(struct sfc_adapter *sa)
+{
+ sfc_tx_fini_queues(sa, 0);
+
+ rte_free(sa->txq_info);
+ sa->txq_info = NULL;
+}
+
+int
+sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) |
+ sfc_tx_get_queue_offload_caps(sa);
+ struct rte_eth_dev_data *dev_data;
+ struct sfc_txq_info *txq_info;
+ struct sfc_txq *txq;
+ struct sfc_evq *evq;
+ uint16_t flags = 0;
+ unsigned int desc_index;
+ int rc = 0;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ txq = txq_info->txq;
+
+ SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
+
+ evq = txq->evq;
+
+ rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
+ if (rc != 0)
+ goto fail_ev_qstart;
+
+ if (txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ flags |= EFX_TXQ_CKSUM_IPV4;
+
+ if (txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+ flags |= EFX_TXQ_CKSUM_INNER_IPV4;
+
+ if ((txq->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
+ (txq->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+ flags |= EFX_TXQ_CKSUM_TCPUDP;
+
+ if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+ flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
+ }
+
+ if (txq->offloads & DEV_TX_OFFLOAD_TCP_TSO)
+ flags |= EFX_TXQ_FATSOV2;
+
+ rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
+ txq_info->entries, 0 /* not used on EF10 */,
+ flags, evq->common,
+ &txq->common, &desc_index);
+ if (rc != 0) {
+ if (sa->tso && (rc == ENOSPC))
+ sfc_err(sa, "ran out of TSO contexts");
+
+ goto fail_tx_qcreate;
+ }
+
+ efx_tx_qenable(txq->common);
+
+ txq->state |= SFC_TXQ_STARTED;
+
+ rc = sa->dp_tx->qstart(txq->dp, evq->read_ptr, desc_index);
+ if (rc != 0)
+ goto fail_dp_qstart;
+
+ /*
+ * It seems to be used by DPDK for debug purposes only ('rte_ether')
+ */
+ dev_data = sa->eth_dev->data;
+ dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+
+fail_dp_qstart:
+ txq->state = SFC_TXQ_INITIALIZED;
+ efx_tx_qdestroy(txq->common);
+
+fail_tx_qcreate:
+ sfc_ev_qstop(evq);
+
+fail_ev_qstart:
+ return rc;
+}
+
+void
+sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct rte_eth_dev_data *dev_data;
+ struct sfc_txq_info *txq_info;
+ struct sfc_txq *txq;
+ unsigned int retry_count;
+ unsigned int wait_count;
+ int rc;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ txq = txq_info->txq;
+
+ if (txq->state == SFC_TXQ_INITIALIZED)
+ return;
+
+ SFC_ASSERT(txq->state & SFC_TXQ_STARTED);
+
+ sa->dp_tx->qstop(txq->dp, &txq->evq->read_ptr);
+
+ /*
+ * Retry TX queue flushing in case of flush failed or
+ * timeout; in the worst case it can delay for 6 seconds
+ */
+ for (retry_count = 0;
+ ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
+ (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
+ ++retry_count) {
+ rc = efx_tx_qflush(txq->common);
+ if (rc != 0) {
+ txq->state |= (rc == EALREADY) ?
+ SFC_TXQ_FLUSHED : SFC_TXQ_FLUSH_FAILED;
+ break;
+ }
+
+ /*
+ * Wait for TX queue flush done or flush failed event at least
+ * SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and not more
+ * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
+ * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
+ */
+ wait_count = 0;
+ do {
+ rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
+ sfc_ev_qpoll(txq->evq);
+ } while ((txq->state & SFC_TXQ_FLUSHING) &&
+ wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);
+
+ if (txq->state & SFC_TXQ_FLUSHING)
+ sfc_err(sa, "TxQ %u flush timed out", sw_index);
+
+ if (txq->state & SFC_TXQ_FLUSHED)
+ sfc_notice(sa, "TxQ %u flushed", sw_index);
+ }
+
+ sa->dp_tx->qreap(txq->dp);
+
+ txq->state = SFC_TXQ_INITIALIZED;
+
+ efx_tx_qdestroy(txq->common);
+
+ sfc_ev_qstop(txq->evq);
+
+ /*
+ * It seems to be used by DPDK for debug purposes only ('rte_ether')
+ */
+ dev_data = sa->eth_dev->data;
+ dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
+}
+
+int
+sfc_tx_start(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+ int rc = 0;
+
+ sfc_log_init(sa, "txq_count = %u", sa->txq_count);
+
+ if (sa->tso) {
+ if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
+ sfc_warn(sa, "TSO support was unable to be restored");
+ sa->tso = B_FALSE;
+ }
+ }
+
+ rc = efx_tx_init(sa->nic);
+ if (rc != 0)
+ goto fail_efx_tx_init;
+
+ for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
+ if (!(sa->txq_info[sw_index].deferred_start) ||
+ sa->txq_info[sw_index].deferred_started) {
+ rc = sfc_tx_qstart(sa, sw_index);
+ if (rc != 0)
+ goto fail_tx_qstart;
+ }
+ }
+
+ return 0;
+
+fail_tx_qstart:
+ while (sw_index-- > 0)
+ sfc_tx_qstop(sa, sw_index);
+
+ efx_tx_fini(sa->nic);
+
+fail_efx_tx_init:
+ sfc_log_init(sa, "failed (rc = %d)", rc);
+ return rc;
+}
+
+void
+sfc_tx_stop(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+
+ sfc_log_init(sa, "txq_count = %u", sa->txq_count);
+
+ sw_index = sa->txq_count;
+ while (sw_index-- > 0) {
+ if (sa->txq_info[sw_index].txq != NULL)
+ sfc_tx_qstop(sa, sw_index);
+ }
+
+ efx_tx_fini(sa->nic);
+}
+
+static void
+sfc_efx_tx_reap(struct sfc_efx_txq *txq)
+{
+ unsigned int completed;
+
+ sfc_ev_qpoll(txq->evq);
+
+ for (completed = txq->completed;
+ completed != txq->pending; completed++) {
+ struct sfc_efx_tx_sw_desc *txd;
+
+ txd = &txq->sw_ring[completed & txq->ptr_mask];
+
+ if (txd->mbuf != NULL) {
+ rte_pktmbuf_free(txd->mbuf);
+ txd->mbuf = NULL;
+ }
+ }
+
+ txq->completed = completed;
+}
+
+/*
+ * The function is used to insert or update VLAN tag;
+ * the firmware has state of the firmware tag to insert per TxQ
+ * (controlled by option descriptors), hence, if the tag of the
+ * packet to be sent is different from one remembered by the firmware,
+ * the function will update it
+ */
+static unsigned int
+sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
+ efx_desc_t **pend)
+{
+ uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
+ m->vlan_tci : 0);
+
+ if (this_tag == txq->hw_vlan_tci)
+ return 0;
+
+ /*
+ * The expression inside SFC_ASSERT() is not desired to be checked in
+ * a non-debug build because it might be too expensive on the data path
+ */
+ SFC_ASSERT(efx_nic_cfg_get(txq->evq->sa->nic)->enc_hw_tx_insert_vlan_enabled);
+
+ efx_tx_qdesc_vlantci_create(txq->common, rte_cpu_to_be_16(this_tag),
+ *pend);
+ (*pend)++;
+ txq->hw_vlan_tci = this_tag;
+
+ return 1;
+}
+
+static uint16_t
+sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_dp_txq *dp_txq = (struct sfc_dp_txq *)tx_queue;
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ unsigned int added = txq->added;
+ unsigned int pushed = added;
+ unsigned int pkts_sent = 0;
+ efx_desc_t *pend = &txq->pend_desc[0];
+ const unsigned int hard_max_fill = txq->max_fill_level;
+ const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh;
+ unsigned int fill_level = added - txq->completed;
+ boolean_t reap_done;
+ int rc __rte_unused;
+ struct rte_mbuf **pktp;
+
+ if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) == 0))
+ goto done;
+
+ /*
+ * If insufficient space for a single packet is present,
+ * we should reap; otherwise, we shouldn't do that all the time
+ * to avoid latency increase
+ */
+ reap_done = (fill_level > soft_max_fill);
+
+ if (reap_done) {
+ sfc_efx_tx_reap(txq);
+ /*
+ * Recalculate fill level since 'txq->completed'
+ * might have changed on reap
+ */
+ fill_level = added - txq->completed;
+ }
+
+ for (pkts_sent = 0, pktp = &tx_pkts[0];
+ (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
+ pkts_sent++, pktp++) {
+ struct rte_mbuf *m_seg = *pktp;
+ size_t pkt_len = m_seg->pkt_len;
+ unsigned int pkt_descs = 0;
+ size_t in_off = 0;
+
+ /*
+ * Here VLAN TCI is expected to be zero in case if no
+ * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
+ * if the calling app ignores the absence of
+ * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
+ * TX_ERROR will occur
+ */
+ pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
+
+ if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
+ /*
+ * We expect correct 'pkt->l[2, 3, 4]_len' values
+ * to be set correctly by the caller
+ */
+ if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
+ &pkt_descs, &pkt_len) != 0) {
+ /* We may have reached this place for
+ * one of the following reasons:
+ *
+ * 1) Packet header length is greater
+ * than SFC_TSOH_STD_LEN
+ * 2) TCP header starts at more then
+ * 208 bytes into the frame
+ *
+ * We will deceive RTE saying that we have sent
+ * the packet, but we will actually drop it.
+ * Hence, we should revert 'pend' to the
+ * previous state (in case we have added
+ * VLAN descriptor) and start processing
+ * another one packet. But the original
+ * mbuf shouldn't be orphaned
+ */
+ pend -= pkt_descs;
+
+ rte_pktmbuf_free(*pktp);
+
+ continue;
+ }
+
+ /*
+ * We've only added 2 FATSOv2 option descriptors
+ * and 1 descriptor for the linearized packet header.
+ * The outstanding work will be done in the same manner
+ * as for the usual non-TSO path
+ */
+ }
+
+ for (; m_seg != NULL; m_seg = m_seg->next) {
+ efsys_dma_addr_t next_frag;
+ size_t seg_len;
+
+ seg_len = m_seg->data_len;
+ next_frag = rte_mbuf_data_iova(m_seg);
+
+ /*
+ * If we've started TSO transaction few steps earlier,
+ * we'll skip packet header using an offset in the
+ * current segment (which has been set to the
+ * first one containing payload)
+ */
+ seg_len -= in_off;
+ next_frag += in_off;
+ in_off = 0;
+
+ do {
+ efsys_dma_addr_t frag_addr = next_frag;
+ size_t frag_len;
+
+ /*
+ * It is assumed here that there is no
+ * limitation on address boundary
+ * crossing by DMA descriptor.
+ */
+ frag_len = MIN(seg_len, txq->dma_desc_size_max);
+ next_frag += frag_len;
+ seg_len -= frag_len;
+ pkt_len -= frag_len;
+
+ efx_tx_qdesc_dma_create(txq->common,
+ frag_addr, frag_len,
+ (pkt_len == 0),
+ pend++);
+
+ pkt_descs++;
+ } while (seg_len != 0);
+ }
+
+ added += pkt_descs;
+
+ fill_level += pkt_descs;
+ if (unlikely(fill_level > hard_max_fill)) {
+ /*
+ * Our estimation for maximum number of descriptors
+ * required to send a packet seems to be wrong.
+ * Try to reap (if we haven't yet).
+ */
+ if (!reap_done) {
+ sfc_efx_tx_reap(txq);
+ reap_done = B_TRUE;
+ fill_level = added - txq->completed;
+ if (fill_level > hard_max_fill) {
+ pend -= pkt_descs;
+ break;
+ }
+ } else {
+ pend -= pkt_descs;
+ break;
+ }
+ }
+
+ /* Assign mbuf to the last used desc */
+ txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp;
+ }
+
+ if (likely(pkts_sent > 0)) {
+ rc = efx_tx_qdesc_post(txq->common, txq->pend_desc,
+ pend - &txq->pend_desc[0],
+ txq->completed, &txq->added);
+ SFC_ASSERT(rc == 0);
+
+ if (likely(pushed != txq->added))
+ efx_tx_qpush(txq->common, txq->added, pushed);
+ }
+
+#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
+ if (!reap_done)
+ sfc_efx_tx_reap(txq);
+#endif
+
+done:
+ return pkts_sent;
+}
+
+struct sfc_txq *
+sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
+{
+ const struct sfc_dp_queue *dpq = &dp_txq->dpq;
+ struct rte_eth_dev *eth_dev;
+ struct sfc_adapter *sa;
+ struct sfc_txq *txq;
+
+ SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
+ eth_dev = &rte_eth_devices[dpq->port_id];
+
+ sa = eth_dev->data->dev_private;
+
+ SFC_ASSERT(dpq->queue_id < sa->txq_count);
+ txq = sa->txq_info[dpq->queue_id].txq;
+
+ SFC_ASSERT(txq != NULL);
+ return txq;
+}
+
+static sfc_dp_tx_qsize_up_rings_t sfc_efx_tx_qsize_up_rings;
+static int
+sfc_efx_tx_qsize_up_rings(uint16_t nb_tx_desc,
+ unsigned int *txq_entries,
+ unsigned int *evq_entries,
+ unsigned int *txq_max_fill_level)
+{
+ *txq_entries = nb_tx_desc;
+ *evq_entries = nb_tx_desc;
+ *txq_max_fill_level = EFX_TXQ_LIMIT(*txq_entries);
+ return 0;
+}
+
+static sfc_dp_tx_qcreate_t sfc_efx_tx_qcreate;
+static int
+sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_tx_qcreate_info *info,
+ struct sfc_dp_txq **dp_txqp)
+{
+ struct sfc_efx_txq *txq;
+ struct sfc_txq *ctrl_txq;
+ int rc;
+
+ rc = ENOMEM;
+ txq = rte_zmalloc_socket("sfc-efx-txq", sizeof(*txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ goto fail_txq_alloc;
+
+ sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ txq->pend_desc = rte_calloc_socket("sfc-efx-txq-pend-desc",
+ EFX_TXQ_LIMIT(info->txq_entries),
+ sizeof(*txq->pend_desc), 0,
+ socket_id);
+ if (txq->pend_desc == NULL)
+ goto fail_pend_desc_alloc;
+
+ rc = ENOMEM;
+ txq->sw_ring = rte_calloc_socket("sfc-efx-txq-sw_ring",
+ info->txq_entries,
+ sizeof(*txq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL)
+ goto fail_sw_ring_alloc;
+
+ ctrl_txq = sfc_txq_by_dp_txq(&txq->dp);
+ if (ctrl_txq->evq->sa->tso) {
+ rc = sfc_efx_tso_alloc_tsoh_objs(txq->sw_ring,
+ info->txq_entries, socket_id);
+ if (rc != 0)
+ goto fail_alloc_tsoh_objs;
+ }
+
+ txq->evq = ctrl_txq->evq;
+ txq->ptr_mask = info->txq_entries - 1;
+ txq->max_fill_level = info->max_fill_level;
+ txq->free_thresh = info->free_thresh;
+ txq->dma_desc_size_max = info->dma_desc_size_max;
+
+ *dp_txqp = &txq->dp;
+ return 0;
+
+fail_alloc_tsoh_objs:
+ rte_free(txq->sw_ring);
+
+fail_sw_ring_alloc:
+ rte_free(txq->pend_desc);
+
+fail_pend_desc_alloc:
+ rte_free(txq);
+
+fail_txq_alloc:
+ return rc;
+}
+
+static sfc_dp_tx_qdestroy_t sfc_efx_tx_qdestroy;
+static void
+sfc_efx_tx_qdestroy(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+ sfc_efx_tso_free_tsoh_objs(txq->sw_ring, txq->ptr_mask + 1);
+ rte_free(txq->sw_ring);
+ rte_free(txq->pend_desc);
+ rte_free(txq);
+}
+
+static sfc_dp_tx_qstart_t sfc_efx_tx_qstart;
+static int
+sfc_efx_tx_qstart(struct sfc_dp_txq *dp_txq,
+ __rte_unused unsigned int evq_read_ptr,
+ unsigned int txq_desc_index)
+{
+ /* libefx-based datapath is specific to libefx-based PMD */
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ struct sfc_txq *ctrl_txq = sfc_txq_by_dp_txq(dp_txq);
+
+ txq->common = ctrl_txq->common;
+
+ txq->pending = txq->completed = txq->added = txq_desc_index;
+ txq->hw_vlan_tci = 0;
+
+ txq->flags |= (SFC_EFX_TXQ_FLAG_STARTED | SFC_EFX_TXQ_FLAG_RUNNING);
+
+ return 0;
+}
+
+static sfc_dp_tx_qstop_t sfc_efx_tx_qstop;
+static void
+sfc_efx_tx_qstop(struct sfc_dp_txq *dp_txq,
+ __rte_unused unsigned int *evq_read_ptr)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+ txq->flags &= ~SFC_EFX_TXQ_FLAG_RUNNING;
+}
+
+static sfc_dp_tx_qreap_t sfc_efx_tx_qreap;
+static void
+sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ unsigned int txds;
+
+ sfc_efx_tx_reap(txq);
+
+ for (txds = 0; txds <= txq->ptr_mask; txds++) {
+ if (txq->sw_ring[txds].mbuf != NULL) {
+ rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
+ txq->sw_ring[txds].mbuf = NULL;
+ }
+ }
+
+ txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
+}
+
+static sfc_dp_tx_qdesc_status_t sfc_efx_tx_qdesc_status;
+static int
+sfc_efx_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+ if (unlikely(offset > txq->ptr_mask))
+ return -EINVAL;
+
+ if (unlikely(offset >= txq->max_fill_level))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+
+ /*
+ * Poll EvQ to derive up-to-date 'txq->pending' figure;
+ * it is required for the queue to be running, but the
+ * check is omitted because API design assumes that it
+ * is the duty of the caller to satisfy all conditions
+ */
+ SFC_ASSERT((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) ==
+ SFC_EFX_TXQ_FLAG_RUNNING);
+ sfc_ev_qpoll(txq->evq);
+
+ /*
+ * Ring tail is 'txq->pending', and although descriptors
+ * between 'txq->completed' and 'txq->pending' are still
+ * in use by the driver, they should be reported as DONE
+ */
+ if (unlikely(offset < (txq->added - txq->pending)))
+ return RTE_ETH_TX_DESC_FULL;
+
+ /*
+ * There is no separate return value for unused descriptors;
+ * the latter will be reported as DONE because genuine DONE
+ * descriptors will be freed anyway in SW on the next burst
+ */
+ return RTE_ETH_TX_DESC_DONE;
+}
+
+struct sfc_dp_tx sfc_efx_tx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EFX,
+ .type = SFC_DP_TX,
+ .hw_fw_caps = 0,
+ },
+ .features = SFC_DP_TX_FEAT_VLAN_INSERT |
+ SFC_DP_TX_FEAT_TSO |
+ SFC_DP_TX_FEAT_MULTI_POOL |
+ SFC_DP_TX_FEAT_REFCNT |
+ SFC_DP_TX_FEAT_MULTI_SEG,
+ .qsize_up_rings = sfc_efx_tx_qsize_up_rings,
+ .qcreate = sfc_efx_tx_qcreate,
+ .qdestroy = sfc_efx_tx_qdestroy,
+ .qstart = sfc_efx_tx_qstart,
+ .qstop = sfc_efx_tx_qstop,
+ .qreap = sfc_efx_tx_qreap,
+ .qdesc_status = sfc_efx_tx_qdesc_status,
+ .pkt_burst = sfc_efx_xmit_pkts,
+};
diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_tx.h b/src/spdk/dpdk/drivers/net/sfc/sfc_tx.h
new file mode 100644
index 00000000..146b805c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/sfc_tx.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_TX_H
+#define _SFC_TX_H
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+
+#include "efx.h"
+
+#include "sfc_dp_tx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_adapter;
+struct sfc_evq;
+
+/**
+ * Software Tx descriptor information associated with hardware Tx
+ * descriptor.
+ */
+struct sfc_efx_tx_sw_desc {
+ struct rte_mbuf *mbuf;
+ uint8_t *tsoh; /* Buffer to store TSO header */
+};
+
+enum sfc_txq_state_bit {
+ SFC_TXQ_INITIALIZED_BIT = 0,
+#define SFC_TXQ_INITIALIZED (1 << SFC_TXQ_INITIALIZED_BIT)
+ SFC_TXQ_STARTED_BIT,
+#define SFC_TXQ_STARTED (1 << SFC_TXQ_STARTED_BIT)
+ SFC_TXQ_FLUSHING_BIT,
+#define SFC_TXQ_FLUSHING (1 << SFC_TXQ_FLUSHING_BIT)
+ SFC_TXQ_FLUSHED_BIT,
+#define SFC_TXQ_FLUSHED (1 << SFC_TXQ_FLUSHED_BIT)
+ SFC_TXQ_FLUSH_FAILED_BIT,
+#define SFC_TXQ_FLUSH_FAILED (1 << SFC_TXQ_FLUSH_FAILED_BIT)
+};
+
+/**
+ * Transmit queue control information. Not used on datapath.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_txq {
+ unsigned int state;
+ unsigned int hw_index;
+ struct sfc_evq *evq;
+ efsys_mem_t mem;
+ struct sfc_dp_txq *dp;
+ efx_txq_t *common;
+ unsigned int free_thresh;
+ uint64_t offloads;
+};
+
+static inline unsigned int
+sfc_txq_sw_index_by_hw_index(unsigned int hw_index)
+{
+ return hw_index;
+}
+
+static inline unsigned int
+sfc_txq_sw_index(const struct sfc_txq *txq)
+{
+ return sfc_txq_sw_index_by_hw_index(txq->hw_index);
+}
+
+struct sfc_txq *sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq);
+
+/**
+ * Transmit queue information used on libefx-based data path.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_efx_txq {
+ struct sfc_evq *evq;
+ struct sfc_efx_tx_sw_desc *sw_ring;
+ unsigned int ptr_mask;
+ efx_desc_t *pend_desc;
+ efx_txq_t *common;
+ unsigned int added;
+ unsigned int pending;
+ unsigned int completed;
+ unsigned int max_fill_level;
+ unsigned int free_thresh;
+ uint16_t hw_vlan_tci;
+ uint16_t dma_desc_size_max;
+
+ unsigned int hw_index;
+ unsigned int flags;
+#define SFC_EFX_TXQ_FLAG_STARTED 0x1
+#define SFC_EFX_TXQ_FLAG_RUNNING 0x2
+
+ /* Datapath transmit queue anchor */
+ struct sfc_dp_txq dp;
+};
+
+static inline struct sfc_efx_txq *
+sfc_efx_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
+{
+ return container_of(dp_txq, struct sfc_efx_txq, dp);
+}
+
+struct sfc_txq_info {
+ unsigned int entries;
+ struct sfc_txq *txq;
+ boolean_t deferred_start;
+ boolean_t deferred_started;
+};
+
+int sfc_tx_configure(struct sfc_adapter *sa);
+void sfc_tx_close(struct sfc_adapter *sa);
+
+int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
+
+void sfc_tx_qflush_done(struct sfc_txq *txq);
+int sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
+void sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+int sfc_tx_start(struct sfc_adapter *sa);
+void sfc_tx_stop(struct sfc_adapter *sa);
+
+uint64_t sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa);
+uint64_t sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa);
+
+/* From 'sfc_tso.c' */
+int sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries,
+ unsigned int socket_id);
+void sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries);
+int sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
+ struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
+ unsigned int *pkt_descs, size_t *pkt_len);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_TX_H */
diff --git a/src/spdk/dpdk/drivers/net/softnic/Makefile b/src/spdk/dpdk/drivers/net/softnic/Makefile
new file mode 100644
index 00000000..ea9b65f4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/Makefile
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_softnic.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_pipeline -lrte_port -lrte_table
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_sched
+LDLIBS += -lrte_bus_vdev
+
+EXPORT_MAP := rte_pmd_softnic_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_mempool.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_swq.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_link.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tap.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_action.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_pipeline.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_thread.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_cli.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += parser.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += conn.c
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_eth_softnic.h
+
+ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+$(info Softnic PMD can only operate in a linuxapp environment, \
+please change the definition of the RTE_TARGET environment variable)
+all:
+clean:
+else
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
+endif
diff --git a/src/spdk/dpdk/drivers/net/softnic/conn.c b/src/spdk/dpdk/drivers/net/softnic/conn.c
new file mode 100644
index 00000000..990cf40f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/conn.c
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+
+#define __USE_GNU
+#include <sys/socket.h>
+
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <errno.h>
+
+#include "conn.h"
+
+#define MSG_CMD_TOO_LONG "Command too long."
+
+struct softnic_conn {
+ char *welcome;
+ char *prompt;
+ char *buf;
+ char *msg_in;
+ char *msg_out;
+ size_t buf_size;
+ size_t msg_in_len_max;
+ size_t msg_out_len_max;
+ size_t msg_in_len;
+ int fd_server;
+ int fd_client_group;
+ softnic_conn_msg_handle_t msg_handle;
+ void *msg_handle_arg;
+};
+
+struct softnic_conn *
+softnic_conn_init(struct softnic_conn_params *p)
+{
+ struct sockaddr_in server_address;
+ struct softnic_conn *conn;
+ int fd_server, fd_client_group, status;
+
+ memset(&server_address, 0, sizeof(server_address));
+
+ /* Check input arguments */
+ if (p == NULL ||
+ p->welcome == NULL ||
+ p->prompt == NULL ||
+ p->addr == NULL ||
+ p->buf_size == 0 ||
+ p->msg_in_len_max == 0 ||
+ p->msg_out_len_max == 0 ||
+ p->msg_handle == NULL)
+ return NULL;
+
+ status = inet_aton(p->addr, &server_address.sin_addr);
+ if (status == 0)
+ return NULL;
+
+ /* Memory allocation */
+ conn = calloc(1, sizeof(struct softnic_conn));
+ if (conn == NULL)
+ return NULL;
+
+ conn->welcome = calloc(1, CONN_WELCOME_LEN_MAX + 1);
+ conn->prompt = calloc(1, CONN_PROMPT_LEN_MAX + 1);
+ conn->buf = calloc(1, p->buf_size);
+ conn->msg_in = calloc(1, p->msg_in_len_max + 1);
+ conn->msg_out = calloc(1, p->msg_out_len_max + 1);
+
+ if (conn->welcome == NULL ||
+ conn->prompt == NULL ||
+ conn->buf == NULL ||
+ conn->msg_in == NULL ||
+ conn->msg_out == NULL) {
+ softnic_conn_free(conn);
+ return NULL;
+ }
+
+ /* Server socket */
+ server_address.sin_family = AF_INET;
+ server_address.sin_port = htons(p->port);
+
+ fd_server = socket(AF_INET,
+ SOCK_STREAM | SOCK_NONBLOCK,
+ 0);
+ if (fd_server == -1) {
+ softnic_conn_free(conn);
+ return NULL;
+ }
+
+ status = bind(fd_server,
+ (struct sockaddr *)&server_address,
+ sizeof(server_address));
+ if (status == -1) {
+ softnic_conn_free(conn);
+ close(fd_server);
+ return NULL;
+ }
+
+ status = listen(fd_server, 16);
+ if (status == -1) {
+ softnic_conn_free(conn);
+ close(fd_server);
+ return NULL;
+ }
+
+ /* Client group */
+ fd_client_group = epoll_create(1);
+ if (fd_client_group == -1) {
+ softnic_conn_free(conn);
+ close(fd_server);
+ return NULL;
+ }
+
+ /* Fill in */
+ strncpy(conn->welcome, p->welcome, CONN_WELCOME_LEN_MAX);
+ strncpy(conn->prompt, p->prompt, CONN_PROMPT_LEN_MAX);
+ conn->buf_size = p->buf_size;
+ conn->msg_in_len_max = p->msg_in_len_max;
+ conn->msg_out_len_max = p->msg_out_len_max;
+ conn->msg_in_len = 0;
+ conn->fd_server = fd_server;
+ conn->fd_client_group = fd_client_group;
+ conn->msg_handle = p->msg_handle;
+ conn->msg_handle_arg = p->msg_handle_arg;
+
+ return conn;
+}
+
+void
+softnic_conn_free(struct softnic_conn *conn)
+{
+ if (conn == NULL)
+ return;
+
+ if (conn->fd_client_group)
+ close(conn->fd_client_group);
+
+ if (conn->fd_server)
+ close(conn->fd_server);
+
+ free(conn->msg_out);
+ free(conn->msg_in);
+ free(conn->prompt);
+ free(conn->welcome);
+ free(conn);
+}
+
+int
+softnic_conn_poll_for_conn(struct softnic_conn *conn)
+{
+ struct sockaddr_in client_address;
+ struct epoll_event event;
+ socklen_t client_address_length;
+ int fd_client, status;
+
+ /* Check input arguments */
+ if (conn == NULL)
+ return -1;
+
+ /* Server socket */
+ client_address_length = sizeof(client_address);
+ fd_client = accept4(conn->fd_server,
+ (struct sockaddr *)&client_address,
+ &client_address_length,
+ SOCK_NONBLOCK);
+ if (fd_client == -1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ return 0;
+
+ return -1;
+ }
+
+ /* Client group */
+ event.events = EPOLLIN | EPOLLRDHUP | EPOLLHUP;
+ event.data.fd = fd_client;
+
+ status = epoll_ctl(conn->fd_client_group,
+ EPOLL_CTL_ADD,
+ fd_client,
+ &event);
+ if (status == -1) {
+ close(fd_client);
+ return -1;
+ }
+
+ /* Client */
+ status = write(fd_client,
+ conn->welcome,
+ strlen(conn->welcome));
+ if (status == -1) {
+ close(fd_client);
+ return -1;
+ }
+
+ status = write(fd_client,
+ conn->prompt,
+ strlen(conn->prompt));
+ if (status == -1) {
+ close(fd_client);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+data_event_handle(struct softnic_conn *conn,
+ int fd_client)
+{
+ ssize_t len, i, status;
+
+ /* Read input message */
+
+ len = read(fd_client,
+ conn->buf,
+ conn->buf_size);
+ if (len == -1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ return 0;
+
+ return -1;
+ }
+ if (len == 0)
+ return 0;
+
+ /* Handle input messages */
+ for (i = 0; i < len; i++) {
+ if (conn->buf[i] == '\n') {
+ size_t n;
+
+ conn->msg_in[conn->msg_in_len] = 0;
+ conn->msg_out[0] = 0;
+
+ conn->msg_handle(conn->msg_in,
+ conn->msg_out,
+ conn->msg_out_len_max,
+ conn->msg_handle_arg);
+
+ n = strlen(conn->msg_out);
+ if (n) {
+ status = write(fd_client,
+ conn->msg_out,
+ n);
+ if (status == -1)
+ return status;
+ }
+
+ conn->msg_in_len = 0;
+ } else if (conn->msg_in_len < conn->msg_in_len_max) {
+ conn->msg_in[conn->msg_in_len] = conn->buf[i];
+ conn->msg_in_len++;
+ } else {
+ status = write(fd_client,
+ MSG_CMD_TOO_LONG,
+ strlen(MSG_CMD_TOO_LONG));
+ if (status == -1)
+ return status;
+
+ conn->msg_in_len = 0;
+ }
+ }
+
+ /* Write prompt */
+ status = write(fd_client,
+ conn->prompt,
+ strlen(conn->prompt));
+ if (status == -1)
+ return status;
+
+ return 0;
+}
+
+static int
+control_event_handle(struct softnic_conn *conn,
+ int fd_client)
+{
+ int status;
+
+ status = epoll_ctl(conn->fd_client_group,
+ EPOLL_CTL_DEL,
+ fd_client,
+ NULL);
+ if (status == -1)
+ return -1;
+
+ status = close(fd_client);
+ if (status == -1)
+ return -1;
+
+ return 0;
+}
+
+int
+softnic_conn_poll_for_msg(struct softnic_conn *conn)
+{
+ struct epoll_event event;
+ int fd_client, status, status_data = 0, status_control = 0;
+
+ /* Check input arguments */
+ if (conn == NULL)
+ return -1;
+
+ /* Client group */
+ status = epoll_wait(conn->fd_client_group,
+ &event,
+ 1,
+ 0);
+ if (status == -1)
+ return -1;
+ if (status == 0)
+ return 0;
+
+ fd_client = event.data.fd;
+
+ /* Data available */
+ if (event.events & EPOLLIN)
+ status_data = data_event_handle(conn, fd_client);
+
+ /* Control events */
+ if (event.events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP))
+ status_control = control_event_handle(conn, fd_client);
+
+ if (status_data || status_control)
+ return -1;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/softnic/conn.h b/src/spdk/dpdk/drivers/net/softnic/conn.h
new file mode 100644
index 00000000..631edeef
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/conn.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef __INCLUDE_CONN_H__
+#define __INCLUDE_CONN_H__
+
+#include <stdint.h>
+
+struct softnic_conn;
+
+#ifndef CONN_WELCOME_LEN_MAX
+#define CONN_WELCOME_LEN_MAX 1024
+#endif
+
+#ifndef CONN_PROMPT_LEN_MAX
+#define CONN_PROMPT_LEN_MAX 16
+#endif
+
+typedef void (*softnic_conn_msg_handle_t)(char *msg_in,
+ char *msg_out,
+ size_t msg_out_len_max,
+ void *arg);
+
+struct softnic_conn_params {
+ const char *welcome;
+ const char *prompt;
+ const char *addr;
+ uint16_t port;
+ size_t buf_size;
+ size_t msg_in_len_max;
+ size_t msg_out_len_max;
+ softnic_conn_msg_handle_t msg_handle;
+ void *msg_handle_arg;
+};
+
+struct softnic_conn *
+softnic_conn_init(struct softnic_conn_params *p);
+
+void
+softnic_conn_free(struct softnic_conn *conn);
+
+int
+softnic_conn_poll_for_conn(struct softnic_conn *conn);
+
+int
+softnic_conn_poll_for_msg(struct softnic_conn *conn);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/softnic/firmware.cli b/src/spdk/dpdk/drivers/net/softnic/firmware.cli
new file mode 100644
index 00000000..300cf6e3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/firmware.cli
@@ -0,0 +1,21 @@
+; SPDX-License-Identifier: BSD-3-Clause
+; Copyright(c) 2018 Intel Corporation
+
+link LINK dev 0000:02:00.0
+
+pipeline RX period 10 offset_port_id 0
+pipeline RX port in bsz 32 link LINK rxq 0
+pipeline RX port out bsz 32 swq RXQ0
+pipeline RX table match stub
+pipeline RX port in 0 table 0
+pipeline RX table 0 rule add match default action fwd port 0
+
+pipeline TX period 10 offset_port_id 0
+pipeline TX port in bsz 32 swq TXQ0
+pipeline TX port out bsz 32 link LINK txq 0
+pipeline TX table match stub
+pipeline TX port in 0 table 0
+pipeline TX table 0 rule add match default action fwd port 0
+
+thread 1 pipeline RX enable
+thread 1 pipeline TX enable
diff --git a/src/spdk/dpdk/drivers/net/softnic/hash_func.h b/src/spdk/dpdk/drivers/net/softnic/hash_func.h
new file mode 100644
index 00000000..198d2b20
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/hash_func.h
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef __INCLUDE_HASH_FUNC_H__
+#define __INCLUDE_HASH_FUNC_H__
+
+#include <rte_common.h>
+
+static inline uint64_t
+hash_xor_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0;
+
+ xor0 = seed ^ (k[0] & m[0]);
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+
+ xor0 ^= k[2] & m[2];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+
+ xor0 ^= xor1;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+
+ xor0 ^= xor1;
+
+ xor0 ^= k[4] & m[4];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+
+ xor0 ^= xor1;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+
+ xor0 ^= xor1;
+ xor2 ^= k[6] & m[6];
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1, xor2, xor3;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+ xor3 = (k[6] & m[6]) ^ (k[7] & m[7]);
+
+ xor0 ^= xor1;
+ xor2 ^= xor3;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+#if defined(RTE_ARCH_X86_64)
+
+#include <x86intrin.h>
+
+static inline uint64_t
+hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t crc0;
+
+ crc0 = _mm_crc32_u64(seed, k[0] & m[0]);
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, crc0, crc1;
+
+ k0 = k[0] & m[0];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, crc0, crc1;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc0 = _mm_crc32_u64(crc0, k2);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+
+ crc0 = _mm_crc32_u64(crc0, crc1);
+ crc1 = _mm_crc32_u64(crc2, crc3);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
+
+ crc0 = _mm_crc32_u64(crc0, crc1);
+ crc1 = _mm_crc32_u64(crc2, crc3);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, k5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
+
+ crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
+ crc5 = k5 >> 32;
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
+
+ crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
+ crc5 = _mm_crc32_u64(k5 >> 32, k[7] & m[7]);
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+#define hash_default_key8 hash_crc_key8
+#define hash_default_key16 hash_crc_key16
+#define hash_default_key24 hash_crc_key24
+#define hash_default_key32 hash_crc_key32
+#define hash_default_key40 hash_crc_key40
+#define hash_default_key48 hash_crc_key48
+#define hash_default_key56 hash_crc_key56
+#define hash_default_key64 hash_crc_key64
+
+#elif defined(RTE_ARCH_ARM64)
+#include "hash_func_arm64.h"
+#else
+
+#define hash_default_key8 hash_xor_key8
+#define hash_default_key16 hash_xor_key16
+#define hash_default_key24 hash_xor_key24
+#define hash_default_key32 hash_xor_key32
+#define hash_default_key40 hash_xor_key40
+#define hash_default_key48 hash_xor_key48
+#define hash_default_key56 hash_xor_key56
+#define hash_default_key64 hash_xor_key64
+
+#endif
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/softnic/hash_func_arm64.h b/src/spdk/dpdk/drivers/net/softnic/hash_func_arm64.h
new file mode 100644
index 00000000..ae6c0f41
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/hash_func_arm64.h
@@ -0,0 +1,261 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Linaro Limited. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __HASH_FUNC_ARM64_H__
+#define __HASH_FUNC_ARM64_H__
+
+#define _CRC32CX(crc, val) \
+ __asm__("crc32cx %w[c], %w[c], %x[v]":[c] "+r" (crc):[v] "r" (val))
+
+static inline uint64_t
+hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint32_t crc0;
+
+ crc0 = seed;
+ _CRC32CX(crc0, k[0] & m[0]);
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1;
+
+ k0 = k[0] & m[0];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ _CRC32CX(crc0, k2);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+
+ _CRC32CX(crc0, crc1);
+ _CRC32CX(crc2, crc3);
+
+ crc0 ^= crc2;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ _CRC32CX(crc0, crc1);
+ _CRC32CX(crc2, crc3);
+
+ crc0 ^= crc2;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, k5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ crc4 = k5;
+ _CRC32CX(crc4, k[6] & m[6]);
+ crc5 = k5 >> 32;
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ crc4 = k5;
+ _CRC32CX(crc4, k[6] & m[6]);
+ crc5 = k5 >> 32;
+ _CRC32CX(crc5, k[7] & m[7]);
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+#define hash_default_key8 hash_crc_key8
+#define hash_default_key16 hash_crc_key16
+#define hash_default_key24 hash_crc_key24
+#define hash_default_key32 hash_crc_key32
+#define hash_default_key40 hash_crc_key40
+#define hash_default_key48 hash_crc_key48
+#define hash_default_key56 hash_crc_key56
+#define hash_default_key64 hash_crc_key64
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/softnic/meson.build b/src/spdk/dpdk/drivers/net/softnic/meson.build
new file mode 100644
index 00000000..ff982274
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+install_headers('rte_eth_softnic.h')
+sources = files('rte_eth_softnic_tm.c',
+ 'rte_eth_softnic.c',
+ 'rte_eth_softnic_mempool.c',
+ 'rte_eth_softnic_swq.c',
+ 'rte_eth_softnic_link.c',
+ 'rte_eth_softnic_tap.c',
+ 'rte_eth_softnic_action.c',
+ 'rte_eth_softnic_pipeline.c',
+ 'rte_eth_softnic_thread.c',
+ 'rte_eth_softnic_cli.c',
+ 'parser.c',
+ 'conn.c')
+deps += ['pipeline', 'port', 'table', 'sched']
diff --git a/src/spdk/dpdk/drivers/net/softnic/parser.c b/src/spdk/dpdk/drivers/net/softnic/parser.c
new file mode 100644
index 00000000..a8688a21
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/parser.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation.
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ */
+
+/* For inet_pton4() and inet_pton6() functions:
+ *
+ * Copyright (c) 1996 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
+ * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
+ * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+#include <rte_errno.h>
+
+#include "parser.h"
+
+static uint32_t
+get_hex_val(char c)
+{
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ return c - '0';
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ return c - 'A' + 10;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ return c - 'a' + 10;
+ default:
+ return 0;
+ }
+}
+
+int
+softnic_parser_read_arg_bool(const char *p)
+{
+ p = skip_white_spaces(p);
+ int result = -EINVAL;
+
+ if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
+ ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
+ p += 3;
+ result = 1;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'n')) ||
+ ((p[0] == 'O') && (p[1] == 'N'))) {
+ p += 2;
+ result = 1;
+ }
+
+ if (((p[0] == 'n') && (p[1] == 'o')) ||
+ ((p[0] == 'N') && (p[1] == 'O'))) {
+ p += 2;
+ result = 0;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
+ ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
+ p += 3;
+ result = 0;
+ }
+
+ p = skip_white_spaces(p);
+
+ if (p[0] != '\0')
+ return -EINVAL;
+
+ return result;
+}
+
+int
+softnic_parser_read_int32(int32_t *value, const char *p)
+{
+ char *next;
+ int32_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtol(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint64(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtoul(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ p = next;
+ switch (*p) {
+ case 'T':
+ val *= 1024ULL;
+ /* fall through */
+ case 'G':
+ val *= 1024ULL;
+ /* fall through */
+ case 'M':
+ val *= 1024ULL;
+ /* fall through */
+ case 'k':
+ case 'K':
+ val *= 1024ULL;
+ p++;
+ break;
+ }
+
+ p = skip_white_spaces(p);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint64_hex(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+
+ val = strtoul(p, &next, 16);
+ if (p == next)
+ return -EINVAL;
+
+ p = skip_white_spaces(next);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint32(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint32_hex(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint16(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint16_hex(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint8(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint8_hex(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens)
+{
+ uint32_t i;
+
+ if (string == NULL ||
+ tokens == NULL ||
+ (*n_tokens < 1))
+ return -EINVAL;
+
+ for (i = 0; i < *n_tokens; i++) {
+ tokens[i] = strtok_r(string, PARSE_DELIMITER, &string);
+ if (tokens[i] == NULL)
+ break;
+ }
+
+ if (i == *n_tokens &&
+ strtok_r(string, PARSE_DELIMITER, &string) != NULL)
+ return -E2BIG;
+
+ *n_tokens = i;
+ return 0;
+}
+
+int
+softnic_parse_hex_string(char *src, uint8_t *dst, uint32_t *size)
+{
+ char *c;
+ uint32_t len, i;
+
+ /* Check input parameters */
+ if (src == NULL ||
+ dst == NULL ||
+ size == NULL ||
+ (*size == 0))
+ return -1;
+
+ len = strlen(src);
+ if (((len & 3) != 0) ||
+ (len > (*size) * 2))
+ return -1;
+ *size = len / 2;
+
+ for (c = src; *c != 0; c++) {
+ if ((((*c) >= '0') && ((*c) <= '9')) ||
+ (((*c) >= 'A') && ((*c) <= 'F')) ||
+ (((*c) >= 'a') && ((*c) <= 'f')))
+ continue;
+
+ return -1;
+ }
+
+ /* Convert chars to bytes */
+ for (i = 0; i < *size; i++)
+ dst[i] = get_hex_val(src[2 * i]) * 16 +
+ get_hex_val(src[2 * i + 1]);
+
+ return 0;
+}
+
+int
+softnic_parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels)
+{
+ uint32_t n_max_labels = *n_labels, count = 0;
+
+ /* Check for void list of labels */
+ if (strcmp(string, "<void>") == 0) {
+ *n_labels = 0;
+ return 0;
+ }
+
+ /* At least one label should be present */
+ for ( ; (*string != '\0'); ) {
+ char *next;
+ int value;
+
+ if (count >= n_max_labels)
+ return -1;
+
+ if (count > 0) {
+ if (string[0] != ':')
+ return -1;
+
+ string++;
+ }
+
+ value = strtol(string, &next, 10);
+ if (next == string)
+ return -1;
+ string = next;
+
+ labels[count++] = (uint32_t)value;
+ }
+
+ *n_labels = count;
+ return 0;
+}
+
+#define INADDRSZ 4
+#define IN6ADDRSZ 16
+
+/* int
+ * inet_pton4(src, dst)
+ * like inet_aton() but without all the hexadecimal and shorthand.
+ * return:
+ * 1 if `src' is a valid dotted quad, else 0.
+ * notice:
+ * does not touch `dst' unless it's returning 1.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton4(const char *src, unsigned char *dst)
+{
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+ unsigned char tmp[INADDRSZ], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr(digits, ch);
+ if (pch != NULL) {
+ unsigned int new = *tp * 10 + (pch - digits);
+
+ if (new > 255)
+ return 0;
+ if (!saw_digit) {
+ if (++octets > 4)
+ return 0;
+ saw_digit = 1;
+ }
+ *tp = (unsigned char)new;
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return 0;
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return 0;
+ }
+ if (octets < 4)
+ return 0;
+
+ memcpy(dst, tmp, INADDRSZ);
+ return 1;
+}
+
+/* int
+ * inet_pton6(src, dst)
+ * convert presentation level address to network order binary form.
+ * return:
+ * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
+ * notice:
+ * (1) does not touch `dst' unless it's returning 1.
+ * (2) :: in a full address is silently ignored.
+ * credit:
+ * inspired by Mark Andrews.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton6(const char *src, unsigned char *dst)
+{
+ static const char xdigits_l[] = "0123456789abcdef",
+ xdigits_u[] = "0123456789ABCDEF";
+ unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
+ const char *xdigits = 0, *curtok = 0;
+ int ch = 0, saw_xdigit = 0, count_xdigit = 0;
+ unsigned int val = 0;
+ unsigned int dbloct_count = 0;
+
+ memset((tp = tmp), '\0', IN6ADDRSZ);
+ endp = tp + IN6ADDRSZ;
+ colonp = NULL;
+ /* Leading :: requires some special handling. */
+ if (*src == ':')
+ if (*++src != ':')
+ return 0;
+ curtok = src;
+ saw_xdigit = count_xdigit = 0;
+ val = 0;
+
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr((xdigits = xdigits_l), ch);
+ if (pch == NULL)
+ pch = strchr((xdigits = xdigits_u), ch);
+ if (pch != NULL) {
+ if (count_xdigit >= 4)
+ return 0;
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (val > 0xffff)
+ return 0;
+ saw_xdigit = 1;
+ count_xdigit++;
+ continue;
+ }
+ if (ch == ':') {
+ curtok = src;
+ if (!saw_xdigit) {
+ if (colonp)
+ return 0;
+ colonp = tp;
+ continue;
+ } else if (*src == '\0') {
+ return 0;
+ }
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char)((val >> 8) & 0xff);
+ *tp++ = (unsigned char)(val & 0xff);
+ saw_xdigit = 0;
+ count_xdigit = 0;
+ val = 0;
+ dbloct_count++;
+ continue;
+ }
+ if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
+ inet_pton4(curtok, tp) > 0) {
+ tp += INADDRSZ;
+ saw_xdigit = 0;
+ dbloct_count += 2;
+ break; /* '\0' was seen by inet_pton4(). */
+ }
+ return 0;
+ }
+ if (saw_xdigit) {
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char)((val >> 8) & 0xff);
+ *tp++ = (unsigned char)(val & 0xff);
+ dbloct_count++;
+ }
+ if (colonp != NULL) {
+ /* if we already have 8 double octets, having a colon means error */
+ if (dbloct_count == 8)
+ return 0;
+
+ /* Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ for (i = 1; i <= n; i++) {
+ endp[-i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp)
+ return 0;
+ memcpy(dst, tmp, IN6ADDRSZ);
+ return 1;
+}
+
+static struct ether_addr *
+my_ether_aton(const char *a)
+{
+ int i;
+ char *end;
+ unsigned long o[ETHER_ADDR_LEN];
+ static struct ether_addr ether_addr;
+
+ i = 0;
+ do {
+ errno = 0;
+ o[i] = strtoul(a, &end, 16);
+ if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
+ return NULL;
+ a = end + 1;
+ } while (++i != sizeof(o) / sizeof(o[0]) && end[0] != 0);
+
+ /* Junk at the end of line */
+ if (end[0] != 0)
+ return NULL;
+
+ /* Support the format XX:XX:XX:XX:XX:XX */
+ if (i == ETHER_ADDR_LEN) {
+ while (i-- != 0) {
+ if (o[i] > UINT8_MAX)
+ return NULL;
+ ether_addr.addr_bytes[i] = (uint8_t)o[i];
+ }
+ /* Support the format XXXX:XXXX:XXXX */
+ } else if (i == ETHER_ADDR_LEN / 2) {
+ while (i-- != 0) {
+ if (o[i] > UINT16_MAX)
+ return NULL;
+ ether_addr.addr_bytes[i * 2] = (uint8_t)(o[i] >> 8);
+ ether_addr.addr_bytes[i * 2 + 1] = (uint8_t)(o[i] & 0xff);
+ }
+ /* unknown format */
+ } else
+ return NULL;
+
+ return (struct ether_addr *)&ether_addr;
+}
+
+int
+softnic_parse_ipv4_addr(const char *token, struct in_addr *ipv4)
+{
+ if (strlen(token) >= INET_ADDRSTRLEN)
+ return -EINVAL;
+
+ if (inet_pton4(token, (unsigned char *)ipv4) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+softnic_parse_ipv6_addr(const char *token, struct in6_addr *ipv6)
+{
+ if (strlen(token) >= INET6_ADDRSTRLEN)
+ return -EINVAL;
+
+ if (inet_pton6(token, (unsigned char *)ipv6) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+softnic_parse_mac_addr(const char *token, struct ether_addr *addr)
+{
+ struct ether_addr *tmp;
+
+ tmp = my_ether_aton(token);
+ if (tmp == NULL)
+ return -1;
+
+ memcpy(addr, tmp, sizeof(struct ether_addr));
+ return 0;
+}
+
+int
+softnic_parse_cpu_core(const char *entry,
+ struct softnic_cpu_core_params *p)
+{
+ size_t num_len;
+ char num[8];
+
+ uint32_t s = 0, c = 0, h = 0, val;
+ uint8_t s_parsed = 0, c_parsed = 0, h_parsed = 0;
+ const char *next = skip_white_spaces(entry);
+ char type;
+
+ if (p == NULL)
+ return -EINVAL;
+
+ /* Expect <CORE> or [sX][cY][h]. At least one parameter is required. */
+ while (*next != '\0') {
+ /* If everything parsed nothing should left */
+ if (s_parsed && c_parsed && h_parsed)
+ return -EINVAL;
+
+ type = *next;
+ switch (type) {
+ case 's':
+ case 'S':
+ if (s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+ s_parsed = 1;
+ next++;
+ break;
+ case 'c':
+ case 'C':
+ if (c_parsed || h_parsed)
+ return -EINVAL;
+ c_parsed = 1;
+ next++;
+ break;
+ case 'h':
+ case 'H':
+ if (h_parsed)
+ return -EINVAL;
+ h_parsed = 1;
+ next++;
+ break;
+ default:
+ /* If it start from digit it must be only core id. */
+ if (!isdigit(*next) || s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+
+ type = 'C';
+ }
+
+ for (num_len = 0; *next != '\0'; next++, num_len++) {
+ if (num_len == RTE_DIM(num))
+ return -EINVAL;
+
+ if (!isdigit(*next))
+ break;
+
+ num[num_len] = *next;
+ }
+
+ if (num_len == 0 && type != 'h' && type != 'H')
+ return -EINVAL;
+
+ if (num_len != 0 && (type == 'h' || type == 'H'))
+ return -EINVAL;
+
+ num[num_len] = '\0';
+ val = strtol(num, NULL, 10);
+
+ h = 0;
+ switch (type) {
+ case 's':
+ case 'S':
+ s = val;
+ break;
+ case 'c':
+ case 'C':
+ c = val;
+ break;
+ case 'h':
+ case 'H':
+ h = 1;
+ break;
+ }
+ }
+
+ p->socket_id = s;
+ p->core_id = c;
+ p->thread_id = h;
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/softnic/parser.h b/src/spdk/dpdk/drivers/net/softnic/parser.h
new file mode 100644
index 00000000..1ee3f82a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/parser.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#ifndef __INCLUDE_SOFTNIC_PARSER_H__
+#define __INCLUDE_SOFTNIC_PARSER_H__
+
+#include <stdint.h>
+
+#include <rte_ip.h>
+#include <rte_ether.h>
+
+#define PARSE_DELIMITER " \f\n\r\t\v"
+
+#define skip_white_spaces(pos) \
+({ \
+ __typeof__(pos) _p = (pos); \
+ for ( ; isspace(*_p); _p++) \
+ ; \
+ _p; \
+})
+
+static inline size_t
+skip_digits(const char *src)
+{
+ size_t i;
+
+ for (i = 0; isdigit(src[i]); i++)
+ ;
+
+ return i;
+}
+
+int softnic_parser_read_arg_bool(const char *p);
+
+int softnic_parser_read_int32(int32_t *value, const char *p);
+
+int softnic_parser_read_uint64(uint64_t *value, const char *p);
+int softnic_parser_read_uint32(uint32_t *value, const char *p);
+int softnic_parser_read_uint16(uint16_t *value, const char *p);
+int softnic_parser_read_uint8(uint8_t *value, const char *p);
+
+int softnic_parser_read_uint64_hex(uint64_t *value, const char *p);
+int softnic_parser_read_uint32_hex(uint32_t *value, const char *p);
+int softnic_parser_read_uint16_hex(uint16_t *value, const char *p);
+int softnic_parser_read_uint8_hex(uint8_t *value, const char *p);
+
+int softnic_parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
+
+int softnic_parse_ipv4_addr(const char *token, struct in_addr *ipv4);
+int softnic_parse_ipv6_addr(const char *token, struct in6_addr *ipv6);
+int softnic_parse_mac_addr(const char *token, struct ether_addr *addr);
+int softnic_parse_mpls_labels(char *string,
+ uint32_t *labels, uint32_t *n_labels);
+
+struct softnic_cpu_core_params {
+ uint32_t socket_id;
+ uint32_t core_id;
+ uint32_t thread_id;
+};
+
+int softnic_parse_cpu_core(const char *entry,
+ struct softnic_cpu_core_params *p);
+
+int softnic_parse_tokenize_string(char *string,
+ char *tokens[], uint32_t *n_tokens);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.c
new file mode 100644
index 00000000..30fb3952
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.c
@@ -0,0 +1,594 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_malloc.h>
+#include <rte_bus_vdev.h>
+#include <rte_kvargs.h>
+#include <rte_errno.h>
+#include <rte_ring.h>
+#include <rte_tm_driver.h>
+
+#include "rte_eth_softnic.h"
+#include "rte_eth_softnic_internals.h"
+
+#define PMD_PARAM_FIRMWARE "firmware"
+#define PMD_PARAM_CONN_PORT "conn_port"
+#define PMD_PARAM_CPU_ID "cpu_id"
+#define PMD_PARAM_TM_N_QUEUES "tm_n_queues"
+#define PMD_PARAM_TM_QSIZE0 "tm_qsize0"
+#define PMD_PARAM_TM_QSIZE1 "tm_qsize1"
+#define PMD_PARAM_TM_QSIZE2 "tm_qsize2"
+#define PMD_PARAM_TM_QSIZE3 "tm_qsize3"
+
+static const char *pmd_valid_args[] = {
+ PMD_PARAM_FIRMWARE,
+ PMD_PARAM_CONN_PORT,
+ PMD_PARAM_CPU_ID,
+ PMD_PARAM_TM_N_QUEUES,
+ PMD_PARAM_TM_QSIZE0,
+ PMD_PARAM_TM_QSIZE1,
+ PMD_PARAM_TM_QSIZE2,
+ PMD_PARAM_TM_QSIZE3,
+ NULL
+};
+
+static const char welcome[] =
+ "\n"
+ "Welcome to Soft NIC!\n"
+ "\n";
+
+static const char prompt[] = "softnic> ";
+
+struct softnic_conn_params conn_params_default = {
+ .welcome = welcome,
+ .prompt = prompt,
+ .addr = "0.0.0.0",
+ .port = 0,
+ .buf_size = 1024 * 1024,
+ .msg_in_len_max = 1024,
+ .msg_out_len_max = 1024 * 1024,
+ .msg_handle = softnic_cli_process,
+ .msg_handle_arg = NULL,
+};
+
+static const struct rte_eth_dev_info pmd_dev_info = {
+ .min_rx_bufsize = 0,
+ .max_rx_pktlen = UINT32_MAX,
+ .max_rx_queues = UINT16_MAX,
+ .max_tx_queues = UINT16_MAX,
+ .rx_desc_lim = {
+ .nb_max = UINT16_MAX,
+ .nb_min = 0,
+ .nb_align = 1,
+ },
+ .tx_desc_lim = {
+ .nb_max = UINT16_MAX,
+ .nb_min = 0,
+ .nb_align = 1,
+ },
+ .rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP,
+};
+
+static int pmd_softnic_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, pmd_softnic_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+static void
+pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_dev_info *dev_info)
+{
+ memcpy(dev_info, &pmd_dev_info, sizeof(*dev_info));
+}
+
+static int
+pmd_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static int
+pmd_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool __rte_unused)
+{
+ char name[NAME_SIZE];
+ struct pmd_internals *p = dev->data->dev_private;
+ struct softnic_swq *swq;
+
+ struct softnic_swq_params params = {
+ .size = nb_rx_desc,
+ };
+
+ snprintf(name, sizeof(name), "RXQ%u", rx_queue_id);
+
+ swq = softnic_swq_create(p,
+ name,
+ &params);
+ if (swq == NULL)
+ return -1;
+
+ dev->data->rx_queues[rx_queue_id] = swq->r;
+ return 0;
+}
+
+static int
+pmd_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ char name[NAME_SIZE];
+ struct pmd_internals *p = dev->data->dev_private;
+ struct softnic_swq *swq;
+
+ struct softnic_swq_params params = {
+ .size = nb_tx_desc,
+ };
+
+ snprintf(name, sizeof(name), "TXQ%u", tx_queue_id);
+
+ swq = softnic_swq_create(p,
+ name,
+ &params);
+ if (swq == NULL)
+ return -1;
+
+ dev->data->tx_queues[tx_queue_id] = swq->r;
+ return 0;
+}
+
+static int
+pmd_dev_start(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ int status;
+
+ /* Firmware */
+ status = softnic_cli_script_process(p,
+ p->params.firmware,
+ conn_params_default.msg_in_len_max,
+ conn_params_default.msg_out_len_max);
+ if (status)
+ return status;
+
+ /* Link UP */
+ dev->data->dev_link.link_status = ETH_LINK_UP;
+
+ return 0;
+}
+
+static void
+pmd_dev_stop(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* Link DOWN */
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
+ /* Firmware */
+ softnic_pipeline_disable_all(p);
+ softnic_pipeline_free(p);
+ softnic_table_action_profile_free(p);
+ softnic_port_in_action_profile_free(p);
+ softnic_tap_free(p);
+ softnic_tmgr_free(p);
+ softnic_link_free(p);
+ softnic_softnic_swq_free_keep_rxq_txq(p);
+ softnic_mempool_free(p);
+
+ tm_hierarchy_free(p);
+}
+
+static void
+pmd_dev_close(struct rte_eth_dev *dev __rte_unused)
+{
+ return;
+}
+
+static int
+pmd_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
+{
+ return 0;
+}
+
+static int
+pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
+{
+ *(const struct rte_tm_ops **)arg = &pmd_tm_ops;
+
+ return 0;
+}
+
+static const struct eth_dev_ops pmd_ops = {
+ .dev_configure = pmd_dev_configure,
+ .dev_start = pmd_dev_start,
+ .dev_stop = pmd_dev_stop,
+ .dev_close = pmd_dev_close,
+ .link_update = pmd_link_update,
+ .dev_infos_get = pmd_dev_infos_get,
+ .rx_queue_setup = pmd_rx_queue_setup,
+ .tx_queue_setup = pmd_tx_queue_setup,
+ .tm_ops_get = pmd_tm_ops_get,
+};
+
+static uint16_t
+pmd_rx_pkt_burst(void *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return (uint16_t)rte_ring_sc_dequeue_burst(rxq,
+ (void **)rx_pkts,
+ nb_pkts,
+ NULL);
+}
+
+static uint16_t
+pmd_tx_pkt_burst(void *txq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ return (uint16_t)rte_ring_sp_enqueue_burst(txq,
+ (void **)tx_pkts,
+ nb_pkts,
+ NULL);
+}
+
+static void *
+pmd_init(struct pmd_params *params)
+{
+ struct pmd_internals *p;
+ int status;
+
+ p = rte_zmalloc_socket(params->name,
+ sizeof(struct pmd_internals),
+ 0,
+ params->cpu_id);
+ if (p == NULL)
+ return NULL;
+
+ /* Params */
+ memcpy(&p->params, params, sizeof(p->params));
+
+ /* Resources */
+ tm_hierarchy_init(p);
+
+ softnic_mempool_init(p);
+ softnic_swq_init(p);
+ softnic_link_init(p);
+ softnic_tmgr_init(p);
+ softnic_tap_init(p);
+ softnic_port_in_action_profile_init(p);
+ softnic_table_action_profile_init(p);
+ softnic_pipeline_init(p);
+
+ status = softnic_thread_init(p);
+ if (status) {
+ rte_free(p);
+ return NULL;
+ }
+
+ if (params->conn_port) {
+ struct softnic_conn_params conn_params;
+
+ memcpy(&conn_params, &conn_params_default, sizeof(conn_params));
+ conn_params.port = p->params.conn_port;
+ conn_params.msg_handle_arg = p;
+
+ p->conn = softnic_conn_init(&conn_params);
+ if (p->conn == NULL) {
+ softnic_thread_free(p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ return p;
+}
+
+static void
+pmd_free(struct pmd_internals *p)
+{
+ if (p == NULL)
+ return;
+
+ if (p->params.conn_port)
+ softnic_conn_free(p->conn);
+
+ softnic_thread_free(p);
+ softnic_pipeline_free(p);
+ softnic_table_action_profile_free(p);
+ softnic_port_in_action_profile_free(p);
+ softnic_tap_free(p);
+ softnic_tmgr_free(p);
+ softnic_link_free(p);
+ softnic_swq_free(p);
+ softnic_mempool_free(p);
+
+ tm_hierarchy_free(p);
+
+ rte_free(p);
+}
+
+static struct ether_addr eth_addr = {
+ .addr_bytes = {0},
+};
+
+static int
+pmd_ethdev_register(struct rte_vdev_device *vdev,
+ struct pmd_params *params,
+ void *dev_private)
+{
+ struct rte_eth_dev *dev;
+
+ /* Ethdev entry allocation */
+ dev = rte_eth_dev_allocate(params->name);
+ if (!dev)
+ return -ENOMEM;
+
+ /* dev */
+ dev->rx_pkt_burst = pmd_rx_pkt_burst;
+ dev->tx_pkt_burst = pmd_tx_pkt_burst;
+ dev->tx_pkt_prepare = NULL;
+ dev->dev_ops = &pmd_ops;
+ dev->device = &vdev->device;
+
+ /* dev->data */
+ dev->data->dev_private = dev_private;
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
+ dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->mac_addrs = &eth_addr;
+ dev->data->promiscuous = 1;
+ dev->data->kdrv = RTE_KDRV_NONE;
+ dev->data->numa_node = params->cpu_id;
+
+ rte_eth_dev_probing_finish(dev);
+
+ return 0;
+}
+
+static int
+get_string(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(char **)extra_args = strdup(value);
+
+ if (!*(char **)extra_args)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int
+get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(uint32_t *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+
+static int
+get_uint16(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(uint16_t *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+
+static int
+pmd_parse_args(struct pmd_params *p, const char *params)
+{
+ struct rte_kvargs *kvlist;
+ int ret = 0;
+
+ kvlist = rte_kvargs_parse(params, pmd_valid_args);
+ if (kvlist == NULL)
+ return -EINVAL;
+
+ /* Set default values */
+ memset(p, 0, sizeof(*p));
+ p->firmware = SOFTNIC_FIRMWARE;
+ p->cpu_id = SOFTNIC_CPU_ID;
+ p->tm.n_queues = SOFTNIC_TM_N_QUEUES;
+ p->tm.qsize[0] = SOFTNIC_TM_QUEUE_SIZE;
+ p->tm.qsize[1] = SOFTNIC_TM_QUEUE_SIZE;
+ p->tm.qsize[2] = SOFTNIC_TM_QUEUE_SIZE;
+ p->tm.qsize[3] = SOFTNIC_TM_QUEUE_SIZE;
+
+ /* Firmware script (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_FIRMWARE) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_FIRMWARE,
+ &get_string, &p->firmware);
+ if (ret < 0)
+ goto out_free;
+ }
+
+ /* Connection listening port (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_CONN_PORT) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_CONN_PORT,
+ &get_uint16, &p->conn_port);
+ if (ret < 0)
+ goto out_free;
+ }
+
+ /* CPU ID (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_CPU_ID) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_CPU_ID,
+ &get_uint32, &p->cpu_id);
+ if (ret < 0)
+ goto out_free;
+ }
+
+ /* TM number of queues (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_N_QUEUES) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_N_QUEUES,
+ &get_uint32, &p->tm.n_queues);
+ if (ret < 0)
+ goto out_free;
+ }
+
+ /* TM queue size 0 .. 3 (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE0) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE0,
+ &get_uint32, &p->tm.qsize[0]);
+ if (ret < 0)
+ goto out_free;
+ }
+
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE1) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE1,
+ &get_uint32, &p->tm.qsize[1]);
+ if (ret < 0)
+ goto out_free;
+ }
+
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE2) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE2,
+ &get_uint32, &p->tm.qsize[2]);
+ if (ret < 0)
+ goto out_free;
+ }
+
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE3) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE3,
+ &get_uint32, &p->tm.qsize[3]);
+ if (ret < 0)
+ goto out_free;
+ }
+
+out_free:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+pmd_probe(struct rte_vdev_device *vdev)
+{
+ struct pmd_params p;
+ const char *params;
+ int status = 0;
+
+ void *dev_private;
+ const char *name = rte_vdev_device_name(vdev);
+
+ PMD_LOG(INFO, "Probing device \"%s\"", name);
+
+ /* Parse input arguments */
+ params = rte_vdev_device_args(vdev);
+ if (!params)
+ return -EINVAL;
+
+ status = pmd_parse_args(&p, params);
+ if (status)
+ return status;
+
+ p.name = name;
+
+ /* Allocate and initialize soft ethdev private data */
+ dev_private = pmd_init(&p);
+ if (dev_private == NULL)
+ return -ENOMEM;
+
+ /* Register soft ethdev */
+ PMD_LOG(INFO, "Creating soft ethdev \"%s\"", p.name);
+
+ status = pmd_ethdev_register(vdev, &p, dev_private);
+ if (status) {
+ pmd_free(dev_private);
+ return status;
+ }
+
+ return 0;
+}
+
+static int
+pmd_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_eth_dev *dev = NULL;
+ struct pmd_internals *p;
+
+ if (!vdev)
+ return -EINVAL;
+
+ PMD_LOG(INFO, "Removing device \"%s\"", rte_vdev_device_name(vdev));
+
+ /* Find the ethdev entry */
+ dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
+ if (dev == NULL)
+ return -ENODEV;
+ p = dev->data->dev_private;
+
+ /* Free device data structures*/
+ rte_free(dev->data);
+ rte_eth_dev_release_port(dev);
+ pmd_free(p);
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_softnic_drv = {
+ .probe = pmd_probe,
+ .remove = pmd_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);
+RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
+ PMD_PARAM_FIRMWARE "=<string> "
+ PMD_PARAM_CONN_PORT "=<uint16> "
+ PMD_PARAM_CPU_ID "=<uint32> "
+ PMD_PARAM_TM_N_QUEUES "=<uint32> "
+ PMD_PARAM_TM_QSIZE0 "=<uint32> "
+ PMD_PARAM_TM_QSIZE1 "=<uint32> "
+ PMD_PARAM_TM_QSIZE2 "=<uint32> "
+ PMD_PARAM_TM_QSIZE3 "=<uint32>"
+);
+
+
+RTE_INIT(pmd_softnic_init_log)
+{
+ pmd_softnic_logtype = rte_log_register("pmd.net.softnic");
+ if (pmd_softnic_logtype >= 0)
+ rte_log_set_level(pmd_softnic_logtype, RTE_LOG_NOTICE);
+}
+
+int
+rte_pmd_softnic_manage(uint16_t port_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct pmd_internals *softnic;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+#endif
+
+ softnic = dev->data->dev_private;
+
+ softnic_conn_poll_for_conn(softnic->conn);
+
+ softnic_conn_poll_for_msg(softnic->conn);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.h b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.h
new file mode 100644
index 00000000..048dfe6b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef __INCLUDE_RTE_ETH_SOFTNIC_H__
+#define __INCLUDE_RTE_ETH_SOFTNIC_H__
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Firmware. */
+#ifndef SOFTNIC_FIRMWARE
+#define SOFTNIC_FIRMWARE "firmware.cli"
+#endif
+
+/** TCP connection port (0 = no connectivity). */
+#ifndef SOFTNIC_CONN_PORT
+#define SOFTNIC_CONN_PORT 0
+#endif
+
+/** NUMA node ID. */
+#ifndef SOFTNIC_CPU_ID
+#define SOFTNIC_CPU_ID 0
+#endif
+
+/** Traffic Manager: Number of scheduler queues. */
+#ifndef SOFTNIC_TM_N_QUEUES
+#define SOFTNIC_TM_N_QUEUES (64 * 1024)
+#endif
+
+/** Traffic Manager: Scheduler queue size (per traffic class). */
+#ifndef SOFTNIC_TM_QUEUE_SIZE
+#define SOFTNIC_TM_QUEUE_SIZE 64
+#endif
+
+/**
+ * Soft NIC run.
+ *
+ * @param port_id
+ * Port ID of the Soft NIC device.
+ * @return
+ * Zero on success, error code otherwise.
+ */
+int
+rte_pmd_softnic_run(uint16_t port_id);
+
+/**
+ * Soft NIC manage.
+ *
+ * @param port_id
+ * Port ID of the Soft NIC device.
+ * @return
+ * Zero on success, error code otherwise.
+ */
+int __rte_experimental
+rte_pmd_softnic_manage(uint16_t port_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_ETH_SOFTNIC_H__ */
diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_action.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_action.c
new file mode 100644
index 00000000..c25f4dd9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_action.c
@@ -0,0 +1,389 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_string_fns.h>
+
+#include "hash_func.h"
+#include "rte_eth_softnic_internals.h"
+
+/**
+ * Input port
+ */
+int
+softnic_port_in_action_profile_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->port_in_action_profile_list);
+
+ return 0;
+}
+
+void
+softnic_port_in_action_profile_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_port_in_action_profile *profile;
+
+ profile = TAILQ_FIRST(&p->port_in_action_profile_list);
+ if (profile == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->port_in_action_profile_list, profile, node);
+ free(profile);
+ }
+}
+
+struct softnic_port_in_action_profile *
+softnic_port_in_action_profile_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_port_in_action_profile *profile;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(profile, &p->port_in_action_profile_list, node)
+ if (strcmp(profile->name, name) == 0)
+ return profile;
+
+ return NULL;
+}
+
+struct softnic_port_in_action_profile *
+softnic_port_in_action_profile_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_port_in_action_profile_params *params)
+{
+ struct softnic_port_in_action_profile *profile;
+ struct rte_port_in_action_profile *ap;
+ int status;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_port_in_action_profile_find(p, name) ||
+ params == NULL)
+ return NULL;
+
+ if ((params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) &&
+ params->lb.f_hash == NULL) {
+ switch (params->lb.key_size) {
+ case 8:
+ params->lb.f_hash = hash_default_key8;
+ break;
+
+ case 16:
+ params->lb.f_hash = hash_default_key16;
+ break;
+
+ case 24:
+ params->lb.f_hash = hash_default_key24;
+ break;
+
+ case 32:
+ params->lb.f_hash = hash_default_key32;
+ break;
+
+ case 40:
+ params->lb.f_hash = hash_default_key40;
+ break;
+
+ case 48:
+ params->lb.f_hash = hash_default_key48;
+ break;
+
+ case 56:
+ params->lb.f_hash = hash_default_key56;
+ break;
+
+ case 64:
+ params->lb.f_hash = hash_default_key64;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ params->lb.seed = 0;
+ }
+
+ /* Resource */
+ ap = rte_port_in_action_profile_create(0);
+ if (ap == NULL)
+ return NULL;
+
+ if (params->action_mask & (1LLU << RTE_PORT_IN_ACTION_FLTR)) {
+ status = rte_port_in_action_profile_action_register(ap,
+ RTE_PORT_IN_ACTION_FLTR,
+ &params->fltr);
+
+ if (status) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) {
+ status = rte_port_in_action_profile_action_register(ap,
+ RTE_PORT_IN_ACTION_LB,
+ &params->lb);
+
+ if (status) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ status = rte_port_in_action_profile_freeze(ap);
+ if (status) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node allocation */
+ profile = calloc(1, sizeof(struct softnic_port_in_action_profile));
+ if (profile == NULL) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(profile->name, name, sizeof(profile->name));
+ memcpy(&profile->params, params, sizeof(*params));
+ profile->ap = ap;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->port_in_action_profile_list, profile, node);
+
+ return profile;
+}
+
+/**
+ * Table
+ */
+int
+softnic_table_action_profile_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->table_action_profile_list);
+
+ return 0;
+}
+
+void
+softnic_table_action_profile_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_table_action_profile *profile;
+
+ profile = TAILQ_FIRST(&p->table_action_profile_list);
+ if (profile == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->table_action_profile_list, profile, node);
+ free(profile);
+ }
+}
+
+struct softnic_table_action_profile *
+softnic_table_action_profile_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_table_action_profile *profile;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(profile, &p->table_action_profile_list, node)
+ if (strcmp(profile->name, name) == 0)
+ return profile;
+
+ return NULL;
+}
+
+struct softnic_table_action_profile *
+softnic_table_action_profile_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_table_action_profile_params *params)
+{
+ struct softnic_table_action_profile *profile;
+ struct rte_table_action_profile *ap;
+ int status;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_table_action_profile_find(p, name) ||
+ params == NULL ||
+ ((params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) == 0))
+ return NULL;
+
+ if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) &&
+ params->lb.f_hash == NULL) {
+ switch (params->lb.key_size) {
+ case 8:
+ params->lb.f_hash = hash_default_key8;
+ break;
+
+ case 16:
+ params->lb.f_hash = hash_default_key16;
+ break;
+
+ case 24:
+ params->lb.f_hash = hash_default_key24;
+ break;
+
+ case 32:
+ params->lb.f_hash = hash_default_key32;
+ break;
+
+ case 40:
+ params->lb.f_hash = hash_default_key40;
+ break;
+
+ case 48:
+ params->lb.f_hash = hash_default_key48;
+ break;
+
+ case 56:
+ params->lb.f_hash = hash_default_key56;
+ break;
+
+ case 64:
+ params->lb.f_hash = hash_default_key64;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ params->lb.seed = 0;
+ }
+
+ /* Resource */
+ ap = rte_table_action_profile_create(&params->common);
+ if (ap == NULL)
+ return NULL;
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_FWD,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_LB,
+ &params->lb);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_MTR,
+ &params->mtr);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TM,
+ &params->tm);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_ENCAP,
+ &params->encap);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_NAT,
+ &params->nat);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TTL,
+ &params->ttl);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_STATS,
+ &params->stats);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TIME,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ status = rte_table_action_profile_freeze(ap);
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node allocation */
+ profile = calloc(1, sizeof(struct softnic_table_action_profile));
+ if (profile == NULL) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(profile->name, name, sizeof(profile->name));
+ memcpy(&profile->params, params, sizeof(*params));
+ profile->ap = ap;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->table_action_profile_list, profile, node);
+
+ return profile;
+}
diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c
new file mode 100644
index 00000000..0c7448cc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c
@@ -0,0 +1,5259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include "rte_eth_softnic_internals.h"
+#include "parser.h"
+
+#ifndef CMD_MAX_TOKENS
+#define CMD_MAX_TOKENS 256
+#endif
+
+#define MSG_OUT_OF_MEMORY "Not enough memory.\n"
+#define MSG_CMD_UNKNOWN "Unknown command \"%s\".\n"
+#define MSG_CMD_UNIMPLEM "Command \"%s\" not implemented.\n"
+#define MSG_ARG_NOT_ENOUGH "Not enough arguments for command \"%s\".\n"
+#define MSG_ARG_TOO_MANY "Too many arguments for command \"%s\".\n"
+#define MSG_ARG_MISMATCH "Wrong number of arguments for command \"%s\".\n"
+#define MSG_ARG_NOT_FOUND "Argument \"%s\" not found.\n"
+#define MSG_ARG_INVALID "Invalid value for argument \"%s\".\n"
+#define MSG_FILE_ERR "Error in file \"%s\" at line %u.\n"
+#define MSG_FILE_NOT_ENOUGH "Not enough rules in file \"%s\".\n"
+#define MSG_CMD_FAIL "Command \"%s\" failed.\n"
+
+static int
+is_comment(char *in)
+{
+ if ((strlen(in) && index("!#%;", in[0])) ||
+ (strncmp(in, "//", 2) == 0) ||
+ (strncmp(in, "--", 2) == 0))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * mempool <mempool_name>
+ * buffer <buffer_size>
+ * pool <pool_size>
+ * cache <cache_size>
+ */
+static void
+cmd_mempool(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_mempool_params p;
+ char *name;
+ struct softnic_mempool *mempool;
+
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "buffer") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buffer");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.buffer_size, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "buffer_size");
+ return;
+ }
+
+ if (strcmp(tokens[4], "pool") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pool");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.pool_size, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pool_size");
+ return;
+ }
+
+ if (strcmp(tokens[6], "cache") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cache");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.cache_size, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cache_size");
+ return;
+ }
+
+ mempool = softnic_mempool_create(softnic, name, &p);
+ if (mempool == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * link <link_name>
+ * dev <device_name> | port <port_id>
+ */
+static void
+cmd_link(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_link_params p;
+ struct softnic_link *link;
+ char *name;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens != 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "dev") == 0) {
+ p.dev_name = tokens[3];
+ } else if (strcmp(tokens[2], "port") == 0) {
+ p.dev_name = NULL;
+
+ if (softnic_parser_read_uint16(&p.port_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "dev or port");
+ return;
+ }
+
+ link = softnic_link_create(softnic, name, &p);
+ if (link == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * swq <swq_name>
+ * size <size>
+ */
+static void
+cmd_swq(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_swq_params p;
+ char *name;
+ struct softnic_swq *swq;
+
+ if (n_tokens != 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.size, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "size");
+ return;
+ }
+
+ swq = softnic_swq_create(softnic, name, &p);
+ if (swq == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tmgr shaper profile
+ * id <profile_id>
+ * rate <tb_rate> size <tb_size>
+ * adj <packet_length_adjust>
+ */
+static void
+cmd_tmgr_shaper_profile(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_tm_shaper_params sp;
+ struct rte_tm_error error;
+ uint32_t shaper_profile_id;
+ uint16_t port_id;
+ int status;
+
+ memset(&sp, 0, sizeof(struct rte_tm_shaper_params));
+
+ if (n_tokens != 11) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "shaper") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper");
+ return;
+ }
+
+ if (strcmp(tokens[2], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (strcmp(tokens[3], "id") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&shaper_profile_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "rate") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rate");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&sp.peak.rate, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tb_rate");
+ return;
+ }
+
+ if (strcmp(tokens[7], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&sp.peak.size, tokens[8]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tb_size");
+ return;
+ }
+
+ if (strcmp(tokens[9], "adj") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "adj");
+ return;
+ }
+
+ if (softnic_parser_read_int32(&sp.pkt_length_adjust, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "packet_length_adjust");
+ return;
+ }
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status)
+ return;
+
+ status = rte_tm_shaper_profile_add(port_id, shaper_profile_id, &sp, &error);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tmgr shared shaper
+ * id <shared_shaper_id>
+ * profile <shaper_profile_id>
+ */
+static void
+cmd_tmgr_shared_shaper(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_tm_error error;
+ uint32_t shared_shaper_id, shaper_profile_id;
+ uint16_t port_id;
+ int status;
+
+ if (n_tokens != 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "shared") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared");
+ return;
+ }
+
+ if (strcmp(tokens[2], "shaper") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper");
+ return;
+ }
+
+ if (strcmp(tokens[3], "id") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&shared_shaper_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&shaper_profile_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id");
+ return;
+ }
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status)
+ return;
+
+ status = rte_tm_shared_shaper_add_update(port_id,
+ shared_shaper_id,
+ shaper_profile_id,
+ &error);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tmgr node
+ * id <node_id>
+ * parent <parent_node_id | none>
+ * priority <priority>
+ * weight <weight>
+ * [shaper profile <shaper_profile_id>]
+ * [shared shaper <shared_shaper_id>]
+ * [nonleaf sp <n_sp_priorities>]
+ */
+static void
+cmd_tmgr_node(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_tm_error error;
+ struct rte_tm_node_params np;
+ uint32_t node_id, parent_node_id, priority, weight, shared_shaper_id;
+ uint16_t port_id;
+ int status;
+
+ memset(&np, 0, sizeof(struct rte_tm_node_params));
+ np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
+ np.nonleaf.n_sp_priorities = 1;
+
+ if (n_tokens < 10) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "node") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "node");
+ return;
+ }
+
+ if (strcmp(tokens[2], "id") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&node_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "node_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "parent") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "parent");
+ return;
+ }
+
+ if (strcmp(tokens[5], "none") == 0)
+ parent_node_id = RTE_TM_NODE_ID_NULL;
+ else {
+ if (softnic_parser_read_uint32(&parent_node_id, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "parent_node_id");
+ return;
+ }
+ }
+
+ if (strcmp(tokens[6], "priority") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&priority, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "priority");
+ return;
+ }
+
+ if (strcmp(tokens[8], "weight") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&weight, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "weight");
+ return;
+ }
+
+ tokens += 10;
+ n_tokens -= 10;
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[0], "shaper") == 0) &&
+ (strcmp(tokens[1], "profile") == 0)) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node");
+ return;
+ }
+
+ if (strcmp(tokens[2], "none") == 0) {
+ np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
+ } else {
+ if (softnic_parser_read_uint32(&np.shaper_profile_id, tokens[2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id");
+ return;
+ }
+ }
+
+ tokens += 3;
+ n_tokens -= 3;
+ } /* shaper profile */
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[0], "shared") == 0) &&
+ (strcmp(tokens[1], "shaper") == 0)) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&shared_shaper_id, tokens[2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id");
+ return;
+ }
+
+ np.shared_shaper_id = &shared_shaper_id;
+ np.n_shared_shapers = 1;
+
+ tokens += 3;
+ n_tokens -= 3;
+ } /* shared shaper */
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[0], "nonleaf") == 0) &&
+ (strcmp(tokens[1], "sp") == 0)) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&np.nonleaf.n_sp_priorities, tokens[2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_sp_priorities");
+ return;
+ }
+
+ tokens += 3;
+ n_tokens -= 3;
+ } /* nonleaf sp <n_sp_priorities> */
+
+ if (n_tokens) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status != 0)
+ return;
+
+ status = rte_tm_node_add(port_id,
+ node_id,
+ parent_node_id,
+ priority,
+ weight,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &np,
+ &error);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+static uint32_t
+root_node_id(uint32_t n_spp,
+ uint32_t n_pps)
+{
+ uint32_t n_queues = n_spp * n_pps * RTE_SCHED_QUEUES_PER_PIPE;
+ uint32_t n_tc = n_spp * n_pps * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+ uint32_t n_pipes = n_spp * n_pps;
+
+ return n_queues + n_tc + n_pipes + n_spp;
+}
+
+static uint32_t
+subport_node_id(uint32_t n_spp,
+ uint32_t n_pps,
+ uint32_t subport_id)
+{
+ uint32_t n_pipes = n_spp * n_pps;
+ uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+ uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE;
+
+ return n_queues + n_tc + n_pipes + subport_id;
+}
+
+static uint32_t
+pipe_node_id(uint32_t n_spp,
+ uint32_t n_pps,
+ uint32_t subport_id,
+ uint32_t pipe_id)
+{
+ uint32_t n_pipes = n_spp * n_pps;
+ uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+ uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE;
+
+ return n_queues +
+ n_tc +
+ pipe_id +
+ subport_id * n_pps;
+}
+
+static uint32_t
+tc_node_id(uint32_t n_spp,
+ uint32_t n_pps,
+ uint32_t subport_id,
+ uint32_t pipe_id,
+ uint32_t tc_id)
+{
+ uint32_t n_pipes = n_spp * n_pps;
+ uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE;
+
+ return n_queues +
+ tc_id +
+ (pipe_id + subport_id * n_pps) * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+}
+
+static uint32_t
+queue_node_id(uint32_t n_spp __rte_unused,
+ uint32_t n_pps,
+ uint32_t subport_id,
+ uint32_t pipe_id,
+ uint32_t tc_id,
+ uint32_t queue_id)
+{
+ return queue_id +
+ tc_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE +
+ (pipe_id + subport_id * n_pps) * RTE_SCHED_QUEUES_PER_PIPE;
+}
+
+struct tmgr_hierarchy_default_params {
+ uint32_t n_spp; /**< Number of subports per port. */
+ uint32_t n_pps; /**< Number of pipes per subport. */
+
+ struct {
+ uint32_t port;
+ uint32_t subport;
+ uint32_t pipe;
+ uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ } shaper_profile_id;
+
+ struct {
+ uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t tc_valid[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ } shared_shaper_id;
+
+ struct {
+ uint32_t queue[RTE_SCHED_QUEUES_PER_PIPE];
+ } weight;
+};
+
+static int
+tmgr_hierarchy_default(struct pmd_internals *softnic,
+ struct tmgr_hierarchy_default_params *params)
+{
+ struct rte_tm_node_params root_node_params = {
+ .shaper_profile_id = params->shaper_profile_id.port,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ };
+
+ struct rte_tm_node_params subport_node_params = {
+ .shaper_profile_id = params->shaper_profile_id.subport,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ };
+
+ struct rte_tm_node_params pipe_node_params = {
+ .shaper_profile_id = params->shaper_profile_id.pipe,
+ .nonleaf = {
+ .n_sp_priorities = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ },
+ };
+
+ struct rte_tm_node_params tc_node_params[] = {
+ [0] = {
+ .shaper_profile_id = params->shaper_profile_id.tc[0],
+ .shared_shaper_id = &params->shared_shaper_id.tc[0],
+ .n_shared_shapers =
+ (&params->shared_shaper_id.tc_valid[0]) ? 1 : 0,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ },
+
+ [1] = {
+ .shaper_profile_id = params->shaper_profile_id.tc[1],
+ .shared_shaper_id = &params->shared_shaper_id.tc[1],
+ .n_shared_shapers =
+ (&params->shared_shaper_id.tc_valid[1]) ? 1 : 0,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ },
+
+ [2] = {
+ .shaper_profile_id = params->shaper_profile_id.tc[2],
+ .shared_shaper_id = &params->shared_shaper_id.tc[2],
+ .n_shared_shapers =
+ (&params->shared_shaper_id.tc_valid[2]) ? 1 : 0,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ },
+
+ [3] = {
+ .shaper_profile_id = params->shaper_profile_id.tc[3],
+ .shared_shaper_id = &params->shared_shaper_id.tc[3],
+ .n_shared_shapers =
+ (&params->shared_shaper_id.tc_valid[3]) ? 1 : 0,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ },
+ };
+
+ struct rte_tm_node_params queue_node_params = {
+ .shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
+ };
+
+ struct rte_tm_error error;
+ uint32_t n_spp = params->n_spp, n_pps = params->n_pps, s;
+ int status;
+ uint16_t port_id;
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 0: Root node */
+ status = rte_tm_node_add(port_id,
+ root_node_id(n_spp, n_pps),
+ RTE_TM_NODE_ID_NULL,
+ 0,
+ 1,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &root_node_params,
+ &error);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 1: Subport nodes */
+ for (s = 0; s < params->n_spp; s++) {
+ uint32_t p;
+
+ status = rte_tm_node_add(port_id,
+ subport_node_id(n_spp, n_pps, s),
+ root_node_id(n_spp, n_pps),
+ 0,
+ 1,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &subport_node_params,
+ &error);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 2: Pipe nodes */
+ for (p = 0; p < params->n_pps; p++) {
+ uint32_t t;
+
+ status = rte_tm_node_add(port_id,
+ pipe_node_id(n_spp, n_pps, s, p),
+ subport_node_id(n_spp, n_pps, s),
+ 0,
+ 1,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &pipe_node_params,
+ &error);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 3: Traffic class nodes */
+ for (t = 0; t < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; t++) {
+ uint32_t q;
+
+ status = rte_tm_node_add(port_id,
+ tc_node_id(n_spp, n_pps, s, p, t),
+ pipe_node_id(n_spp, n_pps, s, p),
+ t,
+ 1,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &tc_node_params[t],
+ &error);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 4: Queue nodes */
+ for (q = 0; q < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; q++) {
+ status = rte_tm_node_add(port_id,
+ queue_node_id(n_spp, n_pps, s, p, t, q),
+ tc_node_id(n_spp, n_pps, s, p, t),
+ 0,
+ params->weight.queue[q],
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &queue_node_params,
+ &error);
+ if (status)
+ return -1;
+ } /* Queue */
+ } /* TC */
+ } /* Pipe */
+ } /* Subport */
+
+ return 0;
+}
+
+
+/**
+ * tmgr hierarchy-default
+ * spp <n_subports_per_port>
+ * pps <n_pipes_per_subport>
+ * shaper profile
+ * port <profile_id>
+ * subport <profile_id>
+ * pipe <profile_id>
+ * tc0 <profile_id>
+ * tc1 <profile_id>
+ * tc2 <profile_id>
+ * tc3 <profile_id>
+ * shared shaper
+ * tc0 <id | none>
+ * tc1 <id | none>
+ * tc2 <id | none>
+ * tc3 <id | none>
+ * weight
+ * queue <q0> ... <q15>
+ */
+static void
+cmd_tmgr_hierarchy_default(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct tmgr_hierarchy_default_params p;
+ int i, status;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens != 50) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "hierarchy-default") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy-default");
+ return;
+ }
+
+ if (strcmp(tokens[2], "spp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.n_spp, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_subports_per_port");
+ return;
+ }
+
+ if (strcmp(tokens[4], "pps") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.n_pps, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_pipes_per_subport");
+ return;
+ }
+
+ /* Shaper profile */
+
+ if (strcmp(tokens[6], "shaper") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper");
+ return;
+ }
+
+ if (strcmp(tokens[7], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (strcmp(tokens[8], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.port, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port profile id");
+ return;
+ }
+
+ if (strcmp(tokens[10], "subport") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "subport");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.subport, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "subport profile id");
+ return;
+ }
+
+ if (strcmp(tokens[12], "pipe") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipe");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.pipe, tokens[13]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pipe_profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[14], "tc0") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[0], tokens[15]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc0 profile id");
+ return;
+ }
+
+ if (strcmp(tokens[16], "tc1") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[1], tokens[17]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc1 profile id");
+ return;
+ }
+
+ if (strcmp(tokens[18], "tc2") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[2], tokens[19]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc2 profile id");
+ return;
+ }
+
+ if (strcmp(tokens[20], "tc3") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[3], tokens[21]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc3 profile id");
+ return;
+ }
+
+ /* Shared shaper */
+
+ if (strcmp(tokens[22], "shared") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared");
+ return;
+ }
+
+ if (strcmp(tokens[23], "shaper") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper");
+ return;
+ }
+
+ if (strcmp(tokens[24], "tc0") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0");
+ return;
+ }
+
+ if (strcmp(tokens[25], "none") == 0)
+ p.shared_shaper_id.tc_valid[0] = 0;
+ else {
+ if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[0], tokens[25]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc0");
+ return;
+ }
+
+ p.shared_shaper_id.tc_valid[0] = 1;
+ }
+
+ if (strcmp(tokens[26], "tc1") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1");
+ return;
+ }
+
+ if (strcmp(tokens[27], "none") == 0)
+ p.shared_shaper_id.tc_valid[1] = 0;
+ else {
+ if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[1], tokens[27]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc1");
+ return;
+ }
+
+ p.shared_shaper_id.tc_valid[1] = 1;
+ }
+
+ if (strcmp(tokens[28], "tc2") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2");
+ return;
+ }
+
+ if (strcmp(tokens[29], "none") == 0)
+ p.shared_shaper_id.tc_valid[2] = 0;
+ else {
+ if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[2], tokens[29]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc2");
+ return;
+ }
+
+ p.shared_shaper_id.tc_valid[2] = 1;
+ }
+
+ if (strcmp(tokens[30], "tc3") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3");
+ return;
+ }
+
+ if (strcmp(tokens[31], "none") == 0)
+ p.shared_shaper_id.tc_valid[3] = 0;
+ else {
+ if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[3], tokens[31]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc3");
+ return;
+ }
+
+ p.shared_shaper_id.tc_valid[3] = 1;
+ }
+
+ /* Weight */
+
+ if (strcmp(tokens[32], "weight") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight");
+ return;
+ }
+
+ if (strcmp(tokens[33], "queue") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "queue");
+ return;
+ }
+
+ for (i = 0; i < 16; i++) {
+ if (softnic_parser_read_uint32(&p.weight.queue[i], tokens[34 + i]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "weight queue");
+ return;
+ }
+ }
+
+ status = tmgr_hierarchy_default(softnic, &p);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tmgr hierarchy commit
+ */
+static void
+cmd_tmgr_hierarchy_commit(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_tm_error error;
+ uint16_t port_id;
+ int status;
+
+ if (n_tokens != 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "hierarchy") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy");
+ return;
+ }
+
+ if (strcmp(tokens[2], "commit") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "commit");
+ return;
+ }
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status != 0)
+ return;
+
+ status = rte_tm_hierarchy_commit(port_id, 1, &error);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tmgr <tmgr_name>
+ */
+static void
+cmd_tmgr(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *name;
+ struct softnic_tmgr_port *tmgr_port;
+
+ if (n_tokens != 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ tmgr_port = softnic_tmgr_port_create(softnic, name);
+ if (tmgr_port == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tap <tap_name>
+ */
+static void
+cmd_tap(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *name;
+ struct softnic_tap *tap;
+
+ if (n_tokens != 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ tap = softnic_tap_create(softnic, name);
+ if (tap == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * port in action profile <profile_name>
+ * [filter match | mismatch offset <key_offset> mask <key_mask> key <key_value> port <port_id>]
+ * [balance offset <key_offset> mask <key_mask> port <port_id0> ... <port_id15>]
+ */
+static void
+cmd_port_in_action_profile(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_port_in_action_profile_params p;
+ struct softnic_port_in_action_profile *ap;
+ char *name;
+ uint32_t t0;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (strcmp(tokens[2], "action") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action");
+ return;
+ }
+
+ if (strcmp(tokens[3], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ name = tokens[4];
+
+ t0 = 5;
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "filter") == 0)) {
+ uint32_t size;
+
+ if (n_tokens < t0 + 10) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "port in action profile filter");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "match") == 0) {
+ p.fltr.filter_on_match = 1;
+ } else if (strcmp(tokens[t0 + 1], "mismatch") == 0) {
+ p.fltr.filter_on_match = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, "match or mismatch");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.fltr.key_offset,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE;
+ if ((softnic_parse_hex_string(tokens[t0 + 5],
+ p.fltr.key_mask, &size) != 0) ||
+ size != RTE_PORT_IN_ACTION_FLTR_KEY_SIZE) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 6], "key") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key");
+ return;
+ }
+
+ size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE;
+ if ((softnic_parse_hex_string(tokens[t0 + 7],
+ p.fltr.key, &size) != 0) ||
+ size != RTE_PORT_IN_ACTION_FLTR_KEY_SIZE) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_value");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 8], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.fltr.port_id,
+ tokens[t0 + 9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_FLTR;
+ t0 += 10;
+ } /* filter */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "balance") == 0)) {
+ uint32_t i;
+
+ if (n_tokens < t0 + 22) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "port in action profile balance");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.lb.key_offset,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ p.lb.key_size = RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX;
+ if (softnic_parse_hex_string(tokens[t0 + 4],
+ p.lb.key_mask, &p.lb.key_size) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 5], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ for (i = 0; i < 16; i++)
+ if (softnic_parser_read_uint32(&p.lb.port_id[i],
+ tokens[t0 + 6 + i]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_LB;
+ t0 += 22;
+ } /* balance */
+
+ if (t0 < n_tokens) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ ap = softnic_port_in_action_profile_create(softnic, name, &p);
+ if (ap == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * table action profile <profile_name>
+ * ipv4 | ipv6
+ * offset <ip_offset>
+ * fwd
+ * [balance offset <key_offset> mask <key_mask> outoffset <out_offset>]
+ * [meter srtcm | trtcm
+ * tc <n_tc>
+ * stats none | pkts | bytes | both]
+ * [tm spp <n_subports_per_port> pps <n_pipes_per_subport>]
+ * [encap ether | vlan | qinq | mpls | pppoe]
+ * [nat src | dst
+ * proto udp | tcp]
+ * [ttl drop | fwd
+ * stats none | pkts]
+ * [stats pkts | bytes | both]
+ * [time]
+ */
+static void
+cmd_table_action_profile(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_action_profile_params p;
+ struct softnic_table_action_profile *ap;
+ char *name;
+ uint32_t t0;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "action") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action");
+ return;
+ }
+
+ if (strcmp(tokens[2], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ name = tokens[3];
+
+ if (strcmp(tokens[4], "ipv4") == 0) {
+ p.common.ip_version = 1;
+ } else if (strcmp(tokens[4], "ipv6") == 0) {
+ p.common.ip_version = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, "ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[5], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.common.ip_offset,
+ tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "ip_offset");
+ return;
+ }
+
+ if (strcmp(tokens[7], "fwd") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "fwd");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
+
+ t0 = 8;
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "balance") == 0)) {
+ if (n_tokens < t0 + 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "table action profile balance");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.lb.key_offset,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ p.lb.key_size = RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX;
+ if (softnic_parse_hex_string(tokens[t0 + 4],
+ p.lb.key_mask, &p.lb.key_size) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 5], "outoffset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "outoffset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.lb.out_offset,
+ tokens[t0 + 6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "out_offset");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_LB;
+ t0 += 7;
+ } /* balance */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "meter") == 0)) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile meter");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "srtcm") == 0) {
+ p.mtr.alg = RTE_TABLE_ACTION_METER_SRTCM;
+ } else if (strcmp(tokens[t0 + 1], "trtcm") == 0) {
+ p.mtr.alg = RTE_TABLE_ACTION_METER_TRTCM;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "srtcm or trtcm");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "tc") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.mtr.n_tc,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_tc");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 5], "none") == 0) {
+ p.mtr.n_packets_enabled = 0;
+ p.mtr.n_bytes_enabled = 0;
+ } else if (strcmp(tokens[t0 + 5], "pkts") == 0) {
+ p.mtr.n_packets_enabled = 1;
+ p.mtr.n_bytes_enabled = 0;
+ } else if (strcmp(tokens[t0 + 5], "bytes") == 0) {
+ p.mtr.n_packets_enabled = 0;
+ p.mtr.n_bytes_enabled = 1;
+ } else if (strcmp(tokens[t0 + 5], "both") == 0) {
+ p.mtr.n_packets_enabled = 1;
+ p.mtr.n_bytes_enabled = 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "none or pkts or bytes or both");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_MTR;
+ t0 += 6;
+ } /* meter */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "tm") == 0)) {
+ if (n_tokens < t0 + 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile tm");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "spp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.tm.n_subports_per_port,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "n_subports_per_port");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "pps") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.tm.n_pipes_per_subport,
+ tokens[t0 + 4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "n_pipes_per_subport");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TM;
+ t0 += 5;
+ } /* tm */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "encap") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "action profile encap");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "ether") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER;
+ } else if (strcmp(tokens[t0 + 1], "vlan") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN;
+ } else if (strcmp(tokens[t0 + 1], "qinq") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ;
+ } else if (strcmp(tokens[t0 + 1], "mpls") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS;
+ } else if (strcmp(tokens[t0 + 1], "pppoe") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE;
+ } else {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "encap");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_ENCAP;
+ t0 += 2;
+ } /* encap */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "nat") == 0)) {
+ if (n_tokens < t0 + 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile nat");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "src") == 0) {
+ p.nat.source_nat = 1;
+ } else if (strcmp(tokens[t0 + 1], "dst") == 0) {
+ p.nat.source_nat = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "src or dst");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "proto") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "proto");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "tcp") == 0) {
+ p.nat.proto = 0x06;
+ } else if (strcmp(tokens[t0 + 3], "udp") == 0) {
+ p.nat.proto = 0x11;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "tcp or udp");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_NAT;
+ t0 += 4;
+ } /* nat */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "ttl") == 0)) {
+ if (n_tokens < t0 + 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile ttl");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "drop") == 0) {
+ p.ttl.drop = 1;
+ } else if (strcmp(tokens[t0 + 1], "fwd") == 0) {
+ p.ttl.drop = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "drop or fwd");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "none") == 0) {
+ p.ttl.n_packets_enabled = 0;
+ } else if (strcmp(tokens[t0 + 3], "pkts") == 0) {
+ p.ttl.n_packets_enabled = 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "none or pkts");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TTL;
+ t0 += 4;
+ } /* ttl */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "stats") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile stats");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "pkts") == 0) {
+ p.stats.n_packets_enabled = 1;
+ p.stats.n_bytes_enabled = 0;
+ } else if (strcmp(tokens[t0 + 1], "bytes") == 0) {
+ p.stats.n_packets_enabled = 0;
+ p.stats.n_bytes_enabled = 1;
+ } else if (strcmp(tokens[t0 + 1], "both") == 0) {
+ p.stats.n_packets_enabled = 1;
+ p.stats.n_bytes_enabled = 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "pkts or bytes or both");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_STATS;
+ t0 += 2;
+ } /* stats */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "time") == 0)) {
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TIME;
+ t0 += 1;
+ } /* time */
+
+ if (t0 < n_tokens) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ ap = softnic_table_action_profile_create(softnic, name, &p);
+ if (ap == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name>
+ * period <timer_period_ms>
+ * offset_port_id <offset_port_id>
+ */
+static void
+cmd_pipeline(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct pipeline_params p;
+ char *name;
+ struct pipeline *pipeline;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "period") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "period");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.timer_period_ms,
+ tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "timer_period_ms");
+ return;
+ }
+
+ if (strcmp(tokens[4], "offset_port_id") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset_port_id");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.offset_port_id,
+ tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "offset_port_id");
+ return;
+ }
+
+ pipeline = softnic_pipeline_create(softnic, name, &p);
+ if (pipeline == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port in
+ * bsz <burst_size>
+ * link <link_name> rxq <queue_id>
+ * | swq <swq_name>
+ * | tmgr <tmgr_name>
+ * | tap <tap_name> mempool <mempool_name> mtu <mtu>
+ * | source mempool <mempool_name> file <file_name> bpp <n_bytes_per_pkt>
+ * [action <port_in_action_profile_name>]
+ * [disabled]
+ */
+static void
+cmd_pipeline_port_in(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_port_in_params p;
+ char *pipeline_name;
+ uint32_t t0;
+ int enabled, status;
+
+ if (n_tokens < 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (strcmp(tokens[4], "bsz") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.burst_size, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "burst_size");
+ return;
+ }
+
+ t0 = 6;
+
+ if (strcmp(tokens[t0], "link") == 0) {
+ if (n_tokens < t0 + 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in link");
+ return;
+ }
+
+ p.type = PORT_IN_RXQ;
+
+ p.dev_name = tokens[t0 + 1];
+
+ if (strcmp(tokens[t0 + 2], "rxq") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rxq");
+ return;
+ }
+
+ if (softnic_parser_read_uint16(&p.rxq.queue_id,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "queue_id");
+ return;
+ }
+ t0 += 4;
+ } else if (strcmp(tokens[t0], "swq") == 0) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in swq");
+ return;
+ }
+
+ p.type = PORT_IN_SWQ;
+
+ p.dev_name = tokens[t0 + 1];
+
+ t0 += 2;
+ } else if (strcmp(tokens[t0], "tmgr") == 0) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in tmgr");
+ return;
+ }
+
+ p.type = PORT_IN_TMGR;
+
+ p.dev_name = tokens[t0 + 1];
+
+ t0 += 2;
+ } else if (strcmp(tokens[t0], "tap") == 0) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in tap");
+ return;
+ }
+
+ p.type = PORT_IN_TAP;
+
+ p.dev_name = tokens[t0 + 1];
+
+ if (strcmp(tokens[t0 + 2], "mempool") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "mempool");
+ return;
+ }
+
+ p.tap.mempool_name = tokens[t0 + 3];
+
+ if (strcmp(tokens[t0 + 4], "mtu") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "mtu");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.tap.mtu,
+ tokens[t0 + 5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "mtu");
+ return;
+ }
+
+ t0 += 6;
+ } else if (strcmp(tokens[t0], "source") == 0) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in source");
+ return;
+ }
+
+ p.type = PORT_IN_SOURCE;
+
+ p.dev_name = NULL;
+
+ if (strcmp(tokens[t0 + 1], "mempool") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "mempool");
+ return;
+ }
+
+ p.source.mempool_name = tokens[t0 + 2];
+
+ if (strcmp(tokens[t0 + 3], "file") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "file");
+ return;
+ }
+
+ p.source.file_name = tokens[t0 + 4];
+
+ if (strcmp(tokens[t0 + 5], "bpp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "bpp");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.source.n_bytes_per_pkt,
+ tokens[t0 + 6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "n_bytes_per_pkt");
+ return;
+ }
+
+ t0 += 7;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+ return;
+ }
+
+ p.action_profile_name = NULL;
+ if (n_tokens > t0 &&
+ (strcmp(tokens[t0], "action") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "action");
+ return;
+ }
+
+ p.action_profile_name = tokens[t0 + 1];
+
+ t0 += 2;
+ }
+
+ enabled = 1;
+ if (n_tokens > t0 &&
+ (strcmp(tokens[t0], "disabled") == 0)) {
+ enabled = 0;
+
+ t0 += 1;
+ }
+
+ if (n_tokens != t0) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_port_in_create(softnic,
+ pipeline_name,
+ &p,
+ enabled);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port out
+ * bsz <burst_size>
+ * link <link_name> txq <txq_id>
+ * | swq <swq_name>
+ * | tmgr <tmgr_name>
+ * | tap <tap_name>
+ * | sink [file <file_name> pkts <max_n_pkts>]
+ */
+static void
+cmd_pipeline_port_out(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_port_out_params p;
+ char *pipeline_name;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens < 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "out") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out");
+ return;
+ }
+
+ if (strcmp(tokens[4], "bsz") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.burst_size, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "burst_size");
+ return;
+ }
+
+ if (strcmp(tokens[6], "link") == 0) {
+ if (n_tokens != 10) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out link");
+ return;
+ }
+
+ p.type = PORT_OUT_TXQ;
+
+ p.dev_name = tokens[7];
+
+ if (strcmp(tokens[8], "txq") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "txq");
+ return;
+ }
+
+ if (softnic_parser_read_uint16(&p.txq.queue_id,
+ tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "queue_id");
+ return;
+ }
+ } else if (strcmp(tokens[6], "swq") == 0) {
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out swq");
+ return;
+ }
+
+ p.type = PORT_OUT_SWQ;
+
+ p.dev_name = tokens[7];
+ } else if (strcmp(tokens[6], "tmgr") == 0) {
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out tmgr");
+ return;
+ }
+
+ p.type = PORT_OUT_TMGR;
+
+ p.dev_name = tokens[7];
+ } else if (strcmp(tokens[6], "tap") == 0) {
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out tap");
+ return;
+ }
+
+ p.type = PORT_OUT_TAP;
+
+ p.dev_name = tokens[7];
+ } else if (strcmp(tokens[6], "sink") == 0) {
+ if ((n_tokens != 7) && (n_tokens != 11)) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out sink");
+ return;
+ }
+
+ p.type = PORT_OUT_SINK;
+
+ p.dev_name = NULL;
+
+ if (n_tokens == 7) {
+ p.sink.file_name = NULL;
+ p.sink.max_n_pkts = 0;
+ } else {
+ if (strcmp(tokens[7], "file") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "file");
+ return;
+ }
+
+ p.sink.file_name = tokens[8];
+
+ if (strcmp(tokens[9], "pkts") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pkts");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.sink.max_n_pkts,
+ tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "max_n_pkts");
+ return;
+ }
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_port_out_create(softnic, pipeline_name, &p);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table
+ * match
+ * acl
+ * ipv4 | ipv6
+ * offset <ip_header_offset>
+ * size <n_rules>
+ * | array
+ * offset <key_offset>
+ * size <n_keys>
+ * | hash
+ * ext | lru
+ * key <key_size>
+ * mask <key_mask>
+ * offset <key_offset>
+ * buckets <n_buckets>
+ * size <n_keys>
+ * | lpm
+ * ipv4 | ipv6
+ * offset <ip_header_offset>
+ * size <n_rules>
+ * | stub
+ * [action <table_action_profile_name>]
+ */
+static void
+cmd_pipeline_table(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ uint8_t key_mask[TABLE_RULE_MATCH_SIZE_MAX];
+ struct softnic_table_params p;
+ char *pipeline_name;
+ uint32_t t0;
+ int status;
+
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (strcmp(tokens[3], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match");
+ return;
+ }
+
+ t0 = 4;
+ if (strcmp(tokens[t0], "acl") == 0) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table acl");
+ return;
+ }
+
+ p.match_type = TABLE_ACL;
+
+ if (strcmp(tokens[t0 + 1], "ipv4") == 0) {
+ p.match.acl.ip_version = 1;
+ } else if (strcmp(tokens[t0 + 1], "ipv6") == 0) {
+ p.match.acl.ip_version = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.acl.ip_header_offset,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "ip_header_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.acl.n_rules,
+ tokens[t0 + 5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_rules");
+ return;
+ }
+
+ t0 += 6;
+ } else if (strcmp(tokens[t0], "array") == 0) {
+ if (n_tokens < t0 + 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table array");
+ return;
+ }
+
+ p.match_type = TABLE_ARRAY;
+
+ if (strcmp(tokens[t0 + 1], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.array.key_offset,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.array.n_keys,
+ tokens[t0 + 4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_keys");
+ return;
+ }
+
+ t0 += 5;
+ } else if (strcmp(tokens[t0], "hash") == 0) {
+ uint32_t key_mask_size = TABLE_RULE_MATCH_SIZE_MAX;
+
+ if (n_tokens < t0 + 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table hash");
+ return;
+ }
+
+ p.match_type = TABLE_HASH;
+
+ if (strcmp(tokens[t0 + 1], "ext") == 0) {
+ p.match.hash.extendable_bucket = 1;
+ } else if (strcmp(tokens[t0 + 1], "lru") == 0) {
+ p.match.hash.extendable_bucket = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ext or lru");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "key") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key");
+ return;
+ }
+
+ if ((softnic_parser_read_uint32(&p.match.hash.key_size,
+ tokens[t0 + 3]) != 0) ||
+ p.match.hash.key_size == 0 ||
+ p.match.hash.key_size > TABLE_RULE_MATCH_SIZE_MAX) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_size");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ if ((softnic_parse_hex_string(tokens[t0 + 5],
+ key_mask, &key_mask_size) != 0) ||
+ key_mask_size != p.match.hash.key_size) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+ p.match.hash.key_mask = key_mask;
+
+ if (strcmp(tokens[t0 + 6], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.hash.key_offset,
+ tokens[t0 + 7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 8], "buckets") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buckets");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.hash.n_buckets,
+ tokens[t0 + 9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_buckets");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 10], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.hash.n_keys,
+ tokens[t0 + 11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_keys");
+ return;
+ }
+
+ t0 += 12;
+ } else if (strcmp(tokens[t0], "lpm") == 0) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table lpm");
+ return;
+ }
+
+ p.match_type = TABLE_LPM;
+
+ if (strcmp(tokens[t0 + 1], "ipv4") == 0) {
+ p.match.lpm.key_size = 4;
+ } else if (strcmp(tokens[t0 + 1], "ipv6") == 0) {
+ p.match.lpm.key_size = 16;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.lpm.key_offset,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.lpm.n_rules,
+ tokens[t0 + 5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_rules");
+ return;
+ }
+
+ t0 += 6;
+ } else if (strcmp(tokens[t0], "stub") == 0) {
+ p.match_type = TABLE_STUB;
+
+ t0 += 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+ return;
+ }
+
+ p.action_profile_name = NULL;
+ if (n_tokens > t0 &&
+ (strcmp(tokens[t0], "action") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "action");
+ return;
+ }
+
+ p.action_profile_name = tokens[t0 + 1];
+
+ t0 += 2;
+ }
+
+ if (n_tokens > t0) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_table_create(softnic, pipeline_name, &p);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port in <port_id> table <table_id>
+ */
+static void
+cmd_pipeline_port_in_table(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t port_id, table_id;
+ int status;
+
+ if (n_tokens != 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ status = softnic_pipeline_port_in_connect_to_table(softnic,
+ pipeline_name,
+ port_id,
+ table_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port in <port_id> stats read [clear]
+ */
+
+#define MSG_PIPELINE_PORT_IN_STATS \
+ "Pkts in: %" PRIu64 "\n" \
+ "Pkts dropped by AH: %" PRIu64 "\n" \
+ "Pkts dropped by other: %" PRIu64 "\n"
+
+static void
+cmd_pipeline_port_in_stats(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_pipeline_port_in_stats stats;
+ char *pipeline_name;
+ uint32_t port_id;
+ int clear, status;
+
+ if (n_tokens != 7 &&
+ n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[6], "read") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+ return;
+ }
+
+ clear = 0;
+ if (n_tokens == 8) {
+ if (strcmp(tokens[7], "clear") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+ return;
+ }
+
+ clear = 1;
+ }
+
+ status = softnic_pipeline_port_in_stats_read(softnic,
+ pipeline_name,
+ port_id,
+ &stats,
+ clear);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+
+ snprintf(out, out_size, MSG_PIPELINE_PORT_IN_STATS,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+/**
+ * pipeline <pipeline_name> port in <port_id> enable
+ */
+static void
+cmd_softnic_pipeline_port_in_enable(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t port_id;
+ int status;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "enable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable");
+ return;
+ }
+
+ status = softnic_pipeline_port_in_enable(softnic, pipeline_name, port_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port in <port_id> disable
+ */
+static void
+cmd_softnic_pipeline_port_in_disable(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t port_id;
+ int status;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "disable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable");
+ return;
+ }
+
+ status = softnic_pipeline_port_in_disable(softnic, pipeline_name, port_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port out <port_id> stats read [clear]
+ */
+#define MSG_PIPELINE_PORT_OUT_STATS \
+ "Pkts in: %" PRIu64 "\n" \
+ "Pkts dropped by AH: %" PRIu64 "\n" \
+ "Pkts dropped by other: %" PRIu64 "\n"
+
+static void
+cmd_pipeline_port_out_stats(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_pipeline_port_out_stats stats;
+ char *pipeline_name;
+ uint32_t port_id;
+ int clear, status;
+
+ if (n_tokens != 7 &&
+ n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "out") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[6], "read") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+ return;
+ }
+
+ clear = 0;
+ if (n_tokens == 8) {
+ if (strcmp(tokens[7], "clear") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+ return;
+ }
+
+ clear = 1;
+ }
+
+ status = softnic_pipeline_port_out_stats_read(softnic,
+ pipeline_name,
+ port_id,
+ &stats,
+ clear);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+
+ snprintf(out, out_size, MSG_PIPELINE_PORT_OUT_STATS,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> stats read [clear]
+ */
+#define MSG_PIPELINE_TABLE_STATS \
+ "Pkts in: %" PRIu64 "\n" \
+ "Pkts in with lookup miss: %" PRIu64 "\n" \
+ "Pkts in with lookup hit dropped by AH: %" PRIu64 "\n" \
+ "Pkts in with lookup hit dropped by others: %" PRIu64 "\n" \
+ "Pkts in with lookup miss dropped by AH: %" PRIu64 "\n" \
+ "Pkts in with lookup miss dropped by others: %" PRIu64 "\n"
+
+static void
+cmd_pipeline_table_stats(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_pipeline_table_stats stats;
+ char *pipeline_name;
+ uint32_t table_id;
+ int clear, status;
+
+ if (n_tokens != 6 &&
+ n_tokens != 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[5], "read") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+ return;
+ }
+
+ clear = 0;
+ if (n_tokens == 7) {
+ if (strcmp(tokens[6], "clear") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+ return;
+ }
+
+ clear = 1;
+ }
+
+ status = softnic_pipeline_table_stats_read(softnic,
+ pipeline_name,
+ table_id,
+ &stats,
+ clear);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+
+ snprintf(out, out_size, MSG_PIPELINE_TABLE_STATS,
+ stats.stats.n_pkts_in,
+ stats.stats.n_pkts_lookup_miss,
+ stats.n_pkts_dropped_by_lkp_hit_ah,
+ stats.n_pkts_dropped_lkp_hit,
+ stats.n_pkts_dropped_by_lkp_miss_ah,
+ stats.n_pkts_dropped_lkp_miss);
+}
+
+/**
+ * <match> ::=
+ *
+ * match
+ * acl
+ * priority <priority>
+ * ipv4 | ipv6 <sa> <sa_depth> <da> <da_depth>
+ * <sp0> <sp1> <dp0> <dp1> <proto>
+ * | array <pos>
+ * | hash
+ * raw <key>
+ * | ipv4_5tuple <sa> <da> <sp> <dp> <proto>
+ * | ipv6_5tuple <sa> <da> <sp> <dp> <proto>
+ * | ipv4_addr <addr>
+ * | ipv6_addr <addr>
+ * | qinq <svlan> <cvlan>
+ * | lpm
+ * ipv4 | ipv6 <addr> <depth>
+ */
+struct pkt_key_qinq {
+ uint16_t ethertype_svlan;
+ uint16_t svlan;
+ uint16_t ethertype_cvlan;
+ uint16_t cvlan;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv4_5tuple {
+ uint8_t time_to_live;
+ uint8_t proto;
+ uint16_t hdr_checksum;
+ uint32_t sa;
+ uint32_t da;
+ uint16_t sp;
+ uint16_t dp;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv6_5tuple {
+ uint16_t payload_length;
+ uint8_t proto;
+ uint8_t hop_limit;
+ uint8_t sa[16];
+ uint8_t da[16];
+ uint16_t sp;
+ uint16_t dp;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv4_addr {
+ uint32_t addr;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv6_addr {
+ uint8_t addr[16];
+} __attribute__((__packed__));
+
+static uint32_t
+parse_match(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size,
+ struct softnic_table_rule_match *m)
+{
+ memset(m, 0, sizeof(*m));
+
+ if (n_tokens < 2)
+ return 0;
+
+ if (strcmp(tokens[0], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match");
+ return 0;
+ }
+
+ if (strcmp(tokens[1], "acl") == 0) {
+ if (n_tokens < 14) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_ACL;
+
+ if (strcmp(tokens[2], "priority") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint32(&m->match.acl.priority,
+ tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "priority");
+ return 0;
+ }
+
+ if (strcmp(tokens[4], "ipv4") == 0) {
+ struct in_addr saddr, daddr;
+
+ m->match.acl.ip_version = 1;
+
+ if (softnic_parse_ipv4_addr(tokens[5], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+ m->match.acl.ipv4.sa = rte_be_to_cpu_32(saddr.s_addr);
+
+ if (softnic_parse_ipv4_addr(tokens[7], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+ m->match.acl.ipv4.da = rte_be_to_cpu_32(daddr.s_addr);
+ } else if (strcmp(tokens[4], "ipv6") == 0) {
+ struct in6_addr saddr, daddr;
+
+ m->match.acl.ip_version = 0;
+
+ if (softnic_parse_ipv6_addr(tokens[5], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+ memcpy(m->match.acl.ipv6.sa, saddr.s6_addr, 16);
+
+ if (softnic_parse_ipv6_addr(tokens[7], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+ memcpy(m->match.acl.ipv6.da, daddr.s6_addr, 16);
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ipv4 or ipv6");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint32(&m->match.acl.sa_depth,
+ tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa_depth");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint32(&m->match.acl.da_depth,
+ tokens[8]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da_depth");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&m->match.acl.sp0, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp0");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&m->match.acl.sp1, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp1");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&m->match.acl.dp0, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp0");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&m->match.acl.dp1, tokens[12]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp1");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint8(&m->match.acl.proto, tokens[13]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "proto");
+ return 0;
+ }
+
+ m->match.acl.proto_mask = 0xff;
+
+ return 14;
+ } /* acl */
+
+ if (strcmp(tokens[1], "array") == 0) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_ARRAY;
+
+ if (softnic_parser_read_uint32(&m->match.array.pos, tokens[2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pos");
+ return 0;
+ }
+
+ return 3;
+ } /* array */
+
+ if (strcmp(tokens[1], "hash") == 0) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_HASH;
+
+ if (strcmp(tokens[2], "raw") == 0) {
+ uint32_t key_size = TABLE_RULE_MATCH_SIZE_MAX;
+
+ if (n_tokens < 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_hex_string(tokens[3],
+ m->match.hash.key, &key_size) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key");
+ return 0;
+ }
+
+ return 4;
+ } /* hash raw */
+
+ if (strcmp(tokens[2], "ipv4_5tuple") == 0) {
+ struct pkt_key_ipv4_5tuple *ipv4 =
+ (struct pkt_key_ipv4_5tuple *)m->match.hash.key;
+ struct in_addr saddr, daddr;
+ uint16_t sp, dp;
+ uint8_t proto;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_ipv4_addr(tokens[3], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+
+ if (softnic_parse_ipv4_addr(tokens[4], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&sp, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&dp, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint8(&proto, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "proto");
+ return 0;
+ }
+
+ ipv4->sa = saddr.s_addr;
+ ipv4->da = daddr.s_addr;
+ ipv4->sp = rte_cpu_to_be_16(sp);
+ ipv4->dp = rte_cpu_to_be_16(dp);
+ ipv4->proto = proto;
+
+ return 8;
+ } /* hash ipv4_5tuple */
+
+ if (strcmp(tokens[2], "ipv6_5tuple") == 0) {
+ struct pkt_key_ipv6_5tuple *ipv6 =
+ (struct pkt_key_ipv6_5tuple *)m->match.hash.key;
+ struct in6_addr saddr, daddr;
+ uint16_t sp, dp;
+ uint8_t proto;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_ipv6_addr(tokens[3], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+
+ if (softnic_parse_ipv6_addr(tokens[4], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&sp, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&dp, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint8(&proto, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "proto");
+ return 0;
+ }
+
+ memcpy(ipv6->sa, saddr.s6_addr, 16);
+ memcpy(ipv6->da, daddr.s6_addr, 16);
+ ipv6->sp = rte_cpu_to_be_16(sp);
+ ipv6->dp = rte_cpu_to_be_16(dp);
+ ipv6->proto = proto;
+
+ return 8;
+ } /* hash ipv6_5tuple */
+
+ if (strcmp(tokens[2], "ipv4_addr") == 0) {
+ struct pkt_key_ipv4_addr *ipv4_addr =
+ (struct pkt_key_ipv4_addr *)m->match.hash.key;
+ struct in_addr addr;
+
+ if (n_tokens < 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_ipv4_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ ipv4_addr->addr = addr.s_addr;
+
+ return 4;
+ } /* hash ipv4_addr */
+
+ if (strcmp(tokens[2], "ipv6_addr") == 0) {
+ struct pkt_key_ipv6_addr *ipv6_addr =
+ (struct pkt_key_ipv6_addr *)m->match.hash.key;
+ struct in6_addr addr;
+
+ if (n_tokens < 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_ipv6_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ memcpy(ipv6_addr->addr, addr.s6_addr, 16);
+
+ return 4;
+ } /* hash ipv6_5tuple */
+
+ if (strcmp(tokens[2], "qinq") == 0) {
+ struct pkt_key_qinq *qinq =
+ (struct pkt_key_qinq *)m->match.hash.key;
+ uint16_t svlan, cvlan;
+
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if ((softnic_parser_read_uint16(&svlan, tokens[3]) != 0) ||
+ svlan > 0xFFF) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "svlan");
+ return 0;
+ }
+
+ if ((softnic_parser_read_uint16(&cvlan, tokens[4]) != 0) ||
+ cvlan > 0xFFF) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "cvlan");
+ return 0;
+ }
+
+ qinq->svlan = rte_cpu_to_be_16(svlan);
+ qinq->cvlan = rte_cpu_to_be_16(cvlan);
+
+ return 5;
+ } /* hash qinq */
+
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ } /* hash */
+
+ if (strcmp(tokens[1], "lpm") == 0) {
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_LPM;
+
+ if (strcmp(tokens[2], "ipv4") == 0) {
+ struct in_addr addr;
+
+ m->match.lpm.ip_version = 1;
+
+ if (softnic_parse_ipv4_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ m->match.lpm.ipv4 = rte_be_to_cpu_32(addr.s_addr);
+ } else if (strcmp(tokens[2], "ipv6") == 0) {
+ struct in6_addr addr;
+
+ m->match.lpm.ip_version = 0;
+
+ if (softnic_parse_ipv6_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ memcpy(m->match.lpm.ipv6, addr.s6_addr, 16);
+ } else {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "ipv4 or ipv6");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint8(&m->match.lpm.depth, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "depth");
+ return 0;
+ }
+
+ return 5;
+ } /* lpm */
+
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "acl or array or hash or lpm");
+ return 0;
+}
+
+/**
+ * table_action ::=
+ *
+ * action
+ * fwd
+ * drop
+ * | port <port_id>
+ * | meta
+ * | table <table_id>
+ * [balance <out0> ... <out7>]
+ * [meter
+ * tc0 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ * [tc1 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ * tc2 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ * tc3 meter <meter_profile_id> policer g <pa> y <pa> r <pa>]]
+ * [tm subport <subport_id> pipe <pipe_id>]
+ * [encap
+ * ether <da> <sa>
+ * | vlan <da> <sa> <pcp> <dei> <vid>
+ * | qinq <da> <sa> <pcp> <dei> <vid> <pcp> <dei> <vid>
+ * | mpls unicast | multicast
+ * <da> <sa>
+ * label0 <label> <tc> <ttl>
+ * [label1 <label> <tc> <ttl>
+ * [label2 <label> <tc> <ttl>
+ * [label3 <label> <tc> <ttl>]]]
+ * | pppoe <da> <sa> <session_id>]
+ * [nat ipv4 | ipv6 <addr> <port>]
+ * [ttl dec | keep]
+ * [stats]
+ * [time]
+ *
+ * where:
+ * <pa> ::= g | y | r | drop
+ */
+static uint32_t
+parse_table_action_fwd(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens == 0 ||
+ (strcmp(tokens[0], "fwd") != 0))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens && (strcmp(tokens[0], "drop") == 0)) {
+ a->fwd.action = RTE_PIPELINE_ACTION_DROP;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 1;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "port") == 0)) {
+ uint32_t id;
+
+ if (n_tokens < 2 ||
+ softnic_parser_read_uint32(&id, tokens[1]))
+ return 0;
+
+ a->fwd.action = RTE_PIPELINE_ACTION_PORT;
+ a->fwd.id = id;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 2;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "meta") == 0)) {
+ a->fwd.action = RTE_PIPELINE_ACTION_PORT_META;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 1;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "table") == 0)) {
+ uint32_t id;
+
+ if (n_tokens < 2 ||
+ softnic_parser_read_uint32(&id, tokens[1]))
+ return 0;
+
+ a->fwd.action = RTE_PIPELINE_ACTION_TABLE;
+ a->fwd.id = id;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 2;
+ }
+
+ return 0;
+}
+
+static uint32_t
+parse_table_action_balance(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ uint32_t i;
+
+ if (n_tokens == 0 ||
+ (strcmp(tokens[0], "balance") != 0))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens < RTE_TABLE_ACTION_LB_TABLE_SIZE)
+ return 0;
+
+ for (i = 0; i < RTE_TABLE_ACTION_LB_TABLE_SIZE; i++)
+ if (softnic_parser_read_uint32(&a->lb.out[i], tokens[i]) != 0)
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_LB;
+ return 1 + RTE_TABLE_ACTION_LB_TABLE_SIZE;
+}
+
+static int
+parse_policer_action(char *token, enum rte_table_action_policer *a)
+{
+ if (strcmp(token, "g") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_COLOR_GREEN;
+ return 0;
+ }
+
+ if (strcmp(token, "y") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_COLOR_YELLOW;
+ return 0;
+ }
+
+ if (strcmp(token, "r") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_COLOR_RED;
+ return 0;
+ }
+
+ if (strcmp(token, "drop") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_DROP;
+ return 0;
+ }
+
+ return -1;
+}
+
+static uint32_t
+parse_table_action_meter_tc(char **tokens,
+ uint32_t n_tokens,
+ struct rte_table_action_mtr_tc_params *mtr)
+{
+ if (n_tokens < 9 ||
+ strcmp(tokens[0], "meter") ||
+ softnic_parser_read_uint32(&mtr->meter_profile_id, tokens[1]) ||
+ strcmp(tokens[2], "policer") ||
+ strcmp(tokens[3], "g") ||
+ parse_policer_action(tokens[4], &mtr->policer[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[5], "y") ||
+ parse_policer_action(tokens[6], &mtr->policer[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[7], "r") ||
+ parse_policer_action(tokens[8], &mtr->policer[e_RTE_METER_RED]))
+ return 0;
+
+ return 9;
+}
+
+static uint32_t
+parse_table_action_meter(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "meter"))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens < 10 ||
+ strcmp(tokens[0], "tc0") ||
+ (parse_table_action_meter_tc(tokens + 1,
+ n_tokens - 1,
+ &a->mtr.mtr[0]) == 0))
+ return 0;
+
+ tokens += 10;
+ n_tokens -= 10;
+
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "tc1")) {
+ a->mtr.tc_mask = 1;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
+ return 1 + 10;
+ }
+
+ if (n_tokens < 30 ||
+ (parse_table_action_meter_tc(tokens + 1,
+ n_tokens - 1, &a->mtr.mtr[1]) == 0) ||
+ strcmp(tokens[10], "tc2") ||
+ (parse_table_action_meter_tc(tokens + 11,
+ n_tokens - 11, &a->mtr.mtr[2]) == 0) ||
+ strcmp(tokens[20], "tc3") ||
+ (parse_table_action_meter_tc(tokens + 21,
+ n_tokens - 21, &a->mtr.mtr[3]) == 0))
+ return 0;
+
+ a->mtr.tc_mask = 0xF;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
+ return 1 + 10 + 3 * 10;
+}
+
+static uint32_t
+parse_table_action_tm(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ uint32_t subport_id, pipe_id;
+
+ if (n_tokens < 5 ||
+ strcmp(tokens[0], "tm") ||
+ strcmp(tokens[1], "subport") ||
+ softnic_parser_read_uint32(&subport_id, tokens[2]) ||
+ strcmp(tokens[3], "pipe") ||
+ softnic_parser_read_uint32(&pipe_id, tokens[4]))
+ return 0;
+
+ a->tm.subport_id = subport_id;
+ a->tm.pipe_id = pipe_id;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TM;
+ return 5;
+}
+
+static uint32_t
+parse_table_action_encap(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "encap"))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ /* ether */
+ if (n_tokens && (strcmp(tokens[0], "ether") == 0)) {
+ if (n_tokens < 3 ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.ether.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.ether.ether.sa))
+ return 0;
+
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_ETHER;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 3;
+ }
+
+ /* vlan */
+ if (n_tokens && (strcmp(tokens[0], "vlan") == 0)) {
+ uint32_t pcp, dei, vid;
+
+ if (n_tokens < 6 ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.vlan.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.vlan.ether.sa) ||
+ softnic_parser_read_uint32(&pcp, tokens[3]) ||
+ pcp > 0x7 ||
+ softnic_parser_read_uint32(&dei, tokens[4]) ||
+ dei > 0x1 ||
+ softnic_parser_read_uint32(&vid, tokens[5]) ||
+ vid > 0xFFF)
+ return 0;
+
+ a->encap.vlan.vlan.pcp = pcp & 0x7;
+ a->encap.vlan.vlan.dei = dei & 0x1;
+ a->encap.vlan.vlan.vid = vid & 0xFFF;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_VLAN;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 6;
+ }
+
+ /* qinq */
+ if (n_tokens && (strcmp(tokens[0], "qinq") == 0)) {
+ uint32_t svlan_pcp, svlan_dei, svlan_vid;
+ uint32_t cvlan_pcp, cvlan_dei, cvlan_vid;
+
+ if (n_tokens < 9 ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.qinq.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.qinq.ether.sa) ||
+ softnic_parser_read_uint32(&svlan_pcp, tokens[3]) ||
+ svlan_pcp > 0x7 ||
+ softnic_parser_read_uint32(&svlan_dei, tokens[4]) ||
+ svlan_dei > 0x1 ||
+ softnic_parser_read_uint32(&svlan_vid, tokens[5]) ||
+ svlan_vid > 0xFFF ||
+ softnic_parser_read_uint32(&cvlan_pcp, tokens[6]) ||
+ cvlan_pcp > 0x7 ||
+ softnic_parser_read_uint32(&cvlan_dei, tokens[7]) ||
+ cvlan_dei > 0x1 ||
+ softnic_parser_read_uint32(&cvlan_vid, tokens[8]) ||
+ cvlan_vid > 0xFFF)
+ return 0;
+
+ a->encap.qinq.svlan.pcp = svlan_pcp & 0x7;
+ a->encap.qinq.svlan.dei = svlan_dei & 0x1;
+ a->encap.qinq.svlan.vid = svlan_vid & 0xFFF;
+ a->encap.qinq.cvlan.pcp = cvlan_pcp & 0x7;
+ a->encap.qinq.cvlan.dei = cvlan_dei & 0x1;
+ a->encap.qinq.cvlan.vid = cvlan_vid & 0xFFF;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_QINQ;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 9;
+ }
+
+ /* mpls */
+ if (n_tokens && (strcmp(tokens[0], "mpls") == 0)) {
+ uint32_t label, tc, ttl;
+
+ if (n_tokens < 8)
+ return 0;
+
+ if (strcmp(tokens[1], "unicast") == 0)
+ a->encap.mpls.unicast = 1;
+ else if (strcmp(tokens[1], "multicast") == 0)
+ a->encap.mpls.unicast = 0;
+ else
+ return 0;
+
+ if (softnic_parse_mac_addr(tokens[2], &a->encap.mpls.ether.da) ||
+ softnic_parse_mac_addr(tokens[3], &a->encap.mpls.ether.sa) ||
+ strcmp(tokens[4], "label0") ||
+ softnic_parser_read_uint32(&label, tokens[5]) ||
+ label > 0xFFFFF ||
+ softnic_parser_read_uint32(&tc, tokens[6]) ||
+ tc > 0x7 ||
+ softnic_parser_read_uint32(&ttl, tokens[7]) ||
+ ttl > 0x3F)
+ return 0;
+
+ a->encap.mpls.mpls[0].label = label;
+ a->encap.mpls.mpls[0].tc = tc;
+ a->encap.mpls.mpls[0].ttl = ttl;
+
+ tokens += 8;
+ n_tokens -= 8;
+
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "label1")) {
+ a->encap.mpls.mpls_count = 1;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8;
+ }
+
+ if (n_tokens < 4 ||
+ softnic_parser_read_uint32(&label, tokens[1]) ||
+ label > 0xFFFFF ||
+ softnic_parser_read_uint32(&tc, tokens[2]) ||
+ tc > 0x7 ||
+ softnic_parser_read_uint32(&ttl, tokens[3]) ||
+ ttl > 0x3F)
+ return 0;
+
+ a->encap.mpls.mpls[1].label = label;
+ a->encap.mpls.mpls[1].tc = tc;
+ a->encap.mpls.mpls[1].ttl = ttl;
+
+ tokens += 4;
+ n_tokens -= 4;
+
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "label2")) {
+ a->encap.mpls.mpls_count = 2;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8 + 4;
+ }
+
+ if (n_tokens < 4 ||
+ softnic_parser_read_uint32(&label, tokens[1]) ||
+ label > 0xFFFFF ||
+ softnic_parser_read_uint32(&tc, tokens[2]) ||
+ tc > 0x7 ||
+ softnic_parser_read_uint32(&ttl, tokens[3]) ||
+ ttl > 0x3F)
+ return 0;
+
+ a->encap.mpls.mpls[2].label = label;
+ a->encap.mpls.mpls[2].tc = tc;
+ a->encap.mpls.mpls[2].ttl = ttl;
+
+ tokens += 4;
+ n_tokens -= 4;
+
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "label3")) {
+ a->encap.mpls.mpls_count = 3;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8 + 4 + 4;
+ }
+
+ if (n_tokens < 4 ||
+ softnic_parser_read_uint32(&label, tokens[1]) ||
+ label > 0xFFFFF ||
+ softnic_parser_read_uint32(&tc, tokens[2]) ||
+ tc > 0x7 ||
+ softnic_parser_read_uint32(&ttl, tokens[3]) ||
+ ttl > 0x3F)
+ return 0;
+
+ a->encap.mpls.mpls[3].label = label;
+ a->encap.mpls.mpls[3].tc = tc;
+ a->encap.mpls.mpls[3].ttl = ttl;
+
+ a->encap.mpls.mpls_count = 4;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8 + 4 + 4 + 4;
+ }
+
+ /* pppoe */
+ if (n_tokens && (strcmp(tokens[0], "pppoe") == 0)) {
+ if (n_tokens < 4 ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.pppoe.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.pppoe.ether.sa) ||
+ softnic_parser_read_uint16(&a->encap.pppoe.pppoe.session_id,
+ tokens[3]))
+ return 0;
+
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_PPPOE;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 4;
+ }
+
+ return 0;
+}
+
+static uint32_t
+parse_table_action_nat(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 4 ||
+ strcmp(tokens[0], "nat"))
+ return 0;
+
+ if (strcmp(tokens[1], "ipv4") == 0) {
+ struct in_addr addr;
+ uint16_t port;
+
+ if (softnic_parse_ipv4_addr(tokens[2], &addr) ||
+ softnic_parser_read_uint16(&port, tokens[3]))
+ return 0;
+
+ a->nat.ip_version = 1;
+ a->nat.addr.ipv4 = rte_be_to_cpu_32(addr.s_addr);
+ a->nat.port = port;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_NAT;
+ return 4;
+ }
+
+ if (strcmp(tokens[1], "ipv6") == 0) {
+ struct in6_addr addr;
+ uint16_t port;
+
+ if (softnic_parse_ipv6_addr(tokens[2], &addr) ||
+ softnic_parser_read_uint16(&port, tokens[3]))
+ return 0;
+
+ a->nat.ip_version = 0;
+ memcpy(a->nat.addr.ipv6, addr.s6_addr, 16);
+ a->nat.port = port;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_NAT;
+ return 4;
+ }
+
+ return 0;
+}
+
+static uint32_t
+parse_table_action_ttl(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 2 ||
+ strcmp(tokens[0], "ttl"))
+ return 0;
+
+ if (strcmp(tokens[1], "dec") == 0)
+ a->ttl.decrement = 1;
+ else if (strcmp(tokens[1], "keep") == 0)
+ a->ttl.decrement = 0;
+ else
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TTL;
+ return 2;
+}
+
+static uint32_t
+parse_table_action_stats(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 1 ||
+ strcmp(tokens[0], "stats"))
+ return 0;
+
+ a->stats.n_packets = 0;
+ a->stats.n_bytes = 0;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_STATS;
+ return 1;
+}
+
+static uint32_t
+parse_table_action_time(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 1 ||
+ strcmp(tokens[0], "time"))
+ return 0;
+
+ a->time.time = rte_rdtsc();
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TIME;
+ return 1;
+}
+
+static uint32_t
+parse_table_action(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size,
+ struct softnic_table_rule_action *a)
+{
+ uint32_t n_tokens0 = n_tokens;
+
+ memset(a, 0, sizeof(*a));
+
+ if (n_tokens < 2 ||
+ strcmp(tokens[0], "action"))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens && (strcmp(tokens[0], "fwd") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_fwd(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action fwd");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "balance") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_balance(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action balance");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "meter") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_meter(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action meter");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "tm") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_tm(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action tm");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "encap") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_encap(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action encap");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "nat") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_nat(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action nat");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "ttl") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_ttl(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action ttl");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "stats") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_stats(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action stats");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "time") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_time(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action time");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens0 - n_tokens == 1) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "action");
+ return 0;
+ }
+
+ return n_tokens0 - n_tokens;
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule add
+ * match <match>
+ * action <table_action>
+ */
+static void
+cmd_softnic_pipeline_table_rule_add(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_rule_match m;
+ struct softnic_table_rule_action a;
+ char *pipeline_name;
+ void *data;
+ uint32_t table_id, t0, n_tokens_parsed;
+ int status;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ t0 = 6;
+
+ /* match */
+ n_tokens_parsed = parse_match(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &m);
+ if (n_tokens_parsed == 0)
+ return;
+ t0 += n_tokens_parsed;
+
+ /* action */
+ n_tokens_parsed = parse_table_action(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &a);
+ if (n_tokens_parsed == 0)
+ return;
+ t0 += n_tokens_parsed;
+
+ if (t0 != n_tokens) {
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_table_rule_add(softnic,
+ pipeline_name,
+ table_id,
+ &m,
+ &a,
+ &data);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule add
+ * match
+ * default
+ * action
+ * fwd
+ * drop
+ * | port <port_id>
+ * | meta
+ * | table <table_id>
+ */
+static void
+cmd_softnic_pipeline_table_rule_add_default(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_rule_action action;
+ void *data;
+ char *pipeline_name;
+ uint32_t table_id;
+ int status;
+
+ if (n_tokens != 11 &&
+ n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ if (strcmp(tokens[6], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "match");
+ return;
+ }
+
+ if (strcmp(tokens[7], "default") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "default");
+ return;
+ }
+
+ if (strcmp(tokens[8], "action") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "action");
+ return;
+ }
+
+ if (strcmp(tokens[9], "fwd") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "fwd");
+ return;
+ }
+
+ action.action_mask = 1 << RTE_TABLE_ACTION_FWD;
+
+ if (strcmp(tokens[10], "drop") == 0) {
+ if (n_tokens != 11) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_DROP;
+ } else if (strcmp(tokens[10], "port") == 0) {
+ uint32_t id;
+
+ if (n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&id, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_PORT;
+ action.fwd.id = id;
+ } else if (strcmp(tokens[10], "meta") == 0) {
+ if (n_tokens != 11) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_PORT_META;
+ } else if (strcmp(tokens[10], "table") == 0) {
+ uint32_t id;
+
+ if (n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&id, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_TABLE;
+ action.fwd.id = id;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "drop or port or meta or table");
+ return;
+ }
+
+ status = softnic_pipeline_table_rule_add_default(softnic,
+ pipeline_name,
+ table_id,
+ &action,
+ &data);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule add bulk <file_name> <n_rules>
+ *
+ * File <file_name>:
+ * - line format: match <match> action <action>
+ */
+static int
+cli_rule_file_process(const char *file_name,
+ size_t line_len_max,
+ struct softnic_table_rule_match *m,
+ struct softnic_table_rule_action *a,
+ uint32_t *n_rules,
+ uint32_t *line_number,
+ char *out,
+ size_t out_size);
+
+static void
+cmd_softnic_pipeline_table_rule_add_bulk(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_rule_match *match;
+ struct softnic_table_rule_action *action;
+ void **data;
+ char *pipeline_name, *file_name;
+ uint32_t table_id, n_rules, n_rules_parsed, line_number;
+ int status;
+
+ if (n_tokens != 9) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ if (strcmp(tokens[6], "bulk") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "bulk");
+ return;
+ }
+
+ file_name = tokens[7];
+
+ if ((softnic_parser_read_uint32(&n_rules, tokens[8]) != 0) ||
+ n_rules == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_rules");
+ return;
+ }
+
+ /* Memory allocation. */
+ match = calloc(n_rules, sizeof(struct softnic_table_rule_match));
+ action = calloc(n_rules, sizeof(struct softnic_table_rule_action));
+ data = calloc(n_rules, sizeof(void *));
+ if (match == NULL ||
+ action == NULL ||
+ data == NULL) {
+ snprintf(out, out_size, MSG_OUT_OF_MEMORY);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+
+ /* Load rule file */
+ n_rules_parsed = n_rules;
+ status = cli_rule_file_process(file_name,
+ 1024,
+ match,
+ action,
+ &n_rules_parsed,
+ &line_number,
+ out,
+ out_size);
+ if (status) {
+ snprintf(out, out_size, MSG_FILE_ERR, file_name, line_number);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+ if (n_rules_parsed != n_rules) {
+ snprintf(out, out_size, MSG_FILE_NOT_ENOUGH, file_name);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+
+ /* Rule bulk add */
+ status = softnic_pipeline_table_rule_add_bulk(softnic,
+ pipeline_name,
+ table_id,
+ match,
+ action,
+ data,
+ &n_rules);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+
+ /* Memory free */
+ free(data);
+ free(action);
+ free(match);
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule delete
+ * match <match>
+ */
+static void
+cmd_softnic_pipeline_table_rule_delete(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_rule_match m;
+ char *pipeline_name;
+ uint32_t table_id, n_tokens_parsed, t0;
+ int status;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "delete") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete");
+ return;
+ }
+
+ t0 = 6;
+
+ /* match */
+ n_tokens_parsed = parse_match(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &m);
+ if (n_tokens_parsed == 0)
+ return;
+ t0 += n_tokens_parsed;
+
+ if (n_tokens != t0) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_table_rule_delete(softnic,
+ pipeline_name,
+ table_id,
+ &m);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule delete
+ * match
+ * default
+ */
+static void
+cmd_softnic_pipeline_table_rule_delete_default(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t table_id;
+ int status;
+
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "delete") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete");
+ return;
+ }
+
+ if (strcmp(tokens[6], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "match");
+ return;
+ }
+
+ if (strcmp(tokens[7], "default") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "default");
+ return;
+ }
+
+ status = softnic_pipeline_table_rule_delete_default(softnic,
+ pipeline_name,
+ table_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule read stats [clear]
+ */
+static void
+cmd_softnic_pipeline_table_rule_stats_read(struct pmd_internals *softnic __rte_unused,
+ char **tokens,
+ uint32_t n_tokens __rte_unused,
+ char *out,
+ size_t out_size)
+{
+ snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]);
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> meter profile <meter_profile_id>
+ * add srtcm cir <cir> cbs <cbs> ebs <ebs>
+ * | trtcm cir <cir> pir <pir> cbs <cbs> pbs <pbs>
+ */
+static void
+cmd_pipeline_table_meter_profile_add(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_table_action_meter_profile p;
+ char *pipeline_name;
+ uint32_t table_id, meter_profile_id;
+ int status;
+
+ if (n_tokens < 9) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "meter") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "meter");
+ return;
+ }
+
+ if (strcmp(tokens[5], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&meter_profile_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "meter_profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[7], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ if (strcmp(tokens[8], "srtcm") == 0) {
+ if (n_tokens != 15) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return;
+ }
+
+ p.alg = RTE_TABLE_ACTION_METER_SRTCM;
+
+ if (strcmp(tokens[9], "cir") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cir");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.srtcm.cir, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cir");
+ return;
+ }
+
+ if (strcmp(tokens[11], "cbs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cbs");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.srtcm.cbs, tokens[12]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cbs");
+ return;
+ }
+
+ if (strcmp(tokens[13], "ebs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "ebs");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.srtcm.ebs, tokens[14]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "ebs");
+ return;
+ }
+ } else if (strcmp(tokens[8], "trtcm") == 0) {
+ if (n_tokens != 17) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ p.alg = RTE_TABLE_ACTION_METER_TRTCM;
+
+ if (strcmp(tokens[9], "cir") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cir");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.trtcm.cir, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cir");
+ return;
+ }
+
+ if (strcmp(tokens[11], "pir") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pir");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.trtcm.pir, tokens[12]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pir");
+ return;
+ }
+ if (strcmp(tokens[13], "cbs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cbs");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.trtcm.cbs, tokens[14]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cbs");
+ return;
+ }
+
+ if (strcmp(tokens[15], "pbs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pbs");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.trtcm.pbs, tokens[16]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pbs");
+ return;
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_table_mtr_profile_add(softnic,
+ pipeline_name,
+ table_id,
+ meter_profile_id,
+ &p);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id>
+ * meter profile <meter_profile_id> delete
+ */
+static void
+cmd_pipeline_table_meter_profile_delete(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t table_id, meter_profile_id;
+ int status;
+
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "meter") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "meter");
+ return;
+ }
+
+ if (strcmp(tokens[5], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&meter_profile_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "meter_profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[7], "delete") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete");
+ return;
+ }
+
+ status = softnic_pipeline_table_mtr_profile_delete(softnic,
+ pipeline_name,
+ table_id,
+ meter_profile_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule read meter [clear]
+ */
+static void
+cmd_pipeline_table_rule_meter_read(struct pmd_internals *softnic __rte_unused,
+ char **tokens,
+ uint32_t n_tokens __rte_unused,
+ char *out,
+ size_t out_size)
+{
+ snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]);
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> dscp <file_name>
+ *
+ * File <file_name>:
+ * - exactly 64 lines
+ * - line format: <tc_id> <tc_queue_id> <color>, with <color> as: g | y | r
+ */
+static int
+load_dscp_table(struct rte_table_action_dscp_table *dscp_table,
+ const char *file_name,
+ uint32_t *line_number)
+{
+ FILE *f = NULL;
+ uint32_t dscp, l;
+
+ /* Check input arguments */
+ if (dscp_table == NULL ||
+ file_name == NULL ||
+ line_number == NULL) {
+ if (line_number)
+ *line_number = 0;
+ return -EINVAL;
+ }
+
+ /* Open input file */
+ f = fopen(file_name, "r");
+ if (f == NULL) {
+ *line_number = 0;
+ return -EINVAL;
+ }
+
+ /* Read file */
+ for (dscp = 0, l = 1; ; l++) {
+ char line[64];
+ char *tokens[3];
+ enum rte_meter_color color;
+ uint32_t tc_id, tc_queue_id, n_tokens = RTE_DIM(tokens);
+
+ if (fgets(line, sizeof(line), f) == NULL)
+ break;
+
+ if (is_comment(line))
+ continue;
+
+ if (softnic_parse_tokenize_string(line, tokens, &n_tokens)) {
+ *line_number = l;
+ fclose(f);
+ return -EINVAL;
+ }
+
+ if (n_tokens == 0)
+ continue;
+
+ if (dscp >= RTE_DIM(dscp_table->entry) ||
+ n_tokens != RTE_DIM(tokens) ||
+ softnic_parser_read_uint32(&tc_id, tokens[0]) ||
+ tc_id >= RTE_TABLE_ACTION_TC_MAX ||
+ softnic_parser_read_uint32(&tc_queue_id, tokens[1]) ||
+ tc_queue_id >= RTE_TABLE_ACTION_TC_QUEUE_MAX ||
+ (strlen(tokens[2]) != 1)) {
+ *line_number = l;
+ fclose(f);
+ return -EINVAL;
+ }
+
+ switch (tokens[2][0]) {
+ case 'g':
+ case 'G':
+ color = e_RTE_METER_GREEN;
+ break;
+
+ case 'y':
+ case 'Y':
+ color = e_RTE_METER_YELLOW;
+ break;
+
+ case 'r':
+ case 'R':
+ color = e_RTE_METER_RED;
+ break;
+
+ default:
+ *line_number = l;
+ fclose(f);
+ return -EINVAL;
+ }
+
+ dscp_table->entry[dscp].tc_id = tc_id;
+ dscp_table->entry[dscp].tc_queue_id = tc_queue_id;
+ dscp_table->entry[dscp].color = color;
+ dscp++;
+ }
+
+ /* Close file */
+ fclose(f);
+ return 0;
+}
+
+static void
+cmd_pipeline_table_dscp(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_table_action_dscp_table dscp_table;
+ char *pipeline_name, *file_name;
+ uint32_t table_id, line_number;
+ int status;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "dscp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "dscp");
+ return;
+ }
+
+ file_name = tokens[5];
+
+ status = load_dscp_table(&dscp_table, file_name, &line_number);
+ if (status) {
+ snprintf(out, out_size, MSG_FILE_ERR, file_name, line_number);
+ return;
+ }
+
+ status = softnic_pipeline_table_dscp_table_update(softnic,
+ pipeline_name,
+ table_id,
+ UINT64_MAX,
+ &dscp_table);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule read ttl [clear]
+ */
+static void
+cmd_softnic_pipeline_table_rule_ttl_read(struct pmd_internals *softnic __rte_unused,
+ char **tokens,
+ uint32_t n_tokens __rte_unused,
+ char *out,
+ size_t out_size)
+{
+ snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]);
+}
+
+/**
+ * thread <thread_id> pipeline <pipeline_name> enable
+ */
+static void
+cmd_softnic_thread_pipeline_enable(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t thread_id;
+ int status;
+
+ if (n_tokens != 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&thread_id, tokens[1]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
+ return;
+ }
+
+ if (strcmp(tokens[2], "pipeline") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
+ return;
+ }
+
+ pipeline_name = tokens[3];
+
+ if (strcmp(tokens[4], "enable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable");
+ return;
+ }
+
+ status = softnic_thread_pipeline_enable(softnic, thread_id, pipeline_name);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, "thread pipeline enable");
+ return;
+ }
+}
+
+/**
+ * thread <thread_id> pipeline <pipeline_name> disable
+ */
+static void
+cmd_softnic_thread_pipeline_disable(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t thread_id;
+ int status;
+
+ if (n_tokens != 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&thread_id, tokens[1]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
+ return;
+ }
+
+ if (strcmp(tokens[2], "pipeline") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
+ return;
+ }
+
+ pipeline_name = tokens[3];
+
+ if (strcmp(tokens[4], "disable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable");
+ return;
+ }
+
+ status = softnic_thread_pipeline_disable(softnic, thread_id, pipeline_name);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL,
+ "thread pipeline disable");
+ return;
+ }
+}
+
+void
+softnic_cli_process(char *in, char *out, size_t out_size, void *arg)
+{
+ char *tokens[CMD_MAX_TOKENS];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ struct pmd_internals *softnic = arg;
+ int status;
+
+ if (is_comment(in))
+ return;
+
+ status = softnic_parse_tokenize_string(in, tokens, &n_tokens);
+ if (status) {
+ snprintf(out, out_size, MSG_ARG_TOO_MANY, "");
+ return;
+ }
+
+ if (n_tokens == 0)
+ return;
+
+ if (strcmp(tokens[0], "mempool") == 0) {
+ cmd_mempool(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "link") == 0) {
+ cmd_link(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "swq") == 0) {
+ cmd_swq(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "tmgr") == 0) {
+ if (n_tokens == 2) {
+ cmd_tmgr(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 3 &&
+ (strcmp(tokens[1], "shaper") == 0) &&
+ (strcmp(tokens[2], "profile") == 0)) {
+ cmd_tmgr_shaper_profile(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 3 &&
+ (strcmp(tokens[1], "shared") == 0) &&
+ (strcmp(tokens[2], "shaper") == 0)) {
+ cmd_tmgr_shared_shaper(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[1], "node") == 0)) {
+ cmd_tmgr_node(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[1], "hierarchy-default") == 0)) {
+ cmd_tmgr_hierarchy_default(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 3 &&
+ (strcmp(tokens[1], "hierarchy") == 0) &&
+ (strcmp(tokens[2], "commit") == 0)) {
+ cmd_tmgr_hierarchy_commit(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+ }
+
+ if (strcmp(tokens[0], "tap") == 0) {
+ cmd_tap(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "port") == 0) {
+ cmd_port_in_action_profile(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "table") == 0) {
+ cmd_table_action_profile(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "pipeline") == 0) {
+ if (n_tokens >= 3 &&
+ (strcmp(tokens[2], "period") == 0)) {
+ cmd_pipeline(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[4], "bsz") == 0)) {
+ cmd_pipeline_port_in(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "out") == 0) &&
+ (strcmp(tokens[4], "bsz") == 0)) {
+ cmd_pipeline_port_out(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 4 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[3], "match") == 0)) {
+ cmd_pipeline_table(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "table") == 0)) {
+ cmd_pipeline_port_in_table(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "stats") == 0)) {
+ cmd_pipeline_port_in_stats(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "enable") == 0)) {
+ cmd_softnic_pipeline_port_in_enable(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "disable") == 0)) {
+ cmd_softnic_pipeline_port_in_disable(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "out") == 0) &&
+ (strcmp(tokens[5], "stats") == 0)) {
+ cmd_pipeline_port_out_stats(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "stats") == 0)) {
+ cmd_pipeline_table_stats(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "add") == 0) &&
+ (strcmp(tokens[6], "match") == 0)) {
+ if (n_tokens >= 8 &&
+ (strcmp(tokens[7], "default") == 0)) {
+ cmd_softnic_pipeline_table_rule_add_default(softnic, tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ cmd_softnic_pipeline_table_rule_add(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "add") == 0) &&
+ (strcmp(tokens[6], "bulk") == 0)) {
+ cmd_softnic_pipeline_table_rule_add_bulk(softnic, tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "delete") == 0) &&
+ (strcmp(tokens[6], "match") == 0)) {
+ if (n_tokens >= 8 &&
+ (strcmp(tokens[7], "default") == 0)) {
+ cmd_softnic_pipeline_table_rule_delete_default(softnic, tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ cmd_softnic_pipeline_table_rule_delete(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "read") == 0) &&
+ (strcmp(tokens[6], "stats") == 0)) {
+ cmd_softnic_pipeline_table_rule_stats_read(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 8 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "meter") == 0) &&
+ (strcmp(tokens[5], "profile") == 0) &&
+ (strcmp(tokens[7], "add") == 0)) {
+ cmd_pipeline_table_meter_profile_add(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 8 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "meter") == 0) &&
+ (strcmp(tokens[5], "profile") == 0) &&
+ (strcmp(tokens[7], "delete") == 0)) {
+ cmd_pipeline_table_meter_profile_delete(softnic, tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "read") == 0) &&
+ (strcmp(tokens[6], "meter") == 0)) {
+ cmd_pipeline_table_rule_meter_read(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "dscp") == 0)) {
+ cmd_pipeline_table_dscp(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "read") == 0) &&
+ (strcmp(tokens[6], "ttl") == 0)) {
+ cmd_softnic_pipeline_table_rule_ttl_read(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+ }
+
+ if (strcmp(tokens[0], "thread") == 0) {
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[4], "enable") == 0)) {
+ cmd_softnic_thread_pipeline_enable(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[4], "disable") == 0)) {
+ cmd_softnic_thread_pipeline_disable(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+ }
+
+ snprintf(out, out_size, MSG_CMD_UNKNOWN, tokens[0]);
+}
+
+int
+softnic_cli_script_process(struct pmd_internals *softnic,
+ const char *file_name,
+ size_t msg_in_len_max,
+ size_t msg_out_len_max)
+{
+ char *msg_in = NULL, *msg_out = NULL;
+ FILE *f = NULL;
+
+ /* Check input arguments */
+ if (file_name == NULL ||
+ (strlen(file_name) == 0) ||
+ msg_in_len_max == 0 ||
+ msg_out_len_max == 0)
+ return -EINVAL;
+
+ msg_in = malloc(msg_in_len_max + 1);
+ msg_out = malloc(msg_out_len_max + 1);
+ if (msg_in == NULL ||
+ msg_out == NULL) {
+ free(msg_out);
+ free(msg_in);
+ return -ENOMEM;
+ }
+
+ /* Open input file */
+ f = fopen(file_name, "r");
+ if (f == NULL) {
+ free(msg_out);
+ free(msg_in);
+ return -EIO;
+ }
+
+ /* Read file */
+ for ( ; ; ) {
+ if (fgets(msg_in, msg_in_len_max + 1, f) == NULL)
+ break;
+
+ printf("%s", msg_in);
+ msg_out[0] = 0;
+
+ softnic_cli_process(msg_in,
+ msg_out,
+ msg_out_len_max,
+ softnic);
+
+ if (strlen(msg_out))
+ printf("%s", msg_out);
+ }
+
+ /* Close file */
+ fclose(f);
+ free(msg_out);
+ free(msg_in);
+ return 0;
+}
+
+static int
+cli_rule_file_process(const char *file_name,
+ size_t line_len_max,
+ struct softnic_table_rule_match *m,
+ struct softnic_table_rule_action *a,
+ uint32_t *n_rules,
+ uint32_t *line_number,
+ char *out,
+ size_t out_size)
+{
+ FILE *f = NULL;
+ char *line = NULL;
+ uint32_t rule_id, line_id;
+ int status = 0;
+
+ /* Check input arguments */
+ if (file_name == NULL ||
+ (strlen(file_name) == 0) ||
+ line_len_max == 0) {
+ *line_number = 0;
+ return -EINVAL;
+ }
+
+ /* Memory allocation */
+ line = malloc(line_len_max + 1);
+ if (line == NULL) {
+ *line_number = 0;
+ return -ENOMEM;
+ }
+
+ /* Open file */
+ f = fopen(file_name, "r");
+ if (f == NULL) {
+ *line_number = 0;
+ free(line);
+ return -EIO;
+ }
+
+ /* Read file */
+ for (line_id = 1, rule_id = 0; rule_id < *n_rules; line_id++) {
+ char *tokens[CMD_MAX_TOKENS];
+ uint32_t n_tokens, n_tokens_parsed, t0;
+
+ /* Read next line from file. */
+ if (fgets(line, line_len_max + 1, f) == NULL)
+ break;
+
+ /* Comment. */
+ if (is_comment(line))
+ continue;
+
+ /* Parse line. */
+ n_tokens = RTE_DIM(tokens);
+ status = softnic_parse_tokenize_string(line, tokens, &n_tokens);
+ if (status) {
+ status = -EINVAL;
+ break;
+ }
+
+ /* Empty line. */
+ if (n_tokens == 0)
+ continue;
+ t0 = 0;
+
+ /* Rule match. */
+ n_tokens_parsed = parse_match(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &m[rule_id]);
+ if (n_tokens_parsed == 0) {
+ status = -EINVAL;
+ break;
+ }
+ t0 += n_tokens_parsed;
+
+ /* Rule action. */
+ n_tokens_parsed = parse_table_action(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &a[rule_id]);
+ if (n_tokens_parsed == 0) {
+ status = -EINVAL;
+ break;
+ }
+ t0 += n_tokens_parsed;
+
+ /* Line completed. */
+ if (t0 < n_tokens) {
+ status = -EINVAL;
+ break;
+ }
+
+ /* Increment rule count */
+ rule_id++;
+ }
+
+ /* Close file */
+ fclose(f);
+
+ /* Memory free */
+ free(line);
+
+ *n_rules = rule_id;
+ *line_number = line_id;
+ return status;
+}
diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_internals.h b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_internals.h
new file mode 100644
index 00000000..a25eb874
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -0,0 +1,910 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
+#define __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ring.h>
+#include <rte_ethdev.h>
+#include <rte_sched.h>
+#include <rte_port_in_action.h>
+#include <rte_table_action.h>
+#include <rte_pipeline.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_tm_driver.h>
+
+#include "rte_eth_softnic.h"
+#include "conn.h"
+
+#define NAME_SIZE 64
+
+/**
+ * PMD Parameters
+ */
+
+struct pmd_params {
+ const char *name;
+ const char *firmware;
+ uint16_t conn_port;
+ uint32_t cpu_id;
+
+ /** Traffic Management (TM) */
+ struct {
+ uint32_t n_queues; /**< Number of queues */
+ uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ } tm;
+};
+
+/**
+ * MEMPOOL
+ */
+struct softnic_mempool_params {
+ uint32_t buffer_size;
+ uint32_t pool_size;
+ uint32_t cache_size;
+};
+
+struct softnic_mempool {
+ TAILQ_ENTRY(softnic_mempool) node;
+ char name[NAME_SIZE];
+ struct rte_mempool *m;
+ uint32_t buffer_size;
+};
+
+TAILQ_HEAD(softnic_mempool_list, softnic_mempool);
+
+/**
+ * SWQ
+ */
+struct softnic_swq_params {
+ uint32_t size;
+};
+
+struct softnic_swq {
+ TAILQ_ENTRY(softnic_swq) node;
+ char name[NAME_SIZE];
+ struct rte_ring *r;
+};
+
+TAILQ_HEAD(softnic_swq_list, softnic_swq);
+
+/**
+ * LINK
+ */
+struct softnic_link_params {
+ const char *dev_name;
+ uint16_t port_id; /**< Valid only when *dev_name* is NULL. */
+};
+
+struct softnic_link {
+ TAILQ_ENTRY(softnic_link) node;
+ char name[NAME_SIZE];
+ uint16_t port_id;
+ uint32_t n_rxq;
+ uint32_t n_txq;
+};
+
+TAILQ_HEAD(softnic_link_list, softnic_link);
+
+/**
+ * TMGR
+ */
+
+#ifndef TM_MAX_SUBPORTS
+#define TM_MAX_SUBPORTS 8
+#endif
+
+#ifndef TM_MAX_PIPES_PER_SUBPORT
+#define TM_MAX_PIPES_PER_SUBPORT 4096
+#endif
+
+struct tm_params {
+ struct rte_sched_port_params port_params;
+
+ struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS];
+
+ struct rte_sched_pipe_params
+ pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
+ uint32_t n_pipe_profiles;
+ uint32_t pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT];
+};
+
+/* TM Levels */
+enum tm_node_level {
+ TM_NODE_LEVEL_PORT = 0,
+ TM_NODE_LEVEL_SUBPORT,
+ TM_NODE_LEVEL_PIPE,
+ TM_NODE_LEVEL_TC,
+ TM_NODE_LEVEL_QUEUE,
+ TM_NODE_LEVEL_MAX,
+};
+
+/* TM Shaper Profile */
+struct tm_shaper_profile {
+ TAILQ_ENTRY(tm_shaper_profile) node;
+ uint32_t shaper_profile_id;
+ uint32_t n_users;
+ struct rte_tm_shaper_params params;
+};
+
+TAILQ_HEAD(tm_shaper_profile_list, tm_shaper_profile);
+
+/* TM Shared Shaper */
+struct tm_shared_shaper {
+ TAILQ_ENTRY(tm_shared_shaper) node;
+ uint32_t shared_shaper_id;
+ uint32_t n_users;
+ uint32_t shaper_profile_id;
+};
+
+TAILQ_HEAD(tm_shared_shaper_list, tm_shared_shaper);
+
+/* TM WRED Profile */
+struct tm_wred_profile {
+ TAILQ_ENTRY(tm_wred_profile) node;
+ uint32_t wred_profile_id;
+ uint32_t n_users;
+ struct rte_tm_wred_params params;
+};
+
+TAILQ_HEAD(tm_wred_profile_list, tm_wred_profile);
+
+/* TM Node */
+struct tm_node {
+ TAILQ_ENTRY(tm_node) node;
+ uint32_t node_id;
+ uint32_t parent_node_id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t level;
+ struct tm_node *parent_node;
+ struct tm_shaper_profile *shaper_profile;
+ struct tm_wred_profile *wred_profile;
+ struct rte_tm_node_params params;
+ struct rte_tm_node_stats stats;
+ uint32_t n_children;
+};
+
+TAILQ_HEAD(tm_node_list, tm_node);
+
+/* TM Hierarchy Specification */
+struct tm_hierarchy {
+ struct tm_shaper_profile_list shaper_profiles;
+ struct tm_shared_shaper_list shared_shapers;
+ struct tm_wred_profile_list wred_profiles;
+ struct tm_node_list nodes;
+
+ uint32_t n_shaper_profiles;
+ uint32_t n_shared_shapers;
+ uint32_t n_wred_profiles;
+ uint32_t n_nodes;
+
+ uint32_t n_tm_nodes[TM_NODE_LEVEL_MAX];
+};
+
+struct tm_internals {
+ /** Hierarchy specification
+ *
+ * -Hierarchy is unfrozen at init and when port is stopped.
+ * -Hierarchy is frozen on successful hierarchy commit.
+ * -Run-time hierarchy changes are not allowed, therefore it makes
+ * sense to keep the hierarchy frozen after the port is started.
+ */
+ struct tm_hierarchy h;
+ int hierarchy_frozen;
+
+ /** Blueprints */
+ struct tm_params params;
+};
+
+struct softnic_tmgr_port {
+ TAILQ_ENTRY(softnic_tmgr_port) node;
+ char name[NAME_SIZE];
+ struct rte_sched_port *s;
+};
+
+TAILQ_HEAD(softnic_tmgr_port_list, softnic_tmgr_port);
+
+/**
+ * TAP
+ */
+struct softnic_tap {
+ TAILQ_ENTRY(softnic_tap) node;
+ char name[NAME_SIZE];
+ int fd;
+};
+
+TAILQ_HEAD(softnic_tap_list, softnic_tap);
+
+/**
+ * Input port action
+ */
+struct softnic_port_in_action_profile_params {
+ uint64_t action_mask;
+ struct rte_port_in_action_fltr_config fltr;
+ struct rte_port_in_action_lb_config lb;
+};
+
+struct softnic_port_in_action_profile {
+ TAILQ_ENTRY(softnic_port_in_action_profile) node;
+ char name[NAME_SIZE];
+ struct softnic_port_in_action_profile_params params;
+ struct rte_port_in_action_profile *ap;
+};
+
+TAILQ_HEAD(softnic_port_in_action_profile_list, softnic_port_in_action_profile);
+
+/**
+ * Table action
+ */
+struct softnic_table_action_profile_params {
+ uint64_t action_mask;
+ struct rte_table_action_common_config common;
+ struct rte_table_action_lb_config lb;
+ struct rte_table_action_mtr_config mtr;
+ struct rte_table_action_tm_config tm;
+ struct rte_table_action_encap_config encap;
+ struct rte_table_action_nat_config nat;
+ struct rte_table_action_ttl_config ttl;
+ struct rte_table_action_stats_config stats;
+};
+
+struct softnic_table_action_profile {
+ TAILQ_ENTRY(softnic_table_action_profile) node;
+ char name[NAME_SIZE];
+ struct softnic_table_action_profile_params params;
+ struct rte_table_action_profile *ap;
+};
+
+TAILQ_HEAD(softnic_table_action_profile_list, softnic_table_action_profile);
+
+/**
+ * Pipeline
+ */
+struct pipeline_params {
+ uint32_t timer_period_ms;
+ uint32_t offset_port_id;
+};
+
+enum softnic_port_in_type {
+ PORT_IN_RXQ,
+ PORT_IN_SWQ,
+ PORT_IN_TMGR,
+ PORT_IN_TAP,
+ PORT_IN_SOURCE,
+};
+
+struct softnic_port_in_params {
+ /* Read */
+ enum softnic_port_in_type type;
+ const char *dev_name;
+ union {
+ struct {
+ uint16_t queue_id;
+ } rxq;
+
+ struct {
+ const char *mempool_name;
+ uint32_t mtu;
+ } tap;
+
+ struct {
+ const char *mempool_name;
+ const char *file_name;
+ uint32_t n_bytes_per_pkt;
+ } source;
+ };
+ uint32_t burst_size;
+
+ /* Action */
+ const char *action_profile_name;
+};
+
+enum softnic_port_out_type {
+ PORT_OUT_TXQ,
+ PORT_OUT_SWQ,
+ PORT_OUT_TMGR,
+ PORT_OUT_TAP,
+ PORT_OUT_SINK,
+};
+
+struct softnic_port_out_params {
+ enum softnic_port_out_type type;
+ const char *dev_name;
+ union {
+ struct {
+ uint16_t queue_id;
+ } txq;
+
+ struct {
+ const char *file_name;
+ uint32_t max_n_pkts;
+ } sink;
+ };
+ uint32_t burst_size;
+ int retry;
+ uint32_t n_retries;
+};
+
+enum softnic_table_type {
+ TABLE_ACL,
+ TABLE_ARRAY,
+ TABLE_HASH,
+ TABLE_LPM,
+ TABLE_STUB,
+};
+
+struct softnic_table_acl_params {
+ uint32_t n_rules;
+ uint32_t ip_header_offset;
+ int ip_version;
+};
+
+struct softnic_table_array_params {
+ uint32_t n_keys;
+ uint32_t key_offset;
+};
+
+struct softnic_table_hash_params {
+ uint32_t n_keys;
+ uint32_t key_offset;
+ uint32_t key_size;
+ uint8_t *key_mask;
+ uint32_t n_buckets;
+ int extendable_bucket;
+};
+
+struct softnic_table_lpm_params {
+ uint32_t n_rules;
+ uint32_t key_offset;
+ uint32_t key_size;
+};
+
+struct softnic_table_params {
+ /* Match */
+ enum softnic_table_type match_type;
+ union {
+ struct softnic_table_acl_params acl;
+ struct softnic_table_array_params array;
+ struct softnic_table_hash_params hash;
+ struct softnic_table_lpm_params lpm;
+ } match;
+
+ /* Action */
+ const char *action_profile_name;
+};
+
+struct softnic_port_in {
+ struct softnic_port_in_params params;
+ struct softnic_port_in_action_profile *ap;
+ struct rte_port_in_action *a;
+};
+
+struct softnic_table {
+ struct softnic_table_params params;
+ struct softnic_table_action_profile *ap;
+ struct rte_table_action *a;
+};
+
+struct pipeline {
+ TAILQ_ENTRY(pipeline) node;
+ char name[NAME_SIZE];
+
+ struct rte_pipeline *p;
+ struct softnic_port_in port_in[RTE_PIPELINE_PORT_IN_MAX];
+ struct softnic_table table[RTE_PIPELINE_TABLE_MAX];
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+ uint32_t n_tables;
+
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint32_t timer_period_ms;
+
+ int enabled;
+ uint32_t thread_id;
+ uint32_t cpu_id;
+};
+
+TAILQ_HEAD(pipeline_list, pipeline);
+
+/**
+ * Thread
+ */
+#ifndef THREAD_PIPELINES_MAX
+#define THREAD_PIPELINES_MAX 256
+#endif
+
+#ifndef THREAD_MSGQ_SIZE
+#define THREAD_MSGQ_SIZE 64
+#endif
+
+#ifndef THREAD_TIMER_PERIOD_MS
+#define THREAD_TIMER_PERIOD_MS 100
+#endif
+
+/**
+ * Master thead: data plane thread context
+ */
+struct softnic_thread {
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+
+ uint32_t enabled;
+};
+
+/**
+ * Data plane threads: context
+ */
+#ifndef TABLE_RULE_ACTION_SIZE_MAX
+#define TABLE_RULE_ACTION_SIZE_MAX 2048
+#endif
+
+struct softnic_table_data {
+ struct rte_table_action *a;
+};
+
+struct pipeline_data {
+ struct rte_pipeline *p;
+ struct softnic_table_data table_data[RTE_PIPELINE_TABLE_MAX];
+ uint32_t n_tables;
+
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint64_t timer_period; /* Measured in CPU cycles. */
+ uint64_t time_next;
+
+ uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
+};
+
+struct softnic_thread_data {
+ struct rte_pipeline *p[THREAD_PIPELINES_MAX];
+ uint32_t n_pipelines;
+
+ struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint64_t timer_period; /* Measured in CPU cycles. */
+ uint64_t time_next;
+ uint64_t time_next_min;
+ uint64_t iter;
+} __rte_cache_aligned;
+
+/**
+ * PMD Internals
+ */
+struct pmd_internals {
+ /** Params */
+ struct pmd_params params;
+
+ struct {
+ struct tm_internals tm; /**< Traffic Management */
+ } soft;
+
+ struct softnic_conn *conn;
+ struct softnic_mempool_list mempool_list;
+ struct softnic_swq_list swq_list;
+ struct softnic_link_list link_list;
+ struct softnic_tmgr_port_list tmgr_port_list;
+ struct softnic_tap_list tap_list;
+ struct softnic_port_in_action_profile_list port_in_action_profile_list;
+ struct softnic_table_action_profile_list table_action_profile_list;
+ struct pipeline_list pipeline_list;
+ struct softnic_thread thread[RTE_MAX_LCORE];
+ struct softnic_thread_data thread_data[RTE_MAX_LCORE];
+};
+
+/**
+ * MEMPOOL
+ */
+int
+softnic_mempool_init(struct pmd_internals *p);
+
+void
+softnic_mempool_free(struct pmd_internals *p);
+
+struct softnic_mempool *
+softnic_mempool_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_mempool *
+softnic_mempool_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_mempool_params *params);
+
+/**
+ * SWQ
+ */
+int
+softnic_swq_init(struct pmd_internals *p);
+
+void
+softnic_swq_free(struct pmd_internals *p);
+
+void
+softnic_softnic_swq_free_keep_rxq_txq(struct pmd_internals *p);
+
+struct softnic_swq *
+softnic_swq_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_swq *
+softnic_swq_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_swq_params *params);
+
+/**
+ * LINK
+ */
+int
+softnic_link_init(struct pmd_internals *p);
+
+void
+softnic_link_free(struct pmd_internals *p);
+
+struct softnic_link *
+softnic_link_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_link *
+softnic_link_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_link_params *params);
+
+/**
+ * TMGR
+ */
+int
+softnic_tmgr_init(struct pmd_internals *p);
+
+void
+softnic_tmgr_free(struct pmd_internals *p);
+
+struct softnic_tmgr_port *
+softnic_tmgr_port_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_tmgr_port *
+softnic_tmgr_port_create(struct pmd_internals *p,
+ const char *name);
+
+void
+tm_hierarchy_init(struct pmd_internals *p);
+
+void
+tm_hierarchy_free(struct pmd_internals *p);
+
+static inline int
+tm_used(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ return p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
+}
+
+extern const struct rte_tm_ops pmd_tm_ops;
+
+/**
+ * TAP
+ */
+int
+softnic_tap_init(struct pmd_internals *p);
+
+void
+softnic_tap_free(struct pmd_internals *p);
+
+struct softnic_tap *
+softnic_tap_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_tap *
+softnic_tap_create(struct pmd_internals *p,
+ const char *name);
+
+/**
+ * Input port action
+ */
+int
+softnic_port_in_action_profile_init(struct pmd_internals *p);
+
+void
+softnic_port_in_action_profile_free(struct pmd_internals *p);
+
+struct softnic_port_in_action_profile *
+softnic_port_in_action_profile_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_port_in_action_profile *
+softnic_port_in_action_profile_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_port_in_action_profile_params *params);
+
+/**
+ * Table action
+ */
+int
+softnic_table_action_profile_init(struct pmd_internals *p);
+
+void
+softnic_table_action_profile_free(struct pmd_internals *p);
+
+struct softnic_table_action_profile *
+softnic_table_action_profile_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_table_action_profile *
+softnic_table_action_profile_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_table_action_profile_params *params);
+
+/**
+ * Pipeline
+ */
+int
+softnic_pipeline_init(struct pmd_internals *p);
+
+void
+softnic_pipeline_free(struct pmd_internals *p);
+
+void
+softnic_pipeline_disable_all(struct pmd_internals *p);
+
+struct pipeline *
+softnic_pipeline_find(struct pmd_internals *p, const char *name);
+
+struct pipeline *
+softnic_pipeline_create(struct pmd_internals *p,
+ const char *name,
+ struct pipeline_params *params);
+
+int
+softnic_pipeline_port_in_create(struct pmd_internals *p,
+ const char *pipeline_name,
+ struct softnic_port_in_params *params,
+ int enabled);
+
+int
+softnic_pipeline_port_in_connect_to_table(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id,
+ uint32_t table_id);
+
+int
+softnic_pipeline_port_out_create(struct pmd_internals *p,
+ const char *pipeline_name,
+ struct softnic_port_out_params *params);
+
+int
+softnic_pipeline_table_create(struct pmd_internals *p,
+ const char *pipeline_name,
+ struct softnic_table_params *params);
+
+struct softnic_table_rule_match_acl {
+ int ip_version;
+
+ RTE_STD_C11
+ union {
+ struct {
+ uint32_t sa;
+ uint32_t da;
+ } ipv4;
+
+ struct {
+ uint8_t sa[16];
+ uint8_t da[16];
+ } ipv6;
+ };
+
+ uint32_t sa_depth;
+ uint32_t da_depth;
+ uint16_t sp0;
+ uint16_t sp1;
+ uint16_t dp0;
+ uint16_t dp1;
+ uint8_t proto;
+ uint8_t proto_mask;
+ uint32_t priority;
+};
+
+struct softnic_table_rule_match_array {
+ uint32_t pos;
+};
+
+#ifndef TABLE_RULE_MATCH_SIZE_MAX
+#define TABLE_RULE_MATCH_SIZE_MAX 256
+#endif
+
+struct softnic_table_rule_match_hash {
+ uint8_t key[TABLE_RULE_MATCH_SIZE_MAX];
+};
+
+struct softnic_table_rule_match_lpm {
+ int ip_version;
+
+ RTE_STD_C11
+ union {
+ uint32_t ipv4;
+ uint8_t ipv6[16];
+ };
+
+ uint8_t depth;
+};
+
+struct softnic_table_rule_match {
+ enum softnic_table_type match_type;
+
+ union {
+ struct softnic_table_rule_match_acl acl;
+ struct softnic_table_rule_match_array array;
+ struct softnic_table_rule_match_hash hash;
+ struct softnic_table_rule_match_lpm lpm;
+ } match;
+};
+
+struct softnic_table_rule_action {
+ uint64_t action_mask;
+ struct rte_table_action_fwd_params fwd;
+ struct rte_table_action_lb_params lb;
+ struct rte_table_action_mtr_params mtr;
+ struct rte_table_action_tm_params tm;
+ struct rte_table_action_encap_params encap;
+ struct rte_table_action_nat_params nat;
+ struct rte_table_action_ttl_params ttl;
+ struct rte_table_action_stats_params stats;
+ struct rte_table_action_time_params time;
+};
+
+int
+softnic_pipeline_port_in_stats_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats,
+ int clear);
+
+int
+softnic_pipeline_port_in_enable(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id);
+
+int
+softnic_pipeline_port_in_disable(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id);
+
+int
+softnic_pipeline_port_out_stats_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats,
+ int clear);
+
+int
+softnic_pipeline_table_stats_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats,
+ int clear);
+
+int
+softnic_pipeline_table_rule_add(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match,
+ struct softnic_table_rule_action *action,
+ void **data);
+
+int
+softnic_pipeline_table_rule_add_bulk(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match,
+ struct softnic_table_rule_action *action,
+ void **data,
+ uint32_t *n_rules);
+
+int
+softnic_pipeline_table_rule_add_default(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_action *action,
+ void **data);
+
+int
+softnic_pipeline_table_rule_delete(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match);
+
+int
+softnic_pipeline_table_rule_delete_default(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id);
+
+int
+softnic_pipeline_table_rule_stats_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_stats_counters *stats,
+ int clear);
+
+int
+softnic_pipeline_table_mtr_profile_add(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id,
+ struct rte_table_action_meter_profile *profile);
+
+int
+softnic_pipeline_table_mtr_profile_delete(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id);
+
+int
+softnic_pipeline_table_rule_mtr_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ uint32_t tc_mask,
+ struct rte_table_action_mtr_counters *stats,
+ int clear);
+
+int
+softnic_pipeline_table_dscp_table_update(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint64_t dscp_mask,
+ struct rte_table_action_dscp_table *dscp_table);
+
+int
+softnic_pipeline_table_rule_ttl_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_ttl_counters *stats,
+ int clear);
+
+/**
+ * Thread
+ */
+int
+softnic_thread_init(struct pmd_internals *p);
+
+void
+softnic_thread_free(struct pmd_internals *p);
+
+int
+softnic_thread_pipeline_enable(struct pmd_internals *p,
+ uint32_t thread_id,
+ const char *pipeline_name);
+
+int
+softnic_thread_pipeline_disable(struct pmd_internals *p,
+ uint32_t thread_id,
+ const char *pipeline_name);
+
+/**
+ * CLI
+ */
+void
+softnic_cli_process(char *in,
+ char *out,
+ size_t out_size,
+ void *arg);
+
+int
+softnic_cli_script_process(struct pmd_internals *softnic,
+ const char *file_name,
+ size_t msg_in_len_max,
+ size_t msg_out_len_max);
+
+#endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */
diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_link.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_link.c
new file mode 100644
index 00000000..d669913a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_link.c
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include "rte_eth_softnic_internals.h"
+
+int
+softnic_link_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->link_list);
+
+ return 0;
+}
+
+void
+softnic_link_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_link *link;
+
+ link = TAILQ_FIRST(&p->link_list);
+ if (link == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->link_list, link, node);
+ free(link);
+ }
+}
+
+struct softnic_link *
+softnic_link_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_link *link;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(link, &p->link_list, node)
+ if (strcmp(link->name, name) == 0)
+ return link;
+
+ return NULL;
+}
+
+struct softnic_link *
+softnic_link_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_link_params *params)
+{
+ struct rte_eth_dev_info port_info;
+ struct softnic_link *link;
+ uint16_t port_id;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_link_find(p, name) ||
+ params == NULL)
+ return NULL;
+
+ port_id = params->port_id;
+ if (params->dev_name) {
+ int status;
+
+ status = rte_eth_dev_get_port_by_name(params->dev_name,
+ &port_id);
+
+ if (status)
+ return NULL;
+ } else {
+ if (!rte_eth_dev_is_valid_port(port_id))
+ return NULL;
+ }
+
+ rte_eth_dev_info_get(port_id, &port_info);
+
+ /* Node allocation */
+ link = calloc(1, sizeof(struct softnic_link));
+ if (link == NULL)
+ return NULL;
+
+ /* Node fill in */
+ strlcpy(link->name, name, sizeof(link->name));
+ link->port_id = port_id;
+ link->n_rxq = port_info.nb_rx_queues;
+ link->n_txq = port_info.nb_tx_queues;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->link_list, link, node);
+
+ return link;
+}
diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_mempool.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_mempool.c
new file mode 100644
index 00000000..d5c569f9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_mempool.c
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+
+#include "rte_eth_softnic_internals.h"
+
+#define BUFFER_SIZE_MIN (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+
+int
+softnic_mempool_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->mempool_list);
+
+ return 0;
+}
+
+void
+softnic_mempool_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_mempool *mempool;
+
+ mempool = TAILQ_FIRST(&p->mempool_list);
+ if (mempool == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->mempool_list, mempool, node);
+ rte_mempool_free(mempool->m);
+ free(mempool);
+ }
+}
+
+struct softnic_mempool *
+softnic_mempool_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_mempool *mempool;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(mempool, &p->mempool_list, node)
+ if (strcmp(mempool->name, name) == 0)
+ return mempool;
+
+ return NULL;
+}
+
+struct softnic_mempool *
+softnic_mempool_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_mempool_params *params)
+{
+ char mempool_name[NAME_SIZE];
+ struct softnic_mempool *mempool;
+ struct rte_mempool *m;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_mempool_find(p, name) ||
+ params == NULL ||
+ params->buffer_size < BUFFER_SIZE_MIN ||
+ params->pool_size == 0)
+ return NULL;
+
+ /* Resource create */
+ snprintf(mempool_name, sizeof(mempool_name), "%s_%s",
+ p->params.name,
+ name);
+
+ m = rte_pktmbuf_pool_create(mempool_name,
+ params->pool_size,
+ params->cache_size,
+ 0,
+ params->buffer_size - sizeof(struct rte_mbuf),
+ p->params.cpu_id);
+
+ if (m == NULL)
+ return NULL;
+
+ /* Node allocation */
+ mempool = calloc(1, sizeof(struct softnic_mempool));
+ if (mempool == NULL) {
+ rte_mempool_free(m);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(mempool->name, name, sizeof(mempool->name));
+ mempool->m = m;
+ mempool->buffer_size = params->buffer_size;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->mempool_list, mempool, node);
+
+ return mempool;
+}
diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_pipeline.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_pipeline.c
new file mode 100644
index 00000000..45136a4a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_pipeline.c
@@ -0,0 +1,966 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+
+#include <rte_string_fns.h>
+#include <rte_port_ethdev.h>
+#include <rte_port_ring.h>
+#include <rte_port_source_sink.h>
+#include <rte_port_fd.h>
+#include <rte_port_sched.h>
+
+#include <rte_table_acl.h>
+#include <rte_table_array.h>
+#include <rte_table_hash.h>
+#include <rte_table_lpm.h>
+#include <rte_table_lpm_ipv6.h>
+#include <rte_table_stub.h>
+
+#include "rte_eth_softnic_internals.h"
+
+#include "hash_func.h"
+
+#ifndef PIPELINE_MSGQ_SIZE
+#define PIPELINE_MSGQ_SIZE 64
+#endif
+
+#ifndef TABLE_LPM_NUMBER_TBL8
+#define TABLE_LPM_NUMBER_TBL8 256
+#endif
+
+int
+softnic_pipeline_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->pipeline_list);
+
+ return 0;
+}
+
+void
+softnic_pipeline_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct pipeline *pipeline;
+
+ pipeline = TAILQ_FIRST(&p->pipeline_list);
+ if (pipeline == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->pipeline_list, pipeline, node);
+ rte_ring_free(pipeline->msgq_req);
+ rte_ring_free(pipeline->msgq_rsp);
+ rte_pipeline_free(pipeline->p);
+ free(pipeline);
+ }
+}
+
+void
+softnic_pipeline_disable_all(struct pmd_internals *p)
+{
+ struct pipeline *pipeline;
+
+ TAILQ_FOREACH(pipeline, &p->pipeline_list, node)
+ if (pipeline->enabled)
+ softnic_thread_pipeline_disable(p,
+ pipeline->thread_id,
+ pipeline->name);
+}
+
+struct pipeline *
+softnic_pipeline_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct pipeline *pipeline;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(pipeline, &p->pipeline_list, node)
+ if (strcmp(name, pipeline->name) == 0)
+ return pipeline;
+
+ return NULL;
+}
+
+struct pipeline *
+softnic_pipeline_create(struct pmd_internals *softnic,
+ const char *name,
+ struct pipeline_params *params)
+{
+ char resource_name[NAME_MAX];
+ struct rte_pipeline_params pp;
+ struct pipeline *pipeline;
+ struct rte_pipeline *p;
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_pipeline_find(softnic, name) ||
+ params == NULL ||
+ params->timer_period_ms == 0)
+ return NULL;
+
+ /* Resource create */
+ snprintf(resource_name, sizeof(resource_name), "%s-%s-REQ",
+ softnic->params.name,
+ name);
+
+ msgq_req = rte_ring_create(resource_name,
+ PIPELINE_MSGQ_SIZE,
+ softnic->params.cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (msgq_req == NULL)
+ return NULL;
+
+ snprintf(resource_name, sizeof(resource_name), "%s-%s-RSP",
+ softnic->params.name,
+ name);
+
+ msgq_rsp = rte_ring_create(resource_name,
+ PIPELINE_MSGQ_SIZE,
+ softnic->params.cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (msgq_rsp == NULL) {
+ rte_ring_free(msgq_req);
+ return NULL;
+ }
+
+ snprintf(resource_name, sizeof(resource_name), "%s_%s",
+ softnic->params.name,
+ name);
+
+ pp.name = resource_name;
+ pp.socket_id = (int)softnic->params.cpu_id;
+ pp.offset_port_id = params->offset_port_id;
+
+ p = rte_pipeline_create(&pp);
+ if (p == NULL) {
+ rte_ring_free(msgq_rsp);
+ rte_ring_free(msgq_req);
+ return NULL;
+ }
+
+ /* Node allocation */
+ pipeline = calloc(1, sizeof(struct pipeline));
+ if (pipeline == NULL) {
+ rte_pipeline_free(p);
+ rte_ring_free(msgq_rsp);
+ rte_ring_free(msgq_req);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(pipeline->name, name, sizeof(pipeline->name));
+ pipeline->p = p;
+ pipeline->n_ports_in = 0;
+ pipeline->n_ports_out = 0;
+ pipeline->n_tables = 0;
+ pipeline->msgq_req = msgq_req;
+ pipeline->msgq_rsp = msgq_rsp;
+ pipeline->timer_period_ms = params->timer_period_ms;
+ pipeline->enabled = 0;
+ pipeline->cpu_id = softnic->params.cpu_id;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&softnic->pipeline_list, pipeline, node);
+
+ return pipeline;
+}
+
+int
+softnic_pipeline_port_in_create(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ struct softnic_port_in_params *params,
+ int enabled)
+{
+ struct rte_pipeline_port_in_params p;
+
+ union {
+ struct rte_port_ethdev_reader_params ethdev;
+ struct rte_port_ring_reader_params ring;
+ struct rte_port_sched_reader_params sched;
+ struct rte_port_fd_reader_params fd;
+ struct rte_port_source_params source;
+ } pp;
+
+ struct pipeline *pipeline;
+ struct softnic_port_in *port_in;
+ struct softnic_port_in_action_profile *ap;
+ struct rte_port_in_action *action;
+ uint32_t port_id;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+ memset(&pp, 0, sizeof(pp));
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ params == NULL ||
+ params->burst_size == 0 ||
+ params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL)
+ return -1;
+
+ ap = NULL;
+ if (params->action_profile_name) {
+ ap = softnic_port_in_action_profile_find(softnic,
+ params->action_profile_name);
+ if (ap == NULL)
+ return -1;
+ }
+
+ switch (params->type) {
+ case PORT_IN_RXQ:
+ {
+ struct softnic_link *link;
+
+ link = softnic_link_find(softnic, params->dev_name);
+ if (link == NULL)
+ return -1;
+
+ if (params->rxq.queue_id >= link->n_rxq)
+ return -1;
+
+ pp.ethdev.port_id = link->port_id;
+ pp.ethdev.queue_id = params->rxq.queue_id;
+
+ p.ops = &rte_port_ethdev_reader_ops;
+ p.arg_create = &pp.ethdev;
+ break;
+ }
+
+ case PORT_IN_SWQ:
+ {
+ struct softnic_swq *swq;
+
+ swq = softnic_swq_find(softnic, params->dev_name);
+ if (swq == NULL)
+ return -1;
+
+ pp.ring.ring = swq->r;
+
+ p.ops = &rte_port_ring_reader_ops;
+ p.arg_create = &pp.ring;
+ break;
+ }
+
+ case PORT_IN_TMGR:
+ {
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = softnic_tmgr_port_find(softnic, params->dev_name);
+ if (tmgr_port == NULL)
+ return -1;
+
+ pp.sched.sched = tmgr_port->s;
+
+ p.ops = &rte_port_sched_reader_ops;
+ p.arg_create = &pp.sched;
+ break;
+ }
+
+ case PORT_IN_TAP:
+ {
+ struct softnic_tap *tap;
+ struct softnic_mempool *mempool;
+
+ tap = softnic_tap_find(softnic, params->dev_name);
+ mempool = softnic_mempool_find(softnic, params->tap.mempool_name);
+ if (tap == NULL || mempool == NULL)
+ return -1;
+
+ pp.fd.fd = tap->fd;
+ pp.fd.mempool = mempool->m;
+ pp.fd.mtu = params->tap.mtu;
+
+ p.ops = &rte_port_fd_reader_ops;
+ p.arg_create = &pp.fd;
+ break;
+ }
+
+ case PORT_IN_SOURCE:
+ {
+ struct softnic_mempool *mempool;
+
+ mempool = softnic_mempool_find(softnic, params->source.mempool_name);
+ if (mempool == NULL)
+ return -1;
+
+ pp.source.mempool = mempool->m;
+ pp.source.file_name = params->source.file_name;
+ pp.source.n_bytes_per_pkt = params->source.n_bytes_per_pkt;
+
+ p.ops = &rte_port_source_ops;
+ p.arg_create = &pp.source;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ p.burst_size = params->burst_size;
+
+ /* Resource create */
+ action = NULL;
+ p.f_action = NULL;
+ p.arg_ah = NULL;
+
+ if (ap) {
+ action = rte_port_in_action_create(ap->ap,
+ softnic->params.cpu_id);
+ if (action == NULL)
+ return -1;
+
+ status = rte_port_in_action_params_get(action,
+ &p);
+ if (status) {
+ rte_port_in_action_free(action);
+ return -1;
+ }
+ }
+
+ status = rte_pipeline_port_in_create(pipeline->p,
+ &p,
+ &port_id);
+ if (status) {
+ rte_port_in_action_free(action);
+ return -1;
+ }
+
+ if (enabled)
+ rte_pipeline_port_in_enable(pipeline->p, port_id);
+
+ /* Pipeline */
+ port_in = &pipeline->port_in[pipeline->n_ports_in];
+ memcpy(&port_in->params, params, sizeof(*params));
+ port_in->ap = ap;
+ port_in->a = action;
+ pipeline->n_ports_in++;
+
+ return 0;
+}
+
+int
+softnic_pipeline_port_in_connect_to_table(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id,
+ uint32_t table_id)
+{
+ struct pipeline *pipeline;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL ||
+ port_id >= pipeline->n_ports_in ||
+ table_id >= pipeline->n_tables)
+ return -1;
+
+ /* Resource */
+ status = rte_pipeline_port_in_connect_to_table(pipeline->p,
+ port_id,
+ table_id);
+
+ return status;
+}
+
+int
+softnic_pipeline_port_out_create(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ struct softnic_port_out_params *params)
+{
+ struct rte_pipeline_port_out_params p;
+
+ union {
+ struct rte_port_ethdev_writer_params ethdev;
+ struct rte_port_ring_writer_params ring;
+ struct rte_port_sched_writer_params sched;
+ struct rte_port_fd_writer_params fd;
+ struct rte_port_sink_params sink;
+ } pp;
+
+ union {
+ struct rte_port_ethdev_writer_nodrop_params ethdev;
+ struct rte_port_ring_writer_nodrop_params ring;
+ struct rte_port_fd_writer_nodrop_params fd;
+ } pp_nodrop;
+
+ struct pipeline *pipeline;
+ uint32_t port_id;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+ memset(&pp, 0, sizeof(pp));
+ memset(&pp_nodrop, 0, sizeof(pp_nodrop));
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ params == NULL ||
+ params->burst_size == 0 ||
+ params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL)
+ return -1;
+
+ switch (params->type) {
+ case PORT_OUT_TXQ:
+ {
+ struct softnic_link *link;
+
+ link = softnic_link_find(softnic, params->dev_name);
+ if (link == NULL)
+ return -1;
+
+ if (params->txq.queue_id >= link->n_txq)
+ return -1;
+
+ pp.ethdev.port_id = link->port_id;
+ pp.ethdev.queue_id = params->txq.queue_id;
+ pp.ethdev.tx_burst_sz = params->burst_size;
+
+ pp_nodrop.ethdev.port_id = link->port_id;
+ pp_nodrop.ethdev.queue_id = params->txq.queue_id;
+ pp_nodrop.ethdev.tx_burst_sz = params->burst_size;
+ pp_nodrop.ethdev.n_retries = params->n_retries;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_ethdev_writer_ops;
+ p.arg_create = &pp.ethdev;
+ } else {
+ p.ops = &rte_port_ethdev_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.ethdev;
+ }
+ break;
+ }
+
+ case PORT_OUT_SWQ:
+ {
+ struct softnic_swq *swq;
+
+ swq = softnic_swq_find(softnic, params->dev_name);
+ if (swq == NULL)
+ return -1;
+
+ pp.ring.ring = swq->r;
+ pp.ring.tx_burst_sz = params->burst_size;
+
+ pp_nodrop.ring.ring = swq->r;
+ pp_nodrop.ring.tx_burst_sz = params->burst_size;
+ pp_nodrop.ring.n_retries = params->n_retries;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_ring_writer_ops;
+ p.arg_create = &pp.ring;
+ } else {
+ p.ops = &rte_port_ring_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.ring;
+ }
+ break;
+ }
+
+ case PORT_OUT_TMGR:
+ {
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = softnic_tmgr_port_find(softnic, params->dev_name);
+ if (tmgr_port == NULL)
+ return -1;
+
+ pp.sched.sched = tmgr_port->s;
+ pp.sched.tx_burst_sz = params->burst_size;
+
+ p.ops = &rte_port_sched_writer_ops;
+ p.arg_create = &pp.sched;
+ break;
+ }
+
+ case PORT_OUT_TAP:
+ {
+ struct softnic_tap *tap;
+
+ tap = softnic_tap_find(softnic, params->dev_name);
+ if (tap == NULL)
+ return -1;
+
+ pp.fd.fd = tap->fd;
+ pp.fd.tx_burst_sz = params->burst_size;
+
+ pp_nodrop.fd.fd = tap->fd;
+ pp_nodrop.fd.tx_burst_sz = params->burst_size;
+ pp_nodrop.fd.n_retries = params->n_retries;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_fd_writer_ops;
+ p.arg_create = &pp.fd;
+ } else {
+ p.ops = &rte_port_fd_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.fd;
+ }
+ break;
+ }
+
+ case PORT_OUT_SINK:
+ {
+ pp.sink.file_name = params->sink.file_name;
+ pp.sink.max_n_pkts = params->sink.max_n_pkts;
+
+ p.ops = &rte_port_sink_ops;
+ p.arg_create = &pp.sink;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ p.f_action = NULL;
+ p.arg_ah = NULL;
+
+ /* Resource create */
+ status = rte_pipeline_port_out_create(pipeline->p,
+ &p,
+ &port_id);
+
+ if (status)
+ return -1;
+
+ /* Pipeline */
+ pipeline->n_ports_out++;
+
+ return 0;
+}
+
+static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = offsetof(struct ipv4_hdr, next_proto_id),
+ },
+
+ /* Source IP address (IPv4) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = offsetof(struct ipv4_hdr, src_addr),
+ },
+
+ /* Destination IP address (IPv4) */
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = offsetof(struct ipv4_hdr, dst_addr),
+ },
+
+ /* Source Port */
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 4,
+ .input_index = 3,
+ .offset = sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = offsetof(struct ipv6_hdr, proto),
+ },
+
+ /* Source IP address (IPv6) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = offsetof(struct ipv6_hdr, src_addr[0]),
+ },
+
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = offsetof(struct ipv6_hdr, src_addr[4]),
+ },
+
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = offsetof(struct ipv6_hdr, src_addr[8]),
+ },
+
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 4,
+ .input_index = 4,
+ .offset = offsetof(struct ipv6_hdr, src_addr[12]),
+ },
+
+ /* Destination IP address (IPv6) */
+ [5] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 5,
+ .input_index = 5,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[0]),
+ },
+
+ [6] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 6,
+ .input_index = 6,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[4]),
+ },
+
+ [7] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 7,
+ .input_index = 7,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[8]),
+ },
+
+ [8] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 8,
+ .input_index = 8,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[12]),
+ },
+
+ /* Source Port */
+ [9] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 9,
+ .input_index = 9,
+ .offset = sizeof(struct ipv6_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [10] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 10,
+ .input_index = 9,
+ .offset = sizeof(struct ipv6_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+int
+softnic_pipeline_table_create(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ struct softnic_table_params *params)
+{
+ char name[NAME_MAX];
+ struct rte_pipeline_table_params p;
+
+ union {
+ struct rte_table_acl_params acl;
+ struct rte_table_array_params array;
+ struct rte_table_hash_params hash;
+ struct rte_table_lpm_params lpm;
+ struct rte_table_lpm_ipv6_params lpm_ipv6;
+ } pp;
+
+ struct pipeline *pipeline;
+ struct softnic_table *table;
+ struct softnic_table_action_profile *ap;
+ struct rte_table_action *action;
+ uint32_t table_id;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+ memset(&pp, 0, sizeof(pp));
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ params == NULL)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL ||
+ pipeline->n_tables >= RTE_PIPELINE_TABLE_MAX)
+ return -1;
+
+ ap = NULL;
+ if (params->action_profile_name) {
+ ap = softnic_table_action_profile_find(softnic,
+ params->action_profile_name);
+ if (ap == NULL)
+ return -1;
+ }
+
+ snprintf(name, NAME_MAX, "%s_%s_table%u",
+ softnic->params.name, pipeline_name, pipeline->n_tables);
+
+ switch (params->match_type) {
+ case TABLE_ACL:
+ {
+ uint32_t ip_header_offset = params->match.acl.ip_header_offset -
+ (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
+ uint32_t i;
+
+ if (params->match.acl.n_rules == 0)
+ return -1;
+
+ pp.acl.name = name;
+ pp.acl.n_rules = params->match.acl.n_rules;
+ if (params->match.acl.ip_version) {
+ memcpy(&pp.acl.field_format,
+ &table_acl_field_format_ipv4,
+ sizeof(table_acl_field_format_ipv4));
+ pp.acl.n_rule_fields =
+ RTE_DIM(table_acl_field_format_ipv4);
+ } else {
+ memcpy(&pp.acl.field_format,
+ &table_acl_field_format_ipv6,
+ sizeof(table_acl_field_format_ipv6));
+ pp.acl.n_rule_fields =
+ RTE_DIM(table_acl_field_format_ipv6);
+ }
+
+ for (i = 0; i < pp.acl.n_rule_fields; i++)
+ pp.acl.field_format[i].offset += ip_header_offset;
+
+ p.ops = &rte_table_acl_ops;
+ p.arg_create = &pp.acl;
+ break;
+ }
+
+ case TABLE_ARRAY:
+ {
+ if (params->match.array.n_keys == 0)
+ return -1;
+
+ pp.array.n_entries = params->match.array.n_keys;
+ pp.array.offset = params->match.array.key_offset;
+
+ p.ops = &rte_table_array_ops;
+ p.arg_create = &pp.array;
+ break;
+ }
+
+ case TABLE_HASH:
+ {
+ struct rte_table_ops *ops;
+ rte_table_hash_op_hash f_hash;
+
+ if (params->match.hash.n_keys == 0)
+ return -1;
+
+ switch (params->match.hash.key_size) {
+ case 8:
+ f_hash = hash_default_key8;
+ break;
+ case 16:
+ f_hash = hash_default_key16;
+ break;
+ case 24:
+ f_hash = hash_default_key24;
+ break;
+ case 32:
+ f_hash = hash_default_key32;
+ break;
+ case 40:
+ f_hash = hash_default_key40;
+ break;
+ case 48:
+ f_hash = hash_default_key48;
+ break;
+ case 56:
+ f_hash = hash_default_key56;
+ break;
+ case 64:
+ f_hash = hash_default_key64;
+ break;
+ default:
+ return -1;
+ }
+
+ pp.hash.name = name;
+ pp.hash.key_size = params->match.hash.key_size;
+ pp.hash.key_offset = params->match.hash.key_offset;
+ pp.hash.key_mask = params->match.hash.key_mask;
+ pp.hash.n_keys = params->match.hash.n_keys;
+ pp.hash.n_buckets = params->match.hash.n_buckets;
+ pp.hash.f_hash = f_hash;
+ pp.hash.seed = 0;
+
+ if (params->match.hash.extendable_bucket)
+ switch (params->match.hash.key_size) {
+ case 8:
+ ops = &rte_table_hash_key8_ext_ops;
+ break;
+ case 16:
+ ops = &rte_table_hash_key16_ext_ops;
+ break;
+ default:
+ ops = &rte_table_hash_ext_ops;
+ }
+ else
+ switch (params->match.hash.key_size) {
+ case 8:
+ ops = &rte_table_hash_key8_lru_ops;
+ break;
+ case 16:
+ ops = &rte_table_hash_key16_lru_ops;
+ break;
+ default:
+ ops = &rte_table_hash_lru_ops;
+ }
+
+ p.ops = ops;
+ p.arg_create = &pp.hash;
+ break;
+ }
+
+ case TABLE_LPM:
+ {
+ if (params->match.lpm.n_rules == 0)
+ return -1;
+
+ switch (params->match.lpm.key_size) {
+ case 4:
+ {
+ pp.lpm.name = name;
+ pp.lpm.n_rules = params->match.lpm.n_rules;
+ pp.lpm.number_tbl8s = TABLE_LPM_NUMBER_TBL8;
+ pp.lpm.flags = 0;
+ pp.lpm.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+ pp.lpm.offset = params->match.lpm.key_offset;
+
+ p.ops = &rte_table_lpm_ops;
+ p.arg_create = &pp.lpm;
+ break;
+ }
+
+ case 16:
+ {
+ pp.lpm_ipv6.name = name;
+ pp.lpm_ipv6.n_rules = params->match.lpm.n_rules;
+ pp.lpm_ipv6.number_tbl8s = TABLE_LPM_NUMBER_TBL8;
+ pp.lpm_ipv6.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+ pp.lpm_ipv6.offset = params->match.lpm.key_offset;
+
+ p.ops = &rte_table_lpm_ipv6_ops;
+ p.arg_create = &pp.lpm_ipv6;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ break;
+ }
+
+ case TABLE_STUB:
+ {
+ p.ops = &rte_table_stub_ops;
+ p.arg_create = NULL;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ /* Resource create */
+ action = NULL;
+ p.f_action_hit = NULL;
+ p.f_action_miss = NULL;
+ p.arg_ah = NULL;
+
+ if (ap) {
+ action = rte_table_action_create(ap->ap,
+ softnic->params.cpu_id);
+ if (action == NULL)
+ return -1;
+
+ status = rte_table_action_table_params_get(action,
+ &p);
+ if (status ||
+ ((p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry)) >
+ TABLE_RULE_ACTION_SIZE_MAX)) {
+ rte_table_action_free(action);
+ return -1;
+ }
+ }
+
+ if (params->match_type == TABLE_LPM) {
+ if (params->match.lpm.key_size == 4)
+ pp.lpm.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+
+ if (params->match.lpm.key_size == 16)
+ pp.lpm_ipv6.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+ }
+
+ status = rte_pipeline_table_create(pipeline->p,
+ &p,
+ &table_id);
+ if (status) {
+ rte_table_action_free(action);
+ return -1;
+ }
+
+ /* Pipeline */
+ table = &pipeline->table[pipeline->n_tables];
+ memcpy(&table->params, params, sizeof(*params));
+ table->ap = ap;
+ table->a = action;
+ pipeline->n_tables++;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_swq.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_swq.c
new file mode 100644
index 00000000..2083d0a9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_swq.c
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_string_fns.h>
+#include <rte_tailq.h>
+
+#include "rte_eth_softnic_internals.h"
+
+int
+softnic_swq_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->swq_list);
+
+ return 0;
+}
+
+void
+softnic_swq_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_swq *swq;
+
+ swq = TAILQ_FIRST(&p->swq_list);
+ if (swq == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->swq_list, swq, node);
+ rte_ring_free(swq->r);
+ free(swq);
+ }
+}
+
+void
+softnic_softnic_swq_free_keep_rxq_txq(struct pmd_internals *p)
+{
+ struct softnic_swq *swq, *tswq;
+
+ TAILQ_FOREACH_SAFE(swq, &p->swq_list, node, tswq) {
+ if ((strncmp(swq->name, "RXQ", strlen("RXQ")) == 0) ||
+ (strncmp(swq->name, "TXQ", strlen("TXQ")) == 0))
+ continue;
+
+ TAILQ_REMOVE(&p->swq_list, swq, node);
+ rte_ring_free(swq->r);
+ free(swq);
+ }
+}
+
+struct softnic_swq *
+softnic_swq_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_swq *swq;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(swq, &p->swq_list, node)
+ if (strcmp(swq->name, name) == 0)
+ return swq;
+
+ return NULL;
+}
+
+struct softnic_swq *
+softnic_swq_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_swq_params *params)
+{
+ char ring_name[NAME_SIZE];
+ struct softnic_swq *swq;
+ struct rte_ring *r;
+ unsigned int flags = RING_F_SP_ENQ | RING_F_SC_DEQ;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_swq_find(p, name) ||
+ params == NULL ||
+ params->size == 0)
+ return NULL;
+
+ /* Resource create */
+ snprintf(ring_name, sizeof(ring_name), "%s_%s",
+ p->params.name,
+ name);
+
+ r = rte_ring_create(ring_name,
+ params->size,
+ p->params.cpu_id,
+ flags);
+
+ if (r == NULL)
+ return NULL;
+
+ /* Node allocation */
+ swq = calloc(1, sizeof(struct softnic_swq));
+ if (swq == NULL) {
+ rte_ring_free(r);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(swq->name, name, sizeof(swq->name));
+ swq->r = r;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->swq_list, swq, node);
+
+ return swq;
+}
diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tap.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tap.c
new file mode 100644
index 00000000..bcc23a9f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tap.c
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <netinet/in.h>
+#ifdef RTE_EXEC_ENV_LINUXAPP
+#include <linux/if.h>
+#include <linux/if_tun.h>
+#endif
+#include <sys/ioctl.h>
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "rte_eth_softnic_internals.h"
+
+#define TAP_DEV "/dev/net/tun"
+
+int
+softnic_tap_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->tap_list);
+
+ return 0;
+}
+
+void
+softnic_tap_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_tap *tap;
+
+ tap = TAILQ_FIRST(&p->tap_list);
+ if (tap == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->tap_list, tap, node);
+ free(tap);
+ }
+}
+
+struct softnic_tap *
+softnic_tap_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tap *tap;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(tap, &p->tap_list, node)
+ if (strcmp(tap->name, name) == 0)
+ return tap;
+
+ return NULL;
+}
+
+#ifndef RTE_EXEC_ENV_LINUXAPP
+
+struct softnic_tap *
+softnic_tap_create(struct pmd_internals *p __rte_unused,
+ const char *name __rte_unused)
+{
+ return NULL;
+}
+
+#else
+
+struct softnic_tap *
+softnic_tap_create(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tap *tap;
+ struct ifreq ifr;
+ int fd, status;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_tap_find(p, name))
+ return NULL;
+
+ /* Resource create */
+ fd = open(TAP_DEV, O_RDWR | O_NONBLOCK);
+ if (fd < 0)
+ return NULL;
+
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", name);
+
+ status = ioctl(fd, TUNSETIFF, (void *)&ifr);
+ if (status < 0) {
+ close(fd);
+ return NULL;
+ }
+
+ /* Node allocation */
+ tap = calloc(1, sizeof(struct softnic_tap));
+ if (tap == NULL) {
+ close(fd);
+ return NULL;
+ }
+ /* Node fill in */
+ strlcpy(tap->name, name, sizeof(tap->name));
+ tap->fd = fd;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->tap_list, tap, node);
+
+ return tap;
+}
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_thread.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_thread.c
new file mode 100644
index 00000000..8a150903
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_thread.c
@@ -0,0 +1,2929 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_lcore.h>
+#include <rte_ring.h>
+
+#include <rte_table_acl.h>
+#include <rte_table_array.h>
+#include <rte_table_hash.h>
+#include <rte_table_lpm.h>
+#include <rte_table_lpm_ipv6.h>
+#include "rte_eth_softnic_internals.h"
+
+/**
+ * Master thread: data plane thread init
+ */
+void
+softnic_thread_free(struct pmd_internals *softnic)
+{
+ uint32_t i;
+
+ RTE_LCORE_FOREACH_SLAVE(i) {
+ struct softnic_thread *t = &softnic->thread[i];
+
+ /* MSGQs */
+ if (t->msgq_req)
+ rte_ring_free(t->msgq_req);
+
+ if (t->msgq_rsp)
+ rte_ring_free(t->msgq_rsp);
+ }
+}
+
+int
+softnic_thread_init(struct pmd_internals *softnic)
+{
+ uint32_t i;
+
+ RTE_LCORE_FOREACH_SLAVE(i) {
+ char ring_name[NAME_MAX];
+ struct rte_ring *msgq_req, *msgq_rsp;
+ struct softnic_thread *t = &softnic->thread[i];
+ struct softnic_thread_data *t_data = &softnic->thread_data[i];
+ uint32_t cpu_id = rte_lcore_to_socket_id(i);
+
+ /* MSGQs */
+ snprintf(ring_name, sizeof(ring_name), "%s-TH%u-REQ",
+ softnic->params.name,
+ i);
+
+ msgq_req = rte_ring_create(ring_name,
+ THREAD_MSGQ_SIZE,
+ cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (msgq_req == NULL) {
+ softnic_thread_free(softnic);
+ return -1;
+ }
+
+ snprintf(ring_name, sizeof(ring_name), "%s-TH%u-RSP",
+ softnic->params.name,
+ i);
+
+ msgq_rsp = rte_ring_create(ring_name,
+ THREAD_MSGQ_SIZE,
+ cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (msgq_rsp == NULL) {
+ softnic_thread_free(softnic);
+ return -1;
+ }
+
+ /* Master thread records */
+ t->msgq_req = msgq_req;
+ t->msgq_rsp = msgq_rsp;
+ t->enabled = 1;
+
+ /* Data plane thread records */
+ t_data->n_pipelines = 0;
+ t_data->msgq_req = msgq_req;
+ t_data->msgq_rsp = msgq_rsp;
+ t_data->timer_period =
+ (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
+ t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
+ t_data->time_next_min = t_data->time_next;
+ }
+
+ return 0;
+}
+
+static inline int
+thread_is_running(uint32_t thread_id)
+{
+ enum rte_lcore_state_t thread_state;
+
+ thread_state = rte_eal_get_lcore_state(thread_id);
+ return (thread_state == RUNNING)? 1 : 0;
+}
+
+/**
+ * Pipeline is running when:
+ * (A) Pipeline is mapped to a data plane thread AND
+ * (B) Its data plane thread is in RUNNING state.
+ */
+static inline int
+pipeline_is_running(struct pipeline *p)
+{
+ if (p->enabled == 0)
+ return 0;
+
+ return thread_is_running(p->thread_id);
+}
+
+/**
+ * Master thread & data plane threads: message passing
+ */
+enum thread_req_type {
+ THREAD_REQ_PIPELINE_ENABLE = 0,
+ THREAD_REQ_PIPELINE_DISABLE,
+ THREAD_REQ_MAX
+};
+
+struct thread_msg_req {
+ enum thread_req_type type;
+
+ union {
+ struct {
+ struct rte_pipeline *p;
+ struct {
+ struct rte_table_action *a;
+ } table[RTE_PIPELINE_TABLE_MAX];
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint32_t timer_period_ms;
+ uint32_t n_tables;
+ } pipeline_enable;
+
+ struct {
+ struct rte_pipeline *p;
+ } pipeline_disable;
+ };
+};
+
+struct thread_msg_rsp {
+ int status;
+};
+
+/**
+ * Master thread
+ */
+static struct thread_msg_req *
+thread_msg_alloc(void)
+{
+ size_t size = RTE_MAX(sizeof(struct thread_msg_req),
+ sizeof(struct thread_msg_rsp));
+
+ return calloc(1, size);
+}
+
+static void
+thread_msg_free(struct thread_msg_rsp *rsp)
+{
+ free(rsp);
+}
+
+static struct thread_msg_rsp *
+thread_msg_send_recv(struct pmd_internals *softnic,
+ uint32_t thread_id,
+ struct thread_msg_req *req)
+{
+ struct softnic_thread *t = &softnic->thread[thread_id];
+ struct rte_ring *msgq_req = t->msgq_req;
+ struct rte_ring *msgq_rsp = t->msgq_rsp;
+ struct thread_msg_rsp *rsp;
+ int status;
+
+ /* send */
+ do {
+ status = rte_ring_sp_enqueue(msgq_req, req);
+ } while (status == -ENOBUFS);
+
+ /* recv */
+ do {
+ status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
+ } while (status != 0);
+
+ return rsp;
+}
+
+int
+softnic_thread_pipeline_enable(struct pmd_internals *softnic,
+ uint32_t thread_id,
+ const char *pipeline_name)
+{
+ struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name);
+ struct softnic_thread *t;
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+ uint32_t i;
+ int status;
+
+ /* Check input params */
+ if ((thread_id >= RTE_MAX_LCORE) ||
+ (p == NULL) ||
+ (p->n_ports_in == 0) ||
+ (p->n_ports_out == 0) ||
+ (p->n_tables == 0))
+ return -1;
+
+ t = &softnic->thread[thread_id];
+ if ((t->enabled == 0) ||
+ p->enabled)
+ return -1;
+
+ if (!thread_is_running(thread_id)) {
+ struct softnic_thread_data *td = &softnic->thread_data[thread_id];
+ struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
+
+ if (td->n_pipelines >= THREAD_PIPELINES_MAX)
+ return -1;
+
+ /* Data plane thread */
+ td->p[td->n_pipelines] = p->p;
+
+ tdp->p = p->p;
+ for (i = 0; i < p->n_tables; i++)
+ tdp->table_data[i].a =
+ p->table[i].a;
+ tdp->n_tables = p->n_tables;
+
+ tdp->msgq_req = p->msgq_req;
+ tdp->msgq_rsp = p->msgq_rsp;
+ tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000;
+ tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
+
+ td->n_pipelines++;
+
+ /* Pipeline */
+ p->thread_id = thread_id;
+ p->enabled = 1;
+
+ return 0;
+ }
+
+ /* Allocate request */
+ req = thread_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = THREAD_REQ_PIPELINE_ENABLE;
+ req->pipeline_enable.p = p->p;
+ for (i = 0; i < p->n_tables; i++)
+ req->pipeline_enable.table[i].a =
+ p->table[i].a;
+ req->pipeline_enable.msgq_req = p->msgq_req;
+ req->pipeline_enable.msgq_rsp = p->msgq_rsp;
+ req->pipeline_enable.timer_period_ms = p->timer_period_ms;
+ req->pipeline_enable.n_tables = p->n_tables;
+
+ /* Send request and wait for response */
+ rsp = thread_msg_send_recv(softnic, thread_id, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ thread_msg_free(rsp);
+
+ /* Request completion */
+ if (status)
+ return status;
+
+ p->thread_id = thread_id;
+ p->enabled = 1;
+
+ return 0;
+}
+
+int
+softnic_thread_pipeline_disable(struct pmd_internals *softnic,
+ uint32_t thread_id,
+ const char *pipeline_name)
+{
+ struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name);
+ struct softnic_thread *t;
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((thread_id >= RTE_MAX_LCORE) ||
+ (p == NULL))
+ return -1;
+
+ t = &softnic->thread[thread_id];
+ if (t->enabled == 0)
+ return -1;
+
+ if (p->enabled == 0)
+ return 0;
+
+ if (p->thread_id != thread_id)
+ return -1;
+
+ if (!thread_is_running(thread_id)) {
+ struct softnic_thread_data *td = &softnic->thread_data[thread_id];
+ uint32_t i;
+
+ for (i = 0; i < td->n_pipelines; i++) {
+ struct pipeline_data *tdp = &td->pipeline_data[i];
+
+ if (tdp->p != p->p)
+ continue;
+
+ /* Data plane thread */
+ if (i < td->n_pipelines - 1) {
+ struct rte_pipeline *pipeline_last =
+ td->p[td->n_pipelines - 1];
+ struct pipeline_data *tdp_last =
+ &td->pipeline_data[td->n_pipelines - 1];
+
+ td->p[i] = pipeline_last;
+ memcpy(tdp, tdp_last, sizeof(*tdp));
+ }
+
+ td->n_pipelines--;
+
+ /* Pipeline */
+ p->enabled = 0;
+
+ break;
+ }
+
+ return 0;
+ }
+
+ /* Allocate request */
+ req = thread_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = THREAD_REQ_PIPELINE_DISABLE;
+ req->pipeline_disable.p = p->p;
+
+ /* Send request and wait for response */
+ rsp = thread_msg_send_recv(softnic, thread_id, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ thread_msg_free(rsp);
+
+ /* Request completion */
+ if (status)
+ return status;
+
+ p->enabled = 0;
+
+ return 0;
+}
+
+/**
+ * Data plane threads: message handling
+ */
+static inline struct thread_msg_req *
+thread_msg_recv(struct rte_ring *msgq_req)
+{
+ struct thread_msg_req *req;
+
+ int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);
+
+ if (status != 0)
+ return NULL;
+
+ return req;
+}
+
+static inline void
+thread_msg_send(struct rte_ring *msgq_rsp,
+ struct thread_msg_rsp *rsp)
+{
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(msgq_rsp, rsp);
+ } while (status == -ENOBUFS);
+}
+
+static struct thread_msg_rsp *
+thread_msg_handle_pipeline_enable(struct softnic_thread_data *t,
+ struct thread_msg_req *req)
+{
+ struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req;
+ struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
+ uint32_t i;
+
+ /* Request */
+ if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ t->p[t->n_pipelines] = req->pipeline_enable.p;
+
+ p->p = req->pipeline_enable.p;
+ for (i = 0; i < req->pipeline_enable.n_tables; i++)
+ p->table_data[i].a =
+ req->pipeline_enable.table[i].a;
+
+ p->n_tables = req->pipeline_enable.n_tables;
+
+ p->msgq_req = req->pipeline_enable.msgq_req;
+ p->msgq_rsp = req->pipeline_enable.msgq_rsp;
+ p->timer_period =
+ (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
+ p->time_next = rte_get_tsc_cycles() + p->timer_period;
+
+ t->n_pipelines++;
+
+ /* Response */
+ rsp->status = 0;
+ return rsp;
+}
+
+static struct thread_msg_rsp *
+thread_msg_handle_pipeline_disable(struct softnic_thread_data *t,
+ struct thread_msg_req *req)
+{
+ struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req;
+ uint32_t n_pipelines = t->n_pipelines;
+ struct rte_pipeline *pipeline = req->pipeline_disable.p;
+ uint32_t i;
+
+ /* find pipeline */
+ for (i = 0; i < n_pipelines; i++) {
+ struct pipeline_data *p = &t->pipeline_data[i];
+
+ if (p->p != pipeline)
+ continue;
+
+ if (i < n_pipelines - 1) {
+ struct rte_pipeline *pipeline_last =
+ t->p[n_pipelines - 1];
+ struct pipeline_data *p_last =
+ &t->pipeline_data[n_pipelines - 1];
+
+ t->p[i] = pipeline_last;
+ memcpy(p, p_last, sizeof(*p));
+ }
+
+ t->n_pipelines--;
+
+ rsp->status = 0;
+ return rsp;
+ }
+
+ /* should not get here */
+ rsp->status = 0;
+ return rsp;
+}
+
+static void
+thread_msg_handle(struct softnic_thread_data *t)
+{
+ for ( ; ; ) {
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+
+ req = thread_msg_recv(t->msgq_req);
+ if (req == NULL)
+ break;
+
+ switch (req->type) {
+ case THREAD_REQ_PIPELINE_ENABLE:
+ rsp = thread_msg_handle_pipeline_enable(t, req);
+ break;
+
+ case THREAD_REQ_PIPELINE_DISABLE:
+ rsp = thread_msg_handle_pipeline_disable(t, req);
+ break;
+
+ default:
+ rsp = (struct thread_msg_rsp *)req;
+ rsp->status = -1;
+ }
+
+ thread_msg_send(t->msgq_rsp, rsp);
+ }
+}
+
+/**
+ * Master thread & data plane threads: message passing
+ */
+enum pipeline_req_type {
+ /* Port IN */
+ PIPELINE_REQ_PORT_IN_STATS_READ,
+ PIPELINE_REQ_PORT_IN_ENABLE,
+ PIPELINE_REQ_PORT_IN_DISABLE,
+
+ /* Port OUT */
+ PIPELINE_REQ_PORT_OUT_STATS_READ,
+
+ /* Table */
+ PIPELINE_REQ_TABLE_STATS_READ,
+ PIPELINE_REQ_TABLE_RULE_ADD,
+ PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
+ PIPELINE_REQ_TABLE_RULE_ADD_BULK,
+ PIPELINE_REQ_TABLE_RULE_DELETE,
+ PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
+ PIPELINE_REQ_TABLE_RULE_STATS_READ,
+ PIPELINE_REQ_TABLE_MTR_PROFILE_ADD,
+ PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE,
+ PIPELINE_REQ_TABLE_RULE_MTR_READ,
+ PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE,
+ PIPELINE_REQ_TABLE_RULE_TTL_READ,
+ PIPELINE_REQ_MAX
+};
+
+struct pipeline_msg_req_port_in_stats_read {
+ int clear;
+};
+
+struct pipeline_msg_req_port_out_stats_read {
+ int clear;
+};
+
+struct pipeline_msg_req_table_stats_read {
+ int clear;
+};
+
+struct pipeline_msg_req_table_rule_add {
+ struct softnic_table_rule_match match;
+ struct softnic_table_rule_action action;
+};
+
+struct pipeline_msg_req_table_rule_add_default {
+ struct softnic_table_rule_action action;
+};
+
+struct pipeline_msg_req_table_rule_add_bulk {
+ struct softnic_table_rule_match *match;
+ struct softnic_table_rule_action *action;
+ void **data;
+ uint32_t n_rules;
+ int bulk;
+};
+
+struct pipeline_msg_req_table_rule_delete {
+ struct softnic_table_rule_match match;
+};
+
+struct pipeline_msg_req_table_rule_stats_read {
+ void *data;
+ int clear;
+};
+
+struct pipeline_msg_req_table_mtr_profile_add {
+ uint32_t meter_profile_id;
+ struct rte_table_action_meter_profile profile;
+};
+
+struct pipeline_msg_req_table_mtr_profile_delete {
+ uint32_t meter_profile_id;
+};
+
+struct pipeline_msg_req_table_rule_mtr_read {
+ void *data;
+ uint32_t tc_mask;
+ int clear;
+};
+
+struct pipeline_msg_req_table_dscp_table_update {
+ uint64_t dscp_mask;
+ struct rte_table_action_dscp_table dscp_table;
+};
+
+struct pipeline_msg_req_table_rule_ttl_read {
+ void *data;
+ int clear;
+};
+
+struct pipeline_msg_req {
+ enum pipeline_req_type type;
+ uint32_t id; /* Port IN, port OUT or table ID */
+
+ RTE_STD_C11
+ union {
+ struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
+ struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
+ struct pipeline_msg_req_table_stats_read table_stats_read;
+ struct pipeline_msg_req_table_rule_add table_rule_add;
+ struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
+ struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
+ struct pipeline_msg_req_table_rule_delete table_rule_delete;
+ struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
+ struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add;
+ struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete;
+ struct pipeline_msg_req_table_rule_mtr_read table_rule_mtr_read;
+ struct pipeline_msg_req_table_dscp_table_update table_dscp_table_update;
+ struct pipeline_msg_req_table_rule_ttl_read table_rule_ttl_read;
+ };
+};
+
+struct pipeline_msg_rsp_port_in_stats_read {
+ struct rte_pipeline_port_in_stats stats;
+};
+
+struct pipeline_msg_rsp_port_out_stats_read {
+ struct rte_pipeline_port_out_stats stats;
+};
+
+struct pipeline_msg_rsp_table_stats_read {
+ struct rte_pipeline_table_stats stats;
+};
+
+struct pipeline_msg_rsp_table_rule_add {
+ void *data;
+};
+
+struct pipeline_msg_rsp_table_rule_add_default {
+ void *data;
+};
+
+struct pipeline_msg_rsp_table_rule_add_bulk {
+ uint32_t n_rules;
+};
+
+struct pipeline_msg_rsp_table_rule_stats_read {
+ struct rte_table_action_stats_counters stats;
+};
+
+struct pipeline_msg_rsp_table_rule_mtr_read {
+ struct rte_table_action_mtr_counters stats;
+};
+
+struct pipeline_msg_rsp_table_rule_ttl_read {
+ struct rte_table_action_ttl_counters stats;
+};
+
+struct pipeline_msg_rsp {
+ int status;
+
+ RTE_STD_C11
+ union {
+ struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
+ struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
+ struct pipeline_msg_rsp_table_stats_read table_stats_read;
+ struct pipeline_msg_rsp_table_rule_add table_rule_add;
+ struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
+ struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
+ struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
+ struct pipeline_msg_rsp_table_rule_mtr_read table_rule_mtr_read;
+ struct pipeline_msg_rsp_table_rule_ttl_read table_rule_ttl_read;
+ };
+};
+
+/**
+ * Master thread
+ */
+static struct pipeline_msg_req *
+pipeline_msg_alloc(void)
+{
+ size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
+ sizeof(struct pipeline_msg_rsp));
+
+ return calloc(1, size);
+}
+
+static void
+pipeline_msg_free(struct pipeline_msg_rsp *rsp)
+{
+ free(rsp);
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_send_recv(struct pipeline *p,
+ struct pipeline_msg_req *req)
+{
+ struct rte_ring *msgq_req = p->msgq_req;
+ struct rte_ring *msgq_rsp = p->msgq_rsp;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* send */
+ do {
+ status = rte_ring_sp_enqueue(msgq_req, req);
+ } while (status == -ENOBUFS);
+
+ /* recv */
+ do {
+ status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
+ } while (status != 0);
+
+ return rsp;
+}
+
+int
+softnic_pipeline_port_in_stats_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ port_id >= p->n_ports_in)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_in_stats_read(p->p,
+ port_id,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
+ req->id = port_id;
+ req->port_in_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status)
+ memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_port_in_enable(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ port_id >= p->n_ports_in)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_in_enable(p->p, port_id);
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_IN_ENABLE;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_port_in_disable(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ port_id >= p->n_ports_in)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_in_disable(p->p, port_id);
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_IN_DISABLE;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_port_out_stats_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ port_id >= p->n_ports_out)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_out_stats_read(p->p,
+ port_id,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
+ req->id = port_id;
+ req->port_out_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status)
+ memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_stats_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_table_stats_read(p->p,
+ table_id,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_STATS_READ;
+ req->id = table_id;
+ req->table_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status)
+ memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+static int
+match_check(struct softnic_table_rule_match *match,
+ struct pipeline *p,
+ uint32_t table_id)
+{
+ struct softnic_table *table;
+
+ if (match == NULL ||
+ p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ table = &p->table[table_id];
+ if (match->match_type != table->params.match_type)
+ return -1;
+
+ switch (match->match_type) {
+ case TABLE_ACL:
+ {
+ struct softnic_table_acl_params *t = &table->params.match.acl;
+ struct softnic_table_rule_match_acl *r = &match->match.acl;
+
+ if ((r->ip_version && (t->ip_version == 0)) ||
+ ((r->ip_version == 0) && t->ip_version))
+ return -1;
+
+ if (r->ip_version) {
+ if (r->sa_depth > 32 ||
+ r->da_depth > 32)
+ return -1;
+ } else {
+ if (r->sa_depth > 128 ||
+ r->da_depth > 128)
+ return -1;
+ }
+ return 0;
+ }
+
+ case TABLE_ARRAY:
+ return 0;
+
+ case TABLE_HASH:
+ return 0;
+
+ case TABLE_LPM:
+ {
+ struct softnic_table_lpm_params *t = &table->params.match.lpm;
+ struct softnic_table_rule_match_lpm *r = &match->match.lpm;
+
+ if ((r->ip_version && (t->key_size != 4)) ||
+ ((r->ip_version == 0) && (t->key_size != 16)))
+ return -1;
+
+ if (r->ip_version) {
+ if (r->depth > 32)
+ return -1;
+ } else {
+ if (r->depth > 128)
+ return -1;
+ }
+ return 0;
+ }
+
+ case TABLE_STUB:
+ return -1;
+
+ default:
+ return -1;
+ }
+}
+
+static int
+action_check(struct softnic_table_rule_action *action,
+ struct pipeline *p,
+ uint32_t table_id)
+{
+ struct softnic_table_action_profile *ap;
+
+ if (action == NULL ||
+ p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ ap = p->table[table_id].ap;
+ if (action->action_mask != ap->params.action_mask)
+ return -1;
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT &&
+ action->fwd.id >= p->n_ports_out)
+ return -1;
+
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE &&
+ action->fwd.id >= p->n_tables)
+ return -1;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
+ uint32_t tc_mask1 = action->mtr.tc_mask;
+
+ if (tc_mask1 != tc_mask0)
+ return -1;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ uint32_t n_subports_per_port =
+ ap->params.tm.n_subports_per_port;
+ uint32_t n_pipes_per_subport =
+ ap->params.tm.n_pipes_per_subport;
+ uint32_t subport_id = action->tm.subport_id;
+ uint32_t pipe_id = action->tm.pipe_id;
+
+ if (subport_id >= n_subports_per_port ||
+ pipe_id >= n_pipes_per_subport)
+ return -1;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ uint64_t encap_mask = ap->params.encap.encap_mask;
+ enum rte_table_action_encap_type type = action->encap.type;
+
+ if ((encap_mask & (1LLU << type)) == 0)
+ return -1;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ int ip_version0 = ap->params.common.ip_version;
+ int ip_version1 = action->nat.ip_version;
+
+ if ((ip_version1 && (ip_version0 == 0)) ||
+ ((ip_version1 == 0) && ip_version0))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+action_default_check(struct softnic_table_rule_action *action,
+ struct pipeline *p,
+ uint32_t table_id)
+{
+ if (action == NULL ||
+ action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD) ||
+ p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT &&
+ action->fwd.id >= p->n_ports_out)
+ return -1;
+
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE &&
+ action->fwd.id >= p->n_tables)
+ return -1;
+ }
+
+ return 0;
+}
+
+union table_rule_match_low_level {
+ struct rte_table_acl_rule_add_params acl_add;
+ struct rte_table_acl_rule_delete_params acl_delete;
+ struct rte_table_array_key array;
+ uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
+ struct rte_table_lpm_key lpm_ipv4;
+ struct rte_table_lpm_ipv6_key lpm_ipv6;
+};
+
+static int
+match_convert(struct softnic_table_rule_match *mh,
+ union table_rule_match_low_level *ml,
+ int add);
+
+static int
+action_convert(struct rte_table_action *a,
+ struct softnic_table_rule_action *action,
+ struct rte_pipeline_table_entry *data);
+
+int
+softnic_pipeline_table_rule_add(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match,
+ struct softnic_table_rule_action *action,
+ void **data)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ match == NULL ||
+ action == NULL ||
+ data == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables ||
+ match_check(match, p, table_id) ||
+ action_check(action, p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+ union table_rule_match_low_level match_ll;
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ int key_found;
+ uint8_t *buffer;
+
+ buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
+ if (buffer == NULL)
+ return -1;
+
+ /* Table match-action rule conversion */
+ data_in = (struct rte_pipeline_table_entry *)buffer;
+
+ status = match_convert(match, &match_ll, 1);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ status = action_convert(a, action, data_in);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ /* Add rule (match, action) to table */
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ &match_ll,
+ data_in,
+ &key_found,
+ &data_out);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ /* Write Response */
+ *data = data_out;
+
+ free(buffer);
+ return 0;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_ADD;
+ req->id = table_id;
+ memcpy(&req->table_rule_add.match, match, sizeof(*match));
+ memcpy(&req->table_rule_add.action, action, sizeof(*action));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ *data = rsp->table_rule_add.data;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_add_default(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_action *action,
+ void **data)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ action == NULL ||
+ data == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables ||
+ action_default_check(action, p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ uint8_t *buffer;
+
+ buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
+ if (buffer == NULL)
+ return -1;
+
+ /* Apply actions */
+ data_in = (struct rte_pipeline_table_entry *)buffer;
+
+ data_in->action = action->fwd.action;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
+ data_in->port_id = action->fwd.id;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
+ data_in->table_id = action->fwd.id;
+
+ /* Add default rule to table */
+ status = rte_pipeline_table_default_entry_add(p->p,
+ table_id,
+ data_in,
+ &data_out);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ /* Write Response */
+ *data = data_out;
+
+ free(buffer);
+ return 0;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
+ req->id = table_id;
+ memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ *data = rsp->table_rule_add_default.data;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_add_bulk(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match,
+ struct softnic_table_rule_action *action,
+ void **data,
+ uint32_t *n_rules)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ uint32_t i;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ match == NULL ||
+ action == NULL ||
+ data == NULL ||
+ n_rules == NULL ||
+ (*n_rules == 0))
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ for (i = 0; i < *n_rules; i++)
+ if (match_check(match, p, table_id) ||
+ action_check(action, p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+ union table_rule_match_low_level *match_ll;
+ uint8_t *action_ll;
+ void **match_ll_ptr;
+ struct rte_pipeline_table_entry **action_ll_ptr;
+ struct rte_pipeline_table_entry **entries_ptr =
+ (struct rte_pipeline_table_entry **)data;
+ uint32_t bulk =
+ (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
+ int *found;
+
+ /* Memory allocation */
+ match_ll = calloc(*n_rules, sizeof(union table_rule_match_low_level));
+ action_ll = calloc(*n_rules, TABLE_RULE_ACTION_SIZE_MAX);
+ match_ll_ptr = calloc(*n_rules, sizeof(void *));
+ action_ll_ptr =
+ calloc(*n_rules, sizeof(struct rte_pipeline_table_entry *));
+ found = calloc(*n_rules, sizeof(int));
+
+ if (match_ll == NULL ||
+ action_ll == NULL ||
+ match_ll_ptr == NULL ||
+ action_ll_ptr == NULL ||
+ found == NULL)
+ goto fail;
+
+ for (i = 0; i < *n_rules; i++) {
+ match_ll_ptr[i] = (void *)&match_ll[i];
+ action_ll_ptr[i] =
+ (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
+ }
+
+ /* Rule match conversion */
+ for (i = 0; i < *n_rules; i++) {
+ status = match_convert(&match[i], match_ll_ptr[i], 1);
+ if (status)
+ goto fail;
+ }
+
+ /* Rule action conversion */
+ for (i = 0; i < *n_rules; i++) {
+ status = action_convert(a, &action[i], action_ll_ptr[i]);
+ if (status)
+ goto fail;
+ }
+
+ /* Add rule (match, action) to table */
+ if (bulk) {
+ status = rte_pipeline_table_entry_add_bulk(p->p,
+ table_id,
+ match_ll_ptr,
+ action_ll_ptr,
+ *n_rules,
+ found,
+ entries_ptr);
+ if (status)
+ *n_rules = 0;
+ } else {
+ for (i = 0; i < *n_rules; i++) {
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ match_ll_ptr[i],
+ action_ll_ptr[i],
+ &found[i],
+ &entries_ptr[i]);
+ if (status) {
+ *n_rules = i;
+ break;
+ }
+ }
+ }
+
+ /* Free */
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ return status;
+
+fail:
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ *n_rules = 0;
+ return -1;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
+ req->id = table_id;
+ req->table_rule_add_bulk.match = match;
+ req->table_rule_add_bulk.action = action;
+ req->table_rule_add_bulk.data = data;
+ req->table_rule_add_bulk.n_rules = *n_rules;
+ req->table_rule_add_bulk.bulk =
+ (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ *n_rules = rsp->table_rule_add_bulk.n_rules;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_delete(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ match == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables ||
+ match_check(match, p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ union table_rule_match_low_level match_ll;
+ int key_found;
+
+ status = match_convert(match, &match_ll, 0);
+ if (status)
+ return -1;
+
+ status = rte_pipeline_table_entry_delete(p->p,
+ table_id,
+ &match_ll,
+ &key_found,
+ NULL);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
+ req->id = table_id;
+ memcpy(&req->table_rule_delete.match, match, sizeof(*match));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_delete_default(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_table_default_entry_delete(p->p,
+ table_id,
+ NULL);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
+ req->id = table_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_stats_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_stats_counters *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ data == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_stats_read(a,
+ data,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
+ req->id = table_id;
+ req->table_rule_stats_read.data = data;
+ req->table_rule_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status)
+ memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_mtr_profile_add(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id,
+ struct rte_table_action_meter_profile *profile)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ profile == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_meter_profile_add(a,
+ meter_profile_id,
+ profile);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
+ req->id = table_id;
+ req->table_mtr_profile_add.meter_profile_id = meter_profile_id;
+ memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_mtr_profile_delete(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_meter_profile_delete(a,
+ meter_profile_id);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE;
+ req->id = table_id;
+ req->table_mtr_profile_delete.meter_profile_id = meter_profile_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_mtr_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ uint32_t tc_mask,
+ struct rte_table_action_mtr_counters *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ data == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_meter_read(a,
+ data,
+ tc_mask,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_MTR_READ;
+ req->id = table_id;
+ req->table_rule_mtr_read.data = data;
+ req->table_rule_mtr_read.tc_mask = tc_mask;
+ req->table_rule_mtr_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status)
+ memcpy(stats, &rsp->table_rule_mtr_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_dscp_table_update(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint64_t dscp_mask,
+ struct rte_table_action_dscp_table *dscp_table)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ dscp_table == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_dscp_table_update(a,
+ dscp_mask,
+ dscp_table);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE;
+ req->id = table_id;
+ req->table_dscp_table_update.dscp_mask = dscp_mask;
+ memcpy(&req->table_dscp_table_update.dscp_table,
+ dscp_table, sizeof(*dscp_table));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_ttl_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_ttl_counters *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ data == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_ttl_read(a,
+ data,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_TTL_READ;
+ req->id = table_id;
+ req->table_rule_ttl_read.data = data;
+ req->table_rule_ttl_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status)
+ memcpy(stats, &rsp->table_rule_ttl_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+/**
+ * Data plane threads: message handling
+ */
+static inline struct pipeline_msg_req *
+pipeline_msg_recv(struct rte_ring *msgq_req)
+{
+ struct pipeline_msg_req *req;
+
+ int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);
+
+ if (status != 0)
+ return NULL;
+
+ return req;
+}
+
+static inline void
+pipeline_msg_send(struct rte_ring *msgq_rsp,
+ struct pipeline_msg_rsp *rsp)
+{
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(msgq_rsp, rsp);
+ } while (status == -ENOBUFS);
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+ int clear = req->port_in_stats_read.clear;
+
+ rsp->status = rte_pipeline_port_in_stats_read(p->p,
+ port_id,
+ &rsp->port_in_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+
+ rsp->status = rte_pipeline_port_in_enable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+
+ rsp->status = rte_pipeline_port_in_disable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+ int clear = req->port_out_stats_read.clear;
+
+ rsp->status = rte_pipeline_port_out_stats_read(p->p,
+ port_id,
+ &rsp->port_out_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+ int clear = req->table_stats_read.clear;
+
+ rsp->status = rte_pipeline_table_stats_read(p->p,
+ port_id,
+ &rsp->table_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static int
+match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
+{
+ if (depth > 128)
+ return -1;
+
+ switch (depth / 32) {
+ case 0:
+ depth32[0] = depth;
+ depth32[1] = 0;
+ depth32[2] = 0;
+ depth32[3] = 0;
+ return 0;
+
+ case 1:
+ depth32[0] = 32;
+ depth32[1] = depth - 32;
+ depth32[2] = 0;
+ depth32[3] = 0;
+ return 0;
+
+ case 2:
+ depth32[0] = 32;
+ depth32[1] = 32;
+ depth32[2] = depth - 64;
+ depth32[3] = 0;
+ return 0;
+
+ case 3:
+ depth32[0] = 32;
+ depth32[1] = 32;
+ depth32[2] = 32;
+ depth32[3] = depth - 96;
+ return 0;
+
+ case 4:
+ depth32[0] = 32;
+ depth32[1] = 32;
+ depth32[2] = 32;
+ depth32[3] = 32;
+ return 0;
+
+ default:
+ return -1;
+ }
+}
+
+static int
+match_convert(struct softnic_table_rule_match *mh,
+ union table_rule_match_low_level *ml,
+ int add)
+{
+ memset(ml, 0, sizeof(*ml));
+
+ switch (mh->match_type) {
+ case TABLE_ACL:
+ if (mh->match.acl.ip_version)
+ if (add) {
+ ml->acl_add.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_add.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_add.field_value[1].value.u32 =
+ mh->match.acl.ipv4.sa;
+ ml->acl_add.field_value[1].mask_range.u32 =
+ mh->match.acl.sa_depth;
+
+ ml->acl_add.field_value[2].value.u32 =
+ mh->match.acl.ipv4.da;
+ ml->acl_add.field_value[2].mask_range.u32 =
+ mh->match.acl.da_depth;
+
+ ml->acl_add.field_value[3].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_add.field_value[3].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_add.field_value[4].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_add.field_value[4].mask_range.u16 =
+ mh->match.acl.dp1;
+
+ ml->acl_add.priority =
+ (int32_t)mh->match.acl.priority;
+ } else {
+ ml->acl_delete.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_delete.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_delete.field_value[1].value.u32 =
+ mh->match.acl.ipv4.sa;
+ ml->acl_delete.field_value[1].mask_range.u32 =
+ mh->match.acl.sa_depth;
+
+ ml->acl_delete.field_value[2].value.u32 =
+ mh->match.acl.ipv4.da;
+ ml->acl_delete.field_value[2].mask_range.u32 =
+ mh->match.acl.da_depth;
+
+ ml->acl_delete.field_value[3].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_delete.field_value[3].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_delete.field_value[4].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_delete.field_value[4].mask_range.u16 =
+ mh->match.acl.dp1;
+ }
+ else
+ if (add) {
+ uint32_t *sa32 =
+ (uint32_t *)mh->match.acl.ipv6.sa;
+ uint32_t *da32 =
+ (uint32_t *)mh->match.acl.ipv6.da;
+ uint32_t sa32_depth[4], da32_depth[4];
+ int status;
+
+ status = match_convert_ipv6_depth(mh->match.acl.sa_depth,
+ sa32_depth);
+ if (status)
+ return status;
+
+ status = match_convert_ipv6_depth(
+ mh->match.acl.da_depth,
+ da32_depth);
+ if (status)
+ return status;
+
+ ml->acl_add.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_add.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_add.field_value[1].value.u32 = sa32[0];
+ ml->acl_add.field_value[1].mask_range.u32 =
+ sa32_depth[0];
+ ml->acl_add.field_value[2].value.u32 = sa32[1];
+ ml->acl_add.field_value[2].mask_range.u32 =
+ sa32_depth[1];
+ ml->acl_add.field_value[3].value.u32 = sa32[2];
+ ml->acl_add.field_value[3].mask_range.u32 =
+ sa32_depth[2];
+ ml->acl_add.field_value[4].value.u32 = sa32[3];
+ ml->acl_add.field_value[4].mask_range.u32 =
+ sa32_depth[3];
+
+ ml->acl_add.field_value[5].value.u32 = da32[0];
+ ml->acl_add.field_value[5].mask_range.u32 =
+ da32_depth[0];
+ ml->acl_add.field_value[6].value.u32 = da32[1];
+ ml->acl_add.field_value[6].mask_range.u32 =
+ da32_depth[1];
+ ml->acl_add.field_value[7].value.u32 = da32[2];
+ ml->acl_add.field_value[7].mask_range.u32 =
+ da32_depth[2];
+ ml->acl_add.field_value[8].value.u32 = da32[3];
+ ml->acl_add.field_value[8].mask_range.u32 =
+ da32_depth[3];
+
+ ml->acl_add.field_value[9].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_add.field_value[9].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_add.field_value[10].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_add.field_value[10].mask_range.u16 =
+ mh->match.acl.dp1;
+
+ ml->acl_add.priority =
+ (int32_t)mh->match.acl.priority;
+ } else {
+ uint32_t *sa32 =
+ (uint32_t *)mh->match.acl.ipv6.sa;
+ uint32_t *da32 =
+ (uint32_t *)mh->match.acl.ipv6.da;
+ uint32_t sa32_depth[4], da32_depth[4];
+ int status;
+
+ status = match_convert_ipv6_depth(mh->match.acl.sa_depth,
+ sa32_depth);
+ if (status)
+ return status;
+
+ status = match_convert_ipv6_depth(mh->match.acl.da_depth,
+ da32_depth);
+ if (status)
+ return status;
+
+ ml->acl_delete.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_delete.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_delete.field_value[1].value.u32 =
+ sa32[0];
+ ml->acl_delete.field_value[1].mask_range.u32 =
+ sa32_depth[0];
+ ml->acl_delete.field_value[2].value.u32 =
+ sa32[1];
+ ml->acl_delete.field_value[2].mask_range.u32 =
+ sa32_depth[1];
+ ml->acl_delete.field_value[3].value.u32 =
+ sa32[2];
+ ml->acl_delete.field_value[3].mask_range.u32 =
+ sa32_depth[2];
+ ml->acl_delete.field_value[4].value.u32 =
+ sa32[3];
+ ml->acl_delete.field_value[4].mask_range.u32 =
+ sa32_depth[3];
+
+ ml->acl_delete.field_value[5].value.u32 =
+ da32[0];
+ ml->acl_delete.field_value[5].mask_range.u32 =
+ da32_depth[0];
+ ml->acl_delete.field_value[6].value.u32 =
+ da32[1];
+ ml->acl_delete.field_value[6].mask_range.u32 =
+ da32_depth[1];
+ ml->acl_delete.field_value[7].value.u32 =
+ da32[2];
+ ml->acl_delete.field_value[7].mask_range.u32 =
+ da32_depth[2];
+ ml->acl_delete.field_value[8].value.u32 =
+ da32[3];
+ ml->acl_delete.field_value[8].mask_range.u32 =
+ da32_depth[3];
+
+ ml->acl_delete.field_value[9].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_delete.field_value[9].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_delete.field_value[10].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_delete.field_value[10].mask_range.u16 =
+ mh->match.acl.dp1;
+ }
+ return 0;
+
+ case TABLE_ARRAY:
+ ml->array.pos = mh->match.array.pos;
+ return 0;
+
+ case TABLE_HASH:
+ memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
+ return 0;
+
+ case TABLE_LPM:
+ if (mh->match.lpm.ip_version) {
+ ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
+ ml->lpm_ipv4.depth = mh->match.lpm.depth;
+ } else {
+ memcpy(ml->lpm_ipv6.ip,
+ mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
+ ml->lpm_ipv6.depth = mh->match.lpm.depth;
+ }
+
+ return 0;
+
+ default:
+ return -1;
+ }
+}
+
+static int
+action_convert(struct rte_table_action *a,
+ struct softnic_table_rule_action *action,
+ struct rte_pipeline_table_entry *data)
+{
+ int status;
+
+ /* Apply actions */
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_FWD,
+ &action->fwd);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_LB,
+ &action->lb);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_MTR,
+ &action->mtr);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TM,
+ &action->tm);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_ENCAP,
+ &action->encap);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_NAT,
+ &action->nat);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TTL,
+ &action->ttl);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_STATS,
+ &action->stats);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TIME,
+ &action->time);
+
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ union table_rule_match_low_level match_ll;
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ struct softnic_table_rule_match *match = &req->table_rule_add.match;
+ struct softnic_table_rule_action *action = &req->table_rule_add.action;
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ uint32_t table_id = req->id;
+ int key_found, status;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ /* Apply actions */
+ memset(p->buffer, 0, sizeof(p->buffer));
+ data_in = (struct rte_pipeline_table_entry *)p->buffer;
+
+ status = match_convert(match, &match_ll, 1);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ status = action_convert(a, action, data_in);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ &match_ll,
+ data_in,
+ &key_found,
+ &data_out);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ /* Write response */
+ rsp->status = 0;
+ rsp->table_rule_add.data = data_out;
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ struct softnic_table_rule_action *action = &req->table_rule_add_default.action;
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ uint32_t table_id = req->id;
+ int status;
+
+ /* Apply actions */
+ memset(p->buffer, 0, sizeof(p->buffer));
+ data_in = (struct rte_pipeline_table_entry *)p->buffer;
+
+ data_in->action = action->fwd.action;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
+ data_in->port_id = action->fwd.id;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
+ data_in->table_id = action->fwd.id;
+
+ /* Add default rule to table */
+ status = rte_pipeline_table_default_entry_add(p->p,
+ table_id,
+ data_in,
+ &data_out);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ /* Write response */
+ rsp->status = 0;
+ rsp->table_rule_add_default.data = data_out;
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+
+ uint32_t table_id = req->id;
+ struct softnic_table_rule_match *match = req->table_rule_add_bulk.match;
+ struct softnic_table_rule_action *action = req->table_rule_add_bulk.action;
+ struct rte_pipeline_table_entry **data =
+ (struct rte_pipeline_table_entry **)req->table_rule_add_bulk.data;
+ uint32_t n_rules = req->table_rule_add_bulk.n_rules;
+ uint32_t bulk = req->table_rule_add_bulk.bulk;
+
+ struct rte_table_action *a = p->table_data[table_id].a;
+ union table_rule_match_low_level *match_ll;
+ uint8_t *action_ll;
+ void **match_ll_ptr;
+ struct rte_pipeline_table_entry **action_ll_ptr;
+ int *found, status;
+ uint32_t i;
+
+ /* Memory allocation */
+ match_ll = calloc(n_rules, sizeof(union table_rule_match_low_level));
+ action_ll = calloc(n_rules, TABLE_RULE_ACTION_SIZE_MAX);
+ match_ll_ptr = calloc(n_rules, sizeof(void *));
+ action_ll_ptr =
+ calloc(n_rules, sizeof(struct rte_pipeline_table_entry *));
+ found = calloc(n_rules, sizeof(int));
+
+ if (match_ll == NULL ||
+ action_ll == NULL ||
+ match_ll_ptr == NULL ||
+ action_ll_ptr == NULL ||
+ found == NULL)
+ goto fail;
+
+ for (i = 0; i < n_rules; i++) {
+ match_ll_ptr[i] = (void *)&match_ll[i];
+ action_ll_ptr[i] =
+ (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
+ }
+
+ /* Rule match conversion */
+ for (i = 0; i < n_rules; i++) {
+ status = match_convert(&match[i], match_ll_ptr[i], 1);
+ if (status)
+ goto fail;
+ }
+
+ /* Rule action conversion */
+ for (i = 0; i < n_rules; i++) {
+ status = action_convert(a, &action[i], action_ll_ptr[i]);
+ if (status)
+ goto fail;
+ }
+
+ /* Add rule (match, action) to table */
+ if (bulk) {
+ status = rte_pipeline_table_entry_add_bulk(p->p,
+ table_id,
+ match_ll_ptr,
+ action_ll_ptr,
+ n_rules,
+ found,
+ data);
+ if (status)
+ n_rules = 0;
+ } else {
+ for (i = 0; i < n_rules; i++) {
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ match_ll_ptr[i],
+ action_ll_ptr[i],
+ &found[i],
+ &data[i]);
+ if (status) {
+ n_rules = i;
+ break;
+ }
+ }
+ }
+
+ /* Write response */
+ rsp->status = 0;
+ rsp->table_rule_add_bulk.n_rules = n_rules;
+
+ /* Free */
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ return rsp;
+
+fail:
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ rsp->status = -1;
+ rsp->table_rule_add_bulk.n_rules = 0;
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ union table_rule_match_low_level match_ll;
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ struct softnic_table_rule_match *match = &req->table_rule_delete.match;
+ uint32_t table_id = req->id;
+ int key_found, status;
+
+ status = match_convert(match, &match_ll, 0);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ rsp->status = rte_pipeline_table_entry_delete(p->p,
+ table_id,
+ &match_ll,
+ &key_found,
+ NULL);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+
+ rsp->status = rte_pipeline_table_default_entry_delete(p->p,
+ table_id,
+ NULL);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ void *data = req->table_rule_stats_read.data;
+ int clear = req->table_rule_stats_read.clear;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_stats_read(a,
+ data,
+ &rsp->table_rule_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id;
+ struct rte_table_action_meter_profile *profile =
+ &req->table_mtr_profile_add.profile;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_meter_profile_add(a,
+ meter_profile_id,
+ profile);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ uint32_t meter_profile_id =
+ req->table_mtr_profile_delete.meter_profile_id;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_meter_profile_delete(a,
+ meter_profile_id);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_mtr_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ void *data = req->table_rule_mtr_read.data;
+ uint32_t tc_mask = req->table_rule_mtr_read.tc_mask;
+ int clear = req->table_rule_mtr_read.clear;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_meter_read(a,
+ data,
+ tc_mask,
+ &rsp->table_rule_mtr_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_dscp_table_update(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ uint64_t dscp_mask = req->table_dscp_table_update.dscp_mask;
+ struct rte_table_action_dscp_table *dscp_table =
+ &req->table_dscp_table_update.dscp_table;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_dscp_table_update(a,
+ dscp_mask,
+ dscp_table);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_ttl_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ void *data = req->table_rule_ttl_read.data;
+ int clear = req->table_rule_ttl_read.clear;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_ttl_read(a,
+ data,
+ &rsp->table_rule_ttl_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static void
+pipeline_msg_handle(struct pipeline_data *p)
+{
+ for ( ; ; ) {
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+
+ req = pipeline_msg_recv(p->msgq_req);
+ if (req == NULL)
+ break;
+
+ switch (req->type) {
+ case PIPELINE_REQ_PORT_IN_STATS_READ:
+ rsp = pipeline_msg_handle_port_in_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_PORT_IN_ENABLE:
+ rsp = pipeline_msg_handle_port_in_enable(p, req);
+ break;
+
+ case PIPELINE_REQ_PORT_IN_DISABLE:
+ rsp = pipeline_msg_handle_port_in_disable(p, req);
+ break;
+
+ case PIPELINE_REQ_PORT_OUT_STATS_READ:
+ rsp = pipeline_msg_handle_port_out_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_STATS_READ:
+ rsp = pipeline_msg_handle_table_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_ADD:
+ rsp = pipeline_msg_handle_table_rule_add(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
+ rsp = pipeline_msg_handle_table_rule_add_default(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
+ rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_DELETE:
+ rsp = pipeline_msg_handle_table_rule_delete(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
+ rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_STATS_READ:
+ rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD:
+ rsp = pipeline_msg_handle_table_mtr_profile_add(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE:
+ rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_MTR_READ:
+ rsp = pipeline_msg_handle_table_rule_mtr_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE:
+ rsp = pipeline_msg_handle_table_dscp_table_update(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_TTL_READ:
+ rsp = pipeline_msg_handle_table_rule_ttl_read(p, req);
+ break;
+
+ default:
+ rsp = (struct pipeline_msg_rsp *)req;
+ rsp->status = -1;
+ }
+
+ pipeline_msg_send(p->msgq_rsp, rsp);
+ }
+}
+
+/**
+ * Data plane threads: main
+ */
+int
+rte_pmd_softnic_run(uint16_t port_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct pmd_internals *softnic;
+ struct softnic_thread_data *t;
+ uint32_t thread_id, j;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+#endif
+
+ softnic = dev->data->dev_private;
+ thread_id = rte_lcore_id();
+ t = &softnic->thread_data[thread_id];
+ t->iter++;
+
+ /* Data Plane */
+ for (j = 0; j < t->n_pipelines; j++)
+ rte_pipeline_run(t->p[j]);
+
+ /* Control Plane */
+ if ((t->iter & 0xFLLU) == 0) {
+ uint64_t time = rte_get_tsc_cycles();
+ uint64_t time_next_min = UINT64_MAX;
+
+ if (time < t->time_next_min)
+ return 0;
+
+ /* Pipeline message queues */
+ for (j = 0; j < t->n_pipelines; j++) {
+ struct pipeline_data *p =
+ &t->pipeline_data[j];
+ uint64_t time_next = p->time_next;
+
+ if (time_next <= time) {
+ pipeline_msg_handle(p);
+ rte_pipeline_flush(p->p);
+ time_next = time + p->timer_period;
+ p->time_next = time_next;
+ }
+
+ if (time_next < time_next_min)
+ time_next_min = time_next;
+ }
+
+ /* Thread message queues */
+ {
+ uint64_t time_next = t->time_next;
+
+ if (time_next <= time) {
+ thread_msg_handle(t);
+ time_next = time + t->timer_period;
+ t->time_next = time_next;
+ }
+
+ if (time_next < time_next_min)
+ time_next_min = time_next;
+ }
+
+ t->time_next_min = time_next_min;
+ }
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tm.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tm.c
new file mode 100644
index 00000000..baaafbe2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -0,0 +1,3412 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_eth_softnic_internals.h"
+#include "rte_eth_softnic.h"
+
+#define SUBPORT_TC_PERIOD 10
+#define PIPE_TC_PERIOD 40
+
+int
+softnic_tmgr_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->tmgr_port_list);
+
+ return 0;
+}
+
+void
+softnic_tmgr_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
+ if (tmgr_port == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
+ rte_sched_port_free(tmgr_port->s);
+ free(tmgr_port);
+ }
+}
+
+struct softnic_tmgr_port *
+softnic_tmgr_port_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tmgr_port *tmgr_port;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
+ if (strcmp(tmgr_port->name, name) == 0)
+ return tmgr_port;
+
+ return NULL;
+}
+
+struct softnic_tmgr_port *
+softnic_tmgr_port_create(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tmgr_port *tmgr_port;
+ struct tm_params *t = &p->soft.tm.params;
+ struct rte_sched_port *sched;
+ uint32_t n_subports, subport_id;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_tmgr_port_find(p, name))
+ return NULL;
+
+ /*
+ * Resource
+ */
+
+ /* Is hierarchy frozen? */
+ if (p->soft.tm.hierarchy_frozen == 0)
+ return NULL;
+
+ /* Port */
+ sched = rte_sched_port_config(&t->port_params);
+ if (sched == NULL)
+ return NULL;
+
+ /* Subport */
+ n_subports = t->port_params.n_subports_per_port;
+ for (subport_id = 0; subport_id < n_subports; subport_id++) {
+ uint32_t n_pipes_per_subport = t->port_params.n_pipes_per_subport;
+ uint32_t pipe_id;
+ int status;
+
+ status = rte_sched_subport_config(sched,
+ subport_id,
+ &t->subport_params[subport_id]);
+ if (status) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
+
+ /* Pipe */
+ for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
+ int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
+ int profile_id = t->pipe_to_profile[pos];
+
+ if (profile_id < 0)
+ continue;
+
+ status = rte_sched_pipe_config(sched,
+ subport_id,
+ pipe_id,
+ profile_id);
+ if (status) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
+ }
+ }
+
+ /* Node allocation */
+ tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
+ if (tmgr_port == NULL) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
+ tmgr_port->s = sched;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);
+
+ return tmgr_port;
+}
+
+static struct rte_sched_port *
+SCHED(struct pmd_internals *p)
+{
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = softnic_tmgr_port_find(p, "TMGR");
+ if (tmgr_port == NULL)
+ return NULL;
+
+ return tmgr_port->s;
+}
+
+void
+tm_hierarchy_init(struct pmd_internals *p)
+{
+ memset(&p->soft.tm, 0, sizeof(p->soft.tm));
+
+ /* Initialize shaper profile list */
+ TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
+
+ /* Initialize shared shaper list */
+ TAILQ_INIT(&p->soft.tm.h.shared_shapers);
+
+ /* Initialize wred profile list */
+ TAILQ_INIT(&p->soft.tm.h.wred_profiles);
+
+ /* Initialize TM node list */
+ TAILQ_INIT(&p->soft.tm.h.nodes);
+}
+
+void
+tm_hierarchy_free(struct pmd_internals *p)
+{
+ /* Remove all nodes*/
+ for ( ; ; ) {
+ struct tm_node *tm_node;
+
+ tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
+ if (tm_node == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
+ free(tm_node);
+ }
+
+ /* Remove all WRED profiles */
+ for ( ; ; ) {
+ struct tm_wred_profile *wred_profile;
+
+ wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
+ if (wred_profile == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
+ free(wred_profile);
+ }
+
+ /* Remove all shared shapers */
+ for ( ; ; ) {
+ struct tm_shared_shaper *shared_shaper;
+
+ shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
+ if (shared_shaper == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
+ free(shared_shaper);
+ }
+
+ /* Remove all shaper profiles */
+ for ( ; ; ) {
+ struct tm_shaper_profile *shaper_profile;
+
+ shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
+ if (shaper_profile == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
+ shaper_profile, node);
+ free(shaper_profile);
+ }
+
+ tm_hierarchy_init(p);
+}
+
+static struct tm_shaper_profile *
+tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
+ struct tm_shaper_profile *sp;
+
+ TAILQ_FOREACH(sp, spl, node)
+ if (shaper_profile_id == sp->shaper_profile_id)
+ return sp;
+
+ return NULL;
+}
+
+static struct tm_shared_shaper *
+tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
+ struct tm_shared_shaper *ss;
+
+ TAILQ_FOREACH(ss, ssl, node)
+ if (shared_shaper_id == ss->shared_shaper_id)
+ return ss;
+
+ return NULL;
+}
+
+static struct tm_wred_profile *
+tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
+ struct tm_wred_profile *wp;
+
+ TAILQ_FOREACH(wp, wpl, node)
+ if (wred_profile_id == wp->wred_profile_id)
+ return wp;
+
+ return NULL;
+}
+
+static struct tm_node *
+tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+
+ TAILQ_FOREACH(n, nl, node)
+ if (n->node_id == node_id)
+ return n;
+
+ return NULL;
+}
+
+static struct tm_node *
+tm_root_node_present(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+
+ TAILQ_FOREACH(n, nl, node)
+ if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
+ return n;
+
+ return NULL;
+}
+
+static uint32_t
+tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *ns;
+ uint32_t subport_id;
+
+ subport_id = 0;
+ TAILQ_FOREACH(ns, nl, node) {
+ if (ns->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ if (ns->node_id == subport_node->node_id)
+ return subport_id;
+
+ subport_id++;
+ }
+
+ return UINT32_MAX;
+}
+
+static uint32_t
+tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *np;
+ uint32_t pipe_id;
+
+ pipe_id = 0;
+ TAILQ_FOREACH(np, nl, node) {
+ if (np->level != TM_NODE_LEVEL_PIPE ||
+ np->parent_node_id != pipe_node->parent_node_id)
+ continue;
+
+ if (np->node_id == pipe_node->node_id)
+ return pipe_id;
+
+ pipe_id++;
+ }
+
+ return UINT32_MAX;
+}
+
+static uint32_t
+tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
+{
+ return tc_node->priority;
+}
+
+static uint32_t
+tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *nq;
+ uint32_t queue_id;
+
+ queue_id = 0;
+ TAILQ_FOREACH(nq, nl, node) {
+ if (nq->level != TM_NODE_LEVEL_QUEUE ||
+ nq->parent_node_id != queue_node->parent_node_id)
+ continue;
+
+ if (nq->node_id == queue_node->node_id)
+ return queue_id;
+
+ queue_id++;
+ }
+
+ return UINT32_MAX;
+}
+
+static uint32_t
+tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t n_queues_max = p->params.tm.n_queues;
+ uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
+ uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+ uint32_t n_subports_max = n_pipes_max;
+ uint32_t n_root_max = 1;
+
+ switch (level) {
+ case TM_NODE_LEVEL_PORT:
+ return n_root_max;
+ case TM_NODE_LEVEL_SUBPORT:
+ return n_subports_max;
+ case TM_NODE_LEVEL_PIPE:
+ return n_pipes_max;
+ case TM_NODE_LEVEL_TC:
+ return n_tc_max;
+ case TM_NODE_LEVEL_QUEUE:
+ default:
+ return n_queues_max;
+ }
+}
+
+/* Traffic manager node type get */
+static int
+pmd_tm_node_type_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ int *is_leaf,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ if (is_leaf == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (node_id == RTE_TM_NODE_ID_NULL ||
+ (tm_node_search(dev, node_id) == NULL))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ *is_leaf = node_id < p->params.tm.n_queues;
+
+ return 0;
+}
+
+#ifdef RTE_SCHED_RED
+#define WRED_SUPPORTED 1
+#else
+#define WRED_SUPPORTED 0
+#endif
+
+#define STATS_MASK_DEFAULT \
+ (RTE_TM_STATS_N_PKTS | \
+ RTE_TM_STATS_N_BYTES | \
+ RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
+ RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
+
+#define STATS_MASK_QUEUE \
+ (STATS_MASK_DEFAULT | \
+ RTE_TM_STATS_N_PKTS_QUEUED)
+
+static const struct rte_tm_capabilities tm_cap = {
+ .n_nodes_max = UINT32_MAX,
+ .n_levels_max = TM_NODE_LEVEL_MAX,
+
+ .non_leaf_nodes_identical = 0,
+ .leaf_nodes_identical = 1,
+
+ .shaper_n_max = UINT32_MAX,
+ .shaper_private_n_max = UINT32_MAX,
+ .shaper_private_dual_rate_n_max = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+
+ .shaper_shared_n_max = UINT32_MAX,
+ .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
+ .shaper_shared_n_shapers_per_node_max = 1,
+ .shaper_shared_dual_rate_n_max = 0,
+ .shaper_shared_rate_min = 1,
+ .shaper_shared_rate_max = UINT32_MAX,
+
+ .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
+ .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
+
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = UINT32_MAX,
+
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
+ .cman_head_drop_supported = 0,
+ .cman_wred_context_n_max = 0,
+ .cman_wred_context_private_n_max = 0,
+ .cman_wred_context_shared_n_max = 0,
+ .cman_wred_context_shared_n_nodes_per_context_max = 0,
+ .cman_wred_context_shared_n_contexts_per_node_max = 0,
+
+ .mark_vlan_dei_supported = {0, 0, 0},
+ .mark_ip_ecn_tcp_supported = {0, 0, 0},
+ .mark_ip_ecn_sctp_supported = {0, 0, 0},
+ .mark_ip_dscp_supported = {0, 0, 0},
+
+ .dynamic_update_mask = 0,
+
+ .stats_mask = STATS_MASK_QUEUE,
+};
+
+/* Traffic manager capabilities get */
+static int
+pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ memcpy(cap, &tm_cap, sizeof(*cap));
+
+ cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
+
+ cap->shaper_private_n_max =
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);
+
+ cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);
+
+ cap->shaper_n_max = cap->shaper_private_n_max +
+ cap->shaper_shared_n_max;
+
+ cap->shaper_shared_n_nodes_per_shaper_max =
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);
+
+ cap->sched_n_children_max = RTE_MAX(
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
+ (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
+
+ cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
+
+ if (WRED_SUPPORTED)
+ cap->cman_wred_context_private_n_max =
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
+
+ cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
+ cap->cman_wred_context_shared_n_max;
+
+ return 0;
+}
+
+static const struct rte_tm_level_capabilities tm_level_cap[] = {
+ [TM_NODE_LEVEL_PORT] = {
+ .n_nodes_max = 1,
+ .n_nodes_nonleaf_max = 1,
+ .n_nodes_leaf_max = 0,
+ .non_leaf_nodes_identical = 1,
+ .leaf_nodes_identical = 0,
+
+ {.nonleaf = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = 1,
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ } },
+ },
+
+ [TM_NODE_LEVEL_SUBPORT] = {
+ .n_nodes_max = UINT32_MAX,
+ .n_nodes_nonleaf_max = UINT32_MAX,
+ .n_nodes_leaf_max = 0,
+ .non_leaf_nodes_identical = 1,
+ .leaf_nodes_identical = 0,
+
+ {.nonleaf = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ .sched_wfq_weight_max = UINT32_MAX,
+#else
+ .sched_wfq_weight_max = 1,
+#endif
+ .stats_mask = STATS_MASK_DEFAULT,
+ } },
+ },
+
+ [TM_NODE_LEVEL_PIPE] = {
+ .n_nodes_max = UINT32_MAX,
+ .n_nodes_nonleaf_max = UINT32_MAX,
+ .n_nodes_leaf_max = 0,
+ .non_leaf_nodes_identical = 1,
+ .leaf_nodes_identical = 0,
+
+ {.nonleaf = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .sched_n_children_max =
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_sp_n_priorities_max =
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_wfq_n_children_per_group_max = 1,
+ .sched_wfq_n_groups_max = 0,
+ .sched_wfq_weight_max = 1,
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ } },
+ },
+
+ [TM_NODE_LEVEL_TC] = {
+ .n_nodes_max = UINT32_MAX,
+ .n_nodes_nonleaf_max = UINT32_MAX,
+ .n_nodes_leaf_max = 0,
+ .non_leaf_nodes_identical = 1,
+ .leaf_nodes_identical = 0,
+
+ {.nonleaf = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 1,
+
+ .sched_n_children_max =
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max =
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = UINT32_MAX,
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ } },
+ },
+
+ [TM_NODE_LEVEL_QUEUE] = {
+ .n_nodes_max = UINT32_MAX,
+ .n_nodes_nonleaf_max = 0,
+ .n_nodes_leaf_max = UINT32_MAX,
+ .non_leaf_nodes_identical = 0,
+ .leaf_nodes_identical = 1,
+
+ {.leaf = {
+ .shaper_private_supported = 0,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 0,
+ .shaper_private_rate_max = 0,
+ .shaper_shared_n_max = 0,
+
+ .cman_head_drop_supported = 0,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
+ .cman_wred_context_private_supported = WRED_SUPPORTED,
+ .cman_wred_context_shared_n_max = 0,
+
+ .stats_mask = STATS_MASK_QUEUE,
+ } },
+ },
+};
+
+/* Traffic manager level capabilities get */
+static int
+pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (level_id >= TM_NODE_LEVEL_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));
+
+ switch (level_id) {
+ case TM_NODE_LEVEL_PORT:
+ cap->nonleaf.sched_n_children_max =
+ tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_SUBPORT);
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ break;
+
+ case TM_NODE_LEVEL_SUBPORT:
+ cap->n_nodes_max = tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_SUBPORT);
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ cap->nonleaf.sched_n_children_max =
+ tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_PIPE);
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ break;
+
+ case TM_NODE_LEVEL_PIPE:
+ cap->n_nodes_max = tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_PIPE);
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ break;
+
+ case TM_NODE_LEVEL_TC:
+ cap->n_nodes_max = tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_TC);
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ break;
+
+ case TM_NODE_LEVEL_QUEUE:
+ default:
+ cap->n_nodes_max = tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_QUEUE);
+ cap->n_nodes_leaf_max = cap->n_nodes_max;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct rte_tm_node_capabilities tm_node_cap[] = {
+ [TM_NODE_LEVEL_PORT] = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ {.nonleaf = {
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = 1,
+ } },
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+
+ [TM_NODE_LEVEL_SUBPORT] = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ {.nonleaf = {
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = UINT32_MAX,
+ } },
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+
+ [TM_NODE_LEVEL_PIPE] = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ {.nonleaf = {
+ .sched_n_children_max =
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_sp_n_priorities_max =
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_wfq_n_children_per_group_max = 1,
+ .sched_wfq_n_groups_max = 0,
+ .sched_wfq_weight_max = 1,
+ } },
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+
+ [TM_NODE_LEVEL_TC] = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 1,
+
+ {.nonleaf = {
+ .sched_n_children_max =
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max =
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = UINT32_MAX,
+ } },
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+
+ [TM_NODE_LEVEL_QUEUE] = {
+ .shaper_private_supported = 0,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 0,
+ .shaper_private_rate_max = 0,
+ .shaper_shared_n_max = 0,
+
+
+ {.leaf = {
+ .cman_head_drop_supported = 0,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
+ .cman_wred_context_private_supported = WRED_SUPPORTED,
+ .cman_wred_context_shared_n_max = 0,
+ } },
+
+ .stats_mask = STATS_MASK_QUEUE,
+ },
+};
+
+/* Traffic manager node capabilities get */
+static int
+pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct tm_node *tm_node;
+
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ tm_node = tm_node_search(dev, node_id);
+ if (tm_node == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));
+
+ switch (tm_node->level) {
+ case TM_NODE_LEVEL_PORT:
+ cap->nonleaf.sched_n_children_max =
+ tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_SUBPORT);
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ break;
+
+ case TM_NODE_LEVEL_SUBPORT:
+ cap->nonleaf.sched_n_children_max =
+ tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_PIPE);
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ break;
+
+ case TM_NODE_LEVEL_PIPE:
+ case TM_NODE_LEVEL_TC:
+ case TM_NODE_LEVEL_QUEUE:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+shaper_profile_check(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct tm_shaper_profile *sp;
+
+ /* Shaper profile ID must not be NONE. */
+ if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper profile must not exist. */
+ sp = tm_shaper_profile_search(dev, shaper_profile_id);
+ if (sp)
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+
+ /* Profile must not be NULL. */
+ if (profile == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Peak rate: non-zero, 32-bit */
+ if (profile->peak.rate == 0 ||
+ profile->peak.rate >= UINT32_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Peak size: non-zero, 32-bit */
+ if (profile->peak.size == 0 ||
+ profile->peak.size >= UINT32_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Dual-rate profiles are not supported. */
+ if (profile->committed.rate != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Packet length adjust: 24 bytes */
+ if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+/* Traffic manager shaper profile add */
+static int
+pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
+ struct tm_shaper_profile *sp;
+ int status;
+
+ /* Check input params */
+ status = shaper_profile_check(dev, shaper_profile_id, profile, error);
+ if (status)
+ return status;
+
+ /* Memory allocation */
+ sp = calloc(1, sizeof(struct tm_shaper_profile));
+ if (sp == NULL)
+ return -rte_tm_error_set(error,
+ ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOMEM));
+
+ /* Fill in */
+ sp->shaper_profile_id = shaper_profile_id;
+ memcpy(&sp->params, profile, sizeof(sp->params));
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(spl, sp, node);
+ p->soft.tm.h.n_shaper_profiles++;
+
+ return 0;
+}
+
+/* Traffic manager shaper profile delete */
+static int
+pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shaper_profile *sp;
+
+ /* Check existing */
+ sp = tm_shaper_profile_search(dev, shaper_profile_id);
+ if (sp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (sp->n_users)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Remove from list */
+ TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
+ p->soft.tm.h.n_shaper_profiles--;
+ free(sp);
+
+ return 0;
+}
+
+static struct tm_node *
+tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
+ struct tm_shared_shaper *ss)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+
+ /* Subport: each TC uses shared shaper */
+ TAILQ_FOREACH(n, nl, node) {
+ if (n->level != TM_NODE_LEVEL_TC ||
+ n->params.n_shared_shapers == 0 ||
+ n->params.shared_shaper_id[0] != ss->shared_shaper_id)
+ continue;
+
+ return n;
+ }
+
+ return NULL;
+}
+
+static int
+update_subport_tc_rate(struct rte_eth_dev *dev,
+ struct tm_node *nt,
+ struct tm_shared_shaper *ss,
+ struct tm_shaper_profile *sp_new)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_subport_params subport_params;
+
+ struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
+ ss->shaper_profile_id);
+
+ /* Derive new subport configuration. */
+ memcpy(&subport_params,
+ &p->soft.tm.params.subport_params[subport_id],
+ sizeof(subport_params));
+ subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
+
+ /* Update the subport configuration. */
+ if (rte_sched_subport_config(SCHED(p),
+ subport_id, &subport_params))
+ return -1;
+
+ /* Commit changes. */
+ sp_old->n_users--;
+
+ ss->shaper_profile_id = sp_new->shaper_profile_id;
+ sp_new->n_users++;
+
+ memcpy(&p->soft.tm.params.subport_params[subport_id],
+ &subport_params,
+ sizeof(subport_params));
+
+ return 0;
+}
+
+/* Traffic manager shared shaper add/update */
+static int
+pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
+ uint32_t shared_shaper_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shared_shaper *ss;
+ struct tm_shaper_profile *sp;
+ struct tm_node *nt;
+
+ /* Shaper profile must be valid. */
+ sp = tm_shaper_profile_search(dev, shaper_profile_id);
+ if (sp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /**
+ * Add new shared shaper
+ */
+ ss = tm_shared_shaper_search(dev, shared_shaper_id);
+ if (ss == NULL) {
+ struct tm_shared_shaper_list *ssl =
+ &p->soft.tm.h.shared_shapers;
+
+ /* Hierarchy must not be frozen */
+ if (p->soft.tm.hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Memory allocation */
+ ss = calloc(1, sizeof(struct tm_shared_shaper));
+ if (ss == NULL)
+ return -rte_tm_error_set(error,
+ ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOMEM));
+
+ /* Fill in */
+ ss->shared_shaper_id = shared_shaper_id;
+ ss->shaper_profile_id = shaper_profile_id;
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(ssl, ss, node);
+ p->soft.tm.h.n_shared_shapers++;
+
+ return 0;
+ }
+
+ /**
+ * Update existing shared shaper
+ */
+ /* Hierarchy must be frozen (run-time update) */
+ if (p->soft.tm.hierarchy_frozen == 0)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+
+ /* Propagate change. */
+ nt = tm_shared_shaper_get_tc(dev, ss);
+ if (update_subport_tc_rate(dev, nt, ss, sp))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+/* Traffic manager shared shaper delete */
+static int
+pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
+ uint32_t shared_shaper_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shared_shaper *ss;
+
+ /* Check existing */
+ ss = tm_shared_shaper_search(dev, shared_shaper_id);
+ if (ss == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (ss->n_users)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Remove from list */
+ TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
+ p->soft.tm.h.n_shared_shapers--;
+ free(ss);
+
+ return 0;
+}
+
+static int
+wred_profile_check(struct rte_eth_dev *dev,
+ uint32_t wred_profile_id,
+ struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error)
+{
+ struct tm_wred_profile *wp;
+ enum rte_tm_color color;
+
+ /* WRED profile ID must not be NONE. */
+ if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* WRED profile must not exist. */
+ wp = tm_wred_profile_search(dev, wred_profile_id);
+ if (wp)
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+
+ /* Profile must not be NULL. */
+ if (profile == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* WRED profile should be in packet mode */
+ if (profile->packet_mode == 0)
+ return -rte_tm_error_set(error,
+ ENOTSUP,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(ENOTSUP));
+
+ /* min_th <= max_th, max_th > 0 */
+ for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
+ uint32_t min_th = profile->red_params[color].min_th;
+ uint32_t max_th = profile->red_params[color].max_th;
+
+ if (min_th > max_th ||
+ max_th == 0 ||
+ min_th > UINT16_MAX ||
+ max_th > UINT16_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
+/* Traffic manager WRED profile add */
+static int
+pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
+ uint32_t wred_profile_id,
+ struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
+ struct tm_wred_profile *wp;
+ int status;
+
+ /* Check input params */
+ status = wred_profile_check(dev, wred_profile_id, profile, error);
+ if (status)
+ return status;
+
+ /* Memory allocation */
+ wp = calloc(1, sizeof(struct tm_wred_profile));
+ if (wp == NULL)
+ return -rte_tm_error_set(error,
+ ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOMEM));
+
+ /* Fill in */
+ wp->wred_profile_id = wred_profile_id;
+ memcpy(&wp->params, profile, sizeof(wp->params));
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(wpl, wp, node);
+ p->soft.tm.h.n_wred_profiles++;
+
+ return 0;
+}
+
+/* Traffic manager WRED profile delete */
+static int
+pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_wred_profile *wp;
+
+ /* Check existing */
+ wp = tm_wred_profile_search(dev, wred_profile_id);
+ if (wp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (wp->n_users)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Remove from list */
+ TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
+ p->soft.tm.h.n_wred_profiles--;
+ free(wp);
+
+ return 0;
+}
+
+static int
+node_add_check_port(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
+ params->shaper_profile_id);
+
+ /* node type: non-leaf */
+ if (node_id < p->params.tm.n_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be 0 */
+ if (priority != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Weight must be 1 */
+ if (weight != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ sp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of SP priorities must be 1 */
+ if (params->nonleaf.n_sp_priorities != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_subport(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: non-leaf */
+ if (node_id < p->params.tm.n_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be 0 */
+ if (priority != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Weight must be 1 */
+ if (weight != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of SP priorities must be 1 */
+ if (params->nonleaf.n_sp_priorities != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_pipe(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority,
+ uint32_t weight __rte_unused,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: non-leaf */
+ if (node_id < p->params.tm.n_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be 0 */
+ if (priority != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of SP priorities must be 4 */
+ if (params->nonleaf.n_sp_priorities !=
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* WFQ mode must be byte mode */
+ if (params->nonleaf.wfq_weight_mode != NULL &&
+ params->nonleaf.wfq_weight_mode[0] != 0 &&
+ params->nonleaf.wfq_weight_mode[1] != 0 &&
+ params->nonleaf.wfq_weight_mode[2] != 0 &&
+ params->nonleaf.wfq_weight_mode[3] != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_tc(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority __rte_unused,
+ uint32_t weight,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: non-leaf */
+ if (node_id < p->params.tm.n_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Weight must be 1 */
+ if (weight != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Single valid shared shaper */
+ if (params->n_shared_shapers > 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (params->n_shared_shapers == 1 &&
+ (params->shared_shaper_id == NULL ||
+ (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of priorities must be 1 */
+ if (params->nonleaf.n_sp_priorities != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_queue(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority,
+ uint32_t weight __rte_unused,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: leaf */
+ if (node_id >= p->params.tm.n_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be 0 */
+ if (priority != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shaper */
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Congestion management must not be head drop */
+ if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Congestion management set to WRED */
+ if (params->leaf.cman == RTE_TM_CMAN_WRED) {
+ uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
+ struct tm_wred_profile *wp = tm_wred_profile_search(dev,
+ wred_profile_id);
+
+ /* WRED profile (for private WRED context) must be valid */
+ if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
+ wp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared WRED contexts */
+ if (params->leaf.wred.n_shared_wred_contexts != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_QUEUE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct tm_node *pn;
+ uint32_t level;
+ int status;
+
+ /* node_id, parent_node_id:
+ * -node_id must not be RTE_TM_NODE_ID_NULL
+ * -node_id must not be in use
+ * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
+ * -root node must not exist
+ * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
+ * -parent_node_id must be valid
+ */
+ if (node_id == RTE_TM_NODE_ID_NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (tm_node_search(dev, node_id))
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ pn = NULL;
+ if (tm_root_node_present(dev))
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+ } else {
+ pn = tm_node_search(dev, parent_node_id);
+ if (pn == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* priority: must be 0 .. 3 */
+ if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* weight: must be 1 .. 255 */
+ if (weight == 0 || weight >= UINT8_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* level_id: if valid, then
+ * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
+ * -level_id must be zero
+ * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
+ * -level_id must be parent level ID plus one
+ */
+ level = (pn == NULL) ? 0 : pn->level + 1;
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* params: must not be NULL */
+ if (params == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* params: per level checks */
+ switch (level) {
+ case TM_NODE_LEVEL_PORT:
+ status = node_add_check_port(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_SUBPORT:
+ status = node_add_check_subport(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_PIPE:
+ status = node_add_check_pipe(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_TC:
+ status = node_add_check_tc(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_QUEUE:
+ status = node_add_check_queue(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
+/* Traffic manager node add */
+static int
+pmd_tm_node_add(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+ uint32_t i;
+ int status;
+
+ /* Checks */
+ if (p->soft.tm.hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ status = node_add_check(dev, node_id, parent_node_id, priority, weight,
+ level_id, params, error);
+ if (status)
+ return status;
+
+ /* Memory allocation */
+ n = calloc(1, sizeof(struct tm_node));
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOMEM));
+
+ /* Fill in */
+ n->node_id = node_id;
+ n->parent_node_id = parent_node_id;
+ n->priority = priority;
+ n->weight = weight;
+
+ if (parent_node_id != RTE_TM_NODE_ID_NULL) {
+ n->parent_node = tm_node_search(dev, parent_node_id);
+ n->level = n->parent_node->level + 1;
+ }
+
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
+ n->shaper_profile = tm_shaper_profile_search(dev,
+ params->shaper_profile_id);
+
+ if (n->level == TM_NODE_LEVEL_QUEUE &&
+ params->leaf.cman == RTE_TM_CMAN_WRED)
+ n->wred_profile = tm_wred_profile_search(dev,
+ params->leaf.wred.wred_profile_id);
+
+ memcpy(&n->params, params, sizeof(n->params));
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(nl, n, node);
+ p->soft.tm.h.n_nodes++;
+
+ /* Update dependencies */
+ if (n->parent_node)
+ n->parent_node->n_children++;
+
+ if (n->shaper_profile)
+ n->shaper_profile->n_users++;
+
+ for (i = 0; i < params->n_shared_shapers; i++) {
+ struct tm_shared_shaper *ss;
+
+ ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
+ ss->n_users++;
+ }
+
+ if (n->wred_profile)
+ n->wred_profile->n_users++;
+
+ p->soft.tm.h.n_tm_nodes[n->level]++;
+
+ return 0;
+}
+
+/* Traffic manager node delete */
+static int
+pmd_tm_node_delete(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node *n;
+ uint32_t i;
+
+ /* Check hierarchy changes are currently allowed */
+ if (p->soft.tm.hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Check existing */
+ n = tm_node_search(dev, node_id);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (n->n_children)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Update dependencies */
+ p->soft.tm.h.n_tm_nodes[n->level]--;
+
+ if (n->wred_profile)
+ n->wred_profile->n_users--;
+
+ for (i = 0; i < n->params.n_shared_shapers; i++) {
+ struct tm_shared_shaper *ss;
+
+ ss = tm_shared_shaper_search(dev,
+ n->params.shared_shaper_id[i]);
+ ss->n_users--;
+ }
+
+ if (n->shaper_profile)
+ n->shaper_profile->n_users--;
+
+ if (n->parent_node)
+ n->parent_node->n_children--;
+
+ /* Remove from list */
+ TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
+ p->soft.tm.h.n_nodes--;
+ free(n);
+
+ return 0;
+}
+
+
+static void
+pipe_profile_build(struct rte_eth_dev *dev,
+ struct tm_node *np,
+ struct rte_sched_pipe_params *pp)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_node *nt, *nq;
+
+ memset(pp, 0, sizeof(*pp));
+
+ /* Pipe */
+ pp->tb_rate = np->shaper_profile->params.peak.rate;
+ pp->tb_size = np->shaper_profile->params.peak.size;
+
+ /* Traffic Class (TC) */
+ pp->tc_period = PIPE_TC_PERIOD;
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ pp->tc_ov_weight = np->weight;
+#endif
+
+ TAILQ_FOREACH(nt, nl, node) {
+ uint32_t queue_id = 0;
+
+ if (nt->level != TM_NODE_LEVEL_TC ||
+ nt->parent_node_id != np->node_id)
+ continue;
+
+ pp->tc_rate[nt->priority] =
+ nt->shaper_profile->params.peak.rate;
+
+ /* Queue */
+ TAILQ_FOREACH(nq, nl, node) {
+ uint32_t pipe_queue_id;
+
+ if (nq->level != TM_NODE_LEVEL_QUEUE ||
+ nq->parent_node_id != nt->node_id)
+ continue;
+
+ pipe_queue_id = nt->priority *
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
+ pp->wrr_weights[pipe_queue_id] = nq->weight;
+
+ queue_id++;
+ }
+ }
+}
+
+static int
+pipe_profile_free_exists(struct rte_eth_dev *dev,
+ uint32_t *pipe_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+
+ if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
+ *pipe_profile_id = t->n_pipe_profiles;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+pipe_profile_exists(struct rte_eth_dev *dev,
+ struct rte_sched_pipe_params *pp,
+ uint32_t *pipe_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t i;
+
+ for (i = 0; i < t->n_pipe_profiles; i++)
+ if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
+ if (pipe_profile_id)
+ *pipe_profile_id = i;
+ return 1;
+ }
+
+ return 0;
+}
+
+static void
+pipe_profile_install(struct rte_eth_dev *dev,
+ struct rte_sched_pipe_params *pp,
+ uint32_t pipe_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+
+ memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
+ t->n_pipe_profiles++;
+}
+
+static void
+pipe_profile_mark(struct rte_eth_dev *dev,
+ uint32_t subport_id,
+ uint32_t pipe_id,
+ uint32_t pipe_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t n_pipes_per_subport, pos;
+
+ n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+ pos = subport_id * n_pipes_per_subport + pipe_id;
+
+ t->pipe_to_profile[pos] = pipe_profile_id;
+}
+
+static struct rte_sched_pipe_params *
+pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+
+ uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
+ uint32_t pipe_profile_id = t->pipe_to_profile[pos];
+
+ return &t->pipe_profiles[pipe_profile_id];
+}
+
+static int
+pipe_profiles_generate(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_node *ns, *np;
+ uint32_t subport_id;
+
+ /* Objective: Fill in the following fields in struct tm_params:
+ * - pipe_profiles
+ * - n_pipe_profiles
+ * - pipe_to_profile
+ */
+
+ subport_id = 0;
+ TAILQ_FOREACH(ns, nl, node) {
+ uint32_t pipe_id;
+
+ if (ns->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ pipe_id = 0;
+ TAILQ_FOREACH(np, nl, node) {
+ struct rte_sched_pipe_params pp;
+ uint32_t pos;
+
+ if (np->level != TM_NODE_LEVEL_PIPE ||
+ np->parent_node_id != ns->node_id)
+ continue;
+
+ pipe_profile_build(dev, np, &pp);
+
+ if (!pipe_profile_exists(dev, &pp, &pos)) {
+ if (!pipe_profile_free_exists(dev, &pos))
+ return -1;
+
+ pipe_profile_install(dev, &pp, pos);
+ }
+
+ pipe_profile_mark(dev, subport_id, pipe_id, pos);
+
+ pipe_id++;
+ }
+
+ subport_id++;
+ }
+
+ return 0;
+}
+
+static struct tm_wred_profile *
+tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_node *nq;
+
+ TAILQ_FOREACH(nq, nl, node) {
+ if (nq->level != TM_NODE_LEVEL_QUEUE ||
+ nq->parent_node->priority != tc_id)
+ continue;
+
+ return nq->wred_profile;
+ }
+
+ return NULL;
+}
+
+#ifdef RTE_SCHED_RED
+
+static void
+wred_profiles_set(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
+ uint32_t tc_id;
+ enum rte_tm_color color;
+
+ for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
+ for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
+ struct rte_red_params *dst =
+ &pp->red_params[tc_id][color];
+ struct tm_wred_profile *src_wp =
+ tm_tc_wred_profile_get(dev, tc_id);
+ struct rte_tm_red_params *src =
+ &src_wp->params.red_params[color];
+
+ memcpy(dst, src, sizeof(*dst));
+ }
+}
+
+#else
+
+#define wred_profiles_set(dev)
+
+#endif
+
+static struct tm_shared_shaper *
+tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
+{
+ return (tc_node->params.n_shared_shapers) ?
+ tm_shared_shaper_search(dev,
+ tc_node->params.shared_shaper_id[0]) :
+ NULL;
+}
+
+static struct tm_shared_shaper *
+tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
+ struct tm_node *subport_node,
+ uint32_t tc_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+
+ TAILQ_FOREACH(n, nl, node) {
+ if (n->level != TM_NODE_LEVEL_TC ||
+ n->parent_node->parent_node_id !=
+ subport_node->node_id ||
+ n->priority != tc_id)
+ continue;
+
+ return tm_tc_shared_shaper_get(dev, n);
+ }
+
+ return NULL;
+}
+
+static int
+hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_shared_shaper_list *ssl = &h->shared_shapers;
+ struct tm_wred_profile_list *wpl = &h->wred_profiles;
+ struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
+ struct tm_shared_shaper *ss;
+
+ uint32_t n_pipes_per_subport;
+
+ /* Root node exists. */
+ if (nr == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* There is at least one subport, max is not exceeded. */
+ if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* There is at least one pipe. */
+ if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of pipes is the same for all subports. Maximum number of pipes
+ * per subport is not exceeded.
+ */
+ n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+
+ if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ TAILQ_FOREACH(ns, nl, node) {
+ if (ns->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ if (ns->n_children != n_pipes_per_subport)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
+ TAILQ_FOREACH(np, nl, node) {
+ uint32_t mask = 0, mask_expected =
+ RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ uint32_t);
+
+ if (np->level != TM_NODE_LEVEL_PIPE)
+ continue;
+
+ if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ TAILQ_FOREACH(nt, nl, node) {
+ if (nt->level != TM_NODE_LEVEL_TC ||
+ nt->parent_node_id != np->node_id)
+ continue;
+
+ mask |= 1 << nt->priority;
+ }
+
+ if (mask != mask_expected)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Each TC has exactly 4 packet queues. */
+ TAILQ_FOREACH(nt, nl, node) {
+ if (nt->level != TM_NODE_LEVEL_TC)
+ continue;
+
+ if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /**
+ * Shared shapers:
+ * -For each TC #i, all pipes in the same subport use the same
+ * shared shaper (or no shared shaper) for their TC#i.
+ * -Each shared shaper needs to have at least one user. All its
+ * users have to be TC nodes with the same priority and the same
+ * subport.
+ */
+ TAILQ_FOREACH(ns, nl, node) {
+ struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t id;
+
+ if (ns->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
+ s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
+
+ TAILQ_FOREACH(nt, nl, node) {
+ struct tm_shared_shaper *subport_ss, *tc_ss;
+
+ if (nt->level != TM_NODE_LEVEL_TC ||
+ nt->parent_node->parent_node_id !=
+ ns->node_id)
+ continue;
+
+ subport_ss = s[nt->priority];
+ tc_ss = tm_tc_shared_shaper_get(dev, nt);
+
+ if (subport_ss == NULL && tc_ss == NULL)
+ continue;
+
+ if ((subport_ss == NULL && tc_ss != NULL) ||
+ (subport_ss != NULL && tc_ss == NULL) ||
+ subport_ss->shared_shaper_id !=
+ tc_ss->shared_shaper_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ }
+
+ TAILQ_FOREACH(ss, ssl, node) {
+ struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
+ uint32_t n_users = 0;
+
+ if (nt_any != NULL)
+ TAILQ_FOREACH(nt, nl, node) {
+ if (nt->level != TM_NODE_LEVEL_TC ||
+ nt->priority != nt_any->priority ||
+ nt->parent_node->parent_node_id !=
+ nt_any->parent_node->parent_node_id)
+ continue;
+
+ n_users++;
+ }
+
+ if (ss->n_users == 0 || ss->n_users != n_users)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Not too many pipe profiles. */
+ if (pipe_profiles_generate(dev))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /**
+ * WRED (when used, i.e. at least one WRED profile defined):
+ * -Each WRED profile must have at least one user.
+ * -All leaf nodes must have their private WRED context enabled.
+ * -For each TC #i, all leaf nodes must use the same WRED profile
+ * for their private WRED context.
+ */
+ if (h->n_wred_profiles) {
+ struct tm_wred_profile *wp;
+ struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t id;
+
+ TAILQ_FOREACH(wp, wpl, node)
+ if (wp->n_users == 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
+ w[id] = tm_tc_wred_profile_get(dev, id);
+
+ if (w[id] == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ TAILQ_FOREACH(nq, nl, node) {
+ uint32_t id;
+
+ if (nq->level != TM_NODE_LEVEL_QUEUE)
+ continue;
+
+ id = nq->parent_node->priority;
+
+ if (nq->wred_profile == NULL ||
+ nq->wred_profile->wred_profile_id !=
+ w[id]->wred_profile_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ }
+
+ return 0;
+}
+
+static void
+hierarchy_blueprints_create(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_node *root = tm_root_node_present(dev), *n;
+
+ uint32_t subport_id;
+
+ t->port_params = (struct rte_sched_port_params) {
+ .name = dev->data->name,
+ .socket = dev->data->numa_node,
+ .rate = root->shaper_profile->params.peak.rate,
+ .mtu = dev->data->mtu,
+ .frame_overhead =
+ root->shaper_profile->params.pkt_length_adjust,
+ .n_subports_per_port = root->n_children,
+ .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
+ .qsize = {p->params.tm.qsize[0],
+ p->params.tm.qsize[1],
+ p->params.tm.qsize[2],
+ p->params.tm.qsize[3],
+ },
+ .pipe_profiles = t->pipe_profiles,
+ .n_pipe_profiles = t->n_pipe_profiles,
+ };
+
+ wred_profiles_set(dev);
+
+ subport_id = 0;
+ TAILQ_FOREACH(n, nl, node) {
+ uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t i;
+
+ if (n->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ struct tm_shared_shaper *ss;
+ struct tm_shaper_profile *sp;
+
+ ss = tm_subport_tc_shared_shaper_get(dev, n, i);
+ sp = (ss) ? tm_shaper_profile_search(dev,
+ ss->shaper_profile_id) :
+ n->shaper_profile;
+ tc_rate[i] = sp->params.peak.rate;
+ }
+
+ t->subport_params[subport_id] =
+ (struct rte_sched_subport_params) {
+ .tb_rate = n->shaper_profile->params.peak.rate,
+ .tb_size = n->shaper_profile->params.peak.size,
+
+ .tc_rate = {tc_rate[0],
+ tc_rate[1],
+ tc_rate[2],
+ tc_rate[3],
+ },
+ .tc_period = SUBPORT_TC_PERIOD,
+ };
+
+ subport_id++;
+ }
+}
+
+/* Traffic manager hierarchy commit */
+static int
+pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ int status;
+
+ /* Checks */
+ if (p->soft.tm.hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ status = hierarchy_commit_check(dev, error);
+ if (status) {
+ if (clear_on_fail)
+ tm_hierarchy_free(p);
+
+ return status;
+ }
+
+ /* Create blueprints */
+ hierarchy_blueprints_create(dev);
+
+ /* Freeze hierarchy */
+ p->soft.tm.hierarchy_frozen = 1;
+
+ return 0;
+}
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+
+static int
+update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
+ struct rte_sched_pipe_params profile1;
+ uint32_t pipe_profile_id;
+
+ /* Derive new pipe profile. */
+ memcpy(&profile1, profile0, sizeof(profile1));
+ profile1.tc_ov_weight = (uint8_t)weight;
+
+ /* Since implementation does not allow adding more pipe profiles after
+ * port configuration, the pipe configuration can be successfully
+ * updated only if the new profile is also part of the existing set of
+ * pipe profiles.
+ */
+ if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
+ return -1;
+
+ /* Update the pipe profile used by the current pipe. */
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
+ (int32_t)pipe_profile_id))
+ return -1;
+
+ /* Commit changes. */
+ pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
+ np->weight = weight;
+
+ return 0;
+}
+
+#endif
+
+static int
+update_queue_weight(struct rte_eth_dev *dev,
+ struct tm_node *nq, uint32_t weight)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t queue_id = tm_node_queue_id(dev, nq);
+
+ struct tm_node *nt = nq->parent_node;
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ uint32_t pipe_queue_id =
+ tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
+
+ struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
+ struct rte_sched_pipe_params profile1;
+ uint32_t pipe_profile_id;
+
+ /* Derive new pipe profile. */
+ memcpy(&profile1, profile0, sizeof(profile1));
+ profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;
+
+ /* Since implementation does not allow adding more pipe profiles after
+ * port configuration, the pipe configuration can be successfully
+ * updated only if the new profile is also part of the existing set
+ * of pipe profiles.
+ */
+ if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
+ return -1;
+
+ /* Update the pipe profile used by the current pipe. */
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
+ (int32_t)pipe_profile_id))
+ return -1;
+
+ /* Commit changes. */
+ pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
+ nq->weight = weight;
+
+ return 0;
+}
+
+/* Traffic manager node parent update */
+static int
+pmd_tm_node_parent_update(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ struct rte_tm_error *error)
+{
+ struct tm_node *n;
+
+ /* Port must be started and TM used. */
+ if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Node must be valid */
+ n = tm_node_search(dev, node_id);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Parent node must be the same */
+ if (n->parent_node_id != parent_node_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be the same */
+ if (n->priority != priority)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* weight: must be 1 .. 255 */
+ if (weight == 0 || weight >= UINT8_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ switch (n->level) {
+ case TM_NODE_LEVEL_PORT:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+ /* fall-through */
+ case TM_NODE_LEVEL_SUBPORT:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+ /* fall-through */
+ case TM_NODE_LEVEL_PIPE:
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ if (update_pipe_weight(dev, n, weight))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+#else
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+#endif
+ /* fall-through */
+ case TM_NODE_LEVEL_TC:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+ /* fall-through */
+ case TM_NODE_LEVEL_QUEUE:
+ /* fall-through */
+ default:
+ if (update_queue_weight(dev, n, weight))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ }
+}
+
+static int
+update_subport_rate(struct rte_eth_dev *dev,
+ struct tm_node *ns,
+ struct tm_shaper_profile *sp)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_subport_params subport_params;
+
+ /* Derive new subport configuration. */
+ memcpy(&subport_params,
+ &p->soft.tm.params.subport_params[subport_id],
+ sizeof(subport_params));
+ subport_params.tb_rate = sp->params.peak.rate;
+ subport_params.tb_size = sp->params.peak.size;
+
+ /* Update the subport configuration. */
+ if (rte_sched_subport_config(SCHED(p), subport_id,
+ &subport_params))
+ return -1;
+
+ /* Commit changes. */
+ ns->shaper_profile->n_users--;
+
+ ns->shaper_profile = sp;
+ ns->params.shaper_profile_id = sp->shaper_profile_id;
+ sp->n_users++;
+
+ memcpy(&p->soft.tm.params.subport_params[subport_id],
+ &subport_params,
+ sizeof(subport_params));
+
+ return 0;
+}
+
+static int
+update_pipe_rate(struct rte_eth_dev *dev,
+ struct tm_node *np,
+ struct tm_shaper_profile *sp)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
+ struct rte_sched_pipe_params profile1;
+ uint32_t pipe_profile_id;
+
+ /* Derive new pipe profile. */
+ memcpy(&profile1, profile0, sizeof(profile1));
+ profile1.tb_rate = sp->params.peak.rate;
+ profile1.tb_size = sp->params.peak.size;
+
+ /* Since implementation does not allow adding more pipe profiles after
+ * port configuration, the pipe configuration can be successfully
+ * updated only if the new profile is also part of the existing set of
+ * pipe profiles.
+ */
+ if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
+ return -1;
+
+ /* Update the pipe profile used by the current pipe. */
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
+ (int32_t)pipe_profile_id))
+ return -1;
+
+ /* Commit changes. */
+ pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
+ np->shaper_profile->n_users--;
+ np->shaper_profile = sp;
+ np->params.shaper_profile_id = sp->shaper_profile_id;
+ sp->n_users++;
+
+ return 0;
+}
+
+static int
+update_tc_rate(struct rte_eth_dev *dev,
+ struct tm_node *nt,
+ struct tm_shaper_profile *sp)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
+ struct rte_sched_pipe_params profile1;
+ uint32_t pipe_profile_id;
+
+ /* Derive new pipe profile. */
+ memcpy(&profile1, profile0, sizeof(profile1));
+ profile1.tc_rate[tc_id] = sp->params.peak.rate;
+
+ /* Since implementation does not allow adding more pipe profiles after
+ * port configuration, the pipe configuration can be successfully
+ * updated only if the new profile is also part of the existing set of
+ * pipe profiles.
+ */
+ if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
+ return -1;
+
+ /* Update the pipe profile used by the current pipe. */
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
+ (int32_t)pipe_profile_id))
+ return -1;
+
+ /* Commit changes. */
+ pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
+ nt->shaper_profile->n_users--;
+ nt->shaper_profile = sp;
+ nt->params.shaper_profile_id = sp->shaper_profile_id;
+ sp->n_users++;
+
+ return 0;
+}
+
+/* Traffic manager node shaper update */
+static int
+pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct tm_node *n;
+ struct tm_shaper_profile *sp;
+
+ /* Port must be started and TM used. */
+ if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Node must be valid */
+ n = tm_node_search(dev, node_id);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper profile must be valid. */
+ sp = tm_shaper_profile_search(dev, shaper_profile_id);
+ if (sp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ switch (n->level) {
+ case TM_NODE_LEVEL_PORT:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ /* fall-through */
+ case TM_NODE_LEVEL_SUBPORT:
+ if (update_subport_rate(dev, n, sp))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ /* fall-through */
+ case TM_NODE_LEVEL_PIPE:
+ if (update_pipe_rate(dev, n, sp))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ /* fall-through */
+ case TM_NODE_LEVEL_TC:
+ if (update_tc_rate(dev, n, sp))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ /* fall-through */
+ case TM_NODE_LEVEL_QUEUE:
+ /* fall-through */
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+}
+
+static inline uint32_t
+tm_port_queue_id(struct rte_eth_dev *dev,
+ uint32_t port_subport_id,
+ uint32_t subport_pipe_id,
+ uint32_t pipe_tc_id,
+ uint32_t tc_queue_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+
+ uint32_t port_pipe_id =
+ port_subport_id * n_pipes_per_subport + subport_pipe_id;
+ uint32_t port_tc_id =
+ port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
+ uint32_t port_queue_id =
+ port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;
+
+ return port_queue_id;
+}
+
+static int
+read_port_stats(struct rte_eth_dev *dev,
+ struct tm_node *nr,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+ uint32_t subport_id;
+
+ for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
+ struct rte_sched_subport_stats s;
+ uint32_t tc_ov, id;
+
+ /* Stats read */
+ int status = rte_sched_subport_read_stats(SCHED(p),
+ subport_id,
+ &s,
+ &tc_ov);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
+ nr->stats.n_pkts +=
+ s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
+ nr->stats.n_bytes +=
+ s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
+ nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
+ s.n_pkts_tc_dropped[id];
+ nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_tc_dropped[id];
+ }
+ }
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &nr->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_DEFAULT;
+
+ /* Stats clear */
+ if (clear)
+ memset(&nr->stats, 0, sizeof(nr->stats));
+
+ return 0;
+}
+
+static int
+read_subport_stats(struct rte_eth_dev *dev,
+ struct tm_node *ns,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+ struct rte_sched_subport_stats s;
+ uint32_t tc_ov, tc_id;
+
+ /* Stats read */
+ int status = rte_sched_subport_read_stats(SCHED(p),
+ subport_id,
+ &s,
+ &tc_ov);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
+ ns->stats.n_pkts +=
+ s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
+ ns->stats.n_bytes +=
+ s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
+ ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
+ s.n_pkts_tc_dropped[tc_id];
+ ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_tc_dropped[tc_id];
+ }
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &ns->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_DEFAULT;
+
+ /* Stats clear */
+ if (clear)
+ memset(&ns->stats, 0, sizeof(ns->stats));
+
+ return 0;
+}
+
+static int
+read_pipe_stats(struct rte_eth_dev *dev,
+ struct tm_node *np,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ uint32_t i;
+
+ /* Stats read */
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
+ struct rte_sched_queue_stats s;
+ uint16_t qlen;
+
+ uint32_t qid = tm_port_queue_id(dev,
+ subport_id,
+ pipe_id,
+ i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
+
+ int status = rte_sched_queue_read_stats(SCHED(p),
+ qid,
+ &s,
+ &qlen);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
+ np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
+ np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
+ np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_dropped;
+ np->stats.leaf.n_pkts_queued = qlen;
+ }
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &np->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_DEFAULT;
+
+ /* Stats clear */
+ if (clear)
+ memset(&np->stats, 0, sizeof(np->stats));
+
+ return 0;
+}
+
+static int
+read_tc_stats(struct rte_eth_dev *dev,
+ struct tm_node *nt,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ uint32_t i;
+
+ /* Stats read */
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ struct rte_sched_queue_stats s;
+ uint16_t qlen;
+
+ uint32_t qid = tm_port_queue_id(dev,
+ subport_id,
+ pipe_id,
+ tc_id,
+ i);
+
+ int status = rte_sched_queue_read_stats(SCHED(p),
+ qid,
+ &s,
+ &qlen);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
+ nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
+ nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
+ nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_dropped;
+ nt->stats.leaf.n_pkts_queued = qlen;
+ }
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &nt->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_DEFAULT;
+
+ /* Stats clear */
+ if (clear)
+ memset(&nt->stats, 0, sizeof(nt->stats));
+
+ return 0;
+}
+
+static int
+read_queue_stats(struct rte_eth_dev *dev,
+ struct tm_node *nq,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct rte_sched_queue_stats s;
+ uint16_t qlen;
+
+ uint32_t queue_id = tm_node_queue_id(dev, nq);
+
+ struct tm_node *nt = nq->parent_node;
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ /* Stats read */
+ uint32_t qid = tm_port_queue_id(dev,
+ subport_id,
+ pipe_id,
+ tc_id,
+ queue_id);
+
+ int status = rte_sched_queue_read_stats(SCHED(p),
+ qid,
+ &s,
+ &qlen);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
+ nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
+ nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
+ nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_dropped;
+ nq->stats.leaf.n_pkts_queued = qlen;
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &nq->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_QUEUE;
+
+ /* Stats clear */
+ if (clear)
+ memset(&nq->stats, 0, sizeof(nq->stats));
+
+ return 0;
+}
+
+/* Traffic manager read stats counters for specific node */
+static int
+pmd_tm_node_stats_read(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_tm_error *error)
+{
+ struct tm_node *n;
+
+ /* Port must be started and TM used. */
+ if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Node must be valid */
+ n = tm_node_search(dev, node_id);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ switch (n->level) {
+ case TM_NODE_LEVEL_PORT:
+ if (read_port_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+
+ case TM_NODE_LEVEL_SUBPORT:
+ if (read_subport_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+
+ case TM_NODE_LEVEL_PIPE:
+ if (read_pipe_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+
+ case TM_NODE_LEVEL_TC:
+ if (read_tc_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+
+ case TM_NODE_LEVEL_QUEUE:
+ default:
+ if (read_queue_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ }
+}
+
+const struct rte_tm_ops pmd_tm_ops = {
+ .node_type_get = pmd_tm_node_type_get,
+ .capabilities_get = pmd_tm_capabilities_get,
+ .level_capabilities_get = pmd_tm_level_capabilities_get,
+ .node_capabilities_get = pmd_tm_node_capabilities_get,
+
+ .wred_profile_add = pmd_tm_wred_profile_add,
+ .wred_profile_delete = pmd_tm_wred_profile_delete,
+ .shared_wred_context_add_update = NULL,
+ .shared_wred_context_delete = NULL,
+
+ .shaper_profile_add = pmd_tm_shaper_profile_add,
+ .shaper_profile_delete = pmd_tm_shaper_profile_delete,
+ .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
+ .shared_shaper_delete = pmd_tm_shared_shaper_delete,
+
+ .node_add = pmd_tm_node_add,
+ .node_delete = pmd_tm_node_delete,
+ .node_suspend = NULL,
+ .node_resume = NULL,
+ .hierarchy_commit = pmd_tm_hierarchy_commit,
+
+ .node_parent_update = pmd_tm_node_parent_update,
+ .node_shaper_update = pmd_tm_node_shaper_update,
+ .node_shared_shaper_update = NULL,
+ .node_stats_update = NULL,
+ .node_wfq_weight_mode_update = NULL,
+ .node_cman_update = NULL,
+ .node_wred_context_update = NULL,
+ .node_shared_wred_context_update = NULL,
+
+ .node_stats_read = pmd_tm_node_stats_read,
+};
diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_pmd_softnic_version.map b/src/spdk/dpdk/drivers/net/softnic/rte_pmd_softnic_version.map
new file mode 100644
index 00000000..bc44b06f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/softnic/rte_pmd_softnic_version.map
@@ -0,0 +1,13 @@
+DPDK_17.11 {
+ global:
+
+ rte_pmd_softnic_run;
+
+ local: *;
+};
+
+EXPERIMENTAL {
+ global:
+
+ rte_pmd_softnic_manage;
+};
diff --git a/src/spdk/dpdk/drivers/net/szedata2/Makefile b/src/spdk/dpdk/drivers/net/szedata2/Makefile
new file mode 100644
index 00000000..b77fae16
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/szedata2/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015 CESNET
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_szedata2.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lsze2
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+EXPORT_MAP := rte_pmd_szedata2_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += rte_eth_szedata2.c
+
+#
+# Export include files
+#
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/szedata2/meson.build b/src/spdk/dpdk/drivers/net/szedata2/meson.build
new file mode 100644
index 00000000..da373374
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/szedata2/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+dep = cc.find_library('sze2', required: false)
+build = dep.found()
+ext_deps += dep
+sources = files('rte_eth_szedata2.c')
diff --git a/src/spdk/dpdk/drivers/net/szedata2/rte_eth_szedata2.c b/src/spdk/dpdk/drivers/net/szedata2/rte_eth_szedata2.c
new file mode 100644
index 00000000..1d20cb51
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/szedata2/rte_eth_szedata2.c
@@ -0,0 +1,1934 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 - 2016 CESNET
+ */
+
+#include <stdint.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <err.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+#include <libsze2.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+
+#include "rte_eth_szedata2.h"
+#include "szedata2_logs.h"
+
+#define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
+#define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
+#define RTE_ETH_SZEDATA2_TX_LOCK_SIZE (32 * 1024 * 1024)
+
+/**
+ * size of szedata2_packet header with alignment
+ */
+#define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8
+
+#define RTE_SZEDATA2_DRIVER_NAME net_szedata2
+
+#define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u"
+
+/**
+ * Format string for suffix used to differentiate between Ethernet ports
+ * on the same PCI device.
+ */
+#define SZEDATA2_ETH_DEV_NAME_SUFFIX_FMT "-port%u"
+
+/**
+ * Maximum number of ports for one device.
+ */
+#define SZEDATA2_MAX_PORTS 2
+
+/**
+ * Entry in list of PCI devices for this driver.
+ */
+struct pci_dev_list_entry;
+struct pci_dev_list_entry {
+ LIST_ENTRY(pci_dev_list_entry) next;
+ struct rte_pci_device *pci_dev;
+ unsigned int port_count;
+};
+
+/* List of PCI devices with number of ports for this driver. */
+LIST_HEAD(pci_dev_list, pci_dev_list_entry) szedata2_pci_dev_list =
+ LIST_HEAD_INITIALIZER(szedata2_pci_dev_list);
+
+struct port_info {
+ unsigned int rx_base_id;
+ unsigned int tx_base_id;
+ unsigned int rx_count;
+ unsigned int tx_count;
+ int numa_node;
+};
+
+struct pmd_internals {
+ struct rte_eth_dev *dev;
+ uint16_t max_rx_queues;
+ uint16_t max_tx_queues;
+ unsigned int rxq_base_id;
+ unsigned int txq_base_id;
+ char *sze_dev_path;
+};
+
+struct szedata2_rx_queue {
+ struct pmd_internals *priv;
+ struct szedata *sze;
+ uint8_t rx_channel;
+ uint16_t qid;
+ uint16_t in_port;
+ struct rte_mempool *mb_pool;
+ volatile uint64_t rx_pkts;
+ volatile uint64_t rx_bytes;
+ volatile uint64_t err_pkts;
+};
+
+struct szedata2_tx_queue {
+ struct pmd_internals *priv;
+ struct szedata *sze;
+ uint8_t tx_channel;
+ uint16_t qid;
+ volatile uint64_t tx_pkts;
+ volatile uint64_t tx_bytes;
+ volatile uint64_t err_pkts;
+};
+
+int szedata2_logtype_init;
+int szedata2_logtype_driver;
+
+static struct ether_addr eth_addr = {
+ .addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
+};
+
+static uint16_t
+eth_szedata2_rx(void *queue,
+ struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ unsigned int i;
+ struct rte_mbuf *mbuf;
+ struct szedata2_rx_queue *sze_q = queue;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint16_t num_rx = 0;
+ uint16_t buf_size;
+ uint16_t sg_size;
+ uint16_t hw_size;
+ uint16_t packet_size;
+ uint64_t num_bytes = 0;
+ struct szedata *sze = sze_q->sze;
+ uint8_t *header_ptr = NULL; /* header of packet */
+ uint8_t *packet_ptr1 = NULL;
+ uint8_t *packet_ptr2 = NULL;
+ uint16_t packet_len1 = 0;
+ uint16_t packet_len2 = 0;
+ uint16_t hw_data_align;
+
+ if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
+ return 0;
+
+ /*
+ * Reads the given number of packets from szedata2 channel given
+ * by queue and copies the packet data into a newly allocated mbuf
+ * to return.
+ */
+ for (i = 0; i < nb_pkts; i++) {
+ mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
+
+ if (unlikely(mbuf == NULL)) {
+ sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ /* get the next sze packet */
+ if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
+ sze->ct_rx_lck->next == NULL) {
+ /* unlock old data */
+ szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
+ sze->ct_rx_lck_orig = NULL;
+ sze->ct_rx_lck = NULL;
+ }
+
+ if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
+ /* nothing to read, lock new data */
+ sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
+ sze->ct_rx_lck_orig = sze->ct_rx_lck;
+
+ if (sze->ct_rx_lck == NULL) {
+ /* nothing to lock */
+ rte_pktmbuf_free(mbuf);
+ break;
+ }
+
+ sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
+ sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;
+
+ if (!sze->ct_rx_rem_bytes) {
+ rte_pktmbuf_free(mbuf);
+ break;
+ }
+ }
+
+ if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
+ /*
+ * cut in header
+ * copy parts of header to merge buffer
+ */
+ if (sze->ct_rx_lck->next == NULL) {
+ rte_pktmbuf_free(mbuf);
+ break;
+ }
+
+ /* copy first part of header */
+ rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
+ sze->ct_rx_rem_bytes);
+
+ /* copy second part of header */
+ sze->ct_rx_lck = sze->ct_rx_lck->next;
+ sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
+ rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
+ sze->ct_rx_cur_ptr,
+ RTE_SZE2_PACKET_HEADER_SIZE -
+ sze->ct_rx_rem_bytes);
+
+ sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
+ sze->ct_rx_rem_bytes;
+ sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
+ RTE_SZE2_PACKET_HEADER_SIZE +
+ sze->ct_rx_rem_bytes;
+
+ header_ptr = (uint8_t *)sze->ct_rx_buffer;
+ } else {
+ /* not cut */
+ header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
+ sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
+ sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
+ }
+
+ sg_size = le16toh(*((uint16_t *)header_ptr));
+ hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
+ packet_size = sg_size -
+ RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);
+
+
+ /* checks if packet all right */
+ if (!sg_size)
+ errx(5, "Zero segsize");
+
+ /* check sg_size and hwsize */
+ if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
+ errx(10, "Hwsize bigger than expected. Segsize: %d, "
+ "hwsize: %d", sg_size, hw_size);
+ }
+
+ hw_data_align =
+ RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size) -
+ RTE_SZE2_PACKET_HEADER_SIZE;
+
+ if (sze->ct_rx_rem_bytes >=
+ (uint16_t)(sg_size -
+ RTE_SZE2_PACKET_HEADER_SIZE)) {
+ /* no cut */
+ /* one packet ready - go to another */
+ packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
+ packet_len1 = packet_size;
+ packet_ptr2 = NULL;
+ packet_len2 = 0;
+
+ sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
+ RTE_SZE2_PACKET_HEADER_SIZE;
+ sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
+ RTE_SZE2_PACKET_HEADER_SIZE;
+ } else {
+ /* cut in data */
+ if (sze->ct_rx_lck->next == NULL) {
+ errx(6, "Need \"next\" lock, "
+ "but it is missing: %u",
+ sze->ct_rx_rem_bytes);
+ }
+
+ /* skip hw data */
+ if (sze->ct_rx_rem_bytes <= hw_data_align) {
+ uint16_t rem_size = hw_data_align -
+ sze->ct_rx_rem_bytes;
+
+ /* MOVE to next lock */
+ sze->ct_rx_lck = sze->ct_rx_lck->next;
+ sze->ct_rx_cur_ptr =
+ (void *)(((uint8_t *)
+ (sze->ct_rx_lck->start)) + rem_size);
+
+ packet_ptr1 = sze->ct_rx_cur_ptr;
+ packet_len1 = packet_size;
+ packet_ptr2 = NULL;
+ packet_len2 = 0;
+
+ sze->ct_rx_cur_ptr +=
+ RTE_SZE2_ALIGN8(packet_size);
+ sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
+ rem_size - RTE_SZE2_ALIGN8(packet_size);
+ } else {
+ /* get pointer and length from first part */
+ packet_ptr1 = sze->ct_rx_cur_ptr +
+ hw_data_align;
+ packet_len1 = sze->ct_rx_rem_bytes -
+ hw_data_align;
+
+ /* MOVE to next lock */
+ sze->ct_rx_lck = sze->ct_rx_lck->next;
+ sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
+
+ /* get pointer and length from second part */
+ packet_ptr2 = sze->ct_rx_cur_ptr;
+ packet_len2 = packet_size - packet_len1;
+
+ sze->ct_rx_cur_ptr +=
+ RTE_SZE2_ALIGN8(packet_size) -
+ packet_len1;
+ sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
+ (RTE_SZE2_ALIGN8(packet_size) -
+ packet_len1);
+ }
+ }
+
+ if (unlikely(packet_ptr1 == NULL)) {
+ rte_pktmbuf_free(mbuf);
+ break;
+ }
+
+ /* get the space available for data in the mbuf */
+ mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
+ buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+
+ if (packet_size <= buf_size) {
+ /* sze packet will fit in one mbuf, go ahead and copy */
+ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
+ packet_ptr1, packet_len1);
+ if (packet_ptr2 != NULL) {
+ rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf,
+ uint8_t *) + packet_len1),
+ packet_ptr2, packet_len2);
+ }
+ mbuf->data_len = (uint16_t)packet_size;
+
+ mbuf->pkt_len = packet_size;
+ mbuf->port = sze_q->in_port;
+ bufs[num_rx] = mbuf;
+ num_rx++;
+ num_bytes += packet_size;
+ } else {
+ /*
+ * sze packet will not fit in one mbuf,
+ * scattered mode is not enabled, drop packet
+ */
+ PMD_DRV_LOG(ERR,
+ "SZE segment %d bytes will not fit in one mbuf "
+ "(%d bytes), scattered mode is not enabled, "
+ "drop packet!!",
+ packet_size, buf_size);
+ rte_pktmbuf_free(mbuf);
+ }
+ }
+
+ sze_q->rx_pkts += num_rx;
+ sze_q->rx_bytes += num_bytes;
+ return num_rx;
+}
+
+static uint16_t
+eth_szedata2_rx_scattered(void *queue,
+ struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ unsigned int i;
+ struct rte_mbuf *mbuf;
+ struct szedata2_rx_queue *sze_q = queue;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint16_t num_rx = 0;
+ uint16_t buf_size;
+ uint16_t sg_size;
+ uint16_t hw_size;
+ uint16_t packet_size;
+ uint64_t num_bytes = 0;
+ struct szedata *sze = sze_q->sze;
+ uint8_t *header_ptr = NULL; /* header of packet */
+ uint8_t *packet_ptr1 = NULL;
+ uint8_t *packet_ptr2 = NULL;
+ uint16_t packet_len1 = 0;
+ uint16_t packet_len2 = 0;
+ uint16_t hw_data_align;
+ uint64_t *mbuf_failed_ptr =
+ &sze_q->priv->dev->data->rx_mbuf_alloc_failed;
+
+ if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
+ return 0;
+
+ /*
+ * Reads the given number of packets from szedata2 channel given
+ * by queue and copies the packet data into a newly allocated mbuf
+ * to return.
+ */
+ for (i = 0; i < nb_pkts; i++) {
+ const struct szedata_lock *ct_rx_lck_backup;
+ unsigned int ct_rx_rem_bytes_backup;
+ unsigned char *ct_rx_cur_ptr_backup;
+
+ /* get the next sze packet */
+ if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
+ sze->ct_rx_lck->next == NULL) {
+ /* unlock old data */
+ szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
+ sze->ct_rx_lck_orig = NULL;
+ sze->ct_rx_lck = NULL;
+ }
+
+ /*
+ * Store items from sze structure which can be changed
+ * before mbuf allocating. Use these items in case of mbuf
+ * allocating failure.
+ */
+ ct_rx_lck_backup = sze->ct_rx_lck;
+ ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
+ ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;
+
+ if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
+ /* nothing to read, lock new data */
+ sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
+ sze->ct_rx_lck_orig = sze->ct_rx_lck;
+
+ /*
+ * Backup items from sze structure must be updated
+ * after locking to contain pointers to new locks.
+ */
+ ct_rx_lck_backup = sze->ct_rx_lck;
+ ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
+ ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;
+
+ if (sze->ct_rx_lck == NULL)
+ /* nothing to lock */
+ break;
+
+ sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
+ sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;
+
+ if (!sze->ct_rx_rem_bytes)
+ break;
+ }
+
+ if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
+ /*
+ * cut in header - copy parts of header to merge buffer
+ */
+ if (sze->ct_rx_lck->next == NULL)
+ break;
+
+ /* copy first part of header */
+ rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
+ sze->ct_rx_rem_bytes);
+
+ /* copy second part of header */
+ sze->ct_rx_lck = sze->ct_rx_lck->next;
+ sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
+ rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
+ sze->ct_rx_cur_ptr,
+ RTE_SZE2_PACKET_HEADER_SIZE -
+ sze->ct_rx_rem_bytes);
+
+ sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
+ sze->ct_rx_rem_bytes;
+ sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
+ RTE_SZE2_PACKET_HEADER_SIZE +
+ sze->ct_rx_rem_bytes;
+
+ header_ptr = (uint8_t *)sze->ct_rx_buffer;
+ } else {
+ /* not cut */
+ header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
+ sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
+ sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
+ }
+
+ sg_size = le16toh(*((uint16_t *)header_ptr));
+ hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
+ packet_size = sg_size -
+ RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);
+
+
+ /* checks if packet all right */
+ if (!sg_size)
+ errx(5, "Zero segsize");
+
+ /* check sg_size and hwsize */
+ if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
+ errx(10, "Hwsize bigger than expected. Segsize: %d, "
+ "hwsize: %d", sg_size, hw_size);
+ }
+
+ hw_data_align =
+ RTE_SZE2_ALIGN8((RTE_SZE2_PACKET_HEADER_SIZE +
+ hw_size)) - RTE_SZE2_PACKET_HEADER_SIZE;
+
+ if (sze->ct_rx_rem_bytes >=
+ (uint16_t)(sg_size -
+ RTE_SZE2_PACKET_HEADER_SIZE)) {
+ /* no cut */
+ /* one packet ready - go to another */
+ packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
+ packet_len1 = packet_size;
+ packet_ptr2 = NULL;
+ packet_len2 = 0;
+
+ sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
+ RTE_SZE2_PACKET_HEADER_SIZE;
+ sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
+ RTE_SZE2_PACKET_HEADER_SIZE;
+ } else {
+ /* cut in data */
+ if (sze->ct_rx_lck->next == NULL) {
+ errx(6, "Need \"next\" lock, but it is "
+ "missing: %u", sze->ct_rx_rem_bytes);
+ }
+
+ /* skip hw data */
+ if (sze->ct_rx_rem_bytes <= hw_data_align) {
+ uint16_t rem_size = hw_data_align -
+ sze->ct_rx_rem_bytes;
+
+ /* MOVE to next lock */
+ sze->ct_rx_lck = sze->ct_rx_lck->next;
+ sze->ct_rx_cur_ptr =
+ (void *)(((uint8_t *)
+ (sze->ct_rx_lck->start)) + rem_size);
+
+ packet_ptr1 = sze->ct_rx_cur_ptr;
+ packet_len1 = packet_size;
+ packet_ptr2 = NULL;
+ packet_len2 = 0;
+
+ sze->ct_rx_cur_ptr +=
+ RTE_SZE2_ALIGN8(packet_size);
+ sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
+ rem_size - RTE_SZE2_ALIGN8(packet_size);
+ } else {
+ /* get pointer and length from first part */
+ packet_ptr1 = sze->ct_rx_cur_ptr +
+ hw_data_align;
+ packet_len1 = sze->ct_rx_rem_bytes -
+ hw_data_align;
+
+ /* MOVE to next lock */
+ sze->ct_rx_lck = sze->ct_rx_lck->next;
+ sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
+
+ /* get pointer and length from second part */
+ packet_ptr2 = sze->ct_rx_cur_ptr;
+ packet_len2 = packet_size - packet_len1;
+
+ sze->ct_rx_cur_ptr +=
+ RTE_SZE2_ALIGN8(packet_size) -
+ packet_len1;
+ sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
+ (RTE_SZE2_ALIGN8(packet_size) -
+ packet_len1);
+ }
+ }
+
+ if (unlikely(packet_ptr1 == NULL))
+ break;
+
+ mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
+
+ if (unlikely(mbuf == NULL)) {
+ /*
+ * Restore items from sze structure to state after
+ * unlocking (eventually locking).
+ */
+ sze->ct_rx_lck = ct_rx_lck_backup;
+ sze->ct_rx_rem_bytes = ct_rx_rem_bytes_backup;
+ sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup;
+ sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ /* get the space available for data in the mbuf */
+ mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
+ buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+
+ if (packet_size <= buf_size) {
+ /* sze packet will fit in one mbuf, go ahead and copy */
+ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
+ packet_ptr1, packet_len1);
+ if (packet_ptr2 != NULL) {
+ rte_memcpy((void *)
+ (rte_pktmbuf_mtod(mbuf, uint8_t *) +
+ packet_len1), packet_ptr2, packet_len2);
+ }
+ mbuf->data_len = (uint16_t)packet_size;
+ } else {
+ /*
+ * sze packet will not fit in one mbuf,
+ * scatter packet into more mbufs
+ */
+ struct rte_mbuf *m = mbuf;
+ uint16_t len = rte_pktmbuf_tailroom(mbuf);
+
+ /* copy first part of packet */
+ /* fill first mbuf */
+ rte_memcpy(rte_pktmbuf_append(mbuf, len), packet_ptr1,
+ len);
+ packet_len1 -= len;
+ packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
+
+ while (packet_len1 > 0) {
+ /* fill new mbufs */
+ m->next = rte_pktmbuf_alloc(sze_q->mb_pool);
+
+ if (unlikely(m->next == NULL)) {
+ rte_pktmbuf_free(mbuf);
+ /*
+ * Restore items from sze structure
+ * to state after unlocking (eventually
+ * locking).
+ */
+ sze->ct_rx_lck = ct_rx_lck_backup;
+ sze->ct_rx_rem_bytes =
+ ct_rx_rem_bytes_backup;
+ sze->ct_rx_cur_ptr =
+ ct_rx_cur_ptr_backup;
+ (*mbuf_failed_ptr)++;
+ goto finish;
+ }
+
+ m = m->next;
+
+ len = RTE_MIN(rte_pktmbuf_tailroom(m),
+ packet_len1);
+ rte_memcpy(rte_pktmbuf_append(mbuf, len),
+ packet_ptr1, len);
+
+ (mbuf->nb_segs)++;
+ packet_len1 -= len;
+ packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
+ }
+
+ if (packet_ptr2 != NULL) {
+ /* copy second part of packet, if exists */
+ /* fill the rest of currently last mbuf */
+ len = rte_pktmbuf_tailroom(m);
+ rte_memcpy(rte_pktmbuf_append(mbuf, len),
+ packet_ptr2, len);
+ packet_len2 -= len;
+ packet_ptr2 = ((uint8_t *)packet_ptr2) + len;
+
+ while (packet_len2 > 0) {
+ /* fill new mbufs */
+ m->next = rte_pktmbuf_alloc(
+ sze_q->mb_pool);
+
+ if (unlikely(m->next == NULL)) {
+ rte_pktmbuf_free(mbuf);
+ /*
+ * Restore items from sze
+ * structure to state after
+ * unlocking (eventually
+ * locking).
+ */
+ sze->ct_rx_lck =
+ ct_rx_lck_backup;
+ sze->ct_rx_rem_bytes =
+ ct_rx_rem_bytes_backup;
+ sze->ct_rx_cur_ptr =
+ ct_rx_cur_ptr_backup;
+ (*mbuf_failed_ptr)++;
+ goto finish;
+ }
+
+ m = m->next;
+
+ len = RTE_MIN(rte_pktmbuf_tailroom(m),
+ packet_len2);
+ rte_memcpy(
+ rte_pktmbuf_append(mbuf, len),
+ packet_ptr2, len);
+
+ (mbuf->nb_segs)++;
+ packet_len2 -= len;
+ packet_ptr2 = ((uint8_t *)packet_ptr2) +
+ len;
+ }
+ }
+ }
+ mbuf->pkt_len = packet_size;
+ mbuf->port = sze_q->in_port;
+ bufs[num_rx] = mbuf;
+ num_rx++;
+ num_bytes += packet_size;
+ }
+
+finish:
+ sze_q->rx_pkts += num_rx;
+ sze_q->rx_bytes += num_bytes;
+ return num_rx;
+}
+
+static uint16_t
+eth_szedata2_tx(void *queue,
+ struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *mbuf;
+ struct szedata2_tx_queue *sze_q = queue;
+ uint16_t num_tx = 0;
+ uint64_t num_bytes = 0;
+
+ const struct szedata_lock *lck;
+ uint32_t lock_size;
+ uint32_t lock_size2;
+ void *dst;
+ uint32_t pkt_len;
+ uint32_t hwpkt_len;
+ uint32_t unlock_size;
+ uint32_t rem_len;
+ uint16_t mbuf_segs;
+ uint16_t pkt_left = nb_pkts;
+
+ if (sze_q->sze == NULL || nb_pkts == 0)
+ return 0;
+
+ while (pkt_left > 0) {
+ unlock_size = 0;
+ lck = szedata_tx_lock_data(sze_q->sze,
+ RTE_ETH_SZEDATA2_TX_LOCK_SIZE,
+ sze_q->tx_channel);
+ if (lck == NULL)
+ continue;
+
+ dst = lck->start;
+ lock_size = lck->len;
+ lock_size2 = lck->next ? lck->next->len : 0;
+
+next_packet:
+ mbuf = bufs[nb_pkts - pkt_left];
+
+ pkt_len = mbuf->pkt_len;
+ mbuf_segs = mbuf->nb_segs;
+
+ hwpkt_len = RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
+ RTE_SZE2_ALIGN8(pkt_len);
+
+ if (lock_size + lock_size2 < hwpkt_len) {
+ szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
+ continue;
+ }
+
+ num_bytes += pkt_len;
+
+ if (lock_size > hwpkt_len) {
+ void *tmp_dst;
+
+ rem_len = 0;
+
+ /* write packet length at first 2 bytes in 8B header */
+ *((uint16_t *)dst) = htole16(
+ RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
+ pkt_len);
+ *(((uint16_t *)dst) + 1) = htole16(0);
+
+ /* copy packet from mbuf */
+ tmp_dst = ((uint8_t *)(dst)) +
+ RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
+ if (mbuf_segs == 1) {
+ /*
+ * non-scattered packet,
+ * transmit from one mbuf
+ */
+ rte_memcpy(tmp_dst,
+ rte_pktmbuf_mtod(mbuf, const void *),
+ pkt_len);
+ } else {
+ /* scattered packet, transmit from more mbufs */
+ struct rte_mbuf *m = mbuf;
+ while (m) {
+ rte_memcpy(tmp_dst,
+ rte_pktmbuf_mtod(m,
+ const void *),
+ m->data_len);
+ tmp_dst = ((uint8_t *)(tmp_dst)) +
+ m->data_len;
+ m = m->next;
+ }
+ }
+
+
+ dst = ((uint8_t *)dst) + hwpkt_len;
+ unlock_size += hwpkt_len;
+ lock_size -= hwpkt_len;
+
+ rte_pktmbuf_free(mbuf);
+ num_tx++;
+ pkt_left--;
+ if (pkt_left == 0) {
+ szedata_tx_unlock_data(sze_q->sze, lck,
+ unlock_size);
+ break;
+ }
+ goto next_packet;
+ } else if (lock_size + lock_size2 >= hwpkt_len) {
+ void *tmp_dst;
+ uint16_t write_len;
+
+ /* write packet length at first 2 bytes in 8B header */
+ *((uint16_t *)dst) =
+ htole16(RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
+ pkt_len);
+ *(((uint16_t *)dst) + 1) = htole16(0);
+
+ /*
+ * If the raw packet (pkt_len) is smaller than lock_size
+ * get the correct length for memcpy
+ */
+ write_len =
+ pkt_len < lock_size -
+ RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED ?
+ pkt_len :
+ lock_size - RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
+
+ rem_len = hwpkt_len - lock_size;
+
+ tmp_dst = ((uint8_t *)(dst)) +
+ RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
+ if (mbuf_segs == 1) {
+ /*
+ * non-scattered packet,
+ * transmit from one mbuf
+ */
+ /* copy part of packet to first area */
+ rte_memcpy(tmp_dst,
+ rte_pktmbuf_mtod(mbuf, const void *),
+ write_len);
+
+ if (lck->next)
+ dst = lck->next->start;
+
+ /* copy part of packet to second area */
+ rte_memcpy(dst,
+ (const void *)(rte_pktmbuf_mtod(mbuf,
+ const uint8_t *) +
+ write_len), pkt_len - write_len);
+ } else {
+ /* scattered packet, transmit from more mbufs */
+ struct rte_mbuf *m = mbuf;
+ uint16_t written = 0;
+ uint16_t to_write = 0;
+ bool new_mbuf = true;
+ uint16_t write_off = 0;
+
+ /* copy part of packet to first area */
+ while (m && written < write_len) {
+ to_write = RTE_MIN(m->data_len,
+ write_len - written);
+ rte_memcpy(tmp_dst,
+ rte_pktmbuf_mtod(m,
+ const void *),
+ to_write);
+
+ tmp_dst = ((uint8_t *)(tmp_dst)) +
+ to_write;
+ if (m->data_len <= write_len -
+ written) {
+ m = m->next;
+ new_mbuf = true;
+ } else {
+ new_mbuf = false;
+ }
+ written += to_write;
+ }
+
+ if (lck->next)
+ dst = lck->next->start;
+
+ tmp_dst = dst;
+ written = 0;
+ write_off = new_mbuf ? 0 : to_write;
+
+ /* copy part of packet to second area */
+ while (m && written < pkt_len - write_len) {
+ rte_memcpy(tmp_dst, (const void *)
+ (rte_pktmbuf_mtod(m,
+ uint8_t *) + write_off),
+ m->data_len - write_off);
+
+ tmp_dst = ((uint8_t *)(tmp_dst)) +
+ (m->data_len - write_off);
+ written += m->data_len - write_off;
+ m = m->next;
+ write_off = 0;
+ }
+ }
+
+ dst = ((uint8_t *)dst) + rem_len;
+ unlock_size += hwpkt_len;
+ lock_size = lock_size2 - rem_len;
+ lock_size2 = 0;
+
+ rte_pktmbuf_free(mbuf);
+ num_tx++;
+ }
+
+ szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
+ pkt_left--;
+ }
+
+ sze_q->tx_pkts += num_tx;
+ sze_q->err_pkts += nb_pkts - num_tx;
+ sze_q->tx_bytes += num_bytes;
+ return num_tx;
+}
+
+static int
+eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id)
+{
+ struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];
+ int ret;
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+
+ if (rxq->sze == NULL) {
+ uint32_t rx = 1 << rxq->rx_channel;
+ uint32_t tx = 0;
+ rxq->sze = szedata_open(internals->sze_dev_path);
+ if (rxq->sze == NULL)
+ return -EINVAL;
+ ret = szedata_subscribe3(rxq->sze, &rx, &tx);
+ if (ret != 0 || rx == 0)
+ goto err;
+ }
+
+ ret = szedata_start(rxq->sze);
+ if (ret != 0)
+ goto err;
+ dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+
+err:
+ szedata_close(rxq->sze);
+ rxq->sze = NULL;
+ return -EINVAL;
+}
+
+static int
+eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rxq_id)
+{
+ struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];
+
+ if (rxq->sze != NULL) {
+ szedata_close(rxq->sze);
+ rxq->sze = NULL;
+ }
+
+ dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+static int
+eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id)
+{
+ struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];
+ int ret;
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+
+ if (txq->sze == NULL) {
+ uint32_t rx = 0;
+ uint32_t tx = 1 << txq->tx_channel;
+ txq->sze = szedata_open(internals->sze_dev_path);
+ if (txq->sze == NULL)
+ return -EINVAL;
+ ret = szedata_subscribe3(txq->sze, &rx, &tx);
+ if (ret != 0 || tx == 0)
+ goto err;
+ }
+
+ ret = szedata_start(txq->sze);
+ if (ret != 0)
+ goto err;
+ dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+
+err:
+ szedata_close(txq->sze);
+ txq->sze = NULL;
+ return -EINVAL;
+}
+
+static int
+eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t txq_id)
+{
+ struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];
+
+ if (txq->sze != NULL) {
+ szedata_close(txq->sze);
+ txq->sze = NULL;
+ }
+
+ dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+static int
+eth_dev_start(struct rte_eth_dev *dev)
+{
+ int ret;
+ uint16_t i;
+ uint16_t nb_rx = dev->data->nb_rx_queues;
+ uint16_t nb_tx = dev->data->nb_tx_queues;
+
+ for (i = 0; i < nb_rx; i++) {
+ ret = eth_rx_queue_start(dev, i);
+ if (ret != 0)
+ goto err_rx;
+ }
+
+ for (i = 0; i < nb_tx; i++) {
+ ret = eth_tx_queue_start(dev, i);
+ if (ret != 0)
+ goto err_tx;
+ }
+
+ return 0;
+
+err_tx:
+ for (i = 0; i < nb_tx; i++)
+ eth_tx_queue_stop(dev, i);
+err_rx:
+ for (i = 0; i < nb_rx; i++)
+ eth_rx_queue_stop(dev, i);
+ return ret;
+}
+
+static void
+eth_dev_stop(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ uint16_t nb_rx = dev->data->nb_rx_queues;
+ uint16_t nb_tx = dev->data->nb_tx_queues;
+
+ for (i = 0; i < nb_tx; i++)
+ eth_tx_queue_stop(dev, i);
+
+ for (i = 0; i < nb_rx; i++)
+ eth_rx_queue_stop(dev, i);
+}
+
+static int
+eth_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+ dev->rx_pkt_burst = eth_szedata2_rx_scattered;
+ data->scattered_rx = 1;
+ } else {
+ dev->rx_pkt_burst = eth_szedata2_rx;
+ data->scattered_rx = 0;
+ }
+ return 0;
+}
+
+static void
+eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ dev_info->if_index = 0;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t)-1;
+ dev_info->max_rx_queues = internals->max_rx_queues;
+ dev_info->max_tx_queues = internals->max_tx_queues;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_CRC_STRIP;
+ dev_info->tx_offload_capa = 0;
+ dev_info->rx_queue_offload_capa = 0;
+ dev_info->tx_queue_offload_capa = 0;
+ dev_info->speed_capa = ETH_LINK_SPEED_100G;
+}
+
+static int
+eth_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ uint16_t i;
+ uint16_t nb_rx = dev->data->nb_rx_queues;
+ uint16_t nb_tx = dev->data->nb_tx_queues;
+ uint64_t rx_total = 0;
+ uint64_t tx_total = 0;
+ uint64_t tx_err_total = 0;
+ uint64_t rx_total_bytes = 0;
+ uint64_t tx_total_bytes = 0;
+
+ for (i = 0; i < nb_rx; i++) {
+ struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] = rxq->rx_pkts;
+ stats->q_ibytes[i] = rxq->rx_bytes;
+ }
+ rx_total += rxq->rx_pkts;
+ rx_total_bytes += rxq->rx_bytes;
+ }
+
+ for (i = 0; i < nb_tx; i++) {
+ struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] = txq->tx_pkts;
+ stats->q_obytes[i] = txq->tx_bytes;
+ stats->q_errors[i] = txq->err_pkts;
+ }
+ tx_total += txq->tx_pkts;
+ tx_total_bytes += txq->tx_bytes;
+ tx_err_total += txq->err_pkts;
+ }
+
+ stats->ipackets = rx_total;
+ stats->opackets = tx_total;
+ stats->ibytes = rx_total_bytes;
+ stats->obytes = tx_total_bytes;
+ stats->oerrors = tx_err_total;
+ stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+
+ return 0;
+}
+
+static void
+eth_stats_reset(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ uint16_t nb_rx = dev->data->nb_rx_queues;
+ uint16_t nb_tx = dev->data->nb_tx_queues;
+
+ for (i = 0; i < nb_rx; i++) {
+ struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
+ rxq->rx_pkts = 0;
+ rxq->rx_bytes = 0;
+ rxq->err_pkts = 0;
+ }
+ for (i = 0; i < nb_tx; i++) {
+ struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
+ txq->tx_pkts = 0;
+ txq->tx_bytes = 0;
+ txq->err_pkts = 0;
+ }
+}
+
+static void
+eth_rx_queue_release(void *q)
+{
+ struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q;
+
+ if (rxq != NULL) {
+ if (rxq->sze != NULL)
+ szedata_close(rxq->sze);
+ rte_free(rxq);
+ }
+}
+
+static void
+eth_tx_queue_release(void *q)
+{
+ struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q;
+
+ if (txq != NULL) {
+ if (txq->sze != NULL)
+ szedata_close(txq->sze);
+ rte_free(txq);
+ }
+}
+
+static void
+eth_dev_close(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ uint16_t nb_rx = dev->data->nb_rx_queues;
+ uint16_t nb_tx = dev->data->nb_tx_queues;
+
+ eth_dev_stop(dev);
+
+ for (i = 0; i < nb_rx; i++) {
+ eth_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+ for (i = 0; i < nb_tx; i++) {
+ eth_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+static int
+eth_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct rte_eth_link link;
+
+ memset(&link, 0, sizeof(link));
+
+ link.link_speed = ETH_SPEED_NUM_100G;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = ETH_LINK_UP;
+ link.link_autoneg = ETH_LINK_FIXED;
+
+ rte_eth_linkstatus_set(dev, &link);
+ return 0;
+}
+
+static int
+eth_dev_set_link_up(struct rte_eth_dev *dev __rte_unused)
+{
+ PMD_DRV_LOG(WARNING, "Setting link up is not supported.");
+ return 0;
+}
+
+static int
+eth_dev_set_link_down(struct rte_eth_dev *dev __rte_unused)
+{
+ PMD_DRV_LOG(WARNING, "Setting link down is not supported.");
+ return 0;
+}
+
+static int
+eth_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct szedata2_rx_queue *rxq;
+ int ret;
+ struct pmd_internals *internals = dev->data->dev_private;
+ uint8_t rx_channel = internals->rxq_base_id + rx_queue_id;
+ uint32_t rx = 1 << rx_channel;
+ uint32_t tx = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->rx_queues[rx_queue_id] != NULL) {
+ eth_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
+ dev->data->rx_queues[rx_queue_id] = NULL;
+ }
+
+ rxq = rte_zmalloc_socket("szedata2 rx queue",
+ sizeof(struct szedata2_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for rx queue id "
+ "%" PRIu16 "!", rx_queue_id);
+ return -ENOMEM;
+ }
+
+ rxq->priv = internals;
+ rxq->sze = szedata_open(internals->sze_dev_path);
+ if (rxq->sze == NULL) {
+ PMD_INIT_LOG(ERR, "szedata_open() failed for rx queue id "
+ "%" PRIu16 "!", rx_queue_id);
+ eth_rx_queue_release(rxq);
+ return -EINVAL;
+ }
+ ret = szedata_subscribe3(rxq->sze, &rx, &tx);
+ if (ret != 0 || rx == 0) {
+ PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for rx queue id "
+ "%" PRIu16 "!", rx_queue_id);
+ eth_rx_queue_release(rxq);
+ return -EINVAL;
+ }
+ rxq->rx_channel = rx_channel;
+ rxq->qid = rx_queue_id;
+ rxq->in_port = dev->data->port_id;
+ rxq->mb_pool = mb_pool;
+ rxq->rx_pkts = 0;
+ rxq->rx_bytes = 0;
+ rxq->err_pkts = 0;
+
+ dev->data->rx_queues[rx_queue_id] = rxq;
+
+ PMD_INIT_LOG(DEBUG, "Configured rx queue id %" PRIu16 " on socket "
+ "%u (channel id %u).", rxq->qid, socket_id,
+ rxq->rx_channel);
+
+ return 0;
+}
+
+static int
+eth_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct szedata2_tx_queue *txq;
+ int ret;
+ struct pmd_internals *internals = dev->data->dev_private;
+ uint8_t tx_channel = internals->txq_base_id + tx_queue_id;
+ uint32_t rx = 0;
+ uint32_t tx = 1 << tx_channel;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->tx_queues[tx_queue_id] != NULL) {
+ eth_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
+ dev->data->tx_queues[tx_queue_id] = NULL;
+ }
+
+ txq = rte_zmalloc_socket("szedata2 tx queue",
+ sizeof(struct szedata2_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for tx queue id "
+ "%" PRIu16 "!", tx_queue_id);
+ return -ENOMEM;
+ }
+
+ txq->priv = internals;
+ txq->sze = szedata_open(internals->sze_dev_path);
+ if (txq->sze == NULL) {
+ PMD_INIT_LOG(ERR, "szedata_open() failed for tx queue id "
+ "%" PRIu16 "!", tx_queue_id);
+ eth_tx_queue_release(txq);
+ return -EINVAL;
+ }
+ ret = szedata_subscribe3(txq->sze, &rx, &tx);
+ if (ret != 0 || tx == 0) {
+ PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for tx queue id "
+ "%" PRIu16 "!", tx_queue_id);
+ eth_tx_queue_release(txq);
+ return -EINVAL;
+ }
+ txq->tx_channel = tx_channel;
+ txq->qid = tx_queue_id;
+ txq->tx_pkts = 0;
+ txq->tx_bytes = 0;
+ txq->err_pkts = 0;
+
+ dev->data->tx_queues[tx_queue_id] = txq;
+
+ PMD_INIT_LOG(DEBUG, "Configured tx queue id %" PRIu16 " on socket "
+ "%u (channel id %u).", txq->qid, socket_id,
+ txq->tx_channel);
+
+ return 0;
+}
+
+static int
+eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
+ struct ether_addr *mac_addr __rte_unused)
+{
+ return 0;
+}
+
+static void
+eth_promiscuous_enable(struct rte_eth_dev *dev __rte_unused)
+{
+ PMD_DRV_LOG(WARNING, "Enabling promiscuous mode is not supported. "
+ "The card is always in promiscuous mode.");
+}
+
+static void
+eth_promiscuous_disable(struct rte_eth_dev *dev __rte_unused)
+{
+ PMD_DRV_LOG(WARNING, "Disabling promiscuous mode is not supported. "
+ "The card is always in promiscuous mode.");
+}
+
+static void
+eth_allmulticast_enable(struct rte_eth_dev *dev __rte_unused)
+{
+ PMD_DRV_LOG(WARNING, "Enabling allmulticast mode is not supported.");
+}
+
+static void
+eth_allmulticast_disable(struct rte_eth_dev *dev __rte_unused)
+{
+ PMD_DRV_LOG(WARNING, "Disabling allmulticast mode is not supported.");
+}
+
+static const struct eth_dev_ops ops = {
+ .dev_start = eth_dev_start,
+ .dev_stop = eth_dev_stop,
+ .dev_set_link_up = eth_dev_set_link_up,
+ .dev_set_link_down = eth_dev_set_link_down,
+ .dev_close = eth_dev_close,
+ .dev_configure = eth_dev_configure,
+ .dev_infos_get = eth_dev_info,
+ .promiscuous_enable = eth_promiscuous_enable,
+ .promiscuous_disable = eth_promiscuous_disable,
+ .allmulticast_enable = eth_allmulticast_enable,
+ .allmulticast_disable = eth_allmulticast_disable,
+ .rx_queue_start = eth_rx_queue_start,
+ .rx_queue_stop = eth_rx_queue_stop,
+ .tx_queue_start = eth_tx_queue_start,
+ .tx_queue_stop = eth_tx_queue_stop,
+ .rx_queue_setup = eth_rx_queue_setup,
+ .tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_release = eth_rx_queue_release,
+ .tx_queue_release = eth_tx_queue_release,
+ .link_update = eth_link_update,
+ .stats_get = eth_stats_get,
+ .stats_reset = eth_stats_reset,
+ .mac_addr_set = eth_mac_addr_set,
+};
+
+/*
+ * This function goes through sysfs and looks for an index of szedata2
+ * device file (/dev/szedataIIX, where X is the index).
+ *
+ * @return
+ * 0 on success
+ * -1 on error
+ */
+static int
+get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
+{
+ DIR *dir;
+ struct dirent *entry;
+ int ret;
+ uint32_t tmp_index;
+ FILE *fd;
+ char pcislot_path[PATH_MAX];
+ uint32_t domain;
+ uint8_t bus;
+ uint8_t devid;
+ uint8_t function;
+
+ dir = opendir("/sys/class/combo");
+ if (dir == NULL)
+ return -1;
+
+ /*
+ * Iterate through all combosixX directories.
+ * When the value in /sys/class/combo/combosixX/device/pcislot
+ * file is the location of the ethernet device dev, "X" is the
+ * index of the device.
+ */
+ while ((entry = readdir(dir)) != NULL) {
+ ret = sscanf(entry->d_name, "combosix%u", &tmp_index);
+ if (ret != 1)
+ continue;
+
+ snprintf(pcislot_path, PATH_MAX,
+ "/sys/class/combo/combosix%u/device/pcislot",
+ tmp_index);
+
+ fd = fopen(pcislot_path, "r");
+ if (fd == NULL)
+ continue;
+
+ ret = fscanf(fd, "%8" SCNx32 ":%2" SCNx8 ":%2" SCNx8 ".%" SCNx8,
+ &domain, &bus, &devid, &function);
+ fclose(fd);
+ if (ret != 4)
+ continue;
+
+ if (pcislot_addr->domain == domain &&
+ pcislot_addr->bus == bus &&
+ pcislot_addr->devid == devid &&
+ pcislot_addr->function == function) {
+ *index = tmp_index;
+ closedir(dir);
+ return 0;
+ }
+ }
+
+ closedir(dir);
+ return -1;
+}
+
+/**
+ * @brief Initializes rte_eth_dev device.
+ * @param dev Device to initialize.
+ * @param pi Structure with info about DMA queues.
+ * @return 0 on success, negative error code on error.
+ */
+static int
+rte_szedata2_eth_dev_init(struct rte_eth_dev *dev, struct port_info *pi)
+{
+ int ret;
+ uint32_t szedata2_index;
+ char name[PATH_MAX];
+ struct rte_eth_dev_data *data = dev->data;
+ struct pmd_internals *internals = (struct pmd_internals *)
+ data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ PMD_INIT_LOG(INFO, "Initializing eth_dev %s (driver %s)", data->name,
+ dev->device->driver->name);
+
+ /* Fill internal private structure. */
+ internals->dev = dev;
+ /* Get index of szedata2 device file and create path to device file */
+ ret = get_szedata2_index(&pci_dev->addr, &szedata2_index);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to get szedata2 device index!");
+ return -ENODEV;
+ }
+ snprintf(name, PATH_MAX, SZEDATA2_DEV_PATH_FMT, szedata2_index);
+ internals->sze_dev_path = strdup(name);
+ if (internals->sze_dev_path == NULL) {
+ PMD_INIT_LOG(ERR, "strdup() failed!");
+ return -ENOMEM;
+ }
+ PMD_INIT_LOG(INFO, "SZEDATA2 path: %s", internals->sze_dev_path);
+ internals->max_rx_queues = pi->rx_count;
+ internals->max_tx_queues = pi->tx_count;
+ internals->rxq_base_id = pi->rx_base_id;
+ internals->txq_base_id = pi->tx_base_id;
+ PMD_INIT_LOG(INFO, "%u RX DMA channels from id %u",
+ internals->max_rx_queues, internals->rxq_base_id);
+ PMD_INIT_LOG(INFO, "%u TX DMA channels from id %u",
+ internals->max_tx_queues, internals->txq_base_id);
+
+ /* Set rx, tx burst functions */
+ if (data->scattered_rx == 1)
+ dev->rx_pkt_burst = eth_szedata2_rx_scattered;
+ else
+ dev->rx_pkt_burst = eth_szedata2_rx;
+ dev->tx_pkt_burst = eth_szedata2_tx;
+
+ /* Set function callbacks for Ethernet API */
+ dev->dev_ops = &ops;
+
+ /* Get link state */
+ eth_link_update(dev, 0);
+
+ /* Allocate space for one mac address */
+ data->mac_addrs = rte_zmalloc(data->name, sizeof(struct ether_addr),
+ RTE_CACHE_LINE_SIZE);
+ if (data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Could not alloc space for MAC address!");
+ free(internals->sze_dev_path);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(&eth_addr, data->mac_addrs);
+
+ PMD_INIT_LOG(INFO, "%s device %s successfully initialized",
+ dev->device->driver->name, data->name);
+
+ return 0;
+}
+
+/**
+ * @brief Unitializes rte_eth_dev device.
+ * @param dev Device to uninitialize.
+ * @return 0 on success, negative error code on error.
+ */
+static int
+rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ free(internals->sze_dev_path);
+ rte_free(dev->data->mac_addrs);
+
+ PMD_DRV_LOG(INFO, "%s device %s successfully uninitialized",
+ dev->device->driver->name, dev->data->name);
+
+ return 0;
+}
+
+static const struct rte_pci_id rte_szedata2_pci_id_table[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
+ PCI_DEVICE_ID_NETCOPE_COMBO80G)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
+ PCI_DEVICE_ID_NETCOPE_COMBO100G)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
+ PCI_DEVICE_ID_NETCOPE_COMBO100G2)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
+ PCI_DEVICE_ID_NETCOPE_NFB200G2QL)
+ },
+ {
+ .vendor_id = 0,
+ }
+};
+
+/**
+ * @brief Gets info about DMA queues for ports.
+ * @param pci_dev PCI device structure.
+ * @param port_count Pointer to variable set with number of ports.
+ * @param pi Pointer to array of structures with info about DMA queues
+ * for ports.
+ * @param max_ports Maximum number of ports.
+ * @return 0 on success, negative error code on error.
+ */
+static int
+get_port_info(struct rte_pci_device *pci_dev, unsigned int *port_count,
+ struct port_info *pi, unsigned int max_ports)
+{
+ struct szedata *szedata_temp;
+ char sze_dev_path[PATH_MAX];
+ uint32_t szedata2_index;
+ int ret;
+ uint16_t max_rx_queues;
+ uint16_t max_tx_queues;
+
+ if (max_ports == 0)
+ return -EINVAL;
+
+ memset(pi, 0, max_ports * sizeof(struct port_info));
+ *port_count = 0;
+
+ /* Get index of szedata2 device file and create path to device file */
+ ret = get_szedata2_index(&pci_dev->addr, &szedata2_index);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to get szedata2 device index!");
+ return -ENODEV;
+ }
+ snprintf(sze_dev_path, PATH_MAX, SZEDATA2_DEV_PATH_FMT, szedata2_index);
+
+ /*
+ * Get number of available DMA RX and TX channels, which is maximum
+ * number of queues that can be created.
+ */
+ szedata_temp = szedata_open(sze_dev_path);
+ if (szedata_temp == NULL) {
+ PMD_INIT_LOG(ERR, "szedata_open(%s) failed", sze_dev_path);
+ return -EINVAL;
+ }
+ max_rx_queues = szedata_ifaces_available(szedata_temp, SZE2_DIR_RX);
+ max_tx_queues = szedata_ifaces_available(szedata_temp, SZE2_DIR_TX);
+ PMD_INIT_LOG(INFO, "Available DMA channels RX: %u TX: %u",
+ max_rx_queues, max_tx_queues);
+ if (max_rx_queues > RTE_ETH_SZEDATA2_MAX_RX_QUEUES) {
+ PMD_INIT_LOG(ERR, "%u RX queues exceeds supported number %u",
+ max_rx_queues, RTE_ETH_SZEDATA2_MAX_RX_QUEUES);
+ szedata_close(szedata_temp);
+ return -EINVAL;
+ }
+ if (max_tx_queues > RTE_ETH_SZEDATA2_MAX_TX_QUEUES) {
+ PMD_INIT_LOG(ERR, "%u TX queues exceeds supported number %u",
+ max_tx_queues, RTE_ETH_SZEDATA2_MAX_TX_QUEUES);
+ szedata_close(szedata_temp);
+ return -EINVAL;
+ }
+
+ if (pci_dev->id.device_id == PCI_DEVICE_ID_NETCOPE_NFB200G2QL) {
+ unsigned int i;
+ unsigned int rx_queues = max_rx_queues / max_ports;
+ unsigned int tx_queues = max_tx_queues / max_ports;
+
+ /*
+ * Number of queues reported by szedata_ifaces_available()
+ * is the number of all queues from all DMA controllers which
+ * may reside at different numa locations.
+ * All queues from the same DMA controller have the same numa
+ * node.
+ * Numa node from the first queue of each DMA controller is
+ * retrieved.
+ * If the numa node differs from the numa node of the queues
+ * from the previous DMA controller the queues are assigned
+ * to the next port.
+ */
+
+ for (i = 0; i < max_ports; i++) {
+ int numa_rx = szedata_get_area_numa_node(szedata_temp,
+ SZE2_DIR_RX, rx_queues * i);
+ int numa_tx = szedata_get_area_numa_node(szedata_temp,
+ SZE2_DIR_TX, tx_queues * i);
+ unsigned int port_rx_queues = numa_rx != -1 ?
+ rx_queues : 0;
+ unsigned int port_tx_queues = numa_tx != -1 ?
+ tx_queues : 0;
+ PMD_INIT_LOG(DEBUG, "%u rx queues from id %u, numa %d",
+ rx_queues, rx_queues * i, numa_rx);
+ PMD_INIT_LOG(DEBUG, "%u tx queues from id %u, numa %d",
+ tx_queues, tx_queues * i, numa_tx);
+
+ if (port_rx_queues != 0 && port_tx_queues != 0 &&
+ numa_rx != numa_tx) {
+ PMD_INIT_LOG(ERR, "RX queue %u numa %d differs "
+ "from TX queue %u numa %d "
+ "unexpectedly",
+ rx_queues * i, numa_rx,
+ tx_queues * i, numa_tx);
+ szedata_close(szedata_temp);
+ return -EINVAL;
+ } else if (port_rx_queues == 0 && port_tx_queues == 0) {
+ continue;
+ } else {
+ unsigned int j;
+ unsigned int current = *port_count;
+ int port_numa = port_rx_queues != 0 ?
+ numa_rx : numa_tx;
+
+ for (j = 0; j < *port_count; j++) {
+ if (pi[j].numa_node ==
+ port_numa) {
+ current = j;
+ break;
+ }
+ }
+ if (pi[current].rx_count == 0 &&
+ pi[current].tx_count == 0) {
+ pi[current].rx_base_id = rx_queues * i;
+ pi[current].tx_base_id = tx_queues * i;
+ (*port_count)++;
+ } else if ((rx_queues * i !=
+ pi[current].rx_base_id +
+ pi[current].rx_count) ||
+ (tx_queues * i !=
+ pi[current].tx_base_id +
+ pi[current].tx_count)) {
+ PMD_INIT_LOG(ERR, "Queue ids does not "
+ "fulfill constraints");
+ szedata_close(szedata_temp);
+ return -EINVAL;
+ }
+ pi[current].rx_count += port_rx_queues;
+ pi[current].tx_count += port_tx_queues;
+ pi[current].numa_node = port_numa;
+ }
+ }
+ } else {
+ pi[0].rx_count = max_rx_queues;
+ pi[0].tx_count = max_tx_queues;
+ pi[0].numa_node = pci_dev->device.numa_node;
+ *port_count = 1;
+ }
+
+ szedata_close(szedata_temp);
+ return 0;
+}
+
+/**
+ * @brief Allocates rte_eth_dev device.
+ * @param pci_dev Corresponding PCI device.
+ * @param numa_node NUMA node on which device is allocated.
+ * @param port_no Id of rte_eth_device created on PCI device pci_dev.
+ * @return Pointer to allocated device or NULL on error.
+ */
+static struct rte_eth_dev *
+szedata2_eth_dev_allocate(struct rte_pci_device *pci_dev, int numa_node,
+ unsigned int port_no)
+{
+ struct rte_eth_dev *eth_dev;
+ char name[RTE_ETH_NAME_MAX_LEN];
+
+ PMD_INIT_FUNC_TRACE();
+
+ snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s"
+ SZEDATA2_ETH_DEV_NAME_SUFFIX_FMT,
+ pci_dev->device.name, port_no);
+ PMD_INIT_LOG(DEBUG, "Allocating eth_dev %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
+ return NULL;
+
+ eth_dev->data->dev_private = rte_zmalloc_socket(name,
+ sizeof(struct pmd_internals), RTE_CACHE_LINE_SIZE,
+ numa_node);
+ if (!eth_dev->data->dev_private) {
+ rte_eth_dev_release_port(eth_dev);
+ return NULL;
+ }
+ } else {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev)
+ return NULL;
+ }
+
+ eth_dev->device = &pci_dev->device;
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->numa_node = numa_node;
+ return eth_dev;
+}
+
+/**
+ * @brief Releases interval of rte_eth_dev devices from array.
+ * @param eth_devs Array of pointers to rte_eth_dev devices.
+ * @param from Index in array eth_devs to start with.
+ * @param to Index in array right after the last element to release.
+ *
+ * Used for releasing at failed initialization.
+ */
+static void
+szedata2_eth_dev_release_interval(struct rte_eth_dev **eth_devs,
+ unsigned int from, unsigned int to)
+{
+ unsigned int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = from; i < to; i++) {
+ rte_szedata2_eth_dev_uninit(eth_devs[i]);
+ rte_eth_dev_pci_release(eth_devs[i]);
+ }
+}
+
+/**
+ * @brief Callback .probe for struct rte_pci_driver.
+ */
+static int szedata2_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct port_info port_info[SZEDATA2_MAX_PORTS];
+ unsigned int port_count;
+ int ret;
+ unsigned int i;
+ struct pci_dev_list_entry *list_entry;
+ struct rte_eth_dev *eth_devs[SZEDATA2_MAX_PORTS] = {NULL,};
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = get_port_info(pci_dev, &port_count, port_info,
+ SZEDATA2_MAX_PORTS);
+ if (ret != 0)
+ return ret;
+
+ if (port_count == 0) {
+ PMD_INIT_LOG(ERR, "No available ports!");
+ return -ENODEV;
+ }
+
+ list_entry = rte_zmalloc(NULL, sizeof(struct pci_dev_list_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (list_entry == NULL) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc() failed!");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < port_count; i++) {
+ eth_devs[i] = szedata2_eth_dev_allocate(pci_dev,
+ port_info[i].numa_node, i);
+ if (eth_devs[i] == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to alloc eth_dev for port %u",
+ i);
+ szedata2_eth_dev_release_interval(eth_devs, 0, i);
+ rte_free(list_entry);
+ return -ENOMEM;
+ }
+
+ ret = rte_szedata2_eth_dev_init(eth_devs[i], &port_info[i]);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to init eth_dev for port %u",
+ i);
+ rte_eth_dev_pci_release(eth_devs[i]);
+ szedata2_eth_dev_release_interval(eth_devs, 0, i);
+ rte_free(list_entry);
+ return ret;
+ }
+
+ rte_eth_dev_probing_finish(eth_devs[i]);
+ }
+
+ /*
+ * Add pci_dev to list of PCI devices for this driver
+ * which is used at remove callback to release all created eth_devs.
+ */
+ list_entry->pci_dev = pci_dev;
+ list_entry->port_count = port_count;
+ LIST_INSERT_HEAD(&szedata2_pci_dev_list, list_entry, next);
+ return 0;
+}
+
+/**
+ * @brief Callback .remove for struct rte_pci_driver.
+ */
+static int szedata2_eth_pci_remove(struct rte_pci_device *pci_dev)
+{
+ unsigned int i;
+ unsigned int port_count;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+ int ret;
+ int retval = 0;
+ bool found = false;
+ struct pci_dev_list_entry *list_entry = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+
+ LIST_FOREACH(list_entry, &szedata2_pci_dev_list, next) {
+ if (list_entry->pci_dev == pci_dev) {
+ port_count = list_entry->port_count;
+ found = true;
+ break;
+ }
+ }
+ LIST_REMOVE(list_entry, next);
+ rte_free(list_entry);
+
+ if (!found) {
+ PMD_DRV_LOG(ERR, "PCI device " PCI_PRI_FMT " not found",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < port_count; i++) {
+ snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s"
+ SZEDATA2_ETH_DEV_NAME_SUFFIX_FMT,
+ pci_dev->device.name, i);
+ PMD_DRV_LOG(DEBUG, "Removing eth_dev %s", name);
+ eth_dev = rte_eth_dev_allocated(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(ERR, "eth_dev %s not found", name);
+ retval = retval ? retval : -ENODEV;
+ }
+
+ ret = rte_szedata2_eth_dev_uninit(eth_dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "eth_dev %s uninit failed", name);
+ retval = retval ? retval : ret;
+ }
+
+ rte_eth_dev_pci_release(eth_dev);
+ }
+
+ return retval;
+}
+
+static struct rte_pci_driver szedata2_eth_driver = {
+ .id_table = rte_szedata2_pci_id_table,
+ .probe = szedata2_eth_pci_probe,
+ .remove = szedata2_eth_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver);
+RTE_PMD_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
+RTE_PMD_REGISTER_KMOD_DEP(RTE_SZEDATA2_DRIVER_NAME,
+ "* combo6core & combov3 & szedata2 & ( szedata2_cv3 | szedata2_cv3_fdt )");
+
+RTE_INIT(szedata2_init_log)
+{
+ szedata2_logtype_init = rte_log_register("pmd.net.szedata2.init");
+ if (szedata2_logtype_init >= 0)
+ rte_log_set_level(szedata2_logtype_init, RTE_LOG_NOTICE);
+ szedata2_logtype_driver = rte_log_register("pmd.net.szedata2.driver");
+ if (szedata2_logtype_driver >= 0)
+ rte_log_set_level(szedata2_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/szedata2/rte_eth_szedata2.h b/src/spdk/dpdk/drivers/net/szedata2/rte_eth_szedata2.h
new file mode 100644
index 00000000..26a82b35
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/szedata2/rte_eth_szedata2.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 - 2016 CESNET
+ */
+
+#ifndef RTE_PMD_SZEDATA2_H_
+#define RTE_PMD_SZEDATA2_H_
+
+#include <stdint.h>
+
+#include <libsze2.h>
+
+#include <rte_common.h>
+
+/* PCI Vendor ID */
+#define PCI_VENDOR_ID_NETCOPE 0x1b26
+
+/* PCI Device IDs */
+#define PCI_DEVICE_ID_NETCOPE_COMBO80G 0xcb80
+#define PCI_DEVICE_ID_NETCOPE_COMBO100G 0xc1c1
+#define PCI_DEVICE_ID_NETCOPE_COMBO100G2 0xc2c1
+#define PCI_DEVICE_ID_NETCOPE_NFB200G2QL 0xc250
+
+/* szedata2_packet header length == 4 bytes == 2B segment size + 2B hw size */
+#define RTE_SZE2_PACKET_HEADER_SIZE 4
+
+#define RTE_SZE2_MMIO_MAX 10
+
+/*!
+ * Round 'what' to the nearest larger (or equal) multiple of '8'
+ * (szedata2 packet is aligned to 8 bytes)
+ */
+#define RTE_SZE2_ALIGN8(what) RTE_ALIGN(what, 8)
+
+/*! main handle structure */
+struct szedata {
+ int fd;
+ struct sze2_instance_info *info;
+ uint32_t *write_size;
+ void *space[RTE_SZE2_MMIO_MAX];
+ struct szedata_lock lock[2][2];
+
+ __u32 *rx_asize, *tx_asize;
+
+ /* szedata_read_next variables - to keep context (ct) */
+
+ /*
+ * rx
+ */
+ /** initial sze lock ptr */
+ const struct szedata_lock *ct_rx_lck_orig;
+ /** current sze lock ptr (initial or next) */
+ const struct szedata_lock *ct_rx_lck;
+ /** remaining bytes (not read) within current lock */
+ unsigned int ct_rx_rem_bytes;
+ /** current pointer to locked memory */
+ unsigned char *ct_rx_cur_ptr;
+ /**
+ * allocated buffer to store RX packet if it was split
+ * into 2 buffers
+ */
+ unsigned char *ct_rx_buffer;
+ /** registered function to provide filtering based on hwdata */
+ int (*ct_rx_filter)(u_int16_t hwdata_len, u_char *hwdata);
+
+ /*
+ * tx
+ */
+ /**
+ * buffer for tx - packet is prepared here
+ * (in future for burst write)
+ */
+ unsigned char *ct_tx_buffer;
+ /** initial sze TX lock ptrs - number according to TX interfaces */
+ const struct szedata_lock **ct_tx_lck_orig;
+ /** current sze TX lock ptrs - number according to TX interfaces */
+ const struct szedata_lock **ct_tx_lck;
+ /** already written bytes in both locks */
+ unsigned int *ct_tx_written_bytes;
+ /** remaining bytes (not written) within current lock */
+ unsigned int *ct_tx_rem_bytes;
+ /** current pointers to locked memory */
+ unsigned char **ct_tx_cur_ptr;
+ /** NUMA node closest to PCIe device, or -1 */
+ int numa_node;
+};
+
+#endif /* RTE_PMD_SZEDATA2_H_ */
diff --git a/src/spdk/dpdk/drivers/net/szedata2/rte_pmd_szedata2_version.map b/src/spdk/dpdk/drivers/net/szedata2/rte_pmd_szedata2_version.map
new file mode 100644
index 00000000..ad607bbe
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/szedata2/rte_pmd_szedata2_version.map
@@ -0,0 +1,3 @@
+DPDK_2.2 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/szedata2/szedata2_logs.h b/src/spdk/dpdk/drivers/net/szedata2/szedata2_logs.h
new file mode 100644
index 00000000..8d06ffa3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/szedata2/szedata2_logs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 CESNET
+ */
+
+#ifndef _SZEDATA2_LOGS_H_
+#define _SZEDATA2_LOGS_H_
+
+#include <rte_log.h>
+
+extern int szedata2_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, szedata2_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+extern int szedata2_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, szedata2_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#endif /* _SZEDATA2_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/tap/Makefile b/src/spdk/dpdk/drivers/net/tap/Makefile
new file mode 100644
index 00000000..32433653
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/Makefile
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_tap.a
+
+EXPORT_MAP := rte_pmd_tap_version.map
+
+LIBABIVER := 1
+
+#
+# TAP_MAX_QUEUES must be a power of 2
+#
+ifeq ($(TAP_MAX_QUEUES),)
+ TAP_MAX_QUEUES = 16
+endif
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -I.
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_vdev -lrte_gso
+
+CFLAGS += -DTAP_MAX_QUEUES=$(TAP_MAX_QUEUES)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += rte_eth_tap.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_netlink.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_tcmsgs.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_bpf_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_intr.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
+# Generate and clean-up tap_autoconf.h.
+
+export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS
+export AUTO_CONFIG_CFLAGS = -Wno-error
+
+ifndef V
+AUTOCONF_OUTPUT := >/dev/null
+endif
+
+tap_autoconf.h.new: FORCE
+
+tap_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
+ $Q $(RM) -f -- '$@'
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_FLOWER \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_UNSPEC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_VLAN_ID \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_VLAN_PRIO \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_BPF \
+ linux/pkt_cls.h \
+ enum TCA_BPF_UNSPEC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_BPF_FD \
+ linux/pkt_cls.h \
+ enum TCA_BPF_FD \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_ACT_BPF \
+ linux/tc_act/tc_bpf.h \
+ enum TCA_ACT_BPF_UNSPEC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_ACT_BPF_FD \
+ linux/tc_act/tc_bpf.h \
+ enum TCA_ACT_BPF_FD \
+ $(AUTOCONF_OUTPUT)
+
+# Create tap_autoconf.h or update it in case it differs from the new one.
+
+tap_autoconf.h: tap_autoconf.h.new
+ $Q [ -f '$@' ] && \
+ cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \
+ mv '$<' '$@'
+
+$(SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP):.c=.o): tap_autoconf.h
+
+clean_tap: FORCE
+ $Q rm -f -- tap_autoconf.h tap_autoconf.h.new
+
+clean: clean_tap
diff --git a/src/spdk/dpdk/drivers/net/tap/rte_eth_tap.c b/src/spdk/dpdk/drivers/net/tap/rte_eth_tap.c
new file mode 100644
index 00000000..feb92b48
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/rte_eth_tap.c
@@ -0,0 +1,2140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_malloc.h>
+#include <rte_bus_vdev.h>
+#include <rte_kvargs.h>
+#include <rte_net.h>
+#include <rte_debug.h>
+#include <rte_ip.h>
+#include <rte_string_fns.h>
+
+#include <assert.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <sys/utsname.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <sys/uio.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <net/if.h>
+#include <linux/if_tun.h>
+#include <linux/if_ether.h>
+#include <fcntl.h>
+
+#include <tap_rss.h>
+#include <rte_eth_tap.h>
+#include <tap_flow.h>
+#include <tap_netlink.h>
+#include <tap_tcmsgs.h>
+
+/* Linux based path to the TUN device */
+#define TUN_TAP_DEV_PATH "/dev/net/tun"
+#define DEFAULT_TAP_NAME "dtap"
+#define DEFAULT_TUN_NAME "dtun"
+
+#define ETH_TAP_IFACE_ARG "iface"
+#define ETH_TAP_REMOTE_ARG "remote"
+#define ETH_TAP_MAC_ARG "mac"
+#define ETH_TAP_MAC_FIXED "fixed"
+
+#define ETH_TAP_USR_MAC_FMT "xx:xx:xx:xx:xx:xx"
+#define ETH_TAP_CMP_MAC_FMT "0123456789ABCDEFabcdef"
+#define ETH_TAP_MAC_ARG_FMT ETH_TAP_MAC_FIXED "|" ETH_TAP_USR_MAC_FMT
+
+#define TAP_GSO_MBUFS_PER_CORE 128
+#define TAP_GSO_MBUF_SEG_SIZE 128
+#define TAP_GSO_MBUF_CACHE_SIZE 4
+#define TAP_GSO_MBUFS_NUM \
+ (TAP_GSO_MBUFS_PER_CORE * TAP_GSO_MBUF_CACHE_SIZE)
+
+static struct rte_vdev_driver pmd_tap_drv;
+static struct rte_vdev_driver pmd_tun_drv;
+
+static const char *valid_arguments[] = {
+ ETH_TAP_IFACE_ARG,
+ ETH_TAP_REMOTE_ARG,
+ ETH_TAP_MAC_ARG,
+ NULL
+};
+
+static unsigned int tap_unit;
+static unsigned int tun_unit;
+
+static char tuntap_name[8];
+
+static volatile uint32_t tap_trigger; /* Rx trigger */
+
+static struct rte_eth_link pmd_link = {
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_FIXED,
+};
+
+static void
+tap_trigger_cb(int sig __rte_unused)
+{
+ /* Valid trigger values are nonzero */
+ tap_trigger = (tap_trigger + 1) | 0x80000000;
+}
+
+/* Specifies on what netdevices the ioctl should be applied */
+enum ioctl_mode {
+ LOCAL_AND_REMOTE,
+ LOCAL_ONLY,
+ REMOTE_ONLY,
+};
+
+static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
+
+/**
+ * Tun/Tap allocation routine
+ *
+ * @param[in] pmd
+ * Pointer to private structure.
+ *
+ * @param[in] is_keepalive
+ * Keepalive flag
+ *
+ * @return
+ * -1 on failure, fd on success
+ */
+static int
+tun_alloc(struct pmd_internals *pmd, int is_keepalive)
+{
+ struct ifreq ifr;
+#ifdef IFF_MULTI_QUEUE
+ unsigned int features;
+#endif
+ int fd;
+
+ memset(&ifr, 0, sizeof(struct ifreq));
+
+ /*
+ * Do not set IFF_NO_PI as packet information header will be needed
+ * to check if a received packet has been truncated.
+ */
+ ifr.ifr_flags = (pmd->type == ETH_TUNTAP_TYPE_TAP) ?
+ IFF_TAP : IFF_TUN | IFF_POINTOPOINT;
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);
+
+ TAP_LOG(DEBUG, "ifr_name '%s'", ifr.ifr_name);
+
+ fd = open(TUN_TAP_DEV_PATH, O_RDWR);
+ if (fd < 0) {
+ TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
+ goto error;
+ }
+
+#ifdef IFF_MULTI_QUEUE
+ /* Grab the TUN features to verify we can work multi-queue */
+ if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
+ TAP_LOG(ERR, "%s unable to get TUN/TAP features",
+ tuntap_name);
+ goto error;
+ }
+ TAP_LOG(DEBUG, "%s Features %08x", tuntap_name, features);
+
+ if (features & IFF_MULTI_QUEUE) {
+ TAP_LOG(DEBUG, " Multi-queue support for %d queues",
+ RTE_PMD_TAP_MAX_QUEUES);
+ ifr.ifr_flags |= IFF_MULTI_QUEUE;
+ } else
+#endif
+ {
+ ifr.ifr_flags |= IFF_ONE_QUEUE;
+ TAP_LOG(DEBUG, " Single queue only support");
+ }
+
+ /* Set the TUN/TAP configuration and set the name if needed */
+ if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
+ TAP_LOG(WARNING, "Unable to set TUNSETIFF for %s: %s",
+ ifr.ifr_name, strerror(errno));
+ goto error;
+ }
+
+ if (is_keepalive) {
+ /*
+ * Detach the TUN/TAP keep-alive queue
+ * to avoid traffic through it
+ */
+ ifr.ifr_flags = IFF_DETACH_QUEUE;
+ if (ioctl(fd, TUNSETQUEUE, (void *)&ifr) < 0) {
+ TAP_LOG(WARNING,
+ "Unable to detach keep-alive queue for %s: %s",
+ ifr.ifr_name, strerror(errno));
+ goto error;
+ }
+ }
+
+ /* Always set the file descriptor to non-blocking */
+ if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
+ TAP_LOG(WARNING,
+ "Unable to set %s to nonblocking: %s",
+ ifr.ifr_name, strerror(errno));
+ goto error;
+ }
+
+ /* Set up trigger to optimize empty Rx bursts */
+ errno = 0;
+ do {
+ struct sigaction sa;
+ int flags = fcntl(fd, F_GETFL);
+
+ if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
+ break;
+ if (sa.sa_handler != tap_trigger_cb) {
+ /*
+ * Make sure SIGIO is not already taken. This is done
+ * as late as possible to leave the application a
+ * chance to set up its own signal handler first.
+ */
+ if (sa.sa_handler != SIG_IGN &&
+ sa.sa_handler != SIG_DFL) {
+ errno = EBUSY;
+ break;
+ }
+ sa = (struct sigaction){
+ .sa_flags = SA_RESTART,
+ .sa_handler = tap_trigger_cb,
+ };
+ if (sigaction(SIGIO, &sa, NULL) == -1)
+ break;
+ }
+ /* Enable SIGIO on file descriptor */
+ fcntl(fd, F_SETFL, flags | O_ASYNC);
+ fcntl(fd, F_SETOWN, getpid());
+ } while (0);
+
+ if (errno) {
+ /* Disable trigger globally in case of error */
+ tap_trigger = 0;
+ TAP_LOG(WARNING, "Rx trigger disabled: %s",
+ strerror(errno));
+ }
+
+ return fd;
+
+error:
+ if (fd > 0)
+ close(fd);
+ return -1;
+}
+
+static void
+tap_verify_csum(struct rte_mbuf *mbuf)
+{
+ uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK;
+ uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK;
+ uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK;
+ unsigned int l2_len = sizeof(struct ether_hdr);
+ unsigned int l3_len;
+ uint16_t cksum = 0;
+ void *l3_hdr;
+ void *l4_hdr;
+
+ if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
+ l2_len += 4;
+ else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
+ l2_len += 8;
+ /* Don't verify checksum for packets with discontinuous L2 header */
+ if (unlikely(l2_len + sizeof(struct ipv4_hdr) >
+ rte_pktmbuf_data_len(mbuf)))
+ return;
+ l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
+ if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
+ struct ipv4_hdr *iph = l3_hdr;
+
+ /* ihl contains the number of 4-byte words in the header */
+ l3_len = 4 * (iph->version_ihl & 0xf);
+ if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
+ return;
+
+ cksum = ~rte_raw_cksum(iph, l3_len);
+ mbuf->ol_flags |= cksum ?
+ PKT_RX_IP_CKSUM_BAD :
+ PKT_RX_IP_CKSUM_GOOD;
+ } else if (l3 == RTE_PTYPE_L3_IPV6) {
+ l3_len = sizeof(struct ipv6_hdr);
+ } else {
+ /* IPv6 extensions are not supported */
+ return;
+ }
+ if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
+ l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len + l3_len);
+ /* Don't verify checksum for multi-segment packets. */
+ if (mbuf->nb_segs > 1)
+ return;
+ if (l3 == RTE_PTYPE_L3_IPV4)
+ cksum = ~rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
+ else if (l3 == RTE_PTYPE_L3_IPV6)
+ cksum = ~rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
+ mbuf->ol_flags |= cksum ?
+ PKT_RX_L4_CKSUM_BAD :
+ PKT_RX_L4_CKSUM_GOOD;
+ }
+}
+
+static uint64_t
+tap_rx_offload_get_port_capa(void)
+{
+ /*
+ * No specific port Rx offload capabilities.
+ */
+ return 0;
+}
+
+static uint64_t
+tap_rx_offload_get_queue_capa(void)
+{
+ return DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP;
+}
+
+/* Callback to handle the rx burst of packets to the correct interface and
+ * file descriptor(s) in a multi-queue setup.
+ */
+static uint16_t
+pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct rx_queue *rxq = queue;
+ uint16_t num_rx;
+ unsigned long num_rx_bytes = 0;
+ uint32_t trigger = tap_trigger;
+
+ if (trigger == rxq->trigger_seen)
+ return 0;
+ if (trigger)
+ rxq->trigger_seen = trigger;
+ rte_compiler_barrier();
+ for (num_rx = 0; num_rx < nb_pkts; ) {
+ struct rte_mbuf *mbuf = rxq->pool;
+ struct rte_mbuf *seg = NULL;
+ struct rte_mbuf *new_tail = NULL;
+ uint16_t data_off = rte_pktmbuf_headroom(mbuf);
+ int len;
+
+ len = readv(rxq->fd, *rxq->iovecs,
+ 1 +
+ (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+ rxq->nb_rx_desc : 1));
+ if (len < (int)sizeof(struct tun_pi))
+ break;
+
+ /* Packet couldn't fit in the provided mbuf */
+ if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
+ rxq->stats.ierrors++;
+ continue;
+ }
+
+ len -= sizeof(struct tun_pi);
+
+ mbuf->pkt_len = len;
+ mbuf->port = rxq->in_port;
+ while (1) {
+ struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);
+
+ if (unlikely(!buf)) {
+ rxq->stats.rx_nombuf++;
+ /* No new buf has been allocated: do nothing */
+ if (!new_tail || !seg)
+ goto end;
+
+ seg->next = NULL;
+ rte_pktmbuf_free(mbuf);
+
+ goto end;
+ }
+ seg = seg ? seg->next : mbuf;
+ if (rxq->pool == mbuf)
+ rxq->pool = buf;
+ if (new_tail)
+ new_tail->next = buf;
+ new_tail = buf;
+ new_tail->next = seg->next;
+
+ /* iovecs[0] is reserved for packet info (pi) */
+ (*rxq->iovecs)[mbuf->nb_segs].iov_len =
+ buf->buf_len - data_off;
+ (*rxq->iovecs)[mbuf->nb_segs].iov_base =
+ (char *)buf->buf_addr + data_off;
+
+ seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
+ seg->data_off = data_off;
+
+ len -= seg->data_len;
+ if (len <= 0)
+ break;
+ mbuf->nb_segs++;
+ /* First segment has headroom, not the others */
+ data_off = 0;
+ }
+ seg->next = NULL;
+ mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
+ RTE_PTYPE_ALL_MASK);
+ if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ tap_verify_csum(mbuf);
+
+ /* account for the receive frame */
+ bufs[num_rx++] = mbuf;
+ num_rx_bytes += mbuf->pkt_len;
+ }
+end:
+ rxq->stats.ipackets += num_rx;
+ rxq->stats.ibytes += num_rx_bytes;
+
+ return num_rx;
+}
+
+static uint64_t
+tap_tx_offload_get_port_capa(void)
+{
+ /*
+ * No specific port Tx offload capabilities.
+ */
+ return 0;
+}
+
+static uint64_t
+tap_tx_offload_get_queue_capa(void)
+{
+ return DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+}
+
+/* Finalize l4 checksum calculation */
+static void
+tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,
+ uint32_t l4_raw_cksum)
+{
+ if (l4_cksum) {
+ uint32_t cksum;
+
+ cksum = __rte_raw_cksum_reduce(l4_raw_cksum);
+ cksum += l4_phdr_cksum;
+
+ cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
+ cksum = (~cksum) & 0xffff;
+ if (cksum == 0)
+ cksum = 0xffff;
+ *l4_cksum = cksum;
+ }
+}
+
+/* Accumaulate L4 raw checksums */
+static void
+tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum,
+ uint32_t *l4_raw_cksum)
+{
+ if (l4_cksum == NULL)
+ return;
+
+ *l4_raw_cksum = __rte_raw_cksum(l4_data, l4_len, *l4_raw_cksum);
+}
+
+/* L3 and L4 pseudo headers checksum offloads */
+static void
+tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
+ unsigned int l3_len, unsigned int l4_len, uint16_t **l4_cksum,
+ uint16_t *l4_phdr_cksum, uint32_t *l4_raw_cksum)
+{
+ void *l3_hdr = packet + l2_len;
+
+ if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+ struct ipv4_hdr *iph = l3_hdr;
+ uint16_t cksum;
+
+ iph->hdr_checksum = 0;
+ cksum = rte_raw_cksum(iph, l3_len);
+ iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
+ }
+ if (ol_flags & PKT_TX_L4_MASK) {
+ void *l4_hdr;
+
+ l4_hdr = packet + l2_len + l3_len;
+ if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+ *l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
+ else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
+ *l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
+ else
+ return;
+ **l4_cksum = 0;
+ if (ol_flags & PKT_TX_IPV4)
+ *l4_phdr_cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
+ else
+ *l4_phdr_cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
+ *l4_raw_cksum = __rte_raw_cksum(l4_hdr, l4_len, 0);
+ }
+}
+
+static inline void
+tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
+ struct rte_mbuf **pmbufs,
+ uint16_t *num_packets, unsigned long *num_tx_bytes)
+{
+ int i;
+ uint16_t l234_hlen;
+
+ for (i = 0; i < num_mbufs; i++) {
+ struct rte_mbuf *mbuf = pmbufs[i];
+ struct iovec iovecs[mbuf->nb_segs + 2];
+ struct tun_pi pi = { .flags = 0, .proto = 0x00 };
+ struct rte_mbuf *seg = mbuf;
+ char m_copy[mbuf->data_len];
+ int proto;
+ int n;
+ int j;
+ int k; /* current index in iovecs for copying segments */
+ uint16_t seg_len; /* length of first segment */
+ uint16_t nb_segs;
+ uint16_t *l4_cksum; /* l4 checksum (pseudo header + payload) */
+ uint32_t l4_raw_cksum = 0; /* TCP/UDP payload raw checksum */
+ uint16_t l4_phdr_cksum = 0; /* TCP/UDP pseudo header checksum */
+ uint16_t is_cksum = 0; /* in case cksum should be offloaded */
+
+ l4_cksum = NULL;
+ if (txq->type == ETH_TUNTAP_TYPE_TUN) {
+ /*
+ * TUN and TAP are created with IFF_NO_PI disabled.
+ * For TUN PMD this mandatory as fields are used by
+ * Kernel tun.c to determine whether its IP or non IP
+ * packets.
+ *
+ * The logic fetches the first byte of data from mbuf
+ * then compares whether its v4 or v6. If first byte
+ * is 4 or 6, then protocol field is updated.
+ */
+ char *buff_data = rte_pktmbuf_mtod(seg, void *);
+ proto = (*buff_data & 0xf0);
+ pi.proto = (proto == 0x40) ?
+ rte_cpu_to_be_16(ETHER_TYPE_IPv4) :
+ ((proto == 0x60) ?
+ rte_cpu_to_be_16(ETHER_TYPE_IPv6) :
+ 0x00);
+ }
+
+ k = 0;
+ iovecs[k].iov_base = &pi;
+ iovecs[k].iov_len = sizeof(pi);
+ k++;
+
+ nb_segs = mbuf->nb_segs;
+ if (txq->csum &&
+ ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
+ (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
+ (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
+ is_cksum = 1;
+
+ /* Support only packets with at least layer 4
+ * header included in the first segment
+ */
+ seg_len = rte_pktmbuf_data_len(mbuf);
+ l234_hlen = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+ if (seg_len < l234_hlen)
+ break;
+
+ /* To change checksums, work on a * copy of l2, l3
+ * headers + l4 pseudo header
+ */
+ rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
+ l234_hlen);
+ tap_tx_l3_cksum(m_copy, mbuf->ol_flags,
+ mbuf->l2_len, mbuf->l3_len, mbuf->l4_len,
+ &l4_cksum, &l4_phdr_cksum,
+ &l4_raw_cksum);
+ iovecs[k].iov_base = m_copy;
+ iovecs[k].iov_len = l234_hlen;
+ k++;
+
+ /* Update next iovecs[] beyond l2, l3, l4 headers */
+ if (seg_len > l234_hlen) {
+ iovecs[k].iov_len = seg_len - l234_hlen;
+ iovecs[k].iov_base =
+ rte_pktmbuf_mtod(seg, char *) +
+ l234_hlen;
+ tap_tx_l4_add_rcksum(iovecs[k].iov_base,
+ iovecs[k].iov_len, l4_cksum,
+ &l4_raw_cksum);
+ k++;
+ nb_segs++;
+ }
+ seg = seg->next;
+ }
+
+ for (j = k; j <= nb_segs; j++) {
+ iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
+ iovecs[j].iov_base = rte_pktmbuf_mtod(seg, void *);
+ if (is_cksum)
+ tap_tx_l4_add_rcksum(iovecs[j].iov_base,
+ iovecs[j].iov_len, l4_cksum,
+ &l4_raw_cksum);
+ seg = seg->next;
+ }
+
+ if (is_cksum)
+ tap_tx_l4_cksum(l4_cksum, l4_phdr_cksum, l4_raw_cksum);
+
+ /* copy the tx frame data */
+ n = writev(txq->fd, iovecs, j);
+ if (n <= 0)
+ break;
+ (*num_packets)++;
+ (*num_tx_bytes) += rte_pktmbuf_pkt_len(mbuf);
+ }
+}
+
+/* Callback to handle sending packets from the tap interface
+ */
+static uint16_t
+pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct tx_queue *txq = queue;
+ uint16_t num_tx = 0;
+ uint16_t num_packets = 0;
+ unsigned long num_tx_bytes = 0;
+ uint32_t max_size;
+ int i;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ struct rte_mbuf *gso_mbufs[MAX_GSO_MBUFS];
+ max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf_in = bufs[num_tx];
+ struct rte_mbuf **mbuf;
+ uint16_t num_mbufs = 0;
+ uint16_t tso_segsz = 0;
+ int ret;
+ uint16_t hdrs_len;
+ int j;
+ uint64_t tso;
+
+ tso = mbuf_in->ol_flags & PKT_TX_TCP_SEG;
+ if (tso) {
+ struct rte_gso_ctx *gso_ctx = &txq->gso_ctx;
+
+ assert(gso_ctx != NULL);
+
+ /* TCP segmentation implies TCP checksum offload */
+ mbuf_in->ol_flags |= PKT_TX_TCP_CKSUM;
+
+ /* gso size is calculated without ETHER_CRC_LEN */
+ hdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +
+ mbuf_in->l4_len;
+ tso_segsz = mbuf_in->tso_segsz + hdrs_len;
+ if (unlikely(tso_segsz == hdrs_len) ||
+ tso_segsz > *txq->mtu) {
+ txq->stats.errs++;
+ break;
+ }
+ gso_ctx->gso_size = tso_segsz;
+ ret = rte_gso_segment(mbuf_in, /* packet to segment */
+ gso_ctx, /* gso control block */
+ (struct rte_mbuf **)&gso_mbufs, /* out mbufs */
+ RTE_DIM(gso_mbufs)); /* max tso mbufs */
+
+ /* ret contains the number of new created mbufs */
+ if (ret < 0)
+ break;
+
+ mbuf = gso_mbufs;
+ num_mbufs = ret;
+ } else {
+ /* stats.errs will be incremented */
+ if (rte_pktmbuf_pkt_len(mbuf_in) > max_size)
+ break;
+
+ /* ret 0 indicates no new mbufs were created */
+ ret = 0;
+ mbuf = &mbuf_in;
+ num_mbufs = 1;
+ }
+
+ tap_write_mbufs(txq, num_mbufs, mbuf,
+ &num_packets, &num_tx_bytes);
+ num_tx++;
+ /* free original mbuf */
+ rte_pktmbuf_free(mbuf_in);
+ /* free tso mbufs */
+ for (j = 0; j < ret; j++)
+ rte_pktmbuf_free(mbuf[j]);
+ }
+
+ txq->stats.opackets += num_packets;
+ txq->stats.errs += nb_pkts - num_tx;
+ txq->stats.obytes += num_tx_bytes;
+
+ return num_tx;
+}
+
+static const char *
+tap_ioctl_req2str(unsigned long request)
+{
+ switch (request) {
+ case SIOCSIFFLAGS:
+ return "SIOCSIFFLAGS";
+ case SIOCGIFFLAGS:
+ return "SIOCGIFFLAGS";
+ case SIOCGIFHWADDR:
+ return "SIOCGIFHWADDR";
+ case SIOCSIFHWADDR:
+ return "SIOCSIFHWADDR";
+ case SIOCSIFMTU:
+ return "SIOCSIFMTU";
+ }
+ return "UNKNOWN";
+}
+
+static int
+tap_ioctl(struct pmd_internals *pmd, unsigned long request,
+ struct ifreq *ifr, int set, enum ioctl_mode mode)
+{
+ short req_flags = ifr->ifr_flags;
+ int remote = pmd->remote_if_index &&
+ (mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);
+
+ if (!pmd->remote_if_index && mode == REMOTE_ONLY)
+ return 0;
+ /*
+ * If there is a remote netdevice, apply ioctl on it, then apply it on
+ * the tap netdevice.
+ */
+apply:
+ if (remote)
+ snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->remote_iface);
+ else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
+ snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->name);
+ switch (request) {
+ case SIOCSIFFLAGS:
+ /* fetch current flags to leave other flags untouched */
+ if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
+ goto error;
+ if (set)
+ ifr->ifr_flags |= req_flags;
+ else
+ ifr->ifr_flags &= ~req_flags;
+ break;
+ case SIOCGIFFLAGS:
+ case SIOCGIFHWADDR:
+ case SIOCSIFHWADDR:
+ case SIOCSIFMTU:
+ break;
+ default:
+ RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
+ pmd->name);
+ return -EINVAL;
+ }
+ if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
+ goto error;
+ if (remote-- && mode == LOCAL_AND_REMOTE)
+ goto apply;
+ return 0;
+
+error:
+ TAP_LOG(DEBUG, "%s(%s) failed: %s(%d)", ifr->ifr_name,
+ tap_ioctl_req2str(request), strerror(errno), errno);
+ return -errno;
+}
+
+static int
+tap_link_set_down(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_UP };
+
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
+}
+
+static int
+tap_link_set_up(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_UP };
+
+ dev->data->dev_link.link_status = ETH_LINK_UP;
+ return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
+}
+
+static int
+tap_dev_start(struct rte_eth_dev *dev)
+{
+ int err, i;
+
+ err = tap_intr_handle_set(dev, 1);
+ if (err)
+ return err;
+
+ err = tap_link_set_up(dev);
+ if (err)
+ return err;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return err;
+}
+
+/* This function gets called when the current port gets stopped.
+ */
+static void
+tap_dev_stop(struct rte_eth_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ tap_intr_handle_set(dev, 0);
+ tap_link_set_down(dev);
+}
+
+static int
+tap_dev_configure(struct rte_eth_dev *dev)
+{
+ if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
+ TAP_LOG(ERR,
+ "%s: number of rx queues %d exceeds max num of queues %d",
+ dev->device->name,
+ dev->data->nb_rx_queues,
+ RTE_PMD_TAP_MAX_QUEUES);
+ return -1;
+ }
+ if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
+ TAP_LOG(ERR,
+ "%s: number of tx queues %d exceeds max num of queues %d",
+ dev->device->name,
+ dev->data->nb_tx_queues,
+ RTE_PMD_TAP_MAX_QUEUES);
+ return -1;
+ }
+
+ TAP_LOG(INFO, "%s: %p: TX configured queues number: %u",
+ dev->device->name, (void *)dev, dev->data->nb_tx_queues);
+
+ TAP_LOG(INFO, "%s: %p: RX configured queues number: %u",
+ dev->device->name, (void *)dev, dev->data->nb_rx_queues);
+
+ return 0;
+}
+
+static uint32_t
+tap_dev_speed_capa(void)
+{
+ uint32_t speed = pmd_link.link_speed;
+ uint32_t capa = 0;
+
+ if (speed >= ETH_SPEED_NUM_10M)
+ capa |= ETH_LINK_SPEED_10M;
+ if (speed >= ETH_SPEED_NUM_100M)
+ capa |= ETH_LINK_SPEED_100M;
+ if (speed >= ETH_SPEED_NUM_1G)
+ capa |= ETH_LINK_SPEED_1G;
+ if (speed >= ETH_SPEED_NUM_5G)
+ capa |= ETH_LINK_SPEED_2_5G;
+ if (speed >= ETH_SPEED_NUM_5G)
+ capa |= ETH_LINK_SPEED_5G;
+ if (speed >= ETH_SPEED_NUM_10G)
+ capa |= ETH_LINK_SPEED_10G;
+ if (speed >= ETH_SPEED_NUM_20G)
+ capa |= ETH_LINK_SPEED_20G;
+ if (speed >= ETH_SPEED_NUM_25G)
+ capa |= ETH_LINK_SPEED_25G;
+ if (speed >= ETH_SPEED_NUM_40G)
+ capa |= ETH_LINK_SPEED_40G;
+ if (speed >= ETH_SPEED_NUM_50G)
+ capa |= ETH_LINK_SPEED_50G;
+ if (speed >= ETH_SPEED_NUM_56G)
+ capa |= ETH_LINK_SPEED_56G;
+ if (speed >= ETH_SPEED_NUM_100G)
+ capa |= ETH_LINK_SPEED_100G;
+
+ return capa;
+}
+
+static void
+tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ dev_info->if_index = internals->if_index;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
+ dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
+ dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->speed_capa = tap_dev_speed_capa();
+ dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa();
+ dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa();
+ dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() |
+ dev_info->tx_queue_offload_capa;
+ dev_info->hash_key_size = TAP_RSS_HASH_KEY_SIZE;
+ /*
+ * limitation: TAP supports all of IP, UDP and TCP hash
+ * functions together and not in partial combinations
+ */
+ dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
+}
+
+static int
+tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
+{
+ unsigned int i, imax;
+ unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
+ unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
+ unsigned long rx_nombuf = 0, ierrors = 0;
+ const struct pmd_internals *pmd = dev->data->dev_private;
+
+ /* rx queue statistics */
+ imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
+ dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
+ for (i = 0; i < imax; i++) {
+ tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
+ tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
+ rx_total += tap_stats->q_ipackets[i];
+ rx_bytes_total += tap_stats->q_ibytes[i];
+ rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
+ ierrors += pmd->rxq[i].stats.ierrors;
+ }
+
+ /* tx queue statistics */
+ imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
+ dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
+
+ for (i = 0; i < imax; i++) {
+ tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
+ tap_stats->q_errors[i] = pmd->txq[i].stats.errs;
+ tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
+ tx_total += tap_stats->q_opackets[i];
+ tx_err_total += tap_stats->q_errors[i];
+ tx_bytes_total += tap_stats->q_obytes[i];
+ }
+
+ tap_stats->ipackets = rx_total;
+ tap_stats->ibytes = rx_bytes_total;
+ tap_stats->ierrors = ierrors;
+ tap_stats->rx_nombuf = rx_nombuf;
+ tap_stats->opackets = tx_total;
+ tap_stats->oerrors = tx_err_total;
+ tap_stats->obytes = tx_bytes_total;
+ return 0;
+}
+
+static void
+tap_stats_reset(struct rte_eth_dev *dev)
+{
+ int i;
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
+ pmd->rxq[i].stats.ipackets = 0;
+ pmd->rxq[i].stats.ibytes = 0;
+ pmd->rxq[i].stats.ierrors = 0;
+ pmd->rxq[i].stats.rx_nombuf = 0;
+
+ pmd->txq[i].stats.opackets = 0;
+ pmd->txq[i].stats.errs = 0;
+ pmd->txq[i].stats.obytes = 0;
+ }
+}
+
+static void
+tap_dev_close(struct rte_eth_dev *dev)
+{
+ int i;
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ tap_link_set_down(dev);
+ tap_flow_flush(dev, NULL);
+ tap_flow_implicit_flush(internals, NULL);
+
+ for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
+ if (internals->rxq[i].fd != -1) {
+ close(internals->rxq[i].fd);
+ internals->rxq[i].fd = -1;
+ }
+ if (internals->txq[i].fd != -1) {
+ close(internals->txq[i].fd);
+ internals->txq[i].fd = -1;
+ }
+ }
+
+ if (internals->remote_if_index) {
+ /* Restore initial remote state */
+ ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
+ &internals->remote_initial_flags);
+ }
+
+ if (internals->ka_fd != -1) {
+ close(internals->ka_fd);
+ internals->ka_fd = -1;
+ }
+ /*
+ * Since TUN device has no more opened file descriptors
+ * it will be removed from kernel
+ */
+}
+
+static void
+tap_rx_queue_release(void *queue)
+{
+ struct rx_queue *rxq = queue;
+
+ if (rxq && (rxq->fd > 0)) {
+ close(rxq->fd);
+ rxq->fd = -1;
+ rte_pktmbuf_free(rxq->pool);
+ rte_free(rxq->iovecs);
+ rxq->pool = NULL;
+ rxq->iovecs = NULL;
+ }
+}
+
+static void
+tap_tx_queue_release(void *queue)
+{
+ struct tx_queue *txq = queue;
+
+ if (txq && (txq->fd > 0)) {
+ close(txq->fd);
+ txq->fd = -1;
+ }
+}
+
+static int
+tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
+{
+ struct rte_eth_link *dev_link = &dev->data->dev_link;
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = 0 };
+
+ if (pmd->remote_if_index) {
+ tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
+ if (!(ifr.ifr_flags & IFF_UP) ||
+ !(ifr.ifr_flags & IFF_RUNNING)) {
+ dev_link->link_status = ETH_LINK_DOWN;
+ return 0;
+ }
+ }
+ tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
+ dev_link->link_status =
+ ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
+ ETH_LINK_UP :
+ ETH_LINK_DOWN);
+ return 0;
+}
+
+static void
+tap_promisc_enable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
+
+ dev->data->promiscuous = 1;
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
+ if (pmd->remote_if_index && !pmd->flow_isolate)
+ tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
+}
+
+static void
+tap_promisc_disable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
+
+ dev->data->promiscuous = 0;
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
+ if (pmd->remote_if_index && !pmd->flow_isolate)
+ tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
+}
+
+static void
+tap_allmulti_enable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
+
+ dev->data->all_multicast = 1;
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
+ if (pmd->remote_if_index && !pmd->flow_isolate)
+ tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
+}
+
+static void
+tap_allmulti_disable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
+
+ dev->data->all_multicast = 0;
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
+ if (pmd->remote_if_index && !pmd->flow_isolate)
+ tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
+}
+
+static int
+tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ enum ioctl_mode mode = LOCAL_ONLY;
+ struct ifreq ifr;
+ int ret;
+
+ if (pmd->type == ETH_TUNTAP_TYPE_TUN) {
+ TAP_LOG(ERR, "%s: can't MAC address for TUN",
+ dev->device->name);
+ return -ENOTSUP;
+ }
+
+ if (is_zero_ether_addr(mac_addr)) {
+ TAP_LOG(ERR, "%s: can't set an empty MAC address",
+ dev->device->name);
+ return -EINVAL;
+ }
+ /* Check the actual current MAC address on the tap netdevice */
+ ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY);
+ if (ret < 0)
+ return ret;
+ if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
+ mac_addr))
+ return 0;
+ /* Check the current MAC address on the remote */
+ ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY);
+ if (ret < 0)
+ return ret;
+ if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
+ mac_addr))
+ mode = LOCAL_AND_REMOTE;
+ ifr.ifr_hwaddr.sa_family = AF_LOCAL;
+ rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
+ ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);
+ if (ret < 0)
+ return ret;
+ rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
+ if (pmd->remote_if_index && !pmd->flow_isolate) {
+ /* Replace MAC redirection rule after a MAC change */
+ ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);
+ if (ret < 0) {
+ TAP_LOG(ERR,
+ "%s: Couldn't delete MAC redirection rule",
+ dev->device->name);
+ return ret;
+ }
+ ret = tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC);
+ if (ret < 0) {
+ TAP_LOG(ERR,
+ "%s: Couldn't add MAC redirection rule",
+ dev->device->name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
+{
+ uint32_t gso_types;
+ char pool_name[64];
+
+ /*
+ * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
+ * size per mbuf use this pool for both direct and indirect mbufs
+ */
+
+ struct rte_mempool *mp; /* Mempool for GSO packets */
+
+ /* initialize GSO context */
+ gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+ snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
+ mp = rte_mempool_lookup((const char *)pool_name);
+ if (!mp) {
+ mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
+ TAP_GSO_MBUF_CACHE_SIZE, 0,
+ RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
+ SOCKET_ID_ANY);
+ if (!mp) {
+ struct pmd_internals *pmd = dev->data->dev_private;
+ RTE_LOG(DEBUG, PMD, "%s: failed to create mbuf pool for device %s\n",
+ pmd->name, dev->device->name);
+ return -1;
+ }
+ }
+
+ gso_ctx->direct_pool = mp;
+ gso_ctx->indirect_pool = mp;
+ gso_ctx->gso_types = gso_types;
+ gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
+ gso_ctx->flag = 0;
+
+ return 0;
+}
+
+static int
+tap_setup_queue(struct rte_eth_dev *dev,
+ struct pmd_internals *internals,
+ uint16_t qid,
+ int is_rx)
+{
+ int ret;
+ int *fd;
+ int *other_fd;
+ const char *dir;
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct rx_queue *rx = &internals->rxq[qid];
+ struct tx_queue *tx = &internals->txq[qid];
+ struct rte_gso_ctx *gso_ctx;
+
+ if (is_rx) {
+ fd = &rx->fd;
+ other_fd = &tx->fd;
+ dir = "rx";
+ gso_ctx = NULL;
+ } else {
+ fd = &tx->fd;
+ other_fd = &rx->fd;
+ dir = "tx";
+ gso_ctx = &tx->gso_ctx;
+ }
+ if (*fd != -1) {
+ /* fd for this queue already exists */
+ TAP_LOG(DEBUG, "%s: fd %d for %s queue qid %d exists",
+ pmd->name, *fd, dir, qid);
+ gso_ctx = NULL;
+ } else if (*other_fd != -1) {
+ /* Only other_fd exists. dup it */
+ *fd = dup(*other_fd);
+ if (*fd < 0) {
+ *fd = -1;
+ TAP_LOG(ERR, "%s: dup() failed.", pmd->name);
+ return -1;
+ }
+ TAP_LOG(DEBUG, "%s: dup fd %d for %s queue qid %d (%d)",
+ pmd->name, *other_fd, dir, qid, *fd);
+ } else {
+ /* Both RX and TX fds do not exist (equal -1). Create fd */
+ *fd = tun_alloc(pmd, 0);
+ if (*fd < 0) {
+ *fd = -1; /* restore original value */
+ TAP_LOG(ERR, "%s: tun_alloc() failed.", pmd->name);
+ return -1;
+ }
+ TAP_LOG(DEBUG, "%s: add %s queue for qid %d fd %d",
+ pmd->name, dir, qid, *fd);
+ }
+
+ tx->mtu = &dev->data->mtu;
+ rx->rxmode = &dev->data->dev_conf.rxmode;
+ if (gso_ctx) {
+ ret = tap_gso_ctx_setup(gso_ctx, dev);
+ if (ret)
+ return -1;
+ }
+
+ tx->type = pmd->type;
+
+ return *fd;
+}
+
+static int
+tap_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct rx_queue *rxq = &internals->rxq[rx_queue_id];
+ struct rte_mbuf **tmp = &rxq->pool;
+ long iov_max = sysconf(_SC_IOV_MAX);
+ uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
+ struct iovec (*iovecs)[nb_desc + 1];
+ int data_off = RTE_PKTMBUF_HEADROOM;
+ int ret = 0;
+ int fd;
+ int i;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues || !mp) {
+ TAP_LOG(WARNING,
+ "nb_rx_queues %d too small or mempool NULL",
+ dev->data->nb_rx_queues);
+ return -1;
+ }
+
+ rxq->mp = mp;
+ rxq->trigger_seen = 1; /* force initial burst */
+ rxq->in_port = dev->data->port_id;
+ rxq->nb_rx_desc = nb_desc;
+ iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
+ socket_id);
+ if (!iovecs) {
+ TAP_LOG(WARNING,
+ "%s: Couldn't allocate %d RX descriptors",
+ dev->device->name, nb_desc);
+ return -ENOMEM;
+ }
+ rxq->iovecs = iovecs;
+
+ dev->data->rx_queues[rx_queue_id] = rxq;
+ fd = tap_setup_queue(dev, internals, rx_queue_id, 1);
+ if (fd == -1) {
+ ret = fd;
+ goto error;
+ }
+
+ (*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
+ (*rxq->iovecs)[0].iov_base = &rxq->pi;
+
+ for (i = 1; i <= nb_desc; i++) {
+ *tmp = rte_pktmbuf_alloc(rxq->mp);
+ if (!*tmp) {
+ TAP_LOG(WARNING,
+ "%s: couldn't allocate memory for queue %d",
+ dev->device->name, rx_queue_id);
+ ret = -ENOMEM;
+ goto error;
+ }
+ (*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
+ (*rxq->iovecs)[i].iov_base =
+ (char *)(*tmp)->buf_addr + data_off;
+ data_off = 0;
+ tmp = &(*tmp)->next;
+ }
+
+ TAP_LOG(DEBUG, " RX TUNTAP device name %s, qid %d on fd %d",
+ internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);
+
+ return 0;
+
+error:
+ rte_pktmbuf_free(rxq->pool);
+ rxq->pool = NULL;
+ rte_free(rxq->iovecs);
+ rxq->iovecs = NULL;
+ return ret;
+}
+
+static int
+tap_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct tx_queue *txq;
+ int ret;
+ uint64_t offloads;
+
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -1;
+ dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ txq->csum = !!(offloads &
+ (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM));
+
+ ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
+ if (ret == -1)
+ return -1;
+ TAP_LOG(DEBUG,
+ " TX TUNTAP device name %s, qid %d on fd %d csum %s",
+ internals->name, tx_queue_id, internals->txq[tx_queue_id].fd,
+ txq->csum ? "on" : "off");
+
+ return 0;
+}
+
+static int
+tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_mtu = mtu };
+ int err = 0;
+
+ err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
+ if (!err)
+ dev->data->mtu = mtu;
+
+ return err;
+}
+
+static int
+tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
+ struct ether_addr *mc_addr_set __rte_unused,
+ uint32_t nb_mc_addr __rte_unused)
+{
+ /*
+ * Nothing to do actually: the tap has no filtering whatsoever, every
+ * packet is received.
+ */
+ return 0;
+}
+
+static int
+tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
+{
+ struct rte_eth_dev *dev = arg;
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifinfomsg *info = NLMSG_DATA(nh);
+
+ if (nh->nlmsg_type != RTM_NEWLINK ||
+ (info->ifi_index != pmd->if_index &&
+ info->ifi_index != pmd->remote_if_index))
+ return 0;
+ return tap_link_update(dev, 0);
+}
+
+static void
+tap_dev_intr_handler(void *cb_arg)
+{
+ struct rte_eth_dev *dev = cb_arg;
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ tap_nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
+}
+
+static int
+tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ /* In any case, disable interrupt if the conf is no longer there. */
+ if (!dev->data->dev_conf.intr_conf.lsc) {
+ if (pmd->intr_handle.fd != -1) {
+ tap_nl_final(pmd->intr_handle.fd);
+ rte_intr_callback_unregister(&pmd->intr_handle,
+ tap_dev_intr_handler, dev);
+ }
+ return 0;
+ }
+ if (set) {
+ pmd->intr_handle.fd = tap_nl_init(RTMGRP_LINK);
+ if (unlikely(pmd->intr_handle.fd == -1))
+ return -EBADF;
+ return rte_intr_callback_register(
+ &pmd->intr_handle, tap_dev_intr_handler, dev);
+ }
+ tap_nl_final(pmd->intr_handle.fd);
+ return rte_intr_callback_unregister(&pmd->intr_handle,
+ tap_dev_intr_handler, dev);
+}
+
+static int
+tap_intr_handle_set(struct rte_eth_dev *dev, int set)
+{
+ int err;
+
+ err = tap_lsc_intr_handle_set(dev, set);
+ if (err)
+ return err;
+ err = tap_rx_intr_vec_set(dev, set);
+ if (err && set)
+ tap_lsc_intr_handle_set(dev, 0);
+ return err;
+}
+
+static const uint32_t*
+tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ RTE_PTYPE_INNER_L2_ETHER_QINQ,
+ RTE_PTYPE_INNER_L3_IPV4,
+ RTE_PTYPE_INNER_L3_IPV4_EXT,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_SCTP,
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L2_ETHER_QINQ,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_SCTP,
+ };
+
+ return ptypes;
+}
+
+static int
+tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ fc_conf->mode = RTE_FC_NONE;
+ return 0;
+}
+
+static int
+tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ if (fc_conf->mode != RTE_FC_NONE)
+ return -ENOTSUP;
+ return 0;
+}
+
+/**
+ * DPDK callback to update the RSS hash configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] rss_conf
+ * RSS configuration data.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+tap_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ if (rss_conf->rss_hf & TAP_RSS_HF_MASK) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (rss_conf->rss_key && rss_conf->rss_key_len) {
+ /*
+ * Currently TAP RSS key is hard coded
+ * and cannot be updated
+ */
+ TAP_LOG(ERR,
+ "port %u RSS key cannot be updated",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ return 0;
+}
+
+static int
+tap_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+tap_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+tap_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static int
+tap_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+static const struct eth_dev_ops ops = {
+ .dev_start = tap_dev_start,
+ .dev_stop = tap_dev_stop,
+ .dev_close = tap_dev_close,
+ .dev_configure = tap_dev_configure,
+ .dev_infos_get = tap_dev_info,
+ .rx_queue_setup = tap_rx_queue_setup,
+ .tx_queue_setup = tap_tx_queue_setup,
+ .rx_queue_start = tap_rx_queue_start,
+ .tx_queue_start = tap_tx_queue_start,
+ .rx_queue_stop = tap_rx_queue_stop,
+ .tx_queue_stop = tap_tx_queue_stop,
+ .rx_queue_release = tap_rx_queue_release,
+ .tx_queue_release = tap_tx_queue_release,
+ .flow_ctrl_get = tap_flow_ctrl_get,
+ .flow_ctrl_set = tap_flow_ctrl_set,
+ .link_update = tap_link_update,
+ .dev_set_link_up = tap_link_set_up,
+ .dev_set_link_down = tap_link_set_down,
+ .promiscuous_enable = tap_promisc_enable,
+ .promiscuous_disable = tap_promisc_disable,
+ .allmulticast_enable = tap_allmulti_enable,
+ .allmulticast_disable = tap_allmulti_disable,
+ .mac_addr_set = tap_mac_set,
+ .mtu_set = tap_mtu_set,
+ .set_mc_addr_list = tap_set_mc_addr_list,
+ .stats_get = tap_stats_get,
+ .stats_reset = tap_stats_reset,
+ .dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
+ .rss_hash_update = tap_rss_hash_update,
+ .filter_ctrl = tap_dev_filter_ctrl,
+};
+
+static int
+eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
+ char *remote_iface, struct ether_addr *mac_addr,
+ enum rte_tuntap_type type)
+{
+ int numa_node = rte_socket_id();
+ struct rte_eth_dev *dev;
+ struct pmd_internals *pmd;
+ struct rte_eth_dev_data *data;
+ struct ifreq ifr;
+ int i;
+
+ TAP_LOG(DEBUG, "%s device on numa %u",
+ tuntap_name, rte_socket_id());
+
+ dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
+ if (!dev) {
+ TAP_LOG(ERR, "%s Unable to allocate device struct",
+ tuntap_name);
+ goto error_exit_nodev;
+ }
+
+ pmd = dev->data->dev_private;
+ pmd->dev = dev;
+ snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
+ pmd->type = type;
+
+ pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (pmd->ioctl_sock == -1) {
+ TAP_LOG(ERR,
+ "%s Unable to get a socket for management: %s",
+ tuntap_name, strerror(errno));
+ goto error_exit;
+ }
+
+ /* Setup some default values */
+ data = dev->data;
+ data->dev_private = pmd;
+ data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+ data->numa_node = numa_node;
+
+ data->dev_link = pmd_link;
+ data->mac_addrs = &pmd->eth_addr;
+ /* Set the number of RX and TX queues */
+ data->nb_rx_queues = 0;
+ data->nb_tx_queues = 0;
+
+ dev->dev_ops = &ops;
+ dev->rx_pkt_burst = pmd_rx_burst;
+ dev->tx_pkt_burst = pmd_tx_burst;
+
+ pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
+ pmd->intr_handle.fd = -1;
+ dev->intr_handle = &pmd->intr_handle;
+
+ /* Presetup the fds to -1 as being not valid */
+ pmd->ka_fd = -1;
+ for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
+ pmd->rxq[i].fd = -1;
+ pmd->txq[i].fd = -1;
+ }
+
+ if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
+ if (is_zero_ether_addr(mac_addr))
+ eth_random_addr((uint8_t *)&pmd->eth_addr);
+ else
+ rte_memcpy(&pmd->eth_addr, mac_addr, sizeof(*mac_addr));
+ }
+
+ /*
+ * Allocate a TUN device keep-alive file descriptor that will only be
+ * closed when the TUN device itself is closed or removed.
+ * This keep-alive file descriptor will guarantee that the TUN device
+ * exists even when all of its queues are closed
+ */
+ pmd->ka_fd = tun_alloc(pmd, 1);
+ if (pmd->ka_fd == -1) {
+ TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
+ goto error_exit;
+ }
+
+ ifr.ifr_mtu = dev->data->mtu;
+ if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
+ goto error_exit;
+
+ if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
+ memset(&ifr, 0, sizeof(struct ifreq));
+ ifr.ifr_hwaddr.sa_family = AF_LOCAL;
+ rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
+ ETHER_ADDR_LEN);
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
+ goto error_exit;
+ }
+
+ /*
+ * Set up everything related to rte_flow:
+ * - netlink socket
+ * - tap / remote if_index
+ * - mandatory QDISCs
+ * - rte_flow actual/implicit lists
+ * - implicit rules
+ */
+ pmd->nlsk_fd = tap_nl_init(0);
+ if (pmd->nlsk_fd == -1) {
+ TAP_LOG(WARNING, "%s: failed to create netlink socket.",
+ pmd->name);
+ goto disable_rte_flow;
+ }
+ pmd->if_index = if_nametoindex(pmd->name);
+ if (!pmd->if_index) {
+ TAP_LOG(ERR, "%s: failed to get if_index.", pmd->name);
+ goto disable_rte_flow;
+ }
+ if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
+ TAP_LOG(ERR, "%s: failed to create multiq qdisc.",
+ pmd->name);
+ goto disable_rte_flow;
+ }
+ if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
+ TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
+ pmd->name);
+ goto disable_rte_flow;
+ }
+ LIST_INIT(&pmd->flows);
+
+ if (strlen(remote_iface)) {
+ pmd->remote_if_index = if_nametoindex(remote_iface);
+ if (!pmd->remote_if_index) {
+ TAP_LOG(ERR, "%s: failed to get %s if_index.",
+ pmd->name, remote_iface);
+ goto error_remote;
+ }
+ snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
+ "%s", remote_iface);
+
+ /* Save state of remote device */
+ tap_ioctl(pmd, SIOCGIFFLAGS, &pmd->remote_initial_flags, 0, REMOTE_ONLY);
+
+ /* Replicate remote MAC address */
+ if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
+ TAP_LOG(ERR, "%s: failed to get %s MAC address.",
+ pmd->name, pmd->remote_iface);
+ goto error_remote;
+ }
+ rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
+ ETHER_ADDR_LEN);
+ /* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
+ TAP_LOG(ERR, "%s: failed to get %s MAC address.",
+ pmd->name, remote_iface);
+ goto error_remote;
+ }
+
+ /*
+ * Flush usually returns negative value because it tries to
+ * delete every QDISC (and on a running device, one QDISC at
+ * least is needed). Ignore negative return value.
+ */
+ qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
+ if (qdisc_create_ingress(pmd->nlsk_fd,
+ pmd->remote_if_index) < 0) {
+ TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
+ pmd->remote_iface);
+ goto error_remote;
+ }
+ LIST_INIT(&pmd->implicit_flows);
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
+ tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
+ tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
+ tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
+ TAP_LOG(ERR,
+ "%s: failed to create implicit rules.",
+ pmd->name);
+ goto error_remote;
+ }
+ }
+
+ rte_eth_dev_probing_finish(dev);
+ return 0;
+
+disable_rte_flow:
+ TAP_LOG(ERR, " Disabling rte flow support: %s(%d)",
+ strerror(errno), errno);
+ if (strlen(remote_iface)) {
+ TAP_LOG(ERR, "Remote feature requires flow support.");
+ goto error_exit;
+ }
+ return 0;
+
+error_remote:
+ TAP_LOG(ERR, " Can't set up remote feature: %s(%d)",
+ strerror(errno), errno);
+ tap_flow_implicit_flush(pmd, NULL);
+
+error_exit:
+ if (pmd->ioctl_sock > 0)
+ close(pmd->ioctl_sock);
+ rte_eth_dev_release_port(dev);
+
+error_exit_nodev:
+ TAP_LOG(ERR, "%s Unable to initialize %s",
+ tuntap_name, rte_vdev_device_name(vdev));
+
+ return -EINVAL;
+}
+
+static int
+set_interface_name(const char *key __rte_unused,
+ const char *value,
+ void *extra_args)
+{
+ char *name = (char *)extra_args;
+
+ if (value)
+ strlcpy(name, value, RTE_ETH_NAME_MAX_LEN - 1);
+ else
+ snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s%d",
+ DEFAULT_TAP_NAME, (tap_unit - 1));
+
+ return 0;
+}
+
+static int
+set_remote_iface(const char *key __rte_unused,
+ const char *value,
+ void *extra_args)
+{
+ char *name = (char *)extra_args;
+
+ if (value)
+ strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);
+
+ return 0;
+}
+
+static int parse_user_mac(struct ether_addr *user_mac,
+ const char *value)
+{
+ unsigned int index = 0;
+ char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL;
+
+ if (user_mac == NULL || value == NULL)
+ return 0;
+
+ strlcpy(mac_temp, value, sizeof(mac_temp));
+ mac_byte = strtok(mac_temp, ":");
+
+ while ((mac_byte != NULL) &&
+ (strlen(mac_byte) <= 2) &&
+ (strlen(mac_byte) == strspn(mac_byte,
+ ETH_TAP_CMP_MAC_FMT))) {
+ user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16);
+ mac_byte = strtok(NULL, ":");
+ }
+
+ return index;
+}
+
+static int
+set_mac_type(const char *key __rte_unused,
+ const char *value,
+ void *extra_args)
+{
+ struct ether_addr *user_mac = extra_args;
+
+ if (!value)
+ return 0;
+
+ if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) {
+ static int iface_idx;
+
+ /* fixed mac = 00:64:74:61:70:<iface_idx> */
+ memcpy((char *)user_mac->addr_bytes, "\0dtap", ETHER_ADDR_LEN);
+ user_mac->addr_bytes[ETHER_ADDR_LEN - 1] = iface_idx++ + '0';
+ goto success;
+ }
+
+ if (parse_user_mac(user_mac, value) != 6)
+ goto error;
+success:
+ TAP_LOG(DEBUG, "TAP user MAC param (%s)", value);
+ return 0;
+
+error:
+ TAP_LOG(ERR, "TAP user MAC (%s) is not in format (%s|%s)",
+ value, ETH_TAP_MAC_FIXED, ETH_TAP_USR_MAC_FMT);
+ return -1;
+}
+
+/*
+ * Open a TUN interface device. TUN PMD
+ * 1) sets tap_type as false
+ * 2) intakes iface as argument.
+ * 3) as interface is virtual set speed to 10G
+ */
+static int
+rte_pmd_tun_probe(struct rte_vdev_device *dev)
+{
+ const char *name, *params;
+ int ret;
+ struct rte_kvargs *kvlist = NULL;
+ char tun_name[RTE_ETH_NAME_MAX_LEN];
+ char remote_iface[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+
+ strcpy(tuntap_name, "TUN");
+
+ name = rte_vdev_device_name(dev);
+ params = rte_vdev_device_args(dev);
+ memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(params) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ TAP_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ snprintf(tun_name, sizeof(tun_name), "%s%u",
+ DEFAULT_TUN_NAME, tun_unit++);
+
+ if (params && (params[0] != '\0')) {
+ TAP_LOG(DEBUG, "parameters (%s)", params);
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist) {
+ if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_IFACE_ARG,
+ &set_interface_name,
+ tun_name);
+
+ if (ret == -1)
+ goto leave;
+ }
+ }
+ }
+ pmd_link.link_speed = ETH_SPEED_NUM_10G;
+
+ TAP_LOG(NOTICE, "Initializing pmd_tun for %s as %s",
+ name, tun_name);
+
+ ret = eth_dev_tap_create(dev, tun_name, remote_iface, 0,
+ ETH_TUNTAP_TYPE_TUN);
+
+leave:
+ if (ret == -1) {
+ TAP_LOG(ERR, "Failed to create pmd for %s as %s",
+ name, tun_name);
+ tun_unit--; /* Restore the unit number */
+ }
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+/* Open a TAP interface device.
+ */
+static int
+rte_pmd_tap_probe(struct rte_vdev_device *dev)
+{
+ const char *name, *params;
+ int ret;
+ struct rte_kvargs *kvlist = NULL;
+ int speed;
+ char tap_name[RTE_ETH_NAME_MAX_LEN];
+ char remote_iface[RTE_ETH_NAME_MAX_LEN];
+ struct ether_addr user_mac = { .addr_bytes = {0} };
+ struct rte_eth_dev *eth_dev;
+
+ strcpy(tuntap_name, "TAP");
+
+ name = rte_vdev_device_name(dev);
+ params = rte_vdev_device_args(dev);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(params) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ TAP_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ speed = ETH_SPEED_NUM_10G;
+ snprintf(tap_name, sizeof(tap_name), "%s%u",
+ DEFAULT_TAP_NAME, tap_unit++);
+ memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
+
+ if (params && (params[0] != '\0')) {
+ TAP_LOG(DEBUG, "parameters (%s)", params);
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist) {
+ if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_IFACE_ARG,
+ &set_interface_name,
+ tap_name);
+ if (ret == -1)
+ goto leave;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_REMOTE_ARG,
+ &set_remote_iface,
+ remote_iface);
+ if (ret == -1)
+ goto leave;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_MAC_ARG,
+ &set_mac_type,
+ &user_mac);
+ if (ret == -1)
+ goto leave;
+ }
+ }
+ }
+ pmd_link.link_speed = speed;
+
+ TAP_LOG(NOTICE, "Initializing pmd_tap for %s as %s",
+ name, tap_name);
+
+ ret = eth_dev_tap_create(dev, tap_name, remote_iface, &user_mac,
+ ETH_TUNTAP_TYPE_TAP);
+
+leave:
+ if (ret == -1) {
+ TAP_LOG(ERR, "Failed to create pmd for %s as %s",
+ name, tap_name);
+ tap_unit--; /* Restore the unit number */
+ }
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+/* detach a TUNTAP device.
+ */
+static int
+rte_pmd_tap_remove(struct rte_vdev_device *dev)
+{
+ struct rte_eth_dev *eth_dev = NULL;
+ struct pmd_internals *internals;
+ int i;
+
+ /* find the ethdev entry */
+ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
+ if (!eth_dev)
+ return 0;
+
+ internals = eth_dev->data->dev_private;
+
+ TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
+ (internals->type == ETH_TUNTAP_TYPE_TAP) ? "TAP" : "TUN",
+ rte_socket_id());
+
+ if (internals->nlsk_fd) {
+ tap_flow_flush(eth_dev, NULL);
+ tap_flow_implicit_flush(internals, NULL);
+ tap_nl_final(internals->nlsk_fd);
+ }
+ for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
+ if (internals->rxq[i].fd != -1) {
+ close(internals->rxq[i].fd);
+ internals->rxq[i].fd = -1;
+ }
+ if (internals->txq[i].fd != -1) {
+ close(internals->txq[i].fd);
+ internals->txq[i].fd = -1;
+ }
+ }
+
+ close(internals->ioctl_sock);
+ rte_free(eth_dev->data->dev_private);
+ rte_eth_dev_release_port(eth_dev);
+
+ if (internals->ka_fd != -1) {
+ close(internals->ka_fd);
+ internals->ka_fd = -1;
+ }
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_tun_drv = {
+ .probe = rte_pmd_tun_probe,
+ .remove = rte_pmd_tap_remove,
+};
+
+static struct rte_vdev_driver pmd_tap_drv = {
+ .probe = rte_pmd_tap_probe,
+ .remove = rte_pmd_tap_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
+RTE_PMD_REGISTER_VDEV(net_tun, pmd_tun_drv);
+RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
+RTE_PMD_REGISTER_PARAM_STRING(net_tun,
+ ETH_TAP_IFACE_ARG "=<string> ");
+RTE_PMD_REGISTER_PARAM_STRING(net_tap,
+ ETH_TAP_IFACE_ARG "=<string> "
+ ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
+ ETH_TAP_REMOTE_ARG "=<string>");
+int tap_logtype;
+
+RTE_INIT(tap_init_log)
+{
+ tap_logtype = rte_log_register("pmd.net.tap");
+ if (tap_logtype >= 0)
+ rte_log_set_level(tap_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/tap/rte_eth_tap.h b/src/spdk/dpdk/drivers/net/tap/rte_eth_tap.h
new file mode 100644
index 00000000..44e2773f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/rte_eth_tap.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef _RTE_ETH_TAP_H_
+#define _RTE_ETH_TAP_H_
+
+#include <sys/queue.h>
+#include <sys/uio.h>
+#include <inttypes.h>
+#include <net/if.h>
+
+#include <linux/if_tun.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+#include <rte_gso.h>
+#include "tap_log.h"
+
+#ifdef IFF_MULTI_QUEUE
+#define RTE_PMD_TAP_MAX_QUEUES TAP_MAX_QUEUES
+#else
+#define RTE_PMD_TAP_MAX_QUEUES 1
+#endif
+#define MAX_GSO_MBUFS 64
+
+enum rte_tuntap_type {
+ ETH_TUNTAP_TYPE_UNKNOWN,
+ ETH_TUNTAP_TYPE_TUN,
+ ETH_TUNTAP_TYPE_TAP,
+ ETH_TUNTAP_TYPE_MAX,
+};
+
+struct pkt_stats {
+ uint64_t opackets; /* Number of output packets */
+ uint64_t ipackets; /* Number of input packets */
+ uint64_t obytes; /* Number of bytes on output */
+ uint64_t ibytes; /* Number of bytes on input */
+ uint64_t errs; /* Number of TX error packets */
+ uint64_t ierrors; /* Number of RX error packets */
+ uint64_t rx_nombuf; /* Nb of RX mbuf alloc failures */
+};
+
+struct rx_queue {
+ struct rte_mempool *mp; /* Mempool for RX packets */
+ uint32_t trigger_seen; /* Last seen Rx trigger value */
+ uint16_t in_port; /* Port ID */
+ int fd;
+ struct pkt_stats stats; /* Stats for this RX queue */
+ uint16_t nb_rx_desc; /* max number of mbufs available */
+ struct rte_eth_rxmode *rxmode; /* RX features */
+ struct rte_mbuf *pool; /* mbufs pool for this queue */
+ struct iovec (*iovecs)[]; /* descriptors for this queue */
+ struct tun_pi pi; /* packet info for iovecs */
+};
+
+struct tx_queue {
+ int fd;
+ int type; /* Type field - TUN|TAP */
+ uint16_t *mtu; /* Pointer to MTU from dev_data */
+ uint16_t csum:1; /* Enable checksum offloading */
+ struct pkt_stats stats; /* Stats for this TX queue */
+ struct rte_gso_ctx gso_ctx; /* GSO context */
+};
+
+struct pmd_internals {
+ struct rte_eth_dev *dev; /* Ethernet device. */
+ char remote_iface[RTE_ETH_NAME_MAX_LEN]; /* Remote netdevice name */
+ char name[RTE_ETH_NAME_MAX_LEN]; /* Internal Tap device name */
+ int type; /* Type field - TUN|TAP */
+ struct ether_addr eth_addr; /* Mac address of the device port */
+ struct ifreq remote_initial_flags; /* Remote netdevice flags on init */
+ int remote_if_index; /* remote netdevice IF_INDEX */
+ int if_index; /* IF_INDEX for the port */
+ int ioctl_sock; /* socket for ioctl calls */
+ int nlsk_fd; /* Netlink socket fd */
+ int flow_isolate; /* 1 if flow isolation is enabled */
+ int flower_support; /* 1 if kernel supports, else 0 */
+ int flower_vlan_support; /* 1 if kernel supports, else 0 */
+ int rss_enabled; /* 1 if RSS is enabled, else 0 */
+ /* implicit rules set when RSS is enabled */
+ int map_fd; /* BPF RSS map fd */
+ int bpf_fd[RTE_PMD_TAP_MAX_QUEUES];/* List of bpf fds per queue */
+ LIST_HEAD(tap_rss_flows, rte_flow) rss_flows;
+ LIST_HEAD(tap_flows, rte_flow) flows; /* rte_flow rules */
+ /* implicit rte_flow rules set when a remote device is active */
+ LIST_HEAD(tap_implicit_flows, rte_flow) implicit_flows;
+ struct rx_queue rxq[RTE_PMD_TAP_MAX_QUEUES]; /* List of RX queues */
+ struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES]; /* List of TX queues */
+ struct rte_intr_handle intr_handle; /* LSC interrupt handle. */
+ int ka_fd; /* keep-alive file descriptor */
+};
+
+/* tap_intr.c */
+
+int tap_rx_intr_vec_set(struct rte_eth_dev *dev, int set);
+
+#endif /* _RTE_ETH_TAP_H_ */
diff --git a/src/spdk/dpdk/drivers/net/tap/rte_pmd_tap_version.map b/src/spdk/dpdk/drivers/net/tap/rte_pmd_tap_version.map
new file mode 100644
index 00000000..31eca32e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/rte_pmd_tap_version.map
@@ -0,0 +1,4 @@
+DPDK_17.02 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/tap/tap_bpf.h b/src/spdk/dpdk/drivers/net/tap/tap_bpf.h
new file mode 100644
index 00000000..9192686a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/tap_bpf.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef __TAP_BPF_H__
+#define __TAP_BPF_H__
+
+#include <tap_autoconf.h>
+
+/* Do not #include <linux/bpf.h> since eBPF must compile on different
+ * distros which may include partial definitions for eBPF (while the
+ * kernel itself may support eBPF). Instead define here all that is needed
+ */
+
+/* BPF_MAP_UPDATE_ELEM command flags */
+#define BPF_ANY 0 /* create a new element or update an existing */
+
+/* BPF architecture instruction struct */
+struct bpf_insn {
+ __u8 code;
+ __u8 dst_reg:4;
+ __u8 src_reg:4;
+ __s16 off;
+ __s32 imm; /* immediate value */
+};
+
+/* BPF program types */
+enum bpf_prog_type {
+ BPF_PROG_TYPE_UNSPEC,
+ BPF_PROG_TYPE_SOCKET_FILTER,
+ BPF_PROG_TYPE_KPROBE,
+ BPF_PROG_TYPE_SCHED_CLS,
+ BPF_PROG_TYPE_SCHED_ACT,
+};
+
+/* BPF commands types */
+enum bpf_cmd {
+ BPF_MAP_CREATE,
+ BPF_MAP_LOOKUP_ELEM,
+ BPF_MAP_UPDATE_ELEM,
+ BPF_MAP_DELETE_ELEM,
+ BPF_MAP_GET_NEXT_KEY,
+ BPF_PROG_LOAD,
+};
+
+/* BPF maps types */
+enum bpf_map_type {
+ BPF_MAP_TYPE_UNSPEC,
+ BPF_MAP_TYPE_HASH,
+};
+
+/* union of anonymous structs used with TAP BPF commands */
+union bpf_attr {
+ /* BPF_MAP_CREATE command */
+ struct {
+ __u32 map_type;
+ __u32 key_size;
+ __u32 value_size;
+ __u32 max_entries;
+ __u32 map_flags;
+ __u32 inner_map_fd;
+ };
+
+ /* BPF_MAP_UPDATE_ELEM, BPF_MAP_DELETE_ELEM commands */
+ struct {
+ __u32 map_fd;
+ __aligned_u64 key;
+ union {
+ __aligned_u64 value;
+ __aligned_u64 next_key;
+ };
+ __u64 flags;
+ };
+
+ /* BPF_PROG_LOAD command */
+ struct {
+ __u32 prog_type;
+ __u32 insn_cnt;
+ __aligned_u64 insns;
+ __aligned_u64 license;
+ __u32 log_level;
+ __u32 log_size;
+ __aligned_u64 log_buf;
+ __u32 kern_version;
+ __u32 prog_flags;
+ };
+} __attribute__((aligned(8)));
+
+#ifndef __NR_bpf
+# if defined(__i386__)
+# define __NR_bpf 357
+# elif defined(__x86_64__)
+# define __NR_bpf 321
+# elif defined(__arm__)
+# define __NR_bpf 386
+# elif defined(__aarch64__)
+# define __NR_bpf 280
+# elif defined(__sparc__)
+# define __NR_bpf 349
+# elif defined(__s390__)
+# define __NR_bpf 351
+# elif defined(__powerpc__)
+# define __NR_bpf 361
+# else
+# error __NR_bpf not defined
+# endif
+#endif
+
+enum {
+ BPF_MAP_ID_KEY,
+ BPF_MAP_ID_SIMPLE,
+};
+
+static int bpf_load(enum bpf_prog_type type, const struct bpf_insn *insns,
+ size_t insns_cnt, const char *license);
+
+#endif /* __TAP_BPF_H__ */
diff --git a/src/spdk/dpdk/drivers/net/tap/tap_bpf_api.c b/src/spdk/dpdk/drivers/net/tap/tap_bpf_api.c
new file mode 100644
index 00000000..98f6a760
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/tap_bpf_api.c
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include <rte_malloc.h>
+#include <rte_eth_tap.h>
+#include <tap_flow.h>
+#include <tap_autoconf.h>
+#include <tap_tcmsgs.h>
+#include <tap_bpf.h>
+#include <tap_bpf_insns.h>
+
+/**
+ * Load BPF program (section cls_q) into the kernel and return a bpf fd
+ *
+ * @param queue_idx
+ * Queue index matching packet cb
+ *
+ * @return
+ * -1 if the BPF program couldn't be loaded. An fd (int) otherwise.
+ */
+int tap_flow_bpf_cls_q(__u32 queue_idx)
+{
+ cls_q_insns[1].imm = queue_idx;
+
+ return bpf_load(BPF_PROG_TYPE_SCHED_CLS,
+ (struct bpf_insn *)cls_q_insns,
+ RTE_DIM(cls_q_insns),
+ "Dual BSD/GPL");
+}
+
+/**
+ * Load BPF program (section l3_l4) into the kernel and return a bpf fd.
+ *
+ * @param[in] key_idx
+ * RSS MAP key index
+ *
+ * @param[in] map_fd
+ * BPF RSS map file descriptor
+ *
+ * @return
+ * -1 if the BPF program couldn't be loaded. An fd (int) otherwise.
+ */
+int tap_flow_bpf_calc_l3_l4_hash(__u32 key_idx, int map_fd)
+{
+ l3_l4_hash_insns[4].imm = key_idx;
+ l3_l4_hash_insns[9].imm = map_fd;
+
+ return bpf_load(BPF_PROG_TYPE_SCHED_ACT,
+ (struct bpf_insn *)l3_l4_hash_insns,
+ RTE_DIM(l3_l4_hash_insns),
+ "Dual BSD/GPL");
+}
+
+/**
+ * Helper function to convert a pointer to unsigned 64 bits
+ *
+ * @param[in] ptr
+ * pointer to address
+ *
+ * @return
+ * 64 bit unsigned long type of pointer address
+ */
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64)(unsigned long)ptr;
+}
+
+/**
+ * Call BPF system call
+ *
+ * @param[in] cmd
+ * BPF command for program loading, map creation, map entry update, etc
+ *
+ * @param[in] attr
+ * System call attributes relevant to system call command
+ *
+ * @param[in] size
+ * size of attr parameter
+ *
+ * @return
+ * -1 if BPF system call failed, 0 otherwise
+ */
+static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
+ unsigned int size)
+{
+ return syscall(__NR_bpf, cmd, attr, size);
+}
+
+/**
+ * Load BPF instructions to kernel
+ *
+ * @param[in] type
+ * BPF program type: classifieir or action
+ *
+ * @param[in] insns
+ * Array of BPF instructions (equivalent to BPF instructions)
+ *
+ * @param[in] insns_cnt
+ * Number of BPF instructions (size of array)
+ *
+ * @param[in] lincense
+ * License string that must be acknowledged by the kernel
+ *
+ * @return
+ * -1 if the BPF program couldn't be loaded, fd (file descriptor) otherwise
+ */
+static int bpf_load(enum bpf_prog_type type,
+ const struct bpf_insn *insns,
+ size_t insns_cnt,
+ const char *license)
+{
+ union bpf_attr attr = {};
+
+ bzero(&attr, sizeof(attr));
+ attr.prog_type = type;
+ attr.insn_cnt = (__u32)insns_cnt;
+ attr.insns = ptr_to_u64(insns);
+ attr.license = ptr_to_u64(license);
+ attr.log_buf = ptr_to_u64(NULL);
+ attr.log_level = 0;
+ attr.kern_version = 0;
+
+ return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+}
+
+/**
+ * Create BPF map for RSS rules
+ *
+ * @param[in] key_size
+ * map RSS key size
+ *
+ * @param[in] value_size
+ * Map RSS value size
+ *
+ * @param[in] max_entries
+ * Map max number of RSS entries (limit on max RSS rules)
+ *
+ * @return
+ * -1 if BPF map couldn't be created, map fd otherwise
+ */
+int tap_flow_bpf_rss_map_create(unsigned int key_size,
+ unsigned int value_size,
+ unsigned int max_entries)
+{
+ union bpf_attr attr = {};
+
+ bzero(&attr, sizeof(attr));
+ attr.map_type = BPF_MAP_TYPE_HASH;
+ attr.key_size = key_size;
+ attr.value_size = value_size;
+ attr.max_entries = max_entries;
+
+ return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+}
+
+/**
+ * Update RSS entry in BPF map
+ *
+ * @param[in] fd
+ * RSS map fd
+ *
+ * @param[in] key
+ * Pointer to RSS key whose entry is updated
+ *
+ * @param[in] value
+ * Pointer to RSS new updated value
+ *
+ * @return
+ * -1 if RSS entry failed to be updated, 0 otherwise
+ */
+int tap_flow_bpf_update_rss_elem(int fd, void *key, void *value)
+{
+ union bpf_attr attr = {};
+
+ bzero(&attr, sizeof(attr));
+
+ attr.map_type = BPF_MAP_TYPE_HASH;
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.value = ptr_to_u64(value);
+ attr.flags = BPF_ANY;
+
+ return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
+}
diff --git a/src/spdk/dpdk/drivers/net/tap/tap_bpf_insns.h b/src/spdk/dpdk/drivers/net/tap/tap_bpf_insns.h
new file mode 100644
index 00000000..79e3e66b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/tap_bpf_insns.h
@@ -0,0 +1,1696 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <tap_bpf.h>
+
+/* bpf_insn array matching cls_q section. See tap_bpf_program.c file */
+struct bpf_insn cls_q_insns[] = {
+ {0x61, 2, 1, 52, 0x00000000},
+ {0x18, 3, 0, 0, 0xdeadbeef},
+ {0x00, 0, 0, 0, 0x00000000},
+ {0x63, 10, 3, -4, 0x00000000},
+ {0xb7, 0, 0, 0, 0x00000000},
+ {0x61, 3, 10, -4, 0x00000000},
+ {0x07, 3, 0, 0, 0x7cafe800},
+ {0x67, 3, 0, 0, 0x00000020},
+ {0x77, 3, 0, 0, 0x00000020},
+ {0x5d, 2, 3, 4, 0x00000000},
+ {0xb7, 2, 0, 0, 0x00000000},
+ {0x63, 1, 2, 52, 0x00000000},
+ {0x18, 0, 0, 0, 0xffffffff},
+ {0x00, 0, 0, 0, 0x00000000},
+ {0x95, 0, 0, 0, 0x00000000},
+};
+
+/* bpf_insn array matching l3_l4 section. see tap_bpf_program.c file */
+struct bpf_insn l3_l4_hash_insns[] = {
+ {0xbf, 7, 1, 0, 0x00000000},
+ {0x61, 8, 7, 16, 0x00000000},
+ {0x61, 6, 7, 76, 0x00000000},
+ {0x61, 9, 7, 80, 0x00000000},
+ {0x18, 1, 0, 0, 0xdeadbeef},
+ {0x00, 0, 0, 0, 0x00000000},
+ {0x63, 10, 1, -4, 0x00000000},
+ {0xbf, 2, 10, 0, 0x00000000},
+ {0x07, 2, 0, 0, 0xfffffffc},
+ {0x18, 1, 1, 0, 0x0000cafe},
+ {0x00, 0, 0, 0, 0x00000000},
+ {0x85, 0, 0, 0, 0x00000001},
+ {0x55, 0, 0, 21, 0x00000000},
+ {0xb7, 1, 0, 0, 0x00000a64},
+ {0x6b, 10, 1, -16, 0x00000000},
+ {0x18, 1, 0, 0, 0x69666e6f},
+ {0x00, 0, 0, 0, 0x65727567},
+ {0x7b, 10, 1, -24, 0x00000000},
+ {0x18, 1, 0, 0, 0x6e207369},
+ {0x00, 0, 0, 0, 0x6320746f},
+ {0x7b, 10, 1, -32, 0x00000000},
+ {0x18, 1, 0, 0, 0x20737372},
+ {0x00, 0, 0, 0, 0x2079656b},
+ {0x7b, 10, 1, -40, 0x00000000},
+ {0x18, 1, 0, 0, 0x68736168},
+ {0x00, 0, 0, 0, 0x203a2928},
+ {0x7b, 10, 1, -48, 0x00000000},
+ {0xb7, 7, 0, 0, 0x00000000},
+ {0x73, 10, 7, -14, 0x00000000},
+ {0xbf, 1, 10, 0, 0x00000000},
+ {0x07, 1, 0, 0, 0xffffffd0},
+ {0xb7, 2, 0, 0, 0x00000023},
+ {0x85, 0, 0, 0, 0x00000006},
+ {0x05, 0, 0, 1632, 0x00000000},
+ {0xb7, 1, 0, 0, 0x0000000e},
+ {0x61, 2, 7, 20, 0x00000000},
+ {0x15, 2, 0, 10, 0x00000000},
+ {0x61, 2, 7, 28, 0x00000000},
+ {0x55, 2, 0, 8, 0x0000a888},
+ {0xbf, 2, 7, 0, 0x00000000},
+ {0xb7, 7, 0, 0, 0x00000000},
+ {0xbf, 1, 6, 0, 0x00000000},
+ {0x07, 1, 0, 0, 0x00000012},
+ {0x2d, 1, 9, 1622, 0x00000000},
+ {0xb7, 1, 0, 0, 0x00000012},
+ {0x69, 8, 6, 16, 0x00000000},
+ {0xbf, 7, 2, 0, 0x00000000},
+ {0x7b, 10, 7, -56, 0x00000000},
+ {0x57, 8, 0, 0, 0x0000ffff},
+ {0x15, 8, 0, 409, 0x0000dd86},
+ {0xb7, 7, 0, 0, 0x00000003},
+ {0x55, 8, 0, 1614, 0x00000008},
+ {0x0f, 6, 1, 0, 0x00000000},
+ {0xb7, 7, 0, 0, 0x00000000},
+ {0xbf, 1, 6, 0, 0x00000000},
+ {0x07, 1, 0, 0, 0x00000018},
+ {0x2d, 1, 9, 1609, 0x00000000},
+ {0x71, 3, 6, 12, 0x00000000},
+ {0xbf, 1, 3, 0, 0x00000000},
+ {0x67, 1, 0, 0, 0x00000038},
+ {0xc7, 1, 0, 0, 0x00000020},
+ {0x77, 1, 0, 0, 0x0000001f},
+ {0x57, 1, 0, 0, 0x2cc681d1},
+ {0x67, 3, 0, 0, 0x00000018},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x40000000},
+ {0xb7, 2, 0, 0, 0x00000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x598d03a2},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x20000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb31a0745},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x10000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x66340e8a},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x08000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xcc681d15},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x04000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x98d03a2b},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x02000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x31a07456},
+ {0x57, 3, 0, 0, 0x01000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6340e8ad},
+ {0x71, 3, 6, 13, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000010},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00800000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc681d15b},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00400000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8d03a2b7},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00200000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1a07456f},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00100000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x340e8ade},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00080000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x681d15bd},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00040000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd03a2b7b},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00020000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa07456f6},
+ {0x57, 3, 0, 0, 0x00010000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x40e8aded},
+ {0x71, 3, 6, 14, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000008},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00008000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x81d15bdb},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00004000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x03a2b7b7},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00002000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x07456f6f},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00001000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0e8adedf},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000800},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1d15bdbf},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000400},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3a2b7b7e},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000200},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7456f6fd},
+ {0x57, 3, 0, 0, 0x00000100},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe8adedfa},
+ {0x71, 3, 6, 15, 0x00000000},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000080},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd15bdbf4},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000040},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa2b7b7e9},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000020},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x456f6fd3},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000010},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8adedfa7},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000008},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x15bdbf4f},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000004},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2b7b7e9e},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000002},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x56f6fd3d},
+ {0x57, 3, 0, 0, 0x00000001},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xadedfa7b},
+ {0x71, 4, 6, 16, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000038},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0xb7, 3, 0, 0, 0xffffffff},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x5bdbf4f7},
+ {0x67, 4, 0, 0, 0x00000018},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb7b7e9ef},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6f6fd3df},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdedfa7bf},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbdbf4f7f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7b7e9eff},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf6fd3dff},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xedfa7bfe},
+ {0x71, 4, 6, 17, 0x00000000},
+ {0x67, 4, 0, 0, 0x00000010},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdbf4f7fc},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb7e9eff9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6fd3dff2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdfa7bfe5},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbf4f7fca},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7e9eff94},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfd3dff28},
+ {0x57, 4, 0, 0, 0x00010000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfa7bfe51},
+ {0x71, 4, 6, 18, 0x00000000},
+ {0x67, 4, 0, 0, 0x00000008},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf4f7fca2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe9eff945},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd3dff28a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa7bfe514},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4f7fca28},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9eff9450},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3dff28a0},
+ {0x57, 4, 0, 0, 0x00000100},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7bfe5141},
+ {0x71, 4, 6, 19, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf7fca283},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xeff94506},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdff28a0c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbfe51418},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7fca2831},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xff945063},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xff28a0c6},
+ {0x57, 4, 0, 0, 0x00000001},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfe51418c},
+ {0x71, 4, 6, 20, 0x00000000},
+ {0x67, 4, 0, 0, 0x00000008},
+ {0x71, 5, 6, 21, 0x00000000},
+ {0x4f, 4, 5, 0, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000030},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfca28319},
+ {0x67, 4, 0, 0, 0x00000010},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x40000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf9450633},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x20000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf28a0c67},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x10000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe51418ce},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x08000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xca28319d},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x04000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9450633b},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x02000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x28a0c676},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x01000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x51418ced},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00800000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa28319db},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00400000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x450633b6},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00200000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8a0c676c},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00100000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1418ced8},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00080000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x28319db1},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00040000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x50633b63},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00020000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa0c676c6},
+ {0x57, 4, 0, 0, 0x00010000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x418ced8d},
+ {0x71, 3, 6, 22, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000008},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00008000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8319db1a},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00004000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0633b634},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00002000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0c676c68},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00001000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x18ced8d1},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000800},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x319db1a3},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000400},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x633b6347},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000200},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc676c68f},
+ {0x57, 3, 0, 0, 0x00000100},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8ced8d1f},
+ {0x71, 3, 6, 23, 0x00000000},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000080},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x19db1a3e},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000040},
+ {0x79, 5, 10, -56, 0x00000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x33b6347d},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000020},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x676c68fa},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000010},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xced8d1f4},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000008},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9db1a3e9},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000004},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3b6347d2},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000002},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x76c68fa5},
+ {0x57, 3, 0, 0, 0x00000001},
+ {0x1d, 3, 2, 1177, 0x00000000},
+ {0xa7, 1, 0, 0, 0xed8d1f4a},
+ {0x05, 0, 0, 1175, 0x00000000},
+ {0x0f, 6, 1, 0, 0x00000000},
+ {0xb7, 7, 0, 0, 0x00000000},
+ {0xbf, 1, 6, 0, 0x00000000},
+ {0x07, 1, 0, 0, 0x0000002c},
+ {0x2d, 1, 9, 1202, 0x00000000},
+ {0x61, 4, 6, 8, 0x00000000},
+ {0xbf, 1, 4, 0, 0x00000000},
+ {0x67, 1, 0, 0, 0x00000038},
+ {0xc7, 1, 0, 0, 0x00000020},
+ {0x77, 1, 0, 0, 0x0000001f},
+ {0x57, 1, 0, 0, 0x2cc681d1},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000040},
+ {0xb7, 2, 0, 0, 0x00000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x598d03a2},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000020},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb31a0745},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000010},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x66340e8a},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000008},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xcc681d15},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000004},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x98d03a2b},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000002},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x31a07456},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000001},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6340e8ad},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00008000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc681d15b},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00004000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8d03a2b7},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00002000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1a07456f},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00001000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x340e8ade},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000800},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x681d15bd},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000400},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd03a2b7b},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000200},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa07456f6},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000100},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x40e8aded},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00800000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x81d15bdb},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00400000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x03a2b7b7},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00200000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x07456f6f},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00100000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0e8adedf},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00080000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1d15bdbf},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00040000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3a2b7b7e},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00020000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7456f6fd},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00010000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe8adedfa},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0xb7, 3, 0, 0, 0xffffffff},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd15bdbf4},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa2b7b7e9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x456f6fd3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8adedfa7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x15bdbf4f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2b7b7e9e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x56f6fd3d},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xadedfa7b},
+ {0x61, 4, 6, 12, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x5bdbf4f7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb7b7e9ef},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6f6fd3df},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdedfa7bf},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbdbf4f7f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7b7e9eff},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf6fd3dff},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xedfa7bfe},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdbf4f7fc},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb7e9eff9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6fd3dff2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdfa7bfe5},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbf4f7fca},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7e9eff94},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfd3dff28},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfa7bfe51},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf4f7fca2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe9eff945},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd3dff28a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa7bfe514},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4f7fca28},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9eff9450},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3dff28a0},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7bfe5141},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf7fca283},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xeff94506},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdff28a0c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbfe51418},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7fca2831},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xff945063},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xff28a0c6},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfe51418c},
+ {0x61, 4, 6, 16, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfca28319},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf9450633},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf28a0c67},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe51418ce},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xca28319d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9450633b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x28a0c676},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x51418ced},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa28319db},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x450633b6},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8a0c676c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1418ced8},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x28319db1},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x50633b63},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa0c676c6},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x418ced8d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8319db1a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0633b634},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0c676c68},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x18ced8d1},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x319db1a3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x633b6347},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc676c68f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8ced8d1f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x19db1a3e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x33b6347d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x676c68fa},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xced8d1f4},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9db1a3e9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3b6347d2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x76c68fa5},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xed8d1f4a},
+ {0x61, 4, 6, 20, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdb1a3e94},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb6347d28},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6c68fa51},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd8d1f4a3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb1a3e946},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6347d28d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc68fa51a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8d1f4a35},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1a3e946b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x347d28d7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x68fa51ae},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd1f4a35c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa3e946b9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x47d28d73},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8fa51ae7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1f4a35cf},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3e946b9e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7d28d73c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfa51ae78},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf4a35cf1},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe946b9e3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd28d73c7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa51ae78e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4a35cf1c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x946b9e38},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x28d73c71},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x51ae78e3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa35cf1c6},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x46b9e38d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8d73c71b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1ae78e36},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x35cf1c6c},
+ {0x61, 4, 6, 24, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6b9e38d9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd73c71b2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xae78e364},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x5cf1c6c9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb9e38d92},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x73c71b25},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe78e364b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xcf1c6c96},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9e38d92c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3c71b259},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x78e364b2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf1c6c964},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe38d92c9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc71b2593},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8e364b27},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1c6c964e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x38d92c9c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x71b25938},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe364b270},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc6c964e0},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8d92c9c0},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1b259380},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x364b2700},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6c964e01},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd92c9c03},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb2593807},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x64b2700f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc964e01e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x92c9c03d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2593807a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4b2700f4},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x964e01e8},
+ {0x61, 4, 6, 28, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2c9c03d1},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x593807a3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb2700f46},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x64e01e8d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc9c03d1a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x93807a35},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2700f46b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4e01e8d6},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9c03d1ad},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3807a35b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x700f46b6},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe01e8d6c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc03d1ad9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x807a35b3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x00f46b66},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x01e8d6cc},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x03d1ad99},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x07a35b32},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0f46b665},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1e8d6cca},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3d1ad994},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7a35b328},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf46b6651},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe8d6cca2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd1ad9944},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa35b3289},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x46b66512},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8d6cca25},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1ad9944a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x35b32894},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6b665129},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd6cca253},
+ {0x61, 4, 6, 32, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xad9944a7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x5b32894f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb665129f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6cca253e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd9944a7d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb32894fb},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x665129f6},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xcca253ec},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9944a7d9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x32894fb2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x65129f65},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xca253eca},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x944a7d95},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2894fb2a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x5129f655},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa253ecab},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x44a7d956},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x894fb2ac},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x129f6558},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x253ecab1},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4a7d9563},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x94fb2ac7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x29f6558f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x53ecab1e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa7d9563d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4fb2ac7a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9f6558f5},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3ecab1ea},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7d9563d5},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfb2ac7ab},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf6558f56},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xecab1eac},
+ {0x61, 4, 6, 36, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd9563d59},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb2ac7ab2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6558f564},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xcab1eac8},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9563d590},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2ac7ab20},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x558f5641},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xab1eac83},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x563d5906},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xac7ab20c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x58f56418},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb1eac831},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x63d59063},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc7ab20c7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8f56418f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1eac831e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3d59063c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7ab20c78},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf56418f0},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xeac831e1},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd59063c2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xab20c784},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x56418f09},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xac831e12},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x59063c25},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb20c784b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6418f097},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc831e12f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9063c25f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x20c784be},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x418f097c},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x831e12f9},
+ {0x71, 4, 6, 40, 0x00000000},
+ {0x67, 4, 0, 0, 0x00000008},
+ {0x71, 5, 6, 41, 0x00000000},
+ {0x4f, 4, 5, 0, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000030},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x063c25f3},
+ {0x67, 4, 0, 0, 0x00000010},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x40000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0c784be7},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x20000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x18f097cf},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x10000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x31e12f9f},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x08000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x63c25f3f},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x04000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc784be7f},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x02000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8f097cff},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x01000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1e12f9fe},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00800000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3c25f3fc},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00400000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x784be7f8},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00200000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf097cff0},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00100000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe12f9fe0},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00080000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc25f3fc1},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00040000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x84be7f83},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00020000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x097cff07},
+ {0x57, 4, 0, 0, 0x00010000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x12f9fe0f},
+ {0x71, 3, 6, 42, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000008},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00008000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x25f3fc1f},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00004000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4be7f83f},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00002000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x97cff07f},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00001000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2f9fe0fe},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000800},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x5f3fc1fd},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000400},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbe7f83fb},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000200},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7cff07f7},
+ {0x57, 3, 0, 0, 0x00000100},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf9fe0fee},
+ {0x71, 3, 6, 43, 0x00000000},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000080},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf3fc1fdc},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000040},
+ {0x79, 5, 10, -56, 0x00000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe7f83fb8},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000020},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xcff07f70},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000010},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9fe0fee1},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000008},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3fc1fdc2},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000004},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7f83fb85},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000002},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xff07f70a},
+ {0x57, 3, 0, 0, 0x00000001},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfe0fee15},
+ {0x71, 2, 0, 201, 0x00000000},
+ {0x67, 2, 0, 0, 0x00000008},
+ {0x71, 3, 0, 200, 0x00000000},
+ {0x4f, 2, 3, 0, 0x00000000},
+ {0x71, 3, 0, 203, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000008},
+ {0x71, 4, 0, 202, 0x00000000},
+ {0x4f, 3, 4, 0, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000010},
+ {0x4f, 3, 2, 0, 0x00000000},
+ {0x67, 1, 0, 0, 0x00000020},
+ {0x77, 1, 0, 0, 0x00000020},
+ {0xbf, 2, 1, 0, 0x00000000},
+ {0x3f, 2, 3, 0, 0x00000000},
+ {0x2f, 2, 3, 0, 0x00000000},
+ {0x1f, 1, 2, 0, 0x00000000},
+ {0x57, 1, 0, 0, 0x0000000f},
+ {0x67, 1, 0, 0, 0x00000002},
+ {0x0f, 0, 1, 0, 0x00000000},
+ {0x71, 1, 0, 137, 0x00000000},
+ {0x67, 1, 0, 0, 0x00000008},
+ {0x71, 2, 0, 136, 0x00000000},
+ {0x4f, 1, 2, 0, 0x00000000},
+ {0x71, 2, 0, 138, 0x00000000},
+ {0x71, 3, 0, 139, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000008},
+ {0x4f, 3, 2, 0, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000010},
+ {0x4f, 3, 1, 0, 0x00000000},
+ {0x07, 3, 0, 0, 0x7cafe800},
+ {0x63, 5, 3, 52, 0x00000000},
+ {0xb7, 7, 0, 0, 0x00000001},
+ {0xbf, 0, 7, 0, 0x00000000},
+ {0x95, 0, 0, 0, 0x00000000},
+};
diff --git a/src/spdk/dpdk/drivers/net/tap/tap_bpf_program.c b/src/spdk/dpdk/drivers/net/tap/tap_bpf_program.c
new file mode 100644
index 00000000..1cb73822
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/tap_bpf_program.c
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <asm/types.h>
+#include <linux/in.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_tunnel.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+
+#include "tap_rss.h"
+
+/** Create IPv4 address */
+#define IPv4(a, b, c, d) ((__u32)(((a) & 0xff) << 24) | \
+ (((b) & 0xff) << 16) | \
+ (((c) & 0xff) << 8) | \
+ ((d) & 0xff))
+
+#define PORT(a, b) ((__u16)(((a) & 0xff) << 8) | \
+ ((b) & 0xff))
+
+/*
+ * The queue number is offset by a unique QUEUE_OFFSET, to distinguish
+ * packets that have gone through this rule (skb->cb[1] != 0) from others.
+ */
+#define QUEUE_OFFSET 0x7cafe800
+#define PIN_GLOBAL_NS 2
+
+#define KEY_IDX 0
+#define BPF_MAP_ID_KEY 1
+
+struct vlan_hdr {
+ __be16 proto;
+ __be16 tci;
+};
+
+struct bpf_elf_map __attribute__((section("maps"), used))
+map_keys = {
+ .type = BPF_MAP_TYPE_HASH,
+ .id = BPF_MAP_ID_KEY,
+ .size_key = sizeof(__u32),
+ .size_value = sizeof(struct rss_key),
+ .max_elem = 256,
+ .pinning = PIN_GLOBAL_NS,
+};
+
+__section("cls_q") int
+match_q(struct __sk_buff *skb)
+{
+ __u32 queue = skb->cb[1];
+ volatile __u32 q = 0xdeadbeef;
+ __u32 match_queue = QUEUE_OFFSET + q;
+
+ /* printt("match_q$i() queue = %d\n", queue); */
+
+ if (queue != match_queue)
+ return TC_ACT_OK;
+
+ /* queue match */
+ skb->cb[1] = 0;
+ return TC_ACT_UNSPEC;
+}
+
+
+struct ipv4_l3_l4_tuple {
+ __u32 src_addr;
+ __u32 dst_addr;
+ __u16 dport;
+ __u16 sport;
+} __attribute__((packed));
+
+struct ipv6_l3_l4_tuple {
+ __u8 src_addr[16];
+ __u8 dst_addr[16];
+ __u16 dport;
+ __u16 sport;
+} __attribute__((packed));
+
+static const __u8 def_rss_key[TAP_RSS_HASH_KEY_SIZE] = {
+ 0xd1, 0x81, 0xc6, 0x2c,
+ 0xf7, 0xf4, 0xdb, 0x5b,
+ 0x19, 0x83, 0xa2, 0xfc,
+ 0x94, 0x3e, 0x1a, 0xdb,
+ 0xd9, 0x38, 0x9e, 0x6b,
+ 0xd1, 0x03, 0x9c, 0x2c,
+ 0xa7, 0x44, 0x99, 0xad,
+ 0x59, 0x3d, 0x56, 0xd9,
+ 0xf3, 0x25, 0x3c, 0x06,
+ 0x2a, 0xdc, 0x1f, 0xfc,
+};
+
+static __u32 __attribute__((always_inline))
+rte_softrss_be(const __u32 *input_tuple, const uint8_t *rss_key,
+ __u8 input_len)
+{
+ __u32 i, j, hash = 0;
+#pragma unroll
+ for (j = 0; j < input_len; j++) {
+#pragma unroll
+ for (i = 0; i < 32; i++) {
+ if (input_tuple[j] & (1 << (31 - i))) {
+ hash ^= ((const __u32 *)def_rss_key)[j] << i |
+ (__u32)((uint64_t)
+ (((const __u32 *)def_rss_key)[j + 1])
+ >> (32 - i));
+ }
+ }
+ }
+ return hash;
+}
+
+static int __attribute__((always_inline))
+rss_l3_l4(struct __sk_buff *skb)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ __u16 proto = (__u16)skb->protocol;
+ __u32 key_idx = 0xdeadbeef;
+ __u32 hash;
+ struct rss_key *rsskey;
+ __u64 off = ETH_HLEN;
+ int j;
+ __u8 *key = 0;
+ __u32 len;
+ __u32 queue = 0;
+
+ rsskey = map_lookup_elem(&map_keys, &key_idx);
+ if (!rsskey) {
+ printt("hash(): rss key is not configured\n");
+ return TC_ACT_OK;
+ }
+ key = (__u8 *)rsskey->key;
+
+ /* Get correct proto for 802.1ad */
+ if (skb->vlan_present && skb->vlan_proto == htons(ETH_P_8021AD)) {
+ if (data + ETH_ALEN * 2 + sizeof(struct vlan_hdr) +
+ sizeof(proto) > data_end)
+ return TC_ACT_OK;
+ proto = *(__u16 *)(data + ETH_ALEN * 2 +
+ sizeof(struct vlan_hdr));
+ off += sizeof(struct vlan_hdr);
+ }
+
+ if (proto == htons(ETH_P_IP)) {
+ if (data + off + sizeof(struct iphdr) + sizeof(__u32)
+ > data_end)
+ return TC_ACT_OK;
+
+ __u8 *src_dst_addr = data + off + offsetof(struct iphdr, saddr);
+ __u8 *src_dst_port = data + off + sizeof(struct iphdr);
+ struct ipv4_l3_l4_tuple v4_tuple = {
+ .src_addr = IPv4(*(src_dst_addr + 0),
+ *(src_dst_addr + 1),
+ *(src_dst_addr + 2),
+ *(src_dst_addr + 3)),
+ .dst_addr = IPv4(*(src_dst_addr + 4),
+ *(src_dst_addr + 5),
+ *(src_dst_addr + 6),
+ *(src_dst_addr + 7)),
+ .sport = PORT(*(src_dst_port + 0),
+ *(src_dst_port + 1)),
+ .dport = PORT(*(src_dst_port + 2),
+ *(src_dst_port + 3)),
+ };
+ __u8 input_len = sizeof(v4_tuple) / sizeof(__u32);
+ if (rsskey->hash_fields & (1 << HASH_FIELD_IPV4_L3))
+ input_len--;
+ hash = rte_softrss_be((__u32 *)&v4_tuple, key, 3);
+ } else if (proto == htons(ETH_P_IPV6)) {
+ if (data + off + sizeof(struct ipv6hdr) +
+ sizeof(__u32) > data_end)
+ return TC_ACT_OK;
+ __u8 *src_dst_addr = data + off +
+ offsetof(struct ipv6hdr, saddr);
+ __u8 *src_dst_port = data + off +
+ sizeof(struct ipv6hdr);
+ struct ipv6_l3_l4_tuple v6_tuple;
+ for (j = 0; j < 4; j++)
+ *((uint32_t *)&v6_tuple.src_addr + j) =
+ __builtin_bswap32(*((uint32_t *)
+ src_dst_addr + j));
+ for (j = 0; j < 4; j++)
+ *((uint32_t *)&v6_tuple.dst_addr + j) =
+ __builtin_bswap32(*((uint32_t *)
+ src_dst_addr + 4 + j));
+ v6_tuple.sport = PORT(*(src_dst_port + 0),
+ *(src_dst_port + 1));
+ v6_tuple.dport = PORT(*(src_dst_port + 2),
+ *(src_dst_port + 3));
+
+ __u8 input_len = sizeof(v6_tuple) / sizeof(__u32);
+ if (rsskey->hash_fields & (1 << HASH_FIELD_IPV6_L3))
+ input_len--;
+ hash = rte_softrss_be((__u32 *)&v6_tuple, key, 9);
+ } else {
+ return TC_ACT_PIPE;
+ }
+
+ queue = rsskey->queues[(hash % rsskey->nb_queues) &
+ (TAP_MAX_QUEUES - 1)];
+ skb->cb[1] = QUEUE_OFFSET + queue;
+ /* printt(">>>>> rss_l3_l4 hash=0x%x queue=%u\n", hash, queue); */
+
+ return TC_ACT_RECLASSIFY;
+}
+
+#define RSS(L) \
+ __section(#L) int \
+ L ## _hash(struct __sk_buff *skb) \
+ { \
+ return rss_ ## L (skb); \
+ }
+
+RSS(l3_l4)
+
+BPF_LICENSE("Dual BSD/GPL");
diff --git a/src/spdk/dpdk/drivers/net/tap/tap_flow.c b/src/spdk/dpdk/drivers/net/tap/tap_flow.c
new file mode 100644
index 00000000..0e01af62
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/tap_flow.c
@@ -0,0 +1,2191 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/queue.h>
+#include <sys/resource.h>
+
+#include <rte_byteorder.h>
+#include <rte_jhash.h>
+#include <rte_malloc.h>
+#include <rte_eth_tap.h>
+#include <tap_flow.h>
+#include <tap_autoconf.h>
+#include <tap_tcmsgs.h>
+#include <tap_rss.h>
+
+#ifndef HAVE_TC_FLOWER
+/*
+ * For kernels < 4.2, this enum is not defined. Runtime checks will be made to
+ * avoid sending TC messages the kernel cannot understand.
+ */
+enum {
+ TCA_FLOWER_UNSPEC,
+ TCA_FLOWER_CLASSID,
+ TCA_FLOWER_INDEV,
+ TCA_FLOWER_ACT,
+ TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_TYPE, /* be16 */
+ TCA_FLOWER_KEY_IP_PROTO, /* u8 */
+ TCA_FLOWER_KEY_IPV4_SRC, /* be32 */
+ TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_TCP_SRC, /* be16 */
+ TCA_FLOWER_KEY_TCP_DST, /* be16 */
+ TCA_FLOWER_KEY_UDP_SRC, /* be16 */
+ TCA_FLOWER_KEY_UDP_DST, /* be16 */
+};
+#endif
+#ifndef HAVE_TC_VLAN_ID
+enum {
+ /* TCA_FLOWER_FLAGS, */
+ TCA_FLOWER_KEY_VLAN_ID = TCA_FLOWER_KEY_UDP_DST + 2, /* be16 */
+ TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
+};
+#endif
+/*
+ * For kernels < 4.2 BPF related enums may not be defined.
+ * Runtime checks will be carried out to gracefully report on TC messages that
+ * are rejected by the kernel. Rejection reasons may be due to:
+ * 1. enum is not defined
+ * 2. enum is defined but kernel is not configured to support BPF system calls,
+ * BPF classifications or BPF actions.
+ */
+#ifndef HAVE_TC_BPF
+enum {
+ TCA_BPF_UNSPEC,
+ TCA_BPF_ACT,
+ TCA_BPF_POLICE,
+ TCA_BPF_CLASSID,
+ TCA_BPF_OPS_LEN,
+ TCA_BPF_OPS,
+};
+#endif
+#ifndef HAVE_TC_BPF_FD
+enum {
+ TCA_BPF_FD = TCA_BPF_OPS + 1,
+ TCA_BPF_NAME,
+};
+#endif
+#ifndef HAVE_TC_ACT_BPF
+#define tc_gen \
+ __u32 index; \
+ __u32 capab; \
+ int action; \
+ int refcnt; \
+ int bindcnt
+
+struct tc_act_bpf {
+ tc_gen;
+};
+
+enum {
+ TCA_ACT_BPF_UNSPEC,
+ TCA_ACT_BPF_TM,
+ TCA_ACT_BPF_PARMS,
+ TCA_ACT_BPF_OPS_LEN,
+ TCA_ACT_BPF_OPS,
+};
+
+#endif
+#ifndef HAVE_TC_ACT_BPF_FD
+enum {
+ TCA_ACT_BPF_FD = TCA_ACT_BPF_OPS + 1,
+ TCA_ACT_BPF_NAME,
+};
+#endif
+
+/* RSS key management */
+enum bpf_rss_key_e {
+ KEY_CMD_GET = 1,
+ KEY_CMD_RELEASE,
+ KEY_CMD_INIT,
+ KEY_CMD_DEINIT,
+};
+
+enum key_status_e {
+ KEY_STAT_UNSPEC,
+ KEY_STAT_USED,
+ KEY_STAT_AVAILABLE,
+};
+
+#define ISOLATE_HANDLE 1
+#define REMOTE_PROMISCUOUS_HANDLE 2
+
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
+ struct rte_flow *remote_flow; /* associated remote flow */
+ int bpf_fd[SEC_MAX]; /* list of bfs fds per ELF section */
+ uint32_t key_idx; /* RSS rule key index into BPF map */
+ struct nlmsg msg;
+};
+
+struct convert_data {
+ uint16_t eth_type;
+ uint16_t ip_proto;
+ uint8_t vlan;
+ struct rte_flow *flow;
+};
+
+struct remote_rule {
+ struct rte_flow_attr attr;
+ struct rte_flow_item items[2];
+ struct rte_flow_action actions[2];
+ int mirred;
+};
+
+struct action_data {
+ char id[16];
+
+ union {
+ struct tc_gact gact;
+ struct tc_mirred mirred;
+ struct skbedit {
+ struct tc_skbedit skbedit;
+ uint16_t queue;
+ } skbedit;
+ struct bpf {
+ struct tc_act_bpf bpf;
+ int bpf_fd;
+ const char *annotation;
+ } bpf;
+ };
+};
+
+static int tap_flow_create_eth(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_vlan(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_ipv4(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_ipv6(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_udp(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_tcp(const struct rte_flow_item *item, void *data);
+static int
+tap_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+static struct rte_flow *
+tap_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+static void
+tap_flow_free(struct pmd_internals *pmd,
+ struct rte_flow *flow);
+
+static int
+tap_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+
+static int
+tap_flow_isolate(struct rte_eth_dev *dev,
+ int set,
+ struct rte_flow_error *error);
+
+static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx);
+static int rss_enable(struct pmd_internals *pmd,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
+ const struct rte_flow_action_rss *rss,
+ struct rte_flow_error *error);
+
+static const struct rte_flow_ops tap_flow_ops = {
+ .validate = tap_flow_validate,
+ .create = tap_flow_create,
+ .destroy = tap_flow_destroy,
+ .flush = tap_flow_flush,
+ .isolate = tap_flow_isolate,
+};
+
+/* Static initializer for items. */
+#define ITEMS(...) \
+ (const enum rte_flow_item_type []){ \
+ __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
+ }
+
+/* Structure to generate a simple graph of layers supported by the NIC. */
+struct tap_flow_items {
+ /* Bit-mask corresponding to what is supported for this item. */
+ const void *mask;
+ const unsigned int mask_sz; /* Bit-mask size in bytes. */
+ /*
+ * Bit-mask corresponding to the default mask, if none is provided
+ * along with the item.
+ */
+ const void *default_mask;
+ /**
+ * Conversion function from rte_flow to netlink attributes.
+ *
+ * @param item
+ * rte_flow item to convert.
+ * @param data
+ * Internal structure to store the conversion.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+ int (*convert)(const struct rte_flow_item *item, void *data);
+ /** List of possible following items. */
+ const enum rte_flow_item_type *const items;
+};
+
+/* Graph of supported items and associated actions. */
+static const struct tap_flow_items tap_flow_items[] = {
+ [RTE_FLOW_ITEM_TYPE_END] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+ },
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .items = ITEMS(
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6),
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = -1,
+ },
+ .mask_sz = sizeof(struct rte_flow_item_eth),
+ .default_mask = &rte_flow_item_eth_mask,
+ .convert = tap_flow_create_eth,
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6),
+ .mask = &(const struct rte_flow_item_vlan){
+ /* DEI matching is not supported */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ .tci = 0xffef,
+#else
+ .tci = 0xefff,
+#endif
+ .inner_type = -1,
+ },
+ .mask_sz = sizeof(struct rte_flow_item_vlan),
+ .default_mask = &rte_flow_item_vlan_mask,
+ .convert = tap_flow_create_vlan,
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .mask = &(const struct rte_flow_item_ipv4){
+ .hdr = {
+ .src_addr = -1,
+ .dst_addr = -1,
+ .next_proto_id = -1,
+ },
+ },
+ .mask_sz = sizeof(struct rte_flow_item_ipv4),
+ .default_mask = &rte_flow_item_ipv4_mask,
+ .convert = tap_flow_create_ipv4,
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .mask = &(const struct rte_flow_item_ipv6){
+ .hdr = {
+ .src_addr = {
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
+ .dst_addr = {
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
+ .proto = -1,
+ },
+ },
+ .mask_sz = sizeof(struct rte_flow_item_ipv6),
+ .default_mask = &rte_flow_item_ipv6_mask,
+ .convert = tap_flow_create_ipv6,
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .mask = &(const struct rte_flow_item_udp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .mask_sz = sizeof(struct rte_flow_item_udp),
+ .default_mask = &rte_flow_item_udp_mask,
+ .convert = tap_flow_create_udp,
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .mask = &(const struct rte_flow_item_tcp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .mask_sz = sizeof(struct rte_flow_item_tcp),
+ .default_mask = &rte_flow_item_tcp_mask,
+ .convert = tap_flow_create_tcp,
+ },
+};
+
+/*
+ * TC rules, by growing priority
+ *
+ * Remote netdevice Tap netdevice
+ * +-------------+-------------+ +-------------+-------------+
+ * | Ingress | Egress | | Ingress | Egress |
+ * |-------------|-------------| |-------------|-------------|
+ * | | \ / | | | REMOTE TX | prio 1
+ * | | \ / | | | \ / | prio 2
+ * | EXPLICIT | \ / | | EXPLICIT | \ / | .
+ * | | \ / | | | \ / | .
+ * | RULES | X | | RULES | X | .
+ * | . | / \ | | . | / \ | .
+ * | . | / \ | | . | / \ | .
+ * | . | / \ | | . | / \ | .
+ * | . | / \ | | . | / \ | .
+ *
+ * .... .... .... ....
+ *
+ * | . | \ / | | . | \ / | .
+ * | . | \ / | | . | \ / | .
+ * | | \ / | | | \ / |
+ * | LOCAL_MAC | \ / | | \ / | \ / | last prio - 5
+ * | PROMISC | X | | \ / | X | last prio - 4
+ * | ALLMULTI | / \ | | X | / \ | last prio - 3
+ * | BROADCAST | / \ | | / \ | / \ | last prio - 2
+ * | BROADCASTV6 | / \ | | / \ | / \ | last prio - 1
+ * | xx | / \ | | ISOLATE | / \ | last prio
+ * +-------------+-------------+ +-------------+-------------+
+ *
+ * The implicit flow rules are stored in a list in with mandatorily the last two
+ * being the ISOLATE and REMOTE_TX rules. e.g.:
+ *
+ * LOCAL_MAC -> BROADCAST -> BROADCASTV6 -> REMOTE_TX -> ISOLATE -> NULL
+ *
+ * That enables tap_flow_isolate() to remove implicit rules by popping the list
+ * head and remove it as long as it applies on the remote netdevice. The
+ * implicit rule for TX redirection is not removed, as isolate concerns only
+ * incoming traffic.
+ */
+
+static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
+ [TAP_REMOTE_LOCAL_MAC] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_LOCAL_MAC,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_REDIR,
+ },
+ [TAP_REMOTE_BROADCAST] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_BROADCAST,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ .spec = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+ [TAP_REMOTE_BROADCASTV6] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_BROADCASTV6,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
+ },
+ .spec = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
+ },
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+ [TAP_REMOTE_PROMISC] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_PROMISC,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_VOID,
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+ [TAP_REMOTE_ALLMULTI] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_ALLMULTI,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ },
+ .spec = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ },
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+ [TAP_REMOTE_TX] = {
+ .attr = {
+ .group = 0,
+ .priority = TAP_REMOTE_TX,
+ .egress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_VOID,
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+ [TAP_ISOLATE] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_ISOLATE,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_VOID,
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+};
+
+/**
+ * Make as much checks as possible on an Ethernet item, and if a flow is
+ * provided, fill it appropriately with Ethernet info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_eth(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_ETH].default_mask;
+ /* TC does not support eth_type masking. Only accept if exact match. */
+ if (mask->type && mask->type != 0xffff)
+ return -1;
+ if (!spec)
+ return 0;
+ /* store eth_type for consistency if ipv4/6 pattern item comes next */
+ if (spec->type & mask->type)
+ info->eth_type = spec->type;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ if (!is_zero_ether_addr(&mask->dst)) {
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
+ &spec->dst.addr_bytes);
+ tap_nlattr_add(&msg->nh,
+ TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
+ &mask->dst.addr_bytes);
+ }
+ if (!is_zero_ether_addr(&mask->src)) {
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
+ &spec->src.addr_bytes);
+ tap_nlattr_add(&msg->nh,
+ TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
+ &mask->src.addr_bytes);
+ }
+ return 0;
+}
+
+/**
+ * Make as much checks as possible on a VLAN item, and if a flow is provided,
+ * fill it appropriately with VLAN info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask;
+ /* Outer TPID cannot be matched. */
+ if (info->eth_type)
+ return -1;
+ /* Double-tagging not supported. */
+ if (info->vlan)
+ return -1;
+ info->vlan = 1;
+ if (mask->inner_type) {
+ /* TC does not support partial eth_type masking */
+ if (mask->inner_type != RTE_BE16(0xffff))
+ return -1;
+ info->eth_type = spec->inner_type;
+ }
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_8021Q));
+#define VLAN_PRIO(tci) ((tci) >> 13)
+#define VLAN_ID(tci) ((tci) & 0xfff)
+ if (!spec)
+ return 0;
+ if (spec->tci) {
+ uint16_t tci = ntohs(spec->tci) & mask->tci;
+ uint16_t prio = VLAN_PRIO(tci);
+ uint8_t vid = VLAN_ID(tci);
+
+ if (prio)
+ tap_nlattr_add8(&msg->nh,
+ TCA_FLOWER_KEY_VLAN_PRIO, prio);
+ if (vid)
+ tap_nlattr_add16(&msg->nh,
+ TCA_FLOWER_KEY_VLAN_ID, vid);
+ }
+ return 0;
+}
+
+/**
+ * Make as much checks as possible on an IPv4 item, and if a flow is provided,
+ * fill it appropriately with IPv4 info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV4].default_mask;
+ /* check that previous eth type is compatible with ipv4 */
+ if (info->eth_type && info->eth_type != htons(ETH_P_IP))
+ return -1;
+ /* store ip_proto for consistency if udp/tcp pattern item comes next */
+ if (spec)
+ info->ip_proto = spec->hdr.next_proto_id;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ if (!info->eth_type)
+ info->eth_type = htons(ETH_P_IP);
+ if (!spec)
+ return 0;
+ if (mask->hdr.dst_addr) {
+ tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
+ spec->hdr.dst_addr);
+ tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
+ mask->hdr.dst_addr);
+ }
+ if (mask->hdr.src_addr) {
+ tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
+ spec->hdr.src_addr);
+ tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
+ mask->hdr.src_addr);
+ }
+ if (spec->hdr.next_proto_id)
+ tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO,
+ spec->hdr.next_proto_id);
+ return 0;
+}
+
+/**
+ * Make as much checks as possible on an IPv6 item, and if a flow is provided,
+ * fill it appropriately with IPv6 info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ uint8_t empty_addr[16] = { 0 };
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV6].default_mask;
+ /* check that previous eth type is compatible with ipv6 */
+ if (info->eth_type && info->eth_type != htons(ETH_P_IPV6))
+ return -1;
+ /* store ip_proto for consistency if udp/tcp pattern item comes next */
+ if (spec)
+ info->ip_proto = spec->hdr.proto;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ if (!info->eth_type)
+ info->eth_type = htons(ETH_P_IPV6);
+ if (!spec)
+ return 0;
+ if (memcmp(mask->hdr.dst_addr, empty_addr, 16)) {
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
+ sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
+ sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
+ }
+ if (memcmp(mask->hdr.src_addr, empty_addr, 16)) {
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
+ sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
+ sizeof(mask->hdr.src_addr), &mask->hdr.src_addr);
+ }
+ if (spec->hdr.proto)
+ tap_nlattr_add8(&msg->nh,
+ TCA_FLOWER_KEY_IP_PROTO, spec->hdr.proto);
+ return 0;
+}
+
+/**
+ * Make as much checks as possible on a UDP item, and if a flow is provided,
+ * fill it appropriately with UDP info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_udp(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_UDP].default_mask;
+ /* check that previous ip_proto is compatible with udp */
+ if (info->ip_proto && info->ip_proto != IPPROTO_UDP)
+ return -1;
+ /* TC does not support UDP port masking. Only accept if exact match. */
+ if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
+ (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
+ return -1;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
+ if (!spec)
+ return 0;
+ if (mask->hdr.dst_port)
+ tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
+ spec->hdr.dst_port);
+ if (mask->hdr.src_port)
+ tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
+ spec->hdr.src_port);
+ return 0;
+}
+
+/**
+ * Make as much checks as possible on a TCP item, and if a flow is provided,
+ * fill it appropriately with TCP info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_TCP].default_mask;
+ /* check that previous ip_proto is compatible with tcp */
+ if (info->ip_proto && info->ip_proto != IPPROTO_TCP)
+ return -1;
+ /* TC does not support TCP port masking. Only accept if exact match. */
+ if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
+ (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
+ return -1;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
+ if (!spec)
+ return 0;
+ if (mask->hdr.dst_port)
+ tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
+ spec->hdr.dst_port);
+ if (mask->hdr.src_port)
+ tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
+ spec->hdr.src_port);
+ return 0;
+}
+
+/**
+ * Check support for a given item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param size
+ * Bit-Mask size in bytes.
+ * @param[in] supported_mask
+ * Bit-mask covering supported fields to compare with spec, last and mask in
+ * \item.
+ * @param[in] default_mask
+ * Bit-mask default mask if none is provided in \item.
+ *
+ * @return
+ * 0 on success.
+ */
+static int
+tap_flow_item_validate(const struct rte_flow_item *item,
+ unsigned int size,
+ const uint8_t *supported_mask,
+ const uint8_t *default_mask)
+{
+ int ret = 0;
+
+ /* An empty layer is allowed, as long as all fields are NULL */
+ if (!item->spec && (item->mask || item->last))
+ return -1;
+ /* Is the item spec compatible with what the NIC supports? */
+ if (item->spec && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->spec;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | supported_mask[i]) != supported_mask[i])
+ return -1;
+ /* Is the default mask compatible with what the NIC supports? */
+ for (i = 0; i < size; i++)
+ if ((default_mask[i] | supported_mask[i]) !=
+ supported_mask[i])
+ return -1;
+ }
+ /* Is the item last compatible with what the NIC supports? */
+ if (item->last && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->last;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | supported_mask[i]) != supported_mask[i])
+ return -1;
+ }
+ /* Is the item mask compatible with what the NIC supports? */
+ if (item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->mask;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | supported_mask[i]) != supported_mask[i])
+ return -1;
+ }
+ /**
+ * Once masked, Are item spec and item last equal?
+ * TC does not support range so anything else is invalid.
+ */
+ if (item->spec && item->last) {
+ uint8_t spec[size];
+ uint8_t last[size];
+ const uint8_t *apply = default_mask;
+ unsigned int i;
+
+ if (item->mask)
+ apply = item->mask;
+ for (i = 0; i < size; ++i) {
+ spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
+ last[i] = ((const uint8_t *)item->last)[i] & apply[i];
+ }
+ ret = memcmp(spec, last, size);
+ }
+ return ret;
+}
+
+/**
+ * Configure the kernel with a TC action and its configured parameters
+ * Handled actions: "gact", "mirred", "skbedit", "bpf"
+ *
+ * @param[in] flow
+ * Pointer to rte flow containing the netlink message
+ *
+ * @param[in, out] act_index
+ * Pointer to action sequence number in the TC command
+ *
+ * @param[in] adata
+ * Pointer to struct holding the action parameters
+ *
+ * @return
+ * -1 on failure, 0 on success
+ */
+static int
+add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata)
+{
+ struct nlmsg *msg = &flow->msg;
+
+ if (tap_nlattr_nested_start(msg, (*act_index)++) < 0)
+ return -1;
+
+ tap_nlattr_add(&msg->nh, TCA_ACT_KIND,
+ strlen(adata->id) + 1, adata->id);
+ if (tap_nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
+ return -1;
+ if (strcmp("gact", adata->id) == 0) {
+ tap_nlattr_add(&msg->nh, TCA_GACT_PARMS, sizeof(adata->gact),
+ &adata->gact);
+ } else if (strcmp("mirred", adata->id) == 0) {
+ if (adata->mirred.eaction == TCA_EGRESS_MIRROR)
+ adata->mirred.action = TC_ACT_PIPE;
+ else /* REDIRECT */
+ adata->mirred.action = TC_ACT_STOLEN;
+ tap_nlattr_add(&msg->nh, TCA_MIRRED_PARMS,
+ sizeof(adata->mirred),
+ &adata->mirred);
+ } else if (strcmp("skbedit", adata->id) == 0) {
+ tap_nlattr_add(&msg->nh, TCA_SKBEDIT_PARMS,
+ sizeof(adata->skbedit.skbedit),
+ &adata->skbedit.skbedit);
+ tap_nlattr_add16(&msg->nh, TCA_SKBEDIT_QUEUE_MAPPING,
+ adata->skbedit.queue);
+ } else if (strcmp("bpf", adata->id) == 0) {
+ tap_nlattr_add32(&msg->nh, TCA_ACT_BPF_FD, adata->bpf.bpf_fd);
+ tap_nlattr_add(&msg->nh, TCA_ACT_BPF_NAME,
+ strlen(adata->bpf.annotation) + 1,
+ adata->bpf.annotation);
+ tap_nlattr_add(&msg->nh, TCA_ACT_BPF_PARMS,
+ sizeof(adata->bpf.bpf),
+ &adata->bpf.bpf);
+ } else {
+ return -1;
+ }
+ tap_nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
+ tap_nlattr_nested_finish(msg); /* nested act_index */
+ return 0;
+}
+
+/**
+ * Helper function to send a serie of TC actions to the kernel
+ *
+ * @param[in] flow
+ * Pointer to rte flow containing the netlink message
+ *
+ * @param[in] nb_actions
+ * Number of actions in an array of action structs
+ *
+ * @param[in] data
+ * Pointer to an array of action structs
+ *
+ * @param[in] classifier_actions
+ * The classifier on behave of which the actions are configured
+ *
+ * @return
+ * -1 on failure, 0 on success
+ */
+static int
+add_actions(struct rte_flow *flow, int nb_actions, struct action_data *data,
+ int classifier_action)
+{
+ struct nlmsg *msg = &flow->msg;
+ size_t act_index = 1;
+ int i;
+
+ if (tap_nlattr_nested_start(msg, classifier_action) < 0)
+ return -1;
+ for (i = 0; i < nb_actions; i++)
+ if (add_action(flow, &act_index, data + i) < 0)
+ return -1;
+ tap_nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
+ return 0;
+}
+
+/**
+ * Validate a flow supported by TC.
+ * If flow param is not NULL, then also fill the netlink message inside.
+ *
+ * @param pmd
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] flow
+ * Flow structure to update.
+ * @param[in] mirred
+ * If set to TCA_EGRESS_REDIR, provided actions will be replaced with a
+ * redirection to the tap netdevice, and the TC rule will be configured
+ * on the remote netdevice in pmd.
+ * If set to TCA_EGRESS_MIRROR, provided actions will be replaced with a
+ * mirroring to the tap netdevice, and the TC rule will be configured
+ * on the remote netdevice in pmd. Matching packets will thus be duplicated.
+ * If set to 0, the standard behavior is to be used: set correct actions for
+ * the TC rule, and apply it on the tap netdevice.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_process(struct pmd_internals *pmd,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow,
+ int mirred)
+{
+ const struct tap_flow_items *cur_item = tap_flow_items;
+ struct convert_data data = {
+ .eth_type = 0,
+ .ip_proto = 0,
+ .flow = flow,
+ };
+ int action = 0; /* Only one action authorized for now */
+
+ if (attr->transfer) {
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL, "transfer is not supported");
+ return -rte_errno;
+ }
+ if (attr->group > MAX_GROUP) {
+ rte_flow_error_set(
+ error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL, "group value too big: cannot exceed 15");
+ return -rte_errno;
+ }
+ if (attr->priority > MAX_PRIORITY) {
+ rte_flow_error_set(
+ error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL, "priority value too big");
+ return -rte_errno;
+ } else if (flow) {
+ uint16_t group = attr->group << GROUP_SHIFT;
+ uint16_t prio = group | (attr->priority +
+ RSS_PRIORITY_OFFSET + PRIORITY_OFFSET);
+ flow->msg.t.tcm_info = TC_H_MAKE(prio << 16,
+ flow->msg.t.tcm_info);
+ }
+ if (flow) {
+ if (mirred) {
+ /*
+ * If attr->ingress, the rule applies on remote ingress
+ * to match incoming packets
+ * If attr->egress, the rule applies on tap ingress (as
+ * seen from the kernel) to deal with packets going out
+ * from the DPDK app.
+ */
+ flow->msg.t.tcm_parent = TC_H_MAKE(TC_H_INGRESS, 0);
+ } else {
+ /* Standard rule on tap egress (kernel standpoint). */
+ flow->msg.t.tcm_parent =
+ TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
+ }
+ /* use flower filter type */
+ tap_nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower");
+ if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0)
+ goto exit_item_not_supported;
+ }
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ const struct tap_flow_items *token = NULL;
+ unsigned int i;
+ int err = 0;
+
+ if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ for (i = 0;
+ cur_item->items &&
+ cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
+ ++i) {
+ if (cur_item->items[i] == items->type) {
+ token = &tap_flow_items[items->type];
+ break;
+ }
+ }
+ if (!token)
+ goto exit_item_not_supported;
+ cur_item = token;
+ err = tap_flow_item_validate(
+ items, cur_item->mask_sz,
+ (const uint8_t *)cur_item->mask,
+ (const uint8_t *)cur_item->default_mask);
+ if (err)
+ goto exit_item_not_supported;
+ if (flow && cur_item->convert) {
+ err = cur_item->convert(items, &data);
+ if (err)
+ goto exit_item_not_supported;
+ }
+ }
+ if (flow) {
+ if (data.vlan) {
+ tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
+ htons(ETH_P_8021Q));
+ tap_nlattr_add16(&flow->msg.nh,
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+ data.eth_type ?
+ data.eth_type : htons(ETH_P_ALL));
+ } else if (data.eth_type) {
+ tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
+ data.eth_type);
+ }
+ }
+ if (mirred && flow) {
+ struct action_data adata = {
+ .id = "mirred",
+ .mirred = {
+ .eaction = mirred,
+ },
+ };
+
+ /*
+ * If attr->egress && mirred, then this is a special
+ * case where the rule must be applied on the tap, to
+ * redirect packets coming from the DPDK App, out
+ * through the remote netdevice.
+ */
+ adata.mirred.ifindex = attr->ingress ? pmd->if_index :
+ pmd->remote_if_index;
+ if (mirred == TCA_EGRESS_MIRROR)
+ adata.mirred.action = TC_ACT_PIPE;
+ else
+ adata.mirred.action = TC_ACT_STOLEN;
+ if (add_actions(flow, 1, &adata, TCA_FLOWER_ACT) < 0)
+ goto exit_action_not_supported;
+ else
+ goto end;
+ }
+actions:
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+ int err = 0;
+
+ if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ continue;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ if (action)
+ goto exit_action_not_supported;
+ action = 1;
+ if (flow) {
+ struct action_data adata = {
+ .id = "gact",
+ .gact = {
+ .action = TC_ACT_SHOT,
+ },
+ };
+
+ err = add_actions(flow, 1, &adata,
+ TCA_FLOWER_ACT);
+ }
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_PASSTHRU) {
+ if (action)
+ goto exit_action_not_supported;
+ action = 1;
+ if (flow) {
+ struct action_data adata = {
+ .id = "gact",
+ .gact = {
+ /* continue */
+ .action = TC_ACT_UNSPEC,
+ },
+ };
+
+ err = add_actions(flow, 1, &adata,
+ TCA_FLOWER_ACT);
+ }
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *queue =
+ (const struct rte_flow_action_queue *)
+ actions->conf;
+
+ if (action)
+ goto exit_action_not_supported;
+ action = 1;
+ if (!queue ||
+ (queue->index > pmd->dev->data->nb_rx_queues - 1))
+ goto exit_action_not_supported;
+ if (flow) {
+ struct action_data adata = {
+ .id = "skbedit",
+ .skbedit = {
+ .skbedit = {
+ .action = TC_ACT_PIPE,
+ },
+ .queue = queue->index,
+ },
+ };
+
+ err = add_actions(flow, 1, &adata,
+ TCA_FLOWER_ACT);
+ }
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ const struct rte_flow_action_rss *rss =
+ (const struct rte_flow_action_rss *)
+ actions->conf;
+
+ if (action++)
+ goto exit_action_not_supported;
+
+ if (!pmd->rss_enabled) {
+ err = rss_enable(pmd, attr, error);
+ if (err)
+ goto exit_action_not_supported;
+ }
+ if (flow)
+ err = rss_add_actions(flow, pmd, rss, error);
+ } else {
+ goto exit_action_not_supported;
+ }
+ if (err)
+ goto exit_action_not_supported;
+ }
+ /* When fate is unknown, drop traffic. */
+ if (!action) {
+ static const struct rte_flow_action drop[] = {
+ { .type = RTE_FLOW_ACTION_TYPE_DROP, },
+ { .type = RTE_FLOW_ACTION_TYPE_END, },
+ };
+
+ actions = drop;
+ goto actions;
+ }
+end:
+ if (flow)
+ tap_nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
+ return 0;
+exit_item_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
+ return -rte_errno;
+exit_action_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "action not supported");
+ return -rte_errno;
+}
+
+
+
+/**
+ * Validate a flow.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+static int
+tap_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ return priv_flow_process(pmd, attr, items, actions, error, NULL, 0);
+}
+
+/**
+ * Set a unique handle in a flow.
+ *
+ * The kernel supports TC rules with equal priority, as long as they use the
+ * same matching fields (e.g.: dst mac and ipv4) with different values (and
+ * full mask to ensure no collision is possible).
+ * In those rules, the handle (uint32_t) is the part that would identify
+ * specifically each rule.
+ *
+ * On 32-bit architectures, the handle can simply be the flow's pointer address.
+ * On 64-bit architectures, we rely on jhash(flow) to find a (sufficiently)
+ * unique handle.
+ *
+ * @param[in, out] flow
+ * The flow that needs its handle set.
+ */
+static void
+tap_flow_set_handle(struct rte_flow *flow)
+{
+ uint32_t handle = 0;
+
+ if (sizeof(flow) > 4)
+ handle = rte_jhash(&flow, sizeof(flow), 1);
+ else
+ handle = (uintptr_t)flow;
+ /* must be at least 1 to avoid letting the kernel choose one for us */
+ if (!handle)
+ handle = 1;
+ flow->msg.t.tcm_handle = handle;
+}
+
+/**
+ * Free the flow opened file descriptors and allocated memory
+ *
+ * @param[in] flow
+ * Pointer to the flow to free
+ *
+ */
+static void
+tap_flow_free(struct pmd_internals *pmd, struct rte_flow *flow)
+{
+ int i;
+
+ if (!flow)
+ return;
+
+ if (pmd->rss_enabled) {
+ /* Close flow BPF file descriptors */
+ for (i = 0; i < SEC_MAX; i++)
+ if (flow->bpf_fd[i] != 0) {
+ close(flow->bpf_fd[i]);
+ flow->bpf_fd[i] = 0;
+ }
+
+ /* Release the map key for this RSS rule */
+ bpf_rss_key(KEY_CMD_RELEASE, &flow->key_idx);
+ flow->key_idx = 0;
+ }
+
+ /* Free flow allocated memory */
+ rte_free(flow);
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+static struct rte_flow *
+tap_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct rte_flow *remote_flow = NULL;
+ struct rte_flow *flow = NULL;
+ struct nlmsg *msg = NULL;
+ int err;
+
+ if (!pmd->if_index) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "can't create rule, ifindex not found");
+ goto fail;
+ }
+ /*
+ * No rules configured through standard rte_flow should be set on the
+ * priorities used by implicit rules.
+ */
+ if ((attr->group == MAX_GROUP) &&
+ attr->priority > (MAX_PRIORITY - TAP_REMOTE_MAX_IDX)) {
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL, "priority value too big");
+ goto fail;
+ }
+ flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate memory for rte_flow");
+ goto fail;
+ }
+ msg = &flow->msg;
+ tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
+ tap_flow_set_handle(flow);
+ if (priv_flow_process(pmd, attr, items, actions, error, flow, 0))
+ goto fail;
+ err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
+ if (err < 0) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "couldn't send request to kernel");
+ goto fail;
+ }
+ err = tap_nl_recv_ack(pmd->nlsk_fd);
+ if (err < 0) {
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule creation (%d): %s",
+ errno, strerror(errno));
+ rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "overlapping rules or Kernel too old for flower support");
+ goto fail;
+ }
+ LIST_INSERT_HEAD(&pmd->flows, flow, next);
+ /**
+ * If a remote device is configured, a TC rule with identical items for
+ * matching must be set on that device, with a single action: redirect
+ * to the local pmd->if_index.
+ */
+ if (pmd->remote_if_index) {
+ remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ if (!remote_flow) {
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "cannot allocate memory for rte_flow");
+ goto fail;
+ }
+ msg = &remote_flow->msg;
+ /* set the rule if_index for the remote netdevice */
+ tc_init_msg(
+ msg, pmd->remote_if_index, RTM_NEWTFILTER,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
+ tap_flow_set_handle(remote_flow);
+ if (priv_flow_process(pmd, attr, items, NULL,
+ error, remote_flow, TCA_EGRESS_REDIR)) {
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "rte flow rule validation failed");
+ goto fail;
+ }
+ err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
+ if (err < 0) {
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failure sending nl request");
+ goto fail;
+ }
+ err = tap_nl_recv_ack(pmd->nlsk_fd);
+ if (err < 0) {
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule creation (%d): %s",
+ errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "overlapping rules or Kernel too old for flower support");
+ goto fail;
+ }
+ flow->remote_flow = remote_flow;
+ }
+ return flow;
+fail:
+ if (remote_flow)
+ rte_free(remote_flow);
+ if (flow)
+ tap_flow_free(pmd, flow);
+ return NULL;
+}
+
+/**
+ * Destroy a flow using pointer to pmd_internal.
+ *
+ * @param[in, out] pmd
+ * Pointer to private structure.
+ * @param[in] flow
+ * Pointer to the flow to destroy.
+ * @param[in, out] error
+ * Pointer to the flow error handler
+ *
+ * @return 0 if the flow could be destroyed, -1 otherwise.
+ */
+static int
+tap_flow_destroy_pmd(struct pmd_internals *pmd,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *remote_flow = flow->remote_flow;
+ int ret = 0;
+
+ LIST_REMOVE(flow, next);
+ flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
+
+ ret = tap_nl_send(pmd->nlsk_fd, &flow->msg.nh);
+ if (ret < 0) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "couldn't send request to kernel");
+ goto end;
+ }
+ ret = tap_nl_recv_ack(pmd->nlsk_fd);
+ /* If errno is ENOENT, the rule is already no longer in the kernel. */
+ if (ret < 0 && errno == ENOENT)
+ ret = 0;
+ if (ret < 0) {
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule deletion (%d): %s",
+ errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "couldn't receive kernel ack to our request");
+ goto end;
+ }
+
+ if (remote_flow) {
+ remote_flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ remote_flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
+
+ ret = tap_nl_send(pmd->nlsk_fd, &remote_flow->msg.nh);
+ if (ret < 0) {
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failure sending nl request");
+ goto end;
+ }
+ ret = tap_nl_recv_ack(pmd->nlsk_fd);
+ if (ret < 0 && errno == ENOENT)
+ ret = 0;
+ if (ret < 0) {
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule deletion (%d): %s",
+ errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failure trying to receive nl ack");
+ goto end;
+ }
+ }
+end:
+ if (remote_flow)
+ rte_free(remote_flow);
+ tap_flow_free(pmd, flow);
+ return ret;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+static int
+tap_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ return tap_flow_destroy_pmd(pmd, flow, error);
+}
+
+/**
+ * Enable/disable flow isolation.
+ *
+ * @see rte_flow_isolate()
+ * @see rte_flow_ops
+ */
+static int
+tap_flow_isolate(struct rte_eth_dev *dev,
+ int set,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ /* normalize 'set' variable to contain 0 or 1 values */
+ if (set)
+ set = 1;
+ /* if already in the right isolation mode - nothing to do */
+ if ((set ^ pmd->flow_isolate) == 0)
+ return 0;
+ /* mark the isolation mode for tap_flow_implicit_create() */
+ pmd->flow_isolate = set;
+ /*
+ * If netdevice is there, setup appropriate flow rules immediately.
+ * Otherwise it will be set when bringing up the netdevice (tun_alloc).
+ */
+ if (!pmd->rxq[0].fd)
+ return 0;
+ if (set) {
+ struct rte_flow *remote_flow;
+
+ while (1) {
+ remote_flow = LIST_FIRST(&pmd->implicit_flows);
+ if (!remote_flow)
+ break;
+ /*
+ * Remove all implicit rules on the remote.
+ * Keep the local rule to redirect packets on TX.
+ * Keep also the last implicit local rule: ISOLATE.
+ */
+ if (remote_flow->msg.t.tcm_ifindex == pmd->if_index)
+ break;
+ if (tap_flow_destroy_pmd(pmd, remote_flow, NULL) < 0)
+ goto error;
+ }
+ /* Switch the TC rule according to pmd->flow_isolate */
+ if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
+ goto error;
+ } else {
+ /* Switch the TC rule according to pmd->flow_isolate */
+ if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
+ goto error;
+ if (!pmd->remote_if_index)
+ return 0;
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0)
+ goto error;
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
+ goto error;
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0)
+ goto error;
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0)
+ goto error;
+ if (dev->data->promiscuous &&
+ tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC) < 0)
+ goto error;
+ if (dev->data->all_multicast &&
+ tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI) < 0)
+ goto error;
+ }
+ return 0;
+error:
+ pmd->flow_isolate = 0;
+ return rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "TC rule creation failed");
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct rte_flow *flow;
+
+ while (!LIST_EMPTY(&pmd->flows)) {
+ flow = LIST_FIRST(&pmd->flows);
+ if (tap_flow_destroy(dev, flow, error) < 0)
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Add an implicit flow rule on the remote device to make sure traffic gets to
+ * the tap netdevice from there.
+ *
+ * @param pmd
+ * Pointer to private structure.
+ * @param[in] idx
+ * The idx in the implicit_rte_flows array specifying which rule to apply.
+ *
+ * @return -1 if the rule couldn't be applied, 0 otherwise.
+ */
+int tap_flow_implicit_create(struct pmd_internals *pmd,
+ enum implicit_rule_index idx)
+{
+ uint16_t flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE;
+ struct rte_flow_action *actions = implicit_rte_flows[idx].actions;
+ struct rte_flow_action isolate_actions[2] = {
+ [1] = {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_flow_item *items = implicit_rte_flows[idx].items;
+ struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr;
+ struct rte_flow_item_eth eth_local = { .type = 0 };
+ uint16_t if_index = pmd->remote_if_index;
+ struct rte_flow *remote_flow = NULL;
+ struct nlmsg *msg = NULL;
+ int err = 0;
+ struct rte_flow_item items_local[2] = {
+ [0] = {
+ .type = items[0].type,
+ .spec = &eth_local,
+ .mask = items[0].mask,
+ },
+ [1] = {
+ .type = items[1].type,
+ }
+ };
+
+ remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ if (!remote_flow) {
+ TAP_LOG(ERR, "Cannot allocate memory for rte_flow");
+ goto fail;
+ }
+ msg = &remote_flow->msg;
+ if (idx == TAP_REMOTE_TX) {
+ if_index = pmd->if_index;
+ } else if (idx == TAP_ISOLATE) {
+ if_index = pmd->if_index;
+ /* Don't be exclusive for this rule, it can be changed later. */
+ flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
+ isolate_actions[0].type = pmd->flow_isolate ?
+ RTE_FLOW_ACTION_TYPE_DROP :
+ RTE_FLOW_ACTION_TYPE_PASSTHRU;
+ actions = isolate_actions;
+ } else if (idx == TAP_REMOTE_LOCAL_MAC) {
+ /*
+ * eth addr couldn't be set in implicit_rte_flows[] as it is not
+ * known at compile time.
+ */
+ memcpy(&eth_local.dst, &pmd->eth_addr, sizeof(pmd->eth_addr));
+ items = items_local;
+ }
+ tc_init_msg(msg, if_index, RTM_NEWTFILTER, flags);
+ msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
+ /*
+ * The ISOLATE rule is always present and must have a static handle, as
+ * the action is changed whether the feature is enabled (DROP) or
+ * disabled (PASSTHRU).
+ * There is just one REMOTE_PROMISCUOUS rule in all cases. It should
+ * have a static handle such that adding it twice will fail with EEXIST
+ * with any kernel version. Remark: old kernels may falsely accept the
+ * same REMOTE_PROMISCUOUS rules if they had different handles.
+ */
+ if (idx == TAP_ISOLATE)
+ remote_flow->msg.t.tcm_handle = ISOLATE_HANDLE;
+ else if (idx == TAP_REMOTE_PROMISC)
+ remote_flow->msg.t.tcm_handle = REMOTE_PROMISCUOUS_HANDLE;
+ else
+ tap_flow_set_handle(remote_flow);
+ if (priv_flow_process(pmd, attr, items, actions, NULL,
+ remote_flow, implicit_rte_flows[idx].mirred)) {
+ TAP_LOG(ERR, "rte flow rule validation failed");
+ goto fail;
+ }
+ err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
+ if (err < 0) {
+ TAP_LOG(ERR, "Failure sending nl request");
+ goto fail;
+ }
+ err = tap_nl_recv_ack(pmd->nlsk_fd);
+ if (err < 0) {
+ /* Silently ignore re-entering existing rule */
+ if (errno == EEXIST)
+ goto success;
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule creation (%d): %s",
+ errno, strerror(errno));
+ goto fail;
+ }
+ LIST_INSERT_HEAD(&pmd->implicit_flows, remote_flow, next);
+success:
+ return 0;
+fail:
+ if (remote_flow)
+ rte_free(remote_flow);
+ return -1;
+}
+
+/**
+ * Remove specific implicit flow rule on the remote device.
+ *
+ * @param[in, out] pmd
+ * Pointer to private structure.
+ * @param[in] idx
+ * The idx in the implicit_rte_flows array specifying which rule to remove.
+ *
+ * @return -1 if one of the implicit rules couldn't be created, 0 otherwise.
+ */
+int tap_flow_implicit_destroy(struct pmd_internals *pmd,
+ enum implicit_rule_index idx)
+{
+ struct rte_flow *remote_flow;
+ int cur_prio = -1;
+ int idx_prio = implicit_rte_flows[idx].attr.priority + PRIORITY_OFFSET;
+
+ for (remote_flow = LIST_FIRST(&pmd->implicit_flows);
+ remote_flow;
+ remote_flow = LIST_NEXT(remote_flow, next)) {
+ cur_prio = (remote_flow->msg.t.tcm_info >> 16) & PRIORITY_MASK;
+ if (cur_prio != idx_prio)
+ continue;
+ return tap_flow_destroy_pmd(pmd, remote_flow, NULL);
+ }
+ return 0;
+}
+
+/**
+ * Destroy all implicit flows.
+ *
+ * @see rte_flow_flush()
+ */
+int
+tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
+{
+ struct rte_flow *remote_flow;
+
+ while (!LIST_EMPTY(&pmd->implicit_flows)) {
+ remote_flow = LIST_FIRST(&pmd->implicit_flows);
+ if (tap_flow_destroy_pmd(pmd, remote_flow, error) < 0)
+ return -1;
+ }
+ return 0;
+}
+
+#define MAX_RSS_KEYS 256
+#define KEY_IDX_OFFSET (3 * MAX_RSS_KEYS)
+#define SEC_NAME_CLS_Q "cls_q"
+
+const char *sec_name[SEC_MAX] = {
+ [SEC_L3_L4] = "l3_l4",
+};
+
+/**
+ * Enable RSS on tap: create TC rules for queuing.
+ *
+ * @param[in, out] pmd
+ * Pointer to private structure.
+ *
+ * @param[in] attr
+ * Pointer to rte_flow to get flow group
+ *
+ * @param[out] error
+ * Pointer to error reporting if not NULL.
+ *
+ * @return 0 on success, negative value on failure.
+ */
+static int rss_enable(struct pmd_internals *pmd,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *rss_flow = NULL;
+ struct nlmsg *msg = NULL;
+ /* 4096 is the maximum number of instructions for a BPF program */
+ char annotation[64];
+ int i;
+ int err = 0;
+
+ /* unlimit locked memory */
+ struct rlimit memlock_limit = {
+ .rlim_cur = RLIM_INFINITY,
+ .rlim_max = RLIM_INFINITY,
+ };
+ setrlimit(RLIMIT_MEMLOCK, &memlock_limit);
+
+ /* Get a new map key for a new RSS rule */
+ err = bpf_rss_key(KEY_CMD_INIT, NULL);
+ if (err < 0) {
+ rte_flow_error_set(
+ error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to initialize BPF RSS keys");
+
+ return -1;
+ }
+
+ /*
+ * Create BPF RSS MAP
+ */
+ pmd->map_fd = tap_flow_bpf_rss_map_create(sizeof(__u32), /* key size */
+ sizeof(struct rss_key),
+ MAX_RSS_KEYS);
+ if (pmd->map_fd < 0) {
+ TAP_LOG(ERR,
+ "Failed to create BPF map (%d): %s",
+ errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Kernel too old or not configured "
+ "to support BPF maps");
+
+ return -ENOTSUP;
+ }
+
+ /*
+ * Add a rule per queue to match reclassified packets and direct them to
+ * the correct queue.
+ */
+ for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) {
+ pmd->bpf_fd[i] = tap_flow_bpf_cls_q(i);
+ if (pmd->bpf_fd[i] < 0) {
+ TAP_LOG(ERR,
+ "Failed to load BPF section %s for queue %d",
+ SEC_NAME_CLS_Q, i);
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Kernel too old or not configured "
+ "to support BPF programs loading");
+
+ return -ENOTSUP;
+ }
+
+ rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ if (!rss_flow) {
+ TAP_LOG(ERR,
+ "Cannot allocate memory for rte_flow");
+ return -1;
+ }
+ msg = &rss_flow->msg;
+ tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER, NLM_F_REQUEST |
+ NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
+ tap_flow_set_handle(rss_flow);
+ uint16_t group = attr->group << GROUP_SHIFT;
+ uint16_t prio = group | (i + PRIORITY_OFFSET);
+ msg->t.tcm_info = TC_H_MAKE(prio << 16, msg->t.tcm_info);
+ msg->t.tcm_parent = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
+
+ tap_nlattr_add(&msg->nh, TCA_KIND, sizeof("bpf"), "bpf");
+ if (tap_nlattr_nested_start(msg, TCA_OPTIONS) < 0)
+ return -1;
+ tap_nlattr_add32(&msg->nh, TCA_BPF_FD, pmd->bpf_fd[i]);
+ snprintf(annotation, sizeof(annotation), "[%s%d]",
+ SEC_NAME_CLS_Q, i);
+ tap_nlattr_add(&msg->nh, TCA_BPF_NAME, strlen(annotation) + 1,
+ annotation);
+ /* Actions */
+ {
+ struct action_data adata = {
+ .id = "skbedit",
+ .skbedit = {
+ .skbedit = {
+ .action = TC_ACT_PIPE,
+ },
+ .queue = i,
+ },
+ };
+ if (add_actions(rss_flow, 1, &adata, TCA_BPF_ACT) < 0)
+ return -1;
+ }
+ tap_nlattr_nested_finish(msg); /* nested TCA_OPTIONS */
+
+ /* Netlink message is now ready to be sent */
+ if (tap_nl_send(pmd->nlsk_fd, &msg->nh) < 0)
+ return -1;
+ err = tap_nl_recv_ack(pmd->nlsk_fd);
+ if (err < 0) {
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule creation (%d): %s",
+ errno, strerror(errno));
+ return err;
+ }
+ LIST_INSERT_HEAD(&pmd->rss_flows, rss_flow, next);
+ }
+
+ pmd->rss_enabled = 1;
+ return err;
+}
+
+/**
+ * Manage bpf RSS keys repository with operations: init, get, release
+ *
+ * @param[in] cmd
+ * Command on RSS keys: init, get, release
+ *
+ * @param[in, out] key_idx
+ * Pointer to RSS Key index (out for get command, in for release command)
+ *
+ * @return -1 if couldn't get, release or init the RSS keys, 0 otherwise.
+ */
+static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx)
+{
+ __u32 i;
+ int err = 0;
+ static __u32 num_used_keys;
+ static __u32 rss_keys[MAX_RSS_KEYS] = {KEY_STAT_UNSPEC};
+ static __u32 rss_keys_initialized;
+ __u32 key;
+
+ switch (cmd) {
+ case KEY_CMD_GET:
+ if (!rss_keys_initialized) {
+ err = -1;
+ break;
+ }
+
+ if (num_used_keys == RTE_DIM(rss_keys)) {
+ err = -1;
+ break;
+ }
+
+ *key_idx = num_used_keys % RTE_DIM(rss_keys);
+ while (rss_keys[*key_idx] == KEY_STAT_USED)
+ *key_idx = (*key_idx + 1) % RTE_DIM(rss_keys);
+
+ rss_keys[*key_idx] = KEY_STAT_USED;
+
+ /*
+ * Add an offset to key_idx in order to handle a case of
+ * RSS and non RSS flows mixture.
+ * If a non RSS flow is destroyed it has an eBPF map
+ * index 0 (initialized on flow creation) and might
+ * unintentionally remove RSS entry 0 from eBPF map.
+ * To avoid this issue, add an offset to the real index
+ * during a KEY_CMD_GET operation and subtract this offset
+ * during a KEY_CMD_RELEASE operation in order to restore
+ * the real index.
+ */
+ *key_idx += KEY_IDX_OFFSET;
+ num_used_keys++;
+ break;
+
+ case KEY_CMD_RELEASE:
+ if (!rss_keys_initialized)
+ break;
+
+ /*
+ * Subtract offest to restore real key index
+ * If a non RSS flow is falsely trying to release map
+ * entry 0 - the offset subtraction will calculate the real
+ * map index as an out-of-range value and the release operation
+ * will be silently ignored.
+ */
+ key = *key_idx - KEY_IDX_OFFSET;
+ if (key >= RTE_DIM(rss_keys))
+ break;
+
+ if (rss_keys[key] == KEY_STAT_USED) {
+ rss_keys[key] = KEY_STAT_AVAILABLE;
+ num_used_keys--;
+ }
+ break;
+
+ case KEY_CMD_INIT:
+ for (i = 0; i < RTE_DIM(rss_keys); i++)
+ rss_keys[i] = KEY_STAT_AVAILABLE;
+
+ rss_keys_initialized = 1;
+ num_used_keys = 0;
+ break;
+
+ case KEY_CMD_DEINIT:
+ for (i = 0; i < RTE_DIM(rss_keys); i++)
+ rss_keys[i] = KEY_STAT_UNSPEC;
+
+ rss_keys_initialized = 0;
+ num_used_keys = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * Add RSS hash calculations and queue selection
+ *
+ * @param[in, out] pmd
+ * Pointer to internal structure. Used to set/get RSS map fd
+ *
+ * @param[in] rss
+ * Pointer to RSS flow actions
+ *
+ * @param[out] error
+ * Pointer to error reporting if not NULL.
+ *
+ * @return 0 on success, negative value on failure
+ */
+static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
+ const struct rte_flow_action_rss *rss,
+ struct rte_flow_error *error)
+{
+ /* 4096 is the maximum number of instructions for a BPF program */
+ unsigned int i;
+ int err;
+ struct rss_key rss_entry = { .hash_fields = 0,
+ .key_size = 0 };
+
+ /* Check supported RSS features */
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "a nonzero RSS encapsulation level is not supported");
+
+ /* Get a new map key for a new RSS rule */
+ err = bpf_rss_key(KEY_CMD_GET, &flow->key_idx);
+ if (err < 0) {
+ rte_flow_error_set(
+ error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to get BPF RSS key");
+
+ return -1;
+ }
+
+ /* Update RSS map entry with queues */
+ rss_entry.nb_queues = rss->queue_num;
+ for (i = 0; i < rss->queue_num; i++)
+ rss_entry.queues[i] = rss->queue[i];
+ rss_entry.hash_fields =
+ (1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4);
+
+ /* Add this RSS entry to map */
+ err = tap_flow_bpf_update_rss_elem(pmd->map_fd,
+ &flow->key_idx, &rss_entry);
+
+ if (err) {
+ TAP_LOG(ERR,
+ "Failed to update BPF map entry #%u (%d): %s",
+ flow->key_idx, errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Kernel too old or not configured "
+ "to support BPF maps updates");
+
+ return -ENOTSUP;
+ }
+
+
+ /*
+ * Load bpf rules to calculate hash for this key_idx
+ */
+
+ flow->bpf_fd[SEC_L3_L4] =
+ tap_flow_bpf_calc_l3_l4_hash(flow->key_idx, pmd->map_fd);
+ if (flow->bpf_fd[SEC_L3_L4] < 0) {
+ TAP_LOG(ERR,
+ "Failed to load BPF section %s (%d): %s",
+ sec_name[SEC_L3_L4], errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Kernel too old or not configured "
+ "to support BPF program loading");
+
+ return -ENOTSUP;
+ }
+
+ /* Actions */
+ {
+ struct action_data adata[] = {
+ {
+ .id = "bpf",
+ .bpf = {
+ .bpf_fd = flow->bpf_fd[SEC_L3_L4],
+ .annotation = sec_name[SEC_L3_L4],
+ .bpf = {
+ .action = TC_ACT_PIPE,
+ },
+ },
+ },
+ };
+
+ if (add_actions(flow, RTE_DIM(adata), adata,
+ TCA_FLOWER_ACT) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Manage filter operations.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+tap_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &tap_flow_ops;
+ return 0;
+ default:
+ TAP_LOG(ERR, "%p: filter type (%d) not supported",
+ dev, filter_type);
+ }
+ return -EINVAL;
+}
diff --git a/src/spdk/dpdk/drivers/net/tap/tap_flow.h b/src/spdk/dpdk/drivers/net/tap/tap_flow.h
new file mode 100644
index 00000000..ac60a9ae
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/tap_flow.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef _TAP_FLOW_H_
+#define _TAP_FLOW_H_
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_eth_tap.h>
+#include <tap_autoconf.h>
+
+/**
+ * In TC, priority 0 means we require the kernel to allocate one for us.
+ * In rte_flow, however, we want the priority 0 to be the most important one.
+ * Use an offset to have the most important priority being 1 in TC.
+ */
+#define PRIORITY_OFFSET 1
+#define PRIORITY_MASK (0xfff)
+#define MAX_PRIORITY (PRIORITY_MASK - PRIORITY_OFFSET)
+#define GROUP_MASK (0xf)
+#define GROUP_SHIFT 12
+#define MAX_GROUP GROUP_MASK
+#define RSS_PRIORITY_OFFSET RTE_PMD_TAP_MAX_QUEUES
+
+/**
+ * These index are actually in reversed order: their priority is processed
+ * by subtracting their value to the lowest priority (PRIORITY_MASK).
+ * Thus the first one will have the lowest priority in the end
+ * (but biggest value).
+ */
+enum implicit_rule_index {
+ TAP_REMOTE_TX,
+ TAP_ISOLATE,
+ TAP_REMOTE_BROADCASTV6,
+ TAP_REMOTE_BROADCAST,
+ TAP_REMOTE_ALLMULTI,
+ TAP_REMOTE_PROMISC,
+ TAP_REMOTE_LOCAL_MAC,
+ TAP_REMOTE_MAX_IDX,
+};
+
+enum bpf_fd_idx {
+ SEC_L3_L4,
+ SEC_MAX,
+};
+
+int tap_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+int tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+
+int tap_flow_implicit_create(struct pmd_internals *pmd,
+ enum implicit_rule_index idx);
+int tap_flow_implicit_destroy(struct pmd_internals *pmd,
+ enum implicit_rule_index idx);
+int tap_flow_implicit_flush(struct pmd_internals *pmd,
+ struct rte_flow_error *error);
+
+int tap_flow_bpf_cls_q(__u32 queue_idx);
+int tap_flow_bpf_calc_l3_l4_hash(__u32 key_idx, int map_fd);
+int tap_flow_bpf_rss_map_create(unsigned int key_size, unsigned int value_size,
+ unsigned int max_entries);
+int tap_flow_bpf_update_rss_elem(int fd, void *key, void *value);
+
+#endif /* _TAP_FLOW_H_ */
diff --git a/src/spdk/dpdk/drivers/net/tap/tap_intr.c b/src/spdk/dpdk/drivers/net/tap/tap_intr.c
new file mode 100644
index 00000000..fc590181
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/tap_intr.c
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+/**
+ * @file
+ * Interrupts handling for tap driver.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <rte_eth_tap.h>
+#include <rte_errno.h>
+#include <rte_interrupts.h>
+
+
+/**
+ * Unregister Rx interrupts free the queue interrupt vector.
+ *
+ * @param dev
+ * Pointer to the tap rte_eth_dev structure.
+ */
+static void
+tap_rx_intr_vec_uninstall(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct rte_intr_handle *intr_handle = &pmd->intr_handle;
+
+ rte_intr_free_epoll_fd(intr_handle);
+ free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ intr_handle->nb_efd = 0;
+}
+
+/**
+ * Allocate Rx queue interrupt vector and register Rx interrupts.
+ *
+ * @param dev
+ * Pointer to the tap rte_eth_dev device structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+tap_rx_intr_vec_install(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ unsigned int rxqs_n = pmd->dev->data->nb_rx_queues;
+ struct rte_intr_handle *intr_handle = &pmd->intr_handle;
+ unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ unsigned int i;
+ unsigned int count = 0;
+
+ if (!dev->data->dev_conf.intr_conf.rxq)
+ return 0;
+ intr_handle->intr_vec = malloc(sizeof(intr_handle->intr_vec[rxqs_n]));
+ if (intr_handle->intr_vec == NULL) {
+ rte_errno = ENOMEM;
+ TAP_LOG(ERR,
+ "failed to allocate memory for interrupt vector,"
+ " Rx interrupts will not be supported");
+ return -rte_errno;
+ }
+ for (i = 0; i < n; i++) {
+ struct rx_queue *rxq = pmd->dev->data->rx_queues[i];
+
+ /* Skip queues that cannot request interrupts. */
+ if (!rxq || rxq->fd <= 0) {
+ /* Use invalid intr_vec[] index to disable entry. */
+ intr_handle->intr_vec[i] =
+ RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID;
+ continue;
+ }
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+ intr_handle->efds[count] = rxq->fd;
+ count++;
+ }
+ if (!count)
+ tap_rx_intr_vec_uninstall(dev);
+ else
+ intr_handle->nb_efd = count;
+ return 0;
+}
+
+/**
+ * Register or unregister the Rx interrupts.
+ *
+ * @param dev
+ * Pointer to the tap rte_eth_dev device structure.
+ * @param set
+ * should the operation be register or unregister the interrupts.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+tap_rx_intr_vec_set(struct rte_eth_dev *dev, int set)
+{
+ tap_rx_intr_vec_uninstall(dev);
+ if (set)
+ return tap_rx_intr_vec_install(dev);
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/tap/tap_log.h b/src/spdk/dpdk/drivers/net/tap/tap_log.h
new file mode 100644
index 00000000..fa06843a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/tap_log.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+extern int tap_logtype;
+
+#define TAP_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, tap_logtype, "%s(): " fmt "\n", \
+ __func__, ## args)
diff --git a/src/spdk/dpdk/drivers/net/tap/tap_netlink.c b/src/spdk/dpdk/drivers/net/tap/tap_netlink.c
new file mode 100644
index 00000000..6cb51009
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/tap_netlink.c
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/netlink.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <rte_malloc.h>
+#include <tap_netlink.h>
+#include <rte_random.h>
+#include "tap_log.h"
+
+/* Must be quite large to support dumping a huge list of QDISC or filters. */
+#define BUF_SIZE (32 * 1024) /* Size of the buffer to receive kernel messages */
+#define SNDBUF_SIZE 32768 /* Send buffer size for the netlink socket */
+#define RCVBUF_SIZE 32768 /* Receive buffer size for the netlink socket */
+
+struct nested_tail {
+ struct rtattr *tail;
+ struct nested_tail *prev;
+};
+
+/**
+ * Initialize a netlink socket for communicating with the kernel.
+ *
+ * @param nl_groups
+ * Set it to a netlink group value (e.g. RTMGRP_LINK) to receive messages for
+ * specific netlink multicast groups. Otherwise, no subscription will be made.
+ *
+ * @return
+ * netlink socket file descriptor on success, -1 otherwise.
+ */
+int
+tap_nl_init(uint32_t nl_groups)
+{
+ int fd, sndbuf_size = SNDBUF_SIZE, rcvbuf_size = RCVBUF_SIZE;
+ struct sockaddr_nl local = {
+ .nl_family = AF_NETLINK,
+ .nl_groups = nl_groups,
+ };
+
+ fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
+ if (fd < 0) {
+ TAP_LOG(ERR, "Unable to create a netlink socket");
+ return -1;
+ }
+ if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int))) {
+ TAP_LOG(ERR, "Unable to set socket buffer send size");
+ return -1;
+ }
+ if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int))) {
+ TAP_LOG(ERR, "Unable to set socket buffer receive size");
+ return -1;
+ }
+ if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) {
+ TAP_LOG(ERR, "Unable to bind to the netlink socket");
+ return -1;
+ }
+ return fd;
+}
+
+/**
+ * Clean up a netlink socket once all communicating with the kernel is finished.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ *
+ * @return
+ * 0 on success, -1 otherwise.
+ */
+int
+tap_nl_final(int nlsk_fd)
+{
+ if (close(nlsk_fd)) {
+ TAP_LOG(ERR, "Failed to close netlink socket: %s (%d)",
+ strerror(errno), errno);
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Send a message to the kernel on the netlink socket.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] nh
+ * The netlink message send to the kernel.
+ *
+ * @return
+ * the number of sent bytes on success, -1 otherwise.
+ */
+int
+tap_nl_send(int nlsk_fd, struct nlmsghdr *nh)
+{
+ /* man 7 netlink EXAMPLE */
+ struct sockaddr_nl sa = {
+ .nl_family = AF_NETLINK,
+ };
+ struct iovec iov = {
+ .iov_base = nh,
+ .iov_len = nh->nlmsg_len,
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ };
+ int send_bytes;
+
+ nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */
+ nh->nlmsg_seq = (uint32_t)rte_rand();
+ send_bytes = sendmsg(nlsk_fd, &msg, 0);
+ if (send_bytes < 0) {
+ TAP_LOG(ERR, "Failed to send netlink message: %s (%d)",
+ strerror(errno), errno);
+ return -1;
+ }
+ return send_bytes;
+}
+
+/**
+ * Check that the kernel sends an appropriate ACK in response
+ * to an tap_nl_send().
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+tap_nl_recv_ack(int nlsk_fd)
+{
+ return tap_nl_recv(nlsk_fd, NULL, NULL);
+}
+
+/**
+ * Receive a message from the kernel on the netlink socket, following an
+ * tap_nl_send().
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] cb
+ * The callback function to call for each netlink message received.
+ * @param[in, out] arg
+ * Custom arguments for the callback.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+tap_nl_recv(int nlsk_fd, int (*cb)(struct nlmsghdr *, void *arg), void *arg)
+{
+ /* man 7 netlink EXAMPLE */
+ struct sockaddr_nl sa;
+ char buf[BUF_SIZE];
+ struct iovec iov = {
+ .iov_base = buf,
+ .iov_len = sizeof(buf),
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ /* One message at a time */
+ .msg_iovlen = 1,
+ };
+ int multipart = 0;
+ int ret = 0;
+
+ do {
+ struct nlmsghdr *nh;
+ int recv_bytes = 0;
+
+ recv_bytes = recvmsg(nlsk_fd, &msg, 0);
+ if (recv_bytes < 0)
+ return -1;
+ for (nh = (struct nlmsghdr *)buf;
+ NLMSG_OK(nh, (unsigned int)recv_bytes);
+ nh = NLMSG_NEXT(nh, recv_bytes)) {
+ if (nh->nlmsg_type == NLMSG_ERROR) {
+ struct nlmsgerr *err_data = NLMSG_DATA(nh);
+
+ if (err_data->error < 0) {
+ errno = -err_data->error;
+ return -1;
+ }
+ /* Ack message. */
+ return 0;
+ }
+ /* Multi-part msgs and their trailing DONE message. */
+ if (nh->nlmsg_flags & NLM_F_MULTI) {
+ if (nh->nlmsg_type == NLMSG_DONE)
+ return 0;
+ multipart = 1;
+ }
+ if (cb)
+ ret = cb(nh, arg);
+ }
+ } while (multipart);
+ return ret;
+}
+
+/**
+ * Append a netlink attribute to a message.
+ *
+ * @param[in, out] nh
+ * The netlink message to parse, received from the kernel.
+ * @param[in] type
+ * The type of attribute to append.
+ * @param[in] data_len
+ * The length of the data to append.
+ * @param[in] data
+ * The data to append.
+ */
+void
+tap_nlattr_add(struct nlmsghdr *nh, unsigned short type,
+ unsigned int data_len, const void *data)
+{
+ /* see man 3 rtnetlink */
+ struct rtattr *rta;
+
+ rta = (struct rtattr *)NLMSG_TAIL(nh);
+ rta->rta_len = RTA_LENGTH(data_len);
+ rta->rta_type = type;
+ memcpy(RTA_DATA(rta), data, data_len);
+ nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
+}
+
+/**
+ * Append a uint8_t netlink attribute to a message.
+ *
+ * @param[in, out] nh
+ * The netlink message to parse, received from the kernel.
+ * @param[in] type
+ * The type of attribute to append.
+ * @param[in] data
+ * The data to append.
+ */
+void
+tap_nlattr_add8(struct nlmsghdr *nh, unsigned short type, uint8_t data)
+{
+ tap_nlattr_add(nh, type, sizeof(uint8_t), &data);
+}
+
+/**
+ * Append a uint16_t netlink attribute to a message.
+ *
+ * @param[in, out] nh
+ * The netlink message to parse, received from the kernel.
+ * @param[in] type
+ * The type of attribute to append.
+ * @param[in] data
+ * The data to append.
+ */
+void
+tap_nlattr_add16(struct nlmsghdr *nh, unsigned short type, uint16_t data)
+{
+ tap_nlattr_add(nh, type, sizeof(uint16_t), &data);
+}
+
+/**
+ * Append a uint16_t netlink attribute to a message.
+ *
+ * @param[in, out] nh
+ * The netlink message to parse, received from the kernel.
+ * @param[in] type
+ * The type of attribute to append.
+ * @param[in] data
+ * The data to append.
+ */
+void
+tap_nlattr_add32(struct nlmsghdr *nh, unsigned short type, uint32_t data)
+{
+ tap_nlattr_add(nh, type, sizeof(uint32_t), &data);
+}
+
+/**
+ * Start a nested netlink attribute.
+ * It must be followed later by a call to tap_nlattr_nested_finish().
+ *
+ * @param[in, out] msg
+ * The netlink message where to edit the nested_tails metadata.
+ * @param[in] type
+ * The nested attribute type to append.
+ *
+ * @return
+ * -1 if adding a nested netlink attribute failed, 0 otherwise.
+ */
+int
+tap_nlattr_nested_start(struct nlmsg *msg, uint16_t type)
+{
+ struct nested_tail *tail;
+
+ tail = rte_zmalloc(NULL, sizeof(struct nested_tail), 0);
+ if (!tail) {
+ TAP_LOG(ERR,
+ "Couldn't allocate memory for nested netlink attribute");
+ return -1;
+ }
+
+ tail->tail = (struct rtattr *)NLMSG_TAIL(&msg->nh);
+
+ tap_nlattr_add(&msg->nh, type, 0, NULL);
+
+ tail->prev = msg->nested_tails;
+
+ msg->nested_tails = tail;
+
+ return 0;
+}
+
+/**
+ * End a nested netlink attribute.
+ * It follows a call to tap_nlattr_nested_start().
+ * In effect, it will modify the nested attribute length to include every bytes
+ * from the nested attribute start, up to here.
+ *
+ * @param[in, out] msg
+ * The netlink message where to edit the nested_tails metadata.
+ */
+void
+tap_nlattr_nested_finish(struct nlmsg *msg)
+{
+ struct nested_tail *tail = msg->nested_tails;
+
+ tail->tail->rta_len = (char *)NLMSG_TAIL(&msg->nh) - (char *)tail->tail;
+
+ if (tail->prev)
+ msg->nested_tails = tail->prev;
+
+ rte_free(tail);
+}
diff --git a/src/spdk/dpdk/drivers/net/tap/tap_netlink.h b/src/spdk/dpdk/drivers/net/tap/tap_netlink.h
new file mode 100644
index 00000000..faa73ba1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/tap_netlink.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef _TAP_NETLINK_H_
+#define _TAP_NETLINK_H_
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <linux/rtnetlink.h>
+#include <linux/netlink.h>
+#include <stdio.h>
+
+#include <rte_log.h>
+
+#define NLMSG_BUF 512
+
+struct nlmsg {
+ struct nlmsghdr nh;
+ struct tcmsg t;
+ char buf[NLMSG_BUF];
+ struct nested_tail *nested_tails;
+};
+
+#define NLMSG_TAIL(nlh) (void *)((char *)(nlh) + NLMSG_ALIGN((nlh)->nlmsg_len))
+
+int tap_nl_init(uint32_t nl_groups);
+int tap_nl_final(int nlsk_fd);
+int tap_nl_send(int nlsk_fd, struct nlmsghdr *nh);
+int tap_nl_recv(int nlsk_fd, int (*callback)(struct nlmsghdr *, void *),
+ void *arg);
+int tap_nl_recv_ack(int nlsk_fd);
+void tap_nlattr_add(struct nlmsghdr *nh, unsigned short type,
+ unsigned int data_len, const void *data);
+void tap_nlattr_add8(struct nlmsghdr *nh, unsigned short type, uint8_t data);
+void tap_nlattr_add16(struct nlmsghdr *nh, unsigned short type, uint16_t data);
+void tap_nlattr_add32(struct nlmsghdr *nh, unsigned short type, uint32_t data);
+int tap_nlattr_nested_start(struct nlmsg *msg, uint16_t type);
+void tap_nlattr_nested_finish(struct nlmsg *msg);
+
+#endif /* _TAP_NETLINK_H_ */
diff --git a/src/spdk/dpdk/drivers/net/tap/tap_rss.h b/src/spdk/dpdk/drivers/net/tap/tap_rss.h
new file mode 100644
index 00000000..17606b2d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/tap_rss.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef _TAP_RSS_H_
+#define _TAP_RSS_H_
+
+#ifndef TAP_MAX_QUEUES
+#define TAP_MAX_QUEUES 16
+#endif
+
+/* Fixed RSS hash key size in bytes. */
+#define TAP_RSS_HASH_KEY_SIZE 40
+
+/* Supported RSS */
+#define TAP_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+
+/* hashed fields for RSS */
+enum hash_field {
+ HASH_FIELD_IPV4_L3, /* IPv4 src/dst addr */
+ HASH_FIELD_IPV4_L3_L4, /* IPv4 src/dst addr + L4 src/dst ports */
+ HASH_FIELD_IPV6_L3, /* IPv6 src/dst addr */
+ HASH_FIELD_IPV6_L3_L4, /* IPv6 src/dst addr + L4 src/dst ports */
+ HASH_FIELD_L2_SRC, /* Ethernet src addr */
+ HASH_FIELD_L2_DST, /* Ethernet dst addr */
+ HASH_FIELD_L3_SRC, /* L3 src addr */
+ HASH_FIELD_L3_DST, /* L3 dst addr */
+ HASH_FIELD_L4_SRC, /* TCP/UDP src ports */
+ HASH_FIELD_L4_DST, /* TCP/UDP dst ports */
+};
+
+struct rss_key {
+ __u8 key[128];
+ __u32 hash_fields;
+ __u32 key_size;
+ __u32 queues[TAP_MAX_QUEUES];
+ __u32 nb_queues;
+} __attribute__((packed));
+
+#endif /* _TAP_RSS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/tap/tap_tcmsgs.c b/src/spdk/dpdk/drivers/net/tap/tap_tcmsgs.c
new file mode 100644
index 00000000..3c9d0366
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/tap_tcmsgs.c
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <inttypes.h>
+#include <linux/netlink.h>
+#include <net/if.h>
+#include <string.h>
+
+#include <rte_log.h>
+#include <tap_tcmsgs.h>
+#include "tap_log.h"
+
+struct qdisc {
+ uint32_t handle;
+ uint32_t parent;
+};
+
+struct list_args {
+ int nlsk_fd;
+ uint16_t ifindex;
+ void *custom_arg;
+};
+
+struct qdisc_custom_arg {
+ uint32_t handle;
+ uint32_t parent;
+ uint8_t exists;
+};
+
+/**
+ * Initialize a netlink message with a TC header.
+ *
+ * @param[in, out] msg
+ * The netlink message to fill.
+ * @param[in] ifindex
+ * The netdevice ifindex where the rule will be applied.
+ * @param[in] type
+ * The type of TC message to create (RTM_NEWTFILTER, RTM_NEWQDISC, etc.).
+ * @param[in] flags
+ * Overrides the default netlink flags for this msg with those specified.
+ */
+void
+tc_init_msg(struct nlmsg *msg, uint16_t ifindex, uint16_t type, uint16_t flags)
+{
+ struct nlmsghdr *n = &msg->nh;
+
+ n->nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ n->nlmsg_type = type;
+ if (flags)
+ n->nlmsg_flags = flags;
+ else
+ n->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ msg->t.tcm_family = AF_UNSPEC;
+ msg->t.tcm_ifindex = ifindex;
+}
+
+/**
+ * Delete a specific QDISC identified by its iface, and it's handle and parent.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex on whom the deletion will happen.
+ * @param[in] qinfo
+ * Additional info to identify the QDISC (handle and parent).
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+static int
+qdisc_del(int nlsk_fd, uint16_t ifindex, struct qdisc *qinfo)
+{
+ struct nlmsg msg;
+ int fd = 0;
+
+ tc_init_msg(&msg, ifindex, RTM_DELQDISC, 0);
+ msg.t.tcm_handle = qinfo->handle;
+ msg.t.tcm_parent = qinfo->parent;
+ /* if no netlink socket is provided, create one */
+ if (!nlsk_fd) {
+ fd = tap_nl_init(0);
+ if (fd < 0) {
+ TAP_LOG(ERR,
+ "Could not delete QDISC: null netlink socket");
+ return -1;
+ }
+ } else {
+ fd = nlsk_fd;
+ }
+ if (tap_nl_send(fd, &msg.nh) < 0)
+ goto error;
+ if (tap_nl_recv_ack(fd) < 0)
+ goto error;
+ if (!nlsk_fd)
+ return tap_nl_final(fd);
+ return 0;
+error:
+ if (!nlsk_fd)
+ tap_nl_final(fd);
+ return -1;
+}
+
+/**
+ * Add the multiqueue QDISC with MULTIQ_MAJOR_HANDLE handle.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to add the multiqueue QDISC.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+qdisc_add_multiq(int nlsk_fd, uint16_t ifindex)
+{
+ struct tc_multiq_qopt opt;
+ struct nlmsg msg;
+
+ tc_init_msg(&msg, ifindex, RTM_NEWQDISC,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg.t.tcm_handle = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
+ msg.t.tcm_parent = TC_H_ROOT;
+ tap_nlattr_add(&msg.nh, TCA_KIND, sizeof("multiq"), "multiq");
+ tap_nlattr_add(&msg.nh, TCA_OPTIONS, sizeof(opt), &opt);
+ if (tap_nl_send(nlsk_fd, &msg.nh) < 0)
+ return -1;
+ if (tap_nl_recv_ack(nlsk_fd) < 0)
+ return -1;
+ return 0;
+}
+
+/**
+ * Add the ingress QDISC with default ffff: handle.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where the QDISC will be added.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+qdisc_add_ingress(int nlsk_fd, uint16_t ifindex)
+{
+ struct nlmsg msg;
+
+ tc_init_msg(&msg, ifindex, RTM_NEWQDISC,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg.t.tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
+ msg.t.tcm_parent = TC_H_INGRESS;
+ tap_nlattr_add(&msg.nh, TCA_KIND, sizeof("ingress"), "ingress");
+ if (tap_nl_send(nlsk_fd, &msg.nh) < 0)
+ return -1;
+ if (tap_nl_recv_ack(nlsk_fd) < 0)
+ return -1;
+ return 0;
+}
+
+/**
+ * Callback function to delete a QDISC.
+ *
+ * @param[in] nh
+ * The netlink message to parse, received from the kernel.
+ * @param[in] arg
+ * Custom arguments for the callback.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+static int
+qdisc_del_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct tcmsg *t = NLMSG_DATA(nh);
+ struct list_args *args = arg;
+
+ struct qdisc qinfo = {
+ .handle = t->tcm_handle,
+ .parent = t->tcm_parent,
+ };
+
+ /* filter out other ifaces' qdiscs */
+ if (args->ifindex != (unsigned int)t->tcm_ifindex)
+ return 0;
+ /*
+ * Use another nlsk_fd (0) to avoid tampering with the current list
+ * iteration.
+ */
+ return qdisc_del(0, args->ifindex, &qinfo);
+}
+
+/**
+ * Iterate over all QDISC, and call the callback() function for each.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to find QDISCs.
+ * @param[in] callback
+ * The function to call for each QDISC.
+ * @param[in, out] arg
+ * The arguments to provide the callback function with.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+static int
+qdisc_iterate(int nlsk_fd, uint16_t ifindex,
+ int (*callback)(struct nlmsghdr *, void *), void *arg)
+{
+ struct nlmsg msg;
+ struct list_args args = {
+ .nlsk_fd = nlsk_fd,
+ .ifindex = ifindex,
+ .custom_arg = arg,
+ };
+
+ tc_init_msg(&msg, ifindex, RTM_GETQDISC, NLM_F_REQUEST | NLM_F_DUMP);
+ if (tap_nl_send(nlsk_fd, &msg.nh) < 0)
+ return -1;
+ if (tap_nl_recv(nlsk_fd, callback, &args) < 0)
+ return -1;
+ return 0;
+}
+
+/**
+ * Delete all QDISCs for a given netdevice.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to find QDISCs.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+qdisc_flush(int nlsk_fd, uint16_t ifindex)
+{
+ return qdisc_iterate(nlsk_fd, ifindex, qdisc_del_cb, NULL);
+}
+
+/**
+ * Create the multiqueue QDISC, only if it does not exist already.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to add the multiqueue QDISC.
+ *
+ * @return
+ * 0 if the qdisc exists or if has been successfully added.
+ * Return -1 otherwise.
+ */
+int
+qdisc_create_multiq(int nlsk_fd, uint16_t ifindex)
+{
+ int err = 0;
+
+ err = qdisc_add_multiq(nlsk_fd, ifindex);
+ if (err < 0 && errno != -EEXIST) {
+ TAP_LOG(ERR, "Could not add multiq qdisc (%d): %s",
+ errno, strerror(errno));
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Create the ingress QDISC, only if it does not exist already.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to add the ingress QDISC.
+ *
+ * @return
+ * 0 if the qdisc exists or if has been successfully added.
+ * Return -1 otherwise.
+ */
+int
+qdisc_create_ingress(int nlsk_fd, uint16_t ifindex)
+{
+ int err = 0;
+
+ err = qdisc_add_ingress(nlsk_fd, ifindex);
+ if (err < 0 && errno != -EEXIST) {
+ TAP_LOG(ERR, "Could not add ingress qdisc (%d): %s",
+ errno, strerror(errno));
+ return -1;
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/tap/tap_tcmsgs.h b/src/spdk/dpdk/drivers/net/tap/tap_tcmsgs.h
new file mode 100644
index 00000000..8cedea84
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/tap/tap_tcmsgs.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#ifndef _TAP_TCMSGS_H_
+#define _TAP_TCMSGS_H_
+
+#include <tap_autoconf.h>
+#include <linux/if_ether.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
+#include <linux/pkt_cls.h>
+#include <linux/tc_act/tc_mirred.h>
+#include <linux/tc_act/tc_gact.h>
+#include <linux/tc_act/tc_skbedit.h>
+#ifdef HAVE_TC_ACT_BPF
+#include <linux/tc_act/tc_bpf.h>
+#endif
+#include <inttypes.h>
+
+#include <rte_ether.h>
+#include <tap_netlink.h>
+
+#define MULTIQ_MAJOR_HANDLE (1 << 16)
+
+void tc_init_msg(struct nlmsg *msg, uint16_t ifindex, uint16_t type,
+ uint16_t flags);
+int qdisc_list(int nlsk_fd, uint16_t ifindex);
+int qdisc_flush(int nlsk_fd, uint16_t ifindex);
+int qdisc_create_ingress(int nlsk_fd, uint16_t ifindex);
+int qdisc_create_multiq(int nlsk_fd, uint16_t ifindex);
+int qdisc_add_ingress(int nlsk_fd, uint16_t ifindex);
+int qdisc_add_multiq(int nlsk_fd, uint16_t ifindex);
+int filter_list_ingress(int nlsk_fd, uint16_t ifindex);
+
+#endif /* _TAP_TCMSGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/thunderx/Makefile b/src/spdk/dpdk/drivers/net/thunderx/Makefile
new file mode 100644
index 00000000..e6bf4975
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/Makefile
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_thunderx_nicvf.a
+
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lm
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+EXPORT_MAP := rte_pmd_thunderx_version.map
+
+LIBABIVER := 1
+
+OBJS_BASE_DRIVER=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_mbox.c
+SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_bsvf.c
+SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_svf.c
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_nicvf_rxtx.o += -fno-prefetch-loop-arrays
+endif
+CFLAGS_nicvf_rxtx.o += -Ofast
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/thunderx/base/meson.build b/src/spdk/dpdk/drivers/net/thunderx/base/meson.build
new file mode 100644
index 00000000..c9d5a8f4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/base/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+sources = [
+ 'nicvf_hw.c',
+ 'nicvf_mbox.c',
+ 'nicvf_bsvf.c'
+]
+
+base_lib = static_library('nicvf_base', sources,
+ c_args: cflags,
+ dependencies: static_rte_ethdev
+)
+
+base_objs = base_lib.extract_all_objects()
diff --git a/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_bsvf.c b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_bsvf.c
new file mode 100644
index 00000000..df8c016a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_bsvf.c
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <err.h>
+
+#include "nicvf_bsvf.h"
+#include "nicvf_plat.h"
+
+static STAILQ_HEAD(, svf_entry) head = STAILQ_HEAD_INITIALIZER(head);
+
+void
+nicvf_bsvf_push(struct svf_entry *entry)
+{
+ assert(entry != NULL);
+ assert(entry->vf != NULL);
+
+ STAILQ_INSERT_TAIL(&head, entry, next);
+}
+
+struct svf_entry *
+nicvf_bsvf_pop(void)
+{
+ struct svf_entry *entry;
+
+ assert(!STAILQ_EMPTY(&head));
+
+ entry = STAILQ_FIRST(&head);
+
+ assert(entry != NULL);
+ assert(entry->vf != NULL);
+
+ STAILQ_REMOVE_HEAD(&head, next);
+
+ return entry;
+}
+
+int
+nicvf_bsvf_empty(void)
+{
+ return STAILQ_EMPTY(&head);
+}
diff --git a/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_bsvf.h b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_bsvf.h
new file mode 100644
index 00000000..4c7615ca
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_bsvf.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef __THUNDERX_NICVF_BSVF_H__
+#define __THUNDERX_NICVF_BSVF_H__
+
+#include <sys/queue.h>
+
+struct nicvf;
+
+/**
+ * The base queue structure to hold secondary qsets.
+ */
+struct svf_entry {
+ STAILQ_ENTRY(svf_entry) next; /**< Next element's pointer */
+ struct nicvf *vf; /**< Holder of a secondary qset */
+};
+
+/**
+ * Enqueue new entry to secondary qsets.
+ *
+ * @param entry
+ * Entry to be enqueued.
+ */
+void
+nicvf_bsvf_push(struct svf_entry *entry);
+
+/**
+ * Dequeue an entry from secondary qsets.
+ *
+ * @return
+ * Dequeued entry.
+ */
+struct svf_entry *
+nicvf_bsvf_pop(void);
+
+/**
+ * Check if the queue of secondary qsets is empty.
+ *
+ * @return
+ * 0 on non-empty
+ * otherwise empty
+ */
+int
+nicvf_bsvf_empty(void);
+
+#endif /* __THUNDERX_NICVF_BSVF_H__ */
diff --git a/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw.c b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw.c
new file mode 100644
index 00000000..5b1abe20
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw.c
@@ -0,0 +1,918 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#include <unistd.h>
+#include <math.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "nicvf_plat.h"
+
+struct nicvf_reg_info {
+ uint32_t offset;
+ const char *name;
+};
+
+#define NICVF_REG_POLL_ITER_NR (10)
+#define NICVF_REG_POLL_DELAY_US (2000)
+#define NICVF_REG_INFO(reg) {reg, #reg}
+
+static const struct nicvf_reg_info nicvf_reg_tbl[] = {
+ NICVF_REG_INFO(NIC_VF_CFG),
+ NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
+ NICVF_REG_INFO(NIC_VF_INT),
+ NICVF_REG_INFO(NIC_VF_INT_W1S),
+ NICVF_REG_INFO(NIC_VF_ENA_W1C),
+ NICVF_REG_INFO(NIC_VF_ENA_W1S),
+ NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
+ NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
+};
+
+static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
+ {NIC_VNIC_RSS_KEY_0_4 + 0, "NIC_VNIC_RSS_KEY_0"},
+ {NIC_VNIC_RSS_KEY_0_4 + 8, "NIC_VNIC_RSS_KEY_1"},
+ {NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
+ {NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
+ {NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
+ {NIC_VNIC_TX_STAT_0_4 + 0, "NIC_VNIC_STAT_TX_OCTS"},
+ {NIC_VNIC_TX_STAT_0_4 + 8, "NIC_VNIC_STAT_TX_UCAST"},
+ {NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"},
+ {NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"},
+ {NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"},
+ {NIC_VNIC_RX_STAT_0_13 + 0, "NIC_VNIC_STAT_RX_OCTS"},
+ {NIC_VNIC_RX_STAT_0_13 + 8, "NIC_VNIC_STAT_RX_UCAST"},
+ {NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
+ {NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
+ {NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
+ {NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
+ {NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
+ {NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
+ {NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
+ {NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
+ {NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
+ {NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
+ {NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
+ {NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
+};
+
+static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
+};
+
+static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
+ NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
+ NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
+ NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
+};
+
+static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
+};
+
+static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
+};
+
+int
+nicvf_base_init(struct nicvf *nic)
+{
+ nic->hwcap = 0;
+ if (nic->subsystem_device_id == 0)
+ return NICVF_ERR_BASE_INIT;
+
+ if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF)
+ nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
+
+ if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN81XX_NICVF)
+ nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
+
+ if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN83XX_NICVF)
+ nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2 |
+ NICVF_CAP_DISABLE_APAD;
+
+ return NICVF_OK;
+}
+
+/* dump on stdout if data is NULL */
+int
+nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
+{
+ uint32_t i, q;
+ bool dump_stdout;
+
+ dump_stdout = data ? 0 : 1;
+
+ for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
+ if (dump_stdout)
+ nicvf_log("%24s = 0x%" PRIx64 "\n",
+ nicvf_reg_tbl[i].name,
+ nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
+ else
+ *data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);
+
+ for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
+ if (dump_stdout)
+ nicvf_log("%24s = 0x%" PRIx64 "\n",
+ nicvf_multi_reg_tbl[i].name,
+ nicvf_reg_read(nic,
+ nicvf_multi_reg_tbl[i].offset));
+ else
+ *data++ = nicvf_reg_read(nic,
+ nicvf_multi_reg_tbl[i].offset);
+
+ for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
+ for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
+ if (dump_stdout)
+ nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
+ nicvf_qset_cq_reg_tbl[i].name, q,
+ nicvf_queue_reg_read(nic,
+ nicvf_qset_cq_reg_tbl[i].offset, q));
+ else
+ *data++ = nicvf_queue_reg_read(nic,
+ nicvf_qset_cq_reg_tbl[i].offset, q);
+
+ for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
+ for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
+ if (dump_stdout)
+ nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
+ nicvf_qset_rq_reg_tbl[i].name, q,
+ nicvf_queue_reg_read(nic,
+ nicvf_qset_rq_reg_tbl[i].offset, q));
+ else
+ *data++ = nicvf_queue_reg_read(nic,
+ nicvf_qset_rq_reg_tbl[i].offset, q);
+
+ for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
+ for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
+ if (dump_stdout)
+ nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
+ nicvf_qset_sq_reg_tbl[i].name, q,
+ nicvf_queue_reg_read(nic,
+ nicvf_qset_sq_reg_tbl[i].offset, q));
+ else
+ *data++ = nicvf_queue_reg_read(nic,
+ nicvf_qset_sq_reg_tbl[i].offset, q);
+
+ for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
+ for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
+ if (dump_stdout)
+ nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
+ nicvf_qset_rbdr_reg_tbl[i].name, q,
+ nicvf_queue_reg_read(nic,
+ nicvf_qset_rbdr_reg_tbl[i].offset, q));
+ else
+ *data++ = nicvf_queue_reg_read(nic,
+ nicvf_qset_rbdr_reg_tbl[i].offset, q);
+ return 0;
+}
+
+int
+nicvf_reg_get_count(void)
+{
+ int nr_regs;
+
+ nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
+ nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
+ nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
+ MAX_CMP_QUEUES_PER_QS;
+ nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
+ MAX_RCV_QUEUES_PER_QS;
+ nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
+ MAX_SND_QUEUES_PER_QS;
+ nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
+ MAX_RCV_BUF_DESC_RINGS_PER_QS;
+
+ return nr_regs;
+}
+
+static int
+nicvf_qset_config_internal(struct nicvf *nic, bool enable)
+{
+ int ret;
+ struct pf_qs_cfg pf_qs_cfg = {.value = 0};
+
+ pf_qs_cfg.ena = enable ? 1 : 0;
+ pf_qs_cfg.vnic = nic->vf_id;
+ ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
+ return ret ? NICVF_ERR_SET_QS : 0;
+}
+
+/* Requests PF to assign and enable Qset */
+int
+nicvf_qset_config(struct nicvf *nic)
+{
+ /* Enable Qset */
+ return nicvf_qset_config_internal(nic, true);
+}
+
+int
+nicvf_qset_reclaim(struct nicvf *nic)
+{
+ /* Disable Qset */
+ return nicvf_qset_config_internal(nic, false);
+}
+
+static int
+cmpfunc(const void *a, const void *b)
+{
+ return (*(const uint32_t *)a - *(const uint32_t *)b);
+}
+
+static uint32_t
+nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
+{
+ uint32_t i;
+
+ qsort(list, entries, sizeof(uint32_t), cmpfunc);
+ for (i = 0; i < entries; i++)
+ if (val <= list[i])
+ break;
+ /* Not in the list */
+ if (i >= entries)
+ return 0;
+ else
+ return list[i];
+}
+
+static void
+nicvf_handle_qset_err_intr(struct nicvf *nic)
+{
+ uint16_t qidx;
+ uint64_t status;
+
+ nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
+ nicvf_reg_dump(nic, NULL);
+
+ for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
+ status = nicvf_queue_reg_read(
+ nic, NIC_QSET_CQ_0_7_STATUS, qidx);
+ if (!(status & NICVF_CQ_ERR_MASK))
+ continue;
+
+ if (status & NICVF_CQ_WR_FULL)
+ nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
+ if (status & NICVF_CQ_WR_DISABLE)
+ nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
+ if (status & NICVF_CQ_WR_FAULT)
+ nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
+ }
+
+ for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+ status = nicvf_queue_reg_read(
+ nic, NIC_QSET_SQ_0_7_STATUS, qidx);
+ if (!(status & NICVF_SQ_ERR_MASK))
+ continue;
+
+ if (status & NICVF_SQ_ERR_STOPPED)
+ nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
+ if (status & NICVF_SQ_ERR_SEND)
+ nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
+ if (status & NICVF_SQ_ERR_DPE)
+ nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
+ }
+
+ for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
+ status = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_STATUS0, qidx);
+ status &= NICVF_RBDR_FIFO_STATE_MASK;
+ status >>= NICVF_RBDR_FIFO_STATE_SHIFT;
+
+ if (status == RBDR_FIFO_STATE_FAIL)
+ nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
+ }
+
+ nicvf_disable_all_interrupts(nic);
+ abort();
+}
+
+/*
+ * Handle poll mode driver interested "mbox" and "queue-set error" interrupts.
+ * This function is not re-entrant.
+ * The caller should provide proper serialization.
+ */
+int
+nicvf_reg_poll_interrupts(struct nicvf *nic)
+{
+ int msg = 0;
+ uint64_t intr;
+
+ intr = nicvf_reg_read(nic, NIC_VF_INT);
+ if (intr & NICVF_INTR_MBOX_MASK) {
+ nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
+ msg = nicvf_handle_mbx_intr(nic);
+ }
+ if (intr & NICVF_INTR_QS_ERR_MASK) {
+ nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
+ nicvf_handle_qset_err_intr(nic);
+ }
+ return msg;
+}
+
+static int
+nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
+ uint32_t bit_pos, uint32_t bits, uint64_t val)
+{
+ uint64_t bit_mask;
+ uint64_t reg_val;
+ int timeout = NICVF_REG_POLL_ITER_NR;
+
+ bit_mask = (1ULL << bits) - 1;
+ bit_mask = (bit_mask << bit_pos);
+
+ while (timeout) {
+ reg_val = nicvf_queue_reg_read(nic, offset, qidx);
+ if (((reg_val & bit_mask) >> bit_pos) == val)
+ return NICVF_OK;
+ nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
+ timeout--;
+ }
+ return NICVF_ERR_REG_POLL;
+}
+
+int
+nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+ uint64_t status;
+ int timeout = NICVF_REG_POLL_ITER_NR;
+ struct nicvf_rbdr *rbdr = nic->rbdr;
+
+ /* Save head and tail pointers for freeing up buffers */
+ if (rbdr) {
+ rbdr->head = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
+ rbdr->tail = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
+ rbdr->next_tail = rbdr->tail;
+ }
+
+ /* Reset RBDR */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
+ NICVF_RBDR_RESET);
+
+ /* Disable RBDR */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+ if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
+ 62, 2, 0x00))
+ return NICVF_ERR_RBDR_DISABLE;
+
+ while (1) {
+ status = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx);
+ if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
+ break;
+ nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
+ timeout--;
+ if (!timeout)
+ return NICVF_ERR_RBDR_PREFETCH;
+ }
+
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
+ NICVF_RBDR_RESET);
+ if (nicvf_qset_poll_reg(nic, qidx,
+ NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+ return NICVF_ERR_RBDR_RESET1;
+
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+ if (nicvf_qset_poll_reg(nic, qidx,
+ NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return NICVF_ERR_RBDR_RESET2;
+
+ return NICVF_OK;
+}
+
+static int
+nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
+{
+ int val;
+
+ val = nicvf_log2_u32(len) - len_shift;
+
+ assert(val >= NICVF_QSIZE_MIN_VAL);
+ assert(val <= NICVF_QSIZE_MAX_VAL);
+ return val;
+}
+
+int
+nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
+{
+ int ret;
+ uint64_t head, tail;
+ struct nicvf_rbdr *rbdr = nic->rbdr;
+ struct rbdr_cfg rbdr_cfg = {.value = 0};
+
+ ret = nicvf_qset_rbdr_reclaim(nic, qidx);
+ if (ret)
+ return ret;
+
+ /* Set descriptor base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);
+
+ /* Enable RBDR & set queue size */
+ rbdr_cfg.ena = 1;
+ rbdr_cfg.reset = 0;
+ rbdr_cfg.ldwb = 0;
+ rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
+ RBDR_SIZE_SHIFT);
+ rbdr_cfg.avg_con = 0;
+ rbdr_cfg.lines = rbdr->buffsz / 128;
+
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);
+
+ /* Verify proper RBDR reset */
+ head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);
+
+ if (head | tail)
+ return NICVF_ERR_RBDR_RESET;
+
+ return NICVF_OK;
+}
+
+uint32_t
+nicvf_qsize_rbdr_roundup(uint32_t val)
+{
+ uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
+ RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
+ RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
+ RBDR_QUEUE_SZ_512K};
+ return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
+}
+
+int
+nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
+ uint16_t ridx, rbdr_pool_get_handler handler,
+ uint32_t max_buffs)
+{
+ struct rbdr_entry_t *desc, *desc0;
+ struct nicvf_rbdr *rbdr = nic->rbdr;
+ uint32_t count;
+ nicvf_iova_addr_t phy;
+
+ assert(rbdr != NULL);
+ desc = rbdr->desc;
+ count = 0;
+ /* Don't fill beyond max numbers of desc */
+ while (count < rbdr->qlen_mask) {
+ if (count >= max_buffs)
+ break;
+ desc0 = desc + count;
+ phy = handler(dev, nic);
+ if (phy) {
+ desc0->full_addr = phy;
+ count++;
+ } else {
+ break;
+ }
+ }
+ nicvf_smp_wmb();
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
+ rbdr->tail = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
+ rbdr->next_tail = rbdr->tail;
+ nicvf_smp_rmb();
+ return 0;
+}
+
+int
+nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
+{
+ return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+}
+
+int
+nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+ uint64_t head, tail;
+ struct sq_cfg sq_cfg;
+
+ sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+
+ /* Disable send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+
+ /* Check if SQ is stopped */
+ if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
+ NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
+ return NICVF_ERR_SQ_DISABLE;
+
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+ head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
+ if (head | tail)
+ return NICVF_ERR_SQ_RESET;
+
+ return 0;
+}
+
+int
+nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
+{
+ int ret;
+ struct sq_cfg sq_cfg = {.value = 0};
+
+ ret = nicvf_qset_sq_reclaim(nic, qidx);
+ if (ret)
+ return ret;
+
+ /* Send a mailbox msg to PF to config SQ */
+ if (nicvf_mbox_sq_config(nic, qidx))
+ return NICVF_ERR_SQ_PF_CFG;
+
+ /* Set queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);
+
+ /* Enable send queue & set queue size */
+ sq_cfg.cq_limit = 0;
+ sq_cfg.ena = 1;
+ sq_cfg.reset = 0;
+ sq_cfg.ldwb = 0;
+ sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
+ sq_cfg.tstmp_bgx_intf = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);
+
+ /* Ring doorbell so that H/W restarts processing SQEs */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+
+ return 0;
+}
+
+uint32_t
+nicvf_qsize_sq_roundup(uint32_t val)
+{
+ uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
+ SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
+ SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
+ SND_QUEUE_SZ_64K};
+ return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
+}
+
+int
+nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+ /* Disable receive queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+ return nicvf_mbox_rq_sync(nic);
+}
+
+int
+nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
+{
+ struct pf_rq_cfg pf_rq_cfg = {.value = 0};
+ struct rq_cfg rq_cfg = {.value = 0};
+
+ if (nicvf_qset_rq_reclaim(nic, qidx))
+ return NICVF_ERR_RQ_CLAIM;
+
+ pf_rq_cfg.strip_pre_l2 = 0;
+ /* First cache line of RBDR data will be allocated into L2C */
+ pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
+ pf_rq_cfg.cq_qs = nic->vf_id;
+ pf_rq_cfg.cq_idx = qidx;
+ pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
+ pf_rq_cfg.rbdr_cont_idx = 0;
+ pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
+ pf_rq_cfg.rbdr_strt_idx = 0;
+
+ /* Send a mailbox msg to PF to config RQ */
+ if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
+ return NICVF_ERR_RQ_PF_CFG;
+
+ /* Select Rx backpressure */
+ if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
+ return NICVF_ERR_RQ_BP_CFG;
+
+ /* Send a mailbox msg to PF to config RQ drop */
+ if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
+ return NICVF_ERR_RQ_DROP_CFG;
+
+ /* Enable Receive queue */
+ rq_cfg.ena = 1;
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);
+
+ return 0;
+}
+
+int
+nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+ uint64_t tail, head;
+
+ /* Disable completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+ if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
+ return NICVF_ERR_CQ_DISABLE;
+
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
+ head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
+ if (head | tail)
+ return NICVF_ERR_CQ_RESET;
+
+ /* Disable timer threshold (doesn't get reset upon CQ reset) */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+ return 0;
+}
+
+int
+nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
+{
+ int ret;
+ struct cq_cfg cq_cfg = {.value = 0};
+
+ ret = nicvf_qset_cq_reclaim(nic, qidx);
+ if (ret)
+ return ret;
+
+ /* Set completion queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);
+
+ cq_cfg.ena = 1;
+ cq_cfg.reset = 0;
+ /* Writes of CQE will be allocated into L2C */
+ cq_cfg.caching = 1;
+ cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
+ cq_cfg.avg_con = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+ return 0;
+}
+
+uint32_t
+nicvf_qsize_cq_roundup(uint32_t val)
+{
+ uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
+ CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
+ CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
+ CMP_QUEUE_SZ_64K};
+ return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
+}
+
+
+void
+nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
+{
+ uint64_t val;
+
+ val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
+ if (enable)
+ val |= (STRIP_FIRST_VLAN << 25);
+ else
+ val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
+
+ nic->vlan_strip = enable;
+ nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
+}
+
+void
+nicvf_first_skip_config(struct nicvf *nic, uint8_t num_dwords)
+{
+ uint64_t val;
+
+ val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
+ val &= ~(0xfULL);
+ val |= (num_dwords & 0xf);
+
+ nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
+}
+
+void
+nicvf_apad_config(struct nicvf *nic, bool enable)
+{
+ uint64_t val;
+
+ /* APAD always enabled in this device */
+ if (!(nic->hwcap & NICVF_CAP_DISABLE_APAD))
+ return;
+
+ val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
+ if (enable)
+ val &= ~(1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
+ else
+ val |= (1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
+
+ nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
+}
+
+void
+nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
+{
+ int idx;
+ uint64_t addr, val;
+ uint64_t *keyptr = (uint64_t *)key;
+
+ addr = NIC_VNIC_RSS_KEY_0_4;
+ for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
+ val = nicvf_cpu_to_be_64(*keyptr);
+ nicvf_reg_write(nic, addr, val);
+ addr += sizeof(uint64_t);
+ keyptr++;
+ }
+}
+
+void
+nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
+{
+ int idx;
+ uint64_t addr, val;
+ uint64_t *keyptr = (uint64_t *)key;
+
+ addr = NIC_VNIC_RSS_KEY_0_4;
+ for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
+ val = nicvf_reg_read(nic, addr);
+ *keyptr = nicvf_be_to_cpu_64(val);
+ addr += sizeof(uint64_t);
+ keyptr++;
+ }
+}
+
+void
+nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
+{
+ nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
+}
+
+uint64_t
+nicvf_rss_get_cfg(struct nicvf *nic)
+{
+ return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+}
+
+int
+nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
+{
+ uint32_t idx;
+ struct nicvf_rss_reta_info *rss = &nic->rss_info;
+
+ /* result will be stored in nic->rss_info.rss_size */
+ if (nicvf_mbox_get_rss_size(nic))
+ return NICVF_ERR_RSS_GET_SZ;
+
+ assert(rss->rss_size > 0);
+ rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);
+ for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
+ rss->ind_tbl[idx] = tbl[idx];
+
+ if (nicvf_mbox_config_rss(nic))
+ return NICVF_ERR_RSS_TBL_UPDATE;
+
+ return NICVF_OK;
+}
+
+int
+nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
+{
+ uint32_t idx;
+ struct nicvf_rss_reta_info *rss = &nic->rss_info;
+
+ /* result will be stored in nic->rss_info.rss_size */
+ if (nicvf_mbox_get_rss_size(nic))
+ return NICVF_ERR_RSS_GET_SZ;
+
+ assert(rss->rss_size > 0);
+ rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);
+
+ for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
+ tbl[idx] = rss->ind_tbl[idx];
+
+ return NICVF_OK;
+}
+
+int
+nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
+{
+ uint32_t idx;
+ uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
+ uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
+ };
+
+ if (nic->cpi_alg != CPI_ALG_NONE)
+ return -EINVAL;
+
+ if (cfg == 0)
+ return -EINVAL;
+
+ /* Update default RSS key and cfg */
+ nicvf_rss_set_key(nic, default_key);
+ nicvf_rss_set_cfg(nic, cfg);
+
+ /* Update default RSS RETA */
+ for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
+ default_reta[idx] = idx % qcnt;
+
+ return nicvf_rss_reta_update(nic, default_reta,
+ NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
+int
+nicvf_rss_term(struct nicvf *nic)
+{
+ uint32_t idx;
+ uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];
+
+ nicvf_rss_set_cfg(nic, 0);
+ /* Redirect the output to 0th queue */
+ for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
+ disable_rss[idx] = 0;
+
+ return nicvf_rss_reta_update(nic, disable_rss,
+ NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
+int
+nicvf_loopback_config(struct nicvf *nic, bool enable)
+{
+ if (enable && nic->loopback_supported == 0)
+ return NICVF_ERR_LOOPBACK_CFG;
+
+ return nicvf_mbox_loopback_config(nic, enable);
+}
+
+void
+nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
+{
+ stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
+ stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
+ stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
+ stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
+ stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
+ stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
+ stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
+ stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
+ stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
+ stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
+ stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
+ stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
+ stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
+ stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);
+
+ stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
+ stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
+ stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
+ stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
+ stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
+}
+
+void
+nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
+ uint16_t qidx)
+{
+ qstats->q_rx_bytes =
+ nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
+ qstats->q_rx_packets =
+ nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
+}
+
+void
+nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
+ uint16_t qidx)
+{
+ qstats->q_tx_bytes =
+ nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
+ qstats->q_tx_packets =
+ nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);
+}
diff --git a/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw.h b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw.h
new file mode 100644
index 00000000..fd13ea84
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef _THUNDERX_NICVF_HW_H
+#define _THUNDERX_NICVF_HW_H
+
+#include <stdint.h>
+
+#include "nicvf_hw_defs.h"
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF 0x0011
+#define PCI_DEVICE_ID_THUNDERX_NICVF 0xA034
+#define PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF 0xA11E
+#define PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF 0xA134
+#define PCI_SUB_DEVICE_ID_CN81XX_NICVF 0xA234
+#define PCI_SUB_DEVICE_ID_CN83XX_NICVF 0xA334
+
+#define NICVF_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#define NICVF_GET_RX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
+#define NICVF_GET_TX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
+
+#define NICVF_CAP_TUNNEL_PARSING (1ULL << 0)
+/* Additional word in Rx descriptor to hold optional tunneling extension info */
+#define NICVF_CAP_CQE_RX2 (1ULL << 1)
+/* The device capable of setting NIC_CQE_RX_S[APAD] == 0 */
+#define NICVF_CAP_DISABLE_APAD (1ULL << 2)
+
+enum nicvf_tns_mode {
+ NIC_TNS_BYPASS_MODE,
+ NIC_TNS_MODE,
+};
+
+enum nicvf_err_e {
+ NICVF_OK,
+ NICVF_ERR_SET_QS = -8191,/* -8191 */
+ NICVF_ERR_RESET_QS, /* -8190 */
+ NICVF_ERR_REG_POLL, /* -8189 */
+ NICVF_ERR_RBDR_RESET, /* -8188 */
+ NICVF_ERR_RBDR_DISABLE, /* -8187 */
+ NICVF_ERR_RBDR_PREFETCH, /* -8186 */
+ NICVF_ERR_RBDR_RESET1, /* -8185 */
+ NICVF_ERR_RBDR_RESET2, /* -8184 */
+ NICVF_ERR_RQ_CLAIM, /* -8183 */
+ NICVF_ERR_RQ_PF_CFG, /* -8182 */
+ NICVF_ERR_RQ_BP_CFG, /* -8181 */
+ NICVF_ERR_RQ_DROP_CFG, /* -8180 */
+ NICVF_ERR_CQ_DISABLE, /* -8179 */
+ NICVF_ERR_CQ_RESET, /* -8178 */
+ NICVF_ERR_SQ_DISABLE, /* -8177 */
+ NICVF_ERR_SQ_RESET, /* -8176 */
+ NICVF_ERR_SQ_PF_CFG, /* -8175 */
+ NICVF_ERR_LOOPBACK_CFG, /* -8174 */
+ NICVF_ERR_BASE_INIT, /* -8173 */
+ NICVF_ERR_RSS_TBL_UPDATE,/* -8172 */
+ NICVF_ERR_RSS_GET_SZ, /* -8171 */
+};
+
+typedef nicvf_iova_addr_t (*rbdr_pool_get_handler)(void *dev, void *opaque);
+
+struct nicvf_hw_rx_qstats {
+ uint64_t q_rx_bytes;
+ uint64_t q_rx_packets;
+};
+
+struct nicvf_hw_tx_qstats {
+ uint64_t q_tx_bytes;
+ uint64_t q_tx_packets;
+};
+
+struct nicvf_hw_stats {
+ uint64_t rx_bytes;
+ uint64_t rx_ucast_frames;
+ uint64_t rx_bcast_frames;
+ uint64_t rx_mcast_frames;
+ uint64_t rx_fcs_errors;
+ uint64_t rx_l2_errors;
+ uint64_t rx_drop_red;
+ uint64_t rx_drop_red_bytes;
+ uint64_t rx_drop_overrun;
+ uint64_t rx_drop_overrun_bytes;
+ uint64_t rx_drop_bcast;
+ uint64_t rx_drop_mcast;
+ uint64_t rx_drop_l3_bcast;
+ uint64_t rx_drop_l3_mcast;
+
+ uint64_t tx_bytes_ok;
+ uint64_t tx_ucast_frames_ok;
+ uint64_t tx_bcast_frames_ok;
+ uint64_t tx_mcast_frames_ok;
+ uint64_t tx_drops;
+};
+
+struct nicvf_rss_reta_info {
+ uint8_t hash_bits;
+ uint16_t rss_size;
+ uint8_t ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+};
+
+/* Common structs used in DPDK and base layer are defined in DPDK layer */
+#include "../nicvf_struct.h"
+
+NICVF_STATIC_ASSERT(sizeof(struct nicvf_rbdr) <= 128);
+NICVF_STATIC_ASSERT(sizeof(struct nicvf_txq) <= 128);
+NICVF_STATIC_ASSERT(sizeof(struct nicvf_rxq) <= 128);
+
+static inline void
+nicvf_reg_write(struct nicvf *nic, uint32_t offset, uint64_t val)
+{
+ nicvf_addr_write(nic->reg_base + offset, val);
+}
+
+static inline uint64_t
+nicvf_reg_read(struct nicvf *nic, uint32_t offset)
+{
+ return nicvf_addr_read(nic->reg_base + offset);
+}
+
+static inline uintptr_t
+nicvf_qset_base(struct nicvf *nic, uint32_t qidx)
+{
+ return nic->reg_base + (qidx << NIC_Q_NUM_SHIFT);
+}
+
+static inline void
+nicvf_queue_reg_write(struct nicvf *nic, uint32_t offset, uint32_t qidx,
+ uint64_t val)
+{
+ nicvf_addr_write(nicvf_qset_base(nic, qidx) + offset, val);
+}
+
+static inline uint64_t
+nicvf_queue_reg_read(struct nicvf *nic, uint32_t offset, uint32_t qidx)
+{
+ return nicvf_addr_read(nicvf_qset_base(nic, qidx) + offset);
+}
+
+static inline void
+nicvf_disable_all_interrupts(struct nicvf *nic)
+{
+ nicvf_reg_write(nic, NIC_VF_ENA_W1C, NICVF_INTR_ALL_MASK);
+ nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_ALL_MASK);
+}
+
+static inline uint32_t
+nicvf_hw_version(struct nicvf *nic)
+{
+ return nic->subsystem_device_id;
+}
+
+static inline uint64_t
+nicvf_hw_cap(struct nicvf *nic)
+{
+ return nic->hwcap;
+}
+
+int nicvf_base_init(struct nicvf *nic);
+
+int nicvf_reg_get_count(void);
+int nicvf_reg_poll_interrupts(struct nicvf *nic);
+int nicvf_reg_dump(struct nicvf *nic, uint64_t *data);
+
+int nicvf_qset_config(struct nicvf *nic);
+int nicvf_qset_reclaim(struct nicvf *nic);
+
+int nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx);
+int nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx);
+int nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
+ uint16_t ridx, rbdr_pool_get_handler handler,
+ uint32_t max_buffs);
+int nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx);
+
+int nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx,
+ struct nicvf_rxq *rxq);
+int nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx);
+
+int nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx,
+ struct nicvf_rxq *rxq);
+int nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx);
+
+int nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx,
+ struct nicvf_txq *txq);
+int nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx);
+
+uint32_t nicvf_qsize_rbdr_roundup(uint32_t val);
+uint32_t nicvf_qsize_cq_roundup(uint32_t val);
+uint32_t nicvf_qsize_sq_roundup(uint32_t val);
+
+void nicvf_vlan_hw_strip(struct nicvf *nic, bool enable);
+
+void nicvf_apad_config(struct nicvf *nic, bool enable);
+void nicvf_first_skip_config(struct nicvf *nic, uint8_t dwords);
+
+int nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg);
+int nicvf_rss_term(struct nicvf *nic);
+
+int nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count);
+int nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count);
+
+void nicvf_rss_set_key(struct nicvf *nic, uint8_t *key);
+void nicvf_rss_get_key(struct nicvf *nic, uint8_t *key);
+
+void nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val);
+uint64_t nicvf_rss_get_cfg(struct nicvf *nic);
+
+int nicvf_loopback_config(struct nicvf *nic, bool enable);
+
+void nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats);
+void nicvf_hw_get_rx_qstats(struct nicvf *nic,
+ struct nicvf_hw_rx_qstats *qstats, uint16_t qidx);
+void nicvf_hw_get_tx_qstats(struct nicvf *nic,
+ struct nicvf_hw_tx_qstats *qstats, uint16_t qidx);
+
+#endif /* _THUNDERX_NICVF_HW_H */
diff --git a/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h
new file mode 100644
index 00000000..b12c8ec5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -0,0 +1,1200 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef _THUNDERX_NICVF_HW_DEFS_H
+#define _THUNDERX_NICVF_HW_DEFS_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "nicvf_plat.h"
+
+/* Virtual function register offsets */
+
+#define NIC_VF_CFG (0x000020)
+#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
+#define NIC_VF_INT (0x000200)
+#define NIC_VF_INT_W1S (0x000220)
+#define NIC_VF_ENA_W1C (0x000240)
+#define NIC_VF_ENA_W1S (0x000260)
+
+#define NIC_VNIC_RSS_CFG (0x0020E0)
+#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
+#define NIC_VNIC_TX_STAT_0_4 (0x004000)
+#define NIC_VNIC_RX_STAT_0_13 (0x004100)
+#define NIC_VNIC_RQ_GEN_CFG (0x010010)
+
+#define NIC_QSET_CQ_0_7_CFG (0x010400)
+#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
+#define NIC_QSET_CQ_0_7_THRESH (0x010410)
+#define NIC_QSET_CQ_0_7_BASE (0x010420)
+#define NIC_QSET_CQ_0_7_HEAD (0x010428)
+#define NIC_QSET_CQ_0_7_TAIL (0x010430)
+#define NIC_QSET_CQ_0_7_DOOR (0x010438)
+#define NIC_QSET_CQ_0_7_STATUS (0x010440)
+#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
+#define NIC_QSET_CQ_0_7_DEBUG (0x010450)
+
+#define NIC_QSET_RQ_0_7_CFG (0x010600)
+#define NIC_QSET_RQ_0_7_STATUS0 (0x010700)
+#define NIC_QSET_RQ_0_7_STATUS1 (0x010708)
+
+#define NIC_QSET_SQ_0_7_CFG (0x010800)
+#define NIC_QSET_SQ_0_7_THRESH (0x010810)
+#define NIC_QSET_SQ_0_7_BASE (0x010820)
+#define NIC_QSET_SQ_0_7_HEAD (0x010828)
+#define NIC_QSET_SQ_0_7_TAIL (0x010830)
+#define NIC_QSET_SQ_0_7_DOOR (0x010838)
+#define NIC_QSET_SQ_0_7_STATUS (0x010840)
+#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
+#define NIC_QSET_SQ_0_7_STATUS0 (0x010900)
+#define NIC_QSET_SQ_0_7_STATUS1 (0x010908)
+
+#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
+#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
+#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
+#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
+#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
+#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
+#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
+#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
+#define NIC_QSET_RBDR_0_1_PRFCH_STATUS (0x010C50)
+
+/* vNIC HW Constants */
+
+#define NIC_Q_NUM_SHIFT 18
+
+#define MAX_QUEUE_SET 128
+#define MAX_RCV_QUEUES_PER_QS 8
+#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
+#define MAX_SND_QUEUES_PER_QS 8
+#define MAX_CMP_QUEUES_PER_QS 8
+
+#define NICVF_INTR_CQ_SHIFT 0
+#define NICVF_INTR_SQ_SHIFT 8
+#define NICVF_INTR_RBDR_SHIFT 16
+#define NICVF_INTR_PKT_DROP_SHIFT 20
+#define NICVF_INTR_TCP_TIMER_SHIFT 21
+#define NICVF_INTR_MBOX_SHIFT 22
+#define NICVF_INTR_QS_ERR_SHIFT 23
+
+#define NICVF_QS_RQ_DIS_APAD_SHIFT 22
+
+#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
+#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
+#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
+#define NICVF_INTR_PKT_DROP_MASK (1 << NICVF_INTR_PKT_DROP_SHIFT)
+#define NICVF_INTR_TCP_TIMER_MASK (1 << NICVF_INTR_TCP_TIMER_SHIFT)
+#define NICVF_INTR_MBOX_MASK (1 << NICVF_INTR_MBOX_SHIFT)
+#define NICVF_INTR_QS_ERR_MASK (1 << NICVF_INTR_QS_ERR_SHIFT)
+#define NICVF_INTR_ALL_MASK (0x7FFFFF)
+
+#define NICVF_CQ_WR_FULL (1ULL << 26)
+#define NICVF_CQ_WR_DISABLE (1ULL << 25)
+#define NICVF_CQ_WR_FAULT (1ULL << 24)
+#define NICVF_CQ_ERR_MASK (NICVF_CQ_WR_FULL |\
+ NICVF_CQ_WR_DISABLE |\
+ NICVF_CQ_WR_FAULT)
+#define NICVF_CQ_CQE_COUNT_MASK (0xFFFF)
+
+#define NICVF_SQ_ERR_STOPPED (1ULL << 21)
+#define NICVF_SQ_ERR_SEND (1ULL << 20)
+#define NICVF_SQ_ERR_DPE (1ULL << 19)
+#define NICVF_SQ_ERR_MASK (NICVF_SQ_ERR_STOPPED |\
+ NICVF_SQ_ERR_SEND |\
+ NICVF_SQ_ERR_DPE)
+#define NICVF_SQ_STATUS_STOPPED_BIT (21)
+
+#define NICVF_RBDR_FIFO_STATE_SHIFT (62)
+#define NICVF_RBDR_FIFO_STATE_MASK (3ULL << NICVF_RBDR_FIFO_STATE_SHIFT)
+#define NICVF_RBDR_COUNT_MASK (0x7FFFF)
+
+/* Queue reset */
+#define NICVF_CQ_RESET (1ULL << 41)
+#define NICVF_SQ_RESET (1ULL << 17)
+#define NICVF_RBDR_RESET (1ULL << 43)
+
+/* RSS constants */
+#define NIC_MAX_RSS_HASH_BITS (8)
+#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
+#define RSS_HASH_KEY_SIZE (5) /* 320 bit key */
+#define RSS_HASH_KEY_BYTE_SIZE (40) /* 320 bit key */
+
+#define RSS_L2_EXTENDED_HASH_ENA (1 << 0)
+#define RSS_IP_ENA (1 << 1)
+#define RSS_TCP_ENA (1 << 2)
+#define RSS_TCP_SYN_ENA (1 << 3)
+#define RSS_UDP_ENA (1 << 4)
+#define RSS_L4_EXTENDED_ENA (1 << 5)
+#define RSS_L3_BI_DIRECTION_ENA (1 << 7)
+#define RSS_L4_BI_DIRECTION_ENA (1 << 8)
+#define RSS_TUN_VXLAN_ENA (1 << 9)
+#define RSS_TUN_GENEVE_ENA (1 << 10)
+#define RSS_TUN_NVGRE_ENA (1 << 11)
+
+#define RBDR_QUEUE_SZ_8K (8 * 1024)
+#define RBDR_QUEUE_SZ_16K (16 * 1024)
+#define RBDR_QUEUE_SZ_32K (32 * 1024)
+#define RBDR_QUEUE_SZ_64K (64 * 1024)
+#define RBDR_QUEUE_SZ_128K (128 * 1024)
+#define RBDR_QUEUE_SZ_256K (256 * 1024)
+#define RBDR_QUEUE_SZ_512K (512 * 1024)
+#define RBDR_QUEUE_SZ_MAX RBDR_QUEUE_SZ_512K
+
+#define RBDR_SIZE_SHIFT (13) /* 8k */
+
+#define SND_QUEUE_SZ_1K (1 * 1024)
+#define SND_QUEUE_SZ_2K (2 * 1024)
+#define SND_QUEUE_SZ_4K (4 * 1024)
+#define SND_QUEUE_SZ_8K (8 * 1024)
+#define SND_QUEUE_SZ_16K (16 * 1024)
+#define SND_QUEUE_SZ_32K (32 * 1024)
+#define SND_QUEUE_SZ_64K (64 * 1024)
+#define SND_QUEUE_SZ_MAX SND_QUEUE_SZ_64K
+
+#define SND_QSIZE_SHIFT (10) /* 1k */
+
+#define CMP_QUEUE_SZ_1K (1 * 1024)
+#define CMP_QUEUE_SZ_2K (2 * 1024)
+#define CMP_QUEUE_SZ_4K (4 * 1024)
+#define CMP_QUEUE_SZ_8K (8 * 1024)
+#define CMP_QUEUE_SZ_16K (16 * 1024)
+#define CMP_QUEUE_SZ_32K (32 * 1024)
+#define CMP_QUEUE_SZ_64K (64 * 1024)
+#define CMP_QUEUE_SZ_MAX CMP_QUEUE_SZ_64K
+
+#define CMP_QSIZE_SHIFT (10) /* 1k */
+
+#define NICVF_QSIZE_MIN_VAL (0)
+#define NICVF_QSIZE_MAX_VAL (6)
+
+/* Min/Max packet size */
+#define NIC_HW_MIN_FRS (64)
+/* ETH_HLEN+ETH_FCS_LEN+2*VLAN_HLEN */
+#define NIC_HW_L2_OVERHEAD (26)
+#define NIC_HW_MAX_MTU (9190)
+#define NIC_HW_MAX_FRS (NIC_HW_MAX_MTU + NIC_HW_L2_OVERHEAD)
+#define NIC_HW_MAX_SEGS (12)
+
+/* Descriptor alignments */
+#define NICVF_RBDR_BASE_ALIGN_BYTES (128) /* 7 bits */
+#define NICVF_CQ_BASE_ALIGN_BYTES (512) /* 9 bits */
+#define NICVF_SQ_BASE_ALIGN_BYTES (128) /* 7 bits */
+
+#define NICVF_CQE_RBPTR_WORD (6)
+#define NICVF_CQE_RX2_RBPTR_WORD (7)
+
+#define NICVF_STATIC_ASSERT(s) _Static_assert(s, #s)
+#define assert_primary(nic) assert((nic)->sqs_mode == 0)
+
+typedef uint64_t nicvf_iova_addr_t;
+
+/* vNIC HW Enumerations */
+
+enum nic_send_ld_type_e {
+ NIC_SEND_LD_TYPE_E_LDD,
+ NIC_SEND_LD_TYPE_E_LDT,
+ NIC_SEND_LD_TYPE_E_LDWB,
+ NIC_SEND_LD_TYPE_E_ENUM_LAST,
+};
+
+enum ether_type_algorithm {
+ ETYPE_ALG_NONE,
+ ETYPE_ALG_SKIP,
+ ETYPE_ALG_ENDPARSE,
+ ETYPE_ALG_VLAN,
+ ETYPE_ALG_VLAN_STRIP,
+};
+
+enum layer3_type {
+ L3TYPE_NONE,
+ L3TYPE_GRH,
+ L3TYPE_IPV4 = 0x4,
+ L3TYPE_IPV4_OPTIONS = 0x5,
+ L3TYPE_IPV6 = 0x6,
+ L3TYPE_IPV6_OPTIONS = 0x7,
+ L3TYPE_ET_STOP = 0xD,
+ L3TYPE_OTHER = 0xE,
+};
+
+#define NICVF_L3TYPE_OPTIONS_MASK ((uint8_t)1)
+#define NICVF_L3TYPE_IPVX_MASK ((uint8_t)0x06)
+
+enum layer4_type {
+ L4TYPE_NONE,
+ L4TYPE_IPSEC_ESP,
+ L4TYPE_IPFRAG,
+ L4TYPE_IPCOMP,
+ L4TYPE_TCP,
+ L4TYPE_UDP,
+ L4TYPE_SCTP,
+ L4TYPE_GRE,
+ L4TYPE_ROCE_BTH,
+ L4TYPE_OTHER = 0xE,
+};
+
+/* CPI and RSSI configuration */
+enum cpi_algorithm_type {
+ CPI_ALG_NONE,
+ CPI_ALG_VLAN,
+ CPI_ALG_VLAN16,
+ CPI_ALG_DIFF,
+};
+
+enum rss_algorithm_type {
+ RSS_ALG_NONE,
+ RSS_ALG_PORT,
+ RSS_ALG_IP,
+ RSS_ALG_TCP_IP,
+ RSS_ALG_UDP_IP,
+ RSS_ALG_SCTP_IP,
+ RSS_ALG_GRE_IP,
+ RSS_ALG_ROCE,
+};
+
+enum rss_hash_cfg {
+ RSS_HASH_L2ETC,
+ RSS_HASH_IP,
+ RSS_HASH_TCP,
+ RSS_HASH_TCP_SYN_DIS,
+ RSS_HASH_UDP,
+ RSS_HASH_L4ETC,
+ RSS_HASH_ROCE,
+ RSS_L3_BIDI,
+ RSS_L4_BIDI,
+};
+
+/* Completion queue entry types */
+enum cqe_type {
+ CQE_TYPE_INVALID,
+ CQE_TYPE_RX = 0x2,
+ CQE_TYPE_RX_SPLIT = 0x3,
+ CQE_TYPE_RX_TCP = 0x4,
+ CQE_TYPE_SEND = 0x8,
+ CQE_TYPE_SEND_PTP = 0x9,
+};
+
+enum cqe_rx_tcp_status {
+ CQE_RX_STATUS_VALID_TCP_CNXT,
+ CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
+};
+
+enum cqe_send_status {
+ CQE_SEND_STATUS_GOOD,
+ CQE_SEND_STATUS_DESC_FAULT = 0x01,
+ CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
+ CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
+ CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
+ CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
+ CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
+ CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
+ CQE_SEND_STATUS_LOCK_VIOL = 0x84,
+ CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
+ CQE_SEND_STATUS_DATA_FAULT = 0x86,
+ CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
+ CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
+ CQE_SEND_STATUS_MEM_FAULT = 0x89,
+ CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
+ CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
+};
+
+enum cqe_rx_tcp_end_reason {
+ CQE_RX_TCP_END_FIN_FLAG_DET,
+ CQE_RX_TCP_END_INVALID_FLAG,
+ CQE_RX_TCP_END_TIMEOUT,
+ CQE_RX_TCP_END_OUT_OF_SEQ,
+ CQE_RX_TCP_END_PKT_ERR,
+ CQE_RX_TCP_END_QS_DISABLED = 0x0F,
+};
+
+/* Packet protocol level error enumeration */
+enum cqe_rx_err_level {
+ CQE_RX_ERRLVL_RE,
+ CQE_RX_ERRLVL_L2,
+ CQE_RX_ERRLVL_L3,
+ CQE_RX_ERRLVL_L4,
+};
+
+/* Packet protocol level error type enumeration */
+enum cqe_rx_err_opcode {
+ CQE_RX_ERR_RE_NONE,
+ CQE_RX_ERR_RE_PARTIAL,
+ CQE_RX_ERR_RE_JABBER,
+ CQE_RX_ERR_RE_FCS = 0x7,
+ CQE_RX_ERR_RE_TERMINATE = 0x9,
+ CQE_RX_ERR_RE_RX_CTL = 0xb,
+ CQE_RX_ERR_PREL2_ERR = 0x1f,
+ CQE_RX_ERR_L2_FRAGMENT = 0x20,
+ CQE_RX_ERR_L2_OVERRUN = 0x21,
+ CQE_RX_ERR_L2_PFCS = 0x22,
+ CQE_RX_ERR_L2_PUNY = 0x23,
+ CQE_RX_ERR_L2_MAL = 0x24,
+ CQE_RX_ERR_L2_OVERSIZE = 0x25,
+ CQE_RX_ERR_L2_UNDERSIZE = 0x26,
+ CQE_RX_ERR_L2_LENMISM = 0x27,
+ CQE_RX_ERR_L2_PCLP = 0x28,
+ CQE_RX_ERR_IP_NOT = 0x41,
+ CQE_RX_ERR_IP_CHK = 0x42,
+ CQE_RX_ERR_IP_MAL = 0x43,
+ CQE_RX_ERR_IP_MALD = 0x44,
+ CQE_RX_ERR_IP_HOP = 0x45,
+ CQE_RX_ERR_L3_ICRC = 0x46,
+ CQE_RX_ERR_L3_PCLP = 0x47,
+ CQE_RX_ERR_L4_MAL = 0x61,
+ CQE_RX_ERR_L4_CHK = 0x62,
+ CQE_RX_ERR_UDP_LEN = 0x63,
+ CQE_RX_ERR_L4_PORT = 0x64,
+ CQE_RX_ERR_TCP_FLAG = 0x65,
+ CQE_RX_ERR_TCP_OFFSET = 0x66,
+ CQE_RX_ERR_L4_PCLP = 0x67,
+ CQE_RX_ERR_RBDR_TRUNC = 0x70,
+};
+
+enum send_l4_csum_type {
+ SEND_L4_CSUM_DISABLE,
+ SEND_L4_CSUM_UDP,
+ SEND_L4_CSUM_TCP,
+};
+
+enum send_crc_alg {
+ SEND_CRCALG_CRC32,
+ SEND_CRCALG_CRC32C,
+ SEND_CRCALG_ICRC,
+};
+
+enum send_load_type {
+ SEND_LD_TYPE_LDD,
+ SEND_LD_TYPE_LDT,
+ SEND_LD_TYPE_LDWB,
+};
+
+enum send_mem_alg_type {
+ SEND_MEMALG_SET,
+ SEND_MEMALG_ADD = 0x08,
+ SEND_MEMALG_SUB = 0x09,
+ SEND_MEMALG_ADDLEN = 0x0A,
+ SEND_MEMALG_SUBLEN = 0x0B,
+};
+
+enum send_mem_dsz_type {
+ SEND_MEMDSZ_B64,
+ SEND_MEMDSZ_B32,
+ SEND_MEMDSZ_B8 = 0x03,
+};
+
+enum sq_subdesc_type {
+ SQ_DESC_TYPE_INVALID,
+ SQ_DESC_TYPE_HEADER,
+ SQ_DESC_TYPE_CRC,
+ SQ_DESC_TYPE_IMMEDIATE,
+ SQ_DESC_TYPE_GATHER,
+ SQ_DESC_TYPE_MEMORY,
+};
+
+enum l3_type_t {
+ L3_NONE,
+ L3_IPV4 = 0x04,
+ L3_IPV4_OPT = 0x05,
+ L3_IPV6 = 0x06,
+ L3_IPV6_OPT = 0x07,
+ L3_ET_STOP = 0x0D,
+ L3_OTHER = 0x0E
+};
+
+enum l4_type_t {
+ L4_NONE,
+ L4_IPSEC_ESP = 0x01,
+ L4_IPFRAG = 0x02,
+ L4_IPCOMP = 0x03,
+ L4_TCP = 0x04,
+ L4_UDP_PASS1 = 0x05,
+ L4_GRE = 0x07,
+ L4_UDP_PASS2 = 0x08,
+ L4_UDP_GENEVE = 0x09,
+ L4_UDP_VXLAN = 0x0A,
+ L4_NVGRE = 0x0C,
+ L4_OTHER = 0x0E
+};
+
+enum vlan_strip {
+ NO_STRIP,
+ STRIP_FIRST_VLAN,
+ STRIP_SECOND_VLAN,
+ STRIP_RESERV,
+};
+
+enum rbdr_state {
+ RBDR_FIFO_STATE_INACTIVE,
+ RBDR_FIFO_STATE_ACTIVE,
+ RBDR_FIFO_STATE_RESET,
+ RBDR_FIFO_STATE_FAIL,
+};
+
+enum rq_cache_allocation {
+ RQ_CACHE_ALLOC_OFF,
+ RQ_CACHE_ALLOC_ALL,
+ RQ_CACHE_ALLOC_FIRST,
+ RQ_CACHE_ALLOC_TWO,
+};
+
+enum cq_rx_errlvl_e {
+ CQ_ERRLVL_MAC,
+ CQ_ERRLVL_L2,
+ CQ_ERRLVL_L3,
+ CQ_ERRLVL_L4,
+};
+
+enum cq_rx_errop_e {
+ CQ_RX_ERROP_RE_NONE,
+ CQ_RX_ERROP_RE_PARTIAL = 0x1,
+ CQ_RX_ERROP_RE_JABBER = 0x2,
+ CQ_RX_ERROP_RE_FCS = 0x7,
+ CQ_RX_ERROP_RE_TERMINATE = 0x9,
+ CQ_RX_ERROP_RE_RX_CTL = 0xb,
+ CQ_RX_ERROP_PREL2_ERR = 0x1f,
+ CQ_RX_ERROP_L2_FRAGMENT = 0x20,
+ CQ_RX_ERROP_L2_OVERRUN = 0x21,
+ CQ_RX_ERROP_L2_PFCS = 0x22,
+ CQ_RX_ERROP_L2_PUNY = 0x23,
+ CQ_RX_ERROP_L2_MAL = 0x24,
+ CQ_RX_ERROP_L2_OVERSIZE = 0x25,
+ CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
+ CQ_RX_ERROP_L2_LENMISM = 0x27,
+ CQ_RX_ERROP_L2_PCLP = 0x28,
+ CQ_RX_ERROP_IP_NOT = 0x41,
+ CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
+ CQ_RX_ERROP_IP_MAL = 0x43,
+ CQ_RX_ERROP_IP_MALD = 0x44,
+ CQ_RX_ERROP_IP_HOP = 0x45,
+ CQ_RX_ERROP_L3_ICRC = 0x46,
+ CQ_RX_ERROP_L3_PCLP = 0x47,
+ CQ_RX_ERROP_L4_MAL = 0x61,
+ CQ_RX_ERROP_L4_CHK = 0x62,
+ CQ_RX_ERROP_UDP_LEN = 0x63,
+ CQ_RX_ERROP_L4_PORT = 0x64,
+ CQ_RX_ERROP_TCP_FLAG = 0x65,
+ CQ_RX_ERROP_TCP_OFFSET = 0x66,
+ CQ_RX_ERROP_L4_PCLP = 0x67,
+ CQ_RX_ERROP_RBDR_TRUNC = 0x70,
+};
+
+enum cq_tx_errop_e {
+ CQ_TX_ERROP_GOOD,
+ CQ_TX_ERROP_DESC_FAULT = 0x10,
+ CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
+ CQ_TX_ERROP_SUBDC_ERR = 0x12,
+ CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
+ CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
+ CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
+ CQ_TX_ERROP_LOCK_VIOL = 0x83,
+ CQ_TX_ERROP_DATA_FAULT = 0x84,
+ CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
+ CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
+ CQ_TX_ERROP_MEM_FAULT = 0x87,
+ CQ_TX_ERROP_CK_OVERLAP = 0x88,
+ CQ_TX_ERROP_CK_OFLOW = 0x89,
+ CQ_TX_ERROP_ENUM_LAST = 0x8a,
+};
+
+enum rq_sq_stats_reg_offset {
+ RQ_SQ_STATS_OCTS,
+ RQ_SQ_STATS_PKTS,
+};
+
+enum nic_stat_vnic_rx_e {
+ RX_OCTS,
+ RX_UCAST,
+ RX_BCAST,
+ RX_MCAST,
+ RX_RED,
+ RX_RED_OCTS,
+ RX_ORUN,
+ RX_ORUN_OCTS,
+ RX_FCS,
+ RX_L2ERR,
+ RX_DRP_BCAST,
+ RX_DRP_MCAST,
+ RX_DRP_L3BCAST,
+ RX_DRP_L3MCAST,
+};
+
+enum nic_stat_vnic_tx_e {
+ TX_OCTS,
+ TX_UCAST,
+ TX_BCAST,
+ TX_MCAST,
+ TX_DROP,
+};
+
+/* vNIC HW Register structures */
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t cqe_type:4;
+ uint64_t stdn_fault:1;
+ uint64_t rsvd0:1;
+ uint64_t rq_qs:7;
+ uint64_t rq_idx:3;
+ uint64_t rsvd1:12;
+ uint64_t rss_alg:4;
+ uint64_t rsvd2:4;
+ uint64_t rb_cnt:4;
+ uint64_t vlan_found:1;
+ uint64_t vlan_stripped:1;
+ uint64_t vlan2_found:1;
+ uint64_t vlan2_stripped:1;
+ uint64_t l4_type:4;
+ uint64_t l3_type:4;
+ uint64_t l2_present:1;
+ uint64_t err_level:3;
+ uint64_t err_opcode:8;
+#else
+ uint64_t err_opcode:8;
+ uint64_t err_level:3;
+ uint64_t l2_present:1;
+ uint64_t l3_type:4;
+ uint64_t l4_type:4;
+ uint64_t vlan2_stripped:1;
+ uint64_t vlan2_found:1;
+ uint64_t vlan_stripped:1;
+ uint64_t vlan_found:1;
+ uint64_t rb_cnt:4;
+ uint64_t rsvd2:4;
+ uint64_t rss_alg:4;
+ uint64_t rsvd1:12;
+ uint64_t rq_idx:3;
+ uint64_t rq_qs:7;
+ uint64_t rsvd0:1;
+ uint64_t stdn_fault:1;
+ uint64_t cqe_type:4;
+#endif
+ };
+} cqe_rx_word0_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t pkt_len:16;
+ uint64_t l2_ptr:8;
+ uint64_t l3_ptr:8;
+ uint64_t l4_ptr:8;
+ uint64_t cq_pkt_len:8;
+ uint64_t align_pad:3;
+ uint64_t rsvd3:1;
+ uint64_t chan:12;
+#else
+ uint64_t chan:12;
+ uint64_t rsvd3:1;
+ uint64_t align_pad:3;
+ uint64_t cq_pkt_len:8;
+ uint64_t l4_ptr:8;
+ uint64_t l3_ptr:8;
+ uint64_t l2_ptr:8;
+ uint64_t pkt_len:16;
+#endif
+ };
+} cqe_rx_word1_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t rss_tag:32;
+ uint64_t vlan_tci:16;
+ uint64_t vlan_ptr:8;
+ uint64_t vlan2_ptr:8;
+#else
+ uint64_t vlan2_ptr:8;
+ uint64_t vlan_ptr:8;
+ uint64_t vlan_tci:16;
+ uint64_t rss_tag:32;
+#endif
+ };
+} cqe_rx_word2_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint16_t rb3_sz;
+ uint16_t rb2_sz;
+ uint16_t rb1_sz;
+ uint16_t rb0_sz;
+#else
+ uint16_t rb0_sz;
+ uint16_t rb1_sz;
+ uint16_t rb2_sz;
+ uint16_t rb3_sz;
+#endif
+ };
+} cqe_rx_word3_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint16_t rb7_sz;
+ uint16_t rb6_sz;
+ uint16_t rb5_sz;
+ uint16_t rb4_sz;
+#else
+ uint16_t rb4_sz;
+ uint16_t rb5_sz;
+ uint16_t rb6_sz;
+ uint16_t rb7_sz;
+#endif
+ };
+} cqe_rx_word4_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint16_t rb11_sz;
+ uint16_t rb10_sz;
+ uint16_t rb9_sz;
+ uint16_t rb8_sz;
+#else
+ uint16_t rb8_sz;
+ uint16_t rb9_sz;
+ uint16_t rb10_sz;
+ uint16_t rb11_sz;
+#endif
+ };
+} cqe_rx_word5_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t vlan_found:1;
+ uint64_t vlan_stripped:1;
+ uint64_t vlan2_found:1;
+ uint64_t vlan2_stripped:1;
+ uint64_t rsvd2:3;
+ uint64_t inner_l2:1;
+ uint64_t inner_l4type:4;
+ uint64_t inner_l3type:4;
+ uint64_t vlan_ptr:8;
+ uint64_t vlan2_ptr:8;
+ uint64_t rsvd1:8;
+ uint64_t rsvd0:8;
+ uint64_t inner_l3ptr:8;
+ uint64_t inner_l4ptr:8;
+#else
+ uint64_t inner_l4ptr:8;
+ uint64_t inner_l3ptr:8;
+ uint64_t rsvd0:8;
+ uint64_t rsvd1:8;
+ uint64_t vlan2_ptr:8;
+ uint64_t vlan_ptr:8;
+ uint64_t inner_l3type:4;
+ uint64_t inner_l4type:4;
+ uint64_t inner_l2:1;
+ uint64_t rsvd2:3;
+ uint64_t vlan2_stripped:1;
+ uint64_t vlan2_found:1;
+ uint64_t vlan_stripped:1;
+ uint64_t vlan_found:1;
+#endif
+ };
+} cqe_rx2_word6_t;
+
+struct cqe_rx_t {
+ cqe_rx_word0_t word0;
+ cqe_rx_word1_t word1;
+ cqe_rx_word2_t word2;
+ cqe_rx_word3_t word3;
+ cqe_rx_word4_t word4;
+ cqe_rx_word5_t word5;
+ cqe_rx2_word6_t word6; /* if NIC_PF_RX_CFG[CQE_RX2_ENA] set */
+};
+
+struct cqe_rx_tcp_err_t {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t cqe_type:4; /* W0 */
+ uint64_t rsvd0:60;
+
+ uint64_t rsvd1:4; /* W1 */
+ uint64_t partial_first:1;
+ uint64_t rsvd2:27;
+ uint64_t rbdr_bytes:8;
+ uint64_t rsvd3:24;
+#else
+ uint64_t rsvd0:60;
+ uint64_t cqe_type:4;
+
+ uint64_t rsvd3:24;
+ uint64_t rbdr_bytes:8;
+ uint64_t rsvd2:27;
+ uint64_t partial_first:1;
+ uint64_t rsvd1:4;
+#endif
+};
+
+struct cqe_rx_tcp_t {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t cqe_type:4; /* W0 */
+ uint64_t rsvd0:52;
+ uint64_t cq_tcp_status:8;
+
+ uint64_t rsvd1:32; /* W1 */
+ uint64_t tcp_cntx_bytes:8;
+ uint64_t rsvd2:8;
+ uint64_t tcp_err_bytes:16;
+#else
+ uint64_t cq_tcp_status:8;
+ uint64_t rsvd0:52;
+ uint64_t cqe_type:4; /* W0 */
+
+ uint64_t tcp_err_bytes:16;
+ uint64_t rsvd2:8;
+ uint64_t tcp_cntx_bytes:8;
+ uint64_t rsvd1:32; /* W1 */
+#endif
+};
+
+struct cqe_send_t {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t cqe_type:4; /* W0 */
+ uint64_t rsvd0:4;
+ uint64_t sqe_ptr:16;
+ uint64_t rsvd1:4;
+ uint64_t rsvd2:10;
+ uint64_t sq_qs:7;
+ uint64_t sq_idx:3;
+ uint64_t rsvd3:8;
+ uint64_t send_status:8;
+
+ uint64_t ptp_timestamp:64; /* W1 */
+#elif NICVF_BYTE_ORDER == NICVF_LITTLE_ENDIAN
+ uint64_t send_status:8;
+ uint64_t rsvd3:8;
+ uint64_t sq_idx:3;
+ uint64_t sq_qs:7;
+ uint64_t rsvd2:10;
+ uint64_t rsvd1:4;
+ uint64_t sqe_ptr:16;
+ uint64_t rsvd0:4;
+ uint64_t cqe_type:4; /* W0 */
+
+ uint64_t ptp_timestamp:64;
+#endif
+};
+
+struct cq_entry_type_t {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t cqe_type:4;
+ uint64_t __pad:60;
+#else
+ uint64_t __pad:60;
+ uint64_t cqe_type:4;
+#endif
+};
+
+union cq_entry_t {
+ uint64_t u[64];
+ struct cq_entry_type_t type;
+ struct cqe_rx_t rx_hdr;
+ struct cqe_rx_tcp_t rx_tcp_hdr;
+ struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
+ struct cqe_send_t cqe_send;
+};
+
+NICVF_STATIC_ASSERT(sizeof(union cq_entry_t) == 512);
+
+struct rbdr_entry_t {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ union {
+ struct {
+ uint64_t rsvd0:15;
+ uint64_t buf_addr:42;
+ uint64_t cache_align:7;
+ };
+ nicvf_iova_addr_t full_addr;
+ };
+#else
+ union {
+ struct {
+ uint64_t cache_align:7;
+ uint64_t buf_addr:42;
+ uint64_t rsvd0:15;
+ };
+ nicvf_iova_addr_t full_addr;
+ };
+#endif
+};
+
+NICVF_STATIC_ASSERT(sizeof(struct rbdr_entry_t) == sizeof(uint64_t));
+
+/* TCP reassembly context */
+struct rbe_tcp_cnxt_t {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t tcp_pkt_cnt:12;
+ uint64_t rsvd1:4;
+ uint64_t align_hdr_bytes:4;
+ uint64_t align_ptr_bytes:4;
+ uint64_t ptr_bytes:16;
+ uint64_t rsvd2:24;
+ uint64_t cqe_type:4;
+ uint64_t rsvd0:54;
+ uint64_t tcp_end_reason:2;
+ uint64_t tcp_status:4;
+#else
+ uint64_t tcp_status:4;
+ uint64_t tcp_end_reason:2;
+ uint64_t rsvd0:54;
+ uint64_t cqe_type:4;
+ uint64_t rsvd2:24;
+ uint64_t ptr_bytes:16;
+ uint64_t align_ptr_bytes:4;
+ uint64_t align_hdr_bytes:4;
+ uint64_t rsvd1:4;
+ uint64_t tcp_pkt_cnt:12;
+#endif
+};
+
+/* Always Big endian */
+struct rx_hdr_t {
+ uint64_t opaque:32;
+ uint64_t rss_flow:8;
+ uint64_t skip_length:6;
+ uint64_t disable_rss:1;
+ uint64_t disable_tcp_reassembly:1;
+ uint64_t nodrop:1;
+ uint64_t dest_alg:2;
+ uint64_t rsvd0:2;
+ uint64_t dest_rq:11;
+};
+
+struct sq_crc_subdesc {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t rsvd1:32;
+ uint64_t crc_ival:32;
+ uint64_t subdesc_type:4;
+ uint64_t crc_alg:2;
+ uint64_t rsvd0:10;
+ uint64_t crc_insert_pos:16;
+ uint64_t hdr_start:16;
+ uint64_t crc_len:16;
+#else
+ uint64_t crc_len:16;
+ uint64_t hdr_start:16;
+ uint64_t crc_insert_pos:16;
+ uint64_t rsvd0:10;
+ uint64_t crc_alg:2;
+ uint64_t subdesc_type:4;
+ uint64_t crc_ival:32;
+ uint64_t rsvd1:32;
+#endif
+};
+
+struct sq_gather_subdesc {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t subdesc_type:4; /* W0 */
+ uint64_t ld_type:2;
+ uint64_t rsvd0:42;
+ uint64_t size:16;
+
+ uint64_t rsvd1:15; /* W1 */
+ uint64_t addr:49;
+#else
+ uint64_t size:16;
+ uint64_t rsvd0:42;
+ uint64_t ld_type:2;
+ uint64_t subdesc_type:4; /* W0 */
+
+ uint64_t addr:49;
+ uint64_t rsvd1:15; /* W1 */
+#endif
+};
+
+/* SQ immediate subdescriptor */
+struct sq_imm_subdesc {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t subdesc_type:4; /* W0 */
+ uint64_t rsvd0:46;
+ uint64_t len:14;
+
+ uint64_t data:64; /* W1 */
+#else
+ uint64_t len:14;
+ uint64_t rsvd0:46;
+ uint64_t subdesc_type:4; /* W0 */
+
+ uint64_t data:64; /* W1 */
+#endif
+};
+
+struct sq_mem_subdesc {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t subdesc_type:4; /* W0 */
+ uint64_t mem_alg:4;
+ uint64_t mem_dsz:2;
+ uint64_t wmem:1;
+ uint64_t rsvd0:21;
+ uint64_t offset:32;
+
+ uint64_t rsvd1:15; /* W1 */
+ uint64_t addr:49;
+#else
+ uint64_t offset:32;
+ uint64_t rsvd0:21;
+ uint64_t wmem:1;
+ uint64_t mem_dsz:2;
+ uint64_t mem_alg:4;
+ uint64_t subdesc_type:4; /* W0 */
+
+ uint64_t addr:49;
+ uint64_t rsvd1:15; /* W1 */
+#endif
+};
+
+struct sq_hdr_subdesc {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t subdesc_type:4;
+ uint64_t tso:1;
+ uint64_t post_cqe:1; /* Post CQE on no error also */
+ uint64_t dont_send:1;
+ uint64_t tstmp:1;
+ uint64_t subdesc_cnt:8;
+ uint64_t csum_l4:2;
+ uint64_t csum_l3:1;
+ uint64_t csum_inner_l4:2;
+ uint64_t csum_inner_l3:1;
+ uint64_t rsvd0:2;
+ uint64_t l4_offset:8;
+ uint64_t l3_offset:8;
+ uint64_t rsvd1:4;
+ uint64_t tot_len:20; /* W0 */
+
+ uint64_t rsvd2:24;
+ uint64_t inner_l4_offset:8;
+ uint64_t inner_l3_offset:8;
+ uint64_t tso_start:8;
+ uint64_t rsvd3:2;
+ uint64_t tso_max_paysize:14; /* W1 */
+#else
+ uint64_t tot_len:20;
+ uint64_t rsvd1:4;
+ uint64_t l3_offset:8;
+ uint64_t l4_offset:8;
+ uint64_t rsvd0:2;
+ uint64_t csum_inner_l3:1;
+ uint64_t csum_inner_l4:2;
+ uint64_t csum_l3:1;
+ uint64_t csum_l4:2;
+ uint64_t subdesc_cnt:8;
+ uint64_t tstmp:1;
+ uint64_t dont_send:1;
+ uint64_t post_cqe:1; /* Post CQE on no error also */
+ uint64_t tso:1;
+ uint64_t subdesc_type:4; /* W0 */
+
+ uint64_t tso_max_paysize:14;
+ uint64_t rsvd3:2;
+ uint64_t tso_start:8;
+ uint64_t inner_l3_offset:8;
+ uint64_t inner_l4_offset:8;
+ uint64_t rsvd2:24; /* W1 */
+#endif
+};
+
+/* Each sq entry is 128 bits wide */
+union sq_entry_t {
+ uint64_t buff[2];
+ struct sq_hdr_subdesc hdr;
+ struct sq_imm_subdesc imm;
+ struct sq_gather_subdesc gather;
+ struct sq_crc_subdesc crc;
+ struct sq_mem_subdesc mem;
+};
+
+NICVF_STATIC_ASSERT(sizeof(union sq_entry_t) == 16);
+
+/* Queue config register formats */
+struct rq_cfg { union { struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t reserved_2_63:62;
+ uint64_t ena:1;
+ uint64_t reserved_0:1;
+#else
+ uint64_t reserved_0:1;
+ uint64_t ena:1;
+ uint64_t reserved_2_63:62;
+#endif
+ };
+ uint64_t value;
+}; };
+
+struct cq_cfg { union { struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t reserved_43_63:21;
+ uint64_t ena:1;
+ uint64_t reset:1;
+ uint64_t caching:1;
+ uint64_t reserved_35_39:5;
+ uint64_t qsize:3;
+ uint64_t reserved_25_31:7;
+ uint64_t avg_con:9;
+ uint64_t reserved_0_15:16;
+#else
+ uint64_t reserved_0_15:16;
+ uint64_t avg_con:9;
+ uint64_t reserved_25_31:7;
+ uint64_t qsize:3;
+ uint64_t reserved_35_39:5;
+ uint64_t caching:1;
+ uint64_t reset:1;
+ uint64_t ena:1;
+ uint64_t reserved_43_63:21;
+#endif
+ };
+ uint64_t value;
+}; };
+
+struct sq_cfg { union { struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t reserved_32_63:32;
+ uint64_t cq_limit:8;
+ uint64_t ena:1;
+ uint64_t reserved_18_18:1;
+ uint64_t reset:1;
+ uint64_t ldwb:1;
+ uint64_t reserved_11_15:5;
+ uint64_t qsize:3;
+ uint64_t reserved_3_7:5;
+ uint64_t tstmp_bgx_intf:3;
+#else
+ uint64_t tstmp_bgx_intf:3;
+ uint64_t reserved_3_7:5;
+ uint64_t qsize:3;
+ uint64_t reserved_11_15:5;
+ uint64_t ldwb:1;
+ uint64_t reset:1;
+ uint64_t reserved_18_18:1;
+ uint64_t ena:1;
+ uint64_t cq_limit:8;
+ uint64_t reserved_32_63:32;
+#endif
+ };
+ uint64_t value;
+}; };
+
+struct rbdr_cfg { union { struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t reserved_45_63:19;
+ uint64_t ena:1;
+ uint64_t reset:1;
+ uint64_t ldwb:1;
+ uint64_t reserved_36_41:6;
+ uint64_t qsize:4;
+ uint64_t reserved_25_31:7;
+ uint64_t avg_con:9;
+ uint64_t reserved_12_15:4;
+ uint64_t lines:12;
+#else
+ uint64_t lines:12;
+ uint64_t reserved_12_15:4;
+ uint64_t avg_con:9;
+ uint64_t reserved_25_31:7;
+ uint64_t qsize:4;
+ uint64_t reserved_36_41:6;
+ uint64_t ldwb:1;
+ uint64_t reset:1;
+ uint64_t ena: 1;
+ uint64_t reserved_45_63:19;
+#endif
+ };
+ uint64_t value;
+}; };
+
+struct pf_qs_cfg { union { struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t reserved_32_63:32;
+ uint64_t ena:1;
+ uint64_t reserved_27_30:4;
+ uint64_t sq_ins_ena:1;
+ uint64_t sq_ins_pos:6;
+ uint64_t lock_ena:1;
+ uint64_t lock_viol_cqe_ena:1;
+ uint64_t send_tstmp_ena:1;
+ uint64_t be:1;
+ uint64_t reserved_7_15:9;
+ uint64_t vnic:7;
+#else
+ uint64_t vnic:7;
+ uint64_t reserved_7_15:9;
+ uint64_t be:1;
+ uint64_t send_tstmp_ena:1;
+ uint64_t lock_viol_cqe_ena:1;
+ uint64_t lock_ena:1;
+ uint64_t sq_ins_pos:6;
+ uint64_t sq_ins_ena:1;
+ uint64_t reserved_27_30:4;
+ uint64_t ena:1;
+ uint64_t reserved_32_63:32;
+#endif
+ };
+ uint64_t value;
+}; };
+
+struct pf_rq_cfg { union { struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t reserved1:1;
+ uint64_t reserved0:34;
+ uint64_t strip_pre_l2:1;
+ uint64_t caching:2;
+ uint64_t cq_qs:7;
+ uint64_t cq_idx:3;
+ uint64_t rbdr_cont_qs:7;
+ uint64_t rbdr_cont_idx:1;
+ uint64_t rbdr_strt_qs:7;
+ uint64_t rbdr_strt_idx:1;
+#else
+ uint64_t rbdr_strt_idx:1;
+ uint64_t rbdr_strt_qs:7;
+ uint64_t rbdr_cont_idx:1;
+ uint64_t rbdr_cont_qs:7;
+ uint64_t cq_idx:3;
+ uint64_t cq_qs:7;
+ uint64_t caching:2;
+ uint64_t strip_pre_l2:1;
+ uint64_t reserved0:34;
+ uint64_t reserved1:1;
+#endif
+ };
+ uint64_t value;
+}; };
+
+struct pf_rq_drop_cfg { union { struct {
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ uint64_t rbdr_red:1;
+ uint64_t cq_red:1;
+ uint64_t reserved3:14;
+ uint64_t rbdr_pass:8;
+ uint64_t rbdr_drop:8;
+ uint64_t reserved2:8;
+ uint64_t cq_pass:8;
+ uint64_t cq_drop:8;
+ uint64_t reserved1:8;
+#else
+ uint64_t reserved1:8;
+ uint64_t cq_drop:8;
+ uint64_t cq_pass:8;
+ uint64_t reserved2:8;
+ uint64_t rbdr_drop:8;
+ uint64_t rbdr_pass:8;
+ uint64_t reserved3:14;
+ uint64_t cq_red:1;
+ uint64_t rbdr_red:1;
+#endif
+ };
+ uint64_t value;
+}; };
+
+#endif /* _THUNDERX_NICVF_HW_DEFS_H */
diff --git a/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_mbox.c b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_mbox.c
new file mode 100644
index 00000000..8f83d41d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_mbox.c
@@ -0,0 +1,432 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "nicvf_plat.h"
+
+#define NICVF_MBOX_PF_RESPONSE_DELAY_US (1000)
+
+static const char *mbox_message[NIC_MBOX_MSG_MAX] = {
+ [NIC_MBOX_MSG_INVALID] = "NIC_MBOX_MSG_INVALID",
+ [NIC_MBOX_MSG_READY] = "NIC_MBOX_MSG_READY",
+ [NIC_MBOX_MSG_ACK] = "NIC_MBOX_MSG_ACK",
+ [NIC_MBOX_MSG_NACK] = "NIC_MBOX_MSG_ACK",
+ [NIC_MBOX_MSG_QS_CFG] = "NIC_MBOX_MSG_QS_CFG",
+ [NIC_MBOX_MSG_RQ_CFG] = "NIC_MBOX_MSG_RQ_CFG",
+ [NIC_MBOX_MSG_SQ_CFG] = "NIC_MBOX_MSG_SQ_CFG",
+ [NIC_MBOX_MSG_RQ_DROP_CFG] = "NIC_MBOX_MSG_RQ_DROP_CFG",
+ [NIC_MBOX_MSG_SET_MAC] = "NIC_MBOX_MSG_SET_MAC",
+ [NIC_MBOX_MSG_SET_MAX_FRS] = "NIC_MBOX_MSG_SET_MAX_FRS",
+ [NIC_MBOX_MSG_CPI_CFG] = "NIC_MBOX_MSG_CPI_CFG",
+ [NIC_MBOX_MSG_RSS_SIZE] = "NIC_MBOX_MSG_RSS_SIZE",
+ [NIC_MBOX_MSG_RSS_CFG] = "NIC_MBOX_MSG_RSS_CFG",
+ [NIC_MBOX_MSG_RSS_CFG_CONT] = "NIC_MBOX_MSG_RSS_CFG_CONT",
+ [NIC_MBOX_MSG_RQ_BP_CFG] = "NIC_MBOX_MSG_RQ_BP_CFG",
+ [NIC_MBOX_MSG_RQ_SW_SYNC] = "NIC_MBOX_MSG_RQ_SW_SYNC",
+ [NIC_MBOX_MSG_BGX_LINK_CHANGE] = "NIC_MBOX_MSG_BGX_LINK_CHANGE",
+ [NIC_MBOX_MSG_ALLOC_SQS] = "NIC_MBOX_MSG_ALLOC_SQS",
+ [NIC_MBOX_MSG_LOOPBACK] = "NIC_MBOX_MSG_LOOPBACK",
+ [NIC_MBOX_MSG_RESET_STAT_COUNTER] = "NIC_MBOX_MSG_RESET_STAT_COUNTER",
+ [NIC_MBOX_MSG_CFG_DONE] = "NIC_MBOX_MSG_CFG_DONE",
+ [NIC_MBOX_MSG_SHUTDOWN] = "NIC_MBOX_MSG_SHUTDOWN",
+};
+
+static inline const char * __attribute__((unused))
+nicvf_mbox_msg_str(int msg)
+{
+ assert(msg >= 0 && msg < NIC_MBOX_MSG_MAX);
+ /* undefined messages */
+ if (mbox_message[msg] == NULL)
+ msg = 0;
+ return mbox_message[msg];
+}
+
+static inline void
+nicvf_mbox_send_msg_to_pf_raw(struct nicvf *nic, struct nic_mbx *mbx)
+{
+ uint64_t *mbx_data;
+ uint64_t mbx_addr;
+ int i;
+
+ mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+ mbx_data = (uint64_t *)mbx;
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ nicvf_reg_write(nic, mbx_addr, *mbx_data);
+ mbx_data++;
+ mbx_addr += sizeof(uint64_t);
+ }
+ nicvf_mbox_log("msg sent %s (VF%d)",
+ nicvf_mbox_msg_str(mbx->msg.msg), nic->vf_id);
+}
+
+static inline void
+nicvf_mbox_send_async_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx)
+{
+ nicvf_mbox_send_msg_to_pf_raw(nic, mbx);
+ /* Messages without ack are racy!*/
+ nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US);
+}
+
+static inline int
+nicvf_mbox_send_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx)
+{
+ long timeout;
+ long sleep = 10;
+ int i, retry = 5;
+
+ for (i = 0; i < retry; i++) {
+ nic->pf_acked = false;
+ nic->pf_nacked = false;
+ nicvf_smp_wmb();
+
+ nicvf_mbox_send_msg_to_pf_raw(nic, mbx);
+ /* Give some time to get PF response */
+ nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US);
+ timeout = NIC_MBOX_MSG_TIMEOUT;
+ while (timeout > 0) {
+ /* Periodic poll happens from nicvf_interrupt() */
+ nicvf_smp_rmb();
+
+ if (nic->pf_nacked)
+ return -EINVAL;
+ if (nic->pf_acked)
+ return 0;
+
+ nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US);
+ timeout -= sleep;
+ }
+ nicvf_log_error("PF didn't ack to msg 0x%02x %s VF%d (%d/%d)",
+ mbx->msg.msg, nicvf_mbox_msg_str(mbx->msg.msg),
+ nic->vf_id, i, retry);
+ }
+ return -EBUSY;
+}
+
+
+int
+nicvf_handle_mbx_intr(struct nicvf *nic)
+{
+ struct nic_mbx mbx;
+ uint64_t *mbx_data = (uint64_t *)&mbx;
+ uint64_t mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+ size_t i;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nicvf_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(uint64_t);
+ }
+
+ /* Overwrite the message so we won't receive it again */
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1, 0x0);
+
+ nicvf_mbox_log("msg received id=0x%hhx %s (VF%d)", mbx.msg.msg,
+ nicvf_mbox_msg_str(mbx.msg.msg), nic->vf_id);
+
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
+ nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
+ nic->node = mbx.nic_cfg.node_id;
+ nic->sqs_mode = mbx.nic_cfg.sqs_mode;
+ nic->loopback_supported = mbx.nic_cfg.loopback_supported;
+ ether_addr_copy((struct ether_addr *)mbx.nic_cfg.mac_addr,
+ (struct ether_addr *)nic->mac_addr);
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_ACK:
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_NACK:
+ nic->pf_nacked = true;
+ break;
+ case NIC_MBOX_MSG_RSS_SIZE:
+ nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+ nic->link_up = mbx.link_status.link_up;
+ nic->duplex = mbx.link_status.duplex;
+ nic->speed = mbx.link_status.speed;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_ALLOC_SQS:
+ assert_primary(nic);
+ if (mbx.sqs_alloc.qs_count != nic->sqs_count) {
+ nicvf_log_error("Received %" PRIu8 "/%" PRIu8
+ " secondary qsets",
+ mbx.sqs_alloc.qs_count,
+ nic->sqs_count);
+ abort();
+ }
+ for (i = 0; i < mbx.sqs_alloc.qs_count; i++) {
+ if (mbx.sqs_alloc.svf[i] != nic->snicvf[i]->vf_id) {
+ nicvf_log_error("Received secondary qset[%zu] "
+ "ID %" PRIu8 " expected %"
+ PRIu8, i, mbx.sqs_alloc.svf[i],
+ nic->snicvf[i]->vf_id);
+ abort();
+ }
+ }
+ nic->pf_acked = true;
+ break;
+ default:
+ nicvf_log_error("Invalid message from PF, msg_id=0x%hhx %s",
+ mbx.msg.msg, nicvf_mbox_msg_str(mbx.msg.msg));
+ break;
+ }
+ nicvf_smp_wmb();
+
+ return mbx.msg.msg;
+}
+
+/*
+ * Checks if VF is able to communicate with PF
+ * and also gets the VNIC number this VF is associated to.
+ */
+int
+nicvf_mbox_check_pf_ready(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = {.msg = NIC_MBOX_MSG_READY} };
+
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_set_mac_addr(struct nicvf *nic,
+ const uint8_t mac[NICVF_MAC_ADDR_SIZE])
+{
+ struct nic_mbx mbx = { .msg = {0} };
+ int i;
+
+ mbx.msg.msg = NIC_MBOX_MSG_SET_MAC;
+ mbx.mac.vf_id = nic->vf_id;
+ for (i = 0; i < 6; i++)
+ mbx.mac.mac_addr[i] = mac[i];
+
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_config_cpi(struct nicvf *nic, uint32_t qcnt)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_CPI_CFG;
+ mbx.cpi_cfg.vf_id = nic->vf_id;
+ mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
+ mbx.cpi_cfg.rq_cnt = qcnt;
+
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_get_rss_size(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_RSS_SIZE;
+ mbx.rss_size.vf_id = nic->vf_id;
+
+ /* Result will be stored in nic->rss_info.rss_size */
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_config_rss(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+ struct nicvf_rss_reta_info *rss = &nic->rss_info;
+ size_t tot_len = rss->rss_size;
+ size_t cur_len;
+ size_t cur_idx = 0;
+ size_t i;
+
+ mbx.rss_cfg.vf_id = nic->vf_id;
+ mbx.rss_cfg.hash_bits = rss->hash_bits;
+ mbx.rss_cfg.tbl_len = 0;
+ mbx.rss_cfg.tbl_offset = 0;
+
+ while (cur_idx < tot_len) {
+ cur_len = nicvf_min(tot_len - cur_idx,
+ (size_t)RSS_IND_TBL_LEN_PER_MBX_MSG);
+ mbx.msg.msg = (cur_idx > 0) ?
+ NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
+ mbx.rss_cfg.tbl_offset = cur_idx;
+ mbx.rss_cfg.tbl_len = cur_len;
+ for (i = 0; i < cur_len; i++)
+ mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[cur_idx++];
+
+ if (nicvf_mbox_send_msg_to_pf(nic, &mbx))
+ return NICVF_ERR_RSS_TBL_UPDATE;
+ }
+
+ return 0;
+}
+
+int
+nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx,
+ struct pf_rq_cfg *pf_rq_cfg)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_CFG;
+ mbx.rq.qs_num = nic->vf_id;
+ mbx.rq.rq_num = qidx;
+ mbx.rq.cfg = pf_rq_cfg->value;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_SQ_CFG;
+ mbx.sq.qs_num = nic->vf_id;
+ mbx.sq.sq_num = qidx;
+ mbx.sq.sqs_mode = nic->sqs_mode;
+ mbx.sq.cfg = (nic->vf_id << 3) | qidx;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
+ qs_cfg->be = 1;
+#endif
+ /* Send a mailbox msg to PF to config Qset */
+ mbx.msg.msg = NIC_MBOX_MSG_QS_CFG;
+ mbx.qs.num = nic->vf_id;
+ mbx.qs.sqs_count = nic->sqs_count;
+ mbx.qs.cfg = qs_cfg->value;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_request_sqs(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+ size_t i;
+
+ assert_primary(nic);
+ assert(nic->sqs_count > 0);
+ assert(nic->sqs_count <= MAX_SQS_PER_VF);
+
+ mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
+ mbx.sqs_alloc.spec = 1;
+ mbx.sqs_alloc.qs_count = nic->sqs_count;
+
+ /* Set no of Rx/Tx queues in each of the SQsets */
+ for (i = 0; i < nic->sqs_count; i++)
+ mbx.sqs_alloc.svf[i] = nic->snicvf[i]->vf_id;
+
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+ struct pf_rq_drop_cfg *drop_cfg;
+
+ /* Enable CQ drop to reserve sufficient CQEs for all tx packets */
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
+ mbx.rq.qs_num = nic->vf_id;
+ mbx.rq.rq_num = qidx;
+ drop_cfg = (struct pf_rq_drop_cfg *)&mbx.rq.cfg;
+ drop_cfg->value = 0;
+ if (enable) {
+ drop_cfg->cq_red = 1;
+ drop_cfg->cq_drop = 2;
+ }
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_update_hw_max_frs(struct nicvf *nic, uint16_t mtu)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+ mbx.frs.max_frs = mtu;
+ mbx.frs.vf_id = nic->vf_id;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_rq_sync(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ /* Make sure all packets in the pipeline are written back into mem */
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+ mbx.rq.cfg = 0;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+ mbx.rq.qs_num = nic->vf_id;
+ mbx.rq.rq_num = qidx;
+ mbx.rq.cfg = 0;
+ if (enable)
+ mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (nic->vf_id << 0);
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_loopback_config(struct nicvf *nic, bool enable)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
+ mbx.lbk.vf_id = nic->vf_id;
+ mbx.lbk.enable = enable;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_reset_stat_counters(struct nicvf *nic, uint16_t rx_stat_mask,
+ uint8_t tx_stat_mask, uint16_t rq_stat_mask,
+ uint16_t sq_stat_mask)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+ mbx.reset_stat.rx_stat_mask = rx_stat_mask;
+ mbx.reset_stat.tx_stat_mask = tx_stat_mask;
+ mbx.reset_stat.rq_stat_mask = rq_stat_mask;
+ mbx.reset_stat.sq_stat_mask = sq_stat_mask;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+void
+nicvf_mbox_shutdown(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
+ nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+void
+nicvf_mbox_cfg_done(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+ nicvf_mbox_send_async_msg_to_pf(nic, &mbx);
+}
diff --git a/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_mbox.h b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_mbox.h
new file mode 100644
index 00000000..81f1f408
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_mbox.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef __THUNDERX_NICVF_MBOX__
+#define __THUNDERX_NICVF_MBOX__
+
+#include <stdint.h>
+
+#include "nicvf_plat.h"
+#include "../nicvf_struct.h"
+
+/* PF <--> VF Mailbox communication
+ * Two 64bit registers are shared between PF and VF for each VF
+ * Writing into second register means end of message.
+ */
+
+/* PF <--> VF mailbox communication */
+#define NIC_PF_VF_MAILBOX_SIZE 2
+#define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */
+
+/* Mailbox message types */
+#define NIC_MBOX_MSG_INVALID 0x00 /* Invalid message */
+#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
+#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
+#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
+#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
+#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
+#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
+#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */
+#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
+#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
+#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
+#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
+#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
+#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
+#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
+#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
+#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
+#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
+#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
+#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
+#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
+#define NIC_MBOX_MSG_MAX 0x100 /* Maximum number of messages */
+
+/* Get vNIC VF configuration */
+struct nic_cfg_msg {
+ uint8_t msg;
+ uint8_t vf_id;
+ uint8_t node_id;
+ bool tns_mode:1;
+ bool sqs_mode:1;
+ bool loopback_supported:1;
+ uint8_t mac_addr[NICVF_MAC_ADDR_SIZE];
+};
+
+/* Qset configuration */
+struct qs_cfg_msg {
+ uint8_t msg;
+ uint8_t num;
+ uint8_t sqs_count;
+ uint64_t cfg;
+};
+
+/* Receive queue configuration */
+struct rq_cfg_msg {
+ uint8_t msg;
+ uint8_t qs_num;
+ uint8_t rq_num;
+ uint64_t cfg;
+};
+
+/* Send queue configuration */
+struct sq_cfg_msg {
+ uint8_t msg;
+ uint8_t qs_num;
+ uint8_t sq_num;
+ bool sqs_mode;
+ uint64_t cfg;
+};
+
+/* Set VF's MAC address */
+struct set_mac_msg {
+ uint8_t msg;
+ uint8_t vf_id;
+ uint8_t mac_addr[NICVF_MAC_ADDR_SIZE];
+};
+
+/* Set Maximum frame size */
+struct set_frs_msg {
+ uint8_t msg;
+ uint8_t vf_id;
+ uint16_t max_frs;
+};
+
+/* Set CPI algorithm type */
+struct cpi_cfg_msg {
+ uint8_t msg;
+ uint8_t vf_id;
+ uint8_t rq_cnt;
+ uint8_t cpi_alg;
+};
+
+/* Get RSS table size */
+struct rss_sz_msg {
+ uint8_t msg;
+ uint8_t vf_id;
+ uint16_t ind_tbl_size;
+};
+
+/* Set RSS configuration */
+struct rss_cfg_msg {
+ uint8_t msg;
+ uint8_t vf_id;
+ uint8_t hash_bits;
+ uint8_t tbl_len;
+ uint8_t tbl_offset;
+#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
+ uint8_t ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
+};
+
+/* Physical interface link status */
+struct bgx_link_status {
+ uint8_t msg;
+ uint8_t mac_type;
+ uint8_t link_up;
+ uint8_t duplex;
+ uint32_t speed;
+};
+
+/* Allocate additional SQS to VF */
+struct sqs_alloc {
+ uint8_t msg;
+ uint8_t spec;
+ uint8_t qs_count;
+ uint8_t svf[MAX_SQS_PER_VF];
+};
+
+/* Set interface in loopback mode */
+struct set_loopback {
+ uint8_t msg;
+ uint8_t vf_id;
+ bool enable;
+};
+
+/* Reset statistics counters */
+struct reset_stat_cfg {
+ uint8_t msg;
+ /* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
+ uint16_t rx_stat_mask;
+ /* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
+ uint8_t tx_stat_mask;
+ /* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
+ * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
+ * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
+ * ..
+ * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
+ * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
+ */
+ uint16_t rq_stat_mask;
+ /* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
+ * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
+ * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
+ * ..
+ * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
+ * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
+ */
+ uint16_t sq_stat_mask;
+};
+
+struct nic_mbx {
+/* 128 bit shared memory between PF and each VF */
+union {
+ struct { uint8_t msg; } msg;
+ struct nic_cfg_msg nic_cfg;
+ struct qs_cfg_msg qs;
+ struct rq_cfg_msg rq;
+ struct sq_cfg_msg sq;
+ struct set_mac_msg mac;
+ struct set_frs_msg frs;
+ struct cpi_cfg_msg cpi_cfg;
+ struct rss_sz_msg rss_size;
+ struct rss_cfg_msg rss_cfg;
+ struct bgx_link_status link_status;
+ struct sqs_alloc sqs_alloc;
+ struct set_loopback lbk;
+ struct reset_stat_cfg reset_stat;
+};
+};
+
+NICVF_STATIC_ASSERT(sizeof(struct nic_mbx) <= 16);
+
+int nicvf_handle_mbx_intr(struct nicvf *nic);
+int nicvf_mbox_check_pf_ready(struct nicvf *nic);
+int nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg);
+int nicvf_mbox_request_sqs(struct nicvf *nic);
+int nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx,
+ struct pf_rq_cfg *pf_rq_cfg);
+int nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx);
+int nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable);
+int nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable);
+int nicvf_mbox_set_mac_addr(struct nicvf *nic,
+ const uint8_t mac[NICVF_MAC_ADDR_SIZE]);
+int nicvf_mbox_config_cpi(struct nicvf *nic, uint32_t qcnt);
+int nicvf_mbox_get_rss_size(struct nicvf *nic);
+int nicvf_mbox_config_rss(struct nicvf *nic);
+int nicvf_mbox_update_hw_max_frs(struct nicvf *nic, uint16_t mtu);
+int nicvf_mbox_rq_sync(struct nicvf *nic);
+int nicvf_mbox_loopback_config(struct nicvf *nic, bool enable);
+int nicvf_mbox_reset_stat_counters(struct nicvf *nic, uint16_t rx_stat_mask,
+ uint8_t tx_stat_mask, uint16_t rq_stat_mask, uint16_t sq_stat_mask);
+void nicvf_mbox_shutdown(struct nicvf *nic);
+void nicvf_mbox_cfg_done(struct nicvf *nic);
+
+#endif /* __THUNDERX_NICVF_MBOX__ */
diff --git a/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_plat.h b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_plat.h
new file mode 100644
index 00000000..6de07c70
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/base/nicvf_plat.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef _THUNDERX_NICVF_H
+#define _THUNDERX_NICVF_H
+
+/* Platform/OS/arch specific abstractions */
+
+/* log */
+#include <rte_log.h>
+#include "../nicvf_logs.h"
+
+#define nicvf_log_error(s, ...) PMD_DRV_LOG(ERR, s, ##__VA_ARGS__)
+
+#define nicvf_log_debug(s, ...) PMD_DRV_LOG(DEBUG, s, ##__VA_ARGS__)
+
+#define nicvf_mbox_log(s, ...) PMD_MBOX_LOG(DEBUG, s, ##__VA_ARGS__)
+
+#define nicvf_log(s, ...) fprintf(stderr, s, ##__VA_ARGS__)
+
+/* delay */
+#include <rte_cycles.h>
+#define nicvf_delay_us(x) rte_delay_us(x)
+
+/* barrier */
+#include <rte_atomic.h>
+#define nicvf_smp_wmb() rte_smp_wmb()
+#define nicvf_smp_rmb() rte_smp_rmb()
+
+/* utils */
+#include <rte_common.h>
+#define nicvf_min(x, y) RTE_MIN(x, y)
+#define nicvf_log2_u32(x) rte_log2_u32(x)
+
+/* byte order */
+#include <rte_byteorder.h>
+#define nicvf_cpu_to_be_64(x) rte_cpu_to_be_64(x)
+#define nicvf_be_to_cpu_64(x) rte_be_to_cpu_64(x)
+
+#define NICVF_BYTE_ORDER RTE_BYTE_ORDER
+#define NICVF_BIG_ENDIAN RTE_BIG_ENDIAN
+#define NICVF_LITTLE_ENDIAN RTE_LITTLE_ENDIAN
+
+/* Constants */
+#include <rte_ether.h>
+#define NICVF_MAC_ADDR_SIZE ETHER_ADDR_LEN
+
+#include <rte_io.h>
+#define nicvf_addr_write(addr, val) rte_write64_relaxed((val), (void *)(addr))
+#define nicvf_addr_read(addr) rte_read64_relaxed((void *)(addr))
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define nicvf_prefetch_store_keep(_ptr) ({\
+ asm volatile("prfm pstl1keep, [%x0]\n" : : "r" (_ptr)); })
+
+
+#define NICVF_LOAD_PAIR(reg1, reg2, addr) ({ \
+ asm volatile( \
+ "ldp %x[x1], %x[x0], [%x[p1]]" \
+ : [x1]"=r"(reg1), [x0]"=r"(reg2)\
+ : [p1]"r"(addr) \
+ ); })
+
+#else /* non optimized functions for building on non arm64 arch */
+
+#define nicvf_prefetch_store_keep(_ptr) do {} while (0)
+
+#define NICVF_LOAD_PAIR(reg1, reg2, addr) \
+do { \
+ reg1 = nicvf_addr_read((uintptr_t)addr); \
+ reg2 = nicvf_addr_read((uintptr_t)addr + 8); \
+} while (0)
+
+#endif
+
+#include "nicvf_hw.h"
+#include "nicvf_mbox.h"
+
+#endif /* _THUNDERX_NICVF_H */
diff --git a/src/spdk/dpdk/drivers/net/thunderx/meson.build b/src/spdk/dpdk/drivers/net/thunderx/meson.build
new file mode 100644
index 00000000..69819a97
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/meson.build
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+subdir('base')
+objs = [base_objs]
+
+sources = files('nicvf_rxtx.c',
+ 'nicvf_ethdev.c',
+ 'nicvf_svf.c'
+)
+
+if cc.has_argument('-fno-prefetch-loop-arrays')
+ cflags += '-fno-prefetch-loop-arrays'
+endif
+
+if cc.has_argument('-Wno-maybe-uninitialized')
+ cflags += '-Wno-maybe-uninitialized'
+endif
+
+includes += include_directories('base')
diff --git a/src/spdk/dpdk/drivers/net/thunderx/nicvf_ethdev.c b/src/spdk/dpdk/drivers/net/thunderx/nicvf_ethdev.c
new file mode 100644
index 00000000..a55c3ca6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/nicvf_ethdev.c
@@ -0,0 +1,2280 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <sys/queue.h>
+
+#include <rte_alarm.h>
+#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_tailq.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+
+#include "base/nicvf_plat.h"
+
+#include "nicvf_ethdev.h"
+#include "nicvf_rxtx.h"
+#include "nicvf_svf.h"
+#include "nicvf_logs.h"
+
+int nicvf_logtype_mbox;
+int nicvf_logtype_init;
+int nicvf_logtype_driver;
+
+static void nicvf_dev_stop(struct rte_eth_dev *dev);
+static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
+static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
+ bool cleanup);
+static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
+static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+
+RTE_INIT(nicvf_init_log)
+{
+ nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
+ if (nicvf_logtype_mbox >= 0)
+ rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);
+
+ nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
+ if (nicvf_logtype_init >= 0)
+ rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);
+
+ nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
+ if (nicvf_logtype_driver >= 0)
+ rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
+}
+
+static void
+nicvf_link_status_update(struct nicvf *nic,
+ struct rte_eth_link *link)
+{
+ memset(link, 0, sizeof(*link));
+
+ link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+ if (nic->duplex == NICVF_HALF_DUPLEX)
+ link->link_duplex = ETH_LINK_HALF_DUPLEX;
+ else if (nic->duplex == NICVF_FULL_DUPLEX)
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_speed = nic->speed;
+ link->link_autoneg = ETH_LINK_AUTONEG;
+}
+
+static void
+nicvf_interrupt(void *arg)
+{
+ struct rte_eth_dev *dev = arg;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_eth_link link;
+
+ if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ nicvf_link_status_update(nic, &link);
+ rte_eth_linkstatus_set(dev, &link);
+
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+ }
+
+ rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
+ nicvf_interrupt, dev);
+}
+
+static void
+nicvf_vf_interrupt(void *arg)
+{
+ struct nicvf *nic = arg;
+
+ nicvf_reg_poll_interrupts(nic);
+
+ rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
+ nicvf_vf_interrupt, nic);
+}
+
+static int
+nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
+{
+ return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
+}
+
+static int
+nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
+{
+ return rte_eal_alarm_cancel(fn, arg);
+}
+
+/*
+ * Return 0 means link status changed, -1 means not changed
+ */
+static int
+nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ struct rte_eth_link link;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (wait_to_complete) {
+ /* rte_eth_link_get() might need to wait up to 9 seconds */
+ for (i = 0; i < MAX_CHECK_TIME; i++) {
+ nicvf_link_status_update(nic, &link);
+ if (link.link_status == ETH_LINK_UP)
+ break;
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+ } else {
+ nicvf_link_status_update(nic, &link);
+ }
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
+ size_t i;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (frame_size > NIC_HW_MAX_FRS)
+ return -EINVAL;
+
+ if (frame_size < NIC_HW_MIN_FRS)
+ return -EINVAL;
+
+ buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * Refuse mtu that requires the support of scattered packets
+ * when this feature has not been enabled before.
+ */
+ if (dev->data->dev_started && !dev->data->scattered_rx &&
+ (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
+ return -EINVAL;
+
+ /* check <seg size> * <max_seg> >= max_frame */
+ if (dev->data->scattered_rx &&
+ (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
+ return -EINVAL;
+
+ if (frame_size > ETHER_MAX_LEN)
+ rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ if (nicvf_mbox_update_hw_max_frs(nic, mtu))
+ return -EINVAL;
+
+ /* Update max_rx_pkt_len */
+ rxmode->max_rx_pkt_len = mtu + ETHER_HDR_LEN;
+ nic->mtu = mtu;
+
+ for (i = 0; i < nic->sqs_count; i++)
+ nic->snicvf[i]->mtu = mtu;
+
+ return 0;
+}
+
+static int
+nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
+{
+ uint64_t *data = regs->data;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (data == NULL) {
+ regs->length = nicvf_reg_get_count();
+ regs->width = THUNDERX_REG_BYTES;
+ return 0;
+ }
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)nicvf_reg_get_count())) {
+ regs->version = nic->vendor_id << 16 | nic->device_id;
+ nicvf_reg_dump(nic, data);
+ return 0;
+ }
+ return -ENOTSUP;
+}
+
+static int
+nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ uint16_t qidx;
+ struct nicvf_hw_rx_qstats rx_qstats;
+ struct nicvf_hw_tx_qstats tx_qstats;
+ struct nicvf_hw_stats port_stats;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint16_t rx_start, rx_end;
+ uint16_t tx_start, tx_end;
+ size_t i;
+
+ /* RX queue indices for the first VF */
+ nicvf_rx_range(dev, nic, &rx_start, &rx_end);
+
+ /* Reading per RX ring stats */
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
+ stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
+ stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
+ }
+
+ /* TX queue indices for the first VF */
+ nicvf_tx_range(dev, nic, &tx_start, &tx_end);
+
+ /* Reading per TX ring stats */
+ for (qidx = tx_start; qidx <= tx_end; qidx++) {
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
+ stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
+ stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
+ }
+
+ for (i = 0; i < nic->sqs_count; i++) {
+ struct nicvf *snic = nic->snicvf[i];
+
+ if (snic == NULL)
+ break;
+
+ /* RX queue indices for a secondary VF */
+ nicvf_rx_range(dev, snic, &rx_start, &rx_end);
+
+ /* Reading per RX ring stats */
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ nicvf_hw_get_rx_qstats(snic, &rx_qstats,
+ qidx % MAX_RCV_QUEUES_PER_QS);
+ stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
+ stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
+ }
+
+ /* TX queue indices for a secondary VF */
+ nicvf_tx_range(dev, snic, &tx_start, &tx_end);
+ /* Reading per TX ring stats */
+ for (qidx = tx_start; qidx <= tx_end; qidx++) {
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ nicvf_hw_get_tx_qstats(snic, &tx_qstats,
+ qidx % MAX_SND_QUEUES_PER_QS);
+ stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
+ stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
+ }
+ }
+
+ nicvf_hw_get_stats(nic, &port_stats);
+ stats->ibytes = port_stats.rx_bytes;
+ stats->ipackets = port_stats.rx_ucast_frames;
+ stats->ipackets += port_stats.rx_bcast_frames;
+ stats->ipackets += port_stats.rx_mcast_frames;
+ stats->ierrors = port_stats.rx_l2_errors;
+ stats->imissed = port_stats.rx_drop_red;
+ stats->imissed += port_stats.rx_drop_overrun;
+ stats->imissed += port_stats.rx_drop_bcast;
+ stats->imissed += port_stats.rx_drop_mcast;
+ stats->imissed += port_stats.rx_drop_l3_bcast;
+ stats->imissed += port_stats.rx_drop_l3_mcast;
+
+ stats->obytes = port_stats.tx_bytes_ok;
+ stats->opackets = port_stats.tx_ucast_frames_ok;
+ stats->opackets += port_stats.tx_bcast_frames_ok;
+ stats->opackets += port_stats.tx_mcast_frames_ok;
+ stats->oerrors = port_stats.tx_drops;
+
+ return 0;
+}
+
+static const uint32_t *
+nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ size_t copied;
+ static uint32_t ptypes[32];
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ static const uint32_t ptypes_common[] = {
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_FRAG,
+ };
+ static const uint32_t ptypes_tunnel[] = {
+ RTE_PTYPE_TUNNEL_GRE,
+ RTE_PTYPE_TUNNEL_GENEVE,
+ RTE_PTYPE_TUNNEL_VXLAN,
+ RTE_PTYPE_TUNNEL_NVGRE,
+ };
+ static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
+
+ copied = sizeof(ptypes_common);
+ memcpy(ptypes, ptypes_common, copied);
+ if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
+ memcpy((char *)ptypes + copied, ptypes_tunnel,
+ sizeof(ptypes_tunnel));
+ copied += sizeof(ptypes_tunnel);
+ }
+
+ memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
+
+ /* All Ptypes are supported in all Rx functions. */
+ return ptypes;
+}
+
+static void
+nicvf_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ int i;
+ uint16_t rxqs = 0, txqs = 0;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint16_t rx_start, rx_end;
+ uint16_t tx_start, tx_end;
+
+ /* Reset all primary nic counters */
+ nicvf_rx_range(dev, nic, &rx_start, &rx_end);
+ for (i = rx_start; i <= rx_end; i++)
+ rxqs |= (0x3 << (i * 2));
+
+ nicvf_tx_range(dev, nic, &tx_start, &tx_end);
+ for (i = tx_start; i <= tx_end; i++)
+ txqs |= (0x3 << (i * 2));
+
+ nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
+
+ /* Reset secondary nic queue counters */
+ for (i = 0; i < nic->sqs_count; i++) {
+ struct nicvf *snic = nic->snicvf[i];
+ if (snic == NULL)
+ break;
+
+ nicvf_rx_range(dev, snic, &rx_start, &rx_end);
+ for (i = rx_start; i <= rx_end; i++)
+ rxqs |= (0x3 << ((i % MAX_CMP_QUEUES_PER_QS) * 2));
+
+ nicvf_tx_range(dev, snic, &tx_start, &tx_end);
+ for (i = tx_start; i <= tx_end; i++)
+ txqs |= (0x3 << ((i % MAX_SND_QUEUES_PER_QS) * 2));
+
+ nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
+ }
+}
+
+/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
+static void
+nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
+{
+}
+
+static inline uint64_t
+nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
+{
+ uint64_t nic_rss = 0;
+
+ if (ethdev_rss & ETH_RSS_IPV4)
+ nic_rss |= RSS_IP_ENA;
+
+ if (ethdev_rss & ETH_RSS_IPV6)
+ nic_rss |= RSS_IP_ENA;
+
+ if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
+ nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
+
+ if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
+ nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
+
+ if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
+ nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
+
+ if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
+ nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
+
+ if (ethdev_rss & ETH_RSS_PORT)
+ nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
+
+ if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
+ if (ethdev_rss & ETH_RSS_VXLAN)
+ nic_rss |= RSS_TUN_VXLAN_ENA;
+
+ if (ethdev_rss & ETH_RSS_GENEVE)
+ nic_rss |= RSS_TUN_GENEVE_ENA;
+
+ if (ethdev_rss & ETH_RSS_NVGRE)
+ nic_rss |= RSS_TUN_NVGRE_ENA;
+ }
+
+ return nic_rss;
+}
+
+static inline uint64_t
+nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
+{
+ uint64_t ethdev_rss = 0;
+
+ if (nic_rss & RSS_IP_ENA)
+ ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
+
+ if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
+ ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_NONFRAG_IPV6_TCP);
+
+ if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
+ ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_NONFRAG_IPV6_UDP);
+
+ if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
+ ethdev_rss |= ETH_RSS_PORT;
+
+ if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
+ if (nic_rss & RSS_TUN_VXLAN_ENA)
+ ethdev_rss |= ETH_RSS_VXLAN;
+
+ if (nic_rss & RSS_TUN_GENEVE_ENA)
+ ethdev_rss |= ETH_RSS_GENEVE;
+
+ if (nic_rss & RSS_TUN_NVGRE_ENA)
+ ethdev_rss |= ETH_RSS_NVGRE;
+ }
+ return ethdev_rss;
+}
+
+static int
+nicvf_dev_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+ int ret, i, j;
+
+ if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
+ RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
+ return -EINVAL;
+ }
+
+ ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
+ if (ret)
+ return ret;
+
+ /* Copy RETA table */
+ for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ if ((reta_conf[i].mask >> j) & 0x01)
+ reta_conf[i].reta[j] = tbl[j];
+ }
+
+ return 0;
+}
+
+static int
+nicvf_dev_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+ int ret, i, j;
+
+ if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
+ RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
+ return -EINVAL;
+ }
+
+ ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
+ if (ret)
+ return ret;
+
+ /* Copy RETA table */
+ for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ if ((reta_conf[i].mask >> j) & 0x01)
+ tbl[j] = reta_conf[i].reta[j];
+ }
+
+ return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
+static int
+nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (rss_conf->rss_key)
+ nicvf_rss_get_key(nic, rss_conf->rss_key);
+
+ rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
+ rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
+ return 0;
+}
+
+static int
+nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint64_t nic_rss;
+
+ if (rss_conf->rss_key &&
+ rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
+ RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
+ rss_conf->rss_key_len);
+ return -EINVAL;
+ }
+
+ if (rss_conf->rss_key)
+ nicvf_rss_set_key(nic, rss_conf->rss_key);
+
+ nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
+ nicvf_rss_set_cfg(nic, nic_rss);
+ return 0;
+}
+
+static int
+nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
+ struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
+{
+ const struct rte_memzone *rz;
+ uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);
+
+ rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
+ nicvf_netdev_qidx(nic, qidx), ring_size,
+ NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
+ if (rz == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
+ return -ENOMEM;
+ }
+
+ memset(rz->addr, 0, ring_size);
+
+ rxq->phys = rz->iova;
+ rxq->desc = rz->addr;
+ rxq->qlen_mask = desc_cnt - 1;
+
+ return 0;
+}
+
+static int
+nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
+ struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
+{
+ const struct rte_memzone *rz;
+ uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);
+
+ rz = rte_eth_dma_zone_reserve(dev, "sq",
+ nicvf_netdev_qidx(nic, qidx), ring_size,
+ NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
+ if (rz == NULL) {
+ PMD_INIT_LOG(ERR, "Failed allocate mem for sq hw ring");
+ return -ENOMEM;
+ }
+
+ memset(rz->addr, 0, ring_size);
+
+ sq->phys = rz->iova;
+ sq->desc = rz->addr;
+ sq->qlen_mask = desc_cnt - 1;
+
+ return 0;
+}
+
+static int
+nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
+ uint32_t desc_cnt, uint32_t buffsz)
+{
+ struct nicvf_rbdr *rbdr;
+ const struct rte_memzone *rz;
+ uint32_t ring_size;
+
+ assert(nic->rbdr == NULL);
+ rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
+ RTE_CACHE_LINE_SIZE, nic->node);
+ if (rbdr == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
+ return -ENOMEM;
+ }
+
+ ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
+ rz = rte_eth_dma_zone_reserve(dev, "rbdr",
+ nicvf_netdev_qidx(nic, 0), ring_size,
+ NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
+ if (rz == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
+ return -ENOMEM;
+ }
+
+ memset(rz->addr, 0, ring_size);
+
+ rbdr->phys = rz->iova;
+ rbdr->tail = 0;
+ rbdr->next_tail = 0;
+ rbdr->desc = rz->addr;
+ rbdr->buffsz = buffsz;
+ rbdr->qlen_mask = desc_cnt - 1;
+ rbdr->rbdr_status =
+ nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
+ rbdr->rbdr_door =
+ nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;
+
+ nic->rbdr = rbdr;
+ return 0;
+}
+
+static void
+nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
+ nicvf_iova_addr_t phy)
+{
+ uint16_t qidx;
+ void *obj;
+ struct nicvf_rxq *rxq;
+ uint16_t rx_start, rx_end;
+
+ /* Get queue ranges for this VF */
+ nicvf_rx_range(dev, nic, &rx_start, &rx_end);
+
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
+ rxq = dev->data->rx_queues[qidx];
+ if (rxq->precharge_cnt) {
+ obj = (void *)nicvf_mbuff_phy2virt(phy,
+ rxq->mbuf_phys_off);
+ rte_mempool_put(rxq->pool, obj);
+ rxq->precharge_cnt--;
+ break;
+ }
+ }
+}
+
+static inline void
+nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
+{
+ uint32_t qlen_mask, head;
+ struct rbdr_entry_t *entry;
+ struct nicvf_rbdr *rbdr = nic->rbdr;
+
+ qlen_mask = rbdr->qlen_mask;
+ head = rbdr->head;
+ while (head != rbdr->tail) {
+ entry = rbdr->desc + head;
+ nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
+ head++;
+ head = head & qlen_mask;
+ }
+}
+
+static inline void
+nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
+{
+ uint32_t head;
+
+ head = txq->head;
+ while (head != txq->tail) {
+ if (txq->txbuffs[head]) {
+ rte_pktmbuf_free_seg(txq->txbuffs[head]);
+ txq->txbuffs[head] = NULL;
+ }
+ head++;
+ head = head & txq->qlen_mask;
+ }
+}
+
+static void
+nicvf_tx_queue_reset(struct nicvf_txq *txq)
+{
+ uint32_t txq_desc_cnt = txq->qlen_mask + 1;
+
+ memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
+ memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
+ txq->tail = 0;
+ txq->head = 0;
+ txq->xmit_bufs = 0;
+}
+
+static inline int
+nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
+ uint16_t qidx)
+{
+ struct nicvf_txq *txq;
+ int ret;
+
+ assert(qidx < MAX_SND_QUEUES_PER_QS);
+
+ if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
+ RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
+ txq->pool = NULL;
+ ret = nicvf_qset_sq_config(nic, qidx, txq);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
+ nic->vf_id, qidx, ret);
+ goto config_sq_error;
+ }
+
+ dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ return ret;
+
+config_sq_error:
+ nicvf_qset_sq_reclaim(nic, qidx);
+ return ret;
+}
+
+static inline int
+nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
+ uint16_t qidx)
+{
+ struct nicvf_txq *txq;
+ int ret;
+
+ assert(qidx < MAX_SND_QUEUES_PER_QS);
+
+ if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
+ RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ ret = nicvf_qset_sq_reclaim(nic, qidx);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
+ nic->vf_id, qidx, ret);
+
+ txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
+ nicvf_tx_queue_release_mbufs(txq);
+ nicvf_tx_queue_reset(txq);
+
+ dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ return ret;
+}
+
+static inline int
+nicvf_configure_cpi(struct rte_eth_dev *dev)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint16_t qidx, qcnt;
+ int ret;
+
+ /* Count started rx queues */
+ for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
+ if (dev->data->rx_queue_state[qidx] ==
+ RTE_ETH_QUEUE_STATE_STARTED)
+ qcnt++;
+
+ nic->cpi_alg = CPI_ALG_NONE;
+ ret = nicvf_mbox_config_cpi(nic, qcnt);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
+
+ return ret;
+}
+
+static inline int
+nicvf_configure_rss(struct rte_eth_dev *dev)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint64_t rsshf;
+ int ret = -EINVAL;
+
+ rsshf = nicvf_rss_ethdev_to_nic(nic,
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
+ PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
+ dev->data->dev_conf.rxmode.mq_mode,
+ dev->data->nb_rx_queues,
+ dev->data->dev_conf.lpbk_mode, rsshf);
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+ ret = nicvf_rss_term(nic);
+ else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
+
+ return ret;
+}
+
+static int
+nicvf_configure_rss_reta(struct rte_eth_dev *dev)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ unsigned int idx, qmap_size;
+ uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
+ uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
+
+ if (nic->cpi_alg != CPI_ALG_NONE)
+ return -EINVAL;
+
+ /* Prepare queue map */
+ for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
+ if (dev->data->rx_queue_state[idx] ==
+ RTE_ETH_QUEUE_STATE_STARTED)
+ qmap[qmap_size++] = idx;
+ }
+
+ /* Update default RSS RETA */
+ for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
+ default_reta[idx] = qmap[idx % qmap_size];
+
+ return nicvf_rss_reta_update(nic, default_reta,
+ NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
+static void
+nicvf_dev_tx_queue_release(void *sq)
+{
+ struct nicvf_txq *txq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = (struct nicvf_txq *)sq;
+ if (txq) {
+ if (txq->txbuffs != NULL) {
+ nicvf_tx_queue_release_mbufs(txq);
+ rte_free(txq->txbuffs);
+ txq->txbuffs = NULL;
+ }
+ rte_free(txq);
+ }
+}
+
+static void
+nicvf_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct nicvf_txq *txq = NULL;
+ size_t i;
+ bool multiseg = false;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+ multiseg = true;
+ break;
+ }
+ }
+
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (multiseg) {
+ PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
+ dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
+ dev->tx_pkt_burst = nicvf_xmit_pkts;
+ }
+
+ if (!txq)
+ return;
+
+ if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
+ PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
+ else
+ PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
+}
+
+static void
+nicvf_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ const eth_rx_burst_t rx_burst_func[2][2][2] = {
+ /* [NORMAL/SCATTER] [CKSUM/NO_CKSUM] [VLAN_STRIP/NO_VLAN_STRIP] */
+ [0][0][0] = nicvf_recv_pkts_no_offload,
+ [0][0][1] = nicvf_recv_pkts_vlan_strip,
+ [0][1][0] = nicvf_recv_pkts_cksum,
+ [0][1][1] = nicvf_recv_pkts_cksum_vlan_strip,
+ [1][0][0] = nicvf_recv_pkts_multiseg_no_offload,
+ [1][0][1] = nicvf_recv_pkts_multiseg_vlan_strip,
+ [1][1][0] = nicvf_recv_pkts_multiseg_cksum,
+ [1][1][1] = nicvf_recv_pkts_multiseg_cksum_vlan_strip,
+ };
+
+ dev->rx_pkt_burst =
+ rx_burst_func[dev->data->scattered_rx]
+ [nic->offload_cksum][nic->vlan_strip];
+}
+
+static int
+nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ uint16_t tx_free_thresh;
+ bool is_single_pool;
+ struct nicvf_txq *txq;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (qidx >= MAX_SND_QUEUES_PER_QS)
+ nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];
+
+ qidx = qidx % MAX_SND_QUEUES_PER_QS;
+
+ /* Socket id check */
+ if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
+ PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
+ socket_id, nic->node);
+
+ /* Tx deferred start is not supported */
+ if (tx_conf->tx_deferred_start) {
+ PMD_INIT_LOG(ERR, "Tx deferred start not supported");
+ return -EINVAL;
+ }
+
+ /* Roundup nb_desc to available qsize and validate max number of desc */
+ nb_desc = nicvf_qsize_sq_roundup(nb_desc);
+ if (nb_desc == 0) {
+ PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
+ return -EINVAL;
+ }
+
+ /* Validate tx_free_thresh */
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh :
+ NICVF_DEFAULT_TX_FREE_THRESH);
+
+ if (tx_free_thresh > (nb_desc) ||
+ tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
+ PMD_INIT_LOG(ERR,
+ "tx_free_thresh must be less than the number of TX "
+ "descriptors. (tx_free_thresh=%u port=%d "
+ "queue=%d)", (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id, (int)qidx);
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
+ PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+ nicvf_netdev_qidx(nic, qidx));
+ nicvf_dev_tx_queue_release(
+ dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
+ dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
+ }
+
+ /* Allocating tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
+ RTE_CACHE_LINE_SIZE, nic->node);
+ if (txq == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
+ nicvf_netdev_qidx(nic, qidx));
+ return -ENOMEM;
+ }
+
+ txq->nic = nic;
+ txq->queue_id = qidx;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
+ txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ txq->offloads = offloads;
+
+ is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+
+ /* Choose optimum free threshold value for multipool case */
+ if (!is_single_pool) {
+ txq->tx_free_thresh = (uint16_t)
+ (tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
+ NICVF_TX_FREE_MPOOL_THRESH :
+ tx_conf->tx_free_thresh);
+ txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
+ } else {
+ txq->pool_free = nicvf_single_pool_free_xmited_buffers;
+ }
+
+ /* Allocate software ring */
+ txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
+ nb_desc * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE, nic->node);
+
+ if (txq->txbuffs == NULL) {
+ nicvf_dev_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
+ nicvf_dev_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ nicvf_tx_queue_reset(txq);
+
+ PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
+ " phys=0x%" PRIx64 " offloads=0x%" PRIx64,
+ nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
+ txq->phys, txq->offloads);
+
+ dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
+ dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+static inline void
+nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
+{
+ uint32_t rxq_cnt;
+ uint32_t nb_pkts, released_pkts = 0;
+ uint32_t refill_cnt = 0;
+ struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
+
+ if (dev->rx_pkt_burst == NULL)
+ return;
+
+ while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
+ nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
+ nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
+ NICVF_MAX_RX_FREE_THRESH);
+ PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
+ while (nb_pkts) {
+ rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
+ released_pkts++;
+ }
+ }
+
+
+ refill_cnt += nicvf_dev_rbdr_refill(dev,
+ nicvf_netdev_qidx(rxq->nic, rxq->queue_id));
+
+ PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
+ released_pkts, refill_cnt);
+}
+
+static void
+nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
+{
+ rxq->head = 0;
+ rxq->available_space = 0;
+ rxq->recv_buffers = 0;
+}
+
+static inline int
+nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
+ uint16_t qidx)
+{
+ struct nicvf_rxq *rxq;
+ int ret;
+
+ assert(qidx < MAX_RCV_QUEUES_PER_QS);
+
+ if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
+ RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ /* Update rbdr pointer to all rxq */
+ rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
+ rxq->shared_rbdr = nic->rbdr;
+
+ ret = nicvf_qset_rq_config(nic, qidx, rxq);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
+ nic->vf_id, qidx, ret);
+ goto config_rq_error;
+ }
+ ret = nicvf_qset_cq_config(nic, qidx, rxq);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
+ nic->vf_id, qidx, ret);
+ goto config_cq_error;
+ }
+
+ dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+
+config_cq_error:
+ nicvf_qset_cq_reclaim(nic, qidx);
+config_rq_error:
+ nicvf_qset_rq_reclaim(nic, qidx);
+ return ret;
+}
+
+static inline int
+nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
+ uint16_t qidx)
+{
+ struct nicvf_rxq *rxq;
+ int ret, other_error;
+
+ if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
+ RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ ret = nicvf_qset_rq_reclaim(nic, qidx);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
+ nic->vf_id, qidx, ret);
+
+ other_error = ret;
+ rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
+ nicvf_rx_queue_release_mbufs(dev, rxq);
+ nicvf_rx_queue_reset(rxq);
+
+ ret = nicvf_qset_cq_reclaim(nic, qidx);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
+ nic->vf_id, qidx, ret);
+
+ other_error |= ret;
+ dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ return other_error;
+}
+
+static void
+nicvf_dev_rx_queue_release(void *rx_queue)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_free(rx_queue);
+}
+
+static int
+nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ int ret;
+
+ if (qidx >= MAX_RCV_QUEUES_PER_QS)
+ nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
+
+ qidx = qidx % MAX_RCV_QUEUES_PER_QS;
+
+ ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
+ if (ret)
+ return ret;
+
+ ret = nicvf_configure_cpi(dev);
+ if (ret)
+ return ret;
+
+ return nicvf_configure_rss_reta(dev);
+}
+
+static int
+nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ int ret;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (qidx >= MAX_SND_QUEUES_PER_QS)
+ nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
+
+ qidx = qidx % MAX_RCV_QUEUES_PER_QS;
+
+ ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
+ ret |= nicvf_configure_cpi(dev);
+ ret |= nicvf_configure_rss_reta(dev);
+ return ret;
+}
+
+static int
+nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (qidx >= MAX_SND_QUEUES_PER_QS)
+ nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
+
+ qidx = qidx % MAX_SND_QUEUES_PER_QS;
+
+ return nicvf_vf_start_tx_queue(dev, nic, qidx);
+}
+
+static int
+nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (qidx >= MAX_SND_QUEUES_PER_QS)
+ nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
+
+ qidx = qidx % MAX_SND_QUEUES_PER_QS;
+
+ return nicvf_vf_stop_tx_queue(dev, nic, qidx);
+}
+
+static inline void
+nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def;
+ struct nicvf *nic = rxq->nic;
+
+ RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
+ offsetof(struct rte_mbuf, data_off) != 2);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
+ offsetof(struct rte_mbuf, data_off) != 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
+ offsetof(struct rte_mbuf, data_off) != 6);
+ RTE_BUILD_BUG_ON(offsetof(struct nicvf_rxq, rxq_fastpath_data_end) -
+ offsetof(struct nicvf_rxq,
+ rxq_fastpath_data_start) > 128);
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM + (nic->skip_bytes);
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* Prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer.value = *(uint64_t *)p;
+}
+
+static int
+nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ uint16_t rx_free_thresh;
+ struct nicvf_rxq *rxq;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint64_t offloads;
+ uint32_t buffsz;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* First skip check */
+ mbp_priv = rte_mempool_get_priv(mp);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+ if (buffsz < (uint32_t)(nic->skip_bytes)) {
+ PMD_INIT_LOG(ERR, "First skip is more than configured buffer size");
+ return -EINVAL;
+ }
+
+ if (qidx >= MAX_RCV_QUEUES_PER_QS)
+ nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
+
+ qidx = qidx % MAX_RCV_QUEUES_PER_QS;
+
+ /* Socket id check */
+ if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
+ PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
+ socket_id, nic->node);
+
+ /* Mempool memory must be contiguous, so must be one memory segment*/
+ if (mp->nb_mem_chunks != 1) {
+ PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
+ return -EINVAL;
+ }
+
+ /* Mempool memory must be physically contiguous */
+ if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
+ PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
+ return -EINVAL;
+ }
+
+ /* Rx deferred start is not supported */
+ if (rx_conf->rx_deferred_start) {
+ PMD_INIT_LOG(ERR, "Rx deferred start not supported");
+ return -EINVAL;
+ }
+
+ /* Roundup nb_desc to available qsize and validate max number of desc */
+ nb_desc = nicvf_qsize_cq_roundup(nb_desc);
+ if (nb_desc == 0) {
+ PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
+ return -EINVAL;
+ }
+
+
+ /* Check rx_free_thresh upper bound */
+ rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
+ rx_conf->rx_free_thresh :
+ NICVF_DEFAULT_RX_FREE_THRESH);
+ if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
+ rx_free_thresh >= nb_desc * .75) {
+ PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
+ rx_free_thresh);
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed */
+ if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
+ PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+ nicvf_netdev_qidx(nic, qidx));
+ nicvf_dev_rx_queue_release(
+ dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
+ dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
+ }
+
+ /* Allocate rxq memory */
+ rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
+ RTE_CACHE_LINE_SIZE, nic->node);
+ if (rxq == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
+ nicvf_netdev_qidx(nic, qidx));
+ return -ENOMEM;
+ }
+
+ rxq->nic = nic;
+ rxq->pool = mp;
+ rxq->queue_id = qidx;
+ rxq->port_id = dev->data->port_id;
+ rxq->rx_free_thresh = rx_free_thresh;
+ rxq->rx_drop_en = rx_conf->rx_drop_en;
+ rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
+ rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
+ rxq->precharge_cnt = 0;
+
+ if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
+ rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
+ else
+ rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
+
+ nicvf_rxq_mbuf_setup(rxq);
+
+ /* Alloc completion queue */
+ if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
+ PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
+ nicvf_dev_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ nicvf_rx_queue_reset(rxq);
+
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+ PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
+ " phy=0x%" PRIx64 " offloads=0x%" PRIx64,
+ nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
+ rte_mempool_avail_count(mp), rxq->phys, offloads);
+
+ dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
+ dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+static void
+nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Autonegotiation may be disabled */
+ dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+ dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
+ dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+
+ dev_info->min_rx_bufsize = ETHER_MIN_MTU;
+ dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN;
+ dev_info->max_rx_queues =
+ (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
+ dev_info->max_tx_queues =
+ (uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_vfs = pci_dev->max_vfs;
+
+ dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+ dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
+ dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+ dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;
+
+ dev_info->reta_size = nic->rss_info.rss_size;
+ dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
+ dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
+ if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
+ dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
+ .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM,
+ };
+}
+
+static nicvf_iova_addr_t
+rbdr_rte_mempool_get(void *dev, void *opaque)
+{
+ uint16_t qidx;
+ uintptr_t mbuf;
+ struct nicvf_rxq *rxq;
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
+ struct nicvf *nic = (struct nicvf *)opaque;
+ uint16_t rx_start, rx_end;
+
+ /* Get queue ranges for this VF */
+ nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);
+
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
+ rxq = eth_dev->data->rx_queues[qidx];
+ /* Maintain equal buffer count across all pools */
+ if (rxq->precharge_cnt >= rxq->qlen_mask)
+ continue;
+ rxq->precharge_cnt++;
+ mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
+ if (mbuf)
+ return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
+ }
+ return 0;
+}
+
+static int
+nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
+{
+ int ret;
+ uint16_t qidx, data_off;
+ uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
+ uint64_t mbuf_phys_off = 0;
+ struct nicvf_rxq *rxq;
+ struct rte_mbuf *mbuf;
+ uint16_t rx_start, rx_end;
+ uint16_t tx_start, tx_end;
+ int mask;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Userspace process exited without proper shutdown in last run */
+ if (nicvf_qset_rbdr_active(nic, 0))
+ nicvf_vf_stop(dev, nic, false);
+
+ /* Get queue ranges for this VF */
+ nicvf_rx_range(dev, nic, &rx_start, &rx_end);
+
+ /*
+ * Thunderx nicvf PMD can support more than one pool per port only when
+ * 1) Data payload size is same across all the pools in given port
+ * AND
+ * 2) All mbuffs in the pools are from the same hugepage
+ * AND
+ * 3) Mbuff metadata size is same across all the pools in given port
+ *
+ * This is to support existing application that uses multiple pool/port.
+ * But, the purpose of using multipool for QoS will not be addressed.
+ *
+ */
+
+ /* Validate mempool attributes */
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
+ rxq = dev->data->rx_queues[qidx];
+ rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
+ mbuf = rte_pktmbuf_alloc(rxq->pool);
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "Failed allocate mbuf VF%d qid=%d "
+ "pool=%s",
+ nic->vf_id, qidx, rxq->pool->name);
+ return -ENOMEM;
+ }
+ data_off = nicvf_mbuff_meta_length(mbuf);
+ data_off += RTE_PKTMBUF_HEADROOM;
+ rte_pktmbuf_free(mbuf);
+
+ if (data_off % RTE_CACHE_LINE_SIZE) {
+ PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
+ rxq->pool->name, data_off,
+ data_off % RTE_CACHE_LINE_SIZE);
+ return -EINVAL;
+ }
+ rxq->mbuf_phys_off -= data_off;
+ rxq->mbuf_phys_off -= nic->skip_bytes;
+
+ if (mbuf_phys_off == 0)
+ mbuf_phys_off = rxq->mbuf_phys_off;
+ if (mbuf_phys_off != rxq->mbuf_phys_off) {
+ PMD_INIT_LOG(ERR, "pool params not same,%s VF%d %"
+ PRIx64, rxq->pool->name, nic->vf_id,
+ mbuf_phys_off);
+ return -EINVAL;
+ }
+ }
+
+ /* Check the level of buffers in the pool */
+ total_rxq_desc = 0;
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
+ rxq = dev->data->rx_queues[qidx];
+ /* Count total numbers of rxq descs */
+ total_rxq_desc += rxq->qlen_mask + 1;
+ exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
+ exp_buffs *= dev->data->nb_rx_queues;
+ if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
+ PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
+ rxq->pool->name,
+ rte_mempool_avail_count(rxq->pool),
+ exp_buffs);
+ return -ENOENT;
+ }
+ }
+
+ /* Check RBDR desc overflow */
+ ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
+ if (ret == 0) {
+ PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
+ "VF%d", nic->vf_id);
+ return -ENOMEM;
+ }
+
+ /* Enable qset */
+ ret = nicvf_qset_config(nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
+ nic->vf_id);
+ return ret;
+ }
+
+ /* Allocate RBDR and RBDR ring desc */
+ nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
+ ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
+ "VF%d", nic->vf_id);
+ goto qset_reclaim;
+ }
+
+ /* Enable and configure RBDR registers */
+ ret = nicvf_qset_rbdr_config(nic, 0);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
+ nic->vf_id);
+ goto qset_rbdr_free;
+ }
+
+ /* Fill rte_mempool buffers in RBDR pool and precharge it */
+ ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
+ total_rxq_desc);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
+ nic->vf_id);
+ goto qset_rbdr_reclaim;
+ }
+
+ PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
+ nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
+
+ /* Configure VLAN Strip */
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ret = nicvf_vlan_offload_config(dev, mask);
+
+ /* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
+ * to the 64bit memory address.
+ * The alignment creates a hole in mbuf(between the end of headroom and
+ * packet data start). The new revision of the HW provides an option to
+ * disable the L3 alignment feature and make mbuf layout looks
+ * more like other NICs. For better application compatibility, disabling
+ * l3 alignment feature on the hardware revisions it supports
+ */
+ nicvf_apad_config(nic, false);
+
+ /* Get queue ranges for this VF */
+ nicvf_tx_range(dev, nic, &tx_start, &tx_end);
+
+ /* Configure TX queues */
+ for (qidx = tx_start; qidx <= tx_end; qidx++) {
+ ret = nicvf_vf_start_tx_queue(dev, nic,
+ qidx % MAX_SND_QUEUES_PER_QS);
+ if (ret)
+ goto start_txq_error;
+ }
+
+ /* Configure RX queues */
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
+ ret = nicvf_vf_start_rx_queue(dev, nic,
+ qidx % MAX_RCV_QUEUES_PER_QS);
+ if (ret)
+ goto start_rxq_error;
+ }
+
+ if (!nic->sqs_mode) {
+ /* Configure CPI algorithm */
+ ret = nicvf_configure_cpi(dev);
+ if (ret)
+ goto start_txq_error;
+
+ ret = nicvf_mbox_get_rss_size(nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to get rss table size");
+ goto qset_rss_error;
+ }
+
+ /* Configure RSS */
+ ret = nicvf_configure_rss(dev);
+ if (ret)
+ goto qset_rss_error;
+ }
+
+ /* Done; Let PF make the BGX's RX and TX switches to ON position */
+ nicvf_mbox_cfg_done(nic);
+ return 0;
+
+qset_rss_error:
+ nicvf_rss_term(nic);
+start_rxq_error:
+ for (qidx = rx_start; qidx <= rx_end; qidx++)
+ nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
+start_txq_error:
+ for (qidx = tx_start; qidx <= tx_end; qidx++)
+ nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
+qset_rbdr_reclaim:
+ nicvf_qset_rbdr_reclaim(nic, 0);
+ nicvf_rbdr_release_mbufs(dev, nic);
+qset_rbdr_free:
+ if (nic->rbdr) {
+ rte_free(nic->rbdr);
+ nic->rbdr = NULL;
+ }
+qset_reclaim:
+ nicvf_qset_reclaim(nic);
+ return ret;
+}
+
+static int
+nicvf_dev_start(struct rte_eth_dev *dev)
+{
+ uint16_t qidx;
+ int ret;
+ size_t i;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ uint16_t mtu;
+ uint32_t buffsz = 0, rbdrsz = 0;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct nicvf_rxq *rxq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* This function must be called for a primary device */
+ assert_primary(nic);
+
+ /* Validate RBDR buff size */
+ for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
+ rxq = dev->data->rx_queues[qidx];
+ mbp_priv = rte_mempool_get_priv(rxq->pool);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+ if (buffsz % 128) {
+ PMD_INIT_LOG(ERR, "rxbuf size must be multiply of 128");
+ return -EINVAL;
+ }
+ if (rbdrsz == 0)
+ rbdrsz = buffsz;
+ if (rbdrsz != buffsz) {
+ PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
+ qidx, rbdrsz, buffsz);
+ return -EINVAL;
+ }
+ }
+
+ /* Configure loopback */
+ ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
+ return ret;
+ }
+
+ /* Reset all statistics counters attached to this port */
+ ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
+ return ret;
+ }
+
+ /* Setup scatter mode if needed by jumbo */
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * VLAN_TAG_SIZE > buffsz)
+ dev->data->scattered_rx = 1;
+ if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
+ dev->data->scattered_rx = 1;
+
+ /* Setup MTU based on max_rx_pkt_len or default */
+ mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
+ dev->data->dev_conf.rxmode.max_rx_pkt_len
+ - ETHER_HDR_LEN : ETHER_MTU;
+
+ if (nicvf_dev_set_mtu(dev, mtu)) {
+ PMD_INIT_LOG(ERR, "Failed to set default mtu size");
+ return -EBUSY;
+ }
+
+ ret = nicvf_vf_start(dev, nic, rbdrsz);
+ if (ret != 0)
+ return ret;
+
+ for (i = 0; i < nic->sqs_count; i++) {
+ assert(nic->snicvf[i]);
+
+ ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
+ if (ret != 0)
+ return ret;
+ }
+
+ /* Configure callbacks based on offloads */
+ nicvf_set_tx_function(dev);
+ nicvf_set_rx_function(dev);
+
+ return 0;
+}
+
+static void
+nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
+{
+ size_t i;
+ int ret;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Teardown secondary vf first */
+ for (i = 0; i < nic->sqs_count; i++) {
+ if (!nic->snicvf[i])
+ continue;
+
+ nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
+ }
+
+ /* Stop the primary VF now */
+ nicvf_vf_stop(dev, nic, cleanup);
+
+ /* Disable loopback */
+ ret = nicvf_loopback_config(nic, 0);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
+
+ /* Reclaim CPI configuration */
+ ret = nicvf_mbox_config_cpi(nic, 0);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
+}
+
+static void
+nicvf_dev_stop(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ nicvf_dev_stop_cleanup(dev, false);
+}
+
+static void
+nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
+{
+ int ret;
+ uint16_t qidx;
+ uint16_t tx_start, tx_end;
+ uint16_t rx_start, rx_end;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (cleanup) {
+ /* Let PF make the BGX's RX and TX switches to OFF position */
+ nicvf_mbox_shutdown(nic);
+ }
+
+ /* Disable VLAN Strip */
+ nicvf_vlan_hw_strip(nic, 0);
+
+ /* Get queue ranges for this VF */
+ nicvf_tx_range(dev, nic, &tx_start, &tx_end);
+
+ for (qidx = tx_start; qidx <= tx_end; qidx++)
+ nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
+
+ /* Get queue ranges for this VF */
+ nicvf_rx_range(dev, nic, &rx_start, &rx_end);
+
+ /* Reclaim rq */
+ for (qidx = rx_start; qidx <= rx_end; qidx++)
+ nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
+
+ /* Reclaim RBDR */
+ ret = nicvf_qset_rbdr_reclaim(nic, 0);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
+
+ /* Move all charged buffers in RBDR back to pool */
+ if (nic->rbdr != NULL)
+ nicvf_rbdr_release_mbufs(dev, nic);
+
+ /* Disable qset */
+ ret = nicvf_qset_reclaim(nic);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
+
+ /* Disable all interrupts */
+ nicvf_disable_all_interrupts(nic);
+
+ /* Free RBDR SW structure */
+ if (nic->rbdr) {
+ rte_free(nic->rbdr);
+ nic->rbdr = NULL;
+ }
+}
+
+static void
+nicvf_dev_close(struct rte_eth_dev *dev)
+{
+ size_t i;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ nicvf_dev_stop_cleanup(dev, true);
+ nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
+
+ for (i = 0; i < nic->sqs_count; i++) {
+ if (!nic->snicvf[i])
+ continue;
+
+ nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
+ }
+}
+
+static int
+nicvf_request_sqs(struct nicvf *nic)
+{
+ size_t i;
+
+ assert_primary(nic);
+ assert(nic->sqs_count > 0);
+ assert(nic->sqs_count <= MAX_SQS_PER_VF);
+
+ /* Set no of Rx/Tx queues in each of the SQsets */
+ for (i = 0; i < nic->sqs_count; i++) {
+ if (nicvf_svf_empty())
+ rte_panic("Cannot assign sufficient number of "
+ "secondary queues to primary VF%" PRIu8 "\n",
+ nic->vf_id);
+
+ nic->snicvf[i] = nicvf_svf_pop();
+ nic->snicvf[i]->sqs_id = i;
+ }
+
+ return nicvf_mbox_request_sqs(nic);
+}
+
+static int
+nicvf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *conf = &data->dev_conf;
+ struct rte_eth_rxmode *rxmode = &conf->rxmode;
+ struct rte_eth_txmode *txmode = &conf->txmode;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint8_t cqcount;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!rte_eal_has_hugepages()) {
+ PMD_INIT_LOG(INFO, "Huge page is not configured");
+ return -EINVAL;
+ }
+
+ /* KEEP_CRC offload flag is not supported by PMD
+ * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
+ */
+ if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
+ PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
+ rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ }
+
+ if (txmode->mq_mode) {
+ PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
+ return -EINVAL;
+ }
+
+ if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != ETH_MQ_RX_RSS) {
+ PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
+ return -EINVAL;
+ }
+
+ if (rxmode->split_hdr_size) {
+ PMD_INIT_LOG(INFO, "Rxmode does not support split header");
+ return -EINVAL;
+ }
+
+ if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+ PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
+ return -EINVAL;
+ }
+
+ if (conf->dcb_capability_en) {
+ PMD_INIT_LOG(INFO, "DCB enable not supported");
+ return -EINVAL;
+ }
+
+ if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ PMD_INIT_LOG(INFO, "Flow director not supported");
+ return -EINVAL;
+ }
+
+ assert_primary(nic);
+ NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
+ cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
+ if (cqcount > MAX_RCV_QUEUES_PER_QS) {
+ nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
+ nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
+ } else {
+ nic->sqs_count = 0;
+ }
+
+ assert(nic->sqs_count <= MAX_SQS_PER_VF);
+
+ if (nic->sqs_count > 0) {
+ if (nicvf_request_sqs(nic)) {
+ rte_panic("Cannot assign sufficient number of "
+ "secondary queues to PORT%d VF%" PRIu8 "\n",
+ dev->data->port_id, nic->vf_id);
+ }
+ }
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ nic->offload_cksum = 1;
+
+ PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
+ dev->data->port_id, nicvf_hw_cap(nic));
+
+ return 0;
+}
+
+/* Initialize and register driver with DPDK Application */
+static const struct eth_dev_ops nicvf_eth_dev_ops = {
+ .dev_configure = nicvf_dev_configure,
+ .dev_start = nicvf_dev_start,
+ .dev_stop = nicvf_dev_stop,
+ .link_update = nicvf_dev_link_update,
+ .dev_close = nicvf_dev_close,
+ .stats_get = nicvf_dev_stats_get,
+ .stats_reset = nicvf_dev_stats_reset,
+ .promiscuous_enable = nicvf_dev_promisc_enable,
+ .dev_infos_get = nicvf_dev_info_get,
+ .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
+ .mtu_set = nicvf_dev_set_mtu,
+ .vlan_offload_set = nicvf_vlan_offload_set,
+ .reta_update = nicvf_dev_reta_update,
+ .reta_query = nicvf_dev_reta_query,
+ .rss_hash_update = nicvf_dev_rss_hash_update,
+ .rss_hash_conf_get = nicvf_dev_rss_hash_conf_get,
+ .rx_queue_start = nicvf_dev_rx_queue_start,
+ .rx_queue_stop = nicvf_dev_rx_queue_stop,
+ .tx_queue_start = nicvf_dev_tx_queue_start,
+ .tx_queue_stop = nicvf_dev_tx_queue_stop,
+ .rx_queue_setup = nicvf_dev_rx_queue_setup,
+ .rx_queue_release = nicvf_dev_rx_queue_release,
+ .rx_queue_count = nicvf_dev_rx_queue_count,
+ .tx_queue_setup = nicvf_dev_tx_queue_setup,
+ .tx_queue_release = nicvf_dev_tx_queue_release,
+ .get_reg = nicvf_dev_get_regs,
+};
+
+static int
+nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ nicvf_vlan_hw_strip(nic, true);
+ else
+ nicvf_vlan_hw_strip(nic, false);
+ }
+
+ return 0;
+}
+
+static int
+nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ nicvf_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
+static inline int
+nicvf_set_first_skip(struct rte_eth_dev *dev)
+{
+ int bytes_to_skip = 0;
+ int ret = 0;
+ unsigned int i;
+ struct rte_kvargs *kvlist;
+ static const char *const skip[] = {
+ SKIP_DATA_BYTES,
+ NULL};
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (!dev->device->devargs) {
+ nicvf_first_skip_config(nic, 0);
+ return ret;
+ }
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, skip);
+ if (!kvlist)
+ return -EINVAL;
+
+ if (kvlist->count == 0)
+ goto exit;
+
+ for (i = 0; i != kvlist->count; ++i) {
+ const struct rte_kvargs_pair *pair = &kvlist->pairs[i];
+
+ if (!strcmp(pair->key, SKIP_DATA_BYTES))
+ bytes_to_skip = atoi(pair->value);
+ }
+
+ /*128 bytes amounts to one cache line*/
+ if (bytes_to_skip >= 0 && bytes_to_skip < 128) {
+ if (!(bytes_to_skip % 8)) {
+ nicvf_first_skip_config(nic, (bytes_to_skip / 8));
+ nic->skip_bytes = bytes_to_skip;
+ goto kvlist_free;
+ } else {
+ PMD_INIT_LOG(ERR, "skip_data_bytes should be multiple of 8");
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ PMD_INIT_LOG(ERR, "skip_data_bytes should be less than 128");
+ ret = -EINVAL;
+ goto exit;
+ }
+exit:
+ nicvf_first_skip_config(nic, 0);
+kvlist_free:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+static int
+nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+ int ret;
+ struct rte_pci_device *pci_dev;
+ struct nicvf *nic = nicvf_pmd_priv(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &nicvf_eth_dev_ops;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ if (nic) {
+ /* Setup callbacks for secondary process */
+ nicvf_set_tx_function(eth_dev);
+ nicvf_set_rx_function(eth_dev);
+ return 0;
+ } else {
+ /* If nic == NULL than it is secondary function
+ * so ethdev need to be released by caller */
+ return ENOTSUP;
+ }
+ }
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ nic->device_id = pci_dev->id.device_id;
+ nic->vendor_id = pci_dev->id.vendor_id;
+ nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+
+ PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
+ pci_dev->id.vendor_id, pci_dev->id.device_id,
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+
+ nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
+ if (!nic->reg_base) {
+ PMD_INIT_LOG(ERR, "Failed to map BAR0");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ nicvf_disable_all_interrupts(nic);
+
+ ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to start period alarm");
+ goto fail;
+ }
+
+ ret = nicvf_mbox_check_pf_ready(nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
+ goto alarm_fail;
+ } else {
+ PMD_INIT_LOG(INFO,
+ "node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
+ nic->node, nic->vf_id,
+ nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
+ nic->sqs_mode ? "true" : "false",
+ nic->loopback_supported ? "true" : "false"
+ );
+ }
+
+ ret = nicvf_base_init(nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
+ goto malloc_fail;
+ }
+
+ if (nic->sqs_mode) {
+ /* Push nic to stack of secondary vfs */
+ nicvf_svf_push(nic);
+
+ /* Steal nic pointer from the device for further reuse */
+ eth_dev->data->dev_private = NULL;
+
+ nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
+ ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to start period alarm");
+ goto fail;
+ }
+
+ /* Detach port by returning positive error number */
+ return ENOTSUP;
+ }
+
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
+ ret = -ENOMEM;
+ goto alarm_fail;
+ }
+ if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
+ eth_random_addr(&nic->mac_addr[0]);
+
+ ether_addr_copy((struct ether_addr *)nic->mac_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to set mac addr");
+ goto malloc_fail;
+ }
+
+ ret = nicvf_set_first_skip(eth_dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure first skip");
+ goto malloc_fail;
+ }
+ PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
+ eth_dev->data->port_id, nic->vendor_id, nic->device_id,
+ nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
+ nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);
+
+ return 0;
+
+malloc_fail:
+ rte_free(eth_dev->data->mac_addrs);
+alarm_fail:
+ nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
+fail:
+ return ret;
+}
+
+static const struct rte_pci_id pci_id_nicvf_map[] = {
+ {
+ .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
+ .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
+ },
+ {
+ .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
+ .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
+ },
+ {
+ .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
+ .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
+ },
+ {
+ .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
+ .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
+ nicvf_eth_dev_init);
+}
+
+static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_nicvf_pmd = {
+ .id_table = pci_id_nicvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
+ RTE_PCI_DRV_INTR_LSC,
+ .probe = nicvf_eth_pci_probe,
+ .remove = nicvf_eth_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_thunderx, SKIP_DATA_BYTES "=<int>");
diff --git a/src/spdk/dpdk/drivers/net/thunderx/nicvf_ethdev.h b/src/spdk/dpdk/drivers/net/thunderx/nicvf_ethdev.h
new file mode 100644
index 00000000..ae440fef
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/nicvf_ethdev.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef __THUNDERX_NICVF_ETHDEV_H__
+#define __THUNDERX_NICVF_ETHDEV_H__
+
+#include <rte_ethdev_driver.h>
+
+#define THUNDERX_NICVF_PMD_VERSION "2.0"
+#define THUNDERX_REG_BYTES 8
+
+#define NICVF_INTR_POLL_INTERVAL_MS 50
+#define NICVF_HALF_DUPLEX 0x00
+#define NICVF_FULL_DUPLEX 0x01
+#define NICVF_UNKNOWN_DUPLEX 0xff
+
+#define NICVF_RSS_OFFLOAD_PASS1 ( \
+ ETH_RSS_PORT | \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP)
+
+#define NICVF_RSS_OFFLOAD_TUNNEL ( \
+ ETH_RSS_VXLAN | \
+ ETH_RSS_GENEVE | \
+ ETH_RSS_NVGRE)
+
+#define NICVF_TX_OFFLOAD_CAPA ( \
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define NICVF_RX_OFFLOAD_CAPA ( \
+ DEV_RX_OFFLOAD_CHECKSUM | \
+ DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_CRC_STRIP | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_SCATTER)
+
+#define NICVF_DEFAULT_RX_FREE_THRESH 224
+#define NICVF_DEFAULT_TX_FREE_THRESH 224
+#define NICVF_TX_FREE_MPOOL_THRESH 16
+#define NICVF_MAX_RX_FREE_THRESH 1024
+#define NICVF_MAX_TX_FREE_THRESH 1024
+
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag */
+
+#define SKIP_DATA_BYTES "skip_data_bytes"
+static inline struct nicvf *
+nicvf_pmd_priv(struct rte_eth_dev *eth_dev)
+{
+ return eth_dev->data->dev_private;
+}
+
+static inline uint64_t
+nicvf_mempool_phy_offset(struct rte_mempool *mp)
+{
+ struct rte_mempool_memhdr *hdr;
+
+ hdr = STAILQ_FIRST(&mp->mem_list);
+ assert(hdr != NULL);
+ return (uint64_t)((uintptr_t)hdr->addr - hdr->iova);
+}
+
+static inline uint16_t
+nicvf_mbuff_meta_length(struct rte_mbuf *mbuf)
+{
+ return (uint16_t)((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
+}
+
+static inline uint16_t
+nicvf_netdev_qidx(struct nicvf *nic, uint8_t local_qidx)
+{
+ uint16_t global_qidx = local_qidx;
+
+ if (nic->sqs_mode)
+ global_qidx += ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
+
+ return global_qidx;
+}
+
+/*
+ * Simple phy2virt functions assuming mbufs are in a single huge page
+ * V = P + offset
+ * P = V - offset
+ */
+static inline uintptr_t
+nicvf_mbuff_phy2virt(rte_iova_t phy, uint64_t mbuf_phys_off)
+{
+ return (uintptr_t)(phy + mbuf_phys_off);
+}
+
+static inline uintptr_t
+nicvf_mbuff_virt2phy(uintptr_t virt, uint64_t mbuf_phys_off)
+{
+ return (rte_iova_t)(virt - mbuf_phys_off);
+}
+
+static inline void
+nicvf_tx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *tx_start,
+ uint16_t *tx_end)
+{
+ uint16_t tmp;
+
+ *tx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
+ MAX_SND_QUEUES_PER_QS);
+ tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
+ MAX_SND_QUEUES_PER_QS) - 1;
+ *tx_end = dev->data->nb_tx_queues ?
+ RTE_MIN(tmp, dev->data->nb_tx_queues - 1) : 0;
+}
+
+static inline void
+nicvf_rx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *rx_start,
+ uint16_t *rx_end)
+{
+ uint16_t tmp;
+
+ *rx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
+ MAX_RCV_QUEUES_PER_QS);
+ tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
+ MAX_RCV_QUEUES_PER_QS) - 1;
+ *rx_end = dev->data->nb_rx_queues ?
+ RTE_MIN(tmp, dev->data->nb_rx_queues - 1) : 0;
+}
+
+#endif /* __THUNDERX_NICVF_ETHDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/net/thunderx/nicvf_logs.h b/src/spdk/dpdk/drivers/net/thunderx/nicvf_logs.h
new file mode 100644
index 00000000..3c455b42
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/nicvf_logs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef __THUNDERX_NICVF_LOGS__
+#define __THUNDERX_NICVF_LOGS__
+
+#include <assert.h>
+
+#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX
+#define NICVF_RX_ASSERT(x) assert(x)
+#else
+#define NICVF_RX_ASSERT(x) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX
+#define NICVF_TX_ASSERT(x) assert(x)
+#else
+#define NICVF_TX_ASSERT(x) do { } while (0)
+#endif
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, nicvf_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, ">>")
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, nicvf_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
+
+#define PMD_MBOX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, nicvf_logtype_mbox, \
+ "%s(): " fmt "\n", __func__, ## args)
+#define PMD_MBOX_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
+
+#define PMD_RX_LOG PMD_DRV_LOG
+#define PMD_TX_LOG PMD_DRV_LOG
+
+extern int nicvf_logtype_init;
+extern int nicvf_logtype_driver;
+extern int nicvf_logtype_mbox;
+
+#endif /* __THUNDERX_NICVF_LOGS__ */
diff --git a/src/spdk/dpdk/drivers/net/thunderx/nicvf_rxtx.c b/src/spdk/dpdk/drivers/net/thunderx/nicvf_rxtx.c
new file mode 100644
index 00000000..247c3568
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/nicvf_rxtx.c
@@ -0,0 +1,669 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_errno.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_mbuf.h>
+#include <rte_prefetch.h>
+
+#include "base/nicvf_plat.h"
+
+#include "nicvf_ethdev.h"
+#include "nicvf_rxtx.h"
+#include "nicvf_logs.h"
+
+static inline void __hot
+fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
+{
+ /* Local variable sqe to avoid read from sq desc memory*/
+ union sq_entry_t sqe;
+ uint64_t ol_flags;
+
+ /* Fill SQ header descriptor */
+ sqe.buff[0] = 0;
+ sqe.hdr.subdesc_type = SQ_DESC_TYPE_HEADER;
+ /* Number of sub-descriptors following this one */
+ sqe.hdr.subdesc_cnt = pkt->nb_segs;
+ sqe.hdr.tot_len = pkt->pkt_len;
+
+ ol_flags = pkt->ol_flags & NICVF_TX_OFFLOAD_MASK;
+ if (unlikely(ol_flags)) {
+ /* L4 cksum */
+ uint64_t l4_flags = ol_flags & PKT_TX_L4_MASK;
+ if (l4_flags == PKT_TX_TCP_CKSUM)
+ sqe.hdr.csum_l4 = SEND_L4_CSUM_TCP;
+ else if (l4_flags == PKT_TX_UDP_CKSUM)
+ sqe.hdr.csum_l4 = SEND_L4_CSUM_UDP;
+ else
+ sqe.hdr.csum_l4 = SEND_L4_CSUM_DISABLE;
+
+ sqe.hdr.l3_offset = pkt->l2_len;
+ sqe.hdr.l4_offset = pkt->l3_len + pkt->l2_len;
+
+ /* L3 cksum */
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ sqe.hdr.csum_l3 = 1;
+ }
+
+ entry->buff[0] = sqe.buff[0];
+}
+
+void __hot
+nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
+{
+ int j = 0;
+ uint32_t curr_head;
+ uint32_t head = sq->head;
+ struct rte_mbuf **txbuffs = sq->txbuffs;
+ void *obj_p[NICVF_MAX_TX_FREE_THRESH] __rte_cache_aligned;
+
+ curr_head = nicvf_addr_read(sq->sq_head) >> 4;
+ while (head != curr_head) {
+ if (txbuffs[head])
+ obj_p[j++] = txbuffs[head];
+
+ head = (head + 1) & sq->qlen_mask;
+ }
+
+ rte_mempool_put_bulk(sq->pool, obj_p, j);
+ sq->head = curr_head;
+ sq->xmit_bufs -= j;
+ NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
+}
+
+void __hot
+nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq)
+{
+ uint32_t n = 0;
+ uint32_t curr_head;
+ uint32_t head = sq->head;
+ struct rte_mbuf **txbuffs = sq->txbuffs;
+
+ curr_head = nicvf_addr_read(sq->sq_head) >> 4;
+ while (head != curr_head) {
+ if (txbuffs[head]) {
+ rte_pktmbuf_free_seg(txbuffs[head]);
+ n++;
+ }
+
+ head = (head + 1) & sq->qlen_mask;
+ }
+
+ sq->head = curr_head;
+ sq->xmit_bufs -= n;
+ NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
+}
+
+static inline uint32_t __hot
+nicvf_free_tx_desc(struct nicvf_txq *sq)
+{
+ return ((sq->head - sq->tail - 1) & sq->qlen_mask);
+}
+
+/* Send Header + Packet */
+#define TX_DESC_PER_PKT 2
+
+static inline uint32_t __hot
+nicvf_free_xmitted_buffers(struct nicvf_txq *sq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint32_t free_desc = nicvf_free_tx_desc(sq);
+
+ if (free_desc < nb_pkts * TX_DESC_PER_PKT ||
+ sq->xmit_bufs > sq->tx_free_thresh) {
+ if (unlikely(sq->pool == NULL))
+ sq->pool = tx_pkts[0]->pool;
+
+ sq->pool_free(sq);
+ /* Freed now, let see the number of free descs again */
+ free_desc = nicvf_free_tx_desc(sq);
+ }
+ return free_desc;
+}
+
+uint16_t __hot
+nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+ uint32_t free_desc;
+ uint32_t tail;
+ struct nicvf_txq *sq = tx_queue;
+ union sq_entry_t *desc_ptr = sq->desc;
+ struct rte_mbuf **txbuffs = sq->txbuffs;
+ struct rte_mbuf *pkt;
+ uint32_t qlen_mask = sq->qlen_mask;
+
+ tail = sq->tail;
+ free_desc = nicvf_free_xmitted_buffers(sq, tx_pkts, nb_pkts);
+
+ for (i = 0; i < nb_pkts && (int)free_desc >= TX_DESC_PER_PKT; i++) {
+ pkt = tx_pkts[i];
+
+ txbuffs[tail] = NULL;
+ fill_sq_desc_header(desc_ptr + tail, pkt);
+ tail = (tail + 1) & qlen_mask;
+
+ txbuffs[tail] = pkt;
+ fill_sq_desc_gather(desc_ptr + tail, pkt);
+ tail = (tail + 1) & qlen_mask;
+ free_desc -= TX_DESC_PER_PKT;
+ }
+
+ if (likely(i)) {
+ sq->tail = tail;
+ sq->xmit_bufs += i;
+ rte_wmb();
+
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ }
+ return i;
+}
+
+uint16_t __hot
+nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, k;
+ uint32_t used_desc, next_used_desc, used_bufs, free_desc, tail;
+ struct nicvf_txq *sq = tx_queue;
+ union sq_entry_t *desc_ptr = sq->desc;
+ struct rte_mbuf **txbuffs = sq->txbuffs;
+ struct rte_mbuf *pkt, *seg;
+ uint32_t qlen_mask = sq->qlen_mask;
+ uint16_t nb_segs;
+
+ tail = sq->tail;
+ used_desc = 0;
+ used_bufs = 0;
+
+ free_desc = nicvf_free_xmitted_buffers(sq, tx_pkts, nb_pkts);
+
+ for (i = 0; i < nb_pkts; i++) {
+ pkt = tx_pkts[i];
+
+ nb_segs = pkt->nb_segs;
+
+ next_used_desc = used_desc + nb_segs + 1;
+ if (next_used_desc > free_desc)
+ break;
+ used_desc = next_used_desc;
+ used_bufs += nb_segs;
+
+ txbuffs[tail] = NULL;
+ fill_sq_desc_header(desc_ptr + tail, pkt);
+ tail = (tail + 1) & qlen_mask;
+
+ txbuffs[tail] = pkt;
+ fill_sq_desc_gather(desc_ptr + tail, pkt);
+ tail = (tail + 1) & qlen_mask;
+
+ seg = pkt->next;
+ for (k = 1; k < nb_segs; k++) {
+ txbuffs[tail] = seg;
+ fill_sq_desc_gather(desc_ptr + tail, seg);
+ tail = (tail + 1) & qlen_mask;
+ seg = seg->next;
+ }
+ }
+
+ if (likely(used_desc)) {
+ sq->tail = tail;
+ sq->xmit_bufs += used_bufs;
+ rte_wmb();
+
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, used_desc);
+ }
+ return i;
+}
+
+static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
+ [L3_NONE][L4_NONE] = RTE_PTYPE_UNKNOWN,
+ [L3_NONE][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+ [L3_NONE][L4_IPFRAG] = RTE_PTYPE_L4_FRAG,
+ [L3_NONE][L4_IPCOMP] = RTE_PTYPE_UNKNOWN,
+ [L3_NONE][L4_TCP] = RTE_PTYPE_L4_TCP,
+ [L3_NONE][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP,
+ [L3_NONE][L4_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [L3_NONE][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP,
+ [L3_NONE][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_NONE][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_NONE][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+
+ [L3_IPV4][L4_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+ [L3_IPV4][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
+ [L3_IPV4][L4_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
+ [L3_IPV4][L4_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+ [L3_IPV4][L4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [L3_IPV4][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [L3_IPV4][L4_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
+ [L3_IPV4][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [L3_IPV4][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_IPV4][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_IPV4][L4_NVGRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
+
+ [L3_IPV4_OPT][L4_NONE] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+ [L3_IPV4_OPT][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L3_IPV4,
+ [L3_IPV4_OPT][L4_IPFRAG] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
+ [L3_IPV4_OPT][L4_IPCOMP] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+ [L3_IPV4_OPT][L4_TCP] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+ [L3_IPV4_OPT][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [L3_IPV4_OPT][L4_GRE] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
+ [L3_IPV4_OPT][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [L3_IPV4_OPT][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_IPV4_OPT][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_IPV4_OPT][L4_NVGRE] = RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_TUNNEL_NVGRE,
+
+ [L3_IPV6][L4_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+ [L3_IPV6][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
+ [L3_IPV6][L4_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
+ [L3_IPV6][L4_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+ [L3_IPV6][L4_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [L3_IPV6][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [L3_IPV6][L4_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
+ [L3_IPV6][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [L3_IPV6][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_IPV6][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_IPV6][L4_NVGRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,
+
+ [L3_IPV6_OPT][L4_NONE] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+ [L3_IPV6_OPT][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV6_EXT |
+ RTE_PTYPE_L3_IPV4,
+ [L3_IPV6_OPT][L4_IPFRAG] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
+ [L3_IPV6_OPT][L4_IPCOMP] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+ [L3_IPV6_OPT][L4_TCP] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [L3_IPV6_OPT][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [L3_IPV6_OPT][L4_GRE] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
+ [L3_IPV6_OPT][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [L3_IPV6_OPT][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV6_EXT |
+ RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_IPV6_OPT][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV6_EXT |
+ RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_IPV6_OPT][L4_NVGRE] = RTE_PTYPE_L3_IPV6_EXT |
+ RTE_PTYPE_TUNNEL_NVGRE,
+
+ [L3_ET_STOP][L4_NONE] = RTE_PTYPE_UNKNOWN,
+ [L3_ET_STOP][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+ [L3_ET_STOP][L4_IPFRAG] = RTE_PTYPE_L4_FRAG,
+ [L3_ET_STOP][L4_IPCOMP] = RTE_PTYPE_UNKNOWN,
+ [L3_ET_STOP][L4_TCP] = RTE_PTYPE_L4_TCP,
+ [L3_ET_STOP][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP,
+ [L3_ET_STOP][L4_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [L3_ET_STOP][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP,
+ [L3_ET_STOP][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_ET_STOP][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_ET_STOP][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+
+ [L3_OTHER][L4_NONE] = RTE_PTYPE_UNKNOWN,
+ [L3_OTHER][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+ [L3_OTHER][L4_IPFRAG] = RTE_PTYPE_L4_FRAG,
+ [L3_OTHER][L4_IPCOMP] = RTE_PTYPE_UNKNOWN,
+ [L3_OTHER][L4_TCP] = RTE_PTYPE_L4_TCP,
+ [L3_OTHER][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP,
+ [L3_OTHER][L4_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [L3_OTHER][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP,
+ [L3_OTHER][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_OTHER][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_OTHER][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+};
+
+static inline uint32_t __hot
+nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
+{
+ return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
+}
+
+static inline uint64_t __hot
+nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
+{
+ static const uint64_t flag_table[3] __rte_cache_aligned = {
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_UNKNOWN,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ };
+
+ const uint8_t idx = (cqe_rx_w0.err_opcode == CQE_RX_ERR_L4_CHK) << 1 |
+ (cqe_rx_w0.err_opcode == CQE_RX_ERR_IP_CHK);
+ return flag_table[idx];
+}
+
+static inline int __hot
+nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
+{
+ int i;
+ uint32_t ltail, next_tail;
+ struct nicvf_rbdr *rbdr = rxq->shared_rbdr;
+ uint64_t mbuf_phys_off = rxq->mbuf_phys_off;
+ struct rbdr_entry_t *desc = rbdr->desc;
+ uint32_t qlen_mask = rbdr->qlen_mask;
+ uintptr_t door = rbdr->rbdr_door;
+ void *obj_p[NICVF_MAX_RX_FREE_THRESH] __rte_cache_aligned;
+
+ if (unlikely(rte_mempool_get_bulk(rxq->pool, obj_p, to_fill) < 0)) {
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ to_fill;
+ return 0;
+ }
+
+ NICVF_RX_ASSERT((unsigned int)to_fill <= (qlen_mask -
+ (nicvf_addr_read(rbdr->rbdr_status) & NICVF_RBDR_COUNT_MASK)));
+
+ next_tail = __atomic_fetch_add(&rbdr->next_tail, to_fill,
+ __ATOMIC_ACQUIRE);
+ ltail = next_tail;
+ for (i = 0; i < to_fill; i++) {
+ struct rbdr_entry_t *entry = desc + (ltail & qlen_mask);
+
+ entry->full_addr = nicvf_mbuff_virt2phy((uintptr_t)obj_p[i],
+ mbuf_phys_off);
+ ltail++;
+ }
+
+ while (__atomic_load_n(&rbdr->tail, __ATOMIC_RELAXED) != next_tail)
+ rte_pause();
+
+ __atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
+ nicvf_addr_write(door, to_fill);
+ return to_fill;
+}
+
+static inline int32_t __hot
+nicvf_rx_pkts_to_process(struct nicvf_rxq *rxq, uint16_t nb_pkts,
+ int32_t available_space)
+{
+ if (unlikely(available_space < nb_pkts))
+ rxq->available_space = nicvf_addr_read(rxq->cq_status)
+ & NICVF_CQ_CQE_COUNT_MASK;
+
+ return RTE_MIN(nb_pkts, available_space);
+}
+
+static inline void __hot
+nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
+ struct rte_mbuf *pkt)
+{
+ if (likely(cqe_rx_w0.rss_alg)) {
+ pkt->hash.rss = cqe_rx_w2.rss_tag;
+ pkt->ol_flags |= PKT_RX_RSS_HASH;
+
+ }
+}
+
+static __rte_always_inline uint16_t
+nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+ const uint32_t flag)
+{
+ uint32_t i, to_process;
+ struct cqe_rx_t *cqe_rx;
+ struct rte_mbuf *pkt;
+ cqe_rx_word0_t cqe_rx_w0;
+ cqe_rx_word1_t cqe_rx_w1;
+ cqe_rx_word2_t cqe_rx_w2;
+ cqe_rx_word3_t cqe_rx_w3;
+ struct nicvf_rxq *rxq = rx_queue;
+ union cq_entry_t *desc = rxq->desc;
+ const uint64_t cqe_mask = rxq->qlen_mask;
+ uint64_t rb0_ptr, mbuf_phys_off = rxq->mbuf_phys_off;
+ const uint64_t mbuf_init = rxq->mbuf_initializer.value;
+ uint32_t cqe_head = rxq->head & cqe_mask;
+ int32_t available_space = rxq->available_space;
+ const uint8_t rbptr_offset = rxq->rbptr_offset;
+
+ to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space);
+
+ for (i = 0; i < to_process; i++) {
+ rte_prefetch_non_temporal(&desc[cqe_head + 2]);
+ cqe_rx = (struct cqe_rx_t *)&desc[cqe_head];
+ NICVF_RX_ASSERT(((struct cq_entry_type_t *)cqe_rx)->cqe_type
+ == CQE_TYPE_RX);
+
+ NICVF_LOAD_PAIR(cqe_rx_w0.u64, cqe_rx_w1.u64, cqe_rx);
+ NICVF_LOAD_PAIR(cqe_rx_w2.u64, cqe_rx_w3.u64, &cqe_rx->word2);
+ rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
+ pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
+ (rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
+
+ if (flag & NICVF_RX_OFFLOAD_NONE)
+ pkt->ol_flags = 0;
+ if (flag & NICVF_RX_OFFLOAD_CKSUM)
+ pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+ if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+ if (unlikely(cqe_rx_w0.vlan_stripped)) {
+ pkt->ol_flags |= PKT_RX_VLAN
+ | PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci =
+ rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+ }
+ }
+ pkt->data_len = cqe_rx_w3.rb0_sz;
+ pkt->pkt_len = cqe_rx_w3.rb0_sz;
+ pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
+ nicvf_mbuff_init_update(pkt, mbuf_init, cqe_rx_w1.align_pad);
+ nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
+ rx_pkts[i] = pkt;
+ cqe_head = (cqe_head + 1) & cqe_mask;
+ nicvf_prefetch_store_keep(pkt);
+ }
+
+ if (likely(to_process)) {
+ rxq->available_space -= to_process;
+ rxq->head = cqe_head;
+ nicvf_addr_write(rxq->cq_door, to_process);
+ rxq->recv_buffers += to_process;
+ }
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
+ }
+
+ return to_process;
+}
+
+uint16_t __hot
+nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+static __rte_always_inline uint16_t __hot
+nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
+ uint64_t mbuf_phys_off,
+ struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
+ uint64_t mbuf_init, const uint32_t flag)
+{
+ struct rte_mbuf *pkt, *seg, *prev;
+ cqe_rx_word0_t cqe_rx_w0;
+ cqe_rx_word1_t cqe_rx_w1;
+ cqe_rx_word2_t cqe_rx_w2;
+ uint16_t *rb_sz, nb_segs, seg_idx;
+ uint64_t *rb_ptr;
+
+ NICVF_LOAD_PAIR(cqe_rx_w0.u64, cqe_rx_w1.u64, cqe_rx);
+ NICVF_RX_ASSERT(cqe_rx_w0.cqe_type == CQE_TYPE_RX);
+ cqe_rx_w2 = cqe_rx->word2;
+ rb_sz = &cqe_rx->word3.rb0_sz;
+ rb_ptr = (uint64_t *)cqe_rx + rbptr_offset;
+ nb_segs = cqe_rx_w0.rb_cnt;
+ pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
+ (rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
+
+ pkt->pkt_len = cqe_rx_w1.pkt_len;
+ pkt->data_len = rb_sz[nicvf_frag_num(0)];
+ nicvf_mbuff_init_mseg_update(
+ pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
+ pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
+ if (flag & NICVF_RX_OFFLOAD_NONE)
+ pkt->ol_flags = 0;
+ if (flag & NICVF_RX_OFFLOAD_CKSUM)
+ pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+ if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+ if (unlikely(cqe_rx_w0.vlan_stripped)) {
+ pkt->ol_flags |= PKT_RX_VLAN
+ | PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+ }
+ }
+ nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
+
+ *rx_pkt = pkt;
+ prev = pkt;
+ for (seg_idx = 1; seg_idx < nb_segs; seg_idx++) {
+ seg = (struct rte_mbuf *)nicvf_mbuff_phy2virt
+ (rb_ptr[seg_idx], mbuf_phys_off);
+
+ prev->next = seg;
+ seg->data_len = rb_sz[nicvf_frag_num(seg_idx)];
+ nicvf_mbuff_init_update(seg, mbuf_init, 0);
+
+ prev = seg;
+ }
+ prev->next = NULL;
+ return nb_segs;
+}
+
+static __rte_always_inline uint16_t __hot
+nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, const uint32_t flag)
+{
+ union cq_entry_t *cq_entry;
+ struct cqe_rx_t *cqe_rx;
+ struct nicvf_rxq *rxq = rx_queue;
+ union cq_entry_t *desc = rxq->desc;
+ const uint64_t cqe_mask = rxq->qlen_mask;
+ uint64_t mbuf_phys_off = rxq->mbuf_phys_off;
+ uint32_t i, to_process, cqe_head, buffers_consumed = 0;
+ int32_t available_space = rxq->available_space;
+ uint16_t nb_segs;
+ const uint64_t mbuf_init = rxq->mbuf_initializer.value;
+ const uint8_t rbptr_offset = rxq->rbptr_offset;
+
+ cqe_head = rxq->head & cqe_mask;
+ to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space);
+
+ for (i = 0; i < to_process; i++) {
+ rte_prefetch_non_temporal(&desc[cqe_head + 2]);
+ cq_entry = &desc[cqe_head];
+ cqe_rx = (struct cqe_rx_t *)cq_entry;
+ nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
+ rx_pkts + i, rbptr_offset, mbuf_init, flag);
+ buffers_consumed += nb_segs;
+ cqe_head = (cqe_head + 1) & cqe_mask;
+ nicvf_prefetch_store_keep(rx_pkts[i]);
+ }
+
+ if (likely(to_process)) {
+ rxq->available_space -= to_process;
+ rxq->head = cqe_head;
+ nicvf_addr_write(rxq->cq_door, to_process);
+ rxq->recv_buffers += buffers_consumed;
+ }
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
+ }
+
+ return to_process;
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint32_t
+nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ struct nicvf_rxq *rxq;
+
+ rxq = dev->data->rx_queues[queue_idx];
+ return nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK;
+}
+
+uint32_t
+nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ struct nicvf_rxq *rxq;
+ uint32_t to_process;
+ uint32_t rx_free;
+
+ rxq = dev->data->rx_queues[queue_idx];
+ to_process = rxq->recv_buffers;
+ while (rxq->recv_buffers > 0) {
+ rx_free = RTE_MIN(rxq->recv_buffers, NICVF_MAX_RX_FREE_THRESH);
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rx_free);
+ }
+
+ assert(rxq->recv_buffers == 0);
+ return to_process;
+}
diff --git a/src/spdk/dpdk/drivers/net/thunderx/nicvf_rxtx.h b/src/spdk/dpdk/drivers/net/thunderx/nicvf_rxtx.h
new file mode 100644
index 00000000..a39808cb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/nicvf_rxtx.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef __THUNDERX_NICVF_RXTX_H__
+#define __THUNDERX_NICVF_RXTX_H__
+
+#include <rte_byteorder.h>
+#include <rte_ethdev_driver.h>
+
+#define NICVF_RX_OFFLOAD_NONE 0x1
+#define NICVF_RX_OFFLOAD_CKSUM 0x2
+#define NICVF_RX_OFFLOAD_VLAN_STRIP 0x4
+
+#define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
+
+#ifndef __hot
+#define __hot __attribute__((hot))
+#endif
+
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+static inline uint16_t __attribute__((const))
+nicvf_frag_num(uint16_t i)
+{
+ return (i & ~3) + 3 - (i & 3);
+}
+
+static inline void __hot
+fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
+{
+ /* Local variable sqe to avoid read from sq desc memory*/
+ union sq_entry_t sqe;
+
+ /* Fill the SQ gather entry */
+ sqe.buff[0] = 0; sqe.buff[1] = 0;
+ sqe.gather.subdesc_type = SQ_DESC_TYPE_GATHER;
+ sqe.gather.ld_type = NIC_SEND_LD_TYPE_E_LDT;
+ sqe.gather.size = pkt->data_len;
+ sqe.gather.addr = rte_mbuf_data_iova(pkt);
+
+ entry->buff[0] = sqe.buff[0];
+ entry->buff[1] = sqe.buff[1];
+}
+
+#else
+
+static inline uint16_t __attribute__((const))
+nicvf_frag_num(uint16_t i)
+{
+ return i;
+}
+
+static inline void __hot
+fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
+{
+ entry->buff[0] = (uint64_t)SQ_DESC_TYPE_GATHER << 60 |
+ (uint64_t)NIC_SEND_LD_TYPE_E_LDT << 58 |
+ pkt->data_len;
+ entry->buff[1] = rte_mbuf_data_iova(pkt);
+}
+#endif
+
+static inline void
+nicvf_mbuff_init_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
+ uint16_t apad)
+{
+ union mbuf_initializer init = {.value = mbuf_init};
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ init.fields.data_off += apad;
+#else
+ init.value += apad;
+#endif
+ *(uint64_t *)(&pkt->rearm_data) = init.value;
+}
+
+static inline void
+nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
+ uint16_t apad, uint16_t nb_segs)
+{
+ union mbuf_initializer init = {.value = mbuf_init};
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ init.fields.data_off += apad;
+#else
+ init.value += apad;
+#endif
+ init.fields.nb_segs = nb_segs;
+ *(uint64_t *)(&pkt->rearm_data) = init.value;
+}
+
+uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
+uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
+
+uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t pkts);
+uint16_t nicvf_recv_pkts_cksum(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t pkts);
+uint16_t nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t nicvf_recv_pkts_multiseg_no_offload(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_cksum(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
+uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t pkts);
+
+void nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq);
+void nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq);
+
+#endif /* __THUNDERX_NICVF_RXTX_H__ */
diff --git a/src/spdk/dpdk/drivers/net/thunderx/nicvf_struct.h b/src/spdk/dpdk/drivers/net/thunderx/nicvf_struct.h
new file mode 100644
index 00000000..dd52f38e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/nicvf_struct.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef _THUNDERX_NICVF_STRUCT_H
+#define _THUNDERX_NICVF_STRUCT_H
+
+#include <stdint.h>
+
+#include <rte_spinlock.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_ethdev_driver.h>
+#include <rte_memory.h>
+
+struct nicvf_rbdr {
+ uintptr_t rbdr_status;
+ uintptr_t rbdr_door;
+ struct rbdr_entry_t *desc;
+ nicvf_iova_addr_t phys;
+ uint32_t buffsz;
+ uint32_t tail;
+ uint32_t next_tail;
+ uint32_t head;
+ uint32_t qlen_mask;
+} __rte_cache_aligned;
+
+struct nicvf_txq {
+ union sq_entry_t *desc;
+ nicvf_iova_addr_t phys;
+ struct rte_mbuf **txbuffs;
+ uintptr_t sq_head;
+ uintptr_t sq_door;
+ struct rte_mempool *pool;
+ struct nicvf *nic;
+ void (*pool_free)(struct nicvf_txq *sq);
+ uint32_t head;
+ uint32_t tail;
+ int32_t xmit_bufs;
+ uint32_t qlen_mask;
+ uint64_t offloads;
+ uint16_t queue_id;
+ uint16_t tx_free_thresh;
+} __rte_cache_aligned;
+
+union mbuf_initializer {
+ struct {
+ uint16_t data_off;
+ uint16_t refcnt;
+ uint16_t nb_segs;
+ uint16_t port;
+ } fields;
+ uint64_t value;
+};
+
+struct nicvf_rxq {
+ MARKER rxq_fastpath_data_start;
+ uint8_t rbptr_offset;
+ uint16_t rx_free_thresh;
+ uint32_t head;
+ uint32_t qlen_mask;
+ int32_t recv_buffers;
+ int32_t available_space;
+ uint64_t mbuf_phys_off;
+ uintptr_t cq_status;
+ uintptr_t cq_door;
+ struct nicvf_rbdr *shared_rbdr;
+ struct rte_mempool *pool;
+ union cq_entry_t *desc;
+ union mbuf_initializer mbuf_initializer;
+ MARKER rxq_fastpath_data_end;
+ uint8_t rx_drop_en;
+ uint16_t precharge_cnt;
+ uint16_t port_id;
+ uint16_t queue_id;
+ struct nicvf *nic;
+ nicvf_iova_addr_t phys;
+} __rte_cache_aligned;
+
+struct nicvf {
+ uint8_t vf_id;
+ uint8_t node;
+ uintptr_t reg_base;
+ bool tns_mode;
+ bool sqs_mode;
+ bool loopback_supported;
+ bool pf_acked:1;
+ bool pf_nacked:1;
+ bool offload_cksum:1;
+ bool vlan_strip:1;
+ uint64_t hwcap;
+ uint8_t link_up;
+ uint8_t duplex;
+ uint32_t speed;
+ uint32_t msg_enable;
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t subsystem_device_id;
+ uint16_t subsystem_vendor_id;
+ struct nicvf_rbdr *rbdr;
+ struct nicvf_rss_reta_info rss_info;
+ struct rte_intr_handle intr_handle;
+ uint8_t cpi_alg;
+ uint16_t mtu;
+ int skip_bytes;
+ bool vlan_filter_en;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ /* secondary queue set support */
+ uint8_t sqs_id;
+ uint8_t sqs_count;
+#define MAX_SQS_PER_VF 11
+ struct nicvf *snicvf[MAX_SQS_PER_VF];
+} __rte_cache_aligned;
+
+#endif /* _THUNDERX_NICVF_STRUCT_H */
diff --git a/src/spdk/dpdk/drivers/net/thunderx/nicvf_svf.c b/src/spdk/dpdk/drivers/net/thunderx/nicvf_svf.c
new file mode 100644
index 00000000..bccf2905
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/nicvf_svf.c
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <rte_debug.h>
+#include <rte_malloc.h>
+
+#include "base/nicvf_bsvf.h"
+
+#include "nicvf_svf.h"
+
+void
+nicvf_svf_push(struct nicvf *vf)
+{
+ struct svf_entry *entry = NULL;
+
+ assert(vf != NULL);
+
+ entry = rte_zmalloc("nicvf", sizeof(*entry), RTE_CACHE_LINE_SIZE);
+ if (entry == NULL)
+ rte_panic("Cannoc allocate memory for svf_entry\n");
+
+ entry->vf = vf;
+
+ nicvf_bsvf_push(entry);
+}
+
+struct nicvf *
+nicvf_svf_pop(void)
+{
+ struct nicvf *vf;
+ struct svf_entry *entry;
+
+ entry = nicvf_bsvf_pop();
+
+ vf = entry->vf;
+
+ rte_free(entry);
+
+ return vf;
+}
+
+int
+nicvf_svf_empty(void)
+{
+ return nicvf_bsvf_empty();
+}
diff --git a/src/spdk/dpdk/drivers/net/thunderx/nicvf_svf.h b/src/spdk/dpdk/drivers/net/thunderx/nicvf_svf.h
new file mode 100644
index 00000000..db5cb139
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/nicvf_svf.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef __THUNDERX_NICVF_SVF_H__
+#define __THUNDERX_NICVF_SVF_H__
+
+struct nicvf;
+
+/**
+ * Enqueue new VF to secondary qsets.
+ *
+ * @param entry
+ * Entry to be enqueued.
+ */
+void
+nicvf_svf_push(struct nicvf *vf);
+
+/**
+ * Dequeue a VF from secondary qsets.
+ *
+ * @return
+ * Dequeued entry.
+ */
+struct nicvf *
+nicvf_svf_pop(void);
+
+/**
+ * Check if the queue of secondary qsets is empty.
+ *
+ * @return
+ * 0 on non-empty
+ * otherwise empty
+ */
+int
+nicvf_svf_empty(void);
+
+#endif /* __THUNDERX_NICVF_SVF_H__ */
diff --git a/src/spdk/dpdk/drivers/net/thunderx/rte_pmd_thunderx_version.map b/src/spdk/dpdk/drivers/net/thunderx/rte_pmd_thunderx_version.map
new file mode 100644
index 00000000..1901bcb3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/thunderx/rte_pmd_thunderx_version.map
@@ -0,0 +1,4 @@
+DPDK_16.07 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/vdev_netvsc/Makefile b/src/spdk/dpdk/drivers/net/vdev_netvsc/Makefile
new file mode 100644
index 00000000..690cb8f8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vdev_netvsc/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 6WIND S.A.
+# Copyright 2017 Mellanox Technologies, Ltd
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# Properties of the generated library.
+LIB = librte_pmd_vdev_netvsc.a
+LIBABIVER := 1
+EXPORT_MAP := rte_pmd_vdev_netvsc_version.map
+
+# Additional compilation flags.
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += -g
+CFLAGS += -Wall -Wextra
+CFLAGS += -D_XOPEN_SOURCE=600
+CFLAGS += -D_BSD_SOURCE
+CFLAGS += -D_DEFAULT_SOURCE
+CFLAGS += $(WERROR_FLAGS)
+
+# Dependencies.
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_ethdev
+LDLIBS += -lrte_kvargs
+LDLIBS += -lrte_net
+
+# Source files.
+SRCS-$(CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD) += vdev_netvsc.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/vdev_netvsc/rte_pmd_vdev_netvsc_version.map b/src/spdk/dpdk/drivers/net/vdev_netvsc/rte_pmd_vdev_netvsc_version.map
new file mode 100644
index 00000000..179140fb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vdev_netvsc/rte_pmd_vdev_netvsc_version.map
@@ -0,0 +1,4 @@
+DPDK_18.02 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c b/src/spdk/dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c
new file mode 100644
index 00000000..48717f2f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c
@@ -0,0 +1,835 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <linux/sockios.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <netinet/ip.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <rte_alarm.h>
+#include <rte_bus.h>
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_hypervisor.h>
+#include <rte_kvargs.h>
+#include <rte_log.h>
+#include <rte_string_fns.h>
+
+#define VDEV_NETVSC_DRIVER net_vdev_netvsc
+#define VDEV_NETVSC_DRIVER_NAME RTE_STR(VDEV_NETVSC_DRIVER)
+#define VDEV_NETVSC_DRIVER_NAME_LEN 15
+#define VDEV_NETVSC_ARG_IFACE "iface"
+#define VDEV_NETVSC_ARG_MAC "mac"
+#define VDEV_NETVSC_ARG_FORCE "force"
+#define VDEV_NETVSC_ARG_IGNORE "ignore"
+#define VDEV_NETVSC_PROBE_MS 1000
+
+#define NETVSC_CLASS_ID "{f8615163-df3e-46c5-913f-f2d2f965ed0e}"
+#define NETVSC_MAX_ROUTE_LINE_SIZE 300
+
+#define DRV_LOG(level, ...) \
+ rte_log(RTE_LOG_ ## level, \
+ vdev_netvsc_logtype, \
+ RTE_FMT(VDEV_NETVSC_DRIVER_NAME ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+
+/** Driver-specific log messages type. */
+static int vdev_netvsc_logtype;
+
+/** Context structure for a vdev_netvsc instance. */
+struct vdev_netvsc_ctx {
+ LIST_ENTRY(vdev_netvsc_ctx) entry; /**< Next entry in list. */
+ unsigned int id; /**< Unique ID. */
+ char name[64]; /**< Unique name. */
+ char devname[64]; /**< Fail-safe instance name. */
+ char devargs[256]; /**< Fail-safe device arguments. */
+ char if_name[IF_NAMESIZE]; /**< NetVSC netdevice name. */
+ unsigned int if_index; /**< NetVSC netdevice index. */
+ struct ether_addr if_addr; /**< NetVSC MAC address. */
+ int pipe[2]; /**< Fail-safe communication pipe. */
+ char yield[256]; /**< PCI sub-device arguments. */
+};
+
+/** Context list is common to all driver instances. */
+static LIST_HEAD(, vdev_netvsc_ctx) vdev_netvsc_ctx_list =
+ LIST_HEAD_INITIALIZER(vdev_netvsc_ctx_list);
+
+/** Number of entries in context list. */
+static unsigned int vdev_netvsc_ctx_count;
+
+/** Number of driver instances relying on context list. */
+static unsigned int vdev_netvsc_ctx_inst;
+
+/**
+ * Destroy a vdev_netvsc context instance.
+ *
+ * @param ctx
+ * Context to destroy.
+ */
+static void
+vdev_netvsc_ctx_destroy(struct vdev_netvsc_ctx *ctx)
+{
+ if (ctx->pipe[0] != -1)
+ close(ctx->pipe[0]);
+ if (ctx->pipe[1] != -1)
+ close(ctx->pipe[1]);
+ free(ctx);
+}
+
+/**
+ * Determine if a network interface is NetVSC.
+ *
+ * @param[in] iface
+ * Pointer to netdevice description structure (name and index).
+ *
+ * @return
+ * A nonzero value when interface is detected as NetVSC. In case of error,
+ * rte_errno is updated and 0 returned.
+ */
+static int
+vdev_netvsc_iface_is_netvsc(const struct if_nameindex *iface)
+{
+ static const char temp[] = "/sys/class/net/%s/device/class_id";
+ char path[sizeof(temp) + IF_NAMESIZE];
+ FILE *f;
+ int ret;
+ int len = 0;
+
+ ret = snprintf(path, sizeof(path), temp, iface->if_name);
+ if (ret == -1 || (size_t)ret >= sizeof(path)) {
+ rte_errno = ENOBUFS;
+ return 0;
+ }
+ f = fopen(path, "r");
+ if (!f) {
+ rte_errno = errno;
+ return 0;
+ }
+ ret = fscanf(f, NETVSC_CLASS_ID "%n", &len);
+ if (ret == EOF)
+ rte_errno = errno;
+ ret = len == (int)strlen(NETVSC_CLASS_ID);
+ fclose(f);
+ return ret;
+}
+
+/**
+ * Iterate over system network interfaces.
+ *
+ * This function runs a given callback function for each netdevice found on
+ * the system.
+ *
+ * @param func
+ * Callback function pointer. List traversal is aborted when this function
+ * returns a nonzero value.
+ * @param is_netvsc
+ * Indicates the device type to iterate - netvsc or non-netvsc.
+ * @param ...
+ * Variable parameter list passed as @p va_list to @p func.
+ *
+ * @return
+ * 0 when the entire list is traversed successfully, a negative error code
+ * in case or failure, or the nonzero value returned by @p func when list
+ * traversal is aborted.
+ */
+static int
+vdev_netvsc_foreach_iface(int (*func)(const struct if_nameindex *iface,
+ const struct ether_addr *eth_addr,
+ va_list ap), int is_netvsc, ...)
+{
+ struct if_nameindex *iface = if_nameindex();
+ int s = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
+ unsigned int i;
+ int ret = 0;
+
+ if (!iface) {
+ ret = -ENOBUFS;
+ DRV_LOG(ERR, "cannot retrieve system network interfaces");
+ goto error;
+ }
+ if (s == -1) {
+ ret = -errno;
+ DRV_LOG(ERR, "cannot open socket: %s", rte_strerror(errno));
+ goto error;
+ }
+ for (i = 0; iface[i].if_name; ++i) {
+ int is_netvsc_ret;
+ struct ifreq req;
+ struct ether_addr eth_addr;
+ va_list ap;
+
+ is_netvsc_ret = vdev_netvsc_iface_is_netvsc(&iface[i]) ? 1 : 0;
+ if (is_netvsc ^ is_netvsc_ret)
+ continue;
+ strlcpy(req.ifr_name, iface[i].if_name, sizeof(req.ifr_name));
+ if (ioctl(s, SIOCGIFHWADDR, &req) == -1) {
+ DRV_LOG(WARNING, "cannot retrieve information about"
+ " interface \"%s\": %s",
+ req.ifr_name, rte_strerror(errno));
+ continue;
+ }
+ if (req.ifr_hwaddr.sa_family != ARPHRD_ETHER) {
+ DRV_LOG(DEBUG, "interface %s is non-ethernet device",
+ req.ifr_name);
+ continue;
+ }
+ memcpy(eth_addr.addr_bytes, req.ifr_hwaddr.sa_data,
+ RTE_DIM(eth_addr.addr_bytes));
+ va_start(ap, is_netvsc);
+ ret = func(&iface[i], &eth_addr, ap);
+ va_end(ap);
+ if (ret)
+ break;
+ }
+error:
+ if (s != -1)
+ close(s);
+ if (iface)
+ if_freenameindex(iface);
+ return ret;
+}
+
+/**
+ * Determine if a network interface has a route.
+ *
+ * @param[in] name
+ * Network device name.
+ * @param[in] family
+ * Address family: AF_INET for IPv4 or AF_INET6 for IPv6.
+ *
+ * @return
+ * 1 when interface has a route, negative errno value in case of error and
+ * 0 otherwise.
+ */
+static int
+vdev_netvsc_has_route(const struct if_nameindex *iface,
+ const unsigned char family)
+{
+ /*
+ * The implementation can be simpler by getifaddrs() function usage but
+ * it works for IPv6 only starting from glibc 2.3.3.
+ */
+ char buf[4096];
+ int len;
+ int ret = 0;
+ int res;
+ int sock;
+ struct nlmsghdr *retmsg = (struct nlmsghdr *)buf;
+ struct sockaddr_nl sa;
+ struct {
+ struct nlmsghdr nlhdr;
+ struct ifaddrmsg addrmsg;
+ } msg;
+
+ if (!iface || (family != AF_INET && family != AF_INET6)) {
+ DRV_LOG(ERR, "%s", rte_strerror(EINVAL));
+ return -EINVAL;
+ }
+ sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (sock == -1) {
+ DRV_LOG(ERR, "cannot open socket: %s", rte_strerror(errno));
+ return -errno;
+ }
+ memset(&sa, 0, sizeof(sa));
+ sa.nl_family = AF_NETLINK;
+ sa.nl_groups = RTMGRP_LINK | RTMGRP_IPV4_IFADDR;
+ res = bind(sock, (struct sockaddr *)&sa, sizeof(sa));
+ if (res == -1) {
+ ret = -errno;
+ DRV_LOG(ERR, "cannot bind socket: %s", rte_strerror(errno));
+ goto close;
+ }
+ memset(&msg, 0, sizeof(msg));
+ msg.nlhdr.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
+ msg.nlhdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
+ msg.nlhdr.nlmsg_type = RTM_GETADDR;
+ msg.nlhdr.nlmsg_pid = getpid();
+ msg.addrmsg.ifa_family = family;
+ msg.addrmsg.ifa_index = iface->if_index;
+ res = send(sock, &msg, msg.nlhdr.nlmsg_len, 0);
+ if (res == -1) {
+ ret = -errno;
+ DRV_LOG(ERR, "cannot send socket message: %s",
+ rte_strerror(errno));
+ goto close;
+ }
+ memset(buf, 0, sizeof(buf));
+ len = recv(sock, buf, sizeof(buf), 0);
+ if (len == -1) {
+ ret = -errno;
+ DRV_LOG(ERR, "cannot receive socket message: %s",
+ rte_strerror(errno));
+ goto close;
+ }
+ while (NLMSG_OK(retmsg, (unsigned int)len)) {
+ struct ifaddrmsg *retaddr =
+ (struct ifaddrmsg *)NLMSG_DATA(retmsg);
+
+ if (retaddr->ifa_family == family &&
+ retaddr->ifa_index == iface->if_index) {
+ struct rtattr *retrta = IFA_RTA(retaddr);
+ int attlen = IFA_PAYLOAD(retmsg);
+
+ while (RTA_OK(retrta, attlen)) {
+ if (retrta->rta_type == IFA_ADDRESS) {
+ ret = 1;
+ DRV_LOG(DEBUG, "interface %s has IP",
+ iface->if_name);
+ goto close;
+ }
+ retrta = RTA_NEXT(retrta, attlen);
+ }
+ }
+ retmsg = NLMSG_NEXT(retmsg, len);
+ }
+close:
+ close(sock);
+ return ret;
+}
+
+/**
+ * Retrieve network interface data from sysfs symbolic link.
+ *
+ * @param[out] buf
+ * Output data buffer.
+ * @param size
+ * Output buffer size.
+ * @param[in] if_name
+ * Netdevice name.
+ * @param[in] relpath
+ * Symbolic link path relative to netdevice sysfs entry.
+ *
+ * @return
+ * 0 on success, a negative error code otherwise.
+ */
+static int
+vdev_netvsc_sysfs_readlink(char *buf, size_t size, const char *if_name,
+ const char *relpath)
+{
+ struct vdev_netvsc_ctx *ctx;
+ char in[RTE_MAX(sizeof(ctx->yield), 256u)];
+ int ret;
+
+ ret = snprintf(in, sizeof(in) - 1, "/sys/class/net/%s/%s",
+ if_name, relpath);
+ if (ret == -1 || (size_t)ret >= sizeof(in))
+ return -ENOBUFS;
+ ret = readlink(in, buf, size);
+ if (ret == -1)
+ return -errno;
+ if ((size_t)ret >= size - 1)
+ return -ENOBUFS;
+ buf[ret] = '\0';
+ return 0;
+}
+
+/**
+ * Probe a network interface to associate with vdev_netvsc context.
+ *
+ * This function determines if the network device matches the properties of
+ * the NetVSC interface associated with the vdev_netvsc context and
+ * communicates its bus address to the fail-safe PMD instance if so.
+ *
+ * It is normally used with vdev_netvsc_foreach_iface().
+ *
+ * @param[in] iface
+ * Pointer to netdevice description structure (name and index).
+ * @param[in] eth_addr
+ * MAC address associated with @p iface.
+ * @param ap
+ * Variable arguments list comprising:
+ *
+ * - struct vdev_netvsc_ctx *ctx:
+ * Context to associate network interface with.
+ *
+ * @return
+ * A nonzero value when interface matches, 0 otherwise or in case of
+ * error.
+ */
+static int
+vdev_netvsc_device_probe(const struct if_nameindex *iface,
+ const struct ether_addr *eth_addr,
+ va_list ap)
+{
+ struct vdev_netvsc_ctx *ctx = va_arg(ap, struct vdev_netvsc_ctx *);
+ char buf[RTE_MAX(sizeof(ctx->yield), 256u)];
+ const char *addr;
+ size_t len;
+ int ret;
+
+ /* Skip non-matching or unwanted NetVSC interfaces. */
+ if (ctx->if_index == iface->if_index) {
+ if (!strcmp(ctx->if_name, iface->if_name))
+ return 0;
+ DRV_LOG(DEBUG,
+ "NetVSC interface \"%s\" (index %u) renamed \"%s\"",
+ ctx->if_name, ctx->if_index, iface->if_name);
+ strlcpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name));
+ return 0;
+ }
+ if (!is_same_ether_addr(eth_addr, &ctx->if_addr))
+ return 0;
+ /* Look for associated PCI device. */
+ ret = vdev_netvsc_sysfs_readlink(buf, sizeof(buf), iface->if_name,
+ "device/subsystem");
+ if (ret)
+ return 0;
+ addr = strrchr(buf, '/');
+ addr = addr ? addr + 1 : buf;
+ if (strcmp(addr, "pci"))
+ return 0;
+ ret = vdev_netvsc_sysfs_readlink(buf, sizeof(buf), iface->if_name,
+ "device");
+ if (ret)
+ return 0;
+ addr = strrchr(buf, '/');
+ addr = addr ? addr + 1 : buf;
+ len = strlen(addr);
+ if (!len)
+ return 0;
+ /* Send PCI device argument to fail-safe PMD instance. */
+ if (strcmp(addr, ctx->yield))
+ DRV_LOG(DEBUG, "associating PCI device \"%s\" with NetVSC"
+ " interface \"%s\" (index %u)", addr, ctx->if_name,
+ ctx->if_index);
+ memmove(buf, addr, len + 1);
+ addr = buf;
+ buf[len] = '\n';
+ ret = write(ctx->pipe[1], addr, len + 1);
+ buf[len] = '\0';
+ if (ret == -1) {
+ if (errno == EINTR || errno == EAGAIN)
+ return 1;
+ DRV_LOG(WARNING, "cannot associate PCI device name \"%s\" with"
+ " interface \"%s\": %s", addr, ctx->if_name,
+ rte_strerror(errno));
+ return 1;
+ }
+ if ((size_t)ret != len + 1) {
+ /*
+ * Attempt to override previous partial write, no need to
+ * recover if that fails.
+ */
+ ret = write(ctx->pipe[1], "\n", 1);
+ (void)ret;
+ return 1;
+ }
+ fsync(ctx->pipe[1]);
+ memcpy(ctx->yield, addr, len + 1);
+ return 1;
+}
+
+/**
+ * Alarm callback that regularly probes system network interfaces.
+ *
+ * This callback runs at a frequency determined by VDEV_NETVSC_PROBE_MS as
+ * long as an vdev_netvsc context instance exists.
+ *
+ * @param arg
+ * Ignored.
+ */
+static void
+vdev_netvsc_alarm(__rte_unused void *arg)
+{
+ struct vdev_netvsc_ctx *ctx;
+ int ret;
+
+ LIST_FOREACH(ctx, &vdev_netvsc_ctx_list, entry) {
+ ret = vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, 0,
+ ctx);
+ if (ret < 0)
+ break;
+ }
+ if (!vdev_netvsc_ctx_count)
+ return;
+ ret = rte_eal_alarm_set(VDEV_NETVSC_PROBE_MS * 1000,
+ vdev_netvsc_alarm, NULL);
+ if (ret < 0) {
+ DRV_LOG(ERR, "unable to reschedule alarm callback: %s",
+ rte_strerror(-ret));
+ }
+}
+
+/**
+ * Probe a NetVSC interface to generate a vdev_netvsc context from.
+ *
+ * This function instantiates vdev_netvsc contexts either for all NetVSC
+ * devices found on the system or only a subset provided as device
+ * arguments.
+ *
+ * It is normally used with vdev_netvsc_foreach_iface().
+ *
+ * @param[in] iface
+ * Pointer to netdevice description structure (name and index).
+ * @param[in] eth_addr
+ * MAC address associated with @p iface.
+ * @param ap
+ * Variable arguments list comprising:
+ *
+ * - const char *name:
+ * Name associated with current driver instance.
+ *
+ * - struct rte_kvargs *kvargs:
+ * Device arguments provided to current driver instance.
+ *
+ * - int force:
+ * Accept specified interface even if not detected as NetVSC.
+ *
+ * - unsigned int specified:
+ * Number of specific netdevices provided as device arguments.
+ *
+ * - unsigned int *matched:
+ * The number of specified netdevices matched by this function.
+ *
+ * @return
+ * A nonzero value when interface matches, 0 otherwise or in case of
+ * error.
+ */
+static int
+vdev_netvsc_netvsc_probe(const struct if_nameindex *iface,
+ const struct ether_addr *eth_addr,
+ va_list ap)
+{
+ const char *name = va_arg(ap, const char *);
+ struct rte_kvargs *kvargs = va_arg(ap, struct rte_kvargs *);
+ unsigned int specified = va_arg(ap, unsigned int);
+ unsigned int *matched = va_arg(ap, unsigned int *);
+ unsigned int i;
+ struct vdev_netvsc_ctx *ctx;
+ int ret;
+
+ /* Probe all interfaces when none are specified. */
+ if (specified) {
+ for (i = 0; i != kvargs->count; ++i) {
+ const struct rte_kvargs_pair *pair = &kvargs->pairs[i];
+
+ if (!strcmp(pair->key, VDEV_NETVSC_ARG_IFACE)) {
+ if (!strcmp(pair->value, iface->if_name))
+ break;
+ } else if (!strcmp(pair->key, VDEV_NETVSC_ARG_MAC)) {
+ struct ether_addr tmp;
+
+ if (sscanf(pair->value,
+ "%" SCNx8 ":%" SCNx8 ":%" SCNx8 ":"
+ "%" SCNx8 ":%" SCNx8 ":%" SCNx8,
+ &tmp.addr_bytes[0],
+ &tmp.addr_bytes[1],
+ &tmp.addr_bytes[2],
+ &tmp.addr_bytes[3],
+ &tmp.addr_bytes[4],
+ &tmp.addr_bytes[5]) != 6) {
+ DRV_LOG(ERR,
+ "invalid MAC address format"
+ " \"%s\"",
+ pair->value);
+ return -EINVAL;
+ }
+ if (is_same_ether_addr(eth_addr, &tmp))
+ break;
+ }
+ }
+ if (i == kvargs->count)
+ return 0;
+ ++(*matched);
+ }
+ /* Weed out interfaces already handled. */
+ LIST_FOREACH(ctx, &vdev_netvsc_ctx_list, entry)
+ if (ctx->if_index == iface->if_index)
+ break;
+ if (ctx) {
+ if (!specified)
+ return 0;
+ DRV_LOG(WARNING,
+ "interface \"%s\" (index %u) is already handled,"
+ " skipping",
+ iface->if_name, iface->if_index);
+ return 0;
+ }
+ /* Routed NetVSC should not be probed. */
+ if (vdev_netvsc_has_route(iface, AF_INET) ||
+ vdev_netvsc_has_route(iface, AF_INET6)) {
+ if (!specified)
+ return 0;
+ DRV_LOG(WARNING, "probably using routed NetVSC interface \"%s\""
+ " (index %u)", iface->if_name, iface->if_index);
+ }
+ /* Create interface context. */
+ ctx = calloc(1, sizeof(*ctx));
+ if (!ctx) {
+ ret = -errno;
+ DRV_LOG(ERR, "cannot allocate context for interface \"%s\": %s",
+ iface->if_name, rte_strerror(errno));
+ goto error;
+ }
+ ctx->id = vdev_netvsc_ctx_count;
+ strlcpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name));
+ ctx->if_index = iface->if_index;
+ ctx->if_addr = *eth_addr;
+ ctx->pipe[0] = -1;
+ ctx->pipe[1] = -1;
+ ctx->yield[0] = '\0';
+ if (pipe(ctx->pipe) == -1) {
+ ret = -errno;
+ DRV_LOG(ERR,
+ "cannot allocate control pipe for interface \"%s\": %s",
+ ctx->if_name, rte_strerror(errno));
+ goto error;
+ }
+ for (i = 0; i != RTE_DIM(ctx->pipe); ++i) {
+ int flf = fcntl(ctx->pipe[i], F_GETFL);
+
+ if (flf != -1 &&
+ fcntl(ctx->pipe[i], F_SETFL, flf | O_NONBLOCK) != -1)
+ continue;
+ ret = -errno;
+ DRV_LOG(ERR, "cannot toggle non-blocking flag on control file"
+ " descriptor #%u (%d): %s", i, ctx->pipe[i],
+ rte_strerror(errno));
+ goto error;
+ }
+ /* Generate virtual device name and arguments. */
+ i = 0;
+ ret = snprintf(ctx->name, sizeof(ctx->name), "%s_id%u",
+ name, ctx->id);
+ if (ret == -1 || (size_t)ret >= sizeof(ctx->name))
+ ++i;
+ ret = snprintf(ctx->devname, sizeof(ctx->devname), "net_failsafe_vsc%u",
+ ctx->id);
+ if (ret == -1 || (size_t)ret >= sizeof(ctx->devname))
+ ++i;
+ ret = snprintf(ctx->devargs, sizeof(ctx->devargs),
+ "fd(%d),dev(net_tap_vsc%u,remote=%s)",
+ ctx->pipe[0], ctx->id, ctx->if_name);
+ if (ret == -1 || (size_t)ret >= sizeof(ctx->devargs))
+ ++i;
+ if (i) {
+ ret = -ENOBUFS;
+ DRV_LOG(ERR, "generated virtual device name or argument list"
+ " too long for interface \"%s\"", ctx->if_name);
+ goto error;
+ }
+ /* Request virtual device generation. */
+ DRV_LOG(DEBUG, "generating virtual device \"%s\" with arguments \"%s\"",
+ ctx->devname, ctx->devargs);
+ vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, 0, ctx);
+ ret = rte_eal_hotplug_add("vdev", ctx->devname, ctx->devargs);
+ if (ret)
+ goto error;
+ LIST_INSERT_HEAD(&vdev_netvsc_ctx_list, ctx, entry);
+ ++vdev_netvsc_ctx_count;
+ DRV_LOG(DEBUG, "added NetVSC interface \"%s\" to context list",
+ ctx->if_name);
+ return 0;
+error:
+ if (ctx)
+ vdev_netvsc_ctx_destroy(ctx);
+ return ret;
+}
+
+/**
+ * Probe NetVSC interfaces.
+ *
+ * This function probes system netdevices according to the specified device
+ * arguments and starts a periodic alarm callback to notify the resulting
+ * fail-safe PMD instances of their sub-devices whereabouts.
+ *
+ * @param dev
+ * Virtual device context for driver instance.
+ *
+ * @return
+ * Always 0, even in case of errors.
+ */
+static int
+vdev_netvsc_vdev_probe(struct rte_vdev_device *dev)
+{
+ static const char *const vdev_netvsc_arg[] = {
+ VDEV_NETVSC_ARG_IFACE,
+ VDEV_NETVSC_ARG_MAC,
+ VDEV_NETVSC_ARG_FORCE,
+ VDEV_NETVSC_ARG_IGNORE,
+ NULL,
+ };
+ const char *name = rte_vdev_device_name(dev);
+ const char *args = rte_vdev_device_args(dev);
+ struct rte_kvargs *kvargs = rte_kvargs_parse(args ? args : "",
+ vdev_netvsc_arg);
+ unsigned int specified = 0;
+ unsigned int matched = 0;
+ int force = 0;
+ int ignore = 0;
+ unsigned int i;
+ int ret;
+
+ DRV_LOG(DEBUG, "invoked as \"%s\", using arguments \"%s\"", name, args);
+ if (!kvargs) {
+ DRV_LOG(ERR, "cannot parse arguments list");
+ goto error;
+ }
+ for (i = 0; i != kvargs->count; ++i) {
+ const struct rte_kvargs_pair *pair = &kvargs->pairs[i];
+
+ if (!strcmp(pair->key, VDEV_NETVSC_ARG_FORCE))
+ force = !!atoi(pair->value);
+ else if (!strcmp(pair->key, VDEV_NETVSC_ARG_IGNORE))
+ ignore = !!atoi(pair->value);
+ else if (!strcmp(pair->key, VDEV_NETVSC_ARG_IFACE) ||
+ !strcmp(pair->key, VDEV_NETVSC_ARG_MAC))
+ ++specified;
+ }
+ if (ignore) {
+ if (kvargs)
+ rte_kvargs_free(kvargs);
+ return 0;
+ }
+ if (specified > 1) {
+ DRV_LOG(ERR, "More than one way used to specify the netvsc"
+ " device.");
+ goto error;
+ }
+ rte_eal_alarm_cancel(vdev_netvsc_alarm, NULL);
+ /* Gather interfaces. */
+ ret = vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, 1, name,
+ kvargs, specified, &matched);
+ if (ret < 0)
+ goto error;
+ if (specified && matched < specified) {
+ if (!force) {
+ DRV_LOG(ERR, "Cannot find the specified netvsc device");
+ goto error;
+ }
+ /* Try to force probing on non-netvsc specified device. */
+ if (vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, 0, name,
+ kvargs, specified, &matched) < 0)
+ goto error;
+ if (matched < specified) {
+ DRV_LOG(ERR, "Cannot find the specified device");
+ goto error;
+ }
+ DRV_LOG(WARNING, "non-netvsc device was probed as netvsc");
+ }
+ ret = rte_eal_alarm_set(VDEV_NETVSC_PROBE_MS * 1000,
+ vdev_netvsc_alarm, NULL);
+ if (ret < 0) {
+ DRV_LOG(ERR, "unable to schedule alarm callback: %s",
+ rte_strerror(-ret));
+ goto error;
+ }
+error:
+ if (kvargs)
+ rte_kvargs_free(kvargs);
+ ++vdev_netvsc_ctx_inst;
+ return 0;
+}
+
+/**
+ * Remove driver instance.
+ *
+ * The alarm callback and underlying vdev_netvsc context instances are only
+ * destroyed after the last PMD instance is removed.
+ *
+ * @param dev
+ * Virtual device context for driver instance.
+ *
+ * @return
+ * Always 0.
+ */
+static int
+vdev_netvsc_vdev_remove(__rte_unused struct rte_vdev_device *dev)
+{
+ if (--vdev_netvsc_ctx_inst)
+ return 0;
+ rte_eal_alarm_cancel(vdev_netvsc_alarm, NULL);
+ while (!LIST_EMPTY(&vdev_netvsc_ctx_list)) {
+ struct vdev_netvsc_ctx *ctx = LIST_FIRST(&vdev_netvsc_ctx_list);
+
+ LIST_REMOVE(ctx, entry);
+ --vdev_netvsc_ctx_count;
+ vdev_netvsc_ctx_destroy(ctx);
+ }
+ return 0;
+}
+
+/** Virtual device descriptor. */
+static struct rte_vdev_driver vdev_netvsc_vdev = {
+ .probe = vdev_netvsc_vdev_probe,
+ .remove = vdev_netvsc_vdev_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(VDEV_NETVSC_DRIVER, vdev_netvsc_vdev);
+RTE_PMD_REGISTER_ALIAS(VDEV_NETVSC_DRIVER, eth_vdev_netvsc);
+RTE_PMD_REGISTER_PARAM_STRING(net_vdev_netvsc,
+ VDEV_NETVSC_ARG_IFACE "=<string> "
+ VDEV_NETVSC_ARG_MAC "=<string> "
+ VDEV_NETVSC_ARG_FORCE "=<int> "
+ VDEV_NETVSC_ARG_IGNORE "=<int>");
+
+/** Initialize driver log type. */
+RTE_INIT(vdev_netvsc_init_log)
+{
+ vdev_netvsc_logtype = rte_log_register("pmd.vdev_netvsc");
+ if (vdev_netvsc_logtype >= 0)
+ rte_log_set_level(vdev_netvsc_logtype, RTE_LOG_NOTICE);
+}
+
+/** Compare function for vdev find device operation. */
+static int
+vdev_netvsc_cmp_rte_device(const struct rte_device *dev1,
+ __rte_unused const void *_dev2)
+{
+ return strncmp(dev1->devargs->name, VDEV_NETVSC_DRIVER_NAME,
+ VDEV_NETVSC_DRIVER_NAME_LEN);
+}
+
+/**
+ * A callback called by vdev bus scan function to ensure this driver probing
+ * automatically in Hyper-V VM system unless it already exists in the
+ * devargs list.
+ */
+static void
+vdev_netvsc_scan_callback(__rte_unused void *arg)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ struct rte_bus *vbus = rte_bus_find_by_name("vdev");
+
+ RTE_EAL_DEVARGS_FOREACH("vdev", devargs)
+ if (!strncmp(devargs->name, VDEV_NETVSC_DRIVER_NAME,
+ VDEV_NETVSC_DRIVER_NAME_LEN))
+ return;
+ dev = (struct rte_vdev_device *)vbus->find_device(NULL,
+ vdev_netvsc_cmp_rte_device, VDEV_NETVSC_DRIVER_NAME);
+ if (dev)
+ return;
+ if (rte_devargs_add(RTE_DEVTYPE_VIRTUAL, VDEV_NETVSC_DRIVER_NAME))
+ DRV_LOG(ERR, "unable to add netvsc devargs.");
+}
+
+/** Initialize the custom scan. */
+RTE_INIT(vdev_netvsc_custom_scan_add)
+{
+ if (rte_hypervisor_get() == RTE_HYPERVISOR_HYPERV)
+ rte_vdev_add_custom_scan(vdev_netvsc_scan_callback, NULL);
+}
diff --git a/src/spdk/dpdk/drivers/net/vhost/Makefile b/src/spdk/dpdk/drivers/net/vhost/Makefile
new file mode 100644
index 00000000..83b5a8b8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vhost/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_vhost.a
+
+LDLIBS += -lpthread
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_vhost
+LDLIBS += -lrte_bus_vdev
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_vhost_version.map
+
+LIBABIVER := 2
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += rte_eth_vhost.c
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_eth_vhost.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/vhost/meson.build b/src/spdk/dpdk/drivers/net/vhost/meson.build
new file mode 100644
index 00000000..9b067c35
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vhost/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+build = dpdk_conf.has('RTE_LIBRTE_VHOST')
+version = 2
+sources = files('rte_eth_vhost.c')
+install_headers('rte_eth_vhost.h')
+deps += 'vhost'
diff --git a/src/spdk/dpdk/drivers/net/vhost/rte_eth_vhost.c b/src/spdk/dpdk/drivers/net/vhost/rte_eth_vhost.c
new file mode 100644
index 00000000..e58f3221
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vhost/rte_eth_vhost.c
@@ -0,0 +1,1466 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 IGEL Co., Ltd.
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+#include <unistd.h>
+#include <pthread.h>
+#include <stdbool.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_bus_vdev.h>
+#include <rte_kvargs.h>
+#include <rte_vhost.h>
+#include <rte_spinlock.h>
+
+#include "rte_eth_vhost.h"
+
+static int vhost_logtype;
+
+#define VHOST_LOG(level, ...) \
+ rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)
+
+enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
+
+#define ETH_VHOST_IFACE_ARG "iface"
+#define ETH_VHOST_QUEUES_ARG "queues"
+#define ETH_VHOST_CLIENT_ARG "client"
+#define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
+#define ETH_VHOST_IOMMU_SUPPORT "iommu-support"
+#define VHOST_MAX_PKT_BURST 32
+
+static const char *valid_arguments[] = {
+ ETH_VHOST_IFACE_ARG,
+ ETH_VHOST_QUEUES_ARG,
+ ETH_VHOST_CLIENT_ARG,
+ ETH_VHOST_DEQUEUE_ZERO_COPY,
+ ETH_VHOST_IOMMU_SUPPORT,
+ NULL
+};
+
+static struct ether_addr base_eth_addr = {
+ .addr_bytes = {
+ 0x56 /* V */,
+ 0x48 /* H */,
+ 0x4F /* O */,
+ 0x53 /* S */,
+ 0x54 /* T */,
+ 0x00
+ }
+};
+
+enum vhost_xstats_pkts {
+ VHOST_UNDERSIZE_PKT = 0,
+ VHOST_64_PKT,
+ VHOST_65_TO_127_PKT,
+ VHOST_128_TO_255_PKT,
+ VHOST_256_TO_511_PKT,
+ VHOST_512_TO_1023_PKT,
+ VHOST_1024_TO_1522_PKT,
+ VHOST_1523_TO_MAX_PKT,
+ VHOST_BROADCAST_PKT,
+ VHOST_MULTICAST_PKT,
+ VHOST_UNICAST_PKT,
+ VHOST_ERRORS_PKT,
+ VHOST_ERRORS_FRAGMENTED,
+ VHOST_ERRORS_JABBER,
+ VHOST_UNKNOWN_PROTOCOL,
+ VHOST_XSTATS_MAX,
+};
+
+struct vhost_stats {
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t missed_pkts;
+ uint64_t xstats[VHOST_XSTATS_MAX];
+};
+
+struct vhost_queue {
+ int vid;
+ rte_atomic32_t allow_queuing;
+ rte_atomic32_t while_queuing;
+ struct pmd_internal *internal;
+ struct rte_mempool *mb_pool;
+ uint16_t port;
+ uint16_t virtqueue_id;
+ struct vhost_stats stats;
+};
+
+struct pmd_internal {
+ rte_atomic32_t dev_attached;
+ char *dev_name;
+ char *iface_name;
+ uint16_t max_queues;
+ int vid;
+ rte_atomic32_t started;
+ uint8_t vlan_strip;
+};
+
+struct internal_list {
+ TAILQ_ENTRY(internal_list) next;
+ struct rte_eth_dev *eth_dev;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+static struct internal_list_head internal_list =
+ TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static struct rte_eth_link pmd_link = {
+ .link_speed = 10000,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_DOWN
+};
+
+struct rte_vhost_vring_state {
+ rte_spinlock_t lock;
+
+ bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
+ bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
+ unsigned int index;
+ unsigned int max_vring;
+};
+
+static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
+
+#define VHOST_XSTATS_NAME_SIZE 64
+
+struct vhost_xstats_name_off {
+ char name[VHOST_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
+
+/* [rx]_is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+ {"fragmented_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
+ {"jabber_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
+ {"unknown_protos_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
+};
+
+/* [tx]_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+};
+
+#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
+ sizeof(vhost_rxport_stat_strings[0]))
+
+#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
+ sizeof(vhost_txport_stat_strings[0]))
+
+static void
+vhost_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct vhost_queue *vq = NULL;
+ unsigned int i = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+}
+
+static int
+vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit __rte_unused)
+{
+ unsigned int t = 0;
+ int count = 0;
+ int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
+
+ if (!xstats_names)
+ return nstats;
+ for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_%s", vhost_rxport_stat_strings[t].name);
+ count++;
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_%s", vhost_txport_stat_strings[t].name);
+ count++;
+ }
+ return count;
+}
+
+static int
+vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i;
+ unsigned int t;
+ unsigned int count = 0;
+ struct vhost_queue *vq = NULL;
+ unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
+
+ if (n < nxstats)
+ return nxstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ + vq->stats.missed_pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
+ xstats[count].value = 0;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ xstats[count].value +=
+ *(uint64_t *)(((char *)vq)
+ + vhost_rxport_stat_strings[t].offset);
+ }
+ xstats[count].id = count;
+ count++;
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
+ xstats[count].value = 0;
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ xstats[count].value +=
+ *(uint64_t *)(((char *)vq)
+ + vhost_txport_stat_strings[t].offset);
+ }
+ xstats[count].id = count;
+ count++;
+ }
+ return count;
+}
+
+static inline void
+vhost_count_multicast_broadcast(struct vhost_queue *vq,
+ struct rte_mbuf *mbuf)
+{
+ struct ether_addr *ea = NULL;
+ struct vhost_stats *pstats = &vq->stats;
+
+ ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ pstats->xstats[VHOST_BROADCAST_PKT]++;
+ else
+ pstats->xstats[VHOST_MULTICAST_PKT]++;
+ }
+}
+
+static void
+vhost_update_packet_xstats(struct vhost_queue *vq,
+ struct rte_mbuf **bufs,
+ uint16_t count)
+{
+ uint32_t pkt_len = 0;
+ uint64_t i = 0;
+ uint64_t index;
+ struct vhost_stats *pstats = &vq->stats;
+
+ for (i = 0; i < count ; i++) {
+ pkt_len = bufs[i]->pkt_len;
+ if (pkt_len == 64) {
+ pstats->xstats[VHOST_64_PKT]++;
+ } else if (pkt_len > 64 && pkt_len < 1024) {
+ index = (sizeof(pkt_len) * 8)
+ - __builtin_clz(pkt_len) - 5;
+ pstats->xstats[index]++;
+ } else {
+ if (pkt_len < 64)
+ pstats->xstats[VHOST_UNDERSIZE_PKT]++;
+ else if (pkt_len <= 1522)
+ pstats->xstats[VHOST_1024_TO_1522_PKT]++;
+ else if (pkt_len > 1522)
+ pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
+ }
+ vhost_count_multicast_broadcast(vq, bufs[i]);
+ }
+}
+
+static uint16_t
+eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ struct vhost_queue *r = q;
+ uint16_t i, nb_rx = 0;
+ uint16_t nb_receive = nb_bufs;
+
+ if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
+ return 0;
+
+ rte_atomic32_set(&r->while_queuing, 1);
+
+ if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
+ goto out;
+
+ /* Dequeue packets from guest TX queue */
+ while (nb_receive) {
+ uint16_t nb_pkts;
+ uint16_t num = (uint16_t)RTE_MIN(nb_receive,
+ VHOST_MAX_PKT_BURST);
+
+ nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
+ r->mb_pool, &bufs[nb_rx],
+ num);
+
+ nb_rx += nb_pkts;
+ nb_receive -= nb_pkts;
+ if (nb_pkts < num)
+ break;
+ }
+
+ r->stats.pkts += nb_rx;
+
+ for (i = 0; likely(i < nb_rx); i++) {
+ bufs[i]->port = r->port;
+ bufs[i]->vlan_tci = 0;
+
+ if (r->internal->vlan_strip)
+ rte_vlan_strip(bufs[i]);
+
+ r->stats.bytes += bufs[i]->pkt_len;
+ }
+
+ vhost_update_packet_xstats(r, bufs, nb_rx);
+
+out:
+ rte_atomic32_set(&r->while_queuing, 0);
+
+ return nb_rx;
+}
+
+static uint16_t
+eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ struct vhost_queue *r = q;
+ uint16_t i, nb_tx = 0;
+ uint16_t nb_send = 0;
+
+ if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
+ return 0;
+
+ rte_atomic32_set(&r->while_queuing, 1);
+
+ if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
+ goto out;
+
+ for (i = 0; i < nb_bufs; i++) {
+ struct rte_mbuf *m = bufs[i];
+
+ /* Do VLAN tag insertion */
+ if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ int error = rte_vlan_insert(&m);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(m);
+ continue;
+ }
+ }
+
+ bufs[nb_send] = m;
+ ++nb_send;
+ }
+
+ /* Enqueue packets to guest RX queue */
+ while (nb_send) {
+ uint16_t nb_pkts;
+ uint16_t num = (uint16_t)RTE_MIN(nb_send,
+ VHOST_MAX_PKT_BURST);
+
+ nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
+ &bufs[nb_tx], num);
+
+ nb_tx += nb_pkts;
+ nb_send -= nb_pkts;
+ if (nb_pkts < num)
+ break;
+ }
+
+ r->stats.pkts += nb_tx;
+ r->stats.missed_pkts += nb_bufs - nb_tx;
+
+ for (i = 0; likely(i < nb_tx); i++)
+ r->stats.bytes += bufs[i]->pkt_len;
+
+ vhost_update_packet_xstats(r, bufs, nb_tx);
+
+ /* According to RFC2863 page42 section ifHCOutMulticastPkts and
+ * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
+ * are increased when packets are not transmitted successfully.
+ */
+ for (i = nb_tx; i < nb_bufs; i++)
+ vhost_count_multicast_broadcast(r, bufs[i]);
+
+ for (i = 0; likely(i < nb_tx); i++)
+ rte_pktmbuf_free(bufs[i]);
+out:
+ rte_atomic32_set(&r->while_queuing, 0);
+
+ return nb_tx;
+}
+
+static int
+eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ struct pmd_internal *internal = dev->data->dev_private;
+ const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+ internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+
+ return 0;
+}
+
+static inline struct internal_list *
+find_internal_resource(char *ifname)
+{
+ int found = 0;
+ struct internal_list *list;
+ struct pmd_internal *internal;
+
+ if (ifname == NULL)
+ return NULL;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ internal = list->eth_dev->data->dev_private;
+ if (!strcmp(internal->iface_name, ifname)) {
+ found = 1;
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&internal_list_lock);
+
+ if (!found)
+ return NULL;
+
+ return list;
+}
+
+static int
+eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct rte_vhost_vring vring;
+ struct vhost_queue *vq;
+ int ret = 0;
+
+ vq = dev->data->rx_queues[qid];
+ if (!vq) {
+ VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
+ return -1;
+ }
+
+ ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
+ if (ret < 0) {
+ VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
+ return ret;
+ }
+ VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
+ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
+ rte_wmb();
+
+ return ret;
+}
+
+static int
+eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct rte_vhost_vring vring;
+ struct vhost_queue *vq;
+ int ret = 0;
+
+ vq = dev->data->rx_queues[qid];
+ if (!vq) {
+ VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
+ return -1;
+ }
+
+ ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
+ if (ret < 0) {
+ VHOST_LOG(ERR, "Failed to get rxq%d's vring", qid);
+ return ret;
+ }
+ VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
+ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
+ rte_wmb();
+
+ return 0;
+}
+
+static void
+eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+
+ if (intr_handle) {
+ if (intr_handle->intr_vec)
+ free(intr_handle->intr_vec);
+ free(intr_handle);
+ }
+
+ dev->intr_handle = NULL;
+}
+
+static int
+eth_vhost_install_intr(struct rte_eth_dev *dev)
+{
+ struct rte_vhost_vring vring;
+ struct vhost_queue *vq;
+ int count = 0;
+ int nb_rxq = dev->data->nb_rx_queues;
+ int i;
+ int ret;
+
+ /* uninstall firstly if we are reconnecting */
+ if (dev->intr_handle)
+ eth_vhost_uninstall_intr(dev);
+
+ dev->intr_handle = malloc(sizeof(*dev->intr_handle));
+ if (!dev->intr_handle) {
+ VHOST_LOG(ERR, "Fail to allocate intr_handle\n");
+ return -ENOMEM;
+ }
+ memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));
+
+ dev->intr_handle->efd_counter_size = sizeof(uint64_t);
+
+ dev->intr_handle->intr_vec =
+ malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));
+
+ if (!dev->intr_handle->intr_vec) {
+ VHOST_LOG(ERR,
+ "Failed to allocate memory for interrupt vector\n");
+ free(dev->intr_handle);
+ return -ENOMEM;
+ }
+
+ VHOST_LOG(INFO, "Prepare intr vec\n");
+ for (i = 0; i < nb_rxq; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq) {
+ VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
+ continue;
+ }
+
+ ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
+ if (ret < 0) {
+ VHOST_LOG(INFO,
+ "Failed to get rxq-%d's vring, skip!\n", i);
+ continue;
+ }
+
+ if (vring.kickfd < 0) {
+ VHOST_LOG(INFO,
+ "rxq-%d's kickfd is invalid, skip!\n", i);
+ continue;
+ }
+ dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+ dev->intr_handle->efds[i] = vring.kickfd;
+ count++;
+ VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
+ }
+
+ dev->intr_handle->nb_efd = count;
+ dev->intr_handle->max_intr = count + 1;
+ dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
+
+ return 0;
+}
+
+static void
+update_queuing_status(struct rte_eth_dev *dev)
+{
+ struct pmd_internal *internal = dev->data->dev_private;
+ struct vhost_queue *vq;
+ unsigned int i;
+ int allow_queuing = 1;
+
+ if (!dev->data->rx_queues || !dev->data->tx_queues)
+ return;
+
+ if (rte_atomic32_read(&internal->started) == 0 ||
+ rte_atomic32_read(&internal->dev_attached) == 0)
+ allow_queuing = 0;
+
+ /* Wait until rx/tx_pkt_burst stops accessing vhost device */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (vq == NULL)
+ continue;
+ rte_atomic32_set(&vq->allow_queuing, allow_queuing);
+ while (rte_atomic32_read(&vq->while_queuing))
+ rte_pause();
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (vq == NULL)
+ continue;
+ rte_atomic32_set(&vq->allow_queuing, allow_queuing);
+ while (rte_atomic32_read(&vq->while_queuing))
+ rte_pause();
+ }
+}
+
+static void
+queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
+{
+ struct vhost_queue *vq;
+ int i;
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ vq = eth_dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = internal->vid;
+ vq->internal = internal;
+ vq->port = eth_dev->data->port_id;
+ }
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ vq = eth_dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = internal->vid;
+ vq->internal = internal;
+ vq->port = eth_dev->data->port_id;
+ }
+}
+
+static int
+new_device(int vid)
+{
+ struct rte_eth_dev *eth_dev;
+ struct internal_list *list;
+ struct pmd_internal *internal;
+ struct rte_eth_conf *dev_conf;
+ unsigned i;
+ char ifname[PATH_MAX];
+#ifdef RTE_LIBRTE_VHOST_NUMA
+ int newnode;
+#endif
+
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+ list = find_internal_resource(ifname);
+ if (list == NULL) {
+ VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
+ return -1;
+ }
+
+ eth_dev = list->eth_dev;
+ internal = eth_dev->data->dev_private;
+ dev_conf = &eth_dev->data->dev_conf;
+
+#ifdef RTE_LIBRTE_VHOST_NUMA
+ newnode = rte_vhost_get_numa_node(vid);
+ if (newnode >= 0)
+ eth_dev->data->numa_node = newnode;
+#endif
+
+ internal->vid = vid;
+ if (rte_atomic32_read(&internal->started) == 1) {
+ queue_setup(eth_dev, internal);
+
+ if (dev_conf->intr_conf.rxq) {
+ if (eth_vhost_install_intr(eth_dev) < 0) {
+ VHOST_LOG(INFO,
+ "Failed to install interrupt handler.");
+ return -1;
+ }
+ }
+ } else {
+ VHOST_LOG(INFO, "RX/TX queues not exist yet\n");
+ }
+
+ for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
+ rte_vhost_enable_guest_notification(vid, i, 0);
+
+ rte_vhost_get_mtu(vid, &eth_dev->data->mtu);
+
+ eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+
+ rte_atomic32_set(&internal->dev_attached, 1);
+ update_queuing_status(eth_dev);
+
+ VHOST_LOG(INFO, "Vhost device %d created\n", vid);
+
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+
+ return 0;
+}
+
+static void
+destroy_device(int vid)
+{
+ struct rte_eth_dev *eth_dev;
+ struct pmd_internal *internal;
+ struct vhost_queue *vq;
+ struct internal_list *list;
+ char ifname[PATH_MAX];
+ unsigned i;
+ struct rte_vhost_vring_state *state;
+
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+ list = find_internal_resource(ifname);
+ if (list == NULL) {
+ VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
+ return;
+ }
+ eth_dev = list->eth_dev;
+ internal = eth_dev->data->dev_private;
+
+ rte_atomic32_set(&internal->dev_attached, 0);
+ update_queuing_status(eth_dev);
+
+ eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
+ if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ vq = eth_dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = -1;
+ }
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ vq = eth_dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = -1;
+ }
+ }
+
+ state = vring_states[eth_dev->data->port_id];
+ rte_spinlock_lock(&state->lock);
+ for (i = 0; i <= state->max_vring; i++) {
+ state->cur[i] = false;
+ state->seen[i] = false;
+ }
+ state->max_vring = 0;
+ rte_spinlock_unlock(&state->lock);
+
+ VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
+ eth_vhost_uninstall_intr(eth_dev);
+
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
+static int
+vring_state_changed(int vid, uint16_t vring, int enable)
+{
+ struct rte_vhost_vring_state *state;
+ struct rte_eth_dev *eth_dev;
+ struct internal_list *list;
+ char ifname[PATH_MAX];
+
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+ list = find_internal_resource(ifname);
+ if (list == NULL) {
+ VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
+ return -1;
+ }
+
+ eth_dev = list->eth_dev;
+ /* won't be NULL */
+ state = vring_states[eth_dev->data->port_id];
+ rte_spinlock_lock(&state->lock);
+ state->cur[vring] = enable;
+ state->max_vring = RTE_MAX(vring, state->max_vring);
+ rte_spinlock_unlock(&state->lock);
+
+ VHOST_LOG(INFO, "vring%u is %s\n",
+ vring, enable ? "enabled" : "disabled");
+
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
+
+ return 0;
+}
+
+static struct vhost_device_ops vhost_ops = {
+ .new_device = new_device,
+ .destroy_device = destroy_device,
+ .vring_state_changed = vring_state_changed,
+};
+
+int
+rte_eth_vhost_get_queue_event(uint16_t port_id,
+ struct rte_eth_vhost_queue_event *event)
+{
+ struct rte_vhost_vring_state *state;
+ unsigned int i;
+ int idx;
+
+ if (port_id >= RTE_MAX_ETHPORTS) {
+ VHOST_LOG(ERR, "Invalid port id\n");
+ return -1;
+ }
+
+ state = vring_states[port_id];
+ if (!state) {
+ VHOST_LOG(ERR, "Unused port\n");
+ return -1;
+ }
+
+ rte_spinlock_lock(&state->lock);
+ for (i = 0; i <= state->max_vring; i++) {
+ idx = state->index++ % (state->max_vring + 1);
+
+ if (state->cur[idx] != state->seen[idx]) {
+ state->seen[idx] = state->cur[idx];
+ event->queue_id = idx / 2;
+ event->rx = idx & 1;
+ event->enable = state->cur[idx];
+ rte_spinlock_unlock(&state->lock);
+ return 0;
+ }
+ }
+ rte_spinlock_unlock(&state->lock);
+
+ return -1;
+}
+
+int
+rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
+{
+ struct internal_list *list;
+ struct rte_eth_dev *eth_dev;
+ struct vhost_queue *vq;
+ int vid = -1;
+
+ if (!rte_eth_dev_is_valid_port(port_id))
+ return -1;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ eth_dev = list->eth_dev;
+ if (eth_dev->data->port_id == port_id) {
+ vq = eth_dev->data->rx_queues[0];
+ if (vq) {
+ vid = vq->vid;
+ }
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&internal_list_lock);
+
+ return vid;
+}
+
+static int
+eth_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct pmd_internal *internal = eth_dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+
+ queue_setup(eth_dev, internal);
+
+ if (rte_atomic32_read(&internal->dev_attached) == 1) {
+ if (dev_conf->intr_conf.rxq) {
+ if (eth_vhost_install_intr(eth_dev) < 0) {
+ VHOST_LOG(INFO,
+ "Failed to install interrupt handler.");
+ return -1;
+ }
+ }
+ }
+
+ rte_atomic32_set(&internal->started, 1);
+ update_queuing_status(eth_dev);
+
+ return 0;
+}
+
+static void
+eth_dev_stop(struct rte_eth_dev *dev)
+{
+ struct pmd_internal *internal = dev->data->dev_private;
+
+ rte_atomic32_set(&internal->started, 0);
+ update_queuing_status(dev);
+}
+
+static void
+eth_dev_close(struct rte_eth_dev *dev)
+{
+ struct pmd_internal *internal;
+ struct internal_list *list;
+ unsigned int i;
+
+ internal = dev->data->dev_private;
+ if (!internal)
+ return;
+
+ eth_dev_stop(dev);
+
+ rte_vhost_driver_unregister(internal->iface_name);
+
+ list = find_internal_resource(internal->iface_name);
+ if (!list)
+ return;
+
+ pthread_mutex_lock(&internal_list_lock);
+ TAILQ_REMOVE(&internal_list, list, next);
+ pthread_mutex_unlock(&internal_list_lock);
+ rte_free(list);
+
+ if (dev->data->rx_queues)
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ rte_free(dev->data->rx_queues[i]);
+
+ if (dev->data->tx_queues)
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ rte_free(dev->data->tx_queues[i]);
+
+ rte_free(dev->data->mac_addrs);
+ free(internal->dev_name);
+ free(internal->iface_name);
+ rte_free(internal);
+
+ dev->data->dev_private = NULL;
+}
+
+static int
+eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct vhost_queue *vq;
+
+ vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (vq == NULL) {
+ VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
+ return -ENOMEM;
+ }
+
+ vq->mb_pool = mb_pool;
+ vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
+ dev->data->rx_queues[rx_queue_id] = vq;
+
+ return 0;
+}
+
+static int
+eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct vhost_queue *vq;
+
+ vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (vq == NULL) {
+ VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
+ return -ENOMEM;
+ }
+
+ vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
+ dev->data->tx_queues[tx_queue_id] = vq;
+
+ return 0;
+}
+
+static void
+eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internal *internal;
+
+ internal = dev->data->dev_private;
+ if (internal == NULL) {
+ VHOST_LOG(ERR, "Invalid device specified\n");
+ return;
+ }
+
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t)-1;
+ dev_info->max_rx_queues = internal->max_queues;
+ dev_info->max_tx_queues = internal->max_queues;
+ dev_info->min_rx_bufsize = 0;
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_CRC_STRIP;
+}
+
+static int
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ unsigned i;
+ unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
+ unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
+ struct vhost_queue *vq;
+
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+ i < dev->data->nb_rx_queues; i++) {
+ if (dev->data->rx_queues[i] == NULL)
+ continue;
+ vq = dev->data->rx_queues[i];
+ stats->q_ipackets[i] = vq->stats.pkts;
+ rx_total += stats->q_ipackets[i];
+
+ stats->q_ibytes[i] = vq->stats.bytes;
+ rx_total_bytes += stats->q_ibytes[i];
+ }
+
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+ i < dev->data->nb_tx_queues; i++) {
+ if (dev->data->tx_queues[i] == NULL)
+ continue;
+ vq = dev->data->tx_queues[i];
+ stats->q_opackets[i] = vq->stats.pkts;
+ tx_missed_total += vq->stats.missed_pkts;
+ tx_total += stats->q_opackets[i];
+
+ stats->q_obytes[i] = vq->stats.bytes;
+ tx_total_bytes += stats->q_obytes[i];
+ }
+
+ stats->ipackets = rx_total;
+ stats->opackets = tx_total;
+ stats->oerrors = tx_missed_total;
+ stats->ibytes = rx_total_bytes;
+ stats->obytes = tx_total_bytes;
+
+ return 0;
+}
+
+static void
+eth_stats_reset(struct rte_eth_dev *dev)
+{
+ struct vhost_queue *vq;
+ unsigned i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (dev->data->rx_queues[i] == NULL)
+ continue;
+ vq = dev->data->rx_queues[i];
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (dev->data->tx_queues[i] == NULL)
+ continue;
+ vq = dev->data->tx_queues[i];
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
+ vq->stats.missed_pkts = 0;
+ }
+}
+
+static void
+eth_queue_release(void *q)
+{
+ rte_free(q);
+}
+
+static int
+eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
+{
+ /*
+ * vHost does not hang onto mbuf. eth_vhost_tx() copies packet data
+ * and releases mbuf, so nothing to cleanup.
+ */
+ return 0;
+}
+
+static int
+eth_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
+{
+ return 0;
+}
+
+static uint32_t
+eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct vhost_queue *vq;
+
+ vq = dev->data->rx_queues[rx_queue_id];
+ if (vq == NULL)
+ return 0;
+
+ return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
+}
+
+static const struct eth_dev_ops ops = {
+ .dev_start = eth_dev_start,
+ .dev_stop = eth_dev_stop,
+ .dev_close = eth_dev_close,
+ .dev_configure = eth_dev_configure,
+ .dev_infos_get = eth_dev_info,
+ .rx_queue_setup = eth_rx_queue_setup,
+ .tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_release = eth_queue_release,
+ .tx_queue_release = eth_queue_release,
+ .tx_done_cleanup = eth_tx_done_cleanup,
+ .rx_queue_count = eth_rx_queue_count,
+ .link_update = eth_link_update,
+ .stats_get = eth_stats_get,
+ .stats_reset = eth_stats_reset,
+ .xstats_reset = vhost_dev_xstats_reset,
+ .xstats_get = vhost_dev_xstats_get,
+ .xstats_get_names = vhost_dev_xstats_get_names,
+ .rx_queue_intr_enable = eth_rxq_intr_enable,
+ .rx_queue_intr_disable = eth_rxq_intr_disable,
+};
+
+static struct rte_vdev_driver pmd_vhost_drv;
+
+static int
+eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
+ int16_t queues, const unsigned int numa_node, uint64_t flags)
+{
+ const char *name = rte_vdev_device_name(dev);
+ struct rte_eth_dev_data *data;
+ struct pmd_internal *internal = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ struct ether_addr *eth_addr = NULL;
+ struct rte_vhost_vring_state *vring_state = NULL;
+ struct internal_list *list = NULL;
+
+ VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
+ numa_node);
+
+ list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
+ if (list == NULL)
+ goto error;
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
+ if (eth_dev == NULL)
+ goto error;
+
+ eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
+ if (eth_addr == NULL)
+ goto error;
+ *eth_addr = base_eth_addr;
+ eth_addr->addr_bytes[5] = eth_dev->data->port_id;
+
+ vring_state = rte_zmalloc_socket(name,
+ sizeof(*vring_state), 0, numa_node);
+ if (vring_state == NULL)
+ goto error;
+
+ /* now put it all together
+ * - store queue data in internal,
+ * - point eth_dev_data to internals
+ * - and point eth_dev structure to new eth_dev_data structure
+ */
+ internal = eth_dev->data->dev_private;
+ internal->dev_name = strdup(name);
+ if (internal->dev_name == NULL)
+ goto error;
+ internal->iface_name = strdup(iface_name);
+ if (internal->iface_name == NULL)
+ goto error;
+
+ list->eth_dev = eth_dev;
+ pthread_mutex_lock(&internal_list_lock);
+ TAILQ_INSERT_TAIL(&internal_list, list, next);
+ pthread_mutex_unlock(&internal_list_lock);
+
+ rte_spinlock_init(&vring_state->lock);
+ vring_states[eth_dev->data->port_id] = vring_state;
+
+ data = eth_dev->data;
+ data->nb_rx_queues = queues;
+ data->nb_tx_queues = queues;
+ internal->max_queues = queues;
+ internal->vid = -1;
+ data->dev_link = pmd_link;
+ data->mac_addrs = eth_addr;
+ data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+
+ eth_dev->dev_ops = &ops;
+
+ /* finally assign rx and tx ops */
+ eth_dev->rx_pkt_burst = eth_vhost_rx;
+ eth_dev->tx_pkt_burst = eth_vhost_tx;
+
+ if (rte_vhost_driver_register(iface_name, flags))
+ goto error;
+
+ if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
+ VHOST_LOG(ERR, "Can't register callbacks\n");
+ goto error;
+ }
+
+ if (rte_vhost_driver_start(iface_name) < 0) {
+ VHOST_LOG(ERR, "Failed to start driver for %s\n",
+ iface_name);
+ goto error;
+ }
+
+ rte_eth_dev_probing_finish(eth_dev);
+ return data->port_id;
+
+error:
+ if (internal) {
+ free(internal->iface_name);
+ free(internal->dev_name);
+ }
+ rte_free(vring_state);
+ rte_free(eth_addr);
+ if (eth_dev)
+ rte_eth_dev_release_port(eth_dev);
+ rte_free(internal);
+ rte_free(list);
+
+ return -1;
+}
+
+static inline int
+open_iface(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ const char **iface_name = extra_args;
+
+ if (value == NULL)
+ return -1;
+
+ *iface_name = value;
+
+ return 0;
+}
+
+static inline int
+open_int(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ uint16_t *n = extra_args;
+
+ if (value == NULL || extra_args == NULL)
+ return -EINVAL;
+
+ *n = (uint16_t)strtoul(value, NULL, 0);
+ if (*n == USHRT_MAX && errno == ERANGE)
+ return -1;
+
+ return 0;
+}
+
+static int
+rte_pmd_vhost_probe(struct rte_vdev_device *dev)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+ char *iface_name;
+ uint16_t queues;
+ uint64_t flags = 0;
+ int client_mode = 0;
+ int dequeue_zero_copy = 0;
+ int iommu_support = 0;
+ struct rte_eth_dev *eth_dev;
+ const char *name = rte_vdev_device_name(dev);
+
+ VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ VHOST_LOG(ERR, "Failed to probe %s\n", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
+ if (kvlist == NULL)
+ return -1;
+
+ if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
+ &open_iface, &iface_name);
+ if (ret < 0)
+ goto out_free;
+ } else {
+ ret = -1;
+ goto out_free;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
+ &open_int, &queues);
+ if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
+ goto out_free;
+
+ } else
+ queues = 1;
+
+ if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
+ &open_int, &client_mode);
+ if (ret < 0)
+ goto out_free;
+
+ if (client_mode)
+ flags |= RTE_VHOST_USER_CLIENT;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
+ &open_int, &dequeue_zero_copy);
+ if (ret < 0)
+ goto out_free;
+
+ if (dequeue_zero_copy)
+ flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
+ &open_int, &iommu_support);
+ if (ret < 0)
+ goto out_free;
+
+ if (iommu_support)
+ flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
+ }
+
+ if (dev->device.numa_node == SOCKET_ID_ANY)
+ dev->device.numa_node = rte_socket_id();
+
+ eth_dev_vhost_create(dev, iface_name, queues, dev->device.numa_node,
+ flags);
+
+out_free:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+rte_pmd_vhost_remove(struct rte_vdev_device *dev)
+{
+ const char *name;
+ struct rte_eth_dev *eth_dev = NULL;
+
+ name = rte_vdev_device_name(dev);
+ VHOST_LOG(INFO, "Un-Initializing pmd_vhost for %s\n", name);
+
+ /* find an ethdev entry */
+ eth_dev = rte_eth_dev_allocated(name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ eth_dev_close(eth_dev);
+
+ rte_free(vring_states[eth_dev->data->port_id]);
+ vring_states[eth_dev->data->port_id] = NULL;
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_vhost_drv = {
+ .probe = rte_pmd_vhost_probe,
+ .remove = rte_pmd_vhost_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
+RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
+RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
+ "iface=<ifc> "
+ "queues=<int>");
+
+RTE_INIT(vhost_init_log)
+{
+ vhost_logtype = rte_log_register("pmd.net.vhost");
+ if (vhost_logtype >= 0)
+ rte_log_set_level(vhost_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/vhost/rte_eth_vhost.h b/src/spdk/dpdk/drivers/net/vhost/rte_eth_vhost.h
new file mode 100644
index 00000000..0e68b9f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vhost/rte_eth_vhost.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 IGEL Co., Ltd.
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+#ifndef _RTE_ETH_VHOST_H_
+#define _RTE_ETH_VHOST_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <rte_vhost.h>
+
+/*
+ * Event description.
+ */
+struct rte_eth_vhost_queue_event {
+ uint16_t queue_id;
+ bool rx;
+ bool enable;
+};
+
+/**
+ * Get queue events from specified port.
+ * If a callback for below event is registered by
+ * rte_eth_dev_callback_register(), this function will describe what was
+ * changed.
+ * - RTE_ETH_EVENT_QUEUE_STATE
+ * Multiple events may cause only one callback kicking, so call this function
+ * while returning 0.
+ *
+ * @param port_id
+ * Port id.
+ * @param event
+ * Pointer to a rte_eth_vhost_queue_event structure.
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int rte_eth_vhost_get_queue_event(uint16_t port_id,
+ struct rte_eth_vhost_queue_event *event);
+
+/**
+ * Get the 'vid' value associated with the specified port.
+ *
+ * @return
+ * - On success, the 'vid' associated with 'port_id'.
+ * - On failure, a negative value.
+ */
+int rte_eth_vhost_get_vid_from_port_id(uint16_t port_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/vhost/rte_pmd_vhost_version.map b/src/spdk/dpdk/drivers/net/vhost/rte_pmd_vhost_version.map
new file mode 100644
index 00000000..695db857
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vhost/rte_pmd_vhost_version.map
@@ -0,0 +1,13 @@
+DPDK_16.04 {
+ global:
+
+ rte_eth_vhost_get_queue_event;
+
+ local: *;
+};
+
+DPDK_16.11 {
+ global:
+
+ rte_eth_vhost_get_vid_from_port_id;
+};
diff --git a/src/spdk/dpdk/drivers/net/virtio/Makefile b/src/spdk/dpdk/drivers/net/virtio/Makefile
new file mode 100644
index 00000000..6c2c9967
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/Makefile
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_virtio.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+ifeq ($(CONFIG_RTE_VIRTIO_USER),y)
+LDLIBS += -lrte_bus_vdev
+endif
+
+EXPORT_MAP := rte_pmd_virtio_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtqueue.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx_simple.c
+
+ifeq ($(CONFIG_RTE_ARCH_X86),y)
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx_simple_sse.c
+else ifneq ($(filter y,$(CONFIG_RTE_ARCH_ARM) $(CONFIG_RTE_ARCH_ARM64)),)
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx_simple_neon.c
+endif
+
+ifeq ($(CONFIG_RTE_VIRTIO_USER),y)
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/vhost_user.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/vhost_kernel.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/vhost_kernel_tap.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/virtio_user_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user_ethdev.c
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/virtio/meson.build b/src/spdk/dpdk/drivers/net/virtio/meson.build
new file mode 100644
index 00000000..e43ce6bb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+sources += files('virtio_ethdev.c',
+ 'virtio_pci.c',
+ 'virtio_rxtx.c',
+ 'virtio_rxtx_simple.c',
+ 'virtqueue.c')
+deps += ['kvargs', 'bus_pci']
+
+if arch_subdir == 'x86'
+ sources += files('virtio_rxtx_simple_sse.c')
+elif arch_subdir == 'arm' and host_machine.cpu_family().startswith('aarch64')
+ sources += files('virtio_rxtx_simple_neon.c')
+endif
+
+if host_machine.system() == 'linux'
+ dpdk_conf.set('RTE_VIRTIO_USER', 1)
+
+ sources += files('virtio_user_ethdev.c',
+ 'virtio_user/vhost_kernel.c',
+ 'virtio_user/vhost_kernel_tap.c',
+ 'virtio_user/vhost_user.c',
+ 'virtio_user/virtio_user_dev.c')
+ deps += ['bus_vdev']
+endif
diff --git a/src/spdk/dpdk/drivers/net/virtio/rte_pmd_virtio_version.map b/src/spdk/dpdk/drivers/net/virtio/rte_pmd_virtio_version.map
new file mode 100644
index 00000000..ef353984
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/rte_pmd_virtio_version.map
@@ -0,0 +1,4 @@
+DPDK_2.0 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_ethdev.c b/src/spdk/dpdk/drivers/net/virtio/virtio_ethdev.c
new file mode 100644
index 00000000..614357da
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_ethdev.c
@@ -0,0 +1,2219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_branch_prediction.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_arp.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_cpuflags.h>
+
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+
+#include "virtio_ethdev.h"
+#include "virtio_pci.h"
+#include "virtio_logs.h"
+#include "virtqueue.h"
+#include "virtio_rxtx.h"
+
+static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
+static int virtio_dev_configure(struct rte_eth_dev *dev);
+static int virtio_dev_start(struct rte_eth_dev *dev);
+static void virtio_dev_stop(struct rte_eth_dev *dev);
+static void virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static void virtio_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int virtio_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+
+static void virtio_set_hwaddr(struct virtio_hw *hw);
+static void virtio_get_hwaddr(struct virtio_hw *hw);
+
+static int virtio_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned n);
+static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit);
+static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
+static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
+static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static int virtio_mac_addr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq);
+static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
+static int virtio_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+
+static int virtio_intr_enable(struct rte_eth_dev *dev);
+static int virtio_intr_disable(struct rte_eth_dev *dev);
+
+static int virtio_dev_queue_stats_mapping_set(
+ struct rte_eth_dev *eth_dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx);
+
+int virtio_logtype_init;
+int virtio_logtype_driver;
+
+static void virtio_notify_peers(struct rte_eth_dev *dev);
+static void virtio_ack_link_announce(struct rte_eth_dev *dev);
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_virtio_map[] = {
+ { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
+ { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+struct rte_virtio_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+/* [rt]x_qX_ is prepended to the name string here */
+static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
+ {"good_packets", offsetof(struct virtnet_rx, stats.packets)},
+ {"good_bytes", offsetof(struct virtnet_rx, stats.bytes)},
+ {"errors", offsetof(struct virtnet_rx, stats.errors)},
+ {"multicast_packets", offsetof(struct virtnet_rx, stats.multicast)},
+ {"broadcast_packets", offsetof(struct virtnet_rx, stats.broadcast)},
+ {"undersize_packets", offsetof(struct virtnet_rx, stats.size_bins[0])},
+ {"size_64_packets", offsetof(struct virtnet_rx, stats.size_bins[1])},
+ {"size_65_127_packets", offsetof(struct virtnet_rx, stats.size_bins[2])},
+ {"size_128_255_packets", offsetof(struct virtnet_rx, stats.size_bins[3])},
+ {"size_256_511_packets", offsetof(struct virtnet_rx, stats.size_bins[4])},
+ {"size_512_1023_packets", offsetof(struct virtnet_rx, stats.size_bins[5])},
+ {"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
+ {"size_1519_max_packets", offsetof(struct virtnet_rx, stats.size_bins[7])},
+};
+
+/* [rt]x_qX_ is prepended to the name string here */
+static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
+ {"good_packets", offsetof(struct virtnet_tx, stats.packets)},
+ {"good_bytes", offsetof(struct virtnet_tx, stats.bytes)},
+ {"errors", offsetof(struct virtnet_tx, stats.errors)},
+ {"multicast_packets", offsetof(struct virtnet_tx, stats.multicast)},
+ {"broadcast_packets", offsetof(struct virtnet_tx, stats.broadcast)},
+ {"undersize_packets", offsetof(struct virtnet_tx, stats.size_bins[0])},
+ {"size_64_packets", offsetof(struct virtnet_tx, stats.size_bins[1])},
+ {"size_65_127_packets", offsetof(struct virtnet_tx, stats.size_bins[2])},
+ {"size_128_255_packets", offsetof(struct virtnet_tx, stats.size_bins[3])},
+ {"size_256_511_packets", offsetof(struct virtnet_tx, stats.size_bins[4])},
+ {"size_512_1023_packets", offsetof(struct virtnet_tx, stats.size_bins[5])},
+ {"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
+ {"size_1519_max_packets", offsetof(struct virtnet_tx, stats.size_bins[7])},
+};
+
+#define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
+ sizeof(rte_virtio_rxq_stat_strings[0]))
+#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
+ sizeof(rte_virtio_txq_stat_strings[0]))
+
+struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
+
+static int
+virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
+{
+ uint32_t head, i;
+ int k, sum = 0;
+ virtio_net_ctrl_ack status = ~0;
+ struct virtio_pmd_ctrl *result;
+ struct virtqueue *vq;
+
+ ctrl->status = status;
+
+ if (!cvq || !cvq->vq) {
+ PMD_INIT_LOG(ERR, "Control queue is not supported.");
+ return -1;
+ }
+
+ rte_spinlock_lock(&cvq->lock);
+ vq = cvq->vq;
+ head = vq->vq_desc_head_idx;
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
+ "vq->hw->cvq = %p vq = %p",
+ vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
+
+ if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
+ rte_spinlock_unlock(&cvq->lock);
+ return -1;
+ }
+
+ memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
+ sizeof(struct virtio_pmd_ctrl));
+
+ /*
+ * Format is enforced in qemu code:
+ * One TX packet for header;
+ * At least one TX packet per argument;
+ * One RX packet for ACK.
+ */
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
+ vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
+ vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_free_cnt--;
+ i = vq->vq_ring.desc[head].next;
+
+ for (k = 0; k < pkt_num; k++) {
+ vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
+ vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr)
+ + sizeof(ctrl->status) + sizeof(uint8_t)*sum;
+ vq->vq_ring.desc[i].len = dlen[k];
+ sum += dlen[k];
+ vq->vq_free_cnt--;
+ i = vq->vq_ring.desc[i].next;
+ }
+
+ vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
+ vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_ring.desc[i].len = sizeof(ctrl->status);
+ vq->vq_free_cnt--;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
+ rte_rmb();
+ while (VIRTQUEUE_NUSED(vq) == 0) {
+ rte_rmb();
+ usleep(100);
+ }
+
+ while (VIRTQUEUE_NUSED(vq)) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+
+ while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ result = cvq->virtio_net_hdr_mz->addr;
+
+ rte_spinlock_unlock(&cvq->lock);
+ return result->status;
+}
+
+static int
+virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtio_pmd_ctrl ctrl;
+ int dlen[1];
+ int ret;
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
+ memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
+
+ dlen[0] = sizeof(uint16_t);
+
+ ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
+ "failed, this is too late now...");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+virtio_dev_queue_release(void *queue __rte_unused)
+{
+ /* do nothing */
+}
+
+static uint16_t
+virtio_get_nr_vq(struct virtio_hw *hw)
+{
+ uint16_t nr_vq = hw->max_queue_pairs * 2;
+
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
+ nr_vq += 1;
+
+ return nr_vq;
+}
+
+static void
+virtio_init_vring(struct virtqueue *vq)
+{
+ int size = vq->vq_nentries;
+ struct vring *vr = &vq->vq_ring;
+ uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /*
+ * Reinitialise since virtio port might have been stopped and restarted
+ */
+ memset(ring_mem, 0, vq->vq_ring_size);
+ vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_used_cons_idx = 0;
+ vq->vq_desc_head_idx = 0;
+ vq->vq_avail_idx = 0;
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+ memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
+
+ vring_desc_init(vr->desc, size);
+
+ /*
+ * Disable device(host) interrupting guest
+ */
+ virtqueue_disable_intr(vq);
+}
+
+static int
+virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
+{
+ char vq_name[VIRTQUEUE_MAX_NAME_SZ];
+ char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
+ const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
+ unsigned int vq_size, size;
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtnet_rx *rxvq = NULL;
+ struct virtnet_tx *txvq = NULL;
+ struct virtnet_ctl *cvq = NULL;
+ struct virtqueue *vq;
+ size_t sz_hdr_mz = 0;
+ void *sw_ring = NULL;
+ int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
+ int ret;
+
+ PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
+
+ /*
+ * Read the virtqueue size from the Queue Size field
+ * Always power of 2 and if 0 virtqueue does not exist
+ */
+ vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
+ PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
+ if (vq_size == 0) {
+ PMD_INIT_LOG(ERR, "virtqueue does not exist");
+ return -EINVAL;
+ }
+
+ if (!rte_is_power_of_2(vq_size)) {
+ PMD_INIT_LOG(ERR, "virtqueue size is not powerof 2");
+ return -EINVAL;
+ }
+
+ snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
+ dev->data->port_id, vtpci_queue_idx);
+
+ size = RTE_ALIGN_CEIL(sizeof(*vq) +
+ vq_size * sizeof(struct vq_desc_extra),
+ RTE_CACHE_LINE_SIZE);
+ if (queue_type == VTNET_TQ) {
+ /*
+ * For each xmit packet, allocate a virtio_net_hdr
+ * and indirect ring elements
+ */
+ sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
+ } else if (queue_type == VTNET_CQ) {
+ /* Allocate a page for control vq command, data and status */
+ sz_hdr_mz = PAGE_SIZE;
+ }
+
+ vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (vq == NULL) {
+ PMD_INIT_LOG(ERR, "can not allocate vq");
+ return -ENOMEM;
+ }
+ hw->vqs[vtpci_queue_idx] = vq;
+
+ vq->hw = hw;
+ vq->vq_queue_index = vtpci_queue_idx;
+ vq->vq_nentries = vq_size;
+
+ /*
+ * Reserve a memzone for vring elements
+ */
+ size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+ PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
+ size, vq->vq_ring_size);
+
+ mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ VIRTIO_PCI_VRING_ALIGN);
+ if (mz == NULL) {
+ if (rte_errno == EEXIST)
+ mz = rte_memzone_lookup(vq_name);
+ if (mz == NULL) {
+ ret = -ENOMEM;
+ goto fail_q_alloc;
+ }
+ }
+
+ memset(mz->addr, 0, mz->len);
+
+ vq->vq_ring_mem = mz->iova;
+ vq->vq_ring_virt_mem = mz->addr;
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
+ (uint64_t)mz->iova);
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
+ (uint64_t)(uintptr_t)mz->addr);
+
+ virtio_init_vring(vq);
+
+ if (sz_hdr_mz) {
+ snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
+ dev->data->port_id, vtpci_queue_idx);
+ hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ RTE_CACHE_LINE_SIZE);
+ if (hdr_mz == NULL) {
+ if (rte_errno == EEXIST)
+ hdr_mz = rte_memzone_lookup(vq_hdr_name);
+ if (hdr_mz == NULL) {
+ ret = -ENOMEM;
+ goto fail_q_alloc;
+ }
+ }
+ }
+
+ if (queue_type == VTNET_RQ) {
+ size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
+ sizeof(vq->sw_ring[0]);
+
+ sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ if (!sw_ring) {
+ PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
+ ret = -ENOMEM;
+ goto fail_q_alloc;
+ }
+
+ vq->sw_ring = sw_ring;
+ rxvq = &vq->rxq;
+ rxvq->vq = vq;
+ rxvq->port_id = dev->data->port_id;
+ rxvq->mz = mz;
+ } else if (queue_type == VTNET_TQ) {
+ txvq = &vq->txq;
+ txvq->vq = vq;
+ txvq->port_id = dev->data->port_id;
+ txvq->mz = mz;
+ txvq->virtio_net_hdr_mz = hdr_mz;
+ txvq->virtio_net_hdr_mem = hdr_mz->iova;
+ } else if (queue_type == VTNET_CQ) {
+ cvq = &vq->cq;
+ cvq->vq = vq;
+ cvq->mz = mz;
+ cvq->virtio_net_hdr_mz = hdr_mz;
+ cvq->virtio_net_hdr_mem = hdr_mz->iova;
+ memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
+
+ hw->cvq = cvq;
+ }
+
+ /* For virtio_user case (that is when hw->dev is NULL), we use
+ * virtual address. And we need properly set _offset_, please see
+ * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
+ */
+ if (!hw->virtio_user_dev)
+ vq->offset = offsetof(struct rte_mbuf, buf_iova);
+ else {
+ vq->vq_ring_mem = (uintptr_t)mz->addr;
+ vq->offset = offsetof(struct rte_mbuf, buf_addr);
+ if (queue_type == VTNET_TQ)
+ txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+ else if (queue_type == VTNET_CQ)
+ cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+ }
+
+ if (queue_type == VTNET_TQ) {
+ struct virtio_tx_region *txr;
+ unsigned int i;
+
+ txr = hdr_mz->addr;
+ memset(txr, 0, vq_size * sizeof(*txr));
+ for (i = 0; i < vq_size; i++) {
+ struct vring_desc *start_dp = txr[i].tx_indir;
+
+ vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));
+
+ /* first indirect descriptor is always the tx header */
+ start_dp->addr = txvq->virtio_net_hdr_mem
+ + i * sizeof(*txr)
+ + offsetof(struct virtio_tx_region, tx_hdr);
+
+ start_dp->len = hw->vtnet_hdr_size;
+ start_dp->flags = VRING_DESC_F_NEXT;
+ }
+ }
+
+ if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
+ PMD_INIT_LOG(ERR, "setup_queue failed");
+ return -EINVAL;
+ }
+
+ return 0;
+
+fail_q_alloc:
+ rte_free(sw_ring);
+ rte_memzone_free(hdr_mz);
+ rte_memzone_free(mz);
+ rte_free(vq);
+
+ return ret;
+}
+
+static void
+virtio_free_queues(struct virtio_hw *hw)
+{
+ uint16_t nr_vq = virtio_get_nr_vq(hw);
+ struct virtqueue *vq;
+ int queue_type;
+ uint16_t i;
+
+ if (hw->vqs == NULL)
+ return;
+
+ for (i = 0; i < nr_vq; i++) {
+ vq = hw->vqs[i];
+ if (!vq)
+ continue;
+
+ queue_type = virtio_get_queue_type(hw, i);
+ if (queue_type == VTNET_RQ) {
+ rte_free(vq->sw_ring);
+ rte_memzone_free(vq->rxq.mz);
+ } else if (queue_type == VTNET_TQ) {
+ rte_memzone_free(vq->txq.mz);
+ rte_memzone_free(vq->txq.virtio_net_hdr_mz);
+ } else {
+ rte_memzone_free(vq->cq.mz);
+ rte_memzone_free(vq->cq.virtio_net_hdr_mz);
+ }
+
+ rte_free(vq);
+ hw->vqs[i] = NULL;
+ }
+
+ rte_free(hw->vqs);
+ hw->vqs = NULL;
+}
+
+static int
+virtio_alloc_queues(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ uint16_t nr_vq = virtio_get_nr_vq(hw);
+ uint16_t i;
+ int ret;
+
+ hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
+ if (!hw->vqs) {
+ PMD_INIT_LOG(ERR, "failed to allocate vqs");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_vq; i++) {
+ ret = virtio_init_queue(dev, i);
+ if (ret < 0) {
+ virtio_free_queues(hw);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
+
+static void
+virtio_dev_close(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+
+ PMD_INIT_LOG(DEBUG, "virtio_dev_close");
+
+ /* reset the NIC */
+ if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ VTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
+ if (intr_conf->rxq)
+ virtio_queues_unbind_intr(dev);
+
+ if (intr_conf->lsc || intr_conf->rxq) {
+ virtio_intr_disable(dev);
+ rte_intr_efd_disable(dev->intr_handle);
+ rte_free(dev->intr_handle->intr_vec);
+ dev->intr_handle->intr_vec = NULL;
+ }
+
+ vtpci_reset(hw);
+ virtio_dev_free_mbufs(dev);
+ virtio_free_queues(hw);
+}
+
+static void
+virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtio_pmd_ctrl ctrl;
+ int dlen[1];
+ int ret;
+
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
+ PMD_INIT_LOG(INFO, "host does not support rx control");
+ return;
+ }
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
+ ctrl.data[0] = 1;
+ dlen[0] = 1;
+
+ ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to enable promisc");
+}
+
+static void
+virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtio_pmd_ctrl ctrl;
+ int dlen[1];
+ int ret;
+
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
+ PMD_INIT_LOG(INFO, "host does not support rx control");
+ return;
+ }
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
+ ctrl.data[0] = 0;
+ dlen[0] = 1;
+
+ ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to disable promisc");
+}
+
+static void
+virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtio_pmd_ctrl ctrl;
+ int dlen[1];
+ int ret;
+
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
+ PMD_INIT_LOG(INFO, "host does not support rx control");
+ return;
+ }
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
+ ctrl.data[0] = 1;
+ dlen[0] = 1;
+
+ ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
+}
+
+static void
+virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtio_pmd_ctrl ctrl;
+ int dlen[1];
+ int ret;
+
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
+ PMD_INIT_LOG(INFO, "host does not support rx control");
+ return;
+ }
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
+ ctrl.data[0] = 0;
+ dlen[0] = 1;
+
+ ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
+}
+
+#define VLAN_TAG_LEN 4 /* 802.3ac tag (not DMA'd) */
+static int
+virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
+ hw->vtnet_hdr_size;
+ uint32_t frame_size = mtu + ether_hdr_len;
+ uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
+
+ max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
+
+ if (mtu < ETHER_MIN_MTU || frame_size > max_frame_size) {
+ PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
+ ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+ struct virtqueue *vq = rxvq->vq;
+
+ virtqueue_enable_intr(vq);
+ return 0;
+}
+
+static int
+virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+ struct virtqueue *vq = rxvq->vq;
+
+ virtqueue_disable_intr(vq);
+ return 0;
+}
+
+/*
+ * dev_ops for virtio, bare necessities for basic operation
+ */
+static const struct eth_dev_ops virtio_eth_dev_ops = {
+ .dev_configure = virtio_dev_configure,
+ .dev_start = virtio_dev_start,
+ .dev_stop = virtio_dev_stop,
+ .dev_close = virtio_dev_close,
+ .promiscuous_enable = virtio_dev_promiscuous_enable,
+ .promiscuous_disable = virtio_dev_promiscuous_disable,
+ .allmulticast_enable = virtio_dev_allmulticast_enable,
+ .allmulticast_disable = virtio_dev_allmulticast_disable,
+ .mtu_set = virtio_mtu_set,
+ .dev_infos_get = virtio_dev_info_get,
+ .stats_get = virtio_dev_stats_get,
+ .xstats_get = virtio_dev_xstats_get,
+ .xstats_get_names = virtio_dev_xstats_get_names,
+ .stats_reset = virtio_dev_stats_reset,
+ .xstats_reset = virtio_dev_stats_reset,
+ .link_update = virtio_dev_link_update,
+ .vlan_offload_set = virtio_dev_vlan_offload_set,
+ .rx_queue_setup = virtio_dev_rx_queue_setup,
+ .rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
+ .rx_queue_release = virtio_dev_queue_release,
+ .rx_descriptor_done = virtio_dev_rx_queue_done,
+ .tx_queue_setup = virtio_dev_tx_queue_setup,
+ .tx_queue_release = virtio_dev_queue_release,
+ /* collect stats per queue */
+ .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
+ .vlan_filter_set = virtio_vlan_filter_set,
+ .mac_addr_add = virtio_mac_addr_add,
+ .mac_addr_remove = virtio_mac_addr_remove,
+ .mac_addr_set = virtio_mac_addr_set,
+};
+
+static void
+virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ unsigned i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ const struct virtnet_tx *txvq = dev->data->tx_queues[i];
+ if (txvq == NULL)
+ continue;
+
+ stats->opackets += txvq->stats.packets;
+ stats->obytes += txvq->stats.bytes;
+ stats->oerrors += txvq->stats.errors;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] = txvq->stats.packets;
+ stats->q_obytes[i] = txvq->stats.bytes;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
+ if (rxvq == NULL)
+ continue;
+
+ stats->ipackets += rxvq->stats.packets;
+ stats->ibytes += rxvq->stats.bytes;
+ stats->ierrors += rxvq->stats.errors;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] = rxvq->stats.packets;
+ stats->q_ibytes[i] = rxvq->stats.bytes;
+ }
+ }
+
+ stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+}
+
+static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned limit)
+{
+ unsigned i;
+ unsigned count = 0;
+ unsigned t;
+
+ unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
+ dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
+
+ if (xstats_names != NULL) {
+ /* Note: limit checked in rte_eth_xstats_names() */
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct virtnet_rx *rxvq = dev->data->rx_queues[i];
+ if (rxvq == NULL)
+ continue;
+ for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", i,
+ rte_virtio_rxq_stat_strings[t].name);
+ count++;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct virtnet_tx *txvq = dev->data->tx_queues[i];
+ if (txvq == NULL)
+ continue;
+ for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", i,
+ rte_virtio_txq_stat_strings[t].name);
+ count++;
+ }
+ }
+ return count;
+ }
+ return nstats;
+}
+
+static int
+virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned n)
+{
+ unsigned i;
+ unsigned count = 0;
+
+ unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
+ dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
+
+ if (n < nstats)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct virtnet_rx *rxvq = dev->data->rx_queues[i];
+
+ if (rxvq == NULL)
+ continue;
+
+ unsigned t;
+
+ for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
+ xstats[count].value = *(uint64_t *)(((char *)rxvq) +
+ rte_virtio_rxq_stat_strings[t].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct virtnet_tx *txvq = dev->data->tx_queues[i];
+
+ if (txvq == NULL)
+ continue;
+
+ unsigned t;
+
+ for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
+ xstats[count].value = *(uint64_t *)(((char *)txvq) +
+ rte_virtio_txq_stat_strings[t].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static int
+virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ virtio_update_stats(dev, stats);
+
+ return 0;
+}
+
+static void
+virtio_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct virtnet_tx *txvq = dev->data->tx_queues[i];
+ if (txvq == NULL)
+ continue;
+
+ txvq->stats.packets = 0;
+ txvq->stats.bytes = 0;
+ txvq->stats.errors = 0;
+ txvq->stats.multicast = 0;
+ txvq->stats.broadcast = 0;
+ memset(txvq->stats.size_bins, 0,
+ sizeof(txvq->stats.size_bins[0]) * 8);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct virtnet_rx *rxvq = dev->data->rx_queues[i];
+ if (rxvq == NULL)
+ continue;
+
+ rxvq->stats.packets = 0;
+ rxvq->stats.bytes = 0;
+ rxvq->stats.errors = 0;
+ rxvq->stats.multicast = 0;
+ rxvq->stats.broadcast = 0;
+ memset(rxvq->stats.size_bins, 0,
+ sizeof(rxvq->stats.size_bins[0]) * 8);
+ }
+}
+
+static void
+virtio_set_hwaddr(struct virtio_hw *hw)
+{
+ vtpci_write_dev_config(hw,
+ offsetof(struct virtio_net_config, mac),
+ &hw->mac_addr, ETHER_ADDR_LEN);
+}
+
+static void
+virtio_get_hwaddr(struct virtio_hw *hw)
+{
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, mac),
+ &hw->mac_addr, ETHER_ADDR_LEN);
+ } else {
+ eth_random_addr(&hw->mac_addr[0]);
+ virtio_set_hwaddr(hw);
+ }
+}
+
+static int
+virtio_mac_table_set(struct virtio_hw *hw,
+ const struct virtio_net_ctrl_mac *uc,
+ const struct virtio_net_ctrl_mac *mc)
+{
+ struct virtio_pmd_ctrl ctrl;
+ int err, len[2];
+
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ PMD_DRV_LOG(INFO, "host does not support mac table");
+ return -1;
+ }
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
+
+ len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);
+ memcpy(ctrl.data, uc, len[0]);
+
+ len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);
+ memcpy(ctrl.data + len[0], mc, len[1]);
+
+ err = virtio_send_command(hw->cvq, &ctrl, len, 2);
+ if (err != 0)
+ PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
+ return err;
+}
+
+static int
+virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq __rte_unused)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ const struct ether_addr *addrs = dev->data->mac_addrs;
+ unsigned int i;
+ struct virtio_net_ctrl_mac *uc, *mc;
+
+ if (index >= VIRTIO_MAX_MAC_ADDRS) {
+ PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
+ return -EINVAL;
+ }
+
+ uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
+ uc->entries = 0;
+ mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
+ mc->entries = 0;
+
+ for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
+ const struct ether_addr *addr
+ = (i == index) ? mac_addr : addrs + i;
+ struct virtio_net_ctrl_mac *tbl
+ = is_multicast_ether_addr(addr) ? mc : uc;
+
+ memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
+ }
+
+ return virtio_mac_table_set(hw, uc, mc);
+}
+
+static void
+virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct ether_addr *addrs = dev->data->mac_addrs;
+ struct virtio_net_ctrl_mac *uc, *mc;
+ unsigned int i;
+
+ if (index >= VIRTIO_MAX_MAC_ADDRS) {
+ PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
+ return;
+ }
+
+ uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
+ uc->entries = 0;
+ mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
+ mc->entries = 0;
+
+ for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
+ struct virtio_net_ctrl_mac *tbl;
+
+ if (i == index || is_zero_ether_addr(addrs + i))
+ continue;
+
+ tbl = is_multicast_ether_addr(addrs + i) ? mc : uc;
+ memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
+ }
+
+ virtio_mac_table_set(hw, uc, mc);
+}
+
+static int
+virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);
+
+ /* Use atomic update if available */
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ struct virtio_pmd_ctrl ctrl;
+ int len = ETHER_ADDR_LEN;
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
+
+ memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
+ return virtio_send_command(hw->cvq, &ctrl, &len, 1);
+ }
+
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
+ return -ENOTSUP;
+
+ virtio_set_hwaddr(hw);
+ return 0;
+}
+
+static int
+virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtio_pmd_ctrl ctrl;
+ int len;
+
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
+ return -ENOTSUP;
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
+ ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
+ memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
+ len = sizeof(vlan_id);
+
+ return virtio_send_command(hw->cvq, &ctrl, &len, 1);
+}
+
+static int
+virtio_intr_enable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (rte_intr_enable(dev->intr_handle) < 0)
+ return -1;
+
+ if (!hw->virtio_user_dev)
+ hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
+
+ return 0;
+}
+
+static int
+virtio_intr_disable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (rte_intr_disable(dev->intr_handle) < 0)
+ return -1;
+
+ if (!hw->virtio_user_dev)
+ hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
+
+ return 0;
+}
+
+static int
+virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
+{
+ uint64_t host_features;
+
+ /* Prepare guest_features: feature that driver wants to support */
+ PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
+ req_features);
+
+ /* Read device(host) feature bits */
+ host_features = VTPCI_OPS(hw)->get_features(hw);
+ PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
+ host_features);
+
+ /* If supported, ensure MTU value is valid before acknowledging it. */
+ if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
+ struct virtio_net_config config;
+
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, mtu),
+ &config.mtu, sizeof(config.mtu));
+
+ if (config.mtu < ETHER_MIN_MTU)
+ req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
+ }
+
+ /*
+ * Negotiate features: Subset of device feature bits are written back
+ * guest feature bits.
+ */
+ hw->guest_features = req_features;
+ hw->guest_features = vtpci_negotiate_features(hw, host_features);
+ PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
+ hw->guest_features);
+
+ if (hw->modern) {
+ if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
+ PMD_INIT_LOG(ERR,
+ "VIRTIO_F_VERSION_1 features is not enabled.");
+ return -1;
+ }
+ vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
+ if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
+ PMD_INIT_LOG(ERR,
+ "failed to set FEATURES_OK status!");
+ return -1;
+ }
+ }
+
+ hw->req_guest_features = req_features;
+
+ return 0;
+}
+
+int
+virtio_dev_pause(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ rte_spinlock_lock(&hw->state_lock);
+
+ if (hw->started == 0) {
+ /* Device is just stopped. */
+ rte_spinlock_unlock(&hw->state_lock);
+ return -1;
+ }
+ hw->started = 0;
+ /*
+ * Prevent the worker threads from touching queues to avoid contention,
+ * 1 ms should be enough for the ongoing Tx function to finish.
+ */
+ rte_delay_ms(1);
+ return 0;
+}
+
+/*
+ * Recover hw state to let the worker threads continue.
+ */
+void
+virtio_dev_resume(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ hw->started = 1;
+ rte_spinlock_unlock(&hw->state_lock);
+}
+
+/*
+ * Should be called only after device is paused.
+ */
+int
+virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
+ int nb_pkts)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtnet_tx *txvq = dev->data->tx_queues[0];
+ int ret;
+
+ hw->inject_pkts = tx_pkts;
+ ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
+ hw->inject_pkts = NULL;
+
+ return ret;
+}
+
+static void
+virtio_notify_peers(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtnet_rx *rxvq;
+ struct rte_mbuf *rarp_mbuf;
+
+ if (!dev->data->rx_queues)
+ return;
+
+ rxvq = dev->data->rx_queues[0];
+ if (!rxvq)
+ return;
+
+ rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
+ (struct ether_addr *)hw->mac_addr);
+ if (rarp_mbuf == NULL) {
+ PMD_DRV_LOG(ERR, "failed to make RARP packet.");
+ return;
+ }
+
+ /* If virtio port just stopped, no need to send RARP */
+ if (virtio_dev_pause(dev) < 0) {
+ rte_pktmbuf_free(rarp_mbuf);
+ return;
+ }
+
+ virtio_inject_pkts(dev, &rarp_mbuf, 1);
+ virtio_dev_resume(dev);
+}
+
+static void
+virtio_ack_link_announce(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtio_pmd_ctrl ctrl;
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_ANNOUNCE;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_ANNOUNCE_ACK;
+
+ virtio_send_command(hw->cvq, &ctrl, NULL, 0);
+}
+
+/*
+ * Process virtio config changed interrupt. Call the callback
+ * if link state changed, generate gratuitous RARP packet if
+ * the status indicates an ANNOUNCE.
+ */
+void
+virtio_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = param;
+ struct virtio_hw *hw = dev->data->dev_private;
+ uint8_t isr;
+
+ /* Read interrupt status which clears interrupt */
+ isr = vtpci_isr(hw);
+ PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
+
+ if (virtio_intr_enable(dev) < 0)
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+
+ if (isr & VIRTIO_PCI_ISR_CONFIG) {
+ if (virtio_dev_link_update(dev, 0) == 0)
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+
+ if (isr & VIRTIO_NET_S_ANNOUNCE) {
+ virtio_notify_peers(dev);
+ if (hw->cvq)
+ virtio_ack_link_announce(dev);
+ }
+}
+
+/* set rx and tx handlers according to what is supported */
+static void
+set_rxtx_funcs(struct rte_eth_dev *eth_dev)
+{
+ struct virtio_hw *hw = eth_dev->data->dev_private;
+
+ if (hw->use_simple_rx) {
+ PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else if (hw->use_inorder_rx) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using inorder mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ }
+
+ if (hw->use_inorder_tx) {
+ PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+ }
+}
+
+/* Only support 1:1 queue/interrupt mapping so far.
+ * TODO: support n:1 queue/interrupt mapping when there are limited number of
+ * interrupt vectors (<N+1).
+ */
+static int
+virtio_queues_bind_intr(struct rte_eth_dev *dev)
+{
+ uint32_t i;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_LOG(INFO, "queue/interrupt binding");
+ for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ dev->intr_handle->intr_vec[i] = i + 1;
+ if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
+ VIRTIO_MSI_NO_VECTOR) {
+ PMD_DRV_LOG(ERR, "failed to set queue vector");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static void
+virtio_queues_unbind_intr(struct rte_eth_dev *dev)
+{
+ uint32_t i;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
+ for (i = 0; i < dev->data->nb_rx_queues; ++i)
+ VTPCI_OPS(hw)->set_queue_irq(hw,
+ hw->vqs[i * VTNET_CQ],
+ VIRTIO_MSI_NO_VECTOR);
+}
+
+static int
+virtio_configure_intr(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (!rte_intr_cap_multiple(dev->intr_handle)) {
+ PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
+ return -ENOTSUP;
+ }
+
+ if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
+ PMD_INIT_LOG(ERR, "Fail to create eventfd");
+ return -1;
+ }
+
+ if (!dev->intr_handle->intr_vec) {
+ dev->intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ hw->max_queue_pairs * sizeof(int), 0);
+ if (!dev->intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
+ hw->max_queue_pairs);
+ return -ENOMEM;
+ }
+ }
+
+ /* Re-register callback to update max_intr */
+ rte_intr_callback_unregister(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+ rte_intr_callback_register(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+
+ /* DO NOT try to remove this! This function will enable msix, or QEMU
+ * will encounter SIGSEGV when DRIVER_OK is sent.
+ * And for legacy devices, this should be done before queue/vec binding
+ * to change the config size from 20 to 24, or VIRTIO_MSI_QUEUE_VECTOR
+ * (22) will be ignored.
+ */
+ if (virtio_intr_enable(dev) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return -1;
+ }
+
+ if (virtio_queues_bind_intr(dev) < 0) {
+ PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* reset device and renegotiate features if needed */
+static int
+virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
+{
+ struct virtio_hw *hw = eth_dev->data->dev_private;
+ struct virtio_net_config *config;
+ struct virtio_net_config local_config;
+ struct rte_pci_device *pci_dev = NULL;
+ int ret;
+
+ /* Reset the device although not necessary at startup */
+ vtpci_reset(hw);
+
+ if (hw->vqs) {
+ virtio_dev_free_mbufs(eth_dev);
+ virtio_free_queues(hw);
+ }
+
+ /* Tell the host we've noticed this device. */
+ vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+
+ /* Tell the host we've known how to drive the device. */
+ vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+ if (virtio_negotiate_features(hw, req_features) < 0)
+ return -1;
+
+ if (!hw->virtio_user_dev) {
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ }
+
+ /* If host does not support both status and MSI-X then disable LSC */
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) &&
+ hw->use_msix != VIRTIO_MSIX_NONE)
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ else
+ eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+
+ /* Setting up rx_header size for the device */
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
+ hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ else
+ hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
+
+ /* Copy the permanent MAC address to: virtio_hw */
+ virtio_get_hwaddr(hw);
+ ether_addr_copy((struct ether_addr *) hw->mac_addr,
+ &eth_dev->data->mac_addrs[0]);
+ PMD_INIT_LOG(DEBUG,
+ "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
+ hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
+ hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
+
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
+ config = &local_config;
+
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, mac),
+ &config->mac, sizeof(config->mac));
+
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, status),
+ &config->status, sizeof(config->status));
+ } else {
+ PMD_INIT_LOG(DEBUG,
+ "VIRTIO_NET_F_STATUS is not supported");
+ config->status = 0;
+ }
+
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, max_virtqueue_pairs),
+ &config->max_virtqueue_pairs,
+ sizeof(config->max_virtqueue_pairs));
+ } else {
+ PMD_INIT_LOG(DEBUG,
+ "VIRTIO_NET_F_MQ is not supported");
+ config->max_virtqueue_pairs = 1;
+ }
+
+ hw->max_queue_pairs = config->max_virtqueue_pairs;
+
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, mtu),
+ &config->mtu,
+ sizeof(config->mtu));
+
+ /*
+ * MTU value has already been checked at negotiation
+ * time, but check again in case it has changed since
+ * then, which should not happen.
+ */
+ if (config->mtu < ETHER_MIN_MTU) {
+ PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
+ config->mtu);
+ return -1;
+ }
+
+ hw->max_mtu = config->mtu;
+ /* Set initial MTU to maximum one supported by vhost */
+ eth_dev->data->mtu = config->mtu;
+
+ } else {
+ hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
+ VLAN_TAG_LEN - hw->vtnet_hdr_size;
+ }
+
+ PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
+ config->max_virtqueue_pairs);
+ PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
+ PMD_INIT_LOG(DEBUG,
+ "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
+ config->mac[0], config->mac[1],
+ config->mac[2], config->mac[3],
+ config->mac[4], config->mac[5]);
+ } else {
+ PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
+ hw->max_queue_pairs = 1;
+ hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
+ VLAN_TAG_LEN - hw->vtnet_hdr_size;
+ }
+
+ ret = virtio_alloc_queues(eth_dev);
+ if (ret < 0)
+ return ret;
+
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ if (virtio_configure_intr(eth_dev) < 0) {
+ PMD_INIT_LOG(ERR, "failed to configure interrupt");
+ return -1;
+ }
+ }
+
+ vtpci_reinit_complete(hw);
+
+ if (pci_dev)
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ return 0;
+}
+
+/*
+ * Remap the PCI device again (IO port map for legacy device and
+ * memory map for modern device), so that the secondary process
+ * could have the PCI initiated correctly.
+ */
+static int
+virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
+{
+ if (hw->modern) {
+ /*
+ * We don't have to re-parse the PCI config space, since
+ * rte_pci_map_device() makes sure the mapped address
+ * in secondary process would equal to the one mapped in
+ * the primary process: error will be returned if that
+ * requirement is not met.
+ *
+ * That said, we could simply reuse all cap pointers
+ * (such as dev_cfg, common_cfg, etc.) parsed from the
+ * primary process, which is stored in shared memory.
+ */
+ if (rte_pci_map_device(pci_dev)) {
+ PMD_INIT_LOG(DEBUG, "failed to map pci device!");
+ return -1;
+ }
+ } else {
+ if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+virtio_set_vtpci_ops(struct virtio_hw *hw)
+{
+#ifdef RTE_VIRTIO_USER
+ if (hw->virtio_user_dev)
+ VTPCI_OPS(hw) = &virtio_user_ops;
+ else
+#endif
+ if (hw->modern)
+ VTPCI_OPS(hw) = &modern_ops;
+ else
+ VTPCI_OPS(hw) = &legacy_ops;
+}
+
+/*
+ * This function is based on probe() function in virtio_pci.c
+ * It returns 0 on success.
+ */
+int
+eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct virtio_hw *hw = eth_dev->data->dev_private;
+ int ret;
+
+ RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
+
+ eth_dev->dev_ops = &virtio_eth_dev_ops;
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ if (!hw->virtio_user_dev) {
+ ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
+ if (ret)
+ return ret;
+ }
+
+ virtio_set_vtpci_ops(hw);
+ set_rxtx_funcs(eth_dev);
+
+ return 0;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("virtio", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
+ return -ENOMEM;
+ }
+
+ hw->port_id = eth_dev->data->port_id;
+ /* For virtio_user case the hw->virtio_user_dev is populated by
+ * virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
+ */
+ if (!hw->virtio_user_dev) {
+ ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
+ if (ret)
+ goto out;
+ }
+
+ /* reset device and negotiate default features */
+ ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
+ if (ret < 0)
+ goto out;
+
+ /* Setup interrupt callback */
+ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ rte_intr_callback_register(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+
+ return 0;
+
+out:
+ rte_free(eth_dev->data->mac_addrs);
+ return ret;
+}
+
+static int
+eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ return -EPERM;
+
+ virtio_dev_stop(eth_dev);
+ virtio_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ /* reset interrupt callback */
+ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ rte_intr_callback_unregister(eth_dev->intr_handle,
+ virtio_interrupt_handler,
+ eth_dev);
+ if (eth_dev->device)
+ rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
+
+ PMD_INIT_LOG(DEBUG, "dev_uninit completed");
+
+ return 0;
+}
+
+static int vdpa_check_handler(__rte_unused const char *key,
+ const char *value, __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+static int
+vdpa_mode_selected(struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ const char *key = "vdpa";
+ int ret = 0;
+
+ if (devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key))
+ goto exit;
+
+ /* vdpa mode selected when there's a key-value pair: vdpa=1 */
+ if (rte_kvargs_process(kvlist, key,
+ vdpa_check_handler, NULL) < 0) {
+ goto exit;
+ }
+ ret = 1;
+
+exit:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ /* virtio pmd skips probe if device needs to work in vdpa mode */
+ if (vdpa_mode_selected(pci_dev->device.devargs))
+ return 1;
+
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
+ eth_virtio_dev_init);
+}
+
+static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
+}
+
+static struct rte_pci_driver rte_virtio_pmd = {
+ .driver = {
+ .name = "net_virtio",
+ },
+ .id_table = pci_id_virtio_map,
+ .drv_flags = 0,
+ .probe = eth_virtio_pci_probe,
+ .remove = eth_virtio_pci_remove,
+};
+
+RTE_INIT(rte_virtio_pmd_init)
+{
+ if (rte_eal_iopl_init() != 0) {
+ PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
+ return;
+ }
+
+ rte_pci_register(&rte_virtio_pmd);
+}
+
+static bool
+rx_offload_enabled(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
+}
+
+static bool
+tx_offload_enabled(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
+}
+
+/*
+ * Configure virtio device
+ * It returns 0 on success.
+ */
+static int
+virtio_dev_configure(struct rte_eth_dev *dev)
+{
+ const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
+ struct virtio_hw *hw = dev->data->dev_private;
+ uint64_t rx_offloads = rxmode->offloads;
+ uint64_t tx_offloads = txmode->offloads;
+ uint64_t req_features;
+ int ret;
+
+ PMD_INIT_LOG(DEBUG, "configure");
+ req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
+
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ ret = virtio_init_device(dev, hw->req_guest_features);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM))
+ req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
+
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+ req_features |=
+ (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6);
+
+ if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM))
+ req_features |= (1ULL << VIRTIO_NET_F_CSUM);
+
+ if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
+ req_features |=
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6);
+
+ /* if request features changed, reinit the device */
+ if (req_features != hw->req_guest_features) {
+ ret = virtio_init_device(dev, req_features);
+ if (ret < 0)
+ return ret;
+ }
+
+ if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM)) &&
+ !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
+ PMD_DRV_LOG(ERR,
+ "rx checksum not available on this host");
+ return -ENOTSUP;
+ }
+
+ if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
+ (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
+ PMD_DRV_LOG(ERR,
+ "Large Receive Offload not available on this host");
+ return -ENOTSUP;
+ }
+
+ /* start control queue */
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
+ virtio_dev_cq_start(dev);
+
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ hw->vlan_strip = 1;
+
+ if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
+ PMD_DRV_LOG(ERR,
+ "vlan filtering not available on this host");
+ return -ENOTSUP;
+ }
+
+ hw->has_tx_offload = tx_offload_enabled(hw);
+ hw->has_rx_offload = rx_offload_enabled(hw);
+
+ if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ /* Enable vector (0) for Link State Intrerrupt */
+ if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
+ VIRTIO_MSI_NO_VECTOR) {
+ PMD_DRV_LOG(ERR, "failed to set config vector");
+ return -EBUSY;
+ }
+
+ rte_spinlock_init(&hw->state_lock);
+
+ hw->use_simple_rx = 1;
+
+ if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
+ hw->use_inorder_tx = 1;
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ hw->use_inorder_rx = 1;
+ hw->use_simple_rx = 0;
+ } else {
+ hw->use_inorder_rx = 0;
+ }
+ }
+
+#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+ hw->use_simple_rx = 0;
+ }
+#endif
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ hw->use_simple_rx = 0;
+ }
+
+ if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_VLAN_STRIP))
+ hw->use_simple_rx = 0;
+
+ return 0;
+}
+
+
+static int
+virtio_dev_start(struct rte_eth_dev *dev)
+{
+ uint16_t nb_queues, i;
+ struct virtnet_rx *rxvq;
+ struct virtnet_tx *txvq __rte_unused;
+ struct virtio_hw *hw = dev->data->dev_private;
+ int ret;
+
+ /* Finish the initialization of the queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ ret = virtio_dev_rx_queue_setup_finish(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ ret = virtio_dev_tx_queue_setup_finish(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* check if lsc interrupt feature is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
+ PMD_DRV_LOG(ERR, "link status not supported by host");
+ return -ENOTSUP;
+ }
+ }
+
+ /* Enable uio/vfio intr/eventfd mapping: althrough we already did that
+ * in device configure, but it could be unmapped when device is
+ * stopped.
+ */
+ if (dev->data->dev_conf.intr_conf.lsc ||
+ dev->data->dev_conf.intr_conf.rxq) {
+ virtio_intr_disable(dev);
+
+ if (virtio_intr_enable(dev) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return -EIO;
+ }
+ }
+
+ /*Notify the backend
+ *Otherwise the tap backend might already stop its queue due to fullness.
+ *vhost backend will have no chance to be waked up
+ */
+ nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
+ if (hw->max_queue_pairs > 1) {
+ if (virtio_set_multiple_queues(dev, nb_queues) != 0)
+ return -EINVAL;
+ }
+
+ PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxvq = dev->data->rx_queues[i];
+ /* Flush the old packets */
+ virtqueue_rxvq_flush(rxvq->vq);
+ virtqueue_notify(rxvq->vq);
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txvq = dev->data->tx_queues[i];
+ virtqueue_notify(txvq->vq);
+ }
+
+ PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxvq = dev->data->rx_queues[i];
+ VIRTQUEUE_DUMP(rxvq->vq);
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txvq = dev->data->tx_queues[i];
+ VIRTQUEUE_DUMP(txvq->vq);
+ }
+
+ set_rxtx_funcs(dev);
+ hw->started = 1;
+
+ /* Initialize Link state */
+ virtio_dev_link_update(dev, 0);
+
+ return 0;
+}
+
+static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ uint16_t nr_vq = virtio_get_nr_vq(hw);
+ const char *type __rte_unused;
+ unsigned int i, mbuf_num = 0;
+ struct virtqueue *vq;
+ struct rte_mbuf *buf;
+ int queue_type;
+
+ if (hw->vqs == NULL)
+ return;
+
+ for (i = 0; i < nr_vq; i++) {
+ vq = hw->vqs[i];
+ if (!vq)
+ continue;
+
+ queue_type = virtio_get_queue_type(hw, i);
+ if (queue_type == VTNET_RQ)
+ type = "rxq";
+ else if (queue_type == VTNET_TQ)
+ type = "txq";
+ else
+ continue;
+
+ PMD_INIT_LOG(DEBUG,
+ "Before freeing %s[%d] used and unused buf",
+ type, i);
+ VIRTQUEUE_DUMP(vq);
+
+ while ((buf = virtqueue_detach_unused(vq)) != NULL) {
+ rte_pktmbuf_free(buf);
+ mbuf_num++;
+ }
+
+ PMD_INIT_LOG(DEBUG,
+ "After freeing %s[%d] used and unused buf",
+ type, i);
+ VIRTQUEUE_DUMP(vq);
+ }
+
+ PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
+}
+
+/*
+ * Stop device: disable interrupt and mark link down
+ */
+static void
+virtio_dev_stop(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct rte_eth_link link;
+ struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+
+ PMD_INIT_LOG(DEBUG, "stop");
+
+ rte_spinlock_lock(&hw->state_lock);
+ if (intr_conf->lsc || intr_conf->rxq)
+ virtio_intr_disable(dev);
+
+ hw->started = 0;
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(dev, &link);
+ rte_spinlock_unlock(&hw->state_lock);
+}
+
+static int
+virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
+{
+ struct rte_eth_link link;
+ uint16_t status;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ memset(&link, 0, sizeof(link));
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_FIXED;
+
+ if (hw->started == 0) {
+ link.link_status = ETH_LINK_DOWN;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ PMD_INIT_LOG(DEBUG, "Get link status from hw");
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, status),
+ &status, sizeof(status));
+ if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
+ link.link_status = ETH_LINK_DOWN;
+ PMD_INIT_LOG(DEBUG, "Port %d is down",
+ dev->data->port_id);
+ } else {
+ link.link_status = ETH_LINK_UP;
+ PMD_INIT_LOG(DEBUG, "Port %d is up",
+ dev->data->port_id);
+ }
+ } else {
+ link.link_status = ETH_LINK_UP;
+ }
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct virtio_hw *hw = dev->data->dev_private;
+ uint64_t offloads = rxmode->offloads;
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+ !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
+
+ PMD_DRV_LOG(NOTICE,
+ "vlan filtering not available on this host");
+
+ return -ENOTSUP;
+ }
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK)
+ hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+
+ return 0;
+}
+
+static void
+virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ uint64_t tso_mask, host_features;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
+
+ dev_info->max_rx_queues =
+ RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
+ dev_info->max_tx_queues =
+ RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
+ dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
+ dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
+ dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
+
+ host_features = VTPCI_OPS(hw)->get_features(hw);
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_CRC_STRIP;
+ if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
+ dev_info->rx_offload_capa |=
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM;
+ }
+ if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6);
+ if ((host_features & tso_mask) == tso_mask)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT;
+ if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
+ dev_info->tx_offload_capa |=
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+ }
+ tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6);
+ if ((host_features & tso_mask) == tso_mask)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+}
+
+/*
+ * It enables testpmd to collect per queue stats.
+ */
+static int
+virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
+__rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
+__rte_unused uint8_t is_rx)
+{
+ return 0;
+}
+
+RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
+RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(virtio_init_log)
+{
+ virtio_logtype_init = rte_log_register("pmd.net.virtio.init");
+ if (virtio_logtype_init >= 0)
+ rte_log_set_level(virtio_logtype_init, RTE_LOG_NOTICE);
+ virtio_logtype_driver = rte_log_register("pmd.net.virtio.driver");
+ if (virtio_logtype_driver >= 0)
+ rte_log_set_level(virtio_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_ethdev.h b/src/spdk/dpdk/drivers/net/virtio/virtio_ethdev.h
new file mode 100644
index 00000000..b726ad10
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_ethdev.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#ifndef _VIRTIO_ETHDEV_H_
+#define _VIRTIO_ETHDEV_H_
+
+#include <stdint.h>
+
+#include "virtio_pci.h"
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define VIRTIO_MAX_RX_QUEUES 128U
+#define VIRTIO_MAX_TX_QUEUES 128U
+#define VIRTIO_MAX_MAC_ADDRS 64
+#define VIRTIO_MIN_RX_BUFSIZE 64
+#define VIRTIO_MAX_RX_PKTLEN 9728U
+
+/* Features desired/implemented by this driver. */
+#define VIRTIO_PMD_DEFAULT_GUEST_FEATURES \
+ (1u << VIRTIO_NET_F_MAC | \
+ 1u << VIRTIO_NET_F_STATUS | \
+ 1u << VIRTIO_NET_F_MQ | \
+ 1u << VIRTIO_NET_F_CTRL_MAC_ADDR | \
+ 1u << VIRTIO_NET_F_CTRL_VQ | \
+ 1u << VIRTIO_NET_F_CTRL_RX | \
+ 1u << VIRTIO_NET_F_CTRL_VLAN | \
+ 1u << VIRTIO_NET_F_MRG_RXBUF | \
+ 1u << VIRTIO_NET_F_MTU | \
+ 1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE | \
+ 1u << VIRTIO_RING_F_INDIRECT_DESC | \
+ 1ULL << VIRTIO_F_VERSION_1 | \
+ 1ULL << VIRTIO_F_IN_ORDER | \
+ 1ULL << VIRTIO_F_IOMMU_PLATFORM)
+
+#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
+ (VIRTIO_PMD_DEFAULT_GUEST_FEATURES | \
+ 1u << VIRTIO_NET_F_GUEST_CSUM | \
+ 1u << VIRTIO_NET_F_GUEST_TSO4 | \
+ 1u << VIRTIO_NET_F_GUEST_TSO6)
+
+/*
+ * CQ function prototype
+ */
+void virtio_dev_cq_start(struct rte_eth_dev *dev);
+
+/*
+ * RX/TX function prototypes
+ */
+
+int virtio_dev_rx_queue_done(void *rxq, uint16_t offset);
+
+int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+int virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+
+int virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id);
+
+uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+int eth_virtio_dev_init(struct rte_eth_dev *eth_dev);
+
+void virtio_interrupt_handler(void *param);
+
+int virtio_dev_pause(struct rte_eth_dev *dev);
+void virtio_dev_resume(struct rte_eth_dev *dev);
+int virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
+ int nb_pkts);
+
+#endif /* _VIRTIO_ETHDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_logs.h b/src/spdk/dpdk/drivers/net/virtio/virtio_logs.h
new file mode 100644
index 00000000..9b1b1def
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_logs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _VIRTIO_LOGS_H_
+#define _VIRTIO_LOGS_H_
+
+#include <rte_log.h>
+
+extern int virtio_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_logtype_init, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() rx: " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() tx: " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+extern int virtio_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#endif /* _VIRTIO_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_pci.c b/src/spdk/dpdk/drivers/net/virtio/virtio_pci.c
new file mode 100644
index 00000000..6bd22e54
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_pci.c
@@ -0,0 +1,722 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+#include <stdint.h>
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ #include <dirent.h>
+ #include <fcntl.h>
+#endif
+
+#include <rte_io.h>
+#include <rte_bus.h>
+
+#include "virtio_pci.h"
+#include "virtio_logs.h"
+#include "virtqueue.h"
+
+/*
+ * Following macros are derived from linux/pci_regs.h, however,
+ * we can't simply include that header here, as there is no such
+ * file for non-Linux platform.
+ */
+#define PCI_CAPABILITY_LIST 0x34
+#define PCI_CAP_ID_VNDR 0x09
+#define PCI_CAP_ID_MSIX 0x11
+
+/*
+ * The remaining space is defined by each driver as the per-driver
+ * configuration space.
+ */
+#define VIRTIO_PCI_CONFIG(hw) \
+ (((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)
+
+static inline int
+check_vq_phys_addr_ok(struct virtqueue *vq)
+{
+ /* Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
+ * and only accepts 32 bit page frame number.
+ * Check if the allocated physical memory exceeds 16TB.
+ */
+ if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
+ (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Since we are in legacy mode:
+ * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
+ *
+ * "Note that this is possible because while the virtio header is PCI (i.e.
+ * little) endian, the device-specific region is encoded in the native endian of
+ * the guest (where such distinction is applicable)."
+ *
+ * For powerpc which supports both, qemu supposes that cpu is big endian and
+ * enforces this for the virtio-net stuff.
+ */
+static void
+legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
+ void *dst, int length)
+{
+#ifdef RTE_ARCH_PPC_64
+ int size;
+
+ while (length > 0) {
+ if (length >= 4) {
+ size = 4;
+ rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+ *(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
+ } else if (length >= 2) {
+ size = 2;
+ rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+ *(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
+ } else {
+ size = 1;
+ rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+ }
+
+ dst = (char *)dst + size;
+ offset += size;
+ length -= size;
+ }
+#else
+ rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+#endif
+}
+
+static void
+legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
+ const void *src, int length)
+{
+#ifdef RTE_ARCH_PPC_64
+ union {
+ uint32_t u32;
+ uint16_t u16;
+ } tmp;
+ int size;
+
+ while (length > 0) {
+ if (length >= 4) {
+ size = 4;
+ tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
+ rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+ } else if (length >= 2) {
+ size = 2;
+ tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
+ rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+ } else {
+ size = 1;
+ rte_pci_ioport_write(VTPCI_IO(hw), src, size,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+ }
+
+ src = (const char *)src + size;
+ offset += size;
+ length -= size;
+ }
+#else
+ rte_pci_ioport_write(VTPCI_IO(hw), src, length,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+#endif
+}
+
+static uint64_t
+legacy_get_features(struct virtio_hw *hw)
+{
+ uint32_t dst;
+
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
+ return dst;
+}
+
+static void
+legacy_set_features(struct virtio_hw *hw, uint64_t features)
+{
+ if ((features >> 32) != 0) {
+ PMD_DRV_LOG(ERR,
+ "only 32 bit features are allowed for legacy virtio!");
+ return;
+ }
+ rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
+ VIRTIO_PCI_GUEST_FEATURES);
+}
+
+static uint8_t
+legacy_get_status(struct virtio_hw *hw)
+{
+ uint8_t dst;
+
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
+ return dst;
+}
+
+static void
+legacy_set_status(struct virtio_hw *hw, uint8_t status)
+{
+ rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
+}
+
+static void
+legacy_reset(struct virtio_hw *hw)
+{
+ legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+}
+
+static uint8_t
+legacy_get_isr(struct virtio_hw *hw)
+{
+ uint8_t dst;
+
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
+ return dst;
+}
+
+/* Enable one vector (0) for Link State Intrerrupt */
+static uint16_t
+legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
+{
+ uint16_t dst;
+
+ rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
+ return dst;
+}
+
+static uint16_t
+legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
+{
+ uint16_t dst;
+
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ VIRTIO_PCI_QUEUE_SEL);
+ rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
+ return dst;
+}
+
+static uint16_t
+legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
+{
+ uint16_t dst;
+
+ rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
+ return dst;
+}
+
+static int
+legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ uint32_t src;
+
+ if (!check_vq_phys_addr_ok(vq))
+ return -1;
+
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ VIRTIO_PCI_QUEUE_SEL);
+ src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
+ rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
+
+ return 0;
+}
+
+static void
+legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ uint32_t src = 0;
+
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ VIRTIO_PCI_QUEUE_SEL);
+ rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
+}
+
+static void
+legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ VIRTIO_PCI_QUEUE_NOTIFY);
+}
+
+const struct virtio_pci_ops legacy_ops = {
+ .read_dev_cfg = legacy_read_dev_config,
+ .write_dev_cfg = legacy_write_dev_config,
+ .reset = legacy_reset,
+ .get_status = legacy_get_status,
+ .set_status = legacy_set_status,
+ .get_features = legacy_get_features,
+ .set_features = legacy_set_features,
+ .get_isr = legacy_get_isr,
+ .set_config_irq = legacy_set_config_irq,
+ .set_queue_irq = legacy_set_queue_irq,
+ .get_queue_num = legacy_get_queue_num,
+ .setup_queue = legacy_setup_queue,
+ .del_queue = legacy_del_queue,
+ .notify_queue = legacy_notify_queue,
+};
+
+static inline void
+io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
+{
+ rte_write32(val & ((1ULL << 32) - 1), lo);
+ rte_write32(val >> 32, hi);
+}
+
+static void
+modern_read_dev_config(struct virtio_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ int i;
+ uint8_t *p;
+ uint8_t old_gen, new_gen;
+
+ do {
+ old_gen = rte_read8(&hw->common_cfg->config_generation);
+
+ p = dst;
+ for (i = 0; i < length; i++)
+ *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);
+
+ new_gen = rte_read8(&hw->common_cfg->config_generation);
+ } while (old_gen != new_gen);
+}
+
+static void
+modern_write_dev_config(struct virtio_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ int i;
+ const uint8_t *p = src;
+
+ for (i = 0; i < length; i++)
+ rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
+}
+
+static uint64_t
+modern_get_features(struct virtio_hw *hw)
+{
+ uint32_t features_lo, features_hi;
+
+ rte_write32(0, &hw->common_cfg->device_feature_select);
+ features_lo = rte_read32(&hw->common_cfg->device_feature);
+
+ rte_write32(1, &hw->common_cfg->device_feature_select);
+ features_hi = rte_read32(&hw->common_cfg->device_feature);
+
+ return ((uint64_t)features_hi << 32) | features_lo;
+}
+
+static void
+modern_set_features(struct virtio_hw *hw, uint64_t features)
+{
+ rte_write32(0, &hw->common_cfg->guest_feature_select);
+ rte_write32(features & ((1ULL << 32) - 1),
+ &hw->common_cfg->guest_feature);
+
+ rte_write32(1, &hw->common_cfg->guest_feature_select);
+ rte_write32(features >> 32,
+ &hw->common_cfg->guest_feature);
+}
+
+static uint8_t
+modern_get_status(struct virtio_hw *hw)
+{
+ return rte_read8(&hw->common_cfg->device_status);
+}
+
+static void
+modern_set_status(struct virtio_hw *hw, uint8_t status)
+{
+ rte_write8(status, &hw->common_cfg->device_status);
+}
+
+static void
+modern_reset(struct virtio_hw *hw)
+{
+ modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ modern_get_status(hw);
+}
+
+static uint8_t
+modern_get_isr(struct virtio_hw *hw)
+{
+ return rte_read8(hw->isr);
+}
+
+static uint16_t
+modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
+{
+ rte_write16(vec, &hw->common_cfg->msix_config);
+ return rte_read16(&hw->common_cfg->msix_config);
+}
+
+static uint16_t
+modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vec, &hw->common_cfg->queue_msix_vector);
+ return rte_read16(&hw->common_cfg->queue_msix_vector);
+}
+
+static uint16_t
+modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
+{
+ rte_write16(queue_id, &hw->common_cfg->queue_select);
+ return rte_read16(&hw->common_cfg->queue_size);
+}
+
+static int
+modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ uint64_t desc_addr, avail_addr, used_addr;
+ uint16_t notify_off;
+
+ if (!check_vq_phys_addr_ok(vq))
+ return -1;
+
+ desc_addr = vq->vq_ring_mem;
+ avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
+ ring[vq->vq_nentries]),
+ VIRTIO_PCI_VRING_ALIGN);
+
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
+ vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
+ notify_off * hw->notify_off_multiplier);
+
+ rte_write16(1, &hw->common_cfg->queue_enable);
+
+ PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
+ PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
+ PMD_INIT_LOG(DEBUG, "\t aval_addr: %" PRIx64, avail_addr);
+ PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
+ PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
+ vq->notify_addr, notify_off);
+
+ return 0;
+}
+
+static void
+modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ rte_write16(0, &hw->common_cfg->queue_enable);
+}
+
+static void
+modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, vq->notify_addr);
+}
+
+const struct virtio_pci_ops modern_ops = {
+ .read_dev_cfg = modern_read_dev_config,
+ .write_dev_cfg = modern_write_dev_config,
+ .reset = modern_reset,
+ .get_status = modern_get_status,
+ .set_status = modern_set_status,
+ .get_features = modern_get_features,
+ .set_features = modern_set_features,
+ .get_isr = modern_get_isr,
+ .set_config_irq = modern_set_config_irq,
+ .set_queue_irq = modern_set_queue_irq,
+ .get_queue_num = modern_get_queue_num,
+ .setup_queue = modern_setup_queue,
+ .del_queue = modern_del_queue,
+ .notify_queue = modern_notify_queue,
+};
+
+
+void
+vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
+}
+
+void
+vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
+}
+
+uint64_t
+vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
+{
+ uint64_t features;
+
+ /*
+ * Limit negotiated features to what the driver, virtqueue, and
+ * host all support.
+ */
+ features = host_features & hw->guest_features;
+ VTPCI_OPS(hw)->set_features(hw, features);
+
+ return features;
+}
+
+void
+vtpci_reset(struct virtio_hw *hw)
+{
+ VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ /* flush status write */
+ VTPCI_OPS(hw)->get_status(hw);
+}
+
+void
+vtpci_reinit_complete(struct virtio_hw *hw)
+{
+ vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+}
+
+void
+vtpci_set_status(struct virtio_hw *hw, uint8_t status)
+{
+ if (status != VIRTIO_CONFIG_STATUS_RESET)
+ status |= VTPCI_OPS(hw)->get_status(hw);
+
+ VTPCI_OPS(hw)->set_status(hw, status);
+}
+
+uint8_t
+vtpci_get_status(struct virtio_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_status(hw);
+}
+
+uint8_t
+vtpci_isr(struct virtio_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_isr(hw);
+}
+
+static void *
+get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
+{
+ uint8_t bar = cap->bar;
+ uint32_t length = cap->length;
+ uint32_t offset = cap->offset;
+ uint8_t *base;
+
+ if (bar >= PCI_MAX_RESOURCE) {
+ PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
+ return NULL;
+ }
+
+ if (offset + length < offset) {
+ PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
+ offset, length);
+ return NULL;
+ }
+
+ if (offset + length > dev->mem_resource[bar].len) {
+ PMD_INIT_LOG(ERR,
+ "invalid cap: overflows bar space: %u > %" PRIu64,
+ offset + length, dev->mem_resource[bar].len);
+ return NULL;
+ }
+
+ base = dev->mem_resource[bar].addr;
+ if (base == NULL) {
+ PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
+ return NULL;
+ }
+
+ return base + offset;
+}
+
+#define PCI_MSIX_ENABLE 0x8000
+
+static int
+virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
+{
+ uint8_t pos;
+ struct virtio_pci_cap cap;
+ int ret;
+
+ if (rte_pci_map_device(dev)) {
+ PMD_INIT_LOG(DEBUG, "failed to map pci device!");
+ return -1;
+ }
+
+ ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
+ if (ret < 0) {
+ PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
+ return -1;
+ }
+
+ while (pos) {
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR,
+ "failed to read pci cap at pos: %x", pos);
+ break;
+ }
+
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
+ /* Transitional devices would also have this capability,
+ * that's why we also check if msix is enabled.
+ * 1st byte is cap ID; 2nd byte is the position of next
+ * cap; next two bytes are the flags.
+ */
+ uint16_t flags = ((uint16_t *)&cap)[1];
+
+ if (flags & PCI_MSIX_ENABLE)
+ hw->use_msix = VIRTIO_MSIX_ENABLED;
+ else
+ hw->use_msix = VIRTIO_MSIX_DISABLED;
+ }
+
+ if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
+ PMD_INIT_LOG(DEBUG,
+ "[%2x] skipping non VNDR cap id: %02x",
+ pos, cap.cap_vndr);
+ goto next;
+ }
+
+ PMD_INIT_LOG(DEBUG,
+ "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
+ pos, cap.cfg_type, cap.bar, cap.offset, cap.length);
+
+ switch (cap.cfg_type) {
+ case VIRTIO_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_NOTIFY_CFG:
+ rte_pci_read_config(dev, &hw->notify_off_multiplier,
+ 4, pos + sizeof(cap));
+ hw->notify_base = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_DEVICE_CFG:
+ hw->dev_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_ISR_CFG:
+ hw->isr = get_cfg_addr(dev, &cap);
+ break;
+ }
+
+next:
+ pos = cap.cap_next;
+ }
+
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->dev_cfg == NULL || hw->isr == NULL) {
+ PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
+ return -1;
+ }
+
+ PMD_INIT_LOG(INFO, "found modern virtio pci device.");
+
+ PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", hw->common_cfg);
+ PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", hw->dev_cfg);
+ PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", hw->isr);
+ PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
+ hw->notify_base, hw->notify_off_multiplier);
+
+ return 0;
+}
+
+/*
+ * Return -1:
+ * if there is error mapping with VFIO/UIO.
+ * if port map error when driver type is KDRV_NONE.
+ * if whitelisted but driver type is KDRV_UNKNOWN.
+ * Return 1 if kernel driver is managing the device.
+ * Return 0 on success.
+ */
+int
+vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
+{
+ /*
+ * Try if we can succeed reading virtio pci caps, which exists
+ * only on modern pci device. If failed, we fallback to legacy
+ * virtio handling.
+ */
+ if (virtio_read_caps(dev, hw) == 0) {
+ PMD_INIT_LOG(INFO, "modern virtio pci detected.");
+ virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
+ hw->modern = 1;
+ return 0;
+ }
+
+ PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
+ if (rte_pci_ioport_map(dev, 0, VTPCI_IO(hw)) < 0) {
+ if (dev->kdrv == RTE_KDRV_UNKNOWN &&
+ (!dev->device.devargs ||
+ dev->device.devargs->bus !=
+ rte_bus_find_by_name("pci"))) {
+ PMD_INIT_LOG(INFO,
+ "skip kernel managed virtio device.");
+ return 1;
+ }
+ return -1;
+ }
+
+ virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
+ hw->modern = 0;
+
+ return 0;
+}
+
+enum virtio_msix_status
+vtpci_msix_detect(struct rte_pci_device *dev)
+{
+ uint8_t pos;
+ struct virtio_pci_cap cap;
+ int ret;
+
+ ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
+ if (ret < 0) {
+ PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
+ return VIRTIO_MSIX_NONE;
+ }
+
+ while (pos) {
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR,
+ "failed to read pci cap at pos: %x", pos);
+ break;
+ }
+
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
+ uint16_t flags = ((uint16_t *)&cap)[1];
+
+ if (flags & PCI_MSIX_ENABLE)
+ return VIRTIO_MSIX_ENABLED;
+ else
+ return VIRTIO_MSIX_DISABLED;
+ }
+
+ pos = cap.cap_next;
+ }
+
+ return VIRTIO_MSIX_NONE;
+}
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_pci.h b/src/spdk/dpdk/drivers/net/virtio/virtio_pci.h
new file mode 100644
index 00000000..58fdd3d4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_pci.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _VIRTIO_PCI_H_
+#define _VIRTIO_PCI_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_ethdev_driver.h>
+
+struct virtqueue;
+struct virtnet_ctl;
+
+/* VirtIO PCI vendor/device ID. */
+#define VIRTIO_PCI_VENDORID 0x1AF4
+#define VIRTIO_PCI_LEGACY_DEVICEID_NET 0x1000
+#define VIRTIO_PCI_MODERN_DEVICEID_NET 0x1041
+
+/* VirtIO ABI version, this must match exactly. */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+/*
+ * VirtIO Header, located in BAR 0.
+ */
+#define VIRTIO_PCI_HOST_FEATURES 0 /* host's supported features (32bit, RO)*/
+#define VIRTIO_PCI_GUEST_FEATURES 4 /* guest's supported features (32, RW) */
+#define VIRTIO_PCI_QUEUE_PFN 8 /* physical address of VQ (32, RW) */
+#define VIRTIO_PCI_QUEUE_NUM 12 /* number of ring entries (16, RO) */
+#define VIRTIO_PCI_QUEUE_SEL 14 /* current VQ selection (16, RW) */
+#define VIRTIO_PCI_QUEUE_NOTIFY 16 /* notify host regarding VQ (16, RW) */
+#define VIRTIO_PCI_STATUS 18 /* device status register (8, RW) */
+#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading
+ * also clears the register (8, RO) */
+/* Only if MSIX is enabled: */
+#define VIRTIO_MSI_CONFIG_VECTOR 20 /* configuration change vector (16, RW) */
+#define VIRTIO_MSI_QUEUE_VECTOR 22 /* vector for selected VQ notifications
+ (16, RW) */
+
+/* The bit of the ISR which indicates a device has an interrupt. */
+#define VIRTIO_PCI_ISR_INTR 0x1
+/* The bit of the ISR which indicates a device configuration change. */
+#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* Vector value used to disable MSI for queue. */
+#define VIRTIO_MSI_NO_VECTOR 0xFFFF
+
+/* VirtIO device IDs. */
+#define VIRTIO_ID_NETWORK 0x01
+#define VIRTIO_ID_BLOCK 0x02
+#define VIRTIO_ID_CONSOLE 0x03
+#define VIRTIO_ID_ENTROPY 0x04
+#define VIRTIO_ID_BALLOON 0x05
+#define VIRTIO_ID_IOMEMORY 0x06
+#define VIRTIO_ID_9P 0x09
+
+/* Status byte for guest to report progress. */
+#define VIRTIO_CONFIG_STATUS_RESET 0x00
+#define VIRTIO_CONFIG_STATUS_ACK 0x01
+#define VIRTIO_CONFIG_STATUS_DRIVER 0x02
+#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
+#define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x08
+#define VIRTIO_CONFIG_STATUS_FAILED 0x80
+
+/*
+ * Each virtqueue indirect descriptor list must be physically contiguous.
+ * To allow us to malloc(9) each list individually, limit the number
+ * supported to what will fit in one page. With 4KB pages, this is a limit
+ * of 256 descriptors. If there is ever a need for more, we can switch to
+ * contigmalloc(9) for the larger allocations, similar to what
+ * bus_dmamem_alloc(9) does.
+ *
+ * Note the sizeof(struct vring_desc) is 16 bytes.
+ */
+#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
+
+/* The feature bitmap for virtio net */
+#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
+#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_MTU 3 /* Initial MTU advice. */
+#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
+#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
+#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
+#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */
+#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */
+#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
+#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
+#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */
+#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
+#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
+#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
+#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
+#define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the
+ * network */
+#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
+ * Steering */
+#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
+
+/* Do we get callbacks when the ring is completely used, even if we've
+ * suppressed them? */
+#define VIRTIO_F_NOTIFY_ON_EMPTY 24
+
+/* Can the device handle any descriptor layout? */
+#define VIRTIO_F_ANY_LAYOUT 27
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+#define VIRTIO_F_VERSION_1 32
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+/*
+ * Some VirtIO feature bits (currently bits 28 through 31) are
+ * reserved for the transport being used (eg. virtio_ring), the
+ * rest are per-device feature bits.
+ */
+#define VIRTIO_TRANSPORT_F_START 28
+#define VIRTIO_TRANSPORT_F_END 34
+
+/*
+ * Inorder feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER 35
+
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field. */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field. */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
+#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */
+
+/*
+ * Maximum number of virtqueues per device.
+ */
+#define VIRTIO_MAX_VIRTQUEUE_PAIRS 8
+#define VIRTIO_MAX_VIRTQUEUES (VIRTIO_MAX_VIRTQUEUE_PAIRS * 2 + 1)
+
+/* Common configuration */
+#define VIRTIO_PCI_CAP_COMMON_CFG 1
+/* Notifications */
+#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
+/* ISR Status */
+#define VIRTIO_PCI_CAP_ISR_CFG 3
+/* Device specific configuration */
+#define VIRTIO_PCI_CAP_DEVICE_CFG 4
+/* PCI configuration access */
+#define VIRTIO_PCI_CAP_PCI_CFG 5
+
+/* This is the PCI capability header: */
+struct virtio_pci_cap {
+ uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ uint8_t cap_next; /* Generic PCI field: next ptr. */
+ uint8_t cap_len; /* Generic PCI field: capability length */
+ uint8_t cfg_type; /* Identifies the structure. */
+ uint8_t bar; /* Where to find it. */
+ uint8_t padding[3]; /* Pad to full dword. */
+ uint32_t offset; /* Offset within bar. */
+ uint32_t length; /* Length of the structure, in bytes. */
+};
+
+struct virtio_pci_notify_cap {
+ struct virtio_pci_cap cap;
+ uint32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
+struct virtio_pci_common_cfg {
+ /* About the whole device. */
+ uint32_t device_feature_select; /* read-write */
+ uint32_t device_feature; /* read-only */
+ uint32_t guest_feature_select; /* read-write */
+ uint32_t guest_feature; /* read-write */
+ uint16_t msix_config; /* read-write */
+ uint16_t num_queues; /* read-only */
+ uint8_t device_status; /* read-write */
+ uint8_t config_generation; /* read-only */
+
+ /* About a specific virtqueue. */
+ uint16_t queue_select; /* read-write */
+ uint16_t queue_size; /* read-write, power of 2. */
+ uint16_t queue_msix_vector; /* read-write */
+ uint16_t queue_enable; /* read-write */
+ uint16_t queue_notify_off; /* read-only */
+ uint32_t queue_desc_lo; /* read-write */
+ uint32_t queue_desc_hi; /* read-write */
+ uint32_t queue_avail_lo; /* read-write */
+ uint32_t queue_avail_hi; /* read-write */
+ uint32_t queue_used_lo; /* read-write */
+ uint32_t queue_used_hi; /* read-write */
+};
+
+struct virtio_hw;
+
+struct virtio_pci_ops {
+ void (*read_dev_cfg)(struct virtio_hw *hw, size_t offset,
+ void *dst, int len);
+ void (*write_dev_cfg)(struct virtio_hw *hw, size_t offset,
+ const void *src, int len);
+ void (*reset)(struct virtio_hw *hw);
+
+ uint8_t (*get_status)(struct virtio_hw *hw);
+ void (*set_status)(struct virtio_hw *hw, uint8_t status);
+
+ uint64_t (*get_features)(struct virtio_hw *hw);
+ void (*set_features)(struct virtio_hw *hw, uint64_t features);
+
+ uint8_t (*get_isr)(struct virtio_hw *hw);
+
+ uint16_t (*set_config_irq)(struct virtio_hw *hw, uint16_t vec);
+
+ uint16_t (*set_queue_irq)(struct virtio_hw *hw, struct virtqueue *vq,
+ uint16_t vec);
+
+ uint16_t (*get_queue_num)(struct virtio_hw *hw, uint16_t queue_id);
+ int (*setup_queue)(struct virtio_hw *hw, struct virtqueue *vq);
+ void (*del_queue)(struct virtio_hw *hw, struct virtqueue *vq);
+ void (*notify_queue)(struct virtio_hw *hw, struct virtqueue *vq);
+};
+
+struct virtio_net_config;
+
+struct virtio_hw {
+ struct virtnet_ctl *cvq;
+ uint64_t req_guest_features;
+ uint64_t guest_features;
+ uint32_t max_queue_pairs;
+ uint16_t started;
+ uint16_t max_mtu;
+ uint16_t vtnet_hdr_size;
+ uint8_t vlan_strip;
+ uint8_t use_msix;
+ uint8_t modern;
+ uint8_t use_simple_rx;
+ uint8_t use_inorder_rx;
+ uint8_t use_inorder_tx;
+ bool has_tx_offload;
+ bool has_rx_offload;
+ uint16_t port_id;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint32_t notify_off_multiplier;
+ uint8_t *isr;
+ uint16_t *notify_base;
+ struct virtio_pci_common_cfg *common_cfg;
+ struct virtio_net_config *dev_cfg;
+ void *virtio_user_dev;
+ /*
+ * App management thread and virtio interrupt handler thread
+ * both can change device state, this lock is meant to avoid
+ * such a contention.
+ */
+ rte_spinlock_t state_lock;
+ struct rte_mbuf **inject_pkts;
+
+ struct virtqueue **vqs;
+};
+
+
+/*
+ * While virtio_hw is stored in shared memory, this structure stores
+ * some infos that may vary in the multiple process model locally.
+ * For example, the vtpci_ops pointer.
+ */
+struct virtio_hw_internal {
+ const struct virtio_pci_ops *vtpci_ops;
+ struct rte_pci_ioport io;
+};
+
+#define VTPCI_OPS(hw) (virtio_hw_internal[(hw)->port_id].vtpci_ops)
+#define VTPCI_IO(hw) (&virtio_hw_internal[(hw)->port_id].io)
+
+extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
+
+
+/*
+ * This structure is just a reference to read
+ * net device specific config space; it just a chodu structure
+ *
+ */
+struct virtio_net_config {
+ /* The config defining mac address (if VIRTIO_NET_F_MAC) */
+ uint8_t mac[ETHER_ADDR_LEN];
+ /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
+ uint16_t status;
+ uint16_t max_virtqueue_pairs;
+ uint16_t mtu;
+} __attribute__((packed));
+
+/*
+ * How many bits to shift physical queue address written to QUEUE_PFN.
+ * 12 is historical, and due to x86 page size.
+ */
+#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
+
+/* The alignment to use between consumer and producer parts of vring. */
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+enum virtio_msix_status {
+ VIRTIO_MSIX_NONE = 0,
+ VIRTIO_MSIX_DISABLED = 1,
+ VIRTIO_MSIX_ENABLED = 2
+};
+
+static inline int
+vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
+{
+ return (hw->guest_features & (1ULL << bit)) != 0;
+}
+
+/*
+ * Function declaration from virtio_pci.c
+ */
+int vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw);
+void vtpci_reset(struct virtio_hw *);
+
+void vtpci_reinit_complete(struct virtio_hw *);
+
+uint8_t vtpci_get_status(struct virtio_hw *);
+void vtpci_set_status(struct virtio_hw *, uint8_t);
+
+uint64_t vtpci_negotiate_features(struct virtio_hw *, uint64_t);
+
+void vtpci_write_dev_config(struct virtio_hw *, size_t, const void *, int);
+
+void vtpci_read_dev_config(struct virtio_hw *, size_t, void *, int);
+
+uint8_t vtpci_isr(struct virtio_hw *);
+
+enum virtio_msix_status vtpci_msix_detect(struct rte_pci_device *dev);
+
+extern const struct virtio_pci_ops legacy_ops;
+extern const struct virtio_pci_ops modern_ops;
+extern const struct virtio_pci_ops virtio_user_ops;
+
+#endif /* _VIRTIO_PCI_H_ */
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_ring.h b/src/spdk/dpdk/drivers/net/virtio/virtio_ring.h
new file mode 100644
index 00000000..9e3c2a01
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_ring.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _VIRTIO_RING_H_
+#define _VIRTIO_RING_H_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me
+ * when you add a buffer. It's unreliable, so it's simply an
+ * optimization. Guest will still kick if it's out of buffers. */
+#define VRING_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't
+ * interrupt me when you consume a buffer. It's unreliable, so it's
+ * simply an optimization. */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* VirtIO ring descriptors: 16 bytes.
+ * These can chain together via "next". */
+struct vring_desc {
+ uint64_t addr; /* Address (guest-physical). */
+ uint32_t len; /* Length. */
+ uint16_t flags; /* The flags as indicated above. */
+ uint16_t next; /* We chain unused descriptors via this. */
+};
+
+struct vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[0];
+};
+
+/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */
+struct vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was written to. */
+ uint32_t len;
+};
+
+struct vring_used {
+ uint16_t flags;
+ volatile uint16_t idx;
+ struct vring_used_elem ring[0];
+};
+
+struct vring {
+ unsigned int num;
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+};
+
+/* The standard layout for the ring is a continuous chunk of memory which
+ * looks like this. We assume num is a power of 2.
+ *
+ * struct vring {
+ * // The actual descriptors (16 bytes each)
+ * struct vring_desc desc[num];
+ *
+ * // A ring of available descriptor heads with free-running index.
+ * __u16 avail_flags;
+ * __u16 avail_idx;
+ * __u16 available[num];
+ * __u16 used_event_idx;
+ *
+ * // Padding to the next align boundary.
+ * char pad[];
+ *
+ * // A ring of used descriptor heads with free-running index.
+ * __u16 used_flags;
+ * __u16 used_idx;
+ * struct vring_used_elem used[num];
+ * __u16 avail_event_idx;
+ * };
+ *
+ * NOTE: for VirtIO PCI, align is 4096.
+ */
+
+/*
+ * We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility.
+ */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
+static inline size_t
+vring_size(unsigned int num, unsigned long align)
+{
+ size_t size;
+
+ size = num * sizeof(struct vring_desc);
+ size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
+ size = RTE_ALIGN_CEIL(size, align);
+ size += sizeof(struct vring_used) +
+ (num * sizeof(struct vring_used_elem));
+ return size;
+}
+
+static inline void
+vring_init(struct vring *vr, unsigned int num, uint8_t *p,
+ unsigned long align)
+{
+ vr->num = num;
+ vr->desc = (struct vring_desc *) p;
+ vr->avail = (struct vring_avail *) (p +
+ num * sizeof(struct vring_desc));
+ vr->used = (void *)
+ RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
+}
+
+/*
+ * The following is used with VIRTIO_RING_F_EVENT_IDX.
+ * Assuming a given event_idx value from the other size, if we have
+ * just incremented index from old to new_idx, should we trigger an
+ * event?
+ */
+static inline int
+vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+ return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
+
+#endif /* _VIRTIO_RING_H_ */
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx.c b/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx.c
new file mode 100644
index 00000000..eb891433
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx.c
@@ -0,0 +1,1555 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_prefetch.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_byteorder.h>
+#include <rte_net.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+
+#include "virtio_logs.h"
+#include "virtio_ethdev.h"
+#include "virtio_pci.h"
+#include "virtqueue.h"
+#include "virtio_rxtx.h"
+#include "virtio_rxtx_simple.h"
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
+#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
+#else
+#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
+#endif
+
+int
+virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
+{
+ struct virtnet_rx *rxvq = rxq;
+ struct virtqueue *vq = rxvq->vq;
+
+ return VIRTQUEUE_NUSED(vq) >= offset;
+}
+
+void
+vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
+{
+ vq->vq_free_cnt += num;
+ vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
+}
+
+void
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+ struct vring_desc *dp, *dp_tail;
+ struct vq_desc_extra *dxp;
+ uint16_t desc_idx_last = desc_idx;
+
+ dp = &vq->vq_ring.desc[desc_idx];
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+ if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+ while (dp->flags & VRING_DESC_F_NEXT) {
+ desc_idx_last = dp->next;
+ dp = &vq->vq_ring.desc[dp->next];
+ }
+ }
+ dxp->ndescs = 0;
+
+ /*
+ * We must append the existing free chain, if any, to the end of
+ * newly freed chain. If the virtqueue was completely used, then
+ * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
+ */
+ if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+ vq->vq_desc_head_idx = desc_idx;
+ } else {
+ dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+ dp_tail->next = desc_idx;
+ }
+
+ vq->vq_desc_tail_idx = desc_idx_last;
+ dp->next = VQ_RING_DESC_CHAIN_END;
+}
+
+static uint16_t
+virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
+ uint32_t *len, uint16_t num)
+{
+ struct vring_used_elem *uep;
+ struct rte_mbuf *cookie;
+ uint16_t used_idx, desc_idx;
+ uint16_t i;
+
+ /* Caller does the check */
+ for (i = 0; i < num ; i++) {
+ used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t) uep->id;
+ len[i] = uep->len;
+ cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
+
+ if (unlikely(cookie == NULL)) {
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+
+ rte_prefetch0(cookie);
+ rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+ rx_pkts[i] = cookie;
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
+ vq->vq_descx[desc_idx].cookie = NULL;
+ }
+
+ return i;
+}
+
+static uint16_t
+virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
+ struct rte_mbuf **rx_pkts,
+ uint32_t *len,
+ uint16_t num)
+{
+ struct vring_used_elem *uep;
+ struct rte_mbuf *cookie;
+ uint16_t used_idx = 0;
+ uint16_t i;
+
+ if (unlikely(num == 0))
+ return 0;
+
+ for (i = 0; i < num; i++) {
+ used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
+ /* Desc idx same as used idx */
+ uep = &vq->vq_ring.used->ring[used_idx];
+ len[i] = uep->len;
+ cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
+
+ if (unlikely(cookie == NULL)) {
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+
+ rte_prefetch0(cookie);
+ rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+ rx_pkts[i] = cookie;
+ vq->vq_used_cons_idx++;
+ vq->vq_descx[used_idx].cookie = NULL;
+ }
+
+ vq_ring_free_inorder(vq, used_idx, i);
+ return i;
+}
+
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
+/* Cleanup from completed transmits. */
+static void
+virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
+{
+ uint16_t i, used_idx, desc_idx;
+ for (i = 0; i < num; i++) {
+ struct vring_used_elem *uep;
+ struct vq_desc_extra *dxp;
+
+ used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+
+ desc_idx = (uint16_t) uep->id;
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
+
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ }
+}
+
+/* Cleanup from completed inorder transmits. */
+static void
+virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
+{
+ uint16_t i, used_idx, desc_idx = 0, last_idx;
+ int16_t free_cnt = 0;
+ struct vq_desc_extra *dxp = NULL;
+
+ if (unlikely(num == 0))
+ return;
+
+ for (i = 0; i < num; i++) {
+ struct vring_used_elem *uep;
+
+ used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t)uep->id;
+
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_used_cons_idx++;
+
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ }
+
+ last_idx = desc_idx + dxp->ndescs - 1;
+ free_cnt = last_idx - vq->vq_desc_tail_idx;
+ if (free_cnt <= 0)
+ free_cnt += vq->vq_nentries;
+
+ vq_ring_free_inorder(vq, last_idx, free_cnt);
+}
+
+static inline int
+virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
+ struct rte_mbuf **cookies,
+ uint16_t num)
+{
+ struct vq_desc_extra *dxp;
+ struct virtio_hw *hw = vq->hw;
+ struct vring_desc *start_dp;
+ uint16_t head_idx, idx, i = 0;
+
+ if (unlikely(vq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(vq->vq_free_cnt < num))
+ return -EMSGSIZE;
+
+ head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
+ start_dp = vq->vq_ring.desc;
+
+ while (i < num) {
+ idx = head_idx & (vq->vq_nentries - 1);
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookies[i];
+ dxp->ndescs = 1;
+
+ start_dp[idx].addr =
+ VIRTIO_MBUF_ADDR(cookies[i], vq) +
+ RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ start_dp[idx].len =
+ cookies[i]->buf_len -
+ RTE_PKTMBUF_HEADROOM +
+ hw->vtnet_hdr_size;
+ start_dp[idx].flags = VRING_DESC_F_WRITE;
+
+ vq_update_avail_ring(vq, idx);
+ head_idx++;
+ i++;
+ }
+
+ vq->vq_desc_head_idx += num;
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ return 0;
+}
+
+static inline int
+virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
+{
+ struct vq_desc_extra *dxp;
+ struct virtio_hw *hw = vq->hw;
+ struct vring_desc *start_dp;
+ uint16_t needed = 1;
+ uint16_t head_idx, idx;
+
+ if (unlikely(vq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(vq->vq_free_cnt < needed))
+ return -EMSGSIZE;
+
+ head_idx = vq->vq_desc_head_idx;
+ if (unlikely(head_idx >= vq->vq_nentries))
+ return -EFAULT;
+
+ idx = head_idx;
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookie;
+ dxp->ndescs = needed;
+
+ start_dp = vq->vq_ring.desc;
+ start_dp[idx].addr =
+ VIRTIO_MBUF_ADDR(cookie, vq) +
+ RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ start_dp[idx].len =
+ cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+ start_dp[idx].flags = VRING_DESC_F_WRITE;
+ idx = start_dp[idx].next;
+ vq->vq_desc_head_idx = idx;
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = idx;
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+ vq_update_avail_ring(vq, head_idx);
+
+ return 0;
+}
+
+/* When doing TSO, the IP length is not included in the pseudo header
+ * checksum of the packet given to the PMD, but for virtio it is
+ * expected.
+ */
+static void
+virtio_tso_fix_cksum(struct rte_mbuf *m)
+{
+ /* common case: header is not fragmented */
+ if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
+ m->l4_len)) {
+ struct ipv4_hdr *iph;
+ struct ipv6_hdr *ip6h;
+ struct tcp_hdr *th;
+ uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
+ uint32_t tmp;
+
+ iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ th = RTE_PTR_ADD(iph, m->l3_len);
+ if ((iph->version_ihl >> 4) == 4) {
+ iph->hdr_checksum = 0;
+ iph->hdr_checksum = rte_ipv4_cksum(iph);
+ ip_len = iph->total_length;
+ ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
+ m->l3_len);
+ } else {
+ ip6h = (struct ipv6_hdr *)iph;
+ ip_paylen = ip6h->payload_len;
+ }
+
+ /* calculate the new phdr checksum not including ip_paylen */
+ prev_cksum = th->cksum;
+ tmp = prev_cksum;
+ tmp += ip_paylen;
+ tmp = (tmp & 0xffff) + (tmp >> 16);
+ new_cksum = tmp;
+
+ /* replace it in the packet */
+ th->cksum = new_cksum;
+ }
+}
+
+
+/* avoid write operation when necessary, to lessen cache issues */
+#define ASSIGN_UNLESS_EQUAL(var, val) do { \
+ if ((var) != (val)) \
+ (var) = (val); \
+} while (0)
+
+static inline void
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
+ struct rte_mbuf *cookie,
+ bool offload)
+{
+ if (offload) {
+ if (cookie->ol_flags & PKT_TX_TCP_SEG)
+ cookie->ol_flags |= PKT_TX_TCP_CKSUM;
+
+ switch (cookie->ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_offset = offsetof(struct udp_hdr,
+ dgram_cksum);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ break;
+
+ case PKT_TX_TCP_CKSUM:
+ hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ break;
+
+ default:
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ break;
+ }
+
+ /* TCP Segmentation Offload */
+ if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+ virtio_tso_fix_cksum(cookie);
+ hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+ VIRTIO_NET_HDR_GSO_TCPV6 :
+ VIRTIO_NET_HDR_GSO_TCPV4;
+ hdr->gso_size = cookie->tso_segsz;
+ hdr->hdr_len =
+ cookie->l2_len +
+ cookie->l3_len +
+ cookie->l4_len;
+ } else {
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+ }
+ }
+}
+
+static inline void
+virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
+ struct rte_mbuf **cookies,
+ uint16_t num)
+{
+ struct vq_desc_extra *dxp;
+ struct virtqueue *vq = txvq->vq;
+ struct vring_desc *start_dp;
+ struct virtio_net_hdr *hdr;
+ uint16_t idx;
+ uint16_t head_size = vq->hw->vtnet_hdr_size;
+ uint16_t i = 0;
+
+ idx = vq->vq_desc_head_idx;
+ start_dp = vq->vq_ring.desc;
+
+ while (i < num) {
+ idx = idx & (vq->vq_nentries - 1);
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookies[i];
+ dxp->ndescs = 1;
+
+ hdr = (struct virtio_net_hdr *)
+ rte_pktmbuf_prepend(cookies[i], head_size);
+ cookies[i]->pkt_len -= head_size;
+
+ /* if offload disabled, it is not zeroed below, do it now */
+ if (!vq->hw->has_tx_offload) {
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+ }
+
+ virtqueue_xmit_offload(hdr, cookies[i],
+ vq->hw->has_tx_offload);
+
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
+ start_dp[idx].len = cookies[i]->data_len;
+ start_dp[idx].flags = 0;
+
+ vq_update_avail_ring(vq, idx);
+
+ idx++;
+ i++;
+ };
+
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
+}
+
+static inline void
+virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
+ uint16_t needed, int use_indirect, int can_push,
+ int in_order)
+{
+ struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+ struct vq_desc_extra *dxp;
+ struct virtqueue *vq = txvq->vq;
+ struct vring_desc *start_dp;
+ uint16_t seg_num = cookie->nb_segs;
+ uint16_t head_idx, idx;
+ uint16_t head_size = vq->hw->vtnet_hdr_size;
+ struct virtio_net_hdr *hdr;
+
+ head_idx = vq->vq_desc_head_idx;
+ idx = head_idx;
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookie;
+ dxp->ndescs = needed;
+
+ start_dp = vq->vq_ring.desc;
+
+ if (can_push) {
+ /* prepend cannot fail, checked by caller */
+ hdr = (struct virtio_net_hdr *)
+ rte_pktmbuf_prepend(cookie, head_size);
+ /* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
+ * which is wrong. Below subtract restores correct pkt size.
+ */
+ cookie->pkt_len -= head_size;
+
+ /* if offload disabled, it is not zeroed below, do it now */
+ if (!vq->hw->has_tx_offload) {
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+ }
+ } else if (use_indirect) {
+ /* setup tx ring slot to point to indirect
+ * descriptor list stored in reserved region.
+ *
+ * the first slot in indirect ring is already preset
+ * to point to the header in reserved region
+ */
+ start_dp[idx].addr = txvq->virtio_net_hdr_mem +
+ RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
+ start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
+ start_dp[idx].flags = VRING_DESC_F_INDIRECT;
+ hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+
+ /* loop below will fill in rest of the indirect elements */
+ start_dp = txr[idx].tx_indir;
+ idx = 1;
+ } else {
+ /* setup first tx ring slot to point to header
+ * stored in reserved region.
+ */
+ start_dp[idx].addr = txvq->virtio_net_hdr_mem +
+ RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+ start_dp[idx].len = vq->hw->vtnet_hdr_size;
+ start_dp[idx].flags = VRING_DESC_F_NEXT;
+ hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+
+ idx = start_dp[idx].next;
+ }
+
+ virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+
+ do {
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+ start_dp[idx].len = cookie->data_len;
+ start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+ idx = start_dp[idx].next;
+ } while ((cookie = cookie->next) != NULL);
+
+ if (use_indirect)
+ idx = vq->vq_ring.desc[head_idx].next;
+
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+
+ vq->vq_desc_head_idx = idx;
+ vq_update_avail_ring(vq, head_idx);
+
+ if (!in_order) {
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = idx;
+ }
+}
+
+void
+virtio_dev_cq_start(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (hw->cvq && hw->cvq->vq) {
+ rte_spinlock_init(&hw->cvq->lock);
+ VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
+ }
+}
+
+int
+virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+ struct virtnet_rx *rxvq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (nb_desc == 0 || nb_desc > vq->vq_nentries)
+ nb_desc = vq->vq_nentries;
+ vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+
+ rxvq = &vq->rxq;
+ rxvq->queue_id = queue_idx;
+ rxvq->mpool = mp;
+ if (rxvq->mpool == NULL) {
+ rte_exit(EXIT_FAILURE,
+ "Cannot allocate mbufs for rx virtqueue");
+ }
+
+ dev->data->rx_queues[queue_idx] = rxvq;
+
+ return 0;
+}
+
+int
+virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+ struct virtnet_rx *rxvq = &vq->rxq;
+ struct rte_mbuf *m;
+ uint16_t desc_idx;
+ int error, nbufs, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Allocate blank mbufs for the each rx descriptor */
+ nbufs = 0;
+
+ if (hw->use_simple_rx) {
+ for (desc_idx = 0; desc_idx < vq->vq_nentries;
+ desc_idx++) {
+ vq->vq_ring.avail->ring[desc_idx] = desc_idx;
+ vq->vq_ring.desc[desc_idx].flags =
+ VRING_DESC_F_WRITE;
+ }
+
+ virtio_rxq_vec_setup(rxvq);
+ }
+
+ memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
+ for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
+ desc_idx++) {
+ vq->sw_ring[vq->vq_nentries + desc_idx] =
+ &rxvq->fake_mbuf;
+ }
+
+ if (hw->use_simple_rx) {
+ while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ virtio_rxq_rearm_vec(rxvq);
+ nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ }
+ } else if (hw->use_inorder_rx) {
+ if ((!virtqueue_full(vq))) {
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *pkts[free_cnt];
+
+ if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
+ free_cnt)) {
+ error = virtqueue_enqueue_refill_inorder(vq,
+ pkts,
+ free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
+ }
+
+ nbufs += free_cnt;
+ vq_update_avail_idx(vq);
+ }
+ } else {
+ while (!virtqueue_full(vq)) {
+ m = rte_mbuf_raw_alloc(rxvq->mpool);
+ if (m == NULL)
+ break;
+
+ /* Enqueue allocated buffers */
+ error = virtqueue_enqueue_recv_refill(vq, m);
+ if (error) {
+ rte_pktmbuf_free(m);
+ break;
+ }
+ nbufs++;
+ }
+
+ vq_update_avail_idx(vq);
+ }
+
+ PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
+
+ VIRTQUEUE_DUMP(vq);
+
+ return 0;
+}
+
+/*
+ * struct rte_eth_dev *dev: Used to update dev
+ * uint16_t nb_desc: Defaults to values read from config space
+ * unsigned int socket_id: Used to allocate memzone
+ * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
+ * uint16_t queue_idx: Just used as an index in dev txq list
+ */
+int
+virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf)
+{
+ uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+ struct virtnet_tx *txvq;
+ uint16_t tx_free_thresh;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (nb_desc == 0 || nb_desc > vq->vq_nentries)
+ nb_desc = vq->vq_nentries;
+ vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+
+ txvq = &vq->txq;
+ txvq->queue_id = queue_idx;
+
+ tx_free_thresh = tx_conf->tx_free_thresh;
+ if (tx_free_thresh == 0)
+ tx_free_thresh =
+ RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
+
+ if (tx_free_thresh >= (vq->vq_nentries - 3)) {
+ RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
+ "number of TX entries minus 3 (%u)."
+ " (tx_free_thresh=%u port=%u queue=%u)\n",
+ vq->vq_nentries - 3,
+ tx_free_thresh, dev->data->port_id, queue_idx);
+ return -EINVAL;
+ }
+
+ vq->vq_free_thresh = tx_free_thresh;
+
+ dev->data->tx_queues[queue_idx] = txvq;
+ return 0;
+}
+
+int
+virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
+ uint16_t queue_idx)
+{
+ uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->use_inorder_tx)
+ vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
+
+ VIRTQUEUE_DUMP(vq);
+
+ return 0;
+}
+
+static void
+virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
+{
+ int error;
+ /*
+ * Requeue the discarded mbuf. This should always be
+ * successful since it was just dequeued.
+ */
+ error = virtqueue_enqueue_recv_refill(vq, m);
+
+ if (unlikely(error)) {
+ RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+ rte_pktmbuf_free(m);
+ }
+}
+
+static void
+virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
+{
+ int error;
+
+ error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
+ if (unlikely(error)) {
+ RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+ rte_pktmbuf_free(m);
+ }
+}
+
+static void
+virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
+{
+ uint32_t s = mbuf->pkt_len;
+ struct ether_addr *ea;
+
+ if (s == 64) {
+ stats->size_bins[1]++;
+ } else if (s > 64 && s < 1024) {
+ uint32_t bin;
+
+ /* count zeros, and offset into correct bin */
+ bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
+ stats->size_bins[bin]++;
+ } else {
+ if (s < 64)
+ stats->size_bins[0]++;
+ else if (s < 1519)
+ stats->size_bins[6]++;
+ else if (s >= 1519)
+ stats->size_bins[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ stats->broadcast++;
+ else
+ stats->multicast++;
+ }
+}
+
+static inline void
+virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
+{
+ VIRTIO_DUMP_PACKET(m, m->data_len);
+
+ rxvq->stats.bytes += m->pkt_len;
+ virtio_update_packet_stats(&rxvq->stats, m);
+}
+
+/* Optionally fill offload information in structure */
+static int
+virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
+{
+ struct rte_net_hdr_lens hdr_lens;
+ uint32_t hdrlen, ptype;
+ int l4_supported = 0;
+
+ /* nothing to do */
+ if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
+ return 0;
+
+ m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+
+ ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
+ m->packet_type = ptype;
+ if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
+ (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
+ (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
+ l4_supported = 1;
+
+ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
+ if (hdr->csum_start <= hdrlen && l4_supported) {
+ m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+ } else {
+ /* Unknown proto or tunnel, do sw cksum. We can assume
+ * the cksum field is in the first segment since the
+ * buffers we provided to the host are large enough.
+ * In case of SCTP, this will be wrong since it's a CRC
+ * but there's nothing we can do.
+ */
+ uint16_t csum = 0, off;
+
+ rte_raw_cksum_mbuf(m, hdr->csum_start,
+ rte_pktmbuf_pkt_len(m) - hdr->csum_start,
+ &csum);
+ if (likely(csum != 0xffff))
+ csum = ~csum;
+ off = hdr->csum_offset + hdr->csum_start;
+ if (rte_pktmbuf_data_len(m) >= off + 1)
+ *rte_pktmbuf_mtod_offset(m, uint16_t *,
+ off) = csum;
+ }
+ } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
+ m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+
+ /* GSO request, save required information in mbuf */
+ if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ /* Check unsupported modes */
+ if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
+ (hdr->gso_size == 0)) {
+ return -EINVAL;
+ }
+
+ /* Update mss lengthes in mbuf */
+ m->tso_segsz = hdr->gso_size;
+ switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ m->ol_flags |= PKT_RX_LRO | \
+ PKT_RX_L4_CKSUM_NONE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+#define VIRTIO_MBUF_BURST_SZ 64
+#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
+uint16_t
+virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ struct rte_mbuf *rxm, *new_mbuf;
+ uint16_t nb_used, num, nb_rx;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ int error;
+ uint32_t i, nb_enqueued;
+ uint32_t hdr_size;
+ struct virtio_net_hdr *hdr;
+
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ virtio_rmb();
+
+ num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
+ if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
+ num = VIRTIO_MBUF_BURST_SZ;
+ if (likely(num > DESC_PER_CACHELINE))
+ num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
+
+ num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
+ PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
+
+ nb_enqueued = 0;
+ hdr_size = hw->vtnet_hdr_size;
+
+ for (i = 0; i < num ; i++) {
+ rxm = rcv_pkts[i];
+
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
+
+ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop");
+ nb_enqueued++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ rxm->port = rxvq->port_id;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+
+ rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+ rxm->data_len = (uint16_t)(len[i] - hdr_size);
+
+ hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
+ RTE_PKTMBUF_HEADROOM - hdr_size);
+
+ if (hw->vlan_strip)
+ rte_vlan_strip(rxm);
+
+ if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ virtio_rx_stats_updated(rxvq, rxm);
+
+ rx_pkts[nb_rx++] = rxm;
+ }
+
+ rxvq->stats.packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+ while (likely(!virtqueue_full(vq))) {
+ new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
+ if (unlikely(new_mbuf == NULL)) {
+ struct rte_eth_dev *dev
+ = &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(new_mbuf);
+ break;
+ }
+ nb_enqueued++;
+ }
+
+ if (likely(nb_enqueued)) {
+ vq_update_avail_idx(vq);
+
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ }
+
+ return nb_rx;
+}
+
+uint16_t
+virtio_recv_mergeable_pkts_inorder(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *prev;
+ uint16_t nb_used, num, nb_rx;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ int error;
+ uint32_t nb_enqueued;
+ uint32_t seg_num;
+ uint32_t seg_res;
+ uint32_t hdr_size;
+ int32_t i;
+
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+ nb_used = RTE_MIN(nb_used, nb_pkts);
+ nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
+
+ virtio_rmb();
+
+ PMD_RX_LOG(DEBUG, "used:%d", nb_used);
+
+ nb_enqueued = 0;
+ seg_num = 1;
+ seg_res = 0;
+ hdr_size = hw->vtnet_hdr_size;
+
+ num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
+
+ for (i = 0; i < num; i++) {
+ struct virtio_net_hdr_mrg_rxbuf *header;
+
+ PMD_RX_LOG(DEBUG, "dequeue:%d", num);
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
+
+ rxm = rcv_pkts[i];
+
+ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop");
+ nb_enqueued++;
+ virtio_discard_rxbuf_inorder(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ header = (struct virtio_net_hdr_mrg_rxbuf *)
+ ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
+ - hdr_size);
+ seg_num = header->num_buffers;
+
+ if (seg_num == 0)
+ seg_num = 1;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->nb_segs = seg_num;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+ rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+ rxm->data_len = (uint16_t)(len[i] - hdr_size);
+
+ rxm->port = rxvq->port_id;
+
+ rx_pkts[nb_rx] = rxm;
+ prev = rxm;
+
+ if (vq->hw->has_rx_offload &&
+ virtio_rx_offload(rxm, &header->hdr) < 0) {
+ virtio_discard_rxbuf_inorder(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ if (hw->vlan_strip)
+ rte_vlan_strip(rx_pkts[nb_rx]);
+
+ seg_res = seg_num - 1;
+
+ /* Merge remaining segments */
+ while (seg_res != 0 && i < (num - 1)) {
+ i++;
+
+ rxm = rcv_pkts[i];
+ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[i]);
+ rxm->data_len = (uint16_t)(len[i]);
+
+ rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
+ rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
+
+ if (prev)
+ prev->next = rxm;
+
+ prev = rxm;
+ seg_res -= 1;
+ }
+
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
+ }
+ }
+
+ /* Last packet still need merge segments */
+ while (seg_res != 0) {
+ uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
+ VIRTIO_MBUF_BURST_SZ);
+
+ prev = rcv_pkts[nb_rx];
+ if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
+ num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
+ rcv_cnt);
+ uint16_t extra_idx = 0;
+
+ rcv_cnt = num;
+ while (extra_idx < rcv_cnt) {
+ rxm = rcv_pkts[extra_idx];
+ rxm->data_off =
+ RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[extra_idx]);
+ rxm->data_len = (uint16_t)(len[extra_idx]);
+ prev->next = rxm;
+ prev = rxm;
+ rx_pkts[nb_rx]->pkt_len += len[extra_idx];
+ rx_pkts[nb_rx]->data_len += len[extra_idx];
+ extra_idx += 1;
+ };
+ seg_res -= rcv_cnt;
+
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
+ }
+ } else {
+ PMD_RX_LOG(ERR,
+ "No enough segments for packet.");
+ virtio_discard_rxbuf_inorder(vq, prev);
+ rxvq->stats.errors++;
+ break;
+ }
+ }
+
+ rxvq->stats.packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+
+ if (likely(!virtqueue_full(vq))) {
+ /* free_cnt may include mrg descs */
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *new_pkts[free_cnt];
+
+ if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
+ error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
+ free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(new_pkts[i]);
+ }
+ nb_enqueued += free_cnt;
+ } else {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed += free_cnt;
+ }
+ }
+
+ if (likely(nb_enqueued)) {
+ vq_update_avail_idx(vq);
+
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ }
+
+ return nb_rx;
+}
+
+uint16_t
+virtio_recv_mergeable_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ struct rte_mbuf *rxm, *new_mbuf;
+ uint16_t nb_used, num, nb_rx;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *prev;
+ int error;
+ uint32_t i, nb_enqueued;
+ uint32_t seg_num;
+ uint16_t extra_idx;
+ uint32_t seg_res;
+ uint32_t hdr_size;
+
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ virtio_rmb();
+
+ PMD_RX_LOG(DEBUG, "used:%d", nb_used);
+
+ i = 0;
+ nb_enqueued = 0;
+ seg_num = 0;
+ extra_idx = 0;
+ seg_res = 0;
+ hdr_size = hw->vtnet_hdr_size;
+
+ while (i < nb_used) {
+ struct virtio_net_hdr_mrg_rxbuf *header;
+
+ if (nb_rx == nb_pkts)
+ break;
+
+ num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
+ if (num != 1)
+ continue;
+
+ i++;
+
+ PMD_RX_LOG(DEBUG, "dequeue:%d", num);
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[0]);
+
+ rxm = rcv_pkts[0];
+
+ if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop");
+ nb_enqueued++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr +
+ RTE_PKTMBUF_HEADROOM - hdr_size);
+ seg_num = header->num_buffers;
+
+ if (seg_num == 0)
+ seg_num = 1;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->nb_segs = seg_num;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+ rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
+ rxm->data_len = (uint16_t)(len[0] - hdr_size);
+
+ rxm->port = rxvq->port_id;
+ rx_pkts[nb_rx] = rxm;
+ prev = rxm;
+
+ if (hw->has_rx_offload &&
+ virtio_rx_offload(rxm, &header->hdr) < 0) {
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ seg_res = seg_num - 1;
+
+ while (seg_res != 0) {
+ /*
+ * Get extra segments for current uncompleted packet.
+ */
+ uint16_t rcv_cnt =
+ RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
+ if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
+ uint32_t rx_num =
+ virtqueue_dequeue_burst_rx(vq,
+ rcv_pkts, len, rcv_cnt);
+ i += rx_num;
+ rcv_cnt = rx_num;
+ } else {
+ PMD_RX_LOG(ERR,
+ "No enough segments for packet.");
+ nb_enqueued++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ break;
+ }
+
+ extra_idx = 0;
+
+ while (extra_idx < rcv_cnt) {
+ rxm = rcv_pkts[extra_idx];
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[extra_idx]);
+ rxm->data_len = (uint16_t)(len[extra_idx]);
+
+ if (prev)
+ prev->next = rxm;
+
+ prev = rxm;
+ rx_pkts[nb_rx]->pkt_len += rxm->pkt_len;
+ extra_idx++;
+ };
+ seg_res -= rcv_cnt;
+ }
+
+ if (hw->vlan_strip)
+ rte_vlan_strip(rx_pkts[nb_rx]);
+
+ VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
+ rx_pkts[nb_rx]->data_len);
+
+ rxvq->stats.bytes += rx_pkts[nb_rx]->pkt_len;
+ virtio_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
+ nb_rx++;
+ }
+
+ rxvq->stats.packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+ while (likely(!virtqueue_full(vq))) {
+ new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
+ if (unlikely(new_mbuf == NULL)) {
+ struct rte_eth_dev *dev
+ = &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(new_mbuf);
+ break;
+ }
+ nb_enqueued++;
+ }
+
+ if (likely(nb_enqueued)) {
+ vq_update_avail_idx(vq);
+
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ }
+
+ return nb_rx;
+}
+
+uint16_t
+virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct virtnet_tx *txvq = tx_queue;
+ struct virtqueue *vq = txvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ uint16_t hdr_size = hw->vtnet_hdr_size;
+ uint16_t nb_used, nb_tx = 0;
+ int error;
+
+ if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
+ return nb_tx;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+
+ PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ virtio_rmb();
+ if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
+ virtio_xmit_cleanup(vq, nb_used);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx];
+ int can_push = 0, use_indirect = 0, slots, need;
+
+ /* Do VLAN tag insertion */
+ if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
+ error = rte_vlan_insert(&txm);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(txm);
+ continue;
+ }
+ }
+
+ /* optimize ring usage */
+ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+ rte_mbuf_refcnt_read(txm) == 1 &&
+ RTE_MBUF_DIRECT(txm) &&
+ txm->nb_segs == 1 &&
+ rte_pktmbuf_headroom(txm) >= hdr_size &&
+ rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
+ __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
+ can_push = 1;
+ else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+ txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
+ use_indirect = 1;
+
+ /* How many main ring entries are needed to this Tx?
+ * any_layout => number of segments
+ * indirect => 1
+ * default => number of segments + 1
+ */
+ slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
+ need = slots - vq->vq_free_cnt;
+
+ /* Positive value indicates it need free vring descriptors */
+ if (unlikely(need > 0)) {
+ nb_used = VIRTQUEUE_NUSED(vq);
+ virtio_rmb();
+ need = RTE_MIN(need, (int)nb_used);
+
+ virtio_xmit_cleanup(vq, need);
+ need = slots - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ break;
+ }
+ }
+
+ /* Enqueue Packet buffers */
+ virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
+ can_push, 0);
+
+ txvq->stats.bytes += txm->pkt_len;
+ virtio_update_packet_stats(&txvq->stats, txm);
+ }
+
+ txvq->stats.packets += nb_tx;
+
+ if (likely(nb_tx)) {
+ vq_update_avail_idx(vq);
+
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+ }
+ }
+
+ return nb_tx;
+}
+
+uint16_t
+virtio_xmit_pkts_inorder(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_tx *txvq = tx_queue;
+ struct virtqueue *vq = txvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ uint16_t hdr_size = hw->vtnet_hdr_size;
+ uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
+ struct rte_mbuf *inorder_pkts[nb_pkts];
+ int error;
+
+ if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
+ return nb_tx;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+
+ VIRTQUEUE_DUMP(vq);
+ PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ virtio_rmb();
+ if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
+ virtio_xmit_cleanup_inorder(vq, nb_used);
+
+ if (unlikely(!vq->vq_free_cnt))
+ virtio_xmit_cleanup_inorder(vq, nb_used);
+
+ nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
+
+ for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx];
+ int slots, need;
+
+ /* Do VLAN tag insertion */
+ if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
+ error = rte_vlan_insert(&txm);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(txm);
+ continue;
+ }
+ }
+
+ /* optimize ring usage */
+ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+ rte_mbuf_refcnt_read(txm) == 1 &&
+ RTE_MBUF_DIRECT(txm) &&
+ txm->nb_segs == 1 &&
+ rte_pktmbuf_headroom(txm) >= hdr_size &&
+ rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
+ __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
+ inorder_pkts[nb_inorder_pkts] = txm;
+ nb_inorder_pkts++;
+
+ txvq->stats.bytes += txm->pkt_len;
+ virtio_update_packet_stats(&txvq->stats, txm);
+ continue;
+ }
+
+ if (nb_inorder_pkts) {
+ virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
+ nb_inorder_pkts);
+ nb_inorder_pkts = 0;
+ }
+
+ slots = txm->nb_segs + 1;
+ need = slots - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ nb_used = VIRTQUEUE_NUSED(vq);
+ virtio_rmb();
+ need = RTE_MIN(need, (int)nb_used);
+
+ virtio_xmit_cleanup_inorder(vq, need);
+
+ need = slots - vq->vq_free_cnt;
+
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ break;
+ }
+ }
+ /* Enqueue Packet buffers */
+ virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
+
+ txvq->stats.bytes += txm->pkt_len;
+ virtio_update_packet_stats(&txvq->stats, txm);
+ }
+
+ /* Transmit all inorder packets */
+ if (nb_inorder_pkts)
+ virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
+ nb_inorder_pkts);
+
+ txvq->stats.packets += nb_tx;
+
+ if (likely(nb_tx)) {
+ vq_update_avail_idx(vq);
+
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+ }
+ }
+
+ VIRTQUEUE_DUMP(vq);
+
+ return nb_tx;
+}
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx.h b/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx.h
new file mode 100644
index 00000000..685cc4f8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#ifndef _VIRTIO_RXTX_H_
+#define _VIRTIO_RXTX_H_
+
+#define RTE_PMD_VIRTIO_RX_MAX_BURST 64
+
+struct virtnet_stats {
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+ uint64_t multicast;
+ uint64_t broadcast;
+ /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
+ uint64_t size_bins[8];
+};
+
+struct virtnet_rx {
+ struct virtqueue *vq;
+ /* dummy mbuf, for wraparound when processing RX ring. */
+ struct rte_mbuf fake_mbuf;
+ uint64_t mbuf_initializer; /**< value to init mbufs. */
+ struct rte_mempool *mpool; /**< mempool for mbuf allocation */
+
+ uint16_t queue_id; /**< DPDK queue index. */
+ uint16_t port_id; /**< Device port identifier. */
+
+ /* Statistics */
+ struct virtnet_stats stats;
+
+ const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
+};
+
+struct virtnet_tx {
+ struct virtqueue *vq;
+ /**< memzone to populate hdr. */
+ const struct rte_memzone *virtio_net_hdr_mz;
+ rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+
+ uint16_t queue_id; /**< DPDK queue index. */
+ uint16_t port_id; /**< Device port identifier. */
+
+ /* Statistics */
+ struct virtnet_stats stats;
+
+ const struct rte_memzone *mz; /**< mem zone to populate TX ring. */
+};
+
+struct virtnet_ctl {
+ struct virtqueue *vq;
+ /**< memzone to populate hdr. */
+ const struct rte_memzone *virtio_net_hdr_mz;
+ rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+ uint16_t port_id; /**< Device port identifier. */
+ const struct rte_memzone *mz; /**< mem zone to populate CTL ring. */
+ rte_spinlock_t lock; /**< spinlock for control queue. */
+};
+
+int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
+
+#endif /* _VIRTIO_RXTX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple.c b/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple.c
new file mode 100644
index 00000000..31e565b4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple.c
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_prefetch.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_byteorder.h>
+
+#include "virtio_rxtx_simple.h"
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+int __attribute__((cold))
+virtio_rxq_vec_setup(struct virtnet_rx *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+
+ return 0;
+}
+
+/* Stub for linkage when arch specific implementation is not available */
+uint16_t __attribute__((weak))
+virtio_recv_pkts_vec(void *rx_queue __rte_unused,
+ struct rte_mbuf **rx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ rte_panic("Wrong weak function linked by linker\n");
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple.h b/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple.h
new file mode 100644
index 00000000..dc97e4cc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#ifndef _VIRTIO_RXTX_SIMPLE_H_
+#define _VIRTIO_RXTX_SIMPLE_H_
+
+#include <stdint.h>
+
+#include "virtio_logs.h"
+#include "virtio_ethdev.h"
+#include "virtqueue.h"
+#include "virtio_rxtx.h"
+
+#define RTE_VIRTIO_VPMD_RX_BURST 32
+#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST
+
+static inline void
+virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
+{
+ int i;
+ uint16_t desc_idx;
+ struct rte_mbuf **sw_ring;
+ struct vring_desc *start_dp;
+ int ret;
+ struct virtqueue *vq = rxvq->vq;
+
+ desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
+ sw_ring = &vq->sw_ring[desc_idx];
+ start_dp = &vq->vq_ring.desc[desc_idx];
+
+ ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
+ RTE_VIRTIO_VPMD_RX_REARM_THRESH);
+ if (unlikely(ret)) {
+ rte_eth_devices[rxvq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ return;
+ }
+
+ for (i = 0; i < RTE_VIRTIO_VPMD_RX_REARM_THRESH; i++) {
+ uintptr_t p;
+
+ p = (uintptr_t)&sw_ring[i]->rearm_data;
+ *(uint64_t *)p = rxvq->mbuf_initializer;
+
+ start_dp[i].addr =
+ VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
+ RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
+ start_dp[i].len = sw_ring[i]->buf_len -
+ RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
+ }
+
+ vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ vq_update_avail_idx(vq);
+}
+
+#endif /* _VIRTIO_RXTX_SIMPLE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple_neon.c b/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple_neon.c
new file mode 100644
index 00000000..d6207d7b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_byteorder.h>
+#include <rte_branch_prediction.h>
+#include <rte_cycles.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_prefetch.h>
+#include <rte_string_fns.h>
+#include <rte_vect.h>
+
+#include "virtio_rxtx_simple.h"
+
+#define RTE_VIRTIO_DESC_PER_LOOP 8
+
+/* virtio vPMD receive routine, only accept(nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP)
+ *
+ * This routine is for non-mergeable RX, one desc for each guest buffer.
+ * This routine is based on the RX ring layout optimization. Each entry in the
+ * avail ring points to the desc with the same index in the desc ring and this
+ * will never be changed in the driver.
+ *
+ * - nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, just return no packet
+ */
+uint16_t
+virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ uint16_t nb_used;
+ uint16_t desc_idx;
+ struct vring_used_elem *rused;
+ struct rte_mbuf **sw_ring;
+ struct rte_mbuf **sw_ring_end;
+ uint16_t nb_pkts_received = 0;
+
+ uint8x16_t shuf_msk1 = {
+ 0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
+ 4, 5, 0xFF, 0xFF, /* pkt len */
+ 4, 5, /* dat len */
+ 0xFF, 0xFF, /* vlan tci */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ };
+
+ uint8x16_t shuf_msk2 = {
+ 0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
+ 12, 13, 0xFF, 0xFF, /* pkt len */
+ 12, 13, /* dat len */
+ 0xFF, 0xFF, /* vlan tci */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ };
+
+ /* Subtract the header length.
+ * In which case do we need the header length in used->len ?
+ */
+ uint16x8_t len_adjust = {
+ 0, 0,
+ (uint16_t)vq->hw->vtnet_hdr_size, 0,
+ (uint16_t)vq->hw->vtnet_hdr_size,
+ 0,
+ 0, 0
+ };
+
+ if (unlikely(hw->started == 0))
+ return nb_pkts_received;
+
+ if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
+ return 0;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ rte_rmb();
+
+ if (unlikely(nb_used == 0))
+ return 0;
+
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
+ nb_used = RTE_MIN(nb_used, nb_pkts);
+
+ desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+ rused = &vq->vq_ring.used->ring[desc_idx];
+ sw_ring = &vq->sw_ring[desc_idx];
+ sw_ring_end = &vq->sw_ring[vq->vq_nentries];
+
+ rte_prefetch_non_temporal(rused);
+
+ if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ virtio_rxq_rearm_vec(rxvq);
+ if (unlikely(virtqueue_kick_prepare(vq)))
+ virtqueue_notify(vq);
+ }
+
+ for (nb_pkts_received = 0;
+ nb_pkts_received < nb_used;) {
+ uint64x2_t desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
+ uint64x2_t mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
+ uint64x2_t pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
+
+ mbp[0] = vld1q_u64((uint64_t *)(sw_ring + 0));
+ desc[0] = vld1q_u64((uint64_t *)(rused + 0));
+ vst1q_u64((uint64_t *)&rx_pkts[0], mbp[0]);
+
+ mbp[1] = vld1q_u64((uint64_t *)(sw_ring + 2));
+ desc[1] = vld1q_u64((uint64_t *)(rused + 2));
+ vst1q_u64((uint64_t *)&rx_pkts[2], mbp[1]);
+
+ mbp[2] = vld1q_u64((uint64_t *)(sw_ring + 4));
+ desc[2] = vld1q_u64((uint64_t *)(rused + 4));
+ vst1q_u64((uint64_t *)&rx_pkts[4], mbp[2]);
+
+ mbp[3] = vld1q_u64((uint64_t *)(sw_ring + 6));
+ desc[3] = vld1q_u64((uint64_t *)(rused + 6));
+ vst1q_u64((uint64_t *)&rx_pkts[6], mbp[3]);
+
+ pkt_mb[1] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[0]), shuf_msk2));
+ pkt_mb[0] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[0]), shuf_msk1));
+ pkt_mb[1] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[1]), len_adjust));
+ pkt_mb[0] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[0]), len_adjust));
+ vst1q_u64((void *)&rx_pkts[1]->rx_descriptor_fields1,
+ pkt_mb[1]);
+ vst1q_u64((void *)&rx_pkts[0]->rx_descriptor_fields1,
+ pkt_mb[0]);
+
+ pkt_mb[3] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[1]), shuf_msk2));
+ pkt_mb[2] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[1]), shuf_msk1));
+ pkt_mb[3] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[3]), len_adjust));
+ pkt_mb[2] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[2]), len_adjust));
+ vst1q_u64((void *)&rx_pkts[3]->rx_descriptor_fields1,
+ pkt_mb[3]);
+ vst1q_u64((void *)&rx_pkts[2]->rx_descriptor_fields1,
+ pkt_mb[2]);
+
+ pkt_mb[5] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[2]), shuf_msk2));
+ pkt_mb[4] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[2]), shuf_msk1));
+ pkt_mb[5] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[5]), len_adjust));
+ pkt_mb[4] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[4]), len_adjust));
+ vst1q_u64((void *)&rx_pkts[5]->rx_descriptor_fields1,
+ pkt_mb[5]);
+ vst1q_u64((void *)&rx_pkts[4]->rx_descriptor_fields1,
+ pkt_mb[4]);
+
+ pkt_mb[7] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[3]), shuf_msk2));
+ pkt_mb[6] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[3]), shuf_msk1));
+ pkt_mb[7] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[7]), len_adjust));
+ pkt_mb[6] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[6]), len_adjust));
+ vst1q_u64((void *)&rx_pkts[7]->rx_descriptor_fields1,
+ pkt_mb[7]);
+ vst1q_u64((void *)&rx_pkts[6]->rx_descriptor_fields1,
+ pkt_mb[6]);
+
+ if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
+ if (sw_ring + nb_used <= sw_ring_end)
+ nb_pkts_received += nb_used;
+ else
+ nb_pkts_received += sw_ring_end - sw_ring;
+ break;
+ } else {
+ if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
+ sw_ring_end)) {
+ nb_pkts_received += sw_ring_end - sw_ring;
+ break;
+ } else {
+ nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;
+
+ rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
+ sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
+ rused += RTE_VIRTIO_DESC_PER_LOOP;
+ nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
+ }
+ }
+ }
+
+ vq->vq_used_cons_idx += nb_pkts_received;
+ vq->vq_free_cnt += nb_pkts_received;
+ rxvq->stats.packets += nb_pkts_received;
+ return nb_pkts_received;
+}
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple_sse.c b/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple_sse.c
new file mode 100644
index 00000000..d768d075
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple_sse.c
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <tmmintrin.h>
+
+#include <rte_byteorder.h>
+#include <rte_branch_prediction.h>
+#include <rte_cycles.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_prefetch.h>
+#include <rte_string_fns.h>
+
+#include "virtio_rxtx_simple.h"
+
+#define RTE_VIRTIO_DESC_PER_LOOP 8
+
+/* virtio vPMD receive routine, only accept(nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP)
+ *
+ * This routine is for non-mergeable RX, one desc for each guest buffer.
+ * This routine is based on the RX ring layout optimization. Each entry in the
+ * avail ring points to the desc with the same index in the desc ring and this
+ * will never be changed in the driver.
+ *
+ * - nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, just return no packet
+ */
+uint16_t
+virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ uint16_t nb_used;
+ uint16_t desc_idx;
+ struct vring_used_elem *rused;
+ struct rte_mbuf **sw_ring;
+ struct rte_mbuf **sw_ring_end;
+ uint16_t nb_pkts_received = 0;
+ __m128i shuf_msk1, shuf_msk2, len_adjust;
+
+ shuf_msk1 = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, /* vlan tci */
+ 5, 4, /* dat len */
+ 0xFF, 0xFF, 5, 4, /* pkt len */
+ 0xFF, 0xFF, 0xFF, 0xFF /* packet type */
+
+ );
+
+ shuf_msk2 = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, /* vlan tci */
+ 13, 12, /* dat len */
+ 0xFF, 0xFF, 13, 12, /* pkt len */
+ 0xFF, 0xFF, 0xFF, 0xFF /* packet type */
+ );
+
+ /* Subtract the header length.
+ * In which case do we need the header length in used->len ?
+ */
+ len_adjust = _mm_set_epi16(
+ 0, 0,
+ 0,
+ (uint16_t)-vq->hw->vtnet_hdr_size,
+ 0, (uint16_t)-vq->hw->vtnet_hdr_size,
+ 0, 0);
+
+ if (unlikely(hw->started == 0))
+ return nb_pkts_received;
+
+ if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
+ return 0;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ rte_compiler_barrier();
+
+ if (unlikely(nb_used == 0))
+ return 0;
+
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
+ nb_used = RTE_MIN(nb_used, nb_pkts);
+
+ desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+ rused = &vq->vq_ring.used->ring[desc_idx];
+ sw_ring = &vq->sw_ring[desc_idx];
+ sw_ring_end = &vq->sw_ring[vq->vq_nentries];
+
+ rte_prefetch0(rused);
+
+ if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ virtio_rxq_rearm_vec(rxvq);
+ if (unlikely(virtqueue_kick_prepare(vq)))
+ virtqueue_notify(vq);
+ }
+
+ for (nb_pkts_received = 0;
+ nb_pkts_received < nb_used;) {
+ __m128i desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
+ __m128i mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
+ __m128i pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
+
+ mbp[0] = _mm_loadu_si128((__m128i *)(sw_ring + 0));
+ desc[0] = _mm_loadu_si128((__m128i *)(rused + 0));
+ _mm_storeu_si128((__m128i *)&rx_pkts[0], mbp[0]);
+
+ mbp[1] = _mm_loadu_si128((__m128i *)(sw_ring + 2));
+ desc[1] = _mm_loadu_si128((__m128i *)(rused + 2));
+ _mm_storeu_si128((__m128i *)&rx_pkts[2], mbp[1]);
+
+ mbp[2] = _mm_loadu_si128((__m128i *)(sw_ring + 4));
+ desc[2] = _mm_loadu_si128((__m128i *)(rused + 4));
+ _mm_storeu_si128((__m128i *)&rx_pkts[4], mbp[2]);
+
+ mbp[3] = _mm_loadu_si128((__m128i *)(sw_ring + 6));
+ desc[3] = _mm_loadu_si128((__m128i *)(rused + 6));
+ _mm_storeu_si128((__m128i *)&rx_pkts[6], mbp[3]);
+
+ pkt_mb[1] = _mm_shuffle_epi8(desc[0], shuf_msk2);
+ pkt_mb[0] = _mm_shuffle_epi8(desc[0], shuf_msk1);
+ pkt_mb[1] = _mm_add_epi16(pkt_mb[1], len_adjust);
+ pkt_mb[0] = _mm_add_epi16(pkt_mb[0], len_adjust);
+ _mm_storeu_si128((void *)&rx_pkts[1]->rx_descriptor_fields1,
+ pkt_mb[1]);
+ _mm_storeu_si128((void *)&rx_pkts[0]->rx_descriptor_fields1,
+ pkt_mb[0]);
+
+ pkt_mb[3] = _mm_shuffle_epi8(desc[1], shuf_msk2);
+ pkt_mb[2] = _mm_shuffle_epi8(desc[1], shuf_msk1);
+ pkt_mb[3] = _mm_add_epi16(pkt_mb[3], len_adjust);
+ pkt_mb[2] = _mm_add_epi16(pkt_mb[2], len_adjust);
+ _mm_storeu_si128((void *)&rx_pkts[3]->rx_descriptor_fields1,
+ pkt_mb[3]);
+ _mm_storeu_si128((void *)&rx_pkts[2]->rx_descriptor_fields1,
+ pkt_mb[2]);
+
+ pkt_mb[5] = _mm_shuffle_epi8(desc[2], shuf_msk2);
+ pkt_mb[4] = _mm_shuffle_epi8(desc[2], shuf_msk1);
+ pkt_mb[5] = _mm_add_epi16(pkt_mb[5], len_adjust);
+ pkt_mb[4] = _mm_add_epi16(pkt_mb[4], len_adjust);
+ _mm_storeu_si128((void *)&rx_pkts[5]->rx_descriptor_fields1,
+ pkt_mb[5]);
+ _mm_storeu_si128((void *)&rx_pkts[4]->rx_descriptor_fields1,
+ pkt_mb[4]);
+
+ pkt_mb[7] = _mm_shuffle_epi8(desc[3], shuf_msk2);
+ pkt_mb[6] = _mm_shuffle_epi8(desc[3], shuf_msk1);
+ pkt_mb[7] = _mm_add_epi16(pkt_mb[7], len_adjust);
+ pkt_mb[6] = _mm_add_epi16(pkt_mb[6], len_adjust);
+ _mm_storeu_si128((void *)&rx_pkts[7]->rx_descriptor_fields1,
+ pkt_mb[7]);
+ _mm_storeu_si128((void *)&rx_pkts[6]->rx_descriptor_fields1,
+ pkt_mb[6]);
+
+ if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
+ if (sw_ring + nb_used <= sw_ring_end)
+ nb_pkts_received += nb_used;
+ else
+ nb_pkts_received += sw_ring_end - sw_ring;
+ break;
+ } else {
+ if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
+ sw_ring_end)) {
+ nb_pkts_received += sw_ring_end - sw_ring;
+ break;
+ } else {
+ nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;
+
+ rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
+ sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
+ rused += RTE_VIRTIO_DESC_PER_LOOP;
+ nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
+ }
+ }
+ }
+
+ vq->vq_used_cons_idx += nb_pkts_received;
+ vq->vq_free_cnt += nb_pkts_received;
+ rxvq->stats.packets += nb_pkts_received;
+ return nb_pkts_received;
+}
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost.h b/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost.h
new file mode 100644
index 00000000..668cc99f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#ifndef _VHOST_NET_USER_H
+#define _VHOST_NET_USER_H
+
+#include <stdint.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#include "../virtio_pci.h"
+#include "../virtio_logs.h"
+#include "../virtqueue.h"
+
+struct vhost_vring_state {
+ unsigned int index;
+ unsigned int num;
+};
+
+struct vhost_vring_file {
+ unsigned int index;
+ int fd;
+};
+
+struct vhost_vring_addr {
+ unsigned int index;
+ /* Option flags. */
+ unsigned int flags;
+ /* Flag values: */
+ /* Whether log address is valid. If set enables logging. */
+#define VHOST_VRING_F_LOG 0
+
+ /* Start of array of descriptors (virtually contiguous) */
+ uint64_t desc_user_addr;
+ /* Used structure address. Must be 32 bit aligned */
+ uint64_t used_user_addr;
+ /* Available structure address. Must be 16 bit aligned */
+ uint64_t avail_user_addr;
+ /* Logging support. */
+ /* Log writes to used structure, at offset calculated from specified
+ * address. Address must be 32 bit aligned.
+ */
+ uint64_t log_guest_addr;
+};
+
+enum vhost_user_request {
+ VHOST_USER_NONE = 0,
+ VHOST_USER_GET_FEATURES = 1,
+ VHOST_USER_SET_FEATURES = 2,
+ VHOST_USER_SET_OWNER = 3,
+ VHOST_USER_RESET_OWNER = 4,
+ VHOST_USER_SET_MEM_TABLE = 5,
+ VHOST_USER_SET_LOG_BASE = 6,
+ VHOST_USER_SET_LOG_FD = 7,
+ VHOST_USER_SET_VRING_NUM = 8,
+ VHOST_USER_SET_VRING_ADDR = 9,
+ VHOST_USER_SET_VRING_BASE = 10,
+ VHOST_USER_GET_VRING_BASE = 11,
+ VHOST_USER_SET_VRING_KICK = 12,
+ VHOST_USER_SET_VRING_CALL = 13,
+ VHOST_USER_SET_VRING_ERR = 14,
+ VHOST_USER_GET_PROTOCOL_FEATURES = 15,
+ VHOST_USER_SET_PROTOCOL_FEATURES = 16,
+ VHOST_USER_GET_QUEUE_NUM = 17,
+ VHOST_USER_SET_VRING_ENABLE = 18,
+ VHOST_USER_MAX
+};
+
+const char * const vhost_msg_strings[VHOST_USER_MAX];
+
+struct vhost_memory_region {
+ uint64_t guest_phys_addr;
+ uint64_t memory_size; /* bytes */
+ uint64_t userspace_addr;
+ uint64_t mmap_offset;
+};
+
+struct virtio_user_dev;
+
+struct virtio_user_backend_ops {
+ int (*setup)(struct virtio_user_dev *dev);
+ int (*send_request)(struct virtio_user_dev *dev,
+ enum vhost_user_request req,
+ void *arg);
+ int (*enable_qp)(struct virtio_user_dev *dev,
+ uint16_t pair_idx,
+ int enable);
+};
+
+struct virtio_user_backend_ops ops_user;
+struct virtio_user_backend_ops ops_kernel;
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel.c b/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel.c
new file mode 100644
index 00000000..b2444096
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel.c
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <rte_memory.h>
+#include <rte_eal_memconfig.h>
+
+#include "vhost.h"
+#include "virtio_user_dev.h"
+#include "vhost_kernel_tap.h"
+
+struct vhost_memory_kernel {
+ uint32_t nregions;
+ uint32_t padding;
+ struct vhost_memory_region regions[0];
+};
+
+/* vhost kernel ioctls */
+#define VHOST_VIRTIO 0xAF
+#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
+#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
+#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
+#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
+#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, struct vhost_memory_kernel)
+#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
+#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
+#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
+#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
+#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
+#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
+#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
+#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
+#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
+#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
+
+static uint64_t max_regions = 64;
+
+static void
+get_vhost_kernel_max_regions(void)
+{
+ int fd;
+ char buf[20] = {'\0'};
+
+ fd = open("/sys/module/vhost/parameters/max_mem_regions", O_RDONLY);
+ if (fd < 0)
+ return;
+
+ if (read(fd, buf, sizeof(buf) - 1) > 0)
+ max_regions = strtoull(buf, NULL, 10);
+
+ close(fd);
+}
+
+static uint64_t vhost_req_user_to_kernel[] = {
+ [VHOST_USER_SET_OWNER] = VHOST_SET_OWNER,
+ [VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
+ [VHOST_USER_SET_FEATURES] = VHOST_SET_FEATURES,
+ [VHOST_USER_GET_FEATURES] = VHOST_GET_FEATURES,
+ [VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL,
+ [VHOST_USER_SET_VRING_NUM] = VHOST_SET_VRING_NUM,
+ [VHOST_USER_SET_VRING_BASE] = VHOST_SET_VRING_BASE,
+ [VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE,
+ [VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
+ [VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
+ [VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
+};
+
+struct walk_arg {
+ struct vhost_memory_kernel *vm;
+ uint32_t region_nr;
+};
+static int
+add_memory_region(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, size_t len, void *arg)
+{
+ struct walk_arg *wa = arg;
+ struct vhost_memory_region *mr;
+ void *start_addr;
+
+ if (wa->region_nr >= max_regions)
+ return -1;
+
+ mr = &wa->vm->regions[wa->region_nr++];
+ start_addr = ms->addr;
+
+ mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
+ mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
+ mr->memory_size = len;
+ mr->mmap_offset = 0;
+
+ return 0;
+}
+
+/* By default, vhost kernel module allows 64 regions, but DPDK allows
+ * 256 segments. As a relief, below function merges those virtually
+ * adjacent memsegs into one region.
+ */
+static struct vhost_memory_kernel *
+prepare_vhost_memory_kernel(void)
+{
+ struct vhost_memory_kernel *vm;
+ struct walk_arg wa;
+
+ vm = malloc(sizeof(struct vhost_memory_kernel) +
+ max_regions *
+ sizeof(struct vhost_memory_region));
+ if (!vm)
+ return NULL;
+
+ wa.region_nr = 0;
+ wa.vm = vm;
+
+ if (rte_memseg_contig_walk(add_memory_region, &wa) < 0) {
+ free(vm);
+ return NULL;
+ }
+
+ vm->nregions = wa.region_nr;
+ vm->padding = 0;
+ return vm;
+}
+
+/* with below features, vhost kernel does not need to do the checksum and TSO,
+ * these info will be passed to virtio_user through virtio net header.
+ */
+#define VHOST_KERNEL_GUEST_OFFLOADS_MASK \
+ ((1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
+ (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
+ (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
+ (1ULL << VIRTIO_NET_F_GUEST_UFO))
+
+/* with below features, when flows from virtio_user to vhost kernel
+ * (1) if flows goes up through the kernel networking stack, it does not need
+ * to verify checksum, which can save CPU cycles;
+ * (2) if flows goes through a Linux bridge and outside from an interface
+ * (kernel driver), checksum and TSO will be done by GSO in kernel or even
+ * offloaded into real physical device.
+ */
+#define VHOST_KERNEL_HOST_OFFLOADS_MASK \
+ ((1ULL << VIRTIO_NET_F_HOST_TSO4) | \
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
+ (1ULL << VIRTIO_NET_F_CSUM))
+
+static int
+tap_supporte_mq(void)
+{
+ int tapfd;
+ unsigned int tap_features;
+
+ tapfd = open(PATH_NET_TUN, O_RDWR);
+ if (tapfd < 0) {
+ PMD_DRV_LOG(ERR, "fail to open %s: %s",
+ PATH_NET_TUN, strerror(errno));
+ return -1;
+ }
+
+ if (ioctl(tapfd, TUNGETFEATURES, &tap_features) == -1) {
+ PMD_DRV_LOG(ERR, "TUNGETFEATURES failed: %s", strerror(errno));
+ close(tapfd);
+ return -1;
+ }
+
+ close(tapfd);
+ return tap_features & IFF_MULTI_QUEUE;
+}
+
+static int
+vhost_kernel_ioctl(struct virtio_user_dev *dev,
+ enum vhost_user_request req,
+ void *arg)
+{
+ int ret = -1;
+ unsigned int i;
+ uint64_t req_kernel;
+ struct vhost_memory_kernel *vm = NULL;
+ int vhostfd;
+ unsigned int queue_sel;
+
+ PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
+
+ req_kernel = vhost_req_user_to_kernel[req];
+
+ if (req_kernel == VHOST_SET_MEM_TABLE) {
+ vm = prepare_vhost_memory_kernel();
+ if (!vm)
+ return -1;
+ arg = (void *)vm;
+ }
+
+ if (req_kernel == VHOST_SET_FEATURES) {
+ /* We don't need memory protection here */
+ *(uint64_t *)arg &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
+
+ /* VHOST kernel does not know about below flags */
+ *(uint64_t *)arg &= ~VHOST_KERNEL_GUEST_OFFLOADS_MASK;
+ *(uint64_t *)arg &= ~VHOST_KERNEL_HOST_OFFLOADS_MASK;
+
+ *(uint64_t *)arg &= ~(1ULL << VIRTIO_NET_F_MQ);
+ }
+
+ switch (req_kernel) {
+ case VHOST_SET_VRING_NUM:
+ case VHOST_SET_VRING_ADDR:
+ case VHOST_SET_VRING_BASE:
+ case VHOST_GET_VRING_BASE:
+ case VHOST_SET_VRING_KICK:
+ case VHOST_SET_VRING_CALL:
+ queue_sel = *(unsigned int *)arg;
+ vhostfd = dev->vhostfds[queue_sel / 2];
+ *(unsigned int *)arg = queue_sel % 2;
+ PMD_DRV_LOG(DEBUG, "vhostfd=%d, index=%u",
+ vhostfd, *(unsigned int *)arg);
+ break;
+ default:
+ vhostfd = -1;
+ }
+ if (vhostfd == -1) {
+ for (i = 0; i < dev->max_queue_pairs; ++i) {
+ if (dev->vhostfds[i] < 0)
+ continue;
+
+ ret = ioctl(dev->vhostfds[i], req_kernel, arg);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ ret = ioctl(vhostfd, req_kernel, arg);
+ }
+
+ if (!ret && req_kernel == VHOST_GET_FEATURES) {
+ /* with tap as the backend, all these features are supported
+ * but not claimed by vhost-net, so we add them back when
+ * reporting to upper layer.
+ */
+ *((uint64_t *)arg) |= VHOST_KERNEL_GUEST_OFFLOADS_MASK;
+ *((uint64_t *)arg) |= VHOST_KERNEL_HOST_OFFLOADS_MASK;
+
+ /* vhost_kernel will not declare this feature, but it does
+ * support multi-queue.
+ */
+ if (tap_supporte_mq())
+ *(uint64_t *)arg |= (1ull << VIRTIO_NET_F_MQ);
+ }
+
+ if (vm)
+ free(vm);
+
+ if (ret < 0)
+ PMD_DRV_LOG(ERR, "%s failed: %s",
+ vhost_msg_strings[req], strerror(errno));
+
+ return ret;
+}
+
+/**
+ * Set up environment to talk with a vhost kernel backend.
+ *
+ * @return
+ * - (-1) if fail to set up;
+ * - (>=0) if successful.
+ */
+static int
+vhost_kernel_setup(struct virtio_user_dev *dev)
+{
+ int vhostfd;
+ uint32_t i;
+
+ get_vhost_kernel_max_regions();
+
+ for (i = 0; i < dev->max_queue_pairs; ++i) {
+ vhostfd = open(dev->path, O_RDWR);
+ if (vhostfd < 0) {
+ PMD_DRV_LOG(ERR, "fail to open %s, %s",
+ dev->path, strerror(errno));
+ return -1;
+ }
+
+ dev->vhostfds[i] = vhostfd;
+ }
+
+ return 0;
+}
+
+static int
+vhost_kernel_set_backend(int vhostfd, int tapfd)
+{
+ struct vhost_vring_file f;
+
+ f.fd = tapfd;
+ f.index = 0;
+ if (ioctl(vhostfd, VHOST_NET_SET_BACKEND, &f) < 0) {
+ PMD_DRV_LOG(ERR, "VHOST_NET_SET_BACKEND fails, %s",
+ strerror(errno));
+ return -1;
+ }
+
+ f.index = 1;
+ if (ioctl(vhostfd, VHOST_NET_SET_BACKEND, &f) < 0) {
+ PMD_DRV_LOG(ERR, "VHOST_NET_SET_BACKEND fails, %s",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
+ uint16_t pair_idx,
+ int enable)
+{
+ int hdr_size;
+ int vhostfd;
+ int tapfd;
+ int req_mq = (dev->max_queue_pairs > 1);
+
+ vhostfd = dev->vhostfds[pair_idx];
+
+ if (!enable) {
+ if (dev->tapfds[pair_idx] >= 0) {
+ close(dev->tapfds[pair_idx]);
+ dev->tapfds[pair_idx] = -1;
+ }
+ return vhost_kernel_set_backend(vhostfd, -1);
+ } else if (dev->tapfds[pair_idx] >= 0) {
+ return 0;
+ }
+
+ if ((dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ||
+ (dev->features & (1ULL << VIRTIO_F_VERSION_1)))
+ hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ else
+ hdr_size = sizeof(struct virtio_net_hdr);
+
+ tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq,
+ (char *)dev->mac_addr);
+ if (tapfd < 0) {
+ PMD_DRV_LOG(ERR, "fail to open tap for vhost kernel");
+ return -1;
+ }
+
+ if (vhost_kernel_set_backend(vhostfd, tapfd) < 0) {
+ PMD_DRV_LOG(ERR, "fail to set backend for vhost kernel");
+ close(tapfd);
+ return -1;
+ }
+
+ dev->tapfds[pair_idx] = tapfd;
+ return 0;
+}
+
+struct virtio_user_backend_ops ops_kernel = {
+ .setup = vhost_kernel_setup,
+ .send_request = vhost_kernel_ioctl,
+ .enable_qp = vhost_kernel_enable_queue_pair
+};
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
new file mode 100644
index 00000000..9ea7ade7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <errno.h>
+#include <string.h>
+#include <limits.h>
+
+#include <rte_ether.h>
+
+#include "vhost_kernel_tap.h"
+#include "../virtio_logs.h"
+
+int
+vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
+ const char *mac)
+{
+ unsigned int tap_features;
+ int sndbuf = INT_MAX;
+ struct ifreq ifr;
+ int tapfd;
+ unsigned int offload =
+ TUN_F_CSUM |
+ TUN_F_TSO4 |
+ TUN_F_TSO6 |
+ TUN_F_TSO_ECN |
+ TUN_F_UFO;
+
+ /* TODO:
+ * 1. verify we can get/set vnet_hdr_len, tap_probe_vnet_hdr_len
+ * 2. get number of memory regions from vhost module parameter
+ * max_mem_regions, supported in newer version linux kernel
+ */
+ tapfd = open(PATH_NET_TUN, O_RDWR);
+ if (tapfd < 0) {
+ PMD_DRV_LOG(ERR, "fail to open %s: %s",
+ PATH_NET_TUN, strerror(errno));
+ return -1;
+ }
+
+ /* Construct ifr */
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
+
+ if (ioctl(tapfd, TUNGETFEATURES, &tap_features) == -1) {
+ PMD_DRV_LOG(ERR, "TUNGETFEATURES failed: %s", strerror(errno));
+ goto error;
+ }
+ if (tap_features & IFF_ONE_QUEUE)
+ ifr.ifr_flags |= IFF_ONE_QUEUE;
+
+ /* Let tap instead of vhost-net handle vnet header, as the latter does
+ * not support offloading. And in this case, we should not set feature
+ * bit VHOST_NET_F_VIRTIO_NET_HDR.
+ */
+ if (tap_features & IFF_VNET_HDR) {
+ ifr.ifr_flags |= IFF_VNET_HDR;
+ } else {
+ PMD_DRV_LOG(ERR, "TAP does not support IFF_VNET_HDR");
+ goto error;
+ }
+
+ if (req_mq)
+ ifr.ifr_flags |= IFF_MULTI_QUEUE;
+
+ if (*p_ifname)
+ strncpy(ifr.ifr_name, *p_ifname, IFNAMSIZ - 1);
+ else
+ strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ - 1);
+ if (ioctl(tapfd, TUNSETIFF, (void *)&ifr) == -1) {
+ PMD_DRV_LOG(ERR, "TUNSETIFF failed: %s", strerror(errno));
+ goto error;
+ }
+
+ fcntl(tapfd, F_SETFL, O_NONBLOCK);
+
+ if (ioctl(tapfd, TUNSETVNETHDRSZ, &hdr_size) < 0) {
+ PMD_DRV_LOG(ERR, "TUNSETVNETHDRSZ failed: %s", strerror(errno));
+ goto error;
+ }
+
+ if (ioctl(tapfd, TUNSETSNDBUF, &sndbuf) < 0) {
+ PMD_DRV_LOG(ERR, "TUNSETSNDBUF failed: %s", strerror(errno));
+ goto error;
+ }
+
+ /* TODO: before set the offload capabilities, we'd better (1) check
+ * negotiated features to see if necessary to offload; (2) query tap
+ * to see if it supports the offload capabilities.
+ */
+ if (ioctl(tapfd, TUNSETOFFLOAD, offload) != 0)
+ PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s",
+ strerror(errno));
+
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
+ memcpy(ifr.ifr_hwaddr.sa_data, mac, ETHER_ADDR_LEN);
+ if (ioctl(tapfd, SIOCSIFHWADDR, (void *)&ifr) == -1) {
+ PMD_DRV_LOG(ERR, "SIOCSIFHWADDR failed: %s", strerror(errno));
+ goto error;
+ }
+
+ if (!(*p_ifname))
+ *p_ifname = strdup(ifr.ifr_name);
+
+ return tapfd;
+error:
+ close(tapfd);
+ return -1;
+}
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.h b/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
new file mode 100644
index 00000000..01a026f5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#include <sys/ioctl.h>
+
+/* TUN ioctls */
+#define TUNSETIFF _IOW('T', 202, int)
+#define TUNGETFEATURES _IOR('T', 207, unsigned int)
+#define TUNSETOFFLOAD _IOW('T', 208, unsigned int)
+#define TUNGETIFF _IOR('T', 210, unsigned int)
+#define TUNSETSNDBUF _IOW('T', 212, int)
+#define TUNGETVNETHDRSZ _IOR('T', 215, int)
+#define TUNSETVNETHDRSZ _IOW('T', 216, int)
+#define TUNSETQUEUE _IOW('T', 217, int)
+#define TUNSETVNETLE _IOW('T', 220, int)
+#define TUNSETVNETBE _IOW('T', 222, int)
+
+/* TUNSETIFF ifr flags */
+#define IFF_TAP 0x0002
+#define IFF_NO_PI 0x1000
+#define IFF_ONE_QUEUE 0x2000
+#define IFF_VNET_HDR 0x4000
+#define IFF_MULTI_QUEUE 0x0100
+#define IFF_ATTACH_QUEUE 0x0200
+#define IFF_DETACH_QUEUE 0x0400
+
+/* Features for GSO (TUNSETOFFLOAD). */
+#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */
+#define TUN_F_TSO4 0x02 /* I can handle TSO for IPv4 packets */
+#define TUN_F_TSO6 0x04 /* I can handle TSO for IPv6 packets */
+#define TUN_F_TSO_ECN 0x08 /* I can handle TSO with ECN bits. */
+#define TUN_F_UFO 0x10 /* I can handle UFO packets */
+
+/* Constants */
+#define PATH_NET_TUN "/dev/net/tun"
+
+int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
+ const char *mac);
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_user.c b/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_user.c
new file mode 100644
index 00000000..ef6e43df
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_user.c
@@ -0,0 +1,504 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/un.h>
+#include <string.h>
+#include <errno.h>
+
+#include "vhost.h"
+#include "virtio_user_dev.h"
+
+/* The version of the protocol we support */
+#define VHOST_USER_VERSION 0x1
+
+#define VHOST_MEMORY_MAX_NREGIONS 8
+struct vhost_memory {
+ uint32_t nregions;
+ uint32_t padding;
+ struct vhost_memory_region regions[VHOST_MEMORY_MAX_NREGIONS];
+};
+
+struct vhost_user_msg {
+ enum vhost_user_request request;
+
+#define VHOST_USER_VERSION_MASK 0x3
+#define VHOST_USER_REPLY_MASK (0x1 << 2)
+ uint32_t flags;
+ uint32_t size; /* the following payload size */
+ union {
+#define VHOST_USER_VRING_IDX_MASK 0xff
+#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
+ uint64_t u64;
+ struct vhost_vring_state state;
+ struct vhost_vring_addr addr;
+ struct vhost_memory memory;
+ } payload;
+ int fds[VHOST_MEMORY_MAX_NREGIONS];
+} __attribute((packed));
+
+#define VHOST_USER_HDR_SIZE offsetof(struct vhost_user_msg, payload.u64)
+#define VHOST_USER_PAYLOAD_SIZE \
+ (sizeof(struct vhost_user_msg) - VHOST_USER_HDR_SIZE)
+
+static int
+vhost_user_write(int fd, void *buf, int len, int *fds, int fd_num)
+{
+ int r;
+ struct msghdr msgh;
+ struct iovec iov;
+ size_t fd_size = fd_num * sizeof(int);
+ char control[CMSG_SPACE(fd_size)];
+ struct cmsghdr *cmsg;
+
+ memset(&msgh, 0, sizeof(msgh));
+ memset(control, 0, sizeof(control));
+
+ iov.iov_base = (uint8_t *)buf;
+ iov.iov_len = len;
+
+ msgh.msg_iov = &iov;
+ msgh.msg_iovlen = 1;
+ msgh.msg_control = control;
+ msgh.msg_controllen = sizeof(control);
+
+ cmsg = CMSG_FIRSTHDR(&msgh);
+ cmsg->cmsg_len = CMSG_LEN(fd_size);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ memcpy(CMSG_DATA(cmsg), fds, fd_size);
+
+ do {
+ r = sendmsg(fd, &msgh, 0);
+ } while (r < 0 && errno == EINTR);
+
+ return r;
+}
+
+static int
+vhost_user_read(int fd, struct vhost_user_msg *msg)
+{
+ uint32_t valid_flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION;
+ int ret, sz_hdr = VHOST_USER_HDR_SIZE, sz_payload;
+
+ ret = recv(fd, (void *)msg, sz_hdr, 0);
+ if (ret < sz_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to recv msg hdr: %d instead of %d.",
+ ret, sz_hdr);
+ goto fail;
+ }
+
+ /* validate msg flags */
+ if (msg->flags != (valid_flags)) {
+ PMD_DRV_LOG(ERR, "Failed to recv msg: flags %x instead of %x.",
+ msg->flags, valid_flags);
+ goto fail;
+ }
+
+ sz_payload = msg->size;
+
+ if ((size_t)sz_payload > sizeof(msg->payload))
+ goto fail;
+
+ if (sz_payload) {
+ ret = recv(fd, (void *)((char *)msg + sz_hdr), sz_payload, 0);
+ if (ret < sz_payload) {
+ PMD_DRV_LOG(ERR,
+ "Failed to recv msg payload: %d instead of %d.",
+ ret, msg->size);
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ return -1;
+}
+
+struct hugepage_file_info {
+ uint64_t addr; /**< virtual addr */
+ size_t size; /**< the file size */
+ char path[PATH_MAX]; /**< path to backing file */
+};
+
+/* Two possible options:
+ * 1. Match HUGEPAGE_INFO_FMT to find the file storing struct hugepage_file
+ * array. This is simple but cannot be used in secondary process because
+ * secondary process will close and munmap that file.
+ * 2. Match HUGEFILE_FMT to find hugepage files directly.
+ *
+ * We choose option 2.
+ */
+static int
+get_hugepage_file_info(struct hugepage_file_info huges[], int max)
+{
+ int idx, k, exist;
+ FILE *f;
+ char buf[BUFSIZ], *tmp, *tail;
+ char *str_underline, *str_start;
+ int huge_index;
+ uint64_t v_start, v_end;
+ struct stat stats;
+
+ f = fopen("/proc/self/maps", "r");
+ if (!f) {
+ PMD_DRV_LOG(ERR, "cannot open /proc/self/maps");
+ return -1;
+ }
+
+ idx = 0;
+ while (fgets(buf, sizeof(buf), f) != NULL) {
+ if (sscanf(buf, "%" PRIx64 "-%" PRIx64, &v_start, &v_end) < 2) {
+ PMD_DRV_LOG(ERR, "Failed to parse address");
+ goto error;
+ }
+
+ tmp = strchr(buf, ' ') + 1; /** skip address */
+ tmp = strchr(tmp, ' ') + 1; /** skip perm */
+ tmp = strchr(tmp, ' ') + 1; /** skip offset */
+ tmp = strchr(tmp, ' ') + 1; /** skip dev */
+ tmp = strchr(tmp, ' ') + 1; /** skip inode */
+ while (*tmp == ' ') /** skip spaces */
+ tmp++;
+ tail = strrchr(tmp, '\n'); /** remove newline if exists */
+ if (tail)
+ *tail = '\0';
+
+ /* Match HUGEFILE_FMT, aka "%s/%smap_%d",
+ * which is defined in eal_filesystem.h
+ */
+ str_underline = strrchr(tmp, '_');
+ if (!str_underline)
+ continue;
+
+ str_start = str_underline - strlen("map");
+ if (str_start < tmp)
+ continue;
+
+ if (sscanf(str_start, "map_%d", &huge_index) != 1)
+ continue;
+
+ /* skip duplicated file which is mapped to different regions */
+ for (k = 0, exist = -1; k < idx; ++k) {
+ if (!strcmp(huges[k].path, tmp)) {
+ exist = k;
+ break;
+ }
+ }
+ if (exist >= 0)
+ continue;
+
+ if (idx >= max) {
+ PMD_DRV_LOG(ERR, "Exceed maximum of %d", max);
+ goto error;
+ }
+
+ huges[idx].addr = v_start;
+ huges[idx].size = v_end - v_start; /* To be corrected later */
+ snprintf(huges[idx].path, PATH_MAX, "%s", tmp);
+ idx++;
+ }
+
+ /* correct the size for files who have many regions */
+ for (k = 0; k < idx; ++k) {
+ if (stat(huges[k].path, &stats) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to stat %s, %s\n",
+ huges[k].path, strerror(errno));
+ continue;
+ }
+ huges[k].size = stats.st_size;
+ PMD_DRV_LOG(INFO, "file %s, size %zx\n",
+ huges[k].path, huges[k].size);
+ }
+
+ fclose(f);
+ return idx;
+
+error:
+ fclose(f);
+ return -1;
+}
+
+static int
+prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[])
+{
+ int i, num;
+ struct hugepage_file_info huges[VHOST_MEMORY_MAX_NREGIONS];
+ struct vhost_memory_region *mr;
+
+ num = get_hugepage_file_info(huges, VHOST_MEMORY_MAX_NREGIONS);
+ if (num < 0) {
+ PMD_INIT_LOG(ERR, "Failed to prepare memory for vhost-user");
+ return -1;
+ }
+
+ for (i = 0; i < num; ++i) {
+ mr = &msg->payload.memory.regions[i];
+ mr->guest_phys_addr = huges[i].addr; /* use vaddr! */
+ mr->userspace_addr = huges[i].addr;
+ mr->memory_size = huges[i].size;
+ mr->mmap_offset = 0;
+ fds[i] = open(huges[i].path, O_RDWR);
+ }
+
+ msg->payload.memory.nregions = num;
+ msg->payload.memory.padding = 0;
+
+ return 0;
+}
+
+static struct vhost_user_msg m;
+
+const char * const vhost_msg_strings[] = {
+ [VHOST_USER_SET_OWNER] = "VHOST_SET_OWNER",
+ [VHOST_USER_RESET_OWNER] = "VHOST_RESET_OWNER",
+ [VHOST_USER_SET_FEATURES] = "VHOST_SET_FEATURES",
+ [VHOST_USER_GET_FEATURES] = "VHOST_GET_FEATURES",
+ [VHOST_USER_SET_VRING_CALL] = "VHOST_SET_VRING_CALL",
+ [VHOST_USER_SET_VRING_NUM] = "VHOST_SET_VRING_NUM",
+ [VHOST_USER_SET_VRING_BASE] = "VHOST_SET_VRING_BASE",
+ [VHOST_USER_GET_VRING_BASE] = "VHOST_GET_VRING_BASE",
+ [VHOST_USER_SET_VRING_ADDR] = "VHOST_SET_VRING_ADDR",
+ [VHOST_USER_SET_VRING_KICK] = "VHOST_SET_VRING_KICK",
+ [VHOST_USER_SET_MEM_TABLE] = "VHOST_SET_MEM_TABLE",
+ [VHOST_USER_SET_VRING_ENABLE] = "VHOST_SET_VRING_ENABLE",
+};
+
+static int
+vhost_user_sock(struct virtio_user_dev *dev,
+ enum vhost_user_request req,
+ void *arg)
+{
+ struct vhost_user_msg msg;
+ struct vhost_vring_file *file = 0;
+ int need_reply = 0;
+ int fds[VHOST_MEMORY_MAX_NREGIONS];
+ int fd_num = 0;
+ int i, len;
+ int vhostfd = dev->vhostfd;
+
+ RTE_SET_USED(m);
+
+ PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
+
+ if (dev->is_server && vhostfd < 0)
+ return -1;
+
+ msg.request = req;
+ msg.flags = VHOST_USER_VERSION;
+ msg.size = 0;
+
+ switch (req) {
+ case VHOST_USER_GET_FEATURES:
+ need_reply = 1;
+ break;
+
+ case VHOST_USER_SET_FEATURES:
+ case VHOST_USER_SET_LOG_BASE:
+ msg.payload.u64 = *((__u64 *)arg);
+ msg.size = sizeof(m.payload.u64);
+ break;
+
+ case VHOST_USER_SET_OWNER:
+ case VHOST_USER_RESET_OWNER:
+ break;
+
+ case VHOST_USER_SET_MEM_TABLE:
+ if (prepare_vhost_memory_user(&msg, fds) < 0)
+ return -1;
+ fd_num = msg.payload.memory.nregions;
+ msg.size = sizeof(m.payload.memory.nregions);
+ msg.size += sizeof(m.payload.memory.padding);
+ msg.size += fd_num * sizeof(struct vhost_memory_region);
+ break;
+
+ case VHOST_USER_SET_LOG_FD:
+ fds[fd_num++] = *((int *)arg);
+ break;
+
+ case VHOST_USER_SET_VRING_NUM:
+ case VHOST_USER_SET_VRING_BASE:
+ case VHOST_USER_SET_VRING_ENABLE:
+ memcpy(&msg.payload.state, arg, sizeof(msg.payload.state));
+ msg.size = sizeof(m.payload.state);
+ break;
+
+ case VHOST_USER_GET_VRING_BASE:
+ memcpy(&msg.payload.state, arg, sizeof(msg.payload.state));
+ msg.size = sizeof(m.payload.state);
+ need_reply = 1;
+ break;
+
+ case VHOST_USER_SET_VRING_ADDR:
+ memcpy(&msg.payload.addr, arg, sizeof(msg.payload.addr));
+ msg.size = sizeof(m.payload.addr);
+ break;
+
+ case VHOST_USER_SET_VRING_KICK:
+ case VHOST_USER_SET_VRING_CALL:
+ case VHOST_USER_SET_VRING_ERR:
+ file = arg;
+ msg.payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
+ msg.size = sizeof(m.payload.u64);
+ if (file->fd > 0)
+ fds[fd_num++] = file->fd;
+ else
+ msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "trying to send unhandled msg type");
+ return -1;
+ }
+
+ len = VHOST_USER_HDR_SIZE + msg.size;
+ if (vhost_user_write(vhostfd, &msg, len, fds, fd_num) < 0) {
+ PMD_DRV_LOG(ERR, "%s failed: %s",
+ vhost_msg_strings[req], strerror(errno));
+ return -1;
+ }
+
+ if (req == VHOST_USER_SET_MEM_TABLE)
+ for (i = 0; i < fd_num; ++i)
+ close(fds[i]);
+
+ if (need_reply) {
+ if (vhost_user_read(vhostfd, &msg) < 0) {
+ PMD_DRV_LOG(ERR, "Received msg failed: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ if (req != msg.request) {
+ PMD_DRV_LOG(ERR, "Received unexpected msg type");
+ return -1;
+ }
+
+ switch (req) {
+ case VHOST_USER_GET_FEATURES:
+ if (msg.size != sizeof(m.payload.u64)) {
+ PMD_DRV_LOG(ERR, "Received bad msg size");
+ return -1;
+ }
+ *((__u64 *)arg) = msg.payload.u64;
+ break;
+ case VHOST_USER_GET_VRING_BASE:
+ if (msg.size != sizeof(m.payload.state)) {
+ PMD_DRV_LOG(ERR, "Received bad msg size");
+ return -1;
+ }
+ memcpy(arg, &msg.payload.state,
+ sizeof(struct vhost_vring_state));
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Received unexpected msg type");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+#define MAX_VIRTIO_USER_BACKLOG 1
+static int
+virtio_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un)
+{
+ int ret;
+ int flag;
+ int fd = dev->listenfd;
+
+ ret = bind(fd, (struct sockaddr *)un, sizeof(*un));
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to bind to %s: %s; remove it and try again\n",
+ dev->path, strerror(errno));
+ return -1;
+ }
+ ret = listen(fd, MAX_VIRTIO_USER_BACKLOG);
+ if (ret < 0)
+ return -1;
+
+ flag = fcntl(fd, F_GETFL);
+ fcntl(fd, F_SETFL, flag | O_NONBLOCK);
+
+ return 0;
+}
+
+/**
+ * Set up environment to talk with a vhost user backend.
+ *
+ * @return
+ * - (-1) if fail;
+ * - (0) if succeed.
+ */
+static int
+vhost_user_setup(struct virtio_user_dev *dev)
+{
+ int fd;
+ int flag;
+ struct sockaddr_un un;
+
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (fd < 0) {
+ PMD_DRV_LOG(ERR, "socket() error, %s", strerror(errno));
+ return -1;
+ }
+
+ flag = fcntl(fd, F_GETFD);
+ if (fcntl(fd, F_SETFD, flag | FD_CLOEXEC) < 0)
+ PMD_DRV_LOG(WARNING, "fcntl failed, %s", strerror(errno));
+
+ memset(&un, 0, sizeof(un));
+ un.sun_family = AF_UNIX;
+ snprintf(un.sun_path, sizeof(un.sun_path), "%s", dev->path);
+
+ if (dev->is_server) {
+ dev->listenfd = fd;
+ if (virtio_user_start_server(dev, &un) < 0) {
+ PMD_DRV_LOG(ERR, "virtio-user startup fails in server mode");
+ close(fd);
+ return -1;
+ }
+ dev->vhostfd = -1;
+ } else {
+ if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
+ PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
+ close(fd);
+ return -1;
+ }
+ dev->vhostfd = fd;
+ }
+
+ return 0;
+}
+
+static int
+vhost_user_enable_queue_pair(struct virtio_user_dev *dev,
+ uint16_t pair_idx,
+ int enable)
+{
+ int i;
+
+ for (i = 0; i < 2; ++i) {
+ struct vhost_vring_state state = {
+ .index = pair_idx * 2 + i,
+ .num = enable,
+ };
+
+ if (vhost_user_sock(dev, VHOST_USER_SET_VRING_ENABLE, &state))
+ return -1;
+ }
+
+ return 0;
+}
+
+struct virtio_user_backend_ops ops_user = {
+ .setup = vhost_user_setup,
+ .send_request = vhost_user_sock,
+ .enable_qp = vhost_user_enable_queue_pair
+};
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c b/src/spdk/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c
new file mode 100644
index 00000000..7df600b0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -0,0 +1,606 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <sys/eventfd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "vhost.h"
+#include "virtio_user_dev.h"
+#include "../virtio_ethdev.h"
+
+#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"
+
+static int
+virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+ /* Of all per virtqueue MSGs, make sure VHOST_SET_VRING_CALL come
+ * firstly because vhost depends on this msg to allocate virtqueue
+ * pair.
+ */
+ struct vhost_vring_file file;
+
+ file.index = queue_sel;
+ file.fd = dev->callfds[queue_sel];
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);
+
+ return 0;
+}
+
+static int
+virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+ struct vhost_vring_file file;
+ struct vhost_vring_state state;
+ struct vring *vring = &dev->vrings[queue_sel];
+ struct vhost_vring_addr addr = {
+ .index = queue_sel,
+ .desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
+ .avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
+ .used_user_addr = (uint64_t)(uintptr_t)vring->used,
+ .log_guest_addr = 0,
+ .flags = 0, /* disable log */
+ };
+
+ state.index = queue_sel;
+ state.num = vring->num;
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);
+
+ state.index = queue_sel;
+ state.num = 0; /* no reservation */
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);
+
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);
+
+ /* Of all per virtqueue MSGs, make sure VHOST_USER_SET_VRING_KICK comes
+ * lastly because vhost depends on this msg to judge if
+ * virtio is ready.
+ */
+ file.index = queue_sel;
+ file.fd = dev->kickfds[queue_sel];
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);
+
+ return 0;
+}
+
+static int
+virtio_user_queue_setup(struct virtio_user_dev *dev,
+ int (*fn)(struct virtio_user_dev *, uint32_t))
+{
+ uint32_t i, queue_sel;
+
+ for (i = 0; i < dev->max_queue_pairs; ++i) {
+ queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
+ if (fn(dev, queue_sel) < 0) {
+ PMD_DRV_LOG(INFO, "setup rx vq fails: %u", i);
+ return -1;
+ }
+ }
+ for (i = 0; i < dev->max_queue_pairs; ++i) {
+ queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
+ if (fn(dev, queue_sel) < 0) {
+ PMD_DRV_LOG(INFO, "setup tx vq fails: %u", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int
+is_vhost_user_by_type(const char *path)
+{
+ struct stat sb;
+
+ if (stat(path, &sb) == -1)
+ return 0;
+
+ return S_ISSOCK(sb.st_mode);
+}
+
+int
+virtio_user_start_device(struct virtio_user_dev *dev)
+{
+ uint64_t features;
+ int ret;
+
+ pthread_mutex_lock(&dev->mutex);
+
+ if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
+ goto error;
+
+ /* Do not check return as already done in init, or reset in stop */
+ dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL);
+
+ /* Step 0: tell vhost to create queues */
+ if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
+ goto error;
+
+ /* Step 1: set features */
+ features = dev->features;
+ /* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
+ features &= ~(1ull << VIRTIO_NET_F_MAC);
+ /* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
+ features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
+ features &= ~(1ull << VIRTIO_NET_F_STATUS);
+ ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
+ if (ret < 0)
+ goto error;
+ PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
+
+ /* Step 2: share memory regions */
+ ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
+ if (ret < 0)
+ goto error;
+
+ /* Step 3: kick queues */
+ if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
+ goto error;
+
+ /* Step 4: enable queues
+ * we enable the 1st queue pair by default.
+ */
+ dev->ops->enable_qp(dev, 0, 1);
+
+ dev->started = true;
+ pthread_mutex_unlock(&dev->mutex);
+
+ return 0;
+error:
+ pthread_mutex_unlock(&dev->mutex);
+ /* TODO: free resource here or caller to check */
+ return -1;
+}
+
+int virtio_user_stop_device(struct virtio_user_dev *dev)
+{
+ uint32_t i;
+
+ pthread_mutex_lock(&dev->mutex);
+ for (i = 0; i < dev->max_queue_pairs; ++i)
+ dev->ops->enable_qp(dev, i, 0);
+
+ if (dev->ops->send_request(dev, VHOST_USER_RESET_OWNER, NULL) < 0) {
+ PMD_DRV_LOG(INFO, "Failed to reset the device\n");
+ pthread_mutex_unlock(&dev->mutex);
+ return -1;
+ }
+ dev->started = false;
+ pthread_mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
+static inline void
+parse_mac(struct virtio_user_dev *dev, const char *mac)
+{
+ int i, r;
+ uint32_t tmp[ETHER_ADDR_LEN];
+
+ if (!mac)
+ return;
+
+ r = sscanf(mac, "%x:%x:%x:%x:%x:%x", &tmp[0],
+ &tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]);
+ if (r == ETHER_ADDR_LEN) {
+ for (i = 0; i < ETHER_ADDR_LEN; ++i)
+ dev->mac_addr[i] = (uint8_t)tmp[i];
+ dev->mac_specified = 1;
+ } else {
+ /* ignore the wrong mac, use random mac */
+ PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
+ }
+}
+
+static int
+virtio_user_dev_init_notify(struct virtio_user_dev *dev)
+{
+ uint32_t i, j;
+ int callfd;
+ int kickfd;
+
+ for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
+ if (i >= dev->max_queue_pairs * 2) {
+ dev->kickfds[i] = -1;
+ dev->callfds[i] = -1;
+ continue;
+ }
+
+ /* May use invalid flag, but some backend uses kickfd and
+ * callfd as criteria to judge if dev is alive. so finally we
+ * use real event_fd.
+ */
+ callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (callfd < 0) {
+ PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
+ break;
+ }
+ kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (kickfd < 0) {
+ PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
+ break;
+ }
+ dev->callfds[i] = callfd;
+ dev->kickfds[i] = kickfd;
+ }
+
+ if (i < VIRTIO_MAX_VIRTQUEUES) {
+ for (j = 0; j <= i; ++j) {
+ close(dev->callfds[j]);
+ close(dev->kickfds[j]);
+ }
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
+{
+ uint32_t i;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+
+ if (!eth_dev->intr_handle) {
+ eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
+ if (!eth_dev->intr_handle) {
+ PMD_DRV_LOG(ERR, "fail to allocate intr_handle");
+ return -1;
+ }
+ memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
+ }
+
+ for (i = 0; i < dev->max_queue_pairs; ++i)
+ eth_dev->intr_handle->efds[i] = dev->callfds[i];
+ eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
+ eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
+ eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
+ /* For virtio vdev, no need to read counter for clean */
+ eth_dev->intr_handle->efd_counter_size = 0;
+ eth_dev->intr_handle->fd = -1;
+ if (dev->vhostfd >= 0)
+ eth_dev->intr_handle->fd = dev->vhostfd;
+ else if (dev->is_server)
+ eth_dev->intr_handle->fd = dev->listenfd;
+
+ return 0;
+}
+
+static void
+virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
+ const void *addr __rte_unused,
+ size_t len __rte_unused,
+ void *arg)
+{
+ struct virtio_user_dev *dev = arg;
+ uint16_t i;
+
+ pthread_mutex_lock(&dev->mutex);
+
+ if (dev->started == false)
+ goto exit;
+
+ /* Step 1: pause the active queues */
+ for (i = 0; i < dev->queue_pairs; i++)
+ dev->ops->enable_qp(dev, i, 0);
+
+ /* Step 2: update memory regions */
+ dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
+
+ /* Step 3: resume the active queues */
+ for (i = 0; i < dev->queue_pairs; i++)
+ dev->ops->enable_qp(dev, i, 1);
+
+exit:
+ pthread_mutex_unlock(&dev->mutex);
+}
+
+static int
+virtio_user_dev_setup(struct virtio_user_dev *dev)
+{
+ uint32_t q;
+
+ dev->vhostfd = -1;
+ dev->vhostfds = NULL;
+ dev->tapfds = NULL;
+
+ if (dev->is_server) {
+ if (access(dev->path, F_OK) == 0 &&
+ !is_vhost_user_by_type(dev->path)) {
+ PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
+ return -1;
+ }
+ dev->ops = &ops_user;
+ } else {
+ if (is_vhost_user_by_type(dev->path)) {
+ dev->ops = &ops_user;
+ } else {
+ dev->ops = &ops_kernel;
+
+ dev->vhostfds = malloc(dev->max_queue_pairs *
+ sizeof(int));
+ dev->tapfds = malloc(dev->max_queue_pairs *
+ sizeof(int));
+ if (!dev->vhostfds || !dev->tapfds) {
+ PMD_INIT_LOG(ERR, "Failed to malloc");
+ return -1;
+ }
+
+ for (q = 0; q < dev->max_queue_pairs; ++q) {
+ dev->vhostfds[q] = -1;
+ dev->tapfds[q] = -1;
+ }
+ }
+ }
+
+ if (dev->ops->setup(dev) < 0)
+ return -1;
+
+ if (virtio_user_dev_init_notify(dev) < 0)
+ return -1;
+
+ if (virtio_user_fill_intr_handle(dev) < 0)
+ return -1;
+
+ return 0;
+}
+
+/* Use below macro to filter features from vhost backend */
+#define VIRTIO_USER_SUPPORTED_FEATURES \
+ (1ULL << VIRTIO_NET_F_MAC | \
+ 1ULL << VIRTIO_NET_F_STATUS | \
+ 1ULL << VIRTIO_NET_F_MQ | \
+ 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR | \
+ 1ULL << VIRTIO_NET_F_CTRL_VQ | \
+ 1ULL << VIRTIO_NET_F_CTRL_RX | \
+ 1ULL << VIRTIO_NET_F_CTRL_VLAN | \
+ 1ULL << VIRTIO_NET_F_CSUM | \
+ 1ULL << VIRTIO_NET_F_HOST_TSO4 | \
+ 1ULL << VIRTIO_NET_F_HOST_TSO6 | \
+ 1ULL << VIRTIO_NET_F_MRG_RXBUF | \
+ 1ULL << VIRTIO_RING_F_INDIRECT_DESC | \
+ 1ULL << VIRTIO_NET_F_GUEST_CSUM | \
+ 1ULL << VIRTIO_NET_F_GUEST_TSO4 | \
+ 1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
+ 1ULL << VIRTIO_F_IN_ORDER | \
+ 1ULL << VIRTIO_F_VERSION_1)
+
+int
+virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
+ int cq, int queue_size, const char *mac, char **ifname,
+ int mrg_rxbuf, int in_order)
+{
+ pthread_mutex_init(&dev->mutex, NULL);
+ snprintf(dev->path, PATH_MAX, "%s", path);
+ dev->started = 0;
+ dev->max_queue_pairs = queues;
+ dev->queue_pairs = 1; /* mq disabled by default */
+ dev->queue_size = queue_size;
+ dev->mac_specified = 0;
+ dev->unsupported_features = 0;
+ parse_mac(dev, mac);
+
+ if (*ifname) {
+ dev->ifname = *ifname;
+ *ifname = NULL;
+ }
+
+ if (virtio_user_dev_setup(dev) < 0) {
+ PMD_INIT_LOG(ERR, "backend set up fails");
+ return -1;
+ }
+
+ if (!dev->is_server) {
+ if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
+ NULL) < 0) {
+ PMD_INIT_LOG(ERR, "set_owner fails: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
+ &dev->device_features) < 0) {
+ PMD_INIT_LOG(ERR, "get_features failed: %s",
+ strerror(errno));
+ return -1;
+ }
+ } else {
+ /* We just pretend vhost-user can support all these features.
+ * Note that this could be problematic that if some feature is
+ * negotiated but not supported by the vhost-user which comes
+ * later.
+ */
+ dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
+ }
+
+ if (!mrg_rxbuf) {
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_MRG_RXBUF);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);
+ }
+
+ if (!in_order) {
+ dev->device_features &= ~(1ull << VIRTIO_F_IN_ORDER);
+ dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
+ }
+
+ if (dev->mac_specified) {
+ dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
+ } else {
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);
+ }
+
+ if (cq) {
+ /* device does not really need to know anything about CQ,
+ * so if necessary, we just claim to support CQ
+ */
+ dev->device_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+ } else {
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
+ /* Also disable features depends on VIRTIO_NET_F_CTRL_VQ */
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_MQ);
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
+ dev->unsupported_features |=
+ (1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
+ dev->unsupported_features |=
+ (1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
+ }
+
+ /* The backend will not report this feature, we add it explicitly */
+ if (is_vhost_user_by_type(dev->path))
+ dev->device_features |= (1ull << VIRTIO_NET_F_STATUS);
+
+ dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
+ dev->unsupported_features |= ~VIRTIO_USER_SUPPORTED_FEATURES;
+
+ if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
+ virtio_user_mem_event_cb, dev)) {
+ if (rte_errno != ENOTSUP) {
+ PMD_INIT_LOG(ERR, "Failed to register mem event"
+ " callback\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+void
+virtio_user_dev_uninit(struct virtio_user_dev *dev)
+{
+ uint32_t i;
+
+ virtio_user_stop_device(dev);
+
+ rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);
+
+ for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
+ close(dev->callfds[i]);
+ close(dev->kickfds[i]);
+ }
+
+ close(dev->vhostfd);
+
+ if (dev->is_server && dev->listenfd >= 0) {
+ close(dev->listenfd);
+ dev->listenfd = -1;
+ }
+
+ if (dev->vhostfds) {
+ for (i = 0; i < dev->max_queue_pairs; ++i)
+ close(dev->vhostfds[i]);
+ free(dev->vhostfds);
+ free(dev->tapfds);
+ }
+
+ free(dev->ifname);
+
+ if (dev->is_server)
+ unlink(dev->path);
+}
+
+uint8_t
+virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
+{
+ uint16_t i;
+ uint8_t ret = 0;
+
+ if (q_pairs > dev->max_queue_pairs) {
+ PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
+ q_pairs, dev->max_queue_pairs);
+ return -1;
+ }
+
+ /* Server mode can't enable queue pairs if vhostfd is invalid,
+ * always return 0 in this case.
+ */
+ if (dev->vhostfd >= 0) {
+ for (i = 0; i < q_pairs; ++i)
+ ret |= dev->ops->enable_qp(dev, i, 1);
+ for (i = q_pairs; i < dev->max_queue_pairs; ++i)
+ ret |= dev->ops->enable_qp(dev, i, 0);
+ } else if (!dev->is_server) {
+ ret = ~0;
+ }
+ dev->queue_pairs = q_pairs;
+
+ return ret;
+}
+
+static uint32_t
+virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
+ uint16_t idx_hdr)
+{
+ struct virtio_net_ctrl_hdr *hdr;
+ virtio_net_ctrl_ack status = ~0;
+ uint16_t i, idx_data, idx_status;
+ uint32_t n_descs = 0;
+
+ /* locate desc for header, data, and status */
+ idx_data = vring->desc[idx_hdr].next;
+ n_descs++;
+
+ i = idx_data;
+ while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
+ i = vring->desc[i].next;
+ n_descs++;
+ }
+
+ /* locate desc for status */
+ idx_status = i;
+ n_descs++;
+
+ hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
+ if (hdr->class == VIRTIO_NET_CTRL_MQ &&
+ hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ uint16_t queues;
+
+ queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
+ status = virtio_user_handle_mq(dev, queues);
+ }
+
+ /* Update status */
+ *(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;
+
+ return n_descs;
+}
+
+void
+virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
+{
+ uint16_t avail_idx, desc_idx;
+ struct vring_used_elem *uep;
+ uint32_t n_descs;
+ struct vring *vring = &dev->vrings[queue_idx];
+
+ /* Consume avail ring, using used ring idx as first one */
+ while (vring->used->idx != vring->avail->idx) {
+ avail_idx = (vring->used->idx) & (vring->num - 1);
+ desc_idx = vring->avail->ring[avail_idx];
+
+ n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);
+
+ /* Update used ring */
+ uep = &vring->used->ring[avail_idx];
+ uep->id = avail_idx;
+ uep->len = n_descs;
+
+ vring->used->idx++;
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.h b/src/spdk/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.h
new file mode 100644
index 00000000..d6e0e137
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#ifndef _VIRTIO_USER_DEV_H
+#define _VIRTIO_USER_DEV_H
+
+#include <limits.h>
+#include <stdbool.h>
+#include "../virtio_pci.h"
+#include "../virtio_ring.h"
+#include "vhost.h"
+
+struct virtio_user_dev {
+ /* for vhost_user backend */
+ int vhostfd;
+ int listenfd; /* listening fd */
+ bool is_server; /* server or client mode */
+
+ /* for vhost_kernel backend */
+ char *ifname;
+ int *vhostfds;
+ int *tapfds;
+
+ /* for both vhost_user and vhost_kernel */
+ int callfds[VIRTIO_MAX_VIRTQUEUES];
+ int kickfds[VIRTIO_MAX_VIRTQUEUES];
+ int mac_specified;
+ uint32_t max_queue_pairs;
+ uint32_t queue_pairs;
+ uint32_t queue_size;
+ uint64_t features; /* the negotiated features with driver,
+ * and will be sync with device
+ */
+ uint64_t device_features; /* supported features by device */
+ uint64_t unsupported_features; /* unsupported features mask */
+ uint8_t status;
+ uint16_t port_id;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ char path[PATH_MAX];
+ struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
+ struct virtio_user_backend_ops *ops;
+ pthread_mutex_t mutex;
+ bool started;
+};
+
+int is_vhost_user_by_type(const char *path);
+int virtio_user_start_device(struct virtio_user_dev *dev);
+int virtio_user_stop_device(struct virtio_user_dev *dev);
+int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
+ int cq, int queue_size, const char *mac, char **ifname,
+ int mrg_rxbuf, int in_order);
+void virtio_user_dev_uninit(struct virtio_user_dev *dev);
+void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
+uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtio_user_ethdev.c b/src/spdk/dpdk/drivers/net/virtio/virtio_user_ethdev.c
new file mode 100644
index 00000000..525d16ca
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtio_user_ethdev.c
@@ -0,0 +1,687 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+
+#include <rte_malloc.h>
+#include <rte_kvargs.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_bus_vdev.h>
+#include <rte_alarm.h>
+
+#include "virtio_ethdev.h"
+#include "virtio_logs.h"
+#include "virtio_pci.h"
+#include "virtqueue.h"
+#include "virtio_rxtx.h"
+#include "virtio_user/virtio_user_dev.h"
+
+#define virtio_user_get_dev(hw) \
+ ((struct virtio_user_dev *)(hw)->virtio_user_dev)
+
+static int
+virtio_user_server_reconnect(struct virtio_user_dev *dev)
+{
+ int ret;
+ int flag;
+ int connectfd;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+
+ connectfd = accept(dev->listenfd, NULL, NULL);
+ if (connectfd < 0)
+ return -1;
+
+ dev->vhostfd = connectfd;
+ if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
+ &dev->device_features) < 0) {
+ PMD_INIT_LOG(ERR, "get_features failed: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ /* umask vhost-user unsupported features */
+ dev->device_features &= ~(dev->unsupported_features);
+
+ dev->features &= dev->device_features;
+
+ flag = fcntl(connectfd, F_GETFD);
+ fcntl(connectfd, F_SETFL, flag | O_NONBLOCK);
+
+ ret = virtio_user_start_device(dev);
+ if (ret < 0)
+ return -1;
+
+ if (dev->queue_pairs > 1) {
+ ret = virtio_user_handle_mq(dev, dev->queue_pairs);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!");
+ return -1;
+ }
+ }
+ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
+ if (rte_intr_disable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt disable failed");
+ return -1;
+ }
+ rte_intr_callback_unregister(eth_dev->intr_handle,
+ virtio_interrupt_handler,
+ eth_dev);
+ eth_dev->intr_handle->fd = connectfd;
+ rte_intr_callback_register(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+
+ if (rte_intr_enable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return -1;
+ }
+ }
+ PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
+ return 0;
+}
+
+static void
+virtio_user_delayed_handler(void *param)
+{
+ struct virtio_hw *hw = (struct virtio_hw *)param;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ if (rte_intr_disable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt disable failed");
+ return;
+ }
+ rte_intr_callback_unregister(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+ if (dev->is_server) {
+ if (dev->vhostfd >= 0) {
+ close(dev->vhostfd);
+ dev->vhostfd = -1;
+ }
+ eth_dev->intr_handle->fd = dev->listenfd;
+ rte_intr_callback_register(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+ if (rte_intr_enable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return;
+ }
+ }
+}
+
+static void
+virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ int i;
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ if (offset == offsetof(struct virtio_net_config, mac) &&
+ length == ETHER_ADDR_LEN) {
+ for (i = 0; i < ETHER_ADDR_LEN; ++i)
+ ((uint8_t *)dst)[i] = dev->mac_addr[i];
+ return;
+ }
+
+ if (offset == offsetof(struct virtio_net_config, status)) {
+ char buf[128];
+
+ if (dev->vhostfd >= 0) {
+ int r;
+ int flags;
+
+ flags = fcntl(dev->vhostfd, F_GETFL);
+ if (fcntl(dev->vhostfd, F_SETFL,
+ flags | O_NONBLOCK) == -1) {
+ PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
+ return;
+ }
+ r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
+ if (r == 0 || (r < 0 && errno != EAGAIN)) {
+ dev->status &= (~VIRTIO_NET_S_LINK_UP);
+ PMD_DRV_LOG(ERR, "virtio-user port %u is down",
+ hw->port_id);
+
+ /* This function could be called in the process
+ * of interrupt handling, callback cannot be
+ * unregistered here, set an alarm to do it.
+ */
+ rte_eal_alarm_set(1,
+ virtio_user_delayed_handler,
+ (void *)hw);
+ } else {
+ dev->status |= VIRTIO_NET_S_LINK_UP;
+ }
+ if (fcntl(dev->vhostfd, F_SETFL,
+ flags & ~O_NONBLOCK) == -1) {
+ PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
+ return;
+ }
+ } else if (dev->is_server) {
+ dev->status &= (~VIRTIO_NET_S_LINK_UP);
+ if (virtio_user_server_reconnect(dev) >= 0)
+ dev->status |= VIRTIO_NET_S_LINK_UP;
+ }
+
+ *(uint16_t *)dst = dev->status;
+ }
+
+ if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
+ *(uint16_t *)dst = dev->max_queue_pairs;
+}
+
+static void
+virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ int i;
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ if ((offset == offsetof(struct virtio_net_config, mac)) &&
+ (length == ETHER_ADDR_LEN))
+ for (i = 0; i < ETHER_ADDR_LEN; ++i)
+ dev->mac_addr[i] = ((const uint8_t *)src)[i];
+ else
+ PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
+ offset, length);
+}
+
+static void
+virtio_user_reset(struct virtio_hw *hw)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
+ virtio_user_stop_device(dev);
+}
+
+static void
+virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
+ virtio_user_start_device(dev);
+ else if (status == VIRTIO_CONFIG_STATUS_RESET)
+ virtio_user_reset(hw);
+ dev->status = status;
+}
+
+static uint8_t
+virtio_user_get_status(struct virtio_hw *hw)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ return dev->status;
+}
+
+static uint64_t
+virtio_user_get_features(struct virtio_hw *hw)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ /* unmask feature bits defined in vhost user protocol */
+ return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
+}
+
+static void
+virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ dev->features = features & dev->device_features;
+}
+
+static uint8_t
+virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
+{
+ /* rxq interrupts and config interrupt are separated in virtio-user,
+ * here we only report config change.
+ */
+ return VIRTIO_PCI_ISR_CONFIG;
+}
+
+static uint16_t
+virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
+ uint16_t vec __rte_unused)
+{
+ return 0;
+}
+
+static uint16_t
+virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
+ struct virtqueue *vq __rte_unused,
+ uint16_t vec)
+{
+ /* pretend we have done that */
+ return vec;
+}
+
+/* This function is to get the queue size, aka, number of descs, of a specified
+ * queue. Different with the VHOST_USER_GET_QUEUE_NUM, which is used to get the
+ * max supported queues.
+ */
+static uint16_t
+virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ /* Currently, each queue has same queue size */
+ return dev->queue_size;
+}
+
+static int
+virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+ uint16_t queue_idx = vq->vq_queue_index;
+ uint64_t desc_addr, avail_addr, used_addr;
+
+ desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
+ avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
+ ring[vq->vq_nentries]),
+ VIRTIO_PCI_VRING_ALIGN);
+
+ dev->vrings[queue_idx].num = vq->vq_nentries;
+ dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
+ dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
+ dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
+
+ return 0;
+}
+
+static void
+virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ /* For legacy devices, write 0 to VIRTIO_PCI_QUEUE_PFN port, QEMU
+ * correspondingly stops the ioeventfds, and reset the status of
+ * the device.
+ * For modern devices, set queue desc, avail, used in PCI bar to 0,
+ * not see any more behavior in QEMU.
+ *
+ * Here we just care about what information to deliver to vhost-user
+ * or vhost-kernel. So we just close ioeventfd for now.
+ */
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ close(dev->callfds[vq->vq_queue_index]);
+ close(dev->kickfds[vq->vq_queue_index]);
+}
+
+static void
+virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ uint64_t buf = 1;
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ if (hw->cvq && (hw->cvq->vq == vq)) {
+ virtio_user_handle_cq(dev, vq->vq_queue_index);
+ return;
+ }
+
+ if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
+ PMD_DRV_LOG(ERR, "failed to kick backend: %s",
+ strerror(errno));
+}
+
+const struct virtio_pci_ops virtio_user_ops = {
+ .read_dev_cfg = virtio_user_read_dev_config,
+ .write_dev_cfg = virtio_user_write_dev_config,
+ .reset = virtio_user_reset,
+ .get_status = virtio_user_get_status,
+ .set_status = virtio_user_set_status,
+ .get_features = virtio_user_get_features,
+ .set_features = virtio_user_set_features,
+ .get_isr = virtio_user_get_isr,
+ .set_config_irq = virtio_user_set_config_irq,
+ .set_queue_irq = virtio_user_set_queue_irq,
+ .get_queue_num = virtio_user_get_queue_num,
+ .setup_queue = virtio_user_setup_queue,
+ .del_queue = virtio_user_del_queue,
+ .notify_queue = virtio_user_notify_queue,
+};
+
+static const char *valid_args[] = {
+#define VIRTIO_USER_ARG_QUEUES_NUM "queues"
+ VIRTIO_USER_ARG_QUEUES_NUM,
+#define VIRTIO_USER_ARG_CQ_NUM "cq"
+ VIRTIO_USER_ARG_CQ_NUM,
+#define VIRTIO_USER_ARG_MAC "mac"
+ VIRTIO_USER_ARG_MAC,
+#define VIRTIO_USER_ARG_PATH "path"
+ VIRTIO_USER_ARG_PATH,
+#define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size"
+ VIRTIO_USER_ARG_QUEUE_SIZE,
+#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
+ VIRTIO_USER_ARG_INTERFACE_NAME,
+#define VIRTIO_USER_ARG_SERVER_MODE "server"
+ VIRTIO_USER_ARG_SERVER_MODE,
+#define VIRTIO_USER_ARG_MRG_RXBUF "mrg_rxbuf"
+ VIRTIO_USER_ARG_MRG_RXBUF,
+#define VIRTIO_USER_ARG_IN_ORDER "in_order"
+ VIRTIO_USER_ARG_IN_ORDER,
+ NULL
+};
+
+#define VIRTIO_USER_DEF_CQ_EN 0
+#define VIRTIO_USER_DEF_Q_NUM 1
+#define VIRTIO_USER_DEF_Q_SZ 256
+#define VIRTIO_USER_DEF_SERVER_MODE 0
+
+static int
+get_string_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(char **)extra_args = strdup(value);
+
+ if (!*(char **)extra_args)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int
+get_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(uint64_t *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+
+static struct rte_vdev_driver virtio_user_driver;
+
+static struct rte_eth_dev *
+virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
+{
+ struct rte_eth_dev *eth_dev;
+ struct rte_eth_dev_data *data;
+ struct virtio_hw *hw;
+ struct virtio_user_dev *dev;
+
+ eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*hw));
+ if (!eth_dev) {
+ PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
+ return NULL;
+ }
+
+ data = eth_dev->data;
+ hw = eth_dev->data->dev_private;
+
+ dev = rte_zmalloc(NULL, sizeof(*dev), 0);
+ if (!dev) {
+ PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed");
+ rte_eth_dev_release_port(eth_dev);
+ rte_free(hw);
+ return NULL;
+ }
+
+ hw->port_id = data->port_id;
+ dev->port_id = data->port_id;
+ virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
+ /*
+ * MSIX is required to enable LSC (see virtio_init_device).
+ * Here just pretend that we support msix.
+ */
+ hw->use_msix = 1;
+ hw->modern = 0;
+ hw->use_simple_rx = 0;
+ hw->use_inorder_rx = 0;
+ hw->use_inorder_tx = 0;
+ hw->virtio_user_dev = dev;
+ return eth_dev;
+}
+
+static void
+virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
+{
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct virtio_hw *hw = data->dev_private;
+
+ rte_free(hw->virtio_user_dev);
+ rte_free(hw);
+ rte_eth_dev_release_port(eth_dev);
+}
+
+/* Dev initialization routine. Invoked once for each virtio vdev at
+ * EAL init time, see rte_bus_probe().
+ * Returns 0 on success.
+ */
+static int
+virtio_user_pmd_probe(struct rte_vdev_device *dev)
+{
+ struct rte_kvargs *kvlist = NULL;
+ struct rte_eth_dev *eth_dev;
+ struct virtio_hw *hw;
+ uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
+ uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
+ uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
+ uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
+ uint64_t mrg_rxbuf = 1;
+ uint64_t in_order = 1;
+ char *path = NULL;
+ char *ifname = NULL;
+ char *mac_addr = NULL;
+ int ret = -1;
+
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args);
+ if (!kvlist) {
+ PMD_INIT_LOG(ERR, "error when parsing param");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
+ &get_string_arg, &path) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_PATH);
+ goto end;
+ }
+ } else {
+ PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
+ VIRTIO_USER_ARG_QUEUE_SIZE);
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
+ if (is_vhost_user_by_type(path)) {
+ PMD_INIT_LOG(ERR,
+ "arg %s applies only to vhost-kernel backend",
+ VIRTIO_USER_ARG_INTERFACE_NAME);
+ goto end;
+ }
+
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
+ &get_string_arg, &ifname) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_INTERFACE_NAME);
+ goto end;
+ }
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
+ &get_string_arg, &mac_addr) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_MAC);
+ goto end;
+ }
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
+ &get_integer_arg, &queue_size) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_QUEUE_SIZE);
+ goto end;
+ }
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
+ &get_integer_arg, &queues) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_QUEUES_NUM);
+ goto end;
+ }
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
+ &get_integer_arg, &server_mode) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_SERVER_MODE);
+ goto end;
+ }
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
+ &get_integer_arg, &cq) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_CQ_NUM);
+ goto end;
+ }
+ } else if (queues > 1) {
+ cq = 1;
+ }
+
+ if (queues > 1 && cq == 0) {
+ PMD_INIT_LOG(ERR, "multi-q requires ctrl-q");
+ goto end;
+ }
+
+ if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
+ PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
+ VIRTIO_USER_ARG_QUEUES_NUM, queues,
+ VIRTIO_MAX_VIRTQUEUE_PAIRS);
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
+ &get_integer_arg, &mrg_rxbuf) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_MRG_RXBUF);
+ goto end;
+ }
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
+ &get_integer_arg, &in_order) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_IN_ORDER);
+ goto end;
+ }
+ }
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ struct virtio_user_dev *vu_dev;
+
+ eth_dev = virtio_user_eth_dev_alloc(dev);
+ if (!eth_dev) {
+ PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
+ goto end;
+ }
+
+ hw = eth_dev->data->dev_private;
+ vu_dev = virtio_user_get_dev(hw);
+ if (server_mode == 1)
+ vu_dev->is_server = true;
+ else
+ vu_dev->is_server = false;
+ if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
+ queue_size, mac_addr, &ifname, mrg_rxbuf,
+ in_order) < 0) {
+ PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
+ virtio_user_eth_dev_free(eth_dev);
+ goto end;
+ }
+
+ } else {
+ eth_dev = rte_eth_dev_attach_secondary(rte_vdev_device_name(dev));
+ if (!eth_dev)
+ goto end;
+ }
+
+ /* previously called by rte_pci_probe() for physical dev */
+ if (eth_virtio_dev_init(eth_dev) < 0) {
+ PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
+ virtio_user_eth_dev_free(eth_dev);
+ goto end;
+ }
+
+ rte_eth_dev_probing_finish(eth_dev);
+ ret = 0;
+
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (path)
+ free(path);
+ if (mac_addr)
+ free(mac_addr);
+ if (ifname)
+ free(ifname);
+ return ret;
+}
+
+/** Called by rte_eth_dev_detach() */
+static int
+virtio_user_pmd_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ struct rte_eth_dev *eth_dev;
+ struct virtio_hw *hw;
+ struct virtio_user_dev *dev;
+
+ if (!vdev)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(vdev);
+ PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
+ eth_dev = rte_eth_dev_allocated(name);
+ if (!eth_dev)
+ return -ENODEV;
+
+ /* make sure the device is stopped, queues freed */
+ rte_eth_dev_close(eth_dev->data->port_id);
+
+ hw = eth_dev->data->dev_private;
+ dev = hw->virtio_user_dev;
+ virtio_user_dev_uninit(dev);
+
+ rte_free(eth_dev->data->dev_private);
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver virtio_user_driver = {
+ .probe = virtio_user_pmd_probe,
+ .remove = virtio_user_pmd_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
+RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
+RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
+ "path=<path> "
+ "mac=<mac addr> "
+ "cq=<int> "
+ "queue_size=<int> "
+ "queues=<int> "
+ "iface=<string> "
+ "server=<0|1> "
+ "mrg_rxbuf=<0|1> "
+ "in_order=<0|1>");
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtqueue.c b/src/spdk/dpdk/drivers/net/virtio/virtqueue.c
new file mode 100644
index 00000000..56a77cc7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtqueue.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+#include "virtqueue.h"
+#include "virtio_logs.h"
+#include "virtio_pci.h"
+#include "virtio_rxtx_simple.h"
+
+/*
+ * Two types of mbuf to be cleaned:
+ * 1) mbuf that has been consumed by backend but not used by virtio.
+ * 2) mbuf that hasn't been consued by backend.
+ */
+struct rte_mbuf *
+virtqueue_detach_unused(struct virtqueue *vq)
+{
+ struct rte_mbuf *cookie;
+ struct virtio_hw *hw;
+ uint16_t start, end;
+ int type, idx;
+
+ if (vq == NULL)
+ return NULL;
+
+ hw = vq->hw;
+ type = virtio_get_queue_type(hw, vq->vq_queue_index);
+ start = vq->vq_avail_idx & (vq->vq_nentries - 1);
+ end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
+
+ for (idx = 0; idx < vq->vq_nentries; idx++) {
+ if (hw->use_simple_rx && type == VTNET_RQ) {
+ if (start <= end && idx >= start && idx < end)
+ continue;
+ if (start > end && (idx >= start || idx < end))
+ continue;
+ cookie = vq->sw_ring[idx];
+ if (cookie != NULL) {
+ vq->sw_ring[idx] = NULL;
+ return cookie;
+ }
+ } else {
+ cookie = vq->vq_descx[idx].cookie;
+ if (cookie != NULL) {
+ vq->vq_descx[idx].cookie = NULL;
+ return cookie;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/* Flush the elements in the used ring. */
+void
+virtqueue_rxvq_flush(struct virtqueue *vq)
+{
+ struct virtnet_rx *rxq = &vq->rxq;
+ struct virtio_hw *hw = vq->hw;
+ struct vring_used_elem *uep;
+ struct vq_desc_extra *dxp;
+ uint16_t used_idx, desc_idx;
+ uint16_t nb_used, i;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ for (i = 0; i < nb_used; i++) {
+ used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
+ uep = &vq->vq_ring.used->ring[used_idx];
+ if (hw->use_simple_rx) {
+ desc_idx = used_idx;
+ rte_pktmbuf_free(vq->sw_ring[desc_idx]);
+ vq->vq_free_cnt++;
+ } else if (hw->use_inorder_rx) {
+ desc_idx = (uint16_t)uep->id;
+ dxp = &vq->vq_descx[desc_idx];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ vq_ring_free_inorder(vq, desc_idx, 1);
+ } else {
+ desc_idx = (uint16_t)uep->id;
+ dxp = &vq->vq_descx[desc_idx];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ vq_ring_free_chain(vq, desc_idx);
+ }
+ vq->vq_used_cons_idx++;
+ }
+
+ if (hw->use_simple_rx) {
+ while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ virtio_rxq_rearm_vec(rxq);
+ if (virtqueue_kick_prepare(vq))
+ virtqueue_notify(vq);
+ }
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/virtio/virtqueue.h b/src/spdk/dpdk/drivers/net/virtio/virtqueue.h
new file mode 100644
index 00000000..26518ed9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/virtio/virtqueue.h
@@ -0,0 +1,371 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _VIRTQUEUE_H_
+#define _VIRTQUEUE_H_
+
+#include <stdint.h>
+
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+#include "virtio_logs.h"
+#include "virtio_rxtx.h"
+
+struct rte_mbuf;
+
+/*
+ * Per virtio_config.h in Linux.
+ * For virtio_pci on SMP, we don't need to order with respect to MMIO
+ * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * sufficient.
+ *
+ */
+#define virtio_mb() rte_smp_mb()
+#define virtio_rmb() rte_smp_rmb()
+#define virtio_wmb() rte_smp_wmb()
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while(0)
+#endif
+
+#define VIRTQUEUE_MAX_NAME_SZ 32
+
+#ifdef RTE_VIRTIO_USER
+/**
+ * Return the physical address (or virtual address in case of
+ * virtio-user) of mbuf data buffer.
+ *
+ * The address is firstly casted to the word size (sizeof(uintptr_t))
+ * before casting it to uint64_t. This is to make it work with different
+ * combination of word size (64 bit and 32 bit) and virtio device
+ * (virtio-pci and virtio-user).
+ */
+#define VIRTIO_MBUF_ADDR(mb, vq) \
+ ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
+#else
+#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova)
+#endif
+
+/**
+ * Return the physical address (or virtual address in case of
+ * virtio-user) of mbuf data buffer, taking care of mbuf data offset
+ */
+#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
+ (VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
+
+#define VTNET_SQ_RQ_QUEUE_IDX 0
+#define VTNET_SQ_TQ_QUEUE_IDX 1
+#define VTNET_SQ_CQ_QUEUE_IDX 2
+
+enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
+/**
+ * The maximum virtqueue size is 2^15. Use that value as the end of
+ * descriptor chain terminator since it will never be a valid index
+ * in the descriptor table. This is used to verify we are correctly
+ * handling vq_free_cnt.
+ */
+#define VQ_RING_DESC_CHAIN_END 32768
+
+/**
+ * Control the RX mode, ie. promiscuous, allmulti, etc...
+ * All commands require an "out" sg entry containing a 1 byte
+ * state value, zero = disable, non-zero = enable. Commands
+ * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
+ * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
+ */
+#define VIRTIO_NET_CTRL_RX 0
+#define VIRTIO_NET_CTRL_RX_PROMISC 0
+#define VIRTIO_NET_CTRL_RX_ALLMULTI 1
+#define VIRTIO_NET_CTRL_RX_ALLUNI 2
+#define VIRTIO_NET_CTRL_RX_NOMULTI 3
+#define VIRTIO_NET_CTRL_RX_NOUNI 4
+#define VIRTIO_NET_CTRL_RX_NOBCAST 5
+
+/**
+ * Control the MAC
+ *
+ * The MAC filter table is managed by the hypervisor, the guest should
+ * assume the size is infinite. Filtering should be considered
+ * non-perfect, ie. based on hypervisor resources, the guest may
+ * received packets from sources not specified in the filter list.
+ *
+ * In addition to the class/cmd header, the TABLE_SET command requires
+ * two out scatterlists. Each contains a 4 byte count of entries followed
+ * by a concatenated byte stream of the ETH_ALEN MAC addresses. The
+ * first sg list contains unicast addresses, the second is for multicast.
+ * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
+ * is available.
+ *
+ * The ADDR_SET command requests one out scatterlist, it contains a
+ * 6 bytes MAC address. This functionality is present if the
+ * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available.
+ */
+struct virtio_net_ctrl_mac {
+ uint32_t entries;
+ uint8_t macs[][ETHER_ADDR_LEN];
+} __attribute__((__packed__));
+
+#define VIRTIO_NET_CTRL_MAC 1
+#define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
+#define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
+
+/**
+ * Control VLAN filtering
+ *
+ * The VLAN filter table is controlled via a simple ADD/DEL interface.
+ * VLAN IDs not added may be filtered by the hypervisor. Del is the
+ * opposite of add. Both commands expect an out entry containing a 2
+ * byte VLAN ID. VLAN filtering is available with the
+ * VIRTIO_NET_F_CTRL_VLAN feature bit.
+ */
+#define VIRTIO_NET_CTRL_VLAN 2
+#define VIRTIO_NET_CTRL_VLAN_ADD 0
+#define VIRTIO_NET_CTRL_VLAN_DEL 1
+
+/*
+ * Control link announce acknowledgement
+ *
+ * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
+ * driver has recevied the notification; device would clear the
+ * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
+ * this command.
+ */
+#define VIRTIO_NET_CTRL_ANNOUNCE 3
+#define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0
+
+struct virtio_net_ctrl_hdr {
+ uint8_t class;
+ uint8_t cmd;
+} __attribute__((packed));
+
+typedef uint8_t virtio_net_ctrl_ack;
+
+#define VIRTIO_NET_OK 0
+#define VIRTIO_NET_ERR 1
+
+#define VIRTIO_MAX_CTRL_DATA 2048
+
+struct virtio_pmd_ctrl {
+ struct virtio_net_ctrl_hdr hdr;
+ virtio_net_ctrl_ack status;
+ uint8_t data[VIRTIO_MAX_CTRL_DATA];
+};
+
+struct vq_desc_extra {
+ void *cookie;
+ uint16_t ndescs;
+};
+
+struct virtqueue {
+ struct virtio_hw *hw; /**< virtio_hw structure pointer. */
+ struct vring vq_ring; /**< vring keeping desc, used and avail */
+ /**
+ * Last consumed descriptor in the used table,
+ * trails vq_ring.used->idx.
+ */
+ uint16_t vq_used_cons_idx;
+ uint16_t vq_nentries; /**< vring desc numbers */
+ uint16_t vq_free_cnt; /**< num of desc available */
+ uint16_t vq_avail_idx; /**< sync until needed */
+ uint16_t vq_free_thresh; /**< free threshold */
+
+ void *vq_ring_virt_mem; /**< linear address of vring*/
+ unsigned int vq_ring_size;
+
+ union {
+ struct virtnet_rx rxq;
+ struct virtnet_tx txq;
+ struct virtnet_ctl cq;
+ };
+
+ rte_iova_t vq_ring_mem; /**< physical address of vring,
+ * or virtual address for virtio_user. */
+
+ /**
+ * Head of the free chain in the descriptor table. If
+ * there are no free descriptors, this will be set to
+ * VQ_RING_DESC_CHAIN_END.
+ */
+ uint16_t vq_desc_head_idx;
+ uint16_t vq_desc_tail_idx;
+ uint16_t vq_queue_index; /**< PCI queue index */
+ uint16_t offset; /**< relative offset to obtain addr in mbuf */
+ uint16_t *notify_addr;
+ struct rte_mbuf **sw_ring; /**< RX software ring. */
+ struct vq_desc_extra vq_descx[0];
+};
+
+/* If multiqueue is provided by host, then we suppport it. */
+#define VIRTIO_NET_CTRL_MQ 4
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
+
+/**
+ * This is the first element of the scatter-gather list. If you don't
+ * specify GSO or CSUM features, you can simply ignore the header.
+ */
+struct virtio_net_hdr {
+#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /**< Use csum_start,csum_offset*/
+#define VIRTIO_NET_HDR_F_DATA_VALID 2 /**< Checksum is valid */
+ uint8_t flags;
+#define VIRTIO_NET_HDR_GSO_NONE 0 /**< Not a GSO frame */
+#define VIRTIO_NET_HDR_GSO_TCPV4 1 /**< GSO frame, IPv4 TCP (TSO) */
+#define VIRTIO_NET_HDR_GSO_UDP 3 /**< GSO frame, IPv4 UDP (UFO) */
+#define VIRTIO_NET_HDR_GSO_TCPV6 4 /**< GSO frame, IPv6 TCP */
+#define VIRTIO_NET_HDR_GSO_ECN 0x80 /**< TCP has ECN set */
+ uint8_t gso_type;
+ uint16_t hdr_len; /**< Ethernet + IP + tcp/udp hdrs */
+ uint16_t gso_size; /**< Bytes to append to hdr_len per frame */
+ uint16_t csum_start; /**< Position to start checksumming from */
+ uint16_t csum_offset; /**< Offset after that to place checksum */
+};
+
+/**
+ * This is the version of the header to use when the MRG_RXBUF
+ * feature has been negotiated.
+ */
+struct virtio_net_hdr_mrg_rxbuf {
+ struct virtio_net_hdr hdr;
+ uint16_t num_buffers; /**< Number of merged rx buffers */
+};
+
+/* Region reserved to allow for transmit header and indirect ring */
+#define VIRTIO_MAX_TX_INDIRECT 8
+struct virtio_tx_region {
+ struct virtio_net_hdr_mrg_rxbuf tx_hdr;
+ struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
+ __attribute__((__aligned__(16)));
+};
+
+/* Chain all the descriptors in the ring with an END */
+static inline void
+vring_desc_init(struct vring_desc *dp, uint16_t n)
+{
+ uint16_t i;
+
+ for (i = 0; i < n - 1; i++)
+ dp[i].next = (uint16_t)(i + 1);
+ dp[i].next = VQ_RING_DESC_CHAIN_END;
+}
+
+/**
+ * Tell the backend not to interrupt us.
+ */
+static inline void
+virtqueue_disable_intr(struct virtqueue *vq)
+{
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+/**
+ * Tell the backend to interrupt us.
+ */
+static inline void
+virtqueue_enable_intr(struct virtqueue *vq)
+{
+ vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+}
+
+/**
+ * Dump virtqueue internal structures, for debug purpose only.
+ */
+void virtqueue_dump(struct virtqueue *vq);
+/**
+ * Get all mbufs to be freed.
+ */
+struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
+
+/* Flush the elements in the used ring. */
+void virtqueue_rxvq_flush(struct virtqueue *vq);
+
+static inline int
+virtqueue_full(const struct virtqueue *vq)
+{
+ return vq->vq_free_cnt == 0;
+}
+
+static inline int
+virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
+{
+ if (vtpci_queue_idx == hw->max_queue_pairs * 2)
+ return VTNET_CQ;
+ else if (vtpci_queue_idx % 2 == 0)
+ return VTNET_RQ;
+ else
+ return VTNET_TQ;
+}
+
+#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+
+void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
+ uint16_t num);
+
+static inline void
+vq_update_avail_idx(struct virtqueue *vq)
+{
+ virtio_wmb();
+ vq->vq_ring.avail->idx = vq->vq_avail_idx;
+}
+
+static inline void
+vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
+{
+ uint16_t avail_idx;
+ /*
+ * Place the head of the descriptor chain into the next slot and make
+ * it usable to the host. The chain is made available now rather than
+ * deferring to virtqueue_notify() in the hopes that if the host is
+ * currently running on another CPU, we can keep it processing the new
+ * descriptor.
+ */
+ avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
+ if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
+ vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+ vq->vq_avail_idx++;
+}
+
+static inline int
+virtqueue_kick_prepare(struct virtqueue *vq)
+{
+ return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+}
+
+static inline void
+virtqueue_notify(struct virtqueue *vq)
+{
+ /*
+ * Ensure updated avail->idx is visible to host.
+ * For virtio on IA, the notificaiton is through io port operation
+ * which is a serialization instruction itself.
+ */
+ VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
+}
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
+#define VIRTQUEUE_DUMP(vq) do { \
+ uint16_t used_idx, nused; \
+ used_idx = (vq)->vq_ring.used->idx; \
+ nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+ PMD_INIT_LOG(DEBUG, \
+ "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
+ " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
+ " avail.flags=0x%x; used.flags=0x%x", \
+ (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
+ (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
+ (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
+ (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+} while (0)
+#else
+#define VIRTQUEUE_DUMP(vq) do { } while (0)
+#endif
+
+#endif /* _VIRTQUEUE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/vmxnet3/Makefile b/src/spdk/dpdk/drivers/net/vmxnet3/Makefile
new file mode 100644
index 00000000..f1141da6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vmxnet3/Makefile
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2015 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_vmxnet3_uio.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
+
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+#
+# CFLAGS for clang
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+else
+#
+# CFLAGS for gcc
+#
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
+CFLAGS += -Wno-deprecated
+endif
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+VPATH += $(SRCDIR)/base
+
+EXPORT_MAP := rte_pmd_vmxnet3_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_ethdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/vmxnet3/base/README b/src/spdk/dpdk/drivers/net/vmxnet3/base/README
new file mode 100644
index 00000000..599a3661
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vmxnet3/base/README
@@ -0,0 +1,47 @@
+..
+ BSD LICENSE
+
+ Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Intel VMXNET3 driver
+===================
+
+This directory contains source code of FreeBSD VMXNET3 driver released by VMware.
+In which, upt1_defs.h and vmxnet3_defs.h is introduced without any change.
+The other 4 files: includeCheck.h, vmware_pack_begin.h, vmware_pack_end.h and vmxnet3_osdep.h
+are crated to adapt to the needs from above 2 files.
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ vmxnet3_osdep.h
diff --git a/src/spdk/dpdk/drivers/net/vmxnet3/base/upt1_defs.h b/src/spdk/dpdk/drivers/net/vmxnet3/base/upt1_defs.h
new file mode 100644
index 00000000..5fd7a397
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vmxnet3/base/upt1_defs.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2007 VMware, Inc. All rights reserved.
+ */
+
+/* upt1_defs.h
+ *
+ * Definitions for UPTv1
+ *
+ * Some of the defs are duplicated in vmkapi_net_upt.h, because
+ * vmkapi_net_upt.h cannot distribute with OSS yet and vmkapi headers can
+ * only include vmkapi headers. Make sure they are kept in sync!
+ */
+
+#ifndef _UPT1_DEFS_H
+#define _UPT1_DEFS_H
+
+#define UPT1_MAX_TX_QUEUES 64
+#define UPT1_MAX_RX_QUEUES 64
+
+#define UPT1_MAX_INTRS (UPT1_MAX_TX_QUEUES + UPT1_MAX_RX_QUEUES)
+
+typedef
+#include "vmware_pack_begin.h"
+struct UPT1_TxStats {
+ uint64 TSOPktsTxOK; /* TSO pkts post-segmentation */
+ uint64 TSOBytesTxOK;
+ uint64 ucastPktsTxOK;
+ uint64 ucastBytesTxOK;
+ uint64 mcastPktsTxOK;
+ uint64 mcastBytesTxOK;
+ uint64 bcastPktsTxOK;
+ uint64 bcastBytesTxOK;
+ uint64 pktsTxError;
+ uint64 pktsTxDiscard;
+}
+#include "vmware_pack_end.h"
+UPT1_TxStats;
+
+typedef
+#include "vmware_pack_begin.h"
+struct UPT1_RxStats {
+ uint64 LROPktsRxOK; /* LRO pkts */
+ uint64 LROBytesRxOK; /* bytes from LRO pkts */
+ /* the following counters are for pkts from the wire, i.e., pre-LRO */
+ uint64 ucastPktsRxOK;
+ uint64 ucastBytesRxOK;
+ uint64 mcastPktsRxOK;
+ uint64 mcastBytesRxOK;
+ uint64 bcastPktsRxOK;
+ uint64 bcastBytesRxOK;
+ uint64 pktsRxOutOfBuf;
+ uint64 pktsRxError;
+}
+#include "vmware_pack_end.h"
+UPT1_RxStats;
+
+/* interrupt moderation level */
+#define UPT1_IML_NONE 0 /* no interrupt moderation */
+#define UPT1_IML_HIGHEST 7 /* least intr generated */
+#define UPT1_IML_ADAPTIVE 8 /* adpative intr moderation */
+
+/* values for UPT1_RSSConf.hashFunc */
+#define UPT1_RSS_HASH_TYPE_NONE 0x0
+#define UPT1_RSS_HASH_TYPE_IPV4 0x01
+#define UPT1_RSS_HASH_TYPE_TCP_IPV4 0x02
+#define UPT1_RSS_HASH_TYPE_IPV6 0x04
+#define UPT1_RSS_HASH_TYPE_TCP_IPV6 0x08
+
+#define UPT1_RSS_HASH_FUNC_NONE 0x0
+#define UPT1_RSS_HASH_FUNC_TOEPLITZ 0x01
+
+#define UPT1_RSS_MAX_KEY_SIZE 40
+#define UPT1_RSS_MAX_IND_TABLE_SIZE 128
+
+typedef
+#include "vmware_pack_begin.h"
+struct UPT1_RSSConf {
+ uint16 hashType;
+ uint16 hashFunc;
+ uint16 hashKeySize;
+ uint16 indTableSize;
+ uint8 hashKey[UPT1_RSS_MAX_KEY_SIZE];
+ uint8 indTable[UPT1_RSS_MAX_IND_TABLE_SIZE];
+}
+#include "vmware_pack_end.h"
+UPT1_RSSConf;
+
+/* features */
+#define UPT1_F_RXCSUM 0x0001 /* rx csum verification */
+#define UPT1_F_RSS 0x0002
+#define UPT1_F_RXVLAN 0x0004 /* VLAN tag stripping */
+#define UPT1_F_LRO 0x0008
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/vmxnet3/base/vmware_pack_begin.h b/src/spdk/dpdk/drivers/net/vmxnet3/base/vmware_pack_begin.h
new file mode 100644
index 00000000..df22590f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vmxnet3/base/vmware_pack_begin.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
diff --git a/src/spdk/dpdk/drivers/net/vmxnet3/base/vmware_pack_end.h b/src/spdk/dpdk/drivers/net/vmxnet3/base/vmware_pack_end.h
new file mode 100644
index 00000000..df22590f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vmxnet3/base/vmware_pack_end.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
diff --git a/src/spdk/dpdk/drivers/net/vmxnet3/base/vmxnet3_defs.h b/src/spdk/dpdk/drivers/net/vmxnet3/base/vmxnet3_defs.h
new file mode 100644
index 00000000..bbec708c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vmxnet3/base/vmxnet3_defs.h
@@ -0,0 +1,821 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2007 VMware, Inc. All rights reserved.
+ */
+
+/*
+ * vmxnet3_defs.h --
+ *
+ * Definitions shared by device emulation and guest drivers for
+ * VMXNET3 NIC
+ */
+
+#ifndef _VMXNET3_DEFS_H_
+#define _VMXNET3_DEFS_H_
+
+#include "vmxnet3_osdep.h"
+#include "upt1_defs.h"
+
+/* all registers are 32 bit wide */
+/* BAR 1 */
+#define VMXNET3_REG_VRRS 0x0 /* Vmxnet3 Revision Report Selection */
+#define VMXNET3_REG_UVRS 0x8 /* UPT Version Report Selection */
+#define VMXNET3_REG_DSAL 0x10 /* Driver Shared Address Low */
+#define VMXNET3_REG_DSAH 0x18 /* Driver Shared Address High */
+#define VMXNET3_REG_CMD 0x20 /* Command */
+#define VMXNET3_REG_MACL 0x28 /* MAC Address Low */
+#define VMXNET3_REG_MACH 0x30 /* MAC Address High */
+#define VMXNET3_REG_ICR 0x38 /* Interrupt Cause Register */
+#define VMXNET3_REG_ECR 0x40 /* Event Cause Register */
+
+#define VMXNET3_REG_WSAL 0xF00 /* Wireless Shared Address Lo */
+#define VMXNET3_REG_WSAH 0xF08 /* Wireless Shared Address Hi */
+#define VMXNET3_REG_WCMD 0xF18 /* Wireless Command */
+
+/* BAR 0 */
+#define VMXNET3_REG_IMR 0x0 /* Interrupt Mask Register */
+#define VMXNET3_REG_TXPROD 0x600 /* Tx Producer Index */
+#define VMXNET3_REG_RXPROD 0x800 /* Rx Producer Index for ring 1 */
+#define VMXNET3_REG_RXPROD2 0xA00 /* Rx Producer Index for ring 2 */
+
+#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
+#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
+
+/*
+ * The two Vmxnet3 MMIO Register PCI BARs (BAR 0 at offset 10h and BAR 1 at
+ * offset 14h) as well as the MSI-X BAR are combined into one PhysMem region:
+ * <-VMXNET3_PT_REG_SIZE-><-VMXNET3_VD_REG_SIZE-><-VMXNET3_MSIX_BAR_SIZE-->
+ * -------------------------------------------------------------------------
+ * |Pass Thru Registers | Virtual Dev Registers | MSI-X Vector/PBA Table |
+ * -------------------------------------------------------------------------
+ * VMXNET3_MSIX_BAR_SIZE is defined in "vmxnet3Int.h"
+ */
+#define VMXNET3_PHYSMEM_PAGES 4
+
+#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */
+#define VMXNET3_REG_ALIGN_MASK 0x7
+
+/* I/O Mapped access to registers */
+#define VMXNET3_IO_TYPE_PT 0
+#define VMXNET3_IO_TYPE_VD 1
+#define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF))
+#define VMXNET3_IO_TYPE(addr) ((addr) >> 24)
+#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
+
+#ifndef __le16
+#define __le16 uint16
+#endif
+#ifndef __le32
+#define __le32 uint32
+#endif
+#ifndef __le64
+#define __le64 uint64
+#endif
+
+typedef enum {
+ VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
+ VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET,
+ VMXNET3_CMD_QUIESCE_DEV,
+ VMXNET3_CMD_RESET_DEV,
+ VMXNET3_CMD_UPDATE_RX_MODE,
+ VMXNET3_CMD_UPDATE_MAC_FILTERS,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS,
+ VMXNET3_CMD_UPDATE_RSSIDT,
+ VMXNET3_CMD_UPDATE_IML,
+ VMXNET3_CMD_UPDATE_PMCFG,
+ VMXNET3_CMD_UPDATE_FEATURE,
+ VMXNET3_CMD_STOP_EMULATION,
+ VMXNET3_CMD_LOAD_PLUGIN,
+ VMXNET3_CMD_ACTIVATE_VF,
+ VMXNET3_CMD_RESERVED3,
+ VMXNET3_CMD_RESERVED4,
+ VMXNET3_CMD_REGISTER_MEMREGS,
+
+ VMXNET3_CMD_FIRST_GET = 0xF00D0000,
+ VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
+ VMXNET3_CMD_GET_STATS,
+ VMXNET3_CMD_GET_LINK,
+ VMXNET3_CMD_GET_PERM_MAC_LO,
+ VMXNET3_CMD_GET_PERM_MAC_HI,
+ VMXNET3_CMD_GET_DID_LO,
+ VMXNET3_CMD_GET_DID_HI,
+ VMXNET3_CMD_GET_DEV_EXTRA_INFO,
+ VMXNET3_CMD_GET_CONF_INTR,
+ VMXNET3_CMD_GET_ADAPTIVE_RING_INFO,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE,
+ VMXNET3_CMD_RESERVED5,
+} Vmxnet3_Cmd;
+
+/* Adaptive Ring Info Flags */
+#define VMXNET3_DISABLE_ADAPTIVE_RING 1
+
+/*
+ * Little Endian layout of bitfields -
+ * Byte 0 : 7.....len.....0
+ * Byte 1 : rsvd gen 13.len.8
+ * Byte 2 : 5.msscof.0 ext1 dtype
+ * Byte 3 : 13...msscof...6
+ *
+ * Big Endian layout of bitfields -
+ * Byte 0: 13...msscof...6
+ * Byte 1 : 5.msscof.0 ext1 dtype
+ * Byte 2 : rsvd gen 13.len.8
+ * Byte 3 : 7.....len.....0
+ *
+ * Thus, le32_to_cpu on the dword will allow the big endian driver to read
+ * the bit fields correctly. And cpu_to_le32 will convert bitfields
+ * bit fields written by big endian driver to format required by device.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxDesc {
+ __le64 addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 msscof:14; /* MSS, checksum offset, flags */
+ uint32 ext1:1;
+ uint32 dtype:1; /* descriptor type */
+ uint32 rsvd:1;
+ uint32 gen:1; /* generation bit */
+ uint32 len:14;
+#else
+ uint32 len:14;
+ uint32 gen:1; /* generation bit */
+ uint32 rsvd:1;
+ uint32 dtype:1; /* descriptor type */
+ uint32 ext1:1;
+ uint32 msscof:14; /* MSS, checksum offset, flags */
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 tci:16; /* Tag to Insert */
+ uint32 ti:1; /* VLAN Tag Insertion */
+ uint32 ext2:1;
+ uint32 cq:1; /* completion request */
+ uint32 eop:1; /* End Of Packet */
+ uint32 om:2; /* offload mode */
+ uint32 hlen:10; /* header len */
+#else
+ uint32 hlen:10; /* header len */
+ uint32 om:2; /* offload mode */
+ uint32 eop:1; /* End Of Packet */
+ uint32 cq:1; /* completion request */
+ uint32 ext2:1;
+ uint32 ti:1; /* VLAN Tag Insertion */
+ uint32 tci:16; /* Tag to Insert */
+#endif /* __BIG_ENDIAN_BITFIELD */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxDesc;
+
+/* TxDesc.OM values */
+#define VMXNET3_OM_NONE 0
+#define VMXNET3_OM_CSUM 2
+#define VMXNET3_OM_TSO 3
+
+/* fields in TxDesc we access w/o using bit fields */
+#define VMXNET3_TXD_EOP_SHIFT 12
+#define VMXNET3_TXD_CQ_SHIFT 13
+#define VMXNET3_TXD_GEN_SHIFT 14
+#define VMXNET3_TXD_EOP_DWORD_SHIFT 3
+#define VMXNET3_TXD_GEN_DWORD_SHIFT 2
+
+#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT)
+#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
+#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT)
+
+#define VMXNET3_TXD_GEN_SIZE 1
+#define VMXNET3_TXD_EOP_SIZE 1
+
+#define VMXNET3_HDR_COPY_SIZE 128
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxDataDesc {
+ uint8 data[VMXNET3_HDR_COPY_SIZE];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxDataDesc;
+
+#define VMXNET3_TCD_GEN_SHIFT 31
+#define VMXNET3_TCD_GEN_SIZE 1
+#define VMXNET3_TCD_TXIDX_SHIFT 0
+#define VMXNET3_TCD_TXIDX_SIZE 12
+#define VMXNET3_TCD_GEN_DWORD_SHIFT 3
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxCompDesc {
+ uint32 txdIdx:12; /* Index of the EOP TxDesc */
+ uint32 ext1:20;
+
+ __le32 ext2;
+ __le32 ext3;
+
+ uint32 rsvd:24;
+ uint32 type:7; /* completion type */
+ uint32 gen:1; /* generation bit */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxCompDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxDesc {
+ __le64 addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 gen:1; /* Generation bit */
+ uint32 rsvd:15;
+ uint32 dtype:1; /* Descriptor type */
+ uint32 btype:1; /* Buffer Type */
+ uint32 len:14;
+#else
+ uint32 len:14;
+ uint32 btype:1; /* Buffer Type */
+ uint32 dtype:1; /* Descriptor type */
+ uint32 rsvd:15;
+ uint32 gen:1; /* Generation bit */
+#endif
+ __le32 ext1;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxDesc;
+
+/* values of RXD.BTYPE */
+#define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */
+#define VMXNET3_RXD_BTYPE_BODY 1 /* body only */
+
+/* fields in RxDesc we access w/o using bit fields */
+#define VMXNET3_RXD_BTYPE_SHIFT 14
+#define VMXNET3_RXD_GEN_SHIFT 31
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxCompDesc {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 ext2:1;
+ uint32 cnc:1; /* Checksum Not Calculated */
+ uint32 rssType:4; /* RSS hash type used */
+ uint32 rqID:10; /* rx queue/ring ID */
+ uint32 sop:1; /* Start of Packet */
+ uint32 eop:1; /* End of Packet */
+ uint32 ext1:2;
+ uint32 rxdIdx:12; /* Index of the RxDesc */
+#else
+ uint32 rxdIdx:12; /* Index of the RxDesc */
+ uint32 ext1:2;
+ uint32 eop:1; /* End of Packet */
+ uint32 sop:1; /* Start of Packet */
+ uint32 rqID:10; /* rx queue/ring ID */
+ uint32 rssType:4; /* RSS hash type used */
+ uint32 cnc:1; /* Checksum Not Calculated */
+ uint32 ext2:1;
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+ __le32 rssHash; /* RSS hash value */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 tci:16; /* Tag stripped */
+ uint32 ts:1; /* Tag is stripped */
+ uint32 err:1; /* Error */
+ uint32 len:14; /* data length */
+#else
+ uint32 len:14; /* data length */
+ uint32 err:1; /* Error */
+ uint32 ts:1; /* Tag is stripped */
+ uint32 tci:16; /* Tag stripped */
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 gen:1; /* generation bit */
+ uint32 type:7; /* completion type */
+ uint32 fcs:1; /* Frame CRC correct */
+ uint32 frg:1; /* IP Fragment */
+ uint32 v4:1; /* IPv4 */
+ uint32 v6:1; /* IPv6 */
+ uint32 ipc:1; /* IP Checksum Correct */
+ uint32 tcp:1; /* TCP packet */
+ uint32 udp:1; /* UDP packet */
+ uint32 tuc:1; /* TCP/UDP Checksum Correct */
+ uint32 csum:16;
+#else
+ uint32 csum:16;
+ uint32 tuc:1; /* TCP/UDP Checksum Correct */
+ uint32 udp:1; /* UDP packet */
+ uint32 tcp:1; /* TCP packet */
+ uint32 ipc:1; /* IP Checksum Correct */
+ uint32 v6:1; /* IPv6 */
+ uint32 v4:1; /* IPv4 */
+ uint32 frg:1; /* IP Fragment */
+ uint32 fcs:1; /* Frame CRC correct */
+ uint32 type:7; /* completion type */
+ uint32 gen:1; /* generation bit */
+#endif /* __BIG_ENDIAN_BITFIELD */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxCompDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxCompDescExt {
+ __le32 dword1;
+ uint8 segCnt; /* Number of aggregated packets */
+ uint8 dupAckCnt; /* Number of duplicate Acks */
+ __le16 tsDelta; /* TCP timestamp difference */
+ __le32 dword2;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 gen : 1; /* generation bit */
+ uint32 type : 7; /* completion type */
+ uint32 fcs : 1; /* Frame CRC correct */
+ uint32 frg : 1; /* IP Fragment */
+ uint32 v4 : 1; /* IPv4 */
+ uint32 v6 : 1; /* IPv6 */
+ uint32 ipc : 1; /* IP Checksum Correct */
+ uint32 tcp : 1; /* TCP packet */
+ uint32 udp : 1; /* UDP packet */
+ uint32 tuc : 1; /* TCP/UDP Checksum Correct */
+ uint32 mss : 16;
+#else
+ uint32 mss : 16;
+ uint32 tuc : 1; /* TCP/UDP Checksum Correct */
+ uint32 udp : 1; /* UDP packet */
+ uint32 tcp : 1; /* TCP packet */
+ uint32 ipc : 1; /* IP Checksum Correct */
+ uint32 v6 : 1; /* IPv6 */
+ uint32 v4 : 1; /* IPv4 */
+ uint32 frg : 1; /* IP Fragment */
+ uint32 fcs : 1; /* Frame CRC correct */
+ uint32 type : 7; /* completion type */
+ uint32 gen : 1; /* generation bit */
+#endif /* __BIG_ENDIAN_BITFIELD */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxCompDescExt;
+
+/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
+#define VMXNET3_RCD_TUC_SHIFT 16
+#define VMXNET3_RCD_IPC_SHIFT 19
+
+/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */
+#define VMXNET3_RCD_TYPE_SHIFT 56
+#define VMXNET3_RCD_GEN_SHIFT 63
+
+/* csum OK for TCP/UDP pkts over IP */
+#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | 1 << VMXNET3_RCD_IPC_SHIFT)
+
+/* value of RxCompDesc.rssType */
+#define VMXNET3_RCD_RSS_TYPE_NONE 0
+#define VMXNET3_RCD_RSS_TYPE_IPV4 1
+#define VMXNET3_RCD_RSS_TYPE_TCPIPV4 2
+#define VMXNET3_RCD_RSS_TYPE_IPV6 3
+#define VMXNET3_RCD_RSS_TYPE_TCPIPV6 4
+
+/* a union for accessing all cmd/completion descriptors */
+typedef union Vmxnet3_GenericDesc {
+ __le64 qword[2];
+ __le32 dword[4];
+ __le16 word[8];
+ Vmxnet3_TxDesc txd;
+ Vmxnet3_RxDesc rxd;
+ Vmxnet3_TxCompDesc tcd;
+ Vmxnet3_RxCompDesc rcd;
+ Vmxnet3_RxCompDescExt rcdExt;
+} Vmxnet3_GenericDesc;
+
+#define VMXNET3_INIT_GEN 1
+
+/* Max size of a single tx buffer */
+#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14)
+
+/* # of tx desc needed for a tx buffer size */
+#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / VMXNET3_MAX_TX_BUF_SIZE)
+
+/* max # of tx descs for a non-tso pkt */
+#define VMXNET3_MAX_TXD_PER_PKT 16
+
+/* Max size of a single rx buffer */
+#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1)
+/* Minimum size of a type 0 buffer */
+#define VMXNET3_MIN_T0_BUF_SIZE 128
+#define VMXNET3_MAX_CSUM_OFFSET 1024
+
+/* Ring base address alignment */
+#define VMXNET3_RING_BA_ALIGN 512
+#define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1)
+
+/* Ring size must be a multiple of 32 */
+#define VMXNET3_RING_SIZE_ALIGN 32
+#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
+
+/* Tx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_TXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_TXDATA_DESC_SIZE_MASK (VMXNET3_TXDATA_DESC_SIZE_ALIGN - 1)
+
+/* Rx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_RXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_RXDATA_DESC_SIZE_MASK (VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1)
+
+/* Max ring size */
+#define VMXNET3_TX_RING_MAX_SIZE 4096
+#define VMXNET3_TC_RING_MAX_SIZE 4096
+#define VMXNET3_RX_RING_MAX_SIZE 4096
+#define VMXNET3_RC_RING_MAX_SIZE 8192
+
+#define VMXNET3_TXDATA_DESC_MIN_SIZE 128
+#define VMXNET3_TXDATA_DESC_MAX_SIZE 2048
+
+#define VMXNET3_RXDATA_DESC_MAX_SIZE 2048
+
+/* a list of reasons for queue stop */
+
+#define VMXNET3_ERR_NOEOP 0x80000000 /* cannot find the EOP desc of a pkt */
+#define VMXNET3_ERR_TXD_REUSE 0x80000001 /* reuse a TxDesc before tx completion */
+#define VMXNET3_ERR_BIG_PKT 0x80000002 /* too many TxDesc for a pkt */
+#define VMXNET3_ERR_DESC_NOT_SPT 0x80000003 /* descriptor type not supported */
+#define VMXNET3_ERR_SMALL_BUF 0x80000004 /* type 0 buffer too small */
+#define VMXNET3_ERR_STRESS 0x80000005 /* stress option firing in vmkernel */
+#define VMXNET3_ERR_SWITCH 0x80000006 /* mode switch failure */
+#define VMXNET3_ERR_TXD_INVALID 0x80000007 /* invalid TxDesc */
+
+/* completion descriptor types */
+#define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */
+#define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */
+#define VMXNET3_CDTYPE_RXCOMP_LRO 4 /* Rx Completion Descriptor for LRO */
+
+#define VMXNET3_GOS_BITS_UNK 0 /* unknown */
+#define VMXNET3_GOS_BITS_32 1
+#define VMXNET3_GOS_BITS_64 2
+
+#define VMXNET3_GOS_TYPE_UNK 0 /* unknown */
+#define VMXNET3_GOS_TYPE_LINUX 1
+#define VMXNET3_GOS_TYPE_WIN 2
+#define VMXNET3_GOS_TYPE_SOLARIS 3
+#define VMXNET3_GOS_TYPE_FREEBSD 4
+#define VMXNET3_GOS_TYPE_PXE 5
+
+/* All structures in DriverShared are padded to multiples of 8 bytes */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_GOSInfo {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 gosMisc: 10; /* other info about gos */
+ uint32 gosVer: 16; /* gos version */
+ uint32 gosType: 4; /* which guest */
+ uint32 gosBits: 2; /* 32-bit or 64-bit? */
+#else
+ uint32 gosBits: 2; /* 32-bit or 64-bit? */
+ uint32 gosType: 4; /* which guest */
+ uint32 gosVer: 16; /* gos version */
+ uint32 gosMisc: 10; /* other info about gos */
+#endif /* __BIG_ENDIAN_BITFIELD */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_GOSInfo;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_DriverInfo {
+ __le32 version; /* driver version */
+ Vmxnet3_GOSInfo gos;
+ __le32 vmxnet3RevSpt; /* vmxnet3 revision supported */
+ __le32 uptVerSpt; /* upt version supported */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_DriverInfo;
+
+#define VMXNET3_REV1_MAGIC 0xbabefee1
+
+/*
+ * QueueDescPA must be 128 bytes aligned. It points to an array of
+ * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc.
+ * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc are specified by
+ * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively.
+ */
+#define VMXNET3_QUEUE_DESC_ALIGN 128
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_MiscConf {
+ Vmxnet3_DriverInfo driverInfo;
+ __le64 uptFeatures;
+ __le64 ddPA; /* driver data PA */
+ __le64 queueDescPA; /* queue descriptor table PA */
+ __le32 ddLen; /* driver data len */
+ __le32 queueDescLen; /* queue descriptor table len, in bytes */
+ __le32 mtu;
+ __le16 maxNumRxSG;
+ uint8 numTxQueues;
+ uint8 numRxQueues;
+ __le32 reserved[4];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_MiscConf;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxQueueConf {
+ __le64 txRingBasePA;
+ __le64 dataRingBasePA;
+ __le64 compRingBasePA;
+ __le64 ddPA; /* driver data */
+ __le64 reserved;
+ __le32 txRingSize; /* # of tx desc */
+ __le32 dataRingSize; /* # of data desc */
+ __le32 compRingSize; /* # of comp desc */
+ __le32 ddLen; /* size of driver data */
+ uint8 intrIdx;
+ uint8 _pad[1];
+ __le16 txDataRingDescSize;
+ uint8 _pad2[4];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxQueueConf;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxQueueConf {
+ __le64 rxRingBasePA[2];
+ __le64 compRingBasePA;
+ __le64 ddPA; /* driver data */
+ __le64 rxDataRingBasePA;
+ __le32 rxRingSize[2]; /* # of rx desc */
+ __le32 compRingSize; /* # of rx comp desc */
+ __le32 ddLen; /* size of driver data */
+ uint8 intrIdx;
+ uint8 _pad1[1];
+ __le16 rxDataRingDescSize; /* size of rx data ring buffer */
+ uint8 _pad2[4];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxQueueConf;
+
+enum vmxnet3_intr_mask_mode {
+ VMXNET3_IMM_AUTO = 0,
+ VMXNET3_IMM_ACTIVE = 1,
+ VMXNET3_IMM_LAZY = 2
+};
+
+enum vmxnet3_intr_type {
+ VMXNET3_IT_AUTO = 0,
+ VMXNET3_IT_INTX = 1,
+ VMXNET3_IT_MSI = 2,
+ VMXNET3_IT_MSIX = 3
+};
+
+#define VMXNET3_MAX_TX_QUEUES 8
+#define VMXNET3_MAX_RX_QUEUES 16
+/* addition 1 for events */
+#define VMXNET3_MAX_INTRS 25
+
+/* value of intrCtrl */
+#define VMXNET3_IC_DISABLE_ALL 0x1 /* bit 0 */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_IntrConf {
+ Bool autoMask;
+ uint8 numIntrs; /* # of interrupts */
+ uint8 eventIntrIdx;
+ uint8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for each intr */
+ __le32 intrCtrl;
+ __le32 reserved[2];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_IntrConf;
+
+/* one bit per VLAN ID, the size is in the units of uint32 */
+#define VMXNET3_VFT_SIZE (4096 / (sizeof(uint32) * 8))
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_QueueStatus {
+ Bool stopped;
+ uint8 _pad[3];
+ __le32 error;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_QueueStatus;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxQueueCtrl {
+ __le32 txNumDeferred;
+ __le32 txThreshold;
+ __le64 reserved;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxQueueCtrl;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxQueueCtrl {
+ Bool updateRxProd;
+ uint8 _pad[7];
+ __le64 reserved;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxQueueCtrl;
+
+#define VMXNET3_RXM_UCAST 0x01 /* unicast only */
+#define VMXNET3_RXM_MCAST 0x02 /* multicast passing the filters */
+#define VMXNET3_RXM_BCAST 0x04 /* broadcast only */
+#define VMXNET3_RXM_ALL_MULTI 0x08 /* all multicast */
+#define VMXNET3_RXM_PROMISC 0x10 /* promiscuous */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxFilterConf {
+ __le32 rxMode; /* VMXNET3_RXM_xxx */
+ __le16 mfTableLen; /* size of the multicast filter table */
+ __le16 _pad1;
+ __le64 mfTablePA; /* PA of the multicast filters table */
+ __le32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxFilterConf;
+
+#define VMXNET3_PM_MAX_FILTERS 6
+#define VMXNET3_PM_MAX_PATTERN_SIZE 128
+#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
+
+#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */
+#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching filters */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_PM_PktFilter {
+ uint8 maskSize;
+ uint8 patternSize;
+ uint8 mask[VMXNET3_PM_MAX_MASK_SIZE];
+ uint8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE];
+ uint8 pad[6];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_PM_PktFilter;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_PMConf {
+ __le16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */
+ uint8 numFilters;
+ uint8 pad[5];
+ Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_PMConf;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_VariableLenConfDesc {
+ __le32 confVer;
+ __le32 confLen;
+ __le64 confPA;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_VariableLenConfDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_DSDevRead {
+ /* read-only region for device, read by dev in response to a SET cmd */
+ Vmxnet3_MiscConf misc;
+ Vmxnet3_IntrConf intrConf;
+ Vmxnet3_RxFilterConf rxFilterConf;
+ Vmxnet3_VariableLenConfDesc rssConfDesc;
+ Vmxnet3_VariableLenConfDesc pmConfDesc;
+ Vmxnet3_VariableLenConfDesc pluginConfDesc;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_DSDevRead;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxQueueDesc {
+ Vmxnet3_TxQueueCtrl ctrl;
+ Vmxnet3_TxQueueConf conf;
+ /* Driver read after a GET command */
+ Vmxnet3_QueueStatus status;
+ UPT1_TxStats stats;
+ uint8 _pad[88]; /* 128 aligned */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxQueueDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxQueueDesc {
+ Vmxnet3_RxQueueCtrl ctrl;
+ Vmxnet3_RxQueueConf conf;
+ /* Driver read after a GET command */
+ Vmxnet3_QueueStatus status;
+ UPT1_RxStats stats;
+ uint8 _pad[88]; /* 128 aligned */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxQueueDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_SetPolling {
+ uint8 enablePolling;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_SetPolling;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_MemoryRegion {
+ __le64 startPA;
+ __le32 length;
+ __le16 txQueueBits; /* bit n corresponding to tx queue n */
+ __le16 rxQueueBits; /* bit n corresponding to rx queue n */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_MemoryRegion;
+
+#define MAX_MEMORY_REGION_PER_QUEUE 16
+#define MAX_MEMORY_REGION_PER_DEVICE 256
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_MemRegs {
+ __le16 numRegs;
+ __le16 pad[3];
+ Vmxnet3_MemoryRegion memRegs[1];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_MemRegs;
+
+/*
+ * If the command data <= 16 bytes, use the shared memory direcly.
+ * Otherwise, use the variable length configuration descriptor.
+ */
+typedef
+#include "vmware_pack_begin.h"
+union Vmxnet3_CmdInfo {
+ Vmxnet3_VariableLenConfDesc varConf;
+ Vmxnet3_SetPolling setPolling;
+ __le64 data[2];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_CmdInfo;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_DriverShared {
+ __le32 magic;
+ __le32 pad; /* make devRead start at 64-bit boundaries */
+ Vmxnet3_DSDevRead devRead;
+ __le32 ecr;
+ __le32 reserved;
+
+ union {
+ __le32 reserved1[4];
+ Vmxnet3_CmdInfo cmdInfo; /* only valid in the context of executing the
+ * relevant command
+ */
+ } cu;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_DriverShared;
+
+#define VMXNET3_ECR_RQERR (1 << 0)
+#define VMXNET3_ECR_TQERR (1 << 1)
+#define VMXNET3_ECR_LINK (1 << 2)
+#define VMXNET3_ECR_DIC (1 << 3)
+#define VMXNET3_ECR_DEBUG (1 << 4)
+
+/* flip the gen bit of a ring */
+#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1)
+
+/* only use this if moving the idx won't affect the gen bit */
+#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \
+do {\
+ (idx)++;\
+ if (UNLIKELY((idx) == (ring_size))) {\
+ (idx) = 0;\
+ }\
+} while (0)
+
+#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \
+ vfTable[vid >> 5] |= (1 << (vid & 31))
+#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \
+ vfTable[vid >> 5] &= ~(1 << (vid & 31))
+
+#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \
+ ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
+
+#define VMXNET3_MAX_MTU 9000
+#define VMXNET3_MIN_MTU 60
+
+#define VMXNET3_LINK_UP (10000 << 16 | 1) // 10 Gbps, up
+#define VMXNET3_LINK_DOWN 0
+
+#define VMXWIFI_DRIVER_SHARED_LEN 8192
+
+#define VMXNET3_DID_PASSTHRU 0xFFFF
+
+#endif /* _VMXNET3_DEFS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/vmxnet3/base/vmxnet3_osdep.h b/src/spdk/dpdk/drivers/net/vmxnet3/base/vmxnet3_osdep.h
new file mode 100644
index 00000000..c9b92b04
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vmxnet3/base/vmxnet3_osdep.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _VMXNET3_OSDEP_H
+#define _VMXNET3_OSDEP_H
+
+typedef uint64_t uint64;
+typedef uint32_t uint32;
+typedef uint16_t uint16;
+typedef uint8_t uint8;
+typedef int bool;
+typedef char Bool;
+
+#ifndef UNLIKELY
+#define UNLIKELY(x) __builtin_expect((x),0)
+#endif /* unlikely */
+
+#endif /* _VMXNET3_OSDEP_H */
diff --git a/src/spdk/dpdk/drivers/net/vmxnet3/rte_pmd_vmxnet3_version.map b/src/spdk/dpdk/drivers/net/vmxnet3/rte_pmd_vmxnet3_version.map
new file mode 100644
index 00000000..ef353984
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vmxnet3/rte_pmd_vmxnet3_version.map
@@ -0,0 +1,4 @@
+DPDK_2.0 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c b/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c
new file mode 100644
index 00000000..2613cd13
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -0,0 +1,1331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_string_fns.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include "base/vmxnet3_defs.h"
+
+#include "vmxnet3_ring.h"
+#include "vmxnet3_logs.h"
+#include "vmxnet3_ethdev.h"
+
+#define PROCESS_SYS_EVENTS 0
+
+#define VMXNET3_TX_MAX_SEG UINT8_MAX
+
+#define VMXNET3_TX_OFFLOAD_CAP \
+ (DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define VMXNET3_RX_OFFLOAD_CAP \
+ (DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_SCATTER | \
+ DEV_RX_OFFLOAD_IPV4_CKSUM | \
+ DEV_RX_OFFLOAD_UDP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_LRO | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_CRC_STRIP)
+
+static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
+static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
+static int vmxnet3_dev_start(struct rte_eth_dev *dev);
+static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
+static void vmxnet3_dev_close(struct rte_eth_dev *dev);
+static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
+static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
+static int vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats,
+ unsigned int n);
+static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned int n);
+static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static const uint32_t *
+vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vid, int on);
+static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+static void vmxnet3_interrupt_handler(void *param);
+
+int vmxnet3_logtype_init;
+int vmxnet3_logtype_driver;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+#define VMWARE_PCI_VENDOR_ID 0x15AD
+#define VMWARE_DEV_ID_VMXNET3 0x07B0
+static const struct rte_pci_id pci_id_vmxnet3_map[] = {
+ { RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
+ .dev_configure = vmxnet3_dev_configure,
+ .dev_start = vmxnet3_dev_start,
+ .dev_stop = vmxnet3_dev_stop,
+ .dev_close = vmxnet3_dev_close,
+ .promiscuous_enable = vmxnet3_dev_promiscuous_enable,
+ .promiscuous_disable = vmxnet3_dev_promiscuous_disable,
+ .allmulticast_enable = vmxnet3_dev_allmulticast_enable,
+ .allmulticast_disable = vmxnet3_dev_allmulticast_disable,
+ .link_update = vmxnet3_dev_link_update,
+ .stats_get = vmxnet3_dev_stats_get,
+ .xstats_get_names = vmxnet3_dev_xstats_get_names,
+ .xstats_get = vmxnet3_dev_xstats_get,
+ .mac_addr_set = vmxnet3_mac_addr_set,
+ .dev_infos_get = vmxnet3_dev_info_get,
+ .dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
+ .vlan_filter_set = vmxnet3_dev_vlan_filter_set,
+ .vlan_offload_set = vmxnet3_dev_vlan_offload_set,
+ .rx_queue_setup = vmxnet3_dev_rx_queue_setup,
+ .rx_queue_release = vmxnet3_dev_rx_queue_release,
+ .tx_queue_setup = vmxnet3_dev_tx_queue_setup,
+ .tx_queue_release = vmxnet3_dev_tx_queue_release,
+};
+
+struct vmxnet3_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
+/* tx_qX_ is prepended to the name string here */
+static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
+ {"drop_total", offsetof(struct vmxnet3_txq_stats, drop_total)},
+ {"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
+ {"drop_tso", offsetof(struct vmxnet3_txq_stats, drop_tso)},
+ {"tx_ring_full", offsetof(struct vmxnet3_txq_stats, tx_ring_full)},
+};
+
+/* rx_qX_ is prepended to the name string here */
+static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
+ {"drop_total", offsetof(struct vmxnet3_rxq_stats, drop_total)},
+ {"drop_err", offsetof(struct vmxnet3_rxq_stats, drop_err)},
+ {"drop_fcs", offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
+ {"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
+};
+
+static const struct rte_memzone *
+gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
+ const char *post_string, int socket_id,
+ uint16_t align, bool reuse)
+{
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ snprintf(z_name, sizeof(z_name), "%s_%d_%s",
+ dev->device->driver->name, dev->data->port_id, post_string);
+
+ mz = rte_memzone_lookup(z_name);
+ if (!reuse) {
+ if (mz)
+ rte_memzone_free(mz);
+ return rte_memzone_reserve_aligned(z_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, align);
+ }
+
+ if (mz)
+ return mz;
+
+ return rte_memzone_reserve_aligned(z_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, align);
+}
+
+/*
+ * This function is based on vmxnet3_disable_intr()
+ */
+static void
+vmxnet3_disable_intr(struct vmxnet3_hw *hw)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
+ for (i = 0; i < hw->num_intrs; i++)
+ VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
+}
+
+static void
+vmxnet3_enable_intr(struct vmxnet3_hw *hw)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->shared->devRead.intrConf.intrCtrl &= ~VMXNET3_IC_DISABLE_ALL;
+ for (i = 0; i < hw->num_intrs; i++)
+ VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 0);
+}
+
+/*
+ * Gets tx data ring descriptor size.
+ */
+static uint16_t
+eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
+{
+ uint16 txdata_desc_size;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
+ txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
+ return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
+ txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
+ txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
+ sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
+}
+
+/*
+ * It returns 0 on success.
+ */
+static int
+eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct vmxnet3_hw *hw = eth_dev->data->dev_private;
+ uint32_t mac_hi, mac_lo, ver;
+ struct rte_eth_link link;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
+ eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
+ eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ /*
+ * for secondary processes, we don't initialize any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ /* Vendor and Device ID need to be set before init of shared code */
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;
+
+ hw->num_rx_queues = 1;
+ hw->num_tx_queues = 1;
+ hw->bufs_per_pkt = 1;
+
+ /* Check h/w version compatibility with driver. */
+ ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
+ PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
+
+ if (ver & (1 << VMXNET3_REV_3)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_3);
+ hw->version = VMXNET3_REV_3 + 1;
+ } else if (ver & (1 << VMXNET3_REV_2)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_2);
+ hw->version = VMXNET3_REV_2 + 1;
+ } else if (ver & (1 << VMXNET3_REV_1)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_1);
+ hw->version = VMXNET3_REV_1 + 1;
+ } else {
+ PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
+ return -EIO;
+ }
+
+ PMD_INIT_LOG(DEBUG, "Using device version %d\n", hw->version);
+
+ /* Check UPT version compatibility with driver. */
+ ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
+ PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
+ if (ver & 0x1)
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
+ else {
+ PMD_INIT_LOG(ERR, "Incompatible UPT version.");
+ return -EIO;
+ }
+
+ /* Getting MAC Address */
+ mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
+ mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
+ memcpy(hw->perm_addr, &mac_lo, 4);
+ memcpy(hw->perm_addr + 4, &mac_hi, 2);
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
+ VMXNET3_MAX_MAC_ADDRS, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
+ return -ENOMEM;
+ }
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *) hw->perm_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
+ hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
+ hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
+
+ /* Put device in Quiesce Mode */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
+
+ /* allow untagged pkts */
+ VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
+
+ hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
+ eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);
+
+ hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
+ VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
+ RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
+ hw->rxdata_desc_size);
+
+ /* clear shadow stats */
+ memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
+ memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
+
+ /* set the initial link status */
+ memset(&link, 0, sizeof(link));
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_FIXED;
+ rte_eth_linkstatus_set(eth_dev, &link);
+
+ return 0;
+}
+
+static int
+eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct vmxnet3_hw *hw = eth_dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (hw->adapter_stopped == 0)
+ vmxnet3_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ eth_dev->tx_pkt_prepare = NULL;
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
+static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
+}
+
+static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
+}
+
+static struct rte_pci_driver rte_vmxnet3_pmd = {
+ .id_table = pci_id_vmxnet3_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_vmxnet3_pci_probe,
+ .remove = eth_vmxnet3_pci_remove,
+};
+
+static int
+vmxnet3_dev_configure(struct rte_eth_dev *dev)
+{
+ const struct rte_memzone *mz;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ size_t size;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
+ dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
+ PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
+ return -EINVAL;
+ }
+
+ if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
+ PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
+ return -EINVAL;
+ }
+
+ size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
+ dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
+
+ if (size > UINT16_MAX)
+ return -EINVAL;
+
+ hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
+ hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
+
+ /*
+ * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
+ * on current socket
+ */
+ mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
+ "shared", rte_socket_id(), 8, 1);
+
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ hw->shared = mz->addr;
+ hw->sharedPA = mz->iova;
+
+ /*
+ * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
+ * on current socket.
+ *
+ * We cannot reuse this memzone from previous allocation as its size
+ * depends on the number of tx and rx queues, which could be different
+ * from one config to another.
+ */
+ mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
+ VMXNET3_QUEUE_DESC_ALIGN, 0);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
+ hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
+
+ hw->queueDescPA = mz->iova;
+ hw->queue_desc_len = (uint16_t)size;
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ /* Allocate memory structure for UPT1_RSSConf and configure */
+ mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
+ "rss_conf", rte_socket_id(),
+ RTE_CACHE_LINE_SIZE, 1);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR,
+ "ERROR: Creating rss_conf structure zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ hw->rss_conf = mz->addr;
+ hw->rss_confPA = mz->iova;
+ }
+
+ return 0;
+}
+
+static void
+vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
+{
+ uint32_t val;
+
+ PMD_INIT_LOG(DEBUG,
+ "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
+ addr[0], addr[1], addr[2],
+ addr[3], addr[4], addr[5]);
+
+ memcpy(&val, addr, 4);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
+
+ memcpy(&val, addr + 4, 2);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
+}
+
+static int
+vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ Vmxnet3_DriverShared *shared = hw->shared;
+ Vmxnet3_CmdInfo *cmdInfo;
+ struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
+ uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
+ uint32_t num, i, j, size;
+
+ if (hw->memRegsPA == 0) {
+ const struct rte_memzone *mz;
+
+ size = sizeof(Vmxnet3_MemRegs) +
+ (VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
+ sizeof(Vmxnet3_MemoryRegion);
+
+ mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
+ 1);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ hw->memRegs = mz->addr;
+ hw->memRegsPA = mz->iova;
+ }
+
+ num = hw->num_rx_queues;
+
+ for (i = 0; i < num; i++) {
+ vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
+
+ mp[i] = rxq->mp;
+ index[i] = 1 << i;
+ }
+
+ /*
+ * The same mempool could be used by multiple queues. In such a case,
+ * remove duplicate mempool entries. Only one entry is kept with
+ * bitmask indicating queues that are using this mempool.
+ */
+ for (i = 1; i < num; i++) {
+ for (j = 0; j < i; j++) {
+ if (mp[i] == mp[j]) {
+ mp[i] = NULL;
+ index[j] |= 1 << i;
+ break;
+ }
+ }
+ }
+
+ j = 0;
+ for (i = 0; i < num; i++) {
+ if (mp[i] == NULL)
+ continue;
+
+ Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];
+
+ mr->startPA =
+ (uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->iova;
+ mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
+ STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
+ mr->txQueueBits = index[i];
+ mr->rxQueueBits = index[i];
+
+ PMD_INIT_LOG(INFO,
+ "index: %u startPA: %" PRIu64 " length: %u, "
+ "rxBits: %x",
+ j, mr->startPA, mr->length, mr->rxQueueBits);
+ j++;
+ }
+ hw->memRegs->numRegs = j;
+ PMD_INIT_LOG(INFO, "numRegs: %u", j);
+
+ size = sizeof(Vmxnet3_MemRegs) +
+ (j - 1) * sizeof(Vmxnet3_MemoryRegion);
+
+ cmdInfo = &shared->cu.cmdInfo;
+ cmdInfo->varConf.confVer = 1;
+ cmdInfo->varConf.confLen = size;
+ cmdInfo->varConf.confPA = hw->memRegsPA;
+
+ return 0;
+}
+
+static int
+vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf port_conf = dev->data->dev_conf;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ uint32_t mtu = dev->data->mtu;
+ Vmxnet3_DriverShared *shared = hw->shared;
+ Vmxnet3_DSDevRead *devRead = &shared->devRead;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ uint32_t i;
+ int ret;
+
+ hw->mtu = mtu;
+
+ shared->magic = VMXNET3_REV1_MAGIC;
+ devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
+
+ /* Setting up Guest OS information */
+ devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
+ VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
+ devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
+ devRead->misc.driverInfo.vmxnet3RevSpt = 1;
+ devRead->misc.driverInfo.uptVerSpt = 1;
+
+ devRead->misc.mtu = rte_le_to_cpu_32(mtu);
+ devRead->misc.queueDescPA = hw->queueDescPA;
+ devRead->misc.queueDescLen = hw->queue_desc_len;
+ devRead->misc.numTxQueues = hw->num_tx_queues;
+ devRead->misc.numRxQueues = hw->num_rx_queues;
+
+ /*
+ * Set number of interrupts to 1
+ * PMD by default disables all the interrupts but this is MUST
+ * to activate device. It needs at least one interrupt for
+ * link events to handle
+ */
+ hw->num_intrs = devRead->intrConf.numIntrs = 1;
+ devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
+
+ for (i = 0; i < hw->num_tx_queues; i++) {
+ Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
+ vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i];
+
+ txq->shared = &hw->tqd_start[i];
+
+ tqd->ctrl.txNumDeferred = 0;
+ tqd->ctrl.txThreshold = 1;
+ tqd->conf.txRingBasePA = txq->cmd_ring.basePA;
+ tqd->conf.compRingBasePA = txq->comp_ring.basePA;
+ tqd->conf.dataRingBasePA = txq->data_ring.basePA;
+
+ tqd->conf.txRingSize = txq->cmd_ring.size;
+ tqd->conf.compRingSize = txq->comp_ring.size;
+ tqd->conf.dataRingSize = txq->data_ring.size;
+ tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
+ tqd->conf.intrIdx = txq->comp_ring.intr_idx;
+ tqd->status.stopped = TRUE;
+ tqd->status.error = 0;
+ memset(&tqd->stats, 0, sizeof(tqd->stats));
+ }
+
+ for (i = 0; i < hw->num_rx_queues; i++) {
+ Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i];
+ vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
+
+ rxq->shared = &hw->rqd_start[i];
+
+ rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
+ rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
+ rqd->conf.compRingBasePA = rxq->comp_ring.basePA;
+
+ rqd->conf.rxRingSize[0] = rxq->cmd_ring[0].size;
+ rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
+ rqd->conf.compRingSize = rxq->comp_ring.size;
+ rqd->conf.intrIdx = rxq->comp_ring.intr_idx;
+ if (VMXNET3_VERSION_GE_3(hw)) {
+ rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
+ rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
+ }
+ rqd->status.stopped = TRUE;
+ rqd->status.error = 0;
+ memset(&rqd->stats, 0, sizeof(rqd->stats));
+ }
+
+ /* RxMode set to 0 of VMXNET3_RXM_xxx */
+ devRead->rxFilterConf.rxMode = 0;
+
+ /* Setting up feature flags */
+ if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
+
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ devRead->misc.uptFeatures |= VMXNET3_F_LRO;
+ devRead->misc.maxNumRxSG = 0;
+ }
+
+ if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ ret = vmxnet3_rss_configure(dev);
+ if (ret != VMXNET3_SUCCESS)
+ return ret;
+
+ devRead->misc.uptFeatures |= VMXNET3_F_RSS;
+ devRead->rssConfDesc.confVer = 1;
+ devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
+ devRead->rssConfDesc.confPA = hw->rss_confPA;
+ }
+
+ ret = vmxnet3_dev_vlan_offload_set(dev,
+ ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+ if (ret)
+ return ret;
+
+ vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);
+
+ return VMXNET3_SUCCESS;
+}
+
+/*
+ * Configure device link speed and setup link.
+ * Must be called after eth_vmxnet3_dev_init. Other wise it might fail
+ * It returns 0 on success.
+ */
+static int
+vmxnet3_dev_start(struct rte_eth_dev *dev)
+{
+ int ret;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Save stats before it is reset by CMD_ACTIVATE */
+ vmxnet3_hw_stats_save(hw);
+
+ ret = vmxnet3_setup_driver_shared(dev);
+ if (ret != VMXNET3_SUCCESS)
+ return ret;
+
+ /* check if lsc interrupt feature is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ /* Setup interrupt callback */
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ vmxnet3_interrupt_handler, dev);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) < 0) {
+ PMD_INIT_LOG(ERR, "interrupt enable failed");
+ return -EIO;
+ }
+ }
+
+ /* Exchange shared data with device */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
+ VMXNET3_GET_ADDR_LO(hw->sharedPA));
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
+ VMXNET3_GET_ADDR_HI(hw->sharedPA));
+
+ /* Activate device by register write */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
+ ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
+ return -EINVAL;
+ }
+
+ /* Setup memory region for rx buffers */
+ ret = vmxnet3_dev_setup_memreg(dev);
+ if (ret == 0) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_REGISTER_MEMREGS);
+ ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+ if (ret != 0)
+ PMD_INIT_LOG(DEBUG,
+ "Failed in setup memory region cmd\n");
+ ret = 0;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
+ }
+
+ /* Disable interrupts */
+ vmxnet3_disable_intr(hw);
+
+ /*
+ * Load RX queues with blank mbufs and update next2fill index for device
+ * Update RxMode of the device
+ */
+ ret = vmxnet3_dev_rxtx_init(dev);
+ if (ret != VMXNET3_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
+ return ret;
+ }
+
+ hw->adapter_stopped = FALSE;
+
+ /* Setting proper Rx Mode and issue Rx Mode Update command */
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
+
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ vmxnet3_enable_intr(hw);
+
+ /*
+ * Update link state from device since this won't be
+ * done upon starting with lsc in use. This is done
+ * only after enabling interrupts to avoid any race
+ * where the link state could change without an
+ * interrupt being fired.
+ */
+ __vmxnet3_dev_link_update(dev, 0);
+ }
+
+ return VMXNET3_SUCCESS;
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void
+vmxnet3_dev_stop(struct rte_eth_dev *dev)
+{
+ struct rte_eth_link link;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->adapter_stopped == 1) {
+ PMD_INIT_LOG(DEBUG, "Device already closed.");
+ return;
+ }
+
+ /* disable interrupts */
+ vmxnet3_disable_intr(hw);
+
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ rte_intr_disable(&pci_dev->intr_handle);
+
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ vmxnet3_interrupt_handler, dev);
+ }
+
+ /* quiesce the device first */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);
+
+ /* reset the device */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+ PMD_INIT_LOG(DEBUG, "Device reset.");
+ hw->adapter_stopped = 0;
+
+ vmxnet3_dev_clear_queues(dev);
+
+ /* Clear recorded link status */
+ memset(&link, 0, sizeof(link));
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_FIXED;
+ rte_eth_linkstatus_set(dev, &link);
+}
+
+/*
+ * Reset and stop device.
+ */
+static void
+vmxnet3_dev_close(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vmxnet3_dev_stop(dev);
+ hw->adapter_stopped = 1;
+}
+
+static void
+vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
+ struct UPT1_TxStats *res)
+{
+#define VMXNET3_UPDATE_TX_STAT(h, i, f, r) \
+ ((r)->f = (h)->tqd_start[(i)].stats.f + \
+ (h)->saved_tx_stats[(i)].f)
+
+ VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);
+
+#undef VMXNET3_UPDATE_TX_STAT
+}
+
+static void
+vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
+ struct UPT1_RxStats *res)
+{
+#define VMXNET3_UPDATE_RX_STAT(h, i, f, r) \
+ ((r)->f = (h)->rqd_start[(i)].stats.f + \
+ (h)->saved_rx_stats[(i)].f)
+
+ VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);
+
+#undef VMXNET3_UPDATE_RX_STATS
+}
+
+static void
+vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
+{
+ unsigned int i;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+
+ RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
+
+ for (i = 0; i < hw->num_tx_queues; i++)
+ vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
+ for (i = 0; i < hw->num_rx_queues; i++)
+ vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);
+}
+
+static int
+vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int n)
+{
+ unsigned int i, t, count = 0;
+ unsigned int nstats =
+ dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
+ dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
+
+ if (!xstats_names || n < nstats)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
+
+ for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", i,
+ vmxnet3_rxq_stat_strings[t].name);
+ count++;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
+
+ for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", i,
+ vmxnet3_txq_stat_strings[t].name);
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static int
+vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i, t, count = 0;
+ unsigned int nstats =
+ dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
+ dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
+
+ if (n < nstats)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (rxq == NULL)
+ continue;
+
+ for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
+ xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
+ vmxnet3_rxq_stat_strings[t].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (txq == NULL)
+ continue;
+
+ for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
+ xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
+ vmxnet3_txq_stat_strings[t].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static int
+vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ unsigned int i;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct UPT1_TxStats txStats;
+ struct UPT1_RxStats rxStats;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+
+ RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
+ for (i = 0; i < hw->num_tx_queues; i++) {
+ vmxnet3_hw_tx_stats_get(hw, i, &txStats);
+
+ stats->q_opackets[i] = txStats.ucastPktsTxOK +
+ txStats.mcastPktsTxOK +
+ txStats.bcastPktsTxOK;
+
+ stats->q_obytes[i] = txStats.ucastBytesTxOK +
+ txStats.mcastBytesTxOK +
+ txStats.bcastBytesTxOK;
+
+ stats->opackets += stats->q_opackets[i];
+ stats->obytes += stats->q_obytes[i];
+ stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
+ }
+
+ RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
+ for (i = 0; i < hw->num_rx_queues; i++) {
+ vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
+
+ stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
+ rxStats.mcastPktsRxOK +
+ rxStats.bcastPktsRxOK;
+
+ stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
+ rxStats.mcastBytesRxOK +
+ rxStats.bcastBytesRxOK;
+
+ stats->ipackets += stats->q_ipackets[i];
+ stats->ibytes += stats->q_ibytes[i];
+
+ stats->q_errors[i] = rxStats.pktsRxError;
+ stats->ierrors += rxStats.pktsRxError;
+ stats->imissed += rxStats.pktsRxOutOfBuf;
+ }
+
+ return 0;
+}
+
+static void
+vmxnet3_dev_info_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_dev_info *dev_info)
+{
+ dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
+ dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
+ dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
+ dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
+ dev_info->speed_capa = ETH_LINK_SPEED_10G;
+ dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
+
+ dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = VMXNET3_RX_RING_MAX_SIZE,
+ .nb_min = VMXNET3_DEF_RX_RING_SIZE,
+ .nb_align = 1,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = VMXNET3_TX_RING_MAX_SIZE,
+ .nb_min = VMXNET3_DEF_TX_RING_SIZE,
+ .nb_align = 1,
+ .nb_seg_max = VMXNET3_TX_MAX_SEG,
+ .nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
+ };
+
+ dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
+ dev_info->rx_queue_offload_capa = 0;
+ dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
+ dev_info->tx_queue_offload_capa = 0;
+}
+
+static const uint32_t *
+vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
+ return ptypes;
+ return NULL;
+}
+
+static int
+vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ ether_addr_copy(mac_addr, (struct ether_addr *)(hw->perm_addr));
+ vmxnet3_write_mac(hw, mac_addr->addr_bytes);
+ return 0;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+__vmxnet3_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct rte_eth_link link;
+ uint32_t ret;
+
+ memset(&link, 0, sizeof(link));
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
+ ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
+ if (ret & 0x1)
+ link.link_status = ETH_LINK_UP;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_FIXED;
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+vmxnet3_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ /* Link status doesn't change for stopped dev */
+ if (dev->data->dev_started == 0)
+ return -1;
+
+ return __vmxnet3_dev_link_update(dev, wait_to_complete);
+}
+
+/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
+static void
+vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
+{
+ struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
+
+ if (set)
+ rxConf->rxMode = rxConf->rxMode | feature;
+ else
+ rxConf->rxMode = rxConf->rxMode & (~feature);
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
+}
+
+/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
+static void
+vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
+
+ memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+}
+
+/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
+static void
+vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
+ else
+ memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+}
+
+/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
+static void
+vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
+}
+
+/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
+static void
+vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
+}
+
+/* Enable/disable filter on vlan */
+static int
+vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
+ uint32_t *vf_table = rxConf->vfTable;
+
+ /* save state for restore */
+ if (on)
+ VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
+ else
+ VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);
+
+ /* don't change active filter if in promiscuous mode */
+ if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
+ return 0;
+
+ /* set in hardware */
+ if (on)
+ VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
+ else
+ VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ return 0;
+}
+
+static int
+vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
+ uint32_t *vf_table = devRead->rxFilterConf.vfTable;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+ else
+ devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_FEATURE);
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
+ else
+ memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ }
+
+ return 0;
+}
+
+static void
+vmxnet3_process_events(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ uint32_t events = hw->shared->ecr;
+
+ if (!events)
+ return;
+
+ /*
+ * ECR bits when written with 1b are cleared. Hence write
+ * events back to ECR so that the bits which were set will be reset.
+ */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
+
+ /* Check if link state has changed */
+ if (events & VMXNET3_ECR_LINK) {
+ PMD_DRV_LOG(DEBUG, "Process events: VMXNET3_ECR_LINK event");
+ if (vmxnet3_dev_link_update(dev, 0) == 0)
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+
+ /* Check if there is an error on xmit/recv queues */
+ if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_QUEUE_STATUS);
+
+ if (hw->tqd_start->status.stopped)
+ PMD_DRV_LOG(ERR, "tq error 0x%x",
+ hw->tqd_start->status.error);
+
+ if (hw->rqd_start->status.stopped)
+ PMD_DRV_LOG(ERR, "rq error 0x%x",
+ hw->rqd_start->status.error);
+
+ /* Reset the device */
+ /* Have to reset the device */
+ }
+
+ if (events & VMXNET3_ECR_DIC)
+ PMD_DRV_LOG(DEBUG, "Device implementation change event.");
+
+ if (events & VMXNET3_ECR_DEBUG)
+ PMD_DRV_LOG(DEBUG, "Debug event generated by device.");
+}
+
+static void
+vmxnet3_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = param;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ vmxnet3_process_events(dev);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) < 0)
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+}
+
+RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(vmxnet3_init_log)
+{
+ vmxnet3_logtype_init = rte_log_register("pmd.net.vmxnet3.init");
+ if (vmxnet3_logtype_init >= 0)
+ rte_log_set_level(vmxnet3_logtype_init, RTE_LOG_NOTICE);
+ vmxnet3_logtype_driver = rte_log_register("pmd.net.vmxnet3.driver");
+ if (vmxnet3_logtype_driver >= 0)
+ rte_log_set_level(vmxnet3_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h b/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h
new file mode 100644
index 00000000..d3f2b352
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _VMXNET3_ETHDEV_H_
+#define _VMXNET3_ETHDEV_H_
+
+#include <rte_io.h>
+
+#define VMXNET3_MAX_MAC_ADDRS 1
+
+/* UPT feature to negotiate */
+#define VMXNET3_F_RXCSUM 0x0001
+#define VMXNET3_F_RSS 0x0002
+#define VMXNET3_F_RXVLAN 0x0004
+#define VMXNET3_F_LRO 0x0008
+
+/* Hash Types supported by device */
+#define VMXNET3_RSS_HASH_TYPE_NONE 0x0
+#define VMXNET3_RSS_HASH_TYPE_IPV4 0x01
+#define VMXNET3_RSS_HASH_TYPE_TCP_IPV4 0x02
+#define VMXNET3_RSS_HASH_TYPE_IPV6 0x04
+#define VMXNET3_RSS_HASH_TYPE_TCP_IPV6 0x08
+
+#define VMXNET3_RSS_HASH_FUNC_NONE 0x0
+#define VMXNET3_RSS_HASH_FUNC_TOEPLITZ 0x01
+
+#define VMXNET3_RSS_MAX_KEY_SIZE 40
+#define VMXNET3_RSS_MAX_IND_TABLE_SIZE 128
+
+#define VMXNET3_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP)
+
+/* RSS configuration structure - shared with device through GPA */
+typedef struct VMXNET3_RSSConf {
+ uint16_t hashType;
+ uint16_t hashFunc;
+ uint16_t hashKeySize;
+ uint16_t indTableSize;
+ uint8_t hashKey[VMXNET3_RSS_MAX_KEY_SIZE];
+ /*
+ * indTable is only element that can be changed without
+ * device quiesce-reset-update-activation cycle
+ */
+ uint8_t indTable[VMXNET3_RSS_MAX_IND_TABLE_SIZE];
+} VMXNET3_RSSConf;
+
+typedef struct vmxnet3_mf_table {
+ void *mfTableBase; /* Multicast addresses list */
+ uint64_t mfTablePA; /* Physical address of the list */
+ uint16_t num_addrs; /* number of multicast addrs */
+} vmxnet3_mf_table_t;
+
+struct vmxnet3_hw {
+ uint8_t *hw_addr0; /* BAR0: PT-Passthrough Regs */
+ uint8_t *hw_addr1; /* BAR1: VD-Virtual Device Regs */
+ /* BAR2: MSI-X Regs */
+ /* BAR3: Port IO */
+ void *back;
+
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t subsystem_device_id;
+ uint16_t subsystem_vendor_id;
+ bool adapter_stopped;
+
+ uint8_t perm_addr[ETHER_ADDR_LEN];
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint8_t bufs_per_pkt;
+
+ uint8_t version;
+
+ uint16_t txdata_desc_size; /* tx data ring buffer size */
+ uint16_t rxdata_desc_size; /* rx data ring buffer size */
+
+ uint8_t num_intrs;
+
+ Vmxnet3_TxQueueDesc *tqd_start; /* start address of all tx queue desc */
+ Vmxnet3_RxQueueDesc *rqd_start; /* start address of all rx queue desc */
+
+ Vmxnet3_DriverShared *shared;
+ uint64_t sharedPA;
+
+ uint64_t queueDescPA;
+ uint16_t queue_desc_len;
+ uint16_t mtu;
+
+ VMXNET3_RSSConf *rss_conf;
+ uint64_t rss_confPA;
+ vmxnet3_mf_table_t *mf_table;
+ uint32_t shadow_vfta[VMXNET3_VFT_SIZE];
+ Vmxnet3_MemRegs *memRegs;
+ uint64_t memRegsPA;
+#define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t))
+ UPT1_TxStats saved_tx_stats[VMXNET3_MAX_TX_QUEUES];
+ UPT1_RxStats saved_rx_stats[VMXNET3_MAX_RX_QUEUES];
+};
+
+#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
+#define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */
+#define VMXNET3_REV_1 0 /* Vmxnet3 Rev. 1 */
+
+#define VMXNET3_VERSION_GE_3(hw) ((hw)->version >= VMXNET3_REV_3 + 1)
+#define VMXNET3_VERSION_GE_2(hw) ((hw)->version >= VMXNET3_REV_2 + 1)
+
+#define VMXNET3_GET_ADDR_LO(reg) ((uint32_t)(reg))
+#define VMXNET3_GET_ADDR_HI(reg) ((uint32_t)(((uint64_t)(reg)) >> 32))
+
+/* Config space read/writes */
+
+#define VMXNET3_PCI_REG(reg) rte_read32(reg)
+
+static inline uint32_t
+vmxnet3_read_addr(volatile void *addr)
+{
+ return VMXNET3_PCI_REG(addr);
+}
+
+#define VMXNET3_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))
+
+#define VMXNET3_PCI_BAR0_REG_ADDR(hw, reg) \
+ ((volatile uint32_t *)((char *)(hw)->hw_addr0 + (reg)))
+#define VMXNET3_READ_BAR0_REG(hw, reg) \
+ vmxnet3_read_addr(VMXNET3_PCI_BAR0_REG_ADDR((hw), (reg)))
+#define VMXNET3_WRITE_BAR0_REG(hw, reg, value) \
+ VMXNET3_PCI_REG_WRITE(VMXNET3_PCI_BAR0_REG_ADDR((hw), (reg)), (value))
+
+#define VMXNET3_PCI_BAR1_REG_ADDR(hw, reg) \
+ ((volatile uint32_t *)((char *)(hw)->hw_addr1 + (reg)))
+#define VMXNET3_READ_BAR1_REG(hw, reg) \
+ vmxnet3_read_addr(VMXNET3_PCI_BAR1_REG_ADDR((hw), (reg)))
+#define VMXNET3_WRITE_BAR1_REG(hw, reg, value) \
+ VMXNET3_PCI_REG_WRITE(VMXNET3_PCI_BAR1_REG_ADDR((hw), (reg)), (value))
+
+static inline uint8_t
+vmxnet3_get_ring_idx(struct vmxnet3_hw *hw, uint32 rqID)
+{
+ return (rqID >= hw->num_rx_queues &&
+ rqID < 2 * hw->num_rx_queues) ? 1 : 0;
+}
+
+static inline bool
+vmxnet3_rx_data_ring(struct vmxnet3_hw *hw, uint32 rqID)
+{
+ return (rqID >= 2 * hw->num_rx_queues &&
+ rqID < 3 * hw->num_rx_queues);
+}
+
+/*
+ * RX/TX function prototypes
+ */
+
+void vmxnet3_dev_clear_queues(struct rte_eth_dev *dev);
+
+void vmxnet3_dev_rx_queue_release(void *rxq);
+void vmxnet3_dev_tx_queue_release(void *txq);
+
+int vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+int vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+int vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev);
+
+int vmxnet3_rss_configure(struct rte_eth_dev *dev);
+
+uint16_t vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t vmxnet3_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+#endif /* _VMXNET3_ETHDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h b/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h
new file mode 100644
index 00000000..74154e3a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _VMXNET3_LOGS_H_
+#define _VMXNET3_LOGS_H_
+
+extern int vmxnet3_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, vmxnet3_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+extern int vmxnet3_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, vmxnet3_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#endif /* _VMXNET3_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ring.h b/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ring.h
new file mode 100644
index 00000000..50992349
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _VMXNET3_RING_H_
+#define _VMXNET3_RING_H_
+
+#define VMXNET3_RX_CMDRING_SIZE 2
+
+#define VMXNET3_DRIVER_VERSION_NUM 0x01012000
+
+/* Default ring size */
+#define VMXNET3_DEF_TX_RING_SIZE 512
+#define VMXNET3_DEF_RX_RING_SIZE 128
+
+/* Default rx data ring desc size */
+#define VMXNET3_DEF_RXDATA_DESC_SIZE 256
+
+#define VMXNET3_SUCCESS 0
+#define VMXNET3_FAIL -1
+
+#define TRUE 1
+#define FALSE 0
+
+
+typedef struct vmxnet3_buf_info {
+ uint16_t len;
+ struct rte_mbuf *m;
+ uint64_t bufPA;
+} vmxnet3_buf_info_t;
+
+typedef struct vmxnet3_cmd_ring {
+ vmxnet3_buf_info_t *buf_info;
+ uint32_t size;
+ uint32_t next2fill;
+ uint32_t next2comp;
+ uint8_t gen;
+ uint8_t rid;
+ Vmxnet3_GenericDesc *base;
+ uint64_t basePA;
+} vmxnet3_cmd_ring_t;
+
+static inline void
+vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
+{
+ ring->next2fill++;
+ if (unlikely(ring->next2fill == ring->size)) {
+ ring->next2fill = 0;
+ ring->gen = (uint8_t)(ring->gen ^ 1);
+ }
+}
+
+static inline void
+vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
+{
+ VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
+}
+
+static inline uint32_t
+vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
+{
+ return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
+ ring->next2comp - ring->next2fill - 1;
+}
+
+static inline bool
+vmxnet3_cmd_ring_desc_empty(struct vmxnet3_cmd_ring *ring)
+{
+ return ring->next2comp == ring->next2fill;
+}
+
+typedef struct vmxnet3_comp_ring {
+ uint32_t size;
+ uint32_t next2proc;
+ uint8_t gen;
+ uint8_t intr_idx;
+ Vmxnet3_GenericDesc *base;
+ uint64_t basePA;
+} vmxnet3_comp_ring_t;
+
+struct vmxnet3_data_ring {
+ struct Vmxnet3_TxDataDesc *base;
+ uint32_t size;
+ uint64_t basePA;
+};
+
+static inline void
+vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
+{
+ ring->next2proc++;
+ if (unlikely(ring->next2proc == ring->size)) {
+ ring->next2proc = 0;
+ ring->gen = (uint8_t)(ring->gen ^ 1);
+ }
+}
+
+struct vmxnet3_txq_stats {
+ uint64_t drop_total; /* # of pkts dropped by the driver,
+ * the counters below track droppings due to
+ * different reasons
+ */
+ uint64_t drop_too_many_segs;
+ uint64_t drop_tso;
+ uint64_t tx_ring_full;
+};
+
+typedef struct vmxnet3_tx_queue {
+ struct vmxnet3_hw *hw;
+ struct vmxnet3_cmd_ring cmd_ring;
+ struct vmxnet3_comp_ring comp_ring;
+ struct vmxnet3_data_ring data_ring;
+ uint32_t qid;
+ struct Vmxnet3_TxQueueDesc *shared;
+ struct vmxnet3_txq_stats stats;
+ const struct rte_memzone *mz;
+ bool stopped;
+ uint16_t queue_id; /**< Device TX queue index. */
+ uint16_t port_id; /**< Device port identifier. */
+ uint16_t txdata_desc_size;
+} vmxnet3_tx_queue_t;
+
+struct vmxnet3_rxq_stats {
+ uint64_t drop_total;
+ uint64_t drop_err;
+ uint64_t drop_fcs;
+ uint64_t rx_buf_alloc_failure;
+};
+
+struct vmxnet3_rx_data_ring {
+ uint8_t *base;
+ uint64_t basePA;
+ uint32_t size;
+};
+
+typedef struct vmxnet3_rx_queue {
+ struct rte_mempool *mp;
+ struct vmxnet3_hw *hw;
+ struct vmxnet3_cmd_ring cmd_ring[VMXNET3_RX_CMDRING_SIZE];
+ struct vmxnet3_comp_ring comp_ring;
+ struct vmxnet3_rx_data_ring data_ring;
+ uint16_t data_desc_size;
+ uint32_t qid1;
+ uint32_t qid2;
+ /* rqID in RCD for buffer from data ring */
+ uint32_t data_ring_qid;
+ Vmxnet3_RxQueueDesc *shared;
+ struct rte_mbuf *start_seg;
+ struct rte_mbuf *last_seg;
+ struct vmxnet3_rxq_stats stats;
+ const struct rte_memzone *mz;
+ bool stopped;
+ uint16_t queue_id; /**< Device RX queue index. */
+ uint16_t port_id; /**< Device port identifier. */
+} vmxnet3_rx_queue_t;
+
+#endif /* _VMXNET3_RING_H_ */
diff --git a/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c b/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c
new file mode 100644
index 00000000..cf85f3d6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -0,0 +1,1345 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include <sys/queue.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_prefetch.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_net.h>
+
+#include "base/vmxnet3_defs.h"
+#include "vmxnet3_ring.h"
+
+#include "vmxnet3_logs.h"
+#include "vmxnet3_ethdev.h"
+
+#define VMXNET3_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define VMXNET3_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
+
+static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
+
+static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
+static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
+static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
+static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
+#endif
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
+static void
+vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
+{
+ uint32_t avail = 0;
+
+ if (rxq == NULL)
+ return;
+
+ PMD_RX_LOG(DEBUG,
+ "RXQ: cmd0 base : %p cmd1 base : %p comp ring base : %p.",
+ rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
+ PMD_RX_LOG(DEBUG,
+ "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
+ (unsigned long)rxq->cmd_ring[0].basePA,
+ (unsigned long)rxq->cmd_ring[1].basePA,
+ (unsigned long)rxq->comp_ring.basePA);
+
+ avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
+ PMD_RX_LOG(DEBUG,
+ "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
+ (uint32_t)rxq->cmd_ring[0].size, avail,
+ rxq->comp_ring.next2proc,
+ rxq->cmd_ring[0].size - avail);
+
+ avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
+ PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u",
+ (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
+ rxq->cmd_ring[1].size - avail);
+
+}
+
+static void
+vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
+{
+ uint32_t avail = 0;
+
+ if (txq == NULL)
+ return;
+
+ PMD_TX_LOG(DEBUG, "TXQ: cmd base : %p comp ring base : %p data ring base : %p.",
+ txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base);
+ PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.",
+ (unsigned long)txq->cmd_ring.basePA,
+ (unsigned long)txq->comp_ring.basePA,
+ (unsigned long)txq->data_ring.basePA);
+
+ avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
+ PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
+ (uint32_t)txq->cmd_ring.size, avail,
+ txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
+}
+#endif
+
+static void
+vmxnet3_tx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
+{
+ while (ring->next2comp != ring->next2fill) {
+ /* No need to worry about desc ownership, device is quiesced by now. */
+ vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;
+
+ if (buf_info->m) {
+ rte_pktmbuf_free(buf_info->m);
+ buf_info->m = NULL;
+ buf_info->bufPA = 0;
+ buf_info->len = 0;
+ }
+ vmxnet3_cmd_ring_adv_next2comp(ring);
+ }
+}
+
+static void
+vmxnet3_rx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
+{
+ uint32_t i;
+
+ for (i = 0; i < ring->size; i++) {
+ /* No need to worry about desc ownership, device is quiesced by now. */
+ vmxnet3_buf_info_t *buf_info = &ring->buf_info[i];
+
+ if (buf_info->m) {
+ rte_pktmbuf_free_seg(buf_info->m);
+ buf_info->m = NULL;
+ buf_info->bufPA = 0;
+ buf_info->len = 0;
+ }
+ vmxnet3_cmd_ring_adv_next2comp(ring);
+ }
+}
+
+static void
+vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
+{
+ rte_free(ring->buf_info);
+ ring->buf_info = NULL;
+}
+
+void
+vmxnet3_dev_tx_queue_release(void *txq)
+{
+ vmxnet3_tx_queue_t *tq = txq;
+
+ if (tq != NULL) {
+ /* Release mbufs */
+ vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
+ /* Release the cmd_ring */
+ vmxnet3_cmd_ring_release(&tq->cmd_ring);
+ /* Release the memzone */
+ rte_memzone_free(tq->mz);
+ /* Release the queue */
+ rte_free(tq);
+ }
+}
+
+void
+vmxnet3_dev_rx_queue_release(void *rxq)
+{
+ int i;
+ vmxnet3_rx_queue_t *rq = rxq;
+
+ if (rq != NULL) {
+ /* Release mbufs */
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+ vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
+
+ /* Release both the cmd_rings */
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+ vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
+
+ /* Release the memzone */
+ rte_memzone_free(rq->mz);
+
+ /* Release the queue */
+ rte_free(rq);
+ }
+}
+
+static void
+vmxnet3_dev_tx_queue_reset(void *txq)
+{
+ vmxnet3_tx_queue_t *tq = txq;
+ struct vmxnet3_cmd_ring *ring = &tq->cmd_ring;
+ struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
+ struct vmxnet3_data_ring *data_ring = &tq->data_ring;
+ int size;
+
+ if (tq != NULL) {
+ /* Release the cmd_ring mbufs */
+ vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
+ }
+
+ /* Tx vmxnet rings structure initialization*/
+ ring->next2fill = 0;
+ ring->next2comp = 0;
+ ring->gen = VMXNET3_INIT_GEN;
+ comp_ring->next2proc = 0;
+ comp_ring->gen = VMXNET3_INIT_GEN;
+
+ size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
+ size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
+ size += tq->txdata_desc_size * data_ring->size;
+
+ memset(ring->base, 0, size);
+}
+
+static void
+vmxnet3_dev_rx_queue_reset(void *rxq)
+{
+ int i;
+ vmxnet3_rx_queue_t *rq = rxq;
+ struct vmxnet3_hw *hw = rq->hw;
+ struct vmxnet3_cmd_ring *ring0, *ring1;
+ struct vmxnet3_comp_ring *comp_ring;
+ struct vmxnet3_rx_data_ring *data_ring = &rq->data_ring;
+ int size;
+
+ /* Release both the cmd_rings mbufs */
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+ vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
+
+ ring0 = &rq->cmd_ring[0];
+ ring1 = &rq->cmd_ring[1];
+ comp_ring = &rq->comp_ring;
+
+ /* Rx vmxnet rings structure initialization */
+ ring0->next2fill = 0;
+ ring1->next2fill = 0;
+ ring0->next2comp = 0;
+ ring1->next2comp = 0;
+ ring0->gen = VMXNET3_INIT_GEN;
+ ring1->gen = VMXNET3_INIT_GEN;
+ comp_ring->next2proc = 0;
+ comp_ring->gen = VMXNET3_INIT_GEN;
+
+ size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
+ size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ if (VMXNET3_VERSION_GE_3(hw) && rq->data_desc_size)
+ size += rq->data_desc_size * data_ring->size;
+
+ memset(ring0->base, 0, size);
+}
+
+void
+vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ unsigned i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (txq != NULL) {
+ txq->stopped = TRUE;
+ vmxnet3_dev_tx_queue_reset(txq);
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (rxq != NULL) {
+ rxq->stopped = TRUE;
+ vmxnet3_dev_rx_queue_reset(rxq);
+ }
+ }
+}
+
+static int
+vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
+{
+ int completed = 0;
+ struct rte_mbuf *mbuf;
+
+ /* Release cmd_ring descriptor and free mbuf */
+ RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
+
+ mbuf = txq->cmd_ring.buf_info[eop_idx].m;
+ if (mbuf == NULL)
+ rte_panic("EOP desc does not point to a valid mbuf");
+ rte_pktmbuf_free(mbuf);
+
+ txq->cmd_ring.buf_info[eop_idx].m = NULL;
+
+ while (txq->cmd_ring.next2comp != eop_idx) {
+ /* no out-of-order completion */
+ RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
+ vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
+ completed++;
+ }
+
+ /* Mark the txd for which tcd was generated as completed */
+ vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
+
+ return completed + 1;
+}
+
+static void
+vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
+{
+ int completed = 0;
+ vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
+ struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
+ (comp_ring->base + comp_ring->next2proc);
+
+ while (tcd->gen == comp_ring->gen) {
+ completed += vmxnet3_unmap_pkt(tcd->txdIdx, txq);
+
+ vmxnet3_comp_ring_adv_next2proc(comp_ring);
+ tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
+ comp_ring->next2proc);
+ }
+
+ PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
+}
+
+uint16_t
+vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int32_t ret;
+ uint32_t i;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i != nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /* Non-TSO packet cannot occupy more than
+ * VMXNET3_MAX_TXD_PER_PKT TX descriptors.
+ */
+ if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
+ m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ /* check that only supported TX offloads are requested. */
+ if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
+ (ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_SCTP_CKSUM) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+uint16_t
+vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx;
+ vmxnet3_tx_queue_t *txq = tx_queue;
+ struct vmxnet3_hw *hw = txq->hw;
+ Vmxnet3_TxQueueCtrl *txq_ctrl = &txq->shared->ctrl;
+ uint32_t deferred = rte_le_to_cpu_32(txq_ctrl->txNumDeferred);
+
+ if (unlikely(txq->stopped)) {
+ PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
+ return 0;
+ }
+
+ /* Free up the comp_descriptors aggressively */
+ vmxnet3_tq_tx_complete(txq);
+
+ nb_tx = 0;
+ while (nb_tx < nb_pkts) {
+ Vmxnet3_GenericDesc *gdesc;
+ vmxnet3_buf_info_t *tbi;
+ uint32_t first2fill, avail, dw2;
+ struct rte_mbuf *txm = tx_pkts[nb_tx];
+ struct rte_mbuf *m_seg = txm;
+ int copy_size = 0;
+ bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
+ /* # of descriptors needed for a packet. */
+ unsigned count = txm->nb_segs;
+
+ avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
+ if (count > avail) {
+ /* Is command ring full? */
+ if (unlikely(avail == 0)) {
+ PMD_TX_LOG(DEBUG, "No free ring descriptors");
+ txq->stats.tx_ring_full++;
+ txq->stats.drop_total += (nb_pkts - nb_tx);
+ break;
+ }
+
+ /* Command ring is not full but cannot handle the
+ * multi-segmented packet. Let's try the next packet
+ * in this case.
+ */
+ PMD_TX_LOG(DEBUG, "Running out of ring descriptors "
+ "(avail %d needed %d)", avail, count);
+ txq->stats.drop_total++;
+ if (tso)
+ txq->stats.drop_tso++;
+ rte_pktmbuf_free(txm);
+ nb_tx++;
+ continue;
+ }
+
+ /* Drop non-TSO packet that is excessively fragmented */
+ if (unlikely(!tso && count > VMXNET3_MAX_TXD_PER_PKT)) {
+ PMD_TX_LOG(ERR, "Non-TSO packet cannot occupy more than %d tx "
+ "descriptors. Packet dropped.", VMXNET3_MAX_TXD_PER_PKT);
+ txq->stats.drop_too_many_segs++;
+ txq->stats.drop_total++;
+ rte_pktmbuf_free(txm);
+ nb_tx++;
+ continue;
+ }
+
+ if (txm->nb_segs == 1 &&
+ rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
+ struct Vmxnet3_TxDataDesc *tdd;
+
+ /* Skip empty packets */
+ if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) {
+ txq->stats.drop_total++;
+ rte_pktmbuf_free(txm);
+ nb_tx++;
+ continue;
+ }
+
+ tdd = (struct Vmxnet3_TxDataDesc *)
+ ((uint8 *)txq->data_ring.base +
+ txq->cmd_ring.next2fill *
+ txq->txdata_desc_size);
+ copy_size = rte_pktmbuf_pkt_len(txm);
+ rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
+ }
+
+ /* use the previous gen bit for the SOP desc */
+ dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
+ first2fill = txq->cmd_ring.next2fill;
+ do {
+ /* Remember the transmit buffer for cleanup */
+ tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
+
+ /* NB: the following assumes that VMXNET3 maximum
+ * transmit buffer size (16K) is greater than
+ * maximum size of mbuf segment size.
+ */
+ gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
+
+ /* Skip empty segments */
+ if (unlikely(m_seg->data_len == 0))
+ continue;
+
+ if (copy_size) {
+ uint64 offset =
+ (uint64)txq->cmd_ring.next2fill *
+ txq->txdata_desc_size;
+ gdesc->txd.addr =
+ rte_cpu_to_le_64(txq->data_ring.basePA +
+ offset);
+ } else {
+ gdesc->txd.addr = rte_mbuf_data_iova(m_seg);
+ }
+
+ gdesc->dword[2] = dw2 | m_seg->data_len;
+ gdesc->dword[3] = 0;
+
+ /* move to the next2fill descriptor */
+ vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
+
+ /* use the right gen for non-SOP desc */
+ dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+ } while ((m_seg = m_seg->next) != NULL);
+
+ /* set the last buf_info for the pkt */
+ tbi->m = txm;
+ /* Update the EOP descriptor */
+ gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;
+
+ /* Add VLAN tag if present */
+ gdesc = txq->cmd_ring.base + first2fill;
+ if (txm->ol_flags & PKT_TX_VLAN_PKT) {
+ gdesc->txd.ti = 1;
+ gdesc->txd.tci = txm->vlan_tci;
+ }
+
+ if (tso) {
+ uint16_t mss = txm->tso_segsz;
+
+ RTE_ASSERT(mss > 0);
+
+ gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
+ gdesc->txd.om = VMXNET3_OM_TSO;
+ gdesc->txd.msscof = mss;
+
+ deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
+ } else if (txm->ol_flags & PKT_TX_L4_MASK) {
+ gdesc->txd.om = VMXNET3_OM_CSUM;
+ gdesc->txd.hlen = txm->l2_len + txm->l3_len;
+
+ switch (txm->ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum);
+ break;
+ case PKT_TX_UDP_CKSUM:
+ gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
+ break;
+ default:
+ PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
+ txm->ol_flags & PKT_TX_L4_MASK);
+ abort();
+ }
+ deferred++;
+ } else {
+ gdesc->txd.hlen = 0;
+ gdesc->txd.om = VMXNET3_OM_NONE;
+ gdesc->txd.msscof = 0;
+ deferred++;
+ }
+
+ /* flip the GEN bit on the SOP */
+ rte_compiler_barrier();
+ gdesc->dword[2] ^= VMXNET3_TXD_GEN;
+
+ txq_ctrl->txNumDeferred = rte_cpu_to_le_32(deferred);
+ nb_tx++;
+ }
+
+ PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", rte_le_to_cpu_32(txq_ctrl->txThreshold));
+
+ if (deferred >= rte_le_to_cpu_32(txq_ctrl->txThreshold)) {
+ txq_ctrl->txNumDeferred = 0;
+ /* Notify vSwitch that packets are available. */
+ VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
+ txq->cmd_ring.next2fill);
+ }
+
+ return nb_tx;
+}
+
+static inline void
+vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
+ struct rte_mbuf *mbuf)
+{
+ uint32_t val;
+ struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
+ struct Vmxnet3_RxDesc *rxd =
+ (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
+ vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
+
+ if (ring_id == 0) {
+ /* Usually: One HEAD type buf per packet
+ * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
+ * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
+ */
+
+ /* We use single packet buffer so all heads here */
+ val = VMXNET3_RXD_BTYPE_HEAD;
+ } else {
+ /* All BODY type buffers for 2nd ring */
+ val = VMXNET3_RXD_BTYPE_BODY;
+ }
+
+ /*
+ * Load mbuf pointer into buf_info[ring_size]
+ * buf_info structure is equivalent to cookie for virtio-virtqueue
+ */
+ buf_info->m = mbuf;
+ buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
+ buf_info->bufPA = rte_mbuf_data_iova_default(mbuf);
+
+ /* Load Rx Descriptor with the buffer's GPA */
+ rxd->addr = buf_info->bufPA;
+
+ /* After this point rxd->addr MUST not be NULL */
+ rxd->btype = val;
+ rxd->len = buf_info->len;
+ /* Flip gen bit at the end to change ownership */
+ rxd->gen = ring->gen;
+
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+}
+/*
+ * Allocates mbufs and clusters. Post rx descriptors with buffer details
+ * so that device can receive packets in those buffers.
+ * Ring layout:
+ * Among the two rings, 1st ring contains buffers of type 0 and type 1.
+ * bufs_per_pkt is set such that for non-LRO cases all the buffers required
+ * by a frame will fit in 1st ring (1st buf of type0 and rest of type1).
+ * 2nd ring contains buffers of type 1 alone. Second ring mostly be used
+ * only for LRO.
+ */
+static int
+vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
+{
+ int err = 0;
+ uint32_t i = 0;
+ struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
+
+ while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
+ struct rte_mbuf *mbuf;
+
+ /* Allocate blank mbuf for the current Rx Descriptor */
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(mbuf == NULL)) {
+ PMD_RX_LOG(ERR, "Error allocating mbuf");
+ rxq->stats.rx_buf_alloc_failure++;
+ err = ENOMEM;
+ break;
+ }
+
+ vmxnet3_renew_desc(rxq, ring_id, mbuf);
+ i++;
+ }
+
+ /* Return error only if no buffers are posted at present */
+ if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
+ return -err;
+ else
+ return i;
+}
+
+/* MSS not provided by vmxnet3, guess one with available information */
+static uint16_t
+vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
+ struct rte_mbuf *rxm)
+{
+ uint32_t hlen, slen;
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ struct tcp_hdr *tcp_hdr;
+ char *ptr;
+
+ RTE_ASSERT(rcd->tcp);
+
+ ptr = rte_pktmbuf_mtod(rxm, char *);
+ slen = rte_pktmbuf_data_len(rxm);
+ hlen = sizeof(struct ether_hdr);
+
+ if (rcd->v4) {
+ if (unlikely(slen < hlen + sizeof(struct ipv4_hdr)))
+ return hw->mtu - sizeof(struct ipv4_hdr)
+ - sizeof(struct tcp_hdr);
+
+ ipv4_hdr = (struct ipv4_hdr *)(ptr + hlen);
+ hlen += (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
+ IPV4_IHL_MULTIPLIER;
+ } else if (rcd->v6) {
+ if (unlikely(slen < hlen + sizeof(struct ipv6_hdr)))
+ return hw->mtu - sizeof(struct ipv6_hdr) -
+ sizeof(struct tcp_hdr);
+
+ ipv6_hdr = (struct ipv6_hdr *)(ptr + hlen);
+ hlen += sizeof(struct ipv6_hdr);
+ if (unlikely(ipv6_hdr->proto != IPPROTO_TCP)) {
+ int frag;
+
+ rte_net_skip_ip6_ext(ipv6_hdr->proto, rxm,
+ &hlen, &frag);
+ }
+ }
+
+ if (unlikely(slen < hlen + sizeof(struct tcp_hdr)))
+ return hw->mtu - hlen - sizeof(struct tcp_hdr) +
+ sizeof(struct ether_hdr);
+
+ tcp_hdr = (struct tcp_hdr *)(ptr + hlen);
+ hlen += (tcp_hdr->data_off & 0xf0) >> 2;
+
+ if (rxm->udata64 > 1)
+ return (rte_pktmbuf_pkt_len(rxm) - hlen +
+ rxm->udata64 - 1) / rxm->udata64;
+ else
+ return hw->mtu - hlen + sizeof(struct ether_hdr);
+}
+
+/* Receive side checksum and other offloads */
+static inline void
+vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
+ struct rte_mbuf *rxm, const uint8_t sop)
+{
+ uint64_t ol_flags = rxm->ol_flags;
+ uint32_t packet_type = rxm->packet_type;
+
+ /* Offloads set in sop */
+ if (sop) {
+ /* Set packet type */
+ packet_type |= RTE_PTYPE_L2_ETHER;
+
+ /* Check large packet receive */
+ if (VMXNET3_VERSION_GE_2(hw) &&
+ rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
+ const Vmxnet3_RxCompDescExt *rcde =
+ (const Vmxnet3_RxCompDescExt *)rcd;
+
+ rxm->tso_segsz = rcde->mss;
+ rxm->udata64 = rcde->segCnt;
+ ol_flags |= PKT_RX_LRO;
+ }
+ } else { /* Offloads set in eop */
+ /* Check for RSS */
+ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
+ ol_flags |= PKT_RX_RSS_HASH;
+ rxm->hash.rss = rcd->rssHash;
+ }
+
+ /* Check for hardware stripped VLAN tag */
+ if (rcd->ts) {
+ ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
+ }
+
+ /* Check packet type, checksum errors, etc. */
+ if (rcd->cnc) {
+ ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ } else {
+ if (rcd->v4) {
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+
+ if (rcd->ipc)
+ ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if (rcd->tuc) {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (rcd->tcp)
+ packet_type |= RTE_PTYPE_L4_TCP;
+ else
+ packet_type |= RTE_PTYPE_L4_UDP;
+ } else {
+ if (rcd->tcp) {
+ packet_type |= RTE_PTYPE_L4_TCP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else if (rcd->udp) {
+ packet_type |= RTE_PTYPE_L4_UDP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ } else if (rcd->v6) {
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+ if (rcd->tuc) {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (rcd->tcp)
+ packet_type |= RTE_PTYPE_L4_TCP;
+ else
+ packet_type |= RTE_PTYPE_L4_UDP;
+ } else {
+ if (rcd->tcp) {
+ packet_type |= RTE_PTYPE_L4_TCP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else if (rcd->udp) {
+ packet_type |= RTE_PTYPE_L4_UDP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ } else {
+ packet_type |= RTE_PTYPE_UNKNOWN;
+ }
+
+ /* Old variants of vmxnet3 do not provide MSS */
+ if ((ol_flags & PKT_RX_LRO) && rxm->tso_segsz == 0)
+ rxm->tso_segsz = vmxnet3_guess_mss(hw,
+ rcd, rxm);
+ }
+ }
+
+ rxm->ol_flags = ol_flags;
+ rxm->packet_type = packet_type;
+}
+
+/*
+ * Process the Rx Completion Ring of given vmxnet3_rx_queue
+ * for nb_pkts burst and return the number of packets received
+ */
+uint16_t
+vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ uint16_t nb_rx;
+ uint32_t nb_rxd, idx;
+ uint8_t ring_idx;
+ vmxnet3_rx_queue_t *rxq;
+ Vmxnet3_RxCompDesc *rcd;
+ vmxnet3_buf_info_t *rbi;
+ Vmxnet3_RxDesc *rxd;
+ struct rte_mbuf *rxm = NULL;
+ struct vmxnet3_hw *hw;
+
+ nb_rx = 0;
+ ring_idx = 0;
+ nb_rxd = 0;
+ idx = 0;
+
+ rxq = rx_queue;
+ hw = rxq->hw;
+
+ rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
+
+ if (unlikely(rxq->stopped)) {
+ PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
+ return 0;
+ }
+
+ while (rcd->gen == rxq->comp_ring.gen) {
+ struct rte_mbuf *newm;
+
+ if (nb_rx >= nb_pkts)
+ break;
+
+ newm = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(newm == NULL)) {
+ PMD_RX_LOG(ERR, "Error allocating mbuf");
+ rxq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+
+ idx = rcd->rxdIdx;
+ ring_idx = vmxnet3_get_ring_idx(hw, rcd->rqID);
+ rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
+ RTE_SET_USED(rxd); /* used only for assert when enabled */
+ rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
+
+ PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
+
+ RTE_ASSERT(rcd->len <= rxd->len);
+ RTE_ASSERT(rbi->m);
+
+ /* Get the packet buffer pointer from buf_info */
+ rxm = rbi->m;
+
+ /* Clear descriptor associated buf_info to be reused */
+ rbi->m = NULL;
+ rbi->bufPA = 0;
+
+ /* Update the index that we received a packet */
+ rxq->cmd_ring[ring_idx].next2comp = idx;
+
+ /* For RCD with EOP set, check if there is frame error */
+ if (unlikely(rcd->eop && rcd->err)) {
+ rxq->stats.drop_total++;
+ rxq->stats.drop_err++;
+
+ if (!rcd->fcs) {
+ rxq->stats.drop_fcs++;
+ PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
+ }
+ PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
+ (int)(rcd - (struct Vmxnet3_RxCompDesc *)
+ rxq->comp_ring.base), rcd->rxdIdx);
+ rte_pktmbuf_free_seg(rxm);
+ if (rxq->start_seg) {
+ struct rte_mbuf *start = rxq->start_seg;
+
+ rxq->start_seg = NULL;
+ rte_pktmbuf_free(start);
+ }
+ goto rcd_done;
+ }
+
+ /* Initialize newly received packet buffer */
+ rxm->port = rxq->port_id;
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = (uint16_t)rcd->len;
+ rxm->data_len = (uint16_t)rcd->len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+ rxm->packet_type = 0;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (rcd->sop) {
+ RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
+
+ if (unlikely(rcd->len == 0)) {
+ RTE_ASSERT(rcd->eop);
+
+ PMD_RX_LOG(DEBUG,
+ "Rx buf was skipped. rxring[%d][%d])",
+ ring_idx, idx);
+ rte_pktmbuf_free_seg(rxm);
+ goto rcd_done;
+ }
+
+ if (vmxnet3_rx_data_ring(hw, rcd->rqID)) {
+ uint8_t *rdd = rxq->data_ring.base +
+ idx * rxq->data_desc_size;
+
+ RTE_ASSERT(VMXNET3_VERSION_GE_3(hw));
+ rte_memcpy(rte_pktmbuf_mtod(rxm, char *),
+ rdd, rcd->len);
+ }
+
+ rxq->start_seg = rxm;
+ rxq->last_seg = rxm;
+ vmxnet3_rx_offload(hw, rcd, rxm, 1);
+ } else {
+ struct rte_mbuf *start = rxq->start_seg;
+
+ RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
+
+ if (rxm->data_len) {
+ start->pkt_len += rxm->data_len;
+ start->nb_segs++;
+
+ rxq->last_seg->next = rxm;
+ rxq->last_seg = rxm;
+ } else {
+ rte_pktmbuf_free_seg(rxm);
+ }
+ }
+
+ if (rcd->eop) {
+ struct rte_mbuf *start = rxq->start_seg;
+
+ vmxnet3_rx_offload(hw, rcd, start, 0);
+ rx_pkts[nb_rx++] = start;
+ rxq->start_seg = NULL;
+ }
+
+rcd_done:
+ rxq->cmd_ring[ring_idx].next2comp = idx;
+ VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
+ rxq->cmd_ring[ring_idx].size);
+
+ /* It's time to renew descriptors */
+ vmxnet3_renew_desc(rxq, ring_idx, newm);
+ if (unlikely(rxq->shared->ctrl.updateRxProd)) {
+ VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
+ rxq->cmd_ring[ring_idx].next2fill);
+ }
+
+ /* Advance to the next descriptor in comp_ring */
+ vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);
+
+ rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
+ nb_rxd++;
+ if (nb_rxd > rxq->cmd_ring[0].size) {
+ PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
+ " relinquish control.");
+ break;
+ }
+ }
+
+ if (unlikely(nb_rxd == 0)) {
+ uint32_t avail;
+ for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
+ avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[ring_idx]);
+ if (unlikely(avail > 0)) {
+ /* try to alloc new buf and renew descriptors */
+ vmxnet3_post_rx_bufs(rxq, ring_idx);
+ }
+ }
+ if (unlikely(rxq->shared->ctrl.updateRxProd)) {
+ for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
+ VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
+ rxq->cmd_ring[ring_idx].next2fill);
+ }
+ }
+ }
+
+ return nb_rx;
+}
+
+int
+vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ const struct rte_memzone *mz;
+ struct vmxnet3_tx_queue *txq;
+ struct vmxnet3_cmd_ring *ring;
+ struct vmxnet3_comp_ring *comp_ring;
+ struct vmxnet3_data_ring *data_ring;
+ int size;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
+ RTE_CACHE_LINE_SIZE);
+ if (txq == NULL) {
+ PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
+ txq->hw = hw;
+ txq->qid = queue_idx;
+ txq->stopped = TRUE;
+ txq->txdata_desc_size = hw->txdata_desc_size;
+
+ ring = &txq->cmd_ring;
+ comp_ring = &txq->comp_ring;
+ data_ring = &txq->data_ring;
+
+ /* Tx vmxnet ring length should be between 512-4096 */
+ if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
+ PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u",
+ VMXNET3_DEF_TX_RING_SIZE);
+ return -EINVAL;
+ } else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
+ PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u",
+ VMXNET3_TX_RING_MAX_SIZE);
+ return -EINVAL;
+ } else {
+ ring->size = nb_desc;
+ ring->size &= ~VMXNET3_RING_SIZE_MASK;
+ }
+ comp_ring->size = data_ring->size = ring->size;
+
+ /* Tx vmxnet rings structure initialization*/
+ ring->next2fill = 0;
+ ring->next2comp = 0;
+ ring->gen = VMXNET3_INIT_GEN;
+ comp_ring->next2proc = 0;
+ comp_ring->gen = VMXNET3_INIT_GEN;
+
+ size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
+ size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
+ size += txq->txdata_desc_size * data_ring->size;
+
+ mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
+ VMXNET3_RING_BA_ALIGN, socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
+ return -ENOMEM;
+ }
+ txq->mz = mz;
+ memset(mz->addr, 0, mz->len);
+
+ /* cmd_ring initialization */
+ ring->base = mz->addr;
+ ring->basePA = mz->iova;
+
+ /* comp_ring initialization */
+ comp_ring->base = ring->base + ring->size;
+ comp_ring->basePA = ring->basePA +
+ (sizeof(struct Vmxnet3_TxDesc) * ring->size);
+
+ /* data_ring initialization */
+ data_ring->base = (Vmxnet3_TxDataDesc *)(comp_ring->base + comp_ring->size);
+ data_ring->basePA = comp_ring->basePA +
+ (sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size);
+
+ /* cmd_ring0 buf_info allocation */
+ ring->buf_info = rte_zmalloc("tx_ring_buf_info",
+ ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
+ if (ring->buf_info == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure");
+ return -ENOMEM;
+ }
+
+ /* Update the data portion with txq */
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return 0;
+}
+
+int
+vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ const struct rte_memzone *mz;
+ struct vmxnet3_rx_queue *rxq;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
+ struct vmxnet3_comp_ring *comp_ring;
+ struct vmxnet3_rx_data_ring *data_ring;
+ int size;
+ uint8_t i;
+ char mem_name[32];
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
+ RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL) {
+ PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
+ return -ENOMEM;
+ }
+
+ rxq->mp = mp;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
+ rxq->hw = hw;
+ rxq->qid1 = queue_idx;
+ rxq->qid2 = queue_idx + hw->num_rx_queues;
+ rxq->data_ring_qid = queue_idx + 2 * hw->num_rx_queues;
+ rxq->data_desc_size = hw->rxdata_desc_size;
+ rxq->stopped = TRUE;
+
+ ring0 = &rxq->cmd_ring[0];
+ ring1 = &rxq->cmd_ring[1];
+ comp_ring = &rxq->comp_ring;
+ data_ring = &rxq->data_ring;
+
+ /* Rx vmxnet rings length should be between 256-4096 */
+ if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
+ PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: 256");
+ return -EINVAL;
+ } else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
+ PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: 4096");
+ return -EINVAL;
+ } else {
+ ring0->size = nb_desc;
+ ring0->size &= ~VMXNET3_RING_SIZE_MASK;
+ ring1->size = ring0->size;
+ }
+
+ comp_ring->size = ring0->size + ring1->size;
+ data_ring->size = ring0->size;
+
+ /* Rx vmxnet rings structure initialization */
+ ring0->next2fill = 0;
+ ring1->next2fill = 0;
+ ring0->next2comp = 0;
+ ring1->next2comp = 0;
+ ring0->gen = VMXNET3_INIT_GEN;
+ ring1->gen = VMXNET3_INIT_GEN;
+ comp_ring->next2proc = 0;
+ comp_ring->gen = VMXNET3_INIT_GEN;
+
+ size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
+ size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size)
+ size += rxq->data_desc_size * data_ring->size;
+
+ mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
+ VMXNET3_RING_BA_ALIGN, socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
+ return -ENOMEM;
+ }
+ rxq->mz = mz;
+ memset(mz->addr, 0, mz->len);
+
+ /* cmd_ring0 initialization */
+ ring0->base = mz->addr;
+ ring0->basePA = mz->iova;
+
+ /* cmd_ring1 initialization */
+ ring1->base = ring0->base + ring0->size;
+ ring1->basePA = ring0->basePA + sizeof(struct Vmxnet3_RxDesc) * ring0->size;
+
+ /* comp_ring initialization */
+ comp_ring->base = ring1->base + ring1->size;
+ comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
+ ring1->size;
+
+ /* data_ring initialization */
+ if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size) {
+ data_ring->base =
+ (uint8_t *)(comp_ring->base + comp_ring->size);
+ data_ring->basePA = comp_ring->basePA +
+ sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ }
+
+ /* cmd_ring0-cmd_ring1 buf_info allocation */
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
+
+ ring = &rxq->cmd_ring[i];
+ ring->rid = i;
+ snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
+
+ ring->buf_info = rte_zmalloc(mem_name,
+ ring->size * sizeof(vmxnet3_buf_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (ring->buf_info == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
+ return -ENOMEM;
+ }
+ }
+
+ /* Update the data portion with rxq */
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ return 0;
+}
+
+/*
+ * Initializes Receive Unit
+ * Load mbufs in rx queue in advance
+ */
+int
+vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ int i, ret;
+ uint8_t j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < hw->num_rx_queues; i++) {
+ vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
+
+ for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
+ /* Passing 0 as alloc_num will allocate full ring */
+ ret = vmxnet3_post_rx_bufs(rxq, j);
+ if (ret <= 0) {
+ PMD_INIT_LOG(ERR,
+ "ERROR: Posting Rxq: %d buffers ring: %d",
+ i, j);
+ return -ret;
+ }
+ /*
+ * Updating device with the index:next2fill to fill the
+ * mbufs for coming packets.
+ */
+ if (unlikely(rxq->shared->ctrl.updateRxProd)) {
+ VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
+ rxq->cmd_ring[j].next2fill);
+ }
+ }
+ rxq->stopped = FALSE;
+ rxq->start_seg = NULL;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+
+ txq->stopped = FALSE;
+ }
+
+ return 0;
+}
+
+static uint8_t rss_intel_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+/*
+ * Configure RSS feature
+ */
+int
+vmxnet3_rss_configure(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct VMXNET3_RSSConf *dev_rss_conf;
+ struct rte_eth_rss_conf *port_rss_conf;
+ uint64_t rss_hf;
+ uint8_t i, j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev_rss_conf = hw->rss_conf;
+ port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+
+ /* loading hashFunc */
+ dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
+ /* loading hashKeySize */
+ dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
+ /* loading indTableSize: Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/
+ dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
+
+ if (port_rss_conf->rss_key == NULL) {
+ /* Default hash key */
+ port_rss_conf->rss_key = rss_intel_key;
+ }
+
+ /* loading hashKey */
+ memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key,
+ dev_rss_conf->hashKeySize);
+
+ /* loading indTable */
+ for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
+ if (j == dev->data->nb_rx_queues)
+ j = 0;
+ dev_rss_conf->indTable[i] = j;
+ }
+
+ /* loading hashType */
+ dev_rss_conf->hashType = 0;
+ rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
+ if (rss_hf & ETH_RSS_IPV4)
+ dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
+ if (rss_hf & ETH_RSS_IPV6)
+ dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
+
+ return VMXNET3_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/raw/Makefile b/src/spdk/dpdk/drivers/raw/Makefile
new file mode 100644
index 00000000..8e29b4a5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# DIRS-$(<configuration>) += <directory>
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV) += skeleton_rawdev
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV) += dpaa2_cmdif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV) += dpaa2_qdma
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV) += ifpga_rawdev
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/Makefile b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/Makefile
new file mode 100644
index 00000000..9b863dda
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_cmdif.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_kvargs
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_rawdev
+
+EXPORT_MAP := rte_pmd_dpaa2_cmdif_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV) += dpaa2_cmdif.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV)-include += rte_pmd_dpaa2_cmdif.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
new file mode 100644
index 00000000..469960a3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_atomic.h>
+#include <rte_interrupts.h>
+#include <rte_branch_prediction.h>
+#include <rte_lcore.h>
+
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+#include "dpaa2_cmdif_logs.h"
+#include "rte_pmd_dpaa2_cmdif.h"
+
+/* Dynamic log type identifier */
+int dpaa2_cmdif_logtype;
+
+/* CMDIF driver name */
+#define DPAA2_CMDIF_PMD_NAME dpaa2_dpci
+
+/* CMDIF driver object */
+static struct rte_vdev_driver dpaa2_cmdif_drv;
+
+/*
+ * This API provides the DPCI device ID in 'attr_value'.
+ * The device ID shall be passed by GPP to the AIOP using CMDIF commands.
+ */
+static int
+dpaa2_cmdif_get_attr(struct rte_rawdev *dev,
+ const char *attr_name,
+ uint64_t *attr_value)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+
+ DPAA2_CMDIF_FUNC_TRACE();
+
+ RTE_SET_USED(attr_name);
+
+ if (!attr_value) {
+ DPAA2_CMDIF_ERR("Invalid arguments for getting attributes");
+ return -EINVAL;
+ }
+ *attr_value = cidev->dpci_id;
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_enqueue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+ struct rte_dpaa2_cmdif_context *cmdif_send_cnxt;
+ struct dpaa2_queue *txq;
+ struct qbman_fd fd;
+ struct qbman_eq_desc eqdesc;
+ struct qbman_swp *swp;
+ int ret;
+
+ DPAA2_CMDIF_FUNC_TRACE();
+
+ RTE_SET_USED(count);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_CMDIF_ERR("Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ cmdif_send_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
+ txq = &(cidev->tx_queue[cmdif_send_cnxt->priority]);
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc, 0);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+ /* Set some of the FD parameters to i.
+ * For performance reasons do not memset
+ */
+ fd.simple.bpid_offset = 0;
+ fd.simple.ctrl = 0;
+
+ DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(buffers[0]->buf_addr));
+ DPAA2_SET_FD_LEN(&fd, cmdif_send_cnxt->size);
+ DPAA2_SET_FD_FRC(&fd, cmdif_send_cnxt->frc);
+ DPAA2_SET_FD_FLC(&fd, cmdif_send_cnxt->flc);
+
+ /* Enqueue a packet to the QBMAN */
+ do {
+ ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
+ if (ret < 0 && ret != -EBUSY)
+ DPAA2_CMDIF_ERR("Transmit failure with err: %d\n", ret);
+ } while (ret == -EBUSY);
+
+ DPAA2_CMDIF_DP_DEBUG("Successfully transmitted a packet\n");
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+ struct rte_dpaa2_cmdif_context *cmdif_rcv_cnxt;
+ struct dpaa2_queue *rxq;
+ struct qbman_swp *swp;
+ struct qbman_result *dq_storage;
+ const struct qbman_fd *fd;
+ struct qbman_pull_desc pulldesc;
+ uint8_t status;
+ int ret;
+
+ DPAA2_CMDIF_FUNC_TRACE();
+
+ RTE_SET_USED(count);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_CMDIF_ERR("Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
+ rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
+ dq_storage = rxq->q_storage->dq_storage[0];
+
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
+ qbman_pull_desc_set_numframes(&pulldesc, 1);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_CMDIF_DP_WARN("VDQ cmd not issued. QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+
+ /* Check if previous issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+ /* Loop until the dq_storage is updated with new token by QBMAN */
+ while (!qbman_result_has_new_result(swp, dq_storage))
+ ;
+
+ /* Check for valid frame. */
+ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+ DPAA2_CMDIF_DP_DEBUG("No frame is delivered\n");
+ return 0;
+ }
+
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ buffers[0]->buf_addr = (void *)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FD_ADDR(fd) + DPAA2_GET_FD_OFFSET(fd));
+ cmdif_rcv_cnxt->size = DPAA2_GET_FD_LEN(fd);
+ cmdif_rcv_cnxt->flc = DPAA2_GET_FD_FLC(fd);
+ cmdif_rcv_cnxt->frc = DPAA2_GET_FD_FRC(fd);
+
+ DPAA2_CMDIF_DP_DEBUG("packet received\n");
+
+ return 1;
+}
+
+static const struct rte_rawdev_ops dpaa2_cmdif_ops = {
+ .attr_get = dpaa2_cmdif_get_attr,
+ .enqueue_bufs = dpaa2_cmdif_enqueue_bufs,
+ .dequeue_bufs = dpaa2_cmdif_dequeue_bufs,
+};
+
+static int
+dpaa2_cmdif_create(const char *name,
+ struct rte_vdev_device *vdev,
+ int socket_id)
+{
+ struct rte_rawdev *rawdev;
+ struct dpaa2_dpci_dev *cidev;
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct dpaa2_dpci_dev),
+ socket_id);
+ if (!rawdev) {
+ DPAA2_CMDIF_ERR("Unable to allocate rawdevice");
+ return -EINVAL;
+ }
+
+ rawdev->dev_ops = &dpaa2_cmdif_ops;
+ rawdev->device = &vdev->device;
+ rawdev->driver_name = vdev->device.driver->name;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ cidev = rte_dpaa2_alloc_dpci_dev();
+ if (!cidev) {
+ DPAA2_CMDIF_ERR("Unable to allocate CI device");
+ rte_rawdev_pmd_release(rawdev);
+ return -ENODEV;
+ }
+
+ rawdev->dev_private = cidev;
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_destroy(const char *name)
+{
+ int ret;
+ struct rte_rawdev *rdev;
+
+ rdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rdev) {
+ DPAA2_CMDIF_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ /* The primary process will only free the DPCI device */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_dpaa2_free_dpci_dev(rdev->dev_private);
+
+ ret = rte_rawdev_pmd_release(rdev);
+ if (ret)
+ DPAA2_CMDIF_DEBUG("Device cleanup failed");
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int ret = 0;
+
+ name = rte_vdev_device_name(vdev);
+
+ DPAA2_CMDIF_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+ ret = dpaa2_cmdif_create(name, vdev, rte_socket_id());
+
+ return ret;
+}
+
+static int
+dpaa2_cmdif_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+
+ DPAA2_CMDIF_INFO("Closing %s on NUMA node %d", name, rte_socket_id());
+
+ ret = dpaa2_cmdif_destroy(name);
+
+ return ret;
+}
+
+static struct rte_vdev_driver dpaa2_cmdif_drv = {
+ .probe = dpaa2_cmdif_probe,
+ .remove = dpaa2_cmdif_remove
+};
+
+RTE_PMD_REGISTER_VDEV(DPAA2_CMDIF_PMD_NAME, dpaa2_cmdif_drv);
+
+RTE_INIT(dpaa2_cmdif_init_log)
+{
+ dpaa2_cmdif_logtype = rte_log_register("pmd.raw.dpaa2.cmdif");
+ if (dpaa2_cmdif_logtype >= 0)
+ rte_log_set_level(dpaa2_cmdif_logtype, RTE_LOG_INFO);
+}
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
new file mode 100644
index 00000000..8991e832
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_CMDIF_LOGS_H__
+#define __DPAA2_CMDIF_LOGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int dpaa2_cmdif_logtype;
+
+#define DPAA2_CMDIF_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_cmdif_logtype, "dpaa2_cmdif: " \
+ fmt "\n", ## args)
+
+#define DPAA2_CMDIF_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_cmdif_logtype, "dpaa2_cmdif: %s(): " \
+ fmt "\n", __func__, ## args)
+
+#define DPAA2_CMDIF_FUNC_TRACE() DPAA2_CMDIF_DEBUG(">>")
+
+#define DPAA2_CMDIF_INFO(fmt, args...) \
+ DPAA2_CMDIF_LOG(INFO, fmt, ## args)
+#define DPAA2_CMDIF_ERR(fmt, args...) \
+ DPAA2_CMDIF_LOG(ERR, fmt, ## args)
+#define DPAA2_CMDIF_WARN(fmt, args...) \
+ DPAA2_CMDIF_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_CMDIF_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "dpaa2_cmdif: " fmt "\n", ## args)
+
+#define DPAA2_CMDIF_DP_DEBUG(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_CMDIF_DP_INFO(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_CMDIF_DP_WARN(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(WARNING, fmt, ## args)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __DPAA2_CMDIF_LOGS_H__ */
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/meson.build b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/meson.build
new file mode 100644
index 00000000..1d146872
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+build = dpdk_conf.has('RTE_LIBRTE_DPAA2_MEMPOOL')
+deps += ['rawdev', 'mempool_dpaa2', 'bus_vdev']
+sources = files('dpaa2_cmdif.c')
+
+allow_experimental_apis = true
+
+install_headers('rte_pmd_dpaa2_cmdif.h')
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h
new file mode 100644
index 00000000..483b66ea
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_PMD_DPAA2_CMDIF_H__
+#define __RTE_PMD_DPAA2_CMDIF_H__
+
+/**
+ * @file
+ *
+ * NXP dpaa2 AIOP CMDIF PMD specific structures.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** The context required in the I/O path for DPAA2 AIOP Command Interface */
+struct rte_dpaa2_cmdif_context {
+ /** Size to populate in QBMAN FD */
+ uint32_t size;
+ /** FRC to populate in QBMAN FD */
+ uint32_t frc;
+ /** FLC to populate in QBMAN FD */
+ uint64_t flc;
+ /** Priority of the command. This priority determines DPCI Queue*/
+ uint8_t priority;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_PMD_DPAA2_CMDIF_H__ */
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/Makefile b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/Makefile
new file mode 100644
index 00000000..d88809ea
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/Makefile
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_qdma.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_mempool
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_ring
+
+EXPORT_MAP := rte_pmd_dpaa2_qdma_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV) += dpaa2_qdma.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV)-include += rte_pmd_dpaa2_qdma.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
new file mode 100644
index 00000000..2787d302
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
@@ -0,0 +1,1001 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <string.h>
+
+#include <rte_eal.h>
+#include <rte_fslmc.h>
+#include <rte_atomic.h>
+#include <rte_lcore.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+
+#include <mc/fsl_dpdmai.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+
+#include "dpaa2_qdma.h"
+#include "dpaa2_qdma_logs.h"
+#include "rte_pmd_dpaa2_qdma.h"
+
+/* Dynamic log type identifier */
+int dpaa2_qdma_logtype;
+
+/* QDMA device */
+static struct qdma_device qdma_dev;
+
+/* QDMA H/W queues list */
+TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
+static struct qdma_hw_queue_list qdma_queue_list
+ = TAILQ_HEAD_INITIALIZER(qdma_queue_list);
+
+/* QDMA Virtual Queues */
+struct qdma_virt_queue *qdma_vqs;
+
+/* QDMA per core data */
+struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
+
+static struct qdma_hw_queue *
+alloc_hw_queue(uint32_t lcore_id)
+{
+ struct qdma_hw_queue *queue = NULL;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Get a free queue from the list */
+ TAILQ_FOREACH(queue, &qdma_queue_list, next) {
+ if (queue->num_users == 0) {
+ queue->lcore_id = lcore_id;
+ queue->num_users++;
+ break;
+ }
+ }
+
+ return queue;
+}
+
+static void
+free_hw_queue(struct qdma_hw_queue *queue)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ queue->num_users--;
+}
+
+
+static struct qdma_hw_queue *
+get_hw_queue(uint32_t lcore_id)
+{
+ struct qdma_per_core_info *core_info;
+ struct qdma_hw_queue *queue, *temp;
+ uint32_t least_num_users;
+ int num_hw_queues, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ core_info = &qdma_core_info[lcore_id];
+ num_hw_queues = core_info->num_hw_queues;
+
+ /*
+ * Allocate a HW queue if there are less queues
+ * than maximum per core queues configured
+ */
+ if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
+ queue = alloc_hw_queue(lcore_id);
+ if (queue) {
+ core_info->hw_queues[num_hw_queues] = queue;
+ core_info->num_hw_queues++;
+ return queue;
+ }
+ }
+
+ queue = core_info->hw_queues[0];
+ /* In case there is no queue associated with the core return NULL */
+ if (!queue)
+ return NULL;
+
+ /* Fetch the least loaded H/W queue */
+ least_num_users = core_info->hw_queues[0]->num_users;
+ for (i = 0; i < num_hw_queues; i++) {
+ temp = core_info->hw_queues[i];
+ if (temp->num_users < least_num_users)
+ queue = temp;
+ }
+
+ if (queue)
+ queue->num_users++;
+
+ return queue;
+}
+
+static void
+put_hw_queue(struct qdma_hw_queue *queue)
+{
+ struct qdma_per_core_info *core_info;
+ int lcore_id, num_hw_queues, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /*
+ * If this is the last user of the queue free it.
+ * Also remove it from QDMA core info.
+ */
+ if (queue->num_users == 1) {
+ free_hw_queue(queue);
+
+ /* Remove the physical queue from core info */
+ lcore_id = queue->lcore_id;
+ core_info = &qdma_core_info[lcore_id];
+ num_hw_queues = core_info->num_hw_queues;
+ for (i = 0; i < num_hw_queues; i++) {
+ if (queue == core_info->hw_queues[i])
+ break;
+ }
+ for (; i < num_hw_queues - 1; i++)
+ core_info->hw_queues[i] = core_info->hw_queues[i + 1];
+ core_info->hw_queues[i] = NULL;
+ } else {
+ queue->num_users--;
+ }
+}
+
+int __rte_experimental
+rte_qdma_init(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_spinlock_init(&qdma_dev.lock);
+
+ return 0;
+}
+
+void __rte_experimental
+rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
+}
+
+int __rte_experimental
+rte_qdma_reset(void)
+{
+ struct qdma_hw_queue *queue;
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case QDMA device is not in stopped state, return -EBUSY */
+ if (qdma_dev.state == 1) {
+ DPAA2_QDMA_ERR(
+ "Device is in running state. Stop before reset.");
+ return -EBUSY;
+ }
+
+ /* In case there are pending jobs on any VQ, return -EBUSY */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+ if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
+ qdma_vqs[i].num_dequeues))
+ DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
+ return -EBUSY;
+ }
+
+ /* Reset HW queues */
+ TAILQ_FOREACH(queue, &qdma_queue_list, next)
+ queue->num_users = 0;
+
+ /* Reset and free virtual queues */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+ if (qdma_vqs[i].status_ring)
+ rte_ring_free(qdma_vqs[i].status_ring);
+ }
+ if (qdma_vqs)
+ rte_free(qdma_vqs);
+ qdma_vqs = NULL;
+
+ /* Reset per core info */
+ memset(&qdma_core_info, 0,
+ sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
+
+ /* Free the FLE pool */
+ if (qdma_dev.fle_pool)
+ rte_mempool_free(qdma_dev.fle_pool);
+
+ /* Reset QDMA device structure */
+ qdma_dev.mode = RTE_QDMA_MODE_HW;
+ qdma_dev.max_hw_queues_per_core = 0;
+ qdma_dev.fle_pool = NULL;
+ qdma_dev.fle_pool_count = 0;
+ qdma_dev.max_vqs = 0;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_qdma_configure(struct rte_qdma_config *qdma_config)
+{
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case QDMA device is not in stopped state, return -EBUSY */
+ if (qdma_dev.state == 1) {
+ DPAA2_QDMA_ERR(
+ "Device is in running state. Stop before config.");
+ return -1;
+ }
+
+ /* Reset the QDMA device */
+ ret = rte_qdma_reset();
+ if (ret) {
+ DPAA2_QDMA_ERR("Resetting QDMA failed");
+ return ret;
+ }
+
+ /* Set mode */
+ qdma_dev.mode = qdma_config->mode;
+
+ /* Set max HW queue per core */
+ if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
+ DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
+ MAX_HW_QUEUE_PER_CORE);
+ return -EINVAL;
+ }
+ qdma_dev.max_hw_queues_per_core =
+ qdma_config->max_hw_queues_per_core;
+
+ /* Allocate Virtual Queues */
+ qdma_vqs = rte_malloc("qdma_virtual_queues",
+ (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
+ RTE_CACHE_LINE_SIZE);
+ if (!qdma_vqs) {
+ DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
+ return -ENOMEM;
+ }
+ qdma_dev.max_vqs = qdma_config->max_vqs;
+
+ /* Allocate FLE pool */
+ qdma_dev.fle_pool = rte_mempool_create("qdma_fle_pool",
+ qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
+ QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
+ NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+ if (!qdma_dev.fle_pool) {
+ DPAA2_QDMA_ERR("qdma_fle_pool create failed");
+ rte_free(qdma_vqs);
+ qdma_vqs = NULL;
+ return -ENOMEM;
+ }
+ qdma_dev.fle_pool_count = qdma_config->fle_pool_count;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_qdma_start(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_dev.state = 1;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
+{
+ char ring_name[32];
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_spinlock_lock(&qdma_dev.lock);
+
+ /* Get a free Virtual Queue */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+ if (qdma_vqs[i].in_use == 0)
+ break;
+ }
+
+ /* Return in case no VQ is free */
+ if (i == qdma_dev.max_vqs) {
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return -ENODEV;
+ }
+
+ if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
+ (flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
+ /* Allocate HW queue for a VQ */
+ qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
+ qdma_vqs[i].exclusive_hw_queue = 1;
+ } else {
+ /* Allocate a Ring for Virutal Queue in VQ mode */
+ sprintf(ring_name, "status ring %d", i);
+ qdma_vqs[i].status_ring = rte_ring_create(ring_name,
+ qdma_dev.fle_pool_count, rte_socket_id(), 0);
+ if (!qdma_vqs[i].status_ring) {
+ DPAA2_QDMA_ERR("Status ring creation failed for vq");
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return rte_errno;
+ }
+
+ /* Get a HW queue (shared) for a VQ */
+ qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
+ qdma_vqs[i].exclusive_hw_queue = 0;
+ }
+
+ if (qdma_vqs[i].hw_queue == NULL) {
+ DPAA2_QDMA_ERR("No H/W queue available for VQ");
+ if (qdma_vqs[i].status_ring)
+ rte_ring_free(qdma_vqs[i].status_ring);
+ qdma_vqs[i].status_ring = NULL;
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return -ENODEV;
+ }
+
+ qdma_vqs[i].in_use = 1;
+ qdma_vqs[i].lcore_id = lcore_id;
+
+ rte_spinlock_unlock(&qdma_dev.lock);
+
+ return i;
+}
+
+static void
+dpaa2_qdma_populate_fle(struct qbman_fle *fle,
+ uint64_t src, uint64_t dest,
+ size_t len, uint32_t flags)
+{
+ struct qdma_sdd *sdd;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
+ (DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));
+
+ /* first frame list to source descriptor */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
+ DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
+
+ /* source and destination descriptor */
+ DPAA2_SET_SDD_RD_COHERENT(sdd); /* source descriptor CMD */
+ sdd++;
+ DPAA2_SET_SDD_WR_COHERENT(sdd); /* dest descriptor CMD */
+
+ fle++;
+ /* source frame list to source buffer */
+ if (flags & RTE_QDMA_JOB_SRC_PHY) {
+ DPAA2_SET_FLE_ADDR(fle, src);
+ DPAA2_SET_FLE_BMT(fle);
+ } else {
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
+ }
+ DPAA2_SET_FLE_LEN(fle, len);
+
+ fle++;
+ /* destination frame list to destination buffer */
+ if (flags & RTE_QDMA_JOB_DEST_PHY) {
+ DPAA2_SET_FLE_BMT(fle);
+ DPAA2_SET_FLE_ADDR(fle, dest);
+ } else {
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
+ }
+ DPAA2_SET_FLE_LEN(fle, len);
+
+ /* Final bit: 1, for last frame list */
+ DPAA2_SET_FLE_FIN(fle);
+}
+
+static int
+dpdmai_dev_enqueue(struct dpaa2_dpdmai_dev *dpdmai_dev,
+ uint16_t txq_id,
+ uint16_t vq_id,
+ struct rte_qdma_job *job)
+{
+ struct qdma_io_meta *io_meta;
+ struct qbman_fd fd;
+ struct dpaa2_queue *txq;
+ struct qbman_fle *fle;
+ struct qbman_eq_desc eqdesc;
+ struct qbman_swp *swp;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_QDMA_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ txq = &(dpdmai_dev->tx_queue[txq_id]);
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc, 0);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+ /*
+ * Get an FLE/SDD from FLE pool.
+ * Note: IO metadata is before the FLE and SDD memory.
+ */
+ ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&io_meta));
+ if (ret) {
+ DPAA2_QDMA_DP_WARN("Memory alloc failed for FLE");
+ return ret;
+ }
+
+ /* Set the metadata */
+ io_meta->cnxt = (size_t)job;
+ io_meta->id = vq_id;
+
+ fle = (struct qbman_fle *)(io_meta + 1);
+
+ /* populate Frame descriptor */
+ memset(&fd, 0, sizeof(struct qbman_fd));
+ DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(&fd);
+ DPAA2_SET_FD_FRC(&fd, QDMA_SER_CTX);
+
+ /* Populate FLE */
+ memset(fle, 0, QDMA_FLE_POOL_SIZE);
+ dpaa2_qdma_populate_fle(fle, job->src, job->dest, job->len, job->flags);
+
+ /* Enqueue the packet to the QBMAN */
+ do {
+ ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
+ if (ret < 0 && ret != -EBUSY)
+ DPAA2_QDMA_ERR("Transmit failure with err: %d", ret);
+ } while (ret == -EBUSY);
+
+ DPAA2_QDMA_DP_DEBUG("Successfully transmitted a packet");
+
+ return ret;
+}
+
+int __rte_experimental
+rte_qdma_vq_enqueue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ int i, ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ for (i = 0; i < nb_jobs; i++) {
+ ret = rte_qdma_vq_enqueue(vq_id, job[i]);
+ if (ret < 0)
+ break;
+ }
+
+ return i;
+}
+
+int __rte_experimental
+rte_qdma_vq_enqueue(uint16_t vq_id,
+ struct rte_qdma_job *job)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+ struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Return error in case of wrong lcore_id */
+ if (rte_lcore_id() != qdma_vq->lcore_id) {
+ DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
+ vq_id);
+ return -EINVAL;
+ }
+
+ ret = dpdmai_dev_enqueue(dpdmai_dev, qdma_pq->queue_id, vq_id, job);
+ if (ret < 0) {
+ DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
+ return ret;
+ }
+
+ qdma_vq->num_enqueues++;
+
+ return 1;
+}
+
+/* Function to receive a QDMA job for a given device and queue*/
+static int
+dpdmai_dev_dequeue(struct dpaa2_dpdmai_dev *dpdmai_dev,
+ uint16_t rxq_id,
+ uint16_t *vq_id,
+ struct rte_qdma_job **job)
+{
+ struct qdma_io_meta *io_meta;
+ struct dpaa2_queue *rxq;
+ struct qbman_result *dq_storage;
+ struct qbman_pull_desc pulldesc;
+ const struct qbman_fd *fd;
+ struct qbman_swp *swp;
+ struct qbman_fle *fle;
+ uint32_t fqid;
+ uint8_t status;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_QDMA_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ rxq = &(dpdmai_dev->rx_queue[rxq_id]);
+ dq_storage = rxq->q_storage->dq_storage[0];
+ fqid = rxq->fqid;
+
+ /* Prepare dequeue descriptor */
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+ qbman_pull_desc_set_numframes(&pulldesc, 1);
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
+ continue;
+ }
+ break;
+ }
+
+ /* Check if previous issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+ /* Loop until dq_storage is updated with new token by QBMAN */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+
+ /* Check for valid frame. */
+ status = qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+ DPAA2_QDMA_DP_DEBUG("No frame is delivered");
+ return 0;
+ }
+
+ /* Get the FD */
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ /*
+ * Fetch metadata from FLE. job and vq_id were set
+ * in metadata in the enqueue operation.
+ */
+ fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+ io_meta = (struct qdma_io_meta *)(fle) - 1;
+ if (vq_id)
+ *vq_id = io_meta->id;
+
+ *job = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
+ (*job)->status = DPAA2_GET_FD_ERR(fd);
+
+ /* Free FLE to the pool */
+ rte_mempool_put(qdma_dev.fle_pool, io_meta);
+
+ DPAA2_QDMA_DP_DEBUG("packet received");
+
+ return 1;
+}
+
+int __rte_experimental
+rte_qdma_vq_dequeue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ for (i = 0; i < nb_jobs; i++) {
+ job[i] = rte_qdma_vq_dequeue(vq_id);
+ if (!job[i])
+ break;
+ }
+
+ return i;
+}
+
+struct rte_qdma_job * __rte_experimental
+rte_qdma_vq_dequeue(uint16_t vq_id)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+ struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
+ struct rte_qdma_job *job = NULL;
+ struct qdma_virt_queue *temp_qdma_vq;
+ int dequeue_budget = QDMA_DEQUEUE_BUDGET;
+ int ring_count, ret, i;
+ uint16_t temp_vq_id;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Return error in case of wrong lcore_id */
+ if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
+ DPAA2_QDMA_ERR("QDMA dequeue for vqid %d on wrong core",
+ vq_id);
+ return NULL;
+ }
+
+ /* Only dequeue when there are pending jobs on VQ */
+ if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
+ return NULL;
+
+ if (qdma_vq->exclusive_hw_queue) {
+ /* In case of exclusive queue directly fetch from HW queue */
+ ret = dpdmai_dev_dequeue(dpdmai_dev, qdma_pq->queue_id,
+ NULL, &job);
+ if (ret < 0) {
+ DPAA2_QDMA_ERR(
+ "Dequeue from DPDMAI device failed: %d", ret);
+ return NULL;
+ }
+ } else {
+ /*
+ * Get the QDMA completed jobs from the software ring.
+ * In case they are not available on the ring poke the HW
+ * to fetch completed jobs from corresponding HW queues
+ */
+ ring_count = rte_ring_count(qdma_vq->status_ring);
+ if (ring_count == 0) {
+ /* TODO - How to have right budget */
+ for (i = 0; i < dequeue_budget; i++) {
+ ret = dpdmai_dev_dequeue(dpdmai_dev,
+ qdma_pq->queue_id, &temp_vq_id, &job);
+ if (ret == 0)
+ break;
+ temp_qdma_vq = &qdma_vqs[temp_vq_id];
+ rte_ring_enqueue(temp_qdma_vq->status_ring,
+ (void *)(job));
+ ring_count = rte_ring_count(
+ qdma_vq->status_ring);
+ if (ring_count)
+ break;
+ }
+ }
+
+ /* Dequeue job from the software ring to provide to the user */
+ rte_ring_dequeue(qdma_vq->status_ring, (void **)&job);
+ if (job)
+ qdma_vq->num_dequeues++;
+ }
+
+ return job;
+}
+
+void __rte_experimental
+rte_qdma_vq_stats(uint16_t vq_id,
+ struct rte_qdma_vq_stats *vq_status)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (qdma_vq->in_use) {
+ vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
+ vq_status->lcore_id = qdma_vq->lcore_id;
+ vq_status->num_enqueues = qdma_vq->num_enqueues;
+ vq_status->num_dequeues = qdma_vq->num_dequeues;
+ vq_status->num_pending_jobs = vq_status->num_enqueues -
+ vq_status->num_dequeues;
+ }
+}
+
+int __rte_experimental
+rte_qdma_vq_destroy(uint16_t vq_id)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case there are pending jobs on any VQ, return -EBUSY */
+ if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
+ return -EBUSY;
+
+ rte_spinlock_lock(&qdma_dev.lock);
+
+ if (qdma_vq->exclusive_hw_queue)
+ free_hw_queue(qdma_vq->hw_queue);
+ else {
+ if (qdma_vqs->status_ring)
+ rte_ring_free(qdma_vqs->status_ring);
+
+ put_hw_queue(qdma_vq->hw_queue);
+ }
+
+ memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
+
+ rte_spinlock_lock(&qdma_dev.lock);
+
+ return 0;
+}
+
+void __rte_experimental
+rte_qdma_stop(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_dev.state = 0;
+}
+
+void __rte_experimental
+rte_qdma_destroy(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_qdma_reset();
+}
+
+static const struct rte_rawdev_ops dpaa2_qdma_ops;
+
+static int
+add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
+{
+ struct qdma_hw_queue *queue;
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ for (i = 0; i < dpdmai_dev->num_queues; i++) {
+ queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
+ if (!queue) {
+ DPAA2_QDMA_ERR(
+ "Memory allocation failed for QDMA queue");
+ return -ENOMEM;
+ }
+
+ queue->dpdmai_dev = dpdmai_dev;
+ queue->queue_id = i;
+
+ TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
+ qdma_dev.num_hw_queues++;
+ }
+
+ return 0;
+}
+
+static void
+remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
+{
+ struct qdma_hw_queue *queue = NULL;
+ struct qdma_hw_queue *tqueue = NULL;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
+ if (queue->dpdmai_dev == dpdmai_dev) {
+ TAILQ_REMOVE(&qdma_queue_list, queue, next);
+ rte_free(queue);
+ queue = NULL;
+ }
+ }
+}
+
+static int
+dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ int ret, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Remove HW queues from global list */
+ remove_hw_queues_from_list(dpdmai_dev);
+
+ ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token);
+ if (ret)
+ DPAA2_QDMA_ERR("dmdmai disable failed");
+
+ /* Set up the DQRR storage for Rx */
+ for (i = 0; i < DPDMAI_PRIO_NUM; i++) {
+ struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);
+
+ if (rxq->q_storage) {
+ dpaa2_free_dq_storage(rxq->q_storage);
+ rte_free(rxq->q_storage);
+ }
+ }
+
+ /* Close the device at underlying layer*/
+ ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
+ if (ret)
+ DPAA2_QDMA_ERR("Failure closing dpdmai device");
+
+ return 0;
+}
+
+static int
+dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ struct dpdmai_rx_queue_cfg rx_queue_cfg;
+ struct dpdmai_attr attr;
+ struct dpdmai_rx_queue_attr rx_attr;
+ struct dpdmai_tx_queue_attr tx_attr;
+ int ret, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Open DPDMAI device */
+ dpdmai_dev->dpdmai_id = dpdmai_id;
+ dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
+ return ret;
+ }
+
+ /* Get DPDMAI attributes */
+ ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, &attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->num_queues = attr.num_of_priorities;
+
+ /* Set up Rx Queues */
+ for (i = 0; i < attr.num_of_priorities; i++) {
+ struct dpaa2_queue *rxq;
+
+ memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+ ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
+ CMD_PRI_LOW,
+ dpdmai_dev->token,
+ i, &rx_queue_cfg);
+ if (ret) {
+ DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
+ ret);
+ goto init_err;
+ }
+
+ /* Allocate DQ storage for the DPDMAI Rx queues */
+ rxq = &(dpdmai_dev->rx_queue[i]);
+ rxq->q_storage = rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->q_storage) {
+ DPAA2_QDMA_ERR("q_storage allocation failed");
+ ret = -ENOMEM;
+ goto init_err;
+ }
+
+ memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+ ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
+ goto init_err;
+ }
+ }
+
+ /* Get Rx and Tx queues FQID's */
+ for (i = 0; i < DPDMAI_PRIO_NUM; i++) {
+ ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, i, &rx_attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("Reading device failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
+
+ ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, i, &tx_attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("Reading device failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
+ }
+
+ /* Enable the device */
+ ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token);
+ if (ret) {
+ DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
+ goto init_err;
+ }
+
+ /* Add the HW queue to the global list */
+ ret = add_hw_queues_to_list(dpdmai_dev);
+ if (ret) {
+ DPAA2_QDMA_ERR("Adding H/W queue to list failed");
+ goto init_err;
+ }
+ DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
+
+ return 0;
+init_err:
+ dpaa2_dpdmai_dev_uninit(rawdev);
+ return ret;
+}
+
+static int
+rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_rawdev *rawdev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
+ sizeof(struct dpaa2_dpdmai_dev),
+ rte_socket_id());
+ if (!rawdev) {
+ DPAA2_QDMA_ERR("Unable to allocate rawdevice");
+ return -EINVAL;
+ }
+
+ dpaa2_dev->rawdev = rawdev;
+ rawdev->dev_ops = &dpaa2_qdma_ops;
+ rawdev->device = &dpaa2_dev->device;
+ rawdev->driver_name = dpaa2_drv->driver.name;
+
+ /* Invoke PMD device initialization function */
+ ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
+ if (ret) {
+ rte_rawdev_pmd_release(rawdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ dpaa2_dpdmai_dev_uninit(rawdev);
+
+ ret = rte_rawdev_pmd_release(rawdev);
+ if (ret)
+ DPAA2_QDMA_ERR("Device cleanup failed");
+
+ return 0;
+}
+
+static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
+ .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
+ .drv_type = DPAA2_QDMA,
+ .probe = rte_dpaa2_qdma_probe,
+ .remove = rte_dpaa2_qdma_remove,
+};
+
+RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
+
+RTE_INIT(dpaa2_qdma_init_log)
+{
+ dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
+ if (dpaa2_qdma_logtype >= 0)
+ rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
+}
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
new file mode 100644
index 00000000..c6a05780
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_QDMA_H__
+#define __DPAA2_QDMA_H__
+
+struct qdma_sdd;
+struct qdma_io_meta;
+
+#define DPAA2_QDMA_MAX_FLE 3
+#define DPAA2_QDMA_MAX_SDD 2
+
+/** FLE pool size: 3 Frame list + 2 source/destination descriptor */
+#define QDMA_FLE_POOL_SIZE (sizeof(struct qdma_io_meta) + \
+ sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
+ sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
+/** FLE pool cache size */
+#define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
+
+/** Notification by FQD_CTX[fqid] */
+#define QDMA_SER_CTX (1 << 8)
+
+/**
+ * Source descriptor command read transaction type for RBP=0:
+ * coherent copy of cacheable memory
+ */
+#define DPAA2_SET_SDD_RD_COHERENT(sdd) ((sdd)->cmd = (0xb << 28))
+/**
+ * Destination descriptor command write transaction type for RBP=0:
+ * coherent copy of cacheable memory
+ */
+#define DPAA2_SET_SDD_WR_COHERENT(sdd) ((sdd)->cmd = (0x6 << 28))
+
+/** Maximum possible H/W Queues on each core */
+#define MAX_HW_QUEUE_PER_CORE 64
+
+/**
+ * In case of Virtual Queue mode, this specifies the number of
+ * dequeue the 'qdma_vq_dequeue/multi' API does from the H/W Queue
+ * in case there is no job present on the Virtual Queue ring.
+ */
+#define QDMA_DEQUEUE_BUDGET 64
+
+/**
+ * Represents a QDMA device.
+ * A single QDMA device exists which is combination of multiple DPDMAI rawdev's.
+ */
+struct qdma_device {
+ /** total number of hw queues. */
+ uint16_t num_hw_queues;
+ /**
+ * Maximum number of hw queues to be alocated per core.
+ * This is limited by MAX_HW_QUEUE_PER_CORE
+ */
+ uint16_t max_hw_queues_per_core;
+ /** Maximum number of VQ's */
+ uint16_t max_vqs;
+ /** mode of operation - physical(h/w) or virtual */
+ uint8_t mode;
+ /** Device state - started or stopped */
+ uint8_t state;
+ /** FLE pool for the device */
+ struct rte_mempool *fle_pool;
+ /** FLE pool size */
+ int fle_pool_count;
+ /** A lock to QDMA device whenever required */
+ rte_spinlock_t lock;
+};
+
+/** Represents a QDMA H/W queue */
+struct qdma_hw_queue {
+ /** Pointer to Next instance */
+ TAILQ_ENTRY(qdma_hw_queue) next;
+ /** DPDMAI device to communicate with HW */
+ struct dpaa2_dpdmai_dev *dpdmai_dev;
+ /** queue ID to communicate with HW */
+ uint16_t queue_id;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /** Number of users of this hw queue */
+ uint32_t num_users;
+};
+
+/** Represents a QDMA virtual queue */
+struct qdma_virt_queue {
+ /** Status ring of the virtual queue */
+ struct rte_ring *status_ring;
+ /** Associated hw queue */
+ struct qdma_hw_queue *hw_queue;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /** States if this vq is in use or not */
+ uint8_t in_use;
+ /** States if this vq has exclusively associated hw queue */
+ uint8_t exclusive_hw_queue;
+ /* Total number of enqueues on this VQ */
+ uint64_t num_enqueues;
+ /* Total number of dequeues from this VQ */
+ uint64_t num_dequeues;
+};
+
+/** Represents a QDMA per core hw queues allocation in virtual mode */
+struct qdma_per_core_info {
+ /** list for allocated hw queues */
+ struct qdma_hw_queue *hw_queues[MAX_HW_QUEUE_PER_CORE];
+ /* Number of hw queues allocated for this core */
+ uint16_t num_hw_queues;
+};
+
+/** Metadata which is stored with each operation */
+struct qdma_io_meta {
+ /**
+ * Context which is stored in the FLE pool (just before the FLE).
+ * QDMA job is stored as a this context as a part of metadata.
+ */
+ uint64_t cnxt;
+ /** VQ ID is stored as a part of metadata of the enqueue command */
+ uint64_t id;
+};
+
+/** Source/Destination Descriptor */
+struct qdma_sdd {
+ uint32_t rsv;
+ /** Stride configuration */
+ uint32_t stride;
+ /** Route-by-port command */
+ uint32_t rbpcmd;
+ uint32_t cmd;
+} __attribute__((__packed__));
+
+/** Represents a DPDMAI raw device */
+struct dpaa2_dpdmai_dev {
+ /** Pointer to Next device instance */
+ TAILQ_ENTRY(dpaa2_qdma_device) next;
+ /** handle to DPDMAI object */
+ struct fsl_mc_io dpdmai;
+ /** HW ID for DPDMAI object */
+ uint32_t dpdmai_id;
+ /** Tocken of this device */
+ uint16_t token;
+ /** Number of queue in this DPDMAI device */
+ uint8_t num_queues;
+ /** RX queues */
+ struct dpaa2_queue rx_queue[DPDMAI_PRIO_NUM];
+ /** TX queues */
+ struct dpaa2_queue tx_queue[DPDMAI_PRIO_NUM];
+};
+
+#endif /* __DPAA2_QDMA_H__ */
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
new file mode 100644
index 00000000..4779e4ce
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_QDMA_LOGS_H__
+#define __DPAA2_QDMA_LOGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int dpaa2_qdma_logtype;
+
+#define DPAA2_QDMA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_qdma_logtype, "dpaa2_qdma: " \
+ fmt "\n", ## args)
+
+#define DPAA2_QDMA_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_qdma_logtype, "dpaa2_qdma: %s(): " \
+ fmt "\n", __func__, ## args)
+
+#define DPAA2_QDMA_FUNC_TRACE() DPAA2_QDMA_DEBUG(">>")
+
+#define DPAA2_QDMA_INFO(fmt, args...) \
+ DPAA2_QDMA_LOG(INFO, fmt, ## args)
+#define DPAA2_QDMA_ERR(fmt, args...) \
+ DPAA2_QDMA_LOG(ERR, fmt, ## args)
+#define DPAA2_QDMA_WARN(fmt, args...) \
+ DPAA2_QDMA_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_QDMA_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "dpaa2_qdma: " fmt "\n", ## args)
+
+#define DPAA2_QDMA_DP_DEBUG(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_QDMA_DP_INFO(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_QDMA_DP_WARN(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(WARNING, fmt, ## args)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __DPAA2_QDMA_LOGS_H__ */
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/meson.build b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/meson.build
new file mode 100644
index 00000000..b6a081f1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+build = dpdk_conf.has('RTE_LIBRTE_DPAA2_MEMPOOL')
+deps += ['rawdev', 'mempool_dpaa2', 'ring']
+sources = files('dpaa2_qdma.c')
+
+allow_experimental_apis = true
+
+install_headers('rte_pmd_dpaa2_qdma.h')
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
new file mode 100644
index 00000000..17fffcb7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_PMD_DPAA2_QDMA_H__
+#define __RTE_PMD_DPAA2_QDMA_H__
+
+/**
+ * @file
+ *
+ * NXP dpaa2 QDMA specific structures.
+ *
+ */
+
+/** Determines the mode of operation */
+enum {
+ /**
+ * Allocate a H/W queue per VQ i.e. Exclusive hardware queue for a VQ.
+ * This mode will have best performance.
+ */
+ RTE_QDMA_MODE_HW,
+ /**
+ * A VQ shall not have an exclusive associated H/W queue.
+ * Rather a H/W Queue will be shared by multiple Virtual Queues.
+ * This mode will have intermediate data structures to support
+ * multi VQ to PQ mappings thus having some performance implications.
+ * Note: Even in this mode there is an option to allocate a H/W
+ * queue for a VQ. Please see 'RTE_QDMA_VQ_EXCLUSIVE_PQ' flag.
+ */
+ RTE_QDMA_MODE_VIRTUAL
+};
+
+/**
+ * If user has configued a Virtual Queue mode, but for some particular VQ
+ * user needs an exclusive H/W queue associated (for better performance
+ * on that particular VQ), then user can pass this flag while creating the
+ * Virtual Queue. A H/W queue will be allocated corresponding to
+ * VQ which uses this flag.
+ */
+#define RTE_QDMA_VQ_EXCLUSIVE_PQ (1ULL)
+
+/** States if the source addresses is physical. */
+#define RTE_QDMA_JOB_SRC_PHY (1ULL)
+
+/** States if the destination addresses is physical. */
+#define RTE_QDMA_JOB_DEST_PHY (1ULL << 1)
+
+/** Provides QDMA device attributes */
+struct rte_qdma_attr {
+ /** total number of hw QDMA queues present */
+ uint16_t num_hw_queues;
+};
+
+/** QDMA device configuration structure */
+struct rte_qdma_config {
+ /** Number of maximum hw queues to allocate per core. */
+ uint16_t max_hw_queues_per_core;
+ /** Maximum number of VQ's to be used. */
+ uint16_t max_vqs;
+ /** mode of operation - physical(h/w) or virtual */
+ uint8_t mode;
+ /**
+ * User provides this as input to the driver as a size of the FLE pool.
+ * FLE's (and corresponding source/destination descriptors) are
+ * allocated by the driver at enqueue time to store src/dest and
+ * other data and are freed at the dequeue time. This determines the
+ * maximum number of inflight jobs on the QDMA device. This should
+ * be power of 2.
+ */
+ int fle_pool_count;
+};
+
+/** Provides QDMA device statistics */
+struct rte_qdma_vq_stats {
+ /** States if this vq has exclusively associated hw queue */
+ uint8_t exclusive_hw_queue;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /* Total number of enqueues on this VQ */
+ uint64_t num_enqueues;
+ /* Total number of dequeues from this VQ */
+ uint64_t num_dequeues;
+ /* total number of pending jobs in this VQ */
+ uint64_t num_pending_jobs;
+};
+
+/** Determines a QDMA job */
+struct rte_qdma_job {
+ /** Source Address from where DMA is (to be) performed */
+ uint64_t src;
+ /** Destination Address where DMA is (to be) done */
+ uint64_t dest;
+ /** Length of the DMA operation in bytes. */
+ uint32_t len;
+ /** See RTE_QDMA_JOB_ flags */
+ uint32_t flags;
+ /**
+ * User can specify a context which will be maintained
+ * on the dequeue operation.
+ */
+ uint64_t cnxt;
+ /**
+ * Status of the transaction.
+ * This is filled in the dequeue operation by the driver.
+ */
+ uint8_t status;
+};
+
+/**
+ * Initialize the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_init(void);
+
+/**
+ * Get the QDMA attributes.
+ *
+ * @param qdma_attr
+ * QDMA attributes providing total number of hw queues etc.
+ */
+void __rte_experimental
+rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr);
+
+/**
+ * Reset the QDMA device. This API will completely reset the QDMA
+ * device, bringing it to original state as if only rte_qdma_init() API
+ * has been called.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_reset(void);
+
+/**
+ * Configure the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_configure(struct rte_qdma_config *qdma_config);
+
+/**
+ * Start the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_start(void);
+
+/**
+ * Create a Virtual Queue on a particular lcore id.
+ * This API can be called from any thread/core. User can create/destroy
+ * VQ's at runtime.
+ *
+ * @param lcore_id
+ * LCORE ID on which this particular queue would be associated with.
+ * @param flags
+ * RTE_QDMA_VQ_ flags. See macro definitions.
+ *
+ * @returns
+ * - >= 0: Virtual queue ID.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags);
+
+/**
+ * Enqueue multiple jobs to a Virtual Queue.
+ * If the enqueue is successful, the H/W will perform DMA operations
+ * on the basis of the QDMA jobs provided.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * List of QDMA Jobs containing relevant information related to DMA.
+ * @param nb_jobs
+ * Number of QDMA jobs provided by the user.
+ *
+ * @returns
+ * - >=0: Number of jobs successfully submitted
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_enqueue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs);
+
+/**
+ * Enqueue a single job to a Virtual Queue.
+ * If the enqueue is successful, the H/W will perform DMA operations
+ * on the basis of the QDMA job provided.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * A QDMA Job containing relevant information related to DMA.
+ *
+ * @returns
+ * - >=0: Number of jobs successfully submitted
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_enqueue(uint16_t vq_id,
+ struct rte_qdma_job *job);
+
+/**
+ * Dequeue multiple completed jobs from a Virtual Queue.
+ * Provides the list of completed jobs capped by nb_jobs.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * List of QDMA Jobs returned from the API.
+ * @param nb_jobs
+ * Number of QDMA jobs requested for dequeue by the user.
+ *
+ * @returns
+ * Number of jobs actually dequeued.
+ */
+int __rte_experimental
+rte_qdma_vq_dequeue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs);
+
+/**
+ * Dequeue a single completed jobs from a Virtual Queue.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ *
+ * @returns
+ * - A completed job or NULL if no job is there.
+ */
+struct rte_qdma_job * __rte_experimental
+rte_qdma_vq_dequeue(uint16_t vq_id);
+
+/**
+ * Get a Virtual Queue statistics.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param vq_stats
+ * VQ statistics structure which will be filled in by the driver.
+ */
+void __rte_experimental
+rte_qdma_vq_stats(uint16_t vq_id,
+ struct rte_qdma_vq_stats *vq_stats);
+
+/**
+ * Destroy the Virtual Queue specified by vq_id.
+ * This API can be called from any thread/core. User can create/destroy
+ * VQ's at runtime.
+ *
+ * @param vq_id
+ * Virtual Queue ID which needs to be deinialized.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_destroy(uint16_t vq_id);
+
+/**
+ * Stop QDMA device.
+ */
+void __rte_experimental
+rte_qdma_stop(void);
+
+/**
+ * Destroy the QDMA device.
+ */
+void __rte_experimental
+rte_qdma_destroy(void);
+
+#endif /* __RTE_PMD_DPAA2_QDMA_H__*/
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map
new file mode 100644
index 00000000..fe42a227
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+ global:
+
+ rte_qdma_attr_get;
+ rte_qdma_configure;
+ rte_qdma_destroy;
+ rte_qdma_init;
+ rte_qdma_reset;
+ rte_qdma_start;
+ rte_qdma_stop;
+ rte_qdma_vq_create;
+ rte_qdma_vq_destroy;
+ rte_qdma_vq_dequeue;
+ rte_qdma_vq_dequeue_multi;
+ rte_qdma_vq_enqueue;
+ rte_qdma_vq_enqueue_multi;
+ rte_qdma_vq_stats;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/Makefile b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/Makefile
new file mode 100644
index 00000000..f3b9d5e6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ifpga_rawdev.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/bus/ifpga
+CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga_rawdev
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_ifpga
+
+EXPORT_MAP := rte_pmd_ifpga_rawdev_version.map
+
+LIBABIVER := 1
+
+VPATH += $(SRCDIR)/base
+
+include $(RTE_SDK)/drivers/raw/ifpga_rawdev/base/Makefile
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV) += ifpga_rawdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/Makefile b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/Makefile
new file mode 100644
index 00000000..d79da72b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/Makefile
@@ -0,0 +1,26 @@
+#SPDX-License-Identifier: BSD-3-Clause
+#Copyright(c) 2010-2018 Intel Corporation
+
+ifneq ($(CONFIG_RTE_LIBRTE_EAL),)
+OSDEP := osdep_rte
+else
+OSDEP := osdep_raw
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga_rawdev/base/$(OSDEP)
+
+SRCS-y += ifpga_api.c
+SRCS-y += ifpga_enumerate.c
+SRCS-y += ifpga_feature_dev.c
+SRCS-y += ifpga_fme.c
+SRCS-y += ifpga_fme_iperf.c
+SRCS-y += ifpga_fme_dperf.c
+SRCS-y += ifpga_fme_error.c
+SRCS-y += ifpga_port.c
+SRCS-y += ifpga_port_error.c
+SRCS-y += opae_hw_api.c
+SRCS-y += opae_ifpga_hw_api.c
+SRCS-y += opae_debug.c
+SRCS-y += ifpga_fme_pr.c
+
+SRCS-y += $(wildcard $(SRCDIR)/base/$(OSDEP)/*.c)
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/README b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/README
new file mode 100644
index 00000000..5bc2ed0f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/README
@@ -0,0 +1,31 @@
+..
+
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+Intel iFPGA driver
+==================
+
+This directory contains source code of Intel FPGA driver released by
+the team which develops Intel FPGA Open Programmable Acceleration Engine (OPAE).
+The directory of base/ contains the original source package. The base code
+currently supports Intel FPGA solutions including integrated solution (Intel(R)
+Xeon(R) CPU with FPGAs) and discrete solution (Intel(R) Programmable Acceleration
+Card with Intel(R) Arria(R) 10 FPGA) and it could be extended to support more FPGA
+devices in the future.
+
+Please refer to [1][2] for more introduction on OPAE and Intel FPGAs.
+
+[1] https://01.org/OPAE
+[2] https://www.altera.com/solutions/acceleration-hub/overview.html
+
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ osdep_raw/osdep_generic.h
+ osdep_rte/osdep_generic.h
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.c
new file mode 100644
index 00000000..540e171a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_api.h"
+#include "ifpga_enumerate.h"
+#include "ifpga_feature_dev.h"
+
+#include "opae_hw_api.h"
+
+/* Accelerator APIs */
+static int ifpga_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid)
+{
+ struct opae_bridge *br = acc->br;
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return fpga_get_afu_uuid(port, uuid);
+}
+
+static int ifpga_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[])
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_bridge *br = acc->br;
+ struct ifpga_port_hw *port;
+ struct fpga_uafu_irq_set irq_set;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ if (start >= afu_info->num_irqs || start + count > afu_info->num_irqs)
+ return -EINVAL;
+
+ port = br->data;
+
+ irq_set.start = start;
+ irq_set.count = count;
+ irq_set.evtfds = evtfds;
+
+ return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
+ IFPGA_PORT_FEATURE_ID_UINT, &irq_set);
+}
+
+static int ifpga_acc_get_info(struct opae_accelerator *acc,
+ struct opae_acc_info *info)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+
+ if (!afu_info)
+ return -ENODEV;
+
+ info->num_regions = afu_info->num_regions;
+ info->num_irqs = afu_info->num_irqs;
+
+ return 0;
+}
+
+static int ifpga_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (info->index >= afu_info->num_regions)
+ return -EINVAL;
+
+ /* always one RW region only for AFU now */
+ info->flags = ACC_REGION_READ | ACC_REGION_WRITE | ACC_REGION_MMIO;
+ info->len = afu_info->region[info->index].len;
+ info->addr = afu_info->region[info->index].addr;
+
+ return 0;
+}
+
+static int ifpga_acc_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_reg_region *region;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (offset + byte <= offset)
+ return -EINVAL;
+
+ if (region_idx >= afu_info->num_regions)
+ return -EINVAL;
+
+ region = &afu_info->region[region_idx];
+ if (offset + byte > region->len)
+ return -EINVAL;
+
+ switch (byte) {
+ case 8:
+ *(u64 *)data = opae_readq(region->addr + offset);
+ break;
+ case 4:
+ *(u32 *)data = opae_readl(region->addr + offset);
+ break;
+ case 2:
+ *(u16 *)data = opae_readw(region->addr + offset);
+ break;
+ case 1:
+ *(u8 *)data = opae_readb(region->addr + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ifpga_acc_write(struct opae_accelerator *acc,
+ unsigned int region_idx, u64 offset,
+ unsigned int byte, void *data)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_reg_region *region;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (offset + byte <= offset)
+ return -EINVAL;
+
+ if (region_idx >= afu_info->num_regions)
+ return -EINVAL;
+
+ region = &afu_info->region[region_idx];
+ if (offset + byte > region->len)
+ return -EINVAL;
+
+ /* normal mmio case */
+ switch (byte) {
+ case 8:
+ opae_writeq(*(u64 *)data, region->addr + offset);
+ break;
+ case 4:
+ opae_writel(*(u32 *)data, region->addr + offset);
+ break;
+ case 2:
+ opae_writew(*(u16 *)data, region->addr + offset);
+ break;
+ case 1:
+ opae_writeb(*(u8 *)data, region->addr + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct opae_accelerator_ops ifpga_acc_ops = {
+ .read = ifpga_acc_read,
+ .write = ifpga_acc_write,
+ .set_irq = ifpga_acc_set_irq,
+ .get_info = ifpga_acc_get_info,
+ .get_region_info = ifpga_acc_get_region_info,
+ .get_uuid = ifpga_acc_get_uuid,
+};
+
+/* Bridge APIs */
+
+static int ifpga_br_reset(struct opae_bridge *br)
+{
+ struct ifpga_port_hw *port = br->data;
+
+ return fpga_port_reset(port);
+}
+
+struct opae_bridge_ops ifpga_br_ops = {
+ .reset = ifpga_br_reset,
+};
+
+/* Manager APIs */
+static int ifpga_mgr_flash(struct opae_manager *mgr, int id, void *buf,
+ u32 size, u64 *status)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+ struct ifpga_hw *hw = fme->parent;
+
+ return ifpga_pr(hw, id, buf, size, status);
+}
+
+struct opae_manager_ops ifpga_mgr_ops = {
+ .flash = ifpga_mgr_flash,
+};
+
+/* Adapter APIs */
+static int ifpga_adapter_enumerate(struct opae_adapter *adapter)
+{
+ struct ifpga_hw *hw = malloc(sizeof(*hw));
+
+ if (hw) {
+ memset(hw, 0, sizeof(*hw));
+ hw->pci_data = adapter->data;
+ hw->adapter = adapter;
+ if (ifpga_bus_enumerate(hw))
+ goto error;
+ return ifpga_bus_init(hw);
+ }
+
+error:
+ return -ENOMEM;
+}
+
+struct opae_adapter_ops ifpga_adapter_ops = {
+ .enumerate = ifpga_adapter_enumerate,
+};
+
+/**
+ * ifpga_pr - do the partial reconfiguration for a given port device
+ * @hw: pointer to the HW structure
+ * @port_id: the port device id
+ * @buffer: the buffer of the bitstream
+ * @size: the size of the bitstream
+ * @status: hardware status including PR error code if return -EIO.
+ *
+ * @return
+ * - 0: Success, partial reconfiguration finished.
+ * - <0: Error code returned in partial reconfiguration.
+ **/
+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status)
+{
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+
+ return do_pr(hw, port_id, buffer, size, status);
+}
+
+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop)
+{
+ if (!hw || !prop)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_get_prop(&hw->fme, prop);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_get_prop(&hw->port[port_id], prop);
+ }
+
+ return -ENOENT;
+}
+
+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop)
+{
+ if (!hw || !prop)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_set_prop(&hw->fme, prop);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_set_prop(&hw->port[port_id], prop);
+ }
+
+ return -ENOENT;
+}
+
+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ u32 feature_id, void *irq_set)
+{
+ if (!hw || !irq_set)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_set_irq(&hw->fme, feature_id, irq_set);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_set_irq(&hw->port[port_id], feature_id, irq_set);
+ }
+
+ return -ENOENT;
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.h b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.h
new file mode 100644
index 00000000..dae7ca14
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_API_H_
+#define _IFPGA_API_H_
+
+#include "opae_hw_api.h"
+#include "ifpga_hw.h"
+
+extern struct opae_adapter_ops ifpga_adapter_ops;
+extern struct opae_manager_ops ifpga_mgr_ops;
+extern struct opae_bridge_ops ifpga_br_ops;
+extern struct opae_accelerator_ops ifpga_acc_ops;
+
+/* common APIs */
+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop);
+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop);
+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ u32 feature_id, void *irq_set);
+
+/* FME APIs */
+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status);
+
+#endif /* _IFPGA_API_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_compat.h b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_compat.h
new file mode 100644
index 00000000..931c8938
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_compat.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_COMPAT_H_
+#define _IFPGA_COMPAT_H_
+
+#include "opae_osdep.h"
+
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+
+#define IFPGA_PAGE_SHIFT 12
+#define IFPGA_PAGE_SIZE (1 << IFPGA_PAGE_SHIFT)
+#define IFPGA_PAGE_MASK (~(IFPGA_PAGE_SIZE - 1))
+#define IFPGA_PAGE_ALIGN(addr) (((addr) + IFPGA_PAGE_SIZE - 1)\
+ & IFPGA_PAGE_MASK)
+#define IFPGA_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), IFPGA_PAGE_SIZE)
+
+#define readl(addr) opae_readl(addr)
+#define readq(addr) opae_readq(addr)
+#define writel(value, addr) opae_writel(value, addr)
+#define writeq(value, addr) opae_writeq(value, addr)
+
+#define malloc(size) opae_malloc(size)
+#define zmalloc(size) opae_zmalloc(size)
+#define free(size) opae_free(size)
+
+/*
+ * Wait register's _field to be changed to the given value (_expect's _field)
+ * by polling with given interval and timeout.
+ */
+#define fpga_wait_register_field(_field, _expect, _reg_addr, _timeout, _invl)\
+({ \
+ int wait = 0; \
+ int ret = -ETIMEDOUT; \
+ typeof(_expect) value; \
+ for (; wait <= _timeout; wait += _invl) { \
+ value.csr = readq(_reg_addr); \
+ if (_expect._field == value._field) { \
+ ret = 0; \
+ break; \
+ } \
+ udelay(_invl); \
+ } \
+ ret; \
+})
+
+#define __maybe_unused __attribute__((__unused__))
+
+#define UNUSED(x) (void)(x)
+
+#endif /* _IFPGA_COMPAT_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_defines.h b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_defines.h
new file mode 100644
index 00000000..aa025272
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_defines.h
@@ -0,0 +1,1663 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_DEFINES_H_
+#define _IFPGA_DEFINES_H_
+
+#include "ifpga_compat.h"
+
+#define MAX_FPGA_PORT_NUM 4
+
+#define FME_FEATURE_HEADER "fme_hdr"
+#define FME_FEATURE_THERMAL_MGMT "fme_thermal"
+#define FME_FEATURE_POWER_MGMT "fme_power"
+#define FME_FEATURE_GLOBAL_IPERF "fme_iperf"
+#define FME_FEATURE_GLOBAL_ERR "fme_error"
+#define FME_FEATURE_PR_MGMT "fme_pr"
+#define FME_FEATURE_HSSI_ETH "fme_hssi"
+#define FME_FEATURE_GLOBAL_DPERF "fme_dperf"
+#define FME_FEATURE_QSPI_FLASH "fme_qspi_flash"
+
+#define PORT_FEATURE_HEADER "port_hdr"
+#define PORT_FEATURE_UAFU "port_uafu"
+#define PORT_FEATURE_ERR "port_err"
+#define PORT_FEATURE_UMSG "port_umsg"
+#define PORT_FEATURE_PR "port_pr"
+#define PORT_FEATURE_UINT "port_uint"
+#define PORT_FEATURE_STP "port_stp"
+
+/*
+ * do not check the revision id as id may be dynamic under
+ * some cases, e.g, UAFU.
+ */
+#define SKIP_REVISION_CHECK 0xff
+
+#define FME_HEADER_REVISION 1
+#define FME_THERMAL_MGMT_REVISION 0
+#define FME_POWER_MGMT_REVISION 1
+#define FME_GLOBAL_IPERF_REVISION 1
+#define FME_GLOBAL_ERR_REVISION 1
+#define FME_PR_MGMT_REVISION 2
+#define FME_HSSI_ETH_REVISION 0
+#define FME_GLOBAL_DPERF_REVISION 0
+#define FME_QSPI_REVISION 0
+
+#define PORT_HEADER_REVISION 0
+/* UAFU's header info depends on the downloaded GBS */
+#define PORT_UAFU_REVISION SKIP_REVISION_CHECK
+#define PORT_ERR_REVISION 1
+#define PORT_UMSG_REVISION 0
+#define PORT_UINT_REVISION 0
+#define PORT_STP_REVISION 1
+
+#define FEATURE_TYPE_AFU 0x1
+#define FEATURE_TYPE_BBB 0x2
+#define FEATURE_TYPE_PRIVATE 0x3
+#define FEATURE_TYPE_FIU 0x4
+
+#define FEATURE_FIU_ID_FME 0x0
+#define FEATURE_FIU_ID_PORT 0x1
+
+#define FEATURE_ID_HEADER 0x0
+#define FEATURE_ID_AFU 0xff
+
+enum fpga_id_type {
+ FME_ID,
+ PORT_ID,
+ FPGA_ID_MAX,
+};
+
+enum fme_feature_id {
+ FME_FEATURE_ID_HEADER = 0x0,
+
+ FME_FEATURE_ID_THERMAL_MGMT = 0x1,
+ FME_FEATURE_ID_POWER_MGMT = 0x2,
+ FME_FEATURE_ID_GLOBAL_IPERF = 0x3,
+ FME_FEATURE_ID_GLOBAL_ERR = 0x4,
+ FME_FEATURE_ID_PR_MGMT = 0x5,
+ FME_FEATURE_ID_HSSI_ETH = 0x6,
+ FME_FEATURE_ID_GLOBAL_DPERF = 0x7,
+ FME_FEATURE_ID_QSPI_FLASH = 0x8,
+
+ /* one for fme header. */
+ FME_FEATURE_ID_MAX = 0x9,
+};
+
+enum port_feature_id {
+ PORT_FEATURE_ID_HEADER = 0x0,
+ PORT_FEATURE_ID_ERROR = 0x1,
+ PORT_FEATURE_ID_UMSG = 0x2,
+ PORT_FEATURE_ID_UINT = 0x3,
+ PORT_FEATURE_ID_STP = 0x4,
+ PORT_FEATURE_ID_UAFU = 0x5,
+ PORT_FEATURE_ID_MAX = 0x6,
+};
+
+/*
+ * All headers and structures must be byte-packed to match the spec.
+ */
+#pragma pack(push, 1)
+
+struct feature_header {
+ union {
+ u64 csr;
+ struct {
+ u16 id:12;
+ u8 revision:4;
+ u32 next_header_offset:24;
+ u8 end_of_list:1;
+ u32 reserved:19;
+ u8 type:4;
+ };
+ };
+};
+
+struct feature_bbb_header {
+ struct uuid guid;
+};
+
+struct feature_afu_header {
+ struct uuid guid;
+ union {
+ u64 csr;
+ struct {
+ u64 next_afu:24;
+ u64 reserved:40;
+ };
+ };
+};
+
+struct feature_fiu_header {
+ struct uuid guid;
+ union {
+ u64 csr;
+ struct {
+ u64 next_afu:24;
+ u64 reserved:40;
+ };
+ };
+};
+
+struct feature_fme_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 fabric_verid; /* Fabric version ID */
+ u8 socket_id:1; /* Socket id */
+ u8 rsvd1:3; /* Reserved */
+ /* pci0 link available yes /no */
+ u8 pci0_link_avile:1;
+ /* pci1 link available yes /no */
+ u8 pci1_link_avile:1;
+ /* Coherent (QPI/UPI) link available yes /no */
+ u8 qpi_link_avile:1;
+ u8 rsvd2:1; /* Reserved */
+ /* IOMMU or VT-d supported yes/no */
+ u8 iommu_support:1;
+ u8 num_ports:3; /* Number of ports */
+ u8 sf_fab_ctl:1; /* Internal validation bit */
+ u8 rsvd3:3; /* Reserved */
+ /*
+ * Address width supported in bits
+ * BXT -0x26 , SKX -0x30
+ */
+ u8 address_width_bits:6;
+ u8 rsvd4:2; /* Reserved */
+ /* Size of cache supported in kb */
+ u16 cache_size:12;
+ u8 cache_assoc:4; /* Cache Associativity */
+ u16 rsvd5:15; /* Reserved */
+ u8 lock_bit:1; /* Lock bit */
+ };
+ };
+};
+
+#define FME_AFU_ACCESS_PF 0
+#define FME_AFU_ACCESS_VF 1
+
+struct feature_fme_port {
+ union {
+ u64 csr;
+ struct {
+ u32 port_offset:24;
+ u8 reserved1;
+ u8 port_bar:3;
+ u32 reserved2:20;
+ u8 afu_access_control:1;
+ u8 reserved3:4;
+ u8 port_implemented:1;
+ u8 reserved4:3;
+ };
+ };
+};
+
+struct feature_fme_fab_status {
+ union {
+ u64 csr;
+ struct {
+ u8 upilink_status:4; /* UPI Link Status */
+ u8 rsvd1:4; /* Reserved */
+ u8 pci0link_status:1; /* pci0 link status */
+ u8 rsvd2:3; /* Reserved */
+ u8 pci1link_status:1; /* pci1 link status */
+ u64 rsvd3:51; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_genprotrange2_base {
+ union {
+ u64 csr;
+ struct {
+ u16 rsvd1; /* Reserved */
+ /* Base Address of memory range */
+ u8 protected_base_addrss:4;
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_genprotrange2_limit {
+ union {
+ u64 csr;
+ struct {
+ u16 rsvd1; /* Reserved */
+ /* Limit Address of memory range */
+ u8 protected_limit_addrss:4;
+ u16 rsvd2:11; /* Reserved */
+ u8 enable:1; /* Enable GENPROTRANGE check */
+ u32 rsvd3; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_dxe_lock {
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Determines write access to the DXE region CSRs
+ * 1 - CSR region is locked;
+ * 0 - it is open for write access.
+ */
+ u8 dxe_early_lock:1;
+ /*
+ * Determines write access to the HSSI CSR
+ * 1 - CSR region is locked;
+ * 0 - it is open for write access.
+ */
+ u8 dxe_late_lock:1;
+ u64 rsvd:62;
+ };
+ };
+};
+
+#define HSSI_ID_NO_HASSI 0
+#define HSSI_ID_PCIE_RP 1
+#define HSSI_ID_ETHERNET 2
+
+struct feature_fme_bitstream_id {
+ union {
+ u64 csr;
+ struct {
+ u32 gitrepo_hash:32; /* GIT repository hash */
+ /*
+ * HSSI configuration identifier:
+ * 0 - No HSSI
+ * 1 - PCIe-RP
+ * 2 - Ethernet
+ */
+ u8 hssi_id:4;
+ u16 rsvd1:12; /* Reserved */
+ /* Bitstream version patch number */
+ u8 bs_verpatch:4;
+ /* Bitstream version minor number */
+ u8 bs_verminor:4;
+ /* Bitstream version major number */
+ u8 bs_vermajor:4;
+ /* Bitstream version debug number */
+ u8 bs_verdebug:4;
+ };
+ };
+};
+
+struct feature_fme_bitstream_md {
+ union {
+ u64 csr;
+ struct {
+ /* Seed number userd for synthesis flow */
+ u8 synth_seed:4;
+ /* Synthesis date(day number - 2 digits) */
+ u8 synth_day:8;
+ /* Synthesis date(month number - 2 digits) */
+ u8 synth_month:8;
+ /* Synthesis date(year number - 2 digits) */
+ u8 synth_year:8;
+ u64 rsvd:36; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_iommu_ctrl {
+ union {
+ u64 csr;
+ struct {
+ /* Disables IOMMU prefetcher for C0 channel */
+ u8 prefetch_disableC0:1;
+ /* Disables IOMMU prefetcher for C1 channel */
+ u8 prefetch_disableC1:1;
+ /* Disables IOMMU partial cache line writes */
+ u8 prefetch_wrdisable:1;
+ u8 rsvd1:1; /* Reserved */
+ /*
+ * Select counter and read value from register
+ * iommu_stat.dbg_counters
+ * 0 - Number of 4K page translation response
+ * 1 - Number of 2M page translation response
+ * 2 - Number of 1G page translation response
+ */
+ u8 counter_sel:2;
+ u32 rsvd2:26; /* Reserved */
+ /* Connected to IOMMU SIP Capabilities */
+ u32 capecap_defeature;
+ };
+ };
+};
+
+struct feature_fme_iommu_stat {
+ union {
+ u64 csr;
+ struct {
+ /* Translation Enable bit from IOMMU SIP */
+ u8 translation_enable:1;
+ /* Drain request in progress */
+ u8 drain_req_inprog:1;
+ /* Invalidation current state */
+ u8 inv_state:3;
+ /* C0 Response Buffer current state */
+ u8 respbuffer_stateC0:3;
+ /* C1 Response Buffer current state */
+ u8 respbuffer_stateC1:3;
+ /* Last request ID to IOMMU SIP */
+ u8 last_reqID:4;
+ /* Last IOMMU SIP response ID value */
+ u8 last_respID:4;
+ /* Last IOMMU SIP response status value */
+ u8 last_respstatus:3;
+ /* C0 Transaction Buffer is not empty */
+ u8 transbuf_notEmptyC0:1;
+ /* C1 Transaction Buffer is not empty */
+ u8 transbuf_notEmptyC1:1;
+ /* C0 Request FIFO is not empty */
+ u8 reqFIFO_notemptyC0:1;
+ /* C1 Request FIFO is not empty */
+ u8 reqFIFO_notemptyC1:1;
+ /* C0 Response FIFO is not empty */
+ u8 respFIFO_notemptyC0:1;
+ /* C1 Response FIFO is not empty */
+ u8 respFIFO_notemptyC1:1;
+ /* C0 Response FIFO overflow detected */
+ u8 respFIFO_overflowC0:1;
+ /* C1 Response FIFO overflow detected */
+ u8 respFIFO_overflowC1:1;
+ /* C0 Transaction Buffer overflow detected */
+ u8 tranbuf_overflowC0:1;
+ /* C1 Transaction Buffer overflow detected */
+ u8 tranbuf_overflowC1:1;
+ /* Request FIFO overflow detected */
+ u8 reqFIFO_overflow:1;
+ /* IOMMU memory read in progress */
+ u8 memrd_inprog:1;
+ /* IOMMU memory write in progress */
+ u8 memwr_inprog:1;
+ u8 rsvd1:1; /* Reserved */
+ /* Value of counter selected by iommu_ctl.counter_sel */
+ u16 dbg_counters:16;
+ u16 rsvd2:12; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_pcie0_ctrl {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_bar_lock:1; /* Lock VT-D BAR register */
+ u64 rsvd1:3;
+ u64 rciep:1; /* Configure PCIE0 as RCiEP */
+ u64 rsvd2:59;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr_base {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:12;
+ u64 base:20; /* SMRR2 memory range base address */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr_mask {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:11;
+ u64 valid:1; /* LLPR_SMRR rule is valid or not */
+ /*
+ * SMRR memory range mask which determines the range
+ * of region being mapped
+ */
+ u64 phys_mask:20;
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr2_base {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:12;
+ u64 base:20; /* SMRR2 memory range base address */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr2_mask {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:11;
+ u64 valid:1; /* LLPR_SMRR2 rule is valid or not */
+ /*
+ * SMRR2 memory range mask which determines the range
+ * of region being mapped
+ */
+ u64 phys_mask:20;
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_meseg_base {
+ union {
+ u64 csr;
+ struct {
+ /* A[45:19] of base address memory range */
+ u64 me_base:27;
+ u64 rsvd:37;
+ };
+ };
+};
+
+struct feature_fme_llpr_meseg_limit {
+ union {
+ u64 csr;
+ struct {
+ /* A[45:19] of limit address memory range */
+ u64 me_limit:27;
+ u64 rsvd1:4;
+ u64 enable:1; /* Enable LLPR MESEG rule */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_header {
+ struct feature_header header;
+ struct feature_afu_header afu_header;
+ u64 reserved;
+ u64 scratchpad;
+ struct feature_fme_capability capability;
+ struct feature_fme_port port[MAX_FPGA_PORT_NUM];
+ struct feature_fme_fab_status fab_status;
+ struct feature_fme_bitstream_id bitstream_id;
+ struct feature_fme_bitstream_md bitstream_md;
+ struct feature_fme_genprotrange2_base genprotrange2_base;
+ struct feature_fme_genprotrange2_limit genprotrange2_limit;
+ struct feature_fme_dxe_lock dxe_lock;
+ struct feature_fme_iommu_ctrl iommu_ctrl;
+ struct feature_fme_iommu_stat iommu_stat;
+ struct feature_fme_pcie0_ctrl pcie0_control;
+ struct feature_fme_llpr_smrr_base smrr_base;
+ struct feature_fme_llpr_smrr_mask smrr_mask;
+ struct feature_fme_llpr_smrr2_base smrr2_base;
+ struct feature_fme_llpr_smrr2_mask smrr2_mask;
+ struct feature_fme_llpr_meseg_base meseg_base;
+ struct feature_fme_llpr_meseg_limit meseg_limit;
+};
+
+struct feature_port_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 port_number:2; /* Port Number 0-3 */
+ u8 rsvd1:6; /* Reserved */
+ u16 mmio_size; /* User MMIO size in KB */
+ u8 rsvd2; /* Reserved */
+ u8 sp_intr_num:4; /* Supported interrupts num */
+ u32 rsvd3:28; /* Reserved */
+ };
+ };
+};
+
+struct feature_port_control {
+ union {
+ u64 csr;
+ struct {
+ u8 port_sftrst:1; /* Port Soft Reset */
+ u8 rsvd1:1; /* Reserved */
+ u8 latency_tolerance:1;/* '1' >= 40us, '0' < 40us */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_sftrst_ack:1; /* HW ACK for Soft Reset */
+ u64 rsvd3:59; /* Reserved */
+ };
+ };
+};
+
+#define PORT_POWER_STATE_NORMAL 0
+#define PORT_POWER_STATE_AP1 1
+#define PORT_POWER_STATE_AP2 2
+#define PORT_POWER_STATE_AP6 6
+
+struct feature_port_status {
+ union {
+ u64 csr;
+ struct {
+ u8 port_freeze:1; /* '1' - freezed '0' - normal */
+ u8 rsvd1:7; /* Reserved */
+ u8 power_state:4; /* Power State */
+ u8 ap1_event:1; /* AP1 event was detected */
+ u8 ap2_event:1; /* AP2 event was detected */
+ u64 rsvd2:50; /* Reserved */
+ };
+ };
+};
+
+/* Port Header Register Set */
+struct feature_port_header {
+ struct feature_header header;
+ struct feature_afu_header afu_header;
+ u64 port_mailbox;
+ u64 scratchpad;
+ struct feature_port_capability capability;
+ struct feature_port_control control;
+ struct feature_port_status status;
+ u64 rsvd2;
+ u64 user_clk_freq_cmd0;
+ u64 user_clk_freq_cmd1;
+ u64 user_clk_freq_sts0;
+ u64 user_clk_freq_sts1;
+};
+
+struct feature_fme_tmp_threshold {
+ union {
+ u64 csr;
+ struct {
+ u8 tmp_thshold1:7; /* temperature Threshold 1 */
+ /* temperature Threshold 1 enable/disable */
+ u8 tmp_thshold1_enable:1;
+ u8 tmp_thshold2:7; /* temperature Threshold 2 */
+ /* temperature Threshold 2 enable /disable */
+ u8 tmp_thshold2_enable:1;
+ u8 pro_hot_setpoint:7; /* Proc Hot set point */
+ u8 rsvd4:1; /* Reserved */
+ u8 therm_trip_thshold:7; /* Thermeal Trip Threshold */
+ u8 rsvd3:1; /* Reserved */
+ u8 thshold1_status:1; /* Threshold 1 Status */
+ u8 thshold2_status:1; /* Threshold 2 Status */
+ u8 rsvd5:1; /* Reserved */
+ /* Thermeal Trip Threshold status */
+ u8 therm_trip_thshold_status:1;
+ u8 rsvd6:4; /* Reserved */
+ /* Validation mode- Force Proc Hot */
+ u8 valmodeforce:1;
+ /* Validation mode - Therm trip Hot */
+ u8 valmodetherm:1;
+ u8 rsvd2:2; /* Reserved */
+ u8 thshold_policy:1; /* threshold policy */
+ u32 rsvd:19; /* Reserved */
+ };
+ };
+};
+
+/* Temperature Sensor Read values format 1 */
+struct feature_fme_temp_rdsensor_fmt1 {
+ union {
+ u64 csr;
+ struct {
+ /* Reads out FPGA temperature in celsius */
+ u8 fpga_temp:7;
+ u8 rsvd0:1; /* Reserved */
+ /* Temperature reading sequence number */
+ u16 tmp_reading_seq_num;
+ /* Temperature reading is valid */
+ u8 tmp_reading_valid:1;
+ u8 rsvd1:7; /* Reserved */
+ u16 dbg_mode:10; /* Debug mode */
+ u32 rsvd2:22; /* Reserved */
+ };
+ };
+};
+
+/* Temperature sensor read values format 2 */
+struct feature_fme_temp_rdsensor_fmt2 {
+ u64 rsvd; /* Reserved */
+};
+
+/* Temperature Threshold Capability Register */
+struct feature_fme_tmp_threshold_cap {
+ union {
+ u64 csr;
+ struct {
+ /* Temperature Threshold Unsupported */
+ u8 tmp_thshold_disabled:1;
+ u64 rsvd:63; /* Reserved */
+ };
+ };
+};
+
+/* FME THERNAL FEATURE */
+struct feature_fme_thermal {
+ struct feature_header header;
+ struct feature_fme_tmp_threshold threshold;
+ struct feature_fme_temp_rdsensor_fmt1 rdsensor_fm1;
+ struct feature_fme_temp_rdsensor_fmt2 rdsensor_fm2;
+ struct feature_fme_tmp_threshold_cap threshold_cap;
+};
+
+/* Power Status register */
+struct feature_fme_pm_status {
+ union {
+ u64 csr;
+ struct {
+ /* FPGA Power consumed, The format is to be defined */
+ u32 pwr_consumed:18;
+ /* FPGA Latency Tolerance Reporting */
+ u8 fpga_latency_report:1;
+ u64 rsvd:45; /* Reserved */
+ };
+ };
+};
+
+/* AP Thresholds */
+struct feature_fme_pm_ap_threshold {
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Number of clocks (5ns period) for assertion
+ * of FME_data
+ */
+ u8 threshold1:7;
+ u8 rsvd1:1;
+ u8 threshold2:7;
+ u8 rsvd2:1;
+ u8 threshold1_status:1;
+ u8 threshold2_status:1;
+ u64 rsvd3:46; /* Reserved */
+ };
+ };
+};
+
+/* Xeon Power Limit */
+struct feature_fme_pm_xeon_limit {
+ union {
+ u64 csr;
+ struct {
+ /* Power limit in Watts in 12.3 format */
+ u16 pwr_limit:15;
+ /* Indicates that power limit has been written */
+ u8 enable:1;
+ /* 0 - Turbe range, 1 - Entire range */
+ u8 clamping:1;
+ /* Time constant in XXYYY format */
+ u8 time:7;
+ u64 rsvd:40; /* Reserved */
+ };
+ };
+};
+
+/* FPGA Power Limit */
+struct feature_fme_pm_fpga_limit {
+ union {
+ u64 csr;
+ struct {
+ /* Power limit in Watts in 12.3 format */
+ u16 pwr_limit:15;
+ /* Indicates that power limit has been written */
+ u8 enable:1;
+ /* 0 - Turbe range, 1 - Entire range */
+ u8 clamping:1;
+ /* Time constant in XXYYY format */
+ u8 time:7;
+ u64 rsvd:40; /* Reserved */
+ };
+ };
+};
+
+/* FME POWER FEATURE */
+struct feature_fme_power {
+ struct feature_header header;
+ struct feature_fme_pm_status status;
+ struct feature_fme_pm_ap_threshold threshold;
+ struct feature_fme_pm_xeon_limit xeon_limit;
+ struct feature_fme_pm_fpga_limit fpga_limit;
+};
+
+#define CACHE_CHANNEL_RD 0
+#define CACHE_CHANNEL_WR 1
+
+enum iperf_cache_events {
+ IPERF_CACHE_RD_HIT,
+ IPERF_CACHE_WR_HIT,
+ IPERF_CACHE_RD_MISS,
+ IPERF_CACHE_WR_MISS,
+ IPERF_CACHE_RSVD, /* reserved */
+ IPERF_CACHE_HOLD_REQ,
+ IPERF_CACHE_DATA_WR_PORT_CONTEN,
+ IPERF_CACHE_TAG_WR_PORT_CONTEN,
+ IPERF_CACHE_TX_REQ_STALL,
+ IPERF_CACHE_RX_REQ_STALL,
+ IPERF_CACHE_EVICTIONS,
+};
+
+/* FPMON Cache Control */
+struct feature_fme_ifpmon_ch_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd1:7; /* Reserved */
+ u8 freeze:1; /* Freeze if set to 1 */
+ u8 rsvd2:7; /* Reserved */
+ u8 cache_event:4; /* Select the cache event */
+ u8 cci_chsel:1; /* Select the channel */
+ u64 rsvd3:43; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Cache Counter */
+struct feature_fme_ifpmon_ch_ctr {
+ union {
+ u64 csr;
+ struct {
+ /* Cache Counter for even addresse */
+ u64 cache_counter:48;
+ u16 rsvd:12; /* Reserved */
+ /* Cache Event being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+enum iperf_fab_events {
+ IPERF_FAB_PCIE0_RD,
+ IPERF_FAB_PCIE0_WR,
+ IPERF_FAB_PCIE1_RD,
+ IPERF_FAB_PCIE1_WR,
+ IPERF_FAB_UPI_RD,
+ IPERF_FAB_UPI_WR,
+ IPERF_FAB_MMIO_RD,
+ IPERF_FAB_MMIO_WR,
+};
+
+#define FAB_DISABLE_FILTER 0
+#define FAB_ENABLE_FILTER 1
+
+/* FPMON FAB Control */
+struct feature_fme_ifpmon_fab_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+ u8 freeze:1; /* Set to 1 frozen counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 fab_evtcode:4; /* Fabric Event Code */
+ u8 port_id:2; /* Port ID */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_filter:1; /* Port Filter */
+ u64 rsvd3:40; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Event Counter */
+struct feature_fme_ifpmon_fab_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 fab_cnt:60; /* Fabric event counter */
+ /* Fabric event code being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+/* FPMON Clock Counter */
+struct feature_fme_ifpmon_clk_ctr {
+ u64 afu_interf_clock; /* Clk_16UI (AFU clock) counter. */
+};
+
+enum iperf_vtd_events {
+ IPERF_VTD_AFU_MEM_RD_TRANS,
+ IPERF_VTD_AFU_MEM_WR_TRANS,
+ IPERF_VTD_AFU_DEVTLB_RD_HIT,
+ IPERF_VTD_AFU_DEVTLB_WR_HIT,
+ IPERF_VTD_DEVTLB_4K_FILL,
+ IPERF_VTD_DEVTLB_2M_FILL,
+ IPERF_VTD_DEVTLB_1G_FILL,
+};
+
+/* VT-d control register */
+struct feature_fme_ifpmon_vtd_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+ u8 freeze:1; /* Set to 1 frozen counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 vtd_evtcode:4; /* VTd and TLB event code */
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+/* VT-d event counter */
+struct feature_fme_ifpmon_vtd_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_counter:48; /* VTd event counter */
+ u16 rsvd:12; /* Reserved */
+ u8 event_code:4; /* VTd event code */
+ };
+ };
+};
+
+enum iperf_vtd_sip_events {
+ IPERF_VTD_SIP_IOTLB_4K_HIT,
+ IPERF_VTD_SIP_IOTLB_2M_HIT,
+ IPERF_VTD_SIP_IOTLB_1G_HIT,
+ IPERF_VTD_SIP_SLPWC_L3_HIT,
+ IPERF_VTD_SIP_SLPWC_L4_HIT,
+ IPERF_VTD_SIP_RCC_HIT,
+ IPERF_VTD_SIP_IOTLB_4K_MISS,
+ IPERF_VTD_SIP_IOTLB_2M_MISS,
+ IPERF_VTD_SIP_IOTLB_1G_MISS,
+ IPERF_VTD_SIP_SLPWC_L3_MISS,
+ IPERF_VTD_SIP_SLPWC_L4_MISS,
+ IPERF_VTD_SIP_RCC_MISS,
+};
+
+/* VT-d SIP control register */
+struct feature_fme_ifpmon_vtd_sip_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+ u8 freeze:1; /* Set to 1 frozen counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 vtd_evtcode:4; /* VTd and TLB event code */
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+/* VT-d SIP event counter */
+struct feature_fme_ifpmon_vtd_sip_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_counter:48; /* VTd event counter */
+ u16 rsvd:12; /* Reserved */
+ u8 event_code:4; /* VTd event code */
+ };
+ };
+};
+
+/* FME IPERF FEATURE */
+struct feature_fme_iperf {
+ struct feature_header header;
+ struct feature_fme_ifpmon_ch_ctl ch_ctl;
+ struct feature_fme_ifpmon_ch_ctr ch_ctr0;
+ struct feature_fme_ifpmon_ch_ctr ch_ctr1;
+ struct feature_fme_ifpmon_fab_ctl fab_ctl;
+ struct feature_fme_ifpmon_fab_ctr fab_ctr;
+ struct feature_fme_ifpmon_clk_ctr clk;
+ struct feature_fme_ifpmon_vtd_ctl vtd_ctl;
+ struct feature_fme_ifpmon_vtd_ctr vtd_ctr;
+ struct feature_fme_ifpmon_vtd_sip_ctl vtd_sip_ctl;
+ struct feature_fme_ifpmon_vtd_sip_ctr vtd_sip_ctr;
+};
+
+enum dperf_fab_events {
+ DPERF_FAB_PCIE0_RD,
+ DPERF_FAB_PCIE0_WR,
+ DPERF_FAB_MMIO_RD = 6,
+ DPERF_FAB_MMIO_WR,
+};
+
+/* FPMON FAB Control */
+struct feature_fme_dfpmon_fab_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+ u8 freeze:1; /* Set to 1 frozen counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 fab_evtcode:4; /* Fabric Event Code */
+ u8 port_id:2; /* Port ID */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_filter:1; /* Port Filter */
+ u64 rsvd3:40; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Event Counter */
+struct feature_fme_dfpmon_fab_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 fab_cnt:60; /* Fabric event counter */
+ /* Fabric event code being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+/* FPMON Clock Counter */
+struct feature_fme_dfpmon_clk_ctr {
+ u64 afu_interf_clock; /* Clk_16UI (AFU clock) counter. */
+};
+
+/* FME DPERF FEATURE */
+struct feature_fme_dperf {
+ struct feature_header header;
+ u64 rsvd[3];
+ struct feature_fme_dfpmon_fab_ctl fab_ctl;
+ struct feature_fme_dfpmon_fab_ctr fab_ctr;
+ struct feature_fme_dfpmon_clk_ctr clk;
+};
+
+struct feature_fme_error0 {
+#define FME_ERROR0_MASK 0xFFUL
+#define FME_ERROR0_MASK_DEFAULT 0x40UL /* pcode workaround */
+ union {
+ u64 csr;
+ struct {
+ u8 fabric_err:1; /* Fabric error */
+ u8 fabfifo_overflow:1; /* Fabric fifo overflow */
+ u8 kticdc_parity_err:2;/* KTI CDC Parity Error */
+ u8 iommu_parity_err:1; /* IOMMU Parity error */
+ /* AFU PF/VF access mismatch detected */
+ u8 afu_acc_mode_err:1;
+ u8 mbp_err:1; /* Indicates an MBP event */
+ /* PCIE0 CDC Parity Error */
+ u8 pcie0cdc_parity_err:5;
+ /* PCIE1 CDC Parity Error */
+ u8 pcie1cdc_parity_err:5;
+ /* CVL CDC Parity Error */
+ u8 cvlcdc_parity_err:3;
+ u64 rsvd:44; /* Reserved */
+ };
+ };
+};
+
+/* PCIe0 Error Status register */
+struct feature_fme_pcie0_error {
+#define FME_PCIE0_ERROR_MASK 0xFFUL
+ union {
+ u64 csr;
+ struct {
+ u8 formattype_err:1; /* TLP format/type error */
+ u8 MWAddr_err:1; /* TLP MW address error */
+ u8 MWAddrLength_err:1; /* TLP MW length error */
+ u8 MRAddr_err:1; /* TLP MR address error */
+ u8 MRAddrLength_err:1; /* TLP MR length error */
+ u8 cpl_tag_err:1; /* TLP CPL tag error */
+ u8 cpl_status_err:1; /* TLP CPL status error */
+ u8 cpl_timeout_err:1; /* TLP CPL timeout */
+ u8 cci_parity_err:1; /* CCI bridge parity error */
+ u8 rxpoison_tlp_err:1; /* Received a TLP with EP set */
+ u64 rsvd:52; /* Reserved */
+ u8 vfnumb_err:1; /* Number of error VF */
+ u8 funct_type_err:1; /* Virtual (1) or Physical */
+ };
+ };
+};
+
+/* PCIe1 Error Status register */
+struct feature_fme_pcie1_error {
+#define FME_PCIE1_ERROR_MASK 0xFFUL
+ union {
+ u64 csr;
+ struct {
+ u8 formattype_err:1; /* TLP format/type error */
+ u8 MWAddr_err:1; /* TLP MW address error */
+ u8 MWAddrLength_err:1; /* TLP MW length error */
+ u8 MRAddr_err:1; /* TLP MR address error */
+ u8 MRAddrLength_err:1; /* TLP MR length error */
+ u8 cpl_tag_err:1; /* TLP CPL tag error */
+ u8 cpl_status_err:1; /* TLP CPL status error */
+ u8 cpl_timeout_err:1; /* TLP CPL timeout */
+ u8 cci_parity_err:1; /* CCI bridge parity error */
+ u8 rxpoison_tlp_err:1; /* Received a TLP with EP set */
+ u64 rsvd:54; /* Reserved */
+ };
+ };
+};
+
+/* FME First Error register */
+struct feature_fme_first_error {
+#define FME_FIRST_ERROR_MASK ((1ULL << 60) - 1)
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Indicates the Error Register that was
+ * triggered first
+ */
+ u64 err_reg_status:60;
+ /*
+ * Holds 60 LSBs from the Error register that was
+ * triggered first
+ */
+ u8 errReg_id:4;
+ };
+ };
+};
+
+/* FME Next Error register */
+struct feature_fme_next_error {
+#define FME_NEXT_ERROR_MASK ((1ULL << 60) - 1)
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Indicates the Error Register that was
+ * triggered second
+ */
+ u64 err_reg_status:60;
+ /*
+ * Holds 60 LSBs from the Error register that was
+ * triggered second
+ */
+ u8 errReg_id:4;
+ };
+ };
+};
+
+/* RAS Non Fatal Error Status register */
+struct feature_fme_ras_nonfaterror {
+ union {
+ u64 csr;
+ struct {
+ /* thremal threshold AP1 */
+ u8 temp_thresh_ap1:1;
+ /* thremal threshold AP2 */
+ u8 temp_thresh_ap2:1;
+ u8 pcie_error:1; /* pcie Error */
+ u8 portfatal_error:1; /* port fatal error */
+ u8 proc_hot:1; /* Indicates a ProcHot event */
+ /* Indicates an AFU PF/VF access mismatch */
+ u8 afu_acc_mode_err:1;
+ /* Injected nonfata Error */
+ u8 injected_nonfata_err:1;
+ u8 rsvd1:2;
+ /* Temperature threshold triggered AP6*/
+ u8 temp_thresh_AP6:1;
+ /* Power threshold triggered AP1 */
+ u8 power_thresh_AP1:1;
+ /* Power threshold triggered AP2 */
+ u8 power_thresh_AP2:1;
+ /* Indicates a MBP event */
+ u8 mbp_err:1;
+ u64 rsvd2:51; /* Reserved */
+ };
+ };
+};
+
+/* RAS Catastrophic Fatal Error Status register */
+struct feature_fme_ras_catfaterror {
+ union {
+ u64 csr;
+ struct {
+ /* KTI Link layer error detected */
+ u8 ktilink_fatal_err:1;
+ /* tag-n-cache error detected */
+ u8 tagcch_fatal_err:1;
+ /* CCI error detected */
+ u8 cci_fatal_err:1;
+ /* KTI Protocol error detected */
+ u8 ktiprpto_fatal_err:1;
+ /* Fatal DRAM error detected */
+ u8 dram_fatal_err:1;
+ /* IOMMU detected */
+ u8 iommu_fatal_err:1;
+ /* Fabric Fatal Error */
+ u8 fabric_fatal_err:1;
+ /* PCIe possion Error */
+ u8 pcie_poison_err:1;
+ /* Injected fatal Error */
+ u8 inject_fata_err:1;
+ /* Catastrophic CRC Error */
+ u8 crc_catast_err:1;
+ /* Catastrophic Thermal Error */
+ u8 therm_catast_err:1;
+ /* Injected Catastrophic Error */
+ u8 injected_catast_err:1;
+ u64 rsvd:52;
+ };
+ };
+};
+
+/* RAS Error injection register */
+struct feature_fme_ras_error_inj {
+#define FME_RAS_ERROR_INJ_MASK 0x7UL
+ union {
+ u64 csr;
+ struct {
+ u8 catast_error:1; /* Catastrophic error flag */
+ u8 fatal_error:1; /* Fatal error flag */
+ u8 nonfatal_error:1; /* NonFatal error flag */
+ u64 rsvd:61; /* Reserved */
+ };
+ };
+};
+
+/* FME error capabilities */
+struct feature_fme_error_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 support_intr:1;
+ /* MSI-X vector table entry number */
+ u16 intr_vector_num:12;
+ u64 rsvd:51; /* Reserved */
+ };
+ };
+};
+
+/* FME ERR FEATURE */
+struct feature_fme_err {
+ struct feature_header header;
+ struct feature_fme_error0 fme_err_mask;
+ struct feature_fme_error0 fme_err;
+ struct feature_fme_pcie0_error pcie0_err_mask;
+ struct feature_fme_pcie0_error pcie0_err;
+ struct feature_fme_pcie1_error pcie1_err_mask;
+ struct feature_fme_pcie1_error pcie1_err;
+ struct feature_fme_first_error fme_first_err;
+ struct feature_fme_next_error fme_next_err;
+ struct feature_fme_ras_nonfaterror ras_nonfat_mask;
+ struct feature_fme_ras_nonfaterror ras_nonfaterr;
+ struct feature_fme_ras_catfaterror ras_catfat_mask;
+ struct feature_fme_ras_catfaterror ras_catfaterr;
+ struct feature_fme_ras_error_inj ras_error_inj;
+ struct feature_fme_error_capability fme_err_capability;
+};
+
+/* FME Partial Reconfiguration Control */
+struct feature_fme_pr_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 pr_reset:1; /* Reset PR Engine */
+ u8 rsvd3:3; /* Reserved */
+ u8 pr_reset_ack:1; /* Reset PR Engine Ack */
+ u8 rsvd4:3; /* Reserved */
+ u8 pr_regionid:2; /* PR Region ID */
+ u8 rsvd1:2; /* Reserved */
+ u8 pr_start_req:1; /* PR Start Request */
+ u8 pr_push_complete:1; /* PR Data push complete */
+ u8 pr_kind:1; /* PR Data push complete */
+ u32 rsvd:17; /* Reserved */
+ u32 config_data; /* Config data TBD */
+ };
+ };
+};
+
+/* FME Partial Reconfiguration Status */
+struct feature_fme_pr_status {
+ union {
+ u64 csr;
+ struct {
+ u16 pr_credit:9; /* PR Credits */
+ u8 rsvd2:7; /* Reserved */
+ u8 pr_status:1; /* PR status */
+ u8 rsvd:3; /* Reserved */
+ /* Altra PR Controller Block status */
+ u8 pr_controller_status:3;
+ u8 rsvd1:1; /* Reserved */
+ u8 pr_host_status:4; /* PR Host status */
+ u8 rsvd3:4; /* Reserved */
+ /* Security Block Status fields (TBD) */
+ u32 security_bstatus;
+ };
+ };
+};
+
+/* FME Partial Reconfiguration Data */
+struct feature_fme_pr_data {
+ union {
+ u64 csr; /* PR data from the raw-binary file */
+ struct {
+ /* PR data from the raw-binary file */
+ u32 pr_data_raw;
+ u32 rsvd;
+ };
+ };
+};
+
+/* FME PR Public Key */
+struct feature_fme_pr_key {
+ u64 key; /* FME PR Public Hash */
+};
+
+/* FME PR FEATURE */
+struct feature_fme_pr {
+ struct feature_header header;
+ /*Partial Reconfiguration control */
+ struct feature_fme_pr_ctl ccip_fme_pr_control;
+
+ /* Partial Reconfiguration Status */
+ struct feature_fme_pr_status ccip_fme_pr_status;
+
+ /* Partial Reconfiguration data */
+ struct feature_fme_pr_data ccip_fme_pr_data;
+
+ /* Partial Reconfiguration data */
+ u64 ccip_fme_pr_err;
+
+ u64 rsvd1[3];
+
+ /* Partial Reconfiguration data registers */
+ u64 fme_pr_data1;
+ u64 fme_pr_data2;
+ u64 fme_pr_data3;
+ u64 fme_pr_data4;
+ u64 fme_pr_data5;
+ u64 fme_pr_data6;
+ u64 fme_pr_data7;
+ u64 fme_pr_data8;
+
+ u64 rsvd2[5];
+
+ /* PR Interface ID */
+ u64 fme_pr_intfc_id_l;
+ u64 fme_pr_intfc_id_h;
+
+ /* MSIX filed to be Added */
+};
+
+/* FME HSSI Control */
+struct feature_fme_hssi_eth_ctrl {
+ union {
+ u64 csr;
+ struct {
+ u32 data:32; /* HSSI data */
+ u16 address:16; /* HSSI address */
+ /*
+ * HSSI comamnd
+ * 0x0 - No request
+ * 0x08 - SW register RD request
+ * 0x10 - SW register WR request
+ * 0x40 - Auxiliar bus RD request
+ * 0x80 - Auxiliar bus WR request
+ */
+ u16 cmd:16;
+ };
+ };
+};
+
+/* FME HSSI Status */
+struct feature_fme_hssi_eth_stat {
+ union {
+ u64 csr;
+ struct {
+ u32 data:32; /* HSSI data */
+ u8 acknowledge:1; /* HSSI acknowledge */
+ u8 spare:1; /* HSSI spare */
+ u32 rsvd:30; /* Reserved */
+ };
+ };
+};
+
+/* FME HSSI FEATURE */
+struct feature_fme_hssi {
+ struct feature_header header;
+ struct feature_fme_hssi_eth_ctrl hssi_control;
+ struct feature_fme_hssi_eth_stat hssi_status;
+};
+
+#define PORT_ERR_MASK 0xfff0703ff001f
+struct feature_port_err_key {
+ union {
+ u64 csr;
+ struct {
+ /* Tx Channel0: Overflow */
+ u8 tx_ch0_overflow:1;
+ /* Tx Channel0: Invalid request encoding */
+ u8 tx_ch0_invaldreq :1;
+ /* Tx Channel0: Request with cl_len=3 not supported */
+ u8 tx_ch0_cl_len3:1;
+ /* Tx Channel0: Request with cl_len=2 not aligned 2CL */
+ u8 tx_ch0_cl_len2:1;
+ /* Tx Channel0: Request with cl_len=4 not aligned 4CL */
+ u8 tx_ch0_cl_len4:1;
+
+ u16 rsvd1:4; /* Reserved */
+
+ /* AFU MMIO RD received while PORT is in reset */
+ u8 mmio_rd_whilerst:1;
+ /* AFU MMIO WR received while PORT is in reset */
+ u8 mmio_wr_whilerst:1;
+
+ u16 rsvd2:5; /* Reserved */
+
+ /* Tx Channel1: Overflow */
+ u8 tx_ch1_overflow:1;
+ /* Tx Channel1: Invalid request encoding */
+ u8 tx_ch1_invaldreq:1;
+ /* Tx Channel1: Request with cl_len=3 not supported */
+ u8 tx_ch1_cl_len3:1;
+ /* Tx Channel1: Request with cl_len=2 not aligned 2CL */
+ u8 tx_ch1_cl_len2:1;
+ /* Tx Channel1: Request with cl_len=4 not aligned 4CL */
+ u8 tx_ch1_cl_len4:1;
+
+ /* Tx Channel1: Insufficient data payload */
+ u8 tx_ch1_insuff_data:1;
+ /* Tx Channel1: Data payload overrun */
+ u8 tx_ch1_data_overrun:1;
+ /* Tx Channel1 : Incorrect address */
+ u8 tx_ch1_incorr_addr:1;
+ /* Tx Channel1 : NON-Zero SOP Detected */
+ u8 tx_ch1_nzsop:1;
+ /* Tx Channel1 : Illegal VC_SEL, atomic request VLO */
+ u8 tx_ch1_illegal_vcsel:1;
+
+ u8 rsvd3:6; /* Reserved */
+
+ /* MMIO Read Timeout in AFU */
+ u8 mmioread_timeout:1;
+
+ /* Tx Channel2: FIFO Overflow */
+ u8 tx_ch2_fifo_overflow:1;
+
+ /* MMIO read is not matching pending request */
+ u8 unexp_mmio_resp:1;
+
+ u8 rsvd4:5; /* Reserved */
+
+ /* Number of pending Requests: counter overflow */
+ u8 tx_req_counter_overflow:1;
+ /* Req with Address violating SMM Range */
+ u8 llpr_smrr_err:1;
+ /* Req with Address violating second SMM Range */
+ u8 llpr_smrr2_err:1;
+ /* Req with Address violating ME Stolen message */
+ u8 llpr_mesg_err:1;
+ /* Req with Address violating Generic Protected Range */
+ u8 genprot_range_err:1;
+ /* Req with Address violating Legacy Range low */
+ u8 legrange_low_err:1;
+ /* Req with Address violating Legacy Range High */
+ u8 legrange_high_err:1;
+ /* Req with Address violating VGA memory range */
+ u8 vgmem_range_err:1;
+ u8 page_fault_err:1; /* Page fault */
+ u8 pmr_err:1; /* PMR Error */
+ u8 ap6_event:1; /* AP6 event */
+ /* VF FLR detected on Port with PF access control */
+ u8 vfflr_access_err:1;
+ u16 rsvd5:12; /* Reserved */
+ };
+ };
+};
+
+/* Port first error register, not contain all error bits in error register. */
+struct feature_port_first_err_key {
+ union {
+ u64 csr;
+ struct {
+ u8 tx_ch0_overflow:1;
+ u8 tx_ch0_invaldreq :1;
+ u8 tx_ch0_cl_len3:1;
+ u8 tx_ch0_cl_len2:1;
+ u8 tx_ch0_cl_len4:1;
+ u8 rsvd1:4; /* Reserved */
+ u8 mmio_rd_whilerst:1;
+ u8 mmio_wr_whilerst:1;
+ u8 rsvd2:5; /* Reserved */
+ u8 tx_ch1_overflow:1;
+ u8 tx_ch1_invaldreq:1;
+ u8 tx_ch1_cl_len3:1;
+ u8 tx_ch1_cl_len2:1;
+ u8 tx_ch1_cl_len4:1;
+ u8 tx_ch1_insuff_data:1;
+ u8 tx_ch1_data_overrun:1;
+ u8 tx_ch1_incorr_addr:1;
+ u8 tx_ch1_nzsop:1;
+ u8 tx_ch1_illegal_vcsel:1;
+ u8 rsvd3:6; /* Reserved */
+ u8 mmioread_timeout:1;
+ u8 tx_ch2_fifo_overflow:1;
+ u8 rsvd4:6; /* Reserved */
+ u8 tx_req_counter_overflow:1;
+ u32 rsvd5:23; /* Reserved */
+ };
+ };
+};
+
+/* Port malformed Req0 */
+struct feature_port_malformed_req0 {
+ u64 header_lsb;
+};
+
+/* Port malformed Req1 */
+struct feature_port_malformed_req1 {
+ u64 header_msb;
+};
+
+/* Port debug register */
+struct feature_port_debug {
+ u64 port_debug;
+};
+
+/* Port error capabilities */
+struct feature_port_err_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 support_intr:1;
+ /* MSI-X vector table entry number */
+ u16 intr_vector_num:12;
+ u64 rsvd:51; /* Reserved */
+ };
+ };
+};
+
+/* PORT FEATURE ERROR */
+struct feature_port_error {
+ struct feature_header header;
+ struct feature_port_err_key error_mask;
+ struct feature_port_err_key port_error;
+ struct feature_port_first_err_key port_first_error;
+ struct feature_port_malformed_req0 malreq0;
+ struct feature_port_malformed_req1 malreq1;
+ struct feature_port_debug port_debug;
+ struct feature_port_err_capability error_capability;
+};
+
+/* Port UMSG Capability */
+struct feature_port_umsg_cap {
+ union {
+ u64 csr;
+ struct {
+ /* Number of umsg allocated to this port */
+ u8 umsg_allocated;
+ /* Enable / Disable UMsg engine for this port */
+ u8 umsg_enable:1;
+ /* Usmg initialization status */
+ u8 umsg_init_complete:1;
+ /* IOMMU can not translate the umsg base address */
+ u8 umsg_trans_error:1;
+ u64 rsvd:53; /* Reserved */
+ };
+ };
+};
+
+/* Port UMSG base address */
+struct feature_port_umsg_baseaddr {
+ union {
+ u64 csr;
+ struct {
+ u64 base_addr:48; /* 48 bit physical address */
+ u16 rsvd; /* Reserved */
+ };
+ };
+};
+
+struct feature_port_umsg_mode {
+ union {
+ u64 csr;
+ struct {
+ u32 umsg_hint_enable; /* UMSG hint enable/disable */
+ u32 rsvd; /* Reserved */
+ };
+ };
+};
+
+/* PORT FEATURE UMSG */
+struct feature_port_umsg {
+ struct feature_header header;
+ struct feature_port_umsg_cap capability;
+ struct feature_port_umsg_baseaddr baseaddr;
+ struct feature_port_umsg_mode mode;
+};
+
+#define UMSG_EN_POLL_INVL 10 /* us */
+#define UMSG_EN_POLL_TIMEOUT 1000 /* us */
+
+/* Port UINT Capability */
+struct feature_port_uint_cap {
+ union {
+ u64 csr;
+ struct {
+ u16 intr_num:12; /* Supported interrupts num */
+ /* First MSI-X vector table entry number */
+ u16 first_vec_num:12;
+ u64 rsvd:40;
+ };
+ };
+};
+
+/* PORT FEATURE UINT */
+struct feature_port_uint {
+ struct feature_header header;
+ struct feature_port_uint_cap capability;
+};
+
+/* STP region supports mmap operation, so use page aligned size. */
+#define PORT_FEATURE_STP_REGION_SIZE \
+ IFPGA_PAGE_ALIGN(sizeof(struct feature_port_stp))
+
+/* Port STP status register (for debug only)*/
+struct feature_port_stp_status {
+ union {
+ u64 csr;
+ struct {
+ /* SLD Hub end-point read/write timeout */
+ u8 sld_ep_timeout:1;
+ /* Remote STP in reset/disable */
+ u8 rstp_disabled:1;
+ u8 unsupported_read:1;
+ /* MMIO timeout detected and faked with a response */
+ u8 mmio_timeout:1;
+ u8 txfifo_count:4;
+ u8 rxfifo_count:4;
+ u8 txfifo_overflow:1;
+ u8 txfifo_underflow:1;
+ u8 rxfifo_overflow:1;
+ u8 rxfifo_underflow:1;
+ /* Number of MMIO write requests */
+ u16 write_requests;
+ /* Number of MMIO read requests */
+ u16 read_requests;
+ /* Number of MMIO read responses */
+ u16 read_responses;
+ };
+ };
+};
+
+/*
+ * PORT FEATURE STP
+ * Most registers in STP region are not touched by driver, but mmapped to user
+ * space. So they are not defined in below data structure, as its actual size
+ * is 0x18c per spec.
+ */
+struct feature_port_stp {
+ struct feature_header header;
+ struct feature_port_stp_status stp_status;
+};
+
+/**
+ * enum fpga_pr_states - fpga PR states
+ * @FPGA_PR_STATE_UNKNOWN: can't determine state
+ * @FPGA_PR_STATE_WRITE_INIT: preparing FPGA for programming
+ * @FPGA_PR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage
+ * @FPGA_PR_STATE_WRITE: writing image to FPGA
+ * @FPGA_PR_STATE_WRITE_ERR: Error while writing FPGA
+ * @FPGA_PR_STATE_WRITE_COMPLETE: Doing post programming steps
+ * @FPGA_PR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE
+ * @FPGA_PR_STATE_OPERATING: FPGA PR done
+ */
+enum fpga_pr_states {
+ /* canot determine state states */
+ FPGA_PR_STATE_UNKNOWN,
+
+ /* write sequence: init, write, complete */
+ FPGA_PR_STATE_WRITE_INIT,
+ FPGA_PR_STATE_WRITE_INIT_ERR,
+ FPGA_PR_STATE_WRITE,
+ FPGA_PR_STATE_WRITE_ERR,
+ FPGA_PR_STATE_WRITE_COMPLETE,
+ FPGA_PR_STATE_WRITE_COMPLETE_ERR,
+
+ /* FPGA PR done */
+ FPGA_PR_STATE_DONE,
+};
+
+/*
+ * FPGA Manager flags
+ * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ */
+#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
+
+/**
+ * struct fpga_pr_info - specific information to a FPGA PR
+ * @flags: boolean flags as defined above
+ * @pr_err: PR error code
+ * @state: fpga manager state
+ * @port_id: port id
+ */
+struct fpga_pr_info {
+ u32 flags;
+ u64 pr_err;
+ enum fpga_pr_states state;
+ int port_id;
+};
+
+#define DEFINE_FPGA_PR_ERR_MSG(_name_) \
+static const char * const _name_[] = { \
+ "PR operation error detected", \
+ "PR CRC error detected", \
+ "PR incompatiable bitstream error detected", \
+ "PR IP protocol error detected", \
+ "PR FIFO overflow error detected", \
+ "PR timeout error detected", \
+ "PR secure load error detected", \
+}
+
+#define RST_POLL_INVL 10 /* us */
+#define RST_POLL_TIMEOUT 1000 /* us */
+
+#define PR_WAIT_TIMEOUT 15000000
+
+#define PR_HOST_STATUS_IDLE 0
+#define PR_MAX_ERR_NUM 7
+
+DEFINE_FPGA_PR_ERR_MSG(pr_err_msg);
+
+/*
+ * green bitstream header must be byte-packed to match the
+ * real file format.
+ */
+struct bts_header {
+ u64 guid_h;
+ u64 guid_l;
+ u32 metadata_len;
+};
+
+#define GBS_GUID_H 0x414750466e6f6558
+#define GBS_GUID_L 0x31303076534247b7
+#define is_valid_bts(bts_hdr) \
+ (((bts_hdr)->guid_h == GBS_GUID_H) && \
+ ((bts_hdr)->guid_l == GBS_GUID_L))
+
+#pragma pack(pop)
+#endif /* _BASE_IFPGA_DEFINES_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
new file mode 100644
index 00000000..f0939dc3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
@@ -0,0 +1,821 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_hw_api.h"
+#include "ifpga_api.h"
+
+#include "ifpga_hw.h"
+#include "ifpga_enumerate.h"
+#include "ifpga_feature_dev.h"
+
+struct build_feature_devs_info {
+ struct opae_adapter_data_pci *pci_data;
+
+ struct ifpga_afu_info *acc_info;
+
+ void *fiu;
+ enum fpga_id_type current_type;
+ int current_port_id;
+
+ void *ioaddr;
+ void *ioend;
+ uint64_t phys_addr;
+ int current_bar;
+
+ void *pfme_hdr;
+
+ struct ifpga_hw *hw;
+};
+
+struct feature_info {
+ const char *name;
+ u32 resource_size;
+ int feature_index;
+ int revision_id;
+ unsigned int vec_start;
+ unsigned int vec_cnt;
+
+ struct feature_ops *ops;
+};
+
+/* indexed by fme feature IDs which are defined in 'enum fme_feature_id'. */
+static struct feature_info fme_features[] = {
+ {
+ .name = FME_FEATURE_HEADER,
+ .resource_size = sizeof(struct feature_fme_header),
+ .feature_index = FME_FEATURE_ID_HEADER,
+ .revision_id = FME_HEADER_REVISION,
+ .ops = &fme_hdr_ops,
+ },
+ {
+ .name = FME_FEATURE_THERMAL_MGMT,
+ .resource_size = sizeof(struct feature_fme_thermal),
+ .feature_index = FME_FEATURE_ID_THERMAL_MGMT,
+ .revision_id = FME_THERMAL_MGMT_REVISION,
+ .ops = &fme_thermal_mgmt_ops,
+ },
+ {
+ .name = FME_FEATURE_POWER_MGMT,
+ .resource_size = sizeof(struct feature_fme_power),
+ .feature_index = FME_FEATURE_ID_POWER_MGMT,
+ .revision_id = FME_POWER_MGMT_REVISION,
+ .ops = &fme_power_mgmt_ops,
+ },
+ {
+ .name = FME_FEATURE_GLOBAL_IPERF,
+ .resource_size = sizeof(struct feature_fme_iperf),
+ .feature_index = FME_FEATURE_ID_GLOBAL_IPERF,
+ .revision_id = FME_GLOBAL_IPERF_REVISION,
+ .ops = &fme_global_iperf_ops,
+ },
+ {
+ .name = FME_FEATURE_GLOBAL_ERR,
+ .resource_size = sizeof(struct feature_fme_err),
+ .feature_index = FME_FEATURE_ID_GLOBAL_ERR,
+ .revision_id = FME_GLOBAL_ERR_REVISION,
+ .ops = &fme_global_err_ops,
+ },
+ {
+ .name = FME_FEATURE_PR_MGMT,
+ .resource_size = sizeof(struct feature_fme_pr),
+ .feature_index = FME_FEATURE_ID_PR_MGMT,
+ .revision_id = FME_PR_MGMT_REVISION,
+ .ops = &fme_pr_mgmt_ops,
+ },
+ {
+ .name = FME_FEATURE_HSSI_ETH,
+ .resource_size = sizeof(struct feature_fme_hssi),
+ .feature_index = FME_FEATURE_ID_HSSI_ETH,
+ .revision_id = FME_HSSI_ETH_REVISION
+ },
+ {
+ .name = FME_FEATURE_GLOBAL_DPERF,
+ .resource_size = sizeof(struct feature_fme_dperf),
+ .feature_index = FME_FEATURE_ID_GLOBAL_DPERF,
+ .revision_id = FME_GLOBAL_DPERF_REVISION,
+ .ops = &fme_global_dperf_ops,
+ }
+};
+
+static struct feature_info port_features[] = {
+ {
+ .name = PORT_FEATURE_HEADER,
+ .resource_size = sizeof(struct feature_port_header),
+ .feature_index = PORT_FEATURE_ID_HEADER,
+ .revision_id = PORT_HEADER_REVISION,
+ .ops = &port_hdr_ops,
+ },
+ {
+ .name = PORT_FEATURE_ERR,
+ .resource_size = sizeof(struct feature_port_error),
+ .feature_index = PORT_FEATURE_ID_ERROR,
+ .revision_id = PORT_ERR_REVISION,
+ .ops = &port_error_ops,
+ },
+ {
+ .name = PORT_FEATURE_UMSG,
+ .resource_size = sizeof(struct feature_port_umsg),
+ .feature_index = PORT_FEATURE_ID_UMSG,
+ .revision_id = PORT_UMSG_REVISION,
+ },
+ {
+ .name = PORT_FEATURE_UINT,
+ .resource_size = sizeof(struct feature_port_uint),
+ .feature_index = PORT_FEATURE_ID_UINT,
+ .revision_id = PORT_UINT_REVISION,
+ .ops = &port_uint_ops,
+ },
+ {
+ .name = PORT_FEATURE_STP,
+ .resource_size = PORT_FEATURE_STP_REGION_SIZE,
+ .feature_index = PORT_FEATURE_ID_STP,
+ .revision_id = PORT_STP_REVISION,
+ .ops = &port_stp_ops,
+ },
+ {
+ .name = PORT_FEATURE_UAFU,
+ /* UAFU feature size should be read from PORT_CAP.MMIOSIZE.
+ * Will set uafu feature size while parse port device.
+ */
+ .resource_size = 0,
+ .feature_index = PORT_FEATURE_ID_UAFU,
+ .revision_id = PORT_UAFU_REVISION
+ },
+};
+
+static u64 feature_id(void __iomem *start)
+{
+ struct feature_header header;
+
+ header.csr = readq(start);
+
+ switch (header.type) {
+ case FEATURE_TYPE_FIU:
+ return FEATURE_ID_HEADER;
+ case FEATURE_TYPE_PRIVATE:
+ return header.id;
+ case FEATURE_TYPE_AFU:
+ return FEATURE_ID_AFU;
+ }
+
+ WARN_ON(1);
+ return 0;
+}
+
+static int
+build_info_add_sub_feature(struct build_feature_devs_info *binfo,
+ struct feature_info *finfo, void __iomem *start)
+{
+ struct ifpga_hw *hw = binfo->hw;
+ struct feature *feature = NULL;
+ int feature_idx = finfo->feature_index;
+ unsigned int vec_start = finfo->vec_start;
+ unsigned int vec_cnt = finfo->vec_cnt;
+ struct feature_irq_ctx *ctx = NULL;
+ int port_id, ret = 0;
+ unsigned int i;
+
+ if (binfo->current_type == FME_ID) {
+ feature = &hw->fme.sub_feature[feature_idx];
+ feature->parent = &hw->fme;
+ } else if (binfo->current_type == PORT_ID) {
+ port_id = binfo->current_port_id;
+ feature = &hw->port[port_id].sub_feature[feature_idx];
+ feature->parent = &hw->port[port_id];
+ } else {
+ return -EFAULT;
+ }
+
+ feature->state = IFPGA_FEATURE_ATTACHED;
+ feature->addr = start;
+ feature->id = feature_id(start);
+ feature->size = finfo->resource_size;
+ feature->name = finfo->name;
+ feature->revision = finfo->revision_id;
+ feature->ops = finfo->ops;
+ feature->phys_addr = binfo->phys_addr +
+ ((u8 *)start - (u8 *)binfo->ioaddr);
+
+ if (vec_cnt) {
+ if (vec_start + vec_cnt <= vec_start)
+ return -EINVAL;
+
+ ctx = zmalloc(sizeof(*ctx) * vec_cnt);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < vec_cnt; i++) {
+ ctx[i].eventfd = -1;
+ ctx[i].idx = vec_start + i;
+ }
+ }
+
+ feature->ctx = ctx;
+ feature->ctx_num = vec_cnt;
+ feature->vfio_dev_fd = binfo->pci_data->vfio_dev_fd;
+
+ return ret;
+}
+
+static int
+create_feature_instance(struct build_feature_devs_info *binfo,
+ void __iomem *start, struct feature_info *finfo)
+{
+ struct feature_header *hdr = start;
+
+ if (finfo->revision_id != SKIP_REVISION_CHECK &&
+ hdr->revision > finfo->revision_id) {
+ dev_err(binfo, "feature %s revision :default:%x, now at:%x, mis-match.\n",
+ finfo->name, finfo->revision_id, hdr->revision);
+ }
+
+ return build_info_add_sub_feature(binfo, finfo, start);
+}
+
+/*
+ * UAFU GUID is dynamic as it can be changed after FME downloads different
+ * Green Bitstream to the port, so we treat the unknown GUIDs which are
+ * attached on port's feature list as UAFU.
+ */
+static bool feature_is_UAFU(struct build_feature_devs_info *binfo)
+{
+ if (binfo->current_type != PORT_ID)
+ return false;
+
+ return true;
+}
+
+static int parse_feature_port_uafu(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ enum port_feature_id id = PORT_FEATURE_ID_UAFU;
+ struct ifpga_afu_info *info;
+ void *start = (void *)hdr;
+ int ret;
+
+ if (port_features[id].resource_size) {
+ ret = create_feature_instance(binfo, hdr, &port_features[id]);
+ } else {
+ dev_err(binfo, "the uafu feature header is mis-configured.\n");
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* FIXME: need to figure out a better name */
+ info = malloc(sizeof(*info));
+ if (!info)
+ return -ENOMEM;
+
+ info->region[0].addr = start;
+ info->region[0].phys_addr = binfo->phys_addr +
+ (uint8_t *)start - (uint8_t *)binfo->ioaddr;
+ info->region[0].len = port_features[id].resource_size;
+ port_features[id].resource_size = 0;
+ info->num_regions = 1;
+
+ binfo->acc_info = info;
+
+ return ret;
+}
+
+static int parse_feature_afus(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ int ret;
+ struct feature_afu_header *afu_hdr, header;
+ u8 __iomem *start;
+ u8 __iomem *end = binfo->ioend;
+
+ start = (u8 __iomem *)hdr;
+ for (; start < end; start += header.next_afu) {
+ if ((unsigned int)(end - start) <
+ (unsigned int)(sizeof(*afu_hdr) + sizeof(*hdr)))
+ return -EINVAL;
+
+ hdr = (struct feature_header *)start;
+ afu_hdr = (struct feature_afu_header *)(hdr + 1);
+ header.csr = readq(&afu_hdr->csr);
+
+ if (feature_is_UAFU(binfo)) {
+ ret = parse_feature_port_uafu(binfo, hdr);
+ if (ret)
+ return ret;
+ }
+
+ if (!header.next_afu)
+ break;
+ }
+
+ return 0;
+}
+
+/* create and register proper private data */
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct ifpga_afu_info *info = binfo->acc_info;
+ struct ifpga_hw *hw = binfo->hw;
+ struct opae_manager *mgr;
+ struct opae_bridge *br;
+ struct opae_accelerator *acc;
+
+ if (!binfo->fiu)
+ return 0;
+
+ if (binfo->current_type == PORT_ID) {
+ /* return error if no valid acc info data structure */
+ if (!info)
+ return -EFAULT;
+
+ br = opae_bridge_alloc(hw->adapter->name, &ifpga_br_ops,
+ binfo->fiu);
+ if (!br)
+ return -ENOMEM;
+
+ br->id = binfo->current_port_id;
+
+ /* update irq info */
+ info->num_irqs = port_features[PORT_FEATURE_ID_UINT].vec_cnt;
+
+ acc = opae_accelerator_alloc(hw->adapter->name,
+ &ifpga_acc_ops, info);
+ if (!acc) {
+ opae_bridge_free(br);
+ return -ENOMEM;
+ }
+
+ acc->br = br;
+ acc->index = br->id;
+
+ opae_adapter_add_acc(hw->adapter, acc);
+
+ } else if (binfo->current_type == FME_ID) {
+ mgr = opae_manager_alloc(hw->adapter->name, &ifpga_mgr_ops,
+ binfo->fiu);
+ if (!mgr)
+ return -ENOMEM;
+
+ mgr->adapter = hw->adapter;
+ hw->adapter->mgr = mgr;
+ }
+
+ binfo->fiu = NULL;
+
+ return 0;
+}
+
+static int
+build_info_create_dev(struct build_feature_devs_info *binfo,
+ enum fpga_id_type type, unsigned int index)
+{
+ int ret;
+
+ ret = build_info_commit_dev(binfo);
+ if (ret)
+ return ret;
+
+ binfo->current_type = type;
+
+ if (type == FME_ID) {
+ binfo->fiu = &binfo->hw->fme;
+ } else if (type == PORT_ID) {
+ binfo->fiu = &binfo->hw->port[index];
+ binfo->current_port_id = index;
+ }
+
+ return 0;
+}
+
+static int parse_feature_fme(struct build_feature_devs_info *binfo,
+ struct feature_header *start)
+{
+ struct ifpga_hw *hw = binfo->hw;
+ struct ifpga_fme_hw *fme = &hw->fme;
+ int ret;
+
+ ret = build_info_create_dev(binfo, FME_ID, 0);
+ if (ret)
+ return ret;
+
+ /* Update FME states */
+ fme->state = IFPGA_FME_IMPLEMENTED;
+ fme->parent = hw;
+ spinlock_init(&fme->lock);
+
+ return create_feature_instance(binfo, start,
+ &fme_features[FME_FEATURE_ID_HEADER]);
+}
+
+static int parse_feature_port(struct build_feature_devs_info *binfo,
+ void __iomem *start)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+ struct ifpga_hw *hw = binfo->hw;
+ struct ifpga_port_hw *port;
+ unsigned int port_id;
+ int ret;
+
+ /* Get current port's id */
+ port_hdr = (struct feature_port_header *)start;
+ capability.csr = readq(&port_hdr->capability);
+ port_id = capability.port_number;
+
+ ret = build_info_create_dev(binfo, PORT_ID, port_id);
+ if (ret)
+ return ret;
+
+ /*found a Port device*/
+ port = &hw->port[port_id];
+ port->port_id = binfo->current_port_id;
+ port->parent = hw;
+ port->state = IFPGA_PORT_ATTACHED;
+ spinlock_init(&port->lock);
+
+ return create_feature_instance(binfo, start,
+ &port_features[PORT_FEATURE_ID_HEADER]);
+}
+
+static void enable_port_uafu(struct build_feature_devs_info *binfo,
+ void __iomem *start)
+{
+ enum port_feature_id id = PORT_FEATURE_ID_UAFU;
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+ struct ifpga_port_hw *port = &binfo->hw->port[binfo->current_port_id];
+
+ port_hdr = (struct feature_port_header *)start;
+ capability.csr = readq(&port_hdr->capability);
+ port_features[id].resource_size = (capability.mmio_size << 10);
+
+ /*
+ * From spec, to Enable UAFU, we should reset related port,
+ * or the whole mmio space in this UAFU will be invalid
+ */
+ if (port_features[id].resource_size)
+ fpga_port_reset(port);
+}
+
+static int parse_feature_fiu(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ struct feature_fiu_header *fiu_hdr, fiu_header;
+ u8 __iomem *start = (u8 __iomem *)hdr;
+ int ret;
+
+ header.csr = readq(hdr);
+
+ switch (header.id) {
+ case FEATURE_FIU_ID_FME:
+ ret = parse_feature_fme(binfo, hdr);
+ binfo->pfme_hdr = hdr;
+ if (ret)
+ return ret;
+ break;
+ case FEATURE_FIU_ID_PORT:
+ ret = parse_feature_port(binfo, hdr);
+ enable_port_uafu(binfo, hdr);
+ if (ret)
+ return ret;
+
+ /* Check Port FIU's next_afu pointer to User AFU DFH */
+ fiu_hdr = (struct feature_fiu_header *)(hdr + 1);
+ fiu_header.csr = readq(&fiu_hdr->csr);
+
+ if (fiu_header.next_afu) {
+ start += fiu_header.next_afu;
+ ret = parse_feature_afus(binfo,
+ (struct feature_header *)start);
+ if (ret)
+ return ret;
+ } else {
+ dev_info(binfo, "No AFUs detected on Port\n");
+ }
+
+ break;
+ default:
+ dev_info(binfo, "FIU TYPE %d is not supported yet.\n",
+ header.id);
+ }
+
+ return 0;
+}
+
+static void parse_feature_irqs(struct build_feature_devs_info *binfo,
+ void __iomem *start, struct feature_info *finfo)
+{
+ finfo->vec_start = 0;
+ finfo->vec_cnt = 0;
+
+ UNUSED(binfo);
+
+ if (!strcmp(finfo->name, PORT_FEATURE_UINT)) {
+ struct feature_port_uint *port_uint = start;
+ struct feature_port_uint_cap uint_cap;
+
+ uint_cap.csr = readq(&port_uint->capability);
+ if (uint_cap.intr_num) {
+ finfo->vec_start = uint_cap.first_vec_num;
+ finfo->vec_cnt = uint_cap.intr_num;
+ } else {
+ dev_debug(binfo, "UAFU doesn't support interrupt\n");
+ }
+ } else if (!strcmp(finfo->name, PORT_FEATURE_ERR)) {
+ struct feature_port_error *port_err = start;
+ struct feature_port_err_capability port_err_cap;
+
+ port_err_cap.csr = readq(&port_err->error_capability);
+ if (port_err_cap.support_intr) {
+ finfo->vec_start = port_err_cap.intr_vector_num;
+ finfo->vec_cnt = 1;
+ } else {
+ dev_debug(&binfo, "Port error doesn't support interrupt\n");
+ }
+
+ } else if (!strcmp(finfo->name, FME_FEATURE_GLOBAL_ERR)) {
+ struct feature_fme_err *fme_err = start;
+ struct feature_fme_error_capability fme_err_cap;
+
+ fme_err_cap.csr = readq(&fme_err->fme_err_capability);
+ if (fme_err_cap.support_intr) {
+ finfo->vec_start = fme_err_cap.intr_vector_num;
+ finfo->vec_cnt = 1;
+ } else {
+ dev_debug(&binfo, "FME error doesn't support interrupt\n");
+ }
+ }
+}
+
+static int parse_feature_fme_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+
+ header.csr = readq(hdr);
+
+ if (header.id >= ARRAY_SIZE(fme_features)) {
+ dev_err(binfo, "FME feature id %x is not supported yet.\n",
+ header.id);
+ return 0;
+ }
+
+ parse_feature_irqs(binfo, hdr, &fme_features[header.id]);
+
+ return create_feature_instance(binfo, hdr, &fme_features[header.id]);
+}
+
+static int parse_feature_port_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ enum port_feature_id id;
+
+ header.csr = readq(hdr);
+ /*
+ * the region of port feature id is [0x10, 0x13], + 1 to reserve 0
+ * which is dedicated for port-hdr.
+ */
+ id = (header.id & 0x000f) + 1;
+
+ if (id >= ARRAY_SIZE(port_features)) {
+ dev_err(binfo, "Port feature id %x is not supported yet.\n",
+ header.id);
+ return 0;
+ }
+
+ parse_feature_irqs(binfo, hdr, &port_features[id]);
+
+ return create_feature_instance(binfo, hdr, &port_features[id]);
+}
+
+static int parse_feature_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+
+ header.csr = readq(hdr);
+
+ switch (binfo->current_type) {
+ case FME_ID:
+ return parse_feature_fme_private(binfo, hdr);
+ case PORT_ID:
+ return parse_feature_port_private(binfo, hdr);
+ default:
+ dev_err(binfo, "private feature %x belonging to AFU %d (unknown_type) is not supported yet.\n",
+ header.id, binfo->current_type);
+ }
+ return 0;
+}
+
+static int parse_feature(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ int ret = 0;
+
+ header.csr = readq(hdr);
+
+ switch (header.type) {
+ case FEATURE_TYPE_AFU:
+ ret = parse_feature_afus(binfo, hdr);
+ break;
+ case FEATURE_TYPE_PRIVATE:
+ ret = parse_feature_private(binfo, hdr);
+ break;
+ case FEATURE_TYPE_FIU:
+ ret = parse_feature_fiu(binfo, hdr);
+ break;
+ default:
+ dev_err(binfo, "Feature Type %x is not supported.\n",
+ hdr->type);
+ };
+
+ return ret;
+}
+
+static int
+parse_feature_list(struct build_feature_devs_info *binfo, u8 __iomem *start)
+{
+ struct feature_header *hdr, header;
+ u8 __iomem *end = (u8 __iomem *)binfo->ioend;
+ int ret = 0;
+
+ for (; start < end; start += header.next_header_offset) {
+ if ((unsigned int)(end - start) < (unsigned int)sizeof(*hdr)) {
+ dev_err(binfo, "The region is too small to contain a feature.\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ hdr = (struct feature_header *)start;
+ ret = parse_feature(binfo, hdr);
+ if (ret)
+ return ret;
+
+ header.csr = readq(hdr);
+ if (!header.next_header_offset)
+ break;
+ }
+
+ return build_info_commit_dev(binfo);
+}
+
+/* switch the memory mapping to BAR# @bar */
+static int parse_switch_to(struct build_feature_devs_info *binfo, int bar)
+{
+ struct opae_adapter_data_pci *pci_data = binfo->pci_data;
+
+ if (!pci_data->region[bar].addr)
+ return -ENOMEM;
+
+ binfo->ioaddr = pci_data->region[bar].addr;
+ binfo->ioend = (u8 __iomem *)binfo->ioaddr + pci_data->region[bar].len;
+ binfo->phys_addr = pci_data->region[bar].phys_addr;
+ binfo->current_bar = bar;
+
+ return 0;
+}
+
+static int parse_ports_from_fme(struct build_feature_devs_info *binfo)
+{
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_port port;
+ int i = 0, ret = 0;
+
+ if (!binfo->pfme_hdr) {
+ dev_info(binfo, "VF is detected.\n");
+ return ret;
+ }
+
+ fme_hdr = binfo->pfme_hdr;
+
+ do {
+ port.csr = readq(&fme_hdr->port[i]);
+ if (!port.port_implemented)
+ break;
+
+ /* skip port which only could be accessed via VF */
+ if (port.afu_access_control == FME_AFU_ACCESS_VF)
+ continue;
+
+ ret = parse_switch_to(binfo, port.port_bar);
+ if (ret)
+ break;
+
+ ret = parse_feature_list(binfo,
+ (u8 __iomem *)binfo->ioaddr +
+ port.port_offset);
+ if (ret)
+ break;
+ } while (++i < MAX_FPGA_PORT_NUM);
+
+ return ret;
+}
+
+static struct build_feature_devs_info *
+build_info_alloc_and_init(struct ifpga_hw *hw)
+{
+ struct build_feature_devs_info *binfo;
+
+ binfo = zmalloc(sizeof(*binfo));
+ if (!binfo)
+ return binfo;
+
+ binfo->hw = hw;
+ binfo->pci_data = hw->pci_data;
+
+ /* fpga feature list starts from BAR 0 */
+ if (parse_switch_to(binfo, 0)) {
+ free(binfo);
+ return NULL;
+ }
+
+ return binfo;
+}
+
+static void build_info_free(struct build_feature_devs_info *binfo)
+{
+ free(binfo);
+}
+
+static void ifpga_print_device_feature_list(struct ifpga_hw *hw)
+{
+ struct ifpga_fme_hw *fme = &hw->fme;
+ struct ifpga_port_hw *port;
+ struct feature *feature;
+ int i, j;
+
+ dev_info(hw, "found fme_device, is in PF: %s\n",
+ is_ifpga_hw_pf(hw) ? "yes" : "no");
+
+ for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+ feature = &fme->sub_feature[i];
+ if (feature->state != IFPGA_FEATURE_ATTACHED)
+ continue;
+
+ dev_info(hw, "%12s: 0x%p - 0x%p - paddr: 0x%lx\n",
+ feature->name, feature->addr,
+ feature->addr + feature->size - 1,
+ (unsigned long)feature->phys_addr);
+ }
+
+ for (i = 0; i < MAX_FPGA_PORT_NUM; i++) {
+ port = &hw->port[i];
+
+ if (port->state != IFPGA_PORT_ATTACHED)
+ continue;
+
+ dev_info(hw, "port device: %d\n", port->port_id);
+
+ for (j = 0; j < PORT_FEATURE_ID_MAX; j++) {
+ feature = &port->sub_feature[j];
+ if (feature->state != IFPGA_FEATURE_ATTACHED)
+ continue;
+
+ dev_info(hw, "%12s: 0x%p - 0x%p - paddr:0x%lx\n",
+ feature->name,
+ feature->addr,
+ feature->addr +
+ feature->size - 1,
+ (unsigned long)feature->phys_addr);
+ }
+ }
+}
+
+int ifpga_bus_enumerate(struct ifpga_hw *hw)
+{
+ struct build_feature_devs_info *binfo;
+ int ret;
+
+ binfo = build_info_alloc_and_init(hw);
+ if (!binfo)
+ return -ENOMEM;
+
+ ret = parse_feature_list(binfo, binfo->ioaddr);
+ if (ret)
+ goto exit;
+
+ ret = parse_ports_from_fme(binfo);
+ if (ret)
+ goto exit;
+
+ ifpga_print_device_feature_list(hw);
+
+exit:
+ build_info_free(binfo);
+ return ret;
+}
+
+int ifpga_bus_init(struct ifpga_hw *hw)
+{
+ int i;
+
+ fme_hw_init(&hw->fme);
+ for (i = 0; i < MAX_FPGA_PORT_NUM; i++)
+ port_hw_init(&hw->port[i]);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h
new file mode 100644
index 00000000..14131e32
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_ENUMERATE_H_
+#define _IFPGA_ENUMERATE_H_
+
+int ifpga_bus_init(struct ifpga_hw *hw);
+int ifpga_bus_enumerate(struct ifpga_hw *hw);
+
+#endif /* _IFPGA_ENUMERATE_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c
new file mode 100644
index 00000000..be7ac9ee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <sys/ioctl.h>
+
+#include "ifpga_feature_dev.h"
+
+/*
+ * Enable Port by clear the port soft reset bit, which is set by default.
+ * The AFU is unable to respond to any MMIO access while in reset.
+ * __fpga_port_enable function should only be used after __fpga_port_disable
+ * function.
+ */
+void __fpga_port_enable(struct ifpga_port_hw *port)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ WARN_ON(!port->disable_count);
+
+ if (--port->disable_count != 0)
+ return;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ WARN_ON(!port_hdr);
+
+ control.csr = readq(&port_hdr->control);
+ control.port_sftrst = 0x0;
+ writeq(control.csr, &port_hdr->control);
+}
+
+int __fpga_port_disable(struct ifpga_port_hw *port)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ if (port->disable_count++ != 0)
+ return 0;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ WARN_ON(!port_hdr);
+
+ /* Set port soft reset */
+ control.csr = readq(&port_hdr->control);
+ control.port_sftrst = 0x1;
+ writeq(control.csr, &port_hdr->control);
+
+ /*
+ * HW sets ack bit to 1 when all outstanding requests have been drained
+ * on this port and minimum soft reset pulse width has elapsed.
+ * Driver polls port_soft_reset_ack to determine if reset done by HW.
+ */
+ control.port_sftrst_ack = 1;
+
+ if (fpga_wait_register_field(port_sftrst_ack, control,
+ &port_hdr->control, RST_POLL_TIMEOUT,
+ RST_POLL_INVL)) {
+ dev_err(port, "timeout, fail to reset device\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
+{
+ struct feature_port_header *port_hdr;
+ u64 guidl, guidh;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);
+
+ spinlock_lock(&port->lock);
+ guidl = readq(&port_hdr->afu_header.guid.b[0]);
+ guidh = readq(&port_hdr->afu_header.guid.b[8]);
+ spinlock_unlock(&port->lock);
+
+ memcpy(uuid->b, &guidl, sizeof(u64));
+ memcpy(uuid->b + 8, &guidh, sizeof(u64));
+
+ return 0;
+}
+
+/* Mask / Unmask Port Errors by the Error Mask register. */
+void port_err_mask(struct ifpga_port_hw *port, bool mask)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key err_mask;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ if (mask)
+ err_mask.csr = PORT_ERR_MASK;
+ else
+ err_mask.csr = 0;
+
+ writeq(err_mask.csr, &port_err->error_mask);
+}
+
+/* Clear All Port Errors. */
+int port_err_clear(struct ifpga_port_hw *port, u64 err)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_error *port_err;
+ struct feature_port_err_key mask;
+ struct feature_port_first_err_key first;
+ struct feature_port_status status;
+ int ret = 0;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ /*
+ * Clear All Port Errors
+ *
+ * - Check for AP6 State
+ * - Halt Port by keeping Port in reset
+ * - Set PORT Error mask to all 1 to mask errors
+ * - Clear all errors
+ * - Set Port mask to all 0 to enable errors
+ * - All errors start capturing new errors
+ * - Enable Port by pulling the port out of reset
+ */
+
+ /* If device is still in AP6 state, can not clear any error.*/
+ status.csr = readq(&port_hdr->status);
+ if (status.power_state == PORT_POWER_STATE_AP6) {
+ dev_err(dev, "Could not clear errors, device in AP6 state.\n");
+ return -EBUSY;
+ }
+
+ /* Halt Port by keeping Port in reset */
+ ret = __fpga_port_disable(port);
+ if (ret)
+ return ret;
+
+ /* Mask all errors */
+ port_err_mask(port, true);
+
+ /* Clear errors if err input matches with current port errors.*/
+ mask.csr = readq(&port_err->port_error);
+
+ if (mask.csr == err) {
+ writeq(mask.csr, &port_err->port_error);
+
+ first.csr = readq(&port_err->port_first_error);
+ writeq(first.csr, &port_err->port_first_error);
+ } else {
+ ret = -EBUSY;
+ }
+
+ /* Clear mask */
+ port_err_mask(port, false);
+
+ /* Enable the Port by clear the reset */
+ __fpga_port_enable(port);
+
+ return ret;
+}
+
+int port_clear_error(struct ifpga_port_hw *port)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ error.csr = readq(&port_err->port_error);
+
+ dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);
+
+ return port_err_clear(port, error.csr);
+}
+
+void fme_hw_uinit(struct ifpga_fme_hw *fme)
+{
+ struct feature *feature;
+ int i;
+
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return;
+
+ for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+ feature = &fme->sub_feature[i];
+ if (feature->state == IFPGA_FEATURE_ATTACHED &&
+ feature->ops && feature->ops->uinit)
+ feature->ops->uinit(feature);
+ }
+}
+
+int fme_hw_init(struct ifpga_fme_hw *fme)
+{
+ struct feature *feature;
+ int i, ret;
+
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return -EINVAL;
+
+ for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+ feature = &fme->sub_feature[i];
+ if (feature->state == IFPGA_FEATURE_ATTACHED &&
+ feature->ops && feature->ops->init) {
+ ret = feature->ops->init(feature);
+ if (ret) {
+ fme_hw_uinit(fme);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void port_hw_uinit(struct ifpga_port_hw *port)
+{
+ struct feature *feature;
+ int i;
+
+ for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
+ feature = &port->sub_feature[i];
+ if (feature->state == IFPGA_FEATURE_ATTACHED &&
+ feature->ops && feature->ops->uinit)
+ feature->ops->uinit(feature);
+ }
+}
+
+int port_hw_init(struct ifpga_port_hw *port)
+{
+ struct feature *feature;
+ int i, ret;
+
+ if (port->state == IFPGA_PORT_UNUSED)
+ return 0;
+
+ for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
+ feature = &port->sub_feature[i];
+ if (feature->ops && feature->ops->init) {
+ ret = feature->ops->init(feature);
+ if (ret) {
+ port_hw_uinit(port);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
new file mode 100644
index 00000000..7a39a580
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_FEATURE_DEV_H_
+#define _IFPGA_FEATURE_DEV_H_
+
+#include "ifpga_hw.h"
+
+static inline struct ifpga_port_hw *
+get_port(struct ifpga_hw *hw, u32 port_id)
+{
+ if (!is_valid_port_id(hw, port_id))
+ return NULL;
+
+ return &hw->port[port_id];
+}
+
+#define ifpga_for_each_fme_feature(hw, feature) \
+ for ((feature) = (hw)->sub_feature; \
+ (feature) < (hw)->sub_feature + (FME_FEATURE_ID_MAX); (feature)++)
+
+#define ifpga_for_each_port_feature(hw, feature) \
+ for ((feature) = (hw)->sub_feature; \
+ (feature) < (hw)->sub_feature + (PORT_FEATURE_ID_MAX); (feature)++)
+
+static inline struct feature *
+get_fme_feature_by_id(struct ifpga_fme_hw *fme, u64 id)
+{
+ struct feature *feature;
+
+ ifpga_for_each_fme_feature(fme, feature) {
+ if (feature->id == id)
+ return feature;
+ }
+
+ return NULL;
+}
+
+static inline struct feature *
+get_port_feature_by_id(struct ifpga_port_hw *port, u64 id)
+{
+ struct feature *feature;
+
+ ifpga_for_each_port_feature(port, feature) {
+ if (feature->id == id)
+ return feature;
+ }
+
+ return NULL;
+}
+
+static inline void *
+get_fme_feature_ioaddr_by_index(struct ifpga_fme_hw *fme, int index)
+{
+ return fme->sub_feature[index].addr;
+}
+
+static inline void *
+get_port_feature_ioaddr_by_index(struct ifpga_port_hw *port, int index)
+{
+ return port->sub_feature[index].addr;
+}
+
+static inline bool
+is_fme_feature_present(struct ifpga_fme_hw *fme, int index)
+{
+ return !!get_fme_feature_ioaddr_by_index(fme, index);
+}
+
+static inline bool
+is_port_feature_present(struct ifpga_port_hw *port, int index)
+{
+ return !!get_port_feature_ioaddr_by_index(port, index);
+}
+
+int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid);
+
+int __fpga_port_disable(struct ifpga_port_hw *port);
+void __fpga_port_enable(struct ifpga_port_hw *port);
+
+static inline int fpga_port_disable(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = __fpga_port_disable(port);
+ spinlock_unlock(&port->lock);
+ return ret;
+}
+
+static inline int fpga_port_enable(struct ifpga_port_hw *port)
+{
+ spinlock_lock(&port->lock);
+ __fpga_port_enable(port);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static inline int __fpga_port_reset(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ ret = __fpga_port_disable(port);
+ if (ret)
+ return ret;
+
+ __fpga_port_enable(port);
+
+ return 0;
+}
+
+static inline int fpga_port_reset(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = __fpga_port_reset(port);
+ spinlock_unlock(&port->lock);
+ return ret;
+}
+
+int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status);
+
+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set);
+
+int fme_hw_init(struct ifpga_fme_hw *fme);
+void fme_hw_uinit(struct ifpga_fme_hw *fme);
+void port_hw_uinit(struct ifpga_port_hw *port);
+int port_hw_init(struct ifpga_port_hw *port);
+int port_clear_error(struct ifpga_port_hw *port);
+void port_err_mask(struct ifpga_port_hw *port, bool mask);
+int port_err_clear(struct ifpga_port_hw *port, u64 err);
+
+extern struct feature_ops fme_hdr_ops;
+extern struct feature_ops fme_thermal_mgmt_ops;
+extern struct feature_ops fme_power_mgmt_ops;
+extern struct feature_ops fme_global_err_ops;
+extern struct feature_ops fme_pr_mgmt_ops;
+extern struct feature_ops fme_global_iperf_ops;
+extern struct feature_ops fme_global_dperf_ops;
+
+int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
+
+/* This struct is used when parsing uafu irq_set */
+struct fpga_uafu_irq_set {
+ u32 start;
+ u32 count;
+ s32 *evtfds;
+};
+
+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set);
+
+extern struct feature_ops port_hdr_ops;
+extern struct feature_ops port_error_ops;
+extern struct feature_ops port_stp_ops;
+extern struct feature_ops port_uint_ops;
+
+/* help functions for feature ops */
+int fpga_msix_set_block(struct feature *feature, unsigned int start,
+ unsigned int count, s32 *fds);
+
+#endif /* _IFPGA_FEATURE_DEV_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme.c
new file mode 100644
index 00000000..4be60c0c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme.c
@@ -0,0 +1,734 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PWR_THRESHOLD_MAX 0x7F
+
+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->get_prop)
+ return feature->ops->get_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->set_prop)
+ return feature->ops->set_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
+{
+ struct feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, feature_id);
+
+ if (feature && feature->ops && feature->ops->set_irq)
+ return feature->ops->set_irq(feature, irq_set);
+
+ return -ENOENT;
+}
+
+/* fme private feature head */
+static int fme_hdr_init(struct feature *feature)
+{
+ struct feature_fme_header *fme_hdr;
+
+ fme_hdr = (struct feature_fme_header *)feature->addr;
+
+ dev_info(NULL, "FME HDR Init.\n");
+ dev_info(NULL, "FME cap %llx.\n",
+ (unsigned long long)fme_hdr->capability.csr);
+
+ return 0;
+}
+
+static void fme_hdr_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME HDR UInit.\n");
+}
+
+static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_header header;
+
+ header.csr = readq(&fme_hdr->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *ports_num = fme_capability.num_ports;
+
+ return 0;
+}
+
+static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *cache_size = fme_capability.cache_size;
+
+ return 0;
+}
+
+static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *version = fme_capability.fabric_verid;
+
+ return 0;
+}
+
+static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *socket_id = fme_capability.socket_id;
+
+ return 0;
+}
+
+static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
+ u64 *bitstream_id)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ *bitstream_id = readq(&fme_hdr->bitstream_id);
+
+ return 0;
+}
+
+static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
+ u64 *bitstream_metadata)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ *bitstream_metadata = readq(&fme_hdr->bitstream_md);
+
+ return 0;
+}
+
+static int
+fme_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_HDR_PROP_REVISION:
+ return fme_hdr_get_revision(fme, &prop->data);
+ case FME_HDR_PROP_PORTS_NUM:
+ return fme_hdr_get_ports_num(fme, &prop->data);
+ case FME_HDR_PROP_CACHE_SIZE:
+ return fme_hdr_get_cache_size(fme, &prop->data);
+ case FME_HDR_PROP_VERSION:
+ return fme_hdr_get_version(fme, &prop->data);
+ case FME_HDR_PROP_SOCKET_ID:
+ return fme_hdr_get_socket_id(fme, &prop->data);
+ case FME_HDR_PROP_BITSTREAM_ID:
+ return fme_hdr_get_bitstream_id(fme, &prop->data);
+ case FME_HDR_PROP_BITSTREAM_METADATA:
+ return fme_hdr_get_bitstream_metadata(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_hdr_ops = {
+ .init = fme_hdr_init,
+ .uinit = fme_hdr_uinit,
+ .get_prop = fme_hdr_get_prop,
+};
+
+/* thermal management */
+static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1 = temp_threshold.tmp_thshold1;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_tmp_threshold tmp_threshold;
+ struct feature_fme_capability fme_capability;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+ fme_capability.csr = readq(&fme_hdr->capability);
+
+ if (fme_capability.lock_bit == 1) {
+ spinlock_unlock(&fme->lock);
+ return -EBUSY;
+ } else if (thres1 > 100) {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ } else if (thres1 == 0) {
+ tmp_threshold.tmp_thshold1_enable = 0;
+ tmp_threshold.tmp_thshold1 = thres1;
+ } else {
+ tmp_threshold.tmp_thshold1_enable = 1;
+ tmp_threshold.tmp_thshold1 = thres1;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres2 = temp_threshold.tmp_thshold2;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_tmp_threshold tmp_threshold;
+ struct feature_fme_capability fme_capability;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+ fme_capability.csr = readq(&fme_hdr->capability);
+
+ if (fme_capability.lock_bit == 1) {
+ spinlock_unlock(&fme->lock);
+ return -EBUSY;
+ } else if (thres2 > 100) {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ } else if (thres2 == 0) {
+ tmp_threshold.tmp_thshold2_enable = 0;
+ tmp_threshold.tmp_thshold2 = thres2;
+ } else {
+ tmp_threshold.tmp_thshold2_enable = 1;
+ tmp_threshold.tmp_thshold2 = thres2;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
+ u64 *thres_trip)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres_trip = temp_threshold.therm_trip_thshold;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
+ u64 *thres1_reached)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1_reached = temp_threshold.thshold1_status;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
+ u64 *thres1_reached)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1_reached = temp_threshold.thshold2_status;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
+ u64 *thres1_policy)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1_policy = temp_threshold.thshold_policy;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
+ u64 thres1_policy)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold tmp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+
+ if (thres1_policy == 0) {
+ tmp_threshold.thshold_policy = 0;
+ } else if (thres1_policy == 1) {
+ tmp_threshold.thshold_policy = 1;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);
+ *temp = temp_rdsensor_fmt1.fpga_temp;
+
+ return 0;
+}
+
+static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_thermal *fme_thermal
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ struct feature_header header;
+
+ header.csr = readq(&fme_thermal->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+#define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
+
+static int fme_thermal_mgmt_init(struct feature *feature)
+{
+ struct feature_fme_thermal *fme_thermal;
+ struct feature_fme_tmp_threshold_cap thermal_cap;
+
+ UNUSED(feature);
+
+ dev_info(NULL, "FME thermal mgmt Init.\n");
+
+ fme_thermal = (struct feature_fme_thermal *)feature->addr;
+ thermal_cap.csr = readq(&fme_thermal->threshold_cap);
+
+ dev_info(NULL, "FME thermal cap %llx.\n",
+ (unsigned long long)fme_thermal->threshold_cap.csr);
+
+ if (thermal_cap.tmp_thshold_disabled)
+ feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
+
+ return 0;
+}
+
+static void fme_thermal_mgmt_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME thermal mgmt UInit.\n");
+}
+
+static int
+fme_thermal_set_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
+ return -ENOENT;
+
+ switch (prop->prop_id) {
+ case FME_THERMAL_PROP_THRESHOLD1:
+ return fme_thermal_set_threshold1(fme, prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2:
+ return fme_thermal_set_threshold2(fme, prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_POLICY:
+ return fme_thermal_set_threshold1_policy(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int
+fme_thermal_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
+ prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
+ prop->prop_id != FME_THERMAL_PROP_REVISION)
+ return -ENOENT;
+
+ switch (prop->prop_id) {
+ case FME_THERMAL_PROP_THRESHOLD1:
+ return fme_thermal_get_threshold1(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2:
+ return fme_thermal_get_threshold2(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD_TRIP:
+ return fme_thermal_get_threshold_trip(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_REACHED:
+ return fme_thermal_get_threshold1_reached(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2_REACHED:
+ return fme_thermal_get_threshold2_reached(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_POLICY:
+ return fme_thermal_get_threshold1_policy(fme, &prop->data);
+ case FME_THERMAL_PROP_TEMPERATURE:
+ return fme_thermal_get_temperature(fme, &prop->data);
+ case FME_THERMAL_PROP_REVISION:
+ return fme_thermal_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_thermal_mgmt_ops = {
+ .init = fme_thermal_mgmt_init,
+ .uinit = fme_thermal_mgmt_uinit,
+ .get_prop = fme_thermal_get_prop,
+ .set_prop = fme_thermal_set_prop,
+};
+
+static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_status pm_status;
+
+ pm_status.csr = readq(&fme_power->status);
+
+ *consumed = pm_status.pwr_consumed;
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold = pm_ap_threshold.threshold1;
+
+ return 0;
+}
+
+static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ spinlock_lock(&fme->lock);
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ if (threshold <= PWR_THRESHOLD_MAX) {
+ pm_ap_threshold.threshold1 = threshold;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(pm_ap_threshold.csr, &fme_power->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold = pm_ap_threshold.threshold2;
+
+ return 0;
+}
+
+static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ spinlock_lock(&fme->lock);
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ if (threshold <= PWR_THRESHOLD_MAX) {
+ pm_ap_threshold.threshold2 = threshold;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(pm_ap_threshold.csr, &fme_power->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
+ u64 *threshold_status)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold_status = pm_ap_threshold.threshold1_status;
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
+ u64 *threshold_status)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold_status = pm_ap_threshold.threshold2_status;
+
+ return 0;
+}
+
+static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_status pm_status;
+
+ pm_status.csr = readq(&fme_power->status);
+
+ *rtl = pm_status.fpga_latency_report;
+
+ return 0;
+}
+
+static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_xeon_limit xeon_limit;
+
+ xeon_limit.csr = readq(&fme_power->xeon_limit);
+
+ if (!xeon_limit.enable)
+ xeon_limit.pwr_limit = 0;
+
+ *limit = xeon_limit.pwr_limit;
+
+ return 0;
+}
+
+static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_fpga_limit fpga_limit;
+
+ fpga_limit.csr = readq(&fme_power->fpga_limit);
+
+ if (!fpga_limit.enable)
+ fpga_limit.pwr_limit = 0;
+
+ *limit = fpga_limit.pwr_limit;
+
+ return 0;
+}
+
+static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_header header;
+
+ header.csr = readq(&fme_power->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_power_mgmt_init(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME power mgmt Init.\n");
+
+ return 0;
+}
+
+static void fme_power_mgmt_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME power mgmt UInit.\n");
+}
+
+static int fme_power_mgmt_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_PWR_PROP_CONSUMED:
+ return fme_pwr_get_consumed(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD1:
+ return fme_pwr_get_threshold1(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD2:
+ return fme_pwr_get_threshold2(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD1_STATUS:
+ return fme_pwr_get_threshold1_status(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD2_STATUS:
+ return fme_pwr_get_threshold2_status(fme, &prop->data);
+ case FME_PWR_PROP_RTL:
+ return fme_pwr_get_rtl(fme, &prop->data);
+ case FME_PWR_PROP_XEON_LIMIT:
+ return fme_pwr_get_xeon_limit(fme, &prop->data);
+ case FME_PWR_PROP_FPGA_LIMIT:
+ return fme_pwr_get_fpga_limit(fme, &prop->data);
+ case FME_PWR_PROP_REVISION:
+ return fme_pwr_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_power_mgmt_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_PWR_PROP_THRESHOLD1:
+ return fme_pwr_set_threshold1(fme, prop->data);
+ case FME_PWR_PROP_THRESHOLD2:
+ return fme_pwr_set_threshold2(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_power_mgmt_ops = {
+ .init = fme_power_mgmt_init,
+ .uinit = fme_power_mgmt_uinit,
+ .get_prop = fme_power_mgmt_get_prop,
+ .set_prop = fme_power_mgmt_set_prop,
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c
new file mode 100644
index 00000000..1773b87e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PERF_OBJ_ROOT_ID 0xff
+
+static int fme_dperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_clk_ctr clk;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ clk.afu_interf_clock = readq(&dperf->clk);
+
+ *clock = clk.afu_interf_clock;
+ return 0;
+}
+
+static int fme_dperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_header header;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ header.csr = readq(&dperf->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+#define DPERF_TIMEOUT 30
+
+static bool fabric_pobj_is_enabled(int port_id,
+ struct feature_fme_dperf *dperf)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+
+ ctl.csr = readq(&dperf->fab_ctl);
+
+ if (ctl.port_filter == FAB_DISABLE_FILTER)
+ return port_id == PERF_OBJ_ROOT_ID;
+
+ return port_id == ctl.port_id;
+}
+
+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum dperf_fab_events fab_event)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ struct feature_fme_dfpmon_fab_ctr ctr;
+ struct feature_fme_dperf *dperf;
+ u64 counter = 0;
+
+ spinlock_lock(&fme->lock);
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+ /* if it is disabled, force the counter to return zero. */
+ if (!fabric_pobj_is_enabled(port_id, dperf))
+ goto exit;
+
+ ctl.csr = readq(&dperf->fab_ctl);
+ ctl.fab_evtcode = fab_event;
+ writeq(ctl.csr, &dperf->fab_ctl);
+
+ ctr.event_code = fab_event;
+
+ if (fpga_wait_register_field(event_code, ctr,
+ &dperf->fab_ctr, DPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&dperf->fab_ctr);
+ counter = ctr.fab_cnt;
+exit:
+ spinlock_unlock(&fme->lock);
+ return counter;
+}
+
+#define FAB_PORT_SHOW(name, event) \
+static int fme_dperf_get_fab_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_fabric_counter(fme, port_id, event); \
+ return 0; \
+}
+
+FAB_PORT_SHOW(pcie0_read, DPERF_FAB_PCIE0_RD);
+FAB_PORT_SHOW(pcie0_write, DPERF_FAB_PCIE0_WR);
+FAB_PORT_SHOW(mmio_read, DPERF_FAB_MMIO_RD);
+FAB_PORT_SHOW(mmio_write, DPERF_FAB_MMIO_WR);
+
+static int fme_dperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 *enable)
+{
+ struct feature_fme_dperf *dperf;
+ int status;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+ status = fabric_pobj_is_enabled(port_id, dperf);
+ *enable = (u64)status;
+
+ return 0;
+}
+
+/*
+ * If enable one port or all port event counter in fabric, other
+ * fabric event counter originally enabled will be disable automatically.
+ */
+static int fme_dperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 enable)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ struct feature_fme_dperf *dperf;
+ bool state;
+
+ state = !!enable;
+
+ if (!state)
+ return -EINVAL;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+ /* if it is already enabled. */
+ if (fabric_pobj_is_enabled(port_id, dperf))
+ return 0;
+
+ spinlock_lock(&fme->lock);
+ ctl.csr = readq(&dperf->fab_ctl);
+ if (port_id == PERF_OBJ_ROOT_ID) {
+ ctl.port_filter = FAB_DISABLE_FILTER;
+ } else {
+ ctl.port_filter = FAB_ENABLE_FILTER;
+ ctl.port_id = port_id;
+ }
+
+ writeq(ctl.csr, &dperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_dperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_fab_ctl ctl;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ ctl.csr = readq(&dperf->fab_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_dperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ ctl.csr = readq(&dperf->fab_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &dperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define PERF_MAX_PORT_NUM 1
+
+static int fme_global_dperf_init(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_dperf Init.\n");
+
+ return 0;
+}
+
+static void fme_global_dperf_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_dperf UInit.\n");
+}
+
+static int fme_dperf_fab_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_dperf_get_fab_freeze(fme, &prop->data);
+ case 0x2: /* PCIE0_READ */
+ return fme_dperf_get_fab_port_pcie0_read(fme, sub, &prop->data);
+ case 0x3: /* PCIE0_WRITE */
+ return fme_dperf_get_fab_port_pcie0_write(fme, sub,
+ &prop->data);
+ case 0x4: /* MMIO_READ */
+ return fme_dperf_get_fab_port_mmio_read(fme, sub, &prop->data);
+ case 0x5: /* MMIO_WRITE */
+ return fme_dperf_get_fab_port_mmio_write(fme, sub, &prop->data);
+ case 0x6: /* ENABLE */
+ return fme_dperf_get_fab_port_enable(fme, sub, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_dperf_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* CLOCK */
+ return fme_dperf_get_clock(fme, &prop->data);
+ case 0x2: /* REVISION */
+ return fme_dperf_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_dperf_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_FAB:
+ return fme_dperf_fab_get_prop(feature, prop);
+ case PERF_PROP_TOP_UNUSED:
+ return fme_dperf_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_dperf_fab_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE - fab root only prop */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_dperf_set_fab_freeze(fme, prop->data);
+ case 0x6: /* ENABLE - fab both root and sub */
+ return fme_dperf_set_fab_port_enable(fme, sub, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_dperf_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_FAB:
+ return fme_dperf_fab_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_global_dperf_ops = {
+ .init = fme_global_dperf_init,
+ .uinit = fme_global_dperf_uinit,
+ .get_prop = fme_global_dperf_get_prop,
+ .set_prop = fme_global_dperf_set_prop,
+
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c
new file mode 100644
index 00000000..8c26fb25
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_error0 fme_error0;
+
+ fme_error0.csr = readq(&fme_err->fme_err);
+ *val = fme_error0.csr;
+
+ return 0;
+}
+
+static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_first_error fme_first_err;
+
+ fme_first_err.csr = readq(&fme_err->fme_first_err);
+ *val = fme_first_err.err_reg_status;
+
+ return 0;
+}
+
+static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_next_error fme_next_err;
+
+ fme_next_err.csr = readq(&fme_err->fme_next_err);
+ *val = fme_next_err.err_reg_status;
+
+ return 0;
+}
+
+static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_error0 fme_error0;
+ struct feature_fme_first_error fme_first_err;
+ struct feature_fme_next_error fme_next_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
+ writeq(FME_ERROR0_MASK, &fme_err->fme_err_mask);
+
+ fme_error0.csr = readq(&fme_err->fme_err);
+ if (val != fme_error0.csr) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ fme_first_err.csr = readq(&fme_err->fme_first_err);
+ fme_next_err.csr = readq(&fme_err->fme_next_err);
+
+ writeq(fme_error0.csr & FME_ERROR0_MASK, &fme_err->fme_err);
+ writeq(fme_first_err.csr & FME_FIRST_ERROR_MASK,
+ &fme_err->fme_first_err);
+ writeq(fme_next_err.csr & FME_NEXT_ERROR_MASK,
+ &fme_err->fme_next_err);
+
+exit:
+ writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_header header;
+
+ header.csr = readq(&fme_err->header);
+ *val = header.revision;
+
+ return 0;
+}
+
+static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie0_error pcie0_err;
+
+ pcie0_err.csr = readq(&fme_err->pcie0_err);
+ *val = pcie0_err.csr;
+
+ return 0;
+}
+
+static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie0_error pcie0_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
+ writeq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);
+
+ pcie0_err.csr = readq(&fme_err->pcie0_err);
+ if (val != pcie0_err.csr)
+ ret = -EBUSY;
+ else
+ writeq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,
+ &fme_err->pcie0_err);
+
+ writeq(0UL, &fme_err->pcie0_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie1_error pcie1_err;
+
+ pcie1_err.csr = readq(&fme_err->pcie1_err);
+ *val = pcie1_err.csr;
+
+ return 0;
+}
+
+static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie1_error pcie1_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
+ writeq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);
+
+ pcie1_err.csr = readq(&fme_err->pcie1_err);
+ if (val != pcie1_err.csr)
+ ret = -EBUSY;
+ else
+ writeq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,
+ &fme_err->pcie1_err);
+
+ writeq(0UL, &fme_err->pcie1_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_nonfaterror ras_nonfaterr;
+
+ ras_nonfaterr.csr = readq(&fme_err->ras_nonfaterr);
+ *val = ras_nonfaterr.csr;
+
+ return 0;
+}
+
+static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_catfaterror ras_catfaterr;
+
+ ras_catfaterr.csr = readq(&fme_err->ras_catfaterr);
+ *val = ras_catfaterr.csr;
+
+ return 0;
+}
+
+static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_error_inj ras_error_inj;
+
+ ras_error_inj.csr = readq(&fme_err->ras_error_inj);
+ *val = ras_error_inj.csr & FME_RAS_ERROR_INJ_MASK;
+
+ return 0;
+}
+
+static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_error_inj ras_error_inj;
+
+ spinlock_lock(&fme->lock);
+ ras_error_inj.csr = readq(&fme_err->ras_error_inj);
+
+ if (val <= FME_RAS_ERROR_INJ_MASK) {
+ ras_error_inj.csr = val;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(ras_error_inj.csr, &fme_err->ras_error_inj);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static void fme_error_enable(struct ifpga_fme_hw *fme)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+
+ writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
+ writeq(0UL, &fme_err->pcie0_err_mask);
+ writeq(0UL, &fme_err->pcie1_err_mask);
+ writeq(0UL, &fme_err->ras_nonfat_mask);
+ writeq(0UL, &fme_err->ras_catfat_mask);
+}
+
+static int fme_global_error_init(struct feature *feature)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ fme_error_enable(fme);
+
+ if (feature->ctx_num)
+ fme->capability |= FPGA_FME_CAP_ERR_IRQ;
+
+ return 0;
+}
+
+static void fme_global_error_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+}
+
+static int fme_err_fme_err_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* ERRORS */
+ return fme_err_get_errors(fme, &prop->data);
+ case 0x2: /* FIRST_ERROR */
+ return fme_err_get_first_error(fme, &prop->data);
+ case 0x3: /* NEXT_ERROR */
+ return fme_err_get_next_error(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x5: /* REVISION */
+ return fme_err_get_revision(fme, &prop->data);
+ case 0x6: /* PCIE0_ERRORS */
+ return fme_err_get_pcie0_errors(fme, &prop->data);
+ case 0x7: /* PCIE1_ERRORS */
+ return fme_err_get_pcie1_errors(fme, &prop->data);
+ case 0x8: /* NONFATAL_ERRORS */
+ return fme_err_get_nonfatal_errors(fme, &prop->data);
+ case 0x9: /* CATFATAL_ERRORS */
+ return fme_err_get_catfatal_errors(fme, &prop->data);
+ case 0xa: /* INJECT_ERRORS */
+ return fme_err_get_inject_errors(fme, &prop->data);
+ case 0xb: /* REVISION*/
+ return fme_err_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_error_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ /* PROP_SUB is never used */
+ if (sub != PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (top) {
+ case ERR_PROP_TOP_FME_ERR:
+ return fme_err_fme_err_get_prop(feature, prop);
+ case ERR_PROP_TOP_UNUSED:
+ return fme_err_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_fme_err_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x4: /* CLEAR */
+ return fme_err_set_clear(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_root_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x6: /* PCIE0_ERRORS */
+ return fme_err_set_pcie0_errors(fme, prop->data);
+ case 0x7: /* PCIE1_ERRORS */
+ return fme_err_set_pcie1_errors(fme, prop->data);
+ case 0xa: /* INJECT_ERRORS */
+ return fme_err_set_inject_errors(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_error_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ /* PROP_SUB is never used */
+ if (sub != PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (top) {
+ case ERR_PROP_TOP_FME_ERR:
+ return fme_err_fme_err_set_prop(feature, prop);
+ case ERR_PROP_TOP_UNUSED:
+ return fme_err_root_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_global_err_ops = {
+ .init = fme_global_error_init,
+ .uinit = fme_global_error_uinit,
+ .get_prop = fme_global_error_get_prop,
+ .set_prop = fme_global_error_set_prop,
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c
new file mode 100644
index 00000000..e6c40a19
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c
@@ -0,0 +1,715 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PERF_OBJ_ROOT_ID 0xff
+
+static int fme_iperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_clk_ctr clk;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ clk.afu_interf_clock = readq(&iperf->clk);
+
+ *clock = clk.afu_interf_clock;
+ return 0;
+}
+
+static int fme_iperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_header header;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ header.csr = readq(&iperf->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_iperf_get_cache_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->ch_ctl);
+ *freeze = (u64)ctl.freeze;
+ return 0;
+}
+
+static int fme_iperf_set_cache_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->ch_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->ch_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define IPERF_TIMEOUT 30
+
+static u64 read_cache_counter(struct ifpga_fme_hw *fme,
+ u8 channel, enum iperf_cache_events event)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+ struct feature_fme_ifpmon_ch_ctr ctr0, ctr1;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* set channel access type and cache event code. */
+ ctl.csr = readq(&iperf->ch_ctl);
+ ctl.cci_chsel = channel;
+ ctl.cache_event = event;
+ writeq(ctl.csr, &iperf->ch_ctl);
+
+ /* check the event type in the counter registers */
+ ctr0.event_code = event;
+
+ if (fpga_wait_register_field(event_code, ctr0,
+ &iperf->ch_ctr0, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched cache event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr0.csr = readq(&iperf->ch_ctr0);
+ ctr1.csr = readq(&iperf->ch_ctr1);
+ counter = ctr0.cache_counter + ctr1.cache_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define CACHE_SHOW(name, type, event) \
+static int fme_iperf_get_cache_##name(struct ifpga_fme_hw *fme, \
+ u64 *counter) \
+{ \
+ *counter = read_cache_counter(fme, type, event); \
+ return 0; \
+}
+
+CACHE_SHOW(read_hit, CACHE_CHANNEL_RD, IPERF_CACHE_RD_HIT);
+CACHE_SHOW(read_miss, CACHE_CHANNEL_RD, IPERF_CACHE_RD_MISS);
+CACHE_SHOW(write_hit, CACHE_CHANNEL_WR, IPERF_CACHE_WR_HIT);
+CACHE_SHOW(write_miss, CACHE_CHANNEL_WR, IPERF_CACHE_WR_MISS);
+CACHE_SHOW(hold_request, CACHE_CHANNEL_RD, IPERF_CACHE_HOLD_REQ);
+CACHE_SHOW(tx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_TX_REQ_STALL);
+CACHE_SHOW(rx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_RX_REQ_STALL);
+CACHE_SHOW(rx_eviction, CACHE_CHANNEL_RD, IPERF_CACHE_EVICTIONS);
+CACHE_SHOW(data_write_port_contention, CACHE_CHANNEL_WR,
+ IPERF_CACHE_DATA_WR_PORT_CONTEN);
+CACHE_SHOW(tag_write_port_contention, CACHE_CHANNEL_WR,
+ IPERF_CACHE_TAG_WR_PORT_CONTEN);
+
+static int fme_iperf_get_vtd_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_iperf *iperf;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_iperf_set_vtd_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_iperf *iperf;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->vtd_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static u64 read_iommu_sip_counter(struct ifpga_fme_hw *fme,
+ enum iperf_vtd_sip_events event)
+{
+ struct feature_fme_ifpmon_vtd_sip_ctl sip_ctl;
+ struct feature_fme_ifpmon_vtd_sip_ctr sip_ctr;
+ struct feature_fme_iperf *iperf;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ sip_ctl.csr = readq(&iperf->vtd_sip_ctl);
+ sip_ctl.vtd_evtcode = event;
+ writeq(sip_ctl.csr, &iperf->vtd_sip_ctl);
+
+ sip_ctr.event_code = event;
+
+ if (fpga_wait_register_field(event_code, sip_ctr,
+ &iperf->vtd_sip_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd SIP event type in counter registers\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ sip_ctr.csr = readq(&iperf->vtd_sip_ctr);
+ counter = sip_ctr.vtd_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define VTD_SIP_SHOW(name, event) \
+static int fme_iperf_get_vtd_sip_##name(struct ifpga_fme_hw *fme, \
+ u64 *counter) \
+{ \
+ *counter = read_iommu_sip_counter(fme, event); \
+ return 0; \
+}
+
+VTD_SIP_SHOW(iotlb_4k_hit, IPERF_VTD_SIP_IOTLB_4K_HIT);
+VTD_SIP_SHOW(iotlb_2m_hit, IPERF_VTD_SIP_IOTLB_2M_HIT);
+VTD_SIP_SHOW(iotlb_1g_hit, IPERF_VTD_SIP_IOTLB_1G_HIT);
+VTD_SIP_SHOW(slpwc_l3_hit, IPERF_VTD_SIP_SLPWC_L3_HIT);
+VTD_SIP_SHOW(slpwc_l4_hit, IPERF_VTD_SIP_SLPWC_L4_HIT);
+VTD_SIP_SHOW(rcc_hit, IPERF_VTD_SIP_RCC_HIT);
+VTD_SIP_SHOW(iotlb_4k_miss, IPERF_VTD_SIP_IOTLB_4K_MISS);
+VTD_SIP_SHOW(iotlb_2m_miss, IPERF_VTD_SIP_IOTLB_2M_MISS);
+VTD_SIP_SHOW(iotlb_1g_miss, IPERF_VTD_SIP_IOTLB_1G_MISS);
+VTD_SIP_SHOW(slpwc_l3_miss, IPERF_VTD_SIP_SLPWC_L3_MISS);
+VTD_SIP_SHOW(slpwc_l4_miss, IPERF_VTD_SIP_SLPWC_L4_MISS);
+VTD_SIP_SHOW(rcc_miss, IPERF_VTD_SIP_RCC_MISS);
+
+static u64 read_iommu_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum iperf_vtd_events base_event)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_ifpmon_vtd_ctr ctr;
+ struct feature_fme_iperf *iperf;
+ enum iperf_vtd_events event = base_event + port_id;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ ctl.vtd_evtcode = event;
+ writeq(ctl.csr, &iperf->vtd_ctl);
+
+ ctr.event_code = event;
+
+ if (fpga_wait_register_field(event_code, ctr,
+ &iperf->vtd_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&iperf->vtd_ctr);
+ counter = ctr.vtd_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define VTD_PORT_SHOW(name, base_event) \
+static int fme_iperf_get_vtd_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_iommu_counter(fme, port_id, base_event); \
+ return 0; \
+}
+
+VTD_PORT_SHOW(read_transaction, IPERF_VTD_AFU_MEM_RD_TRANS);
+VTD_PORT_SHOW(write_transaction, IPERF_VTD_AFU_MEM_WR_TRANS);
+VTD_PORT_SHOW(devtlb_read_hit, IPERF_VTD_AFU_DEVTLB_RD_HIT);
+VTD_PORT_SHOW(devtlb_write_hit, IPERF_VTD_AFU_DEVTLB_WR_HIT);
+VTD_PORT_SHOW(devtlb_4k_fill, IPERF_VTD_DEVTLB_4K_FILL);
+VTD_PORT_SHOW(devtlb_2m_fill, IPERF_VTD_DEVTLB_2M_FILL);
+VTD_PORT_SHOW(devtlb_1g_fill, IPERF_VTD_DEVTLB_1G_FILL);
+
+static bool fabric_pobj_is_enabled(u8 port_id, struct feature_fme_iperf *iperf)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+
+ ctl.csr = readq(&iperf->fab_ctl);
+
+ if (ctl.port_filter == FAB_DISABLE_FILTER)
+ return port_id == PERF_OBJ_ROOT_ID;
+
+ return port_id == ctl.port_id;
+}
+
+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum iperf_fab_events fab_event)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ struct feature_fme_ifpmon_fab_ctr ctr;
+ struct feature_fme_iperf *iperf;
+ u64 counter = 0;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* if it is disabled, force the counter to return zero. */
+ if (!fabric_pobj_is_enabled(port_id, iperf))
+ goto exit;
+
+ ctl.csr = readq(&iperf->fab_ctl);
+ ctl.fab_evtcode = fab_event;
+ writeq(ctl.csr, &iperf->fab_ctl);
+
+ ctr.event_code = fab_event;
+
+ if (fpga_wait_register_field(event_code, ctr,
+ &iperf->fab_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&iperf->fab_ctr);
+ counter = ctr.fab_cnt;
+exit:
+ spinlock_unlock(&fme->lock);
+ return counter;
+}
+
+#define FAB_PORT_SHOW(name, event) \
+static int fme_iperf_get_fab_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_fabric_counter(fme, port_id, event); \
+ return 0; \
+}
+
+FAB_PORT_SHOW(pcie0_read, IPERF_FAB_PCIE0_RD);
+FAB_PORT_SHOW(pcie0_write, IPERF_FAB_PCIE0_WR);
+FAB_PORT_SHOW(pcie1_read, IPERF_FAB_PCIE1_RD);
+FAB_PORT_SHOW(pcie1_write, IPERF_FAB_PCIE1_WR);
+FAB_PORT_SHOW(upi_read, IPERF_FAB_UPI_RD);
+FAB_PORT_SHOW(upi_write, IPERF_FAB_UPI_WR);
+FAB_PORT_SHOW(mmio_read, IPERF_FAB_MMIO_RD);
+FAB_PORT_SHOW(mmio_write, IPERF_FAB_MMIO_WR);
+
+static int fme_iperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 *enable)
+{
+ struct feature_fme_iperf *iperf;
+ int status;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ status = fabric_pobj_is_enabled(port_id, iperf);
+ *enable = (u64)status;
+
+ return 0;
+}
+
+/*
+ * If enable one port or all port event counter in fabric, other
+ * fabric event counter originally enabled will be disable automatically.
+ */
+static int fme_iperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 enable)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ struct feature_fme_iperf *iperf;
+ bool state;
+
+ state = !!enable;
+
+ if (!state)
+ return -EINVAL;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* if it is already enabled. */
+ if (fabric_pobj_is_enabled(port_id, iperf))
+ return 0;
+
+ spinlock_lock(&fme->lock);
+ ctl.csr = readq(&iperf->fab_ctl);
+ if (port_id == PERF_OBJ_ROOT_ID) {
+ ctl.port_filter = FAB_DISABLE_FILTER;
+ } else {
+ ctl.port_filter = FAB_ENABLE_FILTER;
+ ctl.port_id = port_id;
+ }
+
+ writeq(ctl.csr, &iperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_iperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_fab_ctl ctl;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->fab_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_iperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->fab_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define PERF_MAX_PORT_NUM 1
+#define FME_IPERF_CAP_IOMMU 0x1
+
+static int fme_global_iperf_init(struct feature *feature)
+{
+ struct ifpga_fme_hw *fme;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_capability fme_capability;
+
+ dev_info(NULL, "FME global_iperf Init.\n");
+
+ fme = (struct ifpga_fme_hw *)feature->parent;
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ /* check if iommu is not supported on this device. */
+ fme_capability.csr = readq(&fme_hdr->capability);
+ dev_info(NULL, "FME HEAD fme_capability %llx.\n",
+ (unsigned long long)fme_hdr->capability.csr);
+
+ if (fme_capability.iommu_support)
+ feature->cap |= FME_IPERF_CAP_IOMMU;
+
+ return 0;
+}
+
+static void fme_global_iperf_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_iperf UInit.\n");
+}
+
+static int fme_iperf_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* CLOCK */
+ return fme_iperf_get_clock(fme, &prop->data);
+ case 0x2: /* REVISION */
+ return fme_iperf_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_cache_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_iperf_get_cache_freeze(fme, &prop->data);
+ case 0x2: /* READ_HIT */
+ return fme_iperf_get_cache_read_hit(fme, &prop->data);
+ case 0x3: /* READ_MISS */
+ return fme_iperf_get_cache_read_miss(fme, &prop->data);
+ case 0x4: /* WRITE_HIT */
+ return fme_iperf_get_cache_write_hit(fme, &prop->data);
+ case 0x5: /* WRITE_MISS */
+ return fme_iperf_get_cache_write_miss(fme, &prop->data);
+ case 0x6: /* HOLD_REQUEST */
+ return fme_iperf_get_cache_hold_request(fme, &prop->data);
+ case 0x7: /* TX_REQ_STALL */
+ return fme_iperf_get_cache_tx_req_stall(fme, &prop->data);
+ case 0x8: /* RX_REQ_STALL */
+ return fme_iperf_get_cache_rx_req_stall(fme, &prop->data);
+ case 0x9: /* RX_EVICTION */
+ return fme_iperf_get_cache_rx_eviction(fme, &prop->data);
+ case 0xa: /* DATA_WRITE_PORT_CONTENTION */
+ return fme_iperf_get_cache_data_write_port_contention(fme,
+ &prop->data);
+ case 0xb: /* TAG_WRITE_PORT_CONTENTION */
+ return fme_iperf_get_cache_tag_write_port_contention(fme,
+ &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_iperf_get_vtd_freeze(fme, &prop->data);
+ case 0x2: /* IOTLB_4K_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_4k_hit(fme, &prop->data);
+ case 0x3: /* IOTLB_2M_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_2m_hit(fme, &prop->data);
+ case 0x4: /* IOTLB_1G_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_1g_hit(fme, &prop->data);
+ case 0x5: /* SLPWC_L3_HIT */
+ return fme_iperf_get_vtd_sip_slpwc_l3_hit(fme, &prop->data);
+ case 0x6: /* SLPWC_L4_HIT */
+ return fme_iperf_get_vtd_sip_slpwc_l4_hit(fme, &prop->data);
+ case 0x7: /* RCC_HIT */
+ return fme_iperf_get_vtd_sip_rcc_hit(fme, &prop->data);
+ case 0x8: /* IOTLB_4K_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_4k_miss(fme, &prop->data);
+ case 0x9: /* IOTLB_2M_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_2m_miss(fme, &prop->data);
+ case 0xa: /* IOTLB_1G_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_1g_miss(fme, &prop->data);
+ case 0xb: /* SLPWC_L3_MISS */
+ return fme_iperf_get_vtd_sip_slpwc_l3_miss(fme, &prop->data);
+ case 0xc: /* SLPWC_L4_MISS */
+ return fme_iperf_get_vtd_sip_slpwc_l4_miss(fme, &prop->data);
+ case 0xd: /* RCC_MISS */
+ return fme_iperf_get_vtd_sip_rcc_miss(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_sub_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ if (sub > PERF_MAX_PORT_NUM)
+ return -ENOENT;
+
+ switch (id) {
+ case 0xe: /* READ_TRANSACTION */
+ return fme_iperf_get_vtd_port_read_transaction(fme, sub,
+ &prop->data);
+ case 0xf: /* WRITE_TRANSACTION */
+ return fme_iperf_get_vtd_port_write_transaction(fme, sub,
+ &prop->data);
+ case 0x10: /* DEVTLB_READ_HIT */
+ return fme_iperf_get_vtd_port_devtlb_read_hit(fme, sub,
+ &prop->data);
+ case 0x11: /* DEVTLB_WRITE_HIT */
+ return fme_iperf_get_vtd_port_devtlb_write_hit(fme, sub,
+ &prop->data);
+ case 0x12: /* DEVTLB_4K_FILL */
+ return fme_iperf_get_vtd_port_devtlb_4k_fill(fme, sub,
+ &prop->data);
+ case 0x13: /* DEVTLB_2M_FILL */
+ return fme_iperf_get_vtd_port_devtlb_2m_fill(fme, sub,
+ &prop->data);
+ case 0x14: /* DEVTLB_1G_FILL */
+ return fme_iperf_get_vtd_port_devtlb_1g_fill(fme, sub,
+ &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED)
+ return fme_iperf_vtd_root_get_prop(feature, prop);
+
+ return fme_iperf_vtd_sub_get_prop(feature, prop);
+}
+
+static int fme_iperf_fab_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ /* Other properties are present for both top and sub levels */
+ switch (id) {
+ case 0x1: /* FREEZE */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_iperf_get_fab_freeze(fme, &prop->data);
+ case 0x2: /* PCIE0_READ */
+ return fme_iperf_get_fab_port_pcie0_read(fme, sub,
+ &prop->data);
+ case 0x3: /* PCIE0_WRITE */
+ return fme_iperf_get_fab_port_pcie0_write(fme, sub,
+ &prop->data);
+ case 0x4: /* PCIE1_READ */
+ return fme_iperf_get_fab_port_pcie1_read(fme, sub,
+ &prop->data);
+ case 0x5: /* PCIE1_WRITE */
+ return fme_iperf_get_fab_port_pcie1_write(fme, sub,
+ &prop->data);
+ case 0x6: /* UPI_READ */
+ return fme_iperf_get_fab_port_upi_read(fme, sub,
+ &prop->data);
+ case 0x7: /* UPI_WRITE */
+ return fme_iperf_get_fab_port_upi_write(fme, sub,
+ &prop->data);
+ case 0x8: /* MMIO_READ */
+ return fme_iperf_get_fab_port_mmio_read(fme, sub,
+ &prop->data);
+ case 0x9: /* MMIO_WRITE */
+ return fme_iperf_get_fab_port_mmio_write(fme, sub,
+ &prop->data);
+ case 0xa: /* ENABLE */
+ return fme_iperf_get_fab_port_enable(fme, sub, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_iperf_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_CACHE:
+ return fme_iperf_cache_get_prop(feature, prop);
+ case PERF_PROP_TOP_VTD:
+ return fme_iperf_vtd_get_prop(feature, prop);
+ case PERF_PROP_TOP_FAB:
+ return fme_iperf_fab_get_prop(feature, prop);
+ case PERF_PROP_TOP_UNUSED:
+ return fme_iperf_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_cache_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
+ return fme_iperf_set_cache_freeze(fme, prop->data);
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
+ return fme_iperf_set_vtd_freeze(fme, prop->data);
+
+ return -ENOENT;
+}
+
+static int fme_iperf_fab_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_iperf_set_fab_freeze(fme, prop->data);
+ case 0xa: /* ENABLE */
+ return fme_iperf_set_fab_port_enable(fme, sub, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_iperf_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_CACHE:
+ return fme_iperf_cache_set_prop(feature, prop);
+ case PERF_PROP_TOP_VTD:
+ return fme_iperf_vtd_set_prop(feature, prop);
+ case PERF_PROP_TOP_FAB:
+ return fme_iperf_fab_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_global_iperf_ops = {
+ .init = fme_global_iperf_init,
+ .uinit = fme_global_iperf_uinit,
+ .get_prop = fme_global_iperf_get_prop,
+ .set_prop = fme_global_iperf_set_prop,
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c
new file mode 100644
index 00000000..ec0beeb1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static u64
+pr_err_handle(struct feature_fme_pr *fme_pr)
+{
+ struct feature_fme_pr_status fme_pr_status;
+ unsigned long err_code;
+ u64 fme_pr_error;
+ int i;
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ if (!fme_pr_status.pr_status)
+ return 0;
+
+ err_code = readq(&fme_pr->ccip_fme_pr_err);
+ fme_pr_error = err_code;
+
+ for (i = 0; i < PR_MAX_ERR_NUM; i++) {
+ if (err_code & (1 << i))
+ dev_info(NULL, "%s\n", pr_err_msg[i]);
+ }
+
+ writeq(fme_pr_error, &fme_pr->ccip_fme_pr_err);
+ return fme_pr_error;
+}
+
+static int fme_pr_write_init(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+ struct feature_fme_pr_status fme_pr_status;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+ if (!fme_pr)
+ return -EINVAL;
+
+ if (info->flags != FPGA_MGR_PARTIAL_RECONFIG)
+ return -EINVAL;
+
+ dev_info(fme_dev, "resetting PR before initiated PR\n");
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_reset = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ fme_pr_ctl.pr_reset_ack = 1;
+
+ if (fpga_wait_register_field(pr_reset_ack, fme_pr_ctl,
+ &fme_pr->ccip_fme_pr_control,
+ PR_WAIT_TIMEOUT, 1)) {
+ dev_err(fme_dev, "maximum PR timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_reset = 0;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "waiting for PR resource in HW to be initialized and ready\n");
+
+ fme_pr_status.pr_host_status = PR_HOST_STATUS_IDLE;
+
+ if (fpga_wait_register_field(pr_host_status, fme_pr_status,
+ &fme_pr->ccip_fme_pr_status,
+ PR_WAIT_TIMEOUT, 1)) {
+ dev_err(fme_dev, "maximum PR timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_info(fme_dev, "check if have any previous PR error\n");
+ pr_err_handle(fme_pr);
+ return 0;
+}
+
+static int fme_pr_write(struct ifpga_fme_hw *fme_dev,
+ int port_id, const char *buf, size_t count,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+ struct feature_fme_pr_status fme_pr_status;
+ struct feature_fme_pr_data fme_pr_data;
+ int delay, pr_credit;
+ int ret = 0;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+ if (!fme_pr)
+ return -EINVAL;
+
+ dev_info(fme_dev, "set PR port ID and start request\n");
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_regionid = port_id;
+ fme_pr_ctl.pr_start_req = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "pushing data from bitstream to HW\n");
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ pr_credit = fme_pr_status.pr_credit;
+
+ while (count > 0) {
+ delay = 0;
+ while (pr_credit <= 1) {
+ if (delay++ > PR_WAIT_TIMEOUT) {
+ dev_err(fme_dev, "maximum try\n");
+
+ info->pr_err = pr_err_handle(fme_pr);
+ return info->pr_err ? -EIO : -ETIMEDOUT;
+ }
+ udelay(1);
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ pr_credit = fme_pr_status.pr_credit;
+ };
+
+ if (count >= fme_dev->pr_bandwidth) {
+ switch (fme_dev->pr_bandwidth) {
+ case 4:
+ fme_pr_data.rsvd = 0;
+ fme_pr_data.pr_data_raw = *((const u32 *)buf);
+ writeq(fme_pr_data.csr,
+ &fme_pr->ccip_fme_pr_data);
+ break;
+ default:
+ ret = -EFAULT;
+ goto done;
+ }
+
+ buf += fme_dev->pr_bandwidth;
+ count -= fme_dev->pr_bandwidth;
+ pr_credit--;
+ } else {
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
+static int fme_pr_write_complete(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_push_complete = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "green bitstream push complete\n");
+ dev_info(fme_dev, "waiting for HW to release PR resource\n");
+
+ fme_pr_ctl.pr_start_req = 0;
+
+ if (fpga_wait_register_field(pr_start_req, fme_pr_ctl,
+ &fme_pr->ccip_fme_pr_control,
+ PR_WAIT_TIMEOUT, 1)) {
+ printf("maximum try.\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_info(fme_dev, "PR operation complete, checking status\n");
+ info->pr_err = pr_err_handle(fme_pr);
+ if (info->pr_err)
+ return -EIO;
+
+ dev_info(fme_dev, "PR done successfully\n");
+ return 0;
+}
+
+static int fpga_pr_buf_load(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info, const char *buf,
+ size_t count)
+{
+ int ret;
+
+ info->state = FPGA_PR_STATE_WRITE_INIT;
+ ret = fme_pr_write_init(fme_dev, info);
+ if (ret) {
+ dev_err(fme_dev, "Error preparing FPGA for writing\n");
+ info->state = FPGA_PR_STATE_WRITE_INIT_ERR;
+ return ret;
+ }
+
+ /*
+ * Write the FPGA image to the FPGA.
+ */
+ info->state = FPGA_PR_STATE_WRITE;
+ ret = fme_pr_write(fme_dev, info->port_id, buf, count, info);
+ if (ret) {
+ dev_err(fme_dev, "Error while writing image data to FPGA\n");
+ info->state = FPGA_PR_STATE_WRITE_ERR;
+ return ret;
+ }
+
+ /*
+ * After all the FPGA image has been written, do the device specific
+ * steps to finish and set the FPGA into operating mode.
+ */
+ info->state = FPGA_PR_STATE_WRITE_COMPLETE;
+ ret = fme_pr_write_complete(fme_dev, info);
+ if (ret) {
+ dev_err(fme_dev, "Error after writing image data to FPGA\n");
+ info->state = FPGA_PR_STATE_WRITE_COMPLETE_ERR;
+ return ret;
+ }
+ info->state = FPGA_PR_STATE_DONE;
+
+ return 0;
+}
+
+static int fme_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status)
+{
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_capability fme_capability;
+ struct ifpga_fme_hw *fme = &hw->fme;
+ struct fpga_pr_info info;
+ struct ifpga_port_hw *port;
+ int ret = 0;
+
+ if (!buffer || size == 0)
+ return -EINVAL;
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return -EINVAL;
+
+ /*
+ * Padding extra zeros to align PR buffer with PR bandwidth, HW will
+ * ignore these zeros automatically.
+ */
+ size = IFPGA_ALIGN(size, fme->pr_bandwidth);
+
+ /* get fme header region */
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_HEADER);
+ if (!fme_hdr)
+ return -EINVAL;
+
+ /* check port id */
+ fme_capability.csr = readq(&fme_hdr->capability);
+ if (port_id >= fme_capability.num_ports) {
+ dev_err(fme, "port number more than maximum\n");
+ return -EINVAL;
+ }
+
+ memset(&info, 0, sizeof(struct fpga_pr_info));
+ info.flags = FPGA_MGR_PARTIAL_RECONFIG;
+ info.port_id = port_id;
+
+ spinlock_lock(&fme->lock);
+
+ /* get port device by port_id */
+ port = &hw->port[port_id];
+
+ /* Disable Port before PR */
+ fpga_port_disable(port);
+
+ ret = fpga_pr_buf_load(fme, &info, (void *)buffer, size);
+
+ *status = info.pr_err;
+
+ /* Re-enable Port after PR finished */
+ fpga_port_enable(port);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, u64 *status)
+{
+ struct bts_header *bts_hdr;
+ void *buf;
+ struct ifpga_port_hw *port;
+ int ret;
+
+ if (!buffer || size == 0) {
+ dev_err(hw, "invalid parameter\n");
+ return -EINVAL;
+ }
+
+ bts_hdr = (struct bts_header *)buffer;
+
+ if (is_valid_bts(bts_hdr)) {
+ dev_info(hw, "this is a valid bitsteam..\n");
+ size -= (sizeof(struct bts_header) +
+ bts_hdr->metadata_len);
+ buf = (u8 *)buffer + sizeof(struct bts_header) +
+ bts_hdr->metadata_len;
+ } else {
+ return -EINVAL;
+ }
+
+ /* clean port error before do PR */
+ port = &hw->port[port_id];
+ ret = port_clear_error(port);
+ if (ret) {
+ dev_err(hw, "port cannot clear error\n");
+ return -EINVAL;
+ }
+
+ return fme_pr(hw, port_id, buf, size, status);
+}
+
+static int fme_pr_mgmt_init(struct feature *feature)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_header fme_pr_header;
+ struct ifpga_fme_hw *fme;
+
+ dev_info(NULL, "FME PR MGMT Init.\n");
+
+ fme = (struct ifpga_fme_hw *)feature->parent;
+
+ fme_pr = (struct feature_fme_pr *)feature->addr;
+
+ fme_pr_header.csr = readq(&fme_pr->header);
+ if (fme_pr_header.revision == 2) {
+ dev_info(NULL, "using 512-bit PR\n");
+ fme->pr_bandwidth = 64;
+ } else {
+ dev_info(NULL, "using 32-bit PR\n");
+ fme->pr_bandwidth = 4;
+ }
+
+ return 0;
+}
+
+static void fme_pr_mgmt_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME PR MGMT UInit.\n");
+}
+
+struct feature_ops fme_pr_mgmt_ops = {
+ .init = fme_pr_mgmt_init,
+ .uinit = fme_pr_mgmt_uinit,
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_hw.h b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_hw.h
new file mode 100644
index 00000000..a20520c9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_hw.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_HW_H_
+#define _IFPGA_HW_H_
+
+#include "ifpga_defines.h"
+#include "opae_ifpga_hw_api.h"
+
+enum ifpga_feature_state {
+ IFPGA_FEATURE_UNUSED = 0,
+ IFPGA_FEATURE_ATTACHED,
+};
+
+struct feature_irq_ctx {
+ int eventfd;
+ int idx;
+};
+
+struct feature {
+ enum ifpga_feature_state state;
+ const char *name;
+ u64 id;
+ u8 *addr;
+ uint64_t phys_addr;
+ u32 size;
+ int revision;
+ u64 cap;
+ int vfio_dev_fd;
+ struct feature_irq_ctx *ctx;
+ unsigned int ctx_num;
+
+ void *parent; /* to parent hw data structure */
+
+ struct feature_ops *ops;/* callback to this private feature */
+};
+
+struct feature_ops {
+ int (*init)(struct feature *feature);
+ void (*uinit)(struct feature *feature);
+ int (*get_prop)(struct feature *feature, struct feature_prop *prop);
+ int (*set_prop)(struct feature *feature, struct feature_prop *prop);
+ int (*set_irq)(struct feature *feature, void *irq_set);
+};
+
+enum ifpga_fme_state {
+ IFPGA_FME_UNUSED = 0,
+ IFPGA_FME_IMPLEMENTED,
+};
+
+struct ifpga_fme_hw {
+ enum ifpga_fme_state state;
+
+ struct feature sub_feature[FME_FEATURE_ID_MAX];
+ spinlock_t lock; /* protect hardware access */
+
+ void *parent; /* pointer to ifpga_hw */
+
+ /* provied by HEADER feature */
+ u32 port_num;
+ struct uuid bitstream_id;
+ u64 bitstream_md;
+ size_t pr_bandwidth;
+ u32 socket_id;
+ u32 fabric_version_id;
+ u32 cache_size;
+
+ u32 capability;
+};
+
+enum ifpga_port_state {
+ IFPGA_PORT_UNUSED = 0,
+ IFPGA_PORT_ATTACHED,
+ IFPGA_PORT_DETACHED,
+};
+
+struct ifpga_port_hw {
+ enum ifpga_port_state state;
+
+ struct feature sub_feature[PORT_FEATURE_ID_MAX];
+ spinlock_t lock; /* protect access to hw */
+
+ void *parent; /* pointer to ifpga_hw */
+
+ int port_id; /* provied by HEADER feature */
+ struct uuid afu_id; /* provied by User AFU feature */
+
+ unsigned int disable_count;
+
+ u32 capability;
+ u32 num_umsgs; /* The number of allocated umsgs */
+ u32 num_uafu_irqs; /* The number of uafu interrupts */
+ u8 *stp_addr;
+ u32 stp_size;
+};
+
+#define AFU_MAX_REGION 1
+
+struct ifpga_afu_info {
+ struct opae_reg_region region[AFU_MAX_REGION];
+ unsigned int num_regions;
+ unsigned int num_irqs;
+};
+
+struct ifpga_hw {
+ struct opae_adapter *adapter;
+ struct opae_adapter_data_pci *pci_data;
+
+ struct ifpga_fme_hw fme;
+ struct ifpga_port_hw port[MAX_FPGA_PORT_NUM];
+};
+
+static inline bool is_ifpga_hw_pf(struct ifpga_hw *hw)
+{
+ return hw->fme.state != IFPGA_FME_UNUSED;
+}
+
+static inline bool is_valid_port_id(struct ifpga_hw *hw, u32 port_id)
+{
+ if (port_id >= MAX_FPGA_PORT_NUM ||
+ hw->port[port_id].state != IFPGA_PORT_ATTACHED)
+ return false;
+
+ return true;
+}
+#endif /* _IFPGA_HW_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_port.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_port.c
new file mode 100644
index 00000000..a962f5b4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_port.c
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->get_prop)
+ return feature->ops->get_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->set_prop)
+ return feature->ops->set_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set)
+{
+ struct feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, feature_id);
+
+ if (feature && feature->ops && feature->ops->set_irq)
+ return feature->ops->set_irq(feature, irq_set);
+
+ return -ENOENT;
+}
+
+static int port_get_revision(struct ifpga_port_hw *port, u64 *revision)
+{
+ struct feature_port_header *port_hdr
+ = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ struct feature_header header;
+
+ header.csr = readq(&port_hdr->header);
+
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int port_get_portidx(struct ifpga_port_hw *port, u64 *idx)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ capability.csr = readq(&port_hdr->capability);
+ *idx = capability.port_number;
+
+ return 0;
+}
+
+static int port_get_latency_tolerance(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ control.csr = readq(&port_hdr->control);
+ *val = control.latency_tolerance;
+
+ return 0;
+}
+
+static int port_get_ap1_event(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.ap1_event;
+
+ return 0;
+}
+
+static int port_set_ap1_event(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ status.ap1_event = val;
+ writeq(status.csr, &port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_ap2_event(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.ap2_event;
+
+ return 0;
+}
+
+static int port_set_ap2_event(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ status.ap2_event = val;
+ writeq(status.csr, &port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_power_state(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.power_state;
+
+ return 0;
+}
+
+static int port_get_userclk_freqcmd(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_cmd0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_set_userclk_freqcmd(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ writeq(val, &port_hdr->user_clk_freq_cmd0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_cmd1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_set_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ writeq(val, &port_hdr->user_clk_freq_cmd1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqsts(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_sts0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqcntrsts(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_sts1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_hdr_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port hdr Init.\n");
+
+ fpga_port_reset(port);
+
+ return 0;
+}
+
+static void port_hdr_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "port hdr uinit.\n");
+}
+
+static int port_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_HDR_PROP_REVISION:
+ return port_get_revision(port, &prop->data);
+ case PORT_HDR_PROP_PORTIDX:
+ return port_get_portidx(port, &prop->data);
+ case PORT_HDR_PROP_LATENCY_TOLERANCE:
+ return port_get_latency_tolerance(port, &prop->data);
+ case PORT_HDR_PROP_AP1_EVENT:
+ return port_get_ap1_event(port, &prop->data);
+ case PORT_HDR_PROP_AP2_EVENT:
+ return port_get_ap2_event(port, &prop->data);
+ case PORT_HDR_PROP_POWER_STATE:
+ return port_get_power_state(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCMD:
+ return port_get_userclk_freqcmd(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCNTRCMD:
+ return port_get_userclk_freqcntrcmd(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQSTS:
+ return port_get_userclk_freqsts(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_CNTRSTS:
+ return port_get_userclk_freqcntrsts(port, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int port_hdr_set_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_HDR_PROP_AP1_EVENT:
+ return port_set_ap1_event(port, prop->data);
+ case PORT_HDR_PROP_AP2_EVENT:
+ return port_set_ap2_event(port, prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCMD:
+ return port_set_userclk_freqcmd(port, prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCNTRCMD:
+ return port_set_userclk_freqcntrcmd(port, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops port_hdr_ops = {
+ .init = port_hdr_init,
+ .uinit = port_hdr_uinit,
+ .get_prop = port_hdr_get_prop,
+ .set_prop = port_hdr_set_prop,
+};
+
+static int port_stp_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port stp Init.\n");
+
+ spinlock_lock(&port->lock);
+ port->stp_addr = feature->addr;
+ port->stp_size = feature->size;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_stp_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "port stp uinit.\n");
+}
+
+struct feature_ops port_stp_ops = {
+ .init = port_stp_init,
+ .uinit = port_stp_uinit,
+};
+
+static int port_uint_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "PORT UINT Init.\n");
+
+ spinlock_lock(&port->lock);
+ if (feature->ctx_num) {
+ port->capability |= FPGA_PORT_CAP_UAFU_IRQ;
+ port->num_uafu_irqs = feature->ctx_num;
+ }
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_uint_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "PORT UINT UInit.\n");
+}
+
+struct feature_ops port_uint_ops = {
+ .init = port_uint_init,
+ .uinit = port_uint_uinit,
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
new file mode 100644
index 00000000..23db562b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static int port_err_get_revision(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_header header;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ header.csr = readq(&port_err->header);
+ *val = header.revision;
+
+ return 0;
+}
+
+static int port_err_get_errors(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ error.csr = readq(&port_err->port_error);
+ *val = error.csr;
+
+ return 0;
+}
+
+static int port_err_get_first_error(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_first_err_key first_error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ first_error.csr = readq(&port_err->port_first_error);
+ *val = first_error.csr;
+
+ return 0;
+}
+
+static int port_err_get_first_malformed_req_lsb(struct ifpga_port_hw *port,
+ u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_malformed_req0 malreq0;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ malreq0.header_lsb = readq(&port_err->malreq0);
+ *val = malreq0.header_lsb;
+
+ return 0;
+}
+
+static int port_err_get_first_malformed_req_msb(struct ifpga_port_hw *port,
+ u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_malformed_req1 malreq1;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ malreq1.header_msb = readq(&port_err->malreq1);
+ *val = malreq1.header_msb;
+
+ return 0;
+}
+
+static int port_err_set_clear(struct ifpga_port_hw *port, u64 val)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = port_err_clear(port, val);
+ spinlock_unlock(&port->lock);
+
+ return ret;
+}
+
+static int port_error_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port error Init.\n");
+
+ spinlock_lock(&port->lock);
+ port_err_mask(port, false);
+ if (feature->ctx_num)
+ port->capability |= FPGA_PORT_CAP_ERR_IRQ;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_error_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+}
+
+static int port_error_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_ERR_PROP_REVISION:
+ return port_err_get_revision(port, &prop->data);
+ case PORT_ERR_PROP_ERRORS:
+ return port_err_get_errors(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_ERROR:
+ return port_err_get_first_error(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB:
+ return port_err_get_first_malformed_req_lsb(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB:
+ return port_err_get_first_malformed_req_msb(port, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int port_error_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ if (prop->prop_id == PORT_ERR_PROP_CLEAR)
+ return port_err_set_clear(port, prop->data);
+
+ return -ENOENT;
+}
+
+struct feature_ops port_error_ops = {
+ .init = port_error_init,
+ .uinit = port_error_uinit,
+ .get_prop = port_error_get_prop,
+ .set_prop = port_error_set_prop,
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/meson.build b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/meson.build
new file mode 100644
index 00000000..cb655352
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/meson.build
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = [
+ 'ifpga_api.c',
+ 'ifpga_enumerate.c',
+ 'ifpga_feature_dev.c',
+ 'ifpga_fme.c',
+ 'ifpga_fme_iperf.c',
+ 'ifpga_fme_dperf.c',
+ 'ifpga_fme_error.c',
+ 'ifpga_port.c',
+ 'ifpga_port_error.c',
+ 'ifpga_fme_pr.c',
+ 'opae_hw_api.c',
+ 'opae_ifpga_hw_api.c',
+ 'opae_debug.c'
+]
+
+error_cflags = ['-Wno-sign-compare', '-Wno-unused-value',
+ '-Wno-format', '-Wno-unused-but-set-variable',
+ '-Wno-strict-aliasing'
+]
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('ifpga_rawdev_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_debug.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_debug.c
new file mode 100644
index 00000000..024d7d29
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_debug.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#define OPAE_HW_DEBUG
+
+#include "opae_hw_api.h"
+#include "opae_debug.h"
+
+void opae_manager_dump(struct opae_manager *mgr)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Manger %s\n", mgr->name);
+ opae_log("OPAE Manger OPs = %p\n", mgr->ops);
+ opae_log("OPAE Manager Private Data = %p\n", mgr->data);
+ opae_log("OPAE Adapter(parent) = %p\n", mgr->adapter);
+ opae_log("==========================\n");
+}
+
+void opae_bridge_dump(struct opae_bridge *br)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Bridge %s\n", br->name);
+ opae_log("OPAE Bridge ID = %d\n", br->id);
+ opae_log("OPAE Bridge OPs = %p\n", br->ops);
+ opae_log("OPAE Bridge Private Data = %p\n", br->data);
+ opae_log("OPAE Accelerator(under this bridge) = %p\n", br->acc);
+ opae_log("==========================\n");
+}
+
+void opae_accelerator_dump(struct opae_accelerator *acc)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Accelerator %s\n", acc->name);
+ opae_log("OPAE Accelerator Index = %d\n", acc->index);
+ opae_log("OPAE Accelerator OPs = %p\n", acc->ops);
+ opae_log("OPAE Accelerator Private Data = %p\n", acc->data);
+ opae_log("OPAE Bridge (upstream) = %p\n", acc->br);
+ opae_log("OPAE Manager (upstream) = %p\n", acc->mgr);
+ opae_log("==========================\n");
+
+ if (acc->br)
+ opae_bridge_dump(acc->br);
+}
+
+static void opae_adapter_data_dump(void *data)
+{
+ struct opae_adapter_data *d = data;
+ struct opae_adapter_data_pci *d_pci;
+ struct opae_reg_region *r;
+ int i;
+
+ opae_log("=====%s=====\n", __func__);
+
+ switch (d->type) {
+ case OPAE_FPGA_PCI:
+ d_pci = (struct opae_adapter_data_pci *)d;
+
+ opae_log("OPAE Adapter Type = PCI\n");
+ opae_log("PCI Device ID: 0x%04x\n", d_pci->device_id);
+ opae_log("PCI Vendor ID: 0x%04x\n", d_pci->vendor_id);
+
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ r = &d_pci->region[i];
+ opae_log("PCI Bar %d: phy(%llx) len(%llx) addr(%p)\n",
+ i, (unsigned long long)r->phys_addr,
+ (unsigned long long)r->len, r->addr);
+ }
+ break;
+ case OPAE_FPGA_NET:
+ break;
+ }
+
+ opae_log("==========================\n");
+}
+
+void opae_adapter_dump(struct opae_adapter *adapter, int verbose)
+{
+ struct opae_accelerator *acc;
+
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Adapter %s\n", adapter->name);
+ opae_log("OPAE Adapter OPs = %p\n", adapter->ops);
+ opae_log("OPAE Adapter Private Data = %p\n", adapter->data);
+ opae_log("OPAE Manager (downstream) = %p\n", adapter->mgr);
+
+ if (verbose) {
+ if (adapter->mgr)
+ opae_manager_dump(adapter->mgr);
+
+ opae_adapter_for_each_acc(adapter, acc)
+ opae_accelerator_dump(acc);
+
+ if (adapter->data)
+ opae_adapter_data_dump(adapter->data);
+ }
+
+ opae_log("==========================\n");
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_debug.h b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_debug.h
new file mode 100644
index 00000000..a03dff92
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_debug.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_DEBUG_H_
+#define _OPAE_DEBUG_H_
+
+#ifdef OPAE_HW_DEBUG
+#define opae_log(fmt, args...) printf(fmt, ## args)
+#else
+#define opae_log(fme, args...) do {} while (0)
+#endif
+
+void opae_manager_dump(struct opae_manager *mgr);
+void opae_bridge_dump(struct opae_bridge *br);
+void opae_accelerator_dump(struct opae_accelerator *acc);
+void opae_adapter_dump(struct opae_adapter *adapter, int verbose);
+
+#endif /* _OPAE_DEBUG_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.c
new file mode 100644
index 00000000..a533dfea
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_hw_api.h"
+#include "opae_debug.h"
+#include "ifpga_api.h"
+
+/* OPAE Bridge Functions */
+
+/**
+ * opae_bridge_alloc - alloc opae_bridge data structure
+ * @name: bridge name.
+ * @ops: ops of this bridge.
+ * @data: private data of this bridge.
+ *
+ * Return opae_bridge on success, otherwise NULL.
+ */
+struct opae_bridge *
+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data)
+{
+ struct opae_bridge *br = opae_zmalloc(sizeof(*br));
+
+ if (!br)
+ return NULL;
+
+ br->name = name;
+ br->ops = ops;
+ br->data = data;
+
+ opae_log("%s %p\n", __func__, br);
+
+ return br;
+}
+
+/**
+ * opae_bridge_reset - reset opae_bridge
+ * @br: bridge to be reset.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_bridge_reset(struct opae_bridge *br)
+{
+ if (!br)
+ return -EINVAL;
+
+ if (br->ops && br->ops->reset)
+ return br->ops->reset(br);
+
+ opae_log("%s no ops\n", __func__);
+
+ return -ENOENT;
+}
+
+/* Accelerator Functions */
+
+/**
+ * opae_accelerator_alloc - alloc opae_accelerator data structure
+ * @name: accelerator name.
+ * @ops: ops of this accelerator.
+ * @data: private data of this accelerator.
+ *
+ * Return: opae_accelerator on success, otherwise NULL.
+ */
+struct opae_accelerator *
+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
+ void *data)
+{
+ struct opae_accelerator *acc = opae_zmalloc(sizeof(*acc));
+
+ if (!acc)
+ return NULL;
+
+ acc->name = name;
+ acc->ops = ops;
+ acc->data = data;
+
+ opae_log("%s %p\n", __func__, acc);
+
+ return acc;
+}
+
+/**
+ * opae_acc_reg_read - read accelerator's register from its reg region.
+ * @acc: accelerator to read.
+ * @region_idx: reg region index.
+ * @offset: reg offset.
+ * @byte: read operation width, e.g 4 byte = 32bit read.
+ * @data: data to store the value read from the register.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ if (!acc || !data)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->read)
+ return acc->ops->read(acc, region_idx, offset, byte, data);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_reg_write - write to accelerator's register from its reg region.
+ * @acc: accelerator to write.
+ * @region_idx: reg region index.
+ * @offset: reg offset.
+ * @byte: write operation width, e.g 4 byte = 32bit write.
+ * @data: data stored the value to write to the register.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ if (!acc || !data)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->write)
+ return acc->ops->write(acc, region_idx, offset, byte, data);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_get_info - get information of an accelerator.
+ * @acc: targeted accelerator
+ * @info: accelerator info data structure to be filled.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info)
+{
+ if (!acc || !info)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_info)
+ return acc->ops->get_info(acc, info);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_get_region_info - get information of an accelerator register region.
+ * @acc: targeted accelerator
+ * @info: accelerator region info data structure to be filled.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info)
+{
+ if (!acc || !info)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_region_info)
+ return acc->ops->get_region_info(acc, info);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_set_irq - set an accelerator's irq.
+ * @acc: targeted accelerator
+ * @start: start vector number
+ * @count: count of vectors to be set from the start vector
+ * @evtfds: event fds to be notified when corresponding irqs happens
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[])
+{
+ if (!acc || !acc->data)
+ return -EINVAL;
+
+ if (start + count <= start)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->set_irq)
+ return acc->ops->set_irq(acc, start, count, evtfds);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_get_uuid - get accelerator's UUID.
+ * @acc: targeted accelerator
+ * @uuid: a pointer to UUID
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid)
+{
+ if (!acc || !uuid)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_uuid)
+ return acc->ops->get_uuid(acc, uuid);
+
+ return -ENOENT;
+}
+
+/* Manager Functions */
+
+/**
+ * opae_manager_alloc - alloc opae_manager data structure
+ * @name: manager name.
+ * @ops: ops of this manager.
+ * @data: private data of this manager.
+ *
+ * Return: opae_manager on success, otherwise NULL.
+ */
+struct opae_manager *
+opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data)
+{
+ struct opae_manager *mgr = opae_zmalloc(sizeof(*mgr));
+
+ if (!mgr)
+ return NULL;
+
+ mgr->name = name;
+ mgr->ops = ops;
+ mgr->data = data;
+
+ opae_log("%s %p\n", __func__, mgr);
+
+ return mgr;
+}
+
+/**
+ * opae_manager_flash - flash a reconfiguration image via opae_manager
+ * @mgr: opae_manager for flash.
+ * @id: id of target region (accelerator).
+ * @buf: image data buffer.
+ * @size: buffer size.
+ * @status: status to store flash result.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_manager_flash(struct opae_manager *mgr, int id, void *buf, u32 size,
+ u64 *status)
+{
+ if (!mgr)
+ return -EINVAL;
+
+ if (mgr && mgr->ops && mgr->ops->flash)
+ return mgr->ops->flash(mgr, id, buf, size, status);
+
+ return -ENOENT;
+}
+
+/* Adapter Functions */
+
+/**
+ * opae_adapter_data_alloc - alloc opae_adapter_data data structure
+ * @type: opae_adapter_type.
+ *
+ * Return: opae_adapter_data on success, otherwise NULL.
+ */
+void *opae_adapter_data_alloc(enum opae_adapter_type type)
+{
+ struct opae_adapter_data *data;
+ int size;
+
+ switch (type) {
+ case OPAE_FPGA_PCI:
+ size = sizeof(struct opae_adapter_data_pci);
+ break;
+ case OPAE_FPGA_NET:
+ size = sizeof(struct opae_adapter_data_net);
+ break;
+ default:
+ size = sizeof(struct opae_adapter_data);
+ break;
+ }
+
+ data = opae_zmalloc(size);
+ if (!data)
+ return NULL;
+
+ data->type = type;
+
+ return data;
+}
+
+static struct opae_adapter_ops *match_ops(struct opae_adapter *adapter)
+{
+ struct opae_adapter_data *data;
+
+ if (!adapter || !adapter->data)
+ return NULL;
+
+ data = adapter->data;
+
+ if (data->type == OPAE_FPGA_PCI)
+ return &ifpga_adapter_ops;
+
+ return NULL;
+}
+
+/**
+ * opae_adapter_data_alloc - alloc opae_adapter_data data structure
+ * @name: adapter name.
+ * @data: private data of this adapter.
+ *
+ * Return: opae_adapter on success, otherwise NULL.
+ */
+struct opae_adapter *opae_adapter_alloc(const char *name, void *data)
+{
+ struct opae_adapter *adapter = opae_zmalloc(sizeof(*adapter));
+
+ if (!adapter)
+ return NULL;
+
+ TAILQ_INIT(&adapter->acc_list);
+ adapter->data = data;
+ adapter->name = name;
+ adapter->ops = match_ops(adapter);
+
+ return adapter;
+}
+
+/**
+ * opae_adapter_enumerate - enumerate this adapter
+ * @adapter: adapter to enumerate.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_adapter_enumerate(struct opae_adapter *adapter)
+{
+ int ret = -ENOENT;
+
+ if (!adapter)
+ return -EINVAL;
+
+ if (adapter->ops && adapter->ops->enumerate)
+ ret = adapter->ops->enumerate(adapter);
+
+ if (!ret)
+ opae_adapter_dump(adapter, 1);
+
+ return ret;
+}
+
+/**
+ * opae_adapter_destroy - destroy this adapter
+ * @adapter: adapter to destroy.
+ *
+ * destroy things allocated during adapter enumeration.
+ */
+void opae_adapter_destroy(struct opae_adapter *adapter)
+{
+ if (adapter && adapter->ops && adapter->ops->destroy)
+ adapter->ops->destroy(adapter);
+}
+
+/**
+ * opae_adapter_get_acc - find and return accelerator with matched id
+ * @adapter: adapter to find the accelerator.
+ * @acc_id: id (index) of the accelerator.
+ *
+ * destroy things allocated during adapter enumeration.
+ */
+struct opae_accelerator *
+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id)
+{
+ struct opae_accelerator *acc = NULL;
+
+ if (!adapter)
+ return NULL;
+
+ opae_adapter_for_each_acc(adapter, acc)
+ if (acc->index == acc_id)
+ return acc;
+
+ return NULL;
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.h b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.h
new file mode 100644
index 00000000..4bbc9df5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_HW_API_H_
+#define _OPAE_HW_API_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+
+#include "opae_osdep.h"
+
+#ifndef PCI_MAX_RESOURCE
+#define PCI_MAX_RESOURCE 6
+#endif
+
+struct opae_adapter;
+
+enum opae_adapter_type {
+ OPAE_FPGA_PCI,
+ OPAE_FPGA_NET,
+};
+
+/* OPAE Manager Data Structure */
+struct opae_manager_ops;
+
+/*
+ * opae_manager has pointer to its parent adapter, as it could be able to manage
+ * all components on this FPGA device (adapter). If not the case, don't set this
+ * adapter, which limit opae_manager ops to manager itself.
+ */
+struct opae_manager {
+ const char *name;
+ struct opae_adapter *adapter;
+ struct opae_manager_ops *ops;
+ void *data;
+};
+
+/* FIXME: add more management ops, e.g power/thermal and etc */
+struct opae_manager_ops {
+ int (*flash)(struct opae_manager *mgr, int id, void *buffer,
+ u32 size, u64 *status);
+};
+
+/* OPAE Manager APIs */
+struct opae_manager *
+opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data);
+#define opae_manager_free(mgr) opae_free(mgr)
+int opae_manager_flash(struct opae_manager *mgr, int acc_id, void *buf,
+ u32 size, u64 *status);
+
+/* OPAE Bridge Data Structure */
+struct opae_bridge_ops;
+
+/*
+ * opae_bridge only has pointer to its downstream accelerator.
+ */
+struct opae_bridge {
+ const char *name;
+ int id;
+ struct opae_accelerator *acc;
+ struct opae_bridge_ops *ops;
+ void *data;
+};
+
+struct opae_bridge_ops {
+ int (*reset)(struct opae_bridge *br);
+};
+
+/* OPAE Bridge APIs */
+struct opae_bridge *
+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data);
+int opae_bridge_reset(struct opae_bridge *br);
+#define opae_bridge_free(br) opae_free(br)
+
+/* OPAE Acceleraotr Data Structure */
+struct opae_accelerator_ops;
+
+/*
+ * opae_accelerator has pointer to its upstream bridge(port).
+ * In some cases, if we allow same user to do PR on its own accelerator, then
+ * set the manager pointer during the enumeration. But in other cases, the PR
+ * functions only could be done via manager in another module / thread / service
+ * / application for better protection.
+ */
+struct opae_accelerator {
+ TAILQ_ENTRY(opae_accelerator) node;
+ const char *name;
+ int index;
+ struct opae_bridge *br;
+ struct opae_manager *mgr;
+ struct opae_accelerator_ops *ops;
+ void *data;
+};
+
+struct opae_acc_info {
+ unsigned int num_regions;
+ unsigned int num_irqs;
+};
+
+struct opae_acc_region_info {
+ u32 flags;
+#define ACC_REGION_READ (1 << 0)
+#define ACC_REGION_WRITE (1 << 1)
+#define ACC_REGION_MMIO (1 << 2)
+ u32 index;
+ u64 phys_addr;
+ u64 len;
+ u8 *addr;
+};
+
+struct opae_accelerator_ops {
+ int (*read)(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+ int (*write)(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+ int (*get_info)(struct opae_accelerator *acc,
+ struct opae_acc_info *info);
+ int (*get_region_info)(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info);
+ int (*set_irq)(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[]);
+ int (*get_uuid)(struct opae_accelerator *acc,
+ struct uuid *uuid);
+};
+
+/* OPAE accelerator APIs */
+struct opae_accelerator *
+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
+ void *data);
+#define opae_accelerator_free(acc) opae_free(acc)
+int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info);
+int opae_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info);
+int opae_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[]);
+int opae_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid);
+
+static inline struct opae_bridge *
+opae_acc_get_br(struct opae_accelerator *acc)
+{
+ return acc ? acc->br : NULL;
+}
+
+static inline struct opae_manager *
+opae_acc_get_mgr(struct opae_accelerator *acc)
+{
+ return acc ? acc->mgr : NULL;
+}
+
+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+
+#define opae_acc_reg_read64(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 8, data)
+#define opae_acc_reg_write64(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 8, data)
+#define opae_acc_reg_read32(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 4, data)
+#define opae_acc_reg_write32(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 4, data)
+#define opae_acc_reg_read16(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 2, data)
+#define opae_acc_reg_write16(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 2, data)
+#define opae_acc_reg_read8(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 1, data)
+#define opae_acc_reg_write8(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 1, data)
+
+/*for data stream read/write*/
+int opae_acc_data_read(struct opae_accelerator *acc, unsigned int flags,
+ u64 offset, unsigned int byte, void *data);
+int opae_acc_data_write(struct opae_accelerator *acc, unsigned int flags,
+ u64 offset, unsigned int byte, void *data);
+
+/* OPAE Adapter Data Structure */
+struct opae_adapter_data {
+ enum opae_adapter_type type;
+};
+
+struct opae_reg_region {
+ u64 phys_addr;
+ u64 len;
+ u8 *addr;
+};
+
+struct opae_adapter_data_pci {
+ enum opae_adapter_type type;
+ u16 device_id;
+ u16 vendor_id;
+ struct opae_reg_region region[PCI_MAX_RESOURCE];
+ int vfio_dev_fd; /* VFIO device file descriptor */
+};
+
+/* FIXME: OPAE_FPGA_NET type */
+struct opae_adapter_data_net {
+ enum opae_adapter_type type;
+};
+
+struct opae_adapter_ops {
+ int (*enumerate)(struct opae_adapter *adapter);
+ void (*destroy)(struct opae_adapter *adapter);
+};
+
+TAILQ_HEAD(opae_accelerator_list, opae_accelerator);
+
+#define opae_adapter_for_each_acc(adatper, acc) \
+ TAILQ_FOREACH(acc, &adapter->acc_list, node)
+
+struct opae_adapter {
+ const char *name;
+ struct opae_manager *mgr;
+ struct opae_accelerator_list acc_list;
+ struct opae_adapter_ops *ops;
+ void *data;
+};
+
+/* OPAE Adapter APIs */
+void *opae_adapter_data_alloc(enum opae_adapter_type type);
+#define opae_adapter_data_free(data) opae_free(data)
+
+struct opae_adapter *opae_adapter_alloc(const char *name, void *data);
+#define opae_adapter_free(adapter) opae_free(adapter)
+
+int opae_adapter_enumerate(struct opae_adapter *adapter);
+void opae_adapter_destroy(struct opae_adapter *adapter);
+static inline struct opae_manager *
+opae_adapter_get_mgr(struct opae_adapter *adapter)
+{
+ return adapter ? adapter->mgr : NULL;
+}
+
+struct opae_accelerator *
+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id);
+
+static inline void opae_adapter_add_acc(struct opae_adapter *adapter,
+ struct opae_accelerator *acc)
+{
+ TAILQ_INSERT_TAIL(&adapter->acc_list, acc, node);
+}
+
+static inline void opae_adapter_remove_acc(struct opae_adapter *adapter,
+ struct opae_accelerator *acc)
+{
+ TAILQ_REMOVE(&adapter->acc_list, acc, node);
+}
+#endif /* _OPAE_HW_API_H_*/
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c
new file mode 100644
index 00000000..89c7b492
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_ifpga_hw_api.h"
+#include "ifpga_api.h"
+
+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_get_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
+}
+
+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_set_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
+}
+
+int opae_manager_ifpga_get_info(struct opae_manager *mgr,
+ struct fpga_fme_info *fme_info)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data || !fme_info)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ spinlock_lock(&fme->lock);
+ fme_info->capability = fme->capability;
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
+ struct fpga_fme_err_irq_set *err_irq_set)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_set_irq(fme->parent, FEATURE_FIU_ID_FME, 0,
+ IFPGA_FME_FEATURE_ID_GLOBAL_ERR, err_irq_set);
+}
+
+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_get_prop(port->parent, FEATURE_FIU_ID_PORT,
+ port->port_id, prop);
+}
+
+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_set_prop(port->parent, FEATURE_FIU_ID_PORT,
+ port->port_id, prop);
+}
+
+int opae_bridge_ifpga_get_info(struct opae_bridge *br,
+ struct fpga_port_info *port_info)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data || !port_info)
+ return -EINVAL;
+
+ port = br->data;
+
+ spinlock_lock(&port->lock);
+ port_info->capability = port->capability;
+ port_info->num_uafu_irqs = port->num_uafu_irqs;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,
+ struct fpga_port_region_info *info)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data || !info)
+ return -EINVAL;
+
+ /* Only support STP region now */
+ if (info->index != PORT_REGION_INDEX_STP)
+ return -EINVAL;
+
+ port = br->data;
+
+ spinlock_lock(&port->lock);
+ info->addr = port->stp_addr;
+ info->size = port->stp_size;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
+ struct fpga_port_err_irq_set *err_irq_set)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
+ IFPGA_PORT_FEATURE_ID_ERROR, err_irq_set);
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h
new file mode 100644
index 00000000..4c2c9904
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_IFPGA_HW_API_H_
+#define _OPAE_IFPGA_HW_API_H_
+
+#include "opae_hw_api.h"
+
+/**
+ * struct feature_prop - data structure for feature property
+ * @feature_id: id of this feature.
+ * @prop_id: id of this property under this feature.
+ * @data: property value to set/get.
+ */
+struct feature_prop {
+ u64 feature_id;
+ u64 prop_id;
+ u64 data;
+};
+
+#define IFPGA_FIU_ID_FME 0x0
+#define IFPGA_FIU_ID_PORT 0x1
+
+#define IFPGA_FME_FEATURE_ID_HEADER 0x0
+#define IFPGA_FME_FEATURE_ID_THERMAL_MGMT 0x1
+#define IFPGA_FME_FEATURE_ID_POWER_MGMT 0x2
+#define IFPGA_FME_FEATURE_ID_GLOBAL_IPERF 0x3
+#define IFPGA_FME_FEATURE_ID_GLOBAL_ERR 0x4
+#define IFPGA_FME_FEATURE_ID_PR_MGMT 0x5
+#define IFPGA_FME_FEATURE_ID_HSSI 0x6
+#define IFPGA_FME_FEATURE_ID_GLOBAL_DPERF 0x7
+
+#define IFPGA_PORT_FEATURE_ID_HEADER 0x0
+#define IFPGA_PORT_FEATURE_ID_AFU 0xff
+#define IFPGA_PORT_FEATURE_ID_ERROR 0x10
+#define IFPGA_PORT_FEATURE_ID_UMSG 0x11
+#define IFPGA_PORT_FEATURE_ID_UINT 0x12
+#define IFPGA_PORT_FEATURE_ID_STP 0x13
+
+/*
+ * PROP format (TOP + SUB + ID)
+ *
+ * (~0x0) means this field is unused.
+ */
+#define PROP_TOP GENMASK(31, 24)
+#define PROP_TOP_UNUSED 0xff
+#define PROP_SUB GENMASK(23, 16)
+#define PROP_SUB_UNUSED 0xff
+#define PROP_ID GENMASK(15, 0)
+
+#define PROP(_top, _sub, _id) \
+ (SET_FIELD(PROP_TOP, _top) | SET_FIELD(PROP_SUB, _sub) |\
+ SET_FIELD(PROP_ID, _id))
+
+/* FME head feature's properties*/
+#define FME_HDR_PROP_REVISION 0x1 /* RDONLY */
+#define FME_HDR_PROP_PORTS_NUM 0x2 /* RDONLY */
+#define FME_HDR_PROP_CACHE_SIZE 0x3 /* RDONLY */
+#define FME_HDR_PROP_VERSION 0x4 /* RDONLY */
+#define FME_HDR_PROP_SOCKET_ID 0x5 /* RDONLY */
+#define FME_HDR_PROP_BITSTREAM_ID 0x6 /* RDONLY */
+#define FME_HDR_PROP_BITSTREAM_METADATA 0x7 /* RDONLY */
+
+/* FME error reporting feature's properties */
+/* FME error reporting properties format */
+#define ERR_PROP(_top, _id) PROP(_top, 0xff, _id)
+#define ERR_PROP_TOP_UNUSED PROP_TOP_UNUSED
+#define ERR_PROP_TOP_FME_ERR 0x1
+#define ERR_PROP_ROOT(_id) ERR_PROP(0xff, _id)
+#define ERR_PROP_FME_ERR(_id) ERR_PROP(ERR_PROP_TOP_FME_ERR, _id)
+
+#define FME_ERR_PROP_ERRORS ERR_PROP_FME_ERR(0x1)
+#define FME_ERR_PROP_FIRST_ERROR ERR_PROP_FME_ERR(0x2)
+#define FME_ERR_PROP_NEXT_ERROR ERR_PROP_FME_ERR(0x3)
+#define FME_ERR_PROP_CLEAR ERR_PROP_FME_ERR(0x4) /* WO */
+#define FME_ERR_PROP_REVISION ERR_PROP_ROOT(0x5)
+#define FME_ERR_PROP_PCIE0_ERRORS ERR_PROP_ROOT(0x6) /* RW */
+#define FME_ERR_PROP_PCIE1_ERRORS ERR_PROP_ROOT(0x7) /* RW */
+#define FME_ERR_PROP_NONFATAL_ERRORS ERR_PROP_ROOT(0x8)
+#define FME_ERR_PROP_CATFATAL_ERRORS ERR_PROP_ROOT(0x9)
+#define FME_ERR_PROP_INJECT_ERRORS ERR_PROP_ROOT(0xa) /* RW */
+
+/* FME thermal feature's properties */
+#define FME_THERMAL_PROP_THRESHOLD1 0x1 /* RW */
+#define FME_THERMAL_PROP_THRESHOLD2 0x2 /* RW */
+#define FME_THERMAL_PROP_THRESHOLD_TRIP 0x3 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD1_REACHED 0x4 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD2_REACHED 0x5 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD1_POLICY 0x6 /* RW */
+#define FME_THERMAL_PROP_TEMPERATURE 0x7 /* RDONLY */
+#define FME_THERMAL_PROP_REVISION 0x8 /* RDONLY */
+
+/* FME power feature's properties */
+#define FME_PWR_PROP_CONSUMED 0x1 /* RDONLY */
+#define FME_PWR_PROP_THRESHOLD1 0x2 /* RW */
+#define FME_PWR_PROP_THRESHOLD2 0x3 /* RW */
+#define FME_PWR_PROP_THRESHOLD1_STATUS 0x4 /* RDONLY */
+#define FME_PWR_PROP_THRESHOLD2_STATUS 0x5 /* RDONLY */
+#define FME_PWR_PROP_RTL 0x6 /* RDONLY */
+#define FME_PWR_PROP_XEON_LIMIT 0x7 /* RDONLY */
+#define FME_PWR_PROP_FPGA_LIMIT 0x8 /* RDONLY */
+#define FME_PWR_PROP_REVISION 0x9 /* RDONLY */
+
+/* FME iperf/dperf PROP format */
+#define PERF_PROP_TOP_CACHE 0x1
+#define PERF_PROP_TOP_VTD 0x2
+#define PERF_PROP_TOP_FAB 0x3
+#define PERF_PROP_TOP_UNUSED PROP_TOP_UNUSED
+#define PERF_PROP_SUB_UNUSED PROP_SUB_UNUSED
+
+#define PERF_PROP_ROOT(_id) PROP(0xff, 0xff, _id)
+#define PERF_PROP_CACHE(_id) PROP(PERF_PROP_TOP_CACHE, 0xff, _id)
+#define PERF_PROP_VTD(_sub, _id) PROP(PERF_PROP_TOP_VTD, _sub, _id)
+#define PERF_PROP_VTD_ROOT(_id) PROP(PERF_PROP_TOP_VTD, 0xff, _id)
+#define PERF_PROP_FAB(_sub, _id) PROP(PERF_PROP_TOP_FAB, _sub, _id)
+#define PERF_PROP_FAB_ROOT(_id) PROP(PERF_PROP_TOP_FAB, 0xff, _id)
+
+/* FME iperf feature's properties */
+#define FME_IPERF_PROP_CLOCK PERF_PROP_ROOT(0x1)
+#define FME_IPERF_PROP_REVISION PERF_PROP_ROOT(0x2)
+
+/* iperf CACHE properties */
+#define FME_IPERF_PROP_CACHE_FREEZE PERF_PROP_CACHE(0x1) /* RW */
+#define FME_IPERF_PROP_CACHE_READ_HIT PERF_PROP_CACHE(0x2)
+#define FME_IPERF_PROP_CACHE_READ_MISS PERF_PROP_CACHE(0x3)
+#define FME_IPERF_PROP_CACHE_WRITE_HIT PERF_PROP_CACHE(0x4)
+#define FME_IPERF_PROP_CACHE_WRITE_MISS PERF_PROP_CACHE(0x5)
+#define FME_IPERF_PROP_CACHE_HOLD_REQUEST PERF_PROP_CACHE(0x6)
+#define FME_IPERF_PROP_CACHE_TX_REQ_STALL PERF_PROP_CACHE(0x7)
+#define FME_IPERF_PROP_CACHE_RX_REQ_STALL PERF_PROP_CACHE(0x8)
+#define FME_IPERF_PROP_CACHE_RX_EVICTION PERF_PROP_CACHE(0x9)
+#define FME_IPERF_PROP_CACHE_DATA_WRITE_PORT_CONTENTION PERF_PROP_CACHE(0xa)
+#define FME_IPERF_PROP_CACHE_TAG_WRITE_PORT_CONTENTION PERF_PROP_CACHE(0xb)
+/* iperf VTD properties */
+#define FME_IPERF_PROP_VTD_FREEZE PERF_PROP_VTD_ROOT(0x1) /* RW */
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_HIT PERF_PROP_VTD_ROOT(0x2)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_HIT PERF_PROP_VTD_ROOT(0x3)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_HIT PERF_PROP_VTD_ROOT(0x4)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_HIT PERF_PROP_VTD_ROOT(0x5)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_HIT PERF_PROP_VTD_ROOT(0x6)
+#define FME_IPERF_PROP_VTD_SIP_RCC_HIT PERF_PROP_VTD_ROOT(0x7)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_MISS PERF_PROP_VTD_ROOT(0x8)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_MISS PERF_PROP_VTD_ROOT(0x9)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_MISS PERF_PROP_VTD_ROOT(0xa)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_MISS PERF_PROP_VTD_ROOT(0xb)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_MISS PERF_PROP_VTD_ROOT(0xc)
+#define FME_IPERF_PROP_VTD_SIP_RCC_MISS PERF_PROP_VTD_ROOT(0xd)
+#define FME_IPERF_PROP_VTD_PORT_READ_TRANSACTION(n) PERF_PROP_VTD(n, 0xe)
+#define FME_IPERF_PROP_VTD_PORT_WRITE_TRANSACTION(n) PERF_PROP_VTD(n, 0xf)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_READ_HIT(n) PERF_PROP_VTD(n, 0x10)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_WRITE_HIT(n) PERF_PROP_VTD(n, 0x11)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_4K_FILL(n) PERF_PROP_VTD(n, 0x12)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_2M_FILL(n) PERF_PROP_VTD(n, 0x13)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_1G_FILL(n) PERF_PROP_VTD(n, 0x14)
+/* iperf FAB properties */
+#define FME_IPERF_PROP_FAB_FREEZE PERF_PROP_FAB_ROOT(0x1) /* RW */
+#define FME_IPERF_PROP_FAB_PCIE0_READ PERF_PROP_FAB_ROOT(0x2)
+#define FME_IPERF_PROP_FAB_PORT_PCIE0_READ(n) PERF_PROP_FAB(n, 0x2)
+#define FME_IPERF_PROP_FAB_PCIE0_WRITE PERF_PROP_FAB_ROOT(0x3)
+#define FME_IPERF_PROP_FAB_PORT_PCIE0_WRITE(n) PERF_PROP_FAB(n, 0x3)
+#define FME_IPERF_PROP_FAB_PCIE1_READ PERF_PROP_FAB_ROOT(0x4)
+#define FME_IPERF_PROP_FAB_PORT_PCIE1_READ(n) PERF_PROP_FAB(n, 0x4)
+#define FME_IPERF_PROP_FAB_PCIE1_WRITE PERF_PROP_FAB_ROOT(0x5)
+#define FME_IPERF_PROP_FAB_PORT_PCIE1_WRITE(n) PERF_PROP_FAB(n, 0x5)
+#define FME_IPERF_PROP_FAB_UPI_READ PERF_PROP_FAB_ROOT(0x6)
+#define FME_IPERF_PROP_FAB_PORT_UPI_READ(n) PERF_PROP_FAB(n, 0x6)
+#define FME_IPERF_PROP_FAB_UPI_WRITE PERF_PROP_FAB_ROOT(0x7)
+#define FME_IPERF_PROP_FAB_PORT_UPI_WRITE(n) PERF_PROP_FAB(n, 0x7)
+#define FME_IPERF_PROP_FAB_MMIO_READ PERF_PROP_FAB_ROOT(0x8)
+#define FME_IPERF_PROP_FAB_PORT_MMIO_READ(n) PERF_PROP_FAB(n, 0x8)
+#define FME_IPERF_PROP_FAB_MMIO_WRITE PERF_PROP_FAB_ROOT(0x9)
+#define FME_IPERF_PROP_FAB_PORT_MMIO_WRITE(n) PERF_PROP_FAB(n, 0x9)
+#define FME_IPERF_PROP_FAB_ENABLE PERF_PROP_FAB_ROOT(0xa) /* RW */
+#define FME_IPERF_PROP_FAB_PORT_ENABLE(n) PERF_PROP_FAB(n, 0xa) /* RW */
+
+/* FME dperf properties */
+#define FME_DPERF_PROP_CLOCK PERF_PROP_ROOT(0x1)
+#define FME_DPERF_PROP_REVISION PERF_PROP_ROOT(0x2)
+
+/* dperf FAB properties */
+#define FME_DPERF_PROP_FAB_FREEZE PERF_PROP_FAB_ROOT(0x1) /* RW */
+#define FME_DPERF_PROP_FAB_PCIE0_READ PERF_PROP_FAB_ROOT(0x2)
+#define FME_DPERF_PROP_FAB_PORT_PCIE0_READ(n) PERF_PROP_FAB(n, 0x2)
+#define FME_DPERF_PROP_FAB_PCIE0_WRITE PERF_PROP_FAB_ROOT(0x3)
+#define FME_DPERF_PROP_FAB_PORT_PCIE0_WRITE(n) PERF_PROP_FAB(n, 0x3)
+#define FME_DPERF_PROP_FAB_MMIO_READ PERF_PROP_FAB_ROOT(0x4)
+#define FME_DPERF_PROP_FAB_PORT_MMIO_READ(n) PERF_PROP_FAB(n, 0x4)
+#define FME_DPERF_PROP_FAB_MMIO_WRITE PERF_PROP_FAB_ROOT(0x5)
+#define FME_DPERF_PROP_FAB_PORT_MMIO_WRITE(n) PERF_PROP_FAB(n, 0x5)
+#define FME_DPERF_PROP_FAB_ENABLE PERF_PROP_FAB_ROOT(0x6) /* RW */
+#define FME_DPERF_PROP_FAB_PORT_ENABLE(n) PERF_PROP_FAB(n, 0x6) /* RW */
+
+/*PORT hdr feature's properties*/
+#define PORT_HDR_PROP_REVISION 0x1 /* RDONLY */
+#define PORT_HDR_PROP_PORTIDX 0x2 /* RDONLY */
+#define PORT_HDR_PROP_LATENCY_TOLERANCE 0x3 /* RDONLY */
+#define PORT_HDR_PROP_AP1_EVENT 0x4 /* RW */
+#define PORT_HDR_PROP_AP2_EVENT 0x5 /* RW */
+#define PORT_HDR_PROP_POWER_STATE 0x6 /* RDONLY */
+#define PORT_HDR_PROP_USERCLK_FREQCMD 0x7 /* RW */
+#define PORT_HDR_PROP_USERCLK_FREQCNTRCMD 0x8 /* RW */
+#define PORT_HDR_PROP_USERCLK_FREQSTS 0x9 /* RDONLY */
+#define PORT_HDR_PROP_USERCLK_CNTRSTS 0xa /* RDONLY */
+
+/*PORT error feature's properties*/
+#define PORT_ERR_PROP_REVISION 0x1 /* RDONLY */
+#define PORT_ERR_PROP_ERRORS 0x2 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_ERROR 0x3 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB 0x4 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB 0x5 /* RDONLY */
+#define PORT_ERR_PROP_CLEAR 0x6 /* WRONLY */
+
+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
+ struct feature_prop *prop);
+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
+ struct feature_prop *prop);
+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
+ struct feature_prop *prop);
+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
+ struct feature_prop *prop);
+
+/*
+ * Retrieve information about the fpga fme.
+ * Driver fills the info in provided struct fpga_fme_info.
+ */
+struct fpga_fme_info {
+ u32 capability; /* The capability of FME device */
+#define FPGA_FME_CAP_ERR_IRQ (1 << 0) /* Support fme error interrupt */
+};
+
+int opae_manager_ifpga_get_info(struct opae_manager *mgr,
+ struct fpga_fme_info *fme_info);
+
+/* Set eventfd information for ifpga FME error interrupt */
+struct fpga_fme_err_irq_set {
+ s32 evtfd; /* Eventfd handler */
+};
+
+int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
+ struct fpga_fme_err_irq_set *err_irq_set);
+
+/*
+ * Retrieve information about the fpga port.
+ * Driver fills the info in provided struct fpga_port_info.
+ */
+struct fpga_port_info {
+ u32 capability; /* The capability of port device */
+#define FPGA_PORT_CAP_ERR_IRQ (1 << 0) /* Support port error interrupt */
+#define FPGA_PORT_CAP_UAFU_IRQ (1 << 1) /* Support uafu error interrupt */
+ u32 num_umsgs; /* The number of allocated umsgs */
+ u32 num_uafu_irqs; /* The number of uafu interrupts */
+};
+
+int opae_bridge_ifpga_get_info(struct opae_bridge *br,
+ struct fpga_port_info *port_info);
+/*
+ * Retrieve region information about the fpga port.
+ * Driver needs to fill the index of struct fpga_port_region_info.
+ */
+struct fpga_port_region_info {
+ u32 index;
+#define PORT_REGION_INDEX_STP (1 << 1) /* Signal Tap Region */
+ u64 size; /* Region Size */
+ u8 *addr; /* Base address of the region */
+};
+
+int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,
+ struct fpga_port_region_info *info);
+
+/* Set eventfd information for ifpga port error interrupt */
+struct fpga_port_err_irq_set {
+ s32 evtfd; /* Eventfd handler */
+};
+
+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
+ struct fpga_port_err_irq_set *err_irq_set);
+
+#endif /* _OPAE_IFPGA_HW_API_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_osdep.h b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_osdep.h
new file mode 100644
index 00000000..90f54f77
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/opae_osdep.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_OSDEP_H
+#define _OPAE_OSDEP_H
+
+#include <string.h>
+#include <stdbool.h>
+
+#ifdef RTE_LIBRTE_EAL
+#include "osdep_rte/osdep_generic.h"
+#else
+#include "osdep_raw/osdep_generic.h"
+#endif
+
+#define __iomem
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef uint64_t dma_addr_t;
+
+struct uuid {
+ u8 b[16];
+};
+
+#ifndef LINUX_MACROS
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#endif
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* BIT_ULL */
+#ifndef GENMASK
+#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#endif /* GENMASK */
+#ifndef GENMASK_ULL
+#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#endif /* GENMASK_ULL */
+#endif /* LINUX_MACROS */
+
+#define SET_FIELD(m, v) (((v) << (__builtin_ffsll(m) - 1)) & (m))
+#define GET_FIELD(m, v) (((v) & (m)) >> (__builtin_ffsll(m) - 1))
+
+#define dev_err(x, args...) dev_printf(ERR, args)
+#define dev_info(x, args...) dev_printf(INFO, args)
+#define dev_warn(x, args...) dev_printf(WARNING, args)
+
+#ifdef OPAE_DEBUG
+#define dev_debug(x, args...) dev_printf(DEBUG, args)
+#else
+#define dev_debug(x, args...) do { } while (0)
+#endif
+
+#define pr_err(y, args...) dev_err(0, y, ##args)
+#define pr_warn(y, args...) dev_warn(0, y, ##args)
+#define pr_info(y, args...) dev_info(0, y, ##args)
+
+#ifndef WARN_ON
+#define WARN_ON(x) do { \
+ int ret = !!(x); \
+ if (unlikely(ret)) \
+ pr_warn("WARN_ON: \"" #x "\" at %s:%d\n", __func__, __LINE__); \
+} while (0)
+#endif
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define udelay(x) opae_udelay(x)
+#define msleep(x) opae_udelay(1000 * (x))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#endif
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h
new file mode 100644
index 00000000..895a1d82
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OSDEP_RAW_GENERIC_H
+#define _OSDEP_RAW_GENERIC_H
+
+#define compiler_barrier() (asm volatile ("" : : : "memory"))
+
+#define io_wmb() compiler_barrier()
+#define io_rmb() compiler_barrier()
+
+static inline uint8_t opae_readb(const volatile void *addr)
+{
+ uint8_t val;
+
+ val = *(const volatile uint8_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint16_t opae_readw(const volatile void *addr)
+{
+ uint16_t val;
+
+ val = *(const volatile uint16_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint32_t opae_readl(const volatile void *addr)
+{
+ uint32_t val;
+
+ val = *(const volatile uint32_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint64_t opae_readq(const volatile void *addr)
+{
+ uint64_t val;
+
+ val = *(const volatile uint64_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline void opae_writeb(uint8_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint8_t *)addr = value;
+}
+
+static inline void opae_writew(uint16_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint16_t *)addr = value;
+}
+
+static inline void opae_writel(uint32_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint32_t *)addr = value;
+}
+
+static inline void opae_writeq(uint64_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint64_t *)addr = value;
+}
+
+#define opae_free(addr) free(addr)
+
+#endif
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h
new file mode 100644
index 00000000..76902e28
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OSDEP_RTE_GENERIC_H
+#define _OSDEP_RTE_GENERIC_H
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+#include <rte_malloc.h>
+
+#define dev_printf(level, fmt, args...) \
+ RTE_LOG(level, PMD, "osdep_rte: " fmt, ## args)
+
+#define osdep_panic(...) rte_panic(...)
+
+#define opae_udelay(x) rte_delay_us(x)
+
+#define opae_readb(addr) rte_read8(addr)
+#define opae_readw(addr) rte_read16(addr)
+#define opae_readl(addr) rte_read32(addr)
+#define opae_readq(addr) rte_read64(addr)
+#define opae_writeb(value, addr) rte_write8(value, addr)
+#define opae_writew(value, addr) rte_write16(value, addr)
+#define opae_writel(value, addr) rte_write32(value, addr)
+#define opae_writeq(value, addr) rte_write64(value, addr)
+
+#define opae_malloc(size) rte_malloc(NULL, size, 0)
+#define opae_zmalloc(size) rte_zmalloc(NULL, size, 0)
+#define opae_free(addr) rte_free(addr)
+
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+#define spinlock_t rte_spinlock_t
+#define spinlock_init(x) rte_spinlock_init(x)
+#define spinlock_lock(x) rte_spinlock_lock(x)
+#define spinlock_unlock(x) rte_spinlock_unlock(x)
+
+#endif
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/ifpga_rawdev.c b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
new file mode 100644
index 00000000..3fed0578
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
@@ -0,0 +1,617 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_devargs.h>
+#include <rte_memcpy.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_kvargs.h>
+#include <rte_alarm.h>
+
+#include <rte_errno.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+#include <rte_bus_vdev.h>
+
+#include "base/opae_hw_api.h"
+#include "rte_rawdev.h"
+#include "rte_rawdev_pmd.h"
+#include "rte_bus_ifpga.h"
+#include "ifpga_common.h"
+#include "ifpga_logs.h"
+#include "ifpga_rawdev.h"
+
+int ifpga_rawdev_logtype;
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+/* PCI Device ID */
+#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
+#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
+#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
+/* VF Device */
+#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
+#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
+#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
+#define RTE_MAX_RAW_DEVICE 10
+
+static const struct rte_pci_id pci_ifpga_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static int
+ifpga_fill_afu_dev(struct opae_accelerator *acc,
+ struct rte_afu_device *afu_dev)
+{
+ struct rte_mem_resource *res = afu_dev->mem_resource;
+ struct opae_acc_region_info region_info;
+ struct opae_acc_info info;
+ unsigned long i;
+ int ret;
+
+ ret = opae_acc_get_info(acc, &info);
+ if (ret)
+ return ret;
+
+ if (info.num_regions > PCI_MAX_RESOURCE)
+ return -EFAULT;
+
+ afu_dev->num_region = info.num_regions;
+
+ for (i = 0; i < info.num_regions; i++) {
+ region_info.index = i;
+ ret = opae_acc_get_region_info(acc, &region_info);
+ if (ret)
+ return ret;
+
+ if ((region_info.flags & ACC_REGION_MMIO) &&
+ (region_info.flags & ACC_REGION_READ) &&
+ (region_info.flags & ACC_REGION_WRITE)) {
+ res[i].phys_addr = region_info.phys_addr;
+ res[i].len = region_info.len;
+ res[i].addr = region_info.addr;
+ } else
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void
+ifpga_rawdev_info_get(struct rte_rawdev *dev,
+ rte_rawdev_obj_t dev_info)
+{
+ struct opae_adapter *adapter;
+ struct opae_accelerator *acc;
+ struct rte_afu_device *afu_dev;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ if (!dev_info) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid request");
+ return;
+ }
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return;
+
+ afu_dev = dev_info;
+ afu_dev->rawdev = dev;
+
+ /* find opae_accelerator and fill info into afu_device */
+ opae_adapter_for_each_acc(adapter, acc) {
+ if (acc->index != afu_dev->id.port)
+ continue;
+
+ if (ifpga_fill_afu_dev(acc, afu_dev)) {
+ IFPGA_RAWDEV_PMD_ERR("cannot get info\n");
+ return;
+ }
+ }
+}
+
+static int
+ifpga_rawdev_configure(const struct rte_rawdev *dev,
+ rte_rawdev_obj_t config)
+{
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ return config ? 0 : 1;
+}
+
+static int
+ifpga_rawdev_start(struct rte_rawdev *dev)
+{
+ int ret = 0;
+ struct opae_adapter *adapter;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return -ENODEV;
+
+ return ret;
+}
+
+static void
+ifpga_rawdev_stop(struct rte_rawdev *dev)
+{
+ dev->started = 0;
+}
+
+static int
+ifpga_rawdev_close(struct rte_rawdev *dev)
+{
+ return dev ? 0:1;
+}
+
+static int
+ifpga_rawdev_reset(struct rte_rawdev *dev)
+{
+ return dev ? 0:1;
+}
+
+static int
+fpga_pr(struct rte_rawdev *raw_dev, u32 port_id, u64 *buffer, u32 size,
+ u64 *status)
+{
+
+ struct opae_adapter *adapter;
+ struct opae_manager *mgr;
+ struct opae_accelerator *acc;
+ struct opae_bridge *br;
+ int ret;
+
+ adapter = ifpga_rawdev_get_priv(raw_dev);
+ if (!adapter)
+ return -ENODEV;
+
+ mgr = opae_adapter_get_mgr(adapter);
+ if (!mgr)
+ return -ENODEV;
+
+ acc = opae_adapter_get_acc(adapter, port_id);
+ if (!acc)
+ return -ENODEV;
+
+ br = opae_acc_get_br(acc);
+ if (!br)
+ return -ENODEV;
+
+ ret = opae_manager_flash(mgr, port_id, buffer, size, status);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("%s pr error %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = opae_bridge_reset(br);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("%s reset port:%d error %d\n",
+ __func__, port_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+rte_fpga_do_pr(struct rte_rawdev *rawdev, int port_id,
+ const char *file_name)
+{
+ struct stat file_stat;
+ int file_fd;
+ int ret = 0;
+ ssize_t buffer_size;
+ void *buffer;
+ u64 pr_error;
+
+ if (!file_name)
+ return -EINVAL;
+
+ file_fd = open(file_name, O_RDONLY);
+ if (file_fd < 0) {
+ IFPGA_RAWDEV_PMD_ERR("%s: open file error: %s\n",
+ __func__, file_name);
+ IFPGA_RAWDEV_PMD_ERR("Message : %s\n", strerror(errno));
+ return -EINVAL;
+ }
+ ret = stat(file_name, &file_stat);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("stat on bitstream file failed: %s\n",
+ file_name);
+ return -EINVAL;
+ }
+ buffer_size = file_stat.st_size;
+ IFPGA_RAWDEV_PMD_INFO("bitstream file size: %zu\n", buffer_size);
+ buffer = rte_malloc(NULL, buffer_size, 0);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto close_fd;
+ }
+
+ /*read the raw data*/
+ if (buffer_size != read(file_fd, (void *)buffer, buffer_size)) {
+ ret = -EINVAL;
+ goto free_buffer;
+ }
+
+ /*do PR now*/
+ ret = fpga_pr(rawdev, port_id, buffer, buffer_size, &pr_error);
+ IFPGA_RAWDEV_PMD_INFO("downloading to device port %d....%s.\n", port_id,
+ ret ? "failed" : "success");
+ if (ret) {
+ ret = -EINVAL;
+ goto free_buffer;
+ }
+
+free_buffer:
+ if (buffer)
+ rte_free(buffer);
+close_fd:
+ close(file_fd);
+ file_fd = 0;
+ return ret;
+}
+
+static int
+ifpga_rawdev_pr(struct rte_rawdev *dev,
+ rte_rawdev_obj_t pr_conf)
+{
+ struct opae_adapter *adapter;
+ struct rte_afu_pr_conf *afu_pr_conf;
+ int ret;
+ struct uuid uuid;
+ struct opae_accelerator *acc;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return -ENODEV;
+
+ if (!pr_conf)
+ return -EINVAL;
+
+ afu_pr_conf = pr_conf;
+
+ if (afu_pr_conf->pr_enable) {
+ ret = rte_fpga_do_pr(dev,
+ afu_pr_conf->afu_id.port,
+ afu_pr_conf->bs_path);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("do pr error %d\n", ret);
+ return ret;
+ }
+ }
+
+ acc = opae_adapter_get_acc(adapter, afu_pr_conf->afu_id.port);
+ if (!acc)
+ return -ENODEV;
+
+ ret = opae_acc_get_uuid(acc, &uuid);
+ if (ret)
+ return ret;
+
+ memcpy(&afu_pr_conf->afu_id.uuid.uuid_low, uuid.b, sizeof(u64));
+ memcpy(&afu_pr_conf->afu_id.uuid.uuid_high, uuid.b + 8, sizeof(u64));
+
+ IFPGA_RAWDEV_PMD_INFO("%s: uuid_l=0x%lx, uuid_h=0x%lx\n", __func__,
+ (unsigned long)afu_pr_conf->afu_id.uuid.uuid_low,
+ (unsigned long)afu_pr_conf->afu_id.uuid.uuid_high);
+
+ return 0;
+}
+
+static const struct rte_rawdev_ops ifpga_rawdev_ops = {
+ .dev_info_get = ifpga_rawdev_info_get,
+ .dev_configure = ifpga_rawdev_configure,
+ .dev_start = ifpga_rawdev_start,
+ .dev_stop = ifpga_rawdev_stop,
+ .dev_close = ifpga_rawdev_close,
+ .dev_reset = ifpga_rawdev_reset,
+
+ .queue_def_conf = NULL,
+ .queue_setup = NULL,
+ .queue_release = NULL,
+
+ .attr_get = NULL,
+ .attr_set = NULL,
+
+ .enqueue_bufs = NULL,
+ .dequeue_bufs = NULL,
+
+ .dump = NULL,
+
+ .xstats_get = NULL,
+ .xstats_get_names = NULL,
+ .xstats_get_by_name = NULL,
+ .xstats_reset = NULL,
+
+ .firmware_status_get = NULL,
+ .firmware_version_get = NULL,
+ .firmware_load = ifpga_rawdev_pr,
+ .firmware_unload = NULL,
+
+ .dev_selftest = NULL,
+};
+
+static int
+ifpga_rawdev_create(struct rte_pci_device *pci_dev,
+ int socket_id)
+{
+ int ret = 0;
+ struct rte_rawdev *rawdev = NULL;
+ struct opae_adapter *adapter = NULL;
+ struct opae_manager *mgr = NULL;
+ struct opae_adapter_data_pci *data = NULL;
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ int i;
+
+ if (!pci_dev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid pci_dev of the device!");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+ IFPGA_RAWDEV_PMD_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct opae_adapter),
+ socket_id);
+ if (rawdev == NULL) {
+ IFPGA_RAWDEV_PMD_ERR("Unable to allocate rawdevice");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* alloc OPAE_FPGA_PCI data to register to OPAE hardware level API */
+ data = opae_adapter_data_alloc(OPAE_FPGA_PCI);
+ if (!data) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* init opae_adapter_data_pci for device specific information */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ data->region[i].phys_addr = pci_dev->mem_resource[i].phys_addr;
+ data->region[i].len = pci_dev->mem_resource[i].len;
+ data->region[i].addr = pci_dev->mem_resource[i].addr;
+ }
+ data->device_id = pci_dev->id.device_id;
+ data->vendor_id = pci_dev->id.vendor_id;
+
+ /* create a opae_adapter based on above device data */
+ adapter = opae_adapter_alloc(pci_dev->device.name, data);
+ if (!adapter) {
+ ret = -ENOMEM;
+ goto free_adapter_data;
+ }
+
+ rawdev->dev_ops = &ifpga_rawdev_ops;
+ rawdev->device = &pci_dev->device;
+ rawdev->driver_name = pci_dev->device.driver->name;
+
+ rawdev->dev_private = adapter;
+
+ /* must enumerate the adapter before use it */
+ ret = opae_adapter_enumerate(adapter);
+ if (ret)
+ goto free_adapter;
+
+ /* get opae_manager to rawdev */
+ mgr = opae_adapter_get_mgr(adapter);
+ if (mgr) {
+ /* PF function */
+ IFPGA_RAWDEV_PMD_INFO("this is a PF function");
+ }
+
+ return ret;
+
+free_adapter:
+ if (adapter)
+ opae_adapter_free(adapter);
+free_adapter_data:
+ if (data)
+ opae_adapter_data_free(data);
+cleanup:
+ if (rawdev)
+ rte_rawdev_pmd_release(rawdev);
+
+ return ret;
+}
+
+static int
+ifpga_rawdev_destroy(struct rte_pci_device *pci_dev)
+{
+ int ret;
+ struct rte_rawdev *rawdev;
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ struct opae_adapter *adapter;
+
+ if (!pci_dev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid pci_dev of the device!");
+ ret = -EINVAL;
+ return ret;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+ IFPGA_RAWDEV_PMD_INFO("Closing %s on NUMA node %d",
+ name, rte_socket_id());
+
+ rawdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rawdev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ adapter = ifpga_rawdev_get_priv(rawdev);
+ if (!adapter)
+ return -ENODEV;
+
+ opae_adapter_data_free(adapter->data);
+ opae_adapter_free(adapter);
+
+ /* rte_rawdev_close is called by pmd_release */
+ ret = rte_rawdev_pmd_release(rawdev);
+ if (ret)
+ IFPGA_RAWDEV_PMD_DEBUG("Device cleanup failed");
+
+ return ret;
+}
+
+static int
+ifpga_rawdev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+ return ifpga_rawdev_create(pci_dev, rte_socket_id());
+}
+
+static int
+ifpga_rawdev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return ifpga_rawdev_destroy(pci_dev);
+}
+
+static struct rte_pci_driver rte_ifpga_rawdev_pmd = {
+ .id_table = pci_ifpga_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ifpga_rawdev_pci_probe,
+ .remove = ifpga_rawdev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(ifpga_rawdev_pci_driver, rte_ifpga_rawdev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(ifpga_rawdev_pci_driver, rte_ifpga_rawdev_pmd);
+RTE_PMD_REGISTER_KMOD_DEP(ifpga_rawdev_pci_driver, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(ifpga_rawdev_init_log)
+{
+ ifpga_rawdev_logtype = rte_log_register("driver.raw.init");
+ if (ifpga_rawdev_logtype >= 0)
+ rte_log_set_level(ifpga_rawdev_logtype, RTE_LOG_NOTICE);
+}
+
+static const char * const valid_args[] = {
+#define IFPGA_ARG_NAME "ifpga"
+ IFPGA_ARG_NAME,
+#define IFPGA_ARG_PORT "port"
+ IFPGA_ARG_PORT,
+#define IFPGA_AFU_BTS "afu_bts"
+ IFPGA_AFU_BTS,
+ NULL
+};
+
+static int
+ifpga_cfg_probe(struct rte_vdev_device *dev)
+{
+ struct rte_devargs *devargs;
+ struct rte_kvargs *kvlist = NULL;
+ int port;
+ char *name = NULL;
+ char dev_name[RTE_RAWDEV_NAME_MAX_LEN];
+
+ devargs = dev->device.devargs;
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (!kvlist) {
+ IFPGA_RAWDEV_PMD_LOG(ERR, "error when parsing param");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_NAME) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_ARG_NAME,
+ &rte_ifpga_get_string_arg, &name) < 0) {
+ IFPGA_RAWDEV_PMD_ERR("error to parse %s",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+ } else {
+ IFPGA_RAWDEV_PMD_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_PORT) == 1) {
+ if (rte_kvargs_process(kvlist,
+ IFPGA_ARG_PORT,
+ &rte_ifpga_get_integer32_arg,
+ &port) < 0) {
+ IFPGA_RAWDEV_PMD_ERR("error to parse %s",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+ } else {
+ IFPGA_RAWDEV_PMD_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+
+ memset(dev_name, 0, sizeof(dev_name));
+ snprintf(dev_name, RTE_RAWDEV_NAME_MAX_LEN, "%d|%s",
+ port, name);
+
+ rte_eal_hotplug_add(RTE_STR(IFPGA_BUS_NAME),
+ dev_name, devargs->args);
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (name)
+ free(name);
+
+ return 0;
+}
+
+static int
+ifpga_cfg_remove(struct rte_vdev_device *vdev)
+{
+ IFPGA_RAWDEV_PMD_INFO("Remove ifpga_cfg %p",
+ vdev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver ifpga_cfg_driver = {
+ .probe = ifpga_cfg_probe,
+ .remove = ifpga_cfg_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(ifpga_rawdev_cfg, ifpga_cfg_driver);
+RTE_PMD_REGISTER_ALIAS(ifpga_rawdev_cfg, ifpga_cfg);
+RTE_PMD_REGISTER_PARAM_STRING(ifpga_rawdev_cfg,
+ "ifpga=<string> "
+ "port=<int> "
+ "afu_bts=<path>");
+
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/ifpga_rawdev.h b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/ifpga_rawdev.h
new file mode 100644
index 00000000..c7759b8b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/ifpga_rawdev.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_RAWDEV_H_
+#define _IFPGA_RAWDEV_H_
+
+extern int ifpga_rawdev_logtype;
+
+#define IFPGA_RAWDEV_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifpga_rawdev_logtype, "ifgpa: " fmt, \
+ ##args)
+
+#define IFPGA_RAWDEV_PMD_FUNC_TRACE() IFPGA_RAWDEV_PMD_LOG(DEBUG, ">>")
+
+#define IFPGA_RAWDEV_PMD_DEBUG(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(DEBUG, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_INFO(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(INFO, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_ERR(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(ERR, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_WARN(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(WARNING, fmt, ## args)
+
+enum ifpga_rawdev_device_state {
+ IFPGA_IDLE,
+ IFPGA_READY,
+ IFPGA_ERROR
+};
+
+static inline struct opae_adapter *
+ifpga_rawdev_get_priv(const struct rte_rawdev *rawdev)
+{
+ return rawdev->dev_private;
+}
+
+#endif /* _IFPGA_RAWDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/meson.build b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/meson.build
new file mode 100644
index 00000000..67256872
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+version = 1
+
+subdir('base')
+objs = [base_objs]
+
+deps += ['rawdev', 'pci', 'bus_pci', 'kvargs',
+ 'bus_vdev', 'bus_ifpga']
+sources = files('ifpga_rawdev.c')
+
+includes += include_directories('base')
+
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/raw/meson.build b/src/spdk/dpdk/drivers/raw/meson.build
new file mode 100644
index 00000000..a61cdcce
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+drivers = ['skeleton_rawdev', 'dpaa2_cmdif', 'dpaa2_qdma', 'ifpga_rawdev']
+std_deps = ['rawdev']
+config_flag_fmt = 'RTE_LIBRTE_PMD_@0@_RAWDEV'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/src/spdk/dpdk/drivers/raw/skeleton_rawdev/Makefile b/src/spdk/dpdk/drivers/raw/skeleton_rawdev/Makefile
new file mode 100644
index 00000000..3f97c2ee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/skeleton_rawdev/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_skeleton_rawdev.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+
+EXPORT_MAP := rte_pmd_skeleton_rawdev_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV) += skeleton_rawdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV) += skeleton_rawdev_test.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/raw/skeleton_rawdev/meson.build b/src/spdk/dpdk/drivers/raw/skeleton_rawdev/meson.build
new file mode 100644
index 00000000..b4a6ed08
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/skeleton_rawdev/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+deps += ['rawdev', 'kvargs', 'mbuf', 'bus_vdev']
+sources = files('skeleton_rawdev.c',
+ 'skeleton_rawdev_test.c')
diff --git a/src/spdk/dpdk/drivers/raw/skeleton_rawdev/rte_pmd_skeleton_rawdev_version.map b/src/spdk/dpdk/drivers/raw/skeleton_rawdev/rte_pmd_skeleton_rawdev_version.map
new file mode 100644
index 00000000..179140fb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/skeleton_rawdev/rte_pmd_skeleton_rawdev_version.map
@@ -0,0 +1,4 @@
+DPDK_18.02 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev.c b/src/spdk/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
new file mode 100644
index 00000000..6518a2d9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
@@ -0,0 +1,765 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_lcore.h>
+#include <rte_bus_vdev.h>
+
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include "skeleton_rawdev.h"
+
+/* Dynamic log type identifier */
+int skeleton_pmd_logtype;
+
+/* Count of instances */
+uint16_t skeldev_init_once;
+
+/**< Rawdev Skeleton dummy driver name */
+#define SKELETON_PMD_RAWDEV_NAME rawdev_skeleton
+
+/**< Skeleton rawdev driver object */
+static struct rte_vdev_driver skeleton_pmd_drv;
+
+struct queue_buffers {
+ void *bufs[SKELETON_QUEUE_MAX_DEPTH];
+};
+
+static struct queue_buffers queue_buf[SKELETON_MAX_QUEUES] = {};
+static void clear_queue_bufs(int queue_id);
+
+static void skeleton_rawdev_info_get(struct rte_rawdev *dev,
+ rte_rawdev_obj_t dev_info)
+{
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_rawdev_conf *skeldev_conf;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev_info) {
+ SKELETON_PMD_ERR("Invalid request");
+ return;
+ }
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ skeldev_conf = dev_info;
+
+ skeldev_conf->num_queues = skeldev->num_queues;
+ skeldev_conf->capabilities = skeldev->capabilities;
+ skeldev_conf->device_state = skeldev->device_state;
+ skeldev_conf->firmware_state = skeldev->fw.firmware_state;
+}
+
+static int skeleton_rawdev_configure(const struct rte_rawdev *dev,
+ rte_rawdev_obj_t config)
+{
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_rawdev_conf *skeldev_conf;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ if (!config) {
+ SKELETON_PMD_ERR("Invalid configuration");
+ return -EINVAL;
+ }
+
+ skeldev_conf = config;
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ if (skeldev_conf->num_queues <= SKELETON_MAX_QUEUES)
+ skeldev->num_queues = skeldev_conf->num_queues;
+ else
+ return -EINVAL;
+
+ skeldev->capabilities = skeldev_conf->capabilities;
+ skeldev->num_queues = skeldev_conf->num_queues;
+
+ return 0;
+}
+
+static int skeleton_rawdev_start(struct rte_rawdev *dev)
+{
+ int ret = 0;
+ struct skeleton_rawdev *skeldev;
+ enum skeleton_firmware_state fw_state;
+ enum skeleton_device_state device_state;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ fw_state = skeldev->fw.firmware_state;
+ device_state = skeldev->device_state;
+
+ if (fw_state == SKELETON_FW_LOADED &&
+ device_state == SKELETON_DEV_STOPPED) {
+ skeldev->device_state = SKELETON_DEV_RUNNING;
+ } else {
+ SKELETON_PMD_ERR("Device not ready for starting");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void skeleton_rawdev_stop(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (dev) {
+ skeldev = skeleton_rawdev_get_priv(dev);
+ skeldev->device_state = SKELETON_DEV_STOPPED;
+ }
+}
+
+static void
+reset_queues(struct skeleton_rawdev *skeldev)
+{
+ int i;
+
+ for (i = 0; i < SKELETON_MAX_QUEUES; i++) {
+ skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;
+ skeldev->queues[i].state = SKELETON_QUEUE_DETACH;
+ }
+}
+
+static void
+reset_attribute_table(struct skeleton_rawdev *skeldev)
+{
+ int i;
+
+ for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
+ if (skeldev->attr[i].name) {
+ free(skeldev->attr[i].name);
+ skeldev->attr[i].name = NULL;
+ }
+ }
+}
+
+static int skeleton_rawdev_close(struct rte_rawdev *dev)
+{
+ int ret = 0, i;
+ struct skeleton_rawdev *skeldev;
+ enum skeleton_firmware_state fw_state;
+ enum skeleton_device_state device_state;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ fw_state = skeldev->fw.firmware_state;
+ device_state = skeldev->device_state;
+
+ reset_queues(skeldev);
+ reset_attribute_table(skeldev);
+
+ switch (fw_state) {
+ case SKELETON_FW_LOADED:
+ if (device_state == SKELETON_DEV_RUNNING) {
+ SKELETON_PMD_ERR("Cannot close running device");
+ ret = -EINVAL;
+ } else {
+ /* Probably call fw reset here */
+ skeldev->fw.firmware_state = SKELETON_FW_READY;
+ }
+ break;
+ case SKELETON_FW_READY:
+ case SKELETON_FW_ERROR:
+ default:
+ SKELETON_PMD_DEBUG("Device already in stopped state");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* Clear all allocated queues */
+ for (i = 0; i < SKELETON_MAX_QUEUES; i++)
+ clear_queue_bufs(i);
+
+ return ret;
+}
+
+static int skeleton_rawdev_reset(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ SKELETON_PMD_DEBUG("Resetting device");
+ skeldev->fw.firmware_state = SKELETON_FW_READY;
+
+ return 0;
+}
+
+static void skeleton_rawdev_queue_def_conf(struct rte_rawdev *dev,
+ uint16_t queue_id,
+ rte_rawdev_obj_t queue_conf)
+{
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_rawdev_queue *skelq;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev || !queue_conf)
+ return;
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+ skelq = &skeldev->queues[queue_id];
+
+ if (queue_id < SKELETON_MAX_QUEUES)
+ rte_memcpy(queue_conf, skelq,
+ sizeof(struct skeleton_rawdev_queue));
+}
+
+static void
+clear_queue_bufs(int queue_id)
+{
+ int i;
+
+ /* Clear buffers for queue_id */
+ for (i = 0; i < SKELETON_QUEUE_MAX_DEPTH; i++)
+ queue_buf[queue_id].bufs[i] = NULL;
+}
+
+static int skeleton_rawdev_queue_setup(struct rte_rawdev *dev,
+ uint16_t queue_id,
+ rte_rawdev_obj_t queue_conf)
+{
+ int ret = 0;
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_rawdev_queue *q;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev || !queue_conf)
+ return -EINVAL;
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+ q = &skeldev->queues[queue_id];
+
+ if (skeldev->num_queues > queue_id &&
+ q->depth < SKELETON_QUEUE_MAX_DEPTH) {
+ rte_memcpy(q, queue_conf,
+ sizeof(struct skeleton_rawdev_queue));
+ clear_queue_bufs(queue_id);
+ } else {
+ SKELETON_PMD_ERR("Invalid queue configuration");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int skeleton_rawdev_queue_release(struct rte_rawdev *dev,
+ uint16_t queue_id)
+{
+ int ret = 0;
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ if (skeldev->num_queues > queue_id) {
+ skeldev->queues[queue_id].state = SKELETON_QUEUE_DETACH;
+ skeldev->queues[queue_id].depth = SKELETON_QUEUE_DEF_DEPTH;
+ clear_queue_bufs(queue_id);
+ } else {
+ SKELETON_PMD_ERR("Invalid queue configuration");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static uint16_t skeleton_rawdev_queue_count(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+ return skeldev->num_queues;
+}
+
+static int skeleton_rawdev_get_attr(struct rte_rawdev *dev,
+ const char *attr_name,
+ uint64_t *attr_value)
+{
+ int i;
+ uint8_t done = 0;
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev || !attr_name || !attr_value) {
+ SKELETON_PMD_ERR("Invalid arguments for getting attributes");
+ return -EINVAL;
+ }
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
+ if (!skeldev->attr[i].name)
+ continue;
+
+ if (!strncmp(skeldev->attr[i].name, attr_name,
+ SKELETON_ATTRIBUTE_NAME_MAX)) {
+ *attr_value = skeldev->attr[i].value;
+ done = 1;
+ SKELETON_PMD_DEBUG("Attribute (%s) Value (%" PRIu64 ")",
+ attr_name, *attr_value);
+ break;
+ }
+ }
+
+ if (done)
+ return 0;
+
+ /* Attribute not found */
+ return -EINVAL;
+}
+
+static int skeleton_rawdev_set_attr(struct rte_rawdev *dev,
+ const char *attr_name,
+ const uint64_t attr_value)
+{
+ int i;
+ uint8_t done = 0;
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev || !attr_name) {
+ SKELETON_PMD_ERR("Invalid arguments for setting attributes");
+ return -EINVAL;
+ }
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ /* Check if attribute already exists */
+ for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
+ if (!skeldev->attr[i].name)
+ break;
+
+ if (!strncmp(skeldev->attr[i].name, attr_name,
+ SKELETON_ATTRIBUTE_NAME_MAX)) {
+ /* Update value */
+ skeldev->attr[i].value = attr_value;
+ done = 1;
+ break;
+ }
+ }
+
+ if (!done) {
+ if (i < (SKELETON_MAX_ATTRIBUTES - 1)) {
+ /* There is still space to insert one more */
+ skeldev->attr[i].name = strdup(attr_name);
+ if (!skeldev->attr[i].name)
+ return -ENOMEM;
+
+ skeldev->attr[i].value = attr_value;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int skeleton_rawdev_enqueue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ unsigned int i;
+ uint16_t q_id;
+ RTE_SET_USED(dev);
+
+ /* context is essentially the queue_id which is
+ * transferred as opaque object through the library layer. This can
+ * help in complex implementation which require more information than
+ * just an integer - for example, a queue-pair.
+ */
+ q_id = *((int *)context);
+
+ for (i = 0; i < count; i++)
+ queue_buf[q_id].bufs[i] = buffers[i]->buf_addr;
+
+ return i;
+}
+
+static int skeleton_rawdev_dequeue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ unsigned int i;
+ uint16_t q_id;
+ RTE_SET_USED(dev);
+
+ /* context is essentially the queue_id which is
+ * transferred as opaque object through the library layer. This can
+ * help in complex implementation which require more information than
+ * just an integer - for example, a queue-pair.
+ */
+ q_id = *((int *)context);
+
+ for (i = 0; i < count; i++)
+ buffers[i]->buf_addr = queue_buf[q_id].bufs[i];
+
+ return i;
+}
+
+static int skeleton_rawdev_dump(struct rte_rawdev *dev, FILE *f)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(f);
+
+ return 0;
+}
+
+static int skeleton_rawdev_firmware_status_get(struct rte_rawdev *dev,
+ rte_rawdev_obj_t status_info)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ if (status_info)
+ memcpy(status_info, &skeldev->fw.firmware_state,
+ sizeof(enum skeleton_firmware_state));
+
+ return 0;
+}
+
+
+static int skeleton_rawdev_firmware_version_get(
+ struct rte_rawdev *dev,
+ rte_rawdev_obj_t version_info)
+{
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_firmware_version_info *vi;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+ vi = version_info;
+
+ vi->major = skeldev->fw.firmware_version.major;
+ vi->minor = skeldev->fw.firmware_version.minor;
+ vi->subrel = skeldev->fw.firmware_version.subrel;
+
+ return 0;
+}
+
+static int skeleton_rawdev_firmware_load(struct rte_rawdev *dev,
+ rte_rawdev_obj_t firmware_buf)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ /* firmware_buf is a mmaped, possibly DMA'able area, buffer. Being
+ * dummy, all this does is check if firmware_buf is not NULL and
+ * sets the state of the firmware.
+ */
+ if (!firmware_buf)
+ return -EINVAL;
+
+ skeldev->fw.firmware_state = SKELETON_FW_LOADED;
+
+ return 0;
+}
+
+static int skeleton_rawdev_firmware_unload(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ skeldev->fw.firmware_state = SKELETON_FW_READY;
+
+ return 0;
+}
+
+static const struct rte_rawdev_ops skeleton_rawdev_ops = {
+ .dev_info_get = skeleton_rawdev_info_get,
+ .dev_configure = skeleton_rawdev_configure,
+ .dev_start = skeleton_rawdev_start,
+ .dev_stop = skeleton_rawdev_stop,
+ .dev_close = skeleton_rawdev_close,
+ .dev_reset = skeleton_rawdev_reset,
+
+ .queue_def_conf = skeleton_rawdev_queue_def_conf,
+ .queue_setup = skeleton_rawdev_queue_setup,
+ .queue_release = skeleton_rawdev_queue_release,
+ .queue_count = skeleton_rawdev_queue_count,
+
+ .attr_get = skeleton_rawdev_get_attr,
+ .attr_set = skeleton_rawdev_set_attr,
+
+ .enqueue_bufs = skeleton_rawdev_enqueue_bufs,
+ .dequeue_bufs = skeleton_rawdev_dequeue_bufs,
+
+ .dump = skeleton_rawdev_dump,
+
+ .xstats_get = NULL,
+ .xstats_get_names = NULL,
+ .xstats_get_by_name = NULL,
+ .xstats_reset = NULL,
+
+ .firmware_status_get = skeleton_rawdev_firmware_status_get,
+ .firmware_version_get = skeleton_rawdev_firmware_version_get,
+ .firmware_load = skeleton_rawdev_firmware_load,
+ .firmware_unload = skeleton_rawdev_firmware_unload,
+
+ .dev_selftest = test_rawdev_skeldev,
+};
+
+static int
+skeleton_rawdev_create(const char *name,
+ struct rte_vdev_device *vdev,
+ int socket_id)
+{
+ int ret = 0, i;
+ struct rte_rawdev *rawdev = NULL;
+ struct skeleton_rawdev *skeldev = NULL;
+
+ if (!name) {
+ SKELETON_PMD_ERR("Invalid name of the device!");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct skeleton_rawdev),
+ socket_id);
+ if (rawdev == NULL) {
+ SKELETON_PMD_ERR("Unable to allocate rawdevice");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ rawdev->dev_ops = &skeleton_rawdev_ops;
+ rawdev->device = &vdev->device;
+ rawdev->driver_name = vdev->device.driver->name;
+
+ skeldev = skeleton_rawdev_get_priv(rawdev);
+
+ skeldev->device_id = SKELETON_DEVICE_ID;
+ skeldev->vendor_id = SKELETON_VENDOR_ID;
+ skeldev->capabilities = SKELETON_DEFAULT_CAPA;
+
+ memset(&skeldev->fw, 0, sizeof(struct skeleton_firmware));
+
+ skeldev->fw.firmware_state = SKELETON_FW_READY;
+ skeldev->fw.firmware_version.major = SKELETON_MAJOR_VER;
+ skeldev->fw.firmware_version.minor = SKELETON_MINOR_VER;
+ skeldev->fw.firmware_version.subrel = SKELETON_SUB_VER;
+
+ skeldev->device_state = SKELETON_DEV_STOPPED;
+
+ /* Reset/set to default queue configuration for this device */
+ for (i = 0; i < SKELETON_MAX_QUEUES; i++) {
+ skeldev->queues[i].state = SKELETON_QUEUE_DETACH;
+ skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;
+ }
+
+ /* Clear all allocated queue buffers */
+ for (i = 0; i < SKELETON_MAX_QUEUES; i++)
+ clear_queue_bufs(i);
+
+ return ret;
+
+cleanup:
+ if (rawdev)
+ rte_rawdev_pmd_release(rawdev);
+
+ return ret;
+}
+
+static int
+skeleton_rawdev_destroy(const char *name)
+{
+ int ret;
+ struct rte_rawdev *rdev;
+
+ if (!name) {
+ SKELETON_PMD_ERR("Invalid device name");
+ return -EINVAL;
+ }
+
+ rdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rdev) {
+ SKELETON_PMD_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ /* rte_rawdev_close is called by pmd_release */
+ ret = rte_rawdev_pmd_release(rdev);
+ if (ret)
+ SKELETON_PMD_DEBUG("Device cleanup failed");
+
+ return 0;
+}
+
+static int
+skeldev_get_selftest(const char *key __rte_unused,
+ const char *value,
+ void *opaque)
+{
+ int *flag = opaque;
+ *flag = atoi(value);
+ return 0;
+}
+
+static int
+skeldev_parse_vdev_args(struct rte_vdev_device *vdev)
+{
+ int selftest = 0;
+ const char *name;
+ const char *params;
+
+ static const char *const args[] = {
+ SKELETON_SELFTEST_ARG,
+ NULL
+ };
+
+ name = rte_vdev_device_name(vdev);
+
+ params = rte_vdev_device_args(vdev);
+ if (params != NULL && params[0] != '\0') {
+ struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+ if (!kvlist) {
+ SKELETON_PMD_INFO(
+ "Ignoring unsupported params supplied '%s'",
+ name);
+ } else {
+ int ret = rte_kvargs_process(kvlist,
+ SKELETON_SELFTEST_ARG,
+ skeldev_get_selftest, &selftest);
+ if (ret != 0 || (selftest < 0 || selftest > 1)) {
+ SKELETON_PMD_ERR("%s: Error in parsing args",
+ name);
+ rte_kvargs_free(kvlist);
+ ret = -1; /* enforce if selftest is invalid */
+ return ret;
+ }
+ }
+
+ rte_kvargs_free(kvlist);
+ }
+
+ return selftest;
+}
+
+static int
+skeleton_rawdev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int selftest = 0, ret = 0;
+
+
+ name = rte_vdev_device_name(vdev);
+ /* More than one instance is not supported */
+ if (skeldev_init_once) {
+ SKELETON_PMD_ERR("Multiple instance not supported for %s",
+ name);
+ return -EINVAL;
+ }
+
+ SKELETON_PMD_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+ selftest = skeldev_parse_vdev_args(vdev);
+ /* In case of invalid argument, selftest != 1; ignore other values */
+
+ ret = skeleton_rawdev_create(name, vdev, rte_socket_id());
+ if (!ret) {
+ /* In case command line argument for 'selftest' was passed;
+ * if invalid arguments were passed, execution continues but
+ * without selftest.
+ */
+ if (selftest == 1)
+ test_rawdev_skeldev();
+ }
+
+ /* Device instance created; Second instance not possible */
+ skeldev_init_once = 1;
+
+ return ret;
+}
+
+static int
+skeleton_rawdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+
+ SKELETON_PMD_INFO("Closing %s on NUMA node %d", name, rte_socket_id());
+
+ ret = skeleton_rawdev_destroy(name);
+ if (!ret)
+ skeldev_init_once = 0;
+
+ return ret;
+}
+
+static struct rte_vdev_driver skeleton_pmd_drv = {
+ .probe = skeleton_rawdev_probe,
+ .remove = skeleton_rawdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(SKELETON_PMD_RAWDEV_NAME, skeleton_pmd_drv);
+
+RTE_INIT(skeleton_pmd_init_log)
+{
+ skeleton_pmd_logtype = rte_log_register("rawdev.skeleton");
+ if (skeleton_pmd_logtype >= 0)
+ rte_log_set_level(skeleton_pmd_logtype, RTE_LOG_INFO);
+}
diff --git a/src/spdk/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev.h b/src/spdk/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev.h
new file mode 100644
index 00000000..5045b592
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef __SKELETON_RAWDEV_H__
+#define __SKELETON_RAWDEV_H__
+
+#include <rte_rawdev.h>
+
+extern int skeleton_pmd_logtype;
+
+#define SKELETON_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, skeleton_pmd_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define SKELETON_PMD_FUNC_TRACE() SKELETON_PMD_LOG(DEBUG, ">>")
+
+#define SKELETON_PMD_DEBUG(fmt, args...) \
+ SKELETON_PMD_LOG(DEBUG, fmt, ## args)
+#define SKELETON_PMD_INFO(fmt, args...) \
+ SKELETON_PMD_LOG(INFO, fmt, ## args)
+#define SKELETON_PMD_ERR(fmt, args...) \
+ SKELETON_PMD_LOG(ERR, fmt, ## args)
+#define SKELETON_PMD_WARN(fmt, args...) \
+ SKELETON_PMD_LOG(WARNING, fmt, ## args)
+/* Macros for self test application */
+#define SKELETON_TEST_INFO SKELETON_PMD_INFO
+#define SKELETON_TEST_DEBUG SKELETON_PMD_DEBUG
+#define SKELETON_TEST_ERR SKELETON_PMD_ERR
+#define SKELETON_TEST_WARN SKELETON_PMD_WARN
+
+#define SKELETON_SELFTEST_ARG ("selftest")
+
+#define SKELETON_VENDOR_ID 0x10
+#define SKELETON_DEVICE_ID 0x01
+
+#define SKELETON_MAJOR_VER 1
+#define SKELETON_MINOR_VER 0
+#define SKELETON_SUB_VER 0
+
+#define SKELETON_MAX_QUEUES 1
+
+enum skeleton_firmware_state {
+ SKELETON_FW_READY,
+ SKELETON_FW_LOADED,
+ SKELETON_FW_ERROR
+};
+
+enum skeleton_device_state {
+ SKELETON_DEV_RUNNING,
+ SKELETON_DEV_STOPPED
+};
+
+enum skeleton_queue_state {
+ SKELETON_QUEUE_DETACH,
+ SKELETON_QUEUE_ATTACH
+};
+
+#define SKELETON_QUEUE_DEF_DEPTH 10
+#define SKELETON_QUEUE_MAX_DEPTH 25
+
+struct skeleton_firmware_version_info {
+ uint8_t major;
+ uint8_t minor;
+ uint8_t subrel;
+};
+
+struct skeleton_firmware {
+ /**< Device firmware information */
+ struct skeleton_firmware_version_info firmware_version;
+ /**< Device state */
+ enum skeleton_firmware_state firmware_state;
+
+};
+
+#define SKELETON_MAX_ATTRIBUTES 10
+#define SKELETON_ATTRIBUTE_NAME_MAX 20
+
+struct skeleton_rawdev_attributes {
+ /**< Name of the attribute */
+ char *name;
+ /**< Value or reference of value of attribute */
+ uint64_t value;
+};
+
+/**< Device supports firmware loading/unloading */
+#define SKELETON_CAPA_FW_LOAD 0x0001
+/**< Device supports firmware reset */
+#define SKELETON_CAPA_FW_RESET 0x0002
+/**< Device support queue based communication */
+#define SKELETON_CAPA_QUEUES 0x0004
+/**< Default Capabilities: FW_LOAD, FW_RESET, QUEUES */
+#define SKELETON_DEFAULT_CAPA 0x7
+
+struct skeleton_rawdev_queue {
+ uint8_t state;
+ uint32_t depth;
+};
+
+struct skeleton_rawdev {
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t num_queues;
+ /**< One of SKELETON_CAPA_* */
+ uint16_t capabilities;
+ /**< State of device; linked to firmware state */
+ enum skeleton_device_state device_state;
+ /**< Firmware configuration */
+ struct skeleton_firmware fw;
+ /**< Collection of all communication channels - which can be referred
+ * to as queues.
+ */
+ struct skeleton_rawdev_queue queues[SKELETON_MAX_QUEUES];
+ /**< Global table containing various pre-defined and user-defined
+ * attributes.
+ */
+ struct skeleton_rawdev_attributes attr[SKELETON_MAX_ATTRIBUTES];
+ struct rte_device *device;
+};
+
+struct skeleton_rawdev_conf {
+ uint16_t num_queues;
+ unsigned int capabilities;
+ enum skeleton_device_state device_state;
+ enum skeleton_firmware_state firmware_state;
+};
+
+static inline struct skeleton_rawdev *
+skeleton_rawdev_get_priv(const struct rte_rawdev *rawdev)
+{
+ return rawdev->dev_private;
+}
+
+int test_rawdev_skeldev(void);
+
+#endif /* __SKELETON_RAWDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c b/src/spdk/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
new file mode 100644
index 00000000..3405b898
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_dev.h>
+#include <rte_rawdev.h>
+#include <rte_bus_vdev.h>
+#include <rte_test.h>
+
+/* Using relative path as skeleton_rawdev is not part of exported headers */
+#include "skeleton_rawdev.h"
+
+#define TEST_DEV_ID 0
+#define TEST_DEV_NAME "rawdev_skeleton"
+
+#define SKELDEV_LOGS(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, skeleton_pmd_logtype, fmt "\n", \
+ ##args)
+
+#define SKELDEV_TEST_INFO(fmt, args...) \
+ SKELDEV_LOGS(INFO, fmt, ## args)
+#define SKELDEV_TEST_DEBUG(fmt, args...) \
+ SKELDEV_LOGS(DEBUG, fmt, ## args)
+
+#define SKELDEV_TEST_RUN(setup, teardown, test) \
+ skeldev_test_run(setup, teardown, test, #test)
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED -1
+
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+static int
+testsuite_setup(void)
+{
+ uint8_t count;
+ count = rte_rawdev_count();
+ if (!count) {
+ SKELDEV_TEST_INFO("\tNo existing rawdev; "
+ "Creating 'skeldev_rawdev'");
+ return rte_vdev_init(TEST_DEV_NAME, NULL);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static void local_teardown(void);
+
+static void
+testsuite_teardown(void)
+{
+ local_teardown();
+}
+
+static void
+local_teardown(void)
+{
+ rte_vdev_uninit(TEST_DEV_NAME);
+}
+
+static int
+test_rawdev_count(void)
+{
+ uint8_t count;
+ count = rte_rawdev_count();
+ RTE_TEST_ASSERT(count > 0, "Invalid rawdev count %" PRIu8, count);
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_get_dev_id(void)
+{
+ int ret;
+ ret = rte_rawdev_get_dev_id("invalid_rawdev_device");
+ RTE_TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d",
+ ret);
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_socket_id(void)
+{
+ int socket_id;
+ socket_id = rte_rawdev_socket_id(TEST_DEV_ID);
+ RTE_TEST_ASSERT(socket_id != -EINVAL,
+ "Failed to get socket_id %d", socket_id);
+ socket_id = rte_rawdev_socket_id(RTE_RAWDEV_MAX_DEVS);
+ RTE_TEST_ASSERT(socket_id == -EINVAL,
+ "Expected -EINVAL %d", socket_id);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_info_get(void)
+{
+ int ret;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf skel_conf = {0};
+
+ ret = rte_rawdev_info_get(TEST_DEV_ID, NULL);
+ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ rdev_info.dev_private = &skel_conf;
+
+ ret = rte_rawdev_info_get(TEST_DEV_ID, &rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get raw dev info");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_configure(void)
+{
+ int ret;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf rdev_conf_set = {0};
+ struct skeleton_rawdev_conf rdev_conf_get = {0};
+
+ /* Check invalid configuration */
+ ret = rte_rawdev_configure(TEST_DEV_ID, NULL);
+ RTE_TEST_ASSERT(ret == -EINVAL,
+ "Null configure; Expected -EINVAL, got %d", ret);
+
+ /* Valid configuration test */
+ rdev_conf_set.num_queues = 1;
+ rdev_conf_set.capabilities = SKELETON_CAPA_FW_LOAD |
+ SKELETON_CAPA_FW_RESET;
+
+ rdev_info.dev_private = &rdev_conf_set;
+ ret = rte_rawdev_configure(TEST_DEV_ID,
+ (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure rawdev (%d)", ret);
+
+ rdev_info.dev_private = &rdev_conf_get;
+ ret = rte_rawdev_info_get(TEST_DEV_ID,
+ (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to obtain rawdev configuration (%d)",
+ ret);
+
+ RTE_TEST_ASSERT_EQUAL(rdev_conf_set.num_queues,
+ rdev_conf_get.num_queues,
+ "Configuration test failed; num_queues (%d)(%d)",
+ rdev_conf_set.num_queues,
+ rdev_conf_get.num_queues);
+ RTE_TEST_ASSERT_EQUAL(rdev_conf_set.capabilities,
+ rdev_conf_get.capabilities,
+ "Configuration test failed; capabilities");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_queue_default_conf_get(void)
+{
+ int ret, i;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf rdev_conf_get = {0};
+ struct skeleton_rawdev_queue q = {0};
+
+ /* Get the current configuration */
+ rdev_info.dev_private = &rdev_conf_get;
+ ret = rte_rawdev_info_get(TEST_DEV_ID,
+ (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain rawdev configuration (%d)",
+ ret);
+
+ /* call to test_rawdev_configure would have set the num_queues = 1 */
+ RTE_TEST_ASSERT_SUCCESS(!(rdev_conf_get.num_queues > 0),
+ "Invalid number of queues (%d). Expected 1",
+ rdev_conf_get.num_queues);
+ /* All queues by default should have state = DETACH and
+ * depth = DEF_DEPTH
+ */
+ for (i = 0; i < rdev_conf_get.num_queues; i++) {
+ rte_rawdev_queue_conf_get(TEST_DEV_ID, i, &q);
+ RTE_TEST_ASSERT_EQUAL(q.depth, SKELETON_QUEUE_DEF_DEPTH,
+ "Invalid default depth of queue (%d)",
+ q.depth);
+ RTE_TEST_ASSERT_EQUAL(q.state, SKELETON_QUEUE_DETACH,
+ "Invalid default state of queue (%d)",
+ q.state);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_queue_count(void)
+{
+ unsigned int q_count;
+
+ /* Get the current configuration */
+ q_count = rte_rawdev_queue_count(TEST_DEV_ID);
+ RTE_TEST_ASSERT_EQUAL(q_count, 1, "Invalid queue count (%d)", q_count);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_queue_setup(void)
+{
+ int ret;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf rdev_conf_get = {0};
+ struct skeleton_rawdev_queue qset = {0};
+ struct skeleton_rawdev_queue qget = {0};
+
+ /* Get the current configuration */
+ rdev_info.dev_private = &rdev_conf_get;
+ ret = rte_rawdev_info_get(TEST_DEV_ID,
+ (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to obtain rawdev configuration (%d)",
+ ret);
+
+ /* call to test_rawdev_configure would have set the num_queues = 1 */
+ RTE_TEST_ASSERT_SUCCESS(!(rdev_conf_get.num_queues > 0),
+ "Invalid number of queues (%d). Expected 1",
+ rdev_conf_get.num_queues);
+
+ /* Modify the queue depth for Queue 0 and attach it */
+ qset.depth = 15;
+ qset.state = SKELETON_QUEUE_ATTACH;
+ ret = rte_rawdev_queue_setup(TEST_DEV_ID, 0, &qset);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue (%d)", ret);
+
+ /* Now, fetching the queue 0 should show depth as 15 */
+ ret = rte_rawdev_queue_conf_get(TEST_DEV_ID, 0, &qget);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get queue config (%d)", ret);
+
+ RTE_TEST_ASSERT_EQUAL(qset.depth, qget.depth,
+ "Failed to set queue depth: Need(%d), has(%d)",
+ qset.depth, qget.depth);
+
+ return TEST_SUCCESS;
+}
+
+/* After executing test_rawdev_queue_setup, queue_id=0 would have depth as 15.
+ * Releasing should set it back to default. state would set to DETACH
+ */
+static int
+test_rawdev_queue_release(void)
+{
+ int ret;
+ struct skeleton_rawdev_queue qget = {0};
+
+ /* Now, fetching the queue 0 should show depth as 100 */
+ ret = rte_rawdev_queue_release(TEST_DEV_ID, 0);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to release queue 0; (%d)", ret);
+
+ /* Now, fetching the queue 0 should show depth as default */
+ ret = rte_rawdev_queue_conf_get(TEST_DEV_ID, 0, &qget);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get queue config (%d)", ret);
+
+ RTE_TEST_ASSERT_EQUAL(qget.depth, SKELETON_QUEUE_DEF_DEPTH,
+ "Release of Queue 0 failed; (depth)");
+
+ RTE_TEST_ASSERT_EQUAL(qget.state, SKELETON_QUEUE_DETACH,
+ "Release of Queue 0 failed; (state)");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_attr_set_get(void)
+{
+ int ret;
+ int *dummy_value;
+ uint64_t ret_value;
+
+ /* Set an attribute and fetch it */
+ ret = rte_rawdev_set_attr(TEST_DEV_ID, "Test1", 100);
+ RTE_TEST_ASSERT(!ret, "Unable to set an attribute (Test1)");
+
+ dummy_value = malloc(sizeof(int));
+ if (!dummy_value)
+ RTE_TEST_ASSERT(1, "Unable to allocate memory (dummy_value)");
+
+ *dummy_value = 200;
+ ret = rte_rawdev_set_attr(TEST_DEV_ID, "Test2", (uintptr_t)dummy_value);
+
+ /* Check if attributes have been set */
+ ret = rte_rawdev_get_attr(TEST_DEV_ID, "Test1", &ret_value);
+ RTE_TEST_ASSERT_EQUAL(ret_value, 100,
+ "Attribute (Test1) not set correctly (%" PRIu64 ")",
+ ret_value);
+
+ ret_value = 0;
+ ret = rte_rawdev_get_attr(TEST_DEV_ID, "Test2", &ret_value);
+ RTE_TEST_ASSERT_EQUAL(*((int *)(uintptr_t)ret_value), 200,
+ "Attribute (Test2) not set correctly (%" PRIu64 ")",
+ ret_value);
+
+ free(dummy_value);
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_start_stop(void)
+{
+ int ret;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf rdev_conf_get = {0};
+ char *dummy_firmware = NULL;
+
+ /* Get the current configuration */
+ rdev_info.dev_private = &rdev_conf_get;
+
+ /* Load a firmware using a dummy address area */
+ dummy_firmware = rte_zmalloc("RAWDEV SKELETON", sizeof(int) * 10, 0);
+ RTE_TEST_ASSERT(dummy_firmware != NULL,
+ "Failed to create firmware memory backing");
+
+ ret = rte_rawdev_firmware_load(TEST_DEV_ID, dummy_firmware);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Firmware loading failed (%d)", ret);
+
+ /* Skeleton doesn't do anything with the firmware area - that is dummy
+ * and can be removed.
+ */
+ rte_free(dummy_firmware);
+ dummy_firmware = NULL;
+
+ rte_rawdev_start(TEST_DEV_ID);
+ ret = rte_rawdev_info_get(TEST_DEV_ID, (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to obtain rawdev configuration (%d)",
+ ret);
+ RTE_TEST_ASSERT_EQUAL(rdev_conf_get.device_state, SKELETON_DEV_RUNNING,
+ "Device start failed. State is (%d)",
+ rdev_conf_get.device_state);
+
+ rte_rawdev_stop(TEST_DEV_ID);
+ ret = rte_rawdev_info_get(TEST_DEV_ID, (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to obtain rawdev configuration (%d)",
+ ret);
+ RTE_TEST_ASSERT_EQUAL(rdev_conf_get.device_state, SKELETON_DEV_STOPPED,
+ "Device stop failed. State is (%d)",
+ rdev_conf_get.device_state);
+
+ /* Unloading the firmware once device is stopped */
+ ret = rte_rawdev_firmware_unload(TEST_DEV_ID);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to unload firmware (%d)", ret);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_enqdeq(void)
+{
+ int ret;
+ unsigned int count = 1;
+ uint16_t queue_id = 0;
+ struct rte_rawdev_buf buffers[1];
+ struct rte_rawdev_buf *deq_buffers = NULL;
+
+ buffers[0].buf_addr = malloc(strlen(TEST_DEV_NAME) + 3);
+ if (!buffers[0].buf_addr)
+ goto cleanup;
+ snprintf(buffers[0].buf_addr, strlen(TEST_DEV_NAME) + 2, "%s%d",
+ TEST_DEV_NAME, 0);
+
+ ret = rte_rawdev_enqueue_buffers(TEST_DEV_ID,
+ (struct rte_rawdev_buf **)&buffers,
+ count, &queue_id);
+ RTE_TEST_ASSERT_EQUAL((unsigned int)ret, count,
+ "Unable to enqueue buffers");
+
+ deq_buffers = malloc(sizeof(struct rte_rawdev_buf) * count);
+ if (!deq_buffers)
+ goto cleanup;
+
+ ret = rte_rawdev_dequeue_buffers(TEST_DEV_ID,
+ (struct rte_rawdev_buf **)&deq_buffers,
+ count, &queue_id);
+ RTE_TEST_ASSERT_EQUAL((unsigned int)ret, count,
+ "Unable to dequeue buffers");
+
+ if (deq_buffers)
+ free(deq_buffers);
+
+ return TEST_SUCCESS;
+cleanup:
+ if (buffers[0].buf_addr)
+ free(buffers[0].buf_addr);
+
+ return TEST_FAILED;
+}
+
+static void skeldev_test_run(int (*setup)(void),
+ void (*teardown)(void),
+ int (*test)(void),
+ const char *name)
+{
+ int ret = 0;
+
+ if (setup) {
+ ret = setup();
+ if (ret < 0) {
+ SKELDEV_TEST_INFO("Error setting up test %s", name);
+ unsupported++;
+ }
+ }
+
+ if (test) {
+ ret = test();
+ if (ret < 0) {
+ failed++;
+ SKELDEV_TEST_INFO("%s Failed", name);
+ } else {
+ passed++;
+ SKELDEV_TEST_DEBUG("%s Passed", name);
+ }
+ }
+
+ if (teardown)
+ teardown();
+
+ total++;
+}
+
+int
+test_rawdev_skeldev(void)
+{
+ testsuite_setup();
+
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_count);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_get_dev_id);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_socket_id);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_info_get);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_configure);
+ SKELDEV_TEST_RUN(test_rawdev_configure, NULL,
+ test_rawdev_queue_default_conf_get);
+ SKELDEV_TEST_RUN(test_rawdev_configure, NULL, test_rawdev_queue_setup);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_queue_count);
+ SKELDEV_TEST_RUN(test_rawdev_queue_setup, NULL,
+ test_rawdev_queue_release);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_attr_set_get);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_start_stop);
+ SKELDEV_TEST_RUN(test_rawdev_queue_setup, NULL, test_rawdev_enqdeq);
+
+ testsuite_teardown();
+
+ SKELDEV_TEST_INFO("Total tests : %d", total);
+ SKELDEV_TEST_INFO("Passed : %d", passed);
+ SKELDEV_TEST_INFO("Failed : %d", failed);
+ SKELDEV_TEST_INFO("Not supported : %d", unsupported);
+
+ if (failed)
+ return -1;
+
+ return 0;
+};